OpenCores
URL https://opencores.org/ocsvn/openrisc_me/openrisc_me/trunk

Subversion Repositories openrisc_me

[/] [openrisc/] [trunk/] [gnu-src/] [gcc-4.2.2/] [gcc/] [config/] [avr/] [libgcc.S] - Blame information for rev 38

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 38 julius
/*  -*- Mode: Asm -*-  */
2
/* Copyright (C) 1998, 1999, 2000 Free Software Foundation, Inc.
3
   Contributed by Denis Chertykov <denisc@overta.ru>
4
 
5
This file is free software; you can redistribute it and/or modify it
6
under the terms of the GNU General Public License as published by the
7
Free Software Foundation; either version 2, or (at your option) any
8
later version.
9
 
10
In addition to the permissions in the GNU General Public License, the
11
Free Software Foundation gives you unlimited permission to link the
12
compiled version of this file into combinations with other programs,
13
and to distribute those combinations without any restriction coming
14
from the use of this file.  (The General Public License restrictions
15
do apply in other respects; for example, they cover modification of
16
the file, and distribution when not linked into a combine
17
executable.)
18
 
19
This file is distributed in the hope that it will be useful, but
20
WITHOUT ANY WARRANTY; without even the implied warranty of
21
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
22
General Public License for more details.
23
 
24
You should have received a copy of the GNU General Public License
25
along with this program; see the file COPYING.  If not, write to
26
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
27
Boston, MA 02110-1301, USA.  */
28
 
29
/* Register and I/O-address conventions fixed by avr-gcc.  */
#define __zero_reg__ r1         /* always holds zero (except briefly around MUL) */
#define __tmp_reg__ r0          /* scratch register, never live across calls */
#define __SREG__ 0x3f           /* status register I/O address */
#define __SP_H__ 0x3e           /* stack pointer high byte I/O address */
#define __SP_L__ 0x3d           /* stack pointer low byte I/O address */

/* Most of the functions here are called directly from avr.md
   patterns, instead of using the standard libcall mechanisms.
   This can make better code because GCC knows exactly which
   of the call-used registers (not all of them) are clobbered.  */

        .section .text.libgcc, "ax", @progbits
41
 
42
        /* mov_l: copy the low byte of a register pair.  On cores with
           MOVW this moves the whole pair in one instruction (and the
           companion mov_h expands to nothing); otherwise a plain MOV.
           Use only on register pairs together with mov_h.  */
        .macro  mov_l  r_dest, r_src
#if defined (__AVR_HAVE_MOVW__)
        movw    \r_dest, \r_src         ; moves both bytes at once
#else
        mov     \r_dest, \r_src
#endif
        .endm
49
 
50
        /* mov_h: copy the high byte of a register pair.  Expands to
           nothing when MOVW exists, because the paired mov_l already
           moved both bytes.  */
        .macro  mov_h  r_dest, r_src
#if defined (__AVR_HAVE_MOVW__)
        ; empty -- mov_l already copied the pair via MOVW
#else
        mov     \r_dest, \r_src
#endif
        .endm
57
 
58
/* Note: mulqi3, mulhi3 are open-coded on the enhanced core.  */
#if !defined (__AVR_ENHANCED__)
/*******************************************************
               Multiplication  8 x 8
*******************************************************/
#if defined (L_mulqi3)

#define r_arg2  r22             /* multiplicand */
#define r_arg1  r24             /* multiplier */
#define r_res   __tmp_reg__     /* result */

/* __mulqi3: 8 x 8 -> 8 bit multiply by shift-and-add.
   In:  r24 = multiplier, r22 = multiplicand.
   Out: r24 = low 8 bits of the product (the same for signed and
        unsigned operands, so one routine serves both).
   Clobbers: r22, __tmp_reg__, flags.  */
        .global __mulqi3
        .func   __mulqi3
__mulqi3:
        clr     r_res           ; clear result
__mulqi3_loop:
        sbrc    r_arg1,0        ; if multiplier bit 0 is set ...
        add     r_res,r_arg2    ; ... accumulate current multiplicand
        add     r_arg2,r_arg2   ; shift multiplicand left
        breq    __mulqi3_exit   ; done once multiplicand becomes 0
        lsr     r_arg1          ; next multiplier bit into bit 0
        brne    __mulqi3_loop   ; loop while multiplier != 0
__mulqi3_exit:
        mov     r_arg1,r_res    ; result to return register
        ret

#undef r_arg2
#undef r_arg1
#undef r_res

.endfunc
#endif  /* defined (L_mulqi3) */
90
 
91
#if defined (L_mulqihi3)
/* __mulqihi3: signed 8 x 8 -> 16 bit multiply.
   Sign-extends both 8-bit arguments to 16 bits, then tail-jumps
   into __mulhi3.
   In:  r24 = arg1, r22 = arg2.  Out: r25:r24 = product.
   BUG FIX: the previous code had "dec r22" for the second operand,
   which corrupted the low byte of the multiplicand instead of
   writing 0xff into its sign-extension byte r23; any negative arg2
   produced a wrong product.  Upstream GCC uses "dec r23".  */
        .global __mulqihi3
        .func   __mulqihi3
__mulqihi3:
        clr     r25             ; sign-extend arg1: r25 = 0 ...
        sbrc    r24, 7
        dec     r25             ; ... or 0xff if arg1 negative
        clr     r23             ; sign-extend arg2: r23 = 0 ...
        sbrc    r22, 7
        dec     r23             ; ... or 0xff if arg2 negative (was r22)
        rjmp    __mulhi3        ; tail call: 16 x 16 multiply
        .endfunc
#endif /* defined (L_mulqihi3) */
104
 
105
#if defined (L_umulqihi3)
/* __umulqihi3: unsigned 8 x 8 -> 16 bit multiply.
   Zero-extends both 8-bit arguments and tail-jumps into __mulhi3.
   In:  r24 = arg1, r22 = arg2.  Out: r25:r24 = product.  */
        .global __umulqihi3
        .func   __umulqihi3
__umulqihi3:
        clr     r25             ; high byte of arg1 := 0
        clr     r23             ; high byte of arg2 := 0
        rjmp    __mulhi3        ; tail call: 16 x 16 multiply
        .endfunc
#endif /* defined (L_umulqihi3) */
114
 
115
/*******************************************************
               Multiplication  16 x 16
*******************************************************/
#if defined (L_mulhi3)
#define r_arg1L r24             /* multiplier Low */
#define r_arg1H r25             /* multiplier High */
#define r_arg2L r22             /* multiplicand Low */
#define r_arg2H r23             /* multiplicand High */
#define r_resL  __tmp_reg__     /* result Low */
#define r_resH  r21             /* result High */

/* __mulhi3: 16 x 16 -> 16 bit multiply by shift-and-add.
   In:  r25:r24 = multiplier, r23:r22 = multiplicand.
   Out: r25:r24 = low 16 bits of product (same for signed and
        unsigned operands).
   Clobbers: r21, r22, r23, __tmp_reg__, flags.  */
        .global __mulhi3
        .func   __mulhi3
__mulhi3:
        clr     r_resH          ; clear result
        clr     r_resL          ; clear result
__mulhi3_loop:
        sbrs    r_arg1L,0       ; test multiplier bit 0
        rjmp    __mulhi3_skip1
        add     r_resL,r_arg2L  ; result += multiplicand
        adc     r_resH,r_arg2H
__mulhi3_skip1:
        add     r_arg2L,r_arg2L ; shift multiplicand left
        adc     r_arg2H,r_arg2H

        cp      r_arg2L,__zero_reg__
        cpc     r_arg2H,__zero_reg__
        breq    __mulhi3_exit   ; done once multiplicand becomes 0

        lsr     r_arg1H         ; gets LSB of multiplier
        ror     r_arg1L
        sbiw    r_arg1L,0       ; test whole 16-bit multiplier
        brne    __mulhi3_loop   ; loop while multiplier != 0
__mulhi3_exit:
        mov     r_arg1H,r_resH  ; result to return register
        mov     r_arg1L,r_resL
        ret

#undef r_arg1L
#undef r_arg1H
#undef r_arg2L
#undef r_arg2H
#undef r_resL
#undef r_resH

.endfunc
#endif /* defined (L_mulhi3) */
#endif /* !defined (__AVR_ENHANCED__) */
163
 
164
#if defined (L_mulhisi3)
/* __mulhisi3: signed 16 x 16 -> 32 bit multiply.
   Moves arg1 into the multiplicand slot, sign-extends both operands
   to 32 bits and tail-jumps into __mulsi3.
   In:  r25:r24 = arg1, r23:r22 = arg2.  Out: r25:r22 = product.  */
        .global __mulhisi3
        .func   __mulhisi3
__mulhisi3:
        mov_l   r18, r24        ; arg1 -> multiplicand low (r19:r18)
        mov_h   r19, r25
        clr     r24             ; sign-extend arg2 into r25:r24
        sbrc    r23, 7
        dec     r24             ; 0xff if arg2 negative
        mov     r25, r24
        clr     r20             ; sign-extend arg1 into r21:r20
        sbrc    r19, 7
        dec     r20             ; 0xff if arg1 negative
        mov     r21, r20
        rjmp    __mulsi3        ; tail call: 32 x 32 multiply
        .endfunc
#endif /* defined (L_mulhisi3) */
181
 
182
#if defined (L_umulhisi3)
/* __umulhisi3: unsigned 16 x 16 -> 32 bit multiply.
   Zero-extends both operands to 32 bits and tail-jumps into __mulsi3.
   In:  r25:r24 = arg1, r23:r22 = arg2.  Out: r25:r22 = product.  */
        .global __umulhisi3
        .func   __umulhisi3
__umulhisi3:
        mov_l   r18, r24        ; arg1 -> multiplicand low (r19:r18)
        mov_h   r19, r25
        clr     r24             ; zero-extend arg2 (r25:r24 := 0)
        clr     r25
        clr     r20             ; zero-extend arg1 (r21:r20 := 0)
        clr     r21
        rjmp    __mulsi3        ; tail call: 32 x 32 multiply
        .endfunc
#endif /* defined (L_umulhisi3) */
195
 
196
#if defined (L_mulsi3)
/*******************************************************
               Multiplication  32 x 32
*******************************************************/
#define r_arg1L  r22            /* multiplier Low */
#define r_arg1H  r23
#define r_arg1HL r24
#define r_arg1HH r25            /* multiplier High */


#define r_arg2L  r18            /* multiplicand Low */
#define r_arg2H  r19
#define r_arg2HL r20
#define r_arg2HH r21            /* multiplicand High */

#define r_resL   r26            /* result Low */
#define r_resH   r27
#define r_resHL  r30
#define r_resHH  r31            /* result High */


/* __mulsi3: 32 x 32 -> 32 bit multiply.
   In:  r25:r22 = multiplier, r21:r18 = multiplicand.
   Out: r25:r22 = low 32 bits of the product (same for signed and
        unsigned operands).
   Enhanced core: schoolbook sum of byte partial products using MUL
   (partial products above byte 3 are simply dropped).
   Classic core: shift-and-add loop.
   Clobbers: r18-r21, r26, r27, r30, r31, r0; r1 is restored to 0.  */
        .global __mulsi3
        .func   __mulsi3
__mulsi3:
#if defined (__AVR_ENHANCED__)
        mul     r_arg1L, r_arg2L        ; byte0*byte0 -> result bytes 0..1
        movw    r_resL, r0
        mul     r_arg1H, r_arg2H        ; byte1*byte1 -> result bytes 2..3
        movw    r_resHL, r0
        mul     r_arg1HL, r_arg2L       ; byte2*byte0 -> bytes 2..3
        add     r_resHL, r0
        adc     r_resHH, r1
        mul     r_arg1L, r_arg2HL       ; byte0*byte2 -> bytes 2..3
        add     r_resHL, r0
        adc     r_resHH, r1
        mul     r_arg1HH, r_arg2L       ; byte3*byte0 -> only byte 3 kept
        add     r_resHH, r0
        mul     r_arg1HL, r_arg2H       ; byte2*byte1 -> only byte 3 kept
        add     r_resHH, r0
        mul     r_arg1H, r_arg2HL       ; byte1*byte2 -> only byte 3 kept
        add     r_resHH, r0
        mul     r_arg1L, r_arg2HH       ; byte0*byte3 -> only byte 3 kept
        add     r_resHH, r0
        clr     r_arg1HH        ; use instead of __zero_reg__ to add carry
        mul     r_arg1H, r_arg2L        ; byte1*byte0 -> bytes 1..2
        add     r_resH, r0
        adc     r_resHL, r1
        adc     r_resHH, r_arg1HH ; add carry
        mul     r_arg1L, r_arg2H        ; byte0*byte1 -> bytes 1..2
        add     r_resH, r0
        adc     r_resHL, r1
        adc     r_resHH, r_arg1HH ; add carry
        movw    r_arg1L, r_resL         ; result to return registers
        movw    r_arg1HL, r_resHL
        clr     r1              ; __zero_reg__ clobbered by "mul"
        ret
#else
        clr     r_resHH         ; clear result
        clr     r_resHL         ; clear result
        clr     r_resH          ; clear result
        clr     r_resL          ; clear result
__mulsi3_loop:
        sbrs    r_arg1L,0       ; test multiplier bit 0
        rjmp    __mulsi3_skip1
        add     r_resL,r_arg2L          ; result += multiplicand
        adc     r_resH,r_arg2H
        adc     r_resHL,r_arg2HL
        adc     r_resHH,r_arg2HH
__mulsi3_skip1:
        add     r_arg2L,r_arg2L         ; shift multiplicand left
        adc     r_arg2H,r_arg2H
        adc     r_arg2HL,r_arg2HL
        adc     r_arg2HH,r_arg2HH

        lsr     r_arg1HH        ; gets LSB of multiplier
        ror     r_arg1HL
        ror     r_arg1H
        ror     r_arg1L
        brne    __mulsi3_loop   ; low byte of multiplier still non-zero
        sbiw    r_arg1HL,0      ; test the three upper bytes
        cpc     r_arg1H,r_arg1L ; (r_arg1L is known zero here)
        brne    __mulsi3_loop           ; exit if multiplier = 0
__mulsi3_exit:
        mov_h   r_arg1HH,r_resHH        ; result to return register
        mov_l   r_arg1HL,r_resHL
        mov_h   r_arg1H,r_resH
        mov_l   r_arg1L,r_resL
        ret
#endif /* !defined (__AVR_ENHANCED__) */
#undef r_arg1L
#undef r_arg1H
#undef r_arg1HL
#undef r_arg1HH


#undef r_arg2L
#undef r_arg2H
#undef r_arg2HL
#undef r_arg2HH

#undef r_resL
#undef r_resH
#undef r_resHL
#undef r_resHH

.endfunc
#endif /* defined (L_mulsi3) */
303
 
304
/*******************************************************
       Division 8 / 8 => (result + remainder)
*******************************************************/
#define r_rem   r25     /* remainder */
#define r_arg1  r24     /* dividend, quotient */
#define r_arg2  r22     /* divisor */
#define r_cnt   r23     /* loop count */

#if defined (L_udivmodqi4)
/* __udivmodqi4: unsigned 8 / 8 division.
   In:  r24 = dividend, r22 = divisor.
   Out: r24 = quotient, r25 = remainder.  Clobbers r23, flags.
   Restoring shift/subtract division; each quotient bit is produced
   INVERTED in carry and rotated into r_arg1, hence the final COM.
   NOTE(review): behaviour for divisor == 0 is unspecified here, as
   in C.  */
        .global __udivmodqi4
        .func   __udivmodqi4
__udivmodqi4:
        sub     r_rem,r_rem     ; clear remainder and carry
        ldi     r_cnt,9         ; init loop counter (8 bits + entry shift)
        rjmp    __udivmodqi4_ep ; jump to entry point
__udivmodqi4_loop:
        rol     r_rem           ; shift dividend into remainder
        cp      r_rem,r_arg2    ; compare remainder & divisor
        brcs    __udivmodqi4_ep ; remainder < divisor: quotient bit 0 (inverted)
        sub     r_rem,r_arg2    ; reduce remainder by divisor
__udivmodqi4_ep:
        rol     r_arg1          ; shift dividend (with CARRY)
        dec     r_cnt           ; decrement loop counter
        brne    __udivmodqi4_loop
        com     r_arg1          ; complement result
                                ; because C flag was complemented in loop
        ret
        .endfunc
#endif /* defined (L_udivmodqi4) */
333
 
334
#if defined (L_divmodqi4)
/* __divmodqi4: signed 8 / 8 division.
   In:  r24 = dividend, r22 = divisor.
   Out: r24 = quotient (sign = XOR of operand signs, kept in r0.7),
        r25 = remainder (sign follows the dividend, kept in T).
   Divides the absolute values via __udivmodqi4, then fixes signs.  */
        .global __divmodqi4
        .func   __divmodqi4
__divmodqi4:
        bst     r_arg1,7        ; store sign of dividend in T
        mov     __tmp_reg__,r_arg1
        eor     __tmp_reg__,r_arg2; r0.7 is sign of result
        sbrc    r_arg1,7
        neg     r_arg1          ; dividend negative : negate
        sbrc    r_arg2,7
        neg     r_arg2          ; divisor negative : negate
        rcall   __udivmodqi4    ; do the unsigned div/mod
        brtc    __divmodqi4_1   ; dividend was non-negative: skip
        neg     r_rem           ; correct remainder sign
__divmodqi4_1:
        sbrc    __tmp_reg__,7   ; result sign bit set?
        neg     r_arg1          ; correct result sign
__divmodqi4_exit:
        ret
        .endfunc
#endif /* defined (L_divmodqi4) */

#undef r_rem
#undef r_arg1
#undef r_arg2
#undef r_cnt
360
 
361
 
362
/*******************************************************
       Division 16 / 16 => (result + remainder)
*******************************************************/
#define r_remL  r26     /* remainder Low */
#define r_remH  r27     /* remainder High */

/* return: remainder */
#define r_arg1L r24     /* dividend Low */
#define r_arg1H r25     /* dividend High */

/* return: quotient */
#define r_arg2L r22     /* divisor Low */
#define r_arg2H r23     /* divisor High */

#define r_cnt   r21     /* loop count */

#if defined (L_udivmodhi4)
/* __udivmodhi4: unsigned 16 / 16 division.
   In:  r25:r24 = dividend, r23:r22 = divisor.
   Out: r23:r22 = quotient, r25:r24 = remainder (div() layout).
   Clobbers r21, r26, r27, flags.
   Restoring division; quotient bits are collected inverted and fixed
   with the COMs at the end.  */
        .global __udivmodhi4
        .func   __udivmodhi4
__udivmodhi4:
        sub     r_remL,r_remL
        sub     r_remH,r_remH   ; clear remainder and carry
        ldi     r_cnt,17        ; init loop counter (16 bits + entry shift)
        rjmp    __udivmodhi4_ep ; jump to entry point
__udivmodhi4_loop:
        rol     r_remL          ; shift dividend into remainder
        rol     r_remH
        cp      r_remL,r_arg2L  ; compare remainder & divisor
        cpc     r_remH,r_arg2H
        brcs    __udivmodhi4_ep ; remainder < divisor
        sub     r_remL,r_arg2L  ; reduce remainder by divisor
        sbc     r_remH,r_arg2H
__udivmodhi4_ep:
        rol     r_arg1L         ; shift dividend (with CARRY)
        rol     r_arg1H
        dec     r_cnt           ; decrement loop counter
        brne    __udivmodhi4_loop
        com     r_arg1L         ; un-invert the collected quotient bits
        com     r_arg1H
; div/mod results to return registers, as for the div() function
        mov_l   r_arg2L, r_arg1L        ; quotient
        mov_h   r_arg2H, r_arg1H
        mov_l   r_arg1L, r_remL         ; remainder
        mov_h   r_arg1H, r_remH
        ret
        .endfunc
#endif /* defined (L_udivmodhi4) */
409
 
410
#if defined (L_divmodhi4)
/* __divmodhi4: signed 16 / 16 division (also exported as _div for
   the libc div() function).
   In:  r25:r24 = dividend, r23:r22 = divisor.
   Out: r23:r22 = quotient, r25:r24 = remainder.
   T = dividend sign (controls remainder fixup in __divmodhi4_neg1);
   r0.7 = XOR of operand signs (controls quotient fixup).  */
        .global __divmodhi4
        .func   __divmodhi4
__divmodhi4:
        .global _div
_div:
        bst     r_arg1H,7       ; store sign of dividend in T
        mov     __tmp_reg__,r_arg1H
        eor     __tmp_reg__,r_arg2H   ; r0.7 is sign of result
        rcall   __divmodhi4_neg1 ; dividend negative : negate
        sbrc    r_arg2H,7
        rcall   __divmodhi4_neg2 ; divisor negative : negate
        rcall   __udivmodhi4    ; do the unsigned div/mod
        rcall   __divmodhi4_neg1 ; correct remainder sign
        tst     __tmp_reg__     ; quotient sign negative?
        brpl    __divmodhi4_exit
__divmodhi4_neg2:               ; 16-bit negate of r_arg2H:L
        com     r_arg2H
        neg     r_arg2L         ; correct divisor/result sign
        sbci    r_arg2H,0xff    ; propagate borrow into high byte
__divmodhi4_exit:
        ret
__divmodhi4_neg1:               ; negate r_arg1H:L iff T set
        brtc    __divmodhi4_exit
        com     r_arg1H
        neg     r_arg1L         ; correct dividend/remainder sign
        sbci    r_arg1H,0xff
        ret
        .endfunc
#endif /* defined (L_divmodhi4) */

#undef r_remH
#undef r_remL

#undef r_arg1H
#undef r_arg1L

#undef r_arg2H
#undef r_arg2L

#undef r_cnt
451
 
452
/*******************************************************
       Division 32 / 32 => (result + remainder)
*******************************************************/
#define r_remHH r31     /* remainder High */
#define r_remHL r30
#define r_remH  r27
#define r_remL  r26     /* remainder Low */

/* return: remainder */
#define r_arg1HH r25    /* dividend High */
#define r_arg1HL r24
#define r_arg1H  r23
#define r_arg1L  r22    /* dividend Low */

/* return: quotient */
#define r_arg2HH r21    /* divisor High */
#define r_arg2HL r20
#define r_arg2H  r19
#define r_arg2L  r18    /* divisor Low */

#define r_cnt __zero_reg__  /* loop count (0 after the loop!) */

#if defined (L_udivmodsi4)
/* __udivmodsi4: unsigned 32 / 32 division.
   In:  r25:r22 = dividend, r21:r18 = divisor.
   Out: r21:r18 = quotient, r25:r22 = remainder (ldiv() layout).
   Clobbers r26, r27, r30, r31, flags.  __zero_reg__ doubles as the
   loop counter and is 0 again when the loop terminates, so the
   zero-register invariant is preserved on return.  */
        .global __udivmodsi4
        .func   __udivmodsi4
__udivmodsi4:
        ldi     r_remL, 33      ; init loop counter (32 bits + entry shift)
        mov     r_cnt, r_remL
        sub     r_remL,r_remL
        sub     r_remH,r_remH   ; clear remainder and carry
        mov_l   r_remHL, r_remL
        mov_h   r_remHH, r_remH
        rjmp    __udivmodsi4_ep ; jump to entry point
__udivmodsi4_loop:
        rol     r_remL          ; shift dividend into remainder
        rol     r_remH
        rol     r_remHL
        rol     r_remHH
        cp      r_remL,r_arg2L  ; compare remainder & divisor
        cpc     r_remH,r_arg2H
        cpc     r_remHL,r_arg2HL
        cpc     r_remHH,r_arg2HH
        brcs    __udivmodsi4_ep ; remainder < divisor
        sub     r_remL,r_arg2L  ; reduce remainder by divisor
        sbc     r_remH,r_arg2H
        sbc     r_remHL,r_arg2HL
        sbc     r_remHH,r_arg2HH
__udivmodsi4_ep:
        rol     r_arg1L         ; shift dividend (with CARRY)
        rol     r_arg1H
        rol     r_arg1HL
        rol     r_arg1HH
        dec     r_cnt           ; decrement loop counter
        brne    __udivmodsi4_loop
                                ; __zero_reg__ now restored (r_cnt == 0)
        com     r_arg1L         ; un-invert the collected quotient bits
        com     r_arg1H
        com     r_arg1HL
        com     r_arg1HH
; div/mod results to return registers, as for the ldiv() function
        mov_l   r_arg2L,  r_arg1L       ; quotient
        mov_h   r_arg2H,  r_arg1H
        mov_l   r_arg2HL, r_arg1HL
        mov_h   r_arg2HH, r_arg1HH
        mov_l   r_arg1L,  r_remL        ; remainder
        mov_h   r_arg1H,  r_remH
        mov_l   r_arg1HL, r_remHL
        mov_h   r_arg1HH, r_remHH
        ret
        .endfunc
#endif /* defined (L_udivmodsi4) */
523
 
524
#if defined (L_divmodsi4)
/* __divmodsi4: signed 32 / 32 division.
   In:  r25:r22 = dividend, r21:r18 = divisor.
   Out: r21:r18 = quotient, r25:r22 = remainder (sign follows the
        dividend).
   T = dividend sign; r0.7 = XOR of operand signs (quotient sign).  */
        .global __divmodsi4
        .func   __divmodsi4
__divmodsi4:
        bst     r_arg1HH,7      ; store sign of dividend in T
        mov     __tmp_reg__,r_arg1HH
        eor     __tmp_reg__,r_arg2HH   ; r0.7 is sign of result
        rcall   __divmodsi4_neg1 ; dividend negative : negate
        sbrc    r_arg2HH,7
        rcall   __divmodsi4_neg2 ; divisor negative : negate
        rcall   __udivmodsi4    ; do the unsigned div/mod
        rcall   __divmodsi4_neg1 ; correct remainder sign
        rol     __tmp_reg__     ; quotient sign bit -> C
        brcc    __divmodsi4_exit
__divmodsi4_neg2:               ; 32-bit negate of r_arg2HH..L:
        com     r_arg2HH        ; COM the upper bytes, NEG the low
        com     r_arg2HL        ; byte, then propagate the borrow
        com     r_arg2H         ; upward with SBCI 0xff
        neg     r_arg2L         ; correct divisor/quotient sign
        sbci    r_arg2H,0xff
        sbci    r_arg2HL,0xff
        sbci    r_arg2HH,0xff
__divmodsi4_exit:
        ret
__divmodsi4_neg1:               ; negate r_arg1HH..L iff T set
        brtc    __divmodsi4_exit
        com     r_arg1HH
        com     r_arg1HL
        com     r_arg1H
        neg     r_arg1L         ; correct dividend/remainder sign
        sbci    r_arg1H, 0xff
        sbci    r_arg1HL,0xff
        sbci    r_arg1HH,0xff
        ret
        .endfunc
#endif /* defined (L_divmodsi4) */
560
 
561
/**********************************
 * This is a prologue subroutine
 **********************************/
#if defined (L_prologue)

/* __prologue_saves__: out-of-line common function prologue.
   Pushes the call-saved registers r2-r17 and the frame pointer
   r29:r28, then sets Y = SP - X (X = r27:r26 supplies the frame
   size from the caller) and writes Y back to SP with interrupts
   disabled across the two-byte SP update.  Returns to the caller's
   code via IJMP, i.e. the caller must have loaded Z with the
   continuation address before jumping here.  */
        .global __prologue_saves__
        .func   __prologue_saves__
__prologue_saves__:
        push r2
        push r3
        push r4
        push r5
        push r6
        push r7
        push r8
        push r9
        push r10
        push r11
        push r12
        push r13
        push r14
        push r15
        push r16
        push r17
        push r28
        push r29
        in      r28,__SP_L__
        in      r29,__SP_H__
        sub     r28,r26         ; Y = SP - frame size (X)
        sbc     r29,r27
        in      __tmp_reg__,__SREG__
        cli                     ; make 16-bit SP write atomic
        out     __SP_H__,r29
        out     __SREG__,__tmp_reg__ ; restore saved SREG (incl. I flag)
        out     __SP_L__,r28
        ijmp                    ; continue in caller via Z
.endfunc
#endif /* defined (L_prologue) */
599
 
600
/*
 * This is an epilogue subroutine
 */
#if defined (L_epilogue)

/* __epilogue_restores__: out-of-line common function epilogue.
   Reloads the call-saved registers r2-r17 from the frame via Y
   (the saved caller Y lands in X temporarily), releases the frame
   with SP = Y + r30 (r30 = frame size; only the carry propagates to
   the high byte), restores the caller's Y from X and returns.
   Interrupts are disabled across the two-byte SP update.  */
        .global __epilogue_restores__
        .func   __epilogue_restores__
__epilogue_restores__:
        ldd     r2,Y+18
        ldd     r3,Y+17
        ldd     r4,Y+16
        ldd     r5,Y+15
        ldd     r6,Y+14
        ldd     r7,Y+13
        ldd     r8,Y+12
        ldd     r9,Y+11
        ldd     r10,Y+10
        ldd     r11,Y+9
        ldd     r12,Y+8
        ldd     r13,Y+7
        ldd     r14,Y+6
        ldd     r15,Y+5
        ldd     r16,Y+4
        ldd     r17,Y+3
        ldd     r26,Y+2         ; caller's Y low -> X for now
        ldd     r27,Y+1         ; caller's Y high
        add     r28,r30         ; SP = Y + frame size (r30)
        adc     r29,__zero_reg__
        in      __tmp_reg__,__SREG__
        cli                     ; make 16-bit SP write atomic
        out     __SP_H__,r29
        out     __SREG__,__tmp_reg__ ; restore saved SREG (incl. I flag)
        out     __SP_L__,r28
        mov_l   r28, r26        ; restore caller's Y
        mov_h   r29, r27
        ret
.endfunc
#endif /* defined (L_epilogue) */
638
 
639
#ifdef L_exit
        .section .fini9,"ax",@progbits
/* _exit: program-termination entry point, placed in .fini9 so that
   execution falls through the linker-inserted .fini8 ... .fini1
   code and finally reaches __stop_program in .fini0, which spins
   forever.  A weak "exit" alias is provided for overriding.  */
        .global _exit
        .func   _exit
_exit:
        .weak   exit
exit:

        /* Code from .fini8 ... .fini1 sections inserted by ld script.  */

        .section .fini0,"ax",@progbits
__stop_program:
        rjmp    __stop_program  ; endless loop: halt the program
        .endfunc
#endif /* defined (L_exit) */
654
 
655
#ifdef L_cleanup
/* _cleanup: weak do-nothing default; a library (e.g. stdio shutdown
   code) may provide a strong definition that replaces it.  */
        .weak   _cleanup
        .func   _cleanup
_cleanup:
        ret
.endfunc
#endif /* defined (L_cleanup) */
662
 
663
#ifdef L_tablejump
/* __tablejump2__: indirect jump through a flash table given a WORD
   index in Z (doubled here into a byte address, falling through).
   __tablejump__: Z = BYTE address of a 2-byte code pointer stored
   in flash; load it and transfer control there.
   Clobbers __tmp_reg__ (and Z).  */
        .global __tablejump2__
        .func   __tablejump2__
__tablejump2__:
        lsl     r30             ; word index -> byte address
        rol     r31
        .global __tablejump__
__tablejump__:
#if defined (__AVR_ENHANCED__)
        lpm     __tmp_reg__, Z+ ; low byte of target address
        lpm     r31, Z          ; high byte
        mov     r30, __tmp_reg__
        ijmp                    ; jump to loaded address
#else
        lpm                     ; r0 = low byte (no LPM Z+ on this core)
        adiw    r30, 1
        push    r0
        lpm                     ; r0 = high byte
        push    r0
        ret                     ; "return" into the pushed address
#endif
        .endfunc
#endif /* defined (L_tablejump) */
686
 
687
/* __do_copy_data is only necessary if there is anything in .data section.
   Does not use RAMPZ - crt*.o provides a replacement for >64K devices.  */

#ifdef L_copy_data
        .section .init4,"ax",@progbits
/* Copy the .data initializers from flash (Z = __data_load_start)
   to RAM (X = __data_start) until X reaches __data_end.
   r17 caches hi8(__data_end) for the 16-bit end compare.  */
        .global __do_copy_data
__do_copy_data:
        ldi     r17, hi8(__data_end)
        ldi     r26, lo8(__data_start)
        ldi     r27, hi8(__data_start)
        ldi     r30, lo8(__data_load_start)
        ldi     r31, hi8(__data_load_start)
        rjmp    .do_copy_data_start     ; enter at the end test
.do_copy_data_loop:
#if defined (__AVR_HAVE_LPMX__)
        lpm     r0, Z+          ; read flash byte, post-increment Z
#else
        lpm                     ; r0 = flash byte at Z
        adiw    r30, 1
#endif
        st      X+, r0          ; store to RAM, post-increment X
.do_copy_data_start:
        cpi     r26, lo8(__data_end)
        cpc     r27, r17
        brne    .do_copy_data_loop
#endif /* L_copy_data */
713
 
714
/* __do_clear_bss is only necessary if there is anything in .bss section.  */

#ifdef L_clear_bss
        .section .init4,"ax",@progbits
/* Zero-fill RAM from __bss_start (X) up to __bss_end.
   r17 caches hi8(__bss_end) for the 16-bit end compare.  */
        .global __do_clear_bss
__do_clear_bss:
        ldi     r17, hi8(__bss_end)
        ldi     r26, lo8(__bss_start)
        ldi     r27, hi8(__bss_start)
        rjmp    .do_clear_bss_start     ; enter at the end test
.do_clear_bss_loop:
        st      X+, __zero_reg__        ; write one zero byte
.do_clear_bss_start:
        cpi     r26, lo8(__bss_end)
        cpc     r27, r17
        brne    .do_clear_bss_loop
#endif /* L_clear_bss */
731
 
732
/* __do_global_ctors and __do_global_dtors are only necessary
   if there are any constructors/destructors.  */

#if defined (__AVR_MEGA__)
#define XCALL call      /* large devices: direct CALL reaches everywhere */
#else
#define XCALL rcall     /* small devices: RCALL is enough and shorter */
#endif

#ifdef L_ctors
        .section .init6,"ax",@progbits
/* Walk the constructor table from __ctors_end DOWN to __ctors_start
   (constructors run in reverse table order), dispatching each entry
   through __tablejump__.  Y = table cursor; r17 caches
   hi8(__ctors_start) for the 16-bit end compare.  */
        .global __do_global_ctors
__do_global_ctors:
        ldi     r17, hi8(__ctors_start)
        ldi     r28, lo8(__ctors_end)
        ldi     r29, hi8(__ctors_end)
        rjmp    .do_global_ctors_start  ; enter at the end test
.do_global_ctors_loop:
        sbiw    r28, 2          ; step back one 2-byte table entry
        mov_h   r31, r29        ; Z = entry address for __tablejump__
        mov_l   r30, r28
        XCALL   __tablejump__   ; call the constructor
.do_global_ctors_start:
        cpi     r28, lo8(__ctors_start)
        cpc     r29, r17
        brne    .do_global_ctors_loop
#endif /* L_ctors */
759
 
760
#ifdef L_dtors
        .section .fini6,"ax",@progbits
/* Walk the destructor table FORWARD from __dtors_start to
   __dtors_end (destructors run in table order), dispatching each
   entry through __tablejump__.  Y = table cursor; r17 caches
   hi8(__dtors_end) for the 16-bit end compare.  */
        .global __do_global_dtors
__do_global_dtors:
        ldi     r17, hi8(__dtors_end)
        ldi     r28, lo8(__dtors_start)
        ldi     r29, hi8(__dtors_start)
        rjmp    .do_global_dtors_start  ; enter at the end test
.do_global_dtors_loop:
        mov_h   r31, r29        ; Z = entry address for __tablejump__
        mov_l   r30, r28
        XCALL   __tablejump__   ; call the destructor
        adiw    r28, 2          ; advance one 2-byte table entry
.do_global_dtors_start:
        cpi     r28, lo8(__dtors_end)
        cpc     r29, r17
        brne    .do_global_dtors_loop
#endif /* L_dtors */
778
 

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.