OpenCores
URL https://opencores.org/ocsvn/openrisc_me/openrisc_me/trunk

Subversion Repositories openrisc_me

[/] [openrisc/] [trunk/] [gnu-src/] [gcc-4.5.1/] [gcc/] [config/] [m68k/] [lb1sf68.asm] - Blame information for rev 433

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 282 jeremybenn
/* libgcc routines for 68000 w/o floating-point hardware.
2
   Copyright (C) 1994, 1996, 1997, 1998, 2008, 2009 Free Software Foundation, Inc.
3
 
4
This file is part of GCC.
5
 
6
GCC is free software; you can redistribute it and/or modify it
7
under the terms of the GNU General Public License as published by the
8
Free Software Foundation; either version 3, or (at your option) any
9
later version.
10
 
11
This file is distributed in the hope that it will be useful, but
12
WITHOUT ANY WARRANTY; without even the implied warranty of
13
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
General Public License for more details.
15
 
16
Under Section 7 of GPL version 3, you are granted additional
17
permissions described in the GCC Runtime Library Exception, version
18
3.1, as published by the Free Software Foundation.
19
 
20
You should have received a copy of the GNU General Public License and
21
a copy of the GCC Runtime Library Exception along with this program;
22
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
23
<http://www.gnu.org/licenses/>.  */
24
 
25
/* Use this one for any 680x0; assumes no floating point hardware.
   The trailing " '" appearing on some lines is for ANSI preprocessors.  Yuk.
   Some of this code comes from MINIX, via the folks at ericsson.
   D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992
*/

/* These are predefined by new versions of GNU cpp.  */

#ifndef __USER_LABEL_PREFIX__
#define __USER_LABEL_PREFIX__ _
#endif

#ifndef __REGISTER_PREFIX__
#define __REGISTER_PREFIX__
#endif

#ifndef __IMMEDIATE_PREFIX__
#define __IMMEDIATE_PREFIX__ #
#endif

/* ANSI concatenation macros.  */

#define CONCAT1(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a ## b

/* Use the right prefix for global labels.  */

#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)

/* Note that X is a function.  */

#ifdef __ELF__
#define FUNC(x) .type SYM(x),function
#else
/* The .proc pseudo-op is accepted, but ignored, by GAS.  We could just
   define this to the empty string for non-ELF systems, but defining it
   to .proc means that the information is available to the assembler if
   the need arises.  */
#define FUNC(x) .proc
#endif

/* Use the right prefix for registers.  */

#define REG(x) CONCAT1 (__REGISTER_PREFIX__, x)

/* Use the right prefix for immediate values.  */

#define IMM(x) CONCAT1 (__IMMEDIATE_PREFIX__, x)

/* Map each bare register name through REG() so the configured
   register prefix (if any) is applied uniformly throughout.  */
#define d0 REG (d0)
#define d1 REG (d1)
#define d2 REG (d2)
#define d3 REG (d3)
#define d4 REG (d4)
#define d5 REG (d5)
#define d6 REG (d6)
#define d7 REG (d7)
#define a0 REG (a0)
#define a1 REG (a1)
#define a2 REG (a2)
#define a3 REG (a3)
#define a4 REG (a4)
#define a5 REG (a5)
#define a6 REG (a6)
#define fp REG (fp)
#define sp REG (sp)
#define pc REG (pc)

/* Provide a few macros to allow for PIC code support.
 * With PIC, data is stored A5 relative so we've got to take a bit of special
 * care to ensure that all loads of global data is via A5.  PIC also requires
 * jumps and subroutine calls to be PC relative rather than absolute.  We cheat
 * a little on this and in the PIC case, we use short offset branches and
 * hope that the final object code is within range (which it should be).
 */
#ifndef __PIC__

	/* Non PIC (absolute/relocatable) versions */

	.macro PICCALL addr
	jbsr	\addr
	.endm

	.macro PICJUMP addr
	jmp	\addr
	.endm

	.macro PICLEA sym, reg
	lea	\sym, \reg
	.endm

	.macro PICPEA sym, areg
	pea	\sym
	.endm

#else /* __PIC__ */

# if defined (__uClinux__)

	/* Versions for uClinux */

#  if defined(__ID_SHARED_LIBRARY__)

	/* -mid-shared-library versions  */

	.macro PICLEA sym, reg
	movel	a5@(_current_shared_library_a5_offset_), \reg
	movel	\sym@GOT(\reg), \reg
	.endm

	.macro PICPEA sym, areg
	movel	a5@(_current_shared_library_a5_offset_), \areg
	movel	\sym@GOT(\areg), sp@-
	.endm

	.macro PICCALL addr
	PICLEA	\addr,a0
	jsr	a0@
	.endm

	.macro PICJUMP addr
	PICLEA	\addr,a0
	jmp	a0@
	.endm

#  else /* !__ID_SHARED_LIBRARY__ */

	/* Versions for -msep-data */

	.macro PICLEA sym, reg
	movel	\sym@GOT(a5), \reg
	.endm

	.macro PICPEA sym, areg
	movel	\sym@GOT(a5), sp@-
	.endm

	.macro PICCALL addr
#if defined (__mcoldfire__) && !defined (__mcfisab__) && !defined (__mcfisac__)
	lea	\addr-.-8,a0
	jsr	pc@(a0)
#else
	jbsr	\addr
#endif
	.endm

	.macro PICJUMP addr
	/* ISA C has no bra.l instruction, and since this assembly file
	   gets assembled into multiple object files, we avoid the
	   bra instruction entirely.  */
#if defined (__mcoldfire__) && !defined (__mcfisab__)
	lea	\addr-.-8,a0
	jmp	pc@(a0)
#else
	bra	\addr
#endif
	.endm

#  endif

# else /* !__uClinux__ */

	/* Versions for Linux */

	.macro PICLEA sym, reg
	movel	#_GLOBAL_OFFSET_TABLE_@GOTPC, \reg
	lea	(-6, pc, \reg), \reg
	movel	\sym@GOT(\reg), \reg
	.endm

	.macro PICPEA sym, areg
	movel	#_GLOBAL_OFFSET_TABLE_@GOTPC, \areg
	lea	(-6, pc, \areg), \areg
	movel	\sym@GOT(\areg), sp@-
	.endm

	.macro PICCALL addr
#if defined (__mcoldfire__) && !defined (__mcfisab__) && !defined (__mcfisac__)
	lea	\addr-.-8,a0
	jsr	pc@(a0)
#else
	jbsr	\addr
#endif
	.endm

	.macro PICJUMP addr
	/* ISA C has no bra.l instruction, and since this assembly file
	   gets assembled into multiple object files, we avoid the
	   bra instruction entirely.  */
#if defined (__mcoldfire__) && !defined (__mcfisab__)
	lea	\addr-.-8,a0
	jmp	pc@(a0)
#else
	bra	\addr
#endif
	.endm

# endif
#endif /* __PIC__ */

#ifdef L_floatex

| This is an attempt at a decent floating point (single, double and
| extended double) code for the GNU C compiler. It should be easy to
| adapt to other compilers (but beware of the local labels!).

| Starting date: 21 October, 1990

| It is convenient to introduce the notation (s,e,f) for a floating point
| number, where s=sign, e=exponent, f=fraction. We will call a floating
| point number fpn to abbreviate, independently of the precision.
| Let MAX_EXP be in each case the maximum exponent (255 for floats, 1023
| for doubles and 16383 for long doubles). We then have the following
| different cases:
|  1. Normalized fpns have 0 < e < MAX_EXP. They correspond to
|     (-1)^s x 1.f x 2^(e-bias-1).
|  2. Denormalized fpns have e=0. They correspond to numbers of the form
|     (-1)^s x 0.f x 2^(-bias).
|  3. +/-INFINITY have e=MAX_EXP, f=0.
|  4. Quiet NaN (Not a Number) have all bits set.
|  5. Signaling NaN (Not a Number) have s=0, e=MAX_EXP, f=1.

|=============================================================================
|                                  exceptions
|=============================================================================

| This is the floating point condition code register (_fpCCR):
|
| struct {
|   short _exception_bits;
|   short _trap_enable_bits;
|   short _sticky_bits;
|   short _rounding_mode;
|   short _format;
|   short _last_operation;
|   union {
|     float sf;
|     double df;
|   } _operand1;
|   union {
|     float sf;
|     double df;
|   } _operand2;
| } _fpCCR;

	.data
	.even

	.globl	SYM (_fpCCR)

SYM (_fpCCR):
__exception_bits:
	.word	0
__trap_enable_bits:
	.word	0
__sticky_bits:
	.word	0
__rounding_mode:
	.word	ROUND_TO_NEAREST
__format:
	.word	NIL
__last_operation:
	.word	NOOP
__operand1:
	.long	0
	.long	0
__operand2:
	.long	0
	.long	0

| Byte offsets of each field from the start of _fpCCR:
EBITS  = __exception_bits - SYM (_fpCCR)
TRAPE  = __trap_enable_bits - SYM (_fpCCR)
STICK  = __sticky_bits - SYM (_fpCCR)
ROUND  = __rounding_mode - SYM (_fpCCR)
FORMT  = __format - SYM (_fpCCR)
LASTO  = __last_operation - SYM (_fpCCR)
OPER1  = __operand1 - SYM (_fpCCR)
OPER2  = __operand2 - SYM (_fpCCR)

| The following exception types are supported:
INEXACT_RESULT          = 0x0001
UNDERFLOW               = 0x0002
OVERFLOW                = 0x0004
DIVIDE_BY_ZERO          = 0x0008
INVALID_OPERATION       = 0x0010

| The allowed rounding modes are:
UNKNOWN           = -1
ROUND_TO_NEAREST  = 0 | round result to nearest representable value
ROUND_TO_ZERO     = 1 | round result towards zero
ROUND_TO_PLUS     = 2 | round result towards plus infinity
ROUND_TO_MINUS    = 3 | round result towards minus infinity

| The allowed values of format are:
NIL          = 0
SINGLE_FLOAT = 1
DOUBLE_FLOAT = 2
LONG_FLOAT   = 3

| The allowed values for the last operation are:
NOOP         = 0
ADD          = 1
MULTIPLY     = 2
DIVIDE       = 3
NEGATE       = 4
COMPARE      = 5
EXTENDSFDF   = 6
TRUNCDFSF    = 7

|=============================================================================
|                           __clear_sticky_bits
|=============================================================================

| The sticky bits are normally not cleared (thus the name), whereas the
| exception type and exception value reflect the last computation.
| This routine is provided to clear them (you can also write to _fpCCR,
| since it is globally visible).

| NOTE(review): the exported symbol is spelled without a trailing 's'
| (__clear_sticky_bit) while the prototype comment below says
| __clear_sticky_bits; the symbol name is kept as-is since callers
| link against it.

	.globl	SYM (__clear_sticky_bit)

	.text
	.even

| void __clear_sticky_bits(void);
SYM (__clear_sticky_bit):
	PICLEA	SYM (_fpCCR),a0
#ifndef __mcoldfire__
	movew	IMM (0),a0@(STICK)	| zero the __sticky_bits field
#else
	clr.w	a0@(STICK)		| ColdFire: clr.w is available and shorter
#endif
	rts

|=============================================================================
|                           $_exception_handler
|=============================================================================

	.globl	$_exception_handler

	.text
	.even

| This is the common exit point if an exception occurs.
| NOTE: it is NOT callable from C!
| It expects the exception type in d7, the format (SINGLE_FLOAT,
| DOUBLE_FLOAT or LONG_FLOAT) in d6, and the last operation code in d5.
| It sets the corresponding exception and sticky bits, and the format.
| Depending on the format if fills the corresponding slots for the
| operands which produced the exception (all this information is provided
| so if you write your own exception handlers you have enough information
| to deal with the problem).
| Then checks to see if the corresponding exception is trap-enabled,
| in which case it pushes the address of _fpCCR and traps through
| trap FPTRAP (15 for the moment).

FPTRAP = 15

$_exception_handler:
	PICLEA	SYM (_fpCCR),a0
	movew	d7,a0@(EBITS)	| set __exception_bits
#ifndef __mcoldfire__
	orw	d7,a0@(STICK)	| and __sticky_bits
#else
	movew	a0@(STICK),d4	| ColdFire lacks or-to-memory: read,
	orl	d7,d4		| merge,
	movew	d4,a0@(STICK)	| write back
#endif
	movew	d6,a0@(FORMT)	| and __format
	movew	d5,a0@(LASTO)	| and __last_operation

| Now put the operands in place:
#ifndef __mcoldfire__
	cmpw	IMM (SINGLE_FLOAT),d6
#else
	cmpl	IMM (SINGLE_FLOAT),d6
#endif
	beq	1f
	movel	a6@(8),a0@(OPER1)	| double: copy both 64-bit operands
	movel	a6@(12),a0@(OPER1+4)
	movel	a6@(16),a0@(OPER2)
	movel	a6@(20),a0@(OPER2+4)
	bra	2f
1:	movel	a6@(8),a0@(OPER1)	| single: one 32-bit word per operand
	movel	a6@(12),a0@(OPER2)
2:
| And check whether the exception is trap-enabled:
#ifndef __mcoldfire__
	andw	a0@(TRAPE),d7	| is exception trap-enabled?
#else
	clrl	d6
	movew	a0@(TRAPE),d6
	andl	d6,d7
#endif
	beq	1f		| no, exit
	PICPEA	SYM (_fpCCR),a1	| yes, push address of _fpCCR
	trap	IMM (FPTRAP)	| and trap
#ifndef __mcoldfire__
1:	moveml	sp@+,d2-d7	| restore data registers
#else
1:	moveml	sp@,d2-d7
	| XXX if frame pointer is ever removed, stack pointer must
	| be adjusted here.
#endif
	unlk	a6		| and return
	rts
#endif /* L_floatex */

#ifdef  L_mulsi3
| int __mulsi3(int a, int b);
| 32x32 -> 32-bit multiply built from 16-bit muluw partial products:
|   result = ((xh*yl + xl*yh) << 16) + xl*yl  (mod 2^32)
| where xh/xl, yh/yl are the high/low words of the big-endian arguments.
	.text
	FUNC(__mulsi3)
	.globl	SYM (__mulsi3)
SYM (__mulsi3):
	movew	sp@(4), d0	/* x0 -> d0 */
	muluw	sp@(10), d0	/* x0*y1 */
	movew	sp@(6), d1	/* x1 -> d1 */
	muluw	sp@(8), d1	/* x1*y0 */
#ifndef __mcoldfire__
	addw	d1, d0
#else
	addl	d1, d0
#endif
	swap	d0		/* cross products go in the high word */
	clrw	d0
	movew	sp@(6), d1	/* x1 -> d1 */
	muluw	sp@(10), d1	/* x1*y1 */
	addl	d1, d0

	rts
#endif /* L_mulsi3 */

#ifdef  L_udivsi3
| unsigned __udivsi3(unsigned dividend, unsigned divisor);
| Returns the unsigned 32-bit quotient in d0.
	.text
	FUNC(__udivsi3)
	.globl	SYM (__udivsi3)
SYM (__udivsi3):
#ifndef __mcoldfire__
	movel	d2, sp@-
	movel	sp@(12), d1	/* d1 = divisor */
	movel	sp@(8), d0	/* d0 = dividend */

	cmpl	IMM (0x10000), d1 /* divisor >= 2 ^ 16 ?   */
	jcc	L3		/* then try next algorithm */
	movel	d0, d2
	clrw	d2
	swap	d2
	divu	d1, d2		/* high quotient in lower word */
	movew	d2, d0		/* save high quotient */
	swap	d0
	movew	sp@(10), d2	/* get low dividend + high rest */
	divu	d1, d2		/* low quotient */
	movew	d2, d0
	jra	L6

L3:	movel	d1, d2		/* use d2 as divisor backup */
L4:	lsrl	IMM (1), d1	/* shift divisor */
	lsrl	IMM (1), d0	/* shift dividend */
	cmpl	IMM (0x10000), d1 /* still divisor >= 2 ^ 16 ?  */
	jcc	L4
	divu	d1, d0		/* now we have 16-bit divisor */
	andl	IMM (0xffff), d0 /* mask out divisor, ignore remainder */

/* Multiply the 16-bit tentative quotient with the 32-bit divisor.  Because of
   the operand ranges, this might give a 33-bit product.  If this product is
   greater than the dividend, the tentative quotient was too large. */
	movel	d2, d1
	mulu	d0, d1		/* low part, 32 bits */
	swap	d2
	mulu	d0, d2		/* high part, at most 17 bits */
	swap	d2		/* align high part with low part */
	tstw	d2		/* high part 17 bits? */
	jne	L5		/* if 17 bits, quotient was too large */
	addl	d2, d1		/* add parts */
	jcs	L5		/* if sum is 33 bits, quotient was too large */
	cmpl	sp@(8), d1	/* compare the sum with the dividend */
	jls	L6		/* if sum > dividend, quotient was too large */
L5:	subql	IMM (1), d0	/* adjust quotient */

L6:	movel	sp@+, d2
	rts

#else /* __mcoldfire__ */

/* ColdFire implementation of non-restoring division algorithm from
   Hennessy & Patterson, Appendix A. */
	link	a6,IMM (-12)
	moveml	d2-d4,sp@
	movel	a6@(8),d0
	movel	a6@(12),d1
	clrl	d2		| clear p
	moveq	IMM (31),d4
L1:	addl	d0,d0		| shift reg pair (p,a) one bit left
	addxl	d2,d2
	movl	d2,d3		| subtract b from p, store in tmp.
	subl	d1,d3
	jcs	L2		| if no carry,
	bset	IMM (0),d0	| set the low order bit of a to 1,
	movl	d3,d2		| and store tmp in p.
L2:	subql	IMM (1),d4
	jcc	L1
	moveml	sp@,d2-d4	| restore data registers
	unlk	a6		| and return
	rts
#endif /* __mcoldfire__ */

#endif /* L_udivsi3 */

#ifdef  L_divsi3
| int __divsi3(int dividend, int divisor);
| Signed divide: track the result sign in d2, divide the absolute
| values via __udivsi3, then negate the quotient if signs differed.
	.text
	FUNC(__divsi3)
	.globl	SYM (__divsi3)
SYM (__divsi3):
	movel	d2, sp@-

	moveq	IMM (1), d2	/* sign of result stored in d2 (=1 or =-1) */
	movel	sp@(12), d1	/* d1 = divisor */
	jpl	L1
	negl	d1
#ifndef __mcoldfire__
	negb	d2		/* change sign because divisor <0  */
#else
	negl	d2		/* change sign because divisor <0  */
#endif
L1:	movel	sp@(8), d0	/* d0 = dividend */
	jpl	L2
	negl	d0
#ifndef __mcoldfire__
	negb	d2
#else
	negl	d2
#endif

L2:	movel	d1, sp@-
	movel	d0, sp@-
	PICCALL	SYM (__udivsi3)	/* divide abs(dividend) by abs(divisor) */
	addql	IMM (8), sp

	tstb	d2
	jpl	L3
	negl	d0

L3:	movel	sp@+, d2
	rts
#endif /* L_divsi3 */

#ifdef  L_umodsi3
| unsigned __umodsi3(unsigned a, unsigned b);
| Remainder as a - (a/b)*b, reusing __udivsi3 (and __mulsi3 on plain
| 68000, which lacks a 32x32 multiply; ColdFire has mulsl).
	.text
	FUNC(__umodsi3)
	.globl	SYM (__umodsi3)
SYM (__umodsi3):
	movel	sp@(8), d1	/* d1 = divisor */
	movel	sp@(4), d0	/* d0 = dividend */
	movel	d1, sp@-
	movel	d0, sp@-
	PICCALL	SYM (__udivsi3)
	addql	IMM (8), sp
	movel	sp@(8), d1	/* d1 = divisor */
#ifndef __mcoldfire__
	movel	d1, sp@-
	movel	d0, sp@-
	PICCALL	SYM (__mulsi3)	/* d0 = (a/b)*b */
	addql	IMM (8), sp
#else
	mulsl	d1,d0
#endif
	movel	sp@(4), d1	/* d1 = dividend */
	subl	d0, d1		/* d1 = a - (a/b)*b */
	movel	d1, d0
	rts
#endif /* L_umodsi3 */

#ifdef  L_modsi3
| int __modsi3(int a, int b);
| Signed remainder as a - (a/b)*b, reusing __divsi3 (and __mulsi3 on
| plain 68000; ColdFire uses mulsl).  Result has the sign of the dividend.
	.text
	FUNC(__modsi3)
	.globl	SYM (__modsi3)
SYM (__modsi3):
	movel	sp@(8), d1	/* d1 = divisor */
	movel	sp@(4), d0	/* d0 = dividend */
	movel	d1, sp@-
	movel	d0, sp@-
	PICCALL	SYM (__divsi3)
	addql	IMM (8), sp
	movel	sp@(8), d1	/* d1 = divisor */
#ifndef __mcoldfire__
	movel	d1, sp@-
	movel	d0, sp@-
	PICCALL	SYM (__mulsi3)	/* d0 = (a/b)*b */
	addql	IMM (8), sp
#else
	mulsl	d1,d0
#endif
	movel	sp@(4), d1	/* d1 = dividend */
	subl	d0, d1		/* d1 = a - (a/b)*b */
	movel	d1, d0
	rts
#endif /* L_modsi3 */

#ifdef  L_double

	.globl	SYM (_fpCCR)
	.globl	$_exception_handler

QUIET_NaN      = 0xffffffff

D_MAX_EXP      = 0x07ff
D_BIAS         = 1022
DBL_MAX_EXP    = D_MAX_EXP - D_BIAS
DBL_MIN_EXP    = 1 - D_BIAS
DBL_MANT_DIG   = 53

INEXACT_RESULT          = 0x0001
UNDERFLOW               = 0x0002
OVERFLOW                = 0x0004
DIVIDE_BY_ZERO          = 0x0008
INVALID_OPERATION       = 0x0010

DOUBLE_FLOAT = 2

NOOP         = 0
ADD          = 1
MULTIPLY     = 2
DIVIDE       = 3
NEGATE       = 4
COMPARE      = 5
EXTENDSFDF   = 6
TRUNCDFSF    = 7

UNKNOWN           = -1
ROUND_TO_NEAREST  = 0 | round result to nearest representable value
ROUND_TO_ZERO     = 1 | round result towards zero
ROUND_TO_PLUS     = 2 | round result towards plus infinity
ROUND_TO_MINUS    = 3 | round result towards minus infinity

| Entry points:

	.globl SYM (__adddf3)
	.globl SYM (__subdf3)
	.globl SYM (__muldf3)
	.globl SYM (__divdf3)
	.globl SYM (__negdf2)
	.globl SYM (__cmpdf2)
	.globl SYM (__cmpdf2_internal)
	.hidden SYM (__cmpdf2_internal)

	.text
	.even

| These are common routines to return and signal exceptions.
| Each loads the result into d0-d1, puts the exception mask in d7 and
| the format (DOUBLE_FLOAT) in d6, then jumps to $_exception_handler,
| which restores registers and returns to the original caller.

Ld$den:
| Return and signal a denormalized number
	orl	d7,d0		| d7 holds the result sign bit here
	movew	IMM (INEXACT_RESULT+UNDERFLOW),d7
	moveq	IMM (DOUBLE_FLOAT),d6
	PICJUMP	$_exception_handler

Ld$infty:
Ld$overflow:
| Return a properly signed INFINITY and set the exception flags
	movel	IMM (0x7ff00000),d0
	movel	IMM (0),d1
	orl	d7,d0
	movew	IMM (INEXACT_RESULT+OVERFLOW),d7
	moveq	IMM (DOUBLE_FLOAT),d6
	PICJUMP	$_exception_handler

Ld$underflow:
| Return 0 and set the exception flags
	movel	IMM (0),d0
	movel	d0,d1
	movew	IMM (INEXACT_RESULT+UNDERFLOW),d7
	moveq	IMM (DOUBLE_FLOAT),d6
	PICJUMP	$_exception_handler

Ld$inop:
| Return a quiet NaN and set the exception flags
	movel	IMM (QUIET_NaN),d0
	movel	d0,d1
	movew	IMM (INEXACT_RESULT+INVALID_OPERATION),d7
	moveq	IMM (DOUBLE_FLOAT),d6
	PICJUMP	$_exception_handler

Ld$div$0:
| Return a properly signed INFINITY and set the exception flags
	movel	IMM (0x7ff00000),d0
	movel	IMM (0),d1
	orl	d7,d0
	movew	IMM (INEXACT_RESULT+DIVIDE_BY_ZERO),d7
	moveq	IMM (DOUBLE_FLOAT),d6
	PICJUMP	$_exception_handler

|=============================================================================
719
|=============================================================================
720
|                         double precision routines
721
|=============================================================================
722
|=============================================================================
723
 
724
| A double precision floating point number (double) has the format:
725
|
726
| struct _double {
727
|  unsigned int sign      : 1;  /* sign bit */
728
|  unsigned int exponent  : 11; /* exponent, biased by 1022 (D_BIAS) */
729
|  unsigned int fraction  : 52; /* fraction */
730
| } double;
731
|
732
| Thus sizeof(double) = 8 (64 bits).
733
|
734
| All the routines are callable from C programs, and return the result
735
| in the register pair d0-d1. They also preserve all registers except
736
| d0-d1 and a0-a1.
737
 
738
|=============================================================================
739
|                              __subdf3
740
|=============================================================================
741
 
742
| double __subdf3(double, double);
	FUNC(__subdf3)
SYM (__subdf3):
	bchg	IMM (31),sp@(12) | change sign of second operand
				| and fall through, so we always add
747
|=============================================================================
748
|                              __adddf3
749
|=============================================================================
750
 
751
| double __adddf3(double, double);
752
        FUNC(__adddf3)
753
SYM (__adddf3):
754
#ifndef __mcoldfire__
755
        link    a6,IMM (0)      | everything will be done in registers
756
        moveml  d2-d7,sp@-      | save all data registers and a2 (but d0-d1)
757
#else
758
        link    a6,IMM (-24)
759
        moveml  d2-d7,sp@
760
#endif
761
        movel   a6@(8),d0       | get first operand
762
        movel   a6@(12),d1      |
763
        movel   a6@(16),d2      | get second operand
764
        movel   a6@(20),d3      |
765
 
766
        movel   d0,d7           | get d0's sign bit in d7 '
767
        addl    d1,d1           | check and clear sign bit of a, and gain one
768
        addxl   d0,d0           | bit of extra precision
769
        beq     Ladddf$b        | if zero return second operand
770
 
771
        movel   d2,d6           | save sign in d6
772
        addl    d3,d3           | get rid of sign bit and gain one bit of
773
        addxl   d2,d2           | extra precision
774
        beq     Ladddf$a        | if zero return first operand
775
 
776
        andl    IMM (0x80000000),d7 | isolate a's sign bit '
777
        swap    d6              | and also b's sign bit '
778
#ifndef __mcoldfire__
779
        andw    IMM (0x8000),d6 |
780
        orw     d6,d7           | and combine them into d7, so that a's sign '
781
                                | bit is in the high word and b's is in the '
782
                                | low word, so d6 is free to be used
783
#else
784
        andl    IMM (0x8000),d6
785
        orl     d6,d7
786
#endif
787
        movel   d7,a0           | now save d7 into a0, so d7 is free to
788
                                | be used also
789
 
790
| Get the exponents and check for denormalized and/or infinity.
791
 
792
        movel   IMM (0x001fffff),d6 | mask for the fraction
793
        movel   IMM (0x00200000),d7 | mask to put hidden bit back
794
 
795
        movel   d0,d4           |
796
        andl    d6,d0           | get fraction in d0
797
        notl    d6              | make d6 into mask for the exponent
798
        andl    d6,d4           | get exponent in d4
799
        beq     Ladddf$a$den    | branch if a is denormalized
800
        cmpl    d6,d4           | check for INFINITY or NaN
801
        beq     Ladddf$nf       |
802
        orl     d7,d0           | and put hidden bit back
803
Ladddf$1:
804
        swap    d4              | shift right exponent so that it starts
805
#ifndef __mcoldfire__
806
        lsrw    IMM (5),d4      | in bit 0 and not bit 20
807
#else
808
        lsrl    IMM (5),d4      | in bit 0 and not bit 20
809
#endif
810
| Now we have a's exponent in d4 and fraction in d0-d1 '
811
        movel   d2,d5           | save b to get exponent
812
        andl    d6,d5           | get exponent in d5
813
        beq     Ladddf$b$den    | branch if b is denormalized
814
        cmpl    d6,d5           | check for INFINITY or NaN
815
        beq     Ladddf$nf
816
        notl    d6              | make d6 into mask for the fraction again
817
        andl    d6,d2           | and get fraction in d2
818
        orl     d7,d2           | and put hidden bit back
819
Ladddf$2:
820
        swap    d5              | shift right exponent so that it starts
821
#ifndef __mcoldfire__
822
        lsrw    IMM (5),d5      | in bit 0 and not bit 20
823
#else
824
        lsrl    IMM (5),d5      | in bit 0 and not bit 20
825
#endif
826
 
827
| Now we have b's exponent in d5 and fraction in d2-d3. '
828
 
829
| The situation now is as follows: the signs are combined in a0, the
830
| numbers are in d0-d1 (a) and d2-d3 (b), and the exponents in d4 (a)
831
| and d5 (b). To do the rounding correctly we need to keep all the
832
| bits until the end, so we need to use d0-d1-d2-d3 for the first number
833
| and d4-d5-d6-d7 for the second. To do this we store (temporarily) the
834
| exponents in a2-a3.
835
 
836
#ifndef __mcoldfire__
837
        moveml  a2-a3,sp@-      | save the address registers
838
#else
839
        movel   a2,sp@-
840
        movel   a3,sp@-
841
        movel   a4,sp@-
842
#endif
843
 
844
        movel   d4,a2           | save the exponents
845
        movel   d5,a3           |
846
 
847
        movel   IMM (0),d7      | and move the numbers around
848
        movel   d7,d6           |
849
        movel   d3,d5           |
850
        movel   d2,d4           |
851
        movel   d7,d3           |
852
        movel   d7,d2           |
853
 
854
| Here we shift the numbers until the exponents are the same, and put
855
| the largest exponent in a2.
856
#ifndef __mcoldfire__
857
        exg     d4,a2           | get exponents back
858
        exg     d5,a3           |
859
        cmpw    d4,d5           | compare the exponents
860
#else
861
        movel   d4,a4           | get exponents back
862
        movel   a2,d4
863
        movel   a4,a2
864
        movel   d5,a4
865
        movel   a3,d5
866
        movel   a4,a3
867
        cmpl    d4,d5           | compare the exponents
868
#endif
869
        beq     Ladddf$3        | if equal don't shift '
870
        bhi     9f              | branch if second exponent is higher
871
 
872
| Here we have a's exponent larger than b's, so we have to shift b. We do
873
| this by using as counter d2:
874
1:      movew   d4,d2           | move largest exponent to d2
875
#ifndef __mcoldfire__
876
        subw    d5,d2           | and subtract second exponent
877
        exg     d4,a2           | get back the longs we saved
878
        exg     d5,a3           |
879
#else
880
        subl    d5,d2           | and subtract second exponent
881
        movel   d4,a4           | get back the longs we saved
882
        movel   a2,d4
883
        movel   a4,a2
884
        movel   d5,a4
885
        movel   a3,d5
886
        movel   a4,a3
887
#endif
888
| if difference is too large we don't shift (actually, we can just exit) '
889
#ifndef __mcoldfire__
890
        cmpw    IMM (DBL_MANT_DIG+2),d2
891
#else
892
        cmpl    IMM (DBL_MANT_DIG+2),d2
893
#endif
894
        bge     Ladddf$b$small
895
#ifndef __mcoldfire__
896
        cmpw    IMM (32),d2     | if difference >= 32, shift by longs
897
#else
898
        cmpl    IMM (32),d2     | if difference >= 32, shift by longs
899
#endif
900
        bge     5f
901
2:
902
#ifndef __mcoldfire__
903
        cmpw    IMM (16),d2     | if difference >= 16, shift by words
904
#else
905
        cmpl    IMM (16),d2     | if difference >= 16, shift by words
906
#endif
907
        bge     6f
908
        bra     3f              | enter dbra loop
909
 
910
4:
911
#ifndef __mcoldfire__
912
        lsrl    IMM (1),d4
913
        roxrl   IMM (1),d5
914
        roxrl   IMM (1),d6
915
        roxrl   IMM (1),d7
916
#else
917
        lsrl    IMM (1),d7
918
        btst    IMM (0),d6
919
        beq     10f
920
        bset    IMM (31),d7
921
10:     lsrl    IMM (1),d6
922
        btst    IMM (0),d5
923
        beq     11f
924
        bset    IMM (31),d6
925
11:     lsrl    IMM (1),d5
926
        btst    IMM (0),d4
927
        beq     12f
928
        bset    IMM (31),d5
929
12:     lsrl    IMM (1),d4
930
#endif
931
3:
932
#ifndef __mcoldfire__
933
        dbra    d2,4b
934
#else
935
        subql   IMM (1),d2
936
        bpl     4b
937
#endif
938
        movel   IMM (0),d2
939
        movel   d2,d3
940
        bra     Ladddf$4
941
5:
942
        movel   d6,d7
943
        movel   d5,d6
944
        movel   d4,d5
945
        movel   IMM (0),d4
946
#ifndef __mcoldfire__
947
        subw    IMM (32),d2
948
#else
949
        subl    IMM (32),d2
950
#endif
951
        bra     2b
952
6:
953
        movew   d6,d7
954
        swap    d7
955
        movew   d5,d6
956
        swap    d6
957
        movew   d4,d5
958
        swap    d5
959
        movew   IMM (0),d4
960
        swap    d4
961
#ifndef __mcoldfire__
962
        subw    IMM (16),d2
963
#else
964
        subl    IMM (16),d2
965
#endif
966
        bra     3b
967
 
968
9:
969
#ifndef __mcoldfire__
970
        exg     d4,d5
971
        movew   d4,d6
972
        subw    d5,d6           | keep d5 (largest exponent) in d4
973
        exg     d4,a2
974
        exg     d5,a3
975
#else
976
        movel   d5,d6
977
        movel   d4,d5
978
        movel   d6,d4
979
        subl    d5,d6
980
        movel   d4,a4
981
        movel   a2,d4
982
        movel   a4,a2
983
        movel   d5,a4
984
        movel   a3,d5
985
        movel   a4,a3
986
#endif
987
| if difference is too large we don't shift (actually, we can just exit) '
988
#ifndef __mcoldfire__
989
        cmpw    IMM (DBL_MANT_DIG+2),d6
990
#else
991
        cmpl    IMM (DBL_MANT_DIG+2),d6
992
#endif
993
        bge     Ladddf$a$small
994
#ifndef __mcoldfire__
995
        cmpw    IMM (32),d6     | if difference >= 32, shift by longs
996
#else
997
        cmpl    IMM (32),d6     | if difference >= 32, shift by longs
998
#endif
999
        bge     5f
1000
2:
1001
#ifndef __mcoldfire__
1002
        cmpw    IMM (16),d6     | if difference >= 16, shift by words
1003
#else
1004
        cmpl    IMM (16),d6     | if difference >= 16, shift by words
1005
#endif
1006
        bge     6f
1007
        bra     3f              | enter dbra loop
1008
 
1009
4:
1010
#ifndef __mcoldfire__
1011
        lsrl    IMM (1),d0
1012
        roxrl   IMM (1),d1
1013
        roxrl   IMM (1),d2
1014
        roxrl   IMM (1),d3
1015
#else
1016
        lsrl    IMM (1),d3
1017
        btst    IMM (0),d2
1018
        beq     10f
1019
        bset    IMM (31),d3
1020
10:     lsrl    IMM (1),d2
1021
        btst    IMM (0),d1
1022
        beq     11f
1023
        bset    IMM (31),d2
1024
11:     lsrl    IMM (1),d1
1025
        btst    IMM (0),d0
1026
        beq     12f
1027
        bset    IMM (31),d1
1028
12:     lsrl    IMM (1),d0
1029
#endif
1030
3:
1031
#ifndef __mcoldfire__
1032
        dbra    d6,4b
1033
#else
1034
        subql   IMM (1),d6
1035
        bpl     4b
1036
#endif
1037
        movel   IMM (0),d7
1038
        movel   d7,d6
1039
        bra     Ladddf$4
1040
5:
1041
        movel   d2,d3
1042
        movel   d1,d2
1043
        movel   d0,d1
1044
        movel   IMM (0),d0
1045
#ifndef __mcoldfire__
1046
        subw    IMM (32),d6
1047
#else
1048
        subl    IMM (32),d6
1049
#endif
1050
        bra     2b
1051
6:
1052
        movew   d2,d3
1053
        swap    d3
1054
        movew   d1,d2
1055
        swap    d2
1056
        movew   d0,d1
1057
        swap    d1
1058
        movew   IMM (0),d0
1059
        swap    d0
1060
#ifndef __mcoldfire__
1061
        subw    IMM (16),d6
1062
#else
1063
        subl    IMM (16),d6
1064
#endif
1065
        bra     3b
1066
Ladddf$3:
1067
#ifndef __mcoldfire__
1068
        exg     d4,a2
1069
        exg     d5,a3
1070
#else
1071
        movel   d4,a4
1072
        movel   a2,d4
1073
        movel   a4,a2
1074
        movel   d5,a4
1075
        movel   a3,d5
1076
        movel   a4,a3
1077
#endif
1078
Ladddf$4:
1079
| Now we have the numbers in d0--d3 and d4--d7, the exponent in a2, and
1080
| the signs in a4.
1081
 
1082
| Here we have to decide whether to add or subtract the numbers:
1083
#ifndef __mcoldfire__
1084
        exg     d7,a0           | get the signs
1085
        exg     d6,a3           | a3 is free to be used
1086
#else
1087
        movel   d7,a4
1088
        movel   a0,d7
1089
        movel   a4,a0
1090
        movel   d6,a4
1091
        movel   a3,d6
1092
        movel   a4,a3
1093
#endif
1094
        movel   d7,d6           |
1095
        movew   IMM (0),d7      | get a's sign in d7 '
1096
        swap    d6              |
1097
        movew   IMM (0),d6      | and b's sign in d6 '
1098
        eorl    d7,d6           | compare the signs
1099
        bmi     Lsubdf$0        | if the signs are different we have
1100
                                | to subtract
1101
#ifndef __mcoldfire__
1102
        exg     d7,a0           | else we add the numbers
1103
        exg     d6,a3           |
1104
#else
1105
        movel   d7,a4
1106
        movel   a0,d7
1107
        movel   a4,a0
1108
        movel   d6,a4
1109
        movel   a3,d6
1110
        movel   a4,a3
1111
#endif
1112
        addl    d7,d3           |
1113
        addxl   d6,d2           |
1114
        addxl   d5,d1           |
1115
        addxl   d4,d0           |
1116
 
1117
        movel   a2,d4           | return exponent to d4
1118
        movel   a0,d7           |
1119
        andl    IMM (0x80000000),d7 | d7 now has the sign
1120
 
1121
#ifndef __mcoldfire__
1122
        moveml  sp@+,a2-a3
1123
#else
1124
        movel   sp@+,a4
1125
        movel   sp@+,a3
1126
        movel   sp@+,a2
1127
#endif
1128
 
1129
| Before rounding normalize so bit #DBL_MANT_DIG is set (we will consider
1130
| the case of denormalized numbers in the rounding routine itself).
1131
| As in the addition (not in the subtraction!) we could have set
1132
| one more bit we check this:
1133
        btst    IMM (DBL_MANT_DIG+1),d0
1134
        beq     1f
1135
#ifndef __mcoldfire__
1136
        lsrl    IMM (1),d0
1137
        roxrl   IMM (1),d1
1138
        roxrl   IMM (1),d2
1139
        roxrl   IMM (1),d3
1140
        addw    IMM (1),d4
1141
#else
1142
        lsrl    IMM (1),d3
1143
        btst    IMM (0),d2
1144
        beq     10f
1145
        bset    IMM (31),d3
1146
10:     lsrl    IMM (1),d2
1147
        btst    IMM (0),d1
1148
        beq     11f
1149
        bset    IMM (31),d2
1150
11:     lsrl    IMM (1),d1
1151
        btst    IMM (0),d0
1152
        beq     12f
1153
        bset    IMM (31),d1
1154
12:     lsrl    IMM (1),d0
1155
        addl    IMM (1),d4
1156
#endif
1157
1:
1158
        lea     pc@(Ladddf$5),a0 | to return from rounding routine
1159
        PICLEA  SYM (_fpCCR),a1 | check the rounding mode
1160
#ifdef __mcoldfire__
1161
        clrl    d6
1162
#endif
1163
        movew   a1@(6),d6       | rounding mode in d6
1164
        beq     Lround$to$nearest
1165
#ifndef __mcoldfire__
1166
        cmpw    IMM (ROUND_TO_PLUS),d6
1167
#else
1168
        cmpl    IMM (ROUND_TO_PLUS),d6
1169
#endif
1170
        bhi     Lround$to$minus
1171
        blt     Lround$to$zero
1172
        bra     Lround$to$plus
1173
Ladddf$5:
1174
| Put back the exponent and check for overflow
1175
#ifndef __mcoldfire__
1176
        cmpw    IMM (0x7ff),d4  | is the exponent big?
1177
#else
1178
        cmpl    IMM (0x7ff),d4  | is the exponent big?
1179
#endif
1180
        bge     1f
1181
        bclr    IMM (DBL_MANT_DIG-1),d0
1182
#ifndef __mcoldfire__
1183
        lslw    IMM (4),d4      | put exponent back into position
1184
#else
1185
        lsll    IMM (4),d4      | put exponent back into position
1186
#endif
1187
        swap    d0              |
1188
#ifndef __mcoldfire__
1189
        orw     d4,d0           |
1190
#else
1191
        orl     d4,d0           |
1192
#endif
1193
        swap    d0              |
1194
        bra     Ladddf$ret
1195
1:
1196
        moveq   IMM (ADD),d5
1197
        bra     Ld$overflow
1198
 
1199
Lsubdf$0:
1200
| Here we do the subtraction.
1201
#ifndef __mcoldfire__
1202
        exg     d7,a0           | put sign back in a0
1203
        exg     d6,a3           |
1204
#else
1205
        movel   d7,a4
1206
        movel   a0,d7
1207
        movel   a4,a0
1208
        movel   d6,a4
1209
        movel   a3,d6
1210
        movel   a4,a3
1211
#endif
1212
        subl    d7,d3           |
1213
        subxl   d6,d2           |
1214
        subxl   d5,d1           |
1215
        subxl   d4,d0           |
1216
        beq     Ladddf$ret$1    | if zero just exit
1217
        bpl     1f              | if positive skip the following
1218
        movel   a0,d7           |
1219
        bchg    IMM (31),d7     | change sign bit in d7
1220
        movel   d7,a0           |
1221
        negl    d3              |
1222
        negxl   d2              |
1223
        negxl   d1              | and negate result
1224
        negxl   d0              |
1225
1:
1226
        movel   a2,d4           | return exponent to d4
1227
        movel   a0,d7
1228
        andl    IMM (0x80000000),d7 | isolate sign bit
1229
#ifndef __mcoldfire__
1230
        moveml  sp@+,a2-a3      |
1231
#else
1232
        movel   sp@+,a4
1233
        movel   sp@+,a3
1234
        movel   sp@+,a2
1235
#endif
1236
 
1237
| Before rounding normalize so bit #DBL_MANT_DIG is set (we will consider
1238
| the case of denormalized numbers in the rounding routine itself).
1239
| As in the addition (not in the subtraction!) we could have set
1240
| one more bit we check this:
1241
        btst    IMM (DBL_MANT_DIG+1),d0
1242
        beq     1f
1243
#ifndef __mcoldfire__
1244
        lsrl    IMM (1),d0
1245
        roxrl   IMM (1),d1
1246
        roxrl   IMM (1),d2
1247
        roxrl   IMM (1),d3
1248
        addw    IMM (1),d4
1249
#else
1250
        lsrl    IMM (1),d3
1251
        btst    IMM (0),d2
1252
        beq     10f
1253
        bset    IMM (31),d3
1254
10:     lsrl    IMM (1),d2
1255
        btst    IMM (0),d1
1256
        beq     11f
1257
        bset    IMM (31),d2
1258
11:     lsrl    IMM (1),d1
1259
        btst    IMM (0),d0
1260
        beq     12f
1261
        bset    IMM (31),d1
1262
12:     lsrl    IMM (1),d0
1263
        addl    IMM (1),d4
1264
#endif
1265
1:
1266
        lea     pc@(Lsubdf$1),a0 | to return from rounding routine
1267
        PICLEA  SYM (_fpCCR),a1 | check the rounding mode
1268
#ifdef __mcoldfire__
1269
        clrl    d6
1270
#endif
1271
        movew   a1@(6),d6       | rounding mode in d6
1272
        beq     Lround$to$nearest
1273
#ifndef __mcoldfire__
1274
        cmpw    IMM (ROUND_TO_PLUS),d6
1275
#else
1276
        cmpl    IMM (ROUND_TO_PLUS),d6
1277
#endif
1278
        bhi     Lround$to$minus
1279
        blt     Lround$to$zero
1280
        bra     Lround$to$plus
1281
Lsubdf$1:
1282
| Put back the exponent and sign (we don't have overflow). '
1283
        bclr    IMM (DBL_MANT_DIG-1),d0
1284
#ifndef __mcoldfire__
1285
        lslw    IMM (4),d4      | put exponent back into position
1286
#else
1287
        lsll    IMM (4),d4      | put exponent back into position
1288
#endif
1289
        swap    d0              |
1290
#ifndef __mcoldfire__
1291
        orw     d4,d0           |
1292
#else
1293
        orl     d4,d0           |
1294
#endif
1295
        swap    d0              |
1296
        bra     Ladddf$ret
1297
 
1298
| If one of the numbers was too small (difference of exponents >=
1299
| DBL_MANT_DIG+1) we return the other (and now we don't have to '
1300
| check for finiteness or zero).
1301
Ladddf$a$small:
1302
#ifndef __mcoldfire__
1303
        moveml  sp@+,a2-a3
1304
#else
1305
        movel   sp@+,a4
1306
        movel   sp@+,a3
1307
        movel   sp@+,a2
1308
#endif
1309
        movel   a6@(16),d0
1310
        movel   a6@(20),d1
1311
        PICLEA  SYM (_fpCCR),a0
1312
        movew   IMM (0),a0@
1313
#ifndef __mcoldfire__
1314
        moveml  sp@+,d2-d7      | restore data registers
1315
#else
1316
        moveml  sp@,d2-d7
1317
        | XXX if frame pointer is ever removed, stack pointer must
1318
        | be adjusted here.
1319
#endif
1320
        unlk    a6              | and return
1321
        rts
1322
 
1323
Ladddf$b$small:
1324
#ifndef __mcoldfire__
1325
        moveml  sp@+,a2-a3
1326
#else
1327
        movel   sp@+,a4
1328
        movel   sp@+,a3
1329
        movel   sp@+,a2
1330
#endif
1331
        movel   a6@(8),d0
1332
        movel   a6@(12),d1
1333
        PICLEA  SYM (_fpCCR),a0
1334
        movew   IMM (0),a0@
1335
#ifndef __mcoldfire__
1336
        moveml  sp@+,d2-d7      | restore data registers
1337
#else
1338
        moveml  sp@,d2-d7
1339
        | XXX if frame pointer is ever removed, stack pointer must
1340
        | be adjusted here.
1341
#endif
1342
        unlk    a6              | and return
1343
        rts
1344
 
1345
Ladddf$a$den:
1346
        movel   d7,d4           | d7 contains 0x00200000
1347
        bra     Ladddf$1
1348
 
1349
Ladddf$b$den:
1350
        movel   d7,d5           | d7 contains 0x00200000
1351
        notl    d6
1352
        bra     Ladddf$2
1353
 
1354
Ladddf$b:
1355
| Return b (if a is zero)
1356
        movel   d2,d0
1357
        movel   d3,d1
1358
        bne     1f                      | Check if b is -0
1359
        cmpl    IMM (0x80000000),d0
1360
        bne     1f
1361
        andl    IMM (0x80000000),d7     | Use the sign of a
1362
        clrl    d0
1363
        bra     Ladddf$ret
1364
Ladddf$a:
1365
        movel   a6@(8),d0
1366
        movel   a6@(12),d1
1367
1:
1368
        moveq   IMM (ADD),d5
1369
| Check for NaN and +/-INFINITY.
1370
        movel   d0,d7                   |
1371
        andl    IMM (0x80000000),d7     |
1372
        bclr    IMM (31),d0             |
1373
        cmpl    IMM (0x7ff00000),d0     |
1374
        bge     2f                      |
1375
        movel   d0,d0                   | check for zero, since we don't  '
1376
        bne     Ladddf$ret              | want to return -0 by mistake
1377
        bclr    IMM (31),d7             |
1378
        bra     Ladddf$ret              |
1379
2:
1380
        andl    IMM (0x000fffff),d0     | check for NaN (nonzero fraction)
1381
        orl     d1,d0                   |
1382
        bne     Ld$inop                 |
1383
        bra     Ld$infty                |
1384
 
1385
Ladddf$ret$1:
1386
#ifndef __mcoldfire__
1387
        moveml  sp@+,a2-a3      | restore regs and exit
1388
#else
1389
        movel   sp@+,a4
1390
        movel   sp@+,a3
1391
        movel   sp@+,a2
1392
#endif
1393
 
1394
Ladddf$ret:
1395
| Normal exit.
1396
        PICLEA  SYM (_fpCCR),a0
1397
        movew   IMM (0),a0@
1398
        orl     d7,d0           | put sign bit back
1399
#ifndef __mcoldfire__
1400
        moveml  sp@+,d2-d7
1401
#else
1402
        moveml  sp@,d2-d7
1403
        | XXX if frame pointer is ever removed, stack pointer must
1404
        | be adjusted here.
1405
#endif
1406
        unlk    a6
1407
        rts
1408
 
1409
Ladddf$ret$den:
1410
| Return a denormalized number.
1411
#ifndef __mcoldfire__
1412
        lsrl    IMM (1),d0      | shift right once more
1413
        roxrl   IMM (1),d1      |
1414
#else
1415
        lsrl    IMM (1),d1
1416
        btst    IMM (0),d0
1417
        beq     10f
1418
        bset    IMM (31),d1
1419
10:     lsrl    IMM (1),d0
1420
#endif
1421
        bra     Ladddf$ret
1422
 
1423
Ladddf$nf:
1424
        moveq   IMM (ADD),d5
1425
| This could be faster but it is not worth the effort, since it is not
1426
| executed very often. We sacrifice speed for clarity here.
1427
        movel   a6@(8),d0       | get the numbers back (remember that we
1428
        movel   a6@(12),d1      | did some processing already)
1429
        movel   a6@(16),d2      |
1430
        movel   a6@(20),d3      |
1431
        movel   IMM (0x7ff00000),d4 | useful constant (INFINITY)
1432
        movel   d0,d7           | save sign bits
1433
        movel   d2,d6           |
1434
        bclr    IMM (31),d0     | clear sign bits
1435
        bclr    IMM (31),d2     |
1436
| We know that one of them is either NaN of +/-INFINITY
1437
| Check for NaN (if either one is NaN return NaN)
1438
        cmpl    d4,d0           | check first a (d0)
1439
        bhi     Ld$inop         | if d0 > 0x7ff00000 or equal and
1440
        bne     2f
1441
        tstl    d1              | d1 > 0, a is NaN
1442
        bne     Ld$inop         |
1443
2:      cmpl    d4,d2           | check now b (d1)
1444
        bhi     Ld$inop         |
1445
        bne     3f
1446
        tstl    d3              |
1447
        bne     Ld$inop         |
1448
3:
1449
| Now comes the check for +/-INFINITY. We know that both are (maybe not
1450
| finite) numbers, but we have to check if both are infinite whether we
1451
| are adding or subtracting them.
1452
        eorl    d7,d6           | to check sign bits
1453
        bmi     1f
1454
        andl    IMM (0x80000000),d7 | get (common) sign bit
1455
        bra     Ld$infty
1456
1:
1457
| We know one (or both) are infinite, so we test for equality between the
1458
| two numbers (if they are equal they have to be infinite both, so we
1459
| return NaN).
1460
        cmpl    d2,d0           | are both infinite?
1461
        bne     1f              | if d0 <> d2 they are not equal
1462
        cmpl    d3,d1           | if d0 == d2 test d3 and d1
1463
        beq     Ld$inop         | if equal return NaN
1464
1:
1465
        andl    IMM (0x80000000),d7 | get a's sign bit '
1466
        cmpl    d4,d0           | test now for infinity
1467
        beq     Ld$infty        | if a is INFINITY return with this sign
1468
        bchg    IMM (31),d7     | else we know b is INFINITY and has
1469
        bra     Ld$infty        | the opposite sign
1470
 
|=============================================================================
|                              __muldf3
|=============================================================================

| double __muldf3(double, double);
|
| IEEE double-precision multiply for targets without FP hardware.
| Arguments are fetched from the caller's frame: a = a6@(8)/a6@(12),
| b = a6@(16)/a6@(20), each as high/low longs.  The sign of the product
| is kept in a0; the (biased) result exponent ends up in d4; the normal
| path exits through Lround$exit with the mantissa in d0-d1-d2-d3, and
| the special-case paths (NaN, infinity, zero) exit through Ld$inop,
| Ld$overflow or directly with the result in d0-d1.
        FUNC(__muldf3)
SYM (__muldf3):
#ifndef __mcoldfire__
        link    a6,IMM (0)
        moveml  d2-d7,sp@-
#else
        link    a6,IMM (-24)
        moveml  d2-d7,sp@
#endif
        movel   a6@(8),d0               | get a into d0-d1
        movel   a6@(12),d1              |
        movel   a6@(16),d2              | and b into d2-d3
        movel   a6@(20),d3              |
        movel   d0,d7                   | d7 will hold the sign of the product
        eorl    d2,d7                   |
        andl    IMM (0x80000000),d7     |
        movel   d7,a0                   | save sign bit into a0
        movel   IMM (0x7ff00000),d7     | useful constant (+INFINITY)
        movel   d7,d6                   | another (mask for fraction)
        notl    d6                      |
        bclr    IMM (31),d0             | get rid of a's sign bit '
        movel   d0,d4                   |
        orl     d1,d4                   |
        beq     Lmuldf$a$0              | branch if a is zero
        movel   d0,d4                   |
        bclr    IMM (31),d2             | get rid of b's sign bit '
        movel   d2,d5                   |
        orl     d3,d5                   |
        beq     Lmuldf$b$0              | branch if b is zero
        movel   d2,d5                   |
        cmpl    d7,d0                   | is a big?
        bhi     Lmuldf$inop             | if a is NaN return NaN
        beq     Lmuldf$a$nf             | we still have to check d1 and b ...
        cmpl    d7,d2                   | now compare b with INFINITY
        bhi     Lmuldf$inop             | is b NaN?
        beq     Lmuldf$b$nf             | we still have to check d3 ...
| Here we have both numbers finite and nonzero (and with no sign bit).
| Now we get the exponents into d4 and d5.
        andl    d7,d4                   | isolate exponent in d4
        beq     Lmuldf$a$den            | if exponent zero, have denormalized
        andl    d6,d0                   | isolate fraction
        orl     IMM (0x00100000),d0     | and put hidden bit back
        swap    d4                      | I like exponents in the first byte
#ifndef __mcoldfire__
        lsrw    IMM (4),d4              |
#else
        lsrl    IMM (4),d4              |
#endif
Lmuldf$1:
        andl    d7,d5                   |
        beq     Lmuldf$b$den            |
        andl    d6,d2                   |
        orl     IMM (0x00100000),d2     | and put hidden bit back
        swap    d5                      |
#ifndef __mcoldfire__
        lsrw    IMM (4),d5              |
#else
        lsrl    IMM (4),d5              |
#endif
Lmuldf$2:                               |
#ifndef __mcoldfire__
        addw    d5,d4                   | add exponents
        subw    IMM (D_BIAS+1),d4       | and subtract bias (plus one)
#else
        addl    d5,d4                   | add exponents
        subl    IMM (D_BIAS+1),d4       | and subtract bias (plus one)
#endif

| We are now ready to do the multiplication. The situation is as follows:
| both a and b have bit 52 ( bit 20 of d0 and d2) set (even if they were
| denormalized to start with!), which means that in the product bit 104
| (which will correspond to bit 8 of the fourth long) is set.

| Here we have to do the product.
| To do it we have to juggle the registers back and forth, as there are not
| enough to keep everything in them. So we use the address registers to keep
| some intermediate data.

#ifndef __mcoldfire__
        moveml  a2-a3,sp@-      | save a2 and a3 for temporary use
#else
        movel   a2,sp@-
        movel   a3,sp@-
        movel   a4,sp@-
#endif
        movel   IMM (0),a2      | a2 is a null register
        movel   d4,a3           | and a3 will preserve the exponent

| First, shift d2-d3 so bit 20 becomes bit 31:
#ifndef __mcoldfire__
        rorl    IMM (5),d2      | rotate d2 5 places right
        swap    d2              | and swap it
        rorl    IMM (5),d3      | do the same thing with d3
        swap    d3              |
        movew   d3,d6           | get the rightmost 11 bits of d3
        andw    IMM (0x07ff),d6 |
        orw     d6,d2           | and put them into d2
        andw    IMM (0xf800),d3 | clear those bits in d3
#else
        moveq   IMM (11),d7     | left shift d2 11 bits
        lsll    d7,d2
        movel   d3,d6           | get a copy of d3
        lsll    d7,d3           | left shift d3 11 bits
        andl    IMM (0xffe00000),d6 | get the top 11 bits of d3
        moveq   IMM (21),d7     | right shift them 21 bits
        lsrl    d7,d6
        orl     d6,d2           | stick them at the end of d2
#endif

        movel   d2,d6           | move b into d6-d7
        movel   d3,d7           | move a into d4-d5
        movel   d0,d4           | and clear d0-d1-d2-d3 (to put result)
        movel   d1,d5           |
        movel   IMM (0),d3      |
        movel   d3,d2           |
        movel   d3,d1           |
        movel   d3,d0           |

| We use a1 as counter:
        movel   IMM (DBL_MANT_DIG-1),a1
#ifndef __mcoldfire__
        exg     d7,a1
#else
        movel   d7,a4
        movel   a1,d7
        movel   a4,a1
#endif

| Shift-and-add multiply loop: on each pass the 128-bit partial sum in
| d0-d1-d2-d3 is doubled, and a is added in whenever the next bit of b
| (shifted out of d6-d7) is set.
1:
#ifndef __mcoldfire__
        exg     d7,a1           | put counter back in a1
#else
        movel   d7,a4
        movel   a1,d7
        movel   a4,a1
#endif
        addl    d3,d3           | shift sum once left
        addxl   d2,d2           |
        addxl   d1,d1           |
        addxl   d0,d0           |
        addl    d7,d7           |
        addxl   d6,d6           |
        bcc     2f              | if bit clear skip the following
#ifndef __mcoldfire__
        exg     d7,a2           |
#else
        movel   d7,a4
        movel   a2,d7
        movel   a4,a2
#endif
        addl    d5,d3           | else add a to the sum
        addxl   d4,d2           |
        addxl   d7,d1           | (d7 is zero here, borrowed from a2:
        addxl   d7,d0           |  these just propagate the carry)
#ifndef __mcoldfire__
        exg     d7,a2           |
#else
        movel   d7,a4
        movel   a2,d7
        movel   a4,a2
#endif
2:
#ifndef __mcoldfire__
        exg     d7,a1           | put counter in d7
        dbf     d7,1b           | decrement and branch
#else
        movel   d7,a4
        movel   a1,d7
        movel   a4,a1
        subql   IMM (1),d7
        bpl     1b
#endif

        movel   a3,d4           | restore exponent
#ifndef __mcoldfire__
        moveml  sp@+,a2-a3
#else
        movel   sp@+,a4
        movel   sp@+,a3
        movel   sp@+,a2
#endif

| Now we have the product in d0-d1-d2-d3, with bit 8 of d0 set. The
| first thing to do now is to normalize it so bit 8 becomes bit
| DBL_MANT_DIG-32 (to do the rounding); later we will shift right.
        swap    d0
        swap    d1
        movew   d1,d0
        swap    d2
        movew   d2,d1
        swap    d3
        movew   d3,d2
        movew   IMM (0),d3
#ifndef __mcoldfire__
        lsrl    IMM (1),d0
        roxrl   IMM (1),d1
        roxrl   IMM (1),d2
        roxrl   IMM (1),d3
        lsrl    IMM (1),d0
        roxrl   IMM (1),d1
        roxrl   IMM (1),d2
        roxrl   IMM (1),d3
        lsrl    IMM (1),d0
        roxrl   IMM (1),d1
        roxrl   IMM (1),d2
        roxrl   IMM (1),d3
#else
        moveq   IMM (29),d6     | ColdFire has no roxrl: emulate a 3-bit
        lsrl    IMM (3),d3      | 128-bit right shift with shifts and ors
        movel   d2,d7
        lsll    d6,d7
        orl     d7,d3
        lsrl    IMM (3),d2
        movel   d1,d7
        lsll    d6,d7
        orl     d7,d2
        lsrl    IMM (3),d1
        movel   d0,d7
        lsll    d6,d7
        orl     d7,d1
        lsrl    IMM (3),d0
#endif

| Now round, check for over- and underflow, and exit.
        movel   a0,d7           | get sign bit back into d7
        moveq   IMM (MULTIPLY),d5

        btst    IMM (DBL_MANT_DIG+1-32),d0
        beq     Lround$exit
#ifndef __mcoldfire__
        lsrl    IMM (1),d0
        roxrl   IMM (1),d1
        addw    IMM (1),d4
#else
        lsrl    IMM (1),d1
        btst    IMM (0),d0
        beq     10f
        bset    IMM (31),d1
10:     lsrl    IMM (1),d0
        addl    IMM (1),d4
#endif
        bra     Lround$exit

Lmuldf$inop:
        moveq   IMM (MULTIPLY),d5
        bra     Ld$inop

Lmuldf$b$nf:
        moveq   IMM (MULTIPLY),d5
        movel   a0,d7           | get sign bit back into d7
        tstl    d3              | we know d2 == 0x7ff00000, so check d3
        bne     Ld$inop         | if d3 <> 0 b is NaN
        bra     Ld$overflow     | else we have overflow (since a is finite)

Lmuldf$a$nf:
        moveq   IMM (MULTIPLY),d5
        movel   a0,d7           | get sign bit back into d7
        tstl    d1              | we know d0 == 0x7ff00000, so check d1
        bne     Ld$inop         | if d1 <> 0 a is NaN
        bra     Ld$overflow     | else signal overflow

| If either number is zero return zero, unless the other is +/-INFINITY or
| NaN, in which case we return NaN.
Lmuldf$b$0:
        moveq   IMM (MULTIPLY),d5
#ifndef __mcoldfire__
        exg     d2,d0           | put b (==0) into d0-d1
        exg     d3,d1           | and a (with sign bit cleared) into d2-d3
        movel   a0,d0           | set result sign
#else
        movel   d0,d2           | put a into d2-d3
        movel   d1,d3
        movel   a0,d0           | put result zero into d0-d1
        movq    IMM(0),d1
#endif
        bra     1f
Lmuldf$a$0:
        movel   a0,d0           | set result sign
        movel   a6@(16),d2      | put b into d2-d3 again
        movel   a6@(20),d3      |
        bclr    IMM (31),d2     | clear sign bit
1:      cmpl    IMM (0x7ff00000),d2 | check for non-finiteness
        bge     Ld$inop         | in case NaN or +/-INFINITY return NaN
        PICLEA  SYM (_fpCCR),a0
        movew   IMM (0),a0@
#ifndef __mcoldfire__
        moveml  sp@+,d2-d7
#else
        moveml  sp@,d2-d7
        | XXX if frame pointer is ever removed, stack pointer must
        | be adjusted here.
#endif
        unlk    a6
        rts

| If a number is denormalized we put an exponent of 1 but do not put the
| hidden bit back into the fraction; instead we shift left until bit 21
| (the hidden bit) is set, adjusting the exponent accordingly. We do this
| to ensure that the product of the fractions is close to 1.
Lmuldf$a$den:
        movel   IMM (1),d4
        andl    d6,d0
1:      addl    d1,d1           | shift a left until bit 20 is set
        addxl   d0,d0           |
#ifndef __mcoldfire__
        subw    IMM (1),d4      | and adjust exponent
#else
        subl    IMM (1),d4      | and adjust exponent
#endif
        btst    IMM (20),d0     |
        bne     Lmuldf$1        |
        bra     1b

Lmuldf$b$den:
        movel   IMM (1),d5
        andl    d6,d2
1:      addl    d3,d3           | shift b left until bit 20 is set
        addxl   d2,d2           |
#ifndef __mcoldfire__
        subw    IMM (1),d5      | and adjust exponent
#else
        subql   IMM (1),d5      | and adjust exponent
#endif
        btst    IMM (20),d2     |
        bne     Lmuldf$2        |
        bra     1b
 
1803
 
1804
|=============================================================================
1805
|                              __divdf3
1806
|=============================================================================
1807
 
1808
| double __divdf3(double, double);
1809
        FUNC(__divdf3)
1810
SYM (__divdf3):
| IEEE double divide, a/b.  a is at a6@(8)-a6@(12), b at a6@(16)-a6@(20)
| (high long first); result returned in d0-d1.  The quotient of the
| fractions is formed bit-by-bit by shift-and-subtract; the sign of the
| result is parked in a0 and the sticky information in d2-d3/a1.
| Special operands (zero, INFINITY, NaN, denormal) branch to the
| Ldivdf$* handlers below.
1811
#ifndef __mcoldfire__
1812
        link    a6,IMM (0)
1813
        moveml  d2-d7,sp@-
1814
#else
1815
        link    a6,IMM (-24)
1816
        moveml  d2-d7,sp@
1817
#endif
1818
        movel   a6@(8),d0       | get a into d0-d1
1819
        movel   a6@(12),d1      |
1820
        movel   a6@(16),d2      | and b into d2-d3
1821
        movel   a6@(20),d3      |
1822
        movel   d0,d7           | d7 will hold the sign of the result
1823
        eorl    d2,d7           |
1824
        andl    IMM (0x80000000),d7
1825
        movel   d7,a0           | save sign into a0
1826
        movel   IMM (0x7ff00000),d7 | useful constant (+INFINITY)
1827
        movel   d7,d6           | another (mask for fraction)
1828
        notl    d6              |
1829
        bclr    IMM (31),d0     | get rid of a's sign bit '
1830
        movel   d0,d4           |
1831
        orl     d1,d4           |
1832
        beq     Ldivdf$a$0      | branch if a is zero
1833
        movel   d0,d4           |
1834
        bclr    IMM (31),d2     | get rid of b's sign bit '
1835
        movel   d2,d5           |
1836
        orl     d3,d5           |
1837
        beq     Ldivdf$b$0      | branch if b is zero
1838
        movel   d2,d5
1839
        cmpl    d7,d0           | is a big?
1840
        bhi     Ldivdf$inop     | if a is NaN return NaN
1841
        beq     Ldivdf$a$nf     | if d0 == 0x7ff00000 we check d1
1842
        cmpl    d7,d2           | now compare b with INFINITY
1843
        bhi     Ldivdf$inop     | if b is NaN return NaN
1844
        beq     Ldivdf$b$nf     | if d2 == 0x7ff00000 we check d3
1845
| Here we have both numbers finite and nonzero (and with no sign bit).
1846
| Now we get the exponents into d4 and d5 and normalize the numbers to
1847
| ensure that the ratio of the fractions is around 1. We do this by
1848
| making sure that both numbers have bit #DBL_MANT_DIG-32-1 (hidden bit)
1849
| set, even if they were denormalized to start with.
1850
| Thus, the result will satisfy: 2 > result > 1/2.
1851
        andl    d7,d4           | and isolate exponent in d4
1852
        beq     Ldivdf$a$den    | if exponent is zero we have a denormalized
1853
        andl    d6,d0           | and isolate fraction
1854
        orl     IMM (0x00100000),d0 | and put hidden bit back
1855
        swap    d4              | I like exponents in the first byte
1856
#ifndef __mcoldfire__
1857
        lsrw    IMM (4),d4      |
1858
#else
1859
        lsrl    IMM (4),d4      |
1860
#endif
1861
Ldivdf$1:                       |
1862
        andl    d7,d5           |
1863
        beq     Ldivdf$b$den    |
1864
        andl    d6,d2           |
1865
        orl     IMM (0x00100000),d2
1866
        swap    d5              |
1867
#ifndef __mcoldfire__
1868
        lsrw    IMM (4),d5      |
1869
#else
1870
        lsrl    IMM (4),d5      |
1871
#endif
1872
Ldivdf$2:                       |
1873
#ifndef __mcoldfire__
1874
        subw    d5,d4           | subtract exponents
1875
        addw    IMM (D_BIAS),d4 | and add bias
1876
#else
1877
        subl    d5,d4           | subtract exponents
1878
        addl    IMM (D_BIAS),d4 | and add bias
1879
#endif
1880
 
1881
| We are now ready to do the division. We have prepared things in such a way
1882
| that the ratio of the fractions will be less than 2 but greater than 1/2.
1883
| At this point the registers in use are:
1884
| d0-d1 hold a (first operand, bit DBL_MANT_DIG-32=0, bit
1885
| DBL_MANT_DIG-1-32=1)
1886
| d2-d3 hold b (second operand, bit DBL_MANT_DIG-32=1)
1887
| d4    holds the difference of the exponents, corrected by the bias
1888
| a0    holds the sign of the ratio
1889
 
1890
| To do the rounding correctly we need to keep information about the
1891
| nonsignificant bits. One way to do this would be to do the division
1892
| using four registers; another is to use two registers (as originally
1893
| I did), but use a sticky bit to preserve information about the
1894
| fractional part. Note that we can keep that info in a1, which is not
1895
| used.
1896
        movel   IMM (0),d6      | d6-d7 will hold the result
1897
        movel   d6,d7           |
1898
        movel   IMM (0),a1      | and a1 will hold the sticky bit
1899
 
1900
        movel   IMM (DBL_MANT_DIG-32+1),d5
1901
 
1902
1:      cmpl    d0,d2           | is a < b?
1903
        bhi     3f              | if b > a skip the following
1904
        beq     4f              | if d0==d2 check d1 and d3
1905
2:      subl    d3,d1           |
1906
        subxl   d2,d0           | a <-- a - b
1907
        bset    d5,d6           | set the corresponding bit in d6
1908
3:      addl    d1,d1           | shift a by 1
1909
        addxl   d0,d0           |
1910
#ifndef __mcoldfire__
1911
        dbra    d5,1b           | and branch back
1912
#else
1913
        subql   IMM (1), d5
1914
        bpl     1b
1915
#endif
1916
        bra     5f
1917
4:      cmpl    d1,d3           | here d0==d2, so check d1 and d3
1918
        bhi     3b              | if d3 > d1 (b > a) skip the subtraction
1919
        bra     2b              | else go do it
1920
5:
1921
| Here we have to start setting the bits in the second long.
1922
        movel   IMM (31),d5     | again d5 is counter
1923
 
1924
1:      cmpl    d0,d2           | is a < b?
1925
        bhi     3f              | if b > a skip the following
1926
        beq     4f              | if d0==d2 check d1 and d3
1927
2:      subl    d3,d1           |
1928
        subxl   d2,d0           | a <-- a - b
1929
        bset    d5,d7           | set the corresponding bit in d7
1930
3:      addl    d1,d1           | shift a by 1
1931
        addxl   d0,d0           |
1932
#ifndef __mcoldfire__
1933
        dbra    d5,1b           | and branch back
1934
#else
1935
        subql   IMM (1), d5
1936
        bpl     1b
1937
#endif
1938
        bra     5f
1939
4:      cmpl    d1,d3           | here d0==d2, so check d1 and d3
1940
        bhi     3b              | if d3 > d1 (b > a) skip the subtraction
1941
        bra     2b              | else go do it
1942
5:
1943
| Now go ahead checking until we hit a one, which we store in d2.
1944
        movel   IMM (DBL_MANT_DIG),d5
1945
1:      cmpl    d2,d0           | is a < b?
1946
        bhi     4f              | if b < a, exit
1947
        beq     3f              | if d0==d2 check d1 and d3
1948
2:      addl    d1,d1           | shift a by 1
1949
        addxl   d0,d0           |
1950
#ifndef __mcoldfire__
1951
        dbra    d5,1b           | and branch back
1952
#else
1953
        subql   IMM (1), d5
1954
        bpl     1b
1955
#endif
1956
        movel   IMM (0),d2      | here no sticky bit was found
1957
        movel   d2,d3
1958
        bra     5f
1959
3:      cmpl    d1,d3           | here d0==d2, so check d1 and d3
1960
        bhi     2b              | if d3 > d1 (a < b) go back shifting
1961
4:
1962
| Here put the sticky bit in d2-d3 (in the position which actually corresponds
1963
| to it; if you don't do this the algorithm loses in some cases). '
1964
        movel   IMM (0),d2
1965
        movel   d2,d3
1966
#ifndef __mcoldfire__
1967
        subw    IMM (DBL_MANT_DIG),d5
1968
        addw    IMM (63),d5
1969
        cmpw    IMM (31),d5
1970
#else
1971
        subl    IMM (DBL_MANT_DIG),d5
1972
        addl    IMM (63),d5
1973
        cmpl    IMM (31),d5
1974
#endif
1975
        bhi     2f
1976
1:      bset    d5,d3
1977
        bra     5f
1978
| NOTE(review): the sub below is unreachable (preceded by bra 5f); the
| bset at 2: still works because bset Dn,Dm uses the bit number mod 32.
#ifndef __mcoldfire__
1979
        subw    IMM (32),d5
1980
#else
1981
        subl    IMM (32),d5
1982
#endif
1983
2:      bset    d5,d2
1984
5:
1985
| Finally we are finished! Move the longs in the address registers to
1986
| their final destination:
1987
        movel   d6,d0
1988
        movel   d7,d1
1989
        movel   IMM (0),d3
1990
 
1991
| Here we have finished the division, with the result in d0-d1-d2-d3, with
1992
| 2^21 <= d6 < 2^23. Thus bit 23 is not set, but bit 22 could be set.
1993
| If it is not, then definitely bit 21 is set. Normalize so bit 22 is
1994
| not set:
1995
        btst    IMM (DBL_MANT_DIG-32+1),d0
1996
        beq     1f
1997
#ifndef __mcoldfire__
1998
        lsrl    IMM (1),d0
1999
        roxrl   IMM (1),d1
2000
        roxrl   IMM (1),d2
2001
        roxrl   IMM (1),d3
2002
        addw    IMM (1),d4
2003
#else
2004
        lsrl    IMM (1),d3
2005
        btst    IMM (0),d2
2006
        beq     10f
2007
        bset    IMM (31),d3
2008
10:     lsrl    IMM (1),d2
2009
        btst    IMM (0),d1
2010
        beq     11f
2011
        bset    IMM (31),d2
2012
11:     lsrl    IMM (1),d1
2013
        btst    IMM (0),d0
2014
        beq     12f
2015
        bset    IMM (31),d1
2016
12:     lsrl    IMM (1),d0
2017
        addl    IMM (1),d4
2018
#endif
2019
1:
2020
| Now round, check for over- and underflow, and exit.
2021
        movel   a0,d7           | restore sign bit to d7
2022
        moveq   IMM (DIVIDE),d5
2023
        bra     Lround$exit
2024
 
2025
Ldivdf$inop:
| Invalid operand(s) for divide: record the operation code and join the
| common double-precision NaN exit.
2026
        moveq   IMM (DIVIDE),d5
2027
        bra     Ld$inop
2028
 
2029
Ldivdf$a$0:
| Divide with a == 0.
2030
| If a is zero check to see whether b is zero also. In that case return
2031
| NaN; then check if b is NaN, and return NaN also in that case. Else
2032
| return a properly signed zero.
2033
        moveq   IMM (DIVIDE),d5
2034
        bclr    IMM (31),d2     |
2035
        movel   d2,d4           |
2036
        orl     d3,d4           |
2037
        beq     Ld$inop         | if b is also zero return NaN
2038
        cmpl    IMM (0x7ff00000),d2 | check for NaN
2039
        bhi     Ld$inop         |
2040
        blt     1f              |
2041
        tstl    d3              |
2042
        bne     Ld$inop         |
2043
1:      movel   a0,d0           | else return signed zero
2044
        moveq   IMM(0),d1       |
2045
        PICLEA  SYM (_fpCCR),a0 | clear exception flags
2046
        movew   IMM (0),a0@     |
2047
#ifndef __mcoldfire__
2048
        moveml  sp@+,d2-d7      |
2049
#else
2050
        moveml  sp@,d2-d7       |
2051
        | XXX if frame pointer is ever removed, stack pointer must
2052
        | be adjusted here.
2053
#endif
2054
        unlk    a6              |
2055
        rts                     |
2056
 
2057
Ldivdf$b$0:
| Divide by zero: b == 0 and (from the earlier test) a != 0 here.
2058
        moveq   IMM (DIVIDE),d5
2059
| If we got here a is not zero. Check if a is NaN; in that case return NaN,
2060
| else return +/-INFINITY. Remember that a is in d0 with the sign bit
2061
| cleared already.
2062
        movel   a0,d7           | put a's sign bit back in d7 '
2063
        cmpl    IMM (0x7ff00000),d0 | compare d0 with INFINITY
2064
        bhi     Ld$inop         | if larger it is NaN
2065
        tstl    d1              |
2066
        bne     Ld$inop         |
2067
        bra     Ld$div$0        | else signal DIVIDE_BY_ZERO
2068
 
2069
Ldivdf$b$nf:
| b has the INFINITY exponent: decide between NaN and finite/INFINITY => 0.
2070
        moveq   IMM (DIVIDE),d5
2071
| If d2 == 0x7ff00000 we have to check d3.
2072
        tstl    d3              |
2073
        bne     Ld$inop         | if d3 <> 0, b is NaN
2074
        bra     Ld$underflow    | else b is +/-INFINITY, so signal underflow
2075
 
2076
Ldivdf$a$nf:
| a has the INFINITY exponent: NaN/INFINITY operands give NaN,
| otherwise INFINITY/finite overflows.
2077
        moveq   IMM (DIVIDE),d5
2078
| If d0 == 0x7ff00000 we have to check d1.
2079
        tstl    d1              |
2080
        bne     Ld$inop         | if d1 <> 0, a is NaN
2081
| If a is INFINITY we have to check b
2082
        cmpl    d7,d2           | compare b with INFINITY
2083
        bge     Ld$inop         | if b is NaN or INFINITY return NaN
2084
        tstl    d3              |
2085
        bne     Ld$inop         |
2086
        bra     Ld$overflow     | else return overflow
2087
 
2088
| If a number is denormalized we put an exponent of 1 but do not put the
2089
| bit back into the fraction.
| Instead the fraction is shifted left (adjusting the exponent down)
| until the hidden bit position (bit DBL_MANT_DIG-32-1 of the high
| long) is set, then control rejoins the main path at Ldivdf$1.
2090
Ldivdf$a$den:
2091
        movel   IMM (1),d4
2092
        andl    d6,d0
2093
1:      addl    d1,d1           | shift a left until bit 20 is set
2094
        addxl   d0,d0
2095
#ifndef __mcoldfire__
2096
        subw    IMM (1),d4      | and adjust exponent
2097
#else
2098
        subl    IMM (1),d4      | and adjust exponent
2099
#endif
2100
        btst    IMM (DBL_MANT_DIG-32-1),d0
2101
        bne     Ldivdf$1
2102
        bra     1b
2103
 
2104
Ldivdf$b$den:
| Same normalization as Ldivdf$a$den, applied to the divisor b (d2-d3);
| rejoins the main path at Ldivdf$2.
2105
        movel   IMM (1),d5
2106
        andl    d6,d2
2107
1:      addl    d3,d3           | shift b left until bit 20 is set
2108
        addxl   d2,d2
2109
#ifndef __mcoldfire__
2110
        subw    IMM (1),d5      | and adjust exponent
2111
#else
2112
        subql   IMM (1),d5      | and adjust exponent
2113
#endif
2114
        btst    IMM (DBL_MANT_DIG-32-1),d2
2115
        bne     Ldivdf$2
2116
        bra     1b
2117
 
2118
Lround$exit:
| Common exit for the double multiply/divide paths: handle exponent
| underflow and denormalization shifts, then dispatch on the rounding
| mode word stored in _fpCCR to one of the Lround$to$* routines, which
| return to Lround$0 via jmp a0@.
2119
| This is a common exit point for __muldf3 and __divdf3. When they enter
2120
| this point the sign of the result is in d7, the result in d0-d1, normalized
2121
| so that 2^21 <= d0 < 2^22, and the exponent is in the lower byte of d4.
2122
 
2123
| First check for underflow in the exponent:
2124
#ifndef __mcoldfire__
2125
        cmpw    IMM (-DBL_MANT_DIG-1),d4
2126
#else
2127
        cmpl    IMM (-DBL_MANT_DIG-1),d4
2128
#endif
2129
        blt     Ld$underflow
2130
| It could happen that the exponent is less than 1, in which case the
2131
| number is denormalized. In this case we shift right and adjust the
2132
| exponent until it becomes 1 or the fraction is zero (in the latter case
2133
| we signal underflow and return zero).
2134
        movel   d7,a0           |
2135
        movel   IMM (0),d6      | use d6-d7 to collect bits flushed right
2136
        movel   d6,d7           | use d6-d7 to collect bits flushed right
2137
#ifndef __mcoldfire__
2138
        cmpw    IMM (1),d4      | if the exponent is less than 1 we
2139
#else
2140
        cmpl    IMM (1),d4      | if the exponent is less than 1 we
2141
#endif
2142
        bge     2f              | have to shift right (denormalize)
2143
1:
2144
#ifndef __mcoldfire__
2145
        addw    IMM (1),d4      | adjust the exponent
2146
        lsrl    IMM (1),d0      | shift right once
2147
        roxrl   IMM (1),d1      |
2148
        roxrl   IMM (1),d2      |
2149
        roxrl   IMM (1),d3      |
2150
        roxrl   IMM (1),d6      |
2151
        roxrl   IMM (1),d7      |
2152
        cmpw    IMM (1),d4      | is the exponent 1 already?
2153
#else
2154
        addl    IMM (1),d4      | adjust the exponent
2155
        lsrl    IMM (1),d7
2156
        btst    IMM (0),d6
2157
        beq     13f
2158
        bset    IMM (31),d7
2159
13:     lsrl    IMM (1),d6
2160
        btst    IMM (0),d3
2161
        beq     14f
2162
        bset    IMM (31),d6
2163
14:     lsrl    IMM (1),d3
2164
        btst    IMM (0),d2
2165
        beq     10f
2166
        bset    IMM (31),d3
2167
10:     lsrl    IMM (1),d2
2168
        btst    IMM (0),d1
2169
        beq     11f
2170
        bset    IMM (31),d2
2171
11:     lsrl    IMM (1),d1
2172
        btst    IMM (0),d0
2173
        beq     12f
2174
        bset    IMM (31),d1
2175
12:     lsrl    IMM (1),d0
2176
        cmpl    IMM (1),d4      | is the exponent 1 already?
2177
#endif
2178
        beq     2f              | if not loop back
2179
        bra     1b              |
2180
        bra     Ld$underflow    | safety check, shouldn't execute '
2181
2:      orl     d6,d2           | this is a trick so we don't lose  '
2182
        orl     d7,d3           | the bits which were flushed right
2183
        movel   a0,d7           | get back sign bit into d7
2184
| Now call the rounding routine (which takes care of denormalized numbers):
2185
        lea     pc@(Lround$0),a0 | to return from rounding routine
2186
        PICLEA  SYM (_fpCCR),a1 | check the rounding mode
2187
#ifdef __mcoldfire__
2188
        clrl    d6
2189
#endif
2190
        movew   a1@(6),d6       | rounding mode in d6
2191
        beq     Lround$to$nearest
2192
#ifndef __mcoldfire__
2193
        cmpw    IMM (ROUND_TO_PLUS),d6
2194
#else
2195
        cmpl    IMM (ROUND_TO_PLUS),d6
2196
#endif
2197
        bhi     Lround$to$minus
2198
        blt     Lround$to$zero
2199
        bra     Lround$to$plus
2200
Lround$0:
| Return point from the rounding routines: check for overflow and for a
| denormalized result, then reassemble sign/exponent/fraction into
| d0-d1, clear _fpCCR and return.
2201
| Here we have a correctly rounded result (either normalized or denormalized).
2202
 
2203
| Here we should have either a normalized number or a denormalized one, and
2204
| the exponent is necessarily larger or equal to 1 (so we don't have to  '
2205
| check again for underflow!). We have to check for overflow or for a
2206
| denormalized number (which also signals underflow).
2207
| Check for overflow (i.e., exponent >= 0x7ff).
2208
#ifndef __mcoldfire__
2209
        cmpw    IMM (0x07ff),d4
2210
#else
2211
        cmpl    IMM (0x07ff),d4
2212
#endif
2213
        bge     Ld$overflow
2214
| Now check for a denormalized number (exponent==0):
2215
        movew   d4,d4
2216
        beq     Ld$den
2217
1:
2218
| Put back the exponents and sign and return.
2219
#ifndef __mcoldfire__
2220
        lslw    IMM (4),d4      | exponent back to fourth byte
2221
#else
2222
        lsll    IMM (4),d4      | exponent back to fourth byte
2223
#endif
2224
        bclr    IMM (DBL_MANT_DIG-32-1),d0
2225
        swap    d0              | and put back exponent
2226
#ifndef __mcoldfire__
2227
        orw     d4,d0           |
2228
#else
2229
        orl     d4,d0           |
2230
#endif
2231
        swap    d0              |
2232
        orl     d7,d0           | and sign also
2233
 
2234
        PICLEA  SYM (_fpCCR),a0
2235
        movew   IMM (0),a0@
2236
#ifndef __mcoldfire__
2237
        moveml  sp@+,d2-d7
2238
#else
2239
        moveml  sp@,d2-d7
2240
        | XXX if frame pointer is ever removed, stack pointer must
2241
        | be adjusted here.
2242
#endif
2243
        unlk    a6
2244
        rts
2245
 
2246
|=============================================================================
2247
|                              __negdf2
2248
|=============================================================================
2249
 
2250
| double __negdf2(double, double);
2251
        FUNC(__negdf2)
2252
SYM (__negdf2):
| Negate the double at a6@(8)-a6@(12) by toggling its sign bit.
| Zero of either sign returns +0; a NaN operand goes to Ld$inop and
| INFINITY is returned through Ld$infty with the (negated) sign.
2253
#ifndef __mcoldfire__
2254
        link    a6,IMM (0)
2255
        moveml  d2-d7,sp@-
2256
#else
2257
        link    a6,IMM (-24)
2258
        moveml  d2-d7,sp@
2259
#endif
2260
        moveq   IMM (NEGATE),d5
2261
        movel   a6@(8),d0       | get number to negate in d0-d1
2262
        movel   a6@(12),d1      |
2263
        bchg    IMM (31),d0     | negate
2264
        movel   d0,d2           | make a positive copy (for the tests)
2265
        bclr    IMM (31),d2     |
2266
        movel   d2,d4           | check for zero
2267
        orl     d1,d4           |
2268
        beq     2f              | if zero (either sign) return +zero
2269
        cmpl    IMM (0x7ff00000),d2 | compare to +INFINITY
2270
        blt     1f              | if finite, return
2271
        bhi     Ld$inop         | if larger (fraction not zero) is NaN
2272
        tstl    d1              | if d2 == 0x7ff00000 check d1
2273
        bne     Ld$inop         |
2274
        movel   d0,d7           | else get sign and return INFINITY
2275
        andl    IMM (0x80000000),d7
2276
        bra     Ld$infty
2277
1:      PICLEA  SYM (_fpCCR),a0
2278
        movew   IMM (0),a0@
2279
#ifndef __mcoldfire__
2280
        moveml  sp@+,d2-d7
2281
#else
2282
        moveml  sp@,d2-d7
2283
        | XXX if frame pointer is ever removed, stack pointer must
2284
        | be adjusted here.
2285
#endif
2286
        unlk    a6
2287
        rts
2288
2:      bclr    IMM (31),d0
2289
        bra     1b
2290
 
2291
|=============================================================================
2292
|                              __cmpdf2
2293
|=============================================================================
2294
 
2295
| Three-way comparison result codes returned by __cmpdf2_internal.
GREATER =  1
2296
LESS    = -1
2297
EQUAL   =  0
2298
 
2299
| int __cmpdf2_internal(double, double, int);
2300
SYM (__cmpdf2_internal):
| Three-way compare of two doubles: returns GREATER, EQUAL or LESS in
| d0.  Zeros of either sign compare equal; NaN operands branch to
| Lcmpd$inop, which loads the caller-supplied third argument (a6@(24))
| into d0 and raises INVALID_OPERATION.
2301
#ifndef __mcoldfire__
2302
        link    a6,IMM (0)
2303
        moveml  d2-d7,sp@-      | save registers
2304
#else
2305
        link    a6,IMM (-24)
2306
        moveml  d2-d7,sp@
2307
#endif
2308
        moveq   IMM (COMPARE),d5
2309
        movel   a6@(8),d0       | get first operand
2310
        movel   a6@(12),d1      |
2311
        movel   a6@(16),d2      | get second operand
2312
        movel   a6@(20),d3      |
2313
| First check if a and/or b are (+/-) zero and in that case clear
2314
| the sign bit.
2315
        movel   d0,d6           | copy signs into d6 (a) and d7(b)
2316
        bclr    IMM (31),d0     | and clear signs in d0 and d2
2317
        movel   d2,d7           |
2318
        bclr    IMM (31),d2     |
2319
        cmpl    IMM (0x7ff00000),d0 | check for a == NaN
2320
        bhi     Lcmpd$inop              | if d0 > 0x7ff00000, a is NaN
2321
        beq     Lcmpdf$a$nf     | if equal can be INFINITY, so check d1
2322
        movel   d0,d4           | copy into d4 to test for zero
2323
        orl     d1,d4           |
2324
        beq     Lcmpdf$a$0      |
2325
Lcmpdf$0:
2326
        cmpl    IMM (0x7ff00000),d2 | check for b == NaN
2327
        bhi     Lcmpd$inop              | if d2 > 0x7ff00000, b is NaN
2328
        beq     Lcmpdf$b$nf     | if equal can be INFINITY, so check d3
2329
        movel   d2,d4           |
2330
        orl     d3,d4           |
2331
        beq     Lcmpdf$b$0      |
2332
Lcmpdf$1:
2333
| Check the signs
2334
        eorl    d6,d7
2335
        bpl     1f
2336
| If the signs are not equal check if a >= 0
2337
        tstl    d6
2338
        bpl     Lcmpdf$a$gt$b   | if (a >= 0 && b < 0) => a > b
2339
        bmi     Lcmpdf$b$gt$a   | if (a < 0 && b >= 0) => a < b
2340
1:
2341
| If the signs are equal check for < 0
2342
        tstl    d6
2343
        bpl     1f
2344
| If both are negative exchange them
2345
#ifndef __mcoldfire__
2346
        exg     d0,d2
2347
        exg     d1,d3
2348
#else
2349
        movel   d0,d7
2350
        movel   d2,d0
2351
        movel   d7,d2
2352
        movel   d1,d7
2353
        movel   d3,d1
2354
        movel   d7,d3
2355
#endif
2356
1:
2357
| Now that they are positive we just compare them as longs (does this also
2358
| work for denormalized numbers?).
2359
        cmpl    d0,d2
2360
        bhi     Lcmpdf$b$gt$a   | |b| > |a|
2361
        bne     Lcmpdf$a$gt$b   | |b| < |a|
2362
| If we got here d0 == d2, so we compare d1 and d3.
2363
        cmpl    d1,d3
2364
        bhi     Lcmpdf$b$gt$a   | |b| > |a|
2365
        bne     Lcmpdf$a$gt$b   | |b| < |a|
2366
| If we got here a == b.
2367
        movel   IMM (EQUAL),d0
2368
#ifndef __mcoldfire__
2369
        moveml  sp@+,d2-d7      | put back the registers
2370
#else
2371
        moveml  sp@,d2-d7
2372
        | XXX if frame pointer is ever removed, stack pointer must
2373
        | be adjusted here.
2374
#endif
2375
        unlk    a6
2376
        rts
2377
Lcmpdf$a$gt$b:
2378
        movel   IMM (GREATER),d0
2379
#ifndef __mcoldfire__
2380
        moveml  sp@+,d2-d7      | put back the registers
2381
#else
2382
        moveml  sp@,d2-d7
2383
        | XXX if frame pointer is ever removed, stack pointer must
2384
        | be adjusted here.
2385
#endif
2386
        unlk    a6
2387
        rts
2388
Lcmpdf$b$gt$a:
2389
        movel   IMM (LESS),d0
2390
#ifndef __mcoldfire__
2391
        moveml  sp@+,d2-d7      | put back the registers
2392
#else
2393
        moveml  sp@,d2-d7
2394
        | XXX if frame pointer is ever removed, stack pointer must
2395
        | be adjusted here.
2396
#endif
2397
        unlk    a6
2398
        rts
2399
 
2400
| A (or b) is zero: clear its recorded sign so -0 compares equal to +0.
Lcmpdf$a$0:
2401
        bclr    IMM (31),d6
2402
        bra     Lcmpdf$0
2403
Lcmpdf$b$0:
2404
        bclr    IMM (31),d7
2405
        bra     Lcmpdf$1
2406
 
2407
| Operand has the INFINITY exponent: a nonzero low long means NaN.
Lcmpdf$a$nf:
2408
        tstl    d1
2409
        bne     Ld$inop
2410
        bra     Lcmpdf$0
2411
 
2412
Lcmpdf$b$nf:
2413
        tstl    d3
2414
        bne     Ld$inop
2415
        bra     Lcmpdf$1
2416
 
2417
| NaN operand: return the caller's default result and raise the trap.
Lcmpd$inop:
2418
        movl    a6@(24),d0
2419
        moveq   IMM (INEXACT_RESULT+INVALID_OPERATION),d7
2420
        moveq   IMM (DOUBLE_FLOAT),d6
2421
        PICJUMP $_exception_handler
2422
 
2423
| int __cmpdf2(double, double);
2424
        FUNC(__cmpdf2)
2425
SYM (__cmpdf2):
| Wrapper: re-push the two doubles plus a default NaN result of 1 and
| call __cmpdf2_internal.
2426
        link    a6,IMM (0)
2427
        pea     1
2428
        movl    a6@(20),sp@-
2429
        movl    a6@(16),sp@-
2430
        movl    a6@(12),sp@-
2431
        movl    a6@(8),sp@-
2432
        PICCALL SYM (__cmpdf2_internal)
2433
        unlk    a6
2434
        rts
2435
 
2436
|=============================================================================
2437
|                           rounding routines
2438
|=============================================================================
2439
 
2440
| The rounding routines expect the number to be normalized in registers
2441
| d0-d1-d2-d3, with the exponent in register d4. They assume that the
2442
| exponent is larger or equal to 1. They return a properly normalized number
2443
| if possible, and a denormalized number otherwise. The exponent is returned
2444
| in d4.
2445
 
2446
Lround$to$nearest:
| Round-to-nearest-even on the extended result in d0-d3 (d2-d3 carry
| the guard/sticky information), exponent in d4; returns via jmp a0@.
2447
| We now normalize as suggested by D. Knuth ("Seminumerical Algorithms"):
2448
| Here we assume that the exponent is not too small (this should be checked
2449
| before entering the rounding routine), but the number could be denormalized.
2450
 
2451
| Check for denormalized numbers:
2452
1:      btst    IMM (DBL_MANT_DIG-32),d0
2453
        bne     2f              | if set the number is normalized
2454
| Normalize shifting left until bit #DBL_MANT_DIG-32 is set or the exponent
2455
| is one (remember that a denormalized number corresponds to an
2456
| exponent of -D_BIAS+1).
2457
#ifndef __mcoldfire__
2458
        cmpw    IMM (1),d4      | remember that the exponent is at least one
2459
#else
2460
        cmpl    IMM (1),d4      | remember that the exponent is at least one
2461
#endif
2462
        beq     2f              | an exponent of one means denormalized
2463
        addl    d3,d3           | else shift and adjust the exponent
2464
        addxl   d2,d2           |
2465
        addxl   d1,d1           |
2466
        addxl   d0,d0           |
2467
#ifndef __mcoldfire__
2468
        dbra    d4,1b           |
2469
#else
2470
        subql   IMM (1), d4
2471
        bpl     1b
2472
#endif
2473
2:
2474
| Now round: we do it as follows: after the shifting we can write the
2475
| fraction part as f + delta, where 1 < f < 2^25, and 0 <= delta <= 2.
2476
| If delta < 1, do nothing. If delta > 1, add 1 to f.
2477
| If delta == 1, we make sure the rounded number will be even (odd?)
2478
| (after shifting).
2479
        btst    IMM (0),d1      | is delta < 1?
2480
        beq     2f              | if so, do not do anything
2481
        orl     d2,d3           | is delta == 1?
2482
        bne     1f              | if so round to even
2483
        movel   d1,d3           |
2484
        andl    IMM (2),d3      | bit 1 is the last significant bit
2485
        movel   IMM (0),d2      |
2486
        addl    d3,d1           |
2487
        addxl   d2,d0           |
2488
        bra     2f              |
2489
1:      movel   IMM (1),d3      | else add 1
2490
        movel   IMM (0),d2      |
2491
        addl    d3,d1           |
2492
        addxl   d2,d0
2493
| Shift right once (because we used bit #DBL_MANT_DIG-32!).
2494
2:
2495
#ifndef __mcoldfire__
2496
        lsrl    IMM (1),d0
2497
        roxrl   IMM (1),d1
2498
#else
2499
        lsrl    IMM (1),d1
2500
        btst    IMM (0),d0
2501
        beq     10f
2502
        bset    IMM (31),d1
2503
10:     lsrl    IMM (1),d0
2504
#endif
2505
 
2506
| Now check again bit #DBL_MANT_DIG-32 (rounding could have produced a
2507
| 'fraction overflow' ...).
2508
        btst    IMM (DBL_MANT_DIG-32),d0
2509
        beq     1f
2510
#ifndef __mcoldfire__
2511
        lsrl    IMM (1),d0
2512
        roxrl   IMM (1),d1
2513
        addw    IMM (1),d4
2514
#else
2515
        lsrl    IMM (1),d1
2516
        btst    IMM (0),d0
2517
        beq     10f
2518
        bset    IMM (31),d1
2519
10:     lsrl    IMM (1),d0
2520
        addl    IMM (1),d4
2521
#endif
2522
1:
2523
| If bit #DBL_MANT_DIG-32-1 is clear we have a denormalized number, so we
2524
| have to put the exponent to zero and return a denormalized number.
2525
        btst    IMM (DBL_MANT_DIG-32-1),d0
2526
        beq     1f
2527
        jmp     a0@
2528
1:      movel   IMM (0),d4
2529
        jmp     a0@
2530
 
2531
| The directed/truncating modes perform no adjustment here: just return
| to the caller through a0.
Lround$to$zero:
2532
Lround$to$plus:
2533
Lround$to$minus:
2534
        jmp     a0@
2535
#endif /* L_double */
2536
 
2537
#ifdef  L_float
2538
 
2539
        .globl  SYM (_fpCCR)
2540
        .globl  $_exception_handler
2541
 
2542
| Single-precision constants: NaN/INFINITY bit patterns, exponent
| parameters, exception flag bits, operation codes and rounding modes
| used by the float routines below.
QUIET_NaN    = 0xffffffff
2543
SIGNL_NaN    = 0x7f800001
2544
INFINITY     = 0x7f800000
2545
 
2546
F_MAX_EXP      = 0xff
2547
F_BIAS         = 126
2548
FLT_MAX_EXP    = F_MAX_EXP - F_BIAS
2549
FLT_MIN_EXP    = 1 - F_BIAS
2550
FLT_MANT_DIG   = 24
2551
 
2552
INEXACT_RESULT          = 0x0001
2553
UNDERFLOW               = 0x0002
2554
OVERFLOW                = 0x0004
2555
DIVIDE_BY_ZERO          = 0x0008
2556
INVALID_OPERATION       = 0x0010
2557
 
2558
SINGLE_FLOAT = 1
2559
 
2560
NOOP         = 0
2561
ADD          = 1
2562
MULTIPLY     = 2
2563
DIVIDE       = 3
2564
NEGATE       = 4
2565
COMPARE      = 5
2566
EXTENDSFDF   = 6
2567
TRUNCDFSF    = 7
2568
 
2569
UNKNOWN           = -1
2570
ROUND_TO_NEAREST  = 0 | round result to nearest representable value
2571
ROUND_TO_ZERO     = 1 | round result towards zero
2572
ROUND_TO_PLUS     = 2 | round result towards plus infinity
2573
ROUND_TO_MINUS    = 3 | round result towards minus infinity
2574
 
2575
| Entry points:
2576
 
2577
        .globl SYM (__addsf3)
2578
        .globl SYM (__subsf3)
2579
        .globl SYM (__mulsf3)
2580
        .globl SYM (__divsf3)
2581
        .globl SYM (__negsf2)
2582
        .globl SYM (__cmpsf2)
2583
        .globl SYM (__cmpsf2_internal)
2584
        .hidden SYM (__cmpsf2_internal)
2585
 
2586
| These are common routines to return and signal exceptions.
2587
 
2588
        .text
2589
        .even
2590
 
2591
Lf$den:
| Common exit for the float routines (sign expected in d7, denormalized
| fraction in d0).
2592
| Return and signal a denormalized number
2593
        orl     d7,d0
2594
        moveq   IMM (INEXACT_RESULT+UNDERFLOW),d7
2595
        moveq   IMM (SINGLE_FLOAT),d6
2596
        PICJUMP $_exception_handler
2597
 
2598
Lf$infty:
2599
Lf$overflow:
| Common exit: sign expected in d7.
2600
| Return a properly signed INFINITY and set the exception flags
2601
        movel   IMM (INFINITY),d0
2602
        orl     d7,d0
2603
        moveq   IMM (INEXACT_RESULT+OVERFLOW),d7
2604
        moveq   IMM (SINGLE_FLOAT),d6
2605
        PICJUMP $_exception_handler
2606
 
2607
Lf$underflow:
| Common exit: result too small to represent.
2608
| Return 0 and set the exception flags
2609
        moveq   IMM (0),d0
2610
        moveq   IMM (INEXACT_RESULT+UNDERFLOW),d7
2611
        moveq   IMM (SINGLE_FLOAT),d6
2612
        PICJUMP $_exception_handler
2613
 
2614
Lf$inop:
| Common exit: invalid operation.
2615
| Return a quiet NaN and set the exception flags
2616
        movel   IMM (QUIET_NaN),d0
2617
        moveq   IMM (INEXACT_RESULT+INVALID_OPERATION),d7
2618
        moveq   IMM (SINGLE_FLOAT),d6
2619
        PICJUMP $_exception_handler
2620
 
2621
Lf$div$0:
| Common exit: division by zero (sign expected in d7).
2622
| Return a properly signed INFINITY and set the exception flags
2623
        movel   IMM (INFINITY),d0
2624
        orl     d7,d0
2625
        moveq   IMM (INEXACT_RESULT+DIVIDE_BY_ZERO),d7
2626
        moveq   IMM (SINGLE_FLOAT),d6
2627
        PICJUMP $_exception_handler
2628
 
2629
|=============================================================================
2630
|=============================================================================
2631
|                         single precision routines
2632
|=============================================================================
2633
|=============================================================================
2634
 
2635
| A single precision floating point number (float) has the format:
2636
|
2637
| struct _float {
2638
|  unsigned int sign      : 1;  /* sign bit */
2639
|  unsigned int exponent  : 8;  /* exponent, shifted by 126 */
2640
|  unsigned int fraction  : 23; /* fraction */
2641
| } float;
2642
|
2643
| Thus sizeof(float) = 4 (32 bits).
2644
|
2645
| All the routines are callable from C programs, and return the result
2646
| in the single register d0. They also preserve all registers except
2647
| d0-d1 and a0-a1.
2648
 
2649
|=============================================================================
2650
|                              __subsf3
2651
|=============================================================================
2652
 
2653
| float __subsf3(float, float);
2654
        FUNC(__subsf3)
2655
SYM (__subsf3):
| Flip the sign of the second float argument in place on the stack,
| then fall straight into __addsf3: a - b == a + (-b).
2656
        bchg    IMM (31),sp@(8) | change sign of second operand
2657
                                | and fall through
2658
|=============================================================================
|                              __addsf3
|=============================================================================

| float __addsf3(float, float);
| Register roles (see the code below): d0-d1 hold the first operand's
| extended mantissa, d2-d3 the second's, d6/d7 the exponents (second byte
| after the swap), a0/a1 park the operands' sign words, d4/d5 hold masks.
| Result is returned in d0; d2-d7 are saved/restored around the body.
        FUNC(__addsf3)
SYM (__addsf3):
#ifndef __mcoldfire__
        link    a6,IMM (0)      | everything will be done in registers
        moveml  d2-d7,sp@-      | save all data registers but d0-d1
#else
        link    a6,IMM (-24)
        moveml  d2-d7,sp@
#endif
        movel   a6@(8),d0       | get first operand
        movel   a6@(12),d1      | get second operand
        movel   d0,a0           | get d0's sign bit '
        addl    d0,d0           | check and clear sign bit of a
        beq     Laddsf$b        | if zero return second operand
        movel   d1,a1           | save b's sign bit '
        addl    d1,d1           | get rid of sign bit
        beq     Laddsf$a        | if zero return first operand

| Get the exponents and check for denormalized and/or infinity.

        movel   IMM (0x00ffffff),d4     | mask to get fraction
        movel   IMM (0x01000000),d5     | mask to put hidden bit back

        movel   d0,d6           | save a to get exponent
        andl    d4,d0           | get fraction in d0
        notl    d4              | make d4 into a mask for the exponent
        andl    d4,d6           | get exponent in d6
        beq     Laddsf$a$den    | branch if a is denormalized
        cmpl    d4,d6           | check for INFINITY or NaN
        beq     Laddsf$nf
        swap    d6              | put exponent into first word
        orl     d5,d0           | and put hidden bit back
Laddsf$1:
| Now we have a's exponent in d6 (second byte) and the mantissa in d0. '
        movel   d1,d7           | get exponent in d7
        andl    d4,d7           |
        beq     Laddsf$b$den    | branch if b is denormalized
        cmpl    d4,d7           | check for INFINITY or NaN
        beq     Laddsf$nf
        swap    d7              | put exponent into first word
        notl    d4              | make d4 into a mask for the fraction
        andl    d4,d1           | get fraction in d1
        orl     d5,d1           | and put hidden bit back
Laddsf$2:
| Now we have b's exponent in d7 (second byte) and the mantissa in d1. '

| Note that the hidden bit corresponds to bit #FLT_MANT_DIG-1, and we
| shifted right once, so bit #FLT_MANT_DIG is set (so we have one extra
| bit).

        movel   d1,d2           | move b to d2, since we want to use
                                | two registers to do the sum
        movel   IMM (0),d1      | and clear the new ones
        movel   d1,d3           |

| Here we shift the numbers in registers d0 and d1 so the exponents are the
| same, and put the largest exponent in d6. Note that we are using two
| registers for each number (see the discussion by D. Knuth in "Seminumerical
| Algorithms").
#ifndef __mcoldfire__
        cmpw    d6,d7           | compare exponents
#else
        cmpl    d6,d7           | compare exponents
#endif
        beq     Laddsf$3        | if equal don't shift '
        bhi     5f              | branch if second exponent largest
1:
        subl    d6,d7           | keep the largest exponent
        negl    d7
#ifndef __mcoldfire__
        lsrw    IMM (8),d7      | put difference in lower byte
#else
        lsrl    IMM (8),d7      | put difference in lower byte
#endif
| if difference is too large we don't shift (actually, we can just exit) '
#ifndef __mcoldfire__
        cmpw    IMM (FLT_MANT_DIG+2),d7
#else
        cmpl    IMM (FLT_MANT_DIG+2),d7
#endif
        bge     Laddsf$b$small
#ifndef __mcoldfire__
        cmpw    IMM (16),d7     | if difference >= 16 swap
#else
        cmpl    IMM (16),d7     | if difference >= 16 swap
#endif
        bge     4f
2:
#ifndef __mcoldfire__
        subw    IMM (1),d7
#else
        subql   IMM (1), d7
#endif
3:
#ifndef __mcoldfire__
        lsrl    IMM (1),d2      | shift right second operand
        roxrl   IMM (1),d3
        dbra    d7,3b
#else
        lsrl    IMM (1),d3
        btst    IMM (0),d2
        beq     10f
        bset    IMM (31),d3
10:     lsrl    IMM (1),d2
        subql   IMM (1), d7
        bpl     3b
#endif
        bra     Laddsf$3
4:
        movew   d2,d3
        swap    d3
        movew   d3,d2
        swap    d2
#ifndef __mcoldfire__
        subw    IMM (16),d7
#else
        subl    IMM (16),d7
#endif
        bne     2b              | if still more bits, go back to normal case
        bra     Laddsf$3
5:
#ifndef __mcoldfire__
        exg     d6,d7           | exchange the exponents
#else
        eorl    d6,d7           | xor-swap d6 and d7 (ColdFire has no exg)
        eorl    d7,d6
        eorl    d6,d7
#endif
        subl    d6,d7           | keep the largest exponent
        negl    d7              |
#ifndef __mcoldfire__
        lsrw    IMM (8),d7      | put difference in lower byte
#else
        lsrl    IMM (8),d7      | put difference in lower byte
#endif
| if difference is too large we don't shift (and exit!) '
#ifndef __mcoldfire__
        cmpw    IMM (FLT_MANT_DIG+2),d7
#else
        cmpl    IMM (FLT_MANT_DIG+2),d7
#endif
        bge     Laddsf$a$small
#ifndef __mcoldfire__
        cmpw    IMM (16),d7     | if difference >= 16 swap
#else
        cmpl    IMM (16),d7     | if difference >= 16 swap
#endif
        bge     8f
6:
#ifndef __mcoldfire__
        subw    IMM (1),d7
#else
        subl    IMM (1),d7
#endif
7:
#ifndef __mcoldfire__
        lsrl    IMM (1),d0      | shift right first operand
        roxrl   IMM (1),d1
        dbra    d7,7b
#else
        lsrl    IMM (1),d1
        btst    IMM (0),d0
        beq     10f
        bset    IMM (31),d1
10:     lsrl    IMM (1),d0
        subql   IMM (1),d7
        bpl     7b
#endif
        bra     Laddsf$3
8:
        movew   d0,d1
        swap    d1
        movew   d1,d0
        swap    d0
#ifndef __mcoldfire__
        subw    IMM (16),d7
#else
        subl    IMM (16),d7
#endif
        bne     6b              | if still more bits, go back to normal case
                                | otherwise we fall through

| Now we have a in d0-d1, b in d2-d3, and the largest exponent in d6 (the
| signs are stored in a0 and a1).

Laddsf$3:
| Here we have to decide whether to add or subtract the numbers
#ifndef __mcoldfire__
        exg     d6,a0           | get signs back
        exg     d7,a1           | and save the exponents
#else
        movel   d6,d4
        movel   a0,d6
        movel   d4,a0
        movel   d7,d4
        movel   a1,d7
        movel   d4,a1
#endif
        eorl    d6,d7           | combine sign bits
        bmi     Lsubsf$0        | if negative a and b have opposite
                                | sign so we actually subtract the
                                | numbers

| Here we have both positive or both negative
#ifndef __mcoldfire__
        exg     d6,a0           | now we have the exponent in d6
#else
        movel   d6,d4
        movel   a0,d6
        movel   d4,a0
#endif
        movel   a0,d7           | and sign in d7
        andl    IMM (0x80000000),d7
| Here we do the addition.
        addl    d3,d1
        addxl   d2,d0
| Note: now we have d2, d3, d4 and d5 to play with!

| Put the exponent, in the first byte, in d2, to use the "standard" rounding
| routines:
        movel   d6,d2
#ifndef __mcoldfire__
        lsrw    IMM (8),d2
#else
        lsrl    IMM (8),d2
#endif

| Before rounding normalize so bit #FLT_MANT_DIG is set (we will consider
| the case of denormalized numbers in the rounding routine itself).
| As in the addition (not in the subtraction!) we could have set
| one more bit we check this:
        btst    IMM (FLT_MANT_DIG+1),d0
        beq     1f
#ifndef __mcoldfire__
        lsrl    IMM (1),d0
        roxrl   IMM (1),d1
#else
        lsrl    IMM (1),d1
        btst    IMM (0),d0
        beq     10f
        bset    IMM (31),d1
10:     lsrl    IMM (1),d0
#endif
        addl    IMM (1),d2
1:
        lea     pc@(Laddsf$4),a0 | to return from rounding routine
        PICLEA  SYM (_fpCCR),a1 | check the rounding mode
#ifdef __mcoldfire__
        clrl    d6
#endif
        movew   a1@(6),d6       | rounding mode in d6
        beq     Lround$to$nearest
#ifndef __mcoldfire__
        cmpw    IMM (ROUND_TO_PLUS),d6
#else
        cmpl    IMM (ROUND_TO_PLUS),d6
#endif
        bhi     Lround$to$minus
        blt     Lround$to$zero
        bra     Lround$to$plus
Laddsf$4:
| Put back the exponent, but check for overflow.
#ifndef __mcoldfire__
        cmpw    IMM (0xff),d2
#else
        cmpl    IMM (0xff),d2
#endif
        bhi     1f
        bclr    IMM (FLT_MANT_DIG-1),d0
#ifndef __mcoldfire__
        lslw    IMM (7),d2
#else
        lsll    IMM (7),d2
#endif
        swap    d2
        orl     d2,d0
        bra     Laddsf$ret
1:
        moveq   IMM (ADD),d5
        bra     Lf$overflow

Lsubsf$0:
| We are here if a > 0 and b < 0 (sign bits cleared).
| Here we do the subtraction.
        movel   d6,d7           | put sign in d7
        andl    IMM (0x80000000),d7

        subl    d3,d1           | result in d0-d1
        subxl   d2,d0           |
        beq     Laddsf$ret      | if zero just exit
        bpl     1f              | if positive skip the following
        bchg    IMM (31),d7     | change sign bit in d7
        negl    d1
        negxl   d0
1:
#ifndef __mcoldfire__
        exg     d2,a0           | now we have the exponent in d2
        lsrw    IMM (8),d2      | put it in the first byte
#else
        movel   d2,d4
        movel   a0,d2
        movel   d4,a0
        lsrl    IMM (8),d2      | put it in the first byte
#endif

| Now d0-d1 is positive and the sign bit is in d7.

| Note that we do not have to normalize, since in the subtraction bit
| #FLT_MANT_DIG+1 is never set, and denormalized numbers are handled by
| the rounding routines themselves.
        lea     pc@(Lsubsf$1),a0 | to return from rounding routine
        PICLEA  SYM (_fpCCR),a1 | check the rounding mode
#ifdef __mcoldfire__
        clrl    d6
#endif
        movew   a1@(6),d6       | rounding mode in d6
        beq     Lround$to$nearest
#ifndef __mcoldfire__
        cmpw    IMM (ROUND_TO_PLUS),d6
#else
        cmpl    IMM (ROUND_TO_PLUS),d6
#endif
        bhi     Lround$to$minus
        blt     Lround$to$zero
        bra     Lround$to$plus
Lsubsf$1:
| Put back the exponent (we can't have overflow!). '
        bclr    IMM (FLT_MANT_DIG-1),d0
#ifndef __mcoldfire__
        lslw    IMM (7),d2
#else
        lsll    IMM (7),d2
#endif
        swap    d2
        orl     d2,d0
        bra     Laddsf$ret

| If one of the numbers was too small (difference of exponents >=
| FLT_MANT_DIG+2) we return the other (and now we don't have to '
| check for finiteness or zero).
Laddsf$a$small:
        movel   a6@(12),d0
        PICLEA  SYM (_fpCCR),a0
        movew   IMM (0),a0@
#ifndef __mcoldfire__
        moveml  sp@+,d2-d7      | restore data registers
#else
        moveml  sp@,d2-d7
        | XXX if frame pointer is ever removed, stack pointer must
        | be adjusted here.
#endif
        unlk    a6              | and return
        rts

Laddsf$b$small:
        movel   a6@(8),d0
        PICLEA  SYM (_fpCCR),a0
        movew   IMM (0),a0@
#ifndef __mcoldfire__
        moveml  sp@+,d2-d7      | restore data registers
#else
        moveml  sp@,d2-d7
        | XXX if frame pointer is ever removed, stack pointer must
        | be adjusted here.
#endif
        unlk    a6              | and return
        rts

| If the numbers are denormalized remember to put exponent equal to 1.

Laddsf$a$den:
        movel   d5,d6           | d5 contains 0x01000000
        swap    d6
        bra     Laddsf$1

Laddsf$b$den:
        movel   d5,d7
        swap    d7
        notl    d4              | make d4 into a mask for the fraction
                                | (this was not executed after the jump)
        bra     Laddsf$2

| The rest is mainly code for the different results which can be
| returned (checking always for +/-INFINITY and NaN).

Laddsf$b:
| Return b (if a is zero).
        movel   a6@(12),d0
        cmpl    IMM (0x80000000),d0     | Check if b is -0
        bne     1f
        movel   a0,d7
        andl    IMM (0x80000000),d7     | Use the sign of a
        clrl    d0
        bra     Laddsf$ret
Laddsf$a:
| Return a (if b is zero).
        movel   a6@(8),d0
1:
        moveq   IMM (ADD),d5
| We have to check for NaN and +/-infty.
        movel   d0,d7
        andl    IMM (0x80000000),d7     | put sign in d7
        bclr    IMM (31),d0             | clear sign
        cmpl    IMM (INFINITY),d0       | check for infty or NaN
        bge     2f
        movel   d0,d0           | check for zero (we do this because we don't '
        bne     Laddsf$ret      | want to return -0 by mistake
        bclr    IMM (31),d7     | if zero be sure to clear sign
        bra     Laddsf$ret      | if everything OK just return
2:
| The value to be returned is either +/-infty or NaN
        andl    IMM (0x007fffff),d0     | check for NaN
        bne     Lf$inop                 | if mantissa not zero is NaN
        bra     Lf$infty

Laddsf$ret:
| Normal exit (a and b nonzero, result is not NaN nor +/-infty).
| We have to clear the exception flags (just the exception type).
        PICLEA  SYM (_fpCCR),a0
        movew   IMM (0),a0@
        orl     d7,d0           | put sign bit
#ifndef __mcoldfire__
        moveml  sp@+,d2-d7      | restore data registers
#else
        moveml  sp@,d2-d7
        | XXX if frame pointer is ever removed, stack pointer must
        | be adjusted here.
#endif
        unlk    a6              | and return
        rts

Laddsf$ret$den:
| Return a denormalized number (for addition we don't signal underflow) '
        lsrl    IMM (1),d0      | remember to shift right back once
        bra     Laddsf$ret      | and return

| Note: when adding two floats of the same sign if either one is
| NaN we return NaN without regard to whether the other is finite or
| not. When subtracting them (i.e., when adding two numbers of
| opposite signs) things are more complicated: if both are INFINITY
| we return NaN, if only one is INFINITY and the other is NaN we return
| NaN, but if it is finite we return INFINITY with the corresponding sign.

Laddsf$nf:
        moveq   IMM (ADD),d5
| This could be faster but it is not worth the effort, since it is not
| executed very often. We sacrifice speed for clarity here.
        movel   a6@(8),d0       | get the numbers back (remember that we
        movel   a6@(12),d1      | did some processing already)
        movel   IMM (INFINITY),d4 | useful constant (INFINITY)
        movel   d0,d2           | save sign bits
        movel   d1,d3
        bclr    IMM (31),d0     | clear sign bits
        bclr    IMM (31),d1
| We know that one of them is either NaN of +/-INFINITY
| Check for NaN (if either one is NaN return NaN)
        cmpl    d4,d0           | check first a (d0)
        bhi     Lf$inop
        cmpl    d4,d1           | check now b (d1)
        bhi     Lf$inop
| Now comes the check for +/-INFINITY. We know that both are (maybe not
| finite) numbers, but we have to check if both are infinite whether we
| are adding or subtracting them.
        eorl    d3,d2           | to check sign bits
        bmi     1f
        movel   d0,d7
        andl    IMM (0x80000000),d7     | get (common) sign bit
        bra     Lf$infty
1:
| We know one (or both) are infinite, so we test for equality between the
| two numbers (if they are equal they have to be infinite both, so we
| return NaN).
        cmpl    d1,d0           | are both infinite?
        beq     Lf$inop         | if so return NaN

        movel   d0,d7
        andl    IMM (0x80000000),d7 | get a's sign bit '
        cmpl    d4,d0           | test now for infinity
        beq     Lf$infty        | if a is INFINITY return with this sign
        bchg    IMM (31),d7     | else we know b is INFINITY and has
        bra     Lf$infty        | the opposite sign

|=============================================================================
|                             __mulsf3
|=============================================================================

| float __mulsf3(float, float);
| Shift-and-add multiply: both mantissas are normalized so the hidden bit
| is set, exponents are summed (bias removed), the FLT_MANT_DIG-step loop
| below accumulates the 48-bit product in d0-d1, then control falls into
| the shared rounding code at Lround$exit.  Result in d0; sign in d7.
        FUNC(__mulsf3)
SYM (__mulsf3):
#ifndef __mcoldfire__
        link    a6,IMM (0)
        moveml  d2-d7,sp@-
#else
        link    a6,IMM (-24)
        moveml  d2-d7,sp@
#endif
        movel   a6@(8),d0       | get a into d0
        movel   a6@(12),d1      | and b into d1
        movel   d0,d7           | d7 will hold the sign of the product
        eorl    d1,d7           |
        andl    IMM (0x80000000),d7
        movel   IMM (INFINITY),d6       | useful constant (+INFINITY)
        movel   d6,d5                   | another (mask for fraction)
        notl    d5                      |
        movel   IMM (0x00800000),d4     | this is to put hidden bit back
        bclr    IMM (31),d0             | get rid of a's sign bit '
        movel   d0,d2                   |
        beq     Lmulsf$a$0              | branch if a is zero
        bclr    IMM (31),d1             | get rid of b's sign bit '
        movel   d1,d3           |
        beq     Lmulsf$b$0      | branch if b is zero
        cmpl    d6,d0           | is a big?
        bhi     Lmulsf$inop     | if a is NaN return NaN
        beq     Lmulsf$inf      | if a is INFINITY we have to check b
        cmpl    d6,d1           | now compare b with INFINITY
        bhi     Lmulsf$inop     | is b NaN?
        beq     Lmulsf$overflow | is b INFINITY?
| Here we have both numbers finite and nonzero (and with no sign bit).
| Now we get the exponents into d2 and d3.
        andl    d6,d2           | and isolate exponent in d2
        beq     Lmulsf$a$den    | if exponent is zero we have a denormalized
        andl    d5,d0           | and isolate fraction
        orl     d4,d0           | and put hidden bit back
        swap    d2              | I like exponents in the first byte
#ifndef __mcoldfire__
        lsrw    IMM (7),d2      |
#else
        lsrl    IMM (7),d2      |
#endif
Lmulsf$1:                       | number
        andl    d6,d3           |
        beq     Lmulsf$b$den    |
        andl    d5,d1           |
        orl     d4,d1           |
        swap    d3              |
#ifndef __mcoldfire__
        lsrw    IMM (7),d3      |
#else
        lsrl    IMM (7),d3      |
#endif
Lmulsf$2:                       |
#ifndef __mcoldfire__
        addw    d3,d2           | add exponents
        subw    IMM (F_BIAS+1),d2 | and subtract bias (plus one)
#else
        addl    d3,d2           | add exponents
        subl    IMM (F_BIAS+1),d2 | and subtract bias (plus one)
#endif

| We are now ready to do the multiplication. The situation is as follows:
| both a and b have bit FLT_MANT_DIG-1 set (even if they were
| denormalized to start with!), which means that in the product
| bit 2*(FLT_MANT_DIG-1) (that is, bit 2*FLT_MANT_DIG-2-32 of the
| high long) is set.

| To do the multiplication let us move the number a little bit around ...
        movel   d1,d6           | second operand in d6
        movel   d0,d5           | first operand in d4-d5
        movel   IMM (0),d4
        movel   d4,d1           | the sums will go in d0-d1
        movel   d4,d0

| now bit FLT_MANT_DIG-1 becomes bit 31:
        lsll    IMM (31-FLT_MANT_DIG+1),d6

| Start the loop (we loop #FLT_MANT_DIG times):
        moveq   IMM (FLT_MANT_DIG-1),d3
1:      addl    d1,d1           | shift sum
        addxl   d0,d0
        lsll    IMM (1),d6      | get bit bn
        bcc     2f              | if not set skip sum
        addl    d5,d1           | add a
        addxl   d4,d0
2:
#ifndef __mcoldfire__
        dbf     d3,1b           | loop back
#else
        subql   IMM (1),d3
        bpl     1b
#endif

| Now we have the product in d0-d1, with bit (FLT_MANT_DIG - 1) + FLT_MANT_DIG
| (mod 32) of d0 set. The first thing to do now is to normalize it so bit
| FLT_MANT_DIG is set (to do the rounding).
#ifndef __mcoldfire__
        rorl    IMM (6),d1
        swap    d1
        movew   d1,d3
        andw    IMM (0x03ff),d3
        andw    IMM (0xfd00),d1
#else
        movel   d1,d3
        lsll    IMM (8),d1
        addl    d1,d1
        addl    d1,d1
        moveq   IMM (22),d5
        lsrl    d5,d3
        orl     d3,d1
        andl    IMM (0xfffffd00),d1
#endif
        lsll    IMM (8),d0
        addl    d0,d0
        addl    d0,d0
#ifndef __mcoldfire__
        orw     d3,d0
#else
        orl     d3,d0
#endif

        moveq   IMM (MULTIPLY),d5

        btst    IMM (FLT_MANT_DIG+1),d0
        beq     Lround$exit
#ifndef __mcoldfire__
        lsrl    IMM (1),d0
        roxrl   IMM (1),d1
        addw    IMM (1),d2
#else
        lsrl    IMM (1),d1
        btst    IMM (0),d0
        beq     10f
        bset    IMM (31),d1
10:     lsrl    IMM (1),d0
        addql   IMM (1),d2
#endif
        bra     Lround$exit

Lmulsf$inop:
        moveq   IMM (MULTIPLY),d5
        bra     Lf$inop

Lmulsf$overflow:
        moveq   IMM (MULTIPLY),d5
        bra     Lf$overflow

Lmulsf$inf:
        moveq   IMM (MULTIPLY),d5
| If either is NaN return NaN; else both are (maybe infinite) numbers, so
| return INFINITY with the correct sign (which is in d7).
        cmpl    d6,d1           | is b NaN?
        bhi     Lf$inop         | if so return NaN
        bra     Lf$overflow     | else return +/-INFINITY

| If either number is zero return zero, unless the other is +/-INFINITY,
| or NaN, in which case we return NaN.
Lmulsf$b$0:
| Here d1 (==b) is zero.
        movel   a6@(8),d1       | get a again to check for non-finiteness
        bra     1f
Lmulsf$a$0:
        movel   a6@(12),d1      | get b again to check for non-finiteness
1:      bclr    IMM (31),d1     | clear sign bit
        cmpl    IMM (INFINITY),d1 | and check for a large exponent
        bge     Lf$inop         | if b is +/-INFINITY or NaN return NaN
        movel   d7,d0           | else return signed zero
        PICLEA  SYM (_fpCCR),a0 |
        movew   IMM (0),a0@     |
#ifndef __mcoldfire__
        moveml  sp@+,d2-d7      |
#else
        moveml  sp@,d2-d7
        | XXX if frame pointer is ever removed, stack pointer must
        | be adjusted here.
#endif
        unlk    a6              |
        rts                     |

| If a number is denormalized we put an exponent of 1 but do not put the
| hidden bit back into the fraction; instead we shift left until bit 23
| (the hidden bit) is set, adjusting the exponent accordingly. We do this
| to ensure that the product of the fractions is close to 1.
Lmulsf$a$den:
        movel   IMM (1),d2
        andl    d5,d0
1:      addl    d0,d0           | shift a left (until bit 23 is set)
#ifndef __mcoldfire__
        subw    IMM (1),d2      | and adjust exponent
#else
        subql   IMM (1),d2      | and adjust exponent
#endif
        btst    IMM (FLT_MANT_DIG-1),d0
        bne     Lmulsf$1        |
        bra     1b              | else loop back

Lmulsf$b$den:
        movel   IMM (1),d3
        andl    d5,d1
1:      addl    d1,d1           | shift b left until bit 23 is set
#ifndef __mcoldfire__
        subw    IMM (1),d3      | and adjust exponent
#else
        subql   IMM (1),d3      | and adjust exponent
#endif
        btst    IMM (FLT_MANT_DIG-1),d1
        bne     Lmulsf$2        |
        bra     1b              | else loop back

|=============================================================================
|                             __divsf3
|=============================================================================

| float __divsf3(float, float);
| Restoring shift-and-subtract division of the normalized mantissas;
| exponents are subtracted and re-biased, the quotient is accumulated in
| d6-d7, and control falls into the shared rounding code at Lround$exit.
| Result in d0; sign in d7.  (The Ldivsf$a$den/Ldivsf$b$den handlers for
| denormalized operands follow later in the file.)
        FUNC(__divsf3)
SYM (__divsf3):
#ifndef __mcoldfire__
        link    a6,IMM (0)
        moveml  d2-d7,sp@-
#else
        link    a6,IMM (-24)
        moveml  d2-d7,sp@
#endif
        movel   a6@(8),d0               | get a into d0
        movel   a6@(12),d1              | and b into d1
        movel   d0,d7                   | d7 will hold the sign of the result
        eorl    d1,d7                   |
        andl    IMM (0x80000000),d7     |
        movel   IMM (INFINITY),d6       | useful constant (+INFINITY)
        movel   d6,d5                   | another (mask for fraction)
        notl    d5                      |
        movel   IMM (0x00800000),d4     | this is to put hidden bit back
        bclr    IMM (31),d0             | get rid of a's sign bit '
        movel   d0,d2                   |
        beq     Ldivsf$a$0              | branch if a is zero
        bclr    IMM (31),d1             | get rid of b's sign bit '
        movel   d1,d3                   |
        beq     Ldivsf$b$0              | branch if b is zero
        cmpl    d6,d0                   | is a big?
        bhi     Ldivsf$inop             | if a is NaN return NaN
        beq     Ldivsf$inf              | if a is INFINITY we have to check b
        cmpl    d6,d1                   | now compare b with INFINITY
        bhi     Ldivsf$inop             | if b is NaN return NaN
        beq     Ldivsf$underflow        | finite / INFINITY underflows to zero
| Here we have both numbers finite and nonzero (and with no sign bit).
| Now we get the exponents into d2 and d3 and normalize the numbers to
| ensure that the ratio of the fractions is close to 1. We do this by
| making sure that bit #FLT_MANT_DIG-1 (hidden bit) is set.
        andl    d6,d2           | and isolate exponent in d2
        beq     Ldivsf$a$den    | if exponent is zero we have a denormalized
        andl    d5,d0           | and isolate fraction
        orl     d4,d0           | and put hidden bit back
        swap    d2              | I like exponents in the first byte
#ifndef __mcoldfire__
        lsrw    IMM (7),d2      |
#else
        lsrl    IMM (7),d2      |
#endif
Ldivsf$1:                       |
        andl    d6,d3           |
        beq     Ldivsf$b$den    |
        andl    d5,d1           |
        orl     d4,d1           |
        swap    d3              |
#ifndef __mcoldfire__
        lsrw    IMM (7),d3      |
#else
        lsrl    IMM (7),d3      |
#endif
Ldivsf$2:                       |
#ifndef __mcoldfire__
        subw    d3,d2           | subtract exponents
        addw    IMM (F_BIAS),d2 | and add bias
#else
        subl    d3,d2           | subtract exponents
        addl    IMM (F_BIAS),d2 | and add bias
#endif

| We are now ready to do the division. We have prepared things in such a way
| that the ratio of the fractions will be less than 2 but greater than 1/2.
| At this point the registers in use are:
| d0    holds a (first operand, bit FLT_MANT_DIG=0, bit FLT_MANT_DIG-1=1)
| d1    holds b (second operand, bit FLT_MANT_DIG=1)
| d2    holds the difference of the exponents, corrected by the bias
| d7    holds the sign of the ratio
| d4, d5, d6 hold some constants
        movel   d7,a0           | d6-d7 will hold the ratio of the fractions
        movel   IMM (0),d6      |
        movel   d6,d7

        moveq   IMM (FLT_MANT_DIG+1),d3
1:      cmpl    d0,d1           | is a < b?
        bhi     2f              |
        bset    d3,d6           | set a bit in d6
        subl    d1,d0           | if a >= b  a <-- a-b
        beq     3f              | if a is zero, exit
2:      addl    d0,d0           | multiply a by 2
#ifndef __mcoldfire__
        dbra    d3,1b
#else
        subql   IMM (1),d3
        bpl     1b
#endif

| Now we keep going to set the sticky bit ...
        moveq   IMM (FLT_MANT_DIG),d3
1:      cmpl    d0,d1
        ble     2f
        addl    d0,d0
#ifndef __mcoldfire__
        dbra    d3,1b
#else
        subql   IMM(1),d3
        bpl     1b
#endif
        movel   IMM (0),d1
        bra     3f
2:      movel   IMM (0),d1
#ifndef __mcoldfire__
        subw    IMM (FLT_MANT_DIG),d3
        addw    IMM (31),d3
#else
        subl    IMM (FLT_MANT_DIG),d3
        addl    IMM (31),d3
#endif
        bset    d3,d1
3:
        movel   d6,d0           | put the ratio in d0-d1
        movel   a0,d7           | get sign back

| Because of the normalization we did before we are guaranteed that
| d0 is smaller than 2^26 but larger than 2^24. Thus bit 26 is not set,
| bit 25 could be set, and if it is not set then bit 24 is necessarily set.
        btst    IMM (FLT_MANT_DIG+1),d0
        beq     1f              | if it is not set, then bit 24 is set
        lsrl    IMM (1),d0      |
#ifndef __mcoldfire__
        addw    IMM (1),d2      |
#else
        addl    IMM (1),d2      |
#endif
1:
| Now round, check for over- and underflow, and exit.
        moveq   IMM (DIVIDE),d5
        bra     Lround$exit

Ldivsf$inop:
        moveq   IMM (DIVIDE),d5
        bra     Lf$inop

Ldivsf$overflow:
        moveq   IMM (DIVIDE),d5
        bra     Lf$overflow

Ldivsf$underflow:
        moveq   IMM (DIVIDE),d5
        bra     Lf$underflow

Ldivsf$a$0:
        moveq   IMM (DIVIDE),d5
| If a is zero check to see whether b is zero also. In that case return
| NaN; then check if b is NaN, and return NaN also in that case. Else
| return a properly signed zero.
        andl    IMM (0x7fffffff),d1     | clear sign bit and test b
        beq     Lf$inop                 | if b is also zero return NaN
        cmpl    IMM (INFINITY),d1       | check for NaN
        bhi     Lf$inop                 |
        movel   d7,d0                   | else return signed zero
        PICLEA  SYM (_fpCCR),a0         |
        movew   IMM (0),a0@             |
#ifndef __mcoldfire__
        moveml  sp@+,d2-d7              |
#else
        moveml  sp@,d2-d7               |
        | XXX if frame pointer is ever removed, stack pointer must
        | be adjusted here.
#endif
        unlk    a6                      |
        rts                             |

Ldivsf$b$0:
        moveq   IMM (DIVIDE),d5
| If we got here a is not zero. Check if a is NaN; in that case return NaN,
| else return +/-INFINITY. Remember that a is in d0 with the sign bit
| cleared already.
        cmpl    IMM (INFINITY),d0       | compare d0 with INFINITY
        bhi     Lf$inop                 | if larger it is NaN
        bra     Lf$div$0                | else signal DIVIDE_BY_ZERO
3539
 
3540
Ldivsf$inf:
3541
        moveq   IMM (DIVIDE),d5
3542
| If a is INFINITY we have to check b
3543
        cmpl    IMM (INFINITY),d1       | compare b with INFINITY
3544
        bge     Lf$inop                 | if b is NaN or INFINITY return NaN
3545
        bra     Lf$overflow             | else return overflow
3546
 
3547
| If a number is denormalized we put an exponent of 1 but do not put the
3548
| bit back into the fraction.
3549
Ldivsf$a$den:
3550
        movel   IMM (1),d2
3551
        andl    d5,d0
3552
1:      addl    d0,d0           | shift a left until bit FLT_MANT_DIG-1 is set
3553
#ifndef __mcoldfire__
3554
        subw    IMM (1),d2      | and adjust exponent
3555
#else
3556
        subl    IMM (1),d2      | and adjust exponent
3557
#endif
3558
        btst    IMM (FLT_MANT_DIG-1),d0
3559
        bne     Ldivsf$1
3560
        bra     1b
3561
 
3562
Ldivsf$b$den:
3563
        movel   IMM (1),d3
3564
        andl    d5,d1
3565
1:      addl    d1,d1           | shift b left until bit FLT_MANT_DIG is set
3566
#ifndef __mcoldfire__
3567
        subw    IMM (1),d3      | and adjust exponent
3568
#else
3569
        subl    IMM (1),d3      | and adjust exponent
3570
#endif
3571
        btst    IMM (FLT_MANT_DIG-1),d1
3572
        bne     Ldivsf$2
3573
        bra     1b
3574
 
3575
Lround$exit:
3576
| This is a common exit point for __mulsf3 and __divsf3.
| On entry: d0 holds the fraction (d1 = extra low-order bits), d2 holds
| the biased exponent, d7 holds the sign bit of the result.
3577
 
3578
| First check for underflow in the exponent:
3579
#ifndef __mcoldfire__
3580
        cmpw    IMM (-FLT_MANT_DIG-1),d2
3581
#else
3582
        cmpl    IMM (-FLT_MANT_DIG-1),d2
3583
#endif
3584
        blt     Lf$underflow
3585
| It could happen that the exponent is less than 1, in which case the
3586
| number is denormalized. In this case we shift right and adjust the
3587
| exponent until it becomes 1 or the fraction is zero (in the latter case
3588
| we signal underflow and return zero).
3589
        movel   IMM (0),d6      | d6 is used temporarily
3590
#ifndef __mcoldfire__
3591
        cmpw    IMM (1),d2      | if the exponent is less than 1 we
3592
#else
3593
        cmpl    IMM (1),d2      | if the exponent is less than 1 we
3594
#endif
3595
        bge     2f              | have to shift right (denormalize)
3596
1:
3597
#ifndef __mcoldfire__
3598
        addw    IMM (1),d2      | adjust the exponent
3599
        lsrl    IMM (1),d0      | shift right once
3600
        roxrl   IMM (1),d1      |
3601
        roxrl   IMM (1),d6      | d6 collects bits we would lose otherwise
3602
        cmpw    IMM (1),d2      | is the exponent 1 already?
3603
#else
3604
        addql   IMM (1),d2      | adjust the exponent
3605
        lsrl    IMM (1),d6      | ColdFire has no roxrl: emulate the
3606
        btst    IMM (0),d1      | d0 -> d1 -> d6 right shift by hand
3607
        beq     11f
3608
        bset    IMM (31),d6
3609
11:     lsrl    IMM (1),d1
3610
        btst    IMM (0),d0
3611
        beq     10f
3612
        bset    IMM (31),d1
3613
10:     lsrl    IMM (1),d0
3614
        cmpl    IMM (1),d2      | is the exponent 1 already?
3615
#endif
3616
        beq     2f              | exponent is 1: done denormalizing
3617
        bra     1b              | else shift right once more
3618
        bra     Lf$underflow    | safety check, shouldn't execute '
3619
2:      orl     d6,d1           | this is a trick so we don't lose  '
3620
                                | the extra bits which were flushed right
3621
| Now call the rounding routine (which takes care of denormalized numbers):
3622
        lea     pc@(Lround$0),a0 | to return from rounding routine
3623
        PICLEA  SYM (_fpCCR),a1 | check the rounding mode
3624
#ifdef __mcoldfire__
3625
        clrl    d6
3626
#endif
3627
        movew   a1@(6),d6       | rounding mode in d6
3628
        beq     Lround$to$nearest | mode 0: round to nearest
3629
#ifndef __mcoldfire__
3630
        cmpw    IMM (ROUND_TO_PLUS),d6
3631
#else
3632
        cmpl    IMM (ROUND_TO_PLUS),d6
3633
#endif
3634
        bhi     Lround$to$minus
3635
        blt     Lround$to$zero
3636
        bra     Lround$to$plus
3637
Lround$0:
3638
| Here we have a correctly rounded result (either normalized or denormalized).
3639
 
3640
| Here we should have either a normalized number or a denormalized one, and
3641
| the exponent is necessarily larger or equal to 1 (so we don't have to  '
3642
| check again for underflow!). We have to check for overflow or for a
3643
| denormalized number (which also signals underflow).
3644
| Check for overflow (i.e., exponent >= 255).
3645
#ifndef __mcoldfire__
3646
        cmpw    IMM (0x00ff),d2
3647
#else
3648
        cmpl    IMM (0x00ff),d2
3649
#endif
3650
        bge     Lf$overflow
3651
| Now check for a denormalized number (exponent==0).
3652
        movew   d2,d2           | movew to itself just sets the Z flag
3653
        beq     Lf$den
3654
1:
3655
| Put back the exponents and sign and return.
3656
#ifndef __mcoldfire__
3657
        lslw    IMM (7),d2      | exponent back to fourth byte
3658
#else
3659
        lsll    IMM (7),d2      | exponent back to fourth byte
3660
#endif
3661
        bclr    IMM (FLT_MANT_DIG-1),d0 | clear the implicit (hidden) bit
3662
        swap    d0              | and put back exponent
3663
#ifndef __mcoldfire__
3664
        orw     d2,d0           |
3665
#else
3666
        orl     d2,d0
3667
#endif
3668
        swap    d0              |
3669
        orl     d7,d0           | and sign also
3670
 
3671
        PICLEA  SYM (_fpCCR),a0
3672
        movew   IMM (0),a0@     | reset _fpCCR to zero
3673
#ifndef __mcoldfire__
3674
        moveml  sp@+,d2-d7
3675
#else
3676
        moveml  sp@,d2-d7
3677
        | XXX if frame pointer is ever removed, stack pointer must
3678
        | be adjusted here.
3679
#endif
3680
        unlk    a6
3681
        rts
3682
 
3683
|=============================================================================
3684
|                             __negsf2
3685
|=============================================================================
3686
 
3687
| This is trivial and could be shorter if we didn't bother checking for NaN '
3688
| and +/-INFINITY.
3689
 
3690
| float __negsf2(float);
| Negate a single precision float.  NaN arguments are routed to Lf$inop,
| +/-INFINITY returns INFINITY with the sign toggled, and a zero of
| either sign returns +zero.
3691
        FUNC(__negsf2)
3692
SYM (__negsf2):
3693
#ifndef __mcoldfire__
3694
        link    a6,IMM (0)
3695
        moveml  d2-d7,sp@-      | save the data registers
3696
#else
3697
        link    a6,IMM (-24)    | ColdFire: reserve frame room and store
3698
        moveml  d2-d7,sp@
3699
#endif
3700
        moveq   IMM (NEGATE),d5 | operation code (reported if we trap)
3701
        movel   a6@(8),d0       | get number to negate in d0
3702
        bchg    IMM (31),d0     | negate (toggle the sign bit)
3703
        movel   d0,d1           | make a positive copy
3704
        bclr    IMM (31),d1     |
3705
        tstl    d1              | check for zero
3706
        beq     2f              | if zero (either sign) return +zero
3707
        cmpl    IMM (INFINITY),d1 | compare to +INFINITY
3708
        blt     1f              | ordinary number: return it as-is
3709
        bhi     Lf$inop         | if larger (fraction not zero) is NaN
3710
        movel   d0,d7           | else get sign and return INFINITY
3711
        andl    IMM (0x80000000),d7
3712
        bra     Lf$infty
3713
1:      PICLEA  SYM (_fpCCR),a0
3714
        movew   IMM (0),a0@     | reset _fpCCR to zero
3715
#ifndef __mcoldfire__
3716
        moveml  sp@+,d2-d7
3717
#else
3718
        moveml  sp@,d2-d7
3719
        | XXX if frame pointer is ever removed, stack pointer must
3720
        | be adjusted here.
3721
#endif
3722
        unlk    a6
3723
        rts
3724
2:      bclr    IMM (31),d0     | clear sign: -0 becomes +0
3725
        bra     1b
3726
 
3727
|=============================================================================
3728
|                             __cmpsf2
3729
|=============================================================================
3730
 
3731
GREATER =  1
3732
LESS    = -1
3733
EQUAL   =  0
3734
 
3735
| int __cmpsf2_internal(float, float, int);
| Compare two single precision floats; returns GREATER, EQUAL or LESS.
| The third argument (a6@(16)) is the value loaded into d0 when either
| operand is a NaN, in which case INVALID_OPERATION is signalled via
| $_exception_handler.  Zeroes of either sign compare equal.
3736
SYM (__cmpsf2_internal):
3737
#ifndef __mcoldfire__
3738
        link    a6,IMM (0)
3739
        moveml  d2-d7,sp@-      | save registers
3740
#else
3741
        link    a6,IMM (-24)
3742
        moveml  d2-d7,sp@
3743
#endif
3744
        moveq   IMM (COMPARE),d5 | operation code (reported on exceptions)
3745
        movel   a6@(8),d0       | get first operand
3746
        movel   a6@(12),d1      | get second operand
3747
| Check if either is NaN, and in that case return garbage and signal
3748
| INVALID_OPERATION. Check also if either is zero, and clear the signs
3749
| if necessary.
3750
        movel   d0,d6           | d6 = a with its sign
3751
        andl    IMM (0x7fffffff),d0 | d0 = |a|
3752
        beq     Lcmpsf$a$0
3753
        cmpl    IMM (0x7f800000),d0 | above +INFINITY pattern => NaN
3754
        bhi     Lcmpf$inop
3755
Lcmpsf$1:
3756
        movel   d1,d7           | d7 = b with its sign
3757
        andl    IMM (0x7fffffff),d1 | d1 = |b|
3758
        beq     Lcmpsf$b$0
3759
        cmpl    IMM (0x7f800000),d1
3760
        bhi     Lcmpf$inop
3761
Lcmpsf$2:
3762
| Check the signs
3763
        eorl    d6,d7           | sign bit of d7 set iff signs differ
3764
        bpl     1f
3765
| If the signs are not equal check if a >= 0
3766
        tstl    d6
3767
        bpl     Lcmpsf$a$gt$b   | if (a >= 0 && b < 0) => a > b
3768
        bmi     Lcmpsf$b$gt$a   | if (a < 0 && b >= 0) => a < b
3769
1:
3770
| If the signs are equal check for < 0
3771
        tstl    d6
3772
        bpl     1f
3773
| If both are negative exchange them
3774
#ifndef __mcoldfire__
3775
        exg     d0,d1
3776
#else
3777
        movel   d0,d7           | ColdFire has no exg: swap through d7
3778
        movel   d1,d0
3779
        movel   d7,d1
3780
#endif
3781
1:
3782
| Now that they are positive we just compare them as longs (does this also
3783
| work for denormalized numbers?).
3784
        cmpl    d0,d1
3785
        bhi     Lcmpsf$b$gt$a   | |b| > |a|
3786
        bne     Lcmpsf$a$gt$b   | |b| < |a|
3787
| If we got here a == b.
3788
        movel   IMM (EQUAL),d0
3789
#ifndef __mcoldfire__
3790
        moveml  sp@+,d2-d7      | put back the registers
3791
#else
3792
        moveml  sp@,d2-d7
3793
#endif
3794
        unlk    a6
3795
        rts
3796
Lcmpsf$a$gt$b:
3797
        movel   IMM (GREATER),d0
3798
#ifndef __mcoldfire__
3799
        moveml  sp@+,d2-d7      | put back the registers
3800
#else
3801
        moveml  sp@,d2-d7
3802
        | XXX if frame pointer is ever removed, stack pointer must
3803
        | be adjusted here.
3804
#endif
3805
        unlk    a6
3806
        rts
3807
Lcmpsf$b$gt$a:
3808
        movel   IMM (LESS),d0
3809
#ifndef __mcoldfire__
3810
        moveml  sp@+,d2-d7      | put back the registers
3811
#else
3812
        moveml  sp@,d2-d7
3813
        | XXX if frame pointer is ever removed, stack pointer must
3814
        | be adjusted here.
3815
#endif
3816
        unlk    a6
3817
        rts
3818
 
3819
Lcmpsf$a$0:
3820
        bclr    IMM (31),d6     | a is zero: treat -0 like +0
3821
        bra     Lcmpsf$1
3822
Lcmpsf$b$0:
3823
        bclr    IMM (31),d7     | b is zero: treat -0 like +0
3824
        bra     Lcmpsf$2
3825
 
3826
Lcmpf$inop:
3827
        movl    a6@(16),d0      | d0 = caller-supplied "unordered" value
3828
        moveq   IMM (INEXACT_RESULT+INVALID_OPERATION),d7
3829
        moveq   IMM (SINGLE_FLOAT),d6
3830
        PICJUMP $_exception_handler
3831
 
3832
| int __cmpsf2(float, float);
| Public entry point: calls __cmpsf2_internal(a, b, 1), i.e. unordered
| (NaN) operands make the comparison return 1.
3833
        FUNC(__cmpsf2)
3834
SYM (__cmpsf2):
3835
        link    a6,IMM (0)
3836
        pea     1               | value returned for unordered operands
3837
        movl    a6@(12),sp@-    | re-push b
3838
        movl    a6@(8),sp@-     | re-push a
3839
        PICCALL SYM (__cmpsf2_internal)
3840
        unlk    a6              | restores sp, discarding the pushed args
3841
        rts
3842
 
3843
|=============================================================================
3844
|                           rounding routines
3845
|=============================================================================
3846
 
3847
| The rounding routines expect the number to be normalized in registers
| d0-d1, with the exponent in register d2. They assume that the
| exponent is larger or equal to 1. They return a properly normalized number
| if possible, and a denormalized number otherwise. The exponent is returned
| in d2.
| Return is via "jmp a0@": the caller supplies its continuation address
| in a0 rather than using a call/return pair.
3852
 
3853
Lround$to$nearest:
3854
| We now normalize as suggested by D. Knuth ("Seminumerical Algorithms"):
3855
| Here we assume that the exponent is not too small (this should be checked
3856
| before entering the rounding routine), but the number could be denormalized.
3857
 
3858
| Check for denormalized numbers:
3859
1:      btst    IMM (FLT_MANT_DIG),d0
3860
        bne     2f              | if set the number is normalized
3861
| Normalize shifting left until bit #FLT_MANT_DIG is set or the exponent
3862
| is one (remember that a denormalized number corresponds to an
3863
| exponent of -F_BIAS+1).
3864
#ifndef __mcoldfire__
3865
        cmpw    IMM (1),d2      | remember that the exponent is at least one
3866
#else
3867
        cmpl    IMM (1),d2      | remember that the exponent is at least one
3868
#endif
3869
        beq     2f              | an exponent of one means denormalized
3870
        addl    d1,d1           | else shift and adjust the exponent
3871
        addxl   d0,d0           | (shift the d0:d1 pair left by one)
3872
#ifndef __mcoldfire__
3873
        dbra    d2,1b           |
3874
#else
3875
        subql   IMM (1),d2
3876
        bpl     1b
3877
#endif
3878
2:
3879
| Now round: we do it as follows: after the shifting we can write the
3880
| fraction part as f + delta, where 1 < f < 2^25, and 0 <= delta <= 2.
3881
| If delta < 1, do nothing. If delta > 1, add 1 to f.
3882
| If delta == 1 (a tie), round so that the kept low bit becomes even
3883
| (round-to-nearest-even, after the final shift).
3884
        btst    IMM (0),d0      | is delta < 1?
3885
        beq     2f              | if so, do not do anything
3886
        tstl    d1              | is delta == 1?
3887
        bne     1f              | no, delta > 1: just add 1
3888
        movel   d0,d1           | tie case: round to even --
3889
        andl    IMM (2),d1      | bit 1 is the last significant bit
3890
        addl    d1,d0           | add it so the kept low bit ends up even
3891
        bra     2f              |
3892
1:      movel   IMM (1),d1      | else add 1
3893
        addl    d1,d0           |
3894
| Shift right once (because we used bit #FLT_MANT_DIG!).
3895
2:      lsrl    IMM (1),d0
3896
| Now check again bit #FLT_MANT_DIG (rounding could have produced a
3897
| 'fraction overflow' ...).
3898
        btst    IMM (FLT_MANT_DIG),d0
3899
        beq     1f
3900
        lsrl    IMM (1),d0      | renormalize and bump the exponent
3901
#ifndef __mcoldfire__
3902
        addw    IMM (1),d2
3903
#else
3904
        addql   IMM (1),d2
3905
#endif
3906
1:
3907
| If bit #FLT_MANT_DIG-1 is clear we have a denormalized number, so we
3908
| have to put the exponent to zero and return a denormalized number.
3909
        btst    IMM (FLT_MANT_DIG-1),d0
3910
        beq     1f
3911
        jmp     a0@
3912
1:      movel   IMM (0),d2
3913
        jmp     a0@
3914
 
3915
Lround$to$zero:
3916
Lround$to$plus:
3917
Lround$to$minus:
3918
| These three modes currently just truncate (no adjustment) and return.
        jmp     a0@
3919
#endif /* L_float */
3920
 
3921
| gcc expects the routines __eqdf2, __nedf2, __gtdf2, __gedf2,
3922
| __ledf2, __ltdf2 to all return the same value as a direct call to
3923
| __cmpdf2 would.  In this implementation, each of these routines
3924
| simply calls __cmpdf2.  It would be more efficient to give the
3925
| __cmpdf2 routine several names, but separating them out will make it
3926
| easier to write efficient versions of these routines someday.
3927
| If the operands compare unordered, __gtdf2 and __gedf2 return -1.
3928
| The other routines return 1.
3929
 
3930
#ifdef  L_eqdf2
3931
        .text
3932
        FUNC(__eqdf2)
3933
        .globl  SYM (__eqdf2)
3934
| int __eqdf2(double a, double b);
| Returns the same value as __cmpdf2: calls __cmpdf2_internal(a, b, 1),
| so unordered (NaN) operands yield 1.
SYM (__eqdf2):
3935
        link    a6,IMM (0)
3936
        pea     1               | value returned for unordered operands
3937
        movl    a6@(20),sp@-    | re-push b (second word first)
3938
        movl    a6@(16),sp@-    |            (first word)
3939
        movl    a6@(12),sp@-    | re-push a (second word first)
3940
        movl    a6@(8),sp@-     |            (first word)
3941
        PICCALL SYM (__cmpdf2_internal)
3942
        unlk    a6              | restores sp, discarding the pushed args
3943
        rts
3944
#endif /* L_eqdf2 */
3945
 
3946
#ifdef  L_nedf2
3947
        .text
3948
        FUNC(__nedf2)
3949
        .globl  SYM (__nedf2)
3950
| int __nedf2(double a, double b);
| Returns the same value as __cmpdf2: calls __cmpdf2_internal(a, b, 1),
| so unordered (NaN) operands yield 1.
SYM (__nedf2):
3951
        link    a6,IMM (0)
3952
        pea     1               | value returned for unordered operands
3953
        movl    a6@(20),sp@-    | re-push b (second word first)
3954
        movl    a6@(16),sp@-    |            (first word)
3955
        movl    a6@(12),sp@-    | re-push a (second word first)
3956
        movl    a6@(8),sp@-     |            (first word)
3957
        PICCALL SYM (__cmpdf2_internal)
3958
        unlk    a6              | restores sp, discarding the pushed args
3959
        rts
3960
#endif /* L_nedf2 */
3961
 
3962
#ifdef  L_gtdf2
3963
        .text
3964
        FUNC(__gtdf2)
3965
        .globl  SYM (__gtdf2)
3966
| int __gtdf2(double a, double b);
| Returns the same value as __cmpdf2: calls __cmpdf2_internal(a, b, -1),
| so unordered (NaN) operands yield -1.
SYM (__gtdf2):
3967
        link    a6,IMM (0)
3968
        pea     -1              | value returned for unordered operands
3969
        movl    a6@(20),sp@-    | re-push b (second word first)
3970
        movl    a6@(16),sp@-    |            (first word)
3971
        movl    a6@(12),sp@-    | re-push a (second word first)
3972
        movl    a6@(8),sp@-     |            (first word)
3973
        PICCALL SYM (__cmpdf2_internal)
3974
        unlk    a6              | restores sp, discarding the pushed args
3975
        rts
3976
#endif /* L_gtdf2 */
3977
 
3978
#ifdef  L_gedf2
3979
        .text
3980
        FUNC(__gedf2)
3981
        .globl  SYM (__gedf2)
3982
| int __gedf2(double a, double b);
| Returns the same value as __cmpdf2: calls __cmpdf2_internal(a, b, -1),
| so unordered (NaN) operands yield -1.
SYM (__gedf2):
3983
        link    a6,IMM (0)
3984
        pea     -1              | value returned for unordered operands
3985
        movl    a6@(20),sp@-    | re-push b (second word first)
3986
        movl    a6@(16),sp@-    |            (first word)
3987
        movl    a6@(12),sp@-    | re-push a (second word first)
3988
        movl    a6@(8),sp@-     |            (first word)
3989
        PICCALL SYM (__cmpdf2_internal)
3990
        unlk    a6              | restores sp, discarding the pushed args
3991
        rts
3992
#endif /* L_gedf2 */
3993
 
3994
#ifdef  L_ltdf2
3995
        .text
3996
        FUNC(__ltdf2)
3997
        .globl  SYM (__ltdf2)
3998
| int __ltdf2(double a, double b);
| Returns the same value as __cmpdf2: calls __cmpdf2_internal(a, b, 1),
| so unordered (NaN) operands yield 1.
SYM (__ltdf2):
3999
        link    a6,IMM (0)
4000
        pea     1               | value returned for unordered operands
4001
        movl    a6@(20),sp@-    | re-push b (second word first)
4002
        movl    a6@(16),sp@-    |            (first word)
4003
        movl    a6@(12),sp@-    | re-push a (second word first)
4004
        movl    a6@(8),sp@-     |            (first word)
4005
        PICCALL SYM (__cmpdf2_internal)
4006
        unlk    a6              | restores sp, discarding the pushed args
4007
        rts
4008
#endif /* L_ltdf2 */
4009
 
4010
#ifdef  L_ledf2
4011
        .text
4012
        FUNC(__ledf2)
4013
        .globl  SYM (__ledf2)
4014
| int __ledf2(double a, double b);
| Returns the same value as __cmpdf2: calls __cmpdf2_internal(a, b, 1),
| so unordered (NaN) operands yield 1.
SYM (__ledf2):
4015
        link    a6,IMM (0)
4016
        pea     1               | value returned for unordered operands
4017
        movl    a6@(20),sp@-    | re-push b (second word first)
4018
        movl    a6@(16),sp@-    |            (first word)
4019
        movl    a6@(12),sp@-    | re-push a (second word first)
4020
        movl    a6@(8),sp@-     |            (first word)
4021
        PICCALL SYM (__cmpdf2_internal)
4022
        unlk    a6              | restores sp, discarding the pushed args
4023
        rts
4024
#endif /* L_ledf2 */
4025
 
4026
| The comments above about __eqdf2, et. al., also apply to __eqsf2,
4027
| et. al., except that the latter call __cmpsf2 rather than __cmpdf2.
4028
 
4029
#ifdef  L_eqsf2
4030
        .text
4031
        FUNC(__eqsf2)
4032
        .globl  SYM (__eqsf2)
4033
| int __eqsf2(float a, float b);
| Returns the same value as __cmpsf2: calls __cmpsf2_internal(a, b, 1),
| so unordered (NaN) operands yield 1.
SYM (__eqsf2):
4034
        link    a6,IMM (0)
4035
        pea     1               | value returned for unordered operands
4036
        movl    a6@(12),sp@-    | re-push b
4037
        movl    a6@(8),sp@-     | re-push a
4038
        PICCALL SYM (__cmpsf2_internal)
4039
        unlk    a6              | restores sp, discarding the pushed args
4040
        rts
4041
#endif /* L_eqsf2 */
4042
 
4043
#ifdef  L_nesf2
4044
        .text
4045
        FUNC(__nesf2)
4046
        .globl  SYM (__nesf2)
4047
| int __nesf2(float a, float b);
| Returns the same value as __cmpsf2: calls __cmpsf2_internal(a, b, 1),
| so unordered (NaN) operands yield 1.
SYM (__nesf2):
4048
        link    a6,IMM (0)
4049
        pea     1               | value returned for unordered operands
4050
        movl    a6@(12),sp@-    | re-push b
4051
        movl    a6@(8),sp@-     | re-push a
4052
        PICCALL SYM (__cmpsf2_internal)
4053
        unlk    a6              | restores sp, discarding the pushed args
4054
        rts
4055
#endif /* L_nesf2 */
4056
 
4057
#ifdef  L_gtsf2
4058
        .text
4059
        FUNC(__gtsf2)
4060
        .globl  SYM (__gtsf2)
4061
| int __gtsf2(float a, float b);
| Returns the same value as __cmpsf2: calls __cmpsf2_internal(a, b, -1),
| so unordered (NaN) operands yield -1.
SYM (__gtsf2):
4062
        link    a6,IMM (0)
4063
        pea     -1              | value returned for unordered operands
4064
        movl    a6@(12),sp@-    | re-push b
4065
        movl    a6@(8),sp@-     | re-push a
4066
        PICCALL SYM (__cmpsf2_internal)
4067
        unlk    a6              | restores sp, discarding the pushed args
4068
        rts
4069
#endif /* L_gtsf2 */
4070
 
4071
#ifdef  L_gesf2
4072
        .text
4073
        FUNC(__gesf2)
4074
        .globl  SYM (__gesf2)
4075
| int __gesf2(float a, float b);
| Returns the same value as __cmpsf2: calls __cmpsf2_internal(a, b, -1),
| so unordered (NaN) operands yield -1.
SYM (__gesf2):
4076
        link    a6,IMM (0)
4077
        pea     -1              | value returned for unordered operands
4078
        movl    a6@(12),sp@-    | re-push b
4079
        movl    a6@(8),sp@-     | re-push a
4080
        PICCALL SYM (__cmpsf2_internal)
4081
        unlk    a6              | restores sp, discarding the pushed args
4082
        rts
4083
#endif /* L_gesf2 */
4084
 
4085
#ifdef  L_ltsf2
4086
        .text
4087
        FUNC(__ltsf2)
4088
        .globl  SYM (__ltsf2)
4089
| int __ltsf2(float a, float b);
| Returns the same value as __cmpsf2: calls __cmpsf2_internal(a, b, 1),
| so unordered (NaN) operands yield 1.
SYM (__ltsf2):
4090
        link    a6,IMM (0)
4091
        pea     1               | value returned for unordered operands
4092
        movl    a6@(12),sp@-    | re-push b
4093
        movl    a6@(8),sp@-     | re-push a
4094
        PICCALL SYM (__cmpsf2_internal)
4095
        unlk    a6              | restores sp, discarding the pushed args
4096
        rts
4097
#endif /* L_ltsf2 */
4098
 
4099
#ifdef  L_lesf2
4100
        .text
4101
        FUNC(__lesf2)
4102
        .globl  SYM (__lesf2)
4103
| int __lesf2(float a, float b);
| Returns the same value as __cmpsf2: calls __cmpsf2_internal(a, b, 1),
| so unordered (NaN) operands yield 1.
SYM (__lesf2):
4104
        link    a6,IMM (0)
4105
        pea     1               | value returned for unordered operands
4106
        movl    a6@(12),sp@-    | re-push b
4107
        movl    a6@(8),sp@-     | re-push a
4108
        PICCALL SYM (__cmpsf2_internal)
4109
        unlk    a6              | restores sp, discarding the pushed args
4110
        rts
4111
#endif /* L_lesf2 */
4112
 
4113
#if defined (__ELF__) && defined (__linux__)
4114
        /* Make stack non-executable for ELF linux targets.  */
4115
        .section        .note.GNU-stack,"",@progbits
4116
#endif

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.