OpenCores openrisc Subversion repository: openrisc/trunk/or1ksim/softfloat/softfloat-macros (rev 236)
URL: https://opencores.org/ocsvn/openrisc/openrisc/trunk

/*============================================================================

This C source fragment is part of the SoftFloat IEC/IEEE Floating-point
Arithmetic Package, Release 2b.

Written by John R. Hauser.  This work was made possible in part by the
International Computer Science Institute, located at Suite 600, 1947 Center
Street, Berkeley, California 94704.  Funding was partially provided by the
National Science Foundation under grant MIP-9311980.  The original version
of this code was written as part of a project to build a fixed-point vector
processor in collaboration with the University of California at Berkeley,
overseen by Profs. Nelson Morgan and John Wawrzynek.  More information
is available through the Web page `http://www.cs.berkeley.edu/~jhauser/
arithmetic/SoftFloat.html'.

THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE.  Although reasonable effort has
been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT TIMES
RESULT IN INCORRECT BEHAVIOR.  USE OF THIS SOFTWARE IS RESTRICTED TO PERSONS
AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ALL LOSSES,
COSTS, OR OTHER PROBLEMS THEY INCUR DUE TO THE SOFTWARE, AND WHO FURTHERMORE
EFFECTIVELY INDEMNIFY JOHN HAUSER AND THE INTERNATIONAL COMPUTER SCIENCE
INSTITUTE (possibly via similar legal notice) AGAINST ALL LOSSES, COSTS, OR
OTHER PROBLEMS INCURRED BY THEIR CUSTOMERS AND CLIENTS DUE TO THE SOFTWARE.

Derivative works are acceptable, even for commercial purposes, so long as
(1) the source code for the derivative work includes prominent notice that
the work is derivative, and (2) the source code includes prominent notice with
these four paragraphs for those parts of this code that are retained.

=============================================================================*/

/*----------------------------------------------------------------------------
| Shifts `a' right by the number of bits given in `count'.  If any nonzero
| bits are shifted off, they are ``jammed'' into the least significant bit of
| the result by setting the least significant bit to 1.  The value of `count'
| can be arbitrarily large; in particular, if `count' is greater than 32, the
| result will be either 0 or 1, depending on whether `a' is zero or nonzero.
| The result is stored in the location pointed to by `zPtr'.
*----------------------------------------------------------------------------*/

INLINE void shift32RightJamming( bits32 a, int16 count, bits32 *zPtr )
{
    bits32 z;

    if ( count == 0 ) {
        z = a;
    }
    else if ( count < 32 ) {
        z = ( a>>count ) | ( ( a<<( ( - count ) & 31 ) ) != 0 );
    }
    else {
        z = ( a != 0 );
    }
    *zPtr = z;

}
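
The jamming rule is easy to check in isolation. The sketch below re-expresses the same logic with C99 types, assuming bits32 maps to uint32_t (which is how or1ksim's softfloat headers are expected to define it on this host); it is illustrative only and not part of the build.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Same logic as shift32RightJamming, with uint32_t standing in for bits32
   (an assumed mapping). */
static uint32_t shift32_right_jamming( uint32_t a, int count )
{
    if ( count == 0 ) return a;
    if ( count < 32 ) return ( a>>count ) | ( ( a<<( ( - count ) & 31 ) ) != 0 );
    return ( a != 0 );
}

int main( void )
{
    /* 0x80000001 >> 4 discards a nonzero low bit, so bit 0 of the result is
       jammed to 1: 0x08000001 instead of 0x08000000. */
    printf( "%08" PRIX32 "\n", shift32_right_jamming( 0x80000001u, 4 ) );
    /* Shifting everything out of a nonzero value leaves only the jam bit. */
    printf( "%08" PRIX32 "\n", shift32_right_jamming( 0x00000100u, 40 ) );
    return 0;
}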

/*----------------------------------------------------------------------------
| Shifts `a' right by the number of bits given in `count'.  If any nonzero
| bits are shifted off, they are ``jammed'' into the least significant bit of
| the result by setting the least significant bit to 1.  The value of `count'
| can be arbitrarily large; in particular, if `count' is greater than 64, the
| result will be either 0 or 1, depending on whether `a' is zero or nonzero.
| The result is stored in the location pointed to by `zPtr'.
*----------------------------------------------------------------------------*/

INLINE void shift64RightJamming( bits64 a, int16 count, bits64 *zPtr )
{
    bits64 z;

    if ( count == 0 ) {
        z = a;
    }
    else if ( count < 64 ) {
        z = ( a>>count ) | ( ( a<<( ( - count ) & 63 ) ) != 0 );
    }
    else {
        z = ( a != 0 );
    }
    *zPtr = z;

}

/*----------------------------------------------------------------------------
| Shifts the 128-bit value formed by concatenating `a0' and `a1' right by 64
| _plus_ the number of bits given in `count'.  The shifted result is at most
| 64 nonzero bits; this is stored at the location pointed to by `z0Ptr'.  The
| bits shifted off form a second 64-bit result as follows:  The _last_ bit
| shifted off is the most-significant bit of the extra result, and the other
| 63 bits of the extra result are all zero if and only if _all_but_the_last_
| bits shifted off were all zero.  This extra result is stored in the location
| pointed to by `z1Ptr'.  The value of `count' can be arbitrarily large.
|     (This routine makes more sense if `a0' and `a1' are considered to form
| a fixed-point value with binary point between `a0' and `a1'.  This fixed-
| point value is shifted right by the number of bits given in `count', and
| the integer part of the result is returned at the location pointed to by
| `z0Ptr'.  The fractional part of the result may be slightly corrupted as
| described above, and is returned at the location pointed to by `z1Ptr'.)
*----------------------------------------------------------------------------*/

INLINE void
 shift64ExtraRightJamming(
     bits64 a0, bits64 a1, int16 count, bits64 *z0Ptr, bits64 *z1Ptr )
{
    bits64 z0, z1;
    int8 negCount = ( - count ) & 63;

    if ( count == 0 ) {
        z1 = a1;
        z0 = a0;
    }
    else if ( count < 64 ) {
        z1 = ( a0<<negCount ) | ( a1 != 0 );
        z0 = a0>>count;
    }
    else {
        if ( count == 64 ) {
            z1 = a0 | ( a1 != 0 );
        }
        else {
            z1 = ( ( a0 | a1 ) != 0 );
        }
        z0 = 0;
    }
    *z1Ptr = z1;
    *z0Ptr = z0;

}
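
The fixed-point reading given in the comment is easiest to see with a concrete value. The following standalone sketch mirrors the routine with uint64_t in place of bits64 (an assumed mapping) and shifts the 128-bit value 1:0 right by one bit: the integer part becomes 0 and the extra word carries the discarded bit in its most significant position.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Mirrors shift64ExtraRightJamming, with uint64_t standing in for bits64. */
static void shift64_extra_right_jamming( uint64_t a0, uint64_t a1, int count,
                                         uint64_t *z0, uint64_t *z1 )
{
    int negCount = ( - count ) & 63;

    if ( count == 0 ) {
        *z1 = a1;
        *z0 = a0;
    }
    else if ( count < 64 ) {
        *z1 = ( a0<<negCount ) | ( a1 != 0 );
        *z0 = a0>>count;
    }
    else {
        *z1 = ( count == 64 ) ? ( a0 | ( a1 != 0 ) ) : ( ( a0 | a1 ) != 0 );
        *z0 = 0;
    }
}

int main( void )
{
    uint64_t z0, z1;

    /* 1.0 (binary point between a0 and a1) shifted right by one bit is 0.1b:
       integer part 0, extra word 0x8000000000000000. */
    shift64_extra_right_jamming( 1, 0, 1, &z0, &z1 );
    printf( "%016" PRIX64 " %016" PRIX64 "\n", z0, z1 );
    return 0;
}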

#ifndef NO_SOFTFLOAT_UNUSED
/*----------------------------------------------------------------------------
| Shifts the 128-bit value formed by concatenating `a0' and `a1' right by the
| number of bits given in `count'.  Any bits shifted off are lost.  The value
| of `count' can be arbitrarily large; in particular, if `count' is greater
| than 128, the result will be 0.  The result is broken into two 64-bit pieces
| which are stored at the locations pointed to by `z0Ptr' and `z1Ptr'.
*----------------------------------------------------------------------------*/

INLINE void
 shift128Right(
     bits64 a0, bits64 a1, int16 count, bits64 *z0Ptr, bits64 *z1Ptr )
{
    bits64 z0, z1;
    int8 negCount = ( - count ) & 63;

    if ( count == 0 ) {
        z1 = a1;
        z0 = a0;
    }
    else if ( count < 64 ) {
        z1 = ( a0<<negCount ) | ( a1>>count );
        z0 = a0>>count;
    }
    else {
        z1 = ( count < 128 ) ? ( a0>>( count & 63 ) ) : 0;
        z0 = 0;
    }
    *z1Ptr = z1;
    *z0Ptr = z0;

}

/*----------------------------------------------------------------------------
| Shifts the 128-bit value formed by concatenating `a0' and `a1' right by the
| number of bits given in `count'.  If any nonzero bits are shifted off, they
| are ``jammed'' into the least significant bit of the result by setting the
| least significant bit to 1.  The value of `count' can be arbitrarily large;
| in particular, if `count' is greater than 128, the result will be either
| 0 or 1, depending on whether the concatenation of `a0' and `a1' is zero or
| nonzero.  The result is broken into two 64-bit pieces which are stored at
| the locations pointed to by `z0Ptr' and `z1Ptr'.
*----------------------------------------------------------------------------*/

INLINE void
 shift128RightJamming(
     bits64 a0, bits64 a1, int16 count, bits64 *z0Ptr, bits64 *z1Ptr )
{
    bits64 z0, z1;
    int8 negCount = ( - count ) & 63;

    if ( count == 0 ) {
        z1 = a1;
        z0 = a0;
    }
    else if ( count < 64 ) {
        z1 = ( a0<<negCount ) | ( a1>>count ) | ( ( a1<<negCount ) != 0 );
        z0 = a0>>count;
    }
    else {
        if ( count == 64 ) {
            z1 = a0 | ( a1 != 0 );
        }
        else if ( count < 128 ) {
            z1 = ( a0>>( count & 63 ) ) | ( ( ( a0<<negCount ) | a1 ) != 0 );
        }
        else {
            z1 = ( ( a0 | a1 ) != 0 );
        }
        z0 = 0;
    }
    *z1Ptr = z1;
    *z0Ptr = z0;

}
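
For a concrete feel for the 128-bit sticky shift, the count < 64 path can be exercised on its own with standard types (uint64_t assumed to stand in for bits64). Shifting 0x0123456789ABCDEF:0000000000000001 right by 4 moves the low nibble of a0 into the top of z1 and jams the discarded low bit of a1 into its least significant bit.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

int main( void )
{
    /* count < 64 path of shift128RightJamming, written out with uint64_t. */
    uint64_t a0 = 0x0123456789ABCDEFULL, a1 = 0x0000000000000001ULL;
    int count = 4, negCount = ( - count ) & 63;
    uint64_t z0 = a0>>count;
    uint64_t z1 = ( a0<<negCount ) | ( a1>>count ) | ( ( a1<<negCount ) != 0 );

    /* Expected: 00123456789ABCDE:F000000000000001 (discarded 1 jammed). */
    printf( "%016" PRIX64 ":%016" PRIX64 "\n", z0, z1 );
    return 0;
}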

/*----------------------------------------------------------------------------
| Shifts the 192-bit value formed by concatenating `a0', `a1', and `a2' right
| by 64 _plus_ the number of bits given in `count'.  The shifted result is
| at most 128 nonzero bits; these are broken into two 64-bit pieces which are
| stored at the locations pointed to by `z0Ptr' and `z1Ptr'.  The bits shifted
| off form a third 64-bit result as follows:  The _last_ bit shifted off is
| the most-significant bit of the extra result, and the other 63 bits of the
| extra result are all zero if and only if _all_but_the_last_ bits shifted off
| were all zero.  This extra result is stored in the location pointed to by
| `z2Ptr'.  The value of `count' can be arbitrarily large.
|     (This routine makes more sense if `a0', `a1', and `a2' are considered
| to form a fixed-point value with binary point between `a1' and `a2'.  This
| fixed-point value is shifted right by the number of bits given in `count',
| and the integer part of the result is returned at the locations pointed to
| by `z0Ptr' and `z1Ptr'.  The fractional part of the result may be slightly
| corrupted as described above, and is returned at the location pointed to by
| `z2Ptr'.)
*----------------------------------------------------------------------------*/

INLINE void
 shift128ExtraRightJamming(
     bits64 a0,
     bits64 a1,
     bits64 a2,
     int16 count,
     bits64 *z0Ptr,
     bits64 *z1Ptr,
     bits64 *z2Ptr
 )
{
    bits64 z0, z1, z2;
    int8 negCount = ( - count ) & 63;

    if ( count == 0 ) {
        z2 = a2;
        z1 = a1;
        z0 = a0;
    }
    else {
        if ( count < 64 ) {
            z2 = a1<<negCount;
            z1 = ( a0<<negCount ) | ( a1>>count );
            z0 = a0>>count;
        }
        else {
            if ( count == 64 ) {
                z2 = a1;
                z1 = a0;
            }
            else {
                a2 |= a1;
                if ( count < 128 ) {
                    z2 = a0<<negCount;
                    z1 = a0>>( count & 63 );
                }
                else {
                    z2 = ( count == 128 ) ? a0 : ( a0 != 0 );
                    z1 = 0;
                }
            }
            z0 = 0;
        }
        z2 |= ( a2 != 0 );
    }
    *z2Ptr = z2;
    *z1Ptr = z1;
    *z0Ptr = z0;

}

/*----------------------------------------------------------------------------
| Shifts the 128-bit value formed by concatenating `a0' and `a1' left by the
| number of bits given in `count'.  Any bits shifted off are lost.  The value
| of `count' must be less than 64.  The result is broken into two 64-bit
| pieces which are stored at the locations pointed to by `z0Ptr' and `z1Ptr'.
*----------------------------------------------------------------------------*/

INLINE void
 shortShift128Left(
     bits64 a0, bits64 a1, int16 count, bits64 *z0Ptr, bits64 *z1Ptr )
{

    *z1Ptr = a1<<count;
    *z0Ptr =
        ( count == 0 ) ? a0 : ( a0<<count ) | ( a1>>( ( - count ) & 63 ) );

}
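
The cross-word carry in shortShift128Left shows up already with a one-bit shift. In the sketch below (uint64_t assumed for bits64, and count > 0 so the special case that avoids an undefined 64-bit shift is not needed), the most significant bit of a1 moves into the least significant bit of z0.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

int main( void )
{
    /* Body of shortShift128Left, written out with uint64_t and count > 0. */
    uint64_t a0 = 0x0000000000000001ULL, a1 = 0x8000000000000000ULL;
    int count = 1;
    uint64_t z1 = a1<<count;
    uint64_t z0 = ( a0<<count ) | ( a1>>( ( - count ) & 63 ) );

    /* Expected: 0000000000000003:0000000000000000, the MSB of a1 carried
       into the low bit of z0. */
    printf( "%016" PRIX64 ":%016" PRIX64 "\n", z0, z1 );
    return 0;
}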

/*----------------------------------------------------------------------------
| Shifts the 192-bit value formed by concatenating `a0', `a1', and `a2' left
| by the number of bits given in `count'.  Any bits shifted off are lost.
| The value of `count' must be less than 64.  The result is broken into three
| 64-bit pieces which are stored at the locations pointed to by `z0Ptr',
| `z1Ptr', and `z2Ptr'.
*----------------------------------------------------------------------------*/

INLINE void
 shortShift192Left(
     bits64 a0,
     bits64 a1,
     bits64 a2,
     int16 count,
     bits64 *z0Ptr,
     bits64 *z1Ptr,
     bits64 *z2Ptr
 )
{
    bits64 z0, z1, z2;
    int8 negCount;

    z2 = a2<<count;
    z1 = a1<<count;
    z0 = a0<<count;
    if ( 0 < count ) {
        negCount = ( ( - count ) & 63 );
        z1 |= a2>>negCount;
        z0 |= a1>>negCount;
    }
    *z2Ptr = z2;
    *z1Ptr = z1;
    *z0Ptr = z0;

}

#endif

/*----------------------------------------------------------------------------
| Adds the 128-bit value formed by concatenating `a0' and `a1' to the 128-bit
| value formed by concatenating `b0' and `b1'.  Addition is modulo 2^128, so
| any carry out is lost.  The result is broken into two 64-bit pieces which
| are stored at the locations pointed to by `z0Ptr' and `z1Ptr'.
*----------------------------------------------------------------------------*/

INLINE void
 add128(
     bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 *z0Ptr, bits64 *z1Ptr )
{
    bits64 z1;

    z1 = a1 + b1;
    *z1Ptr = z1;
    *z0Ptr = a0 + b0 + ( z1 < a1 );

}
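
The carry test ( z1 < a1 ) in add128 relies on unsigned wrap-around: the low-word sum is smaller than one of its operands exactly when the addition overflowed, so no type wider than 64 bits is needed. A minimal standalone sketch, assuming bits64 corresponds to uint64_t:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Same scheme as add128, with uint64_t standing in for bits64. */
static void add128_u64( uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1,
                        uint64_t *z0, uint64_t *z1 )
{
    uint64_t low = a1 + b1;

    *z1 = low;
    *z0 = a0 + b0 + ( low < a1 );   /* carry out of the low word */
}

int main( void )
{
    uint64_t z0, z1;

    /* 0:FFFFFFFFFFFFFFFF + 0:1 should carry into the high word, giving 1:0. */
    add128_u64( 0, UINT64_MAX, 0, 1, &z0, &z1 );
    printf( "%016" PRIX64 ":%016" PRIX64 "\n", z0, z1 );
    return 0;
}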

#ifndef NO_SOFTFLOAT_UNUSED

/*----------------------------------------------------------------------------
| Adds the 192-bit value formed by concatenating `a0', `a1', and `a2' to the
| 192-bit value formed by concatenating `b0', `b1', and `b2'.  Addition is
| modulo 2^192, so any carry out is lost.  The result is broken into three
| 64-bit pieces which are stored at the locations pointed to by `z0Ptr',
| `z1Ptr', and `z2Ptr'.
*----------------------------------------------------------------------------*/

INLINE void
 add192(
     bits64 a0,
     bits64 a1,
     bits64 a2,
     bits64 b0,
     bits64 b1,
     bits64 b2,
     bits64 *z0Ptr,
     bits64 *z1Ptr,
     bits64 *z2Ptr
 )
{
    bits64 z0, z1, z2;
    int8 carry0, carry1;

    z2 = a2 + b2;
    carry1 = ( z2 < a2 );
    z1 = a1 + b1;
    carry0 = ( z1 < a1 );
    z0 = a0 + b0;
    z1 += carry1;
    z0 += ( z1 < carry1 );
    z0 += carry0;
    *z2Ptr = z2;
    *z1Ptr = z1;
    *z0Ptr = z0;

}

#endif

/*----------------------------------------------------------------------------
| Subtracts the 128-bit value formed by concatenating `b0' and `b1' from the
| 128-bit value formed by concatenating `a0' and `a1'.  Subtraction is modulo
| 2^128, so any borrow out (carry out) is lost.  The result is broken into two
| 64-bit pieces which are stored at the locations pointed to by `z0Ptr' and
| `z1Ptr'.
*----------------------------------------------------------------------------*/

INLINE void
 sub128(
     bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 *z0Ptr, bits64 *z1Ptr )
{

    *z1Ptr = a1 - b1;
    *z0Ptr = a0 - b0 - ( a1 < b1 );

}
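
sub128 detects a borrow the same way, by comparing the low words before the subtraction wraps. A minimal check with uint64_t standing in for bits64:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

int main( void )
{
    /* Body of sub128, written out with uint64_t. */
    uint64_t a0 = 1, a1 = 0, b0 = 0, b1 = 1;
    uint64_t z1 = a1 - b1;
    uint64_t z0 = a0 - b0 - ( a1 < b1 );

    /* 1:0 - 0:1 = 0:FFFFFFFFFFFFFFFF, the borrow propagating into the
       high word. */
    printf( "%016" PRIX64 ":%016" PRIX64 "\n", z0, z1 );
    return 0;
}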

#ifndef NO_SOFTFLOAT_UNUSED

/*----------------------------------------------------------------------------
| Subtracts the 192-bit value formed by concatenating `b0', `b1', and `b2'
| from the 192-bit value formed by concatenating `a0', `a1', and `a2'.
| Subtraction is modulo 2^192, so any borrow out (carry out) is lost.  The
| result is broken into three 64-bit pieces which are stored at the locations
| pointed to by `z0Ptr', `z1Ptr', and `z2Ptr'.
*----------------------------------------------------------------------------*/

INLINE void
 sub192(
     bits64 a0,
     bits64 a1,
     bits64 a2,
     bits64 b0,
     bits64 b1,
     bits64 b2,
     bits64 *z0Ptr,
     bits64 *z1Ptr,
     bits64 *z2Ptr
 )
{
    bits64 z0, z1, z2;
    int8 borrow0, borrow1;

    z2 = a2 - b2;
    borrow1 = ( a2 < b2 );
    z1 = a1 - b1;
    borrow0 = ( a1 < b1 );
    z0 = a0 - b0;
    z0 -= ( z1 < borrow1 );
    z1 -= borrow1;
    z0 -= borrow0;
    *z2Ptr = z2;
    *z1Ptr = z1;
    *z0Ptr = z0;

}

#endif

/*----------------------------------------------------------------------------
| Multiplies `a' by `b' to obtain a 128-bit product.  The product is broken
| into two 64-bit pieces which are stored at the locations pointed to by
| `z0Ptr' and `z1Ptr'.
*----------------------------------------------------------------------------*/

INLINE void mul64To128( bits64 a, bits64 b, bits64 *z0Ptr, bits64 *z1Ptr )
{
    bits32 aHigh, aLow, bHigh, bLow;
    bits64 z0, zMiddleA, zMiddleB, z1;

    aLow = a;
    aHigh = a>>32;
    bLow = b;
    bHigh = b>>32;
    z1 = ( (bits64) aLow ) * bLow;
    zMiddleA = ( (bits64) aLow ) * bHigh;
    zMiddleB = ( (bits64) aHigh ) * bLow;
    z0 = ( (bits64) aHigh ) * bHigh;
    zMiddleA += zMiddleB;
    z0 += ( ( (bits64) ( zMiddleA < zMiddleB ) )<<32 ) + ( zMiddleA>>32 );
    zMiddleA <<= 32;
    z1 += zMiddleA;
    z0 += ( z1 < zMiddleA );
    *z1Ptr = z1;
    *z0Ptr = z0;

}
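
mul64To128 assembles the full product from four 32x32-to-64-bit partial products. The scheme can be cross-checked against a product whose 128-bit value is known in closed form, (2^64 - 1)^2 = 2^128 - 2^65 + 1, i.e. 0xFFFFFFFFFFFFFFFE:0000000000000001. A standalone sketch with uint64_t/uint32_t standing in for bits64/bits32 (assumed mappings):

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Same schoolbook scheme as mul64To128, with standard C99 types. */
static void mul64to128( uint64_t a, uint64_t b, uint64_t *z0, uint64_t *z1 )
{
    uint32_t aLow = (uint32_t) a, aHigh = (uint32_t) ( a>>32 );
    uint32_t bLow = (uint32_t) b, bHigh = (uint32_t) ( b>>32 );
    uint64_t low  = (uint64_t) aLow * bLow;
    uint64_t midA = (uint64_t) aLow * bHigh;
    uint64_t midB = (uint64_t) aHigh * bLow;
    uint64_t high = (uint64_t) aHigh * bHigh;

    midA += midB;
    high += ( (uint64_t) ( midA < midB )<<32 ) + ( midA>>32 );
    midA <<= 32;
    low  += midA;
    high += ( low < midA );
    *z1 = low;
    *z0 = high;
}

int main( void )
{
    uint64_t z0, z1;

    /* (2^64 - 1)^2 = FFFFFFFFFFFFFFFE:0000000000000001 */
    mul64to128( UINT64_MAX, UINT64_MAX, &z0, &z1 );
    printf( "%016" PRIX64 ":%016" PRIX64 "\n", z0, z1 );
    return 0;
}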

#ifndef NO_SOFTFLOAT_UNUSED

/*----------------------------------------------------------------------------
| Multiplies the 128-bit value formed by concatenating `a0' and `a1' by
| `b' to obtain a 192-bit product.  The product is broken into three 64-bit
| pieces which are stored at the locations pointed to by `z0Ptr', `z1Ptr', and
| `z2Ptr'.
*----------------------------------------------------------------------------*/

INLINE void
 mul128By64To192(
     bits64 a0,
     bits64 a1,
     bits64 b,
     bits64 *z0Ptr,
     bits64 *z1Ptr,
     bits64 *z2Ptr
 )
{
    bits64 z0, z1, z2, more1;

    mul64To128( a1, b, &z1, &z2 );
    mul64To128( a0, b, &z0, &more1 );
    add128( z0, more1, 0, z1, &z0, &z1 );
    *z2Ptr = z2;
    *z1Ptr = z1;
    *z0Ptr = z0;

}

/*----------------------------------------------------------------------------
| Multiplies the 128-bit value formed by concatenating `a0' and `a1' to the
| 128-bit value formed by concatenating `b0' and `b1' to obtain a 256-bit
| product.  The product is broken into four 64-bit pieces which are stored at
| the locations pointed to by `z0Ptr', `z1Ptr', `z2Ptr', and `z3Ptr'.
*----------------------------------------------------------------------------*/

INLINE void
 mul128To256(
     bits64 a0,
     bits64 a1,
     bits64 b0,
     bits64 b1,
     bits64 *z0Ptr,
     bits64 *z1Ptr,
     bits64 *z2Ptr,
     bits64 *z3Ptr
 )
{
    bits64 z0, z1, z2, z3;
    bits64 more1, more2;

    mul64To128( a1, b1, &z2, &z3 );
    mul64To128( a1, b0, &z1, &more2 );
    add128( z1, more2, 0, z2, &z1, &z2 );
    mul64To128( a0, b0, &z0, &more1 );
    add128( z0, more1, 0, z1, &z0, &z1 );
    mul64To128( a0, b1, &more1, &more2 );
    add128( more1, more2, 0, z2, &more1, &z2 );
    add128( z0, z1, 0, more1, &z0, &z1 );
    *z3Ptr = z3;
    *z2Ptr = z2;
    *z1Ptr = z1;
    *z0Ptr = z0;

}

#endif

/*----------------------------------------------------------------------------
| Returns an approximation to the 64-bit integer quotient obtained by dividing
| `b' into the 128-bit value formed by concatenating `a0' and `a1'.  The
| divisor `b' must be at least 2^63.  If q is the exact quotient truncated
| toward zero, the approximation returned lies between q and q + 2 inclusive.
| If the exact quotient q is larger than 64 bits, the maximum positive 64-bit
| unsigned integer is returned.
*----------------------------------------------------------------------------*/

static bits64 estimateDiv128To64( bits64 a0, bits64 a1, bits64 b )
{
    bits64 b0, b1;
    bits64 rem0, rem1, term0, term1;
    bits64 z;

    if ( b <= a0 ) return LIT64( 0xFFFFFFFFFFFFFFFF );
    b0 = b>>32;
    z = ( b0<<32 <= a0 ) ? LIT64( 0xFFFFFFFF00000000 ) : ( a0 / b0 )<<32;
    mul64To128( b, z, &term0, &term1 );
    sub128( a0, a1, term0, term1, &rem0, &rem1 );
    while ( ( (sbits64) rem0 ) < 0 ) {
        z -= LIT64( 0x100000000 );
        b1 = b<<32;
        add128( rem0, rem1, b0, b1, &rem0, &rem1 );
    }
    rem0 = ( rem0<<32 ) | ( rem1>>32 );
    z |= ( b0<<32 <= rem0 ) ? 0xFFFFFFFF : rem0 / b0;
    return z;

}
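
The guarantee stated above (the estimate lies between q and q + 2 when b is at least 2^63) can be checked against an exact 128-by-64 division. The sketch below restates the routine using the unsigned __int128 extension of GCC/Clang in place of the 128-bit helper routines, purely to keep the illustration short; the availability of that extension is an assumption and the sketch is not part of the or1ksim build.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* estimateDiv128To64 restated with unsigned __int128 doing the 128-bit
   bookkeeping (same algorithm, different plumbing). */
static uint64_t estimate_div128to64( uint64_t a0, uint64_t a1, uint64_t b )
{
    if ( b <= a0 ) return UINT64_MAX;
    uint64_t b0 = b>>32;
    uint64_t z  = ( b0<<32 <= a0 ) ? 0xFFFFFFFF00000000ULL : ( a0 / b0 )<<32;
    unsigned __int128 a   = ( (unsigned __int128) a0<<64 ) | a1;
    unsigned __int128 rem = a - (unsigned __int128) b * z;

    while ( rem>>127 ) {                     /* remainder went negative */
        z   -= 0x100000000ULL;
        rem += (unsigned __int128) b<<32;    /* add back b * 2^32 */
    }
    uint64_t mid = (uint64_t) ( rem>>32 );
    z |= ( b0<<32 <= mid ) ? 0xFFFFFFFFULL : mid / b0;
    return z;
}

int main( void )
{
    /* Divisor at least 2^63; the estimate should lie in [q, q + 2]. */
    uint64_t a0 = 0x0123456789ABCDEFULL, a1 = 0xFEDCBA9876543210ULL;
    uint64_t b  = 0x8000000000000001ULL;
    unsigned __int128 a = ( (unsigned __int128) a0<<64 ) | a1;
    uint64_t q  = (uint64_t) ( a / b );
    uint64_t z  = estimate_div128to64( a0, a1, b );

    printf( "exact %016" PRIX64 "  estimate %016" PRIX64 "\n", q, z );
    return 0;
}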

/*----------------------------------------------------------------------------
| Returns an approximation to the square root of the 32-bit significand given
| by `a'.  Considered as an integer, `a' must be at least 2^31.  If bit 0 of
| `aExp' (the least significant bit) is 1, the integer returned approximates
| 2^31*sqrt(`a'/2^31), where `a' is considered an integer.  If bit 0 of `aExp'
| is 0, the integer returned approximates 2^31*sqrt(`a'/2^30).  In either
| case, the approximation returned lies strictly within +/-2 of the exact
| value.
*----------------------------------------------------------------------------*/

static bits32 estimateSqrt32( int16 aExp, bits32 a )
{
    static const bits16 sqrtOddAdjustments[] = {
        0x0004, 0x0022, 0x005D, 0x00B1, 0x011D, 0x019F, 0x0236, 0x02E0,
        0x039C, 0x0468, 0x0545, 0x0631, 0x072B, 0x0832, 0x0946, 0x0A67
    };
    static const bits16 sqrtEvenAdjustments[] = {
        0x0A2D, 0x08AF, 0x075A, 0x0629, 0x051A, 0x0429, 0x0356, 0x029E,
        0x0200, 0x0179, 0x0109, 0x00AF, 0x0068, 0x0034, 0x0012, 0x0002
    };
    int8 index;
    bits32 z;

    index = ( a>>27 ) & 15;
    if ( aExp & 1 ) {
        z = 0x4000 + ( a>>17 ) - sqrtOddAdjustments[ index ];
        z = ( ( a / z )<<14 ) + ( z<<15 );
        a >>= 1;
    }
    else {
        z = 0x8000 + ( a>>17 ) - sqrtEvenAdjustments[ index ];
        z = a / z + z;
        z = ( 0x20000 <= z ) ? 0xFFFF8000 : ( z<<15 );
        if ( z <= a ) return (bits32) ( ( (sbits32) a )>>1 );
    }
    return ( (bits32) ( ( ( (bits64) a )<<31 ) / z ) ) + ( z>>1 );

}
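
To see the documented accuracy of estimateSqrt32, the routine can be restated with C99 types (uint32_t/uint16_t assumed for bits32/bits16) and compared against a double-precision reference for a sample input; per the comment above, the result should lie within +/-2 of 2^31*sqrt(a/2^31) for odd aExp and of 2^31*sqrt(a/2^30) for even aExp.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>
#include <math.h>

/* estimateSqrt32 restated with standard types; illustrative only. */
static uint32_t estimate_sqrt32( int aExp, uint32_t a )
{
    static const uint16_t sqrtOddAdjustments[] = {
        0x0004, 0x0022, 0x005D, 0x00B1, 0x011D, 0x019F, 0x0236, 0x02E0,
        0x039C, 0x0468, 0x0545, 0x0631, 0x072B, 0x0832, 0x0946, 0x0A67
    };
    static const uint16_t sqrtEvenAdjustments[] = {
        0x0A2D, 0x08AF, 0x075A, 0x0629, 0x051A, 0x0429, 0x0356, 0x029E,
        0x0200, 0x0179, 0x0109, 0x00AF, 0x0068, 0x0034, 0x0012, 0x0002
    };
    int index = ( a>>27 ) & 15;
    uint32_t z;

    if ( aExp & 1 ) {
        z = 0x4000 + ( a>>17 ) - sqrtOddAdjustments[ index ];
        z = ( ( a / z )<<14 ) + ( z<<15 );
        a >>= 1;
    }
    else {
        z = 0x8000 + ( a>>17 ) - sqrtEvenAdjustments[ index ];
        z = a / z + z;
        z = ( 0x20000 <= z ) ? 0xFFFF8000 : ( z<<15 );
        if ( z <= a ) return (uint32_t) ( ( (int32_t) a )>>1 );
    }
    return ( (uint32_t) ( ( ( (uint64_t) a )<<31 ) / z ) ) + ( z>>1 );
}

int main( void )
{
    uint32_t a = 0xC0000000u;   /* significand; must be at least 2^31 */
    double refOdd  = ldexp( sqrt( ldexp( (double) a, -31 ) ), 31 );
    double refEven = ldexp( sqrt( ldexp( (double) a, -30 ) ), 31 );

    printf( "odd aExp:  %" PRIu32 " (reference %.1f)\n",
            estimate_sqrt32( 1, a ), refOdd );
    printf( "even aExp: %" PRIu32 " (reference %.1f)\n",
            estimate_sqrt32( 0, a ), refEven );
    return 0;
}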

/*----------------------------------------------------------------------------
| Returns the number of leading 0 bits before the most-significant 1 bit of
| `a'.  If `a' is zero, 32 is returned.
*----------------------------------------------------------------------------*/

static int8 countLeadingZeros32( bits32 a )
{
    static const int8 countLeadingZerosHigh[] = {
        8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4,
        3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
        2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
        2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };
    int8 shiftCount;

    shiftCount = 0;
    if ( a < 0x10000 ) {
        shiftCount += 16;
        a <<= 16;
    }
    if ( a < 0x1000000 ) {
        shiftCount += 8;
        a <<= 8;
    }
    shiftCount += countLeadingZerosHigh[ a>>24 ];
    return shiftCount;

}
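
The table-driven count has a simple reference definition that is handy for spot checks: scan from the most significant bit down, returning 32 for a zero input. A naive standalone version with the same contract (not the table method used above):

#include <stdint.h>
#include <stdio.h>

/* Bit-by-bit reference for countLeadingZeros32: leading zero count,
   with 32 returned when a == 0. */
static int clz32_ref( uint32_t a )
{
    int n = 0;

    while ( n < 32 && !( a & 0x80000000u ) ) {
        a <<= 1;
        ++n;
    }
    return n;
}

int main( void )
{
    printf( "%d %d %d %d\n",
            clz32_ref( 0 ),            /* 32 */
            clz32_ref( 1 ),            /* 31 */
            clz32_ref( 0x00010000u ),  /* 15 */
            clz32_ref( 0x80000000u )   /*  0 */ );
    return 0;
}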

/*----------------------------------------------------------------------------
| Returns the number of leading 0 bits before the most-significant 1 bit of
| `a'.  If `a' is zero, 64 is returned.
*----------------------------------------------------------------------------*/

static int8 countLeadingZeros64( bits64 a )
{
    int8 shiftCount;

    shiftCount = 0;
    if ( a < ( (bits64) 1 )<<32 ) {
        shiftCount += 32;
    }
    else {
        a >>= 32;
    }
    shiftCount += countLeadingZeros32( a );
    return shiftCount;

}

#ifndef NO_SOFTFLOAT_UNUSED

/*----------------------------------------------------------------------------
| Returns 1 if the 128-bit value formed by concatenating `a0' and `a1'
| is equal to the 128-bit value formed by concatenating `b0' and `b1'.
| Otherwise, returns 0.
*----------------------------------------------------------------------------*/

INLINE flag eq128( bits64 a0, bits64 a1, bits64 b0, bits64 b1 )
{

    return ( a0 == b0 ) && ( a1 == b1 );

}

/*----------------------------------------------------------------------------
| Returns 1 if the 128-bit value formed by concatenating `a0' and `a1' is less
| than or equal to the 128-bit value formed by concatenating `b0' and `b1'.
| Otherwise, returns 0.
*----------------------------------------------------------------------------*/

INLINE flag le128( bits64 a0, bits64 a1, bits64 b0, bits64 b1 )
{

    return ( a0 < b0 ) || ( ( a0 == b0 ) && ( a1 <= b1 ) );

}

/*----------------------------------------------------------------------------
| Returns 1 if the 128-bit value formed by concatenating `a0' and `a1' is less
| than the 128-bit value formed by concatenating `b0' and `b1'.  Otherwise,
| returns 0.
*----------------------------------------------------------------------------*/

INLINE flag lt128( bits64 a0, bits64 a1, bits64 b0, bits64 b1 )
{

    return ( a0 < b0 ) || ( ( a0 == b0 ) && ( a1 < b1 ) );

}

/*----------------------------------------------------------------------------
| Returns 1 if the 128-bit value formed by concatenating `a0' and `a1' is
| not equal to the 128-bit value formed by concatenating `b0' and `b1'.
| Otherwise, returns 0.
*----------------------------------------------------------------------------*/

INLINE flag ne128( bits64 a0, bits64 a1, bits64 b0, bits64 b1 )
{

    return ( a0 != b0 ) || ( a1 != b1 );

}

#endif
