OpenCores
URL https://opencores.org/ocsvn/openrisc/openrisc/trunk

Subversion Repositories openrisc

[/] [openrisc/] [trunk/] [gnu-old/] [gcc-4.2.2/] [gcc/] [sreal.c] - Blame information for rev 822

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 38 julius
/* Simple data type for positive real numbers for the GNU compiler.
2
   Copyright (C) 2002, 2003, 2004, 2007 Free Software Foundation, Inc.
3
 
4
This file is part of GCC.
5
 
6
GCC is free software; you can redistribute it and/or modify it under
7
the terms of the GNU General Public License as published by the Free
8
Software Foundation; either version 3, or (at your option) any later
9
version.
10
 
11
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12
WARRANTY; without even the implied warranty of MERCHANTABILITY or
13
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14
for more details.
15
 
16
You should have received a copy of the GNU General Public License
17
along with GCC; see the file COPYING3.  If not see
18
<http://www.gnu.org/licenses/>.  */
19
 
20
/* This library supports positive real numbers and 0;
   inf and nan are NOT supported.
   It is written to be simple and fast.

   Value of sreal is
        x = sig * 2 ^ exp
   where
        sig = significand
          (for < 64-bit machines sig = sig_lo + sig_hi * 2 ^ SREAL_PART_BITS)
        exp = exponent

   One HOST_WIDE_INT is used for the significand on 64-bit (and more than
   64-bit) machines,
   otherwise two HOST_WIDE_INTs are used for the significand.
   Only half of the significand bits are used (in normalized sreals) so that
   we do not have problems with overflow, for example when
   c->sig = a->sig * b->sig.
   So the precision for 64-bit and 32-bit machines is 32-bit.

   Invariant: The numbers are normalized before and after each call of sreal_*.

   Normalized sreals:
   All numbers (except zero) meet the following conditions:
         SREAL_MIN_SIG <= sig && sig <= SREAL_MAX_SIG
        -SREAL_MAX_EXP <= exp && exp <= SREAL_MAX_EXP

   If the number would be too large, it is set to the upper bounds of these
   conditions.

   If the number is zero or would be too small it meets the following
   conditions:
        sig == 0 && exp == -SREAL_MAX_EXP
*/
51
 
52
#include "config.h"
53
#include "system.h"
54
#include "coretypes.h"
55
#include "tm.h"
56
#include "sreal.h"
57
 
58
static inline void copy (sreal *, sreal *);
59
static inline void shift_right (sreal *, int);
60
static void normalize (sreal *);
61
 
62
/* Print the content of struct sreal to FILE for debugging, in the form
   "(sig * 2^exp)" — or "((sig_hi * 2^16 + sig_lo) * 2^exp)" when the
   significand is stored in two parts.  Does not print a newline.  */

void
dump_sreal (FILE *file, sreal *x)
{
#if SREAL_PART_BITS < 32
  fprintf (file, "((" HOST_WIDE_INT_PRINT_UNSIGNED " * 2^16 + "
           HOST_WIDE_INT_PRINT_UNSIGNED ") * 2^%d)",
           x->sig_hi, x->sig_lo, x->exp);
#else
  fprintf (file, "(" HOST_WIDE_INT_PRINT_UNSIGNED " * 2^%d)", x->sig, x->exp);
#endif
}
75
 
76
/* Copy the sreal number.  */
77
 
78
static inline void
79
copy (sreal *r, sreal *a)
80
{
81
#if SREAL_PART_BITS < 32
82
  r->sig_lo = a->sig_lo;
83
  r->sig_hi = a->sig_hi;
84
#else
85
  r->sig = a->sig;
86
#endif
87
  r->exp = a->exp;
88
}
89
 
90
/* Shift X right by S bits.  Needed: 0 < S <= SREAL_BITS.
   When the most significant bit shifted out is 1, add 1 to X (rounding).  */

static inline void
shift_right (sreal *x, int s)
{
  gcc_assert (s > 0);
  gcc_assert (s <= SREAL_BITS);
  /* Exponent should never be so large because shift_right is used only by
     sreal_add and sreal_sub and thus the number cannot be shifted out from
     exponent range.  */
  gcc_assert (x->exp + s <= SREAL_MAX_EXP);

  x->exp += s;

#if SREAL_PART_BITS < 32
  if (s > SREAL_PART_BITS)
    {
      /* The whole low part is shifted out; only (shifted) sig_hi
	 survives, and it lands entirely in sig_lo.  Add the bit just
	 below the cut to sig_hi first so the result is rounded.  */
      s -= SREAL_PART_BITS;
      x->sig_hi += (uhwi) 1 << (s - 1);
      x->sig_lo = x->sig_hi >> s;
      x->sig_hi = 0;
    }
  else
    {
      /* Round by adding the highest bit about to be shifted out ...  */
      x->sig_lo += (uhwi) 1 << (s - 1);
      /* ... and propagate any carry out of the low part.  */
      if (x->sig_lo & ((uhwi) 1 << SREAL_PART_BITS))
        {
          x->sig_hi++;
          x->sig_lo -= (uhwi) 1 << SREAL_PART_BITS;
        }
      x->sig_lo >>= s;
      /* Move the low S bits of sig_hi into the top of sig_lo.  */
      x->sig_lo |= (x->sig_hi & (((uhwi) 1 << s) - 1)) << (SREAL_PART_BITS - s);
      x->sig_hi >>= s;
    }
#else
  /* Single-part significand: add the highest shifted-out bit (rounding),
     then shift.  */
  x->sig += (uhwi) 1 << (s - 1);
  x->sig >>= s;
#endif
}
130
 
131
/* Normalize *X so that it satisfies the invariants described at the top
   of this file: either sig == 0 and exp == -SREAL_MAX_EXP (zero /
   underflow), or SREAL_MIN_SIG <= sig <= SREAL_MAX_SIG with exp clamped
   to [-SREAL_MAX_EXP, SREAL_MAX_EXP].  Rounds when shifting right,
   saturates on overflow.  */

static void
normalize (sreal *x)
{
#if SREAL_PART_BITS < 32
  int shift;
  HOST_WIDE_INT mask;

  if (x->sig_lo == 0 && x->sig_hi == 0)
    {
      /* Canonical representation of zero.  */
      x->exp = -SREAL_MAX_EXP;
    }
  else if (x->sig_hi < SREAL_MIN_SIG)
    {
      /* Significand too small: shift left until sig_hi is in range.  */
      if (x->sig_hi == 0)
        {
          /* Move lower part of significand to higher part.  */
          x->sig_hi = x->sig_lo;
          x->sig_lo = 0;
          x->exp -= SREAL_PART_BITS;
        }
      shift = 0;
      while (x->sig_hi < SREAL_MIN_SIG)
        {
          x->sig_hi <<= 1;
          x->exp--;
          shift++;
        }
      /* Check underflow.  */
      if (x->exp < -SREAL_MAX_EXP)
        {
          x->exp = -SREAL_MAX_EXP;
          x->sig_hi = 0;
          x->sig_lo = 0;
        }
      else if (shift)
        {
          /* Move the top SHIFT bits of sig_lo into the freed low bits
             of sig_hi, then shift sig_lo up, masking to its width.  */
          mask = (1 << SREAL_PART_BITS) - (1 << (SREAL_PART_BITS - shift));
          x->sig_hi |= (x->sig_lo & mask) >> (SREAL_PART_BITS - shift);
          x->sig_lo = (x->sig_lo << shift) & (((uhwi) 1 << SREAL_PART_BITS) - 1);
        }
    }
  else if (x->sig_hi > SREAL_MAX_SIG)
    {
      unsigned HOST_WIDE_INT tmp = x->sig_hi;

      /* Find out how many bits will be shifted.  */
      shift = 0;
      do
        {
          tmp >>= 1;
          shift++;
        }
      while (tmp > SREAL_MAX_SIG);

      /* Round the number.  */
      x->sig_lo += (uhwi) 1 << (shift - 1);

      x->sig_lo >>= shift;
      /* The low SHIFT bits of sig_hi become the top bits of sig_lo.  */
      x->sig_lo += ((x->sig_hi & (((uhwi) 1 << shift) - 1))
                    << (SREAL_PART_BITS - shift));
      x->sig_hi >>= shift;
      x->exp += shift;
      /* The rounding add may have carried out of sig_lo.  */
      if (x->sig_lo & ((uhwi) 1 << SREAL_PART_BITS))
        {
          x->sig_lo -= (uhwi) 1 << SREAL_PART_BITS;
          x->sig_hi++;
          if (x->sig_hi > SREAL_MAX_SIG)
            {
              /* x->sig_hi was SREAL_MAX_SIG before increment
                 so now last bit is zero.  */
              x->sig_hi >>= 1;
              x->sig_lo >>= 1;
              x->exp++;
            }
        }

      /* Check overflow.  */
      if (x->exp > SREAL_MAX_EXP)
        {
          x->exp = SREAL_MAX_EXP;
          x->sig_hi = SREAL_MAX_SIG;
          x->sig_lo = SREAL_MAX_SIG;
        }
    }
#else
  if (x->sig == 0)
    {
      /* Canonical representation of zero.  */
      x->exp = -SREAL_MAX_EXP;
    }
  else if (x->sig < SREAL_MIN_SIG)
    {
      /* Shift left until the significand is in range.  */
      do
        {
          x->sig <<= 1;
          x->exp--;
        }
      while (x->sig < SREAL_MIN_SIG);

      /* Check underflow.  */
      if (x->exp < -SREAL_MAX_EXP)
        {
          x->exp = -SREAL_MAX_EXP;
          x->sig = 0;
        }
    }
  else if (x->sig > SREAL_MAX_SIG)
    {
      int last_bit;
      /* Shift right, remembering the last bit shifted out for rounding.  */
      do
        {
          last_bit = x->sig & 1;
          x->sig >>= 1;
          x->exp++;
        }
      while (x->sig > SREAL_MAX_SIG);

      /* Round the number.  */
      x->sig += last_bit;
      if (x->sig > SREAL_MAX_SIG)
        {
          /* Rounding pushed the significand out of range again.  */
          x->sig >>= 1;
          x->exp++;
        }

      /* Check overflow.  */
      if (x->exp > SREAL_MAX_EXP)
        {
          x->exp = SREAL_MAX_EXP;
          x->sig = SREAL_MAX_SIG;
        }
    }
#endif
}
266
 
267
/* Set *R to SIG * 2 ^ EXP.  Return R.  */
268
 
269
sreal *
270
sreal_init (sreal *r, unsigned HOST_WIDE_INT sig, signed int exp)
271
{
272
#if SREAL_PART_BITS < 32
273
  r->sig_lo = 0;
274
  r->sig_hi = sig;
275
  r->exp = exp - 16;
276
#else
277
  r->sig = sig;
278
  r->exp = exp;
279
#endif
280
  normalize (r);
281
  return r;
282
}
283
 
284
/* Return the integer value of *R, truncated toward zero.  Returns 0 when
   the value is smaller than 1 can represent after shifting, and saturates
   to MAX_HOST_WIDE_INT when the exponent is too large.  */

HOST_WIDE_INT
sreal_to_int (sreal *r)
{
#if SREAL_PART_BITS < 32
  if (r->exp <= -SREAL_BITS)
    return 0;
  if (r->exp >= 0)
    return MAX_HOST_WIDE_INT;
  /* Combine both significand parts, then shift away the fraction.  */
  return ((r->sig_hi << SREAL_PART_BITS) + r->sig_lo) >> -r->exp;
#else
  if (r->exp <= -SREAL_BITS)
    return 0;
  if (r->exp >= SREAL_PART_BITS)
    return MAX_HOST_WIDE_INT;
  if (r->exp > 0)
    /* NOTE(review): sig << exp can exceed MAX_HOST_WIDE_INT for exponents
       just below SREAL_PART_BITS — presumably callers stay in range;
       confirm before relying on saturation here.  */
    return r->sig << r->exp;
  if (r->exp < 0)
    return r->sig >> -r->exp;
  return r->sig;
#endif
}
307
 
308
/* Compare *A and *B. Return -1 if *A < *B, 1 if *A > *B and 0 if *A == *B.  */
309
 
310
int
311
sreal_compare (sreal *a, sreal *b)
312
{
313
  if (a->exp > b->exp)
314
    return 1;
315
  if (a->exp < b->exp)
316
    return -1;
317
#if SREAL_PART_BITS < 32
318
  if (a->sig_hi > b->sig_hi)
319
    return 1;
320
  if (a->sig_hi < b->sig_hi)
321
    return -1;
322
  if (a->sig_lo > b->sig_lo)
323
    return 1;
324
  if (a->sig_lo < b->sig_lo)
325
    return -1;
326
#else
327
  if (a->sig > b->sig)
328
    return 1;
329
  if (a->sig < b->sig)
330
    return -1;
331
#endif
332
  return 0;
333
}
334
 
335
/* *R = *A + *B.  Return R.  R is normalized on return.  */

sreal *
sreal_add (sreal *r, sreal *a, sreal *b)
{
  int dexp;
  sreal tmp;
  sreal *bb;

  /* Make A the larger operand so that B is the one shifted right
     to align exponents.  */
  if (sreal_compare (a, b) < 0)
    {
      sreal *swap;
      swap = a;
      a = b;
      b = swap;
    }

  dexp = a->exp - b->exp;
  r->exp = a->exp;
  /* B is too small to affect A's significand at all; the sum is A.  */
  if (dexp > SREAL_BITS)
    {
#if SREAL_PART_BITS < 32
      r->sig_hi = a->sig_hi;
      r->sig_lo = a->sig_lo;
#else
      r->sig = a->sig;
#endif
      return r;
    }

  /* Align B's significand with A's exponent (shift_right rounds).  */
  if (dexp == 0)
    bb = b;
  else
    {
      copy (&tmp, b);
      shift_right (&tmp, dexp);
      bb = &tmp;
    }

#if SREAL_PART_BITS < 32
  r->sig_hi = a->sig_hi + bb->sig_hi;
  r->sig_lo = a->sig_lo + bb->sig_lo;
  /* Propagate the carry out of the low part into the high part.  */
  if (r->sig_lo & ((uhwi) 1 << SREAL_PART_BITS))
    {
      r->sig_hi++;
      r->sig_lo -= (uhwi) 1 << SREAL_PART_BITS;
    }
#else
  r->sig = a->sig + bb->sig;
#endif
  normalize (r);
  return r;
}
388
 
389
/* *R = *A - *B.  Return R.  Requires *A >= *B (asserted); the library
   supports only non-negative numbers.  R is normalized on return.  */

sreal *
sreal_sub (sreal *r, sreal *a, sreal *b)
{
  int dexp;
  sreal tmp;
  sreal *bb;

  gcc_assert (sreal_compare (a, b) >= 0);

  dexp = a->exp - b->exp;
  r->exp = a->exp;
  /* B is too small to affect A's significand at all; the difference is A.  */
  if (dexp > SREAL_BITS)
    {
#if SREAL_PART_BITS < 32
      r->sig_hi = a->sig_hi;
      r->sig_lo = a->sig_lo;
#else
      r->sig = a->sig;
#endif
      return r;
    }
  /* Align B's significand with A's exponent (shift_right rounds).  */
  if (dexp == 0)
    bb = b;
  else
    {
      copy (&tmp, b);
      shift_right (&tmp, dexp);
      bb = &tmp;
    }

#if SREAL_PART_BITS < 32
  /* Two-part subtraction with an explicit borrow from the high part.  */
  if (a->sig_lo < bb->sig_lo)
    {
      r->sig_hi = a->sig_hi - bb->sig_hi - 1;
      r->sig_lo = a->sig_lo + ((uhwi) 1 << SREAL_PART_BITS) - bb->sig_lo;
    }
  else
    {
      r->sig_hi = a->sig_hi - bb->sig_hi;
      r->sig_lo = a->sig_lo - bb->sig_lo;
    }
#else
  r->sig = a->sig - bb->sig;
#endif
  normalize (r);
  return r;
}
438
 
439
/* *R = *A * *B.  Return R.  If either operand is below SREAL_MIN_SIG
   (i.e. denormal/zero), the result is the canonical zero.  R is
   normalized on return.  */

sreal *
sreal_mul (sreal *r, sreal *a, sreal *b)
{
#if SREAL_PART_BITS < 32
  if (a->sig_hi < SREAL_MIN_SIG || b->sig_hi < SREAL_MIN_SIG)
    {
      /* Canonical zero.  */
      r->sig_lo = 0;
      r->sig_hi = 0;
      r->exp = -SREAL_MAX_EXP;
    }
  else
    {
      unsigned HOST_WIDE_INT tmp1, tmp2, tmp3;
      if (sreal_compare (a, b) < 0)
        {
          sreal *swap;
          swap = a;
          a = b;
          b = swap;
        }

      /* The high part carries an implicit 2^SREAL_PART_BITS weight;
         account for it once in the exponent.  */
      r->exp = a->exp + b->exp + SREAL_PART_BITS;

      /* Schoolbook multiplication of the two-part significands:
         lo*lo, lo*hi and hi*lo cross terms, hi*hi.  */
      tmp1 = a->sig_lo * b->sig_lo;
      tmp2 = a->sig_lo * b->sig_hi;
      tmp3 = a->sig_hi * b->sig_lo + (tmp1 >> SREAL_PART_BITS);

      r->sig_hi = a->sig_hi * b->sig_hi;
      r->sig_hi += (tmp2 >> SREAL_PART_BITS) + (tmp3 >> SREAL_PART_BITS);
      tmp2 &= ((uhwi) 1 << SREAL_PART_BITS) - 1;
      tmp3 &= ((uhwi) 1 << SREAL_PART_BITS) - 1;
      tmp1 = tmp2 + tmp3;

      /* Low SREAL_PART_BITS of the cross-term sum go to sig_lo;
         its carry goes to sig_hi.  */
      r->sig_lo = tmp1 & (((uhwi) 1 << SREAL_PART_BITS) - 1);
      r->sig_hi += tmp1 >> SREAL_PART_BITS;

      normalize (r);
    }
#else
  if (a->sig < SREAL_MIN_SIG || b->sig < SREAL_MIN_SIG)
    {
      /* Canonical zero.  */
      r->sig = 0;
      r->exp = -SREAL_MAX_EXP;
    }
  else
    {
      /* Only half of the significand bits are used in normalized
         sreals, so this product cannot overflow (see file comment).  */
      r->sig = a->sig * b->sig;
      r->exp = a->exp + b->exp;
      normalize (r);
    }
#endif
  return r;
}
494
 
495
/* *R = *A / *B.  Return R.  B must be nonzero (asserted); a denormal/zero
   A yields the canonical zero.  R is normalized on return.  */

sreal *
sreal_div (sreal *r, sreal *a, sreal *b)
{
#if SREAL_PART_BITS < 32
  unsigned HOST_WIDE_INT tmp, tmp1, tmp2;

  gcc_assert (b->sig_hi >= SREAL_MIN_SIG);
  if (a->sig_hi < SREAL_MIN_SIG)
    {
      /* Canonical zero.  */
      r->sig_hi = 0;
      r->sig_lo = 0;
      r->exp = -SREAL_MAX_EXP;
    }
  else
    {
      /* Since division by the whole number is pretty ugly to write
         we are dividing by first 3/4 of bits of number.  */

      /* Full dividend, and the top 3/4 bits of the divisor, rounded
         on the bit just below the cut.  */
      tmp1 = (a->sig_hi << SREAL_PART_BITS) + a->sig_lo;
      tmp2 = ((b->sig_hi << (SREAL_PART_BITS / 2))
              + (b->sig_lo >> (SREAL_PART_BITS / 2)));
      if (b->sig_lo & ((uhwi) 1 << ((SREAL_PART_BITS / 2) - 1)))
        tmp2++;

      /* Long division in three steps of SREAL_PART_BITS/2 each;
         each step divides, keeps the remainder, and shifts it up.  */
      r->sig_lo = 0;
      tmp = tmp1 / tmp2;
      tmp1 = (tmp1 % tmp2) << (SREAL_PART_BITS / 2);
      r->sig_hi = tmp << SREAL_PART_BITS;

      tmp = tmp1 / tmp2;
      tmp1 = (tmp1 % tmp2) << (SREAL_PART_BITS / 2);
      r->sig_hi += tmp << (SREAL_PART_BITS / 2);

      tmp = tmp1 / tmp2;
      r->sig_hi += tmp;

      /* Compensate for the scaling of dividend and divisor above.  */
      r->exp = a->exp - b->exp - SREAL_BITS - SREAL_PART_BITS / 2;
      normalize (r);
    }
#else
  gcc_assert (b->sig != 0);
  /* Pre-shift the dividend to keep precision; the exponent compensates.  */
  r->sig = (a->sig << SREAL_PART_BITS) / b->sig;
  r->exp = a->exp - b->exp - SREAL_PART_BITS;
  normalize (r);
#endif
  return r;
}

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.