OpenCores
URL https://opencores.org/ocsvn/openrisc/openrisc/trunk

Subversion Repositories openrisc

[/] [openrisc/] [trunk/] [gnu-dev/] [or1k-gcc/] [gcc/] [sreal.c] - Blame information for rev 774

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 684 jeremybenn
/* Simple data type for positive real numbers for the GNU compiler.
2
   Copyright (C) 2002, 2003, 2004, 2007, 2010 Free Software Foundation, Inc.
3
 
4
This file is part of GCC.
5
 
6
GCC is free software; you can redistribute it and/or modify it under
7
the terms of the GNU General Public License as published by the Free
8
Software Foundation; either version 3, or (at your option) any later
9
version.
10
 
11
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12
WARRANTY; without even the implied warranty of MERCHANTABILITY or
13
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14
for more details.
15
 
16
You should have received a copy of the GNU General Public License
17
along with GCC; see the file COPYING3.  If not see
18
<http://www.gnu.org/licenses/>.  */
19
 
20
/* This library supports positive real numbers and 0;
   inf and nan are NOT supported.
   It is written to be simple and fast.

   Value of sreal is
        x = sig * 2 ^ exp
   where
        sig = significand
          (for < 64-bit machines sig = sig_lo + sig_hi * 2 ^ SREAL_PART_BITS)
        exp = exponent

   One HOST_WIDE_INT is used for the significand on 64-bit (and more than
   64-bit) machines,
   otherwise two HOST_WIDE_INTs are used for the significand.
   Only a half of the significand bits is used (in normalized sreals) so that
   we do not have problems with overflow, for example when
   c->sig = a->sig * b->sig.
   So the precision for 64-bit and 32-bit machines is 32-bit.

   Invariant: The numbers are normalized before and after each call of sreal_*.

   Normalized sreals:
   All numbers (except zero) meet the following conditions:
         SREAL_MIN_SIG <= sig && sig <= SREAL_MAX_SIG
        -SREAL_MAX_EXP <= exp && exp <= SREAL_MAX_EXP

   If the number would be too large, it is set to the upper bounds of these
   conditions.

   If the number is zero or would be too small it meets the following
   conditions:
        sig == 0 && exp == -SREAL_MAX_EXP
*/
51
 
52
#include "config.h"
53
#include "system.h"
54
#include "coretypes.h"
55
#include "sreal.h"
56
 
57
/* Internal helpers, defined below.  */
static inline void copy (sreal *, sreal *);
static inline void shift_right (sreal *, int);
static void normalize (sreal *);
60
 
61
/* Print the content of struct sreal *X to FILE in the form
   "(sig * 2^exp)", or "((sig_hi * 2^16 + sig_lo) * 2^exp)" when the
   significand is stored in two parts.  */

void
dump_sreal (FILE *file, sreal *x)
{
#if SREAL_PART_BITS < 32
  fprintf (file, "((" HOST_WIDE_INT_PRINT_UNSIGNED " * 2^16 + "
           HOST_WIDE_INT_PRINT_UNSIGNED ") * 2^%d)",
           x->sig_hi, x->sig_lo, x->exp);
#else
  fprintf (file, "(" HOST_WIDE_INT_PRINT_UNSIGNED " * 2^%d)", x->sig, x->exp);
#endif
}
74
 
75
/* Copy the sreal number.  */
76
 
77
static inline void
78
copy (sreal *r, sreal *a)
79
{
80
#if SREAL_PART_BITS < 32
81
  r->sig_lo = a->sig_lo;
82
  r->sig_hi = a->sig_hi;
83
#else
84
  r->sig = a->sig;
85
#endif
86
  r->exp = a->exp;
87
}
88
 
89
/* Shift X right by S bits.  Needed: 0 < S <= SREAL_BITS.
   When the most significant bit shifted out is 1, add 1 to X (rounding).  */

static inline void
shift_right (sreal *x, int s)
{
  gcc_assert (s > 0);
  gcc_assert (s <= SREAL_BITS);
  /* Exponent should never be so large because shift_right is used only by
     sreal_add and sreal_sub and thus the number cannot be shifted out from
     exponent range.  */
  gcc_assert (x->exp + s <= SREAL_MAX_EXP);

  x->exp += s;

#if SREAL_PART_BITS < 32
  if (s > SREAL_PART_BITS)
    {
      /* The whole low part is shifted out; the result comes entirely
         from the high part.  */
      s -= SREAL_PART_BITS;
      /* Round to nearest: pre-add one at the highest bit position that
         will be shifted out.  */
      x->sig_hi += (uhwi) 1 << (s - 1);
      x->sig_lo = x->sig_hi >> s;
      x->sig_hi = 0;
    }
  else
    {
      /* Round to nearest: pre-add one at the highest bit position of
         the low part that will be shifted out.  */
      x->sig_lo += (uhwi) 1 << (s - 1);
      if (x->sig_lo & ((uhwi) 1 << SREAL_PART_BITS))
        {
          /* The rounding carried out of the low part; propagate the
             carry into the high part.  */
          x->sig_hi++;
          x->sig_lo -= (uhwi) 1 << SREAL_PART_BITS;
        }
      x->sig_lo >>= s;
      /* Move the S low bits of the high part into the vacated top
         bits of the low part.  */
      x->sig_lo |= (x->sig_hi & (((uhwi) 1 << s) - 1)) << (SREAL_PART_BITS - s);
      x->sig_hi >>= s;
    }
#else
  /* Round to nearest: pre-add one at the highest shifted-out bit.  */
  x->sig += (uhwi) 1 << (s - 1);
  x->sig >>= s;
#endif
}
129
 
130
/* Normalize *X: bring its significand into [SREAL_MIN_SIG, SREAL_MAX_SIG]
   by shifting (adjusting the exponent to compensate, rounding on right
   shifts), saturate to the maximum representable value on exponent
   overflow, and flush to zero (sig == 0, exp == -SREAL_MAX_EXP) on
   exponent underflow or when the significand is zero.  */

static void
normalize (sreal *x)
{
#if SREAL_PART_BITS < 32
  int shift;
  HOST_WIDE_INT mask;

  if (x->sig_lo == 0 && x->sig_hi == 0)
    {
      /* Canonical representation of zero.  */
      x->exp = -SREAL_MAX_EXP;
    }
  else if (x->sig_hi < SREAL_MIN_SIG)
    {
      /* Significand too small: shift it left.  */
      if (x->sig_hi == 0)
        {
          /* Move lower part of significant to higher part.  */
          x->sig_hi = x->sig_lo;
          x->sig_lo = 0;
          x->exp -= SREAL_PART_BITS;
        }
      shift = 0;
      while (x->sig_hi < SREAL_MIN_SIG)
        {
          x->sig_hi <<= 1;
          x->exp--;
          shift++;
        }
      /* Check underflow.  */
      if (x->exp < -SREAL_MAX_EXP)
        {
          x->exp = -SREAL_MAX_EXP;
          x->sig_hi = 0;
          x->sig_lo = 0;
        }
      else if (shift)
        {
          /* Move the top SHIFT bits of the low part into the freed
             low bits of the high part, then shift the low part up.  */
          mask = (1 << SREAL_PART_BITS) - (1 << (SREAL_PART_BITS - shift));
          x->sig_hi |= (x->sig_lo & mask) >> (SREAL_PART_BITS - shift);
          x->sig_lo = (x->sig_lo << shift) & (((uhwi) 1 << SREAL_PART_BITS) - 1);
        }
    }
  else if (x->sig_hi > SREAL_MAX_SIG)
    {
      /* Significand too large: shift it right, rounding.  */
      unsigned HOST_WIDE_INT tmp = x->sig_hi;

      /* Find out how many bits will be shifted.  */
      shift = 0;
      do
        {
          tmp >>= 1;
          shift++;
        }
      while (tmp > SREAL_MAX_SIG);

      /* Round the number.  */
      x->sig_lo += (uhwi) 1 << (shift - 1);

      x->sig_lo >>= shift;
      x->sig_lo += ((x->sig_hi & (((uhwi) 1 << shift) - 1))
                    << (SREAL_PART_BITS - shift));
      x->sig_hi >>= shift;
      x->exp += shift;
      /* Propagate a carry produced by the rounding above.  */
      if (x->sig_lo & ((uhwi) 1 << SREAL_PART_BITS))
        {
          x->sig_lo -= (uhwi) 1 << SREAL_PART_BITS;
          x->sig_hi++;
          if (x->sig_hi > SREAL_MAX_SIG)
            {
              /* x->sig_hi was SREAL_MAX_SIG before increment
                 so now last bit is zero.  */
              x->sig_hi >>= 1;
              x->sig_lo >>= 1;
              x->exp++;
            }
        }

      /* Check overflow.  */
      if (x->exp > SREAL_MAX_EXP)
        {
          x->exp = SREAL_MAX_EXP;
          x->sig_hi = SREAL_MAX_SIG;
          x->sig_lo = SREAL_MAX_SIG;
        }
    }
#else
  if (x->sig == 0)
    {
      /* Canonical representation of zero.  */
      x->exp = -SREAL_MAX_EXP;
    }
  else if (x->sig < SREAL_MIN_SIG)
    {
      /* Shift left until the significand reaches SREAL_MIN_SIG.  */
      do
        {
          x->sig <<= 1;
          x->exp--;
        }
      while (x->sig < SREAL_MIN_SIG);

      /* Check underflow.  */
      if (x->exp < -SREAL_MAX_EXP)
        {
          x->exp = -SREAL_MAX_EXP;
          x->sig = 0;
        }
    }
  else if (x->sig > SREAL_MAX_SIG)
    {
      int last_bit;
      /* Shift right, remembering the last bit shifted out so the
         result can be rounded to nearest.  */
      do
        {
          last_bit = x->sig & 1;
          x->sig >>= 1;
          x->exp++;
        }
      while (x->sig > SREAL_MAX_SIG);

      /* Round the number.  */
      x->sig += last_bit;
      if (x->sig > SREAL_MAX_SIG)
        {
          /* Rounding pushed the significand over the limit; x->sig was
             SREAL_MAX_SIG before the increment, so the low bit is zero
             and this shift loses nothing.  */
          x->sig >>= 1;
          x->exp++;
        }

      /* Check overflow.  */
      if (x->exp > SREAL_MAX_EXP)
        {
          x->exp = SREAL_MAX_EXP;
          x->sig = SREAL_MAX_SIG;
        }
    }
#endif
}
265
 
266
/* Set *R to SIG * 2 ^ EXP.  Return R.  */
267
 
268
sreal *
269
sreal_init (sreal *r, unsigned HOST_WIDE_INT sig, signed int exp)
270
{
271
#if SREAL_PART_BITS < 32
272
  r->sig_lo = 0;
273
  r->sig_hi = sig;
274
  r->exp = exp - 16;
275
#else
276
  r->sig = sig;
277
  r->exp = exp;
278
#endif
279
  normalize (r);
280
  return r;
281
}
282
 
283
/* Return integer value of *R.  */
284
 
285
HOST_WIDE_INT
286
sreal_to_int (sreal *r)
287
{
288
#if SREAL_PART_BITS < 32
289
  if (r->exp <= -SREAL_BITS)
290
    return 0;
291
  if (r->exp >= 0)
292
    return MAX_HOST_WIDE_INT;
293
  return ((r->sig_hi << SREAL_PART_BITS) + r->sig_lo) >> -r->exp;
294
#else
295
  if (r->exp <= -SREAL_BITS)
296
    return 0;
297
  if (r->exp >= SREAL_PART_BITS)
298
    return MAX_HOST_WIDE_INT;
299
  if (r->exp > 0)
300
    return r->sig << r->exp;
301
  if (r->exp < 0)
302
    return r->sig >> -r->exp;
303
  return r->sig;
304
#endif
305
}
306
 
307
/* Compare *A and *B. Return -1 if *A < *B, 1 if *A > *B and 0 if *A == *B.  */
308
 
309
int
310
sreal_compare (sreal *a, sreal *b)
311
{
312
  if (a->exp > b->exp)
313
    return 1;
314
  if (a->exp < b->exp)
315
    return -1;
316
#if SREAL_PART_BITS < 32
317
  if (a->sig_hi > b->sig_hi)
318
    return 1;
319
  if (a->sig_hi < b->sig_hi)
320
    return -1;
321
  if (a->sig_lo > b->sig_lo)
322
    return 1;
323
  if (a->sig_lo < b->sig_lo)
324
    return -1;
325
#else
326
  if (a->sig > b->sig)
327
    return 1;
328
  if (a->sig < b->sig)
329
    return -1;
330
#endif
331
  return 0;
332
}
333
 
334
/* *R = *A + *B.  Return R.  */

sreal *
sreal_add (sreal *r, sreal *a, sreal *b)
{
  int dexp;
  sreal tmp;
  sreal *bb;

  /* Make A the larger operand so B is the one that gets shifted.  */
  if (sreal_compare (a, b) < 0)
    {
      sreal *swap;
      swap = a;
      a = b;
      b = swap;
    }

  dexp = a->exp - b->exp;
  r->exp = a->exp;
  /* B is too small to affect A's significand at all; the result is A.  */
  if (dexp > SREAL_BITS)
    {
#if SREAL_PART_BITS < 32
      r->sig_hi = a->sig_hi;
      r->sig_lo = a->sig_lo;
#else
      r->sig = a->sig;
#endif
      return r;
    }

  /* Align B's significand to A's exponent (with rounding) unless the
     exponents already match.  */
  if (dexp == 0)
    bb = b;
  else
    {
      copy (&tmp, b);
      shift_right (&tmp, dexp);
      bb = &tmp;
    }

#if SREAL_PART_BITS < 32
  /* Two-part addition with carry from the low part into the high part.  */
  r->sig_hi = a->sig_hi + bb->sig_hi;
  r->sig_lo = a->sig_lo + bb->sig_lo;
  if (r->sig_lo & ((uhwi) 1 << SREAL_PART_BITS))
    {
      r->sig_hi++;
      r->sig_lo -= (uhwi) 1 << SREAL_PART_BITS;
    }
#else
  r->sig = a->sig + bb->sig;
#endif
  normalize (r);
  return r;
}
387
 
388
/* *R = *A - *B.  Return R.  *A must not be smaller than *B (asserted);
   negative results are not representable.  */

sreal *
sreal_sub (sreal *r, sreal *a, sreal *b)
{
  int dexp;
  sreal tmp;
  sreal *bb;

  gcc_assert (sreal_compare (a, b) >= 0);

  dexp = a->exp - b->exp;
  r->exp = a->exp;
  /* B is too small to affect A's significand at all; the result is A.  */
  if (dexp > SREAL_BITS)
    {
#if SREAL_PART_BITS < 32
      r->sig_hi = a->sig_hi;
      r->sig_lo = a->sig_lo;
#else
      r->sig = a->sig;
#endif
      return r;
    }
  /* Align B's significand to A's exponent (with rounding) unless the
     exponents already match.  */
  if (dexp == 0)
    bb = b;
  else
    {
      copy (&tmp, b);
      shift_right (&tmp, dexp);
      bb = &tmp;
    }

#if SREAL_PART_BITS < 32
  /* Two-part subtraction; borrow from the high part when the low part
     of A is smaller than the low part of B.  */
  if (a->sig_lo < bb->sig_lo)
    {
      r->sig_hi = a->sig_hi - bb->sig_hi - 1;
      r->sig_lo = a->sig_lo + ((uhwi) 1 << SREAL_PART_BITS) - bb->sig_lo;
    }
  else
    {
      r->sig_hi = a->sig_hi - bb->sig_hi;
      r->sig_lo = a->sig_lo - bb->sig_lo;
    }
#else
  r->sig = a->sig - bb->sig;
#endif
  normalize (r);
  return r;
}
437
 
438
/* *R = *A * *B.  Return R.  If either operand is (unnormalized) zero,
   the result is zero.  */

sreal *
sreal_mul (sreal *r, sreal *a, sreal *b)
{
#if SREAL_PART_BITS < 32
  /* A normalized nonzero number has sig_hi >= SREAL_MIN_SIG, so a
     smaller high part means the operand is zero.  */
  if (a->sig_hi < SREAL_MIN_SIG || b->sig_hi < SREAL_MIN_SIG)
    {
      r->sig_lo = 0;
      r->sig_hi = 0;
      r->exp = -SREAL_MAX_EXP;
    }
  else
    {
      unsigned HOST_WIDE_INT tmp1, tmp2, tmp3;
      if (sreal_compare (a, b) < 0)
        {
          sreal *swap;
          swap = a;
          a = b;
          b = swap;
        }

      /* The product of the two SREAL_PART_BITS-shifted significands
         carries an extra factor of 2^SREAL_PART_BITS.  */
      r->exp = a->exp + b->exp + SREAL_PART_BITS;

      /* Schoolbook multiplication of the four part-products.  */
      tmp1 = a->sig_lo * b->sig_lo;
      tmp2 = a->sig_lo * b->sig_hi;
      tmp3 = a->sig_hi * b->sig_lo + (tmp1 >> SREAL_PART_BITS);

      /* Accumulate the high halves of the cross products into sig_hi.  */
      r->sig_hi = a->sig_hi * b->sig_hi;
      r->sig_hi += (tmp2 >> SREAL_PART_BITS) + (tmp3 >> SREAL_PART_BITS);
      /* Sum the low halves of the cross products; any overflow past
         SREAL_PART_BITS carries into sig_hi.  */
      tmp2 &= ((uhwi) 1 << SREAL_PART_BITS) - 1;
      tmp3 &= ((uhwi) 1 << SREAL_PART_BITS) - 1;
      tmp1 = tmp2 + tmp3;

      r->sig_lo = tmp1 & (((uhwi) 1 << SREAL_PART_BITS) - 1);
      r->sig_hi += tmp1 >> SREAL_PART_BITS;

      normalize (r);
    }
#else
  /* A normalized nonzero number has sig >= SREAL_MIN_SIG, so a smaller
     significand means the operand is zero.  */
  if (a->sig < SREAL_MIN_SIG || b->sig < SREAL_MIN_SIG)
    {
      r->sig = 0;
      r->exp = -SREAL_MAX_EXP;
    }
  else
    {
      /* Both significands use only half of the available bits, so the
         product cannot overflow.  */
      r->sig = a->sig * b->sig;
      r->exp = a->exp + b->exp;
      normalize (r);
    }
#endif
  return r;
}
493
 
494
/* *R = *A / *B.  Return R.  *B must be nonzero (asserted); a zero *A
   yields a zero result.  */

sreal *
sreal_div (sreal *r, sreal *a, sreal *b)
{
#if SREAL_PART_BITS < 32
  unsigned HOST_WIDE_INT tmp, tmp1, tmp2;

  /* A normalized nonzero divisor has sig_hi >= SREAL_MIN_SIG.  */
  gcc_assert (b->sig_hi >= SREAL_MIN_SIG);
  if (a->sig_hi < SREAL_MIN_SIG)
    {
      /* Zero dividend: result is zero.  */
      r->sig_hi = 0;
      r->sig_lo = 0;
      r->exp = -SREAL_MAX_EXP;
    }
  else
    {
      /* Since division by the whole number is pretty ugly to write
         we are dividing by first 3/4 of bits of number.  */

      /* Full dividend; divisor rounded to its top 3/4 bits.  */
      tmp1 = (a->sig_hi << SREAL_PART_BITS) + a->sig_lo;
      tmp2 = ((b->sig_hi << (SREAL_PART_BITS / 2))
              + (b->sig_lo >> (SREAL_PART_BITS / 2)));
      if (b->sig_lo & ((uhwi) 1 << ((SREAL_PART_BITS / 2) - 1)))
        tmp2++;

      /* Produce the quotient in three half-part steps, feeding each
         remainder (scaled up by half a part) into the next step.  */
      r->sig_lo = 0;
      tmp = tmp1 / tmp2;
      tmp1 = (tmp1 % tmp2) << (SREAL_PART_BITS / 2);
      r->sig_hi = tmp << SREAL_PART_BITS;

      tmp = tmp1 / tmp2;
      tmp1 = (tmp1 % tmp2) << (SREAL_PART_BITS / 2);
      r->sig_hi += tmp << (SREAL_PART_BITS / 2);

      tmp = tmp1 / tmp2;
      r->sig_hi += tmp;

      /* Compensate for the scaling applied to dividend and divisor.  */
      r->exp = a->exp - b->exp - SREAL_BITS - SREAL_PART_BITS / 2;
      normalize (r);
    }
#else
  gcc_assert (b->sig != 0);
  /* Scale the dividend up so the quotient keeps SREAL_PART_BITS of
     precision, and compensate in the exponent.  */
  r->sig = (a->sig << SREAL_PART_BITS) / b->sig;
  r->exp = a->exp - b->exp - SREAL_PART_BITS;
  normalize (r);
#endif
  return r;
}

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.