OpenCores Subversion repository or1k_old (https://opencores.org/ocsvn/or1k_old/or1k_old/trunk)
File: mp3/sw/mad-xess/libmad/fixed.h (rev 291, author simons)
/*
 * mad - MPEG audio decoder
 * Copyright (C) 2000-2001 Robert Leslie
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * $Id: fixed.h,v 1.3 2001-11-06 17:01:28 simons Exp $
 */

# ifndef LIBMAD_FIXED_H
# define LIBMAD_FIXED_H

# if SIZEOF_INT >= 4
typedef   signed int mad_fixed_t;

typedef   signed int mad_fixed64hi_t;
typedef unsigned int mad_fixed64lo_t;
# else
typedef   signed long mad_fixed_t;

typedef   signed long mad_fixed64hi_t;
typedef unsigned long mad_fixed64lo_t;
# endif

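/*
 * SIZEOF_INT is assumed to come from the build system (for an
 * autoconf-based build, config.h via AC_CHECK_SIZEOF(int)); whichever
 * branch is taken is meant to select a type of at least 32 bits to
 * hold the Q3.28 format described below.
 */
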
/*
 * Fixed-point format: 0xABBBBBBB
 * A == whole part      (sign + 3 bits)
 * B == fractional part (28 bits)
 *
 * Values are signed two's complement, so the effective range is:
 * 0x80000000 to 0x7fffffff
 *       -8.0 to +7.9999999962747097015380859375
 *
 * The smallest representable value is:
 * 0x00000001 == 0.0000000037252902984619140625 (i.e. about 3.725e-9)
 *
 * 28 bits of fractional accuracy represent about
 * 8.6 digits of decimal accuracy.
 *
 * Fixed-point numbers can be added or subtracted as normal integers,
 * but multiplication requires shifting the 64-bit result from 56
 * fractional bits back to 28 (with rounding).
 *
 * Changing the definition of MAD_F_FRACBITS is only partially
 * supported, and must be done with care.
 */

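/*
 * For reference, a portable C sketch of the Q3.28 multiply described
 * above, assuming a 64-bit "signed long long"; the helper name is
 * illustrative, not part of libmad's API. The full product carries
 * 56 fractional bits; rounding at bit 27 and shifting right by 28
 * brings it back to Q3.28.
 */
#if 0
static inline mad_fixed_t q28_mul_sketch(mad_fixed_t x, mad_fixed_t y)
{
  /* 64-bit product with 56 fractional bits */
  signed long long product = (signed long long) x * y;

  /* round at bit 27, then drop 28 fractional bits */
  return (mad_fixed_t) ((product + (1LL << 27)) >> 28);
}
#endif
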
# define MAD_F_FRACBITS		28

# if MAD_F_FRACBITS == 28
#  define MAD_F(x)		((mad_fixed_t) (x##L))
# else
#  if MAD_F_FRACBITS < 28
#   warning "MAD_F_FRACBITS < 28"
#   define MAD_F(x)		((mad_fixed_t)  \
				 (((x##L) +  \
				   (1L << (28 - MAD_F_FRACBITS - 1))) >>  \
				  (28 - MAD_F_FRACBITS)))
#  elif MAD_F_FRACBITS > 28
#   error "MAD_F_FRACBITS > 28 not currently supported"
#   define MAD_F(x)		((mad_fixed_t)  \
				 ((x##L) << (MAD_F_FRACBITS - 28)))
#  endif
# endif

# define MAD_F_MIN		((mad_fixed_t) -0x80000000L)
# define MAD_F_MAX		((mad_fixed_t) +0x7fffffffL)

# define MAD_F_ONE		MAD_F(0x10000000)

# ifndef EMBED
#  define mad_f_tofixed(x)	((mad_fixed_t)  \
				 ((x) * (double) (1L << MAD_F_FRACBITS) + 0.5))
#  define mad_f_todouble(x)	((double)  \
				 ((x) / (double) (1L << MAD_F_FRACBITS)))
# endif

# define mad_f_intpart(x)	((x) >> MAD_F_FRACBITS)
# define mad_f_fracpart(x)	((x) & ((1L << MAD_F_FRACBITS) - 1))
				/* (x should be positive) */

# define mad_f_fromint(x)	((x) << MAD_F_FRACBITS)

# define mad_f_add(x, y)	((x) + (y))
# define mad_f_sub(x, y)	((x) - (y))

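/*
 * A minimal usage sketch for the macros above (hosted build with EMBED
 * undefined, MAD_F_FRACBITS == 28); the function name is illustrative.
 */
#if 0
static void fixed_conversion_sketch(void)
{
  mad_fixed_t one  = MAD_F_ONE;             /* 0x10000000 == 1.0 */
  mad_fixed_t half = mad_f_tofixed(0.5);    /* 0x08000000 */
  mad_fixed_t six  = mad_f_fromint(6);      /* 0x60000000 */
  double      back = mad_f_todouble(half);  /* 0.5 */
  mad_fixed_t sum  = mad_f_add(one, half);  /* 0x18000000 == 1.5 */

  (void) six; (void) back; (void) sum;
}
#endif
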
# if defined(FPM_64BIT)

/*
 * This version should be the most accurate if 64-bit (long long) types are
 * supported by the compiler, although it may not be the most efficient.
 */
#  if defined(OPT_ACCURACY)
#   define mad_f_mul(x, y)  \
    ((mad_fixed_t)  \
     ((((signed long long) (x) * (y)) +  \
       (1L << (MAD_F_SCALEBITS - 1))) >> MAD_F_SCALEBITS))
#  else
#   define mad_f_mul(x, y)  \
    ((mad_fixed_t) (((signed long long) (x) * (y)) >> MAD_F_SCALEBITS))
#  endif

#  define MAD_F_SCALEBITS  MAD_F_FRACBITS

/* --- Intel --------------------------------------------------------------- */

# elif defined(FPM_INTEL)

/*
 * This Intel version is fast and accurate; the disposition of the least
 * significant bit depends on OPT_ACCURACY via mad_f_scale64().
 */
#  define MAD_F_MLX(hi, lo, x, y)  \
    asm ("imull %3"  \
         : "=a" (lo), "=d" (hi)  \
         : "%a" (x), "rm" (y)  \
         : "cc")

#  if defined(OPT_ACCURACY)
/*
 * This gives best accuracy but is not very fast.
 */
#   define MAD_F_MLA(hi, lo, x, y)  \
    ({ mad_fixed64hi_t __hi;  \
       mad_fixed64lo_t __lo;  \
       MAD_F_MLX(__hi, __lo, (x), (y));  \
       asm ("addl %2,%0\n\t"  \
            "adcl %3,%1"  \
            : "=rm" (lo), "=rm" (hi)  \
            : "r" (__lo), "r" (__hi), "0" (lo), "1" (hi)  \
            : "cc");  \
    })
#  endif  /* OPT_ACCURACY */

#  if defined(OPT_ACCURACY)
/*
 * Surprisingly, this is faster than SHRD followed by ADC.
 */
#   define mad_f_scale64(hi, lo)  \
    ({ mad_fixed64hi_t __hi_;  \
       mad_fixed64lo_t __lo_;  \
       mad_fixed_t __result;  \
       asm ("addl %4,%2\n\t"  \
            "adcl %5,%3"  \
            : "=rm" (__lo_), "=rm" (__hi_)  \
            : "0" (lo), "1" (hi),  \
              "ir" (1L << (MAD_F_SCALEBITS - 1)), "ir" (0)  \
            : "cc");  \
       asm ("shrdl %3,%2,%1"  \
            : "=rm" (__result)  \
            : "0" (__lo_), "r" (__hi_), "I" (MAD_F_SCALEBITS)  \
            : "cc");  \
       __result;  \
    })
#  else
#   define mad_f_scale64(hi, lo)  \
    ({ mad_fixed_t __result;  \
       asm ("shrdl %3,%2,%1"  \
            : "=rm" (__result)  \
            : "0" (lo), "r" (hi), "I" (MAD_F_SCALEBITS)  \
            : "cc");  \
       __result;  \
    })
#  endif  /* OPT_ACCURACY */

#  define MAD_F_SCALEBITS  MAD_F_FRACBITS

/* --- ARM ----------------------------------------------------------------- */

# elif defined(FPM_ARM)

/*
 * This ARM V4 version is as accurate as FPM_64BIT but much faster. The
 * least significant bit is properly rounded at no CPU cycle cost!
 */
# if 1
/*
 * There's a bug somewhere, possibly in the compiler, that sometimes makes
 * this necessary instead of the default implementation via MAD_F_MLX and
 * mad_f_scale64. It may be related to the use (or lack) of
 * -finline-functions and/or -fstrength-reduce.
 *
 * This is also apparently faster than MAD_F_MLX/mad_f_scale64.
 */
#  define mad_f_mul(x, y)  \
    ({ mad_fixed64hi_t __hi;  \
       mad_fixed64lo_t __lo;  \
       mad_fixed_t __result;  \
       asm ("smull      %0, %1, %3, %4\n\t"  \
            "movs       %0, %0, lsr %5\n\t"  \
            "adc        %2, %0, %1, lsl %6"  \
            : "=&r" (__lo), "=&r" (__hi), "=r" (__result)  \
            : "%r" (x), "r" (y),  \
              "M" (MAD_F_SCALEBITS), "M" (32 - MAD_F_SCALEBITS)  \
            : "cc");  \
       __result;  \
    })
# endif

#  define MAD_F_MLX(hi, lo, x, y)  \
    asm ("smull %0, %1, %2, %3"  \
         : "=&r" (lo), "=&r" (hi)  \
         : "%r" (x), "r" (y))

#  define MAD_F_MLA(hi, lo, x, y)  \
    asm ("smlal %0, %1, %2, %3"  \
         : "+r" (lo), "+r" (hi)  \
         : "%r" (x), "r" (y))

#  define mad_f_scale64(hi, lo)  \
    ({ mad_fixed_t __result;  \
       asm ("movs       %0, %1, lsr %3\n\t"  \
            "adc        %0, %0, %2, lsl %4"  \
            : "=r" (__result)  \
            : "r" (lo), "r" (hi),  \
              "M" (MAD_F_SCALEBITS), "M" (32 - MAD_F_SCALEBITS)  \
            : "cc");  \
       __result;  \
    })

#  define MAD_F_SCALEBITS  MAD_F_FRACBITS

/* --- MIPS ---------------------------------------------------------------- */

# elif defined(FPM_MIPS)

/*
 * This MIPS version is fast and accurate; the disposition of the least
 * significant bit depends on OPT_ACCURACY via mad_f_scale64().
 */
#  define MAD_F_MLX(hi, lo, x, y)  \
    asm ("mult  %2,%3"  \
         : "=l" (lo), "=h" (hi)  \
         : "%r" (x), "r" (y))

# if defined(HAVE_MADD_ASM)
#  define MAD_F_MLA(hi, lo, x, y)  \
    asm ("madd  %2,%3"  \
         : "+l" (lo), "+h" (hi)  \
         : "%r" (x), "r" (y))
# elif defined(HAVE_MADD16_ASM)
/*
 * This loses significant accuracy due to the 16-bit integer limit in the
 * multiply/accumulate instruction.
 */
#  define MAD_F_ML0(hi, lo, x, y)  \
    asm ("mult  %2,%3"  \
         : "=l" (lo), "=h" (hi)  \
         : "%r" ((x) >> 12), "r" ((y) >> 16))
#  define MAD_F_MLA(hi, lo, x, y)  \
    asm ("madd16        %2,%3"  \
         : "+l" (lo), "+h" (hi)  \
         : "%r" ((x) >> 12), "r" ((y) >> 16))
#  define MAD_F_MLZ(hi, lo)  ((mad_fixed_t) (lo))
# endif

# if defined(OPT_SPEED)
#  define mad_f_scale64(hi, lo)  \
    ((mad_fixed_t) ((hi) << (32 - MAD_F_SCALEBITS)))
#  define MAD_F_SCALEBITS  MAD_F_FRACBITS
# endif

/* --- SPARC --------------------------------------------------------------- */

# elif defined(FPM_SPARC)

/*
 * This SPARC V8 version is fast and accurate; the disposition of the least
 * significant bit depends on OPT_ACCURACY via mad_f_scale64().
 */
#  define MAD_F_MLX(hi, lo, x, y)  \
    asm ("smul %2, %3, %0\n\t"  \
         "rd %%y, %1"  \
         : "=r" (lo), "=r" (hi)  \
         : "%r" (x), "rI" (y))

/* --- PowerPC ------------------------------------------------------------- */

# elif defined(FPM_PPC)

/*
 * This PowerPC version is tuned for the 4xx embedded processors. It is
 * effectively a tuned version of FPM_64BIT. It is a little faster and just
 * as accurate. The disposition of the least significant bit depends on
 * OPT_ACCURACY via mad_f_scale64().
 */
#  define MAD_F_MLX(hi, lo, x, y)  \
    asm ("mulhw %1, %2, %3\n\t"  \
         "mullw %0, %2, %3"  \
         : "=&r" (lo), "=&r" (hi)  \
         : "%r" (x), "r" (y))

#  define MAD_F_MLA(hi, lo, x, y)  \
    ({ mad_fixed64hi_t __hi;  \
       mad_fixed64lo_t __lo;  \
       MAD_F_MLX(__hi, __lo, (x), (y));  \
       asm ("addc %0, %2, %3\n\t"  \
            "adde %1, %4, %5"  \
            : "=r" (lo), "=r" (hi)  \
            : "%r" (__lo), "0" (lo), "%r" (__hi), "1" (hi));  \
    })

#  if defined(OPT_ACCURACY)
/*
 * This is accurate and ~2 - 2.5 times slower than the unrounded version.
 *
 * The __volatile__ qualifiers improve the generated code by another 5%
 * (fewer spills to memory); eventually they should be removed.
 */
#   define mad_f_scale64(hi, lo)  \
    ({ mad_fixed_t __result;  \
       mad_fixed64hi_t __hi_;  \
       mad_fixed64lo_t __lo_;  \
       asm __volatile__ ("addc %0, %2, %4\n\t"  \
                         "addze %1, %3"  \
            : "=r" (__lo_), "=r" (__hi_)  \
            : "r" (lo), "r" (hi), "r" (1 << (MAD_F_SCALEBITS - 1)));  \
       asm __volatile__ ("rlwinm %0, %2,32-%3,0,%3-1\n\t"  \
                         "rlwimi %0, %1,32-%3,%3,31"  \
            : "=&r" (__result)  \
            : "r" (__lo_), "r" (__hi_), "I" (MAD_F_SCALEBITS));  \
       __result;  \
    })
#  else
#   define mad_f_scale64(hi, lo)  \
    ({ mad_fixed_t __result;  \
       asm ("rlwinm %0, %2,32-%3,0,%3-1\n\t"  \
            "rlwimi %0, %1,32-%3,%3,31"  \
            : "=r" (__result)  \
            : "r" (lo), "r" (hi), "I" (MAD_F_SCALEBITS));  \
       __result;  \
    })
#  endif  /* OPT_ACCURACY */

#  define MAD_F_SCALEBITS  MAD_F_FRACBITS

/* --- OR32 ---------------------------------------------------------------- */

# elif defined(FPM_OR32)

/*
 * This OpenRISC version uses the l.mac/l.macrc multiply-accumulate
 * instructions. The hardware MAC unit holds the running product, so the
 * hi/lo arguments are unused; the macros must always be invoked in the
 * following sequence:
 *
 *   MAD_F_ML0
 *   MAD_F_MLA
 *   ...
 *   MAD_F_MLA
 *   MAD_F_MLZ
 *
 * (Note that the generic mad_f_mul() fallback below still assumes that
 * MAD_F_MLX writes hi/lo, which this version does not.)
 */
#  define MAD_F_MLX(hi, lo, x, y)  \
    asm volatile ("l.mac %0,%1" : : "%r" (x), "r" (y))

#  define MAD_F_MLA(hi, lo, x, y)  MAD_F_MLX(hi, lo, x, y)

#  define MAD_F_ML0(hi, lo, x, y)  MAD_F_MLX(hi, lo, x, y)

#  define MAD_F_MLZ(hi, lo)  \
    ({ mad_fixed_t __result;  \
       asm volatile ("l.macrc %0" : "=r" (__result));  \
       __result;  \
    })

#  define MAD_F_SCALEBITS  MAD_F_FRACBITS

/* --- Default ------------------------------------------------------------- */

# elif defined(FPM_DEFAULT)

/*
 * This version is the most portable but it loses significant accuracy.
 * Furthermore, accuracy is biased against the second argument, so care
 * should be taken when ordering operands.
 *
 * The scale factors are constant as this is not used with SSO.
 *
 * Pre-rounding is required to stay within the limits of compliance.
 */
#  define mad_f_mul(x, y)	((((x) + (1L << 11)) >> 12) *  \
				 (((y) + (1L << 15)) >> 16))
/*#  define mad_f_mul(x, y) ((x)+(y))*/
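
/*
 * Worked example for the approximation above: with x == y == MAD_F_ONE
 * (0x10000000), (x + (1L << 11)) >> 12 == 0x10000 and
 * (y + (1L << 15)) >> 16 == 0x1000, so the product is 0x10000000 ==
 * MAD_F_ONE, as expected. The low 12 bits of x and the low 16 bits of y
 * are rounded away, which is why accuracy is biased against the second
 * argument.
 */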
/* --- Default 16 ---------------------------------------------------------- */

# elif defined(FPM_DEFAULT16)

/*
 * This version is the most portable but it loses significant accuracy.
 * Furthermore, accuracy is biased against the second argument, so care
 * should be taken when ordering operands.
 *
 * The scale factors are constant as this is not used with SSO.
 *
 * Pre-rounding is required to stay within the limits of compliance.
 */
#  define mad_f_mul(x, y)	(((((x) + (1L << 15)) >> 16) *  \
				  (((y) + (1L << 15)) >> 16)) << 4)

/* ------------------------------------------------------------------------- */

# else
#  error "no FPM selected"
# endif

/* default implementations */

# if !defined(mad_f_mul)
#  define mad_f_mul(x, y)  \
    ({ mad_fixed64hi_t __hi;  \
       mad_fixed64lo_t __lo;  \
       MAD_F_MLX(__hi, __lo, (x), (y));  \
       mad_f_scale64(__hi, __lo);  \
    })
# endif

# if !defined(MAD_F_MLA)
#  define MAD_F_ML0(hi, lo, x, y)	((lo)  = mad_f_mul((x), (y)))
#  define MAD_F_MLA(hi, lo, x, y)	((lo) += mad_f_mul((x), (y)))
#  define MAD_F_MLZ(hi, lo)		((void) (hi), (mad_fixed_t) (lo))
# endif

# if !defined(MAD_F_ML0)
#  define MAD_F_ML0(hi, lo, x, y)	MAD_F_MLX((hi), (lo), (x), (y))
# endif

# if !defined(MAD_F_MLZ)
#  define MAD_F_MLZ(hi, lo)		mad_f_scale64((hi), (lo))
# endif

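/*
 * Typical use of the multiply/accumulate macros, whichever definitions
 * are in effect: start the accumulator with MAD_F_ML0, accumulate with
 * MAD_F_MLA, and read back the scaled result with MAD_F_MLZ. A minimal
 * sketch (the function name is illustrative):
 */
#if 0
static mad_fixed_t dot_product_sketch(mad_fixed_t const *a,
                                      mad_fixed_t const *b, unsigned int n)
{
  mad_fixed64hi_t hi;
  mad_fixed64lo_t lo;
  unsigned int i;

  MAD_F_ML0(hi, lo, a[0], b[0]);      /* first product starts the sum */
  for (i = 1; i < n; ++i)
    MAD_F_MLA(hi, lo, a[i], b[i]);    /* accumulate remaining products */

  return MAD_F_MLZ(hi, lo);           /* finish and rescale to Q3.28 */
}
#endif
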
# if !defined(mad_f_scale64)
#  if defined(OPT_ACCURACY)
#   define mad_f_scale64(hi, lo)  \
    ((((mad_fixed_t)  \
       (((hi) << (32 - (MAD_F_SCALEBITS - 1))) |  \
	((lo) >> (MAD_F_SCALEBITS - 1)))) + 1) >> 1)
#  else
#   define mad_f_scale64(hi, lo)  \
    ((mad_fixed_t)  \
     (((hi) << (32 - MAD_F_SCALEBITS)) |  \
      ((lo) >> MAD_F_SCALEBITS)))
#  endif
#  define MAD_F_SCALEBITS  MAD_F_FRACBITS
# endif

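/*
 * Sanity check for the default mad_f_scale64(): squaring MAD_F_ONE
 * gives the 64-bit product hi:lo == 0x01000000:0x00000000 (1 << 56).
 * With MAD_F_SCALEBITS == 28, ((hi) << 4) | ((lo) >> 28) == 0x10000000
 * == MAD_F_ONE, as required.
 */
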
/* miscellaneous C routines */

mad_fixed_t mad_f_abs(mad_fixed_t);

# endif
