OpenCores
URL https://opencores.org/ocsvn/openrisc_me/openrisc_me/trunk

Subversion Repositories openrisc_me

[/] [openrisc/] [trunk/] [gnu-src/] [gcc-4.2.2/] [gcc/] [config/] [pa/] [milli64.S] - Blame information for rev 455

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
/* 32 and 64-bit millicode, original author Hewlett-Packard
   adapted for gcc by Paul Bame <bame@debian.org>
   and Alan Modra <amodra@bigpond.net.au>.

   Copyright 2001, 2002, 2003, 2007 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* When assembling the 64-bit (pa64) variant, select the PA 2.0
   wide-mode architecture level.  */
#ifdef pa64
        .level  2.0w
#endif

/* Hardware General Registers.  Alias each bare name rN to the
   assembler register %rN so the code below can use the short forms.  */
r0:     .reg    %r0
r1:     .reg    %r1
r2:     .reg    %r2
r3:     .reg    %r3
r4:     .reg    %r4
r5:     .reg    %r5
r6:     .reg    %r6
r7:     .reg    %r7
r8:     .reg    %r8
r9:     .reg    %r9
r10:    .reg    %r10
r11:    .reg    %r11
r12:    .reg    %r12
r13:    .reg    %r13
r14:    .reg    %r14
r15:    .reg    %r15
r16:    .reg    %r16
r17:    .reg    %r17
r18:    .reg    %r18
r19:    .reg    %r19
r20:    .reg    %r20
r21:    .reg    %r21
r22:    .reg    %r22
r23:    .reg    %r23
r24:    .reg    %r24
r25:    .reg    %r25
r26:    .reg    %r26
r27:    .reg    %r27
r28:    .reg    %r28
r29:    .reg    %r29
r30:    .reg    %r30
r31:    .reg    %r31
/* Hardware Space Registers.  */
sr0:    .reg    %sr0
sr1:    .reg    %sr1
sr2:    .reg    %sr2
sr3:    .reg    %sr3
sr4:    .reg    %sr4
sr5:    .reg    %sr5
sr6:    .reg    %sr6
sr7:    .reg    %sr7

/* Hardware Floating Point Registers.  */
fr0:    .reg    %fr0
fr1:    .reg    %fr1
fr2:    .reg    %fr2
fr3:    .reg    %fr3
fr4:    .reg    %fr4
fr5:    .reg    %fr5
fr6:    .reg    %fr6
fr7:    .reg    %fr7
fr8:    .reg    %fr8
fr9:    .reg    %fr9
fr10:   .reg    %fr10
fr11:   .reg    %fr11
fr12:   .reg    %fr12
fr13:   .reg    %fr13
fr14:   .reg    %fr14
fr15:   .reg    %fr15

/* Hardware Control Registers.  */
cr11:   .reg    %cr11
sar:    .reg    %cr11   /* Shift Amount Register */
/* Software Architecture General Registers.  These encode the PA
   procedure-calling convention: argument, return-value, stack and
   data pointers, and the millicode return pointer (which differs
   between the 32-bit and 64-bit conventions).  */
rp:     .reg    r2      /* return pointer */
#ifdef pa64
mrp:    .reg    r2      /* millicode return pointer */
#else
mrp:    .reg    r31     /* millicode return pointer */
#endif
ret0:   .reg    r28     /* return value */
ret1:   .reg    r29     /* return value (high part of double) */
sp:     .reg    r30     /* stack pointer */
dp:     .reg    r27     /* data pointer */
arg0:   .reg    r26     /* argument */
arg1:   .reg    r25     /* argument or high part of double argument */
arg2:   .reg    r24     /* argument */
arg3:   .reg    r23     /* argument or high part of double argument */

/* Software Architecture Space Registers.  */
/*              sr0     ; return link from BLE */
sret:   .reg    sr1     /* return value */
sarg:   .reg    sr1     /* argument */
/*              sr4     ; PC SPACE tracker */
/*              sr5     ; process private data */

/* Frame Offsets (millicode convention!)  Used when calling other
   millicode routines.  Stack unwinding is dependent upon these
   definitions.  */
r31_slot:       .equ    -20     /* "current RP" slot */
sr0_slot:       .equ    -16     /* "static link" slot */
#if defined(pa64)
mrp_slot:       .equ    -16     /* "current RP" slot */
psp_slot:       .equ    -8      /* "previous SP" slot */
#else
mrp_slot:       .equ    -20     /* "current RP" slot (replacing "r31_slot") */
#endif

/* Shorthand for defining assemble-time constants and register aliases.  */
#define DEFINE(name,value)name: .EQU    value
#define RDEFINE(name,value)name:        .REG    value

/* Millicode branch/return macros.  When built as external millicode
   (milliext), control transfers between routines use inter-space
   branches through sr7 (BE/BLE) and returns go through sr0; otherwise
   plain same-space branches (B/BL) and BV returns via mrp are used.
   The ",n" forms nullify the delay slot.  */
#ifdef milliext
#define MILLI_BE(lbl)   BE    lbl(sr7,r0)
#define MILLI_BEN(lbl)  BE,n  lbl(sr7,r0)
#define MILLI_BLE(lbl)  BLE   lbl(sr7,r0)
#define MILLI_BLEN(lbl) BLE,n lbl(sr7,r0)
#define MILLIRETN       BE,n  0(sr0,mrp)
#define MILLIRET        BE    0(sr0,mrp)
#define MILLI_RETN      BE,n  0(sr0,mrp)
#define MILLI_RET       BE    0(sr0,mrp)
#else
#define MILLI_BE(lbl)   B     lbl
#define MILLI_BEN(lbl)  B,n   lbl
#define MILLI_BLE(lbl)  BL    lbl,mrp
#define MILLI_BLEN(lbl) BL,n  lbl,mrp
#define MILLIRETN       BV,n  0(mrp)
#define MILLIRET        BV    0(mrp)
#define MILLI_RETN      BV,n  0(mrp)
#define MILLI_RET       BV    0(mrp)
#endif

/* Token pasting, for both ANSI and pre-ANSI preprocessors.  */
#ifdef __STDC__
#define CAT(a,b)        a##b
#else
#define CAT(a,b)        a/**/b
#endif

/* Object-format-specific section and symbol macros.  The "!" character
   is the assembler statement separator, used to put a directive after
   a label on one logical line.  */
#ifdef ELF
#define SUBSPA_MILLI     .section .text
#define SUBSPA_MILLI_DIV .section .text.div,"ax",@progbits! .align 16
#define SUBSPA_MILLI_MUL .section .text.mul,"ax",@progbits! .align 16
#define ATTR_MILLI
#define SUBSPA_DATA      .section .data
#define ATTR_DATA
#define GLOBAL           $global$
#define GSYM(sym)        !sym:
#define LSYM(sym)        !CAT(.L,sym:)
#define LREF(sym)        CAT(.L,sym)

#else

#ifdef coff
/* This used to be .milli but since link32 places different named
   sections in different segments millicode ends up a long ways away
   from .text (1meg?).  This way they will be a lot closer.

   The SUBSPA_MILLI_* specify locality sets for certain millicode
   modules in order to ensure that modules that call one another are
   placed close together. Without locality sets this is unlikely to
   happen because of the Dynamite linker library search algorithm. We
   want these modules close together so that short calls always reach
   (we don't want to require long calls or use long call stubs).  */

#define SUBSPA_MILLI     .subspa .text
#define SUBSPA_MILLI_DIV .subspa .text$dv,align=16
#define SUBSPA_MILLI_MUL .subspa .text$mu,align=16
#define ATTR_MILLI       .attr code,read,execute
#define SUBSPA_DATA      .subspa .data
#define ATTR_DATA        .attr init_data,read,write
#define GLOBAL           _gp
#else
#define SUBSPA_MILLI     .subspa $MILLICODE$,QUAD=0,ALIGN=4,ACCESS=0x2c,SORT=8
#define SUBSPA_MILLI_DIV SUBSPA_MILLI
#define SUBSPA_MILLI_MUL SUBSPA_MILLI
#define ATTR_MILLI
#define SUBSPA_DATA      .subspa $BSS$,quad=1,align=8,access=0x1f,sort=80,zero
#define ATTR_DATA
#define GLOBAL           $global$
#endif
#define SPACE_DATA       .space $PRIVATE$,spnum=1,sort=16

#define GSYM(sym)        !sym
#define LSYM(sym)        !CAT(L$,sym)
#define LREF(sym)        CAT(L$,sym)
#endif

/* $$dyncall: indirect call helper.  %r22 holds either a plain code
   address or a plabel (procedure label) address with bit 30 set; for a
   plabel we load the target address and the new linkage-table pointer
   (%r19) from the plabel, then branch to the real target.  The store
   of %r2 in the delay slot saves the return address into the frame
   marker.  */
#ifdef L_dyncall
        SUBSPA_MILLI
        ATTR_DATA
GSYM($$dyncall)
        .export $$dyncall,millicode
        .proc
        .callinfo       millicode
        .entry
        bb,>=,n %r22,30,LREF(1)         ; branch if not plabel address
        depi    0,31,2,%r22             ; clear the two least significant bits
        ldw     4(%r22),%r19            ; load new LTP value
        ldw     0(%r22),%r22            ; load address of target
LSYM(1)
#ifdef LINUX
        bv      %r0(%r22)               ; branch to the real target
#else
        ldsid   (%sr0,%r22),%r1         ; get the "space ident" selected by r22
        mtsp    %r1,%sr0                ; move that space identifier into sr0
        be      0(%sr0,%r22)            ; branch to the real target
#endif
        stw     %r2,-24(%r30)           ; save return address into frame marker
        .exit
        .procend
#endif

#ifdef L_divI
/* ROUTINES:    $$divI, $$divoI

   Single precision divide for signed binary integers.

   The quotient is truncated towards zero.
   The sign of the quotient is the XOR of the signs of the dividend and
   divisor.
   Divide by zero is trapped.
   Divide of -2**31 by -1 is trapped for $$divoI but not for $$divI.

   INPUT REGISTERS:
   .    arg0 == dividend
   .    arg1 == divisor
   .    mrp  == return pc
   .    sr0  == return space when called externally

   OUTPUT REGISTERS:
   .    arg0 =  undefined
   .    arg1 =  undefined
   .    ret1 =  quotient

   OTHER REGISTERS AFFECTED:
   .    r1   =  undefined

   SIDE EFFECTS:
   .    Causes a trap under the following conditions:
   .            divisor is zero  (traps with ADDIT,=  0,25,0)
   .            dividend==-2**31  and divisor==-1 and routine is $$divoI
   .                             (traps with ADDO  26,25,0)
   .    Changes memory at the following places:
   .            NONE

   PERMISSIBLE CONTEXT:
   .    Unwindable.
   .    Suitable for internal or external millicode.
   .    Assumes the special millicode register conventions.

   DISCUSSION:
   .    Branches to other millicode routines using BE
   .            $$div_# for # being 2,3,4,5,6,7,8,9,10,12,14,15
   .
   .    For selected divisors, calls a divide by constant routine written by
   .    Karl Pettis.  Eligible divisors are 1..15 excluding 11 and 13.
   .
   .    The only overflow case is -2**31 divided by -1.
   .    Both routines return -2**31 but only $$divoI traps.  */

RDEFINE(temp,r1)
RDEFINE(retreg,ret1)    /*  r29 */
RDEFINE(temp1,arg0)
        SUBSPA_MILLI_DIV
        ATTR_MILLI
        .import $$divI_2,millicode
        .import $$divI_3,millicode
        .import $$divI_4,millicode
        .import $$divI_5,millicode
        .import $$divI_6,millicode
        .import $$divI_7,millicode
        .import $$divI_8,millicode
        .import $$divI_9,millicode
        .import $$divI_10,millicode
        .import $$divI_12,millicode
        .import $$divI_14,millicode
        .import $$divI_15,millicode
        .export $$divI,millicode
        .export $$divoI,millicode
        .proc
        .callinfo       millicode
        .entry
GSYM($$divoI)
        comib,=,n  -1,arg1,LREF(negative1)      /*  when divisor == -1 */
GSYM($$divI)
        ldo     -1(arg1),temp           /*  is there at most one bit set ? */
        and,<>  arg1,temp,r0            /*  if not, don't use power of 2 divide */
        addi,>  0,arg1,r0               /*  if divisor > 0, use power of 2 divide */
        b,n     LREF(neg_denom)
LSYM(pow2)
        addi,>= 0,arg0,retreg           /*  if numerator is negative, add the */
        add     arg0,temp,retreg        /*  (denominator -1) to correct for shifts */
        extru,= arg1,15,16,temp         /*  test denominator with 0xffff0000 */
        extrs   retreg,15,16,retreg     /*  retreg = retreg >> 16 */
        or      arg1,temp,arg1          /*  arg1 = arg1 | (arg1 >> 16) */
        ldi     0xcc,temp1              /*  setup 0xcc in temp1 */
        extru,= arg1,23,8,temp          /*  test denominator with 0xff00 */
        extrs   retreg,23,24,retreg     /*  retreg = retreg >> 8 */
        or      arg1,temp,arg1          /*  arg1 = arg1 | (arg1 >> 8) */
        ldi     0xaa,temp               /*  setup 0xaa in temp */
        extru,= arg1,27,4,r0            /*  test denominator with 0xf0 */
        extrs   retreg,27,28,retreg     /*  retreg = retreg >> 4 */
        and,=   arg1,temp1,r0           /*  test denominator with 0xcc */
        extrs   retreg,29,30,retreg     /*  retreg = retreg >> 2 */
        and,=   arg1,temp,r0            /*  test denominator with 0xaa */
        extrs   retreg,30,31,retreg     /*  retreg = retreg >> 1 */
        MILLIRETN
LSYM(neg_denom)
        addi,<  0,arg1,r0               /*  if arg1 >= 0, it's not power of 2 */
        b,n     LREF(regular_seq)
        sub     r0,arg1,temp            /*  make denominator positive */
        comb,=,n  arg1,temp,LREF(regular_seq)   /*  test against 0x80000000 and 0 */
        ldo     -1(temp),retreg         /*  is there at most one bit set ? */
        and,=   temp,retreg,r0          /*  if so, the denominator is power of 2 */
        b,n     LREF(regular_seq)
        sub     r0,arg0,retreg          /*  negate numerator */
        comb,=,n arg0,retreg,LREF(regular_seq) /*  test against 0x80000000 */
        copy    retreg,arg0             /*  set up arg0, arg1 and temp  */
        copy    temp,arg1               /*  before branching to pow2 */
        b       LREF(pow2)
        ldo     -1(arg1),temp
LSYM(regular_seq)
        comib,>>=,n 15,arg1,LREF(small_divisor)
        add,>=  0,arg0,retreg           /*  move dividend, if retreg < 0, */
LSYM(normal)
        subi    0,retreg,retreg         /*    make it positive */
        sub     0,arg1,temp             /*  clear carry,  */
                                        /*    negate the divisor */
        ds      0,temp,0                /*  set V-bit to the comple- */
                                        /*    ment of the divisor sign */
        add     retreg,retreg,retreg    /*  shift msb bit into carry */
        ds      r0,arg1,temp            /*  1st divide step, if no carry */
        addc    retreg,retreg,retreg    /*  shift retreg with/into carry */
        ds      temp,arg1,temp          /*  2nd divide step */
        addc    retreg,retreg,retreg    /*  shift retreg with/into carry */
        ds      temp,arg1,temp          /*  3rd divide step */
        addc    retreg,retreg,retreg    /*  shift retreg with/into carry */
        ds      temp,arg1,temp          /*  4th divide step */
        addc    retreg,retreg,retreg    /*  shift retreg with/into carry */
        ds      temp,arg1,temp          /*  5th divide step */
        addc    retreg,retreg,retreg    /*  shift retreg with/into carry */
        ds      temp,arg1,temp          /*  6th divide step */
        addc    retreg,retreg,retreg    /*  shift retreg with/into carry */
        ds      temp,arg1,temp          /*  7th divide step */
        addc    retreg,retreg,retreg    /*  shift retreg with/into carry */
        ds      temp,arg1,temp          /*  8th divide step */
        addc    retreg,retreg,retreg    /*  shift retreg with/into carry */
        ds      temp,arg1,temp          /*  9th divide step */
        addc    retreg,retreg,retreg    /*  shift retreg with/into carry */
        ds      temp,arg1,temp          /*  10th divide step */
        addc    retreg,retreg,retreg    /*  shift retreg with/into carry */
        ds      temp,arg1,temp          /*  11th divide step */
        addc    retreg,retreg,retreg    /*  shift retreg with/into carry */
        ds      temp,arg1,temp          /*  12th divide step */
        addc    retreg,retreg,retreg    /*  shift retreg with/into carry */
        ds      temp,arg1,temp          /*  13th divide step */
        addc    retreg,retreg,retreg    /*  shift retreg with/into carry */
        ds      temp,arg1,temp          /*  14th divide step */
        addc    retreg,retreg,retreg    /*  shift retreg with/into carry */
        ds      temp,arg1,temp          /*  15th divide step */
        addc    retreg,retreg,retreg    /*  shift retreg with/into carry */
        ds      temp,arg1,temp          /*  16th divide step */
        addc    retreg,retreg,retreg    /*  shift retreg with/into carry */
        ds      temp,arg1,temp          /*  17th divide step */
        addc    retreg,retreg,retreg    /*  shift retreg with/into carry */
        ds      temp,arg1,temp          /*  18th divide step */
        addc    retreg,retreg,retreg    /*  shift retreg with/into carry */
        ds      temp,arg1,temp          /*  19th divide step */
        addc    retreg,retreg,retreg    /*  shift retreg with/into carry */
        ds      temp,arg1,temp          /*  20th divide step */
        addc    retreg,retreg,retreg    /*  shift retreg with/into carry */
        ds      temp,arg1,temp          /*  21st divide step */
        addc    retreg,retreg,retreg    /*  shift retreg with/into carry */
        ds      temp,arg1,temp          /*  22nd divide step */
        addc    retreg,retreg,retreg    /*  shift retreg with/into carry */
        ds      temp,arg1,temp          /*  23rd divide step */
        addc    retreg,retreg,retreg    /*  shift retreg with/into carry */
        ds      temp,arg1,temp          /*  24th divide step */
        addc    retreg,retreg,retreg    /*  shift retreg with/into carry */
        ds      temp,arg1,temp          /*  25th divide step */
        addc    retreg,retreg,retreg    /*  shift retreg with/into carry */
        ds      temp,arg1,temp          /*  26th divide step */
        addc    retreg,retreg,retreg    /*  shift retreg with/into carry */
        ds      temp,arg1,temp          /*  27th divide step */
        addc    retreg,retreg,retreg    /*  shift retreg with/into carry */
        ds      temp,arg1,temp          /*  28th divide step */
        addc    retreg,retreg,retreg    /*  shift retreg with/into carry */
        ds      temp,arg1,temp          /*  29th divide step */
        addc    retreg,retreg,retreg    /*  shift retreg with/into carry */
        ds      temp,arg1,temp          /*  30th divide step */
        addc    retreg,retreg,retreg    /*  shift retreg with/into carry */
        ds      temp,arg1,temp          /*  31st divide step */
        addc    retreg,retreg,retreg    /*  shift retreg with/into carry */
        ds      temp,arg1,temp          /*  32nd divide step, */
        addc    retreg,retreg,retreg    /*  shift last retreg bit into retreg */
        xor,>=  arg0,arg1,0             /*  get correct sign of quotient */
          sub   0,retreg,retreg         /*    based on operand signs */
        MILLIRETN
        nop

LSYM(small_divisor)

#if defined(pa64)
/*  Clear the upper 32 bits of the arg1 register.  We are working with  */
/*  small divisors (and 32 bit integers)   We must not be misled  */
/*  by "1" bits left in the upper 32 bits.  */
        depd %r0,31,32,%r25
#endif
        blr,n   arg1,r0
        nop
/*  table for divisor == 0,1, ... ,15 */
        addit,= 0,arg1,r0       /*  trap if divisor == 0 */
        nop
        MILLIRET                /*  divisor == 1 */
        copy    arg0,retreg
        MILLI_BEN($$divI_2)     /*  divisor == 2 */
        nop
        MILLI_BEN($$divI_3)     /*  divisor == 3 */
        nop
        MILLI_BEN($$divI_4)     /*  divisor == 4 */
        nop
        MILLI_BEN($$divI_5)     /*  divisor == 5 */
        nop
        MILLI_BEN($$divI_6)     /*  divisor == 6 */
        nop
        MILLI_BEN($$divI_7)     /*  divisor == 7 */
        nop
        MILLI_BEN($$divI_8)     /*  divisor == 8 */
        nop
        MILLI_BEN($$divI_9)     /*  divisor == 9 */
        nop
        MILLI_BEN($$divI_10)    /*  divisor == 10 */
        nop
        b       LREF(normal)            /*  divisor == 11 */
        add,>=  0,arg0,retreg
        MILLI_BEN($$divI_12)    /*  divisor == 12 */
        nop
        b       LREF(normal)            /*  divisor == 13 */
        add,>=  0,arg0,retreg
        MILLI_BEN($$divI_14)    /*  divisor == 14 */
        nop
        MILLI_BEN($$divI_15)    /*  divisor == 15 */
        nop

LSYM(negative1)
        sub     0,arg0,retreg   /*  result is negation of dividend */
        MILLIRET
        addo    arg0,arg1,r0    /*  trap iff dividend==0x80000000 && divisor==-1 */
        .exit
        .procend
        .end
#endif

#ifdef L_divU
/* ROUTINE:     $$divU
   .
   .    Single precision divide for unsigned integers.
   .
   .    Quotient is truncated towards zero.
   .    Traps on divide by zero.

   INPUT REGISTERS:
   .    arg0 == dividend
   .    arg1 == divisor
   .    mrp  == return pc
   .    sr0  == return space when called externally

   OUTPUT REGISTERS:
   .    arg0 =  undefined
   .    arg1 =  undefined
   .    ret1 =  quotient

   OTHER REGISTERS AFFECTED:
   .    r1   =  undefined

   SIDE EFFECTS:
   .    Causes a trap under the following conditions:
   .            divisor is zero
   .    Changes memory at the following places:
   .            NONE

   PERMISSIBLE CONTEXT:
   .    Unwindable.
   .    Does not create a stack frame.
   .    Suitable for internal or external millicode.
   .    Assumes the special millicode register conventions.

   DISCUSSION:
   .    Branches to other millicode routines using BE:
   .            $$divU_# for 3,5,6,7,9,10,12,14,15
   .
   .    For selected small divisors calls the special divide by constant
   .    routines written by Karl Pettis.  These are: 3,5,6,7,9,10,12,14,15.  */

RDEFINE(temp,r1)
RDEFINE(retreg,ret1)    /* r29 */
RDEFINE(temp1,arg0)
        SUBSPA_MILLI_DIV
        ATTR_MILLI
        .export $$divU,millicode
        .import $$divU_3,millicode
        .import $$divU_5,millicode
        .import $$divU_6,millicode
        .import $$divU_7,millicode
        .import $$divU_9,millicode
        .import $$divU_10,millicode
        .import $$divU_12,millicode
        .import $$divU_14,millicode
        .import $$divU_15,millicode
        .proc
        .callinfo       millicode
        .entry
GSYM($$divU)
/* The subtract is not nullified since it does no harm and can be used
   by the two cases that branch back to "normal".  */
        ldo     -1(arg1),temp           /* is there at most one bit set ? */
        and,=   arg1,temp,r0            /* if so, denominator is power of 2 */
        b       LREF(regular_seq)
        addit,= 0,arg1,0                /* trap for zero dvr */
        copy    arg0,retreg
        extru,= arg1,15,16,temp         /* test denominator with 0xffff0000 */
        extru   retreg,15,16,retreg     /* retreg = retreg >> 16 */
        or      arg1,temp,arg1          /* arg1 = arg1 | (arg1 >> 16) */
        ldi     0xcc,temp1              /* setup 0xcc in temp1 */
        extru,= arg1,23,8,temp          /* test denominator with 0xff00 */
        extru   retreg,23,24,retreg     /* retreg = retreg >> 8 */
        or      arg1,temp,arg1          /* arg1 = arg1 | (arg1 >> 8) */
        ldi     0xaa,temp               /* setup 0xaa in temp */
        extru,= arg1,27,4,r0            /* test denominator with 0xf0 */
        extru   retreg,27,28,retreg     /* retreg = retreg >> 4 */
        and,=   arg1,temp1,r0           /* test denominator with 0xcc */
        extru   retreg,29,30,retreg     /* retreg = retreg >> 2 */
        and,=   arg1,temp,r0            /* test denominator with 0xaa */
        extru   retreg,30,31,retreg     /* retreg = retreg >> 1 */
        MILLIRETN
        nop
LSYM(regular_seq)
        comib,>=  15,arg1,LREF(special_divisor)
        subi    0,arg1,temp             /* clear carry, negate the divisor */
        ds      r0,temp,r0              /* set V-bit to 1 */
LSYM(normal)
        add     arg0,arg0,retreg        /* shift msb bit into carry */
        ds      r0,arg1,temp            /* 1st divide step, if no carry */
        addc    retreg,retreg,retreg    /* shift retreg with/into carry */
        ds      temp,arg1,temp          /* 2nd divide step */
        addc    retreg,retreg,retreg    /* shift retreg with/into carry */
        ds      temp,arg1,temp          /* 3rd divide step */
        addc    retreg,retreg,retreg    /* shift retreg with/into carry */
        ds      temp,arg1,temp          /* 4th divide step */
        addc    retreg,retreg,retreg    /* shift retreg with/into carry */
        ds      temp,arg1,temp          /* 5th divide step */
        addc    retreg,retreg,retreg    /* shift retreg with/into carry */
        ds      temp,arg1,temp          /* 6th divide step */
        addc    retreg,retreg,retreg    /* shift retreg with/into carry */
        ds      temp,arg1,temp          /* 7th divide step */
        addc    retreg,retreg,retreg    /* shift retreg with/into carry */
        ds      temp,arg1,temp          /* 8th divide step */
        addc    retreg,retreg,retreg    /* shift retreg with/into carry */
        ds      temp,arg1,temp          /* 9th divide step */
        addc    retreg,retreg,retreg    /* shift retreg with/into carry */
        ds      temp,arg1,temp          /* 10th divide step */
        addc    retreg,retreg,retreg    /* shift retreg with/into carry */
        ds      temp,arg1,temp          /* 11th divide step */
        addc    retreg,retreg,retreg    /* shift retreg with/into carry */
        ds      temp,arg1,temp          /* 12th divide step */
        addc    retreg,retreg,retreg    /* shift retreg with/into carry */
        ds      temp,arg1,temp          /* 13th divide step */
        addc    retreg,retreg,retreg    /* shift retreg with/into carry */
        ds      temp,arg1,temp          /* 14th divide step */
        addc    retreg,retreg,retreg    /* shift retreg with/into carry */
        ds      temp,arg1,temp          /* 15th divide step */
        addc    retreg,retreg,retreg    /* shift retreg with/into carry */
        ds      temp,arg1,temp          /* 16th divide step */
        addc    retreg,retreg,retreg    /* shift retreg with/into carry */
        ds      temp,arg1,temp          /* 17th divide step */
        addc    retreg,retreg,retreg    /* shift retreg with/into carry */
        ds      temp,arg1,temp          /* 18th divide step */
        addc    retreg,retreg,retreg    /* shift retreg with/into carry */
        ds      temp,arg1,temp          /* 19th divide step */
        addc    retreg,retreg,retreg    /* shift retreg with/into carry */
        ds      temp,arg1,temp          /* 20th divide step */
        addc    retreg,retreg,retreg    /* shift retreg with/into carry */
        ds      temp,arg1,temp          /* 21st divide step */
        addc    retreg,retreg,retreg    /* shift retreg with/into carry */
        ds      temp,arg1,temp          /* 22nd divide step */
        addc    retreg,retreg,retreg    /* shift retreg with/into carry */
        ds      temp,arg1,temp          /* 23rd divide step */
        addc    retreg,retreg,retreg    /* shift retreg with/into carry */
        ds      temp,arg1,temp          /* 24th divide step */
        addc    retreg,retreg,retreg    /* shift retreg with/into carry */
        ds      temp,arg1,temp          /* 25th divide step */
        addc    retreg,retreg,retreg    /* shift retreg with/into carry */
        ds      temp,arg1,temp          /* 26th divide step */
        addc    retreg,retreg,retreg    /* shift retreg with/into carry */
        ds      temp,arg1,temp          /* 27th divide step */
        addc    retreg,retreg,retreg    /* shift retreg with/into carry */
        ds      temp,arg1,temp          /* 28th divide step */
        addc    retreg,retreg,retreg    /* shift retreg with/into carry */
        ds      temp,arg1,temp          /* 29th divide step */
        addc    retreg,retreg,retreg    /* shift retreg with/into carry */
        ds      temp,arg1,temp          /* 30th divide step */
        addc    retreg,retreg,retreg    /* shift retreg with/into carry */
        ds      temp,arg1,temp          /* 31st divide step */
        addc    retreg,retreg,retreg    /* shift retreg with/into carry */
        ds      temp,arg1,temp          /* 32nd divide step, */
        MILLIRET
        addc    retreg,retreg,retreg    /* shift last retreg bit into retreg */

/* Handle the cases where divisor is a small constant or has high bit on.  */
LSYM(special_divisor)
/*      blr     arg1,r0 */
/*      comib,>,n  0,arg1,LREF(big_divisor) ; nullify previous instruction */

/* Pratap 8/13/90. The 815 Stirling chip set has a bug that prevents us from
   generating such a blr, comib sequence. A problem in nullification. So I
   rewrote this code.  */

#if defined(pa64)
/* Clear the upper 32 bits of the arg1 register.  We are working with
   small divisors (and 32 bit unsigned integers)   We must not be misled
   by "1" bits left in the upper 32 bits.  */
        depd %r0,31,32,%r25
#endif
        comib,> 0,arg1,LREF(big_divisor)
        nop
        blr     arg1,r0
        nop

LSYM(zero_divisor)      /* this label is here to provide external visibility */
        addit,= 0,arg1,0                /* trap for zero dvr */
        nop
        MILLIRET                        /* divisor == 1 */
        copy    arg0,retreg
        MILLIRET                        /* divisor == 2 */
        extru   arg0,30,31,retreg
        MILLI_BEN($$divU_3)             /* divisor == 3 */
        nop
        MILLIRET                        /* divisor == 4 */
        extru   arg0,29,30,retreg
        MILLI_BEN($$divU_5)             /* divisor == 5 */
        nop
        MILLI_BEN($$divU_6)             /* divisor == 6 */
        nop
        MILLI_BEN($$divU_7)             /* divisor == 7 */
        nop
        MILLIRET                        /* divisor == 8 */
        extru   arg0,28,29,retreg
        MILLI_BEN($$divU_9)             /* divisor == 9 */
        nop
        MILLI_BEN($$divU_10)            /* divisor == 10 */
        nop
        b       LREF(normal)            /* divisor == 11 */
        ds      r0,temp,r0              /* set V-bit to 1 */
        MILLI_BEN($$divU_12)            /* divisor == 12 */
        nop
        b       LREF(normal)            /* divisor == 13 */
        ds      r0,temp,r0              /* set V-bit to 1 */
        MILLI_BEN($$divU_14)            /* divisor == 14 */
        nop
        MILLI_BEN($$divU_15)            /* divisor == 15 */
        nop

/* Handle the case where the high bit is on in the divisor.
   Compute:     if( dividend>=divisor) quotient=1; else quotient=0;
   Note:        dividend>=divisor iff dividend-divisor does not borrow
   and          not borrow iff carry.  */
LSYM(big_divisor)
        sub     arg0,arg1,r0
        MILLIRET
        addc    r0,r0,retreg
        .exit
        .procend
        .end
#endif
702
 
703
#ifdef L_remI
704
/* ROUTINE:     $$remI
705
 
706
   DESCRIPTION:
707
   .    $$remI returns the remainder of the division of two signed 32-bit
708
   .    integers.  The sign of the remainder is the same as the sign of
709
   .    the dividend.
710
 
711
 
712
   INPUT REGISTERS:
713
   .    arg0 == dividend
714
   .    arg1 == divisor
715
   .    mrp  == return pc
716
   .    sr0  == return space when called externally
717
 
718
   OUTPUT REGISTERS:
719
   .    arg0 = destroyed
720
   .    arg1 = destroyed
721
   .    ret1 = remainder
722
 
723
   OTHER REGISTERS AFFECTED:
724
   .    r1   = undefined
725
 
726
   SIDE EFFECTS:
727
   .    Causes a trap under the following conditions:  DIVIDE BY ZERO
728
   .    Changes memory at the following places:  NONE
729
 
730
   PERMISSIBLE CONTEXT:
731
   .    Unwindable
732
   .    Does not create a stack frame
733
   .    Is usable for internal or external microcode
734
 
735
   DISCUSSION:
736
   .    Calls other millicode routines via mrp:  NONE
737
   .    Calls other millicode routines:  NONE  */
738
 
739
/* $$remI / $$remoI body.  Register aliases: tmp=r1, retreg=ret1.
   Power-of-2 divisors (positive or negative) are handled by masking
   the dividend; the general path runs 32 fully unrolled DS (divide
   step) iterations, then corrects the remainder and gives it the
   sign of the dividend.  NOTE(review): the instruction after each
   conditional op sits in its nullification shadow / delay slot —
   ordering here is load-bearing.  */
RDEFINE(tmp,r1)
740
RDEFINE(retreg,ret1)
741
 
742
        SUBSPA_MILLI
743
        ATTR_MILLI
744
        .proc
745
        .callinfo millicode
746
        .entry
747
GSYM($$remI)
748
GSYM($$remoI)
749
        .export $$remI,MILLICODE
750
        .export $$remoI,MILLICODE
751
        ldo             -1(arg1),tmp            /*  is there at most one bit set ? */
752
        and,<>          arg1,tmp,r0             /*  if not, don't use power of 2 */
753
        addi,>          0,arg1,r0               /*  if denominator > 0, use power */
754
                                                /*  of 2 */
755
        b,n             LREF(neg_denom)
756
LSYM(pow2)
757
        comb,>,n        0,arg0,LREF(neg_num)    /*  is numerator < 0 ? */
758
        and             arg0,tmp,retreg         /*  get the result */
759
        MILLIRETN
760
LSYM(neg_num)
761
        subi            0,arg0,arg0             /*  negate numerator */
762
        and             arg0,tmp,retreg         /*  get the result */
763
        subi            0,retreg,retreg         /*  negate result */
764
        MILLIRETN
765
LSYM(neg_denom)
766
        addi,<		0,arg1,r0		/*  if arg1 >= 0, it's not power */
767
                                                /*  of 2 */
768
        b,n             LREF(regular_seq)
769
        sub             r0,arg1,tmp             /*  make denominator positive */
770
        comb,=,n        arg1,tmp,LREF(regular_seq) /*  test against 0x80000000 and 0 */
771
        ldo             -1(tmp),retreg          /*  is there at most one bit set ? */
772
        and,=           tmp,retreg,r0           /*  if not, go to regular_seq */
773
        b,n             LREF(regular_seq)
774
        comb,>,n        0,arg0,LREF(neg_num_2)  /*  if arg0 < 0, negate it  */
775
        and             arg0,retreg,retreg
776
        MILLIRETN
777
LSYM(neg_num_2)
778
        subi            0,arg0,tmp              /*  test against 0x80000000 */
779
        and             tmp,retreg,retreg
780
        subi            0,retreg,retreg
781
        MILLIRETN
782
/* General case: one DS (divide step) per quotient bit, 32 steps.  */
LSYM(regular_seq)
783
        addit,=         0,arg1,0                /*  trap if div by zero */
784
        add,>=          0,arg0,retreg           /*  move dividend, if retreg < 0, */
785
        sub             0,retreg,retreg         /*    make it positive */
786
        sub             0,arg1, tmp             /*  clear carry,  */
787
                                                /*    negate the divisor */
788
        ds              0, tmp,0                /*  set V-bit to the comple- */
789
                                                /*    ment of the divisor sign */
790
        or              0,0, tmp                /*  clear  tmp */
791
        add             retreg,retreg,retreg    /*  shift msb bit into carry */
792
        ds               tmp,arg1, tmp          /*  1st divide step, if no carry */
793
                                                /*    out, msb of quotient = 0 */
794
        addc            retreg,retreg,retreg    /*  shift retreg with/into carry */
795
LSYM(t1)
796
        ds               tmp,arg1, tmp          /*  2nd divide step */
797
        addc            retreg,retreg,retreg    /*  shift retreg with/into carry */
798
        ds               tmp,arg1, tmp          /*  3rd divide step */
799
        addc            retreg,retreg,retreg    /*  shift retreg with/into carry */
800
        ds               tmp,arg1, tmp          /*  4th divide step */
801
        addc            retreg,retreg,retreg    /*  shift retreg with/into carry */
802
        ds               tmp,arg1, tmp          /*  5th divide step */
803
        addc            retreg,retreg,retreg    /*  shift retreg with/into carry */
804
        ds               tmp,arg1, tmp          /*  6th divide step */
805
        addc            retreg,retreg,retreg    /*  shift retreg with/into carry */
806
        ds               tmp,arg1, tmp          /*  7th divide step */
807
        addc            retreg,retreg,retreg    /*  shift retreg with/into carry */
808
        ds               tmp,arg1, tmp          /*  8th divide step */
809
        addc            retreg,retreg,retreg    /*  shift retreg with/into carry */
810
        ds               tmp,arg1, tmp          /*  9th divide step */
811
        addc            retreg,retreg,retreg    /*  shift retreg with/into carry */
812
        ds               tmp,arg1, tmp          /*  10th divide step */
813
        addc            retreg,retreg,retreg    /*  shift retreg with/into carry */
814
        ds               tmp,arg1, tmp          /*  11th divide step */
815
        addc            retreg,retreg,retreg    /*  shift retreg with/into carry */
816
        ds               tmp,arg1, tmp          /*  12th divide step */
817
        addc            retreg,retreg,retreg    /*  shift retreg with/into carry */
818
        ds               tmp,arg1, tmp          /*  13th divide step */
819
        addc            retreg,retreg,retreg    /*  shift retreg with/into carry */
820
        ds               tmp,arg1, tmp          /*  14th divide step */
821
        addc            retreg,retreg,retreg    /*  shift retreg with/into carry */
822
        ds               tmp,arg1, tmp          /*  15th divide step */
823
        addc            retreg,retreg,retreg    /*  shift retreg with/into carry */
824
        ds               tmp,arg1, tmp          /*  16th divide step */
825
        addc            retreg,retreg,retreg    /*  shift retreg with/into carry */
826
        ds               tmp,arg1, tmp          /*  17th divide step */
827
        addc            retreg,retreg,retreg    /*  shift retreg with/into carry */
828
        ds               tmp,arg1, tmp          /*  18th divide step */
829
        addc            retreg,retreg,retreg    /*  shift retreg with/into carry */
830
        ds               tmp,arg1, tmp          /*  19th divide step */
831
        addc            retreg,retreg,retreg    /*  shift retreg with/into carry */
832
        ds               tmp,arg1, tmp          /*  20th divide step */
833
        addc            retreg,retreg,retreg    /*  shift retreg with/into carry */
834
        ds               tmp,arg1, tmp          /*  21st divide step */
835
        addc            retreg,retreg,retreg    /*  shift retreg with/into carry */
836
        ds               tmp,arg1, tmp          /*  22nd divide step */
837
        addc            retreg,retreg,retreg    /*  shift retreg with/into carry */
838
        ds               tmp,arg1, tmp          /*  23rd divide step */
839
        addc            retreg,retreg,retreg    /*  shift retreg with/into carry */
840
        ds               tmp,arg1, tmp          /*  24th divide step */
841
        addc            retreg,retreg,retreg    /*  shift retreg with/into carry */
842
        ds               tmp,arg1, tmp          /*  25th divide step */
843
        addc            retreg,retreg,retreg    /*  shift retreg with/into carry */
844
        ds               tmp,arg1, tmp          /*  26th divide step */
845
        addc            retreg,retreg,retreg    /*  shift retreg with/into carry */
846
        ds               tmp,arg1, tmp          /*  27th divide step */
847
        addc            retreg,retreg,retreg    /*  shift retreg with/into carry */
848
        ds               tmp,arg1, tmp          /*  28th divide step */
849
        addc            retreg,retreg,retreg    /*  shift retreg with/into carry */
850
        ds               tmp,arg1, tmp          /*  29th divide step */
851
        addc            retreg,retreg,retreg    /*  shift retreg with/into carry */
852
        ds               tmp,arg1, tmp          /*  30th divide step */
853
        addc            retreg,retreg,retreg    /*  shift retreg with/into carry */
854
        ds               tmp,arg1, tmp          /*  31st divide step */
855
        addc            retreg,retreg,retreg    /*  shift retreg with/into carry */
856
        ds               tmp,arg1, tmp          /*  32nd divide step, */
857
        addc            retreg,retreg,retreg    /*  shift last bit into retreg */
858
/* Remainder correction, then give the result the dividend's sign.  */
        movb,>=,n        tmp,retreg,LREF(finish) /*  branch if pos.  tmp */
859
        add,<		arg1,0,0		/*  if arg1 > 0, add arg1 */
860
        add,tr           tmp,arg1,retreg        /*    for correcting remainder tmp */
861
        sub              tmp,arg1,retreg        /*  else add absolute value arg1 */
862
LSYM(finish)
863
        add,>=          arg0,0,0                /*  set sign of remainder */
864
        sub             0,retreg,retreg         /*    to sign of dividend */
865
        MILLIRET
866
        nop
867
        .exit
868
        .procend
869
#ifdef milliext
870
        .origin 0x00000200
871
#endif
872
        .end
873
#endif
874
 
875
#ifdef L_remU
876
/* ROUTINE:     $$remU
877
   .    Single precision divide for remainder with unsigned binary integers.
878
   .
879
   .    The remainder must be dividend-(dividend/divisor)*divisor.
880
   .    Divide by zero is trapped.
881
 
882
   INPUT REGISTERS:
883
   .    arg0 == dividend
884
   .    arg1 == divisor
885
   .    mrp  == return pc
886
   .    sr0  == return space when called externally
887
 
888
   OUTPUT REGISTERS:
889
   .    arg0 =  undefined
890
   .    arg1 =  undefined
891
   .    ret1 =  remainder
892
 
893
   OTHER REGISTERS AFFECTED:
894
   .    r1   =  undefined
895
 
896
   SIDE EFFECTS:
897
   .    Causes a trap under the following conditions:  DIVIDE BY ZERO
898
   .    Changes memory at the following places:  NONE
899
 
900
   PERMISSIBLE CONTEXT:
901
   .    Unwindable.
902
   .    Does not create a stack frame.
903
   .    Suitable for internal or external millicode.
904
   .    Assumes the special millicode register conventions.
905
 
906
   DISCUSSION:
907
   .    Calls other millicode routines using mrp: NONE
908
   .    Calls other millicode routines: NONE  */
909
 
910
 
911
/* $$remU body.  Register aliases: temp=r1, rmndr=ret1 (r29).
   A power-of-2 divisor is handled by masking; a divisor that is <= 0
   when viewed as signed (i.e. high bit set) takes special_case; the
   general path is 32 unrolled DS (divide step) iterations followed by
   one conditional correction of the remainder.  */
RDEFINE(temp,r1)
912
RDEFINE(rmndr,ret1)     /*  r29 */
913
        SUBSPA_MILLI
914
        ATTR_MILLI
915
        .export $$remU,millicode
916
        .proc
917
        .callinfo       millicode
918
        .entry
919
GSYM($$remU)
920
        ldo     -1(arg1),temp           /*  is there at most one bit set ? */
921
        and,=   arg1,temp,r0            /*  if not, don't use power of 2 */
922
        b       LREF(regular_seq)
923
        addit,= 0,arg1,r0               /*  trap on div by zero */
924
        and     arg0,temp,rmndr         /*  get the result for power of 2 */
925
        MILLIRETN
926
LSYM(regular_seq)
927
        comib,>=,n  0,arg1,LREF(special_case)
928
        subi    0,arg1,rmndr            /*  clear carry, negate the divisor */
929
        ds      r0,rmndr,r0             /*  set V-bit to 1 */
930
        add     arg0,arg0,temp          /*  shift msb bit into carry */
931
        ds      r0,arg1,rmndr           /*  1st divide step, if no carry */
932
        addc    temp,temp,temp          /*  shift temp with/into carry */
933
        ds      rmndr,arg1,rmndr                /*  2nd divide step */
934
        addc    temp,temp,temp          /*  shift temp with/into carry */
935
        ds      rmndr,arg1,rmndr                /*  3rd divide step */
936
        addc    temp,temp,temp          /*  shift temp with/into carry */
937
        ds      rmndr,arg1,rmndr                /*  4th divide step */
938
        addc    temp,temp,temp          /*  shift temp with/into carry */
939
        ds      rmndr,arg1,rmndr                /*  5th divide step */
940
        addc    temp,temp,temp          /*  shift temp with/into carry */
941
        ds      rmndr,arg1,rmndr                /*  6th divide step */
942
        addc    temp,temp,temp          /*  shift temp with/into carry */
943
        ds      rmndr,arg1,rmndr                /*  7th divide step */
944
        addc    temp,temp,temp          /*  shift temp with/into carry */
945
        ds      rmndr,arg1,rmndr                /*  8th divide step */
946
        addc    temp,temp,temp          /*  shift temp with/into carry */
947
        ds      rmndr,arg1,rmndr                /*  9th divide step */
948
        addc    temp,temp,temp          /*  shift temp with/into carry */
949
        ds      rmndr,arg1,rmndr                /*  10th divide step */
950
        addc    temp,temp,temp          /*  shift temp with/into carry */
951
        ds      rmndr,arg1,rmndr                /*  11th divide step */
952
        addc    temp,temp,temp          /*  shift temp with/into carry */
953
        ds      rmndr,arg1,rmndr                /*  12th divide step */
954
        addc    temp,temp,temp          /*  shift temp with/into carry */
955
        ds      rmndr,arg1,rmndr                /*  13th divide step */
956
        addc    temp,temp,temp          /*  shift temp with/into carry */
957
        ds      rmndr,arg1,rmndr                /*  14th divide step */
958
        addc    temp,temp,temp          /*  shift temp with/into carry */
959
        ds      rmndr,arg1,rmndr                /*  15th divide step */
960
        addc    temp,temp,temp          /*  shift temp with/into carry */
961
        ds      rmndr,arg1,rmndr                /*  16th divide step */
962
        addc    temp,temp,temp          /*  shift temp with/into carry */
963
        ds      rmndr,arg1,rmndr                /*  17th divide step */
964
        addc    temp,temp,temp          /*  shift temp with/into carry */
965
        ds      rmndr,arg1,rmndr                /*  18th divide step */
966
        addc    temp,temp,temp          /*  shift temp with/into carry */
967
        ds      rmndr,arg1,rmndr                /*  19th divide step */
968
        addc    temp,temp,temp          /*  shift temp with/into carry */
969
        ds      rmndr,arg1,rmndr                /*  20th divide step */
970
        addc    temp,temp,temp          /*  shift temp with/into carry */
971
        ds      rmndr,arg1,rmndr                /*  21st divide step */
972
        addc    temp,temp,temp          /*  shift temp with/into carry */
973
        ds      rmndr,arg1,rmndr                /*  22nd divide step */
974
        addc    temp,temp,temp          /*  shift temp with/into carry */
975
        ds      rmndr,arg1,rmndr                /*  23rd divide step */
976
        addc    temp,temp,temp          /*  shift temp with/into carry */
977
        ds      rmndr,arg1,rmndr                /*  24th divide step */
978
        addc    temp,temp,temp          /*  shift temp with/into carry */
979
        ds      rmndr,arg1,rmndr                /*  25th divide step */
980
        addc    temp,temp,temp          /*  shift temp with/into carry */
981
        ds      rmndr,arg1,rmndr                /*  26th divide step */
982
        addc    temp,temp,temp          /*  shift temp with/into carry */
983
        ds      rmndr,arg1,rmndr                /*  27th divide step */
984
        addc    temp,temp,temp          /*  shift temp with/into carry */
985
        ds      rmndr,arg1,rmndr                /*  28th divide step */
986
        addc    temp,temp,temp          /*  shift temp with/into carry */
987
        ds      rmndr,arg1,rmndr                /*  29th divide step */
988
        addc    temp,temp,temp          /*  shift temp with/into carry */
989
        ds      rmndr,arg1,rmndr                /*  30th divide step */
990
        addc    temp,temp,temp          /*  shift temp with/into carry */
991
        ds      rmndr,arg1,rmndr                /*  31st divide step */
992
        addc    temp,temp,temp          /*  shift temp with/into carry */
993
        ds      rmndr,arg1,rmndr                /*  32nd divide step, */
994
        comiclr,<= 0,rmndr,r0
995
          add   rmndr,arg1,rmndr        /*  correction */
996
        MILLIRETN
997
        nop
998
 
999
/* Putting >= on the last DS and deleting COMICLR does not work!  */
1000
LSYM(special_case)
1001
        sub,>>= arg0,arg1,rmndr
1002
          copy  arg0,rmndr
1003
        MILLIRETN
1004
        nop
1005
        .exit
1006
        .procend
1007
        .end
1008
#endif
1009
 
1010
#ifdef L_div_const
1011
/* ROUTINE:     $$divI_2
1012
   .            $$divI_3        $$divU_3
1013
   .            $$divI_4
1014
   .            $$divI_5        $$divU_5
1015
   .            $$divI_6        $$divU_6
1016
   .            $$divI_7        $$divU_7
1017
   .            $$divI_8
1018
   .            $$divI_9        $$divU_9
1019
   .            $$divI_10       $$divU_10
1020
   .
1021
   .            $$divI_12       $$divU_12
1022
   .
1023
   .            $$divI_14       $$divU_14
1024
   .            $$divI_15       $$divU_15
1025
   .            $$divI_16
1026
   .            $$divI_17       $$divU_17
1027
   .
1028
   .    Divide by selected constants for single precision binary integers.
1029
 
1030
   INPUT REGISTERS:
1031
   .    arg0 == dividend
1032
   .    mrp  == return pc
1033
   .    sr0  == return space when called externally
1034
 
1035
   OUTPUT REGISTERS:
1036
   .    arg0 =  undefined
1037
   .    arg1 =  undefined
1038
   .    ret1 =  quotient
1039
 
1040
   OTHER REGISTERS AFFECTED:
1041
   .    r1   =  undefined
1042
 
1043
   SIDE EFFECTS:
1044
   .    Causes a trap under the following conditions: NONE
1045
   .    Changes memory at the following places:  NONE
1046
 
1047
   PERMISSIBLE CONTEXT:
1048
   .    Unwindable.
1049
   .    Does not create a stack frame.
1050
   .    Suitable for internal or external millicode.
1051
   .    Assumes the special millicode register conventions.
1052
 
1053
   DISCUSSION:
1054
   .    Calls other millicode routines using mrp:  NONE
1055
   .    Calls other millicode routines:  NONE  */
1056
 
1057
 
1058
/* TRUNCATED DIVISION BY SMALL INTEGERS
1059
 
1060
   We are interested in q(x) = floor(x/y), where x >= 0 and y > 0
1061
   (with y fixed).
1062
 
1063
   Let a = floor(z/y), for some choice of z.  Note that z will be
1064
   chosen so that division by z is cheap.
1065
 
1066
   Let r be the remainder(z/y).  In other words, r = z - ay.
1067
 
1068
   Now, our method is to choose a value for b such that
1069
 
1070
   q'(x) = floor((ax+b)/z)
1071
 
1072
   is equal to q(x) over as large a range of x as possible.  If the
1073
   two are equal over a sufficiently large range, and if it is easy to
1074
   form the product (ax), and it is easy to divide by z, then we can
1075
   perform the division much faster than the general division algorithm.
1076
 
1077
   So, we want the following to be true:
1078
 
1079
   .    For x in the following range:
1080
   .
1081
   .        ky <= x < (k+1)y
1082
   .
1083
   .    implies that
1084
   .
1085
   .        k <= (ax+b)/z < (k+1)
1086
 
1087
   We want to determine b such that this is true for all k in the
1088
   range {0..K} for some maximum K.
1089
 
1090
   Since (ax+b) is an increasing function of x, we can take each
1091
   bound separately to determine the "best" value for b.
1092
 
1093
   (ax+b)/z < (k+1)            implies
1094
 
1095
   (a((k+1)y-1)+b < (k+1)z     implies
1096
 
1097
   b < a + (k+1)(z-ay)         implies
1098
 
1099
   b < a + (k+1)r
1100
 
1101
   This needs to be true for all k in the range {0..K}.  In
1102
   particular, it is true for k = 0 and this leads to a maximum
1103
   acceptable value for b.
1104
 
1105
   b < a+r   or   b <= a+r-1
1106
 
1107
   Taking the other bound, we have
1108
 
1109
   k <= (ax+b)/z               implies
1110
 
1111
   k <= (aky+b)/z              implies
1112
 
1113
   k(z-ay) <= b                implies
1114
 
1115
   kr <= b
1116
 
1117
   Clearly, the largest range for k will be achieved by maximizing b,
1118
   when r is not zero.  When r is zero, then the simplest choice for b
1119
   is 0.  When r is not 0, set
1120
 
1121
   .    b = a+r-1
1122
 
1123
   Now, by construction, q'(x) = floor((ax+b)/z) = q(x) = floor(x/y)
1124
   for all x in the range:
1125
 
1126
   .    0 <= x < (K+1)y
1127
 
1128
   We need to determine what K is.  Of our two bounds,
1129
 
1130
   .    b < a+(k+1)r	is satisfied for all k >= 0, by construction.
1131
 
1132
   The other bound is
1133
 
1134
   .    kr <= b
1135
 
1136
   This is always true if r = 0.  If r is not 0 (the usual case), then
1137
   K = floor((a+r-1)/r), is the maximum value for k.
1138
 
1139
   Therefore, the formula q'(x) = floor((ax+b)/z) yields the correct
1140
   answer for q(x) = floor(x/y) when x is in the range
1141
 
1142
   (0,(K+1)y-1)        K = floor((a+r-1)/r)
1143
 
1144
   To be most useful, we want (K+1)y-1 = (max x) >= 2**32-1 so that
1145
   the formula for q'(x) yields the correct value of q(x) for all x
1146
   representable by a single word in HPPA.
1147
 
1148
   We are also constrained in that computing the product (ax), adding
1149
   b, and dividing by z must all be done quickly, otherwise we will be
1150
   better off going through the general algorithm using the DS
1151
   instruction, which uses approximately 70 cycles.
1152
 
1153
   For each y, there is a choice of z which satisfies the constraints
1154
   for (K+1)y >= 2**32.  We may not, however, be able to satisfy the
1155
   timing constraints for arbitrary y.  It seems that z being equal to
1156
   a power of 2 or a power of 2 minus 1 is as good as we can do, since
1157
   it minimizes the time to do division by z.  We want the choice of z
1158
   to also result in a value for (a) that minimizes the computation of
1159
   the product (ax).  This is best achieved if (a) has a regular bit
1160
   pattern (so the multiplication can be done with shifts and adds).
1161
   The value of (a) also needs to be less than 2**32 so the product is
1162
   always guaranteed to fit in 2 words.
1163
 
1164
   In actual practice, the following should be done:
1165
 
1166
   1) For negative x, you should take the absolute value and remember
1167
   .  the fact so that the result can be negated.  This obviously does
1168
   .  not apply in the unsigned case.
1169
   2) For even y, you should factor out the power of 2 that divides y
1170
   .  and divide x by it.  You can then proceed by dividing by the
1171
   .  odd factor of y.
1172
 
1173
   Here is a table of some odd values of y, and corresponding choices
1174
   for z which are "good".
1175
 
1176
    y     z       r      a (hex)     max x (hex)
1177
 
1178
    3   2**32     1     55555555      100000001
1179
    5   2**32     1     33333333      100000003
1180
    7  2**24-1    0       249249     (infinite)
1181
    9  2**24-1    0       1c71c7     (infinite)
1182
   11  2**20-1    0        1745d     (infinite)
1183
   13  2**24-1    0       13b13b     (infinite)
1184
   15   2**32     1     11111111      10000000d
1185
   17   2**32     1      f0f0f0f      10000000f
1186
 
1187
   If r is 1, then b = a+r-1 = a.  This simplifies the computation
1188
   of (ax+b), since you can compute (x+1)(a) instead.  If r is 0,
1189
   then b = 0 is ok to use which simplifies (ax+b).
1190
 
1191
   The bit patterns for 55555555, 33333333, and 11111111 are obviously
1192
   very regular.  The bit patterns for the other values of a above are:
1193
 
1194
    y      (hex)          (binary)
1195
 
1196
    7     249249  001001001001001001001001  << regular >>
1197
    9     1c71c7  000111000111000111000111  << regular >>
1198
   11      1745d  000000010111010001011101  << irregular >>
1199
   13     13b13b  000100111011000100111011  << irregular >>
1200
 
1201
   The bit patterns for (a) corresponding to (y) of 11 and 13 may be
1202
   too irregular to warrant using this method.
1203
 
1204
   When z is a power of 2 minus 1, then the division by z is slightly
1205
   more complicated, involving an iterative solution.
1206
 
1207
   The code presented here solves division by 1 through 17, except for
1208
   11 and 13. There are algorithms for both signed and unsigned
1209
   quantities given.
1210
 
1211
   TIMINGS (cycles)
1212
 
1213
   divisor  positive  negative  unsigned
1214
 
1215
   .   1        2          2         2
1216
   .   2        4          4         2
1217
   .   3       19         21        19
1218
   .   4        4          4         2
1219
   .   5       18         22        19
1220
   .   6       19         22        19
1221
   .   8        4          4         2
1222
   .  10       18         19        17
1223
   .  12       18         20        18
1224
   .  15       16         18        16
1225
   .  16        4          4         2
1226
   .  17       16         18        16
1227
 
1228
   Now, the algorithm for 7, 9, and 14 is an iterative one.  That is,
1229
   a loop body is executed until the tentative quotient is 0.  The
1230
   number of times the loop body is executed varies depending on the
1231
   dividend, but is never more than two times.  If the dividend is
1232
   less than the divisor, then the loop body is not executed at all.
1233
   Each iteration adds 4 cycles to the timings.
1234
 
1235
   divisor  positive  negative  unsigned
1236
 
1237
   .   7       19+4n     20+4n     20+4n    n = number of iterations
1238
   .   9       21+4n     22+4n     21+4n
1239
   .  14       21+4n     22+4n     20+4n
1240
 
1241
   To give an idea of how the number of iterations varies, here is a
1242
   table of dividend versus number of iterations when dividing by 7.
1243
 
1244
   smallest      largest       required
1245
   dividend     dividend      iterations
1246
 
1247
   .    0             6              0
1248
   .    7        0x6ffffff          1
1249
   0x1000006    0xffffffff          2
1250
 
1251
   There is some overlap in the range of numbers requiring 1 and 2
1252
   iterations.  */
1253
 
1254
/* Shared register aliases for every divide-by-constant entry point
   below: t2=r1, x2=arg0 (r26) holds the dividend, t1=arg1 (r25) and
   x1=ret1 (r29) are scratch/result.  */
RDEFINE(t2,r1)
1255
RDEFINE(x2,arg0)        /*  r26 */
1256
RDEFINE(t1,arg1)        /*  r25 */
1257
RDEFINE(x1,ret1)        /*  r29 */
1258
 
1259
        SUBSPA_MILLI_DIV
1260
        ATTR_MILLI
1261
 
1262
        .proc
1263
        .callinfo       millicode
1264
        .entry
1265
/* NONE of these routines require a stack frame
1266
   ALL of these routines are unwindable from millicode  */
1267
 
1268
GSYM($$divide_by_constant)
1269
        .export $$divide_by_constant,millicode
1270
/*  Provides a "nice" label for the code covered by the unwind descriptor
1271
    for things like gprof.  */
1272
 
1273
/* DIVISION BY 2 (shift by 1) */
/* Signed division must round toward zero: bias a negative dividend
   by +1 before the arithmetic right shift (extrs = signed extract,
   here a 31-bit field ending at bit 30, i.e. shift right by 1).  */
GSYM($$divI_2)
1275
        .export         $$divI_2,millicode
1276
        comclr,>=       arg0,0,0                /* arg0 >= 0: nullify the bias */
1277
        addi            1,arg0,arg0             /* bias negative dividend by +1 */
1278
        MILLIRET
1279
        extrs           arg0,30,31,ret1         /* arithmetic shift right 1 (delay slot) */
1280
 
1281
 
1282
/* DIVISION BY 4 (shift by 2) */
/* Same round-toward-zero scheme as $$divI_2: bias by divisor-1 (+3)
   when the dividend is negative, then arithmetic shift right by 2.  */
GSYM($$divI_4)
1284
        .export         $$divI_4,millicode
1285
        comclr,>=       arg0,0,0                /* arg0 >= 0: nullify the bias */
1286
        addi            3,arg0,arg0             /* bias negative dividend by +3 */
1287
        MILLIRET
1288
        extrs           arg0,29,30,ret1         /* arithmetic shift right 2 (delay slot) */
1289
 
1290
 
1291
/* DIVISION BY 8 (shift by 3) */
/* Round toward zero: bias by +7 for negative dividends, then
   arithmetic shift right by 3.  */
GSYM($$divI_8)
1293
        .export         $$divI_8,millicode
1294
        comclr,>=       arg0,0,0                /* arg0 >= 0: nullify the bias */
1295
        addi            7,arg0,arg0             /* bias negative dividend by +7 */
1296
        MILLIRET
1297
        extrs           arg0,28,29,ret1         /* arithmetic shift right 3 (delay slot) */
1298
 
1299
/* DIVISION BY 16 (shift by 4) */
/* Round toward zero: bias by +15 for negative dividends, then
   arithmetic shift right by 4.  */
GSYM($$divI_16)
1301
        .export         $$divI_16,millicode
1302
        comclr,>=       arg0,0,0                /* arg0 >= 0: nullify the bias */
1303
        addi            15,arg0,arg0            /* bias negative dividend by +15 */
1304
        MILLIRET
1305
        extrs           arg0,27,28,ret1         /* arithmetic shift right 4 (delay slot) */
1306
 
1307
/****************************************************************************
1308
*
1309
*       DIVISION BY DIVISORS OF FFFFFFFF, and powers of 2 times these
1310
*
1311
*       includes 3,5,15,17 and also 6,10,12
1312
*
1313
****************************************************************************/
1314
 
1315
/* DIVISION BY 3 (use z = 2**32; a = 55555555) */
/* Multiply (x+1) by 5 here, then fall into the shared LSYM(pos)/
   LSYM(neg) tails which scale by 0x11, 0x101 and 0x10001; the
   combined factor is 5*0x11*0x101*0x10001 = 0x55555555, giving the
   64-bit product whose high word is the quotient.  */
1316
 
1317
GSYM($$divI_3)
1318
        .export         $$divI_3,millicode
1319
        comb,<,N        x2,0,LREF(neg3)
1320
 
1321
        addi            1,x2,x2         /* this cannot overflow */
1322
        extru           x2,1,2,x1       /* multiply by 5 to get started */
1323
        sh2add          x2,x2,x2
1324
        b               LREF(pos)
1325
        addc            x1,0,x1
1326
 
1327
LSYM(neg3)
1328
        subi            1,x2,x2         /* this cannot overflow */
1329
        extru           x2,1,2,x1       /* multiply by 5 to get started */
1330
        sh2add          x2,x2,x2
1331
        b               LREF(neg)
1332
        addc            x1,0,x1
1333
 
1334
GSYM($$divU_3)
1335
        .export         $$divU_3,millicode
1336
        addi            1,x2,x2         /* this CAN overflow */
1337
        addc            0,0,x1
1338
        shd             x1,x2,30,t1     /* multiply by 5 to get started */
1339
        sh2add          x2,x2,x2
1340
        b               LREF(pos)
1341
        addc            x1,t1,x1
1342
 
1343
/* DIVISION BY 5 (use z = 2**32; a = 33333333) */
/* Multiply (x+1) by 3 here, then the shared LSYM(pos)/LSYM(neg)
   tails scale by 0x11, 0x101 and 0x10001; combined factor is
   3*0x11*0x101*0x10001 = 0x33333333.  */
1344
 
1345
GSYM($$divI_5)
1346
        .export         $$divI_5,millicode
1347
        comb,<,N        x2,0,LREF(neg5)
1348
 
1349
        addi            3,x2,t1         /* this cannot overflow */
1350
        sh1add          x2,t1,x2        /* multiply by 3 to get started */
1351
        b               LREF(pos)
1352
        addc            0,0,x1
1353
 
1354
LSYM(neg5)
1355
        sub             0,x2,x2         /* negate x2                    */
1356
        addi            1,x2,x2         /* this cannot overflow */
1357
        shd             0,x2,31,x1      /* get top bit (can be 1)       */
1358
        sh1add          x2,x2,x2        /* multiply by 3 to get started */
1359
        b               LREF(neg)
1360
        addc            x1,0,x1
1361
 
1362
GSYM($$divU_5)
1363
        .export         $$divU_5,millicode
1364
        addi            1,x2,x2         /* this CAN overflow */
1365
        addc            0,0,x1
1366
        shd             x1,x2,31,t1     /* multiply by 3 to get started */
1367
        sh1add          x2,x2,x2
1368
        b               LREF(pos)
1369
        addc            t1,x1,x1
1370
 
/* DIVISION BY  6 (shift to divide by 2 then divide by 3) */
GSYM($$divI_6)
	.export		$$divI_6,millicode
	comb,<,N	x2,0,LREF(neg6)
	extru		x2,30,31,x2	/* divide by 2			*/
	addi		5,x2,t1		/* compute 5*(x2+1) = 5*x2+5	*/
	sh2add		x2,t1,x2	/* multiply by 5 to get started */
	b		LREF(pos)
	addc		0,0,x1

LSYM(neg6)
	subi		2,x2,x2		/* negate, divide by 2, and add 1 */
					/* negation and adding 1 are done */
					/* at the same time by the SUBI   */
	extru		x2,30,31,x2
	shd		0,x2,30,x1
	sh2add		x2,x2,x2	/* multiply by 5 to get started */
	b		LREF(neg)
	addc		x1,0,x1

/* Unsigned divide by 6: shift right one, then the divide-by-3 chain
   via the shared LREF(pos) tail.  */
GSYM($$divU_6)
	.export		$$divU_6,millicode
	extru		x2,30,31,x2	/* divide by 2 */
	addi		1,x2,x2		/* cannot carry */
	shd		0,x2,30,x1	/* multiply by 5 to get started */
	sh2add		x2,x2,x2
	b		LREF(pos)
	addc		x1,0,x1

/* DIVISION BY 10 (shift to divide by 2 then divide by 5) */
GSYM($$divU_10)
	.export		$$divU_10,millicode
	extru		x2,30,31,x2	/* divide by 2 */
	addi		3,x2,t1		/* compute 3*(x2+1) = (3*x2)+3	*/
	sh1add		x2,t1,x2	/* multiply by 3 to get started */
	addc		0,0,x1
/* Shared tail: multiply the 64-bit value in x1:x2 by 0x11, 0x101 and
   0x10001; entered (or branched to) by several $$div routines.  */
LSYM(pos)
	shd		x1,x2,28,t1	/* multiply by 0x11 */
	shd		x2,0,28,t2
	add		x2,t2,x2
	addc		x1,t1,x1
LSYM(pos_for_17)
	shd		x1,x2,24,t1	/* multiply by 0x101 */
	shd		x2,0,24,t2
	add		x2,t2,x2
	addc		x1,t1,x1

	shd		x1,x2,16,t1	/* multiply by 0x10001 */
	shd		x2,0,16,t2
	add		x2,t2,x2
	MILLIRET
	addc		x1,t1,x1

/* Signed divide by 10.  Negative dividends are negated, run through
   the same multiply chain (LREF(neg)), and the result is negated on
   exit.  */
GSYM($$divI_10)
	.export		$$divI_10,millicode
	comb,<		x2,0,LREF(neg10)
	copy		0,x1
	extru		x2,30,31,x2	/* divide by 2 */
	addib,TR	1,x2,LREF(pos)	/* add 1 (cannot overflow)     */
	sh1add		x2,x2,x2	/* multiply by 3 to get started */

LSYM(neg10)
	subi		2,x2,x2		/* negate, divide by 2, and add 1 */
					/* negation and adding 1 are done */
					/* at the same time by the SUBI   */
	extru		x2,30,31,x2
	sh1add		x2,x2,x2	/* multiply by 3 to get started */
/* Shared tail for the negated paths: same multiply chain as LREF(pos)
   but the final result is negated before returning.  */
LSYM(neg)
	shd		x1,x2,28,t1	/* multiply by 0x11 */
	shd		x2,0,28,t2
	add		x2,t2,x2
	addc		x1,t1,x1
LSYM(neg_for_17)
	shd		x1,x2,24,t1	/* multiply by 0x101 */
	shd		x2,0,24,t2
	add		x2,t2,x2
	addc		x1,t1,x1

	shd		x1,x2,16,t1	/* multiply by 0x10001 */
	shd		x2,0,16,t2
	add		x2,t2,x2
	addc		x1,t1,x1
	MILLIRET
	sub		0,x1,x1

/* DIVISION BY 12 (shift to divide by 4 then divide by 3) */
GSYM($$divI_12)
	.export		$$divI_12,millicode
	comb,<		x2,0,LREF(neg12)
	copy		0,x1
	extru		x2,29,30,x2	/* divide by 4			*/
	addib,tr	1,x2,LREF(pos)	/* compute 5*(x2+1) = 5*x2+5	*/
	sh2add		x2,x2,x2	/* multiply by 5 to get started */

LSYM(neg12)
	subi		4,x2,x2		/* negate, divide by 4, and add 1 */
					/* negation and adding 1 are done */
					/* at the same time by the SUBI   */
	extru		x2,29,30,x2
	b		LREF(neg)
	sh2add		x2,x2,x2	/* multiply by 5 to get started */

/* Unsigned divide by 12: shift right two, then the divide-by-3 chain
   via the shared LREF(pos) tail.  */
GSYM($$divU_12)
	.export		$$divU_12,millicode
	extru		x2,29,30,x2	/* divide by 4	 */
	addi		5,x2,t1		/* cannot carry */
	sh2add		x2,t1,x2	/* multiply by 5 to get started */
	b		LREF(pos)
	addc		0,0,x1

/* DIVISION BY 15 (use z = 2**32; a = 11111111) */
GSYM($$divI_15)
	.export		$$divI_15,millicode
	comb,<		x2,0,LREF(neg15)
	copy		0,x1
	addib,tr	1,x2,LREF(pos)+4
	shd		x1,x2,28,t1

LSYM(neg15)
	b		LREF(neg)
	subi		1,x2,x2

/* Unsigned divide by 15: add 1 and run the full LREF(pos) chain
   (0x11 * 0x101 * 0x10001 = 0x11111111).  */
GSYM($$divU_15)
	.export		$$divU_15,millicode
	addi		1,x2,x2		/* this CAN overflow */
	b		LREF(pos)
	addc		0,0,x1

/* DIVISION BY 17 (use z = 2**32; a =  f0f0f0f) */
GSYM($$divI_17)
	.export		$$divI_17,millicode
	comb,<,n	x2,0,LREF(neg17)
	addi		1,x2,x2		/* this cannot overflow */
	shd		0,x2,28,t1	/* multiply by 0xf to get started */
	shd		x2,0,28,t2
	sub		t2,x2,x2
	b		LREF(pos_for_17)
	subb		t1,0,x1

LSYM(neg17)
	subi		1,x2,x2		/* this cannot overflow */
	shd		0,x2,28,t1	/* multiply by 0xf to get started */
	shd		x2,0,28,t2
	sub		t2,x2,x2
	b		LREF(neg_for_17)
	subb		t1,0,x1

/* Unsigned divide by 17: multiply by 0xf, then the 0x101/0x10001
   steps via the shared LREF(pos_for_17) tail.  */
GSYM($$divU_17)
	.export		$$divU_17,millicode
	addi		1,x2,x2		/* this CAN overflow */
	addc		0,0,x1
	shd		x1,x2,28,t1	/* multiply by 0xf to get started */
LSYM(u17)
	shd		x2,0,28,t2
	sub		t2,x2,x2
	b		LREF(pos_for_17)
	subb		t1,x1,x1


/* DIVISION BY DIVISORS OF FFFFFF, and powers of 2 times these
   includes 7, 9 and also 14

   z = 2**24-1
   r = z mod x = 0

   so choose b = 0

   Also, in order to divide by z = 2**24-1, we approximate by dividing
   by (z+1) = 2**24 (which is easy), and then correcting.

   (ax) = (z+1)q' + r
   .    = zq' + (q'+r)

   So to compute (ax)/z, compute q' = (ax)/(z+1) and r = (ax) mod (z+1)
   Then the true remainder of (ax)/z is (q'+r).  Repeat the process
   with this new remainder, adding the tentative quotients together,
   until a tentative quotient is 0 (and then we are done).  There is
   one last correction to be done.  It is possible that (q'+r) = z.
   If so, then (q'+r)/(z+1) = 0 and it looks like we are done.  But,
   in fact, we need to add 1 more to the quotient.  Now, it turns
   out that this happens if and only if the original value x is
   an exact multiple of y.  So, to avoid a three instruction test at
   the end, instead use 1 instruction to add 1 to x at the beginning.  */
 
/* DIVISION BY 7 (use z = 2**24-1; a = 249249) */
GSYM($$divI_7)
	.export		$$divI_7,millicode
	comb,<,n	x2,0,LREF(neg7)
LSYM(7)
	addi		1,x2,x2		/* cannot overflow */
	shd		0,x2,29,x1
	sh3add		x2,x2,x2
	addc		x1,0,x1
LSYM(pos7)
	shd		x1,x2,26,t1
	shd		x2,0,26,t2
	add		x2,t2,x2
	addc		x1,t1,x1

	shd		x1,x2,20,t1
	shd		x2,0,20,t2
	add		x2,t2,x2
	addc		x1,t1,t1

	/* computed <t1,x2>.  Now divide it by (2**24 - 1)	*/

	copy		0,x1
	shd,=		t1,x2,24,t1	/* tentative quotient  */
LSYM(1)
	addb,tr		t1,x1,LREF(2)	/* add to previous quotient   */
	extru		x2,31,24,x2	/* new remainder (unadjusted) */

	MILLIRETN

LSYM(2)
	addb,tr		t1,x2,LREF(1)	/* adjust remainder */
	extru,=		x2,7,8,t1	/* new quotient     */

LSYM(neg7)
	subi		1,x2,x2		/* negate x2 and add 1 */
LSYM(8)
	shd		0,x2,29,x1
	sh3add		x2,x2,x2
	addc		x1,0,x1

LSYM(neg7_shift)
	shd		x1,x2,26,t1
	shd		x2,0,26,t2
	add		x2,t2,x2
	addc		x1,t1,x1

	shd		x1,x2,20,t1
	shd		x2,0,20,t2
	add		x2,t2,x2
	addc		x1,t1,t1

	/* computed <t1,x2>.  Now divide it by (2**24 - 1)	*/

	copy		0,x1
	shd,=		t1,x2,24,t1	/* tentative quotient  */
LSYM(3)
	addb,tr		t1,x1,LREF(4)	/* add to previous quotient   */
	extru		x2,31,24,x2	/* new remainder (unadjusted) */

	MILLIRET
	sub		0,x1,x1		/* negate result    */

LSYM(4)
	addb,tr		t1,x2,LREF(3)	/* adjust remainder */
	extru,=		x2,7,8,t1	/* new quotient     */

/* Unsigned divide by 7: multiply by 9, then join the signed path's
   LREF(pos7) multiply/correct chain.  */
GSYM($$divU_7)
	.export		$$divU_7,millicode
	addi		1,x2,x2		/* can carry */
	addc		0,0,x1
	shd		x1,x2,29,t1
	sh3add		x2,x2,x2
	b		LREF(pos7)
	addc		t1,x1,x1

/* DIVISION BY 9 (use z = 2**24-1; a = 1c71c7) */
GSYM($$divI_9)
	.export		$$divI_9,millicode
	comb,<,n	x2,0,LREF(neg9)
	addi		1,x2,x2		/* cannot overflow */
	shd		0,x2,29,t1
	shd		x2,0,29,t2
	sub		t2,x2,x2
	b		LREF(pos7)
	subb		t1,0,x1

LSYM(neg9)
	subi		1,x2,x2		/* negate and add 1 */
	shd		0,x2,29,t1
	shd		x2,0,29,t2
	sub		t2,x2,x2
	b		LREF(neg7_shift)
	subb		t1,0,x1

/* Unsigned divide by 9: multiply by 7 (8x - x), then join the
   LREF(pos7) chain.  */
GSYM($$divU_9)
	.export		$$divU_9,millicode
	addi		1,x2,x2		/* can carry */
	addc		0,0,x1
	shd		x1,x2,29,t1
	shd		x2,0,29,t2
	sub		t2,x2,x2
	b		LREF(pos7)
	subb		t1,x1,x1

/* DIVISION BY 14 (shift to divide by 2 then divide by 7) */
GSYM($$divI_14)
	.export		$$divI_14,millicode
	comb,<,n	x2,0,LREF(neg14)
GSYM($$divU_14)
	.export		$$divU_14,millicode
	b		LREF(7)		/* go to 7 case */
	extru		x2,30,31,x2	/* divide by 2	*/

LSYM(neg14)
	subi		2,x2,x2		/* negate (and add 2) */
	b		LREF(8)
	extru		x2,30,31,x2	/* divide by 2	      */
	.exit
	.procend
	.end
#endif

#ifdef L_mulI
/* VERSION "@(#)$$mulI $ Revision: 12.4 $ $ Date: 94/03/17 17:18:51 $" */
/******************************************************************************
This routine is used on PA2.0 processors when gcc -mno-fpregs is used

ROUTINE:	$$mulI


DESCRIPTION:

	$$mulI multiplies two single word integers, giving a single
	word result.


INPUT REGISTERS:

	arg0 = Operand 1
	arg1 = Operand 2
	r31  == return pc
	sr0  == return space when called externally


OUTPUT REGISTERS:

	arg0 = undefined
	arg1 = undefined
	ret1 = result

OTHER REGISTERS AFFECTED:

	r1   = undefined

SIDE EFFECTS:

	Causes a trap under the following conditions:  NONE
	Changes memory at the following places:  NONE

PERMISSIBLE CONTEXT:

	Unwindable
	Does not create a stack frame
	Is usable for internal or external microcode

DISCUSSION:

	Calls other millicode routines via mrp:  NONE
	Calls other millicode routines:  NONE

***************************************************************************/

/* Register aliases and shorthand macros for $$mulI.  The macro naming
   convention is "dst__expr", read as "dst = expr"; e.g. t0__3a0 sets
   t0 = 3*a0 and r__r_2t0 sets r = r + 2*t0.  The b_e_* macros branch
   to the corresponding LREF(e_*) epilogue stubs.  */
#define	a0	%arg0
#define	a1	%arg1
#define	t0	%r1
#define	r	%ret1

#define	a0__128a0	zdep	a0,24,25,a0
#define	a0__256a0	zdep	a0,23,24,a0
#define	a1_ne_0_b_l0	comb,<>	a1,0,LREF(l0)
#define	a1_ne_0_b_l1	comb,<>	a1,0,LREF(l1)
#define	a1_ne_0_b_l2	comb,<>	a1,0,LREF(l2)
#define	b_n_ret_t0	b,n	LREF(ret_t0)
#define	b_e_shift	b	LREF(e_shift)
#define	b_e_t0ma0	b	LREF(e_t0ma0)
#define	b_e_t0	b	LREF(e_t0)
#define	b_e_t0a0	b	LREF(e_t0a0)
#define	b_e_t02a0	b	LREF(e_t02a0)
#define	b_e_t04a0	b	LREF(e_t04a0)
#define	b_e_2t0	b	LREF(e_2t0)
#define	b_e_2t0a0	b	LREF(e_2t0a0)
#define	b_e_2t04a0	b	LREF(e2t04a0)
#define	b_e_3t0	b	LREF(e_3t0)
#define	b_e_4t0	b	LREF(e_4t0)
#define	b_e_4t0a0	b	LREF(e_4t0a0)
#define	b_e_4t08a0	b	LREF(e4t08a0)
#define	b_e_5t0	b	LREF(e_5t0)
#define	b_e_8t0	b	LREF(e_8t0)
#define	b_e_8t0a0	b	LREF(e_8t0a0)
#define	r__r_a0	add	r,a0,r
#define	r__r_2a0	sh1add	a0,r,r
#define	r__r_4a0	sh2add	a0,r,r
#define	r__r_8a0	sh3add	a0,r,r
#define	r__r_t0	add	r,t0,r
#define	r__r_2t0	sh1add	t0,r,r
#define	r__r_4t0	sh2add	t0,r,r
#define	r__r_8t0	sh3add	t0,r,r
#define	t0__3a0	sh1add	a0,a0,t0
#define	t0__4a0	sh2add	a0,0,t0
#define	t0__5a0	sh2add	a0,a0,t0
#define	t0__8a0	sh3add	a0,0,t0
#define	t0__9a0	sh3add	a0,a0,t0
#define	t0__16a0	zdep	a0,27,28,t0
#define	t0__32a0	zdep	a0,26,27,t0
#define	t0__64a0	zdep	a0,25,26,t0
#define	t0__128a0	zdep	a0,24,25,t0
#define	t0__t0ma0	sub	t0,a0,t0
#define	t0__t0_a0	add	t0,a0,t0
#define	t0__t0_2a0	sh1add	a0,t0,t0
#define	t0__t0_4a0	sh2add	a0,t0,t0
#define	t0__t0_8a0	sh3add	a0,t0,t0
#define	t0__2t0_a0	sh1add	t0,a0,t0
#define	t0__3t0	sh1add	t0,t0,t0
#define	t0__4t0	sh2add	t0,0,t0
#define	t0__4t0_a0	sh2add	t0,a0,t0
#define	t0__5t0	sh2add	t0,t0,t0
#define	t0__8t0	sh3add	t0,0,t0
#define	t0__8t0_a0	sh3add	t0,a0,t0
#define	t0__9t0	sh3add	t0,t0,t0
#define	t0__16t0	zdep	t0,27,28,t0
#define	t0__32t0	zdep	t0,26,27,t0
#define	t0__256a0	zdep	a0,23,24,t0

1791
 
1792
        SUBSPA_MILLI
1793
        ATTR_MILLI
1794
        .align 16
1795
        .proc
1796
        .callinfo millicode
1797
        .export $$mulI,millicode
1798
GSYM($$mulI)
1799
        combt,<<=	a1,a0,LREF(l4)	/* swap args if unsigned a1>a0 */
1800
        copy            0,r             /* zero out the result */
1801
        xor             a0,a1,a0        /* swap a0 & a1 using the */
1802
        xor             a0,a1,a1        /*  old xor trick */
1803
        xor             a0,a1,a0
1804
LSYM(l4)
1805
        combt,<=	0,a0,LREF(l3)		/* if a0>=0 then proceed like unsigned */
1806
        zdep            a1,30,8,t0      /* t0 = (a1&0xff)<<1 ********* */
1807
        sub,>           0,a1,t0         /* otherwise negate both and */
1808
        combt,<=,n      a0,t0,LREF(l2)  /*  swap back if |a0|<|a1| */
1809
        sub             0,a0,a1
1810
        movb,tr,n       t0,a0,LREF(l2)  /* 10th inst.  */
1811
 
1812
LSYM(l0)        r__r_t0                         /* add in this partial product */
1813
LSYM(l1)        a0__256a0                       /* a0 <<= 8 ****************** */
1814
LSYM(l2)        zdep            a1,30,8,t0      /* t0 = (a1&0xff)<<1 ********* */
1815
LSYM(l3)        blr             t0,0            /* case on these 8 bits ****** */
1816
                extru           a1,23,24,a1     /* a1 >>= 8 ****************** */
1817
 
1818
/*16 insts before this.  */
1819
/*                        a0 <<= 8 ************************** */
1820
LSYM(x0)        a1_ne_0_b_l2    ! a0__256a0     ! MILLIRETN     ! nop
1821
LSYM(x1)        a1_ne_0_b_l1    ! r__r_a0       ! MILLIRETN     ! nop
1822
LSYM(x2)        a1_ne_0_b_l1    ! r__r_2a0      ! MILLIRETN     ! nop
1823
LSYM(x3)        a1_ne_0_b_l0    ! t0__3a0       ! MILLIRET      ! r__r_t0
1824
LSYM(x4)        a1_ne_0_b_l1    ! r__r_4a0      ! MILLIRETN     ! nop
1825
LSYM(x5)        a1_ne_0_b_l0    ! t0__5a0       ! MILLIRET      ! r__r_t0
1826
LSYM(x6)        t0__3a0         ! a1_ne_0_b_l1  ! r__r_2t0      ! MILLIRETN
1827
LSYM(x7)        t0__3a0         ! a1_ne_0_b_l0  ! r__r_4a0      ! b_n_ret_t0
1828
LSYM(x8)        a1_ne_0_b_l1    ! r__r_8a0      ! MILLIRETN     ! nop
1829
LSYM(x9)        a1_ne_0_b_l0    ! t0__9a0       ! MILLIRET      ! r__r_t0
1830
LSYM(x10)       t0__5a0         ! a1_ne_0_b_l1  ! r__r_2t0      ! MILLIRETN
1831
LSYM(x11)       t0__3a0         ! a1_ne_0_b_l0  ! r__r_8a0      ! b_n_ret_t0
1832
LSYM(x12)       t0__3a0         ! a1_ne_0_b_l1  ! r__r_4t0      ! MILLIRETN
1833
LSYM(x13)       t0__5a0         ! a1_ne_0_b_l0  ! r__r_8a0      ! b_n_ret_t0
1834
LSYM(x14)       t0__3a0         ! t0__2t0_a0    ! b_e_shift     ! r__r_2t0
1835
LSYM(x15)       t0__5a0         ! a1_ne_0_b_l0  ! t0__3t0       ! b_n_ret_t0
1836
LSYM(x16)       t0__16a0        ! a1_ne_0_b_l1  ! r__r_t0       ! MILLIRETN
1837
LSYM(x17)       t0__9a0         ! a1_ne_0_b_l0  ! t0__t0_8a0    ! b_n_ret_t0
1838
LSYM(x18)       t0__9a0         ! a1_ne_0_b_l1  ! r__r_2t0      ! MILLIRETN
1839
LSYM(x19)       t0__9a0         ! a1_ne_0_b_l0  ! t0__2t0_a0    ! b_n_ret_t0
1840
LSYM(x20)       t0__5a0         ! a1_ne_0_b_l1  ! r__r_4t0      ! MILLIRETN
1841
LSYM(x21)       t0__5a0         ! a1_ne_0_b_l0  ! t0__4t0_a0    ! b_n_ret_t0
1842
LSYM(x22)       t0__5a0         ! t0__2t0_a0    ! b_e_shift     ! r__r_2t0
1843
LSYM(x23)       t0__5a0         ! t0__2t0_a0    ! b_e_t0        ! t0__2t0_a0
1844
LSYM(x24)       t0__3a0         ! a1_ne_0_b_l1  ! r__r_8t0      ! MILLIRETN
1845
LSYM(x25)       t0__5a0         ! a1_ne_0_b_l0  ! t0__5t0       ! b_n_ret_t0
1846
LSYM(x26)       t0__3a0         ! t0__4t0_a0    ! b_e_shift     ! r__r_2t0
1847
LSYM(x27)       t0__3a0         ! a1_ne_0_b_l0  ! t0__9t0       ! b_n_ret_t0
1848
LSYM(x28)       t0__3a0         ! t0__2t0_a0    ! b_e_shift     ! r__r_4t0
1849
LSYM(x29)       t0__3a0         ! t0__2t0_a0    ! b_e_t0        ! t0__4t0_a0
1850
LSYM(x30)       t0__5a0         ! t0__3t0       ! b_e_shift     ! r__r_2t0
1851
LSYM(x31)       t0__32a0        ! a1_ne_0_b_l0  ! t0__t0ma0     ! b_n_ret_t0
1852
LSYM(x32)       t0__32a0        ! a1_ne_0_b_l1  ! r__r_t0       ! MILLIRETN
1853
LSYM(x33)       t0__8a0         ! a1_ne_0_b_l0  ! t0__4t0_a0    ! b_n_ret_t0
1854
LSYM(x34)       t0__16a0        ! t0__t0_a0     ! b_e_shift     ! r__r_2t0
1855
LSYM(x35)       t0__9a0         ! t0__3t0       ! b_e_t0        ! t0__t0_8a0
1856
LSYM(x36)       t0__9a0         ! a1_ne_0_b_l1  ! r__r_4t0      ! MILLIRETN
1857
LSYM(x37)       t0__9a0         ! a1_ne_0_b_l0  ! t0__4t0_a0    ! b_n_ret_t0
1858
LSYM(x38)       t0__9a0         ! t0__2t0_a0    ! b_e_shift     ! r__r_2t0
1859
LSYM(x39)       t0__9a0         ! t0__2t0_a0    ! b_e_t0        ! t0__2t0_a0
1860
LSYM(x40)       t0__5a0         ! a1_ne_0_b_l1  ! r__r_8t0      ! MILLIRETN
1861
LSYM(x41)       t0__5a0         ! a1_ne_0_b_l0  ! t0__8t0_a0    ! b_n_ret_t0
1862
LSYM(x42)       t0__5a0         ! t0__4t0_a0    ! b_e_shift     ! r__r_2t0
1863
LSYM(x43)       t0__5a0         ! t0__4t0_a0    ! b_e_t0        ! t0__2t0_a0
1864
LSYM(x44)       t0__5a0         ! t0__2t0_a0    ! b_e_shift     ! r__r_4t0
1865
LSYM(x45)       t0__9a0         ! a1_ne_0_b_l0  ! t0__5t0       ! b_n_ret_t0
1866
LSYM(x46)       t0__9a0         ! t0__5t0       ! b_e_t0        ! t0__t0_a0
1867
LSYM(x47)       t0__9a0         ! t0__5t0       ! b_e_t0        ! t0__t0_2a0
1868
LSYM(x48)       t0__3a0         ! a1_ne_0_b_l0  ! t0__16t0      ! b_n_ret_t0
1869
LSYM(x49)       t0__9a0         ! t0__5t0       ! b_e_t0        ! t0__t0_4a0
1870
LSYM(x50)       t0__5a0         ! t0__5t0       ! b_e_shift     ! r__r_2t0
1871
LSYM(x51)       t0__9a0         ! t0__t0_8a0    ! b_e_t0        ! t0__3t0
1872
LSYM(x52)       t0__3a0         ! t0__4t0_a0    ! b_e_shift     ! r__r_4t0
1873
LSYM(x53)       t0__3a0         ! t0__4t0_a0    ! b_e_t0        ! t0__4t0_a0
1874
LSYM(x54)       t0__9a0         ! t0__3t0       ! b_e_shift     ! r__r_2t0
1875
LSYM(x55)       t0__9a0         ! t0__3t0       ! b_e_t0        ! t0__2t0_a0
1876
LSYM(x56)       t0__3a0         ! t0__2t0_a0    ! b_e_shift     ! r__r_8t0
1877
LSYM(x57)       t0__9a0         ! t0__2t0_a0    ! b_e_t0        ! t0__3t0
1878
LSYM(x58)       t0__3a0         ! t0__2t0_a0    ! b_e_2t0       ! t0__4t0_a0
1879
LSYM(x59)       t0__9a0         ! t0__2t0_a0    ! b_e_t02a0     ! t0__3t0
1880
LSYM(x60)       t0__5a0         ! t0__3t0       ! b_e_shift     ! r__r_4t0
1881
LSYM(x61)       t0__5a0         ! t0__3t0       ! b_e_t0        ! t0__4t0_a0
1882
LSYM(x62)       t0__32a0        ! t0__t0ma0     ! b_e_shift     ! r__r_2t0
1883
LSYM(x63)       t0__64a0        ! a1_ne_0_b_l0  ! t0__t0ma0     ! b_n_ret_t0
1884
LSYM(x64)       t0__64a0        ! a1_ne_0_b_l1  ! r__r_t0       ! MILLIRETN
1885
LSYM(x65)       t0__8a0         ! a1_ne_0_b_l0  ! t0__8t0_a0    ! b_n_ret_t0
1886
LSYM(x66)       t0__32a0        ! t0__t0_a0     ! b_e_shift     ! r__r_2t0
1887
LSYM(x67)       t0__8a0         ! t0__4t0_a0    ! b_e_t0        ! t0__2t0_a0
1888
LSYM(x68)       t0__8a0         ! t0__2t0_a0    ! b_e_shift     ! r__r_4t0
1889
LSYM(x69)       t0__8a0         ! t0__2t0_a0    ! b_e_t0        ! t0__4t0_a0
1890
LSYM(x70)       t0__64a0        ! t0__t0_4a0    ! b_e_t0        ! t0__t0_2a0
1891
LSYM(x71)       t0__9a0         ! t0__8t0       ! b_e_t0        ! t0__t0ma0
1892
LSYM(x72)       t0__9a0         ! a1_ne_0_b_l1  ! r__r_8t0      ! MILLIRETN
1893
LSYM(x73)       t0__9a0         ! t0__8t0_a0    ! b_e_shift     ! r__r_t0
1894
LSYM(x74)       t0__9a0         ! t0__4t0_a0    ! b_e_shift     ! r__r_2t0
1895
LSYM(x75)       t0__9a0         ! t0__4t0_a0    ! b_e_t0        ! t0__2t0_a0
1896
LSYM(x76)       t0__9a0         ! t0__2t0_a0    ! b_e_shift     ! r__r_4t0
1897
LSYM(x77)       t0__9a0         ! t0__2t0_a0    ! b_e_t0        ! t0__4t0_a0
1898
LSYM(x78)       t0__9a0         ! t0__2t0_a0    ! b_e_2t0       ! t0__2t0_a0
1899
LSYM(x79)       t0__16a0        ! t0__5t0       ! b_e_t0        ! t0__t0ma0
1900
LSYM(x80)       t0__16a0        ! t0__5t0       ! b_e_shift     ! r__r_t0
1901
LSYM(x81)       t0__9a0         ! t0__9t0       ! b_e_shift     ! r__r_t0
1902
LSYM(x82)       t0__5a0         ! t0__8t0_a0    ! b_e_shift     ! r__r_2t0
1903
LSYM(x83)       t0__5a0         ! t0__8t0_a0    ! b_e_t0        ! t0__2t0_a0
1904
LSYM(x84)       t0__5a0         ! t0__4t0_a0    ! b_e_shift     ! r__r_4t0
1905
LSYM(x85)       t0__8a0         ! t0__2t0_a0    ! b_e_t0        ! t0__5t0
1906
LSYM(x86)       t0__5a0         ! t0__4t0_a0    ! b_e_2t0       ! t0__2t0_a0
1907
LSYM(x87)       t0__9a0         ! t0__9t0       ! b_e_t02a0     ! t0__t0_4a0
1908
LSYM(x88)       t0__5a0         ! t0__2t0_a0    ! b_e_shift     ! r__r_8t0
1909
LSYM(x89)       t0__5a0         ! t0__2t0_a0    ! b_e_t0        ! t0__8t0_a0
1910
LSYM(x90)       t0__9a0         ! t0__5t0       ! b_e_shift     ! r__r_2t0
1911
LSYM(x91)       t0__9a0         ! t0__5t0       ! b_e_t0        ! t0__2t0_a0
1912
LSYM(x92)       t0__5a0         ! t0__2t0_a0    ! b_e_4t0       ! t0__2t0_a0
1913
LSYM(x93)       t0__32a0        ! t0__t0ma0     ! b_e_t0        ! t0__3t0
1914
LSYM(x94)       t0__9a0         ! t0__5t0       ! b_e_2t0       ! t0__t0_2a0
1915
LSYM(x95)       t0__9a0         ! t0__2t0_a0    ! b_e_t0        ! t0__5t0
1916
LSYM(x96)       t0__8a0         ! t0__3t0       ! b_e_shift     ! r__r_4t0
1917
LSYM(x97)       t0__8a0         ! t0__3t0       ! b_e_t0        ! t0__4t0_a0
1918
LSYM(x98)       t0__32a0        ! t0__3t0       ! b_e_t0        ! t0__t0_2a0
1919
LSYM(x99)       t0__8a0         ! t0__4t0_a0    ! b_e_t0        ! t0__3t0
1920
LSYM(x100)      t0__5a0         ! t0__5t0       ! b_e_shift     ! r__r_4t0
1921
LSYM(x101)      t0__5a0         ! t0__5t0       ! b_e_t0        ! t0__4t0_a0
1922
LSYM(x102)      t0__32a0        ! t0__t0_2a0    ! b_e_t0        ! t0__3t0
1923
LSYM(x103)      t0__5a0         ! t0__5t0       ! b_e_t02a0     ! t0__4t0_a0
1924
LSYM(x104)      t0__3a0         ! t0__4t0_a0    ! b_e_shift     ! r__r_8t0
1925
LSYM(x105)      t0__5a0         ! t0__4t0_a0    ! b_e_t0        ! t0__5t0
1926
LSYM(x106)      t0__3a0         ! t0__4t0_a0    ! b_e_2t0       ! t0__4t0_a0
1927
LSYM(x107)      t0__9a0         ! t0__t0_4a0    ! b_e_t02a0     ! t0__8t0_a0
1928
LSYM(x108)      t0__9a0         ! t0__3t0       ! b_e_shift     ! r__r_4t0
1929
LSYM(x109)      t0__9a0         ! t0__3t0       ! b_e_t0        ! t0__4t0_a0
1930
LSYM(x110)      t0__9a0         ! t0__3t0       ! b_e_2t0       ! t0__2t0_a0
1931
LSYM(x111)      t0__9a0         ! t0__4t0_a0    ! b_e_t0        ! t0__3t0
1932
LSYM(x112)      t0__3a0         ! t0__2t0_a0    ! b_e_t0        ! t0__16t0
1933
LSYM(x113)      t0__9a0         ! t0__4t0_a0    ! b_e_t02a0     ! t0__3t0
1934
LSYM(x114)      t0__9a0         ! t0__2t0_a0    ! b_e_2t0       ! t0__3t0
1935
LSYM(x115)      t0__9a0         ! t0__2t0_a0    ! b_e_2t0a0     ! t0__3t0
1936
LSYM(x116)      t0__3a0         ! t0__2t0_a0    ! b_e_4t0       ! t0__4t0_a0
1937
LSYM(x117)      t0__3a0         ! t0__4t0_a0    ! b_e_t0        ! t0__9t0
1938
LSYM(x118)      t0__3a0         ! t0__4t0_a0    ! b_e_t0a0      ! t0__9t0
1939
LSYM(x119)      t0__3a0         ! t0__4t0_a0    ! b_e_t02a0     ! t0__9t0
1940
LSYM(x120)      t0__5a0         ! t0__3t0       ! b_e_shift     ! r__r_8t0
1941
LSYM(x121)      t0__5a0         ! t0__3t0       ! b_e_t0        ! t0__8t0_a0
1942
LSYM(x122)      t0__5a0         ! t0__3t0       ! b_e_2t0       ! t0__4t0_a0
1943
LSYM(x123)      t0__5a0         ! t0__8t0_a0    ! b_e_t0        ! t0__3t0
1944
LSYM(x124)      t0__32a0        ! t0__t0ma0     ! b_e_shift     ! r__r_4t0
1945
LSYM(x125)      t0__5a0         ! t0__5t0       ! b_e_t0        ! t0__5t0
1946
LSYM(x126)      t0__64a0        ! t0__t0ma0     ! b_e_shift     ! r__r_2t0
1947
LSYM(x127)      t0__128a0       ! a1_ne_0_b_l0  ! t0__t0ma0     ! b_n_ret_t0
1948
LSYM(x128)      t0__128a0       ! a1_ne_0_b_l1  ! r__r_t0       ! MILLIRETN
1949
LSYM(x129)      t0__128a0       ! a1_ne_0_b_l0  ! t0__t0_a0     ! b_n_ret_t0
1950
LSYM(x130)      t0__64a0        ! t0__t0_a0     ! b_e_shift     ! r__r_2t0
1951
LSYM(x131)      t0__8a0         ! t0__8t0_a0    ! b_e_t0        ! t0__2t0_a0
1952
LSYM(x132)      t0__8a0         ! t0__4t0_a0    ! b_e_shift     ! r__r_4t0
1953
LSYM(x133)      t0__8a0         ! t0__4t0_a0    ! b_e_t0        ! t0__4t0_a0
1954
LSYM(x134)      t0__8a0         ! t0__4t0_a0    ! b_e_2t0       ! t0__2t0_a0
1955
LSYM(x135)      t0__9a0         ! t0__5t0       ! b_e_t0        ! t0__3t0
1956
LSYM(x136)      t0__8a0         ! t0__2t0_a0    ! b_e_shift     ! r__r_8t0
1957
LSYM(x137)      t0__8a0         ! t0__2t0_a0    ! b_e_t0        ! t0__8t0_a0
1958
LSYM(x138)      t0__8a0         ! t0__2t0_a0    ! b_e_2t0       ! t0__4t0_a0
1959
LSYM(x139)      t0__8a0         ! t0__2t0_a0    ! b_e_2t0a0     ! t0__4t0_a0
1960
LSYM(x140)      t0__3a0         ! t0__2t0_a0    ! b_e_4t0       ! t0__5t0
1961
LSYM(x141)      t0__8a0         ! t0__2t0_a0    ! b_e_4t0a0     ! t0__2t0_a0
1962
LSYM(x142)      t0__9a0         ! t0__8t0       ! b_e_2t0       ! t0__t0ma0
1963
LSYM(x143)      t0__16a0        ! t0__9t0       ! b_e_t0        ! t0__t0ma0
1964
LSYM(x144)      t0__9a0         ! t0__8t0       ! b_e_shift     ! r__r_2t0
1965
LSYM(x145)      t0__9a0         ! t0__8t0       ! b_e_t0        ! t0__2t0_a0
1966
LSYM(x146)      t0__9a0         ! t0__8t0_a0    ! b_e_shift     ! r__r_2t0
1967
LSYM(x147)      t0__9a0         ! t0__8t0_a0    ! b_e_t0        ! t0__2t0_a0
1968
LSYM(x148)      t0__9a0         ! t0__4t0_a0    ! b_e_shift     ! r__r_4t0
1969
LSYM(x149)      t0__9a0         ! t0__4t0_a0    ! b_e_t0        ! t0__4t0_a0
1970
LSYM(x150)      t0__9a0         ! t0__4t0_a0    ! b_e_2t0       ! t0__2t0_a0
1971
LSYM(x151)      t0__9a0         ! t0__4t0_a0    ! b_e_2t0a0     ! t0__2t0_a0
1972
LSYM(x152)      t0__9a0         ! t0__2t0_a0    ! b_e_shift     ! r__r_8t0
1973
LSYM(x153)      t0__9a0         ! t0__2t0_a0    ! b_e_t0        ! t0__8t0_a0
1974
LSYM(x154)      t0__9a0         ! t0__2t0_a0    ! b_e_2t0       ! t0__4t0_a0
1975
LSYM(x155)      t0__32a0        ! t0__t0ma0     ! b_e_t0        ! t0__5t0
1976
LSYM(x156)      t0__9a0         ! t0__2t0_a0    ! b_e_4t0       ! t0__2t0_a0
1977
LSYM(x157)      t0__32a0        ! t0__t0ma0     ! b_e_t02a0     ! t0__5t0
1978
LSYM(x158)      t0__16a0        ! t0__5t0       ! b_e_2t0       ! t0__t0ma0
1979
LSYM(x159)      t0__32a0        ! t0__5t0       ! b_e_t0        ! t0__t0ma0
1980
LSYM(x160)      t0__5a0         ! t0__4t0       ! b_e_shift     ! r__r_8t0
1981
LSYM(x161)      t0__8a0         ! t0__5t0       ! b_e_t0        ! t0__4t0_a0
1982
LSYM(x162)      t0__9a0         ! t0__9t0       ! b_e_shift     ! r__r_2t0
1983
LSYM(x163)      t0__9a0         ! t0__9t0       ! b_e_t0        ! t0__2t0_a0
1984
LSYM(x164)      t0__5a0         ! t0__8t0_a0    ! b_e_shift     ! r__r_4t0
1985
LSYM(x165)      t0__8a0         ! t0__4t0_a0    ! b_e_t0        ! t0__5t0
1986
LSYM(x166)      t0__5a0         ! t0__8t0_a0    ! b_e_2t0       ! t0__2t0_a0
1987
LSYM(x167)      t0__5a0         ! t0__8t0_a0    ! b_e_2t0a0     ! t0__2t0_a0
1988
LSYM(x168)      t0__5a0         ! t0__4t0_a0    ! b_e_shift     ! r__r_8t0
1989
LSYM(x169)      t0__5a0         ! t0__4t0_a0    ! b_e_t0        ! t0__8t0_a0
1990
LSYM(x170)      t0__32a0        ! t0__t0_2a0    ! b_e_t0        ! t0__5t0
1991
LSYM(x171)      t0__9a0         ! t0__2t0_a0    ! b_e_t0        ! t0__9t0
1992
LSYM(x172)      t0__5a0         ! t0__4t0_a0    ! b_e_4t0       ! t0__2t0_a0
1993
LSYM(x173)      t0__9a0         ! t0__2t0_a0    ! b_e_t02a0     ! t0__9t0
1994
LSYM(x174)      t0__32a0        ! t0__t0_2a0    ! b_e_t04a0     ! t0__5t0
1995
LSYM(x175)      t0__8a0         ! t0__2t0_a0    ! b_e_5t0       ! t0__2t0_a0
1996
LSYM(x176)      t0__5a0         ! t0__4t0_a0    ! b_e_8t0       ! t0__t0_a0
1997
LSYM(x177)      t0__5a0         ! t0__4t0_a0    ! b_e_8t0a0     ! t0__t0_a0
1998
LSYM(x178)      t0__5a0         ! t0__2t0_a0    ! b_e_2t0       ! t0__8t0_a0
1999
LSYM(x179)      t0__5a0         ! t0__2t0_a0    ! b_e_2t0a0     ! t0__8t0_a0
2000
LSYM(x180)      t0__9a0         ! t0__5t0       ! b_e_shift     ! r__r_4t0
2001
LSYM(x181)      t0__9a0         ! t0__5t0       ! b_e_t0        ! t0__4t0_a0
2002
LSYM(x182)      t0__9a0         ! t0__5t0       ! b_e_2t0       ! t0__2t0_a0
2003
LSYM(x183)      t0__9a0         ! t0__5t0       ! b_e_2t0a0     ! t0__2t0_a0
2004
LSYM(x184)      t0__5a0         ! t0__9t0       ! b_e_4t0       ! t0__t0_a0
2005
LSYM(x185)      t0__9a0         ! t0__4t0_a0    ! b_e_t0        ! t0__5t0
2006
LSYM(x186)      t0__32a0        ! t0__t0ma0     ! b_e_2t0       ! t0__3t0
2007
LSYM(x187)      t0__9a0         ! t0__4t0_a0    ! b_e_t02a0     ! t0__5t0
2008
LSYM(x188)      t0__9a0         ! t0__5t0       ! b_e_4t0       ! t0__t0_2a0
2009
LSYM(x189)      t0__5a0         ! t0__4t0_a0    ! b_e_t0        ! t0__9t0
2010
LSYM(x190)      t0__9a0         ! t0__2t0_a0    ! b_e_2t0       ! t0__5t0
2011
LSYM(x191)      t0__64a0        ! t0__3t0       ! b_e_t0        ! t0__t0ma0
2012
LSYM(x192)      t0__8a0         ! t0__3t0       ! b_e_shift     ! r__r_8t0
2013
LSYM(x193)      t0__8a0         ! t0__3t0       ! b_e_t0        ! t0__8t0_a0
2014
LSYM(x194)      t0__8a0         ! t0__3t0       ! b_e_2t0       ! t0__4t0_a0
2015
LSYM(x195)      t0__8a0         ! t0__8t0_a0    ! b_e_t0        ! t0__3t0
2016
LSYM(x196)      t0__8a0         ! t0__3t0       ! b_e_4t0       ! t0__2t0_a0
2017
LSYM(x197)      t0__8a0         ! t0__3t0       ! b_e_4t0a0     ! t0__2t0_a0
2018
LSYM(x198)      t0__64a0        ! t0__t0_2a0    ! b_e_t0        ! t0__3t0
2019
LSYM(x199)      t0__8a0         ! t0__4t0_a0    ! b_e_2t0a0     ! t0__3t0
2020
LSYM(x200)      t0__5a0         ! t0__5t0       ! b_e_shift     ! r__r_8t0
2021
LSYM(x201)      t0__5a0         ! t0__5t0       ! b_e_t0        ! t0__8t0_a0
2022
LSYM(x202)      t0__5a0         ! t0__5t0       ! b_e_2t0       ! t0__4t0_a0
2023
LSYM(x203)      t0__5a0         ! t0__5t0       ! b_e_2t0a0     ! t0__4t0_a0
2024
LSYM(x204)      t0__8a0         ! t0__2t0_a0    ! b_e_4t0       ! t0__3t0
2025
LSYM(x205)      t0__5a0         ! t0__8t0_a0    ! b_e_t0        ! t0__5t0
2026
LSYM(x206)      t0__64a0        ! t0__t0_4a0    ! b_e_t02a0     ! t0__3t0
2027
LSYM(x207)      t0__8a0         ! t0__2t0_a0    ! b_e_3t0       ! t0__4t0_a0
2028
LSYM(x208)      t0__5a0         ! t0__5t0       ! b_e_8t0       ! t0__t0_a0
2029
LSYM(x209)      t0__5a0         ! t0__5t0       ! b_e_8t0a0     ! t0__t0_a0
2030
LSYM(x210)      t0__5a0         ! t0__4t0_a0    ! b_e_2t0       ! t0__5t0
2031
LSYM(x211)      t0__5a0         ! t0__4t0_a0    ! b_e_2t0a0     ! t0__5t0
2032
LSYM(x212)      t0__3a0         ! t0__4t0_a0    ! b_e_4t0       ! t0__4t0_a0
2033
LSYM(x213)      t0__3a0         ! t0__4t0_a0    ! b_e_4t0a0     ! t0__4t0_a0
2034
LSYM(x214)      t0__9a0         ! t0__t0_4a0    ! b_e_2t04a0    ! t0__8t0_a0
2035
LSYM(x215)      t0__5a0         ! t0__4t0_a0    ! b_e_5t0       ! t0__2t0_a0
2036
LSYM(x216)      t0__9a0         ! t0__3t0       ! b_e_shift     ! r__r_8t0
2037
LSYM(x217)      t0__9a0         ! t0__3t0       ! b_e_t0        ! t0__8t0_a0
2038
LSYM(x218)      t0__9a0         ! t0__3t0       ! b_e_2t0       ! t0__4t0_a0
2039
LSYM(x219)      t0__9a0         ! t0__8t0_a0    ! b_e_t0        ! t0__3t0
2040
LSYM(x220)      t0__3a0         ! t0__9t0       ! b_e_4t0       ! t0__2t0_a0
2041
LSYM(x221)      t0__3a0         ! t0__9t0       ! b_e_4t0a0     ! t0__2t0_a0
2042
LSYM(x222)      t0__9a0         ! t0__4t0_a0    ! b_e_2t0       ! t0__3t0
2043
LSYM(x223)      t0__9a0         ! t0__4t0_a0    ! b_e_2t0a0     ! t0__3t0
2044
LSYM(x224)      t0__9a0         ! t0__3t0       ! b_e_8t0       ! t0__t0_a0
2045
LSYM(x225)      t0__9a0         ! t0__5t0       ! b_e_t0        ! t0__5t0
2046
LSYM(x226)      t0__3a0         ! t0__2t0_a0    ! b_e_t02a0     ! t0__32t0
2047
LSYM(x227)      t0__9a0         ! t0__5t0       ! b_e_t02a0     ! t0__5t0
2048
LSYM(x228)      t0__9a0         ! t0__2t0_a0    ! b_e_4t0       ! t0__3t0
2049
LSYM(x229)      t0__9a0         ! t0__2t0_a0    ! b_e_4t0a0     ! t0__3t0
2050
LSYM(x230)      t0__9a0         ! t0__5t0       ! b_e_5t0       ! t0__t0_a0
2051
LSYM(x231)      t0__9a0         ! t0__2t0_a0    ! b_e_3t0       ! t0__4t0_a0
2052
LSYM(x232)      t0__3a0         ! t0__2t0_a0    ! b_e_8t0       ! t0__4t0_a0
2053
LSYM(x233)      t0__3a0         ! t0__2t0_a0    ! b_e_8t0a0     ! t0__4t0_a0
2054
LSYM(x234)      t0__3a0         ! t0__4t0_a0    ! b_e_2t0       ! t0__9t0
2055
LSYM(x235)      t0__3a0         ! t0__4t0_a0    ! b_e_2t0a0     ! t0__9t0
2056
LSYM(x236)      t0__9a0         ! t0__2t0_a0    ! b_e_4t08a0    ! t0__3t0
2057
LSYM(x237)      t0__16a0        ! t0__5t0       ! b_e_3t0       ! t0__t0ma0
2058
LSYM(x238)      t0__3a0         ! t0__4t0_a0    ! b_e_2t04a0    ! t0__9t0
2059
LSYM(x239)      t0__16a0        ! t0__5t0       ! b_e_t0ma0     ! t0__3t0
2060
LSYM(x240)      t0__9a0         ! t0__t0_a0     ! b_e_8t0       ! t0__3t0
2061
LSYM(x241)      t0__9a0         ! t0__t0_a0     ! b_e_8t0a0     ! t0__3t0
2062
LSYM(x242)      t0__5a0         ! t0__3t0       ! b_e_2t0       ! t0__8t0_a0
2063
LSYM(x243)      t0__9a0         ! t0__9t0       ! b_e_t0        ! t0__3t0
2064
LSYM(x244)      t0__5a0         ! t0__3t0       ! b_e_4t0       ! t0__4t0_a0
2065
LSYM(x245)      t0__8a0         ! t0__3t0       ! b_e_5t0       ! t0__2t0_a0
2066
LSYM(x246)      t0__5a0         ! t0__8t0_a0    ! b_e_2t0       ! t0__3t0
2067
LSYM(x247)      t0__5a0         ! t0__8t0_a0    ! b_e_2t0a0     ! t0__3t0
2068
LSYM(x248)      t0__32a0        ! t0__t0ma0     ! b_e_shift     ! r__r_8t0
2069
LSYM(x249)      t0__32a0        ! t0__t0ma0     ! b_e_t0        ! t0__8t0_a0
2070
LSYM(x250)      t0__5a0         ! t0__5t0       ! b_e_2t0       ! t0__5t0
2071
LSYM(x251)      t0__5a0         ! t0__5t0       ! b_e_2t0a0     ! t0__5t0
2072
LSYM(x252)      t0__64a0        ! t0__t0ma0     ! b_e_shift     ! r__r_4t0
2073
LSYM(x253)      t0__64a0        ! t0__t0ma0     ! b_e_t0        ! t0__4t0_a0
2074
LSYM(x254)      t0__128a0       ! t0__t0ma0     ! b_e_shift     ! r__r_2t0
2075
LSYM(x255)      t0__256a0       ! a1_ne_0_b_l0  ! t0__t0ma0     ! b_n_ret_t0
2076
/*1040 insts before this.  */
2077
LSYM(ret_t0)    MILLIRET
2078
LSYM(e_t0)      r__r_t0
2079
LSYM(e_shift)   a1_ne_0_b_l2
2080
        a0__256a0       /* a0 <<= 8 *********** */
2081
        MILLIRETN
2082
LSYM(e_t0ma0)   a1_ne_0_b_l0
2083
        t0__t0ma0
2084
        MILLIRET
2085
        r__r_t0
2086
LSYM(e_t0a0)    a1_ne_0_b_l0
2087
        t0__t0_a0
2088
        MILLIRET
2089
        r__r_t0
2090
LSYM(e_t02a0)   a1_ne_0_b_l0
2091
        t0__t0_2a0
2092
        MILLIRET
2093
        r__r_t0
2094
LSYM(e_t04a0)   a1_ne_0_b_l0
2095
        t0__t0_4a0
2096
        MILLIRET
2097
        r__r_t0
2098
LSYM(e_2t0)     a1_ne_0_b_l1
2099
        r__r_2t0
2100
        MILLIRETN
2101
LSYM(e_2t0a0)   a1_ne_0_b_l0
2102
        t0__2t0_a0
2103
        MILLIRET
2104
        r__r_t0
2105
LSYM(e2t04a0)   t0__t0_2a0
2106
        a1_ne_0_b_l1
2107
        r__r_2t0
2108
        MILLIRETN
2109
LSYM(e_3t0)     a1_ne_0_b_l0
2110
        t0__3t0
2111
        MILLIRET
2112
        r__r_t0
2113
LSYM(e_4t0)     a1_ne_0_b_l1
2114
        r__r_4t0
2115
        MILLIRETN
2116
LSYM(e_4t0a0)   a1_ne_0_b_l0
2117
        t0__4t0_a0
2118
        MILLIRET
2119
        r__r_t0
2120
LSYM(e4t08a0)   t0__t0_2a0
2121
        a1_ne_0_b_l1
2122
        r__r_4t0
2123
        MILLIRETN
2124
LSYM(e_5t0)     a1_ne_0_b_l0
2125
        t0__5t0
2126
        MILLIRET
2127
        r__r_t0
2128
LSYM(e_8t0)     a1_ne_0_b_l1
2129
        r__r_8t0
2130
        MILLIRETN
2131
LSYM(e_8t0a0)   a1_ne_0_b_l0
2132
        t0__8t0_a0
2133
        MILLIRET
2134
        r__r_t0
2135
 
2136
        .procend
2137
        .end
2138
#endif

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.