OpenCores
URL https://opencores.org/ocsvn/openrisc_2011-10-31/openrisc_2011-10-31/trunk

Subversion Repositories openrisc_2011-10-31

openrisc/trunk/rtos/rtems/c/src/lib/libcpu/hppa1.1/milli/milli.S (rev 617)

;
;  (c) Copyright 1986 HEWLETT-PACKARD COMPANY
;
;  To anyone who acknowledges that this file is provided "AS IS"
;  without any express or implied warranty:
;      permission to use, copy, modify, and distribute this file
;  for any purpose is hereby granted without fee, provided that
;  the above copyright notice and this notice appears in all
;  copies, and that the name of Hewlett-Packard Company not be
;  used in advertising or publicity pertaining to distribution
;  of the software without specific, written prior permission.
;  Hewlett-Packard Company makes no representations about the
;  suitability of this software for any purpose.
;

; Standard Hardware Register Definitions for Use with Assembler
; version A.08.06
;       - fr16-31 added at Utah
;
;  $Id: milli.S,v 1.2 2001-09-27 12:01:21 chris Exp $
;
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
; Hardware General Registers
r0: .equ        0

r1: .equ        1

r2: .equ        2

r3: .equ        3

r4: .equ        4

r5: .equ        5

r6: .equ        6

r7: .equ        7

r8: .equ        8

r9: .equ        9

r10: .equ       10

r11: .equ       11

r12: .equ       12

r13: .equ       13

r14: .equ       14

r15: .equ       15

r16: .equ       16

r17: .equ       17

r18: .equ       18

r19: .equ       19

r20: .equ       20

r21: .equ       21

r22: .equ       22

r23: .equ       23

r24: .equ       24

r25: .equ       25

r26: .equ       26

r27: .equ       27

r28: .equ       28

r29: .equ       29

r30: .equ       30

r31: .equ       31

; Hardware Space Registers
sr0: .equ       0

sr1: .equ       1

sr2: .equ       2

sr3: .equ       3

sr4: .equ       4

sr5: .equ       5

sr6: .equ       6

sr7: .equ       7

; Hardware Floating Point Registers
fr0: .equ       0

fr1: .equ       1

fr2: .equ       2

fr3: .equ       3

fr4: .equ       4

fr5: .equ       5

fr6: .equ       6

fr7: .equ       7

fr8: .equ       8

fr9: .equ       9

fr10: .equ      10

fr11: .equ      11

fr12: .equ      12

fr13: .equ      13

fr14: .equ      14

fr15: .equ      15

fr16: .equ      16

fr17: .equ      17

fr18: .equ      18

fr19: .equ      19

fr20: .equ      20

fr21: .equ      21

fr22: .equ      22

fr23: .equ      23

fr24: .equ      24

fr25: .equ      25

fr26: .equ      26

fr27: .equ      27

fr28: .equ      28

fr29: .equ      29

fr30: .equ      30

fr31: .equ      31

; Hardware Control Registers
cr0: .equ       0

rctr: .equ      0                       ; Recovery Counter Register

cr8: .equ       8                       ; Protection ID 1

pidr1: .equ     8

cr9: .equ       9                       ; Protection ID 2

pidr2: .equ     9

cr10: .equ      10

ccr: .equ       10                      ; Coprocessor Configuration Register

cr11: .equ      11

sar: .equ       11                      ; Shift Amount Register

cr12: .equ      12

pidr3: .equ     12                      ; Protection ID 3

cr13: .equ      13

pidr4: .equ     13                      ; Protection ID 4

cr14: .equ      14

iva: .equ       14                      ; Interrupt Vector Address

cr15: .equ      15

eiem: .equ      15                      ; External Interrupt Enable Mask

cr16: .equ      16

itmr: .equ      16                      ; Interval Timer

cr17: .equ      17

pcsq: .equ      17                      ; Program Counter Space queue

cr18: .equ      18

pcoq: .equ      18                      ; Program Counter Offset queue

cr19: .equ      19

iir: .equ       19                      ; Interruption Instruction Register

cr20: .equ      20

isr: .equ       20                      ; Interruption Space Register

cr21: .equ      21

ior: .equ       21                      ; Interruption Offset Register

cr22: .equ      22

ipsw: .equ      22                      ; Interruption Processor Status Word

cr23: .equ      23

eirr: .equ      23                      ; External Interrupt Request

cr24: .equ      24

ppda: .equ      24                      ; Physical Page Directory Address

tr0: .equ       24                      ; Temporary register 0

cr25: .equ      25

hta: .equ       25                      ; Hash Table Address

tr1: .equ       25                      ; Temporary register 1

cr26: .equ      26

tr2: .equ       26                      ; Temporary register 2

cr27: .equ      27

tr3: .equ       27                      ; Temporary register 3

cr28: .equ      28

tr4: .equ       28                      ; Temporary register 4

cr29: .equ      29

tr5: .equ       29                      ; Temporary register 5

cr30: .equ      30

tr6: .equ       30                      ; Temporary register 6

cr31: .equ      31

tr7: .equ       31                      ; Temporary register 7

;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
; Procedure Call Convention                                                  ~
; Register Definitions for Use with Assembler                                ~
; version A.08.06
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
; Software Architecture General Registers
rp: .equ        r2      ; return pointer

mrp: .equ       r31     ; millicode return pointer

ret0: .equ      r28     ; return value

ret1: .equ      r29     ; return value (high part of double)

sl: .equ        r29     ; static link

sp: .equ        r30     ; stack pointer

dp: .equ        r27     ; data pointer

arg0: .equ      r26     ; argument

arg1: .equ      r25     ; argument or high part of double argument

arg2: .equ      r24     ; argument

arg3: .equ      r23     ; argument or high part of double argument

;_____________________________________________________________________________
; Software Architecture Space Registers
;               sr0     ; return link from BLE
sret: .equ      sr1     ; return value

sarg: .equ      sr1     ; argument

;               sr4     ; PC SPACE tracker
;               sr5     ; process private data
;_____________________________________________________________________________
; Software Architecture Pseudo Registers
previous_sp: .equ       64      ; old stack pointer (locates previous frame)

;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
; Standard space and subspace definitions.  version A.08.06
; These are generally suitable for programs on HP_UX and HPE.
; Statements commented out are used when building such things as operating
; system kernels.
;;;;;;;;;;;;;;;;
        .SPACE  $TEXT$,         SPNUM=0,SORT=8
        .subspa $MILLICODE$,    QUAD=0,ALIGN=8,ACCESS=0x2c,SORT=8
        .subspa $LIT$,          QUAD=0,ALIGN=8,ACCESS=0x2c,SORT=16
        .subspa $CODE$,         QUAD=0,ALIGN=8,ACCESS=0x2c,SORT=24
; Additional code subspaces should have ALIGN=8 for an interspace BV
; and should have SORT=24.
;
; For an incomplete executable (program bound to shared libraries),
; sort keys $GLOBAL$ -1 and $GLOBAL$ -2 are reserved for the $DLT$
; and $PLT$ subspaces respectively.
;;;;;;;;;;;;;;;
        .SPACE $PRIVATE$,       SPNUM=1,PRIVATE,SORT=16
        .subspa $GLOBAL$,       QUAD=1,ALIGN=8,ACCESS=0x1f,SORT=40
        .import $global$
        .subspa $DATA$,         QUAD=1,ALIGN=8,ACCESS=0x1f,SORT=16
        .subspa $BSS$,          QUAD=1,ALIGN=8,ACCESS=0x1f,SORT=82,ZERO

        .SPACE $TEXT$
        .SUBSPA $MILLICODE$

        .align 8
        .EXPORT $$remI,millicode
;       .IMPORT cerror
$$remI:
        .PROC
        .CALLINFO millicode
        .ENTRY
        addit,= 0,arg1,r0
        add,>= r0,arg0,ret1
        sub r0,ret1,ret1
        sub r0,arg1,r1
        ds r0,r1,r0
        or r0,r0,r1
        add ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        movb,>=,n r1,ret1,remI300
        add,< arg1,r0,r0
        add,tr r1,arg1,ret1
        sub r1,arg1,ret1
remI300: add,>= arg0,r0,r0

        sub r0,ret1,ret1
        bv r0(r31)
        nop
        .EXIT
        .PROCEND

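; ($$remI above takes the dividend in arg0 and the divisor in arg1 and leaves
;  the remainder in ret1; the addit,= at entry traps on a zero divisor.  As a
;  rough C sketch of what the 32 ds/addc steps compute -- an illustration, not
;  part of the HP sources, and assuming the usual convention that the
;  remainder takes the sign of the dividend:
;
;      int remI(int n, int d) {
;          unsigned un = n < 0 ? 0u - (unsigned)n : (unsigned)n;
;          unsigned ud = d < 0 ? 0u - (unsigned)d : (unsigned)d;
;          unsigned r = 0;
;          for (int i = 31; i >= 0; i--) {      /* one ds/addc pair per bit */
;              r = (r << 1) | ((un >> i) & 1);
;              if (r >= ud) r -= ud;
;          }
;          return n < 0 ? -(int)r : (int)r;
;      }
;  )
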
bit1:  .equ 1

bit30: .equ 30
bit31: .equ 31

len2:  .equ 2

len4:  .equ 4

$$dyncall:
        .proc
        .callinfo NO_CALLS
        .entry
        .export $$dyncall,MILLICODE

        bb,>=,n 22,bit30,noshlibs

        depi    0,bit31,len2,22
        ldw     4(22),19
        ldw     0(22),22
noshlibs:
        ldsid   (22),r1
        mtsp    r1,sr0
        be      0(sr0,r22)
        stw     rp,-24(sp)
        .exit
        .procend

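; ($$dyncall above is the indirect-call helper: the callee address arrives in
;  r22.  A sketch of the intent, offered as a reading of the code rather than
;  taken from the sources: if bit 30 of r22 is set, r22 points at a
;  shared-library PLT entry, so the two low bits are cleared with depi and the
;  real entry point and new linkage pointer are loaded from words 0 and 4;
;  either way ldsid/mtsp/be performs an interspace branch to the target, and
;  the delay-slot stw spills the caller's return pointer rp to -24(sp).)
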
temp: .EQU      r1

retreg: .EQU    ret1    ; r29

        .export $$divU,millicode
        .import $$divU_3,millicode
        .import $$divU_5,millicode
        .import $$divU_6,millicode
        .import $$divU_7,millicode
        .import $$divU_9,millicode
        .import $$divU_10,millicode
        .import $$divU_12,millicode
        .import $$divU_14,millicode
        .import $$divU_15,millicode
$$divU:
        .proc
        .callinfo millicode
        .entry
; The subtract is not nullified since it does no harm and can be used
; by the two cases that branch back to "normal".
        comib,>=  15,arg1,special_divisor
        sub     r0,arg1,temp            ; clear carry, negate the divisor
        ds      r0,temp,r0              ; set V-bit to 1
normal:
        add     arg0,arg0,retreg        ; shift msb bit into carry
        ds      r0,arg1,temp            ; 1st divide step, if no carry
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 2nd divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 3rd divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 4th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 5th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 6th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 7th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 8th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 9th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 10th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 11th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 12th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 13th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 14th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 15th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 16th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 17th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 18th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 19th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 20th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 21st divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 22nd divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 23rd divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 24th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 25th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 26th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 27th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 28th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 29th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 30th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 31st divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 32nd divide step,
        bv    0(r31)
        addc    retreg,retreg,retreg    ; shift last retreg bit into retreg
;_____________________________________________________________________________
; handle the cases where divisor is a small constant or has high bit on
special_divisor:
        comib,>  0,arg1,big_divisor
        nop
        blr     arg1,r0
        nop
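; (The blr above scales the divisor value by eight bytes, so each value 0..15
;  selects exactly the corresponding two-instruction slot that follows.)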
zero_divisor: ; this label is here to provide external visibility

        addit,= 0,arg1,0                ; trap for zero dvr
        nop
        bv    0(r31)                    ; divisor == 1
        copy    arg0,retreg
        bv    0(r31)                    ; divisor == 2
        extru   arg0,30,31,retreg
        b,n     $$divU_3                ; divisor == 3
        nop
        bv    0(r31)                    ; divisor == 4
        extru   arg0,29,30,retreg
        b,n     $$divU_5                ; divisor == 5
        nop
        b,n     $$divU_6                ; divisor == 6
        nop
        b,n     $$divU_7                ; divisor == 7
        nop
        bv    0(r31)                    ; divisor == 8
        extru   arg0,28,29,retreg
        b,n     $$divU_9                ; divisor == 9
        nop
        b,n     $$divU_10               ; divisor == 10
        nop
        b       normal                  ; divisor == 11
        ds      r0,temp,r0              ; set V-bit to 1
        b,n     $$divU_12               ; divisor == 12
        nop
        b       normal                  ; divisor == 13
        ds      r0,temp,r0              ; set V-bit to 1
        b,n     $$divU_14               ; divisor == 14
        nop
        b,n     $$divU_15               ; divisor == 15
        nop
;_____________________________________________________________________________
; Handle the case where the high bit is on in the divisor.
; Compute:      if( dividend>=divisor) quotient=1; else quotient=0;
; Note:         dividend>=divisor iff dividend-divisor does not borrow
; and           not borrow iff carry
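; (Concretely: the sub below sets the carry bit exactly when arg0 >= arg1, and
;  the addc in the delay slot copies that carry into retreg, giving a quotient
;  of 1 or 0.)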
big_divisor:
        sub     arg0,arg1,r0
        bv    0(r31)
        addc    r0,r0,retreg
        .exit
        .procend
        .end

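; (Taken together, $$divU returns the unsigned quotient arg0/arg1 in ret1.  A
;  rough C sketch of the whole routine -- an illustration, not part of the HP
;  sources:
;
;      unsigned divU(unsigned n, unsigned d) {
;          /* d == 0 traps in the d==0 slot of the blr table above */
;          if (d & 0x80000000u) return n >= d;   /* big_divisor path */
;          unsigned q = 0, r = 0;
;          for (int i = 31; i >= 0; i--) {       /* one ds/addc per bit */
;              r = (r << 1) | ((n >> i) & 1);
;              q <<= 1;
;              if (r >= d) { r -= d; q |= 1; }
;          }
;          return q;
;      }
;
;  Small divisors dispatched to the $$divU_<n> entry points compute the same
;  quotient by reciprocal multiplication; see the note after the
;  $pos/$pos_for_17 folds below.)
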
t2: .EQU        r1

; x2    .EQU    arg0    ; r26
t1: .EQU        arg1    ; r25

; x1    .EQU    ret1    ; r29
;_____________________________________________________________________________

$$divide_by_constant:
        .PROC
        .CALLINFO millicode
        .entry

        .export $$divide_by_constant,millicode
; Provides a "nice" label for the code covered by the unwind descriptor
; for things like gprof.

$$divI_2:
        .EXPORT         $$divI_2,MILLICODE
        COMCLR,>=       arg0,0,0
        ADDI            1,arg0,arg0
        bv    0(r31)
        EXTRS           arg0,30,31,ret1

$$divI_4:
        .EXPORT         $$divI_4,MILLICODE
        COMCLR,>=       arg0,0,0
        ADDI            3,arg0,arg0
        bv    0(r31)
        EXTRS           arg0,29,30,ret1

$$divI_8:
        .EXPORT         $$divI_8,MILLICODE
        COMCLR,>=       arg0,0,0
        ADDI            7,arg0,arg0
        bv    0(r31)
        EXTRS           arg0,28,29,ret1

$$divI_16:
        .EXPORT         $$divI_16,MILLICODE
        COMCLR,>=       arg0,0,0
        ADDI            15,arg0,arg0
        bv    0(r31)
        EXTRS           arg0,27,28,ret1
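; (These four power-of-two entries use the usual signed-division bias: when
;  the dividend is negative, add 2^k - 1 before the arithmetic shift so the
;  result rounds toward zero.  COMCLR,>= nullifies the ADDI for arg0 >= 0, and
;  EXTRS performs the sign-extended right shift.  Roughly, as a sketch that is
;  not part of the HP sources and assumes an arithmetic ">>":
;
;      int divI_16(int n) { return (n < 0 ? n + 15 : n) >> 4; }
;
;  with 1, 3 and 7 playing the same role in $$divI_2, $$divI_4 and $$divI_8.)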
 
$$divI_3:
        .EXPORT         $$divI_3,MILLICODE
        COMB,<,N        arg0,0,$neg3

        ADDI            1,arg0,arg0
        EXTRU           arg0,1,2,ret1
        SH2ADD          arg0,arg0,arg0
        B               $pos
        ADDC            ret1,0,ret1

$neg3:
        SUBI            1,arg0,arg0
        EXTRU           arg0,1,2,ret1
        SH2ADD          arg0,arg0,arg0
        B               $neg
        ADDC            ret1,0,ret1

$$divU_3:
        .EXPORT         $$divU_3,MILLICODE
        ADDI            1,arg0,arg0
        ADDC            0,0,ret1
        SHD             ret1,arg0,30,t1
        SH2ADD          arg0,arg0,arg0
        B               $pos
        ADDC            ret1,t1,ret1

$$divI_5:
        .EXPORT         $$divI_5,MILLICODE
        COMB,<,N        arg0,0,$neg5
        ADDI            3,arg0,t1
        SH1ADD          arg0,t1,arg0
        B               $pos
        ADDC            0,0,ret1

$neg5:
        SUB             0,arg0,arg0
        ADDI            1,arg0,arg0
        SHD             0,arg0,31,ret1
        SH1ADD          arg0,arg0,arg0
        B               $neg
        ADDC            ret1,0,ret1

$$divU_5:
        .EXPORT         $$divU_5,MILLICODE
        ADDI            1,arg0,arg0
        ADDC            0,0,ret1
        SHD             ret1,arg0,31,t1
        SH1ADD          arg0,arg0,arg0
        B               $pos
        ADDC            t1,ret1,ret1

$$divI_6:
        .EXPORT         $$divI_6,MILLICODE
        COMB,<,N        arg0,0,$neg6
        EXTRU           arg0,30,31,arg0
        ADDI            5,arg0,t1
        SH2ADD          arg0,t1,arg0
        B               $pos
        ADDC            0,0,ret1

$neg6:
        SUBI            2,arg0,arg0

        EXTRU           arg0,30,31,arg0
        SHD             0,arg0,30,ret1
        SH2ADD          arg0,arg0,arg0
        B               $neg
        ADDC            ret1,0,ret1

$$divU_6:
        .EXPORT         $$divU_6,MILLICODE
        EXTRU           arg0,30,31,arg0
        ADDI            1,arg0,arg0
        SHD             0,arg0,30,ret1
        SH2ADD          arg0,arg0,arg0
        B               $pos
        ADDC            ret1,0,ret1

$$divU_10:
        .EXPORT         $$divU_10,MILLICODE
        EXTRU           arg0,30,31,arg0
        ADDI            3,arg0,t1
        SH1ADD          arg0,t1,arg0
        ADDC            0,0,ret1
$pos:
        SHD             ret1,arg0,28,t1
        SHD             arg0,0,28,t2
        ADD             arg0,t2,arg0
        ADDC            ret1,t1,ret1
$pos_for_17:
        SHD             ret1,arg0,24,t1
        SHD             arg0,0,24,t2
        ADD             arg0,t2,arg0
        ADDC            ret1,t1,ret1

        SHD             ret1,arg0,16,t1
        SHD             arg0,0,16,t2
        ADD             arg0,t2,arg0
        bv    0(r31)
        ADDC            ret1,t1,ret1
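; (What the shared $pos/$pos_for_17 tail above appears to do: ret1:arg0 is a
;  64-bit accumulator, and each SHD/SHD/ADD/ADDC group adds the accumulator
;  shifted left by 4, 8 or 16 bits to itself, i.e. multiplies it by 17, 257
;  and 65537.  Together with the per-divisor setup this forms a fixed-point
;  reciprocal multiply; for $$divU_3, for example, (n+1)*5*17*257*65537 =
;  (n+1)*(2^32-1)/3, and the quotient n/3 is the upper word left in ret1.
;  $pos_for_17 skips the *17 fold because 15*257*65537 = (2^32-1)/17, and the
;  $neg/$neg_for_17 variants below are the same folds followed by a negation
;  for the signed entry points.)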
 
$$divI_10:
        .EXPORT         $$divI_10,MILLICODE
        COMB,<          arg0,0,$neg10
        COPY            0,ret1
        EXTRU           arg0,30,31,arg0
        ADDIB,TR        1,arg0,$pos
        SH1ADD          arg0,arg0,arg0

$neg10:
        SUBI            2,arg0,arg0

        EXTRU           arg0,30,31,arg0
        SH1ADD          arg0,arg0,arg0
$neg:
        SHD             ret1,arg0,28,t1
        SHD             arg0,0,28,t2
        ADD             arg0,t2,arg0
        ADDC            ret1,t1,ret1
$neg_for_17:
        SHD             ret1,arg0,24,t1
        SHD             arg0,0,24,t2
        ADD             arg0,t2,arg0
        ADDC            ret1,t1,ret1

        SHD             ret1,arg0,16,t1
        SHD             arg0,0,16,t2
        ADD             arg0,t2,arg0
        ADDC            ret1,t1,ret1
        bv    0(r31)
        SUB             0,ret1,ret1

$$divI_12:
        .EXPORT         $$divI_12,MILLICODE
        COMB,<          arg0,0,$neg12
        COPY            0,ret1
        EXTRU           arg0,29,30,arg0
        ADDIB,TR        1,arg0,$pos
        SH2ADD          arg0,arg0,arg0

$neg12:
        SUBI            4,arg0,arg0

        EXTRU           arg0,29,30,arg0
        B               $neg
        SH2ADD          arg0,arg0,arg0

$$divU_12:
        .EXPORT         $$divU_12,MILLICODE
        EXTRU           arg0,29,30,arg0
        ADDI            5,arg0,t1
        SH2ADD          arg0,t1,arg0
        B               $pos
        ADDC            0,0,ret1

$$divI_15:
        .EXPORT         $$divI_15,MILLICODE
        COMB,<          arg0,0,$neg15
        COPY            0,ret1
        ADDIB,TR        1,arg0,$pos+4
        SHD             ret1,arg0,28,t1

$neg15:
        B               $neg
        SUBI            1,arg0,arg0

$$divU_15:
        .EXPORT         $$divU_15,MILLICODE
        ADDI            1,arg0,arg0
        B               $pos
        ADDC            0,0,ret1

$$divI_17:
        .EXPORT         $$divI_17,MILLICODE
        COMB,<,N        arg0,0,$neg17
        ADDI            1,arg0,arg0
        SHD             0,arg0,28,t1
        SHD             arg0,0,28,t2
        SUB             t2,arg0,arg0
        B               $pos_for_17
        SUBB            t1,0,ret1

$neg17:
        SUBI            1,arg0,arg0
        SHD             0,arg0,28,t1
        SHD             arg0,0,28,t2
        SUB             t2,arg0,arg0
        B               $neg_for_17
        SUBB            t1,0,ret1

$$divU_17:
        .EXPORT         $$divU_17,MILLICODE
        ADDI            1,arg0,arg0
        ADDC            0,0,ret1
        SHD             ret1,arg0,28,t1
$u17:
        SHD             arg0,0,28,t2
        SUB             t2,arg0,arg0
        B               $pos_for_17
        SUBB            t1,ret1,ret1

$$divI_7:
        .EXPORT         $$divI_7,MILLICODE
        COMB,<,N        arg0,0,$neg7
$7:
        ADDI            1,arg0,arg0
        SHD             0,arg0,29,ret1
        SH3ADD          arg0,arg0,arg0
        ADDC            ret1,0,ret1
$pos7:
        SHD             ret1,arg0,26,t1
        SHD             arg0,0,26,t2
        ADD             arg0,t2,arg0
        ADDC            ret1,t1,ret1

        SHD             ret1,arg0,20,t1
        SHD             arg0,0,20,t2
        ADD             arg0,t2,arg0
        ADDC            ret1,t1,t1

        COPY            0,ret1
        SHD,=           t1,arg0,24,t1
$1:
        ADDB,TR         t1,ret1,$2
        EXTRU           arg0,31,24,arg0

        bv,n  0(r31)

$2:
        ADDB,TR         t1,arg0,$1
        EXTRU,=         arg0,7,8,t1

$neg7:
        SUBI            1,arg0,arg0
$8:
        SHD             0,arg0,29,ret1
        SH3ADD          arg0,arg0,arg0
        ADDC            ret1,0,ret1

$neg7_shift:
        SHD             ret1,arg0,26,t1
        SHD             arg0,0,26,t2
        ADD             arg0,t2,arg0
        ADDC            ret1,t1,ret1

        SHD             ret1,arg0,20,t1
        SHD             arg0,0,20,t2
        ADD             arg0,t2,arg0
        ADDC            ret1,t1,t1

        COPY            0,ret1
        SHD,=           t1,arg0,24,t1
$3:
        ADDB,TR         t1,ret1,$4
        EXTRU           arg0,31,24,arg0

        bv    0(r31)
        SUB             0,ret1,ret1

$4:
        ADDB,TR         t1,arg0,$3
        EXTRU,=         arg0,7,8,t1

$$divU_7:
        .EXPORT         $$divU_7,MILLICODE
        ADDI            1,arg0,arg0
        ADDC            0,0,ret1
        SHD             ret1,arg0,29,t1
        SH3ADD          arg0,arg0,arg0
        B               $pos7
        ADDC            t1,ret1,ret1

$$divI_9:
        .EXPORT         $$divI_9,MILLICODE
        COMB,<,N        arg0,0,$neg9
        ADDI            1,arg0,arg0
        SHD             0,arg0,29,t1
        SHD             arg0,0,29,t2
        SUB             t2,arg0,arg0
        B               $pos7
        SUBB            t1,0,ret1

$neg9:
        SUBI            1,arg0,arg0
        SHD             0,arg0,29,t1
        SHD             arg0,0,29,t2
        SUB             t2,arg0,arg0
        B               $neg7_shift
        SUBB            t1,0,ret1

$$divU_9:
        .EXPORT         $$divU_9,MILLICODE
        ADDI            1,arg0,arg0
        ADDC            0,0,ret1
        SHD             ret1,arg0,29,t1
        SHD             arg0,0,29,t2
        SUB             t2,arg0,arg0
        B               $pos7
        SUBB            t1,ret1,ret1

$$divI_14:
        .EXPORT         $$divI_14,MILLICODE
        COMB,<,N        arg0,0,$neg14
$$divU_14:
        .EXPORT         $$divU_14,MILLICODE
        B               $7
        EXTRU           arg0,30,31,arg0

$neg14:
        SUBI            2,arg0,arg0
        B               $8
        EXTRU           arg0,30,31,arg0

        .exit
        .PROCEND
        .END

rmndr: .EQU     ret1    ; r29

        .export $$remU,millicode
$$remU:
        .proc
        .callinfo millicode
        .entry

        comib,>=,n  0,arg1,special_case
        sub     r0,arg1,rmndr           ; clear carry, negate the divisor
        ds      r0,rmndr,r0             ; set V-bit to 1
        add     arg0,arg0,temp          ; shift msb bit into carry
        ds      r0,arg1,rmndr           ; 1st divide step, if no carry
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 2nd divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 3rd divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 4th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 5th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 6th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 7th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 8th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 9th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 10th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 11th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 12th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 13th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 14th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 15th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 16th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 17th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 18th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 19th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 20th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 21st divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 22nd divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 23rd divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 24th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 25th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 26th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 27th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 28th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 29th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 30th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 31st divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 32nd divide step,
        comiclr,<= 0,rmndr,r0
          add   rmndr,arg1,rmndr        ; correction
;       .exit
        bv,n  0(r31)
        nop
; Putting >= on the last DS and deleting COMICLR does not work!
;_____________________________________________________________________________
special_case:
        addit,= 0,arg1,r0               ; trap on div by zero
        sub,>>= arg0,arg1,rmndr
          copy  arg0,rmndr
        bv,n  0(r31)
        nop
        .exit
        .procend
        .end
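; ($$remU mirrors $$divU but keeps the remainder: the 32 ds/addc steps can
;  leave a partial remainder that is still negative, and the comiclr/add pair
;  adds the divisor back in exactly that case -- the "correction" noted above.
;  special_case handles divisors with the high bit set, where the remainder is
;  simply arg0 - arg1 if arg0 >= arg1 and arg0 otherwise; a zero divisor traps
;  via addit,=.)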
 
; Use bv    0(r31) and bv,n  0(r31) instead.
; #define       return          bv      0(%mrp)
; #define       return_n        bv,n    0(%mrp)
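; (mrp was defined above as r31, the millicode return pointer, which is why
;  every routine in this file returns with "bv 0(r31)" or "bv,n 0(r31)" rather
;  than through rp.)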
 
        .subspa $MILLICODE$
        .align 16
$$mulI:

        .proc
        .callinfo millicode
        .entry
        .export $$mulI, millicode
        combt,<<=       %r25,%r26,l4    ; swap args if unsigned %r25>%r26
        copy            0,%r29          ; zero out the result
        xor             %r26,%r25,%r26  ; swap %r26 & %r25 using the
        xor             %r26,%r25,%r25  ;  old xor trick
        xor             %r26,%r25,%r26
l4:     combt,<=        0,%r26,l3       ; if %r26>=0 then proceed like unsigned

        zdep            %r25,30,8,%r1   ; %r1 = (%r25&0xff)<<1 *********
        sub,>           0,%r25,%r1      ; otherwise negate both and
        combt,<=,n      %r26,%r1,l2     ;  swap back if |%r26|<|%r25|
        sub             0,%r26,%r25
        movb,tr,n       %r1,%r26,l2     ; 10th inst.

l0:     add     %r29,%r1,%r29           ; add in this partial product

l1:     zdep    %r26,23,24,%r26         ; %r26 <<= 8 ******************

l2:     zdep    %r25,30,8,%r1           ; %r1 = (%r25&0xff)<<1 *********

l3:     blr     %r1,0                   ; case on these 8 bits ******

        extru   %r25,23,24,%r25         ; %r25 >>= 8 ******************

;16 insts before this.
;                         %r26 <<= 8 **************************
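; (A sketch of the dispatch used below, offered as a reading of the code
;  rather than taken from the sources: %r1 holds the low byte of the
;  multiplier already shifted left by one (zdep %r25,30,8), and blr scales it
;  by eight more, so each xN entry below is exactly four instructions
;  (16 bytes), packed onto one line with "!", the assembler's statement
;  separator.  Entry xN adds N*%r26 into the running result %r29 with
;  shift-and-add sequences, then either returns (when no multiplier bits
;  remain in %r25) or falls back into l0/l1/l2/l3 above, which shift %r26 left
;  by 8 and dispatch on the next byte; the e_* helpers named by some entries
;  lie beyond the end of this excerpt and apply a final shift or add before
;  returning.  Ignoring the signed fix-up in the preamble, the loop is
;  roughly:
;
;      unsigned mulI(unsigned x, unsigned y) {   /* y is the smaller operand */
;          unsigned r = 0;
;          while (y) {
;              r += x * (y & 0xff);              /* one xN table entry */
;              x <<= 8;
;              y >>= 8;
;          }
;          return r;
;      }
;  )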
x0:     comb,<> %r25,0,l2 ! zdep %r26,23,24,%r26 ! bv,n 0(r31) ! nop

x1:     comb,<> %r25,0,l1 ! add %r29,%r26,%r29 ! bv,n 0(r31) ! nop

x2:     comb,<> %r25,0,l1 ! sh1add %r26,%r29,%r29 ! bv,n 0(r31) ! nop

x3:     comb,<> %r25,0,l0 ! sh1add %r26,%r26,%r1 ! bv 0(r31) ! add %r29,%r1,%r29

x4:     comb,<> %r25,0,l1 ! sh2add %r26,%r29,%r29 ! bv,n 0(r31) ! nop

x5:     comb,<> %r25,0,l0 ! sh2add %r26,%r26,%r1 ! bv 0(r31) ! add %r29,%r1,%r29

x6:     sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh1add %r1,%r29,%r29 ! bv,n 0(r31)

x7:     sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh2add %r26,%r29,%r29 ! b,n ret_t0

x8:     comb,<> %r25,0,l1 ! sh3add %r26,%r29,%r29 ! bv,n 0(r31) ! nop

x9:     comb,<> %r25,0,l0 ! sh3add %r26,%r26,%r1 ! bv 0(r31) ! add %r29,%r1,%r29

x10:    sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh1add %r1,%r29,%r29 ! bv,n 0(r31)

x11:    sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh3add %r26,%r29,%r29 ! b,n ret_t0

x12:    sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh2add %r1,%r29,%r29 ! bv,n 0(r31)

x13:    sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh3add %r26,%r29,%r29 ! b,n ret_t0

x14:    sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29

x15:    sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh1add %r1,%r1,%r1 ! b,n ret_t0

x16:    zdep %r26,27,28,%r1 ! comb,<> %r25,0,l1 ! add %r29,%r1,%r29 ! bv,n 0(r31)

x17:    sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh3add %r26,%r1,%r1 ! b,n ret_t0

x18:    sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh1add %r1,%r29,%r29 ! bv,n 0(r31)

x19:    sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh1add %r1,%r26,%r1 ! b,n ret_t0

x20:    sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh2add %r1,%r29,%r29 ! bv,n 0(r31)

x21:    sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh2add %r1,%r26,%r1 ! b,n ret_t0

x22:    sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29

x23:    sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1

x24:    sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh3add %r1,%r29,%r29 ! bv,n 0(r31)

x25:    sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh2add %r1,%r1,%r1 ! b,n ret_t0

x26:    sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29

x27:    sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh3add %r1,%r1,%r1 ! b,n ret_t0

x28:    sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29

x29:    sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1

x30:    sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh1add %r1,%r29,%r29

x31:    zdep %r26,26,27,%r1 ! comb,<> %r25,0,l0 ! sub %r1,%r26,%r1 ! b,n ret_t0

x32:    zdep %r26,26,27,%r1 ! comb,<> %r25,0,l1 ! add %r29,%r1,%r29 ! bv,n 0(r31)

x33:    sh3add %r26,0,%r1 ! comb,<> %r25,0,l0 ! sh2add %r1,%r26,%r1 ! b,n ret_t0

x34:    zdep %r26,27,28,%r1 ! add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29

x35:    sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh3add %r26,%r1,%r1

x36:    sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh2add %r1,%r29,%r29 ! bv,n 0(r31)

x37:    sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh2add %r1,%r26,%r1 ! b,n ret_t0

x38:    sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29

x39:    sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1

x40:    sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh3add %r1,%r29,%r29 ! bv,n 0(r31)

x41:    sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh3add %r1,%r26,%r1 ! b,n ret_t0

x42:    sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29

x43:    sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1

x44:    sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29

x45:    sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh2add %r1,%r1,%r1 ! b,n ret_t0

x46:    sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! add %r1,%r26,%r1

x47:    sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh1add %r26,%r1,%r1

x48:    sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! zdep %r1,27,28,%r1 ! b,n ret_t0

x49:    sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh2add %r26,%r1,%r1

x50:    sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_shift ! sh1add %r1,%r29,%r29

x51:    sh3add %r26,%r26,%r1 ! sh3add %r26,%r1,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1

x52:    sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29

x53:    sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1

x54:    sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh1add %r1,%r29,%r29

x55:    sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1

x56:    sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh3add %r1,%r29,%r29

x57:    sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1

x58:    sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1

x59:    sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t02a0 ! sh1add %r1,%r1,%r1

x60:    sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh2add %r1,%r29,%r29

x61:    sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1

x62:    zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29

x63:    zdep %r26,25,26,%r1 ! comb,<> %r25,0,l0 ! sub %r1,%r26,%r1 ! b,n ret_t0

x64:    zdep %r26,25,26,%r1 ! comb,<> %r25,0,l1 ! add %r29,%r1,%r29 ! bv,n 0(r31)

x65:    sh3add %r26,0,%r1 ! comb,<> %r25,0,l0 ! sh3add %r1,%r26,%r1 ! b,n ret_t0

x66:    zdep %r26,26,27,%r1 ! add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29

x67:    sh3add %r26,0,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1

x68:    sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29

x69:    sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1

x70:    zdep %r26,25,26,%r1 ! sh2add %r26,%r1,%r1 ! b e_t0 ! sh1add %r26,%r1,%r1

x71:    sh3add %r26,%r26,%r1 ! sh3add %r1,0,%r1 ! b e_t0 ! sub %r1,%r26,%r1

x72:    sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh3add %r1,%r29,%r29 ! bv,n 0(r31)

x73:    sh3add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_shift ! add %r29,%r1,%r29

x74:    sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29

x75:    sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1

x76:    sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29

x77:    sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1

x78:    sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r26,%r1

x79:    zdep %r26,27,28,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sub %r1,%r26,%r1

x80:    zdep %r26,27,28,%r1 ! sh2add %r1,%r1,%r1 ! b e_shift ! add %r29,%r1,%r29

x81:    sh3add %r26,%r26,%r1 ! sh3add %r1,%r1,%r1 ! b e_shift ! add %r29,%r1,%r29

x82:    sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29

x83:    sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1

x84:    sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29

x85:    sh3add  %r26,0,%r1              ! sh1add        %r1,%r26,%r1    !       b       e_t0    !       sh2add  %r1,%r1,%r1
1431
 
1432
x86:    sh2add  %r26,%r26,%r1           ! sh2add        %r1,%r26,%r1    !       b       e_2t0   ! sh1add        %r1,%r26,%r1
1433
 
1434
x87:    sh3add  %r26,%r26,%r1           !       sh3add  %r1,%r1,%r1     ! b     e_t02a0 ! sh2add        %r26,%r1,%r1
1435
 
1436
x88:    sh2add  %r26,%r26,%r1           ! sh1add        %r1,%r26,%r1    ! b     e_shift ! sh3add        %r1,%r29,%r29
1437
 
1438
x89:    sh2add  %r26,%r26,%r1           ! sh1add        %r1,%r26,%r1    !       b       e_t0    ! sh3add        %r1,%r26,%r1
1439
 
1440
x90:    sh3add  %r26,%r26,%r1           !       sh2add  %r1,%r1,%r1     ! b     e_shift ! sh1add        %r1,%r29,%r29
1441
 
1442
x91:    sh3add  %r26,%r26,%r1           !       sh2add  %r1,%r1,%r1     !       b       e_t0    ! sh1add        %r1,%r26,%r1
1443
 
1444
x92:    sh2add  %r26,%r26,%r1           ! sh1add        %r1,%r26,%r1    !       b       e_4t0   ! sh1add        %r1,%r26,%r1
1445
 
1446
x93: zdep       %r26,26,27,%r1  ! sub   %r1,%r26,%r1    !       b       e_t0    !       sh1add  %r1,%r1,%r1
1447
 
1448
x94:    sh3add  %r26,%r26,%r1           !       sh2add  %r1,%r1,%r1     !       b       e_2t0   ! sh1add        %r26,%r1,%r1
1449
 
1450
x95:    sh3add  %r26,%r26,%r1           ! sh1add        %r1,%r26,%r1    !       b       e_t0    !       sh2add  %r1,%r1,%r1
1451
 
1452
x96:    sh3add  %r26,0,%r1              !       sh1add  %r1,%r1,%r1     ! b     e_shift ! sh2add        %r1,%r29,%r29
1453
 
1454
x97:    sh3add  %r26,0,%r1              !       sh1add  %r1,%r1,%r1     !       b       e_t0    ! sh2add        %r1,%r26,%r1
1455
 
1456
x98: zdep       %r26,26,27,%r1  !       sh1add  %r1,%r1,%r1     !       b       e_t0    ! sh1add        %r26,%r1,%r1
1457
 
1458
x99:    sh3add  %r26,0,%r1              ! sh2add        %r1,%r26,%r1    !       b       e_t0    !       sh1add  %r1,%r1,%r1
1459
 
1460
x100:   sh2add  %r26,%r26,%r1           !       sh2add  %r1,%r1,%r1     ! b     e_shift ! sh2add        %r1,%r29,%r29
1461
 
1462
x101:   sh2add  %r26,%r26,%r1           !       sh2add  %r1,%r1,%r1     !       b       e_t0    ! sh2add        %r1,%r26,%r1
1463
 
1464
x102: zdep      %r26,26,27,%r1  ! sh1add        %r26,%r1,%r1    !       b       e_t0    !       sh1add  %r1,%r1,%r1
1465
 
1466
x103:   sh2add  %r26,%r26,%r1           !       sh2add  %r1,%r1,%r1     ! b     e_t02a0 ! sh2add        %r1,%r26,%r1
1467
 
1468
x104:   sh1add  %r26,%r26,%r1           ! sh2add        %r1,%r26,%r1    ! b     e_shift ! sh3add        %r1,%r29,%r29
1469
 
1470
x105:   sh2add  %r26,%r26,%r1           ! sh2add        %r1,%r26,%r1    !       b       e_t0    !       sh2add  %r1,%r1,%r1
1471
 
1472
x106:   sh1add  %r26,%r26,%r1           ! sh2add        %r1,%r26,%r1    !       b       e_2t0   ! sh2add        %r1,%r26,%r1
1473
 
1474
x107:   sh3add  %r26,%r26,%r1           ! sh2add        %r26,%r1,%r1    ! b     e_t02a0 ! sh3add        %r1,%r26,%r1
1475
 
1476
x108:   sh3add  %r26,%r26,%r1           !       sh1add  %r1,%r1,%r1     ! b     e_shift ! sh2add        %r1,%r29,%r29
1477
 
1478
x109:   sh3add  %r26,%r26,%r1           !       sh1add  %r1,%r1,%r1     !       b       e_t0    ! sh2add        %r1,%r26,%r1
1479
 
1480
x110:   sh3add  %r26,%r26,%r1           !       sh1add  %r1,%r1,%r1     !       b       e_2t0   ! sh1add        %r1,%r26,%r1
1481
 
1482
x111:   sh3add  %r26,%r26,%r1           ! sh2add        %r1,%r26,%r1    !       b       e_t0    !       sh1add  %r1,%r1,%r1
1483
 
1484
x112:   sh1add  %r26,%r26,%r1           ! sh1add        %r1,%r26,%r1    !       b       e_t0    ! zdep  %r1,27,28,%r1
1485
 
1486
x113:   sh3add  %r26,%r26,%r1           ! sh2add        %r1,%r26,%r1    ! b     e_t02a0 !       sh1add  %r1,%r1,%r1
1487
 
1488
x114:   sh3add  %r26,%r26,%r1           ! sh1add        %r1,%r26,%r1    !       b       e_2t0   !       sh1add  %r1,%r1,%r1
1489
 
1490
x115:   sh3add  %r26,%r26,%r1           ! sh1add        %r1,%r26,%r1    ! b     e_2t0a0 !       sh1add  %r1,%r1,%r1
1491
 
1492
x116:   sh1add  %r26,%r26,%r1           ! sh1add        %r1,%r26,%r1    !       b       e_4t0   ! sh2add        %r1,%r26,%r1
1493
 
1494
x117:   sh1add  %r26,%r26,%r1           ! sh2add        %r1,%r26,%r1    !       b       e_t0    !       sh3add  %r1,%r1,%r1
1495
 
1496
x118:   sh1add  %r26,%r26,%r1           ! sh2add        %r1,%r26,%r1    ! b     e_t0a0  !       sh3add  %r1,%r1,%r1
1497
 
1498
x119:   sh1add  %r26,%r26,%r1           ! sh2add        %r1,%r26,%r1    ! b     e_t02a0 !       sh3add  %r1,%r1,%r1
1499
 
1500
x120:   sh2add  %r26,%r26,%r1           !       sh1add  %r1,%r1,%r1     ! b     e_shift ! sh3add        %r1,%r29,%r29
1501
 
1502
x121:   sh2add  %r26,%r26,%r1           !       sh1add  %r1,%r1,%r1     !       b       e_t0    ! sh3add        %r1,%r26,%r1
1503
 
1504
x122:   sh2add  %r26,%r26,%r1           !       sh1add  %r1,%r1,%r1     !       b       e_2t0   ! sh2add        %r1,%r26,%r1
1505
 
1506
x123:   sh2add  %r26,%r26,%r1           ! sh3add        %r1,%r26,%r1    !       b       e_t0    !       sh1add  %r1,%r1,%r1
1507
 
1508
x124: zdep      %r26,26,27,%r1  ! sub   %r1,%r26,%r1    ! b     e_shift ! sh2add        %r1,%r29,%r29
1509
 
1510
x125:   sh2add  %r26,%r26,%r1           !       sh2add  %r1,%r1,%r1     !       b       e_t0    !       sh2add  %r1,%r1,%r1
1511
 
1512
x126: zdep      %r26,25,26,%r1  ! sub   %r1,%r26,%r1    ! b     e_shift ! sh1add        %r1,%r29,%r29
1513
 
1514
x127: zdep      %r26,24,25,%r1  ! comb,<>       %r25,0,l0       ! sub   %r1,%r26,%r1    ! b,n   ret_t0
1515
 
1516
x128: zdep      %r26,24,25,%r1  ! comb,<>       %r25,0,l1       !       add     %r29,%r1,%r29   ! bv,n  0(r31)
1517
 
1518
x129: zdep      %r26,24,25,%r1  ! comb,<>       %r25,0,l0       ! add   %r1,%r26,%r1    ! b,n   ret_t0
1519
 
1520
x130: zdep      %r26,25,26,%r1  ! add   %r1,%r26,%r1    ! b     e_shift ! sh1add        %r1,%r29,%r29
1521
 
1522
x131:   sh3add  %r26,0,%r1              ! sh3add        %r1,%r26,%r1    !       b       e_t0    ! sh1add        %r1,%r26,%r1
1523
 
1524
x132:  sh3add %r26,0,%r1  ! sh2add %r1,%r26,%r1  ! b e_shift  ! sh2add %r1,%r29,%r29

x133:  sh3add %r26,0,%r1  ! sh2add %r1,%r26,%r1  ! b e_t0  ! sh2add %r1,%r26,%r1

x134:  sh3add %r26,0,%r1  ! sh2add %r1,%r26,%r1  ! b e_2t0  ! sh1add %r1,%r26,%r1

x135:  sh3add %r26,%r26,%r1  ! sh2add %r1,%r1,%r1  ! b e_t0  ! sh1add %r1,%r1,%r1

x136:  sh3add %r26,0,%r1  ! sh1add %r1,%r26,%r1  ! b e_shift  ! sh3add %r1,%r29,%r29

x137:  sh3add %r26,0,%r1  ! sh1add %r1,%r26,%r1  ! b e_t0  ! sh3add %r1,%r26,%r1

x138:  sh3add %r26,0,%r1  ! sh1add %r1,%r26,%r1  ! b e_2t0  ! sh2add %r1,%r26,%r1

x139:  sh3add %r26,0,%r1  ! sh1add %r1,%r26,%r1  ! b e_2t0a0  ! sh2add %r1,%r26,%r1

x140:  sh1add %r26,%r26,%r1  ! sh1add %r1,%r26,%r1  ! b e_4t0  ! sh2add %r1,%r1,%r1

x141:  sh3add %r26,0,%r1  ! sh1add %r1,%r26,%r1  ! b e_4t0a0  ! sh1add %r1,%r26,%r1

x142:  sh3add %r26,%r26,%r1  ! sh3add %r1,0,%r1  ! b e_2t0  ! sub %r1,%r26,%r1

x143:  zdep %r26,27,28,%r1  ! sh3add %r1,%r1,%r1  ! b e_t0  ! sub %r1,%r26,%r1

x144:  sh3add %r26,%r26,%r1  ! sh3add %r1,0,%r1  ! b e_shift  ! sh1add %r1,%r29,%r29

x145:  sh3add %r26,%r26,%r1  ! sh3add %r1,0,%r1  ! b e_t0  ! sh1add %r1,%r26,%r1

x146:  sh3add %r26,%r26,%r1  ! sh3add %r1,%r26,%r1  ! b e_shift  ! sh1add %r1,%r29,%r29

x147:  sh3add %r26,%r26,%r1  ! sh3add %r1,%r26,%r1  ! b e_t0  ! sh1add %r1,%r26,%r1

x148:  sh3add %r26,%r26,%r1  ! sh2add %r1,%r26,%r1  ! b e_shift  ! sh2add %r1,%r29,%r29

x149:  sh3add %r26,%r26,%r1  ! sh2add %r1,%r26,%r1  ! b e_t0  ! sh2add %r1,%r26,%r1

x150:  sh3add %r26,%r26,%r1  ! sh2add %r1,%r26,%r1  ! b e_2t0  ! sh1add %r1,%r26,%r1

x151:  sh3add %r26,%r26,%r1  ! sh2add %r1,%r26,%r1  ! b e_2t0a0  ! sh1add %r1,%r26,%r1

x152:  sh3add %r26,%r26,%r1  ! sh1add %r1,%r26,%r1  ! b e_shift  ! sh3add %r1,%r29,%r29

x153:  sh3add %r26,%r26,%r1  ! sh1add %r1,%r26,%r1  ! b e_t0  ! sh3add %r1,%r26,%r1

x154:  sh3add %r26,%r26,%r1  ! sh1add %r1,%r26,%r1  ! b e_2t0  ! sh2add %r1,%r26,%r1

x155:  zdep %r26,26,27,%r1  ! sub %r1,%r26,%r1  ! b e_t0  ! sh2add %r1,%r1,%r1

x156:  sh3add %r26,%r26,%r1  ! sh1add %r1,%r26,%r1  ! b e_4t0  ! sh1add %r1,%r26,%r1

x157:  zdep %r26,26,27,%r1  ! sub %r1,%r26,%r1  ! b e_t02a0  ! sh2add %r1,%r1,%r1

x158:  zdep %r26,27,28,%r1  ! sh2add %r1,%r1,%r1  ! b e_2t0  ! sub %r1,%r26,%r1

x159:  zdep %r26,26,27,%r1  ! sh2add %r1,%r1,%r1  ! b e_t0  ! sub %r1,%r26,%r1

x160:  sh2add %r26,%r26,%r1  ! sh2add %r1,0,%r1  ! b e_shift  ! sh3add %r1,%r29,%r29

x161:  sh3add %r26,0,%r1  ! sh2add %r1,%r1,%r1  ! b e_t0  ! sh2add %r1,%r26,%r1

x162:  sh3add %r26,%r26,%r1  ! sh3add %r1,%r1,%r1  ! b e_shift  ! sh1add %r1,%r29,%r29

x163:  sh3add %r26,%r26,%r1  ! sh3add %r1,%r1,%r1  ! b e_t0  ! sh1add %r1,%r26,%r1

x164:  sh2add %r26,%r26,%r1  ! sh3add %r1,%r26,%r1  ! b e_shift  ! sh2add %r1,%r29,%r29

x165:  sh3add %r26,0,%r1  ! sh2add %r1,%r26,%r1  ! b e_t0  ! sh2add %r1,%r1,%r1

x166:  sh2add %r26,%r26,%r1  ! sh3add %r1,%r26,%r1  ! b e_2t0  ! sh1add %r1,%r26,%r1

x167:  sh2add %r26,%r26,%r1  ! sh3add %r1,%r26,%r1  ! b e_2t0a0  ! sh1add %r1,%r26,%r1

x168:  sh2add %r26,%r26,%r1  ! sh2add %r1,%r26,%r1  ! b e_shift  ! sh3add %r1,%r29,%r29

x169:  sh2add %r26,%r26,%r1  ! sh2add %r1,%r26,%r1  ! b e_t0  ! sh3add %r1,%r26,%r1

x170:  zdep %r26,26,27,%r1  ! sh1add %r26,%r1,%r1  ! b e_t0  ! sh2add %r1,%r1,%r1

x171:  sh3add %r26,%r26,%r1  ! sh1add %r1,%r26,%r1  ! b e_t0  ! sh3add %r1,%r1,%r1

x172:  sh2add %r26,%r26,%r1  ! sh2add %r1,%r26,%r1  ! b e_4t0  ! sh1add %r1,%r26,%r1

x173:  sh3add %r26,%r26,%r1  ! sh1add %r1,%r26,%r1  ! b e_t02a0  ! sh3add %r1,%r1,%r1

x174:  zdep %r26,26,27,%r1  ! sh1add %r26,%r1,%r1  ! b e_t04a0  ! sh2add %r1,%r1,%r1

x175:  sh3add %r26,0,%r1  ! sh1add %r1,%r26,%r1  ! b e_5t0  ! sh1add %r1,%r26,%r1

x176:  sh2add %r26,%r26,%r1  ! sh2add %r1,%r26,%r1  ! b e_8t0  ! add %r1,%r26,%r1

x177:  sh2add %r26,%r26,%r1  ! sh2add %r1,%r26,%r1  ! b e_8t0a0  ! add %r1,%r26,%r1

x178:  sh2add %r26,%r26,%r1  ! sh1add %r1,%r26,%r1  ! b e_2t0  ! sh3add %r1,%r26,%r1

x179:  sh2add %r26,%r26,%r1  ! sh1add %r1,%r26,%r1  ! b e_2t0a0  ! sh3add %r1,%r26,%r1

x180:  sh3add %r26,%r26,%r1  ! sh2add %r1,%r1,%r1  ! b e_shift  ! sh2add %r1,%r29,%r29

x181:  sh3add %r26,%r26,%r1  ! sh2add %r1,%r1,%r1  ! b e_t0  ! sh2add %r1,%r26,%r1

x182:  sh3add %r26,%r26,%r1  ! sh2add %r1,%r1,%r1  ! b e_2t0  ! sh1add %r1,%r26,%r1

x183:  sh3add %r26,%r26,%r1  ! sh2add %r1,%r1,%r1  ! b e_2t0a0  ! sh1add %r1,%r26,%r1

x184:  sh2add %r26,%r26,%r1  ! sh3add %r1,%r1,%r1  ! b e_4t0  ! add %r1,%r26,%r1

x185:  sh3add %r26,%r26,%r1  ! sh2add %r1,%r26,%r1  ! b e_t0  ! sh2add %r1,%r1,%r1

x186:  zdep %r26,26,27,%r1  ! sub %r1,%r26,%r1  ! b e_2t0  ! sh1add %r1,%r1,%r1

x187:  sh3add %r26,%r26,%r1  ! sh2add %r1,%r26,%r1  ! b e_t02a0  ! sh2add %r1,%r1,%r1

x188:  sh3add %r26,%r26,%r1  ! sh2add %r1,%r1,%r1  ! b e_4t0  ! sh1add %r26,%r1,%r1

x189:  sh2add %r26,%r26,%r1  ! sh2add %r1,%r26,%r1  ! b e_t0  ! sh3add %r1,%r1,%r1

x190:  sh3add %r26,%r26,%r1  ! sh1add %r1,%r26,%r1  ! b e_2t0  ! sh2add %r1,%r1,%r1

x191:  zdep %r26,25,26,%r1  ! sh1add %r1,%r1,%r1  ! b e_t0  ! sub %r1,%r26,%r1

x192:  sh3add %r26,0,%r1  ! sh1add %r1,%r1,%r1  ! b e_shift  ! sh3add %r1,%r29,%r29

x193:  sh3add %r26,0,%r1  ! sh1add %r1,%r1,%r1  ! b e_t0  ! sh3add %r1,%r26,%r1

x194:  sh3add %r26,0,%r1  ! sh1add %r1,%r1,%r1  ! b e_2t0  ! sh2add %r1,%r26,%r1

x195:  sh3add %r26,0,%r1  ! sh3add %r1,%r26,%r1  ! b e_t0  ! sh1add %r1,%r1,%r1

x196:  sh3add %r26,0,%r1  ! sh1add %r1,%r1,%r1  ! b e_4t0  ! sh1add %r1,%r26,%r1

x197:  sh3add %r26,0,%r1  ! sh1add %r1,%r1,%r1  ! b e_4t0a0  ! sh1add %r1,%r26,%r1

x198:  zdep %r26,25,26,%r1  ! sh1add %r26,%r1,%r1  ! b e_t0  ! sh1add %r1,%r1,%r1

x199:  sh3add %r26,0,%r1  ! sh2add %r1,%r26,%r1  ! b e_2t0a0  ! sh1add %r1,%r1,%r1

x200:  sh2add %r26,%r26,%r1  ! sh2add %r1,%r1,%r1  ! b e_shift  ! sh3add %r1,%r29,%r29

x201:  sh2add %r26,%r26,%r1  ! sh2add %r1,%r1,%r1  ! b e_t0  ! sh3add %r1,%r26,%r1

x202:  sh2add %r26,%r26,%r1  ! sh2add %r1,%r1,%r1  ! b e_2t0  ! sh2add %r1,%r26,%r1

x203:  sh2add %r26,%r26,%r1  ! sh2add %r1,%r1,%r1  ! b e_2t0a0  ! sh2add %r1,%r26,%r1

x204:  sh3add %r26,0,%r1  ! sh1add %r1,%r26,%r1  ! b e_4t0  ! sh1add %r1,%r1,%r1

x205:  sh2add %r26,%r26,%r1  ! sh3add %r1,%r26,%r1  ! b e_t0  ! sh2add %r1,%r1,%r1

x206:  zdep %r26,25,26,%r1  ! sh2add %r26,%r1,%r1  ! b e_t02a0  ! sh1add %r1,%r1,%r1

x207:  sh3add %r26,0,%r1  ! sh1add %r1,%r26,%r1  ! b e_3t0  ! sh2add %r1,%r26,%r1

x208:  sh2add %r26,%r26,%r1  ! sh2add %r1,%r1,%r1  ! b e_8t0  ! add %r1,%r26,%r1

x209:  sh2add %r26,%r26,%r1  ! sh2add %r1,%r1,%r1  ! b e_8t0a0  ! add %r1,%r26,%r1

x210:  sh2add %r26,%r26,%r1  ! sh2add %r1,%r26,%r1  ! b e_2t0  ! sh2add %r1,%r1,%r1

x211:  sh2add %r26,%r26,%r1  ! sh2add %r1,%r26,%r1  ! b e_2t0a0  ! sh2add %r1,%r1,%r1

x212:  sh1add %r26,%r26,%r1  ! sh2add %r1,%r26,%r1  ! b e_4t0  ! sh2add %r1,%r26,%r1

x213:  sh1add %r26,%r26,%r1  ! sh2add %r1,%r26,%r1  ! b e_4t0a0  ! sh2add %r1,%r26,%r1

x214:  sh3add %r26,%r26,%r1  ! sh2add %r26,%r1,%r1  ! b e2t04a0  ! sh3add %r1,%r26,%r1

x215:  sh2add %r26,%r26,%r1  ! sh2add %r1,%r26,%r1  ! b e_5t0  ! sh1add %r1,%r26,%r1

x216:  sh3add %r26,%r26,%r1  ! sh1add %r1,%r1,%r1  ! b e_shift  ! sh3add %r1,%r29,%r29

x217:  sh3add %r26,%r26,%r1  ! sh1add %r1,%r1,%r1  ! b e_t0  ! sh3add %r1,%r26,%r1

x218:  sh3add %r26,%r26,%r1  ! sh1add %r1,%r1,%r1  ! b e_2t0  ! sh2add %r1,%r26,%r1

x219:  sh3add %r26,%r26,%r1  ! sh3add %r1,%r26,%r1  ! b e_t0  ! sh1add %r1,%r1,%r1

x220:  sh1add %r26,%r26,%r1  ! sh3add %r1,%r1,%r1  ! b e_4t0  ! sh1add %r1,%r26,%r1

x221:  sh1add %r26,%r26,%r1  ! sh3add %r1,%r1,%r1  ! b e_4t0a0  ! sh1add %r1,%r26,%r1

x222:  sh3add %r26,%r26,%r1  ! sh2add %r1,%r26,%r1  ! b e_2t0  ! sh1add %r1,%r1,%r1

x223:  sh3add %r26,%r26,%r1  ! sh2add %r1,%r26,%r1  ! b e_2t0a0  ! sh1add %r1,%r1,%r1

x224:  sh3add %r26,%r26,%r1  ! sh1add %r1,%r1,%r1  ! b e_8t0  ! add %r1,%r26,%r1

x225:  sh3add %r26,%r26,%r1  ! sh2add %r1,%r1,%r1  ! b e_t0  ! sh2add %r1,%r1,%r1

x226:  sh1add %r26,%r26,%r1  ! sh1add %r1,%r26,%r1  ! b e_t02a0  ! zdep %r1,26,27,%r1

x227:  sh3add %r26,%r26,%r1  ! sh2add %r1,%r1,%r1  ! b e_t02a0  ! sh2add %r1,%r1,%r1

x228:  sh3add %r26,%r26,%r1  ! sh1add %r1,%r26,%r1  ! b e_4t0  ! sh1add %r1,%r1,%r1

x229:  sh3add %r26,%r26,%r1  ! sh1add %r1,%r26,%r1  ! b e_4t0a0  ! sh1add %r1,%r1,%r1

x230:  sh3add %r26,%r26,%r1  ! sh2add %r1,%r1,%r1  ! b e_5t0  ! add %r1,%r26,%r1

x231:  sh3add %r26,%r26,%r1  ! sh1add %r1,%r26,%r1  ! b e_3t0  ! sh2add %r1,%r26,%r1

x232:  sh1add %r26,%r26,%r1  ! sh1add %r1,%r26,%r1  ! b e_8t0  ! sh2add %r1,%r26,%r1

x233:  sh1add %r26,%r26,%r1  ! sh1add %r1,%r26,%r1  ! b e_8t0a0  ! sh2add %r1,%r26,%r1

x234:  sh1add %r26,%r26,%r1  ! sh2add %r1,%r26,%r1  ! b e_2t0  ! sh3add %r1,%r1,%r1

x235:  sh1add %r26,%r26,%r1  ! sh2add %r1,%r26,%r1  ! b e_2t0a0  ! sh3add %r1,%r1,%r1

x236:  sh3add %r26,%r26,%r1  ! sh1add %r1,%r26,%r1  ! b e4t08a0  ! sh1add %r1,%r1,%r1

x237:  zdep %r26,27,28,%r1  ! sh2add %r1,%r1,%r1  ! b e_3t0  ! sub %r1,%r26,%r1

x238:  sh1add %r26,%r26,%r1  ! sh2add %r1,%r26,%r1  ! b e2t04a0  ! sh3add %r1,%r1,%r1

x239:  zdep %r26,27,28,%r1  ! sh2add %r1,%r1,%r1  ! b e_t0ma0  ! sh1add %r1,%r1,%r1

x240:  sh3add %r26,%r26,%r1  ! add %r1,%r26,%r1  ! b e_8t0  ! sh1add %r1,%r1,%r1

x241:  sh3add %r26,%r26,%r1  ! add %r1,%r26,%r1  ! b e_8t0a0  ! sh1add %r1,%r1,%r1

x242:  sh2add %r26,%r26,%r1  ! sh1add %r1,%r1,%r1  ! b e_2t0  ! sh3add %r1,%r26,%r1

x243:  sh3add %r26,%r26,%r1  ! sh3add %r1,%r1,%r1  ! b e_t0  ! sh1add %r1,%r1,%r1

x244:  sh2add %r26,%r26,%r1  ! sh1add %r1,%r1,%r1  ! b e_4t0  ! sh2add %r1,%r26,%r1

x245:  sh3add %r26,0,%r1  ! sh1add %r1,%r1,%r1  ! b e_5t0  ! sh1add %r1,%r26,%r1

x246:  sh2add %r26,%r26,%r1  ! sh3add %r1,%r26,%r1  ! b e_2t0  ! sh1add %r1,%r1,%r1

x247:  sh2add %r26,%r26,%r1  ! sh3add %r1,%r26,%r1  ! b e_2t0a0  ! sh1add %r1,%r1,%r1

x248:  zdep %r26,26,27,%r1  ! sub %r1,%r26,%r1  ! b e_shift  ! sh3add %r1,%r29,%r29

x249:  zdep %r26,26,27,%r1  ! sub %r1,%r26,%r1  ! b e_t0  ! sh3add %r1,%r26,%r1

x250:  sh2add %r26,%r26,%r1  ! sh2add %r1,%r1,%r1  ! b e_2t0  ! sh2add %r1,%r1,%r1

x251:  sh2add %r26,%r26,%r1  ! sh2add %r1,%r1,%r1  ! b e_2t0a0  ! sh2add %r1,%r1,%r1

x252:  zdep %r26,25,26,%r1  ! sub %r1,%r26,%r1  ! b e_shift  ! sh2add %r1,%r29,%r29

x253:  zdep %r26,25,26,%r1  ! sub %r1,%r26,%r1  ! b e_t0  ! sh2add %r1,%r26,%r1

x254:  zdep %r26,24,25,%r1  ! sub %r1,%r26,%r1  ! b e_shift  ! sh1add %r1,%r29,%r29

x255:  zdep %r26,23,24,%r1  ! comb,<> %r25,0,l0  ! sub %r1,%r26,%r1  ! b,n ret_t0

;1040 insts before this.
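; Note on the entries above (as read from the code): each xN entry forms the
; constant multiple N*%r26 in %r1 with two or three shift-and-add steps
; (sh1add/sh2add/sh3add, zdep for pure shifts, sub for 2^k-1 patterns), the
; last step riding in the delay slot of a branch to one of the e_* tails
; below.  The tail names encode how many copies of t0 (%r1) and a0 (%r26)
; get folded into the result register %r29: e_t0 adds t0, e_2t0/e_4t0/e_8t0
; add 2, 4 or 8 times t0, the *a0 forms also add a0 (e_t02a0 adds t0+2*a0,
; e4t08a0 adds 4*t0+8*a0, e_t0ma0 adds t0-a0, and so on).  Each tail tests
; %r25 with comb,<> against the l0/l1/l2 labels earlier in this file: when
; the rest of the multiplier is zero it returns through bv 0(r31), otherwise
; e_shift moves %r26 left by one byte (zdep) and the loop continues.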
ret_t0: bv    0(r31)

e_t0:   add     %r29,%r1,%r29

e_shift: comb,<> %r25,0,l2

        zdep    %r26,23,24,%r26         ; %r26 <<= 8 ***********
        bv,n  0(r31)
e_t0ma0: comb,<> %r25,0,l0

        sub     %r1,%r26,%r1
        bv    0(r31)
                add     %r29,%r1,%r29
e_t0a0: comb,<> %r25,0,l0

        add     %r1,%r26,%r1
        bv    0(r31)
                add     %r29,%r1,%r29
e_t02a0: comb,<> %r25,0,l0

        sh1add  %r26,%r1,%r1
        bv    0(r31)
                add     %r29,%r1,%r29
e_t04a0: comb,<> %r25,0,l0

        sh2add  %r26,%r1,%r1
        bv    0(r31)
                add     %r29,%r1,%r29
e_2t0: comb,<>  %r25,0,l1

        sh1add  %r1,%r29,%r29
        bv,n  0(r31)
e_2t0a0: comb,<> %r25,0,l0

        sh1add  %r1,%r26,%r1
        bv    0(r31)
                add     %r29,%r1,%r29
e2t04a0: sh1add %r26,%r1,%r1

        comb,<> %r25,0,l1
        sh1add  %r1,%r29,%r29
        bv,n  0(r31)
e_3t0: comb,<>  %r25,0,l0

                sh1add  %r1,%r1,%r1
        bv    0(r31)
                add     %r29,%r1,%r29
e_4t0: comb,<>  %r25,0,l1

        sh2add  %r1,%r29,%r29
        bv,n  0(r31)
e_4t0a0: comb,<> %r25,0,l0

        sh2add  %r1,%r26,%r1
        bv    0(r31)
                add     %r29,%r1,%r29
e4t08a0: sh1add %r26,%r1,%r1

        comb,<> %r25,0,l1
        sh2add  %r1,%r29,%r29
        bv,n  0(r31)
e_5t0: comb,<>  %r25,0,l0

                sh2add  %r1,%r1,%r1
        bv    0(r31)
                add     %r29,%r1,%r29
e_8t0: comb,<>  %r25,0,l1

        sh3add  %r1,%r29,%r29
        bv,n  0(r31)
e_8t0a0: comb,<> %r25,0,l0

        sh3add  %r1,%r26,%r1
        bv    0(r31)
                add     %r29,%r1,%r29

        .exit
        .procend
        .end

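; The .import directives below name the divide-by-constant millicode entry
; points that the small_divisor table in $$divI branches to for divisors
; 2-10, 12, 14 and 15 (11 and 13 go through the general path instead).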
        .import $$divI_2,millicode
        .import $$divI_3,millicode
        .import $$divI_4,millicode
        .import $$divI_5,millicode
        .import $$divI_6,millicode
        .import $$divI_7,millicode
        .import $$divI_8,millicode
        .import $$divI_9,millicode
        .import $$divI_10,millicode
        .import $$divI_12,millicode
        .import $$divI_14,millicode
        .import $$divI_15,millicode
        .export $$divI,millicode
        .export $$divoI,millicode
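; $$divI / $$divoI -- 32-bit signed integer division (quotient only).
; Overview of the code below, as implemented: the $$divoI entry first checks
; for a divisor of -1 (negative1 returns the negated dividend and uses addo
; so that 0x80000000 / -1 traps on overflow); $$divI sends divisors 0..15
; through the blr dispatch table at small_divisor; all other divisors fall
; through to normal1, which makes the dividend non-negative, negates the
; divisor to pre-condition the carry and V bits ("ds 0,temp,0"), and then
; runs 32 ds/addc pairs -- one PA-RISC divide step per quotient bit --
; before the final xor,>=/sub restores the sign of the quotient from the
; signs of the original operands.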
$$divoI:
        .proc
        .callinfo millicode
        .entry
        comib,=,n  -1,arg1,negative1    ; when divisor == -1
$$divI:
        comib,>>=,n 15,arg1,small_divisor
        add,>=  0,arg0,retreg           ; move dividend, if retreg < 0,
normal1:
          sub   0,retreg,retreg         ;   make it positive
        sub     0,arg1,temp             ; clear carry,
                                        ;   negate the divisor
        ds      0,temp,0                ; set V-bit to the comple-
                                        ;   ment of the divisor sign
        add     retreg,retreg,retreg    ; shift msb bit into carry
        ds      r0,arg1,temp            ; 1st divide step, if no carry
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 2nd divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 3rd divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 4th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 5th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 6th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 7th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 8th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 9th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 10th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 11th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 12th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 13th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 14th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 15th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 16th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 17th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 18th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 19th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 20th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 21st divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 22nd divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 23rd divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 24th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 25th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 26th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 27th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 28th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 29th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 30th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 31st divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 32nd divide step,
        addc    retreg,retreg,retreg    ; shift last retreg bit into retreg
        xor,>=  arg0,arg1,0             ; get correct sign of quotient
          sub   0,retreg,retreg         ;   based on operand signs
        bv,n  0(r31)
        nop
;______________________________________________________________________
small_divisor:
        blr,n   arg1,r0
        nop
; table for divisor == 0,1, ... ,15
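; blr scales its index by eight bytes, so each divisor value 0..15 owns
; exactly one two-instruction slot below: 0 traps (addit,=), 1 returns the
; dividend unchanged, 2-10, 12, 14 and 15 tail-branch to the matching
; $$divI_N millicode, and 11 and 13 re-enter the general loop at normal1.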
        addit,= 0,arg1,r0       ; trap if divisor == 0
        nop
        bv    0(r31)            ; divisor == 1
        copy    arg0,retreg
         b,n   $$divI_2         ; divisor == 2
        nop
         b,n   $$divI_3         ; divisor == 3
        nop
         b,n   $$divI_4         ; divisor == 4
        nop
         b,n   $$divI_5         ; divisor == 5
        nop
         b,n   $$divI_6         ; divisor == 6
        nop
         b,n   $$divI_7         ; divisor == 7
        nop
         b,n   $$divI_8         ; divisor == 8
        nop
         b,n   $$divI_9         ; divisor == 9
        nop
         b,n   $$divI_10        ; divisor == 10
        nop
        b       normal1         ; divisor == 11
        add,>=  0,arg0,retreg
         b,n   $$divI_12        ; divisor == 12
        nop
        b       normal1         ; divisor == 13
        add,>=  0,arg0,retreg
         b,n   $$divI_14        ; divisor == 14
        nop
         b,n   $$divI_15        ; divisor == 15
        nop
;______________________________________________________________________
negative1:
        sub     0,arg0,retreg   ; result is negation of dividend
        bv    0(r31)
        addo    arg0,arg1,r0    ; trap iff dividend==0x80000000 && divisor==-1
        .exit
        .procend

        .subspa $LIT$
___hp_free_copyright:
        .export ___hp_free_copyright,data
        .align 4
        .string "(c) Copyright 1986 HEWLETT-PACKARD COMPANY\x0aTo anyone who acknowledges that this file is provided \"AS IS\"\x0awithout any express or implied warranty:\x0a    permission to use, copy, modify, and distribute this file\x0afor any purpose is hereby granted without fee, provided that\x0athe above copyright notice and this notice appears in all\x0acopies, and that the name of Hewlett-Packard Company not be\x0aused in advertising or publicity pertaining to distribution\x0aof the software without specific, written prior permission.\x0aHewlett-Packard Company makes no representations about the\x0asuitability of this software for any purpose.\x0a\x00"
        .align 4
        .end