OpenCores — Subversion repository or1k_old
URL: https://opencores.org/ocsvn/or1k_old/or1k_old/trunk
File: or1k_old/trunk/newlib/libgloss/pa/hp-milli.s (blame for rev 1765; lines from rev 39, lampret)
;
;  (c) Copyright 1986 HEWLETT-PACKARD COMPANY
;
;  To anyone who acknowledges that this file is provided "AS IS"
;  without any express or implied warranty:
;      permission to use, copy, modify, and distribute this file
;  for any purpose is hereby granted without fee, provided that
;  the above copyright notice and this notice appears in all
;  copies, and that the name of Hewlett-Packard Company not be
;  used in advertising or publicity pertaining to distribution
;  of the software without specific, written prior permission.
;  Hewlett-Packard Company makes no representations about the
;  suitability of this software for any purpose.
;

; Standard Hardware Register Definitions for Use with Assembler
; version A.08.06
;       - fr16-31 added at Utah
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
; Hardware General Registers
r0: .equ        0
r1: .equ        1
r2: .equ        2
r3: .equ        3
r4: .equ        4
r5: .equ        5
r6: .equ        6
r7: .equ        7
r8: .equ        8
r9: .equ        9
r10: .equ       10
r11: .equ       11
r12: .equ       12
r13: .equ       13
r14: .equ       14
r15: .equ       15
r16: .equ       16
r17: .equ       17
r18: .equ       18
r19: .equ       19
r20: .equ       20
r21: .equ       21
r22: .equ       22
r23: .equ       23
r24: .equ       24
r25: .equ       25
r26: .equ       26
r27: .equ       27
r28: .equ       28
r29: .equ       29
r30: .equ       30
r31: .equ       31

; Hardware Space Registers
sr0: .equ       0
sr1: .equ       1
sr2: .equ       2
sr3: .equ       3
sr4: .equ       4
sr5: .equ       5
sr6: .equ       6
sr7: .equ       7

; Hardware Floating Point Registers
fr0: .equ       0
fr1: .equ       1
fr2: .equ       2
fr3: .equ       3
fr4: .equ       4
fr5: .equ       5
fr6: .equ       6
fr7: .equ       7
fr8: .equ       8
fr9: .equ       9
fr10: .equ      10
fr11: .equ      11
fr12: .equ      12
fr13: .equ      13
fr14: .equ      14
fr15: .equ      15
fr16: .equ      16
fr17: .equ      17
fr18: .equ      18
fr19: .equ      19
fr20: .equ      20
fr21: .equ      21
fr22: .equ      22
fr23: .equ      23
fr24: .equ      24
fr25: .equ      25
fr26: .equ      26
fr27: .equ      27
fr28: .equ      28
fr29: .equ      29
fr30: .equ      30
fr31: .equ      31

; Hardware Control Registers
cr0: .equ       0
rctr: .equ      0                       ; Recovery Counter Register

cr8: .equ       8                       ; Protection ID 1
pidr1: .equ     8

cr9: .equ       9                       ; Protection ID 2
pidr2: .equ     9

cr10: .equ      10
ccr: .equ       10                      ; Coprocessor Configuration Register

cr11: .equ      11
sar: .equ       11                      ; Shift Amount Register

cr12: .equ      12
pidr3: .equ     12                      ; Protection ID 3

cr13: .equ      13
pidr4: .equ     13                      ; Protection ID 4

cr14: .equ      14
iva: .equ       14                      ; Interrupt Vector Address

cr15: .equ      15
eiem: .equ      15                      ; External Interrupt Enable Mask

cr16: .equ      16
itmr: .equ      16                      ; Interval Timer

cr17: .equ      17
pcsq: .equ      17                      ; Program Counter Space queue

cr18: .equ      18
pcoq: .equ      18                      ; Program Counter Offset queue

cr19: .equ      19
iir: .equ       19                      ; Interruption Instruction Register

cr20: .equ      20
isr: .equ       20                      ; Interruption Space Register

cr21: .equ      21
ior: .equ       21                      ; Interruption Offset Register

cr22: .equ      22
ipsw: .equ      22                      ; Interruption Processor Status Word

cr23: .equ      23
eirr: .equ      23                      ; External Interrupt Request

cr24: .equ      24
ppda: .equ      24                      ; Physical Page Directory Address
tr0: .equ       24                      ; Temporary register 0

cr25: .equ      25
hta: .equ       25                      ; Hash Table Address
tr1: .equ       25                      ; Temporary register 1

cr26: .equ      26
tr2: .equ       26                      ; Temporary register 2

cr27: .equ      27
tr3: .equ       27                      ; Temporary register 3

cr28: .equ      28
tr4: .equ       28                      ; Temporary register 4

cr29: .equ      29
tr5: .equ       29                      ; Temporary register 5

cr30: .equ      30
tr6: .equ       30                      ; Temporary register 6

cr31: .equ      31
tr7: .equ       31                      ; Temporary register 7

;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
; Procedure Call Convention                                                  ~
; Register Definitions for Use with Assembler                                ~
; version A.08.06
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
; Software Architecture General Registers
rp: .equ        r2      ; return pointer
mrp: .equ       r31     ; millicode return pointer
ret0: .equ      r28     ; return value
ret1: .equ      r29     ; return value (high part of double)
sl: .equ        r29     ; static link
sp: .equ        r30     ; stack pointer
dp: .equ        r27     ; data pointer
arg0: .equ      r26     ; argument
arg1: .equ      r25     ; argument or high part of double argument
arg2: .equ      r24     ; argument
arg3: .equ      r23     ; argument or high part of double argument
;_____________________________________________________________________________
; Software Architecture Space Registers
;               sr0     ; return link from BLE
sret: .equ      sr1     ; return value
sarg: .equ      sr1     ; argument
;               sr4     ; PC SPACE tracker
;               sr5     ; process private data
;_____________________________________________________________________________
; Software Architecture Pseudo Registers
previous_sp: .equ       64      ; old stack pointer (locates previous frame)
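
The aliases above spell out the calling convention used by this file: arguments travel in arg0..arg3 (r26..r23), results come back in ret0/ret1 (r28/r29), sp is r30, the return address sits in rp (r2), and millicode entry points return through mrp (r31) instead. As a hedged illustration (the C function and parameter names below are hypothetical, not part of hp-milli.s):

#include <stdint.h>

/* Hypothetical C-level view of the register aliases defined above. */
extern int32_t clip(int32_t value, int32_t lo, int32_t hi); /* value->arg0 (r26), lo->arg1 (r25), hi->arg2 (r24) */

int32_t caller(int32_t v)
{
    /* A compiler for this convention passes the operands in r26/r25/r24,
       leaves the return address in rp (r2), and reads the result from ret0 (r28). */
    return clip(v, 0, 255);
}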

;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
; Standard space and subspace definitions.  version A.08.06
; These are generally suitable for programs on HP_UX and HPE.
; Statements commented out are used when building such things as operating
; system kernels.
;;;;;;;;;;;;;;;;
        .SPACE  $TEXT$,         SPNUM=0,SORT=8
        .subspa $MILLICODE$,    QUAD=0,ALIGN=8,ACCESS=0x2c,SORT=8
        .subspa $LIT$,          QUAD=0,ALIGN=8,ACCESS=0x2c,SORT=16
        .subspa $CODE$,         QUAD=0,ALIGN=8,ACCESS=0x2c,SORT=24
; Additional code subspaces should have ALIGN=8 for an interspace BV
; and should have SORT=24.
;
; For an incomplete executable (program bound to shared libraries),
; sort keys $GLOBAL$ -1 and $GLOBAL$ -2 are reserved for the $DLT$
; and $PLT$ subspaces respectively.
;;;;;;;;;;;;;;;
        .SPACE $PRIVATE$,       SPNUM=1,PRIVATE,SORT=16
        .subspa $GLOBAL$,       QUAD=1,ALIGN=8,ACCESS=0x1f,SORT=40
        .import $global$
        .subspa $DATA$,         QUAD=1,ALIGN=8,ACCESS=0x1f,SORT=16
        .subspa $BSS$,          QUAD=1,ALIGN=8,ACCESS=0x1f,SORT=82,ZERO

        .SPACE $TEXT$
        .SUBSPA $MILLICODE$

        .align 8
        .EXPORT $$remI,millicode
;       .IMPORT cerror
$$remI:
        .PROC
        .CALLINFO millicode
        .ENTRY
        addit,= 0,arg1,r0               ; trap on div by zero
        add,>= r0,arg0,ret1
        sub r0,ret1,ret1
        sub r0,arg1,r1
        ds r0,r1,r0
        or r0,r0,r1
        add ret1,ret1,ret1
; 32 ds/addc divide steps follow
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        ds r1,arg1,r1
        addc ret1,ret1,ret1
        movb,>=,n r1,ret1,remI300
        add,< arg1,r0,r0
        add,tr r1,arg1,ret1
        sub r1,arg1,ret1
remI300: add,>= arg0,r0,r0

        sub r0,ret1,ret1
        bv r0(r31)
        nop
        .EXIT
        .PROCEND
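
What $$remI computes, sketched in C (an illustrative model, not the shipped code, and the function name is hypothetical): the prologue reduces both operands to magnitudes, the 32 ds/addc steps form |arg0| mod |arg1| one bit at a time, and the epilogue at remI300 re-applies the dividend's sign, i.e. the remainder of truncated signed division.

/* Illustrative C model of $$remI's result. */
int remI_model(int dividend, int divisor)
{
    unsigned ua = dividend < 0 ? 0u - (unsigned)dividend : (unsigned)dividend;
    unsigned ub = divisor  < 0 ? 0u - (unsigned)divisor  : (unsigned)divisor;
    unsigned r  = ua % ub;                   /* stands in for the 32 divide steps */
    return dividend < 0 ? -(int)r : (int)r;  /* remI300: sign follows the dividend */
}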

bit1:  .equ 1
bit30: .equ 30
bit31: .equ 31

len2:  .equ 2
len4:  .equ 4

$$dyncall:
        .proc
        .callinfo NO_CALLS
        .entry
        .export $$dyncall,MILLICODE

        bb,>=,n 22,bit30,noshlibs

        depi    0,bit31,len2,22
        ldw     4(22),19
        ldw     0(22),22
noshlibs:
        ldsid   (22),r1
        mtsp    r1,sr0
        be      0(sr0,r22)
        stw     rp,-24(sp)
        .exit
        .procend
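
A hedged sketch of the test $$dyncall performs (the struct layout below is the entry shape implied by the two ldw offsets; the type and field names are illustrative, not from this file): a procedure "address" with bit 30 set is really a pointer to an entry holding the real code address and the linkage value, so the stub clears the low bits, loads r19 from offset 4 and the branch target from offset 0, then makes the external branch; otherwise the value is already a plain code address.

#include <stdint.h>

/* Illustrative model of the $$dyncall pointer test; names are hypothetical. */
struct plt_entry {
    uint32_t code_addr;   /* ldw 0(22),22 : branch target           */
    uint32_t linkage;     /* ldw 4(22),19 : value that lands in r19 */
};

uint32_t dyncall_target(uint32_t fptr, uint32_t *linkage_out)
{
    if (fptr & 2u) {                          /* bb on bit 30: set => PLT-style pointer */
        const struct plt_entry *e = (const struct plt_entry *)(uintptr_t)(fptr & ~3u);
        *linkage_out = e->linkage;
        return e->code_addr;
    }
    return fptr;                              /* noshlibs path: plain code address */
}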

temp: .EQU      r1
retreg: .EQU    ret1    ; r29

        .export $$divU,millicode
        .import $$divU_3,millicode
        .import $$divU_5,millicode
        .import $$divU_6,millicode
        .import $$divU_7,millicode
        .import $$divU_9,millicode
        .import $$divU_10,millicode
        .import $$divU_12,millicode
        .import $$divU_14,millicode
        .import $$divU_15,millicode
$$divU:
        .proc
        .callinfo millicode
        .entry
; The subtract is not nullified since it does no harm and can be used
; by the two cases that branch back to "normal".
        comib,>=  15,arg1,special_divisor
        sub     r0,arg1,temp            ; clear carry, negate the divisor
        ds      r0,temp,r0              ; set V-bit to 1
normal:
        add     arg0,arg0,retreg        ; shift msb bit into carry
        ds      r0,arg1,temp            ; 1st divide step, if no carry
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 2nd divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 3rd divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 4th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 5th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 6th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 7th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 8th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 9th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 10th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 11th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 12th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 13th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 14th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 15th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 16th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 17th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 18th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 19th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 20th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 21st divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 22nd divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 23rd divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 24th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 25th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 26th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 27th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 28th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 29th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 30th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 31st divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 32nd divide step,
        bv    0(r31)
        addc    retreg,retreg,retreg    ; shift last retreg bit into retreg
;_____________________________________________________________________________
; handle the cases where divisor is a small constant or has high bit on
special_divisor:
        comib,>  0,arg1,big_divisor
        nop
        blr     arg1,r0
        nop
zero_divisor: ; this label is here to provide external visibility

        addit,= 0,arg1,0                ; trap for zero dvr
        nop
        bv    0(r31)                    ; divisor == 1
        copy    arg0,retreg
        bv    0(r31)                    ; divisor == 2
        extru   arg0,30,31,retreg
         b,n   $$divU_3                 ; divisor == 3
        nop
        bv    0(r31)                    ; divisor == 4
        extru   arg0,29,30,retreg
         b,n   $$divU_5                 ; divisor == 5
        nop
         b,n   $$divU_6                 ; divisor == 6
        nop
         b,n   $$divU_7                 ; divisor == 7
        nop
        bv    0(r31)                    ; divisor == 8
        extru   arg0,28,29,retreg
         b,n   $$divU_9                 ; divisor == 9
        nop
         b,n   $$divU_10                ; divisor == 10
        nop
        b       normal                  ; divisor == 11
        ds      r0,temp,r0              ; set V-bit to 1
         b,n   $$divU_12                ; divisor == 12
        nop
        b       normal                  ; divisor == 13
        ds      r0,temp,r0              ; set V-bit to 1
         b,n   $$divU_14                ; divisor == 14
        nop
         b,n   $$divU_15                ; divisor == 15
        nop
;_____________________________________________________________________________
; Handle the case where the high bit is on in the divisor.
; Compute:      if( dividend>=divisor) quotient=1; else quotient=0;
; Note:         dividend>=divisor iff dividend-divisor does not borrow
; and           not borrow iff carry
big_divisor:
        sub     arg0,arg1,r0
        bv    0(r31)
        addc    r0,r0,retreg
        .exit
        .procend
        .end
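
A minimal C model of the divide-step loop above (illustrative only, with a hypothetical name; the real code relies on the PA-RISC ds instruction, a non-restoring divide-step primitive, while this sketch uses the simpler restoring form of the same one-bit-per-iteration idea). The big_divisor case degenerates to a single compare, exactly as the comment before it states.

/* Restoring-division sketch of $$divU. */
unsigned divU_model(unsigned dividend, unsigned divisor)
{
    unsigned rem = 0, quot = 0;
    for (int i = 31; i >= 0; i--) {
        rem = (rem << 1) | ((dividend >> i) & 1u);  /* shift in the next dividend bit */
        quot <<= 1;
        if (rem >= divisor) {                       /* "no borrow", i.e. carry set */
            rem -= divisor;
            quot |= 1u;                             /* addc shifts this bit into retreg */
        }
    }
    return quot;   /* big_divisor: quotient is simply (dividend >= divisor) */
}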

t2: .EQU        r1
; x2    .EQU    arg0    ; r26
t1: .EQU        arg1    ; r25
; x1    .EQU    ret1    ; r29
;_____________________________________________________________________________

$$divide_by_constant:
        .PROC
        .CALLINFO millicode
        .entry

        .export $$divide_by_constant,millicode
; Provides a "nice" label for the code covered by the unwind descriptor
; for things like gprof.

$$divI_2:
        .EXPORT         $$divI_2,MILLICODE
        COMCLR,>=       arg0,0,0
        ADDI            1,arg0,arg0
        bv    0(r31)
        EXTRS           arg0,30,31,ret1

$$divI_4:
        .EXPORT         $$divI_4,MILLICODE
        COMCLR,>=       arg0,0,0
        ADDI            3,arg0,arg0
        bv    0(r31)
        EXTRS           arg0,29,30,ret1

$$divI_8:
        .EXPORT         $$divI_8,MILLICODE
        COMCLR,>=       arg0,0,0
        ADDI            7,arg0,arg0
        bv    0(r31)
        EXTRS           arg0,28,29,ret1

$$divI_16:
        .EXPORT         $$divI_16,MILLICODE
        COMCLR,>=       arg0,0,0
        ADDI            15,arg0,arg0
        bv    0(r31)
        EXTRS           arg0,27,28,ret1
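
The four entries above share one pattern: when the dividend is negative, add divisor-1 before the arithmetic shift so the quotient is truncated toward zero (COMCLR,>= nullifies the ADDI for non-negative inputs, EXTRS performs the signed shift). A hedged C equivalent, with the shift count k as an illustrative parameter standing in for the four specialized entry points:

/* Sketch of $$divI_2/_4/_8/_16; divI_pow2 and k are illustrative names. */
int divI_pow2(int x, int k)
{
    if (x < 0)
        x += (1 << k) - 1;   /* ADDI of divisor-1, only for negative dividends */
    return x >> k;           /* EXTRS: assumes arithmetic >> as on PA-RISC */
}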

$$divI_3:
        .EXPORT         $$divI_3,MILLICODE
        COMB,<,N        arg0,0,$neg3

        ADDI            1,arg0,arg0
        EXTRU           arg0,1,2,ret1
        SH2ADD          arg0,arg0,arg0
        B               $pos
        ADDC            ret1,0,ret1

$neg3:
        SUBI            1,arg0,arg0
        EXTRU           arg0,1,2,ret1
        SH2ADD          arg0,arg0,arg0
        B               $neg
        ADDC            ret1,0,ret1

$$divU_3:
        .EXPORT         $$divU_3,MILLICODE
        ADDI            1,arg0,arg0
        ADDC            0,0,ret1
        SHD             ret1,arg0,30,t1
        SH2ADD          arg0,arg0,arg0
        B               $pos
        ADDC            ret1,t1,ret1
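
What the $$divU_3 path computes, modeled in C (a reconstruction with an illustrative name, not code from this file): the carry-tracking SHD/ADD/ADDC sequences here and at the shared $pos label below multiply the 64-bit (ret1,arg0) pair by 5, 17, 257 and 65537 in turn, i.e. by (2^32-1)/3, so the upper word ret1 ends up holding floor(arg0/3).

/* Hedged model of $$divU_3 followed by the $pos folds. */
unsigned divu3_model(unsigned x)
{
    unsigned long long a = (unsigned long long)x + 1;  /* ADDI 1; ADDC captures the carry */
    a *= 5;        /* SH2ADD:               a += a << 2  */
    a *= 17;       /* $pos, SHD ...,28:     a += a << 4  */
    a *= 257;      /* $pos_for_17, ...,24:  a += a << 8  */
    a *= 65537;    /* final fold, ...,16:   a += a << 16 */
    return (unsigned)(a >> 32);   /* 5*17*257*65537 == (2^32 - 1)/3; quotient lands in ret1 */
}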

$$divI_5:
        .EXPORT         $$divI_5,MILLICODE
        COMB,<,N        arg0,0,$neg5
        ADDI            3,arg0,t1
        SH1ADD          arg0,t1,arg0
        B               $pos
        ADDC            0,0,ret1

$neg5:
        SUB             0,arg0,arg0
        ADDI            1,arg0,arg0
        SHD             0,arg0,31,ret1
        SH1ADD          arg0,arg0,arg0
        B               $neg
        ADDC            ret1,0,ret1

$$divU_5:
        .EXPORT         $$divU_5,MILLICODE
        ADDI            1,arg0,arg0
        ADDC            0,0,ret1
        SHD             ret1,arg0,31,t1
        SH1ADD          arg0,arg0,arg0
        B               $pos
        ADDC            t1,ret1,ret1

$$divI_6:
        .EXPORT         $$divI_6,MILLICODE
        COMB,<,N        arg0,0,$neg6
        EXTRU           arg0,30,31,arg0
        ADDI            5,arg0,t1
        SH2ADD          arg0,t1,arg0
        B               $pos
        ADDC            0,0,ret1

$neg6:
        SUBI            2,arg0,arg0

        EXTRU           arg0,30,31,arg0
        SHD             0,arg0,30,ret1
        SH2ADD          arg0,arg0,arg0
        B               $neg
        ADDC            ret1,0,ret1

$$divU_6:
        .EXPORT         $$divU_6,MILLICODE
        EXTRU           arg0,30,31,arg0
        ADDI            1,arg0,arg0
        SHD             0,arg0,30,ret1
        SH2ADD          arg0,arg0,arg0
        B               $pos
        ADDC            ret1,0,ret1

$$divU_10:
        .EXPORT         $$divU_10,MILLICODE
        EXTRU           arg0,30,31,arg0
        ADDI            3,arg0,t1
        SH1ADD          arg0,t1,arg0
        ADDC            0,0,ret1
$pos:
        SHD             ret1,arg0,28,t1
        SHD             arg0,0,28,t2
        ADD             arg0,t2,arg0
        ADDC            ret1,t1,ret1
$pos_for_17:
        SHD             ret1,arg0,24,t1
        SHD             arg0,0,24,t2
        ADD             arg0,t2,arg0
        ADDC            ret1,t1,ret1

        SHD             ret1,arg0,16,t1
        SHD             arg0,0,16,t2
        ADD             arg0,t2,arg0
        bv    0(r31)
        ADDC            ret1,t1,ret1

$$divI_10:
        .EXPORT         $$divI_10,MILLICODE
        COMB,<          arg0,0,$neg10
        COPY            0,ret1
        EXTRU           arg0,30,31,arg0
        ADDIB,TR        1,arg0,$pos
        SH1ADD          arg0,arg0,arg0

$neg10:
        SUBI            2,arg0,arg0

        EXTRU           arg0,30,31,arg0
        SH1ADD          arg0,arg0,arg0
$neg:
        SHD             ret1,arg0,28,t1
        SHD             arg0,0,28,t2
        ADD             arg0,t2,arg0
        ADDC            ret1,t1,ret1
$neg_for_17:
        SHD             ret1,arg0,24,t1
        SHD             arg0,0,24,t2
        ADD             arg0,t2,arg0
        ADDC            ret1,t1,ret1

        SHD             ret1,arg0,16,t1
        SHD             arg0,0,16,t2
        ADD             arg0,t2,arg0
        ADDC            ret1,t1,ret1
        bv    0(r31)
        SUB             0,ret1,ret1

$$divI_12:
        .EXPORT         $$divI_12,MILLICODE
        COMB,<          arg0,0,$neg12
        COPY            0,ret1
        EXTRU           arg0,29,30,arg0
        ADDIB,TR        1,arg0,$pos
        SH2ADD          arg0,arg0,arg0

$neg12:
        SUBI            4,arg0,arg0

        EXTRU           arg0,29,30,arg0
        B               $neg
        SH2ADD          arg0,arg0,arg0

$$divU_12:
        .EXPORT         $$divU_12,MILLICODE
        EXTRU           arg0,29,30,arg0
        ADDI            5,arg0,t1
        SH2ADD          arg0,t1,arg0
        B               $pos
        ADDC            0,0,ret1

$$divI_15:
        .EXPORT         $$divI_15,MILLICODE
        COMB,<          arg0,0,$neg15
        COPY            0,ret1
        ADDIB,TR        1,arg0,$pos+4
        SHD             ret1,arg0,28,t1

$neg15:
        B               $neg
        SUBI            1,arg0,arg0

$$divU_15:
        .EXPORT         $$divU_15,MILLICODE
        ADDI            1,arg0,arg0
        B               $pos
        ADDC            0,0,ret1


$$divI_17:
        .EXPORT         $$divI_17,MILLICODE
        COMB,<,N        arg0,0,$neg17
        ADDI            1,arg0,arg0
        SHD             0,arg0,28,t1
        SHD             arg0,0,28,t2
        SUB             t2,arg0,arg0
        B               $pos_for_17
        SUBB            t1,0,ret1

$neg17:
        SUBI            1,arg0,arg0
        SHD             0,arg0,28,t1
        SHD             arg0,0,28,t2
        SUB             t2,arg0,arg0
        B               $neg_for_17
        SUBB            t1,0,ret1

$$divU_17:
        .EXPORT         $$divU_17,MILLICODE
        ADDI            1,arg0,arg0
        ADDC            0,0,ret1
        SHD             ret1,arg0,28,t1
$u17:
        SHD             arg0,0,28,t2
        SUB             t2,arg0,arg0
        B               $pos_for_17
        SUBB            t1,ret1,ret1
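
The divide-by-17 entries mirror the pattern above: because 1/17 corresponds to the factor 15 = 16-1 rather than 17 = 16+1, the first fold subtracts (SUB/SUBB) instead of adding before reusing the shared $pos_for_17 tail. A hedged C model of the unsigned path (a reconstruction with an illustrative name):

/* Hedged model of $$divU_17 plus the $pos_for_17 folds. */
unsigned divu17_model(unsigned x)
{
    unsigned long long a = (unsigned long long)x + 1;
    a *= 15;       /* SUB/SUBB fold:  a = (a << 4) - a */
    a *= 257;      /* $pos_for_17:    a += a << 8      */
    a *= 65537;    /*                 a += a << 16     */
    return (unsigned)(a >> 32);   /* 15*257*65537 == (2^32 - 1)/17 */
}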


$$divI_7:
        .EXPORT         $$divI_7,MILLICODE
        COMB,<,N        arg0,0,$neg7
$7:
        ADDI            1,arg0,arg0
        SHD             0,arg0,29,ret1
        SH3ADD          arg0,arg0,arg0
        ADDC            ret1,0,ret1
$pos7:
        SHD             ret1,arg0,26,t1
        SHD             arg0,0,26,t2
        ADD             arg0,t2,arg0
        ADDC            ret1,t1,ret1

        SHD             ret1,arg0,20,t1
        SHD             arg0,0,20,t2
        ADD             arg0,t2,arg0
        ADDC            ret1,t1,t1

        COPY            0,ret1
        SHD,=           t1,arg0,24,t1
$1:
        ADDB,TR         t1,ret1,$2
        EXTRU           arg0,31,24,arg0

        bv,n  0(r31)

$2:
        ADDB,TR         t1,arg0,$1
        EXTRU,=         arg0,7,8,t1

$neg7:
        SUBI            1,arg0,arg0
$8:
        SHD             0,arg0,29,ret1
        SH3ADD          arg0,arg0,arg0
        ADDC            ret1,0,ret1

$neg7_shift:
        SHD             ret1,arg0,26,t1
        SHD             arg0,0,26,t2
        ADD             arg0,t2,arg0
        ADDC            ret1,t1,ret1

        SHD             ret1,arg0,20,t1
        SHD             arg0,0,20,t2
        ADD             arg0,t2,arg0
        ADDC            ret1,t1,t1

        COPY            0,ret1
        SHD,=           t1,arg0,24,t1
$3:
        ADDB,TR         t1,ret1,$4
        EXTRU           arg0,31,24,arg0

        bv    0(r31)
        SUB             0,ret1,ret1

$4:
        ADDB,TR         t1,arg0,$3
        EXTRU,=         arg0,7,8,t1

$$divU_7:
        .EXPORT         $$divU_7,MILLICODE
        ADDI            1,arg0,arg0
        ADDC            0,0,ret1
        SHD             ret1,arg0,29,t1
        SH3ADD          arg0,arg0,arg0
        B               $pos7
        ADDC            t1,ret1,ret1

$$divI_9:
        .EXPORT         $$divI_9,MILLICODE
        COMB,<,N        arg0,0,$neg9
        ADDI            1,arg0,arg0
        SHD             0,arg0,29,t1
        SHD             arg0,0,29,t2
        SUB             t2,arg0,arg0
        B               $pos7
        SUBB            t1,0,ret1

$neg9:
        SUBI            1,arg0,arg0
        SHD             0,arg0,29,t1
        SHD             arg0,0,29,t2
        SUB             t2,arg0,arg0
        B               $neg7_shift
        SUBB            t1,0,ret1

$$divU_9:
        .EXPORT         $$divU_9,MILLICODE
        ADDI            1,arg0,arg0
        ADDC            0,0,ret1
        SHD             ret1,arg0,29,t1
        SHD             arg0,0,29,t2
        SUB             t2,arg0,arg0
        B               $pos7
        SUBB            t1,ret1,ret1

$$divI_14:
        .EXPORT         $$divI_14,MILLICODE
        COMB,<,N        arg0,0,$neg14
$$divU_14:
        .EXPORT         $$divU_14,MILLICODE
        B               $7
        EXTRU           arg0,30,31,arg0

$neg14:
        SUBI            2,arg0,arg0
        B               $8
        EXTRU           arg0,30,31,arg0

        .exit
        .PROCEND
        .END

rmndr: .EQU     ret1    ; r29

        .export $$remU,millicode
$$remU:
        .proc
        .callinfo millicode
        .entry

        comib,>=,n  0,arg1,special_case
        sub     r0,arg1,rmndr           ; clear carry, negate the divisor
        ds      r0,rmndr,r0             ; set V-bit to 1
        add     arg0,arg0,temp          ; shift msb bit into carry
        ds      r0,arg1,rmndr           ; 1st divide step, if no carry
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 2nd divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 3rd divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 4th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 5th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 6th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 7th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 8th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 9th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 10th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 11th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 12th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 13th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 14th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 15th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 16th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 17th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 18th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 19th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 20th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 21st divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 22nd divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 23rd divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 24th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 25th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 26th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 27th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 28th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 29th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 30th divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 31st divide step
        addc    temp,temp,temp          ; shift temp with/into carry
        ds      rmndr,arg1,rmndr        ; 32nd divide step,
        comiclr,<= 0,rmndr,r0
          add   rmndr,arg1,rmndr        ; correction
;       .exit
        bv,n  0(r31)
        nop
; Putting >= on the last DS and deleting COMICLR does not work!
;_____________________________________________________________________________
special_case:
        addit,= 0,arg1,r0               ; trap on div by zero
        sub,>>= arg0,arg1,rmndr
          copy  arg0,rmndr
        bv,n  0(r31)
        nop
        .exit
        .procend
        .end
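
A short C model of $$remU (a sketch with a hypothetical name, not the shipped code): the same 32-step loop as $$divU but keeping the remainder; the trailing comiclr/add stands in for the non-restoring fix-up that adds the divisor back when the running remainder ends up negative, and special_case covers a zero divisor (trap) or one with the high bit set (quotient can only be 0 or 1).

/* Restoring-division sketch of $$remU. */
unsigned remU_model(unsigned dividend, unsigned divisor)
{
    unsigned rem = 0;
    for (int i = 31; i >= 0; i--) {
        rem = (rem << 1) | ((dividend >> i) & 1u);
        if (rem >= divisor)
            rem -= divisor;   /* ds does this non-restoring, hence the final correction */
    }
    return rem;
}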


; Use bv    0(r31) and bv,n  0(r31) instead.
; #define       return          bv      0(%mrp)
; #define       return_n        bv,n    0(%mrp)

        .subspa $MILLICODE$
        .align 16
$$mulI:

        .proc
        .callinfo millicode
        .entry
        .export $$mulI, millicode
        combt,<<=       %r25,%r26,l4    ; swap args if unsigned %r25>%r26
        copy            0,%r29          ; zero out the result
        xor             %r26,%r25,%r26  ; swap %r26 & %r25 using the
        xor             %r26,%r25,%r25  ;  old xor trick
        xor             %r26,%r25,%r26
l4: combt,<=    0,%r26,l3               ; if %r26>=0 then proceed like unsigned

        zdep            %r25,30,8,%r1   ; %r1 = (%r25&0xff)<<1 *********
        sub,>           0,%r25,%r1      ; otherwise negate both and
        combt,<=,n      %r26,%r1,l2     ;  swap back if |%r26|<|%r25|
        sub             0,%r26,%r25
        movb,tr,n       %r1,%r26,l2     ; 10th inst.

l0:     add     %r29,%r1,%r29           ; add in this partial product

l1: zdep        %r26,23,24,%r26         ; %r26 <<= 8 ******************

l2: zdep        %r25,30,8,%r1           ; %r1 = (%r25&0xff)<<1 *********

l3: blr         %r1,0                   ; case on these 8 bits ******

        extru           %r25,23,24,%r25 ; %r25 >>= 8 ******************

;16 insts before this.
;                         %r26 <<= 8 **************************
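
Before the dispatch table that follows, a hedged C model of the overall $$mulI strategy (a reconstruction with a hypothetical name; it omits the sign handling and the argument swap done in the prologue above): the multiplier is consumed eight bits at a time, blr dispatches each chunk into one of the short shift-and-add recipes (x0, x1, ... below), the partial product is accumulated in %r29, and the multiplicand is shifted left eight bits before the next chunk.

/* Illustrative model of the $$mulI chunking loop. */
unsigned mulI_model(unsigned multiplicand, unsigned multiplier)
{
    unsigned acc = 0;                           /* %r29, zeroed in the prologue */
    while (multiplier != 0) {
        unsigned chunk = multiplier & 0xffu;    /* l3: blr "case on these 8 bits" */
        acc += chunk * multiplicand;            /* an xN recipe built from shNadd/zdep */
        multiplicand <<= 8;                     /* l1: zdep %r26,23,24,%r26 */
        multiplier >>= 8;                       /* extru %r25,23,24,%r25 */
    }
    return acc;                                 /* product mod 2^32 */
}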
x0: comb,<> %r25,0,l2 ! zdep %r26,23,24,%r26 ! bv,n 0(r31) ! nop
x1: comb,<> %r25,0,l1 ! add %r29,%r26,%r29 ! bv,n 0(r31) ! nop
x2: comb,<> %r25,0,l1 ! sh1add %r26,%r29,%r29 ! bv,n 0(r31) ! nop
x3: comb,<> %r25,0,l0 ! sh1add %r26,%r26,%r1 ! bv 0(r31) ! add %r29,%r1,%r29
x4: comb,<> %r25,0,l1 ! sh2add %r26,%r29,%r29 ! bv,n 0(r31) ! nop
x5: comb,<> %r25,0,l0 ! sh2add %r26,%r26,%r1 ! bv 0(r31) ! add %r29,%r1,%r29
x6: sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh1add %r1,%r29,%r29 ! bv,n 0(r31)
x7: sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh2add %r26,%r29,%r29 ! b,n ret_t0
x8: comb,<> %r25,0,l1 ! sh3add %r26,%r29,%r29 ! bv,n 0(r31) ! nop
x9: comb,<> %r25,0,l0 ! sh3add %r26,%r26,%r1 ! bv 0(r31) ! add %r29,%r1,%r29
x10: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh1add %r1,%r29,%r29 ! bv,n 0(r31)
x11: sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh3add %r26,%r29,%r29 ! b,n ret_t0
x12: sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh2add %r1,%r29,%r29 ! bv,n 0(r31)
x13: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh3add %r26,%r29,%r29 ! b,n ret_t0
x14: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x15: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh1add %r1,%r1,%r1 ! b,n ret_t0
x16: zdep %r26,27,28,%r1 ! comb,<> %r25,0,l1 ! add %r29,%r1,%r29 ! bv,n 0(r31)
x17: sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh3add %r26,%r1,%r1 ! b,n ret_t0
x18: sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh1add %r1,%r29,%r29 ! bv,n 0(r31)
x19: sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh1add %r1,%r26,%r1 ! b,n ret_t0
x20: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh2add %r1,%r29,%r29 ! bv,n 0(r31)
x21: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh2add %r1,%r26,%r1 ! b,n ret_t0
x22: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x23: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
x24: sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh3add %r1,%r29,%r29 ! bv,n 0(r31)
x25: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh2add %r1,%r1,%r1 ! b,n ret_t0
x26: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x27: sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh3add %r1,%r1,%r1 ! b,n ret_t0
x28: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
x29: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
x30: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x31: zdep %r26,26,27,%r1 ! comb,<> %r25,0,l0 ! sub %r1,%r26,%r1 ! b,n ret_t0
x32: zdep %r26,26,27,%r1 ! comb,<> %r25,0,l1 ! add %r29,%r1,%r29 ! bv,n 0(r31)
x33: sh3add %r26,0,%r1 ! comb,<> %r25,0,l0 ! sh2add %r1,%r26,%r1 ! b,n ret_t0
x34: zdep %r26,27,28,%r1 ! add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x35: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh3add %r26,%r1,%r1
x36: sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh2add %r1,%r29,%r29 ! bv,n 0(r31)
x37: sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh2add %r1,%r26,%r1 ! b,n ret_t0
x38: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x39: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
x40: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh3add %r1,%r29,%r29 ! bv,n 0(r31)
x41: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh3add %r1,%r26,%r1 ! b,n ret_t0
x42: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x43: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
x44: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
x45: sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh2add %r1,%r1,%r1 ! b,n ret_t0
x46: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! add %r1,%r26,%r1
x47: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh1add %r26,%r1,%r1
x48: sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! zdep %r1,27,28,%r1 ! b,n ret_t0
x49: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh2add %r26,%r1,%r1
x50: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x51: sh3add %r26,%r26,%r1 ! sh3add %r26,%r1,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
x52: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
x53: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
x54: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x55: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
x56: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
x57: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
x58: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
x59: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t02a0 ! sh1add %r1,%r1,%r1
x60: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
x61: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
x62: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x63: zdep %r26,25,26,%r1 ! comb,<> %r25,0,l0 ! sub %r1,%r26,%r1 ! b,n ret_t0
x64: zdep %r26,25,26,%r1 ! comb,<> %r25,0,l1 ! add %r29,%r1,%r29 ! bv,n 0(r31)
x65: sh3add %r26,0,%r1 ! comb,<> %r25,0,l0 ! sh3add %r1,%r26,%r1 ! b,n ret_t0
x66: zdep %r26,26,27,%r1 ! add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x67: sh3add %r26,0,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
x68: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
x69: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
x70: zdep %r26,25,26,%r1 ! sh2add %r26,%r1,%r1 ! b e_t0 ! sh1add %r26,%r1,%r1
x71: sh3add %r26,%r26,%r1 ! sh3add %r1,0,%r1 ! b e_t0 ! sub %r1,%r26,%r1
x72: sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh3add %r1,%r29,%r29 ! bv,n 0(r31)
x73: sh3add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_shift ! add %r29,%r1,%r29
x74: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x75: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
x76: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
x77: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
x78: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r26,%r1
x79: zdep %r26,27,28,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sub %r1,%r26,%r1
x80: zdep %r26,27,28,%r1 ! sh2add %r1,%r1,%r1 ! b e_shift ! add %r29,%r1,%r29
x81: sh3add %r26,%r26,%r1 ! sh3add %r1,%r1,%r1 ! b e_shift ! add %r29,%r1,%r29
x82: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x83: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
x84: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
x85: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
x86: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r26,%r1
x87: sh3add %r26,%r26,%r1 ! sh3add %r1,%r1,%r1 ! b e_t02a0 ! sh2add %r26,%r1,%r1
x88: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
x89: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
x90: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x91: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
x92: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0 ! sh1add %r1,%r26,%r1
x93: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
x94: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_2t0 ! sh1add %r26,%r1,%r1
x95: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
x96: sh3add %r26,0,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
x97: sh3add %r26,0,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
x98: zdep %r26,26,27,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh1add %r26,%r1,%r1
x99: sh3add %r26,0,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
x100: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
x101: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
x102: zdep %r26,26,27,%r1 ! sh1add %r26,%r1,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
x103: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t02a0 ! sh2add %r1,%r26,%r1
x104: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
x105: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
x106: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
x107: sh3add %r26,%r26,%r1 ! sh2add %r26,%r1,%r1 ! b e_t02a0 ! sh3add %r1,%r26,%r1
x108: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
x109: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
x110: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_2t0 ! sh1add %r1,%r26,%r1
x111: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
x112: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! zdep %r1,27,28,%r1
x113: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t02a0 ! sh1add %r1,%r1,%r1
x114: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r1,%r1
x115: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0a0 ! sh1add %r1,%r1,%r1
x116: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0 ! sh2add %r1,%r26,%r1
x117: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r1,%r1
x118: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0a0 ! sh3add %r1,%r1,%r1
x119: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t02a0 ! sh3add %r1,%r1,%r1
x120: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
x121: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
x122: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
x123: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
x124: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
x125: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
x126: zdep %r26,25,26,%r1 ! sub %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x127: zdep %r26,24,25,%r1 ! comb,<> %r25,0,l0 ! sub %r1,%r26,%r1 ! b,n ret_t0
x128: zdep %r26,24,25,%r1 ! comb,<> %r25,0,l1 ! add %r29,%r1,%r29 ! bv,n 0(r31)
x129: zdep %r26,24,25,%r1 ! comb,<> %r25,0,l0 ! add %r1,%r26,%r1 ! b,n ret_t0
x130: zdep %r26,25,26,%r1 ! add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x131: sh3add %r26,0,%r1 ! sh3add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
x132: sh3add %r26,0,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
x133: sh3add %r26,0,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
x134: sh3add %r26,0,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r26,%r1
x135: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
x136: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
x137: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
x138: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
x139: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0a0 ! sh2add %r1,%r26,%r1
x140: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0 ! sh2add %r1,%r1,%r1
x141: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0a0 ! sh1add %r1,%r26,%r1
x142: sh3add %r26,%r26,%r1 ! sh3add %r1,0,%r1 ! b e_2t0 ! sub %r1,%r26,%r1
x143: zdep %r26,27,28,%r1 ! sh3add %r1,%r1,%r1 ! b e_t0 ! sub %r1,%r26,%r1
x144: sh3add %r26,%r26,%r1 ! sh3add %r1,0,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x145: sh3add %r26,%r26,%r1 ! sh3add %r1,0,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
x146: sh3add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x147: sh3add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
x148: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
x149: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
x150: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r26,%r1
x151: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0a0 ! sh1add %r1,%r26,%r1
x152: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
x153: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
x154: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
x155: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
x156: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0 ! sh1add %r1,%r26,%r1
x157: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_t02a0 ! sh2add %r1,%r1,%r1
x158: zdep %r26,27,28,%r1 ! sh2add %r1,%r1,%r1 ! b e_2t0 ! sub %r1,%r26,%r1
x159: zdep %r26,26,27,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sub %r1,%r26,%r1
x160: sh2add %r26,%r26,%r1 ! sh2add %r1,0,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
x161: sh3add %r26,0,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
x162:   sh3add  %r26,%r26,%r1           !       sh3add  %r1,%r1,%r1     ! b     e_shift ! sh1add        %r1,%r29,%r29
1582
 
1583
x163:   sh3add  %r26,%r26,%r1           !       sh3add  %r1,%r1,%r1     !       b       e_t0    ! sh1add        %r1,%r26,%r1
1584
 
1585
x164:   sh2add  %r26,%r26,%r1           ! sh3add        %r1,%r26,%r1    ! b     e_shift ! sh2add        %r1,%r29,%r29
1586
 
1587
x165:   sh3add  %r26,0,%r1               ! sh2add        %r1,%r26,%r1    !       b       e_t0    !       sh2add  %r1,%r1,%r1
1588
 
1589
x166:   sh2add  %r26,%r26,%r1           ! sh3add        %r1,%r26,%r1    !       b       e_2t0   ! sh1add        %r1,%r26,%r1
1590
 
1591
x167:   sh2add  %r26,%r26,%r1           ! sh3add        %r1,%r26,%r1    ! b     e_2t0a0 ! sh1add        %r1,%r26,%r1
1592
 
1593
x168:   sh2add  %r26,%r26,%r1           ! sh2add        %r1,%r26,%r1    ! b     e_shift ! sh3add        %r1,%r29,%r29
1594
 
1595
x169:   sh2add  %r26,%r26,%r1           ! sh2add        %r1,%r26,%r1    !       b       e_t0    ! sh3add        %r1,%r26,%r1
1596
 
1597
x170: zdep      %r26,26,27,%r1  ! sh1add        %r26,%r1,%r1    !       b       e_t0    !       sh2add  %r1,%r1,%r1
1598
 
1599
x171:   sh3add  %r26,%r26,%r1           ! sh1add        %r1,%r26,%r1    !       b       e_t0    !       sh3add  %r1,%r1,%r1
1600
 
1601
x172:   sh2add  %r26,%r26,%r1           ! sh2add        %r1,%r26,%r1    !       b       e_4t0   ! sh1add        %r1,%r26,%r1
1602
 
1603
x173:   sh3add  %r26,%r26,%r1           ! sh1add        %r1,%r26,%r1    ! b     e_t02a0 !       sh3add  %r1,%r1,%r1
1604
 
1605
x174: zdep      %r26,26,27,%r1  ! sh1add        %r26,%r1,%r1    ! b     e_t04a0 !       sh2add  %r1,%r1,%r1
1606
 
1607
x175:   sh3add  %r26,0,%r1               ! sh1add        %r1,%r26,%r1    !       b       e_5t0   ! sh1add        %r1,%r26,%r1
1608
 
1609
x176:   sh2add  %r26,%r26,%r1           ! sh2add        %r1,%r26,%r1    !       b       e_8t0   ! add   %r1,%r26,%r1
1610
 
1611
x177:   sh2add  %r26,%r26,%r1           ! sh2add        %r1,%r26,%r1    ! b     e_8t0a0 ! add   %r1,%r26,%r1
1612
 
1613
x178:   sh2add  %r26,%r26,%r1           ! sh1add        %r1,%r26,%r1    !       b       e_2t0   ! sh3add        %r1,%r26,%r1
1614
 
1615
x179:   sh2add  %r26,%r26,%r1           ! sh1add        %r1,%r26,%r1    ! b     e_2t0a0 ! sh3add        %r1,%r26,%r1
1616
 
1617
x180:   sh3add  %r26,%r26,%r1           !       sh2add  %r1,%r1,%r1     ! b     e_shift ! sh2add        %r1,%r29,%r29
1618
 
1619
x181:   sh3add  %r26,%r26,%r1           !       sh2add  %r1,%r1,%r1     !       b       e_t0    ! sh2add        %r1,%r26,%r1
1620
 
1621
x182:   sh3add  %r26,%r26,%r1           !       sh2add  %r1,%r1,%r1     !       b       e_2t0   ! sh1add        %r1,%r26,%r1
1622
 
1623
x183:   sh3add  %r26,%r26,%r1           !       sh2add  %r1,%r1,%r1     ! b     e_2t0a0 ! sh1add        %r1,%r26,%r1
1624
 
1625
x184:   sh2add  %r26,%r26,%r1           !       sh3add  %r1,%r1,%r1     !       b       e_4t0   ! add   %r1,%r26,%r1
1626
 
1627
x185:   sh3add  %r26,%r26,%r1           ! sh2add        %r1,%r26,%r1    !       b       e_t0    !       sh2add  %r1,%r1,%r1
1628
 
1629
x186: zdep      %r26,26,27,%r1  ! sub   %r1,%r26,%r1    !       b       e_2t0   !       sh1add  %r1,%r1,%r1
1630
 
1631
x187:   sh3add  %r26,%r26,%r1           ! sh2add        %r1,%r26,%r1    ! b     e_t02a0 !       sh2add  %r1,%r1,%r1
1632
 
1633
x188:   sh3add  %r26,%r26,%r1           !       sh2add  %r1,%r1,%r1     !       b       e_4t0   ! sh1add        %r26,%r1,%r1
1634
 
1635
x189:   sh2add  %r26,%r26,%r1           ! sh2add        %r1,%r26,%r1    !       b       e_t0    !       sh3add  %r1,%r1,%r1
1636
 
1637
x190:   sh3add  %r26,%r26,%r1           ! sh1add        %r1,%r26,%r1    !       b       e_2t0   !       sh2add  %r1,%r1,%r1
1638
 
1639
x191: zdep      %r26,25,26,%r1  !       sh1add  %r1,%r1,%r1     !       b       e_t0    ! sub   %r1,%r26,%r1
1640
 
1641
x192:   sh3add  %r26,0,%r1               !       sh1add  %r1,%r1,%r1     ! b     e_shift ! sh3add        %r1,%r29,%r29
1642
 
1643
x193:   sh3add  %r26,0,%r1               !       sh1add  %r1,%r1,%r1     !       b       e_t0    ! sh3add        %r1,%r26,%r1
1644
 
1645
x194:   sh3add  %r26,0,%r1               !       sh1add  %r1,%r1,%r1     !       b       e_2t0   ! sh2add        %r1,%r26,%r1
1646
 
1647
x195:   sh3add  %r26,0,%r1               ! sh3add        %r1,%r26,%r1    !       b       e_t0    !       sh1add  %r1,%r1,%r1
1648
 
1649
x196:   sh3add  %r26,0,%r1               !       sh1add  %r1,%r1,%r1     !       b       e_4t0   ! sh1add        %r1,%r26,%r1
1650
 
1651
x197:   sh3add  %r26,0,%r1               !       sh1add  %r1,%r1,%r1     ! b     e_4t0a0 ! sh1add        %r1,%r26,%r1
1652
 
1653
x198: zdep      %r26,25,26,%r1  ! sh1add        %r26,%r1,%r1    !       b       e_t0    !       sh1add  %r1,%r1,%r1
1654
 
1655
x199:   sh3add  %r26,0,%r1               ! sh2add        %r1,%r26,%r1    ! b     e_2t0a0 !       sh1add  %r1,%r1,%r1
1656
 
1657
x200:   sh2add  %r26,%r26,%r1           !       sh2add  %r1,%r1,%r1     ! b     e_shift ! sh3add        %r1,%r29,%r29
1658
 
1659
x201:   sh2add  %r26,%r26,%r1           !       sh2add  %r1,%r1,%r1     !       b       e_t0    ! sh3add        %r1,%r26,%r1
1660
 
1661
x202:   sh2add  %r26,%r26,%r1           !       sh2add  %r1,%r1,%r1     !       b       e_2t0   ! sh2add        %r1,%r26,%r1
1662
 
1663
x203:   sh2add  %r26,%r26,%r1           !       sh2add  %r1,%r1,%r1     ! b     e_2t0a0 ! sh2add        %r1,%r26,%r1
1664
 
1665
x204:   sh3add  %r26,0,%r1               ! sh1add        %r1,%r26,%r1    !       b       e_4t0   !       sh1add  %r1,%r1,%r1
1666
 
1667
x205:   sh2add  %r26,%r26,%r1           ! sh3add        %r1,%r26,%r1    !       b       e_t0    !       sh2add  %r1,%r1,%r1
1668
 
1669
x206: zdep      %r26,25,26,%r1  ! sh2add        %r26,%r1,%r1    ! b     e_t02a0 !       sh1add  %r1,%r1,%r1
1670
 
1671
x207:   sh3add  %r26,0,%r1               ! sh1add        %r1,%r26,%r1    !       b       e_3t0   ! sh2add        %r1,%r26,%r1
1672
 
1673
x208:   sh2add  %r26,%r26,%r1           !       sh2add  %r1,%r1,%r1     !       b       e_8t0   ! add   %r1,%r26,%r1
1674
 
1675
x209:   sh2add  %r26,%r26,%r1           !       sh2add  %r1,%r1,%r1     ! b     e_8t0a0 ! add   %r1,%r26,%r1
1676
 
1677
x210:   sh2add  %r26,%r26,%r1           ! sh2add        %r1,%r26,%r1    !       b       e_2t0   !       sh2add  %r1,%r1,%r1
1678
 
1679
x211:   sh2add  %r26,%r26,%r1           ! sh2add        %r1,%r26,%r1    ! b     e_2t0a0 !       sh2add  %r1,%r1,%r1
1680
 
1681
x212:   sh1add  %r26,%r26,%r1           ! sh2add        %r1,%r26,%r1    !       b       e_4t0   ! sh2add        %r1,%r26,%r1
1682
 
1683
x213:   sh1add  %r26,%r26,%r1           ! sh2add        %r1,%r26,%r1    ! b     e_4t0a0 ! sh2add        %r1,%r26,%r1
1684
 
1685
x214:   sh3add  %r26,%r26,%r1           ! sh2add        %r26,%r1,%r1    ! b     e2t04a0 ! sh3add        %r1,%r26,%r1
1686
 
1687
x215:   sh2add  %r26,%r26,%r1           ! sh2add        %r1,%r26,%r1    !       b       e_5t0   ! sh1add        %r1,%r26,%r1
1688
 
1689
x216:   sh3add  %r26,%r26,%r1           !       sh1add  %r1,%r1,%r1     ! b     e_shift ! sh3add        %r1,%r29,%r29
1690
 
1691
x217:   sh3add  %r26,%r26,%r1           !       sh1add  %r1,%r1,%r1     !       b       e_t0    ! sh3add        %r1,%r26,%r1
1692
 
1693
x218:   sh3add  %r26,%r26,%r1           !       sh1add  %r1,%r1,%r1     !       b       e_2t0   ! sh2add        %r1,%r26,%r1
1694
 
1695
x219:   sh3add  %r26,%r26,%r1           ! sh3add        %r1,%r26,%r1    !       b       e_t0    !       sh1add  %r1,%r1,%r1
1696
 
1697
x220:   sh1add  %r26,%r26,%r1           !       sh3add  %r1,%r1,%r1     !       b       e_4t0   ! sh1add        %r1,%r26,%r1
1698
 
1699
x221:   sh1add  %r26,%r26,%r1           !       sh3add  %r1,%r1,%r1     ! b     e_4t0a0 ! sh1add        %r1,%r26,%r1
1700
 
1701
x222:   sh3add  %r26,%r26,%r1           ! sh2add        %r1,%r26,%r1    !       b       e_2t0   !       sh1add  %r1,%r1,%r1
1702
 
1703
x223:   sh3add  %r26,%r26,%r1           ! sh2add        %r1,%r26,%r1    ! b     e_2t0a0 !       sh1add  %r1,%r1,%r1
1704
 
1705
x224:   sh3add  %r26,%r26,%r1           !       sh1add  %r1,%r1,%r1     !       b       e_8t0   ! add   %r1,%r26,%r1
1706
 
1707
x225:   sh3add  %r26,%r26,%r1           !       sh2add  %r1,%r1,%r1     !       b       e_t0    !       sh2add  %r1,%r1,%r1
1708
 
1709
x226:   sh1add  %r26,%r26,%r1           ! sh1add        %r1,%r26,%r1    ! b     e_t02a0 ! zdep  %r1,26,27,%r1
1710
 
1711
x227:   sh3add  %r26,%r26,%r1           !       sh2add  %r1,%r1,%r1     ! b     e_t02a0 !       sh2add  %r1,%r1,%r1
1712
 
1713
x228:   sh3add  %r26,%r26,%r1           ! sh1add        %r1,%r26,%r1    !       b       e_4t0   !       sh1add  %r1,%r1,%r1
1714
 
1715
x229:   sh3add  %r26,%r26,%r1           ! sh1add        %r1,%r26,%r1    ! b     e_4t0a0 !       sh1add  %r1,%r1,%r1
1716
 
1717
x230:   sh3add  %r26,%r26,%r1           !       sh2add  %r1,%r1,%r1     !       b       e_5t0   ! add   %r1,%r26,%r1
1718
 
1719
x231:   sh3add  %r26,%r26,%r1           ! sh1add        %r1,%r26,%r1    !       b       e_3t0   ! sh2add        %r1,%r26,%r1
1720
 
1721
x232:   sh1add  %r26,%r26,%r1           ! sh1add        %r1,%r26,%r1    !       b       e_8t0   ! sh2add        %r1,%r26,%r1
1722
 
1723
x233:   sh1add  %r26,%r26,%r1           ! sh1add        %r1,%r26,%r1    ! b     e_8t0a0 ! sh2add        %r1,%r26,%r1
1724
 
1725
x234:   sh1add  %r26,%r26,%r1           ! sh2add        %r1,%r26,%r1    !       b       e_2t0   !       sh3add  %r1,%r1,%r1
1726
 
1727
x235:   sh1add  %r26,%r26,%r1           ! sh2add        %r1,%r26,%r1    ! b     e_2t0a0 !       sh3add  %r1,%r1,%r1
1728
 
1729
x236:   sh3add  %r26,%r26,%r1           ! sh1add        %r1,%r26,%r1    ! b     e4t08a0 !       sh1add  %r1,%r1,%r1
1730
 
1731
x237: zdep      %r26,27,28,%r1  !       sh2add  %r1,%r1,%r1     !       b       e_3t0   ! sub   %r1,%r26,%r1
1732
 
1733
x238:   sh1add  %r26,%r26,%r1           ! sh2add        %r1,%r26,%r1    ! b     e2t04a0 !       sh3add  %r1,%r1,%r1
1734
 
1735
x239: zdep      %r26,27,28,%r1  !       sh2add  %r1,%r1,%r1     ! b     e_t0ma0 !       sh1add  %r1,%r1,%r1
1736
 
1737
x240:   sh3add  %r26,%r26,%r1           ! add   %r1,%r26,%r1    !       b       e_8t0   !       sh1add  %r1,%r1,%r1
1738
 
1739
x241:   sh3add  %r26,%r26,%r1           ! add   %r1,%r26,%r1    ! b     e_8t0a0 !       sh1add  %r1,%r1,%r1
1740
 
1741
x242:   sh2add  %r26,%r26,%r1           !       sh1add  %r1,%r1,%r1     !       b       e_2t0   ! sh3add        %r1,%r26,%r1
1742
 
1743
x243:   sh3add  %r26,%r26,%r1           !       sh3add  %r1,%r1,%r1     !       b       e_t0    !       sh1add  %r1,%r1,%r1
1744
 
1745
x244:   sh2add  %r26,%r26,%r1           !       sh1add  %r1,%r1,%r1     !       b       e_4t0   ! sh2add        %r1,%r26,%r1
1746
 
1747
x245:   sh3add  %r26,0,%r1               !       sh1add  %r1,%r1,%r1     !       b       e_5t0   ! sh1add        %r1,%r26,%r1
1748
 
1749
x246:   sh2add  %r26,%r26,%r1           ! sh3add        %r1,%r26,%r1    !       b       e_2t0   !       sh1add  %r1,%r1,%r1
1750
 
1751
x247:   sh2add  %r26,%r26,%r1           ! sh3add        %r1,%r26,%r1    ! b     e_2t0a0 !       sh1add  %r1,%r1,%r1
1752
 
1753
x248: zdep      %r26,26,27,%r1  ! sub   %r1,%r26,%r1    ! b     e_shift ! sh3add        %r1,%r29,%r29
1754
 
1755
x249: zdep      %r26,26,27,%r1  ! sub   %r1,%r26,%r1    !       b       e_t0    ! sh3add        %r1,%r26,%r1
1756
 
1757
x250:   sh2add  %r26,%r26,%r1           !       sh2add  %r1,%r1,%r1     !       b       e_2t0   !       sh2add  %r1,%r1,%r1
1758
 
1759
x251:   sh2add  %r26,%r26,%r1           !       sh2add  %r1,%r1,%r1     ! b     e_2t0a0 !       sh2add  %r1,%r1,%r1
1760
 
1761
x252: zdep      %r26,25,26,%r1  ! sub   %r1,%r26,%r1    ! b     e_shift ! sh2add        %r1,%r29,%r29
1762
 
1763
x253: zdep      %r26,25,26,%r1  ! sub   %r1,%r26,%r1    !       b       e_t0    ! sh2add        %r1,%r26,%r1
1764
 
1765
x254: zdep      %r26,24,25,%r1  ! sub   %r1,%r26,%r1    ! b     e_shift ! sh1add        %r1,%r29,%r29
1766
 
1767
x255: zdep      %r26,23,24,%r1  ! comb,<>       %r25,0,l0        ! sub   %r1,%r26,%r1    ! b,n   ret_t0
1768
 
1769
;1040 insts before this.
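; The e_* labels that follow are the common tails for the xN entries above;
; the summary below is inferred from reading the code.  Each tail folds the
; partial product built in %r1 into the running result in %r29: e_t0 adds
; %r1; e_2t0, e_4t0 and e_8t0 add 2x, 4x and 8x %r1; e_3t0 and e_5t0 first
; scale %r1 by 3 or 5; the "a0" forms first add the multiplicand %r26 into
; %r1 (e_t0ma0 subtracts it instead); e_shift is used when the table entry
; already accumulated into %r29 in its branch delay slot.  Every tail then
; either returns through bv 0(r31) or, when %r25 still holds unprocessed
; multiplier bits, branches back to l0/l1/l2 (defined earlier in this
; routine).  Worked example: x107 forms 9x (sh3add x,x), then 13x
; (sh2add x,9x), then 105x (sh3add 13x,x in the branch delay slot), and
; e_t02a0 adds 2x more to reach 107x before accumulating it into %r29.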
ret_t0: bv    0(r31)

e_t0:   add     %r29,%r1,%r29

e_shift: comb,<>        %r25,0,l2

        zdep    %r26,23,24,%r26 ; %r26 <<= 8 ***********
        bv,n  0(r31)
e_t0ma0: comb,<>        %r25,0,l0

        sub     %r1,%r26,%r1
        bv    0(r31)
                add     %r29,%r1,%r29
e_t0a0: comb,<> %r25,0,l0

        add     %r1,%r26,%r1
        bv    0(r31)
                add     %r29,%r1,%r29
e_t02a0: comb,<>        %r25,0,l0

        sh1add  %r26,%r1,%r1
        bv    0(r31)
                add     %r29,%r1,%r29
e_t04a0: comb,<>        %r25,0,l0

        sh2add  %r26,%r1,%r1
        bv    0(r31)
                add     %r29,%r1,%r29
e_2t0: comb,<>  %r25,0,l1

        sh1add  %r1,%r29,%r29
        bv,n  0(r31)
e_2t0a0: comb,<>        %r25,0,l0

        sh1add  %r1,%r26,%r1
        bv    0(r31)
                add     %r29,%r1,%r29
e2t04a0: sh1add %r26,%r1,%r1

        comb,<> %r25,0,l1
        sh1add  %r1,%r29,%r29
        bv,n  0(r31)
e_3t0: comb,<>  %r25,0,l0

                sh1add  %r1,%r1,%r1
        bv    0(r31)
                add     %r29,%r1,%r29
e_4t0: comb,<>  %r25,0,l1

        sh2add  %r1,%r29,%r29
        bv,n  0(r31)
e_4t0a0: comb,<>        %r25,0,l0

        sh2add  %r1,%r26,%r1
        bv    0(r31)
                add     %r29,%r1,%r29
e4t08a0: sh1add %r26,%r1,%r1

        comb,<> %r25,0,l1
        sh2add  %r1,%r29,%r29
        bv,n  0(r31)
e_5t0: comb,<>  %r25,0,l0

                sh2add  %r1,%r1,%r1
        bv    0(r31)
                add     %r29,%r1,%r29
e_8t0: comb,<>  %r25,0,l1

        sh3add  %r1,%r29,%r29
        bv,n  0(r31)
e_8t0a0: comb,<>        %r25,0,l0

        sh3add  %r1,%r26,%r1
        bv    0(r31)
                add     %r29,%r1,%r29

        .exit
        .procend
        .end

        .import $$divI_2,millicode
        .import $$divI_3,millicode
        .import $$divI_4,millicode
        .import $$divI_5,millicode
        .import $$divI_6,millicode
        .import $$divI_7,millicode
        .import $$divI_8,millicode
        .import $$divI_9,millicode
        .import $$divI_10,millicode
        .import $$divI_12,millicode
        .import $$divI_14,millicode
        .import $$divI_15,millicode
        .export $$divI,millicode
        .export $$divoI,millicode
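; $$divI / $$divoI -- 32-bit signed integer division millicode.  As used
; below, the dividend arrives in arg0, the divisor in arg1, the quotient is
; returned in retreg, and the millicode return address is in r31 (these
; aliases are the usual ones for this millicode and are not redefined here).
; $$divoI differs from $$divI only in the comib check at its entry, which
; sends divisor == -1 to negative1; that path negates the dividend and, per
; its own comment, traps on the 0x80000000 / -1 overflow case.  Divisors in
; the range 0..15 are dispatched at small_divisor to the dedicated $$divI_n
; routines imported above.  The general path at normal1 makes the dividend
; positive, negates the divisor to set up the carry and V-bit, develops one
; quotient bit per ds/addc pair (32 pairs in all), and finally fixes the
; quotient sign from the xor of the operand signs.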
$$divoI:
        .proc
        .callinfo millicode
        .entry
        comib,=,n  -1,arg1,negative1    ; when divisor == -1
$$divI:
        comib,>>=,n 15,arg1,small_divisor
        add,>=  0,arg0,retreg           ; move dividend, if retreg < 0,
normal1:
          sub   0,retreg,retreg         ;   make it positive
        sub     0,arg1,temp             ; clear carry,
                                        ;   negate the divisor
        ds      0,temp,0                ; set V-bit to the comple-
                                        ;   ment of the divisor sign
        add     retreg,retreg,retreg    ; shift msb bit into carry
        ds      r0,arg1,temp            ; 1st divide step, if no carry
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 2nd divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 3rd divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 4th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 5th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 6th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 7th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 8th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 9th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 10th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 11th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 12th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 13th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 14th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 15th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 16th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 17th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 18th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 19th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 20th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 21st divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 22nd divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 23rd divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 24th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 25th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 26th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 27th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 28th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 29th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 30th divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 31st divide step
        addc    retreg,retreg,retreg    ; shift retreg with/into carry
        ds      temp,arg1,temp          ; 32nd divide step,
        addc    retreg,retreg,retreg    ; shift last retreg bit into retreg
        xor,>=  arg0,arg1,0             ; get correct sign of quotient
          sub   0,retreg,retreg         ;   based on operand signs
        bv,n  0(r31)
        nop
;______________________________________________________________________
small_divisor:
        blr,n   arg1,r0
        nop
; table for divisor == 0,1, ... ,15
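; The blr,n above jumps into this table indexed by arg1 (the divisor); each
; divisor value 0..15 owns exactly two instruction slots here, which is why
; every case below is a two-instruction pair (a branch or trap plus a nop or
; delay-slot instruction).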
        addit,= 0,arg1,r0       ; trap if divisor == 0
        nop
        bv    0(r31)            ; divisor == 1
        copy    arg0,retreg
         b,n   $$divI_2         ; divisor == 2
        nop
         b,n   $$divI_3         ; divisor == 3
        nop
         b,n   $$divI_4         ; divisor == 4
        nop
         b,n   $$divI_5         ; divisor == 5
        nop
         b,n   $$divI_6         ; divisor == 6
        nop
         b,n   $$divI_7         ; divisor == 7
        nop
         b,n   $$divI_8         ; divisor == 8
        nop
         b,n   $$divI_9         ; divisor == 9
        nop
         b,n   $$divI_10        ; divisor == 10
        nop
        b       normal1         ; divisor == 11
        add,>=  0,arg0,retreg
         b,n   $$divI_12        ; divisor == 12
        nop
        b       normal1         ; divisor == 13
        add,>=  0,arg0,retreg
         b,n   $$divI_14        ; divisor == 14
        nop
         b,n   $$divI_15        ; divisor == 15
        nop
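; Note that divisors 11 and 13 have no dedicated millicode routine (none is
; imported above), so their table slots branch back to normal1, repeating
; the add,>= dividend setup in the delay slot just as the $$divI entry does.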
;______________________________________________________________________
negative1:
        sub     0,arg0,retreg   ; result is negation of dividend
        bv    0(r31)
        addo    arg0,arg1,r0    ; trap iff dividend==0x80000000 && divisor==-1
        .exit
        .procend

        .subspa $LIT$
___hp_free_copyright:
        .export ___hp_free_copyright,data
        .align 4
        .string "(c) Copyright 1986 HEWLETT-PACKARD COMPANY\x0aTo anyone who acknowledges that this file is provided \"AS IS\"\x0awithout any express or implied warranty:\x0a    permission to use, copy, modify, and distribute this file\x0afor any purpose is hereby granted without fee, provided that\x0athe above copyright notice and this notice appears in all\x0acopies, and that the name of Hewlett-Packard Company not be\x0aused in advertising or publicity pertaining to distribution\x0aof the software without specific, written prior permission.\x0aHewlett-Packard Company makes no representations about the\x0asuitability of this software for any purpose.\x0a\x00"
        .align 4
        .end
