URL
https://opencores.org/ocsvn/or1k/or1k/trunk
Subversion Repositories or1k
[/] [or1k/] [trunk/] [rtems/] [c/] [src/] [lib/] [libcpu/] [hppa1.1/] [milli/] [milli.S] - Rev 1765
Compare with Previous | Blame | View Log
;; (c) Copyright 1986 HEWLETT-PACKARD COMPANY
;; To anyone who acknowledges that this file is provided "AS IS"
; without any express or implied warranty:
; permission to use, copy, modify, and distribute this file
; for any purpose is hereby granted without fee, provided that
; the above copyright notice and this notice appears in all
; copies, and that the name of Hewlett-Packard Company not be
; used in advertising or publicity pertaining to distribution
; of the software without specific, written prior permission.
; Hewlett-Packard Company makes no representations about the
; suitability of this software for any purpose.
;
; Standard Hardware Register Definitions for Use with Assembler
; version A.08.06
; - fr16-31 added at Utah
;
; $Id: milli.S,v 1.2 2001-09-27 12:01:21 chris Exp $
;
; NOTE(review): this text was recovered from a whitespace-mangled web
; extraction; the line breaks below are reconstructed.  Verify against a
; pristine copy of milli.S before assembling.
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
; Hardware General Registers
r0: .equ 0
r1: .equ 1
r2: .equ 2
r3: .equ 3
r4: .equ 4
r5: .equ 5
r6: .equ 6
r7: .equ 7
r8: .equ 8
r9: .equ 9
r10: .equ 10
r11: .equ 11
r12: .equ 12
r13: .equ 13
r14: .equ 14
r15: .equ 15
r16: .equ 16
r17: .equ 17
r18: .equ 18
r19: .equ 19
r20: .equ 20
r21: .equ 21
r22: .equ 22
r23: .equ 23
r24: .equ 24
r25: .equ 25
r26: .equ 26
r27: .equ 27
r28: .equ 28
r29: .equ 29
r30: .equ 30
r31: .equ 31
; Hardware Space Registers
sr0: .equ 0
sr1: .equ 1
sr2: .equ 2
sr3: .equ 3
sr4: .equ 4
sr5: .equ 5
sr6: .equ 6
sr7: .equ 7
; Hardware Floating Point Registers
fr0: .equ 0
fr1: .equ 1
fr2: .equ 2
fr3: .equ 3
fr4: .equ 4
fr5: .equ 5
fr6: .equ 6
fr7: .equ 7
fr8: .equ 8
fr9: .equ 9
fr10: .equ 10
fr11: .equ 11
fr12: .equ 12
fr13: .equ 13
fr14: .equ 14
fr15: .equ 15
fr16: .equ 16
fr17: .equ 17
fr18: .equ 18
fr19: .equ 19
fr20: .equ 20
fr21: .equ 21
fr22: .equ 22
fr23: .equ 23
fr24: .equ 24
fr25: .equ 25
fr26: .equ 26
fr27: .equ 27
fr28: .equ 28
fr29: .equ 29
fr30: .equ 30
fr31: .equ 31
; Hardware Control Registers
cr0: .equ 0
rctr: .equ 0 ; Recovery Counter Register
cr8: .equ 8 ; Protection ID 1
pidr1: .equ 8
cr9: .equ 9 ; Protection ID 2
pidr2: .equ 9
cr10: .equ 10
ccr: .equ 10 ; Coprocessor Configuration Register
cr11: .equ 11
sar: .equ 11 ; Shift Amount Register
cr12: .equ 12
pidr3: .equ 12 ; Protection ID 3
cr13: .equ 13
pidr4: .equ 13 ; Protection ID 4
cr14: .equ 14
iva: .equ 14 ; Interrupt Vector Address
cr15: .equ 15
eiem: .equ 15 ; External Interrupt Enable Mask
cr16: .equ 16
itmr: .equ 16 ; Interval Timer
cr17: .equ 17
pcsq: .equ 17 ; Program Counter Space queue
cr18: .equ 18
pcoq: .equ 18 ; Program Counter Offset queue
cr19: .equ 19
iir: .equ 19 ; Interruption Instruction Register
cr20: .equ 20
isr: .equ 20 ; Interruption Space Register
cr21: .equ 21
ior: .equ 21 ; Interruption Offset Register
cr22: .equ 22
ipsw: .equ 22 ; Interruption Processor Status Word
cr23: .equ 23
eirr: .equ 23 ; External Interrupt Request
cr24: .equ 24
ppda: .equ 24 ; Physical Page Directory Address
tr0: .equ 24 ; Temporary register 0
cr25: .equ 25
hta: .equ 25 ; Hash Table Address
tr1: .equ 25 ; Temporary register 1
cr26: .equ 26
tr2: .equ 26 ; Temporary register 2
cr27: .equ 27
tr3: .equ 27 ; Temporary register 3
cr28: .equ 28
tr4: .equ 28 ; Temporary register 4
cr29: .equ 29
tr5: .equ 29 ; Temporary register 5
cr30: .equ 30
tr6: .equ 30 ; Temporary register 6
cr31: .equ 31
tr7: .equ 31 ; Temporary register 7
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
; Procedure Call Convention ~
; Register Definitions for Use with Assembler ~
; version A.08.06
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
; Software Architecture General Registers
rp: .equ r2 ; return pointer
mrp: .equ r31 ; millicode return pointer
ret0: .equ r28 ; return value
ret1: .equ r29 ; return value (high part of double)
sl: .equ r29 ; static link
sp: .equ r30 ; stack pointer
dp: .equ r27 ; data pointer
arg0: .equ r26 ; argument
arg1: .equ r25 ; argument or high part of double argument
arg2: .equ r24 ; argument
arg3: .equ r23 ; argument or high part of double argument
;_____________________________________________________________________________
; Software Architecture Space Registers
; sr0 ; return link from BLE
sret: .equ sr1 ; return value
sarg: .equ sr1 ; argument
; sr4 ; PC SPACE tracker
; sr5 ; process private data
;_____________________________________________________________________________
; Software Architecture Pseudo Registers
previous_sp: .equ 64 ; old stack pointer (locates previous frame)
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
; Standard space and subspace definitions. version A.08.06
; These are generally suitable for programs on HP_UX and HPE.
; Statements commented out are used when building such things as operating
; system kernels.
;;;;;;;;;;;;;;;;
.SPACE $TEXT$, SPNUM=0,SORT=8
.subspa $MILLICODE$, QUAD=0,ALIGN=8,ACCESS=0x2c,SORT=8
.subspa $LIT$, QUAD=0,ALIGN=8,ACCESS=0x2c,SORT=16
.subspa $CODE$, QUAD=0,ALIGN=8,ACCESS=0x2c,SORT=24
; Additional code subspaces should have ALIGN=8 for an interspace BV
; and should have SORT=24.
;
; For an incomplete executable (program bound to shared libraries),
; sort keys $GLOBAL$ -1 and $GLOBAL$ -2 are reserved for the $DLT$
; and $PLT$ subspaces respectively.
;;;;;;;;;;;;;;;
.SPACE $PRIVATE$, SPNUM=1,PRIVATE,SORT=16
.subspa $GLOBAL$, QUAD=1,ALIGN=8,ACCESS=0x1f,SORT=40
.import $global$
.subspa $DATA$, QUAD=1,ALIGN=8,ACCESS=0x1f,SORT=16
.subspa $BSS$, QUAD=1,ALIGN=8,ACCESS=0x1f,SORT=82,ZERO
.SPACE $TEXT$
.SUBSPA $MILLICODE$
.align 8
;=============================================================================
; $$remI: 32-bit signed remainder millicode.
;   dividend in arg0 (r26), divisor in arg1 (r25); remainder returned in
;   ret1 (r29), return address in r31.  Traps (addit,=) on divisor == 0.
;   Operands are made non-negative first, so 31 ds/addc divide steps follow.
; NOTE(review): line breaks reconstructed from a whitespace-mangled
; extraction; verify instruction count against a pristine milli.S.
;=============================================================================
.EXPORT $$remI,millicode
; .IMPORT cerror
$$remI:
.PROC
.CALLINFO millicode
.ENTRY
addit,= 0,arg1,r0 ; trap if divisor is zero
add,>= r0,arg0,ret1 ; move dividend; if negative,
sub r0,ret1,ret1 ;   negate it
sub r0,arg1,r1 ; clear carry, negate the divisor
ds r0,r1,r0 ; set V-bit for the divide steps
or r0,r0,r1
add ret1,ret1,ret1 ; shift msb bit into carry
ds r1,arg1,r1 ; divide step 1
addc ret1,ret1,ret1
ds r1,arg1,r1 ; divide step 2
addc ret1,ret1,ret1
ds r1,arg1,r1 ; divide step 3
addc ret1,ret1,ret1
ds r1,arg1,r1 ; divide step 4
addc ret1,ret1,ret1
ds r1,arg1,r1 ; divide step 5
addc ret1,ret1,ret1
ds r1,arg1,r1 ; divide step 6
addc ret1,ret1,ret1
ds r1,arg1,r1 ; divide step 7
addc ret1,ret1,ret1
ds r1,arg1,r1 ; divide step 8
addc ret1,ret1,ret1
ds r1,arg1,r1 ; divide step 9
addc ret1,ret1,ret1
ds r1,arg1,r1 ; divide step 10
addc ret1,ret1,ret1
ds r1,arg1,r1 ; divide step 11
addc ret1,ret1,ret1
ds r1,arg1,r1 ; divide step 12
addc ret1,ret1,ret1
ds r1,arg1,r1 ; divide step 13
addc ret1,ret1,ret1
ds r1,arg1,r1 ; divide step 14
addc ret1,ret1,ret1
ds r1,arg1,r1 ; divide step 15
addc ret1,ret1,ret1
ds r1,arg1,r1 ; divide step 16
addc ret1,ret1,ret1
ds r1,arg1,r1 ; divide step 17
addc ret1,ret1,ret1
ds r1,arg1,r1 ; divide step 18
addc ret1,ret1,ret1
ds r1,arg1,r1 ; divide step 19
addc ret1,ret1,ret1
ds r1,arg1,r1 ; divide step 20
addc ret1,ret1,ret1
ds r1,arg1,r1 ; divide step 21
addc ret1,ret1,ret1
ds r1,arg1,r1 ; divide step 22
addc ret1,ret1,ret1
ds r1,arg1,r1 ; divide step 23
addc ret1,ret1,ret1
ds r1,arg1,r1 ; divide step 24
addc ret1,ret1,ret1
ds r1,arg1,r1 ; divide step 25
addc ret1,ret1,ret1
ds r1,arg1,r1 ; divide step 26
addc ret1,ret1,ret1
ds r1,arg1,r1 ; divide step 27
addc ret1,ret1,ret1
ds r1,arg1,r1 ; divide step 28
addc ret1,ret1,ret1
ds r1,arg1,r1 ; divide step 29
addc ret1,ret1,ret1
ds r1,arg1,r1 ; divide step 30
addc ret1,ret1,ret1
ds r1,arg1,r1 ; divide step 31
addc ret1,ret1,ret1
movb,>=,n r1,ret1,remI300 ; final remainder fixup
add,< arg1,r0,r0
add,tr r1,arg1,ret1
sub r1,arg1,ret1
remI300: add,>= arg0,r0,r0 ; remainder takes the dividend's sign
sub r0,ret1,ret1
bv r0(r31)
nop
.EXIT
.PROCEND
bit1: .equ 1
bit30: .equ 30
bit31: .equ 31
len2: .equ 2
len4: .equ 4
;=============================================================================
; $$dyncall: dynamic call stub.  Function pointer (plabel or plain address)
;   in r22; loads the target, branches interspace via sr0, and saves the
;   return pointer at -24(sp).
;   The bb on bit30 of r22 presumably distinguishes a shared-library plabel
;   from a plain procedure address -- TODO confirm against the runtime ABI.
;=============================================================================
$$dyncall:
.proc
.callinfo NO_CALLS
.entry
.export $$dyncall,MILLICODE
bb,>=,n 22,bit30,noshlibs
depi 0,bit31,len2,22
ldw 4(22),19
ldw 0(22),22
noshlibs:
ldsid (22),r1
mtsp r1,sr0
be 0(sr0,r22)
stw rp,-24(sp)
.exit
.procend
temp: .EQU r1
retreg: .EQU ret1 ; r29
;=============================================================================
; $$divU: 32-bit unsigned divide millicode.
;   dividend in arg0, divisor in arg1; quotient returned in retreg (ret1).
;   Small divisors (<= 15) dispatch through a blr table; a divisor with the
;   high bit set is handled in big_divisor.  Traps on divisor == 0.
;=============================================================================
.export $$divU,millicode
.import $$divU_3,millicode
.import $$divU_5,millicode
.import $$divU_6,millicode
.import $$divU_7,millicode
.import $$divU_9,millicode
.import $$divU_10,millicode
.import $$divU_12,millicode
.import $$divU_14,millicode
.import $$divU_15,millicode
$$divU:
.proc
.callinfo millicode
.entry
; The subtract is not nullified since it does no harm and can be used
; by the two cases that branch back to "normal".
comib,>= 15,arg1,special_divisor
sub r0,arg1,temp ; clear carry, negate the divisor
ds r0,temp,r0 ; set V-bit to 1
normal:
add arg0,arg0,retreg ; shift msb bit into carry
ds r0,arg1,temp ; 1st divide step, if no carry
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 2nd divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 3rd divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 4th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 5th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 6th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 7th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 8th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 9th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 10th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 11th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 12th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 13th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 14th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 15th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 16th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 17th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 18th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 19th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 20th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 21st divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 22nd divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 23rd divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 24th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 25th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 26th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 27th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 28th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 29th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 30th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 31st divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 32nd divide step,
bv 0(r31)
addc retreg,retreg,retreg ; shift last retreg bit into retreg
;_____________________________________________________________________________
; handle the cases where divisor is a small constant or has high bit on
special_divisor:
comib,> 0,arg1,big_divisor
nop
blr arg1,r0 ; dispatch: two instructions per divisor value
nop
zero_divisor: ; this label is here to provide external visibility
addit,= 0,arg1,0 ; trap for zero dvr
nop
bv 0(r31) ; divisor == 1
copy arg0,retreg
bv 0(r31) ; divisor == 2
extru arg0,30,31,retreg
b,n $$divU_3 ; divisor == 3
nop
bv 0(r31) ; divisor == 4
extru arg0,29,30,retreg
b,n $$divU_5 ; divisor == 5
nop
b,n $$divU_6 ; divisor == 6
nop
b,n $$divU_7 ; divisor == 7
nop
bv 0(r31) ; divisor == 8
extru arg0,28,29,retreg
b,n $$divU_9 ; divisor == 9
nop
b,n $$divU_10 ; divisor == 10
nop
b normal ; divisor == 11
ds r0,temp,r0 ; set V-bit to 1
b,n $$divU_12 ; divisor == 12
nop
b normal ; divisor == 13
ds r0,temp,r0 ; set V-bit to 1
b,n $$divU_14 ; divisor == 14
nop
b,n $$divU_15 ; divisor == 15
nop
;_____________________________________________________________________________
; Handle the case where the high bit is on in the divisor.
; Compute: if( dividend>=divisor) quotient=1; else quotient=0;
; Note: dividend>=divisor iff dividend-divisor does not borrow
; and not borrow iff carry
big_divisor:
sub arg0,arg1,r0
bv 0(r31)
addc r0,r0,retreg
.exit
.procend
.end
t2: .EQU r1
; x2 .EQU arg0 ; r26
t1: .EQU arg1 ; r25
; x1 .EQU ret1 ; r29
;_____________________________________________________________________________
;=============================================================================
; $$divide_by_constant: divide-by-small-constant millicode entry points
;   ($$divI_N signed, $$divU_N unsigned).  Dividend in arg0 (x2 = r26);
;   quotient returned in ret1 (x1 = r29); t1/t2 are scratch.  Each entry
;   reduces the divide to shift-and-add multiplication by 1/N.
; NOTE(review): line breaks reconstructed from a whitespace-mangled
; extraction; verify against a pristine milli.S before assembling.
;=============================================================================
$$divide_by_constant:
.PROC
.CALLINFO millicode
.entry
.export $$divide_by_constant,millicode
; Provides a "nice" label for the code covered by the unwind descriptor
; for things like gprof.
$$divI_2:
.EXPORT $$divI_2,MILLICODE
COMCLR,>= arg0,0,0 ; round negative dividends toward zero
ADDI 1,arg0,arg0
bv 0(r31)
EXTRS arg0,30,31,ret1
$$divI_4:
.EXPORT $$divI_4,MILLICODE
COMCLR,>= arg0,0,0
ADDI 3,arg0,arg0
bv 0(r31)
EXTRS arg0,29,30,ret1
$$divI_8:
.EXPORT $$divI_8,MILLICODE
COMCLR,>= arg0,0,0
ADDI 7,arg0,arg0
bv 0(r31)
EXTRS arg0,28,29,ret1
$$divI_16:
.EXPORT $$divI_16,MILLICODE
COMCLR,>= arg0,0,0
ADDI 15,arg0,arg0
bv 0(r31)
EXTRS arg0,27,28,ret1
$$divI_3:
.EXPORT $$divI_3,MILLICODE
COMB,<,N arg0,0,$neg3
ADDI 1,arg0,arg0
EXTRU arg0,1,2,ret1
SH2ADD arg0,arg0,arg0
B $pos
ADDC ret1,0,ret1
$neg3:
SUBI 1,arg0,arg0
EXTRU arg0,1,2,ret1
SH2ADD arg0,arg0,arg0
B $neg
ADDC ret1,0,ret1
$$divU_3:
.EXPORT $$divU_3,MILLICODE
ADDI 1,arg0,arg0
ADDC 0,0,ret1
SHD ret1,arg0,30,t1
SH2ADD arg0,arg0,arg0
B $pos
ADDC ret1,t1,ret1
$$divI_5:
.EXPORT $$divI_5,MILLICODE
COMB,<,N arg0,0,$neg5
ADDI 3,arg0,t1
SH1ADD arg0,t1,arg0
B $pos
ADDC 0,0,ret1
$neg5:
SUB 0,arg0,arg0
ADDI 1,arg0,arg0
SHD 0,arg0,31,ret1
SH1ADD arg0,arg0,arg0
B $neg
ADDC ret1,0,ret1
$$divU_5:
.EXPORT $$divU_5,MILLICODE
ADDI 1,arg0,arg0
ADDC 0,0,ret1
SHD ret1,arg0,31,t1
SH1ADD arg0,arg0,arg0
B $pos
ADDC t1,ret1,ret1
$$divI_6:
.EXPORT $$divI_6,MILLICODE
COMB,<,N arg0,0,$neg6
EXTRU arg0,30,31,arg0 ; divide by 2 first, then by 3
ADDI 5,arg0,t1
SH2ADD arg0,t1,arg0
B $pos
ADDC 0,0,ret1
$neg6:
SUBI 2,arg0,arg0
EXTRU arg0,30,31,arg0
SHD 0,arg0,30,ret1
SH2ADD arg0,arg0,arg0
B $neg
ADDC ret1,0,ret1
$$divU_6:
.EXPORT $$divU_6,MILLICODE
EXTRU arg0,30,31,arg0
ADDI 1,arg0,arg0
SHD 0,arg0,30,ret1
SH2ADD arg0,arg0,arg0
B $pos
ADDC ret1,0,ret1
$$divU_10:
.EXPORT $$divU_10,MILLICODE
EXTRU arg0,30,31,arg0
ADDI 3,arg0,t1
SH1ADD arg0,t1,arg0
ADDC 0,0,ret1
; shared tails: fold the partial product down 28, 24, and 16 bit positions
$pos:
SHD ret1,arg0,28,t1
SHD arg0,0,28,t2
ADD arg0,t2,arg0
ADDC ret1,t1,ret1
$pos_for_17:
SHD ret1,arg0,24,t1
SHD arg0,0,24,t2
ADD arg0,t2,arg0
ADDC ret1,t1,ret1
SHD ret1,arg0,16,t1
SHD arg0,0,16,t2
ADD arg0,t2,arg0
bv 0(r31)
ADDC ret1,t1,ret1
$$divI_10:
.EXPORT $$divI_10,MILLICODE
COMB,< arg0,0,$neg10
COPY 0,ret1
EXTRU arg0,30,31,arg0
ADDIB,TR 1,arg0,$pos
SH1ADD arg0,arg0,arg0
$neg10:
SUBI 2,arg0,arg0
EXTRU arg0,30,31,arg0
SH1ADD arg0,arg0,arg0
$neg:
SHD ret1,arg0,28,t1
SHD arg0,0,28,t2
ADD arg0,t2,arg0
ADDC ret1,t1,ret1
$neg_for_17:
SHD ret1,arg0,24,t1
SHD arg0,0,24,t2
ADD arg0,t2,arg0
ADDC ret1,t1,ret1
SHD ret1,arg0,16,t1
SHD arg0,0,16,t2
ADD arg0,t2,arg0
ADDC ret1,t1,ret1
bv 0(r31)
SUB 0,ret1,ret1 ; negate quotient for negative dividends
$$divI_12:
.EXPORT $$divI_12,MILLICODE
COMB,< arg0,0,$neg12
COPY 0,ret1
EXTRU arg0,29,30,arg0
ADDIB,TR 1,arg0,$pos
SH2ADD arg0,arg0,arg0
$neg12:
SUBI 4,arg0,arg0
EXTRU arg0,29,30,arg0
B $neg
SH2ADD arg0,arg0,arg0
$$divU_12:
.EXPORT $$divU_12,MILLICODE
EXTRU arg0,29,30,arg0
ADDI 5,arg0,t1
SH2ADD arg0,t1,arg0
B $pos
ADDC 0,0,ret1
$$divI_15:
.EXPORT $$divI_15,MILLICODE
COMB,< arg0,0,$neg15
COPY 0,ret1
ADDIB,TR 1,arg0,$pos+4
SHD ret1,arg0,28,t1
$neg15:
B $neg
SUBI 1,arg0,arg0
$$divU_15:
.EXPORT $$divU_15,MILLICODE
ADDI 1,arg0,arg0
B $pos
ADDC 0,0,ret1
$$divI_17:
.EXPORT $$divI_17,MILLICODE
COMB,<,N arg0,0,$neg17
ADDI 1,arg0,arg0
SHD 0,arg0,28,t1
SHD arg0,0,28,t2
SUB t2,arg0,arg0
B $pos_for_17
SUBB t1,0,ret1
$neg17:
SUBI 1,arg0,arg0
SHD 0,arg0,28,t1
SHD arg0,0,28,t2
SUB t2,arg0,arg0
B $neg_for_17
SUBB t1,0,ret1
$$divU_17:
.EXPORT $$divU_17,MILLICODE
ADDI 1,arg0,arg0
ADDC 0,0,ret1
SHD ret1,arg0,28,t1
$u17:
SHD arg0,0,28,t2
SUB t2,arg0,arg0
B $pos_for_17
SUBB t1,ret1,ret1
$$divI_7:
.EXPORT $$divI_7,MILLICODE
COMB,<,N arg0,0,$neg7
$7:
ADDI 1,arg0,arg0
SHD 0,arg0,29,ret1
SH3ADD arg0,arg0,arg0
ADDC ret1,0,ret1
$pos7:
SHD ret1,arg0,26,t1
SHD arg0,0,26,t2
ADD arg0,t2,arg0
ADDC ret1,t1,ret1
SHD ret1,arg0,20,t1
SHD arg0,0,20,t2
ADD arg0,t2,arg0
ADDC ret1,t1,t1
COPY 0,ret1
SHD,= t1,arg0,24,t1
$1:
ADDB,TR t1,ret1,$2
EXTRU arg0,31,24,arg0
bv,n 0(r31)
$2:
ADDB,TR t1,arg0,$1
EXTRU,= arg0,7,8,t1
$neg7:
SUBI 1,arg0,arg0
$8:
SHD 0,arg0,29,ret1
SH3ADD arg0,arg0,arg0
ADDC ret1,0,ret1
$neg7_shift:
SHD ret1,arg0,26,t1
SHD arg0,0,26,t2
ADD arg0,t2,arg0
ADDC ret1,t1,ret1
SHD ret1,arg0,20,t1
SHD arg0,0,20,t2
ADD arg0,t2,arg0
ADDC ret1,t1,t1
COPY 0,ret1
SHD,= t1,arg0,24,t1
$3:
ADDB,TR t1,ret1,$4
EXTRU arg0,31,24,arg0
bv 0(r31)
SUB 0,ret1,ret1
$4:
ADDB,TR t1,arg0,$3
EXTRU,= arg0,7,8,t1
$$divU_7:
.EXPORT $$divU_7,MILLICODE
ADDI 1,arg0,arg0
ADDC 0,0,ret1
SHD ret1,arg0,29,t1
SH3ADD arg0,arg0,arg0
B $pos7
ADDC t1,ret1,ret1
$$divI_9:
.EXPORT $$divI_9,MILLICODE
COMB,<,N arg0,0,$neg9
ADDI 1,arg0,arg0
SHD 0,arg0,29,t1
SHD arg0,0,29,t2
SUB t2,arg0,arg0
B $pos7
SUBB t1,0,ret1
$neg9:
SUBI 1,arg0,arg0
SHD 0,arg0,29,t1
SHD arg0,0,29,t2
SUB t2,arg0,arg0
B $neg7_shift
SUBB t1,0,ret1
$$divU_9:
.EXPORT $$divU_9,MILLICODE
ADDI 1,arg0,arg0
ADDC 0,0,ret1
SHD ret1,arg0,29,t1
SHD arg0,0,29,t2
SUB t2,arg0,arg0
B $pos7
SUBB t1,ret1,ret1
$$divI_14:
.EXPORT $$divI_14,MILLICODE
COMB,<,N arg0,0,$neg14
$$divU_14:
.EXPORT $$divU_14,MILLICODE
B $7 ; divide by 2, then use the divide-by-7 path
EXTRU arg0,30,31,arg0
$neg14:
SUBI 2,arg0,arg0
B $8
EXTRU arg0,30,31,arg0
.exit
.PROCEND
.END
rmndr: .EQU ret1 ; r29
;=============================================================================
; $$remU: 32-bit unsigned remainder millicode.
;   dividend in arg0, divisor in arg1; remainder returned in rmndr (ret1).
;   Divisors with the high bit set take the special_case path; traps
;   (addit,=) on divisor == 0.
;=============================================================================
.export $$remU,millicode
$$remU:
.proc
.callinfo millicode
.entry
comib,>=,n 0,arg1,special_case
sub r0,arg1,rmndr ; clear carry, negate the divisor
ds r0,rmndr,r0 ; set V-bit to 1
add arg0,arg0,temp ; shift msb bit into carry
ds r0,arg1,rmndr ; 1st divide step, if no carry
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 2nd divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 3rd divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 4th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 5th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 6th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 7th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 8th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 9th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 10th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 11th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 12th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 13th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 14th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 15th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 16th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 17th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 18th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 19th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 20th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 21st divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 22nd divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 23rd divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 24th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 25th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 26th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 27th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 28th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 29th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 30th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 31st divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 32nd divide step,
comiclr,<= 0,rmndr,r0
add rmndr,arg1,rmndr ; correction
; .exit
bv,n 0(r31)
nop
; Putting >= on the last DS and deleting COMICLR does not
work!;_____________________________________________________________________________special_case:addit,= 0,arg1,r0 ; trap on div by zerosub,>>= arg0,arg1,rmndrcopy arg0,rmndrbv,n 0(r31)nop.exit.procend.end; Use bv 0(r31) and bv,n 0(r31) instead.; #define return bv 0(%mrp); #define return_n bv,n 0(%mrp).subspa $MILLICODE$.align 16$$mulI:.proc.callinfo millicode.entry.export $$mulI, millicodecombt,<<= %r25,%r26,l4 ; swap args if unsigned %r25>%r26copy 0,%r29 ; zero out the resultxor %r26,%r25,%r26 ; swap %r26 & %r25 using thexor %r26,%r25,%r25 ; old xor trickxor %r26,%r25,%r26l4: combt,<= 0,%r26,l3 ; if %r26>=0 then proceed like unsignedzdep %r25,30,8,%r1 ; %r1 = (%r25&0xff)<<1 *********sub,> 0,%r25,%r1 ; otherwise negate both andcombt,<=,n %r26,%r1,l2 ; swap back if |%r26|<|%r25|sub 0,%r26,%r25movb,tr,n %r1,%r26,l2 ; 10th inst.l0: add %r29,%r1,%r29 ; add in this partial productl1: zdep %r26,23,24,%r26 ; %r26 <<= 8 ******************l2: zdep %r25,30,8,%r1 ; %r1 = (%r25&0xff)<<1 *********l3: blr %r1,0 ; case on these 8 bits ******extru %r25,23,24,%r25 ; %r25 >>= 8 ******************;16 insts before this.; %r26 <<= 8 **************************x0: comb,<> %r25,0,l2 ! zdep %r26,23,24,%r26 ! bv,n 0(r31) ! nopx1: comb,<> %r25,0,l1 ! add %r29,%r26,%r29 ! bv,n 0(r31) ! nopx2: comb,<> %r25,0,l1 ! sh1add %r26,%r29,%r29 ! bv,n 0(r31) ! nopx3: comb,<> %r25,0,l0 ! sh1add %r26,%r26,%r1 ! bv 0(r31) ! add %r29,%r1,%r29x4: comb,<> %r25,0,l1 ! sh2add %r26,%r29,%r29 ! bv,n 0(r31) ! nopx5: comb,<> %r25,0,l0 ! sh2add %r26,%r26,%r1 ! bv 0(r31) ! add %r29,%r1,%r29x6: sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh1add %r1,%r29,%r29 ! bv,n 0(r31)x7: sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh2add %r26,%r29,%r29 ! b,n ret_t0x8: comb,<> %r25,0,l1 ! sh3add %r26,%r29,%r29 ! bv,n 0(r31) ! nopx9: comb,<> %r25,0,l0 ! sh3add %r26,%r26,%r1 ! bv 0(r31) ! add %r29,%r1,%r29x10: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh1add %r1,%r29,%r29 ! bv,n 0(r31)x11: sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! 
sh3add %r26,%r29,%r29 ! b,n ret_t0x12: sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh2add %r1,%r29,%r29 ! bv,n 0(r31)x13: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh3add %r26,%r29,%r29 ! b,n ret_t0x14: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29x15: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh1add %r1,%r1,%r1 ! b,n ret_t0x16: zdep %r26,27,28,%r1 ! comb,<> %r25,0,l1 ! add %r29,%r1,%r29 ! bv,n 0(r31)x17: sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh3add %r26,%r1,%r1 ! b,n ret_t0x18: sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh1add %r1,%r29,%r29 ! bv,n 0(r31)x19: sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh1add %r1,%r26,%r1 ! b,n ret_t0x20: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh2add %r1,%r29,%r29 ! bv,n 0(r31)x21: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh2add %r1,%r26,%r1 ! b,n ret_t0x22: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29x23: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1x24: sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh3add %r1,%r29,%r29 ! bv,n 0(r31)x25: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh2add %r1,%r1,%r1 ! b,n ret_t0x26: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29x27: sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh3add %r1,%r1,%r1 ! b,n ret_t0x28: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29x29: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1x30: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh1add %r1,%r29,%r29x31: zdep %r26,26,27,%r1 ! comb,<> %r25,0,l0 ! sub %r1,%r26,%r1 ! b,n ret_t0x32: zdep %r26,26,27,%r1 ! comb,<> %r25,0,l1 ! add %r29,%r1,%r29 ! bv,n 0(r31)x33: sh3add %r26,0,%r1 ! comb,<> %r25,0,l0 ! sh2add %r1,%r26,%r1 ! b,n ret_t0x34: zdep %r26,27,28,%r1 ! add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29x35: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh3add %r26,%r1,%r1x36: sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! 
sh2add %r1,%r29,%r29 ! bv,n 0(r31)x37: sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh2add %r1,%r26,%r1 ! b,n ret_t0x38: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29x39: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1x40: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh3add %r1,%r29,%r29 ! bv,n 0(r31)x41: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh3add %r1,%r26,%r1 ! b,n ret_t0x42: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29x43: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1x44: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29x45: sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh2add %r1,%r1,%r1 ! b,n ret_t0x46: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! add %r1,%r26,%r1x47: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh1add %r26,%r1,%r1x48: sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! zdep %r1,27,28,%r1 ! b,n ret_t0x49: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh2add %r26,%r1,%r1x50: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_shift ! sh1add %r1,%r29,%r29x51: sh3add %r26,%r26,%r1 ! sh3add %r26,%r1,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1x52: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29x53: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1x54: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh1add %r1,%r29,%r29x55: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1x56: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh3add %r1,%r29,%r29x57: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1x58: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1x59: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t02a0 ! sh1add %r1,%r1,%r1x60: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh2add %r1,%r29,%r29x61: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! 
sh2add %r1,%r26,%r1x62: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29x63: zdep %r26,25,26,%r1 ! comb,<> %r25,0,l0 ! sub %r1,%r26,%r1 ! b,n ret_t0x64: zdep %r26,25,26,%r1 ! comb,<> %r25,0,l1 ! add %r29,%r1,%r29 ! bv,n 0(r31)x65: sh3add %r26,0,%r1 ! comb,<> %r25,0,l0 ! sh3add %r1,%r26,%r1 ! b,n ret_t0x66: zdep %r26,26,27,%r1 ! add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29x67: sh3add %r26,0,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1x68: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29x69: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1x70: zdep %r26,25,26,%r1 ! sh2add %r26,%r1,%r1 ! b e_t0 ! sh1add %r26,%r1,%r1x71: sh3add %r26,%r26,%r1 ! sh3add %r1,0,%r1 ! b e_t0 ! sub %r1,%r26,%r1x72: sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh3add %r1,%r29,%r29 ! bv,n 0(r31)x73: sh3add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_shift ! add %r29,%r1,%r29x74: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29x75: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1x76: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29x77: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1x78: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r26,%r1x79: zdep %r26,27,28,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sub %r1,%r26,%r1x80: zdep %r26,27,28,%r1 ! sh2add %r1,%r1,%r1 ! b e_shift ! add %r29,%r1,%r29x81: sh3add %r26,%r26,%r1 ! sh3add %r1,%r1,%r1 ! b e_shift ! add %r29,%r1,%r29x82: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29x83: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1x84: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29x85: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1x86: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r26,%r1x87: sh3add %r26,%r26,%r1 ! 
sh3add %r1,%r1,%r1 ! b e_t02a0 ! sh2add %r26,%r1,%r1
; $$mulI table (continued): one four-instruction entry per multiplier value
; N (x88..x255).  Each entry builds N*%r26 in %r1 out of shift-and-add
; (shNadd/zdep/sub) steps, then branches to a shared e_* epilogue below,
; which folds %r1 into the running product %r29 and returns via %r31.
; NOTE(review): the entry point and entries x00..x87 precede this chunk.
x88: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
x89: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
x90: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x91: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
x92: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0 ! sh1add %r1,%r26,%r1
x93: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
x94: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_2t0 ! sh1add %r26,%r1,%r1
x95: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
x96: sh3add %r26,0,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
x97: sh3add %r26,0,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
x98: zdep %r26,26,27,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh1add %r26,%r1,%r1
x99: sh3add %r26,0,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
x100: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
x101: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
x102: zdep %r26,26,27,%r1 ! sh1add %r26,%r1,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
x103: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t02a0 ! sh2add %r1,%r26,%r1
x104: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
x105: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
x106: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
x107: sh3add %r26,%r26,%r1 ! sh2add %r26,%r1,%r1 ! b e_t02a0 ! sh3add %r1,%r26,%r1
x108: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
x109: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
x110: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_2t0 ! sh1add %r1,%r26,%r1
x111: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
x112: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! zdep %r1,27,28,%r1
x113: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t02a0 ! sh1add %r1,%r1,%r1
x114: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r1,%r1
x115: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0a0 ! sh1add %r1,%r1,%r1
x116: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0 ! sh2add %r1,%r26,%r1
x117: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r1,%r1
x118: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0a0 ! sh3add %r1,%r1,%r1
x119: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t02a0 ! sh3add %r1,%r1,%r1
x120: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
x121: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
x122: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
x123: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
x124: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
x125: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
x126: zdep %r26,25,26,%r1 ! sub %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x127: zdep %r26,24,25,%r1 ! comb,<> %r25,0,l0 ! sub %r1,%r26,%r1 ! b,n ret_t0
x128: zdep %r26,24,25,%r1 ! comb,<> %r25,0,l1 ! add %r29,%r1,%r29 ! bv,n 0(r31)
x129: zdep %r26,24,25,%r1 ! comb,<> %r25,0,l0 ! add %r1,%r26,%r1 ! b,n ret_t0
x130: zdep %r26,25,26,%r1 ! add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x131: sh3add %r26,0,%r1 ! sh3add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
x132: sh3add %r26,0,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
x133: sh3add %r26,0,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
x134: sh3add %r26,0,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r26,%r1
x135: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
x136: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
x137: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
x138: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
x139: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0a0 ! sh2add %r1,%r26,%r1
x140: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0 ! sh2add %r1,%r1,%r1
x141: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0a0 ! sh1add %r1,%r26,%r1
x142: sh3add %r26,%r26,%r1 ! sh3add %r1,0,%r1 ! b e_2t0 ! sub %r1,%r26,%r1
x143: zdep %r26,27,28,%r1 ! sh3add %r1,%r1,%r1 ! b e_t0 ! sub %r1,%r26,%r1
x144: sh3add %r26,%r26,%r1 ! sh3add %r1,0,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x145: sh3add %r26,%r26,%r1 ! sh3add %r1,0,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
x146: sh3add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x147: sh3add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
x148: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
x149: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
x150: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r26,%r1
x151: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0a0 ! sh1add %r1,%r26,%r1
x152: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
x153: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
x154: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
x155: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
x156: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0 ! sh1add %r1,%r26,%r1
x157: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_t02a0 ! sh2add %r1,%r1,%r1
x158: zdep %r26,27,28,%r1 ! sh2add %r1,%r1,%r1 ! b e_2t0 ! sub %r1,%r26,%r1
x159: zdep %r26,26,27,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sub %r1,%r26,%r1
x160: sh2add %r26,%r26,%r1 ! sh2add %r1,0,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
x161: sh3add %r26,0,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
x162: sh3add %r26,%r26,%r1 ! sh3add %r1,%r1,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x163: sh3add %r26,%r26,%r1 ! sh3add %r1,%r1,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
x164: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
x165: sh3add %r26,0,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
x166: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r26,%r1
x167: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_2t0a0 ! sh1add %r1,%r26,%r1
x168: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
x169: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
x170: zdep %r26,26,27,%r1 ! sh1add %r26,%r1,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
x171: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r1,%r1
x172: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_4t0 ! sh1add %r1,%r26,%r1
x173: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t02a0 ! sh3add %r1,%r1,%r1
x174: zdep %r26,26,27,%r1 ! sh1add %r26,%r1,%r1 ! b e_t04a0 ! sh2add %r1,%r1,%r1
x175: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_5t0 ! sh1add %r1,%r26,%r1
x176: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_8t0 ! add %r1,%r26,%r1
x177: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_8t0a0 ! add %r1,%r26,%r1
x178: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0 ! sh3add %r1,%r26,%r1
x179: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0a0 ! sh3add %r1,%r26,%r1
x180: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
x181: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
x182: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_2t0 ! sh1add %r1,%r26,%r1
x183: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_2t0a0 ! sh1add %r1,%r26,%r1
x184: sh2add %r26,%r26,%r1 ! sh3add %r1,%r1,%r1 ! b e_4t0 ! add %r1,%r26,%r1
x185: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
x186: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r1,%r1
x187: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t02a0 ! sh2add %r1,%r1,%r1
x188: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_4t0 ! sh1add %r26,%r1,%r1
x189: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r1,%r1
x190: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0 ! sh2add %r1,%r1,%r1
x191: zdep %r26,25,26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sub %r1,%r26,%r1
x192: sh3add %r26,0,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
x193: sh3add %r26,0,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
x194: sh3add %r26,0,%r1 ! sh1add %r1,%r1,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
x195: sh3add %r26,0,%r1 ! sh3add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
x196: sh3add %r26,0,%r1 ! sh1add %r1,%r1,%r1 ! b e_4t0 ! sh1add %r1,%r26,%r1
x197: sh3add %r26,0,%r1 ! sh1add %r1,%r1,%r1 ! b e_4t0a0 ! sh1add %r1,%r26,%r1
x198: zdep %r26,25,26,%r1 ! sh1add %r26,%r1,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
x199: sh3add %r26,0,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0a0 ! sh1add %r1,%r1,%r1
x200: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
x201: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
x202: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
x203: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_2t0a0 ! sh2add %r1,%r26,%r1
x204: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0 ! sh1add %r1,%r1,%r1
x205: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
x206: zdep %r26,25,26,%r1 ! sh2add %r26,%r1,%r1 ! b e_t02a0 ! sh1add %r1,%r1,%r1
x207: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_3t0 ! sh2add %r1,%r26,%r1
x208: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_8t0 ! add %r1,%r26,%r1
x209: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_8t0a0 ! add %r1,%r26,%r1
x210: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0 ! sh2add %r1,%r1,%r1
x211: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0a0 ! sh2add %r1,%r1,%r1
x212: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_4t0 ! sh2add %r1,%r26,%r1
x213: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_4t0a0 ! sh2add %r1,%r26,%r1
x214: sh3add %r26,%r26,%r1 ! sh2add %r26,%r1,%r1 ! b e2t04a0 ! sh3add %r1,%r26,%r1
x215: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_5t0 ! sh1add %r1,%r26,%r1
x216: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
x217: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
x218: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
x219: sh3add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
x220: sh1add %r26,%r26,%r1 ! sh3add %r1,%r1,%r1 ! b e_4t0 ! sh1add %r1,%r26,%r1
x221: sh1add %r26,%r26,%r1 ! sh3add %r1,%r1,%r1 ! b e_4t0a0 ! sh1add %r1,%r26,%r1
x222: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r1,%r1
x223: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0a0 ! sh1add %r1,%r1,%r1
x224: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_8t0 ! add %r1,%r26,%r1
x225: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
x226: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t02a0 ! zdep %r1,26,27,%r1
x227: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t02a0 ! sh2add %r1,%r1,%r1
x228: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0 ! sh1add %r1,%r1,%r1
x229: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0a0 ! sh1add %r1,%r1,%r1
x230: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_5t0 ! add %r1,%r26,%r1
x231: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_3t0 ! sh2add %r1,%r26,%r1
x232: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_8t0 ! sh2add %r1,%r26,%r1
x233: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_8t0a0 ! sh2add %r1,%r26,%r1
x234: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0 ! sh3add %r1,%r1,%r1
x235: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0a0 ! sh3add %r1,%r1,%r1
x236: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e4t08a0 ! sh1add %r1,%r1,%r1
x237: zdep %r26,27,28,%r1 ! sh2add %r1,%r1,%r1 ! b e_3t0 ! sub %r1,%r26,%r1
x238: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e2t04a0 ! sh3add %r1,%r1,%r1
x239: zdep %r26,27,28,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0ma0 ! sh1add %r1,%r1,%r1
x240: sh3add %r26,%r26,%r1 ! add %r1,%r26,%r1 ! b e_8t0 ! sh1add %r1,%r1,%r1
x241: sh3add %r26,%r26,%r1 ! add %r1,%r26,%r1 ! b e_8t0a0 ! sh1add %r1,%r1,%r1
x242: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_2t0 ! sh3add %r1,%r26,%r1
x243: sh3add %r26,%r26,%r1 ! sh3add %r1,%r1,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
x244: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_4t0 ! sh2add %r1,%r26,%r1
x245: sh3add %r26,0,%r1 ! sh1add %r1,%r1,%r1 ! b e_5t0 ! sh1add %r1,%r26,%r1
x246: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r1,%r1
x247: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_2t0a0 ! sh1add %r1,%r1,%r1
x248: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
x249: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
x250: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_2t0 ! sh2add %r1,%r1,%r1
x251: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_2t0a0 ! sh2add %r1,%r1,%r1
x252: zdep %r26,25,26,%r1 ! sub %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
x253: zdep %r26,25,26,%r1 ! sub %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
x254: zdep %r26,24,25,%r1 ! sub %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x255: zdep %r26,23,24,%r1 ! comb,<> %r25,0,l0 ! sub %r1,%r26,%r1 ! b,n ret_t0
;1040 insts before this.
; Shared multiply-table epilogues.  Naming convention (as used by the
; entries above): e_Nt0 adds N*%r1 into the accumulator %r29; a trailing
; "a0"/"2a0"/"4a0" first adds 1x/2x/4x %r26 into %r1; "ma0" subtracts
; %r26.  When more multiplier bytes remain (%r25 != 0) control continues
; at l0/l1/l2 (defined earlier in the file, outside this chunk);
; otherwise the stub returns through %r31.
ret_t0: bv 0(r31)
e_t0: add %r29,%r1,%r29
e_shift: comb,<> %r25,0,l2
	zdep %r26,23,24,%r26 ; %r26 <<= 8 ***********
	bv,n 0(r31)
e_t0ma0: comb,<> %r25,0,l0
	sub %r1,%r26,%r1
	bv 0(r31)
	add %r29,%r1,%r29
e_t0a0: comb,<> %r25,0,l0
	add %r1,%r26,%r1
	bv 0(r31)
	add %r29,%r1,%r29
e_t02a0: comb,<> %r25,0,l0
	sh1add %r26,%r1,%r1
	bv 0(r31)
	add %r29,%r1,%r29
e_t04a0: comb,<> %r25,0,l0
	sh2add %r26,%r1,%r1
	bv 0(r31)
	add %r29,%r1,%r29
e_2t0: comb,<> %r25,0,l1
	sh1add %r1,%r29,%r29
	bv,n 0(r31)
e_2t0a0: comb,<> %r25,0,l0
	sh1add %r1,%r26,%r1
	bv 0(r31)
	add %r29,%r1,%r29
e2t04a0: sh1add %r26,%r1,%r1
	comb,<> %r25,0,l1
	sh1add %r1,%r29,%r29
	bv,n 0(r31)
e_3t0: comb,<> %r25,0,l0
	sh1add %r1,%r1,%r1
	bv 0(r31)
	add %r29,%r1,%r29
e_4t0: comb,<> %r25,0,l1
	sh2add %r1,%r29,%r29
	bv,n 0(r31)
e_4t0a0: comb,<> %r25,0,l0
	sh2add %r1,%r26,%r1
	bv 0(r31)
	add %r29,%r1,%r29
e4t08a0: sh1add %r26,%r1,%r1
	comb,<> %r25,0,l1
	sh2add %r1,%r29,%r29
	bv,n 0(r31)
e_5t0: comb,<> %r25,0,l0
	sh2add %r1,%r1,%r1
	bv 0(r31)
	add %r29,%r1,%r29
e_8t0: comb,<> %r25,0,l1
	sh3add %r1,%r29,%r29
	bv,n 0(r31)
e_8t0a0: comb,<> %r25,0,l0
	sh3add %r1,%r26,%r1
	bv 0(r31)
	add %r29,%r1,%r29
	.exit
	.procend
	.end
; $$divI / $$divoI: 32-bit signed integer division millicode.
; Inputs: arg0 = dividend, arg1 = divisor (register aliases declared
; earlier in this file); the quotient is produced in retreg and the
; routine returns through r31.  Divisors 0..15 are dispatched through
; the small_divisor branch table ($$divI_N helpers); the general path
; runs 32 conditional divide steps (ds/addc) and fixes the quotient
; sign afterwards.  $$divoI additionally traps on the overflow case
; dividend == 0x80000000 && divisor == -1 (see negative1).
	.import $$divI_2,millicode
	.import $$divI_3,millicode
	.import $$divI_4,millicode
	.import $$divI_5,millicode
	.import $$divI_6,millicode
	.import $$divI_7,millicode
	.import $$divI_8,millicode
	.import $$divI_9,millicode
	.import $$divI_10,millicode
	.import $$divI_12,millicode
	.import $$divI_14,millicode
	.import $$divI_15,millicode
	.export $$divI,millicode
	.export $$divoI,millicode
$$divoI:
	.proc
	.callinfo millicode
	.entry
	comib,=,n -1,arg1,negative1 ; when divisor == -1
$$divI:
	comib,>>=,n 15,arg1,small_divisor
	add,>= 0,arg0,retreg ; move dividend, if retreg < 0,
normal1:
	sub 0,retreg,retreg ; make it positive
	sub 0,arg1,temp ; clear carry,
	; negate the divisor
	ds 0,temp,0 ; set V-bit to the comple-
	; ment of the divisor sign
	add retreg,retreg,retreg ; shift msb bit into carry
	ds r0,arg1,temp ; 1st divide step, if no carry
	addc retreg,retreg,retreg ; shift retreg with/into carry
	ds temp,arg1,temp ; 2nd divide step
	addc retreg,retreg,retreg ; shift retreg with/into carry
	ds temp,arg1,temp ; 3rd divide step
	addc retreg,retreg,retreg ; shift retreg with/into carry
	ds temp,arg1,temp ; 4th divide step
	addc retreg,retreg,retreg ; shift retreg with/into carry
	ds temp,arg1,temp ; 5th divide step
	addc retreg,retreg,retreg ; shift retreg with/into carry
	ds temp,arg1,temp ; 6th divide step
	addc retreg,retreg,retreg ; shift retreg with/into carry
	ds temp,arg1,temp ; 7th divide step
	addc retreg,retreg,retreg ; shift retreg with/into carry
	ds temp,arg1,temp ; 8th divide step
	addc retreg,retreg,retreg ; shift retreg with/into carry
	ds temp,arg1,temp ; 9th divide step
	addc retreg,retreg,retreg ; shift retreg with/into carry
	ds temp,arg1,temp ; 10th divide step
	addc retreg,retreg,retreg ; shift retreg with/into carry
	ds temp,arg1,temp ; 11th divide step
	addc retreg,retreg,retreg ; shift retreg with/into carry
	ds temp,arg1,temp ; 12th divide step
	addc retreg,retreg,retreg ; shift retreg with/into carry
	ds temp,arg1,temp ; 13th divide step
	addc retreg,retreg,retreg ; shift retreg with/into carry
	ds temp,arg1,temp ; 14th divide step
	addc retreg,retreg,retreg ; shift retreg with/into carry
	ds temp,arg1,temp ; 15th divide step
	addc retreg,retreg,retreg ; shift retreg with/into carry
	ds temp,arg1,temp ; 16th divide step
	addc retreg,retreg,retreg ; shift retreg with/into carry
	ds temp,arg1,temp ; 17th divide step
	addc retreg,retreg,retreg ; shift retreg with/into carry
	ds temp,arg1,temp ; 18th divide step
	addc retreg,retreg,retreg ; shift retreg with/into carry
	ds temp,arg1,temp ; 19th divide step
	addc retreg,retreg,retreg ; shift retreg with/into carry
	ds temp,arg1,temp ; 20th divide step
	addc retreg,retreg,retreg ; shift retreg with/into carry
	ds temp,arg1,temp ; 21st divide step
	addc retreg,retreg,retreg ; shift retreg with/into carry
	ds temp,arg1,temp ; 22nd divide step
	addc retreg,retreg,retreg ; shift retreg with/into carry
	ds temp,arg1,temp ; 23rd divide step
	addc retreg,retreg,retreg ; shift retreg with/into carry
	ds temp,arg1,temp ; 24th divide step
	addc retreg,retreg,retreg ; shift retreg with/into carry
	ds temp,arg1,temp ; 25th divide step
	addc retreg,retreg,retreg ; shift retreg with/into carry
	ds temp,arg1,temp ; 26th divide step
	addc retreg,retreg,retreg ; shift retreg with/into carry
	ds temp,arg1,temp ; 27th divide step
	addc retreg,retreg,retreg ; shift retreg with/into carry
	ds temp,arg1,temp ; 28th divide step
	addc retreg,retreg,retreg ; shift retreg with/into carry
	ds temp,arg1,temp ; 29th divide step
	addc retreg,retreg,retreg ; shift retreg with/into carry
	ds temp,arg1,temp ; 30th divide step
	addc retreg,retreg,retreg ; shift retreg with/into carry
	ds temp,arg1,temp ; 31st divide step
	addc retreg,retreg,retreg ; shift retreg with/into carry
	ds temp,arg1,temp ; 32nd divide step,
	addc retreg,retreg,retreg ; shift last retreg bit into retreg
	xor,>= arg0,arg1,0 ; get correct sign of quotient
	sub 0,retreg,retreg ; based on operand signs
	bv,n 0(r31)
	nop
;______________________________________________________________________
small_divisor:
	blr,n arg1,r0
	nop
; table for divisor == 0,1, ... ,15
	addit,= 0,arg1,r0 ; trap if divisor == 0
	nop
	bv 0(r31) ; divisor == 1
	copy arg0,retreg
	b,n $$divI_2 ; divisor == 2
	nop
	b,n $$divI_3 ; divisor == 3
	nop
	b,n $$divI_4 ; divisor == 4
	nop
	b,n $$divI_5 ; divisor == 5
	nop
	b,n $$divI_6 ; divisor == 6
	nop
	b,n $$divI_7 ; divisor == 7
	nop
	b,n $$divI_8 ; divisor == 8
	nop
	b,n $$divI_9 ; divisor == 9
	nop
	b,n $$divI_10 ; divisor == 10
	nop
	b normal1 ; divisor == 11
	add,>= 0,arg0,retreg
	b,n $$divI_12 ; divisor == 12
	nop
	b normal1 ; divisor == 13
	add,>= 0,arg0,retreg
	b,n $$divI_14 ; divisor == 14
	nop
	b,n $$divI_15 ; divisor == 15
	nop
;______________________________________________________________________
negative1:
	sub 0,arg0,retreg ; result is negation of dividend
	bv 0(r31)
	addo arg0,arg1,r0 ; trap iff dividend==0x80000000 && divisor==-1
	.exit
	.procend
	.subspa $LIT$
___hp_free_copyright:
	.export ___hp_free_copyright,data
	.align 4
	.string "(c) Copyright 1986 HEWLETT-PACKARD COMPANY\x0aTo anyone who acknowledges that this file is provided \"AS IS\"\x0awithout any express or implied warranty:\x0a permission to use, copy, modify, and distribute this file\x0afor any purpose is hereby granted without fee, provided that\x0athe above copyright notice and this notice appears in all\x0acopies, and that the name of Hewlett-Packard Company not be\x0aused in advertising or publicity pertaining to distribution\x0aof the software without specific, written prior permission.\x0aHewlett-Packard Company makes no representations about the\x0asuitability of this software for any purpose.\x0a\x00"
	.align 4
	.end
