URL
https://opencores.org/ocsvn/openrisc_me/openrisc_me/trunk
Subversion Repositories openrisc_me
[/] [openrisc/] [trunk/] [gnu-src/] [gcc-4.5.1/] [gcc/] [config/] [xtensa/] [ieee754-df.S] - Rev 282
Compare with Previous | Blame | View Log
/* IEEE-754 double-precision functions for XtensaCopyright (C) 2006, 2007, 2009 Free Software Foundation, Inc.Contributed by Bob Wilson (bwilson@tensilica.com) at Tensilica.This file is part of GCC.GCC is free software; you can redistribute it and/or modify itunder the terms of the GNU General Public License as published bythe Free Software Foundation; either version 3, or (at your option)any later version.GCC is distributed in the hope that it will be useful, but WITHOUTANY WARRANTY; without even the implied warranty of MERCHANTABILITYor FITNESS FOR A PARTICULAR PURPOSE. See the GNU General PublicLicense for more details.Under Section 7 of GPL version 3, you are granted additionalpermissions described in the GCC Runtime Library Exception, version3.1, as published by the Free Software Foundation.You should have received a copy of the GNU General Public License anda copy of the GCC Runtime Library Exception along with this program;see the files COPYING3 and COPYING.RUNTIME respectively. If not, see<http://www.gnu.org/licenses/>. */#ifdef __XTENSA_EB__#define xh a2#define xl a3#define yh a4#define yl a5#else#define xh a3#define xl a2#define yh a5#define yl a4#endif/* Warning! The branch displacements for some Xtensa branch instructionsare quite small, and this code has been carefully laid out to keepbranch targets in range. If you change anything, be sure to check thatthe assembler is not relaxing anything to branch over a jump. */#ifdef L_negdf2.align 4.global __negdf2.type __negdf2, @function__negdf2:leaf_entry sp, 16movi a4, 0x80000000xor xh, xh, a4leaf_return#endif /* L_negdf2 */#ifdef L_addsubdf3/* Addition */__adddf3_aux:/* Handle NaNs and Infinities. (This code is placed before thestart of the function just to keep it in range of the limitedbranch displacements.) */.Ladd_xnan_or_inf:/* If y is neither Infinity nor NaN, return x. */bnall yh, a6, 1f/* If x is a NaN, return it. Otherwise, return y. */slli a7, xh, 12or a7, a7, xlbeqz a7, .Ladd_ynan_or_inf1: leaf_return.Ladd_ynan_or_inf:/* Return y. */mov xh, yhmov xl, ylleaf_return.Ladd_opposite_signs:/* Operand signs differ. Do a subtraction. */slli a7, a6, 11xor yh, yh, a7j .Lsub_same_sign.align 4.global __adddf3.type __adddf3, @function__adddf3:leaf_entry sp, 16movi a6, 0x7ff00000/* Check if the two operands have the same sign. */xor a7, xh, yhbltz a7, .Ladd_opposite_signs.Ladd_same_sign:/* Check if either exponent == 0x7ff (i.e., NaN or Infinity). */ball xh, a6, .Ladd_xnan_or_infball yh, a6, .Ladd_ynan_or_inf/* Compare the exponents. The smaller operand will be shiftedright by the exponent difference and added to the largerone. */extui a7, xh, 20, 12extui a8, yh, 20, 12bltu a7, a8, .Ladd_shiftx.Ladd_shifty:/* Check if the smaller (or equal) exponent is zero. */bnone yh, a6, .Ladd_yexpzero/* Replace yh sign/exponent with 0x001. */or yh, yh, a6slli yh, yh, 11srli yh, yh, 11.Ladd_yexpdiff:/* Compute the exponent difference. Optimize for difference < 32. */sub a10, a7, a8bgeui a10, 32, .Ladd_bigshifty/* Shift yh/yl right by the exponent difference. Any bits that areshifted out of yl are saved in a9 for rounding the result. */ssr a10movi a9, 0src a9, yl, a9src yl, yh, ylsrl yh, yh.Ladd_addy:/* Do the 64-bit addition. */add xl, xl, yladd xh, xh, yhbgeu xl, yl, 1faddi xh, xh, 11:/* Check if the add overflowed into the exponent. */extui a10, xh, 20, 12beq a10, a7, .Ladd_roundmov a8, a7j .Ladd_carry.Ladd_yexpzero:/* y is a subnormal value. Replace its sign/exponent with zero,i.e., no implicit "1.0", and increment the apparent exponentbecause subnormals behave as if they had the minimum (nonzero)exponent. Test for the case when both exponents are zero. */slli yh, yh, 12srli yh, yh, 12bnone xh, a6, .Ladd_bothexpzeroaddi a8, a8, 1j .Ladd_yexpdiff.Ladd_bothexpzero:/* Both exponents are zero. Handle this as a special case. Thereis no need to shift or round, and the normal code for handlinga carry into the exponent field will not work because itassumes there is an implicit "1.0" that needs to be added. */add xl, xl, yladd xh, xh, yhbgeu xl, yl, 1faddi xh, xh, 11: leaf_return.Ladd_bigshifty:/* Exponent difference > 64 -- just return the bigger value. */bgeui a10, 64, 1b/* Shift yh/yl right by the exponent difference. Any bits that areshifted out are saved in a9 for rounding the result. */ssr a10sll a11, yl /* lost bits shifted out of yl */src a9, yh, ylsrl yl, yhmovi yh, 0beqz a11, .Ladd_addyor a9, a9, a10 /* any positive, nonzero value will work */j .Ladd_addy.Ladd_xexpzero:/* Same as "yexpzero" except skip handling the case when bothexponents are zero. */slli xh, xh, 12srli xh, xh, 12addi a7, a7, 1j .Ladd_xexpdiff.Ladd_shiftx:/* Same thing as the "shifty" code, but with x and y swapped. Also,because the exponent difference is always nonzero in this version,the shift sequence can use SLL and skip loading a constant zero. */bnone xh, a6, .Ladd_xexpzeroor xh, xh, a6slli xh, xh, 11srli xh, xh, 11.Ladd_xexpdiff:sub a10, a8, a7bgeui a10, 32, .Ladd_bigshiftxssr a10sll a9, xlsrc xl, xh, xlsrl xh, xh.Ladd_addx:add xl, xl, yladd xh, xh, yhbgeu xl, yl, 1faddi xh, xh, 11:/* Check if the add overflowed into the exponent. */extui a10, xh, 20, 12bne a10, a8, .Ladd_carry.Ladd_round:/* Round up if the leftover fraction is >= 1/2. */bgez a9, 1faddi xl, xl, 1beqz xl, .Ladd_roundcarry/* Check if the leftover fraction is exactly 1/2. */slli a9, a9, 1beqz a9, .Ladd_exactlyhalf1: leaf_return.Ladd_bigshiftx:/* Mostly the same thing as "bigshifty".... */bgeui a10, 64, .Ladd_returnyssr a10sll a11, xlsrc a9, xh, xlsrl xl, xhmovi xh, 0beqz a11, .Ladd_addxor a9, a9, a10j .Ladd_addx.Ladd_returny:mov xh, yhmov xl, ylleaf_return.Ladd_carry:/* The addition has overflowed into the exponent field, so thevalue needs to be renormalized. The mantissa of the resultcan be recovered by subtracting the original exponent andadding 0x100000 (which is the explicit "1.0" for themantissa of the non-shifted operand -- the "1.0" for theshifted operand was already added). The mantissa can thenbe shifted right by one bit. The explicit "1.0" of theshifted mantissa then needs to be replaced by the exponent,incremented by one to account for the normalizing shift.It is faster to combine these operations: do the shift firstand combine the additions and subtractions. If x is theoriginal exponent, the result is:shifted mantissa - (x << 19) + (1 << 19) + (x << 20)or:shifted mantissa + ((x + 1) << 19)Note that the exponent is incremented here by leaving theexplicit "1.0" of the mantissa in the exponent field. *//* Shift xh/xl right by one bit. Save the lsb of xl. */mov a10, xlssai 1src xl, xh, xlsrl xh, xh/* See explanation above. The original exponent is in a8. */addi a8, a8, 1slli a8, a8, 19add xh, xh, a8/* Return an Infinity if the exponent overflowed. */ball xh, a6, .Ladd_infinity/* Same thing as the "round" code except the msb of the leftoverfraction is bit 0 of a10, with the rest of the fraction in a9. */bbci.l a10, 0, 1faddi xl, xl, 1beqz xl, .Ladd_roundcarrybeqz a9, .Ladd_exactlyhalf1: leaf_return.Ladd_infinity:/* Clear the mantissa. */movi xl, 0srli xh, xh, 20slli xh, xh, 20/* The sign bit may have been lost in a carry-out. Put it back. */slli a8, a8, 1or xh, xh, a8leaf_return.Ladd_exactlyhalf:/* Round down to the nearest even value. */srli xl, xl, 1slli xl, xl, 1leaf_return.Ladd_roundcarry:/* xl is always zero when the rounding increment overflows, sothere's no need to round it to an even value. */addi xh, xh, 1/* Overflow to the exponent is OK. */leaf_return/* Subtraction */__subdf3_aux:/* Handle NaNs and Infinities. (This code is placed before thestart of the function just to keep it in range of the limitedbranch displacements.) */.Lsub_xnan_or_inf:/* If y is neither Infinity nor NaN, return x. */bnall yh, a6, 1f/* Both x and y are either NaN or Inf, so the result is NaN. */movi a4, 0x80000 /* make it a quiet NaN */or xh, xh, a41: leaf_return.Lsub_ynan_or_inf:/* Negate y and return it. */slli a7, a6, 11xor xh, yh, a7mov xl, ylleaf_return.Lsub_opposite_signs:/* Operand signs differ. Do an addition. */slli a7, a6, 11xor yh, yh, a7j .Ladd_same_sign.align 4.global __subdf3.type __subdf3, @function__subdf3:leaf_entry sp, 16movi a6, 0x7ff00000/* Check if the two operands have the same sign. */xor a7, xh, yhbltz a7, .Lsub_opposite_signs.Lsub_same_sign:/* Check if either exponent == 0x7ff (i.e., NaN or Infinity). */ball xh, a6, .Lsub_xnan_or_infball yh, a6, .Lsub_ynan_or_inf/* Compare the operands. In contrast to addition, the entirevalue matters here. */extui a7, xh, 20, 11extui a8, yh, 20, 11bltu xh, yh, .Lsub_xsmallerbeq xh, yh, .Lsub_compare_low.Lsub_ysmaller:/* Check if the smaller (or equal) exponent is zero. */bnone yh, a6, .Lsub_yexpzero/* Replace yh sign/exponent with 0x001. */or yh, yh, a6slli yh, yh, 11srli yh, yh, 11.Lsub_yexpdiff:/* Compute the exponent difference. Optimize for difference < 32. */sub a10, a7, a8bgeui a10, 32, .Lsub_bigshifty/* Shift yh/yl right by the exponent difference. Any bits that areshifted out of yl are saved in a9 for rounding the result. */ssr a10movi a9, 0src a9, yl, a9src yl, yh, ylsrl yh, yh.Lsub_suby:/* Do the 64-bit subtraction. */sub xh, xh, yhbgeu xl, yl, 1faddi xh, xh, -11: sub xl, xl, yl/* Subtract the leftover bits in a9 from zero and propagate anyborrow from xh/xl. */neg a9, a9beqz a9, 1faddi a5, xh, -1moveqz xh, a5, xladdi xl, xl, -11:/* Check if the subtract underflowed into the exponent. */extui a10, xh, 20, 11beq a10, a7, .Lsub_roundj .Lsub_borrow.Lsub_compare_low:/* The high words are equal. Compare the low words. */bltu xl, yl, .Lsub_xsmallerbltu yl, xl, .Lsub_ysmaller/* The operands are equal. Return 0.0. */movi xh, 0movi xl, 01: leaf_return.Lsub_yexpzero:/* y is a subnormal value. Replace its sign/exponent with zero,i.e., no implicit "1.0". Unless x is also a subnormal, incrementy's apparent exponent because subnormals behave as if they hadthe minimum (nonzero) exponent. */slli yh, yh, 12srli yh, yh, 12bnone xh, a6, .Lsub_yexpdiffaddi a8, a8, 1j .Lsub_yexpdiff.Lsub_bigshifty:/* Exponent difference > 64 -- just return the bigger value. */bgeui a10, 64, 1b/* Shift yh/yl right by the exponent difference. Any bits that areshifted out are saved in a9 for rounding the result. */ssr a10sll a11, yl /* lost bits shifted out of yl */src a9, yh, ylsrl yl, yhmovi yh, 0beqz a11, .Lsub_subyor a9, a9, a10 /* any positive, nonzero value will work */j .Lsub_suby.Lsub_xsmaller:/* Same thing as the "ysmaller" code, but with x and y swapped andwith y negated. */bnone xh, a6, .Lsub_xexpzeroor xh, xh, a6slli xh, xh, 11srli xh, xh, 11.Lsub_xexpdiff:sub a10, a8, a7bgeui a10, 32, .Lsub_bigshiftxssr a10movi a9, 0src a9, xl, a9src xl, xh, xlsrl xh, xh/* Negate y. */slli a11, a6, 11xor yh, yh, a11.Lsub_subx:sub xl, yl, xlsub xh, yh, xhbgeu yl, xl, 1faddi xh, xh, -11:/* Subtract the leftover bits in a9 from zero and propagate anyborrow from xh/xl. */neg a9, a9beqz a9, 1faddi a5, xh, -1moveqz xh, a5, xladdi xl, xl, -11:/* Check if the subtract underflowed into the exponent. */extui a10, xh, 20, 11bne a10, a8, .Lsub_borrow.Lsub_round:/* Round up if the leftover fraction is >= 1/2. */bgez a9, 1faddi xl, xl, 1beqz xl, .Lsub_roundcarry/* Check if the leftover fraction is exactly 1/2. */slli a9, a9, 1beqz a9, .Lsub_exactlyhalf1: leaf_return.Lsub_xexpzero:/* Same as "yexpzero". */slli xh, xh, 12srli xh, xh, 12bnone yh, a6, .Lsub_xexpdiffaddi a7, a7, 1j .Lsub_xexpdiff.Lsub_bigshiftx:/* Mostly the same thing as "bigshifty", but with the sign bit of theshifted value set so that the subsequent subtraction flips thesign of y. */bgeui a10, 64, .Lsub_returnyssr a10sll a11, xlsrc a9, xh, xlsrl xl, xhslli xh, a6, 11 /* set sign bit of xh */beqz a11, .Lsub_subxor a9, a9, a10j .Lsub_subx.Lsub_returny:/* Negate and return y. */slli a7, a6, 11xor xh, yh, a7mov xl, ylleaf_return.Lsub_borrow:/* The subtraction has underflowed into the exponent field, so thevalue needs to be renormalized. Shift the mantissa left asneeded to remove any leading zeros and adjust the exponentaccordingly. If the exponent is not large enough to removeall the leading zeros, the result will be a subnormal value. */slli a8, xh, 12beqz a8, .Lsub_xhzerodo_nsau a6, a8, a7, a11srli a8, a8, 12bge a6, a10, .Lsub_subnormaladdi a6, a6, 1.Lsub_shift_lt32:/* Shift the mantissa (a8/xl/a9) left by a6. */ssl a6src a8, a8, xlsrc xl, xl, a9sll a9, a9/* Combine the shifted mantissa with the sign and exponent,decrementing the exponent by a6. (The exponent has alreadybeen decremented by one due to the borrow from the subtraction,but adding the mantissa will increment the exponent by one.) */srli xh, xh, 20sub xh, xh, a6slli xh, xh, 20add xh, xh, a8j .Lsub_round.Lsub_exactlyhalf:/* Round down to the nearest even value. */srli xl, xl, 1slli xl, xl, 1leaf_return.Lsub_roundcarry:/* xl is always zero when the rounding increment overflows, sothere's no need to round it to an even value. */addi xh, xh, 1/* Overflow to the exponent is OK. */leaf_return.Lsub_xhzero:/* When normalizing the result, all the mantissa bits in the highword are zero. Shift by "20 + (leading zero count of xl) + 1". */do_nsau a6, xl, a7, a11addi a6, a6, 21blt a10, a6, .Lsub_subnormal.Lsub_normalize_shift:bltui a6, 32, .Lsub_shift_lt32ssl a6src a8, xl, a9sll xl, a9movi a9, 0srli xh, xh, 20sub xh, xh, a6slli xh, xh, 20add xh, xh, a8j .Lsub_round.Lsub_subnormal:/* The exponent is too small to shift away all the leading zeros.Set a6 to the current exponent (which has already beendecremented by the borrow) so that the exponent of the resultwill be zero. Do not add 1 to a6 in this case, because: (1)adding the mantissa will not increment the exponent, so there isno need to subtract anything extra from the exponent tocompensate, and (2) the effective exponent of a subnormal is 1not 0 so the shift amount must be 1 smaller than normal. */mov a6, a10j .Lsub_normalize_shift#endif /* L_addsubdf3 */#ifdef L_muldf3/* Multiplication */#if !XCHAL_HAVE_MUL16 && !XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MAC16#define XCHAL_NO_MUL 1#endif__muldf3_aux:/* Handle unusual cases (zeros, subnormals, NaNs and Infinities).(This code is placed before the start of the function just tokeep it in range of the limited branch displacements.) */.Lmul_xexpzero:/* Clear the sign bit of x. */slli xh, xh, 1srli xh, xh, 1/* If x is zero, return zero. */or a10, xh, xlbeqz a10, .Lmul_return_zero/* Normalize x. Adjust the exponent in a8. */beqz xh, .Lmul_xh_zerodo_nsau a10, xh, a11, a12addi a10, a10, -11ssl a10src xh, xh, xlsll xl, xlmovi a8, 1sub a8, a8, a10j .Lmul_xnormalized.Lmul_xh_zero:do_nsau a10, xl, a11, a12addi a10, a10, -11movi a8, -31sub a8, a8, a10ssl a10bltz a10, .Lmul_xl_srlsll xh, xlmovi xl, 0j .Lmul_xnormalized.Lmul_xl_srl:srl xh, xlsll xl, xlj .Lmul_xnormalized.Lmul_yexpzero:/* Clear the sign bit of y. */slli yh, yh, 1srli yh, yh, 1/* If y is zero, return zero. */or a10, yh, ylbeqz a10, .Lmul_return_zero/* Normalize y. Adjust the exponent in a9. */beqz yh, .Lmul_yh_zerodo_nsau a10, yh, a11, a12addi a10, a10, -11ssl a10src yh, yh, ylsll yl, ylmovi a9, 1sub a9, a9, a10j .Lmul_ynormalized.Lmul_yh_zero:do_nsau a10, yl, a11, a12addi a10, a10, -11movi a9, -31sub a9, a9, a10ssl a10bltz a10, .Lmul_yl_srlsll yh, ylmovi yl, 0j .Lmul_ynormalized.Lmul_yl_srl:srl yh, ylsll yl, ylj .Lmul_ynormalized.Lmul_return_zero:/* Return zero with the appropriate sign bit. */srli xh, a7, 31slli xh, xh, 31movi xl, 0j .Lmul_done.Lmul_xnan_or_inf:/* If y is zero, return NaN. */bnez yl, 1fslli a8, yh, 1bnez a8, 1fmovi a4, 0x80000 /* make it a quiet NaN */or xh, xh, a4j .Lmul_done1:/* If y is NaN, return y. */bnall yh, a6, .Lmul_returnxslli a8, yh, 12or a8, a8, ylbeqz a8, .Lmul_returnx.Lmul_returny:mov xh, yhmov xl, yl.Lmul_returnx:/* Set the sign bit and return. */extui a7, a7, 31, 1slli xh, xh, 1ssai 1src xh, a7, xhj .Lmul_done.Lmul_ynan_or_inf:/* If x is zero, return NaN. */bnez xl, .Lmul_returnyslli a8, xh, 1bnez a8, .Lmul_returnymovi a7, 0x80000 /* make it a quiet NaN */or xh, yh, a7j .Lmul_done.align 4.global __muldf3.type __muldf3, @function__muldf3:#if __XTENSA_CALL0_ABI__leaf_entry sp, 32addi sp, sp, -32s32i a12, sp, 16s32i a13, sp, 20s32i a14, sp, 24s32i a15, sp, 28#elif XCHAL_NO_MUL/* This is not really a leaf function; allocate enough stack spaceto allow CALL12s to a helper function. */leaf_entry sp, 64#elseleaf_entry sp, 32#endifmovi a6, 0x7ff00000/* Get the sign of the result. */xor a7, xh, yh/* Check for NaN and infinity. */ball xh, a6, .Lmul_xnan_or_infball yh, a6, .Lmul_ynan_or_inf/* Extract the exponents. */extui a8, xh, 20, 11extui a9, yh, 20, 11beqz a8, .Lmul_xexpzero.Lmul_xnormalized:beqz a9, .Lmul_yexpzero.Lmul_ynormalized:/* Add the exponents. */add a8, a8, a9/* Replace sign/exponent fields with explicit "1.0". */movi a10, 0x1fffffor xh, xh, a6and xh, xh, a10or yh, yh, a6and yh, yh, a10/* Multiply 64x64 to 128 bits. The result ends up in xh/xl/a6.The least-significant word of the result is thrown away exceptthat if it is nonzero, the lsb of a6 is set to 1. */#if XCHAL_HAVE_MUL32_HIGH/* Compute a6 with any carry-outs in a10. */movi a10, 0mull a6, xl, yhmull a11, xh, yladd a6, a6, a11bgeu a6, a11, 1faddi a10, a10, 11:muluh a11, xl, yladd a6, a6, a11bgeu a6, a11, 1faddi a10, a10, 11:/* If the low word of the result is nonzero, set the lsb of a6. */mull a11, xl, ylbeqz a11, 1fmovi a9, 1or a6, a6, a91:/* Compute xl with any carry-outs in a9. */movi a9, 0mull a11, xh, yhadd a10, a10, a11bgeu a10, a11, 1faddi a9, a9, 11:muluh a11, xh, yladd a10, a10, a11bgeu a10, a11, 1faddi a9, a9, 11:muluh xl, xl, yhadd xl, xl, a10bgeu xl, a10, 1faddi a9, a9, 11:/* Compute xh. */muluh xh, xh, yhadd xh, xh, a9#else /* ! XCHAL_HAVE_MUL32_HIGH *//* Break the inputs into 16-bit chunks and compute 16 32-bit partialproducts. These partial products are:0 xll * yll1 xll * ylh2 xlh * yll3 xll * yhl4 xlh * ylh5 xhl * yll6 xll * yhh7 xlh * yhl8 xhl * ylh9 xhh * yll10 xlh * yhh11 xhl * yhl12 xhh * ylh13 xhl * yhh14 xhh * yhl15 xhh * yhhwhere the input chunks are (hh, hl, lh, ll). If using the Mul16or Mul32 multiplier options, these input chunks must be stored inseparate registers. For Mac16, the UMUL.AA.* opcodes can specifythat the inputs come from either half of the registers, so thereis no need to shift them out ahead of time. If there is nomultiply hardware, the 16-bit chunks can be extracted when settingup the arguments to the separate multiply function. *//* Save a7 since it is needed to hold a temporary value. */s32i a7, sp, 4#if __XTENSA_CALL0_ABI__ && XCHAL_NO_MUL/* Calling a separate multiply function will clobber a0 and requiresuse of a8 as a temporary, so save those values now. (The functionuses a custom ABI so nothing else needs to be saved.) */s32i a0, sp, 0s32i a8, sp, 8#endif#if XCHAL_HAVE_MUL16 || XCHAL_HAVE_MUL32#define xlh a12#define ylh a13#define xhh a14#define yhh a15/* Get the high halves of the inputs into registers. */srli xlh, xl, 16srli ylh, yl, 16srli xhh, xh, 16srli yhh, yh, 16#define xll xl#define yll yl#define xhl xh#define yhl yh#if XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MUL16/* Clear the high halves of the inputs. This does not matterfor MUL16 because the high bits are ignored. */extui xl, xl, 0, 16extui xh, xh, 0, 16extui yl, yl, 0, 16extui yh, yh, 0, 16#endif#endif /* MUL16 || MUL32 */#if XCHAL_HAVE_MUL16#define do_mul(dst, xreg, xhalf, yreg, yhalf) \mul16u dst, xreg ## xhalf, yreg ## yhalf#elif XCHAL_HAVE_MUL32#define do_mul(dst, xreg, xhalf, yreg, yhalf) \mull dst, xreg ## xhalf, yreg ## yhalf#elif XCHAL_HAVE_MAC16/* The preprocessor insists on inserting a space when concatenating aftera period in the definition of do_mul below. These macros are a workaroundusing underscores instead of periods when doing the concatenation. */#define umul_aa_ll umul.aa.ll#define umul_aa_lh umul.aa.lh#define umul_aa_hl umul.aa.hl#define umul_aa_hh umul.aa.hh#define do_mul(dst, xreg, xhalf, yreg, yhalf) \umul_aa_ ## xhalf ## yhalf xreg, yreg; \rsr dst, ACCLO#else /* no multiply hardware */#define set_arg_l(dst, src) \extui dst, src, 0, 16#define set_arg_h(dst, src) \srli dst, src, 16#if __XTENSA_CALL0_ABI__#define do_mul(dst, xreg, xhalf, yreg, yhalf) \set_arg_ ## xhalf (a13, xreg); \set_arg_ ## yhalf (a14, yreg); \call0 .Lmul_mulsi3; \mov dst, a12#else#define do_mul(dst, xreg, xhalf, yreg, yhalf) \set_arg_ ## xhalf (a14, xreg); \set_arg_ ## yhalf (a15, yreg); \call12 .Lmul_mulsi3; \mov dst, a14#endif /* __XTENSA_CALL0_ABI__ */#endif /* no multiply hardware *//* Add pp1 and pp2 into a10 with carry-out in a9. */do_mul(a10, xl, l, yl, h) /* pp 1 */do_mul(a11, xl, h, yl, l) /* pp 2 */movi a9, 0add a10, a10, a11bgeu a10, a11, 1faddi a9, a9, 11:/* Initialize a6 with a9/a10 shifted into position. Note thatthis value can be safely incremented without any carry-outs. */ssai 16src a6, a9, a10/* Compute the low word into a10. */do_mul(a11, xl, l, yl, l) /* pp 0 */sll a10, a10add a10, a10, a11bgeu a10, a11, 1faddi a6, a6, 11:/* Compute the contributions of pp0-5 to a6, with carry-outs in a9.This is good enough to determine the low half of a6, so that anynonzero bits from the low word of the result can be collapsedinto a6, freeing up a register. */movi a9, 0do_mul(a11, xl, l, yh, l) /* pp 3 */add a6, a6, a11bgeu a6, a11, 1faddi a9, a9, 11:do_mul(a11, xl, h, yl, h) /* pp 4 */add a6, a6, a11bgeu a6, a11, 1faddi a9, a9, 11:do_mul(a11, xh, l, yl, l) /* pp 5 */add a6, a6, a11bgeu a6, a11, 1faddi a9, a9, 11:/* Collapse any nonzero bits from the low word into a6. */beqz a10, 1fmovi a11, 1or a6, a6, a111:/* Add pp6-9 into a11 with carry-outs in a10. */do_mul(a7, xl, l, yh, h) /* pp 6 */do_mul(a11, xh, h, yl, l) /* pp 9 */movi a10, 0add a11, a11, a7bgeu a11, a7, 1faddi a10, a10, 11:do_mul(a7, xl, h, yh, l) /* pp 7 */add a11, a11, a7bgeu a11, a7, 1faddi a10, a10, 11:do_mul(a7, xh, l, yl, h) /* pp 8 */add a11, a11, a7bgeu a11, a7, 1faddi a10, a10, 11:/* Shift a10/a11 into position, and add low half of a11 to a6. */src a10, a10, a11add a10, a10, a9sll a11, a11add a6, a6, a11bgeu a6, a11, 1faddi a10, a10, 11:/* Add pp10-12 into xl with carry-outs in a9. */movi a9, 0do_mul(xl, xl, h, yh, h) /* pp 10 */add xl, xl, a10bgeu xl, a10, 1faddi a9, a9, 11:do_mul(a10, xh, l, yh, l) /* pp 11 */add xl, xl, a10bgeu xl, a10, 1faddi a9, a9, 11:do_mul(a10, xh, h, yl, h) /* pp 12 */add xl, xl, a10bgeu xl, a10, 1faddi a9, a9, 11:/* Add pp13-14 into a11 with carry-outs in a10. */do_mul(a11, xh, l, yh, h) /* pp 13 */do_mul(a7, xh, h, yh, l) /* pp 14 */movi a10, 0add a11, a11, a7bgeu a11, a7, 1faddi a10, a10, 11:/* Shift a10/a11 into position, and add low half of a11 to a6. */src a10, a10, a11add a10, a10, a9sll a11, a11add xl, xl, a11bgeu xl, a11, 1faddi a10, a10, 11:/* Compute xh. */do_mul(xh, xh, h, yh, h) /* pp 15 */add xh, xh, a10/* Restore values saved on the stack during the multiplication. */l32i a7, sp, 4#if __XTENSA_CALL0_ABI__ && XCHAL_NO_MULl32i a0, sp, 0l32i a8, sp, 8#endif#endif /* ! XCHAL_HAVE_MUL32_HIGH *//* Shift left by 12 bits, unless there was a carry-out from themultiply, in which case, shift by 11 bits and increment theexponent. Note: It is convenient to use the constant 0x3ffinstead of 0x400 when removing the extra exponent bias (so thatit is easy to construct 0x7fe for the overflow check). Reversethe logic here to decrement the exponent sum by one unless therewas a carry-out. */movi a4, 11srli a5, xh, 21 - 12bnez a5, 1faddi a4, a4, 1addi a8, a8, -11: ssl a4src xh, xh, xlsrc xl, xl, a6sll a6, a6/* Subtract the extra bias from the exponent sum (plus one to accountfor the explicit "1.0" of the mantissa that will be added to theexponent in the final result). */movi a4, 0x3ffsub a8, a8, a4/* Check for over/underflow. The value in a8 is one less than thefinal exponent, so values in the range 0..7fd are OK here. */slli a4, a4, 1 /* 0x7fe */bgeu a8, a4, .Lmul_overflow.Lmul_round:/* Round. */bgez a6, .Lmul_roundedaddi xl, xl, 1beqz xl, .Lmul_roundcarryslli a6, a6, 1beqz a6, .Lmul_exactlyhalf.Lmul_rounded:/* Add the exponent to the mantissa. */slli a8, a8, 20add xh, xh, a8.Lmul_addsign:/* Add the sign bit. */srli a7, a7, 31slli a7, a7, 31or xh, xh, a7.Lmul_done:#if __XTENSA_CALL0_ABI__l32i a12, sp, 16l32i a13, sp, 20l32i a14, sp, 24l32i a15, sp, 28addi sp, sp, 32#endifleaf_return.Lmul_exactlyhalf:/* Round down to the nearest even value. */srli xl, xl, 1slli xl, xl, 1j .Lmul_rounded.Lmul_roundcarry:/* xl is always zero when the rounding increment overflows, sothere's no need to round it to an even value. */addi xh, xh, 1/* Overflow is OK -- it will be added to the exponent. */j .Lmul_rounded.Lmul_overflow:bltz a8, .Lmul_underflow/* Return +/- Infinity. */addi a8, a4, 1 /* 0x7ff */slli xh, a8, 20movi xl, 0j .Lmul_addsign.Lmul_underflow:/* Create a subnormal value, where the exponent field contains zero,but the effective exponent is 1. The value of a8 is one less thanthe actual exponent, so just negate it to get the shift amount. */neg a8, a8mov a9, a6ssr a8bgeui a8, 32, .Lmul_bigshift/* Shift xh/xl right. Any bits that are shifted out of xl are savedin a6 (combined with the shifted-out bits currently in a6) forrounding the result. */sll a6, xlsrc xl, xh, xlsrl xh, xhj 1f.Lmul_bigshift:bgeui a8, 64, .Lmul_flush_to_zerosll a10, xl /* lost bits shifted out of xl */src a6, xh, xlsrl xl, xhmovi xh, 0or a9, a9, a10/* Set the exponent to zero. */1: movi a8, 0/* Pack any nonzero bits shifted out into a6. */beqz a9, .Lmul_roundmovi a9, 1or a6, a6, a9j .Lmul_round.Lmul_flush_to_zero:/* Return zero with the appropriate sign bit. */srli xh, a7, 31slli xh, xh, 31movi xl, 0j .Lmul_done#if XCHAL_NO_MUL/* For Xtensa processors with no multiply hardware, this simplifiedversion of _mulsi3 is used for multiplying 16-bit chunks ofthe floating-point mantissas. When using CALL0, this functionuses a custom ABI: the inputs are passed in a13 and a14, theresult is returned in a12, and a8 and a15 are clobbered. */.align 4.Lmul_mulsi3:leaf_entry sp, 16.macro mul_mulsi3_body dst, src1, src2, tmp1, tmp2movi \dst, 01: add \tmp1, \src2, \dstextui \tmp2, \src1, 0, 1movnez \dst, \tmp1, \tmp2do_addx2 \tmp1, \src2, \dst, \tmp1extui \tmp2, \src1, 1, 1movnez \dst, \tmp1, \tmp2do_addx4 \tmp1, \src2, \dst, \tmp1extui \tmp2, \src1, 2, 1movnez \dst, \tmp1, \tmp2do_addx8 \tmp1, \src2, \dst, \tmp1extui \tmp2, \src1, 3, 1movnez \dst, \tmp1, \tmp2srli \src1, \src1, 4slli \src2, \src2, 4bnez \src1, 1b.endm#if __XTENSA_CALL0_ABI__mul_mulsi3_body a12, a13, a14, a15, a8#else/* The result will be written into a2, so save that argument in a4. */mov a4, a2mul_mulsi3_body a2, a4, a3, a5, a6#endifleaf_return#endif /* XCHAL_NO_MUL */#endif /* L_muldf3 */#ifdef L_divdf3/* Division */__divdf3_aux:/* Handle unusual cases (zeros, subnormals, NaNs and Infinities).(This code is placed before the start of the function just tokeep it in range of the limited branch displacements.) */.Ldiv_yexpzero:/* Clear the sign bit of y. */slli yh, yh, 1srli yh, yh, 1/* Check for division by zero. */or a10, yh, ylbeqz a10, .Ldiv_yzero/* Normalize y. Adjust the exponent in a9. */beqz yh, .Ldiv_yh_zerodo_nsau a10, yh, a11, a9addi a10, a10, -11ssl a10src yh, yh, ylsll yl, ylmovi a9, 1sub a9, a9, a10j .Ldiv_ynormalized.Ldiv_yh_zero:do_nsau a10, yl, a11, a9addi a10, a10, -11movi a9, -31sub a9, a9, a10ssl a10bltz a10, .Ldiv_yl_srlsll yh, ylmovi yl, 0j .Ldiv_ynormalized.Ldiv_yl_srl:srl yh, ylsll yl, ylj .Ldiv_ynormalized.Ldiv_yzero:/* y is zero. Return NaN if x is also zero; otherwise, infinity. */slli xh, xh, 1srli xh, xh, 1or xl, xl, xhsrli xh, a7, 31slli xh, xh, 31or xh, xh, a6bnez xl, 1fmovi a4, 0x80000 /* make it a quiet NaN */or xh, xh, a41: movi xl, 0leaf_return.Ldiv_xexpzero:/* Clear the sign bit of x. */slli xh, xh, 1srli xh, xh, 1/* If x is zero, return zero. */or a10, xh, xlbeqz a10, .Ldiv_return_zero/* Normalize x. Adjust the exponent in a8. */beqz xh, .Ldiv_xh_zerodo_nsau a10, xh, a11, a8addi a10, a10, -11ssl a10src xh, xh, xlsll xl, xlmovi a8, 1sub a8, a8, a10j .Ldiv_xnormalized.Ldiv_xh_zero:do_nsau a10, xl, a11, a8addi a10, a10, -11movi a8, -31sub a8, a8, a10ssl a10bltz a10, .Ldiv_xl_srlsll xh, xlmovi xl, 0j .Ldiv_xnormalized.Ldiv_xl_srl:srl xh, xlsll xl, xlj .Ldiv_xnormalized.Ldiv_return_zero:/* Return zero with the appropriate sign bit. */srli xh, a7, 31slli xh, xh, 31movi xl, 0leaf_return.Ldiv_xnan_or_inf:/* Set the sign bit of the result. */srli a7, yh, 31slli a7, a7, 31xor xh, xh, a7/* If y is NaN or Inf, return NaN. */bnall yh, a6, 1fmovi a4, 0x80000 /* make it a quiet NaN */or xh, xh, a41: leaf_return.Ldiv_ynan_or_inf:/* If y is Infinity, return zero. */slli a8, yh, 12or a8, a8, ylbeqz a8, .Ldiv_return_zero/* y is NaN; return it. */mov xh, yhmov xl, ylleaf_return.Ldiv_highequal1:bltu xl, yl, 2fj 3f.align 4.global __divdf3.type __divdf3, @function__divdf3:leaf_entry sp, 16movi a6, 0x7ff00000/* Get the sign of the result. */xor a7, xh, yh/* Check for NaN and infinity. */ball xh, a6, .Ldiv_xnan_or_infball yh, a6, .Ldiv_ynan_or_inf/* Extract the exponents. */extui a8, xh, 20, 11extui a9, yh, 20, 11beqz a9, .Ldiv_yexpzero.Ldiv_ynormalized:beqz a8, .Ldiv_xexpzero.Ldiv_xnormalized:/* Subtract the exponents. */sub a8, a8, a9/* Replace sign/exponent fields with explicit "1.0". */movi a10, 0x1fffffor xh, xh, a6and xh, xh, a10or yh, yh, a6and yh, yh, a10/* Set SAR for left shift by one. */ssai (32 - 1)/* The first digit of the mantissa division must be a one.Shift x (and adjust the exponent) as needed to make this true. */bltu yh, xh, 3fbeq yh, xh, .Ldiv_highequal12: src xh, xh, xlsll xl, xladdi a8, a8, -13:/* Do the first subtraction and shift. */sub xh, xh, yhbgeu xl, yl, 1faddi xh, xh, -11: sub xl, xl, ylsrc xh, xh, xlsll xl, xl/* Put the quotient into a10/a11. */movi a10, 0movi a11, 1/* Divide one bit at a time for 52 bits. */movi a9, 52#if XCHAL_HAVE_LOOPSloop a9, .Ldiv_loopend#endif.Ldiv_loop:/* Shift the quotient << 1. */src a10, a10, a11sll a11, a11/* Is this digit a 0 or 1? */bltu xh, yh, 3fbeq xh, yh, .Ldiv_highequal2/* Output a 1 and subtract. */2: addi a11, a11, 1sub xh, xh, yhbgeu xl, yl, 1faddi xh, xh, -11: sub xl, xl, yl/* Shift the dividend << 1. */3: src xh, xh, xlsll xl, xl#if !XCHAL_HAVE_LOOPSaddi a9, a9, -1bnez a9, .Ldiv_loop#endif.Ldiv_loopend:/* Add the exponent bias (less one to account for the explicit "1.0"of the mantissa that will be added to the exponent in the finalresult). */movi a9, 0x3feadd a8, a8, a9/* Check for over/underflow. The value in a8 is one less than thefinal exponent, so values in the range 0..7fd are OK here. */addmi a9, a9, 0x400 /* 0x7fe */bgeu a8, a9, .Ldiv_overflow.Ldiv_round:/* Round. The remainder (<< 1) is in xh/xl. */bltu xh, yh, .Ldiv_roundedbeq xh, yh, .Ldiv_highequal3.Ldiv_roundup:addi a11, a11, 1beqz a11, .Ldiv_roundcarry.Ldiv_rounded:mov xl, a11/* Add the exponent to the mantissa. */slli a8, a8, 20add xh, a10, a8.Ldiv_addsign:/* Add the sign bit. */srli a7, a7, 31slli a7, a7, 31or xh, xh, a7leaf_return.Ldiv_highequal2:bgeu xl, yl, 2bj 3b.Ldiv_highequal3:bltu xl, yl, .Ldiv_roundedbne xl, yl, .Ldiv_roundup/* Remainder is exactly half the divisor. Round even. */addi a11, a11, 1beqz a11, .Ldiv_roundcarrysrli a11, a11, 1slli a11, a11, 1j .Ldiv_rounded.Ldiv_overflow:bltz a8, .Ldiv_underflow/* Return +/- Infinity. */addi a8, a9, 1 /* 0x7ff */slli xh, a8, 20movi xl, 0j .Ldiv_addsign.Ldiv_underflow:/* Create a subnormal value, where the exponent field contains zero,but the effective exponent is 1. The value of a8 is one less thanthe actual exponent, so just negate it to get the shift amount. */neg a8, a8ssr a8bgeui a8, 32, .Ldiv_bigshift/* Shift a10/a11 right. Any bits that are shifted out of a11 aresaved in a6 for rounding the result. */sll a6, a11src a11, a10, a11srl a10, a10j 1f.Ldiv_bigshift:bgeui a8, 64, .Ldiv_flush_to_zerosll a9, a11 /* lost bits shifted out of a11 */src a6, a10, a11srl a11, a10movi a10, 0or xl, xl, a9/* Set the exponent to zero. */1: movi a8, 0/* Pack any nonzero remainder (in xh/xl) into a6. */or xh, xh, xlbeqz xh, 1fmovi a9, 1or a6, a6, a9/* Round a10/a11 based on the bits shifted out into a6. */1: bgez a6, .Ldiv_roundedaddi a11, a11, 1beqz a11, .Ldiv_roundcarryslli a6, a6, 1bnez a6, .Ldiv_roundedsrli a11, a11, 1slli a11, a11, 1j .Ldiv_rounded.Ldiv_roundcarry:/* a11 is always zero when the rounding increment overflows, sothere's no need to round it to an even value. */addi a10, a10, 1/* Overflow to the exponent field is OK. */j .Ldiv_rounded.Ldiv_flush_to_zero:/* Return zero with the appropriate sign bit. */srli xh, a7, 31slli xh, xh, 31movi xl, 0leaf_return#endif /* L_divdf3 */#ifdef L_cmpdf2/* Equal and Not Equal */.align 4.global __eqdf2.global __nedf2.set __nedf2, __eqdf2.type __eqdf2, @function__eqdf2:leaf_entry sp, 16bne xl, yl, 2fbne xh, yh, 4f/* The values are equal but NaN != NaN. Check the exponent. */movi a6, 0x7ff00000ball xh, a6, 3f/* Equal. */movi a2, 0leaf_return/* Not equal. */2: movi a2, 1leaf_return/* Check if the mantissas are nonzero. */3: slli a7, xh, 12or a7, a7, xlj 5f/* Check if x and y are zero with different signs. */4: or a7, xh, yhslli a7, a7, 1or a7, a7, xl /* xl == yl here *//* Equal if a7 == 0, where a7 is either abs(x | y) or the mantissaor x when exponent(x) = 0x7ff and x == y. */5: movi a2, 0movi a3, 1movnez a2, a3, a7leaf_return/* Greater Than */.align 4.global __gtdf2.type __gtdf2, @function__gtdf2:leaf_entry sp, 16movi a6, 0x7ff00000ball xh, a6, 2f1: bnall yh, a6, .Lle_cmp/* Check if y is a NaN. */slli a7, yh, 12or a7, a7, ylbeqz a7, .Lle_cmpmovi a2, 0leaf_return/* Check if x is a NaN. */2: slli a7, xh, 12or a7, a7, xlbeqz a7, 1bmovi a2, 0leaf_return/* Less Than or Equal */.align 4.global __ledf2.type __ledf2, @function__ledf2:leaf_entry sp, 16movi a6, 0x7ff00000ball xh, a6, 2f1: bnall yh, a6, .Lle_cmp/* Check if y is a NaN. */slli a7, yh, 12or a7, a7, ylbeqz a7, .Lle_cmpmovi a2, 1leaf_return/* Check if x is a NaN. */2: slli a7, xh, 12or a7, a7, xlbeqz a7, 1bmovi a2, 1leaf_return.Lle_cmp:/* Check if x and y have different signs. */xor a7, xh, yhbltz a7, .Lle_diff_signs/* Check if x is negative. */bltz xh, .Lle_xneg/* Check if x <= y. */bltu xh, yh, 4fbne xh, yh, 5fbltu yl, xl, 5f4: movi a2, 0leaf_return.Lle_xneg:/* Check if y <= x. */bltu yh, xh, 4bbne yh, xh, 5fbgeu xl, yl, 4b5: movi a2, 1leaf_return.Lle_diff_signs:bltz xh, 4b/* Check if both x and y are zero. */or a7, xh, yhslli a7, a7, 1or a7, a7, xlor a7, a7, ylmovi a2, 1movi a3, 0moveqz a2, a3, a7leaf_return/* Greater Than or Equal */.align 4.global __gedf2.type __gedf2, @function__gedf2:leaf_entry sp, 16movi a6, 0x7ff00000ball xh, a6, 2f1: bnall yh, a6, .Llt_cmp/* Check if y is a NaN. */slli a7, yh, 12or a7, a7, ylbeqz a7, .Llt_cmpmovi a2, -1leaf_return/* Check if x is a NaN. */2: slli a7, xh, 12or a7, a7, xlbeqz a7, 1bmovi a2, -1leaf_return/* Less Than */.align 4.global __ltdf2.type __ltdf2, @function__ltdf2:leaf_entry sp, 16movi a6, 0x7ff00000ball xh, a6, 2f1: bnall yh, a6, .Llt_cmp/* Check if y is a NaN. */slli a7, yh, 12or a7, a7, ylbeqz a7, .Llt_cmpmovi a2, 0leaf_return/* Check if x is a NaN. */2: slli a7, xh, 12or a7, a7, xlbeqz a7, 1bmovi a2, 0leaf_return.Llt_cmp:/* Check if x and y have different signs. */xor a7, xh, yhbltz a7, .Llt_diff_signs/* Check if x is negative. */bltz xh, .Llt_xneg/* Check if x < y. */bltu xh, yh, 4fbne xh, yh, 5fbgeu xl, yl, 5f4: movi a2, -1leaf_return.Llt_xneg:/* Check if y < x. */bltu yh, xh, 4bbne yh, xh, 5fbltu yl, xl, 4b5: movi a2, 0leaf_return.Llt_diff_signs:bgez xh, 5b/* Check if both x and y are nonzero. */or a7, xh, yhslli a7, a7, 1or a7, a7, xlor a7, a7, ylmovi a2, 0movi a3, -1movnez a2, a3, a7leaf_return/* Unordered */.align 4.global __unorddf2.type __unorddf2, @function__unorddf2:leaf_entry sp, 16movi a6, 0x7ff00000ball xh, a6, 3f1: ball yh, a6, 4f2: movi a2, 0leaf_return3: slli a7, xh, 12or a7, a7, xlbeqz a7, 1bmovi a2, 1leaf_return4: slli a7, yh, 12or a7, a7, ylbeqz a7, 2bmovi a2, 1leaf_return#endif /* L_cmpdf2 */#ifdef L_fixdfsi.align 4.global __fixdfsi.type __fixdfsi, @function__fixdfsi:leaf_entry sp, 16/* Check for NaN and Infinity. */movi a6, 0x7ff00000ball xh, a6, .Lfixdfsi_nan_or_inf/* Extract the exponent and check if 0 < (exp - 0x3fe) < 32. */extui a4, xh, 20, 11extui a5, a6, 19, 10 /* 0x3fe */sub a4, a4, a5bgei a4, 32, .Lfixdfsi_maxintblti a4, 1, .Lfixdfsi_zero/* Add explicit "1.0" and shift << 11. */or a7, xh, a6ssai (32 - 11)src a5, a7, xl/* Shift back to the right, based on the exponent. */ssl a4 /* shift by 32 - a4 */srl a5, a5/* Negate the result if sign != 0. */neg a2, a5movgez a2, a5, a7leaf_return.Lfixdfsi_nan_or_inf:/* Handle Infinity and NaN. */slli a4, xh, 12or a4, a4, xlbeqz a4, .Lfixdfsi_maxint/* Translate NaN to +maxint. */movi xh, 0.Lfixdfsi_maxint:slli a4, a6, 11 /* 0x80000000 */addi a5, a4, -1 /* 0x7fffffff */movgez a4, a5, xhmov a2, a4leaf_return.Lfixdfsi_zero:movi a2, 0leaf_return#endif /* L_fixdfsi */#ifdef L_fixdfdi.align 4.global __fixdfdi.type __fixdfdi, @function__fixdfdi:leaf_entry sp, 16/* Check for NaN and Infinity. */movi a6, 0x7ff00000ball xh, a6, .Lfixdfdi_nan_or_inf/* Extract the exponent and check if 0 < (exp - 0x3fe) < 64. */extui a4, xh, 20, 11extui a5, a6, 19, 10 /* 0x3fe */sub a4, a4, a5bgei a4, 64, .Lfixdfdi_maxintblti a4, 1, .Lfixdfdi_zero/* Add explicit "1.0" and shift << 11. */or a7, xh, a6ssai (32 - 11)src xh, a7, xlsll xl, xl/* Shift back to the right, based on the exponent. */ssl a4 /* shift by 64 - a4 */bgei a4, 32, .Lfixdfdi_smallshiftsrl xl, xhmovi xh, 0.Lfixdfdi_shifted:/* Negate the result if sign != 0. */bgez a7, 1fneg xl, xlneg xh, xhbeqz xl, 1faddi xh, xh, -11: leaf_return.Lfixdfdi_smallshift:src xl, xh, xlsrl xh, xhj .Lfixdfdi_shifted.Lfixdfdi_nan_or_inf:/* Handle Infinity and NaN. */slli a4, xh, 12or a4, a4, xlbeqz a4, .Lfixdfdi_maxint/* Translate NaN to +maxint. */movi xh, 0.Lfixdfdi_maxint:slli a7, a6, 11 /* 0x80000000 */bgez xh, 1fmov xh, a7movi xl, 0leaf_return1: addi xh, a7, -1 /* 0x7fffffff */movi xl, -1leaf_return.Lfixdfdi_zero:movi xh, 0movi xl, 0leaf_return#endif /* L_fixdfdi */#ifdef L_fixunsdfsi.align 4.global __fixunsdfsi.type __fixunsdfsi, @function__fixunsdfsi:leaf_entry sp, 16/* Check for NaN and Infinity. */movi a6, 0x7ff00000ball xh, a6, .Lfixunsdfsi_nan_or_inf/* Extract the exponent and check if 0 <= (exp - 0x3ff) < 32. */extui a4, xh, 20, 11extui a5, a6, 20, 10 /* 0x3ff */sub a4, a4, a5bgei a4, 32, .Lfixunsdfsi_maxintbltz a4, .Lfixunsdfsi_zero/* Add explicit "1.0" and shift << 11. */or a7, xh, a6ssai (32 - 11)src a5, a7, xl/* Shift back to the right, based on the exponent. */addi a4, a4, 1beqi a4, 32, .Lfixunsdfsi_bigexpssl a4 /* shift by 32 - a4 */srl a5, a5/* Negate the result if sign != 0. */neg a2, a5movgez a2, a5, a7leaf_return.Lfixunsdfsi_nan_or_inf:/* Handle Infinity and NaN. */slli a4, xh, 12or a4, a4, xlbeqz a4, .Lfixunsdfsi_maxint/* Translate NaN to 0xffffffff. */movi a2, -1leaf_return.Lfixunsdfsi_maxint:slli a4, a6, 11 /* 0x80000000 */movi a5, -1 /* 0xffffffff */movgez a4, a5, xhmov a2, a4leaf_return.Lfixunsdfsi_zero:movi a2, 0leaf_return.Lfixunsdfsi_bigexp:/* Handle unsigned maximum exponent case. */bltz xh, 1fmov a2, a5 /* no shift needed */leaf_return/* Return 0x80000000 if negative. */1: slli a2, a6, 11leaf_return#endif /* L_fixunsdfsi */#ifdef L_fixunsdfdi.align 4.global __fixunsdfdi.type __fixunsdfdi, @function__fixunsdfdi:leaf_entry sp, 16/* Check for NaN and Infinity. */movi a6, 0x7ff00000ball xh, a6, .Lfixunsdfdi_nan_or_inf/* Extract the exponent and check if 0 <= (exp - 0x3ff) < 64. */extui a4, xh, 20, 11extui a5, a6, 20, 10 /* 0x3ff */sub a4, a4, a5bgei a4, 64, .Lfixunsdfdi_maxintbltz a4, .Lfixunsdfdi_zero/* Add explicit "1.0" and shift << 11. */or a7, xh, a6ssai (32 - 11)src xh, a7, xlsll xl, xl/* Shift back to the right, based on the exponent. */addi a4, a4, 1beqi a4, 64, .Lfixunsdfdi_bigexpssl a4 /* shift by 64 - a4 */bgei a4, 32, .Lfixunsdfdi_smallshiftsrl xl, xhmovi xh, 0.Lfixunsdfdi_shifted:/* Negate the result if sign != 0. */bgez a7, 1fneg xl, xlneg xh, xhbeqz xl, 1faddi xh, xh, -11: leaf_return.Lfixunsdfdi_smallshift:src xl, xh, xlsrl xh, xhj .Lfixunsdfdi_shifted.Lfixunsdfdi_nan_or_inf:/* Handle Infinity and NaN. */slli a4, xh, 12or a4, a4, xlbeqz a4, .Lfixunsdfdi_maxint/* Translate NaN to 0xffffffff.... */1: movi xh, -1movi xl, -1leaf_return.Lfixunsdfdi_maxint:bgez xh, 1b2: slli xh, a6, 11 /* 0x80000000 */movi xl, 0leaf_return.Lfixunsdfdi_zero:movi xh, 0movi xl, 0leaf_return.Lfixunsdfdi_bigexp:/* Handle unsigned maximum exponent case. */bltz a7, 2bleaf_return /* no shift needed */#endif /* L_fixunsdfdi */#ifdef L_floatsidf.align 4.global __floatunsidf.type __floatunsidf, @function__floatunsidf:leaf_entry sp, 16beqz a2, .Lfloatsidf_return_zero/* Set the sign to zero and jump to the floatsidf code. */movi a7, 0j .Lfloatsidf_normalize.align 4.global __floatsidf.type __floatsidf, @function__floatsidf:leaf_entry sp, 16/* Check for zero. */beqz a2, .Lfloatsidf_return_zero/* Save the sign. */extui a7, a2, 31, 1/* Get the absolute value. */#if XCHAL_HAVE_ABSabs a2, a2#elseneg a4, a2movltz a2, a4, a2#endif.Lfloatsidf_normalize:/* Normalize with the first 1 bit in the msb. */do_nsau a4, a2, a5, a6ssl a4sll a5, a2/* Shift the mantissa into position. */srli xh, a5, 11slli xl, a5, (32 - 11)/* Set the exponent. */movi a5, 0x41d /* 0x3fe + 31 */sub a5, a5, a4slli a5, a5, 20add xh, xh, a5/* Add the sign and return. */slli a7, a7, 31or xh, xh, a7leaf_return.Lfloatsidf_return_zero:movi a3, 0leaf_return#endif /* L_floatsidf */#ifdef L_floatdidf.align 4.global __floatundidf.type __floatundidf, @function__floatundidf:leaf_entry sp, 16/* Check for zero. */or a4, xh, xlbeqz a4, 2f/* Set the sign to zero and jump to the floatdidf code. */movi a7, 0j .Lfloatdidf_normalize.align 4.global __floatdidf.type __floatdidf, @function__floatdidf:leaf_entry sp, 16/* Check for zero. */or a4, xh, xlbeqz a4, 2f/* Save the sign. */extui a7, xh, 31, 1/* Get the absolute value. */bgez xh, .Lfloatdidf_normalizeneg xl, xlneg xh, xhbeqz xl, .Lfloatdidf_normalizeaddi xh, xh, -1.Lfloatdidf_normalize:/* Normalize with the first 1 bit in the msb of xh. */beqz xh, .Lfloatdidf_bigshiftdo_nsau a4, xh, a5, a6ssl a4src xh, xh, xlsll xl, xl.Lfloatdidf_shifted:/* Shift the mantissa into position, with rounding bits in a6. */ssai 11sll a6, xlsrc xl, xh, xlsrl xh, xh/* Set the exponent. */movi a5, 0x43d /* 0x3fe + 63 */sub a5, a5, a4slli a5, a5, 20add xh, xh, a5/* Add the sign. */slli a7, a7, 31or xh, xh, a7/* Round up if the leftover fraction is >= 1/2. */bgez a6, 2faddi xl, xl, 1beqz xl, .Lfloatdidf_roundcarry/* Check if the leftover fraction is exactly 1/2. */slli a6, a6, 1beqz a6, .Lfloatdidf_exactlyhalf2: leaf_return.Lfloatdidf_bigshift:/* xh is zero. Normalize with first 1 bit of xl in the msb of xh. */do_nsau a4, xl, a5, a6ssl a4sll xh, xlmovi xl, 0addi a4, a4, 32j .Lfloatdidf_shifted.Lfloatdidf_exactlyhalf:/* Round down to the nearest even value. */srli xl, xl, 1slli xl, xl, 1leaf_return.Lfloatdidf_roundcarry:/* xl is always zero when the rounding increment overflows, sothere's no need to round it to an even value. */addi xh, xh, 1/* Overflow to the exponent is OK. */leaf_return#endif /* L_floatdidf */#ifdef L_truncdfsf2.align 4.global __truncdfsf2.type __truncdfsf2, @function__truncdfsf2:leaf_entry sp, 16/* Adjust the exponent bias. */movi a4, (0x3ff - 0x7f) << 20sub a5, xh, a4/* Check for underflow. */xor a6, xh, a5bltz a6, .Ltrunc_underflowextui a6, a5, 20, 11beqz a6, .Ltrunc_underflow/* Check for overflow. */movi a4, 255bge a6, a4, .Ltrunc_overflow/* Shift a5/xl << 3 into a5/a4. */ssai (32 - 3)src a5, a5, xlsll a4, xl.Ltrunc_addsign:/* Add the sign bit. */extui a6, xh, 31, 1slli a6, a6, 31or a2, a6, a5/* Round up if the leftover fraction is >= 1/2. */bgez a4, 1faddi a2, a2, 1/* Overflow to the exponent is OK. The answer will be correct. *//* Check if the leftover fraction is exactly 1/2. */slli a4, a4, 1beqz a4, .Ltrunc_exactlyhalf1: leaf_return.Ltrunc_exactlyhalf:/* Round down to the nearest even value. */srli a2, a2, 1slli a2, a2, 1leaf_return.Ltrunc_overflow:/* Check if exponent == 0x7ff. */movi a4, 0x7ff00000bnall xh, a4, 1f/* Check if mantissa is nonzero. */slli a5, xh, 12or a5, a5, xlbeqz a5, 1f/* Shift a4 to set a bit in the mantissa, making a quiet NaN. */srli a4, a4, 11: slli a4, a4, 4 /* 0xff000000 or 0xff800000 *//* Add the sign bit. */extui a6, xh, 31, 1ssai 1src a2, a6, a4leaf_return.Ltrunc_underflow:/* Find shift count for a subnormal. Flush to zero if >= 32. */extui a6, xh, 20, 11movi a5, 0x3ff - 0x7fsub a6, a5, a6addi a6, a6, 1bgeui a6, 32, 1f/* Replace the exponent with an explicit "1.0". */slli a5, a5, 13 /* 0x700000 */or a5, a5, xhslli a5, a5, 11srli a5, a5, 11/* Shift the mantissa left by 3 bits (into a5/a4). */ssai (32 - 3)src a5, a5, xlsll a4, xl/* Shift right by a6. */ssr a6sll a7, a4src a4, a5, a4srl a5, a5beqz a7, .Ltrunc_addsignor a4, a4, a6 /* any positive, nonzero value will work */j .Ltrunc_addsign/* Return +/- zero. */1: extui a2, xh, 31, 1slli a2, a2, 31leaf_return#endif /* L_truncdfsf2 */#ifdef L_extendsfdf2.align 4.global __extendsfdf2.type __extendsfdf2, @function__extendsfdf2:leaf_entry sp, 16/* Save the sign bit and then shift it off. */extui a5, a2, 31, 1slli a5, a5, 31slli a4, a2, 1/* Extract and check the exponent. */extui a6, a2, 23, 8beqz a6, .Lextend_expzeroaddi a6, a6, 1beqi a6, 256, .Lextend_nan_or_inf/* Shift >> 3 into a4/xl. */srli a4, a4, 4slli xl, a2, (32 - 3)/* Adjust the exponent bias. */movi a6, (0x3ff - 0x7f) << 20add a4, a4, a6/* Add the sign bit. */or xh, a4, a5leaf_return.Lextend_nan_or_inf:movi a4, 0x7ff00000/* Check for NaN. */slli a7, a2, 9beqz a7, 1fslli a6, a6, 11 /* 0x80000 */or a4, a4, a6/* Add the sign and return. */1: or xh, a4, a5movi xl, 0leaf_return.Lextend_expzero:beqz a4, 1b/* Normalize it to have 8 zero bits before the first 1 bit. */do_nsau a7, a4, a2, a3addi a7, a7, -8ssl a7sll a4, a4/* Shift >> 3 into a4/xl. */slli xl, a4, (32 - 3)srli a4, a4, 3/* Set the exponent. */movi a6, 0x3fe - 0x7fsub a6, a6, a7slli a6, a6, 20add a4, a4, a6/* Add the sign and return. */or xh, a4, a5leaf_return#endif /* L_extendsfdf2 */
