OpenCores
URL https://opencores.org/ocsvn/openrisc_2011-10-31/openrisc_2011-10-31/trunk

Subversion Repositories openrisc_2011-10-31

[/] [openrisc/] [tags/] [gnu-src/] [gcc-4.5.1/] [gcc-4.5.1-or32-1.0rc2/] [gcc/] [config/] [xtensa/] [ieee754-df.S] - Diff between revs 282 and 384

Go to most recent revision | Only display areas with differences | Details | Blame | View Log

Rev 282 Rev 384
/* IEEE-754 double-precision functions for Xtensa
   Copyright (C) 2006, 2007, 2009 Free Software Foundation, Inc.
   Contributed by Bob Wilson (bwilson@tensilica.com) at Tensilica.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */
/* Register names for the two double-precision operands.  A double is
   passed in a register pair (a2/a3 for x, a4/a5 for y); which register
   of the pair holds the high (sign/exponent) word depends on the
   target endianness.  */
#ifdef __XTENSA_EB__
#define xh a2
#define xl a3
#define yh a4
#define yl a5
#else
#define xh a3
#define xl a2
#define yh a5
#define yl a4
#endif
/*  Warning!  The branch displacements for some Xtensa branch instructions
    are quite small, and this code has been carefully laid out to keep
    branch targets in range.  If you change anything, be sure to check that
    the assembler is not relaxing anything to branch over a jump.  */
#ifdef L_negdf2

	/* double __negdf2 (double x): negate x by flipping the sign bit
	   (bit 31) of the high word.  The mantissa and exponent are
	   untouched, so this is correct for NaNs, Infinities and zeros.  */
        .align  4
        .global __negdf2
        .type   __negdf2, @function
__negdf2:
        leaf_entry sp, 16
        movi    a4, 0x80000000
        xor     xh, xh, a4
        leaf_return

#endif /* L_negdf2 */
#ifdef L_addsubdf3

        /* Addition */

__adddf3_aux:

        /* Handle NaNs and Infinities.  (This code is placed before the
           start of the function just to keep it in range of the limited
           branch displacements.)  */

.Ladd_xnan_or_inf:
        /* If y is neither Infinity nor NaN, return x.  */
        bnall   yh, a6, 1f
        /* If x is a NaN, return it.  Otherwise, return y.  */
        slli    a7, xh, 12
        or      a7, a7, xl
        beqz    a7, .Ladd_ynan_or_inf
1:      leaf_return

.Ladd_ynan_or_inf:
        /* Return y.  */
        mov     xh, yh
        mov     xl, yl
        leaf_return

.Ladd_opposite_signs:
        /* Operand signs differ.  Do a subtraction.  */
        slli    a7, a6, 11
        xor     yh, yh, a7
        j       .Lsub_same_sign

        /* double __adddf3 (double x, double y): IEEE-754 addition,
           round-to-nearest-even.  x is in xh/xl, y in yh/yl (see the
           register #defines above); the result replaces x.  a6 holds
           the exponent mask 0x7ff00000 throughout.  */
        .align  4
        .global __adddf3
        .type   __adddf3, @function
__adddf3:
        leaf_entry sp, 16
        movi    a6, 0x7ff00000

        /* Check if the two operands have the same sign.  */
        xor     a7, xh, yh
        bltz    a7, .Ladd_opposite_signs

.Ladd_same_sign:
        /* Check if either exponent == 0x7ff (i.e., NaN or Infinity).  */
        ball    xh, a6, .Ladd_xnan_or_inf
        ball    yh, a6, .Ladd_ynan_or_inf

        /* Compare the exponents.  The smaller operand will be shifted
           right by the exponent difference and added to the larger
           one.  */
        extui   a7, xh, 20, 12
        extui   a8, yh, 20, 12
        bltu    a7, a8, .Ladd_shiftx

.Ladd_shifty:
        /* Check if the smaller (or equal) exponent is zero.  */
        bnone   yh, a6, .Ladd_yexpzero

        /* Replace yh sign/exponent with 0x001.  */
        or      yh, yh, a6
        slli    yh, yh, 11
        srli    yh, yh, 11

.Ladd_yexpdiff:
        /* Compute the exponent difference.  Optimize for difference < 32.  */
        sub     a10, a7, a8
        bgeui   a10, 32, .Ladd_bigshifty

        /* Shift yh/yl right by the exponent difference.  Any bits that are
           shifted out of yl are saved in a9 for rounding the result.  */
        ssr     a10
        movi    a9, 0
        src     a9, yl, a9
        src     yl, yh, yl
        srl     yh, yh

.Ladd_addy:
        /* Do the 64-bit addition.  */
        add     xl, xl, yl
        add     xh, xh, yh
        bgeu    xl, yl, 1f
        addi    xh, xh, 1
1:
        /* Check if the add overflowed into the exponent.  */
        extui   a10, xh, 20, 12
        beq     a10, a7, .Ladd_round
        mov     a8, a7
        j       .Ladd_carry

.Ladd_yexpzero:
        /* y is a subnormal value.  Replace its sign/exponent with zero,
           i.e., no implicit "1.0", and increment the apparent exponent
           because subnormals behave as if they had the minimum (nonzero)
           exponent.  Test for the case when both exponents are zero.  */
        slli    yh, yh, 12
        srli    yh, yh, 12
        bnone   xh, a6, .Ladd_bothexpzero
        addi    a8, a8, 1
        j       .Ladd_yexpdiff

.Ladd_bothexpzero:
        /* Both exponents are zero.  Handle this as a special case.  There
           is no need to shift or round, and the normal code for handling
           a carry into the exponent field will not work because it
           assumes there is an implicit "1.0" that needs to be added.  */
        add     xl, xl, yl
        add     xh, xh, yh
        bgeu    xl, yl, 1f
        addi    xh, xh, 1
1:      leaf_return

.Ladd_bigshifty:
        /* Exponent difference > 64 -- just return the bigger value.  */
        bgeui   a10, 64, 1b

        /* Shift yh/yl right by the exponent difference.  Any bits that are
           shifted out are saved in a9 for rounding the result.  */
        ssr     a10
        sll     a11, yl         /* lost bits shifted out of yl */
        src     a9, yh, yl
        srl     yl, yh
        movi    yh, 0
        beqz    a11, .Ladd_addy
        or      a9, a9, a10     /* any positive, nonzero value will work */
        j       .Ladd_addy

.Ladd_xexpzero:
        /* Same as "yexpzero" except skip handling the case when both
           exponents are zero.  */
        slli    xh, xh, 12
        srli    xh, xh, 12
        addi    a7, a7, 1
        j       .Ladd_xexpdiff

.Ladd_shiftx:
        /* Same thing as the "shifty" code, but with x and y swapped.  Also,
           because the exponent difference is always nonzero in this version,
           the shift sequence can use SLL and skip loading a constant zero.  */
        bnone   xh, a6, .Ladd_xexpzero

        or      xh, xh, a6
        slli    xh, xh, 11
        srli    xh, xh, 11

.Ladd_xexpdiff:
        sub     a10, a8, a7
        bgeui   a10, 32, .Ladd_bigshiftx

        ssr     a10
        sll     a9, xl
        src     xl, xh, xl
        srl     xh, xh

.Ladd_addx:
        add     xl, xl, yl
        add     xh, xh, yh
        bgeu    xl, yl, 1f
        addi    xh, xh, 1
1:
        /* Check if the add overflowed into the exponent.  */
        extui   a10, xh, 20, 12
        bne     a10, a8, .Ladd_carry

.Ladd_round:
        /* Round up if the leftover fraction is >= 1/2.  */
        bgez    a9, 1f
        addi    xl, xl, 1
        beqz    xl, .Ladd_roundcarry

        /* Check if the leftover fraction is exactly 1/2.  */
        slli    a9, a9, 1
        beqz    a9, .Ladd_exactlyhalf
1:      leaf_return

.Ladd_bigshiftx:
        /* Mostly the same thing as "bigshifty"....  */
        bgeui   a10, 64, .Ladd_returny

        ssr     a10
        sll     a11, xl
        src     a9, xh, xl
        srl     xl, xh
        movi    xh, 0
        beqz    a11, .Ladd_addx
        or      a9, a9, a10
        j       .Ladd_addx

.Ladd_returny:
        mov     xh, yh
        mov     xl, yl
        leaf_return

.Ladd_carry:
        /* The addition has overflowed into the exponent field, so the
           value needs to be renormalized.  The mantissa of the result
           can be recovered by subtracting the original exponent and
           adding 0x100000 (which is the explicit "1.0" for the
           mantissa of the non-shifted operand -- the "1.0" for the
           shifted operand was already added).  The mantissa can then
           be shifted right by one bit.  The explicit "1.0" of the
           shifted mantissa then needs to be replaced by the exponent,
           incremented by one to account for the normalizing shift.
           It is faster to combine these operations: do the shift first
           and combine the additions and subtractions.  If x is the
           original exponent, the result is:
               shifted mantissa - (x << 19) + (1 << 19) + (x << 20)
           or:
               shifted mantissa + ((x + 1) << 19)
           Note that the exponent is incremented here by leaving the
           explicit "1.0" of the mantissa in the exponent field.  */

        /* Shift xh/xl right by one bit.  Save the lsb of xl.  */
        mov     a10, xl
        ssai    1
        src     xl, xh, xl
        srl     xh, xh

        /* See explanation above.  The original exponent is in a8.  */
        addi    a8, a8, 1
        slli    a8, a8, 19
        add     xh, xh, a8

        /* Return an Infinity if the exponent overflowed.  */
        ball    xh, a6, .Ladd_infinity

        /* Same thing as the "round" code except the msb of the leftover
           fraction is bit 0 of a10, with the rest of the fraction in a9.  */
        bbci.l  a10, 0, 1f
        addi    xl, xl, 1
        beqz    xl, .Ladd_roundcarry
        beqz    a9, .Ladd_exactlyhalf
1:      leaf_return

.Ladd_infinity:
        /* Clear the mantissa.  */
        movi    xl, 0
        srli    xh, xh, 20
        slli    xh, xh, 20

        /* The sign bit may have been lost in a carry-out.  Put it back.  */
        slli    a8, a8, 1
        or      xh, xh, a8
        leaf_return

.Ladd_exactlyhalf:
        /* Round down to the nearest even value.  */
        srli    xl, xl, 1
        slli    xl, xl, 1
        leaf_return

.Ladd_roundcarry:
        /* xl is always zero when the rounding increment overflows, so
           there's no need to round it to an even value.  */
        addi    xh, xh, 1
        /* Overflow to the exponent is OK.  */
        leaf_return
        /* Subtraction */
        /* Subtraction */
__subdf3_aux:
__subdf3_aux:
        /* Handle NaNs and Infinities.  (This code is placed before the
        /* Handle NaNs and Infinities.  (This code is placed before the
           start of the function just to keep it in range of the limited
           start of the function just to keep it in range of the limited
           branch displacements.)  */
           branch displacements.)  */
.Lsub_xnan_or_inf:
.Lsub_xnan_or_inf:
        /* If y is neither Infinity nor NaN, return x.  */
        /* If y is neither Infinity nor NaN, return x.  */
        bnall   yh, a6, 1f
        bnall   yh, a6, 1f
        /* Both x and y are either NaN or Inf, so the result is NaN.  */
        /* Both x and y are either NaN or Inf, so the result is NaN.  */
        movi    a4, 0x80000     /* make it a quiet NaN */
        movi    a4, 0x80000     /* make it a quiet NaN */
        or      xh, xh, a4
        or      xh, xh, a4
1:      leaf_return
1:      leaf_return
.Lsub_ynan_or_inf:
.Lsub_ynan_or_inf:
        /* Negate y and return it.  */
        /* Negate y and return it.  */
        slli    a7, a6, 11
        slli    a7, a6, 11
        xor     xh, yh, a7
        xor     xh, yh, a7
        mov     xl, yl
        mov     xl, yl
        leaf_return
        leaf_return
.Lsub_opposite_signs:
.Lsub_opposite_signs:
        /* Operand signs differ.  Do an addition.  */
        /* Operand signs differ.  Do an addition.  */
        slli    a7, a6, 11
        slli    a7, a6, 11
        xor     yh, yh, a7
        xor     yh, yh, a7
        j       .Ladd_same_sign
        j       .Ladd_same_sign
        .align  4
        .align  4
        .global __subdf3
        .global __subdf3
        .type   __subdf3, @function
        .type   __subdf3, @function
__subdf3:
__subdf3:
        leaf_entry sp, 16
        leaf_entry sp, 16
        movi    a6, 0x7ff00000
        movi    a6, 0x7ff00000
        /* Check if the two operands have the same sign.  */
        /* Check if the two operands have the same sign.  */
        xor     a7, xh, yh
        xor     a7, xh, yh
        bltz    a7, .Lsub_opposite_signs
        bltz    a7, .Lsub_opposite_signs
.Lsub_same_sign:
.Lsub_same_sign:
        /* Check if either exponent == 0x7ff (i.e., NaN or Infinity).  */
        /* Check if either exponent == 0x7ff (i.e., NaN or Infinity).  */
        ball    xh, a6, .Lsub_xnan_or_inf
        ball    xh, a6, .Lsub_xnan_or_inf
        ball    yh, a6, .Lsub_ynan_or_inf
        ball    yh, a6, .Lsub_ynan_or_inf
        /* Compare the operands.  In contrast to addition, the entire
        /* Compare the operands.  In contrast to addition, the entire
           value matters here.  */
           value matters here.  */
        extui   a7, xh, 20, 11
        extui   a7, xh, 20, 11
        extui   a8, yh, 20, 11
        extui   a8, yh, 20, 11
        bltu    xh, yh, .Lsub_xsmaller
        bltu    xh, yh, .Lsub_xsmaller
        beq     xh, yh, .Lsub_compare_low
        beq     xh, yh, .Lsub_compare_low
.Lsub_ysmaller:
.Lsub_ysmaller:
        /* Check if the smaller (or equal) exponent is zero.  */
        /* Check if the smaller (or equal) exponent is zero.  */
        bnone   yh, a6, .Lsub_yexpzero
        bnone   yh, a6, .Lsub_yexpzero
        /* Replace yh sign/exponent with 0x001.  */
        /* Replace yh sign/exponent with 0x001.  */
        or      yh, yh, a6
        or      yh, yh, a6
        slli    yh, yh, 11
        slli    yh, yh, 11
        srli    yh, yh, 11
        srli    yh, yh, 11
.Lsub_yexpdiff:
.Lsub_yexpdiff:
        /* Compute the exponent difference.  Optimize for difference < 32.  */
        /* Compute the exponent difference.  Optimize for difference < 32.  */
        sub     a10, a7, a8
        sub     a10, a7, a8
        bgeui   a10, 32, .Lsub_bigshifty
        bgeui   a10, 32, .Lsub_bigshifty
        /* Shift yh/yl right by the exponent difference.  Any bits that are
        /* Shift yh/yl right by the exponent difference.  Any bits that are
           shifted out of yl are saved in a9 for rounding the result.  */
           shifted out of yl are saved in a9 for rounding the result.  */
        ssr     a10
        ssr     a10
        movi    a9, 0
        movi    a9, 0
        src     a9, yl, a9
        src     a9, yl, a9
        src     yl, yh, yl
        src     yl, yh, yl
        srl     yh, yh
        srl     yh, yh
.Lsub_suby:
.Lsub_suby:
        /* Do the 64-bit subtraction.  */
        /* Do the 64-bit subtraction.  */
        sub     xh, xh, yh
        sub     xh, xh, yh
        bgeu    xl, yl, 1f
        bgeu    xl, yl, 1f
        addi    xh, xh, -1
        addi    xh, xh, -1
1:      sub     xl, xl, yl
1:      sub     xl, xl, yl
        /* Subtract the leftover bits in a9 from zero and propagate any
        /* Subtract the leftover bits in a9 from zero and propagate any
           borrow from xh/xl.  */
           borrow from xh/xl.  */
        neg     a9, a9
        neg     a9, a9
        beqz    a9, 1f
        beqz    a9, 1f
        addi    a5, xh, -1
        addi    a5, xh, -1
        moveqz  xh, a5, xl
        moveqz  xh, a5, xl
        addi    xl, xl, -1
        addi    xl, xl, -1
1:
1:
        /* Check if the subtract underflowed into the exponent.  */
        /* Check if the subtract underflowed into the exponent.  */
        extui   a10, xh, 20, 11
        extui   a10, xh, 20, 11
        beq     a10, a7, .Lsub_round
        beq     a10, a7, .Lsub_round
        j       .Lsub_borrow
        j       .Lsub_borrow
.Lsub_compare_low:
.Lsub_compare_low:
        /* The high words are equal.  Compare the low words.  */
        /* The high words are equal.  Compare the low words.  */
        bltu    xl, yl, .Lsub_xsmaller
        bltu    xl, yl, .Lsub_xsmaller
        bltu    yl, xl, .Lsub_ysmaller
        bltu    yl, xl, .Lsub_ysmaller
        /* The operands are equal.  Return 0.0.  */
        /* The operands are equal.  Return 0.0.  */
        movi    xh, 0
        movi    xh, 0
        movi    xl, 0
        movi    xl, 0
1:      leaf_return
1:      leaf_return
.Lsub_yexpzero:
.Lsub_yexpzero:
        /* y is a subnormal value.  Replace its sign/exponent with zero,
        /* y is a subnormal value.  Replace its sign/exponent with zero,
           i.e., no implicit "1.0".  Unless x is also a subnormal, increment
           i.e., no implicit "1.0".  Unless x is also a subnormal, increment
           y's apparent exponent because subnormals behave as if they had
           y's apparent exponent because subnormals behave as if they had
           the minimum (nonzero) exponent.  */
           the minimum (nonzero) exponent.  */
        slli    yh, yh, 12
        slli    yh, yh, 12
        srli    yh, yh, 12
        srli    yh, yh, 12
        bnone   xh, a6, .Lsub_yexpdiff
        bnone   xh, a6, .Lsub_yexpdiff
        addi    a8, a8, 1
        addi    a8, a8, 1
        j       .Lsub_yexpdiff
        j       .Lsub_yexpdiff
.Lsub_bigshifty:
.Lsub_bigshifty:
        /* Exponent difference > 64 -- just return the bigger value.  */
        /* Exponent difference > 64 -- just return the bigger value.  */
        bgeui   a10, 64, 1b
        bgeui   a10, 64, 1b
        /* Shift yh/yl right by the exponent difference.  Any bits that are
        /* Shift yh/yl right by the exponent difference.  Any bits that are
           shifted out are saved in a9 for rounding the result.  */
           shifted out are saved in a9 for rounding the result.  */
        ssr     a10
        ssr     a10
        sll     a11, yl         /* lost bits shifted out of yl */
        sll     a11, yl         /* lost bits shifted out of yl */
        src     a9, yh, yl
        src     a9, yh, yl
        srl     yl, yh
        srl     yl, yh
        movi    yh, 0
        movi    yh, 0
        beqz    a11, .Lsub_suby
        beqz    a11, .Lsub_suby
        or      a9, a9, a10     /* any positive, nonzero value will work */
        or      a9, a9, a10     /* any positive, nonzero value will work */
        j       .Lsub_suby
        j       .Lsub_suby
.Lsub_xsmaller:
.Lsub_xsmaller:
        /* Same thing as the "ysmaller" code, but with x and y swapped and
        /* Same thing as the "ysmaller" code, but with x and y swapped and
           with y negated.  */
           with y negated.  */
        bnone   xh, a6, .Lsub_xexpzero
        bnone   xh, a6, .Lsub_xexpzero
        or      xh, xh, a6
        or      xh, xh, a6
        slli    xh, xh, 11
        slli    xh, xh, 11
        srli    xh, xh, 11
        srli    xh, xh, 11
.Lsub_xexpdiff:
.Lsub_xexpdiff:
        sub     a10, a8, a7
        sub     a10, a8, a7
        bgeui   a10, 32, .Lsub_bigshiftx
        bgeui   a10, 32, .Lsub_bigshiftx
        ssr     a10
        ssr     a10
        movi    a9, 0
        movi    a9, 0
        src     a9, xl, a9
        src     a9, xl, a9
        src     xl, xh, xl
        src     xl, xh, xl
        srl     xh, xh
        srl     xh, xh
        /* Negate y.  */
        /* Negate y.  */
        slli    a11, a6, 11
        slli    a11, a6, 11
        xor     yh, yh, a11
        xor     yh, yh, a11
.Lsub_subx:
.Lsub_subx:
        sub     xl, yl, xl
        sub     xl, yl, xl
        sub     xh, yh, xh
        sub     xh, yh, xh
        bgeu    yl, xl, 1f
        bgeu    yl, xl, 1f
        addi    xh, xh, -1
        addi    xh, xh, -1
1:
1:
        /* Subtract the leftover bits in a9 from zero and propagate any
        /* Subtract the leftover bits in a9 from zero and propagate any
           borrow from xh/xl.  */
           borrow from xh/xl.  */
        neg     a9, a9
        neg     a9, a9
        beqz    a9, 1f
        beqz    a9, 1f
        addi    a5, xh, -1
        addi    a5, xh, -1
        moveqz  xh, a5, xl
        moveqz  xh, a5, xl
        addi    xl, xl, -1
        addi    xl, xl, -1
1:
1:
        /* Check if the subtract underflowed into the exponent.  */
        /* Check if the subtract underflowed into the exponent.  */
        extui   a10, xh, 20, 11
        extui   a10, xh, 20, 11
        bne     a10, a8, .Lsub_borrow
        bne     a10, a8, .Lsub_borrow
.Lsub_round:
.Lsub_round:
        /* Round up if the leftover fraction is >= 1/2.  */
        /* Round up if the leftover fraction is >= 1/2.  */
        bgez    a9, 1f
        bgez    a9, 1f
        addi    xl, xl, 1
        addi    xl, xl, 1
        beqz    xl, .Lsub_roundcarry
        beqz    xl, .Lsub_roundcarry
        /* Check if the leftover fraction is exactly 1/2.  */
        /* Check if the leftover fraction is exactly 1/2.  */
        slli    a9, a9, 1
        slli    a9, a9, 1
        beqz    a9, .Lsub_exactlyhalf
        beqz    a9, .Lsub_exactlyhalf
1:      leaf_return
1:      leaf_return
.Lsub_xexpzero:
.Lsub_xexpzero:
        /* Same as "yexpzero".  */
        /* Same as "yexpzero".  */
        slli    xh, xh, 12
        slli    xh, xh, 12
        srli    xh, xh, 12
        srli    xh, xh, 12
        bnone   yh, a6, .Lsub_xexpdiff
        bnone   yh, a6, .Lsub_xexpdiff
        addi    a7, a7, 1
        addi    a7, a7, 1
        j       .Lsub_xexpdiff
        j       .Lsub_xexpdiff
.Lsub_bigshiftx:
	/* Mostly the same thing as "bigshifty", but with the sign bit of the
	   shifted value set so that the subsequent subtraction flips the
	   sign of y.  */
	/* If the shift amount (a10) is 64 or more, x is too small to
	   affect y at all; just return -y.  */
	bgeui	a10, 64, .Lsub_returny
	/* Funnel-shift the mantissa (xh/xl) right by a10: a11 catches the
	   bits shifted all the way out (sticky bits), a9 becomes the
	   guard/round word, xl the new low word.  */
	ssr	a10
	sll	a11, xl
	src	a9, xh, xl
	srl	xl, xh
	slli	xh, a6, 11	/* set sign bit of xh */
	beqz	a11, .Lsub_subx
	/* Nonzero bits were shifted out; fold a sticky indication into the
	   guard word (any nonzero value ORed in works -- a10 is handy).  */
	or	a9, a9, a10
	j	.Lsub_subx
.Lsub_returny:
	/* x is negligible relative to y: negate y and return it.  */
	slli	a7, a6, 11	/* a7 = sign-bit mask (a6 << 11) */
	xor	xh, yh, a7	/* flip the sign of y */
	mov	xl, yl
	leaf_return
.Lsub_borrow:
	/* The subtraction has underflowed into the exponent field, so the
	   value needs to be renormalized.  Shift the mantissa left as
	   needed to remove any leading zeros and adjust the exponent
	   accordingly.  If the exponent is not large enough to remove
	   all the leading zeros, the result will be a subnormal value.
	   On entry a10 holds x's current exponent field (extracted by the
	   guard just before .Lsub_round) and a9 holds the rounding word.  */
	slli	a8, xh, 12	/* a8 = high mantissa bits, left-justified */
	beqz	a8, .Lsub_xhzero
	do_nsau	a6, a8, a7, a11	/* a6 = leading-zero count of a8 */
	srli	a8, a8, 12
	/* If the exponent cannot absorb a shift of a6, the result is
	   subnormal.  */
	bge	a6, a10, .Lsub_subnormal
	addi	a6, a6, 1
.Lsub_shift_lt32:
	/* Shift the mantissa (a8/xl/a9) left by a6 (known to be < 32).  */
	ssl	a6
	src	a8, a8, xl
	src	xl, xl, a9
	sll	a9, a9
	/* Combine the shifted mantissa with the sign and exponent,
	   decrementing the exponent by a6.  (The exponent has already
	   been decremented by one due to the borrow from the subtraction,
	   but adding the mantissa will increment the exponent by one.)  */
	srli	xh, xh, 20
	sub	xh, xh, a6
	slli	xh, xh, 20
	add	xh, xh, a8
	j	.Lsub_round
.Lsub_exactlyhalf:
	/* The leftover fraction is exactly 1/2: round to the nearest even
	   value by clearing the least-significant bit of xl.  */
	srli	xl, xl, 1
	slli	xl, xl, 1
	leaf_return
.Lsub_roundcarry:
	/* xl is always zero when the rounding increment overflows, so
	   there's no need to round it to an even value; just propagate
	   the carry into the high word.  */
	addi	xh, xh, 1
	/* A carry from the mantissa into the exponent field produces the
	   correct result, so overflow to the exponent is OK.  */
	leaf_return
.Lsub_xhzero:
	/* When normalizing the result, all the mantissa bits in the high
	   word are zero.  Shift by "20 + (leading zero count of xl) + 1".  */
	do_nsau	a6, xl, a7, a11
	addi	a6, a6, 21
	/* If the exponent (a10) is smaller than the required shift, the
	   result is subnormal.  */
	blt	a10, a6, .Lsub_subnormal
.Lsub_normalize_shift:
	/* Shift the mantissa left by a6.  Shifts below 32 reuse the
	   three-word path above; otherwise only xl/a9 contribute bits.  */
	bltui	a6, 32, .Lsub_shift_lt32
	ssl	a6
	src	a8, xl, a9
	sll	xl, a9
	movi	a9, 0
	/* Merge the shifted mantissa with sign/exponent, decrementing the
	   exponent by a6 (see the comment at .Lsub_shift_lt32 for why no
	   extra +/-1 is needed here).  */
	srli	xh, xh, 20
	sub	xh, xh, a6
	slli	xh, xh, 20
	add	xh, xh, a8
	j	.Lsub_round
.Lsub_subnormal:
	/* The exponent is too small to shift away all the leading zeros.
	   Set a6 to the current exponent (which has already been
	   decremented by the borrow) so that the exponent of the result
	   will be zero.  Do not add 1 to a6 in this case, because: (1)
	   adding the mantissa will not increment the exponent, so there is
	   no need to subtract anything extra from the exponent to
	   compensate, and (2) the effective exponent of a subnormal is 1
	   not 0 so the shift amount must be 1 smaller than normal. */
	mov	a6, a10
	j	.Lsub_normalize_shift
#endif /* L_addsubdf3 */
#endif /* L_addsubdf3 */
#ifdef L_muldf3
#ifdef L_muldf3
        /* Multiplication */
        /* Multiplication */
#if !XCHAL_HAVE_MUL16 && !XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MAC16
#if !XCHAL_HAVE_MUL16 && !XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MAC16
#define XCHAL_NO_MUL 1
#define XCHAL_NO_MUL 1
#endif
#endif
__muldf3_aux:
	/* Handle unusual cases (zeros, subnormals, NaNs and Infinities).
	   (This code is placed before the start of the function just to
	   keep it in range of the limited branch displacements.)  */
.Lmul_xexpzero:
	/* Clear the sign bit of x.  */
	slli	xh, xh, 1
	srli	xh, xh, 1
	/* If x is zero, return zero.  */
	or	a10, xh, xl
	beqz	a10, .Lmul_return_zero
	/* Normalize x.  Adjust the exponent in a8.  */
	beqz	xh, .Lmul_xh_zero
	do_nsau	a10, xh, a11, a12	/* a10 = leading zeros of xh */
	addi	a10, a10, -11		/* left shift to land the MSB at bit 20,
					   the implicit-one position */
	ssl	a10
	src	xh, xh, xl
	sll	xl, xl
	movi	a8, 1
	sub	a8, a8, a10		/* effective exponent = 1 - shift */
	j	.Lmul_xnormalized
.Lmul_xh_zero:
	/* The high word of x is zero; normalize from xl alone.  */
	do_nsau	a10, xl, a11, a12
	addi	a10, a10, -11
	movi	a8, -31
	sub	a8, a8, a10		/* exponent adjusted for the 32-bit word move */
	ssl	a10
	bltz	a10, .Lmul_xl_srl
	sll	xh, xl
	movi	xl, 0
	j	.Lmul_xnormalized
.Lmul_xl_srl:
	/* Negative shift count (xl has more than 21 significant bits):
	   shift right instead, splitting xl across xh/xl.  */
	srl	xh, xl
	sll	xl, xl
	j	.Lmul_xnormalized
.Lmul_yexpzero:
	/* Mirror image of .Lmul_xexpzero, operating on y.  */
	/* Clear the sign bit of y.  */
	slli	yh, yh, 1
	srli	yh, yh, 1
	/* If y is zero, return zero.  */
	or	a10, yh, yl
	beqz	a10, .Lmul_return_zero
	/* Normalize y.  Adjust the exponent in a9.  */
	beqz	yh, .Lmul_yh_zero
	do_nsau	a10, yh, a11, a12	/* a10 = leading zeros of yh */
	addi	a10, a10, -11
	ssl	a10
	src	yh, yh, yl
	sll	yl, yl
	movi	a9, 1
	sub	a9, a9, a10		/* effective exponent = 1 - shift */
	j	.Lmul_ynormalized
.Lmul_yh_zero:
	/* The high word of y is zero; normalize from yl alone.  */
	do_nsau	a10, yl, a11, a12
	addi	a10, a10, -11
	movi	a9, -31
	sub	a9, a9, a10		/* exponent adjusted for the 32-bit word move */
	ssl	a10
	bltz	a10, .Lmul_yl_srl
	sll	yh, yl
	movi	yl, 0
	j	.Lmul_ynormalized
.Lmul_yl_srl:
	/* Negative shift count (yl has more than 21 significant bits):
	   shift right instead, splitting yl across yh/yl.  */
	srl	yh, yl
	sll	yl, yl
	j	.Lmul_ynormalized
.Lmul_return_zero:
	/* Return zero with the appropriate sign bit, taken from the top
	   bit of a7 (the XOR of the operand signs).  */
	srli	xh, a7, 31
	slli	xh, xh, 31
	movi	xl, 0
	j	.Lmul_done
.Lmul_xnan_or_inf:
	/* x is NaN or Infinity (exponent field all ones).  */
	/* If y is zero, return NaN (Inf * 0 is invalid).  */
	bnez	yl, 1f
	slli	a8, yh, 1	/* drop y's sign bit for the zero test */
	bnez	a8, 1f
	movi	a4, 0x80000	/* make it a quiet NaN */
	or	xh, xh, a4
	j	.Lmul_done
1:
	/* If y is NaN, return y.  */
	bnall	yh, a6, .Lmul_returnx	/* y's exponent not all ones: keep x */
	slli	a8, yh, 12
	or	a8, a8, yl
	beqz	a8, .Lmul_returnx	/* zero mantissa: y is Inf, keep x */
.Lmul_returny:
	mov	xh, yh
	mov	xl, yl
.Lmul_returnx:
	/* Replace the sign bit with a7's sign (XOR of input signs) and
	   return.  */
	extui	a7, a7, 31, 1
	slli	xh, xh, 1
	ssai	1
	src	xh, a7, xh
	j	.Lmul_done
.Lmul_ynan_or_inf:
	/* y is NaN or Infinity.  If x is nonzero, return y (the sign is
	   fixed up at .Lmul_returny/.Lmul_returnx).  If x is zero, the
	   result is NaN.  */
	bnez	xl, .Lmul_returny
	slli	a8, xh, 1	/* drop x's sign bit for the zero test */
	bnez	a8, .Lmul_returny
	movi	a7, 0x80000	/* make it a quiet NaN */
	or	xh, yh, a7
	j	.Lmul_done
        .align  4
        .align  4
        .global __muldf3
        .global __muldf3
        .type   __muldf3, @function
        .type   __muldf3, @function
__muldf3:
__muldf3:
#if __XTENSA_CALL0_ABI__
#if __XTENSA_CALL0_ABI__
        leaf_entry sp, 32
        leaf_entry sp, 32
        addi    sp, sp, -32
        addi    sp, sp, -32
        s32i    a12, sp, 16
        s32i    a12, sp, 16
        s32i    a13, sp, 20
        s32i    a13, sp, 20
        s32i    a14, sp, 24
        s32i    a14, sp, 24
        s32i    a15, sp, 28
        s32i    a15, sp, 28
#elif XCHAL_NO_MUL
#elif XCHAL_NO_MUL
        /* This is not really a leaf function; allocate enough stack space
        /* This is not really a leaf function; allocate enough stack space
           to allow CALL12s to a helper function.  */
           to allow CALL12s to a helper function.  */
        leaf_entry sp, 64
        leaf_entry sp, 64
#else
#else
        leaf_entry sp, 32
        leaf_entry sp, 32
#endif
#endif
        movi    a6, 0x7ff00000
        movi    a6, 0x7ff00000
        /* Get the sign of the result.  */
        /* Get the sign of the result.  */
        xor     a7, xh, yh
        xor     a7, xh, yh
        /* Check for NaN and infinity.  */
        /* Check for NaN and infinity.  */
        ball    xh, a6, .Lmul_xnan_or_inf
        ball    xh, a6, .Lmul_xnan_or_inf
        ball    yh, a6, .Lmul_ynan_or_inf
        ball    yh, a6, .Lmul_ynan_or_inf
        /* Extract the exponents.  */
        /* Extract the exponents.  */
        extui   a8, xh, 20, 11
        extui   a8, xh, 20, 11
        extui   a9, yh, 20, 11
        extui   a9, yh, 20, 11
        beqz    a8, .Lmul_xexpzero
        beqz    a8, .Lmul_xexpzero
.Lmul_xnormalized:
.Lmul_xnormalized:
        beqz    a9, .Lmul_yexpzero
        beqz    a9, .Lmul_yexpzero
.Lmul_ynormalized:
.Lmul_ynormalized:
        /* Add the exponents.  */
        /* Add the exponents.  */
        add     a8, a8, a9
        add     a8, a8, a9
        /* Replace sign/exponent fields with explicit "1.0".  */
        /* Replace sign/exponent fields with explicit "1.0".  */
        movi    a10, 0x1fffff
        movi    a10, 0x1fffff
        or      xh, xh, a6
        or      xh, xh, a6
        and     xh, xh, a10
        and     xh, xh, a10
        or      yh, yh, a6
        or      yh, yh, a6
        and     yh, yh, a10
        and     yh, yh, a10
        /* Multiply 64x64 to 128 bits.  The result ends up in xh/xl/a6.
        /* Multiply 64x64 to 128 bits.  The result ends up in xh/xl/a6.
           The least-significant word of the result is thrown away except
           The least-significant word of the result is thrown away except
           that if it is nonzero, the lsb of a6 is set to 1.  */
           that if it is nonzero, the lsb of a6 is set to 1.  */
#if XCHAL_HAVE_MUL32_HIGH
#if XCHAL_HAVE_MUL32_HIGH
        /* Compute a6 with any carry-outs in a10.  */
        /* Compute a6 with any carry-outs in a10.  */
        movi    a10, 0
        movi    a10, 0
        mull    a6, xl, yh
        mull    a6, xl, yh
        mull    a11, xh, yl
        mull    a11, xh, yl
        add     a6, a6, a11
        add     a6, a6, a11
        bgeu    a6, a11, 1f
        bgeu    a6, a11, 1f
        addi    a10, a10, 1
        addi    a10, a10, 1
1:
1:
        muluh   a11, xl, yl
        muluh   a11, xl, yl
        add     a6, a6, a11
        add     a6, a6, a11
        bgeu    a6, a11, 1f
        bgeu    a6, a11, 1f
        addi    a10, a10, 1
        addi    a10, a10, 1
1:
1:
        /* If the low word of the result is nonzero, set the lsb of a6.  */
        /* If the low word of the result is nonzero, set the lsb of a6.  */
        mull    a11, xl, yl
        mull    a11, xl, yl
        beqz    a11, 1f
        beqz    a11, 1f
        movi    a9, 1
        movi    a9, 1
        or      a6, a6, a9
        or      a6, a6, a9
1:
1:
        /* Compute xl with any carry-outs in a9.  */
        /* Compute xl with any carry-outs in a9.  */
        movi    a9, 0
        movi    a9, 0
        mull    a11, xh, yh
        mull    a11, xh, yh
        add     a10, a10, a11
        add     a10, a10, a11
        bgeu    a10, a11, 1f
        bgeu    a10, a11, 1f
        addi    a9, a9, 1
        addi    a9, a9, 1
1:
1:
        muluh   a11, xh, yl
        muluh   a11, xh, yl
        add     a10, a10, a11
        add     a10, a10, a11
        bgeu    a10, a11, 1f
        bgeu    a10, a11, 1f
        addi    a9, a9, 1
        addi    a9, a9, 1
1:
1:
        muluh   xl, xl, yh
        muluh   xl, xl, yh
        add     xl, xl, a10
        add     xl, xl, a10
        bgeu    xl, a10, 1f
        bgeu    xl, a10, 1f
        addi    a9, a9, 1
        addi    a9, a9, 1
1:
1:
        /* Compute xh.  */
        /* Compute xh.  */
        muluh   xh, xh, yh
        muluh   xh, xh, yh
        add     xh, xh, a9
        add     xh, xh, a9
#else /* ! XCHAL_HAVE_MUL32_HIGH */
#else /* ! XCHAL_HAVE_MUL32_HIGH */
        /* Break the inputs into 16-bit chunks and compute 16 32-bit partial
        /* Break the inputs into 16-bit chunks and compute 16 32-bit partial
           products.  These partial products are:
           products.  These partial products are:
                0 xll * yll
                0 xll * yll
                1 xll * ylh
                1 xll * ylh
                2 xlh * yll
                2 xlh * yll
                3 xll * yhl
                3 xll * yhl
                4 xlh * ylh
                4 xlh * ylh
                5 xhl * yll
                5 xhl * yll
                6 xll * yhh
                6 xll * yhh
                7 xlh * yhl
                7 xlh * yhl
                8 xhl * ylh
                8 xhl * ylh
                9 xhh * yll
                9 xhh * yll
                10 xlh * yhh
                10 xlh * yhh
                11 xhl * yhl
                11 xhl * yhl
                12 xhh * ylh
                12 xhh * ylh
                13 xhl * yhh
                13 xhl * yhh
                14 xhh * yhl
                14 xhh * yhl
                15 xhh * yhh
                15 xhh * yhh
           where the input chunks are (hh, hl, lh, ll).  If using the Mul16
           where the input chunks are (hh, hl, lh, ll).  If using the Mul16
           or Mul32 multiplier options, these input chunks must be stored in
           or Mul32 multiplier options, these input chunks must be stored in
           separate registers.  For Mac16, the UMUL.AA.* opcodes can specify
           separate registers.  For Mac16, the UMUL.AA.* opcodes can specify
           that the inputs come from either half of the registers, so there
           that the inputs come from either half of the registers, so there
           is no need to shift them out ahead of time.  If there is no
           is no need to shift them out ahead of time.  If there is no
           multiply hardware, the 16-bit chunks can be extracted when setting
           multiply hardware, the 16-bit chunks can be extracted when setting
           up the arguments to the separate multiply function.  */
           up the arguments to the separate multiply function.  */
        /* Save a7 since it is needed to hold a temporary value.  */
        /* Save a7 since it is needed to hold a temporary value.  */
        s32i    a7, sp, 4
        s32i    a7, sp, 4
#if __XTENSA_CALL0_ABI__ && XCHAL_NO_MUL
#if __XTENSA_CALL0_ABI__ && XCHAL_NO_MUL
        /* Calling a separate multiply function will clobber a0 and requires
        /* Calling a separate multiply function will clobber a0 and requires
           use of a8 as a temporary, so save those values now.  (The function
           use of a8 as a temporary, so save those values now.  (The function
           uses a custom ABI so nothing else needs to be saved.)  */
           uses a custom ABI so nothing else needs to be saved.)  */
        s32i    a0, sp, 0
        s32i    a0, sp, 0
        s32i    a8, sp, 8
        s32i    a8, sp, 8
#endif
#endif
#if XCHAL_HAVE_MUL16 || XCHAL_HAVE_MUL32
#if XCHAL_HAVE_MUL16 || XCHAL_HAVE_MUL32
#define xlh a12
#define xlh a12
#define ylh a13
#define ylh a13
#define xhh a14
#define xhh a14
#define yhh a15
#define yhh a15
        /* Get the high halves of the inputs into registers.  */
        /* Get the high halves of the inputs into registers.  */
        srli    xlh, xl, 16
        srli    xlh, xl, 16
        srli    ylh, yl, 16
        srli    ylh, yl, 16
        srli    xhh, xh, 16
        srli    xhh, xh, 16
        srli    yhh, yh, 16
        srli    yhh, yh, 16
#define xll xl
#define xll xl
#define yll yl
#define yll yl
#define xhl xh
#define xhl xh
#define yhl yh
#define yhl yh
#if XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MUL16
#if XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MUL16
        /* Clear the high halves of the inputs.  This does not matter
        /* Clear the high halves of the inputs.  This does not matter
           for MUL16 because the high bits are ignored.  */
           for MUL16 because the high bits are ignored.  */
        extui   xl, xl, 0, 16
        extui   xl, xl, 0, 16
        extui   xh, xh, 0, 16
        extui   xh, xh, 0, 16
        extui   yl, yl, 0, 16
        extui   yl, yl, 0, 16
        extui   yh, yh, 0, 16
        extui   yh, yh, 0, 16
#endif
#endif
#endif /* MUL16 || MUL32 */
#endif /* MUL16 || MUL32 */
#if XCHAL_HAVE_MUL16
#if XCHAL_HAVE_MUL16
#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
        mul16u  dst, xreg ## xhalf, yreg ## yhalf
        mul16u  dst, xreg ## xhalf, yreg ## yhalf
#elif XCHAL_HAVE_MUL32
#elif XCHAL_HAVE_MUL32
#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
        mull    dst, xreg ## xhalf, yreg ## yhalf
        mull    dst, xreg ## xhalf, yreg ## yhalf
#elif XCHAL_HAVE_MAC16
#elif XCHAL_HAVE_MAC16
/* The preprocessor insists on inserting a space when concatenating after
/* The preprocessor insists on inserting a space when concatenating after
   a period in the definition of do_mul below.  These macros are a workaround
   a period in the definition of do_mul below.  These macros are a workaround
   using underscores instead of periods when doing the concatenation.  */
   using underscores instead of periods when doing the concatenation.  */
#define umul_aa_ll umul.aa.ll
#define umul_aa_ll umul.aa.ll
#define umul_aa_lh umul.aa.lh
#define umul_aa_lh umul.aa.lh
#define umul_aa_hl umul.aa.hl
#define umul_aa_hl umul.aa.hl
#define umul_aa_hh umul.aa.hh
#define umul_aa_hh umul.aa.hh
#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
        umul_aa_ ## xhalf ## yhalf      xreg, yreg; \
        umul_aa_ ## xhalf ## yhalf      xreg, yreg; \
        rsr     dst, ACCLO
        rsr     dst, ACCLO
#else /* no multiply hardware */
#else /* no multiply hardware */
#define set_arg_l(dst, src) \
#define set_arg_l(dst, src) \
        extui   dst, src, 0, 16
        extui   dst, src, 0, 16
#define set_arg_h(dst, src) \
#define set_arg_h(dst, src) \
        srli    dst, src, 16
        srli    dst, src, 16
#if __XTENSA_CALL0_ABI__
#if __XTENSA_CALL0_ABI__
#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
        set_arg_ ## xhalf (a13, xreg); \
        set_arg_ ## xhalf (a13, xreg); \
        set_arg_ ## yhalf (a14, yreg); \
        set_arg_ ## yhalf (a14, yreg); \
        call0   .Lmul_mulsi3; \
        call0   .Lmul_mulsi3; \
        mov     dst, a12
        mov     dst, a12
#else
#else
#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
        set_arg_ ## xhalf (a14, xreg); \
        set_arg_ ## xhalf (a14, xreg); \
        set_arg_ ## yhalf (a15, yreg); \
        set_arg_ ## yhalf (a15, yreg); \
        call12  .Lmul_mulsi3; \
        call12  .Lmul_mulsi3; \
        mov     dst, a14
        mov     dst, a14
#endif /* __XTENSA_CALL0_ABI__ */
#endif /* __XTENSA_CALL0_ABI__ */
#endif /* no multiply hardware */
#endif /* no multiply hardware */
        /* Add pp1 and pp2 into a10 with carry-out in a9.  */
        /* Add pp1 and pp2 into a10 with carry-out in a9.  */
        do_mul(a10, xl, l, yl, h)       /* pp 1 */
        do_mul(a10, xl, l, yl, h)       /* pp 1 */
        do_mul(a11, xl, h, yl, l)       /* pp 2 */
        do_mul(a11, xl, h, yl, l)       /* pp 2 */
        movi    a9, 0
        movi    a9, 0
        add     a10, a10, a11
        add     a10, a10, a11
        bgeu    a10, a11, 1f
        bgeu    a10, a11, 1f
        addi    a9, a9, 1
        addi    a9, a9, 1
1:
1:
        /* Initialize a6 with a9/a10 shifted into position.  Note that
        /* Initialize a6 with a9/a10 shifted into position.  Note that
           this value can be safely incremented without any carry-outs.  */
           this value can be safely incremented without any carry-outs.  */
        ssai    16
        ssai    16
        src     a6, a9, a10
        src     a6, a9, a10
        /* Compute the low word into a10.  */
        /* Compute the low word into a10.  */
        do_mul(a11, xl, l, yl, l)       /* pp 0 */
        do_mul(a11, xl, l, yl, l)       /* pp 0 */
        sll     a10, a10
        sll     a10, a10
        add     a10, a10, a11
        add     a10, a10, a11
        bgeu    a10, a11, 1f
        bgeu    a10, a11, 1f
        addi    a6, a6, 1
        addi    a6, a6, 1
1:
1:
        /* Compute the contributions of pp0-5 to a6, with carry-outs in a9.
        /* Compute the contributions of pp0-5 to a6, with carry-outs in a9.
           This is good enough to determine the low half of a6, so that any
           This is good enough to determine the low half of a6, so that any
           nonzero bits from the low word of the result can be collapsed
           nonzero bits from the low word of the result can be collapsed
           into a6, freeing up a register.  */
           into a6, freeing up a register.  */
        movi    a9, 0
        movi    a9, 0
        do_mul(a11, xl, l, yh, l)       /* pp 3 */
        do_mul(a11, xl, l, yh, l)       /* pp 3 */
        add     a6, a6, a11
        add     a6, a6, a11
        bgeu    a6, a11, 1f
        bgeu    a6, a11, 1f
        addi    a9, a9, 1
        addi    a9, a9, 1
1:
1:
        do_mul(a11, xl, h, yl, h)       /* pp 4 */
        do_mul(a11, xl, h, yl, h)       /* pp 4 */
        add     a6, a6, a11
        add     a6, a6, a11
        bgeu    a6, a11, 1f
        bgeu    a6, a11, 1f
        addi    a9, a9, 1
        addi    a9, a9, 1
1:
1:
        do_mul(a11, xh, l, yl, l)       /* pp 5 */
        do_mul(a11, xh, l, yl, l)       /* pp 5 */
        add     a6, a6, a11
        add     a6, a6, a11
        bgeu    a6, a11, 1f
        bgeu    a6, a11, 1f
        addi    a9, a9, 1
        addi    a9, a9, 1
1:
1:
        /* Collapse any nonzero bits from the low word into a6.  */
        /* Collapse any nonzero bits from the low word into a6.  */
        beqz    a10, 1f
        beqz    a10, 1f
        movi    a11, 1
        movi    a11, 1
        or      a6, a6, a11
        or      a6, a6, a11
1:
1:
        /* Add pp6-9 into a11 with carry-outs in a10.  */
        /* Add pp6-9 into a11 with carry-outs in a10.  */
        do_mul(a7, xl, l, yh, h)        /* pp 6 */
        do_mul(a7, xl, l, yh, h)        /* pp 6 */
        do_mul(a11, xh, h, yl, l)       /* pp 9 */
        do_mul(a11, xh, h, yl, l)       /* pp 9 */
        movi    a10, 0
        movi    a10, 0
        add     a11, a11, a7
        add     a11, a11, a7
        bgeu    a11, a7, 1f
        bgeu    a11, a7, 1f
        addi    a10, a10, 1
        addi    a10, a10, 1
1:
1:
        do_mul(a7, xl, h, yh, l)        /* pp 7 */
        do_mul(a7, xl, h, yh, l)        /* pp 7 */
        add     a11, a11, a7
        add     a11, a11, a7
        bgeu    a11, a7, 1f
        bgeu    a11, a7, 1f
        addi    a10, a10, 1
        addi    a10, a10, 1
1:
1:
        do_mul(a7, xh, l, yl, h)        /* pp 8 */
        do_mul(a7, xh, l, yl, h)        /* pp 8 */
        add     a11, a11, a7
        add     a11, a11, a7
        bgeu    a11, a7, 1f
        bgeu    a11, a7, 1f
        addi    a10, a10, 1
        addi    a10, a10, 1
1:
1:
        /* Shift a10/a11 into position, and add low half of a11 to a6.  */
        /* Shift a10/a11 into position, and add low half of a11 to a6.  */
        src     a10, a10, a11
        src     a10, a10, a11
        add     a10, a10, a9
        add     a10, a10, a9
        sll     a11, a11
        sll     a11, a11
        add     a6, a6, a11
        add     a6, a6, a11
        bgeu    a6, a11, 1f
        bgeu    a6, a11, 1f
        addi    a10, a10, 1
        addi    a10, a10, 1
1:
1:
        /* Add pp10-12 into xl with carry-outs in a9.  */
        /* Add pp10-12 into xl with carry-outs in a9.  */
        movi    a9, 0
        movi    a9, 0
        do_mul(xl, xl, h, yh, h)        /* pp 10 */
        do_mul(xl, xl, h, yh, h)        /* pp 10 */
        add     xl, xl, a10
        add     xl, xl, a10
        bgeu    xl, a10, 1f
        bgeu    xl, a10, 1f
        addi    a9, a9, 1
        addi    a9, a9, 1
1:
1:
        do_mul(a10, xh, l, yh, l)       /* pp 11 */
        do_mul(a10, xh, l, yh, l)       /* pp 11 */
        add     xl, xl, a10
        add     xl, xl, a10
        bgeu    xl, a10, 1f
        bgeu    xl, a10, 1f
        addi    a9, a9, 1
        addi    a9, a9, 1
1:
1:
        do_mul(a10, xh, h, yl, h)       /* pp 12 */
        do_mul(a10, xh, h, yl, h)       /* pp 12 */
        add     xl, xl, a10
        add     xl, xl, a10
        bgeu    xl, a10, 1f
        bgeu    xl, a10, 1f
        addi    a9, a9, 1
        addi    a9, a9, 1
1:
1:
        /* Add pp13-14 into a11 with carry-outs in a10.  */
        /* Add pp13-14 into a11 with carry-outs in a10.  */
        do_mul(a11, xh, l, yh, h)       /* pp 13 */
        do_mul(a11, xh, l, yh, h)       /* pp 13 */
        do_mul(a7, xh, h, yh, l)        /* pp 14 */
        do_mul(a7, xh, h, yh, l)        /* pp 14 */
        movi    a10, 0
        movi    a10, 0
        add     a11, a11, a7
        add     a11, a11, a7
        bgeu    a11, a7, 1f
        bgeu    a11, a7, 1f
        addi    a10, a10, 1
        addi    a10, a10, 1
1:
1:
        /* Shift a10/a11 into position, and add low half of a11 to a6.  */
        /* Shift a10/a11 into position, and add low half of a11 to a6.  */
        src     a10, a10, a11
        src     a10, a10, a11
        add     a10, a10, a9
        add     a10, a10, a9
        sll     a11, a11
        sll     a11, a11
        add     xl, xl, a11
        add     xl, xl, a11
        bgeu    xl, a11, 1f
        bgeu    xl, a11, 1f
        addi    a10, a10, 1
        addi    a10, a10, 1
1:
1:
        /* Compute xh.  */
        /* Compute xh.  */
        do_mul(xh, xh, h, yh, h)        /* pp 15 */
        do_mul(xh, xh, h, yh, h)        /* pp 15 */
        add     xh, xh, a10
        add     xh, xh, a10
        /* Restore values saved on the stack during the multiplication.  */
        /* Restore values saved on the stack during the multiplication.  */
        l32i    a7, sp, 4
        l32i    a7, sp, 4
#if __XTENSA_CALL0_ABI__ && XCHAL_NO_MUL
#if __XTENSA_CALL0_ABI__ && XCHAL_NO_MUL
        l32i    a0, sp, 0
        l32i    a0, sp, 0
        l32i    a8, sp, 8
        l32i    a8, sp, 8
#endif
#endif
#endif /* ! XCHAL_HAVE_MUL32_HIGH */
#endif /* ! XCHAL_HAVE_MUL32_HIGH */
        /* Shift left by 12 bits, unless there was a carry-out from the
        /* Shift left by 12 bits, unless there was a carry-out from the
           multiply, in which case, shift by 11 bits and increment the
           multiply, in which case, shift by 11 bits and increment the
           exponent.  Note: It is convenient to use the constant 0x3ff
           exponent.  Note: It is convenient to use the constant 0x3ff
           instead of 0x400 when removing the extra exponent bias (so that
           instead of 0x400 when removing the extra exponent bias (so that
           it is easy to construct 0x7fe for the overflow check).  Reverse
           it is easy to construct 0x7fe for the overflow check).  Reverse
           the logic here to decrement the exponent sum by one unless there
           the logic here to decrement the exponent sum by one unless there
           was a carry-out.  */
           was a carry-out.  */
        movi    a4, 11
        movi    a4, 11
        srli    a5, xh, 21 - 12
        srli    a5, xh, 21 - 12
        bnez    a5, 1f
        bnez    a5, 1f
        addi    a4, a4, 1
        addi    a4, a4, 1
        addi    a8, a8, -1
        addi    a8, a8, -1
1:      ssl     a4
1:      ssl     a4
        src     xh, xh, xl
        src     xh, xh, xl
        src     xl, xl, a6
        src     xl, xl, a6
        sll     a6, a6
        sll     a6, a6
        /* Subtract the extra bias from the exponent sum (plus one to account
        /* Subtract the extra bias from the exponent sum (plus one to account
           for the explicit "1.0" of the mantissa that will be added to the
           for the explicit "1.0" of the mantissa that will be added to the
           exponent in the final result).  */
           exponent in the final result).  */
        movi    a4, 0x3ff
        movi    a4, 0x3ff
        sub     a8, a8, a4
        sub     a8, a8, a4
        /* Check for over/underflow.  The value in a8 is one less than the
        /* Check for over/underflow.  The value in a8 is one less than the
           final exponent, so values in the range 0..7fd are OK here.  */
           final exponent, so values in the range 0..7fd are OK here.  */
        slli    a4, a4, 1       /* 0x7fe */
        slli    a4, a4, 1       /* 0x7fe */
        bgeu    a8, a4, .Lmul_overflow
        bgeu    a8, a4, .Lmul_overflow
.Lmul_round:
.Lmul_round:
        /* Round.  */
        /* Round.  */
        bgez    a6, .Lmul_rounded
        bgez    a6, .Lmul_rounded
        addi    xl, xl, 1
        addi    xl, xl, 1
        beqz    xl, .Lmul_roundcarry
        beqz    xl, .Lmul_roundcarry
        slli    a6, a6, 1
        slli    a6, a6, 1
        beqz    a6, .Lmul_exactlyhalf
        beqz    a6, .Lmul_exactlyhalf
.Lmul_rounded:
.Lmul_rounded:
        /* Add the exponent to the mantissa.  */
        /* Add the exponent to the mantissa.  */
        slli    a8, a8, 20
        slli    a8, a8, 20
        add     xh, xh, a8
        add     xh, xh, a8
.Lmul_addsign:
.Lmul_addsign:
        /* Add the sign bit.  */
        /* Add the sign bit.  */
        srli    a7, a7, 31
        srli    a7, a7, 31
        slli    a7, a7, 31
        slli    a7, a7, 31
        or      xh, xh, a7
        or      xh, xh, a7
.Lmul_done:
.Lmul_done:
#if __XTENSA_CALL0_ABI__
#if __XTENSA_CALL0_ABI__
        l32i    a12, sp, 16
        l32i    a12, sp, 16
        l32i    a13, sp, 20
        l32i    a13, sp, 20
        l32i    a14, sp, 24
        l32i    a14, sp, 24
        l32i    a15, sp, 28
        l32i    a15, sp, 28
        addi    sp, sp, 32
        addi    sp, sp, 32
#endif
#endif
        leaf_return
        leaf_return
.Lmul_exactlyhalf:
.Lmul_exactlyhalf:
        /* Round down to the nearest even value.  */
        /* Round down to the nearest even value.  */
        srli    xl, xl, 1
        srli    xl, xl, 1
        slli    xl, xl, 1
        slli    xl, xl, 1
        j       .Lmul_rounded
        j       .Lmul_rounded
.Lmul_roundcarry:
.Lmul_roundcarry:
        /* xl is always zero when the rounding increment overflows, so
        /* xl is always zero when the rounding increment overflows, so
           there's no need to round it to an even value.  */
           there's no need to round it to an even value.  */
        addi    xh, xh, 1
        addi    xh, xh, 1
        /* Overflow is OK -- it will be added to the exponent.  */
        /* Overflow is OK -- it will be added to the exponent.  */
        j       .Lmul_rounded
        j       .Lmul_rounded
.Lmul_overflow:
.Lmul_overflow:
        bltz    a8, .Lmul_underflow
        bltz    a8, .Lmul_underflow
        /* Return +/- Infinity.  */
        /* Return +/- Infinity.  */
        addi    a8, a4, 1       /* 0x7ff */
        addi    a8, a4, 1       /* 0x7ff */
        slli    xh, a8, 20
        slli    xh, a8, 20
        movi    xl, 0
        movi    xl, 0
        j       .Lmul_addsign
        j       .Lmul_addsign
.Lmul_underflow:
.Lmul_underflow:
        /* Create a subnormal value, where the exponent field contains zero,
        /* Create a subnormal value, where the exponent field contains zero,
           but the effective exponent is 1.  The value of a8 is one less than
           but the effective exponent is 1.  The value of a8 is one less than
           the actual exponent, so just negate it to get the shift amount.  */
           the actual exponent, so just negate it to get the shift amount.  */
        neg     a8, a8
        neg     a8, a8
        mov     a9, a6
        mov     a9, a6
        ssr     a8
        ssr     a8
        bgeui   a8, 32, .Lmul_bigshift
        bgeui   a8, 32, .Lmul_bigshift
        /* Shift xh/xl right.  Any bits that are shifted out of xl are saved
        /* Shift xh/xl right.  Any bits that are shifted out of xl are saved
           in a6 (combined with the shifted-out bits currently in a6) for
           in a6 (combined with the shifted-out bits currently in a6) for
           rounding the result.  */
           rounding the result.  */
        sll     a6, xl
        sll     a6, xl
        src     xl, xh, xl
        src     xl, xh, xl
        srl     xh, xh
        srl     xh, xh
        j       1f
        j       1f
.Lmul_bigshift:
.Lmul_bigshift:
        bgeui   a8, 64, .Lmul_flush_to_zero
        bgeui   a8, 64, .Lmul_flush_to_zero
        sll     a10, xl         /* lost bits shifted out of xl */
        sll     a10, xl         /* lost bits shifted out of xl */
        src     a6, xh, xl
        src     a6, xh, xl
        srl     xl, xh
        srl     xl, xh
        movi    xh, 0
        movi    xh, 0
        or      a9, a9, a10
        or      a9, a9, a10
        /* Set the exponent to zero.  */
        /* Set the exponent to zero.  */
1:      movi    a8, 0
1:      movi    a8, 0
        /* Pack any nonzero bits shifted out into a6.  */
        /* Pack any nonzero bits shifted out into a6.  */
        beqz    a9, .Lmul_round
        beqz    a9, .Lmul_round
        movi    a9, 1
        movi    a9, 1
        or      a6, a6, a9
        or      a6, a6, a9
        j       .Lmul_round
        j       .Lmul_round
.Lmul_flush_to_zero:
.Lmul_flush_to_zero:
        /* Return zero with the appropriate sign bit.  */
        /* Return zero with the appropriate sign bit.  */
        srli    xh, a7, 31
        srli    xh, a7, 31
        slli    xh, xh, 31
        slli    xh, xh, 31
        movi    xl, 0
        movi    xl, 0
        j       .Lmul_done
        j       .Lmul_done
#if XCHAL_NO_MUL

	/* For Xtensa processors with no multiply hardware, this simplified
	   version of _mulsi3 is used for multiplying 16-bit chunks of
	   the floating-point mantissas.  When using CALL0, this function
	   uses a custom ABI: the inputs are passed in a13 and a14, the
	   result is returned in a12, and a8 and a15 are clobbered.  */
	.align	4
.Lmul_mulsi3:
	leaf_entry sp, 16

	/* Classic shift-and-add multiply, 4 bits of \src1 per iteration:
	   for each set bit k of \src1, conditionally (movnez) accumulate
	   \src2 << k into \dst, then step \src1 right and \src2 left by 4.
	   The loop exits as soon as no bits remain in \src1.  */
	.macro mul_mulsi3_body dst, src1, src2, tmp1, tmp2
	movi	\dst, 0
1:	add	\tmp1, \src2, \dst		/* candidate: dst + (src2 << 0) */
	extui	\tmp2, \src1, 0, 1		/* bit 0 of src1 */
	movnez	\dst, \tmp1, \tmp2

	do_addx2 \tmp1, \src2, \dst, \tmp1	/* candidate: dst + (src2 << 1) */
	extui	\tmp2, \src1, 1, 1
	movnez	\dst, \tmp1, \tmp2

	do_addx4 \tmp1, \src2, \dst, \tmp1	/* candidate: dst + (src2 << 2) */
	extui	\tmp2, \src1, 2, 1
	movnez	\dst, \tmp1, \tmp2

	do_addx8 \tmp1, \src2, \dst, \tmp1	/* candidate: dst + (src2 << 3) */
	extui	\tmp2, \src1, 3, 1
	movnez	\dst, \tmp1, \tmp2

	srli	\src1, \src1, 4			/* consume 4 multiplier bits */
	slli	\src2, \src2, 4			/* keep multiplicand aligned */
	bnez	\src1, 1b
	.endm

#if __XTENSA_CALL0_ABI__
	mul_mulsi3_body a12, a13, a14, a15, a8
#else
	/* The result will be written into a2, so save that argument in a4.  */
	mov	a4, a2
	mul_mulsi3_body a2, a4, a3, a5, a6
#endif
	leaf_return
#endif /* XCHAL_NO_MUL */
#endif /* L_muldf3 */
#endif /* L_muldf3 */
#ifdef L_divdf3

	/* Division.
	   Operands arrive in the xh/xl and yh/yl register aliases
	   (presumably defined near the top of this file -- confirm there);
	   the quotient is returned in xh/xl.  */
__divdf3_aux:

	/* Handle unusual cases (zeros, subnormals, NaNs and Infinities).
	   (This code is placed before the start of the function just to
	   keep it in range of the limited branch displacements.)  */

.Ldiv_yexpzero:
	/* Clear the sign bit of y.  */
	slli	yh, yh, 1
	srli	yh, yh, 1

	/* Check for division by zero.  */
	or	a10, yh, yl
	beqz	a10, .Ldiv_yzero

	/* Normalize y.  Adjust the exponent in a9.  */
	beqz	yh, .Ldiv_yh_zero
	do_nsau	a10, yh, a11, a9	/* a10 = leading-zero count of yh */
	addi	a10, a10, -11		/* left-shift amount to put MSB at bit 20 */
	ssl	a10
	src	yh, yh, yl
	sll	yl, yl
	movi	a9, 1
	sub	a9, a9, a10		/* effective exponent of a subnormal is 1 */
	j	.Ldiv_ynormalized
.Ldiv_yh_zero:
	/* High word of y is zero: normalize from yl (shift of 32 or more).  */
	do_nsau	a10, yl, a11, a9
	addi	a10, a10, -11
	movi	a9, -31
	sub	a9, a9, a10
	ssl	a10
	bltz	a10, .Ldiv_yl_srl
	sll	yh, yl
	movi	yl, 0
	j	.Ldiv_ynormalized
.Ldiv_yl_srl:
	srl	yh, yl
	sll	yl, yl
	j	.Ldiv_ynormalized

.Ldiv_yzero:
	/* y is zero.  Return NaN if x is also zero; otherwise, infinity.  */
	slli	xh, xh, 1
	srli	xh, xh, 1
	or	xl, xl, xh		/* xl == 0 iff x == +/-0 */
	srli	xh, a7, 31
	slli	xh, xh, 31		/* sign of the result */
	or	xh, xh, a6		/* a6 = 0x7ff00000: infinity exponent */
	bnez	xl, 1f
	movi	a4, 0x80000	/* make it a quiet NaN */
	or	xh, xh, a4
1:	movi	xl, 0
	leaf_return

.Ldiv_xexpzero:
	/* Clear the sign bit of x.  */
	slli	xh, xh, 1
	srli	xh, xh, 1

	/* If x is zero, return zero.  */
	or	a10, xh, xl
	beqz	a10, .Ldiv_return_zero

	/* Normalize x.  Adjust the exponent in a8.  */
	beqz	xh, .Ldiv_xh_zero
	do_nsau	a10, xh, a11, a8
	addi	a10, a10, -11
	ssl	a10
	src	xh, xh, xl
	sll	xl, xl
	movi	a8, 1
	sub	a8, a8, a10
	j	.Ldiv_xnormalized
.Ldiv_xh_zero:
	/* High word of x is zero: normalize from xl (shift of 32 or more).  */
	do_nsau	a10, xl, a11, a8
	addi	a10, a10, -11
	movi	a8, -31
	sub	a8, a8, a10
	ssl	a10
	bltz	a10, .Ldiv_xl_srl
	sll	xh, xl
	movi	xl, 0
	j	.Ldiv_xnormalized
.Ldiv_xl_srl:
	srl	xh, xl
	sll	xl, xl
	j	.Ldiv_xnormalized

.Ldiv_return_zero:
	/* Return zero with the appropriate sign bit.  */
	srli	xh, a7, 31
	slli	xh, xh, 31
	movi	xl, 0
	leaf_return

.Ldiv_xnan_or_inf:
	/* Set the sign bit of the result.  */
	srli	a7, yh, 31
	slli	a7, a7, 31
	xor	xh, xh, a7
	/* If y is NaN or Inf, return NaN.  */
	bnall	yh, a6, 1f
	movi	a4, 0x80000	/* make it a quiet NaN */
	or	xh, xh, a4
1:	leaf_return

.Ldiv_ynan_or_inf:
	/* If y is Infinity, return zero.  */
	slli	a8, yh, 12
	or	a8, a8, yl		/* nonzero mantissa bits => NaN, not Inf */
	beqz	a8, .Ldiv_return_zero
	/* y is NaN; return it.  */
	mov	xh, yh
	mov	xl, yl
	leaf_return

.Ldiv_highequal1:
	/* High words equal: break the tie on the low words.  */
	bltu	xl, yl, 2f
	j	3f

	.align	4
	.global	__divdf3
	.type	__divdf3, @function
__divdf3:
	leaf_entry sp, 16
	movi	a6, 0x7ff00000		/* exponent mask / Inf pattern */

	/* Get the sign of the result.  */
	xor	a7, xh, yh

	/* Check for NaN and infinity.  */
	ball	xh, a6, .Ldiv_xnan_or_inf
	ball	yh, a6, .Ldiv_ynan_or_inf

	/* Extract the exponents.  */
	extui	a8, xh, 20, 11
	extui	a9, yh, 20, 11

	beqz	a9, .Ldiv_yexpzero
.Ldiv_ynormalized:
	beqz	a8, .Ldiv_xexpzero
.Ldiv_xnormalized:

	/* Subtract the exponents.  */
	sub	a8, a8, a9

	/* Replace sign/exponent fields with explicit "1.0".  */
	movi	a10, 0x1fffff
	or	xh, xh, a6
	and	xh, xh, a10
	or	yh, yh, a6
	and	yh, yh, a10

	/* Set SAR for left shift by one.  */
	ssai	(32 - 1)

	/* The first digit of the mantissa division must be a one.
	   Shift x (and adjust the exponent) as needed to make this true.  */
	bltu	yh, xh, 3f
	beq	yh, xh, .Ldiv_highequal1
2:	src	xh, xh, xl
	sll	xl, xl
	addi	a8, a8, -1
3:
	/* Do the first subtraction and shift.  */
	sub	xh, xh, yh
	bgeu	xl, yl, 1f
	addi	xh, xh, -1		/* borrow from the high word */
1:	sub	xl, xl, yl
	src	xh, xh, xl
	sll	xl, xl

	/* Put the quotient into a10/a11.  */
	movi	a10, 0
	movi	a11, 1

	/* Divide one bit at a time for 52 bits.  */
	movi	a9, 52
#if XCHAL_HAVE_LOOPS
	loop	a9, .Ldiv_loopend
#endif
.Ldiv_loop:
	/* Shift the quotient << 1.  */
	src	a10, a10, a11
	sll	a11, a11

	/* Is this digit a 0 or 1?  */
	bltu	xh, yh, 3f
	beq	xh, yh, .Ldiv_highequal2

	/* Output a 1 and subtract.  */
2:	addi	a11, a11, 1
	sub	xh, xh, yh
	bgeu	xl, yl, 1f
	addi	xh, xh, -1		/* borrow from the high word */
1:	sub	xl, xl, yl

	/* Shift the dividend << 1.  */
3:	src	xh, xh, xl
	sll	xl, xl

#if !XCHAL_HAVE_LOOPS
	addi	a9, a9, -1
	bnez	a9, .Ldiv_loop
#endif
.Ldiv_loopend:

	/* Add the exponent bias (less one to account for the explicit "1.0"
	   of the mantissa that will be added to the exponent in the final
	   result).  */
	movi	a9, 0x3fe
	add	a8, a8, a9

	/* Check for over/underflow.  The value in a8 is one less than the
	   final exponent, so values in the range 0..7fd are OK here.  */
	addmi	a9, a9, 0x400	/* 0x7fe */
	bgeu	a8, a9, .Ldiv_overflow

.Ldiv_round:
	/* Round.  The remainder (<< 1) is in xh/xl.  */
	bltu	xh, yh, .Ldiv_rounded
	beq	xh, yh, .Ldiv_highequal3
.Ldiv_roundup:
	addi	a11, a11, 1
	beqz	a11, .Ldiv_roundcarry

.Ldiv_rounded:
	mov	xl, a11
	/* Add the exponent to the mantissa.  */
	slli	a8, a8, 20
	add	xh, a10, a8

.Ldiv_addsign:
	/* Add the sign bit.  */
	srli	a7, a7, 31
	slli	a7, a7, 31
	or	xh, xh, a7
	leaf_return

.Ldiv_highequal2:
	bgeu	xl, yl, 2b
	j	3b

.Ldiv_highequal3:
	bltu	xl, yl, .Ldiv_rounded
	bne	xl, yl, .Ldiv_roundup

	/* Remainder is exactly half the divisor.  Round even.  */
	addi	a11, a11, 1
	beqz	a11, .Ldiv_roundcarry
	srli	a11, a11, 1
	slli	a11, a11, 1
	j	.Ldiv_rounded

.Ldiv_overflow:
	bltz	a8, .Ldiv_underflow
	/* Return +/- Infinity.  */
	addi	a8, a9, 1	/* 0x7ff */
	slli	xh, a8, 20
	movi	xl, 0
	j	.Ldiv_addsign

.Ldiv_underflow:
	/* Create a subnormal value, where the exponent field contains zero,
	   but the effective exponent is 1.  The value of a8 is one less than
	   the actual exponent, so just negate it to get the shift amount.  */
	neg	a8, a8
	ssr	a8
	bgeui	a8, 32, .Ldiv_bigshift

	/* Shift a10/a11 right.  Any bits that are shifted out of a11 are
	   saved in a6 for rounding the result.  */
	sll	a6, a11
	src	a11, a10, a11
	srl	a10, a10
	j	1f

.Ldiv_bigshift:
	bgeui	a8, 64, .Ldiv_flush_to_zero
	sll	a9, a11		/* lost bits shifted out of a11 */
	src	a6, a10, a11
	srl	a11, a10
	movi	a10, 0
	or	xl, xl, a9

	/* Set the exponent to zero.  */
1:	movi	a8, 0

	/* Pack any nonzero remainder (in xh/xl) into a6.  */
	or	xh, xh, xl
	beqz	xh, 1f
	movi	a9, 1
	or	a6, a6, a9

	/* Round a10/a11 based on the bits shifted out into a6.  */
1:	bgez	a6, .Ldiv_rounded
	addi	a11, a11, 1
	beqz	a11, .Ldiv_roundcarry
	slli	a6, a6, 1
	bnez	a6, .Ldiv_rounded
	srli	a11, a11, 1		/* exactly half: round to even */
	slli	a11, a11, 1
	j	.Ldiv_rounded

.Ldiv_roundcarry:
	/* a11 is always zero when the rounding increment overflows, so
	   there's no need to round it to an even value.  */
	addi	a10, a10, 1
	/* Overflow to the exponent field is OK.  */
	j	.Ldiv_rounded

.Ldiv_flush_to_zero:
	/* Return zero with the appropriate sign bit.  */
	srli	xh, a7, 31
	slli	xh, xh, 31
	movi	xl, 0
	leaf_return

#endif /* L_divdf3 */
#ifdef L_cmpdf2
#ifdef L_cmpdf2
        /* Equal and Not Equal */
        /* Equal and Not Equal */
        .align  4
        .align  4
        .global __eqdf2
        .global __eqdf2
        .global __nedf2
        .global __nedf2
        .set    __nedf2, __eqdf2
        .set    __nedf2, __eqdf2
        .type   __eqdf2, @function
        .type   __eqdf2, @function
__eqdf2:
__eqdf2:
        leaf_entry sp, 16
        leaf_entry sp, 16
        bne     xl, yl, 2f
        bne     xl, yl, 2f
        bne     xh, yh, 4f
        bne     xh, yh, 4f
        /* The values are equal but NaN != NaN.  Check the exponent.  */
        /* The values are equal but NaN != NaN.  Check the exponent.  */
        movi    a6, 0x7ff00000
        movi    a6, 0x7ff00000
        ball    xh, a6, 3f
        ball    xh, a6, 3f
        /* Equal.  */
        /* Equal.  */
        movi    a2, 0
        movi    a2, 0
        leaf_return
        leaf_return
        /* Not equal.  */
        /* Not equal.  */
2:      movi    a2, 1
2:      movi    a2, 1
        leaf_return
        leaf_return
        /* Check if the mantissas are nonzero.  */
        /* Check if the mantissas are nonzero.  */
3:      slli    a7, xh, 12
3:      slli    a7, xh, 12
        or      a7, a7, xl
        or      a7, a7, xl
        j       5f
        j       5f
        /* Check if x and y are zero with different signs.  */
        /* Check if x and y are zero with different signs.  */
4:      or      a7, xh, yh
4:      or      a7, xh, yh
        slli    a7, a7, 1
        slli    a7, a7, 1
        or      a7, a7, xl      /* xl == yl here */
        or      a7, a7, xl      /* xl == yl here */
        /* Equal if a7 == 0, where a7 is either abs(x | y) or the mantissa
        /* Equal if a7 == 0, where a7 is either abs(x | y) or the mantissa
           or x when exponent(x) = 0x7ff and x == y.  */
           or x when exponent(x) = 0x7ff and x == y.  */
5:      movi    a2, 0
5:      movi    a2, 0
        movi    a3, 1
        movi    a3, 1
        movnez  a2, a3, a7
        movnez  a2, a3, a7
        leaf_return
        leaf_return
        /* Greater Than */
        /* Greater Than */
        .align  4
        .align  4
        .global __gtdf2
        .global __gtdf2
        .type   __gtdf2, @function
        .type   __gtdf2, @function
__gtdf2:
__gtdf2:
        leaf_entry sp, 16
        leaf_entry sp, 16
        movi    a6, 0x7ff00000
        movi    a6, 0x7ff00000
        ball    xh, a6, 2f
        ball    xh, a6, 2f
1:      bnall   yh, a6, .Lle_cmp
1:      bnall   yh, a6, .Lle_cmp
        /* Check if y is a NaN.  */
        /* Check if y is a NaN.  */
        slli    a7, yh, 12
        slli    a7, yh, 12
        or      a7, a7, yl
        or      a7, a7, yl
        beqz    a7, .Lle_cmp
        beqz    a7, .Lle_cmp
        movi    a2, 0
        movi    a2, 0
        leaf_return
        leaf_return
        /* Check if x is a NaN.  */
        /* Check if x is a NaN.  */
2:      slli    a7, xh, 12
2:      slli    a7, xh, 12
        or      a7, a7, xl
        or      a7, a7, xl
        beqz    a7, 1b
        beqz    a7, 1b
        movi    a2, 0
        movi    a2, 0
        leaf_return
        leaf_return
        /* Less Than or Equal */
        /* Less Than or Equal */
        .align  4
        .align  4
        .global __ledf2
        .global __ledf2
        .type   __ledf2, @function
        .type   __ledf2, @function
__ledf2:
__ledf2:
        leaf_entry sp, 16
        leaf_entry sp, 16
        movi    a6, 0x7ff00000
        movi    a6, 0x7ff00000
        ball    xh, a6, 2f
        ball    xh, a6, 2f
1:      bnall   yh, a6, .Lle_cmp
1:      bnall   yh, a6, .Lle_cmp
        /* Check if y is a NaN.  */
        /* Check if y is a NaN.  */
        slli    a7, yh, 12
        slli    a7, yh, 12
        or      a7, a7, yl
        or      a7, a7, yl
        beqz    a7, .Lle_cmp
        beqz    a7, .Lle_cmp
        movi    a2, 1
        movi    a2, 1
        leaf_return
        leaf_return
        /* Check if x is a NaN.  */
        /* Check if x is a NaN.  */
2:      slli    a7, xh, 12
2:      slli    a7, xh, 12
        or      a7, a7, xl
        or      a7, a7, xl
        beqz    a7, 1b
        beqz    a7, 1b
        movi    a2, 1
        movi    a2, 1
        leaf_return
        leaf_return
.Lle_cmp:
	/* Shared ordered-compare tail for __gtdf2/__ledf2.  Both operands
	   are known not to be NaN here.  Returns a2 = 0 if x <= y, else 1.
	   Sign-magnitude comparison: for same-sign operands the ordering
	   of the raw bit patterns matches (or reverses, when negative)
	   the numeric ordering.  */

	/* Check if x and y have different signs.  */
	xor	a7, xh, yh
	bltz	a7, .Lle_diff_signs

	/* Same signs.  Check if x is negative.  */
	bltz	xh, .Lle_xneg

	/* Both non-negative: x <= y iff (xh,xl) <= (yh,yl) unsigned.  */
	bltu	xh, yh, 4f
	bne	xh, yh, 5f
	bltu	yl, xl, 5f
4:	movi	a2, 0		/* x <= y */
	leaf_return

.Lle_xneg:
	/* Both negative: magnitudes reverse, so x <= y iff y's bit
	   pattern is >= x's, i.e. check y <= x on the raw words.  */
	bltu	yh, xh, 4b
	bne	yh, xh, 5f
	bgeu	xl, yl, 4b
5:	movi	a2, 1		/* x > y */
	leaf_return

.Lle_diff_signs:
	/* Different signs: if x is the negative one, x <= y...  */
	bltz	xh, 4b

	/* ...unless both are zero (+0 <= -0 must hold).  a7 = 0 iff both
	   x and y are (signed) zero, ignoring the sign bits.  */
	or	a7, xh, yh
	slli	a7, a7, 1
	or	a7, a7, xl
	or	a7, a7, yl
	movi	a2, 1
	movi	a3, 0
	moveqz	a2, a3, a7
	leaf_return
        /* Greater Than or Equal */
        /* Greater Than or Equal */
        .align  4
        .align  4
        .global __gedf2
        .global __gedf2
        .type   __gedf2, @function
        .type   __gedf2, @function
__gedf2:
__gedf2:
        leaf_entry sp, 16
        leaf_entry sp, 16
        movi    a6, 0x7ff00000
        movi    a6, 0x7ff00000
        ball    xh, a6, 2f
        ball    xh, a6, 2f
1:      bnall   yh, a6, .Llt_cmp
1:      bnall   yh, a6, .Llt_cmp
        /* Check if y is a NaN.  */
        /* Check if y is a NaN.  */
        slli    a7, yh, 12
        slli    a7, yh, 12
        or      a7, a7, yl
        or      a7, a7, yl
        beqz    a7, .Llt_cmp
        beqz    a7, .Llt_cmp
        movi    a2, -1
        movi    a2, -1
        leaf_return
        leaf_return
        /* Check if x is a NaN.  */
        /* Check if x is a NaN.  */
2:      slli    a7, xh, 12
2:      slli    a7, xh, 12
        or      a7, a7, xl
        or      a7, a7, xl
        beqz    a7, 1b
        beqz    a7, 1b
        movi    a2, -1
        movi    a2, -1
        leaf_return
        leaf_return
        /* Less Than */
        /* Less Than */
        .align  4
        .align  4
        .global __ltdf2
        .global __ltdf2
        .type   __ltdf2, @function
        .type   __ltdf2, @function
__ltdf2:
__ltdf2:
        leaf_entry sp, 16
        leaf_entry sp, 16
        movi    a6, 0x7ff00000
        movi    a6, 0x7ff00000
        ball    xh, a6, 2f
        ball    xh, a6, 2f
1:      bnall   yh, a6, .Llt_cmp
1:      bnall   yh, a6, .Llt_cmp
        /* Check if y is a NaN.  */
        /* Check if y is a NaN.  */
        slli    a7, yh, 12
        slli    a7, yh, 12
        or      a7, a7, yl
        or      a7, a7, yl
        beqz    a7, .Llt_cmp
        beqz    a7, .Llt_cmp
        movi    a2, 0
        movi    a2, 0
        leaf_return
        leaf_return
        /* Check if x is a NaN.  */
        /* Check if x is a NaN.  */
2:      slli    a7, xh, 12
2:      slli    a7, xh, 12
        or      a7, a7, xl
        or      a7, a7, xl
        beqz    a7, 1b
        beqz    a7, 1b
        movi    a2, 0
        movi    a2, 0
        leaf_return
        leaf_return
.Llt_cmp:
	/* Shared ordered-compare tail for __gedf2/__ltdf2.  Both operands
	   are known not to be NaN here.  Returns a2 = -1 if x < y, else 0.
	   Same sign-magnitude technique as .Lle_cmp, with strict
	   inequality.  */

	/* Check if x and y have different signs.  */
	xor	a7, xh, yh
	bltz	a7, .Llt_diff_signs

	/* Same signs.  Check if x is negative.  */
	bltz	xh, .Llt_xneg

	/* Both non-negative: x < y iff (xh,xl) < (yh,yl) unsigned.  */
	bltu	xh, yh, 4f
	bne	xh, yh, 5f
	bgeu	xl, yl, 5f
4:	movi	a2, -1		/* x < y */
	leaf_return

.Llt_xneg:
	/* Both negative: magnitudes reverse, so x < y iff y's bit
	   pattern is strictly greater, i.e. check y < x on raw words.  */
	bltu	yh, xh, 4b
	bne	yh, xh, 5f
	bltu	yl, xl, 4b
5:	movi	a2, 0		/* x >= y */
	leaf_return

.Llt_diff_signs:
	/* Different signs: if x is the non-negative one, x is not less.  */
	bgez	xh, 5b

	/* x negative, y non-negative: x < y unless both are zero
	   (-0 < +0 must be false).  a7 = 0 iff both are signed zeros.  */
	or	a7, xh, yh
	slli	a7, a7, 1
	or	a7, a7, xl
	or	a7, a7, yl
	movi	a2, 0
	movi	a3, -1
	movnez	a2, a3, a7
	leaf_return
        /* Unordered */
        /* Unordered */
        .align  4
        .align  4
        .global __unorddf2
        .global __unorddf2
        .type   __unorddf2, @function
        .type   __unorddf2, @function
__unorddf2:
__unorddf2:
        leaf_entry sp, 16
        leaf_entry sp, 16
        movi    a6, 0x7ff00000
        movi    a6, 0x7ff00000
        ball    xh, a6, 3f
        ball    xh, a6, 3f
1:      ball    yh, a6, 4f
1:      ball    yh, a6, 4f
2:      movi    a2, 0
2:      movi    a2, 0
        leaf_return
        leaf_return
3:      slli    a7, xh, 12
3:      slli    a7, xh, 12
        or      a7, a7, xl
        or      a7, a7, xl
        beqz    a7, 1b
        beqz    a7, 1b
        movi    a2, 1
        movi    a2, 1
        leaf_return
        leaf_return
4:      slli    a7, yh, 12
4:      slli    a7, yh, 12
        or      a7, a7, yl
        or      a7, a7, yl
        beqz    a7, 2b
        beqz    a7, 2b
        movi    a2, 1
        movi    a2, 1
        leaf_return
        leaf_return
#endif /* L_cmpdf2 */
#endif /* L_cmpdf2 */
#ifdef L_fixdfsi
#ifdef L_fixdfsi
        .align  4
        .align  4
        .global __fixdfsi
        .global __fixdfsi
        .type   __fixdfsi, @function
        .type   __fixdfsi, @function
__fixdfsi:
__fixdfsi:
        leaf_entry sp, 16
        leaf_entry sp, 16
        /* Check for NaN and Infinity.  */
        /* Check for NaN and Infinity.  */
        movi    a6, 0x7ff00000
        movi    a6, 0x7ff00000
        ball    xh, a6, .Lfixdfsi_nan_or_inf
        ball    xh, a6, .Lfixdfsi_nan_or_inf
        /* Extract the exponent and check if 0 < (exp - 0x3fe) < 32.  */
        /* Extract the exponent and check if 0 < (exp - 0x3fe) < 32.  */
        extui   a4, xh, 20, 11
        extui   a4, xh, 20, 11
        extui   a5, a6, 19, 10  /* 0x3fe */
        extui   a5, a6, 19, 10  /* 0x3fe */
        sub     a4, a4, a5
        sub     a4, a4, a5
        bgei    a4, 32, .Lfixdfsi_maxint
        bgei    a4, 32, .Lfixdfsi_maxint
        blti    a4, 1, .Lfixdfsi_zero
        blti    a4, 1, .Lfixdfsi_zero
        /* Add explicit "1.0" and shift << 11.  */
        /* Add explicit "1.0" and shift << 11.  */
        or      a7, xh, a6
        or      a7, xh, a6
        ssai    (32 - 11)
        ssai    (32 - 11)
        src     a5, a7, xl
        src     a5, a7, xl
        /* Shift back to the right, based on the exponent.  */
        /* Shift back to the right, based on the exponent.  */
        ssl     a4              /* shift by 32 - a4 */
        ssl     a4              /* shift by 32 - a4 */
        srl     a5, a5
        srl     a5, a5
        /* Negate the result if sign != 0.  */
        /* Negate the result if sign != 0.  */
        neg     a2, a5
        neg     a2, a5
        movgez  a2, a5, a7
        movgez  a2, a5, a7
        leaf_return
        leaf_return
.Lfixdfsi_nan_or_inf:
.Lfixdfsi_nan_or_inf:
        /* Handle Infinity and NaN.  */
        /* Handle Infinity and NaN.  */
        slli    a4, xh, 12
        slli    a4, xh, 12
        or      a4, a4, xl
        or      a4, a4, xl
        beqz    a4, .Lfixdfsi_maxint
        beqz    a4, .Lfixdfsi_maxint
        /* Translate NaN to +maxint.  */
        /* Translate NaN to +maxint.  */
        movi    xh, 0
        movi    xh, 0
.Lfixdfsi_maxint:
.Lfixdfsi_maxint:
        slli    a4, a6, 11      /* 0x80000000 */
        slli    a4, a6, 11      /* 0x80000000 */
        addi    a5, a4, -1      /* 0x7fffffff */
        addi    a5, a4, -1      /* 0x7fffffff */
        movgez  a4, a5, xh
        movgez  a4, a5, xh
        mov     a2, a4
        mov     a2, a4
        leaf_return
        leaf_return
.Lfixdfsi_zero:
.Lfixdfsi_zero:
        movi    a2, 0
        movi    a2, 0
        leaf_return
        leaf_return
#endif /* L_fixdfsi */
#endif /* L_fixdfsi */
#ifdef L_fixdfdi
#ifdef L_fixdfdi
        .align  4
        .align  4
        .global __fixdfdi
        .global __fixdfdi
        .type   __fixdfdi, @function
        .type   __fixdfdi, @function
__fixdfdi:
__fixdfdi:
        leaf_entry sp, 16
        leaf_entry sp, 16
        /* Check for NaN and Infinity.  */
        /* Check for NaN and Infinity.  */
        movi    a6, 0x7ff00000
        movi    a6, 0x7ff00000
        ball    xh, a6, .Lfixdfdi_nan_or_inf
        ball    xh, a6, .Lfixdfdi_nan_or_inf
        /* Extract the exponent and check if 0 < (exp - 0x3fe) < 64.  */
        /* Extract the exponent and check if 0 < (exp - 0x3fe) < 64.  */
        extui   a4, xh, 20, 11
        extui   a4, xh, 20, 11
        extui   a5, a6, 19, 10  /* 0x3fe */
        extui   a5, a6, 19, 10  /* 0x3fe */
        sub     a4, a4, a5
        sub     a4, a4, a5
        bgei    a4, 64, .Lfixdfdi_maxint
        bgei    a4, 64, .Lfixdfdi_maxint
        blti    a4, 1, .Lfixdfdi_zero
        blti    a4, 1, .Lfixdfdi_zero
        /* Add explicit "1.0" and shift << 11.  */
        /* Add explicit "1.0" and shift << 11.  */
        or      a7, xh, a6
        or      a7, xh, a6
        ssai    (32 - 11)
        ssai    (32 - 11)
        src     xh, a7, xl
        src     xh, a7, xl
        sll     xl, xl
        sll     xl, xl
        /* Shift back to the right, based on the exponent.  */
        /* Shift back to the right, based on the exponent.  */
        ssl     a4              /* shift by 64 - a4 */
        ssl     a4              /* shift by 64 - a4 */
        bgei    a4, 32, .Lfixdfdi_smallshift
        bgei    a4, 32, .Lfixdfdi_smallshift
        srl     xl, xh
        srl     xl, xh
        movi    xh, 0
        movi    xh, 0
.Lfixdfdi_shifted:
.Lfixdfdi_shifted:
        /* Negate the result if sign != 0.  */
        /* Negate the result if sign != 0.  */
        bgez    a7, 1f
        bgez    a7, 1f
        neg     xl, xl
        neg     xl, xl
        neg     xh, xh
        neg     xh, xh
        beqz    xl, 1f
        beqz    xl, 1f
        addi    xh, xh, -1
        addi    xh, xh, -1
1:      leaf_return
1:      leaf_return
.Lfixdfdi_smallshift:
.Lfixdfdi_smallshift:
        src     xl, xh, xl
        src     xl, xh, xl
        srl     xh, xh
        srl     xh, xh
        j       .Lfixdfdi_shifted
        j       .Lfixdfdi_shifted
.Lfixdfdi_nan_or_inf:
.Lfixdfdi_nan_or_inf:
        /* Handle Infinity and NaN.  */
        /* Handle Infinity and NaN.  */
        slli    a4, xh, 12
        slli    a4, xh, 12
        or      a4, a4, xl
        or      a4, a4, xl
        beqz    a4, .Lfixdfdi_maxint
        beqz    a4, .Lfixdfdi_maxint
        /* Translate NaN to +maxint.  */
        /* Translate NaN to +maxint.  */
        movi    xh, 0
        movi    xh, 0
.Lfixdfdi_maxint:
.Lfixdfdi_maxint:
        slli    a7, a6, 11      /* 0x80000000 */
        slli    a7, a6, 11      /* 0x80000000 */
        bgez    xh, 1f
        bgez    xh, 1f
        mov     xh, a7
        mov     xh, a7
        movi    xl, 0
        movi    xl, 0
        leaf_return
        leaf_return
1:      addi    xh, a7, -1      /* 0x7fffffff */
1:      addi    xh, a7, -1      /* 0x7fffffff */
        movi    xl, -1
        movi    xl, -1
        leaf_return
        leaf_return
.Lfixdfdi_zero:
.Lfixdfdi_zero:
        movi    xh, 0
        movi    xh, 0
        movi    xl, 0
        movi    xl, 0
        leaf_return
        leaf_return
#endif /* L_fixdfdi */
#endif /* L_fixdfdi */
#ifdef L_fixunsdfsi
#ifdef L_fixunsdfsi
        .align  4
        .align  4
        .global __fixunsdfsi
        .global __fixunsdfsi
        .type   __fixunsdfsi, @function
        .type   __fixunsdfsi, @function
__fixunsdfsi:
__fixunsdfsi:
        leaf_entry sp, 16
        leaf_entry sp, 16
        /* Check for NaN and Infinity.  */
        /* Check for NaN and Infinity.  */
        movi    a6, 0x7ff00000
        movi    a6, 0x7ff00000
        ball    xh, a6, .Lfixunsdfsi_nan_or_inf
        ball    xh, a6, .Lfixunsdfsi_nan_or_inf
        /* Extract the exponent and check if 0 <= (exp - 0x3ff) < 32.  */
        /* Extract the exponent and check if 0 <= (exp - 0x3ff) < 32.  */
        extui   a4, xh, 20, 11
        extui   a4, xh, 20, 11
        extui   a5, a6, 20, 10  /* 0x3ff */
        extui   a5, a6, 20, 10  /* 0x3ff */
        sub     a4, a4, a5
        sub     a4, a4, a5
        bgei    a4, 32, .Lfixunsdfsi_maxint
        bgei    a4, 32, .Lfixunsdfsi_maxint
        bltz    a4, .Lfixunsdfsi_zero
        bltz    a4, .Lfixunsdfsi_zero
        /* Add explicit "1.0" and shift << 11.  */
        /* Add explicit "1.0" and shift << 11.  */
        or      a7, xh, a6
        or      a7, xh, a6
        ssai    (32 - 11)
        ssai    (32 - 11)
        src     a5, a7, xl
        src     a5, a7, xl
        /* Shift back to the right, based on the exponent.  */
        /* Shift back to the right, based on the exponent.  */
        addi    a4, a4, 1
        addi    a4, a4, 1
        beqi    a4, 32, .Lfixunsdfsi_bigexp
        beqi    a4, 32, .Lfixunsdfsi_bigexp
        ssl     a4              /* shift by 32 - a4 */
        ssl     a4              /* shift by 32 - a4 */
        srl     a5, a5
        srl     a5, a5
        /* Negate the result if sign != 0.  */
        /* Negate the result if sign != 0.  */
        neg     a2, a5
        neg     a2, a5
        movgez  a2, a5, a7
        movgez  a2, a5, a7
        leaf_return
        leaf_return
.Lfixunsdfsi_nan_or_inf:
.Lfixunsdfsi_nan_or_inf:
        /* Handle Infinity and NaN.  */
        /* Handle Infinity and NaN.  */
        slli    a4, xh, 12
        slli    a4, xh, 12
        or      a4, a4, xl
        or      a4, a4, xl
        beqz    a4, .Lfixunsdfsi_maxint
        beqz    a4, .Lfixunsdfsi_maxint
        /* Translate NaN to 0xffffffff.  */
        /* Translate NaN to 0xffffffff.  */
        movi    a2, -1
        movi    a2, -1
        leaf_return
        leaf_return
.Lfixunsdfsi_maxint:
.Lfixunsdfsi_maxint:
        slli    a4, a6, 11      /* 0x80000000 */
        slli    a4, a6, 11      /* 0x80000000 */
        movi    a5, -1          /* 0xffffffff */
        movi    a5, -1          /* 0xffffffff */
        movgez  a4, a5, xh
        movgez  a4, a5, xh
        mov     a2, a4
        mov     a2, a4
        leaf_return
        leaf_return
.Lfixunsdfsi_zero:
.Lfixunsdfsi_zero:
        movi    a2, 0
        movi    a2, 0
        leaf_return
        leaf_return
.Lfixunsdfsi_bigexp:
.Lfixunsdfsi_bigexp:
        /* Handle unsigned maximum exponent case.  */
        /* Handle unsigned maximum exponent case.  */
        bltz    xh, 1f
        bltz    xh, 1f
        mov     a2, a5          /* no shift needed */
        mov     a2, a5          /* no shift needed */
        leaf_return
        leaf_return
        /* Return 0x80000000 if negative.  */
        /* Return 0x80000000 if negative.  */
1:      slli    a2, a6, 11
1:      slli    a2, a6, 11
        leaf_return
        leaf_return
#endif /* L_fixunsdfsi */
#endif /* L_fixunsdfsi */
#ifdef L_fixunsdfdi
#ifdef L_fixunsdfdi
        .align  4
        .align  4
        .global __fixunsdfdi
        .global __fixunsdfdi
        .type   __fixunsdfdi, @function
        .type   __fixunsdfdi, @function
__fixunsdfdi:
__fixunsdfdi:
        leaf_entry sp, 16
        leaf_entry sp, 16
        /* Check for NaN and Infinity.  */
        /* Check for NaN and Infinity.  */
        movi    a6, 0x7ff00000
        movi    a6, 0x7ff00000
        ball    xh, a6, .Lfixunsdfdi_nan_or_inf
        ball    xh, a6, .Lfixunsdfdi_nan_or_inf
        /* Extract the exponent and check if 0 <= (exp - 0x3ff) < 64.  */
        /* Extract the exponent and check if 0 <= (exp - 0x3ff) < 64.  */
        extui   a4, xh, 20, 11
        extui   a4, xh, 20, 11
        extui   a5, a6, 20, 10  /* 0x3ff */
        extui   a5, a6, 20, 10  /* 0x3ff */
        sub     a4, a4, a5
        sub     a4, a4, a5
        bgei    a4, 64, .Lfixunsdfdi_maxint
        bgei    a4, 64, .Lfixunsdfdi_maxint
        bltz    a4, .Lfixunsdfdi_zero
        bltz    a4, .Lfixunsdfdi_zero
        /* Add explicit "1.0" and shift << 11.  */
        /* Add explicit "1.0" and shift << 11.  */
        or      a7, xh, a6
        or      a7, xh, a6
        ssai    (32 - 11)
        ssai    (32 - 11)
        src     xh, a7, xl
        src     xh, a7, xl
        sll     xl, xl
        sll     xl, xl
        /* Shift back to the right, based on the exponent.  */
        /* Shift back to the right, based on the exponent.  */
        addi    a4, a4, 1
        addi    a4, a4, 1
        beqi    a4, 64, .Lfixunsdfdi_bigexp
        beqi    a4, 64, .Lfixunsdfdi_bigexp
        ssl     a4              /* shift by 64 - a4 */
        ssl     a4              /* shift by 64 - a4 */
        bgei    a4, 32, .Lfixunsdfdi_smallshift
        bgei    a4, 32, .Lfixunsdfdi_smallshift
        srl     xl, xh
        srl     xl, xh
        movi    xh, 0
        movi    xh, 0
.Lfixunsdfdi_shifted:
.Lfixunsdfdi_shifted:
        /* Negate the result if sign != 0.  */
        /* Negate the result if sign != 0.  */
        bgez    a7, 1f
        bgez    a7, 1f
        neg     xl, xl
        neg     xl, xl
        neg     xh, xh
        neg     xh, xh
        beqz    xl, 1f
        beqz    xl, 1f
        addi    xh, xh, -1
        addi    xh, xh, -1
1:      leaf_return
1:      leaf_return
.Lfixunsdfdi_smallshift:
.Lfixunsdfdi_smallshift:
        src     xl, xh, xl
        src     xl, xh, xl
        srl     xh, xh
        srl     xh, xh
        j       .Lfixunsdfdi_shifted
        j       .Lfixunsdfdi_shifted
.Lfixunsdfdi_nan_or_inf:
.Lfixunsdfdi_nan_or_inf:
        /* Handle Infinity and NaN.  */
        /* Handle Infinity and NaN.  */
        slli    a4, xh, 12
        slli    a4, xh, 12
        or      a4, a4, xl
        or      a4, a4, xl
        beqz    a4, .Lfixunsdfdi_maxint
        beqz    a4, .Lfixunsdfdi_maxint
        /* Translate NaN to 0xffffffff.... */
        /* Translate NaN to 0xffffffff.... */
1:      movi    xh, -1
1:      movi    xh, -1
        movi    xl, -1
        movi    xl, -1
        leaf_return
        leaf_return
.Lfixunsdfdi_maxint:
.Lfixunsdfdi_maxint:
        bgez    xh, 1b
        bgez    xh, 1b
2:      slli    xh, a6, 11      /* 0x80000000 */
2:      slli    xh, a6, 11      /* 0x80000000 */
        movi    xl, 0
        movi    xl, 0
        leaf_return
        leaf_return
.Lfixunsdfdi_zero:
.Lfixunsdfdi_zero:
        movi    xh, 0
        movi    xh, 0
        movi    xl, 0
        movi    xl, 0
        leaf_return
        leaf_return
.Lfixunsdfdi_bigexp:
.Lfixunsdfdi_bigexp:
        /* Handle unsigned maximum exponent case.  */
        /* Handle unsigned maximum exponent case.  */
        bltz    a7, 2b
        bltz    a7, 2b
        leaf_return             /* no shift needed */
        leaf_return             /* no shift needed */
#endif /* L_fixunsdfdi */
#endif /* L_fixunsdfdi */
#ifdef L_floatsidf
#ifdef L_floatsidf
        .align  4
        .align  4
        .global __floatunsidf
        .global __floatunsidf
        .type   __floatunsidf, @function
        .type   __floatunsidf, @function
__floatunsidf:
__floatunsidf:
        leaf_entry sp, 16
        leaf_entry sp, 16
        beqz    a2, .Lfloatsidf_return_zero
        beqz    a2, .Lfloatsidf_return_zero
        /* Set the sign to zero and jump to the floatsidf code.  */
        /* Set the sign to zero and jump to the floatsidf code.  */
        movi    a7, 0
        movi    a7, 0
        j       .Lfloatsidf_normalize
        j       .Lfloatsidf_normalize
        .align  4
        .align  4
        .global __floatsidf
        .global __floatsidf
        .type   __floatsidf, @function
        .type   __floatsidf, @function
__floatsidf:
__floatsidf:
        leaf_entry sp, 16
        leaf_entry sp, 16
        /* Check for zero.  */
        /* Check for zero.  */
        beqz    a2, .Lfloatsidf_return_zero
        beqz    a2, .Lfloatsidf_return_zero
        /* Save the sign.  */
        /* Save the sign.  */
        extui   a7, a2, 31, 1
        extui   a7, a2, 31, 1
        /* Get the absolute value.  */
        /* Get the absolute value.  */
#if XCHAL_HAVE_ABS
#if XCHAL_HAVE_ABS
        abs     a2, a2
        abs     a2, a2
#else
#else
        neg     a4, a2
        neg     a4, a2
        movltz  a2, a4, a2
        movltz  a2, a4, a2
#endif
#endif
.Lfloatsidf_normalize:
.Lfloatsidf_normalize:
        /* Normalize with the first 1 bit in the msb.  */
        /* Normalize with the first 1 bit in the msb.  */
        do_nsau a4, a2, a5, a6
        do_nsau a4, a2, a5, a6
        ssl     a4
        ssl     a4
        sll     a5, a2
        sll     a5, a2
        /* Shift the mantissa into position.  */
        /* Shift the mantissa into position.  */
        srli    xh, a5, 11
        srli    xh, a5, 11
        slli    xl, a5, (32 - 11)
        slli    xl, a5, (32 - 11)
        /* Set the exponent.  */
        /* Set the exponent.  */
        movi    a5, 0x41d       /* 0x3fe + 31 */
        movi    a5, 0x41d       /* 0x3fe + 31 */
        sub     a5, a5, a4
        sub     a5, a5, a4
        slli    a5, a5, 20
        slli    a5, a5, 20
        add     xh, xh, a5
        add     xh, xh, a5
        /* Add the sign and return. */
        /* Add the sign and return. */
        slli    a7, a7, 31
        slli    a7, a7, 31
        or      xh, xh, a7
        or      xh, xh, a7
        leaf_return
        leaf_return
.Lfloatsidf_return_zero:
.Lfloatsidf_return_zero:
        movi    a3, 0
        movi    a3, 0
        leaf_return
        leaf_return
#endif /* L_floatsidf */
#endif /* L_floatsidf */
#ifdef L_floatdidf
#ifdef L_floatdidf
        .align  4
        .align  4
        .global __floatundidf
        .global __floatundidf
        .type   __floatundidf, @function
        .type   __floatundidf, @function
__floatundidf:
__floatundidf:
        leaf_entry sp, 16
        leaf_entry sp, 16
        /* Check for zero.  */
        /* Check for zero.  */
        or      a4, xh, xl
        or      a4, xh, xl
        beqz    a4, 2f
        beqz    a4, 2f
        /* Set the sign to zero and jump to the floatdidf code.  */
        /* Set the sign to zero and jump to the floatdidf code.  */
        movi    a7, 0
        movi    a7, 0
        j       .Lfloatdidf_normalize
        j       .Lfloatdidf_normalize
        .align  4
        .align  4
        .global __floatdidf
        .global __floatdidf
        .type   __floatdidf, @function
        .type   __floatdidf, @function
__floatdidf:
__floatdidf:
        leaf_entry sp, 16
        leaf_entry sp, 16
        /* Check for zero.  */
        /* Check for zero.  */
        or      a4, xh, xl
        or      a4, xh, xl
        beqz    a4, 2f
        beqz    a4, 2f
        /* Save the sign.  */
        /* Save the sign.  */
        extui   a7, xh, 31, 1
        extui   a7, xh, 31, 1
        /* Get the absolute value.  */
        /* Get the absolute value.  */
        bgez    xh, .Lfloatdidf_normalize
        bgez    xh, .Lfloatdidf_normalize
        neg     xl, xl
        neg     xl, xl
        neg     xh, xh
        neg     xh, xh
        beqz    xl, .Lfloatdidf_normalize
        beqz    xl, .Lfloatdidf_normalize
        addi    xh, xh, -1
        addi    xh, xh, -1
.Lfloatdidf_normalize:
.Lfloatdidf_normalize:
        /* Normalize with the first 1 bit in the msb of xh.  */
        /* Normalize with the first 1 bit in the msb of xh.  */
        beqz    xh, .Lfloatdidf_bigshift
        beqz    xh, .Lfloatdidf_bigshift
        do_nsau a4, xh, a5, a6
        do_nsau a4, xh, a5, a6
        ssl     a4
        ssl     a4
        src     xh, xh, xl
        src     xh, xh, xl
        sll     xl, xl
        sll     xl, xl
.Lfloatdidf_shifted:
.Lfloatdidf_shifted:
        /* Shift the mantissa into position, with rounding bits in a6.  */
        /* Shift the mantissa into position, with rounding bits in a6.  */
        ssai    11
        ssai    11
        sll     a6, xl
        sll     a6, xl
        src     xl, xh, xl
        src     xl, xh, xl
        srl     xh, xh
        srl     xh, xh
        /* Set the exponent.  */
        /* Set the exponent.  */
        movi    a5, 0x43d       /* 0x3fe + 63 */
        movi    a5, 0x43d       /* 0x3fe + 63 */
        sub     a5, a5, a4
        sub     a5, a5, a4
        slli    a5, a5, 20
        slli    a5, a5, 20
        add     xh, xh, a5
        add     xh, xh, a5
        /* Add the sign.  */
        /* Add the sign.  */
        slli    a7, a7, 31
        slli    a7, a7, 31
        or      xh, xh, a7
        or      xh, xh, a7
        /* Round up if the leftover fraction is >= 1/2.  */
        /* Round up if the leftover fraction is >= 1/2.  */
        bgez    a6, 2f
        bgez    a6, 2f
        addi    xl, xl, 1
        addi    xl, xl, 1
        beqz    xl, .Lfloatdidf_roundcarry
        beqz    xl, .Lfloatdidf_roundcarry
        /* Check if the leftover fraction is exactly 1/2.  */
        /* Check if the leftover fraction is exactly 1/2.  */
        slli    a6, a6, 1
        slli    a6, a6, 1
        beqz    a6, .Lfloatdidf_exactlyhalf
        beqz    a6, .Lfloatdidf_exactlyhalf
2:      leaf_return
2:      leaf_return
.Lfloatdidf_bigshift:
.Lfloatdidf_bigshift:
        /* xh is zero.  Normalize with first 1 bit of xl in the msb of xh.  */
        /* xh is zero.  Normalize with first 1 bit of xl in the msb of xh.  */
        do_nsau a4, xl, a5, a6
        do_nsau a4, xl, a5, a6
        ssl     a4
        ssl     a4
        sll     xh, xl
        sll     xh, xl
        movi    xl, 0
        movi    xl, 0
        addi    a4, a4, 32
        addi    a4, a4, 32
        j       .Lfloatdidf_shifted
        j       .Lfloatdidf_shifted
.Lfloatdidf_exactlyhalf:
.Lfloatdidf_exactlyhalf:
        /* Round down to the nearest even value.  */
        /* Round down to the nearest even value.  */
        srli    xl, xl, 1
        srli    xl, xl, 1
        slli    xl, xl, 1
        slli    xl, xl, 1
        leaf_return
        leaf_return
.Lfloatdidf_roundcarry:
.Lfloatdidf_roundcarry:
        /* xl is always zero when the rounding increment overflows, so
        /* xl is always zero when the rounding increment overflows, so
           there's no need to round it to an even value.  */
           there's no need to round it to an even value.  */
        addi    xh, xh, 1
        addi    xh, xh, 1
        /* Overflow to the exponent is OK.  */
        /* Overflow to the exponent is OK.  */
        leaf_return
        leaf_return
#endif /* L_floatdidf */
#endif /* L_floatdidf */
#ifdef L_truncdfsf2

	/* ------------------------------------------------------------------
	   float __truncdfsf2 (double x)
	   Truncate an IEEE-754 double to single precision, rounding to
	   nearest-even, with quiet-NaN propagation, overflow to infinity,
	   gradual underflow to subnormals, and flush to signed zero when the
	   result is too small even for a subnormal.
	   In:   xh/xl = high/low words of the double operand (xh/xl are
	         register macros defined earlier in this file — presumably the
	         a2/a3 argument pair in the Xtensa ABI word order; confirm
	         against the file header).
	   Out:  a2 = single-precision result.
	   Clobbers: a4-a7, SAR.  Leaf function (leaf_entry/leaf_return).
	   ------------------------------------------------------------------ */
	.align	4
	.global	__truncdfsf2
	.type	__truncdfsf2, @function
__truncdfsf2:
	leaf_entry sp, 16

	/* Adjust the exponent bias (double bias 0x3ff -> single bias 0x7f).  */
	movi	a4, (0x3ff - 0x7f) << 20
	sub	a5, xh, a4

	/* Check for underflow: if the subtraction flipped the sign bit, or
	   left a zero biased exponent, the value is below the single-precision
	   normal range.  */
	xor	a6, xh, a5
	bltz	a6, .Ltrunc_underflow
	extui	a6, a5, 20, 11
	beqz	a6, .Ltrunc_underflow

	/* Check for overflow: rebiased exponent >= 255 means Inf/NaN input
	   or a finite value too large for single precision.  */
	movi	a4, 255
	bge	a6, a4, .Ltrunc_overflow

	/* Shift a5/xl << 3 into a5/a4: align the 52-bit fraction so the top
	   23 bits land in the single-precision fraction field; a4 holds the
	   leftover (rounding) bits.  */
	ssai	(32 - 3)
	src	a5, a5, xl
	sll	a4, xl

.Ltrunc_addsign:
	/* Add the sign bit.  */
	extui	a6, xh, 31, 1
	slli	a6, a6, 31
	or	a2, a6, a5

	/* Round up if the leftover fraction is >= 1/2.  */
	bgez	a4, 1f
	addi	a2, a2, 1
	/* Overflow to the exponent is OK.  The answer will be correct.  */

	/* Check if the leftover fraction is exactly 1/2 (tie case).  */
	slli	a4, a4, 1
	beqz	a4, .Ltrunc_exactlyhalf
1:	leaf_return

.Ltrunc_exactlyhalf:
	/* Tie: round down to the nearest even value by clearing bit 0.  */
	srli	a2, a2, 1
	slli	a2, a2, 1
	leaf_return

.Ltrunc_overflow:
	/* Check if exponent == 0x7ff (input was Inf or NaN).  */
	movi	a4, 0x7ff00000
	bnall	xh, a4, 1f

	/* Check if mantissa is nonzero (NaN rather than Inf).  */
	slli	a5, xh, 12
	or	a5, a5, xl
	beqz	a5, 1f

	/* Shift a4 to set a bit in the mantissa, making a quiet NaN.  */
	srli	a4, a4, 1

1:	slli	a4, a4, 4	/* 0xff000000 or 0xff800000 */
	/* Add the sign bit: funnel-shift sign (a6) into the top of a4.  */
	extui	a6, xh, 31, 1
	ssai	1
	src	a2, a6, a4
	leaf_return

.Ltrunc_underflow:
	/* Find shift count for a subnormal.  Flush to zero if >= 32.  */
	extui	a6, xh, 20, 11
	movi	a5, 0x3ff - 0x7f
	sub	a6, a5, a6
	addi	a6, a6, 1
	bgeui	a6, 32, 1f

	/* Replace the exponent with an explicit "1.0" (the hidden bit).  */
	slli	a5, a5, 13	/* 0x700000 */
	or	a5, a5, xh
	slli	a5, a5, 11
	srli	a5, a5, 11

	/* Shift the mantissa left by 3 bits (into a5/a4).  */
	ssai	(32 - 3)
	src	a5, a5, xl
	sll	a4, xl

	/* Shift right by a6; a7 catches the bits shifted out entirely so
	   they can still influence rounding (sticky bits).  */
	ssr	a6
	sll	a7, a4
	src	a4, a5, a4
	srl	a5, a5
	beqz	a7, .Ltrunc_addsign
	or	a4, a4, a6	/* any positive, nonzero value will work */
	j	.Ltrunc_addsign

	/* Result magnitude is zero after flush: return +/- zero.  */
1:	extui	a2, xh, 31, 1
	slli	a2, a2, 31
	leaf_return

#endif /* L_truncdfsf2 */
#ifdef L_extendsfdf2

	/* ------------------------------------------------------------------
	   double __extendsfdf2 (float x)
	   Widen an IEEE-754 single to double precision.  The conversion is
	   exact: every float (including subnormals, infinities, and NaNs)
	   is representable as a double, so no rounding is needed.
	   In:   a2 = single-precision operand.
	   Out:  xh/xl = high/low words of the double result (xh/xl are
	         register macros defined earlier in this file — presumably the
	         a2/a3 return pair in the Xtensa ABI word order; confirm
	         against the file header).
	   Clobbers: a4-a7, SAR.  Leaf function (leaf_entry/leaf_return).
	   ------------------------------------------------------------------ */
	.align	4
	.global	__extendsfdf2
	.type	__extendsfdf2, @function
__extendsfdf2:
	leaf_entry sp, 16

	/* Save the sign bit and then shift it off.  */
	extui	a5, a2, 31, 1
	slli	a5, a5, 31
	slli	a4, a2, 1

	/* Extract and check the exponent.  */
	extui	a6, a2, 23, 8
	beqz	a6, .Lextend_expzero
	addi	a6, a6, 1
	beqi	a6, 256, .Lextend_nan_or_inf

	/* Normal number: shift (exponent+fraction) >> 3 into a4/xl to move
	   the 23-bit fraction into the double's 52-bit fraction field.  */
	srli	a4, a4, 4
	slli	xl, a2, (32 - 3)

	/* Adjust the exponent bias (single bias 0x7f -> double bias 0x3ff).  */
	movi	a6, (0x3ff - 0x7f) << 20
	add	a4, a4, a6

	/* Add the sign bit.  */
	or	xh, a4, a5
	leaf_return

.Lextend_nan_or_inf:
	movi	a4, 0x7ff00000

	/* Check for NaN (nonzero fraction).  */
	slli	a7, a2, 9
	beqz	a7, 1f

	/* Set the quiet bit of the double NaN.  */
	slli	a6, a6, 11	/* 0x80000 */
	or	a4, a4, a6

	/* Add the sign and return.  */
1:	or	xh, a4, a5
	movi	xl, 0
	leaf_return

.Lextend_expzero:
	/* Zero fraction too: the input is +/-0.0; reuse the return at 1b
	   (a4 is zero here, so it ORs in nothing but the sign).  */
	beqz	a4, 1b

	/* Subnormal input: normalize it to have 8 zero bits before the
	   first 1 bit, so it becomes a normal double.  */
	do_nsau	a7, a4, a2, a3
	addi	a7, a7, -8
	ssl	a7
	sll	a4, a4

	/* Shift >> 3 into a4/xl.  */
	slli	xl, a4, (32 - 3)
	srli	a4, a4, 3

	/* Set the exponent, compensating for the normalization shift a7.  */
	movi	a6, 0x3fe - 0x7f
	sub	a6, a6, a7
	slli	a6, a6, 20
	add	a4, a4, a6

	/* Add the sign and return.  */
	or	xh, a4, a5
	leaf_return

#endif /* L_extendsfdf2 */
 
 

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.