OpenCores
URL https://opencores.org/ocsvn/openrisc/openrisc/trunk

Subversion Repositories openrisc

Compare Revisions

  • This comparison shows the changes necessary to convert path
    /openrisc/trunk/gnu-old/gcc-4.2.2/gcc/config/v850
    from Rev 154 to Rev 816

Rev 154 → Rev 816

/v850.md
0,0 → 1,2036
;; GCC machine description for NEC V850
;; Copyright (C) 1996, 1997, 1998, 1999, 2002, 2004, 2005, 2007
;; Free Software Foundation, Inc.
;; Contributed by Jeff Law (law@cygnus.com).
 
;; This file is part of GCC.
 
;; GCC is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
 
;; GCC is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;; GNU General Public License for more details.
 
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3. If not see
;; <http://www.gnu.org/licenses/>.
 
;; The original PO technology requires these to be ordered by speed,
;; so that the assigner will pick the fastest.
 
;; See file "rtl.def" for documentation on define_insn, match_*, et al.
 
;; The V851 manual states that the instruction address space is 16M;
;; the various branch/call instructions only have a 22-bit offset (4M range).
;;
;; One day we'll probably need to handle calls to targets more than 4M
;; away.
 
;; The size of instructions in bytes.
 
(define_attr "length" ""
(const_int 4))
 
(define_attr "long_calls" "yes,no"
(const (if_then_else (symbol_ref "TARGET_LONG_CALLS")
(const_string "yes")
(const_string "no"))))
;; Types of instructions (for scheduling purposes).
 
(define_attr "type" "load,mult,other"
(const_string "other"))
 
;; Condition code settings.
;; none - insn does not affect cc
;; none_0hit - insn does not affect cc but it does modify operand 0
;; This attribute is used to keep track of when operand 0 changes.
;; See the description of NOTICE_UPDATE_CC for more info.
;; set_znv - sets z,n,v to usable values; c is unknown.
;; set_zn - sets z,n to usable values; v and c are unknown.
;; compare - compare instruction
;; clobber - value of cc is unknown
(define_attr "cc" "none,none_0hit,set_zn,set_znv,compare,clobber"
(const_string "clobber"))
;; Function units for the V850. As best as I can tell, there's
;; a traditional memory load/use stall as well as a stall if
;; the result of a multiply is used too early.
 
(define_insn_reservation "v850_other" 1
(eq_attr "type" "other")
"nothing")
(define_insn_reservation "v850_mult" 2
(eq_attr "type" "mult")
"nothing")
(define_insn_reservation "v850_memory" 2
(eq_attr "type" "load")
"nothing")
 
(include "predicates.md")
;; ----------------------------------------------------------------------
;; MOVE INSTRUCTIONS
;; ----------------------------------------------------------------------
 
;; movqi
 
(define_expand "movqi"
[(set (match_operand:QI 0 "general_operand" "")
(match_operand:QI 1 "general_operand" ""))]
""
"
{
/* One of the ops has to be in a register or 0 */
if (!register_operand (operand0, QImode)
&& !reg_or_0_operand (operand1, QImode))
operands[1] = copy_to_mode_reg (QImode, operand1);
}")
 
(define_insn "*movqi_internal"
[(set (match_operand:QI 0 "general_operand" "=r,r,r,Q,r,m,m")
(match_operand:QI 1 "general_operand" "Jr,n,Q,Ir,m,r,I"))]
"register_operand (operands[0], QImode)
|| reg_or_0_operand (operands[1], QImode)"
"* return output_move_single (operands);"
[(set_attr "length" "2,4,2,2,4,4,4")
(set_attr "cc" "none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit")
(set_attr "type" "other,other,load,other,load,other,other")])
 
;; movhi
 
(define_expand "movhi"
[(set (match_operand:HI 0 "general_operand" "")
(match_operand:HI 1 "general_operand" ""))]
""
"
{
/* One of the ops has to be in a register or 0 */
if (!register_operand (operand0, HImode)
&& !reg_or_0_operand (operand1, HImode))
operands[1] = copy_to_mode_reg (HImode, operand1);
}")
 
(define_insn "*movhi_internal"
[(set (match_operand:HI 0 "general_operand" "=r,r,r,Q,r,m,m")
(match_operand:HI 1 "general_operand" "Jr,n,Q,Ir,m,r,I"))]
"register_operand (operands[0], HImode)
|| reg_or_0_operand (operands[1], HImode)"
"* return output_move_single (operands);"
[(set_attr "length" "2,4,2,2,4,4,4")
(set_attr "cc" "none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit")
(set_attr "type" "other,other,load,other,load,other,other")])
 
;; movsi and helpers
 
(define_insn "*movsi_high"
[(set (match_operand:SI 0 "register_operand" "=r")
(high:SI (match_operand 1 "" "")))]
""
"movhi hi(%1),%.,%0"
[(set_attr "length" "4")
(set_attr "cc" "none_0hit")
(set_attr "type" "other")])
 
(define_insn "*movsi_lo"
[(set (match_operand:SI 0 "register_operand" "=r")
(lo_sum:SI (match_operand:SI 1 "register_operand" "r")
(match_operand:SI 2 "immediate_operand" "i")))]
""
"movea lo(%2),%1,%0"
[(set_attr "length" "4")
(set_attr "cc" "none_0hit")
(set_attr "type" "other")])
 
(define_expand "movsi"
[(set (match_operand:SI 0 "general_operand" "")
(match_operand:SI 1 "general_operand" ""))]
""
"
{
/* One of the ops has to be in a register or 0 */
if (!register_operand (operand0, SImode)
&& !reg_or_0_operand (operand1, SImode))
operands[1] = copy_to_mode_reg (SImode, operand1);
 
/* Some constants, as well as symbolic operands,
must be handled with HIGH & LO_SUM patterns. */
if (CONSTANT_P (operands[1])
&& GET_CODE (operands[1]) != HIGH
&& ! TARGET_V850E
&& !special_symbolref_operand (operands[1], VOIDmode)
&& !(GET_CODE (operands[1]) == CONST_INT
&& (CONST_OK_FOR_J (INTVAL (operands[1]))
|| CONST_OK_FOR_K (INTVAL (operands[1]))
|| CONST_OK_FOR_L (INTVAL (operands[1])))))
{
rtx temp;
 
if (reload_in_progress || reload_completed)
temp = operands[0];
else
temp = gen_reg_rtx (SImode);
 
emit_insn (gen_rtx_SET (SImode, temp,
gen_rtx_HIGH (SImode, operand1)));
emit_insn (gen_rtx_SET (SImode, operand0,
gen_rtx_LO_SUM (SImode, temp, operand1)));
DONE;
}
}")
 
;; This is the same as the following pattern, except that it includes
;; support for arbitrary 32 bit immediates.
 
;; ??? This always loads addresses using hilo. If the only use of this address
;; was in a load/store, then we would get smaller code if we only loaded the
;; upper part with hi, and then put the lower part in the load/store insn.
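;; For instance (illustrative; "sym" is a hypothetical symbol): taking the
;; address of sym goes through the *movsi_high / *movsi_lo pair above,
;;
;; movhi hi(sym), r0, r10
;; movea lo(sym), r10, r10
;;
;; and, per the note above, folding the low part into a following ld/st
;; would save the movea when the address is only used for a memory access.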
 
(define_insn "*movsi_internal_v850e"
[(set (match_operand:SI 0 "general_operand" "=r,r,r,r,Q,r,r,m,m,r")
(match_operand:SI 1 "general_operand" "Jr,K,L,Q,Ir,m,R,r,I,i"))]
"TARGET_V850E
&& (register_operand (operands[0], SImode)
|| reg_or_0_operand (operands[1], SImode))"
"* return output_move_single (operands);"
[(set_attr "length" "2,4,4,2,2,4,4,4,4,6")
(set_attr "cc" "none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit")
(set_attr "type" "other,other,other,load,other,load,other,other,other,other")])
 
(define_insn "*movsi_internal"
[(set (match_operand:SI 0 "general_operand" "=r,r,r,r,Q,r,r,m,m")
(match_operand:SI 1 "movsi_source_operand" "Jr,K,L,Q,Ir,m,R,r,I"))]
"register_operand (operands[0], SImode)
|| reg_or_0_operand (operands[1], SImode)"
"* return output_move_single (operands);"
[(set_attr "length" "2,4,4,2,2,4,4,4,4")
(set_attr "cc" "none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit")
(set_attr "type" "other,other,other,load,other,load,other,other,other")])
 
 
 
(define_expand "movdi"
[(set (match_operand:DI 0 "general_operand" "")
(match_operand:DI 1 "general_operand" ""))]
""
"
{
/* One of the ops has to be in a register or 0 */
if (!register_operand (operand0, DImode)
&& !reg_or_0_operand (operand1, DImode))
operands[1] = copy_to_mode_reg (DImode, operand1);
}")
 
(define_insn "*movdi_internal"
[(set (match_operand:DI 0 "general_operand" "=r,r,r,r,r,m,m,r")
(match_operand:DI 1 "general_operand" "Jr,K,L,i,m,r,IG,iF"))]
"register_operand (operands[0], DImode)
|| reg_or_0_operand (operands[1], DImode)"
"* return output_move_double (operands);"
[(set_attr "length" "4,8,8,16,8,8,8,16")
(set_attr "cc" "none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit")
(set_attr "type" "other,other,other,other,load,other,other,other")])
 
(define_expand "movsf"
[(set (match_operand:SF 0 "general_operand" "")
(match_operand:SF 1 "general_operand" ""))]
""
"
{
/* One of the ops has to be in a register or 0 */
if (!register_operand (operand0, SFmode)
&& !reg_or_0_operand (operand1, SFmode))
operands[1] = copy_to_mode_reg (SFmode, operand1);
}")
 
(define_insn "*movsf_internal"
[(set (match_operand:SF 0 "general_operand" "=r,r,r,r,r,Q,r,m,m,r")
(match_operand:SF 1 "general_operand" "Jr,K,L,n,Q,Ir,m,r,IG,iF"))]
"register_operand (operands[0], SFmode)
|| reg_or_0_operand (operands[1], SFmode)"
"* return output_move_single (operands);"
[(set_attr "length" "2,4,4,8,2,2,4,4,4,8")
(set_attr "cc" "none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit")
(set_attr "type" "other,other,other,other,load,other,load,other,other,other")])
 
(define_expand "movdf"
[(set (match_operand:DF 0 "general_operand" "")
(match_operand:DF 1 "general_operand" ""))]
""
"
{
/* One of the ops has to be in a register or 0 */
if (!register_operand (operand0, DFmode)
&& !reg_or_0_operand (operand1, DFmode))
operands[1] = copy_to_mode_reg (DFmode, operand1);
}")
 
(define_insn "*movdf_internal"
[(set (match_operand:DF 0 "general_operand" "=r,r,r,r,r,m,m,r")
(match_operand:DF 1 "general_operand" "Jr,K,L,i,m,r,IG,iF"))]
"register_operand (operands[0], DFmode)
|| reg_or_0_operand (operands[1], DFmode)"
"* return output_move_double (operands);"
[(set_attr "length" "4,8,8,16,8,8,8,16")
(set_attr "cc" "none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit")
(set_attr "type" "other,other,other,other,load,other,other,other")])
 
;; ----------------------------------------------------------------------
;; TEST INSTRUCTIONS
;; ----------------------------------------------------------------------
 
(define_insn "*v850_tst1"
[(set (cc0) (zero_extract:SI (match_operand:QI 0 "memory_operand" "m")
(const_int 1)
(match_operand:QI 1 "const_int_operand" "n")))]
""
"tst1 %1,%0"
[(set_attr "length" "4")
(set_attr "cc" "clobber")])
 
;; This replaces ld.b;sar;andi with tst1;setf nz.
 
;; ??? The zero_extract sets the Z bit to the opposite of what one would
;; expect. This perhaps should be wrapped in a (eq: X (const_int 0)).
 
(define_split
[(set (match_operand:SI 0 "register_operand" "")
(zero_extract:SI (match_operand:QI 1 "memory_operand" "")
(const_int 1)
(match_operand 2 "const_int_operand" "")))]
""
[(set (cc0) (zero_extract:SI (match_dup 1)
(const_int 1)
(match_dup 2)))
(set (match_dup 0) (ne:SI (cc0) (const_int 0)))])
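;; An illustrative before/after for this split (testing bit 3 of a byte at
;; 0[r6]; offsets and registers are hypothetical):
;;
;; ld.b 0[r6],r10 ; sar 3,r10 ; andi 1,r10,r10      (without the split)
;; tst1 3,0[r6] ; setf nz,r10                       (with the split)
;;
;; tst1 sets Z to the inverse of the tested bit, so "setf nz" recovers the
;; bit value itself, which is why the split uses (ne (cc0) 0).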
 
(define_insn "tstsi"
[(set (cc0) (match_operand:SI 0 "register_operand" "r"))]
""
"cmp %.,%0"
[(set_attr "length" "2")
(set_attr "cc" "set_znv")])
 
(define_insn "cmpsi"
[(set (cc0)
(compare (match_operand:SI 0 "register_operand" "r,r")
(match_operand:SI 1 "reg_or_int5_operand" "r,J")))]
""
"@
cmp %1,%0
cmp %1,%0"
[(set_attr "length" "2,2")
(set_attr "cc" "compare")])
;; ----------------------------------------------------------------------
;; ADD INSTRUCTIONS
;; ----------------------------------------------------------------------
 
(define_insn "addsi3"
[(set (match_operand:SI 0 "register_operand" "=r,r,r")
(plus:SI (match_operand:SI 1 "register_operand" "%0,r,r")
(match_operand:SI 2 "nonmemory_operand" "rJ,K,U")))]
""
"@
add %2,%0
addi %2,%1,%0
addi %O2(%P2),%1,%0"
[(set_attr "length" "2,4,4")
(set_attr "cc" "set_zn,set_zn,set_zn")])
 
;; ----------------------------------------------------------------------
;; SUBTRACT INSTRUCTIONS
;; ----------------------------------------------------------------------
 
(define_insn "subsi3"
[(set (match_operand:SI 0 "register_operand" "=r,r")
(minus:SI (match_operand:SI 1 "register_operand" "0,r")
(match_operand:SI 2 "register_operand" "r,0")))]
""
"@
sub %2,%0
subr %1,%0"
[(set_attr "length" "2,2")
(set_attr "cc" "set_zn")])
 
(define_insn "negsi2"
[(set (match_operand:SI 0 "register_operand" "=r")
(neg:SI (match_operand:SI 1 "register_operand" "0")))]
""
"subr %.,%0"
[(set_attr "length" "2")
(set_attr "cc" "set_zn")])
 
;; ----------------------------------------------------------------------
;; MULTIPLY INSTRUCTIONS
;; ----------------------------------------------------------------------
 
(define_expand "mulhisi3"
[(set (match_operand:SI 0 "register_operand" "")
(mult:SI
(sign_extend:SI (match_operand:HI 1 "register_operand" ""))
(sign_extend:SI (match_operand:HI 2 "nonmemory_operand" ""))))]
""
"if (GET_CODE (operands[2]) == CONST_INT)
{
emit_insn (gen_mulhisi3_internal2 (operands[0], operands[1], operands[2]));
DONE;
}")
 
(define_insn "*mulhisi3_internal1"
[(set (match_operand:SI 0 "register_operand" "=r")
(mult:SI
(sign_extend:SI (match_operand:HI 1 "register_operand" "%0"))
(sign_extend:SI (match_operand:HI 2 "register_operand" "r"))))]
""
"mulh %2,%0"
[(set_attr "length" "2")
(set_attr "cc" "none_0hit")
(set_attr "type" "mult")])
 
(define_insn "mulhisi3_internal2"
[(set (match_operand:SI 0 "register_operand" "=r,r")
(mult:SI
(sign_extend:SI (match_operand:HI 1 "register_operand" "%0,r"))
(match_operand:HI 2 "const_int_operand" "J,K")))]
""
"@
mulh %2,%0
mulhi %2,%1,%0"
[(set_attr "length" "2,4")
(set_attr "cc" "none_0hit,none_0hit")
(set_attr "type" "mult")])
 
;; ??? The scheduling info is probably wrong.
 
;; ??? This instruction can also generate the 32 bit highpart, but using it
;; may increase code size, counter to the desired result.

;; ??? This instruction can also give a DImode result.

;; ??? There is an unsigned version, but it matters only for the DImode/highpart
;; results.
 
(define_insn "mulsi3"
[(set (match_operand:SI 0 "register_operand" "=r")
(mult:SI (match_operand:SI 1 "register_operand" "%0")
(match_operand:SI 2 "reg_or_int9_operand" "rO")))]
"TARGET_V850E"
"mul %2,%1,%."
[(set_attr "length" "4")
(set_attr "cc" "none_0hit")
(set_attr "type" "mult")])
 
;; ----------------------------------------------------------------------
;; DIVIDE INSTRUCTIONS
;; ----------------------------------------------------------------------
 
;; ??? These insns do set the Z/N condition codes, except that they are based
;; on only one of the two results, so it doesn't seem to make sense to use
;; them.
 
;; ??? The scheduling info is probably wrong.
 
(define_insn "divmodsi4"
[(set (match_operand:SI 0 "register_operand" "=r")
(div:SI (match_operand:SI 1 "register_operand" "0")
(match_operand:SI 2 "register_operand" "r")))
(set (match_operand:SI 3 "register_operand" "=r")
(mod:SI (match_dup 1)
(match_dup 2)))]
"TARGET_V850E"
"div %2,%0,%3"
[(set_attr "length" "4")
(set_attr "cc" "clobber")
(set_attr "type" "other")])
(define_insn "udivmodsi4"
[(set (match_operand:SI 0 "register_operand" "=r")
(udiv:SI (match_operand:SI 1 "register_operand" "0")
(match_operand:SI 2 "register_operand" "r")))
(set (match_operand:SI 3 "register_operand" "=r")
(umod:SI (match_dup 1)
(match_dup 2)))]
"TARGET_V850E"
"divu %2,%0,%3"
[(set_attr "length" "4")
(set_attr "cc" "clobber")
(set_attr "type" "other")])
;; ??? There is a 2 byte instruction for generating only the quotient.
;; However, it isn't clear how to compute the length field correctly.
 
(define_insn "divmodhi4"
[(set (match_operand:HI 0 "register_operand" "=r")
(div:HI (match_operand:HI 1 "register_operand" "0")
(match_operand:HI 2 "register_operand" "r")))
(set (match_operand:HI 3 "register_operand" "=r")
(mod:HI (match_dup 1)
(match_dup 2)))]
"TARGET_V850E"
"divh %2,%0,%3"
[(set_attr "length" "4")
(set_attr "cc" "clobber")
(set_attr "type" "other")])
 
;; Half-words are sign-extended by default, so we must zero extend to a word
;; here before doing the divide.
 
(define_insn "udivmodhi4"
[(set (match_operand:HI 0 "register_operand" "=r")
(udiv:HI (match_operand:HI 1 "register_operand" "0")
(match_operand:HI 2 "register_operand" "r")))
(set (match_operand:HI 3 "register_operand" "=r")
(umod:HI (match_dup 1)
(match_dup 2)))]
"TARGET_V850E"
"zxh %0 ; divhu %2,%0,%3"
[(set_attr "length" "4")
(set_attr "cc" "clobber")
(set_attr "type" "other")])
;; ----------------------------------------------------------------------
;; AND INSTRUCTIONS
;; ----------------------------------------------------------------------
 
(define_insn "*v850_clr1_1"
[(set (match_operand:QI 0 "memory_operand" "=m")
(subreg:QI
(and:SI (subreg:SI (match_dup 0) 0)
(match_operand:QI 1 "not_power_of_two_operand" "")) 0))]
""
"*
{
rtx xoperands[2];
xoperands[0] = operands[0];
xoperands[1] = GEN_INT (~INTVAL (operands[1]) & 0xff);
output_asm_insn (\"clr1 %M1,%0\", xoperands);
return \"\";
}"
[(set_attr "length" "4")
(set_attr "cc" "clobber")])
 
(define_insn "*v850_clr1_2"
[(set (match_operand:HI 0 "indirect_operand" "=m")
(subreg:HI
(and:SI (subreg:SI (match_dup 0) 0)
(match_operand:HI 1 "not_power_of_two_operand" "")) 0))]
""
"*
{
int log2 = exact_log2 (~INTVAL (operands[1]) & 0xffff);
 
rtx xoperands[2];
xoperands[0] = gen_rtx_MEM (QImode,
plus_constant (XEXP (operands[0], 0), log2 / 8));
xoperands[1] = GEN_INT (log2 % 8);
output_asm_insn (\"clr1 %1,%0\", xoperands);
return \"\";
}"
[(set_attr "length" "4")
(set_attr "cc" "clobber")])
 
(define_insn "*v850_clr1_3"
[(set (match_operand:SI 0 "indirect_operand" "=m")
(and:SI (match_dup 0)
(match_operand:SI 1 "not_power_of_two_operand" "")))]
""
"*
{
int log2 = exact_log2 (~INTVAL (operands[1]) & 0xffffffff);
 
rtx xoperands[2];
xoperands[0] = gen_rtx_MEM (QImode,
plus_constant (XEXP (operands[0], 0), log2 / 8));
xoperands[1] = GEN_INT (log2 % 8);
output_asm_insn (\"clr1 %1,%0\", xoperands);
return \"\";
}"
[(set_attr "length" "4")
(set_attr "cc" "clobber")])
 
(define_insn "andsi3"
[(set (match_operand:SI 0 "register_operand" "=r,r,r")
(and:SI (match_operand:SI 1 "register_operand" "%0,0,r")
(match_operand:SI 2 "nonmemory_operand" "r,I,M")))]
""
"@
and %2,%0
and %.,%0
andi %2,%1,%0"
[(set_attr "length" "2,2,4")
(set_attr "cc" "set_znv")])
 
;; ----------------------------------------------------------------------
;; OR INSTRUCTIONS
;; ----------------------------------------------------------------------
 
(define_insn "*v850_set1_1"
[(set (match_operand:QI 0 "memory_operand" "=m")
(subreg:QI (ior:SI (subreg:SI (match_dup 0) 0)
(match_operand 1 "power_of_two_operand" "")) 0))]
""
"set1 %M1,%0"
[(set_attr "length" "4")
(set_attr "cc" "clobber")])
 
(define_insn "*v850_set1_2"
[(set (match_operand:HI 0 "indirect_operand" "=m")
(subreg:HI (ior:SI (subreg:SI (match_dup 0) 0)
(match_operand 1 "power_of_two_operand" "")) 0))]
""
"*
{
int log2 = exact_log2 (INTVAL (operands[1]));
 
if (log2 < 8)
return \"set1 %M1,%0\";
else
{
rtx xoperands[2];
xoperands[0] = gen_rtx_MEM (QImode,
plus_constant (XEXP (operands[0], 0),
log2 / 8));
xoperands[1] = GEN_INT (log2 % 8);
output_asm_insn (\"set1 %1,%0\", xoperands);
}
return \"\";
}"
[(set_attr "length" "4")
(set_attr "cc" "clobber")])
 
(define_insn "*v850_set1_3"
[(set (match_operand:SI 0 "indirect_operand" "=m")
(ior:SI (match_dup 0)
(match_operand 1 "power_of_two_operand" "")))]
""
"*
{
int log2 = exact_log2 (INTVAL (operands[1]));
 
if (log2 < 8)
return \"set1 %M1,%0\";
else
{
rtx xoperands[2];
xoperands[0] = gen_rtx_MEM (QImode,
plus_constant (XEXP (operands[0], 0),
log2 / 8));
xoperands[1] = GEN_INT (log2 % 8);
output_asm_insn (\"set1 %1,%0\", xoperands);
}
return \"\";
}"
[(set_attr "length" "4")
(set_attr "cc" "clobber")])
 
(define_insn "iorsi3"
[(set (match_operand:SI 0 "register_operand" "=r,r,r")
(ior:SI (match_operand:SI 1 "register_operand" "%0,0,r")
(match_operand:SI 2 "nonmemory_operand" "r,I,M")))]
""
"@
or %2,%0
or %.,%0
ori %2,%1,%0"
[(set_attr "length" "2,2,4")
(set_attr "cc" "set_znv")])
 
;; ----------------------------------------------------------------------
;; XOR INSTRUCTIONS
;; ----------------------------------------------------------------------
 
(define_insn "*v850_not1_1"
[(set (match_operand:QI 0 "memory_operand" "=m")
(subreg:QI (xor:SI (subreg:SI (match_dup 0) 0)
(match_operand 1 "power_of_two_operand" "")) 0))]
""
"not1 %M1,%0"
[(set_attr "length" "4")
(set_attr "cc" "clobber")])
 
(define_insn "*v850_not1_2"
[(set (match_operand:HI 0 "indirect_operand" "=m")
(subreg:HI (xor:SI (subreg:SI (match_dup 0) 0)
(match_operand 1 "power_of_two_operand" "")) 0))]
""
"*
{
int log2 = exact_log2 (INTVAL (operands[1]));
 
if (log2 < 8)
return \"not1 %M1,%0\";
else
{
rtx xoperands[2];
xoperands[0] = gen_rtx_MEM (QImode,
plus_constant (XEXP (operands[0], 0),
log2 / 8));
xoperands[1] = GEN_INT (log2 % 8);
output_asm_insn (\"not1 %1,%0\", xoperands);
}
return \"\";
}"
[(set_attr "length" "4")
(set_attr "cc" "clobber")])
 
(define_insn "*v850_not1_3"
[(set (match_operand:SI 0 "indirect_operand" "=m")
(xor:SI (match_dup 0)
(match_operand 1 "power_of_two_operand" "")))]
""
"*
{
int log2 = exact_log2 (INTVAL (operands[1]));
 
if (log2 < 8)
return \"not1 %M1,%0\";
else
{
rtx xoperands[2];
xoperands[0] = gen_rtx_MEM (QImode,
plus_constant (XEXP (operands[0], 0),
log2 / 8));
xoperands[1] = GEN_INT (log2 % 8);
output_asm_insn (\"not1 %1,%0\", xoperands);
}
return \"\";
}"
[(set_attr "length" "4")
(set_attr "cc" "clobber")])
 
(define_insn "xorsi3"
[(set (match_operand:SI 0 "register_operand" "=r,r,r")
(xor:SI (match_operand:SI 1 "register_operand" "%0,0,r")
(match_operand:SI 2 "nonmemory_operand" "r,I,M")))]
""
"@
xor %2,%0
xor %.,%0
xori %2,%1,%0"
[(set_attr "length" "2,2,4")
(set_attr "cc" "set_znv")])
;; ----------------------------------------------------------------------
;; NOT INSTRUCTIONS
;; ----------------------------------------------------------------------
 
(define_insn "one_cmplsi2"
[(set (match_operand:SI 0 "register_operand" "=r")
(not:SI (match_operand:SI 1 "register_operand" "r")))]
""
"not %1,%0"
[(set_attr "length" "2")
(set_attr "cc" "set_znv")])
;; -----------------------------------------------------------------
;; BIT FIELDS
;; -----------------------------------------------------------------
 
;; ??? Is it worth defining insv and extv for the V850 series?!?
 
;; An insv pattern would be useful, but does not get used because
;; store_bit_field never calls insv when storing a constant value into a
;; single-bit bitfield.
 
;; extv/extzv patterns would be useful, but do not get used because
;; optimize_bitfield_compare in fold-const usually converts single
;; bit extracts into an AND with a mask.
 
;; -----------------------------------------------------------------
;; Scc INSTRUCTIONS
;; -----------------------------------------------------------------
 
(define_insn "sle"
[(set (match_operand:SI 0 "register_operand" "=r")
(le:SI (cc0) (const_int 0)))]
""
"*
{
if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0)
return 0;
 
return \"setf le,%0\";
}"
[(set_attr "length" "4")
(set_attr "cc" "none_0hit")])
 
(define_insn "sleu"
[(set (match_operand:SI 0 "register_operand" "=r")
(leu:SI (cc0) (const_int 0)))]
""
"setf nh,%0"
[(set_attr "length" "4")
(set_attr "cc" "none_0hit")])
 
(define_insn "sge"
[(set (match_operand:SI 0 "register_operand" "=r")
(ge:SI (cc0) (const_int 0)))]
""
"*
{
if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0)
return 0;
 
return \"setf ge,%0\";
}"
[(set_attr "length" "4")
(set_attr "cc" "none_0hit")])
 
(define_insn "sgeu"
[(set (match_operand:SI 0 "register_operand" "=r")
(geu:SI (cc0) (const_int 0)))]
""
"setf nl,%0"
[(set_attr "length" "4")
(set_attr "cc" "none_0hit")])
 
(define_insn "slt"
[(set (match_operand:SI 0 "register_operand" "=r")
(lt:SI (cc0) (const_int 0)))]
""
"*
{
if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0)
return 0;
 
return \"setf lt,%0\";
}"
[(set_attr "length" "4")
(set_attr "cc" "none_0hit")])
 
(define_insn "sltu"
[(set (match_operand:SI 0 "register_operand" "=r")
(ltu:SI (cc0) (const_int 0)))]
""
"setf l,%0"
[(set_attr "length" "4")
(set_attr "cc" "none_0hit")])
 
(define_insn "sgt"
[(set (match_operand:SI 0 "register_operand" "=r")
(gt:SI (cc0) (const_int 0)))]
""
"*
{
if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0)
return 0;
 
return \"setf gt,%0\";
}"
[(set_attr "length" "4")
(set_attr "cc" "none_0hit")])
 
(define_insn "sgtu"
[(set (match_operand:SI 0 "register_operand" "=r")
(gtu:SI (cc0) (const_int 0)))]
""
"setf h,%0"
[(set_attr "length" "4")
(set_attr "cc" "none_0hit")])
 
(define_insn "seq"
[(set (match_operand:SI 0 "register_operand" "=r")
(eq:SI (cc0) (const_int 0)))]
""
"setf z,%0"
[(set_attr "length" "4")
(set_attr "cc" "none_0hit")])
 
(define_insn "sne"
[(set (match_operand:SI 0 "register_operand" "=r")
(ne:SI (cc0) (const_int 0)))]
""
"setf nz,%0"
[(set_attr "length" "4")
(set_attr "cc" "none_0hit")])
 
;; ----------------------------------------------------------------------
;; CONDITIONAL MOVE INSTRUCTIONS
;; ----------------------------------------------------------------------
 
;; Instructions using cc0 aren't allowed to have input reloads, so we must
;; hide the fact that this instruction uses cc0. We do so by including the
;; compare instruction inside it.
 
;; ??? This is very ugly. The right way to do this is to modify cmpsi so
;; that it doesn't emit RTL, and then modify the bcc/scc patterns so that
;; they emit RTL for the compare instruction. Unfortunately, this requires
;; lots of changes that will be hard to sanitize. So for now, cmpsi still
;; emits RTL, and I get the compare operands here from the previous insn.
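;; The net effect (an illustrative sketch for "x = a > b ? c : d" with all
;; values already in registers; register names hypothetical) is the fused
;; sequence emitted by the patterns below:
;;
;; cmp r7,r6 ; cmov gt,r8,r9,r10
;;
;; i.e. the compare is folded into the same insn as the conditional move, so
;; no cc0-using insn ever needs an input reload.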
 
(define_expand "movsicc"
[(set (match_operand:SI 0 "register_operand" "=r")
(if_then_else:SI
(match_operator 1 "comparison_operator"
[(match_dup 4) (match_dup 5)])
(match_operand:SI 2 "reg_or_const_operand" "rJ")
(match_operand:SI 3 "reg_or_const_operand" "rI")))]
"TARGET_V850E"
"
{
rtx insn = get_last_insn_anywhere ();
rtx src;
 
if (GET_CODE (operands[2]) == CONST_INT
&& GET_CODE (operands[3]) == CONST_INT)
{
int o2 = INTVAL (operands[2]);
int o3 = INTVAL (operands[3]);
 
if (o2 == 1 && o3 == 0)
FAIL; /* setf */
if (o3 == 1 && o2 == 0)
FAIL; /* setf */
if (o2 == 0 && (o3 < -16 || o3 > 15) && exact_log2 (o3) >= 0)
FAIL; /* setf + shift */
if (o3 == 0 && (o2 < -16 || o2 > 15) && exact_log2 (o2) >= 0)
FAIL; /* setf + shift */
if (o2 != 0)
operands[2] = copy_to_mode_reg (SImode, operands[2]);
if (o3 != 0)
operands[3] = copy_to_mode_reg (SImode, operands[3]);
}
else
{
if (GET_CODE (operands[2]) != REG)
operands[2] = copy_to_mode_reg (SImode, operands[2]);
if (GET_CODE (operands[3]) != REG)
operands[3] = copy_to_mode_reg (SImode, operands[3]);
}
gcc_assert (GET_CODE (insn) == INSN
&& GET_CODE (PATTERN (insn)) == SET
&& SET_DEST (PATTERN (insn)) == cc0_rtx);
src = SET_SRC (PATTERN (insn));
 
switch (GET_CODE (src))
{
case COMPARE:
operands[4] = XEXP (src, 0);
operands[5] = XEXP (src, 1);
break;
 
case REG:
case SUBREG:
operands[4] = src;
operands[5] = const0_rtx;
break;
 
default:
gcc_unreachable ();
}
}")
 
;; ??? Clobbering the condition codes is overkill.
 
;; ??? We sometimes emit an unnecessary compare instruction because the
;; condition codes may have already been set by an earlier instruction,
;; but we have no code here to avoid the compare if it is unnecessary.
 
(define_insn "*movsicc_normal"
[(set (match_operand:SI 0 "register_operand" "=r")
(if_then_else:SI
(match_operator 1 "comparison_operator"
[(match_operand:SI 4 "register_operand" "r")
(match_operand:SI 5 "reg_or_int5_operand" "rJ")])
(match_operand:SI 2 "reg_or_int5_operand" "rJ")
(match_operand:SI 3 "reg_or_0_operand" "rI")))]
"TARGET_V850E"
"cmp %5,%4 ; cmov %c1,%2,%z3,%0"
[(set_attr "length" "6")
(set_attr "cc" "clobber")])
 
(define_insn "*movsicc_reversed"
[(set (match_operand:SI 0 "register_operand" "=r")
(if_then_else:SI
(match_operator 1 "comparison_operator"
[(match_operand:SI 4 "register_operand" "r")
(match_operand:SI 5 "reg_or_int5_operand" "rJ")])
(match_operand:SI 2 "reg_or_0_operand" "rI")
(match_operand:SI 3 "reg_or_int5_operand" "rJ")))]
"TARGET_V850E"
"cmp %5,%4 ; cmov %C1,%3,%z2,%0"
[(set_attr "length" "6")
(set_attr "cc" "clobber")])
 
(define_insn "*movsicc_tst1"
[(set (match_operand:SI 0 "register_operand" "=r")
(if_then_else:SI
(match_operator 1 "comparison_operator"
[(zero_extract:SI
(match_operand:QI 2 "memory_operand" "m")
(const_int 1)
(match_operand 3 "const_int_operand" "n"))
(const_int 0)])
(match_operand:SI 4 "reg_or_int5_operand" "rJ")
(match_operand:SI 5 "reg_or_0_operand" "rI")))]
"TARGET_V850E"
"tst1 %3,%2 ; cmov %c1,%4,%z5,%0"
[(set_attr "length" "8")
(set_attr "cc" "clobber")])
 
(define_insn "*movsicc_tst1_reversed"
[(set (match_operand:SI 0 "register_operand" "=r")
(if_then_else:SI
(match_operator 1 "comparison_operator"
[(zero_extract:SI
(match_operand:QI 2 "memory_operand" "m")
(const_int 1)
(match_operand 3 "const_int_operand" "n"))
(const_int 0)])
(match_operand:SI 4 "reg_or_0_operand" "rI")
(match_operand:SI 5 "reg_or_int5_operand" "rJ")))]
"TARGET_V850E"
"tst1 %3,%2 ; cmov %C1,%5,%z4,%0"
[(set_attr "length" "8")
(set_attr "cc" "clobber")])
 
;; Matching for sasf requires combining 4 instructions, so we provide a
;; dummy pattern to match the first 3, which will always be turned into the
;; second pattern by subsequent combining. As above, we must include the
;; comparison to avoid input reloads in an insn using cc0.
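;; An illustrative source-level shape that combines to sasf (variable names
;; hypothetical):
;;
;; x = (x << 1) | (a < b);
;;
;; which *sasf_2 below emits as "cmp %4,%3 ; sasf %c1,%0".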
 
(define_insn "*sasf_1"
[(set (match_operand:SI 0 "register_operand" "")
(ior:SI (match_operator 1 "comparison_operator" [(cc0) (const_int 0)])
(ashift:SI (match_operand:SI 2 "register_operand" "")
(const_int 1))))]
"TARGET_V850E"
"* gcc_unreachable ();")
 
(define_insn "*sasf_2"
[(set (match_operand:SI 0 "register_operand" "=r")
(ior:SI
(match_operator 1 "comparison_operator"
[(match_operand:SI 3 "register_operand" "r")
(match_operand:SI 4 "reg_or_int5_operand" "rJ")])
(ashift:SI (match_operand:SI 2 "register_operand" "0")
(const_int 1))))]
"TARGET_V850E"
"cmp %4,%3 ; sasf %c1,%0"
[(set_attr "length" "6")
(set_attr "cc" "clobber")])
 
(define_split
[(set (match_operand:SI 0 "register_operand" "")
(if_then_else:SI
(match_operator 1 "comparison_operator"
[(match_operand:SI 4 "register_operand" "")
(match_operand:SI 5 "reg_or_int5_operand" "")])
(match_operand:SI 2 "const_int_operand" "")
(match_operand:SI 3 "const_int_operand" "")))]
"TARGET_V850E
&& ((INTVAL (operands[2]) ^ INTVAL (operands[3])) == 1)
&& ((INTVAL (operands[2]) + INTVAL (operands[3])) != 1)
&& (GET_CODE (operands[5]) == CONST_INT
|| REGNO (operands[0]) != REGNO (operands[5]))
&& REGNO (operands[0]) != REGNO (operands[4])"
[(set (match_dup 0) (match_dup 6))
(set (match_dup 0)
(ior:SI (match_op_dup 7 [(match_dup 4) (match_dup 5)])
(ashift:SI (match_dup 0) (const_int 1))))]
"
{
operands[6] = GEN_INT (INTVAL (operands[2]) >> 1);
if (INTVAL (operands[2]) & 0x1)
operands[7] = operands[1];
else
operands[7] = gen_rtx_fmt_ee (reverse_condition (GET_CODE (operands[1])),
GET_MODE (operands[1]),
XEXP (operands[1], 0), XEXP (operands[1], 1));
}")
;; ---------------------------------------------------------------------
;; BYTE SWAP INSTRUCTIONS
;; ---------------------------------------------------------------------
 
(define_expand "rotlhi3"
[(set (match_operand:HI 0 "register_operand" "")
(rotate:HI (match_operand:HI 1 "register_operand" "")
(match_operand:HI 2 "const_int_operand" "")))]
"TARGET_V850E"
"
{
if (INTVAL (operands[2]) != 8)
FAIL;
}")
 
(define_insn "*rotlhi3_8"
[(set (match_operand:HI 0 "register_operand" "=r")
(rotate:HI (match_operand:HI 1 "register_operand" "r")
(const_int 8)))]
"TARGET_V850E"
"bsh %1,%0"
[(set_attr "length" "4")
(set_attr "cc" "clobber")])
 
(define_expand "rotlsi3"
[(set (match_operand:SI 0 "register_operand" "")
(rotate:SI (match_operand:SI 1 "register_operand" "")
(match_operand:SI 2 "const_int_operand" "")))]
"TARGET_V850E"
"
{
if (INTVAL (operands[2]) != 16)
FAIL;
}")
 
(define_insn "*rotlsi3_16"
[(set (match_operand:SI 0 "register_operand" "=r")
(rotate:SI (match_operand:SI 1 "register_operand" "r")
(const_int 16)))]
"TARGET_V850E"
"hsw %1,%0"
[(set_attr "length" "4")
(set_attr "cc" "clobber")])
;; ----------------------------------------------------------------------
;; JUMP INSTRUCTIONS
;; ----------------------------------------------------------------------
 
;; Conditional jump instructions
 
(define_expand "ble"
[(set (pc)
(if_then_else (le (cc0)
(const_int 0))
(label_ref (match_operand 0 "" ""))
(pc)))]
""
"")
 
(define_expand "bleu"
[(set (pc)
(if_then_else (leu (cc0)
(const_int 0))
(label_ref (match_operand 0 "" ""))
(pc)))]
""
"")
 
(define_expand "bge"
[(set (pc)
(if_then_else (ge (cc0)
(const_int 0))
(label_ref (match_operand 0 "" ""))
(pc)))]
""
"")
 
(define_expand "bgeu"
[(set (pc)
(if_then_else (geu (cc0)
(const_int 0))
(label_ref (match_operand 0 "" ""))
(pc)))]
""
"")
 
(define_expand "blt"
[(set (pc)
(if_then_else (lt (cc0)
(const_int 0))
(label_ref (match_operand 0 "" ""))
(pc)))]
""
"")
 
(define_expand "bltu"
[(set (pc)
(if_then_else (ltu (cc0)
(const_int 0))
(label_ref (match_operand 0 "" ""))
(pc)))]
""
"")
 
(define_expand "bgt"
[(set (pc)
(if_then_else (gt (cc0)
(const_int 0))
(label_ref (match_operand 0 "" ""))
(pc)))]
""
"")
 
(define_expand "bgtu"
[(set (pc)
(if_then_else (gtu (cc0)
(const_int 0))
(label_ref (match_operand 0 "" ""))
(pc)))]
""
"")
 
(define_expand "beq"
[(set (pc)
(if_then_else (eq (cc0)
(const_int 0))
(label_ref (match_operand 0 "" ""))
(pc)))]
""
"")
 
(define_expand "bne"
[(set (pc)
(if_then_else (ne (cc0)
(const_int 0))
(label_ref (match_operand 0 "" ""))
(pc)))]
""
"")
 
(define_insn "*branch_normal"
[(set (pc)
(if_then_else (match_operator 1 "comparison_operator"
[(cc0) (const_int 0)])
(label_ref (match_operand 0 "" ""))
(pc)))]
""
"*
{
if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0
&& (GET_CODE (operands[1]) == GT
|| GET_CODE (operands[1]) == GE
|| GET_CODE (operands[1]) == LE
|| GET_CODE (operands[1]) == LT))
return 0;
 
if (get_attr_length (insn) == 2)
return \"b%b1 %l0\";
else
return \"b%B1 .+6 ; jr %l0\";
}"
[(set (attr "length")
(if_then_else (lt (abs (minus (match_dup 0) (pc)))
(const_int 256))
(const_int 2)
(const_int 6)))
(set_attr "cc" "none")])
 
(define_insn "*branch_invert"
[(set (pc)
(if_then_else (match_operator 1 "comparison_operator"
[(cc0) (const_int 0)])
(pc)
(label_ref (match_operand 0 "" ""))))]
""
"*
{
if ((cc_status.flags & CC_OVERFLOW_UNUSABLE) != 0
&& (GET_CODE (operands[1]) == GT
|| GET_CODE (operands[1]) == GE
|| GET_CODE (operands[1]) == LE
|| GET_CODE (operands[1]) == LT))
return 0;
if (get_attr_length (insn) == 2)
return \"b%B1 %l0\";
else
return \"b%b1 .+6 ; jr %l0\";
}"
[(set (attr "length")
(if_then_else (lt (abs (minus (match_dup 0) (pc)))
(const_int 256))
(const_int 2)
(const_int 6)))
(set_attr "cc" "none")])
 
;; Unconditional and other jump instructions.
 
(define_insn "jump"
[(set (pc)
(label_ref (match_operand 0 "" "")))]
""
"*
{
if (get_attr_length (insn) == 2)
return \"br %0\";
else
return \"jr %0\";
}"
[(set (attr "length")
(if_then_else (lt (abs (minus (match_dup 0) (pc)))
(const_int 256))
(const_int 2)
(const_int 4)))
(set_attr "cc" "none")])
 
(define_insn "indirect_jump"
[(set (pc) (match_operand:SI 0 "register_operand" "r"))]
""
"jmp %0"
[(set_attr "length" "2")
(set_attr "cc" "none")])
 
(define_insn "tablejump"
[(set (pc) (match_operand:SI 0 "register_operand" "r"))
(use (label_ref (match_operand 1 "" "")))]
""
"jmp %0"
[(set_attr "length" "2")
(set_attr "cc" "none")])
 
(define_insn "switch"
[(set (pc)
(plus:SI
(sign_extend:SI
(mem:HI
(plus:SI (ashift:SI (match_operand:SI 0 "register_operand" "r")
(const_int 1))
(label_ref (match_operand 1 "" "")))))
(label_ref (match_dup 1))))]
"TARGET_V850E"
"switch %0"
[(set_attr "length" "2")
(set_attr "cc" "none")])
 
(define_expand "casesi"
[(match_operand:SI 0 "register_operand" "")
(match_operand:SI 1 "register_operand" "")
(match_operand:SI 2 "register_operand" "")
(match_operand 3 "" "") (match_operand 4 "" "")]
""
"
{
rtx reg = gen_reg_rtx (SImode);
rtx tableaddress = gen_reg_rtx (SImode);
rtx mem;
 
/* Subtract the lower bound from the index. */
emit_insn (gen_subsi3 (reg, operands[0], operands[1]));
/* Compare the result against the number of table entries. */
emit_insn (gen_cmpsi (reg, operands[2]));
/* Branch to the default label if out of range of the table. */
emit_jump_insn (gen_bgtu (operands[4]));
 
/* Disabled because the switch pattern is not being recognized
properly at the moment, e.g. when compiling vfscanf.c in newlib. */
if (0 && ! TARGET_BIG_SWITCH && TARGET_V850E)
{
emit_jump_insn (gen_switch (reg, operands[3]));
DONE;
}
 
/* Shift index for the table array access. */
emit_insn (gen_ashlsi3 (reg, reg, GEN_INT (TARGET_BIG_SWITCH ? 2 : 1)));
/* Load the table address into a pseudo. */
emit_insn (gen_movsi (tableaddress,
gen_rtx_LABEL_REF (Pmode, operands[3])));
/* Add the table address to the index. */
emit_insn (gen_addsi3 (reg, reg, tableaddress));
/* Load the table entry. */
mem = gen_const_mem (CASE_VECTOR_MODE, reg);
if (! TARGET_BIG_SWITCH)
{
rtx reg2 = gen_reg_rtx (HImode);
emit_insn (gen_movhi (reg2, mem));
emit_insn (gen_extendhisi2 (reg, reg2));
}
else
emit_insn (gen_movsi (reg, mem));
/* Add the table address. */
emit_insn (gen_addsi3 (reg, reg, tableaddress));
/* Branch to the switch label. */
emit_jump_insn (gen_tablejump (reg, operands[3]));
DONE;
}")
 
;; Call subroutine with no return value.
 
(define_expand "call"
[(call (match_operand:QI 0 "general_operand" "")
(match_operand:SI 1 "general_operand" ""))]
""
"
{
if (! call_address_operand (XEXP (operands[0], 0), QImode)
|| TARGET_LONG_CALLS)
XEXP (operands[0], 0) = force_reg (SImode, XEXP (operands[0], 0));
if (TARGET_LONG_CALLS)
emit_call_insn (gen_call_internal_long (XEXP (operands[0], 0), operands[1]));
else
emit_call_insn (gen_call_internal_short (XEXP (operands[0], 0), operands[1]));
DONE;
}")
 
(define_insn "call_internal_short"
[(call (mem:QI (match_operand:SI 0 "call_address_operand" "S,r"))
(match_operand:SI 1 "general_operand" "g,g"))
(clobber (reg:SI 31))]
"! TARGET_LONG_CALLS"
"@
jarl %0,r31
jarl .+4,r31 ; add 4,r31 ; jmp %0"
[(set_attr "length" "4,8")]
)
 
(define_insn "call_internal_long"
[(call (mem:QI (match_operand:SI 0 "call_address_operand" "S,r"))
(match_operand:SI 1 "general_operand" "g,g"))
(clobber (reg:SI 31))]
"TARGET_LONG_CALLS"
"*
{
if (which_alternative == 0)
{
if (GET_CODE (operands[0]) == REG)
return \"jarl %0,r31\";
else
return \"movhi hi(%0), r0, r11 ; movea lo(%0), r11, r11 ; jarl .+4,r31 ; add 4, r31 ; jmp r11\";
}
else
return \"jarl .+4,r31 ; add 4,r31 ; jmp %0\";
}"
[(set_attr "length" "16,8")]
)
 
;; Call subroutine, returning value in operand 0
;; (which must be a hard register).
 
(define_expand "call_value"
[(set (match_operand 0 "" "")
(call (match_operand:QI 1 "general_operand" "")
(match_operand:SI 2 "general_operand" "")))]
""
"
{
if (! call_address_operand (XEXP (operands[1], 0), QImode)
|| TARGET_LONG_CALLS)
XEXP (operands[1], 0) = force_reg (SImode, XEXP (operands[1], 0));
if (TARGET_LONG_CALLS)
emit_call_insn (gen_call_value_internal_long (operands[0],
XEXP (operands[1], 0),
operands[2]));
else
emit_call_insn (gen_call_value_internal_short (operands[0],
XEXP (operands[1], 0),
operands[2]));
DONE;
}")
 
(define_insn "call_value_internal_short"
[(set (match_operand 0 "" "=r,r")
(call (mem:QI (match_operand:SI 1 "call_address_operand" "S,r"))
(match_operand:SI 2 "general_operand" "g,g")))
(clobber (reg:SI 31))]
"! TARGET_LONG_CALLS"
"@
jarl %1,r31
jarl .+4,r31 ; add 4,r31 ; jmp %1"
[(set_attr "length" "4,8")]
)
 
(define_insn "call_value_internal_long"
[(set (match_operand 0 "" "=r,r")
(call (mem:QI (match_operand:SI 1 "call_address_operand" "S,r"))
(match_operand:SI 2 "general_operand" "g,g")))
(clobber (reg:SI 31))]
"TARGET_LONG_CALLS"
"*
{
if (which_alternative == 0)
{
if (GET_CODE (operands[1]) == REG)
return \"jarl %1, r31\";
else
/* Reload can generate this pattern.... */
return \"movhi hi(%1), r0, r11 ; movea lo(%1), r11, r11 ; jarl .+4, r31 ; add 4, r31 ; jmp r11\";
}
else
return \"jarl .+4, r31 ; add 4, r31 ; jmp %1\";
}"
[(set_attr "length" "16,8")]
)
 
(define_insn "nop"
[(const_int 0)]
""
"nop"
[(set_attr "length" "2")
(set_attr "cc" "none")])
;; ----------------------------------------------------------------------
;; EXTEND INSTRUCTIONS
;; ----------------------------------------------------------------------
 
(define_insn ""
[(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
(zero_extend:SI
(match_operand:HI 1 "nonimmediate_operand" "0,r,T,m")))]
"TARGET_V850E"
"@
zxh %0
andi 65535,%1,%0
sld.hu %1,%0
ld.hu %1,%0"
[(set_attr "length" "2,4,2,4")
(set_attr "cc" "none_0hit,set_znv,none_0hit,none_0hit")])
 
(define_insn "zero_extendhisi2"
[(set (match_operand:SI 0 "register_operand" "=r")
(zero_extend:SI
(match_operand:HI 1 "register_operand" "r")))]
""
"andi 65535,%1,%0"
[(set_attr "length" "4")
(set_attr "cc" "set_znv")])
 
(define_insn ""
[(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
(zero_extend:SI
(match_operand:QI 1 "nonimmediate_operand" "0,r,T,m")))]
"TARGET_V850E"
"@
zxb %0
andi 255,%1,%0
sld.bu %1,%0
ld.bu %1,%0"
[(set_attr "length" "2,4,2,4")
(set_attr "cc" "none_0hit,set_znv,none_0hit,none_0hit")])
 
(define_insn "zero_extendqisi2"
[(set (match_operand:SI 0 "register_operand" "=r")
(zero_extend:SI
(match_operand:QI 1 "register_operand" "r")))]
""
"andi 255,%1,%0"
[(set_attr "length" "4")
(set_attr "cc" "set_znv")])
 
;;- sign extension instructions
 
;; ??? The extendhisi2 pattern should not emit shifts for v850e?
 
(define_insn "*extendhisi_insn"
[(set (match_operand:SI 0 "register_operand" "=r,r,r")
(sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "0,Q,m")))]
"TARGET_V850E"
"@
sxh %0
sld.h %1,%0
ld.h %1,%0"
[(set_attr "length" "2,2,4")
(set_attr "cc" "none_0hit,none_0hit,none_0hit")])
 
;; ??? This is missing a sign extend from memory pattern to match the ld.h
;; instruction.
 
(define_expand "extendhisi2"
[(set (match_dup 2)
(ashift:SI (match_operand:HI 1 "register_operand" "")
(const_int 16)))
(set (match_operand:SI 0 "register_operand" "")
(ashiftrt:SI (match_dup 2)
(const_int 16)))]
""
"
{
operands[1] = gen_lowpart (SImode, operands[1]);
operands[2] = gen_reg_rtx (SImode);
}")
 
;; ??? The extendqisi2 pattern should not emit shifts for v850e?
 
(define_insn "*extendqisi_insn"
[(set (match_operand:SI 0 "register_operand" "=r,r,r")
(sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "0,Q,m")))]
"TARGET_V850E"
"@
sxb %0
sld.b %1,%0
ld.b %1,%0"
[(set_attr "length" "2,2,4")
(set_attr "cc" "none_0hit,none_0hit,none_0hit")])
 
;; ??? This is missing a sign extend from memory pattern to match the ld.b
;; instruction.
 
(define_expand "extendqisi2"
[(set (match_dup 2)
(ashift:SI (match_operand:QI 1 "register_operand" "")
(const_int 24)))
(set (match_operand:SI 0 "register_operand" "")
(ashiftrt:SI (match_dup 2)
(const_int 24)))]
""
"
{
operands[1] = gen_lowpart (SImode, operands[1]);
operands[2] = gen_reg_rtx (SImode);
}")
;; ----------------------------------------------------------------------
;; SHIFTS
;; ----------------------------------------------------------------------
 
(define_insn "ashlsi3"
[(set (match_operand:SI 0 "register_operand" "=r,r")
(ashift:SI
(match_operand:SI 1 "register_operand" "0,0")
(match_operand:SI 2 "nonmemory_operand" "r,N")))]
""
"@
shl %2,%0
shl %2,%0"
[(set_attr "length" "4,2")
(set_attr "cc" "set_znv")])
 
(define_insn "lshrsi3"
[(set (match_operand:SI 0 "register_operand" "=r,r")
(lshiftrt:SI
(match_operand:SI 1 "register_operand" "0,0")
(match_operand:SI 2 "nonmemory_operand" "r,N")))]
""
"@
shr %2,%0
shr %2,%0"
[(set_attr "length" "4,2")
(set_attr "cc" "set_znv")])
 
(define_insn "ashrsi3"
[(set (match_operand:SI 0 "register_operand" "=r,r")
(ashiftrt:SI
(match_operand:SI 1 "register_operand" "0,0")
(match_operand:SI 2 "nonmemory_operand" "r,N")))]
""
"@
sar %2,%0
sar %2,%0"
[(set_attr "length" "4,2")
(set_attr "cc" "set_znv")])
 
;; ----------------------------------------------------------------------
;; PROLOGUE/EPILOGUE
;; ----------------------------------------------------------------------
(define_expand "prologue"
[(const_int 0)]
""
"expand_prologue (); DONE;")
 
(define_expand "epilogue"
[(return)]
""
"
{
/* Try to use the trivial return first. Else use the
full epilogue. */
if (0)
emit_jump_insn (gen_return ());
else
expand_epilogue ();
DONE;
}")
 
(define_insn "return"
[(return)]
"reload_completed && compute_frame_size (get_frame_size (), (long *)0) == 0"
"jmp [r31]"
[(set_attr "length" "2")
(set_attr "cc" "none")])
 
(define_insn "return_internal"
[(return)
(use (reg:SI 31))]
""
"jmp [r31]"
[(set_attr "length" "2")
(set_attr "cc" "none")])
 
 
;; ----------------------------------------------------------------------
;; HELPER INSTRUCTIONS for saving and restoring the prologue and epilogue registers
;; ----------------------------------------------------------------------
 
;; This pattern will match a stack adjust RTX followed by any number of push
;; RTXs. These RTXs will then be turned into a suitable call to a worker
;; function.
 
;;
;; Actually, convert the RTXs into a PREPARE instruction.
;;
(define_insn ""
[(match_parallel 0 "pattern_is_ok_for_prepare"
[(set (reg:SI 3)
(plus:SI (reg:SI 3) (match_operand:SI 1 "immediate_operand" "i")))
(set (mem:SI (plus:SI (reg:SI 3)
(match_operand:SI 2 "immediate_operand" "i")))
(match_operand:SI 3 "register_is_ok_for_epilogue" "r"))])]
"TARGET_PROLOG_FUNCTION && TARGET_V850E"
"* return construct_prepare_instruction (operands[0]);
"
[(set_attr "length" "4")
(set_attr "cc" "none")])
 
(define_insn ""
[(match_parallel 0 "pattern_is_ok_for_prologue"
[(set (reg:SI 3)
(plus:SI (reg:SI 3) (match_operand:SI 1 "immediate_operand" "i")))
(set (mem:SI (plus:SI (reg:SI 3)
(match_operand:SI 2 "immediate_operand" "i")))
(match_operand:SI 3 "register_is_ok_for_epilogue" "r"))])]
"TARGET_PROLOG_FUNCTION && TARGET_V850"
"* return construct_save_jarl (operands[0]);
"
[(set (attr "length") (if_then_else (eq_attr "long_calls" "yes")
(const_string "16")
(const_string "4")))
(set_attr "cc" "clobber")])
 
;;
;; Actually, turn the RTXs into a DISPOSE instruction.
;;
(define_insn ""
[(match_parallel 0 "pattern_is_ok_for_dispose"
[(return)
(set (reg:SI 3)
(plus:SI (reg:SI 3) (match_operand:SI 1 "immediate_operand" "i")))
(set (match_operand:SI 2 "register_is_ok_for_epilogue" "=r")
(mem:SI (plus:SI (reg:SI 3)
(match_operand:SI 3 "immediate_operand" "i"))))])]
"TARGET_PROLOG_FUNCTION && TARGET_V850E"
"* return construct_dispose_instruction (operands[0]);
"
[(set_attr "length" "4")
(set_attr "cc" "none")])
 
;; This pattern will match a return RTX followed by any number of pop RTXs
;; and possibly a stack adjustment as well. These RTXs will be turned into
;; a suitable call to a worker function.
 
(define_insn ""
[(match_parallel 0 "pattern_is_ok_for_epilogue"
[(return)
(set (reg:SI 3)
(plus:SI (reg:SI 3) (match_operand:SI 1 "immediate_operand" "i")))
(set (match_operand:SI 2 "register_is_ok_for_epilogue" "=r")
(mem:SI (plus:SI (reg:SI 3)
(match_operand:SI 3 "immediate_operand" "i"))))])]
"TARGET_PROLOG_FUNCTION && TARGET_V850"
"* return construct_restore_jr (operands[0]);
"
[(set (attr "length") (if_then_else (eq_attr "long_calls" "yes")
(const_string "12")
(const_string "4")))
(set_attr "cc" "clobber")])
 
;; Initialize an interrupt function. Do not depend on TARGET_PROLOG_FUNCTION.
(define_insn "callt_save_interrupt"
[(unspec_volatile [(const_int 0)] 2)]
"TARGET_V850E && !TARGET_DISABLE_CALLT"
;; The CALLT instruction stores the address of the next instruction (the one
;; following the CALLT) into the CTPC register without saving CTPC's previous
;; value. So if the interrupt handler or its caller could possibly execute
;; the CALLT insn, save_interrupt MUST NOT be called via CALLT.
"*
{
output_asm_insn (\"addi -24, sp, sp\", operands);
output_asm_insn (\"st.w r10, 12[sp]\", operands);
output_asm_insn (\"stsr ctpc, r10\", operands);
output_asm_insn (\"st.w r10, 16[sp]\", operands);
output_asm_insn (\"stsr ctpsw, r10\", operands);
output_asm_insn (\"st.w r10, 20[sp]\", operands);
output_asm_insn (\"callt ctoff(__callt_save_interrupt)\", operands);
return \"\";
}"
[(set_attr "length" "26")
(set_attr "cc" "none")])
 
(define_insn "callt_return_interrupt"
[(unspec_volatile [(const_int 0)] 3)]
"TARGET_V850E && !TARGET_DISABLE_CALLT"
"callt ctoff(__callt_return_interrupt)"
[(set_attr "length" "2")
(set_attr "cc" "clobber")])
 
(define_insn "save_interrupt"
[(set (reg:SI 3) (plus:SI (reg:SI 3) (const_int -16)))
(set (mem:SI (plus:SI (reg:SI 3) (const_int -16))) (reg:SI 30))
(set (mem:SI (plus:SI (reg:SI 3) (const_int -12))) (reg:SI 4))
(set (mem:SI (plus:SI (reg:SI 3) (const_int -8))) (reg:SI 1))
(set (mem:SI (plus:SI (reg:SI 3) (const_int -4))) (reg:SI 10))]
""
"*
{
if (TARGET_PROLOG_FUNCTION && !TARGET_LONG_CALLS)
return \"add -16,sp\;st.w r10,12[sp]\;jarl __save_interrupt,r10\";
else
{
output_asm_insn (\"add -16, sp\", operands);
output_asm_insn (\"st.w r10, 12[sp]\", operands);
output_asm_insn (\"st.w ep, 0[sp]\", operands);
output_asm_insn (\"st.w gp, 4[sp]\", operands);
output_asm_insn (\"st.w r1, 8[sp]\", operands);
output_asm_insn (\"movhi hi(__ep), r0, ep\", operands);
output_asm_insn (\"movea lo(__ep), ep, ep\", operands);
output_asm_insn (\"movhi hi(__gp), r0, gp\", operands);
output_asm_insn (\"movea lo(__gp), gp, gp\", operands);
return \"\";
}
}"
[(set (attr "length")
(if_then_else (ne (symbol_ref "TARGET_LONG_CALLS") (const_int 0))
(const_int 10)
(const_int 34)))
(set_attr "cc" "clobber")])
;; Restore r1, r4, r10, and return from the interrupt
(define_insn "return_interrupt"
[(return)
(set (reg:SI 3) (plus:SI (reg:SI 3) (const_int 16)))
(set (reg:SI 10) (mem:SI (plus:SI (reg:SI 3) (const_int 12))))
(set (reg:SI 1) (mem:SI (plus:SI (reg:SI 3) (const_int 8))))
(set (reg:SI 4) (mem:SI (plus:SI (reg:SI 3) (const_int 4))))
(set (reg:SI 30) (mem:SI (reg:SI 3)))]
""
"*
{
if (TARGET_PROLOG_FUNCTION && !TARGET_LONG_CALLS)
return \"jr __return_interrupt\";
else
{
output_asm_insn (\"ld.w 0[sp], ep\", operands);
output_asm_insn (\"ld.w 4[sp], gp\", operands);
output_asm_insn (\"ld.w 8[sp], r1\", operands);
output_asm_insn (\"ld.w 12[sp], r10\", operands);
output_asm_insn (\"addi 16, sp, sp\", operands);
output_asm_insn (\"reti\", operands);
return \"\";
}
}"
[(set (attr "length")
(if_then_else (ne (symbol_ref "TARGET_LONG_CALLS") (const_int 0))
(const_int 4)
(const_int 24)))
(set_attr "cc" "clobber")])
 
;; Save all registers except for the registers saved in save_interrupt when
;; an interrupt function makes a call.
;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
;; all of memory. This blocks insns from being moved across this point.
;; This is needed because the rest of the compiler is not ready to handle
;; insns this complicated.
 
(define_insn "callt_save_all_interrupt"
[(unspec_volatile [(const_int 0)] 0)]
"TARGET_V850E && !TARGET_DISABLE_CALLT"
"callt ctoff(__callt_save_all_interrupt)"
[(set_attr "length" "2")
(set_attr "cc" "none")])
 
(define_insn "save_all_interrupt"
[(unspec_volatile [(const_int 0)] 0)]
""
"*
{
if (TARGET_PROLOG_FUNCTION && !TARGET_LONG_CALLS)
return \"jarl __save_all_interrupt,r10\";
 
output_asm_insn (\"addi -120, sp, sp\", operands);
 
if (TARGET_EP)
{
output_asm_insn (\"mov ep, r1\", operands);
output_asm_insn (\"mov sp, ep\", operands);
output_asm_insn (\"sst.w r31, 116[ep]\", operands);
output_asm_insn (\"sst.w r2, 112[ep]\", operands);
output_asm_insn (\"sst.w gp, 108[ep]\", operands);
output_asm_insn (\"sst.w r6, 104[ep]\", operands);
output_asm_insn (\"sst.w r7, 100[ep]\", operands);
output_asm_insn (\"sst.w r8, 96[ep]\", operands);
output_asm_insn (\"sst.w r9, 92[ep]\", operands);
output_asm_insn (\"sst.w r11, 88[ep]\", operands);
output_asm_insn (\"sst.w r12, 84[ep]\", operands);
output_asm_insn (\"sst.w r13, 80[ep]\", operands);
output_asm_insn (\"sst.w r14, 76[ep]\", operands);
output_asm_insn (\"sst.w r15, 72[ep]\", operands);
output_asm_insn (\"sst.w r16, 68[ep]\", operands);
output_asm_insn (\"sst.w r17, 64[ep]\", operands);
output_asm_insn (\"sst.w r18, 60[ep]\", operands);
output_asm_insn (\"sst.w r19, 56[ep]\", operands);
output_asm_insn (\"sst.w r20, 52[ep]\", operands);
output_asm_insn (\"sst.w r21, 48[ep]\", operands);
output_asm_insn (\"sst.w r22, 44[ep]\", operands);
output_asm_insn (\"sst.w r23, 40[ep]\", operands);
output_asm_insn (\"sst.w r24, 36[ep]\", operands);
output_asm_insn (\"sst.w r25, 32[ep]\", operands);
output_asm_insn (\"sst.w r26, 28[ep]\", operands);
output_asm_insn (\"sst.w r27, 24[ep]\", operands);
output_asm_insn (\"sst.w r28, 20[ep]\", operands);
output_asm_insn (\"sst.w r29, 16[ep]\", operands);
output_asm_insn (\"mov r1, ep\", operands);
}
else
{
output_asm_insn (\"st.w r31, 116[sp]\", operands);
output_asm_insn (\"st.w r2, 112[sp]\", operands);
output_asm_insn (\"st.w gp, 108[sp]\", operands);
output_asm_insn (\"st.w r6, 104[sp]\", operands);
output_asm_insn (\"st.w r7, 100[sp]\", operands);
output_asm_insn (\"st.w r8, 96[sp]\", operands);
output_asm_insn (\"st.w r9, 92[sp]\", operands);
output_asm_insn (\"st.w r11, 88[sp]\", operands);
output_asm_insn (\"st.w r12, 84[sp]\", operands);
output_asm_insn (\"st.w r13, 80[sp]\", operands);
output_asm_insn (\"st.w r14, 76[sp]\", operands);
output_asm_insn (\"st.w r15, 72[sp]\", operands);
output_asm_insn (\"st.w r16, 68[sp]\", operands);
output_asm_insn (\"st.w r17, 64[sp]\", operands);
output_asm_insn (\"st.w r18, 60[sp]\", operands);
output_asm_insn (\"st.w r19, 56[sp]\", operands);
output_asm_insn (\"st.w r20, 52[sp]\", operands);
output_asm_insn (\"st.w r21, 48[sp]\", operands);
output_asm_insn (\"st.w r22, 44[sp]\", operands);
output_asm_insn (\"st.w r23, 40[sp]\", operands);
output_asm_insn (\"st.w r24, 36[sp]\", operands);
output_asm_insn (\"st.w r25, 32[sp]\", operands);
output_asm_insn (\"st.w r26, 28[sp]\", operands);
output_asm_insn (\"st.w r27, 24[sp]\", operands);
output_asm_insn (\"st.w r28, 20[sp]\", operands);
output_asm_insn (\"st.w r29, 16[sp]\", operands);
}
return \"\";
}"
[(set (attr "length")
(if_then_else (ne (symbol_ref "TARGET_LONG_CALLS") (const_int 0))
(const_int 4)
(const_int 62)
))
(set_attr "cc" "clobber")])
 
(define_insn "_save_all_interrupt"
[(unspec_volatile [(const_int 0)] 0)]
"TARGET_V850 && ! TARGET_LONG_CALLS"
"jarl __save_all_interrupt,r10"
[(set_attr "length" "4")
(set_attr "cc" "clobber")])
 
;; Restore all registers saved when an interrupt function makes a call.
;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
;; all of memory. This blocks insns from being moved across this point.
;; This is needed because the rest of the compiler is not ready to handle
;; insns this complicated.
 
(define_insn "callt_restore_all_interrupt"
[(unspec_volatile [(const_int 0)] 1)]
"TARGET_V850E && !TARGET_DISABLE_CALLT"
"callt ctoff(__callt_restore_all_interrupt)"
[(set_attr "length" "2")
(set_attr "cc" "none")])
 
(define_insn "restore_all_interrupt"
[(unspec_volatile [(const_int 0)] 1)]
""
"*
{
if (TARGET_PROLOG_FUNCTION && !TARGET_LONG_CALLS)
return \"jarl __restore_all_interrupt,r10\";
 
if (TARGET_EP)
{
output_asm_insn (\"mov ep, r1\", operands);
output_asm_insn (\"mov sp, ep\", operands);
output_asm_insn (\"sld.w 116[ep], r31\", operands);
output_asm_insn (\"sld.w 112[ep], r2\", operands);
output_asm_insn (\"sld.w 108[ep], gp\", operands);
output_asm_insn (\"sld.w 104[ep], r6\", operands);
output_asm_insn (\"sld.w 100[ep], r7\", operands);
output_asm_insn (\"sld.w 96[ep], r8\", operands);
output_asm_insn (\"sld.w 92[ep], r9\", operands);
output_asm_insn (\"sld.w 88[ep], r11\", operands);
output_asm_insn (\"sld.w 84[ep], r12\", operands);
output_asm_insn (\"sld.w 80[ep], r13\", operands);
output_asm_insn (\"sld.w 76[ep], r14\", operands);
output_asm_insn (\"sld.w 72[ep], r15\", operands);
output_asm_insn (\"sld.w 68[ep], r16\", operands);
output_asm_insn (\"sld.w 64[ep], r17\", operands);
output_asm_insn (\"sld.w 60[ep], r18\", operands);
output_asm_insn (\"sld.w 56[ep], r19\", operands);
output_asm_insn (\"sld.w 52[ep], r20\", operands);
output_asm_insn (\"sld.w 48[ep], r21\", operands);
output_asm_insn (\"sld.w 44[ep], r22\", operands);
output_asm_insn (\"sld.w 40[ep], r23\", operands);
output_asm_insn (\"sld.w 36[ep], r24\", operands);
output_asm_insn (\"sld.w 32[ep], r25\", operands);
output_asm_insn (\"sld.w 28[ep], r26\", operands);
output_asm_insn (\"sld.w 24[ep], r27\", operands);
output_asm_insn (\"sld.w 20[ep], r28\", operands);
output_asm_insn (\"sld.w 16[ep], r29\", operands);
output_asm_insn (\"mov r1, ep\", operands);
}
else
{
output_asm_insn (\"ld.w 116[sp], r31\", operands);
output_asm_insn (\"ld.w 112[sp], r2\", operands);
output_asm_insn (\"ld.w 108[sp], gp\", operands);
output_asm_insn (\"ld.w 104[sp], r6\", operands);
output_asm_insn (\"ld.w 100[sp], r7\", operands);
output_asm_insn (\"ld.w 96[sp], r8\", operands);
output_asm_insn (\"ld.w 92[sp], r9\", operands);
output_asm_insn (\"ld.w 88[sp], r11\", operands);
output_asm_insn (\"ld.w 84[sp], r12\", operands);
output_asm_insn (\"ld.w 80[sp], r13\", operands);
output_asm_insn (\"ld.w 76[sp], r14\", operands);
output_asm_insn (\"ld.w 72[sp], r15\", operands);
output_asm_insn (\"ld.w 68[sp], r16\", operands);
output_asm_insn (\"ld.w 64[sp], r17\", operands);
output_asm_insn (\"ld.w 60[sp], r18\", operands);
output_asm_insn (\"ld.w 56[sp], r19\", operands);
output_asm_insn (\"ld.w 52[sp], r20\", operands);
output_asm_insn (\"ld.w 48[sp], r21\", operands);
output_asm_insn (\"ld.w 44[sp], r22\", operands);
output_asm_insn (\"ld.w 40[sp], r23\", operands);
output_asm_insn (\"ld.w 36[sp], r24\", operands);
output_asm_insn (\"ld.w 32[sp], r25\", operands);
output_asm_insn (\"ld.w 28[sp], r26\", operands);
output_asm_insn (\"ld.w 24[sp], r27\", operands);
output_asm_insn (\"ld.w 20[sp], r28\", operands);
output_asm_insn (\"ld.w 16[sp], r29\", operands);
}
output_asm_insn (\"addi 120, sp, sp\", operands);
return \"\";
}"
[(set (attr "length")
(if_then_else (ne (symbol_ref "TARGET_LONG_CALLS") (const_int 0))
(const_int 4)
(const_int 62)
))
(set_attr "cc" "clobber")])
 
(define_insn "_restore_all_interrupt"
[(unspec_volatile [(const_int 0)] 1)]
"TARGET_V850 && ! TARGET_LONG_CALLS"
"jarl __restore_all_interrupt,r10"
[(set_attr "length" "4")
(set_attr "cc" "clobber")])
 
;; Save r6-r9 for a variable argument function
(define_insn "save_r6_r9_v850e"
[(set (mem:SI (reg:SI 3)) (reg:SI 6))
(set (mem:SI (plus:SI (reg:SI 3) (const_int 4))) (reg:SI 7))
(set (mem:SI (plus:SI (reg:SI 3) (const_int 8))) (reg:SI 8))
(set (mem:SI (plus:SI (reg:SI 3) (const_int 12))) (reg:SI 9))
]
"TARGET_PROLOG_FUNCTION && TARGET_V850E && !TARGET_DISABLE_CALLT"
"callt ctoff(__callt_save_r6_r9)"
[(set_attr "length" "2")
(set_attr "cc" "none")])
 
(define_insn "save_r6_r9"
[(set (mem:SI (reg:SI 3)) (reg:SI 6))
(set (mem:SI (plus:SI (reg:SI 3) (const_int 4))) (reg:SI 7))
(set (mem:SI (plus:SI (reg:SI 3) (const_int 8))) (reg:SI 8))
(set (mem:SI (plus:SI (reg:SI 3) (const_int 12))) (reg:SI 9))
(clobber (reg:SI 10))]
"TARGET_PROLOG_FUNCTION && ! TARGET_LONG_CALLS"
"jarl __save_r6_r9,r10"
[(set_attr "length" "4")
(set_attr "cc" "clobber")])
 
/predicates.md
0,0 → 1,438
;; Predicate definitions for NEC V850.
;; Copyright (C) 2005, 2007 Free Software Foundation, Inc.
;;
;; This file is part of GCC.
;;
;; GCC is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
;;
;; GCC is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;; GNU General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3. If not see
;; <http://www.gnu.org/licenses/>.
 
;; Return true if OP is either a register or 0.
 
(define_predicate "reg_or_0_operand"
(match_code "reg,subreg,const_int,const_double")
{
if (GET_CODE (op) == CONST_INT)
return INTVAL (op) == 0;
 
else if (GET_CODE (op) == CONST_DOUBLE)
return CONST_DOUBLE_OK_FOR_G (op);
 
else
return register_operand (op, mode);
})
 
;; Return true if OP is either a register or a signed five bit
;; integer.
 
(define_predicate "reg_or_int5_operand"
(match_code "reg,subreg,const_int")
{
if (GET_CODE (op) == CONST_INT)
return CONST_OK_FOR_J (INTVAL (op));
 
else
return register_operand (op, mode);
})
 
;; Return true if OP is either a register or a signed nine bit
;; integer.
 
(define_predicate "reg_or_int9_operand"
(match_code "reg,subreg,const_int")
{
if (GET_CODE (op) == CONST_INT)
return CONST_OK_FOR_O (INTVAL (op));
 
return register_operand (op, mode);
})
 
;; Return true if OP is either a register or a const integer.
 
(define_predicate "reg_or_const_operand"
(match_code "reg,const_int")
{
if (GET_CODE (op) == CONST_INT)
return TRUE;
 
return register_operand (op, mode);
})
 
;; Return true if OP is a valid call operand.
 
(define_predicate "call_address_operand"
(match_code "reg,symbol_ref")
{
/* Only registers are valid call operands if TARGET_LONG_CALLS. */
if (TARGET_LONG_CALLS)
return GET_CODE (op) == REG;
return (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == REG);
})
 
;; Return true if OP is a valid source operand for an SImode move.
;; Constants that cannot be loaded with a single instruction (and are not
;; HIGH expressions) must be references into one of the small data areas;
;; anything else need only satisfy general_operand.
 
(define_predicate "movsi_source_operand"
(match_code "label_ref,symbol_ref,const_int,const_double,const,high,mem,reg,subreg")
{
/* Some constants, as well as symbolic operands,
must be handled with the HIGH & LO_SUM patterns. */
if (CONSTANT_P (op)
&& GET_CODE (op) != HIGH
&& !(GET_CODE (op) == CONST_INT
&& (CONST_OK_FOR_J (INTVAL (op))
|| CONST_OK_FOR_K (INTVAL (op))
|| CONST_OK_FOR_L (INTVAL (op)))))
return special_symbolref_operand (op, mode);
else
return general_operand (op, mode);
})
 
;; Return true if OP is a symbol reference (possibly plus a small constant
;; offset) that refers to an object placed in one of the small data areas
;; (tda, sda or zda).
 
(define_predicate "special_symbolref_operand"
(match_code "symbol_ref")
{
if (GET_CODE (op) == CONST
&& GET_CODE (XEXP (op, 0)) == PLUS
&& GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT
&& CONST_OK_FOR_K (INTVAL (XEXP (XEXP (op, 0), 1))))
op = XEXP (XEXP (op, 0), 0);
 
if (GET_CODE (op) == SYMBOL_REF)
return (SYMBOL_REF_FLAGS (op)
& (SYMBOL_FLAG_ZDA | SYMBOL_FLAG_TDA | SYMBOL_FLAG_SDA)) != 0;
 
return FALSE;
})
 
;; Return true if OP is a constant integer that is an exact power of two.
 
(define_predicate "power_of_two_operand"
(match_code "const_int")
{
if (GET_CODE (op) != CONST_INT)
return 0;
 
if (exact_log2 (INTVAL (op)) == -1)
return 0;
return 1;
})
 
;; Return nonzero if the given RTX is suitable for collapsing into a
;; jump to a function prologue.
 
(define_predicate "pattern_is_ok_for_prologue"
(match_code "parallel")
{
int count = XVECLEN (op, 0);
int i;
rtx vector_element;
 
/* If there are no registers to save then the function prologue
is not suitable. */
if (count <= 2)
return 0;
 
/* The pattern matching has already established that we are adjusting the
stack and pushing at least one register.  We must now check the
remaining entries in the vector to make sure that they are also register
pushes, except for the final entry (or final two entries when
TARGET_LONG_CALLS), which should be CLOBBERs of r10 (and r11).
 
The test below performs the C equivalent of this machine description
pattern match:
 
(set (mem:SI (plus:SI (reg:SI 3)
(match_operand:SI 2 "immediate_operand" "i")))
(match_operand:SI 3 "register_is_ok_for_epilogue" "r"))
 
*/
 
for (i = 2; i < count - (TARGET_LONG_CALLS ? 2: 1); i++)
{
rtx dest;
rtx src;
rtx plus;
 
vector_element = XVECEXP (op, 0, i);
 
if (GET_CODE (vector_element) != SET)
return 0;
 
dest = SET_DEST (vector_element);
src = SET_SRC (vector_element);
 
if (GET_CODE (dest) != MEM
|| GET_MODE (dest) != SImode
|| GET_CODE (src) != REG
|| GET_MODE (src) != SImode
|| ! register_is_ok_for_epilogue (src, SImode))
return 0;
 
plus = XEXP (dest, 0);
 
if ( GET_CODE (plus) != PLUS
|| GET_CODE (XEXP (plus, 0)) != REG
|| GET_MODE (XEXP (plus, 0)) != SImode
|| REGNO (XEXP (plus, 0)) != STACK_POINTER_REGNUM
|| GET_CODE (XEXP (plus, 1)) != CONST_INT)
return 0;
 
/* If the register is being pushed somewhere other than the stack
space just acquired by the first operand then abandon this quest.
Note: the test is <= because both values are negative. */
if (INTVAL (XEXP (plus, 1))
<= INTVAL (XEXP (SET_SRC (XVECEXP (op, 0, 0)), 1)))
{
return 0;
}
}
 
/* Make sure that the last entries in the vector are clobbers. */
for (; i < count; i++)
{
vector_element = XVECEXP (op, 0, i);
 
if (GET_CODE (vector_element) != CLOBBER
|| GET_CODE (XEXP (vector_element, 0)) != REG
|| !(REGNO (XEXP (vector_element, 0)) == 10
|| (TARGET_LONG_CALLS ? (REGNO (XEXP (vector_element, 0)) == 11) : 0 )))
return 0;
}
 
return 1;
})
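
;; As an illustration, a prologue PARALLEL accepted by the predicate above
;; looks roughly like this (register numbers and offsets are just an
;; example, for a function saving r28 and r29):
;;
;;   (parallel [(set (reg:SI 3) (plus:SI (reg:SI 3) (const_int -8)))
;;              (set (mem:SI (reg:SI 3)) (reg:SI 28))
;;              (set (mem:SI (plus:SI (reg:SI 3) (const_int -4))) (reg:SI 29))
;;              (clobber (reg:SI 10))])
;;
;; Element 0 adjusts the stack pointer, the following elements store the
;; saved registers into the newly allocated space, and the final element
;; (final two elements when TARGET_LONG_CALLS) clobbers the scratch
;; register(s) used to reach the out-of-line save routine.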
 
;; Return nonzero if the given RTX is suitable for collapsing into a
;; jump to a function epilogue.
 
(define_predicate "pattern_is_ok_for_epilogue"
(match_code "parallel")
{
int count = XVECLEN (op, 0);
int i;
 
/* If there are no registers to restore then the function epilogue
is not suitable. */
if (count <= 2)
return 0;
 
/* The pattern matching has already established that we are performing a
function epilogue and that we are popping at least one register. We must
now check the remaining entries in the vector to make sure that they are
also register pops. There is no good reason why there should ever be
anything else in this vector, but being paranoid always helps...
 
The test below performs the C equivalent of this machine description
pattern match:
 
(set (match_operand:SI n "register_is_ok_for_epilogue" "r")
(mem:SI (plus:SI (reg:SI 3) (match_operand:SI n "immediate_operand" "i"))))
*/
 
for (i = 3; i < count; i++)
{
rtx vector_element = XVECEXP (op, 0, i);
rtx dest;
rtx src;
rtx plus;
 
if (GET_CODE (vector_element) != SET)
return 0;
 
dest = SET_DEST (vector_element);
src = SET_SRC (vector_element);
 
if (GET_CODE (dest) != REG
|| GET_MODE (dest) != SImode
|| ! register_is_ok_for_epilogue (dest, SImode)
|| GET_CODE (src) != MEM
|| GET_MODE (src) != SImode)
return 0;
 
plus = XEXP (src, 0);
 
if (GET_CODE (plus) != PLUS
|| GET_CODE (XEXP (plus, 0)) != REG
|| GET_MODE (XEXP (plus, 0)) != SImode
|| REGNO (XEXP (plus, 0)) != STACK_POINTER_REGNUM
|| GET_CODE (XEXP (plus, 1)) != CONST_INT)
return 0;
}
 
return 1;
})
 
;; Return true if the given RTX is a register which can be restored by
;; a function epilogue.
 
(define_predicate "register_is_ok_for_epilogue"
(match_code "reg")
{
/* The save/restore routines can only cope with registers 20 - 31. */
return ((GET_CODE (op) == REG)
&& (((REGNO (op) >= 20) && REGNO (op) <= 31)));
})
 
;; Return nonzero if the given RTX is suitable for collapsing into a
;; DISPOSE instruction.
 
(define_predicate "pattern_is_ok_for_dispose"
(match_code "parallel")
{
int count = XVECLEN (op, 0);
int i;
 
/* If there are no registers to restore then
the dispose instruction is not suitable. */
if (count <= 2)
return 0;
 
/* The pattern matching has already established that we are performing a
function epilogue and that we are popping at least one register. We must
now check the remaining entries in the vector to make sure that they are
also register pops. There is no good reason why there should ever be
anything else in this vector, but being paranoid always helps...
 
The test below performs the C equivalent of this machine description
pattern match:
 
(set (match_operand:SI n "register_is_ok_for_epilogue" "r")
(mem:SI (plus:SI (reg:SI 3)
(match_operand:SI n "immediate_operand" "i"))))
*/
 
for (i = 3; i < count; i++)
{
rtx vector_element = XVECEXP (op, 0, i);
rtx dest;
rtx src;
rtx plus;
 
if (GET_CODE (vector_element) != SET)
return 0;
 
dest = SET_DEST (vector_element);
src = SET_SRC (vector_element);
 
if ( GET_CODE (dest) != REG
|| GET_MODE (dest) != SImode
|| ! register_is_ok_for_epilogue (dest, SImode)
|| GET_CODE (src) != MEM
|| GET_MODE (src) != SImode)
return 0;
 
plus = XEXP (src, 0);
 
if ( GET_CODE (plus) != PLUS
|| GET_CODE (XEXP (plus, 0)) != REG
|| GET_MODE (XEXP (plus, 0)) != SImode
|| REGNO (XEXP (plus, 0)) != STACK_POINTER_REGNUM
|| GET_CODE (XEXP (plus, 1)) != CONST_INT)
return 0;
}
 
return 1;
})
 
;; Return nonzero if the given RTX is suitable for collapsing into a
;; PREPARE instruction.
 
(define_predicate "pattern_is_ok_for_prepare"
(match_code "parallel")
{
int count = XVECLEN (op, 0);
int i;
 
/* If there are no registers to restore then the prepare instruction
is not suitable. */
if (count <= 1)
return 0;
 
/* The pattern matching has already established that we are adjusting the
stack and pushing at least one register.  We must now check the
remaining entries in the vector to make sure that they are also register
pushes.
 
The test below performs the C equivalent of this machine description
pattern match:
 
(set (mem:SI (plus:SI (reg:SI 3)
(match_operand:SI 2 "immediate_operand" "i")))
(match_operand:SI 3 "register_is_ok_for_epilogue" "r"))
 
*/
 
for (i = 2; i < count; i++)
{
rtx vector_element = XVECEXP (op, 0, i);
rtx dest;
rtx src;
rtx plus;
 
if (GET_CODE (vector_element) != SET)
return 0;
 
dest = SET_DEST (vector_element);
src = SET_SRC (vector_element);
 
if ( GET_CODE (dest) != MEM
|| GET_MODE (dest) != SImode
|| GET_CODE (src) != REG
|| GET_MODE (src) != SImode
|| ! register_is_ok_for_epilogue (src, SImode)
)
return 0;
 
plus = XEXP (dest, 0);
 
if ( GET_CODE (plus) != PLUS
|| GET_CODE (XEXP (plus, 0)) != REG
|| GET_MODE (XEXP (plus, 0)) != SImode
|| REGNO (XEXP (plus, 0)) != STACK_POINTER_REGNUM
|| GET_CODE (XEXP (plus, 1)) != CONST_INT)
return 0;
 
/* If the register is being pushed somewhere other than the stack
space just acquired by the first operand then abandon this quest.
Note: the test is <= because both values are negative. */
if (INTVAL (XEXP (plus, 1))
<= INTVAL (XEXP (SET_SRC (XVECEXP (op, 0, 0)), 1)))
return 0;
}
 
return 1;
})
 
;; Return true if OP is a constant integer whose bitwise complement, within
;; the width of MODE, is an exact power of two (that is, a constant with
;; exactly one zero bit).  For example, 0xffffffef qualifies in SImode since
;; ~0xffffffef == 0x10.  Such masks allow the AND patterns to use a single
;; clr1 bit-clear instruction.
 
(define_predicate "not_power_of_two_operand"
(match_code "const_int")
{
unsigned int mask;
 
if (mode == QImode)
mask = 0xff;
else if (mode == HImode)
mask = 0xffff;
else if (mode == SImode)
mask = 0xffffffff;
else
return 0;
 
if (GET_CODE (op) != CONST_INT)
return 0;
 
if (exact_log2 (~INTVAL (op) & mask) == -1)
return 0;
return 1;
})
/t-v850
0,0 → 1,97
LIB1ASMSRC = v850/lib1funcs.asm
LIB1ASMFUNCS = _mulsi3 \
_divsi3 \
_udivsi3 \
_modsi3 \
_umodsi3 \
_save_2 \
_save_20 \
_save_21 \
_save_22 \
_save_23 \
_save_24 \
_save_25 \
_save_26 \
_save_27 \
_save_28 \
_save_29 \
_save_2c \
_save_20c \
_save_21c \
_save_22c \
_save_23c \
_save_24c \
_save_25c \
_save_26c \
_save_27c \
_save_28c \
_save_29c \
_save_31c \
_save_varargs \
_save_interrupt \
_save_all_interrupt \
_callt_save_20 \
_callt_save_21 \
_callt_save_22 \
_callt_save_23 \
_callt_save_24 \
_callt_save_25 \
_callt_save_26 \
_callt_save_27 \
_callt_save_28 \
_callt_save_29 \
_callt_save_20c \
_callt_save_21c \
_callt_save_22c \
_callt_save_23c \
_callt_save_24c \
_callt_save_25c \
_callt_save_26c \
_callt_save_27c \
_callt_save_28c \
_callt_save_29c \
_callt_save_31c \
_callt_save_varargs \
_callt_save_interrupt \
_callt_save_all_interrupt \
_callt_save_r2_r29 \
_callt_save_r2_r31 \
_callt_save_r6_r9 \
_negdi2 \
_cmpdi2 \
_ucmpdi2 \
_muldi3
 
# We want fine grained libraries, so use the new code to build the
# floating point emulation libraries.
FPBIT = fp-bit.c
DPBIT = dp-bit.c
 
dp-bit.c: $(srcdir)/config/fp-bit.c
echo '#ifdef __LITTLE_ENDIAN__' > dp-bit.c
echo '#define FLOAT_BIT_ORDER_MISMATCH' >>dp-bit.c
echo '#endif' >> dp-bit.c
cat $(srcdir)/config/fp-bit.c >> dp-bit.c
 
fp-bit.c: $(srcdir)/config/fp-bit.c
echo '#define FLOAT' > fp-bit.c
echo '#ifdef __LITTLE_ENDIAN__' >> fp-bit.c
echo '#define FLOAT_BIT_ORDER_MISMATCH' >>fp-bit.c
echo '#endif' >> fp-bit.c
cat $(srcdir)/config/fp-bit.c >> fp-bit.c
 
# Create target-specific versions of the libraries
MULTILIB_OPTIONS = mv850e
MULTILIB_DIRNAMES = v850e
INSTALL_LIBGCC = install-multilib
MULTILIB_MATCHES = mv850e=mv850e1
 
TCFLAGS = -mno-app-regs -msmall-sld -Wa,-mwarn-signed-overflow -Wa,-mwarn-unsigned-overflow
 
v850-c.o: $(srcdir)/config/v850/v850-c.c $(CONFIG_H) $(SYSTEM_H) coretypes.h \
$(TM_H) $(CPPLIB_H) $(TREE_H) c-pragma.h toplev.h $(GGC_H) $(TM_P_H)
$(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $(srcdir)/config/v850/v850-c.c
 
# Local Variables:
# mode: Makefile
# End:
/v850-c.c
0,0 → 1,268
/* v850 specific, C compiler specific functions.
Copyright (C) 2000, 2007 Free Software Foundation, Inc.
Contributed by Jeff Law (law@cygnus.com).
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
 
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
 
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
 
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "cpplib.h"
#include "tree.h"
#include "c-pragma.h"
#include "toplev.h"
#include "ggc.h"
#include "tm_p.h"
 
#ifndef streq
#define streq(a,b) (strcmp (a, b) == 0)
#endif
static int pop_data_area (v850_data_area);
static int push_data_area (v850_data_area);
static void mark_current_function_as_interrupt (void);
/* Push a data area onto the stack. */
 
static int
push_data_area (v850_data_area data_area)
{
data_area_stack_element * elem;
 
elem = (data_area_stack_element *) xmalloc (sizeof (* elem));
 
if (elem == NULL)
return 0;
 
elem->prev = data_area_stack;
elem->data_area = data_area;
 
data_area_stack = elem;
 
return 1;
}
 
/* Remove a data area from the stack. */
 
static int
pop_data_area (v850_data_area data_area)
{
if (data_area_stack == NULL)
warning (OPT_Wpragmas, "#pragma GHS endXXXX found without "
"previous startXXX");
else if (data_area != data_area_stack->data_area)
warning (OPT_Wpragmas, "#pragma GHS endXXX does not match "
"previous startXXX");
else
{
data_area_stack_element * elem;
 
elem = data_area_stack;
data_area_stack = data_area_stack->prev;
 
free (elem);
 
return 1;
}
 
return 0;
}
 
/* Set the machine specific 'interrupt' attribute on the current function. */
 
static void
mark_current_function_as_interrupt (void)
{
tree name;
if (current_function_decl == NULL_TREE)
{
warning (0, "cannot set interrupt attribute: no current function");
return;
}
 
name = get_identifier ("interrupt");
 
if (name == NULL_TREE || TREE_CODE (name) != IDENTIFIER_NODE)
{
warning (0, "cannot set interrupt attribute: no such identifier");
return;
}
decl_attributes (&current_function_decl,
tree_cons (name, NULL_TREE, NULL_TREE), 0);
}
 
/* Support for GHS pragmata. */
 
void
ghs_pragma_section (cpp_reader * pfile ATTRIBUTE_UNUSED)
{
int repeat = 0;	/* Must start at zero: tested before its first assignment below.  */
 
/* #pragma ghs section [name = alias [, name = alias [, ...]]] */
do
{
tree x;
enum cpp_ttype type;
const char *sect, *alias;
enum GHS_section_kind kind;
type = pragma_lex (&x);
if (type == CPP_EOF && !repeat)
goto reset;
else if (type == CPP_NAME)
sect = IDENTIFIER_POINTER (x);
else
goto bad;
repeat = 0;
if (pragma_lex (&x) != CPP_EQ)
goto bad;
if (pragma_lex (&x) != CPP_NAME)
goto bad;
alias = IDENTIFIER_POINTER (x);
type = pragma_lex (&x);
if (type == CPP_COMMA)
repeat = 1;
else if (type != CPP_EOF)
warning (OPT_Wpragmas, "junk at end of #pragma ghs section");
if (streq (sect, "data")) kind = GHS_SECTION_KIND_DATA;
else if (streq (sect, "text")) kind = GHS_SECTION_KIND_TEXT;
else if (streq (sect, "rodata")) kind = GHS_SECTION_KIND_RODATA;
else if (streq (sect, "const")) kind = GHS_SECTION_KIND_RODATA;
else if (streq (sect, "rosdata")) kind = GHS_SECTION_KIND_ROSDATA;
else if (streq (sect, "rozdata")) kind = GHS_SECTION_KIND_ROZDATA;
else if (streq (sect, "sdata")) kind = GHS_SECTION_KIND_SDATA;
else if (streq (sect, "tdata")) kind = GHS_SECTION_KIND_TDATA;
else if (streq (sect, "zdata")) kind = GHS_SECTION_KIND_ZDATA;
/* According to GHS beta documentation, the following should not be
allowed! */
else if (streq (sect, "bss")) kind = GHS_SECTION_KIND_BSS;
else if (streq (sect, "zbss")) kind = GHS_SECTION_KIND_ZDATA;
else
{
warning (0, "unrecognized section name \"%s\"", sect);
return;
}
if (streq (alias, "default"))
GHS_current_section_names [kind] = NULL;
else
GHS_current_section_names [kind] =
build_string (strlen (alias) + 1, alias);
}
while (repeat);
 
return;
 
bad:
warning (OPT_Wpragmas, "malformed #pragma ghs section");
return;
 
reset:
/* #pragma ghs section \n: Reset all section names back to their defaults. */
{
int i;
for (i = COUNT_OF_GHS_SECTION_KINDS; i--;)
GHS_current_section_names [i] = NULL;
}
}
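
/* For example (purely illustrative section names), a translation unit can
   redirect the rodata and sdata defaults and later restore them with:

       #pragma ghs section rodata=my_rodata, sdata=my_sdata
       ...
       #pragma ghs section

   The bare form resets every section kind back to its default name.  */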
 
void
ghs_pragma_interrupt (cpp_reader * pfile ATTRIBUTE_UNUSED)
{
tree x;
if (pragma_lex (&x) != CPP_EOF)
warning (OPT_Wpragmas, "junk at end of #pragma ghs interrupt");
mark_current_function_as_interrupt ();
}
 
void
ghs_pragma_starttda (cpp_reader * pfile ATTRIBUTE_UNUSED)
{
tree x;
if (pragma_lex (&x) != CPP_EOF)
warning (OPT_Wpragmas, "junk at end of #pragma ghs starttda");
push_data_area (DATA_AREA_TDA);
}
 
void
ghs_pragma_startsda (cpp_reader * pfile ATTRIBUTE_UNUSED)
{
tree x;
if (pragma_lex (&x) != CPP_EOF)
warning (OPT_Wpragmas, "junk at end of #pragma ghs startsda");
push_data_area (DATA_AREA_SDA);
}
 
void
ghs_pragma_startzda (cpp_reader * pfile ATTRIBUTE_UNUSED)
{
tree x;
if (pragma_lex (&x) != CPP_EOF)
warning (OPT_Wpragmas, "junk at end of #pragma ghs startzda");
push_data_area (DATA_AREA_ZDA);
}
 
void
ghs_pragma_endtda (cpp_reader * pfile ATTRIBUTE_UNUSED)
{
tree x;
if (pragma_lex (&x) != CPP_EOF)
warning (OPT_Wpragmas, "junk at end of #pragma ghs endtda");
pop_data_area (DATA_AREA_TDA);
}
 
void
ghs_pragma_endsda (cpp_reader * pfile ATTRIBUTE_UNUSED)
{
tree x;
if (pragma_lex (&x) != CPP_EOF)
warning (OPT_Wpragmas, "junk at end of #pragma ghs endsda");
pop_data_area (DATA_AREA_SDA);
}
 
void
ghs_pragma_endzda (cpp_reader * pfile ATTRIBUTE_UNUSED)
{
tree x;
if (pragma_lex (&x) != CPP_EOF)
warning (OPT_Wpragmas, "junk at end of #pragma ghs endzda");
pop_data_area (DATA_AREA_ZDA);
}
/v850.c
0,0 → 1,3024
/* Subroutines for insn-output.c for NEC V850 series
Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
2007 Free Software Foundation, Inc.
Contributed by Jeff Law (law@cygnus.com).
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
 
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
 
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
 
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "recog.h"
#include "expr.h"
#include "function.h"
#include "toplev.h"
#include "ggc.h"
#include "integrate.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
 
#ifndef streq
#define streq(a,b) (strcmp (a, b) == 0)
#endif
 
/* Function prototypes for stupid compilers: */
static bool v850_handle_option (size_t, const char *, int);
static void const_double_split (rtx, HOST_WIDE_INT *, HOST_WIDE_INT *);
static int const_costs_int (HOST_WIDE_INT, int);
static int const_costs (rtx, enum rtx_code);
static bool v850_rtx_costs (rtx, int, int, int *);
static void substitute_ep_register (rtx, rtx, int, int, rtx *, rtx *);
static void v850_reorg (void);
static int ep_memory_offset (enum machine_mode, int);
static void v850_set_data_area (tree, v850_data_area);
const struct attribute_spec v850_attribute_table[];
static tree v850_handle_interrupt_attribute (tree *, tree, tree, int, bool *);
static tree v850_handle_data_area_attribute (tree *, tree, tree, int, bool *);
static void v850_insert_attributes (tree, tree *);
static void v850_asm_init_sections (void);
static section *v850_select_section (tree, int, unsigned HOST_WIDE_INT);
static void v850_encode_data_area (tree, rtx);
static void v850_encode_section_info (tree, rtx, int);
static bool v850_return_in_memory (tree, tree);
static void v850_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
tree, int *, int);
static bool v850_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
tree, bool);
static int v850_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
tree, bool);
 
/* Information about the various small memory areas. */
struct small_memory_info small_memory[ (int)SMALL_MEMORY_max ] =
{
/* name max physical max */
{ "tda", 0, 256 },
{ "sda", 0, 65536 },
{ "zda", 0, 32768 },
};
 
/* Names of the various data areas used on the v850. */
tree GHS_default_section_names [(int) COUNT_OF_GHS_SECTION_KINDS];
tree GHS_current_section_names [(int) COUNT_OF_GHS_SECTION_KINDS];
 
/* Track the current data area set by the data area pragma (which
can be nested). Tested by check_default_data_area. */
data_area_stack_element * data_area_stack = NULL;
 
/* True if we don't need to check any more if the current
function is an interrupt handler. */
static int v850_interrupt_cache_p = FALSE;
 
/* Whether current function is an interrupt handler. */
static int v850_interrupt_p = FALSE;
 
static GTY(()) section *rosdata_section;
static GTY(()) section *rozdata_section;
static GTY(()) section *tdata_section;
static GTY(()) section *zdata_section;
static GTY(()) section *zbss_section;
/* Initialize the GCC target structure. */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"
 
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE v850_attribute_table
 
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES v850_insert_attributes
 
#undef TARGET_ASM_SELECT_SECTION
#define TARGET_ASM_SELECT_SECTION v850_select_section
 
/* The assembler supports switchable .bss sections, but
v850_select_section doesn't yet make use of them. */
#undef TARGET_HAVE_SWITCHABLE_BSS_SECTIONS
#define TARGET_HAVE_SWITCHABLE_BSS_SECTIONS false
 
#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO v850_encode_section_info
 
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
 
#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS (MASK_DEFAULT | MASK_APP_REGS)
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION v850_handle_option
 
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS v850_rtx_costs
 
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_0
 
#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG v850_reorg
 
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
 
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY v850_return_in_memory
 
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE v850_pass_by_reference
 
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
 
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS v850_setup_incoming_varargs
 
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES v850_arg_partial_bytes
 
struct gcc_target targetm = TARGET_INITIALIZER;
/* Set the maximum size of small memory area TYPE to the value given
by VALUE. Return true if VALUE was syntactically correct. VALUE
starts with the argument separator: either "-" or "=". */
 
static bool
v850_handle_memory_option (enum small_memory_type type, const char *value)
{
int i, size;
 
if (*value != '-' && *value != '=')
return false;
 
value++;
for (i = 0; value[i]; i++)
if (!ISDIGIT (value[i]))
return false;
 
size = atoi (value);
if (size > small_memory[type].physical_max)
error ("value passed to %<-m%s%> is too large", small_memory[type].name);
else
small_memory[type].max = size;
return true;
}
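
/* For example, the option "-mtda=256" reaches this function with
   TYPE == SMALL_MEMORY_TDA and VALUE == "=256" and sets the tiny data
   area limit to 256 bytes; values above the physical maximum recorded in
   the small_memory table are diagnosed with an error.  */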
 
/* Implement TARGET_HANDLE_OPTION. */
 
static bool
v850_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
switch (code)
{
case OPT_mspace:
target_flags |= MASK_EP | MASK_PROLOG_FUNCTION;
return true;
 
case OPT_mv850:
target_flags &= ~(MASK_CPU ^ MASK_V850);
return true;
 
case OPT_mv850e:
case OPT_mv850e1:
target_flags &= ~(MASK_CPU ^ MASK_V850E);
return true;
 
case OPT_mtda:
return v850_handle_memory_option (SMALL_MEMORY_TDA, arg);
 
case OPT_msda:
return v850_handle_memory_option (SMALL_MEMORY_SDA, arg);
 
case OPT_mzda:
return v850_handle_memory_option (SMALL_MEMORY_ZDA, arg);
 
default:
return true;
}
}
static bool
v850_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
enum machine_mode mode, tree type,
bool named ATTRIBUTE_UNUSED)
{
unsigned HOST_WIDE_INT size;
 
if (type)
size = int_size_in_bytes (type);
else
size = GET_MODE_SIZE (mode);
 
return size > 8;
}
 
/* Return an RTX describing where an argument of mode MODE and type TYPE
is to be passed, given the register usage accumulated so far in CUM.
A result of 0 means the argument is passed on the stack. */
 
rtx
function_arg (CUMULATIVE_ARGS * cum,
enum machine_mode mode,
tree type,
int named)
{
rtx result = 0;
int size, align;
 
if (TARGET_GHS && !named)
return NULL_RTX;
 
if (mode == BLKmode)
size = int_size_in_bytes (type);
else
size = GET_MODE_SIZE (mode);
 
if (size < 1)
return 0;
 
if (type)
align = TYPE_ALIGN (type) / BITS_PER_UNIT;
else
align = size;
 
cum->nbytes = (cum->nbytes + align - 1) &~(align - 1);
 
if (cum->nbytes > 4 * UNITS_PER_WORD)
return 0;
 
if (type == NULL_TREE
&& cum->nbytes + size > 4 * UNITS_PER_WORD)
return 0;
 
switch (cum->nbytes / UNITS_PER_WORD)
{
case 0:
result = gen_rtx_REG (mode, 6);
break;
case 1:
result = gen_rtx_REG (mode, 7);
break;
case 2:
result = gen_rtx_REG (mode, 8);
break;
case 3:
result = gen_rtx_REG (mode, 9);
break;
default:
result = 0;
}
 
return result;
}
 
/* Return the number of bytes which must be put into registers for values
which are passed partly in registers and partly in memory. */
 
static int
v850_arg_partial_bytes (CUMULATIVE_ARGS * cum, enum machine_mode mode,
tree type, bool named)
{
int size, align;
 
if (TARGET_GHS && !named)
return 0;
 
if (mode == BLKmode)
size = int_size_in_bytes (type);
else
size = GET_MODE_SIZE (mode);
 
if (type)
align = TYPE_ALIGN (type) / BITS_PER_UNIT;
else
align = size;
 
cum->nbytes = (cum->nbytes + align - 1) &~(align - 1);
 
if (cum->nbytes > 4 * UNITS_PER_WORD)
return 0;
 
if (cum->nbytes + size <= 4 * UNITS_PER_WORD)
return 0;
 
if (type == NULL_TREE
&& cum->nbytes + size > 4 * UNITS_PER_WORD)
return 0;
 
return 4 * UNITS_PER_WORD - cum->nbytes;
}
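
/* For example, if r6 through r8 already hold earlier arguments
   (cum->nbytes == 12) and the next argument is an 8-byte structure of two
   ints, this returns 4: the first word of the structure is passed in r9
   and the remaining word is passed on the stack.  */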
 
/* Return the high and low words of a CONST_DOUBLE */
 
static void
const_double_split (rtx x, HOST_WIDE_INT * p_high, HOST_WIDE_INT * p_low)
{
if (GET_CODE (x) == CONST_DOUBLE)
{
long t[2];
REAL_VALUE_TYPE rv;
 
switch (GET_MODE (x))
{
case DFmode:
REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
REAL_VALUE_TO_TARGET_DOUBLE (rv, t);
*p_high = t[1]; /* since v850 is little endian */
*p_low = t[0]; /* high is second word */
return;
 
case SFmode:
REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
REAL_VALUE_TO_TARGET_SINGLE (rv, *p_high);
*p_low = 0;
return;
 
case VOIDmode:
case DImode:
*p_high = CONST_DOUBLE_HIGH (x);
*p_low = CONST_DOUBLE_LOW (x);
return;
 
default:
break;
}
}
 
fatal_insn ("const_double_split got a bad insn:", x);
}
 
/* Return the cost of the rtx R with code CODE. */
 
static int
const_costs_int (HOST_WIDE_INT value, int zero_cost)
{
if (CONST_OK_FOR_I (value))
return zero_cost;
else if (CONST_OK_FOR_J (value))
return 1;
else if (CONST_OK_FOR_K (value))
return 2;
else
return 4;
}
 
static int
const_costs (rtx r, enum rtx_code c)
{
HOST_WIDE_INT high, low;
 
switch (c)
{
case CONST_INT:
return const_costs_int (INTVAL (r), 0);
 
case CONST_DOUBLE:
const_double_split (r, &high, &low);
if (GET_MODE (r) == SFmode)
return const_costs_int (high, 1);
else
return const_costs_int (high, 1) + const_costs_int (low, 1);
 
case SYMBOL_REF:
case LABEL_REF:
case CONST:
return 2;
 
case HIGH:
return 1;
 
default:
return 4;
}
}
 
static bool
v850_rtx_costs (rtx x,
int code,
int outer_code ATTRIBUTE_UNUSED,
int * total)
{
switch (code)
{
case CONST_INT:
case CONST_DOUBLE:
case CONST:
case SYMBOL_REF:
case LABEL_REF:
*total = COSTS_N_INSNS (const_costs (x, code));
return true;
 
case MOD:
case DIV:
case UMOD:
case UDIV:
if (TARGET_V850E && optimize_size)
*total = 6;
else
*total = 60;
return true;
 
case MULT:
if (TARGET_V850E
&& ( GET_MODE (x) == SImode
|| GET_MODE (x) == HImode
|| GET_MODE (x) == QImode))
{
if (GET_CODE (XEXP (x, 1)) == REG)
*total = 4;
else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
{
if (CONST_OK_FOR_O (INTVAL (XEXP (x, 1))))
*total = 6;
else if (CONST_OK_FOR_K (INTVAL (XEXP (x, 1))))
*total = 10;
}
}
else
*total = 20;
return true;
 
default:
return false;
}
}
/* Print operand X using operand code CODE to assembly language output file
FILE. */
 
void
print_operand (FILE * file, rtx x, int code)
{
HOST_WIDE_INT high, low;
 
switch (code)
{
case 'c':
/* We use 'c' operands with symbols for .vtinherit */
if (GET_CODE (x) == SYMBOL_REF)
{
output_addr_const(file, x);
break;
}
/* fall through */
case 'b':
case 'B':
case 'C':
switch ((code == 'B' || code == 'C')
? reverse_condition (GET_CODE (x)) : GET_CODE (x))
{
case NE:
if (code == 'c' || code == 'C')
fprintf (file, "nz");
else
fprintf (file, "ne");
break;
case EQ:
if (code == 'c' || code == 'C')
fprintf (file, "z");
else
fprintf (file, "e");
break;
case GE:
fprintf (file, "ge");
break;
case GT:
fprintf (file, "gt");
break;
case LE:
fprintf (file, "le");
break;
case LT:
fprintf (file, "lt");
break;
case GEU:
fprintf (file, "nl");
break;
case GTU:
fprintf (file, "h");
break;
case LEU:
fprintf (file, "nh");
break;
case LTU:
fprintf (file, "l");
break;
default:
gcc_unreachable ();
}
break;
case 'F': /* high word of CONST_DOUBLE */
switch (GET_CODE (x))
{
case CONST_INT:
fprintf (file, "%d", (INTVAL (x) >= 0) ? 0 : -1);
break;
case CONST_DOUBLE:
const_double_split (x, &high, &low);
fprintf (file, "%ld", (long) high);
break;
 
default:
gcc_unreachable ();
}
break;
case 'G': /* low word of CONST_DOUBLE */
switch (GET_CODE (x))
{
case CONST_INT:
fprintf (file, "%ld", (long) INTVAL (x));
break;
case CONST_DOUBLE:
const_double_split (x, &high, &low);
fprintf (file, "%ld", (long) low);
break;
 
default:
gcc_unreachable ();
}
break;
case 'L':
fprintf (file, "%d\n", (int)(INTVAL (x) & 0xffff));
break;
case 'M':
fprintf (file, "%d", exact_log2 (INTVAL (x)));
break;
case 'O':
gcc_assert (special_symbolref_operand (x, VOIDmode));
if (GET_CODE (x) == CONST)
x = XEXP (XEXP (x, 0), 0);
else
gcc_assert (GET_CODE (x) == SYMBOL_REF);
if (SYMBOL_REF_ZDA_P (x))
fprintf (file, "zdaoff");
else if (SYMBOL_REF_SDA_P (x))
fprintf (file, "sdaoff");
else if (SYMBOL_REF_TDA_P (x))
fprintf (file, "tdaoff");
else
gcc_unreachable ();
break;
case 'P':
gcc_assert (special_symbolref_operand (x, VOIDmode));
output_addr_const (file, x);
break;
case 'Q':
gcc_assert (special_symbolref_operand (x, VOIDmode));
if (GET_CODE (x) == CONST)
x = XEXP (XEXP (x, 0), 0);
else
gcc_assert (GET_CODE (x) == SYMBOL_REF);
if (SYMBOL_REF_ZDA_P (x))
fprintf (file, "r0");
else if (SYMBOL_REF_SDA_P (x))
fprintf (file, "gp");
else if (SYMBOL_REF_TDA_P (x))
fprintf (file, "ep");
else
gcc_unreachable ();
break;
case 'R': /* 2nd word of a double. */
switch (GET_CODE (x))
{
case REG:
fprintf (file, reg_names[REGNO (x) + 1]);
break;
case MEM:
x = XEXP (adjust_address (x, SImode, 4), 0);
print_operand_address (file, x);
if (GET_CODE (x) == CONST_INT)
fprintf (file, "[r0]");
break;
default:
break;
}
break;
case 'S':
{
/* If it's a reference to a TDA variable, use sst/sld vs. st/ld. */
if (GET_CODE (x) == MEM && ep_memory_operand (x, GET_MODE (x), FALSE))
fputs ("s", file);
 
break;
}
case 'T':
{
/* Like an 'S' operand above, but for unsigned loads only. */
if (GET_CODE (x) == MEM && ep_memory_operand (x, GET_MODE (x), TRUE))
fputs ("s", file);
 
break;
}
case 'W': /* print the instruction suffix */
switch (GET_MODE (x))
{
default:
gcc_unreachable ();
 
case QImode: fputs (".b", file); break;
case HImode: fputs (".h", file); break;
case SImode: fputs (".w", file); break;
case SFmode: fputs (".w", file); break;
}
break;
case '.': /* register r0 */
fputs (reg_names[0], file);
break;
case 'z': /* reg or zero */
if (GET_CODE (x) == REG)
fputs (reg_names[REGNO (x)], file);
else
{
gcc_assert (x == const0_rtx);
fputs (reg_names[0], file);
}
break;
default:
switch (GET_CODE (x))
{
case MEM:
if (GET_CODE (XEXP (x, 0)) == CONST_INT)
output_address (gen_rtx_PLUS (SImode, gen_rtx_REG (SImode, 0),
XEXP (x, 0)));
else
output_address (XEXP (x, 0));
break;
 
case REG:
fputs (reg_names[REGNO (x)], file);
break;
case SUBREG:
fputs (reg_names[subreg_regno (x)], file);
break;
case CONST_INT:
case SYMBOL_REF:
case CONST:
case LABEL_REF:
case CODE_LABEL:
print_operand_address (file, x);
break;
default:
gcc_unreachable ();
}
break;
 
}
}
 
/* Output assembly language output for the address ADDR to FILE. */
 
void
print_operand_address (FILE * file, rtx addr)
{
switch (GET_CODE (addr))
{
case REG:
fprintf (file, "0[");
print_operand (file, addr, 0);
fprintf (file, "]");
break;
case LO_SUM:
if (GET_CODE (XEXP (addr, 0)) == REG)
{
/* reg,foo */
fprintf (file, "lo(");
print_operand (file, XEXP (addr, 1), 0);
fprintf (file, ")[");
print_operand (file, XEXP (addr, 0), 0);
fprintf (file, "]");
}
break;
case PLUS:
if (GET_CODE (XEXP (addr, 0)) == REG
|| GET_CODE (XEXP (addr, 0)) == SUBREG)
{
/* reg,foo */
print_operand (file, XEXP (addr, 1), 0);
fprintf (file, "[");
print_operand (file, XEXP (addr, 0), 0);
fprintf (file, "]");
}
else
{
print_operand (file, XEXP (addr, 0), 0);
fprintf (file, "+");
print_operand (file, XEXP (addr, 1), 0);
}
break;
case SYMBOL_REF:
{
const char *off_name = NULL;
const char *reg_name = NULL;
 
if (SYMBOL_REF_ZDA_P (addr))
{
off_name = "zdaoff";
reg_name = "r0";
}
else if (SYMBOL_REF_SDA_P (addr))
{
off_name = "sdaoff";
reg_name = "gp";
}
else if (SYMBOL_REF_TDA_P (addr))
{
off_name = "tdaoff";
reg_name = "ep";
}
 
if (off_name)
fprintf (file, "%s(", off_name);
output_addr_const (file, addr);
if (reg_name)
fprintf (file, ")[%s]", reg_name);
}
break;
case CONST:
if (special_symbolref_operand (addr, VOIDmode))
{
rtx x = XEXP (XEXP (addr, 0), 0);
const char *off_name;
const char *reg_name;
 
if (SYMBOL_REF_ZDA_P (x))
{
off_name = "zdaoff";
reg_name = "r0";
}
else if (SYMBOL_REF_SDA_P (x))
{
off_name = "sdaoff";
reg_name = "gp";
}
else if (SYMBOL_REF_TDA_P (x))
{
off_name = "tdaoff";
reg_name = "ep";
}
else
gcc_unreachable ();
 
fprintf (file, "%s(", off_name);
output_addr_const (file, addr);
fprintf (file, ")[%s]", reg_name);
}
else
output_addr_const (file, addr);
break;
default:
output_addr_const (file, addr);
break;
}
}
 
/* When assemble_integer is used to emit the offsets for a switch
table it can encounter (TRUNCATE:HI (MINUS:SI (LABEL_REF:SI) (LABEL_REF:SI))).
output_addr_const will normally barf at this, but it is OK to omit
the truncate and just emit the difference of the two labels. The
.hword directive will automatically handle the truncation for us.
Returns 1 if rtx was handled, 0 otherwise. */
 
int
v850_output_addr_const_extra (FILE * file, rtx x)
{
if (GET_CODE (x) != TRUNCATE)
return 0;
 
x = XEXP (x, 0);
 
/* We must also handle the case where the switch table was passed a
constant value and so has been collapsed. In this case the first
label will have been deleted. In such a case it is OK to emit
nothing, since the table will not be used.
(cf gcc.c-torture/compile/990801-1.c). */
if (GET_CODE (x) == MINUS
&& GET_CODE (XEXP (x, 0)) == LABEL_REF
&& GET_CODE (XEXP (XEXP (x, 0), 0)) == CODE_LABEL
&& INSN_DELETED_P (XEXP (XEXP (x, 0), 0)))
return 1;
 
output_addr_const (file, x);
return 1;
}
/* Return the assembler code needed to load a 1, 2 or 4 byte integer or
floating point value. */
 
const char *
output_move_single (rtx * operands)
{
rtx dst = operands[0];
rtx src = operands[1];
 
if (REG_P (dst))
{
if (REG_P (src))
return "mov %1,%0";
 
else if (GET_CODE (src) == CONST_INT)
{
HOST_WIDE_INT value = INTVAL (src);
 
if (CONST_OK_FOR_J (value)) /* Signed 5 bit immediate. */
return "mov %1,%0";
 
else if (CONST_OK_FOR_K (value)) /* Signed 16 bit immediate. */
return "movea lo(%1),%.,%0";
 
else if (CONST_OK_FOR_L (value)) /* Upper 16 bits were set. */
return "movhi hi(%1),%.,%0";
 
/* A random constant. */
else if (TARGET_V850E)
return "mov %1,%0";
else
return "movhi hi(%1),%.,%0\n\tmovea lo(%1),%0,%0";
}
 
else if (GET_CODE (src) == CONST_DOUBLE && GET_MODE (src) == SFmode)
{
HOST_WIDE_INT high, low;
 
const_double_split (src, &high, &low);
 
if (CONST_OK_FOR_J (high)) /* Signed 5 bit immediate. */
return "mov %F1,%0";
 
else if (CONST_OK_FOR_K (high)) /* Signed 16 bit immediate. */
return "movea lo(%F1),%.,%0";
 
else if (CONST_OK_FOR_L (high)) /* Upper 16 bits were set. */
return "movhi hi(%F1),%.,%0";
 
/* A random constant. */
else if (TARGET_V850E)
return "mov %F1,%0";
 
else
return "movhi hi(%F1),%.,%0\n\tmovea lo(%F1),%0,%0";
}
 
else if (GET_CODE (src) == MEM)
return "%S1ld%W1 %1,%0";
 
else if (special_symbolref_operand (src, VOIDmode))
return "movea %O1(%P1),%Q1,%0";
 
else if (GET_CODE (src) == LABEL_REF
|| GET_CODE (src) == SYMBOL_REF
|| GET_CODE (src) == CONST)
{
if (TARGET_V850E)
return "mov hilo(%1),%0";
else
return "movhi hi(%1),%.,%0\n\tmovea lo(%1),%0,%0";
}
 
else if (GET_CODE (src) == HIGH)
return "movhi hi(%1),%.,%0";
 
else if (GET_CODE (src) == LO_SUM)
{
operands[2] = XEXP (src, 0);
operands[3] = XEXP (src, 1);
return "movea lo(%3),%2,%0";
}
}
 
else if (GET_CODE (dst) == MEM)
{
if (REG_P (src))
return "%S0st%W0 %1,%0";
 
else if (GET_CODE (src) == CONST_INT && INTVAL (src) == 0)
return "%S0st%W0 %.,%0";
 
else if (GET_CODE (src) == CONST_DOUBLE
&& CONST0_RTX (GET_MODE (dst)) == src)
return "%S0st%W0 %.,%0";
}
 
fatal_insn ("output_move_single:", gen_rtx_SET (VOIDmode, dst, src));
return "";
}
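
/* As a rough illustration of the cases above: on a plain V850, a constant
   such as 0x12345678 cannot be loaded into r10 in one instruction, so the
   final case emits the two instruction sequence

       movhi hi(0x12345678),r0,r10
       movea lo(0x12345678),r10,r10

   whereas a V850E can use its single 48-bit "mov imm32,reg" form.  */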
 
/* Return the assembler code needed to load an 8 byte integer or
floating point value. */
 
const char *
output_move_double (rtx * operands)
{
enum machine_mode mode = GET_MODE (operands[0]);
rtx dst = operands[0];
rtx src = operands[1];
 
if (register_operand (dst, mode)
&& register_operand (src, mode))
{
if (REGNO (src) + 1 == REGNO (dst))
return "mov %R1,%R0\n\tmov %1,%0";
else
return "mov %1,%0\n\tmov %R1,%R0";
}
 
/* Storing 0 */
if (GET_CODE (dst) == MEM
&& ((GET_CODE (src) == CONST_INT && INTVAL (src) == 0)
|| (GET_CODE (src) == CONST_DOUBLE && CONST_DOUBLE_OK_FOR_G (src))))
return "st.w %.,%0\n\tst.w %.,%R0";
 
if (GET_CODE (src) == CONST_INT || GET_CODE (src) == CONST_DOUBLE)
{
HOST_WIDE_INT high_low[2];
int i;
rtx xop[10];
 
if (GET_CODE (src) == CONST_DOUBLE)
const_double_split (src, &high_low[1], &high_low[0]);
else
{
high_low[0] = INTVAL (src);
high_low[1] = (INTVAL (src) >= 0) ? 0 : -1;
}
 
for (i = 0; i < 2; i++)
{
xop[0] = gen_rtx_REG (SImode, REGNO (dst)+i);
xop[1] = GEN_INT (high_low[i]);
output_asm_insn (output_move_single (xop), xop);
}
 
return "";
}
 
if (GET_CODE (src) == MEM)
{
int ptrreg = -1;
int dreg = REGNO (dst);
rtx inside = XEXP (src, 0);
 
if (GET_CODE (inside) == REG)
ptrreg = REGNO (inside);
else if (GET_CODE (inside) == SUBREG)
ptrreg = subreg_regno (inside);
else if (GET_CODE (inside) == PLUS)
ptrreg = REGNO (XEXP (inside, 0));
else if (GET_CODE (inside) == LO_SUM)
ptrreg = REGNO (XEXP (inside, 0));
 
if (dreg == ptrreg)
return "ld.w %R1,%R0\n\tld.w %1,%0";
}
 
if (GET_CODE (src) == MEM)
return "ld.w %1,%0\n\tld.w %R1,%R0";
if (GET_CODE (dst) == MEM)
return "st.w %1,%0\n\tst.w %R1,%R0";
 
return "mov %1,%0\n\tmov %R1,%R0";
}
 
/* Return maximum offset supported for a short EP memory reference of mode
MODE and signedness UNSIGNEDP. */
 
static int
ep_memory_offset (enum machine_mode mode, int unsignedp ATTRIBUTE_UNUSED)
{
int max_offset = 0;
 
switch (mode)
{
case QImode:
if (TARGET_SMALL_SLD)
max_offset = (1 << 4);
else if (TARGET_V850E
&& ( ( unsignedp && ! TARGET_US_BIT_SET)
|| (! unsignedp && TARGET_US_BIT_SET)))
max_offset = (1 << 4);
else
max_offset = (1 << 7);
break;
 
case HImode:
if (TARGET_SMALL_SLD)
max_offset = (1 << 5);
else if (TARGET_V850E
&& ( ( unsignedp && ! TARGET_US_BIT_SET)
|| (! unsignedp && TARGET_US_BIT_SET)))
max_offset = (1 << 5);
else
max_offset = (1 << 8);
break;
 
case SImode:
case SFmode:
max_offset = (1 << 8);
break;
default:
break;
}
 
return max_offset;
}
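
/* For ordinary signed accesses without -msmall-sld this works out to a
   displacement limit of 128 bytes for byte accesses and 256 bytes for
   halfword and word accesses; with -msmall-sld the byte and halfword
   limits shrink to 16 and 32 bytes respectively.  */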
 
/* Return true if OP is a valid short EP memory reference */
 
int
ep_memory_operand (rtx op, enum machine_mode mode, int unsigned_load)
{
rtx addr, op0, op1;
int max_offset;
int mask;
 
/* If we are not using the EP register on a per-function basis
then do not allow this optimization at all. This is to
prevent the use of the SLD/SST instructions which cannot be
guaranteed to work properly due to a hardware bug. */
if (!TARGET_EP)
return FALSE;
 
if (GET_CODE (op) != MEM)
return FALSE;
 
max_offset = ep_memory_offset (mode, unsigned_load);
 
mask = GET_MODE_SIZE (mode) - 1;
 
addr = XEXP (op, 0);
if (GET_CODE (addr) == CONST)
addr = XEXP (addr, 0);
 
switch (GET_CODE (addr))
{
default:
break;
 
case SYMBOL_REF:
return SYMBOL_REF_TDA_P (addr);
 
case REG:
return REGNO (addr) == EP_REGNUM;
 
case PLUS:
op0 = XEXP (addr, 0);
op1 = XEXP (addr, 1);
if (GET_CODE (op1) == CONST_INT
&& INTVAL (op1) < max_offset
&& INTVAL (op1) >= 0
&& (INTVAL (op1) & mask) == 0)
{
if (GET_CODE (op0) == REG && REGNO (op0) == EP_REGNUM)
return TRUE;
 
if (GET_CODE (op0) == SYMBOL_REF && SYMBOL_REF_TDA_P (op0))
return TRUE;
}
break;
}
 
return FALSE;
}
/* Substitute memory references that index off pointer register REGNO so
that they use the ep register instead, taking care to save ep and restore
it afterwards. */
 
static void
substitute_ep_register (rtx first_insn,
rtx last_insn,
int uses,
int regno,
rtx * p_r1,
rtx * p_ep)
{
rtx reg = gen_rtx_REG (Pmode, regno);
rtx insn;
 
if (!*p_r1)
{
regs_ever_live[1] = 1;
*p_r1 = gen_rtx_REG (Pmode, 1);
*p_ep = gen_rtx_REG (Pmode, 30);
}
 
if (TARGET_DEBUG)
fprintf (stderr, "\
Saved %d bytes (%d uses of register %s) in function %s, starting as insn %d, ending at %d\n",
2 * (uses - 3), uses, reg_names[regno],
IDENTIFIER_POINTER (DECL_NAME (current_function_decl)),
INSN_UID (first_insn), INSN_UID (last_insn));
 
if (GET_CODE (first_insn) == NOTE)
first_insn = next_nonnote_insn (first_insn);
 
last_insn = next_nonnote_insn (last_insn);
for (insn = first_insn; insn && insn != last_insn; insn = NEXT_INSN (insn))
{
if (GET_CODE (insn) == INSN)
{
rtx pattern = single_set (insn);
 
/* Replace the memory references. */
if (pattern)
{
rtx *p_mem;
/* Memory operands are signed by default. */
int unsignedp = FALSE;
 
if (GET_CODE (SET_DEST (pattern)) == MEM
&& GET_CODE (SET_SRC (pattern)) == MEM)
p_mem = (rtx *)0;
 
else if (GET_CODE (SET_DEST (pattern)) == MEM)
p_mem = &SET_DEST (pattern);
 
else if (GET_CODE (SET_SRC (pattern)) == MEM)
p_mem = &SET_SRC (pattern);
 
else if (GET_CODE (SET_SRC (pattern)) == SIGN_EXTEND
&& GET_CODE (XEXP (SET_SRC (pattern), 0)) == MEM)
p_mem = &XEXP (SET_SRC (pattern), 0);
 
else if (GET_CODE (SET_SRC (pattern)) == ZERO_EXTEND
&& GET_CODE (XEXP (SET_SRC (pattern), 0)) == MEM)
{
p_mem = &XEXP (SET_SRC (pattern), 0);
unsignedp = TRUE;
}
else
p_mem = (rtx *)0;
 
if (p_mem)
{
rtx addr = XEXP (*p_mem, 0);
 
if (GET_CODE (addr) == REG && REGNO (addr) == (unsigned) regno)
*p_mem = change_address (*p_mem, VOIDmode, *p_ep);
 
else if (GET_CODE (addr) == PLUS
&& GET_CODE (XEXP (addr, 0)) == REG
&& REGNO (XEXP (addr, 0)) == (unsigned) regno
&& GET_CODE (XEXP (addr, 1)) == CONST_INT
&& ((INTVAL (XEXP (addr, 1)))
< ep_memory_offset (GET_MODE (*p_mem),
unsignedp))
&& ((INTVAL (XEXP (addr, 1))) >= 0))
*p_mem = change_address (*p_mem, VOIDmode,
gen_rtx_PLUS (Pmode,
*p_ep,
XEXP (addr, 1)));
}
}
}
}
 
/* Optimize back to back cases of ep <- r1 & r1 <- ep. */
insn = prev_nonnote_insn (first_insn);
if (insn && GET_CODE (insn) == INSN
&& GET_CODE (PATTERN (insn)) == SET
&& SET_DEST (PATTERN (insn)) == *p_ep
&& SET_SRC (PATTERN (insn)) == *p_r1)
delete_insn (insn);
else
emit_insn_before (gen_rtx_SET (Pmode, *p_r1, *p_ep), first_insn);
 
emit_insn_before (gen_rtx_SET (Pmode, *p_ep, reg), first_insn);
emit_insn_before (gen_rtx_SET (Pmode, *p_ep, *p_r1), last_insn);
}
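
/* Very roughly, the effect of the substitution is to turn a run of
   references such as

       ld.w 0[r12],r10
       ld.w 4[r12],r11
       st.w r10,8[r12]

   into

       mov ep,r1
       mov r12,ep
       sld.w 0[ep],r10
       sld.w 4[ep],r11
       sst.w r10,8[ep]
       mov r1,ep

   saving two bytes per converted reference at the cost of the three
   bookkeeping moves, which is why v850_reorg only calls this when a
   pointer register has more than three uses.  */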
 
/* Implement TARGET_MACHINE_DEPENDENT_REORG.  On the V850 this pass
implements -mep: heavily used pointer registers are copied into ep so
that the short, implicit ep-relative addressing modes can be used. */
 
static void
v850_reorg (void)
{
struct
{
int uses;
rtx first_insn;
rtx last_insn;
}
regs[FIRST_PSEUDO_REGISTER];
 
int i;
int use_ep = FALSE;
rtx r1 = NULL_RTX;
rtx ep = NULL_RTX;
rtx insn;
rtx pattern;
 
/* If not ep mode, just return now. */
if (!TARGET_EP)
return;
 
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
{
regs[i].uses = 0;
regs[i].first_insn = NULL_RTX;
regs[i].last_insn = NULL_RTX;
}
 
for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
{
switch (GET_CODE (insn))
{
/* End of basic block */
default:
if (!use_ep)
{
int max_uses = -1;
int max_regno = -1;
 
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
{
if (max_uses < regs[i].uses)
{
max_uses = regs[i].uses;
max_regno = i;
}
}
 
if (max_uses > 3)
substitute_ep_register (regs[max_regno].first_insn,
regs[max_regno].last_insn,
max_uses, max_regno, &r1, &ep);
}
 
use_ep = FALSE;
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
{
regs[i].uses = 0;
regs[i].first_insn = NULL_RTX;
regs[i].last_insn = NULL_RTX;
}
break;
 
case NOTE:
break;
 
case INSN:
pattern = single_set (insn);
 
/* See if there are any memory references we can shorten */
if (pattern)
{
rtx src = SET_SRC (pattern);
rtx dest = SET_DEST (pattern);
rtx mem;
/* Memory operands are signed by default. */
int unsignedp = FALSE;
 
/* We might have (SUBREG (MEM)) here, so just get rid of the
subregs to make this code simpler. */
if (GET_CODE (dest) == SUBREG
&& (GET_CODE (SUBREG_REG (dest)) == MEM
|| GET_CODE (SUBREG_REG (dest)) == REG))
alter_subreg (&dest);
if (GET_CODE (src) == SUBREG
&& (GET_CODE (SUBREG_REG (src)) == MEM
|| GET_CODE (SUBREG_REG (src)) == REG))
alter_subreg (&src);
 
if (GET_CODE (dest) == MEM && GET_CODE (src) == MEM)
mem = NULL_RTX;
 
else if (GET_CODE (dest) == MEM)
mem = dest;
 
else if (GET_CODE (src) == MEM)
mem = src;
 
else if (GET_CODE (src) == SIGN_EXTEND
&& GET_CODE (XEXP (src, 0)) == MEM)
mem = XEXP (src, 0);
 
else if (GET_CODE (src) == ZERO_EXTEND
&& GET_CODE (XEXP (src, 0)) == MEM)
{
mem = XEXP (src, 0);
unsignedp = TRUE;
}
else
mem = NULL_RTX;
 
if (mem && ep_memory_operand (mem, GET_MODE (mem), unsignedp))
use_ep = TRUE;
 
else if (!use_ep && mem
&& GET_MODE_SIZE (GET_MODE (mem)) <= UNITS_PER_WORD)
{
rtx addr = XEXP (mem, 0);
int regno = -1;
int short_p;
 
if (GET_CODE (addr) == REG)
{
short_p = TRUE;
regno = REGNO (addr);
}
 
else if (GET_CODE (addr) == PLUS
&& GET_CODE (XEXP (addr, 0)) == REG
&& GET_CODE (XEXP (addr, 1)) == CONST_INT
&& ((INTVAL (XEXP (addr, 1)))
< ep_memory_offset (GET_MODE (mem), unsignedp))
&& ((INTVAL (XEXP (addr, 1))) >= 0))
{
short_p = TRUE;
regno = REGNO (XEXP (addr, 0));
}
 
else
short_p = FALSE;
 
if (short_p)
{
regs[regno].uses++;
regs[regno].last_insn = insn;
if (!regs[regno].first_insn)
regs[regno].first_insn = insn;
}
}
 
/* Loading up a register in the basic block zaps any savings
for the register */
if (GET_CODE (dest) == REG)
{
enum machine_mode mode = GET_MODE (dest);
int regno;
int endregno;
 
regno = REGNO (dest);
endregno = regno + HARD_REGNO_NREGS (regno, mode);
 
if (!use_ep)
{
/* See if we can use the pointer before this
modification. */
int max_uses = -1;
int max_regno = -1;
 
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
{
if (max_uses < regs[i].uses)
{
max_uses = regs[i].uses;
max_regno = i;
}
}
 
if (max_uses > 3
&& max_regno >= regno
&& max_regno < endregno)
{
substitute_ep_register (regs[max_regno].first_insn,
regs[max_regno].last_insn,
max_uses, max_regno, &r1,
&ep);
 
/* Since we made a substitution, zap all remembered
registers. */
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
{
regs[i].uses = 0;
regs[i].first_insn = NULL_RTX;
regs[i].last_insn = NULL_RTX;
}
}
}
 
for (i = regno; i < endregno; i++)
{
regs[i].uses = 0;
regs[i].first_insn = NULL_RTX;
regs[i].last_insn = NULL_RTX;
}
}
}
}
}
}
 
/* # of registers saved by the interrupt handler. */
#define INTERRUPT_FIXED_NUM 4
 
/* # of bytes for registers saved by the interrupt handler. */
#define INTERRUPT_FIXED_SAVE_SIZE (4 * INTERRUPT_FIXED_NUM)
 
/* # of registers saved in register parameter area. */
#define INTERRUPT_REGPARM_NUM 4
/* # of words saved for other registers. */
#define INTERRUPT_ALL_SAVE_NUM \
(30 - INTERRUPT_FIXED_NUM + INTERRUPT_REGPARM_NUM)
 
#define INTERRUPT_ALL_SAVE_SIZE (4 * INTERRUPT_ALL_SAVE_NUM)
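
/* With the values above this works out to 30 - 4 + 4 = 30 words, i.e.
   120 bytes, matching the "addi 120,sp,sp" stack release and the highest
   offset (116) used by the restore_all_interrupt pattern.  */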
 
int
compute_register_save_size (long * p_reg_saved)
{
int size = 0;
int i;
int interrupt_handler = v850_interrupt_function_p (current_function_decl);
int call_p = regs_ever_live [LINK_POINTER_REGNUM];
long reg_saved = 0;
 
/* Count the return pointer if we need to save it. */
if (current_function_profile && !call_p)
regs_ever_live [LINK_POINTER_REGNUM] = call_p = 1;
/* Count space for the register saves. */
if (interrupt_handler)
{
for (i = 0; i <= 31; i++)
switch (i)
{
default:
if (regs_ever_live[i] || call_p)
{
size += 4;
reg_saved |= 1L << i;
}
break;
 
/* We don't save/restore r0 or the stack pointer */
case 0:
case STACK_POINTER_REGNUM:
break;
 
/* For registers with fixed use, we save them, set them to the
appropriate value, and then restore them.
These registers are handled specially, so don't list them
on the list of registers to save in the prologue. */
case 1: /* temp used to hold ep */
case 4: /* gp */
case 10: /* temp used to call interrupt save/restore */
case EP_REGNUM: /* ep */
size += 4;
break;
}
}
else
{
/* Find the first register that needs to be saved. */
for (i = 0; i <= 31; i++)
if (regs_ever_live[i] && ((! call_used_regs[i])
|| i == LINK_POINTER_REGNUM))
break;
 
/* If it is possible that an out-of-line helper function might be
used to generate the prologue for the current function, then we
need to cover the possibility that such a helper function will
be used, despite the fact that there might be gaps in the list of
registers that need to be saved. To detect this we note that the
helper functions always push at least register r29 (provided
that the function is not an interrupt handler). */
if (TARGET_PROLOG_FUNCTION
&& (i == 2 || ((i >= 20) && (i < 30))))
{
if (i == 2)
{
size += 4;
reg_saved |= 1L << i;
 
i = 20;
}
 
/* Helper functions save all registers between the starting
register and the last register, regardless of whether they
are actually used by the function or not. */
for (; i <= 29; i++)
{
size += 4;
reg_saved |= 1L << i;
}
 
if (regs_ever_live [LINK_POINTER_REGNUM])
{
size += 4;
reg_saved |= 1L << LINK_POINTER_REGNUM;
}
}
else
{
for (; i <= 31; i++)
if (regs_ever_live[i] && ((! call_used_regs[i])
|| i == LINK_POINTER_REGNUM))
{
size += 4;
reg_saved |= 1L << i;
}
}
}
if (p_reg_saved)
*p_reg_saved = reg_saved;
 
return size;
}
 
int
compute_frame_size (int size, long * p_reg_saved)
{
return (size
+ compute_register_save_size (p_reg_saved)
+ current_function_outgoing_args_size);
}
 
void
expand_prologue (void)
{
unsigned int i;
int offset;
unsigned int size = get_frame_size ();
unsigned int actual_fsize;
unsigned int init_stack_alloc = 0;
rtx save_regs[32];
rtx save_all;
unsigned int num_save;
unsigned int default_stack;
int code;
int interrupt_handler = v850_interrupt_function_p (current_function_decl);
long reg_saved = 0;
 
actual_fsize = compute_frame_size (size, &reg_saved);
 
/* Save/setup global registers for interrupt functions right now. */
if (interrupt_handler)
{
if (TARGET_V850E && ! TARGET_DISABLE_CALLT)
emit_insn (gen_callt_save_interrupt ());
else
emit_insn (gen_save_interrupt ());
 
actual_fsize -= INTERRUPT_FIXED_SAVE_SIZE;
if (((1L << LINK_POINTER_REGNUM) & reg_saved) != 0)
actual_fsize -= INTERRUPT_ALL_SAVE_SIZE;
}
 
/* Save arg registers to the stack if necessary. */
else if (current_function_args_info.anonymous_args)
{
if (TARGET_PROLOG_FUNCTION && TARGET_V850E && !TARGET_DISABLE_CALLT)
emit_insn (gen_save_r6_r9_v850e ());
else if (TARGET_PROLOG_FUNCTION && ! TARGET_LONG_CALLS)
emit_insn (gen_save_r6_r9 ());
else
{
offset = 0;
for (i = 6; i < 10; i++)
{
emit_move_insn (gen_rtx_MEM (SImode,
plus_constant (stack_pointer_rtx,
offset)),
gen_rtx_REG (SImode, i));
offset += 4;
}
}
}
 
/* Identify all of the saved registers. */
num_save = 0;
default_stack = 0;
for (i = 1; i < 31; i++)
{
if (((1L << i) & reg_saved) != 0)
save_regs[num_save++] = gen_rtx_REG (Pmode, i);
}
 
/* If the return pointer is saved, the helper functions also allocate
16 bytes of stack for arguments to be saved in. */
if (((1L << LINK_POINTER_REGNUM) & reg_saved) != 0)
{
save_regs[num_save++] = gen_rtx_REG (Pmode, LINK_POINTER_REGNUM);
default_stack = 16;
}
 
/* See if we have an insn that allocates stack space and saves the particular
registers we want to. */
save_all = NULL_RTX;
if (TARGET_PROLOG_FUNCTION && num_save > 0 && actual_fsize >= default_stack)
{
int alloc_stack = (4 * num_save) + default_stack;
int unalloc_stack = actual_fsize - alloc_stack;
int save_func_len = 4;
int save_normal_len;
 
if (unalloc_stack)
save_func_len += CONST_OK_FOR_J (unalloc_stack) ? 2 : 4;
 
/* see if we would have used ep to save the stack */
if (TARGET_EP && num_save > 3 && (unsigned)actual_fsize < 255)
save_normal_len = (3 * 2) + (2 * num_save);
else
save_normal_len = 4 * num_save;
 
save_normal_len += CONST_OK_FOR_J (actual_fsize) ? 2 : 4;
 
/* Don't bother checking if we don't actually save any space.
This happens for instance if one register is saved and additional
stack space is allocated. */
if (save_func_len < save_normal_len)
{
save_all = gen_rtx_PARALLEL
(VOIDmode,
rtvec_alloc (num_save + 1
+ (TARGET_V850 ? (TARGET_LONG_CALLS ? 2 : 1) : 0)));
 
XVECEXP (save_all, 0, 0)
= gen_rtx_SET (VOIDmode,
stack_pointer_rtx,
plus_constant (stack_pointer_rtx, -alloc_stack));
 
offset = - default_stack;
for (i = 0; i < num_save; i++)
{
XVECEXP (save_all, 0, i+1)
= gen_rtx_SET (VOIDmode,
gen_rtx_MEM (Pmode,
plus_constant (stack_pointer_rtx,
offset)),
save_regs[i]);
offset -= 4;
}
 
if (TARGET_V850)
{
XVECEXP (save_all, 0, num_save + 1)
= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 10));
 
if (TARGET_LONG_CALLS)
XVECEXP (save_all, 0, num_save + 2)
= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
}
 
code = recog (save_all, NULL_RTX, NULL);
if (code >= 0)
{
rtx insn = emit_insn (save_all);
INSN_CODE (insn) = code;
actual_fsize -= alloc_stack;
if (TARGET_DEBUG)
fprintf (stderr, "\
Saved %d bytes via prologue function (%d vs. %d) for function %s\n",
save_normal_len - save_func_len,
save_normal_len, save_func_len,
IDENTIFIER_POINTER (DECL_NAME (current_function_decl)));
}
else
save_all = NULL_RTX;
}
}
 
/* If no prologue save function is available, store the registers the
old-fashioned way (one by one). */
if (!save_all)
{
/* Special case interrupt functions that save all registers for a call. */
if (interrupt_handler && ((1L << LINK_POINTER_REGNUM) & reg_saved) != 0)
{
if (TARGET_V850E && ! TARGET_DISABLE_CALLT)
emit_insn (gen_callt_save_all_interrupt ());
else
emit_insn (gen_save_all_interrupt ());
}
else
{
/* If the stack is too big, allocate it in two steps so we can still do
the register saves. We initially allocate just the register save area,
keeping the offsets small enough for the ep register to be used. */
if (actual_fsize && !CONST_OK_FOR_K (-actual_fsize))
init_stack_alloc = compute_register_save_size (NULL);
else
init_stack_alloc = actual_fsize;
/* Save registers at the beginning of the stack frame. */
offset = init_stack_alloc - 4;
if (init_stack_alloc)
emit_insn (gen_addsi3 (stack_pointer_rtx,
stack_pointer_rtx,
GEN_INT (-init_stack_alloc)));
/* Save the return pointer first. */
if (num_save > 0 && REGNO (save_regs[num_save-1]) == LINK_POINTER_REGNUM)
{
emit_move_insn (gen_rtx_MEM (SImode,
plus_constant (stack_pointer_rtx,
offset)),
save_regs[--num_save]);
offset -= 4;
}
for (i = 0; i < num_save; i++)
{
emit_move_insn (gen_rtx_MEM (SImode,
plus_constant (stack_pointer_rtx,
offset)),
save_regs[i]);
offset -= 4;
}
}
}
 
/* Allocate the rest of the stack that was not allocated above (either it is
> 32K or we just called a function to save the registers and needed more
stack). */
if (actual_fsize > init_stack_alloc)
{
int diff = actual_fsize - init_stack_alloc;
if (CONST_OK_FOR_K (diff))
emit_insn (gen_addsi3 (stack_pointer_rtx,
stack_pointer_rtx,
GEN_INT (-diff)));
else
{
rtx reg = gen_rtx_REG (Pmode, 12);
emit_move_insn (reg, GEN_INT (-diff));
emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
}
}
 
/* If we need a frame pointer, set it up now. */
if (frame_pointer_needed)
emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
}
 
void
expand_epilogue (void)
{
unsigned int i;
int offset;
unsigned int size = get_frame_size ();
long reg_saved = 0;
unsigned int actual_fsize = compute_frame_size (size, &reg_saved);
unsigned int init_stack_free = 0;
rtx restore_regs[32];
rtx restore_all;
unsigned int num_restore;
unsigned int default_stack;
int code;
int interrupt_handler = v850_interrupt_function_p (current_function_decl);
 
/* Eliminate the initial stack stored by interrupt functions. */
if (interrupt_handler)
{
actual_fsize -= INTERRUPT_FIXED_SAVE_SIZE;
if (((1L << LINK_POINTER_REGNUM) & reg_saved) != 0)
actual_fsize -= INTERRUPT_ALL_SAVE_SIZE;
}
 
/* Cut off any dynamic stack created. */
if (frame_pointer_needed)
emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
 
/* Identify all of the saved registers. */
num_restore = 0;
default_stack = 0;
for (i = 1; i < 31; i++)
{
if (((1L << i) & reg_saved) != 0)
restore_regs[num_restore++] = gen_rtx_REG (Pmode, i);
}
 
/* If the return pointer is saved, the helper functions also allocate
16 bytes of stack for arguments to be saved in. */
if (((1L << LINK_POINTER_REGNUM) & reg_saved) != 0)
{
restore_regs[num_restore++] = gen_rtx_REG (Pmode, LINK_POINTER_REGNUM);
default_stack = 16;
}
 
/* See if we have an insn that restores the particular registers we
want to. */
restore_all = NULL_RTX;
if (TARGET_PROLOG_FUNCTION
&& num_restore > 0
&& actual_fsize >= default_stack
&& !interrupt_handler)
{
int alloc_stack = (4 * num_restore) + default_stack;
int unalloc_stack = actual_fsize - alloc_stack;
int restore_func_len = 4;
int restore_normal_len;
 
if (unalloc_stack)
restore_func_len += CONST_OK_FOR_J (unalloc_stack) ? 2 : 4;
 
/* See if we would have used ep to restore the registers. */
if (TARGET_EP && num_restore > 3 && (unsigned)actual_fsize < 255)
restore_normal_len = (3 * 2) + (2 * num_restore);
else
restore_normal_len = 4 * num_restore;
 
restore_normal_len += (CONST_OK_FOR_J (actual_fsize) ? 2 : 4) + 2;
 
/* Don't bother checking if we don't actually save any space. */
if (restore_func_len < restore_normal_len)
{
restore_all = gen_rtx_PARALLEL (VOIDmode,
rtvec_alloc (num_restore + 2));
XVECEXP (restore_all, 0, 0) = gen_rtx_RETURN (VOIDmode);
XVECEXP (restore_all, 0, 1)
= gen_rtx_SET (VOIDmode, stack_pointer_rtx,
gen_rtx_PLUS (Pmode,
stack_pointer_rtx,
GEN_INT (alloc_stack)));
 
offset = alloc_stack - 4;
for (i = 0; i < num_restore; i++)
{
XVECEXP (restore_all, 0, i+2)
= gen_rtx_SET (VOIDmode,
restore_regs[i],
gen_rtx_MEM (Pmode,
plus_constant (stack_pointer_rtx,
offset)));
offset -= 4;
}
 
code = recog (restore_all, NULL_RTX, NULL);
if (code >= 0)
{
rtx insn;
 
actual_fsize -= alloc_stack;
if (actual_fsize)
{
if (CONST_OK_FOR_K (actual_fsize))
emit_insn (gen_addsi3 (stack_pointer_rtx,
stack_pointer_rtx,
GEN_INT (actual_fsize)));
else
{
rtx reg = gen_rtx_REG (Pmode, 12);
emit_move_insn (reg, GEN_INT (actual_fsize));
emit_insn (gen_addsi3 (stack_pointer_rtx,
stack_pointer_rtx,
reg));
}
}
 
insn = emit_jump_insn (restore_all);
INSN_CODE (insn) = code;
 
if (TARGET_DEBUG)
fprintf (stderr, "\
Saved %d bytes via epilogue function (%d vs. %d) in function %s\n",
restore_normal_len - restore_func_len,
restore_normal_len, restore_func_len,
IDENTIFIER_POINTER (DECL_NAME (current_function_decl)));
}
else
restore_all = NULL_RTX;
}
}
 
/* If no epilogue restore function is available, restore the registers the
old-fashioned way (one by one). */
if (!restore_all)
{
/* If the stack is large, we need to deallocate it in two pieces. */
if (actual_fsize && !CONST_OK_FOR_K (-actual_fsize))
init_stack_free = 4 * num_restore;
else
init_stack_free = actual_fsize;
 
/* Deallocate the rest of the stack if it is > 32K. */
if (actual_fsize > init_stack_free)
{
int diff;
 
diff = actual_fsize - ((interrupt_handler) ? 0 : init_stack_free);
 
if (CONST_OK_FOR_K (diff))
emit_insn (gen_addsi3 (stack_pointer_rtx,
stack_pointer_rtx,
GEN_INT (diff)));
else
{
rtx reg = gen_rtx_REG (Pmode, 12);
emit_move_insn (reg, GEN_INT (diff));
emit_insn (gen_addsi3 (stack_pointer_rtx,
stack_pointer_rtx,
reg));
}
}
 
/* Special case interrupt functions that save all registers
for a call. */
if (interrupt_handler && ((1L << LINK_POINTER_REGNUM) & reg_saved) != 0)
{
if (TARGET_V850E && ! TARGET_DISABLE_CALLT)
emit_insn (gen_callt_restore_all_interrupt ());
else
emit_insn (gen_restore_all_interrupt ());
}
else
{
/* Restore registers from the beginning of the stack frame. */
offset = init_stack_free - 4;
 
/* Restore the return pointer first. */
if (num_restore > 0
&& REGNO (restore_regs [num_restore - 1]) == LINK_POINTER_REGNUM)
{
emit_move_insn (restore_regs[--num_restore],
gen_rtx_MEM (SImode,
plus_constant (stack_pointer_rtx,
offset)));
offset -= 4;
}
 
for (i = 0; i < num_restore; i++)
{
emit_move_insn (restore_regs[i],
gen_rtx_MEM (SImode,
plus_constant (stack_pointer_rtx,
offset)));
 
emit_insn (gen_rtx_USE (VOIDmode, restore_regs[i]));
offset -= 4;
}
 
/* Cut back the remainder of the stack. */
if (init_stack_free)
emit_insn (gen_addsi3 (stack_pointer_rtx,
stack_pointer_rtx,
GEN_INT (init_stack_free)));
}
 
/* And return or use reti for interrupt handlers. */
if (interrupt_handler)
{
if (TARGET_V850E && ! TARGET_DISABLE_CALLT)
emit_insn (gen_callt_return_interrupt ());
else
emit_jump_insn (gen_return_interrupt ());
}
else if (actual_fsize)
emit_jump_insn (gen_return_internal ());
else
emit_jump_insn (gen_return ());
}
 
v850_interrupt_cache_p = FALSE;
v850_interrupt_p = FALSE;
}
 
/* Update the condition code from the insn. */
 
void
notice_update_cc (rtx body, rtx insn)
{
switch (get_attr_cc (insn))
{
case CC_NONE:
/* Insn does not affect CC at all. */
break;
 
case CC_NONE_0HIT:
/* Insn does not change CC, but the 0'th operand has been changed. */
if (cc_status.value1 != 0
&& reg_overlap_mentioned_p (recog_data.operand[0], cc_status.value1))
cc_status.value1 = 0;
break;
 
case CC_SET_ZN:
/* Insn sets the Z,N flags of CC to recog_data.operand[0].
V,C is in an unusable state. */
CC_STATUS_INIT;
cc_status.flags |= CC_OVERFLOW_UNUSABLE | CC_NO_CARRY;
cc_status.value1 = recog_data.operand[0];
break;
 
case CC_SET_ZNV:
/* Insn sets the Z,N,V flags of CC to recog_data.operand[0].
C is in an unusable state. */
CC_STATUS_INIT;
cc_status.flags |= CC_NO_CARRY;
cc_status.value1 = recog_data.operand[0];
break;
 
case CC_COMPARE:
/* The insn is a compare instruction. */
CC_STATUS_INIT;
cc_status.value1 = SET_SRC (body);
break;
 
case CC_CLOBBER:
/* Insn doesn't leave CC in a usable state. */
CC_STATUS_INIT;
break;
}
}
/* Retrieve the data area that has been chosen for the given decl. */
 
v850_data_area
v850_get_data_area (tree decl)
{
if (lookup_attribute ("sda", DECL_ATTRIBUTES (decl)) != NULL_TREE)
return DATA_AREA_SDA;
if (lookup_attribute ("tda", DECL_ATTRIBUTES (decl)) != NULL_TREE)
return DATA_AREA_TDA;
if (lookup_attribute ("zda", DECL_ATTRIBUTES (decl)) != NULL_TREE)
return DATA_AREA_ZDA;
 
return DATA_AREA_NORMAL;
}
 
/* Store the indicated data area in the decl's attributes. */
 
static void
v850_set_data_area (tree decl, v850_data_area data_area)
{
tree name;
switch (data_area)
{
case DATA_AREA_SDA: name = get_identifier ("sda"); break;
case DATA_AREA_TDA: name = get_identifier ("tda"); break;
case DATA_AREA_ZDA: name = get_identifier ("zda"); break;
default:
return;
}
 
DECL_ATTRIBUTES (decl) = tree_cons
(name, NULL, DECL_ATTRIBUTES (decl));
}
const struct attribute_spec v850_attribute_table[] =
{
/* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
{ "interrupt_handler", 0, 0, true, false, false, v850_handle_interrupt_attribute },
{ "interrupt", 0, 0, true, false, false, v850_handle_interrupt_attribute },
{ "sda", 0, 0, true, false, false, v850_handle_data_area_attribute },
{ "tda", 0, 0, true, false, false, v850_handle_data_area_attribute },
{ "zda", 0, 0, true, false, false, v850_handle_data_area_attribute },
{ NULL, 0, 0, false, false, false, NULL }
};
 
/* Handle an "interrupt" attribute; arguments as in
struct attribute_spec.handler. */
static tree
v850_handle_interrupt_attribute (tree * node,
tree name,
tree args ATTRIBUTE_UNUSED,
int flags ATTRIBUTE_UNUSED,
bool * no_add_attrs)
{
if (TREE_CODE (*node) != FUNCTION_DECL)
{
warning (OPT_Wattributes, "%qs attribute only applies to functions",
IDENTIFIER_POINTER (name));
*no_add_attrs = true;
}
 
return NULL_TREE;
}
 
/* Handle a "sda", "tda" or "zda" attribute; arguments as in
struct attribute_spec.handler. */
static tree
v850_handle_data_area_attribute (tree* node,
tree name,
tree args ATTRIBUTE_UNUSED,
int flags ATTRIBUTE_UNUSED,
bool * no_add_attrs)
{
v850_data_area data_area;
v850_data_area area;
tree decl = *node;
 
/* Implement data area attribute. */
if (is_attribute_p ("sda", name))
data_area = DATA_AREA_SDA;
else if (is_attribute_p ("tda", name))
data_area = DATA_AREA_TDA;
else if (is_attribute_p ("zda", name))
data_area = DATA_AREA_ZDA;
else
gcc_unreachable ();
switch (TREE_CODE (decl))
{
case VAR_DECL:
if (current_function_decl != NULL_TREE)
{
error ("%Jdata area attributes cannot be specified for "
"local variables", decl);
*no_add_attrs = true;
}
 
/* Drop through. */
 
case FUNCTION_DECL:
area = v850_get_data_area (decl);
if (area != DATA_AREA_NORMAL && data_area != area)
{
error ("data area of %q+D conflicts with previous declaration",
decl);
*no_add_attrs = true;
}
break;
default:
break;
}
 
return NULL_TREE;
}
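 
/* As an illustration of the attributes handled above, a source file
might contain declarations such as
 
int counter __attribute__ ((sda));
char buffer[8] __attribute__ ((tda));
 
(hypothetical variable names; the attribute spellings come from
v850_attribute_table). */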
 
/* Return nonzero if FUNC is an interrupt function as specified
by the "interrupt" attribute. */
 
int
v850_interrupt_function_p (tree func)
{
tree a;
int ret = 0;
 
if (v850_interrupt_cache_p)
return v850_interrupt_p;
 
if (TREE_CODE (func) != FUNCTION_DECL)
return 0;
 
a = lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (func));
if (a != NULL_TREE)
ret = 1;
 
else
{
a = lookup_attribute ("interrupt", DECL_ATTRIBUTES (func));
ret = a != NULL_TREE;
}
 
/* It's not safe to trust global variables until after function inlining has
been done. */
if (reload_completed | reload_in_progress)
v850_interrupt_p = ret;
 
return ret;
}
 
static void
v850_encode_data_area (tree decl, rtx symbol)
{
int flags;
 
/* Map explicit sections into the appropriate attribute */
if (v850_get_data_area (decl) == DATA_AREA_NORMAL)
{
if (DECL_SECTION_NAME (decl))
{
const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
if (streq (name, ".zdata") || streq (name, ".zbss"))
v850_set_data_area (decl, DATA_AREA_ZDA);
 
else if (streq (name, ".sdata") || streq (name, ".sbss"))
v850_set_data_area (decl, DATA_AREA_SDA);
 
else if (streq (name, ".tdata"))
v850_set_data_area (decl, DATA_AREA_TDA);
}
 
/* If no attribute, support -m{zda,sda,tda}=n */
else
{
int size = int_size_in_bytes (TREE_TYPE (decl));
if (size <= 0)
;
 
else if (size <= small_memory [(int) SMALL_MEMORY_TDA].max)
v850_set_data_area (decl, DATA_AREA_TDA);
 
else if (size <= small_memory [(int) SMALL_MEMORY_SDA].max)
v850_set_data_area (decl, DATA_AREA_SDA);
 
else if (size <= small_memory [(int) SMALL_MEMORY_ZDA].max)
v850_set_data_area (decl, DATA_AREA_ZDA);
}
if (v850_get_data_area (decl) == DATA_AREA_NORMAL)
return;
}
 
flags = SYMBOL_REF_FLAGS (symbol);
switch (v850_get_data_area (decl))
{
case DATA_AREA_ZDA: flags |= SYMBOL_FLAG_ZDA; break;
case DATA_AREA_TDA: flags |= SYMBOL_FLAG_TDA; break;
case DATA_AREA_SDA: flags |= SYMBOL_FLAG_SDA; break;
default: gcc_unreachable ();
}
SYMBOL_REF_FLAGS (symbol) = flags;
}
 
static void
v850_encode_section_info (tree decl, rtx rtl, int first)
{
default_encode_section_info (decl, rtl, first);
 
if (TREE_CODE (decl) == VAR_DECL
&& (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
v850_encode_data_area (decl, XEXP (rtl, 0));
}
 
/* Construct a JR instruction to a routine that will perform the equivalent of
the RTL passed in as an argument. This RTL is a function epilogue that
pops registers off the stack and possibly releases some extra stack space
as well. The code has already verified that the RTL matches these
requirements. */
char *
construct_restore_jr (rtx op)
{
int count = XVECLEN (op, 0);
int stack_bytes;
unsigned long int mask;
unsigned long int first;
unsigned long int last;
int i;
static char buff [100]; /* XXX */
if (count <= 2)
{
error ("bogus JR construction: %d", count);
return NULL;
}
 
/* Work out how many bytes to pop off the stack before retrieving
registers. */
gcc_assert (GET_CODE (XVECEXP (op, 0, 1)) == SET);
gcc_assert (GET_CODE (SET_SRC (XVECEXP (op, 0, 1))) == PLUS);
gcc_assert (GET_CODE (XEXP (SET_SRC (XVECEXP (op, 0, 1)), 1)) == CONST_INT);
stack_bytes = INTVAL (XEXP (SET_SRC (XVECEXP (op, 0, 1)), 1));
 
/* Each pop will remove 4 bytes from the stack.... */
stack_bytes -= (count - 2) * 4;
 
/* Make sure that the amount we are popping is either 0 or 16 bytes. */
if (stack_bytes != 0 && stack_bytes != 16)
{
error ("bad amount of stack space removal: %d", stack_bytes);
return NULL;
}
 
/* Now compute the bit mask of registers to push. */
mask = 0;
for (i = 2; i < count; i++)
{
rtx vector_element = XVECEXP (op, 0, i);
gcc_assert (GET_CODE (vector_element) == SET);
gcc_assert (GET_CODE (SET_DEST (vector_element)) == REG);
gcc_assert (register_is_ok_for_epilogue (SET_DEST (vector_element),
SImode));
mask |= 1 << REGNO (SET_DEST (vector_element));
}
 
/* Scan for the first register to pop. */
for (first = 0; first < 32; first++)
{
if (mask & (1 << first))
break;
}
 
gcc_assert (first < 32);
 
/* Discover the last register to pop. */
if (mask & (1 << LINK_POINTER_REGNUM))
{
gcc_assert (stack_bytes == 16);
last = LINK_POINTER_REGNUM;
}
else
{
gcc_assert (!stack_bytes);
gcc_assert (mask & (1 << 29));
last = 29;
}
 
/* Note, it is possible to have gaps in the register mask.
We ignore this here, and generate a JR anyway. We will
be popping more registers than is strictly necessary, but
it does save code space. */
if (TARGET_LONG_CALLS)
{
char name[40];
if (first == last)
sprintf (name, "__return_%s", reg_names [first]);
else
sprintf (name, "__return_%s_%s", reg_names [first], reg_names [last]);
sprintf (buff, "movhi hi(%s), r0, r6\n\tmovea lo(%s), r6, r6\n\tjmp r6",
name, name);
}
else
{
if (first == last)
sprintf (buff, "jr __return_%s", reg_names [first]);
else
sprintf (buff, "jr __return_%s_%s", reg_names [first], reg_names [last]);
}
return buff;
}
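 
/* For instance, when r20 through r29 and r31 were pushed, the code above
emits "jr __return_r20_r31"; with -mlong-calls it instead materializes
the helper's address in r6 and jumps to it with "jmp r6". */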
 
 
/* Construct a JARL instruction to a routine that will perform the equivalent
of the RTL passed as a parameter. This RTL is a function prologue that
saves some of the registers r20 - r31 onto the stack, and possibly acquires
some stack space as well. The code has already verified that the RTL
matches these requirements. */
char *
construct_save_jarl (rtx op)
{
int count = XVECLEN (op, 0);
int stack_bytes;
unsigned long int mask;
unsigned long int first;
unsigned long int last;
int i;
static char buff [100]; /* XXX */
if (count <= 2)
{
error ("bogus JARL construction: %d\n", count);
return NULL;
}
 
/* Paranoia. */
gcc_assert (GET_CODE (XVECEXP (op, 0, 0)) == SET);
gcc_assert (GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) == PLUS);
gcc_assert (GET_CODE (XEXP (SET_SRC (XVECEXP (op, 0, 0)), 0)) == REG);
gcc_assert (GET_CODE (XEXP (SET_SRC (XVECEXP (op, 0, 0)), 1)) == CONST_INT);
/* Work out how many bytes to push onto the stack after storing the
registers. */
stack_bytes = INTVAL (XEXP (SET_SRC (XVECEXP (op, 0, 0)), 1));
 
/* Each register push accounts for 4 bytes of the allocated stack space.... */
stack_bytes += (count - (TARGET_LONG_CALLS ? 3 : 2)) * 4;
 
/* Make sure that the extra stack space being allocated is either 0 or 16 bytes. */
if (stack_bytes != 0 && stack_bytes != -16)
{
error ("bad amount of stack space removal: %d", stack_bytes);
return NULL;
}
 
/* Now compute the bit mask of registers to push. */
mask = 0;
for (i = 1; i < count - (TARGET_LONG_CALLS ? 2 : 1); i++)
{
rtx vector_element = XVECEXP (op, 0, i);
gcc_assert (GET_CODE (vector_element) == SET);
gcc_assert (GET_CODE (SET_SRC (vector_element)) == REG);
gcc_assert (register_is_ok_for_epilogue (SET_SRC (vector_element),
SImode));
mask |= 1 << REGNO (SET_SRC (vector_element));
}
 
/* Scan for the first register to push. */
for (first = 0; first < 32; first++)
{
if (mask & (1 << first))
break;
}
 
gcc_assert (first < 32);
 
/* Discover the last register to push. */
if (mask & (1 << LINK_POINTER_REGNUM))
{
gcc_assert (stack_bytes == -16);
last = LINK_POINTER_REGNUM;
}
else
{
gcc_assert (!stack_bytes);
gcc_assert (mask & (1 << 29));
last = 29;
}
 
/* Note, it is possible to have gaps in the register mask.
We ignore this here, and generate a JARL anyway. We will
be pushing more registers than is strictly necessary, but
it does save code space. */
if (TARGET_LONG_CALLS)
{
char name[40];
if (first == last)
sprintf (name, "__save_%s", reg_names [first]);
else
sprintf (name, "__save_%s_%s", reg_names [first], reg_names [last]);
sprintf (buff, "movhi hi(%s), r0, r11\n\tmovea lo(%s), r11, r11\n\tjarl .+4, r10\n\tadd 4, r10\n\tjmp r11",
name, name);
}
else
{
if (first == last)
sprintf (buff, "jarl __save_%s, r10", reg_names [first]);
else
sprintf (buff, "jarl __save_%s_%s, r10", reg_names [first],
reg_names [last]);
}
 
return buff;
}
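 
/* The matching prologue case: saving r20 through r29 and r31 produces
"jarl __save_r20_r31, r10", while the long-call variant loads the
helper's address into r11 and computes the return address in r10 before
jumping. */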
 
extern tree last_assemble_variable_decl;
extern int size_directive_output;
 
/* A version of asm_output_aligned_bss() that copes with the special
data areas of the v850. */
void
v850_output_aligned_bss (FILE * file,
tree decl,
const char * name,
unsigned HOST_WIDE_INT size,
int align)
{
switch (v850_get_data_area (decl))
{
case DATA_AREA_ZDA:
switch_to_section (zbss_section);
break;
 
case DATA_AREA_SDA:
switch_to_section (sbss_section);
break;
 
case DATA_AREA_TDA:
switch_to_section (tdata_section);
break;
 
default:
switch_to_section (bss_section);
break;
}
ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
#ifdef ASM_DECLARE_OBJECT_NAME
last_assemble_variable_decl = decl;
ASM_DECLARE_OBJECT_NAME (file, name, decl);
#else
/* Standard thing is just output label for the object. */
ASM_OUTPUT_LABEL (file, name);
#endif /* ASM_DECLARE_OBJECT_NAME */
ASM_OUTPUT_SKIP (file, size ? size : 1);
}
 
/* Called via the macro ASM_OUTPUT_DECL_COMMON */
void
v850_output_common (FILE * file,
tree decl,
const char * name,
int size,
int align)
{
if (decl == NULL_TREE)
{
fprintf (file, "%s", COMMON_ASM_OP);
}
else
{
switch (v850_get_data_area (decl))
{
case DATA_AREA_ZDA:
fprintf (file, "%s", ZCOMMON_ASM_OP);
break;
 
case DATA_AREA_SDA:
fprintf (file, "%s", SCOMMON_ASM_OP);
break;
 
case DATA_AREA_TDA:
fprintf (file, "%s", TCOMMON_ASM_OP);
break;
default:
fprintf (file, "%s", COMMON_ASM_OP);
break;
}
}
assemble_name (file, name);
fprintf (file, ",%u,%u\n", size, align / BITS_PER_UNIT);
}
 
/* Called via the macro ASM_OUTPUT_DECL_LOCAL */
void
v850_output_local (FILE * file,
tree decl,
const char * name,
int size,
int align)
{
fprintf (file, "%s", LOCAL_ASM_OP);
assemble_name (file, name);
fprintf (file, "\n");
ASM_OUTPUT_ALIGNED_DECL_COMMON (file, decl, name, size, align);
}
 
/* Add data area to the given declaration if a ghs data area pragma is
currently in effect (#pragma ghs startXXX/endXXX). */
static void
v850_insert_attributes (tree decl, tree * attr_ptr ATTRIBUTE_UNUSED )
{
if (data_area_stack
&& data_area_stack->data_area
&& current_function_decl == NULL_TREE
&& (TREE_CODE (decl) == VAR_DECL || TREE_CODE (decl) == CONST_DECL)
&& v850_get_data_area (decl) == DATA_AREA_NORMAL)
v850_set_data_area (decl, data_area_stack->data_area);
 
/* Initialize the default names of the v850 specific sections,
if this has not been done before. */
if (GHS_default_section_names [(int) GHS_SECTION_KIND_SDATA] == NULL)
{
GHS_default_section_names [(int) GHS_SECTION_KIND_SDATA]
= build_string (sizeof (".sdata")-1, ".sdata");
 
GHS_default_section_names [(int) GHS_SECTION_KIND_ROSDATA]
= build_string (sizeof (".rosdata")-1, ".rosdata");
 
GHS_default_section_names [(int) GHS_SECTION_KIND_TDATA]
= build_string (sizeof (".tdata")-1, ".tdata");
GHS_default_section_names [(int) GHS_SECTION_KIND_ZDATA]
= build_string (sizeof (".zdata")-1, ".zdata");
 
GHS_default_section_names [(int) GHS_SECTION_KIND_ROZDATA]
= build_string (sizeof (".rozdata")-1, ".rozdata");
}
if (current_function_decl == NULL_TREE
&& (TREE_CODE (decl) == VAR_DECL
|| TREE_CODE (decl) == CONST_DECL
|| TREE_CODE (decl) == FUNCTION_DECL)
&& (!DECL_EXTERNAL (decl) || DECL_INITIAL (decl))
&& !DECL_SECTION_NAME (decl))
{
enum GHS_section_kind kind = GHS_SECTION_KIND_DEFAULT;
tree chosen_section;
 
if (TREE_CODE (decl) == FUNCTION_DECL)
kind = GHS_SECTION_KIND_TEXT;
else
{
/* First choose a section kind based on the data area of the decl. */
switch (v850_get_data_area (decl))
{
default:
gcc_unreachable ();
case DATA_AREA_SDA:
kind = ((TREE_READONLY (decl))
? GHS_SECTION_KIND_ROSDATA
: GHS_SECTION_KIND_SDATA);
break;
case DATA_AREA_TDA:
kind = GHS_SECTION_KIND_TDATA;
break;
case DATA_AREA_ZDA:
kind = ((TREE_READONLY (decl))
? GHS_SECTION_KIND_ROZDATA
: GHS_SECTION_KIND_ZDATA);
break;
case DATA_AREA_NORMAL: /* default data area */
if (TREE_READONLY (decl))
kind = GHS_SECTION_KIND_RODATA;
else if (DECL_INITIAL (decl))
kind = GHS_SECTION_KIND_DATA;
else
kind = GHS_SECTION_KIND_BSS;
}
}
 
/* Now, if the section kind has been explicitly renamed,
then attach a section attribute. */
chosen_section = GHS_current_section_names [(int) kind];
 
/* Otherwise, if this kind of section needs an explicit section
attribute, then also attach one. */
if (chosen_section == NULL)
chosen_section = GHS_default_section_names [(int) kind];
 
if (chosen_section)
{
/* Only set the section name if specified by a pragma, because
otherwise it will force those variables to get allocated storage
in this module, rather than by the linker. */
DECL_SECTION_NAME (decl) = chosen_section;
}
}
}
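 
/* As a sketch of the pragma usage handled above, a translation unit
might contain
 
#pragma ghs startsda
int table[4];
#pragma ghs endsda
 
which gives "table" the sda data area attribute (hypothetical variable;
the pragma names follow the startXXX/endXXX pattern noted above). */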
 
/* Construct a DISPOSE instruction that is the equivalent of
the given RTX. We have already verified that this should
be possible. */
 
char *
construct_dispose_instruction (rtx op)
{
int count = XVECLEN (op, 0);
int stack_bytes;
unsigned long int mask;
int i;
static char buff[ 100 ]; /* XXX */
int use_callt = 0;
if (count <= 2)
{
error ("bogus DISPOSE construction: %d", count);
return NULL;
}
 
/* Work out how many bytes to pop off the
stack before retrieving registers. */
gcc_assert (GET_CODE (XVECEXP (op, 0, 1)) == SET);
gcc_assert (GET_CODE (SET_SRC (XVECEXP (op, 0, 1))) == PLUS);
gcc_assert (GET_CODE (XEXP (SET_SRC (XVECEXP (op, 0, 1)), 1)) == CONST_INT);
stack_bytes = INTVAL (XEXP (SET_SRC (XVECEXP (op, 0, 1)), 1));
 
/* Each pop will remove 4 bytes from the stack.... */
stack_bytes -= (count - 2) * 4;
 
/* Make sure that the amount we are popping
will fit into the DISPOSE instruction. */
if (stack_bytes > 128)
{
error ("too much stack space to dispose of: %d", stack_bytes);
return NULL;
}
 
/* Now compute the bit mask of registers to push. */
mask = 0;
 
for (i = 2; i < count; i++)
{
rtx vector_element = XVECEXP (op, 0, i);
gcc_assert (GET_CODE (vector_element) == SET);
gcc_assert (GET_CODE (SET_DEST (vector_element)) == REG);
gcc_assert (register_is_ok_for_epilogue (SET_DEST (vector_element),
SImode));
 
if (REGNO (SET_DEST (vector_element)) == 2)
use_callt = 1;
else
mask |= 1 << REGNO (SET_DEST (vector_element));
}
 
if (! TARGET_DISABLE_CALLT
&& (use_callt || stack_bytes == 0 || stack_bytes == 16))
{
if (use_callt)
{
sprintf (buff, "callt ctoff(__callt_return_r2_r%d)", (mask & (1 << 31)) ? 31 : 29);
return buff;
}
else
{
for (i = 20; i < 32; i++)
if (mask & (1 << i))
break;
if (i == 31)
sprintf (buff, "callt ctoff(__callt_return_r31c)");
else
sprintf (buff, "callt ctoff(__callt_return_r%d_r%d%s)",
i, (mask & (1 << 31)) ? 31 : 29, stack_bytes ? "c" : "");
}
}
else
{
static char regs [100]; /* XXX */
int done_one;
/* Generate the DISPOSE instruction. Note we could just issue the
bit mask as a number as the assembler can cope with this, but for
the sake of our readers we turn it into a textual description. */
regs[0] = 0;
done_one = 0;
for (i = 20; i < 32; i++)
{
if (mask & (1 << i))
{
int first;
if (done_one)
strcat (regs, ", ");
else
done_one = 1;
first = i;
strcat (regs, reg_names[ first ]);
for (i++; i < 32; i++)
if ((mask & (1 << i)) == 0)
break;
if (i > first + 1)
{
strcat (regs, " - ");
strcat (regs, reg_names[ i - 1 ] );
}
}
}
sprintf (buff, "dispose %d {%s}, r31", stack_bytes / 4, regs);
}
return buff;
}
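 
/* For example, when the callt stubs cannot be used (say, with
-mdisable-callt), a mask covering r26 through r29 plus 16 bytes of
extra stack produces "dispose 4 {r26 - r29}, r31". */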
 
/* Construct a PREPARE instruction that is the equivalent of
the given RTL. We have already verified that this should
be possible. */
 
char *
construct_prepare_instruction (rtx op)
{
int count = XVECLEN (op, 0);
int stack_bytes;
unsigned long int mask;
int i;
static char buff[ 100 ]; /* XXX */
int use_callt = 0;
if (count <= 1)
{
error ("bogus PREPEARE construction: %d", count);
return NULL;
}
 
/* Work out how many bytes to push onto
the stack after storing the registers. */
gcc_assert (GET_CODE (XVECEXP (op, 0, 0)) == SET);
gcc_assert (GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) == PLUS);
gcc_assert (GET_CODE (XEXP (SET_SRC (XVECEXP (op, 0, 0)), 1)) == CONST_INT);
stack_bytes = INTVAL (XEXP (SET_SRC (XVECEXP (op, 0, 0)), 1));
 
/* Each register push accounts for 4 bytes of the allocated stack space. */
stack_bytes += (count - 1) * 4;
 
/* Make sure that the amount we are allocating
will fit into the PREPARE instruction. */
if (stack_bytes < -128)
{
error ("too much stack space to prepare: %d", stack_bytes);
return NULL;
}
 
/* Now compute the bit mask of registers to push. */
mask = 0;
for (i = 1; i < count; i++)
{
rtx vector_element = XVECEXP (op, 0, i);
gcc_assert (GET_CODE (vector_element) == SET);
gcc_assert (GET_CODE (SET_SRC (vector_element)) == REG);
gcc_assert (register_is_ok_for_epilogue (SET_SRC (vector_element),
SImode));
 
if (REGNO (SET_SRC (vector_element)) == 2)
use_callt = 1;
else
mask |= 1 << REGNO (SET_SRC (vector_element));
}
 
if ((! TARGET_DISABLE_CALLT)
&& (use_callt || stack_bytes == 0 || stack_bytes == -16))
{
if (use_callt)
{
sprintf (buff, "callt ctoff(__callt_save_r2_r%d)", (mask & (1 << 31)) ? 31 : 29 );
return buff;
}
for (i = 20; i < 32; i++)
if (mask & (1 << i))
break;
 
if (i == 31)
sprintf (buff, "callt ctoff(__callt_save_r31c)");
else
sprintf (buff, "callt ctoff(__callt_save_r%d_r%d%s)",
i, (mask & (1 << 31)) ? 31 : 29, stack_bytes ? "c" : "");
}
else
{
static char regs [100]; /* XXX */
int done_one;
 
/* Generate the PREPARE instruction. Note we could just issue the
bit mask as a number as the assembler can cope with this, but for
the sake of our readers we turn it into a textual description. */
regs[0] = 0;
done_one = 0;
for (i = 20; i < 32; i++)
{
if (mask & (1 << i))
{
int first;
if (done_one)
strcat (regs, ", ");
else
done_one = 1;
first = i;
strcat (regs, reg_names[ first ]);
for (i++; i < 32; i++)
if ((mask & (1 << i)) == 0)
break;
if (i > first + 1)
{
strcat (regs, " - ");
strcat (regs, reg_names[ i - 1 ] );
}
}
}
sprintf (buff, "prepare {%s}, %d", regs, (- stack_bytes) / 4);
}
return buff;
}
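 
/* Similarly, when the callt stubs cannot be used, saving r26 through
r29 together with 16 extra bytes of stack gives stack_bytes == -16 and
produces "prepare {r26 - r29}, 4". */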
/* Return an RTX indicating where the return address to the
calling function can be found. */
 
rtx
v850_return_addr (int count)
{
if (count != 0)
return const0_rtx;
 
return get_hard_reg_initial_val (Pmode, LINK_POINTER_REGNUM);
}
/* Implement TARGET_ASM_INIT_SECTIONS. */
 
static void
v850_asm_init_sections (void)
{
rosdata_section
= get_unnamed_section (0, output_section_asm_op,
"\t.section .rosdata,\"a\"");
 
rozdata_section
= get_unnamed_section (0, output_section_asm_op,
"\t.section .rozdata,\"a\"");
 
tdata_section
= get_unnamed_section (SECTION_WRITE, output_section_asm_op,
"\t.section .tdata,\"aw\"");
 
zdata_section
= get_unnamed_section (SECTION_WRITE, output_section_asm_op,
"\t.section .zdata,\"aw\"");
 
zbss_section
= get_unnamed_section (SECTION_WRITE | SECTION_BSS,
output_section_asm_op,
"\t.section .zbss,\"aw\"");
}
 
static section *
v850_select_section (tree exp,
int reloc ATTRIBUTE_UNUSED,
unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
if (TREE_CODE (exp) == VAR_DECL)
{
int is_const;
if (!TREE_READONLY (exp)
|| TREE_SIDE_EFFECTS (exp)
|| !DECL_INITIAL (exp)
|| (DECL_INITIAL (exp) != error_mark_node
&& !TREE_CONSTANT (DECL_INITIAL (exp))))
is_const = FALSE;
else
is_const = TRUE;
 
switch (v850_get_data_area (exp))
{
case DATA_AREA_ZDA:
return is_const ? rozdata_section : zdata_section;
 
case DATA_AREA_TDA:
return tdata_section;
 
case DATA_AREA_SDA:
return is_const ? rosdata_section : sdata_section;
 
default:
return is_const ? readonly_data_section : data_section;
}
}
return readonly_data_section;
}
/* Worker function for TARGET_RETURN_IN_MEMORY. */
 
static bool
v850_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
{
/* Return values > 8 bytes in length in memory. */
return int_size_in_bytes (type) > 8 || TYPE_MODE (type) == BLKmode;
}
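 
/* For example, an 8-byte structure is returned in registers, while a
12-byte structure (or any BLKmode value) is returned in memory. */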
/* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
 
static void
v850_setup_incoming_varargs (CUMULATIVE_ARGS *ca,
enum machine_mode mode ATTRIBUTE_UNUSED,
tree type ATTRIBUTE_UNUSED,
int *pretend_arg_size ATTRIBUTE_UNUSED,
int second_time ATTRIBUTE_UNUSED)
{
ca->anonymous_args = (!TARGET_GHS ? 1 : 0);
}
 
#include "gt-v850.h"
/v850.opt
0,0 → 1,90
; Options for the NEC V850 port of the compiler.
 
; Copyright (C) 2005, 2007 Free Software Foundation, Inc.
;
; This file is part of GCC.
;
; GCC is free software; you can redistribute it and/or modify it under
; the terms of the GNU General Public License as published by the Free
; Software Foundation; either version 3, or (at your option) any later
; version.
;
; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
; WARRANTY; without even the implied warranty of MERCHANTABILITY or
; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
; for more details.
;
; You should have received a copy of the GNU General Public License
; along with GCC; see the file COPYING3. If not see
; <http://www.gnu.org/licenses/>.
 
mapp-regs
Target Report Mask(APP_REGS)
Use registers r2 and r5
 
mbig-switch
Target Report Mask(BIG_SWITCH)
Use 4 byte entries in switch tables
 
mdebug
Target Report Mask(DEBUG)
Enable backend debugging
 
mdisable-callt
Target Report Mask(DISABLE_CALLT)
Do not use the callt instruction
 
mep
Target Report Mask(EP)
Reuse r30 on a per function basis
 
mghs
Target Report Mask(GHS)
Support Green Hills ABI
 
mlong-calls
Target Report Mask(LONG_CALLS)
Prohibit PC relative function calls
 
mprolog-function
Target Report Mask(PROLOG_FUNCTION)
Use stubs for function prologues
 
msda
Target RejectNegative Joined
Set the max size of data eligible for the SDA area
 
msmall-sld
Target Report Mask(SMALL_SLD)
Enable the use of the short load instructions
 
mspace
Target RejectNegative
Same as: -mep -mprolog-function
 
mtda
Target RejectNegative Joined
Set the max size of data eligible for the TDA area
 
mstrict-align
Target Report Mask(STRICT_ALIGN)
Enforce strict alignment
 
mUS-bit-set
Target Report Mask(US_BIT_SET)
 
mv850
Target Report RejectNegative Mask(V850)
Compile for the v850 processor
 
mv850e
Target Report RejectNegative Mask(V850E)
Compile for the v850e processor
 
mv850e1
Target RejectNegative Mask(V850E) MaskExists
Compile for the v850e1 processor
 
mzda
Target RejectNegative Joined
Set the max size of data eligible for the ZDA area
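 
; As an illustration of the Joined options above, a command line such as
;   gcc -mv850e -msda=256 -mtda=32 -mzda=128 foo.c
; limits each small data area to objects no larger than the given number
; of bytes (sizes here are only example values).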
/lib1funcs.asm
0,0 → 1,2386
/* libgcc routines for NEC V850.
Copyright (C) 1996, 1997, 2002, 2005 Free Software Foundation, Inc.
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.
 
In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file. (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)
 
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
 
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING. If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA. */
 
#ifdef L_mulsi3
.text
.globl ___mulsi3
.type ___mulsi3,@function
___mulsi3:
#ifdef __v850__
/*
#define SHIFT 12
#define MASK ((1 << SHIFT) - 1)
#define STEP(i, j) \
({ \
short a_part = (a >> (i)) & MASK; \
short b_part = (b >> (j)) & MASK; \
int res = (((int) a_part) * ((int) b_part)); \
res; \
})
int
__mulsi3 (unsigned a, unsigned b)
{
return STEP (0, 0) +
((STEP (SHIFT, 0) + STEP (0, SHIFT)) << SHIFT) +
((STEP (0, 2 * SHIFT) + STEP (SHIFT, SHIFT) + STEP (2 * SHIFT, 0))
<< (2 * SHIFT));
}
*/
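/* The hand-written code below follows the same partial-product scheme,
but splits the operands into 15-bit chunks (mask 32767) so that each
partial product fits the signed 16-bit mulh multiply. */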
mov r6, r14
movea lo(32767), r0, r10
and r10, r14
mov r7, r15
and r10, r15
shr 15, r6
mov r6, r13
and r10, r13
shr 15, r7
mov r7, r12
and r10, r12
shr 15, r6
shr 15, r7
mov r14, r10
mulh r15, r10
mov r14, r11
mulh r12, r11
mov r13, r16
mulh r15, r16
mulh r14, r7
mulh r15, r6
add r16, r11
mulh r13, r12
shl 15, r11
add r11, r10
add r12, r7
add r6, r7
shl 30, r7
add r7, r10
jmp [r31]
#endif /* __v850__ */
#if defined(__v850e__) || defined(__v850ea__)
/* This routine is almost unnecessary because gcc
generates the MUL instruction for the RTX mulsi3.
But if someone wants to link their application with
previously compiled v850 objects then they will
need this function. */
/* It is not a good idea to use the instruction sequence
mul r7, r6, r0
mov r6, r10
here, because there is a RAW hazard between the two: the MUL
instruction takes 2 cycles in the EX stage, so the MOV
instruction would have to wait 1 cycle. */
mov r7, r10
mul r6, r10, r0
jmp [r31]
#endif /* __v850e__ */
.size ___mulsi3,.-___mulsi3
#endif /* L_mulsi3 */
 
 
#ifdef L_udivsi3
.text
.global ___udivsi3
.type ___udivsi3,@function
___udivsi3:
#ifdef __v850__
mov 1,r12
mov 0,r10
cmp r6,r7
bnl .L12
movhi hi(-2147483648),r0,r13
cmp r0,r7
blt .L12
.L4:
shl 1,r7
shl 1,r12
cmp r6,r7
bnl .L12
cmp r0,r12
be .L8
mov r7,r19
and r13,r19
be .L4
br .L12
.L9:
cmp r7,r6
bl .L10
sub r7,r6
or r12,r10
.L10:
shr 1,r12
shr 1,r7
.L12:
cmp r0,r12
bne .L9
.L8:
jmp [r31]
 
#else /* defined(__v850e__) */
 
/* See comments at end of __mulsi3. */
mov r6, r10
divu r7, r10, r0
jmp [r31]
 
#endif /* __v850e__ */
 
.size ___udivsi3,.-___udivsi3
#endif
 
#ifdef L_divsi3
.text
.globl ___divsi3
.type ___divsi3,@function
___divsi3:
#ifdef __v850__
add -8,sp
st.w r31,4[sp]
st.w r22,0[sp]
mov 1,r22
tst r7,r7
bp .L3
subr r0,r7
subr r0,r22
.L3:
tst r6,r6
bp .L4
subr r0,r6
subr r0,r22
.L4:
jarl ___udivsi3,r31
cmp r0,r22
bp .L7
subr r0,r10
.L7:
ld.w 0[sp],r22
ld.w 4[sp],r31
add 8,sp
jmp [r31]
 
#else /* defined(__v850e__) */
 
/* See comments at end of __mulsi3. */
mov r6, r10
div r7, r10, r0
jmp [r31]
 
#endif /* __v850e__ */
 
.size ___divsi3,.-___divsi3
#endif
 
#ifdef L_umodsi3
.text
.globl ___umodsi3
.type ___umodsi3,@function
___umodsi3:
#ifdef __v850__
add -12,sp
st.w r31,8[sp]
st.w r7,4[sp]
st.w r6,0[sp]
jarl ___udivsi3,r31
ld.w 4[sp],r7
mov r10,r6
jarl ___mulsi3,r31
ld.w 0[sp],r6
subr r6,r10
ld.w 8[sp],r31
add 12,sp
jmp [r31]
 
#else /* defined(__v850e__) */
 
/* See comments at end of __mulsi3. */
divu r7, r6, r10
jmp [r31]
 
#endif /* __v850e__ */
 
.size ___umodsi3,.-___umodsi3
#endif /* L_umodsi3 */
 
#ifdef L_modsi3
.text
.globl ___modsi3
.type ___modsi3,@function
___modsi3:
#ifdef __v850__
add -12,sp
st.w r31,8[sp]
st.w r7,4[sp]
st.w r6,0[sp]
jarl ___divsi3,r31
ld.w 4[sp],r7
mov r10,r6
jarl ___mulsi3,r31
ld.w 0[sp],r6
subr r6,r10
ld.w 8[sp],r31
add 12,sp
jmp [r31]
 
#else /* defined(__v850e__) */
 
/* See comments at end of __mulsi3. */
div r7, r6, r10
jmp [r31]
 
#endif /* __v850e__ */
 
.size ___modsi3,.-___modsi3
#endif /* L_modsi3 */
 
#ifdef L_save_2
.text
.align 2
.globl __save_r2_r29
.type __save_r2_r29,@function
/* Allocate space and save registers 2, 20 .. 29 on the stack */
/* Called via: jalr __save_r2_r29,r10 */
__save_r2_r29:
#ifdef __EP__
mov ep,r1
addi -44,sp,sp
mov sp,ep
sst.w r29,0[ep]
sst.w r28,4[ep]
sst.w r27,8[ep]
sst.w r26,12[ep]
sst.w r25,16[ep]
sst.w r24,20[ep]
sst.w r23,24[ep]
sst.w r22,28[ep]
sst.w r21,32[ep]
sst.w r20,36[ep]
sst.w r2,40[ep]
mov r1,ep
#else
addi -44,sp,sp
st.w r29,0[sp]
st.w r28,4[sp]
st.w r27,8[sp]
st.w r26,12[sp]
st.w r25,16[sp]
st.w r24,20[sp]
st.w r23,24[sp]
st.w r22,28[sp]
st.w r21,32[sp]
st.w r20,36[sp]
st.w r2,40[sp]
#endif
jmp [r10]
.size __save_r2_r29,.-__save_r2_r29
 
/* Restore saved registers, deallocate stack and return to the user */
/* Called via: jr __return_r2_r29 */
.align 2
.globl __return_r2_r29
.type __return_r2_r29,@function
__return_r2_r29:
#ifdef __EP__
mov ep,r1
mov sp,ep
sld.w 0[ep],r29
sld.w 4[ep],r28
sld.w 8[ep],r27
sld.w 12[ep],r26
sld.w 16[ep],r25
sld.w 20[ep],r24
sld.w 24[ep],r23
sld.w 28[ep],r22
sld.w 32[ep],r21
sld.w 36[ep],r20
sld.w 40[ep],r2
addi 44,sp,sp
mov r1,ep
#else
ld.w 0[sp],r29
ld.w 4[sp],r28
ld.w 8[sp],r27
ld.w 12[sp],r26
ld.w 16[sp],r25
ld.w 20[sp],r24
ld.w 24[sp],r23
ld.w 28[sp],r22
ld.w 32[sp],r21
ld.w 36[sp],r20
ld.w 40[sp],r2
addi 44,sp,sp
#endif
jmp [r31]
.size __return_r2_r29,.-__return_r2_r29
#endif /* L_save_2 */
 
#ifdef L_save_20
.text
.align 2
.globl __save_r20_r29
.type __save_r20_r29,@function
/* Allocate space and save registers 20 .. 29 on the stack */
/* Called via: jalr __save_r20_r29,r10 */
__save_r20_r29:
#ifdef __EP__
mov ep,r1
addi -40,sp,sp
mov sp,ep
sst.w r29,0[ep]
sst.w r28,4[ep]
sst.w r27,8[ep]
sst.w r26,12[ep]
sst.w r25,16[ep]
sst.w r24,20[ep]
sst.w r23,24[ep]
sst.w r22,28[ep]
sst.w r21,32[ep]
sst.w r20,36[ep]
mov r1,ep
#else
addi -40,sp,sp
st.w r29,0[sp]
st.w r28,4[sp]
st.w r27,8[sp]
st.w r26,12[sp]
st.w r25,16[sp]
st.w r24,20[sp]
st.w r23,24[sp]
st.w r22,28[sp]
st.w r21,32[sp]
st.w r20,36[sp]
#endif
jmp [r10]
.size __save_r20_r29,.-__save_r20_r29
 
/* Restore saved registers, deallocate stack and return to the user */
/* Called via: jr __return_r20_r29 */
.align 2
.globl __return_r20_r29
.type __return_r20_r29,@function
__return_r20_r29:
#ifdef __EP__
mov ep,r1
mov sp,ep
sld.w 0[ep],r29
sld.w 4[ep],r28
sld.w 8[ep],r27
sld.w 12[ep],r26
sld.w 16[ep],r25
sld.w 20[ep],r24
sld.w 24[ep],r23
sld.w 28[ep],r22
sld.w 32[ep],r21
sld.w 36[ep],r20
addi 40,sp,sp
mov r1,ep
#else
ld.w 0[sp],r29
ld.w 4[sp],r28
ld.w 8[sp],r27
ld.w 12[sp],r26
ld.w 16[sp],r25
ld.w 20[sp],r24
ld.w 24[sp],r23
ld.w 28[sp],r22
ld.w 32[sp],r21
ld.w 36[sp],r20
addi 40,sp,sp
#endif
jmp [r31]
.size __return_r20_r29,.-__return_r20_r29
#endif /* L_save_20 */
 
#ifdef L_save_21
.text
.align 2
.globl __save_r21_r29
.type __save_r21_r29,@function
/* Allocate space and save registers 21 .. 29 on the stack */
/* Called via: jalr __save_r21_r29,r10 */
__save_r21_r29:
#ifdef __EP__
mov ep,r1
addi -36,sp,sp
mov sp,ep
sst.w r29,0[ep]
sst.w r28,4[ep]
sst.w r27,8[ep]
sst.w r26,12[ep]
sst.w r25,16[ep]
sst.w r24,20[ep]
sst.w r23,24[ep]
sst.w r22,28[ep]
sst.w r21,32[ep]
mov r1,ep
#else
addi -36,sp,sp
st.w r29,0[sp]
st.w r28,4[sp]
st.w r27,8[sp]
st.w r26,12[sp]
st.w r25,16[sp]
st.w r24,20[sp]
st.w r23,24[sp]
st.w r22,28[sp]
st.w r21,32[sp]
#endif
jmp [r10]
.size __save_r21_r29,.-__save_r21_r29
 
/* Restore saved registers, deallocate stack and return to the user */
/* Called via: jr __return_r21_r29 */
.align 2
.globl __return_r21_r29
.type __return_r21_r29,@function
__return_r21_r29:
#ifdef __EP__
mov ep,r1
mov sp,ep
sld.w 0[ep],r29
sld.w 4[ep],r28
sld.w 8[ep],r27
sld.w 12[ep],r26
sld.w 16[ep],r25
sld.w 20[ep],r24
sld.w 24[ep],r23
sld.w 28[ep],r22
sld.w 32[ep],r21
addi 36,sp,sp
mov r1,ep
#else
ld.w 0[sp],r29
ld.w 4[sp],r28
ld.w 8[sp],r27
ld.w 12[sp],r26
ld.w 16[sp],r25
ld.w 20[sp],r24
ld.w 24[sp],r23
ld.w 28[sp],r22
ld.w 32[sp],r21
addi 36,sp,sp
#endif
jmp [r31]
.size __return_r21_r29,.-__return_r21_r29
#endif /* L_save_21 */
 
#ifdef L_save_22
.text
.align 2
.globl __save_r22_r29
.type __save_r22_r29,@function
/* Allocate space and save registers 22 .. 29 on the stack */
/* Called via: jalr __save_r22_r29,r10 */
__save_r22_r29:
#ifdef __EP__
mov ep,r1
addi -32,sp,sp
mov sp,ep
sst.w r29,0[ep]
sst.w r28,4[ep]
sst.w r27,8[ep]
sst.w r26,12[ep]
sst.w r25,16[ep]
sst.w r24,20[ep]
sst.w r23,24[ep]
sst.w r22,28[ep]
mov r1,ep
#else
addi -32,sp,sp
st.w r29,0[sp]
st.w r28,4[sp]
st.w r27,8[sp]
st.w r26,12[sp]
st.w r25,16[sp]
st.w r24,20[sp]
st.w r23,24[sp]
st.w r22,28[sp]
#endif
jmp [r10]
.size __save_r22_r29,.-__save_r22_r29
 
/* Restore saved registers, deallocate stack and return to the user */
/* Called via: jr __return_r22_r29 */
.align 2
.globl __return_r22_r29
.type __return_r22_r29,@function
__return_r22_r29:
#ifdef __EP__
mov ep,r1
mov sp,ep
sld.w 0[ep],r29
sld.w 4[ep],r28
sld.w 8[ep],r27
sld.w 12[ep],r26
sld.w 16[ep],r25
sld.w 20[ep],r24
sld.w 24[ep],r23
sld.w 28[ep],r22
addi 32,sp,sp
mov r1,ep
#else
ld.w 0[sp],r29
ld.w 4[sp],r28
ld.w 8[sp],r27
ld.w 12[sp],r26
ld.w 16[sp],r25
ld.w 20[sp],r24
ld.w 24[sp],r23
ld.w 28[sp],r22
addi 32,sp,sp
#endif
jmp [r31]
.size __return_r22_r29,.-__return_r22_r29
#endif /* L_save_22 */
 
#ifdef L_save_23
.text
.align 2
.globl __save_r23_r29
.type __save_r23_r29,@function
/* Allocate space and save registers 23 .. 29 on the stack */
/* Called via: jalr __save_r23_r29,r10 */
__save_r23_r29:
#ifdef __EP__
mov ep,r1
addi -28,sp,sp
mov sp,ep
sst.w r29,0[ep]
sst.w r28,4[ep]
sst.w r27,8[ep]
sst.w r26,12[ep]
sst.w r25,16[ep]
sst.w r24,20[ep]
sst.w r23,24[ep]
mov r1,ep
#else
addi -28,sp,sp
st.w r29,0[sp]
st.w r28,4[sp]
st.w r27,8[sp]
st.w r26,12[sp]
st.w r25,16[sp]
st.w r24,20[sp]
st.w r23,24[sp]
#endif
jmp [r10]
.size __save_r23_r29,.-__save_r23_r29
 
/* Restore saved registers, deallocate stack and return to the user */
/* Called via: jr __return_r23_r29 */
.align 2
.globl __return_r23_r29
.type __return_r23_r29,@function
__return_r23_r29:
#ifdef __EP__
mov ep,r1
mov sp,ep
sld.w 0[ep],r29
sld.w 4[ep],r28
sld.w 8[ep],r27
sld.w 12[ep],r26
sld.w 16[ep],r25
sld.w 20[ep],r24
sld.w 24[ep],r23
addi 28,sp,sp
mov r1,ep
#else
ld.w 0[sp],r29
ld.w 4[sp],r28
ld.w 8[sp],r27
ld.w 12[sp],r26
ld.w 16[sp],r25
ld.w 20[sp],r24
ld.w 24[sp],r23
addi 28,sp,sp
#endif
jmp [r31]
.size __return_r23_r29,.-__return_r23_r29
#endif /* L_save_23 */
 
#ifdef L_save_24
.text
.align 2
.globl __save_r24_r29
.type __save_r24_r29,@function
/* Allocate space and save registers 24 .. 29 on the stack */
/* Called via: jalr __save_r24_r29,r10 */
__save_r24_r29:
#ifdef __EP__
mov ep,r1
addi -24,sp,sp
mov sp,ep
sst.w r29,0[ep]
sst.w r28,4[ep]
sst.w r27,8[ep]
sst.w r26,12[ep]
sst.w r25,16[ep]
sst.w r24,20[ep]
mov r1,ep
#else
addi -24,sp,sp
st.w r29,0[sp]
st.w r28,4[sp]
st.w r27,8[sp]
st.w r26,12[sp]
st.w r25,16[sp]
st.w r24,20[sp]
#endif
jmp [r10]
.size __save_r24_r29,.-__save_r24_r29
 
/* Restore saved registers, deallocate stack and return to the user */
/* Called via: jr __return_r24_r29 */
.align 2
.globl __return_r24_r29
.type __return_r24_r29,@function
__return_r24_r29:
#ifdef __EP__
mov ep,r1
mov sp,ep
sld.w 0[ep],r29
sld.w 4[ep],r28
sld.w 8[ep],r27
sld.w 12[ep],r26
sld.w 16[ep],r25
sld.w 20[ep],r24
addi 24,sp,sp
mov r1,ep
#else
ld.w 0[sp],r29
ld.w 4[sp],r28
ld.w 8[sp],r27
ld.w 12[sp],r26
ld.w 16[sp],r25
ld.w 20[sp],r24
addi 24,sp,sp
#endif
jmp [r31]
.size __return_r24_r29,.-__return_r24_r29
#endif /* L_save_24 */
 
#ifdef L_save_25
.text
.align 2
.globl __save_r25_r29
.type __save_r25_r29,@function
/* Allocate space and save registers 25 .. 29 on the stack */
/* Called via: jalr __save_r25_r29,r10 */
__save_r25_r29:
#ifdef __EP__
mov ep,r1
addi -20,sp,sp
mov sp,ep
sst.w r29,0[ep]
sst.w r28,4[ep]
sst.w r27,8[ep]
sst.w r26,12[ep]
sst.w r25,16[ep]
mov r1,ep
#else
addi -20,sp,sp
st.w r29,0[sp]
st.w r28,4[sp]
st.w r27,8[sp]
st.w r26,12[sp]
st.w r25,16[sp]
#endif
jmp [r10]
.size __save_r25_r29,.-__save_r25_r29
 
/* Restore saved registers, deallocate stack and return to the user */
/* Called via: jr __return_r25_r29 */
.align 2
.globl __return_r25_r29
.type __return_r25_r29,@function
__return_r25_r29:
#ifdef __EP__
mov ep,r1
mov sp,ep
sld.w 0[ep],r29
sld.w 4[ep],r28
sld.w 8[ep],r27
sld.w 12[ep],r26
sld.w 16[ep],r25
addi 20,sp,sp
mov r1,ep
#else
ld.w 0[sp],r29
ld.w 4[sp],r28
ld.w 8[sp],r27
ld.w 12[sp],r26
ld.w 16[sp],r25
addi 20,sp,sp
#endif
jmp [r31]
.size __return_r25_r29,.-__return_r25_r29
#endif /* L_save_25 */
 
#ifdef L_save_26
.text
.align 2
.globl __save_r26_r29
.type __save_r26_r29,@function
/* Allocate space and save registers 26 .. 29 on the stack */
/* Called via: jalr __save_r26_r29,r10 */
__save_r26_r29:
#ifdef __EP__
mov ep,r1
add -16,sp
mov sp,ep
sst.w r29,0[ep]
sst.w r28,4[ep]
sst.w r27,8[ep]
sst.w r26,12[ep]
mov r1,ep
#else
add -16,sp
st.w r29,0[sp]
st.w r28,4[sp]
st.w r27,8[sp]
st.w r26,12[sp]
#endif
jmp [r10]
.size __save_r26_r29,.-__save_r26_r29
 
/* Restore saved registers, deallocate stack and return to the user */
/* Called via: jr __return_r26_r29 */
.align 2
.globl __return_r26_r29
.type __return_r26_r29,@function
__return_r26_r29:
#ifdef __EP__
mov ep,r1
mov sp,ep
sld.w 0[ep],r29
sld.w 4[ep],r28
sld.w 8[ep],r27
sld.w 12[ep],r26
addi 16,sp,sp
mov r1,ep
#else
ld.w 0[sp],r29
ld.w 4[sp],r28
ld.w 8[sp],r27
ld.w 12[sp],r26
addi 16,sp,sp
#endif
jmp [r31]
.size __return_r26_r29,.-__return_r26_r29
#endif /* L_save_26 */
 
#ifdef L_save_27
.text
.align 2
.globl __save_r27_r29
.type __save_r27_r29,@function
/* Allocate space and save registers 27 .. 29 on the stack */
/* Called via: jalr __save_r27_r29,r10 */
__save_r27_r29:
add -12,sp
st.w r29,0[sp]
st.w r28,4[sp]
st.w r27,8[sp]
jmp [r10]
.size __save_r27_r29,.-__save_r27_r29
 
/* Restore saved registers, deallocate stack and return to the user */
/* Called via: jr __return_r27_r29 */
.align 2
.globl __return_r27_r29
.type __return_r27_r29,@function
__return_r27_r29:
ld.w 0[sp],r29
ld.w 4[sp],r28
ld.w 8[sp],r27
add 12,sp
jmp [r31]
.size __return_r27_r29,.-__return_r27_r29
#endif /* L_save_27 */
 
#ifdef L_save_28
.text
.align 2
.globl __save_r28_r29
.type __save_r28_r29,@function
/* Allocate space and save registers 28,29 on the stack */
/* Called via: jalr __save_r28_r29,r10 */
__save_r28_r29:
add -8,sp
st.w r29,0[sp]
st.w r28,4[sp]
jmp [r10]
.size __save_r28_r29,.-__save_r28_r29
 
/* Restore saved registers, deallocate stack and return to the user */
/* Called via: jr __return_r28_r29 */
.align 2
.globl __return_r28_r29
.type __return_r28_r29,@function
__return_r28_r29:
ld.w 0[sp],r29
ld.w 4[sp],r28
add 8,sp
jmp [r31]
.size __return_r28_r29,.-__return_r28_r29
#endif /* L_save_28 */
 
#ifdef L_save_29
.text
.align 2
.globl __save_r29
.type __save_r29,@function
/* Allocate space and save register 29 on the stack */
/* Called via: jalr __save_r29,r10 */
__save_r29:
add -4,sp
st.w r29,0[sp]
jmp [r10]
.size __save_r29,.-__save_r29
 
/* Restore saved register 29, deallocate stack and return to the user */
/* Called via: jr __return_r29 */
.align 2
.globl __return_r29
.type __return_r29,@function
__return_r29:
ld.w 0[sp],r29
add 4,sp
jmp [r31]
.size __return_r29,.-__return_r29
#endif /* L_save_29 */
 
#ifdef L_save_2c
.text
.align 2
.globl __save_r2_r31
.type __save_r2_r31,@function
/* Allocate space and save registers 20 .. 29, 31 on the stack. */
/* Also allocate space for the argument save area. */
/* Called via: jalr __save_r2_r31,r10. */
__save_r2_r31:
#ifdef __EP__
mov ep,r1
addi -64,sp,sp
mov sp,ep
sst.w r29,16[ep]
sst.w r28,20[ep]
sst.w r27,24[ep]
sst.w r26,28[ep]
sst.w r25,32[ep]
sst.w r24,36[ep]
sst.w r23,40[ep]
sst.w r22,44[ep]
sst.w r21,48[ep]
sst.w r20,52[ep]
sst.w r2,56[ep]
sst.w r31,60[ep]
mov r1,ep
#else
addi -64,sp,sp
st.w r29,16[sp]
st.w r28,20[sp]
st.w r27,24[sp]
st.w r26,28[sp]
st.w r25,32[sp]
st.w r24,36[sp]
st.w r23,40[sp]
st.w r22,44[sp]
st.w r21,48[sp]
st.w r20,52[sp]
st.w r2,56[sp]
st.w r31,60[sp]
#endif
jmp [r10]
.size __save_r2_r31,.-__save_r2_r31
 
/* Restore saved registers, deallocate stack and return to the user */
/* Called via: jr __return_r20_r31 */
.align 2
.globl __return_r2_r31
.type __return_r2_r31,@function
__return_r2_r31:
#ifdef __EP__
mov ep,r1
mov sp,ep
sld.w 16[ep],r29
sld.w 20[ep],r28
sld.w 24[ep],r27
sld.w 28[ep],r26
sld.w 32[ep],r25
sld.w 36[ep],r24
sld.w 40[ep],r23
sld.w 44[ep],r22
sld.w 48[ep],r21
sld.w 52[ep],r20
sld.w 56[ep],r2
sld.w 60[ep],r31
addi 64,sp,sp
mov r1,ep
#else
ld.w 16[sp],r29
ld.w 20[sp],r28
ld.w 24[sp],r27
ld.w 28[sp],r26
ld.w 32[sp],r25
ld.w 36[sp],r24
ld.w 40[sp],r23
ld.w 44[sp],r22
ld.w 48[sp],r21
ld.w 52[sp],r20
ld.w 56[sp],r2
ld.w 60[sp],r31
addi 64,sp,sp
#endif
jmp [r31]
.size __return_r2_r31,.-__return_r2_r31
#endif /* L_save_2c */
 
#ifdef L_save_20c
.text
.align 2
.globl __save_r20_r31
.type __save_r20_r31,@function
/* Allocate space and save registers 20 .. 29, 31 on the stack */
/* Also allocate space for the argument save area */
/* Called via: jalr __save_r20_r31,r10 */
__save_r20_r31:
#ifdef __EP__
mov ep,r1
addi -60,sp,sp
mov sp,ep
sst.w r29,16[ep]
sst.w r28,20[ep]
sst.w r27,24[ep]
sst.w r26,28[ep]
sst.w r25,32[ep]
sst.w r24,36[ep]
sst.w r23,40[ep]
sst.w r22,44[ep]
sst.w r21,48[ep]
sst.w r20,52[ep]
sst.w r31,56[ep]
mov r1,ep
#else
addi -60,sp,sp
st.w r29,16[sp]
st.w r28,20[sp]
st.w r27,24[sp]
st.w r26,28[sp]
st.w r25,32[sp]
st.w r24,36[sp]
st.w r23,40[sp]
st.w r22,44[sp]
st.w r21,48[sp]
st.w r20,52[sp]
st.w r31,56[sp]
#endif
jmp [r10]
.size __save_r20_r31,.-__save_r20_r31
 
/* Restore saved registers, deallocate stack and return to the user */
/* Called via: jr __return_r20_r31 */
.align 2
.globl __return_r20_r31
.type __return_r20_r31,@function
__return_r20_r31:
#ifdef __EP__
mov ep,r1
mov sp,ep
sld.w 16[ep],r29
sld.w 20[ep],r28
sld.w 24[ep],r27
sld.w 28[ep],r26
sld.w 32[ep],r25
sld.w 36[ep],r24
sld.w 40[ep],r23
sld.w 44[ep],r22
sld.w 48[ep],r21
sld.w 52[ep],r20
sld.w 56[ep],r31
addi 60,sp,sp
mov r1,ep
#else
ld.w 16[sp],r29
ld.w 20[sp],r28
ld.w 24[sp],r27
ld.w 28[sp],r26
ld.w 32[sp],r25
ld.w 36[sp],r24
ld.w 40[sp],r23
ld.w 44[sp],r22
ld.w 48[sp],r21
ld.w 52[sp],r20
ld.w 56[sp],r31
addi 60,sp,sp
#endif
jmp [r31]
.size __return_r20_r31,.-__return_r20_r31
#endif /* L_save_20c */
 
#ifdef L_save_21c
.text
.align 2
.globl __save_r21_r31
.type __save_r21_r31,@function
/* Allocate space and save registers 21 .. 29, 31 on the stack */
/* Also allocate space for the argument save area */
/* Called via: jalr __save_r21_r31,r10 */
__save_r21_r31:
#ifdef __EP__
mov ep,r1
addi -56,sp,sp
mov sp,ep
sst.w r29,16[ep]
sst.w r28,20[ep]
sst.w r27,24[ep]
sst.w r26,28[ep]
sst.w r25,32[ep]
sst.w r24,36[ep]
sst.w r23,40[ep]
sst.w r22,44[ep]
sst.w r21,48[ep]
sst.w r31,52[ep]
mov r1,ep
#else
addi -56,sp,sp
st.w r29,16[sp]
st.w r28,20[sp]
st.w r27,24[sp]
st.w r26,28[sp]
st.w r25,32[sp]
st.w r24,36[sp]
st.w r23,40[sp]
st.w r22,44[sp]
st.w r21,48[sp]
st.w r31,52[sp]
#endif
jmp [r10]
.size __save_r21_r31,.-__save_r21_r31
 
/* Restore saved registers, deallocate stack and return to the user */
/* Called via: jr __return_r21_r31 */
.align 2
.globl __return_r21_r31
.type __return_r21_r31,@function
__return_r21_r31:
#ifdef __EP__
mov ep,r1
mov sp,ep
sld.w 16[ep],r29
sld.w 20[ep],r28
sld.w 24[ep],r27
sld.w 28[ep],r26
sld.w 32[ep],r25
sld.w 36[ep],r24
sld.w 40[ep],r23
sld.w 44[ep],r22
sld.w 48[ep],r21
sld.w 52[ep],r31
addi 56,sp,sp
mov r1,ep
#else
ld.w 16[sp],r29
ld.w 20[sp],r28
ld.w 24[sp],r27
ld.w 28[sp],r26
ld.w 32[sp],r25
ld.w 36[sp],r24
ld.w 40[sp],r23
ld.w 44[sp],r22
ld.w 48[sp],r21
ld.w 52[sp],r31
addi 56,sp,sp
#endif
jmp [r31]
.size __return_r21_r31,.-__return_r21_r31
#endif /* L_save_21c */
 
#ifdef L_save_22c
.text
.align 2
.globl __save_r22_r31
.type __save_r22_r31,@function
/* Allocate space and save registers 22 .. 29, 31 on the stack */
/* Also allocate space for the argument save area */
/* Called via: jalr __save_r22_r31,r10 */
__save_r22_r31:
#ifdef __EP__
mov ep,r1
addi -52,sp,sp
mov sp,ep
sst.w r29,16[ep]
sst.w r28,20[ep]
sst.w r27,24[ep]
sst.w r26,28[ep]
sst.w r25,32[ep]
sst.w r24,36[ep]
sst.w r23,40[ep]
sst.w r22,44[ep]
sst.w r31,48[ep]
mov r1,ep
#else
addi -52,sp,sp
st.w r29,16[sp]
st.w r28,20[sp]
st.w r27,24[sp]
st.w r26,28[sp]
st.w r25,32[sp]
st.w r24,36[sp]
st.w r23,40[sp]
st.w r22,44[sp]
st.w r31,48[sp]
#endif
jmp [r10]
.size __save_r22_r31,.-__save_r22_r31
 
/* Restore saved registers, deallocate stack and return to the user */
/* Called via: jr __return_r22_r31 */
.align 2
.globl __return_r22_r31
.type __return_r22_r31,@function
__return_r22_r31:
#ifdef __EP__
mov ep,r1
mov sp,ep
sld.w 16[ep],r29
sld.w 20[ep],r28
sld.w 24[ep],r27
sld.w 28[ep],r26
sld.w 32[ep],r25
sld.w 36[ep],r24
sld.w 40[ep],r23
sld.w 44[ep],r22
sld.w 48[ep],r31
addi 52,sp,sp
mov r1,ep
#else
ld.w 16[sp],r29
ld.w 20[sp],r28
ld.w 24[sp],r27
ld.w 28[sp],r26
ld.w 32[sp],r25
ld.w 36[sp],r24
ld.w 40[sp],r23
ld.w 44[sp],r22
ld.w 48[sp],r31
addi 52,sp,sp
#endif
jmp [r31]
.size __return_r22_r31,.-__return_r22_r31
#endif /* L_save_22c */
 
#ifdef L_save_23c
.text
.align 2
.globl __save_r23_r31
.type __save_r23_r31,@function
/* Allocate space and save registers 23 .. 29, 31 on the stack */
/* Also allocate space for the argument save area */
/* Called via: jalr __save_r23_r31,r10 */
__save_r23_r31:
#ifdef __EP__
mov ep,r1
addi -48,sp,sp
mov sp,ep
sst.w r29,16[ep]
sst.w r28,20[ep]
sst.w r27,24[ep]
sst.w r26,28[ep]
sst.w r25,32[ep]
sst.w r24,36[ep]
sst.w r23,40[ep]
sst.w r31,44[ep]
mov r1,ep
#else
addi -48,sp,sp
st.w r29,16[sp]
st.w r28,20[sp]
st.w r27,24[sp]
st.w r26,28[sp]
st.w r25,32[sp]
st.w r24,36[sp]
st.w r23,40[sp]
st.w r31,44[sp]
#endif
jmp [r10]
.size __save_r23_r31,.-__save_r23_r31
 
/* Restore saved registers, deallocate stack and return to the user */
/* Called via: jr __return_r23_r31 */
.align 2
.globl __return_r23_r31
.type __return_r23_r31,@function
__return_r23_r31:
#ifdef __EP__
mov ep,r1
mov sp,ep
sld.w 16[ep],r29
sld.w 20[ep],r28
sld.w 24[ep],r27
sld.w 28[ep],r26
sld.w 32[ep],r25
sld.w 36[ep],r24
sld.w 40[ep],r23
sld.w 44[ep],r31
addi 48,sp,sp
mov r1,ep
#else
ld.w 16[sp],r29
ld.w 20[sp],r28
ld.w 24[sp],r27
ld.w 28[sp],r26
ld.w 32[sp],r25
ld.w 36[sp],r24
ld.w 40[sp],r23
ld.w 44[sp],r31
addi 48,sp,sp
#endif
jmp [r31]
.size __return_r23_r31,.-__return_r23_r31
#endif /* L_save_23c */
 
#ifdef L_save_24c
.text
.align 2
.globl __save_r24_r31
.type __save_r24_r31,@function
/* Allocate space and save registers 24 .. 29, 31 on the stack */
/* Also allocate space for the argument save area */
/* Called via: jalr __save_r24_r31,r10 */
__save_r24_r31:
#ifdef __EP__
mov ep,r1
addi -44,sp,sp
mov sp,ep
sst.w r29,16[ep]
sst.w r28,20[ep]
sst.w r27,24[ep]
sst.w r26,28[ep]
sst.w r25,32[ep]
sst.w r24,36[ep]
sst.w r31,40[ep]
mov r1,ep
#else
addi -44,sp,sp
st.w r29,16[sp]
st.w r28,20[sp]
st.w r27,24[sp]
st.w r26,28[sp]
st.w r25,32[sp]
st.w r24,36[sp]
st.w r31,40[sp]
#endif
jmp [r10]
.size __save_r24_r31,.-__save_r24_r31
 
/* Restore saved registers, deallocate stack and return to the user */
/* Called via: jr __return_r24_r31 */
.align 2
.globl __return_r24_r31
.type __return_r24_r31,@function
__return_r24_r31:
#ifdef __EP__
mov ep,r1
mov sp,ep
sld.w 16[ep],r29
sld.w 20[ep],r28
sld.w 24[ep],r27
sld.w 28[ep],r26
sld.w 32[ep],r25
sld.w 36[ep],r24
sld.w 40[ep],r31
addi 44,sp,sp
mov r1,ep
#else
ld.w 16[sp],r29
ld.w 20[sp],r28
ld.w 24[sp],r27
ld.w 28[sp],r26
ld.w 32[sp],r25
ld.w 36[sp],r24
ld.w 40[sp],r31
addi 44,sp,sp
#endif
jmp [r31]
.size __return_r24_r31,.-__return_r24_r31
#endif /* L_save_24c */
 
#ifdef L_save_25c
.text
.align 2
.globl __save_r25_r31
.type __save_r25_r31,@function
/* Allocate space and save registers 25 .. 29, 31 on the stack */
/* Also allocate space for the argument save area */
/* Called via: jalr __save_r25_r31,r10 */
__save_r25_r31:
#ifdef __EP__
mov ep,r1
addi -40,sp,sp
mov sp,ep
sst.w r29,16[ep]
sst.w r28,20[ep]
sst.w r27,24[ep]
sst.w r26,28[ep]
sst.w r25,32[ep]
sst.w r31,36[ep]
mov r1,ep
#else
addi -40,sp,sp
st.w r29,16[sp]
st.w r28,20[sp]
st.w r27,24[sp]
st.w r26,28[sp]
st.w r25,32[sp]
st.w r31,36[sp]
#endif
jmp [r10]
.size __save_r25_r31,.-__save_r25_r31
 
/* Restore saved registers, deallocate stack and return to the user */
/* Called via: jr __return_r25_r31 */
.align 2
.globl __return_r25_r31
.type __return_r25_r31,@function
__return_r25_r31:
#ifdef __EP__
mov ep,r1
mov sp,ep
sld.w 16[ep],r29
sld.w 20[ep],r28
sld.w 24[ep],r27
sld.w 28[ep],r26
sld.w 32[ep],r25
sld.w 36[ep],r31
addi 40,sp,sp
mov r1,ep
#else
ld.w 16[sp],r29
ld.w 20[sp],r28
ld.w 24[sp],r27
ld.w 28[sp],r26
ld.w 32[sp],r25
ld.w 36[sp],r31
addi 40,sp,sp
#endif
jmp [r31]
.size __return_r25_r31,.-__return_r25_r31
#endif /* L_save_25c */
 
#ifdef L_save_26c
.text
.align 2
.globl __save_r26_r31
.type __save_r26_r31,@function
/* Allocate space and save registers 26 .. 29, 31 on the stack */
/* Also allocate space for the argument save area */
/* Called via: jalr __save_r26_r31,r10 */
__save_r26_r31:
#ifdef __EP__
mov ep,r1
addi -36,sp,sp
mov sp,ep
sst.w r29,16[ep]
sst.w r28,20[ep]
sst.w r27,24[ep]
sst.w r26,28[ep]
sst.w r31,32[ep]
mov r1,ep
#else
addi -36,sp,sp
st.w r29,16[sp]
st.w r28,20[sp]
st.w r27,24[sp]
st.w r26,28[sp]
st.w r31,32[sp]
#endif
jmp [r10]
.size __save_r26_r31,.-__save_r26_r31
 
/* Restore saved registers, deallocate stack and return to the user */
/* Called via: jr __return_r26_r31 */
.align 2
.globl __return_r26_r31
.type __return_r26_r31,@function
__return_r26_r31:
#ifdef __EP__
mov ep,r1
mov sp,ep
sld.w 16[ep],r29
sld.w 20[ep],r28
sld.w 24[ep],r27
sld.w 28[ep],r26
sld.w 32[ep],r31
addi 36,sp,sp
mov r1,ep
#else
ld.w 16[sp],r29
ld.w 20[sp],r28
ld.w 24[sp],r27
ld.w 28[sp],r26
ld.w 32[sp],r31
addi 36,sp,sp
#endif
jmp [r31]
.size __return_r26_r31,.-__return_r26_r31
#endif /* L_save_26c */
 
#ifdef L_save_27c
.text
.align 2
.globl __save_r27_r31
.type __save_r27_r31,@function
/* Allocate space and save registers 27 .. 29, 31 on the stack */
/* Also allocate space for the argument save area */
/* Called via: jalr __save_r27_r31,r10 */
__save_r27_r31:
#ifdef __EP__
mov ep,r1
addi -32,sp,sp
mov sp,ep
sst.w r29,16[ep]
sst.w r28,20[ep]
sst.w r27,24[ep]
sst.w r31,28[ep]
mov r1,ep
#else
addi -32,sp,sp
st.w r29,16[sp]
st.w r28,20[sp]
st.w r27,24[sp]
st.w r31,28[sp]
#endif
jmp [r10]
.size __save_r27_r31,.-__save_r27_r31
 
/* Restore saved registers, deallocate stack and return to the user */
/* Called via: jr __return_r27_r31 */
.align 2
.globl __return_r27_r31
.type __return_r27_r31,@function
__return_r27_r31:
#ifdef __EP__
mov ep,r1
mov sp,ep
sld.w 16[ep],r29
sld.w 20[ep],r28
sld.w 24[ep],r27
sld.w 28[ep],r31
addi 32,sp,sp
mov r1,ep
#else
ld.w 16[sp],r29
ld.w 20[sp],r28
ld.w 24[sp],r27
ld.w 28[sp],r31
addi 32,sp,sp
#endif
jmp [r31]
.size __return_r27_r31,.-__return_r27_r31
#endif /* L_save_27c */
 
#ifdef L_save_28c
.text
.align 2
.globl __save_r28_r31
.type __save_r28_r31,@function
/* Allocate space and save registers 28 .. 29, 31 on the stack */
/* Also allocate space for the argument save area */
/* Called via: jalr __save_r28_r31,r10 */
__save_r28_r31:
addi -28,sp,sp
st.w r29,16[sp]
st.w r28,20[sp]
st.w r31,24[sp]
jmp [r10]
.size __save_r28_r31,.-__save_r28_r31
 
/* Restore saved registers, deallocate stack and return to the user */
/* Called via: jr __return_r28_r31 */
.align 2
.globl __return_r28_r31
.type __return_r28_r31,@function
__return_r28_r31:
ld.w 16[sp],r29
ld.w 20[sp],r28
ld.w 24[sp],r31
addi 28,sp,sp
jmp [r31]
.size __return_r28_r31,.-__return_r28_r31
#endif /* L_save_28c */
 
#ifdef L_save_29c
.text
.align 2
.globl __save_r29_r31
.type __save_r29_r31,@function
/* Allocate space and save registers 29 & 31 on the stack */
/* Also allocate space for the argument save area */
/* Called via: jalr __save_r29_r31,r10 */
__save_r29_r31:
addi -24,sp,sp
st.w r29,16[sp]
st.w r31,20[sp]
jmp [r10]
.size __save_r29_r31,.-__save_r29_r31
 
/* Restore saved registers, deallocate stack and return to the user */
/* Called via: jr __return_r29_r31 */
.align 2
.globl __return_r29_r31
.type __return_r29_r31,@function
__return_r29_r31:
ld.w 16[sp],r29
ld.w 20[sp],r31
addi 24,sp,sp
jmp [r31]
.size __return_r29_r31,.-__return_r29_r31
#endif /* L_save_29c */
 
#ifdef L_save_31c
.text
.align 2
.globl __save_r31
.type __save_r31,@function
/* Allocate space and save register 31 on the stack. */
/* Also allocate space for the argument save area. */
/* Called via: jalr __save_r31,r10 */
__save_r31:
addi -20,sp,sp
st.w r31,16[sp]
jmp [r10]
.size __save_r31,.-__save_r31
 
/* Restore saved registers, deallocate stack and return to the user. */
/* Called via: jr __return_r31 */
.align 2
.globl __return_r31
.type __return_r31,@function
__return_r31:
ld.w 16[sp],r31
addi 20,sp,sp
jmp [r31]
.size __return_r31,.-__return_r31
#endif /* L_save_31c */
 
#ifdef L_save_varargs
.text
.align 2
.globl __save_r6_r9
.type __save_r6_r9,@function
/* Save registers 6 .. 9 on the stack for variable argument functions. */
/* Called via: jalr __save_r6_r9,r10 */
__save_r6_r9:
#ifdef __EP__
mov ep,r1
mov sp,ep
sst.w r6,0[ep]
sst.w r7,4[ep]
sst.w r8,8[ep]
sst.w r9,12[ep]
mov r1,ep
#else
st.w r6,0[sp]
st.w r7,4[sp]
st.w r8,8[sp]
st.w r9,12[sp]
#endif
jmp [r10]
.size __save_r6_r9,.-__save_r6_r9
#endif /* L_save_varargs */
 
#ifdef L_save_interrupt
.text
.align 2
.globl __save_interrupt
.type __save_interrupt,@function
/* Save registers ep, gp and r1 on the stack and load ep and gp with their expected values. */
/* Note, 12 bytes of stack have already been allocated. */
/* Called via: jalr __save_interrupt,r10 */
__save_interrupt:
st.w ep,0[sp]
st.w gp,4[sp]
st.w r1,8[sp]
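/* Load the 32-bit addresses of the __ep and __gp symbols into ep and gp.
   The movhi/movea pair builds a full address from the hi()/lo() halves
   of the symbol (the usual V850 high/low relocation pairing). */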
movhi hi(__ep),r0,ep
movea lo(__ep),ep,ep
movhi hi(__gp),r0,gp
movea lo(__gp),gp,gp
jmp [r10]
.size __save_interrupt,.-__save_interrupt
 
/* Restore saved registers, deallocate stack and return from the interrupt. */
/* Called via: jr __return_interrupt */
.align 2
.globl __return_interrupt
.type __return_interrupt,@function
__return_interrupt:
ld.w 0[sp],ep
ld.w 4[sp],gp
ld.w 8[sp],r1
ld.w 12[sp],r10
addi 16,sp,sp
reti
.size __return_interrupt,.-__return_interrupt
#endif /* L_save_interrupt */
 
#ifdef L_save_all_interrupt
.text
.align 2
.globl __save_all_interrupt
.type __save_all_interrupt,@function
/* Save all registers except for those saved in __save_interrupt. */
/* Allocate enough stack for all of the registers & 16 bytes of space. */
/* Called via: jalr __save_all_interrupt,r10 */
__save_all_interrupt:
addi -120,sp,sp
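/* 120 bytes = the 16-byte argument save area plus 26 registers * 4 bytes
   (r2, gp, r6-r9, r11-r29 and r31), stored at offsets 16 .. 116 below. */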
#ifdef __EP__
mov ep,r1
mov sp,ep
sst.w r31,116[ep]
sst.w r2,112[ep]
sst.w gp,108[ep]
sst.w r6,104[ep]
sst.w r7,100[ep]
sst.w r8,96[ep]
sst.w r9,92[ep]
sst.w r11,88[ep]
sst.w r12,84[ep]
sst.w r13,80[ep]
sst.w r14,76[ep]
sst.w r15,72[ep]
sst.w r16,68[ep]
sst.w r17,64[ep]
sst.w r18,60[ep]
sst.w r19,56[ep]
sst.w r20,52[ep]
sst.w r21,48[ep]
sst.w r22,44[ep]
sst.w r23,40[ep]
sst.w r24,36[ep]
sst.w r25,32[ep]
sst.w r26,28[ep]
sst.w r27,24[ep]
sst.w r28,20[ep]
sst.w r29,16[ep]
mov r1,ep
#else
st.w r31,116[sp]
st.w r2,112[sp]
st.w gp,108[sp]
st.w r6,104[sp]
st.w r7,100[sp]
st.w r8,96[sp]
st.w r9,92[sp]
st.w r11,88[sp]
st.w r12,84[sp]
st.w r13,80[sp]
st.w r14,76[sp]
st.w r15,72[sp]
st.w r16,68[sp]
st.w r17,64[sp]
st.w r18,60[sp]
st.w r19,56[sp]
st.w r20,52[sp]
st.w r21,48[sp]
st.w r22,44[sp]
st.w r23,40[sp]
st.w r24,36[sp]
st.w r25,32[sp]
st.w r26,28[sp]
st.w r27,24[sp]
st.w r28,20[sp]
st.w r29,16[sp]
#endif
jmp [r10]
.size __save_all_interrupt,.-__save_all_interrupt
 
.globl __restore_all_interrupt
.type __restore_all_interrupt,@function
/* Restore all registers saved in __save_all_interrupt and
deallocate the stack space. */
/* Called via: jalr __restore_all_interrupt,r10 */
__restore_all_interrupt:
#ifdef __EP__
mov ep,r1
mov sp,ep
sld.w 116[ep],r31
sld.w 112[ep],r2
sld.w 108[ep],gp
sld.w 104[ep],r6
sld.w 100[ep],r7
sld.w 96[ep],r8
sld.w 92[ep],r9
sld.w 88[ep],r11
sld.w 84[ep],r12
sld.w 80[ep],r13
sld.w 76[ep],r14
sld.w 72[ep],r15
sld.w 68[ep],r16
sld.w 64[ep],r17
sld.w 60[ep],r18
sld.w 56[ep],r19
sld.w 52[ep],r20
sld.w 48[ep],r21
sld.w 44[ep],r22
sld.w 40[ep],r23
sld.w 36[ep],r24
sld.w 32[ep],r25
sld.w 28[ep],r26
sld.w 24[ep],r27
sld.w 20[ep],r28
sld.w 16[ep],r29
mov r1,ep
#else
ld.w 116[sp],r31
ld.w 112[sp],r2
ld.w 108[sp],gp
ld.w 104[sp],r6
ld.w 100[sp],r7
ld.w 96[sp],r8
ld.w 92[sp],r9
ld.w 88[sp],r11
ld.w 84[sp],r12
ld.w 80[sp],r13
ld.w 76[sp],r14
ld.w 72[sp],r15
ld.w 68[sp],r16
ld.w 64[sp],r17
ld.w 60[sp],r18
ld.w 56[sp],r19
ld.w 52[sp],r20
ld.w 48[sp],r21
ld.w 44[sp],r22
ld.w 40[sp],r23
ld.w 36[sp],r24
ld.w 32[sp],r25
ld.w 28[sp],r26
ld.w 24[sp],r27
ld.w 20[sp],r28
ld.w 16[sp],r29
#endif
addi 120,sp,sp
jmp [r10]
.size __restore_all_interrupt,.-__restore_all_interrupt
#endif /* L_save_all_interrupt */
 
#if defined __v850e__
#ifdef L_callt_save_r2_r29
/* Put these functions into the call table area. */
.call_table_text
/* Allocate space and save registers 2, 20 .. 29 on the stack. */
/* Called via: callt ctoff(__callt_save_r2_r29). */
.align 2
.L_save_r2_r29:
add -4, sp
st.w r2, 0[sp]
prepare {r20 - r29}, 0
ctret
 
/* Restore saved registers, deallocate stack and return to the user. */
/* Called via: callt ctoff(__callt_return_r2_r29). */
.align 2
.L_return_r2_r29:
dispose 0, {r20-r29}
ld.w 0[sp], r2
add 4, sp
jmp [r31]
 
/* Place the offsets of the start of these routines into the call table. */
.call_table_data
 
.global __callt_save_r2_r29
.type __callt_save_r2_r29,@function
__callt_save_r2_r29: .short ctoff(.L_save_r2_r29)
.global __callt_return_r2_r29
.type __callt_return_r2_r29,@function
__callt_return_r2_r29: .short ctoff(.L_return_r2_r29)
#endif /* L_callt_save_r2_r29 */
 
#ifdef L_callt_save_r2_r31
/* Put these functions into the call table area. */
.call_table_text
/* Allocate space and save registers 2 and 20 .. 29, 31 on the stack. */
/* Also allocate space for the argument save area. */
/* Called via: callt ctoff(__callt_save_r2_r31). */
.align 2
.L_save_r2_r31:
add -4, sp
st.w r2, 0[sp]
prepare {r20 - r29, r31}, 4
ctret
 
/* Restore saved registers, deallocate stack and return to the user. */
/* Called via: callt ctoff(__callt_return_r2_r31). */
.align 2
.L_return_r2_r31:
dispose 4, {r20 - r29, r31}
ld.w 0[sp], r2
addi 4, sp, sp
jmp [r31]
 
/* Place the offsets of the start of these routines into the call table. */
.call_table_data
 
.global __callt_save_r2_r31
.type __callt_save_r2_r31,@function
__callt_save_r2_r31: .short ctoff(.L_save_r2_r31)
.global __callt_return_r2_r31
.type __callt_return_r2_r31,@function
__callt_return_r2_r31: .short ctoff(.L_return_r2_r31)
#endif /* L_callt_save_r2_r31 */
 
 
#ifdef L_callt_save_r6_r9
/* Put these functions into the call table area. */
.call_table_text
/* Save registers r6 - r9 onto the stack in the space reserved for them.
Used by variable argument functions.
Called via: callt ctoff(__callt_save_r6_r9). */
.align 2
.L_save_r6_r9:
#ifdef __EP__
mov ep,r1
mov sp,ep
sst.w r6,0[ep]
sst.w r7,4[ep]
sst.w r8,8[ep]
sst.w r9,12[ep]
mov r1,ep
#else
st.w r6,0[sp]
st.w r7,4[sp]
st.w r8,8[sp]
st.w r9,12[sp]
#endif
ctret
 
/* Place the offset of the start of this routine into the call table. */
.call_table_data
 
.global __callt_save_r6_r9
.type __callt_save_r6_r9,@function
__callt_save_r6_r9: .short ctoff(.L_save_r6_r9)
#endif /* L_callt_save_r6_r9 */
 
#ifdef L_callt_save_interrupt
/* Put these functions into the call table area. */
.call_table_text
/* Save registers r1, ep, gp, r10 on stack and load up with expected values. */
/* Called via: callt ctoff(__callt_save_interrupt). */
.align 2
.L_save_interrupt:
/* SP has already been moved before callt ctoff(_save_interrupt). */
/* addi -24, sp, sp */
st.w ep, 0[sp]
st.w gp, 4[sp]
st.w r1, 8[sp]
/* R10 has already been saved before callt ctoff(_save_interrupt). */
/* st.w r10, 12[sp] */
mov hilo(__ep),ep
mov hilo(__gp),gp
ctret
 
/* Restore saved registers, deallocate stack and return from the interrupt. */
/* Called via: callt ctoff(__callt_restore_interrupt). */
.align 2
.globl __return_interrupt
.type __return_interrupt,@function
.L_return_interrupt:
ld.w 20[sp], r1
ldsr r1, ctpsw
ld.w 16[sp], r1
ldsr r1, ctpc
ld.w 12[sp], r10
ld.w 8[sp], r1
ld.w 4[sp], gp
ld.w 0[sp], ep
addi 24, sp, sp
reti
 
/* Place the offsets of the start of these routines into the call table. */
.call_table_data
 
.global __callt_save_interrupt
.type __callt_save_interrupt,@function
__callt_save_interrupt: .short ctoff(.L_save_interrupt)
 
.global __callt_return_interrupt
.type __callt_return_interrupt,@function
__callt_return_interrupt: .short ctoff(.L_return_interrupt)
#endif /* L_callt_save_interrupt */
 
#ifdef L_callt_save_all_interrupt
/* Put these functions into the call table area. */
.call_table_text
/* Save all registers except for those saved in __save_interrupt. */
/* Allocate enough stack for all of the registers & 16 bytes of space. */
/* Called via: callt ctoff(__callt_save_all_interrupt). */
.align 2
.L_save_all_interrupt:
addi -60, sp, sp
#ifdef __EP__
mov ep, r1
mov sp, ep
sst.w r2, 56[ep]
sst.w r5, 52[ep]
sst.w r6, 48[ep]
sst.w r7, 44[ep]
sst.w r8, 40[ep]
sst.w r9, 36[ep]
sst.w r11, 32[ep]
sst.w r12, 28[ep]
sst.w r13, 24[ep]
sst.w r14, 20[ep]
sst.w r15, 16[ep]
sst.w r16, 12[ep]
sst.w r17, 8[ep]
sst.w r18, 4[ep]
sst.w r19, 0[ep]
mov r1, ep
#else
st.w r2, 56[sp]
st.w r5, 52[sp]
st.w r6, 48[sp]
st.w r7, 44[sp]
st.w r8, 40[sp]
st.w r9, 36[sp]
st.w r11, 32[sp]
st.w r12, 28[sp]
st.w r13, 24[sp]
st.w r14, 20[sp]
st.w r15, 16[sp]
st.w r16, 12[sp]
st.w r17, 8[sp]
st.w r18, 4[sp]
st.w r19, 0[sp]
#endif
prepare {r20 - r29, r31}, 4
ctret
 
/* Restore all registers saved in __save_all_interrupt and
deallocate the stack space. */
/* Called via: callt ctoff(__callt_restore_all_interrupt). */
.align 2
.L_restore_all_interrupt:
dispose 4, {r20 - r29, r31}
#ifdef __EP__
mov ep, r1
mov sp, ep
sld.w 0 [ep], r19
sld.w 4 [ep], r18
sld.w 8 [ep], r17
sld.w 12[ep], r16
sld.w 16[ep], r15
sld.w 20[ep], r14
sld.w 24[ep], r13
sld.w 28[ep], r12
sld.w 32[ep], r11
sld.w 36[ep], r9
sld.w 40[ep], r8
sld.w 44[ep], r7
sld.w 48[ep], r6
sld.w 52[ep], r5
sld.w 56[ep], r2
mov r1, ep
#else
ld.w 0 [sp], r19
ld.w 4 [sp], r18
ld.w 8 [sp], r17
ld.w 12[sp], r16
ld.w 16[sp], r15
ld.w 20[sp], r14
ld.w 24[sp], r13
ld.w 28[sp], r12
ld.w 32[sp], r11
ld.w 36[sp], r9
ld.w 40[sp], r8
ld.w 44[sp], r7
ld.w 48[sp], r6
ld.w 52[sp], r5
ld.w 56[sp], r2
#endif
addi 60, sp, sp
ctret
 
/* Place the offsets of the start of these routines into the call table. */
.call_table_data
 
.global __callt_save_all_interrupt
.type __callt_save_all_interrupt,@function
__callt_save_all_interrupt: .short ctoff(.L_save_all_interrupt)
.global __callt_restore_all_interrupt
.type __callt_restore_all_interrupt,@function
__callt_restore_all_interrupt: .short ctoff(.L_restore_all_interrupt)
#endif /* L_callt_save_all_interrupt */
 
 
#define MAKE_CALLT_FUNCS( START ) \
.call_table_text ;\
.align 2 ;\
/* Allocate space and save registers START .. r29 on the stack. */ ;\
/* Called via: callt ctoff(__callt_save_START_r29). */ ;\
.L_save_##START##_r29: ;\
prepare { START - r29 }, 0 ;\
ctret ;\
;\
/* Restore saved registers, deallocate stack and return. */ ;\
/* Called via: callt ctoff(__return_START_r29) */ ;\
.align 2 ;\
.L_return_##START##_r29: ;\
dispose 0, { START - r29 }, r31 ;\
;\
/* Place the offsets of the start of these funcs into the call table. */;\
.call_table_data ;\
;\
.global __callt_save_##START##_r29 ;\
.type __callt_save_##START##_r29,@function ;\
__callt_save_##START##_r29: .short ctoff(.L_save_##START##_r29 ) ;\
;\
.global __callt_return_##START##_r29 ;\
.type __callt_return_##START##_r29,@function ;\
__callt_return_##START##_r29: .short ctoff(.L_return_##START##_r29 )
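/* For illustration, MAKE_CALLT_FUNCS (r25) expands to roughly:

   .call_table_text
   .align 2
   .L_save_r25_r29:
   prepare { r25 - r29 }, 0
   ctret

   .align 2
   .L_return_r25_r29:
   dispose 0, { r25 - r29 }, r31

   .call_table_data
   .global __callt_save_r25_r29
   .type __callt_save_r25_r29,@function
   __callt_save_r25_r29: .short ctoff(.L_save_r25_r29)
   .global __callt_return_r25_r29
   .type __callt_return_r25_r29,@function
   __callt_return_r25_r29: .short ctoff(.L_return_r25_r29)

   i.e. prepare pushes the register list and ctret returns through the
   call table, while dispose pops the list and jumps via r31 in a single
   instruction. */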
 
 
#define MAKE_CALLT_CFUNCS( START ) \
.call_table_text ;\
.align 2 ;\
/* Allocate space and save registers START .. r31 on the stack. */ ;\
/* Called via: callt ctoff(__callt_save_START_r31c). */ ;\
.L_save_##START##_r31c: ;\
prepare { START - r29, r31}, 4 ;\
ctret ;\
;\
/* Restore saved registers, deallocate stack and return. */ ;\
/* Called via: callt ctoff(__return_START_r31c). */ ;\
.align 2 ;\
.L_return_##START##_r31c: ;\
dispose 4, { START - r29, r31}, r31 ;\
;\
/* Place the offsets of the start of these funcs into the call table. */;\
.call_table_data ;\
;\
.global __callt_save_##START##_r31c ;\
.type __callt_save_##START##_r31c,@function ;\
__callt_save_##START##_r31c: .short ctoff(.L_save_##START##_r31c ) ;\
;\
.global __callt_return_##START##_r31c ;\
.type __callt_return_##START##_r31c,@function ;\
__callt_return_##START##_r31c: .short ctoff(.L_return_##START##_r31c )
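/* The "c" variants differ only in that r31 is included in the register
   list and the prepare/dispose immediate is 4 rather than 0, which also
   reserves the 16-byte argument save area used by the other helpers. */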
 
#ifdef L_callt_save_20
MAKE_CALLT_FUNCS (r20)
#endif
#ifdef L_callt_save_21
MAKE_CALLT_FUNCS (r21)
#endif
#ifdef L_callt_save_22
MAKE_CALLT_FUNCS (r22)
#endif
#ifdef L_callt_save_23
MAKE_CALLT_FUNCS (r23)
#endif
#ifdef L_callt_save_24
MAKE_CALLT_FUNCS (r24)
#endif
#ifdef L_callt_save_25
MAKE_CALLT_FUNCS (r25)
#endif
#ifdef L_callt_save_26
MAKE_CALLT_FUNCS (r26)
#endif
#ifdef L_callt_save_27
MAKE_CALLT_FUNCS (r27)
#endif
#ifdef L_callt_save_28
MAKE_CALLT_FUNCS (r28)
#endif
#ifdef L_callt_save_29
MAKE_CALLT_FUNCS (r29)
#endif
 
#ifdef L_callt_save_20c
MAKE_CALLT_CFUNCS (r20)
#endif
#ifdef L_callt_save_21c
MAKE_CALLT_CFUNCS (r21)
#endif
#ifdef L_callt_save_22c
MAKE_CALLT_CFUNCS (r22)
#endif
#ifdef L_callt_save_23c
MAKE_CALLT_CFUNCS (r23)
#endif
#ifdef L_callt_save_24c
MAKE_CALLT_CFUNCS (r24)
#endif
#ifdef L_callt_save_25c
MAKE_CALLT_CFUNCS (r25)
#endif
#ifdef L_callt_save_26c
MAKE_CALLT_CFUNCS (r26)
#endif
#ifdef L_callt_save_27c
MAKE_CALLT_CFUNCS (r27)
#endif
#ifdef L_callt_save_28c
MAKE_CALLT_CFUNCS (r28)
#endif
#ifdef L_callt_save_29c
MAKE_CALLT_CFUNCS (r29)
#endif
 
#ifdef L_callt_save_31c
.call_table_text
.align 2
/* Allocate space and save register r31 on the stack. */
/* Called via: callt ctoff(__callt_save_r31c). */
.L_callt_save_r31c:
prepare {r31}, 4
ctret
 
/* Restore saved registers, deallocate stack and return. */
/* Called via: callt ctoff(__return_r31c). */
.align 2
.L_callt_return_r31c:
dispose 4, {r31}, r31
/* Place the offsets of the start of these funcs into the call table. */
.call_table_data
 
.global __callt_save_r31c
.type __callt_save_r31c,@function
__callt_save_r31c: .short ctoff(.L_callt_save_r31c)
 
.global __callt_return_r31c
.type __callt_return_r31c,@function
__callt_return_r31c: .short ctoff(.L_callt_return_r31c)
#endif
 
#endif /* __v850e__ */
 
/* libgcc2 routines for NEC V850. */
/* Double Integer Arithmetic Operations. */
 
#ifdef L_negdi2
.text
.global ___negdi2
.type ___negdi2, @function
___negdi2:
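/* Negate the 64-bit value in r7:r6 (high:low) into r11:r10.
   Two's-complement negation word by word: r10 = ~r6 + 1, and setf l
   captures the carry out of that add (set only when r6 was zero), so
   r11 = ~r7 + carry. */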
not r6, r10
add 1, r10
setf l, r6
not r7, r11
add r6, r11
jmp [lp]
 
.size ___negdi2,.-___negdi2
#endif
 
#ifdef L_cmpdi2
.text
.global ___cmpdi2
.type ___cmpdi2,@function
___cmpdi2:
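/* Signed 64-bit comparison of r7:r6 (first operand) with r9:r8 (second),
   using the usual libgcc convention: return 0 if less than, 1 if equal,
   2 if greater than. Adding the two setf results (ge + gt, or nl + h
   for the low words) yields exactly 0, 1 or 2. */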
# Signed comparison between the high words.
cmp r9, r7
be .L_cmpdi_cmp_low
setf ge, r10
setf gt, r6
add r6, r10
jmp [lp]
.L_cmpdi_cmp_low:
# Unsigned comparison between the low words.
cmp r8, r6
setf nl, r10
setf h, r6
add r6, r10
jmp [lp]
.size ___cmpdi2, . - ___cmpdi2
#endif
 
#ifdef L_ucmpdi2
.text
.global ___ucmpdi2
.type ___ucmpdi2,@function
___ucmpdi2:
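/* Unsigned 64-bit comparison of r7:r6 with r9:r8; like ___cmpdi2 it
   returns 0, 1 or 2 for less than, equal, greater than, but both the
   high and low words are compared unsigned. */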
cmp r9, r7 # Check whether the high words are the same.
bne .L_ucmpdi_check_psw
cmp r8, r6 # Compare the low words.
.L_ucmpdi_check_psw:
setf nl, r10 # 1 if not lower (unsigned >=).
setf h, r6 # 1 if higher (unsigned >).
add r6, r10 # Result: 0 if less, 1 if equal, 2 if greater.
jmp [lp]
.size ___ucmpdi2, . - ___ucmpdi2
#endif
 
#ifdef L_muldi3
.text
.global ___muldi3
.type ___muldi3,@function
___muldi3:
#ifdef __v850__
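/* The base V850 has no 32x32 multiply, only mulh (signed 16x16 -> 32).
   Each operand is therefore split into 15-bit chunks (the 0x7fff masks
   below) so every chunk is non-negative, the partial products are formed
   with mulh, and they are shifted and summed into r26/r27 before being
   copied to the r10/r11 return registers. */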
jarl __save_r26_r31, r10
addi 16, sp, sp
mov r6, r28
shr 15, r28
movea lo(32767), r0, r14
and r14, r28
mov r8, r10
shr 15, r10
and r14, r10
mov r6, r19
shr 30, r19
mov r7, r12
shl 2, r12
or r12, r19
and r14, r19
mov r8, r13
shr 30, r13
mov r9, r12
shl 2, r12
or r12, r13
and r14, r13
mov r7, r11
shr 13, r11
and r14, r11
mov r9, r31
shr 13, r31
and r14, r31
mov r7, r29
shr 28, r29
and r14, r29
mov r9, r12
shr 28, r12
and r14, r12
and r14, r6
and r14, r8
mov r6, r14
mulh r8, r14
mov r6, r16
mulh r10, r16
mov r6, r18
mulh r13, r18
mov r6, r15
mulh r31, r15
mulh r12, r6
mov r28, r17
mulh r10, r17
add -16, sp
mov r28, r12
mulh r8, r12
add r17, r18
mov r28, r17
mulh r31, r17
add r12, r16
mov r28, r12
mulh r13, r12
add r17, r6
mov r19, r17
add r12, r15
mov r19, r12
mulh r8, r12
mulh r10, r17
add r12, r18
mov r19, r12
mulh r13, r12
add r17, r15
mov r11, r13
mulh r8, r13
add r12, r6
mov r11, r12
mulh r10, r12
add r13, r15
mulh r29, r8
add r12, r6
mov r16, r13
shl 15, r13
add r14, r13
mov r18, r12
shl 30, r12
mov r13, r26
add r12, r26
shr 15, r14
movhi hi(131071), r0, r12
movea lo(131071), r12, r13
and r13, r14
mov r16, r12
and r13, r12
add r12, r14
mov r18, r12
shl 15, r12
and r13, r12
add r12, r14
shr 17, r14
shr 17, r16
add r14, r16
shl 13, r15
shr 2, r18
add r18, r15
add r15, r16
mov r16, r27
add r8, r6
shl 28, r6
add r6, r27
mov r26, r10
mov r27, r11
jr __return_r26_r31
#endif /* __v850__ */
#if defined(__v850e__) || defined(__v850ea__)
/* (Ahi << 32 + Alo) * (Bhi << 32 + Blo) */
/* r7 r6 r9 r8 */
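/* Only three partial products can affect the 64-bit result:
   low = low32 (Alo * Blo)
   high = high32 (Alo * Blo) + low32 (Ahi * Blo) + low32 (Alo * Bhi)
   Ahi * Bhi and the upper halves of the cross products lie entirely
   above bit 63. mulu leaves the low word of each product in its second
   operand and the high word in the third, so r0 as the third operand
   simply discards an unwanted high word. */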
mov r8, r10
mulu r7, r8, r0 /* Ahi * Blo */
mulu r6, r9, r0 /* Alo * Bhi */
mulu r6, r10, r11 /* Alo * Blo */
add r8, r11
add r9, r11
jmp [r31]
 
#endif /* defined(__v850e__) || defined(__v850ea__) */
.size ___muldi3, . - ___muldi3
#endif
/t-v850e
0,0 → 1,96
LIB1ASMSRC = v850/lib1funcs.asm
LIB1ASMFUNCS = _mulsi3 \
_divsi3 \
_udivsi3 \
_modsi3 \
_umodsi3 \
_save_2 \
_save_20 \
_save_21 \
_save_22 \
_save_23 \
_save_24 \
_save_25 \
_save_26 \
_save_27 \
_save_28 \
_save_29 \
_save_2c \
_save_20c \
_save_21c \
_save_22c \
_save_23c \
_save_24c \
_save_25c \
_save_26c \
_save_27c \
_save_28c \
_save_29c \
_save_31c \
_save_varargs \
_save_interrupt \
_save_all_interrupt \
_callt_save_20 \
_callt_save_21 \
_callt_save_22 \
_callt_save_23 \
_callt_save_24 \
_callt_save_25 \
_callt_save_26 \
_callt_save_27 \
_callt_save_28 \
_callt_save_29 \
_callt_save_20c \
_callt_save_21c \
_callt_save_22c \
_callt_save_23c \
_callt_save_24c \
_callt_save_25c \
_callt_save_26c \
_callt_save_27c \
_callt_save_28c \
_callt_save_29c \
_callt_save_31c \
_callt_save_varargs \
_callt_save_interrupt \
_callt_save_all_interrupt \
_callt_save_r2_r29 \
_callt_save_r2_r31 \
_callt_save_r6_r9 \
_negdi2 \
_cmpdi2 \
_ucmpdi2 \
_muldi3
 
# We want fine grained libraries, so use the new code to build the
# floating point emulation libraries.
FPBIT = fp-bit.c
DPBIT = dp-bit.c
 
dp-bit.c: $(srcdir)/config/fp-bit.c
echo '#ifdef __LITTLE_ENDIAN__' > dp-bit.c
echo '#define FLOAT_BIT_ORDER_MISMATCH' >>dp-bit.c
echo '#endif' >> dp-bit.c
cat $(srcdir)/config/fp-bit.c >> dp-bit.c
 
fp-bit.c: $(srcdir)/config/fp-bit.c
echo '#define FLOAT' > fp-bit.c
echo '#ifdef __LITTLE_ENDIAN__' >> fp-bit.c
echo '#define FLOAT_BIT_ORDER_MISMATCH' >>fp-bit.c
echo '#endif' >> fp-bit.c
cat $(srcdir)/config/fp-bit.c >> fp-bit.c
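# The generated files therefore begin with (dp-bit.c omits the FLOAT define):
#
#   #define FLOAT
#   #ifdef __LITTLE_ENDIAN__
#   #define FLOAT_BIT_ORDER_MISMATCH
#   #endif
#
# followed by the shared $(srcdir)/config/fp-bit.c source.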
 
# Create target-specific versions of the libraries
MULTILIB_OPTIONS = mv850
MULTILIB_DIRNAMES = v850
INSTALL_LIBGCC = install-multilib
 
TCFLAGS = -mno-app-regs -msmall-sld -Wa,-mwarn-signed-overflow -Wa,-mwarn-unsigned-overflow
 
v850-c.o: $(srcdir)/config/v850/v850-c.c $(CONFIG_H) $(SYSTEM_H) coretypes.h \
$(TM_H) $(CPPLIB_H) $(TREE_H) c-pragma.h toplev.h $(GGC_H) $(TM_P_H)
$(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $(srcdir)/config/v850/v850-c.c
 
# Local Variables:
# mode: Makefile
# End:
/v850.h
0,0 → 1,1146
/* Definitions of target machine for GNU compiler. NEC V850 series
Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
2007 Free Software Foundation, Inc.
Contributed by Jeff Law (law@cygnus.com).
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
 
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
 
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
 
#ifndef GCC_V850_H
#define GCC_V850_H
 
/* These are defined in svr4.h but we want to override them. */
#undef LIB_SPEC
#undef ENDFILE_SPEC
#undef LINK_SPEC
#undef STARTFILE_SPEC
#undef ASM_SPEC
 
#define TARGET_CPU_generic 1
#define TARGET_CPU_v850e 2
#define TARGET_CPU_v850e1 3
 
#ifndef TARGET_CPU_DEFAULT
#define TARGET_CPU_DEFAULT TARGET_CPU_generic
#endif
 
#define MASK_DEFAULT MASK_V850
#define SUBTARGET_ASM_SPEC "%{!mv*:-mv850}"
#define SUBTARGET_CPP_SPEC "%{!mv*:-D__v850__}"
#define TARGET_VERSION fprintf (stderr, " (NEC V850)");
 
/* Choose which processor will be the default.
We must pass a -mv850xx option to the assembler if no explicit -mv* option
is given, because the assembler's processor default may not be correct. */
#if TARGET_CPU_DEFAULT == TARGET_CPU_v850e
#undef MASK_DEFAULT
#define MASK_DEFAULT MASK_V850E
#undef SUBTARGET_ASM_SPEC
#define SUBTARGET_ASM_SPEC "%{!mv*:-mv850e}"
#undef SUBTARGET_CPP_SPEC
#define SUBTARGET_CPP_SPEC "%{!mv*:-D__v850e__}"
#undef TARGET_VERSION
#define TARGET_VERSION fprintf (stderr, " (NEC V850E)");
#endif
 
#if TARGET_CPU_DEFAULT == TARGET_CPU_v850e1
#undef MASK_DEFAULT
#define MASK_DEFAULT MASK_V850E /* No practical difference. */
#undef SUBTARGET_ASM_SPEC
#define SUBTARGET_ASM_SPEC "%{!mv*:-mv850e1}"
#undef SUBTARGET_CPP_SPEC
#define SUBTARGET_CPP_SPEC "%{!mv*:-D__v850e1__} %{mv850e1:-D__v850e1__}"
#undef TARGET_VERSION
#define TARGET_VERSION fprintf (stderr, " (NEC V850E1)");
#endif
 
#define ASM_SPEC "%{mv*:-mv%*}"
#define CPP_SPEC "%{mv850e:-D__v850e__} %{mv850:-D__v850__} %(subtarget_cpp_spec)"
 
#define EXTRA_SPECS \
{ "subtarget_asm_spec", SUBTARGET_ASM_SPEC }, \
{ "subtarget_cpp_spec", SUBTARGET_CPP_SPEC }
 
/* Names to predefine in the preprocessor for this target machine. */
#define TARGET_CPU_CPP_BUILTINS() do { \
builtin_define( "__v851__" ); \
builtin_define( "__v850" ); \
builtin_assert( "machine=v850" ); \
builtin_assert( "cpu=v850" ); \
if (TARGET_EP) \
builtin_define ("__EP__"); \
} while(0)
 
#define MASK_CPU (MASK_V850 | MASK_V850E)
 
/* Information about the various small memory areas. */
struct small_memory_info {
const char *name;
long max;
long physical_max;
};
 
enum small_memory_type {
/* tiny data area, using EP as base register */
SMALL_MEMORY_TDA = 0,
/* small data area using dp as base register */
SMALL_MEMORY_SDA,
/* zero data area using r0 as base register */
SMALL_MEMORY_ZDA,
SMALL_MEMORY_max
};
 
extern struct small_memory_info small_memory[(int)SMALL_MEMORY_max];
 
/* Show we can debug even without a frame pointer. */
#define CAN_DEBUG_WITHOUT_FP
 
/* Some machines may desire to change what optimizations are
performed for various optimization levels. This macro, if
defined, is executed once just after the optimization level is
determined and before the remainder of the command options have
been parsed. Values set in this macro are used as the default
values for the other command line options.
 
LEVEL is the optimization level specified; 2 if `-O2' is
specified, 1 if `-O' is specified, and 0 if neither is specified.
 
SIZE is nonzero if `-Os' is specified, 0 otherwise.
 
You should not use this macro to change options that are not
machine-specific. These should be uniformly selected by the same
optimization level on all supported machines. Use this macro to
enable machine-specific optimizations.
 
*Do not examine `write_symbols' in this macro!* The debugging
options are not supposed to alter the generated code. */
 
#define OPTIMIZATION_OPTIONS(LEVEL,SIZE) \
{ \
target_flags |= MASK_STRICT_ALIGN; \
if (LEVEL) \
/* Note - we no longer enable MASK_EP when optimizing. This is \
because of a hardware bug which stops the SLD and SST instructions\
from correctly detecting some hazards. If the user is sure that \
their hardware is fixed or that their program will not encounter \
the conditions that trigger the bug then they can enable -mep by \
hand. */ \
target_flags |= MASK_PROLOG_FUNCTION; \
}
 
/* Target machine storage layout */
 
/* Define this if most significant bit is lowest numbered
in instructions that operate on numbered bit-fields.
This is not true on the NEC V850. */
#define BITS_BIG_ENDIAN 0
 
/* Define this if most significant byte of a word is the lowest numbered. */
/* This is not true on the NEC V850. */
#define BYTES_BIG_ENDIAN 0
 
/* Define this if most significant word of a multiword number is lowest
numbered.
This is not true on the NEC V850. */
#define WORDS_BIG_ENDIAN 0
 
/* Width of a word, in units (bytes). */
#define UNITS_PER_WORD 4
 
/* Define this macro if it is advisable to hold scalars in registers
in a wider mode than that declared by the program. In such cases,
the value is constrained to be within the bounds of the declared
type, but kept valid in the wider mode. The signedness of the
extension may differ from that of the type.
 
Some simple experiments have shown that leaving UNSIGNEDP alone
generates the best overall code. */
 
#define PROMOTE_MODE(MODE,UNSIGNEDP,TYPE) \
if (GET_MODE_CLASS (MODE) == MODE_INT \
&& GET_MODE_SIZE (MODE) < 4) \
{ (MODE) = SImode; }
 
/* Allocation boundary (in *bits*) for storing arguments in argument list. */
#define PARM_BOUNDARY 32
 
/* The stack goes in 32 bit lumps. */
#define STACK_BOUNDARY 32
 
/* Allocation boundary (in *bits*) for the code of a function.
16 is the minimum boundary; 32 would give better performance. */
#define FUNCTION_BOUNDARY 16
 
/* No data type wants to be aligned rounder than this. */
#define BIGGEST_ALIGNMENT 32
 
/* Alignment of field after `int : 0' in a structure. */
#define EMPTY_FIELD_BOUNDARY 32
 
/* No structure field wants to be aligned rounder than this. */
#define BIGGEST_FIELD_ALIGNMENT 32
 
/* Define this if move instructions will actually fail to work
when given unaligned data. */
#define STRICT_ALIGNMENT TARGET_STRICT_ALIGN
 
/* Define this as 1 if `char' should by default be signed; else as 0.
 
On the NEC V850, loads do sign extension, so make this default. */
#define DEFAULT_SIGNED_CHAR 1
/* Standard register usage. */
 
/* Number of actual hardware registers.
The hardware registers are assigned numbers for the compiler
from 0 to just below FIRST_PSEUDO_REGISTER.
 
All registers that the compiler knows about must be given numbers,
even those that are not normally considered general registers. */
 
#define FIRST_PSEUDO_REGISTER 34
 
/* 1 for registers that have pervasive standard uses
and are not available for the register allocator. */
 
#define FIXED_REGISTERS \
{ 1, 1, 0, 1, 1, 0, 0, 0, \
0, 0, 0, 0, 0, 0, 0, 0, \
0, 0, 0, 0, 0, 0, 0, 0, \
0, 0, 0, 0, 0, 0, 1, 0, \
1, 1}
 
/* 1 for registers not available across function calls.
These must include the FIXED_REGISTERS and also any
registers that can be used without being saved.
The latter must include the registers where values are returned
and the register where structure-value addresses are passed.
Aside from that, you can include as many other registers as you
like. */
 
#define CALL_USED_REGISTERS \
{ 1, 1, 0, 1, 1, 1, 1, 1, \
1, 1, 1, 1, 1, 1, 1, 1, \
1, 1, 1, 1, 0, 0, 0, 0, \
0, 0, 0, 0, 0, 0, 1, 1, \
1, 1}
 
/* List the order in which to allocate registers. Each register must be
listed once, even those in FIXED_REGISTERS.
 
On the 850, we make the return registers first, then all of the volatile
registers, then the saved registers in reverse order to better save the
registers with an out of line function, and finally the fixed
registers. */
 
#define REG_ALLOC_ORDER \
{ \
10, 11, /* return registers */ \
12, 13, 14, 15, 16, 17, 18, 19, /* scratch registers */ \
6, 7, 8, 9, 31, /* argument registers */ \
29, 28, 27, 26, 25, 24, 23, 22, /* saved registers */ \
21, 20, 2, \
0, 1, 3, 4, 5, 30, 32, 33 /* fixed registers */ \
}
 
/* If TARGET_APP_REGS is not defined then add r2 and r5 to
the pool of fixed registers. See PR 14505. */
#define CONDITIONAL_REGISTER_USAGE \
{ \
if (!TARGET_APP_REGS) \
{ \
fixed_regs[2] = 1; call_used_regs[2] = 1; \
fixed_regs[5] = 1; call_used_regs[5] = 1; \
} \
}
 
/* Return number of consecutive hard regs needed starting at reg REGNO
to hold something of mode MODE.
 
This is ordinarily the length in words of a value of mode MODE
but can be less for certain modes in special long registers. */
 
#define HARD_REGNO_NREGS(REGNO, MODE) \
((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
 
/* Value is 1 if hard register REGNO can hold a value of machine-mode
MODE. */
 
#define HARD_REGNO_MODE_OK(REGNO, MODE) \
((((REGNO) & 1) == 0) || (GET_MODE_SIZE (MODE) <= 4))
 
/* Value is 1 if it is a good idea to tie two pseudo registers
when one has mode MODE1 and one has mode MODE2.
If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
for any hard reg, then this must be 0 for correct output. */
#define MODES_TIEABLE_P(MODE1, MODE2) \
(MODE1 == MODE2 || (GET_MODE_SIZE (MODE1) <= 4 && GET_MODE_SIZE (MODE2) <= 4))
 
/* Define the classes of registers for register constraints in the
machine description. Also define ranges of constants.
 
One of the classes must always be named ALL_REGS and include all hard regs.
If there is more than one class, another class must be named NO_REGS
and contain no registers.
 
The name GENERAL_REGS must be the name of a class (or an alias for
another name such as ALL_REGS). This is the class of registers
that is allowed by "g" or "r" in a register constraint.
Also, registers outside this class are allocated only when
instructions express preferences for them.
 
The classes must be numbered in nondecreasing order; that is,
a larger-numbered class must never be contained completely
in a smaller-numbered class.
 
For any two classes, it is very desirable that there be another
class that represents their union. */
enum reg_class
{
NO_REGS, GENERAL_REGS, ALL_REGS, LIM_REG_CLASSES
};
 
#define N_REG_CLASSES (int) LIM_REG_CLASSES
 
/* Give names of register classes as strings for dump file. */
 
#define REG_CLASS_NAMES \
{ "NO_REGS", "GENERAL_REGS", "ALL_REGS", "LIM_REGS" }
 
/* Define which registers fit in which classes.
This is an initializer for a vector of HARD_REG_SET
of length N_REG_CLASSES. */
 
#define REG_CLASS_CONTENTS \
{ \
{ 0x00000000 }, /* NO_REGS */ \
{ 0xffffffff }, /* GENERAL_REGS */ \
{ 0xffffffff }, /* ALL_REGS */ \
}
 
/* The same information, inverted:
Return the class number of the smallest class containing
reg number REGNO. This could be a conditional expression
or could index an array. */
 
#define REGNO_REG_CLASS(REGNO) GENERAL_REGS
 
/* The class value for index registers, and the one for base regs. */
 
#define INDEX_REG_CLASS NO_REGS
#define BASE_REG_CLASS GENERAL_REGS
 
/* Get reg_class from a letter such as appears in the machine description. */
 
#define REG_CLASS_FROM_LETTER(C) (NO_REGS)
 
/* Macros to check register numbers against specific register classes. */
 
/* These assume that REGNO is a hard or pseudo reg number.
They give nonzero only if REGNO is a hard reg of the suitable class
or a pseudo reg currently allocated to a suitable hard reg.
Since they use reg_renumber, they are safe only once reg_renumber
has been allocated, which happens in local-alloc.c. */
#define REGNO_OK_FOR_BASE_P(regno) \
((regno) < FIRST_PSEUDO_REGISTER || reg_renumber[regno] >= 0)
 
#define REGNO_OK_FOR_INDEX_P(regno) 0
 
/* Given an rtx X being reloaded into a reg required to be
in class CLASS, return the class of reg to actually use.
In general this is just CLASS; but on some machines
in some cases it is preferable to use a more restrictive class. */
 
#define PREFERRED_RELOAD_CLASS(X,CLASS) (CLASS)
 
/* Return the maximum number of consecutive registers
needed to represent mode MODE in a register of class CLASS. */
 
#define CLASS_MAX_NREGS(CLASS, MODE) \
((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
 
/* The letters I, J, K, L, M, N, O, P in a register constraint string
can be used to stand for particular ranges of immediate operands.
This macro defines what the ranges are.
C is the letter, and VALUE is a constant value.
Return 1 if VALUE is in the range specified by C. */
 
#define INT_7_BITS(VALUE) ((unsigned) (VALUE) + 0x40 < 0x80)
#define INT_8_BITS(VALUE) ((unsigned) (VALUE) + 0x80 < 0x100)
/* zero */
#define CONST_OK_FOR_I(VALUE) ((VALUE) == 0)
/* 5 bit signed immediate */
#define CONST_OK_FOR_J(VALUE) ((unsigned) (VALUE) + 0x10 < 0x20)
/* 16 bit signed immediate */
#define CONST_OK_FOR_K(VALUE) ((unsigned) (VALUE) + 0x8000 < 0x10000)
/* valid constant for movhi instruction. */
#define CONST_OK_FOR_L(VALUE) \
(((unsigned) ((int) (VALUE) >> 16) + 0x8000 < 0x10000) \
&& CONST_OK_FOR_I ((VALUE & 0xffff)))
/* 16 bit unsigned immediate */
#define CONST_OK_FOR_M(VALUE) ((unsigned)(VALUE) < 0x10000)
/* 5 bit unsigned immediate in shift instructions */
#define CONST_OK_FOR_N(VALUE) ((unsigned) (VALUE) <= 31)
/* 9 bit signed immediate for word multiply instruction. */
#define CONST_OK_FOR_O(VALUE) ((unsigned) (VALUE) + 0x100 < 0x200)
 
#define CONST_OK_FOR_P(VALUE) 0
 
#define CONST_OK_FOR_LETTER_P(VALUE, C) \
((C) == 'I' ? CONST_OK_FOR_I (VALUE) : \
(C) == 'J' ? CONST_OK_FOR_J (VALUE) : \
(C) == 'K' ? CONST_OK_FOR_K (VALUE) : \
(C) == 'L' ? CONST_OK_FOR_L (VALUE) : \
(C) == 'M' ? CONST_OK_FOR_M (VALUE) : \
(C) == 'N' ? CONST_OK_FOR_N (VALUE) : \
(C) == 'O' ? CONST_OK_FOR_O (VALUE) : \
(C) == 'P' ? CONST_OK_FOR_P (VALUE) : \
0)
 
/* Similar, but for floating constants, and defining letters G and H.
Here VALUE is the CONST_DOUBLE rtx itself.
`G' is a zero of some form. */
 
#define CONST_DOUBLE_OK_FOR_G(VALUE) \
((GET_MODE_CLASS (GET_MODE (VALUE)) == MODE_FLOAT \
&& (VALUE) == CONST0_RTX (GET_MODE (VALUE))) \
|| (GET_MODE_CLASS (GET_MODE (VALUE)) == MODE_INT \
&& CONST_DOUBLE_LOW (VALUE) == 0 \
&& CONST_DOUBLE_HIGH (VALUE) == 0))
 
#define CONST_DOUBLE_OK_FOR_H(VALUE) 0
 
#define CONST_DOUBLE_OK_FOR_LETTER_P(VALUE, C) \
((C) == 'G' ? CONST_DOUBLE_OK_FOR_G (VALUE) \
: (C) == 'H' ? CONST_DOUBLE_OK_FOR_H (VALUE) \
: 0)
 
/* Stack layout; function entry, exit and calling. */
 
/* Define this if pushing a word on the stack
makes the stack pointer a smaller address. */
 
#define STACK_GROWS_DOWNWARD
 
/* Define this to nonzero if the nominal address of the stack frame
is at the high-address end of the local variables;
that is, each additional local variable allocated
goes at a more negative offset in the frame. */
 
#define FRAME_GROWS_DOWNWARD 1
 
/* Offset within stack frame to start allocating local variables at.
If FRAME_GROWS_DOWNWARD, this is the offset to the END of the
first local allocated. Otherwise, it is the offset to the BEGINNING
of the first local allocated. */
 
#define STARTING_FRAME_OFFSET 0
 
/* Offset of first parameter from the argument pointer register value. */
/* Is equal to the size of the saved fp + pc, even if an fp isn't
saved, since the value is used before we know. */
 
#define FIRST_PARM_OFFSET(FNDECL) 0
 
/* Specify the registers used for certain standard purposes.
The values of these macros are register numbers. */
 
/* Register to use for pushing function arguments. */
#define STACK_POINTER_REGNUM 3
 
/* Base register for access to local variables of the function. */
#define FRAME_POINTER_REGNUM 32
 
/* Register containing return address from latest function call. */
#define LINK_POINTER_REGNUM 31
/* On some machines the offset between the frame pointer and starting
offset of the automatic variables is not known until after register
allocation has been done (for example, because the saved registers
are between these two locations). On those machines, define
`FRAME_POINTER_REGNUM' to be the number of a special, fixed register to
be used internally until the offset is known, and define
`HARD_FRAME_POINTER_REGNUM' to be the actual hard register number
used for the frame pointer.
 
You should define this macro only in the very rare circumstances
when it is not possible to calculate the offset between the frame
pointer and the automatic variables until after register
allocation has been completed. When this macro is defined, you
must also indicate in your definition of `ELIMINABLE_REGS' how to
eliminate `FRAME_POINTER_REGNUM' into either
`HARD_FRAME_POINTER_REGNUM' or `STACK_POINTER_REGNUM'.
 
Do not define this macro if it would be the same as
`FRAME_POINTER_REGNUM'. */
#undef HARD_FRAME_POINTER_REGNUM
#define HARD_FRAME_POINTER_REGNUM 29
 
/* Base register for access to arguments of the function. */
#define ARG_POINTER_REGNUM 33
 
/* Register in which static-chain is passed to a function. */
#define STATIC_CHAIN_REGNUM 20
 
/* Value should be nonzero if functions must have frame pointers.
Zero means the frame pointer need not be set up (and parms
may be accessed via the stack pointer) in functions that seem suitable.
This is computed in `reload', in reload1.c. */
#define FRAME_POINTER_REQUIRED 0
 
/* If defined, this macro specifies a table of register pairs used to
eliminate unneeded registers that point into the stack frame. If
it is not defined, the only elimination attempted by the compiler
is to replace references to the frame pointer with references to
the stack pointer.
 
The definition of this macro is a list of structure
initializations, each of which specifies an original and
replacement register.
 
On some machines, the position of the argument pointer is not
known until the compilation is completed. In such a case, a
separate hard register must be used for the argument pointer.
This register can be eliminated by replacing it with either the
frame pointer or the argument pointer, depending on whether or not
the frame pointer has been eliminated.
 
In this case, you might specify:
#define ELIMINABLE_REGS \
{{ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
{ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM}, \
{FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}}
 
Note that the elimination of the argument pointer with the stack
pointer is specified first since that is the preferred elimination. */
 
#define ELIMINABLE_REGS \
{{ FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM }, \
{ FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM }, \
{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM }, \
{ ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM }} \
 
/* A C expression that returns nonzero if the compiler is allowed to
try to replace register number FROM-REG with register number
TO-REG. This macro need only be defined if `ELIMINABLE_REGS' is
defined, and will usually be the constant 1, since most of the
cases preventing register elimination are things that the compiler
already knows about. */
 
#define CAN_ELIMINATE(FROM, TO) \
((TO) == STACK_POINTER_REGNUM ? ! frame_pointer_needed : 1)
 
/* This macro is similar to `INITIAL_FRAME_POINTER_OFFSET'. It
specifies the initial difference between the specified pair of
registers. This macro must be defined if `ELIMINABLE_REGS' is
defined. */
 
#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
{ \
if ((FROM) == FRAME_POINTER_REGNUM) \
(OFFSET) = get_frame_size () + current_function_outgoing_args_size; \
else if ((FROM) == ARG_POINTER_REGNUM) \
(OFFSET) = compute_frame_size (get_frame_size (), (long *)0); \
else \
gcc_unreachable (); \
}
 
/* Keep the stack pointer constant throughout the function. */
#define ACCUMULATE_OUTGOING_ARGS 1
 
/* Value is the number of bytes of arguments automatically
popped when returning from a subroutine call.
FUNDECL is the declaration node of the function (as a tree),
FUNTYPE is the data type of the function (as a tree),
or for a library call it is an identifier node for the subroutine name.
SIZE is the number of bytes of arguments passed on the stack. */
 
#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) 0
 
#define RETURN_ADDR_RTX(COUNT, FP) v850_return_addr (COUNT)
/* Define a data type for recording info about an argument list
during the scan of that argument list. This data type should
hold all necessary information about the function itself
and about the args processed so far, enough to enable macros
such as FUNCTION_ARG to determine where the next arg should go. */
 
#define CUMULATIVE_ARGS struct cum_arg
struct cum_arg { int nbytes; int anonymous_args; };
 
/* Define where to put the arguments to a function.
Value is zero to push the argument on the stack,
or a hard register in which to store the argument.
 
MODE is the argument's machine mode.
TYPE is the data type of the argument (as a tree).
This is null for libcalls where that information may
not be available.
CUM is a variable of type CUMULATIVE_ARGS which gives info about
the preceding args and about the function being called.
NAMED is nonzero if this argument is a named parameter
(otherwise it is an extra parameter matching an ellipsis). */
 
#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
function_arg (&CUM, MODE, TYPE, NAMED)
 
/* Initialize a variable CUM of type CUMULATIVE_ARGS
for a call to a function whose data type is FNTYPE.
For a library call, FNTYPE is 0. */
 
#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT, N_NAMED_ARGS) \
((CUM).nbytes = 0, (CUM).anonymous_args = 0)
 
/* Update the data in CUM to advance over an argument
of mode MODE and data type TYPE.
(TYPE is null for libcalls where that information may not be available.) */
 
#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
((CUM).nbytes += ((MODE) != BLKmode \
? (GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) & -UNITS_PER_WORD \
: (int_size_in_bytes (TYPE) + UNITS_PER_WORD - 1) & -UNITS_PER_WORD))
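/* For example, a QImode, HImode or SImode argument advances nbytes by 4,
   a DImode argument by 8, and a 6-byte BLKmode structure also by 8,
   since every argument is rounded up to a whole number of words. */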
 
/* When a parameter is passed in a register, stack space is still
allocated for it. */
#define REG_PARM_STACK_SPACE(DECL) (!TARGET_GHS ? 16 : 0)
 
/* Define this if the above stack space is to be considered part of the
space allocated by the caller. */
#define OUTGOING_REG_PARM_STACK_SPACE
 
/* 1 if N is a possible register number for function argument passing. */
 
#define FUNCTION_ARG_REGNO_P(N) (N >= 6 && N <= 9)
 
/* Define how to find the value returned by a function.
VALTYPE is the data type of the value (as a tree).
If the precise function being called is known, FUNC is its FUNCTION_DECL;
otherwise, FUNC is 0. */
#define FUNCTION_VALUE(VALTYPE, FUNC) \
gen_rtx_REG (TYPE_MODE (VALTYPE), 10)
 
/* Define how to find the value returned by a library function
assuming the value has mode MODE. */
 
#define LIBCALL_VALUE(MODE) \
gen_rtx_REG (MODE, 10)
 
/* 1 if N is a possible register number for a function value. */
 
#define FUNCTION_VALUE_REGNO_P(N) ((N) == 10)
 
#define DEFAULT_PCC_STRUCT_RETURN 0
 
/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
the stack pointer does not matter. The value is tested only in
functions that have frame pointers.
No definition is equivalent to always zero. */
 
#define EXIT_IGNORE_STACK 1
 
/* Define this macro as a C expression that is nonzero for registers
used by the epilogue or the `return' pattern. */
 
#define EPILOGUE_USES(REGNO) \
(reload_completed && (REGNO) == LINK_POINTER_REGNUM)
 
/* Output assembler code to FILE to increment profiler label # LABELNO
for profiling a function entry. */
 
#define FUNCTION_PROFILER(FILE, LABELNO) ;
 
#define TRAMPOLINE_TEMPLATE(FILE) \
do { \
fprintf (FILE, "\tjarl .+4,r12\n"); \
fprintf (FILE, "\tld.w 12[r12],r20\n"); \
fprintf (FILE, "\tld.w 16[r12],r12\n"); \
fprintf (FILE, "\tjmp [r12]\n"); \
fprintf (FILE, "\tnop\n"); \
fprintf (FILE, "\t.long 0\n"); \
fprintf (FILE, "\t.long 0\n"); \
} while (0)
 
/* Length in units of the trampoline for entering a nested function. */
 
#define TRAMPOLINE_SIZE 24
 
/* Emit RTL insns to initialize the variable parts of a trampoline.
FNADDR is an RTX for the address of the function's pure code.
CXT is an RTX for the static chain value for the function. */
 
#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \
{ \
emit_move_insn (gen_rtx_MEM (SImode, plus_constant ((TRAMP), 16)), \
(CXT)); \
emit_move_insn (gen_rtx_MEM (SImode, plus_constant ((TRAMP), 20)), \
(FNADDR)); \
}
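/* Trampoline layout (24 bytes, assuming the usual 2- and 4-byte
   encodings): the four instructions plus the nop occupy bytes 0-15,
   and the two .long slots at offsets 16 and 20 are filled in above
   with the static chain value (loaded into r20, the static chain
   register) and the target function address (loaded into r12 and
   jumped through). The initial jarl .+4 only puts the address of the
   first ld.w into r12 so that the 12[r12] and 16[r12] loads reach
   those two slots. */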
 
/* Addressing modes, and classification of registers for them. */
 
/* 1 if X is an rtx for a constant that is a valid address. */
 
/* ??? This seems too exclusive. May get better code by accepting more
possibilities here, in particular, should accept ZDA_NAME SYMBOL_REFs. */
 
#define CONSTANT_ADDRESS_P(X) \
(GET_CODE (X) == CONST_INT \
&& CONST_OK_FOR_K (INTVAL (X)))
 
/* Maximum number of registers that can appear in a valid memory address. */
 
#define MAX_REGS_PER_ADDRESS 1
 
/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
and check its validity for a certain class.
We have two alternate definitions for each of them.
The usual definition accepts all pseudo regs; the other rejects
them unless they have been allocated suitable hard regs.
The symbol REG_OK_STRICT causes the latter definition to be used.
 
Most source files want to accept pseudo regs in the hope that
they will get allocated to the class that the insn wants them to be in.
Source files for reload pass need to be strict.
After reload, it makes no difference, since pseudo regs have
been eliminated by then. */
 
#ifndef REG_OK_STRICT
 
/* Nonzero if X is a hard reg that can be used as an index
or if it is a pseudo reg. */
#define REG_OK_FOR_INDEX_P(X) 0
/* Nonzero if X is a hard reg that can be used as a base reg
or if it is a pseudo reg. */
#define REG_OK_FOR_BASE_P(X) 1
#define REG_OK_FOR_INDEX_P_STRICT(X) 0
#define REG_OK_FOR_BASE_P_STRICT(X) REGNO_OK_FOR_BASE_P (REGNO (X))
#define STRICT 0
 
#else
 
/* Nonzero if X is a hard reg that can be used as an index. */
#define REG_OK_FOR_INDEX_P(X) 0
/* Nonzero if X is a hard reg that can be used as a base reg. */
#define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
#define STRICT 1
 
#endif
 
/* A C expression that defines the optional machine-dependent
constraint letters that can be used to segregate specific types of
operands, usually memory references, for the target machine.
Normally this macro will not be defined. If it is required for a
particular target machine, it should return 1 if VALUE corresponds
to the operand type represented by the constraint letter C. If C
is not defined as an extra constraint, the value returned should
be 0 regardless of VALUE.
 
For example, on the ROMP, load instructions cannot have their
output in r0 if the memory reference contains a symbolic address.
Constraint letter `Q' is defined as representing a memory address
that does *not* contain a symbolic address. An alternative is
specified with a `Q' constraint on the input and `r' on the
output. The next alternative specifies `m' on the input and a
register class that does not include r0 on the output. */
 
#define EXTRA_CONSTRAINT(OP, C) \
((C) == 'Q' ? ep_memory_operand (OP, GET_MODE (OP), FALSE) \
: (C) == 'R' ? special_symbolref_operand (OP, VOIDmode) \
: (C) == 'S' ? (GET_CODE (OP) == SYMBOL_REF \
&& !SYMBOL_REF_ZDA_P (OP)) \
: (C) == 'T' ? ep_memory_operand (OP, GET_MODE (OP), TRUE) \
: (C) == 'U' ? ((GET_CODE (OP) == SYMBOL_REF \
&& SYMBOL_REF_ZDA_P (OP)) \
|| (GET_CODE (OP) == CONST \
&& GET_CODE (XEXP (OP, 0)) == PLUS \
&& GET_CODE (XEXP (XEXP (OP, 0), 0)) == SYMBOL_REF \
&& SYMBOL_REF_ZDA_P (XEXP (XEXP (OP, 0), 0)))) \
: 0)
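/* Informally: 'Q' and 'T' accept memory operands reachable through the
   ep-relative short load/store forms (they differ only in the flag
   passed to ep_memory_operand), 'R' accepts references to symbols in
   the special small data areas, 'S' accepts a SYMBOL_REF that is not
   in the zero data area, and 'U' accepts a ZDA symbol, possibly plus
   a constant offset. */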
/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
that is a valid memory address for an instruction.
The MODE argument is the machine mode for the MEM expression
that wants to use this address.
 
The other macros defined here are used only in GO_IF_LEGITIMATE_ADDRESS,
except for CONSTANT_ADDRESS_P which is actually
machine-independent. */
 
/* Accept either REG or SUBREG where a register is valid. */
#define RTX_OK_FOR_BASE_P(X) \
((REG_P (X) && REG_OK_FOR_BASE_P (X)) \
|| (GET_CODE (X) == SUBREG && REG_P (SUBREG_REG (X)) \
&& REG_OK_FOR_BASE_P (SUBREG_REG (X))))
 
#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \
do { \
if (RTX_OK_FOR_BASE_P (X)) \
goto ADDR; \
if (CONSTANT_ADDRESS_P (X) \
&& (MODE == QImode || INTVAL (X) % 2 == 0) \
&& (GET_MODE_SIZE (MODE) <= 4 || INTVAL (X) % 4 == 0)) \
goto ADDR; \
if (GET_CODE (X) == LO_SUM \
&& REG_P (XEXP (X, 0)) \
&& REG_OK_FOR_BASE_P (XEXP (X, 0)) \
&& CONSTANT_P (XEXP (X, 1)) \
&& (GET_CODE (XEXP (X, 1)) != CONST_INT \
|| ((MODE == QImode || INTVAL (XEXP (X, 1)) % 2 == 0) \
&& CONST_OK_FOR_K (INTVAL (XEXP (X, 1))))) \
&& GET_MODE_SIZE (MODE) <= GET_MODE_SIZE (word_mode)) \
goto ADDR; \
if (special_symbolref_operand (X, MODE) \
&& (GET_MODE_SIZE (MODE) <= GET_MODE_SIZE (word_mode))) \
goto ADDR; \
if (GET_CODE (X) == PLUS \
&& RTX_OK_FOR_BASE_P (XEXP (X, 0)) \
&& CONSTANT_ADDRESS_P (XEXP (X, 1)) \
&& ((MODE == QImode || INTVAL (XEXP (X, 1)) % 2 == 0) \
&& CONST_OK_FOR_K (INTVAL (XEXP (X, 1)) \
+ (GET_MODE_NUNITS (MODE) * UNITS_PER_WORD)))) \
goto ADDR; \
} while (0)
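/* In summary, the accepted addresses are: a base register (or a SUBREG
   of one); a small constant address with the alignment the mode needs;
   a LO_SUM of a base register and a suitably aligned constant; a
   reference to a symbol in one of the small data areas; and a base
   register plus a 16-bit constant offset that keeps the whole access
   in range. */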
 
/* Go to LABEL if ADDR (a legitimate address expression)
has an effect that depends on the machine mode it is used for. */
 
#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL) {}
 
/* Nonzero if the constant value X is a legitimate general operand.
It is given that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
 
#define LEGITIMATE_CONSTANT_P(X) \
(GET_CODE (X) == CONST_DOUBLE \
|| !(GET_CODE (X) == CONST \
&& GET_CODE (XEXP (X, 0)) == PLUS \
&& GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
&& GET_CODE (XEXP (XEXP (X, 0), 1)) == CONST_INT \
&& ! CONST_OK_FOR_K (INTVAL (XEXP (XEXP (X, 0), 1)))))
/* Tell final.c how to eliminate redundant test instructions. */
 
/* Here we define machine-dependent flags and fields in cc_status
(see `conditions.h'). No extra ones are needed for the V850. */
 
/* Store in cc_status the expressions
that the condition codes will describe
after execution of an instruction whose pattern is EXP.
Do not alter them if the instruction would not alter the cc's. */
 
#define CC_OVERFLOW_UNUSABLE 0x200
#define CC_NO_CARRY CC_NO_OVERFLOW
#define NOTICE_UPDATE_CC(EXP, INSN) notice_update_cc(EXP, INSN)
 
/* Nonzero if access to memory by bytes or half words is no faster
than accessing full words. */
#define SLOW_BYTE_ACCESS 1
 
/* According to expr.c, a value of around 6 should minimize code size, and
for the V850 series, that's our primary concern. */
#define MOVE_RATIO 6
 
/* Indirect calls are expensive, never turn a direct call
into an indirect call. */
#define NO_FUNCTION_CSE
 
/* The four different data regions on the v850. */
typedef enum
{
DATA_AREA_NORMAL,
DATA_AREA_SDA,
DATA_AREA_TDA,
DATA_AREA_ZDA
} v850_data_area;
 
#define TEXT_SECTION_ASM_OP "\t.section .text"
#define DATA_SECTION_ASM_OP "\t.section .data"
#define BSS_SECTION_ASM_OP "\t.section .bss"
#define SDATA_SECTION_ASM_OP "\t.section .sdata,\"aw\""
#define SBSS_SECTION_ASM_OP "\t.section .sbss,\"aw\""
 
#define SCOMMON_ASM_OP "\t.scomm\t"
#define ZCOMMON_ASM_OP "\t.zcomm\t"
#define TCOMMON_ASM_OP "\t.tcomm\t"
 
#define ASM_COMMENT_START "#"
 
/* Output to assembler file text saying following lines
may contain character constants, extra white space, comments, etc. */
 
#define ASM_APP_ON "#APP\n"
 
/* Output to assembler file text saying following lines
no longer contain unusual constructs. */
 
#define ASM_APP_OFF "#NO_APP\n"
 
#undef USER_LABEL_PREFIX
#define USER_LABEL_PREFIX "_"
 
#define OUTPUT_ADDR_CONST_EXTRA(FILE, X, FAIL) \
if (! v850_output_addr_const_extra (FILE, X)) \
goto FAIL
 
/* This says how to output the assembler to define a global
uninitialized but not common symbol. */
 
#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \
asm_output_aligned_bss ((FILE), (DECL), (NAME), (SIZE), (ALIGN))
 
#undef ASM_OUTPUT_ALIGNED_BSS
#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \
v850_output_aligned_bss (FILE, DECL, NAME, SIZE, ALIGN)
 
/* This says how to output the assembler to define a global
uninitialized, common symbol. */
#undef ASM_OUTPUT_ALIGNED_COMMON
#undef ASM_OUTPUT_COMMON
#define ASM_OUTPUT_ALIGNED_DECL_COMMON(FILE, DECL, NAME, SIZE, ALIGN) \
v850_output_common (FILE, DECL, NAME, SIZE, ALIGN)
 
/* This says how to output the assembler to define a local
uninitialized symbol. */
#undef ASM_OUTPUT_ALIGNED_LOCAL
#undef ASM_OUTPUT_LOCAL
#define ASM_OUTPUT_ALIGNED_DECL_LOCAL(FILE, DECL, NAME, SIZE, ALIGN) \
v850_output_local (FILE, DECL, NAME, SIZE, ALIGN)
/* Globalizing directive for a label. */
#define GLOBAL_ASM_OP "\t.global "
 
#define ASM_PN_FORMAT "%s___%lu"
 
/* This is how we tell the assembler that two symbols have the same value. */
 
#define ASM_OUTPUT_DEF(FILE,NAME1,NAME2) \
do { assemble_name(FILE, NAME1); \
fputs(" = ", FILE); \
assemble_name(FILE, NAME2); \
fputc('\n', FILE); } while (0)
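/* For example (hypothetical names), defining "_fast_memcpy" as an alias
   for "_memcpy" emits the single assembler line:

       _fast_memcpy = _memcpy
*/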
 
 
/* How to refer to registers in assembler output.
This sequence is indexed by the compiler's hard-register-number (see above). */
 
#define REGISTER_NAMES \
{ "r0", "r1", "r2", "sp", "gp", "r5", "r6" , "r7", \
"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", \
"r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", \
"r24", "r25", "r26", "r27", "r28", "r29", "ep", "r31", \
".fp", ".ap"}
 
#define ADDITIONAL_REGISTER_NAMES \
{ { "zero", 0 }, \
{ "hp", 2 }, \
{ "r3", 3 }, \
{ "r4", 4 }, \
{ "tp", 5 }, \
{ "fp", 29 }, \
{ "r30", 30 }, \
{ "lp", 31} }
 
/* Print an instruction operand X on file FILE.
See v850.c for details. */
 
#define PRINT_OPERAND(FILE, X, CODE) print_operand (FILE, X, CODE)
 
#define PRINT_OPERAND_PUNCT_VALID_P(CODE) \
((CODE) == '.')
 
/* Print a memory operand whose address is X, on file FILE.
This uses a function in v850.c. */
 
#define PRINT_OPERAND_ADDRESS(FILE, ADDR) print_operand_address (FILE, ADDR)
 
#define ASM_OUTPUT_REG_PUSH(FILE,REGNO)
#define ASM_OUTPUT_REG_POP(FILE,REGNO)
 
/* This is how to output an element of a case-vector that is absolute. */
 
#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \
fprintf (FILE, "\t%s .L%d\n", \
(TARGET_BIG_SWITCH ? ".long" : ".short"), VALUE)
 
/* This is how to output an element of a case-vector that is relative. */
 
/* Disable the shift, which is for the currently disabled "switch"
opcode. See casesi in v850.md. */
#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
fprintf (FILE, "\t%s %s.L%d-.L%d%s\n", \
(TARGET_BIG_SWITCH ? ".long" : ".short"), \
(0 && ! TARGET_BIG_SWITCH && TARGET_V850E ? "(" : ""), \
VALUE, REL, \
(0 && ! TARGET_BIG_SWITCH && TARGET_V850E ? ")>>1" : ""))
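/* With TARGET_BIG_SWITCH these emit table entries such as (label
   numbers hypothetical):

       .long .L42            absolute entry (ASM_OUTPUT_ADDR_VEC_ELT)
       .long .L42-.L40       relative entry (ASM_OUTPUT_ADDR_DIFF_ELT)

   ".short" is used instead of ".long" when TARGET_BIG_SWITCH is not in
   effect.  */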
 
#define ASM_OUTPUT_ALIGN(FILE, LOG) \
if ((LOG) != 0) \
fprintf (FILE, "\t.align %d\n", (LOG))
 
/* We don't have to worry about dbx compatibility for the v850. */
#define DEFAULT_GDB_EXTENSIONS 1
 
/* Use stabs debugging info by default. */
#undef PREFERRED_DEBUGGING_TYPE
#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
 
/* Specify the machine mode that this machine uses
for the index in the tablejump instruction. */
#define CASE_VECTOR_MODE (TARGET_BIG_SWITCH ? SImode : HImode)
 
/* Define as C expression which evaluates to nonzero if the tablejump
instruction expects the table to contain offsets from the address of the
table.
Do not define this if the table should contain absolute addresses. */
#define CASE_VECTOR_PC_RELATIVE 1
 
/* The switch instruction requires that the jump table immediately follow
it. */
#define JUMP_TABLES_IN_TEXT_SECTION 1
 
/* svr4.h defines this assuming that 4 byte alignment is required. */
#undef ASM_OUTPUT_BEFORE_CASE_LABEL
#define ASM_OUTPUT_BEFORE_CASE_LABEL(FILE,PREFIX,NUM,TABLE) \
ASM_OUTPUT_ALIGN ((FILE), (TARGET_BIG_SWITCH ? 2 : 1));
 
#define WORD_REGISTER_OPERATIONS
 
/* Byte and short loads sign extend the value to a word. */
#define LOAD_EXTEND_OP(MODE) SIGN_EXTEND
 
/* This flag, if defined, says the same insns that convert to a signed fixnum
also convert validly to an unsigned one. */
#define FIXUNS_TRUNC_LIKE_FIX_TRUNC
 
/* Max number of bytes we can move from memory to memory
in one reasonably fast instruction. */
#define MOVE_MAX 4
 
/* Define if shifts truncate the shift count
which implies one can omit a sign-extension or zero-extension
of a shift count. */
#define SHIFT_COUNT_TRUNCATED 1
 
/* Value is 1 if truncating an integer of INPREC bits to OUTPREC bits
is done just by pretending it is already truncated. */
#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
 
/* Specify the machine mode that pointers have.
After generation of rtl, the compiler makes no further distinction
between pointers and any other objects of this machine mode. */
#define Pmode SImode
 
/* A function address in a call instruction
is a byte address (for indexing purposes)
so give the MEM rtx a byte's mode. */
#define FUNCTION_MODE QImode
 
/* Tell the compiler we want to support GHS pragmas. */
#define REGISTER_TARGET_PRAGMAS() do { \
c_register_pragma ("ghs", "interrupt", ghs_pragma_interrupt); \
c_register_pragma ("ghs", "section", ghs_pragma_section); \
c_register_pragma ("ghs", "starttda", ghs_pragma_starttda); \
c_register_pragma ("ghs", "startsda", ghs_pragma_startsda); \
c_register_pragma ("ghs", "startzda", ghs_pragma_startzda); \
c_register_pragma ("ghs", "endtda", ghs_pragma_endtda); \
c_register_pragma ("ghs", "endsda", ghs_pragma_endsda); \
c_register_pragma ("ghs", "endzda", ghs_pragma_endzda); \
} while (0)
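/* In user code the registered pragmas are written in the GHS style,
   for example (hypothetical handler and variable names):

       #pragma ghs interrupt
       void timer_isr (void);          treated as an interrupt handler

       #pragma ghs starttda
       int tiny_counter;               placed in the tiny data area
       #pragma ghs endtda
*/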
 
/* enum GHS_SECTION_KIND is an enumeration of the kinds of sections that
can appear in the "ghs section" pragma. These names are used to index
into the GHS_default_section_names[] and GHS_current_section_names[]
that are defined in v850.c, and so the ordering of each must remain
consistent.
 
These arrays give the default and current names for each kind of
section defined by the GHS pragmas. The current names can be changed
by the "ghs section" pragma. If the current names are null, use
the default names. Note that the two arrays have different types.
 
For the *normal* section kinds (like .data, .text, etc.) we do not
want to explicitly force the names of these sections, but would rather
let the linker (or at least the back end) choose them, UNLESS the
user has forced a specific name for these section kinds. To accomplish
this, set the name in ghs_default_section_names to null. */
 
enum GHS_section_kind
{
GHS_SECTION_KIND_DEFAULT,
 
GHS_SECTION_KIND_TEXT,
GHS_SECTION_KIND_DATA,
GHS_SECTION_KIND_RODATA,
GHS_SECTION_KIND_BSS,
GHS_SECTION_KIND_SDATA,
GHS_SECTION_KIND_ROSDATA,
GHS_SECTION_KIND_TDATA,
GHS_SECTION_KIND_ZDATA,
GHS_SECTION_KIND_ROZDATA,
 
COUNT_OF_GHS_SECTION_KINDS /* must be last */
};
 
/* The following code is for handling pragmas supported by the
v850 compiler produced by Green Hills Software. This is at
the specific request of a customer. */
 
typedef struct data_area_stack_element
{
struct data_area_stack_element * prev;
v850_data_area data_area; /* Current default data area. */
} data_area_stack_element;
 
/* Track the current data area set by the
data area pragma (which can be nested). */
extern data_area_stack_element * data_area_stack;
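/* A minimal sketch of how a "start" pragma handler is expected to use
   this stack (simplified; the real handlers live in v850.c):

       static void
       push_data_area (v850_data_area area)
       {
         data_area_stack_element *elem = XNEW (data_area_stack_element);
         elem->prev = data_area_stack;      remember the enclosing default
         elem->data_area = area;            make AREA the new default
         data_area_stack = elem;
       }

   The matching "end" pragma pops the element and restores elem->prev.  */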
 
/* Names of the various data areas used on the v850. */
extern union tree_node * GHS_default_section_names [(int) COUNT_OF_GHS_SECTION_KINDS];
extern union tree_node * GHS_current_section_names [(int) COUNT_OF_GHS_SECTION_KINDS];
 
/* The assembler op to start the file. */
 
#define FILE_ASM_OP "\t.file\n"
 
/* Enable the register move pass to improve code. */
#define ENABLE_REGMOVE_PASS
 
 
/* Implement ZDA, TDA, and SDA */
 
#define EP_REGNUM 30 /* ep register number */
 
#define SYMBOL_FLAG_ZDA (SYMBOL_FLAG_MACH_DEP << 0)
#define SYMBOL_FLAG_TDA (SYMBOL_FLAG_MACH_DEP << 1)
#define SYMBOL_FLAG_SDA (SYMBOL_FLAG_MACH_DEP << 2)
#define SYMBOL_REF_ZDA_P(X) ((SYMBOL_REF_FLAGS (X) & SYMBOL_FLAG_ZDA) != 0)
#define SYMBOL_REF_TDA_P(X) ((SYMBOL_REF_FLAGS (X) & SYMBOL_FLAG_TDA) != 0)
#define SYMBOL_REF_SDA_P(X) ((SYMBOL_REF_FLAGS (X) & SYMBOL_FLAG_SDA) != 0)
 
#define TARGET_ASM_INIT_SECTIONS v850_asm_init_sections
 
#endif /* ! GCC_V850_H */
/v850-protos.h
0,0 → 1,74
/* Prototypes for v850.c functions used in the md file & elsewhere.
Copyright (C) 1999, 2000, 2002, 2004, 2005, 2007
Free Software Foundation, Inc.
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
 
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
 
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
 
/* Function prototypes that cannot exist in v850.h due to dependency
complications. */
#ifndef GCC_V850_PROTOS_H
#define GCC_V850_PROTOS_H
 
#define Mmode enum machine_mode
 
extern void expand_prologue (void);
extern void expand_epilogue (void);
extern int v850_handle_pragma (int (*)(void), void (*)(int), char *);
extern int compute_register_save_size (long *);
extern int compute_frame_size (int, long *);
extern void v850_init_expanders (void);
 
#ifdef RTX_CODE
extern int v850_output_addr_const_extra (FILE *, rtx);
extern rtx v850_return_addr (int);
extern void print_operand (FILE *, rtx, int );
extern void print_operand_address (FILE *, rtx);
extern const char *output_move_double (rtx *);
extern const char *output_move_single (rtx *);
extern void notice_update_cc (rtx, rtx);
extern char * construct_save_jarl (rtx);
extern char * construct_restore_jr (rtx);
#ifdef HAVE_MACHINE_MODES
extern char * construct_dispose_instruction (rtx);
extern char * construct_prepare_instruction (rtx);
extern int ep_memory_operand (rtx, Mmode, int);
#ifdef TREE_CODE
extern rtx function_arg (CUMULATIVE_ARGS *, Mmode, tree, int);
#endif
#endif
#endif /* RTX_CODE */
 
#ifdef TREE_CODE
extern int v850_interrupt_function_p (tree);
extern void v850_output_aligned_bss (FILE *, tree, const char *, unsigned HOST_WIDE_INT, int);
extern void v850_output_common (FILE *, tree, const char *, int, int);
extern void v850_output_local (FILE *, tree, const char *, int, int);
extern v850_data_area v850_get_data_area (tree);
#endif
 
extern void ghs_pragma_section (struct cpp_reader *);
extern void ghs_pragma_interrupt (struct cpp_reader *);
extern void ghs_pragma_starttda (struct cpp_reader *);
extern void ghs_pragma_startsda (struct cpp_reader *);
extern void ghs_pragma_startzda (struct cpp_reader *);
extern void ghs_pragma_endtda (struct cpp_reader *);
extern void ghs_pragma_endsda (struct cpp_reader *);
extern void ghs_pragma_endzda (struct cpp_reader *);
 
#undef Mmode
 
#endif /* ! GCC_V850_PROTOS_H */
