OpenCores
URL https://opencores.org/ocsvn/openrisc/openrisc/trunk

Subversion Repositories openrisc

Compare Revisions

  • This comparison shows the changes necessary to convert path
    /openrisc/trunk/gnu-old/gcc-4.2.2/gcc/config/m32r
    from Rev 154 to Rev 816
    Reverse comparison

Rev 154 → Rev 816

/t-linux
0,0 → 1,43
# lib1funcs.asm is currently empty, so nothing is built for libgcc1
# by the cross assembler.
CROSS_LIBGCC1 =

# These are really part of libgcc1, but this will cause them to be
# built correctly, so...
#
# fp-bit.c / dp-bit.c are the software floating-point support routines,
# generated below from the generic $(srcdir)/config/fp-bit.c.
LIB2FUNCS_EXTRA = fp-bit.c dp-bit.c

# Turn off the SDA while compiling libgcc2. There are no headers for it
# and we want maximal upward compatibility here.
TARGET_LIBGCC2_CFLAGS = -G 0 -fPIC

# With FLOAT defined, fp-bit.c implements the single-precision (SFmode)
# routines.  NOTE: recipe lines must begin with a hard tab.
fp-bit.c: $(srcdir)/config/fp-bit.c
	echo '#define FLOAT' > $@
	cat $(srcdir)/config/fp-bit.c >> $@

# Without FLOAT defined, fp-bit.c implements the double-precision
# (DFmode) routines.
dp-bit.c: $(srcdir)/config/fp-bit.c
	cat $(srcdir)/config/fp-bit.c > $@

# We need to use -fpic when we are using gcc to compile the routines in
# initfini.c. This is only really needed when we are going to use gcc/g++
# to produce a shared library, but since we don't know ahead of time when
# we will be doing that, we just always use -fpic when compiling the
# routines in initfini.c.
# -fpic currently isn't supported for the m32r.
CRTSTUFF_T_CFLAGS_S = -fPIC

# Don't run fixproto
STMP_FIXPROTO =
# Don't install "assert.h" in gcc. We use the one in glibc.
INSTALL_ASSERT_H =
# Do not build libgcc1. Let gcc generate those functions. The GNU/Linux
# C library can handle them.
LIBGCC1 =
CROSS_LIBGCC1 =
LIBGCC1_TEST =

# Version script controlling symbol exports of the shared libgcc.
SHLIB_MAPFILES += $(srcdir)/config/m32r/libgcc-glibc.ver
/m32r.md
0,0 → 1,2621
;; Machine description of the Renesas M32R cpu for GNU C compiler
;; Copyright (C) 1996, 1997, 1998, 1999, 2001, 2003, 2004, 2005, 2007
;; Free Software Foundation, Inc.
 
;; This file is part of GCC.
 
;; GCC is free software; you can redistribute it and/or modify it
;; under the terms of the GNU General Public License as published
;; by the Free Software Foundation; either version 3, or (at your
;; option) any later version.
 
;; GCC is distributed in the hope that it will be useful, but WITHOUT
;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
;; License for more details.
 
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3. If not see
;; <http://www.gnu.org/licenses/>.
 
;; See file "rtl.def" for documentation on define_insn, match_*, et al.
;; UNSPEC_VOLATILE usage
;; Hard-coded numbers for the UNSPEC_VOLATILE patterns used below.
(define_constants
[(UNSPECV_BLOCKAGE 0)
(UNSPECV_FLUSH_ICACHE 1)])

;; UNSPEC usage
;; Hard-coded numbers for the (non-volatile) UNSPEC patterns: SDA base
;; load, condition-bit set, PIC address load, PC fetch, GOT offset.
(define_constants
[(UNSPEC_LOAD_SDA_BASE 2)
(UNSPEC_SET_CBIT 3)
(UNSPEC_PIC_LOAD_ADDR 4)
(UNSPEC_GET_PC 5)
(UNSPEC_GOTOFF 6)
])

;; Insn type. Used to default other attribute values.
;; The numeric suffix of each value encodes the insn's size in bytes
;; (int2 = 2-byte integer op, load4 = 4-byte load, and so on).
(define_attr "type"
"int2,int4,load2,load4,load8,store2,store4,store8,shift2,shift4,mul2,div4,uncond_branch,branch,call,multi,misc"
(const_string "misc"))

;; Length in bytes.
;; Defaults follow the "type" suffix: *2 types are 2 bytes, *4 types
;; are 4 bytes, "multi" patterns are budgeted 8 bytes, anything else 4.
(define_attr "length" ""
(cond [(eq_attr "type" "int2,load2,store2,shift2,mul2")
(const_int 2)

(eq_attr "type" "int4,load4,store4,shift4,div4")
(const_int 4)

(eq_attr "type" "multi")
(const_int 8)

(eq_attr "type" "uncond_branch,branch,call")
(const_int 4)]

(const_int 4)))

;; The length here is the length of a single asm. Unfortunately it might be
;; 2 or 4 so we must allow for 4. That's ok though.
(define_asm_attributes
[(set_attr "length" "4")
(set_attr "type" "multi")])

;; Whether an instruction is short (16-bit) or long (32-bit).
;; Derived from "type": the two-byte types are short, all others long.
(define_attr "insn_size" "short,long"
(if_then_else (eq_attr "type" "int2,load2,store2,shift2,mul2")
(const_string "short")
(const_string "long")))

;; The target CPU we're compiling for.
;; Chosen from the TARGET_M32RX / TARGET_M32R2 compile-time flags,
;; with plain "m32r" as the fallback.
(define_attr "cpu" "m32r,m32r2,m32rx"
(cond [(ne (symbol_ref "TARGET_M32RX") (const_int 0))
(const_string "m32rx")
(ne (symbol_ref "TARGET_M32R2") (const_int 0))
(const_string "m32r2")]
(const_string "m32r")))

;; Defines the pipeline where an instruction can be executed on.
;; For the M32R, a short instruction can execute one of the two pipes.
;; For the M32Rx, the restrictions are modelled in the second
;; condition of this attribute definition.
;; Outer cond: plain m32r case; inner cond: m32rx/m32r2 pipe choice.
(define_attr "m32r_pipeline" "either,s,o,long"
(cond [(and (eq_attr "cpu" "m32r")
(eq_attr "insn_size" "short"))
(const_string "either")
(eq_attr "insn_size" "!short")
(const_string "long")]
(cond [(eq_attr "type" "int2")
(const_string "either")
(eq_attr "type" "load2,store2,shift2,uncond_branch,branch,call")
(const_string "o")
(eq_attr "type" "mul2")
(const_string "s")]
(const_string "long"))))
;; ::::::::::::::::::::
;; ::
;; :: Pipeline description
;; ::
;; ::::::::::::::::::::
 
;; This model is based on Chapter 2, Appendix 3 and Appendix 4 of the
;; "M32R-FPU Software Manual", Revision 1.01, plus additional information
;; obtained by our best friend and mine, Google.
;;
;; The pipeline is modelled as a fetch unit, and a core with a memory unit,
;; two execution units, where "fetch" models IF and D, "memory" for MEM1
;; and MEM2, and "EXEC" for E, E1, E2, EM, and EA. Writeback and
;; bypasses are not modelled.
(define_automaton "m32r")

;; We pretend there are two short (16 bits) instruction fetchers. The
;; "s" short fetcher cannot be reserved until the "o" short fetcher is
;; reserved. Some instructions reserve both the left and right fetchers.
;; These fetch units are a hack to get GCC to better pack the instructions
;; for the M32Rx processor, which has two execution pipes.
;;
;; In reality there is only one decoder, which can decode either two 16 bits
;; instructions, or a single 32 bits instruction.
;;
;; Note, "fetch" models both the IF and the D pipeline stages.
;;
;; The m32rx core has two execution pipes. We name them o_E and s_E.
;; In addition, there's a memory unit.

(define_cpu_unit "o_IF,s_IF,o_E,s_E,memory" "m32r")

;; Prevent the s pipe from being reserved before the o pipe.
(absence_set "s_IF" "o_IF")
(absence_set "s_E" "o_E")

;; On the M32Rx, long instructions execute on both pipes, so reserve
;; both fetch slots and both pipes.
(define_reservation "long_IF" "o_IF+s_IF")
(define_reservation "long_E" "o_E+s_E")

;; ::::::::::::::::::::

;; Simple instructions do 4 stages: IF D E WB. WB is not modelled.
;; Hence, ready latency is 1.
;; Short non-load insn restricted to the left ("o") pipe.
(define_insn_reservation "short_left" 1
(and (eq_attr "m32r_pipeline" "o")
(and (eq_attr "insn_size" "short")
(eq_attr "type" "!load2")))
"o_IF,o_E")

;; Short non-load insn restricted to the right ("s") pipe.
(define_insn_reservation "short_right" 1
(and (eq_attr "m32r_pipeline" "s")
(and (eq_attr "insn_size" "short")
(eq_attr "type" "!load2")))
"s_IF,s_E")

;; Short non-load insn that may issue on either pipe.
(define_insn_reservation "short_either" 1
(and (eq_attr "m32r_pipeline" "either")
(and (eq_attr "insn_size" "short")
(eq_attr "type" "!load2")))
"o_IF|s_IF,o_E|s_E")

;; Long non-load insn on the single-issue M32R: latency 1.
(define_insn_reservation "long_m32r" 1
(and (eq_attr "cpu" "m32r")
(and (eq_attr "insn_size" "long")
(eq_attr "type" "!load4,load8")))
"long_IF,long_E")

;; Long non-load insn on the M32Rx occupies both pipes: latency 2.
(define_insn_reservation "long_m32rx" 2
(and (eq_attr "m32r_pipeline" "long")
(and (eq_attr "insn_size" "long")
(eq_attr "type" "!load4,load8")))
"long_IF,long_E")

;; Load/store instructions do 6 stages: IF D E MEM1 MEM2 WB.
;; MEM1 may require more than one cycle depending on locality. We
;; optimistically assume all memory is nearby, i.e. MEM1 takes only
;; one cycle. Hence, ready latency is 3.

;; The M32Rx can do short load/store only on the left pipe.
(define_insn_reservation "short_load_left" 3
(and (eq_attr "m32r_pipeline" "o")
(and (eq_attr "insn_size" "short")
(eq_attr "type" "load2")))
"o_IF,o_E,memory*2")

;; Short load on either pipe (plain M32R case).
(define_insn_reservation "short_load" 3
(and (eq_attr "m32r_pipeline" "either")
(and (eq_attr "insn_size" "short")
(eq_attr "type" "load2")))
"s_IF|o_IF,s_E|o_E,memory*2")

;; Long load on the M32R.
(define_insn_reservation "long_load" 3
(and (eq_attr "cpu" "m32r")
(and (eq_attr "insn_size" "long")
(eq_attr "type" "load4,load8")))
"long_IF,long_E,memory*2")

;; Long load on the M32Rx: reserves both pipes plus the memory unit.
(define_insn_reservation "long_load_m32rx" 3
(and (eq_attr "m32r_pipeline" "long")
(eq_attr "type" "load4,load8"))
"long_IF,long_E,memory*2")
 
(include "predicates.md")
;; Expand prologue as RTL
;; All the work is done in C by m32r_expand_prologue; the
;; (const_int 1) pattern body is a placeholder that is never emitted.
(define_expand "prologue"
[(const_int 1)]
""
"
{
m32r_expand_prologue ();
DONE;
}")
 
;; Move instructions.
;;
;; For QI and HI moves, the register must contain the full properly
;; sign-extended value. nonzero_bits assumes this [otherwise
;; SHORT_IMMEDIATES_SIGN_EXTEND must be used, but the comment for it
;; says it's a kludge and the .md files should be fixed instead].
 
;; QImode move: legitimize PIC symbolic sources, then force the source
;; into a register when the destination is memory, since mem<-const and
;; mem<-mem forms are not handled by the insn pattern.
(define_expand "movqi"
[(set (match_operand:QI 0 "general_operand" "")
(match_operand:QI 1 "general_operand" ""))]
""
"
{
/* Fixup PIC cases. */
if (flag_pic)
{
if (symbolic_operand (operands[1], QImode))
{
if (reload_in_progress || reload_completed)
operands[1] = m32r_legitimize_pic_address (operands[1], operands[0]);
else
operands[1] = m32r_legitimize_pic_address (operands[1], NULL_RTX);
}
}

/* Everything except mem = const or mem = mem can be done easily.
Objects in the small data area are handled too. */

if (GET_CODE (operands[0]) == MEM)
operands[1] = force_reg (QImode, operands[1]);
}")

;; QImode move alternatives: reg-reg mv, immediate ldi (short and long
;; encodings), byte load ldub, and byte store stb; at least one operand
;; must be a register.
(define_insn "*movqi_insn"
[(set (match_operand:QI 0 "move_dest_operand" "=r,r,r,r,r,T,m")
(match_operand:QI 1 "move_src_operand" "r,I,JQR,T,m,r,r"))]
"register_operand (operands[0], QImode) || register_operand (operands[1], QImode)"
"@
mv %0,%1
ldi %0,%#%1
ldi %0,%#%1
ldub %0,%1
ldub %0,%1
stb %1,%0
stb %1,%0"
[(set_attr "type" "int2,int2,int4,load2,load4,store2,store4")
(set_attr "length" "2,2,4,2,4,2,4")])

;; HImode move: same PIC fixup and mem-destination handling as movqi.
(define_expand "movhi"
[(set (match_operand:HI 0 "general_operand" "")
(match_operand:HI 1 "general_operand" ""))]
""
"
{
/* Fixup PIC cases. */
if (flag_pic)
{
if (symbolic_operand (operands[1], HImode))
{
if (reload_in_progress || reload_completed)
operands[1] = m32r_legitimize_pic_address (operands[1], operands[0]);
else
operands[1] = m32r_legitimize_pic_address (operands[1], NULL_RTX);
}
}

/* Everything except mem = const or mem = mem can be done easily. */

if (GET_CODE (operands[0]) == MEM)
operands[1] = force_reg (HImode, operands[1]);
}")

;; HImode move alternatives: as movqi plus a ld24 form (constraint K)
;; for constants, and halfword load lduh / store sth.
(define_insn "*movhi_insn"
[(set (match_operand:HI 0 "move_dest_operand" "=r,r,r,r,r,r,T,m")
(match_operand:HI 1 "move_src_operand" "r,I,JQR,K,T,m,r,r"))]
"register_operand (operands[0], HImode) || register_operand (operands[1], HImode)"
"@
mv %0,%1
ldi %0,%#%1
ldi %0,%#%1
ld24 %0,%#%1
lduh %0,%1
lduh %0,%1
sth %1,%0
sth %1,%0"
[(set_attr "type" "int2,int2,int4,int4,load2,load4,store2,store4")
(set_attr "length" "2,2,4,4,2,4,2,4")])
 
;; Named push/pop expanders: pre-decrement store and post-increment
;; load through a pointer register.  Presumably invoked from the C
;; prologue/epilogue code via gen_movsi_push/gen_movsi_pop — confirm
;; against m32r.c.
(define_expand "movsi_push"
[(set (mem:SI (pre_dec:SI (match_operand:SI 0 "register_operand" "")))
(match_operand:SI 1 "register_operand" ""))]
""
"")

(define_expand "movsi_pop"
[(set (match_operand:SI 0 "register_operand" "")
(mem:SI (post_inc:SI (match_operand:SI 1 "register_operand" ""))))]
""
"")

;; SImode move: PIC fixup and mem-destination legalization as above,
;; then two special cases that emit dedicated sequences: small-data
;; references (movsi_sda) and 32-bit symbolic addresses that need a
;; seth/add3 pair (movsi_addr32).
(define_expand "movsi"
[(set (match_operand:SI 0 "general_operand" "")
(match_operand:SI 1 "general_operand" ""))]
""
"
{
/* Fixup PIC cases. */
if (flag_pic)
{
if (symbolic_operand (operands[1], SImode))
{
if (reload_in_progress || reload_completed)
operands[1] = m32r_legitimize_pic_address (operands[1], operands[0]);
else
operands[1] = m32r_legitimize_pic_address (operands[1], NULL_RTX);
}
}

/* Everything except mem = const or mem = mem can be done easily. */

if (GET_CODE (operands[0]) == MEM)
operands[1] = force_reg (SImode, operands[1]);

/* Small Data Area reference? */
if (small_data_operand (operands[1], SImode))
{
emit_insn (gen_movsi_sda (operands[0], operands[1]));
DONE;
}

/* If medium or large code model, symbols have to be loaded with
seth/add3. */
if (addr32_operand (operands[1], SImode))
{
emit_insn (gen_movsi_addr32 (operands[0], operands[1]));
DONE;
}
}")
 
;; ??? Do we need a const_double constraint here for large unsigned values?
;; SImode move workhorse.  Register destinations select between mv,
;; pop (post-increment load from the stack pointer), ld, and the
;; constant loaders ldi (16-bit signed), ld24 (24-bit unsigned) and
;; seth (high 16 bits); returning "#" forces a split (see the
;; two_insn_const_operand split below).  Memory destinations select
;; push (pre-decrement store to the stack pointer) or st.
(define_insn "*movsi_insn"
[(set (match_operand:SI 0 "move_dest_operand" "=r,r,r,r,r,r,r,r,r,T,S,m")
(match_operand:SI 1 "move_src_operand" "r,I,J,MQ,L,n,T,U,m,r,r,r"))]
"register_operand (operands[0], SImode) || register_operand (operands[1], SImode)"
"*
{
if (GET_CODE (operands[0]) == REG || GET_CODE (operands[1]) == SUBREG)
{
switch (GET_CODE (operands[1]))
{
HOST_WIDE_INT value;

default:
break;

case REG:
case SUBREG:
return \"mv %0,%1\";

case MEM:
if (GET_CODE (XEXP (operands[1], 0)) == POST_INC
&& XEXP (XEXP (operands[1], 0), 0) == stack_pointer_rtx)
return \"pop %0\";

return \"ld %0,%1\";

case CONST_INT:
value = INTVAL (operands[1]);
if (INT16_P (value))
return \"ldi %0,%#%1\\t; %X1\";

if (UINT24_P (value))
return \"ld24 %0,%#%1\\t; %X1\";

if (UPPER16_P (value))
return \"seth %0,%#%T1\\t; %X1\";

return \"#\";

case CONST:
case SYMBOL_REF:
case LABEL_REF:
if (TARGET_ADDR24)
return \"ld24 %0,%#%1\";

return \"#\";
}
}

else if (GET_CODE (operands[0]) == MEM
&& (GET_CODE (operands[1]) == REG || GET_CODE (operands[1]) == SUBREG))
{
if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC
&& XEXP (XEXP (operands[0], 0), 0) == stack_pointer_rtx)
return \"push %1\";

return \"st %1,%0\";
}

gcc_unreachable ();
}"
[(set_attr "type" "int2,int2,int4,int4,int4,multi,load2,load2,load4,store2,store2,store4")
(set_attr "length" "2,2,4,4,4,8,2,2,4,2,2,4")])
 
; Try to use a four byte / two byte pair for constants not loadable with
; ldi, ld24, seth.
 
;; Split a constant that no single insn can load.  Tries, in order:
;; ld24 of the complement followed by not; ld24 of a shifted value
;; followed by a left shift; and finally the generic seth of the high
;; 16 bits followed by or3 of the low 16 bits.
(define_split
[(set (match_operand:SI 0 "register_operand" "")
(match_operand:SI 1 "two_insn_const_operand" ""))]
""
[(set (match_dup 0) (match_dup 2))
(set (match_dup 0) (ior:SI (match_dup 0) (match_dup 3)))]
"
{
unsigned HOST_WIDE_INT val = INTVAL (operands[1]);
unsigned HOST_WIDE_INT tmp;
int shift;

/* In all cases we will emit two instructions. However we try to
use 2 byte instructions wherever possible. We can assume the
constant isn't loadable with any of ldi, ld24, or seth. */

/* See if we can load a 24 bit unsigned value and invert it. */
if (UINT24_P (~ val))
{
emit_insn (gen_movsi (operands[0], GEN_INT (~ val)));
emit_insn (gen_one_cmplsi2 (operands[0], operands[0]));
DONE;
}

/* See if we can load a 24 bit unsigned value and shift it into place.
0x01fffffe is just beyond ld24's range. */
for (shift = 1, tmp = 0x01fffffe;
shift < 8;
++shift, tmp <<= 1)
{
if ((val & ~tmp) == 0)
{
emit_insn (gen_movsi (operands[0], GEN_INT (val >> shift)));
emit_insn (gen_ashlsi3 (operands[0], operands[0], GEN_INT (shift)));
DONE;
}
}

/* Can't use any two byte insn, fall back to seth/or3. Use ~0xffff instead
of 0xffff0000, since the later fails on a 64-bit host. */
operands[2] = GEN_INT ((val) & ~0xffff);
operands[3] = GEN_INT ((val) & 0xffff);
}")

;; Split a 32-bit symbolic address into a high/lo_sum pair
;; (seth + add3) when compiling for the 32-bit address model.
(define_split
[(set (match_operand:SI 0 "register_operand" "")
(match_operand:SI 1 "seth_add3_operand" ""))]
"TARGET_ADDR32"
[(set (match_dup 0)
(high:SI (match_dup 1)))
(set (match_dup 0)
(lo_sum:SI (match_dup 0)
(match_dup 1)))]
"")
 
;; Small data area support.
;; The address of _SDA_BASE_ is loaded into a register and all objects in
;; the small data area are indexed off that. This is done for each reference
;; but cse will clean things up for us. We let the compiler choose the
;; register to use so we needn't allocate (and maybe even fix) a special
;; register to use. Since the load and store insns have a 16 bit offset the
;; total size of the data area can be 64K. However, if the data area lives
;; above 16M (24 bits), _SDA_BASE_ will have to be loaded with seth/add3 which
;; would then yield 3 instructions to reference an object [though there would
;; be no net loss if two or more objects were referenced]. The 3 insns can be
;; reduced back to 2 if the size of the small data area were reduced to 32K
;; [then seth + ld/st would work for any object in the area]. Doing this
;; would require special handling of _SDA_BASE_ (its value would be
;; (.sdata + 32K) & 0xffff0000) and reloc computations would be different
;; [I think]. What to do about this is deferred until later and for now we
;; require .sdata to be in the first 16M.
 
;; Load _SDA_BASE_ into a temporary (or the destination itself during
;; reload, when no new register may be created) and address the object
;; at a 16-bit lo_sum offset from it.
(define_expand "movsi_sda"
[(set (match_dup 2)
(unspec:SI [(const_int 0)] UNSPEC_LOAD_SDA_BASE))
(set (match_operand:SI 0 "register_operand" "")
(lo_sum:SI (match_dup 2)
(match_operand:SI 1 "small_data_operand" "")))]
""
"
{
if (reload_in_progress || reload_completed)
operands[2] = operands[0];
else
operands[2] = gen_reg_rtx (SImode);
}")

;; _SDA_BASE_ load for the 32-bit address model: seth/add3 pair.
(define_insn "*load_sda_base_32"
[(set (match_operand:SI 0 "register_operand" "=r")
(unspec:SI [(const_int 0)] UNSPEC_LOAD_SDA_BASE))]
"TARGET_ADDR32"
"seth %0,%#shigh(_SDA_BASE_)\;add3 %0,%0,%#low(_SDA_BASE_)"
[(set_attr "type" "multi")
(set_attr "length" "8")])

;; _SDA_BASE_ load for the 24-bit address model: a single ld24.
(define_insn "*load_sda_base"
[(set (match_operand:SI 0 "register_operand" "=r")
(unspec:SI [(const_int 0)] UNSPEC_LOAD_SDA_BASE))]
""
"ld24 %0,#_SDA_BASE_"
[(set_attr "type" "int4")
(set_attr "length" "4")])

;; 32 bit address support.

;; Load a full 32-bit symbolic address as high + lo_sum; as in
;; movsi_sda, the destination doubles as the temporary during reload.
(define_expand "movsi_addr32"
[(set (match_dup 2)
; addr32_operand isn't used because it's too restrictive,
; seth_add3_operand is more general and thus safer.
(high:SI (match_operand:SI 1 "seth_add3_operand" "")))
(set (match_operand:SI 0 "register_operand" "")
(lo_sum:SI (match_dup 2) (match_dup 1)))]
""
"
{
if (reload_in_progress || reload_completed)
operands[2] = operands[0];
else
operands[2] = gen_reg_rtx (SImode);
}")

;; seth: set the high 16 bits of a symbolic address.
(define_insn "set_hi_si"
[(set (match_operand:SI 0 "register_operand" "=r")
(high:SI (match_operand 1 "symbolic_operand" "")))]
""
"seth %0,%#shigh(%1)"
[(set_attr "type" "int4")
(set_attr "length" "4")])

;; add3: add in the low 16 bits (the lo_sum part) of an address.
(define_insn "lo_sum_si"
[(set (match_operand:SI 0 "register_operand" "=r")
(lo_sum:SI (match_operand:SI 1 "register_operand" "r")
(match_operand:SI 2 "immediate_operand" "in")))]
""
"add3 %0,%1,%#%B2"
[(set_attr "type" "int4")
(set_attr "length" "4")])
 
;; DImode move: same PIC fixup and mem-destination handling as the
;; narrower moves; the insn itself is always split into word-sized
;; moves after reload.
(define_expand "movdi"
[(set (match_operand:DI 0 "general_operand" "")
(match_operand:DI 1 "general_operand" ""))]
""
"
{
/* Fixup PIC cases. */
if (flag_pic)
{
if (symbolic_operand (operands[1], DImode))
{
if (reload_in_progress || reload_completed)
operands[1] = m32r_legitimize_pic_address (operands[1], operands[0]);
else
operands[1] = m32r_legitimize_pic_address (operands[1], NULL_RTX);
}
}

/* Everything except mem = const or mem = mem can be done easily. */

if (GET_CODE (operands[0]) == MEM)
operands[1] = force_reg (DImode, operands[1]);
}")

;; Placeholder insn: "#" means every alternative is split (below).
(define_insn "*movdi_insn"
[(set (match_operand:DI 0 "move_dest_operand" "=r,r,r,r,m")
(match_operand:DI 1 "move_double_src_operand" "r,nG,F,m,r"))]
"register_operand (operands[0], DImode) || register_operand (operands[1], DImode)"
"#"
[(set_attr "type" "multi,multi,multi,load8,store8")
(set_attr "length" "4,4,16,6,6")])

;; After reload, expand the DImode move into the word-by-word sequence
;; built by gen_split_move_double.
(define_split
[(set (match_operand:DI 0 "move_dest_operand" "")
(match_operand:DI 1 "move_double_src_operand" ""))]
"reload_completed"
[(match_dup 2)]
"operands[2] = gen_split_move_double (operands);")
;; Floating point move insns.
 
;; SFmode move: PIC fixup and mem-destination handling as for movsi.
(define_expand "movsf"
[(set (match_operand:SF 0 "general_operand" "")
(match_operand:SF 1 "general_operand" ""))]
""
"
{
/* Fixup PIC cases. */
if (flag_pic)
{
if (symbolic_operand (operands[1], SFmode))
{
if (reload_in_progress || reload_completed)
operands[1] = m32r_legitimize_pic_address (operands[1], operands[0]);
else
operands[1] = m32r_legitimize_pic_address (operands[1], NULL_RTX);
}
}

/* Everything except mem = const or mem = mem can be done easily. */

if (GET_CODE (operands[0]) == MEM)
operands[1] = force_reg (SFmode, operands[1]);
}")

;; SFmode move alternatives: mv, a split constant load ("#", handled
;; below), and word loads/stores in short and long encodings.
(define_insn "*movsf_insn"
[(set (match_operand:SF 0 "move_dest_operand" "=r,r,r,r,r,T,S,m")
(match_operand:SF 1 "move_src_operand" "r,F,U,S,m,r,r,r"))]
"register_operand (operands[0], SFmode) || register_operand (operands[1], SFmode)"
"@
mv %0,%1
#
ld %0,%1
ld %0,%1
ld %0,%1
st %1,%0
st %1,%0
st %1,%0"
;; ??? Length of alternative 1 is either 2, 4 or 8.
[(set_attr "type" "int2,multi,load2,load2,load4,store2,store2,store4")
(set_attr "length" "2,8,2,2,4,2,2,4")])

;; Split an SFmode constant load into an SImode move of its single
;; word, which the movsi machinery already knows how to load.
(define_split
[(set (match_operand:SF 0 "register_operand" "")
(match_operand:SF 1 "const_double_operand" ""))]
"reload_completed"
[(set (match_dup 2) (match_dup 3))]
"
{
operands[2] = operand_subword (operands[0], 0, 0, SFmode);
operands[3] = operand_subword (operands[1], 0, 0, SFmode);
}")

;; DFmode move: PIC fixup and mem-destination handling as above;
;; always split after reload like movdi.
(define_expand "movdf"
[(set (match_operand:DF 0 "general_operand" "")
(match_operand:DF 1 "general_operand" ""))]
""
"
{
/* Fixup PIC cases. */
if (flag_pic)
{
if (symbolic_operand (operands[1], DFmode))
{
if (reload_in_progress || reload_completed)
operands[1] = m32r_legitimize_pic_address (operands[1], operands[0]);
else
operands[1] = m32r_legitimize_pic_address (operands[1], NULL_RTX);
}
}

/* Everything except mem = const or mem = mem can be done easily. */

if (GET_CODE (operands[0]) == MEM)
operands[1] = force_reg (DFmode, operands[1]);
}")

;; Placeholder insn: every alternative is split after reload.
(define_insn "*movdf_insn"
[(set (match_operand:DF 0 "move_dest_operand" "=r,r,r,m")
(match_operand:DF 1 "move_double_src_operand" "r,F,m,r"))]
"register_operand (operands[0], DFmode) || register_operand (operands[1], DFmode)"
"#"
[(set_attr "type" "multi,multi,load8,store8")
(set_attr "length" "4,16,6,6")])

;; Expand the DFmode move into the word-by-word sequence built by
;; gen_split_move_double (shared with the DImode split).
(define_split
[(set (match_operand:DF 0 "move_dest_operand" "")
(match_operand:DF 1 "move_double_src_operand" ""))]
"reload_completed"
[(match_dup 2)]
"operands[2] = gen_split_move_double (operands);")
;; Zero extension instructions.
 
;; Zero-extend QI->HI: mask in-register with and3, or use the
;; zero-extending byte load ldub from memory.
(define_insn "zero_extendqihi2"
[(set (match_operand:HI 0 "register_operand" "=r,r,r")
(zero_extend:HI (match_operand:QI 1 "extend_operand" "r,T,m")))]
""
"@
and3 %0,%1,%#255
ldub %0,%1
ldub %0,%1"
[(set_attr "type" "int4,load2,load4")
(set_attr "length" "4,2,4")])

;; Zero-extend QI->SI: same strategy as QI->HI.
(define_insn "zero_extendqisi2"
[(set (match_operand:SI 0 "register_operand" "=r,r,r")
(zero_extend:SI (match_operand:QI 1 "extend_operand" "r,T,m")))]
""
"@
and3 %0,%1,%#255
ldub %0,%1
ldub %0,%1"
[(set_attr "type" "int4,load2,load4")
(set_attr "length" "4,2,4")])

;; Zero-extend HI->SI: mask with 65535 in-register, or use the
;; zero-extending halfword load lduh.
(define_insn "zero_extendhisi2"
[(set (match_operand:SI 0 "register_operand" "=r,r,r")
(zero_extend:SI (match_operand:HI 1 "extend_operand" "r,T,m")))]
""
"@
and3 %0,%1,%#65535
lduh %0,%1
lduh %0,%1"
[(set_attr "type" "int4,load2,load4")
(set_attr "length" "4,2,4")])
;; Signed conversions from a smaller integer to a larger integer
;; Sign-extend QI->HI: in-register form ("#") splits after reload into
;; a shift-left/shift-right pair; memory forms use the sign-extending
;; byte load ldb.  Note the "0" constraint ties input to output for
;; the split case.
(define_insn "extendqihi2"
[(set (match_operand:HI 0 "register_operand" "=r,r,r")
(sign_extend:HI (match_operand:QI 1 "extend_operand" "0,T,m")))]
""
"@
#
ldb %0,%1
ldb %0,%1"
[(set_attr "type" "multi,load2,load4")
(set_attr "length" "2,2,4")])

;; In-register QI->HI sign extension: shift left then arithmetic
;; shift right by 24, operating on the SImode view of the register.
(define_split
[(set (match_operand:HI 0 "register_operand" "")
(sign_extend:HI (match_operand:QI 1 "register_operand" "")))]
"reload_completed"
[(match_dup 2)
(match_dup 3)]
"
{
rtx op0 = gen_lowpart (SImode, operands[0]);
rtx shift = GEN_INT (24);

operands[2] = gen_ashlsi3 (op0, op0, shift);
operands[3] = gen_ashrsi3 (op0, op0, shift);
}")

;; Sign-extend QI->SI: same split/ldb strategy as extendqihi2.
(define_insn "extendqisi2"
[(set (match_operand:SI 0 "register_operand" "=r,r,r")
(sign_extend:SI (match_operand:QI 1 "extend_operand" "0,T,m")))]
""
"@
#
ldb %0,%1
ldb %0,%1"
[(set_attr "type" "multi,load2,load4")
(set_attr "length" "4,2,4")])

;; In-register QI->SI sign extension: shift left/right by 24.
(define_split
[(set (match_operand:SI 0 "register_operand" "")
(sign_extend:SI (match_operand:QI 1 "register_operand" "")))]
"reload_completed"
[(match_dup 2)
(match_dup 3)]
"
{
rtx shift = GEN_INT (24);

operands[2] = gen_ashlsi3 (operands[0], operands[0], shift);
operands[3] = gen_ashrsi3 (operands[0], operands[0], shift);
}")

;; Sign-extend HI->SI: split in-register, or the sign-extending
;; halfword load ldh from memory.
(define_insn "extendhisi2"
[(set (match_operand:SI 0 "register_operand" "=r,r,r")
(sign_extend:SI (match_operand:HI 1 "extend_operand" "0,T,m")))]
""
"@
#
ldh %0,%1
ldh %0,%1"
[(set_attr "type" "multi,load2,load4")
(set_attr "length" "4,2,4")])

;; In-register HI->SI sign extension: shift left/right by 16.
(define_split
[(set (match_operand:SI 0 "register_operand" "")
(sign_extend:SI (match_operand:HI 1 "register_operand" "")))]
"reload_completed"
[(match_dup 2)
(match_dup 3)]
"
{
rtx shift = GEN_INT (16);

operands[2] = gen_ashlsi3 (operands[0], operands[0], shift);
operands[3] = gen_ashrsi3 (operands[0], operands[0], shift);
}")
;; Arithmetic instructions.
 
; ??? Adding an alternative to split add3 of small constants into two
; insns yields better instruction packing but slower code. Adds of small
; values is done a lot.
 
;; SImode add: 2-byte add (reg), 2-byte addi (8-bit immediate, I),
;; or 4-byte three-address add3 (16-bit immediate, J).
(define_insn "addsi3"
[(set (match_operand:SI 0 "register_operand" "=r,r,r")
(plus:SI (match_operand:SI 1 "register_operand" "%0,0,r")
(match_operand:SI 2 "nonmemory_operand" "r,I,J")))]
""
"@
add %0,%2
addi %0,%#%2
add3 %0,%1,%#%2"
[(set_attr "type" "int2,int2,int4")
(set_attr "length" "2,2,4")])

;(define_split
; [(set (match_operand:SI 0 "register_operand" "")
; (plus:SI (match_operand:SI 1 "register_operand" "")
; (match_operand:SI 2 "int8_operand" "")))]
; "reload_completed
; && REGNO (operands[0]) != REGNO (operands[1])
; && INT8_P (INTVAL (operands[2]))
; && INTVAL (operands[2]) != 0"
; [(set (match_dup 0) (match_dup 1))
; (set (match_dup 0) (plus:SI (match_dup 0) (match_dup 2)))]
; "")

;; DImode add: clobbers the condition bit (CC reg 17) and is always
;; split after reload into the three-insn sequence below.
(define_insn "adddi3"
[(set (match_operand:DI 0 "register_operand" "=r")
(plus:DI (match_operand:DI 1 "register_operand" "%0")
(match_operand:DI 2 "register_operand" "r")))
(clobber (reg:CC 17))]
""
"#"
[(set_attr "type" "multi")
(set_attr "length" "6")])

;; ??? The cmp clears the condition bit. Can we speed up somehow?
;; Split of adddi3: clear the carry (cmp %r,%r), then addx the
;; least-significant words, then addx the most-significant words,
;; each addx consuming and setting the condition bit.
(define_split
[(set (match_operand:DI 0 "register_operand" "")
(plus:DI (match_operand:DI 1 "register_operand" "")
(match_operand:DI 2 "register_operand" "")))
(clobber (reg:CC 17))]
"reload_completed"
[(parallel [(set (reg:CC 17)
(const_int 0))
(use (match_dup 4))])
(parallel [(set (match_dup 4)
(plus:SI (match_dup 4)
(plus:SI (match_dup 5)
(ne:SI (reg:CC 17) (const_int 0)))))
(set (reg:CC 17)
(unspec:CC [(const_int 0)] UNSPEC_SET_CBIT))])
(parallel [(set (match_dup 6)
(plus:SI (match_dup 6)
(plus:SI (match_dup 7)
(ne:SI (reg:CC 17) (const_int 0)))))
(set (reg:CC 17)
(unspec:CC [(const_int 0)] UNSPEC_SET_CBIT))])]
"
{
operands[4] = operand_subword (operands[0], (WORDS_BIG_ENDIAN != 0), 0, DImode);
operands[5] = operand_subword (operands[2], (WORDS_BIG_ENDIAN != 0), 0, DImode);
operands[6] = operand_subword (operands[0], (WORDS_BIG_ENDIAN == 0), 0, DImode);
operands[7] = operand_subword (operands[2], (WORDS_BIG_ENDIAN == 0), 0, DImode);
}")
 
;; Clear the condition (carry) bit by comparing a register to itself;
;; the (use ...) keeps the dependence on that register explicit.
(define_insn "*clear_c"
[(set (reg:CC 17)
(const_int 0))
(use (match_operand:SI 0 "register_operand" "r"))]
""
"cmp %0,%0"
[(set_attr "type" "int2")
(set_attr "length" "2")])

;; Add with carry-in (addx); also rewrites the condition bit, as the
;; parallel UNSPEC_SET_CBIT set records.
(define_insn "*add_carry"
[(set (match_operand:SI 0 "register_operand" "=r")
(plus:SI (match_operand:SI 1 "register_operand" "%0")
(plus:SI (match_operand:SI 2 "register_operand" "r")
(ne:SI (reg:CC 17) (const_int 0)))))
(set (reg:CC 17)
(unspec:CC [(const_int 0)] UNSPEC_SET_CBIT))]
""
"addx %0,%2"
[(set_attr "type" "int2")
(set_attr "length" "2")])

;; SImode subtract: two-operand sub only (no immediate forms).
(define_insn "subsi3"
[(set (match_operand:SI 0 "register_operand" "=r")
(minus:SI (match_operand:SI 1 "register_operand" "0")
(match_operand:SI 2 "register_operand" "r")))]
""
"sub %0,%2"
[(set_attr "type" "int2")
(set_attr "length" "2")])

;; DImode subtract: clobbers the condition bit; always split after
;; reload, mirroring adddi3.
(define_insn "subdi3"
[(set (match_operand:DI 0 "register_operand" "=r")
(minus:DI (match_operand:DI 1 "register_operand" "0")
(match_operand:DI 2 "register_operand" "r")))
(clobber (reg:CC 17))]
""
"#"
[(set_attr "type" "multi")
(set_attr "length" "6")])

;; ??? The cmp clears the condition bit. Can we speed up somehow?
;; Split of subdi3: clear carry, then subx the least-significant
;; words, then subx the most-significant words.
(define_split
[(set (match_operand:DI 0 "register_operand" "")
(minus:DI (match_operand:DI 1 "register_operand" "")
(match_operand:DI 2 "register_operand" "")))
(clobber (reg:CC 17))]
"reload_completed"
[(parallel [(set (reg:CC 17)
(const_int 0))
(use (match_dup 4))])
(parallel [(set (match_dup 4)
(minus:SI (match_dup 4)
(minus:SI (match_dup 5)
(ne:SI (reg:CC 17) (const_int 0)))))
(set (reg:CC 17)
(unspec:CC [(const_int 0)] UNSPEC_SET_CBIT))])
(parallel [(set (match_dup 6)
(minus:SI (match_dup 6)
(minus:SI (match_dup 7)
(ne:SI (reg:CC 17) (const_int 0)))))
(set (reg:CC 17)
(unspec:CC [(const_int 0)] UNSPEC_SET_CBIT))])]
"
{
operands[4] = operand_subword (operands[0], (WORDS_BIG_ENDIAN != 0), 0, DImode);
operands[5] = operand_subword (operands[2], (WORDS_BIG_ENDIAN != 0), 0, DImode);
operands[6] = operand_subword (operands[0], (WORDS_BIG_ENDIAN == 0), 0, DImode);
operands[7] = operand_subword (operands[2], (WORDS_BIG_ENDIAN == 0), 0, DImode);
}")

;; Subtract with borrow-in (subx); rewrites the condition bit.
(define_insn "*sub_carry"
[(set (match_operand:SI 0 "register_operand" "=r")
(minus:SI (match_operand:SI 1 "register_operand" "%0")
(minus:SI (match_operand:SI 2 "register_operand" "r")
(ne:SI (reg:CC 17) (const_int 0)))))
(set (reg:CC 17)
(unspec:CC [(const_int 0)] UNSPEC_SET_CBIT))]
""
"subx %0,%2"
[(set_attr "type" "int2")
(set_attr "length" "2")])
; Multiply/Divide instructions.
 
;; Widening 16x16->32 multiply: mullo into the accumulator, then
;; mvfacmi moves the middle 32 bits of the accumulator to the result.
(define_insn "mulhisi3"
[(set (match_operand:SI 0 "register_operand" "=r")
(mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand" "r"))
(sign_extend:SI (match_operand:HI 2 "register_operand" "r"))))]
""
"mullo %1,%2\;mvfacmi %0"
[(set_attr "type" "multi")
(set_attr "length" "4")])

;; SImode multiply, two-operand form.
(define_insn "mulsi3"
[(set (match_operand:SI 0 "register_operand" "=r")
(mult:SI (match_operand:SI 1 "register_operand" "%0")
(match_operand:SI 2 "register_operand" "r")))]
""
"mul %0,%2"
[(set_attr "type" "mul2")
(set_attr "length" "2")])

;; Signed divide.
(define_insn "divsi3"
[(set (match_operand:SI 0 "register_operand" "=r")
(div:SI (match_operand:SI 1 "register_operand" "0")
(match_operand:SI 2 "register_operand" "r")))]
""
"div %0,%2"
[(set_attr "type" "div4")
(set_attr "length" "4")])

;; Unsigned divide.
(define_insn "udivsi3"
[(set (match_operand:SI 0 "register_operand" "=r")
(udiv:SI (match_operand:SI 1 "register_operand" "0")
(match_operand:SI 2 "register_operand" "r")))]
""
"divu %0,%2"
[(set_attr "type" "div4")
(set_attr "length" "4")])

;; Signed remainder.
(define_insn "modsi3"
[(set (match_operand:SI 0 "register_operand" "=r")
(mod:SI (match_operand:SI 1 "register_operand" "0")
(match_operand:SI 2 "register_operand" "r")))]
""
"rem %0,%2"
[(set_attr "type" "div4")
(set_attr "length" "4")])

;; Unsigned remainder.
(define_insn "umodsi3"
[(set (match_operand:SI 0 "register_operand" "=r")
(umod:SI (match_operand:SI 1 "register_operand" "0")
(match_operand:SI 2 "register_operand" "r")))]
""
"remu %0,%2"
[(set_attr "type" "div4")
(set_attr "length" "4")])
;; Boolean instructions.
;;
;; We don't define the DImode versions as expand_binop does a good enough job.
;; And if it doesn't it should be fixed.
 
;; SImode AND.  When optimizing for size with a distinct destination
;; register and an 8-bit constant, emit "#" so the split below turns
;; it into two short insns; otherwise and3 (constant) or and (reg).
(define_insn "andsi3"
[(set (match_operand:SI 0 "register_operand" "=r,r")
(and:SI (match_operand:SI 1 "register_operand" "%0,r")
(match_operand:SI 2 "reg_or_uint16_operand" "r,K")))]
""
"*
{
/* If we are worried about space, see if we can break this up into two
short instructions, which might eliminate a NOP being inserted. */
if (optimize_size
&& m32r_not_same_reg (operands[0], operands[1])
&& GET_CODE (operands[2]) == CONST_INT
&& INT8_P (INTVAL (operands[2])))
return \"#\";

else if (GET_CODE (operands[2]) == CONST_INT)
return \"and3 %0,%1,%#%X2\";

return \"and %0,%2\";
}"
[(set_attr "type" "int2,int4")
(set_attr "length" "2,4")])

;; Size-optimization split for andsi3: load the 8-bit constant with a
;; short ldi, then a short two-operand and.
(define_split
[(set (match_operand:SI 0 "register_operand" "")
(and:SI (match_operand:SI 1 "register_operand" "")
(match_operand:SI 2 "int8_operand" "")))]
"optimize_size && m32r_not_same_reg (operands[0], operands[1])"
[(set (match_dup 0) (match_dup 2))
(set (match_dup 0) (and:SI (match_dup 0) (match_dup 1)))]
"")

;; SImode OR: same size-optimization strategy as andsi3.
(define_insn "iorsi3"
[(set (match_operand:SI 0 "register_operand" "=r,r")
(ior:SI (match_operand:SI 1 "register_operand" "%0,r")
(match_operand:SI 2 "reg_or_uint16_operand" "r,K")))]
""
"*
{
/* If we are worried about space, see if we can break this up into two
short instructions, which might eliminate a NOP being inserted. */
if (optimize_size
&& m32r_not_same_reg (operands[0], operands[1])
&& GET_CODE (operands[2]) == CONST_INT
&& INT8_P (INTVAL (operands[2])))
return \"#\";

else if (GET_CODE (operands[2]) == CONST_INT)
return \"or3 %0,%1,%#%X2\";

return \"or %0,%2\";
}"
[(set_attr "type" "int2,int4")
(set_attr "length" "2,4")])

;; Size-optimization split for iorsi3.
(define_split
[(set (match_operand:SI 0 "register_operand" "")
(ior:SI (match_operand:SI 1 "register_operand" "")
(match_operand:SI 2 "int8_operand" "")))]
"optimize_size && m32r_not_same_reg (operands[0], operands[1])"
[(set (match_dup 0) (match_dup 2))
(set (match_dup 0) (ior:SI (match_dup 0) (match_dup 1)))]
"")

;; SImode XOR: same size-optimization strategy as andsi3.
(define_insn "xorsi3"
[(set (match_operand:SI 0 "register_operand" "=r,r")
(xor:SI (match_operand:SI 1 "register_operand" "%0,r")
(match_operand:SI 2 "reg_or_uint16_operand" "r,K")))]
""
"*
{
/* If we are worried about space, see if we can break this up into two
short instructions, which might eliminate a NOP being inserted. */
if (optimize_size
&& m32r_not_same_reg (operands[0], operands[1])
&& GET_CODE (operands[2]) == CONST_INT
&& INT8_P (INTVAL (operands[2])))
return \"#\";

else if (GET_CODE (operands[2]) == CONST_INT)
return \"xor3 %0,%1,%#%X2\";

return \"xor %0,%2\";
}"
[(set_attr "type" "int2,int4")
(set_attr "length" "2,4")])

;; Size-optimization split for xorsi3.
(define_split
[(set (match_operand:SI 0 "register_operand" "")
(xor:SI (match_operand:SI 1 "register_operand" "")
(match_operand:SI 2 "int8_operand" "")))]
"optimize_size && m32r_not_same_reg (operands[0], operands[1])"
[(set (match_dup 0) (match_dup 2))
(set (match_dup 0) (xor:SI (match_dup 0) (match_dup 1)))]
"")
 
(define_insn "negsi2"
[(set (match_operand:SI 0 "register_operand" "=r")
(neg:SI (match_operand:SI 1 "register_operand" "r")))]
""
"neg %0,%1"
[(set_attr "type" "int2")
(set_attr "length" "2")])
 
(define_insn "one_cmplsi2"
[(set (match_operand:SI 0 "register_operand" "=r")
(not:SI (match_operand:SI 1 "register_operand" "r")))]
""
"not %0,%1"
[(set_attr "type" "int2")
(set_attr "length" "2")])
;; Shift instructions.
;;
;; Each shift has three alternatives: a 2-byte register-count form, a
;; 2-byte immediate form (constraint O; consult m32r.h for its exact
;; range), and a 4-byte three-operand form with a 16-bit unsigned
;; immediate (constraint K).  Only the three-operand form allows the
;; destination to differ from the shifted source.

;; Left shift (sll / slli / sll3).
(define_insn "ashlsi3"
[(set (match_operand:SI 0 "register_operand" "=r,r,r")
(ashift:SI (match_operand:SI 1 "register_operand" "0,0,r")
(match_operand:SI 2 "reg_or_uint16_operand" "r,O,K")))]
""
"@
sll %0,%2
slli %0,%#%2
sll3 %0,%1,%#%2"
[(set_attr "type" "shift2,shift2,shift4")
(set_attr "length" "2,2,4")])

;; Arithmetic right shift (sra / srai / sra3).
(define_insn "ashrsi3"
[(set (match_operand:SI 0 "register_operand" "=r,r,r")
(ashiftrt:SI (match_operand:SI 1 "register_operand" "0,0,r")
(match_operand:SI 2 "reg_or_uint16_operand" "r,O,K")))]
""
"@
sra %0,%2
srai %0,%#%2
sra3 %0,%1,%#%2"
[(set_attr "type" "shift2,shift2,shift4")
(set_attr "length" "2,2,4")])

;; Logical right shift (srl / srli / srl3).
(define_insn "lshrsi3"
[(set (match_operand:SI 0 "register_operand" "=r,r,r")
(lshiftrt:SI (match_operand:SI 1 "register_operand" "0,0,r")
(match_operand:SI 2 "reg_or_uint16_operand" "r,O,K")))]
""
"@
srl %0,%2
srli %0,%#%2
srl3 %0,%1,%#%2"
[(set_attr "type" "shift2,shift2,shift4")
(set_attr "length" "2,2,4")])
;; Compare instructions.
;; This controls RTL generation and register allocation.

;; We generate RTL for comparisons and branches by having the cmpxx
;; patterns store away the operands. Then the bcc patterns
;; emit RTL for both the compare and the branch.
;;
;; On the m32r it is more efficient to use the bxxz instructions and
;; thus merge the compare and branch into one instruction, so they are
;; preferred.

;; The cmpsi expander emits no insns at all: it just records the two
;; operands in m32r_compare_op0/m32r_compare_op1 for the following
;; bcc/scc expander to consume, then returns via DONE.
(define_expand "cmpsi"
[(set (reg:CC 17)
(compare:CC (match_operand:SI 0 "register_operand" "")
(match_operand:SI 1 "reg_or_cmp_int16_operand" "")))]
""
"
{
m32r_compare_op0 = operands[0];
m32r_compare_op1 = operands[1];
DONE;
}")

;; Direct equality compare, available on M32RX/M32R2 only:
;; cmpeq for reg/reg, cmpz for comparison against zero (constraint P).
(define_insn "cmp_eqsi_zero_insn"
[(set (reg:CC 17)
(eq:CC (match_operand:SI 0 "register_operand" "r,r")
(match_operand:SI 1 "reg_or_zero_operand" "r,P")))]
"TARGET_M32RX || TARGET_M32R2"
"@
cmpeq %0, %1
cmpz %0"
[(set_attr "type" "int4")
(set_attr "length" "4")])

;; The cmp_xxx_insn patterns set the condition bit to the result of the
;; comparison. There isn't a "compare equal" instruction so cmp_eqsi_insn
;; is quite inefficient. However, it is rarely used.

;; Base-M32R equality compare: compute (op0 - op1) into a scratch, then
;; "cmpui scratch,#1" sets the condition bit iff the difference is zero
;; (unsigned scratch < 1).  The constant alternative folds the subtract
;; into addi/add3 of the negated constant (%N1).
(define_insn "cmp_eqsi_insn"
[(set (reg:CC 17)
(eq:CC (match_operand:SI 0 "register_operand" "r,r")
(match_operand:SI 1 "reg_or_cmp_int16_operand" "r,P")))
(clobber (match_scratch:SI 2 "=&r,&r"))]
""
"*
{
if (which_alternative == 0)
{
return \"mv %2,%0\;sub %2,%1\;cmpui %2,#1\";
}
else
{
if (INTVAL (operands [1]) == 0)
return \"cmpui %0, #1\";
else if (REGNO (operands [2]) == REGNO (operands [0]))
return \"addi %0,%#%N1\;cmpui %2,#1\";
else
return \"add3 %2,%0,%#%N1\;cmpui %2,#1\";
}
}"
[(set_attr "type" "multi,multi")
(set_attr "length" "8,8")])

;; Signed less-than compare: cmp (reg) or cmpi (16-bit immediate, J).
(define_insn "cmp_ltsi_insn"
[(set (reg:CC 17)
(lt:CC (match_operand:SI 0 "register_operand" "r,r")
(match_operand:SI 1 "reg_or_int16_operand" "r,J")))]
""
"@
cmp %0,%1
cmpi %0,%#%1"
[(set_attr "type" "int2,int4")
(set_attr "length" "2,4")])

;; Unsigned less-than compare: cmpu (reg) or cmpui (16-bit immediate).
(define_insn "cmp_ltusi_insn"
[(set (reg:CC 17)
(ltu:CC (match_operand:SI 0 "register_operand" "r,r")
(match_operand:SI 1 "reg_or_int16_operand" "r,J")))]
""
"@
cmpu %0,%1
cmpui %0,%#%1"
[(set_attr "type" "int2,int4")
(set_attr "length" "2,4")])
;; These control RTL generation for conditional jump insns.
;;
;; Each bcc expander combines the operands stashed by "cmpsi" with its
;; own condition code via gen_compare (defined in m32r.c), which builds
;; the comparison rtx used as the if_then_else condition.  The FALSE
;; argument requests the non-inverted sense of the comparison.

(define_expand "beq"
[(set (pc)
(if_then_else (match_dup 1)
(label_ref (match_operand 0 "" ""))
(pc)))]
""
"
{
operands[1] = gen_compare (EQ, m32r_compare_op0, m32r_compare_op1, FALSE);
}")

(define_expand "bne"
[(set (pc)
(if_then_else (match_dup 1)
(label_ref (match_operand 0 "" ""))
(pc)))]
""
"
{
operands[1] = gen_compare (NE, m32r_compare_op0, m32r_compare_op1, FALSE);
}")

(define_expand "bgt"
[(set (pc)
(if_then_else (match_dup 1)
(label_ref (match_operand 0 "" ""))
(pc)))]
""
"
{
operands[1] = gen_compare (GT, m32r_compare_op0, m32r_compare_op1, FALSE);
}")

(define_expand "ble"
[(set (pc)
(if_then_else (match_dup 1)
(label_ref (match_operand 0 "" ""))
(pc)))]
""
"
{
operands[1] = gen_compare (LE, m32r_compare_op0, m32r_compare_op1, FALSE);
}")

(define_expand "bge"
[(set (pc)
(if_then_else (match_dup 1)
(label_ref (match_operand 0 "" ""))
(pc)))]
""
"
{
operands[1] = gen_compare (GE, m32r_compare_op0, m32r_compare_op1, FALSE);
}")

(define_expand "blt"
[(set (pc)
(if_then_else (match_dup 1)
(label_ref (match_operand 0 "" ""))
(pc)))]
""
"
{
operands[1] = gen_compare (LT, m32r_compare_op0, m32r_compare_op1, FALSE);
}")

;; Unsigned variants follow.

(define_expand "bgtu"
[(set (pc)
(if_then_else (match_dup 1)
(label_ref (match_operand 0 "" ""))
(pc)))]
""
"
{
operands[1] = gen_compare (GTU, m32r_compare_op0, m32r_compare_op1, FALSE);
}")

(define_expand "bleu"
[(set (pc)
(if_then_else (match_dup 1)
(label_ref (match_operand 0 "" ""))
(pc)))]
""
"
{
operands[1] = gen_compare (LEU, m32r_compare_op0, m32r_compare_op1, FALSE);
}")

(define_expand "bgeu"
[(set (pc)
(if_then_else (match_dup 1)
(label_ref (match_operand 0 "" ""))
(pc)))]
""
"
{
operands[1] = gen_compare (GEU, m32r_compare_op0, m32r_compare_op1, FALSE);
}")

(define_expand "bltu"
[(set (pc)
(if_then_else (match_dup 1)
(label_ref (match_operand 0 "" ""))
(pc)))]
""
"
{
operands[1] = gen_compare (LTU, m32r_compare_op0, m32r_compare_op1, FALSE);
}")
 
;; Now match both normal and inverted jump.
;;
;; These branch on the condition bit (reg 17): NE emits "bc" (branch if
;; condition set), EQ emits "bnc".  The ".s" suffix selects the 2-byte
;; short form when the length attribute says the target is near enough.

(define_insn "*branch_insn"
[(set (pc)
(if_then_else (match_operator 1 "eqne_comparison_operator"
[(reg 17) (const_int 0)])
(label_ref (match_operand 0 "" ""))
(pc)))]
""
"*
{
static char instruction[40];
sprintf (instruction, \"%s%s %%l0\",
(GET_CODE (operands[1]) == NE) ? \"bc\" : \"bnc\",
(get_attr_length (insn) == 2) ? \".s\" : \"\");
return instruction;
}"
[(set_attr "type" "branch")
; cf PR gcc/28508
; We use 300/600 instead of 512/1024 to allow for the inaccuracy of
; computed insn lengths and of insn alignments, which are complex to
; track exactly.  Hyper-precision buys nothing here: the conservative
; margin only makes a borderline branch use the 4-byte form instead
; of the 2-byte one.
(set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
(const_int 300))
(const_int 600))
(const_int 2)
(const_int 4)))])

;; Same as *branch_insn but with the branch arms swapped, i.e. the jump
;; is taken when the comparison is false, so the opcode choice is
;; inverted (EQ -> "bc", NE -> "bnc").
(define_insn "*rev_branch_insn"
[(set (pc)
(if_then_else (match_operator 1 "eqne_comparison_operator"
[(reg 17) (const_int 0)])
(pc)
(label_ref (match_operand 0 "" ""))))]
;"REVERSIBLE_CC_MODE (GET_MODE (XEXP (operands[1], 0)))"
""
"*
{
static char instruction[40];
sprintf (instruction, \"%s%s %%l0\",
(GET_CODE (operands[1]) == EQ) ? \"bc\" : \"bnc\",
(get_attr_length (insn) == 2) ? \".s\" : \"\");
return instruction;
}"
[(set_attr "type" "branch")
; cf PR gcc/28508
; We use 300/600 instead of 512/1024 to allow for the inaccuracy of
; computed insn lengths and of insn alignments, which are complex to
; track exactly.  Hyper-precision buys nothing here: the conservative
; margin only makes a borderline branch use the 4-byte form instead
; of the 2-byte one.
(set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
(const_int 300))
(const_int 600))
(const_int 2)
(const_int 4)))])
 
; reg/reg compare and branch insns
;
; Fused compare-and-branch on two registers using beq/bne.  When the
; target is out of the 4-byte form's range, the condition is inverted
; to skip over an unconditional "bra" to the target (8 bytes total).

(define_insn "*reg_branch_insn"
[(set (pc)
(if_then_else (match_operator 1 "eqne_comparison_operator"
[(match_operand:SI 2 "register_operand" "r")
(match_operand:SI 3 "register_operand" "r")])
(label_ref (match_operand 0 "" ""))
(pc)))]
""
"*
{
/* Is branch target reachable with beq/bne? */
if (get_attr_length (insn) == 4)
{
if (GET_CODE (operands[1]) == EQ)
return \"beq %2,%3,%l0\";
else
return \"bne %2,%3,%l0\";
}
else
{
if (GET_CODE (operands[1]) == EQ)
return \"bne %2,%3,1f\;bra %l0\;1:\";
else
return \"beq %2,%3,1f\;bra %l0\;1:\";
}
}"
[(set_attr "type" "branch")
; We use 25000/50000 instead of 32768/65536 to account for slot filling
; which is complex to track and inaccurate length specs.
(set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
(const_int 25000))
(const_int 50000))
(const_int 4)
(const_int 8)))])

; Swapped-arms version of *reg_branch_insn: the jump is taken when the
; comparison is false, so the emitted opcode sense is inverted.
(define_insn "*rev_reg_branch_insn"
[(set (pc)
(if_then_else (match_operator 1 "eqne_comparison_operator"
[(match_operand:SI 2 "register_operand" "r")
(match_operand:SI 3 "register_operand" "r")])
(pc)
(label_ref (match_operand 0 "" ""))))]
""
"*
{
/* Is branch target reachable with beq/bne? */
if (get_attr_length (insn) == 4)
{
if (GET_CODE (operands[1]) == NE)
return \"beq %2,%3,%l0\";
else
return \"bne %2,%3,%l0\";
}
else
{
if (GET_CODE (operands[1]) == NE)
return \"bne %2,%3,1f\;bra %l0\;1:\";
else
return \"beq %2,%3,1f\;bra %l0\;1:\";
}
}"
[(set_attr "type" "branch")
; We use 25000/50000 instead of 32768/65536 to account for slot filling
; which is complex to track and inaccurate length specs.
(set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
(const_int 25000))
(const_int 50000))
(const_int 4)
(const_int 8)))])
 
; reg/zero compare and branch insns
;
; Fused compare-against-zero branches using the bxxz family
; (beqz/bnez/blez/bgtz/bltz/bgez).  As with the reg/reg patterns, an
; out-of-range target is handled by branching on the inverse condition
; around an unconditional "bra".  Note the direct form accepts any
; signed comparison, while the reversed form below is restricted to
; EQ/NE by its predicate.

(define_insn "*zero_branch_insn"
[(set (pc)
(if_then_else (match_operator 1 "signed_comparison_operator"
[(match_operand:SI 2 "register_operand" "r")
(const_int 0)])
(label_ref (match_operand 0 "" ""))
(pc)))]
""
"*
{
const char *br,*invbr;
char asmtext[40];

/* br is the branch suffix for the requested condition, invbr its
inverse (used for the long-range skip-around form). */
switch (GET_CODE (operands[1]))
{
case EQ : br = \"eq\"; invbr = \"ne\"; break;
case NE : br = \"ne\"; invbr = \"eq\"; break;
case LE : br = \"le\"; invbr = \"gt\"; break;
case GT : br = \"gt\"; invbr = \"le\"; break;
case LT : br = \"lt\"; invbr = \"ge\"; break;
case GE : br = \"ge\"; invbr = \"lt\"; break;

default: gcc_unreachable ();
}

/* Is branch target reachable with bxxz? */
if (get_attr_length (insn) == 4)
{
sprintf (asmtext, \"b%sz %%2,%%l0\", br);
output_asm_insn (asmtext, operands);
}
else
{
sprintf (asmtext, \"b%sz %%2,1f\;bra %%l0\;1:\", invbr);
output_asm_insn (asmtext, operands);
}
return \"\";
}"
[(set_attr "type" "branch")
; We use 25000/50000 instead of 32768/65536 to account for slot filling
; which is complex to track and inaccurate length specs.
(set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
(const_int 25000))
(const_int 50000))
(const_int 4)
(const_int 8)))])

; Swapped-arms version: jump taken when the comparison is false, so the
; short form uses the inverse suffix and the long form uses the direct
; one.  Only EQ/NE are matched here (eqne_comparison_operator).
(define_insn "*rev_zero_branch_insn"
[(set (pc)
(if_then_else (match_operator 1 "eqne_comparison_operator"
[(match_operand:SI 2 "register_operand" "r")
(const_int 0)])
(pc)
(label_ref (match_operand 0 "" ""))))]
""
"*
{
const char *br,*invbr;
char asmtext[40];

switch (GET_CODE (operands[1]))
{
case EQ : br = \"eq\"; invbr = \"ne\"; break;
case NE : br = \"ne\"; invbr = \"eq\"; break;
case LE : br = \"le\"; invbr = \"gt\"; break;
case GT : br = \"gt\"; invbr = \"le\"; break;
case LT : br = \"lt\"; invbr = \"ge\"; break;
case GE : br = \"ge\"; invbr = \"lt\"; break;

default: gcc_unreachable ();
}

/* Is branch target reachable with bxxz? */
if (get_attr_length (insn) == 4)
{
sprintf (asmtext, \"b%sz %%2,%%l0\", invbr);
output_asm_insn (asmtext, operands);
}
else
{
sprintf (asmtext, \"b%sz %%2,1f\;bra %%l0\;1:\", br);
output_asm_insn (asmtext, operands);
}
return \"\";
}"
[(set_attr "type" "branch")
; We use 25000/50000 instead of 32768/65536 to account for slot filling
; which is complex to track and inaccurate length specs.
(set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
(const_int 25000))
(const_int 50000))
(const_int 4)
(const_int 8)))])
;; S<cc> operations to set a register to 1/0 based on a comparison

;; "seq" (set-if-equal) dispatches on target and operands:
;;   - M32RX/M32R2: use the cmpeq/cmpz path (seq_insn_m32rx);
;;   - comparison against constant 0: seq_zero_insn;
;;   - otherwise: the general seq_insn, forcing op2 to a register
;;     unless it satisfies reg_or_eq_int16_operand.
;; Operands come from m32r_compare_op0/1 stashed by "cmpsi".
(define_expand "seq"
[(match_operand:SI 0 "register_operand" "")]
""
"
{
rtx op0 = operands[0];
rtx op1 = m32r_compare_op0;
rtx op2 = m32r_compare_op1;
enum machine_mode mode = GET_MODE (op0);

if (mode != SImode)
FAIL;

if (! register_operand (op1, mode))
op1 = force_reg (mode, op1);

if (TARGET_M32RX || TARGET_M32R2)
{
if (! reg_or_zero_operand (op2, mode))
op2 = force_reg (mode, op2);

emit_insn (gen_seq_insn_m32rx (op0, op1, op2));
DONE;
}
if (GET_CODE (op2) == CONST_INT && INTVAL (op2) == 0)
{
emit_insn (gen_seq_zero_insn (op0, op1));
DONE;
}

if (! reg_or_eq_int16_operand (op2, mode))
op2 = force_reg (mode, op2);

emit_insn (gen_seq_insn (op0, op1, op2));
DONE;
}")

;; Placeholder insn for the M32RX/M32R2 path; always split (output "#").
(define_insn "seq_insn_m32rx"
[(set (match_operand:SI 0 "register_operand" "=r")
(eq:SI (match_operand:SI 1 "register_operand" "%r")
(match_operand:SI 2 "reg_or_zero_operand" "rP")))
(clobber (reg:CC 17))]
"TARGET_M32RX || TARGET_M32R2"
"#"
[(set_attr "type" "multi")
(set_attr "length" "6")])

;; Split for seq_insn_m32rx: cmpeq/cmpz sets the condition bit, then
;; movcc_insn copies it into the destination register.
(define_split
[(set (match_operand:SI 0 "register_operand" "")
(eq:SI (match_operand:SI 1 "register_operand" "")
(match_operand:SI 2 "reg_or_zero_operand" "")))
(clobber (reg:CC 17))]
"TARGET_M32RX || TARGET_M32R2"
[(set (reg:CC 17)
(eq:CC (match_dup 1)
(match_dup 2)))
(set (match_dup 0)
(ne:SI (reg:CC 17) (const_int 0)))]
"")

;; Placeholder for (x == 0) on base M32R; always split.
(define_insn "seq_zero_insn"
[(set (match_operand:SI 0 "register_operand" "=r")
(eq:SI (match_operand:SI 1 "register_operand" "r")
(const_int 0)))
(clobber (reg:CC 17))]
"TARGET_M32R"
"#"
[(set_attr "type" "multi")
(set_attr "length" "6")])

;; Split for seq_zero_insn: (x == 0) is exactly (x <u 1), so emit
;; "cmpui x,#1" followed by a condition-bit move into the destination.
(define_split
[(set (match_operand:SI 0 "register_operand" "")
(eq:SI (match_operand:SI 1 "register_operand" "")
(const_int 0)))
(clobber (reg:CC 17))]
"TARGET_M32R"
[(match_dup 3)]
"
{
rtx op0 = operands[0];
rtx op1 = operands[1];

start_sequence ();
emit_insn (gen_cmp_ltusi_insn (op1, const1_rtx));
emit_insn (gen_movcc_insn (op0));
operands[3] = get_insns ();
end_sequence ();
}")

;; General (x == y) placeholder on base M32R.  The scratch operand is
;; tied to operand 1 or 2 in the cheap alternatives so the difference
;; can be computed in place; the later alternatives pay for an extra
;; register or the immediate form.
(define_insn "seq_insn"
[(set (match_operand:SI 0 "register_operand" "=r,r,??r,r")
(eq:SI (match_operand:SI 1 "register_operand" "r,r,r,r")
(match_operand:SI 2 "reg_or_eq_int16_operand" "r,r,r,PK")))
(clobber (reg:CC 17))
(clobber (match_scratch:SI 3 "=1,2,&r,r"))]
"TARGET_M32R"
"#"
[(set_attr "type" "multi")
(set_attr "length" "8,8,10,10")])

;; Split for seq_insn (after reload): reduce (x == y) to (x - y) or
;; (x ^ y) placed in the scratch, then test the scratch for zero via
;; "cmpui scratch,#1" and copy the condition bit out with movcc_insn.
(define_split
[(set (match_operand:SI 0 "register_operand" "")
(eq:SI (match_operand:SI 1 "register_operand" "")
(match_operand:SI 2 "reg_or_eq_int16_operand" "")))
(clobber (reg:CC 17))
(clobber (match_scratch:SI 3 ""))]
"TARGET_M32R && reload_completed"
[(match_dup 4)]
"
{
rtx op0 = operands[0];
rtx op1 = operands[1];
rtx op2 = operands[2];
rtx op3 = operands[3];
HOST_WIDE_INT value;

/* If the scratch was tied to operand 2, swap the operands so the
scratch coincides with the first source instead. */
if (GET_CODE (op2) == REG && GET_CODE (op3) == REG
&& REGNO (op2) == REGNO (op3))
{
op1 = operands[2];
op2 = operands[1];
}

start_sequence ();
if (GET_CODE (op1) == REG && GET_CODE (op3) == REG
&& REGNO (op1) != REGNO (op3))
{
emit_move_insn (op3, op1);
op1 = op3;
}

/* Prefer a subtract of the constant when it fits; otherwise XOR,
which also handles the register-register case. */
if (GET_CODE (op2) == CONST_INT && (value = INTVAL (op2)) != 0
&& CMP_INT16_P (value))
emit_insn (gen_addsi3 (op3, op1, GEN_INT (-value)));
else
emit_insn (gen_xorsi3 (op3, op1, op2));

emit_insn (gen_cmp_ltusi_insn (op3, const1_rtx));
emit_insn (gen_movcc_insn (op0));
operands[4] = get_insns ();
end_sequence ();
}")
 
;; "sne" (set-if-not-equal): reduce (x != y) to ((x ^ y) != 0) in a
;; fresh pseudo, then use sne_zero_insn.  Cases that don't fit (and any
;; attempt after reload, when no new pseudo can be created) FAIL so the
;; middle end falls back to its generic expansion.
(define_expand "sne"
[(match_operand:SI 0 "register_operand" "")]
""
"
{
rtx op0 = operands[0];
rtx op1 = m32r_compare_op0;
rtx op2 = m32r_compare_op1;
enum machine_mode mode = GET_MODE (op0);

if (mode != SImode)
FAIL;

if (GET_CODE (op2) != CONST_INT
|| (INTVAL (op2) != 0 && UINT16_P (INTVAL (op2))))
{
rtx reg;

if (reload_completed || reload_in_progress)
FAIL;

reg = gen_reg_rtx (SImode);
emit_insn (gen_xorsi3 (reg, op1, op2));
op1 = reg;

if (! register_operand (op1, mode))
op1 = force_reg (mode, op1);

emit_insn (gen_sne_zero_insn (op0, op1));
DONE;
}
else
FAIL;
}")

;; Placeholder for (x != 0); always split.
(define_insn "sne_zero_insn"
[(set (match_operand:SI 0 "register_operand" "=r")
(ne:SI (match_operand:SI 1 "register_operand" "r")
(const_int 0)))
(clobber (reg:CC 17))
(clobber (match_scratch:SI 2 "=&r"))]
""
"#"
[(set_attr "type" "multi")
(set_attr "length" "6")])

;; Split for sne_zero_insn (after reload): (x != 0) is (0 <u x), so
;; zero the scratch, do an unsigned compare, and copy the condition bit.
(define_split
[(set (match_operand:SI 0 "register_operand" "")
(ne:SI (match_operand:SI 1 "register_operand" "")
(const_int 0)))
(clobber (reg:CC 17))
(clobber (match_scratch:SI 2 ""))]
"reload_completed"
[(set (match_dup 2)
(const_int 0))
(set (reg:CC 17)
(ltu:CC (match_dup 2)
(match_dup 1)))
(set (match_dup 0)
(ne:SI (reg:CC 17) (const_int 0)))]
"")
;; "slt" (set-if-less-than, signed): force the operands into the forms
;; accepted by slt_insn and emit it.
(define_expand "slt"
[(match_operand:SI 0 "register_operand" "")]
""
"
{
rtx op0 = operands[0];
rtx op1 = m32r_compare_op0;
rtx op2 = m32r_compare_op1;
enum machine_mode mode = GET_MODE (op0);

if (mode != SImode)
FAIL;

if (! register_operand (op1, mode))
op1 = force_reg (mode, op1);

if (! reg_or_int16_operand (op2, mode))
op2 = force_reg (mode, op2);

emit_insn (gen_slt_insn (op0, op1, op2));
DONE;
}")

;; Placeholder for (x < y); always split.
(define_insn "slt_insn"
[(set (match_operand:SI 0 "register_operand" "=r,r")
(lt:SI (match_operand:SI 1 "register_operand" "r,r")
(match_operand:SI 2 "reg_or_int16_operand" "r,J")))
(clobber (reg:CC 17))]
""
"#"
[(set_attr "type" "multi")
(set_attr "length" "4,6")])

;; Split for slt_insn: signed compare sets the condition bit (cmp/cmpi),
;; then movcc_insn copies it into the destination.
(define_split
[(set (match_operand:SI 0 "register_operand" "")
(lt:SI (match_operand:SI 1 "register_operand" "")
(match_operand:SI 2 "reg_or_int16_operand" "")))
(clobber (reg:CC 17))]
""
[(set (reg:CC 17)
(lt:CC (match_dup 1)
(match_dup 2)))
(set (match_dup 0)
(ne:SI (reg:CC 17) (const_int 0)))]
"")
 
;; "sle" (set-if-less-or-equal, signed).  For a constant op2:
;;   - op2 >= INT32_MAX: x <= op2 is always true for 32-bit signed x,
;;     so just store 1;
;;   - otherwise rewrite x <= c as x < c+1, forcing c+1 into a register
;;     when it doesn't fit the 16-bit immediate form.
;; Non-constant op2 goes through sle_insn.
(define_expand "sle"
[(match_operand:SI 0 "register_operand" "")]
""
"
{
rtx op0 = operands[0];
rtx op1 = m32r_compare_op0;
rtx op2 = m32r_compare_op1;
enum machine_mode mode = GET_MODE (op0);

if (mode != SImode)
FAIL;

if (! register_operand (op1, mode))
op1 = force_reg (mode, op1);

if (GET_CODE (op2) == CONST_INT)
{
HOST_WIDE_INT value = INTVAL (op2);
if (value >= 2147483647)
{
emit_move_insn (op0, const1_rtx);
DONE;
}

op2 = GEN_INT (value+1);
if (value < -32768 || value >= 32767)
op2 = force_reg (mode, op2);

emit_insn (gen_slt_insn (op0, op1, op2));
DONE;
}

if (! register_operand (op2, mode))
op2 = force_reg (mode, op2);

emit_insn (gen_sle_insn (op0, op1, op2));
DONE;
}")

;; Placeholder for (x <= y), register operands only; always split.
(define_insn "sle_insn"
[(set (match_operand:SI 0 "register_operand" "=r")
(le:SI (match_operand:SI 1 "register_operand" "r")
(match_operand:SI 2 "register_operand" "r")))
(clobber (reg:CC 17))]
""
"#"
[(set_attr "type" "multi")
(set_attr "length" "8")])

;; Split for sle_insn (speed): compute (y < x), copy the condition bit,
;; then invert the 0/1 result with an XOR against 1.
(define_split
[(set (match_operand:SI 0 "register_operand" "")
(le:SI (match_operand:SI 1 "register_operand" "")
(match_operand:SI 2 "register_operand" "")))
(clobber (reg:CC 17))]
"!optimize_size"
[(set (reg:CC 17)
(lt:CC (match_dup 2)
(match_dup 1)))
(set (match_dup 0)
(ne:SI (reg:CC 17) (const_int 0)))
(set (match_dup 0)
(xor:SI (match_dup 0)
(const_int 1)))]
"")

;; If optimizing for space, use -(reg - 1) to invert the comparison rather than
;; xor reg,reg,1 which might eliminate a NOP being inserted.
(define_split
[(set (match_operand:SI 0 "register_operand" "")
(le:SI (match_operand:SI 1 "register_operand" "")
(match_operand:SI 2 "register_operand" "")))
(clobber (reg:CC 17))]
"optimize_size"
[(set (reg:CC 17)
(lt:CC (match_dup 2)
(match_dup 1)))
(set (match_dup 0)
(ne:SI (reg:CC 17) (const_int 0)))
(set (match_dup 0)
(plus:SI (match_dup 0)
(const_int -1)))
(set (match_dup 0)
(neg:SI (match_dup 0)))]
"")
 
;; "sgt" (set-if-greater-than, signed): x > y is y < x, so just emit
;; slt_insn with the operands swapped (both forced into registers).
(define_expand "sgt"
[(match_operand:SI 0 "register_operand" "")]
""
"
{
rtx op0 = operands[0];
rtx op1 = m32r_compare_op0;
rtx op2 = m32r_compare_op1;
enum machine_mode mode = GET_MODE (op0);

if (mode != SImode)
FAIL;

if (! register_operand (op1, mode))
op1 = force_reg (mode, op1);

if (! register_operand (op2, mode))
op2 = force_reg (mode, op2);

emit_insn (gen_slt_insn (op0, op2, op1));
DONE;
}")

;; "sge" (set-if-greater-or-equal, signed): emitted via sge_insn, which
;; splits into a (x < y) compare followed by inversion of the result.
(define_expand "sge"
[(match_operand:SI 0 "register_operand" "")]
""
"
{
rtx op0 = operands[0];
rtx op1 = m32r_compare_op0;
rtx op2 = m32r_compare_op1;
enum machine_mode mode = GET_MODE (op0);

if (mode != SImode)
FAIL;

if (! register_operand (op1, mode))
op1 = force_reg (mode, op1);

if (! reg_or_int16_operand (op2, mode))
op2 = force_reg (mode, op2);

emit_insn (gen_sge_insn (op0, op1, op2));
DONE;
}")

;; Placeholder for (x >= y); always split.
(define_insn "sge_insn"
[(set (match_operand:SI 0 "register_operand" "=r,r")
(ge:SI (match_operand:SI 1 "register_operand" "r,r")
(match_operand:SI 2 "reg_or_int16_operand" "r,J")))
(clobber (reg:CC 17))]
""
"#"
[(set_attr "type" "multi")
(set_attr "length" "8,10")])

;; Split for sge_insn (speed): (x >= y) == !(x < y) -- compare, copy
;; the condition bit, then invert the 0/1 value with XOR 1.
(define_split
[(set (match_operand:SI 0 "register_operand" "")
(ge:SI (match_operand:SI 1 "register_operand" "")
(match_operand:SI 2 "reg_or_int16_operand" "")))
(clobber (reg:CC 17))]
"!optimize_size"
[(set (reg:CC 17)
(lt:CC (match_dup 1)
(match_dup 2)))
(set (match_dup 0)
(ne:SI (reg:CC 17) (const_int 0)))
(set (match_dup 0)
(xor:SI (match_dup 0)
(const_int 1)))]
"")

;; If optimizing for space, use -(reg - 1) to invert the comparison rather than
;; xor reg,reg,1 which might eliminate a NOP being inserted.
(define_split
[(set (match_operand:SI 0 "register_operand" "")
(ge:SI (match_operand:SI 1 "register_operand" "")
(match_operand:SI 2 "reg_or_int16_operand" "")))
(clobber (reg:CC 17))]
"optimize_size"
[(set (reg:CC 17)
(lt:CC (match_dup 1)
(match_dup 2)))
(set (match_dup 0)
(ne:SI (reg:CC 17) (const_int 0)))
(set (match_dup 0)
(plus:SI (match_dup 0)
(const_int -1)))
(set (match_dup 0)
(neg:SI (match_dup 0)))]
"")
 
;; "sltu" (set-if-less-than, unsigned): unsigned analogue of "slt".
(define_expand "sltu"
[(match_operand:SI 0 "register_operand" "")]
""
"
{
rtx op0 = operands[0];
rtx op1 = m32r_compare_op0;
rtx op2 = m32r_compare_op1;
enum machine_mode mode = GET_MODE (op0);

if (mode != SImode)
FAIL;

if (! register_operand (op1, mode))
op1 = force_reg (mode, op1);

if (! reg_or_int16_operand (op2, mode))
op2 = force_reg (mode, op2);

emit_insn (gen_sltu_insn (op0, op1, op2));
DONE;
}")

;; Placeholder for (x <u y); always split.
(define_insn "sltu_insn"
[(set (match_operand:SI 0 "register_operand" "=r,r")
(ltu:SI (match_operand:SI 1 "register_operand" "r,r")
(match_operand:SI 2 "reg_or_int16_operand" "r,J")))
(clobber (reg:CC 17))]
""
"#"
[(set_attr "type" "multi")
(set_attr "length" "6,8")])

;; Split for sltu_insn: unsigned compare (cmpu/cmpui) sets the condition
;; bit, then movcc_insn copies it into the destination.
(define_split
[(set (match_operand:SI 0 "register_operand" "")
(ltu:SI (match_operand:SI 1 "register_operand" "")
(match_operand:SI 2 "reg_or_int16_operand" "")))
(clobber (reg:CC 17))]
""
[(set (reg:CC 17)
(ltu:CC (match_dup 1)
(match_dup 2)))
(set (match_dup 0)
(ne:SI (reg:CC 17) (const_int 0)))]
"")
 
;; "sleu" (set-if-less-or-equal, unsigned).  Constant op2 is rewritten
;; as x <u c+1, mirroring the signed "sle" expander above.
;;
;; NOTE(review): the always-true shortcut below tests value >= 2147483647
;; (INT32_MAX), apparently copied from the signed "sle" expander.  For an
;; unsigned compare, x <= op2 is only always true when op2 is 0xffffffff;
;; whether INT32_MAX-and-up constants can legitimately take this path
;; depends on how the CONST_INT was (sign-)extended upstream -- confirm
;; before relying on it.
(define_expand "sleu"
[(match_operand:SI 0 "register_operand" "")]
""
"
{
rtx op0 = operands[0];
rtx op1 = m32r_compare_op0;
rtx op2 = m32r_compare_op1;
enum machine_mode mode = GET_MODE (op0);

if (mode != SImode)
FAIL;

if (GET_CODE (op2) == CONST_INT)
{
HOST_WIDE_INT value = INTVAL (op2);
if (value >= 2147483647)
{
emit_move_insn (op0, const1_rtx);
DONE;
}

op2 = GEN_INT (value+1);
if (value < 0 || value >= 32767)
op2 = force_reg (mode, op2);

emit_insn (gen_sltu_insn (op0, op1, op2));
DONE;
}

if (! register_operand (op2, mode))
op2 = force_reg (mode, op2);

emit_insn (gen_sleu_insn (op0, op1, op2));
DONE;
}")

;; Placeholder for (x <=u y), register operands only; always split.
(define_insn "sleu_insn"
[(set (match_operand:SI 0 "register_operand" "=r")
(leu:SI (match_operand:SI 1 "register_operand" "r")
(match_operand:SI 2 "register_operand" "r")))
(clobber (reg:CC 17))]
""
"#"
[(set_attr "type" "multi")
(set_attr "length" "8")])

;; Split for sleu_insn (speed): compute (y <u x), copy the condition
;; bit, then invert the 0/1 result with XOR 1.
(define_split
[(set (match_operand:SI 0 "register_operand" "")
(leu:SI (match_operand:SI 1 "register_operand" "")
(match_operand:SI 2 "register_operand" "")))
(clobber (reg:CC 17))]
"!optimize_size"
[(set (reg:CC 17)
(ltu:CC (match_dup 2)
(match_dup 1)))
(set (match_dup 0)
(ne:SI (reg:CC 17) (const_int 0)))
(set (match_dup 0)
(xor:SI (match_dup 0)
(const_int 1)))]
"")

;; If optimizing for space, use -(reg - 1) to invert the comparison rather than
;; xor reg,reg,1 which might eliminate a NOP being inserted.
(define_split
[(set (match_operand:SI 0 "register_operand" "")
(leu:SI (match_operand:SI 1 "register_operand" "")
(match_operand:SI 2 "register_operand" "")))
(clobber (reg:CC 17))]
"optimize_size"
[(set (reg:CC 17)
(ltu:CC (match_dup 2)
(match_dup 1)))
(set (match_dup 0)
(ne:SI (reg:CC 17) (const_int 0)))
(set (match_dup 0)
(plus:SI (match_dup 0)
(const_int -1)))
(set (match_dup 0)
(neg:SI (match_dup 0)))]
"")
 
;; "sgtu" (set-if-greater-than, unsigned): x >u y is y <u x, so emit
;; sltu_insn with the operands swapped (both forced into registers).
(define_expand "sgtu"
[(match_operand:SI 0 "register_operand" "")]
""
"
{
rtx op0 = operands[0];
rtx op1 = m32r_compare_op0;
rtx op2 = m32r_compare_op1;
enum machine_mode mode = GET_MODE (op0);

if (mode != SImode)
FAIL;

if (! register_operand (op1, mode))
op1 = force_reg (mode, op1);

if (! register_operand (op2, mode))
op2 = force_reg (mode, op2);

emit_insn (gen_sltu_insn (op0, op2, op1));
DONE;
}")

;; "sgeu" (set-if-greater-or-equal, unsigned): via sgeu_insn, which
;; splits into a (x <u y) compare followed by inversion of the result.
(define_expand "sgeu"
[(match_operand:SI 0 "register_operand" "")]
""
"
{
rtx op0 = operands[0];
rtx op1 = m32r_compare_op0;
rtx op2 = m32r_compare_op1;
enum machine_mode mode = GET_MODE (op0);

if (mode != SImode)
FAIL;

if (! register_operand (op1, mode))
op1 = force_reg (mode, op1);

if (! reg_or_int16_operand (op2, mode))
op2 = force_reg (mode, op2);

emit_insn (gen_sgeu_insn (op0, op1, op2));
DONE;
}")

;; Placeholder for (x >=u y); always split.
(define_insn "sgeu_insn"
[(set (match_operand:SI 0 "register_operand" "=r,r")
(geu:SI (match_operand:SI 1 "register_operand" "r,r")
(match_operand:SI 2 "reg_or_int16_operand" "r,J")))
(clobber (reg:CC 17))]
""
"#"
[(set_attr "type" "multi")
(set_attr "length" "8,10")])

;; Split for sgeu_insn (speed): (x >=u y) == !(x <u y) -- compare, copy
;; the condition bit, then invert the 0/1 value with XOR 1.
(define_split
[(set (match_operand:SI 0 "register_operand" "")
(geu:SI (match_operand:SI 1 "register_operand" "")
(match_operand:SI 2 "reg_or_int16_operand" "")))
(clobber (reg:CC 17))]
"!optimize_size"
[(set (reg:CC 17)
(ltu:CC (match_dup 1)
(match_dup 2)))
(set (match_dup 0)
(ne:SI (reg:CC 17) (const_int 0)))
(set (match_dup 0)
(xor:SI (match_dup 0)
(const_int 1)))]
"")

;; If optimizing for space, use -(reg - 1) to invert the comparison rather than
;; xor reg,reg,1 which might eliminate a NOP being inserted.
(define_split
[(set (match_operand:SI 0 "register_operand" "")
(geu:SI (match_operand:SI 1 "register_operand" "")
(match_operand:SI 2 "reg_or_int16_operand" "")))
(clobber (reg:CC 17))]
"optimize_size"
[(set (reg:CC 17)
(ltu:CC (match_dup 1)
(match_dup 2)))
(set (match_dup 0)
(ne:SI (reg:CC 17) (const_int 0)))
(set (match_dup 0)
(plus:SI (match_dup 0)
(const_int -1)))
(set (match_dup 0)
(neg:SI (match_dup 0)))]
"")
 
;; Copy the condition bit (reg 17, i.e. the cbr control register) into a
;; general register as 0/1, via "mvfc".  Used by the scc splits above.
(define_insn "movcc_insn"
[(set (match_operand:SI 0 "register_operand" "=r")
(ne:SI (reg:CC 17) (const_int 0)))]
""
"mvfc %0, cbr"
[(set_attr "type" "misc")
(set_attr "length" "2")])

;; Unconditional and other jump instructions.

;; Unconditional branch.  400/800 (rather than 512/1024) leaves margin
;; for inaccurate insn lengths/alignments when choosing the 2-byte form.
(define_insn "jump"
[(set (pc) (label_ref (match_operand 0 "" "")))]
""
"bra %l0"
[(set_attr "type" "uncond_branch")
(set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
(const_int 400))
(const_int 800))
(const_int 2)
(const_int 4)))])

;; Jump through a register ("jmp").
(define_insn "indirect_jump"
[(set (pc) (match_operand:SI 0 "address_operand" "p"))]
""
"jmp %a0"
[(set_attr "type" "uncond_branch")
(set_attr "length" "2")])

;; Simple function return ("jmp lr") when direct_return () says no
;; epilogue work is needed.
(define_insn "return"
[(return)]
"direct_return ()"
"jmp lr"
[(set_attr "type" "uncond_branch")
(set_attr "length" "2")])
(define_expand "tablejump"
[(parallel [(set (pc) (match_operand 0 "register_operand" "r"))
(use (label_ref (match_operand 1 "" "")))])]
""
"
{
/* In pic mode, our address differences are against the base of the
table. Add that base value back in; CSE ought to be able to combine
the two address loads. */
if (flag_pic)
{
rtx tmp, tmp2;

tmp = gen_rtx_LABEL_REF (Pmode, operands[1]);
tmp2 = operands[0];
tmp = gen_rtx_PLUS (Pmode, tmp2, tmp);
operands[0] = memory_address (Pmode, tmp);
}
}")

;; The insn matched by the tablejump expand: an indirect jump plus a
;; "use" of the table label to keep the jump table alive.
(define_insn "*tablejump_insn"
[(set (pc) (match_operand:SI 0 "address_operand" "p"))
(use (label_ref (match_operand 1 "" "")))]
""
"jmp %a0"
[(set_attr "type" "uncond_branch")
(set_attr "length" "2")])
 
;; Call with no return value.  Reg 14 (the link register, r14/lr) is
;; clobbered by the call.  Under -fpic, mark the function as using the
;; PIC offset table.
(define_expand "call"
;; operands[1] is stack_size_rtx
;; operands[2] is next_arg_register
[(parallel [(call (match_operand:SI 0 "call_operand" "")
(match_operand 1 "" ""))
(clobber (reg:SI 14))])]
""
"
{
if (flag_pic)
current_function_uses_pic_offset_table = 1;
}")

;; Call through a register: 2-byte "jl".
(define_insn "*call_via_reg"
[(call (mem:SI (match_operand:SI 0 "register_operand" "r"))
(match_operand 1 "" ""))
(clobber (reg:SI 14))]
""
"jl %0"
[(set_attr "type" "call")
(set_attr "length" "2")])

;; Call to a symbolic address.  If call26_operand says the target is
;; reachable with the 26-bit "bl", emit that; otherwise materialize the
;; address in r14 (seth/add3) and "jl" through it.
(define_insn "*call_via_label"
[(call (mem:SI (match_operand:SI 0 "call_address_operand" ""))
(match_operand 1 "" ""))
(clobber (reg:SI 14))]
""
"*
{
int call26_p = call26_operand (operands[0], FUNCTION_MODE);

if (! call26_p)
{
/* We may not be able to reach with a `bl' insn so punt and leave it to
the linker.
We do this here, rather than doing a force_reg in the define_expand
so these insns won't be separated, say by scheduling, thus simplifying
the linker. */
return \"seth r14,%T0\;add3 r14,r14,%B0\;jl r14\";
}
else
return \"bl %0\";
}"
[(set_attr "type" "call")
(set (attr "length")
(if_then_else (eq (symbol_ref "call26_operand (operands[0], FUNCTION_MODE)")
(const_int 0))
(const_int 12) ; 10 + 2 for nop filler
; The return address must be on a 4 byte boundary so
; there's no point in using a value of 2 here. A 2 byte
; insn may go in the left slot but we currently can't
; use such knowledge.
(const_int 4)))])

;; Call returning a value; otherwise parallel to "call" above.
(define_expand "call_value"
;; operand 2 is stack_size_rtx
;; operand 3 is next_arg_register
[(parallel [(set (match_operand 0 "register_operand" "=r")
(call (match_operand:SI 1 "call_operand" "")
(match_operand 2 "" "")))
(clobber (reg:SI 14))])]
""
"
{
if (flag_pic)
current_function_uses_pic_offset_table = 1;
}")

;; Value-returning call through a register: 2-byte "jl".
(define_insn "*call_value_via_reg"
[(set (match_operand 0 "register_operand" "=r")
(call (mem:SI (match_operand:SI 1 "register_operand" "r"))
(match_operand 2 "" "")))
(clobber (reg:SI 14))]
""
"jl %1"
[(set_attr "type" "call")
(set_attr "length" "2")])

;; Value-returning call to a symbolic address; same bl-vs-seth/add3/jl
;; strategy as *call_via_label.
(define_insn "*call_value_via_label"
[(set (match_operand 0 "register_operand" "=r")
(call (mem:SI (match_operand:SI 1 "call_address_operand" ""))
(match_operand 2 "" "")))
(clobber (reg:SI 14))]
""
"*
{
int call26_p = call26_operand (operands[1], FUNCTION_MODE);

if (flag_pic)
current_function_uses_pic_offset_table = 1;

if (! call26_p)
{
/* We may not be able to reach with a `bl' insn so punt and leave it to
the linker.
We do this here, rather than doing a force_reg in the define_expand
so these insns won't be separated, say by scheduling, thus simplifying
the linker. */
return \"seth r14,%T1\;add3 r14,r14,%B1\;jl r14\";
}
else
return \"bl %1\";
}"
[(set_attr "type" "call")
(set (attr "length")
(if_then_else (eq (symbol_ref "call26_operand (operands[1], FUNCTION_MODE)")
(const_int 0))
(const_int 12) ; 10 + 2 for nop filler
; The return address must be on a 4 byte boundary so
; there's no point in using a value of 2 here. A 2 byte
; insn may go in the left slot but we currently can't
; use such knowledge.
(const_int 4)))])
;; Single 16 bit no-operation.
(define_insn "nop"
[(const_int 0)]
""
"nop"
[(set_attr "type" "int2")
(set_attr "length" "2")])

;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
;; all of memory. This blocks insns from being moved across this point.

;; Emits no code; serves purely as a barrier to insn motion.
(define_insn "blockage"
[(unspec_volatile [(const_int 0)] UNSPECV_BLOCKAGE)]
""
"")
 
;; Special pattern to flush the icache.
 
;; Flush the instruction cache by issuing a trap.  Operand 0 is the memory
;; region concerned (listed so the flush is ordered with respect to it --
;; presumably to keep pending stores from being moved past it; confirm),
;; operand 1 is the trap number.  Reg 17 (the condition-bit register, per
;; m32r_hard_regno_mode_ok) is clobbered by the trap.
(define_insn "flush_icache"
[(unspec_volatile [(match_operand 0 "memory_operand" "m")]
UNSPECV_FLUSH_ICACHE)
(match_operand 1 "" "")
(clobber (reg:SI 17))]
""
"* return \"trap %#%1 ; flush-icache\";"
[(set_attr "type" "int4")
(set_attr "length" "4")])
;; Speed up fabs and provide correct sign handling for -0
 
;; DFmode absolute value: clear the IEEE sign bit of the high word by
;; shifting it left one bit then logical-shifting it back right.  Emitted
;; as "#" and split after reload into the two SImode shifts below.
(define_insn "absdf2"
[(set (match_operand:DF 0 "register_operand" "=r")
(abs:DF (match_operand:DF 1 "register_operand" "0")))]
""
"#"
[(set_attr "type" "multi")
(set_attr "length" "4")])

;; Post-reload split for absdf2; operand 2 is the high SImode part, which
;; carries the sign bit.
(define_split
[(set (match_operand:DF 0 "register_operand" "")
(abs:DF (match_operand:DF 1 "register_operand" "")))]
"reload_completed"
[(set (match_dup 2)
(ashift:SI (match_dup 2)
(const_int 1)))
(set (match_dup 2)
(lshiftrt:SI (match_dup 2)
(const_int 1)))]
"operands[2] = gen_highpart (SImode, operands[0]);")
 
;; SFmode absolute value, same sign-bit-clearing technique as absdf2.
(define_insn "abssf2"
[(set (match_operand:SF 0 "register_operand" "=r")
(abs:SF (match_operand:SF 1 "register_operand" "0")))]
""
"#"
[(set_attr "type" "multi")
(set_attr "length" "4")])

;; Post-reload split for abssf2: shift left then logical right by one to
;; clear the sign bit.
(define_split
[(set (match_operand:SF 0 "register_operand" "")
(abs:SF (match_operand:SF 1 "register_operand" "")))]
"reload_completed"
[(set (match_dup 2)
(ashift:SI (match_dup 2)
(const_int 1)))
(set (match_dup 2)
(lshiftrt:SI (match_dup 2)
(const_int 1)))]
"operands[2] = gen_highpart (SImode, operands[0]);")
;; Conditional move instructions
;; Based on those done for the d10v
 
;; Conditional move expander.  Only supports selecting between the
;; constants 0 and 1 (zero_and_one); anything else FAILs so the generic
;; code falls back to branches.  The comparison is rewritten into a
;; carry-flag test by gen_compare.
(define_expand "movsicc"
[
(set (match_operand:SI 0 "register_operand" "r")
(if_then_else:SI (match_operand 1 "" "")
(match_operand:SI 2 "conditional_move_operand" "O")
(match_operand:SI 3 "conditional_move_operand" "O")
)
)
]
""
"
{
if (! zero_and_one (operands [2], operands [3]))
FAIL;

/* Generate the comparison that will set the carry flag. */
operands[1] = gen_compare (GET_CODE (operands[1]), m32r_compare_op0,
m32r_compare_op1, TRUE);

/* See other movsicc pattern below for reason why. */
emit_insn (gen_blockage ());
}")
 
;; Generate the conditional instructions based on how the carry flag is examined.
;; Output is produced by emit_cond_move; only 0/1 constant pairs are
;; accepted (enforced by the zero_and_one insn condition).
(define_insn "*movsicc_internal"
[(set (match_operand:SI 0 "register_operand" "=r")
(if_then_else:SI (match_operand 1 "carry_compare_operand" "")
(match_operand:SI 2 "conditional_move_operand" "O")
(match_operand:SI 3 "conditional_move_operand" "O")
)
)]
"zero_and_one (operands [2], operands[3])"
"* return emit_cond_move (operands, insn);"
[(set_attr "type" "multi")
(set_attr "length" "8")
]
)
 
;; Block moves, see m32r.c for more details.
;; Argument 0 is the destination
;; Argument 1 is the source
;; Argument 2 is the length
;; Argument 3 is the alignment
 
;; Expand a block move; defers entirely to m32r_expand_block_move, and
;; FAILs (falling back to a library call) when that routine declines.
(define_expand "movmemsi"
[(parallel [(set (match_operand:BLK 0 "general_operand" "")
(match_operand:BLK 1 "general_operand" ""))
(use (match_operand:SI 2 "immediate_operand" ""))
(use (match_operand:SI 3 "immediate_operand" ""))])]
""
"
{
if (operands[0]) /* Avoid unused code messages. */
{
if (m32r_expand_block_move (operands))
DONE;
else
FAIL;
}
}")
 
;; Insn generated by block moves
 
;; The insn emitted by m32r_expand_block_move.  Operands 3 and 4 describe
;; the final values of the destination/source pointers so the register
;; allocator knows they are modified; the two scratches hold data in
;; flight.  Assembly comes from m32r_output_block_move.
(define_insn "movmemsi_internal"
[(set (mem:BLK (match_operand:SI 0 "register_operand" "r")) ;; destination
(mem:BLK (match_operand:SI 1 "register_operand" "r"))) ;; source
(use (match_operand:SI 2 "m32r_block_immediate_operand" "J"));; # bytes to move
(set (match_operand:SI 3 "register_operand" "=0")
(plus:SI (minus (match_dup 2) (const_int 4))
(match_dup 0)))
(set (match_operand:SI 4 "register_operand" "=1")
(plus:SI (match_dup 1)
(match_dup 2)))
(clobber (match_scratch:SI 5 "=&r")) ;; temp1
(clobber (match_scratch:SI 6 "=&r"))] ;; temp2
""
"* m32r_output_block_move (insn, operands); return \"\"; "
[(set_attr "type" "store8")
(set_attr "length" "72")]) ;; Maximum
 
;; PIC
 
/* When generating pic, we need to load the symbol offset into a register.
So that the optimizer does not confuse this with a normal symbol load
we use an unspec. The offset will be loaded from a constant pool entry,
since that is the only type of relocation we can use. */
 
;; Load a 24 bit PIC offset in one ld24; the unspec keeps the optimizers
;; from treating this as an ordinary symbol load (see comment above).
(define_insn "pic_load_addr"
[(set (match_operand:SI 0 "register_operand" "=r")
(unspec:SI [(match_operand 1 "" "")] UNSPEC_PIC_LOAD_ADDR))]
"flag_pic"
"ld24 %0,%#%1"
[(set_attr "type" "int4")])

;; Materialize a @GOTOFF displacement with a seth/add3 pair (8 bytes).
;; NOTE(review): the add3 operand is spelled low(...) without the %#
;; immediate prefix used elsewhere (cf. get_pc) -- confirm the assembler
;; accepts this form.
(define_insn "gotoff_load_addr"
[(set (match_operand:SI 0 "register_operand" "=r")
(unspec:SI [(match_operand 1 "" "")] UNSPEC_GOTOFF))]
"flag_pic"
"seth %0, %#shigh(%1@GOTOFF)\;add3 %0, %0, low(%1@GOTOFF)"
[(set_attr "type" "int4")
(set_attr "length" "8")])
 
;; Load program counter insns.
 
;; Compute an address relative to the current pc: `bl.s .+4' branches to
;; the immediately following insn, leaving that insn's address in lr,
;; which is then added to the offset named by operand 1.  Operand 2
;; selects the short ld24 form (nonzero) or the full seth/add3 form.
;; lr is r14, hence the clobber.
(define_insn "get_pc"
[(clobber (reg:SI 14))
(set (match_operand 0 "register_operand" "=r")
(unspec [(match_operand 1 "" "")] UNSPEC_GET_PC))
(use (match_operand:SI 2 "immediate_operand" ""))]
"flag_pic"
"*
{
if (INTVAL(operands[2]))
return \"bl.s .+4\;ld24 %0,%#%1\;add %0,lr\";
else
return \"bl.s .+4\;seth %0,%#shigh(%1)\;add3 %0,%0,%#low(%1+4)\;add %0,lr\";}"
[(set (attr "length") (if_then_else (ne (match_dup 2) (const_int 0))
(const_int 8)
(const_int 12)))])

;; Re-establish the PIC register on return paths into this function
;; (builtin setjmp/longjmp); delegates to m32r_load_pic_register.
(define_expand "builtin_setjmp_receiver"
[(label_ref (match_operand 0 "" ""))]
"flag_pic"
"
{
m32r_load_pic_register ();
DONE;
}")
/predicates.md
0,0 → 1,441
;; Predicate definitions for Renesas M32R.
;; Copyright (C) 2005, 2007 Free Software Foundation, Inc.
;;
;; This file is part of GCC.
;;
;; GCC is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
;;
;; GCC is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;; GNU General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3. If not see
;; <http://www.gnu.org/licenses/>.
 
;; Return true if OP is a register or the constant 0.
 
(define_predicate "reg_or_zero_operand"
  (match_code "reg,subreg,const_int")
{
  /* The only constant accepted is zero; registers and subregs defer
     to register_operand.  */
  if (GET_CODE (op) == CONST_INT)
    return INTVAL (op) == 0;

  return register_operand (op, mode);
})
 
;; Return nonzero if the operand is suitable for use in a conditional
;; move sequence.
 
(define_predicate "conditional_move_operand"
(match_code "reg,subreg,const_int")
{
/* Only defined for simple integers so far... */
if (mode != SImode && mode != HImode && mode != QImode)
return FALSE;

/* At the moment we can handle moving registers and loading constants. */
/* To be added: Addition/subtraction/bitops/multiplication of registers. */

switch (GET_CODE (op))
{
case REG:
return 1;

case CONST_INT:
/* Constants must fit in a signed 8 bit immediate. */
return INT8_P (INTVAL (op));

default:
#if 0
fprintf (stderr, "Test for cond move op of type: %s\n",
GET_RTX_NAME (GET_CODE (op)));
#endif
/* NOTE(review): SUBREG falls through to here and is rejected even
though match_code lists it -- confirm that is intentional. */
return 0;
}
})
 
;; Return true if the code is a test of the carry bit.
 
(define_predicate "carry_compare_operand"
(match_code "eq,ne")
{
rtx x;

/* The comparison must be in CCmode (or untyped VOIDmode). */
if (GET_MODE (op) != CCmode && GET_MODE (op) != VOIDmode)
return FALSE;

/* Only equality tests of the carry bit are recognized. */
if (GET_CODE (op) != NE && GET_CODE (op) != EQ)
return FALSE;

/* First arm must be the carry register itself... */
x = XEXP (op, 0);
if (GET_CODE (x) != REG || REGNO (x) != CARRY_REGNUM)
return FALSE;

/* ...compared against literal zero. */
x = XEXP (op, 1);
if (GET_CODE (x) != CONST_INT || INTVAL (x) != 0)
return FALSE;

return TRUE;
})
 
;; Return 1 if OP is an EQ or NE comparison operator.
 
;; Return 1 if OP is an EQ or NE comparison operator.

(define_predicate "eqne_comparison_operator"
  (match_code "eq,ne")
{
  return GET_CODE (op) == EQ || GET_CODE (op) == NE;
})

;; Return 1 if OP is a signed comparison operator.

(define_predicate "signed_comparison_operator"
  (match_code "eq,ne,lt,le,gt,ge")
{
  enum rtx_code code = GET_CODE (op);

  /* Reject anything that is not a genuine comparison rtx.  */
  if (! COMPARISON_P (op))
    return 0;

  return (code == EQ || code == NE
          || code == LT || code == LE || code == GT || code == GE);
})
 
;; Return true if OP is an acceptable argument for a move destination.
 
(define_predicate "move_dest_operand"
(match_code "reg,subreg,mem")
{
switch (GET_CODE (op))
{
case REG :
return register_operand (op, mode);
case SUBREG :
/* (subreg (mem ...) ...) can occur here if the inner part was once a
pseudo-reg and is now a stack slot. */
if (GET_CODE (SUBREG_REG (op)) == MEM)
return address_operand (XEXP (SUBREG_REG (op), 0), mode);
else
return register_operand (op, mode);
case MEM :
if (GET_CODE (XEXP (op, 0)) == POST_INC)
return 0; /* stores can't do post inc */
return address_operand (XEXP (op, 0), mode);
default :
return 0;
}
})
 
;; Return true if OP is an acceptable argument for a single word move
;; source.
 
(define_predicate "move_src_operand"
(match_code "reg,subreg,mem,const_int,const_double,label_ref,const,symbol_ref")
{
switch (GET_CODE (op))
{
case LABEL_REF :
case SYMBOL_REF :
case CONST :
/* Symbolic sources must be reachable with 24 bit addressing. */
return addr24_operand (op, mode);
case CONST_INT :
/* ??? We allow more cse opportunities if we only allow constants
loadable with one insn, and split the rest into two. The instances
where this would help should be rare and the current way is
simpler. */
if (HOST_BITS_PER_WIDE_INT > 32)
{
/* On a 64 bit host, accept only values representable in 32 bits
(all bits above bit 31 equal to the sign). */
HOST_WIDE_INT rest = INTVAL (op) >> 31;
return (rest == 0 || rest == -1);
}
else
return 1;
case CONST_DOUBLE :
if (mode == SFmode)
return 1;
else if (mode == SImode)
{
/* Large unsigned constants are represented as const_double's. */
unsigned HOST_WIDE_INT low, high;

low = CONST_DOUBLE_LOW (op);
high = CONST_DOUBLE_HIGH (op);
return high == 0 && low <= (unsigned) 0xffffffff;
}
else
return 0;
case REG :
return register_operand (op, mode);
case SUBREG :
/* (subreg (mem ...) ...) can occur here if the inner part was once a
pseudo-reg and is now a stack slot. */
if (GET_CODE (SUBREG_REG (op)) == MEM)
return address_operand (XEXP (SUBREG_REG (op), 0), mode);
else
return register_operand (op, mode);
case MEM :
if (GET_CODE (XEXP (op, 0)) == PRE_INC
|| GET_CODE (XEXP (op, 0)) == PRE_DEC)
return 0; /* loads can't do pre-{inc,dec} */
return address_operand (XEXP (op, 0), mode);
default :
return 0;
}
})
 
;; Return true if OP is an acceptable argument for a double word move
;; source.
 
(define_predicate "move_double_src_operand"
(match_code "reg,subreg,mem,const_int,const_double")
{
switch (GET_CODE (op))
{
case CONST_INT :
case CONST_DOUBLE :
/* Any constant is acceptable; the move expander splits it. */
return 1;
case REG :
return register_operand (op, mode);
case SUBREG :
/* (subreg (mem ...) ...) can occur here if the inner part was once a
pseudo-reg and is now a stack slot. */
if (GET_CODE (SUBREG_REG (op)) == MEM)
return move_double_src_operand (SUBREG_REG (op), mode);
else
return register_operand (op, mode);
case MEM :
/* Disallow auto inc/dec for now. */
if (GET_CODE (XEXP (op, 0)) == PRE_DEC
|| GET_CODE (XEXP (op, 0)) == PRE_INC)
return 0;
return address_operand (XEXP (op, 0), mode);
default :
return 0;
}
})
 
;; Return true if OP is a const_int requiring two instructions to
;; load.
 
(define_predicate "two_insn_const_operand"
  (match_code "const_int")
{
  if (GET_CODE (op) != CONST_INT)
    return 0;

  /* Anything loadable in one insn -- a signed 16 bit value, an unsigned
     24 bit value, or a seth-able upper pattern -- does not need two.  */
  return ! (INT16_P (INTVAL (op))
            || UINT24_P (INTVAL (op))
            || UPPER16_P (INTVAL (op)));
})
 
;; Returns 1 if OP is a symbol reference.
 
(define_predicate "symbolic_operand"
  (match_code "symbol_ref,label_ref,const")
{
  /* Any of the three symbolic rtx codes qualifies.  */
  enum rtx_code code = GET_CODE (op);

  return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
})
 
;; Return true if OP is a signed 8 bit immediate value.
 
(define_predicate "int8_operand"
  (match_code "const_int")
{
  /* True for constants fitting in a signed 8 bit immediate.  */
  return GET_CODE (op) == CONST_INT && INT8_P (INTVAL (op));
})

;; Return true if OP is an unsigned 16 bit immediate value.

(define_predicate "uint16_operand"
  (match_code "const_int")
{
  /* True for constants fitting in an unsigned 16 bit immediate.  */
  return GET_CODE (op) == CONST_INT && UINT16_P (INTVAL (op));
})
 
;; Return true if OP is a register or signed 16 bit value.
 
(define_predicate "reg_or_int16_operand"
  (match_code "reg,subreg,const_int")
{
  /* Constants must fit a signed 16 bit immediate; everything else
     defers to register_operand.  */
  if (GET_CODE (op) == CONST_INT)
    return INT16_P (INTVAL (op));

  return register_operand (op, mode);
})

;; Return true if OP is a register or an unsigned 16 bit value.

(define_predicate "reg_or_uint16_operand"
  (match_code "reg,subreg,const_int")
{
  /* Constants must fit an unsigned 16 bit immediate; everything else
     defers to register_operand.  */
  if (GET_CODE (op) == CONST_INT)
    return UINT16_P (INTVAL (op));

  return register_operand (op, mode);
})
 
;; Return true if OP is a register or signed 16 bit value for
;; compares.
 
(define_predicate "reg_or_cmp_int16_operand"
  (match_code "reg,subreg,const_int")
{
  /* Constants must satisfy the compare-immediate range; registers and
     subregs defer to register_operand.  */
  if (GET_CODE (op) == CONST_INT)
    return CMP_INT16_P (INTVAL (op));

  return register_operand (op, mode);
})
 
;; Return true if OP is a register or an integer value that can be
;; used in SEQ/SNE. We can use either XOR of the value or ADD of the
;; negative of the value for the constant. Don't allow 0, because
;; that is special cased.
 
(define_predicate "reg_or_eq_int16_operand"
  (match_code "reg,subreg,const_int")
{
  HOST_WIDE_INT v;

  if (GET_CODE (op) != CONST_INT)
    return register_operand (op, mode);

  v = INTVAL (op);

  /* Zero is rejected -- it is handled specially elsewhere.  The value
     must be usable either directly (XOR) or negated (ADD).  */
  if (v == 0)
    return 0;

  return UINT16_P (v) || CMP_INT16_P (-v);
})
 
;; Return true if OP is a signed 16 bit immediate value useful in
;; comparisons.
 
(define_predicate "cmp_int16_operand"
  (match_code "const_int")
{
  /* True for constants in the compare-immediate range.  */
  return GET_CODE (op) == CONST_INT && CMP_INT16_P (INTVAL (op));
})
 
;; Acceptable arguments to the call insn.
 
(define_predicate "call_address_operand"
(match_code "symbol_ref,label_ref,const")
{
/* Delegate to symbolic_operand; the trailing comment explains what is
deliberately excluded. */
return symbolic_operand (op, mode);

/* Constants and values in registers are not OK, because
the m32r BL instruction can only support PC relative branching. */
})
 
;; Return true if OP is an acceptable input argument for a zero/sign
;; extend operation.
 
(define_predicate "extend_operand"
(match_code "reg,subreg,mem")
{
rtx addr;

switch (GET_CODE (op))
{
case REG :
case SUBREG :
return register_operand (op, mode);

case MEM :
addr = XEXP (op, 0);
if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
return 0; /* loads can't do pre inc/pre dec */

return address_operand (addr, mode);

default :
return 0;
}
})
 
;; Return nonzero if the operand is an insn that is a small
;; insn. Allow const_int 0 as well, which is a placeholder for NOP
;; slots.
 
(define_predicate "small_insn_p"
(match_code "insn,call_insn,jump_insn")
{
/* NOTE(review): this arm can only fire when the predicate is called
directly from C, since match_code does not include const_int --
confirm whether it is still needed. */
if (GET_CODE (op) == CONST_INT && INTVAL (op) == 0)
return 1;

if (! INSN_P (op))
return 0;

/* Small == a 2 byte (16 bit) insn. */
return get_attr_length (op) == 2;
})
 
;; Return true if op is an integer constant, less than or equal to
;; MAX_MOVE_BYTES.
 
(define_predicate "m32r_block_immediate_operand"
  (match_code "const_int")
{
  if (GET_CODE (op) != CONST_INT)
    return 0;

  /* The length must be positive and no more than MAX_MOVE_BYTES.  */
  return INTVAL (op) > 0 && INTVAL (op) <= MAX_MOVE_BYTES;
})
 
;; Return nonzero if the operand is an insn that is a large insn.
 
(define_predicate "large_insn_p"
(match_code "insn,call_insn,jump_insn")
{
if (! INSN_P (op))
return 0;

/* Large == anything other than a 2 byte insn. */
return get_attr_length (op) != 2;
})
 
;; Returns 1 if OP is an acceptable operand for seth/add3.
 
(define_predicate "seth_add3_operand"
(match_code "symbol_ref,label_ref,const")
{
/* seth/add3 addressing is not usable under PIC. */
if (flag_pic)
return 0;

if (GET_CODE (op) == SYMBOL_REF
|| GET_CODE (op) == LABEL_REF)
return 1;

/* Also allow symbol+offset, provided the offset fits the signed 16 bit
add3 immediate. */
if (GET_CODE (op) == CONST
&& GET_CODE (XEXP (op, 0)) == PLUS
&& GET_CODE (XEXP (XEXP (op, 0), 0)) == SYMBOL_REF
&& GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT
&& INT16_P (INTVAL (XEXP (XEXP (op, 0), 1))))
return 1;

return 0;
})
/little.h
0,0 → 1,33
/* Definitions for Renesas little endian M32R cpu.
Copyright (C) 2003, 2004, 2005, 2007
Free Software Foundation, Inc.
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 3, or (at your
option) any later version.
 
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
License for more details.
 
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
 
/* Little endian is the default for this configuration. */
#define TARGET_LITTLE_ENDIAN 1

/* Define __BIG_ENDIAN__ when -mbe/-mbig-endian is given, otherwise
__LITTLE_ENDIAN__. */
#define CPP_ENDIAN_SPEC \
" %{mbe:-D__BIG_ENDIAN__} %{mbig-endian:-D__BIG_ENDIAN__}" \
" %{!mbe: %{!mbig-endian:-D__LITTLE_ENDIAN__}}"
/* Pass -mle to cc1 unless big endian was requested. */
#define CC1_ENDIAN_SPEC " %{!mbe: %{!mbig-endian:-mle}}"

/* Select the matching endianness in the assembler... */
#define ASM_ENDIAN_SPEC \
" %{!mbe: %{!mbig-endian:-EL}} %{mbe:-EB} %{mbig-endian:-EB}"

/* ...and in the linker. */
#define LINK_ENDIAN_SPEC " %{!mbe: %{!mbig-endian:-EL}}"
 
/m32r.c
0,0 → 1,2465
/* Subroutines used for code generation on the Renesas M32R cpu.
Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004,
2005, 2007 Free Software Foundation, Inc.
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 3, or (at your
option) any later version.
 
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
License for more details.
 
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
 
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "expr.h"
#include "function.h"
#include "recog.h"
#include "toplev.h"
#include "ggc.h"
#include "integrate.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
 
/* Save the operands last given to a compare for use when we
generate a scc or bcc insn. */
rtx m32r_compare_op0, m32r_compare_op1;
 
/* Array of valid operand punctuation characters. */
char m32r_punct_chars[256];
 
/* Selected code model. */
enum m32r_model m32r_model = M32R_MODEL_DEFAULT;
 
/* Selected SDA support. */
enum m32r_sdata m32r_sdata = M32R_SDATA_DEFAULT;
 
/* Machine-specific symbol_ref flags. */
#define SYMBOL_FLAG_MODEL_SHIFT SYMBOL_FLAG_MACH_DEP_SHIFT
#define SYMBOL_REF_MODEL(X) \
((enum m32r_model) ((SYMBOL_REF_FLAGS (X) >> SYMBOL_FLAG_MODEL_SHIFT) & 3))
 
/* For string literals, etc. */
#define LIT_NAME_P(NAME) ((NAME)[0] == '*' && (NAME)[1] == '.')
 
/* Forward declaration. */
static bool m32r_handle_option (size_t, const char *, int);
static void init_reg_tables (void);
static void block_move_call (rtx, rtx, rtx);
static int m32r_is_insn (rtx);
const struct attribute_spec m32r_attribute_table[];
static tree m32r_handle_model_attribute (tree *, tree, tree, int, bool *);
static void m32r_output_function_prologue (FILE *, HOST_WIDE_INT);
static void m32r_output_function_epilogue (FILE *, HOST_WIDE_INT);
 
static void m32r_file_start (void);
 
static int m32r_adjust_priority (rtx, int);
static int m32r_issue_rate (void);
 
static void m32r_encode_section_info (tree, rtx, int);
static bool m32r_in_small_data_p (tree);
static bool m32r_return_in_memory (tree, tree);
static void m32r_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
tree, int *, int);
static void init_idents (void);
static bool m32r_rtx_costs (rtx, int, int, int *);
static bool m32r_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
tree, bool);
static int m32r_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
tree, bool);
/* Initialize the GCC target structure. */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE m32r_attribute_table
 
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
 
#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE m32r_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE m32r_output_function_epilogue
 
#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START m32r_file_start
 
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY m32r_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE m32r_issue_rate
 
#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS TARGET_CPU_DEFAULT
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION m32r_handle_option
 
#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO m32r_encode_section_info
#undef TARGET_IN_SMALL_DATA_P
#define TARGET_IN_SMALL_DATA_P m32r_in_small_data_p
 
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS m32r_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_0
 
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY m32r_return_in_memory
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS m32r_setup_incoming_varargs
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE m32r_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES m32r_arg_partial_bytes
 
/* The one and only target hooks structure, built from the TARGET_* macro
overrides above. */
struct gcc_target targetm = TARGET_INITIALIZER;
/* Implement TARGET_HANDLE_OPTION.

   CODE is the option's enumerator, ARG its string argument (if any) and
   VALUE its integer argument.  Return true if the option is valid.  */

static bool
m32r_handle_option (size_t code, const char *arg, int value)
{
  switch (code)
    {
    case OPT_m32r:
      /* Plain -m32r deselects the M32RX/M32R2 instruction subsets.  */
      target_flags &= ~(MASK_M32R2 | MASK_M32RX);
      return true;

    case OPT_mmodel_:
      /* -mmodel={small,medium,large}: select the code model.  */
      if (! strcmp (arg, "small"))
	m32r_model = M32R_MODEL_SMALL;
      else if (! strcmp (arg, "medium"))
	m32r_model = M32R_MODEL_MEDIUM;
      else if (! strcmp (arg, "large"))
	m32r_model = M32R_MODEL_LARGE;
      else
	return false;
      return true;

    case OPT_msdata_:
      /* -msdata={none,sdata,use}: select small-data support.  */
      if (! strcmp (arg, "none"))
	m32r_sdata = M32R_SDATA_NONE;
      else if (! strcmp (arg, "sdata"))
	m32r_sdata = M32R_SDATA_SDATA;
      else if (! strcmp (arg, "use"))
	m32r_sdata = M32R_SDATA_USE;
      else
	return false;
      return true;

    case OPT_mno_flush_func:
      /* No cache flush function is to be called.  */
      m32r_cache_flush_func = NULL;
      return true;

    case OPT_mflush_trap_:
      /* Trap numbers above 15 are invalid.  */
      return value <= 15;

    case OPT_mno_flush_trap:
      /* -1 marks "no trap available".  */
      m32r_cache_flush_trap = -1;
      return true;

    default:
      return true;
    }
}
 
/* Called by OVERRIDE_OPTIONS to initialize various things. */
 
void
m32r_init (void)
{
init_reg_tables ();

/* Initialize array for PRINT_OPERAND_PUNCT_VALID_P. */
memset (m32r_punct_chars, 0, sizeof (m32r_punct_chars));
m32r_punct_chars['#'] = 1;
m32r_punct_chars['@'] = 1; /* ??? no longer used */

/* Provide default value if not specified. */
/* Default -G threshold for placing objects in small data. */
if (!g_switch_set)
g_switch_value = SDATA_DEFAULT_SIZE;
}
 
/* Vectors to keep interesting information about registers where it can easily
be got. We used to use the actual mode value as the bit number, but there
is (or may be) more than 32 modes now. Instead we use two tables: one
indexed by hard register number, and one indexed by mode. */

/* The purpose of m32r_mode_class is to shrink the range of modes so that
they all fit (as bit numbers) in a 32 bit word (again). Each real mode is
mapped into one m32r_mode_class mode. */

enum m32r_mode_class
{
C_MODE,
S_MODE, D_MODE, T_MODE, O_MODE,
SF_MODE, DF_MODE, TF_MODE, OF_MODE, A_MODE
};

/* Modes for condition codes. */
#define C_MODES (1 << (int) C_MODE)

/* Modes for single-word and smaller quantities. */
#define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))

/* Modes for double-word and smaller quantities. */
#define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << DF_MODE))

/* Modes for quad-word and smaller quantities. */
#define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))

/* Modes for accumulators. */
#define A_MODES (1 << (int) A_MODE)

/* Value is 1 if register/mode pair is acceptable on the M32R. */

/* Indices 0-12 allow up to quad-word values, 13-16 only word-sized,
17 condition codes, 18-19 accumulators. */
const unsigned int m32r_hard_regno_mode_ok[FIRST_PSEUDO_REGISTER] =
{
T_MODES, T_MODES, T_MODES, T_MODES, T_MODES, T_MODES, T_MODES, T_MODES,
T_MODES, T_MODES, T_MODES, T_MODES, T_MODES, S_MODES, S_MODES, S_MODES,
S_MODES, C_MODES, A_MODES, A_MODES
};

/* Per-mode bit class; filled in by init_reg_tables. */
unsigned int m32r_mode_class [NUM_MACHINE_MODES];

/* Per-hard-register class; filled in by init_reg_tables. */
enum reg_class m32r_regno_reg_class[FIRST_PSEUDO_REGISTER];
 
/* Fill in m32r_mode_class (bit-class per machine mode, keyed by operand
size and class) and m32r_regno_reg_class (register class per hard
register). */

static void
init_reg_tables (void)
{
int i;

for (i = 0; i < NUM_MACHINE_MODES; i++)
{
switch (GET_MODE_CLASS (i))
{
case MODE_INT:
case MODE_PARTIAL_INT:
case MODE_COMPLEX_INT:
if (GET_MODE_SIZE (i) <= 4)
m32r_mode_class[i] = 1 << (int) S_MODE;
else if (GET_MODE_SIZE (i) == 8)
m32r_mode_class[i] = 1 << (int) D_MODE;
else if (GET_MODE_SIZE (i) == 16)
m32r_mode_class[i] = 1 << (int) T_MODE;
else if (GET_MODE_SIZE (i) == 32)
m32r_mode_class[i] = 1 << (int) O_MODE;
else
m32r_mode_class[i] = 0;
break;
case MODE_FLOAT:
case MODE_COMPLEX_FLOAT:
if (GET_MODE_SIZE (i) <= 4)
m32r_mode_class[i] = 1 << (int) SF_MODE;
else if (GET_MODE_SIZE (i) == 8)
m32r_mode_class[i] = 1 << (int) DF_MODE;
else if (GET_MODE_SIZE (i) == 16)
m32r_mode_class[i] = 1 << (int) TF_MODE;
else if (GET_MODE_SIZE (i) == 32)
m32r_mode_class[i] = 1 << (int) OF_MODE;
else
m32r_mode_class[i] = 0;
break;
case MODE_CC:
m32r_mode_class[i] = 1 << (int) C_MODE;
break;
default:
/* Other mode classes (e.g. vectors) are not supported. */
m32r_mode_class[i] = 0;
break;
}
}

/* GPRs and the arg pointer are GENERAL_REGS; everything else (condition
bit, accumulators) is allocatable to no class. */
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
{
if (GPR_P (i))
m32r_regno_reg_class[i] = GENERAL_REGS;
else if (i == ARG_POINTER_REGNUM)
m32r_regno_reg_class[i] = GENERAL_REGS;
else
m32r_regno_reg_class[i] = NO_REGS;
}
}
/* M32R specific attribute support.
 
interrupt - for interrupt functions
 
model - select code model used to access object
 
small: addresses use 24 bits, use bl to make calls
medium: addresses use 32 bits, use bl to make calls
large: addresses use 32 bits, use seth/add3/jl to make calls
 
Grep for MODEL in m32r.h for more info. */
 
/* Cached identifier nodes for the arguments accepted by the "model"
   attribute; created lazily by init_idents.  */
static tree small_ident1;
static tree small_ident2;
static tree medium_ident1;
static tree medium_ident2;
static tree large_ident1;
static tree large_ident2;

/* Create the identifier nodes above on first use.  Subsequent calls are
   no-ops.  */

static void
init_idents (void)
{
  if (small_ident1 != 0)
    return;

  small_ident1 = get_identifier ("small");
  small_ident2 = get_identifier ("__small__");
  medium_ident1 = get_identifier ("medium");
  medium_ident2 = get_identifier ("__medium__");
  large_ident1 = get_identifier ("large");
  large_ident2 = get_identifier ("__large__");
}
 
/* Attributes recognized by this backend; "interrupt" needs no argument
checking (NULL handler), "model" takes exactly one argument validated by
m32r_handle_model_attribute. */
const struct attribute_spec m32r_attribute_table[] =
{
/* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
{ "interrupt", 0, 0, true, false, false, NULL },
{ "model", 1, 1, true, false, false, m32r_handle_model_attribute },
{ NULL, 0, 0, false, false, false, NULL }
};
 
 
/* Handle an "model" attribute; arguments as in
struct attribute_spec.handler. */
static tree
m32r_handle_model_attribute (tree *node ATTRIBUTE_UNUSED, tree name,
			     tree args, int flags ATTRIBUTE_UNUSED,
			     bool *no_add_attrs)
{
  tree arg;

  init_idents ();
  arg = TREE_VALUE (args);

  /* The only valid arguments are small/medium/large, optionally with
     double underscores.  Anything else draws a warning and the attribute
     is dropped.  */
  if (! (arg == small_ident1 || arg == small_ident2
	 || arg == medium_ident1 || arg == medium_ident2
	 || arg == large_ident1 || arg == large_ident2))
    {
      warning (OPT_Wattributes, "invalid argument of %qs attribute",
	       IDENTIFIER_POINTER (name));
      *no_add_attrs = true;
    }

  return NULL_TREE;
}
/* Encode section information of DECL, which is either a VAR_DECL,
FUNCTION_DECL, STRING_CST, CONSTRUCTOR, or ???.
 
For the M32R we want to record:
 
- whether the object lives in .sdata/.sbss.
- what code model should be used to access the object
*/
 
static void
m32r_encode_section_info (tree decl, rtx rtl, int first)
{
int extra_flags = 0;
tree model_attr;
enum m32r_model model;

default_encode_section_info (decl, rtl, first);

if (!DECL_P (decl))
return;

/* Choose the code model: from an explicit "model" attribute when
present, otherwise from the -mmodel target switches. */
model_attr = lookup_attribute ("model", DECL_ATTRIBUTES (decl));
if (model_attr)
{
tree id;

init_idents ();

id = TREE_VALUE (TREE_VALUE (model_attr));

if (id == small_ident1 || id == small_ident2)
model = M32R_MODEL_SMALL;
else if (id == medium_ident1 || id == medium_ident2)
model = M32R_MODEL_MEDIUM;
else if (id == large_ident1 || id == large_ident2)
model = M32R_MODEL_LARGE;
else
gcc_unreachable (); /* shouldn't happen */
}
else
{
if (TARGET_MODEL_SMALL)
model = M32R_MODEL_SMALL;
else if (TARGET_MODEL_MEDIUM)
model = M32R_MODEL_MEDIUM;
else if (TARGET_MODEL_LARGE)
model = M32R_MODEL_LARGE;
else
gcc_unreachable (); /* shouldn't happen */
}
/* Record the model in the symbol's flags (see SYMBOL_REF_MODEL). */
extra_flags |= model << SYMBOL_FLAG_MODEL_SHIFT;

if (extra_flags)
SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= extra_flags;
}
 
/* Only mark the object as being small data area addressable if
it hasn't been explicitly marked with a code model.
 
The user can explicitly put an object in the small data area with the
section attribute. If the object is in sdata/sbss and marked with a
code model do both [put the object in .sdata and mark it as being
addressed with a specific code model - don't mark it as being addressed
with an SDA reloc though]. This is ok and might be useful at times. If
the object doesn't fit the linker will give an error. */
 
static bool
m32r_in_small_data_p (tree decl)
{
  tree section;

  /* Only variables live in .sdata/.sbss.  */
  if (TREE_CODE (decl) != VAR_DECL)
    return false;

  /* An explicit code model takes precedence; see the comment above.  */
  if (lookup_attribute ("model", DECL_ATTRIBUTES (decl)))
    return false;

  section = DECL_SECTION_NAME (decl);
  if (section)
    {
      /* TREE_STRING_POINTER yields a const string; keep the local const
	 instead of casting constness away.  */
      const char *name = TREE_STRING_POINTER (section);

      if (strcmp (name, ".sdata") == 0 || strcmp (name, ".sbss") == 0)
	return true;
    }
  else
    {
      /* No explicit section: writable data small enough for the -G
	 threshold qualifies, unless -msdata=none.  */
      if (! TREE_READONLY (decl) && ! TARGET_SDATA_NONE)
	{
	  int size = int_size_in_bytes (TREE_TYPE (decl));

	  /* int_size_in_bytes returns -1 for variable-sized objects.  */
	  if (size > 0 && (unsigned HOST_WIDE_INT) size <= g_switch_value)
	    return true;
	}
    }

  return false;
}
 
/* Per-function hook run before RTL is emitted.  Currently a no-op;
   the empty function is kept as a convenient place to experiment
   (it used to contain code).  */

void
m32r_init_expanders (void)
{
}
/* Return nonzero if OP is a MEM whose address is acceptable as a
   call target (as judged by call_address_operand).  */

int
call_operand (rtx op, enum machine_mode mode)
{
  rtx addr;

  if (GET_CODE (op) != MEM)
    return 0;

  addr = XEXP (op, 0);
  return call_address_operand (addr, mode);
}
 
/* Return 1 if OP is a reference to an object in .sdata/.sbss.  */

int
small_data_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  if (! TARGET_SDATA_USE)
    return 0;

  /* Bare symbol: the flag was set by m32r_encode_section_info.  */
  if (GET_CODE (op) == SYMBOL_REF)
    return SYMBOL_REF_SMALL_P (op);

  /* Also accept (const (plus (symbol_ref) (const_int))) where the
     displacement still fits in the 16-bit SDA reloc.  */
  if (GET_CODE (op) == CONST)
    {
      rtx sum = XEXP (op, 0);

      if (GET_CODE (sum) == PLUS
	  && GET_CODE (XEXP (sum, 0)) == SYMBOL_REF
	  && GET_CODE (XEXP (sum, 1)) == CONST_INT
	  && INT16_P (INTVAL (XEXP (sum, 1))))
	return SYMBOL_REF_SMALL_P (XEXP (sum, 0));
    }

  return 0;
}
 
/* Return 1 if OP is a symbol that can use 24 bit addressing.  */

int
addr24_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx sym;

  /* PIC addressing never uses absolute 24-bit forms.  */
  if (flag_pic)
    return 0;

  if (GET_CODE (op) == LABEL_REF)
    return TARGET_ADDR24;

  if (GET_CODE (op) == SYMBOL_REF)
    sym = op;
  else if (GET_CODE (op) == CONST)
    {
      /* (const (plus sym uint24)) — displacement must itself fit.  */
      rtx sum = XEXP (op, 0);

      if (GET_CODE (sum) != PLUS
	  || GET_CODE (XEXP (sum, 0)) != SYMBOL_REF
	  || GET_CODE (XEXP (sum, 1)) != CONST_INT
	  || ! UINT24_P (INTVAL (XEXP (sum, 1))))
	return 0;
      sym = XEXP (sum, 0);
    }
  else
    return 0;

  /* Small-model symbols are always reachable in 24 bits.  */
  if (SYMBOL_REF_MODEL (sym) == M32R_MODEL_SMALL)
    return 1;

  /* Under -maddr24, constant-pool entries and literals qualify too.  */
  return (TARGET_ADDR24
	  && (CONSTANT_POOL_ADDRESS_P (sym)
	      || LIT_NAME_P (XSTR (sym, 0))));
}
 
/* Return 1 if OP is a symbol that needs 32 bit addressing,
   i.e. it is neither 24-bit addressable nor in the small data area.  */

int
addr32_operand (rtx op, enum machine_mode mode)
{
  rtx sym;

  if (GET_CODE (op) == LABEL_REF)
    return TARGET_ADDR32;

  if (GET_CODE (op) == SYMBOL_REF)
    sym = op;
  else if (! flag_pic
	   && GET_CODE (op) == CONST
	   && GET_CODE (XEXP (op, 0)) == PLUS
	   && GET_CODE (XEXP (XEXP (op, 0), 0)) == SYMBOL_REF
	   && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
    sym = XEXP (XEXP (op, 0), 0);
  else
    return 0;

  /* 32-bit addressing is the fallback for everything else.  */
  return (! addr24_operand (sym, mode)
	  && ! small_data_operand (sym, mode));
}
 
/* Return 1 if OP is a function that can be called with the `bl' insn
   (26-bit pc-relative reach).  */

int
call26_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  /* PIC calls always go through `bl'.  */
  if (flag_pic)
    return 1;

  /* For a known symbol, only the large model forces an indirect call.  */
  if (GET_CODE (op) == SYMBOL_REF)
    return SYMBOL_REF_MODEL (op) != M32R_MODEL_LARGE;

  return TARGET_CALL26;
}
 
/* Return 1 if OP is a DImode const we want to handle inline.
   This must match the code in the movdi pattern.
   It is used by the 'G' CONST_DOUBLE_OK_FOR_LETTER.  */

int
easy_di_const (rtx op)
{
  rtx hi_rtx, lo_rtx;
  HOST_WIDE_INT hi, lo;

  split_double (op, &hi_rtx, &lo_rtx);
  hi = INTVAL (hi_rtx);
  lo = INTVAL (lo_rtx);

  /* Accept values where each half fits the 8-bit immediate of a
     16-bit `ldi' insn, i.e. two short loads suffice.  */
  return (hi >= -128 && hi <= 127
	  && lo >= -128 && lo <= 127);
}
 
/* Return 1 if OP is a DFmode const we want to handle inline.
   This must match the code in the movdf pattern.
   It is used by the 'H' CONST_DOUBLE_OK_FOR_LETTER.  */

int
easy_df_const (rtx op)
{
  REAL_VALUE_TYPE rv;
  long words[2];

  REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
  REAL_VALUE_TO_TARGET_DOUBLE (rv, words);

  /* The low word must be all zero; the high word must either be zero
     or have a clear low half (loadable with `seth').  */
  if (words[1] != 0)
    return 0;
  return words[0] == 0 || (words[0] & 0xffff) == 0;
}
 
/* Return 1 if OP is (mem (reg ...)).
   This is used in insn length calcs.  */

int
memreg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  if (GET_CODE (op) != MEM)
    return 0;
  return GET_CODE (XEXP (op, 0)) == REG;
}
 
/* Return nonzero if TYPE must be passed by indirect reference.
   Anything larger than two words (8 bytes), or of unknown/variable
   size, goes by reference.  */

static bool
m32r_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
			enum machine_mode mode, tree type,
			bool named ATTRIBUTE_UNUSED)
{
  int size = (type != NULL_TREE
	      ? int_size_in_bytes (type)	/* < 0 for variable-size.  */
	      : GET_MODE_SIZE (mode));

  return size < 0 || size > 8;
}
/* Comparisons.  */

/* X and Y are two things to compare using CODE.  Emit the compare insn and
   return the rtx for compare [arg0 of the if_then_else].
   If need_compare is true then the comparison insn must be generated, rather
   than being subsumed into the following branch instruction.

   The hardware only compares for EQ, LT and LTU, setting the condition
   bit (CARRY_REGNUM); every other comparison is mapped onto one of those
   three, possibly with swapped operands and/or an inverted branch
   condition.  */

rtx
gen_compare (enum rtx_code code, rtx x, rtx y, int need_compare)
{
  enum rtx_code compare_code;	/* Hardware comparison actually emitted.  */
  enum rtx_code branch_code;	/* Condition the following branch tests.  */
  rtx cc_reg = gen_rtx_REG (CCmode, CARRY_REGNUM);
  int must_swap = 0;		/* Nonzero: compare (y, x) instead of (x, y).  */

  switch (code)
    {
    case EQ:  compare_code = EQ;  branch_code = NE; break;
    case NE:  compare_code = EQ;  branch_code = EQ; break;
    case LT:  compare_code = LT;  branch_code = NE; break;
    case LE:  compare_code = LT;  branch_code = EQ; must_swap = 1; break;
    case GT:  compare_code = LT;  branch_code = NE; must_swap = 1; break;
    case GE:  compare_code = LT;  branch_code = EQ; break;
    case LTU: compare_code = LTU; branch_code = NE; break;
    case LEU: compare_code = LTU; branch_code = EQ; must_swap = 1; break;
    case GTU: compare_code = LTU; branch_code = NE; must_swap = 1; break;
    case GEU: compare_code = LTU; branch_code = EQ; break;

    default:
      gcc_unreachable ();
    }

  if (need_compare)
    {
      /* Emit an explicit comparison into the condition bit now; on
	 success return (code cc_reg 0) for the caller's branch.  If no
	 case below matches, fall through to the common tail which
	 forces operands into registers.  */
      switch (compare_code)
	{
	case EQ:
	  if (GET_CODE (y) == CONST_INT
	      && CMP_INT16_P (INTVAL (y))	/* Reg equal to small const.  */
	      && y != const0_rtx)
	    {
	      /* Turn x == c into (x - c) == 0 so cmp against zero works.  */
	      rtx tmp = gen_reg_rtx (SImode);

	      emit_insn (gen_addsi3 (tmp, x, GEN_INT (-INTVAL (y))));
	      x = tmp;
	      y = const0_rtx;
	    }
	  else if (CONSTANT_P (y))		/* Reg equal to const.  */
	    {
	      rtx tmp = force_reg (GET_MODE (x), y);
	      y = tmp;
	    }

	  if (register_operand (y, SImode)	/* Reg equal to reg.  */
	      || y == const0_rtx)		/* Reg equal to zero.  */
	    {
	      emit_insn (gen_cmp_eqsi_insn (x, y));

	      return gen_rtx_fmt_ee (code, CCmode, cc_reg, const0_rtx);
	    }
	  break;

	case LT:
	  if (register_operand (y, SImode)
	      || (GET_CODE (y) == CONST_INT && CMP_INT16_P (INTVAL (y))))
	    {
	      rtx tmp = gen_reg_rtx (SImode);	/* Reg compared to reg.  */

	      switch (code)
		{
		case LT:
		  emit_insn (gen_cmp_ltsi_insn (x, y));
		  code = EQ;
		  break;
		case LE:
		  /* x <= y  ==>  x < y+1 ... except y == 0, where x < 1.  */
		  if (y == const0_rtx)
		    tmp = const1_rtx;
		  else
		    emit_insn (gen_addsi3 (tmp, y, constm1_rtx));
		  emit_insn (gen_cmp_ltsi_insn (x, tmp));
		  code = EQ;
		  break;
		case GT:
		  /* x > y  ==>  !(x < y+1).  NOTE(review): for CONST_INT Y
		     this builds a (plus (const_int) (const_int)) rtx rather
		     than folding to a fresh CONST_INT — verify the cmp
		     pattern's predicate accepts that form.  */
		  if (GET_CODE (y) == CONST_INT)
		    tmp = gen_rtx_PLUS (SImode, y, const1_rtx);
		  else
		    emit_insn (gen_addsi3 (tmp, y, constm1_rtx));
		  emit_insn (gen_cmp_ltsi_insn (x, tmp));
		  code = NE;
		  break;
		case GE:
		  emit_insn (gen_cmp_ltsi_insn (x, y));
		  code = NE;
		  break;
		default:
		  gcc_unreachable ();
		}

	      return gen_rtx_fmt_ee (code, CCmode, cc_reg, const0_rtx);
	    }
	  break;

	case LTU:
	  if (register_operand (y, SImode)
	      || (GET_CODE (y) == CONST_INT && CMP_INT16_P (INTVAL (y))))
	    {
	      rtx tmp = gen_reg_rtx (SImode);	/* Reg (unsigned) compared to reg.  */

	      switch (code)
		{
		case LTU:
		  emit_insn (gen_cmp_ltusi_insn (x, y));
		  code = EQ;
		  break;
		case LEU:
		  /* Same y+1 trick as the signed LE case above.  */
		  if (y == const0_rtx)
		    tmp = const1_rtx;
		  else
		    emit_insn (gen_addsi3 (tmp, y, constm1_rtx));
		  emit_insn (gen_cmp_ltusi_insn (x, tmp));
		  code = EQ;
		  break;
		case GTU:
		  /* NOTE(review): same unfolded (plus c 1) as the GT case.  */
		  if (GET_CODE (y) == CONST_INT)
		    tmp = gen_rtx_PLUS (SImode, y, const1_rtx);
		  else
		    emit_insn (gen_addsi3 (tmp, y, constm1_rtx));
		  emit_insn (gen_cmp_ltusi_insn (x, tmp));
		  code = NE;
		  break;
		case GEU:
		  emit_insn (gen_cmp_ltusi_insn (x, y));
		  code = NE;
		  break;
		default:
		  gcc_unreachable ();
		}

	      return gen_rtx_fmt_ee (code, CCmode, cc_reg, const0_rtx);
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  else
    {
      /* The comparison may be folded into the branch; return a condition
	 rtx on the original operands where the branch patterns allow it.  */
      /* Reg/reg equal comparison.  */
      if (compare_code == EQ
	  && register_operand (y, SImode))
	return gen_rtx_fmt_ee (code, CCmode, x, y);

      /* Reg/zero signed comparison.  */
      if ((compare_code == EQ || compare_code == LT)
	  && y == const0_rtx)
	return gen_rtx_fmt_ee (code, CCmode, x, y);

      /* Reg/smallconst equal comparison.  */
      if (compare_code == EQ
	  && GET_CODE (y) == CONST_INT
	  && CMP_INT16_P (INTVAL (y)))
	{
	  /* As above: x == c  ==>  (x - c) == 0.  */
	  rtx tmp = gen_reg_rtx (SImode);

	  emit_insn (gen_addsi3 (tmp, x, GEN_INT (-INTVAL (y))));
	  return gen_rtx_fmt_ee (code, CCmode, tmp, const0_rtx);
	}

      /* Reg/const equal comparison.  */
      if (compare_code == EQ
	  && CONSTANT_P (y))
	{
	  rtx tmp = force_reg (GET_MODE (x), y);

	  return gen_rtx_fmt_ee (code, CCmode, x, tmp);
	}
    }

  /* Common tail: force Y into a register when the cmp insn can't take
     it (always necessary when operands will be swapped), then emit the
     hardware compare and hand back the branch condition on the CC bit.  */
  if (CONSTANT_P (y))
    {
      if (must_swap)
	y = force_reg (GET_MODE (x), y);
      else
	{
	  int ok_const = reg_or_int16_operand (y, GET_MODE (y));

	  if (! ok_const)
	    y = force_reg (GET_MODE (x), y);
	}
    }

  switch (compare_code)
    {
    case EQ :
      emit_insn (gen_cmp_eqsi_insn (must_swap ? y : x, must_swap ? x : y));
      break;
    case LT :
      emit_insn (gen_cmp_ltsi_insn (must_swap ? y : x, must_swap ? x : y));
      break;
    case LTU :
      emit_insn (gen_cmp_ltusi_insn (must_swap ? y : x, must_swap ? x : y));
      break;

    default:
      gcc_unreachable ();
    }

  return gen_rtx_fmt_ee (branch_code, VOIDmode, cc_reg, CONST0_RTX (CCmode));
}
/* Split a 2 word move (DI or DF) into component parts.
   Returns the emitted insn sequence (two single-word SETs), taking care
   to order the word moves so that neither clobbers a register or
   address the second move still needs.  */

rtx
gen_split_move_double (rtx operands[])
{
  enum machine_mode mode = GET_MODE (operands[0]);
  rtx dest = operands[0];
  rtx src  = operands[1];
  rtx val;

  /* We might have (SUBREG (MEM)) here, so just get rid of the
     subregs to make this code simpler.  It is safe to call
     alter_subreg any time after reload.  */
  if (GET_CODE (dest) == SUBREG)
    alter_subreg (&dest);
  if (GET_CODE (src) == SUBREG)
    alter_subreg (&src);

  start_sequence ();
  if (GET_CODE (dest) == REG)
    {
      int dregno = REGNO (dest);

      /* Reg = reg.  */
      if (GET_CODE (src) == REG)
	{
	  int sregno = REGNO (src);

	  /* Overlap check: copying low word first would clobber the
	     source's high word when dest == src+1.  */
	  int reverse = (dregno == sregno + 1);

	  /* We normally copy the low-numbered register first.  However, if
	     the first register operand 0 is the same as the second register of
	     operand 1, we must copy in the opposite order.  */
	  emit_insn (gen_rtx_SET (VOIDmode,
				  operand_subword (dest, reverse, TRUE, mode),
				  operand_subword (src,  reverse, TRUE, mode)));

	  emit_insn (gen_rtx_SET (VOIDmode,
				  operand_subword (dest, !reverse, TRUE, mode),
				  operand_subword (src,  !reverse, TRUE, mode)));
	}

      /* Reg = constant.  */
      else if (GET_CODE (src) == CONST_INT || GET_CODE (src) == CONST_DOUBLE)
	{
	  rtx words[2];

	  /* Load each 32-bit half separately; no ordering hazard here.  */
	  split_double (src, &words[0], &words[1]);
	  emit_insn (gen_rtx_SET (VOIDmode,
				  operand_subword (dest, 0, TRUE, mode),
				  words[0]));

	  emit_insn (gen_rtx_SET (VOIDmode,
				  operand_subword (dest, 1, TRUE, mode),
				  words[1]));
	}

      /* Reg = mem.  */
      else if (GET_CODE (src) == MEM)
	{
	  /* If the high-address word is used in the address, we must load it
	     last.  Otherwise, load it first.  */
	  int reverse
	    = (refers_to_regno_p (dregno, dregno + 1, XEXP (src, 0), 0) != 0);

	  /* We used to optimize loads from single registers as

		ld r1,r3+; ld r2,r3

	     if r3 were not used subsequently.  However, the REG_NOTES aren't
	     propagated correctly by the reload phase, and it can cause bad
	     code to be generated.  We could still try:

		ld r1,r3+; ld r2,r3; addi r3,-4

	     which saves 2 bytes and doesn't force longword alignment.  */
	  emit_insn (gen_rtx_SET (VOIDmode,
				  operand_subword (dest, reverse, TRUE, mode),
				  adjust_address (src, SImode,
						  reverse * UNITS_PER_WORD)));

	  emit_insn (gen_rtx_SET (VOIDmode,
				  operand_subword (dest, !reverse, TRUE, mode),
				  adjust_address (src, SImode,
						  !reverse * UNITS_PER_WORD)));
	}
      else
	gcc_unreachable ();
    }

  /* Mem = reg.  */
  /* We used to optimize loads from single registers as

	st r1,r3; st r2,+r3

     if r3 were not used subsequently.  However, the REG_NOTES aren't
     propagated correctly by the reload phase, and it can cause bad
     code to be generated.  We could still try:

	st r1,r3; st r2,+r3; addi r3,-4

     which saves 2 bytes and doesn't force longword alignment.  */
  else if (GET_CODE (dest) == MEM && GET_CODE (src) == REG)
    {
      emit_insn (gen_rtx_SET (VOIDmode,
			      adjust_address (dest, SImode, 0),
			      operand_subword (src, 0, TRUE, mode)));

      emit_insn (gen_rtx_SET (VOIDmode,
			      adjust_address (dest, SImode, UNITS_PER_WORD),
			      operand_subword (src, 1, TRUE, mode)));
    }

  else
    gcc_unreachable ();

  val = get_insns ();
  end_sequence ();
  return val;
}
 
/* Worker for TARGET_ARG_PARTIAL_BYTES: number of bytes of the argument
   (MODE/TYPE) that land in registers when it straddles the boundary
   between the M32R_MAX_PARM_REGS parameter registers and the stack.
   Returns 0 when the argument fits entirely in registers or entirely
   on the stack.  */

static int
m32r_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
			tree type, bool named ATTRIBUTE_UNUSED)
{
  unsigned int size;
  int overflow_words;

  /* Argument size in bytes, then rounded up to whole words.  */
  if (mode == BLKmode && type)
    size = (unsigned int) int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);
  size = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;

  /* Entirely on the stack, or entirely in registers: nothing partial.  */
  if (*cum >= M32R_MAX_PARM_REGS
      || *cum + size <= M32R_MAX_PARM_REGS)
    return 0;

  /* Words that spill past the last parameter register.  */
  overflow_words = (*cum + size) - M32R_MAX_PARM_REGS;
  return overflow_words * UNITS_PER_WORD;
}
 
/* Worker function for TARGET_RETURN_IN_MEMORY.
   A value is returned in memory exactly when it would be passed
   by reference as an argument.  */

static bool
m32r_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
{
  enum machine_mode mode = TYPE_MODE (type);

  return m32r_pass_by_reference (NULL, mode, type, false);
}
 
/* Do any needed setup for a variadic function.  For the M32R, we must
   create a register parameter block, and then copy any anonymous arguments
   in registers to memory.

   CUM has not been updated for the last named argument which has type TYPE
   and mode MODE, and we rely on this fact.  */

static void
m32r_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
			     tree type, int *pretend_size, int no_rtl)
{
  int first_anon_arg;

  /* Two passes are made; only emit RTL on the second.  */
  if (no_rtl)
    return;

  /* All BLKmode values are passed by reference.  */
  gcc_assert (mode != BLKmode);

  /* First parameter register (if any) that would hold an anonymous
     argument: advance CUM past the last named argument.  */
  first_anon_arg = (ROUND_ADVANCE_CUM (*cum, mode, type)
		    + ROUND_ADVANCE_ARG (mode, type));

  if (first_anon_arg < M32R_MAX_PARM_REGS)
    {
      /* Note that first_reg_offset < M32R_MAX_PARM_REGS.  */
      int first_reg_offset = first_anon_arg;
      /* Size in words to "pretend" allocate.  */
      int size = M32R_MAX_PARM_REGS - first_reg_offset;
      rtx regblock;

      /* Dump the remaining parameter registers just below the incoming
	 argument pointer so va_arg can walk them as ordinary memory.  */
      regblock = gen_rtx_MEM (BLKmode,
			      plus_constant (arg_pointer_rtx,
					     FIRST_PARM_OFFSET (0)));
      set_mem_alias_set (regblock, get_varargs_alias_set ());
      move_block_from_reg (first_reg_offset, regblock, size);

      *pretend_size = (size * UNITS_PER_WORD);
    }
}
 
/* Return true if INSN is real instruction bearing insn (i.e. not a
   USE, CLOBBER or dispatch-table pattern).  */

static int
m32r_is_insn (rtx insn)
{
  if (! INSN_P (insn))
    return 0;

  switch (GET_CODE (PATTERN (insn)))
    {
    case USE:
    case CLOBBER:
    case ADDR_VEC:
      return 0;
    default:
      return 1;
    }
}
 
/* Increase the priority of long instructions so that the
   short instructions are scheduled ahead of the long ones.  */

static int
m32r_adjust_priority (rtx insn, int priority)
{
  if (! m32r_is_insn (insn))
    return priority;

  if (get_attr_insn_size (insn) == INSN_SIZE_SHORT)
    return priority;

  /* Long insn: boost it well ahead of the shorts.  */
  return priority << 3;
}
 
/* Indicate how many instructions can be issued at the same time.
   This is sort of a lie.  The m32r can issue only 1 long insn at
   once, but it can issue 2 short insns.  The default therefore is
   set at 2, but this can be overridden by the command line option
   -missue-rate=1.  */

static int
m32r_issue_rate (void)
{
  if (TARGET_LOW_ISSUE_RATE)
    return 1;
  return 2;
}
/* Cost functions.  */

/* Implement TARGET_RTX_COSTS: set *TOTAL for the costs this port
   overrides and return true, or return false to let the generic
   machinery handle CODE.  */

static bool
m32r_rtx_costs (rtx x, int code, int outer_code ATTRIBUTE_UNUSED, int *total)
{
  switch (code)
    {
    /* Small integers are as cheap as registers.  4 byte values can be
       fetched as immediate constants - let's give that the cost of an
       extra insn.  */
    case CONST_INT:
      if (INT16_P (INTVAL (x)))
	{
	  *total = 0;
	  return true;
	}
      /* FALLTHRU */

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = COSTS_N_INSNS (1);
      return true;

    case CONST_DOUBLE:
      {
	rtx high, low;

	/* Each half that does not fit a 16-bit immediate costs an insn.  */
	split_double (x, &high, &low);
	*total = COSTS_N_INSNS (!INT16_P (INTVAL (high))
				+ !INT16_P (INTVAL (low)));
	return true;
      }

    case MULT:
      *total = COSTS_N_INSNS (3);
      return true;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      *total = COSTS_N_INSNS (10);
      return true;

    default:
      return false;
    }
}
/* Type of function DECL.

   The result is cached.  To reset the cache at the end of a function,
   call with DECL = NULL_TREE.  */

enum m32r_function_type
m32r_compute_function_type (tree decl)
{
  /* Cached value.  */
  static enum m32r_function_type fn_type = M32R_FUNCTION_UNKNOWN;
  /* Last function we were called for.  */
  static tree last_fn = NULL_TREE;

  /* Resetting the cached value?  */
  if (decl == NULL_TREE)
    {
      fn_type = M32R_FUNCTION_UNKNOWN;
      last_fn = NULL_TREE;
      return fn_type;
    }

  if (decl == last_fn && fn_type != M32R_FUNCTION_UNKNOWN)
    return fn_type;

  /* Compute function type.  Look at DECL itself, not
     current_function_decl: the cache is keyed on DECL, and consulting
     current_function_decl here would cache the wrong answer whenever
     the two differ.  */
  fn_type = (lookup_attribute ("interrupt", DECL_ATTRIBUTES (decl)) != NULL_TREE
	     ? M32R_FUNCTION_INTERRUPT
	     : M32R_FUNCTION_NORMAL);

  last_fn = decl;
  return fn_type;
}
/* Function prologue/epilogue handlers. */
 
/* M32R stack frames look like:
 
Before call After call
+-----------------------+ +-----------------------+
| | | |
high | local variables, | | local variables, |
mem | reg save area, etc. | | reg save area, etc. |
| | | |
+-----------------------+ +-----------------------+
| | | |
| arguments on stack. | | arguments on stack. |
| | | |
SP+0->+-----------------------+ +-----------------------+
| reg parm save area, |
| only created for |
| variable argument |
| functions |
+-----------------------+
| previous frame ptr |
+-----------------------+
| |
| register save area |
| |
+-----------------------+
| return address |
+-----------------------+
| |
| local variables |
| |
+-----------------------+
| |
| alloca allocations |
| |
+-----------------------+
| |
low | arguments on stack |
memory | |
SP+0->+-----------------------+
 
Notes:
1) The "reg parm save area" does not exist for non variable argument fns.
2) The "reg parm save area" can be eliminated completely if we saved regs
containing anonymous args separately but that complicates things too
much (so it's not done).
3) The return address is saved after the register save area so as to have as
many insns as possible between the restoration of `lr' and the `jmp lr'. */
 
/* Structure to be filled in by m32r_compute_frame_size with register
   save masks, and offsets for the current function.  */
struct m32r_frame_info
{
  unsigned int total_size;	/* # bytes that the entire frame takes up.  */
  unsigned int extra_size;	/* # bytes of extra stuff.  */
  unsigned int pretend_size;	/* # bytes we push and pretend caller did.  */
  unsigned int args_size;	/* # bytes that outgoing arguments take up.  */
  unsigned int reg_size;	/* # bytes needed to store regs.  */
  unsigned int var_size;	/* # bytes that variables take up.  */
  unsigned int gmask;		/* Mask of saved gp registers.  */
  unsigned int save_fp;		/* Nonzero if fp must be saved.  */
  unsigned int save_lr;		/* Nonzero if lr (return addr) must be saved.  */
  int initialized;		/* Nonzero if frame size already calculated.  */
};

/* Current frame information calculated by m32r_compute_frame_size.  */
static struct m32r_frame_info current_frame_info;

/* Zero structure to initialize current_frame_info.  */
static struct m32r_frame_info zero_frame_info;

/* GMASK bit positions for the frame pointer and return address.  */
#define FRAME_POINTER_MASK (1 << (FRAME_POINTER_REGNUM))
#define RETURN_ADDR_MASK (1 << (RETURN_ADDR_REGNUM))

/* Tell prologue and epilogue if register REGNO should be saved / restored.
   The return address and frame pointer are treated separately.
   Don't consider them here.
   An interrupt handler must also save call-clobbered registers.  */
#define MUST_SAVE_REGISTER(regno, interrupt_p) \
  ((regno) != RETURN_ADDR_REGNUM && (regno) != FRAME_POINTER_REGNUM \
   && (regs_ever_live[regno] && (!call_really_used_regs[regno] || interrupt_p)))

/* FP is saved whenever it is live; LR whenever it is live or when
   profiling (mcount needs the return address).  */
#define MUST_SAVE_FRAME_POINTER (regs_ever_live[FRAME_POINTER_REGNUM])
#define MUST_SAVE_RETURN_ADDR (regs_ever_live[RETURN_ADDR_REGNUM] || current_function_profile)

#define SHORT_INSN_SIZE 2	/* Size of small instructions.  */
#define LONG_INSN_SIZE 4	/* Size of long instructions.  */
 
/* Return the bytes needed to compute the frame pointer from the current
   stack pointer.

   SIZE is the size needed for local variables.

   Also fills in current_frame_info (sizes, register save mask, flags)
   as a side effect; the prologue/epilogue code reads it.  */

unsigned int
m32r_compute_frame_size (int size)	/* # of var. bytes allocated.  */
{
  int regno;
  unsigned int total_size, var_size, args_size, pretend_size, extra_size;
  unsigned int reg_size, frame_size;
  unsigned int gmask;
  enum m32r_function_type fn_type;
  int interrupt_p;
  /* Bitwise-or of two 0/1 flags: PIC register needed for PIC code that
     touches the GOT or is profiled.  */
  int pic_reg_used = flag_pic && (current_function_uses_pic_offset_table
				  | current_function_profile);

  var_size	= M32R_STACK_ALIGN (size);
  args_size	= M32R_STACK_ALIGN (current_function_outgoing_args_size);
  pretend_size	= current_function_pretend_args_size;
  extra_size	= FIRST_PARM_OFFSET (0);
  total_size	= extra_size + pretend_size + args_size + var_size;
  reg_size	= 0;
  gmask		= 0;

  /* See if this is an interrupt handler.  Call used registers must be saved
     for them too.  */
  fn_type = m32r_compute_function_type (current_function_decl);
  interrupt_p = M32R_INTERRUPT_P (fn_type);

  /* Calculate space needed for registers.  */
  for (regno = 0; regno < M32R_MAX_INT_REGS; regno++)
    {
      if (MUST_SAVE_REGISTER (regno, interrupt_p)
	  || (regno == PIC_OFFSET_TABLE_REGNUM && pic_reg_used))
	{
	  reg_size += UNITS_PER_WORD;
	  gmask |= 1 << regno;
	}
    }

  current_frame_info.save_fp = MUST_SAVE_FRAME_POINTER;
  current_frame_info.save_lr = MUST_SAVE_RETURN_ADDR || pic_reg_used;

  /* FP and LR are saved outside GMASK; account for them separately.  */
  reg_size += ((current_frame_info.save_fp + current_frame_info.save_lr)
	       * UNITS_PER_WORD);
  total_size += reg_size;

  /* ??? Not sure this is necessary, and I don't think the epilogue
     handler will do the right thing if this changes total_size.  */
  total_size = M32R_STACK_ALIGN (total_size);

  frame_size = total_size - (pretend_size + reg_size);

  /* Save computed information.  */
  current_frame_info.total_size	  = total_size;
  current_frame_info.extra_size	  = extra_size;
  current_frame_info.pretend_size = pretend_size;
  current_frame_info.var_size	  = var_size;
  current_frame_info.args_size	  = args_size;
  current_frame_info.reg_size	  = reg_size;
  current_frame_info.gmask	  = gmask;
  /* Only consider the layout final once reload has settled register usage.  */
  current_frame_info.initialized  = reload_completed;

  /* Ok, we're done.  */
  return total_size;
}
/* The table we use to reference PIC data.  */
static rtx global_offset_table;

/* Reload the link register LR from its stack save slot.  SP is the
   stack pointer; SIZE is the byte offset of the slot from SP.  Used by
   the prologue when the PIC setup sequence has clobbered LR.  */

static void
m32r_reload_lr (rtx sp, int size)
{
  rtx lr = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);

  if (size == 0)
    emit_insn (gen_movsi (lr, gen_rtx_MEM (Pmode, sp)));
  else if (size < 32768)
    /* Offset fits a 16-bit displacement.  */
    emit_insn (gen_movsi (lr, gen_rtx_MEM (Pmode,
					   gen_rtx_PLUS (Pmode, sp,
							 GEN_INT (size)))));
  else
    {
      /* Offset too large: materialize the address in a temp register.  */
      rtx tmp = gen_rtx_REG (Pmode, PROLOGUE_TMP_REGNUM);

      emit_insn (gen_movsi (tmp, GEN_INT (size)));
      emit_insn (gen_addsi3 (tmp, tmp, sp));
      emit_insn (gen_movsi (lr, gen_rtx_MEM (Pmode, tmp)));
    }

  /* Mark LR as used so the reload isn't deleted as dead.  */
  emit_insn (gen_rtx_USE (VOIDmode, lr));
}
 
/* Emit the insns that initialize the PIC register with the address of
   the global offset table.  Called from the prologue of PIC functions.  */

void
m32r_load_pic_register (void)
{
  global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
  emit_insn (gen_get_pc (pic_offset_table_rtx, global_offset_table,
			 GEN_INT (TARGET_MODEL_SMALL)));

  /* Need to emit this whether or not we obey regdecls,
     since setjmp/longjmp can cause life info to screw up.  */
  emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
}
 
/* Expand the m32r prologue as a series of insns:
   pretend-arg area, FP push, call-saved register pushes, LR push,
   frame allocation, FP setup, then profiling/PIC setup.  */

void
m32r_expand_prologue (void)
{
  int regno;
  int frame_size;
  unsigned int gmask;
  int pic_reg_used = flag_pic && (current_function_uses_pic_offset_table
				  | current_function_profile);

  if (! current_frame_info.initialized)
    m32r_compute_frame_size (get_frame_size ());

  gmask = current_frame_info.gmask;

  /* These cases shouldn't happen.  Catch them now.  */
  gcc_assert (current_frame_info.total_size || !gmask);

  /* Allocate space for register arguments if this is a variadic function.  */
  if (current_frame_info.pretend_size != 0)
    {
      /* Use a HOST_WIDE_INT temporary, since negating an unsigned int gives
	 the wrong result on a 64-bit host.  */
      HOST_WIDE_INT pretend_size = current_frame_info.pretend_size;

      emit_insn (gen_addsi3 (stack_pointer_rtx,
			     stack_pointer_rtx,
			     GEN_INT (-pretend_size)));
    }

  /* Save any registers we need to and set up fp.  */
  if (current_frame_info.save_fp)
    emit_insn (gen_movsi_push (stack_pointer_rtx, frame_pointer_rtx));

  gmask &= ~(FRAME_POINTER_MASK | RETURN_ADDR_MASK);

  /* Save any needed call-saved regs (and call-used if this is an
     interrupt handler).  m32r_compute_frame_size only sets bits
     0 .. M32R_MAX_INT_REGS-1 in GMASK, so use the same bound here
     (the previous `<=' bound tested one bit past the register file;
     that bit was never set, but the loop read as an off-by-one).  */
  for (regno = 0; regno < M32R_MAX_INT_REGS; ++regno)
    {
      if ((gmask & (1 << regno)) != 0)
	emit_insn (gen_movsi_push (stack_pointer_rtx,
				   gen_rtx_REG (Pmode, regno)));
    }

  if (current_frame_info.save_lr)
    emit_insn (gen_movsi_push (stack_pointer_rtx,
			       gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM)));

  /* Allocate the stack frame.  */
  frame_size = (current_frame_info.total_size
		- (current_frame_info.pretend_size
		   + current_frame_info.reg_size));

  if (frame_size == 0)
    ; /* Nothing to do.  */
  else if (frame_size <= 32768)
    emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
			   GEN_INT (-frame_size)));
  else
    {
      /* Too large for one add immediate: build the size in a temp.  */
      rtx tmp = gen_rtx_REG (Pmode, PROLOGUE_TMP_REGNUM);

      emit_insn (gen_movsi (tmp, GEN_INT (frame_size)));
      emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
    }

  if (frame_pointer_needed)
    emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));

  if (current_function_profile)
    /* Push lr for mcount (form_pc, x).  */
    emit_insn (gen_movsi_push (stack_pointer_rtx,
			       gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM)));

  if (pic_reg_used)
    {
      m32r_load_pic_register ();
      /* The PIC setup clobbered lr; restore it from its save slot.  */
      m32r_reload_lr (stack_pointer_rtx,
		      (current_function_profile ? 0 : frame_size));
    }

  /* Keep the mcount-related pushes from being scheduled away.  */
  if (current_function_profile && !pic_reg_used)
    emit_insn (gen_blockage ());
}
 
/* Set up the stack and frame pointer (if desired) for the function.
   Note, if this is changed, you need to mirror the changes in
   m32r_compute_frame_size which calculates the prolog size.

   The real work is done as RTL by m32r_expand_prologue; this hook
   only emits assembler comments.  */

static void
m32r_output_function_prologue (FILE * file, HOST_WIDE_INT size)
{
  enum m32r_function_type fn_type = m32r_compute_function_type (current_function_decl);

  /* If this is an interrupt handler, mark it as such.  */
  if (M32R_INTERRUPT_P (fn_type))
    fprintf (file, "\t%s interrupt handler\n", ASM_COMMENT_START);

  /* Make sure the frame layout is known before reporting it.  */
  if (! current_frame_info.initialized)
    m32r_compute_frame_size (size);

  /* This is only for the human reader.  */
  fprintf (file,
	   "\t%s PROLOGUE, vars= %d, regs= %d, args= %d, extra= %d\n",
	   ASM_COMMENT_START,
	   current_frame_info.var_size,
	   current_frame_info.reg_size / 4,
	   current_frame_info.args_size,
	   current_frame_info.extra_size);
}
/* Do any necessary cleanup after a function to restore stack, frame,
   and regs.  Unlike the prologue, the epilogue is emitted directly as
   assembler text.  */

static void
m32r_output_function_epilogue (FILE * file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  int regno;
  int noepilogue = FALSE;
  int total_size;
  enum m32r_function_type fn_type = m32r_compute_function_type (current_function_decl);

  /* This is only for the human reader.  */
  fprintf (file, "\t%s EPILOGUE\n", ASM_COMMENT_START);

  gcc_assert (current_frame_info.initialized);
  total_size = current_frame_info.total_size;

  if (total_size == 0)
    {
      rtx insn = get_last_insn ();

      /* If the last insn was a BARRIER, we don't have to write any code
	 because a jump (aka return) was put there.  */
      if (GET_CODE (insn) == NOTE)
	insn = prev_nonnote_insn (insn);
      if (insn && GET_CODE (insn) == BARRIER)
	noepilogue = TRUE;
    }

  if (!noepilogue)
    {
      unsigned int var_size = current_frame_info.var_size;
      unsigned int args_size = current_frame_info.args_size;
      unsigned int gmask = current_frame_info.gmask;
      /* alloca may have moved SP; if so it can't be used directly.  */
      int can_trust_sp_p = !current_function_calls_alloca;
      const char * sp_str = reg_names[STACK_POINTER_REGNUM];
      const char * fp_str = reg_names[FRAME_POINTER_REGNUM];

      /* The first thing to do is point the sp at the bottom of the register
	 save area.  The add sequence chosen depends on how large the
	 offset is (addi / add3 / ld24+add / seth+or3+add).  */
      if (can_trust_sp_p)
	{
	  unsigned int reg_offset = var_size + args_size;

	  if (reg_offset == 0)
	    ; /* Nothing to do.  */
	  else if (reg_offset < 128)
	    fprintf (file, "\taddi %s,%s%d\n",
		     sp_str, IMMEDIATE_PREFIX, reg_offset);
	  else if (reg_offset < 32768)
	    fprintf (file, "\tadd3 %s,%s,%s%d\n",
		     sp_str, sp_str, IMMEDIATE_PREFIX, reg_offset);
	  else if (reg_offset < (1 << 24))
	    fprintf (file, "\tld24 %s,%s%d\n\tadd %s,%s\n",
		     reg_names[PROLOGUE_TMP_REGNUM],
		     IMMEDIATE_PREFIX, reg_offset,
		     sp_str, reg_names[PROLOGUE_TMP_REGNUM]);
	  else
	    fprintf (file, "\tseth %s,%s%d\n\tor3 %s,%s,%s%d\n\tadd %s,%s\n",
		     reg_names[PROLOGUE_TMP_REGNUM],
		     IMMEDIATE_PREFIX, reg_offset >> 16,
		     reg_names[PROLOGUE_TMP_REGNUM],
		     reg_names[PROLOGUE_TMP_REGNUM],
		     IMMEDIATE_PREFIX, reg_offset & 0xffff,
		     sp_str, reg_names[PROLOGUE_TMP_REGNUM]);
	}
      else if (frame_pointer_needed)
	{
	  /* SP untrustworthy: recompute it from the frame pointer.  */
	  unsigned int reg_offset = var_size + args_size;

	  if (reg_offset == 0)
	    fprintf (file, "\tmv %s,%s\n", sp_str, fp_str);
	  else if (reg_offset < 32768)
	    fprintf (file, "\tadd3 %s,%s,%s%d\n",
		     sp_str, fp_str, IMMEDIATE_PREFIX, reg_offset);
	  else if (reg_offset < (1 << 24))
	    fprintf (file, "\tld24 %s,%s%d\n\tadd %s,%s\n",
		     reg_names[PROLOGUE_TMP_REGNUM],
		     IMMEDIATE_PREFIX, reg_offset,
		     sp_str, reg_names[PROLOGUE_TMP_REGNUM]);
	  else
	    fprintf (file, "\tseth %s,%s%d\n\tor3 %s,%s,%s%d\n\tadd %s,%s\n",
		     reg_names[PROLOGUE_TMP_REGNUM],
		     IMMEDIATE_PREFIX, reg_offset >> 16,
		     reg_names[PROLOGUE_TMP_REGNUM],
		     reg_names[PROLOGUE_TMP_REGNUM],
		     IMMEDIATE_PREFIX, reg_offset & 0xffff,
		     sp_str, reg_names[PROLOGUE_TMP_REGNUM]);
	}
      else
	gcc_unreachable ();

      if (current_frame_info.save_lr)
	fprintf (file, "\tpop %s\n", reg_names[RETURN_ADDR_REGNUM]);

      /* Restore any saved registers, in reverse order of course.  */
      gmask &= ~(FRAME_POINTER_MASK | RETURN_ADDR_MASK);
      for (regno = M32R_MAX_INT_REGS - 1; regno >= 0; --regno)
	{
	  if ((gmask & (1L << regno)) != 0)
	    fprintf (file, "\tpop %s\n", reg_names[regno]);
	}

      if (current_frame_info.save_fp)
	fprintf (file, "\tpop %s\n", fp_str);

      /* Remove varargs area if present.  */
      if (current_frame_info.pretend_size != 0)
	fprintf (file, "\taddi %s,%s%d\n",
		 sp_str, IMMEDIATE_PREFIX, current_frame_info.pretend_size);

      /* Emit the return instruction.  */
      if (M32R_INTERRUPT_P (fn_type))
	fprintf (file, "\trte\n");
      else
	fprintf (file, "\tjmp %s\n", reg_names[RETURN_ADDR_REGNUM]);
    }

  /* Reset state info for each function.  */
  current_frame_info = zero_frame_info;
  m32r_compute_function_type (NULL_TREE);
}
/* Return nonzero if this function is known to have a null or 1 instruction
   epilogue.  */

int
direct_return (void)
{
  /* The frame layout isn't final until after reload.  */
  if (reload_completed)
    {
      if (! current_frame_info.initialized)
	m32r_compute_frame_size (get_frame_size ());

      return current_frame_info.total_size == 0;
    }

  return FALSE;
}
 
/* PIC.  */

/* Return nonzero if X is legitimate as-is in PIC code, i.e. it contains
   no bare symbolic reference that would need GOT-relative rewriting.  */

int
m32r_legitimate_pic_operand_p (rtx x)
{
  enum rtx_code code = GET_CODE (x);

  /* A bare symbol or label must go through the GOT.  */
  if (code == SYMBOL_REF || code == LABEL_REF)
    return 0;

  /* Likewise (const (plus (symbol_ref/label_ref) (const_int))).  */
  if (code == CONST)
    {
      rtx sum = XEXP (x, 0);

      if (GET_CODE (sum) == PLUS
	  && (GET_CODE (XEXP (sum, 0)) == SYMBOL_REF
	      || GET_CODE (XEXP (sum, 0)) == LABEL_REF)
	  && GET_CODE (XEXP (sum, 1)) == CONST_INT)
	return 0;
    }

  return 1;
}
 
/* Convert ORIG, a symbolic address, into a form legitimate for PIC code,
   loading the result into REG (a fresh pseudo is allocated when REG is 0,
   which is only valid before reload).  Local symbols and labels use a
   GOTOFF add; global symbols load through the GOT.  */

rtx
m32r_legitimize_pic_address (rtx orig, rtx reg)
{
#ifdef DEBUG_PIC
  printf("m32r_legitimize_pic_address()\n");
#endif

  if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
    {
      rtx pic_ref, address;
      rtx insn;
      int subregs = 0;

      if (reg == 0)
	{
	  /* Fresh pseudos are only available before reload.  */
	  gcc_assert (!reload_in_progress && !reload_completed);
	  reg = gen_reg_rtx (Pmode);

	  subregs = 1;
	}

      /* Compute the GOT slot address in a scratch when we allocated REG
	 ourselves, otherwise reuse REG.  */
      if (subregs)
	address = gen_reg_rtx (Pmode);
      else
	address = reg;

      current_function_uses_pic_offset_table = 1;

      /* Local symbols and labels: pic_reg + GOTOFF, no memory load.  */
      if (GET_CODE (orig) == LABEL_REF
	  || (GET_CODE (orig) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (orig)))
	{
	  emit_insn (gen_gotoff_load_addr (reg, orig));
	  emit_insn (gen_addsi3 (reg, reg, pic_offset_table_rtx));
	  return reg;
	}

      /* Global symbol: load the address from the GOT entry.  */
      emit_insn (gen_pic_load_addr (address, orig));

      emit_insn (gen_addsi3 (address, address, pic_offset_table_rtx));
      pic_ref = gen_const_mem (Pmode, address);
      insn = emit_move_insn (reg, pic_ref);
#if 0
      /* Put a REG_EQUAL note on this insn, so that it can be optimized
	 by loop.  */
      REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
					    REG_NOTES (insn));
#endif
      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base, offset;

      /* Already pic-register relative: nothing to do.  */
      if (GET_CODE (XEXP (orig, 0)) == PLUS
	  && XEXP (XEXP (orig, 0), 1) == pic_offset_table_rtx)
	return orig;

      if (reg == 0)
	{
	  gcc_assert (!reload_in_progress && !reload_completed);
	  reg = gen_reg_rtx (Pmode);
	}

      /* Recursively legitimize both halves of (const (plus base offset)).  */
      if (GET_CODE (XEXP (orig, 0)) == PLUS)
	{
	  base = m32r_legitimize_pic_address (XEXP (XEXP (orig, 0), 0), reg);
	  /* Don't hand the same REG to both recursive calls.  */
	  if (base == reg)
	    offset = m32r_legitimize_pic_address (XEXP (XEXP (orig, 0), 1), NULL_RTX);
	  else
	    offset = m32r_legitimize_pic_address (XEXP (XEXP (orig, 0), 1), reg);
	}
      else
	return orig;

      if (GET_CODE (offset) == CONST_INT)
	{
	  /* Small constant offsets fold into the address; large ones
	     need a register (only possible before reload).  */
	  if (INT16_P (INTVAL (offset)))
	    return plus_constant (base, INTVAL (offset));
	  else
	    {
	      gcc_assert (! reload_in_progress && ! reload_completed);
	      offset = force_reg (Pmode, offset);
	    }
	}

      return gen_rtx_PLUS (Pmode, base, offset);
    }

  return orig;
}
/* Nested function support.  */

/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNADDR is an RTX for the address of the function's pure code.
   CXT is an RTX for the static chain value for the function.

   Currently a no-op on the M32R: no trampoline contents are emitted
   here.  */

void
m32r_initialize_trampoline (rtx tramp ATTRIBUTE_UNUSED,
			    rtx fnaddr ATTRIBUTE_UNUSED,
			    rtx cxt ATTRIBUTE_UNUSED)
{
}
/* Emit the standard prologue of the assembly output file, plus any
   M32R-specific preamble.  */

static void
m32r_file_start (void)
{
  default_file_start ();

  /* With -fverbose-asm, record the -G small-data threshold as an
     assembler comment for human readers.  */
  if (flag_verbose_asm)
    fprintf (asm_out_file,
	     "%s M32R/D special options: -G " HOST_WIDE_INT_PRINT_UNSIGNED "\n",
	     ASM_COMMENT_START, g_switch_value);

  /* Tell the assembler to produce little-endian output.  */
  if (TARGET_LITTLE_ENDIAN)
    fprintf (asm_out_file, "\t.little\n");
}
/* Print operand X (an rtx) in assembler syntax to file FILE.
   CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
   For `%' followed by punctuation, CODE is the punctuation and X is null.  */

void
m32r_print_operand (FILE * file, rtx x, int code)
{
  rtx addr;

  switch (code)
    {
    /* The 's' and 'p' codes are used by output_block_move() to
       indicate post-increment 's'tores and 'p're-increment loads.  */
    case 's':
      if (GET_CODE (x) == REG)
	fprintf (file, "@+%s", reg_names [REGNO (x)]);
      else
	output_operand_lossage ("invalid operand to %%s code");
      return;

    case 'p':
      if (GET_CODE (x) == REG)
	fprintf (file, "@%s+", reg_names [REGNO (x)]);
      else
	output_operand_lossage ("invalid operand to %%p code");
      return;

    case 'R' :
      /* Write second word of DImode or DFmode reference,
	 register or memory.  */
      if (GET_CODE (x) == REG)
	fputs (reg_names[REGNO (x)+1], file);
      else if (GET_CODE (x) == MEM)
	{
	  fprintf (file, "@(");
	  /* Handle possible auto-increment.  Since it is pre-increment and
	     we have already done it, we can just use an offset of four.  */
	  /* ??? This is taken from rs6000.c I think.  I don't think it is
	     currently necessary, but keep it around.  */
	  if (GET_CODE (XEXP (x, 0)) == PRE_INC
	      || GET_CODE (XEXP (x, 0)) == PRE_DEC)
	    output_address (plus_constant (XEXP (XEXP (x, 0), 0), 4));
	  else
	    output_address (plus_constant (XEXP (x, 0), 4));
	  fputc (')', file);
	}
      else
	output_operand_lossage ("invalid operand to %%R code");
      return;

    case 'H' : /* High word.  */
    case 'L' : /* Low word.  */
      if (GET_CODE (x) == REG)
	{
	  /* L = least significant word, H = most significant word.  */
	  if ((WORDS_BIG_ENDIAN != 0) ^ (code == 'L'))
	    fputs (reg_names[REGNO (x)], file);
	  else
	    fputs (reg_names[REGNO (x)+1], file);
	}
      else if (GET_CODE (x) == CONST_INT
	       || GET_CODE (x) == CONST_DOUBLE)
	{
	  rtx first, second;

	  split_double (x, &first, &second);
	  fprintf (file, HOST_WIDE_INT_PRINT_HEX,
		   code == 'L' ? INTVAL (first) : INTVAL (second));
	}
      else
	output_operand_lossage ("invalid operand to %%H/%%L code");
      return;

    case 'A' :
      {
	char str[30];

	/* 'A' only accepts floating point constants.  */
	if (GET_CODE (x) != CONST_DOUBLE
	    || GET_MODE_CLASS (GET_MODE (x)) != MODE_FLOAT)
	  fatal_insn ("bad insn for 'A'", x);

	real_to_decimal (str, CONST_DOUBLE_REAL_VALUE (x), sizeof (str), 0, 1);
	fprintf (file, "%s", str);
	return;
      }

    case 'B' : /* Bottom half.  */
    case 'T' : /* Top half.  */
      /* Output the argument to a `seth' insn (sets the Top half-word).
	 For constants output arguments to a seth/or3 pair to set Top and
	 Bottom halves.  For symbols output arguments to a seth/add3 pair to
	 set Top and Bottom halves.  The difference exists because for
	 constants seth/or3 is more readable but for symbols we need to use
	 the same scheme as `ld' and `st' insns (16 bit addend is signed).  */
      switch (GET_CODE (x))
	{
	case CONST_INT :
	case CONST_DOUBLE :
	  {
	    rtx first, second;

	    split_double (x, &first, &second);
	    x = WORDS_BIG_ENDIAN ? second : first;
	    fprintf (file, HOST_WIDE_INT_PRINT_HEX,
		     (code == 'B'
		      ? INTVAL (x) & 0xffff
		      : (INTVAL (x) >> 16) & 0xffff));
	  }
	  return;
	case CONST :
	case SYMBOL_REF :
	  /* Small-data symbols are reached via the SDA base register,
	     so emit an sda() reloc instead of low().  */
	  if (code == 'B'
	      && small_data_operand (x, VOIDmode))
	    {
	      fputs ("sda(", file);
	      output_addr_const (file, x);
	      fputc (')', file);
	      return;
	    }
	  /* fall through */
	case LABEL_REF :
	  fputs (code == 'T' ? "shigh(" : "low(", file);
	  output_addr_const (file, x);
	  fputc (')', file);
	  return;
	default :
	  output_operand_lossage ("invalid operand to %%T/%%B code");
	  return;
	}
      break;

    case 'U' :
      /* ??? wip */
      /* Output a load/store with update indicator if appropriate.  */
      if (GET_CODE (x) == MEM)
	{
	  if (GET_CODE (XEXP (x, 0)) == PRE_INC
	      || GET_CODE (XEXP (x, 0)) == PRE_DEC)
	    fputs (".a", file);
	}
      else
	output_operand_lossage ("invalid operand to %%U code");
      return;

    case 'N' :
      /* Print a constant value negated.  */
      if (GET_CODE (x) == CONST_INT)
	output_addr_const (file, GEN_INT (- INTVAL (x)));
      else
	output_operand_lossage ("invalid operand to %%N code");
      return;

    case 'X' :
      /* Print a const_int in hex.  Used in comments.
	 Non-CONST_INT operands are silently ignored: the output is
	 only commentary, so there is no lossage call here.  */
      if (GET_CODE (x) == CONST_INT)
	fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
      return;

    case '#' :
      /* Emit the immediate-operand prefix character.  */
      fputs (IMMEDIATE_PREFIX, file);
      return;

    case 0 :
      /* Do nothing special.  */
      break;

    default :
      /* Unknown flag.  */
      output_operand_lossage ("invalid operand output code");
    }

  /* No (or already-handled) code letter: print X by its rtx class.  */
  switch (GET_CODE (x))
    {
    case REG :
      fputs (reg_names[REGNO (x)], file);
      break;

    case MEM :
      addr = XEXP (x, 0);
      if (GET_CODE (addr) == PRE_INC)
	{
	  if (GET_CODE (XEXP (addr, 0)) != REG)
	    fatal_insn ("pre-increment address is not a register", x);

	  fprintf (file, "@+%s", reg_names[REGNO (XEXP (addr, 0))]);
	}
      else if (GET_CODE (addr) == PRE_DEC)
	{
	  if (GET_CODE (XEXP (addr, 0)) != REG)
	    fatal_insn ("pre-decrement address is not a register", x);

	  fprintf (file, "@-%s", reg_names[REGNO (XEXP (addr, 0))]);
	}
      else if (GET_CODE (addr) == POST_INC)
	{
	  if (GET_CODE (XEXP (addr, 0)) != REG)
	    fatal_insn ("post-increment address is not a register", x);

	  fprintf (file, "@%s+", reg_names[REGNO (XEXP (addr, 0))]);
	}
      else
	{
	  fputs ("@(", file);
	  output_address (XEXP (x, 0));
	  fputc (')', file);
	}
      break;

    case CONST_DOUBLE :
      /* We handle SFmode constants here as output_addr_const doesn't.  */
      if (GET_MODE (x) == SFmode)
	{
	  REAL_VALUE_TYPE d;
	  long l;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, x);
	  REAL_VALUE_TO_TARGET_SINGLE (d, l);
	  fprintf (file, "0x%08lx", l);
	  break;
	}

      /* Fall through.  Let output_addr_const deal with it.  */

    default :
      output_addr_const (file, x);
      break;
    }
}
 
/* Print a memory address as an operand to reference that memory location.  */

void
m32r_print_operand_address (FILE * file, rtx addr)
{
  rtx base;
  rtx index = 0;
  int offset = 0;

  switch (GET_CODE (addr))
    {
    case REG :
      fputs (reg_names[REGNO (addr)], file);
      break;

    case PLUS :
      /* Split the PLUS into base / index / constant-offset parts.  */
      if (GET_CODE (XEXP (addr, 0)) == CONST_INT)
	offset = INTVAL (XEXP (addr, 0)), base = XEXP (addr, 1);
      else if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
	offset = INTVAL (XEXP (addr, 1)), base = XEXP (addr, 0);
      else
	base = XEXP (addr, 0), index = XEXP (addr, 1);
      if (GET_CODE (base) == REG)
	{
	  /* Print the offset first (if present) to conform to the manual.  */
	  if (index == 0)
	    {
	      if (offset != 0)
		fprintf (file, "%d,", offset);
	      fputs (reg_names[REGNO (base)], file);
	    }
	  /* The chip doesn't support this, but left in for generality.  */
	  else if (GET_CODE (index) == REG)
	    fprintf (file, "%s,%s",
		     reg_names[REGNO (base)], reg_names[REGNO (index)]);
	  /* Not sure this can happen, but leave in for now.  */
	  else if (GET_CODE (index) == SYMBOL_REF)
	    {
	      output_addr_const (file, index);
	      fputc (',', file);
	      fputs (reg_names[REGNO (base)], file);
	    }
	  else
	    fatal_insn ("bad address", addr);
	}
      else if (GET_CODE (base) == LO_SUM)
	{
	  gcc_assert (!index && GET_CODE (XEXP (base, 0)) == REG);
	  /* Small-data references go through the SDA base register.  */
	  if (small_data_operand (XEXP (base, 1), VOIDmode))
	    fputs ("sda(", file);
	  else
	    fputs ("low(", file);
	  output_addr_const (file, plus_constant (XEXP (base, 1), offset));
	  fputs ("),", file);
	  fputs (reg_names[REGNO (XEXP (base, 0))], file);
	}
      else
	fatal_insn ("bad address", addr);
      break;

    case LO_SUM :
      if (GET_CODE (XEXP (addr, 0)) != REG)
	fatal_insn ("lo_sum not of register", addr);
      if (small_data_operand (XEXP (addr, 1), VOIDmode))
	fputs ("sda(", file);
      else
	fputs ("low(", file);
      output_addr_const (file, XEXP (addr, 1));
      fputs ("),", file);
      fputs (reg_names[REGNO (XEXP (addr, 0))], file);
      break;

    case PRE_INC : /* Assume SImode.  */
      fprintf (file, "+%s", reg_names[REGNO (XEXP (addr, 0))]);
      break;

    case PRE_DEC : /* Assume SImode.  */
      fprintf (file, "-%s", reg_names[REGNO (XEXP (addr, 0))]);
      break;

    case POST_INC : /* Assume SImode.  */
      fprintf (file, "%s+", reg_names[REGNO (XEXP (addr, 0))]);
      break;

    default :
      output_addr_const (file, addr);
      break;
    }
}
 
/* Predicate: true iff OPERAND1 and OPERAND2 are the integer constants
   0 and 1, in either order.  */

int
zero_and_one (rtx operand1, rtx operand2)
{
  HOST_WIDE_INT val1, val2;

  /* Both operands must be integer constants.  */
  if (GET_CODE (operand1) != CONST_INT || GET_CODE (operand2) != CONST_INT)
    return 0;

  val1 = INTVAL (operand1);
  val2 = INTVAL (operand2);

  /* Accept {0,1} in either order.  */
  return (val1 == 0 && val2 == 1) || (val1 == 1 && val2 == 0);
}
 
/* Emit the assembler text for a conditional move whose operands have
   already been validated by conditional_move_operand().  operands[0] is
   the destination register, operands[1] the condition, operands[2] the
   'true' value and operands[3] the 'false' value.  Returns a pointer to
   a static buffer containing the insn text.  */

char *
emit_cond_move (rtx * operands, rtx insn ATTRIBUTE_UNUSED)
{
  static char buffer [100];
  const char * dest = reg_names [REGNO (operands [0])];
  int len;

  buffer [0] = 0;

  /* Destination must be a register, and both values must be valid
     conditional-move operands.  */
  gcc_assert (GET_CODE (operands [0]) == REG);
  gcc_assert (conditional_move_operand (operands [2], SImode));
  gcc_assert (conditional_move_operand (operands [3], SImode));

  /* A reversed (NE) test is handled by exchanging the two values.  */
  if (GET_CODE (operands [1]) == NE)
    {
      rtx tmp = operands [2];
      operands [2] = operands [3];
      operands [3] = tmp;
    }

  /* Copy the condition bit register into the destination...  */
  len = sprintf (buffer, "mvfc %s, cbr", dest);

  /* ...and invert the result when the 'true' value is 0.  */
  if (INTVAL (operands [2]) == 0)
    sprintf (buffer + len, "\n\txor3 %s, %s, #1", dest, dest);

  return buffer;
}
 
/* Return nonzero if rtl expressions A and B do not refer to the same
   hard register.  Non-register expressions always compare as
   "different" (including against each other).  */

int
m32r_not_same_reg (rtx a, rtx b)
{
  int reg_a, reg_b;

  /* Look through any SUBREG wrappers to the underlying expression.  */
  while (GET_CODE (a) == SUBREG)
    a = SUBREG_REG (a);
  while (GET_CODE (b) == SUBREG)
    b = SUBREG_REG (b);

  /* Distinct sentinels (-1 vs -2) make two non-REG expressions
     compare unequal.  */
  reg_a = (GET_CODE (a) == REG) ? (int) REGNO (a) : -1;
  reg_b = (GET_CODE (b) == REG) ? (int) REGNO (b) : -2;

  return reg_a != reg_b;
}
 
/* Build a SYMBOL_REF for the function NAME, tagging it with the code
   model currently selected by the -mmodel= target flags.  */

rtx
m32r_function_symbol (const char *name)
{
  enum m32r_model model;
  rtx sym = gen_rtx_SYMBOL_REF (Pmode, name);

  if (TARGET_MODEL_SMALL)
    model = M32R_MODEL_SMALL;
  else if (TARGET_MODEL_MEDIUM)
    model = M32R_MODEL_MEDIUM;
  else if (TARGET_MODEL_LARGE)
    model = M32R_MODEL_LARGE;
  else
    gcc_unreachable (); /* Shouldn't happen.  */

  /* Record the model in the symbol's flag bits.  (ORing zero for the
     small model is a no-op, so no guard is needed.)  */
  SYMBOL_REF_FLAGS (sym) |= (int) model << SYMBOL_FLAG_MODEL_SHIFT;

  return sym;
}
 
/* Use a library function to move some bytes.  */

static void
block_move_call (rtx dest_reg, rtx src_reg, rtx bytes_rtx)
{
  /* We want to pass the size as Pmode, which will normally be SImode
     but will be DImode if we are using 64 bit longs and pointers.  */
  if (GET_MODE (bytes_rtx) != VOIDmode
      && GET_MODE (bytes_rtx) != Pmode)
    bytes_rtx = convert_to_mode (Pmode, bytes_rtx, 1);

  /* Emit the equivalent of memcpy (dest, src, bytes); the byte count
     is converted to sizetype's mode for the libcall interface.  */
  emit_library_call (m32r_function_symbol ("memcpy"), 0,
		     VOIDmode, 3, dest_reg, Pmode, src_reg, Pmode,
		     convert_to_mode (TYPE_MODE (sizetype), bytes_rtx,
				      TYPE_UNSIGNED (sizetype)),
		     TYPE_MODE (sizetype));
}
 
/* Expand string/block move operations.

   operands[0] is the pointer to the destination.
   operands[1] is the pointer to the source.
   operands[2] is the number of bytes to move.
   operands[3] is the alignment.

   Returns 1 upon success, 0 otherwise.  */

int
m32r_expand_block_move (rtx operands[])
{
  rtx orig_dst = operands[0];
  rtx orig_src = operands[1];
  rtx bytes_rtx = operands[2];
  rtx align_rtx = operands[3];
  int constp = GET_CODE (bytes_rtx) == CONST_INT;
  HOST_WIDE_INT bytes = constp ? INTVAL (bytes_rtx) : 0;
  int align = INTVAL (align_rtx);
  int leftover;
  rtx src_reg;
  rtx dst_reg;

  /* A known zero (or negative) byte count needs no code at all.  */
  if (constp && bytes <= 0)
    return 1;

  /* Move the address into scratch registers.  */
  dst_reg = copy_addr_to_reg (XEXP (orig_dst, 0));
  src_reg = copy_addr_to_reg (XEXP (orig_src, 0));

  if (align > UNITS_PER_WORD)
    align = UNITS_PER_WORD;

  /* If we prefer size over speed, always use a function call.
     If we do not know the size, use a function call.
     If the blocks are not word aligned, use a function call.  */
  if (optimize_size || ! constp || align != UNITS_PER_WORD)
    {
      block_move_call (dst_reg, src_reg, bytes_rtx);
      return 0;
    }

  /* Split the count into MAX_MOVE_BYTES-sized chunks plus a tail.  */
  leftover = bytes % MAX_MOVE_BYTES;
  bytes -= leftover;

  /* If necessary, generate a loop to handle the bulk of the copy.  */
  if (bytes)
    {
      rtx label = NULL_RTX;
      rtx final_src = NULL_RTX;
      rtx at_a_time = GEN_INT (MAX_MOVE_BYTES);
      rtx rounded_total = GEN_INT (bytes);
      rtx new_dst_reg = gen_reg_rtx (SImode);
      rtx new_src_reg = gen_reg_rtx (SImode);

      /* If we are going to have to perform this loop more than
	 once, then generate a label and compute the address the
	 source register will contain upon completion of the final
	 iteration.  */
      if (bytes > MAX_MOVE_BYTES)
	{
	  final_src = gen_reg_rtx (Pmode);

	  if (INT16_P(bytes))
	    emit_insn (gen_addsi3 (final_src, src_reg, rounded_total));
	  else
	    {
	      /* The total does not fit a 16-bit immediate; load it
		 into a register first.  */
	      emit_insn (gen_movsi (final_src, rounded_total));
	      emit_insn (gen_addsi3 (final_src, final_src, src_reg));
	    }

	  label = gen_label_rtx ();
	  emit_label (label);
	}

      /* It is known that output_block_move() will update src_reg to point
	 to the word after the end of the source block, and dst_reg to point
	 to the last word of the destination block, provided that the block
	 is MAX_MOVE_BYTES long.  */
      emit_insn (gen_movmemsi_internal (dst_reg, src_reg, at_a_time,
					new_dst_reg, new_src_reg));
      emit_move_insn (dst_reg, new_dst_reg);
      emit_move_insn (src_reg, new_src_reg);
      emit_insn (gen_addsi3 (dst_reg, dst_reg, GEN_INT (4)));

      /* Loop back until the source pointer reaches its final value.  */
      if (bytes > MAX_MOVE_BYTES)
	{
	  emit_insn (gen_cmpsi (src_reg, final_src));
	  emit_jump_insn (gen_bne (label));
	}
    }

  /* Copy the remaining tail (< MAX_MOVE_BYTES) with one more insn.  */
  if (leftover)
    emit_insn (gen_movmemsi_internal (dst_reg, src_reg, GEN_INT (leftover),
				      gen_reg_rtx (SImode),
				      gen_reg_rtx (SImode)));
  return 1;
}
 
/* Emit load/stores for a small constant word aligned block_move.

   operands[0] is the memory address of the destination.
   operands[1] is the memory address of the source.
   operands[2] is the number of bytes to move.
   operands[3] is a temp register.
   operands[4] is a temp register.

   NOTE(review): the asm templates below actually reference operands[5]
   and operands[6] (%5/%6) as the load/store temporaries -- presumably
   scratch operands of the emitting insn pattern; confirm against the
   movmemsi_internal pattern in m32r.md.  */

void
m32r_output_block_move (rtx insn ATTRIBUTE_UNUSED, rtx operands[])
{
  HOST_WIDE_INT bytes = INTVAL (operands[2]);
  int first_time;
  int got_extra = 0;

  gcc_assert (bytes >= 1 && bytes <= MAX_MOVE_BYTES);

  /* We do not have a post-increment store available, so the first set of
     stores are done without any increment, then the remaining ones can use
     the pre-increment addressing mode.

     Note: expand_block_move() also relies upon this behavior when building
     loops to copy large blocks.  */
  first_time = 1;

  while (bytes > 0)
    {
      if (bytes >= 8)
	{
	  /* Move two full words; '%p1' is a pre-increment load and
	     '%s0' a pre-increment store (see m32r_print_operand).  */
	  if (first_time)
	    {
	      output_asm_insn ("ld\t%5, %p1", operands);
	      output_asm_insn ("ld\t%6, %p1", operands);
	      output_asm_insn ("st\t%5, @%0", operands);
	      output_asm_insn ("st\t%6, %s0", operands);
	    }
	  else
	    {
	      output_asm_insn ("ld\t%5, %p1", operands);
	      output_asm_insn ("ld\t%6, %p1", operands);
	      output_asm_insn ("st\t%5, %s0", operands);
	      output_asm_insn ("st\t%6, %s0", operands);
	    }

	  bytes -= 8;
	}
      else if (bytes >= 4)
	{
	  /* If a partial word still follows this one, fetch it now;
	     the tail code below will consume it via got_extra.  */
	  if (bytes > 4)
	    got_extra = 1;

	  output_asm_insn ("ld\t%5, %p1", operands);

	  if (got_extra)
	    output_asm_insn ("ld\t%6, %p1", operands);

	  if (first_time)
	    output_asm_insn ("st\t%5, @%0", operands);
	  else
	    output_asm_insn ("st\t%5, %s0", operands);

	  bytes -= 4;
	}
      else
	{
	  /* Get the entire next word, even though we do not want all of it.
	     The saves us from doing several smaller loads, and we assume that
	     we cannot cause a page fault when at least part of the word is in
	     valid memory [since we don't get called if things aren't properly
	     aligned].  */
	  int dst_offset = first_time ? 0 : 4;
	  /* The amount of increment we have to make to the
	     destination pointer.  */
	  int dst_inc_amount = dst_offset + bytes - 4;
	  /* The same for the source pointer.  */
	  int src_inc_amount = bytes;
	  int last_shift;
	  rtx my_operands[3];

	  /* If got_extra is true then we have already loaded
	     the next word as part of loading and storing the previous word.  */
	  if (! got_extra)
	    output_asm_insn ("ld\t%6, @%1", operands);

	  if (bytes >= 2)
	    {
	      bytes -= 2;

	      /* Store the top half-word at the current offset.  */
	      output_asm_insn ("sra3\t%5, %6, #16", operands);
	      my_operands[0] = operands[5];
	      my_operands[1] = GEN_INT (dst_offset);
	      my_operands[2] = operands[0];
	      output_asm_insn ("sth\t%0, @(%1,%2)", my_operands);

	      /* If there is a byte left to store then increment the
		 destination address and shift the contents of the source
		 register down by 8 bits.  We could not do the address
		 increment in the store half word instruction, because it does
		 not have an auto increment mode.  */
	      if (bytes > 0)  /* assert (bytes == 1) */
		{
		  dst_offset += 2;
		  last_shift = 8;
		}
	    }
	  else
	    last_shift = 24;

	  /* Store the final single byte, shifted into position.  */
	  if (bytes > 0)
	    {
	      my_operands[0] = operands[6];
	      my_operands[1] = GEN_INT (last_shift);
	      output_asm_insn ("srai\t%0, #%1", my_operands);
	      my_operands[0] = operands[6];
	      my_operands[1] = GEN_INT (dst_offset);
	      my_operands[2] = operands[0];
	      output_asm_insn ("stb\t%0, @(%1,%2)", my_operands);
	    }

	  /* Update the destination pointer if needed.  We have to do
	     this so that the patterns matches what we output in this
	     function.  */
	  if (dst_inc_amount
	      && !find_reg_note (insn, REG_UNUSED, operands[0]))
	    {
	      my_operands[0] = operands[0];
	      my_operands[1] = GEN_INT (dst_inc_amount);
	      output_asm_insn ("addi\t%0, #%1", my_operands);
	    }

	  /* Update the source pointer if needed.  We have to do this
	     so that the patterns matches what we output in this
	     function.  */
	  if (src_inc_amount
	      && !find_reg_note (insn, REG_UNUSED, operands[1]))
	    {
	      my_operands[0] = operands[1];
	      my_operands[1] = GEN_INT (src_inc_amount);
	      output_asm_insn ("addi\t%0, #%1", my_operands);
	    }

	  bytes = 0;
	}

      first_time = 0;
    }
}
 
/* Return nonzero if the register allocator may rename OLD_REG to
   NEW_REG; zero if the rename must be rejected.  */

int
m32r_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
			   unsigned int new_reg)
{
  /* An interrupt routine saves only the registers it actually uses, so
     renaming into a never-used register would clobber caller state.  */
  if (! regs_ever_live[new_reg]
      && lookup_attribute ("interrupt",
			   DECL_ATTRIBUTES (current_function_decl)))
    return 0;

  /* We currently emit epilogues as text, not rtl, so the liveness
     of the return address register isn't visible.  */
  if (new_reg == RETURN_ADDR_REGNUM && current_function_is_leaf)
    return 0;

  return 1;
}
 
/* Return the value of the return address for frame COUNT levels up.
   Only the immediate caller (COUNT == 0) is supported; it is found in
   RETURN_ADDR_REGNUM at function entry.  */

rtx
m32r_return_addr (int count)
{
  return (count == 0
	  ? get_hard_reg_initial_val (Pmode, RETURN_ADDR_REGNUM)
	  : const0_rtx);
}
/m32r.opt
0,0 → 1,82
; Options for the Renesas M32R port of the compiler.

; Copyright (C) 2005, 2007 Free Software Foundation, Inc.
;
; This file is part of GCC.
;
; GCC is free software; you can redistribute it and/or modify it under
; the terms of the GNU General Public License as published by the Free
; Software Foundation; either version 3, or (at your option) any later
; version.
;
; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
; WARRANTY; without even the implied warranty of MERCHANTABILITY or
; FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
; for more details.
;
; You should have received a copy of the GNU General Public License
; along with GCC; see the file COPYING3.  If not see
; <http://www.gnu.org/licenses/>.

; Each record below is: the option name, a properties line, and the
; --help text for the option.

m32rx
Target Report RejectNegative Mask(M32RX)
Compile for the m32rx

m32r2
Target Report RejectNegative Mask(M32R2)
Compile for the m32r2

m32r
Target RejectNegative
Compile for the m32r

malign-loops
Target Report Mask(ALIGN_LOOPS)
Align all loops to 32 byte boundary

mbranch-cost=1
Target Report RejectNegative Mask(BRANCH_COST)
Prefer branches over conditional execution

mbranch-cost=2
Target Report RejectNegative InverseMask(BRANCH_COST)
Give branches their default cost

mdebug
Target Mask(DEBUG)
Display compile time statistics

mflush-func=
Target RejectNegative Joined Var(m32r_cache_flush_func) Init(CACHE_FLUSH_FUNC)
Specify cache flush function

mflush-trap=
Target RejectNegative Joined UInteger Var(m32r_cache_flush_trap) Init(CACHE_FLUSH_TRAP)
Specify cache flush trap number

missue-rate=1
Target Report RejectNegative Mask(LOW_ISSUE_RATE)
Only issue one instruction per cycle

missue-rate=2
Target Report RejectNegative InverseMask(LOW_ISSUE_RATE)
Allow two instructions to be issued per cycle

mmodel=
Target RejectNegative Joined
Code size: small, medium or large

mno-flush-func
Target RejectNegative
Don't call any cache flush functions

mno-flush-trap
Target RejectNegative
Don't call any cache flush trap

; mrelax
; Target Mask(RELAX)

msdata=
Target RejectNegative Joined
Small data area: none, sdata, use
/t-m32r
0,0 → 1,64
# We want fine grained libraries, so use the new code to build the
# floating point emulation libraries.
FPBIT = fp-bit.c
DPBIT = dp-bit.c

# Turn off the SDA while compiling libgcc2.  There are no headers for it
# and we want maximal upward compatibility here.

TARGET_LIBGCC2_CFLAGS = -G 0

# fp-bit.c / dp-bit.c are both generated from the master copy in
# config/fp-bit.c; defining FLOAT first selects the single-precision
# variant.
fp-bit.c: $(srcdir)/config/fp-bit.c
	echo '#define FLOAT' > fp-bit.c
	cat $(srcdir)/config/fp-bit.c >> fp-bit.c

dp-bit.c: $(srcdir)/config/fp-bit.c
	cat $(srcdir)/config/fp-bit.c > dp-bit.c

# We need to use -fpic when we are using gcc to compile the routines in
# initfini.c.  This is only really needed when we are going to use gcc/g++
# to produce a shared library, but since we don't know ahead of time when
# we will be doing that, we just always use -fpic when compiling the
# routines in initfini.c.
# -fpic currently isn't supported for the m32r.

CRTSTUFF_T_CFLAGS =

# .init/.fini section routines

$(T)crtinit.o: $(srcdir)/config/m32r/initfini.c $(GCC_PASSES) $(CONFIG_H)
	$(GCC_FOR_TARGET) $(GCC_CFLAGS) $(MULTILIB_CFLAGS) \
	$(CRTSTUFF_T_CFLAGS) $(INCLUDES) -DCRT_INIT \
	-finhibit-size-directive -fno-inline-functions -g0 \
	-mmodel=medium -c $(srcdir)/config/m32r/initfini.c \
	-o $(T)crtinit.o

$(T)crtfini.o: $(srcdir)/config/m32r/initfini.c $(GCC_PASSES) $(CONFIG_H)
	$(GCC_FOR_TARGET) $(GCC_CFLAGS) $(MULTILIB_CFLAGS) \
	$(CRTSTUFF_T_CFLAGS) $(INCLUDES) -DCRT_FINI \
	-finhibit-size-directive -fno-inline-functions -g0 \
	-mmodel=medium -c $(srcdir)/config/m32r/initfini.c \
	-o $(T)crtfini.o

# Output directories for the m32rx/m32r2 multilib variants.
m32rx:
	mkdir $@
m32r2:
	mkdir $@

# -mmodel={small,medium} requires separate libraries.
# We don't build libraries for the large model, instead we use the medium
# libraries.  The only difference is that the large model can handle jumps
# more than 26 signed bits away.

MULTILIB_OPTIONS = mmodel=small/mmodel=medium m32r/m32rx/m32r2
MULTILIB_DIRNAMES = small medium m32r m32rx m32r2
MULTILIB_MATCHES = mmodel?medium=mmodel?large

# Set MULTILIB_EXTRA_OPTS so shipped libraries have small data in .sdata and
# SHN_M32R_SCOMMON.
# This is important for objects referenced in system header files.
MULTILIB_EXTRA_OPTS = msdata=sdata

EXTRA_MULTILIB_PARTS = crtinit.o crtfini.o

LIBGCC = stmp-multilib
INSTALL_LIBGCC = install-multilib
/linux.h
0,0 → 1,112
/* Definitions for Renesas M32R running Linux-based GNU systems using ELF.
Copyright (C) 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 3, or (at your
option) any later version.
 
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
License for more details.
 
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
 
#define LINUX_DEFAULT_ELF

/* A lie, I guess, but the general idea behind linux/ELF is that we are
   supposed to be outputting something that will assemble under SVr4.
   This gets us pretty close.  */

#define HANDLE_SYSV_PRAGMA

#undef HANDLE_PRAGMA_PACK

#undef TARGET_VERSION
#define TARGET_VERSION fprintf (stderr, " (M32R GNU/Linux with ELF)");

/* Basic C type choices for GNU/Linux: `unsigned int' size_t, `int'
   ptrdiff_t, and a `long int' wchar_t of BITS_PER_WORD bits.  */
#undef SIZE_TYPE
#define SIZE_TYPE "unsigned int"
#undef PTRDIFF_TYPE
#define PTRDIFF_TYPE "int"
#undef WCHAR_TYPE
#define WCHAR_TYPE "long int"
#undef WCHAR_TYPE_SIZE
#define WCHAR_TYPE_SIZE BITS_PER_WORD

/* Provide a LINK_SPEC appropriate for Linux.  Here we provide support
   for the special GCC options -static and -shared, which allow us to
   link things in one of these three modes by applying the appropriate
   combinations of options at link-time.  We like to support here for
   as many of the other GNU linker options as possible.  But I don't
   have the time to search for those flags.  I am sure how to add
   support for -soname shared_object_name.  H.J.

   I took out %{v:%{!V:-V}}.  It is too much :-(.  They can use
   -Wl,-V.

   When the -shared link option is used a final link is not being
   done.  */

/* If ELF is the default format, we should not use /lib/elf.  */

#define GLIBC_DYNAMIC_LINKER "/lib/ld-linux.so.2"

/* The two variants differ only in the linker emulation selected by
   endianness (-m m32rlelf_linux vs. -m m32relf_linux).  */
#undef LINK_SPEC
#if TARGET_LITTLE_ENDIAN
#define LINK_SPEC "%(link_cpu) -m m32rlelf_linux %{shared:-shared} \
%{!shared: \
%{!ibcs: \
%{!static: \
%{rdynamic:-export-dynamic} \
%{!dynamic-linker:-dynamic-linker " LINUX_DYNAMIC_LINKER "}} \
%{static:-static}}}"
#else
#define LINK_SPEC "%(link_cpu) -m m32relf_linux %{shared:-shared} \
%{!shared: \
%{!ibcs: \
%{!static: \
%{rdynamic:-export-dynamic} \
%{!dynamic-linker:-dynamic-linker " LINUX_DYNAMIC_LINKER "}} \
%{static:-static}}}"
#endif

#undef LIB_SPEC
#define LIB_SPEC \
"%{shared: -lc} \
%{!shared: %{mieee-fp:-lieee} %{pthread:-lpthread} \
%{profile:-lc_p} %{!profile: -lc}}"

/* Startup objects; the HAVE_LD_PIE variant also wires in the PIE and
   static-link C runtime object files.  */
#undef STARTFILE_SPEC
#if defined HAVE_LD_PIE
#define STARTFILE_SPEC \
"%{!shared: %{pg|p|profile:gcrt1.o%s;pie:Scrt1.o%s;:crt1.o%s}} \
crti.o%s %{static:crtbeginT.o%s;shared|pie:crtbeginS.o%s;:crtbegin.o%s}"
#else
#define STARTFILE_SPEC \
"%{!shared: \
%{pg:gcrt1.o%s} %{!pg:%{p:gcrt1.o%s} %{!p:crt1.o%s}}}\
crti.o%s %{!shared:crtbegin.o%s} %{shared:crtbeginS.o%s}"
#endif

#undef ENDFILE_SPEC
#define ENDFILE_SPEC \
"%{shared|pie:crtendS.o%s;:crtend.o%s} crtn.o%s"

#undef SUBTARGET_CPP_SPEC
#define SUBTARGET_CPP_SPEC "\
%{posix:-D_POSIX_SOURCE} \
%{pthread:-D_REENTRANT -D_PTHREADS} \
"

#define TARGET_OS_CPP_BUILTINS() LINUX_TARGET_OS_CPP_BUILTINS()

/* Emit the standard end-of-file marker (file_end_indicate_exec_stack)
   at the end of each assembly file.  */
#define TARGET_ASM_FILE_END file_end_indicate_exec_stack
/m32r.h
0,0 → 1,1612
/* Definitions of target machine for GNU compiler, Renesas M32R cpu.
Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004,
2005, 2006, 2007 Free Software Foundation, Inc.
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 3, or (at your
option) any later version.
 
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
License for more details.
 
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
 
/* Things to do:
- longlong.h?
*/
 
#undef SWITCH_TAKES_ARG
#undef WORD_SWITCH_TAKES_ARG
#undef HANDLE_SYSV_PRAGMA
#undef SIZE_TYPE
#undef PTRDIFF_TYPE
#undef WCHAR_TYPE
#undef WCHAR_TYPE_SIZE
#undef TARGET_VERSION
#undef CPP_SPEC
#undef ASM_SPEC
#undef LINK_SPEC
#undef STARTFILE_SPEC
#undef ENDFILE_SPEC
 
#undef ASM_APP_ON
#undef ASM_APP_OFF
 
/* M32R/X overrides. */
/* Print subsidiary information on the compiler version in use. */
#define TARGET_VERSION fprintf (stderr, " (m32r/x/2)");
 
/* Additional flags for the preprocessor. */
#define CPP_CPU_SPEC "%{m32rx:-D__M32RX__ -D__m32rx__ -U__M32R2__ -U__m32r2__} \
%{m32r2:-D__M32R2__ -D__m32r2__ -U__M32RX__ -U__m32rx__} \
%{m32r:-U__M32RX__ -U__m32rx__ -U__M32R2__ -U__m32r2__} \
"
 
/* Assembler switches. */
#define ASM_CPU_SPEC \
"%{m32r} %{m32rx} %{m32r2} %{!O0: %{O*: -O}} --no-warn-explicit-parallel-conflicts"
 
/* Use m32rx specific crt0/crtinit/crtfini files. */
#define STARTFILE_CPU_SPEC "%{!shared:crt0.o%s} %{m32rx:m32rx/crtinit.o%s} %{!m32rx:crtinit.o%s}"
#define ENDFILE_CPU_SPEC "-lgloss %{m32rx:m32rx/crtfini.o%s} %{!m32rx:crtfini.o%s}"
 
/* Define this macro as a C expression for the initializer of an array of
strings to tell the driver program which options are defaults for this
target and thus do not need to be handled specially when using
`MULTILIB_OPTIONS'. */
#define SUBTARGET_MULTILIB_DEFAULTS , "m32r"
 
/* Number of additional registers the subtarget defines. */
#define SUBTARGET_NUM_REGISTERS 1
 
/* 1 for registers that cannot be allocated. */
#define SUBTARGET_FIXED_REGISTERS , 1
 
/* 1 for registers that are not available across function calls. */
#define SUBTARGET_CALL_USED_REGISTERS , 1
 
/* Order to allocate model specific registers. */
#define SUBTARGET_REG_ALLOC_ORDER , 19
 
/* Registers which are accumulators. */
#define SUBTARGET_REG_CLASS_ACCUM 0x80000
 
/* All registers added. */
#define SUBTARGET_REG_CLASS_ALL SUBTARGET_REG_CLASS_ACCUM
 
/* Additional accumulator registers. */
#define SUBTARGET_ACCUM_P(REGNO) ((REGNO) == 19)
 
/* Define additional register names. */
#define SUBTARGET_REGISTER_NAMES , "a1"
/* end M32R/X overrides. */
 
/* Print subsidiary information on the compiler version in use. */
#ifndef TARGET_VERSION
#define TARGET_VERSION fprintf (stderr, " (m32r)")
#endif
 
/* Switch Recognition by gcc.c. Add -G xx support. */
 
#undef SWITCH_TAKES_ARG
#define SWITCH_TAKES_ARG(CHAR) \
(DEFAULT_SWITCH_TAKES_ARG (CHAR) || (CHAR) == 'G')
 
/* Names to predefine in the preprocessor for this target machine. */
/* __M32R__ is defined by the existing compiler so we use that. */
#define TARGET_CPU_CPP_BUILTINS() \
do \
{ \
builtin_define ("__M32R__"); \
builtin_define ("__m32r__"); \
builtin_assert ("cpu=m32r"); \
builtin_assert ("machine=m32r"); \
builtin_define (TARGET_BIG_ENDIAN \
? "__BIG_ENDIAN__" : "__LITTLE_ENDIAN__"); \
} \
while (0)
 
/* This macro defines names of additional specifications to put in the specs
that can be used in various specifications like CC1_SPEC. Its definition
is an initializer with a subgrouping for each command option.
 
Each subgrouping contains a string constant, that defines the
specification name, and a string constant that used by the GCC driver
program.
 
Do not define this macro if it does not need to do anything. */
 
#ifndef SUBTARGET_EXTRA_SPECS
#define SUBTARGET_EXTRA_SPECS
#endif
 
#ifndef ASM_CPU_SPEC
#define ASM_CPU_SPEC ""
#endif
 
#ifndef CPP_CPU_SPEC
#define CPP_CPU_SPEC ""
#endif
 
#ifndef CC1_CPU_SPEC
#define CC1_CPU_SPEC ""
#endif
 
#ifndef LINK_CPU_SPEC
#define LINK_CPU_SPEC ""
#endif
 
#ifndef STARTFILE_CPU_SPEC
#define STARTFILE_CPU_SPEC "%{!shared:crt0.o%s} crtinit.o%s"
#endif
 
#ifndef ENDFILE_CPU_SPEC
#define ENDFILE_CPU_SPEC "-lgloss crtfini.o%s"
#endif
 
#ifndef RELAX_SPEC
#if 0 /* Not supported yet. */
#define RELAX_SPEC "%{mrelax:-relax}"
#else
#define RELAX_SPEC ""
#endif
#endif
 
#define EXTRA_SPECS \
{ "asm_cpu", ASM_CPU_SPEC }, \
{ "cpp_cpu", CPP_CPU_SPEC }, \
{ "cc1_cpu", CC1_CPU_SPEC }, \
{ "link_cpu", LINK_CPU_SPEC }, \
{ "startfile_cpu", STARTFILE_CPU_SPEC }, \
{ "endfile_cpu", ENDFILE_CPU_SPEC }, \
{ "relax", RELAX_SPEC }, \
SUBTARGET_EXTRA_SPECS
 
#define CPP_SPEC "%(cpp_cpu)"
 
#undef CC1_SPEC
#define CC1_SPEC "%{G*} %(cc1_cpu)"
 
/* Options to pass on to the assembler. */
#undef ASM_SPEC
#define ASM_SPEC "%{v} %(asm_cpu) %(relax) %{fpic|fpie:-K PIC} %{fPIC|fPIE:-K PIC}"
 
#define LINK_SPEC "%{v} %(link_cpu) %(relax)"
 
#undef STARTFILE_SPEC
#define STARTFILE_SPEC "%(startfile_cpu)"
 
#undef ENDFILE_SPEC
#define ENDFILE_SPEC "%(endfile_cpu)"
 
#undef LIB_SPEC
/* Run-time compilation parameters selecting different hardware subsets. */
 
#define TARGET_M32R (! TARGET_M32RX && ! TARGET_M32R2)
 
#ifndef TARGET_LITTLE_ENDIAN
#define TARGET_LITTLE_ENDIAN 0
#endif
#define TARGET_BIG_ENDIAN (! TARGET_LITTLE_ENDIAN)
 
/* This defaults us to m32r. */
#ifndef TARGET_CPU_DEFAULT
#define TARGET_CPU_DEFAULT 0
#endif
 
/* Code Models

Code models are used to select between two choices of two separate
possibilities (address space size, call insn to use):

small: addresses use 24 bits, use bl to make calls
medium: addresses use 32 bits, use bl to make calls (*1)
large: addresses use 32 bits, use seth/add3/jl to make calls (*2)

The fourth is "addresses use 24 bits, use seth/add3/jl to make calls" but
using this one doesn't make much sense.

(*1) The linker may eventually be able to relax seth/add3 -> ld24.
(*2) The linker may eventually be able to relax seth/add3/jl -> bl.

Internally these are recorded as TARGET_ADDR{24,32} and
TARGET_CALL{26,32}.

The __model__ attribute can be used to select the code model to use when
accessing particular objects. */

enum m32r_model { M32R_MODEL_SMALL, M32R_MODEL_MEDIUM, M32R_MODEL_LARGE };

extern enum m32r_model m32r_model;
#define TARGET_MODEL_SMALL (m32r_model == M32R_MODEL_SMALL)
#define TARGET_MODEL_MEDIUM (m32r_model == M32R_MODEL_MEDIUM)
#define TARGET_MODEL_LARGE (m32r_model == M32R_MODEL_LARGE)
#define TARGET_ADDR24 (m32r_model == M32R_MODEL_SMALL)
#define TARGET_ADDR32 (! TARGET_ADDR24)
/* TARGET_CALL26 references TARGET_CALL32, defined just below; this is
   fine because object-like macros expand at their point of use.  */
#define TARGET_CALL26 (! TARGET_CALL32)
#define TARGET_CALL32 (m32r_model == M32R_MODEL_LARGE)

/* The default is the small model. */
#ifndef M32R_MODEL_DEFAULT
#define M32R_MODEL_DEFAULT M32R_MODEL_SMALL
#endif
 
/* Small Data Area

The SDA consists of sections .sdata, .sbss, and .scommon.
.scommon isn't a real section, symbols in it have their section index
set to SHN_M32R_SCOMMON, though support for it exists in the linker script.

Two switches control the SDA:

-G NNN - specifies the maximum size of variable to go in the SDA

-msdata=foo - specifies how such variables are handled

-msdata=none - small data area is disabled

-msdata=sdata - small data goes in the SDA, special code isn't
generated to use it, and special relocs aren't
generated

-msdata=use - small data goes in the SDA, special code is generated
to use the SDA and special relocs are generated

The SDA is not multilib'd, it isn't necessary.
MULTILIB_EXTRA_OPTS is set in tmake_file to -msdata=sdata so multilib'd
libraries have small data in .sdata/SHN_M32R_SCOMMON so programs that use
-msdata=use will successfully link with them (references in header files
will cause the compiler to emit code that refers to library objects in
.data). ??? There can be a problem if the user passes a -G value greater
than the default and a library object in a header file is that size.
The default is 8 so this should be rare - if it occurs the user
is required to rebuild the libraries or use a smaller value for -G. */

/* Maximum size of variables that go in .sdata/.sbss.
The -msdata=foo switch also controls how small variables are handled. */
#ifndef SDATA_DEFAULT_SIZE
#define SDATA_DEFAULT_SIZE 8
#endif

enum m32r_sdata { M32R_SDATA_NONE, M32R_SDATA_SDATA, M32R_SDATA_USE };

extern enum m32r_sdata m32r_sdata;
#define TARGET_SDATA_NONE (m32r_sdata == M32R_SDATA_NONE)
#define TARGET_SDATA_SDATA (m32r_sdata == M32R_SDATA_SDATA)
#define TARGET_SDATA_USE (m32r_sdata == M32R_SDATA_USE)

/* Default is to disable the SDA
[for upward compatibility with previous toolchains]. */
#ifndef M32R_SDATA_DEFAULT
#define M32R_SDATA_DEFAULT M32R_SDATA_NONE
#endif

/* Define this macro as a C expression for the initializer of an array of
strings to tell the driver program which options are defaults for this
target and thus do not need to be handled specially when using
`MULTILIB_OPTIONS'. */
#ifndef SUBTARGET_MULTILIB_DEFAULTS
#define SUBTARGET_MULTILIB_DEFAULTS
#endif

/* "mmodel=small" matches M32R_MODEL_DEFAULT above.  */
#ifndef MULTILIB_DEFAULTS
#define MULTILIB_DEFAULTS { "mmodel=small" SUBTARGET_MULTILIB_DEFAULTS }
#endif
 
/* Sometimes certain combinations of command options do not make
sense on a particular target machine. You can define a macro
`OVERRIDE_OPTIONS' to take account of this. This macro, if
defined, is executed once just after all the command options have
been parsed.

Don't use this macro to turn on various extra optimizations for
`-O'. That is what `OPTIMIZATION_OPTIONS' is for. */

#ifndef SUBTARGET_OVERRIDE_OPTIONS
#define SUBTARGET_OVERRIDE_OPTIONS
#endif

#define OVERRIDE_OPTIONS \
do \
{ \
/* These need to be done at start up. \
It's convenient to do them here. */ \
m32r_init (); \
SUBTARGET_OVERRIDE_OPTIONS \
} \
while (0)

#ifndef SUBTARGET_OPTIMIZATION_OPTIONS
#define SUBTARGET_OPTIMIZATION_OPTIONS
#endif

/* Extra tuning applied per -O level: enable regmove at -O1, and drop
   the frame pointer when optimizing for size (-Os).  */
#define OPTIMIZATION_OPTIONS(LEVEL, SIZE) \
do \
{ \
if (LEVEL == 1) \
flag_regmove = TRUE; \
\
if (SIZE) \
{ \
flag_omit_frame_pointer = TRUE; \
} \
\
SUBTARGET_OPTIMIZATION_OPTIONS \
} \
while (0)

/* Define this macro if debugging can be performed even without a
frame pointer. If this macro is defined, GCC will turn on the
`-fomit-frame-pointer' option whenever `-O' is specified. */
#define CAN_DEBUG_WITHOUT_FP
/* Target machine storage layout. */

/* Define this if most significant bit is lowest numbered
in instructions that operate on numbered bit-fields. */
#define BITS_BIG_ENDIAN 1

/* Define this if most significant byte of a word is the lowest numbered. */
#define BYTES_BIG_ENDIAN (TARGET_LITTLE_ENDIAN == 0)

/* Define this if most significant word of a multiword number is the lowest
numbered. */
#define WORDS_BIG_ENDIAN (TARGET_LITTLE_ENDIAN == 0)

/* Define this macro if WORDS_BIG_ENDIAN is not constant. This must
be a constant value with the same meaning as WORDS_BIG_ENDIAN,
which will be used only when compiling libgcc2.c. Typically the
value will be set based on preprocessor defines. */
/*#define LIBGCC2_WORDS_BIG_ENDIAN 1*/

/* Width of a word, in units (bytes). */
#define UNITS_PER_WORD 4

/* Define this macro if it is advisable to hold scalars in registers
in a wider mode than that declared by the program. In such cases,
the value is constrained to be within the bounds of the declared
type, but kept valid in the wider mode. The signedness of the
extension may differ from that of the type. */
#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \
if (GET_MODE_CLASS (MODE) == MODE_INT \
&& GET_MODE_SIZE (MODE) < UNITS_PER_WORD) \
{ \
(MODE) = SImode; \
}

/* Allocation boundary (in *bits*) for storing arguments in argument list. */
#define PARM_BOUNDARY 32

/* Boundary (in *bits*) on which stack pointer should be aligned. */
#define STACK_BOUNDARY 32

/* ALIGN FRAMES on word boundaries */
#define M32R_STACK_ALIGN(LOC) (((LOC) + 3) & ~ 3)

/* Allocation boundary (in *bits*) for the code of a function. */
#define FUNCTION_BOUNDARY 32

/* Alignment of field after `int : 0' in a structure. */
#define EMPTY_FIELD_BOUNDARY 32

/* Every structure's size must be a multiple of this. */
#define STRUCTURE_SIZE_BOUNDARY 8

/* A bit-field declared as `int' forces `int' alignment for the struct. */
#define PCC_BITFIELD_TYPE_MATTERS 1

/* No data type wants to be aligned rounder than this. */
#define BIGGEST_ALIGNMENT 32

/* The best alignment to use in cases where we have a choice. */
#define FASTEST_ALIGNMENT 32

/* Make strings word-aligned so strcpy from constants will be faster. */
#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
((TREE_CODE (EXP) == STRING_CST \
&& (ALIGN) < FASTEST_ALIGNMENT) \
? FASTEST_ALIGNMENT : (ALIGN))

/* Make arrays of chars word-aligned for the same reasons. */
#define DATA_ALIGNMENT(TYPE, ALIGN) \
(TREE_CODE (TYPE) == ARRAY_TYPE \
&& TYPE_MODE (TREE_TYPE (TYPE)) == QImode \
&& (ALIGN) < FASTEST_ALIGNMENT ? FASTEST_ALIGNMENT : (ALIGN))

/* Set this nonzero if move instructions will actually fail to work
when given unaligned data. */
#define STRICT_ALIGNMENT 1

/* Define LABEL_ALIGN to calculate code length of PNOP at labels.
   NOTE(review): the value is a log2 alignment, so 2 presumably means
   labels are aligned to 4 bytes — confirm against the md file.  */
#define LABEL_ALIGN(insn) 2
/* Layout of source language data types. */

#define SHORT_TYPE_SIZE 16
#define INT_TYPE_SIZE 32
#define LONG_TYPE_SIZE 32
#define LONG_LONG_TYPE_SIZE 64
#define FLOAT_TYPE_SIZE 32
#define DOUBLE_TYPE_SIZE 64
#define LONG_DOUBLE_TYPE_SIZE 64

/* Define this as 1 if `char' should by default be signed; else as 0. */
#define DEFAULT_SIGNED_CHAR 1

/* size_t/ptrdiff_t are 32-bit here since long is 32 bits; wchar_t is a
   16-bit unsigned short.  */
#define SIZE_TYPE "long unsigned int"
#define PTRDIFF_TYPE "long int"
#define WCHAR_TYPE "short unsigned int"
#define WCHAR_TYPE_SIZE 16
/* Standard register usage. */

/* Number of actual hardware registers.
The hardware registers are assigned numbers for the compiler
from 0 to just below FIRST_PSEUDO_REGISTER.
All registers that the compiler knows about must be given numbers,
even those that are not normally considered general registers. */

#define M32R_NUM_REGISTERS 19

#ifndef SUBTARGET_NUM_REGISTERS
#define SUBTARGET_NUM_REGISTERS 0
#endif

#define FIRST_PSEUDO_REGISTER (M32R_NUM_REGISTERS + SUBTARGET_NUM_REGISTERS)
/* 1 for registers that have pervasive standard uses
and are not available for the register allocator.

0-3 - arguments/results
4-5 - call used [4 is used as a tmp during prologue/epilogue generation]
6 - call used, gptmp
7 - call used, static chain pointer
8-11 - call saved
12 - call saved [reserved for global pointer]
13 - frame pointer
14 - subroutine link register
15 - stack pointer
16 - arg pointer
17 - carry flag
18 - accumulator
19 - accumulator 1 in the m32r/x
By default, the extension registers are not available. */

#ifndef SUBTARGET_FIXED_REGISTERS
#define SUBTARGET_FIXED_REGISTERS
#endif

/* Exactly M32R_NUM_REGISTERS (19) entries; a subtarget appends its own
   (NOTE(review): SUBTARGET_FIXED_REGISTERS presumably expands to
   ", x" entries including the leading comma — confirm in subtarget
   headers).  Fixed: sp(15), argp(16), carry(17), accum(18).  */
#define FIXED_REGISTERS \
{ \
0, 0, 0, 0, 0, 0, 0, 0, \
0, 0, 0, 0, 0, 0, 0, 1, \
1, 1, 1 \
SUBTARGET_FIXED_REGISTERS \
}

/* 1 for registers not available across function calls.
These must include the FIXED_REGISTERS and also any
registers that can be used without being saved.
The latter must include the registers where values are returned
and the register where structure-value addresses are passed.
Aside from that, you can include as many other registers as you like. */

#ifndef SUBTARGET_CALL_USED_REGISTERS
#define SUBTARGET_CALL_USED_REGISTERS
#endif

/* Clobbered by calls: r0-r7, lr(14), plus all fixed registers.  */
#define CALL_USED_REGISTERS \
{ \
1, 1, 1, 1, 1, 1, 1, 1, \
0, 0, 0, 0, 0, 0, 1, 1, \
1, 1, 1 \
SUBTARGET_CALL_USED_REGISTERS \
}

#define CALL_REALLY_USED_REGISTERS CALL_USED_REGISTERS
 
/* Zero or more C statements that may conditionally modify two variables
`fixed_regs' and `call_used_regs' (both of type `char []') after they
have been initialized from the two preceding macros.

This is necessary in case the fixed or call-clobbered registers depend
on target flags.

You need not define this macro if it has no work to do. */

#ifdef SUBTARGET_CONDITIONAL_REGISTER_USAGE
#define CONDITIONAL_REGISTER_USAGE SUBTARGET_CONDITIONAL_REGISTER_USAGE
#else
/* Under -fpic the PIC base register must not be allocated.  */
#define CONDITIONAL_REGISTER_USAGE \
do \
{ \
if (flag_pic) \
{ \
fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
} \
} \
while (0)
#endif

/* If defined, an initializer for a vector of integers, containing the
numbers of hard registers in the order in which GCC should
prefer to use them (from most preferred to least). */

#ifndef SUBTARGET_REG_ALLOC_ORDER
#define SUBTARGET_REG_ALLOC_ORDER
#endif

#if 1 /* Better for int code. */
#define REG_ALLOC_ORDER \
{ \
4, 5, 6, 7, 2, 3, 8, 9, 10, \
11, 12, 13, 14, 0, 1, 15, 16, 17, 18 \
SUBTARGET_REG_ALLOC_ORDER \
}

#else /* Better for fp code at expense of int code. */
#define REG_ALLOC_ORDER \
{ \
0, 1, 2, 3, 4, 5, 6, 7, 8, \
9, 10, 11, 12, 13, 14, 15, 16, 17, 18 \
SUBTARGET_REG_ALLOC_ORDER \
}
#endif
 
/* Return number of consecutive hard regs needed starting at reg REGNO
to hold something of mode MODE.
This is ordinarily the length in words of a value of mode MODE
but can be less for certain modes in special long registers. */
#define HARD_REGNO_NREGS(REGNO, MODE) \
((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)

/* Value is 1 if hard register REGNO can hold a value of machine-mode MODE.
   Implemented as a table lookup: per-register bitmask ANDed with a
   per-mode class bit (both tables defined in m32r.c).  */
extern const unsigned int m32r_hard_regno_mode_ok[FIRST_PSEUDO_REGISTER];
extern unsigned int m32r_mode_class[];
#define HARD_REGNO_MODE_OK(REGNO, MODE) \
((m32r_hard_regno_mode_ok[REGNO] & m32r_mode_class[MODE]) != 0)

/* A C expression that is nonzero if it is desirable to choose
register allocation so as to avoid move instructions between a
value of mode MODE1 and a value of mode MODE2.

If `HARD_REGNO_MODE_OK (R, MODE1)' and `HARD_REGNO_MODE_OK (R,
MODE2)' are ever different for any R, then `MODES_TIEABLE_P (MODE1,
MODE2)' must be zero. */

/* Tie QI/HI/SI modes together. */
#define MODES_TIEABLE_P(MODE1, MODE2) \
( GET_MODE_CLASS (MODE1) == MODE_INT \
&& GET_MODE_CLASS (MODE2) == MODE_INT \
&& GET_MODE_SIZE (MODE1) <= UNITS_PER_WORD \
&& GET_MODE_SIZE (MODE2) <= UNITS_PER_WORD)

#define HARD_REGNO_RENAME_OK(OLD_REG, NEW_REG) \
m32r_hard_regno_rename_ok (OLD_REG, NEW_REG)
/* Register classes and constants. */

/* Define the classes of registers for register constraints in the
machine description. Also define ranges of constants.

One of the classes must always be named ALL_REGS and include all hard regs.
If there is more than one class, another class must be named NO_REGS
and contain no registers.

The name GENERAL_REGS must be the name of a class (or an alias for
another name such as ALL_REGS). This is the class of registers
that is allowed by "g" or "r" in a register constraint.
Also, registers outside this class are allocated only when
instructions express preferences for them.

The classes must be numbered in nondecreasing order; that is,
a larger-numbered class must never be contained completely
in a smaller-numbered class.

For any two classes, it is very desirable that there be another
class that represents their union.

It is important that any condition codes have class NO_REGS.
See `register_operand'. */

enum reg_class
{
NO_REGS, CARRY_REG, ACCUM_REGS, GENERAL_REGS, ALL_REGS, LIM_REG_CLASSES
};

#define N_REG_CLASSES ((int) LIM_REG_CLASSES)

/* Give names of register classes as strings for dump file. */
#define REG_CLASS_NAMES \
{ "NO_REGS", "CARRY_REG", "ACCUM_REGS", "GENERAL_REGS", "ALL_REGS" }

/* Define which registers fit in which classes.
This is an initializer for a vector of HARD_REG_SET
of length N_REG_CLASSES. */

#ifndef SUBTARGET_REG_CLASS_CARRY
#define SUBTARGET_REG_CLASS_CARRY 0
#endif

#ifndef SUBTARGET_REG_CLASS_ACCUM
#define SUBTARGET_REG_CLASS_ACCUM 0
#endif

#ifndef SUBTARGET_REG_CLASS_GENERAL
#define SUBTARGET_REG_CLASS_GENERAL 0
#endif

#ifndef SUBTARGET_REG_CLASS_ALL
#define SUBTARGET_REG_CLASS_ALL 0
#endif

/* Bit N = register N:  0x1ffff = regs 0-16 (GPRs + arg pointer),
   0x20000 = reg 17 (carry), 0x40000 = reg 18 (accumulator).  */
#define REG_CLASS_CONTENTS \
{ \
{ 0x00000 }, \
{ 0x20000 | SUBTARGET_REG_CLASS_CARRY }, \
{ 0x40000 | SUBTARGET_REG_CLASS_ACCUM }, \
{ 0x1ffff | SUBTARGET_REG_CLASS_GENERAL }, \
{ 0x7ffff | SUBTARGET_REG_CLASS_ALL }, \
}
 
/* The same information, inverted:
Return the class number of the smallest class containing
reg number REGNO. This could be a conditional expression
or could index an array. */
extern enum reg_class m32r_regno_reg_class[FIRST_PSEUDO_REGISTER];
#define REGNO_REG_CLASS(REGNO) (m32r_regno_reg_class[REGNO])

/* The class value for index registers, and the one for base regs. */
#define INDEX_REG_CLASS GENERAL_REGS
#define BASE_REG_CLASS GENERAL_REGS

/* Map md-file register-constraint letters to classes:
   'c' = carry flag, 'a' = accumulator(s).  */
#define REG_CLASS_FROM_LETTER(C) \
( (C) == 'c' ? CARRY_REG \
: (C) == 'a' ? ACCUM_REGS \
: NO_REGS)

/* These assume that REGNO is a hard or pseudo reg number.
They give nonzero only if REGNO is a hard reg of the suitable class
or a pseudo reg currently allocated to a suitable hard reg.
Since they use reg_renumber, they are safe only once reg_renumber
has been allocated, which happens in local-alloc.c. */
#define REGNO_OK_FOR_BASE_P(REGNO) \
((REGNO) < FIRST_PSEUDO_REGISTER \
? GPR_P (REGNO) || (REGNO) == ARG_POINTER_REGNUM \
: GPR_P (reg_renumber[REGNO]))

#define REGNO_OK_FOR_INDEX_P(REGNO) REGNO_OK_FOR_BASE_P(REGNO)

/* Given an rtx X being reloaded into a reg required to be
in class CLASS, return the class of reg to actually use.
In general this is just CLASS; but on some machines
in some cases it is preferable to use a more restrictive class. */
#define PREFERRED_RELOAD_CLASS(X,CLASS) (CLASS)

/* Return the maximum number of consecutive registers
needed to represent mode MODE in a register of class CLASS. */
#define CLASS_MAX_NREGS(CLASS, MODE) \
((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
 
/* The letters I, J, K, L, M, N, O, P in a register constraint string
can be used to stand for particular ranges of immediate operands.
This macro defines what the ranges are.
C is the letter, and VALUE is a constant value.
Return 1 if VALUE is in the range specified by C. */
/* 'I' is used for 8 bit signed immediates.
'J' is used for 16 bit signed immediates.
'K' is used for 16 bit unsigned immediates.
'L' is used for 16 bit immediates left shifted by 16 (sign ???).
'M' is used for 24 bit unsigned immediates.
'N' is used for values whose negation fits in a signed 8 bit
immediate (-127 .. 128), see INVERTED_SIGNED_8BIT below.
'O' is used for 5 bit unsigned immediates (shift count).
'P' is used for 16 bit signed immediates for compares
(values in the range -32767 to +32768). */

/* Return true if a value is inside a range.
   Single unsigned compare after biasing by LOW.  */
#define IN_RANGE_P(VALUE, LOW, HIGH) \
(((unsigned HOST_WIDE_INT)((VALUE) - (LOW))) \
<= ((unsigned HOST_WIDE_INT)((HIGH) - (LOW))))

/* Local to this file. */
#define INT8_P(X) ((X) >= - 0x80 && (X) <= 0x7f)
#define INT16_P(X) ((X) >= - 0x8000 && (X) <= 0x7fff)
#define CMP_INT16_P(X) ((X) >= - 0x7fff && (X) <= 0x8000)
#define UPPER16_P(X) (((X) & 0xffff) == 0 \
&& ((X) >> 16) >= - 0x8000 \
&& ((X) >> 16) <= 0x7fff)
#define UINT16_P(X) (((unsigned HOST_WIDE_INT) (X)) <= 0x0000ffff)
#define UINT24_P(X) (((unsigned HOST_WIDE_INT) (X)) <= 0x00ffffff)
#define UINT32_P(X) (((unsigned HOST_WIDE_INT) (X)) <= 0xffffffff)
#define UINT5_P(X) ((X) >= 0 && (X) < 32)
#define INVERTED_SIGNED_8BIT(VAL) ((VAL) >= -127 && (VAL) <= 128)

#define CONST_OK_FOR_LETTER_P(VALUE, C) \
( (C) == 'I' ? INT8_P (VALUE) \
: (C) == 'J' ? INT16_P (VALUE) \
: (C) == 'K' ? UINT16_P (VALUE) \
: (C) == 'L' ? UPPER16_P (VALUE) \
: (C) == 'M' ? UINT24_P (VALUE) \
: (C) == 'N' ? INVERTED_SIGNED_8BIT (VALUE) \
: (C) == 'O' ? UINT5_P (VALUE) \
: (C) == 'P' ? CMP_INT16_P (VALUE) \
: 0)

/* Similar, but for floating constants, and defining letters G and H.
Here VALUE is the CONST_DOUBLE rtx itself.
For the m32r, handle a few constants inline.
??? We needn't treat DI and DF modes differently, but for now we do. */
#define CONST_DOUBLE_OK_FOR_LETTER_P(VALUE, C) \
( (C) == 'G' ? easy_di_const (VALUE) \
: (C) == 'H' ? easy_df_const (VALUE) \
: 0)
 
/* A C expression that defines the optional machine-dependent constraint
letters that can be used to segregate specific types of operands,
usually memory references, for the target machine. It should return 1 if
VALUE corresponds to the operand type represented by the constraint letter
C. If C is not defined as an extra constraint, the value returned should
be 0 regardless of VALUE. */
/* Q is for symbolic addresses loadable with ld24.
R is for symbolic addresses when ld24 can't be used.
S is for stores with pre {inc,dec}rement
T is for indirect of a pointer.
U is for loads with post increment. */

#define EXTRA_CONSTRAINT(VALUE, C) \
( (C) == 'Q' ? ((TARGET_ADDR24 && GET_CODE (VALUE) == LABEL_REF) \
|| addr24_operand (VALUE, VOIDmode)) \
: (C) == 'R' ? ((TARGET_ADDR32 && GET_CODE (VALUE) == LABEL_REF) \
|| addr32_operand (VALUE, VOIDmode)) \
: (C) == 'S' ? (GET_CODE (VALUE) == MEM \
&& STORE_PREINC_PREDEC_P (GET_MODE (VALUE), \
XEXP (VALUE, 0))) \
: (C) == 'T' ? (GET_CODE (VALUE) == MEM \
&& memreg_operand (VALUE, GET_MODE (VALUE))) \
: (C) == 'U' ? (GET_CODE (VALUE) == MEM \
&& LOAD_POSTINC_P (GET_MODE (VALUE), \
XEXP (VALUE, 0))) \
: 0)
/* Stack layout and stack pointer usage. */

/* Define this macro if pushing a word onto the stack moves the stack
pointer to a smaller address. */
#define STACK_GROWS_DOWNWARD

/* Offset from frame pointer to start allocating local variables at.
If FRAME_GROWS_DOWNWARD, this is the offset to the END of the
first local allocated. Otherwise, it is the offset to the BEGINNING
of the first local allocated. */
/* The frame pointer points at the same place as the stack pointer, except if
alloca has been called. */
#define STARTING_FRAME_OFFSET \
M32R_STACK_ALIGN (current_function_outgoing_args_size)

/* Offset from the stack pointer register to the first location at which
outgoing arguments are placed. */
#define STACK_POINTER_OFFSET 0

/* Offset of first parameter from the argument pointer register value. */
#define FIRST_PARM_OFFSET(FNDECL) 0

/* Register to use for pushing function arguments. */
#define STACK_POINTER_REGNUM 15

/* Base register for access to local variables of the function. */
#define FRAME_POINTER_REGNUM 13

/* Base register for access to arguments of the function. */
#define ARG_POINTER_REGNUM 16

/* Register in which static-chain is passed to a function.
This must not be a register used by the prologue. */
#define STATIC_CHAIN_REGNUM 7

/* These aren't official macros.  Internal register-number names used
   throughout this port (see the register-usage table above).  */
#define PROLOGUE_TMP_REGNUM 4
#define RETURN_ADDR_REGNUM 14
/* #define GP_REGNUM 12 */
#define CARRY_REGNUM 17
#define ACCUM_REGNUM 18
#define M32R_MAX_INT_REGS 16

#ifndef SUBTARGET_GPR_P
#define SUBTARGET_GPR_P(REGNO) 0
#endif

#ifndef SUBTARGET_ACCUM_P
#define SUBTARGET_ACCUM_P(REGNO) 0
#endif

#ifndef SUBTARGET_CARRY_P
#define SUBTARGET_CARRY_P(REGNO) 0
#endif

/* Register-classification predicates; subtargets may extend each set.  */
#define GPR_P(REGNO) (IN_RANGE_P ((REGNO), 0, 15) || SUBTARGET_GPR_P (REGNO))
#define ACCUM_P(REGNO) ((REGNO) == ACCUM_REGNUM || SUBTARGET_ACCUM_P (REGNO))
#define CARRY_P(REGNO) ((REGNO) == CARRY_REGNUM || SUBTARGET_CARRY_P (REGNO))
/* Eliminating the frame and arg pointers. */

/* A C expression which is nonzero if a function must have and use a
frame pointer. This expression is evaluated in the reload pass.
If its value is nonzero the function will have a frame pointer. */
#define FRAME_POINTER_REQUIRED current_function_calls_alloca

#if 0
/* C statement to store the difference between the frame pointer
and the stack pointer values immediately after the function prologue.
If `ELIMINABLE_REGS' is defined, this macro will be not be used and
need not be defined. */
#define INITIAL_FRAME_POINTER_OFFSET(VAR) \
((VAR) = m32r_compute_frame_size (get_frame_size ()))
#endif

/* If defined, this macro specifies a table of register pairs used to
eliminate unneeded registers that point into the stack frame. If
it is not defined, the only elimination attempted by the compiler
is to replace references to the frame pointer with references to
the stack pointer.

Note that the elimination of the argument pointer with the stack
pointer is specified first since that is the preferred elimination. */

#define ELIMINABLE_REGS \
{{ FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM }, \
{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM }, \
{ ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM }}

/* A C expression that returns nonzero if the compiler is allowed to
try to replace register number FROM-REG with register number
TO-REG. This macro need only be defined if `ELIMINABLE_REGS' is
defined, and will usually be the constant 1, since most of the
cases preventing register elimination are things that the compiler
already knows about. */

#define CAN_ELIMINATE(FROM, TO) \
((FROM) == ARG_POINTER_REGNUM && (TO) == STACK_POINTER_REGNUM \
? ! frame_pointer_needed \
: 1)

/* This macro is similar to `INITIAL_FRAME_POINTER_OFFSET'. It
specifies the initial difference between the specified pair of
registers. This macro must be defined if `ELIMINABLE_REGS' is
defined. */

/* FP==SP here, so FP->SP is 0; the arg pointer sits above the frame
   minus any pretend (stdarg spill) bytes already counted.  */
#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
do \
{ \
int size = m32r_compute_frame_size (get_frame_size ()); \
\
if ((FROM) == FRAME_POINTER_REGNUM && (TO) == STACK_POINTER_REGNUM) \
(OFFSET) = 0; \
else if ((FROM) == ARG_POINTER_REGNUM && (TO) == FRAME_POINTER_REGNUM) \
(OFFSET) = size - current_function_pretend_args_size; \
else if ((FROM) == ARG_POINTER_REGNUM && (TO) == STACK_POINTER_REGNUM) \
(OFFSET) = size - current_function_pretend_args_size; \
else \
gcc_unreachable (); \
} \
while (0)
/* Function argument passing. */

/* If defined, the maximum amount of space required for outgoing
arguments will be computed and placed into the variable
`current_function_outgoing_args_size'. No space will be pushed
onto the stack for each call; instead, the function prologue should
increase the stack frame size by this amount. */
#define ACCUMULATE_OUTGOING_ARGS 1

/* Value is the number of bytes of arguments automatically
popped when returning from a subroutine call.
FUNDECL is the declaration node of the function (as a tree),
FUNTYPE is the data type of the function (as a tree),
or for a library call it is an identifier node for the subroutine name.
SIZE is the number of bytes of arguments passed on the stack. */
#define RETURN_POPS_ARGS(DECL, FUNTYPE, SIZE) 0

/* Define a data type for recording info about an argument list
during the scan of that argument list. This data type should
hold all necessary information about the function itself
and about the args processed so far, enough to enable macros
such as FUNCTION_ARG to determine where the next arg should go. */
/* A single word count suffices: number of argument words used so far.  */
#define CUMULATIVE_ARGS int

/* Initialize a variable CUM of type CUMULATIVE_ARGS
for a call to a function whose data type is FNTYPE.
For a library call, FNTYPE is 0. */
#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT, N_NAMED_ARGS) \
((CUM) = 0)

/* The number of registers used for parameter passing. Local to this file. */
#define M32R_MAX_PARM_REGS 4

/* 1 if N is a possible register number for function argument passing. */
#define FUNCTION_ARG_REGNO_P(N) \
((unsigned) (N) < M32R_MAX_PARM_REGS)

/* The ROUND_ADVANCE* macros are local to this file. */
/* Round SIZE up to a word boundary. */
#define ROUND_ADVANCE(SIZE) \
(((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)

/* Round arg MODE/TYPE up to the next word boundary. */
#define ROUND_ADVANCE_ARG(MODE, TYPE) \
((MODE) == BLKmode \
? ROUND_ADVANCE ((unsigned int) int_size_in_bytes (TYPE)) \
: ROUND_ADVANCE ((unsigned int) GET_MODE_SIZE (MODE)))

/* Round CUM up to the necessary point for argument MODE/TYPE.
   No per-argument alignment padding on this target, so CUM is
   returned unchanged.  */
#define ROUND_ADVANCE_CUM(CUM, MODE, TYPE) (CUM)

/* Return boolean indicating arg of type TYPE and mode MODE will be passed in
a reg. This includes arguments that have to be passed by reference as the
pointer to them is passed in a reg if one is available (and that is what
we're given).
This macro is only used in this file. */
#define PASS_IN_REG_P(CUM, MODE, TYPE) \
(ROUND_ADVANCE_CUM ((CUM), (MODE), (TYPE)) < M32R_MAX_PARM_REGS)

/* Determine where to put an argument to a function.
Value is zero to push the argument on the stack,
or a hard register in which to store the argument.

MODE is the argument's machine mode.
TYPE is the data type of the argument (as a tree).
This is null for libcalls where that information may
not be available.
CUM is a variable of type CUMULATIVE_ARGS which gives info about
the preceding args and about the function being called.
NAMED is nonzero if this argument is a named parameter
(otherwise it is an extra parameter matching an ellipsis). */
/* On the M32R the first M32R_MAX_PARM_REGS args are normally in registers
and the rest are pushed.  The register number is simply the number of
argument words consumed so far (r0-r3).  */
#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
(PASS_IN_REG_P ((CUM), (MODE), (TYPE)) \
? gen_rtx_REG ((MODE), ROUND_ADVANCE_CUM ((CUM), (MODE), (TYPE))) \
: 0)

/* Update the data in CUM to advance over an argument
of mode MODE and data type TYPE.
(TYPE is null for libcalls where that information may not be available.) */
#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
((CUM) = (ROUND_ADVANCE_CUM ((CUM), (MODE), (TYPE)) \
+ ROUND_ADVANCE_ARG ((MODE), (TYPE))))

/* If defined, a C expression that gives the alignment boundary, in bits,
of an argument with the specified mode and type. If it is not defined,
PARM_BOUNDARY is used for all arguments. */
#if 0
/* We assume PARM_BOUNDARY == UNITS_PER_WORD here. */
#define FUNCTION_ARG_BOUNDARY(MODE, TYPE) \
(((TYPE) ? TYPE_ALIGN (TYPE) : GET_MODE_BITSIZE (MODE)) <= PARM_BOUNDARY \
? PARM_BOUNDARY : 2 * PARM_BOUNDARY)
#endif
/* Function results. */

/* Define how to find the value returned by a function.
VALTYPE is the data type of the value (as a tree).
If the precise function being called is known, FUNC is its FUNCTION_DECL;
otherwise, FUNC is 0. */
/* All values come back in r0 (register 0).  */
#define FUNCTION_VALUE(VALTYPE, FUNC) gen_rtx_REG (TYPE_MODE (VALTYPE), 0)

/* Define how to find the value returned by a library function
assuming the value has mode MODE. */
#define LIBCALL_VALUE(MODE) gen_rtx_REG (MODE, 0)

/* 1 if N is a possible register number for a function value
as seen by the caller. */
/* ??? What about r1 in DI/DF values. */
#define FUNCTION_VALUE_REGNO_P(N) ((N) == 0)

/* Tell GCC to use TARGET_RETURN_IN_MEMORY. */
#define DEFAULT_PCC_STRUCT_RETURN 0
/* Function entry and exit. */

/* Initialize data used by insn expanders. This is called from
init_emit, once for each function, before code is generated. */
#define INIT_EXPANDERS m32r_init_expanders ()

/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
the stack pointer does not matter. The value is tested only in
functions that have frame pointers.
No definition is equivalent to always zero. */
#define EXIT_IGNORE_STACK 1

/* Output assembler code to FILE to increment profiler label # LABELNO
for profiling a function entry.
Three call sequences, depending on addressability of mcount:
PIC (indirect through the GOT via r12), 24-bit direct bl, or a
32-bit seth/or3/jl sequence.
NOTE(review): the trailing "addi sp,#4" pops a word — presumably a
slot the mcount protocol leaves on the stack; confirm against the
runtime's mcount implementation.  */
#undef FUNCTION_PROFILER
#define FUNCTION_PROFILER(FILE, LABELNO) \
do \
{ \
if (flag_pic) \
{ \
fprintf (FILE, "\tld24 r14,#mcount\n"); \
fprintf (FILE, "\tadd r14,r12\n"); \
fprintf (FILE, "\tld r14,@r14\n"); \
fprintf (FILE, "\tjl r14\n"); \
} \
else \
{ \
if (TARGET_ADDR24) \
fprintf (FILE, "\tbl mcount\n"); \
else \
{ \
fprintf (FILE, "\tseth r14,#high(mcount)\n"); \
fprintf (FILE, "\tor3 r14,r14,#low(mcount)\n"); \
fprintf (FILE, "\tjl r14\n"); \
} \
} \
fprintf (FILE, "\taddi sp,#4\n"); \
} \
while (0)
/* Trampolines. */

/* On the M32R, the trampoline is:

mv r7, lr -> bl L1 ; 178e 7e01
L1: add3 r6, lr, #L2-L1 ; 86ae 000c (L2 - L1 = 12)
mv lr, r7 -> ld r7,@r6+ ; 1e87 27e6
ld r6, @r6 -> jmp r6 ; 26c6 1fc6
L2: .word STATIC
.word FUNCTION */

#ifndef CACHE_FLUSH_FUNC
#define CACHE_FLUSH_FUNC "_flush_cache"
#endif
#ifndef CACHE_FLUSH_TRAP
#define CACHE_FLUSH_TRAP 12
#endif

/* Length in bytes of the trampoline for entering a nested function.
   Four instruction words plus the two data words at L2.  */
#define TRAMPOLINE_SIZE 24

/* Emit RTL insns to initialize the variable parts of a trampoline.
FNADDR is an RTX for the address of the function's pure code.
CXT is an RTX for the static chain value for the function.
The four instruction-word constants are byte-swapped variants of the
encodings shown in the diagram above; after storing them, the static
chain and target address are written, and finally the icache is
flushed either via a trap or a library call.  */
#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \
do \
{ \
emit_move_insn (gen_rtx_MEM (SImode, plus_constant (TRAMP, 0)), \
GEN_INT \
(TARGET_LITTLE_ENDIAN ? 0x017e8e17 : 0x178e7e01)); \
emit_move_insn (gen_rtx_MEM (SImode, plus_constant (TRAMP, 4)), \
GEN_INT \
(TARGET_LITTLE_ENDIAN ? 0x0c00ae86 : 0x86ae000c)); \
emit_move_insn (gen_rtx_MEM (SImode, plus_constant (TRAMP, 8)), \
GEN_INT \
(TARGET_LITTLE_ENDIAN ? 0xe627871e : 0x1e8727e6)); \
emit_move_insn (gen_rtx_MEM (SImode, plus_constant (TRAMP, 12)), \
GEN_INT \
(TARGET_LITTLE_ENDIAN ? 0xc616c626 : 0x26c61fc6)); \
emit_move_insn (gen_rtx_MEM (SImode, plus_constant (TRAMP, 16)), \
(CXT)); \
emit_move_insn (gen_rtx_MEM (SImode, plus_constant (TRAMP, 20)), \
(FNADDR)); \
if (m32r_cache_flush_trap >= 0) \
emit_insn (gen_flush_icache (validize_mem (gen_rtx_MEM (SImode, TRAMP)),\
GEN_INT (m32r_cache_flush_trap) )); \
else if (m32r_cache_flush_func && m32r_cache_flush_func[0]) \
emit_library_call (m32r_function_symbol (m32r_cache_flush_func), \
0, VOIDmode, 3, TRAMP, Pmode, \
GEN_INT (TRAMPOLINE_SIZE), SImode, \
GEN_INT (3), SImode); \
} \
while (0)
 
/* Addressing modes, and classification of registers for them. */

#define RETURN_ADDR_RTX(COUNT, FRAME) m32r_return_addr (COUNT)

#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM)

/* Maximum number of registers that can appear in a valid memory address. */
#define MAX_REGS_PER_ADDRESS 1

/* We have post-inc load and pre-dec,pre-inc store,
but only for 4 byte vals. */
#define HAVE_PRE_DECREMENT 1
#define HAVE_PRE_INCREMENT 1
#define HAVE_POST_INCREMENT 1

/* Recognize any constant value that is a valid address. */
#define CONSTANT_ADDRESS_P(X) \
( GET_CODE (X) == LABEL_REF \
|| GET_CODE (X) == SYMBOL_REF \
|| GET_CODE (X) == CONST_INT \
|| (GET_CODE (X) == CONST \
&& ! (flag_pic && ! m32r_legitimate_pic_operand_p (X))))

/* Nonzero if the constant value X is a legitimate general operand.
We don't allow (plus symbol large-constant) as the relocations can't
describe it. INTVAL > 32767 handles both 16 bit and 24 bit relocations.
We allow all CONST_DOUBLE's as the md file patterns will force the
constant to memory if they can't handle them. */

#define LEGITIMATE_CONSTANT_P(X) \
(! (GET_CODE (X) == CONST \
&& GET_CODE (XEXP (X, 0)) == PLUS \
&& GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
&& GET_CODE (XEXP (XEXP (X, 0), 1)) == CONST_INT \
&& (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (X, 0), 1)) > 32767))
 
/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
and check its validity for a certain class.
We have two alternate definitions for each of them.
The usual definition accepts all pseudo regs; the other rejects
them unless they have been allocated suitable hard regs.
The symbol REG_OK_STRICT causes the latter definition to be used.

Most source files want to accept pseudo regs in the hope that
they will get allocated to the class that the insn wants them to be in.
Source files for reload pass need to be strict.
After reload, it makes no difference, since pseudo regs have
been eliminated by then. */

#ifdef REG_OK_STRICT

/* Nonzero if X is a hard reg that can be used as a base reg. */
#define REG_OK_FOR_BASE_P(X) GPR_P (REGNO (X))
/* Nonzero if X is a hard reg that can be used as an index. */
#define REG_OK_FOR_INDEX_P(X) REG_OK_FOR_BASE_P (X)

#else

/* Nonzero if X is a hard reg that can be used as a base reg
or if it is a pseudo reg. */
/* The argument pointer is accepted as well here; presumably it is
eliminated before strict checking applies -- NOTE(review): confirm. */
#define REG_OK_FOR_BASE_P(X) \
(GPR_P (REGNO (X)) \
|| (REGNO (X)) == ARG_POINTER_REGNUM \
|| REGNO (X) >= FIRST_PSEUDO_REGISTER)
/* Nonzero if X is a hard reg that can be used as an index
or if it is a pseudo reg. */
#define REG_OK_FOR_INDEX_P(X) REG_OK_FOR_BASE_P (X)

#endif
 
/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
that is a valid memory address for an instruction.
The MODE argument is the machine mode for the MEM expression
that wants to use this address. */

/* Local to this file. */
/* True if X is a REG rtx acceptable as a base register; strictness of
the register check follows REG_OK_STRICT via REG_OK_FOR_BASE_P. */
#define RTX_OK_FOR_BASE_P(X) (REG_P (X) && REG_OK_FOR_BASE_P (X))

/* Local to this file. */
/* True if X is a CONST_INT that fits in a signed 16 bit displacement. */
#define RTX_OK_FOR_OFFSET_P(X) \
(GET_CODE ((X)) == CONST_INT && INT16_P (INTVAL ((X))))

/* Local to this file. */
/* reg + signed 16 bit displacement. MODE is unused, but kept so that
all the address predicates here share one signature. */
#define LEGITIMATE_OFFSET_ADDRESS_P(MODE, X) \
(GET_CODE ((X)) == PLUS \
&& RTX_OK_FOR_BASE_P (XEXP ((X), 0)) \
&& RTX_OK_FOR_OFFSET_P (XEXP ((X), 1)))

/* Local to this file. */
/* For LO_SUM addresses, do not allow them if the MODE is > 1 word,
since more than one instruction will be required.
Note: MODE is parenthesized so an expression argument with a
lower-precedence operator cannot change the comparison. */
#define LEGITIMATE_LO_SUM_ADDRESS_P(MODE, X) \
(GET_CODE ((X)) == LO_SUM \
&& ((MODE) != BLKmode && GET_MODE_SIZE (MODE) <= UNITS_PER_WORD)\
&& RTX_OK_FOR_BASE_P (XEXP ((X), 0)) \
&& CONSTANT_P (XEXP ((X), 1)))

/* Local to this file. */
/* Is this a load and increment operation. */
#define LOAD_POSTINC_P(MODE, X) \
(((MODE) == SImode || (MODE) == SFmode) \
&& GET_CODE ((X)) == POST_INC \
&& GET_CODE (XEXP ((X), 0)) == REG \
&& RTX_OK_FOR_BASE_P (XEXP ((X), 0)))

/* Local to this file. */
/* Is this an increment/decrement and store operation. */
#define STORE_PREINC_PREDEC_P(MODE, X) \
(((MODE) == SImode || (MODE) == SFmode) \
&& (GET_CODE ((X)) == PRE_INC || GET_CODE ((X)) == PRE_DEC) \
&& GET_CODE (XEXP ((X), 0)) == REG \
&& RTX_OK_FOR_BASE_P (XEXP ((X), 0)))

/* Accept: plain base reg; reg+disp16; LO_SUM; and the 4-byte
post-inc load / pre-inc,pre-dec store forms above. */
#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \
do \
{ \
if (RTX_OK_FOR_BASE_P ((X))) \
goto ADDR; \
if (LEGITIMATE_OFFSET_ADDRESS_P ((MODE), (X))) \
goto ADDR; \
if (LEGITIMATE_LO_SUM_ADDRESS_P ((MODE), (X))) \
goto ADDR; \
if (LOAD_POSTINC_P ((MODE), (X))) \
goto ADDR; \
if (STORE_PREINC_PREDEC_P ((MODE), (X))) \
goto ADDR; \
} \
while (0)
 
/* Try machine-dependent ways of modifying an illegitimate address
to be legitimate. If we find one, return the new, valid address.
This macro is used in only one place: `memory_address' in explow.c.

OLDX is the address as it was before break_out_memory_refs was called.
In some cases it is useful to look at this to decide what needs to be done.

MODE and WIN are passed so that this macro can use
GO_IF_LEGITIMATE_ADDRESS.

It is always safe for this macro to do nothing. It exists to recognize
opportunities to optimize the output. */

/* Only PIC addresses are rewritten here; OLDX is deliberately unused. */
#define LEGITIMIZE_ADDRESS(X, OLDX, MODE, WIN) \
do \
{ \
if (flag_pic) \
(X) = m32r_legitimize_pic_address (X, NULL_RTX); \
if (memory_address_p (MODE, X)) \
goto WIN; \
} \
while (0)

/* Go to LABEL if ADDR (a legitimate address expression)
has an effect that depends on the machine mode it is used for. */
/* The side-effect addresses (pre/post modify) and LO_SUM are only
valid for certain modes, so they are mode-dependent. */
#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR, LABEL) \
do \
{ \
if ( GET_CODE (ADDR) == PRE_DEC \
|| GET_CODE (ADDR) == PRE_INC \
|| GET_CODE (ADDR) == POST_INC \
|| GET_CODE (ADDR) == LO_SUM) \
goto LABEL; \
} \
while (0)
/* Condition code usage. */

/* Return nonzero if SELECT_CC_MODE will never return MODE for a
floating point inequality comparison. */
#define REVERSIBLE_CC_MODE(MODE) 1 /*???*/
/* Costs. */

/* Compute extra cost of moving data between one register class
and another. */
/* A flat cost: every inter-class move is treated as equally cheap. */
#define REGISTER_MOVE_COST(MODE, CLASS1, CLASS2) 2

/* Compute the cost of moving data between registers and memory. */
/* Memory is 3 times as expensive as registers.
??? Is that the right way to look at it? */
#define MEMORY_MOVE_COST(MODE,CLASS,IN_P) \
(GET_MODE_SIZE (MODE) <= UNITS_PER_WORD ? 6 : 12)

/* The cost of a branch insn. */
/* A value of 2 here causes GCC to avoid using branches in comparisons like
while (a < N && a). Branches aren't that expensive on the M32R so
we define this as 1. Defining it as 2 had a heavy hit in fp-bit.c. */
#define BRANCH_COST ((TARGET_BRANCH_COST) ? 2 : 1)

/* Nonzero if access to memory by bytes is slow and undesirable.
For RISC chips, it means that access to memory by bytes is no
better than access by words when possible, so grab a whole word
and maybe make use of that. */
#define SLOW_BYTE_ACCESS 1

/* Define this macro if it is as good or better to call a constant
function address than to call an address kept in a register. */
#define NO_FUNCTION_CSE
/* Section selection. */

#define TEXT_SECTION_ASM_OP "\t.section .text"
#define DATA_SECTION_ASM_OP "\t.section .data"
#define BSS_SECTION_ASM_OP "\t.section .bss"

/* Define this macro if jump tables (for tablejump insns) should be
output in the text section, along with the assembler instructions.
Otherwise, the readonly data section is used.
This macro is irrelevant if there is no separate readonly data section. */
/* Only PIC jump tables go in .text here. */
#define JUMP_TABLES_IN_TEXT_SECTION (flag_pic)
/* Position Independent Code. */

/* The register number of the register used to address a table of static
data addresses in memory. In some cases this register is defined by a
processor's ``application binary interface'' (ABI). When this macro
is defined, RTL is generated for this register once, as with the stack
pointer and frame pointer registers. If this macro is not defined, it
is up to the machine-dependent files to allocate such a register (if
necessary). */
#define PIC_OFFSET_TABLE_REGNUM 12

/* Define this macro if the register defined by PIC_OFFSET_TABLE_REGNUM is
clobbered by calls. Do not define this macro if PIC_OFFSET_TABLE_REGNUM
is not defined. */
/* This register is call-saved on the M32R. */
/*#define PIC_OFFSET_TABLE_REG_CALL_CLOBBERED*/

/* A C expression that is nonzero if X is a legitimate immediate
operand on the target machine when generating position independent code.
You can assume that X satisfies CONSTANT_P, so you need not
check this. You can also assume `flag_pic' is true, so you need not
check it either. You need not define this macro if all constants
(including SYMBOL_REF) can be immediate operands when generating
position independent code. */
#define LEGITIMATE_PIC_OPERAND_P(X) m32r_legitimate_pic_operand_p (X)
/* Control the assembler format that we output. */

/* A C string constant describing how to begin a comment in the target
assembler language. The compiler assumes that the comment will
end at the end of the line. */
#define ASM_COMMENT_START ";"

/* Output to assembler file text saying following lines
may contain character constants, extra white space, comments, etc. */
#define ASM_APP_ON ""

/* Output to assembler file text saying following lines
no longer contain unusual constructs. */
#define ASM_APP_OFF ""

/* Globalizing directive for a label. */
#define GLOBAL_ASM_OP "\t.global\t"

/* We do not use DBX_LINES_FUNCTION_RELATIVE or
dbxout_stab_value_internal_label_diff here because
we need to use .debugsym for the line label. */

/* Emits a stab whose value is "LMn - function start", followed by a
.debugsym directive for LMn; counter is post-incremented so each
line label is unique. */
#define DBX_OUTPUT_SOURCE_LINE(file, line, counter) \
do \
{ \
const char * begin_label = \
XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0); \
char label[64]; \
ASM_GENERATE_INTERNAL_LABEL (label, "LM", counter); \
\
dbxout_begin_stabn_sline (line); \
assemble_name (file, label); \
putc ('-', file); \
assemble_name (file, begin_label); \
fputs ("\n\t.debugsym ", file); \
assemble_name (file, label); \
putc ('\n', file); \
counter += 1; \
} \
while (0)
 
/* How to refer to registers in assembler output.
This sequence is indexed by compiler's hard-register-number (see above). */
#ifndef SUBTARGET_REGISTER_NAMES
#define SUBTARGET_REGISTER_NAMES
#endif

/* Note there is no comma after "a0": a non-empty
SUBTARGET_REGISTER_NAMES must supply its own leading comma. */
#define REGISTER_NAMES \
{ \
"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
"r8", "r9", "r10", "r11", "r12", "fp", "lr", "sp", \
"ap", "cbit", "a0" \
SUBTARGET_REGISTER_NAMES \
}

/* If defined, a C initializer for an array of structures containing
a name and a register number. This macro defines additional names
for hard registers, thus allowing the `asm' option in declarations
to refer to registers using alternate names. */
#ifndef SUBTARGET_ADDITIONAL_REGISTER_NAMES
#define SUBTARGET_ADDITIONAL_REGISTER_NAMES
#endif

/* Numeric aliases for fp/lr/sp so asm("r13") etc. still work. */
#define ADDITIONAL_REGISTER_NAMES \
{ \
/*{ "gp", GP_REGNUM },*/ \
{ "r13", FRAME_POINTER_REGNUM }, \
{ "r14", RETURN_ADDR_REGNUM }, \
{ "r15", STACK_POINTER_REGNUM }, \
SUBTARGET_ADDITIONAL_REGISTER_NAMES \
}
 
/* A C expression which evaluates to true if CODE is a valid
punctuation character for use in the `PRINT_OPERAND' macro. */
/* Table is filled in by m32r.c (see m32r_punct_chars). */
extern char m32r_punct_chars[256];
#define PRINT_OPERAND_PUNCT_VALID_P(CHAR) \
m32r_punct_chars[(unsigned char) (CHAR)]

/* Print operand X (an rtx) in assembler syntax to file FILE.
CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
For `%' followed by punctuation, CODE is the punctuation and X is null. */
#define PRINT_OPERAND(FILE, X, CODE) \
m32r_print_operand (FILE, X, CODE)

/* A C compound statement to output to stdio stream STREAM the
assembler syntax for an instruction operand that is a memory
reference whose address is ADDR. ADDR is an RTL expression. */
#define PRINT_OPERAND_ADDRESS(FILE, ADDR) \
m32r_print_operand_address (FILE, ADDR)

/* If defined, C string expressions to be used for the `%R', `%L',
`%U', and `%I' options of `asm_fprintf' (see `final.c'). These
are useful when a single `md' file must support multiple assembler
formats. In that case, the various `tm.h' files can define these
macros differently. */
#define REGISTER_PREFIX ""
#define LOCAL_LABEL_PREFIX ".L"
#define USER_LABEL_PREFIX ""
#define IMMEDIATE_PREFIX "#"
 
/* This is how to output an element of a case-vector that is absolute. */
#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \
do \
{ \
char label[30]; \
ASM_GENERATE_INTERNAL_LABEL (label, "L", VALUE); \
fprintf (FILE, "\t.word\t"); \
assemble_name (FILE, label); \
fprintf (FILE, "\n"); \
} \
while (0)

/* This is how to output an element of a case-vector that is relative. */
/* Emits ".word Lvalue-Lrel"; the label buffer is reused for the
second (REL) label after the first has been printed. */
#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL)\
do \
{ \
char label[30]; \
ASM_GENERATE_INTERNAL_LABEL (label, "L", VALUE); \
fprintf (FILE, "\t.word\t"); \
assemble_name (FILE, label); \
fprintf (FILE, "-"); \
ASM_GENERATE_INTERNAL_LABEL (label, "L", REL); \
assemble_name (FILE, label); \
fprintf (FILE, "\n"); \
} \
while (0)
 
/* The desired alignment for the location counter at the beginning
of a loop. */
/* On the M32R, align loops to 32 byte boundaries (cache line size)
if -malign-loops. */
/* The value is the log2 of the alignment, so 5 means 2**5 = 32 bytes. */
#define LOOP_ALIGN(LABEL) (TARGET_ALIGN_LOOPS ? 5 : 0)

/* Define this to be the maximum number of insns to move around when moving
a loop test from the top of a loop to the bottom
and seeing whether to duplicate it. The default is thirty.

Loop unrolling currently doesn't like this optimization, so
disable doing if we are unrolling loops and saving space. */
#define LOOP_TEST_THRESHOLD (optimize_size \
&& !flag_unroll_loops \
&& !flag_unroll_all_loops ? 2 : 30)

/* This is how to output an assembler line
that says to advance the location counter
to a multiple of 2**LOG bytes. */
/* .balign is used to avoid confusion. */
#define ASM_OUTPUT_ALIGN(FILE,LOG) \
do \
{ \
if ((LOG) != 0) \
fprintf (FILE, "\t.balign %d\n", 1 << (LOG)); \
} \
while (0)
 
/* Like `ASM_OUTPUT_COMMON' except takes the required alignment as a
separate, explicit argument. If you define this macro, it is used in
place of `ASM_OUTPUT_COMMON', and gives you more flexibility in
handling the required alignment of the variable. The alignment is
specified as the number of bits. */

#define SCOMMON_ASM_OP "\t.scomm\t"

/* Small common objects (0 < SIZE <= -G value) are emitted with .scomm
unless -msdata=none; everything else uses COMMON_ASM_OP. */
/* NOTE(review): the "%u" conversions receive an int-cast SIZE and the
raw ALIGN expression -- benign for realistic values, but the types do
not strictly match the format; worth confirming. */
#undef ASM_OUTPUT_ALIGNED_COMMON
#define ASM_OUTPUT_ALIGNED_COMMON(FILE, NAME, SIZE, ALIGN) \
do \
{ \
if (! TARGET_SDATA_NONE \
&& (SIZE) > 0 && (SIZE) <= g_switch_value) \
fprintf ((FILE), "%s", SCOMMON_ASM_OP); \
else \
fprintf ((FILE), "%s", COMMON_ASM_OP); \
assemble_name ((FILE), (NAME)); \
fprintf ((FILE), ",%u,%u\n", (int)(SIZE), (ALIGN) / BITS_PER_UNIT);\
} \
while (0)

/* Likewise for uninitialized data: small objects go to .sbss,
the rest to the normal bss section. */
#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \
do \
{ \
if (! TARGET_SDATA_NONE \
&& (SIZE) > 0 && (SIZE) <= g_switch_value) \
switch_to_section (get_named_section (NULL, ".sbss", 0)); \
else \
switch_to_section (bss_section); \
ASM_OUTPUT_ALIGN (FILE, floor_log2 (ALIGN / BITS_PER_UNIT)); \
last_assemble_variable_decl = DECL; \
ASM_DECLARE_OBJECT_NAME (FILE, NAME, DECL); \
ASM_OUTPUT_SKIP (FILE, SIZE ? SIZE : 1); \
} \
while (0)
/* Debugging information. */

/* Generate DBX and DWARF debugging information. */
#define DBX_DEBUGGING_INFO 1
#define DWARF2_DEBUGGING_INFO 1

/* Use DWARF2 debugging info by default. */
#undef PREFERRED_DEBUGGING_TYPE
#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG

/* Turn off splitting of long stabs. */
#define DBX_CONTIN_LENGTH 0
/* Miscellaneous. */

/* Specify the machine mode that this machine uses
for the index in the tablejump instruction. */
/* Pmode is SImode below, so both arms are currently identical. */
#define CASE_VECTOR_MODE (flag_pic ? SImode : Pmode)

/* Define if operations between registers always perform the operation
on the full register even if a narrower mode is specified. */
#define WORD_REGISTER_OPERATIONS

/* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD
will either zero-extend or sign-extend. The value of this macro should
be the code that says which one of the two operations is implicitly
done, UNKNOWN if none. */
#define LOAD_EXTEND_OP(MODE) ZERO_EXTEND

/* Max number of bytes we can move from memory
to memory in one reasonably fast instruction. */
#define MOVE_MAX 4

/* Define this to be nonzero if shift instructions ignore all but the low-order
few bits. */
#define SHIFT_COUNT_TRUNCATED 1

/* Value is 1 if truncating an integer of INPREC bits to OUTPREC bits
is done just by pretending it is already truncated. */
#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1

/* Specify the machine mode that pointers have.
After generation of rtl, the compiler makes no further distinction
between pointers and any other objects of this machine mode. */
/* ??? The M32R doesn't have full 32 bit pointers, but making this PSImode has
its own problems (you have to add extendpsisi2 and truncsipsi2).
Try to avoid it. */
#define Pmode SImode

/* A function address in a call instruction. */
#define FUNCTION_MODE SImode
/* Define the information needed to generate branch and scc insns. This is
stored from the compare operation. Note that we can't use "rtx" here
since it hasn't been defined! */
extern struct rtx_def * m32r_compare_op0;
extern struct rtx_def * m32r_compare_op1;

/* M32R function types. */
enum m32r_function_type
{
M32R_FUNCTION_UNKNOWN, M32R_FUNCTION_NORMAL, M32R_FUNCTION_INTERRUPT
};

#define M32R_INTERRUPT_P(TYPE) ((TYPE) == M32R_FUNCTION_INTERRUPT)

/* The maximum number of bytes to copy using pairs of load/store instructions.
If a block is larger than this then a loop will be generated to copy
MAX_MOVE_BYTES chunks at a time. The value of 32 is a semi-arbitrary choice.
A customer uses Dhrystone as their benchmark, and Dhrystone has a 31 byte
string copy in it. */
#define MAX_MOVE_BYTES 32
/initfini.c
0,0 → 1,173
/* .init/.fini section handling + C++ global constructor/destructor handling.
This file is based on crtstuff.c, sol2-crti.asm, sol2-crtn.asm.
 
Copyright (C) 1996, 1997, 1998, 2006 Free Software Foundation, Inc.
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
 
In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file. (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)
 
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
 
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA. */
 
/* Declare a pointer to void function type. */
typedef void (*func_ptr) (void);

#ifdef CRT_INIT

/* NOTE: In order to be able to support SVR4 shared libraries, we arrange
to have one set of symbols { __CTOR_LIST__, __DTOR_LIST__, __CTOR_END__,
__DTOR_END__ } per root executable and also one set of these symbols
per shared library. So in any given whole process image, we may have
multiple definitions of each of these symbols. In order to prevent
these definitions from conflicting with one another, and in order to
ensure that the proper lists are used for the initialization/finalization
of each individual shared library (respectively), we give these symbols
only internal (i.e. `static') linkage, and we also make it a point to
refer to only the __CTOR_END__ symbol in crtfini.o and the __DTOR_LIST__
symbol in crtinit.o, where they are defined. */

/* Head of the constructor table; -1 is the SVR4 sentinel word. */
static func_ptr __CTOR_LIST__[1]
__attribute__ ((used, section (".ctors")))
= { (func_ptr) (-1) };

/* Head of the destructor table, likewise marked with -1. */
static func_ptr __DTOR_LIST__[1]
__attribute__ ((used, section (".dtors")))
= { (func_ptr) (-1) };
 
/* Run all the global destructors on exit from the program. */
/* The destructor table begins with an SVR4 -1 sentinel word and is
null-terminated, so we skip the first slot and walk forward, calling
each per-compilation-unit destructor until we reach the final null.

This function MUST be static: one copy exists in every executable and
shared library, and each copy must refer to its own __DTOR_LIST__. */

static void __do_global_dtors (void)
asm ("__do_global_dtors") __attribute__ ((used, section (".text")));

static void
__do_global_dtors (void)
{
  func_ptr *entry = __DTOR_LIST__ + 1;

  while (*entry != 0)
    {
      (*entry) ();
      entry++;
    }
}
 
/* .init section start.
This must appear at the start of the .init section. */
/* Saves fp/lr, then loads the address of __fini into r0 and calls
atexit so the destructors run at program exit. */

asm ("\n\
.section .init,\"ax\",@progbits\n\
.balign 4\n\
.global __init\n\
__init:\n\
push fp\n\
push lr\n\
mv fp,sp\n\
seth r0, #shigh(__fini)\n\
add3 r0, r0, #low(__fini)\n\
bl atexit\n\
.fillinsn\n\
");
 
/* .fini section start.
This must appear at the start of the .fini section. */
/* Saves fp/lr and runs the destructor list via __do_global_dtors. */

asm ("\n\
.section .fini,\"ax\",@progbits\n\
.balign 4\n\
.global __fini\n\
__fini:\n\
push fp\n\
push lr\n\
mv fp,sp\n\
bl __do_global_dtors\n\
.fillinsn\n\
");
 
#endif /* CRT_INIT */
 
#ifdef CRT_FINI

/* Put a word containing zero at the end of each of our two lists of function
addresses. Note that the words defined here go into the .ctors and .dtors
sections of the crtend.o file, and since that file is always linked in
last, these words naturally end up at the very ends of the two lists
contained in these two sections. */

/* Zero terminator of the .ctors list. */
static func_ptr __CTOR_END__[1]
__attribute__ ((used, section (".ctors")))
= { (func_ptr) 0 };

/* Zero terminator of the .dtors list. */
static func_ptr __DTOR_END__[1]
__attribute__ ((used, section (".dtors")))
= { (func_ptr) 0 };
 
/* Run all global constructors for the program. */
/* Constructors run in reverse order: starting just below the zero
terminator (__CTOR_END__), walk backwards calling each entry until
the SVR4 -1 sentinel at the head of the table is reached. */

static void __do_global_ctors (void)
asm ("__do_global_ctors") __attribute__ ((used, section (".text")));

static void
__do_global_ctors (void)
{
  func_ptr *entry = __CTOR_END__ - 1;

  while (*entry != (func_ptr) -1)
    {
      (*entry) ();
      entry--;
    }
}
 
/* .init section end.
This must live at the end of the .init section. */
/* Runs the constructors, then unwinds the frame pushed by the
__init prologue in the CRT_INIT fragment and returns. */

asm ("\n\
.section .init,\"ax\",@progbits\n\
bl __do_global_ctors\n\
mv sp,fp\n\
pop lr\n\
pop fp\n\
jmp lr\n\
.fillinsn\n\
");
 
/* .fini section end.
This must live at the end of the .fini section. */
/* Unwinds the frame pushed by the __fini prologue and returns. */

asm ("\n\
.section .fini,\"ax\",@progbits\n\
mv sp,fp\n\
pop lr\n\
pop fp\n\
jmp lr\n\
.fillinsn\n\
");
 
#endif /* CRT_FINI */
/libgcc-glibc.ver
0,0 → 1,20
# In order to work around the very problems that force us to now generally
# create a libgcc.so, glibc reexported a number of routines from libgcc.a.
# By now choosing the same version tags for these specific routines, we
# maintain enough binary compatibility to allow future versions of glibc
# to defer implementation of these routines to libgcc.so via DT_AUXILIARY.
 
# Note that we cannot use the default libgcc-glibc.ver file on m32r,
# because GLIBC_2.0 does not exist on this architecture, as the first
# ever glibc release on the platform was GLIBC_2.3.
 
%inherit GCC_3.0 GLIBC_2.3
GLIBC_2.3 {
__register_frame
__register_frame_table
__deregister_frame
__register_frame_info
__deregister_frame_info
__frame_state_for
__register_frame_info_table
}
/m32r-protos.h
0,0 → 1,69
/* Prototypes for m32r.c functions used in the md file & elsewhere.
Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007
Free Software Foundation, Inc.
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 3, or (at your
option) any later version.
 
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
 
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
 
/* Function prototypes that cannot exist in m32r.h due to dependency
complications. */
/* Mmode is a local shorthand; it is #undef'd at the end of this file. */
#define Mmode enum machine_mode

extern void m32r_init (void);
extern void m32r_init_expanders (void);
extern unsigned m32r_compute_frame_size (int);
extern void m32r_expand_prologue (void);
extern int direct_return (void);
extern void m32r_load_pic_register (void);

/* Prototypes that require tree nodes. */
#ifdef TREE_CODE
extern enum m32r_function_type m32r_compute_function_type (tree);
#endif /* TREE_CODE */

/* Prototypes that require rtx arguments. */
#ifdef RTX_CODE
extern int easy_di_const (rtx);
extern int easy_df_const (rtx);
extern rtx gen_compare (enum rtx_code, rtx, rtx, int);
extern rtx gen_split_move_double (rtx *);
extern int m32r_address_code (rtx);
extern void m32r_initialize_trampoline (rtx, rtx, rtx);
extern int zero_and_one (rtx, rtx);
extern char * emit_cond_move (rtx *, rtx);
extern void m32r_output_block_move (rtx, rtx *);
extern int m32r_expand_block_move (rtx *);
extern void m32r_print_operand (FILE *, rtx, int);
extern void m32r_print_operand_address (FILE *, rtx);
extern int m32r_not_same_reg (rtx, rtx);
extern int m32r_hard_regno_rename_ok (unsigned int, unsigned int);
extern int m32r_legitimate_pic_operand_p (rtx);
extern rtx m32r_legitimize_pic_address (rtx, rtx);
extern rtx m32r_return_addr (int);
extern rtx m32r_function_symbol (const char *);

/* Operand predicates; these additionally require machine modes. */
#ifdef HAVE_MACHINE_MODES
extern int call_operand (rtx, Mmode);
extern int small_data_operand (rtx, Mmode);
extern int addr24_operand (rtx, Mmode);
extern int addr32_operand (rtx, Mmode);
extern int call26_operand (rtx, Mmode);
extern int memreg_operand (rtx, Mmode);
extern int small_insn_p (rtx, Mmode);

#endif /* HAVE_MACHINE_MODES */

#endif /* RTX_CODE */

#undef Mmode

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.