OpenCores
URL https://opencores.org/ocsvn/openrisc/openrisc/trunk

Subversion Repositories openrisc

Compare Revisions

  • This comparison shows the changes necessary to convert path
    /openrisc/trunk/gnu-old/gcc-4.2.2/gcc/config/arc
    from Rev 154 to Rev 816
    Reverse comparison

Rev 154 → Rev 816

/arc-protos.h
0,0 → 1,64
/* Definitions of target machine for GNU compiler, Argonaut ARC cpu.
Copyright (C) 2000, 2004, 2007 Free Software Foundation, Inc.
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
 
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
 
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
 
/* varargs support: implements the va_start hook for the ARC ABI
   (implementation presumably lives in arc.c — confirm).  */
extern void arc_va_start (tree, rtx);
 
/* Pick the condition-code mode (CC, CCZN, ...) appropriate for comparing
   the two rtx operands with the given comparison code.  */
extern enum machine_mode arc_select_cc_mode (enum rtx_code, rtx, rtx);
 
/* Define the function that build the compare insn for scc and bcc. */
extern struct rtx_def *gen_compare_reg (enum rtx_code, rtx, rtx);
 
/* Declarations for various fns used in the .md file. */
extern const char *output_shift (rtx *);
 
/* Operand predicates referenced by the .md patterns.  Each returns
   nonzero when the rtx is acceptable in the given machine mode.  */
extern int symbolic_operand (rtx, enum machine_mode);
extern int arc_double_limm_p (rtx);
extern int arc_eligible_for_epilogue_delay (rtx, int);
extern void arc_initialize_trampoline (rtx, rtx, rtx);
/* Assembler-output helpers for operands, addresses, and the
   conditional-execution state machine run from final_prescan_insn.  */
extern void arc_print_operand (FILE *, rtx, int);
extern void arc_print_operand_address (FILE *, rtx);
extern void arc_final_prescan_insn (rtx, rtx *, int);
extern int call_address_operand (rtx, enum machine_mode);
extern int call_operand (rtx, enum machine_mode);
extern int symbolic_memory_operand (rtx, enum machine_mode);
extern int short_immediate_operand (rtx, enum machine_mode);
extern int long_immediate_operand (rtx, enum machine_mode);
extern int long_immediate_loadstore_operand (rtx, enum machine_mode);
extern int move_src_operand (rtx, enum machine_mode);
extern int move_double_src_operand (rtx, enum machine_mode);
extern int move_dest_operand (rtx, enum machine_mode);
extern int load_update_operand (rtx, enum machine_mode);
extern int store_update_operand (rtx, enum machine_mode);
extern int nonvol_nonimm_operand (rtx, enum machine_mode);
extern int const_sint32_operand (rtx, enum machine_mode);
extern int const_uint32_operand (rtx, enum machine_mode);
extern int proper_comparison_operator (rtx, enum machine_mode);
extern int shift_operator (rtx, enum machine_mode);
 
/* Classify a function (e.g. interrupt handler vs. normal) from its decl.  */
extern enum arc_function_type arc_compute_function_type (tree);
 
 
/* Prologue/epilogue and frame-layout support.  */
extern void arc_init (void);
extern unsigned int arc_compute_frame_size (int);
extern void arc_save_restore (FILE *, const char *, unsigned int,
unsigned int, const char *);
extern int arc_delay_slots_for_epilogue (void);
/* Hooks used by the conditional-execution fsm to track branches that
   final has deleted (see arc_final_prescan_insn).  */
extern void arc_ccfsm_at_label (const char *, int);
extern int arc_ccfsm_branch_deleted_p (void);
extern void arc_ccfsm_record_branch_deleted (void);
/arc.md
0,0 → 1,1637
;; Machine description of the Argonaut ARC cpu for GNU C compiler
;; Copyright (C) 1994, 1997, 1998, 1999, 2000, 2004, 2005, 2007
;; Free Software Foundation, Inc.
 
;; This file is part of GCC.
 
;; GCC is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
 
;; GCC is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;; GNU General Public License for more details.
 
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3. If not see
;; <http://www.gnu.org/licenses/>.
 
;; See file "rtl.def" for documentation on define_insn, match_*, et. al.
 
;; ??? This is an old port, and is undoubtedly suffering from bit rot.
;; Insn type. Used to default other attribute values.
 
;; Classify every insn; patterns that give no explicit type default to
;; "binary" (the const_string below).
(define_attr "type"
"move,load,store,cmove,unary,binary,compare,shift,mul,uncond_branch,branch,call,call_no_delay_slot,multi,misc"
(const_string "binary"))
 
;; Length (in # of insns, long immediate constants counted too).
;; ??? There's a nasty interaction between the conditional execution fsm
;; and insn lengths: insns with shimm values cannot be conditionally executed.
;; An insn is 2 units long when it needs a long-immediate word, else 1.
(define_attr "length" ""
(cond [(eq_attr "type" "load")
(if_then_else (match_operand 1 "long_immediate_loadstore_operand" "")
(const_int 2) (const_int 1))
 
(eq_attr "type" "store")
(if_then_else (match_operand 0 "long_immediate_loadstore_operand" "")
(const_int 2) (const_int 1))
 
(eq_attr "type" "move,unary,compare")
(if_then_else (match_operand 1 "long_immediate_operand" "")
(const_int 2) (const_int 1))
 
(eq_attr "type" "binary,mul")
(if_then_else (match_operand 2 "long_immediate_operand" "")
(const_int 2) (const_int 1))
 
;; cmove is the other way round: a register operand keeps it short.
(eq_attr "type" "cmove")
(if_then_else (match_operand 2 "register_operand" "")
(const_int 1) (const_int 2))
 
(eq_attr "type" "multi") (const_int 2)
]
 
(const_int 1)))
 
;; The length here is the length of a single asm. Unfortunately it might be
;; 1 or 2 so we must allow for 2. That's ok though. How often will users
;; lament asm's not being put in delay slots?
(define_asm_attributes
[(set_attr "length" "2")
(set_attr "type" "multi")])
 
;; Condition codes: this one is used by final_prescan_insn to speed up
;; conditionalizing instructions. It saves having to scan the rtl to see if
;; it uses or alters the condition codes.
 
;; USE: This insn uses the condition codes (e.g.: a conditional branch).
;; CANUSE: This insn can use the condition codes (for conditional execution).
;; SET: All condition codes are set by this insn.
;; SET_ZN: the Z and N flags are set by this insn.
;; SET_ZNC: the Z, N, and C flags are set by this insn.
;; CLOB: The condition codes are set to unknown values by this insn.
;; NOCOND: This insn can't use and doesn't affect the condition codes.
 
;; Only short (length 1) unary/binary/move insns are conditionalizable,
;; matching the shimm restriction noted above.
(define_attr "cond" "use,canuse,set,set_zn,set_znc,clob,nocond"
(cond [(and (eq_attr "type" "unary,binary,move")
(eq_attr "length" "1"))
(const_string "canuse")
 
(eq_attr "type" "compare")
(const_string "set")
 
(eq_attr "type" "cmove,branch")
(const_string "use")
 
(eq_attr "type" "multi,misc")
(const_string "clob")
]
 
(const_string "nocond")))
;; Delay slots.
 
;; Anything short (length 1) that is not itself a transfer of control
;; may go in a delay slot.
(define_attr "in_delay_slot" "false,true"
(cond [(eq_attr "type" "uncond_branch,branch,call,call_no_delay_slot,multi")
(const_string "false")
]
 
(if_then_else (eq_attr "length" "1")
(const_string "true")
(const_string "false"))))
 
;; Calls and branches each have three delay slots, all unannulled
;; (no annul-true/annul-false alternatives given).
(define_delay (eq_attr "type" "call")
[(eq_attr "in_delay_slot" "true")
(eq_attr "in_delay_slot" "true")
(eq_attr "in_delay_slot" "true")])
 
(define_delay (eq_attr "type" "branch,uncond_branch")
[(eq_attr "in_delay_slot" "true")
(eq_attr "in_delay_slot" "true")
(eq_attr "in_delay_slot" "true")])
;; Scheduling description for the ARC
 
(define_cpu_unit "branch")
 
(define_insn_reservation "any_insn" 1 (eq_attr "type" "!load,compare,branch")
"nothing")
 
;; 1) A conditional jump cannot immediately follow the insn setting the flags.
;; This isn't a complete solution as it doesn't come with guarantees. That
;; is done in the branch patterns and in arc_print_operand. This exists to
;; avoid inserting a nop when we can.
 
;; A compare occupies the "branch" unit one cycle later, so a branch
;; (which needs that unit) cannot be scheduled immediately after it.
(define_insn_reservation "compare" 1 (eq_attr "type" "compare")
"nothing,branch")
 
(define_insn_reservation "branch" 1 (eq_attr "type" "branch")
"branch")
 
;; 2) References to loaded registers should wait a cycle.
 
;; Memory with load-delay of 1 (i.e., 2 cycle load).
 
(define_insn_reservation "memory" 2 (eq_attr "type" "load")
"nothing")
;; Move instructions.
 
;; QImode moves.  The expander legitimizes mem destinations by forcing
;; the source into a register (no mem=const / mem=mem forms exist).
(define_expand "movqi"
[(set (match_operand:QI 0 "general_operand" "")
(match_operand:QI 1 "general_operand" ""))]
""
"
{
/* Everything except mem = const or mem = mem can be done easily. */
 
if (GET_CODE (operands[0]) == MEM)
operands[1] = force_reg (QImode, operands[1]);
}")
 
;; Alternatives: reg<-reg/shimm, reg<-limm, reg<-mem (ldb), mem<-reg (stb).
(define_insn "*movqi_insn"
[(set (match_operand:QI 0 "move_dest_operand" "=r,r,r,m")
(match_operand:QI 1 "move_src_operand" "rI,Ji,m,r"))]
;; ??? Needed?
"register_operand (operands[0], QImode)
|| register_operand (operands[1], QImode)"
"@
mov%? %0,%1
mov%? %0,%1
ldb%U1%V1 %0,%1
stb%U0%V0 %1,%0"
[(set_attr "type" "move,move,load,store")])
 
;; ??? This may never match since there's no cmpqi insn.
 
;; Move that also sets Z/N from the sign-extended value (mov.f).
(define_insn "*movqi_set_cc_insn"
[(set (reg:CCZN 61) (compare:CCZN
(sign_extend:SI (match_operand:QI 1 "move_src_operand" "rIJi"))
(const_int 0)))
(set (match_operand:QI 0 "move_dest_operand" "=r")
(match_dup 1))]
""
"mov%?.f %0,%1"
[(set_attr "type" "move")
(set_attr "cond" "set_zn")])
 
;; HImode moves — same structure as QImode above.
(define_expand "movhi"
[(set (match_operand:HI 0 "general_operand" "")
(match_operand:HI 1 "general_operand" ""))]
""
"
{
/* Everything except mem = const or mem = mem can be done easily. */
 
if (GET_CODE (operands[0]) == MEM)
operands[1] = force_reg (HImode, operands[1]);
}")
 
(define_insn "*movhi_insn"
[(set (match_operand:HI 0 "move_dest_operand" "=r,r,r,m")
(match_operand:HI 1 "move_src_operand" "rI,Ji,m,r"))]
"register_operand (operands[0], HImode)
|| register_operand (operands[1], HImode)"
"@
mov%? %0,%1
mov%? %0,%1
ldw%U1%V1 %0,%1
stw%U0%V0 %1,%0"
[(set_attr "type" "move,move,load,store")])
 
;; ??? Will this ever match?
 
(define_insn "*movhi_set_cc_insn"
[(set (reg:CCZN 61) (compare:CCZN
(sign_extend:SI (match_operand:HI 1 "move_src_operand" "rIJi"))
(const_int 0)))
(set (match_operand:HI 0 "move_dest_operand" "=r")
(match_dup 1))]
;; ??? Needed?
"register_operand (operands[0], HImode)
|| register_operand (operands[1], HImode)"
"mov%?.f %0,%1"
[(set_attr "type" "move")
(set_attr "cond" "set_zn")])
 
;; SImode moves.  %S prints a symbolic/large constant source (see
;; arc_print_operand — confirm); %U/%V handle update/volatile markers.
(define_expand "movsi"
[(set (match_operand:SI 0 "general_operand" "")
(match_operand:SI 1 "general_operand" ""))]
""
"
{
/* Everything except mem = const or mem = mem can be done easily. */
 
if (GET_CODE (operands[0]) == MEM)
operands[1] = force_reg (SImode, operands[1]);
}")
 
(define_insn "*movsi_insn"
[(set (match_operand:SI 0 "move_dest_operand" "=r,r,r,m")
(match_operand:SI 1 "move_src_operand" "rI,GJi,m,r"))]
"register_operand (operands[0], SImode)
|| register_operand (operands[1], SImode)"
"@
mov%? %0,%1
mov%? %0,%S1
ld%U1%V1 %0,%1
st%U0%V0 %1,%0"
[(set_attr "type" "move,move,load,store")])
 
(define_insn "*movsi_set_cc_insn"
[(set (reg:CCZN 61) (compare:CCZN
(match_operand:SI 1 "move_src_operand" "rIJi")
(const_int 0)))
(set (match_operand:SI 0 "move_dest_operand" "=r")
(match_dup 1))]
"register_operand (operands[0], SImode)
|| register_operand (operands[1], SImode)"
"mov%?.f %0,%S1"
[(set_attr "type" "move")
(set_attr "cond" "set_zn")])
 
;; DImode moves: expanded as for the narrower modes, then split into two
;; word operations at output time in *movdi_insn.
(define_expand "movdi"
[(set (match_operand:DI 0 "general_operand" "")
(match_operand:DI 1 "general_operand" ""))]
""
"
{
/* Everything except mem = const or mem = mem can be done easily. */
 
if (GET_CODE (operands[0]) == MEM)
operands[1] = force_reg (DImode, operands[1]);
}")
 
;; Emits a two-insn sequence per alternative; ordering of the halves is
;; chosen to avoid clobbering a register the second half still needs.
(define_insn "*movdi_insn"
[(set (match_operand:DI 0 "move_dest_operand" "=r,r,r,m")
(match_operand:DI 1 "move_double_src_operand" "r,HK,m,r"))]
"register_operand (operands[0], DImode)
|| register_operand (operands[1], DImode)"
"*
{
switch (which_alternative)
{
case 0 :
/* We normally copy the low-numbered register first. However, if
the first register operand 0 is the same as the second register of
operand 1, we must copy in the opposite order. */
if (REGNO (operands[0]) == REGNO (operands[1]) + 1)
return \"mov %R0,%R1\;mov %0,%1\";
else
return \"mov %0,%1\;mov %R0,%R1\";
case 1 :
return \"mov %0,%L1\;mov %R0,%H1\";
case 2 :
/* If the low-address word is used in the address, we must load it
last. Otherwise, load it first. Note that we cannot have
auto-increment in that case since the address register is known to be
dead. */
if (refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
operands [1], 0))
return \"ld%V1 %R0,%R1\;ld%V1 %0,%1\";
else
return \"ld%V1 %0,%1\;ld%V1 %R0,%R1\";
case 3 :
return \"st%V0 %1,%0\;st%V0 %R1,%R0\";
default:
gcc_unreachable ();
}
}"
[(set_attr "type" "move,move,load,store")
;; ??? The ld/st values could be 4 if it's [reg,bignum].
(set_attr "length" "2,4,2,2")])
 
;; Disabled alternative expander kept for reference (its comment still
;; says "DFmode" — it was evidently cloned from the movdf version).
;(define_expand "movdi"
; [(set (match_operand:DI 0 "general_operand" "")
; (match_operand:DI 1 "general_operand" ""))]
; ""
; "
;{
; /* Flow doesn't understand that this is effectively a DFmode move.
; It doesn't know that all of `operands[0]' is set. */
; emit_insn (gen_rtx_CLOBBER (VOIDmode, operands[0]));
;
; /* Emit insns that movsi_insn can handle. */
; emit_insn (gen_movsi (operand_subword (operands[0], 0, 0, DImode),
; operand_subword (operands[1], 0, 0, DImode)));
; emit_insn (gen_movsi (operand_subword (operands[0], 1, 0, DImode),
; operand_subword (operands[1], 1, 0, DImode)));
; DONE;
;}")
;; Floating point move insns.
 
;; SFmode moves — soft-float: values travel through integer registers,
;; so these mirror the movsi patterns.  %A prints the float value as an
;; asm comment (presumably — see arc_print_operand, TODO confirm).
(define_expand "movsf"
[(set (match_operand:SF 0 "general_operand" "")
(match_operand:SF 1 "general_operand" ""))]
""
"
{
/* Everything except mem = const or mem = mem can be done easily. */
if (GET_CODE (operands[0]) == MEM)
operands[1] = force_reg (SFmode, operands[1]);
}")
 
(define_insn "*movsf_insn"
[(set (match_operand:SF 0 "move_dest_operand" "=r,r,r,m")
(match_operand:SF 1 "move_src_operand" "r,E,m,r"))]
"register_operand (operands[0], SFmode)
|| register_operand (operands[1], SFmode)"
"@
mov%? %0,%1
mov%? %0,%1 ; %A1
ld%U1%V1 %0,%1
st%U0%V0 %1,%0"
[(set_attr "type" "move,move,load,store")])
 
;; DFmode moves — two-word sequences, same half-ordering logic as movdi.
(define_expand "movdf"
[(set (match_operand:DF 0 "general_operand" "")
(match_operand:DF 1 "general_operand" ""))]
""
"
{
/* Everything except mem = const or mem = mem can be done easily. */
if (GET_CODE (operands[0]) == MEM)
operands[1] = force_reg (DFmode, operands[1]);
}")
 
(define_insn "*movdf_insn"
[(set (match_operand:DF 0 "move_dest_operand" "=r,r,r,m")
(match_operand:DF 1 "move_double_src_operand" "r,E,m,r"))]
"register_operand (operands[0], DFmode)
|| register_operand (operands[1], DFmode)"
"*
{
switch (which_alternative)
{
case 0 :
/* We normally copy the low-numbered register first. However, if
the first register operand 0 is the same as the second register of
operand 1, we must copy in the opposite order. */
if (REGNO (operands[0]) == REGNO (operands[1]) + 1)
return \"mov %R0,%R1\;mov %0,%1\";
else
return \"mov %0,%1\;mov %R0,%R1\";
case 1 :
return \"mov %0,%L1\;mov %R0,%H1 ; %A1\";
case 2 :
/* If the low-address word is used in the address, we must load it
last. Otherwise, load it first. Note that we cannot have
auto-increment in that case since the address register is known to be
dead. */
if (refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
operands [1], 0))
return \"ld%V1 %R0,%R1\;ld%V1 %0,%1\";
else
return \"ld%V1 %0,%1\;ld%V1 %R0,%R1\";
case 3 :
return \"st%V0 %1,%0\;st%V0 %R1,%R0\";
default:
gcc_unreachable ();
}
}"
[(set_attr "type" "move,move,load,store")
;; ??? The ld/st values could be 4 if it's [reg,bignum].
(set_attr "length" "2,4,2,2")])
 
;; Disabled alternative expander kept for reference.
;(define_expand "movdf"
; [(set (match_operand:DF 0 "general_operand" "")
; (match_operand:DF 1 "general_operand" ""))]
; ""
; "
;{
; /* Flow doesn't understand that this is effectively a DFmode move.
; It doesn't know that all of `operands[0]' is set. */
; emit_insn (gen_rtx_CLOBBER (VOIDmode, operands[0]));
;
; /* Emit insns that movsi_insn can handle. */
; emit_insn (gen_movsi (operand_subword (operands[0], 0, 0, DFmode),
; operand_subword (operands[1], 0, 0, DFmode)));
; emit_insn (gen_movsi (operand_subword (operands[0], 1, 0, DFmode),
; operand_subword (operands[1], 1, 0, DFmode)));
; DONE;
;}")
;; Load/Store with update instructions.
;;
;; Some of these we can get by using pre-decrement or pre-increment, but the
;; hardware can also do cases where the increment is not the size of the
;; object.
;;
;; In all these cases, we use operands 0 and 1 for the register being
;; incremented because those are the operands that local-alloc will
;; tie and these are the pair most likely to be tieable (and the ones
;; that will benefit the most).
;;
;; We use match_operator here because we need to know whether the memory
;; object is volatile or not.
 
;; Load with pre-update: operand 3 gets MEM[op1 + op2], operand 0 gets the
;; updated address (ties to op1 via the "0" constraint).  The match_operator
;; (op 4) carries volatility info for %V.  Second alternative takes a long
;; immediate offset, hence length 2.
(define_insn "*loadqi_update"
[(set (match_operand:QI 3 "register_operand" "=r,r")
(match_operator:QI 4 "load_update_operand"
[(match_operand:SI 1 "register_operand" "0,0")
(match_operand:SI 2 "nonmemory_operand" "rI,J")]))
(set (match_operand:SI 0 "register_operand" "=r,r")
(plus:SI (match_dup 1) (match_dup 2)))]
""
"ldb.a%V4 %3,[%0,%2]"
[(set_attr "type" "load,load")
(set_attr "length" "1,2")])
 
;; Same address-update shape, with the loaded byte zero-extended to SI.
;; Zero extension is ldb's normal behavior, so the template is identical.
(define_insn "*load_zeroextendqisi_update"
[(set (match_operand:SI 3 "register_operand" "=r,r")
(zero_extend:SI (match_operator:QI 4 "load_update_operand"
[(match_operand:SI 1 "register_operand" "0,0")
(match_operand:SI 2 "nonmemory_operand" "rI,J")])))
(set (match_operand:SI 0 "register_operand" "=r,r")
(plus:SI (match_dup 1) (match_dup 2)))]
""
"ldb.a%V4 %3,[%0,%2]"
[(set_attr "type" "load,load")
(set_attr "length" "1,2")])
 
;; Sign-extending variant uses the .x modifier.
(define_insn "*load_signextendqisi_update"
[(set (match_operand:SI 3 "register_operand" "=r,r")
(sign_extend:SI (match_operator:QI 4 "load_update_operand"
[(match_operand:SI 1 "register_operand" "0,0")
(match_operand:SI 2 "nonmemory_operand" "rI,J")])))
(set (match_operand:SI 0 "register_operand" "=r,r")
(plus:SI (match_dup 1) (match_dup 2)))]
""
"ldb.x.a%V4 %3,[%0,%2]"
[(set_attr "type" "load,load")
(set_attr "length" "1,2")])
 
;; Store with update: only short-immediate offsets are allowed ("I"),
;; so there is a single length-1 alternative.
(define_insn "*storeqi_update"
[(set (match_operator:QI 4 "store_update_operand"
[(match_operand:SI 1 "register_operand" "0")
(match_operand:SI 2 "short_immediate_operand" "I")])
(match_operand:QI 3 "register_operand" "r"))
(set (match_operand:SI 0 "register_operand" "=r")
(plus:SI (match_dup 1) (match_dup 2)))]
""
"stb.a%V4 %3,[%0,%2]"
[(set_attr "type" "store")
(set_attr "length" "1")])
 
;; HImode versions of the same four shapes (ldw/stw).
(define_insn "*loadhi_update"
[(set (match_operand:HI 3 "register_operand" "=r,r")
(match_operator:HI 4 "load_update_operand"
[(match_operand:SI 1 "register_operand" "0,0")
(match_operand:SI 2 "nonmemory_operand" "rI,J")]))
(set (match_operand:SI 0 "register_operand" "=r,r")
(plus:SI (match_dup 1) (match_dup 2)))]
""
"ldw.a%V4 %3,[%0,%2]"
[(set_attr "type" "load,load")
(set_attr "length" "1,2")])
 
(define_insn "*load_zeroextendhisi_update"
[(set (match_operand:SI 3 "register_operand" "=r,r")
(zero_extend:SI (match_operator:HI 4 "load_update_operand"
[(match_operand:SI 1 "register_operand" "0,0")
(match_operand:SI 2 "nonmemory_operand" "rI,J")])))
(set (match_operand:SI 0 "register_operand" "=r,r")
(plus:SI (match_dup 1) (match_dup 2)))]
""
"ldw.a%V4 %3,[%0,%2]"
[(set_attr "type" "load,load")
(set_attr "length" "1,2")])
 
(define_insn "*load_signextendhisi_update"
[(set (match_operand:SI 3 "register_operand" "=r,r")
(sign_extend:SI (match_operator:HI 4 "load_update_operand"
[(match_operand:SI 1 "register_operand" "0,0")
(match_operand:SI 2 "nonmemory_operand" "rI,J")])))
(set (match_operand:SI 0 "register_operand" "=r,r")
(plus:SI (match_dup 1) (match_dup 2)))]
""
"ldw.x.a%V4 %3,[%0,%2]"
[(set_attr "type" "load,load")
(set_attr "length" "1,2")])
 
(define_insn "*storehi_update"
[(set (match_operator:HI 4 "store_update_operand"
[(match_operand:SI 1 "register_operand" "0")
(match_operand:SI 2 "short_immediate_operand" "I")])
(match_operand:HI 3 "register_operand" "r"))
(set (match_operand:SI 0 "register_operand" "=r")
(plus:SI (match_dup 1) (match_dup 2)))]
""
"stw.a%V4 %3,[%0,%2]"
[(set_attr "type" "store")
(set_attr "length" "1")])
 
;; SImode and SFmode word-sized versions (ld/st); SF shares the integer
;; load/store since this port keeps floats in integer registers.
(define_insn "*loadsi_update"
[(set (match_operand:SI 3 "register_operand" "=r,r")
(match_operator:SI 4 "load_update_operand"
[(match_operand:SI 1 "register_operand" "0,0")
(match_operand:SI 2 "nonmemory_operand" "rI,J")]))
(set (match_operand:SI 0 "register_operand" "=r,r")
(plus:SI (match_dup 1) (match_dup 2)))]
""
"ld.a%V4 %3,[%0,%2]"
[(set_attr "type" "load,load")
(set_attr "length" "1,2")])
 
(define_insn "*storesi_update"
[(set (match_operator:SI 4 "store_update_operand"
[(match_operand:SI 1 "register_operand" "0")
(match_operand:SI 2 "short_immediate_operand" "I")])
(match_operand:SI 3 "register_operand" "r"))
(set (match_operand:SI 0 "register_operand" "=r")
(plus:SI (match_dup 1) (match_dup 2)))]
""
"st.a%V4 %3,[%0,%2]"
[(set_attr "type" "store")
(set_attr "length" "1")])
 
(define_insn "*loadsf_update"
[(set (match_operand:SF 3 "register_operand" "=r,r")
(match_operator:SF 4 "load_update_operand"
[(match_operand:SI 1 "register_operand" "0,0")
(match_operand:SI 2 "nonmemory_operand" "rI,J")]))
(set (match_operand:SI 0 "register_operand" "=r,r")
(plus:SI (match_dup 1) (match_dup 2)))]
""
"ld.a%V4 %3,[%0,%2]"
[(set_attr "type" "load,load")
(set_attr "length" "1,2")])
 
(define_insn "*storesf_update"
[(set (match_operator:SF 4 "store_update_operand"
[(match_operand:SI 1 "register_operand" "0")
(match_operand:SI 2 "short_immediate_operand" "I")])
(match_operand:SF 3 "register_operand" "r"))
(set (match_operand:SI 0 "register_operand" "=r")
(plus:SI (match_dup 1) (match_dup 2)))]
""
"st.a%V4 %3,[%0,%2]"
[(set_attr "type" "store")
(set_attr "length" "1")])
;; Conditional move instructions.
 
;; Conditional move instructions.
;; The expanders rewrite operand 1 into a comparison against the CC
;; register (hard reg 61), using the operands stashed earlier in
;; arc_compare_op0/arc_compare_op1 (globals set by the cmp expander —
;; defined in arc.c, not visible here).
(define_expand "movsicc"
[(set (match_operand:SI 0 "register_operand" "")
(if_then_else:SI (match_operand 1 "comparison_operator" "")
(match_operand:SI 2 "nonmemory_operand" "")
(match_operand:SI 3 "register_operand" "")))]
""
"
{
enum rtx_code code = GET_CODE (operands[1]);
rtx ccreg
= gen_rtx_REG (SELECT_CC_MODE (code, arc_compare_op0, arc_compare_op1),
61);
 
operands[1] = gen_rtx_fmt_ee (code, VOIDmode, ccreg, const0_rtx);
}")
 
;; Disabled DImode variant, kept for reference.
;(define_expand "movdicc"
; [(set (match_operand:DI 0 "register_operand" "")
; (if_then_else:DI (match_operand 1 "comparison_operator" "")
; (match_operand:DI 2 "nonmemory_operand" "")
; (match_operand:DI 3 "register_operand" "")))]
; "0 /* ??? this would work better if we had cmpdi */"
; "
;{
; enum rtx_code code = GET_CODE (operands[1]);
; rtx ccreg
; = gen_rtx_REG (SELECT_CC_MODE (code, arc_compare_op0, arc_compare_op1),
; 61);
;
; operands[1] = gen_rtx_fmt_ee (code, VOIDmode, ccreg, const0_rtx);
;}")
 
(define_expand "movsfcc"
[(set (match_operand:SF 0 "register_operand" "")
(if_then_else:SF (match_operand 1 "comparison_operator" "")
(match_operand:SF 2 "nonmemory_operand" "")
(match_operand:SF 3 "register_operand" "")))]
""
"
{
enum rtx_code code = GET_CODE (operands[1]);
rtx ccreg
= gen_rtx_REG (SELECT_CC_MODE (code, arc_compare_op0, arc_compare_op1),
61);
 
operands[1] = gen_rtx_fmt_ee (code, VOIDmode, ccreg, const0_rtx);
}")
 
;; Disabled DFmode variant, kept for reference.
;(define_expand "movdfcc"
; [(set (match_operand:DF 0 "register_operand" "")
; (if_then_else:DF (match_operand 1 "comparison_operator" "")
; (match_operand:DF 2 "nonmemory_operand" "")
; (match_operand:DF 3 "register_operand" "")))]
; "0 /* ??? can generate less efficient code if constants involved */"
; "
;{
; enum rtx_code code = GET_CODE (operands[1]);
; rtx ccreg
; = gen_rtx_REG (SELECT_CC_MODE (code, arc_compare_op0, arc_compare_op1),
; 61);
;
; operands[1] = gen_rtx_fmt_ee (code, VOIDmode, ccreg, const0_rtx);
;}")
 
;; Predicated mov: operand 3 ties to the destination, so a false
;; condition leaves the old value in place.  %d1 prints the condition
;; suffix for the comparison in operand 1.
(define_insn "*movsicc_insn"
[(set (match_operand:SI 0 "register_operand" "=r")
(if_then_else:SI (match_operand 1 "comparison_operator" "")
(match_operand:SI 2 "nonmemory_operand" "rJi")
(match_operand:SI 3 "register_operand" "0")))]
""
"mov.%d1 %0,%S2"
[(set_attr "type" "cmove")])
 
; ??? This doesn't properly handle constants.
;(define_insn "*movdicc_insn"
; [(set (match_operand:DI 0 "register_operand" "=r,r")
; (if_then_else:DI (match_operand 1 "comparison_operator" "")
; (match_operand:DI 2 "nonmemory_operand" "r,Ji")
; (match_operand:DI 3 "register_operand" "0,0")))]
; "0"
; "*
;{
; switch (which_alternative)
; {
; case 0 :
; /* We normally copy the low-numbered register first. However, if
; the first register operand 0 is the same as the second register of
; operand 1, we must copy in the opposite order. */
; if (REGNO (operands[0]) == REGNO (operands[2]) + 1)
; return \"mov.%d1 %R0,%R2\;mov.%d1 %0,%2\";
; else
; return \"mov.%d1 %0,%2\;mov.%d1 %R0,%R2\";
; case 1 :
; return \"mov.%d1 %0,%2\;mov.%d1 %R0,%R2\";
; }
;}"
; [(set_attr "type" "cmove,cmove")
; (set_attr "length" "2,4")])
 
(define_insn "*movsfcc_insn"
[(set (match_operand:SF 0 "register_operand" "=r,r")
(if_then_else:SF (match_operand 1 "comparison_operator" "")
(match_operand:SF 2 "nonmemory_operand" "r,E")
(match_operand:SF 3 "register_operand" "0,0")))]
""
"@
mov.%d1 %0,%2
mov.%d1 %0,%2 ; %A2"
[(set_attr "type" "cmove,cmove")])
 
;(define_insn "*movdfcc_insn"
; [(set (match_operand:DF 0 "register_operand" "=r,r")
; (if_then_else:DF (match_operand 1 "comparison_operator" "")
; (match_operand:DF 2 "nonmemory_operand" "r,E")
; (match_operand:DF 3 "register_operand" "0,0")))]
; "0"
; "*
;{
; switch (which_alternative)
; {
; case 0 :
; /* We normally copy the low-numbered register first. However, if
; the first register operand 0 is the same as the second register of
; operand 1, we must copy in the opposite order. */
; if (REGNO (operands[0]) == REGNO (operands[2]) + 1)
; return \"mov.%d1 %R0,%R2\;mov.%d1 %0,%2\";
; else
; return \"mov.%d1 %0,%2\;mov.%d1 %R0,%R2\";
; case 1 :
; return \"mov.%d1 %0,%L2\;mov.%d1 %R0,%H2 ; %A2\";
; }
;}"
; [(set_attr "type" "cmove,cmove")
; (set_attr "length" "2,4")])
;; Zero extension instructions.
;; ??? We don't support volatile memrefs here, but I'm not sure why.
 
;; Zero extension: extb/extw in registers, or a plain ldb/ldw from memory
;; (byte/halfword loads zero-extend by default on this machine, as the
;; templates imply).
(define_insn "zero_extendqihi2"
[(set (match_operand:HI 0 "register_operand" "=r,r")
(zero_extend:HI (match_operand:QI 1 "nonvol_nonimm_operand" "r,m")))]
""
"@
extb%? %0,%1
ldb%U1 %0,%1"
[(set_attr "type" "unary,load")])
 
;; Flag-setting variant (extb.f): Z/N reflect the zero-extended result.
(define_insn "*zero_extendqihi2_set_cc_insn"
[(set (reg:CCZN 61) (compare:CCZN
(zero_extend:SI (match_operand:QI 1 "register_operand" "r"))
(const_int 0)))
(set (match_operand:HI 0 "register_operand" "=r")
(zero_extend:HI (match_dup 1)))]
""
"extb%?.f %0,%1"
[(set_attr "type" "unary")
(set_attr "cond" "set_zn")])
 
(define_insn "zero_extendqisi2"
[(set (match_operand:SI 0 "register_operand" "=r,r")
(zero_extend:SI (match_operand:QI 1 "nonvol_nonimm_operand" "r,m")))]
""
"@
extb%? %0,%1
ldb%U1 %0,%1"
[(set_attr "type" "unary,load")])
 
(define_insn "*zero_extendqisi2_set_cc_insn"
[(set (reg:CCZN 61) (compare:CCZN
(zero_extend:SI (match_operand:QI 1 "register_operand" "r"))
(const_int 0)))
(set (match_operand:SI 0 "register_operand" "=r")
(zero_extend:SI (match_dup 1)))]
""
"extb%?.f %0,%1"
[(set_attr "type" "unary")
(set_attr "cond" "set_zn")])
 
(define_insn "zero_extendhisi2"
[(set (match_operand:SI 0 "register_operand" "=r,r")
(zero_extend:SI (match_operand:HI 1 "nonvol_nonimm_operand" "r,m")))]
""
"@
extw%? %0,%1
ldw%U1 %0,%1"
[(set_attr "type" "unary,load")])
 
(define_insn "*zero_extendhisi2_set_cc_insn"
[(set (reg:CCZN 61) (compare:CCZN
(zero_extend:SI (match_operand:HI 1 "register_operand" "r"))
(const_int 0)))
(set (match_operand:SI 0 "register_operand" "=r")
(zero_extend:SI (match_dup 1)))]
""
"extw%?.f %0,%1"
[(set_attr "type" "unary")
(set_attr "cond" "set_zn")])
;; Sign extension instructions.
 
;; sexb/sexw in registers, or a sign-extending load (.x modifier).
(define_insn "extendqihi2"
[(set (match_operand:HI 0 "register_operand" "=r,r")
(sign_extend:HI (match_operand:QI 1 "nonvol_nonimm_operand" "r,m")))]
""
"@
sexb%? %0,%1
ldb.x%U1 %0,%1"
[(set_attr "type" "unary,load")])
 
(define_insn "*extendqihi2_set_cc_insn"
[(set (reg:CCZN 61) (compare:CCZN
(sign_extend:SI (match_operand:QI 1 "register_operand" "r"))
(const_int 0)))
(set (match_operand:HI 0 "register_operand" "=r")
(sign_extend:HI (match_dup 1)))]
""
"sexb%?.f %0,%1"
[(set_attr "type" "unary")
(set_attr "cond" "set_zn")])
 
(define_insn "extendqisi2"
[(set (match_operand:SI 0 "register_operand" "=r,r")
(sign_extend:SI (match_operand:QI 1 "nonvol_nonimm_operand" "r,m")))]
""
"@
sexb%? %0,%1
ldb.x%U1 %0,%1"
[(set_attr "type" "unary,load")])
 
(define_insn "*extendqisi2_set_cc_insn"
[(set (reg:CCZN 61) (compare:CCZN
(sign_extend:SI (match_operand:QI 1 "register_operand" "r"))
(const_int 0)))
(set (match_operand:SI 0 "register_operand" "=r")
(sign_extend:SI (match_dup 1)))]
""
"sexb%?.f %0,%1"
[(set_attr "type" "unary")
(set_attr "cond" "set_zn")])
 
(define_insn "extendhisi2"
[(set (match_operand:SI 0 "register_operand" "=r,r")
(sign_extend:SI (match_operand:HI 1 "nonvol_nonimm_operand" "r,m")))]
""
"@
sexw%? %0,%1
ldw.x%U1 %0,%1"
[(set_attr "type" "unary,load")])
 
(define_insn "*extendhisi2_set_cc_insn"
[(set (reg:CCZN 61) (compare:CCZN
(sign_extend:SI (match_operand:HI 1 "register_operand" "r"))
(const_int 0)))
(set (match_operand:SI 0 "register_operand" "=r")
(sign_extend:SI (match_dup 1)))]
""
"sexw%?.f %0,%1"
[(set_attr "type" "unary")
(set_attr "cond" "set_zn")])
;; Arithmetic instructions.
 
;; SImode add; "%r" marks operand 1 commutative with operand 2.
(define_insn "addsi3"
[(set (match_operand:SI 0 "register_operand" "=r")
(plus:SI (match_operand:SI 1 "register_operand" "%r")
(match_operand:SI 2 "nonmemory_operand" "rIJ")))]
""
"add%? %0,%1,%2")
 
;; add.f variant: sets the full CC (mode CC, attr "set"), unlike the
;; logical ops below which only set Z/N.
(define_insn "*addsi3_set_cc_insn"
[(set (reg:CC 61) (compare:CC
(plus:SI (match_operand:SI 1 "register_operand" "%r")
(match_operand:SI 2 "nonmemory_operand" "rIJ"))
(const_int 0)))
(set (match_operand:SI 0 "register_operand" "=r")
(plus:SI (match_dup 1)
(match_dup 2)))]
""
"add%?.f %0,%1,%2"
[(set_attr "cond" "set")])
 
;; DImode add as add.f/adc pair; clobbers CC.  A negative CONST_INT
;; low word needs -1 (sign extension) added into the high word, a
;; non-negative one needs 0.
(define_insn "adddi3"
[(set (match_operand:DI 0 "register_operand" "=r")
(plus:DI (match_operand:DI 1 "nonmemory_operand" "%r")
(match_operand:DI 2 "nonmemory_operand" "ri")))
(clobber (reg:CC 61))]
""
"*
{
rtx op2 = operands[2];
 
if (GET_CODE (op2) == CONST_INT)
{
int sign = INTVAL (op2);
if (sign < 0)
return \"add.f %L0,%L1,%2\;adc %H0,%H1,-1\";
else
return \"add.f %L0,%L1,%2\;adc %H0,%H1,0\";
}
else
return \"add.f %L0,%L1,%L2\;adc %H0,%H1,%H2\";
}"
[(set_attr "length" "2")])
 
;; Subtraction: not commutative, so no "%" on operand 1 here.
(define_insn "subsi3"
[(set (match_operand:SI 0 "register_operand" "=r")
(minus:SI (match_operand:SI 1 "register_operand" "r")
(match_operand:SI 2 "nonmemory_operand" "rIJ")))]
""
"sub%? %0,%1,%2")
 
;; NOTE(review): operand 1 carries "%r" here although minus is not
;; commutative — looks like a copy/paste from the add pattern; confirm
;; against mainline before relying on it.
(define_insn "*subsi3_set_cc_insn"
[(set (reg:CC 61) (compare:CC
(minus:SI (match_operand:SI 1 "register_operand" "%r")
(match_operand:SI 2 "nonmemory_operand" "rIJ"))
(const_int 0)))
(set (match_operand:SI 0 "register_operand" "=r")
(minus:SI (match_dup 1)
(match_dup 2)))]
""
"sub%?.f %0,%1,%2"
[(set_attr "cond" "set")])
 
;; DImode subtract as sub.f/sbc pair, mirroring adddi3.
(define_insn "subdi3"
[(set (match_operand:DI 0 "register_operand" "=r")
(minus:DI (match_operand:DI 1 "nonmemory_operand" "r")
(match_operand:DI 2 "nonmemory_operand" "ri")))
(clobber (reg:CC 61))]
""
"*
{
rtx op2 = operands[2];
 
if (GET_CODE (op2) == CONST_INT)
{
int sign = INTVAL (op2);
if (sign < 0)
return \"sub.f %L0,%L1,%2\;sbc %H0,%H1,-1\";
else
return \"sub.f %L0,%L1,%2\;sbc %H0,%H1,0\";
}
else
return \"sub.f %L0,%L1,%L2\;sbc %H0,%H1,%H2\";
}"
[(set_attr "length" "2")])
;; Boolean instructions.
;;
;; We don't define the DImode versions as expand_binop does a good enough job.
 
(define_insn "andsi3"
[(set (match_operand:SI 0 "register_operand" "=r")
(and:SI (match_operand:SI 1 "register_operand" "%r")
(match_operand:SI 2 "nonmemory_operand" "rIJ")))]
""
"and%? %0,%1,%2")
 
(define_insn "*andsi3_set_cc_insn"
[(set (reg:CCZN 61) (compare:CCZN
(and:SI (match_operand:SI 1 "register_operand" "%r")
(match_operand:SI 2 "nonmemory_operand" "rIJ"))
(const_int 0)))
(set (match_operand:SI 0 "register_operand" "=r")
(and:SI (match_dup 1)
(match_dup 2)))]
""
"and%?.f %0,%1,%2"
[(set_attr "cond" "set_zn")])
 
(define_insn "*bicsi3_insn"
[(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
(and:SI (match_operand:SI 1 "nonmemory_operand" "r,r,I,J")
(not:SI (match_operand:SI 2 "nonmemory_operand" "rI,J,r,r"))))]
""
"bic%? %0,%1,%2"
[(set_attr "length" "1,2,1,2")])
 
(define_insn "*bicsi3_set_cc_insn"
[(set (reg:CCZN 61) (compare:CCZN
(and:SI (match_operand:SI 1 "register_operand" "%r")
(not:SI (match_operand:SI 2 "nonmemory_operand" "rIJ")))
(const_int 0)))
(set (match_operand:SI 0 "register_operand" "=r")
(and:SI (match_dup 1)
(not:SI (match_dup 2))))]
""
"bic%?.f %0,%1,%2"
[(set_attr "cond" "set_zn")])
 
(define_insn "iorsi3"
[(set (match_operand:SI 0 "register_operand" "=r")
(ior:SI (match_operand:SI 1 "register_operand" "%r")
(match_operand:SI 2 "nonmemory_operand" "rIJ")))]
""
"or%? %0,%1,%2")
 
(define_insn "*iorsi3_set_cc_insn"
[(set (reg:CCZN 61) (compare:CCZN
(ior:SI (match_operand:SI 1 "register_operand" "%r")
(match_operand:SI 2 "nonmemory_operand" "rIJ"))
(const_int 0)))
(set (match_operand:SI 0 "register_operand" "=r")
(ior:SI (match_dup 1)
(match_dup 2)))]
""
"or%?.f %0,%1,%2"
[(set_attr "cond" "set_zn")])
 
(define_insn "xorsi3"
[(set (match_operand:SI 0 "register_operand" "=r")
(xor:SI (match_operand:SI 1 "register_operand" "%r")
(match_operand:SI 2 "nonmemory_operand" "rIJ")))]
""
"xor%? %0,%1,%2")
 
(define_insn "*xorsi3_set_cc_insn"
[(set (reg:CCZN 61) (compare:CCZN
(xor:SI (match_operand:SI 1 "register_operand" "%r")
(match_operand:SI 2 "nonmemory_operand" "rIJ"))
(const_int 0)))
(set (match_operand:SI 0 "register_operand" "=r")
(xor:SI (match_dup 1)
(match_dup 2)))]
""
"xor%?.f %0,%1,%2"
[(set_attr "cond" "set_zn")])
 
(define_insn "negsi2"
[(set (match_operand:SI 0 "register_operand" "=r")
(neg:SI (match_operand:SI 1 "register_operand" "r")))]
""
"sub%? %0,0,%1"
[(set_attr "type" "unary")])
 
(define_insn "*negsi2_set_cc_insn"
[(set (reg:CC 61) (compare:CC
(neg:SI (match_operand:SI 1 "register_operand" "r"))
(const_int 0)))
(set (match_operand:SI 0 "register_operand" "=r")
(neg:SI (match_dup 1)))]
""
"sub%?.f %0,0,%1"
[(set_attr "type" "unary")
(set_attr "cond" "set")])
 
(define_insn "negdi2"
[(set (match_operand:DI 0 "register_operand" "=r")
(neg:DI (match_operand:DI 1 "register_operand" "r")))
(clobber (reg:SI 61))]
""
"sub.f %L0,0,%L1\;sbc %H0,0,%H1"
[(set_attr "type" "unary")
(set_attr "length" "2")])
 
(define_insn "one_cmplsi2"
[(set (match_operand:SI 0 "register_operand" "=r")
(not:SI (match_operand:SI 1 "register_operand" "r")))]
""
"xor%? %0,%1,-1"
[(set_attr "type" "unary")])
 
(define_insn "*one_cmplsi2_set_cc_insn"
[(set (reg:CCZN 61) (compare:CCZN
(not:SI (match_operand:SI 1 "register_operand" "r"))
(const_int 0)))
(set (match_operand:SI 0 "register_operand" "=r")
(not:SI (match_dup 1)))]
""
"xor%?.f %0,%1,-1"
[(set_attr "type" "unary")
(set_attr "cond" "set_zn")])
;; Shift instructions.
 
(define_expand "ashlsi3"
[(set (match_operand:SI 0 "register_operand" "")
(ashift:SI (match_operand:SI 1 "register_operand" "")
(match_operand:SI 2 "nonmemory_operand" "")))]
""
"
{
if (! TARGET_SHIFTER)
{
emit_insn (gen_rtx_PARALLEL
(VOIDmode,
gen_rtvec (2,
gen_rtx_SET (VOIDmode, operands[0],
gen_rtx_ASHIFT (SImode, operands[1],
operands[2])),
gen_rtx_CLOBBER (VOIDmode,
gen_rtx_SCRATCH (SImode)))));
DONE;
}
}")
 
(define_expand "ashrsi3"
[(set (match_operand:SI 0 "register_operand" "")
(ashiftrt:SI (match_operand:SI 1 "register_operand" "")
(match_operand:SI 2 "nonmemory_operand" "")))]
""
"
{
if (! TARGET_SHIFTER)
{
emit_insn (gen_rtx_PARALLEL
(VOIDmode,
gen_rtvec (2,
gen_rtx_SET (VOIDmode, operands[0],
gen_rtx_ASHIFTRT (SImode,
operands[1],
operands[2])),
gen_rtx_CLOBBER (VOIDmode,
gen_rtx_SCRATCH (SImode)))));
DONE;
}
}")
 
(define_expand "lshrsi3"
[(set (match_operand:SI 0 "register_operand" "")
(lshiftrt:SI (match_operand:SI 1 "register_operand" "")
(match_operand:SI 2 "nonmemory_operand" "")))]
""
"
{
if (! TARGET_SHIFTER)
{
emit_insn (gen_rtx_PARALLEL
(VOIDmode,
gen_rtvec (2,
gen_rtx_SET (VOIDmode, operands[0],
gen_rtx_LSHIFTRT (SImode,
operands[1],
operands[2])),
gen_rtx_CLOBBER (VOIDmode,
gen_rtx_SCRATCH (SImode)))));
DONE;
}
}")
 
(define_insn "*ashlsi3_insn"
[(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
(ashift:SI (match_operand:SI 1 "nonmemory_operand" "r,r,I,J")
(match_operand:SI 2 "nonmemory_operand" "rI,J,r,r")))]
"TARGET_SHIFTER"
"asl%? %0,%1,%2"
[(set_attr "type" "shift")
(set_attr "length" "1,2,1,2")])
 
(define_insn "*ashrsi3_insn"
[(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
(ashiftrt:SI (match_operand:SI 1 "nonmemory_operand" "r,r,I,J")
(match_operand:SI 2 "nonmemory_operand" "rI,J,r,r")))]
"TARGET_SHIFTER"
"asr%? %0,%1,%2"
[(set_attr "type" "shift")
(set_attr "length" "1,2,1,2")])
 
(define_insn "*lshrsi3_insn"
[(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
(lshiftrt:SI (match_operand:SI 1 "nonmemory_operand" "r,r,I,J")
(match_operand:SI 2 "nonmemory_operand" "rI,J,r,r")))]
"TARGET_SHIFTER"
"lsr%? %0,%1,%2"
[(set_attr "type" "shift")
(set_attr "length" "1,2,1,2")])
 
(define_insn "*shift_si3"
[(set (match_operand:SI 0 "register_operand" "=r")
(match_operator:SI 3 "shift_operator"
[(match_operand:SI 1 "register_operand" "0")
(match_operand:SI 2 "nonmemory_operand" "rIJ")]))
(clobber (match_scratch:SI 4 "=&r"))]
"! TARGET_SHIFTER"
"* return output_shift (operands);"
[(set_attr "type" "shift")
(set_attr "length" "8")])
;; Compare instructions.
;; This controls RTL generation and register allocation.
 
;; We generate RTL for comparisons and branches by having the cmpxx
;; patterns store away the operands. Then, the scc and bcc patterns
;; emit RTL for both the compare and the branch.
 
(define_expand "cmpsi"
[(set (reg:CC 61)
(compare:CC (match_operand:SI 0 "register_operand" "")
(match_operand:SI 1 "nonmemory_operand" "")))]
""
"
{
arc_compare_op0 = operands[0];
arc_compare_op1 = operands[1];
DONE;
}")
 
;; ??? We may be able to relax this a bit by adding a new constant 'K' for 0.
;; This assumes sub.f 0,symbol,0 is a valid insn.
;; Note that "sub.f 0,r0,1" is an 8 byte insn. To avoid unnecessarily
;; creating 8 byte insns we duplicate %1 in the destination reg of the insn
;; if it's a small constant.
 
(define_insn "*cmpsi_cc_insn"
[(set (reg:CC 61)
(compare:CC (match_operand:SI 0 "register_operand" "r,r,r")
(match_operand:SI 1 "nonmemory_operand" "r,I,J")))]
""
"@
sub.f 0,%0,%1
sub.f %1,%0,%1
sub.f 0,%0,%1"
[(set_attr "type" "compare,compare,compare")])
 
(define_insn "*cmpsi_cczn_insn"
[(set (reg:CCZN 61)
(compare:CCZN (match_operand:SI 0 "register_operand" "r,r,r")
(match_operand:SI 1 "nonmemory_operand" "r,I,J")))]
""
"@
sub.f 0,%0,%1
sub.f %1,%0,%1
sub.f 0,%0,%1"
[(set_attr "type" "compare,compare,compare")])
 
(define_insn "*cmpsi_ccznc_insn"
[(set (reg:CCZNC 61)
(compare:CCZNC (match_operand:SI 0 "register_operand" "r,r,r")
(match_operand:SI 1 "nonmemory_operand" "r,I,J")))]
""
"@
sub.f 0,%0,%1
sub.f %1,%0,%1
sub.f 0,%0,%1"
[(set_attr "type" "compare,compare,compare")])
 
;; Next come the scc insns.
 
(define_expand "seq"
[(set (match_operand:SI 0 "register_operand" "=r")
(eq:SI (match_dup 1) (const_int 0)))]
""
"
{
operands[1] = gen_compare_reg (EQ, arc_compare_op0, arc_compare_op1);
}")
 
(define_expand "sne"
[(set (match_operand:SI 0 "register_operand" "=r")
(ne:SI (match_dup 1) (const_int 0)))]
""
"
{
operands[1] = gen_compare_reg (NE, arc_compare_op0, arc_compare_op1);
}")
 
(define_expand "sgt"
[(set (match_operand:SI 0 "register_operand" "=r")
(gt:SI (match_dup 1) (const_int 0)))]
""
"
{
operands[1] = gen_compare_reg (GT, arc_compare_op0, arc_compare_op1);
}")
 
(define_expand "sle"
[(set (match_operand:SI 0 "register_operand" "=r")
(le:SI (match_dup 1) (const_int 0)))]
""
"
{
operands[1] = gen_compare_reg (LE, arc_compare_op0, arc_compare_op1);
}")
 
(define_expand "sge"
[(set (match_operand:SI 0 "register_operand" "=r")
(ge:SI (match_dup 1) (const_int 0)))]
""
"
{
operands[1] = gen_compare_reg (GE, arc_compare_op0, arc_compare_op1);
}")
 
(define_expand "slt"
[(set (match_operand:SI 0 "register_operand" "=r")
(lt:SI (match_dup 1) (const_int 0)))]
""
"
{
operands[1] = gen_compare_reg (LT, arc_compare_op0, arc_compare_op1);
}")
 
(define_expand "sgtu"
[(set (match_operand:SI 0 "register_operand" "=r")
(gtu:SI (match_dup 1) (const_int 0)))]
""
"
{
operands[1] = gen_compare_reg (GTU, arc_compare_op0, arc_compare_op1);
}")
 
(define_expand "sleu"
[(set (match_operand:SI 0 "register_operand" "=r")
(leu:SI (match_dup 1) (const_int 0)))]
""
"
{
operands[1] = gen_compare_reg (LEU, arc_compare_op0, arc_compare_op1);
}")
 
(define_expand "sgeu"
[(set (match_operand:SI 0 "register_operand" "=r")
(geu:SI (match_dup 1) (const_int 0)))]
""
"
{
operands[1] = gen_compare_reg (GEU, arc_compare_op0, arc_compare_op1);
}")
 
(define_expand "sltu"
[(set (match_operand:SI 0 "register_operand" "=r")
(ltu:SI (match_dup 1) (const_int 0)))]
""
"
{
operands[1] = gen_compare_reg (LTU, arc_compare_op0, arc_compare_op1);
}")
 
(define_insn "*scc_insn"
[(set (match_operand:SI 0 "register_operand" "=r")
(match_operator:SI 1 "comparison_operator" [(reg 61) (const_int 0)]))]
""
"mov %0,1\;sub.%D1 %0,%0,%0"
[(set_attr "type" "unary")
(set_attr "length" "2")])
 
;; ??? Look up negscc insn. See pa.md for example.
(define_insn "*neg_scc_insn"
[(set (match_operand:SI 0 "register_operand" "=r")
(neg:SI (match_operator:SI 1 "comparison_operator"
[(reg 61) (const_int 0)])))]
""
"mov %0,-1\;sub.%D1 %0,%0,%0"
[(set_attr "type" "unary")
(set_attr "length" "2")])
 
(define_insn "*not_scc_insn"
[(set (match_operand:SI 0 "register_operand" "=r")
(not:SI (match_operator:SI 1 "comparison_operator"
[(reg 61) (const_int 0)])))]
""
"mov %0,1\;sub.%d1 %0,%0,%0"
[(set_attr "type" "unary")
(set_attr "length" "2")])
;; These control RTL generation for conditional jump insns
 
(define_expand "beq"
[(set (pc)
(if_then_else (eq (match_dup 1) (const_int 0))
(label_ref (match_operand 0 "" ""))
(pc)))]
""
"
{
operands[1] = gen_compare_reg (EQ, arc_compare_op0, arc_compare_op1);
}")
 
(define_expand "bne"
[(set (pc)
(if_then_else (ne (match_dup 1) (const_int 0))
(label_ref (match_operand 0 "" ""))
(pc)))]
""
"
{
operands[1] = gen_compare_reg (NE, arc_compare_op0, arc_compare_op1);
}")
 
(define_expand "bgt"
[(set (pc)
(if_then_else (gt (match_dup 1) (const_int 0))
(label_ref (match_operand 0 "" ""))
(pc)))]
""
"
{
operands[1] = gen_compare_reg (GT, arc_compare_op0, arc_compare_op1);
}")
 
(define_expand "ble"
[(set (pc)
(if_then_else (le (match_dup 1) (const_int 0))
(label_ref (match_operand 0 "" ""))
(pc)))]
""
"
{
operands[1] = gen_compare_reg (LE, arc_compare_op0, arc_compare_op1);
}")
 
(define_expand "bge"
[(set (pc)
(if_then_else (ge (match_dup 1) (const_int 0))
(label_ref (match_operand 0 "" ""))
(pc)))]
""
"
{
operands[1] = gen_compare_reg (GE, arc_compare_op0, arc_compare_op1);
}")
 
(define_expand "blt"
[(set (pc)
(if_then_else (lt (match_dup 1) (const_int 0))
(label_ref (match_operand 0 "" ""))
(pc)))]
""
"
{
operands[1] = gen_compare_reg (LT, arc_compare_op0, arc_compare_op1);
}")
 
(define_expand "bgtu"
[(set (pc)
(if_then_else (gtu (match_dup 1) (const_int 0))
(label_ref (match_operand 0 "" ""))
(pc)))]
""
"
{
operands[1] = gen_compare_reg (GTU, arc_compare_op0, arc_compare_op1);
}")
 
(define_expand "bleu"
[(set (pc)
(if_then_else (leu (match_dup 1) (const_int 0))
(label_ref (match_operand 0 "" ""))
(pc)))]
""
"
{
operands[1] = gen_compare_reg (LEU, arc_compare_op0, arc_compare_op1);
}")
 
(define_expand "bgeu"
[(set (pc)
(if_then_else (geu (match_dup 1) (const_int 0))
(label_ref (match_operand 0 "" ""))
(pc)))]
""
"
{
operands[1] = gen_compare_reg (GEU, arc_compare_op0, arc_compare_op1);
}")
 
(define_expand "bltu"
[(set (pc)
(if_then_else (ltu (match_dup 1) (const_int 0))
(label_ref (match_operand 0 "" ""))
(pc)))]
""
"
{
operands[1] = gen_compare_reg (LTU, arc_compare_op0, arc_compare_op1);
}")
 
;; Now match both normal and inverted jump.
 
(define_insn "*branch_insn"
[(set (pc)
(if_then_else (match_operator 1 "proper_comparison_operator"
[(reg 61) (const_int 0)])
(label_ref (match_operand 0 "" ""))
(pc)))]
""
"*
{
if (arc_ccfsm_branch_deleted_p ())
{
arc_ccfsm_record_branch_deleted ();
return \"; branch deleted, next insns conditionalized\";
}
else
return \"%~b%d1%# %l0\";
}"
[(set_attr "type" "branch")])
 
(define_insn "*rev_branch_insn"
[(set (pc)
(if_then_else (match_operator 1 "proper_comparison_operator"
[(reg 61) (const_int 0)])
(pc)
(label_ref (match_operand 0 "" ""))))]
"REVERSIBLE_CC_MODE (GET_MODE (XEXP (operands[1], 0)))"
"*
{
if (arc_ccfsm_branch_deleted_p ())
{
arc_ccfsm_record_branch_deleted ();
return \"; branch deleted, next insns conditionalized\";
}
else
return \"%~b%D1%# %l0\";
}"
[(set_attr "type" "branch")])
;; Unconditional and other jump instructions.
 
(define_insn "jump"
[(set (pc) (label_ref (match_operand 0 "" "")))]
""
"b%* %l0"
[(set_attr "type" "uncond_branch")])
 
(define_insn "indirect_jump"
[(set (pc) (match_operand:SI 0 "address_operand" "p"))]
""
"j%* %a0"
[(set_attr "type" "uncond_branch")])
;; Implement a switch statement.
;; This wouldn't be necessary in the non-pic case if we could distinguish
;; label refs of the jump table from other label refs. The problem is that
;; label refs are output as "%st(.LL42)" but we don't want the %st - we want
;; the real address since it's the address of the table.
 
(define_expand "casesi"
[(set (match_dup 5)
(minus:SI (match_operand:SI 0 "register_operand" "")
(match_operand:SI 1 "nonmemory_operand" "")))
(set (reg:CC 61)
(compare:CC (match_dup 5)
(match_operand:SI 2 "nonmemory_operand" "")))
(set (pc)
(if_then_else (gtu (reg:CC 61)
(const_int 0))
(label_ref (match_operand 4 "" ""))
(pc)))
(parallel
[(set (pc)
(mem:SI (plus:SI (mult:SI (match_dup 5)
(const_int 4))
(label_ref (match_operand 3 "" "")))))
(clobber (match_scratch:SI 6 ""))
(clobber (match_scratch:SI 7 ""))])]
""
"
{
operands[5] = gen_reg_rtx (SImode);
}")
 
(define_insn "*casesi_insn"
[(set (pc)
(mem:SI (plus:SI (mult:SI (match_operand:SI 0 "register_operand" "r")
(const_int 4))
(label_ref (match_operand 1 "" "")))))
(clobber (match_scratch:SI 2 "=r"))
(clobber (match_scratch:SI 3 "=r"))]
""
"*
{
output_asm_insn (\"mov %2,%1\", operands);
if (TARGET_SHIFTER)
output_asm_insn (\"asl %3,%0,2\", operands);
else
output_asm_insn (\"asl %3,%0\;asl %3,%3\", operands);
output_asm_insn (\"ld %2,[%2,%3]\", operands);
output_asm_insn (\"j.nd %a2\", operands);
return \"\";
}"
[(set_attr "type" "uncond_branch")
(set_attr "length" "6")])
 
(define_insn "tablejump"
[(set (pc) (match_operand:SI 0 "address_operand" "p"))
(use (label_ref (match_operand 1 "" "")))]
"0 /* disabled -> using casesi now */"
"j%* %a0"
[(set_attr "type" "uncond_branch")])
 
(define_expand "call"
;; operands[1] is stack_size_rtx
;; operands[2] is next_arg_register
[(parallel [(call (match_operand:SI 0 "call_operand" "")
(match_operand 1 "" ""))
(clobber (reg:SI 31))])]
""
"")
 
(define_insn "*call_via_reg"
[(call (mem:SI (match_operand:SI 0 "register_operand" "r"))
(match_operand 1 "" ""))
(clobber (reg:SI 31))]
""
"lr blink,[status]\;j.d %0\;add blink,blink,2"
[(set_attr "type" "call_no_delay_slot")
(set_attr "length" "3")])
 
(define_insn "*call_via_label"
[(call (mem:SI (match_operand:SI 0 "call_address_operand" ""))
(match_operand 1 "" ""))
(clobber (reg:SI 31))]
""
; The %~ is necessary in case this insn gets conditionalized and the previous
; insn is the cc setter.
"%~bl%!%* %0"
[(set_attr "type" "call")
(set_attr "cond" "canuse")])
 
(define_expand "call_value"
;; operand 2 is stack_size_rtx
;; operand 3 is next_arg_register
[(parallel [(set (match_operand 0 "register_operand" "=r")
(call (match_operand:SI 1 "call_operand" "")
(match_operand 2 "" "")))
(clobber (reg:SI 31))])]
""
"")
 
(define_insn "*call_value_via_reg"
[(set (match_operand 0 "register_operand" "=r")
(call (mem:SI (match_operand:SI 1 "register_operand" "r"))
(match_operand 2 "" "")))
(clobber (reg:SI 31))]
""
"lr blink,[status]\;j.d %1\;add blink,blink,2"
[(set_attr "type" "call_no_delay_slot")
(set_attr "length" "3")])
 
(define_insn "*call_value_via_label"
[(set (match_operand 0 "register_operand" "=r")
(call (mem:SI (match_operand:SI 1 "call_address_operand" ""))
(match_operand 2 "" "")))
(clobber (reg:SI 31))]
""
; The %~ is necessary in case this insn gets conditionalized and the previous
; insn is the cc setter.
"%~bl%!%* %1"
[(set_attr "type" "call")
(set_attr "cond" "canuse")])
(define_insn "nop"
[(const_int 0)]
""
"nop"
[(set_attr "type" "misc")])
 
;; Special pattern to flush the icache.
;; ??? Not sure what to do here. Some ARC's are known to support this.
 
(define_insn "flush_icache"
[(unspec_volatile [(match_operand 0 "memory_operand" "m")] 0)]
""
"* return \"\";"
[(set_attr "type" "misc")])
;; Split up troublesome insns for better scheduling.
;; Peepholes go at the end.
/lib1funcs.asm
0,0 → 1,271
; libgcc routines for ARC cpu.
 
/* Copyright (C) 1995, 1997,2004 Free Software Foundation, Inc.
 
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.
 
In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file. (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)
 
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
 
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA. */
 
#ifdef L_mulsi3
.section .text
.align 4

#ifdef __base__
.cpu base
.global ___mulsi3
___mulsi3:

/* This the simple version.

while (a)
{
if (a & 1)
r += b;
a >>= 1;
b <<= 1;
}
*/
; Inputs: r0 = a, r1 = b.  Result: r0 = low 32 bits of a * b.
mov r2,0 ; Accumulate result here.
.Lloop:
sub.f 0,r0,0 ; while (a)
nop
beq.nd .Ldone
and.f 0,r0,1 ; if (a & 1)
add.nz r2,r2,r1 ; r += b (predicated on the and.f result)
lsr r0,r0 ; a >>= 1
b.d .Lloop ; .d: next insn executes in the branch delay slot
lsl r1,r1 ; b <<= 1
.Ldone:
j.d blink ; return, with the result move in the delay slot
mov r0,r2
#endif

#endif /* L_mulsi3 */
 
#ifdef L_umulsidi3
.section .text
.align 4

#ifdef __base__
.cpu base
.global ___umulsidi3
___umulsidi3:

/* This the simple version.

while (a)
{
if (a & 1)
r += b;
a >>= 1;
b <<= 1;
}
Inputs: r0 = a, r1 = b.  Result: 64-bit product in r0/r1
(word order depends on endianness).  */
mov r2,0 ; Top part of b.
mov r3,0 ; Accumulate result here.
mov r4,0 ; (r3:r4 = 64-bit accumulator, r2:r1 = 64-bit b)
.Lloop:
sub.f 0,r0,0 ; while (a)
nop
beq.nd .Ldone
and.f 0,r0,1 ; if (a & 1)
; Bug fix: a stray "sub.f 0,r0,0" here clobbered the Z flag just set by
; the and.f, so beq tested a == 0 (never true at this point) and the add
; ran on every iteration, producing a wrong product.  The nop is still
; required between a 4-byte flag-setting insn and the branch that uses it.
nop
beq .Ldontadd
add.f r4,r4,r1 ; r += b (64-bit: low word, then carry into the high word)
adc r3,r3,r2
.Ldontadd:
lsr r0,r0 ; a >>= 1
lsl.f r1,r1 ; b <<= 1 (64-bit: carry out of the low word ...)
b.d .Lloop
rlc r2,r2 ; ... rotates into the high word
.Ldone:
#ifdef __big_endian__
mov r1,r4
j.d blink
mov r0,r3
#else
mov r0,r4
j.d blink
mov r1,r3
#endif
#endif

#endif /* L_umulsidi3 */
 
#ifdef L_divmod_tools

; Utilities used by all routines.

.section .text
.align 4

; inputs: r0 = numerator, r1 = denominator
; outputs: positive r0/r1,
; r6.bit1 = sign of numerator, r6.bit0 = sign of result

.global ___divnorm
___divnorm:
mov r6,0 ; keep sign in r6
sub.f 0,r0,0 ; is numerator -ve?
sub.lt r0,0,r0 ; negate numerator
mov.lt r6,3 ; sign is -ve: set both bit1 (numerator) and bit0 (result)
sub.f 0,r1,0 ; is denominator -ve?
sub.lt r1,0,r1 ; negate denominator
xor.lt r6,r6,1 ; toggle result sign (bit0); numerator sign (bit1) unchanged
j.nd blink

/*
unsigned long
udivmodsi4(int modwanted, unsigned long num, unsigned long den)
{
unsigned long bit = 1;
unsigned long res = 0;

while (den < num && bit && !(den & (1L<<31)))
{
den <<=1;
bit <<=1;
}
while (bit)
{
if (num >= den)
{
num -= den;
res |= bit;
}
bit >>=1;
den >>=1;
}
if (modwanted) return num;
return res;
}
*/

; inputs: r0 = numerator, r1 = denominator
; outputs: r0 = quotient, r1 = remainder, r2/r3 trashed

.global ___udivmodsi4
___udivmodsi4:
mov r2,1 ; bit = 1
mov r3,0 ; res = 0
.Lloop1:
; Align den with num: shift den/bit left until den >= num, bit overflows
; out, or den's top bit is set.  Each test is flag-set / nop / branch.
sub.f 0,r1,r0 ; while (den < num
nop
bnc.nd .Lloop2
sub.f 0,r2,0 ; && bit
nop
bz.nd .Lloop2
lsl.f 0,r1 ; && !(den & (1<<31))
nop
bc.nd .Lloop2
lsl r1,r1 ; den <<= 1
b.d .Lloop1
lsl r2,r2 ; bit <<= 1
.Lloop2:
; Long-division main loop: subtract den where it fits, building res.
sub.f 0,r2,0 ; while (bit)
nop
bz.nd .Ldivmodend
sub.f 0,r0,r1 ; if (num >= den)
nop
bc.nd .Lshiftdown
sub r0,r0,r1 ; num -= den
or r3,r3,r2 ; res |= bit
.Lshiftdown:
lsr r2,r2 ; bit >>= 1
b.d .Lloop2
lsr r1,r1 ; den >>= 1
.Ldivmodend:
mov r1,r0 ; r1 = mod
j.d blink
mov r0,r3 ; r0 = res

#endif
 
#ifdef L_udivsi3
.section .text
.align 4

#ifdef __base__
.cpu base
.global ___udivsi3
___udivsi3:
; r0 = r0 / r1 (unsigned).  blink is saved in r7 because bl clobbers it.
mov r7,blink
bl.nd ___udivmodsi4
j.nd r7
#endif

#endif /* L_udivsi3 */
 
#ifdef L_divsi3
.section .text
.align 4

#ifdef __base__
.cpu base
.global ___divsi3
___divsi3:
; r0 = r0 / r1 (signed): normalize signs, divide unsigned, then negate
; the quotient if ___divnorm recorded a negative result sign in r6.bit0.
mov r7,blink
bl.nd ___divnorm
bl.nd ___udivmodsi4
and.f 0,r6,1
sub.nz r0,0,r0 ; cannot go in delay slot, has limm value
j.nd r7
#endif

#endif /* L_divsi3 */
 
#ifdef L_umodsi3
.section .text
.align 4

#ifdef __base__
.cpu base
.global ___umodsi3
___umodsi3:
; r0 = r0 % r1 (unsigned); ___udivmodsi4 leaves the remainder in r1.
mov r7,blink
bl.nd ___udivmodsi4
j.d r7
mov r0,r1
#endif

#endif /* L_umodsi3 */
 
#ifdef L_modsi3
.section .text
.align 4

#ifdef __base__
.cpu base
.global ___modsi3
___modsi3:
; r0 = r0 % r1 (signed): the remainder takes the sign of the numerator,
; which ___divnorm recorded in r6.bit1.
mov r7,blink
bl.nd ___divnorm
bl.nd ___udivmodsi4
and.f 0,r6,2
sub.nz r1,0,r1
j.d r7
mov r0,r1
#endif

#endif /* L_modsi3 */
/arc.c
0,0 → 1,2351
/* Subroutines used for code generation on the Argonaut ARC cpu.
Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004,
2005, 2007
Free Software Foundation, Inc.
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
 
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
 
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
 
/* ??? This is an old port, and is undoubtedly suffering from bit rot. */
 
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "recog.h"
#include "toplev.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
 
/* Which cpu we're compiling for. */
int arc_cpu_type;

/* Name of mangle string to add to symbols to separate code compiled for each
cpu (or NULL). */
const char *arc_mangle_cpu;

/* Save the operands last given to a compare for use when we
generate a scc or bcc insn. */
rtx arc_compare_op0, arc_compare_op1;

/* Name of text, data, and rodata sections used in varasm.c. */
const char *arc_text_section;
const char *arc_data_section;
const char *arc_rodata_section;

/* Array of valid operand punctuation characters. */
char arc_punct_chars[256];

/* Variables used by arc_final_prescan_insn to implement conditional
execution. */
static int arc_ccfsm_state;
static int arc_ccfsm_current_cc;
static rtx arc_ccfsm_target_insn;
static int arc_ccfsm_target_label;

/* The maximum number of insns skipped which will be conditionalised if
possible. */
#define MAX_INSNS_SKIPPED 3

/* A nop is needed between a 4 byte insn that sets the condition codes and
a branch that uses them (the same isn't true for an 8 byte insn that sets
the condition codes). Set by arc_final_prescan_insn. Used by
arc_print_operand. */
static int last_insn_set_cc_p;
static int current_insn_set_cc_p;
/* Forward declarations for the local functions and target hooks below. */
static bool arc_handle_option (size_t, const char *, int);
static void record_cc_ref (rtx);
static void arc_init_reg_tables (void);
static int get_arc_condition_code (rtx);
const struct attribute_spec arc_attribute_table[];
static tree arc_handle_interrupt_attribute (tree *, tree, tree, int, bool *);
static bool arc_assemble_integer (rtx, unsigned int, int);
static void arc_output_function_prologue (FILE *, HOST_WIDE_INT);
static void arc_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void arc_file_start (void);
static void arc_internal_label (FILE *, const char *, unsigned long);
static void arc_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
tree, int *, int);
static bool arc_rtx_costs (rtx, int, int, int *);
static int arc_address_cost (rtx);
static void arc_external_libcall (rtx);
static bool arc_return_in_memory (tree, tree);
static bool arc_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
tree, bool);
/* Initialize the GCC target structure. */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER arc_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE arc_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE arc_output_function_epilogue
#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START arc_file_start
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE arc_attribute_table
#undef TARGET_ASM_INTERNAL_LABEL
#define TARGET_ASM_INTERNAL_LABEL arc_internal_label
#undef TARGET_ASM_EXTERNAL_LIBCALL
#define TARGET_ASM_EXTERNAL_LIBCALL arc_external_libcall

#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION arc_handle_option

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS arc_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST arc_address_cost

/* Promote small args/returns to full words; hook_bool_tree_true is the
"always yes" stock hook. */
#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY arc_return_in_memory
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE arc_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS arc_setup_incoming_varargs

/* The one global target vector, built from the macros defined above. */
struct gcc_target targetm = TARGET_INITIALIZER;
/* Implement TARGET_HANDLE_OPTION. */
 
/* Implement TARGET_HANDLE_OPTION.

   Only -mcpu= needs validation: its argument must be "base" or a cpu
   name recognized by ARC_EXTENSION_CPU.  Every other option is accepted
   unconditionally.  */

static bool
arc_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  if (code == OPT_mcpu_)
    return strcmp (arg, "base") == 0 || ARC_EXTENSION_CPU (arg);

  return true;
}
 
/* Called by OVERRIDE_OPTIONS to initialize various things. */
 
/* Build the ".section <name>" pseudo-op strings for the text/data/rodata
   sections and set up the operand-punctuation table used by
   arc_print_operand.  Called once from OVERRIDE_OPTIONS.  */
void
arc_init (void)
{
char *tmp;
/* Set the pseudo-ops for the various standard sections. */
/* sizeof (ARC_SECTION_FORMAT) already counts the NUL, so the extra +1
is harmless slack in each allocation. */
arc_text_section = tmp = xmalloc (strlen (arc_text_string) + sizeof (ARC_SECTION_FORMAT) + 1);
sprintf (tmp, ARC_SECTION_FORMAT, arc_text_string);
arc_data_section = tmp = xmalloc (strlen (arc_data_string) + sizeof (ARC_SECTION_FORMAT) + 1);
sprintf (tmp, ARC_SECTION_FORMAT, arc_data_string);
arc_rodata_section = tmp = xmalloc (strlen (arc_rodata_string) + sizeof (ARC_SECTION_FORMAT) + 1);
sprintf (tmp, ARC_SECTION_FORMAT, arc_rodata_string);

arc_init_reg_tables ();

/* Initialize array for PRINT_OPERAND_PUNCT_VALID_P. */
memset (arc_punct_chars, 0, sizeof (arc_punct_chars));
arc_punct_chars['#'] = 1;
arc_punct_chars['*'] = 1;
arc_punct_chars['?'] = 1;
arc_punct_chars['!'] = 1;
arc_punct_chars['~'] = 1;
}
/* The condition codes of the ARC, and the inverse function. */
/* Indexed by the values returned from get_arc_condition_code; entries are
   laid out in inverse pairs so XOR-ing the index with 1 (see the macro
   below) yields the inverse condition.  The 0 entries are unused slots. */
static const char *const arc_condition_codes[] =
{
"al", 0, "eq", "ne", "p", "n", "c", "nc", "v", "nv",
"gt", "le", "ge", "lt", "hi", "ls", "pnz", 0
};

#define ARC_INVERSE_CONDITION_CODE(X) ((X) ^ 1)
 
/* Returns the index of the ARC condition code string in
`arc_condition_codes'. COMPARISON should be an rtx like
`(eq (...) (...))'. */
 
static int
get_arc_condition_code (rtx comparison)
{
switch (GET_CODE (comparison))
{
case EQ : return 2; /* "eq" */
case NE : return 3; /* "ne" */
case GT : return 10; /* "gt" */
case LE : return 11; /* "le" */
case GE : return 12; /* "ge" */
case LT : return 13; /* "lt" */
case GTU : return 14; /* "hi" */
case LEU : return 15; /* "ls" */
case LTU : return 6; /* "c" (carry set) */
case GEU : return 7; /* "nc" (carry clear) */
default : gcc_unreachable ();
}
/*NOTREACHED*/
return (42);
}
 
/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
return the mode to be used for the comparison. */
 
/* Given a comparison code OP (EQ, NE, etc.) and the two arms X and Y of
   the COMPARE, return the CC mode to use for the comparison.

   EQ/NE only need Z/N.  Otherwise, when X is itself a logical or extend
   operation its flag-setting form only provides Z and N (CCZNmode), and
   a shift additionally provides C (CCZNCmode); anything else gets the
   full CCmode.  Note X *is* inspected here, so it must not be marked
   ATTRIBUTE_UNUSED (the previous annotation was misleading).  */

enum machine_mode
arc_select_cc_mode (enum rtx_code op,
		    rtx x,
		    rtx y ATTRIBUTE_UNUSED)
{
  switch (op)
    {
    case EQ :
    case NE :
      return CCZNmode;
    default :
      switch (GET_CODE (x))
	{
	case AND :
	case IOR :
	case XOR :
	case SIGN_EXTEND :
	case ZERO_EXTEND :
	  return CCZNmode;
	case ASHIFT :
	case ASHIFTRT :
	case LSHIFTRT :
	  return CCZNCmode;
	default:
	  break;
	}
    }
  return CCmode;
}
/* Vectors to keep interesting information about registers where it can easily
be got. We use to use the actual mode value as the bit number, but there
is (or may be) more than 32 modes now. Instead we use two tables: one
indexed by hard register number, and one indexed by mode. */
 
/* The purpose of arc_mode_class is to shrink the range of modes so that
they all fit (as bit numbers) in a 32 bit word (again). Each real mode is
mapped into one arc_mode_class mode. */
 
/* One bit per class; a real machine mode maps to exactly one of these.
   C = condition codes, S/D/T/O = 1/2/4/8-word integers, *F = floats. */
enum arc_mode_class {
C_MODE,
S_MODE, D_MODE, T_MODE, O_MODE,
SF_MODE, DF_MODE, TF_MODE, OF_MODE
};

/* Modes for condition codes. */
#define C_MODES (1 << (int) C_MODE)

/* Modes for single-word and smaller quantities. */
#define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))

/* Modes for double-word and smaller quantities. */
#define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << DF_MODE))

/* Modes for quad-word and smaller quantities. */
#define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))

/* Value is 1 if register/mode pair is acceptable on arc. */

/* Indexed by hard register number.  Regs 0-23 take up to quad-word
   values, the rest single-word only; the final C_MODES entry is the
   condition-code register (61). */
const unsigned int arc_hard_regno_mode_ok[] = {
T_MODES, T_MODES, T_MODES, T_MODES, T_MODES, T_MODES, T_MODES, T_MODES,
T_MODES, T_MODES, T_MODES, T_MODES, T_MODES, T_MODES, T_MODES, T_MODES,
T_MODES, T_MODES, T_MODES, T_MODES, T_MODES, T_MODES, T_MODES, D_MODES,
D_MODES, S_MODES, S_MODES, S_MODES, S_MODES, S_MODES, S_MODES, S_MODES,

/* ??? Leave these as S_MODES for now. */
S_MODES, S_MODES, S_MODES, S_MODES, S_MODES, S_MODES, S_MODES, S_MODES,
S_MODES, S_MODES, S_MODES, S_MODES, S_MODES, S_MODES, S_MODES, S_MODES,
S_MODES, S_MODES, S_MODES, S_MODES, S_MODES, S_MODES, S_MODES, S_MODES,
S_MODES, S_MODES, S_MODES, S_MODES, S_MODES, C_MODES
};

/* Indexed by machine mode: the arc_mode_class bit for that mode. */
unsigned int arc_mode_class [NUM_MACHINE_MODES];

enum reg_class arc_regno_reg_class[FIRST_PSEUDO_REGISTER];
 
/* Fill in arc_mode_class (mode -> class bit, keyed on the mode's size
   and class) and arc_regno_reg_class (hard regno -> register class).  */
static void
arc_init_reg_tables (void)
{
int i;

for (i = 0; i < NUM_MACHINE_MODES; i++)
{
switch (GET_MODE_CLASS (i))
{
case MODE_INT:
case MODE_PARTIAL_INT:
case MODE_COMPLEX_INT:
if (GET_MODE_SIZE (i) <= 4)
arc_mode_class[i] = 1 << (int) S_MODE;
else if (GET_MODE_SIZE (i) == 8)
arc_mode_class[i] = 1 << (int) D_MODE;
else if (GET_MODE_SIZE (i) == 16)
arc_mode_class[i] = 1 << (int) T_MODE;
else if (GET_MODE_SIZE (i) == 32)
arc_mode_class[i] = 1 << (int) O_MODE;
else
arc_mode_class[i] = 0;
break;
case MODE_FLOAT:
case MODE_COMPLEX_FLOAT:
if (GET_MODE_SIZE (i) <= 4)
arc_mode_class[i] = 1 << (int) SF_MODE;
else if (GET_MODE_SIZE (i) == 8)
arc_mode_class[i] = 1 << (int) DF_MODE;
else if (GET_MODE_SIZE (i) == 16)
arc_mode_class[i] = 1 << (int) TF_MODE;
else if (GET_MODE_SIZE (i) == 32)
arc_mode_class[i] = 1 << (int) OF_MODE;
else
arc_mode_class[i] = 0;
break;
case MODE_CC:
arc_mode_class[i] = 1 << (int) C_MODE;
break;
default:
arc_mode_class[i] = 0;
break;
}
}

/* Regs 0-59 are general, 60 is the loop counter, 61 the CC register
(which must not be allocated), everything above is unallocatable. */
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
{
if (i < 60)
arc_regno_reg_class[i] = GENERAL_REGS;
else if (i == 60)
arc_regno_reg_class[i] = LPCOUNT_REG;
else if (i == 61)
arc_regno_reg_class[i] = NO_REGS /* CC_REG: must be NO_REGS */;
else
arc_regno_reg_class[i] = NO_REGS;
}
}
/* ARC specific attribute support.
 
The ARC has these attributes:
interrupt - for interrupt functions
*/
 
/* Table of machine attributes.  "interrupt" takes exactly one argument
   (the ilink register to return through) and may only appear on a
   declaration; arc_handle_interrupt_attribute validates the argument.  */
const struct attribute_spec arc_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  { "interrupt", 1, 1, true, false, false, arc_handle_interrupt_attribute },
  { NULL, 0, 0, false, false, false, NULL }
};
 
/* Handle an "interrupt" attribute; arguments as in
   struct attribute_spec.handler.  The single argument must be the
   string "ilink1" or "ilink2"; otherwise warn and drop the attribute.  */
static tree
arc_handle_interrupt_attribute (tree *node ATTRIBUTE_UNUSED,
				tree name,
				tree args,
				int flags ATTRIBUTE_UNUSED,
				bool *no_add_attrs)
{
  tree arg = TREE_VALUE (args);

  if (TREE_CODE (arg) != STRING_CST)
    {
      warning (OPT_Wattributes,
	       "argument of %qs attribute is not a string constant",
	       IDENTIFIER_POINTER (name));
      *no_add_attrs = true;
    }
  else
    {
      const char *which = TREE_STRING_POINTER (arg);

      if (strcmp (which, "ilink1") != 0 && strcmp (which, "ilink2") != 0)
	{
	  warning (OPT_Wattributes,
		   "argument of %qs attribute is not \"ilink1\" or \"ilink2\"",
		   IDENTIFIER_POINTER (name));
	  *no_add_attrs = true;
	}
    }

  return NULL_TREE;
}
 
/* Acceptable arguments to the call insn: a symbolic reference, a
   legitimate integer constant, or a register.  */

int
call_address_operand (rtx op, enum machine_mode mode)
{
  if (GET_CODE (op) == REG)
    return 1;
  if (GET_CODE (op) == CONST_INT)
    return LEGITIMATE_CONSTANT_P (op) != 0;
  return symbolic_operand (op, mode);
}
 
/* Like call_address_operand, but the address is wrapped in a MEM.  */
int
call_operand (rtx op, enum machine_mode mode)
{
  return (GET_CODE (op) == MEM
	  && call_address_operand (XEXP (op, 0), mode));
}
 
/* Returns 1 if OP is a symbol reference (SYMBOL_REF, LABEL_REF or
   CONST wrapper).  */

int
symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  enum rtx_code code = GET_CODE (op);

  return (code == SYMBOL_REF || code == LABEL_REF || code == CONST) ? 1 : 0;
}
 
/* Return truth value of statement that OP is a symbolic memory
   operand of mode MODE: a MEM (possibly under a SUBREG) whose address
   is a symbol, label, or CONST.  */

int
symbolic_memory_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx addr;

  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  if (GET_CODE (op) != MEM)
    return 0;

  addr = XEXP (op, 0);
  return (GET_CODE (addr) == SYMBOL_REF
	  || GET_CODE (addr) == CONST
	  || GET_CODE (addr) == LABEL_REF);
}
 
/* Return true if OP is a short immediate (shimm) value: a CONST_INT
   that passes the SMALL_INT range check.  */

int
short_immediate_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return GET_CODE (op) == CONST_INT && SMALL_INT (INTVAL (op));
}
 
/* Return true if OP will require a long immediate (limm) value.
   This is currently only used when calculating length attributes.  */

int
long_immediate_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  enum rtx_code code = GET_CODE (op);

  /* Symbolic references always need a limm.  */
  if (code == SYMBOL_REF || code == LABEL_REF || code == CONST)
    return 1;

  /* Integers need one only when they do not fit a shimm.  */
  if (code == CONST_INT)
    return !SMALL_INT (INTVAL (op));

  /* CONST_DOUBLEs can happen because large unsigned 32 bit constants
     are represented this way (the multiplication patterns can cause
     these to be generated).  They also occur for SFmode values.  */
  return code == CONST_DOUBLE;
}
 
/* Return true if OP is a MEM that when used as a load or store address will
   require an 8 byte insn.
   Load and store instructions don't allow the same possibilities but they're
   similar enough that this one function will do.
   This is currently only used when calculating length attributes.  */

int
long_immediate_loadstore_operand (rtx op,
				  enum machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx addr;
  enum rtx_code code;

  if (GET_CODE (op) != MEM)
    return 0;

  addr = XEXP (op, 0);
  code = GET_CODE (addr);

  /* Symbolic addresses always need the limm.  */
  if (code == SYMBOL_REF || code == LABEL_REF || code == CONST)
    return 1;

  /* A constant address must be handled as "st c,[limm]" (ditto for
     load).  Technically, the assembler could translate some
     possibilities to "st c,[limm/2 + limm/2]" if limm/2 will fit in a
     shimm, but we don't assume that it does.  Likewise, CONST_DOUBLE
     addresses can happen because large unsigned 32 bit constants are
     represented that way, and for SFmode values.  */
  if (code == CONST_INT || code == CONST_DOUBLE)
    return 1;

  /* reg+offset only needs the long form when the offset isn't shimm.  */
  if (code == PLUS)
    return (GET_CODE (XEXP (addr, 1)) == CONST_INT
	    && !SMALL_INT (INTVAL (XEXP (addr, 1))));

  /* Plain register (and anything else): short form.  */
  return 0;
}
 
/* Return true if OP is an acceptable argument for a single word
   move source.  */

int
move_src_operand (rtx op, enum machine_mode mode)
{
  enum rtx_code code = GET_CODE (op);

  if (code == SYMBOL_REF || code == LABEL_REF || code == CONST)
    return 1;

  if (code == CONST_INT)
    return LARGE_INT (INTVAL (op));

  if (code == CONST_DOUBLE)
    {
      /* We can handle DImode integer constants in SImode if the value
	 (signed or unsigned) will fit in 32 bits.  This is needed because
	 large unsigned 32 bit constants are represented as CONST_DOUBLEs.  */
      if (mode == SImode)
	return arc_double_limm_p (op);
      /* We can handle 32 bit floating point constants.  */
      if (mode == SFmode)
	return GET_MODE (op) == SFmode;
      return 0;
    }

  if (code == REG)
    return register_operand (op, mode);

  if (code == SUBREG)
    {
      /* (subreg (mem ...) ...) can occur here if the inner part was once a
	 pseudo-reg and is now a stack slot.  */
      if (GET_CODE (SUBREG_REG (op)) == MEM)
	return address_operand (XEXP (SUBREG_REG (op), 0), mode);
      return register_operand (op, mode);
    }

  if (code == MEM)
    return address_operand (XEXP (op, 0), mode);

  return 0;
}
 
/* Return true if OP is an acceptable argument for a double word
   move source.  */

int
move_double_src_operand (rtx op, enum machine_mode mode)
{
  enum rtx_code code = GET_CODE (op);

  if (code == REG)
    return register_operand (op, mode);

  if (code == SUBREG)
    {
      /* (subreg (mem ...) ...) can occur here if the inner part was once a
	 pseudo-reg and is now a stack slot.  */
      if (GET_CODE (SUBREG_REG (op)) == MEM)
	return move_double_src_operand (SUBREG_REG (op), mode);
      return register_operand (op, mode);
    }

  if (code == MEM)
    {
      rtx addr = XEXP (op, 0);

      /* Disallow auto inc/dec for now.  */
      if (GET_CODE (addr) == PRE_DEC || GET_CODE (addr) == PRE_INC)
	return 0;
      return address_operand (addr, mode);
    }

  if (code == CONST_INT || code == CONST_DOUBLE)
    return 1;

  return 0;
}
 
/* Return true if OP is an acceptable argument for a move destination:
   a register, a MEM with a valid address, or a SUBREG of either.  */

int
move_dest_operand (rtx op, enum machine_mode mode)
{
  enum rtx_code code = GET_CODE (op);

  if (code == REG)
    return register_operand (op, mode);

  if (code == SUBREG)
    {
      /* (subreg (mem ...) ...) can occur here if the inner part was once a
	 pseudo-reg and is now a stack slot.  */
      if (GET_CODE (SUBREG_REG (op)) == MEM)
	return address_operand (XEXP (SUBREG_REG (op), 0), mode);
      return register_operand (op, mode);
    }

  if (code == MEM)
    return address_operand (XEXP (op, 0), mode);

  return 0;
}
 
/* Return true if OP is valid load with update operand: a MEM of the
   right mode whose address is (plus reg nonmemory).  */

int
load_update_operand (rtx op, enum machine_mode mode)
{
  rtx addr;

  if (GET_CODE (op) != MEM || GET_MODE (op) != mode)
    return 0;

  addr = XEXP (op, 0);
  return (GET_CODE (addr) == PLUS
	  && GET_MODE (addr) == Pmode
	  && register_operand (XEXP (addr, 0), Pmode)
	  && nonmemory_operand (XEXP (addr, 1), Pmode));
}
 
/* Return true if OP is valid store with update operand: a MEM of the
   right mode whose address is (plus reg shimm-const).  Unlike the load
   form, the offset must be a short immediate.  */

int
store_update_operand (rtx op, enum machine_mode mode)
{
  rtx addr;

  if (GET_CODE (op) != MEM || GET_MODE (op) != mode)
    return 0;

  addr = XEXP (op, 0);
  return (GET_CODE (addr) == PLUS
	  && GET_MODE (addr) == Pmode
	  && register_operand (XEXP (addr, 0), Pmode)
	  && GET_CODE (XEXP (addr, 1)) == CONST_INT
	  && SMALL_INT (INTVAL (XEXP (addr, 1))));
}
 
/* Return true if OP is a non-volatile non-immediate operand.
   Volatile memory refs require a special "cache-bypass" instruction
   and only the standard movXX patterns are set up to handle them.  */

int
nonvol_nonimm_operand (rtx op, enum machine_mode mode)
{
  int volatile_mem_p = GET_CODE (op) == MEM && MEM_VOLATILE_P (op);

  return !volatile_mem_p && nonimmediate_operand (op, mode);
}
 
/* Accept integer operands in the range -0x80000000..0x7fffffff.  We have
   to check the range carefully since this predicate is used in DImode
   contexts.  */

int
const_sint32_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  /* All allowed constants will fit a CONST_INT.  */
  if (GET_CODE (op) != CONST_INT)
    return 0;
  return INTVAL (op) >= (-0x7fffffff - 1) && INTVAL (op) <= 0x7fffffff;
}
 
/* Accept integer operands in the range 0..0xffffffff.  We have to check the
   range carefully since this predicate is used in DImode contexts.  Also, we
   need some extra crud to make it work when hosted on 64-bit machines.  */

int
const_uint32_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
#if HOST_BITS_PER_WIDE_INT > 32
  /* All allowed constants will fit a CONST_INT.  */
  return (GET_CODE (op) == CONST_INT
	  && (INTVAL (op) >= 0 && INTVAL (op) <= 0xffffffffL));
#else
  /* On a 32-bit host, values with bit 31 set cannot be represented as
     a non-negative CONST_INT, so they appear as CONST_DOUBLEs with a
     zero high word instead.  */
  return ((GET_CODE (op) == CONST_INT && INTVAL (op) >= 0)
	  || (GET_CODE (op) == CONST_DOUBLE && CONST_DOUBLE_HIGH (op) == 0));
#endif
}
 
/* Return 1 if OP is a comparison operator valid for the mode of CC.
   This allows the use of MATCH_OPERATOR to recognize all the branch insns.

   Some insns only set a few bits in the condition code.  So only allow those
   comparisons that use the bits that are valid.  */

int
proper_comparison_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  enum rtx_code code;
  enum machine_mode cc_mode;

  if (!COMPARISON_P (op))
    return 0;

  code = GET_CODE (op);
  cc_mode = GET_MODE (XEXP (op, 0));

  /* CCZN only records zero/negative: equality tests only.  */
  if (cc_mode == CCZNmode)
    return code == EQ || code == NE;

  /* CCZNC adds carry, which also covers the unsigned orderings.  */
  if (cc_mode == CCZNCmode)
    return (code == EQ || code == NE
	    || code == LTU || code == GEU || code == GTU || code == LEU);

  /* Full CCmode: everything is valid.  */
  return 1;
}
/* Misc. utilities.  */

/* X and Y are two things to compare using CODE.  Emit the compare insn and
   return the rtx for the cc reg in the proper mode.  */

rtx
gen_compare_reg (enum rtx_code code, rtx x, rtx y)
{
  /* Pick CCmode/CCZNmode/CCZNCmode according to which flag bits the
     comparison actually needs.  */
  enum machine_mode mode = SELECT_CC_MODE (code, x, y);
  rtx cc_reg;

  /* Hard register 61 is the condition-code register (kept in NO_REGS by
     arc_init_reg_tables).  */
  cc_reg = gen_rtx_REG (mode, 61);

  emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
			  gen_rtx_COMPARE (mode, x, y)));

  return cc_reg;
}
 
/* Return 1 if VALUE, a const_double, will fit in a limm (4 byte number).
   We assume the value can be either signed or unsigned.  */

int
arc_double_limm_p (rtx value)
{
  HOST_WIDE_INT low, high;

  gcc_assert (GET_CODE (value) == CONST_DOUBLE);

  low = CONST_DOUBLE_LOW (value);
  high = CONST_DOUBLE_HIGH (value);

  if (low & 0x80000000)
    {
      /* Bit 31 of the low word is set.  The value fits if either it is
	 a plain unsigned 32-bit number (high word zero), or it is a
	 sign-extended negative 32-bit number: all bits of the low word
	 from bit 31 up are set and the high word is all ones.  */
      return (((unsigned HOST_WIDE_INT) low <= 0xffffffff && high == 0)
	      || (((low & - (unsigned HOST_WIDE_INT) 0x80000000)
		   == - (unsigned HOST_WIDE_INT) 0x80000000)
		  && high == -1));
    }
  else
    {
      /* Bit 31 clear: a non-negative value fits iff it is at most
	 0x7fffffff and the high word is zero.  */
      return (unsigned HOST_WIDE_INT) low <= 0x7fffffff && high == 0;
    }
}
/* Do any needed setup for a variadic function.  For the ARC, we must
   create a register parameter block, and then copy any anonymous arguments
   in registers to memory.

   CUM has not been updated for the last named argument which has type TYPE
   and mode MODE, and we rely on this fact.

   We do things a little weird here.  We're supposed to only allocate space
   for the anonymous arguments.  However we need to keep the stack eight byte
   aligned.  So we round the space up if necessary, and leave it to va_start
   to compensate.  */

static void
arc_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
			    enum machine_mode mode,
			    tree type ATTRIBUTE_UNUSED,
			    int *pretend_size,
			    int no_rtl)
{
  int first_anon_arg;

  /* All BLKmode values are passed by reference.  */
  gcc_assert (mode != BLKmode);

  /* Index of the first parameter register past the last named argument
     (the named argument may span several registers).  */
  first_anon_arg = *cum + ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1)
			   / UNITS_PER_WORD);

  if (first_anon_arg < MAX_ARC_PARM_REGS && !no_rtl)
    {
      /* Note that first_reg_offset < MAX_ARC_PARM_REGS.  */
      int first_reg_offset = first_anon_arg;
      /* Size in words to "pretend" allocate.  */
      int size = MAX_ARC_PARM_REGS - first_reg_offset;
      /* Extra slop to keep stack eight byte aligned.  */
      int align_slop = size & 1;
      rtx regblock;

      /* Dump the anonymous-argument registers into the pretend-arg save
	 area, placed just above FIRST_PARM_OFFSET plus any alignment
	 padding (va_start compensates for the padding).  */
      regblock = gen_rtx_MEM (BLKmode,
			      plus_constant (arg_pointer_rtx,
					     FIRST_PARM_OFFSET (0)
					     + align_slop * UNITS_PER_WORD));
      set_mem_alias_set (regblock, get_varargs_alias_set ());
      set_mem_align (regblock, BITS_PER_WORD);
      move_block_from_reg (first_reg_offset, regblock,
			   MAX_ARC_PARM_REGS - first_reg_offset);

      *pretend_size = ((MAX_ARC_PARM_REGS - first_reg_offset + align_slop)
		       * UNITS_PER_WORD);
    }
}
/* Cost functions.  */

/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
arc_rtx_costs (rtx x, int code, int outer_code ATTRIBUTE_UNUSED, int *total)
{
  switch (code)
    {
      /* Small integers are as cheap as registers.  4 byte values can
	 be fetched as immediate constants - let's give that the cost
	 of an extra insn.  */
    case CONST_INT:
      if (SMALL_INT (INTVAL (x)))
	{
	  *total = 0;
	  return true;
	}
      /* FALLTHRU */

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = COSTS_N_INSNS (1);
      return true;

    case CONST_DOUBLE:
      {
	/* Each word that doesn't fit a shimm costs one extra insn.
	   NOTE(review): split_double returns the words in memory order,
	   so "high"/"low" may be swapped depending on endianness -- the
	   sum is the same either way.  */
	rtx high, low;
	split_double (x, &high, &low);
	*total = COSTS_N_INSNS (!SMALL_INT (INTVAL (high))
				+ !SMALL_INT (INTVAL (low)));
	return true;
      }

    /* Encourage synth_mult to find a synthetic multiply when reasonable.
       If we need more than 12 insns to do a multiply, then go out-of-line,
       since the call overhead will be < 10% of the cost of the multiply.  */
    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      if (TARGET_SHIFTER)
	/* Barrel shifter present: a single insn.  */
	*total = COSTS_N_INSNS (1);
      else if (GET_CODE (XEXP (x, 1)) != CONST_INT)
	/* Variable count without a barrel shifter: a loop.  */
	*total = COSTS_N_INSNS (16);
      else
	/* Constant count: one single-bit shift per bit position.  */
	*total = COSTS_N_INSNS (INTVAL (XEXP ((x), 1)));
      return false;

    default:
      return false;
    }
}
 
 
/* Provide the costs of an addressing mode that contains ADDR.
   If ADDR is not a valid address, its cost is irrelevant.
   Cheapest (1) is a plain register or reg+shimm; symbolic addresses and
   reg+limm cost 2; anything else falls through to 4.  */

static int
arc_address_cost (rtx addr)
{
  switch (GET_CODE (addr))
    {
    case REG :
      return 1;

    case LABEL_REF :
    case SYMBOL_REF :
    case CONST :
      return 2;

    case PLUS :
      {
	rtx plus0 = XEXP (addr, 0);
	rtx plus1 = XEXP (addr, 1);

	if (GET_CODE (plus0) != REG)
	  break;

	switch (GET_CODE (plus1))
	  {
	  case CONST_INT :
	    /* Apply the shimm range check to the integer value of the
	       displacement.  The previous code passed the rtx pointer
	       itself to SMALL_INT, which tested the pointer's address
	       rather than the constant, making the result meaningless.  */
	    return SMALL_INT (INTVAL (plus1)) ? 1 : 2;
	  case CONST :
	  case SYMBOL_REF :
	  case LABEL_REF :
	    return 2;
	  default:
	    break;
	  }
	break;
      }
    default:
      break;
    }

  return 4;
}
/* Function prologue/epilogue handlers. */
 
/* ARC stack frames look like:
 
Before call After call
+-----------------------+ +-----------------------+
| | | |
high | local variables, | | local variables, |
mem | reg save area, etc. | | reg save area, etc. |
| | | |
+-----------------------+ +-----------------------+
| | | |
| arguments on stack. | | arguments on stack. |
| | | |
SP+16->+-----------------------+FP+48->+-----------------------+
| 4 word save area for | | reg parm save area, |
| return addr, prev %fp | | only created for |
SP+0->+-----------------------+ | variable argument |
| functions |
FP+16->+-----------------------+
| 4 word save area for |
| return addr, prev %fp |
FP+0->+-----------------------+
| |
| local variables |
| |
+-----------------------+
| |
| register save area |
| |
+-----------------------+
| |
| alloca allocations |
| |
+-----------------------+
| |
| arguments on stack |
| |
SP+16->+-----------------------+
low | 4 word save area for |
memory | return addr, prev %fp |
SP+0->+-----------------------+
 
Notes:
1) The "reg parm save area" does not exist for non variable argument fns.
The "reg parm save area" can be eliminated completely if we created our
own va-arc.h, but that has tradeoffs as well (so it's not done). */
 
/* Structure to be filled in by arc_compute_frame_size with register
   save masks, and offsets for the current function.  All sizes are in
   bytes; bit N of gmask is set when hard register N must be
   saved/restored.  */
struct arc_frame_info
{
  unsigned int total_size;	/* # bytes that the entire frame takes up.  */
  unsigned int extra_size;	/* # bytes of extra stuff.  */
  unsigned int pretend_size;	/* # bytes we push and pretend caller did.  */
  unsigned int args_size;	/* # bytes that outgoing arguments take up.  */
  unsigned int reg_size;	/* # bytes needed to store regs.  */
  unsigned int var_size;	/* # bytes that variables take up.  */
  unsigned int reg_offset;	/* Offset from new sp to store regs.  */
  unsigned int gmask;		/* Mask of saved gp registers.  */
  int initialized;		/* Nonzero if frame size already calculated.  */
};
 
/* Current frame information calculated by arc_compute_frame_size. */
static struct arc_frame_info current_frame_info;
 
/* Zero structure to initialize current_frame_info. */
static struct arc_frame_info zero_frame_info;
 
/* Type of function DECL.

   The result is cached.  To reset the cache at the end of a function,
   call with DECL = NULL_TREE.  */

enum arc_function_type
arc_compute_function_type (tree decl)
{
  tree a;
  /* Cached value.  */
  static enum arc_function_type fn_type = ARC_FUNCTION_UNKNOWN;
  /* Last function we were called for.  */
  static tree last_fn = NULL_TREE;

  /* Resetting the cached value?  */
  if (decl == NULL_TREE)
    {
      fn_type = ARC_FUNCTION_UNKNOWN;
      last_fn = NULL_TREE;
      return fn_type;
    }

  if (decl == last_fn && fn_type != ARC_FUNCTION_UNKNOWN)
    return fn_type;

  /* Assume we have a normal function (not an interrupt handler).  */
  fn_type = ARC_FUNCTION_NORMAL;

  /* Now see if this is an interrupt handler.  Walk DECL's own attribute
     list: the previous code walked DECL_ATTRIBUTES of
     current_function_decl, which disagrees with the DECL used as the
     cache key and is wrong whenever DECL is a different function.
     Use is_attribute_p so both the "interrupt" spelling registered in
     arc_attribute_table and the "__interrupt__" spelling are matched
     (the old code only recognized the latter).  */
  for (a = DECL_ATTRIBUTES (decl); a; a = TREE_CHAIN (a))
    {
      tree name = TREE_PURPOSE (a), args = TREE_VALUE (a);

      if (name != NULL_TREE
	  && is_attribute_p ("interrupt", name)
	  && list_length (args) == 1
	  && TREE_CODE (TREE_VALUE (args)) == STRING_CST)
	{
	  tree value = TREE_VALUE (args);

	  /* The attribute handler only lets "ilink1"/"ilink2" through.  */
	  if (!strcmp (TREE_STRING_POINTER (value), "ilink1"))
	    fn_type = ARC_FUNCTION_ILINK1;
	  else if (!strcmp (TREE_STRING_POINTER (value), "ilink2"))
	    fn_type = ARC_FUNCTION_ILINK2;
	  else
	    gcc_unreachable ();
	  break;
	}
    }

  last_fn = decl;
  return fn_type;
}
 
/* Hard register numbers of the two interrupt link registers and the
   return address (blink) register, plus bit masks for gmask-style
   register sets.  */
#define ILINK1_REGNUM 29
#define ILINK2_REGNUM 30
#define RETURN_ADDR_REGNUM 31
#define FRAME_POINTER_MASK (1 << (FRAME_POINTER_REGNUM))
#define RETURN_ADDR_MASK (1 << (RETURN_ADDR_REGNUM))

/* Tell prologue and epilogue if register REGNO should be saved / restored.
   The return address and frame pointer are treated separately.
   Don't consider them here.  Interrupt handlers must additionally save
   the call-used registers they touch.  */
#define MUST_SAVE_REGISTER(regno, interrupt_p) \
((regno) != RETURN_ADDR_REGNUM && (regno) != FRAME_POINTER_REGNUM \
 && (regs_ever_live[regno] && (!call_used_regs[regno] || interrupt_p)))

/* The return address must be saved whenever it was ever live.  */
#define MUST_SAVE_RETURN_ADDR (regs_ever_live[RETURN_ADDR_REGNUM])
 
/* Return the bytes needed to compute the frame pointer from the current
   stack pointer.

   SIZE is the size needed for local variables.  As a side effect the
   full frame layout is stored into current_frame_info.  */

unsigned int
arc_compute_frame_size (int size /* # of var. bytes allocated.  */)
{
  int regno;
  unsigned int total_size, var_size, args_size, pretend_size, extra_size;
  unsigned int reg_size, reg_offset;
  unsigned int gmask;
  enum arc_function_type fn_type;
  int interrupt_p;

  var_size = size;
  args_size = current_function_outgoing_args_size;
  pretend_size = current_function_pretend_args_size;
  extra_size = FIRST_PARM_OFFSET (0);
  total_size = extra_size + pretend_size + args_size + var_size;
  /* Saved registers live just above the outgoing-argument area.  */
  reg_offset = FIRST_PARM_OFFSET(0) + current_function_outgoing_args_size;
  reg_size = 0;
  gmask = 0;

  /* See if this is an interrupt handler.  Call used registers must be saved
     for them too.  */
  fn_type = arc_compute_function_type (current_function_decl);
  interrupt_p = ARC_INTERRUPT_P (fn_type);

  /* Calculate space needed for registers.
     ??? We ignore the extension registers for now.  */

  for (regno = 0; regno <= 31; regno++)
    {
      if (MUST_SAVE_REGISTER (regno, interrupt_p))
	{
	  reg_size += UNITS_PER_WORD;
	  gmask |= 1 << regno;
	}
    }

  total_size += reg_size;

  /* If the only space to allocate is the fp/blink save area this is an
     empty frame.  However, if we'll be making a function call we need to
     allocate a stack frame for our callee's fp/blink save area.  */
  if (total_size == extra_size
      && !MUST_SAVE_RETURN_ADDR)
    total_size = extra_size = 0;

  /* Round the whole frame up to the stack alignment boundary.  */
  total_size = ARC_STACK_ALIGN (total_size);

  /* Save computed information.  */
  current_frame_info.total_size = total_size;
  current_frame_info.extra_size = extra_size;
  current_frame_info.pretend_size = pretend_size;
  current_frame_info.var_size = var_size;
  current_frame_info.args_size = args_size;
  current_frame_info.reg_size = reg_size;
  current_frame_info.reg_offset = reg_offset;
  current_frame_info.gmask = gmask;
  /* The layout is only final once reload has fixed register usage.  */
  current_frame_info.initialized = reload_completed;

  /* Ok, we're done.  */
  return total_size;
}
/* Common code to save/restore registers.  Emit one OP ("st" or "ld")
   instruction into FILE for every register whose bit is set in GMASK,
   addressing consecutive words starting at BASE_REG + OFFSET.  */

void
arc_save_restore (FILE *file,
		  const char *base_reg,
		  unsigned int offset,
		  unsigned int gmask,
		  const char *op)
{
  int regno;

  if (!gmask)
    return;

  for (regno = 0; regno <= 31; regno++)
    if (gmask & (1L << regno))
      {
	fprintf (file, "\t%s %s,[%s,%d]\n",
		 op, reg_names[regno], base_reg, offset);
	offset += UNITS_PER_WORD;
      }
}
/* Target hook to assemble an integer object.  The ARC version needs to
   emit a special directive for references to labels and function
   symbols.  */

static bool
arc_assemble_integer (rtx x, unsigned int size, int aligned_p)
{
  /* Code addresses (function symbols and labels) get wrapped in the
     assembler's %st(...) operator; only word-sized aligned references
     qualify.  NOTE(review): %st presumably converts the byte address to
     the form branch/loop insns expect (other code here comments
     ">> 2") -- confirm against the ARC assembler manual.  */
  if (size == UNITS_PER_WORD && aligned_p
      && ((GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (x))
	  || GET_CODE (x) == LABEL_REF))
    {
      fputs ("\t.word\t%st(", asm_out_file);
      output_addr_const (asm_out_file, x);
      fputs (")\n", asm_out_file);
      return true;
    }
  /* Everything else takes the default path.  */
  return default_assemble_integer (x, size, aligned_p);
}
/* Set up the stack and frame pointer (if desired) for the function.
   Emission order matters: pretend args, then blink at [sp,4], then the
   old fp at [sp,0], then the frame allocation, then the call-saved
   registers.  */

static void
arc_output_function_prologue (FILE *file, HOST_WIDE_INT size)
{
  const char *sp_str = reg_names[STACK_POINTER_REGNUM];
  const char *fp_str = reg_names[FRAME_POINTER_REGNUM];
  unsigned int gmask = current_frame_info.gmask;
  enum arc_function_type fn_type = arc_compute_function_type (current_function_decl);

  /* If this is an interrupt handler, set up our stack frame.
     ??? Optimize later.  */
  if (ARC_INTERRUPT_P (fn_type))
    {
      fprintf (file, "\t%s interrupt handler\n",
	       ASM_COMMENT_START);
      /* Reserve the 16-byte area the epilogue pops with "add sp,sp,16".  */
      fprintf (file, "\tsub %s,%s,16\n", sp_str, sp_str);
    }

  /* This is only for the human reader.  */
  fprintf (file, "\t%s BEGIN PROLOGUE %s vars= %d, regs= %d, args= %d, extra= %d\n",
	   ASM_COMMENT_START, ASM_COMMENT_START,
	   current_frame_info.var_size,
	   current_frame_info.reg_size / 4,
	   current_frame_info.args_size,
	   current_frame_info.extra_size);

  size = ARC_STACK_ALIGN (size);
  /* Use the cached layout when arc_compute_frame_size has already run.  */
  size = (! current_frame_info.initialized
	  ? arc_compute_frame_size (size)
	  : current_frame_info.total_size);

  /* These cases shouldn't happen.  Catch them now.  */
  gcc_assert (size || !gmask);

  /* Allocate space for register arguments if this is a variadic function.  */
  if (current_frame_info.pretend_size != 0)
    fprintf (file, "\tsub %s,%s,%d\n",
	     sp_str, sp_str, current_frame_info.pretend_size);

  /* The home-grown ABI says link register is saved first.  */
  if (MUST_SAVE_RETURN_ADDR)
    fprintf (file, "\tst %s,[%s,%d]\n",
	     reg_names[RETURN_ADDR_REGNUM], sp_str, UNITS_PER_WORD);

  /* Set up the previous frame pointer next (if we need to).  */
  if (frame_pointer_needed)
    {
      fprintf (file, "\tst %s,[%s]\n", fp_str, sp_str);
      fprintf (file, "\tmov %s,%s\n", fp_str, sp_str);
    }

  /* ??? We don't handle the case where the saved regs are more than 252
     bytes away from sp.  This can be handled by decrementing sp once, saving
     the regs, and then decrementing it again.  The epilogue doesn't have this
     problem as the `ld' insn takes reg+limm values (though it would be more
     efficient to avoid reg+limm).  */

  /* Allocate the stack frame.  The pretend size was already subtracted
     above, so only take off the remainder.  */
  if (size - current_frame_info.pretend_size > 0)
    fprintf (file, "\tsub %s,%s," HOST_WIDE_INT_PRINT_DEC "\n",
	     sp_str, sp_str, size - current_frame_info.pretend_size);

  /* Save any needed call-saved regs (and call-used if this is an
     interrupt handler).  */
  arc_save_restore (file, sp_str, current_frame_info.reg_offset,
		    /* The zeroing of these two bits is unnecessary,
		       but leave this in for clarity.  */
		    gmask & ~(FRAME_POINTER_MASK | RETURN_ADDR_MASK),
		    "st");

  fprintf (file, "\t%s END PROLOGUE\n", ASM_COMMENT_START);
}
/* Do any necessary cleanup after a function to restore stack, frame,
   and regs.  Mirrors arc_output_function_prologue: restore the saved
   registers, reload blink and fp, pop the frame, then emit the return
   with its delay slot.  */

static void
arc_output_function_epilogue (FILE *file, HOST_WIDE_INT size)
{
  rtx epilogue_delay = current_function_epilogue_delay_list;
  int noepilogue = FALSE;
  enum arc_function_type fn_type = arc_compute_function_type (current_function_decl);

  /* This is only for the human reader.  */
  fprintf (file, "\t%s EPILOGUE\n", ASM_COMMENT_START);

  size = ARC_STACK_ALIGN (size);
  size = (!current_frame_info.initialized
	  ? arc_compute_frame_size (size)
	  : current_frame_info.total_size);

  if (size == 0 && epilogue_delay == 0)
    {
      rtx insn = get_last_insn ();

      /* If the last insn was a BARRIER, we don't have to write any code
	 because a jump (aka return) was put there.  */
      if (GET_CODE (insn) == NOTE)
	insn = prev_nonnote_insn (insn);
      if (insn && GET_CODE (insn) == BARRIER)
	noepilogue = TRUE;
    }

  if (!noepilogue)
    {
      unsigned int pretend_size = current_frame_info.pretend_size;
      unsigned int frame_size = size - pretend_size;
      int restored, fp_restored_p;
      /* After an alloca, sp no longer points at our frame, so we must
	 recompute it from fp before restoring anything.  */
      int can_trust_sp_p = !current_function_calls_alloca;
      const char *sp_str = reg_names[STACK_POINTER_REGNUM];
      const char *fp_str = reg_names[FRAME_POINTER_REGNUM];

      /* ??? There are lots of optimizations that can be done here.
	 EG: Use fp to restore regs if it's closer.
	 Maybe in time we'll do them all.  For now, always restore regs from
	 sp, but don't restore sp if we don't have to.  */

      if (!can_trust_sp_p)
	{
	  gcc_assert (frame_pointer_needed);
	  fprintf (file,"\tsub %s,%s,%d\t\t%s sp not trusted here\n",
		   sp_str, fp_str, frame_size, ASM_COMMENT_START);
	}

      /* Restore any saved registers.  */
      arc_save_restore (file, sp_str, current_frame_info.reg_offset,
			/* The zeroing of these two bits is unnecessary,
			   but leave this in for clarity.  */
			current_frame_info.gmask & ~(FRAME_POINTER_MASK | RETURN_ADDR_MASK),
			"ld");

      /* Reload blink from where the prologue stored it: [sp,4] when the
	 frame pointer area is at sp, else [fp,4].  */
      if (MUST_SAVE_RETURN_ADDR)
	fprintf (file, "\tld %s,[%s,%d]\n",
		 reg_names[RETURN_ADDR_REGNUM],
		 frame_pointer_needed ? fp_str : sp_str,
		 UNITS_PER_WORD + (frame_pointer_needed ? 0 : frame_size));

      /* Keep track of how much of the stack pointer we've restored.
	 It makes the following a lot more readable.  */
      restored = 0;
      fp_restored_p = 0;

      /* We try to emit the epilogue delay slot insn right after the load
	 of the return address register so that it can execute with the
	 stack intact.  Secondly, loads are delayed.  */
      /* ??? If stack intactness is important, always emit now.  */
      if (MUST_SAVE_RETURN_ADDR && epilogue_delay != NULL_RTX)
	{
	  final_scan_insn (XEXP (epilogue_delay, 0), file, 1, 1, NULL);
	  epilogue_delay = NULL_RTX;
	}

      if (frame_pointer_needed)
	{
	  /* Try to restore the frame pointer in the delay slot.  We can't,
	     however, if any of these is true.  */
	  if (epilogue_delay != NULL_RTX
	      || !SMALL_INT (frame_size)
	      || pretend_size
	      || ARC_INTERRUPT_P (fn_type))
	    {
	      /* Note that we restore fp and sp here!  */
	      fprintf (file, "\tld.a %s,[%s,%d]\n", fp_str, sp_str, frame_size);
	      restored += frame_size;
	      fp_restored_p = 1;
	    }
	}
      else if (!SMALL_INT (size /* frame_size + pretend_size */)
	       || ARC_INTERRUPT_P (fn_type))
	{
	  fprintf (file, "\tadd %s,%s,%d\n", sp_str, sp_str, frame_size);
	  restored += frame_size;
	}

      /* These must be done before the return insn because the delay slot
	 does the final stack restore.  */
      if (ARC_INTERRUPT_P (fn_type))
	{
	  if (epilogue_delay)
	    {
	      final_scan_insn (XEXP (epilogue_delay, 0), file, 1, 1, NULL);
	    }
	}

      /* Emit the return instruction.  */
      {
	/* Indexed by arc_function_type: normal functions return through
	   blink, interrupt handlers through their ilink register.  */
	static const int regs[4] = {
	  0, RETURN_ADDR_REGNUM, ILINK1_REGNUM, ILINK2_REGNUM
	};

	/* Update the flags, if returning from an interrupt handler.  */
	if (ARC_INTERRUPT_P (fn_type))
	  fprintf (file, "\tj.d.f %s\n", reg_names[regs[fn_type]]);
	else
	  fprintf (file, "\tj.d %s\n", reg_names[regs[fn_type]]);
      }

      /* If the only register saved is the return address, we need a
	 nop, unless we have an instruction to put into it.  Otherwise
	 we don't since reloading multiple registers doesn't reference
	 the register being loaded.  */

      /* Whatever is emitted below fills the branch delay slot of the
	 return jump above, so the final sp fixup still executes.  */
      if (ARC_INTERRUPT_P (fn_type))
	fprintf (file, "\tadd %s,%s,16\n", sp_str, sp_str);
      else if (epilogue_delay != NULL_RTX)
	{
	  gcc_assert (!frame_pointer_needed || fp_restored_p);
	  gcc_assert (restored >= size);
	  final_scan_insn (XEXP (epilogue_delay, 0), file, 1, 1, NULL);
	}
      else if (frame_pointer_needed && !fp_restored_p)
	{
	  gcc_assert (SMALL_INT (frame_size));
	  /* Note that we restore fp and sp here!  */
	  fprintf (file, "\tld.a %s,[%s,%d]\n", fp_str, sp_str, frame_size);
	}
      else if (restored < size)
	{
	  gcc_assert (SMALL_INT (size - restored));
	  fprintf (file, "\tadd %s,%s," HOST_WIDE_INT_PRINT_DEC "\n",
		   sp_str, sp_str, size - restored);
	}
      else
	fprintf (file, "\tnop\n");
    }

  /* Reset state info for each function.  */
  current_frame_info = zero_frame_info;
  arc_compute_function_type (NULL_TREE);
}
/* Define the number of delay slots needed for the function epilogue.

   Interrupt handlers can't have any epilogue delay slots (it's always needed
   for something else, I think).  For normal functions, we have to worry about
   using call-saved regs as they'll be restored before the delay slot insn.
   Functions with non-empty frames already have enough choices for the epilogue
   delay slot so for now we only consider functions with empty frames.  */

int
arc_delay_slots_for_epilogue (void)
{
  if (arc_compute_function_type (current_function_decl) != ARC_FUNCTION_NORMAL)
    return 0;

  if (!current_frame_info.initialized)
    (void) arc_compute_frame_size (get_frame_size ());

  return current_frame_info.total_size == 0 ? 1 : 0;
}
 
/* Return true if TRIAL is a valid insn for the epilogue delay slot.
   Any single length instruction which doesn't reference the stack or frame
   pointer or any call-saved register is OK.  SLOT will always be 0.  */

int
arc_eligible_for_epilogue_delay (rtx trial, int slot)
{
  gcc_assert (!slot);

  /* If registers were saved, presumably there's more than enough
     possibilities for the delay slot.  The alternative is something
     more complicated (of course, if we expanded the epilogue as rtl
     this problem would go away).  */
  /* ??? Note that the gmask test will always be true since only
     functions with empty frames have epilogue delay slots.  See
     arc_delay_slots_for_epilogue.  */
  return (get_attr_length (trial) == 1
	  && current_frame_info.gmask == 0
	  && !reg_mentioned_p (stack_pointer_rtx, PATTERN (trial))
	  && !reg_mentioned_p (frame_pointer_rtx, PATTERN (trial)));
}
/* Return true if OP is a shift operator (arithmetic or logical).  */

int
shift_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  enum rtx_code code = GET_CODE (op);

  return (code == ASHIFT || code == ASHIFTRT || code == LSHIFTRT) ? 1 : 0;
}
 
/* Output the assembler code for doing a shift.
We go to a bit of trouble to generate efficient code as the ARC only has
single bit shifts. This is taken from the h8300 port. We only have one
mode of shifting and can't access individual bytes like the h8300 can, so
this is greatly simplified (at the expense of not generating hyper-
efficient code).
 
This function is not used if the variable shift insns are present. */
 
/* ??? We assume the output operand is the same as operand 1.
This can be optimized (deleted) in the case of 1 bit shifts. */
/* ??? We use the loop register here. We don't use it elsewhere (yet) and
using it here will give us a chance to play with it. */
 
const char *
output_shift (rtx *operands)
{
  /* operands[3] is the shift rtx; operands[2] is the shift count, and
     %4 (operands[4]) is used as the loop counter by the looping
     variants emitted below.  */
  rtx shift = operands[3];
  enum machine_mode mode = GET_MODE (shift);
  enum rtx_code code = GET_CODE (shift);
  const char *shift_one;

  gcc_assert (mode == SImode);

  /* Pick the matching single-bit shift instruction.  */
  switch (code)
    {
    case ASHIFT: shift_one = "asl %0,%0"; break;
    case ASHIFTRT: shift_one = "asr %0,%0"; break;
    case LSHIFTRT: shift_one = "lsr %0,%0"; break;
    default: gcc_unreachable ();
    }

  if (GET_CODE (operands[2]) != CONST_INT)
    {
      /* Variable shift count: always loop.  When optimizing, load the
	 hardware loop count register and branch past the loop (label 2)
	 when the count is zero.  */
      if (optimize)
	{
	  output_asm_insn ("sub.f 0,%2,0", operands);
	  output_asm_insn ("mov lp_count,%2", operands);
	  output_asm_insn ("bz 2f", operands);
	}
      else
	output_asm_insn ("mov %4,%2", operands);
      goto shiftloop;
    }
  else
    {
      int n = INTVAL (operands[2]);

      /* If the count is negative, make it 0.  */
      if (n < 0)
	n = 0;
      /* If the count is too big, truncate it.
	 ANSI says shifts of GET_MODE_BITSIZE are undefined - we choose to
	 do the intuitive thing.  */
      else if (n > GET_MODE_BITSIZE (mode))
	n = GET_MODE_BITSIZE (mode);

      /* First see if we can do them inline (up to 8 single-bit shifts).  */
      if (n <= 8)
	{
	  while (--n >= 0)
	    output_asm_insn (shift_one, operands);
	}
      /* See if we can use a rotate/and.  */
      else if (n == BITS_PER_WORD - 1)
	{
	  switch (code)
	    {
	    case ASHIFT :
	      output_asm_insn ("and %0,%0,1\n\tror %0,%0", operands);
	      break;
	    case ASHIFTRT :
	      /* The ARC doesn't have a rol insn.  Use something else.  */
	      output_asm_insn ("asl.f 0,%0\n\tsbc %0,0,0", operands);
	      break;
	    case LSHIFTRT :
	      /* The ARC doesn't have a rol insn.  Use something else.  */
	      output_asm_insn ("asl.f 0,%0\n\tadc %0,0,0", operands);
	      break;
	    default:
	      break;
	    }
	}
      /* Must loop.  */
      else
	{
	  char buf[100];

	  if (optimize)
	    output_asm_insn ("mov lp_count,%c2", operands);
	  else
	    output_asm_insn ("mov %4,%c2", operands);
	shiftloop:
	  if (optimize)
	    {
	      /* Set up a hardware loop of a single insn by writing the
		 loop body address to lp_start and the following address
		 to lp_end.  Label 1 marks the loop body, label 2 the
		 loop exit (also the target of the "bz 2f" above).  */
	      if (flag_pic)
		sprintf (buf, "lr %%4,[status]\n\tadd %%4,%%4,6\t%s single insn loop start",
			 ASM_COMMENT_START);
	      else
		sprintf (buf, "mov %%4,%%%%st(1f)\t%s (single insn loop start) >> 2",
			 ASM_COMMENT_START);
	      output_asm_insn (buf, operands);
	      output_asm_insn ("sr %4,[lp_start]", operands);
	      output_asm_insn ("add %4,%4,1", operands);
	      output_asm_insn ("sr %4,[lp_end]", operands);
	      output_asm_insn ("nop\n\tnop", operands);
	      if (flag_pic)
		fprintf (asm_out_file, "\t%s single insn loop\n",
			 ASM_COMMENT_START);
	      else
		fprintf (asm_out_file, "1:\t%s single insn loop\n",
			 ASM_COMMENT_START);
	      output_asm_insn (shift_one, operands);
	      fprintf (asm_out_file, "2:\t%s end single insn loop\n",
		       ASM_COMMENT_START);
	    }
	  else
	    {
	      /* Unoptimized: explicit decrement-and-branch loop with %4
		 as the counter.  */
	      fprintf (asm_out_file, "1:\t%s begin shift loop\n",
		       ASM_COMMENT_START);
	      output_asm_insn ("sub.f %4,%4,1", operands);
	      output_asm_insn ("nop", operands);
	      output_asm_insn ("bn.nd 2f", operands);
	      output_asm_insn (shift_one, operands);
	      output_asm_insn ("b.nd 1b", operands);
	      fprintf (asm_out_file, "2:\t%s end shift loop\n",
		       ASM_COMMENT_START);
	    }
	}
    }

  return "";
}
/* Nested function support. */
 
/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNADDR is an RTX for the address of the function's pure code.
   CXT is an RTX for the static chain value for the function.

   The body is intentionally empty: this port emits no trampoline
   initialization here.  NOTE(review): presumably nested-function
   trampolines are unsupported on this target -- confirm before relying
   on nested functions.  */

void
arc_initialize_trampoline (rtx tramp ATTRIBUTE_UNUSED,
			   rtx fnaddr ATTRIBUTE_UNUSED,
			   rtx cxt ATTRIBUTE_UNUSED)
{
}
/* Emit the standard file prologue plus a ".cpu <name>" directive naming
   the cpu selected by -mcpu=, at the top of the assembly file.  */

static void
arc_file_start (void)
{
  default_file_start ();
  fputs ("\t.cpu ", asm_out_file);
  fputs (arc_cpu_string, asm_out_file);
  putc ('\n', asm_out_file);
}
/* Print operand X (an rtx) in assembler syntax to file FILE.
   CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
   For `%' followed by punctuation, CODE is the punctuation and X is null.  */

void
arc_print_operand (FILE *file, rtx x, int code)
{
  switch (code)
    {
    case '#' :
      /* Conditional branches.  For now these are equivalent.  */
    case '*' :
      /* Unconditional branches.  Output the appropriate delay slot suffix.  */
      if (!final_sequence || XVECLEN (final_sequence, 0) == 1)
	{
	  /* There's nothing in the delay slot.  */
	  fputs (".nd", file);
	}
      else
	{
	  rtx jump = XVECEXP (final_sequence, 0, 0);
	  rtx delay = XVECEXP (final_sequence, 0, 1);
	  /* For an annulled branch the suffix depends on whether the
	     delay-slot insn came from the branch target (.jd) or the
	     fall-through path (.nd); otherwise the slot always
	     executes (.d).  */
	  if (INSN_ANNULLED_BRANCH_P (jump))
	    fputs (INSN_FROM_TARGET_P (delay) ? ".jd" : ".nd", file);
	  else
	    fputs (".d", file);
	}
      return;
    case '?' : /* with leading "." */
    case '!' : /* without leading "." */
      /* This insn can be conditionally executed.  See if the ccfsm machinery
	 says it should be conditionalized.  */
      if (arc_ccfsm_state == 3 || arc_ccfsm_state == 4)
	{
	  /* Is this insn in a delay slot?  */
	  if (final_sequence && XVECLEN (final_sequence, 0) == 2)
	    {
	      rtx insn = XVECEXP (final_sequence, 0, 1);

	      /* If the insn is annulled and is from the target path, we need
		 to inverse the condition test.  */
	      if (INSN_ANNULLED_BRANCH_P (insn))
		{
		  if (INSN_FROM_TARGET_P (insn))
		    fprintf (file, "%s%s",
			     code == '?' ? "." : "",
			     arc_condition_codes[ARC_INVERSE_CONDITION_CODE (arc_ccfsm_current_cc)]);
		  else
		    fprintf (file, "%s%s",
			     code == '?' ? "." : "",
			     arc_condition_codes[arc_ccfsm_current_cc]);
		}
	      else
		{
		  /* This insn is executed for either path, so don't
		     conditionalize it at all.  */
		  ; /* nothing to do */
		}
	    }
	  else
	    {
	      /* This insn isn't in a delay slot.  */
	      fprintf (file, "%s%s",
		       code == '?' ? "." : "",
		       arc_condition_codes[arc_ccfsm_current_cc]);
	    }
	}
      return;
    case '~' :
      /* Output a nop if we're between a set of the condition codes,
	 and a conditional branch.  */
      if (last_insn_set_cc_p)
	fputs ("nop\n\t", file);
      return;
    case 'd' :
      /* Condition code mnemonic for comparison X.  */
      fputs (arc_condition_codes[get_arc_condition_code (x)], file);
      return;
    case 'D' :
      /* Inverted condition code mnemonic for comparison X.  */
      fputs (arc_condition_codes[ARC_INVERSE_CONDITION_CODE
				 (get_arc_condition_code (x))],
	     file);
      return;
    case 'R' :
      /* Write second word of DImode or DFmode reference,
	 register or memory.  */
      if (GET_CODE (x) == REG)
	fputs (reg_names[REGNO (x)+1], file);
      else if (GET_CODE (x) == MEM)
	{
	  fputc ('[', file);
	  /* Handle possible auto-increment.  Since it is pre-increment and
	     we have already done it, we can just use an offset of four.  */
	  /* ??? This is taken from rs6000.c I think.  I don't think it is
	     currently necessary, but keep it around.  */
	  if (GET_CODE (XEXP (x, 0)) == PRE_INC
	      || GET_CODE (XEXP (x, 0)) == PRE_DEC)
	    output_address (plus_constant (XEXP (XEXP (x, 0), 0), 4));
	  else
	    output_address (plus_constant (XEXP (x, 0), 4));
	  fputc (']', file);
	}
      else
	output_operand_lossage ("invalid operand to %%R code");
      return;
    case 'S' :
      /* Function symbols and labels get the %st() wrapper; any other
	 operand falls through to the generic handling below.  */
      if ((GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (x))
	  || GET_CODE (x) == LABEL_REF)
	{
	  fprintf (file, "%%st(");
	  output_addr_const (file, x);
	  fprintf (file, ")");
	  return;
	}
      break;
    case 'H' :
    case 'L' :
      if (GET_CODE (x) == REG)
	{
	  /* L = least significant word, H = most significant word */
	  if ((TARGET_BIG_ENDIAN != 0) ^ (code == 'L'))
	    fputs (reg_names[REGNO (x)], file);
	  else
	    fputs (reg_names[REGNO (x)+1], file);
	}
      else if (GET_CODE (x) == CONST_INT
	       || GET_CODE (x) == CONST_DOUBLE)
	{
	  rtx first, second;

	  split_double (x, &first, &second);
	  fprintf (file, "0x%08lx",
		   (long)(code == 'L' ? INTVAL (first) : INTVAL (second)));
	}
      else
	output_operand_lossage ("invalid operand to %%H/%%L code");
      return;
    case 'A' :
      {
	/* Print a floating-point constant in decimal.  */
	char str[30];

	gcc_assert (GET_CODE (x) == CONST_DOUBLE
		    && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT);

	real_to_decimal (str, CONST_DOUBLE_REAL_VALUE (x), sizeof (str), 0, 1);
	fprintf (file, "%s", str);
	return;
      }
    case 'U' :
      /* Output a load/store with update indicator if appropriate.  */
      if (GET_CODE (x) == MEM)
	{
	  if (GET_CODE (XEXP (x, 0)) == PRE_INC
	      || GET_CODE (XEXP (x, 0)) == PRE_DEC)
	    fputs (".a", file);
	}
      else
	output_operand_lossage ("invalid operand to %%U code");
      return;
    case 'V' :
      /* Output cache bypass indicator for a load/store insn.  Volatile memory
	 refs are defined to use the cache bypass mechanism.  */
      if (GET_CODE (x) == MEM)
	{
	  if (MEM_VOLATILE_P (x))
	    fputs (".di", file);
	}
      else
	output_operand_lossage ("invalid operand to %%V code");
      return;
    case 0 :
      /* Do nothing special.  */
      break;
    default :
      /* Unknown flag.  */
      output_operand_lossage ("invalid operand output code");
    }

  /* Generic handling for operands with no (or an unhandled) modifier.  */
  switch (GET_CODE (x))
    {
    case REG :
      fputs (reg_names[REGNO (x)], file);
      break;
    case MEM :
      fputc ('[', file);
      /* For a pre-inc/dec address, print the address adjusted by the
	 access size; the side effect itself is assumed to have been
	 emitted already (cf. the %R case above).  */
      if (GET_CODE (XEXP (x, 0)) == PRE_INC)
	output_address (plus_constant (XEXP (XEXP (x, 0), 0),
				       GET_MODE_SIZE (GET_MODE (x))));
      else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
	output_address (plus_constant (XEXP (XEXP (x, 0), 0),
				       - GET_MODE_SIZE (GET_MODE (x))));
      else
	output_address (XEXP (x, 0));
      fputc (']', file);
      break;
    case CONST_DOUBLE :
      /* We handle SFmode constants here as output_addr_const doesn't.  */
      if (GET_MODE (x) == SFmode)
	{
	  REAL_VALUE_TYPE d;
	  long l;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, x);
	  REAL_VALUE_TO_TARGET_SINGLE (d, l);
	  fprintf (file, "0x%08lx", l);
	  break;
	}
      /* Fall through.  Let output_addr_const deal with it.  */
    default :
      output_addr_const (file, x);
      break;
    }
}
 
/* Print a memory address as an operand to reference that memory location. */
 
void
arc_print_operand_address (FILE *file, rtx addr)
{
  rtx base;
  rtx index = 0;
  int offset = 0;

  switch (GET_CODE (addr))
    {
    case REG :
      fputs (reg_names[REGNO (addr)], file);
      break;

    case SYMBOL_REF :
      /* The %st() branch is disabled (constant-false condition kept
	 from the original ??? experiment).  */
      if (/*???*/ 0 && SYMBOL_REF_FUNCTION_P (addr))
	{
	  fprintf (file, "%%st(");
	  output_addr_const (file, addr);
	  fprintf (file, ")");
	}
      else
	output_addr_const (file, addr);
      break;

    case PLUS :
      /* Split the sum into a base register plus either a constant
	 offset, an index register, or a symbolic index.  */
      if (GET_CODE (XEXP (addr, 0)) == CONST_INT)
	{
	  offset = INTVAL (XEXP (addr, 0));
	  base = XEXP (addr, 1);
	}
      else if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
	{
	  offset = INTVAL (XEXP (addr, 1));
	  base = XEXP (addr, 0);
	}
      else
	{
	  base = XEXP (addr, 0);
	  index = XEXP (addr, 1);
	}
      gcc_assert (GET_CODE (base) == REG);
      fputs (reg_names[REGNO (base)], file);

      if (index == 0)
	{
	  if (offset != 0)
	    fprintf (file, ",%d", offset);
	}
      else if (GET_CODE (index) == REG)
	fprintf (file, ",%s", reg_names[REGNO (index)]);
      else if (GET_CODE (index) == SYMBOL_REF)
	{
	  fputc (',', file);
	  output_addr_const (file, index);
	}
      else
	gcc_unreachable ();
      break;

    case PRE_INC :
    case PRE_DEC :
      /* We shouldn't get here: the mode of the memory object (which says
	 how much to inc/dec by) has been lost by this point.  */
      gcc_unreachable ();
      break;

    default :
      output_addr_const (file, addr);
      break;
    }
}
 
/* Update the compare/branch separation markers: remember whether the
   previous insn set the condition codes, and record whether INSN does.  */

static void
record_cc_ref (rtx insn)
{
  /* Shift the current flag into the "previous insn" slot.  */
  last_insn_set_cc_p = current_insn_set_cc_p;

  switch (get_attr_cond (insn))
    {
    case COND_SET :
    case COND_SET_ZN :
    case COND_SET_ZNC :
      /* Only a single-length cc-setting insn counts.  */
      current_insn_set_cc_p = (get_attr_length (insn) == 1) ? 1 : 0;
      break;
    default :
      current_insn_set_cc_p = 0;
      break;
    }
}
/* Conditional execution support.
 
This is based on the ARM port but for now is much simpler.
 
A finite state machine takes care of noticing whether or not instructions
can be conditionally executed, and thus decrease execution time and code
size by deleting branch instructions. The fsm is controlled by
final_prescan_insn, and controls the actions of PRINT_OPERAND. The patterns
in the .md file for the branch insns also have a hand in this. */
 
/* The state of the fsm controlling condition codes are:
0: normal, do nothing special
1: don't output this insn
2: don't output this insn
3: make insns conditional
4: make insns conditional
 
State transitions (state->state by whom, under what condition):
0 -> 1 final_prescan_insn, if insn is conditional branch
0 -> 2 final_prescan_insn, if the `target' is an unconditional branch
1 -> 3 branch patterns, after having not output the conditional branch
2 -> 4 branch patterns, after having not output the conditional branch
3 -> 0 (*targetm.asm_out.internal_label), if the `target' label is reached
(the target label has CODE_LABEL_NUMBER equal to
arc_ccfsm_target_label).
4 -> 0 final_prescan_insn, if `target' unconditional branch is reached
 
If the jump clobbers the conditions then we use states 2 and 4.
 
A similar thing can be done with conditional return insns.
 
We also handle separating branches from sets of the condition code.
This is done here because knowledge of the ccfsm state is required,
we may not be outputting the branch. */
 
/* See the state-machine description above: drive the ccfsm that replaces
   short conditional branches with conditionally-executed insns.  */

void
arc_final_prescan_insn (rtx insn,
			rtx *opvec ATTRIBUTE_UNUSED,
			int noperands ATTRIBUTE_UNUSED)
{
  /* BODY will hold the body of INSN.  */
  register rtx body = PATTERN (insn);

  /* This will be 1 if trying to repeat the trick (i.e.: do the `else' part of
     an if/then/else), and things need to be reversed.  */
  int reverse = 0;

  /* If we start with a return insn, we only succeed if we find another one.  */
  int seeking_return = 0;
  /* START_INSN will hold the insn from where we start looking.  This is the
     first insn after the following code_label if REVERSE is true.  */
  rtx start_insn = insn;

  /* Update compare/branch separation marker.  */
  record_cc_ref (insn);

  /* Allow -mdebug-ccfsm to turn this off so we can see how well it does.
     We can't do this in macro FINAL_PRESCAN_INSN because its called from
     final_scan_insn which has `optimize' as a local.  */
  if (optimize < 2 || TARGET_NO_COND_EXEC)
    return;

  /* If in state 4, check if the target branch is reached, in order to
     change back to state 0.  */
  if (arc_ccfsm_state == 4)
    {
      if (insn == arc_ccfsm_target_insn)
	{
	  arc_ccfsm_target_insn = NULL;
	  arc_ccfsm_state = 0;
	}
      return;
    }

  /* If in state 3, it is possible to repeat the trick, if this insn is an
     unconditional branch to a label, and immediately following this branch
     is the previous target label which is only used once, and the label this
     branch jumps to is not too far off.  Or in other words "we've done the
     `then' part, see if we can do the `else' part."  */
  if (arc_ccfsm_state == 3)
    {
      if (simplejump_p (insn))
	{
	  start_insn = next_nonnote_insn (start_insn);
	  if (GET_CODE (start_insn) == BARRIER)
	    {
	      /* ??? Isn't this always a barrier?  */
	      start_insn = next_nonnote_insn (start_insn);
	    }
	  if (GET_CODE (start_insn) == CODE_LABEL
	      && CODE_LABEL_NUMBER (start_insn) == arc_ccfsm_target_label
	      && LABEL_NUSES (start_insn) == 1)
	    reverse = TRUE;
	  else
	    return;
	}
      else if (GET_CODE (body) == RETURN)
	{
	  start_insn = next_nonnote_insn (start_insn);
	  if (GET_CODE (start_insn) == BARRIER)
	    start_insn = next_nonnote_insn (start_insn);
	  if (GET_CODE (start_insn) == CODE_LABEL
	      && CODE_LABEL_NUMBER (start_insn) == arc_ccfsm_target_label
	      && LABEL_NUSES (start_insn) == 1)
	    {
	      reverse = TRUE;
	      seeking_return = 1;
	    }
	  else
	    return;
	}
      else
	return;
    }

  if (GET_CODE (insn) != JUMP_INSN)
    return;

  /* This jump might be paralleled with a clobber of the condition codes,
     the jump should always come first.  */
  if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
    body = XVECEXP (body, 0, 0);

  if (reverse
      || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
	  && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
    {
      int insns_skipped = 0, fail = FALSE, succeed = FALSE;
      /* Flag which part of the IF_THEN_ELSE is the LABEL_REF.  */
      int then_not_else = TRUE;
      /* Nonzero if next insn must be the target label.  */
      int next_must_be_target_label_p;
      rtx this_insn = start_insn, label = 0;

      /* Register the insn jumped to.  */
      if (reverse)
	{
	  if (!seeking_return)
	    label = XEXP (SET_SRC (body), 0);
	}
      else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
	label = XEXP (XEXP (SET_SRC (body), 1), 0);
      else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
	{
	  label = XEXP (XEXP (SET_SRC (body), 2), 0);
	  then_not_else = FALSE;
	}
      else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
	seeking_return = 1;
      else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
	{
	  seeking_return = 1;
	  then_not_else = FALSE;
	}
      else
	gcc_unreachable ();

      /* See how many insns this branch skips, and what kind of insns.  If all
	 insns are okay, and the label or unconditional branch to the same
	 label is not too far away, succeed.  */
      for (insns_skipped = 0, next_must_be_target_label_p = FALSE;
	   !fail && !succeed && insns_skipped < MAX_INSNS_SKIPPED;
	   insns_skipped++)
	{
	  rtx scanbody;

	  this_insn = next_nonnote_insn (this_insn);
	  if (!this_insn)
	    break;

	  if (next_must_be_target_label_p)
	    {
	      /* Skip any barriers; only the target label may follow.  */
	      if (GET_CODE (this_insn) == BARRIER)
		continue;
	      if (GET_CODE (this_insn) == CODE_LABEL
		  && this_insn == label)
		{
		  arc_ccfsm_state = 1;
		  succeed = TRUE;
		}
	      else
		fail = TRUE;
	      break;
	    }

	  scanbody = PATTERN (this_insn);

	  switch (GET_CODE (this_insn))
	    {
	    case CODE_LABEL:
	      /* Succeed if it is the target label, otherwise fail since
		 control falls in from somewhere else.  */
	      if (this_insn == label)
		{
		  arc_ccfsm_state = 1;
		  succeed = TRUE;
		}
	      else
		fail = TRUE;
	      break;

	    case BARRIER:
	      /* Succeed if the following insn is the target label.
		 Otherwise fail.
		 If return insns are used then the last insn in a function
		 will be a barrier.  */
	      next_must_be_target_label_p = TRUE;
	      break;

	    case CALL_INSN:
	      /* Can handle a call insn if there are no insns after it.
		 IE: The next "insn" is the target label.  We don't have to
		 worry about delay slots as such insns are SEQUENCE's inside
		 INSN's.  ??? It is possible to handle such insns though.  */
	      if (get_attr_cond (this_insn) == COND_CANUSE)
		next_must_be_target_label_p = TRUE;
	      else
		fail = TRUE;
	      break;

	    case JUMP_INSN:
	      /* If this is an unconditional branch to the same label, succeed.
		 If it is to another label, do nothing.  If it is conditional,
		 fail.  */
	      /* ??? Probably, the test for the SET and the PC are unnecessary.  */

	      if (GET_CODE (scanbody) == SET
		  && GET_CODE (SET_DEST (scanbody)) == PC)
		{
		  if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
		      && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
		    {
		      arc_ccfsm_state = 2;
		      succeed = TRUE;
		    }
		  else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
		    fail = TRUE;
		}
	      else if (GET_CODE (scanbody) == RETURN
		       && seeking_return)
		{
		  arc_ccfsm_state = 2;
		  succeed = TRUE;
		}
	      else if (GET_CODE (scanbody) == PARALLEL)
		{
		  if (get_attr_cond (this_insn) != COND_CANUSE)
		    fail = TRUE;
		}
	      break;

	    case INSN:
	      /* We can only do this with insns that can use the condition
		 codes (and don't set them).  */
	      if (GET_CODE (scanbody) == SET
		  || GET_CODE (scanbody) == PARALLEL)
		{
		  if (get_attr_cond (this_insn) != COND_CANUSE)
		    fail = TRUE;
		}
	      /* We can't handle other insns like sequences.  */
	      else
		fail = TRUE;
	      break;

	    default:
	      break;
	    }
	}

      if (succeed)
	{
	  if ((!seeking_return) && (arc_ccfsm_state == 1 || reverse))
	    arc_ccfsm_target_label = CODE_LABEL_NUMBER (label);
	  else
	    {
	      gcc_assert (seeking_return || arc_ccfsm_state == 2);
	      /* Skip trailing USEs so the target insn is the branch or
		 return itself.  */
	      while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
		{
		  this_insn = next_nonnote_insn (this_insn);
		  gcc_assert (!this_insn
			      || (GET_CODE (this_insn) != BARRIER
				  && GET_CODE (this_insn) != CODE_LABEL));
		}
	      if (!this_insn)
		{
		  /* Oh dear! we ran off the end, give up.  */
		  extract_insn_cached (insn);
		  arc_ccfsm_state = 0;
		  arc_ccfsm_target_insn = NULL;
		  return;
		}
	      arc_ccfsm_target_insn = this_insn;
	    }

	  /* If REVERSE is true, ARC_CCFSM_CURRENT_CC needs to be inverted
	     from what it was.  */
	  if (!reverse)
	    arc_ccfsm_current_cc = get_arc_condition_code (XEXP (SET_SRC (body),
								 0));

	  if (reverse || then_not_else)
	    arc_ccfsm_current_cc = ARC_INVERSE_CONDITION_CODE (arc_ccfsm_current_cc);
	}

      /* Restore recog_data.  Getting the attributes of other insns can
	 destroy this array, but final.c assumes that it remains intact
	 across this call.  */
      extract_insn_cached (insn);
    }
}
 
/* Record that we are currently outputting label NUM with prefix PREFIX.
   If it's the label we're looking for, reset the ccfsm machinery.

   Called from (*targetm.asm_out.internal_label).  */

void
arc_ccfsm_at_label (const char *prefix, int num)
{
  if (arc_ccfsm_state != 3)
    return;
  if (arc_ccfsm_target_label != num || strcmp (prefix, "L") != 0)
    return;

  /* Target label reached: the conditionalized region is finished.  */
  arc_ccfsm_state = 0;
  arc_ccfsm_target_insn = NULL_RTX;
}
 
/* Return nonzero if the current insn, which is a conditional branch,
   is to be deleted (ccfsm states 1 and 2 mean "don't output").  */

int
arc_ccfsm_branch_deleted_p (void)
{
  return (arc_ccfsm_state == 1 || arc_ccfsm_state == 2) ? 1 : 0;
}
 
/* Record a branch isn't output because subsequent insns can be
   conditionalized.  */

void
arc_ccfsm_record_branch_deleted (void)
{
  /* Indicate we're conditionalizing insns now (1 -> 3 or 2 -> 4; see
     the state table above arc_final_prescan_insn).  */
  arc_ccfsm_state += 2;

  /* If the next insn is a subroutine call, we still need a nop between the
     cc setter and user.  We need to undo the effect of calling record_cc_ref
     for the just deleted branch.  */
  current_insn_set_cc_p = last_insn_set_cc_p;
}
/* Implement va_start: VALIST is the va_list object, NEXTARG the address
   of the first anonymous stack argument.  */

void
arc_va_start (tree valist, rtx nextarg)
{
  /* See arc_setup_incoming_varargs for reasons for this oddity.  */
  if (current_function_args_info < 8
      && (current_function_args_info & 1))
    /* An odd number (< 8) of argument words were consumed by named args;
       skip one word.  NOTE(review): presumably this restores doubleword
       alignment of the anonymous args -- confirm against
       arc_setup_incoming_varargs.  */
    nextarg = plus_constant (nextarg, UNITS_PER_WORD);

  std_expand_builtin_va_start (valist, nextarg);
}
 
/* This is how to output a definition of an internal numbered label where
   PREFIX is the class of label and NUM is the number within the class.  */

static void
arc_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
{
  /* Let the ccfsm machinery notice the label before it is emitted.  */
  arc_ccfsm_at_label (prefix, labelno);
  default_internal_label (stream, prefix, labelno);
}
 
/* Worker function for TARGET_ASM_EXTERNAL_LIBCALL.  Currently a no-op;
   the disabled code below sketches per-cpu mangling of libgcc symbol
   names.  NOTE(review): the disabled code references FILE and SYMREF,
   which are not in scope here -- it would need fixing before being
   re-enabled.  */

static void
arc_external_libcall (rtx fun ATTRIBUTE_UNUSED)
{
#if 0
/* On the ARC we want to have libgcc's for multiple cpus in one binary.
   We can't use `assemble_name' here as that will call ASM_OUTPUT_LABELREF
   and we'll get another suffix added on if -mmangle-cpu.  */
  if (TARGET_MANGLE_CPU_LIBGCC)
    {
      fprintf (FILE, "\t.rename\t_%s, _%s%s\n",
	       XSTR (SYMREF, 0), XSTR (SYMREF, 0),
	       arc_mangle_suffix);
    }
#endif
}
 
/* Worker function for TARGET_RETURN_IN_MEMORY.
   Aggregates, variable-sized values, and anything wider than 8 bytes
   are returned in memory.  */

static bool
arc_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT size;

  if (AGGREGATE_TYPE_P (type))
    return true;

  /* int_size_in_bytes returns -1 for a variable-sized type.  */
  size = int_size_in_bytes (type);
  return size == -1 || size > 8;
}
 
/* For ARC, all aggregates and arguments greater than 8 bytes are
   passed by reference.  */

static bool
arc_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
		       enum machine_mode mode, tree type,
		       bool named ATTRIBUTE_UNUSED)
{
  unsigned HOST_WIDE_INT size;

  if (!type)
    size = GET_MODE_SIZE (mode);
  else if (AGGREGATE_TYPE_P (type))
    return true;
  else
    /* A variable-sized type yields -1 here, which wraps to a huge
       unsigned value and is therefore passed by reference too.  */
    size = int_size_in_bytes (type);

  return size > 8;
}
/arc.opt
0,0 → 1,54
; Options for the Argonaut ARC port of the compiler
;
; Copyright (C) 2005, 2007 Free Software Foundation, Inc.
;
; This file is part of GCC.
;
; GCC is free software; you can redistribute it and/or modify it under
; the terms of the GNU General Public License as published by the Free
; Software Foundation; either version 3, or (at your option) any later
; version.
;
; GCC is distributed in the hope that it will be useful, but WITHOUT
; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
; License for more details.
;
; You should have received a copy of the GNU General Public License
; along with GCC; see the file COPYING3. If not see
; <http://www.gnu.org/licenses/>.
 
malign-loops
Target Undocumented Report Mask(ALIGN_LOOPS)
 
mbig-endian
Target Undocumented Report RejectNegative Mask(BIG_ENDIAN)
 
mlittle-endian
Target Undocumented Report RejectNegative InverseMask(BIG_ENDIAN)
 
mmangle-cpu
Target Report Mask(MANGLE_CPU)
Prepend the name of the cpu to all public symbol names
 
; mmangle-cpu-libgcc
; Target Undocumented Mask(MANGLE_CPU_LIBGC)
 
mno-cond-exec
Target Undocumented Report RejectNegative Mask(NO_COND_EXEC)
 
mcpu=
Target RejectNegative Joined Var(arc_cpu_string) Init("base")
-mcpu=CPU Compile code for ARC variant CPU
 
mtext=
Target RejectNegative Joined Var(arc_text_string) Init(ARC_DEFAULT_TEXT_SECTION)
-mtext=SECTION Put functions in SECTION
 
mdata=
Target RejectNegative Joined Var(arc_data_string) Init(ARC_DEFAULT_DATA_SECTION)
-mdata=SECTION Put data in SECTION
 
mrodata=
Target RejectNegative Joined Var(arc_rodata_string) Init(ARC_DEFAULT_RODATA_SECTION)
-mrodata=SECTION Put read-only data in SECTION
/t-arc
0,0 → 1,41
LIB1ASMSRC = arc/lib1funcs.asm
LIB1ASMFUNCS = _mulsi3 _umulsidi3 _udivsi3 _divsi3 _umodsi3 _modsi3 _divmod_tools
 
# We need libgcc routines to be mangled according to which cpu they
# were compiled for.
# ??? -mmangle-cpu passed by default for now.
#LIBGCC2_CFLAGS = -g1 -O2 $(LIBGCC2_INCLUDES) $(GCC_CFLAGS) -mmangle-cpu
 
# We want fine grained libraries, so use the new code to build the
# floating point emulation libraries.
FPBIT = fp-bit.c
DPBIT = dp-bit.c
 
dp-bit.c: $(srcdir)/config/fp-bit.c
echo '#ifndef __big_endian__' > dp-bit.c
echo '#define FLOAT_BIT_ORDER_MISMATCH' >> dp-bit.c
echo '#endif' >> dp-bit.c
cat $(srcdir)/config/fp-bit.c >> dp-bit.c
 
fp-bit.c: $(srcdir)/config/fp-bit.c
echo '#define FLOAT' > fp-bit.c
echo '#ifndef __big_endian__' >> fp-bit.c
echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c
echo '#endif' >> fp-bit.c
cat $(srcdir)/config/fp-bit.c >> fp-bit.c
 
# .init/.fini section routines
 
$(T)crtinit.o: $(srcdir)/config/arc/initfini.c $(GCC_PASSES) $(CONFIG_H)
$(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(CRTSTUFF_T_CFLAGS) \
$(MULTILIB_CFLAGS) -DCRT_INIT -finhibit-size-directive -fno-inline-functions \
-g0 -c $(srcdir)/config/arc/initfini.c -o $(T)crtinit.o
 
$(T)crtfini.o: $(srcdir)/config/arc/initfini.c $(GCC_PASSES) $(CONFIG_H)
$(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(CRTSTUFF_T_CFLAGS) \
-DCRT_FINI $(MULTILIB_CFLAGS) -finhibit-size-directive -fno-inline-functions \
-g0 -c $(srcdir)/config/arc/initfini.c -o $(T)crtfini.o
 
MULTILIB_OPTIONS = EB
MULTILIB_DIRNAMES = be
EXTRA_MULTILIB_PARTS = crtinit.o crtfini.o
/arc-modes.def
0,0 → 1,24
/* Definitions of target machine for GNU compiler, Argonaut ARC cpu.
Copyright (C) 2002, 2007 Free Software Foundation, Inc.
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
 
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
 
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
 
/* Some insns set all condition code flags, some only set the ZNC flags, and
some only set the ZN flags. */
 
CC_MODE (CCZNC);
CC_MODE (CCZN);
/initfini.c
0,0 → 1,160
/* .init/.fini section handling + C++ global constructor/destructor handling.
This file is based on crtstuff.c, sol2-crti.asm, sol2-crtn.asm.
 
Copyright (C) 1995, 1997, 1998 Free Software Foundation, Inc.
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
 
In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file. (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)
 
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
 
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA. */
 
/* Declare a pointer to void function type. */
typedef void (*func_ptr) (void);
 
#ifdef CRT_INIT
 
/* NOTE: In order to be able to support SVR4 shared libraries, we arrange
to have one set of symbols { __CTOR_LIST__, __DTOR_LIST__, __CTOR_END__,
__DTOR_END__ } per root executable and also one set of these symbols
per shared library. So in any given whole process image, we may have
multiple definitions of each of these symbols. In order to prevent
these definitions from conflicting with one another, and in order to
ensure that the proper lists are used for the initialization/finalization
of each individual shared library (respectively), we give these symbols
only internal (i.e. `static') linkage, and we also make it a point to
refer to only the __CTOR_END__ symbol in crtfini.o and the __DTOR_LIST__
symbol in crtinit.o, where they are defined. */
 
static func_ptr __CTOR_LIST__[1] __attribute__ ((section (".ctors")))
= { (func_ptr) (-1) };
 
static func_ptr __DTOR_LIST__[1] __attribute__ ((section (".dtors")))
= { (func_ptr) (-1) };
 
/* Run all the global destructors on exit from the program. */
/* Some systems place the number of pointers in the first word of the
table. On SVR4 however, that word is -1. In all cases, the table is
null-terminated. On SVR4, we start from the beginning of the list and
invoke each per-compilation-unit destructor routine in order
until we find that null.
 
Note that this function MUST be static. There will be one of these
functions in each root executable and one in each shared library, but
although they all have the same code, each one is unique in that it
refers to one particular associated `__DTOR_LIST__' which belongs to the
same particular root executable or shared library file. */
 
static void __do_global_dtors (void)
asm ("__do_global_dtors") __attribute__ ((section (".text")));

/* Walk __DTOR_LIST__ forward, skipping the -1 sentinel in slot 0, and
   invoke each destructor until the terminating null entry.  */
static void
__do_global_dtors (void)
{
  func_ptr *fn;

  for (fn = __DTOR_LIST__ + 1; *fn != 0; fn++)
    (**fn) ();
}
 
/* .init section start.
This must appear at the start of the .init section. */
 
asm ("\n\
.section .init\n\
.global init\n\
.word 0\n\
init:\n\
st blink,[sp,4]\n\
st fp,[sp]\n\
mov fp,sp\n\
sub sp,sp,16\n\
");
 
/* .fini section start.
   This must appear at the start of the .fini section.  */
 
asm ("\n\
.section .fini\n\
.global fini\n\
.word 0\n\
fini:\n\
st blink,[sp,4]\n\
st fp,[sp]\n\
mov fp,sp\n\
sub sp,sp,16\n\
bl.nd __do_global_dtors\n\
");
 
#endif /* CRT_INIT */
 
#ifdef CRT_FINI
 
/* Put a word containing zero at the end of each of our two lists of function
addresses. Note that the words defined here go into the .ctors and .dtors
sections of the crtend.o file, and since that file is always linked in
last, these words naturally end up at the very ends of the two lists
contained in these two sections. */
 
static func_ptr __CTOR_END__[1] __attribute__ ((section (".ctors")))
= { (func_ptr) 0 };
 
static func_ptr __DTOR_END__[1] __attribute__ ((section (".dtors")))
= { (func_ptr) 0 };
 
/* Run all global constructors for the program.
   Note that they are run in reverse order.  */

static void __do_global_ctors (void)
asm ("__do_global_ctors") __attribute__ ((section (".text")));

static void
__do_global_ctors (void)
{
  func_ptr *fn;

  /* Walk backwards from just below __CTOR_END__ until the -1 sentinel
     that begins __CTOR_LIST__.  */
  for (fn = __CTOR_END__ - 1; *fn != (func_ptr) -1; fn--)
    (**fn) ();
}
 
/* .init section end.
This must live at the end of the .init section. */
 
asm ("\n\
.section .init\n\
bl.nd __do_global_ctors\n\
ld blink,[fp,4]\n\
j.d blink\n\
ld.a fp,[sp,16]\n\
");
 
/* .fini section end.
This must live at the end of the .fini section. */
 
asm ("\n\
.section .fini\n\
ld blink,[fp,4]\n\
j.d blink\n\
ld.a fp,[sp,16]\n\
");
 
#endif /* CRT_FINI */
/arc.h
0,0 → 1,1098
/* Definitions of target machine for GNU compiler, Argonaut ARC cpu.
Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2004, 2005,
2007 Free Software Foundation, Inc.
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
 
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
 
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
 
/* ??? This is an old port, and is undoubtedly suffering from bit rot. */
 
/* Things to do:
 
- incscc, decscc?
- print active compiler options in assembler output
*/
 
 
#undef ASM_SPEC
#undef LINK_SPEC
#undef STARTFILE_SPEC
#undef ENDFILE_SPEC
#undef SIZE_TYPE
#undef PTRDIFF_TYPE
#undef WCHAR_TYPE
#undef WCHAR_TYPE_SIZE
#undef ASM_OUTPUT_LABELREF
/* Print subsidiary information on the compiler version in use. */
#define TARGET_VERSION fprintf (stderr, " (arc)")
 
/* Names to predefine in the preprocessor for this target machine. */
/* __arc__ is always defined; __big_endian__ tracks the -EB/-mbig-endian
   setting (and is what LIBGCC2_WORDS_BIG_ENDIAN below keys off); __base__
   is defined only when compiling for the base cpu (arc_cpu_type == 0,
   i.e. no -mcpu= extension selected).  */
#define TARGET_CPU_CPP_BUILTINS() \
do \
{ \
builtin_define ("__arc__"); \
if (TARGET_BIG_ENDIAN) \
builtin_define ("__big_endian__"); \
if (arc_cpu_type == 0) \
builtin_define ("__base__"); \
builtin_assert ("cpu=arc"); \
builtin_assert ("machine=arc"); \
} while (0)
 
/* Pass -mmangle-cpu if we get -mcpu=*.
Doing it this way lets one have it on as default with -mcpu=*,
but also lets one turn it off with -mno-mangle-cpu. */
#define CC1_SPEC "\
%{mcpu=*:-mmangle-cpu} \
%{EB:%{EL:%emay not use both -EB and -EL}} \
%{EB:-mbig-endian} %{EL:-mlittle-endian} \
"
 
#define ASM_SPEC "%{v} %{EB} %{EL}"
 
#define LINK_SPEC "%{v} %{EB} %{EL}"
 
#define STARTFILE_SPEC "%{!shared:crt0.o%s} crtinit.o%s"
 
#define ENDFILE_SPEC "crtfini.o%s"
/* Instruction set characteristics.
These are internal macros, set by the appropriate -mcpu= option. */
 
/* Nonzero means the cpu has a barrel shifter. */
#define TARGET_SHIFTER 0
 
/* Which cpu we're compiling for. */
extern int arc_cpu_type;
 
/* Check if CPU is an extension and set `arc_cpu_type' and `arc_mangle_cpu'
appropriately. The result should be nonzero if the cpu is recognized,
otherwise zero. This is intended to be redefined in a cover file.
This is used by arc_init. */
#define ARC_EXTENSION_CPU(cpu) 0
 
/* Sometimes certain combinations of command options do not make
sense on a particular target machine. You can define a macro
`OVERRIDE_OPTIONS' to take account of this. This macro, if
defined, is executed once just after all the command options have
been parsed.
 
Don't use this macro to turn on various extra optimizations for
`-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
 
 
#define OVERRIDE_OPTIONS \
do { \
/* These need to be done at start up. It's convenient to do them here. */ \
arc_init (); \
} while (0)
/* Target machine storage layout. */
 
/* Define this if most significant bit is lowest numbered
in instructions that operate on numbered bit-fields. */
#define BITS_BIG_ENDIAN 1
 
/* Define this if most significant byte of a word is the lowest numbered. */
#define BYTES_BIG_ENDIAN (TARGET_BIG_ENDIAN)
 
/* Define this if most significant word of a multiword number is the lowest
numbered. */
#define WORDS_BIG_ENDIAN (TARGET_BIG_ENDIAN)
 
/* Define this to set the endianness to use in libgcc2.c, which can
not depend on target_flags. */
#ifdef __big_endian__
#define LIBGCC2_WORDS_BIG_ENDIAN 1
#else
#define LIBGCC2_WORDS_BIG_ENDIAN 0
#endif
 
/* Width of a word, in units (bytes). */
#define UNITS_PER_WORD 4
 
/* Define this macro if it is advisable to hold scalars in registers
in a wider mode than that declared by the program. In such cases,
the value is constrained to be within the bounds of the declared
type, but kept valid in the wider mode. The signedness of the
extension may differ from that of the type. */
#define PROMOTE_MODE(MODE,UNSIGNEDP,TYPE) \
if (GET_MODE_CLASS (MODE) == MODE_INT \
&& GET_MODE_SIZE (MODE) < UNITS_PER_WORD) \
{ \
(MODE) = SImode; \
}
 
/* Allocation boundary (in *bits*) for storing arguments in argument list. */
#define PARM_BOUNDARY 32
 
/* Boundary (in *bits*) on which stack pointer should be aligned. */
#define STACK_BOUNDARY 64
 
/* ALIGN FRAMES on word boundaries */
#define ARC_STACK_ALIGN(LOC) (((LOC)+7) & ~7)
 
/* Allocation boundary (in *bits*) for the code of a function. */
#define FUNCTION_BOUNDARY 32
 
/* Alignment of field after `int : 0' in a structure. */
#define EMPTY_FIELD_BOUNDARY 32
 
/* Every structure's size must be a multiple of this. */
#define STRUCTURE_SIZE_BOUNDARY 8
 
/* A bit-field declared as `int' forces `int' alignment for the struct. */
#define PCC_BITFIELD_TYPE_MATTERS 1
 
/* No data type wants to be aligned rounder than this. */
/* This is bigger than currently necessary for the ARC. If 8 byte floats are
ever added it's not clear whether they'll need such alignment or not. For
now we assume they will. We can always relax it if necessary but the
reverse isn't true. */
#define BIGGEST_ALIGNMENT 64
 
/* The best alignment to use in cases where we have a choice. */
#define FASTEST_ALIGNMENT 32
 
/* Make strings word-aligned so strcpy from constants will be faster. */
#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
((TREE_CODE (EXP) == STRING_CST \
&& (ALIGN) < FASTEST_ALIGNMENT) \
? FASTEST_ALIGNMENT : (ALIGN))
 
/* Make arrays of chars word-aligned for the same reasons. */
#define DATA_ALIGNMENT(TYPE, ALIGN) \
(TREE_CODE (TYPE) == ARRAY_TYPE \
&& TYPE_MODE (TREE_TYPE (TYPE)) == QImode \
&& (ALIGN) < FASTEST_ALIGNMENT ? FASTEST_ALIGNMENT : (ALIGN))
 
/* Set this nonzero if move instructions will actually fail to work
when given unaligned data. */
/* On the ARC the lower address bits are masked to 0 as necessary. The chip
won't croak when given an unaligned address, but the insn will still fail
to produce the correct result. */
#define STRICT_ALIGNMENT 1
/* Layout of source language data types. */
 
#define SHORT_TYPE_SIZE 16
#define INT_TYPE_SIZE 32
#define LONG_TYPE_SIZE 32
#define LONG_LONG_TYPE_SIZE 64
#define FLOAT_TYPE_SIZE 32
#define DOUBLE_TYPE_SIZE 64
#define LONG_DOUBLE_TYPE_SIZE 64
 
/* Define this as 1 if `char' should by default be signed; else as 0. */
#define DEFAULT_SIGNED_CHAR 1
 
#define SIZE_TYPE "long unsigned int"
#define PTRDIFF_TYPE "long int"
#define WCHAR_TYPE "short unsigned int"
#define WCHAR_TYPE_SIZE 16
/* Standard register usage. */
 
/* Number of actual hardware registers.
The hardware registers are assigned numbers for the compiler
from 0 to just below FIRST_PSEUDO_REGISTER.
All registers that the compiler knows about must be given numbers,
even those that are not normally considered general registers. */
/* Registers 61, 62, and 63 are not really registers and we needn't treat
them as such. We still need a register for the condition code. */
#define FIRST_PSEUDO_REGISTER 62
 
/* 1 for registers that have pervasive standard uses
and are not available for the register allocator.
 
0-28 - general purpose registers
29 - ilink1 (interrupt link register)
30 - ilink2 (interrupt link register)
31 - blink (branch link register)
32-59 - reserved for extensions
60 - LP_COUNT
61 - condition code
 
For doc purposes:
61 - short immediate data indicator (setting flags)
62 - long immediate data indicator
63 - short immediate data indicator (not setting flags).
 
The general purpose registers are further broken down into:
0-7 - arguments/results
8-15 - call used
16-23 - call saved
24 - call used, static chain pointer
25 - call used, gptmp
26 - global pointer
27 - frame pointer
28 - stack pointer
 
By default, the extension registers are not available. */
 
#define FIXED_REGISTERS \
{ 0, 0, 0, 0, 0, 0, 0, 0, \
0, 0, 0, 0, 0, 0, 0, 0, \
0, 0, 0, 0, 0, 0, 0, 0, \
0, 0, 0, 1, 1, 1, 1, 0, \
\
1, 1, 1, 1, 1, 1, 1, 1, \
1, 1, 1, 1, 1, 1, 1, 1, \
1, 1, 1, 1, 1, 1, 1, 1, \
1, 1, 1, 1, 1, 1 }
 
/* 1 for registers not available across function calls.
These must include the FIXED_REGISTERS and also any
registers that can be used without being saved.
The latter must include the registers where values are returned
and the register where structure-value addresses are passed.
Aside from that, you can include as many other registers as you like. */
 
#define CALL_USED_REGISTERS \
{ 1, 1, 1, 1, 1, 1, 1, 1, \
1, 1, 1, 1, 1, 1, 1, 1, \
0, 0, 0, 0, 0, 0, 0, 0, \
1, 1, 1, 1, 1, 1, 1, 1, \
\
1, 1, 1, 1, 1, 1, 1, 1, \
1, 1, 1, 1, 1, 1, 1, 1, \
1, 1, 1, 1, 1, 1, 1, 1, \
1, 1, 1, 1, 1, 1 }
 
/* If defined, an initializer for a vector of integers, containing the
numbers of hard registers in the order in which GCC should
prefer to use them (from most preferred to least). */
#define REG_ALLOC_ORDER \
{ 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, \
16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 31, \
32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, \
48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, \
27, 28, 29, 30 }
 
/* Macro to conditionally modify fixed_regs/call_used_regs. */
#define CONDITIONAL_REGISTER_USAGE \
do { \
if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM) \
{ \
fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
} \
} while (0)
 
/* Return number of consecutive hard regs needed starting at reg REGNO
to hold something of mode MODE.
This is ordinarily the length in words of a value of mode MODE
but can be less for certain modes in special long registers. */
#define HARD_REGNO_NREGS(REGNO, MODE) \
((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
 
/* Value is 1 if hard register REGNO can hold a value of machine-mode MODE. */
extern const unsigned int arc_hard_regno_mode_ok[];
extern unsigned int arc_mode_class[];
#define HARD_REGNO_MODE_OK(REGNO, MODE) \
((arc_hard_regno_mode_ok[REGNO] & arc_mode_class[MODE]) != 0)
 
/* A C expression that is nonzero if it is desirable to choose
register allocation so as to avoid move instructions between a
value of mode MODE1 and a value of mode MODE2.
 
If `HARD_REGNO_MODE_OK (R, MODE1)' and `HARD_REGNO_MODE_OK (R,
MODE2)' are ever different for any R, then `MODES_TIEABLE_P (MODE1,
MODE2)' must be zero. */
 
/* Tie QI/HI/SI modes together. */
#define MODES_TIEABLE_P(MODE1, MODE2) \
(GET_MODE_CLASS (MODE1) == MODE_INT \
&& GET_MODE_CLASS (MODE2) == MODE_INT \
&& GET_MODE_SIZE (MODE1) <= UNITS_PER_WORD \
&& GET_MODE_SIZE (MODE2) <= UNITS_PER_WORD)
/* Register classes and constants. */
 
/* Define the classes of registers for register constraints in the
machine description. Also define ranges of constants.
 
One of the classes must always be named ALL_REGS and include all hard regs.
If there is more than one class, another class must be named NO_REGS
and contain no registers.
 
The name GENERAL_REGS must be the name of a class (or an alias for
another name such as ALL_REGS). This is the class of registers
that is allowed by "g" or "r" in a register constraint.
Also, registers outside this class are allocated only when
instructions express preferences for them.
 
The classes must be numbered in nondecreasing order; that is,
a larger-numbered class must never be contained completely
in a smaller-numbered class.
 
For any two classes, it is very desirable that there be another
class that represents their union.
 
It is important that any condition codes have class NO_REGS.
See `register_operand'. */
 
enum reg_class {
NO_REGS, LPCOUNT_REG, GENERAL_REGS, ALL_REGS, LIM_REG_CLASSES
};
 
#define N_REG_CLASSES (int) LIM_REG_CLASSES
 
/* Give names of register classes as strings for dump file. */
#define REG_CLASS_NAMES \
{ "NO_REGS", "LPCOUNT_REG", "GENERAL_REGS", "ALL_REGS" }
 
/* Define which registers fit in which classes.
This is an initializer for a vector of HARD_REG_SET
of length N_REG_CLASSES. */
 
#define REG_CLASS_CONTENTS \
{ {0, 0}, {0, 0x10000000}, {0xffffffff, 0xfffffff}, \
{0xffffffff, 0x1fffffff} }
 
/* The same information, inverted:
Return the class number of the smallest class containing
reg number REGNO. This could be a conditional expression
or could index an array. */
extern enum reg_class arc_regno_reg_class[FIRST_PSEUDO_REGISTER];
#define REGNO_REG_CLASS(REGNO) \
(arc_regno_reg_class[REGNO])
 
/* The class value for index registers, and the one for base regs. */
#define INDEX_REG_CLASS GENERAL_REGS
#define BASE_REG_CLASS GENERAL_REGS
 
/* Get reg_class from a letter such as appears in the machine description. */
#define REG_CLASS_FROM_LETTER(C) \
((C) == 'l' ? LPCOUNT_REG /* ??? needed? */ \
: NO_REGS)
 
/* These assume that REGNO is a hard or pseudo reg number.
They give nonzero only if REGNO is a hard reg of the suitable class
or a pseudo reg currently allocated to a suitable hard reg.
Since they use reg_renumber, they are safe only once reg_renumber
has been allocated, which happens in local-alloc.c. */
#define REGNO_OK_FOR_BASE_P(REGNO) \
((REGNO) < 32 || (unsigned) reg_renumber[REGNO] < 32)
#define REGNO_OK_FOR_INDEX_P(REGNO) \
((REGNO) < 32 || (unsigned) reg_renumber[REGNO] < 32)
 
/* Given an rtx X being reloaded into a reg required to be
in class CLASS, return the class of reg to actually use.
In general this is just CLASS; but on some machines
in some cases it is preferable to use a more restrictive class. */
#define PREFERRED_RELOAD_CLASS(X,CLASS) \
(CLASS)
 
/* Return the maximum number of consecutive registers
needed to represent mode MODE in a register of class CLASS. */
#define CLASS_MAX_NREGS(CLASS, MODE) \
((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
 
/* The letters I, J, K, L, M, N, O, P in a register constraint string
can be used to stand for particular ranges of immediate operands.
This macro defines what the ranges are.
C is the letter, and VALUE is a constant value.
Return 1 if VALUE is in the range specified by C. */
/* 'I' is used for short immediates (always signed).
'J' is used for long immediates.
'K' is used for any constant up to 64 bits (for 64x32 situations?). */
 
/* Nonzero if X is a valid 9-bit signed short immediate: -256 .. 255.
   Local to this file.  The test is carried out in unsigned
   HOST_WIDE_INT so that constants wider than `unsigned int' are not
   silently truncated before the range check: with a 64-bit
   HOST_WIDE_INT host, the old `(unsigned)' cast accepted values such
   as 0x100000080 as short immediates.  */
#define SMALL_INT(X) \
((unsigned HOST_WIDE_INT) ((X) + 0x100) < 0x200)
/* Nonzero if X fits in 32 bits (usable as a long immediate).
   Local to this file.  */
#define LARGE_INT(X) \
((X) >= (-(HOST_WIDE_INT) 0x7fffffff - 1) \
&& (unsigned HOST_WIDE_INT)(X) <= (unsigned HOST_WIDE_INT) 0xffffffff)
 
#define CONST_OK_FOR_LETTER_P(VALUE, C) \
((C) == 'I' ? SMALL_INT (VALUE) \
: (C) == 'J' ? LARGE_INT (VALUE) \
: (C) == 'K' ? 1 \
: 0)
 
/* Similar, but for floating constants, and defining letters G and H.
Here VALUE is the CONST_DOUBLE rtx itself. */
/* 'G' is used for integer values for the multiplication insns where the
operands are extended from 4 bytes to 8 bytes.
'H' is used when any 64 bit constant is allowed. */
#define CONST_DOUBLE_OK_FOR_LETTER_P(VALUE, C) \
((C) == 'G' ? arc_double_limm_p (VALUE) \
: (C) == 'H' ? 1 \
: 0)
 
/* A C expression that defines the optional machine-dependent constraint
letters that can be used to segregate specific types of operands,
usually memory references, for the target machine. It should return 1 if
VALUE corresponds to the operand type represented by the constraint letter
C. If C is not defined as an extra constraint, the value returned should
be 0 regardless of VALUE. */
/* ??? This currently isn't used. Waiting for PIC. */
#if 0
#define EXTRA_CONSTRAINT(VALUE, C) \
((C) == 'R' ? (SYMBOL_REF_FUNCTION_P (VALUE) || GET_CODE (VALUE) == LABEL_REF) \
: 0)
#endif
/* Stack layout and stack pointer usage. */
 
/* Define this macro if pushing a word onto the stack moves the stack
pointer to a smaller address. */
#define STACK_GROWS_DOWNWARD
 
/* Define this to nonzero if the nominal address of the stack frame
is at the high-address end of the local variables;
that is, each additional local variable allocated
goes at a more negative offset in the frame. */
#define FRAME_GROWS_DOWNWARD 1
 
/* Offset within stack frame to start allocating local variables at.
If FRAME_GROWS_DOWNWARD, this is the offset to the END of the
first local allocated. Otherwise, it is the offset to the BEGINNING
of the first local allocated. */
#define STARTING_FRAME_OFFSET 0
 
/* Offset from the stack pointer register to the first location at which
outgoing arguments are placed. */
#define STACK_POINTER_OFFSET FIRST_PARM_OFFSET (0)
 
/* Offset of first parameter from the argument pointer register value. */
/* 4 bytes for each of previous fp, return address, and previous gp.
4 byte reserved area for future considerations. */
#define FIRST_PARM_OFFSET(FNDECL) 16
 
/* A C expression whose value is RTL representing the address in a
stack frame where the pointer to the caller's frame is stored.
Assume that FRAMEADDR is an RTL expression for the address of the
stack frame itself.
 
If you don't define this macro, the default is to return the value
of FRAMEADDR--that is, the stack frame address is also the address
of the stack word that points to the previous frame. */
/* ??? unfinished */
/*define DYNAMIC_CHAIN_ADDRESS (FRAMEADDR)*/
 
/* A C expression whose value is RTL representing the value of the
return address for the frame COUNT steps up from the current frame.
FRAMEADDR is the frame pointer of the COUNT frame, or the frame
pointer of the COUNT - 1 frame if `RETURN_ADDR_IN_PREVIOUS_FRAME'
is defined. */
/* The current return address is in r31. The return address of anything
farther back is at [%fp,4]. */
#if 0 /* The default value should work. */
#define RETURN_ADDR_RTX(COUNT, FRAME) \
(((COUNT) == -1) \
? gen_rtx_REG (Pmode, 31) \
: copy_to_reg (gen_rtx_MEM (Pmode, \
memory_address (Pmode, \
plus_constant ((FRAME), \
UNITS_PER_WORD)))))
#endif
 
/* Register to use for pushing function arguments. */
#define STACK_POINTER_REGNUM 28
 
/* Base register for access to local variables of the function. */
#define FRAME_POINTER_REGNUM 27
 
/* Base register for access to arguments of the function. */
#define ARG_POINTER_REGNUM FRAME_POINTER_REGNUM
 
/* Register in which static-chain is passed to a function. This must
not be a register used by the prologue. */
#define STATIC_CHAIN_REGNUM 24
 
/* A C expression which is nonzero if a function must have and use a
frame pointer. This expression is evaluated in the reload pass.
If its value is nonzero the function will have a frame pointer. */
#define FRAME_POINTER_REQUIRED \
(current_function_calls_alloca)
 
/* C statement to store the difference between the frame pointer
and the stack pointer values immediately after the function prologue. */
#define INITIAL_FRAME_POINTER_OFFSET(VAR) \
((VAR) = arc_compute_frame_size (get_frame_size ()))
/* Function argument passing. */
 
/* If defined, the maximum amount of space required for outgoing
arguments will be computed and placed into the variable
`current_function_outgoing_args_size'. No space will be pushed
onto the stack for each call; instead, the function prologue should
increase the stack frame size by this amount. */
#define ACCUMULATE_OUTGOING_ARGS 1
 
/* Value is the number of bytes of arguments automatically
popped when returning from a subroutine call.
FUNDECL is the declaration node of the function (as a tree),
FUNTYPE is the data type of the function (as a tree),
or for a library call it is an identifier node for the subroutine name.
SIZE is the number of bytes of arguments passed on the stack. */
#define RETURN_POPS_ARGS(DECL, FUNTYPE, SIZE) 0
 
/* Define a data type for recording info about an argument list
during the scan of that argument list. This data type should
hold all necessary information about the function itself
and about the args processed so far, enough to enable macros
such as FUNCTION_ARG to determine where the next arg should go. */
#define CUMULATIVE_ARGS int
 
/* Initialize a variable CUM of type CUMULATIVE_ARGS
for a call to a function whose data type is FNTYPE.
For a library call, FNTYPE is 0. */
#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT, N_NAMED_ARGS) \
((CUM) = 0)
 
/* The number of registers used for parameter passing. Local to this file. */
#define MAX_ARC_PARM_REGS 8
 
/* 1 if N is a possible register number for function argument passing. */
#define FUNCTION_ARG_REGNO_P(N) \
((unsigned) (N) < MAX_ARC_PARM_REGS)
 
/* The ROUND_ADVANCE* macros are local to this file. */
/* Round SIZE up to a word boundary. */
#define ROUND_ADVANCE(SIZE) \
(((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
 
/* Round arg MODE/TYPE up to the next word boundary. */
#define ROUND_ADVANCE_ARG(MODE, TYPE) \
((MODE) == BLKmode \
? ROUND_ADVANCE (int_size_in_bytes (TYPE)) \
: ROUND_ADVANCE (GET_MODE_SIZE (MODE)))
 
/* Round CUM up to the necessary point for argument MODE/TYPE. */
#define ROUND_ADVANCE_CUM(CUM, MODE, TYPE) \
((((MODE) == BLKmode ? TYPE_ALIGN (TYPE) : GET_MODE_BITSIZE (MODE)) \
> BITS_PER_WORD) \
? (((CUM) + 1) & ~1) \
: (CUM))
 
/* Return boolean indicating arg of type TYPE and mode MODE will be passed in
a reg. This includes arguments that have to be passed by reference as the
pointer to them is passed in a reg if one is available (and that is what
we're given).
This macro is only used in this file. */
#define PASS_IN_REG_P(CUM, MODE, TYPE) \
((CUM) < MAX_ARC_PARM_REGS \
&& ((ROUND_ADVANCE_CUM ((CUM), (MODE), (TYPE)) \
+ ROUND_ADVANCE_ARG ((MODE), (TYPE)) \
<= MAX_ARC_PARM_REGS)))
 
/* Determine where to put an argument to a function.
Value is zero to push the argument on the stack,
or a hard register in which to store the argument.
 
MODE is the argument's machine mode.
TYPE is the data type of the argument (as a tree).
This is null for libcalls where that information may
not be available.
CUM is a variable of type CUMULATIVE_ARGS which gives info about
the preceding args and about the function being called.
NAMED is nonzero if this argument is a named parameter
(otherwise it is an extra parameter matching an ellipsis). */
/* On the ARC the first MAX_ARC_PARM_REGS args are normally in registers
and the rest are pushed. */
/* CUM counts argument words already assigned; since argument registers
   start at r0, the (doubleword-aligned, see ROUND_ADVANCE_CUM) word
   count is itself the register number for the next argument.  Returns
   0 (push on stack) once the arg no longer fits in r0-r7.  */
#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
(PASS_IN_REG_P ((CUM), (MODE), (TYPE)) \
? gen_rtx_REG ((MODE), ROUND_ADVANCE_CUM ((CUM), (MODE), (TYPE))) \
: 0)
 
/* Update the data in CUM to advance over an argument
of mode MODE and data type TYPE.
(TYPE is null for libcalls where that information may not be available.)
Alignment padding (ROUND_ADVANCE_CUM) is applied before adding the
argument's own size in words.  */
#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
((CUM) = (ROUND_ADVANCE_CUM ((CUM), (MODE), (TYPE)) \
+ ROUND_ADVANCE_ARG ((MODE), (TYPE))))
 
/* If defined, a C expression that gives the alignment boundary, in bits,
of an argument with the specified mode and type. If it is not defined,
PARM_BOUNDARY is used for all arguments. */
#define FUNCTION_ARG_BOUNDARY(MODE, TYPE) \
(((TYPE) ? TYPE_ALIGN (TYPE) : GET_MODE_BITSIZE (MODE)) <= PARM_BOUNDARY \
? PARM_BOUNDARY \
: 2 * PARM_BOUNDARY)
/* Function results. */
 
/* Define how to find the value returned by a function.
VALTYPE is the data type of the value (as a tree).
If the precise function being called is known, FUNC is its FUNCTION_DECL;
otherwise, FUNC is 0. */
#define FUNCTION_VALUE(VALTYPE, FUNC) gen_rtx_REG (TYPE_MODE (VALTYPE), 0)
 
/* Define how to find the value returned by a library function
assuming the value has mode MODE. */
#define LIBCALL_VALUE(MODE) gen_rtx_REG (MODE, 0)
 
/* 1 if N is a possible register number for a function value
as seen by the caller. */
/* ??? What about r1 in DI/DF values? */
#define FUNCTION_VALUE_REGNO_P(N) ((N) == 0)
 
/* Tell GCC to use TARGET_RETURN_IN_MEMORY. */
#define DEFAULT_PCC_STRUCT_RETURN 0
/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
the stack pointer does not matter. The value is tested only in
functions that have frame pointers.
No definition is equivalent to always zero. */
#define EXIT_IGNORE_STACK 0
 
/* Epilogue delay slots. */
#define DELAY_SLOTS_FOR_EPILOGUE arc_delay_slots_for_epilogue ()
 
#define ELIGIBLE_FOR_EPILOGUE_DELAY(TRIAL, SLOTS_FILLED) \
arc_eligible_for_epilogue_delay (TRIAL, SLOTS_FILLED)
 
/* Output assembler code to FILE to increment profiler label # LABELNO
for profiling a function entry. */
#define FUNCTION_PROFILER(FILE, LABELNO)
/* Trampolines. */
/* ??? This doesn't work yet because GCC will use as the address of a nested
function the address of the trampoline. We need to use that address
right shifted by 2. It looks like we'll need PSImode after all. :-( */
 
/* Output assembler code for a block containing the constant parts
of a trampoline, leaving space for the variable parts. */
/* On the ARC, the trampoline is quite simple as we have 32 bit immediate
constants.
 
mov r24,STATIC
j.nd FUNCTION
*/
/* Per the comment above, the two code words encode
   `mov r24,STATIC' (0x631f7c00) and `j.nd FUNCTION' (0x381f0000);
   each takes a 32-bit long immediate, so each is followed by a zero
   placeholder word that INITIALIZE_TRAMPOLINE later overwrites with
   the static chain value (offset 4) and the function address
   (offset 12).  NOTE(review): opcode words taken on faith from the
   original port — verify against the ARC instruction encoding if the
   template is ever changed.  */
#define TRAMPOLINE_TEMPLATE(FILE) \
do { \
assemble_aligned_integer (UNITS_PER_WORD, GEN_INT (0x631f7c00)); \
assemble_aligned_integer (UNITS_PER_WORD, const0_rtx); \
assemble_aligned_integer (UNITS_PER_WORD, GEN_INT (0x381f0000)); \
assemble_aligned_integer (UNITS_PER_WORD, const0_rtx); \
} while (0)
 
/* Length in units of the trampoline for entering a nested function. */
#define TRAMPOLINE_SIZE 16
 
/* Emit RTL insns to initialize the variable parts of a trampoline.
FNADDR is an RTX for the address of the function's pure code.
CXT is an RTX for the static chain value for the function. */
/* Fill in the two zero limm slots of TRAMPOLINE_TEMPLATE: the static
   chain at byte offset 4 (immediate of the `mov r24,...') and the
   target address at byte offset 12 (immediate of the `j.nd ...'),
   then flush the icache over the trampoline so instruction fetch
   sees the freshly written words.  */
#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \
do { \
emit_move_insn (gen_rtx_MEM (SImode, plus_constant (TRAMP, 4)), CXT); \
emit_move_insn (gen_rtx_MEM (SImode, plus_constant (TRAMP, 12)), FNADDR); \
emit_insn (gen_flush_icache (validize_mem (gen_rtx_MEM (SImode, TRAMP)))); \
} while (0)
/* Addressing modes, and classification of registers for them. */
 
/* Maximum number of registers that can appear in a valid memory address. */
/* The `ld' insn allows 2, but the `st' insn only allows 1. */
#define MAX_REGS_PER_ADDRESS 1
 
/* We have pre inc/dec (load/store with update). */
#define HAVE_PRE_INCREMENT 1
#define HAVE_PRE_DECREMENT 1
 
/* Recognize any constant value that is a valid address. */
#define CONSTANT_ADDRESS_P(X) \
(GET_CODE (X) == LABEL_REF || GET_CODE (X) == SYMBOL_REF \
|| GET_CODE (X) == CONST_INT || GET_CODE (X) == CONST)
 
/* Nonzero if the constant value X is a legitimate general operand.
We can handle any 32 or 64 bit constant. */
/* "1" should work since the largest constant should be a 64 bit critter. */
/* ??? Not sure what to do for 64x32 compiler. */
#define LEGITIMATE_CONSTANT_P(X) 1
 
/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
and check its validity for a certain class.
We have two alternate definitions for each of them.
The usual definition accepts all pseudo regs; the other rejects
them unless they have been allocated suitable hard regs.
The symbol REG_OK_STRICT causes the latter definition to be used.
 
Most source files want to accept pseudo regs in the hope that
they will get allocated to the class that the insn wants them to be in.
Source files for reload pass need to be strict.
After reload, it makes no difference, since pseudo regs have
been eliminated by then. */
 
#ifndef REG_OK_STRICT
 
/* Nonzero if X is a hard reg that can be used as an index
or if it is a pseudo reg. */
/* The unsigned wrap-around trick: for REGNO < 32 the subtraction
   wraps to a huge value (accept: core regs r0-r31); values 32..61
   (extension regs, lp_count, cc) are rejected; values >=
   FIRST_PSEUDO_REGISTER (pseudos) are accepted.  */
#define REG_OK_FOR_INDEX_P(X) \
((unsigned) REGNO (X) - 32 >= FIRST_PSEUDO_REGISTER - 32)
/* Nonzero if X is a hard reg that can be used as a base reg
or if it is a pseudo reg. */
#define REG_OK_FOR_BASE_P(X) \
((unsigned) REGNO (X) - 32 >= FIRST_PSEUDO_REGISTER - 32)
 
#else
 
/* Strict variants: consult reg_renumber via REGNO_OK_FOR_*_P, so
   pseudos are accepted only once allocated to a suitable hard reg.  */
/* Nonzero if X is a hard reg that can be used as an index. */
#define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X))
/* Nonzero if X is a hard reg that can be used as a base reg. */
#define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
 
#endif
 
/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
that is a valid memory address for an instruction.
The MODE argument is the machine mode for the MEM expression
that wants to use this address. */
/* The `ld' insn allows [reg],[reg+shimm],[reg+limm],[reg+reg],[limm]
but the `st' insn only allows [reg],[reg+shimm],[limm].
The only thing we can do is only allow the most strict case `st' and hope
other parts optimize out the restrictions for `ld'. */
 
/* local to this file */
#define RTX_OK_FOR_BASE_P(X) \
(REG_P (X) && REG_OK_FOR_BASE_P (X))
 
/* local to this file */
#define RTX_OK_FOR_INDEX_P(X) \
(0 && /*???*/ REG_P (X) && REG_OK_FOR_INDEX_P (X))
 
/* local to this file */
/* ??? Loads can handle any constant, stores can only handle small ones. */
#define RTX_OK_FOR_OFFSET_P(X) \
(GET_CODE (X) == CONST_INT && SMALL_INT (INTVAL (X)))
 
#define LEGITIMATE_OFFSET_ADDRESS_P(MODE, X) \
(GET_CODE (X) == PLUS \
&& RTX_OK_FOR_BASE_P (XEXP (X, 0)) \
&& (RTX_OK_FOR_INDEX_P (XEXP (X, 1)) \
|| RTX_OK_FOR_OFFSET_P (XEXP (X, 1))))
 
/* Accepted forms (the `st'-insn subset, per the comment above):
   plain base reg; base + short-immediate/index (via
   LEGITIMATE_OFFSET_ADDRESS_P); a 32-bit absolute constant; a
   symbolic address (SYMBOL_REF/LABEL_REF/CONST); and pre-inc/dec of
   a base reg.  Falls through (address rejected) otherwise.  */
#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \
{ if (RTX_OK_FOR_BASE_P (X)) \
goto ADDR; \
if (LEGITIMATE_OFFSET_ADDRESS_P ((MODE), (X))) \
goto ADDR; \
if (GET_CODE (X) == CONST_INT && LARGE_INT (INTVAL (X))) \
goto ADDR; \
if (GET_CODE (X) == SYMBOL_REF \
|| GET_CODE (X) == LABEL_REF \
|| GET_CODE (X) == CONST) \
goto ADDR; \
if ((GET_CODE (X) == PRE_DEC || GET_CODE (X) == PRE_INC) \
/* We're restricted here by the `st' insn. */ \
&& RTX_OK_FOR_BASE_P (XEXP ((X), 0))) \
goto ADDR; \
}
 
/* Go to LABEL if ADDR (a legitimate address expression)
has an effect that depends on the machine mode it is used for. */
#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR, LABEL) \
{ if (GET_CODE (ADDR) == PRE_DEC) \
goto LABEL; \
if (GET_CODE (ADDR) == PRE_INC) \
goto LABEL; \
}
/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
return the mode to be used for the comparison. */
#define SELECT_CC_MODE(OP, X, Y) \
arc_select_cc_mode (OP, X, Y)
 
/* Return nonzero if SELECT_CC_MODE will never return MODE for a
floating point inequality comparison. */
#define REVERSIBLE_CC_MODE(MODE) 1 /*???*/
/* Costs. */
 
/* Compute extra cost of moving data between one register class
and another. */
#define REGISTER_MOVE_COST(MODE, CLASS1, CLASS2) 2
 
/* Compute the cost of moving data between registers and memory. */
/* Memory is 3 times as expensive as registers.
??? Is that the right way to look at it? */
#define MEMORY_MOVE_COST(MODE,CLASS,IN) \
(GET_MODE_SIZE (MODE) <= UNITS_PER_WORD ? 6 : 12)
 
/* The cost of a branch insn. */
/* ??? What's the right value here? Branches are certainly more
expensive than reg->reg moves. */
#define BRANCH_COST 2
 
/* Nonzero if access to memory by bytes is slow and undesirable.
For RISC chips, it means that access to memory by bytes is no
better than access by words when possible, so grab a whole word
and maybe make use of that. */
#define SLOW_BYTE_ACCESS 1
 
/* Define this macro if it is as good or better to call a constant
function address than to call an address kept in a register. */
/* On the ARC, calling through registers is slow. */
#define NO_FUNCTION_CSE
/* Section selection. */
/* WARNING: These section names also appear in dwarfout.c. */
 
/* The names of the text, data, and readonly-data sections are runtime
selectable. */
 
#define ARC_SECTION_FORMAT "\t.section %s"
#define ARC_DEFAULT_TEXT_SECTION ".text"
#define ARC_DEFAULT_DATA_SECTION ".data"
#define ARC_DEFAULT_RODATA_SECTION ".rodata"
 
extern const char *arc_text_section, *arc_data_section, *arc_rodata_section;
 
/* initfini.c uses this in an asm. */
#if defined (CRT_INIT) || defined (CRT_FINI)
#define TEXT_SECTION_ASM_OP "\t.section .text"
#else
#define TEXT_SECTION_ASM_OP arc_text_section
#endif
#define DATA_SECTION_ASM_OP arc_data_section
 
#undef READONLY_DATA_SECTION_ASM_OP
#define READONLY_DATA_SECTION_ASM_OP arc_rodata_section
 
#define BSS_SECTION_ASM_OP "\t.section .bss"
 
/* Define this macro if jump tables (for tablejump insns) should be
output in the text section, along with the assembler instructions.
Otherwise, the readonly data section is used.
This macro is irrelevant if there is no separate readonly data section. */
/*#define JUMP_TABLES_IN_TEXT_SECTION*/
 
/* For DWARF. Marginally different than default so output is "prettier"
(and consistent with above). */
#define PUSHSECTION_ASM_OP "\t.section "
 
/* Tell crtstuff.c we're using ELF. */
#define OBJECT_FORMAT_ELF
/* PIC */
 
/* The register number of the register used to address a table of static
data addresses in memory. In some cases this register is defined by a
processor's ``application binary interface'' (ABI). When this macro
is defined, RTL is generated for this register once, as with the stack
pointer and frame pointer registers. If this macro is not defined, it
is up to the machine-dependent files to allocate such a register (if
necessary). */
#define PIC_OFFSET_TABLE_REGNUM (flag_pic ? 26 : INVALID_REGNUM)
 
/* Define this macro if the register defined by PIC_OFFSET_TABLE_REGNUM is
clobbered by calls. Do not define this macro if PIC_OFFSET_TABLE_REGNUM
is not defined. */
/* This register is call-saved on the ARC. */
/*#define PIC_OFFSET_TABLE_REG_CALL_CLOBBERED*/
 
/* A C expression that is nonzero if X is a legitimate immediate
operand on the target machine when generating position independent code.
You can assume that X satisfies CONSTANT_P, so you need not
check this. You can also assume `flag_pic' is true, so you need not
check it either. You need not define this macro if all constants
(including SYMBOL_REF) can be immediate operands when generating
position independent code. */
/*#define LEGITIMATE_PIC_OPERAND_P(X)*/
/* Control the assembler format that we output. */
 
/* A C string constant describing how to begin a comment in the target
assembler language. The compiler assumes that the comment will
end at the end of the line. */
#define ASM_COMMENT_START ";"
 
/* Output to assembler file text saying following lines
may contain character constants, extra white space, comments, etc.
Empty: the ARC assembler needs no mode switch around inline asm.  */
#define ASM_APP_ON ""
 
/* Output to assembler file text saying following lines
no longer contain unusual constructs. */
#define ASM_APP_OFF ""
 
/* Globalizing directive for a label. */
#define GLOBAL_ASM_OP "\t.global\t"
 
/* This is how to output a reference to a user-level label named NAME.
`assemble_name' uses this. */
/* We mangle all user labels to provide protection from linking code
compiled for different cpus. */
/* We work around a dwarfout.c deficiency by watching for labels from it and
not adding the '_' prefix nor the cpu suffix. There is a comment in
dwarfout.c that says it should be using (*targetm.asm_out.internal_label). */
/* Set by -mcpu= handling; NULL when no cpu-specific mangling is wanted.  */
extern const char *arc_mangle_cpu;
/* Labels beginning ".L" are compiler-internal and are emitted verbatim;
   all other (user-level) labels get a '_' prefix and, under
   -mmangle-cpu, a "<cpu>_" prefix as well.  */
#define ASM_OUTPUT_LABELREF(FILE, NAME) \
do { \
if ((NAME)[0] == '.' && (NAME)[1] == 'L') \
fprintf (FILE, "%s", NAME); \
else \
{ \
fputc ('_', FILE); \
if (TARGET_MANGLE_CPU && arc_mangle_cpu != NULL) \
fprintf (FILE, "%s_", arc_mangle_cpu); \
fprintf (FILE, "%s", NAME); \
} \
} while (0)
 
/* Assembler pseudo-op to equate one value with another. */
/* ??? This is needed because dwarfout.c provides a default definition too
late for defaults.h (which contains the default definition of ASM_OUTPUT_DEF
that we use). */
#define SET_ASM_OP "\t.set\t"
 
/* How to refer to registers in assembler output.
This sequence is indexed by compiler's hard-register-number (see above).
Regs 27-31 have ABI roles (frame ptr, stack ptr, interrupt links, return
address); 61 is the loop counter and 63 the condition-code pseudo reg.  */
#define REGISTER_NAMES \
{"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", \
"r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", \
"r24", "r25", "r26", "fp", "sp", "ilink1", "ilink2", "blink", \
"r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39", \
"r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47", \
"r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55", \
"r56", "r57", "r58", "r59", "lp_count", "cc"}
 
/* Entry to the insn conditionalizer.  Called by final for each insn;
   the ARC implementation (arc.c) uses it to conditionalize insns in
   delay/branch shadows.  */
#define FINAL_PRESCAN_INSN(INSN, OPVEC, NOPERANDS) \
arc_final_prescan_insn (INSN, OPVEC, NOPERANDS)
 
/* A C expression which evaluates to true if CODE is a valid
punctuation character for use in the `PRINT_OPERAND' macro. */
/* Lookup table initialized in arc.c; indexed by the character itself.  */
extern char arc_punct_chars[256];
#define PRINT_OPERAND_PUNCT_VALID_P(CHAR) \
arc_punct_chars[(unsigned char) (CHAR)]
 
/* Print operand X (an rtx) in assembler syntax to file FILE.
CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
For `%' followed by punctuation, CODE is the punctuation and X is null. */
#define PRINT_OPERAND(FILE, X, CODE) \
arc_print_operand (FILE, X, CODE)
 
/* A C compound statement to output to stdio stream STREAM the
assembler syntax for an instruction operand that is a memory
reference whose address is ADDR. ADDR is an RTL expression. */
#define PRINT_OPERAND_ADDRESS(FILE, ADDR) \
arc_print_operand_address (FILE, ADDR)
 
/* This is how to output an element of a case-vector that is absolute.
   The %st() assembler operator selects the symbol's text-section value;
   presumably required by the ARC assembler for code labels — the same
   operator is used for the relative form below.  */
#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \
do { \
char label[30]; \
ASM_GENERATE_INTERNAL_LABEL (label, "L", VALUE); \
fprintf (FILE, "\t.word %%st("); \
assemble_name (FILE, label); \
fprintf (FILE, ")\n"); \
} while (0)
 
/* This is how to output an element of a case-vector that is relative.
   Emits ".word %st(Lvalue-Lrel)"; note `label' is deliberately reused
   for both the target and the anchor label.  */
#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
do { \
char label[30]; \
ASM_GENERATE_INTERNAL_LABEL (label, "L", VALUE); \
fprintf (FILE, "\t.word %%st("); \
assemble_name (FILE, label); \
fprintf (FILE, "-"); \
ASM_GENERATE_INTERNAL_LABEL (label, "L", REL); \
assemble_name (FILE, label); \
fprintf (FILE, ")\n"); \
} while (0)
 
/* The desired alignment for the location counter at the beginning
of a loop. */
/* On the ARC, align loops to 32 byte boundaries (cache line size)
if -malign-loops.  The value is log2 of the alignment: 5 == 2**5 == 32.  */
#define LOOP_ALIGN(LABEL) (TARGET_ALIGN_LOOPS ? 5 : 0)
 
/* This is how to output an assembler line
that says to advance the location counter
to a multiple of 2**LOG bytes. */
/* The ARC assembler's .align takes a byte count, not a power of two,
   hence the 1 << (LOG).  */
#define ASM_OUTPUT_ALIGN(FILE,LOG) \
do { if ((LOG) != 0) fprintf (FILE, "\t.align %d\n", 1 << (LOG)); } while (0)
/* Debugging information. */
 
/* Generate DBX (stabs) debugging information.  DWARF support comes from
   the ELF configuration; stabs is explicitly enabled here.  */
#define DBX_DEBUGGING_INFO 1
 
/* Prefer STABS (for now). */
#undef PREFERRED_DEBUGGING_TYPE
#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
 
/* Turn off splitting of long stabs: 0 means never continue a stab
   string onto another line.  */
#define DBX_CONTIN_LENGTH 0
/* Miscellaneous. */
 
/* Specify the machine mode that this machine uses
for the index in the tablejump instruction. */
#define CASE_VECTOR_MODE Pmode
 
/* Define if operations between registers always perform the operation
on the full register even if a narrower mode is specified. */
#define WORD_REGISTER_OPERATIONS
 
/* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD
will either zero-extend or sign-extend. The value of this macro should
be the code that says which one of the two operations is implicitly
done, UNKNOWN if none. */
/* ARC sub-word loads zero-extend regardless of MODE.  */
#define LOAD_EXTEND_OP(MODE) ZERO_EXTEND
 
/* Max number of bytes we can move from memory to memory
in one reasonably fast instruction.  One 32-bit word.  */
#define MOVE_MAX 4
 
/* Define this to be nonzero if shift instructions ignore all but the low-order
few bits. */
#define SHIFT_COUNT_TRUNCATED 1
 
/* Value is 1 if truncating an integer of INPREC bits to OUTPREC bits
is done just by pretending it is already truncated. */
#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
 
/* Specify the machine mode that pointers have.
After generation of rtl, the compiler makes no further distinction
between pointers and any other objects of this machine mode. */
/* ??? The arc doesn't have full 32 bit pointers, but making this PSImode has
its own problems (you have to add extendpsisi2 and truncsipsi2 but how does
one do it without getting excess code?). Try to avoid it. */
#define Pmode SImode
 
/* A function address in a call instruction. */
#define FUNCTION_MODE SImode
 
/* alloca should avoid clobbering the old register save area. */
/* ??? Not defined in tm.texi. */
#define SETJMP_VIA_SAVE_AREA
/* Define the information needed to generate branch and scc insns. This is
stored from the compare operation. Note that we can't use "rtx" here
since it hasn't been defined! */
extern struct rtx_def *arc_compare_op0, *arc_compare_op1;
 
/* ARC function types, derived from function attributes; used to select
   the correct return mechanism in the epilogue.  */
enum arc_function_type {
ARC_FUNCTION_UNKNOWN, ARC_FUNCTION_NORMAL,
/* These are interrupt handlers. The name corresponds to the register
name that contains the return address. */
ARC_FUNCTION_ILINK1, ARC_FUNCTION_ILINK2
};
/* Nonzero if TYPE is one of the interrupt-handler function types.  */
#define ARC_INTERRUPT_P(TYPE) \
((TYPE) == ARC_FUNCTION_ILINK1 || (TYPE) == ARC_FUNCTION_ILINK2)
/* Compute the type of a function from its DECL. */
 
/* Implement `va_start' for varargs and stdarg. */
#define EXPAND_BUILTIN_VA_START(valist, nextarg) \
arc_va_start (valist, nextarg)

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.