OpenCores
URL https://opencores.org/ocsvn/openrisc/openrisc/trunk

Subversion Repositories openrisc

Compare Revisions

  • This comparison shows the changes necessary to convert path
    /openrisc/trunk/gnu-old/gcc-4.2.2/gcc/config/mt
    from Rev 154 to Rev 816
    Reverse comparison

Rev 154 → Rev 816

/mt.md
0,0 → 1,1500
;; Machine description for MorphoRISC1
;; Copyright (C) 2005, 2007 Free Software Foundation, Inc.
;; Contributed by Red Hat, Inc.
 
;; This file is part of GCC.
 
;; GCC is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
 
;; GCC is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;; GNU General Public License for more details.
 
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3. If not see
;; <http://www.gnu.org/licenses/>.
;; UNSPECs
;; Integer codes for the unspec/unspec_volatile RTXs used by this port.
(define_constants
[
(UNSPEC_BLOCKAGE 0)
(UNSPEC_EI 1)
(UNSPEC_DI 2)
(UNSPEC_LOOP 3)
])

;; Attributes

;; Classify each insn for the scheduler and for delay-slot filling below.
(define_attr "type" "branch,call,load,store,io,arith,complex,unknown"
(const_string "unknown") )

;; If the attribute takes numeric values, no `enum' type will be defined and
;; the function to obtain the attribute's value will return `int'.

;; Instruction length in bytes; the default 4 covers most insns.
(define_attr "length" "" (const_int 4))
 
;; DFA scheduler.
(define_automaton "other")
(define_cpu_unit "decode_unit" "other")
(define_cpu_unit "memory_unit" "other")
(define_cpu_unit "branch_unit" "other")

;; Loads/stores and io occupy the memory unit for two cycles, so two
;; memory (or io) instructions cannot issue back to back.
(define_insn_reservation "mem_access" 2
(ior (eq_attr "type" "load") (eq_attr "type" "store"))
"decode_unit+memory_unit*2")

(define_insn_reservation "io_access" 2
(eq_attr "type" "io")
"decode_unit+memory_unit*2")

(define_insn_reservation "branch_access" 2
(ior (eq_attr "type" "branch")
(eq_attr "type" "call"))
"decode_unit+branch_unit*2")

(define_insn_reservation "arith_access" 1
(eq_attr "type" "arith")
"decode_unit")

;; A branch that consumes a result needs one extra cycle beyond the
;; producer's normal latency.
(define_bypass 2 "arith_access" "branch_access")
(define_bypass 3 "mem_access" "branch_access")
(define_bypass 3 "io_access" "branch_access")

;; Delay Slots

;; The mt does not allow branches in the delay slot.
;; The mt does not allow back-to-back memory or io instructions.
;; The compiler does not know what the type of instruction is at
;; the destination of the branch.  Thus, the only type that is
;; acceptable (safe) in the delay slot is the arith type.

;; Branches and calls have one delay slot, fillable only by an arith insn.
(define_delay (ior (eq_attr "type" "branch")
(eq_attr "type" "call"))
[(eq_attr "type" "arith") (nil) (nil)])
 
;; Decrement a counter and branch if its (pre-decrement) value was non-zero.
;; Alternative 0 is the hardware dbnz instruction; alternative 1 covers a
;; counter that ended up in memory and is torn apart by the define_split
;; below (hence the two scratch clobbers and the 16-byte length).
(define_insn "decrement_and_branch_until_zero"
[(set (pc)
(if_then_else
(ne (match_operand:SI 0 "nonimmediate_operand" "+r,*m")
(const_int 0))
(label_ref (match_operand 1 "" ""))
(pc)))
(set (match_dup 0)
(plus:SI (match_dup 0)
(const_int -1)))
(clobber (match_scratch:SI 2 "=X,&r"))
(clobber (match_scratch:SI 3 "=X,&r"))]
"TARGET_MS1_16_003 || TARGET_MS2"
"@
dbnz\t%0, %l1%#
#"
[(set_attr "length" "4,16")
(set_attr "type" "branch,unknown")]
)
 
;; Split the above to handle the case where operand 0 is in memory
;; (a register that couldn't get a hard register).
;; The counter is copied into scratch 2, decremented into scratch 3,
;; stored back, and the branch tests the pre-decrement copy against 0
;; (matching the semantics of the parallel pattern being split).
(define_split
[(set (pc)
(if_then_else
(ne (match_operand:SI 0 "memory_operand" "")
(const_int 0))
(label_ref (match_operand 1 "" ""))
(pc)))
(set (match_dup 0)
(plus:SI (match_dup 0)
(const_int -1)))
(clobber (match_scratch:SI 2 ""))
(clobber (match_scratch:SI 3 ""))]
"TARGET_MS1_16_003 || TARGET_MS2"
[(set (match_dup 2) (match_dup 0))
(set (match_dup 3) (plus:SI (match_dup 2) (const_int -1)))
(set (match_dup 0) (match_dup 3))
(set (pc)
(if_then_else
(ne (match_dup 2)
(const_int 0))
(label_ref (match_dup 1))
(pc)))]
"")
 
;; This peephole is defined in the vain hope that it might actually trigger one
;; day, although I have yet to find a test case that matches it. The normal
;; problem is that GCC likes to move the loading of the constant value -1 out
;; of the loop, so it is not here to be matched.
;; It rewrites "x -= 1; y = -1; if (x != y) goto L" into a single
;; decrement-and-branch parallel shaped like the dbnz pattern above.
;; NOTE(review): both clobbers name hard register 0, whereas the dbnz
;; pattern clobbers two distinct scratches — confirm the duplicated
;; (clobber (reg:SI 0)) is intended.
(define_peephole2
[(set (match_operand:SI 0 "register_operand" "")
(plus:SI (match_dup 0) (const_int -1)))
(set (match_operand:SI 1 "register_operand" "")
(const_int -1))
(set (pc) (if_then_else
(ne (match_dup 0) (match_dup 1))
(label_ref (match_operand 2 "" ""))
(pc)))]
"TARGET_MS1_16_003 || TARGET_MS2"
[(parallel [(set (pc)
(if_then_else
(ne (match_dup 0) (const_int 0))
(label_ref (match_dup 2))
(pc)))
(set (match_dup 0)
(plus:SI (match_dup 0) (const_int -1)))
(clobber (reg:SI 0))
(clobber (reg:SI 0))])]
"")
 
;; Loop instructions.  ms2 has low-overhead looping instructions.
;; These take a constant or register loop count and a loop length
;; offset.  Unfortunately a loop can only be up to 256 instructions,
;; so we deal with longer loops by moving the loop end upwards.  To do
;; otherwise would force us to be very pessimistic right up until
;; the end.

;; This instruction is a placeholder to make the control flow explicit.
;; It emits no code (length 0; the template is only an assembler comment).
(define_insn "loop_end"
[(set (pc) (if_then_else
(ne (match_operand:SI 0 "register_operand" "")
(const_int 1))
(label_ref (match_operand 1 "" ""))
(pc)))
(set (match_dup 0) (plus:SI (match_dup 0) (const_int -1)))
(unspec [(const_int 0)] UNSPEC_LOOP)]
"TARGET_MS2"
";loop end %0,%l1"
[(set_attr "length" "0")])

;; This is the real looping instruction. It is placed just before the
;; loop body. We make it a branch insn, so it stays at the end of the
;; block it is in.
(define_insn "loop_init"
[(set (match_operand:SI 0 "register_operand" "=r,r")
(match_operand:SI 1 "uns_arith_operand" "r,K"))
(unspec [(label_ref (match_operand 2 "" ""))] UNSPEC_LOOP)]
"TARGET_MS2"
"@
loop %1,%l2 ;%0%#
loopi %1,%l2 ;%0%#"
[(set_attr "length" "4")
(set_attr "type" "branch")])

; operand 0 is the loop count pseudo register
; operand 1 is the number of loop iterations or 0 if it is unknown
; operand 2 is the maximum number of loop iterations
; operand 3 is the number of levels of enclosed loops
; operand 4 is the label to jump to at the top of the loop
;; Standard doloop_end interface.  mt_add_loop () records the loop —
;; presumably so a later machine-specific pass can convert it to the
;; loop/loop_end form above (TODO: confirm in mt.c).
(define_expand "doloop_end"
[(parallel [(set (pc) (if_then_else
(ne (match_operand:SI 0 "nonimmediate_operand" "")
(const_int 0))
(label_ref (match_operand 4 "" ""))
(pc)))
(set (match_dup 0)
(plus:SI (match_dup 0)
(const_int -1)))
(clobber (match_scratch:SI 5 ""))
(clobber (match_scratch:SI 6 ""))])]
"TARGET_MS1_16_003 || TARGET_MS2"
{mt_add_loop ();})
;; Moves

;; loadqi: synthesize a byte load from an aligned word load plus shifts,
;; for configurations without byte access.  The shift count is computed
;; big-endian style: byte number within the word is XORed with 3 before
;; scaling by 8.
;; operand 0 result (the byte ends up in the low bits after the shift)
;; operand 1 memory address
;; operand 2 temp, shift count
;; NOTE(review): the address is masked with -3, which clears only bit 1;
;; full word alignment would normally mask with -4 — confirm the memory
;; system ignores the low address bits on word accesses.
(define_expand "loadqi"
[
;; compute shift
(set (match_operand:SI 2 "register_operand" "")
(and:SI (match_dup 1) (const_int 3)))
(set (match_dup 2) (xor:SI (match_dup 2) (const_int 3)))
(set (match_dup 2 ) (ashift:SI (match_dup 2) (const_int 3)))

;; get word that contains byte
(set (match_operand:SI 0 "register_operand" "")
(mem:SI (and:SI (match_operand:SI 1 "register_operand" "")
(const_int -3))))

;; align byte
(set (match_dup 0) (ashiftrt:SI (match_dup 0) (match_dup 2)))
]
""
"")


;; storeqi: read-modify-write a byte into an aligned word, for
;; configurations without byte access.
;; operand 0 byte value to store
;; operand 1 address
;; operand 2 temp, word containing byte
;; operand 3 temp, shift count
;; operand 4 temp, mask, aligned and masked byte
;; operand 5 (unused)
(define_expand "storeqi"
[
;; compute shift
(set (match_operand:SI 3 "register_operand" "")
(and:SI (match_operand:SI 1 "register_operand" "") (const_int 3)))
(set (match_dup 3) (xor:SI (match_dup 3) (const_int 3)))
(set (match_dup 3) (ashift:SI (match_dup 3) (const_int 3)))

;; get word that contains byte
(set (match_operand:SI 2 "register_operand" "")
(mem:SI (and:SI (match_dup 1) (const_int -3))))

;; generate mask
(set (match_operand:SI 4 "register_operand" "") (const_int 255))
(set (match_dup 4) (ashift:SI (match_dup 4) (match_dup 3)))
(set (match_dup 4) (not:SI (match_dup 4)))

;; clear appropriate bits
(set (match_dup 2) (and:SI (match_dup 2) (match_dup 4)))

;; align byte
(set (match_dup 4)
(and:SI (match_operand:SI 0 "register_operand" "") (const_int 255)))
(set (match_dup 4) (ashift:SI (match_dup 4) (match_dup 3)))

;; combine
(set (match_dup 2) (ior:SI (match_dup 4) (match_dup 2)))
;; store updated word
(set (mem:SI (and:SI (match_dup 1) (const_int -3))) (match_dup 2))
]
""
"")
 
 
;; movqi: QImode moves.
;; 1) Force mem->mem moves through a register.
;; 2) Without byte access, expand a store via the storeqi sequence and a
;;    load via loadqi, wrapping each in a sequence so mt_set_memflags can
;;    copy the original MEM's attributes onto the emitted accesses.
;; 3) During reload, a SUBREG of a pseudo in a stack slot can be loaded
;;    as a whole word (stack slots are aligned), via movsi.
(define_expand "movqi"
[(set (match_operand:QI 0 "general_operand" "")
(match_operand:QI 1 "general_operand" ""))]
""
"
{
if (!reload_in_progress
&& !reload_completed
&& GET_CODE (operands[0]) == MEM
&& GET_CODE (operands[1]) == MEM)
operands[1] = copy_to_mode_reg (QImode, operands[1]);
if ( (! TARGET_BYTE_ACCESS) && GET_CODE (operands[0]) == MEM)
{
rtx scratch1 = gen_reg_rtx (SImode);
rtx scratch2 = gen_reg_rtx (SImode);
rtx scratch3 = gen_reg_rtx (SImode);
rtx data = operands[1];
rtx address = XEXP (operands[0], 0);
rtx seq;

if ( GET_CODE (data) != REG )
data = copy_to_mode_reg (QImode, data);

if ( GET_CODE (address) != REG )
address = copy_to_mode_reg (SImode, address);

start_sequence ();
emit_insn (gen_storeqi (gen_lowpart (SImode, data), address,
scratch1, scratch2, scratch3));
mt_set_memflags (operands[0]);
seq = get_insns ();
end_sequence ();
emit_insn (seq);
DONE;
}

if ( (! TARGET_BYTE_ACCESS) && GET_CODE (operands[1]) == MEM)
{
rtx scratch1 = gen_reg_rtx (SImode);
rtx data = operands[0];
rtx address = XEXP (operands[1], 0);
rtx seq;

if ( GET_CODE (address) != REG )
address = copy_to_mode_reg (SImode, address);

start_sequence ();
emit_insn (gen_loadqi (gen_lowpart (SImode, data), address, scratch1));
mt_set_memflags (operands[1]);
seq = get_insns ();
end_sequence ();
emit_insn (seq);
DONE;
}

/* If the load is a pseudo register in a stack slot, some simplification
can be made because the loads are aligned */
if ( (! TARGET_BYTE_ACCESS)
&& (reload_in_progress && GET_CODE (operands[1]) == SUBREG
&& GET_CODE (SUBREG_REG (operands[1])) == REG
&& REGNO (SUBREG_REG (operands[1])) >= FIRST_PSEUDO_REGISTER))
{
rtx data = operands[0];
rtx address = XEXP (operands[1], 0);
rtx seq;

start_sequence ();
emit_insn (gen_movsi (gen_lowpart (SImode, data), address));
mt_set_memflags (operands[1]);
seq = get_insns ();
end_sequence ();
emit_insn (seq);
DONE;
}
}")
 
;; QImode move when the target has byte access: register copy
;; ("or %0, %1, %1" is this port's register-copy idiom), byte load,
;; byte store, or small immediate via addi from r0.
(define_insn "*movqi_internal"
[(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,m,r")
(match_operand:QI 1 "general_operand" "r,m,r,I"))]
"TARGET_BYTE_ACCESS
&& (!memory_operand (operands[0], QImode)
|| !memory_operand (operands[1], QImode))"
"@
or %0, %1, %1
ldb %0, %1
stb %1, %0
addi %0, r0, %1"
[(set_attr "length" "4,4,4,4")
(set_attr "type" "arith,load,store,arith")])

;; QImode move without byte access: register/immediate forms only;
;; memory accesses are synthesized by the movqi expander.
(define_insn "*movqi_internal_nobyte"
[(set (match_operand:QI 0 "register_operand" "=r,r")
(match_operand:QI 1 "arith_operand" "r,I"))]
"!TARGET_BYTE_ACCESS
&& (!memory_operand (operands[0], QImode)
|| !memory_operand (operands[1], QImode))"
"@
or %0, %1, %1
addi %0, r0, %1"
[(set_attr "length" "4,4")
(set_attr "type" "arith,arith")])
 
 
;; The MorphoRISC does not have 16-bit loads and stores.
;; These operations must be synthesized. Note that the code
;; for loadhi and storehi assumes that the least significant
;; bits are ignored.

;; loadhi: word load plus shift, analogous to loadqi but for halfwords
;; (mask 2, shift 0 or 16).
;; operand 0 location of result
;; operand 1 memory address
;; operand 2 temp
;; NOTE(review): the address is masked with -3 (clears only bit 1), not
;; -4 — see the matching note on loadqi.
(define_expand "loadhi"
[
;; compute shift
(set (match_operand:SI 2 "register_operand" "")
(and:SI (match_dup 1) (const_int 2)))
(set (match_dup 2) (xor:SI (match_dup 2) (const_int 2)))
(set (match_dup 2 ) (ashift:SI (match_dup 2) (const_int 3)))

;; get word that contains the 16-bits
(set (match_operand:SI 0 "register_operand" "")
(mem:SI (and:SI (match_operand:SI 1 "register_operand" "")
(const_int -3))))

;; align 16-bit value
(set (match_dup 0) (ashiftrt:SI (match_dup 0) (match_dup 2)))
]
""
"")

;; storehi: read-modify-write a halfword into an aligned word,
;; analogous to storeqi (mask 0xffff instead of 0xff).
;; operand 0 halfword value to store
;; operand 1 address
;; operand 2 temp, word containing the halfword
;; operand 3 temp, shift count
;; operand 4 temp, mask, aligned and masked halfword
;; operand 5 (unused)
(define_expand "storehi"
[
;; compute shift
(set (match_operand:SI 3 "register_operand" "")
(and:SI (match_operand:SI 1 "register_operand" "") (const_int 2)))
(set (match_dup 3) (xor:SI (match_dup 3) (const_int 2)))
(set (match_dup 3) (ashift:SI (match_dup 3) (const_int 3)))

;; get word that contains the 16-bits
(set (match_operand:SI 2 "register_operand" "")
(mem:SI (and:SI (match_dup 1) (const_int -3))))

;; generate mask
(set (match_operand:SI 4 "register_operand" "") (const_int 65535))
(set (match_dup 4) (ashift:SI (match_dup 4) (match_dup 3)))
(set (match_dup 4) (not:SI (match_dup 4)))

;; clear appropriate bits
(set (match_dup 2) (and:SI (match_dup 2) (match_dup 4)))

;; align 16-bit value
(set (match_dup 4)
(and:SI (match_operand:SI 0 "register_operand" "") (const_int 65535)))
(set (match_dup 4) (ashift:SI (match_dup 4) (match_dup 3)))

;; combine
(set (match_dup 2) (ior:SI (match_dup 4) (match_dup 2)))
;; store updated word
(set (mem:SI (and:SI (match_dup 1) (const_int -3))) (match_dup 2))
]
""
"")
 
 
;; movhi: HImode moves.  Force mem->mem through a register; since the
;; hardware has no 16-bit loads/stores, every memory access is expanded
;; via storehi/loadhi (with mt_set_memflags preserving the original MEM
;; attributes); during reload a pseudo in an aligned stack slot can be
;; loaded as a whole word via movsi.
(define_expand "movhi"
[(set (match_operand:HI 0 "general_operand" "")
(match_operand:HI 1 "general_operand" ""))]
""
"
{
if (!reload_in_progress
&& !reload_completed
&& GET_CODE (operands[0]) == MEM
&& GET_CODE (operands[1]) == MEM)
operands[1] = copy_to_mode_reg (HImode, operands[1]);

if ( GET_CODE (operands[0]) == MEM)
{
rtx scratch1 = gen_reg_rtx (SImode);
rtx scratch2 = gen_reg_rtx (SImode);
rtx scratch3 = gen_reg_rtx (SImode);
rtx data = operands[1];
rtx address = XEXP (operands[0], 0);
rtx seq;

if (GET_CODE (data) != REG)
data = copy_to_mode_reg (HImode, data);

if (GET_CODE (address) != REG)
address = copy_to_mode_reg (SImode, address);

start_sequence ();
emit_insn (gen_storehi (gen_lowpart (SImode, data), address,
scratch1, scratch2, scratch3));
mt_set_memflags (operands[0]);
seq = get_insns ();
end_sequence ();
emit_insn (seq);
DONE;
}

if ( GET_CODE (operands[1]) == MEM)
{
rtx scratch1 = gen_reg_rtx (SImode);
rtx data = operands[0];
rtx address = XEXP (operands[1], 0);
rtx seq;

if (GET_CODE (address) != REG)
address = copy_to_mode_reg (SImode, address);

start_sequence ();
emit_insn (gen_loadhi (gen_lowpart (SImode, data), address,
scratch1));
mt_set_memflags (operands[1]);
seq = get_insns ();
end_sequence ();
emit_insn (seq);
DONE;
}

/* If the load is a pseudo register in a stack slot, some simplification
can be made because the loads are aligned */
if ( (reload_in_progress && GET_CODE (operands[1]) == SUBREG
&& GET_CODE (SUBREG_REG (operands[1])) == REG
&& REGNO (SUBREG_REG (operands[1])) >= FIRST_PSEUDO_REGISTER))
{
rtx data = operands[0];
rtx address = XEXP (operands[1], 0);
rtx seq;

start_sequence ();
emit_insn (gen_movsi (gen_lowpart (SImode, data), address));
mt_set_memflags (operands[1]);
seq = get_insns ();
end_sequence ();
emit_insn (seq);
DONE;
}
}")
 
;; HImode register/immediate move; memory is handled entirely by the
;; movhi expander, so only register destinations appear here.
(define_insn "*movhi_internal"
[(set (match_operand:HI 0 "register_operand" "=r,r")
(match_operand:HI 1 "arith_operand" "r,I"))]
"!memory_operand (operands[0], HImode) || !memory_operand (operands[1], HImode)"
"@
or %0, %1, %1
addi %0, r0, %1"
[(set_attr "length" "4,4")
(set_attr "type" "arith,arith")])
 
;; movsi: force mem->mem moves through a register; during/after reload,
;; constants that do not fit a single instruction are emitted as a
;; high/lo_sum (ldui/addui) pair via movsi_high and movsi_lo_sum.
(define_expand "movsi"
[(set (match_operand:SI 0 "nonimmediate_operand" "")
(match_operand:SI 1 "general_operand" ""))]
""
"
{
if (!reload_in_progress && !reload_completed
&& !register_operand (operands[0], SImode)
&& !register_operand (operands[1], SImode))
operands[1] = copy_to_mode_reg (SImode, operands[1]);

/* Take care of constants that don't fit in single instruction */
if ( (reload_in_progress || reload_completed)
&& !single_const_operand (operands[1], SImode))
{
emit_insn (gen_movsi_high (operands[0], operands[1]));
emit_insn (gen_movsi_lo_sum (operands[0], operands[0], operands[1]));
DONE;
}

}")
 
;; Load the high 16 bits of a 32-bit constant/symbol (ldui; %H prints
;; the high half).
(define_insn "movsi_high"
[(set (match_operand:SI 0 "register_operand" "=r")
(high:SI (match_operand:SI 1 "general_operand" "i")))]
""
"*
{
return \"ldui\\t%0, %H1\";
}"
[(set_attr "length" "4")
(set_attr "type" "arith")])


;; Add in the low 16 bits (addui; %L prints the low half) to complete a
;; 32-bit constant load started by movsi_high.
(define_insn "movsi_lo_sum"
[(set (match_operand:SI 0 "register_operand" "=r")
(lo_sum:SI (match_operand:SI 1 "register_operand" "r")
(match_operand:SI 2 "general_operand" "i")))]
""
"*
{
return \"addui\\t%0, %1, %L2\";
}"
[(set_attr "length" "4")
(set_attr "type" "arith")])

/* Take care of constants that don't fit in a single instruction.  */
;; Same transformation as the movsi expander performs, but as a split so
;; it also catches insns created during/after reload.
(define_split
[(set (match_operand:SI 0 "register_operand" "")
(match_operand:SI 1 "general_operand" ""))]
"(reload_in_progress || reload_completed)
&& !single_const_operand (operands[1], SImode)"

[(set (match_dup 0 )
(high:SI (match_dup 1)))
(set (match_dup 0 )
(lo_sum:SI (match_dup 0)
(match_dup 1)))]
)
 
 
;; The last pattern in movsi (with two instructions)
;; is really handled by the emit_insn's in movsi
;; and the define_split above. This provides additional
;; instructions to fill delay slots.

;; Note - it is best to only have one movsi pattern and to handle
;; all the various contingencies by the use of alternatives. This
;; allows reload the greatest amount of flexibility (since reload will
;; only choose amongst alternatives for a selected insn, it will not
;; replace the insn with another one).

;; Alternatives: register copy (or), load, store, signed-16 imm (addi),
;; unsigned-16 imm (addui), high-half imm (ldui), NORed imm (nori, %N),
;; and a full 32-bit constant as ldui+addui (length 8).
(define_insn "*movsi_internal"
[(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,m,r,r,r,r,r")
(match_operand:SI 1 "general_operand" "r,m,r,I,P,L,N,i"))]
"(!memory_operand (operands[0], SImode) || !memory_operand (operands[1], SImode))
&& !((reload_in_progress || reload_completed)
&& !single_const_operand (operands[1], SImode))"
"@
or %0, %1, %1
ldw %0, %1
stw %1, %0
addi %0, r0, %1
addui %0, r0, %1
ldui %0, %H1
nori %0, r0, %N1
ldui %0, %H1\;addui %0, %0, %L1"
[(set_attr "length" "4,4,4,4,4,4,4,8")
(set_attr "type" "arith,load,store,arith,arith,arith,arith,complex")]
)
 
;; Floating Point Moves
;;
;; Note - Patterns for SF mode moves are compulsory, but
;; patterns for DF are optional, as GCC can synthesize them.

;; movsf: force mem->mem moves and constant stores through a register;
;; SF constants are materialized as a movsf_high/movsf_lo_sum pair
;; (the bit pattern is loaded with ldui/addui, as for SImode).
(define_expand "movsf"
[(set (match_operand:SF 0 "general_operand" "")
(match_operand:SF 1 "general_operand" ""))]
""
"
{
if (!reload_in_progress
&& !reload_completed
&& GET_CODE (operands[0]) == MEM
&& (GET_CODE (operands[1]) == MEM
|| GET_CODE (operands[1]) == CONST_DOUBLE))
operands[1] = copy_to_mode_reg (SFmode, operands[1]);

/* Take care of reg <- SF constant */
if ( const_double_operand (operands[1], GET_MODE (operands[1]) ) )
{
emit_insn (gen_movsf_high (operands[0], operands[1]));
emit_insn (gen_movsf_lo_sum (operands[0], operands[0], operands[1]));
DONE;
}
}")
 
;; Add the low 16 bits of an SF constant's bit pattern.  The CONST_DOUBLE
;; is converted to its target single-precision image and emitted as an
;; integer immediate for addui.
(define_insn "movsf_lo_sum"
[(set (match_operand:SF 0 "register_operand" "=r")
(lo_sum:SF (match_operand:SF 1 "register_operand" "r")
(match_operand:SF 2 "const_double_operand" "")))]
""
"*
{
REAL_VALUE_TYPE r;
long i;

REAL_VALUE_FROM_CONST_DOUBLE (r, operands[2]);
REAL_VALUE_TO_TARGET_SINGLE (r, i);
operands[2] = GEN_INT (i);
return \"addui\\t%0, %1, %L2\";
}"
[(set_attr "length" "4")
(set_attr "type" "arith")])

;; Load the high 16 bits of an SF constant's bit pattern (ldui), using
;; the same CONST_DOUBLE -> target-single-image conversion as above.
(define_insn "movsf_high"
[(set (match_operand:SF 0 "register_operand" "=r")
(high:SF (match_operand:SF 1 "const_double_operand" "")))]
""
"*
{
REAL_VALUE_TYPE r;
long i;

REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
REAL_VALUE_TO_TARGET_SINGLE (r, i);
operands[1] = GEN_INT (i);
return \"ldui\\t%0, %H1\";
}"
[(set_attr "length" "4")
(set_attr "type" "arith")])
 
 
;; SFmode moves use the same integer register file and the same word
;; load/store/copy instructions as SImode.
(define_insn "*movsf_internal"
[(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,m")
(match_operand:SF 1 "nonimmediate_operand" "r,m,r"))]
"!memory_operand (operands[0], SFmode) || !memory_operand (operands[1], SFmode)"
"@
or %0, %1, %1
ldw %0, %1
stw %1, %0"
[(set_attr "length" "4,4,4")
(set_attr "type" "arith,load,store")]
)
 
;; movdf: DFmode moves are always performed as two SImode word moves.
(define_expand "movdf"
[(set (match_operand:DF 0 "general_operand" "")
(match_operand:DF 1 "general_operand" ""))]
""
"
{
/* One of the ops has to be in a register or 0 */
if (!register_operand (operand0, DFmode)
&& !reg_or_0_operand (operand1, DFmode))
operands[1] = copy_to_mode_reg (DFmode, operand1);
}")

;; Never emitted as a real instruction ("#"); always split into two
;; word moves, with mt_split_words choosing the word order and operands.
(define_insn_and_split "*movdf_internal"
[(set (match_operand:DF 0 "nonimmediate_operand" "=r,o")
(match_operand:DF 1 "general_operand" "rim,r"))]
"! (memory_operand (operands[0], DFmode)
&& memory_operand (operands[1], DFmode))"
"#"

"(reload_completed || reload_in_progress)"

[(set (match_dup 2) (match_dup 3))
(set (match_dup 4) (match_dup 5))
]

"{
/* figure out what precisely to put into operands 2, 3, 4, and 5 */
mt_split_words (SImode, DFmode, operands);
}"
)
 
;; Reloads

;; Like `movM', but used when a scratch register is required to move between
;; operand 0 and operand 1. Operand 2 describes the scratch register. See the
;; discussion of the `SECONDARY_RELOAD_CLASS' macro.

;; reload_inqi: reload a QImode value from memory when there is no byte
;; access.  Operand 2 is a DImode scratch, i.e. two consecutive SImode
;; hard registers: one for loadqi's shift temp, one to force the address
;; into a register if necessary.
(define_expand "reload_inqi"
[(set (match_operand:QI 0 "register_operand" "=r")
(match_operand:QI 1 "memory_operand" "m"))
(clobber (match_operand:DI 2 "register_operand" "=&r"))]
"! TARGET_BYTE_ACCESS"
"
{
rtx scratch1 = gen_rtx_REG (SImode, REGNO (operands[2]));
rtx scratch2 = gen_rtx_REG (SImode, REGNO (operands[2])+1);
rtx data = operands[0];
rtx address = XEXP (operands[1], 0);
rtx swap, seq;

/* It is possible that the registers we got for scratch1
might coincide with that of operands[0]. gen_loadqi
requires operand0 and operand2 to be different registers.
The following statement ensure that is always the case. */
if (REGNO(operands[0]) == REGNO(scratch1))
{
swap = scratch1;
scratch1 = scratch2;
scratch2 = swap;
}

/* need to make sure address is already in register */
if ( GET_CODE (address) != REG )
address = force_operand (address, scratch2);

start_sequence ();
emit_insn (gen_loadqi (gen_lowpart (SImode, data), address, scratch1));
mt_set_memflags (operands[1]);
seq = get_insns ();
end_sequence ();
emit_insn (seq);
DONE;
}")

;; reload_outqi: store a QImode value to memory when there is no byte
;; access.  Operand 2 is a TImode scratch (four consecutive SImode hard
;; registers): three for storeqi's temporaries, the fourth to force the
;; address into a register if necessary.
(define_expand "reload_outqi"
[(set (match_operand:QI 0 "memory_operand" "=m")
(match_operand:QI 1 "register_operand" "r"))
(clobber (match_operand:TI 2 "register_operand" "=&r"))]
"! TARGET_BYTE_ACCESS"
"
{
rtx scratch1 = gen_rtx_REG (SImode, REGNO (operands[2]));
rtx scratch2 = gen_rtx_REG (SImode, REGNO (operands[2])+1);
rtx scratch3 = gen_rtx_REG (SImode, REGNO (operands[2])+2);
rtx scratch4 = gen_rtx_REG (SImode, REGNO (operands[2])+3);
rtx data = operands[1];
rtx address = XEXP (operands[0], 0);
rtx seq;

/* need to make sure address is already in register */
if ( GET_CODE (address) != REG )
address = force_operand (address, scratch4);

start_sequence ();
emit_insn (gen_storeqi (gen_lowpart (SImode, data), address,
scratch1, scratch2, scratch3));
mt_set_memflags (operands[0]);
seq = get_insns ();
end_sequence ();
emit_insn (seq);
DONE;
}")
 
;; reload_inhi: HImode counterpart of reload_inqi, via gen_loadhi.
;; (The comment inside the C code below says gen_loadqi; for this
;; pattern the relevant generator is gen_loadhi.)
(define_expand "reload_inhi"
[(set (match_operand:HI 0 "register_operand" "=r")
(match_operand:HI 1 "memory_operand" "m"))
(clobber (match_operand:DI 2 "register_operand" "=&r"))]
""
"
{
rtx scratch1 = gen_rtx_REG (SImode, REGNO (operands[2]));
rtx scratch2 = gen_rtx_REG (SImode, REGNO (operands[2])+1);
rtx data = operands[0];
rtx address = XEXP (operands[1], 0);
rtx swap, seq;

/* It is possible that the registers we got for scratch1
might coincide with that of operands[0]. gen_loadqi
requires operand0 and operand2 to be different registers.
The following statement ensure that is always the case. */
if (REGNO(operands[0]) == REGNO(scratch1))
{
swap = scratch1;
scratch1 = scratch2;
scratch2 = swap;
}

/* need to make sure address is already in register */
if ( GET_CODE (address) != REG )
address = force_operand (address, scratch2);

start_sequence ();
emit_insn (gen_loadhi (gen_lowpart (SImode, data), address,
scratch1));
mt_set_memflags (operands[1]);
seq = get_insns ();
end_sequence ();
emit_insn (seq);
DONE;
}")

;; reload_outhi: HImode counterpart of reload_outqi, via gen_storehi.
;; Operand 2 is a TImode scratch providing four SImode registers.
(define_expand "reload_outhi"
[(set (match_operand:HI 0 "memory_operand" "=m")
(match_operand:HI 1 "register_operand" "r"))
(clobber (match_operand:TI 2 "register_operand" "=&r"))]
""
"
{
rtx scratch1 = gen_rtx_REG (SImode, REGNO (operands[2]));
rtx scratch2 = gen_rtx_REG (SImode, REGNO (operands[2])+1);
rtx scratch3 = gen_rtx_REG (SImode, REGNO (operands[2])+2);
rtx scratch4 = gen_rtx_REG (SImode, REGNO (operands[2])+3);
rtx data = operands[1];
rtx address = XEXP (operands[0], 0);
rtx seq;

/* need to make sure address is already in register */
if ( GET_CODE (address) != REG )
address = force_operand (address, scratch4);

start_sequence ();
emit_insn (gen_storehi (gen_lowpart (SImode, data), address,
scratch1, scratch2, scratch3));
mt_set_memflags (operands[0]);
seq = get_insns ();
end_sequence ();
emit_insn (seq);
DONE;
}")
 
;; 32 bit Integer arithmetic

;; Addition.  "%" marks operand 1 as commutative; I is a signed 16-bit
;; immediate (addi).
(define_insn "addsi3"
[(set (match_operand:SI 0 "register_operand" "=r,r")
(plus:SI (match_operand:SI 1 "register_operand" "%r,r")
(match_operand:SI 2 "arith_operand" "r,I")))]
""
"@
add %0, %1, %2
addi %0, %1, %2"
[(set_attr "length" "4,4")
(set_attr "type" "arith,arith")])

;; Subtraction.  %z prints r0 for an explicit zero operand (J constraint).
(define_insn "subsi3"
[(set (match_operand:SI 0 "register_operand" "=r,r")
(minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ,rJ")
(match_operand:SI 2 "arith_operand" "rJ,I")))]
""
"@
sub %0, %z1, %z2
subi %0, %z1, %2"
[(set_attr "length" "4,4")
(set_attr "type" "arith,arith")])

;; Negation, implemented as subtraction from r0 (hard-wired zero).
(define_insn "negsi2"
[(set (match_operand:SI 0 "register_operand" "=r,r")
(neg:SI (match_operand:SI 1 "arith_operand" "r,I")))]
""
"@
sub %0, r0, %1
subi %0, r0, %1"
[(set_attr "length" "4,4")
(set_attr "type" "arith,arith")])
 
;; 32 bit Integer Shifts and Rotates

;; Shift Left (lsl).
;; NOTE(review): this pattern uses predicate "arith_operand" with
;; constraint K, while ashrsi3/lshrsi3 use "uns_arith_operand" with K —
;; confirm the predicate/constraint pairing here is intentional.
(define_insn "ashlsi3"
[(set (match_operand:SI 0 "register_operand" "=r,r")
(ashift:SI (match_operand:SI 1 "register_operand" "r,r")
(match_operand:SI 2 "arith_operand" "r,K")))]
""
"@
lsl %0, %1, %2
lsli %0, %1, %2"
[(set_attr "length" "4,4")
(set_attr "type" "arith,arith")])

;; Arithmetic Shift Right (asr), shift count register or K immediate.
(define_insn "ashrsi3"
[(set (match_operand:SI 0 "register_operand" "=r,r")
(ashiftrt:SI (match_operand:SI 1 "register_operand" "r,r")
(match_operand:SI 2 "uns_arith_operand" "r,K")))]
""
"@
asr %0, %1, %2
asri %0, %1, %2"
[(set_attr "length" "4,4")
(set_attr "type" "arith,arith")])

;; Logical Shift Right (lsr).
(define_insn "lshrsi3"
[(set (match_operand:SI 0 "register_operand" "=r,r")
(lshiftrt:SI (match_operand:SI 1 "register_operand" "r,r")
(match_operand:SI 2 "uns_arith_operand" "r,K")))]
""
"@
lsr %0, %1, %2
lsri %0, %1, %2"
[(set_attr "length" "4,4")
(set_attr "type" "arith,arith")])
 
;; 32 Bit Integer Logical operations
;; All take a register plus a register or unsigned-16 immediate (K);
;; the first input is marked commutative ("%").

;; Logical AND, 32 bit integers
(define_insn "andsi3"
[(set (match_operand:SI 0 "register_operand" "=r,r")
(and:SI (match_operand:SI 1 "register_operand" "%r,r")
(match_operand:SI 2 "uns_arith_operand" "r,K")))]
""
"@
and %0, %1, %2
andi %0, %1, %2"
[(set_attr "length" "4,4")
(set_attr "type" "arith,arith")])

;; Inclusive OR, 32 bit integers
(define_insn "iorsi3"
[(set (match_operand:SI 0 "register_operand" "=r,r")
(ior:SI (match_operand:SI 1 "register_operand" "%r,r")
(match_operand:SI 2 "uns_arith_operand" "r,K")))]
""
"@
or %0, %1, %2
ori %0, %1, %2"
[(set_attr "length" "4,4")
(set_attr "type" "arith,arith")])

;; Exclusive OR, 32 bit integers
(define_insn "xorsi3"
[(set (match_operand:SI 0 "register_operand" "=r,r")
(xor:SI (match_operand:SI 1 "register_operand" "%r,r")
(match_operand:SI 2 "uns_arith_operand" "r,K")))]
""
"@
xor %0, %1, %2
xori %0, %1, %2"
[(set_attr "length" "4,4")
(set_attr "type" "arith,arith")])


;; One's complement, implemented as NOR of the operand with itself.
(define_insn "one_cmplsi2"
[(set (match_operand:SI 0 "register_operand" "=r")
(not:SI (match_operand:SI 1 "register_operand" "r")))]
""
"nor %0, %1, %1"
[(set_attr "length" "4")
(set_attr "type" "arith")])
 
;; Multiply

;; 16x16 -> 32 signed widening multiply; only available on the
;; MS1-16-003 and MS2 variants.
(define_insn "mulhisi3"
[(set (match_operand:SI 0 "register_operand" "=r,r")
(mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand" "%r,r"))
(sign_extend:SI (match_operand:HI 2 "arith_operand" "r,I"))))]
"TARGET_MS1_16_003 || TARGET_MS2"
"@
mul %0, %1, %2
muli %0, %1, %2"
[(set_attr "length" "4,4")
(set_attr "type" "arith,arith")])
 
;; Comparisons

;; Note, we store the operands in the comparison insns, and use them later
;; when generating the branch or scc operation.

;; First the routines called by the machine independent part of the compiler.
;; cmpsi emits no code: it only stashes its operands in the global
;; mt_compare_op0/mt_compare_op1, which the branch expanders below pass
;; to mt_emit_cbranch to form a single compare-and-branch insn.
(define_expand "cmpsi"
[(set (cc0)
(compare (match_operand:SI 0 "register_operand" "")
(match_operand:SI 1 "arith_operand" "")))]
""
"
{
mt_compare_op0 = operands[0];
mt_compare_op1 = operands[1];
DONE;
}")
 
;; Branches

;; Signed conditional-branch expanders.  Each retrieves the operands
;; stashed by cmpsi above and lets mt_emit_cbranch emit the appropriate
;; compare-and-branch insn (see the br* patterns further down).

(define_expand "beq"
[(use (match_operand 0 "" ""))]
""
"
{
mt_emit_cbranch (EQ, operands[0], mt_compare_op0, mt_compare_op1);
DONE;
}")

(define_expand "bne"
[(use (match_operand 0 "" ""))]
""
"
{
mt_emit_cbranch (NE, operands[0], mt_compare_op0, mt_compare_op1);
DONE;
}")

(define_expand "bge"
[(use (match_operand 0 "" ""))]
""
"
{
mt_emit_cbranch (GE, operands[0], mt_compare_op0, mt_compare_op1);
DONE;
}")

(define_expand "bgt"
[(use (match_operand 0 "" ""))]
""
"
{
mt_emit_cbranch (GT, operands[0], mt_compare_op0, mt_compare_op1);
DONE;
}")

(define_expand "ble"
[(use (match_operand 0 "" ""))]
""
"
{
mt_emit_cbranch (LE, operands[0], mt_compare_op0, mt_compare_op1);
DONE;
}")

(define_expand "blt"
[(use (match_operand 0 "" ""))]
""
"
{
mt_emit_cbranch (LT, operands[0], mt_compare_op0, mt_compare_op1);
DONE;
}")
 
;; Unsigned conditional-branch expanders; mt_emit_cbranch is handed the
;; unsigned comparison codes (GEU/GTU/LEU/LTU).

(define_expand "bgeu"
[(use (match_operand 0 "" ""))]
""
"
{
mt_emit_cbranch (GEU, operands[0], mt_compare_op0, mt_compare_op1);
DONE;
}")

(define_expand "bgtu"
[(use (match_operand 0 "" ""))]
""
"
{
mt_emit_cbranch (GTU, operands[0], mt_compare_op0, mt_compare_op1);
DONE;
}")

(define_expand "bleu"
[(use (match_operand 0 "" ""))]
""
"
{
mt_emit_cbranch (LEU, operands[0], mt_compare_op0, mt_compare_op1);
DONE;
}")

(define_expand "bltu"
[(use (match_operand 0 "" ""))]
""
"
{
mt_emit_cbranch (LTU, operands[0], mt_compare_op0, mt_compare_op1);
DONE;
}")
 
;; Unordered-comparison branch expanders (bunge/bungt/bunle/bunlt).
;; NOTE(review): these are mapped onto the unsigned integer codes
;; (GEU/GTU/LEU/LTU), identical to bgeu..bltu above — presumably
;; acceptable because this target handles FP comparisons out of line,
;; but confirm the intended semantics.

(define_expand "bunge"
[(use (match_operand 0 "" ""))]
""
"
{
mt_emit_cbranch (GEU, operands[0], mt_compare_op0, mt_compare_op1);
DONE;
}")

(define_expand "bungt"
[(use (match_operand 0 "" ""))]
""
"
{
mt_emit_cbranch (GTU, operands[0], mt_compare_op0, mt_compare_op1);
DONE;
}")

(define_expand "bunle"
[(use (match_operand 0 "" ""))]
""
"
{
mt_emit_cbranch (LEU, operands[0], mt_compare_op0, mt_compare_op1);
DONE;
}")

(define_expand "bunlt"
[(use (match_operand 0 "" ""))]
""
"
{
mt_emit_cbranch (LTU, operands[0], mt_compare_op0, mt_compare_op1);
DONE;
}")
 
;; Compare-and-branch insns for EQ/NE.  "_true" variants branch when the
;; condition holds; "_false" variants (label on the else arm) branch on
;; the inverse by using the opposite opcode.  %z prints r0 for a zero
;; operand; %# requests the delay-slot suffix.

(define_insn "*beq_true"
[(set (pc)
(if_then_else (eq (match_operand:SI 0 "reg_or_0_operand" "rJ")
(match_operand:SI 1 "reg_or_0_operand" "rJ"))
(label_ref (match_operand 2 "" ""))
(pc)))]
""
"breq %z0, %z1, %l2%#"
[(set_attr "length" "4")
(set_attr "type" "branch")])

(define_insn "*beq_false"
[(set (pc)
(if_then_else (eq (match_operand:SI 0 "reg_or_0_operand" "rJ")
(match_operand:SI 1 "reg_or_0_operand" "rJ"))
(pc)
(label_ref (match_operand 2 "" ""))))]
""
"brne %z0, %z1, %l2%#"
[(set_attr "length" "4")
(set_attr "type" "branch")])


(define_insn "*bne_true"
[(set (pc)
(if_then_else (ne (match_operand:SI 0 "reg_or_0_operand" "rJ")
(match_operand:SI 1 "reg_or_0_operand" "rJ"))
(label_ref (match_operand 2 "" ""))
(pc)))]
""
"brne %z0, %z1, %l2%#"
[(set_attr "length" "4")
(set_attr "type" "branch")])

(define_insn "*bne_false"
[(set (pc)
(if_then_else (ne (match_operand:SI 0 "reg_or_0_operand" "rJ")
(match_operand:SI 1 "reg_or_0_operand" "rJ"))
(pc)
(label_ref (match_operand 2 "" ""))))]
""
"breq %z0, %z1, %l2%#"
[(set_attr "length" "4")
(set_attr "type" "branch")])
 
;; LT/LE compare-and-branch insns.  The hardware only has brlt/brle, so
;; the "_false" (inverted) forms swap the operands:
;; !(a < b) == (b <= a) and !(a <= b) == (b < a).

(define_insn "*blt_true"
[(set (pc)
(if_then_else (lt (match_operand:SI 0 "reg_or_0_operand" "rJ")
(match_operand:SI 1 "reg_or_0_operand" "rJ"))
(label_ref (match_operand 2 "" ""))
(pc)))]
""
"brlt %z0, %z1, %l2%#"
[(set_attr "length" "4")
(set_attr "type" "branch")])

(define_insn "*blt_false"
[(set (pc)
(if_then_else (lt (match_operand:SI 0 "reg_or_0_operand" "rJ")
(match_operand:SI 1 "reg_or_0_operand" "rJ"))
(pc)
(label_ref (match_operand 2 "" ""))))]
""
"brle %z1, %z0,%l2%#"
[(set_attr "length" "4")
(set_attr "type" "branch")])

(define_insn "*ble_true"
[(set (pc)
(if_then_else (le (match_operand:SI 0 "reg_or_0_operand" "rJ")
(match_operand:SI 1 "reg_or_0_operand" "rJ"))
(label_ref (match_operand 2 "" ""))
(pc)))]
""
"brle %z0, %z1, %l2%#"
[(set_attr "length" "4")
(set_attr "type" "branch")])

(define_insn "*ble_false"
[(set (pc)
(if_then_else (le (match_operand:SI 0 "reg_or_0_operand" "rJ")
(match_operand:SI 1 "reg_or_0_operand" "rJ"))
(pc)
(label_ref (match_operand 2 "" ""))))]
""
"brlt %z1, %z0,%l2%#"
[(set_attr "length" "4")
(set_attr "type" "branch")])
 
;; Signed greater-than branch: op0 > op1 is emitted as op1 < op0 ("brlt"
;; with operands swapped) since there is no brgt instruction.
(define_insn "*bgt_true"
[(set (pc)
(if_then_else (gt (match_operand:SI 0 "reg_or_0_operand" "rJ")
(match_operand:SI 1 "reg_or_0_operand" "rJ"))
(label_ref (match_operand 2 "" ""))
(pc)))]
""
"brlt %z1, %z0, %l2%#"
[(set_attr "length" "4")
(set_attr "type" "branch")])
 
;; GT with branch-on-false: jump when op0 <= op1 ("brle").
(define_insn "*bgt_false"
[(set (pc)
(if_then_else (gt (match_operand:SI 0 "reg_or_0_operand" "rJ")
(match_operand:SI 1 "reg_or_0_operand" "rJ"))
(pc)
(label_ref (match_operand 2 "" ""))))]
""
"brle %z0, %z1, %l2%#"
[(set_attr "length" "4")
(set_attr "type" "branch")])
 
;; Signed greater-or-equal branch: op0 >= op1 is emitted as op1 <= op0
;; ("brle" with operands swapped) since there is no brge instruction.
;; Fixed: the output template was missing the space before %l2, which
;; was inconsistent with every sibling branch pattern in this file.
(define_insn "*bge_true"
[(set (pc)
(if_then_else (ge (match_operand:SI 0 "reg_or_0_operand" "rJ")
(match_operand:SI 1 "reg_or_0_operand" "rJ"))
(label_ref (match_operand 2 "" ""))
(pc)))]
""
"brle %z1, %z0, %l2%#"
[(set_attr "length" "4")
(set_attr "type" "branch")])
 
;; GE with branch-on-false: jump when op0 < op1 ("brlt").
(define_insn "*bge_false"
[(set (pc)
(if_then_else (ge (match_operand:SI 0 "reg_or_0_operand" "rJ")
(match_operand:SI 1 "reg_or_0_operand" "rJ"))
(pc)
(label_ref (match_operand 2 "" ""))))]
""
"brlt %z0, %z1, %l2%#"
[(set_attr "length" "4")
(set_attr "type" "branch")])
 
;; No unsigned operators on Morpho mt. All the unsigned operations are
;; converted to the signed operations above.
 
;; Set flag operations
 
;; "seq", "sne", "slt", "sle", "sgt", "sge", "sltu", "sleu",
;; "sgtu", and "sgeu" don't exist as regular instructions on the
;; mt, so they are not defined.
 
;; Call and branch instructions
 
;; Call expander.  The call target is forced into a register (the
;; call_internal pattern only accepts a register address), and r14 --
;; the link register used by jal -- is clobbered.
(define_expand "call"
[(parallel [(call (mem:SI (match_operand:SI 0 "register_operand" ""))
(match_operand 1 "" ""))
(clobber (reg:SI 14))])]
""
"
{
operands[0] = force_reg (SImode, XEXP (operands[0], 0));
}")
 
;; Register-indirect call: jal stores the return address in r14.
;; %# fills an empty delay slot with a nop.
(define_insn "call_internal"
[(call (mem:SI (match_operand 0 "register_operand" "r"))
(match_operand 1 "" ""))
;; possibly add a clobber of the reg that gets the return address
(clobber (reg:SI 14))]
""
"jal r14, %0%#"
[(set_attr "length" "4")
(set_attr "type" "call")])
 
;; Value-returning call expander; same shape as "call" with the result
;; register captured in operand 0.
(define_expand "call_value"
[(parallel [(set (match_operand 0 "register_operand" "")
(call (mem:SI (match_operand:SI 1 "register_operand" ""))
(match_operand 2 "general_operand" "")))
(clobber (reg:SI 14))])]
""
"
{
operands[1] = force_reg (SImode, XEXP (operands[1], 0));
}")
 
 
;; Register-indirect call returning a value; the link is saved in r14.
(define_insn "call_value_internal"
[(set (match_operand 0 "register_operand" "=r")
(call (mem:SI (match_operand 1 "register_operand" "r"))
(match_operand 2 "" "")))
;; possibly add a clobber of the reg that gets the return address
(clobber (reg:SI 14))]
""
"jal r14, %1%#"
[(set_attr "length" "4")
(set_attr "type" "call")])
 
;; Subroutine return
;; Subroutine return: jump through the link register (r14); "jal r0"
;; discards the new return address.  The (const_int 2) merely makes the
;; pattern distinct from the other return patterns below.
(define_insn "return_internal"
[(const_int 2)
(return)
(use (reg:SI 14))]
""
"jal r0, r14%#"
[(set_attr "length" "4")
(set_attr "type" "call")])
 
;; Interrupt return
;; Interrupt return via the dedicated "reti" instruction and r15.
;; (const_int 3) distinguishes this pattern from return_internal.
(define_insn "return_interrupt_internal"
[(const_int 3)
(return)
(use (reg:SI 15))]
""
"reti r15%#"
[(set_attr "length" "4")
(set_attr "type" "call")])
 
;; Subroutine return
;; Exception-handler return: jumps through r11; r7, r8 and r10 are kept
;; live across the return -- presumably the EH data registers set up by
;; mt_expand_eh_return/mt_emit_eh_epilogue (TODO confirm).
(define_insn "eh_return_internal"
[(return)
(use (reg:SI 7))
(use (reg:SI 8))
(use (reg:SI 11))
(use (reg:SI 10))]
""
"jal r0, r11%#"
[(set_attr "length" "4")
(set_attr "type" "call")])
 
 
;; Normal unconditional jump
;; Normal unconditional jump
(define_insn "jump"
[(set (pc) (label_ref (match_operand 0 "" "")))]
""
"jmp %l0%#"
[(set_attr "length" "4")
(set_attr "type" "branch")])
 
;; Indirect jump through a register
;; Indirect jump through a register: "jal r0" jumps without saving a
;; return address (r0 is the hard-wired zero/discard register).
(define_insn "indirect_jump"
[(set (pc) (match_operand 0 "register_operand" "r"))]
""
"jal r0,%0%#"
[(set_attr "length" "4")
(set_attr "type" "call")])
 
;; Jump-table dispatch: jump through a register holding the table entry;
;; the (use (label_ref ...)) keeps the dispatch table alive.
(define_insn "tablejump"
[(set (pc) (match_operand:SI 0 "register_operand" "r"))
(use (label_ref (match_operand 1 "" "")))]
""
"jal r0, %0%#"
[(set_attr "length" "4")
(set_attr "type" "call")])
 
;; Function prologue; all the work is done in C by mt_expand_prologue.
(define_expand "prologue"
[(const_int 1)]
""
"
{
mt_expand_prologue ();
DONE;
}")
 
;; Function epilogue; all the work is done in C by mt_expand_epilogue.
(define_expand "epilogue"
[(const_int 2)]
""
"
{
mt_expand_epilogue (NORMAL_EPILOGUE);
DONE;
}")
 
 
;; Exception-handling return expander; defers to mt_expand_eh_return.
(define_expand "eh_return"
[(use (match_operand:SI 0 "register_operand" "r"))]
""
"
{
mt_expand_eh_return (operands);
DONE;
}")
 
 
;; EH epilogue placeholder, split after reload into code emitted by
;; mt_emit_eh_epilogue.
;; NOTE(review): uses the bare unspec number 6, which is not among the
;; named UNSPEC_* constants at the top of this file -- confirm intent.
(define_insn_and_split "eh_epilogue"
[(unspec [(match_operand 0 "register_operand" "r")] 6)]
""
"#"
"reload_completed"
[(const_int 1)]
"mt_emit_eh_epilogue (operands); DONE;"
)
;; No operation, needed in case the user uses -g but not -O.
;; No operation, needed in case the user uses -g but not -O.
(define_insn "nop"
[(const_int 0)]
""
"nop"
[(set_attr "length" "4")
(set_attr "type" "arith")])
 
;; ::::::::::::::::::::
;; ::
;; :: UNSPEC_VOLATILE usage
;; ::
;; ::::::::::::::::::::
;;
;; 0 blockage
;; 1 Enable interrupts
;; 2 Disable interrupts
;;
 
;; Pseudo instruction that prevents the scheduler from moving code above this
;; point.
;; Pseudo instruction that prevents the scheduler from moving code
;; across this point; emits no assembly (length 0).
(define_insn "blockage"
[(unspec_volatile [(const_int 0)] UNSPEC_BLOCKAGE)]
""
""
[(set_attr "length" "0")])
 
;; Trap instruction to allow usage of the __builtin_trap function
;; Trap instruction to allow usage of the __builtin_trap function.
;; Emits "si" (presumably a software-interrupt instruction -- TODO
;; confirm against the MT ISA manual), clobbering r14.
(define_insn "trap"
[(trap_if (const_int 1) (const_int 0))
(clobber (reg:SI 14))]
""
"si r14%#"
[(set_attr "length" "4")
(set_attr "type" "branch")])
 
;; Conditional trap expander.  The compare operands were stashed in the
;; mt_compare_op0/mt_compare_op1 globals by the compare pattern.
(define_expand "conditional_trap"
[(trap_if (match_operator 0 "comparison_operator"
[(match_dup 2)
(match_dup 3)])
(match_operand 1 "const_int_operand" ""))]
""
"
{
operands[2] = mt_compare_op0;
operands[3] = mt_compare_op1;
}")
 
;; Templates to control handling of interrupts
 
;; Enable interrupts template
;; Enable interrupts template
(define_insn "ei"
[(unspec_volatile [(const_int 0)] UNSPEC_EI)]
""
"ei"
[(set_attr "length" "4")])
 
;; Disable interrupts template
(define_insn "di"
[(unspec_volatile [(const_int 0)] UNSPEC_DI)]
""
"di"
[(set_attr "length" "4")])
/crti.asm
0,0 → 1,71
# crti.asm for mt
#
# Copyright (C) 2005 Free Software Foundation, Inc.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2, or (at your option) any
# later version.
#
# In addition to the permissions in the GNU General Public License, the
# Free Software Foundation gives you unlimited permission to link the
# compiled version of this file with other programs, and to distribute
# those programs without any restriction coming from the use of this
# file. (The General Public License restrictions do apply in other
# respects; for example, they cover modification of the file, and
# distribution when not linked into another program.)
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GCC; see the file COPYING. If not, write to the Free
# Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
# As a special exception, if you link this library with files
# compiled with GCC to produce an executable, this does not cause
# the resulting executable to be covered by the GNU General Public License.
# This exception does not however invalidate any other reasons why
# the executable file might be covered by the GNU General Public License.
#
 
# This file just makes a stack frame for the contents of the .fini and
# .init sections. Users may put any desired instructions in those
# sections.
 
.file "crti.asm"
 
# Open the _init function: reserve a stack word and save the link
# register (r14).  The matching restore/return lives in crtn.asm; the
# body of .init is contributed by other objects linked in between.
.section ".init"
.global _init
.type _init,#function
.align 4
_init:
subi sp, sp, #4
stw r14, sp, #0
# "or r0, r0, r0" is a no-op; presumably pipeline padding -- TODO confirm.
or r0, r0, r0
or r0, r0, r0
or r0, r0, r0
or r0, r0, r0
or r0, r0, r0
or r0, r0, r0
or r0, r0, r0
or r0, r0, r0
 
# Open the _fini function, mirroring _init above: save r14 on a fresh
# stack word; crtn.asm supplies the restore and return.
.section ".fini"
.global _fini
.type _fini,#function
.align 4
_fini:
subi sp, sp, #4
stw r14, sp, #0
# "or r0, r0, r0" is a no-op; presumably pipeline padding -- TODO confirm.
or r0, r0, r0
or r0, r0, r0
or r0, r0, r0
or r0, r0, r0
or r0, r0, r0
or r0, r0, r0
or r0, r0, r0
or r0, r0, r0
/mt.c
0,0 → 1,2486
/* Target definitions for the MorphoRISC1
Copyright (C) 2005, 2007 Free Software Foundation, Inc.
Contributed by Red Hat, Inc.
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 3, or (at your
option) any later version.
 
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
License for more details.
 
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
 
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "recog.h"
#include "toplev.h"
#include "output.h"
#include "integrate.h"
#include "tree.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "libfuncs.h"
#include "flags.h"
#include "tm_p.h"
#include "ggc.h"
#include "insn-flags.h"
#include "obstack.h"
#include "except.h"
#include "target.h"
#include "target-def.h"
#include "basic-block.h"
 
/* Frame pointer register mask. */
#define FP_MASK (1 << (GPR_FP))
 
/* Link register mask. */
#define LINK_MASK (1 << (GPR_LINK))
 
/* Given a SIZE in bytes, advance to the next word (i.e. round SIZE up
to whole words and express it in words). */
#define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
 
/* A C structure for machine-specific, per-function data.
This is added to the cfun structure. */
struct machine_function GTY(())
{
/* Flags if __builtin_return_address (n) with n >= 1 was used. */
int ra_needs_full_frame;
/* Stack adjustment recorded for the EH return path -- presumably set
by mt_expand_eh_return; confirm against that function. */
struct rtx_def * eh_stack_adjust;
/* Nonzero if this function is an interrupt handler (see
mt_expand_prologue, which sets it). */
int interrupt_handler;
/* NOTE(review): set elsewhere in this file -- looks like a hardware
loop marker; confirm. */
int has_loops;
};
 
/* Define the information needed to generate branch and scc insns.
This is stored from the compare operation. */
struct rtx_def * mt_compare_op0;
struct rtx_def * mt_compare_op1;
 
/* Current frame information calculated by compute_frame_size. */
struct mt_frame_info current_frame_info;
 
/* Zero structure to initialize current_frame_info. */
struct mt_frame_info zero_frame_info;
 
/* mt doesn't have unsigned compares; a library call is needed for this. */
struct rtx_def * mt_ucmpsi3_libcall;
 
/* Saved copy of -fdelayed-branch; mt_override_options clears the real
flag because delay slots are filled in machine-dependent reorg. */
static int mt_flag_delayed_branch;
 
/* Implement TARGET_STRUCT_VALUE_RTX: aggregate return values are passed
through the register RETVAL_REGNUM. */
static rtx
mt_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
int incoming ATTRIBUTE_UNUSED)
{
return gen_rtx_REG (Pmode, RETVAL_REGNUM);
}
 
/* Implement RETURN_ADDR_RTX.  Only the innermost frame (COUNT == 0) is
   supported; its return address is the incoming value of the link
   register.  */
rtx
mt_return_addr_rtx (int count)
{
  return count == 0 ? get_hard_reg_initial_val (Pmode, GPR_LINK) : NULL_RTX;
}
 
/* The following variable value indicates the number of nops required
between the current instruction and the next instruction to avoid
any pipeline hazards.  Set by mt_final_prescan_insn and drained by
mt_asm_output_opcode. */
static int mt_nops_required = 0;
static const char * mt_nop_reasons = "";
 
/* Implement ASM_OUTPUT_OPCODE. */
const char *
mt_asm_output_opcode (FILE *f ATTRIBUTE_UNUSED, const char *ptr)
{
if (mt_nops_required)
fprintf (f, ";# need %d nops because of %s\n\t",
mt_nops_required, mt_nop_reasons);
while (mt_nops_required)
{
fprintf (f, "nop\n\t");
-- mt_nops_required;
}
return ptr;
}
 
/* Given an insn, return whether it's a memory operation or a branch
operation, otherwise return TYPE_ARITH. */
static enum attr_type
mt_get_attr_type (rtx complete_insn)
{
rtx insn = PATTERN (complete_insn);
 
/* Jumps and calls are both classified as branches for hazard purposes. */
if (JUMP_P (complete_insn))
return TYPE_BRANCH;
if (CALL_P (complete_insn))
return TYPE_BRANCH;
 
/* Anything that is not a single SET is treated as arithmetic. */
if (GET_CODE (insn) != SET)
return TYPE_ARITH;
 
/* A SET of the pc is a branch (e.g. an indirect jump). */
if (SET_DEST (insn) == pc_rtx)
return TYPE_BRANCH;
 
if (GET_CODE (SET_DEST (insn)) == MEM)
return TYPE_STORE;
 
if (GET_CODE (SET_SRC (insn)) == MEM)
return TYPE_LOAD;
return TYPE_ARITH;
}
 
/* A helper routine for insn_dependent_p called through note_stores.
X is a location stored to; DATA points at the pattern being tested.
If the stored location is mentioned in that pattern, the pointer is
nulled out to signal a dependence. */
 
static void
insn_dependent_p_1 (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
{
rtx * pinsn = (rtx *) data;
 
if (*pinsn && reg_mentioned_p (x, *pinsn))
*pinsn = NULL_RTX;
}
 
/* Return true if anything in insn X is (anti,output,true)
dependent on anything in insn Y.  Each direction is checked by
walking one insn's stores (note_stores) and seeing whether a stored
location is mentioned in the other insn's pattern. */
 
static bool
insn_dependent_p (rtx x, rtx y)
{
rtx tmp;
 
if (! INSN_P (x) || ! INSN_P (y))
return 0;
 
/* True/output dependence: does X store something Y mentions? */
tmp = PATTERN (y);
note_stores (PATTERN (x), insn_dependent_p_1, &tmp);
if (tmp == NULL_RTX)
return true;
 
/* Anti dependence: does Y store something X mentions? */
tmp = PATTERN (x);
note_stores (PATTERN (y), insn_dependent_p_1, &tmp);
return (tmp == NULL_RTX);
}
 
 
/* Return true if anything in insn X is true dependent on anything in
insn Y, i.e. X stores a location that Y's pattern mentions.  This is
the one-directional half of insn_dependent_p above. */
static bool
insn_true_dependent_p (rtx x, rtx y)
{
rtx tmp;
 
if (! INSN_P (x) || ! INSN_P (y))
return 0;
 
tmp = PATTERN (y);
note_stores (PATTERN (x), insn_dependent_p_1, &tmp);
return (tmp == NULL_RTX);
}
 
/* The following determines the number of nops that need to be
inserted between the previous instructions and current instruction
to avoid pipeline hazards on the mt processor. Remember that
the function is not called for asm insns.  The result is left in
the mt_nops_required/mt_nop_reasons statics, which
mt_asm_output_opcode drains when the opcode is printed. */
 
void
mt_final_prescan_insn (rtx insn,
rtx * opvec ATTRIBUTE_UNUSED,
int noperands ATTRIBUTE_UNUSED)
{
rtx prev_i;
enum attr_type prev_attr;
 
mt_nops_required = 0;
mt_nop_reasons = "";
 
/* ms2 constraints are dealt with in reorg. */
if (TARGET_MS2)
return;
/* Only worry about real instructions. */
if (! INSN_P (insn))
return;
 
/* Find the previous real instructions. */
for (prev_i = PREV_INSN (insn);
prev_i != NULL
&& (! INSN_P (prev_i)
|| GET_CODE (PATTERN (prev_i)) == USE
|| GET_CODE (PATTERN (prev_i)) == CLOBBER);
prev_i = PREV_INSN (prev_i))
{
/* If we meet a barrier, there is no flow through here. */
if (BARRIER_P (prev_i))
return;
}
/* If there isn't one then there is nothing that we need do. */
if (prev_i == NULL || ! INSN_P (prev_i))
return;
 
prev_attr = mt_get_attr_type (prev_i);
/* Delayed branch slots already taken care of by delay branch scheduling. */
if (prev_attr == TYPE_BRANCH)
return;
 
switch (mt_get_attr_type (insn))
{
case TYPE_LOAD:
case TYPE_STORE:
/* Avoid consecutive memory operation. */
if ((prev_attr == TYPE_LOAD || prev_attr == TYPE_STORE)
&& TARGET_MS1_64_001)
{
mt_nops_required = 1;
mt_nop_reasons = "consecutive mem ops";
}
/* Drop through: memory ops are also subject to the load->arith
delay checked below. */
 
case TYPE_ARITH:
case TYPE_COMPLEX:
/* One cycle of delay is required between load
and the dependent arithmetic instruction. */
if (prev_attr == TYPE_LOAD
&& insn_true_dependent_p (prev_i, insn))
{
mt_nops_required = 1;
mt_nop_reasons = "load->arith dependency delay";
}
break;
 
case TYPE_BRANCH:
if (insn_dependent_p (prev_i, insn))
{
if (prev_attr == TYPE_ARITH && TARGET_MS1_64_001)
{
/* One cycle of delay between arith
instructions and branch dependent on arith. */
mt_nops_required = 1;
mt_nop_reasons = "arith->branch dependency delay";
}
else if (prev_attr == TYPE_LOAD)
{
/* Two cycles of delay are required
between load and dependent branch. */
if (TARGET_MS1_64_001)
mt_nops_required = 2;
else
mt_nops_required = 1;
mt_nop_reasons = "load->branch dependency delay";
}
}
break;
 
default:
fatal_insn ("mt_final_prescan_insn, invalid insn #1", insn);
break;
}
}
 
/* Print debugging information for a frame to stderr.  INFO must be
non-null; a null pointer is a hard error. */
static void
mt_debug_stack (struct mt_frame_info * info)
{
int regno;
 
if (!info)
{
error ("info pointer NULL");
gcc_unreachable ();
}
 
fprintf (stderr, "\nStack information for function %s:\n",
((current_function_decl && DECL_NAME (current_function_decl))
? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
: "<unknown>"));
 
fprintf (stderr, "\ttotal_size = %d\n", info->total_size);
fprintf (stderr, "\tpretend_size = %d\n", info->pretend_size);
fprintf (stderr, "\targs_size = %d\n", info->args_size);
fprintf (stderr, "\textra_size = %d\n", info->extra_size);
fprintf (stderr, "\treg_size = %d\n", info->reg_size);
fprintf (stderr, "\tvar_size = %d\n", info->var_size);
fprintf (stderr, "\tframe_size = %d\n", info->frame_size);
fprintf (stderr, "\treg_mask = 0x%x\n", info->reg_mask);
fprintf (stderr, "\tsave_fp = %d\n", info->save_fp);
fprintf (stderr, "\tsave_lr = %d\n", info->save_lr);
fprintf (stderr, "\tinitialized = %d\n", info->initialized);
fprintf (stderr, "\tsaved registers =");
 
/* Print out reg_mask in a more readable format. */
for (regno = GPR_R0; regno <= GPR_LAST; regno++)
if ( (1 << regno) & info->reg_mask)
fprintf (stderr, " %s", reg_names[regno]);
 
putc ('\n', stderr);
fflush (stderr);
}
 
/* Print a memory address as an operand to reference that memory location.
Addresses are printed in "reg, #offset" form; a bare constant uses r0
as the base. */
 
static void
mt_print_operand_simple_address (FILE * file, rtx addr)
{
if (!addr)
error ("PRINT_OPERAND_ADDRESS, null pointer");
 
else
switch (GET_CODE (addr))
{
case REG:
fprintf (file, "%s, #0", reg_names [REGNO (addr)]);
break;
case PLUS:
{
rtx reg = 0;
rtx offset = 0;
rtx arg0 = XEXP (addr, 0);
rtx arg1 = XEXP (addr, 1);
 
if (GET_CODE (arg0) == REG)
{
reg = arg0;
offset = arg1;
/* reg+reg addressing does not exist on this target. */
if (GET_CODE (offset) == REG)
fatal_insn ("PRINT_OPERAND_ADDRESS, 2 regs", addr);
}
 
else if (GET_CODE (arg1) == REG)
reg = arg1, offset = arg0;
else if (CONSTANT_P (arg0) && CONSTANT_P (arg1))
{
fprintf (file, "%s, #", reg_names [GPR_R0]);
output_addr_const (file, addr);
break;
}
/* NOTE(review): if neither operand is a REG and they are not both
constants, REG is still 0 here and REGNO below would fault --
presumably such addresses are rejected earlier by
mt_legitimate_address_p; confirm. */
fprintf (file, "%s, #", reg_names [REGNO (reg)]);
output_addr_const (file, offset);
break;
}
 
case LABEL_REF:
case SYMBOL_REF:
case CONST_INT:
case CONST:
output_addr_const (file, addr);
break;
 
default:
fatal_insn ("PRINT_OPERAND_ADDRESS, invalid insn #1", addr);
break;
}
}
 
/* Implement PRINT_OPERAND_ADDRESS.  An address may be wrapped in
   (and ADDR -3); strip that wrapper before printing the simple
   address underneath.  */
void
mt_print_operand_address (FILE * file, rtx addr)
{
  rtx inner = addr;

  if (GET_CODE (inner) == AND
      && GET_CODE (XEXP (inner, 1)) == CONST_INT
      && INTVAL (XEXP (inner, 1)) == -3)
    inner = XEXP (inner, 0);

  mt_print_operand_simple_address (file, inner);
}
 
/* Implement PRINT_OPERAND.  Output operand X to FILE, modified by CODE:
     '#' - emit a nop after the insn if its delay slot is empty
     'H' - the high 16 bits of a relocatable expression
     'L' - the low 16 bits of a relocatable expression
     'N' - the one's complement of a constant
     'z' - like no modifier, but a zero constant prints as r0
     0   - no modifier; print the operand normally.

   Fixed: the default-case diagnostic said "Uknown code".  */
void
mt_print_operand (FILE * file, rtx x, int code)
{
  switch (code)
    {
    case '#':
      /* Output a nop if there's nothing for the delay slot.  */
      if (dbr_sequence_length () == 0)
	fputs ("\n\tnop", file);
      return;

    case 'H':
      fprintf (file, "#%%hi16(");
      output_addr_const (file, x);
      fprintf (file, ")");
      return;

    case 'L':
      fprintf (file, "#%%lo16(");
      output_addr_const (file, x);
      fprintf (file, ")");
      return;

    case 'N':
      fprintf (file, "#%ld", ~INTVAL (x));
      return;

    case 'z':
      if (GET_CODE (x) == CONST_INT && INTVAL (x) == 0)
	{
	  fputs (reg_names[GPR_R0], file);
	  return;
	}
      /* Fall through: a non-zero 'z' operand prints normally.  */

    case 0:
      /* Handled below.  */
      break;

    default:
      /* output_operand_lossage ("mt_print_operand: unknown code"); */
      fprintf (file, "unknown code");
      return;
    }

  switch (GET_CODE (x))
    {
    case REG:
      fputs (reg_names [REGNO (x)], file);
      break;

    case CONST:
    case CONST_INT:
      fprintf (file, "#%ld", INTVAL (x));
      break;

    case MEM:
      mt_print_operand_address (file, XEXP (x, 0));
      break;

    case LABEL_REF:
    case SYMBOL_REF:
      output_addr_const (file, x);
      break;

    default:
      fprintf (file, "Unknown code: %d", GET_CODE (x));
      break;
    }

  return;
}
 
/* Implement INIT_CUMULATIVE_ARGS.  The cumulative state is simply the
number of argument-register slots consumed so far, so initialization
is zeroing; the rest of this function is -mdebug-arg tracing. */
void
mt_init_cumulative_args (CUMULATIVE_ARGS * cum, tree fntype, rtx libname,
tree fndecl ATTRIBUTE_UNUSED, int incoming)
{
*cum = 0;
 
if (TARGET_DEBUG_ARG)
{
fprintf (stderr, "\nmt_init_cumulative_args:");
 
if (incoming)
fputs (" incoming", stderr);
 
if (fntype)
{
tree ret_type = TREE_TYPE (fntype);
fprintf (stderr, " return = %s,",
tree_code_name[ (int)TREE_CODE (ret_type) ]);
}
 
if (libname && GET_CODE (libname) == SYMBOL_REF)
fprintf (stderr, " libname = %s", XSTR (libname, 0));
 
if (cfun->returns_struct)
fprintf (stderr, " return-struct");
 
putc ('\n', stderr);
}
}
 
/* Compute the slot number to pass an argument in.
Returns the slot number or -1 if passing on the stack.
 
CUM is a variable of type CUMULATIVE_ARGS which gives info about
the preceding args and about the function being called.
MODE is the argument's machine mode.
TYPE is the data type of the argument (as a tree).
This is null for libcalls where that information may
not be available.
NAMED is nonzero if this argument is a named parameter
(otherwise it is an extra parameter matching an ellipsis).
INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
*PREGNO records the register number to use if scalar type. */
 
static int
mt_function_arg_slotno (const CUMULATIVE_ARGS * cum,
enum machine_mode mode,
tree type,
int named ATTRIBUTE_UNUSED,
int incoming_p ATTRIBUTE_UNUSED,
int * pregno)
{
int regbase = FIRST_ARG_REGNUM;
int slotno = * cum;
 
/* VOIDmode (e.g. the end-of-args marker) and arguments the target
insists go on the stack never get a register slot. */
if (mode == VOIDmode || targetm.calls.must_pass_in_stack (mode, type))
return -1;
 
if (slotno >= MT_NUM_ARG_REGS)
return -1;
 
* pregno = regbase + slotno;
 
return slotno;
}
 
/* Implement FUNCTION_ARG.  Return the register holding this argument,
   or NULL_RTX when the argument is passed on the stack.  */
rtx
mt_function_arg (const CUMULATIVE_ARGS * cum,
		 enum machine_mode mode,
		 tree type,
		 int named,
		 int incoming_p)
{
  int regno;

  if (mt_function_arg_slotno (cum, mode, type, named, incoming_p, &regno)
      == -1)
    return NULL_RTX;

  return gen_rtx_REG (mode, regno);
}
 
/* Implement FUNCTION_ARG_ADVANCE.  Advance CUM past the argument just
   processed: by the argument's size in words, taken from MODE, or from
   TYPE for BLKmode arguments.

   Fixed: TYPE was marked ATTRIBUTE_UNUSED although it is passed to
   mt_function_arg_slotno and read via int_size_in_bytes for BLKmode.  */
void
mt_function_arg_advance (CUMULATIVE_ARGS * cum,
			 enum machine_mode mode,
			 tree type,
			 int named)
{
  int slotno, regno;

  /* We pass 0 for incoming_p here, it doesn't matter.  */
  slotno = mt_function_arg_slotno (cum, mode, type, named, 0, &regno);

  * cum += (mode != BLKmode
	    ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
	    : ROUND_ADVANCE (int_size_in_bytes (type)));

  if (TARGET_DEBUG_ARG)
    fprintf (stderr,
	     "mt_function_arg_advance: words = %2d, mode = %4s, named = %d, size = %3d\n",
	     *cum, GET_MODE_NAME (mode), named,
	     (*cum) * UNITS_PER_WORD);
}
 
/* Implement hook TARGET_ARG_PARTIAL_BYTES.
 
Returns the number of bytes at the beginning of an argument that
must be put in registers. The value must be zero for arguments
that are passed entirely in registers or that are entirely pushed
on the stack.  Only an argument that straddles the last register
slot yields a nonzero result. */
static int
mt_arg_partial_bytes (CUMULATIVE_ARGS * pcum,
enum machine_mode mode,
tree type,
bool named ATTRIBUTE_UNUSED)
{
int cum = * pcum;
int words;
 
/* Size of the argument in whole words. */
if (mode == BLKmode)
words = ((int_size_in_bytes (type) + UNITS_PER_WORD - 1)
/ UNITS_PER_WORD);
else
words = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
 
/* Partial only when it starts in a register but spills past the last
argument register. */
if (! targetm.calls.pass_by_reference (&cum, mode, type, named)
&& cum < MT_NUM_ARG_REGS
&& (cum + words) > MT_NUM_ARG_REGS)
{
int bytes = (MT_NUM_ARG_REGS - cum) * UNITS_PER_WORD;
 
if (TARGET_DEBUG)
fprintf (stderr, "function_arg_partial_nregs = %d\n", bytes);
return bytes;
}
 
return 0;
}
 
 
/* Implement TARGET_PASS_BY_REFERENCE hook.  Objects larger than four
   words are passed by reference; libcalls (null TYPE) never are.  */
static bool
mt_pass_by_reference (CUMULATIVE_ARGS * cum ATTRIBUTE_UNUSED,
		      enum machine_mode mode ATTRIBUTE_UNUSED,
		      tree type,
		      bool named ATTRIBUTE_UNUSED)
{
  if (type == NULL)
    return false;

  return int_size_in_bytes (type) > 4 * UNITS_PER_WORD;
}
 
/* Implement FUNCTION_ARG_BOUNDARY.  Every argument is word-aligned
regardless of mode or type. */
int
mt_function_arg_boundary (enum machine_mode mode ATTRIBUTE_UNUSED,
tree type ATTRIBUTE_UNUSED)
{
return BITS_PER_WORD;
}
 
/* Implement REG_OK_FOR_BASE_P.  In strict mode only hard registers
   may serve as a base; otherwise any register (including pseudos)
   is acceptable.  */
int
mt_reg_ok_for_base_p (rtx x, int strict)
{
  if (! strict)
    return 1;

  return ((unsigned) REGNO (x)) < FIRST_PSEUDO_REGISTER;
}
 
/* Helper function of mt_legitimate_address_p. Return true if XINSN
is a simple address, otherwise false.  A simple address is either a
base register or base register plus a small constant offset. */
static bool
mt_legitimate_simple_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
rtx xinsn, int strict)
{
if (TARGET_DEBUG)
{
fprintf (stderr, "\n========== GO_IF_LEGITIMATE_ADDRESS, %sstrict\n",
strict ? "" : "not ");
debug_rtx (xinsn);
}
 
if (GET_CODE (xinsn) == REG && mt_reg_ok_for_base_p (xinsn, strict))
return true;
 
/* reg + 16-bit signed immediate. */
if (GET_CODE (xinsn) == PLUS
&& GET_CODE (XEXP (xinsn, 0)) == REG
&& mt_reg_ok_for_base_p (XEXP (xinsn, 0), strict)
&& GET_CODE (XEXP (xinsn, 1)) == CONST_INT
&& SMALL_INT (XEXP (xinsn, 1)))
return true;
 
return false;
}
 
 
/* Helper function of GO_IF_LEGITIMATE_ADDRESS.  Return nonzero if
   XINSN is a legitimate address on MT: either a simple address, or
   (for SImode) a simple address wrapped in (and X -3).  */
int
mt_legitimate_address_p (enum machine_mode mode, rtx xinsn, int strict)
{
  if (mt_legitimate_simple_address_p (mode, xinsn, strict))
    return 1;

  if (mode == SImode
      && GET_CODE (xinsn) == AND
      && GET_CODE (XEXP (xinsn, 1)) == CONST_INT
      && INTVAL (XEXP (xinsn, 1)) == -3)
    return mt_legitimate_simple_address_p (mode, XEXP (xinsn, 0), strict);

  return 0;
}
 
/* Return truth value of whether OP can be used as an operand where a
register or 16 bit unsigned integer is needed. */
 
int
uns_arith_operand (rtx op, enum machine_mode mode)
{
if (GET_CODE (op) == CONST_INT && SMALL_INT_UNSIGNED (op))
return 1;
 
return register_operand (op, mode);
}
 
/* Return truth value of whether OP can be used as an operand where a
register or 16 bit signed integer is needed. */
 
int
arith_operand (rtx op, enum machine_mode mode)
{
if (GET_CODE (op) == CONST_INT && SMALL_INT (op))
return 1;
 
return register_operand (op, mode);
}
 
/* Return truth value of whether OP is a register (or subreg of one)
   or the constant 0.  Used by the "rJ" branch-operand predicate, where
   a zero constant is printed as r0.  */
int
reg_or_0_operand (rtx op, enum machine_mode mode)
{
  enum rtx_code code = GET_CODE (op);

  if (code == CONST_INT)
    return INTVAL (op) == 0;

  if (code == REG || code == SUBREG)
    return register_operand (op, mode);

  return 0;
}
 
/* Return truth value of whether OP is a constant that requires two
loads to put in a register (tested via the 'M' constraint letter). */
 
int
big_const_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_LETTER_P (INTVAL (op), 'M'))
return 1;
 
return 0;
}
 
/* Return truth value of whether OP is a constant that require only
one load to put in a register.
NOTE(review): despite the name, this returns 1 for ANY operand that
is not a two-load constant, CONST, LABEL_REF or SYMBOL_REF --
presumably callers only apply it to constants; confirm. */
 
int
single_const_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
if (big_const_operand (op, mode)
|| GET_CODE (op) == CONST
|| GET_CODE (op) == LABEL_REF
|| GET_CODE (op) == SYMBOL_REF)
return 0;
 
return 1;
}
 
/* True if the current function is an interrupt handler
(either via #pragma or an attribute specification). */
int interrupt_handler;
/* Processor variant selected by -march=; set in mt_override_options. */
enum processor_type mt_cpu;
 
/* Allocate the per-function machine data; ggc_alloc_cleared returns
   zero-initialized GC'd storage, so all fields start at 0.  */
static struct machine_function *
mt_init_machine_status (void)
{
  return ggc_alloc_cleared (sizeof (struct machine_function));
}
 
/* Implement OVERRIDE_OPTIONS.  Decode -march=, adjust optimization
flags that conflict with exception handling, and take over delayed
branch filling from the generic pass. */
void
mt_override_options (void)
{
if (mt_cpu_string != NULL)
{
if (!strcmp (mt_cpu_string, "ms1-64-001"))
mt_cpu = PROCESSOR_MS1_64_001;
else if (!strcmp (mt_cpu_string, "ms1-16-002"))
mt_cpu = PROCESSOR_MS1_16_002;
else if (!strcmp (mt_cpu_string, "ms1-16-003"))
mt_cpu = PROCESSOR_MS1_16_003;
else if (!strcmp (mt_cpu_string, "ms2"))
mt_cpu = PROCESSOR_MS2;
else
error ("bad value (%s) for -march= switch", mt_cpu_string);
}
else
mt_cpu = PROCESSOR_MS1_16_002;
 
/* Exception handling needs a frame pointer and is incompatible with
gcse here. */
if (flag_exceptions)
{
flag_omit_frame_pointer = 0;
flag_gcse = 0;
}
 
/* We do delayed branch filling in machine dependent reorg */
mt_flag_delayed_branch = flag_delayed_branch;
flag_delayed_branch = 0;
 
init_machine_status = mt_init_machine_status;
}
 
/* Do what is necessary for `va_start'. We look at the current function
to determine if stdarg or varargs is used and return the address of the
first unnamed parameter.  The remaining argument registers are dumped
into the pretend-args area so that va_arg can walk them in memory. */
 
static void
mt_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
enum machine_mode mode ATTRIBUTE_UNUSED,
tree type ATTRIBUTE_UNUSED,
int *pretend_size, int no_rtl)
{
int regno;
int regs = MT_NUM_ARG_REGS - *cum;
*pretend_size = regs < 0 ? 0 : GET_MODE_SIZE (SImode) * regs;
if (no_rtl)
return;
/* Store each remaining argument register at its slot relative to the
arg pointer. */
for (regno = *cum; regno < MT_NUM_ARG_REGS; regno++)
{
rtx reg = gen_rtx_REG (SImode, FIRST_ARG_REGNUM + regno);
rtx slot = gen_rtx_PLUS (Pmode,
gen_rtx_REG (SImode, ARG_POINTER_REGNUM),
GEN_INT (UNITS_PER_WORD * regno));
emit_move_insn (gen_rtx_MEM (SImode, slot), reg);
}
}
 
/* Returns the number of bytes offset between the frame pointer and the stack
pointer for the current function. SIZE is the number of bytes of space
needed for local variables.  All computed values are also recorded in
the current_frame_info global. */
 
unsigned int
mt_compute_frame_size (int size)
{
int regno;
unsigned int total_size;
unsigned int var_size;
unsigned int args_size;
unsigned int pretend_size;
unsigned int extra_size;
unsigned int reg_size;
unsigned int frame_size;
unsigned int reg_mask;
 
var_size = size;
args_size = current_function_outgoing_args_size;
pretend_size = current_function_pretend_args_size;
extra_size = FIRST_PARM_OFFSET (0);
total_size = extra_size + pretend_size + args_size + var_size;
reg_size = 0;
reg_mask = 0;
 
/* Calculate space needed for registers. */
for (regno = GPR_R0; regno <= GPR_LAST; regno++)
{
if (MUST_SAVE_REGISTER (regno))
{
reg_size += UNITS_PER_WORD;
reg_mask |= 1 << regno;
}
}
 
/* The frame pointer and link register are accounted separately from
reg_mask. */
current_frame_info.save_fp = (regs_ever_live [GPR_FP]
|| frame_pointer_needed
|| interrupt_handler);
current_frame_info.save_lr = (regs_ever_live [GPR_LINK]
|| profile_flag
|| interrupt_handler);
reg_size += (current_frame_info.save_fp + current_frame_info.save_lr)
* UNITS_PER_WORD;
total_size += reg_size;
/* Round the total frame size up to a multiple of 4 bytes. */
total_size = ((total_size + 3) & ~3);
 
frame_size = total_size;
 
/* Save computed information. */
current_frame_info.pretend_size = pretend_size;
current_frame_info.var_size = var_size;
current_frame_info.args_size = args_size;
current_frame_info.reg_size = reg_size;
current_frame_info.frame_size = args_size + var_size;
current_frame_info.total_size = total_size;
current_frame_info.extra_size = extra_size;
current_frame_info.reg_mask = reg_mask;
current_frame_info.initialized = reload_completed;
return total_size;
}
 
/* Emit code to save REG in stack offset pointed to by MEM.
STACK_OFFSET is the offset from the SP where the save will happen.
This function sets the REG_FRAME_RELATED_EXPR note accordingly.  In
the restore direction it is a plain move with no frame notes. */
static void
mt_emit_save_restore (enum save_direction direction,
rtx reg, rtx mem, int stack_offset)
{
if (direction == FROM_PROCESSOR_TO_MEM)
{
rtx insn;
insn = emit_move_insn (mem, reg);
RTX_FRAME_RELATED_P (insn) = 1;
/* Describe the save as SP-relative for the unwinder, even when the
actual store went through a different base register. */
REG_NOTES (insn)
= gen_rtx_EXPR_LIST
(REG_FRAME_RELATED_EXPR,
gen_rtx_SET (VOIDmode,
gen_rtx_MEM (SImode,
gen_rtx_PLUS (SImode,
stack_pointer_rtx,
GEN_INT (stack_offset))),
reg),
REG_NOTES (insn));
}
else
emit_move_insn (reg, mem);
}
 
 
/* Emit code to save the frame pointer in the prologue and restore
frame pointer in epilogue.  Mirrors the offset bookkeeping of
mt_emit_save_regs so the FP lands in the same slot either way. */
 
static void
mt_emit_save_fp (enum save_direction direction,
struct mt_frame_info info)
{
rtx base_reg;
/* reg_mask is only needed for the nothing-to-save early exit. */
int reg_mask = info.reg_mask & ~(FP_MASK | LINK_MASK);
int offset = info.total_size;
int stack_offset = info.total_size;
 
/* If there is nothing to save, get out now. */
if (! info.save_fp && ! info.save_lr && ! reg_mask)
return;
 
/* If offset doesn't fit in a 15-bit signed integer,
uses a scratch registers to get a smaller offset. */
if (CONST_OK_FOR_LETTER_P(offset, 'O'))
base_reg = stack_pointer_rtx;
else
{
/* Use the scratch register R9 that holds old stack pointer. */
base_reg = gen_rtx_REG (SImode, GPR_R9);
offset = 0;
}
 
if (info.save_fp)
{
offset -= UNITS_PER_WORD;
stack_offset -= UNITS_PER_WORD;
mt_emit_save_restore
(direction, gen_rtx_REG (SImode, GPR_FP),
gen_rtx_MEM (SImode,
gen_rtx_PLUS (SImode, base_reg, GEN_INT (offset))),
stack_offset);
}
}
 
/* Emit code to save registers in the prologue and restore register
in epilogue.  The layout from the top of the frame downward is:
frame pointer slot (handled by mt_emit_save_fp), link register slot,
then each call-saved register in info.reg_mask. */
 
static void
mt_emit_save_regs (enum save_direction direction,
struct mt_frame_info info)
{
rtx base_reg;
int regno;
int reg_mask = info.reg_mask & ~(FP_MASK | LINK_MASK);
int offset = info.total_size;
int stack_offset = info.total_size;
 
/* If there is nothing to save, get out now. */
if (! info.save_fp && ! info.save_lr && ! reg_mask)
return;
 
/* If offset doesn't fit in a 15-bit signed integer,
uses a scratch registers to get a smaller offset. */
if (CONST_OK_FOR_LETTER_P(offset, 'O'))
base_reg = stack_pointer_rtx;
else
{
/* Use the scratch register R9 that holds old stack pointer. */
base_reg = gen_rtx_REG (SImode, GPR_R9);
offset = 0;
}
 
if (info.save_fp)
{
/* This just records the space for it, the actual move generated in
mt_emit_save_fp (). */
offset -= UNITS_PER_WORD;
stack_offset -= UNITS_PER_WORD;
}
 
if (info.save_lr)
{
offset -= UNITS_PER_WORD;
stack_offset -= UNITS_PER_WORD;
mt_emit_save_restore
(direction, gen_rtx_REG (SImode, GPR_LINK),
gen_rtx_MEM (SImode,
gen_rtx_PLUS (SImode, base_reg, GEN_INT (offset))),
stack_offset);
}
 
/* Save any needed call-saved regs. */
for (regno = GPR_R0; regno <= GPR_LAST; regno++)
{
if ((reg_mask & (1 << regno)) != 0)
{
offset -= UNITS_PER_WORD;
stack_offset -= UNITS_PER_WORD;
mt_emit_save_restore
(direction, gen_rtx_REG (SImode, regno),
gen_rtx_MEM (SImode,
gen_rtx_PLUS (SImode, base_reg, GEN_INT (offset))),
stack_offset);
}
}
}
 
/* Return true if FUNC is a function with the 'interrupt' attribute.  */
static bool
mt_interrupt_function_p (tree func)
{
  if (TREE_CODE (func) != FUNCTION_DECL)
    return false;

  return lookup_attribute ("interrupt", DECL_ATTRIBUTES (func)) != NULL_TREE;
}
 
/* Generate prologue code.  Records interrupt-handler status, allocates
   the stack frame, saves FP and the call-saved registers, and sets up
   the frame pointer if needed.  */
void
mt_expand_prologue (void)
{
  rtx size_rtx, insn;
  unsigned int frame_size;

  /* Note whether this is an interrupt handler; the register-save and
     epilogue code depend on it.  */
  if (mt_interrupt_function_p (current_function_decl))
    {
      interrupt_handler = 1;
      if (cfun->machine)
	cfun->machine->interrupt_handler = 1;
    }

  mt_compute_frame_size (get_frame_size ());

  if (TARGET_DEBUG_STACK)
    mt_debug_stack (&current_frame_info);

  /* Compute size of stack adjustment.  */
  frame_size = current_frame_info.total_size;

  /* If offset doesn't fit in a 15-bit signed integer,
     uses a scratch registers to get a smaller offset.  */
  if (CONST_OK_FOR_LETTER_P(frame_size, 'O'))
    size_rtx = GEN_INT (frame_size);
  else
    {
      /* We do not have any scratch registers.  */
      gcc_assert (!interrupt_handler);

      /* Build the large frame size in R9 in two 16-bit halves.  */
      size_rtx = gen_rtx_REG (SImode, GPR_R9);
      insn = emit_move_insn (size_rtx, GEN_INT (frame_size & 0xffff0000));
      insn = emit_insn (gen_iorsi3 (size_rtx, size_rtx,
				    GEN_INT (frame_size & 0x0000ffff)));
    }

  /* Allocate stack for this frame.  */
  /* Make stack adjustment and use scratch register if constant too
     large to fit as immediate.  */
  if (frame_size)
    {
      insn = emit_insn (gen_subsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    size_rtx));
      RTX_FRAME_RELATED_P (insn) = 1;
      /* Describe the adjustment to the unwinder as SP = SP - frame_size,
	 since the actual insn may use the scratch register.  */
      REG_NOTES (insn)
	= gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
			     gen_rtx_SET (VOIDmode,
					  stack_pointer_rtx,
					  gen_rtx_MINUS (SImode,
							 stack_pointer_rtx,
							 GEN_INT (frame_size))),
			     REG_NOTES (insn));
    }

  /* Set R9 to point to old sp if required for access to register save
     area.  */
  if ( current_frame_info.reg_size != 0
       && !CONST_OK_FOR_LETTER_P (frame_size, 'O'))
    emit_insn (gen_addsi3 (size_rtx, size_rtx, stack_pointer_rtx));

  /* Save the frame pointer.  */
  mt_emit_save_fp (FROM_PROCESSOR_TO_MEM, current_frame_info);

  /* Now put the frame pointer into the frame pointer register.  */
  if (frame_pointer_needed)
    {
      insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  /* Save the registers.  */
  mt_emit_save_regs (FROM_PROCESSOR_TO_MEM, current_frame_info);

  /* If we are profiling, make sure no instructions are scheduled before
     the call to mcount.  */
  if (profile_flag)
    emit_insn (gen_blockage ());
}
 
/* Implement EPILOGUE_USES.  After reload, an interrupt handler's
   epilogue uses every register; otherwise only the link register.  */
int
mt_epilogue_uses (int regno)
{
  if (reload_completed
      && cfun->machine
      && cfun->machine->interrupt_handler)
    return 1;

  return regno == GPR_LINK;
}
 
/* Generate epilogue.  EH_MODE is NORMAL_EPILOGUE when generating a
   function epilogue, or EH_EPILOGUE when generating an EH
   epilogue.  Restores registers, deallocates the frame, applies any
   pending __throw stack adjustment, and emits the proper return.  */
void
mt_expand_epilogue (enum epilogue_type eh_mode)
{
  rtx size_rtx, insn;
  unsigned frame_size;

  mt_compute_frame_size (get_frame_size ());

  if (TARGET_DEBUG_STACK)
    mt_debug_stack (& current_frame_info);

  /* Compute size of stack adjustment.  */
  frame_size = current_frame_info.total_size;

  /* If offset doesn't fit in a 15-bit signed integer,
     uses a scratch registers to get a smaller offset.  */
  if (CONST_OK_FOR_LETTER_P(frame_size, 'O'))
    size_rtx = GEN_INT (frame_size);
  else
    {
      /* We do not have any scratch registers.  */
      gcc_assert (!interrupt_handler);

      /* Build the frame size in R9 in two 16-bit halves, then make R9
	 point at the old stack pointer.  */
      size_rtx = gen_rtx_REG (SImode, GPR_R9);
      insn = emit_move_insn (size_rtx, GEN_INT (frame_size & 0xffff0000));
      insn = emit_insn (gen_iorsi3 (size_rtx, size_rtx,
				    GEN_INT (frame_size & 0x0000ffff)));
      /* Set R9 to point to old sp if required for access to register
	 save area.  */
      emit_insn (gen_addsi3 (size_rtx, size_rtx, stack_pointer_rtx));
    }

  /* Restore sp if there was some possible change to it.  */
  if (frame_pointer_needed)
    insn = emit_move_insn (stack_pointer_rtx, frame_pointer_rtx);

  /* Restore the registers.  */
  mt_emit_save_fp (FROM_MEM_TO_PROCESSOR, current_frame_info);
  mt_emit_save_regs (FROM_MEM_TO_PROCESSOR, current_frame_info);

  /* Make stack adjustment and use scratch register if constant too
     large to fit as immediate.  */
  if (frame_size)
    {
      if (CONST_OK_FOR_LETTER_P(frame_size, 'O'))
	/* Can handle this with simple add.  */
	insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
				      stack_pointer_rtx,
				      size_rtx));
      else
	/* Scratch reg R9 has the old sp value.  */
	insn = emit_move_insn (stack_pointer_rtx,
			       gen_rtx_REG (SImode, GPR_R9));

      /* Annotate the deallocation as SP = SP + frame_size for the
	 unwinder.  */
      REG_NOTES (insn)
	= gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
			     gen_rtx_SET (VOIDmode,
					  stack_pointer_rtx,
					  gen_rtx_PLUS (SImode,
							stack_pointer_rtx,
							GEN_INT (frame_size))),
			     REG_NOTES (insn));
    }

  if (cfun->machine && cfun->machine->eh_stack_adjust != NULL_RTX)
    /* Perform the additional bump for __throw.  */
    emit_insn (gen_addsi3 (stack_pointer_rtx,
			   stack_pointer_rtx,
			   cfun->machine->eh_stack_adjust));

  /* Generate the appropriate return.  */
  if (eh_mode == EH_EPILOGUE)
    {
      emit_jump_insn (gen_eh_return_internal ());
      emit_barrier ();
    }
  else if (interrupt_handler)
    emit_jump_insn (gen_return_interrupt_internal ());
  else
    emit_jump_insn (gen_return_internal ());

  /* Reset state info for each function.  */
  interrupt_handler = 0;
  current_frame_info = zero_frame_info;
  if (cfun->machine)
    cfun->machine->eh_stack_adjust = NULL_RTX;
}
 
 
/* Generate code for the "eh_return" pattern.  Force the stack
   adjustment into EH_RETURN_STACKADJ_RTX if it is not already there,
   then emit the eh_epilogue pattern.  */
void
mt_expand_eh_return (rtx * operands)
{
  rtx adj = operands[0];

  if (GET_CODE (adj) != REG || REGNO (adj) != EH_RETURN_STACKADJ_REGNO)
    {
      emit_move_insn (EH_RETURN_STACKADJ_RTX, adj);
      operands[0] = EH_RETURN_STACKADJ_RTX;
    }

  emit_insn (gen_eh_epilogue (operands[0]));
}
 
/* Generate code for the "eh_epilogue" pattern.  Records that the
   epilogue must perform the extra stack bump (taken from
   EH_RETURN_STACKADJ_RTX rather than operands[0]) and emits an EH
   epilogue.  */
void
mt_emit_eh_epilogue (rtx * operands ATTRIBUTE_UNUSED)
{
  cfun->machine->eh_stack_adjust = EH_RETURN_STACKADJ_RTX; /* operands[0]; */
  mt_expand_epilogue (EH_EPILOGUE);
}
 
/* Handle an "interrupt" attribute: valid only on function
   declarations; warn and drop it anywhere else.  */
static tree
mt_handle_interrupt_attribute (tree * node,
			       tree name,
			       tree args ATTRIBUTE_UNUSED,
			       int flags ATTRIBUTE_UNUSED,
			       bool * no_add_attrs)
{
  if (TREE_CODE (*node) == FUNCTION_DECL)
    return NULL_TREE;

  warning (OPT_Wattributes,
	   "%qs attribute only applies to functions",
	   IDENTIFIER_POINTER (name));
  *no_add_attrs = true;

  return NULL_TREE;
}
 
/* Table of machine attributes.  Only "interrupt" is recognized; the
   NULL entry terminates the table.  */
const struct attribute_spec mt_attribute_table[] =
{
  /* name, min, max, decl?, type?, func?, handler  */
  { "interrupt", 0, 0, false, false, false, mt_handle_interrupt_attribute },
  { NULL,        0, 0, false, false, false, NULL }
};
 
/* Implement INITIAL_ELIMINATION_OFFSET.  FP eliminates to SP with no
   offset; the argument pointer is total_size above either.  */
int
mt_initial_elimination_offset (int from, int to)
{
  mt_compute_frame_size (get_frame_size ());

  if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return 0;

  if (from == ARG_POINTER_REGNUM
      && (to == STACK_POINTER_REGNUM || to == FRAME_POINTER_REGNUM))
    return current_frame_info.total_size;

  gcc_unreachable ();
}
 
/* Generate a compare for CODE.  Return a brand-new rtx that
   represents the result of the compare.  Unsigned comparisons are
   faked by biasing both operands with MT_MIN_INT and using the
   corresponding signed comparison.  */

static rtx
mt_generate_compare (enum rtx_code code, rtx op0, rtx op1)
{
  rtx lhs = op0;
  rtx rhs = op1;

  switch (code)
    {
    case GTU:
    case LTU:
    case GEU:
    case LEU:
      {
	/* Bias both operands so that the signed comparison below gives
	   the unsigned result.  */
	rtx bias = force_reg (SImode, GEN_INT (MT_MIN_INT));

	lhs = gen_reg_rtx (SImode);
	rhs = gen_reg_rtx (SImode);
	emit_insn (gen_addsi3 (lhs, bias, op0));
	emit_insn (gen_addsi3 (rhs, bias, op1));

	code = (code == GTU ? GT
		: code == LTU ? LT
		: code == GEU ? GE
		: LE);
      }
      break;

    default:
      break;
    }

  /* Generate the actual compare.  */
  return gen_rtx_fmt_ee (code, VOIDmode, lhs, rhs);
}
 
/* Emit a conditional branch of kind CODE to label LOC, comparing OP0
   against OP1.  */

void
mt_emit_cbranch (enum rtx_code code, rtx loc, rtx op0, rtx op1)
{
  rtx cond, target;

  /* The compare only accepts a register or zero for each operand.  */
  if (! reg_or_0_operand (op0, SImode))
    op0 = copy_to_mode_reg (SImode, op0);
  if (! reg_or_0_operand (op1, SImode))
    op1 = copy_to_mode_reg (SImode, op1);

  cond = mt_generate_compare (code, op0, op1);
  target = gen_rtx_LABEL_REF (VOIDmode, loc);
  emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
			       gen_rtx_IF_THEN_ELSE (VOIDmode, cond,
						     target, pc_rtx)));
}
 
/* Helper for mt_set_memflags.  Recursively walk X and apply the
   in-struct and volatile flags to every MEM found within it.  */

static void
mt_set_memflags_1 (rtx x, int in_struct_p, int volatile_p)
{
  int i;

  switch (GET_CODE (x))
    {
    case MEM:
      MEM_IN_STRUCT_P (x) = in_struct_p;
      MEM_VOLATILE_P (x) = volatile_p;
      /* Sadly, we cannot use alias sets because the extra aliasing
	 produced by the AND interferes.  Given that two-byte quantities
	 are the only thing we would be able to differentiate anyway,
	 there does not seem to be any point in convoluting the early
	 out of the alias check.  */
      /* set_mem_alias_set (x, alias_set); */
      break;

    case SET:
      mt_set_memflags_1 (SET_DEST (x), in_struct_p, volatile_p);
      mt_set_memflags_1 (SET_SRC (x), in_struct_p, volatile_p);
      break;

    case INSN:
      mt_set_memflags_1 (PATTERN (x), in_struct_p, volatile_p);
      break;

    case SEQUENCE:
    case PARALLEL:
      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
	mt_set_memflags_1 (XVECEXP (x, 0, i), in_struct_p, volatile_p);
      break;

    default:
      break;
    }
}
 
/* Propagate the in-struct and volatile flags of the MEM REF onto
   every MEM in the current sequence of insns.  If REF is not a MEM,
   do nothing.  */

void
mt_set_memflags (rtx ref)
{
  rtx insn;
  int in_struct_p, volatile_p;

  if (GET_CODE (ref) != MEM)
    return;

  in_struct_p = MEM_IN_STRUCT_P (ref);
  volatile_p = MEM_VOLATILE_P (ref);

  /* This is only called from mt.md, after having had something
     generated from one of the insn patterns.  So if everything is
     zero, the pattern is already up-to-date.  */
  if (! in_struct_p && ! volatile_p)
    return;

  insn = get_insns ();
  while (insn)
    {
      mt_set_memflags_1 (insn, in_struct_p, volatile_p);
      insn = NEXT_INSN (insn);
    }
}
 
/* Implement SECONDARY_RELOAD_CLASS.  QImode without byte access, and
   HImode always, need a general-register scratch when the operand is
   in memory or is a register that did not get a hard reg.  */
enum reg_class
mt_secondary_reload_class (enum reg_class class ATTRIBUTE_UNUSED,
			   enum machine_mode mode,
			   rtx x)
{
  if ((mode == QImode && (!TARGET_BYTE_ACCESS)) || mode == HImode)
    {
      if (GET_CODE (x) == MEM)
	return GENERAL_REGS;

      if (GET_CODE (x) == REG && true_regnum (x) == -1)
	return GENERAL_REGS;

      if (GET_CODE (x) == SUBREG)
	{
	  rtx inner = SUBREG_REG (x);

	  if (GET_CODE (inner) == MEM
	      || (GET_CODE (inner) == REG && true_regnum (inner) == -1))
	    return GENERAL_REGS;
	}
    }

  return NO_REGS;
}
 
/* Handle FUNCTION_VALUE, FUNCTION_OUTGOING_VALUE, and LIBCALL_VALUE
   macros.  */
rtx
mt_function_value (tree valtype, enum machine_mode mode, tree func_decl ATTRIBUTE_UNUSED)
{
  /* NOTE(review): this tests the incoming MODE before it is replaced
     by TYPE_MODE (valtype) below, and wraps the value register in a
     MEM -- presumably intended for the libcall path where no type is
     available; confirm before changing.  */
  if ((mode) == DImode || (mode) == DFmode)
    return gen_rtx_MEM (mode, gen_rtx_REG (mode, RETURN_VALUE_REGNUM));

  /* When a type is available, its mode takes precedence.  */
  if (valtype)
    mode = TYPE_MODE (valtype);

  return gen_rtx_REG (mode, RETURN_VALUE_REGNUM);
}
 
/* Split a move into two smaller pieces.
   MODE indicates the reduced mode.  OPERANDS[0] is the original destination
   OPERANDS[1] is the original src.  The new destinations are
   OPERANDS[2] and OPERANDS[4], while the new sources are OPERANDS[3]
   and OPERANDS[5].  The pieces are ordered so that an overlapping
   register move or an address-clobbering load is done high word
   first.  */

void
mt_split_words (enum machine_mode nmode,
		enum machine_mode omode,
		rtx *operands)
{
  rtx dl,dh;	/* src/dest pieces.  */
  rtx sl,sh;
  int move_high_first = 0;	/* Assume no overlap.  */

  switch (GET_CODE (operands[0])) /* Dest.  */
    {
    case SUBREG:
    case REG:
      /* If source and destination are both registers and the
	 destination regno is not above the source's, moving the high
	 word first avoids clobbering the source.  */
      if ((GET_CODE (operands[1]) == REG
	   || GET_CODE (operands[1]) == SUBREG)
	  && true_regnum (operands[0]) <= true_regnum (operands[1]))
	move_high_first = 1;

      if (GET_CODE (operands[0]) == SUBREG)
	{
	  dl = gen_rtx_SUBREG (nmode, SUBREG_REG (operands[0]),
			       SUBREG_BYTE (operands[0]) + GET_MODE_SIZE (nmode));
	  dh = gen_rtx_SUBREG (nmode, SUBREG_REG (operands[0]), SUBREG_BYTE (operands[0]));
	}
      else if (GET_CODE (operands[0]) == REG && ! IS_PSEUDO_P (operands[0]))
	{
	  /* Hard register: high word in R, low word in the next reg.  */
	  int r = REGNO (operands[0]);
	  dh = gen_rtx_REG (nmode, r);
	  dl = gen_rtx_REG (nmode, r + HARD_REGNO_NREGS (r, nmode));
	}
      else
	{
	  dh = gen_rtx_SUBREG (nmode, operands[0], 0);
	  dl = gen_rtx_SUBREG (nmode, operands[0], GET_MODE_SIZE (nmode));
	}
      break;

    case MEM:
      switch (GET_CODE (XEXP (operands[0], 0)))
	{
	case POST_INC:
	case POST_DEC:
	  /* Auto-modify addresses are not handled here.  */
	  gcc_unreachable ();
	default:
	  dl = operand_subword (operands[0],
				GET_MODE_SIZE (nmode)/UNITS_PER_WORD,
				0, omode);
	  dh = operand_subword (operands[0], 0, 0, omode);
	}
      break;
    default:
      gcc_unreachable ();
    }

  switch (GET_CODE (operands[1]))
    {
    case REG:
      if (! IS_PSEUDO_P (operands[1]))
	{
	  int r = REGNO (operands[1]);

	  sh = gen_rtx_REG (nmode, r);
	  sl = gen_rtx_REG (nmode, r + HARD_REGNO_NREGS (r, nmode));
	}
      else
	{
	  sh = gen_rtx_SUBREG (nmode, operands[1], 0);
	  sl = gen_rtx_SUBREG (nmode, operands[1], GET_MODE_SIZE (nmode));
	}
      break;

    case CONST_DOUBLE:
      if (operands[1] == const0_rtx)
	sh = sl = const0_rtx;
      else
	split_double (operands[1], & sh, & sl);
      break;

    case CONST_INT:
      if (operands[1] == const0_rtx)
	sh = sl = const0_rtx;
      else
	{
	  int vl, vh;

	  /* NOTE(review): no mode is actually handled by this switch,
	     so any nonzero CONST_INT source aborts here; VL and VH are
	     never set on a reachable path.  */
	  switch (nmode)
	    {
	    default:
	      gcc_unreachable ();
	    }
	  sl = GEN_INT (vl);
	  sh = GEN_INT (vh);
	}
      break;

    case SUBREG:
      sl = gen_rtx_SUBREG (nmode,
			   SUBREG_REG (operands[1]),
			   SUBREG_BYTE (operands[1]) + GET_MODE_SIZE (nmode));
      sh = gen_rtx_SUBREG (nmode,
			   SUBREG_REG (operands[1]),
			   SUBREG_BYTE (operands[1]));
      break;

    case MEM:
      switch (GET_CODE (XEXP (operands[1], 0)))
	{
	case POST_DEC:
	case POST_INC:
	  gcc_unreachable ();
	  break;
	default:
	  sl = operand_subword (operands[1],
				GET_MODE_SIZE (nmode)/UNITS_PER_WORD,
				0, omode);
	  sh = operand_subword (operands[1], 0, 0, omode);

	  /* Check if the DF load is going to clobber the register
	     used for the address, and if so make sure that is going
	     to be the second move.  */
	  if (GET_CODE (dl) == REG
	      && true_regnum (dl)
	      == true_regnum (XEXP (XEXP (sl, 0 ), 0)))
	    move_high_first = 1;
	}
      break;
    default:
      gcc_unreachable ();
    }

  if (move_high_first)
    {
      operands[2] = dh;
      operands[3] = sh;
      operands[4] = dl;
      operands[5] = sl;
    }
  else
    {
      operands[2] = dl;
      operands[3] = sl;
      operands[4] = dh;
      operands[5] = sh;
    }
  return;
}
 
/* Implement TARGET_MUST_PASS_IN_STACK hook: variable-sized and
   addressable types must go on the stack.  */
static bool
mt_pass_in_stack (enum machine_mode mode ATTRIBUTE_UNUSED, tree type)
{
  if (type == 0)
    return false;

  return (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST
	  || TREE_ADDRESSABLE (type));
}
/* Record one more loop instruction in the current function; machine
   dependent reorg only scans for hardware loops when this is set.  */

void mt_add_loop (void)
{
  cfun->machine->has_loops += 1;
}
 
 
/* Maximum loop nesting depth.  */
#define MAX_LOOP_DEPTH 4
/* Maximum size of a loop in bytes (allows some headroom for delayed
   branch slot filling).  */
#define MAX_LOOP_LENGTH (200 * 4)

/* We need to keep a vector of loops.  The pointer typedef lets the
   VEC machinery manage loop_info objects by reference.  */
typedef struct loop_info *loop_info;
DEF_VEC_P (loop_info);
DEF_VEC_ALLOC_P (loop_info,heap);
 
/* Information about a loop we have found (or are in the process of
   finding).  */
struct loop_info GTY (())
{
  /* loop number, for dumps */
  int loop_no;

  /* Predecessor block of the loop.  This is the one that falls into
     the loop and contains the initialization instruction.  */
  basic_block predecessor;

  /* First block in the loop.  This is the one branched to by the dbnz
     insn.  */
  basic_block head;

  /* Last block in the loop (the one with the dbnz insn).  */
  basic_block tail;

  /* The successor block of the loop.  This is the one the dbnz insn
     falls into.  */
  basic_block successor;

  /* The dbnz insn.  */
  rtx dbnz;

  /* The initialization insn.  */
  rtx init;

  /* The new initialization instruction.  */
  rtx loop_init;

  /* The new ending instruction.  */
  rtx loop_end;

  /* The new label placed at the end of the loop.  */
  rtx end_label;

  /* The nesting depth of the loop.  0 means not yet computed; set to
     -1 for a bad loop.  */
  int depth;

  /* The length of the loop, as the sum of insn lengths.  */
  int length;

  /* Next loop in the graph.  */
  struct loop_info *next;

  /* Vector of blocks only within the loop, (excluding those within
     inner loops).  */
  VEC (basic_block,heap) *blocks;

  /* Vector of inner loops within this loop.  */
  VEC (loop_info,heap) *loops;
};
 
/* Information used during loop detection.  Each work item pairs a
   basic block with the loop it is being scanned for.  */
typedef struct loop_work GTY(())
{
  /* Basic block to be scanned.  */
  basic_block block;

  /* Loop it will be within.  */
  loop_info loop;
} loop_work;

/* Work list.  */
DEF_VEC_O (loop_work);
DEF_VEC_ALLOC_O (loop_work,heap);
 
/* Determine the nesting and length of LOOP.  Return false if the loop
   is bad: no single fall-through predecessor, nesting too deep, body
   too long, or a bad inner loop.  */

static bool
mt_loop_nesting (loop_info loop)
{
  loop_info inner;
  unsigned ix;
  int inner_depth = 0;

  /* depth == 0 means this loop has not been processed yet.  */
  if (!loop->depth)
    {
      /* Make sure we only have one entry point.  */
      if (EDGE_COUNT (loop->head->preds) == 2)
	{
	  loop->predecessor = EDGE_PRED (loop->head, 0)->src;
	  if (loop->predecessor == loop->tail)
	    /* We wanted the other predecessor.  */
	    loop->predecessor = EDGE_PRED (loop->head, 1)->src;

	  /* We can only place a loop insn on a fall through edge of a
	     single exit block.  */
	  if (EDGE_COUNT (loop->predecessor->succs) != 1
	      || !(EDGE_SUCC (loop->predecessor, 0)->flags & EDGE_FALLTHRU))
	    loop->predecessor = NULL;
	}

      /* Mark this loop as bad for now.  */
      loop->depth = -1;
      if (loop->predecessor)
	{
	  /* Recursively process inner loops, accumulating their lengths
	     and the maximum inner depth.  Note: the post-increment of
	     IX inside the VEC_iterate call supplies the loop step.  */
	  for (ix = 0; VEC_iterate (loop_info, loop->loops, ix++, inner);)
	    {
	      if (!inner->depth)
		mt_loop_nesting (inner);

	      if (inner->depth < 0)
		{
		  inner_depth = -1;
		  break;
		}

	      if (inner_depth < inner->depth)
		inner_depth = inner->depth;
	      loop->length += inner->length;
	    }

	  /* Set the proper loop depth, if it was good.  */
	  if (inner_depth >= 0)
	    loop->depth = inner_depth + 1;
	}
    }
  return (loop->depth > 0
	  && loop->predecessor
	  && loop->depth < MAX_LOOP_DEPTH
	  && loop->length < MAX_LOOP_LENGTH);
}
 
/* Determine the length of block BB by summing insn lengths.  A call
   forces the result past MAX_LOOP_LENGTH, since calls may not appear
   inside a hardware loop.  */

static int
mt_block_length (basic_block bb)
{
  int total = 0;
  rtx stop = NEXT_INSN (BB_END (bb));
  rtx insn;

  for (insn = BB_HEAD (bb); insn != stop; insn = NEXT_INSN (insn))
    {
      if (!INSN_P (insn))
	continue;

      /* Calls are not allowed in loops.  */
      if (CALL_P (insn))
	return MAX_LOOP_LENGTH + 1;

      total += get_attr_length (insn);
    }

  return total;
}
 
/* Scan the blocks of LOOP (and its inferiors) looking for uses of
   REG.  Return true, if we find any.  Don't count the loop's dbnz
   insn if it matches DBNZ.  */

static bool
mt_scan_loop (loop_info loop, rtx reg, rtx dbnz)
{
  unsigned ix;
  loop_info inner;
  basic_block bb;

  /* Check every insn of every block directly inside this loop.  */
  for (ix = 0; VEC_iterate (basic_block, loop->blocks, ix, bb); ix++)
    {
      rtx stop = NEXT_INSN (BB_END (bb));
      rtx insn;

      for (insn = BB_HEAD (bb); insn != stop; insn = NEXT_INSN (insn))
	{
	  if (!INSN_P (insn) || insn == dbnz)
	    continue;
	  if (reg_mentioned_p (reg, PATTERN (insn)))
	    return true;
	}
    }

  /* Recurse into nested loops; their dbnz insns do count.  */
  for (ix = 0; VEC_iterate (loop_info, loop->loops, ix, inner); ix++)
    if (mt_scan_loop (inner, reg, NULL_RTX))
      return true;

  return false;
}
 
/* MS2 has a loop instruction which needs to be placed just before the
loop. It indicates the end of the loop and specifies the number of
loop iterations. It can be nested with an automatically maintained
stack of counter and end address registers. It's an ideal
candidate for doloop. Unfortunately, gcc presumes that loops
always end with an explicit instruction, and the doloop_begin
instruction is not a flow control instruction so it can be
scheduled earlier than just before the start of the loop. To make
matters worse, the optimization pipeline can duplicate loop exit
and entrance blocks and fails to track abnormally exiting loops.
Thus we cannot simply use doloop.
 
What we do is emit a dbnz pattern for the doloop optimization, and
let that be optimized as normal. Then in machine dependent reorg
we have to repeat the loop searching algorithm. We use the
flow graph to find closed loops ending in a dbnz insn. We then try
and convert it to use the loop instruction. The conditions are,
 
* the loop has no abnormal exits, duplicated end conditions or
duplicated entrance blocks
 
* the loop counter register is only used in the dbnz instruction
within the loop
* we can find the instruction setting the initial value of the loop
counter
 
* the loop is not executed more than 65535 times. (This might be
changed to 2^32-1, and would therefore allow variable initializers.)
 
   * the loop is not nested more than 4 deep

   * there are no subroutine calls in the loop.  */
 
/* See the block comment above for the conditions a loop must satisfy
   before its dbnz pattern can be replaced by the MS2 loop insn.  */
static void
mt_reorg_loops (FILE *dump_file)
{
  basic_block bb;
  loop_info loops = NULL;
  loop_info loop;
  int nloops = 0;
  unsigned dwork = 0;
  VEC (loop_work,heap) *works = VEC_alloc (loop_work,heap,20);
  loop_work *work;
  edge e;
  edge_iterator ei;
  bool replaced = false;

  /* Find all the possible loop tails.  This means searching for every
     dbnz instruction.  For each one found, create a loop_info
     structure and add the head block to the work list. */
  FOR_EACH_BB (bb)
    {
      rtx tail = BB_END (bb);

      /* Skip over trailing notes to find the real last insn.  */
      while (GET_CODE (tail) == NOTE)
	tail = PREV_INSN (tail);

      bb->aux = NULL;
      if (recog_memoized (tail) == CODE_FOR_decrement_and_branch_until_zero)
	{
	  /* A possible loop end */

	  loop = XNEW (struct loop_info);
	  loop->next = loops;
	  loops = loop;
	  loop->tail = bb;
	  loop->head = BRANCH_EDGE (bb)->dest;
	  loop->successor = FALLTHRU_EDGE (bb)->dest;
	  loop->predecessor = NULL;
	  loop->dbnz = tail;
	  loop->depth = 0;
	  loop->length = mt_block_length (bb);
	  loop->blocks = VEC_alloc (basic_block, heap, 20);
	  VEC_quick_push (basic_block, loop->blocks, bb);
	  loop->loops = NULL;
	  loop->loop_no = nloops++;

	  loop->init = loop->end_label = NULL_RTX;
	  loop->loop_init = loop->loop_end = NULL_RTX;

	  work = VEC_safe_push (loop_work, heap, works, NULL);
	  work->block = loop->head;
	  work->loop = loop;

	  bb->aux = loop;

	  if (dump_file)
	    {
	      fprintf (dump_file, ";; potential loop %d ending at\n",
		       loop->loop_no);
	      print_rtl_single (dump_file, tail);
	    }
	}
    }

  /* Now find all the closed loops.
     until work list empty,
      if block's auxptr is set
	if != loop slot
	  if block's loop's start != block
	    mark loop as bad
	  else
	    append block's loop's fallthrough block to worklist
	    increment this loop's depth
      else if block is exit block
	mark loop as bad
      else
	set auxptr
	for each target of block
	  add to worklist */
  while (VEC_iterate (loop_work, works, dwork++, work))
    {
      loop = work->loop;
      bb = work->block;

      if (bb == EXIT_BLOCK_PTR)
	/* We've reached the exit block.  The loop must be bad. */
	loop->depth = -1;
      else if (!bb->aux)
	{
	  /* We've not seen this block before.  Add it to the loop's
	     list and then add each successor to the work list.  */
	  bb->aux = loop;
	  loop->length += mt_block_length (bb);
	  VEC_safe_push (basic_block, heap, loop->blocks, bb);
	  FOR_EACH_EDGE (e, ei, bb->succs)
	    {
	      if (!VEC_space (loop_work, works, 1))
		{
		  if (dwork)
		    {
		      /* Reclaim the already-processed prefix of the
			 work list before growing it.  */
		      VEC_block_remove (loop_work, works, 0, dwork);
		      dwork = 0;
		    }
		  else
		    VEC_reserve (loop_work, heap, works, 1);
		}
	      work = VEC_quick_push (loop_work, works, NULL);
	      work->block = EDGE_SUCC (bb, ei.index)->dest;
	      work->loop = loop;
	    }
	}
      else if (bb->aux != loop)
	{
	  /* We've seen this block in a different loop.  If it's not
	     the other loop's head, then this loop must be bad.
	     Otherwise, the other loop might be a nested loop, so
	     continue from that loop's successor.  */
	  loop_info other = bb->aux;

	  if (other->head != bb)
	    loop->depth = -1;
	  else
	    {
	      VEC_safe_push (loop_info, heap, loop->loops, other);
	      work = VEC_safe_push (loop_work, heap, works, NULL);
	      work->loop = loop;
	      work->block = other->successor;
	    }
	}
    }
  VEC_free (loop_work, heap, works);

  /* Now optimize the loops.  */
  for (loop = loops; loop; loop = loop->next)
    {
      rtx iter_reg, insn, init_insn;
      rtx init_val, loop_end, loop_init, end_label, head_label;

      if (!mt_loop_nesting (loop))
	{
	  if (dump_file)
	    fprintf (dump_file, ";; loop %d is bad\n", loop->loop_no);
	  continue;
	}

      /* Get the loop iteration register.  */
      iter_reg = SET_DEST (XVECEXP (PATTERN (loop->dbnz), 0, 1));

      if (!REG_P (iter_reg))
	{
	  /* Spilled */
	  if (dump_file)
	    fprintf (dump_file, ";; loop %d has spilled iteration count\n",
		     loop->loop_no);
	  continue;
	}

      /* Look for the initializing insn, scanning backwards from the
	 end of the predecessor block.  */
      init_insn = NULL_RTX;
      for (insn = BB_END (loop->predecessor);
	   insn != PREV_INSN (BB_HEAD (loop->predecessor));
	   insn = PREV_INSN (insn))
	{
	  if (!INSN_P (insn))
	    continue;
	  if (reg_mentioned_p (iter_reg, PATTERN (insn)))
	    {
	      rtx set = single_set (insn);

	      if (set && rtx_equal_p (iter_reg, SET_DEST (set)))
		init_insn = insn;
	      break;
	    }
	}

      if (!init_insn)
	{
	  if (dump_file)
	    fprintf (dump_file, ";; loop %d has no initializer\n",
		     loop->loop_no);
	  continue;
	}

      if (dump_file)
	{
	  fprintf (dump_file, ";; loop %d initialized by\n",
		   loop->loop_no);
	  print_rtl_single (dump_file, init_insn);
	}

      init_val = PATTERN (init_insn);
      if (GET_CODE (init_val) == SET)
	init_val = SET_SRC (init_val);
      /* Only small constant iteration counts are supported.  */
      if (GET_CODE (init_val) != CONST_INT || INTVAL (init_val) >= 65535)
	{
	  if (dump_file)
	    fprintf (dump_file, ";; loop %d has complex initializer\n",
		     loop->loop_no);
	  continue;
	}

      /* Scan all the blocks to make sure they don't use iter_reg.  */
      if (mt_scan_loop (loop, iter_reg, loop->dbnz))
	{
	  if (dump_file)
	    fprintf (dump_file, ";; loop %d uses iterator\n",
		     loop->loop_no);
	  continue;
	}

      /* The loop is good for replacement.  */

      /* loop is 1 based, dbnz is zero based.  */
      init_val = GEN_INT (INTVAL (init_val) + 1);

      /* Loop counter registers are assigned by nesting depth.  */
      iter_reg = gen_rtx_REG (SImode, LOOP_FIRST + loop->depth - 1);
      end_label = gen_label_rtx ();
      head_label = XEXP (SET_SRC (XVECEXP (PATTERN (loop->dbnz), 0, 0)), 1);

      loop_end = gen_loop_end (iter_reg, head_label);
      loop_init = gen_loop_init (iter_reg, init_val, end_label);
      loop->init = init_insn;
      loop->end_label = end_label;
      loop->loop_init = loop_init;
      loop->loop_end = loop_end;
      replaced = true;

      if (dump_file)
	{
	  fprintf (dump_file, ";; replacing loop %d initializer with\n",
		   loop->loop_no);
	  print_rtl_single (dump_file, loop->loop_init);
	  fprintf (dump_file, ";; replacing loop %d terminator with\n",
		   loop->loop_no);
	  print_rtl_single (dump_file, loop->loop_end);
	}
    }

  /* Now apply the optimizations.  Do it this way so we don't mess up
     the flow graph half way through.  */
  for (loop = loops; loop; loop = loop->next)
    if (loop->loop_init)
      {
	emit_jump_insn_after (loop->loop_init, BB_END (loop->predecessor));
	delete_insn (loop->init);
	emit_label_before (loop->end_label, loop->dbnz);
	emit_jump_insn_before (loop->loop_end, loop->dbnz);
	delete_insn (loop->dbnz);
      }

  /* Free up the loop structures.  */
  while (loops)
    {
      loop = loops;
      loops = loop->next;
      VEC_free (loop_info, heap, loop->loops);
      VEC_free (basic_block, heap, loop->blocks);
      XDELETE (loop);
    }

  if (replaced && dump_file)
    {
      fprintf (dump_file, ";; Replaced loops\n");
      print_rtl (dump_file, get_insns ());
    }
}
 
/* Structures to hold branch information during reorg.  */

/* One branch insn targeting a particular label.  */
typedef struct branch_info
{
  rtx insn;			/* The branch insn.  */
  struct branch_info *next;
} branch_info;

/* A label together with the list of branches to it.  */
typedef struct label_info
{
  rtx label;			/* The label.  */
  branch_info *branches;	/* branches to this label.  */
  struct label_info *next;
} label_info;

/* Chain of labels found in current function, used during reorg.  */
static label_info *mt_labels;
 
/* If *X is a label reference, record INSN in the mt_labels table as a
   branch to that label, creating the label's entry on first use.
   for_each_rtx callback; always returns 0 to continue the walk.  */

static int
mt_add_branches (rtx *x, void *insn)
{
  if (GET_CODE (*x) == LABEL_REF)
    {
      rtx label = XEXP (*x, 0);
      label_info *info;
      branch_info *branch;

      /* Look up an existing record for this label.  */
      for (info = mt_labels; info; info = info->next)
	if (info->label == label)
	  break;

      if (!info)
	{
	  /* First branch to this label: start a new record.  */
	  info = xmalloc (sizeof (*info));
	  info->next = mt_labels;
	  mt_labels = info;
	  info->label = label;
	  info->branches = NULL;
	}

      branch = xmalloc (sizeof (*branch));
      branch->insn = insn;
      branch->next = info->branches;
      info->branches = branch;
    }
  return 0;
}
 
/* If BRANCH has a filled delay slot, check if INSN is dependent upon
   it.  If so, undo the delay slot fill.   Returns the next insn, if
   we patch out the branch.  Returns the branch insn, if we cannot
   patch out the branch (due to anti-dependency in the delay slot).
   In that case, the caller must insert nops at the branch target.  */

static rtx
mt_check_delay_slot (rtx branch, rtx insn)
{
  rtx slot;
  rtx tmp;
  rtx p;
  rtx jmp;

  gcc_assert (GET_CODE (PATTERN (branch)) == SEQUENCE);
  if (INSN_DELETED_P (branch))
    return NULL_RTX;

  /* Element 1 of the SEQUENCE is the delay-slot insn.  */
  slot = XVECEXP (PATTERN (branch), 0, 1);

  /* note_stores with insn_dependent_p_1 leaves TMP non-null when INSN
     is NOT dependent on the delay-slot insn's stores.  */
  tmp = PATTERN (insn);
  note_stores (PATTERN (slot), insn_dependent_p_1, &tmp);
  if (tmp)
    /* Not dependent.  */
    return NULL_RTX;

  /* Undo the delay slot.  */
  jmp = XVECEXP (PATTERN (branch), 0, 0);
  tmp = PATTERN (jmp);
  note_stores (PATTERN (slot), insn_dependent_p_1, &tmp);
  if (!tmp)
    /* Anti dependent.  */
    return branch;

  /* Splice SLOT and then JMP back into the insn stream in place of
     the SEQUENCE insn BRANCH, then delete BRANCH.  */
  p = PREV_INSN (branch);
  NEXT_INSN (p) = slot;
  PREV_INSN (slot) = p;
  NEXT_INSN (slot) = jmp;
  PREV_INSN (jmp) = slot;
  NEXT_INSN (jmp) = branch;
  PREV_INSN (branch) = jmp;
  XVECEXP (PATTERN (branch), 0, 0) = NULL_RTX;
  XVECEXP (PATTERN (branch), 0, 1) = NULL_RTX;
  delete_insn (branch);
  return jmp;
}
 
/* Insert nops to satisfy pipeline constraints.  We only deal with ms2
   constraints here.  Earlier CPUs are dealt with by inserting nops with
   final_prescan (but that can lead to inferior code, and is
   impractical with ms2's JAL hazard).

   ms2 dynamic constraints
   1) a load and a following use must be separated by one insn
   2) an insn and a following dependent call must be separated by two insns

   only arith insns are placed in delay slots so #1 cannot happen with
   a load in a delay slot.  #2 can happen with an arith insn in the
   delay slot.  */

static void
mt_reorg_hazard (void)
{
  rtx insn, next;

  /* Find all the branches */
  for (insn = get_insns ();
       insn;
       insn = NEXT_INSN (insn))
    {
      rtx jmp;

      if (!INSN_P (insn))
	continue;

      jmp = PATTERN (insn);
      if (GET_CODE (jmp) != SEQUENCE)
	/* If it's not got a filled delay slot, then it can't
	   conflict.  */
	continue;

      jmp = XVECEXP (jmp, 0, 0);

      if (recog_memoized (jmp) == CODE_FOR_tablejump)
	/* For a tablejump, record branches via the jump table data
	   rather than the jump insn itself.  */
	for (jmp = XEXP (XEXP (XVECEXP (PATTERN (jmp), 0, 1), 0), 0);
	     !JUMP_TABLE_DATA_P (jmp);
	     jmp = NEXT_INSN (jmp))
	  continue;

      for_each_rtx (&PATTERN (jmp), mt_add_branches, insn);
    }

  /* Now scan for dependencies.  */
  /* Skip to the first real insn.  */
  for (insn = get_insns ();
       insn && !INSN_P (insn);
       insn = NEXT_INSN (insn))
    continue;
  for (;
       insn;
       insn = next)
    {
      rtx jmp, tmp;
      enum attr_type attr;

      gcc_assert (INSN_P (insn) && !INSN_DELETED_P (insn));

      /* NEXT is the following real insn, skipping USEs.  */
      for (next = NEXT_INSN (insn);
	   next;
	   next = NEXT_INSN (next))
	{
	  if (!INSN_P (next))
	    continue;
	  if (GET_CODE (PATTERN (next)) != USE)
	    break;
	}

      /* For a filled delay-slot SEQUENCE, classify by the branch.  */
      jmp = insn;
      if (GET_CODE (PATTERN (insn)) == SEQUENCE)
	jmp = XVECEXP (PATTERN (insn), 0, 0);

      attr = recog_memoized (jmp) >= 0 ? get_attr_type (jmp) : TYPE_UNKNOWN;

      if (next && attr == TYPE_LOAD)
	{
	  /* A load.  See if NEXT is dependent, and if so insert a
	     nop.  */
	  tmp = PATTERN (next);
	  if (GET_CODE (tmp) == SEQUENCE)
	    tmp = PATTERN (XVECEXP (tmp, 0, 0));
	  note_stores (PATTERN (insn), insn_dependent_p_1, &tmp);
	  if (!tmp)
	    emit_insn_after (gen_nop (), insn);
	}

      if (attr == TYPE_CALL)
	{
	  /* A call.  Make sure we're not dependent on either of the
	     previous two dynamic instructions.  */
	  int nops = 0;
	  int count;
	  rtx prev = insn;
	  rtx rescan = NULL_RTX;

	  for (count = 2; count && !nops;)
	    {
	      int type;

	      prev = PREV_INSN (prev);
	      if (!prev)
		{
		  /* If we reach the start of the function, we must
		     presume the caller set the address in the delay
		     slot of the call instruction.  */
		  nops = count;
		  break;
		}

	      if (BARRIER_P (prev))
		break;
	      if (LABEL_P (prev))
		{
		  /* Look at branches to this label.  */
		  label_info *label;
		  branch_info *branch;

		  for (label = mt_labels;
		       label;
		       label = label->next)
		    if (label->label == prev)
		      {
			for (branch = label->branches;
			     branch;
			     branch = branch->next)
			  {
			    tmp = mt_check_delay_slot (branch->insn, jmp);

			    if (tmp == branch->insn)
			      {
				/* Unpatchable anti-dependency; pad here
				   at the branch target instead.  */
				nops = count;
				break;
			      }

			    if (tmp && branch->insn == next)
			      rescan = tmp;
			  }
			break;
		      }
		  continue;
		}

	      if (!INSN_P (prev) || GET_CODE (PATTERN (prev)) == USE)
		continue;

	      if (GET_CODE (PATTERN (prev)) == SEQUENCE)
		{
		  /* Look at the delay slot.  */
		  tmp = mt_check_delay_slot (prev, jmp);
		  if (tmp == prev)
		    nops = count;
		  break;
		}

	      type = (INSN_CODE (prev) >= 0 ? get_attr_type (prev)
		      : TYPE_COMPLEX);
	      if (type == TYPE_CALL || type == TYPE_BRANCH)
		break;

	      if (type == TYPE_LOAD
		  || type == TYPE_ARITH
		  || type == TYPE_COMPLEX)
		{
		  tmp = PATTERN (jmp);
		  note_stores (PATTERN (prev), insn_dependent_p_1, &tmp);
		  if (!tmp)
		    {
		      nops = count;
		      break;
		    }
		}

	      if (INSN_CODE (prev) >= 0)
		count--;
	    }

	  /* If a patched-out branch changed the stream right after
	     NEXT, recompute NEXT from the spliced-in insn.  */
	  if (rescan)
	    for (next = NEXT_INSN (rescan);
		 next && !INSN_P (next);
		 next = NEXT_INSN (next))
	      continue;

	  while (nops--)
	    emit_insn_before (gen_nop (), insn);
	}
    }

  /* Free the data structures.  */
  while (mt_labels)
    {
      label_info *label = mt_labels;
      branch_info *branch, *next;

      mt_labels = label->next;
      for (branch = label->branches; branch; branch = next)
	{
	  next = branch->next;
	  free (branch);
	}
      free (label);
    }
}
 
/* Fixup the looping instructions, do delayed branch scheduling, fixup
   scheduling hazards.  */

static void
mt_machine_reorg (void)
{
  if (TARGET_MS2 && cfun->machine->has_loops)
    mt_reorg_loops (dump_file);

  if (mt_flag_delayed_branch)
    dbr_schedule (get_insns ());

  if (TARGET_MS2)
    {
      /* Force all instructions to be split into their final form.  */
      split_all_insns_noflow ();
      mt_reorg_hazard ();
    }
}
/* Initialize the GCC target structure. */

/* Tentative declaration so TARGET_ATTRIBUTE_TABLE below can name the
   table; the definition itself is elsewhere in this file (outside this
   view).  */
const struct attribute_spec mt_attribute_table[];

/* Route the generic target hooks to the mt-specific implementations.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE mt_attribute_table
#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX mt_struct_value_rtx
/* The hook unconditionally returns true (hook_bool_tree_true).  */
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE mt_pass_by_reference
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK mt_pass_in_stack
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES mt_arg_partial_bytes
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS mt_setup_incoming_varargs
/* mt_machine_reorg (above): loop fixup, delayed-branch scheduling, and
   hazard repair.  */
#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG mt_machine_reorg

struct gcc_target targetm = TARGET_INITIALIZER;

/* Generated header with the garbage-collector roots for this file
   (gt-*.h files are gengtype output).  */
#include "gt-mt.h"
/mt.opt
0,0 → 1,55
; Options for the mt port of the compiler
;
; Copyright (C) 2005, 2007 Free Software Foundation, Inc.
;
; This file is part of GCC.
;
; GCC is free software; you can redistribute it and/or modify it under
; the terms of the GNU General Public License as published by the Free
; Software Foundation; either version 3, or (at your option) any later
; version.
;
; GCC is distributed in the hope that it will be useful, but WITHOUT
; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
; License for more details.
;
; You should have received a copy of the GNU General Public License
; along with GCC; see the file COPYING3. If not see
; <http://www.gnu.org/licenses/>.
 
mbacc
Target Report Mask(BYTE_ACCESS)
Use byte loads and stores when generating code.
 
msim
Target RejectNegative
Use simulator runtime
 
mno-crt0
Target RejectNegative
Do not include crt0.o in the startup files
 
mdebug-arg
Target RejectNegative Mask(DEBUG_ARG)
Internal debug switch
 
mdebug-addr
Target RejectNegative Mask(DEBUG_ADDR)
Internal debug switch
 
mdebug-stack
Target RejectNegative Mask(DEBUG_STACK)
Internal debug switch
 
mdebug-loc
Target RejectNegative Mask(DEBUG_LOC)
Internal debug switch
 
mdebug
Target RejectNegative Mask(DEBUG)
Internal debug switch
 
march=
Target RejectNegative Joined Var(mt_cpu_string)
Specify CPU for code generation purposes
/t-mt
0,0 → 1,73
# Copyright (C) 2005, 2007 Free Software Foundation, Inc.
#
# This file is part of GCC.
#
# GCC is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 3, or (at your
# option) any later version.
#
# GCC is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GCC; see the file COPYING3. If not see
# <http://www.gnu.org/licenses/>.
 
 
# Name of assembly file containing libgcc1 functions.
# This entry must be present, but it can be empty if the target does
# not need any assembler functions to support its code generation.
CROSS_LIBGCC1 =
#
# Alternatively if assembler functions *are* needed then define the
# entries below:
# CROSS_LIBGCC1 = libgcc1-asm.a
# LIB1ASMSRC = mt/lib1funcs.asm
# LIB1ASMFUNCS = _udivsi3 etc...
 
LIB2FUNCS_EXTRA = $(srcdir)/config/mt/lib2extra-funcs.c
 
# If any special flags are necessary when building libgcc2 put them here.
#
# TARGET_LIBGCC2_CFLAGS =
 
EXTRA_PARTS = crtbegin.o crtend.o crti.o crtn.o
 
# We want fine grained libraries, so use the new code to build the
# floating point emulation libraries.
FPBIT = fp-bit.c
DPBIT = dp-bit.c
 
# Synthesize the single-precision software-FP source: prepending
# '#define FLOAT' selects the float variant of the generic fp-bit.c.
fp-bit.c: $(srcdir)/config/fp-bit.c
	echo '#define FLOAT' > fp-bit.c
	cat $(srcdir)/config/fp-bit.c >> fp-bit.c

# Double precision needs no extra defines; just copy the generic source.
dp-bit.c: $(srcdir)/config/fp-bit.c
	cat $(srcdir)/config/fp-bit.c > dp-bit.c

# Assemble startup files.
# crtn.asm supplies the returning tails of .init/.fini (see crtn.asm);
# crti.asm presumably supplies their heads -- not visible here.
$(T)crti.o: $(srcdir)/config/mt/crti.asm $(GCC_PASSES)
	$(GCC_FOR_TARGET) $(GCC_CFLAGS) $(MULTILIB_CFLAGS) $(INCLUDES) \
	-c -o $(T)crti.o -x assembler $(srcdir)/config/mt/crti.asm
$(T)crtn.o: $(srcdir)/config/mt/crtn.asm $(GCC_PASSES)
	$(GCC_FOR_TARGET) $(GCC_CFLAGS) $(MULTILIB_CFLAGS) $(INCLUDES) \
	-c -o $(T)crtn.o -x assembler $(srcdir)/config/mt/crtn.asm
 
# Enable the following if multilibs are needed.
# See gcc/genmultilib, gcc/gcc.texi and gcc/tm.texi for a
# description of the options and their values.
#
MULTILIB_OPTIONS = march=ms1-64-001/march=ms1-16-002/march=ms1-16-003/march=ms2
MULTILIB_DIRNAMES = ms1-64-001 ms1-16-002 ms1-16-003 ms2
# MULTILIB_MATCHES =
# MULTILIB_EXCEPTIONS =
# MULTILIB_EXTRA_OPTS =
EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o crti.o crtn.o
#
# LIBGCC = stmp-multilib
# INSTALL_LIBGCC = install-multilib
 
/crtn.asm
0,0 → 1,56
# crtn.asm for mt
 
# Copyright (C) 2005 Free Software Foundation, Inc.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2, or (at your option) any
# later version.
#
# In addition to the permissions in the GNU General Public License, the
# Free Software Foundation gives you unlimited permission to link the
# compiled version of this file with other programs, and to distribute
# those programs without any restriction coming from the use of this
# file. (The General Public License restrictions do apply in other
# respects; for example, they cover modification of the file, and
# distribution when not linked into another program.)
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GCC; see the file COPYING. If not, write to the Free
# Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
# As a special exception, if you link this library with files
# compiled with GCC to produce an executable, this does not cause
# the resulting executable to be covered by the GNU General Public License.
# This exception does not however invalidate any other reasons why
# the executable file might be covered by the GNU General Public License.
#
 
# This file just makes sure that the .fini and .init sections do in
# fact return. Users may put any desired instructions in those sections.
# This file is the last thing linked into any executable.
 
	.file "crtn.asm"

# Tail of .init: restore the saved return address and return to the
# caller.  Per mt.h: r14 is the link register (RA), r13 is SP, and r0
# always reads as zero.
	.section ".init"
	.align 4
	ldw r14, sp, #0			# reload return address from the stack
	addi sp, sp, #4			# pop it (one 4-byte word)
	nop
	jal r0, r14			# jump to RA; link into r0 discards it
	or r0, r0, r0			# no-op; presumably fills the branch
					# delay slot -- confirm vs. ISA docs

# Tail of .fini: identical return sequence.
	.section ".fini"
	.align 4

	ldw r14, sp, #0			# reload return address
	addi sp, sp, #4			# pop it
	nop
	jal r0, r14			# return
	or r0, r0, r0			# delay-slot no-op (as above)
/mt.h
0,0 → 1,868
/* Target Definitions for MorphoRISC1
Copyright (C) 2005, 2007 Free Software Foundation, Inc.
Contributed by Red Hat, Inc.
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 3, or (at your
option) any later version.
 
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
License for more details.
 
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
 
/* Libcall handle for unsigned SImode comparison; its name is given by
   UCMPSI3_LIBCALL further below.  */
extern struct rtx_def * mt_ucmpsi3_libcall;

/* CPU variants supported by this port; selected with -march= (see
   ASM_SPEC / LIB_SPEC / STARTFILE_SPEC below).  */
enum processor_type
{
  PROCESSOR_MS1_64_001,		/* -march=ms1-64-001 */
  PROCESSOR_MS1_16_002,		/* -march=ms1-16-002 (the default per ASM_SPEC) */
  PROCESSOR_MS1_16_003,		/* -march=ms1-16-003 */
  PROCESSOR_MS2			/* -march=ms2 */
};

/* Which flavor of epilogue to emit.  */
enum epilogue_type
{
  EH_EPILOGUE,			/* Exception-handling return path.  */
  NORMAL_EPILOGUE		/* Ordinary function return.  */
};

/* The CPU selected for code generation (set from -march=).  */
extern enum processor_type mt_cpu;
 
/* Support for a compile-time default CPU, et cetera. The rules are:
--with-arch is ignored if -march is specified. */
#define OPTION_DEFAULT_SPECS \
{"arch", "%{!march=*:-march=%(VALUE)}" }
 
/* A C string constant that tells the GCC driver program options to pass to
the assembler. */
#undef ASM_SPEC
#define ASM_SPEC "%{march=*} %{!march=*: -march=ms1-16-002}"
 
/* A string to pass at the end of the command given to the linker. */
#undef LIB_SPEC
#define LIB_SPEC "--start-group -lc -lsim --end-group \
%{msim: ; \
march=ms1-64-001:-T 64-001.ld%s; \
march=ms1-16-002:-T 16-002.ld%s; \
march=ms1-16-003:-T 16-003.ld%s; \
march=ms2:-T ms2.ld%s; \
:-T 16-002.ld}"
 
/* A string to pass at the very beginning of the command given to the
linker. */
#undef STARTFILE_SPEC
#define STARTFILE_SPEC "%{msim:crt0.o%s;\
march=ms1-64-001:%{!mno-crt0:crt0-64-001.o%s} startup-64-001.o%s; \
march=ms1-16-002:%{!mno-crt0:crt0-16-002.o%s} startup-16-002.o%s; \
march=ms1-16-003:%{!mno-crt0:crt0-16-003.o%s} startup-16-003.o%s; \
march=ms2:%{!mno-crt0:crt0-ms2.o%s} startup-ms2.o%s; \
:%{!mno-crt0:crt0-16-002.o%s} startup-16-002.o%s} \
crti.o%s crtbegin.o%s"
 
/* A string to pass at the end of the command given to the linker. */
#undef ENDFILE_SPEC
#define ENDFILE_SPEC "%{msim:exit.o%s; \
march=ms1-64-001:exit-64-001.o%s; \
march=ms1-16-002:exit-16-002.o%s; \
march=ms1-16-003:exit-16-003.o%s; \
march=ms2:exit-ms2.o%s; \
:exit-16-002.o%s} \
crtend.o%s crtn.o%s"
/* Run-time target specifications. */
 
#define TARGET_CPU_CPP_BUILTINS() \
do \
{ \
builtin_define_with_int_value ("__mt__", mt_cpu); \
builtin_assert ("machine=mt"); \
} \
while (0)
 
#define TARGET_MS1_64_001 (mt_cpu == PROCESSOR_MS1_64_001)
#define TARGET_MS1_16_002 (mt_cpu == PROCESSOR_MS1_16_002)
#define TARGET_MS1_16_003 (mt_cpu == PROCESSOR_MS1_16_003)
#define TARGET_MS2 (mt_cpu == PROCESSOR_MS2)
 
#define TARGET_VERSION fprintf (stderr, " (mt)");
 
#define OVERRIDE_OPTIONS mt_override_options ()
 
#define CAN_DEBUG_WITHOUT_FP 1
 
/* Storage Layout. */
 
#define BITS_BIG_ENDIAN 0
 
#define BYTES_BIG_ENDIAN 1
 
#define WORDS_BIG_ENDIAN 1
 
#define UNITS_PER_WORD 4
 
/* A macro to update MODE and UNSIGNEDP when an object whose type is TYPE and
which has the specified mode and signedness is to be stored in a register.
This macro is only called when TYPE is a scalar type.
 
On most RISC machines, which only have operations that operate on a full
register, define this macro to set M to `word_mode' if M is an integer mode
narrower than `BITS_PER_WORD'. In most cases, only integer modes should be
widened because wider-precision floating-point operations are usually more
expensive than their narrower counterparts.
 
For most machines, the macro definition does not change UNSIGNEDP. However,
some machines, have instructions that preferentially handle either signed or
unsigned quantities of certain modes. For example, on the DEC Alpha, 32-bit
loads from memory and 32-bit add instructions sign-extend the result to 64
bits. On such machines, set UNSIGNEDP according to which kind of extension
is more efficient.
 
Do not define this macro if it would never modify MODE. */
#define PROMOTE_MODE(MODE,UNSIGNEDP,TYPE) \
do \
{ \
if (GET_MODE_CLASS (MODE) == MODE_INT \
&& GET_MODE_SIZE (MODE) < 4) \
(MODE) = SImode; \
} \
while (0)
 
/* Normal alignment required for function parameters on the stack, in bits.
All stack parameters receive at least this much alignment regardless of data
type. On most machines, this is the same as the size of an integer. */
#define PARM_BOUNDARY 32
 
/* Define this macro to the minimum alignment enforced by hardware for
the stack pointer on this machine. The definition is a C
expression for the desired alignment (measured in bits). This
value is used as a default if PREFERRED_STACK_BOUNDARY is not
defined. On most machines, this should be the same as
PARM_BOUNDARY. */
#define STACK_BOUNDARY 32
 
/* Alignment required for a function entry point, in bits. */
#define FUNCTION_BOUNDARY 32
 
/* Biggest alignment that any data type can require on this machine,
in bits. */
#define BIGGEST_ALIGNMENT 32
 
/* If defined, a C expression to compute the alignment for a variable
in the static store. TYPE is the data type, and ALIGN is the
alignment that the object would ordinarily have. The value of this
macro is used instead of that alignment to align the object.
 
If this macro is not defined, then ALIGN is used. */
#define DATA_ALIGNMENT(TYPE, ALIGN) \
(TREE_CODE (TYPE) == ARRAY_TYPE \
&& TYPE_MODE (TREE_TYPE (TYPE)) == QImode \
&& (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
 
/* If defined, a C expression to compute the alignment given to a constant that
is being placed in memory. CONSTANT is the constant and ALIGN is the
alignment that the object would ordinarily have. The value of this macro is
used instead of that alignment to align the object.
 
If this macro is not defined, then ALIGN is used.
 
The typical use of this macro is to increase alignment for string constants
to be word aligned so that `strcpy' calls that copy constants can be done
inline. */
#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
(TREE_CODE (EXP) == STRING_CST \
&& (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
 
/* Number of bits which any structure or union's size must be a multiple of.
Each structure or union's size is rounded up to a multiple of this.
 
If you do not define this macro, the default is the same as `BITS_PER_UNIT'. */
#define STRUCTURE_SIZE_BOUNDARY 32
 
/* Define this macro to be the value 1 if instructions will fail to work if
given data not on the nominal alignment. If instructions will merely go
slower in that case, define this macro as 0. */
#define STRICT_ALIGNMENT 1
 
/* Define this if you wish to imitate the way many other C compilers handle
alignment of bitfields and the structures that contain them. */
#define PCC_BITFIELD_TYPE_MATTERS 1
/* Layout of Source Language Data Types. */
 
#define INT_TYPE_SIZE 32
 
#define SHORT_TYPE_SIZE 16
 
#define LONG_TYPE_SIZE 32
 
#define LONG_LONG_TYPE_SIZE 64
 
#define CHAR_TYPE_SIZE 8
 
#define FLOAT_TYPE_SIZE 32
 
#define DOUBLE_TYPE_SIZE 64
 
#define LONG_DOUBLE_TYPE_SIZE 64
 
#define DEFAULT_SIGNED_CHAR 1
/* Register Basics. */
 
/* General purpose registers. */
#define GPR_FIRST 0 /* First gpr */
#define GPR_LAST 15 /* Last possible gpr */
 
#define GPR_R0 0 /* Always 0 */
#define GPR_R7 7 /* Used as a scratch register */
#define GPR_R8 8 /* Used as a scratch register */
#define GPR_R9 9 /* Used as a scratch register */
#define GPR_R10 10 /* Used as a scratch register */
#define GPR_R11 11 /* Used as a scratch register */
#define GPR_FP 12 /* Frame pointer */
#define GPR_SP 13 /* Stack pointer */
#define GPR_LINK 14 /* Saved return address as
seen by the caller */
#define GPR_INTERRUPT_LINK 15 /* Holds the return address for interrupts */
 
#define LOOP_FIRST (GPR_LAST + 1)
#define LOOP_LAST (LOOP_FIRST + 3)
 
/* Argument register that is eliminated in favor of the frame and/or stack
pointer. Also add register to point to where the return address is
stored. */
#define SPECIAL_REG_FIRST (LOOP_LAST + 1)
#define SPECIAL_REG_LAST (SPECIAL_REG_FIRST)
#define ARG_POINTER_REGNUM (SPECIAL_REG_FIRST + 0)
#define SPECIAL_REG_P(R) ((R) == SPECIAL_REG_FIRST)
 
/* The first/last register that can contain the arguments to a function. */
#define FIRST_ARG_REGNUM 1
#define LAST_ARG_REGNUM 4
 
/* The register used to hold functions return value */
#define RETVAL_REGNUM 11
 
#define FIRST_PSEUDO_REGISTER (SPECIAL_REG_LAST + 1)
 
#define IS_PSEUDO_P(R) (REGNO (R) >= FIRST_PSEUDO_REGISTER)
 
/* R0 always has the value 0
R10 static link
R12 FP pointer to active frame
R13 SP pointer to top of stack
R14 RA return address
R15 IRA interrupt return address. */
#define FIXED_REGISTERS { 1, 0, 0, 0, 0, 0, 0, 0, \
0, 0, 0, 0, 1, 1, 1, 1, \
1, 1, 1, 1, 1 \
}
 
/* Like `FIXED_REGISTERS' but has 1 for each register that is clobbered (in
general) by function calls as well as for fixed registers. This macro
therefore identifies the registers that are not available for general
allocation of values that must live across function calls. */
#define CALL_USED_REGISTERS { 1, 1, 1, 1, 1, 0, 0, 1, \
1, 1, 1, 1, 1, 1, 1, 1, \
1, 1, 1, 1, 1 \
}
 
/* How Values Fit in Registers. */
 
#define HARD_REGNO_NREGS(REGNO, MODE) \
((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
 
#define HARD_REGNO_MODE_OK(REGNO, MODE) 1
 
/* A C expression that is nonzero if a value of mode MODE1 is
accessible in mode MODE2 without copying. */
#define MODES_TIEABLE_P(MODE1, MODE2) 1
/* Register Classes. */
 
enum reg_class
{
NO_REGS,
ALL_REGS,
LIM_REG_CLASSES
};
 
#define GENERAL_REGS ALL_REGS
 
#define N_REG_CLASSES ((int) LIM_REG_CLASSES)
 
#define REG_CLASS_NAMES {"NO_REGS", "ALL_REGS" }
 
#define REG_CLASS_CONTENTS \
{ \
{ 0x0 }, \
{ 0x000fffff }, \
}
 
/* A C expression whose value is a register class containing hard register
REGNO. In general there is more than one such class; choose a class which
is "minimal", meaning that no smaller class also contains the register. */
#define REGNO_REG_CLASS(REGNO) GENERAL_REGS
 
#define BASE_REG_CLASS GENERAL_REGS
 
#define INDEX_REG_CLASS NO_REGS
 
#define REG_CLASS_FROM_LETTER(CHAR) NO_REGS
 
#define REGNO_OK_FOR_BASE_P(NUM) 1
 
#define REGNO_OK_FOR_INDEX_P(NUM) 1
 
/* A C expression that places additional restrictions on the register class to
use when it is necessary to copy value X into a register in class CLASS.
The value is a register class; perhaps CLASS, or perhaps another, smaller
class. On many machines, the following definition is safe:
 
#define PREFERRED_RELOAD_CLASS(X,CLASS) CLASS
*/
#define PREFERRED_RELOAD_CLASS(X, CLASS) (CLASS)
 
#define SECONDARY_RELOAD_CLASS(CLASS,MODE,X) \
mt_secondary_reload_class((CLASS), (MODE), (X))
 
/* A C expression for the maximum number of consecutive registers of
class CLASS needed to hold a value of mode MODE. */
#define CLASS_MAX_NREGS(CLASS, MODE) \
((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
 
/* For MorphoRISC1:
 
`I' is used for the range of constants an arithmetic insn can
actually contain (16 bits signed integers).
 
`J' is used for the range which is just zero (ie, $r0).
 
`K' is used for the range of constants a logical insn can actually
contain (16 bit zero-extended integers).
 
`L' is used for the range of constants that be loaded with lui
(ie, the bottom 16 bits are zero).
 
`M' is used for the range of constants that take two words to load
(ie, not matched by `I', `K', and `L').
 
`N' is used for negative 16 bit constants other than -65536.
 
`O' is a 15 bit signed integer.
 
`P' is used for positive 16 bit constants. */
 
#define SMALL_INT(X) ((unsigned HOST_WIDE_INT) (INTVAL (X) + 0x8000) < 0x10000)
#define SMALL_INT_UNSIGNED(X) ((unsigned HOST_WIDE_INT) (INTVAL (X)) < 0x10000)
 
/* A C expression that defines the machine-dependent operand
constraint letters that specify particular ranges of integer
values. If C is one of those letters, the expression should check
that VALUE, an integer, is in the appropriate range and return 1 if
so, 0 otherwise. If C is not one of those letters, the value
should be 0 regardless of VALUE. */
#define CONST_OK_FOR_LETTER_P(VALUE, C) \
((C) == 'I' ? ((unsigned HOST_WIDE_INT) ((VALUE) + 0x8000) < 0x10000) \
: (C) == 'J' ? ((VALUE) == 0) \
: (C) == 'K' ? ((unsigned HOST_WIDE_INT) (VALUE) < 0x10000) \
: (C) == 'L' ? (((VALUE) & 0x0000ffff) == 0 \
&& (((VALUE) & ~2147483647) == 0 \
|| ((VALUE) & ~2147483647) == ~2147483647)) \
: (C) == 'M' ? ((((VALUE) & ~0x0000ffff) != 0) \
&& (((VALUE) & ~0x0000ffff) != ~0x0000ffff) \
&& (((VALUE) & 0x0000ffff) != 0 \
|| (((VALUE) & ~2147483647) != 0 \
&& ((VALUE) & ~2147483647) != ~2147483647))) \
: (C) == 'N' ? ((unsigned HOST_WIDE_INT) ((VALUE) + 0xffff) < 0xffff) \
: (C) == 'O' ? ((unsigned HOST_WIDE_INT) ((VALUE) + 0x4000) < 0x8000) \
: (C) == 'P' ? ((VALUE) != 0 && (((VALUE) & ~0x0000ffff) == 0)) \
: 0)
 
/* A C expression that defines the machine-dependent operand constraint letters
(`G', `H') that specify particular ranges of `const_double' values. */
#define CONST_DOUBLE_OK_FOR_LETTER_P(VALUE, C) 0
 
/* Most negative value representable on mt. */
#define MT_MIN_INT 0x80000000
/* Basic Stack Layout. */
 
enum save_direction
{
FROM_PROCESSOR_TO_MEM,
FROM_MEM_TO_PROCESSOR
};
 
/* Tell prologue and epilogue if register REGNO should be saved / restored.
The return address and frame pointer are treated separately.
Don't consider them here. */
#define MUST_SAVE_REGISTER(regno) \
( (regno) != GPR_LINK \
&& (regno) != GPR_FP \
&& (regno) != GPR_SP \
&& (regno) != GPR_R0 \
&& (( regs_ever_live [regno] && ! call_used_regs [regno] ) \
/* Save ira register in an interrupt handler. */ \
|| (interrupt_handler && (regno) == GPR_INTERRUPT_LINK) \
/* Save any register used in an interrupt handler. */ \
|| (interrupt_handler && regs_ever_live [regno]) \
/* Save call clobbered registers in non-leaf interrupt \
handlers. */ \
|| (interrupt_handler && call_used_regs[regno] \
&& !current_function_is_leaf) \
||(current_function_calls_eh_return \
&& (regno == GPR_R7 || regno == GPR_R8)) \
) \
)
 
#define STACK_GROWS_DOWNWARD 1
 
/* Offset from the frame pointer to the first local variable slot to be
allocated.
 
If `FRAME_GROWS_DOWNWARD', find the next slot's offset by
subtracting the first slot's length from `STARTING_FRAME_OFFSET'.
Otherwise, it is found by adding the length of the first slot to
the value `STARTING_FRAME_OFFSET'. */
#define STARTING_FRAME_OFFSET current_function_outgoing_args_size
 
/* Offset from the argument pointer register to the first argument's address.
On some machines it may depend on the data type of the function.
 
If `ARGS_GROW_DOWNWARD', this is the offset to the location above the first
argument's address. */
#define FIRST_PARM_OFFSET(FUNDECL) 0
 
#define RETURN_ADDR_RTX(COUNT, FRAMEADDR) \
mt_return_addr_rtx (COUNT)
 
/* A C expression whose value is RTL representing the location of the incoming
return address at the beginning of any function, before the prologue. This
RTL is either a `REG', indicating that the return value is saved in `REG',
or a `MEM' representing a location in the stack.
 
You only need to define this macro if you want to support call frame
debugging information like that provided by DWARF 2. */
#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (SImode, GPR_LINK)
 
/* A C expression whose value is an integer giving the offset, in bytes, from
the value of the stack pointer register to the top of the stack frame at the
beginning of any function, before the prologue. The top of the frame is
defined to be the value of the stack pointer in the previous frame, just
before the call instruction.
 
You only need to define this macro if you want to support call frame
debugging information like that provided by DWARF 2. */
#define INCOMING_FRAME_SP_OFFSET 0
 
#define STACK_POINTER_REGNUM GPR_SP
 
#define FRAME_POINTER_REGNUM GPR_FP
 
/* The register number of the arg pointer register, which is used to
access the function's argument list. */
#define ARG_POINTER_REGNUM (SPECIAL_REG_FIRST + 0)
 
/* Register numbers used for passing a function's static chain pointer. */
#define STATIC_CHAIN_REGNUM 10
 
/* A C expression which is nonzero if a function must have and use a frame
pointer. */
#define FRAME_POINTER_REQUIRED 0
 
/* Structure to be filled in by compute_frame_size with register
save masks, and offsets for the current function. */
 
struct mt_frame_info
{
unsigned int total_size; /* # Bytes that the entire frame takes up. */
unsigned int pretend_size; /* # Bytes we push and pretend caller did. */
unsigned int args_size; /* # Bytes that outgoing arguments take up. */
unsigned int extra_size;
unsigned int reg_size; /* # Bytes needed to store regs. */
unsigned int var_size; /* # Bytes that variables take up. */
unsigned int frame_size; /* # Bytes in current frame. */
unsigned int reg_mask; /* Mask of saved registers. */
unsigned int save_fp; /* Nonzero if frame pointer must be saved. */
unsigned int save_lr; /* Nonzero if return pointer must be saved. */
int initialized; /* Nonzero if frame size already calculated. */
};
 
extern struct mt_frame_info current_frame_info;
 
/* If defined, this macro specifies a table of register pairs used to eliminate
unneeded registers that point into the stack frame. */
#define ELIMINABLE_REGS \
{ \
{ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
{ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM}, \
{FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM} \
}
 
/* A C expression that returns nonzero if the compiler is allowed to try to
replace register number FROM with register number TO. */
#define CAN_ELIMINATE(FROM, TO) \
((FROM) == ARG_POINTER_REGNUM && (TO) == STACK_POINTER_REGNUM \
? ! frame_pointer_needed \
: 1)
 
/* This macro is similar to `INITIAL_FRAME_POINTER_OFFSET'. It
specifies the initial difference between the specified pair of
registers. This macro must be defined if `ELIMINABLE_REGS' is
defined. */
#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
(OFFSET) = mt_initial_elimination_offset (FROM, TO)
 
/* If defined, the maximum amount of space required for outgoing
arguments will be computed and placed into the variable
`current_function_outgoing_args_size'. */
#define ACCUMULATE_OUTGOING_ARGS 1
 
/* Define this if it is the responsibility of the caller to
allocate the area reserved for arguments passed in registers. */
#define OUTGOING_REG_PARM_STACK_SPACE
 
/* The number of register assigned to holding function arguments. */
#define MT_NUM_ARG_REGS 4
 
/* Define this if it is the responsibility of the caller to allocate
the area reserved for arguments passed in registers. */
#define REG_PARM_STACK_SPACE(FNDECL) (MT_NUM_ARG_REGS * UNITS_PER_WORD)
 
/* Define this macro if `REG_PARM_STACK_SPACE' is defined, but the stack
parameters don't skip the area specified by it. */
#define STACK_PARMS_IN_REG_PARM_AREA
 
/* A C expression that should indicate the number of bytes of its own
arguments that a function pops on returning, or 0 if the function
pops no arguments and the caller must therefore pop them all after
the function returns. */
#define RETURN_POPS_ARGS(FUNDECL, FUNTYPE, STACK_SIZE) 0
 
#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
mt_function_arg (& (CUM), (MODE), (TYPE), (NAMED), FALSE)
 
#define CUMULATIVE_ARGS int
 
#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS) \
mt_init_cumulative_args (& (CUM), FNTYPE, LIBNAME, FNDECL, FALSE)
 
#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
mt_function_arg_advance (&CUM, MODE, TYPE, NAMED)
 
#define FUNCTION_ARG_BOUNDARY(MODE, TYPE) \
mt_function_arg_boundary (MODE, TYPE)
 
#define FUNCTION_ARG_REGNO_P(REGNO) \
((REGNO) >= FIRST_ARG_REGNUM && ((REGNO) <= LAST_ARG_REGNUM))
 
#define RETURN_VALUE_REGNUM RETVAL_REGNUM
#define FUNCTION_VALUE(VALTYPE, FUNC) \
mt_function_value (VALTYPE, TYPE_MODE(VALTYPE), FUNC)
 
#define LIBCALL_VALUE(MODE) \
mt_function_value (NULL_TREE, MODE, NULL_TREE)
 
#define FUNCTION_VALUE_REGNO_P(REGNO) ((REGNO) == RETURN_VALUE_REGNUM)
 
/* A C expression which can inhibit the returning of certain function
values in registers, based on the type of value. */
#define RETURN_IN_MEMORY(TYPE) (int_size_in_bytes (TYPE) > UNITS_PER_WORD)
 
/* Define this macro to be 1 if all structure and union return values must be
in memory. */
#define DEFAULT_PCC_STRUCT_RETURN 0
 
/* Define this macro as a C expression that is nonzero if the return
instruction or the function epilogue ignores the value of the stack
pointer; in other words, if it is safe to delete an instruction to
adjust the stack pointer before a return from the function. */
#define EXIT_IGNORE_STACK 1
 
#define EPILOGUE_USES(REGNO) mt_epilogue_uses(REGNO)
 
/* Define this macro if the function epilogue contains delay slots to which
instructions from the rest of the function can be "moved". */
#define DELAY_SLOTS_FOR_EPILOGUE 1
 
/* A C expression that returns 1 if INSN can be placed in delay slot number N
of the epilogue. */
#define ELIGIBLE_FOR_EPILOGUE_DELAY(INSN, N) 0
 
#define FUNCTION_PROFILER(FILE, LABELNO) gcc_unreachable ()
 
/* Trampolines are not implemented. */
#define TRAMPOLINE_SIZE 0
 
#define INITIALIZE_TRAMPOLINE(ADDR, FNADDR, STATIC_CHAIN)
 
/* Name of the libcall used for unsigned SImode comparison; presumably
   cached in mt_ucmpsi3_libcall (declared above) -- confirm in mt.c. */
#define UCMPSI3_LIBCALL "__ucmpsi3"
 
/* Addressing Modes. */
 
/* A C expression that is 1 if the RTX X is a constant which is a valid
address. */
#define CONSTANT_ADDRESS_P(X) CONSTANT_P (X)
 
/* A number, the maximum number of registers that can appear in a valid memory
address. Note that it is up to you to specify a value equal to the maximum
number that `GO_IF_LEGITIMATE_ADDRESS' would ever accept. */
#define MAX_REGS_PER_ADDRESS 1
 
#ifdef REG_OK_STRICT
#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \
{ \
if (mt_legitimate_address_p (MODE, X, 1)) \
goto ADDR; \
}
#else
#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \
{ \
if (mt_legitimate_address_p (MODE, X, 0)) \
goto ADDR; \
}
#endif
 
#ifdef REG_OK_STRICT
#define REG_OK_FOR_BASE_P(X) mt_reg_ok_for_base_p (X, 1)
#else
#define REG_OK_FOR_BASE_P(X) mt_reg_ok_for_base_p (X, 0)
#endif
 
#define REG_OK_FOR_INDEX_P(X) REG_OK_FOR_BASE_P (X)
 
#define LEGITIMIZE_ADDRESS(X, OLDX, MODE, WIN) {}
 
#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR, LABEL)
 
#define LEGITIMATE_CONSTANT_P(X) 1
 
/* A C expression for the cost of moving data of mode M between a register and
memory. A value of 2 is the default; this cost is relative to those in
`REGISTER_MOVE_COST'.
 
If moving between registers and memory is more expensive than between two
registers, you should define this macro to express the relative cost. */
#define MEMORY_MOVE_COST(M,C,I) 10
 
/* Define this macro as a C expression which is nonzero if accessing less than
a word of memory (i.e. a `char' or a `short') is no faster than accessing a
word of memory. */
#define SLOW_BYTE_ACCESS 1
 
#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) 1
 
#define TEXT_SECTION_ASM_OP ".text"
 
#define DATA_SECTION_ASM_OP ".data"
 
#define BSS_SECTION_ASM_OP "\t.section\t.bss"
 
/* A C string constant for text to be output before each `asm' statement or
group of consecutive ones. Normally this is `"#APP"', which is a comment
that has no effect on most assemblers but tells the GNU assembler that it
must check the lines that follow for all valid assembler constructs. */
#define ASM_APP_ON "#APP\n"
 
/* A C string constant for text to be output after each `asm' statement or
group of consecutive ones. Normally this is `"#NO_APP"', which tells the
GNU assembler to resume making the time-saving assumptions that are valid
for ordinary compiler output. */
#define ASM_APP_OFF "#NO_APP\n"
 
/* This is how to output an assembler line defining a `char' constant. */
#define ASM_OUTPUT_CHAR(FILE, VALUE) \
do \
{ \
fprintf (FILE, "\t.byte\t"); \
output_addr_const (FILE, (VALUE)); \
fprintf (FILE, "\n"); \
} \
while (0)
 
/* This is how to output an assembler line defining a `short' constant. */
#define ASM_OUTPUT_SHORT(FILE, VALUE) \
do \
{ \
fprintf (FILE, "\t.hword\t"); \
output_addr_const (FILE, (VALUE)); \
fprintf (FILE, "\n"); \
} \
while (0)
 
/* This is how to output an assembler line defining an `int' constant.
We also handle symbol output here. */
#define ASM_OUTPUT_INT(FILE, VALUE) \
do \
{ \
fprintf (FILE, "\t.word\t"); \
output_addr_const (FILE, (VALUE)); \
fprintf (FILE, "\n"); \
} \
while (0)
 
/* A C statement to output to the stdio stream STREAM an assembler instruction
to assemble a single byte containing the number VALUE.
 
This declaration must be present. */
#define ASM_OUTPUT_BYTE(STREAM, VALUE) \
fprintf (STREAM, "\t%s\t0x%x\n", ASM_BYTE_OP, (VALUE))
 
/* Globalizing directive for a label. */
#define GLOBAL_ASM_OP "\t.globl "
 
/* Assembler names for the hard registers, indexed by register number.
Registers 16-19 ("LOOP1".."LOOP4") are presumably the hardware-loop
registers (see UNSPEC_LOOP in mt.md); "ap" is the argument-pointer
pseudo that is eliminated before final output. */
#define REGISTER_NAMES \
{ "R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7", \
"R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15", \
"LOOP1", "LOOP2", "LOOP3", "LOOP4", "ap" }

/* If defined, a C initializer for an array of structures containing a name and
a register number. This macro defines additional names for hard registers,
thus allowing the `asm' option in declarations to refer to registers using
alternate names. */
#define ADDITIONAL_REGISTER_NAMES \
{ { "FP", 12}, {"SP", 13}, {"RA", 14}, {"IRA", 15} }

/* Define this macro if you are using an unusual assembler that requires
different names for the machine instructions.

The definition is a C statement or statements which output an assembler
instruction opcode to the stdio stream STREAM. The macro-operand PTR is a
variable of type `char *' which points to the opcode name in its "internal"
form--the form that is written in the machine description. The definition
should output the opcode name to STREAM, performing any translation you
desire, and increment the variable PTR to point at the end of the opcode so
that it will not be output twice. */
#define ASM_OUTPUT_OPCODE(STREAM, PTR) \
(PTR) = mt_asm_output_opcode (STREAM, PTR)

/* Hook run by final before each insn is output; implemented in mt.c. */
#define FINAL_PRESCAN_INSN(INSN, OPVEC, NOPERANDS) \
mt_final_prescan_insn (INSN, OPVEC, NOPERANDS)

/* Print operand X with modifier CODE; implemented in mt.c. */
#define PRINT_OPERAND(STREAM, X, CODE) mt_print_operand (STREAM, X, CODE)

/* A C expression which evaluates to true if CODE is a valid punctuation
character for use in the `PRINT_OPERAND' macro. */
/* #: Print nop for delay slot. */
#define PRINT_OPERAND_PUNCT_VALID_P(CODE) ((CODE) == '#')

/* Print a memory address operand; implemented in mt.c. */
#define PRINT_OPERAND_ADDRESS(STREAM, X) mt_print_operand_address (STREAM, X)

/* If defined, C string expressions to be used for the `%R', `%L', `%U', and
`%I' options of `asm_fprintf' (see `final.c'). These are useful when a
single `md' file must support multiple assembler formats. In that case, the
various `tm.h' files can define these macros differently.

USER_LABEL_PREFIX is defined in svr4.h. */
#define REGISTER_PREFIX "%"
#define LOCAL_LABEL_PREFIX "."
#define USER_LABEL_PREFIX ""
#define IMMEDIATE_PREFIX ""
 
/* This macro should be provided on machines where the addresses in a dispatch
table are relative to the table's own address.

The definition should be a C statement to output to the stdio stream STREAM
an assembler pseudo-instruction to generate a difference between two labels.
VALUE and REL are the numbers of two internal labels. The definitions of
these labels are output using `targetm.asm_out.internal_label', and they
must be printed in the same way here. */
#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL) \
fprintf (STREAM, "\t.word .L%d-.L%d\n", VALUE, REL)

/* This macro should be provided on machines where the addresses in a dispatch
table are absolute.

The definition should be a C statement to output to the stdio stream STREAM
an assembler pseudo-instruction to generate a reference to a label. VALUE
is the number of an internal label whose definition is output using
`targetm.asm_out.internal_label'. */
#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM, VALUE) \
fprintf (STREAM, "\t.word .L%d\n", VALUE)

/* DWARF frame-table column for the return address: the link register. */
#define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM (GPR_LINK)

/* Registers carrying the two words of exception data from the unwinder
to the landing pad: r7 and r8. */
#define EH_RETURN_DATA_REGNO(N) \
((N) == 0 ? GPR_R7 : (N) == 1 ? GPR_R8 : INVALID_REGNUM)

/* Register (r11) holding the stack adjustment applied by eh_return. */
#define EH_RETURN_STACKADJ_REGNO GPR_R11
#define EH_RETURN_STACKADJ_RTX \
gen_rtx_REG (SImode, EH_RETURN_STACKADJ_REGNO)
/* Register (r10) holding the handler address used by eh_return. */
#define EH_RETURN_HANDLER_REGNO GPR_R10
#define EH_RETURN_HANDLER_RTX \
gen_rtx_REG (SImode, EH_RETURN_HANDLER_REGNO)
/* Emit a power-of-two alignment directive (2^POWER bytes). */
#define ASM_OUTPUT_ALIGN(STREAM, POWER) \
fprintf ((STREAM), "\t.p2align %d\n", (POWER))

/* Default debug format is DWARF 2. */
#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG

#ifndef DWARF2_DEBUGGING_INFO
#define DWARF2_DEBUGGING_INFO
#endif

/* Define this macro if GCC should produce dwarf version 2-style
line numbers. This usually requires extending the assembler to
support them, and #defining DWARF2_LINE_MIN_INSN_LENGTH in the
assembler configuration header files. */
#define DWARF2_ASM_LINE_DEBUG_INFO 1

/* An alias for a machine mode name. This is the machine mode that
elements of a jump-table should have. */
#define CASE_VECTOR_MODE SImode

/* Define this macro if operations between registers with integral
mode smaller than a word are always performed on the entire
register. Most RISC machines have this property and most CISC
machines do not. */
#define WORD_REGISTER_OPERATIONS

/* The maximum number of bytes that a single instruction can move quickly from
memory to memory. */
#define MOVE_MAX 4

/* A C expression which is nonzero if on this machine it is safe to "convert"
an integer of INPREC bits to one of OUTPREC bits (where OUTPREC is smaller
than INPREC) by merely operating on it as if it had only OUTPREC bits.

On many machines, this expression can be 1.

When `TRULY_NOOP_TRUNCATION' returns 1 for a pair of sizes for modes for
which `MODES_TIEABLE_P' is 0, suboptimal code can result. If this is the
case, making `TRULY_NOOP_TRUNCATION' return 0 in such cases may improve
things. */
#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1

/* Pointers are 32-bit (SImode). */
#define Pmode SImode

/* An alias for the machine mode used for memory references to functions being
called, in `call' RTL expressions. On most machines this should be
`QImode'. */
#define FUNCTION_MODE QImode

/* Enable SysV-style #pragma handling. */
#define HANDLE_SYSV_PRAGMA 1

/* Indicate how many instructions can be issued at the same time. */
#define ISSUE_RATE 1

/* Define the information needed to generate branch and scc insns. This is
stored from the compare operation. Note that we can't use "rtx" here
since it hasn't been defined! */

extern struct rtx_def * mt_compare_op0;
extern struct rtx_def * mt_compare_op1;
 
/ABI.txt
0,0 → 1,219
Copyright (C) 2005 Free Software Foundation, Inc.
 
Copying and distribution of this file, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved.
 
--------------------------------------------------------------------------
 
MS1 ABI
=========
 
Sizes and alignments
--------------------
 
Type Size (bytes) Alignment (bytes)
 
char 1 1
short 2 2
int 4 4
unsigned 4 4
long 4 4
long long 8 8
float 4 4
double 8 8
pointers 4 4
 
* alignment within aggregates (structs and unions) is as above, with
padding added if needed
* aggregates have alignment equal to that of their most aligned
member
* aggregates have sizes which are a multiple of their alignment
 
 
Floating point
--------------
 
All emulated using IEEE floating point conventions.
 
Registers
----------------
 
r0 always zero
r1 argument register 1
r2 argument register 2
r3 argument register 3
r4 argument register 4
r5 callee must save
r6 callee must save
r7 call clobbers
r8 call clobbers
r9 call clobbers
r10 call clobbers
r11 function return value
r12 frame pointer
r13 stack pointer
r14 linkage pointer
r15 interrupt pointer
 
Stack alignment 8 bytes
 
Structures passed <= 32 bits as values, else as pointers
 
The MS1 Stack
---------------
 
Space is allocated as needed in the stack frame for the following at compile
time:
 
* Outgoing parameters beyond the fourth
 
* All automatic arrays, automatic data aggregates, automatic
scalars which must be addressable, and automatic scalars for
which there is no room in registers
 
* Compiler-generated temporary values (typically when there are
too many for the compiler to keep them all in registers)
 
Space can be allocated dynamically (at runtime) in the stack frame for the
following:
 
* Memory allocated using the alloca() function of the C library
 
Addressable automatic variables on the stack are addressed with positive
offsets relative to r12; dynamically allocated space is addressed with positive
offsets from the pointer returned by alloca().
 
Stack Frame
-----------
 
+-----------------------+
| Parameter Word 1 |
+-----------------------+ <-sp
| Previous FP |
+-----------------------+
| Return address |
+-----------------------+
| Saved Registers |
+-----------------------+
| ... |
+-----------------------+
| Local Variables |
+-----------------------+ <-fp
| Alloca |
+-----------------------+
| ... |
+-----------------------+
| Parameter Word 2 |
+-----------------------+
| Parameter Word 1 |
+-----------------------+ <-sp
 
 
Parameter Assignment to Registers
---------------------------------
 
Consider the parameters in a function call as ordered from left (first
parameter) to right. GR contains the number of the next available
general-purpose register. STARG is the address of the next available stack
parameter word.
 
INITIALIZE:
Set GR=r1 and STARG to point to parameter word 1.
 
SCAN:
If there are no more parameters, terminate.
Otherwise, select one of the following depending on the type
of the next parameter:
 
SIMPLE ARG:
 
A SIMPLE ARG is one of the following:
 
* One of the simple integer types which will fit into a
general-purpose register,
* A pointer to an object of any type,
* A struct or union small enough to fit in a register (<= 32 bits)
* A larger struct or union, which shall be treated as a
pointer to the object or to a copy of the object.
(See below for when copies are made.)
 
If GR > r4, go to STACK. Otherwise, load the parameter value into
general-purpose register GR and advance GR to the next general-purpose
register. Values shorter than the register size are sign-extended or
zero-extended depending on whether they are signed or unsigned. Then
go to SCAN.
 
DOUBLE or LONG LONG:
 
If GR > r3, go to STACK. Otherwise, if GR is odd, advance GR to the
next register. Load the 64-bit long long or double value into register
pair GR and GR+1. Advance GR to GR+2 and go to SCAN.
 
STACK:
 
Parameters not otherwise handled above are passed in the parameter
words of the caller's stack frame. SIMPLE ARGs, as defined above, are
considered to have size and alignment equal to the size of a
general-purpose register, with simple argument types shorter than this
sign- or zero-extended to this width. Round STARG up to a multiple of
the alignment requirement of the parameter and copy the argument
byte-for-byte into STARG, STARG+1, ... STARG+size-1. Set STARG to
STARG+size and go to SCAN.
 
 
Structure passing
-----------------
 
As noted above, code which passes structures and unions by value is implemented
specially. (In this section, "struct" will refer to structs and unions
inclusively.) Structs small enough to fit in a register are passed by value in
a single register or in a stack frame slot the size of a register. Structs
containing a single double or long long component are passed by value in two
registers or in a stack frame slot the size of two registers. Other structs
are handled by passing the address of the structure. In this case, a copy of
the structure will be made if necessary in order to preserve the pass-by-value
semantics.
 
Copies of large structs are made under the following rules:
 
ANSI mode K&R Mode
--------- --------
Normal param Callee copies if needed Caller copies
Varargs (...) param Caller copies Caller copies
 
In the case of normal (non-varargs) large-struct parameters in ANSI mode, the
callee is responsible for producing the same effect as if a copy of the
structure were passed, preserving the pass-by-value semantics. This may be
accomplished by having the callee make a copy, but in some cases the callee may
be able to determine that a copy is not necessary in order to produce the same
results. In such cases, the callee may choose to avoid making a copy of the
parameter.
 
 
Varargs handling
----------------
 
No special changes are needed for handling varargs parameters other than the
caller knowing that a copy is needed on struct parameters larger than a
register (see above).
 
The varargs macros set up a register save area for the general-purpose
registers to be saved. Because the save area lies between the caller and
callee stack frames, the saved register parameters are contiguous with
parameters passed on the stack. A pointer advances from the register save area
into the caller's stack frame.
 
 
Function return values
----------------------
 
Type Register
---- --------
int r11
short r11
long r11
long long stack
float r11
double stack
 
/lib2extra-funcs.c
0,0 → 1,232
/* Copyright (C) 2005 Free Software Foundation, Inc.
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.
 
In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file. (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)
 
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
 
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA. */
 
/* Bits in an addressable storage unit on this target. */
#define BITS_PER_UNIT 8

/* 16-bit halfword types (signed/unsigned). */
typedef int HItype __attribute__ ((mode (HI)));
typedef unsigned int UHItype __attribute__ ((mode (HI)));

/* 32-bit word types (signed/unsigned). */
typedef int SItype __attribute__ ((mode (SI)));
typedef unsigned int USItype __attribute__ ((mode (SI)));

/* Integer the width of a machine word. */
typedef int word_type __attribute__ ((mode (__word__)));

/* A 32-bit value split into two 16-bit halves.  The shift helpers below
treat `low' as the less-significant half; NOTE(review): the member order
must match the target's halfword endianness -- confirm against the
port's byte-order settings. */
struct SIstruct {HItype low, high;};

/* Overlay allowing a 32-bit value to be viewed whole (ll) or as the
two halves (s). */
typedef union
{
struct SIstruct s;
SItype ll;
} SIunion;
 
/* Logical (zero-filling) right shift of a 32-bit value U by B bits,
   built from 16-bit halfword operations.  B == 0 returns U unchanged;
   behavior mirrors libgcc's generic __lshrsi3.  */
SItype
__lshrsi3 (SItype u, word_type b)
{
  SIunion in, out;
  word_type keep;                      /* bits of the high half staying high */

  if (b == 0)
    return u;

  in.ll = u;
  keep = (sizeof (HItype) * BITS_PER_UNIT) - b;

  if (keep <= 0)
    {
      /* Shift of a full halfword or more: the old high half (shifted)
         becomes the low half, zeros fill the high half.  */
      out.s.high = 0;
      out.s.low = (UHItype) in.s.high >> -keep;
    }
  else
    {
      /* Shift of less than a halfword: bits cross from high to low.  */
      UHItype crossing = (UHItype) in.s.high << keep;
      out.s.high = (UHItype) in.s.high >> b;
      out.s.low = ((UHItype) in.s.low >> b) | crossing;
    }

  return out.ll;
}
 
/* Left shift of a 32-bit value U by B bits, built from 16-bit halfword
   operations.  B == 0 returns U unchanged; behavior mirrors libgcc's
   generic __ashlsi3.  */
SItype
__ashlsi3 (SItype u, word_type b)
{
  SIunion in, out;
  word_type keep;                      /* bits of the low half staying low */

  if (b == 0)
    return u;

  in.ll = u;
  keep = (sizeof (HItype) * BITS_PER_UNIT) - b;

  if (keep <= 0)
    {
      /* Shift of a full halfword or more: the old low half (shifted)
         becomes the high half, zeros fill the low half.  */
      out.s.low = 0;
      out.s.high = (UHItype) in.s.low << -keep;
    }
  else
    {
      /* Shift of less than a halfword: bits cross from low to high.  */
      UHItype crossing = (UHItype) in.s.low >> keep;
      out.s.low = (UHItype) in.s.low << b;
      out.s.high = ((UHItype) in.s.high << b) | crossing;
    }

  return out.ll;
}
 
/* Arithmetic (sign-filling) right shift of a 32-bit value U by B bits,
   built from 16-bit halfword operations.  B == 0 returns U unchanged;
   behavior mirrors libgcc's generic __ashrsi3.  */
SItype
__ashrsi3 (SItype u, word_type b)
{
  SIunion in, out;
  word_type keep;                      /* bits of the high half staying high */

  if (b == 0)
    return u;

  in.ll = u;
  keep = (sizeof (HItype) * BITS_PER_UNIT) - b;

  if (keep <= 0)
    {
      /* Shift of a full halfword or more: the high half becomes all
         copies of the sign bit (all ones or all zeros); the low half is
         the old high half arithmetically shifted.  */
      out.s.high = in.s.high >> (sizeof (HItype) * BITS_PER_UNIT - 1);
      out.s.low = in.s.high >> -keep;
    }
  else
    {
      /* Shift of less than a halfword: bits cross from high to low;
         the high half keeps a signed (arithmetic) shift.  */
      UHItype crossing = (UHItype) in.s.high << keep;
      out.s.high = in.s.high >> b;
      out.s.low = ((UHItype) in.s.low >> b) | crossing;
    }

  return out.ll;
}
 
/* 32-bit multiply by classic shift-and-add: for every set bit of A,
   add the correspondingly shifted B into the product.  Works for signed
   operands too, since two's-complement multiplication is bit-identical.  */
USItype
__mulsi3 (USItype a, USItype b)
{
  USItype product = 0;

  for (; a != 0; a >>= 1, b <<= 1)
    if (a & 1)
      product += b;

  return product;
}
 
/* Restoring shift-and-subtract unsigned 32-bit division.
   Returns NUM % DEN when MODWANTED is nonzero, otherwise NUM / DEN.
   Shared worker for the signed and unsigned div/mod entry points.
   (As in the original, DEN == 0 simply yields quotient 0 / remainder NUM.)  */
USItype
udivmodsi4 (USItype num, USItype den, word_type modwanted)
{
  USItype mask = 1;                    /* quotient bit being decided */
  USItype quotient = 0;

  /* Scale DEN up until it dominates NUM, stopping before its top bit
     would be shifted out.  MASK tracks the scale.  */
  while (den < num && mask && !(den & (1L << 31)))
    {
      den <<= 1;
      mask <<= 1;
    }

  /* Walk the scale back down, subtracting wherever DEN still fits.  */
  while (mask)
    {
      if (num >= den)
        {
          num -= den;
          quotient |= mask;
        }
      mask >>= 1;
      den >>= 1;
    }

  return modwanted ? num : quotient;
}
 
/* Signed 32-bit division built on the unsigned worker: divide the
   magnitudes, then negate when exactly one operand was negative
   (equivalent to the original's double sign toggle).  */
SItype
__divsi3 (SItype a, SItype b)
{
  word_type negative = (a < 0) != (b < 0);
  SItype quotient;

  if (a < 0)
    a = -a;
  if (b < 0)
    b = -b;

  quotient = udivmodsi4 (a, b, 0);

  return negative ? -quotient : quotient;
}
 
/* Signed 32-bit remainder built on the unsigned worker.  The result
   takes the sign of the dividend (C semantics); the divisor's sign is
   only stripped, never reflected in the result.  */
SItype
__modsi3 (SItype a, SItype b)
{
  word_type negative = (a < 0);
  SItype remainder;

  if (a < 0)
    a = -a;
  if (b < 0)
    b = -b;

  remainder = udivmodsi4 (a, b, 1);

  return negative ? -remainder : remainder;
}
 
/* Unsigned 32-bit division: quotient only.  The SItype signature is
   kept for ABI compatibility; the worker treats both operands as
   unsigned.  */
SItype
__udivsi3 (SItype a, SItype b)
{
  return udivmodsi4 (a, b, /* modwanted = */ 0);
}
 
/* Unsigned 32-bit remainder.  The SItype signature is kept for ABI
   compatibility; the worker treats both operands as unsigned.  */
SItype
__umodsi3 (SItype a, SItype b)
{
  return udivmodsi4 (a, b, /* modwanted = */ 1);
}
/mt-protos.h
0,0 → 1,65
/* Prototypes for exported functions defined in mt.c (formerly ms1.c)
Copyright (C) 2005, 2007 Free Software Foundation, Inc.
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 3, or (at your
option) any later version.
 
GCC is distributed in the hope that it will be useful,but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
License for more details.
 
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
 
/* Prototypes usable without tree or rtx definitions in scope. */
extern void mt_init_expanders (void);
extern void mt_expand_prologue (void);
extern void mt_expand_epilogue (enum epilogue_type);
extern unsigned mt_compute_frame_size (int);
extern void mt_override_options (void);
extern int mt_initial_elimination_offset (int, int);
extern const char * mt_asm_output_opcode (FILE *, const char *);
extern int mt_epilogue_uses (int);
extern void mt_add_loop (void);

/* Prototypes mentioning `tree'; visible only once the tree structures
have been declared. */
#ifdef TREE_CODE
extern const char * mt_cannot_inline_p (tree);
extern int mt_function_arg_boundary (enum machine_mode, tree);
extern void mt_function_arg_advance (CUMULATIVE_ARGS *, enum machine_mode, tree, int);
#endif

/* Prototypes mentioning `rtx'. */
#ifdef RTX_CODE
extern void mt_expand_eh_return (rtx *);
extern void mt_emit_eh_epilogue (rtx *);
extern void mt_print_operand (FILE *, rtx, int);
extern void mt_print_operand_address (FILE *, rtx);
extern int mt_check_split (rtx, enum machine_mode);
extern int mt_reg_ok_for_base_p (rtx, int);
extern int mt_legitimate_address_p (enum machine_mode, rtx, int);
/* Predicates for machine description. */
extern int uns_arith_operand (rtx, enum machine_mode);
extern int arith_operand (rtx, enum machine_mode);
extern int reg_or_0_operand (rtx, enum machine_mode);
extern int big_const_operand (rtx, enum machine_mode);
extern int single_const_operand (rtx, enum machine_mode);
extern void mt_emit_cbranch (enum rtx_code, rtx, rtx, rtx);
extern void mt_set_memflags (rtx);
extern rtx mt_return_addr_rtx (int);
extern void mt_split_words (enum machine_mode, enum machine_mode, rtx *);
extern void mt_final_prescan_insn (rtx, rtx *, int);
#endif

/* Prototypes needing both tree and rtx. */
#ifdef TREE_CODE
#ifdef RTX_CODE
extern void mt_init_cumulative_args (CUMULATIVE_ARGS *, tree, rtx, tree, int);
extern rtx mt_function_arg (const CUMULATIVE_ARGS *, enum machine_mode, tree, int, int);
extern void mt_va_start (tree, rtx);
extern enum reg_class mt_secondary_reload_class (enum reg_class, enum machine_mode, rtx);
extern rtx mt_function_value (tree, enum machine_mode, tree);
#endif
#endif

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.