URL: https://opencores.org/ocsvn/openrisc/openrisc/trunk
Subversion Repositories: openrisc
Compare Revisions
This comparison shows the changes necessary to convert path
/openrisc/trunk/gnu-old/gcc-4.2.2/gcc/config/mcore
from Rev 154 to Rev 816.
/mcore-elf.h
0,0 → 1,129
/* Definitions of MCore target. |
Copyright (C) 1998, 1999, 2000, 2001, 2002, 2004, 2007 |
Free Software Foundation, Inc. |
Contributed by Cygnus Solutions. |
|
This file is part of GCC. |
|
GCC is free software; you can redistribute it and/or modify |
it under the terms of the GNU General Public License as published by |
the Free Software Foundation; either version 3, or (at your option) |
any later version. |
|
GCC is distributed in the hope that it will be useful, |
but WITHOUT ANY WARRANTY; without even the implied warranty of |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
GNU General Public License for more details. |
|
You should have received a copy of the GNU General Public License |
along with GCC; see the file COPYING3. If not see |
<http://www.gnu.org/licenses/>. */ |
|
#ifndef __MCORE_ELF_H__ |
#define __MCORE_ELF_H__ |
|
/* Run-time Target Specification. */ |
#define TARGET_VERSION fputs (" (Motorola MCORE/elf)", stderr) |
|
/* Use DWARF2 debugging info. */ |
#define DWARF2_DEBUGGING_INFO 1 |
|
#undef PREFERRED_DEBUGGING_TYPE |
#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG |
|
#define MCORE_EXPORT_NAME(STREAM, NAME) \ |
do \ |
{ \ |
fprintf (STREAM, "\t.section .exports\n"); \ |
fprintf (STREAM, "\t.ascii \" -export:%s\"\n", \ |
(* targetm.strip_name_encoding) (NAME)); \ |
in_section = NULL; \ |
} \ |
while (0); |
|
/* Write the extra assembler code needed to declare a function properly. |
Some svr4 assemblers need to also have something extra said about the |
function's return value. We allow for that here. */ |
#undef ASM_DECLARE_FUNCTION_NAME |
#define ASM_DECLARE_FUNCTION_NAME(FILE, NAME, DECL) \ |
do \ |
{ \ |
if (mcore_dllexport_name_p (NAME)) \ |
{ \ |
MCORE_EXPORT_NAME (FILE, NAME); \ |
switch_to_section (function_section (DECL)); \ |
} \ |
ASM_OUTPUT_TYPE_DIRECTIVE (FILE, NAME, "function"); \ |
ASM_DECLARE_RESULT (FILE, DECL_RESULT (DECL)); \ |
ASM_OUTPUT_LABEL (FILE, NAME); \ |
} \ |
while (0) |
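/* For illustration (a sketch, not part of the original header): assuming a
   hypothetical dllexport'ed function "foo" placed in .text, the macros
   above emit assembler output roughly like

       .section .exports
       .ascii " -export:foo"
       .text
       .type foo, @function
   foo:

   with the exact .type syntax coming from ASM_OUTPUT_TYPE_DIRECTIVE.  */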
|
/* Write the extra assembler code needed to declare an object properly. */ |
#undef ASM_DECLARE_OBJECT_NAME |
#define ASM_DECLARE_OBJECT_NAME(FILE, NAME, DECL) \ |
do \ |
{ \ |
HOST_WIDE_INT size; \ |
if (mcore_dllexport_name_p (NAME)) \ |
{ \ |
section *save_section = in_section; \ |
MCORE_EXPORT_NAME (FILE, NAME); \ |
switch_to_section (save_section); \ |
} \ |
ASM_OUTPUT_TYPE_DIRECTIVE (FILE, NAME, "object"); \ |
size_directive_output = 0; \ |
if (!flag_inhibit_size_directive && DECL_SIZE (DECL)) \ |
{ \ |
size_directive_output = 1; \ |
size = int_size_in_bytes (TREE_TYPE (DECL)); \ |
ASM_OUTPUT_SIZE_DIRECTIVE (FILE, NAME, size); \ |
} \ |
ASM_OUTPUT_LABEL(FILE, NAME); \ |
} \ |
while (0) |
|
/* Output the size directive for a decl in rest_of_decl_compilation |
in the case where we did not do so before the initializer. |
Once we find the error_mark_node, we know that the value of |
size_directive_output was set |
by ASM_DECLARE_OBJECT_NAME when it was run for the same decl. */ |
#undef ASM_FINISH_DECLARE_OBJECT |
#define ASM_FINISH_DECLARE_OBJECT(FILE, DECL, TOP_LEVEL, AT_END) \ |
do \ |
{ \ |
const char * name = XSTR (XEXP (DECL_RTL (DECL), 0), 0); \ |
HOST_WIDE_INT size; \ |
if (!flag_inhibit_size_directive && DECL_SIZE (DECL) \ |
&& ! AT_END && TOP_LEVEL \ |
&& DECL_INITIAL (DECL) == error_mark_node \ |
&& !size_directive_output) \ |
{ \ |
size_directive_output = 1; \ |
size = int_size_in_bytes (TREE_TYPE (DECL)); \ |
ASM_OUTPUT_SIZE_DIRECTIVE (FILE, name, size); \ |
} \ |
} \ |
while (0) |
|
|
#undef STARTFILE_SPEC |
#define STARTFILE_SPEC "crt0.o%s crti.o%s crtbegin.o%s" |
|
/* Include the OS stub library, so that the code can be simulated. |
This is not the right way to do this. Ideally this kind of thing |
should be done in the linker script - but I have not worked out how |
to specify the location of a linker script in a gcc command line yet. */ |
#undef ENDFILE_SPEC |
#define ENDFILE_SPEC "%{!mno-lsim:-lsim} crtend.o%s crtn.o%s" |
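/* For illustration (not part of the original header): with the specs above,
   the driver brackets the user's objects and libraries roughly as

       crt0.o crti.o crtbegin.o  <objects, libraries>  -lsim crtend.o crtn.o

   and -mno-lsim drops the -lsim stub library.  The %s suffix tells the
   driver to search its startfile directories for each of these files.  */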
|
/* The subroutine calls in the .init and .fini sections create literal |
pools which must be jumped around.... */ |
#define FORCE_CODE_SECTION_ALIGN asm ("br 1f ; .literals ; 1:"); |
|
#undef CTORS_SECTION_ASM_OP |
#define CTORS_SECTION_ASM_OP "\t.section\t.ctors,\"aw\"" |
#undef DTORS_SECTION_ASM_OP |
#define DTORS_SECTION_ASM_OP "\t.section\t.dtors,\"aw\"" |
|
#endif /* __MCORE_ELF_H__ */ |
/mcore-protos.h
0,0 → 1,79
/* Prototypes for exported functions defined in mcore.c |
Copyright (C) 2000, 2002, 2003, 2004, 2005, 2007 |
Free Software Foundation, Inc. |
Contributed by Nick Clifton (nickc@redhat.com) |
|
This file is part of GCC. |
|
GCC is free software; you can redistribute it and/or modify |
it under the terms of the GNU General Public License as published by |
the Free Software Foundation; either version 3, or (at your option) |
any later version. |
|
GCC is distributed in the hope that it will be useful, |
but WITHOUT ANY WARRANTY; without even the implied warranty of |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
GNU General Public License for more details. |
|
You should have received a copy of the GNU General Public License |
along with GCC; see the file COPYING3. If not see |
<http://www.gnu.org/licenses/>. */ |
|
extern const char * mcore_output_jump_label_table (void); |
extern void mcore_expand_prolog (void); |
extern void mcore_expand_epilog (void); |
extern int mcore_const_ok_for_inline (long); |
extern int mcore_num_ones (int); |
extern int mcore_num_zeros (int); |
extern int mcore_initial_elimination_offset (int, int); |
extern int mcore_byte_offset (unsigned int); |
extern int mcore_halfword_offset (unsigned int); |
extern int mcore_const_trick_uses_not (long); |
extern void mcore_override_options (void); |
extern int mcore_dllexport_name_p (const char *); |
extern int mcore_dllimport_name_p (const char *); |
extern int mcore_naked_function_p (void); |
|
#ifdef TREE_CODE |
#ifdef HAVE_MACHINE_MODES |
extern int mcore_num_arg_regs (enum machine_mode, tree); |
#endif /* HAVE_MACHINE_MODES */ |
|
#ifdef RTX_CODE |
extern rtx mcore_function_value (tree, tree); |
#endif /* RTX_CODE */ |
#endif /* TREE_CODE */ |
|
#ifdef RTX_CODE |
|
extern GTY(()) rtx arch_compare_op0; |
extern GTY(()) rtx arch_compare_op1; |
|
extern const char * mcore_output_bclri (rtx, int); |
extern const char * mcore_output_bseti (rtx, int); |
extern const char * mcore_output_cmov (rtx *, int, const char *); |
extern char * mcore_output_call (rtx *, int); |
extern int mcore_is_dead (rtx, rtx); |
extern int mcore_expand_insv (rtx *); |
extern int mcore_modify_comparison (RTX_CODE); |
extern bool mcore_expand_block_move (rtx *); |
extern const char * mcore_output_andn (rtx, rtx *); |
extern void mcore_print_operand_address (FILE *, rtx); |
extern void mcore_print_operand (FILE *, rtx, int); |
extern rtx mcore_gen_compare_reg (RTX_CODE); |
extern int mcore_symbolic_address_p (rtx); |
extern bool mcore_r15_operand_p (rtx); |
extern enum reg_class mcore_secondary_reload_class (enum reg_class, enum machine_mode, rtx); |
extern enum reg_class mcore_reload_class (rtx, enum reg_class); |
extern int mcore_is_same_reg (rtx, rtx); |
extern int mcore_arith_S_operand (rtx); |
|
#ifdef HAVE_MACHINE_MODES |
extern const char * mcore_output_move (rtx, rtx *, enum machine_mode); |
extern const char * mcore_output_movedouble (rtx *, enum machine_mode); |
extern int const_ok_for_mcore (int); |
#ifdef TREE_CODE |
extern rtx mcore_function_arg (CUMULATIVE_ARGS, enum machine_mode, tree, int); |
#endif /* TREE_CODE */ |
#endif /* HAVE_MACHINE_MODES */ |
#endif /* RTX_CODE */ |
/crti.asm
0,0 → 1,74
# crti.asm for ELF based systems |
|
# Copyright (C) 1992, 1998, 1999 Free Software Foundation, Inc. |
# Written By David Vinayak Henkel-Wallace, June 1992 |
# |
# This file is free software; you can redistribute it and/or modify it |
# under the terms of the GNU General Public License as published by the |
# Free Software Foundation; either version 2, or (at your option) any |
# later version. |
# |
# In addition to the permissions in the GNU General Public License, the |
# Free Software Foundation gives you unlimited permission to link the |
# compiled version of this file with other programs, and to distribute |
# those programs without any restriction coming from the use of this |
# file. (The General Public License restrictions do apply in other |
# respects; for example, they cover modification of the file, and |
# distribution when not linked into another program.) |
# |
# This file is distributed in the hope that it will be useful, but |
# WITHOUT ANY WARRANTY; without even the implied warranty of |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
# General Public License for more details. |
# |
# You should have received a copy of the GNU General Public License |
# along with this program; see the file COPYING. If not, write to |
# the Free Software Foundation, 51 Franklin Street, Fifth Floor, |
# Boston, MA 02110-1301, USA. |
# |
# As a special exception, if you link this library with files |
# compiled with GCC to produce an executable, this does not cause |
# the resulting executable to be covered by the GNU General Public License. |
# This exception does not however invalidate any other reasons why |
# the executable file might be covered by the GNU General Public License. |
# |
|
# This file just makes a stack frame for the contents of the .fini and |
# .init sections. Users may put any desired instructions in those |
# sections. |
|
.file "crti.asm" |
|
.section ".init" |
.global _init |
.type _init,@function |
.align 4 |
_init: |
subi r0, 16 |
st.w r15, (r0, 12) |
|
# These nops are here to align the end of this code with a 16 byte |
# boundary. The linker will start inserting code into the .init |
# section at such a boundary. |
|
nop |
nop |
nop |
nop |
nop |
nop |
|
|
.section ".fini" |
.global _fini |
.type _fini,@function |
.align 4 |
_fini: |
subi r0, 16 |
st.w r15, (r0, 12) |
nop |
nop |
nop |
nop |
nop |
nop |
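# For reference, a sketch (not part of this file) of the matching
# crtn.asm tail that conventionally closes _init and _fini by undoing
# the frame built above:
#
#	.section ".init"
#	ld.w	r15, (r0, 12)
#	addi	r0, 16
#	jmp	r15
#
# with the same sequence repeated for the ".fini" section.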
/predicates.md
0,0 → 1,332
;; Predicate definitions for Motorola MCore. |
;; Copyright (C) 2005, 2007 Free Software Foundation, Inc. |
;; |
;; This file is part of GCC. |
;; |
;; GCC is free software; you can redistribute it and/or modify |
;; it under the terms of the GNU General Public License as published by |
;; the Free Software Foundation; either version 3, or (at your option) |
;; any later version. |
;; |
;; GCC is distributed in the hope that it will be useful, |
;; but WITHOUT ANY WARRANTY; without even the implied warranty of |
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
;; GNU General Public License for more details. |
;; |
;; You should have received a copy of the GNU General Public License |
;; along with GCC; see the file COPYING3. If not see |
;; <http://www.gnu.org/licenses/>. |
|
;; Nonzero if OP is a normal arithmetic register. |
|
(define_predicate "mcore_arith_reg_operand" |
(match_code "reg,subreg") |
{ |
if (! register_operand (op, mode)) |
return 0; |
|
if (GET_CODE (op) == SUBREG) |
op = SUBREG_REG (op); |
|
if (GET_CODE (op) == REG) |
return REGNO (op) != CC_REG; |
|
return 1; |
}) |
|
;; Nonzero if OP can be source of a simple move operation. |
|
(define_predicate "mcore_general_movsrc_operand" |
(match_code "mem,const_int,reg,subreg,symbol_ref,label_ref") |
{ |
/* Any (MEM LABEL_REF) is OK. That is a pc-relative load. */ |
if (GET_CODE (op) == MEM && GET_CODE (XEXP (op, 0)) == LABEL_REF) |
return 1; |
|
return general_operand (op, mode); |
}) |
|
;; Nonzero if OP can be destination of a simple move operation. |
|
(define_predicate "mcore_general_movdst_operand" |
(match_code "mem,const_int,reg,subreg") |
{ |
if (GET_CODE (op) == REG && REGNO (op) == CC_REG) |
return 0; |
|
return general_operand (op, mode); |
}) |
|
;; Nonzero if OP should be recognized during reload for an ixh/ixw |
;; operand. See the ixh/ixw patterns. |
|
(define_predicate "mcore_reload_operand" |
(match_code "mem,reg,subreg") |
{ |
if (mcore_arith_reg_operand (op, mode)) |
return 1; |
|
if (! reload_in_progress) |
return 0; |
|
return GET_CODE (op) == MEM; |
}) |
|
;; Nonzero if OP is a valid source operand for an arithmetic insn. |
|
(define_predicate "mcore_arith_J_operand" |
(match_code "const_int,reg,subreg") |
{ |
if (register_operand (op, mode)) |
return 1; |
|
if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_J (INTVAL (op))) |
return 1; |
|
return 0; |
}) |
|
;; Nonzero if OP is a valid source operand for an arithmetic insn. |
|
(define_predicate "mcore_arith_K_operand" |
(match_code "const_int,reg,subreg") |
{ |
if (register_operand (op, mode)) |
return 1; |
|
if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_K (INTVAL (op))) |
return 1; |
|
return 0; |
}) |
|
;; Nonzero if OP is a valid source operand for a shift or rotate insn. |
|
(define_predicate "mcore_arith_K_operand_not_0" |
(match_code "const_int,reg,subreg") |
{ |
if (register_operand (op, mode)) |
return 1; |
|
if ( GET_CODE (op) == CONST_INT |
&& CONST_OK_FOR_K (INTVAL (op)) |
&& INTVAL (op) != 0) |
return 1; |
|
return 0; |
}) |
|
;; TODO: Add a comment here. |
|
(define_predicate "mcore_arith_M_operand" |
(match_code "const_int,reg,subreg") |
{ |
if (register_operand (op, mode)) |
return 1; |
|
if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (INTVAL (op))) |
return 1; |
|
return 0; |
}) |
|
;; TODO: Add a comment here. |
|
(define_predicate "mcore_arith_K_S_operand" |
(match_code "const_int,reg,subreg") |
{ |
if (register_operand (op, mode)) |
return 1; |
|
if (GET_CODE (op) == CONST_INT) |
{ |
if (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_M (~INTVAL (op))) |
return 1; |
} |
|
return 0; |
}) |
|
;; Nonzero if OP is a valid source operand for a cmov with two consts |
;; +/- 1. |
|
(define_predicate "mcore_arith_O_operand" |
(match_code "const_int,reg,subreg") |
{ |
if (register_operand (op, mode)) |
return 1; |
|
if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_O (INTVAL (op))) |
return 1; |
|
return 0; |
}) |
|
;; Nonzero if OP is a valid source operand for loading. |
|
(define_predicate "mcore_arith_imm_operand" |
(match_code "const_int,reg,subreg") |
{ |
if (register_operand (op, mode)) |
return 1; |
|
if (GET_CODE (op) == CONST_INT && const_ok_for_mcore (INTVAL (op))) |
return 1; |
|
return 0; |
}) |
|
;; TODO: Add a comment here. |
|
(define_predicate "mcore_arith_any_imm_operand" |
(match_code "const_int,reg,subreg") |
{ |
if (register_operand (op, mode)) |
return 1; |
|
if (GET_CODE (op) == CONST_INT) |
return 1; |
|
return 0; |
}) |
|
;; Nonzero if OP is a valid source operand for a btsti. |
|
(define_predicate "mcore_literal_K_operand" |
(match_code "const_int") |
{ |
if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_K (INTVAL (op))) |
return 1; |
|
return 0; |
}) |
|
;; Nonzero if OP is a valid source operand for an add/sub insn. |
|
(define_predicate "mcore_addsub_operand" |
(match_code "const_int,reg,subreg") |
{ |
if (register_operand (op, mode)) |
return 1; |
|
if (GET_CODE (op) == CONST_INT) |
{ |
return 1; |
|
/* The following is removed because it precludes large constants from being |
returned as valid source operands for an add/sub insn. While large |
constants may not directly be used in an add/sub, they may if first loaded |
into a register. Thus, this predicate should indicate that they are valid, |
and the constraint in mcore.md should control whether an additional load to |
register is needed. (see mcore.md, addsi). -- DAC 4/2/1998 */ |
/* |
if (CONST_OK_FOR_J(INTVAL(op)) || CONST_OK_FOR_L(INTVAL(op))) |
return 1; |
*/ |
} |
|
return 0; |
}) |
|
;; Nonzero if OP is a valid source operand for a compare operation. |
|
(define_predicate "mcore_compare_operand" |
(match_code "const_int,reg,subreg") |
{ |
if (register_operand (op, mode)) |
return 1; |
|
if (GET_CODE (op) == CONST_INT && INTVAL (op) == 0) |
return 1; |
|
return 0; |
}) |
|
;; Return 1 if OP is a load multiple operation. It is known to be a |
;; PARALLEL and the first section will be tested. |
|
(define_predicate "mcore_load_multiple_operation" |
(match_code "parallel") |
{ |
int count = XVECLEN (op, 0); |
int dest_regno; |
rtx src_addr; |
int i; |
|
/* Perform a quick check so we don't blow up below. */ |
if (count <= 1 |
|| GET_CODE (XVECEXP (op, 0, 0)) != SET |
|| GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != REG |
|| GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != MEM) |
return 0; |
|
dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, 0))); |
src_addr = XEXP (SET_SRC (XVECEXP (op, 0, 0)), 0); |
|
for (i = 1; i < count; i++) |
{ |
rtx elt = XVECEXP (op, 0, i); |
|
if (GET_CODE (elt) != SET |
|| GET_CODE (SET_DEST (elt)) != REG |
|| GET_MODE (SET_DEST (elt)) != SImode |
|| REGNO (SET_DEST (elt)) != (unsigned) (dest_regno + i) |
|| GET_CODE (SET_SRC (elt)) != MEM |
|| GET_MODE (SET_SRC (elt)) != SImode |
|| GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS |
|| ! rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr) |
|| GET_CODE (XEXP (XEXP (SET_SRC (elt), 0), 1)) != CONST_INT |
|| INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != i * 4) |
return 0; |
} |
|
return 1; |
}) |
|
;; Similar, but tests for store multiple. |
|
(define_predicate "mcore_store_multiple_operation" |
(match_code "parallel") |
{ |
int count = XVECLEN (op, 0); |
int src_regno; |
rtx dest_addr; |
int i; |
|
/* Perform a quick check so we don't blow up below. */ |
if (count <= 1 |
|| GET_CODE (XVECEXP (op, 0, 0)) != SET |
|| GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != MEM |
|| GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != REG) |
return 0; |
|
src_regno = REGNO (SET_SRC (XVECEXP (op, 0, 0))); |
dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, 0)), 0); |
|
for (i = 1; i < count; i++) |
{ |
rtx elt = XVECEXP (op, 0, i); |
|
if (GET_CODE (elt) != SET |
|| GET_CODE (SET_SRC (elt)) != REG |
|| GET_MODE (SET_SRC (elt)) != SImode |
|| REGNO (SET_SRC (elt)) != (unsigned) (src_regno + i) |
|| GET_CODE (SET_DEST (elt)) != MEM |
|| GET_MODE (SET_DEST (elt)) != SImode |
|| GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS |
|| ! rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr) |
|| GET_CODE (XEXP (XEXP (SET_DEST (elt), 0), 1)) != CONST_INT |
|| INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != i * 4) |
return 0; |
} |
|
return 1; |
}) |
|
;; TODO: Add a comment here. |
|
(define_predicate "mcore_call_address_operand" |
(match_code "reg,subreg,const_int,symbol_ref") |
{ |
return register_operand (op, mode) || CONSTANT_P (op); |
}) |
/mcore.md
0,0 → 1,3350
;; Machine description for the Motorola MCore |
;; Copyright (C) 1993, 1999, 2000, 2004, 2005, 2007 |
;; Free Software Foundation, Inc. |
;; Contributed by Motorola. |
|
;; This file is part of GCC. |
|
;; GCC is free software; you can redistribute it and/or modify |
;; it under the terms of the GNU General Public License as published by |
;; the Free Software Foundation; either version 3, or (at your option) |
;; any later version. |
|
;; GCC is distributed in the hope that it will be useful, |
;; but WITHOUT ANY WARRANTY; without even the implied warranty of |
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
;; GNU General Public License for more details. |
|
;; You should have received a copy of the GNU General Public License |
;; along with GCC; see the file COPYING3. If not see |
;; <http://www.gnu.org/licenses/>. |
|
;;- See file "rtl.def" for documentation on define_insn, match_*, et al. |
|
|
|
;; ------------------------------------------------------------------------- |
;; Attributes |
;; ------------------------------------------------------------------------- |
|
; Target CPU. |
|
(define_attr "type" "brcond,branch,jmp,load,store,move,alu,shift" |
(const_string "alu")) |
|
;; If a branch destination is within -2048..2047 bytes of the |
;; instruction, it can be 2 bytes long. All other conditional branches |
;; are 10 bytes long, and all other unconditional branches are 8 bytes. |
;; |
;; the assembler handles the long-branch span case for us if we use |
;; the "jb*" mnemonics for jumps/branches. This pushes the span |
;; calculations and the literal table placement into the assembler, |
;; where their interactions can be managed in a single place. |
|
;; All MCORE instructions are two bytes long. |
|
(define_attr "length" "" (const_int 2)) |
|
;; Scheduling. We only model a simple load latency. |
(define_insn_reservation "any_insn" 1 |
(eq_attr "type" "!load") |
"nothing") |
(define_insn_reservation "memory" 2 |
(eq_attr "type" "load") |
"nothing") |
|
(include "predicates.md") |
|
;; ------------------------------------------------------------------------- |
;; Test and bit test |
;; ------------------------------------------------------------------------- |
|
(define_insn "" |
[(set (reg:SI 17) |
(sign_extract:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r") |
(const_int 1) |
(match_operand:SI 1 "mcore_literal_K_operand" "K")))] |
"" |
"btsti %0,%1" |
[(set_attr "type" "shift")]) |
|
(define_insn "" |
[(set (reg:SI 17) |
(zero_extract:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r") |
(const_int 1) |
(match_operand:SI 1 "mcore_literal_K_operand" "K")))] |
"" |
"btsti %0,%1" |
[(set_attr "type" "shift")]) |
|
;;; This is created by combine. |
(define_insn "" |
[(set (reg:CC 17) |
(ne:CC (zero_extract:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r") |
(const_int 1) |
(match_operand:SI 1 "mcore_literal_K_operand" "K")) |
(const_int 0)))] |
"" |
"btsti %0,%1" |
[(set_attr "type" "shift")]) |
|
|
;; Created by combine from conditional patterns below (see sextb/btsti rx,31) |
|
(define_insn "" |
[(set (reg:CC 17) |
(ne:CC (lshiftrt:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r") |
(const_int 7)) |
(const_int 0)))] |
"GET_CODE(operands[0]) == SUBREG && |
GET_MODE(SUBREG_REG(operands[0])) == QImode" |
"btsti %0,7" |
[(set_attr "type" "shift")]) |
|
(define_insn "" |
[(set (reg:CC 17) |
(ne:CC (lshiftrt:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r") |
(const_int 15)) |
(const_int 0)))] |
"GET_CODE(operands[0]) == SUBREG && |
GET_MODE(SUBREG_REG(operands[0])) == HImode" |
"btsti %0,15" |
[(set_attr "type" "shift")]) |
|
(define_split |
[(set (pc) |
(if_then_else (ne (eq:CC (zero_extract:SI |
(match_operand:SI 0 "mcore_arith_reg_operand" "") |
(const_int 1) |
(match_operand:SI 1 "mcore_literal_K_operand" "")) |
(const_int 0)) |
(const_int 0)) |
(label_ref (match_operand 2 "" "")) |
(pc)))] |
"" |
[(set (reg:CC 17) |
(zero_extract:SI (match_dup 0) (const_int 1) (match_dup 1))) |
(set (pc) (if_then_else (eq (reg:CC 17) (const_int 0)) |
(label_ref (match_dup 2)) |
(pc)))] |
"") |
|
(define_split |
[(set (pc) |
(if_then_else (eq (ne:CC (zero_extract:SI |
(match_operand:SI 0 "mcore_arith_reg_operand" "") |
(const_int 1) |
(match_operand:SI 1 "mcore_literal_K_operand" "")) |
(const_int 0)) |
(const_int 0)) |
(label_ref (match_operand 2 "" "")) |
(pc)))] |
"" |
[(set (reg:CC 17) |
(zero_extract:SI (match_dup 0) (const_int 1) (match_dup 1))) |
(set (pc) (if_then_else (eq (reg:CC 17) (const_int 0)) |
(label_ref (match_dup 2)) |
(pc)))] |
"") |
|
;; XXX - disabled by nickc because it fails on libiberty/fnmatch.c |
;; |
;; ; Experimental - relax immediates for and, andn, or, and tst to allow |
;; ; any immediate value (or an immediate at all -- or, andn, & tst). |
;; ; This is done to allow bit field masks to fold together in combine. |
;; ; The reload phase will force the immediate into a register at the |
;; ; very end. This helps in some cases, but hurts in others: we'd |
;; ; really like to cse these immediates. However, there is a phase |
;; ; ordering problem here. cse picks up individual masks and cse's |
;; ; those, but not folded masks (cse happens before combine). It's |
;; ; not clear what the best solution is because we really want cse |
;; ; before combine (leaving the bit field masks alone). To pick up |
;; ; relaxed immediates use -mrelax-immediates. It might take some |
;; ; experimenting to see which does better (i.e. regular imms vs. |
;; ; arbitrary imms) for a particular code. BRC |
;; |
;; (define_insn "" |
;; [(set (reg:CC 17) |
;; (ne:CC (and:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r") |
;; (match_operand:SI 1 "mcore_arith_any_imm_operand" "rI")) |
;; (const_int 0)))] |
;; "TARGET_RELAX_IMM" |
;; "tst %0,%1") |
;; |
;; (define_insn "" |
;; [(set (reg:CC 17) |
;; (ne:CC (and:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r") |
;; (match_operand:SI 1 "mcore_arith_M_operand" "r")) |
;; (const_int 0)))] |
;; "!TARGET_RELAX_IMM" |
;; "tst %0,%1") |
|
(define_insn "" |
[(set (reg:CC 17) |
(ne:CC (and:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r") |
(match_operand:SI 1 "mcore_arith_M_operand" "r")) |
(const_int 0)))] |
"" |
"tst %0,%1") |
|
|
(define_split |
[(parallel[ |
(set (reg:CC 17) |
(ne:CC (ne:SI (leu:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r") |
(match_operand:SI 1 "mcore_arith_reg_operand" "r")) |
(const_int 0)) |
(const_int 0))) |
(clobber (match_operand:CC 2 "mcore_arith_reg_operand" "=r"))])] |
"" |
[(set (reg:CC 17) (ne:SI (match_dup 0) (const_int 0))) |
(set (reg:CC 17) (leu:CC (match_dup 0) (match_dup 1)))]) |
|
;; ------------------------------------------------------------------------- |
;; SImode signed integer comparisons |
;; ------------------------------------------------------------------------- |
|
(define_insn "decne_t" |
[(set (reg:CC 17) (ne:CC (plus:SI (match_operand:SI 0 "mcore_arith_reg_operand" "+r") |
(const_int -1)) |
(const_int 0))) |
(set (match_dup 0) |
(plus:SI (match_dup 0) |
(const_int -1)))] |
"" |
"decne %0") |
|
;; The combiner seems to prefer the following to the former. |
;; |
(define_insn "" |
[(set (reg:CC 17) (ne:CC (match_operand:SI 0 "mcore_arith_reg_operand" "+r") |
(const_int 1))) |
(set (match_dup 0) |
(plus:SI (match_dup 0) |
(const_int -1)))] |
"" |
"decne %0") |
|
(define_insn "cmpnesi_t" |
[(set (reg:CC 17) (ne:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r") |
(match_operand:SI 1 "mcore_arith_reg_operand" "r")))] |
"" |
"cmpne %0,%1") |
|
(define_insn "cmpneisi_t" |
[(set (reg:CC 17) (ne:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r") |
(match_operand:SI 1 "mcore_arith_K_operand" "K")))] |
"" |
"cmpnei %0,%1") |
|
(define_insn "cmpgtsi_t" |
[(set (reg:CC 17) (gt:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r") |
(match_operand:SI 1 "mcore_arith_reg_operand" "r")))] |
"" |
"cmplt %1,%0") |
|
(define_insn "" |
[(set (reg:CC 17) (gt:CC (plus:SI |
(match_operand:SI 0 "mcore_arith_reg_operand" "+r") |
(const_int -1)) |
(const_int 0))) |
(set (match_dup 0) (plus:SI (match_dup 0) (const_int -1)))] |
"" |
"decgt %0") |
|
(define_insn "cmpltsi_t" |
[(set (reg:CC 17) (lt:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r") |
(match_operand:SI 1 "mcore_arith_reg_operand" "r")))] |
"" |
"cmplt %0,%1") |
|
; cmplti is 1-32 |
(define_insn "cmpltisi_t" |
[(set (reg:CC 17) (lt:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r") |
(match_operand:SI 1 "mcore_arith_J_operand" "J")))] |
"" |
"cmplti %0,%1") |
|
; covers cmplti x,0 |
(define_insn "" |
[(set (reg:CC 17) (lt:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r") |
(const_int 0)))] |
"" |
"btsti %0,31") |
|
(define_insn "" |
[(set (reg:CC 17) (lt:CC (plus:SI |
(match_operand:SI 0 "mcore_arith_reg_operand" "+r") |
(const_int -1)) |
(const_int 0))) |
(set (match_dup 0) (plus:SI (match_dup 0) (const_int -1)))] |
"" |
"declt %0") |
|
;; ------------------------------------------------------------------------- |
;; SImode unsigned integer comparisons |
;; ------------------------------------------------------------------------- |
|
(define_insn "cmpgeusi_t" |
[(set (reg:CC 17) (geu:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r") |
(match_operand:SI 1 "mcore_arith_reg_operand" "r")))] |
"" |
"cmphs %0,%1") |
|
(define_insn "cmpgeusi_0" |
[(set (reg:CC 17) (geu:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r") |
(const_int 0)))] |
"" |
"cmpnei %0, 0") |
|
(define_insn "cmpleusi_t" |
[(set (reg:CC 17) (leu:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r") |
(match_operand:SI 1 "mcore_arith_reg_operand" "r")))] |
"" |
"cmphs %1,%0") |
|
;; We save the compare operands in the cmpxx patterns and use them when |
;; we generate the branch. |
|
;; We accept constants here, in case we can modify them to ones which |
;; are more efficient to load. E.g. change 'x <= 62' to 'x < 63'. |
|
(define_expand "cmpsi" |
[(set (reg:CC 17) (compare:CC (match_operand:SI 0 "mcore_compare_operand" "") |
(match_operand:SI 1 "nonmemory_operand" "")))] |
"" |
" |
{ arch_compare_op0 = operands[0]; |
arch_compare_op1 = operands[1]; |
DONE; |
}") |
|
;; ------------------------------------------------------------------------- |
;; Logical operations |
;; ------------------------------------------------------------------------- |
|
;; Logical AND clearing a single bit. andsi3 knows that we have this |
;; pattern and allows the constant literal to pass through. |
;; |
|
;; RBE 2/97: don't need this pattern any longer... |
;; RBE: I don't think we need both "S" and exact_log2() clauses. |
;;(define_insn "" |
;; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
;; (and:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0") |
;; (match_operand:SI 2 "const_int_operand" "S")))] |
;; "mcore_arith_S_operand (operands[2])" |
;; "bclri %0,%Q2") |
;; |
|
(define_insn "andnsi3" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
(and:SI (not:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r")) |
(match_operand:SI 2 "mcore_arith_reg_operand" "0")))] |
"" |
"andn %0,%1") |
|
(define_expand "andsi3" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "") |
(and:SI (match_operand:SI 1 "mcore_arith_reg_operand" "") |
(match_operand:SI 2 "nonmemory_operand" "")))] |
"" |
" |
{ |
if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) < 0 |
&& ! mcore_arith_S_operand (operands[2])) |
{ |
int not_value = ~ INTVAL (operands[2]); |
if ( CONST_OK_FOR_I (not_value) |
|| CONST_OK_FOR_M (not_value) |
|| CONST_OK_FOR_N (not_value)) |
{ |
operands[2] = copy_to_mode_reg (SImode, GEN_INT (not_value)); |
emit_insn (gen_andnsi3 (operands[0], operands[2], operands[1])); |
DONE; |
} |
} |
|
if (! mcore_arith_K_S_operand (operands[2], SImode)) |
operands[2] = copy_to_mode_reg (SImode, operands[2]); |
}") |
|
(define_insn "" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r") |
(and:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,0,r,0") |
(match_operand:SI 2 "mcore_arith_any_imm_operand" "r,K,0,S")))] |
"TARGET_RELAX_IMM" |
"* |
{ |
switch (which_alternative) |
{ |
case 0: return \"and %0,%2\"; |
case 1: return \"andi %0,%2\"; |
case 2: return \"and %0,%1\"; |
/* case -1: return \"bclri %0,%Q2\"; will not happen */ |
case 3: return mcore_output_bclri (operands[0], INTVAL (operands[2])); |
default: gcc_unreachable (); |
} |
}") |
|
;; This was the old "S" which was "!(2^n)" */ |
;; case -1: return \"bclri %0,%Q2\"; will not happen */ |
|
(define_insn "" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r") |
(and:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,0,r,0") |
(match_operand:SI 2 "mcore_arith_K_S_operand" "r,K,0,S")))] |
"!TARGET_RELAX_IMM" |
"* |
{ |
switch (which_alternative) |
{ |
case 0: return \"and %0,%2\"; |
case 1: return \"andi %0,%2\"; |
case 2: return \"and %0,%1\"; |
case 3: return mcore_output_bclri (operands[0], INTVAL (operands[2])); |
default: gcc_unreachable (); |
} |
}") |
|
;(define_insn "iorsi3" |
; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
; (ior:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0") |
; (match_operand:SI 2 "mcore_arith_reg_operand" "r")))] |
; "" |
; "or %0,%2") |
|
; need an expand to resolve the ambiguity between the two iors below. |
(define_expand "iorsi3" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "") |
(ior:SI (match_operand:SI 1 "mcore_arith_reg_operand" "") |
(match_operand:SI 2 "nonmemory_operand" "")))] |
"" |
" |
{ |
if (! mcore_arith_M_operand (operands[2], SImode)) |
operands[2] = copy_to_mode_reg (SImode, operands[2]); |
}") |
|
(define_insn "" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r") |
(ior:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0,0,0") |
(match_operand:SI 2 "mcore_arith_any_imm_operand" "r,M,T")))] |
"TARGET_RELAX_IMM" |
"* |
{ |
switch (which_alternative) |
{ |
case 0: return \"or %0,%2\"; |
case 1: return \"bseti %0,%P2\"; |
case 2: return mcore_output_bseti (operands[0], INTVAL (operands[2])); |
default: gcc_unreachable (); |
} |
}") |
|
(define_insn "" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r") |
(ior:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0,0,0") |
(match_operand:SI 2 "mcore_arith_M_operand" "r,M,T")))] |
"!TARGET_RELAX_IMM" |
"* |
{ |
switch (which_alternative) |
{ |
case 0: return \"or %0,%2\"; |
case 1: return \"bseti %0,%P2\"; |
case 2: return mcore_output_bseti (operands[0], INTVAL (operands[2])); |
default: gcc_unreachable (); |
} |
}") |
|
;(define_insn "" |
; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
; (ior:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0") |
; (match_operand:SI 2 "const_int_operand" "M")))] |
; "exact_log2 (INTVAL (operands[2])) >= 0" |
; "bseti %0,%P2") |
|
;(define_insn "" |
; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
; (ior:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0") |
; (match_operand:SI 2 "const_int_operand" "i")))] |
; "mcore_num_ones (INTVAL (operands[2])) < 3" |
; "* return mcore_output_bseti (operands[0], INTVAL (operands[2]));") |
|
(define_insn "xorsi3" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
(xor:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0") |
(match_operand:SI 2 "mcore_arith_reg_operand" "r")))] |
"" |
"xor %0,%2") |
|
; these patterns give better code than GCC invents if |
; left to its own devices |
|
(define_insn "anddi3" |
[(set (match_operand:DI 0 "mcore_arith_reg_operand" "=r") |
(and:DI (match_operand:DI 1 "mcore_arith_reg_operand" "%0") |
(match_operand:DI 2 "mcore_arith_reg_operand" "r")))] |
"" |
"and %0,%2\;and %R0,%R2" |
[(set_attr "length" "4")]) |
|
(define_insn "iordi3" |
[(set (match_operand:DI 0 "mcore_arith_reg_operand" "=r") |
(ior:DI (match_operand:DI 1 "mcore_arith_reg_operand" "%0") |
(match_operand:DI 2 "mcore_arith_reg_operand" "r")))] |
"" |
"or %0,%2\;or %R0,%R2" |
[(set_attr "length" "4")]) |
|
(define_insn "xordi3" |
[(set (match_operand:DI 0 "mcore_arith_reg_operand" "=r") |
(xor:DI (match_operand:DI 1 "mcore_arith_reg_operand" "%0") |
(match_operand:DI 2 "mcore_arith_reg_operand" "r")))] |
"" |
"xor %0,%2\;xor %R0,%R2" |
[(set_attr "length" "4")]) |
|
;; ------------------------------------------------------------------------- |
;; Shifts and rotates |
;; ------------------------------------------------------------------------- |
|
;; Only allow these if the shift count is a convenient constant. |
(define_expand "rotlsi3" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "") |
(rotate:SI (match_operand:SI 1 "mcore_arith_reg_operand" "") |
(match_operand:SI 2 "nonmemory_operand" "")))] |
"" |
"if (! mcore_literal_K_operand (operands[2], SImode)) |
FAIL; |
") |
|
;; We can only do constant rotates, which is what this pattern provides. |
;; The combiner will put it together for us when we do: |
;; (x << N) | (x >> (32 - N)) |
(define_insn "" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
(rotate:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0") |
(match_operand:SI 2 "mcore_literal_K_operand" "K")))] |
"" |
"rotli %0,%2" |
[(set_attr "type" "shift")]) |
|
(define_insn "ashlsi3" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r") |
(ashift:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,0") |
(match_operand:SI 2 "mcore_arith_K_operand_not_0" "r,K")))] |
"" |
"@ |
lsl %0,%2 |
lsli %0,%2" |
[(set_attr "type" "shift")]) |
|
(define_insn "" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
(ashift:SI (const_int 1) |
(match_operand:SI 1 "mcore_arith_reg_operand" "r")))] |
"" |
"bgenr %0,%1" |
[(set_attr "type" "shift")]) |
|
(define_insn "ashrsi3" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r") |
(ashiftrt:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,0") |
(match_operand:SI 2 "mcore_arith_K_operand_not_0" "r,K")))] |
"" |
"@ |
asr %0,%2 |
asri %0,%2" |
[(set_attr "type" "shift")]) |
|
(define_insn "lshrsi3" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r") |
(lshiftrt:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,0") |
(match_operand:SI 2 "mcore_arith_K_operand_not_0" "r,K")))] |
"" |
"@ |
lsr %0,%2 |
lsri %0,%2" |
[(set_attr "type" "shift")]) |
|
;(define_expand "ashldi3" |
; [(parallel[(set (match_operand:DI 0 "mcore_arith_reg_operand" "") |
; (ashift:DI (match_operand:DI 1 "mcore_arith_reg_operand" "") |
; (match_operand:DI 2 "immediate_operand" ""))) |
; |
; (clobber (reg:CC 17))])] |
; |
; "" |
; " |
;{ |
; if (GET_CODE (operands[2]) != CONST_INT |
; || INTVAL (operands[2]) != 1) |
; FAIL; |
;}") |
; |
;(define_insn "" |
; [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=r") |
; (ashift:DI (match_operand:DI 1 "mcore_arith_reg_operand" "0") |
; (const_int 1))) |
; (clobber (reg:CC 17))] |
; "" |
; "lsli %R0,0\;rotli %0,0" |
; [(set_attr "length" "4") (set_attr "type" "shift")]) |
|
;; ------------------------------------------------------------------------- |
;; Index instructions |
;; ------------------------------------------------------------------------- |
;; The second of each set of patterns is borrowed from the alpha.md file. |
;; These variants of the above insns can occur if the second operand |
;; is the frame pointer. This is a kludge, but there doesn't |
;; seem to be a way around it. Only recognize them while reloading. |
|
;; We must use reload_operand for some operands in case frame pointer |
;; elimination put a MEM with invalid address there. Otherwise, |
;; the result of the substitution will not match this pattern, and reload |
;; will not be able to correctly fix the result. |
|
;; indexing longlongs or doubles (8 bytes) |
|
(define_insn "indexdi_t" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
(plus:SI (mult:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r") |
(const_int 8)) |
(match_operand:SI 2 "mcore_arith_reg_operand" "0")))] |
"" |
"* |
if (! mcore_is_same_reg (operands[1], operands[2])) |
{ |
output_asm_insn (\"ixw\\t%0,%1\", operands); |
output_asm_insn (\"ixw\\t%0,%1\", operands); |
} |
else |
{ |
output_asm_insn (\"ixh\\t%0,%1\", operands); |
output_asm_insn (\"ixh\\t%0,%1\", operands); |
} |
return \"\"; |
" |
;; if operands[1] == operands[2], the first option above is wrong! -- dac |
;; was this... -- dac |
;; ixw %0,%1\;ixw %0,%1" |
|
[(set_attr "length" "4")]) |
|
(define_insn "" |
[(set (match_operand:SI 0 "mcore_reload_operand" "=r,r,r") |
(plus:SI (plus:SI (mult:SI (match_operand:SI 1 "mcore_reload_operand" "r,r,r") |
(const_int 8)) |
(match_operand:SI 2 "mcore_arith_reg_operand" "0,0,0")) |
(match_operand:SI 3 "mcore_addsub_operand" "r,J,L")))] |
"reload_in_progress" |
"@ |
ixw %0,%1\;ixw %0,%1\;addu %0,%3 |
ixw %0,%1\;ixw %0,%1\;addi %0,%3 |
ixw %0,%1\;ixw %0,%1\;subi %0,%M3" |
[(set_attr "length" "6")]) |
|
;; indexing longs (4 bytes) |
|
(define_insn "indexsi_t" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
(plus:SI (mult:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r") |
(const_int 4)) |
(match_operand:SI 2 "mcore_arith_reg_operand" "0")))] |
"" |
"ixw %0,%1") |
|
(define_insn "" |
[(set (match_operand:SI 0 "mcore_reload_operand" "=r,r,r") |
(plus:SI (plus:SI (mult:SI (match_operand:SI 1 "mcore_reload_operand" "r,r,r") |
(const_int 4)) |
(match_operand:SI 2 "mcore_arith_reg_operand" "0,0,0")) |
(match_operand:SI 3 "mcore_addsub_operand" "r,J,L")))] |
"reload_in_progress" |
"@ |
ixw %0,%1\;addu %0,%3 |
ixw %0,%1\;addi %0,%3 |
ixw %0,%1\;subi %0,%M3" |
[(set_attr "length" "4")]) |
|
;; indexing shorts (2 bytes) |
|
(define_insn "indexhi_t" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
(plus:SI (mult:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r") |
(const_int 2)) |
(match_operand:SI 2 "mcore_arith_reg_operand" "0")))] |
"" |
"ixh %0,%1") |
|
(define_insn "" |
[(set (match_operand:SI 0 "mcore_reload_operand" "=r,r,r") |
(plus:SI (plus:SI (mult:SI (match_operand:SI 1 "mcore_reload_operand" "r,r,r") |
(const_int 2)) |
(match_operand:SI 2 "mcore_arith_reg_operand" "0,0,0")) |
(match_operand:SI 3 "mcore_addsub_operand" "r,J,L")))] |
"reload_in_progress" |
"@ |
ixh %0,%1\;addu %0,%3 |
ixh %0,%1\;addi %0,%3 |
ixh %0,%1\;subi %0,%M3" |
[(set_attr "length" "4")]) |
|
;; |
;; Other sizes may be handy for indexing. |
;; The tradeoffs to consider when adding these are |
;; code size, execution time (vs. mul it is easy to win), |
;; register pressure (these patterns don't use an extra |
;; register to build the offset from the base), |
;; and whether the compiler will come up with some other idiom anyway. |
;; |
|
;; ------------------------------------------------------------------------- |
;; Addition, Subtraction instructions |
;; ------------------------------------------------------------------------- |
|
(define_expand "addsi3" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "") |
(plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "") |
(match_operand:SI 2 "nonmemory_operand" "")))] |
"" |
" |
{ |
extern int flag_omit_frame_pointer; |
|
/* If this is an add to the frame pointer, then accept it as is so |
that we can later fold in the fp/sp offset from frame pointer |
elimination. */ |
if (flag_omit_frame_pointer |
&& GET_CODE (operands[1]) == REG |
&& (REGNO (operands[1]) == VIRTUAL_STACK_VARS_REGNUM |
|| REGNO (operands[1]) == FRAME_POINTER_REGNUM)) |
{ |
emit_insn (gen_addsi3_fp (operands[0], operands[1], operands[2])); |
DONE; |
} |
|
/* Convert adds to subtracts if this makes loading the constant cheaper. |
But only if we are allowed to generate new pseudos. */ |
if (! (reload_in_progress || reload_completed) |
&& GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) < -32) |
{ |
int neg_value = - INTVAL (operands[2]); |
if ( CONST_OK_FOR_I (neg_value) |
|| CONST_OK_FOR_M (neg_value) |
|| CONST_OK_FOR_N (neg_value)) |
{ |
operands[2] = copy_to_mode_reg (SImode, GEN_INT (neg_value)); |
emit_insn (gen_subsi3 (operands[0], operands[1], operands[2])); |
DONE; |
} |
} |
|
if (! mcore_addsub_operand (operands[2], SImode)) |
operands[2] = copy_to_mode_reg (SImode, operands[2]); |
}") |
|
;; RBE: for some constants which are not in the range which allows |
;; us to do a single operation, we will try a paired addi/addi instead |
;; of a movi/addi. This relieves some register pressure at the expense |
;; of giving away some potential constant reuse. |
;; |
;; RBE 6/17/97: this didn't buy us anything, but I keep the pattern |
;; for later reference |
;; |
;; (define_insn "addsi3_i2" |
;; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
;; (plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0") |
;; (match_operand:SI 2 "const_int_operand" "g")))] |
;; "GET_CODE(operands[2]) == CONST_INT |
;; && ((INTVAL (operands[2]) > 32 && INTVAL(operands[2]) <= 64) |
;; || (INTVAL (operands[2]) < -32 && INTVAL(operands[2]) >= -64))" |
;; "* |
;; { |
;; int n = INTVAL(operands[2]); |
;; if (n > 0) |
;; { |
;; operands[2] = GEN_INT(n - 32); |
;; return \"addi\\t%0,32\;addi\\t%0,%2\"; |
;; } |
;; else |
;; { |
;; n = (-n); |
;; operands[2] = GEN_INT(n - 32); |
;; return \"subi\\t%0,32\;subi\\t%0,%2\"; |
;; } |
;; }" |
;; [(set_attr "length" "4")]) |
|
(define_insn "addsi3_i" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r") |
(plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0,0,0") |
(match_operand:SI 2 "mcore_addsub_operand" "r,J,L")))] |
"" |
"@ |
addu %0,%2 |
addi %0,%2 |
subi %0,%M2") |
|
;; This exists so that address computations based on the frame pointer |
;; can be folded in when frame pointer elimination occurs. Ordinarily |
;; this would be bad because it allows insns which would require reloading, |
;; but without it, we get multiple adds where one would do. |
|
(define_insn "addsi3_fp" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r") |
(plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0,0,0") |
(match_operand:SI 2 "immediate_operand" "r,J,L")))] |
"flag_omit_frame_pointer |
&& (reload_in_progress || reload_completed || REGNO (operands[1]) == FRAME_POINTER_REGNUM)" |
"@ |
addu %0,%2 |
addi %0,%2 |
subi %0,%M2") |
|
;; RBE: for some constants which are not in the range which allows |
;; us to do a single operation, we will try a paired addi/addi instead |
;; of a movi/addi. This relieves some register pressure at the expense |
;; of giving away some potential constant reuse. |
;; |
;; RBE 6/17/97: this didn't buy us anything, but I keep the pattern |
;; for later reference |
;; |
;; (define_insn "subsi3_i2" |
;; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
;; (plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0") |
;; (match_operand:SI 2 "const_int_operand" "g")))] |
;; "TARGET_RBETEST && GET_CODE(operands[2]) == CONST_INT |
;; && ((INTVAL (operands[2]) > 32 && INTVAL(operands[2]) <= 64) |
;; || (INTVAL (operands[2]) < -32 && INTVAL(operands[2]) >= -64))" |
;; "* |
;; { |
;; int n = INTVAL(operands[2]); |
;; if ( n > 0) |
;; { |
;; operands[2] = GEN_INT( n - 32); |
;; return \"subi\\t%0,32\;subi\\t%0,%2\"; |
;; } |
;; else |
;; { |
;; n = (-n); |
;; operands[2] = GEN_INT(n - 32); |
;; return \"addi\\t%0,32\;addi\\t%0,%2\"; |
;; } |
;; }" |
;; [(set_attr "length" "4")]) |
|
;(define_insn "subsi3" |
; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r") |
; (minus:SI (match_operand:SI 1 "mcore_arith_K_operand" "0,0,r,K") |
; (match_operand:SI 2 "mcore_arith_J_operand" "r,J,0,0")))] |
; "" |
; "@ |
; sub %0,%2 |
; subi %0,%2 |
; rsub %0,%1 |
; rsubi %0,%1") |
|
(define_insn "subsi3" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r") |
(minus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,0,r") |
(match_operand:SI 2 "mcore_arith_J_operand" "r,J,0")))] |
"" |
"@ |
subu %0,%2 |
subi %0,%2 |
rsub %0,%1") |
|
(define_insn "" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
(minus:SI (match_operand:SI 1 "mcore_literal_K_operand" "K") |
(match_operand:SI 2 "mcore_arith_reg_operand" "0")))] |
"" |
"rsubi %0,%1") |
|
(define_insn "adddi3" |
[(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r") |
(plus:DI (match_operand:DI 1 "mcore_arith_reg_operand" "%0") |
(match_operand:DI 2 "mcore_arith_reg_operand" "r"))) |
(clobber (reg:CC 17))] |
"" |
"* |
{ |
if (TARGET_LITTLE_END) |
return \"cmplt %0,%0\;addc %0,%2\;addc %R0,%R2\"; |
return \"cmplt %R0,%R0\;addc %R0,%R2\;addc %0,%2\"; |
}" |
[(set_attr "length" "6")]) |
|
;; special case for "longlong += 1" |
(define_insn "" |
[(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r") |
(plus:DI (match_operand:DI 1 "mcore_arith_reg_operand" "0") |
(const_int 1))) |
(clobber (reg:CC 17))] |
"" |
"* |
{ |
if (TARGET_LITTLE_END) |
return \"addi %0,1\;cmpnei %0,0\;incf %R0\"; |
return \"addi %R0,1\;cmpnei %R0,0\;incf %0\"; |
}" |
[(set_attr "length" "6")]) |
|
;; special case for "longlong -= 1" |
(define_insn "" |
[(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r") |
(plus:DI (match_operand:DI 1 "mcore_arith_reg_operand" "0") |
(const_int -1))) |
(clobber (reg:CC 17))] |
"" |
"* |
{ |
if (TARGET_LITTLE_END) |
return \"cmpnei %0,0\;decf %R0\;subi %0,1\"; |
return \"cmpnei %R0,0\;decf %0\;subi %R0,1\"; |
}" |
[(set_attr "length" "6")]) |
|
;; special case for "longlong += const_int" |
;; we have to use a register for the const_int because we don't |
;; have an unsigned compare immediate... only +/- 1 get to |
;; play the no-extra register game because they compare with 0. |
;; This winds up working out for any literal that is synthesized |
;; with a single instruction. The more complicated ones look |
;; like they get broken into subregs that get initialized too soon |
;; for us to catch here. -- RBE 4/25/96 |
;; only allow for-sure positive values. |
|
(define_insn "" |
[(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r") |
(plus:DI (match_operand:DI 1 "mcore_arith_reg_operand" "0") |
(match_operand:SI 2 "const_int_operand" "r"))) |
(clobber (reg:CC 17))] |
"GET_CODE (operands[2]) == CONST_INT |
&& INTVAL (operands[2]) > 0 && ! (INTVAL (operands[2]) & 0x80000000)" |
"* |
{ |
gcc_assert (GET_MODE (operands[2]) == SImode); |
if (TARGET_LITTLE_END) |
return \"addu %0,%2\;cmphs %0,%2\;incf %R0\"; |
return \"addu %R0,%2\;cmphs %R0,%2\;incf %0\"; |
}" |
[(set_attr "length" "6")]) |
|
;; optimize "long long" + "unsigned long" |
;; won't trigger because of how the extension is expanded upstream. |
;; (define_insn "" |
;; [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r") |
;; (plus:DI (match_operand:DI 1 "mcore_arith_reg_operand" "%0") |
;; (zero_extend:DI (match_operand:SI 2 "mcore_arith_reg_operand" "r")))) |
;; (clobber (reg:CC 17))] |
;; "0" |
;; "cmplt %R0,%R0\;addc %R0,%2\;inct %0" |
;; [(set_attr "length" "6")]) |
|
;; optimize "long long" + "signed long" |
;; won't trigger because of how the extension is expanded upstream. |
;; (define_insn "" |
;; [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r") |
;; (plus:DI (match_operand:DI 1 "mcore_arith_reg_operand" "%0") |
;; (sign_extend:DI (match_operand:SI 2 "mcore_arith_reg_operand" "r")))) |
;; (clobber (reg:CC 17))] |
;; "0" |
;; "cmplt %R0,%R0\;addc %R0,%2\;inct %0\;btsti %2,31\;dect %0" |
;; [(set_attr "length" "6")]) |
|
(define_insn "subdi3" |
[(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r") |
(minus:DI (match_operand:DI 1 "mcore_arith_reg_operand" "0") |
(match_operand:DI 2 "mcore_arith_reg_operand" "r"))) |
(clobber (reg:CC 17))] |
"" |
"* |
{ |
if (TARGET_LITTLE_END) |
return \"cmphs %0,%0\;subc %0,%2\;subc %R0,%R2\"; |
return \"cmphs %R0,%R0\;subc %R0,%R2\;subc %0,%2\"; |
}" |
[(set_attr "length" "6")]) |
|
;; ------------------------------------------------------------------------- |
;; Multiplication instructions |
;; ------------------------------------------------------------------------- |
|
(define_insn "mulsi3" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
(mult:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0") |
(match_operand:SI 2 "mcore_arith_reg_operand" "r")))] |
"" |
"mult %0,%2") |
|
;; |
;; 32/32 signed division -- added to the MCORE instruction set spring 1997 |
;; |
;; Different constraints based on the architecture revision... |
;; |
(define_expand "divsi3" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "") |
(div:SI (match_operand:SI 1 "mcore_arith_reg_operand" "") |
(match_operand:SI 2 "mcore_arith_reg_operand" "")))] |
"TARGET_DIV" |
"") |
|
;; MCORE Revision 1.50: restricts the divisor to be in r1. (6/97) |
;; |
(define_insn "" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
(div:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0") |
(match_operand:SI 2 "mcore_arith_reg_operand" "b")))] |
"TARGET_DIV" |
"divs %0,%2") |
|
;; |
;; 32/32 unsigned division -- added to the MCORE instruction set spring 1997 |
;; |
;; Different constraints based on the architecture revision... |
;; |
(define_expand "udivsi3" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "") |
(udiv:SI (match_operand:SI 1 "mcore_arith_reg_operand" "") |
(match_operand:SI 2 "mcore_arith_reg_operand" "")))] |
"TARGET_DIV" |
"") |
|
;; MCORE Revision 1.50: restricts the divisor to be in r1. (6/97) |
(define_insn "" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
(udiv:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0") |
(match_operand:SI 2 "mcore_arith_reg_operand" "b")))] |
"TARGET_DIV" |
"divu %0,%2") |
|
;; ------------------------------------------------------------------------- |
;; Unary arithmetic |
;; ------------------------------------------------------------------------- |
|
(define_insn "negsi2" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
(neg:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")))] |
"" |
"* |
{ |
return \"rsubi %0,0\"; |
}") |
|
|
(define_insn "abssi2" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
(abs:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")))] |
"" |
"abs %0") |
|
(define_insn "negdi2" |
[(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r") |
(neg:DI (match_operand:DI 1 "mcore_arith_reg_operand" "0"))) |
(clobber (reg:CC 17))] |
"" |
"* |
{ |
if (TARGET_LITTLE_END) |
return \"cmpnei %0,0\\n\\trsubi %0,0\\n\\tnot %R0\\n\\tincf %R0\"; |
return \"cmpnei %R0,0\\n\\trsubi %R0,0\\n\\tnot %0\\n\\tincf %0\"; |
}" |
[(set_attr "length" "8")]) |
|
(define_insn "one_cmplsi2" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
(not:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")))] |
"" |
"not %0") |
|
;; ------------------------------------------------------------------------- |
;; Zero extension instructions |
;; ------------------------------------------------------------------------- |
|
(define_expand "zero_extendhisi2" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "") |
(zero_extend:SI (match_operand:HI 1 "mcore_arith_reg_operand" "")))] |
"" |
"") |
|
(define_insn "" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r") |
(zero_extend:SI (match_operand:HI 1 "general_operand" "0,m")))] |
"" |
"@ |
zexth %0 |
ld.h %0,%1" |
[(set_attr "type" "shift,load")]) |
|
;; ldh gives us a free zero-extension. The combiner picks up on this. |
(define_insn "" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
(zero_extend:SI (mem:HI (match_operand:SI 1 "mcore_arith_reg_operand" "r"))))] |
"" |
"ld.h %0,(%1)" |
[(set_attr "type" "load")]) |
|
(define_insn "" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
(zero_extend:SI (mem:HI (plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r") |
(match_operand:SI 2 "const_int_operand" "")))))] |
"(INTVAL (operands[2]) >= 0) && |
(INTVAL (operands[2]) < 32) && |
((INTVAL (operands[2])&1) == 0)" |
"ld.h %0,(%1,%2)" |
[(set_attr "type" "load")]) |
|
(define_expand "zero_extendqisi2" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "") |
(zero_extend:SI (match_operand:QI 1 "general_operand" "")))] |
"" |
"") |
|
;; RBE: XXX: we don't recognize that the xtrb3 kills the CC register. |
(define_insn "" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,b,r") |
(zero_extend:SI (match_operand:QI 1 "general_operand" "0,r,m")))] |
"" |
"@ |
zextb %0 |
xtrb3 %0,%1 |
ld.b %0,%1" |
[(set_attr "type" "shift,shift,load")]) |
|
;; ldb gives us a free zero-extension. The combiner picks up on this. |
(define_insn "" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
(zero_extend:SI (mem:QI (match_operand:SI 1 "mcore_arith_reg_operand" "r"))))] |
"" |
"ld.b %0,(%1)" |
[(set_attr "type" "load")]) |
|
(define_insn "" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
(zero_extend:SI (mem:QI (plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r") |
(match_operand:SI 2 "const_int_operand" "")))))] |
"(INTVAL (operands[2]) >= 0) && |
(INTVAL (operands[2]) < 16)" |
"ld.b %0,(%1,%2)" |
[(set_attr "type" "load")]) |
|
(define_expand "zero_extendqihi2" |
[(set (match_operand:HI 0 "mcore_arith_reg_operand" "") |
(zero_extend:HI (match_operand:QI 1 "general_operand" "")))] |
"" |
"") |
|
;; RBE: XXX: we don't recognize that the xtrb3 kills the CC register. |
(define_insn "" |
[(set (match_operand:HI 0 "mcore_arith_reg_operand" "=r,b,r") |
(zero_extend:HI (match_operand:QI 1 "general_operand" "0,r,m")))] |
"" |
"@ |
zextb %0 |
xtrb3 %0,%1 |
ld.b %0,%1" |
[(set_attr "type" "shift,shift,load")]) |
|
;; ldb gives us a free zero-extension. The combiner picks up on this. |
;; this doesn't catch references that are into a structure. |
;; note that normally the compiler uses the above insn, unless it turns |
;; out that we're dealing with a volatile... |
(define_insn "" |
[(set (match_operand:HI 0 "mcore_arith_reg_operand" "=r") |
(zero_extend:HI (mem:QI (match_operand:SI 1 "mcore_arith_reg_operand" "r"))))] |
"" |
"ld.b %0,(%1)" |
[(set_attr "type" "load")]) |
|
(define_insn "" |
[(set (match_operand:HI 0 "mcore_arith_reg_operand" "=r") |
(zero_extend:HI (mem:QI (plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r") |
(match_operand:SI 2 "const_int_operand" "")))))] |
"(INTVAL (operands[2]) >= 0) && |
(INTVAL (operands[2]) < 16)" |
"ld.b %0,(%1,%2)" |
[(set_attr "type" "load")]) |
|
|
;; ------------------------------------------------------------------------- |
;; Sign extension instructions |
;; ------------------------------------------------------------------------- |
|
(define_expand "extendsidi2" |
[(set (match_operand:DI 0 "mcore_arith_reg_operand" "=r") |
(match_operand:SI 1 "mcore_arith_reg_operand" "r"))] |
"" |
" |
{ |
int low, high; |
|
if (TARGET_LITTLE_END) |
low = 0, high = 4; |
else |
low = 4, high = 0; |
|
emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], low), |
operands[1])); |
emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], high), |
gen_rtx_ASHIFTRT (SImode, |
gen_rtx_SUBREG (SImode, operands[0], low), |
GEN_INT (31)))); |
DONE; |
}" |
) |
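;; A minimal sketch of the code this expansion aims at (register names are |
;; illustrative):  long long y = x;  with x:SI in rX and y:DI in rL/rH |
;;   mov  rL,rX                ; low word copies the source |
;;   mov  rH,rL ; asri rH,31   ; high word is the sign bit replicated |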
|
(define_insn "extendhisi2" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
(sign_extend:SI (match_operand:HI 1 "mcore_arith_reg_operand" "0")))] |
"" |
"sexth %0") |
|
(define_insn "extendqisi2" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
(sign_extend:SI (match_operand:QI 1 "mcore_arith_reg_operand" "0")))] |
"" |
"sextb %0") |
|
(define_insn "extendqihi2" |
[(set (match_operand:HI 0 "mcore_arith_reg_operand" "=r") |
(sign_extend:HI (match_operand:QI 1 "mcore_arith_reg_operand" "0")))] |
"" |
"sextb %0") |
|
;; ------------------------------------------------------------------------- |
;; Move instructions |
;; ------------------------------------------------------------------------- |
|
;; SImode |
|
(define_expand "movsi" |
[(set (match_operand:SI 0 "general_operand" "") |
(match_operand:SI 1 "general_operand" ""))] |
"" |
" |
{ |
if (GET_CODE (operands[0]) == MEM) |
operands[1] = force_reg (SImode, operands[1]); |
}") |
|
(define_insn "" |
[(set (match_operand:SI 0 "mcore_general_movdst_operand" "=r,r,a,r,a,r,m") |
(match_operand:SI 1 "mcore_general_movsrc_operand" "r,P,i,c,R,m,r"))] |
"(register_operand (operands[0], SImode) |
|| register_operand (operands[1], SImode))" |
"* return mcore_output_move (insn, operands, SImode);" |
[(set_attr "type" "move,move,move,move,load,load,store")]) |
|
;; |
;; HImode |
;; |
|
(define_expand "movhi" |
[(set (match_operand:HI 0 "general_operand" "") |
(match_operand:HI 1 "general_operand" ""))] |
"" |
" |
{ |
if (GET_CODE (operands[0]) == MEM) |
operands[1] = force_reg (HImode, operands[1]); |
else if (CONSTANT_P (operands[1]) |
&& (GET_CODE (operands[1]) != CONST_INT |
|| (! CONST_OK_FOR_I (INTVAL (operands[1])) |
&& ! CONST_OK_FOR_M (INTVAL (operands[1])) |
&& ! CONST_OK_FOR_N (INTVAL (operands[1])))) |
&& ! reload_completed && ! reload_in_progress) |
{ |
rtx reg = gen_reg_rtx (SImode); |
emit_insn (gen_movsi (reg, operands[1])); |
operands[1] = gen_lowpart (HImode, reg); |
} |
}") |
|
(define_insn "" |
[(set (match_operand:HI 0 "mcore_general_movdst_operand" "=r,r,a,r,r,m") |
(match_operand:HI 1 "mcore_general_movsrc_operand" "r,P,i,c,m,r"))] |
"(register_operand (operands[0], HImode) |
|| register_operand (operands[1], HImode))" |
"* return mcore_output_move (insn, operands, HImode);" |
[(set_attr "type" "move,move,move,move,load,store")]) |
|
;; |
;; QImode |
;; |
|
(define_expand "movqi" |
[(set (match_operand:QI 0 "general_operand" "") |
(match_operand:QI 1 "general_operand" ""))] |
"" |
" |
{ |
if (GET_CODE (operands[0]) == MEM) |
operands[1] = force_reg (QImode, operands[1]); |
else if (CONSTANT_P (operands[1]) |
&& (GET_CODE (operands[1]) != CONST_INT |
|| (! CONST_OK_FOR_I (INTVAL (operands[1])) |
&& ! CONST_OK_FOR_M (INTVAL (operands[1])) |
&& ! CONST_OK_FOR_N (INTVAL (operands[1])))) |
&& ! reload_completed && ! reload_in_progress) |
{ |
rtx reg = gen_reg_rtx (SImode); |
emit_insn (gen_movsi (reg, operands[1])); |
operands[1] = gen_lowpart (QImode, reg); |
} |
}") |
|
(define_insn "" |
[(set (match_operand:QI 0 "mcore_general_movdst_operand" "=r,r,a,r,r,m") |
(match_operand:QI 1 "mcore_general_movsrc_operand" "r,P,i,c,m,r"))] |
"(register_operand (operands[0], QImode) |
|| register_operand (operands[1], QImode))" |
"* return mcore_output_move (insn, operands, QImode);" |
[(set_attr "type" "move,move,move,move,load,store")]) |
|
|
;; DImode |
|
(define_expand "movdi" |
[(set (match_operand:DI 0 "general_operand" "") |
(match_operand:DI 1 "general_operand" ""))] |
"" |
" |
{ |
if (GET_CODE (operands[0]) == MEM) |
operands[1] = force_reg (DImode, operands[1]); |
else if (GET_CODE (operands[1]) == CONST_INT |
&& ! CONST_OK_FOR_I (INTVAL (operands[1])) |
&& ! CONST_OK_FOR_M (INTVAL (operands[1])) |
&& ! CONST_OK_FOR_N (INTVAL (operands[1]))) |
{ |
int i; |
for (i = 0; i < UNITS_PER_WORD * 2; i += UNITS_PER_WORD) |
emit_move_insn (simplify_gen_subreg (SImode, operands[0], DImode, i), |
simplify_gen_subreg (SImode, operands[1], DImode, i)); |
DONE; |
} |
}") |
|
(define_insn "movdi_i" |
[(set (match_operand:DI 0 "general_operand" "=r,r,r,r,a,r,m") |
(match_operand:DI 1 "mcore_general_movsrc_operand" "I,M,N,r,R,m,r"))] |
"" |
"* return mcore_output_movedouble (operands, DImode);" |
[(set_attr "length" "4") (set_attr "type" "move,move,move,move,load,load,store")]) |
|
;; SFmode |
|
(define_expand "movsf" |
[(set (match_operand:SF 0 "general_operand" "") |
(match_operand:SF 1 "general_operand" ""))] |
"" |
" |
{ |
if (GET_CODE (operands[0]) == MEM) |
operands[1] = force_reg (SFmode, operands[1]); |
}") |
|
(define_insn "movsf_i" |
[(set (match_operand:SF 0 "general_operand" "=r,r,m") |
(match_operand:SF 1 "general_operand" "r,m,r"))] |
"" |
"@ |
mov %0,%1 |
ld.w %0,%1 |
st.w %1,%0" |
[(set_attr "type" "move,load,store")]) |
|
;; DFmode |
|
(define_expand "movdf" |
[(set (match_operand:DF 0 "general_operand" "") |
(match_operand:DF 1 "general_operand" ""))] |
"" |
" |
{ |
if (GET_CODE (operands[0]) == MEM) |
operands[1] = force_reg (DFmode, operands[1]); |
}") |
|
(define_insn "movdf_k" |
[(set (match_operand:DF 0 "general_operand" "=r,r,m") |
(match_operand:DF 1 "general_operand" "r,m,r"))] |
"" |
"* return mcore_output_movedouble (operands, DFmode);" |
[(set_attr "length" "4") (set_attr "type" "move,load,store")]) |
|
|
;; Load/store multiple |
|
;; ??? This is not currently used. |
(define_insn "ldm" |
[(set (match_operand:TI 0 "mcore_arith_reg_operand" "=r") |
(mem:TI (match_operand:SI 1 "mcore_arith_reg_operand" "r")))] |
"" |
"ldq %U0,(%1)") |
|
;; ??? This is not currently used. |
(define_insn "stm" |
[(set (mem:TI (match_operand:SI 0 "mcore_arith_reg_operand" "r")) |
(match_operand:TI 1 "mcore_arith_reg_operand" "r"))] |
"" |
"stq %U1,(%0)") |
|
(define_expand "load_multiple" |
[(match_par_dup 3 [(set (match_operand:SI 0 "" "") |
(match_operand:SI 1 "" "")) |
(use (match_operand:SI 2 "" ""))])] |
"" |
" |
{ |
int regno, count, i; |
|
  /* Support only loading a constant number of registers from memory, |
     and only if there are at least two of them.  The last register must be r15.  */ |
if (GET_CODE (operands[2]) != CONST_INT |
|| INTVAL (operands[2]) < 2 |
|| GET_CODE (operands[1]) != MEM |
|| XEXP (operands[1], 0) != stack_pointer_rtx |
|| GET_CODE (operands[0]) != REG |
|| REGNO (operands[0]) + INTVAL (operands[2]) != 16) |
FAIL; |
|
count = INTVAL (operands[2]); |
regno = REGNO (operands[0]); |
|
operands[3] = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count)); |
|
for (i = 0; i < count; i++) |
XVECEXP (operands[3], 0, i) |
= gen_rtx_SET (VOIDmode, |
gen_rtx_REG (SImode, regno + i), |
gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, |
i * 4))); |
}") |
|
(define_insn "" |
[(match_parallel 0 "mcore_load_multiple_operation" |
[(set (match_operand:SI 1 "mcore_arith_reg_operand" "=r") |
(mem:SI (match_operand:SI 2 "register_operand" "r")))])] |
"GET_CODE (operands[2]) == REG && REGNO (operands[2]) == STACK_POINTER_REGNUM" |
"ldm %1-r15,(%2)") |
|
(define_expand "store_multiple" |
[(match_par_dup 3 [(set (match_operand:SI 0 "" "") |
(match_operand:SI 1 "" "")) |
(use (match_operand:SI 2 "" ""))])] |
"" |
" |
{ |
int regno, count, i; |
|
  /* Support only storing a constant number of registers to memory, |
     and only if there are at least two of them.  The last register must be r15.  */ |
if (GET_CODE (operands[2]) != CONST_INT |
|| INTVAL (operands[2]) < 2 |
|| GET_CODE (operands[0]) != MEM |
|| XEXP (operands[0], 0) != stack_pointer_rtx |
|| GET_CODE (operands[1]) != REG |
|| REGNO (operands[1]) + INTVAL (operands[2]) != 16) |
FAIL; |
|
count = INTVAL (operands[2]); |
regno = REGNO (operands[1]); |
|
operands[3] = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count)); |
|
for (i = 0; i < count; i++) |
XVECEXP (operands[3], 0, i) |
= gen_rtx_SET (VOIDmode, |
gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, |
i * 4)), |
gen_rtx_REG (SImode, regno + i)); |
}") |
|
(define_insn "" |
[(match_parallel 0 "mcore_store_multiple_operation" |
[(set (mem:SI (match_operand:SI 2 "register_operand" "r")) |
(match_operand:SI 1 "mcore_arith_reg_operand" "r"))])] |
"GET_CODE (operands[2]) == REG && REGNO (operands[2]) == STACK_POINTER_REGNUM" |
"stm %1-r15,(%2)") |
|
;; ------------------------------------------------------------------------ |
;; Define the real conditional branch instructions. |
;; ------------------------------------------------------------------------ |
|
(define_insn "branch_true" |
[(set (pc) (if_then_else (ne (reg:CC 17) (const_int 0)) |
(label_ref (match_operand 0 "" "")) |
(pc)))] |
"" |
"jbt %l0" |
[(set_attr "type" "brcond")]) |
|
(define_insn "branch_false" |
[(set (pc) (if_then_else (eq (reg:CC 17) (const_int 0)) |
(label_ref (match_operand 0 "" "")) |
(pc)))] |
"" |
"jbf %l0" |
[(set_attr "type" "brcond")]) |
|
(define_insn "inverse_branch_true" |
[(set (pc) (if_then_else (ne (reg:CC 17) (const_int 0)) |
(pc) |
(label_ref (match_operand 0 "" ""))))] |
"" |
"jbf %l0" |
[(set_attr "type" "brcond")]) |
|
(define_insn "inverse_branch_false" |
[(set (pc) (if_then_else (eq (reg:CC 17) (const_int 0)) |
(pc) |
(label_ref (match_operand 0 "" ""))))] |
"" |
"jbt %l0" |
[(set_attr "type" "brcond")]) |
|
;; Conditional branch insns |
|
;; At the top level, the condition tests are eq/ne, because we |
;; are comparing against the condition register (which |
;; holds the result of the true relational test). |
|
; There is no beq compare, so we reverse the branch arms. |
|
(define_expand "beq" |
[(set (pc) (if_then_else (ne (match_dup 1) (const_int 0)) |
(pc) |
(label_ref (match_operand 0 "" ""))))] |
"" |
" |
{ |
operands[1] = mcore_gen_compare_reg (EQ); |
}") |
|
(define_expand "bne" |
[(set (pc) (if_then_else (ne (match_dup 1) (const_int 0)) |
(label_ref (match_operand 0 "" "")) |
(pc)))] |
"" |
" |
{ |
operands[1] = mcore_gen_compare_reg (NE); |
}") |
|
; check whether (GT A imm) can become (LE A imm) with the branch reversed. |
; if so, emit a (LT A imm + 1) in place of the (LE A imm). BRC |
|
(define_expand "bgt" |
[(set (pc) (if_then_else (ne (match_dup 1) (const_int 0)) |
(label_ref (match_operand 0 "" "")) |
(pc)))] |
"" |
" |
{ |
if (mcore_modify_comparison (LE)) |
{ |
emit_jump_insn (gen_reverse_blt (operands[0])); |
DONE; |
} |
operands[1] = mcore_gen_compare_reg (GT); |
}") |
|
; There is no ble compare, so we reverse the branch arms. |
; reversed the condition and branch arms for ble -- the check_dbra_loop() |
; transformation assumes that ble uses a branch-true with the label |
; as the target.  BRC |
|
; check whether (LE A imm) can become (LT A imm + 1). |
|
(define_expand "ble" |
[(set (pc) (if_then_else (eq (match_dup 1) (const_int 0)) |
(label_ref (match_operand 0 "" "")) |
(pc)))] |
"" |
" |
{ |
if (mcore_modify_comparison (LE)) |
{ |
emit_jump_insn (gen_blt (operands[0])); |
DONE; |
} |
operands[1] = mcore_gen_compare_reg (LE); |
}") |
|
; make generating a reversed blt simple |
(define_expand "reverse_blt" |
[(set (pc) (if_then_else (ne (match_dup 1) (const_int 0)) |
(pc) |
(label_ref (match_operand 0 "" ""))))] |
"" |
" |
{ |
operands[1] = mcore_gen_compare_reg (LT); |
}") |
|
(define_expand "blt" |
[(set (pc) (if_then_else (ne (match_dup 1) (const_int 0)) |
(label_ref (match_operand 0 "" "")) |
(pc)))] |
"" |
" |
{ |
operands[1] = mcore_gen_compare_reg (LT); |
}") |
|
; There is no bge compare, so we reverse the branch arms. |
|
(define_expand "bge" |
[(set (pc) (if_then_else (ne (match_dup 1) (const_int 0)) |
(pc) |
(label_ref (match_operand 0 "" ""))))] |
"" |
" |
{ |
operands[1] = mcore_gen_compare_reg (GE); |
}") |
|
; There is no gtu compare, so we reverse the branch arms |
|
;(define_expand "bgtu" |
; [(set (pc) (if_then_else (ne (match_dup 1) (const_int 0)) |
; (pc) |
; (label_ref (match_operand 0 "" ""))))] |
; "" |
; " |
;{ |
; if (GET_CODE (arch_compare_op1) == CONST_INT |
; && INTVAL (arch_compare_op1) == 0) |
; operands[1] = mcore_gen_compare_reg (NE); |
; else |
; { if (mcore_modify_comparison (GTU)) |
; { |
; emit_jump_insn (gen_bgeu (operands[0])); |
; DONE; |
; } |
; operands[1] = mcore_gen_compare_reg (LEU); |
; } |
;}") |
|
(define_expand "bgtu" |
[(set (pc) (if_then_else (ne (match_dup 1) (const_int 0)) |
(pc) |
(label_ref (match_operand 0 "" ""))))] |
"" |
" |
{ |
if (GET_CODE (arch_compare_op1) == CONST_INT |
&& INTVAL (arch_compare_op1) == 0) |
{ |
/* The inverse of '> 0' for an unsigned test is |
'== 0' but we do not have such an instruction available. |
Instead we must reverse the branch (back to the normal |
ordering) and test '!= 0'. */ |
|
operands[1] = mcore_gen_compare_reg (NE); |
|
emit_jump_insn (gen_rtx_SET (VOIDmode, |
pc_rtx, |
gen_rtx_IF_THEN_ELSE (VOIDmode, |
gen_rtx_NE (VOIDmode, |
operands[1], |
const0_rtx), |
gen_rtx_LABEL_REF (VOIDmode,operands[0]), |
pc_rtx))); |
DONE; |
} |
operands[1] = mcore_gen_compare_reg (GTU); |
}") |
|
|
(define_expand "bleu" |
[(set (pc) (if_then_else (ne (match_dup 1) (const_int 0)) |
(label_ref (match_operand 0 "" "")) |
(pc)))] |
"" |
" |
{ |
operands[1] = mcore_gen_compare_reg (LEU); |
}") |
|
; There is no bltu compare, so we reverse the branch arms |
(define_expand "bltu" |
[(set (pc) (if_then_else (ne (match_dup 1) (const_int 0)) |
(pc) |
(label_ref (match_operand 0 "" ""))))] |
"" |
" |
{ |
operands[1] = mcore_gen_compare_reg (LTU); |
}") |
|
(define_expand "bgeu" |
[(set (pc) (if_then_else (ne (match_dup 1) (const_int 0)) |
(label_ref (match_operand 0 "" "")) |
(pc)))] |
"" |
" |
{ |
operands[1] = mcore_gen_compare_reg (GEU); |
}") |
|
;; ------------------------------------------------------------------------ |
;; Jump and linkage insns |
;; ------------------------------------------------------------------------ |
|
(define_insn "jump_real" |
[(set (pc) |
(label_ref (match_operand 0 "" "")))] |
"" |
"jbr %l0" |
[(set_attr "type" "branch")]) |
|
(define_expand "jump" |
[(set (pc) (label_ref (match_operand 0 "" "")))] |
"" |
" |
{ |
emit_jump_insn (gen_jump_real (operand0)); |
DONE; |
} |
") |
|
(define_insn "indirect_jump" |
[(set (pc) |
(match_operand:SI 0 "mcore_arith_reg_operand" "r"))] |
"" |
"jmp %0" |
[(set_attr "type" "jmp")]) |
|
(define_expand "call" |
[(parallel[(call (match_operand:SI 0 "" "") |
(match_operand 1 "" "")) |
(clobber (reg:SI 15))])] |
"" |
" |
{ |
if (GET_CODE (operands[0]) == MEM |
&& ! register_operand (XEXP (operands[0], 0), SImode) |
&& ! mcore_symbolic_address_p (XEXP (operands[0], 0))) |
operands[0] = gen_rtx_MEM (GET_MODE (operands[0]), |
force_reg (Pmode, XEXP (operands[0], 0))); |
}") |
|
(define_insn "call_internal" |
[(call (mem:SI (match_operand:SI 0 "mcore_call_address_operand" "riR")) |
(match_operand 1 "" "")) |
(clobber (reg:SI 15))] |
"" |
"* return mcore_output_call (operands, 0);") |
|
(define_expand "call_value" |
[(parallel[(set (match_operand 0 "register_operand" "") |
(call (match_operand:SI 1 "" "") |
(match_operand 2 "" ""))) |
(clobber (reg:SI 15))])] |
"" |
" |
{ |
  if (GET_CODE (operands[1]) == MEM |
      && ! register_operand (XEXP (operands[1], 0), SImode) |
      && ! mcore_symbolic_address_p (XEXP (operands[1], 0))) |
operands[1] = gen_rtx_MEM (GET_MODE (operands[1]), |
force_reg (Pmode, XEXP (operands[1], 0))); |
}") |
|
(define_insn "call_value_internal" |
[(set (match_operand 0 "register_operand" "=r") |
(call (mem:SI (match_operand:SI 1 "mcore_call_address_operand" "riR")) |
(match_operand 2 "" ""))) |
(clobber (reg:SI 15))] |
"" |
"* return mcore_output_call (operands, 1);") |
|
(define_insn "call_value_struct" |
[(parallel [(set (match_parallel 0 "" |
[(expr_list (match_operand 3 "register_operand" "") (match_operand 4 "immediate_operand" "")) |
(expr_list (match_operand 5 "register_operand" "") (match_operand 6 "immediate_operand" ""))]) |
(call (match_operand:SI 1 "" "") |
(match_operand 2 "" ""))) |
(clobber (reg:SI 15))])] |
"" |
"* return mcore_output_call (operands, 1);" |
) |
|
|
;; ------------------------------------------------------------------------ |
;; Misc insns |
;; ------------------------------------------------------------------------ |
|
(define_insn "nop" |
[(const_int 0)] |
"" |
"or r0,r0") |
|
(define_insn "tablejump" |
[(set (pc) |
(match_operand:SI 0 "mcore_arith_reg_operand" "r")) |
(use (label_ref (match_operand 1 "" "")))] |
"" |
"jmp %0" |
[(set_attr "type" "jmp")]) |
|
(define_insn "*return" |
[(return)] |
"reload_completed && ! mcore_naked_function_p ()" |
"jmp r15" |
[(set_attr "type" "jmp")]) |
|
(define_insn "*no_return" |
[(return)] |
"reload_completed && mcore_naked_function_p ()" |
"" |
[(set_attr "length" "0")] |
) |
|
(define_expand "prologue" |
[(const_int 0)] |
"" |
"mcore_expand_prolog (); DONE;") |
|
(define_expand "epilogue" |
[(return)] |
"" |
"mcore_expand_epilog ();") |
|
;; ------------------------------------------------------------------------ |
;; Scc instructions |
;; ------------------------------------------------------------------------ |
|
(define_insn "mvc" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
(ne:SI (reg:CC 17) (const_int 0)))] |
"" |
"mvc %0" |
[(set_attr "type" "move")]) |
|
(define_insn "mvcv" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
(eq:SI (reg:CC 17) (const_int 0)))] |
"" |
"mvcv %0" |
[(set_attr "type" "move")]) |
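; For example (illustrative; assumes r2 is the return-value register): |
;   int lt (int a, int b) { return a < b; } |
; can become   cmplt r2,r3 ; mvc r2 |
; with mvcv used instead when the complemented condition is wanted. |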
|
; in 0.97 there is no tstgt, so replace the (GT x 0) test with (LT x 1) |
; and complement the C bit.  BRC |
(define_split |
[(parallel[ |
(set (match_operand:SI 0 "mcore_arith_reg_operand" "") |
(ne:SI (gt:CC (match_operand:SI 1 "mcore_arith_reg_operand" "") |
(const_int 0)) |
(const_int 0))) |
(clobber (reg:SI 17))])] |
"" |
[(set (reg:CC 17) |
(lt:CC (match_dup 1) (const_int 1))) |
(set (match_dup 0) (eq:SI (reg:CC 17) (const_int 0)))]) |
|
|
(define_expand "seq" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "") |
(eq:SI (match_dup 1) (const_int 0)))] |
"" |
" |
{ |
operands[1] = mcore_gen_compare_reg (NE); |
}") |
|
(define_expand "sne" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "") |
(ne:SI (match_dup 1) (const_int 0)))] |
"" |
" |
{ |
operands[1] = mcore_gen_compare_reg (NE); |
}") |
|
(define_expand "slt" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "") |
(ne:SI (match_dup 1) (const_int 0)))] |
"" |
" |
{ |
operands[1] = mcore_gen_compare_reg (LT); |
}") |
|
; make generating a LT with the comparison reversed easy. BRC |
(define_expand "reverse_slt" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "") |
(eq:SI (match_dup 1) (const_int 0)))] |
"" |
" |
{ |
operands[1] = mcore_gen_compare_reg (LT); |
}") |
|
(define_expand "sge" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "") |
(eq:SI (match_dup 1) (const_int 0)))] |
"" |
" |
{ |
operands[1] = mcore_gen_compare_reg (LT); |
}") |
|
; check whether (GT A imm) can become (LE A imm) with the comparison |
; reversed. if so, emit a (LT A imm + 1) in place of the (LE A imm). BRC |
|
(define_expand "sgt" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "") |
(ne:SI (match_dup 1) (const_int 0)))] |
"" |
" |
{ |
if (mcore_modify_comparison (LE)) |
{ |
emit_insn (gen_reverse_slt (operands[0])); |
DONE; |
} |
|
operands[1] = mcore_gen_compare_reg (GT); |
}") |
|
(define_expand "sle" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "") |
(eq:SI (match_dup 1) (const_int 0)))] |
"" |
" |
{ |
if (mcore_modify_comparison (LE)) |
{ |
emit_insn (gen_slt (operands[0])); |
DONE; |
} |
operands[1] = mcore_gen_compare_reg (GT); |
}") |
|
(define_expand "sltu" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "") |
(eq:SI (match_dup 1) (const_int 0)))] |
"" |
" |
{ |
operands[1] = mcore_gen_compare_reg (GEU); |
}") |
|
(define_expand "sgeu" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "") |
(ne:SI (match_dup 1) (const_int 0)))] |
"" |
" |
{ |
operands[1] = mcore_gen_compare_reg (GEU); |
}") |
|
(define_expand "sgtu" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "") |
(eq:SI (match_dup 1) (const_int 0)))] |
"" |
" |
{ |
operands[1] = mcore_gen_compare_reg (LEU); |
}") |
|
(define_expand "sleu" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "") |
(ne:SI (match_dup 1) (const_int 0)))] |
"" |
" |
{ |
operands[1] = mcore_gen_compare_reg (LEU); |
}") |
|
(define_insn "incscc" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
(plus:SI (ne (reg:CC 17) (const_int 0)) |
(match_operand:SI 1 "mcore_arith_reg_operand" "0")))] |
"" |
"inct %0") |
|
(define_insn "incscc_false" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
(plus:SI (eq (reg:CC 17) (const_int 0)) |
(match_operand:SI 1 "mcore_arith_reg_operand" "0")))] |
"" |
"incf %0") |
|
(define_insn "decscc" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
(minus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0") |
(ne (reg:CC 17) (const_int 0))))] |
"" |
"dect %0") |
|
(define_insn "decscc_false" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
(minus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0") |
(eq (reg:CC 17) (const_int 0))))] |
"" |
"decf %0") |
|
;; ------------------------------------------------------------------------ |
;; Conditional move patterns. |
;; ------------------------------------------------------------------------ |
|
(define_expand "smaxsi3" |
[(set (reg:CC 17) |
(lt:CC (match_operand:SI 1 "mcore_arith_reg_operand" "") |
(match_operand:SI 2 "mcore_arith_reg_operand" ""))) |
(set (match_operand:SI 0 "mcore_arith_reg_operand" "") |
(if_then_else:SI (eq (reg:CC 17) (const_int 0)) |
(match_dup 1) (match_dup 2)))] |
"" |
"") |
|
(define_split |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "") |
(smax:SI (match_operand:SI 1 "mcore_arith_reg_operand" "") |
(match_operand:SI 2 "mcore_arith_reg_operand" "")))] |
"" |
[(set (reg:CC 17) |
(lt:SI (match_dup 1) (match_dup 2))) |
(set (match_dup 0) |
(if_then_else:SI (eq (reg:CC 17) (const_int 0)) |
(match_dup 1) (match_dup 2)))] |
"") |
|
; no tstgt in 0.97, so test (LT x 0) instead -- which is just a btsti x,31 -- |
; and reverse the move condition.  BRC |
(define_split |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "") |
(smax:SI (match_operand:SI 1 "mcore_arith_reg_operand" "") |
(const_int 0)))] |
"" |
[(set (reg:CC 17) |
(lt:CC (match_dup 1) (const_int 0))) |
(set (match_dup 0) |
(if_then_else:SI (eq (reg:CC 17) (const_int 0)) |
(match_dup 1) (const_int 0)))] |
"") |
|
(define_expand "sminsi3" |
[(set (reg:CC 17) |
(lt:CC (match_operand:SI 1 "mcore_arith_reg_operand" "") |
(match_operand:SI 2 "mcore_arith_reg_operand" ""))) |
(set (match_operand:SI 0 "mcore_arith_reg_operand" "") |
(if_then_else:SI (ne (reg:CC 17) (const_int 0)) |
(match_dup 1) (match_dup 2)))] |
"" |
"") |
|
(define_split |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "") |
(smin:SI (match_operand:SI 1 "mcore_arith_reg_operand" "") |
(match_operand:SI 2 "mcore_arith_reg_operand" "")))] |
"" |
[(set (reg:CC 17) |
(lt:SI (match_dup 1) (match_dup 2))) |
(set (match_dup 0) |
(if_then_else:SI (ne (reg:CC 17) (const_int 0)) |
(match_dup 1) (match_dup 2)))] |
"") |
|
;(define_split |
; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "") |
; (smin:SI (match_operand:SI 1 "mcore_arith_reg_operand" "") |
; (const_int 0)))] |
; "" |
; [(set (reg:CC 17) |
; (gt:CC (match_dup 1) (const_int 0))) |
; (set (match_dup 0) |
; (if_then_else:SI (eq (reg:CC 17) (const_int 0)) |
; (match_dup 1) (const_int 0)))] |
; "") |
|
; changed these unsigned patterns to use geu instead of ltu. it appears |
; that the c-torture & ssrl test suites didn't catch these! only showed |
; up in friedman's clib work. BRC 7/7/95 |
|
(define_expand "umaxsi3" |
[(set (reg:CC 17) |
(geu:CC (match_operand:SI 1 "mcore_arith_reg_operand" "") |
(match_operand:SI 2 "mcore_arith_reg_operand" ""))) |
(set (match_operand:SI 0 "mcore_arith_reg_operand" "") |
(if_then_else:SI (eq (reg:CC 17) (const_int 0)) |
(match_dup 2) (match_dup 1)))] |
"" |
"") |
|
(define_split |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "") |
(umax:SI (match_operand:SI 1 "mcore_arith_reg_operand" "") |
(match_operand:SI 2 "mcore_arith_reg_operand" "")))] |
"" |
[(set (reg:CC 17) |
(geu:SI (match_dup 1) (match_dup 2))) |
(set (match_dup 0) |
(if_then_else:SI (eq (reg:CC 17) (const_int 0)) |
(match_dup 2) (match_dup 1)))] |
"") |
|
(define_expand "uminsi3" |
[(set (reg:CC 17) |
(geu:CC (match_operand:SI 1 "mcore_arith_reg_operand" "") |
(match_operand:SI 2 "mcore_arith_reg_operand" ""))) |
(set (match_operand:SI 0 "mcore_arith_reg_operand" "") |
(if_then_else:SI (ne (reg:CC 17) (const_int 0)) |
(match_dup 2) (match_dup 1)))] |
"" |
"") |
|
(define_split |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "") |
(umin:SI (match_operand:SI 1 "mcore_arith_reg_operand" "") |
(match_operand:SI 2 "mcore_arith_reg_operand" "")))] |
"" |
[(set (reg:CC 17) |
(geu:SI (match_dup 1) (match_dup 2))) |
(set (match_dup 0) |
(if_then_else:SI (ne (reg:CC 17) (const_int 0)) |
(match_dup 2) (match_dup 1)))] |
"") |
|
;; ------------------------------------------------------------------------ |
;; conditional move patterns really start here |
;; ------------------------------------------------------------------------ |
|
;; the "movtK" patterns are experimental. they are intended to account for |
;; gcc's mucking on code such as: |
;; |
;; free_ent = ((block_compress) ? 257 : 256 ); |
;; |
;; these patterns help to get a tstne/bgeni/inct (or equivalent) sequence |
;; when both arms have constants that are +/- 1 of each other. |
;; |
;; note in the following patterns that the "movtK" ones should be the first |
;; one defined in each sequence. this is because the general pattern also |
;; matches, so use ordering to determine priority (it's easier this way than |
;; adding conditions to the general patterns). BRC |
;; |
;; the U and Q constraints are necessary to ensure that reload does the |
;; 'right thing'. U constrains the operand to 0 and Q to 1 for use in the |
;; clrt & clrf and clrt/inct & clrf/incf patterns. BRC 6/26 |
;; |
;; ??? there appears to be some problems with these movtK patterns for ops |
;; other than eq & ne. need to fix. 6/30 BRC |
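;; sketch of the goal for the example above (register names illustrative): |
;;   free_ent = ((block_compress) ? 257 : 256); |
;; becomes roughly   cmpnei rB,0 ; bgeni rF,8 ; inct rF |
;; i.e. build the smaller constant and conditionally bump it by one, rather |
;; than branching around one of two constant loads. |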
|
;; ------------------------------------------------------------------------ |
;; ne |
;; ------------------------------------------------------------------------ |
|
; experimental conditional move with two constants +/- 1 BRC |
|
(define_insn "movtK_1" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
(if_then_else:SI |
(ne (reg:CC 17) (const_int 0)) |
(match_operand:SI 1 "mcore_arith_O_operand" "O") |
(match_operand:SI 2 "mcore_arith_O_operand" "O")))] |
" GET_CODE (operands[1]) == CONST_INT |
&& GET_CODE (operands[2]) == CONST_INT |
&& ( (INTVAL (operands[1]) - INTVAL (operands[2]) == 1) |
|| (INTVAL (operands[2]) - INTVAL (operands[1]) == 1))" |
"* return mcore_output_cmov (operands, 1, NULL);" |
[(set_attr "length" "4")]) |
|
(define_insn "movt0" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r") |
(if_then_else:SI |
(ne (reg:CC 17) (const_int 0)) |
(match_operand:SI 1 "mcore_arith_imm_operand" "r,0,U,0") |
(match_operand:SI 2 "mcore_arith_imm_operand" "0,r,0,U")))] |
"" |
"@ |
movt %0,%1 |
movf %0,%2 |
clrt %0 |
clrf %0") |
|
;; ------------------------------------------------------------------------ |
;; eq |
;; ------------------------------------------------------------------------ |
|
; experimental conditional move with two constants +/- 1 BRC |
(define_insn "movtK_2" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
(if_then_else:SI |
(eq (reg:CC 17) (const_int 0)) |
(match_operand:SI 1 "mcore_arith_O_operand" "O") |
(match_operand:SI 2 "mcore_arith_O_operand" "O")))] |
" GET_CODE (operands[1]) == CONST_INT |
&& GET_CODE (operands[2]) == CONST_INT |
&& ( (INTVAL (operands[1]) - INTVAL (operands[2]) == 1) |
|| (INTVAL (operands[2]) - INTVAL (operands[1]) == 1))" |
"* return mcore_output_cmov (operands, 0, NULL);" |
[(set_attr "length" "4")]) |
|
(define_insn "movf0" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r") |
(if_then_else:SI |
(eq (reg:CC 17) (const_int 0)) |
(match_operand:SI 1 "mcore_arith_imm_operand" "r,0,U,0") |
(match_operand:SI 2 "mcore_arith_imm_operand" "0,r,0,U")))] |
"" |
"@ |
movf %0,%1 |
movt %0,%2 |
clrf %0 |
clrt %0") |
|
; turns lsli rx,imm/btsti rx,31 into btsti rx,imm. not done by a peephole |
; because the instructions are not adjacent (peepholes are related by posn - |
; not by dataflow). BRC |
|
(define_insn "" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r") |
(if_then_else:SI (eq (zero_extract:SI |
(match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r") |
(const_int 1) |
(match_operand:SI 2 "mcore_literal_K_operand" "K,K,K,K")) |
(const_int 0)) |
(match_operand:SI 3 "mcore_arith_imm_operand" "r,0,U,0") |
(match_operand:SI 4 "mcore_arith_imm_operand" "0,r,0,U")))] |
"" |
"@ |
btsti %1,%2\;movf %0,%3 |
btsti %1,%2\;movt %0,%4 |
btsti %1,%2\;clrf %0 |
btsti %1,%2\;clrt %0" |
[(set_attr "length" "4")]) |
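; For instance (illustrative only), a single-bit selection such as |
;   y = (flags & 8) ? y : z; |
; fits the first alternative above and comes out as |
;   btsti rflags,3 ; movf ry,rz |
; rather than a shift, a compare and a conditional branch. |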
|
; turns sextb rx/btsti rx,31 into btsti rx,7. must be QImode to be safe. BRC |
|
(define_insn "" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r") |
(if_then_else:SI (eq (lshiftrt:SI |
(match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r") |
(const_int 7)) |
(const_int 0)) |
(match_operand:SI 2 "mcore_arith_imm_operand" "r,0,U,0") |
(match_operand:SI 3 "mcore_arith_imm_operand" "0,r,0,U")))] |
"GET_CODE (operands[1]) == SUBREG && |
GET_MODE (SUBREG_REG (operands[1])) == QImode" |
"@ |
btsti %1,7\;movf %0,%2 |
btsti %1,7\;movt %0,%3 |
btsti %1,7\;clrf %0 |
btsti %1,7\;clrt %0" |
[(set_attr "length" "4")]) |
|
|
;; ------------------------------------------------------------------------ |
;; ne |
;; ------------------------------------------------------------------------ |
|
;; Combine creates this from an andn instruction in a scc sequence. |
;; We must recognize it to get conditional moves generated. |
|
; experimental conditional move with two constants +/- 1 BRC |
(define_insn "movtK_3" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
(if_then_else:SI |
(ne (match_operand:SI 1 "mcore_arith_reg_operand" "r") |
(const_int 0)) |
(match_operand:SI 2 "mcore_arith_O_operand" "O") |
(match_operand:SI 3 "mcore_arith_O_operand" "O")))] |
" GET_CODE (operands[2]) == CONST_INT |
&& GET_CODE (operands[3]) == CONST_INT |
&& ( (INTVAL (operands[2]) - INTVAL (operands[3]) == 1) |
|| (INTVAL (operands[3]) - INTVAL (operands[2]) == 1))" |
"* |
{ |
rtx out_operands[4]; |
out_operands[0] = operands[0]; |
out_operands[1] = operands[2]; |
out_operands[2] = operands[3]; |
out_operands[3] = operands[1]; |
|
return mcore_output_cmov (out_operands, 1, \"cmpnei %3,0\"); |
|
}" |
[(set_attr "length" "6")]) |
|
(define_insn "movt2" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r") |
(if_then_else:SI (ne (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r") |
(const_int 0)) |
(match_operand:SI 2 "mcore_arith_imm_operand" "r,0,U,0") |
(match_operand:SI 3 "mcore_arith_imm_operand" "0,r,0,U")))] |
"" |
"@ |
cmpnei %1,0\;movt %0,%2 |
cmpnei %1,0\;movf %0,%3 |
cmpnei %1,0\;clrt %0 |
cmpnei %1,0\;clrf %0" |
[(set_attr "length" "4")]) |
|
; turns lsli rx,imm/btsti rx,31 into btsti rx,imm. not done by a peephole |
; because the instructions are not adjacent (peepholes are related by posn - |
; not by dataflow). BRC |
|
(define_insn "" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r") |
(if_then_else:SI (ne (zero_extract:SI |
(match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r") |
(const_int 1) |
(match_operand:SI 2 "mcore_literal_K_operand" "K,K,K,K")) |
(const_int 0)) |
(match_operand:SI 3 "mcore_arith_imm_operand" "r,0,U,0") |
(match_operand:SI 4 "mcore_arith_imm_operand" "0,r,0,U")))] |
"" |
"@ |
btsti %1,%2\;movt %0,%3 |
btsti %1,%2\;movf %0,%4 |
btsti %1,%2\;clrt %0 |
btsti %1,%2\;clrf %0" |
[(set_attr "length" "4")]) |
|
; turns sextb rx/btsti rx,31 into btsti rx,7. must be QImode to be safe. BRC |
|
(define_insn "" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r") |
(if_then_else:SI (ne (lshiftrt:SI |
(match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r") |
(const_int 7)) |
(const_int 0)) |
(match_operand:SI 2 "mcore_arith_imm_operand" "r,0,U,0") |
(match_operand:SI 3 "mcore_arith_imm_operand" "0,r,0,U")))] |
"GET_CODE (operands[1]) == SUBREG && |
GET_MODE (SUBREG_REG (operands[1])) == QImode" |
"@ |
btsti %1,7\;movt %0,%2 |
btsti %1,7\;movf %0,%3 |
btsti %1,7\;clrt %0 |
btsti %1,7\;clrf %0" |
[(set_attr "length" "4")]) |
|
;; ------------------------------------------------------------------------ |
;; eq/eq |
;; ------------------------------------------------------------------------ |
|
; experimental conditional move with two constants +/- 1 BRC |
(define_insn "movtK_4" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
(if_then_else:SI |
(eq (eq:SI (reg:CC 17) (const_int 0)) (const_int 0)) |
(match_operand:SI 1 "mcore_arith_O_operand" "O") |
(match_operand:SI 2 "mcore_arith_O_operand" "O")))] |
"GET_CODE (operands[1]) == CONST_INT && |
GET_CODE (operands[2]) == CONST_INT && |
((INTVAL (operands[1]) - INTVAL (operands[2]) == 1) || |
(INTVAL (operands[2]) - INTVAL (operands[1]) == 1))" |
"* return mcore_output_cmov(operands, 1, NULL);" |
[(set_attr "length" "4")]) |
|
(define_insn "movt3" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r") |
(if_then_else:SI |
(eq (eq:SI (reg:CC 17) (const_int 0)) (const_int 0)) |
(match_operand:SI 1 "mcore_arith_imm_operand" "r,0,U,0") |
(match_operand:SI 2 "mcore_arith_imm_operand" "0,r,0,U")))] |
"" |
"@ |
movt %0,%1 |
movf %0,%2 |
clrt %0 |
clrf %0") |
|
;; ------------------------------------------------------------------------ |
;; eq/ne |
;; ------------------------------------------------------------------------ |
|
; experimental conditional move with two constants +/- 1 BRC |
(define_insn "movtK_5" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
(if_then_else:SI |
(eq (ne:SI (reg:CC 17) (const_int 0)) (const_int 0)) |
(match_operand:SI 1 "mcore_arith_O_operand" "O") |
(match_operand:SI 2 "mcore_arith_O_operand" "O")))] |
"GET_CODE (operands[1]) == CONST_INT && |
GET_CODE (operands[2]) == CONST_INT && |
((INTVAL (operands[1]) - INTVAL (operands[2]) == 1) || |
(INTVAL (operands[2]) - INTVAL (operands[1]) == 1))" |
"* return mcore_output_cmov (operands, 0, NULL);" |
[(set_attr "length" "4")]) |
|
(define_insn "movf1" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r") |
(if_then_else:SI |
(eq (ne:SI (reg:CC 17) (const_int 0)) (const_int 0)) |
(match_operand:SI 1 "mcore_arith_imm_operand" "r,0,U,0") |
(match_operand:SI 2 "mcore_arith_imm_operand" "0,r,0,U")))] |
"" |
"@ |
movf %0,%1 |
movt %0,%2 |
clrf %0 |
clrt %0") |
|
;; ------------------------------------------------------------------------ |
;; eq |
;; ------------------------------------------------------------------------ |
|
;; Combine creates this from an andn instruction in a scc sequence. |
;; We must recognize it to get conditional moves generated. |
|
; experimental conditional move with two constants +/- 1 BRC |
|
(define_insn "movtK_6" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
(if_then_else:SI |
(eq (match_operand:SI 1 "mcore_arith_reg_operand" "r") |
(const_int 0)) |
(match_operand:SI 2 "mcore_arith_O_operand" "O") |
(match_operand:SI 3 "mcore_arith_O_operand" "O")))] |
"GET_CODE (operands[1]) == CONST_INT && |
GET_CODE (operands[2]) == CONST_INT && |
((INTVAL (operands[2]) - INTVAL (operands[3]) == 1) || |
(INTVAL (operands[3]) - INTVAL (operands[2]) == 1))" |
"* |
{ |
rtx out_operands[4]; |
out_operands[0] = operands[0]; |
out_operands[1] = operands[2]; |
out_operands[2] = operands[3]; |
out_operands[3] = operands[1]; |
|
return mcore_output_cmov (out_operands, 0, \"cmpnei %3,0\"); |
}" |
[(set_attr "length" "6")]) |
|
(define_insn "movf3" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r") |
(if_then_else:SI (eq (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r") |
(const_int 0)) |
(match_operand:SI 2 "mcore_arith_imm_operand" "r,0,U,0") |
(match_operand:SI 3 "mcore_arith_imm_operand" "0,r,0,U")))] |
"" |
"@ |
cmpnei %1,0\;movf %0,%2 |
cmpnei %1,0\;movt %0,%3 |
cmpnei %1,0\;clrf %0 |
cmpnei %1,0\;clrt %0" |
[(set_attr "length" "4")]) |
|
;; ------------------------------------------------------------------------ |
;; ne/eq |
;; ------------------------------------------------------------------------ |
|
; experimental conditional move with two constants +/- 1 BRC |
(define_insn "movtK_7" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
(if_then_else:SI |
(ne (eq:SI (reg:CC 17) (const_int 0)) (const_int 0)) |
(match_operand:SI 1 "mcore_arith_O_operand" "O") |
(match_operand:SI 2 "mcore_arith_O_operand" "O")))] |
"GET_CODE (operands[1]) == CONST_INT && |
GET_CODE (operands[2]) == CONST_INT && |
((INTVAL (operands[1]) - INTVAL (operands[2]) == 1) || |
(INTVAL (operands[2]) - INTVAL (operands[1]) == 1))" |
"* return mcore_output_cmov (operands, 0, NULL);" |
[(set_attr "length" "4")]) |
|
(define_insn "movf4" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r") |
(if_then_else:SI |
(ne (eq:SI (reg:CC 17) (const_int 0)) (const_int 0)) |
(match_operand:SI 1 "mcore_arith_imm_operand" "r,0,U,0") |
(match_operand:SI 2 "mcore_arith_imm_operand" "0,r,0,U")))] |
"" |
"@ |
movf %0,%1 |
movt %0,%2 |
clrf %0 |
clrt %0") |
|
;; ------------------------------------------------------------------------ |
;; ne/ne |
;; ------------------------------------------------------------------------ |
|
; experimental conditional move with two constants +/- 1 BRC |
(define_insn "movtK_8" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
(if_then_else:SI |
(ne (ne:SI (reg:CC 17) (const_int 0)) (const_int 0)) |
(match_operand:SI 1 "mcore_arith_O_operand" "O") |
(match_operand:SI 2 "mcore_arith_O_operand" "O")))] |
"GET_CODE (operands[1]) == CONST_INT && |
GET_CODE (operands[2]) == CONST_INT && |
((INTVAL (operands[1]) - INTVAL (operands[2]) == 1) || |
(INTVAL (operands[2]) - INTVAL (operands[1]) == 1))" |
"* return mcore_output_cmov (operands, 1, NULL);" |
[(set_attr "length" "4")]) |
|
(define_insn "movt4" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r") |
(if_then_else:SI |
(ne (ne:SI (reg:CC 17) (const_int 0)) (const_int 0)) |
(match_operand:SI 1 "mcore_arith_imm_operand" "r,0,U,0") |
(match_operand:SI 2 "mcore_arith_imm_operand" "0,r,0,U")))] |
"" |
"@ |
movt %0,%1 |
movf %0,%2 |
clrt %0 |
clrf %0") |
|
;; Also need patterns to recognize lt/ge, since otherwise the compiler will |
;; try to output not/asri/tstne/movf. |
|
;; ------------------------------------------------------------------------ |
;; lt |
;; ------------------------------------------------------------------------ |
|
; experimental conditional move with two constants +/- 1 BRC |
(define_insn "movtK_9" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
(if_then_else:SI |
(lt (match_operand:SI 1 "mcore_arith_reg_operand" "r") |
(const_int 0)) |
(match_operand:SI 2 "mcore_arith_O_operand" "O") |
(match_operand:SI 3 "mcore_arith_O_operand" "O")))] |
"GET_CODE (operands[2]) == CONST_INT && |
GET_CODE (operands[3]) == CONST_INT && |
((INTVAL (operands[2]) - INTVAL (operands[3]) == 1) || |
(INTVAL (operands[3]) - INTVAL (operands[2]) == 1))" |
"* |
{ |
rtx out_operands[4]; |
out_operands[0] = operands[0]; |
out_operands[1] = operands[2]; |
out_operands[2] = operands[3]; |
out_operands[3] = operands[1]; |
|
return mcore_output_cmov (out_operands, 1, \"btsti %3,31\"); |
}" |
[(set_attr "length" "6")]) |
|
(define_insn "movt5" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r") |
(if_then_else:SI (lt (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r") |
(const_int 0)) |
(match_operand:SI 2 "mcore_arith_imm_operand" "r,0,U,0") |
(match_operand:SI 3 "mcore_arith_imm_operand" "0,r,0,U")))] |
"" |
"@ |
btsti %1,31\;movt %0,%2 |
btsti %1,31\;movf %0,%3 |
btsti %1,31\;clrt %0 |
btsti %1,31\;clrf %0" |
[(set_attr "length" "4")]) |
|
|
;; ------------------------------------------------------------------------ |
;; ge |
;; ------------------------------------------------------------------------ |
|
; experimental conditional move with two constants +/- 1 BRC |
(define_insn "movtK_10" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
(if_then_else:SI |
(ge (match_operand:SI 1 "mcore_arith_reg_operand" "r") |
(const_int 0)) |
(match_operand:SI 2 "mcore_arith_O_operand" "O") |
(match_operand:SI 3 "mcore_arith_O_operand" "O")))] |
"GET_CODE (operands[2]) == CONST_INT && |
GET_CODE (operands[3]) == CONST_INT && |
((INTVAL (operands[2]) - INTVAL (operands[3]) == 1) || |
(INTVAL (operands[3]) - INTVAL (operands[2]) == 1))" |
"* |
{ |
rtx out_operands[4]; |
out_operands[0] = operands[0]; |
out_operands[1] = operands[2]; |
out_operands[2] = operands[3]; |
out_operands[3] = operands[1]; |
|
return mcore_output_cmov (out_operands, 0, \"btsti %3,31\"); |
}" |
[(set_attr "length" "6")]) |
|
(define_insn "movf5" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r") |
(if_then_else:SI (ge (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r") |
(const_int 0)) |
(match_operand:SI 2 "mcore_arith_imm_operand" "r,0,U,0") |
(match_operand:SI 3 "mcore_arith_imm_operand" "0,r,0,U")))] |
"" |
"@ |
btsti %1,31\;movf %0,%2 |
btsti %1,31\;movt %0,%3 |
btsti %1,31\;clrf %0 |
btsti %1,31\;clrt %0" |
[(set_attr "length" "4")]) |
|
;; ------------------------------------------------------------------------ |
;; Bitfield extract (xtrbN) |
;; ------------------------------------------------------------------------ |
|
; sometimes we're better off using QI/HI mode and letting the machine indep. |
; part expand insv and extv. |
; |
; e.g., sequences like:        [an insertion] |
; |
; ldw r8,(r6) |
; movi r7,0x00ffffff |
; and r8,r7 r7 dead |
; stw r8,(r6) r8 dead |
; |
; become: |
; |
; movi r8,0 |
; stb r8,(r6) r8 dead |
; |
; it looks like always using SI mode is a win except in this type of code |
; (when adjacent bit fields collapse on a byte or halfword boundary). when |
; expanding with SI mode, non-adjacent bit field masks fold, but with QI/HI |
; mode, they do not. one thought is to add some peepholes to cover cases |
; like the above, but this is not a general solution. |
; |
; -mword-bitfields expands/inserts using SI mode. otherwise, do it with |
; the smallest mode possible (using the machine indep. expansions). BRC |
|
;(define_expand "extv" |
; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "") |
; (sign_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "") |
; (match_operand:SI 2 "const_int_operand" "") |
; (match_operand:SI 3 "const_int_operand" ""))) |
; (clobber (reg:CC 17))] |
; "" |
; " |
;{ |
; if (INTVAL (operands[1]) != 8 || INTVAL (operands[2]) % 8 != 0) |
; { |
; if (TARGET_W_FIELD) |
; { |
; rtx lshft = GEN_INT (32 - (INTVAL (operands[2]) + INTVAL (operands[3]))); |
; rtx rshft = GEN_INT (32 - INTVAL (operands[2])); |
; |
; emit_insn (gen_rtx_SET (SImode, operands[0], operands[1])); |
; emit_insn (gen_rtx_SET (SImode, operands[0], |
; gen_rtx_ASHIFT (SImode, operands[0], lshft))); |
; emit_insn (gen_rtx_SET (SImode, operands[0], |
; gen_rtx_ASHIFTRT (SImode, operands[0], rshft))); |
; DONE; |
; } |
; else |
; FAIL; |
; } |
;}") |
|
(define_expand "extv" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "") |
(sign_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "") |
(match_operand:SI 2 "const_int_operand" "") |
(match_operand:SI 3 "const_int_operand" ""))) |
(clobber (reg:CC 17))] |
"" |
" |
{ |
if (INTVAL (operands[2]) == 8 && INTVAL (operands[3]) % 8 == 0) |
{ |
/* 8 bit field, aligned properly, use the xtrb[0123]+sext sequence. */ |
/* not DONE, not FAIL, but let the RTL get generated.... */ |
} |
else if (TARGET_W_FIELD) |
{ |
/* Arbitrary placement; note that the tree->rtl generator will make |
something close to this if we return FAIL */ |
rtx lshft = GEN_INT (32 - (INTVAL (operands[2]) + INTVAL (operands[3]))); |
rtx rshft = GEN_INT (32 - INTVAL (operands[2])); |
rtx tmp1 = gen_reg_rtx (SImode); |
rtx tmp2 = gen_reg_rtx (SImode); |
|
emit_insn (gen_rtx_SET (SImode, tmp1, operands[1])); |
emit_insn (gen_rtx_SET (SImode, tmp2, |
gen_rtx_ASHIFT (SImode, tmp1, lshft))); |
emit_insn (gen_rtx_SET (SImode, operands[0], |
gen_rtx_ASHIFTRT (SImode, tmp2, rshft))); |
DONE; |
} |
else |
{ |
/* Let the caller choose an alternate sequence. */ |
FAIL; |
} |
}") |
|
(define_expand "extzv" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "") |
(zero_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "") |
(match_operand:SI 2 "const_int_operand" "") |
(match_operand:SI 3 "const_int_operand" ""))) |
(clobber (reg:CC 17))] |
"" |
" |
{ |
if (INTVAL (operands[2]) == 8 && INTVAL (operands[3]) % 8 == 0) |
{ |
/* 8 bit field, aligned properly, use the xtrb[0123] sequence. */ |
/* Let the template generate some RTL.... */ |
} |
else if (CONST_OK_FOR_K ((1 << INTVAL (operands[2])) - 1)) |
{ |
/* A narrow bit-field (<=5 bits) means we can do a shift to put |
it in place and then use an andi to extract it. |
This is as good as a shiftleft/shiftright. */ |
|
rtx shifted; |
rtx mask = GEN_INT ((1 << INTVAL (operands[2])) - 1); |
|
if (INTVAL (operands[3]) == 0) |
{ |
shifted = operands[1]; |
} |
else |
{ |
rtx rshft = GEN_INT (INTVAL (operands[3])); |
shifted = gen_reg_rtx (SImode); |
emit_insn (gen_rtx_SET (SImode, shifted, |
gen_rtx_LSHIFTRT (SImode, operands[1], rshft))); |
} |
emit_insn (gen_rtx_SET (SImode, operands[0], |
gen_rtx_AND (SImode, shifted, mask))); |
DONE; |
} |
else if (TARGET_W_FIELD) |
{ |
      /* Arbitrary pattern; play shift/shift games to get it. |
         This is pretty much what the caller will do if we say FAIL.  */ |
rtx lshft = GEN_INT (32 - (INTVAL (operands[2]) + INTVAL (operands[3]))); |
rtx rshft = GEN_INT (32 - INTVAL (operands[2])); |
rtx tmp1 = gen_reg_rtx (SImode); |
rtx tmp2 = gen_reg_rtx (SImode); |
|
emit_insn (gen_rtx_SET (SImode, tmp1, operands[1])); |
emit_insn (gen_rtx_SET (SImode, tmp2, |
gen_rtx_ASHIFT (SImode, tmp1, lshft))); |
emit_insn (gen_rtx_SET (SImode, operands[0], |
gen_rtx_LSHIFTRT (SImode, tmp2, rshft))); |
DONE; |
} |
else |
{ |
/* Make the compiler figure out some alternative mechanism. */ |
FAIL; |
} |
|
/* Emit the RTL pattern; something will match it later. */ |
}") |
|
(define_expand "insv" |
[(set (zero_extract:SI (match_operand:SI 0 "mcore_arith_reg_operand" "") |
(match_operand:SI 1 "const_int_operand" "") |
(match_operand:SI 2 "const_int_operand" "")) |
(match_operand:SI 3 "general_operand" "")) |
(clobber (reg:CC 17))] |
"" |
" |
{ |
if (mcore_expand_insv (operands)) |
{ |
DONE; |
} |
else |
{ |
FAIL; |
} |
}") |
|
;; |
;; the xtrb[0123] instructions handily get at 8-bit fields on nice boundaries. |
;; but then, they do force you through r1. |
;; |
;; the combiner will build such patterns for us, so we'll make them available |
;; for its use. |
;; |
;; Note that we have both SIGNED and UNSIGNED versions of these... |
;; |
|
;; |
;; These no longer worry about the clobbering of CC bit; not sure this is |
;; good... |
;; |
;; the SIGNED versions of these |
;; |
(define_insn "" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,b") |
(sign_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,r") (const_int 8) (const_int 24)))] |
"" |
"@ |
asri %0,24 |
xtrb0 %0,%1\;sextb %0" |
[(set_attr "type" "shift")]) |
|
(define_insn "" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=b") |
(sign_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r") (const_int 8) (const_int 16)))] |
"" |
"xtrb1 %0,%1\;sextb %0" |
[(set_attr "type" "shift")]) |
|
(define_insn "" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=b") |
(sign_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r") (const_int 8) (const_int 8)))] |
"" |
"xtrb2 %0,%1\;sextb %0" |
[(set_attr "type" "shift")]) |
|
(define_insn "" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
(sign_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0") (const_int 8) (const_int 0)))] |
"" |
"sextb %0" |
[(set_attr "type" "shift")]) |
|
;; the UNSIGNED uses of xtrb[0123] |
;; |
(define_insn "" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,b") |
(zero_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,r") (const_int 8) (const_int 24)))] |
"" |
"@ |
lsri %0,24 |
xtrb0 %0,%1" |
[(set_attr "type" "shift")]) |
|
(define_insn "" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=b") |
(zero_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r") (const_int 8) (const_int 16)))] |
"" |
"xtrb1 %0,%1" |
[(set_attr "type" "shift")]) |
|
(define_insn "" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=b") |
(zero_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r") (const_int 8) (const_int 8)))] |
"" |
"xtrb2 %0,%1" |
[(set_attr "type" "shift")]) |
|
;; This can be peepholed if it follows a ldb ... |
(define_insn "" |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,b") |
(zero_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,r") (const_int 8) (const_int 0)))] |
"" |
"@ |
zextb %0 |
xtrb3 %0,%1\;zextb %0" |
[(set_attr "type" "shift")]) |
|
|
;; ------------------------------------------------------------------------ |
;; Block move - adapted from m88k.md |
;; ------------------------------------------------------------------------ |
|
(define_expand "movmemsi" |
[(parallel [(set (mem:BLK (match_operand:BLK 0 "" "")) |
(mem:BLK (match_operand:BLK 1 "" ""))) |
(use (match_operand:SI 2 "general_operand" "")) |
(use (match_operand:SI 3 "immediate_operand" ""))])] |
"" |
" |
{ |
if (mcore_expand_block_move (operands)) |
DONE; |
else |
FAIL; |
}") |
|
;; ;;; ??? These patterns are meant to be generated from expand_block_move, |
;; ;;; but they currently are not. |
;; |
;; (define_insn "" |
;; [(set (match_operand:QI 0 "mcore_arith_reg_operand" "=r") |
;; (match_operand:BLK 1 "mcore_general_movsrc_operand" "m"))] |
;; "" |
;; "ld.b %0,%1" |
;; [(set_attr "type" "load")]) |
;; |
;; (define_insn "" |
;; [(set (match_operand:HI 0 "mcore_arith_reg_operand" "=r") |
;; (match_operand:BLK 1 "mcore_general_movsrc_operand" "m"))] |
;; "" |
;; "ld.h %0,%1" |
;; [(set_attr "type" "load")]) |
;; |
;; (define_insn "" |
;; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
;; (match_operand:BLK 1 "mcore_general_movsrc_operand" "m"))] |
;; "" |
;; "ld.w %0,%1" |
;; [(set_attr "type" "load")]) |
;; |
;; (define_insn "" |
;; [(set (match_operand:BLK 0 "mcore_general_movdst_operand" "=m") |
;; (match_operand:QI 1 "mcore_arith_reg_operand" "r"))] |
;; "" |
;; "st.b %1,%0" |
;; [(set_attr "type" "store")]) |
;; |
;; (define_insn "" |
;; [(set (match_operand:BLK 0 "mcore_general_movdst_operand" "=m") |
;; (match_operand:HI 1 "mcore_arith_reg_operand" "r"))] |
;; "" |
;; "st.h %1,%0" |
;; [(set_attr "type" "store")]) |
;; |
;; (define_insn "" |
;; [(set (match_operand:BLK 0 "mcore_general_movdst_operand" "=m") |
;; (match_operand:SI 1 "mcore_arith_reg_operand" "r"))] |
;; "" |
;; "st.w %1,%0" |
;; [(set_attr "type" "store")]) |
|
;; ------------------------------------------------------------------------ |
;; Misc Optimizing quirks |
;; ------------------------------------------------------------------------ |
|
;; pair to catch constructs like: (int *)((p+=4)-4) which happen |
;; in stdarg/varargs traversal. This changes a 3 insn sequence to a 2 |
;; insn sequence. -- RBE 11/30/95 |
(define_insn "" |
[(parallel[ |
(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") |
(match_operand:SI 1 "mcore_arith_reg_operand" "+r")) |
(set (match_dup 1) (plus:SI (match_dup 1) (match_operand 2 "mcore_arith_any_imm_operand" "")))])] |
"GET_CODE(operands[2]) == CONST_INT" |
"#" |
[(set_attr "length" "4")]) |
|
(define_split |
[(parallel[ |
(set (match_operand:SI 0 "mcore_arith_reg_operand" "") |
(match_operand:SI 1 "mcore_arith_reg_operand" "")) |
(set (match_dup 1) (plus:SI (match_dup 1) (match_operand 2 "mcore_arith_any_imm_operand" "")))])] |
"GET_CODE(operands[2]) == CONST_INT && |
operands[0] != operands[1]" |
[(set (match_dup 0) (match_dup 1)) |
(set (match_dup 1) (plus:SI (match_dup 1) (match_dup 2)))]) |
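;; e.g. (illustrative) for the copy-then-advance pair  q = p; p += 4;  the |
;; split form is just |
;;   mov  rq,rp |
;;   addi rp,4 |
;; instead of advancing p, copying it, and subtracting the 4 back off. |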
|
|
;;; Peepholes |
|
; note: in the following patterns, use mcore_is_dead() to ensure that the |
; reg we may be trashing really is dead. reload doesn't always mark |
; deaths, so mcore_is_dead() (see mcore.c) scans forward to find its death. BRC |
|
;;; A peephole to convert the 3 instruction sequence generated by reload |
;;; to load a FP-offset address into a 2 instruction sequence. |
;;; ??? This probably never matches anymore. |
(define_peephole |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "r") |
(match_operand:SI 1 "const_int_operand" "J")) |
(set (match_dup 0) (neg:SI (match_dup 0))) |
(set (match_dup 0) |
(plus:SI (match_dup 0) |
(match_operand:SI 2 "mcore_arith_reg_operand" "r")))] |
"CONST_OK_FOR_J (INTVAL (operands[1]))" |
"error\;mov %0,%2\;subi %0,%1") |
|
;; Moves of inlinable constants are done late, so when a 'not' is generated |
;; it is never combined with the following 'and' to generate an 'andn' b/c |
;; the combiner never sees it. use a peephole to pick up this case (happens |
;; mostly with bitfields) BRC |
|
(define_peephole |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "r") |
(match_operand:SI 1 "const_int_operand" "i")) |
(set (match_operand:SI 2 "mcore_arith_reg_operand" "r") |
(and:SI (match_dup 2) (match_dup 0)))] |
"mcore_const_trick_uses_not (INTVAL (operands[1])) && |
operands[0] != operands[2] && |
mcore_is_dead (insn, operands[0])" |
"* return mcore_output_andn (insn, operands);") |
|
; when setting or clearing just two bits, it's cheapest to use two bseti's |
; or bclri's. only happens when relaxing immediates. BRC |
|
(define_peephole |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "") |
(match_operand:SI 1 "const_int_operand" "")) |
(set (match_operand:SI 2 "mcore_arith_reg_operand" "") |
(ior:SI (match_dup 2) (match_dup 0)))] |
"TARGET_HARDLIT && mcore_num_ones (INTVAL (operands[1])) == 2 && |
mcore_is_dead (insn, operands[0])" |
"* return mcore_output_bseti (operands[2], INTVAL (operands[1]));") |
|
(define_peephole |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "") |
(match_operand:SI 1 "const_int_operand" "")) |
(set (match_operand:SI 2 "mcore_arith_reg_operand" "") |
(and:SI (match_dup 2) (match_dup 0)))] |
"TARGET_HARDLIT && mcore_num_zeros (INTVAL (operands[1])) == 2 && |
mcore_is_dead (insn, operands[0])" |
"* return mcore_output_bclri (operands[2], INTVAL (operands[1]));") |
|
; change an and with a mask that has a single cleared bit into a bclri. this |
; handles QI and HI mode values using the knowledge that the most significant |
; bits don't matter. |
|
(define_peephole |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "") |
(match_operand:SI 1 "const_int_operand" "")) |
(set (match_operand:SI 2 "mcore_arith_reg_operand" "") |
(and:SI (match_operand:SI 3 "mcore_arith_reg_operand" "") |
(match_dup 0)))] |
"GET_CODE (operands[3]) == SUBREG && |
GET_MODE (SUBREG_REG (operands[3])) == QImode && |
mcore_num_zeros (INTVAL (operands[1]) | 0xffffff00) == 1 && |
mcore_is_dead (insn, operands[0])" |
"* |
if (! mcore_is_same_reg (operands[2], operands[3])) |
output_asm_insn (\"mov\\t%2,%3\", operands); |
return mcore_output_bclri (operands[2], INTVAL (operands[1]) | 0xffffff00);") |
|
/* Do not fold these together -- mode is lost at final output phase. */ |
|
(define_peephole |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "") |
(match_operand:SI 1 "const_int_operand" "")) |
(set (match_operand:SI 2 "mcore_arith_reg_operand" "") |
(and:SI (match_operand:SI 3 "mcore_arith_reg_operand" "") |
(match_dup 0)))] |
"GET_CODE (operands[3]) == SUBREG && |
GET_MODE (SUBREG_REG (operands[3])) == HImode && |
mcore_num_zeros (INTVAL (operands[1]) | 0xffff0000) == 1 && |
operands[2] == operands[3] && |
mcore_is_dead (insn, operands[0])" |
"* |
if (! mcore_is_same_reg (operands[2], operands[3])) |
output_asm_insn (\"mov\\t%2,%3\", operands); |
return mcore_output_bclri (operands[2], INTVAL (operands[1]) | 0xffff0000);") |
|
; This peephole helps when using -mwide-bitfields to widen fields so they |
; collapse. This, however, has the effect that a narrower mode is not used |
; when desirable. |
; |
; e.g., sequences like: |
; |
; ldw r8,(r6) |
; movi r7,0x00ffffff |
; and r8,r7 r7 dead |
; stw r8,(r6) r8 dead |
; |
; get peepholed to become: |
; |
; movi r8,0 |
; stb r8,(r6) r8 dead |
; |
; Do only easy addresses that have no offset. This peephole is also applied |
; to halfwords. We need to check that the load is non-volatile before we get |
; rid of it. |
|
(define_peephole |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "") |
(match_operand:SI 1 "memory_operand" "")) |
(set (match_operand:SI 2 "mcore_arith_reg_operand" "") |
(match_operand:SI 3 "const_int_operand" "")) |
(set (match_dup 0) (and:SI (match_dup 0) (match_dup 2))) |
(set (match_operand:SI 4 "memory_operand" "") (match_dup 0))] |
"mcore_is_dead (insn, operands[0]) && |
! MEM_VOLATILE_P (operands[1]) && |
mcore_is_dead (insn, operands[2]) && |
(mcore_byte_offset (INTVAL (operands[3])) > -1 || |
mcore_halfword_offset (INTVAL (operands[3])) > -1) && |
! MEM_VOLATILE_P (operands[4]) && |
GET_CODE (XEXP (operands[4], 0)) == REG" |
"* |
{ |
int ofs; |
enum machine_mode mode; |
rtx base_reg = XEXP (operands[4], 0); |
|
if ((ofs = mcore_byte_offset (INTVAL (operands[3]))) > -1) |
mode = QImode; |
else if ((ofs = mcore_halfword_offset (INTVAL (operands[3]))) > -1) |
mode = HImode; |
else |
gcc_unreachable (); |
|
if (ofs > 0) |
operands[4] = gen_rtx_MEM (mode, |
gen_rtx_PLUS (SImode, base_reg, GEN_INT(ofs))); |
else |
operands[4] = gen_rtx_MEM (mode, base_reg); |
|
if (mode == QImode) |
return \"movi %0,0\\n\\tst.b %0,%4\"; |
|
return \"movi %0,0\\n\\tst.h %0,%4\"; |
}") |
|
; From sop11.  Get btsti's for (LT A 0) where A is a QI or HI value.
|
(define_peephole |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "r") |
(sign_extend:SI (match_operand:QI 1 "mcore_arith_reg_operand" "0"))) |
(set (reg:CC 17) |
(lt:CC (match_dup 0) |
(const_int 0)))] |
"mcore_is_dead (insn, operands[0])" |
"btsti %0,7") |
|
(define_peephole |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "r") |
(sign_extend:SI (match_operand:HI 1 "mcore_arith_reg_operand" "0"))) |
(set (reg:CC 17) |
(lt:CC (match_dup 0) |
(const_int 0)))] |
"mcore_is_dead (insn, operands[0])" |
"btsti %0,15") |
|
; Pick up a tst. This combination happens because the immediate is not |
; allowed to fold into one of the operands of the tst. Does not happen |
; when relaxing immediates. BRC |
|
(define_peephole |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "") |
(match_operand:SI 1 "mcore_arith_reg_operand" "")) |
(set (match_dup 0) |
(and:SI (match_dup 0) |
(match_operand:SI 2 "mcore_literal_K_operand" ""))) |
(set (reg:CC 17) (ne:CC (match_dup 0) (const_int 0)))] |
"mcore_is_dead (insn, operands[0])" |
"movi %0,%2\;tst %1,%0") |
|
(define_peephole |
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "") |
(if_then_else:SI (ne (zero_extract:SI |
(match_operand:SI 1 "mcore_arith_reg_operand" "") |
(const_int 1) |
(match_operand:SI 2 "mcore_literal_K_operand" "")) |
(const_int 0)) |
(match_operand:SI 3 "mcore_arith_imm_operand" "") |
(match_operand:SI 4 "mcore_arith_imm_operand" ""))) |
(set (reg:CC 17) (ne:CC (match_dup 0) (const_int 0)))] |
"" |
"* |
{ |
unsigned int op0 = REGNO (operands[0]); |
|
if (GET_CODE (operands[3]) == REG) |
{ |
if (REGNO (operands[3]) == op0 && GET_CODE (operands[4]) == CONST_INT |
&& INTVAL (operands[4]) == 0) |
return \"btsti %1,%2\\n\\tclrf %0\"; |
else if (GET_CODE (operands[4]) == REG) |
{ |
if (REGNO (operands[4]) == op0) |
return \"btsti %1,%2\\n\\tmovf %0,%3\"; |
else if (REGNO (operands[3]) == op0) |
return \"btsti %1,%2\\n\\tmovt %0,%4\"; |
} |
|
gcc_unreachable (); |
} |
else if (GET_CODE (operands[3]) == CONST_INT |
&& INTVAL (operands[3]) == 0 |
&& GET_CODE (operands[4]) == REG) |
return \"btsti %1,%2\\n\\tclrt %0\"; |
|
gcc_unreachable (); |
}") |
|
; Experimental - do the constant folding ourselves.  Note that this isn't
; re-applied the way we'd really want, i.e., four ands collapse into two
; instead of one.  This is because peepholes are applied as a sliding
; window.  The peephole does not generate new rtl's, but instead slides
; across the rtl's generating machine instructions.  It would be nicer
; if the peephole optimizer were changed to re-apply patterns and to
; generate new rtl's; that would be more flexible.  The pattern below
; helps when we're not using relaxed immediates.  BRC
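; For example, a run of four and's against constants A, B, C and D would at
; best collapse pairwise into two and's (against A&B and against C&D), never
; into a single and against A&B&C&D, because each application of the window
; only sees two of the constants at a time.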
|
;(define_peephole |
; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "") |
; (match_operand:SI 1 "const_int_operand" "")) |
; (set (match_operand:SI 2 "mcore_arith_reg_operand" "") |
; (and:SI (match_dup 2) (match_dup 0))) |
; (set (match_dup 0) |
; (match_operand:SI 3 "const_int_operand" "")) |
; (set (match_dup 2) |
; (and:SI (match_dup 2) (match_dup 0)))] |
; "!TARGET_RELAX_IMM && mcore_is_dead (insn, operands[0]) && |
; mcore_const_ok_for_inline (INTVAL (operands[1]) & INTVAL (operands[3]))" |
; "* |
;{ |
; rtx out_operands[2]; |
; out_operands[0] = operands[0]; |
; out_operands[1] = GEN_INT (INTVAL (operands[1]) & INTVAL (operands[3])); |
; |
; output_inline_const (SImode, out_operands); |
; |
; output_asm_insn (\"and %2,%0\", operands); |
; |
; return \"\"; |
;}") |
|
; BRC: for inlining get rid of extra test - experimental |
;(define_peephole |
; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "r") |
; (ne:SI (reg:CC 17) (const_int 0))) |
; (set (reg:CC 17) (ne:CC (match_dup 0) (const_int 0))) |
; (set (pc) |
; (if_then_else (eq (reg:CC 17) (const_int 0)) |
; (label_ref (match_operand 1 "" "")) |
; (pc)))] |
; "" |
; "* |
;{ |
; if (get_attr_length (insn) == 10) |
; { |
; output_asm_insn (\"bt 2f\\n\\tjmpi [1f]\", operands); |
; output_asm_insn (\".align 2\\n1:\", operands); |
; output_asm_insn (\".long %1\\n2:\", operands); |
; return \"\"; |
; } |
; return \"bf %l1\"; |
;}") |
|
|
;;; Special patterns for dealing with the constant pool. |
|
;;; 4 byte integer in line. |
|
(define_insn "consttable_4" |
[(unspec_volatile [(match_operand:SI 0 "general_operand" "=g")] 0)] |
"" |
"* |
{ |
assemble_integer (operands[0], 4, BITS_PER_WORD, 1); |
return \"\"; |
}" |
[(set_attr "length" "4")]) |
|
;;; align to a four byte boundary. |
|
(define_insn "align_4" |
[(unspec_volatile [(const_int 0)] 1)] |
"" |
".align 2") |
|
;;; Handle extra constant pool entries created during final pass. |
|
(define_insn "consttable_end" |
[(unspec_volatile [(const_int 0)] 2)] |
"" |
"* return mcore_output_jump_label_table ();") |
|
;; |
;; Stack allocation -- in particular, for alloca(). |
;; This is *not* what we use for entry into functions.
;;
;; This is how we allocate stack space.  If we are allocating a
;; constant amount of space and we know it is less than 4096
;; bytes, we need not probe the stack.
;;
;; If it is more than 4096 bytes, we need to probe the stack
;; periodically.
;;
;; operands[1], the amount to allocate, is a POSITIVE number
;; indicating that we are growing the stack.
;;
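;; Illustrative sketch (pseudo-code, not the literal output) of the probing
;; loop emitted below for a large or non-constant request:
;;
;;   1:  sub  sp, step         extend the stack by one step
;;       stw  sp, (sp)         touch the newly exposed word (the probe)
;;       sub  remaining, step
;;       branch back to 1 while more than a full step remains
;;       sub  sp, remaining    final residual adjustment
;;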
(define_expand "allocate_stack" |
[(set (reg:SI 0) |
(plus:SI (reg:SI 0) |
(match_operand:SI 1 "general_operand" ""))) |
(set (match_operand:SI 0 "register_operand" "=r") |
(match_dup 2))] |
"" |
" |
{ |
/* If probing is disabled, just emit the adjustment directly.  */
if (mcore_stack_increment == 0) |
{ |
emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,operands[1])); |
;; emit_move_insn (operands[0], virtual_stack_dynamic_rtx); |
DONE; |
} |
|
/* For small constant growth, we unroll the code. */ |
if (GET_CODE (operands[1]) == CONST_INT |
&& INTVAL (operands[1]) < 8 * STACK_UNITS_MAXSTEP) |
{ |
int left = INTVAL(operands[1]); |
|
/* If it's a long way, get close enough for a last shot. */ |
if (left >= STACK_UNITS_MAXSTEP) |
{ |
rtx tmp = gen_reg_rtx (Pmode); |
emit_insn (gen_movsi (tmp, GEN_INT (STACK_UNITS_MAXSTEP))); |
do |
{ |
rtx memref = gen_rtx_MEM (SImode, stack_pointer_rtx); |
|
MEM_VOLATILE_P (memref) = 1; |
emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp)); |
emit_insn (gen_movsi (memref, stack_pointer_rtx)); |
left -= STACK_UNITS_MAXSTEP; |
} |
while (left > STACK_UNITS_MAXSTEP); |
} |
/* Perform the final adjustment. */ |
emit_insn (gen_addsi3 (stack_pointer_rtx,stack_pointer_rtx,GEN_INT(-left))); |
;; emit_move_insn (operands[0], virtual_stack_dynamic_rtx); |
DONE; |
} |
else |
{ |
rtx out_label = 0; |
rtx loop_label = gen_label_rtx (); |
rtx step = gen_reg_rtx (Pmode); |
rtx tmp = gen_reg_rtx (Pmode); |
rtx memref; |
|
#if 1 |
emit_insn (gen_movsi (tmp, operands[1])); |
emit_insn (gen_movsi (step, GEN_INT(STACK_UNITS_MAXSTEP))); |
|
if (GET_CODE (operands[1]) != CONST_INT) |
{ |
out_label = gen_label_rtx (); |
emit_insn (gen_cmpsi (step, tmp)); /* quick out */ |
emit_jump_insn (gen_bgeu (out_label)); |
} |
|
/* Run a loop that steps it incrementally. */ |
emit_label (loop_label); |
|
/* Extend a step, probe, and adjust remaining count. */ |
emit_insn(gen_subsi3(stack_pointer_rtx, stack_pointer_rtx, step)); |
memref = gen_rtx_MEM (SImode, stack_pointer_rtx); |
MEM_VOLATILE_P (memref) = 1; |
emit_insn(gen_movsi(memref, stack_pointer_rtx)); |
emit_insn(gen_subsi3(tmp, tmp, step)); |
|
/* Loop condition -- going back up. */ |
emit_insn (gen_cmpsi (step, tmp)); |
emit_jump_insn (gen_bltu (loop_label)); |
|
if (out_label) |
emit_label (out_label); |
|
/* Bump the residual. */ |
emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp)); |
;; emit_move_insn (operands[0], virtual_stack_dynamic_rtx); |
DONE; |
#else |
/* simple one-shot -- ensure register and do a subtract. |
* This does NOT comply with the ABI. */ |
emit_insn (gen_movsi (tmp, operands[1])); |
emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp)); |
;; emit_move_insn (operands[0], virtual_stack_dynamic_rtx); |
DONE; |
#endif |
} |
}") |
/crtn.asm
0,0 → 1,57
# crtn.asm for ELF based systems |
|
# Copyright (C) 1992, 1999, 2000 Free Software Foundation, Inc. |
# Written By David Vinayak Henkel-Wallace, June 1992 |
# |
# This file is free software; you can redistribute it and/or modify it |
# under the terms of the GNU General Public License as published by the |
# Free Software Foundation; either version 2, or (at your option) any |
# later version. |
# |
# In addition to the permissions in the GNU General Public License, the |
# Free Software Foundation gives you unlimited permission to link the |
# compiled version of this file with other programs, and to distribute |
# those programs without any restriction coming from the use of this |
# file. (The General Public License restrictions do apply in other |
# respects; for example, they cover modification of the file, and |
# distribution when not linked into another program.) |
# |
# This file is distributed in the hope that it will be useful, but |
# WITHOUT ANY WARRANTY; without even the implied warranty of |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
# General Public License for more details. |
# |
# You should have received a copy of the GNU General Public License |
# along with this program; see the file COPYING. If not, write to |
# the Free Software Foundation, 51 Franklin Street, Fifth Floor, |
# Boston, MA 02110-1301, USA. |
# |
# As a special exception, if you link this library with files |
# compiled with GCC to produce an executable, this does not cause |
# the resulting executable to be covered by the GNU General Public License. |
# This exception does not however invalidate any other reasons why |
# the executable file might be covered by the GNU General Public License. |
# |
|
# This file just makes sure that the .fini and .init sections do in |
# fact return. Users may put any desired instructions in those sections. |
# This file is the last thing linked into any executable. |
|
.file "crtn.asm" |
|
.section ".init" |
.align 4 |
|
ldw r15,(r0, 12) |
addi r0,16 |
jmp r15 |
|
.section ".fini" |
.align 4 |
|
ldw r15, (r0, 12) |
addi r0,16 |
jmp r15 |
|
# Th-th-th-that is all folks! |
|
/lib1.asm
0,0 → 1,308
/* libgcc routines for the MCore. |
Copyright (C) 1993, 1999, 2000 Free Software Foundation, Inc. |
|
This file is part of GCC. |
|
GCC is free software; you can redistribute it and/or modify it |
under the terms of the GNU General Public License as published by the |
Free Software Foundation; either version 2, or (at your option) any |
later version. |
|
In addition to the permissions in the GNU General Public License, the |
Free Software Foundation gives you unlimited permission to link the |
compiled version of this file into combinations with other programs, |
and to distribute those combinations without any restriction coming |
from the use of this file. (The General Public License restrictions |
do apply in other respects; for example, they cover modification of |
the file, and distribution when not linked into a combine |
executable.) |
|
This file is distributed in the hope that it will be useful, but |
WITHOUT ANY WARRANTY; without even the implied warranty of |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
General Public License for more details. |
|
You should have received a copy of the GNU General Public License |
along with this program; see the file COPYING. If not, write to |
the Free Software Foundation, 51 Franklin Street, Fifth Floor, |
Boston, MA 02110-1301, USA. */ |
|
#define CONCAT1(a, b) CONCAT2(a, b) |
#define CONCAT2(a, b) a ## b |
|
/* Use the right prefix for global labels. */ |
|
#define SYM(x) CONCAT1 (__, x) |
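/* For example, SYM (udiv32) expands via CONCAT1/CONCAT2 to __udiv32, so each
FUNC_START below defines a global label carrying the double-underscore
prefix used for these library routines.  */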
|
#ifdef __ELF__ |
#define TYPE(x) .type SYM (x),@function |
#define SIZE(x) .size SYM (x), . - SYM (x) |
#else |
#define TYPE(x) |
#define SIZE(x) |
#endif |
|
.macro FUNC_START name |
.text |
.globl SYM (\name) |
TYPE (\name) |
SYM (\name): |
.endm |
|
.macro FUNC_END name |
SIZE (\name) |
.endm |
|
#ifdef L_udivsi3 |
FUNC_START udiv32 |
FUNC_START udivsi32 |
|
movi r1,0 // r1-r2 form 64 bit dividend |
movi r4,1 // r4 is quotient (1 for a sentinel) |
|
cmpnei r3,0 // look for 0 divisor |
bt 9f |
trap 3 // divide by 0 |
9: |
// control iterations; skip across high order 0 bits in dividend |
mov r7,r2 |
cmpnei r7,0 |
bt 8f |
movi r2,0 // 0 dividend |
jmp r15 // quick return |
8: |
ff1 r7 // figure distance to skip |
lsl r4,r7 // move the sentinel along (with 0's behind) |
lsl r2,r7 // and the low 32 bits of numerator |
|
// appears to be wrong... |
// tested out incorrectly in our OS work... |
// mov r7,r3 // looking at divisor |
// ff1 r7 // I can move 32-r7 more bits to left. |
// addi r7,1 // ok, one short of that... |
// mov r1,r2 |
// lsr r1,r7 // bits that came from low order... |
// rsubi r7,31 // r7 == "32-n" == LEFT distance |
// addi r7,1 // this is (32-n) |
// lsl r4,r7 // fixes the high 32 (quotient) |
// lsl r2,r7 |
// cmpnei r4,0 |
// bf 4f // the sentinel went away... |
|
// run the remaining bits |
|
1: lslc r2,1 // 1 bit left shift of r1-r2 |
addc r1,r1 |
cmphs r1,r3 // upper 32 of dividend >= divisor? |
bf 2f |
sub r1,r3 // if yes, subtract divisor |
2: addc r4,r4 // shift by 1 and count subtracts |
bf 1b // if sentinel falls out of quotient, stop |
|
4: mov r2,r4 // return quotient |
mov r3,r1 // and piggyback the remainder |
jmp r15 |
FUNC_END udiv32 |
FUNC_END udivsi32 |
#endif |
|
#ifdef L_umodsi3 |
FUNC_START urem32 |
FUNC_START umodsi3 |
movi r1,0 // r1-r2 form 64 bit dividend |
movi r4,1 // r4 is quotient (1 for a sentinel) |
cmpnei r3,0 // look for 0 divisor |
bt 9f |
trap 3 // divide by 0 |
9: |
// control iterations; skip across high order 0 bits in dividend |
mov r7,r2 |
cmpnei r7,0 |
bt 8f |
movi r2,0 // 0 dividend |
jmp r15 // quick return |
8: |
ff1 r7 // figure distance to skip |
lsl r4,r7 // move the sentinel along (with 0's behind) |
lsl r2,r7 // and the low 32 bits of numerator |
|
1: lslc r2,1 // 1 bit left shift of r1-r2 |
addc r1,r1 |
cmphs r1,r3 // upper 32 of dividend >= divisor? |
bf 2f |
sub r1,r3 // if yes, subtract divisor |
2: addc r4,r4 // shift by 1 and count subtracts |
bf 1b // if sentinel falls out of quotient, stop |
mov r2,r1 // return remainder |
jmp r15 |
FUNC_END urem32 |
FUNC_END umodsi3 |
#endif |
|
#ifdef L_divsi3 |
FUNC_START div32 |
FUNC_START divsi3 |
mov r5,r2 // calc sign of quotient |
xor r5,r3 |
abs r2 // do unsigned divide |
abs r3 |
movi r1,0 // r1-r2 form 64 bit dividend |
movi r4,1 // r4 is quotient (1 for a sentinel) |
cmpnei r3,0 // look for 0 divisor |
bt 9f |
trap 3 // divide by 0 |
9: |
// control iterations; skip across high order 0 bits in dividend |
mov r7,r2 |
cmpnei r7,0 |
bt 8f |
movi r2,0 // 0 dividend |
jmp r15 // quick return |
8: |
ff1 r7 // figure distance to skip |
lsl r4,r7 // move the sentinel along (with 0's behind) |
lsl r2,r7 // and the low 32 bits of numerator |
|
// tested out incorrectly in our OS work... |
// mov r7,r3 // looking at divisor |
// ff1 r7 // I can move 32-r7 more bits to left. |
// addi r7,1 // ok, one short of that... |
// mov r1,r2 |
// lsr r1,r7 // bits that came from low order... |
// rsubi r7,31 // r7 == "32-n" == LEFT distance |
// addi r7,1 // this is (32-n) |
// lsl r4,r7 // fixes the high 32 (quotient) |
// lsl r2,r7 |
// cmpnei r4,0 |
// bf 4f // the sentinel went away... |
|
// run the remaining bits |
1: lslc r2,1 // 1 bit left shift of r1-r2 |
addc r1,r1 |
cmphs r1,r3 // upper 32 of dividend >= divisor? |
bf 2f |
sub r1,r3 // if yes, subtract divisor |
2: addc r4,r4 // shift by 1 and count subtracts |
bf 1b // if sentinel falls out of quotient, stop |
|
4: mov r2,r4 // return quotient |
mov r3,r1 // piggyback the remainder |
btsti r5,31 // after adjusting for sign |
bf 3f |
rsubi r2,0 |
rsubi r3,0 |
3: jmp r15 |
FUNC_END div32 |
FUNC_END divsi3 |
#endif |
|
#ifdef L_modsi3 |
FUNC_START rem32 |
FUNC_START modsi3 |
mov r5,r2 // calc sign of remainder |
abs r2 // do unsigned divide |
abs r3 |
movi r1,0 // r1-r2 form 64 bit dividend |
movi r4,1 // r4 is quotient (1 for a sentinel) |
cmpnei r3,0 // look for 0 divisor |
bt 9f |
trap 3 // divide by 0 |
9: |
// control iterations; skip across high order 0 bits in dividend |
mov r7,r2 |
cmpnei r7,0 |
bt 8f |
movi r2,0 // 0 dividend |
jmp r15 // quick return |
8: |
ff1 r7 // figure distance to skip |
lsl r4,r7 // move the sentinel along (with 0's behind) |
lsl r2,r7 // and the low 32 bits of numerator |
|
1: lslc r2,1 // 1 bit left shift of r1-r2 |
addc r1,r1 |
cmphs r1,r3 // upper 32 of dividend >= divisor? |
bf 2f |
sub r1,r3 // if yes, subtract divisor |
2: addc r4,r4 // shift by 1 and count subtracts |
bf 1b // if sentinel falls out of quotient, stop |
mov r2,r1 // return remainder |
btsti r5,31 // after adjusting for sign |
bf 3f |
rsubi r2,0 |
3: jmp r15 |
FUNC_END rem32 |
FUNC_END modsi3 |
#endif |
|
|
/* GCC expects that {__eq,__ne,__gt,__ge,__le,__lt}{df2,sf2}
will behave as __cmpdf2.  So, we stub the implementations to
jump on to __cmpdf2 and __cmpsf2.

All of these short-circuit the return path so that __cmp{s,d}f2
will go directly back to the caller.  */
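/* For instance, COMPARE_DF_JUMP eqdf2 below defines a global __eqdf2 entry
point whose entire body is "jmpi __cmpdf2".  */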
|
.macro COMPARE_DF_JUMP name |
.import SYM (cmpdf2) |
FUNC_START \name |
jmpi SYM (cmpdf2) |
FUNC_END \name |
.endm |
|
#ifdef L_eqdf2 |
COMPARE_DF_JUMP eqdf2 |
#endif /* L_eqdf2 */ |
|
#ifdef L_nedf2 |
COMPARE_DF_JUMP nedf2 |
#endif /* L_nedf2 */ |
|
#ifdef L_gtdf2 |
COMPARE_DF_JUMP gtdf2 |
#endif /* L_gtdf2 */ |
|
#ifdef L_gedf2 |
COMPARE_DF_JUMP gedf2 |
#endif /* L_gedf2 */ |
|
#ifdef L_ltdf2 |
COMPARE_DF_JUMP ltdf2 |
#endif /* L_ltdf2 */ |
|
#ifdef L_ledf2 |
COMPARE_DF_JUMP ledf2 |
#endif /* L_ledf2 */ |
|
/* SINGLE PRECISION FLOATING POINT STUBS */ |
|
.macro COMPARE_SF_JUMP name |
.import SYM (cmpsf2) |
FUNC_START \name |
jmpi SYM (cmpsf2) |
FUNC_END \name |
.endm |
|
#ifdef L_eqsf2 |
COMPARE_SF_JUMP eqsf2 |
#endif /* L_eqsf2 */ |
|
#ifdef L_nesf2 |
COMPARE_SF_JUMP nesf2 |
#endif /* L_nesf2 */ |
|
#ifdef L_gtsf2 |
COMPARE_SF_JUMP gtsf2 |
#endif /* L_gtsf2 */ |
|
#ifdef L_gesf2 |
COMPARE_SF_JUMP gesf2
#endif /* L_gesf2 */ |
|
#ifdef L_ltsf2 |
COMPARE_SF_JUMP ltsf2
#endif /* L_ltsf2 */ |
|
#ifdef L_lesf2 |
COMPARE_SF_JUMP lesf2 |
#endif /* L_lesf2 */ |
/mcore.c
0,0 → 1,3108
/* Output routines for Motorola MCore processor |
Copyright (C) 1993, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007 |
Free Software Foundation, Inc. |
|
This file is part of GCC. |
|
GCC is free software; you can redistribute it and/or modify it |
under the terms of the GNU General Public License as published |
by the Free Software Foundation; either version 3, or (at your |
option) any later version. |
|
GCC is distributed in the hope that it will be useful, but WITHOUT |
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY |
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public |
License for more details. |
|
You should have received a copy of the GNU General Public License |
along with GCC; see the file COPYING3. If not see |
<http://www.gnu.org/licenses/>. */ |
|
#include "config.h" |
#include "system.h" |
#include "coretypes.h" |
#include "tm.h" |
#include "rtl.h" |
#include "tree.h" |
#include "tm_p.h" |
#include "assert.h" |
#include "mcore.h" |
#include "regs.h" |
#include "hard-reg-set.h" |
#include "real.h" |
#include "insn-config.h" |
#include "conditions.h" |
#include "output.h" |
#include "insn-attr.h" |
#include "flags.h" |
#include "obstack.h" |
#include "expr.h" |
#include "reload.h" |
#include "recog.h" |
#include "function.h" |
#include "ggc.h" |
#include "toplev.h" |
#include "target.h" |
#include "target-def.h" |
|
/* Maximum size we are allowed to grow the stack in a single operation. |
If we want more, we must do it in increments of at most this size. |
If this value is 0, we don't check at all. */ |
int mcore_stack_increment = STACK_UNITS_MAXSTEP; |
|
/* For dumping information about frame sizes. */ |
char * mcore_current_function_name = 0; |
long mcore_current_compilation_timestamp = 0; |
|
/* Global variables for machine-dependent things. */ |
|
/* Saved operands from the last compare to use when we generate an scc |
or bcc insn. */ |
rtx arch_compare_op0; |
rtx arch_compare_op1; |
|
/* Provides the class number of the smallest class containing |
reg number. */ |
const int regno_reg_class[FIRST_PSEUDO_REGISTER] = |
{ |
GENERAL_REGS, ONLYR1_REGS, LRW_REGS, LRW_REGS, |
LRW_REGS, LRW_REGS, LRW_REGS, LRW_REGS, |
LRW_REGS, LRW_REGS, LRW_REGS, LRW_REGS, |
LRW_REGS, LRW_REGS, LRW_REGS, GENERAL_REGS, |
GENERAL_REGS, C_REGS, NO_REGS, NO_REGS, |
}; |
|
/* Provide reg_class from a letter such as appears in the machine |
description. */ |
const enum reg_class reg_class_from_letter[] = |
{ |
/* a */ LRW_REGS, /* b */ ONLYR1_REGS, /* c */ C_REGS, /* d */ NO_REGS, |
/* e */ NO_REGS, /* f */ NO_REGS, /* g */ NO_REGS, /* h */ NO_REGS, |
/* i */ NO_REGS, /* j */ NO_REGS, /* k */ NO_REGS, /* l */ NO_REGS, |
/* m */ NO_REGS, /* n */ NO_REGS, /* o */ NO_REGS, /* p */ NO_REGS, |
/* q */ NO_REGS, /* r */ GENERAL_REGS, /* s */ NO_REGS, /* t */ NO_REGS, |
/* u */ NO_REGS, /* v */ NO_REGS, /* w */ NO_REGS, /* x */ ALL_REGS, |
/* y */ NO_REGS, /* z */ NO_REGS |
}; |
|
struct mcore_frame |
{ |
int arg_size; /* Stdarg spills (bytes). */ |
int reg_size; /* Non-volatile reg saves (bytes). */ |
int reg_mask; /* Non-volatile reg saves. */ |
int local_size; /* Locals. */ |
int outbound_size; /* Arg overflow on calls out. */ |
int pad_outbound; |
int pad_local; |
int pad_reg; |
/* Describe the steps we'll use to grow it. */ |
#define MAX_STACK_GROWS 4 /* Gives us some spare space. */ |
int growth[MAX_STACK_GROWS]; |
int arg_offset; |
int reg_offset; |
int reg_growth; |
int local_growth; |
}; |
|
typedef enum |
{ |
COND_NO, |
COND_MOV_INSN, |
COND_CLR_INSN, |
COND_INC_INSN, |
COND_DEC_INSN, |
COND_BRANCH_INSN |
} |
cond_type; |
|
static void output_stack_adjust (int, int); |
static int calc_live_regs (int *); |
static int try_constant_tricks (long, int *, int *); |
static const char * output_inline_const (enum machine_mode, rtx *); |
static void layout_mcore_frame (struct mcore_frame *); |
static void mcore_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode, tree, int *, int); |
static cond_type is_cond_candidate (rtx); |
static rtx emit_new_cond_insn (rtx, int); |
static rtx conditionalize_block (rtx); |
static void conditionalize_optimization (void); |
static void mcore_reorg (void); |
static rtx handle_structs_in_regs (enum machine_mode, tree, int); |
static void mcore_mark_dllexport (tree); |
static void mcore_mark_dllimport (tree); |
static int mcore_dllexport_p (tree); |
static int mcore_dllimport_p (tree); |
const struct attribute_spec mcore_attribute_table[]; |
static tree mcore_handle_naked_attribute (tree *, tree, tree, int, bool *); |
#ifdef OBJECT_FORMAT_ELF |
static void mcore_asm_named_section (const char *, |
unsigned int, tree); |
#endif |
static void mcore_unique_section (tree, int); |
static void mcore_encode_section_info (tree, rtx, int); |
static const char *mcore_strip_name_encoding (const char *); |
static int mcore_const_costs (rtx, RTX_CODE); |
static int mcore_and_cost (rtx); |
static int mcore_ior_cost (rtx); |
static bool mcore_rtx_costs (rtx, int, int, int *); |
static void mcore_external_libcall (rtx); |
static bool mcore_return_in_memory (tree, tree); |
static int mcore_arg_partial_bytes (CUMULATIVE_ARGS *, |
enum machine_mode, |
tree, bool); |
|
|
/* Initialize the GCC target structure. */ |
#undef TARGET_ASM_EXTERNAL_LIBCALL |
#define TARGET_ASM_EXTERNAL_LIBCALL mcore_external_libcall |
|
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES |
#undef TARGET_MERGE_DECL_ATTRIBUTES |
#define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes |
#endif |
|
#ifdef OBJECT_FORMAT_ELF |
#undef TARGET_ASM_UNALIGNED_HI_OP |
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t" |
#undef TARGET_ASM_UNALIGNED_SI_OP |
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t" |
#endif |
|
#undef TARGET_ATTRIBUTE_TABLE |
#define TARGET_ATTRIBUTE_TABLE mcore_attribute_table |
#undef TARGET_ASM_UNIQUE_SECTION |
#define TARGET_ASM_UNIQUE_SECTION mcore_unique_section |
#undef TARGET_ASM_FUNCTION_RODATA_SECTION |
#define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section |
#undef TARGET_DEFAULT_TARGET_FLAGS |
#define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT |
#undef TARGET_ENCODE_SECTION_INFO |
#define TARGET_ENCODE_SECTION_INFO mcore_encode_section_info |
#undef TARGET_STRIP_NAME_ENCODING |
#define TARGET_STRIP_NAME_ENCODING mcore_strip_name_encoding |
#undef TARGET_RTX_COSTS |
#define TARGET_RTX_COSTS mcore_rtx_costs |
#undef TARGET_ADDRESS_COST |
#define TARGET_ADDRESS_COST hook_int_rtx_0 |
#undef TARGET_MACHINE_DEPENDENT_REORG |
#define TARGET_MACHINE_DEPENDENT_REORG mcore_reorg |
|
#undef TARGET_PROMOTE_FUNCTION_ARGS |
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true |
#undef TARGET_PROMOTE_FUNCTION_RETURN |
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true |
#undef TARGET_PROMOTE_PROTOTYPES |
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true |
|
#undef TARGET_RETURN_IN_MEMORY |
#define TARGET_RETURN_IN_MEMORY mcore_return_in_memory |
#undef TARGET_MUST_PASS_IN_STACK |
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size |
#undef TARGET_PASS_BY_REFERENCE |
#define TARGET_PASS_BY_REFERENCE hook_pass_by_reference_must_pass_in_stack |
#undef TARGET_ARG_PARTIAL_BYTES |
#define TARGET_ARG_PARTIAL_BYTES mcore_arg_partial_bytes |
|
#undef TARGET_SETUP_INCOMING_VARARGS |
#define TARGET_SETUP_INCOMING_VARARGS mcore_setup_incoming_varargs |
|
struct gcc_target targetm = TARGET_INITIALIZER; |
|
/* Adjust the stack by SIZE bytes in DIRECTION; a negative DIRECTION
grows the stack (the stack pointer is decremented).  */
static void |
output_stack_adjust (int direction, int size) |
{ |
/* If extending stack a lot, we do it incrementally. */ |
if (direction < 0 && size > mcore_stack_increment && mcore_stack_increment > 0) |
{ |
rtx tmp = gen_rtx_REG (SImode, 1); |
rtx memref; |
|
emit_insn (gen_movsi (tmp, GEN_INT (mcore_stack_increment))); |
do |
{ |
emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp)); |
memref = gen_rtx_MEM (SImode, stack_pointer_rtx); |
MEM_VOLATILE_P (memref) = 1; |
emit_insn (gen_movsi (memref, stack_pointer_rtx)); |
size -= mcore_stack_increment; |
} |
while (size > mcore_stack_increment); |
|
/* SIZE is now the residual for the last adjustment, |
which doesn't require a probe. */ |
} |
|
if (size) |
{ |
rtx insn; |
rtx val = GEN_INT (size); |
|
if (size > 32) |
{ |
rtx nval = gen_rtx_REG (SImode, 1); |
emit_insn (gen_movsi (nval, val)); |
val = nval; |
} |
|
if (direction > 0) |
insn = gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, val); |
else |
insn = gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, val); |
|
emit_insn (insn); |
} |
} |
|
/* Work out the registers which need to be saved, |
both as a mask and a count. */ |
|
static int |
calc_live_regs (int * count) |
{ |
int reg; |
int live_regs_mask = 0; |
|
* count = 0; |
|
for (reg = 0; reg < FIRST_PSEUDO_REGISTER; reg++) |
{ |
if (regs_ever_live[reg] && !call_used_regs[reg]) |
{ |
(*count)++; |
live_regs_mask |= (1 << reg); |
} |
} |
|
return live_regs_mask; |
} |
|
/* Print the operand address in x to the stream. */ |
|
void |
mcore_print_operand_address (FILE * stream, rtx x) |
{ |
switch (GET_CODE (x)) |
{ |
case REG: |
fprintf (stream, "(%s)", reg_names[REGNO (x)]); |
break; |
|
case PLUS: |
{ |
rtx base = XEXP (x, 0); |
rtx index = XEXP (x, 1); |
|
if (GET_CODE (base) != REG) |
{ |
/* Ensure that BASE is a register (one of them must be). */ |
rtx temp = base; |
base = index; |
index = temp; |
} |
|
switch (GET_CODE (index)) |
{ |
case CONST_INT: |
fprintf (stream, "(%s," HOST_WIDE_INT_PRINT_DEC ")", |
reg_names[REGNO(base)], INTVAL (index)); |
break; |
|
default: |
gcc_unreachable (); |
} |
} |
|
break; |
|
default: |
output_addr_const (stream, x); |
break; |
} |
} |
|
/* Print operand x (an rtx) in assembler syntax to file stream
according to modifier code.

'R'  print the next register or memory location along, i.e. the lsw in
a double word value
'O'  print a constant without the #
'M'  print a constant as its negative
'P'  print log2 of a power of two
'Q'  print log2 of an inverse of a power of two
'N'  print log2 of one plus a constant (32 when the constant is -1)
'U'  print register for ldm/stm instruction
'x'  print a constant in hexadecimal
'X'  print byte number for xtrbN instruction.  */
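/* For example, %P of 64 prints 6 (as used by "bgeni") and %N of 255 prints 8
(as used by "bmaski").  */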
|
void |
mcore_print_operand (FILE * stream, rtx x, int code) |
{ |
switch (code) |
{ |
case 'N': |
if (INTVAL(x) == -1) |
fprintf (asm_out_file, "32"); |
else |
fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) + 1)); |
break; |
case 'P': |
fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x))); |
break; |
case 'Q': |
fprintf (asm_out_file, "%d", exact_log2 (~INTVAL (x))); |
break; |
case 'O': |
fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x)); |
break; |
case 'M': |
fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, - INTVAL (x)); |
break; |
case 'R': |
/* Next location along in memory or register. */ |
switch (GET_CODE (x)) |
{ |
case REG: |
fputs (reg_names[REGNO (x) + 1], (stream)); |
break; |
case MEM: |
mcore_print_operand_address |
(stream, XEXP (adjust_address (x, SImode, 4), 0)); |
break; |
default: |
gcc_unreachable (); |
} |
break; |
case 'U': |
fprintf (asm_out_file, "%s-%s", reg_names[REGNO (x)], |
reg_names[REGNO (x) + 3]); |
break; |
case 'x': |
fprintf (asm_out_file, HOST_WIDE_INT_PRINT_HEX, INTVAL (x)); |
break; |
case 'X': |
fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, 3 - INTVAL (x) / 8); |
break; |
|
default: |
switch (GET_CODE (x)) |
{ |
case REG: |
fputs (reg_names[REGNO (x)], (stream)); |
break; |
case MEM: |
output_address (XEXP (x, 0)); |
break; |
default: |
output_addr_const (stream, x); |
break; |
} |
break; |
} |
} |
|
/* What does a constant cost ? */ |
|
static int |
mcore_const_costs (rtx exp, enum rtx_code code) |
{ |
int val = INTVAL (exp); |
|
/* Easy constants. */ |
if ( CONST_OK_FOR_I (val) |
|| CONST_OK_FOR_M (val) |
|| CONST_OK_FOR_N (val) |
|| (code == PLUS && CONST_OK_FOR_L (val))) |
return 1; |
else if (code == AND |
&& ( CONST_OK_FOR_M (~val) |
|| CONST_OK_FOR_N (~val))) |
return 2; |
else if (code == PLUS |
&& ( CONST_OK_FOR_I (-val) |
|| CONST_OK_FOR_M (-val) |
|| CONST_OK_FOR_N (-val))) |
return 2; |
|
return 5; |
} |
|
/* What does an and instruction cost - we do this because immediates may
have been relaxed.  We want to ensure that cse will eliminate repeated
loads of relaxed immediates; otherwise we'll get bad code (multiple
reloads of the same const).  */
|
static int |
mcore_and_cost (rtx x) |
{ |
int val; |
|
if (GET_CODE (XEXP (x, 1)) != CONST_INT) |
return 2; |
|
val = INTVAL (XEXP (x, 1)); |
|
/* Do it directly. */ |
if (CONST_OK_FOR_K (val) || CONST_OK_FOR_M (~val)) |
return 2; |
/* Takes one instruction to load. */ |
else if (const_ok_for_mcore (val)) |
return 3; |
/* Takes two instructions to load. */ |
else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val)) |
return 4; |
|
/* Takes a lrw to load. */ |
return 5; |
} |
|
/* What does an or cost - see and_cost(). */ |
|
static int |
mcore_ior_cost (rtx x) |
{ |
int val; |
|
if (GET_CODE (XEXP (x, 1)) != CONST_INT) |
return 2; |
|
val = INTVAL (XEXP (x, 1)); |
|
/* Do it directly with bclri. */ |
if (CONST_OK_FOR_M (val)) |
return 2; |
/* Takes one instruction to load. */ |
else if (const_ok_for_mcore (val)) |
return 3; |
/* Takes two instructions to load. */ |
else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val)) |
return 4; |
|
/* Takes a lrw to load. */ |
return 5; |
} |
|
static bool |
mcore_rtx_costs (rtx x, int code, int outer_code, int * total) |
{ |
switch (code) |
{ |
case CONST_INT: |
*total = mcore_const_costs (x, outer_code); |
return true; |
case CONST: |
case LABEL_REF: |
case SYMBOL_REF: |
*total = 5; |
return true; |
case CONST_DOUBLE: |
*total = 10; |
return true; |
|
case AND: |
*total = COSTS_N_INSNS (mcore_and_cost (x)); |
return true; |
|
case IOR: |
*total = COSTS_N_INSNS (mcore_ior_cost (x)); |
return true; |
|
case DIV: |
case UDIV: |
case MOD: |
case UMOD: |
case FLOAT: |
case FIX: |
*total = COSTS_N_INSNS (100); |
return true; |
|
default: |
return false; |
} |
} |
|
/* Check to see if a comparison against a constant can be made more efficient |
by incrementing/decrementing the constant to get one that is more efficient |
to load. */ |
|
int |
mcore_modify_comparison (enum rtx_code code) |
{ |
rtx op1 = arch_compare_op1; |
|
if (GET_CODE (op1) == CONST_INT) |
{ |
int val = INTVAL (op1); |
|
switch (code) |
{ |
case LE: |
if (CONST_OK_FOR_J (val + 1)) |
{ |
arch_compare_op1 = GEN_INT (val + 1); |
return 1; |
} |
break; |
|
default: |
break; |
} |
} |
|
return 0; |
} |
|
/* Prepare the operands for a comparison. */ |
|
rtx |
mcore_gen_compare_reg (enum rtx_code code) |
{ |
rtx op0 = arch_compare_op0; |
rtx op1 = arch_compare_op1; |
rtx cc_reg = gen_rtx_REG (CCmode, CC_REG); |
|
if (CONSTANT_P (op1) && GET_CODE (op1) != CONST_INT) |
op1 = force_reg (SImode, op1); |
|
/* cmpnei: 0-31 (K immediate) |
cmplti: 1-32 (J immediate, 0 using btsti x,31). */ |
switch (code) |
{ |
case EQ: /* Use inverted condition, cmpne. */ |
code = NE; |
/* Drop through. */ |
|
case NE: /* Use normal condition, cmpne. */ |
if (GET_CODE (op1) == CONST_INT && ! CONST_OK_FOR_K (INTVAL (op1))) |
op1 = force_reg (SImode, op1); |
break; |
|
case LE: /* Use inverted condition, reversed cmplt. */ |
code = GT; |
/* Drop through. */ |
|
case GT: /* Use normal condition, reversed cmplt. */ |
if (GET_CODE (op1) == CONST_INT) |
op1 = force_reg (SImode, op1); |
break; |
|
case GE: /* Use inverted condition, cmplt. */ |
code = LT; |
/* Drop through. */ |
|
case LT: /* Use normal condition, cmplt. */ |
if (GET_CODE (op1) == CONST_INT && |
/* covered by btsti x,31. */ |
INTVAL (op1) != 0 && |
! CONST_OK_FOR_J (INTVAL (op1))) |
op1 = force_reg (SImode, op1); |
break; |
|
case GTU: /* Use inverted condition, cmple. */ |
/* Unsigned > 0 is the same as != 0, but we need to invert the |
condition, so we want to set code = EQ. This cannot be done |
however, as the mcore does not support such a test. Instead |
we cope with this case in the "bgtu" pattern itself so we |
should never reach this point. */ |
gcc_assert (GET_CODE (op1) != CONST_INT || INTVAL (op1) != 0); |
code = LEU; |
/* Drop through. */ |
|
case LEU: /* Use normal condition, reversed cmphs. */ |
if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0) |
op1 = force_reg (SImode, op1); |
break; |
|
case LTU: /* Use inverted condition, cmphs. */ |
code = GEU; |
/* Drop through. */ |
|
case GEU: /* Use normal condition, cmphs. */ |
if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0) |
op1 = force_reg (SImode, op1); |
break; |
|
default: |
break; |
} |
|
emit_insn (gen_rtx_SET (VOIDmode, cc_reg, gen_rtx_fmt_ee (code, CCmode, op0, op1))); |
|
return cc_reg; |
} |
|
int |
mcore_symbolic_address_p (rtx x) |
{ |
switch (GET_CODE (x)) |
{ |
case SYMBOL_REF: |
case LABEL_REF: |
return 1; |
case CONST: |
x = XEXP (x, 0); |
return ( (GET_CODE (XEXP (x, 0)) == SYMBOL_REF |
|| GET_CODE (XEXP (x, 0)) == LABEL_REF) |
&& GET_CODE (XEXP (x, 1)) == CONST_INT); |
default: |
return 0; |
} |
} |
|
/* Functions to output assembly code for a function call. */ |
|
char * |
mcore_output_call (rtx operands[], int index) |
{ |
static char buffer[20]; |
rtx addr = operands [index]; |
|
if (REG_P (addr)) |
{ |
if (TARGET_CG_DATA) |
{ |
gcc_assert (mcore_current_function_name); |
|
ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name, |
"unknown", 1); |
} |
|
sprintf (buffer, "jsr\t%%%d", index); |
} |
else |
{ |
if (TARGET_CG_DATA) |
{ |
gcc_assert (mcore_current_function_name); |
gcc_assert (GET_CODE (addr) == SYMBOL_REF); |
|
ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name, |
XSTR (addr, 0), 0); |
} |
|
sprintf (buffer, "jbsr\t%%%d", index); |
} |
|
return buffer; |
} |
|
/* Can we load a constant with a single instruction ? */ |
|
int |
const_ok_for_mcore (int value) |
{ |
if (value >= 0 && value <= 127) |
return 1; |
|
/* Try exact power of two. */ |
if ((value & (value - 1)) == 0) |
return 1; |
|
/* Try exact power of two - 1. */ |
if ((value & (value + 1)) == 0) |
return 1; |
|
return 0; |
} |
|
/* Can we load a constant inline with up to 2 instructions ? */ |
|
int |
mcore_const_ok_for_inline (long value) |
{ |
int x, y; |
|
return try_constant_tricks (value, & x, & y) > 0; |
} |
|
/* Are we loading the constant using a not ? */ |
|
int |
mcore_const_trick_uses_not (long value) |
{ |
int x, y; |
|
return try_constant_tricks (value, & x, & y) == 2; |
} |
|
/* Try tricks to load a constant inline and return the trick number on
success (0 means not inlinable).

0: not inlinable
1: single instruction (do the usual thing)
2: single insn followed by a 'not'
3: single insn followed by an addi
4: single insn followed by a subi
5: single insn followed by rsubi
6: single insn followed by bseti
7: single insn followed by bclri
8: single insn followed by rotli
9: single insn followed by lsli
10: single insn followed by ixh
11: single insn followed by ixw.  */
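/* Two illustrative cases (the values are examples only): 0xffffff00 is not
directly loadable, but its complement 0xff is, so trick 2 applies (load
0xff, then 'not').  130 is 128 + 2, so trick 3 applies (load 128 in one
instruction, then 'addi 2').  */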
|
static int |
try_constant_tricks (long value, int * x, int * y) |
{ |
int i; |
unsigned bit, shf, rot; |
|
if (const_ok_for_mcore (value)) |
return 1; /* Do the usual thing. */ |
|
if (TARGET_HARDLIT) |
{ |
if (const_ok_for_mcore (~value)) |
{ |
*x = ~value; |
return 2; |
} |
|
for (i = 1; i <= 32; i++) |
{ |
if (const_ok_for_mcore (value - i)) |
{ |
*x = value - i; |
*y = i; |
|
return 3; |
} |
|
if (const_ok_for_mcore (value + i)) |
{ |
*x = value + i; |
*y = i; |
|
return 4; |
} |
} |
|
bit = 0x80000000L; |
|
for (i = 0; i <= 31; i++) |
{ |
if (const_ok_for_mcore (i - value)) |
{ |
*x = i - value; |
*y = i; |
|
return 5; |
} |
|
if (const_ok_for_mcore (value & ~bit)) |
{ |
*y = bit; |
*x = value & ~bit; |
|
return 6; |
} |
|
if (const_ok_for_mcore (value | bit)) |
{ |
*y = ~bit; |
*x = value | bit; |
|
return 7; |
} |
|
bit >>= 1; |
} |
|
shf = value; |
rot = value; |
|
for (i = 1; i < 31; i++) |
{ |
int c; |
|
/* MCore has rotate left. */ |
c = rot << 31; |
rot >>= 1; |
rot &= 0x7FFFFFFF; |
rot |= c; /* Simulate rotate. */ |
|
if (const_ok_for_mcore (rot)) |
{ |
*y = i; |
*x = rot; |
|
return 8; |
} |
|
if (shf & 1) |
shf = 0; /* Can't use logical shift, low order bit is one. */ |
|
shf >>= 1; |
|
if (shf != 0 && const_ok_for_mcore (shf)) |
{ |
*y = i; |
*x = shf; |
|
return 9; |
} |
} |
|
if ((value % 3) == 0 && const_ok_for_mcore (value / 3)) |
{ |
*x = value / 3; |
|
return 10; |
} |
|
if ((value % 5) == 0 && const_ok_for_mcore (value / 5)) |
{ |
*x = value / 5; |
|
return 11; |
} |
} |
|
return 0; |
} |
|
/* Check whether REG is dead at insn FIRST.  This is done by searching ahead
for either the next use (i.e., reg is live), a death note, or a set of |
reg. Don't just use dead_or_set_p() since reload does not always mark |
deaths (especially if PRESERVE_DEATH_NOTES_REGNO_P is not defined). We |
can ignore subregs by extracting the actual register. BRC */ |
|
int |
mcore_is_dead (rtx first, rtx reg) |
{ |
rtx insn; |
|
/* For mcore, subregs can't live independently of their parent regs. */ |
if (GET_CODE (reg) == SUBREG) |
reg = SUBREG_REG (reg); |
|
/* Dies immediately. */ |
if (dead_or_set_p (first, reg)) |
return 1; |
|
/* Look for conclusive evidence of live/death, otherwise we have |
to assume that it is live. */ |
for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn)) |
{ |
if (GET_CODE (insn) == JUMP_INSN) |
return 0; /* We lose track, assume it is alive. */ |
|
else if (GET_CODE(insn) == CALL_INSN) |
{ |
/* Calls might use it for the target address or for register parms.  */
if (reg_referenced_p (reg, PATTERN (insn)) |
|| find_reg_fusage (insn, USE, reg)) |
return 0; |
else if (dead_or_set_p (insn, reg)) |
return 1; |
} |
else if (GET_CODE (insn) == INSN) |
{ |
if (reg_referenced_p (reg, PATTERN (insn))) |
return 0; |
else if (dead_or_set_p (insn, reg)) |
return 1; |
} |
} |
|
/* No conclusive evidence either way, we cannot take the chance |
that control flow hid the use from us -- "I'm not dead yet". */ |
return 0; |
} |
|
/* Count the number of ones in mask. */ |
|
int |
mcore_num_ones (int mask) |
{ |
/* A trick to count set bits recently posted on comp.compilers. */ |
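/* Each step below sums adjacent bit groups in parallel (pairs, then nibbles,
then bytes); the final return folds the two halfwords.  For example, a
mask of 0xc yields 2.  */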
mask = (mask >> 1 & 0x55555555) + (mask & 0x55555555); |
mask = ((mask >> 2) & 0x33333333) + (mask & 0x33333333); |
mask = ((mask >> 4) + mask) & 0x0f0f0f0f; |
mask = ((mask >> 8) + mask); |
|
return (mask + (mask >> 16)) & 0xff; |
} |
|
/* Count the number of zeros in mask. */ |
|
int |
mcore_num_zeros (int mask) |
{ |
return 32 - mcore_num_ones (mask); |
} |
|
/* Determine byte being masked. */ |
|
int |
mcore_byte_offset (unsigned int mask) |
{ |
if (mask == 0x00ffffffL) |
return 0; |
else if (mask == 0xff00ffffL) |
return 1; |
else if (mask == 0xffff00ffL) |
return 2; |
else if (mask == 0xffffff00L) |
return 3; |
|
return -1; |
} |
|
/* Determine halfword being masked. */ |
|
int |
mcore_halfword_offset (unsigned int mask) |
{ |
if (mask == 0x0000ffffL) |
return 0; |
else if (mask == 0xffff0000L) |
return 1; |
|
return -1; |
} |
|
/* Output a series of bseti's corresponding to mask. */ |
|
const char * |
mcore_output_bseti (rtx dst, int mask) |
{ |
rtx out_operands[2]; |
int bit; |
|
out_operands[0] = dst; |
|
for (bit = 0; bit < 32; bit++) |
{ |
if ((mask & 0x1) == 0x1) |
{ |
out_operands[1] = GEN_INT (bit); |
|
output_asm_insn ("bseti\t%0,%1", out_operands); |
} |
mask >>= 1; |
} |
|
return ""; |
} |
|
/* Output a series of bclri's corresponding to mask. */ |
|
const char * |
mcore_output_bclri (rtx dst, int mask) |
{ |
rtx out_operands[2]; |
int bit; |
|
out_operands[0] = dst; |
|
for (bit = 0; bit < 32; bit++) |
{ |
if ((mask & 0x1) == 0x0) |
{ |
out_operands[1] = GEN_INT (bit); |
|
output_asm_insn ("bclri\t%0,%1", out_operands); |
} |
|
mask >>= 1; |
} |
|
return ""; |
} |
|
/* Output a conditional move of two constants that are within +/- 1 of each
other.  See the "movtK" patterns in mcore.md.  I'm not sure this is really
worth the effort.  */
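/* For example, when the two constants are 5 and 4, this can emit a "movi"
of 5 followed by a conditional decrement (dect or decf, depending on the
sense of the test) so that the other arm ends up holding 4.  */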
|
const char * |
mcore_output_cmov (rtx operands[], int cmp_t, const char * test) |
{ |
int load_value; |
int adjust_value; |
rtx out_operands[4]; |
|
out_operands[0] = operands[0]; |
|
/* Check to see which constant is loadable. */ |
if (const_ok_for_mcore (INTVAL (operands[1]))) |
{ |
out_operands[1] = operands[1]; |
out_operands[2] = operands[2]; |
} |
else if (const_ok_for_mcore (INTVAL (operands[2]))) |
{ |
out_operands[1] = operands[2]; |
out_operands[2] = operands[1]; |
|
/* Complement test since constants are swapped. */ |
cmp_t = (cmp_t == 0); |
} |
load_value = INTVAL (out_operands[1]); |
adjust_value = INTVAL (out_operands[2]); |
|
/* First output the test if folded into the pattern. */ |
|
if (test) |
output_asm_insn (test, operands); |
|
/* Load the constant - for now, only support constants that can be
generated with a single instruction.  Maybe add general inlinable
constants later (this will increase the number of patterns since the
instruction sequence has a different length attribute).  */
if (load_value >= 0 && load_value <= 127) |
output_asm_insn ("movi\t%0,%1", out_operands); |
else if ((load_value & (load_value - 1)) == 0) |
output_asm_insn ("bgeni\t%0,%P1", out_operands); |
else if ((load_value & (load_value + 1)) == 0) |
output_asm_insn ("bmaski\t%0,%N1", out_operands); |
|
/* Output the constant adjustment. */ |
if (load_value > adjust_value) |
{ |
if (cmp_t) |
output_asm_insn ("decf\t%0", out_operands); |
else |
output_asm_insn ("dect\t%0", out_operands); |
} |
else |
{ |
if (cmp_t) |
output_asm_insn ("incf\t%0", out_operands); |
else |
output_asm_insn ("inct\t%0", out_operands); |
} |
|
return ""; |
} |
|
/* Outputs the peephole for moving a constant that gets not'ed followed |
by an and (i.e. combine the not and the and into andn). BRC */ |
|
const char * |
mcore_output_andn (rtx insn ATTRIBUTE_UNUSED, rtx operands[]) |
{ |
int x, y; |
rtx out_operands[3]; |
const char * load_op; |
char buf[256]; |
int trick_no; |
|
trick_no = try_constant_tricks (INTVAL (operands[1]), &x, &y); |
gcc_assert (trick_no == 2); |
|
out_operands[0] = operands[0]; |
out_operands[1] = GEN_INT(x); |
out_operands[2] = operands[2]; |
|
if (x >= 0 && x <= 127) |
load_op = "movi\t%0,%1"; |
|
/* Try exact power of two. */ |
else if ((x & (x - 1)) == 0) |
load_op = "bgeni\t%0,%P1"; |
|
/* Try exact power of two - 1. */ |
else if ((x & (x + 1)) == 0) |
load_op = "bmaski\t%0,%N1"; |
|
else |
load_op = "BADMOVI\t%0,%1"; |
|
sprintf (buf, "%s\n\tandn\t%%2,%%0", load_op); |
output_asm_insn (buf, out_operands); |
|
return ""; |
} |
|
/* Output an inline constant. */ |
|
static const char * |
output_inline_const (enum machine_mode mode, rtx operands[]) |
{ |
int x = 0, y = 0; |
int trick_no; |
rtx out_operands[3]; |
char buf[256]; |
char load_op[256]; |
const char *dst_fmt; |
int value; |
|
value = INTVAL (operands[1]); |
|
trick_no = try_constant_tricks (value, &x, &y); |
/* lrw's are handled separately: Large inlinable constants never get |
turned into lrw's. Our caller uses try_constant_tricks to back |
off to an lrw rather than calling this routine. */ |
gcc_assert (trick_no != 0); |
|
if (trick_no == 1) |
x = value; |
|
/* operands: 0 = dst, 1 = load immed., 2 = immed. adjustment. */ |
out_operands[0] = operands[0]; |
out_operands[1] = GEN_INT (x); |
|
if (trick_no > 2) |
out_operands[2] = GEN_INT (y); |
|
/* Select dst format based on mode. */ |
if (mode == DImode && (! TARGET_LITTLE_END)) |
dst_fmt = "%R0"; |
else |
dst_fmt = "%0"; |
|
if (x >= 0 && x <= 127) |
sprintf (load_op, "movi\t%s,%%1", dst_fmt); |
|
/* Try exact power of two. */ |
else if ((x & (x - 1)) == 0) |
sprintf (load_op, "bgeni\t%s,%%P1", dst_fmt); |
|
/* Try exact power of two - 1. */ |
else if ((x & (x + 1)) == 0) |
sprintf (load_op, "bmaski\t%s,%%N1", dst_fmt); |
|
else |
sprintf (load_op, "BADMOVI\t%s,%%1", dst_fmt); |
|
switch (trick_no) |
{ |
case 1: |
strcpy (buf, load_op); |
break; |
case 2: /* not */ |
sprintf (buf, "%s\n\tnot\t%s\t// %d 0x%x", load_op, dst_fmt, value, value); |
break; |
case 3: /* add */ |
sprintf (buf, "%s\n\taddi\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value); |
break; |
case 4: /* sub */ |
sprintf (buf, "%s\n\tsubi\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value); |
break; |
case 5: /* rsub */ |
/* Never happens unless -mrsubi, see try_constant_tricks(). */ |
sprintf (buf, "%s\n\trsubi\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value); |
break; |
case 6: /* bset */ |
sprintf (buf, "%s\n\tbseti\t%s,%%P2\t// %d 0x%x", load_op, dst_fmt, value, value); |
break; |
case 7: /* bclr */ |
sprintf (buf, "%s\n\tbclri\t%s,%%Q2\t// %d 0x%x", load_op, dst_fmt, value, value); |
break; |
case 8: /* rotl */ |
sprintf (buf, "%s\n\trotli\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value); |
break; |
case 9: /* lsl */ |
sprintf (buf, "%s\n\tlsli\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value); |
break; |
case 10: /* ixh */ |
sprintf (buf, "%s\n\tixh\t%s,%s\t// %d 0x%x", load_op, dst_fmt, dst_fmt, value, value); |
break; |
case 11: /* ixw */ |
sprintf (buf, "%s\n\tixw\t%s,%s\t// %d 0x%x", load_op, dst_fmt, dst_fmt, value, value); |
break; |
default: |
return ""; |
} |
|
output_asm_insn (buf, out_operands); |
|
return ""; |
} |
|
/* Output a move of a word or less value. */ |
|
const char * |
mcore_output_move (rtx insn ATTRIBUTE_UNUSED, rtx operands[], |
enum machine_mode mode ATTRIBUTE_UNUSED) |
{ |
rtx dst = operands[0]; |
rtx src = operands[1]; |
|
if (GET_CODE (dst) == REG) |
{ |
if (GET_CODE (src) == REG) |
{ |
if (REGNO (src) == CC_REG) /* r-c */ |
return "mvc\t%0"; |
else |
return "mov\t%0,%1"; /* r-r*/ |
} |
else if (GET_CODE (src) == MEM) |
{ |
if (GET_CODE (XEXP (src, 0)) == LABEL_REF) |
return "lrw\t%0,[%1]"; /* a-R */ |
else |
switch (GET_MODE (src)) /* r-m */ |
{ |
case SImode: |
return "ldw\t%0,%1"; |
case HImode: |
return "ld.h\t%0,%1"; |
case QImode: |
return "ld.b\t%0,%1"; |
default: |
gcc_unreachable (); |
} |
} |
else if (GET_CODE (src) == CONST_INT) |
{ |
int x, y; |
|
if (CONST_OK_FOR_I (INTVAL (src))) /* r-I */ |
return "movi\t%0,%1"; |
else if (CONST_OK_FOR_M (INTVAL (src))) /* r-M */ |
return "bgeni\t%0,%P1\t// %1 %x1"; |
else if (CONST_OK_FOR_N (INTVAL (src))) /* r-N */ |
return "bmaski\t%0,%N1\t// %1 %x1"; |
else if (try_constant_tricks (INTVAL (src), &x, &y)) /* R-P */ |
return output_inline_const (SImode, operands); /* 1-2 insns */ |
else |
return "lrw\t%0,%x1\t// %1"; /* Get it from literal pool. */ |
} |
else |
return "lrw\t%0, %1"; /* Into the literal pool. */ |
} |
else if (GET_CODE (dst) == MEM) /* m-r */ |
switch (GET_MODE (dst)) |
{ |
case SImode: |
return "stw\t%1,%0"; |
case HImode: |
return "st.h\t%1,%0"; |
case QImode: |
return "st.b\t%1,%0"; |
default: |
gcc_unreachable (); |
} |
|
gcc_unreachable (); |
} |
|
/* Return a sequence of instructions to perform DI or DF move. |
Since the MCORE cannot move a DI or DF in one instruction, we have |
to take care when we see overlapping source and dest registers. */ |
|
const char * |
mcore_output_movedouble (rtx operands[], enum machine_mode mode ATTRIBUTE_UNUSED) |
{ |
rtx dst = operands[0]; |
rtx src = operands[1]; |
|
if (GET_CODE (dst) == REG) |
{ |
if (GET_CODE (src) == REG) |
{ |
int dstreg = REGNO (dst); |
int srcreg = REGNO (src); |
|
/* Ensure the second source register is not overwritten.  */
if (srcreg + 1 == dstreg) |
return "mov %R0,%R1\n\tmov %0,%1"; |
else |
return "mov %0,%1\n\tmov %R0,%R1"; |
} |
else if (GET_CODE (src) == MEM) |
{ |
rtx memexp = XEXP (src, 0);
int dstreg = REGNO (dst); |
int basereg = -1; |
|
if (GET_CODE (memexp) == LABEL_REF) |
return "lrw\t%0,[%1]\n\tlrw\t%R0,[%R1]"; |
else if (GET_CODE (memexp) == REG) |
basereg = REGNO (memexp); |
else if (GET_CODE (memexp) == PLUS) |
{ |
if (GET_CODE (XEXP (memexp, 0)) == REG) |
basereg = REGNO (XEXP (memexp, 0)); |
else if (GET_CODE (XEXP (memexp, 1)) == REG) |
basereg = REGNO (XEXP (memexp, 1)); |
else |
gcc_unreachable (); |
} |
else |
gcc_unreachable (); |
|
/* ??? length attribute is wrong here. */ |
if (dstreg == basereg) |
{ |
/* Just load them in reverse order. */ |
return "ldw\t%R0,%R1\n\tldw\t%0,%1"; |
|
/* XXX: alternative: move basereg to basereg+1 |
and then fall through. */ |
} |
else |
return "ldw\t%0,%1\n\tldw\t%R0,%R1"; |
} |
else if (GET_CODE (src) == CONST_INT) |
{ |
if (TARGET_LITTLE_END) |
{ |
if (CONST_OK_FOR_I (INTVAL (src))) |
output_asm_insn ("movi %0,%1", operands); |
else if (CONST_OK_FOR_M (INTVAL (src))) |
output_asm_insn ("bgeni %0,%P1", operands); |
else if (INTVAL (src) == -1) |
output_asm_insn ("bmaski %0,32", operands); |
else if (CONST_OK_FOR_N (INTVAL (src))) |
output_asm_insn ("bmaski %0,%N1", operands); |
else |
gcc_unreachable (); |
|
if (INTVAL (src) < 0) |
return "bmaski %R0,32"; |
else |
return "movi %R0,0"; |
} |
else |
{ |
if (CONST_OK_FOR_I (INTVAL (src))) |
output_asm_insn ("movi %R0,%1", operands); |
else if (CONST_OK_FOR_M (INTVAL (src))) |
output_asm_insn ("bgeni %R0,%P1", operands); |
else if (INTVAL (src) == -1) |
output_asm_insn ("bmaski %R0,32", operands); |
else if (CONST_OK_FOR_N (INTVAL (src))) |
output_asm_insn ("bmaski %R0,%N1", operands); |
else |
gcc_unreachable (); |
|
if (INTVAL (src) < 0) |
return "bmaski %0,32"; |
else |
return "movi %0,0"; |
} |
} |
else |
gcc_unreachable (); |
} |
else if (GET_CODE (dst) == MEM && GET_CODE (src) == REG) |
return "stw\t%1,%0\n\tstw\t%R1,%R0"; |
else |
gcc_unreachable (); |
} |
|
/* Predicates used by the templates. */ |
|
int |
mcore_arith_S_operand (rtx op) |
{ |
if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (~INTVAL (op))) |
return 1; |
|
return 0; |
} |
|
/* Expand insert bit field. BRC */ |
|
int |
mcore_expand_insv (rtx operands[]) |
{ |
int width = INTVAL (operands[1]); |
int posn = INTVAL (operands[2]); |
int mask; |
rtx mreg, sreg, ereg; |
|
/* To get width 1 insv, the test in store_bit_field() (expmed.c, line 191) |
for width==1 must be removed. Look around line 368. This is something |
we really want the md part to do. */ |
if (width == 1 && GET_CODE (operands[3]) == CONST_INT) |
{ |
/* Do directly with bseti or bclri. */ |
/* RBE: 2/97 consider only low bit of constant. */ |
if ((INTVAL(operands[3])&1) == 0) |
{ |
mask = ~(1 << posn); |
emit_insn (gen_rtx_SET (SImode, operands[0], |
gen_rtx_AND (SImode, operands[0], GEN_INT (mask)))); |
} |
else |
{ |
mask = 1 << posn; |
emit_insn (gen_rtx_SET (SImode, operands[0], |
gen_rtx_IOR (SImode, operands[0], GEN_INT (mask)))); |
} |
|
return 1; |
} |
|
/* Look at some bit-field placements that we aren't interested |
in handling ourselves, unless specifically directed to do so. */ |
if (! TARGET_W_FIELD) |
return 0; /* Generally, give up about now. */ |
|
if (width == 8 && posn % 8 == 0) |
/* Byte sized and aligned; let caller break it up. */ |
return 0; |
|
if (width == 16 && posn % 16 == 0) |
/* Short sized and aligned; let caller break it up. */ |
return 0; |
|
/* The general case - we can do this a little bit better than what the |
machine independent part tries. This will get rid of all the subregs |
that mess up constant folding in combine when working with relaxed |
immediates. */ |
|
/* If setting the entire field, do it directly. */ |
if (GET_CODE (operands[3]) == CONST_INT && |
INTVAL (operands[3]) == ((1 << width) - 1)) |
{ |
mreg = force_reg (SImode, GEN_INT (INTVAL (operands[3]) << posn)); |
emit_insn (gen_rtx_SET (SImode, operands[0], |
gen_rtx_IOR (SImode, operands[0], mreg))); |
return 1; |
} |
|
/* Generate the clear mask. */ |
mreg = force_reg (SImode, GEN_INT (~(((1 << width) - 1) << posn))); |
|
/* Clear the field, to overlay it later with the source. */ |
emit_insn (gen_rtx_SET (SImode, operands[0], |
gen_rtx_AND (SImode, operands[0], mreg))); |
|
/* If the source is constant 0, we've nothing to add back. */ |
if (GET_CODE (operands[3]) == CONST_INT && INTVAL (operands[3]) == 0) |
return 1; |
|
/* XXX: Should we worry about more games with constant values? |
We've covered the high profile: set/clear single-bit and many-bit |
fields. How often do we see "arbitrary bit pattern" constants? */ |
sreg = copy_to_mode_reg (SImode, operands[3]); |
|
/* Extract src as same width as dst (needed for signed values). We |
always have to do this since we widen everything to SImode. |
We don't have to mask if we're shifting this up against the |
MSB of the register (i.e., the shift will push out any high-order
bits).  */
if (width + posn != (int) GET_MODE_BITSIZE (SImode))
{ |
ereg = force_reg (SImode, GEN_INT ((1 << width) - 1)); |
emit_insn (gen_rtx_SET (SImode, sreg, |
gen_rtx_AND (SImode, sreg, ereg))); |
} |
|
/* Insert source value in dest. */ |
if (posn != 0) |
emit_insn (gen_rtx_SET (SImode, sreg, |
gen_rtx_ASHIFT (SImode, sreg, GEN_INT (posn)))); |
|
emit_insn (gen_rtx_SET (SImode, operands[0], |
gen_rtx_IOR (SImode, operands[0], sreg))); |
|
return 1; |
} |
|
/* ??? Block move stuff stolen from m88k. This code has not been |
verified for correctness. */ |
|
/* Emit code to perform a block move. Choose the best method. |
|
OPERANDS[0] is the destination. |
OPERANDS[1] is the source. |
OPERANDS[2] is the size. |
OPERANDS[3] is the alignment safe to use. */ |
|
/* Emit code to perform a block move with an offset sequence of ldw/st |
instructions (..., ldw 0, stw 1, ldw 1, stw 0, ...). SIZE and ALIGN are |
known constants. DEST and SRC are registers. OFFSET is the known |
starting point for the output pattern. */ |
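/* Illustrative only: for an 8-byte, word-aligned copy the generated sequence
is roughly "load word 0, load word 4, store word 0, store word 4" -- the
loads run one iteration ahead of the stores.  */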
|
static const enum machine_mode mode_from_align[] = |
{ |
VOIDmode, QImode, HImode, VOIDmode, SImode, |
}; |
|
static void |
block_move_sequence (rtx dst_mem, rtx src_mem, int size, int align) |
{ |
rtx temp[2]; |
enum machine_mode mode[2]; |
int amount[2]; |
bool active[2]; |
int phase = 0; |
int next; |
int offset_ld = 0; |
int offset_st = 0; |
rtx x; |
|
x = XEXP (dst_mem, 0); |
if (!REG_P (x)) |
{ |
x = force_reg (Pmode, x); |
dst_mem = replace_equiv_address (dst_mem, x); |
} |
|
x = XEXP (src_mem, 0); |
if (!REG_P (x)) |
{ |
x = force_reg (Pmode, x); |
src_mem = replace_equiv_address (src_mem, x); |
} |
|
active[0] = active[1] = false; |
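  /* The copy below is software-pipelined: each pass of the do-while loads |
     the next chunk into one of two temporaries while storing the chunk |
     loaded on the previous pass, so loads stay one step ahead of stores |
     until both phases drain. */ |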
|
do |
{ |
next = phase; |
phase ^= 1; |
|
if (size > 0) |
{ |
int next_amount; |
|
next_amount = (size >= 4 ? 4 : (size >= 2 ? 2 : 1)); |
next_amount = MIN (next_amount, align); |
|
amount[next] = next_amount; |
mode[next] = mode_from_align[next_amount]; |
temp[next] = gen_reg_rtx (mode[next]); |
|
x = adjust_address (src_mem, mode[next], offset_ld); |
emit_insn (gen_rtx_SET (VOIDmode, temp[next], x)); |
|
offset_ld += next_amount; |
size -= next_amount; |
active[next] = true; |
} |
|
if (active[phase]) |
{ |
active[phase] = false; |
|
x = adjust_address (dst_mem, mode[phase], offset_st); |
emit_insn (gen_rtx_SET (VOIDmode, x, temp[phase])); |
|
offset_st += amount[phase]; |
} |
} |
while (active[next]); |
} |
|
bool |
mcore_expand_block_move (rtx *operands) |
{ |
HOST_WIDE_INT align, bytes, max; |
|
if (GET_CODE (operands[2]) != CONST_INT) |
return false; |
|
bytes = INTVAL (operands[2]); |
align = INTVAL (operands[3]); |
|
if (bytes <= 0) |
return false; |
if (align > 4) |
align = 4; |
|
switch (align) |
{ |
case 4: |
if (bytes & 1) |
max = 4*4; |
else if (bytes & 3) |
max = 8*4; |
else |
max = 16*4; |
break; |
case 2: |
max = 4*2; |
break; |
case 1: |
max = 4*1; |
break; |
default: |
gcc_unreachable (); |
} |
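  /* For example, a 24-byte copy with 4-byte alignment satisfies |
     (bytes & 3) == 0, so max is 16*4 == 64 and the copy is expanded |
     inline as six ldw/stw pairs; larger copies are left to the generic |
     block-move code. */ |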
|
if (bytes <= max) |
{ |
block_move_sequence (operands[0], operands[1], bytes, align); |
return true; |
} |
|
return false; |
} |
|
|
/* Code to generate prologue and epilogue sequences. */ |
static int number_of_regs_before_varargs; |
|
/* Set by TARGET_SETUP_INCOMING_VARARGS to indicate to prolog that this is |
for a varargs function. */ |
static int current_function_anonymous_args; |
|
#define STACK_BYTES (STACK_BOUNDARY/BITS_PER_UNIT) |
#define STORE_REACH (64)	/* Maximum displacement of a word store, + 4. */ |
#define ADDI_REACH (32) /* Maximum addi operand. */ |
|
static void |
layout_mcore_frame (struct mcore_frame * infp) |
{ |
int n; |
unsigned int i; |
int nbytes; |
int regarg; |
int localregarg; |
int localreg; |
int outbounds; |
unsigned int growths; |
int step; |
|
/* Might have to spill bytes to re-assemble a big argument that |
was passed partially in registers and partially on the stack. */ |
nbytes = current_function_pretend_args_size; |
|
  /* Determine how much space is needed for spilled anonymous args (e.g., stdarg). */ |
if (current_function_anonymous_args) |
nbytes += (NPARM_REGS - number_of_regs_before_varargs) * UNITS_PER_WORD; |
|
infp->arg_size = nbytes; |
|
/* How much space to save non-volatile registers we stomp. */ |
infp->reg_mask = calc_live_regs (& n); |
infp->reg_size = n * 4; |
|
/* And the rest of it... locals and space for overflowed outbounds. */ |
infp->local_size = get_frame_size (); |
infp->outbound_size = current_function_outgoing_args_size; |
|
/* Make sure we have a whole number of words for the locals. */ |
if (infp->local_size % STACK_BYTES) |
infp->local_size = (infp->local_size + STACK_BYTES - 1) & ~ (STACK_BYTES -1); |
|
/* Only thing we know we have to pad is the outbound space, since |
we've aligned our locals assuming that base of locals is aligned. */ |
infp->pad_local = 0; |
infp->pad_reg = 0; |
infp->pad_outbound = 0; |
if (infp->outbound_size % STACK_BYTES) |
infp->pad_outbound = STACK_BYTES - (infp->outbound_size % STACK_BYTES); |
|
/* Now we see how we want to stage the prologue so that it does |
the most appropriate stack growth and register saves to either: |
(1) run fast, |
(2) reduce instruction space, or |
(3) reduce stack space. */ |
for (i = 0; i < ARRAY_SIZE (infp->growth); i++) |
infp->growth[i] = 0; |
|
regarg = infp->reg_size + infp->arg_size; |
localregarg = infp->local_size + regarg; |
localreg = infp->local_size + infp->reg_size; |
outbounds = infp->outbound_size + infp->pad_outbound; |
growths = 0; |
|
/* XXX: Consider one where we consider localregarg + outbound too! */ |
|
  /* Frame of <= 32 bytes, and an stm would save <= 2 registers: |
     use stw's with offsets and buy the frame in one shot. */ |
if (localregarg <= ADDI_REACH |
&& (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000)) |
{ |
/* Make sure we'll be aligned. */ |
if (localregarg % STACK_BYTES) |
infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES); |
|
step = localregarg + infp->pad_reg; |
infp->reg_offset = infp->local_size; |
|
if (outbounds + step <= ADDI_REACH && !frame_pointer_needed) |
{ |
step += outbounds; |
infp->reg_offset += outbounds; |
outbounds = 0; |
} |
|
infp->arg_offset = step - 4; |
infp->growth[growths++] = step; |
infp->reg_growth = growths; |
infp->local_growth = growths; |
|
/* If we haven't already folded it in. */ |
if (outbounds) |
infp->growth[growths++] = outbounds; |
|
goto finish; |
} |
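  /* As an illustration of the case above: with 8 bytes of locals, two |
     call-saved registers (reg_size == 8), no argument spill and no |
     outbound area, localregarg is 16, so the whole frame is bought with |
     a single subi of 16 and the registers are stored at offsets 8 and 12. */ |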
|
/* Frame can't be done with a single subi, but can be done with 2 |
insns. If the 'stm' is getting <= 2 registers, we use stw's and |
shift some of the stack purchase into the first subi, so both are |
single instructions. */ |
if (localregarg <= STORE_REACH |
&& (infp->local_size > ADDI_REACH) |
&& (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000)) |
{ |
int all; |
|
/* Make sure we'll be aligned; use either pad_reg or pad_local. */ |
if (localregarg % STACK_BYTES) |
infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES); |
|
all = localregarg + infp->pad_reg + infp->pad_local; |
step = ADDI_REACH; /* As much up front as we can. */ |
if (step > all) |
step = all; |
|
/* XXX: Consider whether step will still be aligned; we believe so. */ |
infp->arg_offset = step - 4; |
infp->growth[growths++] = step; |
infp->reg_growth = growths; |
infp->reg_offset = step - infp->pad_reg - infp->reg_size; |
all -= step; |
|
/* Can we fold in any space required for outbounds? */ |
if (outbounds + all <= ADDI_REACH && !frame_pointer_needed) |
{ |
all += outbounds; |
outbounds = 0; |
} |
|
/* Get the rest of the locals in place. */ |
step = all; |
infp->growth[growths++] = step; |
infp->local_growth = growths; |
all -= step; |
|
assert (all == 0); |
|
/* Finish off if we need to do so. */ |
if (outbounds) |
infp->growth[growths++] = outbounds; |
|
goto finish; |
} |
|
/* Registers + args is nicely aligned, so we'll buy that in one shot. |
Then we buy the rest of the frame in 1 or 2 steps depending on |
whether we need a frame pointer. */ |
if ((regarg % STACK_BYTES) == 0) |
{ |
infp->growth[growths++] = regarg; |
infp->reg_growth = growths; |
infp->arg_offset = regarg - 4; |
infp->reg_offset = 0; |
|
if (infp->local_size % STACK_BYTES) |
infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES); |
|
step = infp->local_size + infp->pad_local; |
|
if (!frame_pointer_needed) |
{ |
step += outbounds; |
outbounds = 0; |
} |
|
infp->growth[growths++] = step; |
infp->local_growth = growths; |
|
/* If there's any left to be done. */ |
if (outbounds) |
infp->growth[growths++] = outbounds; |
|
goto finish; |
} |
|
/* XXX: optimizations that we'll want to play with.... |
-- regarg is not aligned, but it's a small number of registers; |
use some of localsize so that regarg is aligned and then |
save the registers. */ |
|
/* Simple encoding; plods down the stack buying the pieces as it goes. |
-- does not optimize space consumption. |
-- does not attempt to optimize instruction counts. |
-- but it is safe for all alignments. */ |
if (regarg % STACK_BYTES != 0) |
infp->pad_reg = STACK_BYTES - (regarg % STACK_BYTES); |
|
infp->growth[growths++] = infp->arg_size + infp->reg_size + infp->pad_reg; |
infp->reg_growth = growths; |
infp->arg_offset = infp->growth[0] - 4; |
infp->reg_offset = 0; |
|
if (frame_pointer_needed) |
{ |
if (infp->local_size % STACK_BYTES != 0) |
infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES); |
|
infp->growth[growths++] = infp->local_size + infp->pad_local; |
infp->local_growth = growths; |
|
infp->growth[growths++] = outbounds; |
} |
else |
{ |
if ((infp->local_size + outbounds) % STACK_BYTES != 0) |
infp->pad_local = STACK_BYTES - ((infp->local_size + outbounds) % STACK_BYTES); |
|
infp->growth[growths++] = infp->local_size + infp->pad_local + outbounds; |
infp->local_growth = growths; |
} |
|
  /* Anything else that we've forgotten?  Plus a few consistency checks. */ |
finish: |
assert (infp->reg_offset >= 0); |
assert (growths <= MAX_STACK_GROWS); |
|
for (i = 0; i < growths; i++) |
gcc_assert (!(infp->growth[i] % STACK_BYTES)); |
} |
|
/* Define the offset between two registers, one to be eliminated, and |
the other its replacement, at the start of a routine. */ |
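/* Pictorially, with the stack growing downwards: |
 |
        <-- ap |
      saved registers and locals   (above_frame bytes) |
        <-- fp |
      outgoing argument area       (below_frame bytes) |
        <-- sp */ |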
|
int |
mcore_initial_elimination_offset (int from, int to) |
{ |
int above_frame; |
int below_frame; |
struct mcore_frame fi; |
|
layout_mcore_frame (& fi); |
|
/* fp to ap */ |
above_frame = fi.local_size + fi.pad_local + fi.reg_size + fi.pad_reg; |
/* sp to fp */ |
below_frame = fi.outbound_size + fi.pad_outbound; |
|
if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM) |
return above_frame; |
|
if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM) |
return above_frame + below_frame; |
|
if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM) |
return below_frame; |
|
gcc_unreachable (); |
} |
|
/* Keep track of some information about varargs for the prolog. */ |
|
static void |
mcore_setup_incoming_varargs (CUMULATIVE_ARGS *args_so_far, |
enum machine_mode mode, tree type, |
int * ptr_pretend_size ATTRIBUTE_UNUSED, |
int second_time ATTRIBUTE_UNUSED) |
{ |
current_function_anonymous_args = 1; |
|
/* We need to know how many argument registers are used before |
the varargs start, so that we can push the remaining argument |
registers during the prologue. */ |
number_of_regs_before_varargs = *args_so_far + mcore_num_arg_regs (mode, type); |
|
/* There is a bug somewhere in the arg handling code. |
     Until I can find it, this workaround always pushes the |
last named argument onto the stack. */ |
number_of_regs_before_varargs = *args_so_far; |
|
/* The last named argument may be split between argument registers |
and the stack. Allow for this here. */ |
if (number_of_regs_before_varargs > NPARM_REGS) |
number_of_regs_before_varargs = NPARM_REGS; |
} |
|
void |
mcore_expand_prolog (void) |
{ |
struct mcore_frame fi; |
int space_allocated = 0; |
int growth = 0; |
|
/* Find out what we're doing. */ |
layout_mcore_frame (&fi); |
|
space_allocated = fi.arg_size + fi.reg_size + fi.local_size + |
fi.outbound_size + fi.pad_outbound + fi.pad_local + fi.pad_reg; |
|
if (TARGET_CG_DATA) |
{ |
/* Emit a symbol for this routine's frame size. */ |
rtx x; |
|
x = DECL_RTL (current_function_decl); |
|
gcc_assert (GET_CODE (x) == MEM); |
|
x = XEXP (x, 0); |
|
gcc_assert (GET_CODE (x) == SYMBOL_REF); |
|
if (mcore_current_function_name) |
free (mcore_current_function_name); |
|
mcore_current_function_name = xstrdup (XSTR (x, 0)); |
|
ASM_OUTPUT_CG_NODE (asm_out_file, mcore_current_function_name, space_allocated); |
|
if (current_function_calls_alloca) |
ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name, "alloca", 1); |
|
/* 970425: RBE: |
We're looking at how the 8byte alignment affects stack layout |
and where we had to pad things. This emits information we can |
extract which tells us about frame sizes and the like. */ |
fprintf (asm_out_file, |
"\t.equ\t__$frame$info$_%s_$_%d_%d_x%x_%d_%d_%d,0\n", |
mcore_current_function_name, |
fi.arg_size, fi.reg_size, fi.reg_mask, |
fi.local_size, fi.outbound_size, |
frame_pointer_needed); |
} |
|
if (mcore_naked_function_p ()) |
return; |
|
/* Handle stdarg+regsaves in one shot: can't be more than 64 bytes. */ |
output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */ |
|
/* If we have a parameter passed partially in regs and partially in memory, |
the registers will have been stored to memory already in function.c. So |
we only need to do something here for varargs functions. */ |
if (fi.arg_size != 0 && current_function_pretend_args_size == 0) |
{ |
int offset; |
int rn = FIRST_PARM_REG + NPARM_REGS - 1; |
int remaining = fi.arg_size; |
|
for (offset = fi.arg_offset; remaining >= 4; offset -= 4, rn--, remaining -= 4) |
{ |
emit_insn (gen_movsi |
(gen_rtx_MEM (SImode, |
plus_constant (stack_pointer_rtx, offset)), |
gen_rtx_REG (SImode, rn))); |
} |
} |
|
/* Do we need another stack adjustment before we do the register saves? */ |
if (growth < fi.reg_growth) |
output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */ |
|
if (fi.reg_size != 0) |
{ |
int i; |
int offs = fi.reg_offset; |
|
for (i = 15; i >= 0; i--) |
{ |
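          /* A contiguous run of saved registers ending at r15 that sits at |
             the bottom of the save area (offs == 0) can be written with a |
             single stm; anything else falls back to individual stw's. */ |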
if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000)) |
{ |
int first_reg = 15; |
|
while (fi.reg_mask & (1 << first_reg)) |
first_reg--; |
first_reg++; |
|
emit_insn (gen_store_multiple (gen_rtx_MEM (SImode, stack_pointer_rtx), |
gen_rtx_REG (SImode, first_reg), |
GEN_INT (16 - first_reg))); |
|
i -= (15 - first_reg); |
offs += (16 - first_reg) * 4; |
} |
else if (fi.reg_mask & (1 << i)) |
{ |
emit_insn (gen_movsi |
(gen_rtx_MEM (SImode, |
plus_constant (stack_pointer_rtx, offs)), |
gen_rtx_REG (SImode, i))); |
offs += 4; |
} |
} |
} |
|
/* Figure the locals + outbounds. */ |
if (frame_pointer_needed) |
{ |
/* If we haven't already purchased to 'fp'. */ |
if (growth < fi.local_growth) |
output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */ |
|
emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx)); |
|
/* ... and then go any remaining distance for outbounds, etc. */ |
if (fi.growth[growth]) |
output_stack_adjust (-1, fi.growth[growth++]); |
} |
else |
{ |
if (growth < fi.local_growth) |
output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */ |
if (fi.growth[growth]) |
output_stack_adjust (-1, fi.growth[growth++]); |
} |
} |
|
void |
mcore_expand_epilog (void) |
{ |
struct mcore_frame fi; |
int i; |
int offs; |
  int growth = MAX_STACK_GROWS - 1; |
|
|
/* Find out what we're doing. */ |
layout_mcore_frame(&fi); |
|
if (mcore_naked_function_p ()) |
return; |
|
/* If we had a frame pointer, restore the sp from that. */ |
if (frame_pointer_needed) |
{ |
emit_insn (gen_movsi (stack_pointer_rtx, frame_pointer_rtx)); |
growth = fi.local_growth - 1; |
} |
else |
{ |
/* XXX: while loop should accumulate and do a single sell. */ |
while (growth >= fi.local_growth) |
{ |
if (fi.growth[growth] != 0) |
output_stack_adjust (1, fi.growth[growth]); |
growth--; |
} |
} |
|
/* Make sure we've shrunk stack back to the point where the registers |
were laid down. This is typically 0/1 iterations. Then pull the |
register save information back off the stack. */ |
while (growth >= fi.reg_growth) |
output_stack_adjust ( 1, fi.growth[growth--]); |
|
offs = fi.reg_offset; |
|
for (i = 15; i >= 0; i--) |
{ |
if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000)) |
{ |
int first_reg; |
|
/* Find the starting register. */ |
first_reg = 15; |
|
while (fi.reg_mask & (1 << first_reg)) |
first_reg--; |
|
first_reg++; |
|
emit_insn (gen_load_multiple (gen_rtx_REG (SImode, first_reg), |
gen_rtx_MEM (SImode, stack_pointer_rtx), |
GEN_INT (16 - first_reg))); |
|
i -= (15 - first_reg); |
offs += (16 - first_reg) * 4; |
} |
else if (fi.reg_mask & (1 << i)) |
{ |
emit_insn (gen_movsi |
(gen_rtx_REG (SImode, i), |
gen_rtx_MEM (SImode, |
plus_constant (stack_pointer_rtx, offs)))); |
offs += 4; |
} |
} |
|
/* Give back anything else. */ |
/* XXX: Should accumulate total and then give it back. */ |
while (growth >= 0) |
output_stack_adjust ( 1, fi.growth[growth--]); |
} |
|
/* This code is borrowed from the SH port. */ |
|
/* The MCORE cannot load a large constant into a register, constants have to |
come from a pc relative load. The reference of a pc relative load |
instruction must be less than 1k in front of the instruction. This |
means that we often have to dump a constant inside a function, and |
generate code to branch around it. |
|
It is important to minimize this, since the branches will slow things |
down and make things bigger. |
|
Worst case code looks like: |
|
lrw L1,r0 |
br L2 |
align |
L1: .long value |
L2: |
.. |
|
lrw L3,r0 |
br L4 |
align |
L3: .long value |
L4: |
.. |
|
We fix this by performing a scan before scheduling, which notices which |
instructions need to have their operands fetched from the constant table |
and builds the table. |
|
The algorithm is: |
|
scan, find an instruction which needs a pcrel move. Look forward, find the |
last barrier which is within MAX_COUNT bytes of the requirement. |
If there isn't one, make one. Process all the instructions between |
the find and the barrier. |
|
In the above example, we can tell that L3 is within 1k of L1, so |
the first move can be shrunk from the 2 insn+constant sequence into |
just 1 insn, and the constant moved to L3 to make: |
|
lrw L1,r0 |
.. |
lrw L3,r0 |
bra L4 |
align |
L3:.long value |
L4:.long value |
|
Then the second move becomes the target for the shortening process. */ |
|
typedef struct |
{ |
rtx value; /* Value in table. */ |
rtx label; /* Label of value. */ |
} pool_node; |
|
/* The maximum number of constants that can fit into one pool, since |
the pc relative range is 0...1020 bytes and constants are at least 4 |
bytes long. We subtract 4 from the range to allow for the case where |
we need to add a branch/align before the constant pool. */ |
|
#define MAX_COUNT 1016 |
#define MAX_POOL_SIZE (MAX_COUNT/4) |
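/* 1020 bytes of pc-relative range, minus 4 reserved for the branch/align |
   that may precede the pool, gives MAX_COUNT of 1016 and hence at most |
   254 four-byte pool entries. */ |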
static pool_node pool_vector[MAX_POOL_SIZE]; |
static int pool_size; |
|
/* Dump out any constants accumulated in the final pass. These |
will only be labels. */ |
|
const char * |
mcore_output_jump_label_table (void) |
{ |
int i; |
|
if (pool_size) |
{ |
fprintf (asm_out_file, "\t.align 2\n"); |
|
for (i = 0; i < pool_size; i++) |
{ |
pool_node * p = pool_vector + i; |
|
(*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (p->label)); |
|
output_asm_insn (".long %0", &p->value); |
} |
|
pool_size = 0; |
} |
|
return ""; |
} |
|
/* Check whether insn is a candidate for a conditional. */ |
|
static cond_type |
is_cond_candidate (rtx insn) |
{ |
/* The only things we conditionalize are those that can be directly |
changed into a conditional. Only bother with SImode items. If |
we wanted to be a little more aggressive, we could also do other |
modes such as DImode with reg-reg move or load 0. */ |
if (GET_CODE (insn) == INSN) |
{ |
rtx pat = PATTERN (insn); |
rtx src, dst; |
|
if (GET_CODE (pat) != SET) |
return COND_NO; |
|
dst = XEXP (pat, 0); |
|
if ((GET_CODE (dst) != REG && |
GET_CODE (dst) != SUBREG) || |
GET_MODE (dst) != SImode) |
return COND_NO; |
|
src = XEXP (pat, 1); |
|
if ((GET_CODE (src) == REG || |
(GET_CODE (src) == SUBREG && |
GET_CODE (SUBREG_REG (src)) == REG)) && |
GET_MODE (src) == SImode) |
return COND_MOV_INSN; |
else if (GET_CODE (src) == CONST_INT && |
INTVAL (src) == 0) |
return COND_CLR_INSN; |
else if (GET_CODE (src) == PLUS && |
(GET_CODE (XEXP (src, 0)) == REG || |
(GET_CODE (XEXP (src, 0)) == SUBREG && |
GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) && |
GET_MODE (XEXP (src, 0)) == SImode && |
GET_CODE (XEXP (src, 1)) == CONST_INT && |
INTVAL (XEXP (src, 1)) == 1) |
return COND_INC_INSN; |
else if (((GET_CODE (src) == MINUS && |
GET_CODE (XEXP (src, 1)) == CONST_INT && |
INTVAL( XEXP (src, 1)) == 1) || |
(GET_CODE (src) == PLUS && |
GET_CODE (XEXP (src, 1)) == CONST_INT && |
INTVAL (XEXP (src, 1)) == -1)) && |
(GET_CODE (XEXP (src, 0)) == REG || |
(GET_CODE (XEXP (src, 0)) == SUBREG && |
GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) && |
GET_MODE (XEXP (src, 0)) == SImode) |
return COND_DEC_INSN; |
|
/* Some insns that we don't bother with: |
(set (rx:DI) (ry:DI)) |
(set (rx:DI) (const_int 0)) |
*/ |
|
} |
else if (GET_CODE (insn) == JUMP_INSN && |
GET_CODE (PATTERN (insn)) == SET && |
GET_CODE (XEXP (PATTERN (insn), 1)) == LABEL_REF) |
return COND_BRANCH_INSN; |
|
return COND_NO; |
} |
|
/* Emit a conditional version of insn and replace the old insn with the |
new one. Return the new insn if emitted. */ |
|
static rtx |
emit_new_cond_insn (rtx insn, int cond) |
{ |
rtx c_insn = 0; |
rtx pat, dst, src; |
cond_type num; |
|
if ((num = is_cond_candidate (insn)) == COND_NO) |
return NULL; |
|
pat = PATTERN (insn); |
|
if (GET_CODE (insn) == INSN) |
{ |
dst = SET_DEST (pat); |
src = SET_SRC (pat); |
} |
else |
{ |
dst = JUMP_LABEL (insn); |
src = NULL_RTX; |
} |
|
switch (num) |
{ |
case COND_MOV_INSN: |
case COND_CLR_INSN: |
if (cond) |
c_insn = gen_movt0 (dst, src, dst); |
else |
c_insn = gen_movt0 (dst, dst, src); |
break; |
|
case COND_INC_INSN: |
if (cond) |
c_insn = gen_incscc (dst, dst); |
else |
c_insn = gen_incscc_false (dst, dst); |
break; |
|
case COND_DEC_INSN: |
if (cond) |
c_insn = gen_decscc (dst, dst); |
else |
c_insn = gen_decscc_false (dst, dst); |
break; |
|
case COND_BRANCH_INSN: |
if (cond) |
c_insn = gen_branch_true (dst); |
else |
c_insn = gen_branch_false (dst); |
break; |
|
default: |
return NULL; |
} |
|
/* Only copy the notes if they exist. */ |
if (rtx_length [GET_CODE (c_insn)] >= 7 && rtx_length [GET_CODE (insn)] >= 7) |
{ |
/* We really don't need to bother with the notes and links at this |
point, but go ahead and save the notes. This will help is_dead() |
when applying peepholes (links don't matter since they are not |
used any more beyond this point for the mcore). */ |
REG_NOTES (c_insn) = REG_NOTES (insn); |
} |
|
if (num == COND_BRANCH_INSN) |
{ |
/* For jumps, we need to be a little bit careful and emit the new jump |
before the old one and to update the use count for the target label. |
This way, the barrier following the old (uncond) jump will get |
deleted, but the label won't. */ |
c_insn = emit_jump_insn_before (c_insn, insn); |
|
++ LABEL_NUSES (dst); |
|
JUMP_LABEL (c_insn) = dst; |
} |
else |
c_insn = emit_insn_after (c_insn, insn); |
|
delete_insn (insn); |
|
return c_insn; |
} |
|
/* Attempt to change a basic block into a series of conditional insns. This |
works by taking the branch at the end of the 1st block and scanning for the |
end of the 2nd block. If all instructions in the 2nd block have cond. |
versions and the label at the start of block 3 is the same as the target |
from the branch at block 1, then conditionalize all insn in block 2 using |
the inverse condition of the branch at block 1. (Note I'm bending the |
definition of basic block here.) |
|
e.g., change: |
|
bt L2 <-- end of block 1 (delete) |
mov r7,r8 |
addu r7,1 |
br L3 <-- end of block 2 |
|
L2: ... <-- start of block 3 (NUSES==1) |
L3: ... |
|
to: |
|
movf r7,r8 |
incf r7 |
bf L3 |
|
L3: ... |
|
we can delete the L2 label if NUSES==1 and re-apply the optimization |
starting at the last instruction of block 2. This may allow an entire |
if-then-else statement to be conditionalized. BRC */ |
static rtx |
conditionalize_block (rtx first) |
{ |
rtx insn; |
rtx br_pat; |
rtx end_blk_1_br = 0; |
rtx end_blk_2_insn = 0; |
rtx start_blk_3_lab = 0; |
int cond; |
int br_lab_num; |
int blk_size = 0; |
|
|
/* Check that the first insn is a candidate conditional jump. This is |
the one that we'll eliminate. If not, advance to the next insn to |
try. */ |
if (GET_CODE (first) != JUMP_INSN || |
GET_CODE (PATTERN (first)) != SET || |
GET_CODE (XEXP (PATTERN (first), 1)) != IF_THEN_ELSE) |
return NEXT_INSN (first); |
|
/* Extract some information we need. */ |
end_blk_1_br = first; |
br_pat = PATTERN (end_blk_1_br); |
|
/* Complement the condition since we use the reverse cond. for the insns. */ |
cond = (GET_CODE (XEXP (XEXP (br_pat, 1), 0)) == EQ); |
|
/* Determine what kind of branch we have. */ |
if (GET_CODE (XEXP (XEXP (br_pat, 1), 1)) == LABEL_REF) |
{ |
/* A normal branch, so extract label out of first arm. */ |
br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 1), 0)); |
} |
else |
{ |
/* An inverse branch, so extract the label out of the 2nd arm |
and complement the condition. */ |
cond = (cond == 0); |
br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 2), 0)); |
} |
|
  /* Scan forward through block 2 looking for the start of block 3: it |
     must begin with a label and that label must be the same as the |
     branch target label from block 1.  We don't care whether block 2 |
     actually ends with a branch or a label (an uncond. branch is |
     conditionalizable). */ |
for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn)) |
{ |
enum rtx_code code; |
|
code = GET_CODE (insn); |
|
/* Look for the label at the start of block 3. */ |
if (code == CODE_LABEL && CODE_LABEL_NUMBER (insn) == br_lab_num) |
break; |
|
/* Skip barriers, notes, and conditionalizable insns. If the |
insn is not conditionalizable or makes this optimization fail, |
just return the next insn so we can start over from that point. */ |
if (code != BARRIER && code != NOTE && !is_cond_candidate (insn)) |
return NEXT_INSN (insn); |
|
/* Remember the last real insn before the label (i.e. end of block 2). */ |
if (code == JUMP_INSN || code == INSN) |
{ |
blk_size ++; |
end_blk_2_insn = insn; |
} |
} |
|
if (!insn) |
return insn; |
|
/* It is possible for this optimization to slow performance if the blocks |
are long. This really depends upon whether the branch is likely taken |
or not. If the branch is taken, we slow performance in many cases. But, |
if the branch is not taken, we always help performance (for a single |
block, but for a double block (i.e. when the optimization is re-applied) |
this is not true since the 'right thing' depends on the overall length of |
the collapsed block). As a compromise, don't apply this optimization on |
blocks larger than size 2 (unlikely for the mcore) when speed is important. |
     The best threshold depends on the latencies of the instructions (i.e.,  |
the branch penalty). */ |
if (optimize > 1 && blk_size > 2) |
return insn; |
|
/* At this point, we've found the start of block 3 and we know that |
it is the destination of the branch from block 1. Also, all |
instructions in the block 2 are conditionalizable. So, apply the |
conditionalization and delete the branch. */ |
start_blk_3_lab = insn; |
|
for (insn = NEXT_INSN (end_blk_1_br); insn != start_blk_3_lab; |
insn = NEXT_INSN (insn)) |
{ |
rtx newinsn; |
|
if (INSN_DELETED_P (insn)) |
continue; |
|
/* Try to form a conditional variant of the instruction and emit it. */ |
if ((newinsn = emit_new_cond_insn (insn, cond))) |
{ |
if (end_blk_2_insn == insn) |
end_blk_2_insn = newinsn; |
|
insn = newinsn; |
} |
} |
|
/* Note whether we will delete the label starting blk 3 when the jump |
gets deleted. If so, we want to re-apply this optimization at the |
last real instruction right before the label. */ |
if (LABEL_NUSES (start_blk_3_lab) == 1) |
{ |
start_blk_3_lab = 0; |
} |
|
/* ??? we probably should redistribute the death notes for this insn, esp. |
the death of cc, but it doesn't really matter this late in the game. |
The peepholes all use is_dead() which will find the correct death |
regardless of whether there is a note. */ |
delete_insn (end_blk_1_br); |
|
if (! start_blk_3_lab) |
return end_blk_2_insn; |
|
/* Return the insn right after the label at the start of block 3. */ |
return NEXT_INSN (start_blk_3_lab); |
} |
|
/* Apply the conditionalization of blocks optimization. This is the |
outer loop that traverses through the insns scanning for a branch |
that signifies an opportunity to apply the optimization. Note that |
this optimization is applied late. If we could apply it earlier, |
     say before cse 2, it may expose more optimization opportunities, |
     but the payback probably isn't really worth the effort (we'd have |
to update all reg/flow/notes/links/etc to make it work - and stick it |
in before cse 2). */ |
|
static void |
conditionalize_optimization (void) |
{ |
rtx insn; |
|
for (insn = get_insns (); insn; insn = conditionalize_block (insn)) |
continue; |
} |
|
static int saved_warn_return_type = -1; |
static int saved_warn_return_type_count = 0; |
|
/* This is to handle loads from the constant pool. */ |
|
static void |
mcore_reorg (void) |
{ |
/* Reset this variable. */ |
current_function_anonymous_args = 0; |
|
/* Restore the warn_return_type if it has been altered. */ |
if (saved_warn_return_type != -1) |
{ |
/* Only restore the value if we have reached another function. |
The test of warn_return_type occurs in final_function () in |
c-decl.c a long time after the code for the function is generated, |
so we need a counter to tell us when we have finished parsing that |
function and can restore the flag. */ |
if (--saved_warn_return_type_count == 0) |
{ |
warn_return_type = saved_warn_return_type; |
saved_warn_return_type = -1; |
} |
} |
|
if (optimize == 0) |
return; |
|
/* Conditionalize blocks where we can. */ |
conditionalize_optimization (); |
|
/* Literal pool generation is now pushed off until the assembler. */ |
} |
|
|
/* Return true if X is something that can be moved directly into r15. */ |
|
bool |
mcore_r15_operand_p (rtx x) |
{ |
switch (GET_CODE (x)) |
{ |
case CONST_INT: |
return mcore_const_ok_for_inline (INTVAL (x)); |
|
case REG: |
case SUBREG: |
case MEM: |
return 1; |
|
default: |
return 0; |
} |
} |
|
/* Implement SECONDARY_RELOAD_CLASS. If CLASS contains r15, and we can't |
directly move X into it, use r1-r14 as a temporary. */ |
|
enum reg_class |
mcore_secondary_reload_class (enum reg_class class, |
enum machine_mode mode ATTRIBUTE_UNUSED, rtx x) |
{ |
if (TEST_HARD_REG_BIT (reg_class_contents[class], 15) |
&& !mcore_r15_operand_p (x)) |
return LRW_REGS; |
return NO_REGS; |
} |
|
/* Return the reg_class to use when reloading the rtx X into the class |
CLASS. If X is too complex to move directly into r15, prefer to |
use LRW_REGS instead. */ |
|
enum reg_class |
mcore_reload_class (rtx x, enum reg_class class) |
{ |
if (reg_class_subset_p (LRW_REGS, class) && !mcore_r15_operand_p (x)) |
return LRW_REGS; |
|
return class; |
} |
|
/* Tell me if a pair of reg/subreg rtx's actually refer to the same |
register. Note that the current version doesn't worry about whether |
   they are the same mode or not (e.g., a QImode in r2 matches an HImode |
   in r2, which matches an SImode in r2).  Might think in the future about |
   whether we want to be able to say something about modes. */ |
|
int |
mcore_is_same_reg (rtx x, rtx y) |
{ |
/* Strip any and all of the subreg wrappers. */ |
while (GET_CODE (x) == SUBREG) |
x = SUBREG_REG (x); |
|
while (GET_CODE (y) == SUBREG) |
y = SUBREG_REG (y); |
|
if (GET_CODE(x) == REG && GET_CODE(y) == REG && REGNO(x) == REGNO(y)) |
return 1; |
|
return 0; |
} |
|
void |
mcore_override_options (void) |
{ |
/* Only the m340 supports little endian code. */ |
if (TARGET_LITTLE_END && ! TARGET_M340) |
target_flags |= MASK_M340; |
} |
|
/* Compute the number of word sized registers needed to |
hold a function argument of mode MODE and type TYPE. */ |
|
int |
mcore_num_arg_regs (enum machine_mode mode, tree type) |
{ |
int size; |
|
if (targetm.calls.must_pass_in_stack (mode, type)) |
return 0; |
|
if (type && mode == BLKmode) |
size = int_size_in_bytes (type); |
else |
size = GET_MODE_SIZE (mode); |
|
return ROUND_ADVANCE (size); |
} |
|
static rtx |
handle_structs_in_regs (enum machine_mode mode, tree type, int reg) |
{ |
int size; |
|
/* The MCore ABI defines that a structure whose size is not a whole multiple |
of bytes is passed packed into registers (or spilled onto the stack if |
not enough registers are available) with the last few bytes of the |
structure being packed, left-justified, into the last register/stack slot. |
GCC handles this correctly if the last word is in a stack slot, but we |
have to generate a special, PARALLEL RTX if the last word is in an |
argument register. */ |
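  /* For example, a 10 byte structure passed as the first argument needs |
     three registers; the PARALLEL built below maps r2 to offset 0, r3 to |
     offset 4 and r4 to offset 8, with the last two bytes left-justified |
     in r4. */ |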
if (type |
&& TYPE_MODE (type) == BLKmode |
&& TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST |
&& (size = int_size_in_bytes (type)) > UNITS_PER_WORD |
&& (size % UNITS_PER_WORD != 0) |
&& (reg + mcore_num_arg_regs (mode, type) <= (FIRST_PARM_REG + NPARM_REGS))) |
{ |
rtx arg_regs [NPARM_REGS]; |
int nregs; |
rtx result; |
rtvec rtvec; |
|
for (nregs = 0; size > 0; size -= UNITS_PER_WORD) |
{ |
arg_regs [nregs] = |
gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, reg ++), |
GEN_INT (nregs * UNITS_PER_WORD)); |
nregs ++; |
} |
|
/* We assume here that NPARM_REGS == 6. The assert checks this. */ |
assert (ARRAY_SIZE (arg_regs) == 6); |
rtvec = gen_rtvec (nregs, arg_regs[0], arg_regs[1], arg_regs[2], |
arg_regs[3], arg_regs[4], arg_regs[5]); |
|
result = gen_rtx_PARALLEL (mode, rtvec); |
return result; |
} |
|
return gen_rtx_REG (mode, reg); |
} |
|
rtx |
mcore_function_value (tree valtype, tree func ATTRIBUTE_UNUSED) |
{ |
enum machine_mode mode; |
int unsigned_p; |
|
mode = TYPE_MODE (valtype); |
|
PROMOTE_MODE (mode, unsigned_p, NULL); |
|
return handle_structs_in_regs (mode, valtype, FIRST_RET_REG); |
} |
|
/* Define where to put the arguments to a function. |
Value is zero to push the argument on the stack, |
or a hard register in which to store the argument. |
|
MODE is the argument's machine mode. |
TYPE is the data type of the argument (as a tree). |
This is null for libcalls where that information may |
not be available. |
CUM is a variable of type CUMULATIVE_ARGS which gives info about |
the preceding args and about the function being called. |
NAMED is nonzero if this argument is a named parameter |
(otherwise it is an extra parameter matching an ellipsis). |
|
On MCore the first args are normally in registers |
and the rest are pushed. Any arg that starts within the first |
NPARM_REGS words is at least partially passed in a register unless |
its data type forbids. */ |
|
rtx |
mcore_function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode, |
tree type, int named) |
{ |
int arg_reg; |
|
if (! named || mode == VOIDmode) |
return 0; |
|
if (targetm.calls.must_pass_in_stack (mode, type)) |
return 0; |
|
arg_reg = ROUND_REG (cum, mode); |
|
if (arg_reg < NPARM_REGS) |
return handle_structs_in_regs (mode, type, FIRST_PARM_REG + arg_reg); |
|
return 0; |
} |
|
/* Returns the number of bytes of argument registers required to hold *part* |
of a parameter of machine mode MODE and type TYPE (which may be NULL if |
the type is not known). If the argument fits entirely in the argument |
registers, or entirely on the stack, then 0 is returned. CUM is the |
number of argument registers already used by earlier parameters to |
the function. */ |
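/* For example, with five of the six argument registers already in use, a |
   16 byte argument needs four registers; only one register remains, so 4 |
   bytes are passed in that register (return value 4) and the remaining 12 |
   bytes go on the stack. */ |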
|
static int |
mcore_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode, |
tree type, bool named) |
{ |
int reg = ROUND_REG (*cum, mode); |
|
if (named == 0) |
return 0; |
|
if (targetm.calls.must_pass_in_stack (mode, type)) |
return 0; |
|
/* REG is not the *hardware* register number of the register that holds |
the argument, it is the *argument* register number. So for example, |
the first argument to a function goes in argument register 0, which |
translates (for the MCore) into hardware register 2. The second |
argument goes into argument register 1, which translates into hardware |
register 3, and so on. NPARM_REGS is the number of argument registers |
supported by the target, not the maximum hardware register number of |
the target. */ |
if (reg >= NPARM_REGS) |
return 0; |
|
/* If the argument fits entirely in registers, return 0. */ |
if (reg + mcore_num_arg_regs (mode, type) <= NPARM_REGS) |
return 0; |
|
/* The argument overflows the number of available argument registers. |
Compute how many argument registers have not yet been assigned to |
hold an argument. */ |
reg = NPARM_REGS - reg; |
|
/* Return partially in registers and partially on the stack. */ |
return reg * UNITS_PER_WORD; |
} |
|
/* Return nonzero if SYMBOL is marked as being dllexport'd. */ |
|
int |
mcore_dllexport_name_p (const char * symbol) |
{ |
return symbol[0] == '@' && symbol[1] == 'e' && symbol[2] == '.'; |
} |
|
/* Return nonzero if SYMBOL is marked as being dllimport'd. */ |
|
int |
mcore_dllimport_name_p (const char * symbol) |
{ |
return symbol[0] == '@' && symbol[1] == 'i' && symbol[2] == '.'; |
} |
|
/* Mark a DECL as being dllexport'd. */ |
|
static void |
mcore_mark_dllexport (tree decl) |
{ |
const char * oldname; |
char * newname; |
rtx rtlname; |
tree idp; |
|
rtlname = XEXP (DECL_RTL (decl), 0); |
|
if (GET_CODE (rtlname) == MEM) |
rtlname = XEXP (rtlname, 0); |
gcc_assert (GET_CODE (rtlname) == SYMBOL_REF); |
oldname = XSTR (rtlname, 0); |
|
if (mcore_dllexport_name_p (oldname)) |
return; /* Already done. */ |
|
newname = alloca (strlen (oldname) + 4); |
sprintf (newname, "@e.%s", oldname); |
|
/* We pass newname through get_identifier to ensure it has a unique |
address. RTL processing can sometimes peek inside the symbol ref |
and compare the string's addresses to see if two symbols are |
identical. */ |
/* ??? At least I think that's why we do this. */ |
idp = get_identifier (newname); |
|
XEXP (DECL_RTL (decl), 0) = |
gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp)); |
} |
|
/* Mark a DECL as being dllimport'd. */ |
|
static void |
mcore_mark_dllimport (tree decl) |
{ |
const char * oldname; |
char * newname; |
tree idp; |
rtx rtlname; |
rtx newrtl; |
|
rtlname = XEXP (DECL_RTL (decl), 0); |
|
if (GET_CODE (rtlname) == MEM) |
rtlname = XEXP (rtlname, 0); |
gcc_assert (GET_CODE (rtlname) == SYMBOL_REF); |
oldname = XSTR (rtlname, 0); |
|
gcc_assert (!mcore_dllexport_name_p (oldname)); |
if (mcore_dllimport_name_p (oldname)) |
return; /* Already done. */ |
|
/* ??? One can well ask why we're making these checks here, |
and that would be a good question. */ |
|
/* Imported variables can't be initialized. */ |
if (TREE_CODE (decl) == VAR_DECL |
&& !DECL_VIRTUAL_P (decl) |
&& DECL_INITIAL (decl)) |
{ |
error ("initialized variable %q+D is marked dllimport", decl); |
return; |
} |
|
/* `extern' needn't be specified with dllimport. |
Specify `extern' now and hope for the best. Sigh. */ |
if (TREE_CODE (decl) == VAR_DECL |
/* ??? Is this test for vtables needed? */ |
&& !DECL_VIRTUAL_P (decl)) |
{ |
DECL_EXTERNAL (decl) = 1; |
TREE_PUBLIC (decl) = 1; |
} |
|
newname = alloca (strlen (oldname) + 11); |
sprintf (newname, "@i.__imp_%s", oldname); |
|
/* We pass newname through get_identifier to ensure it has a unique |
address. RTL processing can sometimes peek inside the symbol ref |
and compare the string's addresses to see if two symbols are |
identical. */ |
/* ??? At least I think that's why we do this. */ |
idp = get_identifier (newname); |
|
newrtl = gen_rtx_MEM (Pmode, |
gen_rtx_SYMBOL_REF (Pmode, |
IDENTIFIER_POINTER (idp))); |
XEXP (DECL_RTL (decl), 0) = newrtl; |
} |
|
static int |
mcore_dllexport_p (tree decl) |
{ |
if ( TREE_CODE (decl) != VAR_DECL |
&& TREE_CODE (decl) != FUNCTION_DECL) |
return 0; |
|
return lookup_attribute ("dllexport", DECL_ATTRIBUTES (decl)) != 0; |
} |
|
static int |
mcore_dllimport_p (tree decl) |
{ |
if ( TREE_CODE (decl) != VAR_DECL |
&& TREE_CODE (decl) != FUNCTION_DECL) |
return 0; |
|
return lookup_attribute ("dllimport", DECL_ATTRIBUTES (decl)) != 0; |
} |
|
/* We must mark dll symbols specially. Definitions of dllexport'd objects |
   install some info in the .drectve (PE) or .exports (ELF) sections. */ |
|
static void |
mcore_encode_section_info (tree decl, rtx rtl ATTRIBUTE_UNUSED, int first ATTRIBUTE_UNUSED) |
{ |
/* Mark the decl so we can tell from the rtl whether the object is |
dllexport'd or dllimport'd. */ |
if (mcore_dllexport_p (decl)) |
mcore_mark_dllexport (decl); |
else if (mcore_dllimport_p (decl)) |
mcore_mark_dllimport (decl); |
|
/* It might be that DECL has already been marked as dllimport, but |
a subsequent definition nullified that. The attribute is gone |
but DECL_RTL still has @i.__imp_foo. We need to remove that. */ |
else if ((TREE_CODE (decl) == FUNCTION_DECL |
|| TREE_CODE (decl) == VAR_DECL) |
&& DECL_RTL (decl) != NULL_RTX |
&& GET_CODE (DECL_RTL (decl)) == MEM |
&& GET_CODE (XEXP (DECL_RTL (decl), 0)) == MEM |
&& GET_CODE (XEXP (XEXP (DECL_RTL (decl), 0), 0)) == SYMBOL_REF |
&& mcore_dllimport_name_p (XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0))) |
{ |
const char * oldname = XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0); |
tree idp = get_identifier (oldname + 9); |
rtx newrtl = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp)); |
|
XEXP (DECL_RTL (decl), 0) = newrtl; |
|
/* We previously set TREE_PUBLIC and DECL_EXTERNAL. |
??? We leave these alone for now. */ |
} |
} |
|
/* Undo the effects of the above. */ |
|
static const char * |
mcore_strip_name_encoding (const char * str) |
{ |
return str + (str[0] == '@' ? 3 : 0); |
} |
|
/* MCore specific attribute support. |
dllexport - for exporting a function/variable that will live in a dll |
dllimport - for importing a function/variable from a dll |
naked - do not create a function prologue/epilogue. */ |
|
const struct attribute_spec mcore_attribute_table[] = |
{ |
/* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */ |
{ "dllexport", 0, 0, true, false, false, NULL }, |
{ "dllimport", 0, 0, true, false, false, NULL }, |
{ "naked", 0, 0, true, false, false, mcore_handle_naked_attribute }, |
{ NULL, 0, 0, false, false, false, NULL } |
}; |
|
/* Handle a "naked" attribute; arguments as in |
struct attribute_spec.handler. */ |
|
static tree |
mcore_handle_naked_attribute (tree * node, tree name, tree args ATTRIBUTE_UNUSED, |
int flags ATTRIBUTE_UNUSED, bool * no_add_attrs) |
{ |
if (TREE_CODE (*node) == FUNCTION_DECL) |
{ |
/* PR14310 - don't complain about lack of return statement |
in naked functions. The solution here is a gross hack |
but this is the only way to solve the problem without |
adding a new feature to GCC. I did try submitting a patch |
that would add such a new feature, but it was (rightfully) |
rejected on the grounds that it was creeping featurism, |
so hence this code. */ |
if (warn_return_type) |
{ |
saved_warn_return_type = warn_return_type; |
warn_return_type = 0; |
saved_warn_return_type_count = 2; |
} |
else if (saved_warn_return_type_count) |
saved_warn_return_type_count = 2; |
} |
else |
{ |
warning (OPT_Wattributes, "%qs attribute only applies to functions", |
IDENTIFIER_POINTER (name)); |
*no_add_attrs = true; |
} |
|
return NULL_TREE; |
} |
|
/* ??? It looks like this is PE specific? Oh well, this is what the |
old code did as well. */ |
|
static void |
mcore_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED) |
{ |
int len; |
const char * name; |
char * string; |
const char * prefix; |
|
name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)); |
|
/* Strip off any encoding in name. */ |
name = (* targetm.strip_name_encoding) (name); |
|
/* The object is put in, for example, section .text$foo. |
The linker will then ultimately place them in .text |
(everything from the $ on is stripped). */ |
if (TREE_CODE (decl) == FUNCTION_DECL) |
prefix = ".text$"; |
/* For compatibility with EPOC, we ignore the fact that the |
section might have relocs against it. */ |
else if (decl_readonly_section (decl, 0)) |
prefix = ".rdata$"; |
else |
prefix = ".data$"; |
|
len = strlen (name) + strlen (prefix); |
string = alloca (len + 1); |
|
sprintf (string, "%s%s", prefix, name); |
|
DECL_SECTION_NAME (decl) = build_string (len, string); |
} |
|
int |
mcore_naked_function_p (void) |
{ |
return lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl)) != NULL_TREE; |
} |
|
#ifdef OBJECT_FORMAT_ELF |
static void |
mcore_asm_named_section (const char *name, |
unsigned int flags ATTRIBUTE_UNUSED, |
tree decl ATTRIBUTE_UNUSED) |
{ |
fprintf (asm_out_file, "\t.section %s\n", name); |
} |
#endif /* OBJECT_FORMAT_ELF */ |
|
/* Worker function for TARGET_ASM_EXTERNAL_LIBCALL. */ |
|
static void |
mcore_external_libcall (rtx fun) |
{ |
fprintf (asm_out_file, "\t.import\t"); |
assemble_name (asm_out_file, XSTR (fun, 0)); |
fprintf (asm_out_file, "\n"); |
} |
|
/* Worker function for TARGET_RETURN_IN_MEMORY. */ |
|
static bool |
mcore_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED) |
{ |
HOST_WIDE_INT size = int_size_in_bytes (type); |
return (size == -1 || size > 2 * UNITS_PER_WORD); |
} |
/mcore.opt
0,0 → 1,79
; Options for the Motorola MCore port of the compiler. |
|
; Copyright (C) 2005, 2007 Free Software Foundation, Inc. |
; |
; This file is part of GCC. |
; |
; GCC is free software; you can redistribute it and/or modify it under |
; the terms of the GNU General Public License as published by the Free |
; Software Foundation; either version 3, or (at your option) any later |
; version. |
; |
; GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
; WARRANTY; without even the implied warranty of MERCHANTABILITY or |
; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
; for more details. |
; |
; You should have received a copy of the GNU General Public License |
; along with GCC; see the file COPYING3. If not see |
; <http://www.gnu.org/licenses/>. |
|
m210 |
Target RejectNegative Report InverseMask(M340) |
Generate code for the M*Core M210 |
|
m340 |
Target RejectNegative Report Mask(M340) |
Generate code for the M*Core M340 |
|
m4align |
Target RejectNegative Report InverseMask(8ALIGN) |
Set maximum alignment to 4 |
|
m4byte-functions |
Target Report Mask(OVERALIGN_FUNC) |
Force functions to be aligned to a 4 byte boundary |
|
m8align |
Target RejectNegative Report Mask(8ALIGN) |
Set maximum alignment to 8 |
|
mbig-endian |
Target RejectNegative Report InverseMask(LITTLE_END) |
Generate big-endian code |
|
mcallgraph-data |
Target Report Mask(CG_DATA) |
Emit call graph information |
|
mdiv |
Target Report Mask(DIV) |
Use the divide instruction |
|
mhardlit |
Target Report Mask(HARDLIT) |
Inline constants if it can be done in 2 insns or less |
|
mlittle-endian |
Target RejectNegative Report Mask(LITTLE_END) |
Generate little-endian code |
|
; Not used by the compiler proper. |
mno-lsim |
Target RejectNegative Undocumented |
|
mrelax-immediates |
Target Report Mask(RELAX_IMM) |
Use arbitrary sized immediates in bit operations |
|
mslow-bytes |
Target Report Mask(SLOW_BYTES) |
Prefer word accesses over byte accesses |
|
mstack-increment= |
Target RejectNegative Joined UInteger Var(mcore_stack_increment) VarExists |
Set the maximum amount for a single stack increment operation |
|
mwide-bitfields |
Target Report Mask(W_FIELD) |
Always treat bitfields as int-sized |
/t-mcore
0,0 → 1,57
|
LIB1ASMSRC = mcore/lib1.asm |
LIB1ASMFUNCS = _divsi3 _udivsi3 _modsi3 _umodsi3 |
|
# Assemble startup files. |
$(T)crti.o: $(srcdir)/config/mcore/crti.asm $(GCC_PASSES) |
$(GCC_FOR_TARGET) $(GCC_CFLAGS) $(MULTILIB_CFLAGS) $(INCLUDES) \ |
-c -o $(T)crti.o -x assembler-with-cpp $(srcdir)/config/mcore/crti.asm |
|
$(T)crtn.o: $(srcdir)/config/mcore/crtn.asm $(GCC_PASSES) |
$(GCC_FOR_TARGET) $(GCC_CFLAGS) $(MULTILIB_CFLAGS) $(INCLUDES) \ |
-c -o $(T)crtn.o -x assembler-with-cpp $(srcdir)/config/mcore/crtn.asm |
|
# We want fine grained libraries, so use the new code to build the |
# floating point emulation libraries. |
FPBIT = fp-bit.c |
DPBIT = dp-bit.c |
|
dp-bit.c: $(srcdir)/config/fp-bit.c $(srcdir)/config/mcore/t-mcore |
rm -f dp-bit.c |
echo '' > dp-bit.c |
cat $(srcdir)/config/fp-bit.c >> dp-bit.c |
|
fp-bit.c: $(srcdir)/config/fp-bit.c $(srcdir)/config/mcore/t-mcore |
rm -f fp-bit.c |
echo '' > fp-bit.c |
echo '#define FLOAT' > fp-bit.c |
cat $(srcdir)/config/fp-bit.c >> fp-bit.c |
|
T_CFLAGS = -DDONT_HAVE_STDIO -DDONT_HAVE_SETJMP -Dinhibit_libc |
# could use -msifilter to be safe from interrupt/jmp interactions and others. |
TARGET_LIBGCC2_CFLAGS=-O3 -DNO_FLOATLIB_FIXUNSDFSI #-msifilter |
|
# We have values for float.h. |
CROSS_FLOAT_H = $(srcdir)/config/mcore/gfloat.h |
|
# If support for -m4align is ever re-enabled then comment out the |
# following line and uncomment the multilib lines below. |
|
EXTRA_PARTS = crtbegin.o crtend.o crti.o crtn.o |
|
# MULTILIB_OPTIONS = m8align/m4align |
# MULTILIB_DIRNAMES = align8 align4 |
# MULTILIB_MATCHES = |
# MULTILIB_EXTRA_OPTS = |
# MULTILIB_EXCEPTIONS = |
# EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o crti.o crtn.o |
# LIBGCC = stmp-multilib |
# INSTALL_LIBGCC = install-multilib |
|
MULTILIB_OPTIONS = mbig-endian/mlittle-endian m210/m340 |
MULTILIB_DIRNAMES = big little m210 m340 |
|
EXTRA_PARTS = |
EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o crti.o crtn.o |
LIBGCC = stmp-multilib |
INSTALL_LIBGCC = install-multilib |
/t-mcore-pe
0,0 → 1,39
LIB1ASMSRC = mcore/lib1.asm |
LIB1ASMFUNCS = _divsi3 _udivsi3 _modsi3 _umodsi3 |
|
# We want fine grained libraries, so use the new code to build the |
# floating point emulation libraries. |
FPBIT = fp-bit.c |
DPBIT = dp-bit.c |
|
dp-bit.c: $(srcdir)/config/fp-bit.c $(srcdir)/config/mcore/t-mcore |
rm -f dp-bit.c |
echo '' > dp-bit.c |
cat $(srcdir)/config/fp-bit.c >> dp-bit.c |
|
fp-bit.c: $(srcdir)/config/fp-bit.c $(srcdir)/config/mcore/t-mcore |
rm -f fp-bit.c |
echo '' > fp-bit.c |
echo '#define FLOAT' > fp-bit.c |
cat $(srcdir)/config/fp-bit.c >> fp-bit.c |
|
T_CFLAGS = -DDONT_HAVE_STDIO -DDONT_HAVE_SETJMP -Dinhibit_libc |
# could use -msifilter to be safe from interrupt/jmp interactions and others. |
TARGET_LIBGCC2_CFLAGS=-O3 -DNO_FLOATLIB_FIXUNSDFSI #-msifilter |
|
# We have values for float.h. |
CROSS_FLOAT_H = $(srcdir)/config/mcore/gfloat.h |
|
MULTILIB_OPTIONS = mbig-endian/mlittle-endian m210/m340 |
MULTILIB_DIRNAMES = big little m210 m340 |
MULTILIB_MATCHES = |
MULTILIB_EXTRA_OPTS = |
MULTILIB_EXCEPTIONS = |
|
# EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o |
LIBGCC = stmp-multilib |
INSTALL_LIBGCC = install-multilib |
|
# If EXTRA_MULTILIB_PARTS is not defined above then define EXTRA_PARTS here |
# EXTRA_PARTS = crtbegin.o crtend.o |
|
/mcore.h
0,0 → 1,1017
/* Definitions of target machine for GNU compiler, |
for Motorola M*CORE Processor. |
Copyright (C) 1993, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007 |
Free Software Foundation, Inc. |
|
This file is part of GCC. |
|
GCC is free software; you can redistribute it and/or modify it |
under the terms of the GNU General Public License as published |
by the Free Software Foundation; either version 3, or (at your |
option) any later version. |
|
GCC is distributed in the hope that it will be useful, but WITHOUT |
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY |
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public |
License for more details. |
|
You should have received a copy of the GNU General Public License |
along with GCC; see the file COPYING3. If not see |
<http://www.gnu.org/licenses/>. */ |
|
#ifndef GCC_MCORE_H |
#define GCC_MCORE_H |
|
/* RBE: need to move these elsewhere. */ |
#undef LIKE_PPC_ABI |
#define MCORE_STRUCT_ARGS |
/* RBE: end of "move elsewhere". */ |
|
/* Run-time Target Specification. */ |
#define TARGET_MCORE |
|
/* Get tree.c to declare a target-specific specialization of |
merge_decl_attributes. */ |
#define TARGET_DLLIMPORT_DECL_ATTRIBUTES 1 |
|
#define TARGET_CPU_CPP_BUILTINS() \ |
do \ |
{ \ |
builtin_define ("__mcore__"); \ |
builtin_define ("__MCORE__"); \ |
if (TARGET_LITTLE_END) \ |
builtin_define ("__MCORELE__"); \ |
else \ |
builtin_define ("__MCOREBE__"); \ |
if (TARGET_M340) \ |
builtin_define ("__M340__"); \ |
else \ |
builtin_define ("__M210__"); \ |
} \ |
while (0) |
|
/* If -m4align is ever re-enabled then add this line to the definition of CPP_SPEC |
%{!m4align:-D__MCORE_ALIGN_8__} %{m4align:-D__MCORE__ALIGN_4__}. */ |
#undef CPP_SPEC |
#define CPP_SPEC "%{m210:%{mlittle-endian:%ethe m210 does not have little endian support}}" |
|
/* We don't have a -lg library, so don't put it in the list. */ |
#undef LIB_SPEC |
#define LIB_SPEC "%{!shared: %{!p:%{!pg:-lc}}%{p:-lc_p}%{pg:-lc_p}}" |
|
#undef ASM_SPEC |
#define ASM_SPEC "%{mbig-endian:-EB} %{m210:-cpu=210 -EB}" |
|
#undef LINK_SPEC |
#define LINK_SPEC "%{mbig-endian:-EB} %{m210:-EB} -X" |
|
#define TARGET_DEFAULT \ |
(MASK_HARDLIT \ |
| MASK_8ALIGN \ |
| MASK_DIV \ |
| MASK_RELAX_IMM \ |
| MASK_M340 \ |
| MASK_LITTLE_END) |
|
#ifndef MULTILIB_DEFAULTS |
#define MULTILIB_DEFAULTS { "mlittle-endian", "m340" } |
#endif |
|
/* The ability to have 4 byte alignment is being suppressed for now. |
If this ability is reenabled, you must disable the definition below |
*and* edit t-mcore to enable multilibs for 4 byte alignment code. */ |
#undef TARGET_8ALIGN |
#define TARGET_8ALIGN 1 |
|
extern char * mcore_current_function_name; |
|
/* The MCore ABI says that bitfields are unsigned by default. */ |
#define CC1_SPEC "-funsigned-bitfields" |
|
/* What options are we going to default to specific settings when |
-O* happens; the user can subsequently override these settings. |
|
Omitting the frame pointer is a very good idea on the MCore. |
Scheduling isn't worth anything on the current MCore implementation. */ |
#define OPTIMIZATION_OPTIONS(LEVEL,SIZE) \ |
{ \ |
if (LEVEL) \ |
{ \ |
flag_no_function_cse = 1; \ |
flag_omit_frame_pointer = 1; \ |
\ |
if (LEVEL >= 2) \ |
{ \ |
flag_caller_saves = 0; \ |
flag_schedule_insns = 0; \ |
flag_schedule_insns_after_reload = 0; \ |
} \ |
} \ |
if (SIZE) \ |
{ \ |
target_flags &= ~MASK_HARDLIT; \ |
} \ |
} |
|
/* What options are we going to force to specific settings, |
regardless of what the user thought he wanted. |
We also use this for some post-processing of options. */ |
#define OVERRIDE_OPTIONS mcore_override_options () |
|
/* Target machine storage Layout. */ |
|
#define PROMOTE_MODE(MODE,UNSIGNEDP,TYPE) \ |
if (GET_MODE_CLASS (MODE) == MODE_INT \ |
&& GET_MODE_SIZE (MODE) < UNITS_PER_WORD) \ |
{ \ |
(MODE) = SImode; \ |
(UNSIGNEDP) = 1; \ |
} |
|
/* Define this if most significant bit is lowest numbered |
in instructions that operate on numbered bit-fields. */ |
#define BITS_BIG_ENDIAN 0 |
|
/* Define this if most significant byte of a word is the lowest numbered. */ |
#define BYTES_BIG_ENDIAN (! TARGET_LITTLE_END) |
|
/* Define this if most significant word of a multiword number is the lowest |
numbered. */ |
#define WORDS_BIG_ENDIAN (! TARGET_LITTLE_END) |
|
#define LIBGCC2_WORDS_BIG_ENDIAN 1 |
#ifdef __MCORELE__ |
#undef LIBGCC2_WORDS_BIG_ENDIAN |
#define LIBGCC2_WORDS_BIG_ENDIAN 0 |
#endif |
|
#define MAX_BITS_PER_WORD 32 |
|
/* Width of a word, in units (bytes). */ |
#define UNITS_PER_WORD 4 |
|
/* A C expression for the size in bits of the type `long long' on the |
target machine. If you don't define this, the default is two |
words. */ |
#define LONG_LONG_TYPE_SIZE 64 |
|
/* Allocation boundary (in *bits*) for storing arguments in argument list. */ |
#define PARM_BOUNDARY 32 |
|
/* Doubles must be aligned to an 8 byte boundary. */ |
#define FUNCTION_ARG_BOUNDARY(MODE, TYPE) \ |
((MODE != BLKmode && (GET_MODE_SIZE (MODE) == 8)) \ |
? BIGGEST_ALIGNMENT : PARM_BOUNDARY) |
|
/* Boundary (in *bits*) on which stack pointer should be aligned. */ |
#define STACK_BOUNDARY (TARGET_8ALIGN ? 64 : 32) |
|
/* Largest increment in UNITS we allow the stack to grow in a single operation. */ |
extern int mcore_stack_increment; |
#define STACK_UNITS_MAXSTEP 4096 |
|
/* Allocation boundary (in *bits*) for the code of a function. */ |
#define FUNCTION_BOUNDARY ((TARGET_OVERALIGN_FUNC) ? 32 : 16) |
|
/* Alignment of field after `int : 0' in a structure. */ |
#define EMPTY_FIELD_BOUNDARY 32 |
|
/* No data type wants to be aligned rounder than this. */ |
#define BIGGEST_ALIGNMENT (TARGET_8ALIGN ? 64 : 32) |
|
/* The best alignment to use in cases where we have a choice. */ |
#define FASTEST_ALIGNMENT 32 |
|
/* Every structure's size must be a multiple of 8 bits. */ |
#define STRUCTURE_SIZE_BOUNDARY 8 |
|
/* Look at the fundamental type that is used for a bit-field and use |
that to impose alignment on the enclosing structure. |
struct s {int a:8}; should have same alignment as "int", not "char". */ |
#define PCC_BITFIELD_TYPE_MATTERS 1 |
|
/* Largest integer machine mode for structures. If undefined, the default |
is GET_MODE_SIZE(DImode). */ |
#define MAX_FIXED_MODE_SIZE 32 |
|
/* Make strings word-aligned so strcpy from constants will be faster. */ |
#define CONSTANT_ALIGNMENT(EXP, ALIGN) \ |
((TREE_CODE (EXP) == STRING_CST \ |
&& (ALIGN) < FASTEST_ALIGNMENT) \ |
? FASTEST_ALIGNMENT : (ALIGN)) |
|
/* Make arrays of chars word-aligned for the same reasons. */ |
#define DATA_ALIGNMENT(TYPE, ALIGN) \ |
(TREE_CODE (TYPE) == ARRAY_TYPE \ |
&& TYPE_MODE (TREE_TYPE (TYPE)) == QImode \ |
&& (ALIGN) < FASTEST_ALIGNMENT ? FASTEST_ALIGNMENT : (ALIGN)) |
|
/* Set this nonzero if move instructions will actually fail to work |
when given unaligned data. */ |
#define STRICT_ALIGNMENT 1 |
|
/* Standard register usage. */ |
|
/* Register allocation for our first guess |
|
r0 stack pointer |
r1 scratch, target reg for xtrb? |
r2-r7 arguments. |
r8-r14 call saved |
r15 link register |
ap arg pointer (doesn't really exist, always eliminated) |
c c bit |
fp frame pointer (doesn't really exist, always eliminated) |
x19 two control registers. */ |
|
/* Number of actual hardware registers. |
The hardware registers are assigned numbers for the compiler |
from 0 to just below FIRST_PSEUDO_REGISTER. |
All registers that the compiler knows about must be given numbers, |
even those that are not normally considered general registers. |
|
MCore has 16 integer registers and 2 control registers + the arg |
pointer. */ |
|
#define FIRST_PSEUDO_REGISTER 20 |
|
#define R1_REG 1 /* Where literals are forced. */ |
#define LK_REG 15 /* Overloaded on general register. */ |
#define AP_REG 16 /* Fake arg pointer register. */ |
/* RBE: mcore.md depends on CC_REG being set to 17. */ |
#define CC_REG 17 /* Can't name it C_REG. */ |
#define FP_REG 18 /* Fake frame pointer register. */ |
|
/* Specify the registers used for certain standard purposes. |
The values of these macros are register numbers. */ |
|
|
#undef PC_REGNUM /* Define this if the program counter is overloaded on a register. */ |
#define STACK_POINTER_REGNUM 0 /* Register to use for pushing function arguments. */ |
#define FRAME_POINTER_REGNUM 8 /* When we need FP, use r8. */ |
|
/* The assembler's names for the registers. RFP need not always be used as |
   the real frame pointer; it can also be used as a normal general register.
Note that the name `fp' is horribly misleading since `fp' is in fact only |
the argument-and-return-context pointer. */ |
#define REGISTER_NAMES \ |
{ \ |
"sp", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \ |
"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", \ |
"apvirtual", "c", "fpvirtual", "x19" \ |
} |
|
/* 1 for registers that have pervasive standard uses |
and are not available for the register allocator. */ |
#define FIXED_REGISTERS \ |
/* r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 r15 ap c fp x19 */ \ |
{ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1} |
|
/* 1 for registers not available across function calls. |
These must include the FIXED_REGISTERS and also any |
registers that can be used without being saved. |
The latter must include the registers where values are returned |
and the register where structure-value addresses are passed. |
Aside from that, you can include as many other registers as you like. */ |
|
/* RBE: r15 {link register} not available across calls, |
But we don't mark it that way here.... */ |
#define CALL_USED_REGISTERS \ |
/* r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 r15 ap c fp x19 */ \ |
{ 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1} |
|
/* The order in which registers should be allocated.  */
#define REG_ALLOC_ORDER \ |
/* r7 r6 r5 r4 r3 r2 r15 r14 r13 r12 r11 r10 r9 r8 r1 r0 ap c fp x19*/ \ |
{ 7, 6, 5, 4, 3, 2, 15, 14, 13, 12, 11, 10, 9, 8, 1, 0, 16, 17, 18, 19} |
|
/* Return number of consecutive hard regs needed starting at reg REGNO |
to hold something of mode MODE. |
This is ordinarily the length in words of a value of mode MODE |
but can be less for certain modes in special long registers. |
|
   On the MCore, regs are one word (UNITS_PER_WORD bytes) wide.  */
#define HARD_REGNO_NREGS(REGNO, MODE) \ |
(((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)) |
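/* Illustration: SImode (4 bytes) needs (4 + 3) / 4 = 1 register, while
   DImode (8 bytes) needs (8 + 3) / 4 = 2 consecutive registers.  */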
|
/* Value is 1 if hard register REGNO can hold a value of machine-mode MODE. |
We may keep double values in even registers. */ |
#define HARD_REGNO_MODE_OK(REGNO, MODE) \ |
((TARGET_8ALIGN && GET_MODE_SIZE (MODE) > UNITS_PER_WORD) ? (((REGNO) & 1) == 0) : (REGNO < 18)) |
|
/* Value is 1 if it is a good idea to tie two pseudo registers |
when one has mode MODE1 and one has mode MODE2. |
If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2, |
for any hard reg, then this must be 0 for correct output. */ |
#define MODES_TIEABLE_P(MODE1, MODE2) \ |
((MODE1) == (MODE2) || GET_MODE_CLASS (MODE1) == GET_MODE_CLASS (MODE2)) |
|
/* Value should be nonzero if functions must have frame pointers. |
Zero means the frame pointer need not be set up (and parms may be accessed |
via the stack pointer) in functions that seem suitable. */ |
#define FRAME_POINTER_REQUIRED 0 |
|
/* Definitions for register eliminations. |
|
We have two registers that can be eliminated on the MCore. First, the |
frame pointer register can often be eliminated in favor of the stack |
pointer register. Secondly, the argument pointer register can always be |
eliminated; it is replaced with either the stack or frame pointer. */ |
|
/* Base register for access to arguments of the function. */ |
#define ARG_POINTER_REGNUM 16 |
|
/* Register in which the static-chain is passed to a function. */ |
#define STATIC_CHAIN_REGNUM 1 |
|
/* This is an array of structures. Each structure initializes one pair |
of eliminable registers. The "from" register number is given first, |
followed by "to". Eliminations of the same "from" register are listed |
in order of preference. */ |
#define ELIMINABLE_REGS \ |
{{ FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \ |
{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \ |
{ ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM},} |
|
/* Given FROM and TO register numbers, say whether this elimination |
is allowed. */ |
#define CAN_ELIMINATE(FROM, TO) \ |
(!((FROM) == FRAME_POINTER_REGNUM && FRAME_POINTER_REQUIRED)) |
|
/* Define the offset between two registers, one to be eliminated, and the other |
its replacement, at the start of a routine. */ |
#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \ |
OFFSET = mcore_initial_elimination_offset (FROM, TO) |
|
/* Define the classes of registers for register constraints in the |
machine description. Also define ranges of constants. |
|
One of the classes must always be named ALL_REGS and include all hard regs. |
If there is more than one class, another class must be named NO_REGS |
and contain no registers. |
|
The name GENERAL_REGS must be the name of a class (or an alias for |
another name such as ALL_REGS). This is the class of registers |
that is allowed by "g" or "r" in a register constraint. |
Also, registers outside this class are allocated only when |
instructions express preferences for them. |
|
The classes must be numbered in nondecreasing order; that is, |
a larger-numbered class must never be contained completely |
in a smaller-numbered class. |
|
For any two classes, it is very desirable that there be another |
class that represents their union. */ |
|
/* The MCore has only general registers. There are |
also some special purpose registers: the T bit register, the |
   procedure link register and the count register.  */
enum reg_class |
{ |
NO_REGS, |
ONLYR1_REGS, |
LRW_REGS, |
GENERAL_REGS, |
C_REGS, |
ALL_REGS, |
LIM_REG_CLASSES |
}; |
|
#define N_REG_CLASSES (int) LIM_REG_CLASSES |
|
/* Give names of register classes as strings for dump file. */ |
#define REG_CLASS_NAMES \ |
{ \ |
"NO_REGS", \ |
"ONLYR1_REGS", \ |
"LRW_REGS", \ |
"GENERAL_REGS", \ |
"C_REGS", \ |
"ALL_REGS", \ |
} |
|
/* Define which registers fit in which classes. |
This is an initializer for a vector of HARD_REG_SET |
of length N_REG_CLASSES. */ |
|
/* ??? STACK_POINTER_REGNUM should be excluded from LRW_REGS. */ |
#define REG_CLASS_CONTENTS \ |
{ \ |
{0x000000}, /* NO_REGS */ \ |
{0x000002}, /* ONLYR1_REGS */ \ |
{0x007FFE}, /* LRW_REGS */ \ |
{0x01FFFF}, /* GENERAL_REGS */ \ |
{0x020000}, /* C_REGS */ \ |
{0x0FFFFF} /* ALL_REGS */ \ |
} |
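/* Illustration of the masks above (bit N stands for register N):
     0x000002  bit 1        ONLYR1_REGS  = r1
     0x007FFE  bits 1-14    LRW_REGS     = r1-r14
     0x01FFFF  bits 0-16    GENERAL_REGS = r0-r15 plus the fake arg pointer
     0x020000  bit 17       C_REGS       = the c bit register
     0x0FFFFF  bits 0-19    ALL_REGS     = every hard register.  */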
|
/* The same information, inverted: |
Return the class number of the smallest class containing |
reg number REGNO. This could be a conditional expression |
or could index an array. */ |
|
extern const int regno_reg_class[FIRST_PSEUDO_REGISTER]; |
#define REGNO_REG_CLASS(REGNO) regno_reg_class[REGNO] |
|
/* When defined, the compiler allows registers explicitly used in the |
rtl to be used as spill registers but prevents the compiler from |
extending the lifetime of these registers. */ |
#define SMALL_REGISTER_CLASSES 1 |
|
/* The class value for index registers, and the one for base regs. */ |
#define INDEX_REG_CLASS NO_REGS |
#define BASE_REG_CLASS GENERAL_REGS |
|
/* Get reg_class from a letter such as appears in the machine |
description. */ |
extern const enum reg_class reg_class_from_letter[]; |
|
#define REG_CLASS_FROM_LETTER(C) \ |
(ISLOWER (C) ? reg_class_from_letter[(C) - 'a'] : NO_REGS) |
|
/* The letters I, J, K, L, M, N, O, and P in a register constraint string |
can be used to stand for particular ranges of immediate operands. |
This macro defines what the ranges are. |
C is the letter, and VALUE is a constant value. |
Return 1 if VALUE is in the range specified by C. |
I: loadable by movi (0..127) |
J: arithmetic operand 1..32 |
K: shift operand 0..31 |
L: negative arithmetic operand -1..-32 |
M: powers of two, constants loadable by bgeni |
N: powers of two minus 1, constants loadable by bmaski, including -1 |
O: allowed by cmov with two constants +/- 1 of each other |
P: values we will generate 'inline' -- without an 'lrw' |
|
Others defined for use after reload |
Q: constant 1 |
R: a label |
S: 0/1/2 cleared bits out of 32 [for bclri's] |
T: 2 set bits out of 32 [for bseti's] |
U: constant 0 |
xxxS: 1 cleared bit out of 32 (complement of power of 2). for bclri |
xxxT: 2 cleared bits out of 32. for pairs of bclris. */ |
#define CONST_OK_FOR_I(VALUE) (((int)(VALUE)) >= 0 && ((int)(VALUE)) <= 0x7f) |
#define CONST_OK_FOR_J(VALUE) (((int)(VALUE)) > 0 && ((int)(VALUE)) <= 32) |
#define CONST_OK_FOR_L(VALUE) (((int)(VALUE)) < 0 && ((int)(VALUE)) >= -32) |
#define CONST_OK_FOR_K(VALUE) (((int)(VALUE)) >= 0 && ((int)(VALUE)) <= 31) |
#define CONST_OK_FOR_M(VALUE) (exact_log2 (VALUE) >= 0) |
#define CONST_OK_FOR_N(VALUE) (((int)(VALUE)) == -1 || exact_log2 ((VALUE) + 1) >= 0) |
#define CONST_OK_FOR_O(VALUE) (CONST_OK_FOR_I(VALUE) || \ |
CONST_OK_FOR_M(VALUE) || \ |
CONST_OK_FOR_N(VALUE) || \ |
CONST_OK_FOR_M((int)(VALUE) - 1) || \ |
CONST_OK_FOR_N((int)(VALUE) + 1)) |
|
#define CONST_OK_FOR_P(VALUE) (mcore_const_ok_for_inline (VALUE)) |
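/* Worked examples of the constraints above: 100 satisfies 'I' (0..127);
   0x80 satisfies 'M' (a power of two, loadable by bgeni); 0xFF satisfies
   'N' (a power of two minus one, loadable by bmaski); and 129 satisfies
   'O' because 128 is a power of two, even though 129 itself matches none
   of 'I', 'M' or 'N'.  */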
|
#define CONST_OK_FOR_LETTER_P(VALUE, C) \ |
((C) == 'I' ? CONST_OK_FOR_I (VALUE) \ |
: (C) == 'J' ? CONST_OK_FOR_J (VALUE) \ |
: (C) == 'L' ? CONST_OK_FOR_L (VALUE) \ |
: (C) == 'K' ? CONST_OK_FOR_K (VALUE) \ |
: (C) == 'M' ? CONST_OK_FOR_M (VALUE) \ |
: (C) == 'N' ? CONST_OK_FOR_N (VALUE) \ |
: (C) == 'P' ? CONST_OK_FOR_P (VALUE) \ |
: (C) == 'O' ? CONST_OK_FOR_O (VALUE) \ |
: 0) |
|
/* Similar, but for floating constants, and defining letters G and H. |
Here VALUE is the CONST_DOUBLE rtx itself. */ |
#define CONST_DOUBLE_OK_FOR_LETTER_P(VALUE, C) \ |
((C) == 'G' ? CONST_OK_FOR_I (CONST_DOUBLE_HIGH (VALUE)) \ |
&& CONST_OK_FOR_I (CONST_DOUBLE_LOW (VALUE)) \ |
: 0) |
|
/* Letters in the range `Q' through `U' in a register constraint string |
may be defined in a machine-dependent fashion to stand for arbitrary |
operand types. */ |
#define EXTRA_CONSTRAINT(OP, C) \ |
((C) == 'R' ? (GET_CODE (OP) == MEM \ |
&& GET_CODE (XEXP (OP, 0)) == LABEL_REF) \ |
: (C) == 'S' ? (GET_CODE (OP) == CONST_INT \ |
&& mcore_num_zeros (INTVAL (OP)) <= 2) \ |
: (C) == 'T' ? (GET_CODE (OP) == CONST_INT \ |
&& mcore_num_ones (INTVAL (OP)) == 2) \ |
: (C) == 'Q' ? (GET_CODE (OP) == CONST_INT \ |
&& INTVAL(OP) == 1) \ |
: (C) == 'U' ? (GET_CODE (OP) == CONST_INT \ |
&& INTVAL(OP) == 0) \ |
: 0) |
|
/* Given an rtx X being reloaded into a reg required to be |
in class CLASS, return the class of reg to actually use. |
In general this is just CLASS; but on some machines |
in some cases it is preferable to use a more restrictive class. */ |
#define PREFERRED_RELOAD_CLASS(X, CLASS) mcore_reload_class (X, CLASS) |
|
/* Return the register class of a scratch register needed to copy IN into |
or out of a register in CLASS in MODE. If it can be done directly, |
NO_REGS is returned. */ |
#define SECONDARY_RELOAD_CLASS(CLASS, MODE, X) \ |
mcore_secondary_reload_class (CLASS, MODE, X) |
|
/* Return the maximum number of consecutive registers |
needed to represent mode MODE in a register of class CLASS. |
|
On MCore this is the size of MODE in words. */ |
#define CLASS_MAX_NREGS(CLASS, MODE) \ |
(ROUND_ADVANCE (GET_MODE_SIZE (MODE))) |
|
/* Stack layout; function entry, exit and calling. */ |
|
/* Define the number of registers that can hold parameters.
   These macros are used only in other macro definitions below.  */
#define NPARM_REGS 6 |
#define FIRST_PARM_REG 2 |
#define FIRST_RET_REG 2 |
|
/* Define this if pushing a word on the stack |
makes the stack pointer a smaller address. */ |
#define STACK_GROWS_DOWNWARD |
|
/* Offset within stack frame to start allocating local variables at. |
If FRAME_GROWS_DOWNWARD, this is the offset to the END of the |
first local allocated. Otherwise, it is the offset to the BEGINNING |
of the first local allocated. */ |
#define STARTING_FRAME_OFFSET 0 |
|
/* If defined, the maximum amount of space required for outgoing arguments |
will be computed and placed into the variable |
`current_function_outgoing_args_size'. No space will be pushed |
onto the stack for each call; instead, the function prologue should |
increase the stack frame size by this amount. */ |
#define ACCUMULATE_OUTGOING_ARGS 1 |
|
/* Offset of first parameter from the argument pointer register value. */ |
#define FIRST_PARM_OFFSET(FNDECL) 0 |
|
/* Value is the number of bytes of arguments automatically
popped when returning from a subroutine call. |
FUNTYPE is the data type of the function (as a tree), |
or for a library call it is an identifier node for the subroutine name. |
SIZE is the number of bytes of arguments passed on the stack. |
|
On the MCore, the callee does not pop any of its arguments that were passed |
on the stack. */ |
#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) 0 |
|
/* Define how to find the value returned by a function. |
VALTYPE is the data type of the value (as a tree). |
If the precise function being called is known, FUNC is its FUNCTION_DECL; |
otherwise, FUNC is 0. */ |
#define FUNCTION_VALUE(VALTYPE, FUNC) mcore_function_value (VALTYPE, FUNC) |
|
/* Don't default to pcc-struct-return, because gcc is the only compiler, and |
we want to retain compatibility with older gcc versions. */ |
#define DEFAULT_PCC_STRUCT_RETURN 0 |
|
/* Define how to find the value returned by a library function |
assuming the value has mode MODE. */ |
#define LIBCALL_VALUE(MODE) gen_rtx_REG (MODE, FIRST_RET_REG) |
|
/* 1 if N is a possible register number for a function value. |
   On the MCore, only r2 (FIRST_RET_REG) can return results.  */
#define FUNCTION_VALUE_REGNO_P(REGNO) ((REGNO) == FIRST_RET_REG) |
|
/* 1 if N is a possible register number for function argument passing. */ |
#define FUNCTION_ARG_REGNO_P(REGNO) \ |
((REGNO) >= FIRST_PARM_REG && (REGNO) < (NPARM_REGS + FIRST_PARM_REG)) |
|
/* Define a data type for recording info about an argument list |
during the scan of that argument list. This data type should |
hold all necessary information about the function itself |
and about the args processed so far, enough to enable macros |
such as FUNCTION_ARG to determine where the next arg should go. |
|
On MCore, this is a single integer, which is a number of words |
of arguments scanned so far (including the invisible argument, |
if any, which holds the structure-value-address). |
   Thus NPARM_REGS or more means all following args should go on the stack.  */
#define CUMULATIVE_ARGS int |
|
#define ROUND_ADVANCE(SIZE) \ |
((SIZE + UNITS_PER_WORD - 1) / UNITS_PER_WORD) |
|
/* Round a register number up to a proper boundary for an arg of mode |
MODE. |
|
We round to an even reg for things larger than a word. */ |
#define ROUND_REG(X, MODE) \ |
((TARGET_8ALIGN \ |
&& GET_MODE_UNIT_SIZE ((MODE)) > UNITS_PER_WORD) \ |
? ((X) + ((X) & 1)) : (X)) |
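/* Illustration: ROUND_ADVANCE (5) is (5 + 3) / 4 = 2 argument words.
   With TARGET_8ALIGN, ROUND_REG (3, DImode) is 4, i.e. an 8-byte argument
   that would start in an odd slot is bumped to the next even one;
   without TARGET_8ALIGN the slot is left unchanged.  */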
|
|
/* Initialize a variable CUM of type CUMULATIVE_ARGS |
for a call to a function whose data type is FNTYPE. |
For a library call, FNTYPE is 0. |
|
On MCore, the offset always starts at 0: the first parm reg is always |
the same reg. */ |
#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT, N_NAMED_ARGS) \ |
((CUM) = 0) |
|
/* Update the data in CUM to advance over an argument |
of mode MODE and data type TYPE. |
(TYPE is null for libcalls where that information may not be |
available.) */ |
#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \ |
((CUM) = (ROUND_REG ((CUM), (MODE)) \ |
           + ((NAMED) * mcore_num_arg_regs (MODE, TYPE))))
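/* Illustration, assuming mcore_num_arg_regs returns the number of argument
   words the argument occupies: a named `int' advances CUM by one word and a
   named `double' by two (starting at an even slot under TARGET_8ALIGN via
   ROUND_REG); for an unnamed, variadic argument only the rounding is
   applied, since NAMED is 0.  */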
|
/* Define where to put the arguments to a function. */ |
#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \ |
mcore_function_arg (CUM, MODE, TYPE, NAMED) |
|
/* Call the function profiler with a given profile label. */ |
#define FUNCTION_PROFILER(STREAM,LABELNO) \ |
{ \ |
fprintf (STREAM, " trap 1\n"); \ |
fprintf (STREAM, " .align 2\n"); \ |
fprintf (STREAM, " .long LP%d\n", (LABELNO)); \ |
} |
|
/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function, |
the stack pointer does not matter. The value is tested only in |
functions that have frame pointers. |
No definition is equivalent to always zero. */ |
#define EXIT_IGNORE_STACK 0 |
|
/* Output assembler code for a block containing the constant parts |
of a trampoline, leaving space for the variable parts. |
|
On the MCore, the trampoline looks like: |
lrw r1, function |
lrw r13, area |
jmp r13 |
or r0, r0 |
.literals */ |
#define TRAMPOLINE_TEMPLATE(FILE) \ |
{ \ |
fprintf ((FILE), " .short 0x7102\n"); \ |
fprintf ((FILE), " .short 0x7d02\n"); \ |
fprintf ((FILE), " .short 0x00cd\n"); \ |
fprintf ((FILE), " .short 0x1e00\n"); \ |
fprintf ((FILE), " .long 0\n"); \ |
fprintf ((FILE), " .long 0\n"); \ |
} |
|
/* Length in units of the trampoline for entering a nested function. */ |
#define TRAMPOLINE_SIZE 12 |
|
/* Alignment required for a trampoline in bits. */ |
#define TRAMPOLINE_ALIGNMENT 32 |
|
/* Emit RTL insns to initialize the variable parts of a trampoline. |
FNADDR is an RTX for the address of the function's pure code. |
CXT is an RTX for the static chain value for the function. */ |
#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \ |
{ \ |
emit_move_insn (gen_rtx_MEM (SImode, plus_constant ((TRAMP), 8)), \ |
(CXT)); \ |
emit_move_insn (gen_rtx_MEM (SImode, plus_constant ((TRAMP), 12)), \ |
(FNADDR)); \ |
} |
|
/* Macros to check register numbers against specific register classes. */ |
|
/* These assume that REGNO is a hard or pseudo reg number. |
They give nonzero only if REGNO is a hard reg of the suitable class |
or a pseudo reg currently allocated to a suitable hard reg. |
Since they use reg_renumber, they are safe only once reg_renumber |
has been allocated, which happens in local-alloc.c. */ |
#define REGNO_OK_FOR_BASE_P(REGNO) \ |
((REGNO) < AP_REG || (unsigned) reg_renumber[(REGNO)] < AP_REG) |
|
#define REGNO_OK_FOR_INDEX_P(REGNO) 0 |
|
/* Maximum number of registers that can appear in a valid memory |
address. */ |
#define MAX_REGS_PER_ADDRESS 1 |
|
/* Recognize any constant value that is a valid address. */ |
#define CONSTANT_ADDRESS_P(X) (GET_CODE (X) == LABEL_REF) |
|
/* Nonzero if the constant value X is a legitimate general operand. |
It is given that X satisfies CONSTANT_P or is a CONST_DOUBLE. |
|
On the MCore, allow anything but a double. */ |
#define LEGITIMATE_CONSTANT_P(X) (GET_CODE(X) != CONST_DOUBLE) |
|
/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx |
and check its validity for a certain class. |
We have two alternate definitions for each of them. |
The usual definition accepts all pseudo regs; the other rejects |
them unless they have been allocated suitable hard regs. |
The symbol REG_OK_STRICT causes the latter definition to be used. */ |
#ifndef REG_OK_STRICT |
|
/* Nonzero if X is a hard reg that can be used as a base reg |
or if it is a pseudo reg. */ |
#define REG_OK_FOR_BASE_P(X) \ |
(REGNO (X) <= 16 || REGNO (X) >= FIRST_PSEUDO_REGISTER) |
|
/* Nonzero if X is a hard reg that can be used as an index |
or if it is a pseudo reg. */ |
#define REG_OK_FOR_INDEX_P(X) 0 |
|
#else |
|
/* Nonzero if X is a hard reg that can be used as a base reg. */ |
#define REG_OK_FOR_BASE_P(X) \ |
REGNO_OK_FOR_BASE_P (REGNO (X)) |
|
/* Nonzero if X is a hard reg that can be used as an index. */ |
#define REG_OK_FOR_INDEX_P(X) 0 |
|
#endif |
/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression |
that is a valid memory address for an instruction. |
The MODE argument is the machine mode for the MEM expression |
that wants to use this address. |
|
The other macros defined here are used only in GO_IF_LEGITIMATE_ADDRESS. */ |
#define BASE_REGISTER_RTX_P(X) \ |
(GET_CODE (X) == REG && REG_OK_FOR_BASE_P (X)) |
|
#define INDEX_REGISTER_RTX_P(X) \ |
(GET_CODE (X) == REG && REG_OK_FOR_INDEX_P (X)) |
|
|
/* Jump to LABEL if X is a valid address RTX. This must also take |
REG_OK_STRICT into account when deciding about valid registers, but it uses |
the above macros so we are in luck. |
|
Allow REG |
REG+disp |
|
A legitimate index for a QI is 0..15, for HI is 0..30, for SI is 0..60, |
and for DI is 0..56 because we use two SI loads, etc. */ |
#define GO_IF_LEGITIMATE_INDEX(MODE, REGNO, OP, LABEL) \ |
do \ |
{ \ |
if (GET_CODE (OP) == CONST_INT) \ |
{ \ |
if (GET_MODE_SIZE (MODE) >= 4 \ |
&& (((unsigned)INTVAL (OP)) % 4) == 0 \ |
&& ((unsigned)INTVAL (OP)) <= 64 - GET_MODE_SIZE (MODE)) \ |
goto LABEL; \ |
if (GET_MODE_SIZE (MODE) == 2 \ |
&& (((unsigned)INTVAL (OP)) % 2) == 0 \ |
&& ((unsigned)INTVAL (OP)) <= 30) \ |
goto LABEL; \ |
if (GET_MODE_SIZE (MODE) == 1 \ |
&& ((unsigned)INTVAL (OP)) <= 15) \ |
goto LABEL; \ |
} \ |
} \ |
while (0) |
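/* Illustration of the index ranges accepted above:
     QImode: 0..15 (any byte offset)
     HImode: 0, 2, ..., 30 (even offsets)
     SImode: 0, 4, ..., 60 (word-aligned offsets)
     DImode: 0, 4, ..., 56 (word-aligned, leaving room for the second
             SImode load).  */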
|
#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, LABEL) \ |
{ \ |
if (BASE_REGISTER_RTX_P (X)) \ |
goto LABEL; \ |
else if (GET_CODE (X) == PLUS || GET_CODE (X) == LO_SUM) \ |
{ \ |
rtx xop0 = XEXP (X,0); \ |
rtx xop1 = XEXP (X,1); \ |
if (BASE_REGISTER_RTX_P (xop0)) \ |
GO_IF_LEGITIMATE_INDEX (MODE, REGNO (xop0), xop1, LABEL); \ |
if (BASE_REGISTER_RTX_P (xop1)) \ |
GO_IF_LEGITIMATE_INDEX (MODE, REGNO (xop1), xop0, LABEL); \ |
} \ |
} |
|
/* Go to LABEL if ADDR (a legitimate address expression) |
has an effect that depends on the machine mode it is used for. */ |
#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL) \ |
{ \ |
if ( GET_CODE (ADDR) == PRE_DEC || GET_CODE (ADDR) == POST_DEC \ |
|| GET_CODE (ADDR) == PRE_INC || GET_CODE (ADDR) == POST_INC) \ |
goto LABEL; \ |
} |
|
/* Specify the machine mode that this machine uses |
for the index in the tablejump instruction. */ |
#define CASE_VECTOR_MODE SImode |
|
/* 'char' is unsigned by default (DEFAULT_SIGNED_CHAR is 0).  */
#define DEFAULT_SIGNED_CHAR 0 |
|
/* The type of size_t is `unsigned int'.  */
#define SIZE_TYPE "unsigned int" |
|
/* Max number of bytes we can move from memory to memory |
in one reasonably fast instruction. */ |
#define MOVE_MAX 4 |
|
/* Define if operations between registers always perform the operation |
on the full register even if a narrower mode is specified. */ |
#define WORD_REGISTER_OPERATIONS |
|
/* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD |
will either zero-extend or sign-extend. The value of this macro should |
be the code that says which one of the two operations is implicitly |
done, UNKNOWN if none. */ |
#define LOAD_EXTEND_OP(MODE) ZERO_EXTEND |
|
/* Nonzero if access to memory by bytes is slow and undesirable. */ |
#define SLOW_BYTE_ACCESS TARGET_SLOW_BYTES |
|
/* Shift counts are truncated to 6 bits (0 to 63) instead of the expected
   5 bits, so we cannot define SHIFT_COUNT_TRUNCATED to true for this
   target.  */
#define SHIFT_COUNT_TRUNCATED 0 |
|
/* All integers have the same format so truncation is easy. */ |
#define TRULY_NOOP_TRUNCATION(OUTPREC,INPREC) 1 |
|
/* Define this if addresses of constant functions |
shouldn't be put through pseudo regs where they can be cse'd. |
Desirable on machines where ordinary constants are expensive |
but a CALL with constant address is cheap. */ |
/* Why is this defined??? -- dac */ |
#define NO_FUNCTION_CSE 1 |
|
/* The machine modes of pointers and functions. */ |
#define Pmode SImode |
#define FUNCTION_MODE Pmode |
|
/* Compute extra cost of moving data between one register class |
and another. All register moves are cheap. */ |
#define REGISTER_MOVE_COST(MODE, SRCCLASS, DSTCLASS) 2 |
|
/* Assembler output control. */ |
#define ASM_COMMENT_START "\t//" |
|
#define ASM_APP_ON "// inline asm begin\n" |
#define ASM_APP_OFF "// inline asm end\n" |
|
#define FILE_ASM_OP "\t.file\n" |
|
/* Switch to the text or data segment. */ |
#define TEXT_SECTION_ASM_OP "\t.text" |
#define DATA_SECTION_ASM_OP "\t.data" |
|
/* Switch into a generic section. */ |
#undef TARGET_ASM_NAMED_SECTION |
#define TARGET_ASM_NAMED_SECTION mcore_asm_named_section |
|
/* This is how to output an insn to push a register on the stack. |
It need not be very fast code. */ |
#define ASM_OUTPUT_REG_PUSH(FILE,REGNO) \ |
fprintf (FILE, "\tsubi\t %s,%d\n\tstw\t %s,(%s)\n", \ |
reg_names[STACK_POINTER_REGNUM], \ |
(STACK_BOUNDARY / BITS_PER_UNIT), \ |
reg_names[REGNO], \ |
reg_names[STACK_POINTER_REGNUM]) |
|
/* Length in instructions of the code output by ASM_OUTPUT_REG_PUSH. */ |
#define REG_PUSH_LENGTH 2 |
|
/* This is how to output an insn to pop a register from the stack. */ |
#define ASM_OUTPUT_REG_POP(FILE,REGNO) \ |
fprintf (FILE, "\tldw\t %s,(%s)\n\taddi\t %s,%d\n", \ |
reg_names[REGNO], \ |
reg_names[STACK_POINTER_REGNUM], \ |
reg_names[STACK_POINTER_REGNUM], \ |
(STACK_BOUNDARY / BITS_PER_UNIT)) |
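/* Illustration: with the default 32-bit stack boundary, pushing r8 emits

       subi     sp,4
       stw      r8,(sp)

   and the matching pop emits

       ldw      r8,(sp)
       addi     sp,4

   With TARGET_8ALIGN the adjustment is 8 instead of 4.  */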
|
|
/* Output a reference to a label. */ |
#undef ASM_OUTPUT_LABELREF |
#define ASM_OUTPUT_LABELREF(STREAM, NAME) \ |
fprintf (STREAM, "%s%s", USER_LABEL_PREFIX, \ |
(* targetm.strip_name_encoding) (NAME)) |
|
/* This is how to output an assembler line |
that says to advance the location counter |
to a multiple of 2**LOG bytes. */ |
#define ASM_OUTPUT_ALIGN(FILE,LOG) \ |
if ((LOG) != 0) \ |
fprintf (FILE, "\t.align\t%d\n", LOG) |
|
#ifndef ASM_DECLARE_RESULT |
#define ASM_DECLARE_RESULT(FILE, RESULT) |
#endif |
|
#define MULTIPLE_SYMBOL_SPACES 1 |
|
#define SUPPORTS_ONE_ONLY 1 |
|
/* A pair of macros to output things for the callgraph data. |
   VALUE means (to the tools that read this info later):
0 a call from src to dst |
1 the call is special (e.g. dst is "unknown" or "alloca") |
2 the call is special (e.g., the src is a table instead of routine) |
|
Frame sizes are augmented with timestamps to help later tools |
differentiate between static entities with same names in different |
files. */ |
extern long mcore_current_compilation_timestamp; |
#define ASM_OUTPUT_CG_NODE(FILE,SRCNAME,VALUE) \ |
do \ |
{ \ |
if (mcore_current_compilation_timestamp == 0) \ |
mcore_current_compilation_timestamp = time (0); \ |
fprintf ((FILE),"\t.equ\t__$frame$size$_%s_$_%08lx,%d\n", \ |
(SRCNAME), mcore_current_compilation_timestamp, (VALUE)); \ |
} \ |
while (0) |
|
#define ASM_OUTPUT_CG_EDGE(FILE,SRCNAME,DSTNAME,VALUE) \ |
do \ |
{ \ |
fprintf ((FILE),"\t.equ\t__$function$call$_%s_$_%s,%d\n", \ |
(SRCNAME), (DSTNAME), (VALUE)); \ |
} \ |
while (0) |
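/* Illustration: compiling a function `foo' with a 32-byte frame that calls
   `bar' might emit something like

       .equ __$frame$size$_foo_$_<hex timestamp>,32
       .equ __$function$call$_foo_$_bar,0

   where the timestamp suffix keeps same-named statics in different files
   apart.  */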
|
/* Globalizing directive for a label. */ |
#define GLOBAL_ASM_OP "\t.export\t" |
|
/* The prefix to add to user-visible assembler symbols. */ |
#undef USER_LABEL_PREFIX |
#define USER_LABEL_PREFIX "" |
|
/* Make an internal label into a string. */ |
#undef ASM_GENERATE_INTERNAL_LABEL |
#define ASM_GENERATE_INTERNAL_LABEL(STRING, PREFIX, NUM) \ |
sprintf (STRING, "*.%s%ld", PREFIX, (long) NUM) |
|
/* Jump tables must be 32 bit aligned. */ |
#undef ASM_OUTPUT_CASE_LABEL |
#define ASM_OUTPUT_CASE_LABEL(STREAM,PREFIX,NUM,TABLE) \ |
fprintf (STREAM, "\t.align 2\n.%s%d:\n", PREFIX, NUM); |
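/* Illustration: ASM_GENERATE_INTERNAL_LABEL with prefix "L" and number 5
   produces the string "*.L5" (the leading '*' asks for the name to be used
   verbatim, without the user label prefix), and the case-label macro above
   then emits

       .align 2
   .L5:

   in front of the jump table.  */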
|
/* Output a relative address. Not needed since jump tables are absolute |
but we must define it anyway. */ |
#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM,BODY,VALUE,REL) \ |
fputs ("- - - ASM_OUTPUT_ADDR_DIFF_ELT called!\n", STREAM) |
|
/* Output an element of a dispatch table. */ |
#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM,VALUE) \ |
fprintf (STREAM, "\t.long\t.L%d\n", VALUE) |
|
/* Output various types of constants. */ |
|
/* This is how to output an assembler line |
that says to advance the location counter by SIZE bytes. */ |
#undef ASM_OUTPUT_SKIP |
#define ASM_OUTPUT_SKIP(FILE,SIZE) \ |
fprintf (FILE, "\t.fill %d, 1\n", (int)(SIZE)) |
|
/* This says how to output an assembler line |
to define a global common symbol, with alignment information. */ |
/* XXX - for now we ignore the alignment. */ |
#undef ASM_OUTPUT_ALIGNED_COMMON |
#define ASM_OUTPUT_ALIGNED_COMMON(FILE, NAME, SIZE, ALIGN) \ |
do \ |
{ \ |
if (mcore_dllexport_name_p (NAME)) \ |
MCORE_EXPORT_NAME (FILE, NAME) \ |
if (! mcore_dllimport_name_p (NAME)) \ |
{ \ |
fputs ("\t.comm\t", FILE); \ |
assemble_name (FILE, NAME); \ |
fprintf (FILE, ",%lu\n", (unsigned long)(SIZE)); \ |
} \ |
} \ |
while (0) |
|
/* This says how to output an assembler line |
to define a local common symbol.... */ |
#undef ASM_OUTPUT_LOCAL |
#define ASM_OUTPUT_LOCAL(FILE, NAME, SIZE, ROUNDED) \ |
(fputs ("\t.lcomm\t", FILE), \ |
assemble_name (FILE, NAME), \ |
fprintf (FILE, ",%d\n", (int)SIZE)) |
|
/* ... and how to define a local common symbol whose alignment |
we wish to specify. ALIGN comes in as bits, we have to turn |
it into bytes. */ |
#undef ASM_OUTPUT_ALIGNED_LOCAL |
#define ASM_OUTPUT_ALIGNED_LOCAL(FILE, NAME, SIZE, ALIGN) \ |
do \ |
{ \ |
fputs ("\t.bss\t", (FILE)); \ |
assemble_name ((FILE), (NAME)); \ |
fprintf ((FILE), ",%d,%d\n", (int)(SIZE), (ALIGN) / BITS_PER_UNIT);\ |
} \ |
while (0) |
|
/* Print operand X (an rtx) in assembler syntax to file FILE. |
CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified. |
For `%' followed by punctuation, CODE is the punctuation and X is null. */ |
#define PRINT_OPERAND(STREAM, X, CODE) mcore_print_operand (STREAM, X, CODE) |
|
/* Print a memory address as an operand to reference that memory location. */ |
#define PRINT_OPERAND_ADDRESS(STREAM,X) mcore_print_operand_address (STREAM, X) |
|
#define PRINT_OPERAND_PUNCT_VALID_P(CHAR) \ |
((CHAR)=='.' || (CHAR) == '#' || (CHAR) == '*' || (CHAR) == '^' || (CHAR) == '!') |
|
#endif /* ! GCC_MCORE_H */ |
/mcore-pe.h
0,0 → 1,102
/* Definitions of target machine for GNU compiler, for MCore using COFF/PE. |
Copyright (C) 1994, 1999, 2000, 2002, 2003, 2004, 2007 |
Free Software Foundation, Inc. |
Contributed by Michael Tiemann (tiemann@cygnus.com). |
|
This file is part of GCC. |
|
GCC is free software; you can redistribute it and/or modify |
it under the terms of the GNU General Public License as published by |
the Free Software Foundation; either version 3, or (at your option) |
any later version. |
|
GCC is distributed in the hope that it will be useful, |
but WITHOUT ANY WARRANTY; without even the implied warranty of |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
GNU General Public License for more details. |
|
You should have received a copy of the GNU General Public License |
along with GCC; see the file COPYING3. If not see |
<http://www.gnu.org/licenses/>. */ |
|
/* Run-time Target Specification. */ |
#define TARGET_VERSION fputs (" (MCORE/pe)", stderr) |
|
#define TARGET_OS_CPP_BUILTINS() \ |
do \ |
{ \ |
builtin_define ("__pe__"); \ |
} \ |
while (0) |
|
/* The MCore ABI says that bitfields are unsigned by default. */ |
/* The EPOC C++ environment does not support exceptions. */ |
#undef CC1_SPEC |
#define CC1_SPEC "-funsigned-bitfields %{!DIN_GCC:-fno-rtti} %{!DIN_GCC:-fno-exceptions}" |
|
#undef SDB_DEBUGGING_INFO |
#define DBX_DEBUGGING_INFO 1 |
|
/* Computed in toplev.c. */ |
#undef PREFERRED_DEBUGGING_TYPE |
|
#define READONLY_DATA_SECTION_ASM_OP "\t.section .rdata" |
|
#define MCORE_EXPORT_NAME(STREAM, NAME) \ |
do \ |
{ \ |
fprintf (STREAM, "\t.section .drectve\n"); \ |
fprintf (STREAM, "\t.ascii \" -export:%s\"\n", \ |
(* targetm.strip_name_encoding) (NAME)); \ |
in_section = NULL; \ |
} \ |
while (0); |
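/* Illustration: for a dllexported symbol named `foo' the macro above emits

       .section .drectve
       .ascii " -export:foo"

   which the PE linker interprets as a directive to export `foo'.  */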
|
/* Output the label for an initialized variable. */ |
#undef ASM_DECLARE_OBJECT_NAME |
#define ASM_DECLARE_OBJECT_NAME(STREAM, NAME, DECL) \ |
do \ |
{ \ |
if (mcore_dllexport_name_p (NAME)) \ |
{ \ |
section *save_section = in_section; \ |
MCORE_EXPORT_NAME (STREAM, NAME); \ |
switch_to_section (save_section); \ |
} \ |
ASM_OUTPUT_LABEL ((STREAM), (NAME)); \ |
} \ |
while (0) |
|
/* Output a function label definition. */ |
#define ASM_DECLARE_FUNCTION_NAME(STREAM, NAME, DECL) \ |
do \ |
{ \ |
if (mcore_dllexport_name_p (NAME)) \ |
{ \ |
MCORE_EXPORT_NAME (STREAM, NAME); \ |
switch_to_section (function_section (DECL)); \ |
} \ |
ASM_OUTPUT_LABEL ((STREAM), (NAME)); \ |
} \ |
while (0); |
|
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true |
|
#define DBX_LINES_FUNCTION_RELATIVE 1 |
|
#define STARTFILE_SPEC "crt0.o%s" |
#define ENDFILE_SPEC "%{!mno-lsim:-lsim}" |
|
/* __CTOR_LIST__ and __DTOR_LIST__ must be defined by the linker script. */ |
#define CTOR_LISTS_DEFINED_EXTERNALLY |
|
#undef DO_GLOBAL_CTORS_BODY |
#undef DO_GLOBAL_DTORS_BODY |
#undef INIT_SECTION_ASM_OP |
#undef DTORS_SECTION_ASM_OP |
|
#define SUPPORTS_ONE_ONLY 1 |
|
/* Switch into a generic section. */ |
#undef TARGET_ASM_NAMED_SECTION |
#define TARGET_ASM_NAMED_SECTION default_pe_asm_named_section |