OpenCores
URL https://opencores.org/ocsvn/openrisc_2011-10-31/openrisc_2011-10-31/trunk

Subversion Repositories openrisc_2011-10-31

Compare Revisions

  • This comparison shows the changes necessary to convert path
    /openrisc/tags/gnu-src/gcc-4.5.1/gcc-4.5.1-or32-1.0rc1/gcc/config/cris
    from Rev 282 to Rev 338

Rev 282 → Rev 338

/arit.c
0,0 → 1,304
/* Signed and unsigned multiplication and division and modulus for CRIS.
Contributed by Axis Communications.
Written by Hans-Peter Nilsson <hp@axis.se>, circa 1992.
 
Copyright (C) 1998, 1999, 2000, 2001, 2002,
2005, 2009 Free Software Foundation, Inc.
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
 
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
 
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
 
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
 
 
/* Note that we provide prototypes for all "const" functions, to attach
the const attribute. This is necessary in 2.7.2 - adding the
attribute to the function *definition* is a syntax error.
This did not work with e.g. 2.1; back then, the return type had to
be "const". */
 
#include "config.h"
 
#if defined (__CRIS_arch_version) && __CRIS_arch_version >= 3
#define LZ(v) __builtin_clz (v)
#endif
 
 
#if defined (L_udivsi3) || defined (L_divsi3) || defined (L_umodsi3) \
    || defined (L_modsi3)
/* Result type of divmod worker function. */
struct quot_rem
{
  long quot;
  long rem;
};
 
/* This is the worker function for div and mod. It is inlined into the
respective library function. Parameter A must have bit 31 == 0. */
 
static __inline__ struct quot_rem
do_31div (unsigned long a, unsigned long b)
     __attribute__ ((__const__, __always_inline__));

static __inline__ struct quot_rem
do_31div (unsigned long a, unsigned long b)
{
  /* Adjust operands and result if a is 31 bits. */
  long extra = 0;
  int quot_digits = 0;

  if (b == 0)
    {
      struct quot_rem ret;
      ret.quot = 0xffffffff;
      ret.rem = 0xffffffff;
      return ret;
    }

  if (a < b)
    return (struct quot_rem) { 0, a };

#ifdef LZ
  if (b <= a)
    {
      quot_digits = LZ (b) - LZ (a);
      quot_digits += (a >= (b << quot_digits));
      b <<= quot_digits;
    }
#else
  while (b <= a)
    {
      b <<= 1;
      quot_digits++;
    }
#endif
 
  /* Is a 31 bits? Note that bit 31 is handled by the caller. */
  if (a & 0x40000000)
    {
      /* Then make b's highest bit at most 0x40000000, because it must have
         been 0x80000000 to be 1 bit higher than a. */
      b >>= 1;

      /* Adjust a to be maximum 0x3fffffff, i.e. two upper bits zero. */
      if (a >= b)
        {
          a -= b;
          extra = 1 << (quot_digits - 1);
        }
      else
        {
          a -= b >> 1;

          /* Remember that we adjusted a by subtracting b * 2 ** something. */
          extra = 1 << quot_digits;
        }

      /* The number of quotient digits will be one less, because
         we just adjusted b. */
      quot_digits--;
    }
 
  /* Now do the division part. */

  /* Subtract b and add ones to the right when a >= b
     i.e. "a - (b - 1) == (a - b) + 1". */
  b--;

#define DS __asm__ ("dstep %2,%0" : "=r" (a) : "0" (a), "r" (b))

  switch (quot_digits)
    {
    case 32: DS; case 31: DS; case 30: DS; case 29: DS;
    case 28: DS; case 27: DS; case 26: DS; case 25: DS;
    case 24: DS; case 23: DS; case 22: DS; case 21: DS;
    case 20: DS; case 19: DS; case 18: DS; case 17: DS;
    case 16: DS; case 15: DS; case 14: DS; case 13: DS;
    case 12: DS; case 11: DS; case 10: DS; case 9: DS;
    case 8: DS; case 7: DS; case 6: DS; case 5: DS;
    case 4: DS; case 3: DS; case 2: DS; case 1: DS;
    case 0:;
    }

  {
    struct quot_rem ret;
    ret.quot = (a & ((1 << quot_digits) - 1)) + extra;
    ret.rem = a >> quot_digits;
    return ret;
  }
}
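
/* Editor's illustration (not part of the original file): a minimal C
   model of what the unrolled DS sequence above computes, assuming the
   usual CRIS "dstep" semantics of "shift the destination left one bit,
   then subtract the source operand if the result is greater than or
   equal to it".  Because do_31div decrements b beforehand, subtracting
   b - 1 also deposits a 1 in the freshly shifted-in low bit of a, so
   quotient bits accumulate at the bottom of a while the remainder
   forms at the top.  The function model_dstep_div is hypothetical and
   for explanation only: b is assumed to already be shifted left by
   quot_digits as in do_31div, and the caller's "extra" adjustment for
   31-bit dividends is not modeled.  */

static struct quot_rem
model_dstep_div (unsigned long a, unsigned long b, int quot_digits)
{
  struct quot_rem ret;
  int i;

  /* Same trick as above: a - (b - 1) == (a - b) + 1.  */
  b--;

  for (i = 0; i < quot_digits; i++)
    {
      /* One "dstep %2,%0" step: shift left, then conditionally subtract.  */
      a <<= 1;
      if (a >= b)
        a -= b;
    }

  ret.quot = a & ((1 << quot_digits) - 1);
  ret.rem = a >> quot_digits;
  return ret;
}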
 
#ifdef L_udivsi3
unsigned long
__Udiv (unsigned long a, unsigned long b) __attribute__ ((__const__));
 
unsigned long
__Udiv (unsigned long a, unsigned long b)
{
  long extra = 0;

  /* Adjust operands and result, if a and/or b is 32 bits. */
  /* Effectively: b & 0x80000000. */
  if ((long) b < 0)
    return a >= b;

  /* Effectively: a & 0x80000000. */
  if ((long) a < 0)
    {
      int tmp = 0;

      if (b == 0)
        return 0xffffffff;
#ifdef LZ
      tmp = LZ (b);
#else
      for (tmp = 31; (((long) b & (1 << tmp)) == 0); tmp--)
        ;

      tmp = 31 - tmp;
#endif

      if ((b << tmp) > a)
        {
          extra = 1 << (tmp - 1);
          a -= b << (tmp - 1);
        }
      else
        {
          extra = 1 << tmp;
          a -= b << tmp;
        }
    }

  return do_31div (a, b).quot + extra;
}
#endif /* L_udivsi3 */
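
/* Editor's note (not part of the original file): a small, hypothetical
   self-test of __Udiv against the plain C division operator.  The point
   of the 32-bit adjustment above is that when the dividend has bit 31
   set, __Udiv first subtracts b << tmp or b << (tmp - 1) and folds the
   matching power of two into "extra", so do_31div only ever sees a
   dividend with bit 31 clear.  The test assumes the file was built with
   -DL_udivsi3 on a target where long is 32 bits, and is illustrative
   only.  */

#include <assert.h>

extern unsigned long __Udiv (unsigned long a, unsigned long b);

int
main (void)
{
  assert (__Udiv (0x80000001UL, 3) == 0x80000001UL / 3);    /* Bit 31 set in a.  */
  assert (__Udiv (0xffffffffUL, 0xfffffffeUL) == 1);        /* Bit 31 set in b.  */
  assert (__Udiv (5, 7) == 0);                              /* a < b.  */
  return 0;
}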
 
#ifdef L_divsi3
long
__Div (long a, long b) __attribute__ ((__const__));
 
long
__Div (long a, long b)
{
  long extra = 0;
  long sign = (b < 0) ? -1 : 1;

  /* We need to handle a == -2147483648 as expected and must while
     doing that avoid producing a sequence like "abs (a) < 0" as GCC
     may optimize out the test. That sequence may not be obvious as
     we call inline functions. Testing for a being negative and
     handling (presumably much rarer than positive) enables us to get
     a bit of optimization for an (accumulated) reduction of the
     penalty of the 0x80000000 special-case. */
  if (a < 0)
    {
      sign = -sign;

      if ((a & 0x7fffffff) == 0)
        {
          /* We're at 0x80000000. Tread carefully. */
          a -= b * sign;
          extra = sign;
        }
      a = -a;
    }

  /* We knowingly penalize pre-v10 models by multiplication with the
     sign. */
  return sign * do_31div (a, __builtin_labs (b)).quot + extra;
}
#endif /* L_divsi3 */
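
/* Editor's worked example (not part of the original file): for
   a = -2147483648 and b = 3 the special-case above proceeds as follows.
   sign becomes -1; the low 31 bits of a are zero, so a -= b * sign
   gives a = -2147483645 and extra = -1; negating yields 2147483645.
   do_31div (2147483645, 3).quot is 715827881, so the result is
   -1 * 715827881 + (-1) = -715827882, which is exactly -2147483648 / 3
   truncated toward zero, without ever evaluating an expression like
   "abs (-2147483648)".  */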
 
 
#ifdef L_umodsi3
unsigned long
__Umod (unsigned long a, unsigned long b) __attribute__ ((__const__));
 
unsigned long
__Umod (unsigned long a, unsigned long b)
{
  /* Adjust operands and result if a and/or b is 32 bits. */
  if ((long) b < 0)
    return a >= b ? a - b : a;

  if ((long) a < 0)
    {
      int tmp = 0;

      if (b == 0)
        return a;
#ifdef LZ
      tmp = LZ (b);
#else
      for (tmp = 31; (((long) b & (1 << tmp)) == 0); tmp--)
        ;
      tmp = 31 - tmp;
#endif

      if ((b << tmp) > a)
        {
          a -= b << (tmp - 1);
        }
      else
        {
          a -= b << tmp;
        }
    }

  return do_31div (a, b).rem;
}
#endif /* L_umodsi3 */
 
#ifdef L_modsi3
long
__Mod (long a, long b) __attribute__ ((__const__));
 
long
__Mod (long a, long b)
{
  long sign = 1;

  /* We need to handle a == -2147483648 as expected and must while
     doing that avoid producing a sequence like "abs (a) < 0" as GCC
     may optimize out the test. That sequence may not be obvious as
     we call inline functions. Testing for a being negative and
     handling (presumably much rarer than positive) enables us to get
     a bit of optimization for an (accumulated) reduction of the
     penalty of the 0x80000000 special-case. */
  if (a < 0)
    {
      sign = -1;
      if ((a & 0x7fffffff) == 0)
        /* We're at 0x80000000. Tread carefully. */
        a += __builtin_labs (b);
      a = -a;
    }

  return sign * do_31div (a, __builtin_labs (b)).rem;
}
#endif /* L_modsi3 */
#endif /* L_udivsi3 || L_divsi3 || L_umodsi3 || L_modsi3 */
 
/*
* Local variables:
* eval: (c-set-style "gnu")
* indent-tabs-mode: t
* End:
*/
arit.c Property changes:
Added: svn:eol-style = native
Added: svn:keywords = Id

Index: cris.c
===================================================================
--- cris.c (nonexistent)
+++ cris.c (revision 338)
@@ -0,0 +1,4070 @@
[cris.c: new file of 4,070 lines — "Definitions for GCC. Part of the machine description for CRIS." Copyright (C) 1998-2009 Free Software Foundation, Inc. Contributed by Axis Communications. Written by Hans-Peter Nilsson.]
*/ + internal_error ("internal error: cris_side_effect_mode_ok with bad operands"); +} + +/* Whether next_cc0_user of insn is LE or GT or requires a real compare + insn for other reasons. */ + +bool +cris_cc0_user_requires_cmp (rtx insn) +{ + rtx cc0_user = NULL; + rtx body; + rtx set; + + gcc_assert (insn != NULL); + + if (!TARGET_V32) + return false; + + cc0_user = next_cc0_user (insn); + if (cc0_user == NULL) + return false; + + body = PATTERN (cc0_user); + set = single_set (cc0_user); + + /* Users can be sCC and bCC. */ + if (JUMP_P (cc0_user) + && GET_CODE (body) == SET + && SET_DEST (body) == pc_rtx + && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE + && XEXP (XEXP (SET_SRC (body), 0), 0) == cc0_rtx) + { + return + GET_CODE (XEXP (SET_SRC (body), 0)) == GT + || GET_CODE (XEXP (SET_SRC (body), 0)) == LE; + } + else if (set) + { + return + GET_CODE (SET_SRC (body)) == GT + || GET_CODE (SET_SRC (body)) == LE; + } + + gcc_unreachable (); +} + +/* The function reg_overlap_mentioned_p in CVS (still as of 2001-05-16) + does not handle the case where the IN operand is strict_low_part; it + does handle it for X. Test-case in Axis-20010516. This function takes + care of that for THIS port. FIXME: strict_low_part is going away + anyway. */ + +static int +cris_reg_overlap_mentioned_p (rtx x, rtx in) +{ + /* The function reg_overlap_mentioned now handles when X is + strict_low_part, but not when IN is a STRICT_LOW_PART. */ + if (GET_CODE (in) == STRICT_LOW_PART) + in = XEXP (in, 0); + + return reg_overlap_mentioned_p (x, in); +} + +/* The TARGET_ASM_NAMED_SECTION worker. + We just dispatch to the functions for ELF and a.out. */ + +void +cris_target_asm_named_section (const char *name, unsigned int flags, + tree decl) +{ + if (! TARGET_ELF) + default_no_named_section (name, flags, decl); + else + default_elf_asm_named_section (name, flags, decl); +} + +/* Return TRUE iff X is a CONST valid for e.g. indexing. + ANY_OPERAND is 0 if X is in a CALL_P insn or movsi, 1 + elsewhere. */ + +bool +cris_valid_pic_const (rtx x, bool any_operand) +{ + gcc_assert (flag_pic); + + switch (GET_CODE (x)) + { + case CONST_INT: + case CONST_DOUBLE: + return true; + default: + ; + } + + if (GET_CODE (x) != CONST) + return false; + + x = XEXP (x, 0); + + /* Handle (const (plus (unspec .. UNSPEC_GOTREL) (const_int ...))). */ + if (GET_CODE (x) == PLUS + && GET_CODE (XEXP (x, 0)) == UNSPEC + && (XINT (XEXP (x, 0), 1) == CRIS_UNSPEC_GOTREL + || XINT (XEXP (x, 0), 1) == CRIS_UNSPEC_PCREL) + && CONST_INT_P (XEXP (x, 1))) + x = XEXP (x, 0); + + if (GET_CODE (x) == UNSPEC) + switch (XINT (x, 1)) + { + /* A PCREL operand is only valid for call and movsi. */ + case CRIS_UNSPEC_PLT_PCREL: + case CRIS_UNSPEC_PCREL: + return !any_operand; + + case CRIS_UNSPEC_PLT_GOTREL: + case CRIS_UNSPEC_PLTGOTREAD: + case CRIS_UNSPEC_GOTREAD: + case CRIS_UNSPEC_GOTREL: + return true; + default: + gcc_unreachable (); + } + + return cris_pic_symbol_type_of (x) == cris_no_symbol; +} + +/* Helper function to find the right PIC-type symbol to generate, + given the original (non-PIC) representation. */ + +enum cris_pic_symbol_type +cris_pic_symbol_type_of (rtx x) +{ + switch (GET_CODE (x)) + { + case SYMBOL_REF: + return SYMBOL_REF_LOCAL_P (x) + ? 
cris_rel_symbol : cris_got_symbol; + + case LABEL_REF: + return cris_rel_symbol; + + case CONST: + return cris_pic_symbol_type_of (XEXP (x, 0)); + + case PLUS: + case MINUS: + { + enum cris_pic_symbol_type t1 = cris_pic_symbol_type_of (XEXP (x, 0)); + enum cris_pic_symbol_type t2 = cris_pic_symbol_type_of (XEXP (x, 1)); + + gcc_assert (t1 == cris_no_symbol || t2 == cris_no_symbol); + + if (t1 == cris_got_symbol || t1 == cris_got_symbol) + return cris_got_symbol_needing_fixup; + + return t1 != cris_no_symbol ? t1 : t2; + } + + case CONST_INT: + case CONST_DOUBLE: + return cris_no_symbol; + + case UNSPEC: + /* Likely an offsettability-test attempting to add a constant to + a GOTREAD symbol, which can't be handled. */ + return cris_invalid_pic_symbol; + + default: + fatal_insn ("unrecognized supposed constant", x); + } + + gcc_unreachable (); +} + +/* The LEGITIMATE_PIC_OPERAND_P worker. */ + +int +cris_legitimate_pic_operand (rtx x) +{ + /* Symbols are not valid PIC operands as-is; just constants. */ + return cris_valid_pic_const (x, true); +} + +/* The ASM_OUTPUT_CASE_END worker. */ + +void +cris_asm_output_case_end (FILE *stream, int num, rtx table) +{ + if (TARGET_V32) + { + rtx whole_jump_insn = PATTERN (PREV_INSN (PREV_INSN (table))); + + /* This can be a SEQUENCE, meaning the delay-slot of the jump is + filled. */ + rtx parallel_jump + = (GET_CODE (whole_jump_insn) == SEQUENCE + ? PATTERN (XVECEXP (whole_jump_insn, 0, 0)) : whole_jump_insn); + + asm_fprintf (stream, + "\t.word %LL%d-.%s\n", + CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (XVECEXP + (parallel_jump, 0, 0), + 1), 2), 0)), + (TARGET_PDEBUG ? "; default" : "")); + return; + } + + asm_fprintf (stream, + "\t.word %LL%d-%LL%d%s\n", + CODE_LABEL_NUMBER (XEXP + (XEXP + (XEXP + (XVECEXP + (PATTERN + (PREV_INSN + (PREV_INSN (table))), 0, 0), 1), + 2), 0)), + num, + (TARGET_PDEBUG ? "; default" : "")); +} + +/* TARGET_HANDLE_OPTION worker. We just store the values into local + variables here. Checks for correct semantics are in + cris_override_options. */ + +static bool +cris_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, + int value ATTRIBUTE_UNUSED) +{ + switch (code) + { + case OPT_metrax100: + target_flags + |= (MASK_SVINTO + + MASK_ETRAX4_ADD + + MASK_ALIGN_BY_32); + break; + + case OPT_mno_etrax100: + target_flags + &= ~(MASK_SVINTO + + MASK_ETRAX4_ADD + + MASK_ALIGN_BY_32); + break; + + case OPT_m32_bit: + case OPT_m32bit: + target_flags + |= (MASK_STACK_ALIGN + + MASK_CONST_ALIGN + + MASK_DATA_ALIGN + + MASK_ALIGN_BY_32); + break; + + case OPT_m16_bit: + case OPT_m16bit: + target_flags + |= (MASK_STACK_ALIGN + + MASK_CONST_ALIGN + + MASK_DATA_ALIGN); + break; + + case OPT_m8_bit: + case OPT_m8bit: + target_flags + &= ~(MASK_STACK_ALIGN + + MASK_CONST_ALIGN + + MASK_DATA_ALIGN); + break; + + default: + break; + } + + CRIS_SUBTARGET_HANDLE_OPTION(code, arg, value); + + return true; +} + +/* The OVERRIDE_OPTIONS worker. + As is the norm, this also parses -mfoo=bar type parameters. */ + +void +cris_override_options (void) +{ + if (cris_max_stackframe_str) + { + cris_max_stackframe = atoi (cris_max_stackframe_str); + + /* Do some sanity checking. */ + if (cris_max_stackframe < 0 || cris_max_stackframe > 0x20000000) + internal_error ("-max-stackframe=%d is not usable, not between 0 and %d", + cris_max_stackframe, 0x20000000); + } + + /* Let "-metrax4" and "-metrax100" change the cpu version. 
*/ + if (TARGET_SVINTO && cris_cpu_version < CRIS_CPU_SVINTO) + cris_cpu_version = CRIS_CPU_SVINTO; + else if (TARGET_ETRAX4_ADD && cris_cpu_version < CRIS_CPU_ETRAX4) + cris_cpu_version = CRIS_CPU_ETRAX4; + + /* Parse -march=... and its synonym, the deprecated -mcpu=... */ + if (cris_cpu_str) + { + cris_cpu_version + = (*cris_cpu_str == 'v' ? atoi (cris_cpu_str + 1) : -1); + + if (strcmp ("etrax4", cris_cpu_str) == 0) + cris_cpu_version = 3; + + if (strcmp ("svinto", cris_cpu_str) == 0 + || strcmp ("etrax100", cris_cpu_str) == 0) + cris_cpu_version = 8; + + if (strcmp ("ng", cris_cpu_str) == 0 + || strcmp ("etrax100lx", cris_cpu_str) == 0) + cris_cpu_version = 10; + + if (cris_cpu_version < 0 || cris_cpu_version > 32) + error ("unknown CRIS version specification in -march= or -mcpu= : %s", + cris_cpu_str); + + /* Set the target flags. */ + if (cris_cpu_version >= CRIS_CPU_ETRAX4) + target_flags |= MASK_ETRAX4_ADD; + + /* If this is Svinto or higher, align for 32 bit accesses. */ + if (cris_cpu_version >= CRIS_CPU_SVINTO) + target_flags + |= (MASK_SVINTO | MASK_ALIGN_BY_32 + | MASK_STACK_ALIGN | MASK_CONST_ALIGN + | MASK_DATA_ALIGN); + + /* Note that we do not add new flags when it can be completely + described with a macro that uses -mcpu=X. So + TARGET_HAS_MUL_INSNS is (cris_cpu_version >= CRIS_CPU_NG). */ + } + + if (cris_tune_str) + { + int cris_tune + = (*cris_tune_str == 'v' ? atoi (cris_tune_str + 1) : -1); + + if (strcmp ("etrax4", cris_tune_str) == 0) + cris_tune = 3; + + if (strcmp ("svinto", cris_tune_str) == 0 + || strcmp ("etrax100", cris_tune_str) == 0) + cris_tune = 8; + + if (strcmp ("ng", cris_tune_str) == 0 + || strcmp ("etrax100lx", cris_tune_str) == 0) + cris_tune = 10; + + if (cris_tune < 0 || cris_tune > 32) + error ("unknown CRIS cpu version specification in -mtune= : %s", + cris_tune_str); + + if (cris_tune >= CRIS_CPU_SVINTO) + /* We have currently nothing more to tune than alignment for + memory accesses. */ + target_flags + |= (MASK_STACK_ALIGN | MASK_CONST_ALIGN + | MASK_DATA_ALIGN | MASK_ALIGN_BY_32); + } + + if (cris_cpu_version >= CRIS_CPU_V32) + target_flags &= ~(MASK_SIDE_EFFECT_PREFIXES|MASK_MUL_BUG); + + if (flag_pic) + { + /* Use error rather than warning, so invalid use is easily + detectable. Still change to the values we expect, to avoid + further errors. */ + if (! TARGET_LINUX) + { + error ("-fPIC and -fpic are not supported in this configuration"); + flag_pic = 0; + } + + /* Turn off function CSE. We need to have the addresses reach the + call expanders to get PLT-marked, as they could otherwise be + compared against zero directly or indirectly. After visiting the + call expanders they will then be cse:ed, as the call expanders + force_reg the addresses, effectively forcing flag_no_function_cse + to 0. */ + flag_no_function_cse = 1; + } + + if (write_symbols == DWARF2_DEBUG && ! TARGET_ELF) + { + warning (0, "that particular -g option is invalid with -maout and -melinux"); + write_symbols = DBX_DEBUG; + } + + /* Set the per-function-data initializer. */ + init_machine_status = cris_init_machine_status; +} + +/* The TARGET_ASM_OUTPUT_MI_THUNK worker. 
*/ + +static void +cris_asm_output_mi_thunk (FILE *stream, + tree thunkdecl ATTRIBUTE_UNUSED, + HOST_WIDE_INT delta, + HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED, + tree funcdecl) +{ + if (delta > 0) + fprintf (stream, "\tadd%s " HOST_WIDE_INT_PRINT_DEC ",$%s\n", + ADDITIVE_SIZE_MODIFIER (delta), delta, + reg_names[CRIS_FIRST_ARG_REG]); + else if (delta < 0) + fprintf (stream, "\tsub%s " HOST_WIDE_INT_PRINT_DEC ",$%s\n", + ADDITIVE_SIZE_MODIFIER (-delta), -delta, + reg_names[CRIS_FIRST_ARG_REG]); + + if (flag_pic) + { + const char *name = XSTR (XEXP (DECL_RTL (funcdecl), 0), 0); + + name = (* targetm.strip_name_encoding) (name); + + if (TARGET_V32) + { + fprintf (stream, "\tba "); + assemble_name (stream, name); + fprintf (stream, "%s\n", CRIS_PLT_PCOFFSET_SUFFIX); + } + else + { + fprintf (stream, "add.d "); + assemble_name (stream, name); + fprintf (stream, "%s,$pc\n", CRIS_PLT_PCOFFSET_SUFFIX); + } + } + else + { + fprintf (stream, "jump "); + assemble_name (stream, XSTR (XEXP (DECL_RTL (funcdecl), 0), 0)); + fprintf (stream, "\n"); + + if (TARGET_V32) + fprintf (stream, "\tnop\n"); + } +} + +/* Boilerplate emitted at start of file. + + NO_APP *only at file start* means faster assembly. It also means + comments are not allowed. In some cases comments will be output + for debugging purposes. Make sure they are allowed then. + + We want a .file directive only if TARGET_ELF. */ +static void +cris_file_start (void) +{ + /* These expressions can vary at run time, so we cannot put + them into TARGET_INITIALIZER. */ + targetm.file_start_app_off = !(TARGET_PDEBUG || flag_print_asm_name); + targetm.file_start_file_directive = TARGET_ELF; + + default_file_start (); +} + +/* Rename the function calls for integer multiply and divide. */ +static void +cris_init_libfuncs (void) +{ + set_optab_libfunc (smul_optab, SImode, "__Mul"); + set_optab_libfunc (sdiv_optab, SImode, "__Div"); + set_optab_libfunc (udiv_optab, SImode, "__Udiv"); + set_optab_libfunc (smod_optab, SImode, "__Mod"); + set_optab_libfunc (umod_optab, SImode, "__Umod"); +} + +/* The INIT_EXPANDERS worker sets the per-function-data initializer and + mark functions. */ + +void +cris_init_expanders (void) +{ + /* Nothing here at the moment. */ +} + +/* Zero initialization is OK for all current fields. */ + +static struct machine_function * +cris_init_machine_status (void) +{ + return GGC_CNEW (struct machine_function); +} + +/* Split a 2 word move (DI or presumably DF) into component parts. + Originally a copy of gen_split_move_double in m32r.c. */ + +rtx +cris_split_movdx (rtx *operands) +{ + enum machine_mode mode = GET_MODE (operands[0]); + rtx dest = operands[0]; + rtx src = operands[1]; + rtx val; + + /* We used to have to handle (SUBREG (MEM)) here, but that should no + longer happen; after reload there are no SUBREGs any more, and we're + only called after reload. */ + CRIS_ASSERT (GET_CODE (dest) != SUBREG && GET_CODE (src) != SUBREG); + + start_sequence (); + if (REG_P (dest)) + { + int dregno = REGNO (dest); + + /* Reg-to-reg copy. */ + if (REG_P (src)) + { + int sregno = REGNO (src); + + int reverse = (dregno == sregno + 1); + + /* We normally copy the low-numbered register first. However, if + the first register operand 0 is the same as the second register of + operand 1, we must copy in the opposite order. 
*/ + emit_insn (gen_rtx_SET (VOIDmode, + operand_subword (dest, reverse, TRUE, mode), + operand_subword (src, reverse, TRUE, mode))); + + emit_insn (gen_rtx_SET (VOIDmode, + operand_subword (dest, !reverse, TRUE, mode), + operand_subword (src, !reverse, TRUE, mode))); + } + /* Constant-to-reg copy. */ + else if (CONST_INT_P (src) || GET_CODE (src) == CONST_DOUBLE) + { + rtx words[2]; + split_double (src, &words[0], &words[1]); + emit_insn (gen_rtx_SET (VOIDmode, + operand_subword (dest, 0, TRUE, mode), + words[0])); + + emit_insn (gen_rtx_SET (VOIDmode, + operand_subword (dest, 1, TRUE, mode), + words[1])); + } + /* Mem-to-reg copy. */ + else if (MEM_P (src)) + { + /* If the high-address word is used in the address, we must load it + last. Otherwise, load it first. */ + rtx addr = XEXP (src, 0); + int reverse + = (refers_to_regno_p (dregno, dregno + 1, addr, NULL) != 0); + + /* The original code implies that we can't do + move.x [rN+],rM move.x [rN],rM+1 + when rN is dead, because of REG_NOTES damage. That is + consistent with what I've seen, so don't try it. + + We have two different cases here; if the addr is POST_INC, + just pass it through, otherwise add constants. */ + + if (GET_CODE (addr) == POST_INC) + { + rtx mem; + rtx insn; + + /* Whenever we emit insns with post-incremented + addresses ourselves, we must add a post-inc note + manually. */ + mem = change_address (src, SImode, addr); + insn + = gen_rtx_SET (VOIDmode, + operand_subword (dest, 0, TRUE, mode), mem); + insn = emit_insn (insn); + if (GET_CODE (XEXP (mem, 0)) == POST_INC) + REG_NOTES (insn) + = alloc_EXPR_LIST (REG_INC, XEXP (XEXP (mem, 0), 0), + REG_NOTES (insn)); + + mem = copy_rtx (mem); + insn + = gen_rtx_SET (VOIDmode, + operand_subword (dest, 1, TRUE, mode), mem); + insn = emit_insn (insn); + if (GET_CODE (XEXP (mem, 0)) == POST_INC) + REG_NOTES (insn) + = alloc_EXPR_LIST (REG_INC, XEXP (XEXP (mem, 0), 0), + REG_NOTES (insn)); + } + else + { + /* Make sure we don't get any other addresses with + embedded postincrements. They should be stopped in + GO_IF_LEGITIMATE_ADDRESS, but we're here for your + safety. */ + if (side_effects_p (addr)) + fatal_insn ("unexpected side-effects in address", addr); + + emit_insn (gen_rtx_SET + (VOIDmode, + operand_subword (dest, reverse, TRUE, mode), + change_address + (src, SImode, + plus_constant (addr, + reverse * UNITS_PER_WORD)))); + emit_insn (gen_rtx_SET + (VOIDmode, + operand_subword (dest, ! reverse, TRUE, mode), + change_address + (src, SImode, + plus_constant (addr, + (! reverse) * + UNITS_PER_WORD)))); + } + } + else + internal_error ("Unknown src"); + } + /* Reg-to-mem copy or clear mem. */ + else if (MEM_P (dest) + && (REG_P (src) + || src == const0_rtx + || src == CONST0_RTX (DFmode))) + { + rtx addr = XEXP (dest, 0); + + if (GET_CODE (addr) == POST_INC) + { + rtx mem; + rtx insn; + + /* Whenever we emit insns with post-incremented addresses + ourselves, we must add a post-inc note manually. 
*/ + mem = change_address (dest, SImode, addr); + insn + = gen_rtx_SET (VOIDmode, + mem, operand_subword (src, 0, TRUE, mode)); + insn = emit_insn (insn); + if (GET_CODE (XEXP (mem, 0)) == POST_INC) + REG_NOTES (insn) + = alloc_EXPR_LIST (REG_INC, XEXP (XEXP (mem, 0), 0), + REG_NOTES (insn)); + + mem = copy_rtx (mem); + insn + = gen_rtx_SET (VOIDmode, + mem, + operand_subword (src, 1, TRUE, mode)); + insn = emit_insn (insn); + if (GET_CODE (XEXP (mem, 0)) == POST_INC) + REG_NOTES (insn) + = alloc_EXPR_LIST (REG_INC, XEXP (XEXP (mem, 0), 0), + REG_NOTES (insn)); + } + else + { + /* Make sure we don't get any other addresses with embedded + postincrements. They should be stopped in + GO_IF_LEGITIMATE_ADDRESS, but we're here for your safety. */ + if (side_effects_p (addr)) + fatal_insn ("unexpected side-effects in address", addr); + + emit_insn (gen_rtx_SET + (VOIDmode, + change_address (dest, SImode, addr), + operand_subword (src, 0, TRUE, mode))); + + emit_insn (gen_rtx_SET + (VOIDmode, + change_address (dest, SImode, + plus_constant (addr, + UNITS_PER_WORD)), + operand_subword (src, 1, TRUE, mode))); + } + } + + else + internal_error ("Unknown dest"); + + val = get_insns (); + end_sequence (); + return val; +} + +/* The expander for the prologue pattern name. */ + +void +cris_expand_prologue (void) +{ + int regno; + int size = get_frame_size (); + /* Shorten the used name for readability. */ + int cfoa_size = crtl->outgoing_args_size; + int last_movem_reg = -1; + int framesize = 0; + rtx mem, insn; + int return_address_on_stack = cris_return_address_on_stack (); + int got_really_used = false; + int n_movem_regs = 0; + int pretend = crtl->args.pretend_args_size; + + /* Don't do anything if no prologues or epilogues are wanted. */ + if (!TARGET_PROLOGUE_EPILOGUE) + return; + + CRIS_ASSERT (size >= 0); + + if (crtl->uses_pic_offset_table) + { + /* A reference may have been optimized out (like the abort () in + fde_split in unwind-dw2-fde.c, at least 3.2.1) so check that + it's still used. */ + push_topmost_sequence (); + got_really_used + = reg_used_between_p (pic_offset_table_rtx, get_insns (), NULL_RTX); + pop_topmost_sequence (); + } + + /* Align the size to what's best for the CPU model. */ + if (TARGET_STACK_ALIGN) + size = TARGET_ALIGN_BY_32 ? (size + 3) & ~3 : (size + 1) & ~1; + + if (pretend) + { + /* See also cris_setup_incoming_varargs where + cfun->machine->stdarg_regs is set. There are other setters of + crtl->args.pretend_args_size than stdarg handling, like + for an argument passed with parts in R13 and stack. We must + not store R13 into the pretend-area for that case, as GCC does + that itself. "Our" store would be marked as redundant and GCC + will attempt to remove it, which will then be flagged as an + internal error; trying to remove a frame-related insn. */ + int stdarg_regs = cfun->machine->stdarg_regs; + + framesize += pretend; + + for (regno = CRIS_FIRST_ARG_REG + CRIS_MAX_ARGS_IN_REGS - 1; + stdarg_regs > 0; + regno--, pretend -= 4, stdarg_regs--) + { + insn = emit_insn (gen_rtx_SET (VOIDmode, + stack_pointer_rtx, + plus_constant (stack_pointer_rtx, + -4))); + /* FIXME: When dwarf2 frame output and unless asynchronous + exceptions, make dwarf2 bundle together all stack + adjustments like it does for registers between stack + adjustments. 
*/ + RTX_FRAME_RELATED_P (insn) = 1; + + mem = gen_rtx_MEM (SImode, stack_pointer_rtx); + set_mem_alias_set (mem, get_varargs_alias_set ()); + insn = emit_move_insn (mem, gen_rtx_raw_REG (SImode, regno)); + + /* Note the absence of RTX_FRAME_RELATED_P on the above insn: + the value isn't restored, so we don't want to tell dwarf2 + that it's been stored to stack, else EH handling info would + get confused. */ + } + + /* For other setters of crtl->args.pretend_args_size, we + just adjust the stack by leaving the remaining size in + "pretend", handled below. */ + } + + /* Save SRP if not a leaf function. */ + if (return_address_on_stack) + { + insn = emit_insn (gen_rtx_SET (VOIDmode, + stack_pointer_rtx, + plus_constant (stack_pointer_rtx, + -4 - pretend))); + pretend = 0; + RTX_FRAME_RELATED_P (insn) = 1; + + mem = gen_rtx_MEM (SImode, stack_pointer_rtx); + set_mem_alias_set (mem, get_frame_alias_set ()); + insn = emit_move_insn (mem, gen_rtx_raw_REG (SImode, CRIS_SRP_REGNUM)); + RTX_FRAME_RELATED_P (insn) = 1; + framesize += 4; + } + + /* Set up the frame pointer, if needed. */ + if (frame_pointer_needed) + { + insn = emit_insn (gen_rtx_SET (VOIDmode, + stack_pointer_rtx, + plus_constant (stack_pointer_rtx, + -4 - pretend))); + pretend = 0; + RTX_FRAME_RELATED_P (insn) = 1; + + mem = gen_rtx_MEM (SImode, stack_pointer_rtx); + set_mem_alias_set (mem, get_frame_alias_set ()); + insn = emit_move_insn (mem, frame_pointer_rtx); + RTX_FRAME_RELATED_P (insn) = 1; + + insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx); + RTX_FRAME_RELATED_P (insn) = 1; + + framesize += 4; + } + + /* Between frame-pointer and saved registers lie the area for local + variables. If we get here with "pretended" size remaining, count + it into the general stack size. */ + size += pretend; + + /* Get a contiguous sequence of registers, starting with R0, that need + to be saved. */ + for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) + { + if (cris_reg_saved_in_regsave_area (regno, got_really_used)) + { + n_movem_regs++; + + /* Check if movem may be used for registers so far. */ + if (regno == last_movem_reg + 1) + /* Yes, update next expected register. */ + last_movem_reg = regno; + else + { + /* We cannot use movem for all registers. We have to flush + any movem:ed registers we got so far. */ + if (last_movem_reg != -1) + { + int n_saved + = (n_movem_regs == 1) ? 1 : last_movem_reg + 1; + + /* It is a win to use a side-effect assignment for + 64 <= size <= 128. But side-effect on movem was + not usable for CRIS v0..3. Also only do it if + side-effects insns are allowed. 
*/ + if ((last_movem_reg + 1) * 4 + size >= 64 + && (last_movem_reg + 1) * 4 + size <= 128 + && (cris_cpu_version >= CRIS_CPU_SVINTO || n_saved == 1) + && TARGET_SIDE_EFFECT_PREFIXES) + { + mem + = gen_rtx_MEM (SImode, + plus_constant (stack_pointer_rtx, + -(n_saved * 4 + size))); + set_mem_alias_set (mem, get_frame_alias_set ()); + insn + = cris_emit_movem_store (mem, GEN_INT (n_saved), + -(n_saved * 4 + size), + true); + } + else + { + insn + = gen_rtx_SET (VOIDmode, + stack_pointer_rtx, + plus_constant (stack_pointer_rtx, + -(n_saved * 4 + size))); + insn = emit_insn (insn); + RTX_FRAME_RELATED_P (insn) = 1; + + mem = gen_rtx_MEM (SImode, stack_pointer_rtx); + set_mem_alias_set (mem, get_frame_alias_set ()); + insn = cris_emit_movem_store (mem, GEN_INT (n_saved), + 0, true); + } + + framesize += n_saved * 4 + size; + last_movem_reg = -1; + size = 0; + } + + insn = emit_insn (gen_rtx_SET (VOIDmode, + stack_pointer_rtx, + plus_constant (stack_pointer_rtx, + -4 - size))); + RTX_FRAME_RELATED_P (insn) = 1; + + mem = gen_rtx_MEM (SImode, stack_pointer_rtx); + set_mem_alias_set (mem, get_frame_alias_set ()); + insn = emit_move_insn (mem, gen_rtx_raw_REG (SImode, regno)); + RTX_FRAME_RELATED_P (insn) = 1; + + framesize += 4 + size; + size = 0; + } + } + } + + /* Check after, if we could movem all registers. This is the normal case. */ + if (last_movem_reg != -1) + { + int n_saved + = (n_movem_regs == 1) ? 1 : last_movem_reg + 1; + + /* Side-effect on movem was not usable for CRIS v0..3. Also only + do it if side-effects insns are allowed. */ + if ((last_movem_reg + 1) * 4 + size >= 64 + && (last_movem_reg + 1) * 4 + size <= 128 + && (cris_cpu_version >= CRIS_CPU_SVINTO || n_saved == 1) + && TARGET_SIDE_EFFECT_PREFIXES) + { + mem + = gen_rtx_MEM (SImode, + plus_constant (stack_pointer_rtx, + -(n_saved * 4 + size))); + set_mem_alias_set (mem, get_frame_alias_set ()); + insn = cris_emit_movem_store (mem, GEN_INT (n_saved), + -(n_saved * 4 + size), true); + } + else + { + insn + = gen_rtx_SET (VOIDmode, + stack_pointer_rtx, + plus_constant (stack_pointer_rtx, + -(n_saved * 4 + size))); + insn = emit_insn (insn); + RTX_FRAME_RELATED_P (insn) = 1; + + mem = gen_rtx_MEM (SImode, stack_pointer_rtx); + set_mem_alias_set (mem, get_frame_alias_set ()); + insn = cris_emit_movem_store (mem, GEN_INT (n_saved), 0, true); + } + + framesize += n_saved * 4 + size; + /* We have to put outgoing argument space after regs. */ + if (cfoa_size) + { + insn = emit_insn (gen_rtx_SET (VOIDmode, + stack_pointer_rtx, + plus_constant (stack_pointer_rtx, + -cfoa_size))); + RTX_FRAME_RELATED_P (insn) = 1; + framesize += cfoa_size; + } + } + else if ((size + cfoa_size) > 0) + { + insn = emit_insn (gen_rtx_SET (VOIDmode, + stack_pointer_rtx, + plus_constant (stack_pointer_rtx, + -(cfoa_size + size)))); + RTX_FRAME_RELATED_P (insn) = 1; + framesize += size + cfoa_size; + } + + /* Set up the PIC register, if it is used. */ + if (got_really_used) + { + rtx got + = gen_rtx_UNSPEC (SImode, gen_rtvec (1, const0_rtx), CRIS_UNSPEC_GOT); + emit_move_insn (pic_offset_table_rtx, got); + + /* FIXME: This is a cover-up for flow2 messing up; it doesn't + follow exceptional paths and tries to delete the GOT load as + unused, if it isn't used on the non-exceptional paths. Other + ports have similar or other cover-ups, or plain bugs marking + the GOT register load as maybe-dead. To see this, remove the + line below and try libsupc++/vec.cc or a trivial + "static void y (); void x () {try {y ();} catch (...) {}}". 
*/ + emit_use (pic_offset_table_rtx); + } + + if (cris_max_stackframe && framesize > cris_max_stackframe) + warning (0, "stackframe too big: %d bytes", framesize); +} + +/* The expander for the epilogue pattern. */ + +void +cris_expand_epilogue (void) +{ + int regno; + int size = get_frame_size (); + int last_movem_reg = -1; + int argspace_offset = crtl->outgoing_args_size; + int pretend = crtl->args.pretend_args_size; + rtx mem; + bool return_address_on_stack = cris_return_address_on_stack (); + /* A reference may have been optimized out + (like the abort () in fde_split in unwind-dw2-fde.c, at least 3.2.1) + so check that it's still used. */ + int got_really_used = false; + int n_movem_regs = 0; + + if (!TARGET_PROLOGUE_EPILOGUE) + return; + + if (crtl->uses_pic_offset_table) + { + /* A reference may have been optimized out (like the abort () in + fde_split in unwind-dw2-fde.c, at least 3.2.1) so check that + it's still used. */ + push_topmost_sequence (); + got_really_used + = reg_used_between_p (pic_offset_table_rtx, get_insns (), NULL_RTX); + pop_topmost_sequence (); + } + + /* Align byte count of stack frame. */ + if (TARGET_STACK_ALIGN) + size = TARGET_ALIGN_BY_32 ? (size + 3) & ~3 : (size + 1) & ~1; + + /* Check how many saved regs we can movem. They start at r0 and must + be contiguous. */ + for (regno = 0; + regno < FIRST_PSEUDO_REGISTER; + regno++) + if (cris_reg_saved_in_regsave_area (regno, got_really_used)) + { + n_movem_regs++; + + if (regno == last_movem_reg + 1) + last_movem_reg = regno; + else + break; + } + + /* If there was only one register that really needed to be saved + through movem, don't use movem. */ + if (n_movem_regs == 1) + last_movem_reg = -1; + + /* Now emit "normal" move insns for all regs higher than the movem + regs. */ + for (regno = FIRST_PSEUDO_REGISTER - 1; + regno > last_movem_reg; + regno--) + if (cris_reg_saved_in_regsave_area (regno, got_really_used)) + { + rtx insn; + + if (argspace_offset) + { + /* There is an area for outgoing parameters located before + the saved registers. We have to adjust for that. */ + emit_insn (gen_rtx_SET (VOIDmode, + stack_pointer_rtx, + plus_constant (stack_pointer_rtx, + argspace_offset))); + /* Make sure we only do this once. */ + argspace_offset = 0; + } + + mem = gen_rtx_MEM (SImode, gen_rtx_POST_INC (SImode, + stack_pointer_rtx)); + set_mem_alias_set (mem, get_frame_alias_set ()); + insn = emit_move_insn (gen_rtx_raw_REG (SImode, regno), mem); + + /* Whenever we emit insns with post-incremented addresses + ourselves, we must add a post-inc note manually. */ + REG_NOTES (insn) + = alloc_EXPR_LIST (REG_INC, stack_pointer_rtx, REG_NOTES (insn)); + } + + /* If we have any movem-restore, do it now. */ + if (last_movem_reg != -1) + { + rtx insn; + + if (argspace_offset) + { + emit_insn (gen_rtx_SET (VOIDmode, + stack_pointer_rtx, + plus_constant (stack_pointer_rtx, + argspace_offset))); + argspace_offset = 0; + } + + mem = gen_rtx_MEM (SImode, + gen_rtx_POST_INC (SImode, stack_pointer_rtx)); + set_mem_alias_set (mem, get_frame_alias_set ()); + insn + = emit_insn (cris_gen_movem_load (mem, + GEN_INT (last_movem_reg + 1), 0)); + /* Whenever we emit insns with post-incremented addresses + ourselves, we must add a post-inc note manually. 
*/ + if (side_effects_p (PATTERN (insn))) + REG_NOTES (insn) + = alloc_EXPR_LIST (REG_INC, stack_pointer_rtx, REG_NOTES (insn)); + } + + /* If we don't clobber all of the allocated stack area (we've already + deallocated saved registers), GCC might want to schedule loads from + the stack to *after* the stack-pointer restore, which introduces an + interrupt race condition. This happened for the initial-value + SRP-restore for g++.dg/eh/registers1.C (noticed by inspection of + other failure for that test). It also happened for the stack slot + for the return value in (one version of) + linux/fs/dcache.c:__d_lookup, at least with "-O2 + -fno-omit-frame-pointer". */ + + /* Restore frame pointer if necessary. */ + if (frame_pointer_needed) + { + rtx insn; + + emit_insn (gen_cris_frame_deallocated_barrier ()); + + emit_move_insn (stack_pointer_rtx, frame_pointer_rtx); + mem = gen_rtx_MEM (SImode, gen_rtx_POST_INC (SImode, + stack_pointer_rtx)); + set_mem_alias_set (mem, get_frame_alias_set ()); + insn = emit_move_insn (frame_pointer_rtx, mem); + + /* Whenever we emit insns with post-incremented addresses + ourselves, we must add a post-inc note manually. */ + REG_NOTES (insn) + = alloc_EXPR_LIST (REG_INC, stack_pointer_rtx, REG_NOTES (insn)); + } + else if ((size + argspace_offset) != 0) + { + emit_insn (gen_cris_frame_deallocated_barrier ()); + + /* If there was no frame-pointer to restore sp from, we must + explicitly deallocate local variables. */ + + /* Handle space for outgoing parameters that hasn't been handled + yet. */ + size += argspace_offset; + + emit_insn (gen_rtx_SET (VOIDmode, + stack_pointer_rtx, + plus_constant (stack_pointer_rtx, size))); + } + + /* If this function has no pushed register parameters + (stdargs/varargs), and if it is not a leaf function, then we have + the return address on the stack. */ + if (return_address_on_stack && pretend == 0) + { + if (TARGET_V32 || crtl->calls_eh_return) + { + rtx mem; + rtx insn; + rtx srpreg = gen_rtx_raw_REG (SImode, CRIS_SRP_REGNUM); + mem = gen_rtx_MEM (SImode, + gen_rtx_POST_INC (SImode, + stack_pointer_rtx)); + set_mem_alias_set (mem, get_frame_alias_set ()); + insn = emit_move_insn (srpreg, mem); + + /* Whenever we emit insns with post-incremented addresses + ourselves, we must add a post-inc note manually. */ + REG_NOTES (insn) + = alloc_EXPR_LIST (REG_INC, stack_pointer_rtx, REG_NOTES (insn)); + + if (crtl->calls_eh_return) + emit_insn (gen_addsi3 (stack_pointer_rtx, + stack_pointer_rtx, + gen_rtx_raw_REG (SImode, + CRIS_STACKADJ_REG))); + cris_expand_return (false); + } + else + cris_expand_return (true); + + return; + } + + /* If we pushed some register parameters, then adjust the stack for + them. */ + if (pretend != 0) + { + /* If SRP is stored on the way, we need to restore it first. */ + if (return_address_on_stack) + { + rtx mem; + rtx srpreg = gen_rtx_raw_REG (SImode, CRIS_SRP_REGNUM); + rtx insn; + + mem = gen_rtx_MEM (SImode, + gen_rtx_POST_INC (SImode, + stack_pointer_rtx)); + set_mem_alias_set (mem, get_frame_alias_set ()); + insn = emit_move_insn (srpreg, mem); + + /* Whenever we emit insns with post-incremented addresses + ourselves, we must add a post-inc note manually. */ + REG_NOTES (insn) + = alloc_EXPR_LIST (REG_INC, stack_pointer_rtx, REG_NOTES (insn)); + } + + emit_insn (gen_rtx_SET (VOIDmode, + stack_pointer_rtx, + plus_constant (stack_pointer_rtx, pretend))); + } + + /* Perform the "physical" unwinding that the EH machinery calculated. 
*/ + if (crtl->calls_eh_return) + emit_insn (gen_addsi3 (stack_pointer_rtx, + stack_pointer_rtx, + gen_rtx_raw_REG (SImode, + CRIS_STACKADJ_REG))); + cris_expand_return (false); +} + +/* Worker function for generating movem from mem for load_multiple. */ + +rtx +cris_gen_movem_load (rtx src, rtx nregs_rtx, int nprefix) +{ + int nregs = INTVAL (nregs_rtx); + rtvec vec; + int eltno = 1; + int i; + rtx srcreg = XEXP (src, 0); + unsigned int regno = nregs - 1; + int regno_inc = -1; + + if (TARGET_V32) + { + regno = 0; + regno_inc = 1; + } + + if (GET_CODE (srcreg) == POST_INC) + srcreg = XEXP (srcreg, 0); + + CRIS_ASSERT (REG_P (srcreg)); + + /* Don't use movem for just one insn. The insns are equivalent except + for the pipeline hazard (on v32); movem does not forward the loaded + registers so there's a three cycles penalty for their use. */ + if (nregs == 1) + return gen_movsi (gen_rtx_REG (SImode, 0), src); + + vec = rtvec_alloc (nprefix + nregs + + (GET_CODE (XEXP (src, 0)) == POST_INC)); + + if (GET_CODE (XEXP (src, 0)) == POST_INC) + { + RTVEC_ELT (vec, nprefix + 1) + = gen_rtx_SET (VOIDmode, srcreg, plus_constant (srcreg, nregs * 4)); + eltno++; + } + + src = replace_equiv_address (src, srcreg); + RTVEC_ELT (vec, nprefix) + = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, regno), src); + regno += regno_inc; + + for (i = 1; i < nregs; i++, eltno++) + { + RTVEC_ELT (vec, nprefix + eltno) + = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, regno), + adjust_address_nv (src, SImode, i * 4)); + regno += regno_inc; + } + + return gen_rtx_PARALLEL (VOIDmode, vec); +} + +/* Worker function for generating movem to mem. If FRAME_RELATED, notes + are added that the dwarf2 machinery understands. */ + +rtx +cris_emit_movem_store (rtx dest, rtx nregs_rtx, int increment, + bool frame_related) +{ + int nregs = INTVAL (nregs_rtx); + rtvec vec; + int eltno = 1; + int i; + rtx insn; + rtx destreg = XEXP (dest, 0); + unsigned int regno = nregs - 1; + int regno_inc = -1; + + if (TARGET_V32) + { + regno = 0; + regno_inc = 1; + } + + if (GET_CODE (destreg) == POST_INC) + increment += nregs * 4; + + if (GET_CODE (destreg) == POST_INC || GET_CODE (destreg) == PLUS) + destreg = XEXP (destreg, 0); + + CRIS_ASSERT (REG_P (destreg)); + + /* Don't use movem for just one insn. The insns are equivalent except + for the pipeline hazard (on v32); movem does not forward the loaded + registers so there's a three cycles penalty for use. */ + if (nregs == 1) + { + rtx mov = gen_rtx_SET (VOIDmode, dest, gen_rtx_REG (SImode, 0)); + + if (increment == 0) + { + insn = emit_insn (mov); + if (frame_related) + RTX_FRAME_RELATED_P (insn) = 1; + return insn; + } + + /* If there was a request for a side-effect, create the ordinary + parallel. */ + vec = rtvec_alloc (2); + + RTVEC_ELT (vec, 0) = mov; + RTVEC_ELT (vec, 1) = gen_rtx_SET (VOIDmode, destreg, + plus_constant (destreg, increment)); + if (frame_related) + { + RTX_FRAME_RELATED_P (mov) = 1; + RTX_FRAME_RELATED_P (RTVEC_ELT (vec, 1)) = 1; + } + } + else + { + vec = rtvec_alloc (nregs + (increment != 0 ? 1 : 0)); + RTVEC_ELT (vec, 0) + = gen_rtx_SET (VOIDmode, + replace_equiv_address (dest, + plus_constant (destreg, + increment)), + gen_rtx_REG (SImode, regno)); + regno += regno_inc; + + /* The dwarf2 info wants this mark on each component in a parallel + that's part of the prologue (though it's optional on the first + component). 
*/ + if (frame_related) + RTX_FRAME_RELATED_P (RTVEC_ELT (vec, 0)) = 1; + + if (increment != 0) + { + RTVEC_ELT (vec, 1) + = gen_rtx_SET (VOIDmode, destreg, + plus_constant (destreg, + increment != 0 + ? increment : nregs * 4)); + eltno++; + + if (frame_related) + RTX_FRAME_RELATED_P (RTVEC_ELT (vec, 1)) = 1; + + /* Don't call adjust_address_nv on a post-incremented address if + we can help it. */ + if (GET_CODE (XEXP (dest, 0)) == POST_INC) + dest = replace_equiv_address (dest, destreg); + } + + for (i = 1; i < nregs; i++, eltno++) + { + RTVEC_ELT (vec, eltno) + = gen_rtx_SET (VOIDmode, adjust_address_nv (dest, SImode, i * 4), + gen_rtx_REG (SImode, regno)); + if (frame_related) + RTX_FRAME_RELATED_P (RTVEC_ELT (vec, eltno)) = 1; + regno += regno_inc; + } + } + + insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, vec)); + + /* Because dwarf2out.c handles the insns in a parallel as a sequence, + we need to keep the stack adjustment separate, after the + MEM-setters. Else the stack-adjustment in the second component of + the parallel would be mishandled; the offsets for the SETs that + follow it would be wrong. We prepare for this by adding a + REG_FRAME_RELATED_EXPR with the MEM-setting parts in a SEQUENCE + followed by the increment. Note that we have FRAME_RELATED_P on + all the SETs, including the original stack adjustment SET in the + parallel. */ + if (frame_related) + { + if (increment != 0) + { + rtx seq = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (nregs + 1)); + XVECEXP (seq, 0, 0) = copy_rtx (XVECEXP (PATTERN (insn), 0, 0)); + for (i = 1; i < nregs; i++) + XVECEXP (seq, 0, i) + = copy_rtx (XVECEXP (PATTERN (insn), 0, i + 1)); + XVECEXP (seq, 0, nregs) = copy_rtx (XVECEXP (PATTERN (insn), 0, 1)); + REG_NOTES (insn) + = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, seq, + REG_NOTES (insn)); + } + + RTX_FRAME_RELATED_P (insn) = 1; + } + + return insn; +} + +/* Worker function for expanding the address for PIC function calls. */ + +void +cris_expand_pic_call_address (rtx *opp) +{ + rtx op = *opp; + + gcc_assert (MEM_P (op)); + op = XEXP (op, 0); + + /* It might be that code can be generated that jumps to 0 (or to a + specific address). Don't die on that. (There is a + testcase.) */ + if (CONSTANT_ADDRESS_P (op) && !CONST_INT_P (op)) + { + enum cris_pic_symbol_type t = cris_pic_symbol_type_of (op); + + CRIS_ASSERT (can_create_pseudo_p ()); + + /* For local symbols (non-PLT), just get the plain symbol + reference into a register. For symbols that can be PLT, make + them PLT. */ + if (t == cris_rel_symbol) + { + /* For v32, we're fine as-is; just PICify the symbol. Forcing + into a register caused performance regression for 3.2.1, + observable in __floatdidf and elsewhere in libgcc. */ + if (TARGET_V32) + { + rtx sym = GET_CODE (op) != CONST ? op : get_related_value (op); + HOST_WIDE_INT offs = get_integer_term (op); + + /* We can't get calls to sym+N, N integer, can we? */ + gcc_assert (offs == 0); + + op = gen_rtx_CONST (Pmode, + gen_rtx_UNSPEC (Pmode, gen_rtvec (1, sym), + CRIS_UNSPEC_PCREL)); + } + else + op = force_reg (Pmode, op); + } + else if (t == cris_got_symbol) + { + if (TARGET_AVOID_GOTPLT) + { + /* Change a "jsr sym" into (allocate register rM, rO) + "move.d (const (unspec [sym rPIC] CRIS_UNSPEC_PLT_GOTREL)),rM" + "add.d rPIC,rM,rO", "jsr rO" for pre-v32 and + "jsr (const (unspec [sym rPIC] CRIS_UNSPEC_PLT_PCREL))" + for v32. 
*/ + rtx tem, rm, ro; + gcc_assert (can_create_pseudo_p ()); + crtl->uses_pic_offset_table = 1; + tem = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op), + TARGET_V32 + ? CRIS_UNSPEC_PLT_PCREL + : CRIS_UNSPEC_PLT_GOTREL); + tem = gen_rtx_CONST (Pmode, tem); + if (TARGET_V32) + op = tem; + else + { + rm = gen_reg_rtx (Pmode); + emit_move_insn (rm, tem); + ro = gen_reg_rtx (Pmode); + if (expand_binop (Pmode, add_optab, rm, + pic_offset_table_rtx, + ro, 0, OPTAB_LIB_WIDEN) != ro) + internal_error ("expand_binop failed in movsi got"); + op = ro; + } + } + else + { + /* Change a "jsr sym" into (allocate register rM, rO) + "move.d (const (unspec [sym] CRIS_UNSPEC_PLTGOTREAD)),rM" + "add.d rPIC,rM,rO" "jsr [rO]" with the memory access + marked as not trapping and not aliasing. No "move.d + [rO],rP" as that would invite to re-use of a value + that should not be reused. FIXME: Need a peephole2 + for cases when this is cse:d from the call, to change + back to just get the PLT entry address, so we don't + resolve the same symbol over and over (the memory + access of the PLTGOT isn't constant). */ + rtx tem, mem, rm, ro; + + gcc_assert (can_create_pseudo_p ()); + crtl->uses_pic_offset_table = 1; + tem = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op), + CRIS_UNSPEC_PLTGOTREAD); + rm = gen_reg_rtx (Pmode); + emit_move_insn (rm, gen_rtx_CONST (Pmode, tem)); + ro = gen_reg_rtx (Pmode); + if (expand_binop (Pmode, add_optab, rm, + pic_offset_table_rtx, + ro, 0, OPTAB_LIB_WIDEN) != ro) + internal_error ("expand_binop failed in movsi got"); + mem = gen_rtx_MEM (Pmode, ro); + + /* This MEM doesn't alias anything. Whether it aliases + other same symbols is unimportant. */ + set_mem_alias_set (mem, new_alias_set ()); + MEM_NOTRAP_P (mem) = 1; + op = mem; + } + } + else + /* Can't possibly get a GOT-needing-fixup for a function-call, + right? */ + fatal_insn ("Unidentifiable call op", op); + + *opp = replace_equiv_address (*opp, op); + } +} + +/* Make sure operands are in the right order for an addsi3 insn as + generated by a define_split. Nothing but REG_P as the first + operand is recognized by addsi3 after reload. OPERANDS contains + the operands, with the first at OPERANDS[N] and the second at + OPERANDS[N+1]. */ + +void +cris_order_for_addsi3 (rtx *operands, int n) +{ + if (!REG_P (operands[n])) + { + rtx tem = operands[n]; + operands[n] = operands[n + 1]; + operands[n + 1] = tem; + } +} + +/* Use from within code, from e.g. PRINT_OPERAND and + PRINT_OPERAND_ADDRESS. Macros used in output_addr_const need to emit + different things depending on whether code operand or constant is + emitted. */ + +static void +cris_output_addr_const (FILE *file, rtx x) +{ + in_code++; + output_addr_const (file, x); + in_code--; +} + +/* Worker function for ASM_OUTPUT_SYMBOL_REF. */ + +void +cris_asm_output_symbol_ref (FILE *file, rtx x) +{ + gcc_assert (GET_CODE (x) == SYMBOL_REF); + + if (flag_pic && in_code > 0) + { + const char *origstr = XSTR (x, 0); + const char *str; + str = (* targetm.strip_name_encoding) (origstr); + assemble_name (file, str); + + /* Sanity check. */ + if (!TARGET_V32 && !crtl->uses_pic_offset_table) + output_operand_lossage ("PIC register isn't set up"); + } + else + assemble_name (file, XSTR (x, 0)); +} + +/* Worker function for ASM_OUTPUT_LABEL_REF. */ + +void +cris_asm_output_label_ref (FILE *file, char *buf) +{ + if (flag_pic && in_code > 0) + { + assemble_name (file, buf); + + /* Sanity check. 
*/ + if (!TARGET_V32 && !crtl->uses_pic_offset_table) + internal_error ("emitting PIC operand, but PIC register isn't set up"); + } + else + assemble_name (file, buf); +} + +/* Worker function for OUTPUT_ADDR_CONST_EXTRA. */ + +bool +cris_output_addr_const_extra (FILE *file, rtx xconst) +{ + switch (GET_CODE (xconst)) + { + rtx x; + + case UNSPEC: + x = XVECEXP (xconst, 0, 0); + CRIS_ASSERT (GET_CODE (x) == SYMBOL_REF + || GET_CODE (x) == LABEL_REF + || GET_CODE (x) == CONST); + output_addr_const (file, x); + switch (XINT (xconst, 1)) + { + case CRIS_UNSPEC_PCREL: + /* We only get this with -fpic/PIC to tell it apart from an + invalid symbol. We can't tell here, but it should only + be the operand of a call or movsi. */ + gcc_assert (TARGET_V32 && flag_pic); + break; + + case CRIS_UNSPEC_PLT_PCREL: + gcc_assert (TARGET_V32); + fprintf (file, ":PLT"); + break; + + case CRIS_UNSPEC_PLT_GOTREL: + gcc_assert (!TARGET_V32); + fprintf (file, ":PLTG"); + break; + + case CRIS_UNSPEC_GOTREL: + gcc_assert (!TARGET_V32); + fprintf (file, ":GOTOFF"); + break; + + case CRIS_UNSPEC_GOTREAD: + if (flag_pic == 1) + fprintf (file, ":GOT16"); + else + fprintf (file, ":GOT"); + break; + + case CRIS_UNSPEC_PLTGOTREAD: + if (flag_pic == 1) + fprintf (file, CRIS_GOTPLT_SUFFIX "16"); + else + fprintf (file, CRIS_GOTPLT_SUFFIX); + break; + + default: + gcc_unreachable (); + } + return true; + + default: + return false; + } +} + +/* Worker function for TARGET_STRUCT_VALUE_RTX. */ + +static rtx +cris_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED, + int incoming ATTRIBUTE_UNUSED) +{ + return gen_rtx_REG (Pmode, CRIS_STRUCT_VALUE_REGNUM); +} + +/* Worker function for TARGET_SETUP_INCOMING_VARARGS. */ + +static void +cris_setup_incoming_varargs (CUMULATIVE_ARGS *ca, + enum machine_mode mode ATTRIBUTE_UNUSED, + tree type ATTRIBUTE_UNUSED, + int *pretend_arg_size, + int second_time) +{ + if (ca->regs < CRIS_MAX_ARGS_IN_REGS) + { + int stdarg_regs = CRIS_MAX_ARGS_IN_REGS - ca->regs; + cfun->machine->stdarg_regs = stdarg_regs; + *pretend_arg_size = stdarg_regs * 4; + } + + if (TARGET_PDEBUG) + fprintf (asm_out_file, + "\n; VA:: ANSI: %d args before, anon @ #%d, %dtime\n", + ca->regs, *pretend_arg_size, second_time); +} + +/* Return true if TYPE must be passed by invisible reference. + For cris, we pass <= 8 bytes by value, others by reference. */ + +static bool +cris_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED, + enum machine_mode mode, const_tree type, + bool named ATTRIBUTE_UNUSED) +{ + return (targetm.calls.must_pass_in_stack (mode, type) + || CRIS_FUNCTION_ARG_SIZE (mode, type) > 8); +} + +/* A combination of defining TARGET_PROMOTE_FUNCTION_MODE, promoting arguments + and *not* defining TARGET_PROMOTE_PROTOTYPES or PROMOTE_MODE gives the + best code size and speed for gcc, ipps and products in gcc-2.7.2. */ + +enum machine_mode +cris_promote_function_mode (const_tree type ATTRIBUTE_UNUSED, + enum machine_mode mode, + int *punsignedp ATTRIBUTE_UNUSED, + const_tree fntype ATTRIBUTE_UNUSED, + int for_return) +{ + /* Defining PROMOTE_FUNCTION_RETURN in gcc-2.7.2 uncovered bug 981110 (even + when modifying TARGET_FUNCTION_VALUE to return the promoted mode). + Maybe pointless as of now, but let's keep the old behavior. */ + if (for_return == 1) + return mode; + return CRIS_PROMOTED_MODE (mode, *punsignedp, type); +} + +/* Let's assume all functions return in r[CRIS_FIRST_ARG_REG] for the + time being. 
*/ + +static rtx +cris_function_value(const_tree type, + const_tree func ATTRIBUTE_UNUSED, + bool outgoing ATTRIBUTE_UNUSED) +{ + return gen_rtx_REG (TYPE_MODE (type), CRIS_FIRST_ARG_REG); +} + +/* Let's assume all functions return in r[CRIS_FIRST_ARG_REG] for the + time being. */ + +static rtx +cris_libcall_value (enum machine_mode mode, + const_rtx fun ATTRIBUTE_UNUSED) +{ + return gen_rtx_REG (mode, CRIS_FIRST_ARG_REG); +} + +/* Let's assume all functions return in r[CRIS_FIRST_ARG_REG] for the + time being. */ + +bool +cris_function_value_regno_p (const unsigned int regno) +{ + return (regno == CRIS_FIRST_ARG_REG); +} + +static int +cris_arg_partial_bytes (CUMULATIVE_ARGS *ca, enum machine_mode mode, + tree type, bool named ATTRIBUTE_UNUSED) +{ + if (ca->regs == CRIS_MAX_ARGS_IN_REGS - 1 + && !targetm.calls.must_pass_in_stack (mode, type) + && CRIS_FUNCTION_ARG_SIZE (mode, type) > 4 + && CRIS_FUNCTION_ARG_SIZE (mode, type) <= 8) + return UNITS_PER_WORD; + else + return 0; +} + +/* Worker function for TARGET_MD_ASM_CLOBBERS. */ + +static tree +cris_md_asm_clobbers (tree outputs, tree inputs, tree in_clobbers) +{ + HARD_REG_SET mof_set; + tree clobbers; + tree t; + + CLEAR_HARD_REG_SET (mof_set); + SET_HARD_REG_BIT (mof_set, CRIS_MOF_REGNUM); + + /* For the time being, all asms clobber condition codes. Revisit when + there's a reasonable use for inputs/outputs that mention condition + codes. */ + clobbers + = tree_cons (NULL_TREE, + build_string (strlen (reg_names[CRIS_CC0_REGNUM]), + reg_names[CRIS_CC0_REGNUM]), + in_clobbers); + + for (t = outputs; t != NULL; t = TREE_CHAIN (t)) + { + tree val = TREE_VALUE (t); + + /* The constraint letter for the singleton register class of MOF + is 'h'. If it's mentioned in the constraints, the asm is + MOF-aware and adding it to the clobbers would cause it to have + impossible constraints. */ + if (strchr (TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (t))), + 'h') != NULL + || tree_overlaps_hard_reg_set (val, &mof_set) != NULL_TREE) + return clobbers; + } + + for (t = inputs; t != NULL; t = TREE_CHAIN (t)) + { + tree val = TREE_VALUE (t); + + if (strchr (TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (t))), + 'h') != NULL + || tree_overlaps_hard_reg_set (val, &mof_set) != NULL_TREE) + return clobbers; + } + + return tree_cons (NULL_TREE, + build_string (strlen (reg_names[CRIS_MOF_REGNUM]), + reg_names[CRIS_MOF_REGNUM]), + clobbers); +} + +/* Implement TARGET_FRAME_POINTER_REQUIRED. + + Really only needed if the stack frame has variable length (alloca + or variable sized local arguments (GNU C extension). See PR39499 and + PR38609 for the reason this isn't just 0. */ + +bool +cris_frame_pointer_required (void) +{ + return !current_function_sp_is_unchanging; +} + +/* Implement TARGET_ASM_TRAMPOLINE_TEMPLATE. + + This looks too complicated, and it is. I assigned r7 to be the + static chain register, but it is call-saved, so we have to save it, + and come back to restore it after the call, so we have to save srp... + Anyway, trampolines are rare enough that we can cope with this + somewhat lack of elegance. + (Do not be tempted to "straighten up" whitespace in the asms; the + assembler #NO_APP state mandates strict spacing). */ +/* ??? See the i386 regparm=3 implementation that pushes the static + chain value to the stack in the trampoline, and uses a call-saved + register when called directly. 
*/ + +static void +cris_asm_trampoline_template (FILE *f) +{ + if (TARGET_V32) + { + /* This normally-unused nop insn acts as an instruction to + the simulator to flush its instruction cache. None of + the other instructions in the trampoline template suits + as a trigger for V32. The pc-relative addressing mode + works nicely as a trigger for V10. + FIXME: Have specific V32 template (possibly avoiding the + use of a special instruction). */ + fprintf (f, "\tclearf x\n"); + /* We have to use a register as an intermediate, choosing + semi-randomly R1 (which has to not be the STATIC_CHAIN_REGNUM), + so we can use it for address indirection and jsr target. */ + fprintf (f, "\tmove $r1,$mof\n"); + /* +4 */ + fprintf (f, "\tmove.d 0,$r1\n"); + fprintf (f, "\tmove.d $%s,[$r1]\n", reg_names[STATIC_CHAIN_REGNUM]); + fprintf (f, "\taddq 6,$r1\n"); + fprintf (f, "\tmove $mof,[$r1]\n"); + fprintf (f, "\taddq 6,$r1\n"); + fprintf (f, "\tmove $srp,[$r1]\n"); + /* +20 */ + fprintf (f, "\tmove.d 0,$%s\n", reg_names[STATIC_CHAIN_REGNUM]); + /* +26 */ + fprintf (f, "\tmove.d 0,$r1\n"); + fprintf (f, "\tjsr $r1\n"); + fprintf (f, "\tsetf\n"); + /* +36 */ + fprintf (f, "\tmove.d 0,$%s\n", reg_names[STATIC_CHAIN_REGNUM]); + /* +42 */ + fprintf (f, "\tmove.d 0,$r1\n"); + /* +48 */ + fprintf (f, "\tmove.d 0,$r9\n"); + fprintf (f, "\tjump $r9\n"); + fprintf (f, "\tsetf\n"); + } + else + { + fprintf (f, "\tmove.d $%s,[$pc+20]\n", reg_names[STATIC_CHAIN_REGNUM]); + fprintf (f, "\tmove $srp,[$pc+22]\n"); + fprintf (f, "\tmove.d 0,$%s\n", reg_names[STATIC_CHAIN_REGNUM]); + fprintf (f, "\tjsr 0\n"); + fprintf (f, "\tmove.d 0,$%s\n", reg_names[STATIC_CHAIN_REGNUM]); + fprintf (f, "\tjump 0\n"); + } +} + +/* Implement TARGET_TRAMPOLINE_INIT. */ + +static void +cris_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value) +{ + rtx fnaddr = XEXP (DECL_RTL (fndecl), 0); + rtx tramp = XEXP (m_tramp, 0); + rtx mem; + + emit_block_move (m_tramp, assemble_trampoline_template (), + GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL); + + if (TARGET_V32) + { + mem = adjust_address (m_tramp, SImode, 6); + emit_move_insn (mem, plus_constant (tramp, 38)); + mem = adjust_address (m_tramp, SImode, 22); + emit_move_insn (mem, chain_value); + mem = adjust_address (m_tramp, SImode, 28); + emit_move_insn (mem, fnaddr); + } + else + { + mem = adjust_address (m_tramp, SImode, 10); + emit_move_insn (mem, chain_value); + mem = adjust_address (m_tramp, SImode, 16); + emit_move_insn (mem, fnaddr); + } + + /* Note that there is no need to do anything with the cache for + sake of a trampoline. */ +} + + +#if 0 +/* Various small functions to replace macros. Only called from a + debugger. They might collide with gcc functions or system functions, + so only emit them when '#if 1' above. */ + +enum rtx_code Get_code (rtx); + +enum rtx_code +Get_code (rtx x) +{ + return GET_CODE (x); +} + +const char *Get_mode (rtx); + +const char * +Get_mode (rtx x) +{ + return GET_MODE_NAME (GET_MODE (x)); +} + +rtx Xexp (rtx, int); + +rtx +Xexp (rtx x, int n) +{ + return XEXP (x, n); +} + +rtx Xvecexp (rtx, int, int); + +rtx +Xvecexp (rtx x, int n, int m) +{ + return XVECEXP (x, n, m); +} + +int Get_rtx_len (rtx); + +int +Get_rtx_len (rtx x) +{ + return GET_RTX_LENGTH (GET_CODE (x)); +} + +/* Use upper-case to distinguish from local variables that are sometimes + called next_insn and prev_insn. 
*/ + +rtx Next_insn (rtx); + +rtx +Next_insn (rtx insn) +{ + return NEXT_INSN (insn); +} + +rtx Prev_insn (rtx); + +rtx +Prev_insn (rtx insn) +{ + return PREV_INSN (insn); +} +#endif + +#include "gt-cris.h" + +/* + * Local variables: + * eval: (c-set-style "gnu") + * indent-tabs-mode: t + * End: + */
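One non-obvious consequence of the argument-passing hooks earlier in cris.c (the FUNCTION_ARG machinery together with cris_arg_partial_bytes) is that a doubleword argument which starts in the last of the four argument registers is split between that register and the stack. The sketch below only illustrates that rule; the function names and values are made up, and the register assignments are those implied by the hooks, not verified against a real CRIS toolchain.

/* With CRIS_FIRST_ARG_REG == 10 and CRIS_MAX_ARGS_IN_REGS == 4, the
   three ints occupy r10..r12.  The 8-byte 'd' is then considered with
   three argument registers already used, so cris_arg_partial_bytes
   returns UNITS_PER_WORD: its first four bytes travel in r13 and the
   remaining four go on the stack.  */
extern long long callee (int a, int b, int c, long long d);

long long
caller (void)
{
  return callee (1, 2, 3, 123456789012345LL);
}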
cris.c Property changes : Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +Id \ No newline at end of property Index: cris_abi_symbol.c =================================================================== --- cris_abi_symbol.c (nonexistent) +++ cris_abi_symbol.c (revision 338) @@ -0,0 +1,45 @@ +/* Define symbol to recognize CRIS ABI version 2, for a.out use. + Contributed by Axis Communications. + Written by Hans-Peter Nilsson , c:a 1992. + + Copyright (C) 2000, 2001, 2003, 2009 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it +under the terms of the GNU General Public License as published by the +Free Software Foundation; either version 3, or (at your option) any +later version. + +This file is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +#include "tconfig.h" +#include "tm.h" + +#ifdef __AOUT__ + +/* ELF support was not released before the ABI was changed, so we + restrict this awkwardness to a.out. This symbol is for gdb to + recognize, so it can debug both old and new programs successfully. */ +__asm__ (".global " CRIS_ABI_VERSION_SYMBOL_STRING); +__asm__ (".set " CRIS_ABI_VERSION_SYMBOL_STRING ",0"); + +#else /* not __AOUT__ */ + +/* The file must not be empty (declaration/definition-wise) according to + ISO, IIRC. */ +extern int _Dummy; + +#endif /* not __AOUT__ */
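The a.out branch above defines the ABI marker purely with top-level asm, so no data space is reserved for it. A minimal sketch of the same technique in isolation follows; the symbol name and value here are made up (the real name comes from CRIS_ABI_VERSION_SYMBOL_STRING, whose expansion is not shown in this file).

/* Define a global absolute symbol from C.  Tools such as nm (which
   should list it as an absolute symbol) or gdb can look it up by name
   to identify the binary, without any bytes being allocated for it.  */
__asm__ (".global __example_abi_marker");
__asm__ (".set __example_abi_marker, 2");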
cris_abi_symbol.c Property changes : Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +Id \ No newline at end of property Index: predicates.md =================================================================== --- predicates.md (nonexistent) +++ predicates.md (revision 338) @@ -0,0 +1,174 @@ +;; Operand and operator predicates for the GCC CRIS port. +;; Copyright (C) 2005, 2007 Free Software Foundation, Inc. + +;; This file is part of GCC. +;; +;; GCC is free software; you can redistribute it and/or modify +;; it under the terms of the GNU General Public License as published by +;; the Free Software Foundation; either version 3, or (at your option) +;; any later version. +;; +;; GCC is distributed in the hope that it will be useful, +;; but WITHOUT ANY WARRANTY; without even the implied warranty of +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +;; GNU General Public License for more details. +;; +;; You should have received a copy of the GNU General Public License +;; along with GCC; see the file COPYING3. If not see +;; . + + +;; Operator predicates. + +(define_predicate "cris_orthogonal_operator" + (match_code "plus, minus, ior, and, umin")) + +(define_predicate "cris_commutative_orth_op" + (match_code "plus, ior, and, umin")) + +;; By the name, you might think we should include MULT. We don't because +;; it doesn't accept the same addressing modes as the others (only +;; registers) and there's also the problem of handling TARGET_MUL_BUG. + +(define_predicate "cris_operand_extend_operator" + (match_code "plus, minus, umin")) + +(define_predicate "cris_additive_operand_extend_operator" + (match_code "plus, minus")) + +(define_predicate "cris_extend_operator" + (match_code "zero_extend, sign_extend")) + +(define_predicate "cris_plus_or_bound_operator" + (match_code "plus, umin")) + +;; Used as an operator to get a handle on a already-known-valid MEM rtx:es +;; (no need to validate the address), where some address expression parts +;; have their own match_operand. + +(define_predicate "cris_mem_op" + (match_code "mem")) + +(define_predicate "cris_load_multiple_op" + (and (match_code "parallel") + (match_test "cris_movem_load_rest_p (op, 0)"))) + +(define_predicate "cris_store_multiple_op" + (and (match_code "parallel") + (match_test "cris_store_multiple_op_p (op)"))) + + +;; Operand helper predicates. + +(define_predicate "cris_bdap_const_operand" + (and (match_code "label_ref, symbol_ref, const_int, const_double, const") + (ior (not (match_test "flag_pic")) + (match_test "cris_valid_pic_const (op, true)")))) + +(define_predicate "cris_simple_address_operand" + (ior (match_operand:SI 0 "register_operand") + (and (match_code "post_inc") + (match_test "register_operand (XEXP (op, 0), Pmode)")))) + +(define_predicate "cris_simple_operand" + (ior (match_operand 0 "register_operand") + (and (match_code "mem") + (match_test "cris_simple_address_operand (XEXP (op, 0), + Pmode)")))) + +;; The caller needs to use :SI. +(define_predicate "cris_bdap_sign_extend_operand" +; Disabled until +; or is committed. + (match_test "0")) +; (and (match_code "sign_extend") +; (and (match_test "MEM_P (XEXP (op, 0))") +; (match_test "cris_simple_address_operand (XEXP (XEXP (op, 0), 0), +; Pmode)")))) + +;; FIXME: Should not have to test for 1. 
+(define_predicate "cris_scale_int_operand" + (and (match_code "const_int") + (ior (ior (match_test "op == GEN_INT (4)") + (match_test "op == const2_rtx")) + (match_test "op == const1_rtx")))) + +;; FIXME: Should be able to assume (reg int). +(define_predicate "cris_biap_mult_operand" + (and (match_code "mult") + (ior (and (match_test "register_operand (XEXP (op, 0), Pmode)") + (match_test "cris_scale_int_operand (XEXP (op, 1), Pmode)")) + (and (match_test "cris_scale_int_operand (XEXP (op, 0), Pmode)") + (match_test "register_operand (XEXP (op, 1), Pmode)"))))) + + +;; Operand predicates. + +;; This checks a part of an address, the one that is not a plain register +;; for an addressing mode using BDAP. +;; Allowed operands are either: +;; a) a register +;; b) a CONST operand (but not a symbol when generating PIC) +;; c) a [r] or [r+] in SImode, or sign-extend from HI or QI. + +(define_predicate "cris_bdap_operand" + (ior (match_operand 0 "cris_bdap_const_operand") + (ior (match_operand:SI 0 "cris_simple_operand") + (match_operand:SI 0 "cris_bdap_sign_extend_operand")))) + +;; This is similar to cris_bdap_operand: +;; It checks a part of an address, the one that is not a plain register +;; for an addressing mode using BDAP or BIAP. +;; Allowed operands are either: +;; a) a register +;; b) a CONST operand (but not a symbol when generating PIC) +;; c) a mult of (1, 2 or 4) and a register +;; d) a [r] or [r+] in SImode, or sign-extend from HI or QI. */ + +(define_predicate "cris_bdap_biap_operand" + (ior (match_operand 0 "cris_bdap_operand") + (match_operand 0 "cris_biap_mult_operand"))) + +;; Since with -fPIC, not all symbols are valid PIC symbols or indeed +;; general_operands, we have to have a predicate that matches it for the +;; "movsi" expander. +;; FIXME: Can s/special_// when PR 20413 is fixed. + +(define_special_predicate "cris_general_operand_or_symbol" + (ior (match_operand 0 "general_operand") + (and (match_code "const, symbol_ref, label_ref") + ; The following test is actually just an assertion. + (match_test "cris_pic_symbol_type_of (op) != cris_no_symbol")))) + +;; A predicate for the anon movsi expansion, one that fits a PCREL +;; operand as well as general_operand. + +(define_special_predicate "cris_general_operand_or_pic_source" + (ior (match_operand 0 "general_operand") + (and (match_test "flag_pic") + (match_test "cris_valid_pic_const (op, false)")))) + +;; Since a PLT symbol is not a general_operand, we have to have a +;; predicate that matches it when we need it. We use this in the expanded +;; "call" and "call_value" anonymous patterns. + +(define_predicate "cris_nonmemory_operand_or_callable_symbol" + (ior (match_operand 0 "nonmemory_operand") + (and (match_code "const") + (and + (match_test "GET_CODE (XEXP (op, 0)) == UNSPEC") + (ior + (match_test "XINT (XEXP (op, 0), 1) == CRIS_UNSPEC_PLT_PCREL") + (match_test "XINT (XEXP (op, 0), 1) == CRIS_UNSPEC_PCREL")))))) + +;; This matches a (MEM (general_operand)) or +;; (MEM (cris_general_operand_or_symbol)). The second one isn't a valid +;; memory_operand, so we need this predicate to recognize call +;; destinations before we change them to a PLT operand (by wrapping in +;; UNSPEC CRIS_UNSPEC_PLT). 
+ +(define_predicate "cris_mem_call_operand" + (and (match_code "mem") + (ior (match_operand 0 "memory_operand") + (match_test "cris_general_operand_or_symbol (XEXP (op, 0), + Pmode)")))) Index: t-elfmulti =================================================================== --- t-elfmulti (nonexistent) +++ t-elfmulti (revision 338) @@ -0,0 +1,34 @@ +# Copyright (C) 2001, 2007 Free Software Foundation, Inc. +# +# This file is part of GCC. +# +# GCC is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3, or (at your option) +# any later version. +# +# GCC is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GCC; see the file COPYING3. If not see +# . + +LIB2FUNCS_STATIC_EXTRA = $(srcdir)/config/cris/mulsi3.asm +MULTILIB_OPTIONS = march=v10/march=v32 +MULTILIB_DIRNAMES = v10 v32 +MULTILIB_MATCHES = \ + march?v10=mcpu?etrax100lx \ + march?v10=mcpu?ng \ + march?v10=march?etrax100lx \ + march?v10=march?ng \ + march?v10=march?v11 \ + march?v10=mcpu?v11 \ + march?v10=mcpu?v10 \ + march?v32=mcpu?v32 +MULTILIB_EXTRA_OPTS = mbest-lib-options +INSTALL_LIBGCC = install-multilib +LIBGCC = stmp-multilib +CRTSTUFF_T_CFLAGS = $(LIBGCC2_CFLAGS) -moverride-best-lib-options Index: linux.h =================================================================== --- linux.h (nonexistent) +++ linux.h (revision 338) @@ -0,0 +1,148 @@ +/* Definitions for GCC. Part of the machine description for CRIS. + Copyright (C) 2001, 2002, 2003, 2005, 2006, 2007, 2008 + Free Software Foundation, Inc. + Contributed by Axis Communications. Written by Hans-Peter Nilsson. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + + +/* After the first "Node:" comment comes all preprocessor directives and + attached declarations described in the info files, the "Using and + Porting GCC" manual (uapgcc), in the same order as found in the "Target + macros" section in the gcc-2.9x CVS edition of 2000-03-17. FIXME: Not + really, but needs an update anyway. + + There is no generic copy-of-uapgcc comment, you'll have to see uapgcc + for that. If applicable, there is a CRIS-specific comment. The order + of macro definitions follow the order in the manual. Every section in + the manual (node in the info pages) has an introductory `Node: + ' comment. If no macros are defined for a section, only + the section-comment is present. */ + +/* This file defines the macros for cris-axis-linux-gnu that are not + covered by cris.h, elfos.h and (config/)linux.h. */ + +/* Make sure we have a valid TARGET_CPU_DEFAULT, so we can assume it + and take shortcuts below. 
*/ +#ifndef TARGET_CPU_DEFAULT +#error "TARGET_CPU_DEFAULT not defined" +#elif (TARGET_CPU_DEFAULT+0) != 10 && (TARGET_CPU_DEFAULT+0) != 32 +#error "TARGET_CPU_DEFAULT must be 10 or 32, or this file be updated" +#endif + +/* Node: Instruction Output */ + +#undef USER_LABEL_PREFIX +#define USER_LABEL_PREFIX "" + +/* Node: Driver */ +/* These macros are CRIS-specific, but used in target driver macros. */ + +#undef CRIS_CPP_SUBTARGET_SPEC +#if TARGET_CPU_DEFAULT == 32 +# define CRIS_CPP_SUBTARGET_SPEC \ + "%{pthread:-D_REENTRANT}\ + %{!march=*:%{!cpu=*:-D__arch_v32 -D__CRIS_arch_version=32}}" +#else +# define CRIS_CPP_SUBTARGET_SPEC \ + "%{pthread:-D_REENTRANT}\ + %{!march=*:%{!cpu=*:-D__arch_v10 -D__CRIS_arch_version=10}}" +#endif + +#undef CRIS_CC1_SUBTARGET_SPEC +#if TARGET_CPU_DEFAULT == 32 +# define CRIS_CC1_SUBTARGET_SPEC \ + "%{!march=*:%{!cpu=*:-march=v32}}" +#define CRIS_SUBTARGET_DEFAULT_ARCH MASK_AVOID_GOTPLT +#else +# define CRIS_CC1_SUBTARGET_SPEC \ + "%{!march=*:%{!cpu=*:-march=v10}}" +#define CRIS_SUBTARGET_DEFAULT_ARCH 0 +#endif + +#undef CRIS_ASM_SUBTARGET_SPEC +#if TARGET_CPU_DEFAULT == 32 +# define CRIS_ASM_SUBTARGET_SPEC \ + "--em=criself \ + %{!march=*:%{!cpu=*:--march=v32}} \ + %{!fleading-underscore:--no-underscore}\ + %{fPIC|fpic|fPIE|fpie: --pic}" +#else +# define CRIS_ASM_SUBTARGET_SPEC \ + "--em=criself \ + %{!march=*:%{!cpu=*:--march=v10}} \ + %{!fleading-underscore:--no-underscore}\ + %{fPIC|fpic|fPIE|fpie: --pic}" +#endif + +/* Previously controlled by target_flags. */ +#undef TARGET_LINUX +#define TARGET_LINUX 1 + +#undef CRIS_SUBTARGET_DEFAULT +#define CRIS_SUBTARGET_DEFAULT \ + (MASK_SVINTO \ + + MASK_ETRAX4_ADD \ + + MASK_ALIGN_BY_32 \ + + CRIS_SUBTARGET_DEFAULT_ARCH) + +#undef CRIS_DEFAULT_CPU_VERSION +#define CRIS_DEFAULT_CPU_VERSION CRIS_CPU_NG + +#define GLIBC_DYNAMIC_LINKER "/lib/ld.so.1" + +/* We need an -rpath-link to ld.so.1, and presumably to each directory + specified with -B. */ +#undef CRIS_LINK_SUBTARGET_SPEC +#define CRIS_LINK_SUBTARGET_SPEC \ + "-mcrislinux\ + %{B*:-rpath-link %*}\ + %{!nostdlib:-rpath-link ../sys-include/asm/../../lib%s}\ + %{shared} %{static}\ + %{symbolic:-Bdynamic} %{shlib:-Bdynamic} %{static:-Bstatic}\ + %{!shared:%{!static:\ + %{rdynamic:-export-dynamic}\ + %{!dynamic-linker:-dynamic-linker " LINUX_DYNAMIC_LINKER "}}}\ + %{!r:%{O2|O3: --gc-sections}}" + + +/* Node: Run-time Target */ + +/* For the cris-*-linux* subtarget. */ +#undef TARGET_OS_CPP_BUILTINS +#define TARGET_OS_CPP_BUILTINS() \ + do \ + { \ + LINUX_TARGET_OS_CPP_BUILTINS(); \ + if (flag_leading_underscore <= 0) \ + builtin_define ("__NO_UNDERSCORES__"); \ + } \ + while (0) + + +/* Node: Sections */ + +/* GNU/Linux has crti and crtn and does not need the + CRT_CALL_STATIC_FUNCTION trick in cris.h. */ +#undef CRT_CALL_STATIC_FUNCTION + +/* + * Local variables: + * eval: (c-set-style "gnu") + * indent-tabs-mode: t + * End: + */
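Note that CRIS_CPP_SUBTARGET_SPEC above only supplies the __arch_* and __CRIS_arch_version defaults when no -march=/-mcpu= option is given; with an explicit option the corresponding macro is expected to come from the generic CPP_SPEC instead. A hedged, user-level sketch of code keying off those predefines (the printed strings are purely illustrative):

#include <stdio.h>

int
main (void)
{
#if defined (__arch_v32)
  puts ("CRIS v32 code");
#elif defined (__arch_v10)
  puts ("CRIS v10 code");
#else
  puts ("some other CRIS variant (or not CRIS at all)");
#endif
#ifdef _REENTRANT
  puts ("compiled with -pthread");   /* added by %{pthread:-D_REENTRANT} */
#endif
  return 0;
}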
linux.h Property changes : Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +Id \ No newline at end of property Index: cris.h =================================================================== --- cris.h (nonexistent) +++ cris.h (revision 338) @@ -0,0 +1,1407 @@ +/* Definitions for GCC. Part of the machine description for CRIS. + Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008, + 2009 Free Software Foundation, Inc. + Contributed by Axis Communications. Written by Hans-Peter Nilsson. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +. */ + +/* After the first "Node:" comment comes all preprocessor directives and + attached declarations described in the info files, the "Using and + Porting GCC" manual (uapgcc), in the same order as found in the "Target + macros" section in the gcc-2.9x CVS edition of 2000-03-17. FIXME: Not + really, but needs an update anyway. + + There is no generic copy-of-uapgcc comment, you'll have to see uapgcc + for that. If applicable, there is a CRIS-specific comment. The order + of macro definitions follow the order in the manual. Every section in + the manual (node in the info pages) has an introductory `Node: + ' comment. If no macros are defined for a section, only + the section-comment is present. */ + +/* Note that other header files (e.g. config/elfos.h, config/linux.h, + config/cris/linux.h and config/cris/aout.h) are responsible for lots of + settings not repeated below. This file contains general CRIS + definitions and definitions for the cris-*-elf subtarget. */ + +/* We don't want to use gcc_assert for everything, as that can be + compiled out. */ +#define CRIS_ASSERT(x) \ + do { if (!(x)) internal_error ("CRIS-port assertion failed: " #x); } while (0) + +/* Replacement for REG_P since it does not match SUBREGs. Happens for + testcase Axis-20000320 with gcc-2.9x. */ +#define REG_S_P(x) \ + (REG_P (x) || (GET_CODE (x) == SUBREG && REG_P (XEXP (x, 0)))) + +/* Last register in main register bank r0..r15. */ +#define CRIS_LAST_GENERAL_REGISTER 15 + +/* Descriptions of registers used for arguments. */ +#define CRIS_FIRST_ARG_REG 10 +#define CRIS_MAX_ARGS_IN_REGS 4 + +/* See also *_REGNUM constants in cris.md. */ + +/* Most of the time, we need the index into the register-names array. + When passing debug-info, we need the real hardware register number. */ +#define CRIS_CANONICAL_SRP_REGNUM (16 + 11) +#define CRIS_CANONICAL_MOF_REGNUM (16 + 7) +/* We have CCR in all models including v10, but that's 16 bits, so let's + prefer the DCCR number, which is a DMA pointer in pre-v8, so we'll + never clash with it for GCC purposes. */ +#define CRIS_CANONICAL_CC0_REGNUM (16 + 13) + +/* When generating PIC, these suffixes are added to the names of non-local + functions when being output. Contrary to other ports, we have offsets + relative to the GOT, not the PC. 
We might implement PC-relative PLT + semantics later for the general case; they are used in some cases right + now, such as MI thunks. */ +#define CRIS_GOTPLT_SUFFIX ":GOTPLT" +#define CRIS_PLT_GOTOFFSET_SUFFIX ":PLTG" +#define CRIS_PLT_PCOFFSET_SUFFIX ":PLT" + +#define CRIS_FUNCTION_ARG_SIZE(MODE, TYPE) \ + ((MODE) != BLKmode ? GET_MODE_SIZE (MODE) \ + : (unsigned) int_size_in_bytes (TYPE)) + +/* Which CPU version this is. The parsed and adjusted cris_cpu_str. */ +extern int cris_cpu_version; + +/* Changing the order used to be necessary to put the fourth __make_dp + argument (a DImode parameter) in registers, to fit with the libfunc + parameter passing scheme used for intrinsic functions. FIXME: Check + performance and maybe remove definition from TARGET_LIBGCC2_CFLAGS now + that it isn't strictly necessary. We used to do this through + TARGET_LIBGCC2_CFLAGS, but that became increasingly difficult as the + parenthesis (that needed quoting) travels through several layers of + make and shell invocations. */ +#ifdef IN_LIBGCC2 +#define __make_dp(a,b,c,d) __cris_make_dp(d,a,b,c) +#endif + + +/* Node: Driver */ + +/* When using make with defaults.mak for Sun this will handily remove + any "-target sun*" switches. */ +/* We need to override any previous definitions (linux.h) */ +#undef WORD_SWITCH_TAKES_ARG +#define WORD_SWITCH_TAKES_ARG(STR) \ + (DEFAULT_WORD_SWITCH_TAKES_ARG (STR) \ + || !strcmp (STR, "target")) + +/* Also provide canonical vN definitions when user specifies an alias. + Note that -melf overrides -maout. */ + +#define CPP_SPEC \ + "%{mtune=*:-D__tune_%* %{mtune=v*:-D__CRIS_arch_tune=%*}\ + %{mtune=etrax4:-D__tune_v3 -D__CRIS_arch_tune=3}\ + %{mtune=etrax100:-D__tune_v8 -D__CRIS_arch_tune=8}\ + %{mtune=svinto:-D__tune_v8 -D__CRIS_arch_tune=8}\ + %{mtune=etrax100lx:-D__tune_v10 -D__CRIS_arch_tune=10}\ + %{mtune=ng:-D__tune_v10 -D__CRIS_arch_tune=10}}\ + %{mcpu=*:-D__arch_%* %{mcpu=v*:-D__CRIS_arch_version=%*}\ + %{mcpu=etrax4:-D__arch_v3 -D__CRIS_arch_version=3}\ + %{mcpu=etrax100:-D__arch_v8 -D__CRIS_arch_version=8}\ + %{mcpu=svinto:-D__arch_v8 -D__CRIS_arch_version=8}\ + %{mcpu=etrax100lx:-D__arch_v10 -D__CRIS_arch_version=10}\ + %{mcpu=ng:-D__arch_v10 -D__CRIS_arch_version=10}}\ + %{march=*:-D__arch_%* %{march=v*:-D__CRIS_arch_version=%*}\ + %{march=etrax4:-D__arch_v3 -D__CRIS_arch_version=3}\ + %{march=etrax100:-D__arch_v8 -D__CRIS_arch_version=8}\ + %{march=svinto:-D__arch_v8 -D__CRIS_arch_version=8}\ + %{march=etrax100lx:-D__arch_v10 -D__CRIS_arch_version=10}\ + %{march=ng:-D__arch_v10 -D__CRIS_arch_version=10}}\ + %{metrax100:-D__arch__v8 -D__CRIS_arch_version=8}\ + %{metrax4:-D__arch__v3 -D__CRIS_arch_version=3}\ + %(cpp_subtarget)" + +/* For the cris-*-elf subtarget. */ + +#define CRIS_DEFAULT_TUNE "10" +#define CRIS_ARCH_CPP_DEFAULT +#define CRIS_DEFAULT_ASM_ARCH_OPTION "" + +#ifdef TARGET_CPU_DEFAULT +#if TARGET_CPU_DEFAULT != 32 && TARGET_CPU_DEFAULT != 10 + #error "Due to '()'; e.g. '#define TARGET_CPU_DEFAULT (10)', stringize TARGET_CPU_DEFAULT isn't useful: update manually." +#endif + +#if TARGET_CPU_DEFAULT == 32 +#undef CRIS_DEFAULT_TUNE +#define CRIS_DEFAULT_TUNE "32" +/* To enable use of "generic" cris-axis-elf binutils, always pass the + architecture option to GAS. (We don't do this for non-v32.) 
*/ +#undef CRIS_DEFAULT_ASM_ARCH_OPTION +#define CRIS_DEFAULT_ASM_ARCH_OPTION "--march=v32" +#endif + +#undef CRIS_ARCH_CPP_DEFAULT +#define CRIS_ARCH_CPP_DEFAULT \ + "%{!march=*:\ + %{!metrax*:\ + %{!mcpu=*:\ + %{!mtune=*:-D__tune_v" CRIS_DEFAULT_TUNE "}\ + -D__arch_v"CRIS_DEFAULT_TUNE\ + " -D__CRIS_arch_version=" CRIS_DEFAULT_TUNE "}}}" +#endif + +#define CRIS_CPP_SUBTARGET_SPEC \ + "%{mbest-lib-options:\ + %{!moverride-best-lib-options:\ + %{!march=*:%{!metrax*:%{!mcpu=*:\ + -D__tune_v" CRIS_DEFAULT_TUNE \ + " -D__CRIS_arch_tune=" CRIS_DEFAULT_TUNE "}}}}}"\ + CRIS_ARCH_CPP_DEFAULT + +/* Remove those Sun-make "target" switches. */ +/* Override previous definitions (linux.h). */ +#undef CC1_SPEC +#define CC1_SPEC \ + "%{target*:}\ + %{metrax4:-march=v3}\ + %{metrax100:-march=v8}\ + %(cc1_subtarget)" + +/* For the cris-*-elf subtarget. */ +#define CRIS_CC1_SUBTARGET_SPEC \ + "-melf\ + %{mbest-lib-options:\ + %{!moverride-best-lib-options:\ + %{!march=*:%{!mcpu=*:-mtune=v" CRIS_DEFAULT_TUNE\ + " -D__CRIS_arch_tune=" CRIS_DEFAULT_TUNE "}}\ + %{!finhibit-size-directive:\ + %{!fno-function-sections: -ffunction-sections}\ + %{!fno-data-sections: -fdata-sections}}}}" + +/* This adds to CC1_SPEC. */ +#define CC1PLUS_SPEC "" + +#ifdef HAVE_AS_NO_MUL_BUG_ABORT_OPTION +#define MAYBE_AS_NO_MUL_BUG_ABORT \ + "%{mno-mul-bug-workaround:-no-mul-bug-abort} " +#else +#define MAYBE_AS_NO_MUL_BUG_ABORT +#endif + +/* Override previous definitions (linux.h). */ +#undef ASM_SPEC +#define ASM_SPEC \ + MAYBE_AS_NO_MUL_BUG_ABORT \ + "%{v:-v}\ + %(asm_subtarget)\ + %{march=*:%{cpu=*:%eDo not specify both -march=... and -mcpu=...}}\ + %{march=v32:--march=v32} %{mcpu=v32:--march=v32}" + +/* For the cris-*-elf subtarget. */ +#define CRIS_ASM_SUBTARGET_SPEC \ + "--em=criself %{!march=*:%{!cpu=*:" CRIS_DEFAULT_ASM_ARCH_OPTION "}}" + +/* FIXME: We should propagate the -melf option to make the criself + "emulation" unless a linker script is provided (-T*), but I don't know + how to do that if either of -Ttext, -Tdata or -Tbss is given but no + linker script, as is usually the case. Leave it to the user for the + time being. + + Note that -melf overrides -maout except that a.out-compiled libraries + are linked in (multilibbing). The somewhat cryptic -rpath-link pair is + to avoid *only* picking up the linux multilib subdir from the "-B./" + option during build, while still giving it preference. We'd need some + %s-variant that checked for existence of some specific file. */ +/* Override previous definitions (svr4.h). */ +#undef LINK_SPEC +#define LINK_SPEC \ + "%{v:--verbose}\ + %(link_subtarget)" + +/* For the cris-*-elf subtarget. */ +#define CRIS_LINK_SUBTARGET_SPEC \ + "-mcriself\ + %{sim2:%{!T*:-Tdata 0x4000000 -Tbss 0x8000000}}\ + %{!r:%{O2|O3: --gc-sections}}" + +/* Which library to get. The simulator uses a different library for + the low-level syscalls (implementing the Linux syscall ABI instead + of direct-iron accesses). Default everything with the stub "nosys" + library. */ +/* Override previous definitions (linux.h). */ +#undef LIB_SPEC +#define LIB_SPEC \ + "%{sim*:--start-group -lc -lsyslinux --end-group}\ + %{!sim*:%{g*:-lg}\ + %{!p:%{!pg:-lc}}%{p:-lc_p}%{pg:-lc_p} -lbsp}\ + -lnosys" + +/* Linker startfile options; crt0 flavors. + We need to remove any previous definition (elfos.h). 
*/ +#undef STARTFILE_SPEC +#define STARTFILE_SPEC \ + "%{sim*:crt1.o%s}%{!sim*:crt0.o%s}\ + crti.o%s crtbegin.o%s" + +#undef ENDFILE_SPEC +#define ENDFILE_SPEC "crtend.o%s crtn.o%s" + +#define EXTRA_SPECS \ + {"cpp_subtarget", CRIS_CPP_SUBTARGET_SPEC}, \ + {"cc1_subtarget", CRIS_CC1_SUBTARGET_SPEC}, \ + {"asm_subtarget", CRIS_ASM_SUBTARGET_SPEC}, \ + {"link_subtarget", CRIS_LINK_SUBTARGET_SPEC}, \ + CRIS_SUBTARGET_EXTRA_SPECS + +#define CRIS_SUBTARGET_EXTRA_SPECS + + +/* Node: Run-time Target */ + +#define TARGET_CPU_CPP_BUILTINS() \ + do \ + { \ + builtin_define_std ("cris"); \ + builtin_define_std ("CRIS"); \ + builtin_define_std ("GNU_CRIS"); \ + builtin_define ("__CRIS_ABI_version=2"); \ + builtin_assert ("cpu=cris"); \ + builtin_assert ("machine=cris"); \ + } \ + while (0) + +/* This needs to be at least 32 bits. */ +extern int target_flags; + +/* Previously controlled by target_flags. */ +#define TARGET_ELF 1 + +/* Previously controlled by target_flags. Note that this is *not* set + for -melinux. */ +#define TARGET_LINUX 0 + +/* For the cris-*-elf subtarget. */ +#define CRIS_SUBTARGET_DEFAULT 0 + +#define CRIS_CPU_BASE 0 +#define CRIS_CPU_ETRAX4 3 /* Just lz added. */ +#define CRIS_CPU_SVINTO 8 /* Added swap, jsrc & Co., 32-bit accesses. */ +#define CRIS_CPU_NG 10 /* Added mul[su]. */ +#define CRIS_CPU_V32 32 /* Major changes. */ + +#ifndef TARGET_CPU_DEFAULT +#define TARGET_CPU_DEFAULT CRIS_CPU_BASE +#endif + +/* Default target_flags if no switches specified. */ +#ifndef TARGET_DEFAULT +# if TARGET_CPU_DEFAULT == 32 +# define TARGET_DEFAULT \ + (MASK_STACK_ALIGN \ + + MASK_CONST_ALIGN + MASK_DATA_ALIGN \ + + MASK_PROLOGUE_EPILOGUE) +# else /* 10 */ +# define TARGET_DEFAULT \ + (MASK_SIDE_EFFECT_PREFIXES + MASK_STACK_ALIGN \ + + MASK_CONST_ALIGN + MASK_DATA_ALIGN \ + + MASK_PROLOGUE_EPILOGUE + MASK_MUL_BUG) +# endif +#endif + +/* Local, providing a default for cris_cpu_version. */ +#define CRIS_DEFAULT_CPU_VERSION TARGET_CPU_DEFAULT + +#define TARGET_HAS_MUL_INSNS (cris_cpu_version >= CRIS_CPU_NG) +#define TARGET_HAS_LZ (cris_cpu_version >= CRIS_CPU_ETRAX4) +#define TARGET_HAS_SWAP (cris_cpu_version >= CRIS_CPU_SVINTO) +#define TARGET_V32 (cris_cpu_version >= CRIS_CPU_V32) + +#define CRIS_SUBTARGET_HANDLE_OPTION(x, y, z) + +#define OVERRIDE_OPTIONS cris_override_options () + +#define OPTIMIZATION_OPTIONS(OPTIMIZE, SIZE) \ + do \ + { \ + if ((OPTIMIZE) >= 2 || (SIZE)) \ + flag_omit_frame_pointer = 1; \ + } \ + while (0) + + +/* Node: Storage Layout */ + +#define BITS_BIG_ENDIAN 0 + +#define BYTES_BIG_ENDIAN 0 + +/* WORDS_BIG_ENDIAN is not defined in the hardware, but for consistency, + we use little-endianness, and we may also be able to use + post-increment on DImode indirect. */ +#define WORDS_BIG_ENDIAN 0 + +#define UNITS_PER_WORD 4 + +#define CRIS_PROMOTED_MODE(MODE, UNSIGNEDP, TYPE) \ + (GET_MODE_CLASS (MODE) == MODE_INT && GET_MODE_SIZE (MODE) < 4) \ + ? SImode : MODE + +/* We will be using prototype promotion, so they will be 32 bit. */ +#define PARM_BOUNDARY 32 + +/* Stack boundary is guided by -mstack-align, -mno-stack-align, + -malign. + Old comment: (2.1: still valid in 2.7.2?) + Note that to make this macro affect the alignment of stack + locals, a fix was required, and special precautions when handling + the stack pointer in various other macros (TARGET_ASM_FUNCTION_PROLOGUE + et al) were required. See file "function.c". If you would just define + this macro, it would only affect the builtin alloca and variable + local data (non-ANSI, non-K&R, Gnu C extension). 
*/ +#define STACK_BOUNDARY \ + (TARGET_STACK_ALIGN ? (TARGET_ALIGN_BY_32 ? 32 : 16) : 8) + +#define FUNCTION_BOUNDARY 16 + +/* Do not change BIGGEST_ALIGNMENT (when optimizing), as it will affect + strange places, at least in 2.1. */ +#define BIGGEST_ALIGNMENT 8 + +/* If -m16bit, -m16-bit, -malign or -mdata-align, + align everything to 16 bit. */ +#define DATA_ALIGNMENT(TYPE, BASIC_ALIGN) \ + (TARGET_DATA_ALIGN \ + ? (TARGET_ALIGN_BY_32 \ + ? (BASIC_ALIGN < 32 ? 32 : BASIC_ALIGN) \ + : (BASIC_ALIGN < 16 ? 16 : BASIC_ALIGN)) : BASIC_ALIGN) + +/* Note that CONSTANT_ALIGNMENT has the effect of making gcc believe that + ALL references to constant stuff (in code segment, like strings) has + this alignment. That is a rather rushed assumption. Luckily we do not + care about the "alignment" operand to builtin memcpy (only place where + it counts), so it doesn't affect any bad spots. */ +#define CONSTANT_ALIGNMENT(CONSTANT, BASIC_ALIGN) \ + (TARGET_CONST_ALIGN \ + ? (TARGET_ALIGN_BY_32 \ + ? (BASIC_ALIGN < 32 ? 32 : BASIC_ALIGN) \ + : (BASIC_ALIGN < 16 ? 16 : BASIC_ALIGN)) : BASIC_ALIGN) + +/* FIXME: Define LOCAL_ALIGNMENT for word and dword or arrays and + structures (if -mstack-align=), and check that it is good. */ + +#define EMPTY_FIELD_BOUNDARY 8 + +#define STRUCTURE_SIZE_BOUNDARY 8 + +#define STRICT_ALIGNMENT 0 + +/* Remove any previous definition (elfos.h). + ??? If it wasn't for all the other stuff that affects layout of + structures and bit-fields, this could presumably cause incompatibility + with other GNU/Linux ports (i.e. elfos.h users). */ +#undef PCC_BITFIELD_TYPE_MATTERS + +/* This is only used for non-scalars. Strange stuff happens to structs + (FIXME: What?) if we use anything larger than largest actually used + datum size, so lets make it 32. The type "long long" will still work + as usual. We can still have DImode insns, but they will only be used + for scalar data (i.e. long long). */ +#define MAX_FIXED_MODE_SIZE 32 + + +/* Node: Type Layout */ + +/* Note that DOUBLE_TYPE_SIZE is not defined anymore, since the default + value gives a 64-bit double, which is what we now use. */ + +/* For compatibility and historical reasons, a char should be signed. */ +#define DEFAULT_SIGNED_CHAR 1 + +/* Note that WCHAR_TYPE_SIZE is used in cexp.y, + where TARGET_SHORT is not available. */ +#undef WCHAR_TYPE +#define WCHAR_TYPE "long int" + +#undef WCHAR_TYPE_SIZE +#define WCHAR_TYPE_SIZE 32 + + +/* Node: Register Basics */ + +/* We count all 16 non-special registers, SRP, a faked argument + pointer register, MOF and CCR/DCCR. */ +#define FIRST_PSEUDO_REGISTER (16 + 1 + 1 + 1 + 1) + +/* For CRIS, these are r15 (pc) and r14 (sp). Register r8 is used as a + frame-pointer, but is not fixed. SRP is not included in general + registers and will not be used automatically. All other special + registers are fixed at the moment. The faked argument pointer register + is fixed too. */ +#define FIXED_REGISTERS \ + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0} + +/* Register r9 is used for structure-address, r10-r13 for parameters, + r10- for return values. */ +#define CALL_USED_REGISTERS \ + {0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1} + +#define CONDITIONAL_REGISTER_USAGE cris_conditional_register_usage () + + +/* Node: Allocation Order */ + +/* We need this on CRIS, because call-used regs should be used first, + (so we don't need to push). Else start using registers from r0 and up. 
+ This preference is mainly because if we put call-used-regs from r0 + and up, then we can't use movem to push the rest, (which have to be + saved if we use them, and movem has to start with r0). + Change here if you change which registers to use as call registers. + + The actual need to explicitly prefer call-used registers improved the + situation a lot for 2.1, but might not actually be needed anymore. + Still, this order reflects what GCC should find out by itself, so it + probably does not hurt. + + Order of preference: Call-used-regs first, then r0 and up, last fp & + sp & pc as fillers. + Call-used regs in opposite order, so they will cause less conflict if + a function has few args (<= 3) and it wants a scratch reg. + Use struct-return address first, since very few functions use + structure return values so it is likely to be available. */ +#define REG_ALLOC_ORDER \ + {9, 13, 12, 11, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 14, 15, 17, 16, 18, 19} + +/* Use MOF and ACR. Prefer ACR before any other register. Prefer MOF + then SRP after saved registers. The *after* is because they're only + useful for storage, not for things being computed, which is + apparently more common. */ +#define REG_ALLOC_ORDER_V32 \ + {15, 9, 13, 12, 11, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 17, 16, 14, 18, 19} + + +/* Node: Values in Registers */ + +/* The VOIDmode test is so we can omit mode on anonymous insns. FIXME: + Still needed in 2.9x, at least for Axis-20000319. */ +#define HARD_REGNO_NREGS(REGNO, MODE) \ + (MODE == VOIDmode \ + ? 1 : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)) + +/* CRIS permits all registers to hold all modes. Well, except for the + condition-code register. And we can't hold larger-than-register size + modes in the last special register that can hold a full 32 bits. */ +#define HARD_REGNO_MODE_OK(REGNO, MODE) \ + (((MODE) == CCmode \ + || (REGNO) != CRIS_CC0_REGNUM) \ + && (GET_MODE_SIZE (MODE) <= UNITS_PER_WORD \ + || ((REGNO) != CRIS_MOF_REGNUM && (REGNO) != CRIS_ACR_REGNUM))) + +/* Because CCmode isn't covered by the "narrower mode" statement in + tm.texi, we can still say all modes are tieable despite not having an + always 1 HARD_REGNO_MODE_OK. */ +#define MODES_TIEABLE_P(MODE1, MODE2) 1 + + +/* Node: Leaf Functions */ +/* (no definitions) */ + +/* Node: Stack Registers */ +/* (no definitions) */ + + +/* Node: Register Classes */ + +/* FIXME: A separate class for the return register would make sense. + + We need a separate register class to handle register allocation for + ACR, since it can't be used for post-increment. + + It's not obvious, but having subunions of all movable-between + register classes does really help register allocation. */ +enum reg_class + { + NO_REGS, + ACR_REGS, MOF_REGS, CC0_REGS, SPECIAL_REGS, + SPEC_ACR_REGS, GENNONACR_REGS, + SPEC_GENNONACR_REGS, GENERAL_REGS, + ALL_REGS, + LIM_REG_CLASSES + }; + +#define N_REG_CLASSES (int) LIM_REG_CLASSES + +#define REG_CLASS_NAMES \ + {"NO_REGS", \ + "ACR_REGS", "MOF_REGS", "CC0_REGS", "SPECIAL_REGS", \ + "SPEC_ACR_REGS", "GENNONACR_REGS", "SPEC_GENNONACR_REGS", \ + "GENERAL_REGS", "ALL_REGS"} + +#define CRIS_SPECIAL_REGS_CONTENTS \ + ((1 << CRIS_SRP_REGNUM) | (1 << CRIS_MOF_REGNUM) | (1 << CRIS_CC0_REGNUM)) + +/* Count in the faked argument register in GENERAL_REGS. Keep out SRP. 
*/ +#define REG_CLASS_CONTENTS \ + { \ + {0}, \ + {1 << CRIS_ACR_REGNUM}, \ + {1 << CRIS_MOF_REGNUM}, \ + {1 << CRIS_CC0_REGNUM}, \ + {CRIS_SPECIAL_REGS_CONTENTS}, \ + {CRIS_SPECIAL_REGS_CONTENTS \ + | (1 << CRIS_ACR_REGNUM)}, \ + {(0xffff | (1 << CRIS_AP_REGNUM)) \ + & ~(1 << CRIS_ACR_REGNUM)}, \ + {(0xffff | (1 << CRIS_AP_REGNUM) \ + | CRIS_SPECIAL_REGS_CONTENTS) \ + & ~(1 << CRIS_ACR_REGNUM)}, \ + {0xffff | (1 << CRIS_AP_REGNUM)}, \ + {0xffff | (1 << CRIS_AP_REGNUM) \ + | CRIS_SPECIAL_REGS_CONTENTS} \ + } + +#define REGNO_REG_CLASS(REGNO) \ + ((REGNO) == CRIS_ACR_REGNUM ? ACR_REGS : \ + (REGNO) == CRIS_MOF_REGNUM ? MOF_REGS : \ + (REGNO) == CRIS_CC0_REGNUM ? CC0_REGS : \ + (REGNO) == CRIS_SRP_REGNUM ? SPECIAL_REGS : \ + GENERAL_REGS) + +#define BASE_REG_CLASS GENERAL_REGS + +#define MODE_CODE_BASE_REG_CLASS(MODE, OCODE, ICODE) \ + ((OCODE) != POST_INC ? BASE_REG_CLASS : GENNONACR_REGS) + +#define INDEX_REG_CLASS GENERAL_REGS + +#define IRA_COVER_CLASSES { GENERAL_REGS, SPECIAL_REGS, LIM_REG_CLASSES } + +#define REG_CLASS_FROM_LETTER(C) \ + ( \ + (C) == 'a' ? ACR_REGS : \ + (C) == 'b' ? GENNONACR_REGS : \ + (C) == 'h' ? MOF_REGS : \ + (C) == 'x' ? SPECIAL_REGS : \ + (C) == 'c' ? CC0_REGS : \ + NO_REGS \ + ) + +/* Since it uses reg_renumber, it is safe only once reg_renumber + has been allocated, which happens in local-alloc.c. */ +#define REGNO_OK_FOR_BASE_P(REGNO) \ + ((REGNO) <= CRIS_LAST_GENERAL_REGISTER \ + || (REGNO) == ARG_POINTER_REGNUM \ + || (unsigned) reg_renumber[REGNO] <= CRIS_LAST_GENERAL_REGISTER \ + || (unsigned) reg_renumber[REGNO] == ARG_POINTER_REGNUM) + +/* REGNO_OK_FOR_BASE_P seems to be obsolete wrt. this one, but not yet + documented as such. */ +#define REGNO_MODE_CODE_OK_FOR_BASE_P(REGNO, MODE, OCODE, ICODE) \ + (REGNO_OK_FOR_BASE_P (REGNO) \ + && ((OCODE) != POST_INC \ + || !((REGNO) == CRIS_ACR_REGNUM \ + || (unsigned) reg_renumber[REGNO] == CRIS_ACR_REGNUM))) + +/* See REGNO_OK_FOR_BASE_P. */ +#define REGNO_OK_FOR_INDEX_P(REGNO) REGNO_OK_FOR_BASE_P(REGNO) + +/* It seems like gcc (2.7.2 and 2.9x of 2000-03-22) may send "NO_REGS" as + the class for a constant (testcase: __Mul in arit.c). To avoid forcing + out a constant into the constant pool, we will trap this case and + return something a bit more sane. FIXME: Check if this is a bug. + Beware that we must not "override" classes that can be specified as + constraint letters, or else asm operands using them will fail when + they need to be reloaded. FIXME: Investigate whether that constitutes + a bug. */ +#define PREFERRED_RELOAD_CLASS(X, CLASS) \ + ((CLASS) != ACR_REGS \ + && (CLASS) != MOF_REGS \ + && (CLASS) != CC0_REGS \ + && (CLASS) != SPECIAL_REGS \ + ? GENERAL_REGS : (CLASS)) + +/* We can't move special registers to and from memory in smaller than + word_mode. We also can't move between special registers. Luckily, + -1, as returned by true_regnum for non-sub/registers, is valid as a + parameter to our REGNO_REG_CLASS, returning GENERAL_REGS, so we get + the effect that any X that isn't a special-register is treated as + a non-empty intersection with GENERAL_REGS. */ +#define SECONDARY_RELOAD_CLASS(CLASS, MODE, X) \ + ((((CLASS) == SPECIAL_REGS || (CLASS) == MOF_REGS) \ + && ((GET_MODE_SIZE (MODE) < 4 && MEM_P (X)) \ + || !reg_classes_intersect_p (REGNO_REG_CLASS (true_regnum (X)), \ + GENERAL_REGS))) \ + ? GENERAL_REGS : NO_REGS) + +/* FIXME: Fix regrename.c; it should check validity of replacements, + not just with a silly pass-specific macro. 
We may miss some + opportunities, but we must stop regrename from creating acr++. */ +#define HARD_REGNO_RENAME_OK(FROM, TO) ((TO) != CRIS_ACR_REGNUM) + +/* For CRIS, this is always the size of MODE in words, + since all registers are the same size. To use omitted modes in + patterns with reload constraints, you must say the widest size + which is allowed for VOIDmode. + FIXME: Does that still apply for gcc-2.9x? Keep poisoned until such + patterns are added back. News: 2001-03-16: Happens as early as the + underscore-test. */ +#define CLASS_MAX_NREGS(CLASS, MODE) \ + ((MODE) == VOIDmode \ + ? 1 /* + cris_fatal ("CLASS_MAX_NREGS with VOIDmode") */ \ + : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)) + +/* We are now out of letters; we could use ten more. This forces us to + use C-code in the 'md' file. FIXME: Use some EXTRA_CONSTRAINTS. */ +#define CRIS_CONST_OK_FOR_LETTER_P(VALUE, C) \ + ( \ + /* MOVEQ, CMPQ, ANDQ, ORQ. */ \ + (C) == 'I' ? (VALUE) >= -32 && (VALUE) <= 31 : \ + /* ADDQ, SUBQ. */ \ + (C) == 'J' ? (VALUE) >= 0 && (VALUE) <= 63 : \ + /* ASRQ, BTSTQ, LSRQ, LSLQ. */ \ + (C) == 'K' ? (VALUE) >= 0 && (VALUE) <= 31 : \ + /* A 16-bit signed number. */ \ + (C) == 'L' ? (VALUE) >= -32768 && (VALUE) <= 32767 : \ + /* The constant 0 for CLEAR. */ \ + (C) == 'M' ? (VALUE) == 0 : \ + /* A negative ADDQ or SUBQ. */ \ + (C) == 'N' ? (VALUE) >= -63 && (VALUE) < 0 : \ + /* Quickened ints, QI and HI. */ \ + (C) == 'O' ? (VALUE) >= 0 && (VALUE) <= 65535 \ + && ((VALUE) >= (65535-31) \ + || ((VALUE) >= (255-31) \ + && (VALUE) <= 255 )) : \ + /* A 16-bit number signed *or* unsigned. */ \ + (C) == 'P' ? (VALUE) >= -32768 && (VALUE) <= 65535 : \ + 0) + +#define CONST_OK_FOR_CONSTRAINT_P(VALUE, C, S) \ + ( \ + ((C) != 'K' || (S)[1] == 'c') \ + ? CRIS_CONST_OK_FOR_LETTER_P (VALUE, C) : \ + ((C) == 'K' && (S)[1] == 'p') \ + ? exact_log2 (VALUE) >= 0 : \ + 0) + +#define CONSTRAINT_LEN(C, S) ((C) == 'K' ? 2 : DEFAULT_CONSTRAINT_LEN (C, S)) + +/* It is really simple to make up a 0.0; it is the same as int-0 in + IEEE754. */ +#define CONST_DOUBLE_OK_FOR_LETTER_P(VALUE, C) \ + ((C) == 'G' && ((VALUE) == CONST0_RTX (DFmode) \ + || (VALUE) == CONST0_RTX (SFmode))) + +/* We need this on cris to distinguish delay-slottable addressing modes. */ +#define EXTRA_CONSTRAINT(X, C) \ + ( \ + /* Slottable address mode? */ \ + (C) == 'Q' ? EXTRA_CONSTRAINT_Q (X) : \ + /* Operand to BDAP or BIAP? */ \ + (C) == 'R' ? EXTRA_CONSTRAINT_R (X) : \ + /* A local PIC symbol? */ \ + (C) == 'S' ? EXTRA_CONSTRAINT_S (X) : \ + /* A three-address addressing-mode? */ \ + (C) == 'T' ? EXTRA_CONSTRAINT_T (X) : \ + /* A PLT symbol? */ \ + (C) == 'U' ? EXTRA_CONSTRAINT_U (X) : \ + 0) + +#define EXTRA_MEMORY_CONSTRAINT(X, STR) ((X) == 'Q') + +#define EXTRA_CONSTRAINT_Q(X) \ + ( \ + /* Just an indirect register (happens to also be \ + "all" slottable memory addressing modes not \ + covered by other constraints, i.e. '>'). */ \ + MEM_P (X) && BASE_P (XEXP (X, 0)) \ + ) + +#define EXTRA_CONSTRAINT_R(X) \ + ( \ + /* An operand to BDAP or BIAP: \ + A BIAP; r.S? */ \ + BIAP_INDEX_P (X) \ + /* A [reg] or (int) [reg], maybe with post-increment. */ \ + || BDAP_INDEX_P (X) \ + || CONSTANT_INDEX_P (X) \ + ) + +#define EXTRA_CONSTRAINT_T(X) \ + ( \ + /* Memory three-address operand. All are indirect-memory: */ \ + MEM_P (X) \ + && ((MEM_P (XEXP (X, 0)) \ + /* Double indirect: [[reg]] or [[reg+]]? */ \ + && (BASE_OR_AUTOINCR_P (XEXP (XEXP (X, 0), 0)))) \ + /* Just an explicit indirect reference: [const]? 
*/ \ + || CONSTANT_P (XEXP (X, 0)) \ + /* Something that is indexed; [...+...]? */ \ + || (GET_CODE (XEXP (X, 0)) == PLUS \ + /* A BDAP constant: [reg+(8|16|32)bit offset]? */ \ + && ((BASE_P (XEXP (XEXP (X, 0), 0)) \ + && CONSTANT_INDEX_P (XEXP (XEXP (X, 0), 1))) \ + /* A BDAP register: [reg+[reg(+)].S]? */ \ + || (BASE_P (XEXP (XEXP (X, 0), 0)) \ + && BDAP_INDEX_P(XEXP(XEXP(X, 0), 1))) \ + /* Same, but with swapped arguments (no canonical \ + ordering between e.g. REG and MEM as of LAST_UPDATED \ + "Thu May 12 03:59:11 UTC 2005"). */ \ + || (BASE_P (XEXP (XEXP (X, 0), 1)) \ + && BDAP_INDEX_P (XEXP (XEXP (X, 0), 0))) \ + /* A BIAP: [reg+reg.S] (MULT comes first). */ \ + || (BASE_P (XEXP (XEXP (X, 0), 1)) \ + && BIAP_INDEX_P (XEXP (XEXP (X, 0), 0)))))) \ + ) + +/* PIC-constructs for symbols. */ +#define EXTRA_CONSTRAINT_S(X) \ + (flag_pic && GET_CODE (X) == CONST && cris_valid_pic_const (X, false)) + +#define EXTRA_CONSTRAINT_U(X) \ + (flag_pic \ + && CONSTANT_P (X) \ + && cris_nonmemory_operand_or_callable_symbol (X, VOIDmode)) + + +/* Node: Frame Layout */ + +#define STACK_GROWS_DOWNWARD +#define FRAME_GROWS_DOWNWARD 1 + +/* It seems to be indicated in the code (at least 2.1) that this is + better a constant, and best 0. */ +#define STARTING_FRAME_OFFSET 0 + +#define FIRST_PARM_OFFSET(FNDECL) 0 + +#define RETURN_ADDR_RTX(COUNT, FRAMEADDR) \ + cris_return_addr_rtx (COUNT, FRAMEADDR) + +#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, CRIS_SRP_REGNUM) + +/* FIXME: Any __builtin_eh_return callers must not return anything and + there must not be collisions with incoming parameters. Luckily the + number of __builtin_eh_return callers is limited. For now return + parameter registers in reverse order and hope for the best. */ +#define EH_RETURN_DATA_REGNO(N) \ + (IN_RANGE ((N), 0, 3) ? (CRIS_FIRST_ARG_REG + 3 - (N)) : INVALID_REGNUM) + +/* Store the stack adjustment in the structure-return-address register. */ +#define CRIS_STACKADJ_REG CRIS_STRUCT_VALUE_REGNUM +#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (SImode, CRIS_STACKADJ_REG) + +#define EH_RETURN_HANDLER_RTX \ + cris_return_addr_rtx (0, NULL) + +#define INIT_EXPANDERS cris_init_expanders () + +/* FIXME: Move this to right node (it's not documented properly yet). */ +#define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM (CRIS_SRP_REGNUM) + +/* FIXME: Move this to right node (it's not documented properly yet). + FIXME: Check what alignment we can assume regarding + TARGET_STACK_ALIGN and TARGET_ALIGN_BY_32. */ +#define DWARF_CIE_DATA_ALIGNMENT -1 + +/* If we would ever need an exact mapping between canonical register + number and dwarf frame register, we would either need to include all + registers in the gcc description (with some marked fixed of course), or + an inverse mapping from dwarf register to gcc register. There is one + need in dwarf2out.c:expand_builtin_init_dwarf_reg_sizes. Right now, I + don't see that we need exact correspondence between DWARF *frame* + registers and DBX_REGISTER_NUMBER, so map them onto GCC registers. */ +#define DWARF_FRAME_REGNUM(REG) (REG) + +/* Node: Stack Checking */ +/* (no definitions) FIXME: Check. */ + +/* Node: Frame Registers */ + +#define STACK_POINTER_REGNUM CRIS_SP_REGNUM + +/* Register used for frame pointer. This is also the last of the saved + registers, when a frame pointer is not used. */ +#define FRAME_POINTER_REGNUM CRIS_FP_REGNUM + +/* Faked register, is always eliminated. We need it to eliminate + allocating stack slots for the return address and the frame pointer. 
*/ +#define ARG_POINTER_REGNUM CRIS_AP_REGNUM + +#define STATIC_CHAIN_REGNUM CRIS_STATIC_CHAIN_REGNUM + + +/* Node: Elimination */ + +#define ELIMINABLE_REGS \ + {{ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \ + {ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM}, \ + {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}} + +#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \ + (OFFSET) = cris_initial_elimination_offset (FROM, TO) + + +/* Node: Stack Arguments */ + +/* Since many parameters take up one register each in any case, + defining TARGET_PROMOTE_PROTOTYPES that always returns true would + seem like a good idea, but measurements indicate that a combination + using PROMOTE_MODE is better. */ + +#define ACCUMULATE_OUTGOING_ARGS 1 + +#define RETURN_POPS_ARGS(FUNDECL, FUNTYPE, STACKSIZE) 0 + + +/* Node: Register Arguments */ + +/* The void_type_node is sent as a "closing" call. */ +#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \ + ((CUM).regs < CRIS_MAX_ARGS_IN_REGS \ + ? gen_rtx_REG (MODE, (CRIS_FIRST_ARG_REG) + (CUM).regs) \ + : NULL_RTX) + +/* The differences between this and the previous, is that this one checks + that an argument is named, since incoming stdarg/varargs arguments are + pushed onto the stack, and we don't have to check against the "closing" + void_type_node TYPE parameter. */ +#define FUNCTION_INCOMING_ARG(CUM, MODE, TYPE, NAMED) \ + ((NAMED) && (CUM).regs < CRIS_MAX_ARGS_IN_REGS \ + ? gen_rtx_REG (MODE, CRIS_FIRST_ARG_REG + (CUM).regs) \ + : NULL_RTX) + +/* Contrary to what you'd believe, defining FUNCTION_ARG_CALLEE_COPIES + seems like a (small total) loss, at least for gcc-2.7.2 compiling and + running gcc-2.1 (small win in size, small loss running -- 100.1%), + and similarly for size for products (.1 .. .3% bloat, sometimes win). + Due to the empirical likeliness of making slower code, it is not + defined. */ + +/* This no longer *needs* to be a structure; but keeping it as such should + not hurt (and hacking the ABI is simpler). */ +#define CUMULATIVE_ARGS struct cum_args +struct cum_args {int regs;}; + +/* The regs member is an integer, the number of arguments got into + registers so far. */ +#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS) \ + ((CUM).regs = 0) + +#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \ + ((CUM).regs += (3 + CRIS_FUNCTION_ARG_SIZE (MODE, TYPE)) / 4) + +#define FUNCTION_ARG_REGNO_P(REGNO) \ + ((REGNO) >= CRIS_FIRST_ARG_REG \ + && (REGNO) < CRIS_FIRST_ARG_REG + (CRIS_MAX_ARGS_IN_REGS)) + + +/* Node: Scalar Return */ + +#define FUNCTION_VALUE_REGNO_P(N) cris_function_value_regno_p (N) + + + +/* Node: Aggregate Return */ + +#define CRIS_STRUCT_VALUE_REGNUM ((CRIS_FIRST_ARG_REG) - 1) + + +/* Node: Caller Saves */ +/* (no definitions) */ + +/* Node: Function entry */ + +/* See cris.c for TARGET_ASM_FUNCTION_PROLOGUE and + TARGET_ASM_FUNCTION_EPILOGUE. */ + +/* Node: Profiling */ + +#define FUNCTION_PROFILER(FILE, LABELNO) \ + error ("no FUNCTION_PROFILER for CRIS") + +/* FIXME: Some of the undefined macros might be mandatory. If so, fix + documentation. */ + + +/* Node: Trampolines */ + +#define TRAMPOLINE_SIZE (TARGET_V32 ? 58 : 32) + +/* CRIS wants instructions on word-boundary. */ +#define TRAMPOLINE_ALIGNMENT 16 + +/* Node: Library Calls */ + +/* If you change this, you have to check whatever libraries and systems + that use it. */ +#define TARGET_EDOM 33 + + +/* Node: Addressing Modes */ + +#define HAVE_POST_INCREMENT 1 + +/* Must be a compile-time constant, so we go with the highest value + among all CRIS variants. 
*/ +#define MAX_REGS_PER_ADDRESS 2 + +/* There are helper macros defined here which are used only in + GO_IF_LEGITIMATE_ADDRESS. + + Note that you *have to* reject invalid addressing modes for mode + MODE, even if it is legal for normal addressing modes. You cannot + rely on the constraints to do this work. They can only be used to + doublecheck your intentions. One example is that you HAVE TO reject + (mem:DI (plus:SI (reg:SI x) (reg:SI y))) because for some reason + this cannot be reloaded. (Which of course you can argue that gcc + should have done.) FIXME: Strange. Check. */ + +/* No symbol can be used as an index (or more correct, as a base) together + with a register with PIC; the PIC register must be there. */ +#define CONSTANT_INDEX_P(X) \ + (CONSTANT_P (X) && (!flag_pic || cris_valid_pic_const (X, true))) + +/* True if X is a valid base register. */ +#define BASE_P(X) \ + (REG_P (X) && REG_OK_FOR_BASE_P (X)) + +/* True if X is a valid base register with or without autoincrement. */ +#define BASE_OR_AUTOINCR_P(X) \ + (BASE_P (X) \ + || (GET_CODE (X) == POST_INC \ + && BASE_P (XEXP (X, 0)) \ + && REGNO (XEXP (X, 0)) != CRIS_ACR_REGNUM)) + +/* True if X is a valid (register) index for BDAP, i.e. [Rs].S or [Rs+].S. */ +#define BDAP_INDEX_P(X) \ + ((MEM_P (X) && GET_MODE (X) == SImode \ + && BASE_OR_AUTOINCR_P (XEXP (X, 0))) \ + || (GET_CODE (X) == SIGN_EXTEND \ + && MEM_P (XEXP (X, 0)) \ + && (GET_MODE (XEXP (X, 0)) == HImode \ + || GET_MODE (XEXP (X, 0)) == QImode) \ + && BASE_OR_AUTOINCR_P (XEXP (XEXP (X, 0), 0)))) + +/* True if X is a valid (register) index for BIAP, i.e. Rd.m. */ +#define BIAP_INDEX_P(X) \ + ((BASE_P (X) && REG_OK_FOR_INDEX_P (X)) \ + || (GET_CODE (X) == MULT \ + && BASE_P (XEXP (X, 0)) \ + && REG_OK_FOR_INDEX_P (XEXP (X, 0)) \ + && CONST_INT_P (XEXP (X, 1)) \ + && (INTVAL (XEXP (X, 1)) == 2 \ + || INTVAL (XEXP (X, 1)) == 4))) + +/* A PIC operand looks like a normal symbol here. At output we dress it + in "[rPIC+symbol:GOT]" (global symbol) or "rPIC+symbol:GOTOFF" (local + symbol) so we exclude all addressing modes where we can't replace a + plain "symbol" with that. A global PIC symbol does not fit anywhere + here (but is thankfully a general_operand in itself). A local PIC + symbol is valid for the plain "symbol + offset" case. */ +#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \ + { \ + rtx x1, x2; \ + if (BASE_OR_AUTOINCR_P (X)) \ + goto ADDR; \ + else if (TARGET_V32) \ + /* Nothing else is valid then. */ \ + ; \ + else if (CONSTANT_INDEX_P (X)) \ + goto ADDR; \ + /* Indexed? */ \ + else if (GET_CODE (X) == PLUS) \ + { \ + x1 = XEXP (X, 0); \ + x2 = XEXP (X, 1); \ + /* BDAP o, Rd. */ \ + if ((BASE_P (x1) && CONSTANT_INDEX_P (x2)) \ + || (BASE_P (x2) && CONSTANT_INDEX_P (x1)) \ + /* BDAP Rs[+], Rd. */ \ + || (GET_MODE_SIZE (MODE) <= UNITS_PER_WORD \ + && ((BASE_P (x1) && BDAP_INDEX_P (x2)) \ + || (BASE_P (x2) && BDAP_INDEX_P (x1)) \ + /* BIAP.m Rs, Rd */ \ + || (BASE_P (x1) && BIAP_INDEX_P (x2)) \ + || (BASE_P (x2) && BIAP_INDEX_P (x1))))) \ + goto ADDR; \ + } \ + else if (MEM_P (X)) \ + { \ + /* DIP (Rs). Reject [[reg+]] and [[reg]] for \ + DImode (long long). */ \ + if (GET_MODE_SIZE (MODE) <= UNITS_PER_WORD \ + && (BASE_P (XEXP (X, 0)) \ + || BASE_OR_AUTOINCR_P (XEXP (X, 0)))) \ + goto ADDR; \ + } \ + } + +#ifndef REG_OK_STRICT + /* Nonzero if X is a hard reg that can be used as a base reg + or if it is a pseudo reg. 
*/ +# define REG_OK_FOR_BASE_P(X) \ + (REGNO (X) <= CRIS_LAST_GENERAL_REGISTER \ + || REGNO (X) == ARG_POINTER_REGNUM \ + || REGNO (X) >= FIRST_PSEUDO_REGISTER) +#else + /* Nonzero if X is a hard reg that can be used as a base reg. */ +# define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X)) +#endif + +#ifndef REG_OK_STRICT + /* Nonzero if X is a hard reg that can be used as an index + or if it is a pseudo reg. */ +# define REG_OK_FOR_INDEX_P(X) REG_OK_FOR_BASE_P (X) +#else + /* Nonzero if X is a hard reg that can be used as an index. */ +# define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X)) +#endif + +/* Fix reloads known to cause suboptimal spilling. */ +#define LEGITIMIZE_RELOAD_ADDRESS(X, MODE, OPNUM, TYPE, INDL, WIN) \ + do \ + { \ + if (cris_reload_address_legitimized (X, MODE, OPNUM, TYPE, INDL)) \ + goto WIN; \ + } \ + while (0) + +#define LEGITIMATE_CONSTANT_P(X) 1 + + +/* Node: Condition Code */ + +#define NOTICE_UPDATE_CC(EXP, INSN) cris_notice_update_cc (EXP, INSN) + +/* FIXME: Maybe define CANONICALIZE_COMPARISON later, when playing with + optimizations. It is needed; currently we do this with instruction + patterns and NOTICE_UPDATE_CC. */ + + +/* Node: Costs */ + +#define REGISTER_MOVE_COST(MODE, FROM, TO) \ + cris_register_move_cost (MODE, FROM, TO) + +/* This isn't strictly correct for v0..3 in buswidth-8bit mode, but + should suffice. */ +#define MEMORY_MOVE_COST(M, CLASS, IN) \ + (((M) == QImode) ? 4 : ((M) == HImode) ? 4 : 6) + +/* Regardless of the presence of delay slots, the default value of 1 for + BRANCH_COST is the best in the range (1, 2, 3), tested with gcc-2.7.2 + with testcases ipps and gcc, giving smallest and fastest code. */ + +#define SLOW_BYTE_ACCESS 0 + +/* This is the threshold *below* which inline move sequences of + word-length sizes will be emitted. The "9" will translate to + (9 - 1) * 4 = 32 bytes maximum moved, but using 16 instructions + (8 instruction sequences) or less. */ +#define MOVE_RATIO(speed) 9 + + +/* Node: Sections */ + +#define TEXT_SECTION_ASM_OP "\t.text" + +#define DATA_SECTION_ASM_OP "\t.data" + +#define FORCE_EH_FRAME_INFO_IN_DATA_SECTION (! TARGET_ELF) + +/* The jump table is immediately connected to the preceding insn. */ +#define JUMP_TABLES_IN_TEXT_SECTION 1 + + +/* Node: PIC */ + +/* Helper type. */ + +enum cris_pic_symbol_type + { + cris_no_symbol = 0, + cris_got_symbol = 1, + cris_rel_symbol = 2, + cris_got_symbol_needing_fixup = 3, + cris_invalid_pic_symbol = 4 + }; + +#define PIC_OFFSET_TABLE_REGNUM (flag_pic ? CRIS_GOT_REGNUM : INVALID_REGNUM) + +#define LEGITIMATE_PIC_OPERAND_P(X) cris_legitimate_pic_operand (X) + + +/* Node: File Framework */ + +/* We don't want an .ident for gcc. To avoid that but still support + #ident, we override ASM_OUTPUT_IDENT and, since the gcc .ident is its + only use besides ASM_OUTPUT_IDENT, undef IDENT_ASM_OP from elfos.h. */ +#undef IDENT_ASM_OP +#undef ASM_OUTPUT_IDENT +#define ASM_OUTPUT_IDENT(FILE, NAME) \ + fprintf (FILE, "%s\"%s\"\n", "\t.ident\t", NAME); + +#define ASM_APP_ON "#APP\n" + +#define ASM_APP_OFF "#NO_APP\n" + + +/* Node: Data Output */ + +#define OUTPUT_ADDR_CONST_EXTRA(STREAM, X, FAIL) \ + do { if (!cris_output_addr_const_extra (STREAM, X)) goto FAIL; } while (0) + +#define IS_ASM_LOGICAL_LINE_SEPARATOR(C, STR) (C) == '@' + +/* Node: Uninitialized Data */ + +/* Remember to round off odd values if we want data alignment, + since we cannot do that with an .align directive. 
+ + Using .comm causes the space not to be reserved in .bss, but by + tricks with the symbol type. Not good if other tools than binutils + are used on the object files. Since ".global ... .lcomm ..." works, we + use that. Use .._ALIGNED_COMMON, since gcc whines when we only have + ..._COMMON, and we prefer to whine ourselves; BIGGEST_ALIGNMENT is not + the one to check. This done for a.out only. */ +/* FIXME: I suspect a bug in gcc with alignment. Do not warn until + investigated; it mucks up the testsuite results. */ +#define CRIS_ASM_OUTPUT_ALIGNED_DECL_COMMON(FILE, DECL, NAME, SIZE, ALIGN, LOCAL) \ + do \ + { \ + int align_ = (ALIGN) / BITS_PER_UNIT; \ + if (TARGET_DATA_ALIGN && TARGET_ALIGN_BY_32 && align_ < 4) \ + align_ = 4; \ + else if (TARGET_DATA_ALIGN && align_ < 2) \ + align_ = 2; \ + /* FIXME: Do we need this? */ \ + else if (align_ < 1) \ + align_ = 1; \ + \ + if (TARGET_ELF) \ + { \ + if (LOCAL) \ + { \ + fprintf ((FILE), "%s", LOCAL_ASM_OP); \ + assemble_name ((FILE), (NAME)); \ + fprintf ((FILE), "\n"); \ + } \ + fprintf ((FILE), "%s", COMMON_ASM_OP); \ + assemble_name ((FILE), (NAME)); \ + fprintf ((FILE), ",%u,%u\n", (int)(SIZE), align_); \ + } \ + else \ + { \ + /* We can't tell a one-only or weak COMM from a "global \ + COMM" so just make all non-locals weak. */ \ + if (! (LOCAL)) \ + ASM_WEAKEN_LABEL (FILE, NAME); \ + fputs ("\t.lcomm ", (FILE)); \ + assemble_name ((FILE), (NAME)); \ + fprintf ((FILE), ",%u\n", \ + ((int)(SIZE) + (align_ - 1)) & ~(align_ - 1)); \ + } \ + } \ + while (0) + +#define ASM_OUTPUT_ALIGNED_DECL_COMMON(FILE, DECL, NAME, SIZE, ALIGN) \ + CRIS_ASM_OUTPUT_ALIGNED_DECL_COMMON(FILE, DECL, NAME, SIZE, ALIGN, 0) + +#undef ASM_OUTPUT_ALIGNED_DECL_LOCAL +#define ASM_OUTPUT_ALIGNED_DECL_LOCAL(FILE, DECL, NAME, SIZE, ALIGN) \ + CRIS_ASM_OUTPUT_ALIGNED_DECL_COMMON(FILE, DECL, NAME, SIZE, ALIGN, 1) + +/* Node: Label Output */ + +/* Globalizing directive for a label. */ +#define GLOBAL_ASM_OP "\t.global " + +#define SUPPORTS_WEAK 1 + +#define ASM_OUTPUT_SYMBOL_REF(STREAM, SYM) \ + cris_asm_output_symbol_ref (STREAM, SYM) + +#define ASM_OUTPUT_LABEL_REF(STREAM, BUF) \ + cris_asm_output_label_ref (STREAM, BUF) + +/* Remove any previous definition (elfos.h). */ +#undef ASM_GENERATE_INTERNAL_LABEL +#define ASM_GENERATE_INTERNAL_LABEL(LABEL, PREFIX, NUM) \ + sprintf (LABEL, "*%s%s%ld", LOCAL_LABEL_PREFIX, PREFIX, (long) NUM) + +/* Node: Initialization */ +/* (no definitions) */ + +/* Node: Macros for Initialization */ +/* (no definitions) */ + +/* Node: Instruction Output */ + +#define REGISTER_NAMES \ + {"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", \ + "r9", "r10", "r11", "r12", "r13", "sp", "acr", "srp", "mof", "faked_ap", "dccr"} + +#define ADDITIONAL_REGISTER_NAMES \ + {{"r14", 14}, {"r15", 15}, {"pc", 15}} + +#define PRINT_OPERAND(FILE, X, CODE) \ + cris_print_operand (FILE, X, CODE) + +/* For delay-slot handling. */ +#define PRINT_OPERAND_PUNCT_VALID_P(CODE) \ + ((CODE) == '#' || (CODE) == '!' || (CODE) == ':') + +#define PRINT_OPERAND_ADDRESS(FILE, ADDR) \ + cris_print_operand_address (FILE, ADDR) + +/* Output an empty line to illustrate the presence of the delay slot. */ +#define DBR_OUTPUT_SEQEND(FILE) \ + fprintf (FILE, "\n") + +#define LOCAL_LABEL_PREFIX (TARGET_ELF ? "." : "") + +/* cppinit.c initializes a const array from this, so it must be constant, + can't have it different based on options. Luckily, the prefix is + always allowed, so let's have it on all GCC-generated code. 
Note that + we have this verbatim everywhere in the back-end, not using %R or %s or + such. */ +#define REGISTER_PREFIX "$" + +/* Remove any previous definition (elfos.h). */ +/* We use -fno-leading-underscore to remove it, when necessary. */ +#undef USER_LABEL_PREFIX +#define USER_LABEL_PREFIX "_" + +#define ASM_OUTPUT_REG_PUSH(FILE, REGNO) \ + fprintf (FILE, \ + TARGET_V32 \ + ? "\tsubq 4,$sp\n\tmove $%s,[$sp]\n" : "\tpush $%s\n", \ + reg_names[REGNO]) + +#define ASM_OUTPUT_REG_POP(FILE, REGNO) \ + fprintf (FILE, "\tmove [$sp+],$%s\n", reg_names[REGNO]) + + +/* Node: Dispatch Tables */ + +#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \ + do \ + { \ + if (TARGET_V32) \ + asm_fprintf (FILE, "\t.word %LL%d-.\n", VALUE); \ + else \ + asm_fprintf (FILE, "\t.word %LL%d-%LL%d\n", VALUE, REL); \ + } \ + while (0) + +#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \ + asm_fprintf (FILE, "\t.dword %LL%d\n", VALUE) + +/* Defined to also emit an .align in elfos.h. We don't want that. */ +#undef ASM_OUTPUT_CASE_LABEL + +/* Since the "bound" insn loads the comparison value if the compared + value (register) is out of bounds (0..comparison value-1), we need + to output another case to catch it. + The way to find it is to look for the label_ref at the else-arm inside + the expanded casesi core-insn. + FIXME: Check this construct when changing to a new version of gcc. */ +#define ASM_OUTPUT_CASE_END(STREAM, NUM, TABLE) \ + cris_asm_output_case_end (STREAM, NUM, TABLE) + + +/* Node: Exception Region Output */ +/* (no definitions) */ +/* FIXME: Fill in with our own optimized layout. */ + +/* Node: Alignment Output */ + +#define ASM_OUTPUT_ALIGN(FILE, LOG) \ + fprintf (FILE, "\t.align %d\n", (LOG)) + + +/* Node: All Debuggers */ + +#define DBX_REGISTER_NUMBER(REGNO) \ + ((REGNO) == CRIS_SRP_REGNUM ? CRIS_CANONICAL_SRP_REGNUM : \ + (REGNO) == CRIS_MOF_REGNUM ? CRIS_CANONICAL_MOF_REGNUM : \ + (REGNO) == CRIS_CC0_REGNUM ? CRIS_CANONICAL_CC0_REGNUM : \ + (REGNO)) + +/* FIXME: Investigate DEBUGGER_AUTO_OFFSET, DEBUGGER_ARG_OFFSET. */ + + +/* Node: DBX Options */ + +/* Is this correct? Check later. */ +#define DBX_NO_XREFS + +#define DBX_CONTIN_LENGTH 0 + +/* FIXME: Is this needed when we have 0 DBX_CONTIN_LENGTH? */ +#define DBX_CONTIN_CHAR '?' + + +/* Node: DBX Hooks */ +/* (no definitions) */ + +/* Node: File names and DBX */ +/* (no definitions) */ + + +/* Node: SDB and DWARF */ +/* (no definitions) */ + +/* Node: Misc */ + +/* A combination of the bound (umin) insn together with a + sign-extended add via the table to PC seems optimal. + If the table overflows, the assembler will take care of it. + Theoretically, in extreme cases (uncertain if they occur), an error + will be emitted, so FIXME: Check how large case-tables are emitted, + possibly add an option to emit SImode case-tables. */ +#define CASE_VECTOR_MODE HImode + +#define CASE_VECTOR_PC_RELATIVE 1 + +/* FIXME: Investigate CASE_VECTOR_SHORTEN_MODE to make sure HImode is not + used when broken-.word could possibly fail (plus testcase). */ + +#define FIXUNS_TRUNC_LIKE_FIX_TRUNC + +/* This is the number of bytes that can be moved in one + reasonably fast instruction sequence. For CRIS, this is two + instructions: mem => reg, reg => mem. */ +#define MOVE_MAX 4 + +/* Maybe SHIFT_COUNT_TRUNCATED is safe to define? FIXME: Check later. 
*/ + +#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1 + +#define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) ((VALUE) = 32, 1) +#define CTZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) ((VALUE) = 32, 1) + +#define Pmode SImode + +#define FUNCTION_MODE QImode + +#define NO_IMPLICIT_EXTERN_C + +/* No specific purpose other than warningless compatibility. */ +#define HANDLE_PRAGMA_PACK_PUSH_POP 1 + +/* + * Local variables: + * eval: (c-set-style "gnu") + * indent-tabs-mode: t + * End: + */
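As a side note on the uninitialized-data macros above: the a.out .lcomm branch rounds the object size up to the requested alignment before emitting it. A minimal standalone C sketch of that rounding (the helper name is illustrative only, not part of the port), assuming ALIGN is a power of two:

/* Round SIZE up to the next multiple of ALIGN (a power of two),
   as done for the a.out .lcomm size above.  */
static unsigned int
round_up_to_align (unsigned int size, unsigned int align)
{
  return (size + (align - 1)) & ~(align - 1);
}

For example, round_up_to_align (13, 4) yields 16, matching what "((int)(SIZE) + (align_ - 1)) & ~(align_ - 1)" prints for the .lcomm directive.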
cris.h Property changes : Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +Id \ No newline at end of property Index: mulsi3.asm =================================================================== --- mulsi3.asm (nonexistent) +++ mulsi3.asm (revision 338) @@ -0,0 +1,255 @@ +;; Copyright (C) 2001, 2004 Free Software Foundation, Inc. +;; +;; This file is part of GCC. +;; +;; GCC is free software; you can redistribute it and/or modify it under +;; the terms of the GNU General Public License as published by the Free +;; Software Foundation; either version 3, or (at your option) any later +;; version. +;; +;; GCC is distributed in the hope that it will be useful, but WITHOUT ANY +;; WARRANTY; without even the implied warranty of MERCHANTABILITY or +;; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +;; for more details. +;; +;; Under Section 7 of GPL version 3, you are granted additional +;; permissions described in the GCC Runtime Library Exception, version +;; 3.1, as published by the Free Software Foundation. +;; +;; You should have received a copy of the GNU General Public License and +;; a copy of the GCC Runtime Library Exception along with this program; +;; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +;; . +;; +;; This code used to be expanded through interesting expansions in +;; the machine description, compiled from this code: +;; +;; #ifdef L_mulsi3 +;; long __Mul (unsigned long a, unsigned long b) __attribute__ ((__const__)); +;; +;; /* This must be compiled with the -mexpand-mul flag, to synthesize the +;; multiplication from the mstep instructions. The check for +;; smaller-size multiplication pays off in the order of .5-10%; +;; estimated median 1%, depending on application. +;; FIXME: It can be further optimized if we go to assembler code, as +;; gcc 2.7.2 adds a few unnecessary instructions and does not put the +;; basic blocks in optimal order. */ +;; long +;; __Mul (unsigned long a, unsigned long b) +;; { +;; #if defined (__CRIS_arch_version) && __CRIS_arch_version >= 10 +;; /* In case other code is compiled without -march=v10, they will +;; contain calls to __Mul, regardless of flags at link-time. The +;; "else"-code below will work, but is unnecessarily slow. This +;; sometimes cuts a few minutes off from simulation time by just +;; returning a "mulu.d". */ +;; return a * b; +;; #else +;; unsigned long min; +;; +;; /* Get minimum via the bound insn. */ +;; min = a < b ? a : b; +;; +;; /* Can we omit computation of the high part? */ +;; if (min > 65535) +;; /* No. Perform full multiplication. */ +;; return a * b; +;; else +;; { +;; /* Check if both operands are within 16 bits. */ +;; unsigned long max; +;; +;; /* Get maximum, by knowing the minimum. +;; This will partition a and b into max and min. +;; This is not currently something GCC understands, +;; so do this trick by asm. */ +;; __asm__ ("xor %1,%0\n\txor %2,%0" +;; : "=r" (max) +;; : "r" (b), "r" (a), "0" (min)); +;; +;; if (max > 65535) +;; /* Make GCC understand that only the low part of "min" will be +;; used. */ +;; return max * (unsigned short) min; +;; else +;; /* Only the low parts of both operands are necessary. */ +;; return ((unsigned short) max) * (unsigned short) min; +;; } +;; #endif /* not __CRIS_arch_version >= 10 */ +;; } +;; #endif /* L_mulsi3 */ +;; +;; That approach was abandoned since the caveats outweighted the +;; benefits. 
The expand-multiplication machinery is also removed, so you +;; can't do this anymore. +;; +;; For doubters of there being any benefits, some where: insensitivity to: +;; - ABI changes (mostly for experimentation) +;; - assembler syntax differences (mostly debug format). +;; - insn scheduling issues. +;; Most ABI experiments will presumably happen with arches with mul insns, +;; so that argument doesn't really hold anymore, and it's unlikely there +;; being new arch variants needing insn scheduling and not having mul +;; insns. + +;; ELF and a.out have different syntax for local labels: the "wrong" +;; one may not be omitted from the object. +#undef L +#ifdef __AOUT__ +# define L(x) x +#else +# define L(x) .x +#endif + + .global ___Mul + .type ___Mul,@function +___Mul: +#if defined (__CRIS_arch_version) && __CRIS_arch_version >= 10 +;; Can't have the mulu.d last on a cache-line (in the delay-slot of the +;; "ret"), due to hardware bug. See documentation for -mmul-bug-workaround. +;; Not worthwhile to conditionalize here. + .p2alignw 2,0x050f + mulu.d $r11,$r10 + ret + nop +#else + move.d $r10,$r12 + move.d $r11,$r9 + bound.d $r12,$r9 + cmpu.w 65535,$r9 + bls L(L3) + move.d $r12,$r13 + + movu.w $r11,$r9 + lslq 16,$r13 + mstep $r9,$r13 + mstep $r9,$r13 + mstep $r9,$r13 + mstep $r9,$r13 + mstep $r9,$r13 + mstep $r9,$r13 + mstep $r9,$r13 + mstep $r9,$r13 + mstep $r9,$r13 + mstep $r9,$r13 + mstep $r9,$r13 + mstep $r9,$r13 + mstep $r9,$r13 + mstep $r9,$r13 + mstep $r9,$r13 + mstep $r9,$r13 + clear.w $r10 + test.d $r10 + mstep $r9,$r10 + mstep $r9,$r10 + mstep $r9,$r10 + mstep $r9,$r10 + mstep $r9,$r10 + mstep $r9,$r10 + mstep $r9,$r10 + mstep $r9,$r10 + mstep $r9,$r10 + mstep $r9,$r10 + mstep $r9,$r10 + mstep $r9,$r10 + mstep $r9,$r10 + mstep $r9,$r10 + mstep $r9,$r10 + mstep $r9,$r10 + movu.w $r12,$r12 + move.d $r11,$r9 + clear.w $r9 + test.d $r9 + mstep $r12,$r9 + mstep $r12,$r9 + mstep $r12,$r9 + mstep $r12,$r9 + mstep $r12,$r9 + mstep $r12,$r9 + mstep $r12,$r9 + mstep $r12,$r9 + mstep $r12,$r9 + mstep $r12,$r9 + mstep $r12,$r9 + mstep $r12,$r9 + mstep $r12,$r9 + mstep $r12,$r9 + mstep $r12,$r9 + mstep $r12,$r9 + add.w $r9,$r10 + lslq 16,$r10 + ret + add.d $r13,$r10 + +L(L3): + move.d $r9,$r10 + xor $r11,$r10 + xor $r12,$r10 + cmpu.w 65535,$r10 + bls L(L5) + movu.w $r9,$r13 + + movu.w $r13,$r13 + move.d $r10,$r9 + lslq 16,$r9 + mstep $r13,$r9 + mstep $r13,$r9 + mstep $r13,$r9 + mstep $r13,$r9 + mstep $r13,$r9 + mstep $r13,$r9 + mstep $r13,$r9 + mstep $r13,$r9 + mstep $r13,$r9 + mstep $r13,$r9 + mstep $r13,$r9 + mstep $r13,$r9 + mstep $r13,$r9 + mstep $r13,$r9 + mstep $r13,$r9 + mstep $r13,$r9 + clear.w $r10 + test.d $r10 + mstep $r13,$r10 + mstep $r13,$r10 + mstep $r13,$r10 + mstep $r13,$r10 + mstep $r13,$r10 + mstep $r13,$r10 + mstep $r13,$r10 + mstep $r13,$r10 + mstep $r13,$r10 + mstep $r13,$r10 + mstep $r13,$r10 + mstep $r13,$r10 + mstep $r13,$r10 + mstep $r13,$r10 + mstep $r13,$r10 + mstep $r13,$r10 + lslq 16,$r10 + ret + add.d $r9,$r10 + +L(L5): + movu.w $r9,$r9 + lslq 16,$r10 + mstep $r9,$r10 + mstep $r9,$r10 + mstep $r9,$r10 + mstep $r9,$r10 + mstep $r9,$r10 + mstep $r9,$r10 + mstep $r9,$r10 + mstep $r9,$r10 + mstep $r9,$r10 + mstep $r9,$r10 + mstep $r9,$r10 + mstep $r9,$r10 + mstep $r9,$r10 + mstep $r9,$r10 + mstep $r9,$r10 + ret + mstep $r9,$r10 +#endif +L(Lfe1): + .size ___Mul,L(Lfe1)-___Mul Index: cris.md =================================================================== --- cris.md (nonexistent) +++ cris.md (revision 338) @@ -0,0 +1,5110 @@ +;; GCC machine description for CRIS 
cpu cores. +;; Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, +;; 2008, 2009 Free Software Foundation, Inc. +;; Contributed by Axis Communications. + +;; This file is part of GCC. +;; +;; GCC is free software; you can redistribute it and/or modify +;; it under the terms of the GNU General Public License as published by +;; the Free Software Foundation; either version 3, or (at your option) +;; any later version. +;; +;; GCC is distributed in the hope that it will be useful, +;; but WITHOUT ANY WARRANTY; without even the implied warranty of +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +;; GNU General Public License for more details. +;; +;; You should have received a copy of the GNU General Public License +;; along with GCC; see the file COPYING3. If not see +;; . + +;; The original PO technology requires these to be ordered by speed, +;; so that assigner will pick the fastest. + +;; See files "md.texi" and "rtl.def" for documentation on define_insn, +;; match_*, et. al. +;; +;; The function cris_notice_update_cc in cris.c handles condition code +;; updates for most instructions, helped by the "cc" attribute. + +;; There are several instructions that are orthogonal in size, and seems +;; they could be matched by a single pattern without a specified size +;; for the operand that is orthogonal. However, this did not work on +;; gcc-2.7.2 (and probably not on gcc-2.8.1), relating to that when a +;; constant is substituted into an operand, the actual mode must be +;; deduced from the pattern. There is reasonable hope that that has been +;; fixed, so FIXME: try again. + +;; You will notice that three-operand alternatives ("=r", "r", "!To") +;; are marked with a "!" constraint modifier to avoid being reloaded +;; into. This is because gcc would otherwise prefer to use the constant +;; pool and its offsettable address instead of reloading to an +;; ("=r", "0", "i") alternative. Also, the constant-pool support was not +;; only suboptimal but also buggy in 2.7.2, ??? maybe only in 2.6.3. + +;; All insns that look like (set (...) (plus (...) (reg:SI 8))) +;; get problems when reloading r8 (frame pointer) to r14 + offs (stack +;; pointer). Thus the instructions that get into trouble have specific +;; checks against matching frame_pointer_rtx. +;; ??? But it should be re-checked for gcc > 2.7.2 +;; FIXME: This changed some time ago (from 2000-03-16) for gcc-2.9x. + +;; FIXME: When PIC, all [rX=rY+S] could be enabled to match +;; [rX=gotless_symbol]. +;; The movsi for a gotless symbol could be split (post reload). + + +(define_constants + [ + ;; PLT reference from call expansion: operand 0 is the address, + ;; the mode is VOIDmode. Always wrapped in CONST. + ;; The value is relative to the GOT. + (CRIS_UNSPEC_PLT_GOTREL 0) + + ;; PLT reference from call expansion: operand 0 is the address, + ;; the mode is VOIDmode. Always wrapped in CONST. + ;; The value is relative to the PC. It's arch-dependent whether + ;; the offset counts from the start or the end of the current item. + (CRIS_UNSPEC_PLT_PCREL 1) + + ;; The address of the global offset table as a source operand. + (CRIS_UNSPEC_GOT 2) + + ;; The offset from the global offset table to the operand. + (CRIS_UNSPEC_GOTREL 3) + + ;; The PC-relative offset to the operand. It's arch-dependent whether + ;; the offset counts from the start or the end of the current item. + (CRIS_UNSPEC_PCREL 4) + + ;; The index into the global offset table of a symbol, while + ;; also generating a GOT entry for the symbol. 
+ (CRIS_UNSPEC_GOTREAD 5) + + ;; Similar to CRIS_UNSPEC_GOTREAD, but also generating a PLT entry. + (CRIS_UNSPEC_PLTGOTREAD 6) + + ;; Condition for v32 casesi jump, since it needs to have if_then_else + ;; form with register as one branch and default label as other. + ;; Operand 0 is const_int 0. + (CRIS_UNSPEC_CASESI 7) + + ;; Stack frame deallocation barrier. + (CRIS_UNSPEC_FRAME_DEALLOC 8) + + ;; Swap all 32 bits of the operand; 31 <=> 0, 30 <=> 1... + (CRIS_UNSPEC_SWAP_BITS 9) + ]) + +;; Register numbers. +(define_constants + [(CRIS_GOT_REGNUM 0) + (CRIS_STATIC_CHAIN_REGNUM 7) + (CRIS_FP_REGNUM 8) + (CRIS_SP_REGNUM 14) + (CRIS_ACR_REGNUM 15) + (CRIS_SRP_REGNUM 16) + (CRIS_MOF_REGNUM 17) + (CRIS_AP_REGNUM 18) + (CRIS_CC0_REGNUM 19)] +) + +;; We need an attribute to define whether an instruction can be put in +;; a branch-delay slot or not, and whether it has a delay slot. +;; +;; Branches and return instructions have a delay slot, and cannot +;; themselves be put in a delay slot. This has changed *for short +;; branches only* between architecture variants, but the possible win +;; is presumed negligible compared to the added complexity of the machine +;; description: one would have to add always-correct infrastructure to +;; distinguish short branches. +;; +;; Whether an instruction can be put in a delay slot depends on the +;; instruction (all short instructions except jumps and branches) +;; and the addressing mode (must not be prefixed or referring to pc). +;; In short, any "slottable" instruction must be 16 bit and not refer +;; to pc, or alter it. +;; +;; The possible values are "yes", "no", "has_slot", "has_return_slot" +;; and "has_call_slot". +;; Yes/no tells whether the insn is slottable or not. Has_call_slot means +;; that the insn is a call insn, which for CRIS v32 has a delay-slot. +;; Of special concern is that no RTX_FRAME_RELATED insn must go in that +;; call delay slot, as it's located in the address *after* the call insn, +;; and the unwind machinery doesn't know about delay slots. +;; Has_slot means that the insn is a branch insn (which are +;; not considered slottable since that is generally true). Having the +;; seemingly illogical value "has_slot" means we do not have to add +;; another attribute just to say that an insn has a delay-slot, since it +;; also infers that it is not slottable. Better names for the attribute +;; were found to be longer and not add readability to the machine +;; description. +;; Has_return_slot is similar, for the return insn. +;; +;; The default that is defined here for this attribute is "no", not +;; slottable, not having a delay-slot, so there's no need to worry about +;; it being wrong for non-branch and return instructions. +;; The default could depend on the kind of insn and the addressing +;; mode, but that would need more attributes and hairier, more error +;; prone code. +;; +;; There is an extra memory constraint, 'Q', which recognizes an indirect +;; register. The constraints 'Q' and '>' together match all possible +;; memory operands that are slottable. +;; For other operands, you need to check if it has a valid "slottable" +;; quick-immediate operand, where the particular signedness-variation +;; may match the constraints 'I' or 'J'.), and include it in the +;; constraint pattern for the slottable pattern. An alternative using +;; only "r" constraints is most often slottable. 
+ +(define_attr "slottable" "no,yes,has_slot,has_return_slot,has_call_slot" + (const_string "no")) + +;; We also need attributes to sanely determine the condition code +;; state. See cris_notice_update_cc for how this is used. + +(define_attr "cc" "none,clobber,normal,noov32,rev" (const_string "normal")) + +;; At the moment, this attribute is just used to help bb-reorder do its +;; work; the default 0 doesn't help it. Many insns have other lengths, +;; though none are shorter. +(define_attr "length" "" (const_int 2)) + +;; A branch has one delay-slot. The instruction in the +;; delay-slot is always executed, independent of whether the branch is +;; taken or not. Note that besides setting "slottable" to "has_slot", +;; there also has to be a "%#" at the end of a "delayed" instruction +;; output pattern (for "jump" this means "ba %l0%#"), so print_operand can +;; catch it and print a "nop" if necessary. This method was stolen from +;; sparc.md. + +(define_delay (eq_attr "slottable" "has_slot") + [(eq_attr "slottable" "yes") (nil) (nil)]) + +;; We can't put prologue insns in call-insn delay-slots when +;; DWARF2 unwind info is emitted, because the unwinder matches the +;; address after the insn. It must see the return address of a call at +;; a position at least *one byte after* the insn, or it'll think that +;; the insn hasn't been executed. If the insn is in a delay-slot of a +;; call, it's just *exactly* after the insn. + +(define_delay (eq_attr "slottable" "has_call_slot") + [(and (eq_attr "slottable" "yes") + (ior (eq (symbol_ref "RTX_FRAME_RELATED_P (insn)") + (const_int 0)) + (eq (symbol_ref "flag_exceptions") + (const_int 0)))) + (nil) (nil)]) + +;; The insn in the return insn slot must not be the +;; return-address-register restore. FIXME: Use has_slot and express +;; as a parallel with a use of the return-address-register (currently +;; only SRP). However, this requires an amount of fixing tests for +;; naked RETURN in middle-end. +(define_delay (eq_attr "slottable" "has_return_slot") + [(and (eq_attr "slottable" "yes") + (eq (symbol_ref "dead_or_set_regno_p (insn, CRIS_SRP_REGNUM)") + (const_int 0))) + (nil) (nil)]) + + +;; Iterator definitions. + +;; For the "usual" pattern size alternatives. +(define_mode_iterator BWD [SI HI QI]) +(define_mode_iterator WD [SI HI]) +(define_mode_iterator BW [HI QI]) +(define_mode_attr S [(SI "HI") (HI "QI")]) +(define_mode_attr s [(SI "hi") (HI "qi")]) +(define_mode_attr m [(SI ".d") (HI ".w") (QI ".b")]) +(define_mode_attr mm [(SI ".w") (HI ".b")]) +(define_mode_attr nbitsm1 [(SI "31") (HI "15") (QI "7")]) + +;; For the sign_extend+zero_extend variants. +(define_code_iterator szext [sign_extend zero_extend]) +(define_code_attr u [(sign_extend "") (zero_extend "u")]) +(define_code_attr su [(sign_extend "s") (zero_extend "u")]) + +;; For the shift variants. 
+(define_code_iterator shift [ashiftrt lshiftrt ashift]) +(define_code_iterator shiftrt [ashiftrt lshiftrt]) +(define_code_attr shlr [(ashiftrt "ashr") (lshiftrt "lshr") (ashift "ashl")]) +(define_code_attr slr [(ashiftrt "asr") (lshiftrt "lsr") (ashift "lsl")]) + +(define_code_iterator ncond [eq ne gtu ltu geu leu]) +(define_code_iterator ocond [gt le]) +(define_code_iterator rcond [lt ge]) +(define_code_attr CC [(eq "eq") (ne "ne") (gt "gt") (gtu "hi") (lt "lt") + (ltu "lo") (ge "ge") (geu "hs") (le "le") (leu "ls")]) +(define_code_attr rCC [(eq "ne") (ne "eq") (gt "le") (gtu "ls") (lt "ge") + (ltu "hs") (ge "lt") (geu "lo") (le "gt") (leu "hi")]) +(define_code_attr oCC [(lt "mi") (ge "pl")]) +(define_code_attr roCC [(lt "pl") (ge "mi")]) + +;; Operand and operator predicates. + +(include "predicates.md") + +;; Test insns. + +;; No test insns with side-effect on the mem addressing. +;; +;; See note on cmp-insns with side-effects (or lack of them) + +;; Normal named test patterns from SI on. + +(define_insn "*tstsi" + [(set (cc0) + (compare (match_operand:SI 0 "nonimmediate_operand" "r,Q>,m") + (const_int 0)))] + "" +{ + if (which_alternative == 0 && TARGET_V32) + return "cmpq 0,%0"; + return "test.d %0"; +} + [(set_attr "slottable" "yes,yes,no")]) + +(define_insn "*tst_cmp" + [(set (cc0) + (compare (match_operand:BW 0 "nonimmediate_operand" "r,Q>,m") + (const_int 0)))] + "cris_cc0_user_requires_cmp (insn)" + "@ + cmp 0,%0 + test %0 + test %0" + [(set_attr "slottable" "no,yes,no")]) + +(define_insn "*tst_non_cmp" + [(set (cc0) + (compare (match_operand:BW 0 "nonimmediate_operand" "r,Q>,m") + (const_int 0)))] + "!cris_cc0_user_requires_cmp (insn)" + "@ + move %0,%0 + test %0 + test %0" + [(set_attr "slottable" "yes,yes,no") + (set_attr "cc" "noov32,*,*")]) + +;; It seems that the position of the sign-bit and the fact that 0.0 is +;; all 0-bits would make "tstsf" a straight-forward implementation; +;; either "test.d" it for positive/negative or "btstq 30,r" it for +;; zeroness. +;; +;; FIXME: Do that some time; check next_cc0_user to determine if +;; zero or negative is tested for. + +;; Compare insns. + +;; We could optimize the sizes of the immediate operands for various +;; cases, but that is not worth it because of the very little usage of +;; DImode for anything else but a structure/block-mode. Just do the +;; obvious stuff for the straight-forward constraint letters. + +(define_insn "*cmpdi_non_v32" + [(set (cc0) + (compare (match_operand:DI 0 "nonimmediate_operand" "rm,r,r,r,r,r,r,o") + (match_operand:DI 1 "general_operand" "M,Kc,I,P,n,r,o,r")))] + "!TARGET_V32" + "@ + test.d %M0\;ax\;test.d %H0 + cmpq %1,%M0\;ax\;cmpq 0,%H0 + cmpq %1,%M0\;ax\;cmpq -1,%H0 + cmp%e1.%z1 %1,%M0\;ax\;cmpq %H1,%H0 + cmp.d %M1,%M0\;ax\;cmp.d %H1,%H0 + cmp.d %M1,%M0\;ax\;cmp.d %H1,%H0 + cmp.d %M1,%M0\;ax\;cmp.d %H1,%H0 + cmp.d %M0,%M1\;ax\;cmp.d %H0,%H1") + +(define_insn "*cmpdi_v32" + [(set (cc0) + (compare (match_operand:DI 0 "register_operand" "r,r,r,r,r") + (match_operand:DI 1 "nonmemory_operand" "Kc,I,P,n,r")))] + "TARGET_V32" + "@ + cmpq %1,%M0\;ax\;cmpq 0,%H0 + cmpq %1,%M0\;ax\;cmpq -1,%H0 + cmp%e1.%z1 %1,%M0\;ax\;cmpq %H1,%H0 + cmp.d %M1,%M0\;ax\;cmp.d %H1,%H0 + cmp.d %M1,%M0\;ax\;cmp.d %H1,%H0") + +;; Note that compare insns with side effect addressing mode (e.g.): +;; +;; cmp.S [rx=ry+i],rz; +;; cmp.S [%3=%1+%2],%0 +;; +;; are *not* usable for gcc since the reloader *does not accept* +;; cc0-changing insns with side-effects other than setting the condition +;; codes. 
The reason is that the reload stage *may* cause another insn to +;; be output after the main instruction, in turn invalidating cc0 for the +;; insn using the test. (This does not apply to the CRIS case, since a +;; reload for output -- move to memory -- does not change the condition +;; code. Unfortunately we have no way to describe that at the moment. I +;; think code would improve being in the order of one percent faster. + +;; We have cmps and cmpu (compare reg w. sign/zero extended mem). +;; These are mostly useful for compares in SImode, using 8 or 16-bit +;; constants, but sometimes gcc will find its way to use it for other +;; (memory) operands. Avoid side-effect patterns, though (see above). + +(define_insn "*cmp_ext" + [(set (cc0) + (compare + (match_operand:SI 0 "register_operand" "r,r") + (match_operator:SI 2 "cris_extend_operator" + [(match_operand:BW 1 "memory_operand" "Q>,m")])))] + "" + "cmp%e2 %1,%0" + [(set_attr "slottable" "yes,no")]) + +;; Swap operands; it seems the canonical look (if any) is not enforced. +;; +;; FIXME: Investigate that. + +(define_insn "*cmp_swapext" + [(set (cc0) + (compare + (match_operator:SI 2 "cris_extend_operator" + [(match_operand:BW 0 "memory_operand" "Q>,m")]) + (match_operand:SI 1 "register_operand" "r,r")))] + "" + "cmp%e2 %0,%1" + [(set_attr "slottable" "yes,no") + (set_attr "cc" "rev")]) + +;; The "normal" compare patterns, from SI on. Special-cases with zero +;; are covered above. + +(define_insn "*cmpsi" + [(set (cc0) + (compare + (match_operand:SI 0 "nonimmediate_operand" "r,r,r, Q>,r,r,m") + (match_operand:SI 1 "general_operand" "I,r,Q>,r, P,g,r")))] + "" + "@ + cmpq %1,%0 + cmp.d %1,%0 + cmp.d %1,%0 + cmp.d %0,%1 + cmp%e1.%z1 %1,%0 + cmp.d %1,%0 + cmp.d %0,%1" + [(set_attr "slottable" "yes,yes,yes,yes,no,no,no") + (set_attr "cc" "normal,normal,normal,rev,normal,normal,rev")]) + +(define_insn "*cmp" + [(set (cc0) + (compare (match_operand:BW 0 "nonimmediate_operand" "r,r, Q>,r,m") + (match_operand:BW 1 "general_operand" "r,Q>,r, g,r")))] + "" + "@ + cmp %1,%0 + cmp %1,%0 + cmp %0,%1 + cmp %1,%0 + cmp %0,%1" + [(set_attr "slottable" "yes,yes,yes,no,no") + (set_attr "cc" "normal,normal,rev,normal,rev")]) + +;; Pattern matching the BTST insn. +;; It is useful for "if (i & val)" constructs, where val is an exact +;; power of 2, or if val + 1 is a power of two, where we check for a bunch +;; of zeros starting at bit 0). + +;; SImode. This mode is the only one needed, since gcc automatically +;; extends subregs for lower-size modes. FIXME: Add testcase. +(define_insn "*btst" + [(set (cc0) + (compare + (zero_extract:SI + (match_operand:SI 0 "nonmemory_operand" "r, r,r, r,r, r,Kp") + (match_operand:SI 1 "const_int_operand" "Kc,n,Kc,n,Kc,n,n") + (match_operand:SI 2 "nonmemory_operand" "M, M,Kc,n,r, r,r")) + (const_int 0)))] + ;; Either it is a single bit, or consecutive ones starting at 0. + ;; The btst ones depend on stuff in NOTICE_UPDATE_CC. + "CONST_INT_P (operands[1]) + && (operands[1] == const1_rtx || operands[2] == const0_rtx) + && (REG_S_P (operands[0]) + || (operands[1] == const1_rtx + && REG_S_P (operands[2]) + && CONST_INT_P (operands[0]) + && exact_log2 (INTVAL (operands[0])) >= 0)) + && !TARGET_CCINIT" + +;; The next-to-last "&&" condition above should be caught by some kind of +;; canonicalization in gcc, but we can easily help with it here. +;; It results from expressions of the type +;; "power_of_2_value & (1 << y)". 
+;; +;; Since there may be codes with tests in on bits (in constant position) +;; beyond the size of a word, handle that by assuming those bits are 0. +;; GCC should handle that, but it's a matter of easily-added belts while +;; having suspenders. + + "@ + btstq (%1-1),%0 + cmpq 0,%0 + btstq %2,%0 + clearf nz + btst %2,%0 + clearf nz + cmpq %p0,%2" + [(set_attr "slottable" "yes") + (set_attr "cc" "noov32")]) + +;; Move insns. + +;; The whole mandatory movdi family is here; expander, "anonymous" +;; recognizer and splitter. We're forced to have a movdi pattern, +;; although GCC should be able to split it up itself. Normally it can, +;; but if other insns have DI operands (as is the case here), reload +;; must be able to generate or match a movdi. many testcases fail at +;; -O3 or -fssa if we don't have this. FIXME: Fix GCC... See +;; . +;; However, a patch from Richard Kenner (similar to the cause of +;; discussion at the URL above), indicates otherwise. See +;; . +;; The truth has IMO is not been decided yet, so check from time to +;; time by disabling the movdi patterns. + +;; To appease testcase gcc.c-torture/execute/920501-2.c (and others) at +;; -O0, we need a movdi as a temporary measure. Here's how things fail: +;; A cmpdi RTX needs reloading (global): +;; (insn 185 326 186 (set (cc0) +;; (compare (mem/f:DI (reg/v:SI 22) 0) +;; (const_int 1 [0x1]))) 4 {cmpdi} (nil) +;; (nil)) +;; Now, reg 22 is reloaded for input address, and the mem is also moved +;; out of the instruction (into a register), since one of the operands +;; must be a register. Reg 22 is reloaded (into reg 10), and the mem is +;; moved out and synthesized in SImode parts (reg 9, reg 10 - should be ok +;; wrt. overlap). The bad things happen with the synthesis in +;; emit_move_insn_1; the location where to substitute reg 10 is lost into +;; two new RTX:es, both still having reg 22. Later on, the left-over reg +;; 22 is recognized to have an equivalent in memory which is substituted +;; straight in, and we end up with an unrecognizable insn: +;; (insn 325 324 326 (set (reg:SI 9 r9) +;; (mem/f:SI (mem:SI (plus:SI (reg:SI 8 r8) +;; (const_int -84 [0xffffffac])) 0) 0)) -1 (nil) +;; (nil)) +;; which is the first part of the reloaded synthesized "movdi". +;; The right thing would be to add equivalent replacement locations for +;; insn with pseudos that need more reloading. The question is where. + +(define_expand "movdi" + [(set (match_operand:DI 0 "nonimmediate_operand" "") + (match_operand:DI 1 "general_operand" ""))] + "" +{ + if (MEM_P (operands[0]) + && operands[1] != const0_rtx + && (!TARGET_V32 || (!REG_P (operands[1]) && can_create_pseudo_p ()))) + operands[1] = copy_to_mode_reg (DImode, operands[1]); + + /* Some other ports (as of 2001-09-10 for example mcore and romp) also + prefer to split up constants early, like this. The testcase in + gcc.c-torture/execute/961213-1.c shows that CSE2 gets confused by the + resulting subreg sets when using the construct from mcore (as of FSF + CVS, version -r 1.5), and it believes that the high part (the last one + emitted) is the final value. */ + if ((CONST_INT_P (operands[1]) || GET_CODE (operands[1]) == CONST_DOUBLE) + && ! reload_completed + && ! 
reload_in_progress) + { + rtx insns; + rtx op0 = operands[0]; + rtx op1 = operands[1]; + + start_sequence (); + emit_move_insn (operand_subword (op0, 0, 1, DImode), + operand_subword (op1, 0, 1, DImode)); + emit_move_insn (operand_subword (op0, 1, 1, DImode), + operand_subword (op1, 1, 1, DImode)); + insns = get_insns (); + end_sequence (); + + emit_insn (insns); + DONE; + } +}) + +(define_insn_and_split "*movdi_insn_non_v32" + [(set (match_operand:DI 0 "nonimmediate_operand" "=r,rx,m") + (match_operand:DI 1 "general_operand" "rx,g,rxM"))] + "(register_operand (operands[0], DImode) + || register_operand (operands[1], DImode) + || operands[1] == const0_rtx) + && !TARGET_V32" + "#" + "&& reload_completed" + [(match_dup 2)] + "operands[2] = cris_split_movdx (operands);") + +;; Overlapping (but non-identical) source memory address and destination +;; register would be a compiler bug, so we don't have to specify that. +(define_insn "*movdi_v32" + [(set + (match_operand:DI 0 "nonimmediate_operand" "=r,rx,&r,>, m,r,x,m") + (match_operand:DI 1 "general_operand" "rxi,r>,m, rx,r,m,m,x"))] + "TARGET_V32" +{ + switch (which_alternative) + { + /* FIXME: 1) Use autoincrement where possible. 2) Have peephole2, + particularly for cases where the address register is dead. */ + case 5: + if (REGNO (operands[0]) == REGNO (XEXP (operands[1], 0))) + return "addq 4,%L1\;move.d %1,%H0\;subq 4,%L1\;move.d %1,%M0"; + gcc_assert (REGNO (operands[0]) + 1 == REGNO (XEXP (operands[1], 0))); + return "move.d [%L1+],%M0\;move.d [%L1],%H0"; + case 2: + /* We could do away with the addq if we knew the address-register + isn't ACR. If we knew the address-register is dead, we could do + away with the subq too. */ + return "move.d [%L1],%M0\;addq 4,%L1\;move.d [%L1],%H0\;subq 4,%L1"; + case 4: + return "move.d %M1,[%L0]\;addq 4,%L0\;move.d %H1,[%L0]\;subq 4,%L0"; + case 6: + return "move [%L1],%M0\;addq 4,%L1\;move [%L1],%H0\;subq 4,%L1"; + case 7: + return "move %M1,[%L0]\;addq 4,%L0\;move %H1,[%L0]\;subq 4,%L0"; + + default: + return "#"; + } +} + ;; The non-split cases clobber cc0 because of their adds and subs. + ;; Beware that NOTICE_UPDATE_CC is called before the forced split happens. + [(set_attr "cc" "*,*,clobber,*,clobber,clobber,*,*")]) + +;; Much like "*movdi_insn_non_v32". Overlapping registers and constants +;; is handled so much better in cris_split_movdx. +(define_split + [(set (match_operand:DI 0 "nonimmediate_operand" "") + (match_operand:DI 1 "general_operand" ""))] + "TARGET_V32 + && reload_completed + && (!MEM_P (operands[0]) || !REG_P (XEXP (operands[0], 0))) + && (!MEM_P (operands[1]) || !REG_P (XEXP (operands[1], 0)))" + [(match_dup 2)] + "operands[2] = cris_split_movdx (operands);") + +;; Side-effect patterns for move.S1 [rx=ry+rx.S2],rw +;; and move.S1 [rx=ry+i],rz +;; Then movs.S1 and movu.S1 for both modes. +;; +;; move.S1 [rx=ry+rz.S],rw avoiding when rx is ry, or rw is rx +;; FIXME: These could have anonymous mode for operand 0. +;; FIXME: Special registers' alternatives too. 
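As a rough C-level picture of what these combined accesses do (a sketch only; the helper and its names are hypothetical, and whether the compiler actually forms the side-effect variant depends on the surrounding code):

/* Load via base + scaled index while also keeping the computed
   address around for later use -- the "side effect" in the
   patterns below.  */
static int
load_and_keep_address (int *base, int idx, int **kept)
{
  int value = base[idx];   /* move.d [rx=ry+rz.d],rw   */
  *kept = &base[idx];      /* rx now holds &base[idx]  */
  return value;
}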
+ +(define_insn "*mov_side_biap" + [(set (match_operand:BW 0 "register_operand" "=r,r") + (mem:BW (plus:SI + (mult:SI (match_operand:SI 1 "register_operand" "r,r") + (match_operand:SI 2 "const_int_operand" "n,n")) + (match_operand:SI 3 "register_operand" "r,r")))) + (set (match_operand:SI 4 "register_operand" "=*3,r") + (plus:SI (mult:SI (match_dup 1) + (match_dup 2)) + (match_dup 3)))] + "cris_side_effect_mode_ok (MULT, operands, 4, 3, 1, 2, 0)" + "@ + # + move [%4=%3+%1%T2],%0") + +(define_insn "*mov_sidesisf_biap" + [(set (match_operand 0 "register_operand" "=r,r,x,x") + (mem (plus:SI + (mult:SI (match_operand:SI 1 "register_operand" "r,r,r,r") + (match_operand:SI 2 "const_int_operand" "n,n,n,n")) + (match_operand:SI 3 "register_operand" "r,r,r,r")))) + (set (match_operand:SI 4 "register_operand" "=*3,r,*3,r") + (plus:SI (mult:SI (match_dup 1) + (match_dup 2)) + (match_dup 3)))] + "GET_MODE_SIZE (GET_MODE (operands[0])) == UNITS_PER_WORD + && cris_side_effect_mode_ok (MULT, operands, 4, 3, 1, 2, 0)" + "@ + # + move.%s0 [%4=%3+%1%T2],%0 + # + move [%4=%3+%1%T2],%0") + +;; move.S1 [rx=ry+i],rz +;; avoiding move.S1 [ry=ry+i],rz +;; and move.S1 [rz=ry+i],rz +;; Note that "i" is allowed to be a register. + +(define_insn "*mov_side" + [(set (match_operand:BW 0 "register_operand" "=r,r,r,r,r") + (mem:BW + (plus:SI (match_operand:SI 1 "cris_bdap_operand" "%r,r,r,R,R") + (match_operand:SI 2 "cris_bdap_operand" "r>Rn,r,>Rn,r,r")))) + (set (match_operand:SI 3 "register_operand" "=*1,r,r,*2,r") + (plus:SI (match_dup 1) + (match_dup 2)))] + "cris_side_effect_mode_ok (PLUS, operands, 3, 1, 2, -1, 0)" +{ + if ((which_alternative == 0 || which_alternative == 3) + && (!CONST_INT_P (operands[2]) + || INTVAL (operands[2]) > 127 + || INTVAL (operands[2]) < -128 + || CRIS_CONST_OK_FOR_LETTER_P (INTVAL (operands[2]), 'N') + || CRIS_CONST_OK_FOR_LETTER_P (INTVAL (operands[2]), 'J'))) + return "#"; + if (which_alternative == 4) + return "move [%3=%2%S1],%0"; + return "move [%3=%1%S2],%0"; +}) + +(define_insn "*mov_sidesisf" + [(set (match_operand 0 "register_operand" "=r,r,r,x,x,x,r,r,x,x") + (mem + (plus:SI + (match_operand:SI 1 "cris_bdap_operand" "%r,r,r,r,r,r,R,R,R,R") + (match_operand:SI 2 "cris_bdap_operand" "r>Rn,r,>Rn,r>Rn,r,>Rn,r,r,r,r")))) + (set (match_operand:SI 3 "register_operand" "=*1,r,r,*1,r,r,*2,r,*2,r") + (plus:SI (match_dup 1) + (match_dup 2)))] + "GET_MODE_SIZE (GET_MODE (operands[0])) == UNITS_PER_WORD + && cris_side_effect_mode_ok (PLUS, operands, 3, 1, 2, -1, 0)" +{ + if ((which_alternative == 0 + || which_alternative == 3 + || which_alternative == 6 + || which_alternative == 8) + && (!CONST_INT_P (operands[2]) + || INTVAL (operands[2]) > 127 + || INTVAL (operands[2]) < -128 + || CRIS_CONST_OK_FOR_LETTER_P (INTVAL (operands[2]), 'N') + || CRIS_CONST_OK_FOR_LETTER_P (INTVAL (operands[2]), 'J'))) + return "#"; + if (which_alternative < 3) + return "move.%s0 [%3=%1%S2],%0"; + if (which_alternative == 7) + return "move.%s0 [%3=%2%S1],%0"; + if (which_alternative == 9) + return "move [%3=%2%S1],%0"; + return "move [%3=%1%S2],%0"; +}) + +;; Other way around; move to memory. + +;; Note that the condition (which for side-effect patterns is usually a +;; call to cris_side_effect_mode_ok), isn't consulted for register +;; allocation preferences -- constraints is the method for that. The +;; drawback is that we can't exclude register allocation to cause +;; "move.s rw,[rx=ry+rz.S]" when rw==rx without also excluding rx==ry or +;; rx==rz if we use an earlyclobber modifier for the constraint for rx. 
+;; Instead of that, we recognize and split the cases where dangerous +;; register combinations are spotted: where a register is set in the +;; side-effect, and used in the main insn. We don't handle the case where +;; the set in the main insn overlaps the set in the side-effect; that case +;; must be handled in gcc. We handle just the case where the set in the +;; side-effect overlaps the input operand of the main insn (i.e. just +;; moves to memory). + +;; +;; move.s rz,[ry=rx+rw.S] + +(define_insn "*mov_side_biap_mem" + [(set (mem:BW (plus:SI + (mult:SI (match_operand:SI 0 "register_operand" "r,r,r") + (match_operand:SI 1 "const_int_operand" "n,n,n")) + (match_operand:SI 2 "register_operand" "r,r,r"))) + (match_operand:BW 3 "register_operand" "r,r,r")) + (set (match_operand:SI 4 "register_operand" "=*2,!3,r") + (plus:SI (mult:SI (match_dup 0) + (match_dup 1)) + (match_dup 2)))] + "cris_side_effect_mode_ok (MULT, operands, 4, 2, 0, 1, 3)" + "@ + # + # + move %3,[%4=%2+%0%T1]") + +(define_insn "*mov_sidesisf_biap_mem" + [(set (mem (plus:SI + (mult:SI (match_operand:SI 0 "register_operand" "r,r,r,r,r,r") + (match_operand:SI 1 "const_int_operand" "n,n,n,n,n,n")) + (match_operand:SI 2 "register_operand" "r,r,r,r,r,r"))) + (match_operand 3 "register_operand" "r,r,r,x,x,x")) + (set (match_operand:SI 4 "register_operand" "=*2,!3,r,*2,!3,r") + (plus:SI (mult:SI (match_dup 0) + (match_dup 1)) + (match_dup 2)))] + "GET_MODE_SIZE (GET_MODE (operands[3])) == UNITS_PER_WORD + && cris_side_effect_mode_ok (MULT, operands, 4, 2, 0, 1, 3)" + "@ + # + # + move.%s3 %3,[%4=%2+%0%T1] + # + # + move %3,[%4=%2+%0%T1]") + +;; Split for the case above where we're out of luck with register +;; allocation (again, the condition isn't checked for that), and we end up +;; with the set in the side-effect getting the same register as the input +;; register. + +(define_split + [(parallel + [(set (match_operator + 6 "cris_mem_op" + [(plus:SI + (mult:SI (match_operand:SI 0 "register_operand" "") + (match_operand:SI 1 "const_int_operand" "")) + (match_operand:SI 2 "register_operand" ""))]) + (match_operand 3 "register_operand" "")) + (set (match_operand:SI 4 "register_operand" "") + (plus:SI (mult:SI (match_dup 0) + (match_dup 1)) + (match_dup 2)))])] + "reload_completed && reg_overlap_mentioned_p (operands[4], operands[3])" + [(set (match_dup 5) (match_dup 3)) + (set (match_dup 4) (match_dup 2)) + (set (match_dup 4) + (plus:SI (mult:SI (match_dup 0) + (match_dup 1)) + (match_dup 4)))] + "operands[5] + = replace_equiv_address (operands[6], + gen_rtx_PLUS (SImode, + gen_rtx_MULT (SImode, + operands[0], + operands[1]), + operands[2]));") + +;; move.s rx,[ry=rz+i] +;; FIXME: These could have anonymous mode for operand 2. 
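The store direction can be pictured the same way; a minimal C sketch (hypothetical helper, illustrative only), writing through base + offset while keeping the address:

/* Store counterpart of the load sketch earlier: the value goes
   out through base + offs and the computed address is kept.  */
static void
store_and_keep_address (int *base, int offs, int value, int **kept)
{
  base[offs] = value;      /* move.d rz,[rx=ry+i]   */
  *kept = &base[offs];     /* rx keeps the address  */
}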
+ +;; QImode + +(define_insn "*mov_side_mem" + [(set (mem:BW + (plus:SI (match_operand:SI 0 "cris_bdap_operand" "%r,r,r,r,R,R,R") + (match_operand:SI 1 "cris_bdap_operand" "r>Rn,r>Rn,r,>Rn,r,r,r"))) + (match_operand:BW 2 "register_operand" "r,r,r,r,r,r,r")) + (set (match_operand:SI 3 "register_operand" "=*0,!*2,r,r,*1,!*2,r") + (plus:SI (match_dup 0) + (match_dup 1)))] + "cris_side_effect_mode_ok (PLUS, operands, 3, 0, 1, -1, 2)" +{ + if ((which_alternative == 0 || which_alternative == 4) + && (!CONST_INT_P (operands[1]) + || INTVAL (operands[1]) > 127 + || INTVAL (operands[1]) < -128 + || CRIS_CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'N') + || CRIS_CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'J'))) + return "#"; + if (which_alternative == 1 || which_alternative == 5) + return "#"; + if (which_alternative == 6) + return "move.%s2 %2,[%3=%1%S0]"; + return "move %2,[%3=%0%S1]"; +}) + +;; SImode + +(define_insn "*mov_sidesisf_mem" + [(set (mem + (plus:SI + (match_operand:SI + 0 "cris_bdap_operand" + "%r, r, r,r, r, r,r, R,R, R,R, R") + (match_operand:SI + 1 "cris_bdap_operand" + "r>Rn,r>Rn,r,>Rn,r>Rn,r,>Rn,r,r, r,r, r"))) + (match_operand 2 "register_operand" + "r, r, r,r, x, x,x, r,r, r,x, x")) + (set (match_operand:SI 3 "register_operand" + "=*0,!2, r,r, *0, r,r, *1,!*2,r,*1,r") + (plus:SI (match_dup 0) + (match_dup 1)))] + "GET_MODE_SIZE (GET_MODE (operands[2])) == UNITS_PER_WORD + && cris_side_effect_mode_ok (PLUS, operands, 3, 0, 1, -1, 2)" +{ + if ((which_alternative == 0 || which_alternative == 4) + && (!CONST_INT_P (operands[1]) + || INTVAL (operands[1]) > 127 + || INTVAL (operands[1]) < -128 + || CRIS_CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'N') + || CRIS_CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'J'))) + return "#"; + if (which_alternative == 1 + || which_alternative == 7 + || which_alternative == 8 + || which_alternative == 10) + return "#"; + if (which_alternative < 4) + return "move.%s2 %2,[%3=%0%S1]"; + if (which_alternative == 9) + return "move.%s2 %2,[%3=%1%S0]"; + if (which_alternative == 11) + return "move %2,[%3=%1%S0]"; + return "move %2,[%3=%0%S1]"; +}) + +;; Like the biap case, a split where the set in the side-effect gets the +;; same register as the input register to the main insn, since the +;; condition isn't checked at register allocation. + +(define_split + [(parallel + [(set (match_operator + 4 "cris_mem_op" + [(plus:SI + (match_operand:SI 0 "cris_bdap_operand" "") + (match_operand:SI 1 "cris_bdap_operand" ""))]) + (match_operand 2 "register_operand" "")) + (set (match_operand:SI 3 "register_operand" "") + (plus:SI (match_dup 0) (match_dup 1)))])] + "reload_completed && reg_overlap_mentioned_p (operands[3], operands[2])" + [(set (match_dup 4) (match_dup 2)) + (set (match_dup 3) (match_dup 0)) + (set (match_dup 3) (plus:SI (match_dup 3) (match_dup 1)))] + "") + +;; Clear memory side-effect patterns. It is hard to get to the mode if +;; the MEM was anonymous, so there will be one for each mode. 
+ +;; clear.[bwd] [ry=rx+rw.s2] + +(define_insn "*clear_side_biap" + [(set (mem:BWD (plus:SI + (mult:SI (match_operand:SI 0 "register_operand" "r,r") + (match_operand:SI 1 "const_int_operand" "n,n")) + (match_operand:SI 2 "register_operand" "r,r"))) + (const_int 0)) + (set (match_operand:SI 3 "register_operand" "=*2,r") + (plus:SI (mult:SI (match_dup 0) + (match_dup 1)) + (match_dup 2)))] + "cris_side_effect_mode_ok (MULT, operands, 3, 2, 0, 1, -1)" + "@ + # + clear [%3=%2+%0%T1]") + +;; clear.[bwd] [ry=rz+i] + +(define_insn "*clear_side" + [(set (mem:BWD + (plus:SI (match_operand:SI 0 "cris_bdap_operand" "%r,r,r,R,R") + (match_operand:SI 1 "cris_bdap_operand" "r>Rn,r,>Rn,r,r"))) + (const_int 0)) + (set (match_operand:SI 2 "register_operand" "=*0,r,r,*1,r") + (plus:SI (match_dup 0) + (match_dup 1)))] + "cris_side_effect_mode_ok (PLUS, operands, 2, 0, 1, -1, -1)" +{ + if ((which_alternative == 0 || which_alternative == 3) + && (!CONST_INT_P (operands[1]) + || INTVAL (operands[1]) > 127 + || INTVAL (operands[1]) < -128 + || CRIS_CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'N') + || CRIS_CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'J'))) + return "#"; + if (which_alternative == 4) + return "clear [%2=%1%S0]"; + return "clear [%2=%0%S1]"; +}) + +;; Normal move patterns from SI on. + +(define_expand "movsi" + [(set + (match_operand:SI 0 "nonimmediate_operand" "") + (match_operand:SI 1 "cris_general_operand_or_symbol" ""))] + "" +{ + /* If the output goes to a MEM, make sure we have zero or a register as + input. */ + if (MEM_P (operands[0]) + && ! REG_S_P (operands[1]) + && operands[1] != const0_rtx + && can_create_pseudo_p ()) + operands[1] = force_reg (SImode, operands[1]); + + /* If we're generating PIC and have an incoming symbol, validize it to a + general operand or something that will match a special pattern. + + FIXME: Do we *have* to recognize anything that would normally be a + valid symbol? Can we exclude global PIC addresses with an added + offset? */ + if (flag_pic + && CONSTANT_ADDRESS_P (operands[1]) + && !cris_valid_pic_const (operands[1], false)) + { + enum cris_pic_symbol_type t = cris_pic_symbol_type_of (operands[1]); + + gcc_assert (t != cris_no_symbol); + + if (! REG_S_P (operands[0])) + { + /* We must have a register as destination for what we're about to + do, and for the patterns we generate. */ + CRIS_ASSERT (can_create_pseudo_p ()); + operands[1] = force_reg (SImode, operands[1]); + } + else + { + /* FIXME: add a REG_EQUAL (or is it REG_EQUIV) note to the + destination register for the symbol. It might not be + worth it. Measure. */ + crtl->uses_pic_offset_table = 1; + if (t == cris_rel_symbol) + { + /* Change a "move.d sym(+offs),rN" into (allocate register rM) + for pre-v32: + "move.d (const (plus (unspec [sym] + CRIS_UNSPEC_GOTREL) offs)),rM" "add.d rPIC,rM,rN" + and for v32: + "move.d (const (plus (unspec [sym] + CRIS_UNSPEC_PCREL) offs)),rN". */ + rtx tem, rm, rn = operands[0]; + rtx sym = GET_CODE (operands[1]) != CONST + ? operands[1] : get_related_value (operands[1]); + HOST_WIDE_INT offs = get_integer_term (operands[1]); + + gcc_assert (can_create_pseudo_p ()); + + if (TARGET_V32) + { + tem = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, sym), + CRIS_UNSPEC_PCREL); + if (offs != 0) + tem = plus_constant (tem, offs); + rm = rn; + emit_move_insn (rm, gen_rtx_CONST (Pmode, tem)); + } + else + { + /* We still uses GOT-relative addressing for + pre-v32. 
*/ + crtl->uses_pic_offset_table = 1; + tem = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, sym), + CRIS_UNSPEC_GOTREL); + if (offs != 0) + tem = plus_constant (tem, offs); + rm = gen_reg_rtx (Pmode); + emit_move_insn (rm, gen_rtx_CONST (Pmode, tem)); + if (expand_binop (Pmode, add_optab, rm, pic_offset_table_rtx, + rn, 0, OPTAB_LIB_WIDEN) != rn) + internal_error ("expand_binop failed in movsi gotrel"); + } + DONE; + } + else if (t == cris_got_symbol) + { + /* Change a "move.d sym,rN" into (allocate register rM, rO) + "move.d (const (unspec [sym] CRIS_UNSPEC_GOTREAD)),rM" + "add.d rPIC,rM,rO", "move.d [rO],rN" with + the memory access marked as read-only. */ + rtx tem, mem, rm, ro, rn = operands[0]; + gcc_assert (can_create_pseudo_p ()); + tem = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, operands[1]), + CRIS_UNSPEC_GOTREAD); + rm = gen_reg_rtx (Pmode); + emit_move_insn (rm, gen_rtx_CONST (Pmode, tem)); + ro = gen_reg_rtx (Pmode); + if (expand_binop (Pmode, add_optab, rm, pic_offset_table_rtx, + ro, 0, OPTAB_LIB_WIDEN) != ro) + internal_error ("expand_binop failed in movsi got"); + mem = gen_rtx_MEM (Pmode, ro); + + /* This MEM doesn't alias anything. Whether it + aliases other same symbols is unimportant. */ + set_mem_alias_set (mem, new_alias_set ()); + MEM_NOTRAP_P (mem) = 1; + + /* We can set the GOT memory read of a non-called symbol + to readonly, but not that of a call symbol, as those + are subject to lazy evaluation and usually have the value + changed from the first call to the second (but + constant thereafter). */ + MEM_READONLY_P (mem) = 1; + emit_move_insn (rn, mem); + DONE; + } + else + { + /* We get here when we have to change something that would + be recognizable if it wasn't PIC. A ``sym'' is ok for + PIC symbols both with and without a GOT entry. And ``sym + + offset'' is ok for local symbols, so the only thing it + could be, is a global symbol with an offset. Check and + abort if not. */ + rtx reg = gen_reg_rtx (Pmode); + rtx sym = get_related_value (operands[1]); + HOST_WIDE_INT offs = get_integer_term (operands[1]); + + gcc_assert (can_create_pseudo_p () + && t == cris_got_symbol_needing_fixup + && sym != NULL_RTX && offs != 0); + + emit_move_insn (reg, sym); + if (expand_binop (SImode, add_optab, reg, + GEN_INT (offs), operands[0], 0, + OPTAB_LIB_WIDEN) != operands[0]) + internal_error ("expand_binop failed in movsi got+offs"); + DONE; + } + } + } +}) + +(define_insn "*movsi_got_load" + [(set (reg:SI CRIS_GOT_REGNUM) (unspec:SI [(const_int 0)] CRIS_UNSPEC_GOT))] + "flag_pic" +{ + return TARGET_V32 + ? "lapc _GLOBAL_OFFSET_TABLE_,%:" + : "move.d $pc,%:\;sub.d .:GOTOFF,%:"; +} + [(set_attr "cc" "clobber")]) + +(define_insn "*movsi_internal" + [(set + (match_operand:SI 0 "nonimmediate_operand" + "=r,r, r,Q>,r,Q>,g,r,r, r,g,rQ>,x, m,x") + (match_operand:SI 1 "cris_general_operand_or_pic_source" + "r,Q>,M,M, I,r, M,n,!S,g,r,x, rQ>,x,gi"))] + ;; Note that we prefer not to use the S alternative (if for some reason + ;; it competes with others) above, but g matches S. + "" +{ + /* Better to have c-switch here; it is worth it to optimize the size of + move insns. The alternative would be to try to find more constraint + letters. FIXME: Check again. It seems this could shrink a bit. 
*/ + switch (which_alternative) + { + case 9: + if (TARGET_V32) + { + if (!flag_pic + && (GET_CODE (operands[1]) == SYMBOL_REF + || GET_CODE (operands[1]) == LABEL_REF + || GET_CODE (operands[1]) == CONST)) + { + /* FIXME: Express this through (set_attr cc none) instead, + since we can't express the ``none'' at this point. FIXME: + Use lapc for everything except const_int and when next cc0 + user would want the flag setting. */ + CC_STATUS_INIT; + return "lapc %1,%0"; + } + if (flag_pic == 1 + && GET_CODE (operands[1]) == CONST + && GET_CODE (XEXP (operands[1], 0)) == UNSPEC + && XINT (XEXP (operands[1], 0), 1) == CRIS_UNSPEC_GOTREAD) + return "movu.w %1,%0"; + } + /* FALLTHROUGH */ + case 0: + case 1: + case 5: + case 10: + return "move.d %1,%0"; + + case 11: + case 12: + case 13: + case 14: + return "move %d1,%0"; + + case 2: + case 3: + case 6: + return "clear.d %0"; + + /* Constants -32..31 except 0. */ + case 4: + return "moveq %1,%0"; + + /* We can win a little on constants -32768..-33, 32..65535. */ + case 7: + if (INTVAL (operands[1]) > 0 && INTVAL (operands[1]) < 65536) + { + if (INTVAL (operands[1]) < 256) + return "movu.b %1,%0"; + return "movu.w %1,%0"; + } + else if (INTVAL (operands[1]) >= -32768 && INTVAL (operands[1]) < 32768) + { + if (INTVAL (operands[1]) >= -128 && INTVAL (operands[1]) < 128) + return "movs.b %1,%0"; + return "movs.w %1,%0"; + } + return "move.d %1,%0"; + + case 8: + { + rtx tem = operands[1]; + gcc_assert (GET_CODE (tem) == CONST); + tem = XEXP (tem, 0); + if (GET_CODE (tem) == PLUS + && GET_CODE (XEXP (tem, 0)) == UNSPEC + && (XINT (XEXP (tem, 0), 1) == CRIS_UNSPEC_GOTREL + || XINT (XEXP (tem, 0), 1) == CRIS_UNSPEC_PCREL) + && CONST_INT_P (XEXP (tem, 1))) + tem = XEXP (tem, 0); + gcc_assert (GET_CODE (tem) == UNSPEC); + switch (XINT (tem, 1)) + { + case CRIS_UNSPEC_GOTREAD: + case CRIS_UNSPEC_PLTGOTREAD: + /* Using sign-extend mostly to be consistent with the + indexed addressing mode. */ + if (flag_pic == 1) + return "movs.w %1,%0"; + return "move.d %1,%0"; + + case CRIS_UNSPEC_GOTREL: + case CRIS_UNSPEC_PLT_GOTREL: + gcc_assert (!TARGET_V32); + return "move.d %1,%0"; + + case CRIS_UNSPEC_PCREL: + case CRIS_UNSPEC_PLT_PCREL: + gcc_assert (TARGET_V32); + return "lapc %1,%0"; + + default: + gcc_unreachable (); + } + } + default: + return "BOGUS: %1 to %0"; + } +} + [(set_attr "slottable" "yes,yes,yes,yes,yes,yes,no,no,no,no,no,yes,yes,no,no") + (set_attr "cc" "*,*,*,*,*,*,*,*,*,*,*,none,none,none,none")]) + +;; Extend operations with side-effect from mem to register, using +;; MOVS/MOVU. These are from mem to register only. +;; +;; [rx=ry+rz.S] +;; +;; QImode to HImode +;; +;; FIXME: Can we omit extend to HImode, since GCC should truncate for +;; HImode by itself? Perhaps use only anonymous modes? 
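For orientation, the extensions these patterns fold into the load can be written as plain C (illustrative only; the patterns additionally perform the side-effect address update discussed above):

/* Sign- and zero-extending loads, as plain C.  */
static long sign_extend_byte (const signed char *p) { return *p; }             /* movs.b */
static long sign_extend_word (const short *p) { return *p; }                   /* movs.w */
static unsigned long zero_extend_byte (const unsigned char *p) { return *p; }  /* movu.b */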
+ +(define_insn "*ext_sideqihi_biap" + [(set (match_operand:HI 0 "register_operand" "=r,r") + (match_operator:HI + 5 "cris_extend_operator" + [(mem:QI (plus:SI + (mult:SI (match_operand:SI 1 "register_operand" "r,r") + (match_operand:SI 2 "const_int_operand" "n,n")) + (match_operand:SI 3 "register_operand" "r,r")))])) + (set (match_operand:SI 4 "register_operand" "=*3,r") + (plus:SI (mult:SI (match_dup 1) + (match_dup 2)) + (match_dup 3)))] + "cris_side_effect_mode_ok (MULT, operands, 4, 3, 1, 2, 0)" + "@ + # + mov%e5.%m5 [%4=%3+%1%T2],%0") + +(define_insn "*ext_sidesi_biap" + [(set (match_operand:SI 0 "register_operand" "=r,r") + (match_operator:SI + 5 "cris_extend_operator" + [(mem:BW (plus:SI + (mult:SI (match_operand:SI 1 "register_operand" "r,r") + (match_operand:SI 2 "const_int_operand" "n,n")) + (match_operand:SI 3 "register_operand" "r,r")))])) + (set (match_operand:SI 4 "register_operand" "=*3,r") + (plus:SI (mult:SI (match_dup 1) + (match_dup 2)) + (match_dup 3)))] + "cris_side_effect_mode_ok (MULT, operands, 4, 3, 1, 2, 0)" + "@ + # + mov%e5 [%4=%3+%1%T2],%0") + +;; Same but [rx=ry+i] + +;; QImode to HImode + +(define_insn "*ext_sideqihi" + [(set (match_operand:HI 0 "register_operand" "=r,r,r,r,r") + (match_operator:HI + 4 "cris_extend_operator" + [(mem:QI (plus:SI + (match_operand:SI 1 "cris_bdap_operand" "%r,r,r,R,R") + (match_operand:SI 2 "cris_bdap_operand" "r>Rn,r,>Rn,r,r")))])) + (set (match_operand:SI 3 "register_operand" "=*1,r,r,*2,r") + (plus:SI (match_dup 1) + (match_dup 2)))] + "cris_side_effect_mode_ok (PLUS, operands, 3, 1, 2, -1, 0)" +{ + if ((which_alternative == 0 || which_alternative == 3) + && (!CONST_INT_P (operands[2]) + || INTVAL (operands[2]) > 127 + || INTVAL (operands[2]) < -128 + || CRIS_CONST_OK_FOR_LETTER_P (INTVAL (operands[2]), 'N') + || CRIS_CONST_OK_FOR_LETTER_P (INTVAL (operands[2]), 'J'))) + return "#"; + if (which_alternative == 4) + return "mov%e4.%m4 [%3=%2%S1],%0"; + return "mov%e4.%m4 [%3=%1%S2],%0"; +}) + +(define_insn "*ext_sidesi" + [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r") + (match_operator:SI + 4 "cris_extend_operator" + [(mem:BW (plus:SI + (match_operand:SI 1 "cris_bdap_operand" "%r,r,r,R,R") + (match_operand:SI 2 "cris_bdap_operand" "r>Rn,r,>Rn,r,r")))])) + (set (match_operand:SI 3 "register_operand" "=*1,r,r,*2,r") + (plus:SI (match_dup 1) + (match_dup 2)))] + "cris_side_effect_mode_ok (PLUS, operands, 3, 1, 2, -1, 0)" +{ + if ((which_alternative == 0 || which_alternative == 3) + && (!CONST_INT_P (operands[2]) + || INTVAL (operands[2]) > 127 + || INTVAL (operands[2]) < -128 + || CRIS_CONST_OK_FOR_LETTER_P (INTVAL (operands[2]), 'N') + || CRIS_CONST_OK_FOR_LETTER_P (INTVAL (operands[2]), 'J'))) + return "#"; + if (which_alternative == 4) + return "mov%e4 [%3=%2%S1],%0"; + return "mov%e4 [%3=%1%S2],%0"; +}) + +;; FIXME: See movsi. 
+ +(define_insn "movhi" + [(set + (match_operand:HI 0 "nonimmediate_operand" "=r,r, r,Q>,r,Q>,r,r,r,g,g,r,r,x") + (match_operand:HI 1 "general_operand" "r,Q>,M,M, I,r, L,O,n,M,r,g,x,r"))] + "" +{ + switch (which_alternative) + { + case 0: + case 1: + case 5: + case 10: + case 11: + return "move.w %1,%0"; + case 12: + case 13: + return "move %1,%0"; + case 2: + case 3: + case 9: + return "clear.w %0"; + case 4: + return "moveq %1,%0"; + case 6: + case 8: + if (INTVAL (operands[1]) < 256 && INTVAL (operands[1]) >= -128) + { + if (INTVAL (operands[1]) > 0) + return "movu.b %1,%0"; + return "movs.b %1,%0"; + } + return "move.w %1,%0"; + case 7: + return "movEq %b1,%0"; + default: + return "BOGUS: %1 to %0"; + } +} + [(set_attr "slottable" "yes,yes,yes,yes,yes,yes,no,yes,no,no,no,no,yes,yes") + (set_attr "cc" "*,*,none,none,*,none,*,clobber,*,none,none,*,none,none")]) + +(define_insn "movstricthi" + [(set + (strict_low_part + (match_operand:HI 0 "nonimmediate_operand" "+r,r, r,Q>,Q>,g,r,g")) + (match_operand:HI 1 "general_operand" "r,Q>,M,M, r, M,g,r"))] + "" + "@ + move.w %1,%0 + move.w %1,%0 + clear.w %0 + clear.w %0 + move.w %1,%0 + clear.w %0 + move.w %1,%0 + move.w %1,%0" + [(set_attr "slottable" "yes,yes,yes,yes,yes,no,no,no")]) + +(define_expand "reload_in" + [(set (match_operand:BW 2 "register_operand" "=r") + (match_operand:BW 1 "memory_operand" "m")) + (set (match_operand:BW 0 "register_operand" "=x") + (match_dup 2))] + "" + "") + +(define_expand "reload_out" + [(set (match_operand:BW 2 "register_operand" "=&r") + (match_operand:BW 1 "register_operand" "x")) + (set (match_operand:BW 0 "memory_operand" "=m") + (match_dup 2))] + "" + "") + +(define_insn "movqi" + [(set (match_operand:QI 0 "nonimmediate_operand" "=r,Q>,r, r,Q>,r,g,g,r,r,r,x") + (match_operand:QI 1 "general_operand" "r,r, Q>,M,M, I,M,r,O,g,x,r"))] + "" + "@ + move.b %1,%0 + move.b %1,%0 + move.b %1,%0 + clear.b %0 + clear.b %0 + moveq %1,%0 + clear.b %0 + move.b %1,%0 + moveq %b1,%0 + move.b %1,%0 + move %1,%0 + move %1,%0" + [(set_attr "slottable" "yes,yes,yes,yes,yes,yes,no,no,yes,no,yes,yes") + (set_attr "cc" "*,*,*,*,*,*,*,*,clobber,*,none,none")]) + +(define_insn "movstrictqi" + [(set (strict_low_part + (match_operand:QI 0 "nonimmediate_operand" "+r,Q>,r, r,Q>,g,g,r")) + (match_operand:QI 1 "general_operand" "r,r, Q>,M,M, M,r,g"))] + "" + "@ + move.b %1,%0 + move.b %1,%0 + move.b %1,%0 + clear.b %0 + clear.b %0 + clear.b %0 + move.b %1,%0 + move.b %1,%0" + [(set_attr "slottable" "yes,yes,yes,yes,yes,no,no,no")]) + +;; The valid "quick" bit-patterns are, except for 0.0, denormalized +;; values REALLY close to 0, and some NaN:s (I think; their exponent is +;; all ones); the worthwhile one is "0.0". +;; It will use clear, so we know ALL types of immediate 0 never change cc. + +(define_insn "movsf" + [(set (match_operand:SF 0 "nonimmediate_operand" "=r,Q>,r, r,Q>,g,g,r,r,x,Q>,m,x, x") + (match_operand:SF 1 "general_operand" "r,r, Q>,G,G, G,r,g,x,r,x, x,Q>,g"))] + "" + "@ + move.d %1,%0 + move.d %1,%0 + move.d %1,%0 + clear.d %0 + clear.d %0 + clear.d %0 + move.d %1,%0 + move.d %1,%0 + move %1,%0 + move %1,%0 + move %1,%0 + move %1,%0 + move %1,%0 + move %1,%0" + [(set_attr "slottable" "yes,yes,yes,yes,yes,no,no,no,yes,yes,yes,no,yes,no")]) + +;; Movem patterns. Primarily for use in function prologue and epilogue. +;; The V32 variants have an ordering matching the expectations of the +;; standard names "load_multiple" and "store_multiple"; pre-v32 movem +;; store R0 in the highest memory location. 
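As a sketch of what the standard names are expected to mean (illustrative C only; regs[] stands for r0 upwards, which is what the expanders below require, and mem points at consecutive words in memory):

/* Model of "load_multiple": register 0 is loaded from the lowest
   address and the rest follow in ascending order, the ordering the
   V32 movem provides directly.  */
static void
load_multiple_model (unsigned long regs[], const unsigned long *mem, int nregs)
{
  int i;
  for (i = 0; i < nregs; i++)
    regs[i] = mem[i];
}

"store_multiple" is simply the reverse direction with the same ordering.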
+ +(define_expand "load_multiple" + [(match_operand:SI 0 "register_operand" "") + (match_operand:SI 1 "memory_operand" "") + (match_operand:SI 2 "const_int_operand" "")] + "TARGET_V32" +{ + rtx indreg; + + /* Apparently the predicate isn't checked, so we need to do so + manually. Once happened for libstdc++-v3 locale_facets.tcc. */ + if (!MEM_P (operands[1])) + FAIL; + + indreg = XEXP (operands[1], 0); + + if (GET_CODE (indreg) == POST_INC) + indreg = XEXP (indreg, 0); + if (!REG_P (indreg) + || GET_CODE (operands[2]) != CONST_INT + || !REG_P (operands[0]) + || REGNO (operands[0]) != 0 + || INTVAL (operands[2]) > CRIS_SP_REGNUM + || (int) REGNO (indreg) < INTVAL (operands[2])) + FAIL; + gcc_unreachable (); + emit_insn (cris_gen_movem_load (operands[1], operands[2], 0)); + DONE; +}) + +(define_expand "store_multiple" + [(match_operand:SI 0 "memory_operand" "") + (match_operand:SI 1 "register_operand" "") + (match_operand:SI 2 "const_int_operand" "")] + "TARGET_V32" +{ + rtx indreg; + + /* See load_multiple. */ + if (!MEM_P (operands[0])) + FAIL; + + indreg = XEXP (operands[0], 0); + + if (GET_CODE (indreg) == POST_INC) + indreg = XEXP (indreg, 0); + if (!REG_P (indreg) + || GET_CODE (operands[2]) != CONST_INT + || !REG_P (operands[1]) + || REGNO (operands[1]) != 0 + || INTVAL (operands[2]) > CRIS_SP_REGNUM + || (int) REGNO (indreg) < INTVAL (operands[2])) + FAIL; + gcc_unreachable (); + cris_emit_movem_store (operands[0], operands[2], 0, false); + DONE; +}) + +(define_insn "*cris_load_multiple" + [(match_parallel 0 "cris_load_multiple_op" + [(set (match_operand:SI 1 "register_operand" "=r,r") + (match_operand:SI 2 "memory_operand" "Q,m"))])] + "" + "movem %O0,%o0" + [(set_attr "cc" "none") + (set_attr "slottable" "yes,no") + ;; Not true, but setting the length to 0 causes return sequences (ret + ;; movem) to have the cost they had when (return) included the movem + ;; and reduces the performance penalty taken for needing to emit an + ;; epilogue (in turn copied by bb-reorder) instead of return patterns. + ;; FIXME: temporary change until all insn lengths are correctly + ;; described. FIXME: have better target control over bb-reorder. + (set_attr "length" "0")]) + +(define_insn "*cris_store_multiple" + [(match_parallel 0 "cris_store_multiple_op" + [(set (match_operand:SI 2 "memory_operand" "=Q,m") + (match_operand:SI 1 "register_operand" "r,r"))])] + "" + "movem %o0,%O0" + [(set_attr "cc" "none") + (set_attr "slottable" "yes,no")]) + + +;; Sign- and zero-extend insns with standard names. +;; Those for integer source operand are ordered with the widest source +;; type first. + +;; Sign-extend. + +(define_insn "extendsidi2" + [(set (match_operand:DI 0 "register_operand" "=r") + (sign_extend:DI (match_operand:SI 1 "general_operand" "g")))] + "" + "move.d %1,%M0\;smi %H0\;neg.d %H0,%H0") + +(define_insn "extenddi2" + [(set (match_operand:DI 0 "register_operand" "=r") + (sign_extend:DI (match_operand:BW 1 "general_operand" "g")))] + "" + "movs %1,%M0\;smi %H0\;neg.d %H0,%H0") + +(define_insn "extendsi2" + [(set (match_operand:SI 0 "register_operand" "=r,r,r") + (sign_extend:SI (match_operand:BW 1 "general_operand" "r,Q>,g")))] + "" + "movs %1,%0" + [(set_attr "slottable" "yes,yes,no")]) + +;; To do a byte->word extension, extend to dword, exept that the top half +;; of the register will be clobbered. FIXME: Perhaps this is not needed. 
+ +(define_insn "extendqihi2" + [(set (match_operand:HI 0 "register_operand" "=r,r,r") + (sign_extend:HI (match_operand:QI 1 "general_operand" "r,Q>,g")))] + "" + "movs.b %1,%0" + [(set_attr "slottable" "yes,yes,no")]) + + +;; Zero-extend. The DImode ones are synthesized by gcc, so we don't +;; specify them here. + +(define_insn "zero_extendsi2" + [(set (match_operand:SI 0 "register_operand" "=r,r,r") + (zero_extend:SI + (match_operand:BW 1 "nonimmediate_operand" "r,Q>,m")))] + "" + "movu %1,%0" + [(set_attr "slottable" "yes,yes,no")]) + +;; Same comment as sign-extend QImode to HImode above applies. + +(define_insn "zero_extendqihi2" + [(set (match_operand:HI 0 "register_operand" "=r,r,r") + (zero_extend:HI + (match_operand:QI 1 "nonimmediate_operand" "r,Q>,m")))] + "" + "movu.b %1,%0" + [(set_attr "slottable" "yes,yes,no")]) + +;; All kinds of arithmetic and logical instructions. +;; +;; First, anonymous patterns to match addressing modes with +;; side-effects. +;; +;; op.S [rx=ry+I],rz; (add, sub, or, and, bound). +;; +;; [rx=ry+rz.S] + +(define_insn "*op_side_biap" + [(set (match_operand:BWD 0 "register_operand" "=r,r") + (match_operator:BWD + 6 "cris_orthogonal_operator" + [(match_operand:BWD 1 "register_operand" "0,0") + (mem:BWD (plus:SI + (mult:SI (match_operand:SI 2 "register_operand" "r,r") + (match_operand:SI 3 "const_int_operand" "n,n")) + (match_operand:SI 4 "register_operand" "r,r")))])) + (set (match_operand:SI 5 "register_operand" "=*4,r") + (plus:SI (mult:SI (match_dup 2) + (match_dup 3)) + (match_dup 4)))] + "cris_side_effect_mode_ok (MULT, operands, 5, 4, 2, 3, 0)" + "@ + # + %x6 [%5=%4+%2%T3],%0") + +;; [rx=ry+i] ([%4=%2+%3]) + +(define_insn "*op_side" + [(set (match_operand:BWD 0 "register_operand" "=r,r,r,r,r") + (match_operator:BWD + 5 "cris_orthogonal_operator" + [(match_operand:BWD 1 "register_operand" "0,0,0,0,0") + (mem:BWD (plus:SI + (match_operand:SI 2 "cris_bdap_operand" "%r,r,r,R,R") + (match_operand:SI 3 "cris_bdap_operand" "r>Rn,r,>Rn,r,r")))])) + (set (match_operand:SI 4 "register_operand" "=*2,r,r,*3,r") + (plus:SI (match_dup 2) + (match_dup 3)))] + "cris_side_effect_mode_ok (PLUS, operands, 4, 2, 3, -1, 0)" +{ + if ((which_alternative == 0 || which_alternative == 3) + && (!CONST_INT_P (operands[3]) + || INTVAL (operands[3]) > 127 + || INTVAL (operands[3]) < -128 + || CRIS_CONST_OK_FOR_LETTER_P (INTVAL (operands[3]), 'N') + || CRIS_CONST_OK_FOR_LETTER_P (INTVAL (operands[3]), 'J'))) + return "#"; + if (which_alternative == 4) + return "%x5.%s0 [%4=%3%S2],%0"; + return "%x5 [%4=%2%S3],%0"; +}) + +;; To match all cases for commutative operations we may have to have the +;; following pattern for add, or & and. I do not know really, but it does +;; not break anything. +;; +;; FIXME: This really ought to be checked. +;; +;; op.S [rx=ry+I],rz; +;; +;; [rx=ry+rz.S] + +(define_insn "*op_swap_side_biap" + [(set (match_operand:BWD 0 "register_operand" "=r,r") + (match_operator:BWD + 6 "cris_commutative_orth_op" + [(mem:BWD (plus:SI + (mult:SI (match_operand:SI 2 "register_operand" "r,r") + (match_operand:SI 3 "const_int_operand" "n,n")) + (match_operand:SI 4 "register_operand" "r,r"))) + (match_operand:BWD 1 "register_operand" "0,0")])) + (set (match_operand:SI 5 "register_operand" "=*4,r") + (plus:SI (mult:SI (match_dup 2) + (match_dup 3)) + (match_dup 4)))] + "cris_side_effect_mode_ok (MULT, operands, 5, 4, 2, 3, 0)" + "@ + # + %x6 [%5=%4+%2%T3],%0") + +;; [rx=ry+i] ([%4=%2+%3]) +;; FIXME: These could have anonymous mode for operand 0. 
+ +;; QImode + +(define_insn "*op_swap_side" + [(set (match_operand:BWD 0 "register_operand" "=r,r,r,r,r") + (match_operator:BWD + 5 "cris_commutative_orth_op" + [(mem:BWD + (plus:SI (match_operand:SI 2 "cris_bdap_operand" "%r,r,r,R,R") + (match_operand:SI 3 "cris_bdap_operand" "r>Rn,r,>Rn,r,r"))) + (match_operand:BWD 1 "register_operand" "0,0,0,0,0")])) + (set (match_operand:SI 4 "register_operand" "=*2,r,r,*3,r") + (plus:SI (match_dup 2) + (match_dup 3)))] + "cris_side_effect_mode_ok (PLUS, operands, 4, 2, 3, -1, 0)" +{ + if ((which_alternative == 0 || which_alternative == 3) + && (!CONST_INT_P (operands[3]) + || INTVAL (operands[3]) > 127 + || INTVAL (operands[3]) < -128 + || CRIS_CONST_OK_FOR_LETTER_P (INTVAL (operands[3]), 'N') + || CRIS_CONST_OK_FOR_LETTER_P (INTVAL (operands[3]), 'J'))) + return "#"; + if (which_alternative == 4) + return "%x5 [%4=%3%S2],%0"; + return "%x5 [%4=%2%S3],%0"; +}) + +;; Add operations, standard names. + +;; Note that for the 'P' constraint, the high part can be -1 or 0. We +;; output the insn through the 'A' output modifier as "adds.w" and "addq", +;; respectively. +(define_expand "adddi3" + [(set (match_operand:DI 0 "register_operand") + (plus:DI (match_operand:DI 1 "register_operand") + (match_operand:DI 2 "general_operand")))] + "" +{ + if (MEM_P (operands[2]) && TARGET_V32) + operands[2] = force_reg (DImode, operands[2]); +}) + +(define_insn "*adddi3_non_v32" + [(set (match_operand:DI 0 "register_operand" "=r,r,r,&r,&r") + (plus:DI (match_operand:DI 1 "register_operand" "%0,0,0,0,r") + (match_operand:DI 2 "general_operand" "J,N,P,g,!To")))] + "!TARGET_V32" + "@ + addq %2,%M0\;ax\;addq 0,%H0 + subq %n2,%M0\;ax\;subq 0,%H0 + add%e2.%z2 %2,%M0\;ax\;%A2 %H2,%H0 + add.d %M2,%M0\;ax\;add.d %H2,%H0 + add.d %M2,%M1,%M0\;ax\;add.d %H2,%H1,%H0") + +; It seems no use allowing a memory operand for this one, because we'd +; need a scratch register for incrementing the address. +(define_insn "*adddi3_v32" + [(set (match_operand:DI 0 "register_operand" "=r,r,r,r,r") + (plus:DI (match_operand:DI 1 "register_operand" "%0,0,0,0,0") + (match_operand:DI 2 "nonmemory_operand" "J,N,P,r,n")))] + "TARGET_V32" + "@ + addq %2,%M0\;addc 0,%H0 + subq %n2,%M0\;ax\;subq 0,%H0 + add%e2.%z2 %2,%M0\;addc %H2,%H0 + add.d %M2,%M0\;addc %H2,%H0 + add.d %M2,%M0\;addc %H2,%H0") + +(define_expand "add3" + [(set (match_operand:BWD 0 "register_operand") + (plus:BWD + (match_operand:BWD 1 "register_operand") + (match_operand:BWD 2 "general_operand")))] + "" + "") + +(define_insn "*addsi3_non_v32" + [(set (match_operand:SI 0 "register_operand" "=r,r, r,r,r,r, r,r, r") + (plus:SI + (match_operand:SI 1 "register_operand" "%0,0, 0,0,0,0, 0,r, r") + (match_operand:SI 2 "general_operand" "r,Q>,J,N,n,!S,g,!To,0")))] + +;; The last constraint is due to that after reload, the '%' is not +;; honored, and canonicalization doesn't care about keeping the same +;; register as in destination. This will happen after insn splitting. +;; gcc <= 2.7.2. FIXME: Check for gcc-2.9x + + "!TARGET_V32" +{ + switch (which_alternative) + { + case 0: + case 1: + return "add.d %2,%0"; + case 2: + return "addq %2,%0"; + case 3: + return "subq %n2,%0"; + case 4: + /* 'Known value', but not in -63..63. + Check if addu/subu may be used. 
*/ + if (INTVAL (operands[2]) > 0) + { + if (INTVAL (operands[2]) < 256) + return "addu.b %2,%0"; + if (INTVAL (operands[2]) < 65536) + return "addu.w %2,%0"; + } + else + { + if (INTVAL (operands[2]) >= -255) + return "subu.b %n2,%0"; + if (INTVAL (operands[2]) >= -65535) + return "subu.w %n2,%0"; + } + return "add.d %2,%0"; + case 5: + { + rtx tem = operands[2]; + gcc_assert (GET_CODE (tem) == CONST); + tem = XEXP (tem, 0); + if (GET_CODE (tem) == PLUS + && GET_CODE (XEXP (tem, 0)) == UNSPEC + /* We don't allow CRIS_UNSPEC_PCREL here; we can't have a + pc-relative operand in an add insn. */ + && XINT (XEXP (tem, 0), 1) == CRIS_UNSPEC_GOTREL + && CONST_INT_P (XEXP (tem, 1))) + tem = XEXP (tem, 0); + gcc_assert (GET_CODE (tem) == UNSPEC); + switch (XINT (tem, 1)) + { + case CRIS_UNSPEC_GOTREAD: + case CRIS_UNSPEC_PLTGOTREAD: + /* Using sign-extend mostly to be consistent with the + indexed addressing mode. */ + if (flag_pic == 1) + return "adds.w %2,%0"; + return "add.d %2,%0"; + + case CRIS_UNSPEC_PLT_GOTREL: + case CRIS_UNSPEC_GOTREL: + return "add.d %2,%0"; + default: + gcc_unreachable (); + } + } + case 6: + return "add%u2 %2,%0"; + case 7: + return "add.d %2,%1,%0"; + case 8: + return "add.d %1,%0"; + default: + return "BOGUS addsi %2+%1 to %0"; + } +} + [(set_attr "slottable" "yes,yes,yes,yes,no,no,no,no,yes")]) + +; FIXME: Check what's best: having the three-operand ACR alternative +; before or after the corresponding-operand2 alternative. Check for +; *all* insns. FIXME: constant constraint letter for -128..127. +(define_insn "*addsi3_v32" + [(set (match_operand:SI 0 "register_operand" "=r,!a,r,!a, r,r,!a,r,!a,r,r,r,!a") + (plus:SI + (match_operand:SI 1 "register_operand" "%0,r, 0, r, 0,0,r, 0,r, 0,0,0,r") + (match_operand:SI 2 "general_operand" "r, r, Q>,Q>,J,N,NJ,L,L, P,n,g,g")))] + "TARGET_V32" + "@ + add.d %2,%0 + addi %2.b,%1,%0 + add.d %2,%0 + addo.d %2,%1,%0 + addq %2,%0 + subq %n2,%0 + addoq %2,%1,%0 + adds.w %2,%0 + addo %2,%1,%0 + addu.w %2,%0 + add.d %2,%0 + add%u2 %2,%0 + addo.%Z2 %2,%1,%0" + [(set_attr "slottable" "yes,yes,yes,yes,yes,yes,yes,no,no,no,no,no,no") + (set_attr "cc" "*,none,*,none,*,*,none,*,none,*,*,*,none")]) + +(define_insn "*addhi3_non_v32" + [(set (match_operand:HI 0 "register_operand" "=r,r, r,r,r,r") + (plus:HI (match_operand:HI 1 "register_operand" "%0,0, 0,0,0,r") + (match_operand:HI 2 "general_operand" "r,Q>,J,N,g,!To")))] + "!TARGET_V32" + "@ + add.w %2,%0 + add.w %2,%0 + addq %2,%0 + subq %n2,%0 + add.w %2,%0 + add.w %2,%1,%0" + [(set_attr "slottable" "yes,yes,yes,yes,no,no") + (set_attr "cc" "normal,normal,clobber,clobber,normal,normal")]) + +(define_insn "*addhi3_v32" + [(set (match_operand:HI 0 "register_operand" "=r, !a,r,!a, r,r,!a,r,!a") + (plus:HI + (match_operand:HI 1 "register_operand" "%0,r, 0, r, 0,0,r, 0,r") + (match_operand:HI 2 "general_operand" "r, r, Q>,Q>,J,N,NJ,g,g")))] + "TARGET_V32" + "@ + add.w %2,%0 + addi %2.b,%1,%0 + add.w %2,%0 + addo.w %2,%1,%0 + addq %2,%0 + subq %n2,%0 + addoq %2,%1,%0 + add.w %2,%0 + addo.w %2,%1,%0" + [(set_attr "slottable" "yes,yes,yes,yes,yes,yes,yes,no,no") + (set_attr "cc" "*,none,*,none,clobber,clobber,none,*,none")]) + +(define_insn "*addqi3_non_v32" + [(set (match_operand:QI 0 "register_operand" "=r,r, r,r,r,r,r") + (plus:QI (match_operand:QI 1 "register_operand" "%0,0, 0,0,0,0,r") + (match_operand:QI 2 "general_operand" "r,Q>,J,N,O,g,!To")))] + "!TARGET_V32" + "@ + add.b %2,%0 + add.b %2,%0 + addq %2,%0 + subq %n2,%0 + subQ -%b2,%0 + add.b %2,%0 + add.b %2,%1,%0" + [(set_attr "slottable" 
"yes,yes,yes,yes,yes,no,no") + (set_attr "cc" "normal,normal,clobber,clobber,clobber,normal,normal")]) + +(define_insn "*addqi3_v32" + [(set (match_operand:QI 0 "register_operand" "=r,!a,r,!a, r,r,!a,r,r,!a") + (plus:QI + (match_operand:QI 1 "register_operand" "%0,r, 0, r, 0,0,r, 0,0,r") + (match_operand:QI 2 "general_operand" "r,r, Q>,Q>,J,N,NJ,O,g,g")))] + "TARGET_V32" + "@ + add.b %2,%0 + addi %2.b,%1,%0 + add.b %2,%0 + addo.b %2,%1,%0 + addq %2,%0 + subq %n2,%0 + addoq %2,%1,%0 + subQ -%b2,%0 + add.b %2,%0 + addo.b %2,%1,%0" + [(set_attr "slottable" "yes,yes,yes,yes,yes,yes,yes,yes,no,no") + (set_attr "cc" "*,none,*,none,clobber,clobber,none,clobber,*,none")]) + +;; Subtract. +;; +;; Note that because of insn canonicalization these will *seldom* but +;; rarely be used with a known constant as an operand. + +;; Note that for the 'P' constraint, the high part can be -1 or 0. We +;; output the insn through the 'D' output modifier as "subs.w" and "subq", +;; respectively. +(define_expand "subdi3" + [(set (match_operand:DI 0 "register_operand") + (minus:DI (match_operand:DI 1 "register_operand") + (match_operand:DI 2 "general_operand")))] + "" +{ + if (TARGET_V32 && MEM_P (operands[2])) + operands[2] = force_reg (DImode, operands[2]); +}) + +(define_insn "*subdi3_non_v32" + [(set (match_operand:DI 0 "register_operand" "=r,r,r,&r,&r") + (minus:DI (match_operand:DI 1 "register_operand" "0,0,0,0,r") + (match_operand:DI 2 "general_operand" "J,N,P,g,!To")))] + "!TARGET_V32" + "@ + subq %2,%M0\;ax\;subq 0,%H0 + addq %n2,%M0\;ax\;addq 0,%H0 + sub%e2.%z2 %2,%M0\;ax\;%D2 %H2,%H0 + sub.d %M2,%M0\;ax\;sub.d %H2,%H0 + sub.d %M2,%M1,%M0\;ax\;sub.d %H2,%H1,%H0") + +(define_insn "*subdi3_v32" + [(set (match_operand:DI 0 "register_operand" "=r,r,r,&r") + (minus:DI (match_operand:DI 1 "register_operand" "0,0,0,0") + (match_operand:DI 2 "nonmemory_operand" "J,N,P,r")))] + "TARGET_V32" + "@ + subq %2,%M0\;ax\;subq 0,%H0 + addq %n2,%M0\;ax\;addq 0,%H0 + sub%e2.%z2 %2,%M0\;ax\;%D2 %H2,%H0 + sub.d %M2,%M0\;ax\;sub.d %H2,%H0") + +(define_expand "sub3" + [(set (match_operand:BWD 0 "register_operand") + (minus:BWD + (match_operand:BWD 1 "register_operand") + (match_operand:BWD 2 "general_operand")))] + "" + "") + +(define_insn "*subsi3_non_v32" + [(set (match_operand:SI 0 "register_operand" "=r,r, r,r,r,r,r,r") + (minus:SI + (match_operand:SI 1 "register_operand" "0,0, 0,0,0,0,0,r") + (match_operand:SI 2 "general_operand" "r,Q>,J,N,P,n,g,!To")))] + "!TARGET_V32" + +;; This does not do the optimal: "addu.w 65535,r0" when %2 is negative. +;; But then again, %2 should not be negative. 
+ + "@ + sub.d %2,%0 + sub.d %2,%0 + subq %2,%0 + addq %n2,%0 + sub%e2.%z2 %2,%0 + sub.d %2,%0 + sub.d %2,%0 + sub.d %2,%1,%0" + [(set_attr "slottable" "yes,yes,yes,yes,no,no,no,no")]) + +(define_insn "*subsi3_v32" + [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r,r,r") + (minus:SI + (match_operand:SI 1 "register_operand" "0,0,0,0,0,0,0") + (match_operand:SI 2 "general_operand" "r,Q>,J,N,P,n,g")))] + "TARGET_V32" + "@ + sub.d %2,%0 + sub.d %2,%0 + subq %2,%0 + addq %n2,%0 + sub%e2.%z2 %2,%0 + sub.d %2,%0 + sub.d %2,%0" + [(set_attr "slottable" "yes,yes,yes,yes,no,no,no")]) + +(define_insn "*sub3_nonv32" + [(set (match_operand:BW 0 "register_operand" "=r,r, r,r,r,r") + (minus:BW (match_operand:BW 1 "register_operand" "0,0, 0,0,0,r") + (match_operand:BW 2 "general_operand" "r,Q>,J,N,g,!To")))] + "!TARGET_V32" + "@ + sub %2,%0 + sub %2,%0 + subq %2,%0 + addq %n2,%0 + sub %2,%0 + sub %2,%1,%0" + [(set_attr "slottable" "yes,yes,yes,yes,no,no") + (set_attr "cc" "normal,normal,clobber,clobber,normal,normal")]) + +(define_insn "*sub3_v32" + [(set (match_operand:BW 0 "register_operand" "=r,r,r,r,r") + (minus:BW (match_operand:BW 1 "register_operand" "0,0,0,0,0") + (match_operand:BW 2 "general_operand" "r,Q>,J,N,g")))] + "TARGET_V32" + "@ + sub %2,%0 + sub %2,%0 + subq %2,%0 + addq %n2,%0 + sub %2,%0" + [(set_attr "slottable" "yes,yes,yes,yes,no") + (set_attr "cc" "normal,normal,clobber,clobber,normal")]) + +;; CRIS has some add/sub-with-sign/zero-extend instructions. +;; Although these perform sign/zero-extension to SImode, they are +;; equally applicable for the HImode case. +;; FIXME: Check; GCC should handle the widening. +;; Note that these must be located after the normal add/sub patterns, +;; so not to get constants into any less specific operands. +;; +;; Extend with add/sub and side-effect. +;; +;; ADDS/SUBS/ADDU/SUBU and BOUND, which needs a check for zero_extend +;; +;; adds/subs/addu/subu bound [rx=ry+rz.S] + +;; QImode to HImode +;; FIXME: GCC should widen. 
+ +(define_insn "*extopqihi_side_biap" + [(set (match_operand:HI 0 "register_operand" "=r,r") + (match_operator:HI + 6 "cris_additive_operand_extend_operator" + [(match_operand:HI 1 "register_operand" "0,0") + (match_operator:HI + 7 "cris_extend_operator" + [(mem:QI (plus:SI + (mult:SI (match_operand:SI 2 "register_operand" "r,r") + (match_operand:SI 3 "const_int_operand" "n,n")) + (match_operand:SI 4 "register_operand" "r,r")))])])) + (set (match_operand:SI 5 "register_operand" "=*4,r") + (plus:SI (mult:SI (match_dup 2) + (match_dup 3)) + (match_dup 4)))] + "cris_side_effect_mode_ok (MULT, operands, 5, 4, 2, 3, 0)" + "@ + # + %x6%e7.%m7 [%5=%4+%2%T3],%0") + +(define_insn "*extopsi_side_biap" + [(set (match_operand:SI 0 "register_operand" "=r,r") + (match_operator:SI + 6 "cris_operand_extend_operator" + [(match_operand:SI 1 "register_operand" "0,0") + (match_operator:SI + 7 "cris_extend_operator" + [(mem:BW (plus:SI + (mult:SI (match_operand:SI 2 "register_operand" "r,r") + (match_operand:SI 3 "const_int_operand" "n,n")) + (match_operand:SI 4 "register_operand" "r,r")))])])) + (set (match_operand:SI 5 "register_operand" "=*4,r") + (plus:SI (mult:SI (match_dup 2) + (match_dup 3)) + (match_dup 4)))] + "(GET_CODE (operands[6]) != UMIN || GET_CODE (operands[7]) == ZERO_EXTEND) + && cris_side_effect_mode_ok (MULT, operands, 5, 4, 2, 3, 0)" + "@ + # + %x6%e7 [%5=%4+%2%T3],%0") + + +;; [rx=ry+i] + +;; QImode to HImode + +(define_insn "*extopqihi_side" + [(set (match_operand:HI 0 "register_operand" "=r,r,r,r,r") + (match_operator:HI + 5 "cris_additive_operand_extend_operator" + [(match_operand:HI 1 "register_operand" "0,0,0,0,0") + (match_operator:HI + 6 "cris_extend_operator" + [(mem:QI + (plus:SI (match_operand:SI 2 "cris_bdap_operand" "%r,r,r,R,R") + (match_operand:SI 3 "cris_bdap_operand" "r>Rn,r,>Rn,r,r") + ))])])) + (set (match_operand:SI 4 "register_operand" "=*2,r,r,*3,r") + (plus:SI (match_dup 2) + (match_dup 3)))] + "cris_side_effect_mode_ok (PLUS, operands, 4, 2, 3, -1, 0)" +{ + if ((which_alternative == 0 || which_alternative == 3) + && (!CONST_INT_P (operands[3]) + || INTVAL (operands[3]) > 127 + || INTVAL (operands[3]) < -128 + || CRIS_CONST_OK_FOR_LETTER_P (INTVAL (operands[3]), 'N') + || CRIS_CONST_OK_FOR_LETTER_P (INTVAL (operands[3]), 'J'))) + return "#"; + if (which_alternative == 4) + return "%x5%E6.%m6 [%4=%3%S2],%0"; + return "%x5%E6.%m6 [%4=%2%S3],%0"; +}) + +(define_insn "*extopsi_side" + [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r") + (match_operator:SI + 5 "cris_operand_extend_operator" + [(match_operand:SI 1 "register_operand" "0,0,0,0,0") + (match_operator:SI + 6 "cris_extend_operator" + [(mem:BW + (plus:SI (match_operand:SI 2 "cris_bdap_operand" "%r,r,r,R,R") + (match_operand:SI 3 "cris_bdap_operand" "r>Rn,r,>Rn,r,r") + ))])])) + (set (match_operand:SI 4 "register_operand" "=*2,r,r,*3,r") + (plus:SI (match_dup 2) + (match_dup 3)))] + "(GET_CODE (operands[5]) != UMIN || GET_CODE (operands[6]) == ZERO_EXTEND) + && cris_side_effect_mode_ok (PLUS, operands, 4, 2, 3, -1, 0)" +{ + if ((which_alternative == 0 || which_alternative == 3) + && (!CONST_INT_P (operands[3]) + || INTVAL (operands[3]) > 127 + || INTVAL (operands[3]) < -128 + || CRIS_CONST_OK_FOR_LETTER_P (INTVAL (operands[3]), 'N') + || CRIS_CONST_OK_FOR_LETTER_P (INTVAL (operands[3]), 'J'))) + return "#"; + if (which_alternative == 4) + return "%x5%E6 [%4=%3%S2],%0"; + return "%x5%E6 [%4=%2%S3],%0"; +}) + + +;; As with op.S we may have to add special pattern to match commuted +;; operands to adds/addu 
and bound +;; +;; adds/addu/bound [rx=ry+rz.S] + +;; QImode to HImode +;; FIXME: GCC should widen. + +(define_insn "*extopqihi_swap_side_biap" + [(set (match_operand:HI 0 "register_operand" "=r,r") + (plus:HI + (match_operator:HI + 6 "cris_extend_operator" + [(mem:QI (plus:SI + (mult:SI (match_operand:SI 2 "register_operand" "r,r") + (match_operand:SI 3 "const_int_operand" "n,n")) + (match_operand:SI 4 "register_operand" "r,r")))]) + (match_operand:HI 1 "register_operand" "0,0"))) + (set (match_operand:SI 5 "register_operand" "=*4,r") + (plus:SI (mult:SI (match_dup 2) + (match_dup 3)) + (match_dup 4)))] + "cris_side_effect_mode_ok (MULT, operands, 5, 4, 2, 3, 0)" + "@ + # + add%e6.b [%5=%4+%2%T3],%0") + +(define_insn "*extopsi_swap_side_biap" + [(set (match_operand:SI 0 "register_operand" "=r,r") + (match_operator:SI + 7 "cris_plus_or_bound_operator" + [(match_operator:SI + 6 "cris_extend_operator" + [(mem:BW (plus:SI + (mult:SI (match_operand:SI 2 "register_operand" "r,r") + (match_operand:SI 3 "const_int_operand" "n,n")) + (match_operand:SI 4 "register_operand" "r,r")))]) + (match_operand:SI 1 "register_operand" "0,0")])) + (set (match_operand:SI 5 "register_operand" "=*4,r") + (plus:SI (mult:SI (match_dup 2) + (match_dup 3)) + (match_dup 4)))] + "(GET_CODE (operands[7]) != UMIN || GET_CODE (operands[6]) == ZERO_EXTEND) + && cris_side_effect_mode_ok (MULT, operands, 5, 4, 2, 3, 0)" + "@ + # + %x7%E6 [%5=%4+%2%T3],%0") + +;; [rx=ry+i] +;; FIXME: GCC should widen. + +;; QImode to HImode + +(define_insn "*extopqihi_swap_side" + [(set (match_operand:HI 0 "register_operand" "=r,r,r,r,r") + (plus:HI + (match_operator:HI + 5 "cris_extend_operator" + [(mem:QI (plus:SI + (match_operand:SI 2 "cris_bdap_operand" "%r,r,r,R,R") + (match_operand:SI 3 "cris_bdap_operand" "r>Rn,r,>Rn,r,r")))]) + (match_operand:HI 1 "register_operand" "0,0,0,0,0"))) + (set (match_operand:SI 4 "register_operand" "=*2,r,r,*3,r") + (plus:SI (match_dup 2) + (match_dup 3)))] + "cris_side_effect_mode_ok (PLUS, operands, 4, 2, 3, -1, 0)" +{ + if ((which_alternative == 0 || which_alternative == 3) + && (!CONST_INT_P (operands[3]) + || INTVAL (operands[3]) > 127 + || INTVAL (operands[3]) < -128 + || CRIS_CONST_OK_FOR_LETTER_P (INTVAL (operands[3]), 'N') + || CRIS_CONST_OK_FOR_LETTER_P (INTVAL (operands[3]), 'J'))) + return "#"; + if (which_alternative == 4) + return "add%e5.b [%4=%3%S2],%0"; + return "add%e5.b [%4=%2%S3],%0"; +}) + +(define_insn "*extopsi_swap_side" + [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r") + (match_operator:SI + 6 "cris_plus_or_bound_operator" + [(match_operator:SI + 5 "cris_extend_operator" + [(mem:BW (plus:SI + (match_operand:SI 2 "cris_bdap_operand" "%r,r,r,R,R") + (match_operand:SI 3 "cris_bdap_operand" "r>Rn,r,>Rn,r,r")))]) + (match_operand:SI 1 "register_operand" "0,0,0,0,0")])) + (set (match_operand:SI 4 "register_operand" "=*2,r,r,*3,r") + (plus:SI (match_dup 2) + (match_dup 3)))] + "(GET_CODE (operands[6]) != UMIN || GET_CODE (operands[5]) == ZERO_EXTEND) + && cris_side_effect_mode_ok (PLUS, operands, 4, 2, 3, -1, 0)" +{ + if ((which_alternative == 0 || which_alternative == 3) + && (!CONST_INT_P (operands[3]) + || INTVAL (operands[3]) > 127 + || INTVAL (operands[3]) < -128 + || CRIS_CONST_OK_FOR_LETTER_P (INTVAL (operands[3]), 'N') + || CRIS_CONST_OK_FOR_LETTER_P (INTVAL (operands[3]), 'J'))) + return "#"; + if (which_alternative == 4) + return \"%x6%E5.%m5 [%4=%3%S2],%0\"; + return "%x6%E5 [%4=%2%S3],%0"; +}) + +;; Extend versions (zero/sign) of normal add/sub (no side-effects). 
+ +;; QImode to HImode +;; FIXME: GCC should widen. + +(define_insn "*extopqihi_non_v32" + [(set (match_operand:HI 0 "register_operand" "=r,r,r,r") + (match_operator:HI + 3 "cris_additive_operand_extend_operator" + [(match_operand:HI 1 "register_operand" "0,0,0,r") + (match_operator:HI + 4 "cris_extend_operator" + [(match_operand:QI 2 "nonimmediate_operand" "r,Q>,m,!To")])]))] + "!TARGET_V32 && GET_MODE_SIZE (GET_MODE (operands[0])) <= UNITS_PER_WORD + && (operands[1] != frame_pointer_rtx || GET_CODE (operands[3]) != PLUS)" + "@ + %x3%E4.%m4 %2,%0 + %x3%E4.%m4 %2,%0 + %x3%E4.%m4 %2,%0 + %x3%E4.%m4 %2,%1,%0" + [(set_attr "slottable" "yes,yes,no,no") + (set_attr "cc" "clobber")]) + +(define_insn "*extopqihi_v32" + [(set (match_operand:HI 0 "register_operand" "=r,r") + (match_operator:HI + 3 "cris_additive_operand_extend_operator" + [(match_operand:HI 1 "register_operand" "0,0") + (match_operator:HI + 4 "cris_extend_operator" + [(match_operand:QI 2 "nonimmediate_operand" "r,m")])]))] + "TARGET_V32" + "%x3%e4.%m4 %2,%0" + [(set_attr "slottable" "yes") + (set_attr "cc" "clobber")]) + +;; QImode to SImode + +(define_insn "*extopsi_non_v32" + [(set (match_operand:SI 0 "register_operand" "=r,r,r,r") + (match_operator:SI + 3 "cris_operand_extend_operator" + [(match_operand:SI 1 "register_operand" "0,0,0,r") + (match_operator:SI + 4 "cris_extend_operator" + [(match_operand:BW 2 "nonimmediate_operand" "r,Q>,m,!To")])]))] + "!TARGET_V32 + && (GET_CODE (operands[3]) != UMIN || GET_CODE (operands[4]) == ZERO_EXTEND) + && GET_MODE_SIZE (GET_MODE (operands[0])) <= UNITS_PER_WORD + && (operands[1] != frame_pointer_rtx || GET_CODE (operands[3]) != PLUS)" + "@ + %x3%E4 %2,%0 + %x3%E4 %2,%0 + %x3%E4 %2,%0 + %x3%E4 %2,%1,%0" + [(set_attr "slottable" "yes,yes,no,no")]) + +(define_insn "*extopsi_v32" + [(set (match_operand:SI 0 "register_operand" "=r,r") + (match_operator:SI + 3 "cris_additive_operand_extend_operator" + [(match_operand:SI 1 "register_operand" "0,0") + (match_operator:SI + 4 "cris_extend_operator" + [(match_operand:BW 2 "nonimmediate_operand" "r,m")])]))] + "TARGET_V32" + "%x3%e4.%m4 %2,%0" + [(set_attr "slottable" "yes")]) + +;; As with the side-effect patterns, may have to have swapped operands for add. +;; For commutative operands, these are the canonical forms. + +;; QImode to HImode + +(define_insn "*addxqihi_swap_non_v32" + [(set (match_operand:HI 0 "register_operand" "=r,r,r,r") + (plus:HI + (match_operator:HI + 3 "cris_extend_operator" + [(match_operand:QI 2 "nonimmediate_operand" "r,Q>,m,!To")]) + (match_operand:HI 1 "register_operand" "0,0,0,r")))] + "!TARGET_V32 && operands[1] != frame_pointer_rtx" + "@ + add%e3.b %2,%0 + add%e3.b %2,%0 + add%e3.b %2,%0 + add%e3.b %2,%1,%0" + [(set_attr "slottable" "yes,yes,no,no") + (set_attr "cc" "clobber")]) + +;; A case for v32, to catch the "addo" insn in addition to "adds". We +;; only care to match the canonical form; there should be no other. 
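In C terms, the extend-and-add patterns above amount to widening the narrow operand first and then adding; the v32 addo variants follow below. A sketch with hypothetical names:

    #include <stdint.h>

    int32_t adds_b_model (int32_t acc, const int8_t *p)      /* adds.b [p],acc */
    {
      return acc + (int32_t) *p;        /* sign-extend the byte, then add */
    }

    uint32_t addu_w_model (uint32_t acc, const uint16_t *p)  /* addu.w [p],acc */
    {
      return acc + (uint32_t) *p;       /* zero-extend the word, then add */
    }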
+ +(define_insn "*addsbw_v32" + [(set (match_operand:HI 0 "register_operand" "=r,r,!a") + (plus:HI + (sign_extend:HI + (match_operand:QI 2 "nonimmediate_operand" "r,m,m")) + (match_operand:HI 1 "register_operand" "0,0,r")))] + "TARGET_V32" + "@ + adds.b %2,%0 + adds.b %2,%0 + addo.b %2,%1,%0" + [(set_attr "slottable" "yes") + (set_attr "cc" "clobber,clobber,none")]) + +(define_insn "*addubw_v32" + [(set (match_operand:HI 0 "register_operand" "=r,r") + (plus:HI + (zero_extend:HI + (match_operand:QI 2 "nonimmediate_operand" "r,m")) + (match_operand:HI 1 "register_operand" "0,0")))] + "TARGET_V32" + "addu.b %2,%0" + [(set_attr "slottable" "yes") + (set_attr "cc" "clobber")]) + +(define_insn "*extopsi_swap_non_v32" + [(set (match_operand:SI 0 "register_operand" "=r,r,r,r") + (match_operator:SI + 4 "cris_plus_or_bound_operator" + [(match_operator:SI + 3 "cris_extend_operator" + [(match_operand:BW 2 "nonimmediate_operand" "r,Q>,m,!To")]) + (match_operand:SI 1 "register_operand" "0,0,0,r")]))] + "!TARGET_V32 + && (GET_CODE (operands[4]) != UMIN || GET_CODE (operands[3]) == ZERO_EXTEND) + && operands[1] != frame_pointer_rtx" + "@ + %x4%E3 %2,%0 + %x4%E3 %2,%0 + %x4%E3 %2,%0 + %x4%E3 %2,%1,%0" + [(set_attr "slottable" "yes,yes,no,no")]) + +(define_insn "*adds_v32" + [(set (match_operand:SI 0 "register_operand" "=r,r,!a") + (plus:SI + (sign_extend:SI + (match_operand:BW 2 "nonimmediate_operand" "r,m,m")) + (match_operand:SI 1 "register_operand" "0,0,r")))] + "TARGET_V32" + "@ + adds %2,%0 + adds %2,%0 + addo %2,%1,%0" + [(set_attr "slottable" "yes") + (set_attr "cc" "*,*,none")]) + +(define_insn "*addu_v32" + [(set (match_operand:SI 0 "register_operand" "=r,r") + (plus:SI + (zero_extend:SI + (match_operand:BW 2 "nonimmediate_operand" "r,m")) + (match_operand:SI 1 "register_operand" "0,0")))] + "TARGET_V32 && operands[1] != frame_pointer_rtx" + "addu %2,%0" + [(set_attr "slottable" "yes")]) + +(define_insn "*bound_v32" + [(set (match_operand:SI 0 "register_operand" "=r") + (umin:SI + (zero_extend:SI + (match_operand:BW 2 "register_operand" "r")) + (match_operand:SI 1 "register_operand" "0")))] + "TARGET_V32 && operands[1] != frame_pointer_rtx" + "bound %2,%0" + [(set_attr "slottable" "yes")]) + +;; This is the special case when we use what corresponds to the +;; instruction above in "casesi". Do *not* change it to use the generic +;; pattern and "REG 15" as pc; I did that and it led to madness and +;; maintenance problems: Instead of (as imagined) recognizing and removing +;; or replacing this pattern with something simpler, other variant +;; patterns were recognized or combined, including some prefix variants +;; where the value in pc is not that of the next instruction (which means +;; this instruction actually *is* special and *should* be marked as such). +;; When switching from the "generic pattern match" approach to this simpler +;; approach, there were insignificant differences in gcc, ipps and +;; product code, somehow due to scratching reload behind the ear or +;; something. Testcase "gcc" looked .01% slower and 4 bytes bigger; +;; product code became .001% smaller but "looked better". The testcase +;; "ipps" was just different at register allocation). +;; +;; Assumptions in the jump optimizer forces us to use IF_THEN_ELSE in this +;; pattern with the default-label as the else, with the "if" being +;; index-is-less-than the max number of cases plus one. The default-label +;; is attached to the end of the case-table at time of output. 
+ +(define_insn "*casesi_adds_w" + [(set (pc) + (if_then_else + (ltu (match_operand:SI 0 "register_operand" "r") + (match_operand:SI 1 "const_int_operand" "n")) + (plus:SI (sign_extend:SI + (mem:HI + (plus:SI (mult:SI (match_dup 0) (const_int 2)) + (pc)))) + (pc)) + (label_ref (match_operand 2 "" "")))) + (use (label_ref (match_operand 3 "" "")))] + "!TARGET_V32 && operands[0] != frame_pointer_rtx" + "adds.w [$pc+%0.w],$pc" + [(set_attr "cc" "clobber")]) + +;; For V32, we just have a jump, but we need to mark the table as used, +;; and the jump insn must have the if_then_else form expected by core +;; GCC. Since we don't want to prolong the lifetime of the original +;; index value, we compare against "unspec 0". It's a pity we have to +;; jump through to get the default label in place and to keep the jump +;; table around. FIXME: Look into it some time. + +(define_insn "*casesi_jump_v32" + [(set (pc) + (if_then_else + (ltu (unspec [(const_int 0)] CRIS_UNSPEC_CASESI) + (match_operand:SI 0 "const_int_operand" "n")) + (match_operand:SI 1 "register_operand" "r") + (label_ref (match_operand 2 "" "")))) + (use (label_ref (match_operand 3 "" "")))] + "TARGET_V32" + "jump %1%#" + [(set_attr "cc" "clobber") + (set_attr "slottable" "has_slot")]) + +;; Multiply instructions. + +;; Sometimes powers of 2 (which are normally canonicalized to a +;; left-shift) appear here, as a result of address reloading. +;; As a special, for values 3 and 5, we can match with an addi, so add those. +;; +;; FIXME: This may be unnecessary now. +;; Explicitly named for convenience of having a gen_... function. + +(define_insn "addi_mul" + [(set (match_operand:SI 0 "register_operand" "=r") + (mult:SI + (match_operand:SI 1 "register_operand" "%0") + (match_operand:SI 2 "const_int_operand" "n")))] + "operands[0] != frame_pointer_rtx + && operands[1] != frame_pointer_rtx + && CONST_INT_P (operands[2]) + && (INTVAL (operands[2]) == 2 + || INTVAL (operands[2]) == 4 || INTVAL (operands[2]) == 3 + || INTVAL (operands[2]) == 5)" +{ + if (INTVAL (operands[2]) == 2) + return "lslq 1,%0"; + else if (INTVAL (operands[2]) == 4) + return "lslq 2,%0"; + else if (INTVAL (operands[2]) == 3) + return "addi %0.w,%0"; + else if (INTVAL (operands[2]) == 5) + return "addi %0.d,%0"; + return "BAD: adr_mulsi: %0=%1*%2"; +} +[(set_attr "slottable" "yes") + ;; No flags are changed if this insn is "addi", but it does not seem + ;; worth the trouble to distinguish that to the lslq cases. + (set_attr "cc" "clobber")]) + +;; The addi insn as it is normally used. + +;; Make the the ACR alternative taste bad enough to not choose it as a +;; preference to avoid spilling problems (unwind-dw2-fde.c at build). +;; FIXME: Revisit for new register allocator. + +(define_insn "*addi" + [(set (match_operand:SI 0 "register_operand" "=r,!a") + (plus:SI + (mult:SI (match_operand:SI 2 "register_operand" "r,r") + (match_operand:SI 3 "const_int_operand" "n,n")) + (match_operand:SI 1 "register_operand" "0,r")))] + "operands[0] != frame_pointer_rtx + && operands[1] != frame_pointer_rtx + && CONST_INT_P (operands[3]) + && (INTVAL (operands[3]) == 1 + || INTVAL (operands[3]) == 2 || INTVAL (operands[3]) == 4)" + "@ + addi %2%T3,%0 + addi %2%T3,%1,%0" + [(set_attr "slottable" "yes") + (set_attr "cc" "none")]) + +;; The mstep instruction. Probably not useful by itself; it's to +;; non-linear wrt. the other insns. We used to expand to it, so at least +;; it's correct. 
+ +(define_insn "mstep_shift" + [(set (match_operand:SI 0 "register_operand" "=r") + (if_then_else:SI + (lt:SI (cc0) (const_int 0)) + (plus:SI (ashift:SI (match_operand:SI 1 "register_operand" "0") + (const_int 1)) + (match_operand:SI 2 "register_operand" "r")) + (ashift:SI (match_operand:SI 3 "register_operand" "0") + (const_int 1))))] + "!TARGET_V32" + "mstep %2,%0" + [(set_attr "slottable" "yes")]) + +;; When illegitimate addresses are legitimized, sometimes gcc forgets +;; to canonicalize the multiplications. +;; +;; FIXME: Check gcc > 2.7.2, remove and possibly fix in gcc. + +(define_insn "mstep_mul" + [(set (match_operand:SI 0 "register_operand" "=r") + (if_then_else:SI + (lt:SI (cc0) (const_int 0)) + (plus:SI (mult:SI (match_operand:SI 1 "register_operand" "0") + (const_int 2)) + (match_operand:SI 2 "register_operand" "r")) + (mult:SI (match_operand:SI 3 "register_operand" "0") + (const_int 2))))] + "!TARGET_V32 + && operands[0] != frame_pointer_rtx + && operands[1] != frame_pointer_rtx + && operands[2] != frame_pointer_rtx + && operands[3] != frame_pointer_rtx" + "mstep %2,%0" + [(set_attr "slottable" "yes")]) + +(define_insn "mul3" + [(set (match_operand:WD 0 "register_operand" "=r") + (mult:WD + (szext:WD (match_operand: 1 "register_operand" "%0")) + (szext:WD (match_operand: 2 "register_operand" "r")))) + (clobber (match_scratch:SI 3 "=h"))] + "TARGET_HAS_MUL_INSNS" + "%!mul %2,%0" + [(set (attr "slottable") + (if_then_else (ne (symbol_ref "TARGET_MUL_BUG") (const_int 0)) + (const_string "no") + (const_string "yes"))) + ;; For umuls.[bwd] it's just N unusable here, but let's be safe. + ;; For muls.b, this really extends to SImode, so cc should be + ;; considered clobbered. + ;; For muls.w, it's just N unusable here, but let's be safe. + (set_attr "cc" "clobber")]) + +;; Note that gcc does not make use of such a thing as umulqisi3. It gets +;; confused and will erroneously use it instead of umulhisi3, failing (at +;; least) gcc.c-torture/execute/arith-rand.c at all optimization levels. +;; Inspection of optab code shows that there must be only one widening +;; multiplication per mode widened to. + +(define_insn "mulsi3" + [(set (match_operand:SI 0 "register_operand" "=r") + (mult:SI (match_operand:SI 1 "register_operand" "%0") + (match_operand:SI 2 "register_operand" "r"))) + (clobber (match_scratch:SI 3 "=h"))] + "TARGET_HAS_MUL_INSNS" + "%!muls.d %2,%0" + [(set (attr "slottable") + (if_then_else (ne (symbol_ref "TARGET_MUL_BUG") (const_int 0)) + (const_string "no") + (const_string "yes"))) + ;; Just N unusable here, but let's be safe. + (set_attr "cc" "clobber")]) + +;; A few multiply variations. + +;; When needed, we can get the high 32 bits from the overflow +;; register. We don't care to split and optimize these. +;; +;; Note that cc0 is still valid after the move-from-overflow-register +;; insn; no special precaution need to be taken in cris_notice_update_cc. + +(define_insn "mulsidi3" + [(set (match_operand:DI 0 "register_operand" "=r") + (mult:DI + (szext:DI (match_operand:SI 1 "register_operand" "%0")) + (szext:DI (match_operand:SI 2 "register_operand" "r")))) + (clobber (match_scratch:SI 3 "=h"))] + "TARGET_HAS_MUL_INSNS" + "%!mul.d %2,%M0\;move $mof,%H0") + +;; These two patterns may be expressible by other means, perhaps by making +;; [u]?mulsidi3 a define_expand. 
+ +;; Due to register allocation braindamage, the clobber 1,2 alternatives +;; cause a move into the clobbered register *before* the insn, then +;; after the insn, mof is moved too, rather than the clobber assigned +;; the last mof target. This became apparent when making MOF and SRP +;; visible registers, with the necessary tweak to smulsi3_highpart. +;; Because these patterns are used in division by constants, that damage +;; is visible (ipps regression tests). Therefore the last two +;; alternatives, "helping" reload to avoid an unnecessary move, but +;; punished by force of one "?". Check code from "int d (int a) {return +;; a / 1000;}" and unsigned. FIXME: Comment above was for 3.2, revisit. + +(define_insn "mulsi3_highpart" + [(set (match_operand:SI 0 "register_operand" "=h,h,?r,?r") + (truncate:SI + (lshiftrt:DI + (mult:DI + (szext:DI (match_operand:SI 1 "register_operand" "r,r,0,r")) + (szext:DI (match_operand:SI 2 "register_operand" "r,r,r,0"))) + (const_int 32)))) + (clobber (match_scratch:SI 3 "=1,2,h,h"))] + "TARGET_HAS_MUL_INSNS" + "@ + %!mul.d %2,%1 + %!mul.d %1,%2 + %!mul.d %2,%1\;move $mof,%0 + %!mul.d %1,%2\;move $mof,%0" + [(set_attr "slottable" "yes,yes,no,no") + (set_attr "cc" "clobber")]) + +;; Divide and modulus instructions. CRIS only has a step instruction. + +(define_insn "dstep_shift" + [(set (match_operand:SI 0 "register_operand" "=r") + (if_then_else:SI + (geu:SI (ashift:SI (match_operand:SI 1 "register_operand" "0") + (const_int 1)) + (match_operand:SI 2 "register_operand" "r")) + (minus:SI (ashift:SI (match_operand:SI 3 "register_operand" "0") + (const_int 1)) + (match_operand:SI 4 "register_operand" "2")) + (ashift:SI (match_operand:SI 5 "register_operand" "0") + (const_int 1))))] + "" + "dstep %2,%0" + [(set_attr "slottable" "yes") + (set_attr "cc" "noov32")]) + +;; Here's a variant with mult instead of ashift. +;; +;; FIXME: This should be investigated. Which one matches through combination? + +(define_insn "dstep_mul" + [(set (match_operand:SI 0 "register_operand" "=r") + (if_then_else:SI + (geu:SI (mult:SI (match_operand:SI 1 "register_operand" "0") + (const_int 2)) + (match_operand:SI 2 "register_operand" "r")) + (minus:SI (mult:SI (match_operand:SI 3 "register_operand" "0") + (const_int 2)) + (match_operand:SI 4 "register_operand" "2")) + (mult:SI (match_operand:SI 5 "register_operand" "0") + (const_int 2))))] + "operands[0] != frame_pointer_rtx + && operands[1] != frame_pointer_rtx + && operands[2] != frame_pointer_rtx + && operands[3] != frame_pointer_rtx" + "dstep %2,%0" + [(set_attr "slottable" "yes") + (set_attr "cc" "noov32")]) + +;; Logical operators. + +;; Bitwise "and". + +;; There is no use in defining "anddi3", because gcc can expand this by +;; itself, and make reasonable code without interference. + +;; If the first operand is memory or a register and is the same as the +;; second operand, and the third operand is -256 or -65536, we can use +;; CLEAR instead. Or, if the first operand is a register, and the third +;; operand is 255 or 65535, we can zero_extend. +;; GCC isn't smart enough to recognize these cases (yet), and they seem +;; to be common enough to be worthwhile. +;; FIXME: This should be made obsolete. + +(define_expand "andsi3" + [(set (match_operand:SI 0 "nonimmediate_operand" "") + (and:SI (match_operand:SI 1 "nonimmediate_operand" "") + (match_operand:SI 2 "general_operand" "")))] + "" +{ + if (! 
(CONST_INT_P (operands[2]) + && (((INTVAL (operands[2]) == -256 + || INTVAL (operands[2]) == -65536) + && rtx_equal_p (operands[1], operands[0])) + || ((INTVAL (operands[2]) == 255 + || INTVAL (operands[2]) == 65535) + && REG_P (operands[0]))))) + { + /* Make intermediate steps if operand0 is not a register or + operand1 is not a register, and hope that the reload pass will + make something useful out of it. Note that the operands are + *not* canonicalized. For the moment, I chicken out on this, + because all or most ports do not describe 'and' with + canonicalized operands, and I seem to remember magic in reload, + checking that operand1 has constraint '%0', in which case + operand0 and operand1 must have similar predicates. + FIXME: Investigate. */ + rtx reg0 = REG_P (operands[0]) ? operands[0] : gen_reg_rtx (SImode); + rtx reg1 = operands[1]; + + if (! REG_P (reg1)) + { + emit_move_insn (reg0, reg1); + reg1 = reg0; + } + + emit_insn (gen_rtx_SET (SImode, reg0, + gen_rtx_AND (SImode, reg1, operands[2]))); + + /* Make sure we get the right *final* destination. */ + if (! REG_P (operands[0])) + emit_move_insn (operands[0], reg0); + + DONE; + } +}) + +;; Some special cases of andsi3. + +(define_insn "*andsi_movu" + [(set (match_operand:SI 0 "register_operand" "=r,r,r") + (and:SI (match_operand:SI 1 "nonimmediate_operand" "%r,Q,To") + (match_operand:SI 2 "const_int_operand" "n,n,n")))] + "(INTVAL (operands[2]) == 255 || INTVAL (operands[2]) == 65535) + && !side_effects_p (operands[1])" + "movu.%z2 %1,%0" + [(set_attr "slottable" "yes,yes,no")]) + +(define_insn "*andsi_clear" + [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,Q,Q,To,To") + (and:SI (match_operand:SI 1 "nonimmediate_operand" "%0,0,0,0,0,0") + (match_operand:SI 2 "const_int_operand" "P,n,P,n,P,n")))] + "(INTVAL (operands[2]) == -65536 || INTVAL (operands[2]) == -256) + && !side_effects_p (operands[0])" + "@ + cLear.b %0 + cLear.w %0 + cLear.b %0 + cLear.w %0 + cLear.b %0 + cLear.w %0" + [(set_attr "slottable" "yes,yes,yes,yes,no,no") + (set_attr "cc" "none")]) + +;; This is a catch-all pattern, taking care of everything that was not +;; matched in the insns above. +;; +;; Sidenote: the tightening from "nonimmediate_operand" to +;; "register_operand" for operand 1 actually increased the register +;; pressure (worse code). That will hopefully change with an +;; improved reload pass. + +(define_insn "*expanded_andsi_non_v32" + [(set (match_operand:SI 0 "register_operand" "=r,r,r, r,r") + (and:SI (match_operand:SI 1 "register_operand" "%0,0,0, 0,r") + (match_operand:SI 2 "general_operand" "I,r,Q>,g,!To")))] + "!TARGET_V32" + "@ + andq %2,%0 + and.d %2,%0 + and.d %2,%0 + and.d %2,%0 + and.d %2,%1,%0" + [(set_attr "slottable" "yes,yes,yes,no,no")]) + +(define_insn "*expanded_andsi_v32" + [(set (match_operand:SI 0 "register_operand" "=r,r,r,r") + (and:SI (match_operand:SI 1 "register_operand" "%0,0,0,0") + (match_operand:SI 2 "general_operand" "I,r,Q>,g")))] + "TARGET_V32" + "@ + andq %2,%0 + and.d %2,%0 + and.d %2,%0 + and.d %2,%0" + [(set_attr "slottable" "yes,yes,yes,no") + (set_attr "cc" "noov32")]) + +;; For both QI and HI we may use the quick patterns. This results in +;; useless condition codes, but that is used rarely enough for it to +;; normally be a win (could check ahead for use of cc0, but seems to be +;; more pain than win). 
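The two andsi3 special cases handled above reduce to a byte/word clear when the mask is -256 or -65536 and the destination equals the source, and to a zero-extending move when the mask is 255 or 65535 and the destination is a register. In C, roughly (hypothetical names); the HImode and QImode variants follow below.

    #include <stdint.h>

    uint32_t and_as_clear_model (uint32_t x)
    {
      return x & 0xffffff00u;   /* "clear.b %0": clear the low byte in place */
    }

    uint32_t and_as_movu_model (uint32_t x)
    {
      return x & 0x0000ffffu;   /* "movu.w %1,%0": zero-extend the low word */
    }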
+ +;; FIXME: See note for andsi3 + +(define_expand "andhi3" + [(set (match_operand:HI 0 "nonimmediate_operand" "") + (and:HI (match_operand:HI 1 "nonimmediate_operand" "") + (match_operand:HI 2 "general_operand" "")))] + "" +{ + if (! (CONST_INT_P (operands[2]) + && (((INTVAL (operands[2]) == -256 + || INTVAL (operands[2]) == 65280) + && rtx_equal_p (operands[1], operands[0])) + || (INTVAL (operands[2]) == 255 + && REG_P (operands[0]))))) + { + /* See comment for andsi3. */ + rtx reg0 = REG_P (operands[0]) ? operands[0] : gen_reg_rtx (HImode); + rtx reg1 = operands[1]; + + if (! REG_P (reg1)) + { + emit_move_insn (reg0, reg1); + reg1 = reg0; + } + + emit_insn (gen_rtx_SET (HImode, reg0, + gen_rtx_AND (HImode, reg1, operands[2]))); + + /* Make sure we get the right destination. */ + if (! REG_P (operands[0])) + emit_move_insn (operands[0], reg0); + + DONE; + } +}) + +;; Some fast andhi3 special cases. + +(define_insn "*andhi_movu" + [(set (match_operand:HI 0 "register_operand" "=r,r,r") + (and:HI (match_operand:HI 1 "nonimmediate_operand" "r,Q,To") + (const_int 255)))] + "!side_effects_p (operands[1])" + "mOvu.b %1,%0" + [(set_attr "slottable" "yes,yes,no")]) + +(define_insn "*andhi_clear" + [(set (match_operand:HI 0 "nonimmediate_operand" "=r,Q,To") + (and:HI (match_operand:HI 1 "nonimmediate_operand" "0,0,0") + (const_int -256)))] + "!side_effects_p (operands[0])" + "cLear.b %0" + [(set_attr "slottable" "yes,yes,no") + (set_attr "cc" "none")]) + +;; Catch-all andhi3 pattern. + +(define_insn "*expanded_andhi_non_v32" + [(set (match_operand:HI 0 "register_operand" "=r,r,r, r,r,r,r") + (and:HI (match_operand:HI 1 "register_operand" "%0,0,0, 0,0,0,r") + (match_operand:HI 2 "general_operand" "I,r,Q>,L,O,g,!To")))] + +;; Sidenote: the tightening from "general_operand" to +;; "register_operand" for operand 1 actually increased the register +;; pressure (worse code). That will hopefully change with an +;; improved reload pass. + + "!TARGET_V32" + "@ + andq %2,%0 + and.w %2,%0 + and.w %2,%0 + and.w %2,%0 + anDq %b2,%0 + and.w %2,%0 + and.w %2,%1,%0" + [(set_attr "slottable" "yes,yes,yes,no,yes,no,no") + (set_attr "cc" "clobber,normal,normal,normal,clobber,normal,normal")]) + +(define_insn "*expanded_andhi_v32" + [(set (match_operand:HI 0 "register_operand" "=r,r,r,r,r,r") + (and:HI (match_operand:HI 1 "register_operand" "%0,0,0,0,0,0") + (match_operand:HI 2 "general_operand" "I,r,Q>,L,O,g")))] + "TARGET_V32" + "@ + andq %2,%0 + and.w %2,%0 + and.w %2,%0 + and.w %2,%0 + anDq %b2,%0 + and.w %2,%0" + [(set_attr "slottable" "yes,yes,yes,no,yes,no") + (set_attr "cc" "clobber,noov32,noov32,noov32,clobber,noov32")]) + +;; A strict_low_part pattern. 
+ +(define_insn "*andhi_lowpart_non_v32" + [(set (strict_low_part + (match_operand:HI 0 "register_operand" "+r,r, r,r,r,r")) + (and:HI (match_operand:HI 1 "register_operand" "%0,0, 0,0,0,r") + (match_operand:HI 2 "general_operand" "r,Q>,L,O,g,!To")))] + "!TARGET_V32" + "@ + and.w %2,%0 + and.w %2,%0 + and.w %2,%0 + anDq %b2,%0 + and.w %2,%0 + and.w %2,%1,%0" + [(set_attr "slottable" "yes,yes,no,yes,no,no") + (set_attr "cc" "normal,normal,normal,clobber,normal,normal")]) + +(define_insn "*andhi_lowpart_v32" + [(set (strict_low_part + (match_operand:HI 0 "register_operand" "+r,r,r,r,r")) + (and:HI (match_operand:HI 1 "register_operand" "%0,0,0,0,0") + (match_operand:HI 2 "general_operand" "r,Q>,L,O,g")))] + "TARGET_V32" + "@ + and.w %2,%0 + and.w %2,%0 + and.w %2,%0 + anDq %b2,%0 + and.w %2,%0" + [(set_attr "slottable" "yes,yes,no,yes,no") + (set_attr "cc" "noov32,noov32,noov32,clobber,noov32")]) + +(define_expand "andqi3" + [(set (match_operand:QI 0 "register_operand") + (and:QI (match_operand:QI 1 "register_operand") + (match_operand:QI 2 "general_operand")))] + "" + "") + +(define_insn "*andqi3_non_v32" + [(set (match_operand:QI 0 "register_operand" "=r,r,r, r,r,r") + (and:QI (match_operand:QI 1 "register_operand" "%0,0,0, 0,0,r") + (match_operand:QI 2 "general_operand" "I,r,Q>,O,g,!To")))] + "!TARGET_V32" + "@ + andq %2,%0 + and.b %2,%0 + and.b %2,%0 + andQ %b2,%0 + and.b %2,%0 + and.b %2,%1,%0" + [(set_attr "slottable" "yes,yes,yes,yes,no,no") + (set_attr "cc" "clobber,normal,normal,clobber,normal,normal")]) + +(define_insn "*andqi3_v32" + [(set (match_operand:QI 0 "register_operand" "=r,r,r,r,r") + (and:QI (match_operand:QI 1 "register_operand" "%0,0,0,0,0") + (match_operand:QI 2 "general_operand" "I,r,Q>,O,g")))] + "TARGET_V32" + "@ + andq %2,%0 + and.b %2,%0 + and.b %2,%0 + andQ %b2,%0 + and.b %2,%0" + [(set_attr "slottable" "yes,yes,yes,yes,no") + (set_attr "cc" "clobber,noov32,noov32,clobber,noov32")]) + +(define_insn "*andqi_lowpart_non_v32" + [(set (strict_low_part + (match_operand:QI 0 "register_operand" "+r,r, r,r,r")) + (and:QI (match_operand:QI 1 "register_operand" "%0,0, 0,0,r") + (match_operand:QI 2 "general_operand" "r,Q>,O,g,!To")))] + "!TARGET_V32" + "@ + and.b %2,%0 + and.b %2,%0 + andQ %b2,%0 + and.b %2,%0 + and.b %2,%1,%0" + [(set_attr "slottable" "yes,yes,yes,no,no") + (set_attr "cc" "normal,normal,clobber,normal,normal")]) + +(define_insn "*andqi_lowpart_v32" + [(set (strict_low_part + (match_operand:QI 0 "register_operand" "+r,r,r,r")) + (and:QI (match_operand:QI 1 "register_operand" "%0,0,0,0") + (match_operand:QI 2 "general_operand" "r,Q>,O,g")))] + "TARGET_V32" + "@ + and.b %2,%0 + and.b %2,%0 + andQ %b2,%0 + and.b %2,%0" + [(set_attr "slottable" "yes,yes,yes,no") + (set_attr "cc" "noov32,noov32,clobber,noov32")]) + +;; Bitwise or. + +;; Same comment as anddi3 applies here - no need for such a pattern. + +;; It seems there's no need to jump through hoops to get good code such as +;; with andsi3. 
+ +(define_expand "ior3" + [(set (match_operand:BWD 0 "register_operand") + (ior:BWD (match_operand:BWD 1 "register_operand") + (match_operand:BWD 2 "general_operand")))] + "" + "") + +(define_insn "*iorsi3_non_v32" + [(set (match_operand:SI 0 "register_operand" "=r,r,r, r,r,r") + (ior:SI (match_operand:SI 1 "register_operand" "%0,0,0, 0,0,r") + (match_operand:SI 2 "general_operand" "I, r,Q>,n,g,!To")))] + "!TARGET_V32" + "@ + orq %2,%0 + or.d %2,%0 + or.d %2,%0 + oR.%s2 %2,%0 + or.d %2,%0 + or.d %2,%1,%0" + [(set_attr "slottable" "yes,yes,yes,no,no,no") + (set_attr "cc" "normal,normal,normal,clobber,normal,normal")]) + +(define_insn "*iorsi3_v32" + [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r") + (ior:SI (match_operand:SI 1 "register_operand" "%0,0,0,0,0") + (match_operand:SI 2 "general_operand" "I,r,Q>,n,g")))] + "TARGET_V32" + "@ + orq %2,%0 + or.d %2,%0 + or.d %2,%0 + oR.%s2 %2,%0 + or.d %2,%0" + [(set_attr "slottable" "yes,yes,yes,no,no") + (set_attr "cc" "noov32,noov32,noov32,clobber,noov32")]) + +(define_insn "*iorhi3_non_v32" + [(set (match_operand:HI 0 "register_operand" "=r,r,r, r,r,r,r") + (ior:HI (match_operand:HI 1 "register_operand" "%0,0,0, 0,0,0,r") + (match_operand:HI 2 "general_operand" "I,r,Q>,L,O,g,!To")))] + "!TARGET_V32" + "@ + orq %2,%0 + or.w %2,%0 + or.w %2,%0 + or.w %2,%0 + oRq %b2,%0 + or.w %2,%0 + or.w %2,%1,%0" + [(set_attr "slottable" "yes,yes,yes,no,yes,no,no") + (set_attr "cc" "clobber,normal,normal,normal,clobber,normal,normal")]) + +(define_insn "*iorhi3_v32" + [(set (match_operand:HI 0 "register_operand" "=r,r,r,r,r,r") + (ior:HI (match_operand:HI 1 "register_operand" "%0,0,0,0,0,0") + (match_operand:HI 2 "general_operand" "I,r,Q>,L,O,g")))] + "TARGET_V32" + "@ + orq %2,%0 + or.w %2,%0 + or.w %2,%0 + or.w %2,%0 + oRq %b2,%0 + or.w %2,%0" + [(set_attr "slottable" "yes,yes,yes,no,yes,no") + (set_attr "cc" "clobber,noov32,noov32,noov32,clobber,noov32")]) + +(define_insn "*iorqi3_non_v32" + [(set (match_operand:QI 0 "register_operand" "=r,r,r, r,r,r") + (ior:QI (match_operand:QI 1 "register_operand" "%0,0,0, 0,0,r") + (match_operand:QI 2 "general_operand" "I,r,Q>,O,g,!To")))] + "!TARGET_V32" + "@ + orq %2,%0 + or.b %2,%0 + or.b %2,%0 + orQ %b2,%0 + or.b %2,%0 + or.b %2,%1,%0" + [(set_attr "slottable" "yes,yes,yes,yes,no,no") + (set_attr "cc" "clobber,normal,normal,clobber,normal,normal")]) + +(define_insn "*iorqi3_v32" + [(set (match_operand:QI 0 "register_operand" "=r,r,r,r,r") + (ior:QI (match_operand:QI 1 "register_operand" "%0,0,0,0,0") + (match_operand:QI 2 "general_operand" "I,r,Q>,O,g")))] + "TARGET_V32" + "@ + orq %2,%0 + or.b %2,%0 + or.b %2,%0 + orQ %b2,%0 + or.b %2,%0" + [(set_attr "slottable" "yes,yes,yes,yes,no") + (set_attr "cc" "clobber,noov32,noov32,clobber,noov32")]) + +;; Exclusive-or + +;; See comment about "anddi3" for xordi3 - no need for such a pattern. +;; FIXME: Do we really need the shorter variants? + +(define_insn "xorsi3" + [(set (match_operand:SI 0 "register_operand" "=r") + (xor:SI (match_operand:SI 1 "register_operand" "%0") + (match_operand:SI 2 "register_operand" "r")))] + "" + "xor %2,%0" + [(set_attr "slottable" "yes") + (set_attr "cc" "noov32")]) + +(define_insn "xor3" + [(set (match_operand:BW 0 "register_operand" "=r") + (xor:BW (match_operand:BW 1 "register_operand" "%0") + (match_operand:BW 2 "register_operand" "r")))] + "" + "xor %2,%0" + [(set_attr "slottable" "yes") + (set_attr "cc" "clobber")]) + +;; Negation insns. + +;; Questionable use, here mostly as a (slightly usable) define_expand +;; example. 
+ +(define_expand "negsf2" + [(set (match_dup 2) + (match_dup 3)) + (parallel [(set (match_operand:SF 0 "register_operand" "=r") + (neg:SF (match_operand:SF 1 + "register_operand" "0"))) + (use (match_dup 2))])] + "" +{ + operands[2] = gen_reg_rtx (SImode); + operands[3] = GEN_INT (1 << 31); +}) + +(define_insn "*expanded_negsf2" + [(set (match_operand:SF 0 "register_operand" "=r") + (neg:SF (match_operand:SF 1 "register_operand" "0"))) + (use (match_operand:SI 2 "register_operand" "r"))] + "" + "xor %2,%0" + [(set_attr "slottable" "yes")]) + +;; No "negdi2" although we could make one up that may be faster than +;; the one in libgcc. + +(define_insn "neg2" + [(set (match_operand:BWD 0 "register_operand" "=r") + (neg:BWD (match_operand:BWD 1 "register_operand" "r")))] + "" + "neg %1,%0" + [(set_attr "slottable" "yes")]) + +;; One-complements. + +;; See comment on anddi3 - no need for a DImode pattern. +;; See also xor comment. + +(define_insn "one_cmplsi2" + [(set (match_operand:SI 0 "register_operand" "=r") + (not:SI (match_operand:SI 1 "register_operand" "0")))] + "" + "not %0" + [(set_attr "slottable" "yes") + (set_attr "cc" "noov32")]) + +(define_insn "one_cmpl2" + [(set (match_operand:BW 0 "register_operand" "=r") + (not:BW (match_operand:BW 1 "register_operand" "0")))] + "" + "not %0" + [(set_attr "slottable" "yes") + (set_attr "cc" "clobber")]) + +;; Arithmetic/Logical shift right (and SI left). + +(define_insn "si3" + [(set (match_operand:SI 0 "register_operand" "=r") + (shift:SI (match_operand:SI 1 "register_operand" "0") + (match_operand:SI 2 "nonmemory_operand" "Kcr")))] + "" +{ + if (REG_S_P (operands[2])) + return ".d %2,%0"; + + return "q %2,%0"; +} + [(set_attr "slottable" "yes") + (set_attr "cc" "noov32")]) + +;; Since gcc gets lost, and forgets to zero-extend the source (or mask +;; the destination) when it changes shifts of lower modes into SImode, +;; it is better to make these expands an anonymous patterns instead of +;; the more correct define_insns. This occurs when gcc thinks that is +;; is better to widen to SImode and use immediate shift count. + +;; FIXME: Is this legacy or still true for gcc >= 2.7.2? + +;; FIXME: Can't parametrize sign_extend and zero_extend (before +;; mentioning "shiftrt"), so we need two patterns. 
+(define_expand "ashr3" + [(set (match_dup 3) + (sign_extend:SI (match_operand:BW 1 "nonimmediate_operand" ""))) + (set (match_dup 4) + (zero_extend:SI (match_operand:BW 2 "nonimmediate_operand" ""))) + (set (match_dup 5) (ashiftrt:SI (match_dup 3) (match_dup 4))) + (set (match_operand:BW 0 "general_operand" "") + (subreg:BW (match_dup 5) 0))] + "" +{ + int i; + + for (i = 3; i < 6; i++) + operands[i] = gen_reg_rtx (SImode); +}) + +(define_expand "lshr3" + [(set (match_dup 3) + (zero_extend:SI (match_operand:BW 1 "nonimmediate_operand" ""))) + (set (match_dup 4) + (zero_extend:SI (match_operand:BW 2 "nonimmediate_operand" ""))) + (set (match_dup 5) (lshiftrt:SI (match_dup 3) (match_dup 4))) + (set (match_operand:BW 0 "general_operand" "") + (subreg:BW (match_dup 5) 0))] + "" +{ + int i; + + for (i = 3; i < 6; i++) + operands[i] = gen_reg_rtx (SImode); +}) + +(define_insn "*expanded_" + [(set (match_operand:BW 0 "register_operand" "=r") + (shiftrt:BW (match_operand:BW 1 "register_operand" "0") + (match_operand:BW 2 "register_operand" "r")))] + "" + " %2,%0" + [(set_attr "slottable" "yes") + (set_attr "cc" "noov32")]) + +(define_insn "*_lowpart" + [(set (strict_low_part (match_operand:BW 0 "register_operand" "+r")) + (shiftrt:BW (match_dup 0) + (match_operand:BW 1 "register_operand" "r")))] + "" + " %1,%0" + [(set_attr "slottable" "yes") + (set_attr "cc" "noov32")]) + +;; Arithmetic/logical shift left. + +;; For narrower modes than SI, we can use lslq although it makes cc +;; unusable. The win is that we do not have to reload the shift-count +;; into a register. + +(define_insn "ashl3" + [(set (match_operand:BW 0 "register_operand" "=r,r") + (ashift:BW (match_operand:BW 1 "register_operand" "0,0") + (match_operand:BW 2 "nonmemory_operand" "r,Kc")))] + "" +{ + return + (CONST_INT_P (operands[2]) && INTVAL (operands[2]) > ) + ? "moveq 0,%0" + : (CONSTANT_P (operands[2]) + ? "lslq %2,%0" : "lsl %2,%0"); +} + [(set_attr "slottable" "yes") + (set_attr "cc" "noov32,clobber")]) + +;; A strict_low_part matcher. + +(define_insn "*ashl_lowpart" + [(set (strict_low_part (match_operand:BW 0 "register_operand" "+r")) + (ashift:BW (match_dup 0) + (match_operand:HI 1 "register_operand" "r")))] + "" + "lsl %1,%0" + [(set_attr "slottable" "yes") + (set_attr "cc" "noov32")]) + +;; Various strange insns that gcc likes. + +;; Fortunately, it is simple to construct an abssf (although it may not +;; be very much used in practice). + +(define_insn "abssf2" + [(set (match_operand:SF 0 "register_operand" "=r") + (abs:SF (match_operand:SF 1 "register_operand" "0")))] + "" + "lslq 1,%0\;lsrq 1,%0") + +(define_insn "abssi2" + [(set (match_operand:SI 0 "register_operand" "=r") + (abs:SI (match_operand:SI 1 "register_operand" "r")))] + "" + "abs %1,%0" + [(set_attr "slottable" "yes") + (set_attr "cc" "noov32")]) + +;; FIXME: GCC should be able to do these expansions itself. 
+ +(define_expand "abs2" + [(set (match_dup 2) + (sign_extend:SI (match_operand:BW 1 "general_operand" ""))) + (set (match_dup 3) (abs:SI (match_dup 2))) + (set (match_operand:BW 0 "register_operand" "") + (subreg:BW (match_dup 3) 0))] + "" + "operands[2] = gen_reg_rtx (SImode); operands[3] = gen_reg_rtx (SImode);") + +(define_insn "clzsi2" + [(set (match_operand:SI 0 "register_operand" "=r") + (clz:SI (match_operand:SI 1 "register_operand" "r")))] + "TARGET_HAS_LZ" + "lz %1,%0" + [(set_attr "slottable" "yes") + (set_attr "cc" "noov32")]) + +(define_insn "bswapsi2" + [(set (match_operand:SI 0 "register_operand" "=r") + (bswap:SI (match_operand:SI 1 "register_operand" "0")))] + "TARGET_HAS_SWAP" + "swapwb %0" + [(set_attr "slottable" "yes") + (set_attr "cc" "noov32")]) + +;; This instruction swaps all bits in a register. +;; That means that the most significant bit is put in the place +;; of the least significant bit, and so on. + +(define_insn "cris_swap_bits" + [(set (match_operand:SI 0 "register_operand" "=r") + (unspec:SI [(match_operand:SI 1 "register_operand" "0")] + CRIS_UNSPEC_SWAP_BITS))] + "TARGET_HAS_SWAP" + "swapwbr %0" + [(set_attr "slottable" "yes") + (set_attr "cc" "noov32")]) + +;; Implement ctz using two instructions, one for bit swap and one for clz. +;; Defines a scratch register to avoid clobbering input. + +(define_expand "ctzsi2" + [(set (match_dup 2) + (match_operand:SI 1 "register_operand")) + (set (match_dup 2) + (unspec:SI [(match_dup 2)] CRIS_UNSPEC_SWAP_BITS)) + (set (match_operand:SI 0 "register_operand") + (clz:SI (match_dup 2)))] + "TARGET_HAS_LZ && TARGET_HAS_SWAP" + "operands[2] = gen_reg_rtx (SImode);") + +;; Bound-insn. Defined to be the same as an unsigned minimum, which is an +;; operation supported by gcc. Used in casesi, but used now and then in +;; normal code too. + +(define_expand "uminsi3" + [(set (match_operand:SI 0 "register_operand" "") + (umin:SI (match_operand:SI 1 "register_operand" "") + (match_operand:SI 2 "general_operand" "")))] + "" +{ + if (MEM_P (operands[2]) && TARGET_V32) + operands[2] = force_reg (SImode, operands[2]); +}) + +(define_insn "*uminsi3_non_v32" + [(set (match_operand:SI 0 "register_operand" "=r,r, r,r") + (umin:SI (match_operand:SI 1 "register_operand" "%0,0, 0,r") + (match_operand:SI 2 "general_operand" "r,Q>,g,!To")))] + "!TARGET_V32" +{ + if (CONST_INT_P (operands[2])) + { + /* Constant operands are zero-extended, so only 32-bit operands + may be negative. */ + if (INTVAL (operands[2]) >= 0) + { + if (INTVAL (operands[2]) < 256) + return "bound.b %2,%0"; + + if (INTVAL (operands[2]) < 65536) + return "bound.w %2,%0"; + } + } + else if (which_alternative == 3) + return "bound.d %2,%1,%0"; + + return "bound.d %2,%0"; +} + [(set_attr "slottable" "yes,yes,no,no")]) + +(define_insn "*uminsi3_v32" + [(set (match_operand:SI 0 "register_operand" "=r,r") + (umin:SI (match_operand:SI 1 "register_operand" "%0,0") + (match_operand:SI 2 "nonmemory_operand" "r,i")))] + "TARGET_V32" +{ + if (GET_CODE (operands[2]) == CONST_INT) + { + /* Constant operands are zero-extended, so only 32-bit operands + may be negative. */ + if (INTVAL (operands[2]) >= 0) + { + if (INTVAL (operands[2]) < 256) + return "bound.b %2,%0"; + + if (INTVAL (operands[2]) < 65536) + return "bound.w %2,%0"; + } + } + + return "bound.d %2,%0"; +} + [(set_attr "slottable" "yes,no")]) + +;; Jump and branch insns. 
+ +(define_insn "jump" + [(set (pc) + (label_ref (match_operand 0 "" "")))] + "" + "ba %l0%#" + [(set_attr "slottable" "has_slot")]) + +;; Testcase gcc.c-torture/compile/991213-3.c fails if we allow a constant +;; here, since the insn is not recognized as an indirect jump by +;; jmp_uses_reg_or_mem used by computed_jump_p. Perhaps it is a kludge to +;; change from general_operand to nonimmediate_operand (at least the docs +;; should be changed), but then again the pattern is called indirect_jump. +(define_expand "indirect_jump" + [(set (pc) (match_operand:SI 0 "nonimmediate_operand"))] + "" +{ + if (TARGET_V32 && MEM_P (operands[0])) + operands[0] = force_reg (SImode, operands[0]); +}) + +(define_insn "*indirect_jump_non_v32" + [(set (pc) (match_operand:SI 0 "nonimmediate_operand" "rm"))] + "!TARGET_V32" + "jump %0") + +(define_insn "*indirect_jump_v32" + [(set (pc) (match_operand:SI 0 "register_operand" "r"))] + "TARGET_V32" + "jump %0%#" + [(set_attr "slottable" "has_slot")]) + +;; Return insn. Used whenever the epilogue is very simple; if it is only +;; a single ret or jump [sp+]. No allocated stack space or saved +;; registers are allowed. +;; Note that for this pattern, although named, it is ok to check the +;; context of the insn in the test, not only compiler switches. + +(define_expand "return" + [(return)] + "cris_simple_epilogue ()" + "cris_expand_return (cris_return_address_on_stack ()); DONE;") + +(define_insn "*return_expanded" + [(return)] + "" +{ + return cris_return_address_on_stack_for_return () + ? "jump [$sp+]" : "ret%#"; +} + [(set (attr "slottable") + (if_then_else + (ne (symbol_ref + "(cris_return_address_on_stack_for_return ())") + (const_int 0)) + (const_string "no") + (const_string "has_return_slot")))]) + +(define_expand "prologue" + [(const_int 0)] + "TARGET_PROLOGUE_EPILOGUE" + "cris_expand_prologue (); DONE;") + +;; Note that the (return) from the expander itself is always the last +;; insn in the epilogue. +(define_expand "epilogue" + [(const_int 0)] + "TARGET_PROLOGUE_EPILOGUE" + "cris_expand_epilogue (); DONE;") + +;; Conditional branches. + +(define_expand "cbranch4" + [(set (cc0) (compare + (match_operand:BWD 1 "nonimmediate_operand") + (match_operand:BWD 2 "general_operand"))) + (set (pc) + (if_then_else (match_operator 0 "ordered_comparison_operator" + [(cc0) (const_int 0)]) + (label_ref (match_operand 3 "" "")) + (pc)))] + "" + "") + +(define_expand "cbranchdi4" + [(set (cc0) + (compare (match_operand:DI 1 "nonimmediate_operand" "") + (match_operand:DI 2 "general_operand" ""))) + (set (pc) + (if_then_else (match_operator 0 "ordered_comparison_operator" + [(cc0) (const_int 0)]) + (label_ref (match_operand 3 "" "")) + (pc)))] + "" +{ + if (TARGET_V32 && !REG_P (operands[1])) + operands[1] = force_reg (DImode, operands[1]); + if (TARGET_V32 && MEM_P (operands[2])) + operands[2] = force_reg (DImode, operands[2]); +}) + + +;; We suffer from the same overflow-bit-gets-in-the-way problem as +;; e.g. m68k, so we have to check if overflow bit is set on all "signed" +;; conditions. + +(define_insn "b" + [(set (pc) + (if_then_else (ncond (cc0) + (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "b %l0%#" + [(set_attr "slottable" "has_slot")]) + +(define_insn "b" + [(set (pc) + (if_then_else (ocond (cc0) + (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" +{ + return + (cc_prev_status.flags & CC_NO_OVERFLOW) + ? 
0 : "b %l0%#"; +} + [(set_attr "slottable" "has_slot")]) + +(define_insn "b" + [(set (pc) + (if_then_else (rcond (cc0) + (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" +{ + return + (cc_prev_status.flags & CC_NO_OVERFLOW) + ? "b %l0%#" : "b %l0%#"; +} + [(set_attr "slottable" "has_slot")]) + +;; Reversed anonymous patterns to the ones above, as mandated. + +(define_insn "*b_reversed" + [(set (pc) + (if_then_else (ncond (cc0) + (const_int 0)) + (pc) + (label_ref (match_operand 0 "" ""))))] + "" + "b %l0%#" + [(set_attr "slottable" "has_slot")]) + +(define_insn "*b_reversed" + [(set (pc) + (if_then_else (ocond (cc0) + (const_int 0)) + (pc) + (label_ref (match_operand 0 "" ""))))] + "" +{ + return + (cc_prev_status.flags & CC_NO_OVERFLOW) + ? 0 : "b %l0%#"; +} + [(set_attr "slottable" "has_slot")]) + +(define_insn "*b_reversed" + [(set (pc) + (if_then_else (rcond (cc0) + (const_int 0)) + (pc) + (label_ref (match_operand 0 "" ""))))] + "" +{ + return + (cc_prev_status.flags & CC_NO_OVERFLOW) + ? "b %l0%#" : "b %l0%#"; +} + [(set_attr "slottable" "has_slot")]) + +;; Set on condition: sCC. + +(define_expand "cstoredi4" + [(set (cc0) (compare + (match_operand:DI 2 "nonimmediate_operand") + (match_operand:DI 3 "general_operand"))) + (set (match_operand:SI 0 "register_operand") + (match_operator:SI 1 "ordered_comparison_operator" + [(cc0) (const_int 0)]))] + "" +{ + if (TARGET_V32 && !REG_P (operands[2])) + operands[2] = force_reg (DImode, operands[2]); + if (TARGET_V32 && MEM_P (operands[3])) + operands[3] = force_reg (DImode, operands[3]); +}) + +(define_expand "cstore4" + [(set (cc0) (compare + (match_operand:BWD 2 "nonimmediate_operand") + (match_operand:BWD 3 "general_operand"))) + (set (match_operand:SI 0 "register_operand") + (match_operator:SI 1 "ordered_comparison_operator" + [(cc0) (const_int 0)]))] + "" + "") + +;; Like bCC, we have to check the overflow bit for +;; signed conditions. + +(define_insn "s" + [(set (match_operand:SI 0 "register_operand" "=r") + (ncond:SI (cc0) (const_int 0)))] + "" + "s %0" + [(set_attr "slottable" "yes") + (set_attr "cc" "none")]) + +(define_insn "s" + [(set (match_operand:SI 0 "register_operand" "=r") + (rcond:SI (cc0) (const_int 0)))] + "" +{ + return + (cc_prev_status.flags & CC_NO_OVERFLOW) + ? "s %0" : "s %0"; +} + [(set_attr "slottable" "yes") + (set_attr "cc" "none")]) + +(define_insn "s" + [(set (match_operand:SI 0 "register_operand" "=r") + (ocond:SI (cc0) (const_int 0)))] + "" +{ + return + (cc_prev_status.flags & CC_NO_OVERFLOW) + ? 0 : "s %0"; +} + [(set_attr "slottable" "yes") + (set_attr "cc" "none")]) + +;; Call insns. + +;; We need to make these patterns "expand", since the real operand is +;; hidden in a (mem:QI ) inside operand[0] (call_value: operand[1]), +;; and cannot be checked if it were a "normal" pattern. +;; Note that "call" and "call_value" are *always* called with a +;; mem-operand for operand 0 and 1 respective. What happens for combined +;; instructions is a different issue. + +(define_expand "call" + [(parallel [(call (match_operand:QI 0 "cris_mem_call_operand" "") + (match_operand 1 "general_operand" "")) + (clobber (reg:SI CRIS_SRP_REGNUM))])] + "" +{ + gcc_assert (MEM_P (operands[0])); + if (flag_pic) + cris_expand_pic_call_address (&operands[0]); +}) + +;; Accept *anything* as operand 1. Accept operands for operand 0 in +;; order of preference. 
+ +(define_insn "*expanded_call_non_v32" + [(call (mem:QI (match_operand:SI 0 "general_operand" "r,Q>,g")) + (match_operand 1 "" "")) + (clobber (reg:SI CRIS_SRP_REGNUM))] + "!TARGET_V32" + "jsr %0") + +(define_insn "*expanded_call_v32" + [(call + (mem:QI + (match_operand:SI 0 "cris_nonmemory_operand_or_callable_symbol" "n,r,U,i")) + (match_operand 1 "" "")) + (clobber (reg:SI CRIS_SRP_REGNUM))] + "TARGET_V32" + "@ + jsr %0%# + jsr %0%# + bsr %0%# + bsr %0%#" + [(set_attr "slottable" "has_call_slot")]) + +;; Parallel when calculating and reusing address of indirect pointer +;; with simple offset. (Makes most sense with PIC.) It looks a bit +;; wrong not to have the clobber last, but that's the way combine +;; generates it (except it doesn' look into the *inner* mem, so this +;; just matches a peephole2). FIXME: investigate that. +(define_insn "*expanded_call_side" + [(call (mem:QI + (mem:SI + (plus:SI (match_operand:SI 0 "cris_bdap_operand" "%r, r,r") + (match_operand:SI 1 "cris_bdap_operand" "r>Rn,r,>Rn")))) + (match_operand 2 "" "")) + (clobber (reg:SI CRIS_SRP_REGNUM)) + (set (match_operand:SI 3 "register_operand" "=*0,r,r") + (plus:SI (match_dup 0) + (match_dup 1)))] + "!TARGET_AVOID_GOTPLT && !TARGET_V32" + "jsr [%3=%0%S1]") + +(define_expand "call_value" + [(parallel [(set (match_operand 0 "" "") + (call (match_operand:QI 1 "cris_mem_call_operand" "") + (match_operand 2 "" ""))) + (clobber (reg:SI CRIS_SRP_REGNUM))])] + "" +{ + gcc_assert (MEM_P (operands[1])); + if (flag_pic) + cris_expand_pic_call_address (&operands[1]); +}) + +;; Accept *anything* as operand 2. The validity other than "general" of +;; operand 0 will be checked elsewhere. Accept operands for operand 1 in +;; order of preference (Q includes r, but r is shorter, faster). +;; We also accept a PLT symbol. We output it as [rPIC+sym:GOTPLT] rather +;; than requiring getting rPIC + sym:PLT into a register. + +(define_insn "*expanded_call_value_non_v32" + [(set (match_operand 0 "nonimmediate_operand" "=g,g,g") + (call (mem:QI (match_operand:SI 1 "general_operand" "r,Q>,g")) + (match_operand 2 "" ""))) + (clobber (reg:SI CRIS_SRP_REGNUM))] + "!TARGET_V32" + "Jsr %1" + [(set_attr "cc" "clobber")]) + +;; See similar call special-case. +(define_insn "*expanded_call_value_side" + [(set (match_operand 0 "nonimmediate_operand" "=g,g,g") + (call + (mem:QI + (mem:SI + (plus:SI (match_operand:SI 1 "cris_bdap_operand" "%r, r,r") + (match_operand:SI 2 "cris_bdap_operand" "r>Rn,r,>Rn")))) + (match_operand 3 "" ""))) + (clobber (reg:SI CRIS_SRP_REGNUM)) + (set (match_operand:SI 4 "register_operand" "=*1,r,r") + (plus:SI (match_dup 1) + (match_dup 2)))] + "!TARGET_AVOID_GOTPLT && !TARGET_V32" + "Jsr [%4=%1%S2]" + [(set_attr "cc" "clobber")]) + +(define_insn "*expanded_call_value_v32" + [(set + (match_operand 0 "nonimmediate_operand" "=g,g,g,g") + (call + (mem:QI + (match_operand:SI 1 "cris_nonmemory_operand_or_callable_symbol" "n,r,U,i")) + (match_operand 2 "" ""))) + (clobber (reg:SI 16))] + "TARGET_V32" + "@ + Jsr %1%# + Jsr %1%# + Bsr %1%# + Bsr %1%#" + [(set_attr "cc" "clobber") + (set_attr "slottable" "has_call_slot")]) + +;; Used in debugging. No use for the direct pattern; unfilled +;; delayed-branches are taken care of by other means. + +(define_insn "nop" + [(const_int 0)] + "" + "nop" + [(set_attr "cc" "none")]) + +;; We need to stop accesses to the stack after the memory is +;; deallocated. Unfortunately, reorg doesn't look at naked clobbers, +;; e.g. (insn ... 
(clobber (mem:BLK (stack_pointer_rtx)))) and we don't +;; want to use a naked (unspec_volatile) as that would stop any +;; scheduling in the epilogue. Hence we model it as a "real" insn that +;; sets the memory in an unspecified manner. FIXME: Unfortunately it +;; still has the effect of an unspec_volatile. +(define_insn "cris_frame_deallocated_barrier" + [(set (mem:BLK (reg:SI CRIS_SP_REGNUM)) + (unspec:BLK [(const_int 0)] CRIS_UNSPEC_FRAME_DEALLOC))] + "" + "" + [(set_attr "length" "0")]) + +;; We expand on casesi so we can use "bound" and "add offset fetched from +;; a table to pc" (adds.w [pc+%0.w],pc). + +;; Note: if you change the "parallel" (or add anything after it) in +;; this expansion, you must change the macro ASM_OUTPUT_CASE_END +;; accordingly, to add the default case at the end of the jump-table. + +(define_expand "cris_casesi_non_v32" + [(set (match_dup 5) (match_operand:SI 0 "general_operand" "")) + (set (match_dup 6) + (minus:SI (match_dup 5) + (match_operand:SI 1 "const_int_operand" "n"))) + (set (match_dup 7) + (umin:SI (match_dup 6) + (match_operand:SI 2 "const_int_operand" "n"))) + (parallel + [(set (pc) + (if_then_else + (ltu (match_dup 7) (match_dup 2)) + (plus:SI (sign_extend:SI + (mem:HI + (plus:SI (mult:SI (match_dup 7) (const_int 2)) + (pc)))) + (pc)) + (label_ref (match_operand 4 "" "")))) + (use (label_ref (match_operand 3 "" "")))])] + "" +{ + operands[2] = plus_constant (operands[2], 1); + operands[5] = gen_reg_rtx (SImode); + operands[6] = gen_reg_rtx (SImode); + operands[7] = gen_reg_rtx (SImode); +}) + +;; FIXME: Check effect of not JUMP_TABLES_IN_TEXT_SECTION. +(define_expand "cris_casesi_v32" + [(set (match_dup 5) (match_operand:SI 0 "general_operand")) + (set (match_dup 6) + (minus:SI (match_dup 5) + (match_operand:SI 1 "const_int_operand"))) + (set (match_dup 7) + (umin:SI (match_dup 6) + (match_operand:SI 2 "const_int_operand"))) + (set (match_dup 8) (match_dup 11)) + (set (match_dup 9) + (plus:SI (mult:SI (match_dup 7) (const_int 2)) + (match_dup 8))) + (set (match_dup 10) + (plus:SI (sign_extend:SI (mem:HI (match_dup 9))) + (match_dup 9))) + (parallel + [(set (pc) + (if_then_else + (ltu (unspec [(const_int 0)] CRIS_UNSPEC_CASESI) (match_dup 2)) + (match_dup 10) + (label_ref (match_operand 4 "" "")))) + (use (label_ref (match_dup 3)))])] + "TARGET_V32" +{ + int i; + rtx xlabel = gen_rtx_LABEL_REF (VOIDmode, operands[3]); + for (i = 5; i <= 10; i++) + operands[i] = gen_reg_rtx (SImode); + operands[2] = plus_constant (operands[2], 1); + + /* Don't forget to decorate labels too, for PIC. */ + operands[11] = flag_pic + ? gen_rtx_CONST (Pmode, + gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xlabel), + CRIS_UNSPEC_PCREL)) + : xlabel; +}) + +(define_expand "casesi" + [(match_operand:SI 0 "general_operand") + (match_operand:SI 1 "const_int_operand") + (match_operand:SI 2 "const_int_operand") + (match_operand 3 "" "") + (match_operand 4 "" "")] + "" +{ + if (TARGET_V32) + emit_insn (gen_cris_casesi_v32 (operands[0], operands[1], operands[2], + operands[3], operands[4])); + else + emit_insn (gen_cris_casesi_non_v32 (operands[0], operands[1], operands[2], + operands[3], operands[4])); + DONE; +}) + +;; Split-patterns. Some of them have modes unspecified. This +;; should always be ok; if for no other reason sparc.md has it as +;; well. +;; +;; When register_operand is specified for an operand, we can get a +;; subreg as well (Axis-990331), so don't just assume that REG_P is true +;; for a register_operand and that REGNO can be used as is. 
It is best to +;; guard with REG_P, unless it is worth it to adjust for the subreg case. + +;; op [rx + 0],ry,rz +;; The index to rx is optimized into zero, and gone. + +;; First, recognize bound [rx],ry,rz; where [rx] is zero-extended, +;; and add/sub [rx],ry,rz, with zero or sign-extend on [rx]. +;; Split this into: +;; move ry,rz +;; op [rx],rz +;; Lose if rz=ry or rx=rz. +;; Call this op-extend-split. +;; Do not match for V32; the addo and addi shouldn't be split +;; up. + +(define_split + [(set (match_operand 0 "register_operand" "") + (match_operator + 4 "cris_operand_extend_operator" + [(match_operand 1 "register_operand" "") + (match_operator + 3 "cris_extend_operator" + [(match_operand 2 "memory_operand" "")])]))] + "!TARGET_V32 + && REG_P (operands[0]) + && REG_P (operands[1]) + && REGNO (operands[1]) != REGNO (operands[0]) + && GET_MODE_SIZE (GET_MODE (operands[0])) <= UNITS_PER_WORD + && REG_P (XEXP (operands[2], 0)) + && REGNO (XEXP (operands[2], 0)) != REGNO (operands[0])" + [(set (match_dup 0) + (match_dup 1)) + (set (match_dup 0) + (match_op_dup + 4 [(match_dup 0) + (match_op_dup 3 [(match_dup 2)])]))] + "") + +;; As op-extend-split, but recognize and split op [rz],ry,rz into +;; ext [rz],rz +;; op ry,rz +;; Do this for plus or bound only, being commutative operations, since we +;; have swapped the operands. +;; Call this op-extend-split-rx=rz + +(define_split + [(set (match_operand 0 "register_operand" "") + (match_operator + 4 "cris_plus_or_bound_operator" + [(match_operand 1 "register_operand" "") + (match_operator + 3 "cris_extend_operator" + [(match_operand 2 "memory_operand" "")])]))] + "!TARGET_V32 + && REG_P (operands[0]) + && REG_P (operands[1]) + && REGNO (operands[1]) != REGNO (operands[0]) + && GET_MODE_SIZE (GET_MODE (operands[0])) <= UNITS_PER_WORD + && REG_P (XEXP (operands[2], 0)) + && REGNO (XEXP (operands[2], 0)) == REGNO (operands[0])" + [(set (match_dup 0) + (match_op_dup 3 [(match_dup 2)])) + (set (match_dup 0) + (match_op_dup + 4 [(match_dup 0) + (match_dup 1)]))] + "") + +;; As the op-extend-split, but swapped operands, and only for +;; plus or bound, being the commutative extend-operators. FIXME: Why is +;; this needed? Is it? +;; Call this op-extend-split-swapped + +(define_split + [(set (match_operand 0 "register_operand" "") + (match_operator + 4 "cris_plus_or_bound_operator" + [(match_operator + 3 "cris_extend_operator" + [(match_operand 2 "memory_operand" "")]) + (match_operand 1 "register_operand" "")]))] + "!TARGET_V32 + && REG_P (operands[0]) + && REG_P (operands[1]) + && REGNO (operands[1]) != REGNO (operands[0]) + && GET_MODE_SIZE (GET_MODE (operands[0])) <= UNITS_PER_WORD + && REG_P (XEXP (operands[2], 0)) + && REGNO (XEXP (operands[2], 0)) != REGNO (operands[0])" + [(set (match_dup 0) + (match_dup 1)) + (set (match_dup 0) + (match_op_dup + 4 [(match_dup 0) + (match_op_dup 3 [(match_dup 2)])]))] + "") + +;; As op-extend-split-rx=rz, but swapped operands, only for plus or +;; bound. Call this op-extend-split-swapped-rx=rz. 
+ +(define_split + [(set (match_operand 0 "register_operand" "") + (match_operator + 4 "cris_plus_or_bound_operator" + [(match_operator + 3 "cris_extend_operator" + [(match_operand 2 "memory_operand" "")]) + (match_operand 1 "register_operand" "")]))] + "!TARGET_V32 + && REG_P (operands[0]) + && REG_P (operands[1]) + && REGNO (operands[1]) != REGNO (operands[0]) + && GET_MODE_SIZE (GET_MODE (operands[0])) <= UNITS_PER_WORD + && REG_P (XEXP (operands[2], 0)) + && REGNO (XEXP (operands[2], 0)) == REGNO (operands[0])" + [(set (match_dup 0) + (match_op_dup 3 [(match_dup 2)])) + (set (match_dup 0) + (match_op_dup + 4 [(match_dup 0) + (match_dup 1)]))] + "") + +;; As op-extend-split, but the mem operand is not extended. +;; +;; op [rx],ry,rz changed into +;; move ry,rz +;; op [rx],rz +;; lose if ry=rz or rx=rz +;; Call this op-extend. + +(define_split + [(set (match_operand 0 "register_operand" "") + (match_operator + 3 "cris_orthogonal_operator" + [(match_operand 1 "register_operand" "") + (match_operand 2 "memory_operand" "")]))] + "!TARGET_V32 + && REG_P (operands[0]) + && REG_P (operands[1]) + && REGNO (operands[1]) != REGNO (operands[0]) + && GET_MODE_SIZE (GET_MODE (operands[0])) <= UNITS_PER_WORD + && REG_P (XEXP (operands[2], 0)) + && REGNO (XEXP (operands[2], 0)) != REGNO (operands[0])" + [(set (match_dup 0) + (match_dup 1)) + (set (match_dup 0) + (match_op_dup + 3 [(match_dup 0) + (match_dup 2)]))] + "") + +;; As op-extend-split-rx=rz, non-extended. +;; Call this op-split-rx=rz + +(define_split + [(set (match_operand 0 "register_operand" "") + (match_operator + 3 "cris_commutative_orth_op" + [(match_operand 2 "memory_operand" "") + (match_operand 1 "register_operand" "")]))] + "!TARGET_V32 + && REG_P (operands[0]) + && REG_P (operands[1]) + && REGNO (operands[1]) != REGNO (operands[0]) + && GET_MODE_SIZE (GET_MODE (operands[0])) <= UNITS_PER_WORD + && REG_P (XEXP (operands[2], 0)) + && REGNO (XEXP (operands[2], 0)) != REGNO (operands[0])" + [(set (match_dup 0) + (match_dup 1)) + (set (match_dup 0) + (match_op_dup + 3 [(match_dup 0) + (match_dup 2)]))] + "") + +;; As op-extend-split-swapped, nonextended. +;; Call this op-split-swapped. + +(define_split + [(set (match_operand 0 "register_operand" "") + (match_operator + 3 "cris_commutative_orth_op" + [(match_operand 1 "register_operand" "") + (match_operand 2 "memory_operand" "")]))] + "!TARGET_V32 + && REG_P (operands[0]) && REG_P (operands[1]) + && REGNO (operands[1]) != REGNO (operands[0]) + && GET_MODE_SIZE (GET_MODE (operands[0])) <= UNITS_PER_WORD + && REG_P (XEXP (operands[2], 0)) + && REGNO (XEXP (operands[2], 0)) == REGNO (operands[0])" + [(set (match_dup 0) + (match_dup 2)) + (set (match_dup 0) + (match_op_dup + 3 [(match_dup 0) + (match_dup 1)]))] + "") + +;; As op-extend-split-swapped-rx=rz, non-extended. +;; Call this op-split-swapped-rx=rz. 
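Editorial aside: the op-extend splits in this group all rewrite a three-address operate-on-memory form into a register copy (or an explicit extend) followed by a two-address operation, and their conditions guard exactly the cases where that rewrite would clobber a source register. A rough C-level picture of the transformation is sketched below; the CRIS mnemonics in the comments are only indicative, not generated output.

/* Shape of "op-extend-split": z = y + sign_extend(*x) may be
   rewritten as a copy plus a two-address add only when z is neither
   y nor the address register x.  */
long before_split (long y, const short *x)
{
  return y + *x;              /* adds.w [rx],ry,rz   (three-address form) */
}

long after_split (long y, const short *x)
{
  long z = y;                 /* move.d ry,rz */
  z += *x;                    /* adds.w [rx],rz      (two-address form) */
  return z;
}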
+ +(define_split + [(set (match_operand 0 "register_operand" "") + (match_operator + 3 "cris_orthogonal_operator" + [(match_operand 2 "memory_operand" "") + (match_operand 1 "register_operand" "")]))] + "!TARGET_V32 + && REG_P (operands[0]) && REG_P (operands[1]) + && REGNO (operands[1]) != REGNO (operands[0]) + && GET_MODE_SIZE (GET_MODE (operands[0])) <= UNITS_PER_WORD + && REG_P (XEXP (operands[2], 0)) + && REGNO (XEXP (operands[2], 0)) == REGNO (operands[0])" + [(set (match_dup 0) + (match_dup 2)) + (set (match_dup 0) + (match_op_dup + 3 [(match_dup 0) + (match_dup 1)]))] + "") + +;; Splits for all cases in side-effect insns where (possibly after reload +;; and register allocation) rx and ry in [rx=ry+i] are equal. + +;; move.S1 [rx=rx+rz.S2],ry + +(define_split + [(parallel + [(set (match_operand 0 "register_operand" "") + (match_operator + 6 "cris_mem_op" + [(plus:SI + (mult:SI (match_operand:SI 1 "register_operand" "") + (match_operand:SI 2 "const_int_operand" "")) + (match_operand:SI 3 "register_operand" ""))])) + (set (match_operand:SI 4 "register_operand" "") + (plus:SI (mult:SI (match_dup 1) + (match_dup 2)) + (match_dup 3)))])] + "REG_P (operands[3]) && REG_P (operands[4]) + && REGNO (operands[3]) == REGNO (operands[4])" + [(set (match_dup 4) (plus:SI (mult:SI (match_dup 1) (match_dup 2)) + (match_dup 3))) + (set (match_dup 0) (match_dup 5))] + "operands[5] = replace_equiv_address (operands[6], operands[3]);") + +;; move.S1 [rx=rx+i],ry + +(define_split + [(parallel + [(set (match_operand 0 "register_operand" "") + (match_operator + 5 "cris_mem_op" + [(plus:SI (match_operand:SI 1 "cris_bdap_operand" "") + (match_operand:SI 2 "cris_bdap_operand" ""))])) + (set (match_operand:SI 3 "register_operand" "") + (plus:SI (match_dup 1) + (match_dup 2)))])] + "(rtx_equal_p (operands[3], operands[1]) + || rtx_equal_p (operands[3], operands[2]))" + [(set (match_dup 3) (plus:SI (match_dup 1) (match_dup 2))) + (set (match_dup 0) (match_dup 4))] +{ + operands[4] = replace_equiv_address (operands[5], operands[3]); + cris_order_for_addsi3 (operands, 1); +}) + +;; move.S1 ry,[rx=rx+rz.S2] + +(define_split + [(parallel + [(set (match_operator + 6 "cris_mem_op" + [(plus:SI + (mult:SI (match_operand:SI 0 "register_operand" "") + (match_operand:SI 1 "const_int_operand" "")) + (match_operand:SI 2 "register_operand" ""))]) + (match_operand 3 "register_operand" "")) + (set (match_operand:SI 4 "register_operand" "") + (plus:SI (mult:SI (match_dup 0) + (match_dup 1)) + (match_dup 2)))])] + "REG_P (operands[2]) && REG_P (operands[4]) + && REGNO (operands[4]) == REGNO (operands[2])" + [(set (match_dup 4) (plus:SI (mult:SI (match_dup 0) (match_dup 1)) + (match_dup 2))) + (set (match_dup 5) (match_dup 3))] + "operands[5] = replace_equiv_address (operands[6], operands[4]);") + +;; move.S1 ry,[rx=rx+i] + +(define_split + [(parallel + [(set (match_operator + 6 "cris_mem_op" + [(plus:SI (match_operand:SI 0 "cris_bdap_operand" "") + (match_operand:SI 1 "cris_bdap_operand" ""))]) + (match_operand 2 "register_operand" "")) + (set (match_operand:SI 3 "register_operand" "") + (plus:SI (match_dup 0) + (match_dup 1)))])] + "(rtx_equal_p (operands[3], operands[0]) + || rtx_equal_p (operands[3], operands[1]))" + [(set (match_dup 3) (plus:SI (match_dup 0) (match_dup 1))) + (set (match_dup 5) (match_dup 2))] +{ + operands[5] = replace_equiv_address (operands[6], operands[3]); + cris_order_for_addsi3 (operands, 0); +}) + +;; clear.[bwd] [rx=rx+rz.S2] + +(define_split + [(parallel + [(set (mem:BWD (plus:SI + (mult:SI 
(match_operand:SI 0 "register_operand" "") + (match_operand:SI 1 "const_int_operand" "")) + (match_operand:SI 2 "register_operand" ""))) + (const_int 0)) + (set (match_operand:SI 3 "register_operand" "") + (plus:SI (mult:SI (match_dup 0) + (match_dup 1)) + (match_dup 2)))])] + "REG_P (operands[2]) && REG_P (operands[3]) + && REGNO (operands[3]) == REGNO (operands[2])" + [(set (match_dup 3) (plus:SI (mult:SI (match_dup 0) (match_dup 1)) + (match_dup 2))) + (set (mem:BWD (match_dup 3)) (const_int 0))] + "") + +;; clear.[bwd] [rx=rx+i] + +(define_split + [(parallel + [(set (mem:BWD + (plus:SI (match_operand:SI 0 "cris_bdap_operand" "") + (match_operand:SI 1 "cris_bdap_operand" ""))) + (const_int 0)) + (set (match_operand:SI 2 "register_operand" "") + (plus:SI (match_dup 0) + (match_dup 1)))])] + "(rtx_equal_p (operands[0], operands[2]) + || rtx_equal_p (operands[2], operands[1]))" + [(set (match_dup 2) (plus:SI (match_dup 0) (match_dup 1))) + (set (mem:BWD (match_dup 2)) (const_int 0))] + "cris_order_for_addsi3 (operands, 0);") + +;; mov(s|u).S1 [rx=rx+rz.S2],ry + +(define_split + [(parallel + [(set (match_operand 0 "register_operand" "") + (match_operator + 5 "cris_extend_operator" + [(mem (plus:SI + (mult:SI (match_operand:SI 1 "register_operand" "") + (match_operand:SI 2 "const_int_operand" "")) + (match_operand:SI 3 "register_operand" "")))])) + (set (match_operand:SI 4 "register_operand" "") + (plus:SI (mult:SI (match_dup 1) + (match_dup 2)) + (match_dup 3)))])] + "REG_P (operands[3]) + && REG_P (operands[4]) + && REGNO (operands[3]) == REGNO (operands[4])" + [(set (match_dup 4) (plus:SI (mult:SI (match_dup 1) (match_dup 2)) + (match_dup 3))) + (set (match_dup 0) (match_op_dup 5 [(match_dup 6)]))] + "operands[6] = replace_equiv_address (XEXP (operands[5], 0), operands[4]);") + +;; mov(s|u).S1 [rx=rx+i],ry + +(define_split + [(parallel + [(set (match_operand 0 "register_operand" "") + (match_operator + 4 "cris_extend_operator" + [(mem (plus:SI + (match_operand:SI 1 "cris_bdap_operand" "") + (match_operand:SI 2 "cris_bdap_operand" "")))])) + (set (match_operand:SI 3 "register_operand" "") + (plus:SI (match_dup 1) + (match_dup 2)))])] + "(rtx_equal_p (operands[1], operands[3]) + || rtx_equal_p (operands[2], operands[3]))" + [(set (match_dup 3) (plus:SI (match_dup 1) (match_dup 2))) + (set (match_dup 0) (match_op_dup 4 [(match_dup 5)]))] +{ + operands[5] = replace_equiv_address (XEXP (operands[4], 0), operands[3]); + cris_order_for_addsi3 (operands, 1); +}) + +;; op.S1 [rx=rx+i],ry + +(define_split + [(parallel + [(set (match_operand 0 "register_operand" "") + (match_operator + 5 "cris_orthogonal_operator" + [(match_operand 1 "register_operand" "") + (mem (plus:SI + (match_operand:SI 2 "cris_bdap_operand" "") + (match_operand:SI 3 "cris_bdap_operand" "")))])) + (set (match_operand:SI 4 "register_operand" "") + (plus:SI (match_dup 2) + (match_dup 3)))])] + "(rtx_equal_p (operands[4], operands[2]) + || rtx_equal_p (operands[4], operands[3]))" + [(set (match_dup 4) (plus:SI (match_dup 2) (match_dup 3))) + (set (match_dup 0) (match_op_dup 5 [(match_dup 1) (match_dup 6)]))] +{ + operands[6] = replace_equiv_address (XEXP (operands[5], 1), operands[4]); + cris_order_for_addsi3 (operands, 2); +}) + +;; op.S1 [rx=rx+rz.S2],ry + +(define_split + [(parallel + [(set (match_operand 0 "register_operand" "") + (match_operator + 6 "cris_orthogonal_operator" + [(match_operand 1 "register_operand" "") + (mem (plus:SI + (mult:SI (match_operand:SI 2 "register_operand" "") + (match_operand:SI 3 
"const_int_operand" "")) + (match_operand:SI 4 "register_operand" "")))])) + (set (match_operand:SI 5 "register_operand" "") + (plus:SI (mult:SI (match_dup 2) + (match_dup 3)) + (match_dup 4)))])] + "REG_P (operands[4]) + && REG_P (operands[5]) + && REGNO (operands[5]) == REGNO (operands[4])" + [(set (match_dup 5) (plus:SI (mult:SI (match_dup 2) (match_dup 3)) + (match_dup 4))) + (set (match_dup 0) (match_op_dup 6 [(match_dup 1) (match_dup 7)]))] + "operands[7] = replace_equiv_address (XEXP (operands[6], 1), operands[5]);") + +;; op.S1 [rx=rx+rz.S2],ry (swapped) + +(define_split + [(parallel + [(set (match_operand 0 "register_operand" "") + (match_operator + 6 "cris_commutative_orth_op" + [(mem (plus:SI + (mult:SI (match_operand:SI 2 "register_operand" "") + (match_operand:SI 3 "const_int_operand" "")) + (match_operand:SI 4 "register_operand" ""))) + (match_operand 1 "register_operand" "")])) + (set (match_operand:SI 5 "register_operand" "") + (plus:SI (mult:SI (match_dup 2) + (match_dup 3)) + (match_dup 4)))])] + "REG_P (operands[4]) + && REG_P (operands[5]) + && REGNO (operands[5]) == REGNO (operands[4])" + [(set (match_dup 5) (plus:SI (mult:SI (match_dup 2) (match_dup 3)) + (match_dup 4))) + (set (match_dup 0) (match_op_dup 6 [(match_dup 7) (match_dup 1)]))] + "operands[7] = replace_equiv_address (XEXP (operands[6], 0), operands[5]);") + +;; op.S1 [rx=rx+i],ry (swapped) + +(define_split + [(parallel + [(set (match_operand 0 "register_operand" "") + (match_operator + 5 "cris_commutative_orth_op" + [(mem + (plus:SI (match_operand:SI 2 "cris_bdap_operand" "") + (match_operand:SI 3 "cris_bdap_operand" ""))) + (match_operand 1 "register_operand" "")])) + (set (match_operand:SI 4 "register_operand" "") + (plus:SI (match_dup 2) + (match_dup 3)))])] + "(rtx_equal_p (operands[4], operands[2]) + || rtx_equal_p (operands[4], operands[3]))" + [(set (match_dup 4) (plus:SI (match_dup 2) (match_dup 3))) + (set (match_dup 0) (match_op_dup 5 [(match_dup 6) (match_dup 1)]))] +{ + operands[6] = replace_equiv_address (XEXP (operands[5], 0), operands[4]); + cris_order_for_addsi3 (operands, 2); +}) + +;; op(s|u).S1 [rx=rx+rz.S2],ry + +(define_split + [(parallel + [(set (match_operand 0 "register_operand" "") + (match_operator + 6 "cris_operand_extend_operator" + [(match_operand 1 "register_operand" "") + (match_operator + 7 "cris_extend_operator" + [(mem (plus:SI + (mult:SI (match_operand:SI 2 "register_operand" "") + (match_operand:SI 3 "const_int_operand" "")) + (match_operand:SI 4 "register_operand" "")))])])) + (set (match_operand:SI 5 "register_operand" "") + (plus:SI (mult:SI (match_dup 2) + (match_dup 3)) + (match_dup 4)))])] + "REG_P (operands[4]) + && REG_P (operands[5]) + && REGNO (operands[5]) == REGNO (operands[4])" + [(set (match_dup 5) (plus:SI (mult:SI (match_dup 2) (match_dup 3)) + (match_dup 4))) + (set (match_dup 0) (match_op_dup 6 [(match_dup 1) (match_dup 8)]))] + "operands[8] = gen_rtx_fmt_e (GET_CODE (operands[7]), GET_MODE (operands[7]), + replace_equiv_address (XEXP (operands[7], 0), + operands[5]));") + +;; op(s|u).S1 [rx=rx+i],ry + +(define_split + [(parallel + [(set (match_operand 0 "register_operand" "") + (match_operator + 5 "cris_operand_extend_operator" + [(match_operand 1 "register_operand" "") + (match_operator + 6 "cris_extend_operator" + [(mem + (plus:SI (match_operand:SI 2 "cris_bdap_operand" "") + (match_operand:SI 3 "cris_bdap_operand" "") + ))])])) + (set (match_operand:SI 4 "register_operand" "") + (plus:SI (match_dup 2) + (match_dup 3)))])] + "(rtx_equal_p 
(operands[4], operands[2]) + || rtx_equal_p (operands[4], operands[3]))" + [(set (match_dup 4) (plus:SI (match_dup 2) (match_dup 3))) + (set (match_dup 0) (match_op_dup 5 [(match_dup 1) (match_dup 7)]))] +{ + operands[7] = gen_rtx_fmt_e (GET_CODE (operands[6]), GET_MODE (operands[6]), + replace_equiv_address (XEXP (operands[6], 0), + operands[4])); + cris_order_for_addsi3 (operands, 2); +}) + +;; op(s|u).S1 [rx=rx+rz.S2],ry (swapped, plus or bound) + +(define_split + [(parallel + [(set (match_operand 0 "register_operand" "") + (match_operator + 7 "cris_plus_or_bound_operator" + [(match_operator + 6 "cris_extend_operator" + [(mem (plus:SI + (mult:SI (match_operand:SI 2 "register_operand" "") + (match_operand:SI 3 "const_int_operand" "")) + (match_operand:SI 4 "register_operand" "")))]) + (match_operand 1 "register_operand" "")])) + (set (match_operand:SI 5 "register_operand" "") + (plus:SI (mult:SI (match_dup 2) + (match_dup 3)) + (match_dup 4)))])] + "REG_P (operands[4]) && REG_P (operands[5]) + && REGNO (operands[5]) == REGNO (operands[4])" + [(set (match_dup 5) (plus:SI (mult:SI (match_dup 2) (match_dup 3)) + (match_dup 4))) + (set (match_dup 0) (match_op_dup 6 [(match_dup 8) (match_dup 1)]))] + "operands[8] = gen_rtx_fmt_e (GET_CODE (operands[6]), GET_MODE (operands[6]), + replace_equiv_address (XEXP (operands[6], 0), + operands[5]));") + +;; op(s|u).S1 [rx=rx+i],ry (swapped, plus or bound) + +(define_split + [(parallel + [(set (match_operand 0 "register_operand" "") + (match_operator + 6 "cris_plus_or_bound_operator" + [(match_operator + 5 "cris_extend_operator" + [(mem (plus:SI + (match_operand:SI 2 "cris_bdap_operand" "") + (match_operand:SI 3 "cris_bdap_operand" "")))]) + (match_operand 1 "register_operand" "")])) + (set (match_operand:SI 4 "register_operand" "") + (plus:SI (match_dup 2) + (match_dup 3)))])] + "(rtx_equal_p (operands[4], operands[2]) + || rtx_equal_p (operands[4], operands[3]))" + [(set (match_dup 4) (plus:SI (match_dup 2) (match_dup 3))) + (set (match_dup 0) (match_op_dup 6 [(match_dup 7) (match_dup 1)]))] +{ + operands[7] = gen_rtx_fmt_e (GET_CODE (operands[5]), GET_MODE (operands[5]), + replace_equiv_address (XEXP (operands[5], 0), + operands[4])); + cris_order_for_addsi3 (operands, 2); +}) + +;; Splits for addressing prefixes that have no side-effects, so we can +;; fill a delay slot. Never split if we lose something, though. + +;; If we have a +;; move [indirect_ref],rx +;; where indirect ref = {const, [r+], [r]}, it costs as much as +;; move indirect_ref,rx +;; move [rx],rx +;; Take care not to allow indirect_ref = register. + +;; We're not allowed to generate copies of registers with different mode +;; until after reload; copying pseudos upsets reload. CVS as of +;; 2001-08-24, unwind-dw2-fde.c, _Unwind_Find_FDE ICE in +;; cselib_invalidate_regno. + +(define_split ; indir_to_reg_split + [(set (match_operand 0 "register_operand" "") + (match_operand 1 "indirect_operand" ""))] + "reload_completed + && REG_P (operands[0]) + && GET_MODE_SIZE (GET_MODE (operands[0])) <= UNITS_PER_WORD + && (MEM_P (XEXP (operands[1], 0)) || CONSTANT_P (XEXP (operands[1], 0))) + && REGNO (operands[0]) < CRIS_LAST_GENERAL_REGISTER" + [(set (match_dup 2) (match_dup 4)) + (set (match_dup 0) (match_dup 3))] + "operands[2] = gen_rtx_REG (Pmode, REGNO (operands[0])); + operands[3] = replace_equiv_address (operands[1], operands[2]); + operands[4] = XEXP (operands[1], 0);") + +;; As the above, but MOVS and MOVU. 
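Editorial aside: the indir_to_reg_split above follows the observation in its comment: when the address of the memory operand must itself be fetched (a constant address, [r], or [r+]), loading that address into the destination register first and then loading through it costs no more, and the second, simpler load becomes a delay-slot candidate. In C terms the split turns one double-indirect load into two simple loads, as in the purely illustrative sketch below.

int load_double_indirect (int *const *pp)
{
  return **pp;                /* move.d [indirect_ref],rx before the split */
}

int load_after_split (int *const *pp)
{
  int *rx = *pp;              /* move.d indirect_ref,rx -- fetch the address */
  return *rx;                 /* move.d [rx],rx         -- fetch the value   */
}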
+ +(define_split + [(set (match_operand 0 "register_operand" "") + (match_operator + 4 "cris_extend_operator" + [(match_operand 1 "indirect_operand" "")]))] + "reload_completed + && REG_P (operands[0]) + && GET_MODE_SIZE (GET_MODE (operands[0])) <= UNITS_PER_WORD + && (MEM_P (XEXP (operands[1], 0)) + || CONSTANT_P (XEXP (operands[1], 0)))" + [(set (match_dup 2) (match_dup 5)) + (set (match_dup 0) (match_op_dup 4 [(match_dup 3)]))] + "operands[2] = gen_rtx_REG (Pmode, REGNO (operands[0])); + operands[3] = replace_equiv_address (XEXP (operands[4], 0), operands[2]); + operands[5] = XEXP (operands[1], 0);") + +;; Various peephole optimizations. +;; +;; Watch out: when you exchange one set of instructions for another, the +;; condition codes setting must be the same, or you have to CC_INIT or +;; whatever is appropriate, in the pattern before you emit the +;; assembly text. This is best done here, not in cris_notice_update_cc, +;; to keep changes local to their cause. +;; +;; Do not add patterns that you do not know will be matched. +;; Please also add a self-contained testcase. + +;; We have trouble with and:s and shifts. Maybe something is broken in +;; gcc? Or it could just be that bit-field insn expansion is a bit +;; suboptimal when not having extzv insns. +;; Testcase for the following four peepholes: gcc.dg/cris-peep2-xsrand.c + +(define_peephole2 ; asrandb (peephole casesi+31) + [(set (match_operand:SI 0 "register_operand" "") + (ashiftrt:SI (match_dup 0) + (match_operand:SI 1 "const_int_operand" ""))) + (set (match_dup 0) + (and:SI (match_dup 0) + (match_operand 2 "const_int_operand" "")))] + "INTVAL (operands[2]) > 31 + && INTVAL (operands[2]) < 255 + && INTVAL (operands[1]) > 23 + /* Check that the and-operation enables us to use logical-shift. */ + && (INTVAL (operands[2]) + & ((HOST_WIDE_INT) -1 << (32 - INTVAL (operands[1])))) == 0" + [(set (match_dup 0) (lshiftrt:SI (match_dup 0) (match_dup 1))) + (set (match_dup 3) (and:QI (match_dup 3) (match_dup 4)))] + ;; FIXME: CC0 is valid except for the M bit. +{ + operands[3] = gen_rtx_REG (QImode, REGNO (operands[0])); + operands[4] = GEN_INT (trunc_int_for_mode (INTVAL (operands[2]), QImode)); +}) + +(define_peephole2 ; asrandw (peephole casesi+32) + [(set (match_operand:SI 0 "register_operand" "") + (ashiftrt:SI (match_dup 0) + (match_operand:SI 1 "const_int_operand" ""))) + (set (match_dup 0) + (and:SI (match_dup 0) (match_operand 2 "const_int_operand" "")))] + "INTVAL (operands[2]) > 31 + && INTVAL (operands[2]) < 65535 + && INTVAL (operands[2]) != 255 + && INTVAL (operands[1]) > 15 + /* Check that the and-operation enables us to use logical-shift. */ + && (INTVAL (operands[2]) + & ((HOST_WIDE_INT) -1 << (32 - INTVAL (operands[1])))) == 0" + [(set (match_dup 0) (lshiftrt:SI (match_dup 0) (match_dup 1))) + (set (match_dup 3) (and:HI (match_dup 3) (match_dup 4)))] + ;; FIXME: CC0 is valid except for the M bit. 
+{ + operands[3] = gen_rtx_REG (HImode, REGNO (operands[0])); + operands[4] = GEN_INT (trunc_int_for_mode (INTVAL (operands[2]), HImode)); +}) + +(define_peephole2 ; lsrandb (peephole casesi+33) + [(set (match_operand:SI 0 "register_operand" "") + (lshiftrt:SI (match_dup 0) + (match_operand:SI 1 "const_int_operand" ""))) + (set (match_dup 0) + (and:SI (match_dup 0) (match_operand 2 "const_int_operand" "")))] + "INTVAL (operands[2]) > 31 + && INTVAL (operands[2]) < 255 + && INTVAL (operands[1]) > 23" + [(set (match_dup 0) (lshiftrt:SI (match_dup 0) (match_dup 1))) + (set (match_dup 3) (and:QI (match_dup 3) (match_dup 4)))] + ;; FIXME: CC0 is valid except for the M bit. +{ + operands[3] = gen_rtx_REG (QImode, REGNO (operands[0])); + operands[4] = GEN_INT (trunc_int_for_mode (INTVAL (operands[2]), QImode)); +}) + +(define_peephole2 ; lsrandw (peephole casesi+34) + [(set (match_operand:SI 0 "register_operand" "") + (lshiftrt:SI (match_dup 0) + (match_operand:SI 1 "const_int_operand" ""))) + (set (match_dup 0) + (and:SI (match_dup 0) (match_operand 2 "const_int_operand" "")))] + "INTVAL (operands[2]) > 31 && INTVAL (operands[2]) < 65535 + && INTVAL (operands[2]) != 255 + && INTVAL (operands[1]) > 15" + [(set (match_dup 0) (lshiftrt:SI (match_dup 0) (match_dup 1))) + (set (match_dup 3) (and:HI (match_dup 3) (match_dup 4)))] + ;; FIXME: CC0 is valid except for the M bit. +{ + operands[3] = gen_rtx_REG (HImode, REGNO (operands[0])); + operands[4] = GEN_INT (trunc_int_for_mode (INTVAL (operands[2]), HImode)); +}) + + +;; Change +;; add.d n,rx +;; move [rx],ry +;; into +;; move [rx=rx+n],ry +;; when -128 <= n <= 127. +;; This will reduce the size of the assembler code for n = [-128..127], +;; and speed up accordingly. Don't match if the previous insn is +;; (set rx rz) because that combination is matched by another peephole. +;; No stable test-case. + +(define_peephole2 ; moversideqi (peephole casesi+35) + [(set (match_operand:SI 0 "register_operand" "") + (plus:SI (match_operand:SI 1 "register_operand" "") + (match_operand:SI 2 "const_int_operand" ""))) + (set (match_operand 3 "register_operand" "") + (match_operator 4 "cris_mem_op" [(match_dup 0)]))] + "GET_MODE_SIZE (GET_MODE (operands[4])) <= UNITS_PER_WORD + && REGNO (operands[3]) != REGNO (operands[0]) + && (BASE_P (operands[1]) || BASE_P (operands[2])) + && !CRIS_CONST_OK_FOR_LETTER_P (INTVAL (operands[2]), 'J') + && !CRIS_CONST_OK_FOR_LETTER_P (INTVAL (operands[2]), 'N') + && (INTVAL (operands[2]) >= -128 && INTVAL (operands[2]) < 128) + && TARGET_SIDE_EFFECT_PREFIXES" + [(parallel + [(set (match_dup 3) (match_dup 5)) + (set (match_dup 0) (plus:SI (match_dup 1) (match_dup 2)))])] + ;; Checking the previous insn is a bit too awkward for the condition. 
+{ + rtx prev = prev_nonnote_insn (curr_insn); + if (prev != NULL_RTX) + { + rtx set = single_set (prev); + if (set != NULL_RTX + && REG_S_P (SET_DEST (set)) + && REGNO (SET_DEST (set)) == REGNO (operands[0]) + && REG_S_P (SET_SRC (set))) + FAIL; + } + operands[5] + = replace_equiv_address (operands[4], + gen_rtx_PLUS (SImode, + operands[1], operands[2])); +}) + +;; Vice versa: move ry,[rx=rx+n] + +(define_peephole2 ; movemsideqi (peephole casesi+36) + [(set (match_operand:SI 0 "register_operand" "") + (plus:SI (match_operand:SI 1 "register_operand" "") + (match_operand:SI 2 "const_int_operand" ""))) + (set (match_operator 3 "cris_mem_op" [(match_dup 0)]) + (match_operand 4 "register_operand" ""))] + "GET_MODE_SIZE (GET_MODE (operands[4])) <= UNITS_PER_WORD + && REGNO (operands[4]) != REGNO (operands[0]) + && (BASE_P (operands[1]) || BASE_P (operands[2])) + && !CRIS_CONST_OK_FOR_LETTER_P (INTVAL (operands[2]), 'J') + && !CRIS_CONST_OK_FOR_LETTER_P (INTVAL (operands[2]), 'N') + && (INTVAL (operands[2]) >= -128 && INTVAL (operands[2]) < 128) + && TARGET_SIDE_EFFECT_PREFIXES" + [(parallel + [(set (match_dup 5) (match_dup 4)) + (set (match_dup 0) (plus:SI (match_dup 1) (match_dup 2)))])] + "operands[5] + = replace_equiv_address (operands[3], + gen_rtx_PLUS (SImode, + operands[1], operands[2]));") + +;; As above, change: +;; add.d n,rx +;; op.d [rx],ry +;; into: +;; op.d [rx=rx+n],ry +;; Saves when n = [-128..127]. +;; +;; Splitting and joining combinations for side-effect modes are slightly +;; out of hand. They probably will not save the time they take typing in, +;; not to mention the bugs that creep in. FIXME: Get rid of as many of +;; the splits and peepholes as possible. +;; No stable test-case. + +(define_peephole2 ; mover2side (peephole casesi+37) + [(set (match_operand:SI 0 "register_operand" "") + (plus:SI (match_operand:SI 1 "register_operand" "") + (match_operand:SI 2 "const_int_operand" ""))) + (set (match_operand 3 "register_operand" "") + (match_operator 4 "cris_orthogonal_operator" + [(match_dup 3) + (match_operator + 5 "cris_mem_op" [(match_dup 0)])]))] + ;; FIXME: What about DFmode? + ;; Change to GET_MODE_SIZE (GET_MODE (operands[3])) <= UNITS_PER_WORD? + "GET_MODE (operands[3]) != DImode + && REGNO (operands[0]) != REGNO (operands[3]) + && !CRIS_CONST_OK_FOR_LETTER_P (INTVAL (operands[2]), 'J') + && !CRIS_CONST_OK_FOR_LETTER_P (INTVAL (operands[2]), 'N') + && INTVAL (operands[2]) >= -128 + && INTVAL (operands[2]) <= 127 + && TARGET_SIDE_EFFECT_PREFIXES" + [(parallel + [(set (match_dup 3) (match_op_dup 4 [(match_dup 3) (match_dup 6)])) + (set (match_dup 0) (plus:SI (match_dup 1) (match_dup 2)))])] + "operands[6] + = replace_equiv_address (operands[5], + gen_rtx_PLUS (SImode, + operands[1], operands[2]));") + +;; Sometimes, for some reason the pattern +;; move x,rx +;; add y,rx +;; move [rx],rz +;; will occur. Solve this, and likewise for to-memory. +;; No stable test-case. + +(define_peephole2 ; moverside (peephole casesi+38) + [(set (match_operand:SI 0 "register_operand" "") + (match_operand:SI 1 "cris_bdap_biap_operand" "")) + (set (match_dup 0) + (plus:SI (match_operand:SI 2 "cris_bdap_biap_operand" "") + (match_operand:SI 3 "cris_bdap_biap_operand" ""))) + (set (match_operand 4 "register_operand" "") + (match_operator 5 "cris_mem_op" [(match_dup 0)]))] + "(rtx_equal_p (operands[2], operands[0]) + || rtx_equal_p (operands[3], operands[0])) + && cris_side_effect_mode_ok (PLUS, operands, 0, + (REG_S_P (operands[1]) + ? 1 + : (rtx_equal_p (operands[2], operands[0]) + ? 
3 : 2)), + (! REG_S_P (operands[1]) + ? 1 + : (rtx_equal_p (operands[2], operands[0]) + ? 3 : 2)), + -1, 4)" + [(parallel + [(set (match_dup 4) (match_dup 6)) + (set (match_dup 0) (plus:SI (match_dup 7) (match_dup 8)))])] +{ + rtx otherop + = rtx_equal_p (operands[2], operands[0]) ? operands[3] : operands[2]; + + /* Make sure we have canonical RTX so we match the insn pattern - + not a constant in the first operand. We also require the order + (plus reg mem) to match the final pattern. */ + if (CONSTANT_P (otherop) || MEM_P (otherop)) + { + operands[7] = operands[1]; + operands[8] = otherop; + } + else + { + operands[7] = otherop; + operands[8] = operands[1]; + } + operands[6] + = replace_equiv_address (operands[5], + gen_rtx_PLUS (SImode, + operands[7], operands[8])); +}) + +;; As above but to memory. +;; FIXME: Split movemside and moverside into variants and prune +;; the ones that don't trig. +;; No stable test-case. + +(define_peephole2 ; movemside (peephole casesi+39) + [(set (match_operand:SI 0 "register_operand" "") + (match_operand:SI 1 "cris_bdap_biap_operand" "")) + (set (match_dup 0) + (plus:SI (match_operand:SI 2 "cris_bdap_biap_operand" "") + (match_operand:SI 3 "cris_bdap_biap_operand" ""))) + (set (match_operator 4 "cris_mem_op" [(match_dup 0)]) + (match_operand 5 "register_operand" ""))] + "(rtx_equal_p (operands[2], operands[0]) + || rtx_equal_p (operands[3], operands[0])) + && cris_side_effect_mode_ok (PLUS, operands, 0, + (REG_S_P (operands[1]) + ? 1 + : (rtx_equal_p (operands[2], operands[0]) + ? 3 : 2)), + (! REG_S_P (operands[1]) + ? 1 + : (rtx_equal_p (operands[2], operands[0]) + ? 3 : 2)), + -1, 5)" + [(parallel + [(set (match_dup 6) (match_dup 5)) + (set (match_dup 0) (plus:SI (match_dup 7) (match_dup 8)))])] +{ + rtx otherop + = rtx_equal_p (operands[2], operands[0]) ? operands[3] : operands[2]; + + /* Make sure we have canonical RTX so we match the insn pattern - + not a constant in the first operand. We also require the order + (plus reg mem) to match the final pattern. */ + if (CONSTANT_P (otherop) || MEM_P (otherop)) + { + operands[7] = operands[1]; + operands[8] = otherop; + } + else + { + operands[7] = otherop; + operands[8] = operands[1]; + } + operands[6] + = replace_equiv_address (operands[4], + gen_rtx_PLUS (SImode, + operands[7], operands[8])); +}) + +;; Another spotted bad code: +;; move rx,ry +;; move [ry],ry +;; No stable test-case. + +(define_peephole2 ; movei (peephole casesi+42) + [(set (match_operand:SI 0 "register_operand" "") + (match_operand:SI 1 "register_operand" "")) + (set (match_operand 2 "register_operand" "") + (match_operator 3 "cris_mem_op" [(match_dup 0)]))] + "REGNO (operands[0]) == REGNO (operands[2]) + && (REGNO_REG_CLASS (REGNO (operands[0])) + == REGNO_REG_CLASS (REGNO (operands[1]))) + && GET_MODE_SIZE (GET_MODE (operands[2])) <= UNITS_PER_WORD" + [(set (match_dup 2) (match_dup 4))] + "operands[4] = replace_equiv_address (operands[3], operands[1]);") + +;; move.d [r10+16],r9 +;; and.d r12,r9 +;; change to +;; and.d [r10+16],r12,r9 +;; With generalization of the operation, the size and the addressing mode. +;; This seems to be the result of a quirk in register allocation +;; missing the three-operand cases when having different predicates. +;; Maybe that it matters that it is a commutative operation. +;; This pattern helps that situation, but there's still the increased +;; register pressure. +;; Note that adding the noncommutative variant did not show any matches +;; in ipps and cc1, so it's not here. +;; No stable test-case. 
+ +(define_peephole2 ; op3 (peephole casesi+44) + [(set (match_operand 0 "register_operand" "") + (match_operator + 6 "cris_mem_op" + [(plus:SI + (match_operand:SI 1 "cris_bdap_biap_operand" "") + (match_operand:SI 2 "cris_bdap_biap_operand" ""))])) + (set (match_dup 0) + (match_operator + 5 "cris_commutative_orth_op" + [(match_operand 3 "register_operand" "") + (match_operand 4 "register_operand" "")]))] + "(rtx_equal_p (operands[3], operands[0]) + || rtx_equal_p (operands[4], operands[0])) + && ! rtx_equal_p (operands[3], operands[4]) + && (REG_S_P (operands[1]) || REG_S_P (operands[2])) + && GET_MODE_SIZE (GET_MODE (operands[0])) <= UNITS_PER_WORD" + [(set (match_dup 0) (match_op_dup 5 [(match_dup 7) (match_dup 6)]))] + "operands[7] + = rtx_equal_p (operands[3], operands[0]) ? operands[4] : operands[3];") + +;; I cannot tell GCC (2.1, 2.7.2) how to correctly reload an instruction +;; that looks like +;; and.b some_byte,const,reg_32 +;; where reg_32 is the destination of the "three-address" code optimally. +;; It should be: +;; movu.b some_byte,reg_32 +;; and.b const,reg_32 +;; but it turns into: +;; move.b some_byte,reg_32 +;; and.d const,reg_32 +;; Fix it here. +;; Testcases: gcc.dg/cris-peep2-andu1.c gcc.dg/cris-peep2-andu2.c + +(define_peephole2 ; andu (casesi+45) + [(set (match_operand:SI 0 "register_operand" "") + (match_operand:SI 1 "nonimmediate_operand" "")) + (set (match_operand:SI 2 "register_operand" "") + (and:SI (match_dup 0) + (match_operand:SI 3 "const_int_operand" "")))] + ;; Since the size of the memory access could be made different here, + ;; don't do this for a mem-volatile access. + "REGNO (operands[2]) == REGNO (operands[0]) + && INTVAL (operands[3]) <= 65535 && INTVAL (operands[3]) >= 0 + && !CRIS_CONST_OK_FOR_LETTER_P (INTVAL (operands[3]), 'I') + && !side_effects_p (operands[1]) + && (!REG_P (operands[1]) + || REGNO (operands[1]) <= CRIS_LAST_GENERAL_REGISTER)" + ;; FIXME: CC0 valid except for M (i.e. CC_NOT_NEGATIVE). + [(set (match_dup 0) (match_dup 4)) + (set (match_dup 5) (match_dup 6))] +{ + enum machine_mode zmode = INTVAL (operands[3]) <= 255 ? QImode : HImode; + enum machine_mode amode + = CRIS_CONST_OK_FOR_LETTER_P (INTVAL (operands[3]), 'O') ? SImode : zmode; + rtx op1 + = (REG_S_P (operands[1]) + ? gen_rtx_REG (zmode, REGNO (operands[1])) + : adjust_address (operands[1], zmode, 0)); + operands[4] + = gen_rtx_ZERO_EXTEND (SImode, op1); + operands[5] = gen_rtx_REG (amode, REGNO (operands[0])); + operands[6] + = gen_rtx_AND (amode, gen_rtx_REG (amode, REGNO (operands[0])), + GEN_INT (trunc_int_for_mode (INTVAL (operands[3]), + amode == SImode + ? QImode : amode))); +}) + +;; Try and avoid GOTPLT reads escaping a call: transform them into +;; PLT. Curiously (but thankfully), peepholes for instructions +;; *without side-effects* that just feed a call (or call_value) are +;; not matched neither in a build or test-suite, so those patterns are +;; omitted. + +;; A "normal" move where we don't check the consumer. 
+ +(define_peephole2 ; gotplt-to-plt + [(set + (match_operand:SI 0 "register_operand" "") + (match_operator:SI + 1 "cris_mem_op" + [(plus:SI + (reg:SI CRIS_GOT_REGNUM) + (const:SI + (unspec:SI [(match_operand:SI 2 "cris_general_operand_or_symbol" "")] + CRIS_UNSPEC_PLTGOTREAD)))]))] + "flag_pic + && cris_valid_pic_const (XEXP (XEXP (operands[1], 0), 1), true) + && REGNO_REG_CLASS (REGNO (operands[0])) == REGNO_REG_CLASS (0)" + [(set (match_dup 0) (const:SI (unspec:SI [(match_dup 2)] CRIS_UNSPEC_PLT_GOTREL))) + (set (match_dup 0) (plus:SI (match_dup 0) (reg:SI CRIS_GOT_REGNUM)))] + "") + +;; And one set with a side-effect getting the PLTGOT offset. +;; First call and call_value variants. + +(define_peephole2 ; gotplt-to-plt-side-call + [(parallel + [(set + (match_operand:SI 0 "register_operand" "") + (match_operator:SI + 1 "cris_mem_op" + [(plus:SI + (reg:SI CRIS_GOT_REGNUM) + (const:SI + (unspec:SI [(match_operand:SI + 2 "cris_general_operand_or_symbol" "")] + CRIS_UNSPEC_PLTGOTREAD)))])) + (set (match_operand:SI 3 "register_operand" "") + (plus:SI (reg:SI CRIS_GOT_REGNUM) + (const:SI + (unspec:SI [(match_dup 2)] CRIS_UNSPEC_PLTGOTREAD))))]) + (parallel [(call (mem:QI (match_dup 0)) + (match_operand 4 "" "")) + (clobber (reg:SI CRIS_SRP_REGNUM))])] + "flag_pic + && cris_valid_pic_const (XEXP (XEXP (operands[1], 0), 1), true) + && peep2_reg_dead_p (2, operands[0])" + [(parallel [(call (mem:QI (match_dup 1)) + (match_dup 4)) + (clobber (reg:SI CRIS_SRP_REGNUM)) + (set (match_dup 3) + (plus:SI (reg:SI CRIS_GOT_REGNUM) + (const:SI + (unspec:SI [(match_dup 2)] + CRIS_UNSPEC_PLTGOTREAD))))])] + "") + +(define_peephole2 ; gotplt-to-plt-side-call-value + [(parallel + [(set + (match_operand:SI 0 "register_operand" "") + (match_operator:SI + 1 "cris_mem_op" + [(plus:SI + (reg:SI CRIS_GOT_REGNUM) + (const:SI + (unspec:SI [(match_operand:SI + 2 "cris_general_operand_or_symbol" "")] + CRIS_UNSPEC_PLTGOTREAD)))])) + (set (match_operand:SI 3 "register_operand" "") + (plus:SI (reg:SI CRIS_GOT_REGNUM) + (const:SI + (unspec:SI [(match_dup 2)] CRIS_UNSPEC_PLTGOTREAD))))]) + (parallel [(set (match_operand 5 "" "") + (call (mem:QI (match_dup 0)) + (match_operand 4 "" ""))) + (clobber (reg:SI CRIS_SRP_REGNUM))])] + "flag_pic + && cris_valid_pic_const (XEXP (XEXP (operands[1], 0), 1), true) + && peep2_reg_dead_p (2, operands[0])" + [(parallel [(set (match_dup 5) + (call (mem:QI (match_dup 1)) + (match_dup 4))) + (clobber (reg:SI CRIS_SRP_REGNUM)) + (set (match_dup 3) + (plus:SI (reg:SI CRIS_GOT_REGNUM) + (const:SI + (unspec:SI [(match_dup 2)] + CRIS_UNSPEC_PLTGOTREAD))))])] + "") + +(define_peephole2 ; gotplt-to-plt-side + [(parallel + [(set + (match_operand:SI 0 "register_operand" "") + (match_operator:SI + 1 "cris_mem_op" + [(plus:SI + (reg:SI CRIS_GOT_REGNUM) + (const:SI + (unspec:SI [(match_operand:SI + 2 "cris_general_operand_or_symbol" "")] + CRIS_UNSPEC_PLTGOTREAD)))])) + (set (match_operand:SI 3 "register_operand" "") + (plus:SI (reg:SI CRIS_GOT_REGNUM) + (const:SI + (unspec:SI [(match_dup 2)] CRIS_UNSPEC_PLTGOTREAD))))])] + "flag_pic + && cris_valid_pic_const (XEXP (XEXP (operands[1], 0), 1), true) + && REGNO_REG_CLASS (REGNO (operands[0])) == REGNO_REG_CLASS (0)" + [(set (match_dup 3) + (const:SI (unspec:SI [(match_dup 2)] CRIS_UNSPEC_PLTGOTREAD))) + (set (match_dup 3) (plus:SI (match_dup 3) (reg:SI CRIS_GOT_REGNUM))) + (set (match_dup 0) + (const:SI (unspec:SI [(match_dup 2)] CRIS_UNSPEC_PLT_GOTREL))) + (set (match_dup 0) (plus:SI (match_dup 0) (reg:SI CRIS_GOT_REGNUM)))] + "") + +;; Local variables: 
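Editorial aside on the asrandb/asrandw peepholes earlier in this section: they replace an arithmetic shift right followed by a mask with a logical shift plus a narrower and, and the condition (mask & (-1 << (32 - n))) == 0 is precisely what makes the sign-copied bits irrelevant. The standalone C check below states that identity; it is only a verification of the equivalence under the peephole's own constraints, not code from the port, and it assumes the usual arithmetic behaviour of >> on signed values.

#include <assert.h>
#include <stdint.h>

/* If the mask has no bits in the top n positions, i.e.
   (mask & (0xffffffffu << (32 - n))) == 0, then arithmetic and
   logical right shifts by n agree once the mask is applied: every
   bit where they differ is masked away.  */
uint32_t asr_then_and (int32_t x, unsigned n, uint32_t mask)
{
  return (uint32_t) (x >> n) & mask;       /* asr n; and mask */
}

uint32_t lsr_then_and (int32_t x, unsigned n, uint32_t mask)
{
  return ((uint32_t) x >> n) & mask;       /* lsr n; and mask (peephole form) */
}

int main (void)
{
  /* Spot-check the identity under the asrandb conditions:
     31 < mask < 255, n > 23, and the top bits of the mask clear.  */
  const unsigned n = 24;
  const uint32_t mask = 0x7f;
  assert ((mask & (0xffffffffu << (32 - n))) == 0);
  assert (asr_then_and (-123456789, n, mask) == lsr_then_and (-123456789, n, mask));
  assert (asr_then_and (0x12345678, n, mask) == lsr_then_and (0x12345678, n, mask));
  return 0;
}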
+;; Local variables:
+;; mode:emacs-lisp
+;; comment-start: ";; "
+;; eval: (set-syntax-table (copy-sequence (syntax-table)))
+;; eval: (modify-syntax-entry ?[ "(]")
+;; eval: (modify-syntax-entry ?] ")[")
+;; eval: (modify-syntax-entry ?{ "(}")
+;; eval: (modify-syntax-entry ?} "){")
+;; eval: (setq indent-tabs-mode t)
+;; End:
Index: linux.opt
===================================================================
--- linux.opt	(nonexistent)
+++ linux.opt	(revision 338)
@@ -0,0 +1,33 @@
+; GNU/Linux-specific options for the CRIS port of the compiler.
+
+; Copyright (C) 2005, 2007 Free Software Foundation, Inc.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 3, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+; FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+; for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3.  If not see
+; <http://www.gnu.org/licenses/>.
+
+; Provide a legacy -mlinux option.
+mlinux
+Target Report RejectNegative Undocumented
+
+mno-gotplt
+Target Report RejectNegative Mask(AVOID_GOTPLT) MaskExists
+Together with -fpic and -fPIC, do not use GOTPLT references
+
+; There's a small added setup cost with using GOTPLT references
+; for the first (resolving) call, but should in total be a win
+; both in code-size and execution-time.
+mgotplt
+Target Report RejectNegative InverseMask(AVOID_GOTPLT) Undocumented
Index: t-linux
===================================================================
--- t-linux	(nonexistent)
+++ t-linux	(revision 338)
@@ -0,0 +1,9 @@
+TARGET_LIBGCC2_CFLAGS += -fPIC
+CRTSTUFF_T_CFLAGS_S = $(TARGET_LIBGCC2_CFLAGS)
+SHLIB_MAPFILES += $(srcdir)/config/cris/libgcc.ver
+
+# We *know* we have a limits.h in the glibc library, with extra
+# definitions needed for e.g. libgfortran.
+ifneq ($(inhibit_libc),true)
+LIMITS_H_TEST = :
+endif
Index: cris.opt
===================================================================
--- cris.opt	(nonexistent)
+++ cris.opt	(revision 338)
@@ -0,0 +1,190 @@
+; Options for the CRIS port of the compiler.
+
+; Copyright (C) 2005, 2007 Free Software Foundation, Inc.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 3, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+; FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+; for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3.  If not see
+; <http://www.gnu.org/licenses/>.
+
+; TARGET_MUL_BUG: Whether or not to work around multiplication
+; instruction hardware bug when generating code for models where
+; it may be present.  From the trouble report for Etrax 100 LX:
+; "A multiply operation may cause incorrect cache behaviour
+; under some specific circumstances.  The problem can occur if
+; the instruction following the multiply instruction causes a
+; cache miss, and multiply operand 1 (source operand) bits
+; [31:27] matches the logical mapping of the mode register
+; address (0xb0....), and bits [9:2] of operand 1 matches the
+; TLB register address (0x258-0x25f).  There is such a mapping
+; in kernel mode or when the MMU is off.  Normally there is no
+; such mapping in user mode, and the problem will therefore
+; probably not occur in Linux user mode programs."
+;
+; We have no sure-fire way to know from within GCC that we're
+; compiling a user program.  For example, -fpic/PIC is used in
+; libgcc which is linked into the kernel.  However, the
+; workaround option -mno-mul-bug can be safely used per-package
+; when compiling programs.  The same goes for general user-only
+; libraries such as glibc, since there's no user-space
+; driver-like program that gets a mapping of I/O registers (all
+; on the same page, including the TLB registers).
+mmul-bug-workaround
+Target Report Mask(MUL_BUG)
+Work around bug in multiplication instruction
+
+; TARGET_ETRAX4_ADD: Instruction-set additions from Etrax 4 and up.
+; (Just "lz".)
+metrax4
+Target Report Mask(ETRAX4_ADD)
+Compile for ETRAX 4 (CRIS v3)
+
+; See cris_handle_option.
+metrax100
+Target Report RejectNegative
+Compile for ETRAX 100 (CRIS v8)
+
+; See cris_handle_option.
+mno-etrax100
+Target Report RejectNegative Undocumented
+
+mpdebug
+Target Report Mask(PDEBUG)
+Emit verbose debug information in assembly code
+
+; TARGET_CCINIT: Whether to use condition-codes generated by
+; insns other than the immediately preceding compare/test insn.
+; Used to check for errors in notice_update_cc.
+mcc-init
+Target Report Mask(CCINIT)
+Do not use condition codes from normal instructions
+
+; TARGET_SIDE_EFFECT_PREFIXES: Whether to use side-effect
+; patterns.  Used to debug the [rx=ry+i] type patterns.
+mside-effects
+Target Report RejectNegative Mask(SIDE_EFFECT_PREFIXES) Undocumented
+
+mno-side-effects
+Target Report RejectNegative InverseMask(SIDE_EFFECT_PREFIXES)
+Do not emit addressing modes with side-effect assignment
+
+; TARGET_STACK_ALIGN: Whether to *keep* (not force) alignment of
+; stack at 16 (or 32, depending on TARGET_ALIGN_BY_32) bits.
+mstack-align
+Target Report RejectNegative Mask(STACK_ALIGN) Undocumented
+
+mno-stack-align
+Target Report RejectNegative InverseMask(STACK_ALIGN)
+Do not tune stack alignment
+
+; TARGET_DATA_ALIGN: Whether to do alignment on individual
+; modifiable objects.
+mdata-align
+Target Report RejectNegative Mask(DATA_ALIGN) Undocumented
+
+mno-data-align
+Target Report RejectNegative InverseMask(DATA_ALIGN)
+Do not tune writable data alignment
+
+; TARGET_CONST_ALIGN: Whether to do alignment on individual
+; non-modifiable objects.
+mconst-align
+Target Report RejectNegative Mask(CONST_ALIGN) Undocumented
+
+mno-const-align
+Target Report RejectNegative InverseMask(CONST_ALIGN)
+Do not tune code and read-only data alignment
+
+; See cris_handle_option.
+m32-bit
+Target Report RejectNegative Undocumented
+
+; See cris_handle_option.
+m32bit
+Target Report RejectNegative
+Align code and data to 32 bits
+
+; See cris_handle_option.
+m16-bit
+Target Report RejectNegative Undocumented
+
+; See cris_handle_option.
+m16bit
+Target Report RejectNegative Undocumented
+
+; See cris_handle_option.
+m8-bit
+Target Report RejectNegative Undocumented
+
+; See cris_handle_option.
+m8bit
+Target Report RejectNegative
+Don't align items in code or data
+
+; TARGET_PROLOGUE_EPILOGUE: Whether or not to omit function
+; prologue and epilogue.
+mprologue-epilogue
+Target Report RejectNegative Mask(PROLOGUE_EPILOGUE) Undocumented
+
+mno-prologue-epilogue
+Target Report RejectNegative InverseMask(PROLOGUE_EPILOGUE)
+Do not emit function prologue or epilogue
+
+; We have to handle this m-option here since we can't wash it
+; off in both CC1_SPEC and CC1PLUS_SPEC.
+
+mbest-lib-options
+Target Report RejectNegative
+Use the most feature-enabling options allowed by other options
+
+; FIXME: The following comment relates to gcc before cris.opt.
+; Check if it's still valid:
+; We must call it "override-" since calling it "no-" will cause
+; gcc.c to forget it, if there's a "later" -mbest-lib-options.
+; Kludgy, but needed for some multilibbed files.
+moverride-best-lib-options
+Target Report RejectNegative
+Override -mbest-lib-options
+
+mcpu=
+Target Report RejectNegative Joined Undocumented Var(cris_cpu_str)
+
+march=
+Target Report RejectNegative Joined Var(cris_cpu_str)
+-march=ARCH	Generate code for the specified chip or CPU version
+
+mtune=
+Target Report RejectNegative Joined Var(cris_tune_str)
+-mtune=ARCH	Tune alignment for the specified chip or CPU version
+
+mmax-stackframe=
+Target Report RejectNegative Joined Var(cris_max_stackframe_str)
+-mmax-stackframe=SIZE	Warn when a stackframe is larger than the specified size
+
+max-stackframe=
+Target Report RejectNegative Joined Undocumented Var(cris_max_stackframe_str)
+
+; TARGET_SVINTO: Currently this just affects alignment.  FIXME:
+; Redundant with TARGET_ALIGN_BY_32, or put machine stuff here?
+; This and the others below could just as well be variables and
+; TARGET_* defines in cris.h.
+Mask(SVINTO)
+
+; TARGET_ALIGN_BY_32: Say that all alignment specifications say
+; to prefer 32 rather than 16 bits.
+Mask(ALIGN_BY_32)
+
+; TARGET_AVOID_GOTPLT is referred to in the .c and the .md so we
+; need to allocate the flag and macros here.
+Mask(AVOID_GOTPLT)
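The bare Mask() records at the end of cris.opt allocate target_flags bits for macros that the .c and .md files test; the comments above name TARGET_SVINTO, TARGET_ALIGN_BY_32 and TARGET_AVOID_GOTPLT. As a rough sketch of the mechanism, assuming the option machinery turns each Mask(NAME) into a MASK_NAME bit and a TARGET_NAME test of target_flags (the bit value and the small driver below are made up, not generated code):

/* Illustrative sketch only: approximately what Mask(MUL_BUG) makes
   available to the backend.  The real definitions come from GCC's
   option machinery; the bit position here is arbitrary.  */
#define MASK_MUL_BUG	0x00000001
#define TARGET_MUL_BUG	((target_flags & MASK_MUL_BUG) != 0)

static int target_flags = MASK_MUL_BUG;	/* stand-in for the real flags word */

int
mul_bug_workaround_enabled (void)
{
  return TARGET_MUL_BUG;	/* how backend conditions can test the option */
}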
Index: t-cris
===================================================================
--- t-cris	(nonexistent)
+++ t-cris	(revision 338)
@@ -0,0 +1,58 @@
+#
+# t-cris
+#
+# The Makefile fragment to include when compiling gcc et al for CRIS.
+#
+# Copyright (C) 2001, 2002, 2003 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3.  If not see
+# <http://www.gnu.org/licenses/>.
+#
+# The makefile macros etc. are included in the order found in the
+# section "Target Fragment" in the gcc info-files (or the paper copy) of
+# "Using and Porting GCC"
+
+LIB2FUNCS_EXTRA = _udivsi3.c _divsi3.c _umodsi3.c _modsi3.c
+CRIS_LIB1CSRC = $(srcdir)/config/cris/arit.c
+
+FPBIT = tmplibgcc_fp_bit.c
+DPBIT = dp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+	echo '#define FLOAT_BIT_ORDER_MISMATCH' > dp-bit.c
+	cat $(srcdir)/config/fp-bit.c >> dp-bit.c
+
+# Use another name to avoid confusing SUN make, if support for
+# it is reinstated elsewhere.  Prefixed with "tmplibgcc" means
+# "make clean" will wipe it.  We define a few L_ thingies
+# because we can't select them individually through FPBIT_FUNCS;
+# see above.
+tmplibgcc_fp_bit.c: $(srcdir)/config/fp-bit.c
+	echo '#define FLOAT_BIT_ORDER_MISMATCH' > $@
+	echo '#define FLOAT' >> $@
+	cat $(srcdir)/config/fp-bit.c >> $@
+
+# The fixed-point arithmetic code is in one file, arit.c,
+# similar to libgcc2.c (or the old libgcc1.c).  We need to
+# "split it up" with one file per define.
+$(LIB2FUNCS_EXTRA): $(CRIS_LIB1CSRC)
+	name=`echo $@ | sed -e 's,.*/,,' | sed -e 's,.c$$,,'`; \
+	echo "#define L$$name" > tmp-$@ \
+	&& echo '#include "$<"' >> tmp-$@ \
+	&& mv -f tmp-$@ $@
+
+$(out_object_file): gt-cris.h
+gt-cris.h : s-gtype ; @true
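The $(LIB2FUNCS_EXTRA) rule above generates each of _udivsi3.c, _divsi3.c, _umodsi3.c and _modsi3.c as a two-line wrapper that defines one L_ macro and then includes arit.c, so each object ends up with exactly one of the functions. A sketch of what a generated wrapper would contain; the include path merely stands in for the expansion of $(srcdir)/config/cris/arit.c and is not the literal output:

/* Sketch of a generated _udivsi3.c, as produced by the rule above.  */
#define L_udivsi3
#include "config/cris/arit.c"	/* really the full $(srcdir) path */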
Index: elf.opt
===================================================================
--- elf.opt	(nonexistent)
+++ elf.opt	(revision 338)
@@ -0,0 +1,22 @@
+; ELF-specific options for the CRIS port of the compiler.
+
+; Copyright (C) 2005, 2007 Free Software Foundation, Inc.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 3, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+; FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+; for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3.  If not see
+; <http://www.gnu.org/licenses/>.
+
+melf
+Target Report RejectNegative Undocumented
Index: cris-protos.h
===================================================================
--- cris-protos.h	(nonexistent)
+++ cris-protos.h	(revision 338)
@@ -0,0 +1,75 @@
+/* Definitions for GCC.  Part of the machine description for CRIS.
+   Copyright (C) 1998, 1999, 2000, 2001, 2004, 2005, 2006, 2007
+   Free Software Foundation, Inc.
+   Contributed by Axis Communications.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3.  If not see
+<http://www.gnu.org/licenses/>.  */
+
+/* Prototypes for the CRIS port.  */
+
+#if defined(FILE) || defined(stdin) || defined(stdout) || defined(getc) || defined(putc)
+#define STDIO_INCLUDED
+#endif
+
+extern void cris_conditional_register_usage (void);
+extern bool cris_simple_epilogue (void);
+#ifdef RTX_CODE
+extern const char *cris_op_str (rtx);
+extern void cris_notice_update_cc (rtx, rtx);
+extern bool cris_reload_address_legitimized (rtx, enum machine_mode, int, int, int);
+extern int cris_register_move_cost (enum machine_mode, enum reg_class,
+				    enum reg_class);
+extern void cris_print_operand (FILE *, rtx, int);
+extern void cris_print_operand_address (FILE *, rtx);
+extern int cris_side_effect_mode_ok (enum rtx_code, rtx *, int, int,
+				     int, int, int);
+extern bool cris_cc0_user_requires_cmp (rtx);
+extern rtx cris_return_addr_rtx (int, rtx);
+extern rtx cris_split_movdx (rtx *);
+extern int cris_legitimate_pic_operand (rtx);
+extern enum cris_pic_symbol_type cris_pic_symbol_type_of (rtx);
+extern bool cris_valid_pic_const (rtx, bool);
+extern bool cris_store_multiple_op_p (rtx);
+extern bool cris_movem_load_rest_p (rtx, int);
+extern void cris_asm_output_symbol_ref (FILE *, rtx);
+extern bool cris_output_addr_const_extra (FILE *, rtx);
+extern int cris_cfun_uses_pic_table (void);
+extern void cris_asm_output_case_end (FILE *, int, rtx);
+extern rtx cris_gen_movem_load (rtx, rtx, int);
+extern rtx cris_emit_movem_store (rtx, rtx, int, bool);
+extern void cris_expand_pic_call_address (rtx *);
+extern void cris_order_for_addsi3 (rtx *, int);
+#endif /* RTX_CODE */
+extern void cris_asm_output_label_ref (FILE *, char *);
+extern void cris_target_asm_named_section (const char *, unsigned int, tree);
+extern void cris_expand_prologue (void);
+extern void cris_expand_epilogue (void);
+extern void cris_expand_return (bool);
+extern bool cris_return_address_on_stack_for_return (void);
+extern bool cris_return_address_on_stack (void);
+extern void cris_pragma_expand_mul (struct cpp_reader *);
+
+/* Need one that returns an int; usable in expressions.  */
+extern int cris_fatal (char *);
+
+extern void cris_override_options (void);
+
+extern int cris_initial_elimination_offset (int, int);
+
+extern void cris_init_expanders (void);
+
+extern bool cris_function_value_regno_p (const unsigned int);
cris-protos.h
Property changes:
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+Id
\ No newline at end of property
Index: libgcc.ver
===================================================================
--- libgcc.ver	(nonexistent)
+++ libgcc.ver	(revision 338)
@@ -0,0 +1,7 @@
+GCC_4.3 {
+  __Mul
+  __Div
+  __Udiv
+  __Mod
+  __Umod
+}
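The version script above exports the CRIS arithmetic helpers from the shared libgcc under the GCC_4.3 version node. As a hedged illustration of why they need to be exported (the mapping from C operators to these particular symbol names is assumed here, not stated in the patch): ordinary 32-bit division and modulus are compiled into calls to such helpers on this target, so shared objects built against this libgcc may reference them at run time.

/* Hedged example: a plain division/modulus like this is expected to be
   compiled into calls to helpers exported above (e.g. __Udiv, __Umod);
   the exact symbol names are assumed, not quoted from the patch.  */
unsigned int
scaled_remainder (unsigned int a, unsigned int b)
{
  return (a / b) + (a % b);
}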
