OpenCores
URL https://opencores.org/ocsvn/openrisc/openrisc/trunk

Subversion Repositories openrisc

Compare Revisions

  • This comparison shows the changes necessary to convert path
    /openrisc/trunk/gnu-old/gcc-4.2.2/gcc/config/m68k
    from Rev 154 to Rev 816
    Reverse comparison

Rev 154 → Rev 816

/t-m68kbare
0,0 → 1,21
# Makefile fragment for m68k "bare" (no-OS) targets.
# NOTE: recipe lines below are TAB-indented, as required by make; the
# tabs were missing in the previous revision of this fragment.

# Assembly-language libgcc1 support routines and the subset to build.
LIB1ASMSRC = m68k/lb1sf68.asm
LIB1ASMFUNCS = _mulsi3 _udivsi3 _divsi3 _umodsi3 _modsi3 \
  _double _float _floatex \
  _eqdf2 _nedf2 _gtdf2 _gedf2 _ltdf2 _ledf2 \
  _eqsf2 _nesf2 _gtsf2 _gesf2 _ltsf2 _lesf2

# Extra C sources linked into libgcc: software floating-point helpers.
LIB2FUNCS_EXTRA = fpgnulib.c xfgnulib.c

# Copy fpgnulib.c into the build directory so it can be compiled here.
fpgnulib.c: $(srcdir)/config/m68k/fpgnulib.c
	cp $(srcdir)/config/m68k/fpgnulib.c fpgnulib.c
# xfgnulib.c is the same source compiled with EXTFLOAT defined
# (extended-precision variant).
xfgnulib.c: $(srcdir)/config/m68k/fpgnulib.c
	echo '#define EXTFLOAT' > xfgnulib.c
	cat $(srcdir)/config/m68k/fpgnulib.c >> xfgnulib.c

# Multilib configuration: one library variant per CPU / float-ABI
# combination, minus the combinations listed as exceptions.
MULTILIB_OPTIONS = m68000/m68020/m5200/mcpu32/m68040/m68060 m68881/msoft-float
MULTILIB_DIRNAMES =
MULTILIB_MATCHES = m68000=mc68000 m68000=m68302 mcpu32=m68332 m68020=mc68020
MULTILIB_EXCEPTIONS = m68000/msoft-float m5200/m68881 m5200/msoft-float mcpu32/m68881 mcpu32/msoft-float m68040/m68881 m68060/m68881

# Build and install libgcc via the multilib machinery.
LIBGCC = stmp-multilib
INSTALL_LIBGCC = install-multilib
/openbsd.h
0,0 → 1,84
/* Configuration file for an m68k OpenBSD target.
Copyright (C) 1999, 2002, 2003, 2007 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */

/* Target OS builtins: predefine the usual OpenBSD/unix macros and
   assertions for the preprocessor. */
#define TARGET_OS_CPP_BUILTINS() \
do \
{ \
builtin_define ("__unix__"); \
builtin_define ("__OpenBSD__"); \
builtin_assert ("system=unix"); \
builtin_assert ("system=OpenBSD"); \
} \
while (0)

/* Define __HAVE_68881__ in preprocessor, unless -msoft-float is specified.
This will control the use of inline 68881 insns in certain macros.
Also map -posix/-pthread to the corresponding feature-test macros. */
#undef CPP_SPEC
#define CPP_SPEC "%{!msoft-float:-D__HAVE_68881__ -D__HAVE_FPU__} %{posix:-D_POSIX_SOURCE} %{pthread:-D_POSIX_THREADS}"

/* m68k as needs to know about the processor subtype.
   -fpic/-fpie pass -k to the assembler; -fPIC/-fPIE pass -k -K
   (PIC code-generation flags for the OpenBSD assembler). */
#undef ASM_SPEC
#define ASM_SPEC "%{m68030} %{m68040} %{m68060} %{fpic|fpie:-k} %{fPIC|fPIE:-k -K}"

/* The assembler must be given "-" explicitly to read from a pipe. */
#define AS_NEEDS_DASH_FOR_PIPED_INPUT

/* Layout of source language data types. */

/* This must agree with <machine/ansi.h> */
#undef SIZE_TYPE
#define SIZE_TYPE "unsigned int"

#undef PTRDIFF_TYPE
#define PTRDIFF_TYPE "int"

#undef WCHAR_TYPE
#define WCHAR_TYPE "int"

#undef WCHAR_TYPE_SIZE
#define WCHAR_TYPE_SIZE 32

/* Storage layout. */

/* Every structure or union's size must be a multiple of 2 bytes. */
#define STRUCTURE_SIZE_BOUNDARY 16

/* Specific options for DBX Output. */

/* This is BSD, so it wants DBX format. */
#define DBX_DEBUGGING_INFO 1

/* Do not break .stabs pseudos into continuations. */
#define DBX_CONTIN_LENGTH 0

/* This is the char to use for continuation (in case we need to turn
continuation back on). */
#define DBX_CONTIN_CHAR '?'

/* Stack & calling: aggregate returns. */

/* Don't default to pcc-struct-return, because gcc is the only compiler, and
we want to retain compatibility with older gcc versions. */
#define DEFAULT_PCC_STRUCT_RETURN 0

/* Assembler format: exception region output. */

/* All configurations that don't use elf must be explicit about not using
dwarf unwind information. */
#define DWARF2_UNWIND_INFO 0
/m68k-aout.h
0,0 → 1,37
/* Definitions of target machine for GNU compiler. "naked" 68020,
a.out object files and debugging, version.
Copyright (C) 1994, 1996, 2003, 2007 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */

/* Emit DBX (stabs) debugging information; SDB output is disabled. */
#define DBX_DEBUGGING_INFO 1
#undef SDB_DEBUGGING_INFO

/* If defined, a C expression whose value is a string containing the
assembler operation to identify the following data as uninitialized global
data. */
#define BSS_SECTION_ASM_OP "\t.bss"

/* A C statement (sans semicolon) to output to the stdio stream
FILE the assembler definition of uninitialized global DECL named
NAME whose size is SIZE bytes. The variable ROUNDED
is the size rounded up to whatever alignment the caller wants.
Try to use asm_output_bss to implement this macro. */
/* a.out files typically can't handle arbitrary variable alignments so
define ASM_OUTPUT_BSS instead of ASM_OUTPUT_ALIGNED_BSS. */
#define ASM_OUTPUT_BSS(FILE, DECL, NAME, SIZE, ROUNDED) \
asm_output_bss ((FILE), (DECL), (NAME), (SIZE), (ROUNDED))
/predicates.md
0,0 → 1,171
;; Predicate definitions for Motorola 68000.
;; Copyright (C) 2005, 2007 Free Software Foundation, Inc.
;;
;; This file is part of GCC.
;;
;; GCC is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
;;
;; GCC is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;; GNU General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3. If not see
;; <http://www.gnu.org/licenses/>.
 
;; Special case of a general operand that's used as a source
;; operand. Use this to permit reads from PC-relative memory when
;; -mpcrel is specified.
 
(define_predicate "general_src_operand"
(match_code "const_int,const_double,const,symbol_ref,label_ref,subreg,reg,mem")
{
if (TARGET_PCREL
&& GET_CODE (op) == MEM
&& (GET_CODE (XEXP (op, 0)) == SYMBOL_REF
|| GET_CODE (XEXP (op, 0)) == LABEL_REF
|| GET_CODE (XEXP (op, 0)) == CONST))
return 1;
return general_operand (op, mode);
})
 
;; Special case of a nonimmediate operand that's used as a source. Use
;; this to permit reads from PC-relative memory when -mpcrel is
;; specified.
 
(define_predicate "nonimmediate_src_operand"
(match_code "subreg,reg,mem")
{
if (TARGET_PCREL && GET_CODE (op) == MEM
&& (GET_CODE (XEXP (op, 0)) == SYMBOL_REF
|| GET_CODE (XEXP (op, 0)) == LABEL_REF
|| GET_CODE (XEXP (op, 0)) == CONST))
return 1;
return nonimmediate_operand (op, mode);
})
 
;; Special case of a memory operand that's used as a source. Use this
;; to permit reads from PC-relative memory when -mpcrel is specified.
 
(define_predicate "memory_src_operand"
(match_code "subreg,mem")
{
if (TARGET_PCREL && GET_CODE (op) == MEM
&& (GET_CODE (XEXP (op, 0)) == SYMBOL_REF
|| GET_CODE (XEXP (op, 0)) == LABEL_REF
|| GET_CODE (XEXP (op, 0)) == CONST))
return 1;
return memory_operand (op, mode);
})
 
;; Similar to general_operand, but exclude stack_pointer_rtx.
 
(define_predicate "not_sp_operand"
(match_code "subreg,reg,mem")
{
return op != stack_pointer_rtx && nonimmediate_operand (op, mode);
})
 
;; Predicate that accepts only a pc-relative address. This is needed
;; because pc-relative addresses don't satisfy the predicate
;; "general_src_operand".
 
(define_predicate "pcrel_address"
(match_code "symbol_ref,label_ref,const"))
 
;; Accept integer operands in the range 0..0xffffffff. We have to
;; check the range carefully since this predicate is used in DImode
;; contexts. Also, we need some extra crud to make it work when
;; hosted on 64-bit machines.

(define_predicate "const_uint32_operand"
(match_code "const_int,const_double")
{
/* It doesn't make sense to ask this question with a mode that is
not larger than 32 bits. */
gcc_assert (GET_MODE_BITSIZE (mode) > 32);

#if HOST_BITS_PER_WIDE_INT > 32
/* All allowed constants will fit a CONST_INT. */
return (GET_CODE (op) == CONST_INT
&& (INTVAL (op) >= 0 && INTVAL (op) <= 0xffffffffL));
#else
/* On a 32-bit host a value with the high word zero may appear either
   as a CONST_INT or as a CONST_DOUBLE with CONST_DOUBLE_HIGH == 0. */
return (GET_CODE (op) == CONST_INT
|| (GET_CODE (op) == CONST_DOUBLE && CONST_DOUBLE_HIGH (op) == 0));
#endif
})

;; Accept integer operands in the range -0x80000000..0x7fffffff. We
;; have to check the range carefully since this predicate is used in
;; DImode contexts.

(define_predicate "const_sint32_operand"
(match_code "const_int")
{
/* It doesn't make sense to ask this question with a mode that is
not larger than 32 bits. */
gcc_assert (GET_MODE_BITSIZE (mode) > 32);

/* All allowed constants will fit a CONST_INT.
   (-0x7fffffff - 1) is used instead of -0x80000000 so the literal
   itself stays within range of a signed 32-bit host 'long'. */
return (GET_CODE (op) == CONST_INT
&& (INTVAL (op) >= (-0x7fffffff - 1) && INTVAL (op) <= 0x7fffffff));
})
 
;; Return true if X is a valid comparison operator for the dbcc
;; instruction. Note it rejects floating point comparison
;; operators. (In the future we could use Fdbcc). It also rejects
;; some comparisons when CC_NO_OVERFLOW is set.

(define_predicate "valid_dbcc_comparison_p"
(and (match_code "eq,ne,gtu,ltu,geu,leu,gt,lt,ge,le")
(match_test "valid_dbcc_comparison_p_2 (op, mode)")))

;; Check for sign_extend or zero_extend. Used for bit-count operands.

(define_predicate "extend_operator"
(match_code "sign_extend,zero_extend"))

;; Returns true if OP is either a symbol reference or a sum of a
;; symbol reference and a constant.

(define_predicate "symbolic_operand"
(match_code "symbol_ref,label_ref,const")
{
switch (GET_CODE (op))
{
case SYMBOL_REF:
case LABEL_REF:
return true;

/* Accept (const (plus (symbol_ref/label_ref) (const_int))). */
case CONST:
op = XEXP (op, 0);
return ((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
|| GET_CODE (XEXP (op, 0)) == LABEL_REF)
&& GET_CODE (XEXP (op, 1)) == CONST_INT);

#if 0 /* Deleted, with corresponding change in m68k.h,
so as to fit the specs. No CONST_DOUBLE is ever symbolic. */
case CONST_DOUBLE:
return GET_MODE (op) == mode;
#endif

default:
return false;
}
})

;; Accept a memory operand whose address is a post-increment, (An)+.

(define_predicate "post_inc_operand"
(and (match_code "mem")
(match_test "GET_CODE (XEXP (op, 0)) == POST_INC")))

;; Accept a memory operand whose address is a pre-decrement, -(An).

(define_predicate "pre_dec_operand"
(and (match_code "mem")
(match_test "GET_CODE (XEXP (op, 0)) == PRE_DEC")))
/m68k.md
0,0 → 1,6984
;;- Machine description for GNU compiler, Motorola 68000 Version
;; Copyright (C) 1987, 1988, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2001,
;; 2002, 2003, 2004, 2005, 2006, 2007
;; Free Software Foundation, Inc.
 
;; This file is part of GCC.
 
;; GCC is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
 
;; GCC is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;; GNU General Public License for more details.
 
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3. If not see
;; <http://www.gnu.org/licenses/>.
 
;;- Information about MCF5200 port.
 
;;- The MCF5200 "ColdFire" architecture is a reduced version of the
;;- 68k ISA. Differences include reduced support for byte and word
;;- operands and the removal of BCD, bitfield, rotate, and integer
;;- divide instructions. The TARGET_COLDFIRE flag turns the use of the
;;- removed opcodes and addressing modes off.
;;-
 
 
;;- instruction definitions
 
;;- @@The original PO technology requires these to be ordered by speed,
;;- @@ so that assigner will pick the fastest.
 
;;- See file "rtl.def" for documentation on define_insn, match_*, et. al.
 
;;- When naming insn's (operand 0 of define_insn) be careful about using
;;- names from other targets machine descriptions.
 
;;- cpp macro #define NOTICE_UPDATE_CC in file tm.h handles condition code
;;- updates for most instructions.
 
;;- Operand classes for the register allocator:
;;- 'a' one of the address registers can be used.
;;- 'd' one of the data registers can be used.
;;- 'f' one of the m68881/fpu registers can be used
;;- 'r' either a data or an address register can be used.
 
;;- Immediate Floating point operator constraints
;;- 'G' a floating point constant that is *NOT* one of the standard
;; 68881 constant values (to force calling output_move_const_double
;; to get it from rom if it is a 68881 constant).
;;
;; See the functions standard_XXX_constant_p in output-m68k.c for more
;; info.
 
;;- Immediate integer operand constraints:
;;- 'I' 1 .. 8
;;- 'J' -32768 .. 32767
;;- 'K' all integers EXCEPT -128 .. 127
;;- 'L' -8 .. -1
;;- 'M' all integers EXCEPT -256 .. 255
;;- 'N' 24 .. 31
;;- 'O' 16
;;- 'P' 8 .. 15
 
;;- Assembler specs:
;;- "%." size separator ("." or "") move%.l d0,d1
;;- "%-" push operand "sp@-" move%.l d0,%-
;;- "%+" pop operand "sp@+" move%.l d0,%+
;;- "%@" top of stack "sp@" move%.l d0,%@
;;- "%!" fpcr register
;;- "%$" single-precision fp specifier ("s" or "") f%$add.x fp0,fp1
;;- "%&" double-precision fp specifier ("d" or "") f%&add.x fp0,fp1
 
;;- Information about 68040 port.
 
;;- The 68040 executes all 68030 and 68881/2 instructions, but some must
;;- be emulated in software by the OS. It is faster to avoid these
;;- instructions and issue a library call rather than trapping into
;;- the kernel. The affected instructions are fintrz and fscale. The
;;- TARGET_68040 flag turns the use of the opcodes off.
 
;;- The '040 also implements a set of new floating-point instructions
;;- which specify the rounding precision in the opcode. This finally
;;- permit the 68k series to be truly IEEE compliant, and solves all
;;- issues of excess precision accumulating in the extended registers.
;;- By default, GCC does not use these instructions, since such code will
;;- not run on an '030. To use these instructions, use the -m68040-only
;;- switch. By changing TARGET_DEFAULT to include TARGET_68040_ONLY,
;;- you can make these instructions the default.
 
;;- These new instructions aren't directly in the md. They are brought
;;- into play by defining "%$" and "%&" to expand to "s" and "d" rather
;;- than "".
 
;;- Information about 68060 port.
 
;;- The 68060 executes all 68030 and 68881/2 instructions, but some must
;;- be emulated in software by the OS. It is faster to avoid these
;;- instructions and issue a library call rather than trapping into
;;- the kernel. The affected instructions are: divs.l <ea>,Dr:Dq;
;;- divu.l <ea>,Dr:Dq; muls.l <ea>,Dr:Dq; mulu.l <ea>,Dr:Dq; and
;;- fscale. The TARGET_68060 flag turns the use of the opcodes off.
 
;;- Some of these insn's are composites of several m68000 op codes.
;;- The assembler (or final @@??) insures that the appropriate one is
;;- selected.
 
;; UNSPEC usage:

(define_constants
[(UNSPEC_SIN 1)
(UNSPEC_COS 2)
])

;; UNSPEC_VOLATILE usage:

(define_constants
[(UNSPECV_BLOCKAGE 0)
])

;; Hard register numbers by name.
(define_constants
[(A0_REG 8)
(SP_REG 15)
])

;; Operand predicates are defined separately.
(include "predicates.md")
;; Mode macros for floating point operations.
;; Valid floating point modes
(define_mode_macro FP [SF DF (XF "TARGET_68881")])
;; Mnemonic infix to round result
(define_mode_attr round [(SF "%$") (DF "%&") (XF "")])
;; Mnemonic infix to round result for mul or div instruction
(define_mode_attr round_mul [(SF "sgl") (DF "%&") (XF "")])
;; Suffix specifying source operand format
(define_mode_attr prec [(SF "s") (DF "d") (XF "x")])
;; Allowable D registers
(define_mode_attr dreg [(SF "d") (DF "") (XF "")])
;; Allowable 68881 constant constraints
(define_mode_attr const [(SF "F") (DF "G") (XF "")])
;; Push a DFmode value onto the stack; FP-register sources use fmove,
;; everything else goes through output_move_double.
(define_insn ""
[(set (match_operand:DF 0 "push_operand" "=m")
(match_operand:DF 1 "general_operand" "ro<>fyE"))]
""
{
if (FP_REG_P (operands[1]))
return "fmove%.d %f1,%0";
return output_move_double (operands);
})

;; Push a DImode value onto the stack.
(define_insn "pushdi"
[(set (match_operand:DI 0 "push_operand" "=m")
(match_operand:DI 1 "general_operand" "ro<>Fyi"))]
""
{
return output_move_double (operands);
})
;; We don't want to allow a constant operand for test insns because
;; (set (cc0) (const_int foo)) has no mode information. Such insns will
;; be folded while optimizing anyway.

;; DImode test.  The expander just records that the last compare was
;; not a floating-point one.
(define_expand "tstdi"
[(parallel [(set (cc0)
(match_operand:DI 0 "nonimmediate_operand" ""))
(clobber (match_scratch:SI 1 ""))
(clobber (match_scratch:DI 2 ""))])]
""
"m68k_last_compare_had_fp_operands = 0;")

(define_insn ""
[(set (cc0)
(match_operand:DI 0 "nonimmediate_operand" "am,d"))
(clobber (match_scratch:SI 1 "=X,d"))
(clobber (match_scratch:DI 2 "=d,X"))]
""
{
if (which_alternative == 0)
{
rtx xoperands[2];

/* Copy the operand into the DImode scratch, then negate it there;
   the resulting condition codes are those of the negated value,
   hence CC_REVERSED. */
xoperands[0] = operands[2];
xoperands[1] = operands[0];
output_move_double (xoperands);
cc_status.flags |= CC_REVERSED;
return "neg%.l %R2\;negx%.l %2";
}
if (find_reg_note (insn, REG_DEAD, operands[0]))
{
cc_status.flags |= CC_REVERSED;
return "neg%.l %R0\;negx%.l %0";
}
else
/*
'sub' clears %1, and also clears the X cc bit
'tst' sets the Z cc bit according to the low part of the DImode operand
'subx %1' (i.e. subx #0) acts as a (non-existent) tstx on the high part.
*/
return "sub%.l %1,%1\;tst%.l %R0\;subx%.l %1,%0";
})

(define_expand "tstsi"
[(set (cc0)
(match_operand:SI 0 "nonimmediate_operand" ""))]
""
"m68k_last_compare_had_fp_operands = 0;")

(define_insn ""
[(set (cc0)
(match_operand:SI 0 "nonimmediate_operand" "rm"))]
""
{
if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (operands[0]))
return "tst%.l %0";
/* If you think that the 68020 does not support tstl a0,
reread page B-167 of the 68020 manual more carefully. */
/* On an address reg, cmpw may replace cmpl. */
return "cmp%.w #0,%0";
})

;; This can't use an address register, because comparisons
;; with address registers as second operand always test the whole word.
(define_expand "tsthi"
[(set (cc0)
(match_operand:HI 0 "nonimmediate_operand" ""))]
""
"m68k_last_compare_had_fp_operands = 0;")

(define_insn ""
[(set (cc0)
(match_operand:HI 0 "nonimmediate_operand" "dm"))]
""
"tst%.w %0")

(define_expand "tstqi"
[(set (cc0)
(match_operand:QI 0 "nonimmediate_operand" ""))]
""
"m68k_last_compare_had_fp_operands = 0;")

(define_insn ""
[(set (cc0)
(match_operand:QI 0 "nonimmediate_operand" "dm"))]
""
"tst%.b %0")

;; Floating-point test, for any mode in the FP macro (SF/DF/XF).
(define_expand "tst<mode>"
[(set (cc0)
(match_operand:FP 0 "general_operand" ""))]
"TARGET_HARD_FLOAT"
{
m68k_last_compare_had_fp_operands = 1;
})

;; 68881 variant of the FP test.
(define_insn "tst<mode>_68881"
[(set (cc0)
(match_operand:FP 0 "general_operand" "f<FP:dreg>m"))]
"TARGET_68881"
{
cc_status.flags = CC_IN_68881;
if (FP_REG_P (operands[0]))
return "ftst%.x %0";
return "ftst%.<FP:prec> %0";
})

;; ColdFire FPU variant of the FP test.
(define_insn "tst<mode>_cf"
[(set (cc0)
(match_operand:FP 0 "general_operand" "f<FP:dreg><Q>U"))]
"TARGET_COLDFIRE_FPU"
{
cc_status.flags = CC_IN_68881;
if (FP_REG_P (operands[0]))
return "ftst%.d %0";
return "ftst%.<FP:prec> %0";
})
 
;; compare instructions.

;; DImode compare, implemented with sub/subx into a scratch register.
(define_expand "cmpdi"
[(parallel
[(set (cc0)
(compare (match_operand:DI 0 "nonimmediate_operand" "")
(match_operand:DI 1 "general_operand" "")))
(clobber (match_dup 2))])]
""
"m68k_last_compare_had_fp_operands = 0; operands[2] = gen_reg_rtx (DImode);")

(define_insn ""
[(set (cc0)
(compare (match_operand:DI 1 "nonimmediate_operand" "0,d")
(match_operand:DI 2 "general_operand" "d,0")))
(clobber (match_operand:DI 0 "register_operand" "=d,d"))]
""
{
if (rtx_equal_p (operands[0], operands[1]))
return "sub%.l %R2,%R0\;subx%.l %2,%0";
else
{
/* Operands are subtracted the other way round, so tell the
   cc machinery the comparison sense is reversed. */
cc_status.flags |= CC_REVERSED;
return "sub%.l %R1,%R0\;subx%.l %1,%0";
}
})

;; This is the second "hook" for PIC code (in addition to movsi). See
;; comment of movsi for a description of PIC handling.
(define_expand "cmpsi"
[(set (cc0)
(compare (match_operand:SI 0 "nonimmediate_operand" "")
(match_operand:SI 1 "general_operand" "")))]
""
{
m68k_last_compare_had_fp_operands = 0;
if (flag_pic && !TARGET_PCREL && symbolic_operand (operands[1], SImode))
{
/* The source is an address which requires PIC relocation.
Call legitimize_pic_address with the source, mode, and a relocation
register (a new pseudo, or the final destination if reload_in_progress
is set). Then fall through normally */
rtx temp = reload_in_progress ? operands[0] : gen_reg_rtx (Pmode);
operands[1] = legitimize_pic_address (operands[1], SImode, temp);
}
})

;; A composite of the cmp, cmpa, cmpi & cmpm m68000 op codes.
(define_insn ""
[(set (cc0)
(compare (match_operand:SI 0 "nonimmediate_operand" "rKT,rKs,mSr,mSa,>")
(match_operand:SI 1 "general_src_operand" "mSr,mSa,KTr,Ksr,>")))]
"!TARGET_COLDFIRE"
{
if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM)
return "cmpm%.l %1,%0";
if (REG_P (operands[1])
|| (!REG_P (operands[0]) && GET_CODE (operands[0]) != MEM))
{
cc_status.flags |= CC_REVERSED;
return "cmp%.l %d0,%d1";
}
/* A 16-bit immediate suffices when comparing against an address
   register, which sign-extends word operands. */
if (ADDRESS_REG_P (operands[0])
&& GET_CODE (operands[1]) == CONST_INT
&& INTVAL (operands[1]) < 0x8000
&& INTVAL (operands[1]) >= -0x8000)
return "cmp%.w %1,%0";
return "cmp%.l %d1,%d0";
})

;; ColdFire variant of the SImode compare.
(define_insn ""
[(set (cc0)
(compare (match_operand:SI 0 "nonimmediate_operand" "mrKs,r")
(match_operand:SI 1 "general_operand" "r,mrKs")))]
"TARGET_COLDFIRE"
{
if (REG_P (operands[1])
|| (!REG_P (operands[0]) && GET_CODE (operands[0]) != MEM))
{
cc_status.flags |= CC_REVERSED;
return "cmp%.l %d0,%d1";
}
return "cmp%.l %d1,%d0";
})
 
;; HImode compare (not available on ColdFire).
(define_expand "cmphi"
[(set (cc0)
(compare (match_operand:HI 0 "nonimmediate_src_operand" "")
(match_operand:HI 1 "general_src_operand" "")))]
"!TARGET_COLDFIRE"
"m68k_last_compare_had_fp_operands = 0;")

(define_insn ""
[(set (cc0)
(compare (match_operand:HI 0 "nonimmediate_src_operand" "rnmS,d,n,mS,>")
(match_operand:HI 1 "general_src_operand" "d,rnmS,mS,n,>")))]
"!TARGET_COLDFIRE"
{
if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM)
return "cmpm%.w %1,%0";
if ((REG_P (operands[1]) && !ADDRESS_REG_P (operands[1]))
|| (!REG_P (operands[0]) && GET_CODE (operands[0]) != MEM))
{
cc_status.flags |= CC_REVERSED;
return "cmp%.w %d0,%d1";
}
return "cmp%.w %d1,%d0";
})

;; QImode compare (not available on ColdFire).
(define_expand "cmpqi"
[(set (cc0)
(compare (match_operand:QI 0 "nonimmediate_src_operand" "")
(match_operand:QI 1 "general_src_operand" "")))]
"!TARGET_COLDFIRE"
"m68k_last_compare_had_fp_operands = 0;")

(define_insn ""
[(set (cc0)
(compare (match_operand:QI 0 "nonimmediate_src_operand" "dn,dmS,>")
(match_operand:QI 1 "general_src_operand" "dmS,nd,>")))]
"!TARGET_COLDFIRE"
{
if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM)
return "cmpm%.b %1,%0";
if (REG_P (operands[1])
|| (!REG_P (operands[0]) && GET_CODE (operands[0]) != MEM))
{
cc_status.flags |= CC_REVERSED;
return "cmp%.b %d0,%d1";
}
return "cmp%.b %d1,%d0";
})

;; Floating-point compare for any FP-macro mode (SF/DF/XF).
(define_expand "cmp<mode>"
[(set (cc0)
(compare (match_operand:FP 0 "general_operand" "")
(match_operand:FP 1 "general_operand" "")))]
"TARGET_HARD_FLOAT"
{
m68k_last_compare_had_fp_operands = 1;
if (TARGET_COLDFIRE && !reload_completed)
operands[1] = force_reg (<MODE>mode, operands[1]);
})

;; 68881 variant of the FP compare.
(define_insn "cmp<mode>_68881"
[(set (cc0)
(compare (match_operand:FP 0 "general_operand" "f,m<FP:const>")
(match_operand:FP 1 "general_operand" "f<FP:dreg>m<FP:const>,f")))]
"TARGET_68881"
{
cc_status.flags = CC_IN_68881;
if (FP_REG_P (operands[0]))
{
if (FP_REG_P (operands[1]))
return "fcmp%.x %1,%0";
else
return "fcmp%.<FP:prec> %f1,%0";
}
cc_status.flags |= CC_REVERSED;
return "fcmp%.<FP:prec> %f0,%1";
})

;; ColdFire FPU variant of the FP compare.
(define_insn "cmp<mode>_cf"
[(set (cc0)
(compare (match_operand:FP 0 "general_operand" "f,<FP:dreg><Q>U")
(match_operand:FP 1 "general_operand" "f<FP:dreg><Q>U,f")))]
"TARGET_COLDFIRE_FPU"
{
cc_status.flags = CC_IN_68881;
if (FP_REG_P (operands[0]))
{
if (FP_REG_P (operands[1]))
return "fcmp%.d %1,%0";
else
return "fcmp%.<FP:prec> %f1,%0";
}
cc_status.flags |= CC_REVERSED;
return "fcmp%.<FP:prec> %f0,%1";
})
;; Recognizers for btst instructions.

;; ColdFire/5200 only allows "<Q>" type addresses when the bit position is
;; specified as a constant, so we must disable all patterns that may extract
;; from a MEM at a constant bit position if we can't use this as a constraint.

;; Test a bit in a byte in memory; bit position counted from the MSB
;; (hence the 7 - n adjustment).
(define_insn ""
[(set (cc0) (zero_extract (match_operand:QI 0 "memory_src_operand" "oS")
(const_int 1)
(minus:SI (const_int 7)
(match_operand:SI 1 "general_operand" "di"))))]
"!TARGET_COLDFIRE"
{
return output_btst (operands, operands[1], operands[0], insn, 7);
})

;; This is the same as the above pattern except for the constraints. The 'i'
;; has been deleted.

(define_insn ""
[(set (cc0) (zero_extract (match_operand:QI 0 "memory_operand" "o")
(const_int 1)
(minus:SI (const_int 7)
(match_operand:SI 1 "general_operand" "d"))))]
"TARGET_COLDFIRE"
{
return output_btst (operands, operands[1], operands[0], insn, 7);
})

;; Test a bit in a data register; bit position counted from bit 31 down.
(define_insn ""
[(set (cc0) (zero_extract (match_operand:SI 0 "register_operand" "d")
(const_int 1)
(minus:SI (const_int 31)
(match_operand:SI 1 "general_operand" "di"))))]
""
{
return output_btst (operands, operands[1], operands[0], insn, 31);
})

;; The following two patterns are like the previous two
;; except that they use the fact that bit-number operands
;; are automatically masked to 3 or 5 bits.

(define_insn ""
[(set (cc0) (zero_extract (match_operand:QI 0 "memory_operand" "o")
(const_int 1)
(minus:SI (const_int 7)
(and:SI
(match_operand:SI 1 "register_operand" "d")
(const_int 7)))))]
""
{
return output_btst (operands, operands[1], operands[0], insn, 7);
})

(define_insn ""
[(set (cc0) (zero_extract (match_operand:SI 0 "register_operand" "d")
(const_int 1)
(minus:SI (const_int 31)
(and:SI
(match_operand:SI 1 "register_operand" "d")
(const_int 31)))))]
""
{
return output_btst (operands, operands[1], operands[0], insn, 31);
})

;; Nonoffsettable mem refs are ok in this one pattern
;; since we don't try to adjust them.
(define_insn ""
[(set (cc0) (zero_extract (match_operand:QI 0 "memory_operand" "m")
(const_int 1)
(match_operand:SI 1 "const_int_operand" "n")))]
"(unsigned) INTVAL (operands[1]) < 8 && !TARGET_COLDFIRE"
{
operands[1] = GEN_INT (7 - INTVAL (operands[1]));
return output_btst (operands, operands[1], operands[0], insn, 7);
})

;; Constant bit position in a register or offsettable memory; memory
;; operands are narrowed to the byte containing the bit.
(define_insn ""
[(set (cc0) (zero_extract (match_operand:SI 0 "register_operand" "do")
(const_int 1)
(match_operand:SI 1 "const_int_operand" "n")))]
"!TARGET_COLDFIRE"
{
if (GET_CODE (operands[0]) == MEM)
{
operands[0] = adjust_address (operands[0], QImode,
INTVAL (operands[1]) / 8);
operands[1] = GEN_INT (7 - INTVAL (operands[1]) % 8);
return output_btst (operands, operands[1], operands[0], insn, 7);
}
operands[1] = GEN_INT (31 - INTVAL (operands[1]));
return output_btst (operands, operands[1], operands[0], insn, 31);
})

;; This is the same as the above pattern except for the constraints.
;; The 'o' has been replaced with 'Q'.

(define_insn ""
[(set (cc0) (zero_extract (match_operand:SI 0 "register_operand" "dQ")
(const_int 1)
(match_operand:SI 1 "const_int_operand" "n")))]
"TARGET_COLDFIRE"
{
if (GET_CODE (operands[0]) == MEM)
{
operands[0] = adjust_address (operands[0], QImode,
INTVAL (operands[1]) / 8);
operands[1] = GEN_INT (7 - INTVAL (operands[1]) % 8);
return output_btst (operands, operands[1], operands[0], insn, 7);
}
operands[1] = GEN_INT (31 - INTVAL (operands[1]));
return output_btst (operands, operands[1], operands[0], insn, 31);
})
 
;; move instructions

;; A special case in which it is not desirable
;; to reload the constant into a data register.
(define_insn "pushexthisi_const"
[(set (match_operand:SI 0 "push_operand" "=m")
(match_operand:SI 1 "const_int_operand" "J"))]
"INTVAL (operands[1]) >= -0x8000 && INTVAL (operands[1]) < 0x8000"
{
if (operands[1] == const0_rtx)
return "clr%.l %0";
if (valid_mov3q_const(operands[1]))
return "mov3q%.l %1,%-";
return "pea %a1";
})

;This is never used.
;(define_insn "swapsi"
; [(set (match_operand:SI 0 "nonimmediate_operand" "+r")
; (match_operand:SI 1 "general_operand" "+r"))
; (set (match_dup 1) (match_dup 0))]
; ""
; "exg %1,%0")

;; Special case of fullword move when source is zero.
;; The reason this is special is to avoid loading a zero
;; into a data reg with moveq in order to store it elsewhere.

(define_insn "movsi_const0"
[(set (match_operand:SI 0 "nonimmediate_operand" "=g")
(const_int 0))]
;; clr insns on 68000 read before writing.
;; This isn't so on the 68010, but we have no TARGET_68010.
"((TARGET_68020 || TARGET_COLDFIRE)
|| !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0])))"
{
if (ADDRESS_REG_P (operands[0]))
{
/* On the '040, 'subl an,an' takes 2 clocks while lea takes only 1 */
if (!TARGET_68040 && !TARGET_68060)
return "sub%.l %0,%0";
else
return MOTOROLA ? "lea 0.w,%0" : "lea 0:w,%0";
}
/* moveq is faster on the 68000. */
if (DATA_REG_P (operands[0]) && (!TARGET_68020 && !TARGET_COLDFIRE))
return "moveq #0,%0";
return "clr%.l %0";
})

;; General case of fullword move.
;;
;; This is the main "hook" for PIC code. When generating
;; PIC, movsi is responsible for determining when the source address
;; needs PIC relocation and appropriately calling legitimize_pic_address
;; to perform the actual relocation.
;;
;; In both the PIC and non-PIC cases the patterns generated will
;; matched by the next define_insn.
(define_expand "movsi"
[(set (match_operand:SI 0 "nonimmediate_operand" "")
(match_operand:SI 1 "general_operand" ""))]
""
{
if (flag_pic && !TARGET_PCREL && symbolic_operand (operands[1], SImode))
{
/* The source is an address which requires PIC relocation.
Call legitimize_pic_address with the source, mode, and a relocation
register (a new pseudo, or the final destination if reload_in_progress
is set). Then fall through normally */
rtx temp = reload_in_progress ? operands[0] : gen_reg_rtx (Pmode);
operands[1] = legitimize_pic_address (operands[1], SImode, temp);
}
else if (flag_pic && TARGET_PCREL && ! reload_in_progress)
{
/* Don't allow writes to memory except via a register;
the m68k doesn't consider PC-relative addresses to be writable. */
if (symbolic_operand (operands[0], SImode))
operands[0] = force_reg (SImode, XEXP (operands[0], 0));
else if (GET_CODE (operands[0]) == MEM
&& symbolic_operand (XEXP (operands[0], 0), SImode))
operands[0] = gen_rtx_MEM (SImode,
force_reg (SImode, XEXP (operands[0], 0)));
}
})

;; General case of fullword move. The register constraints
;; force integer constants in range for a moveq to be reloaded
;; if they are headed for memory.
(define_insn ""
;; Notes: make sure no alternative allows g vs g.
;; We don't allow f-regs since fixed point cannot go in them.
[(set (match_operand:SI 0 "nonimmediate_operand" "=g,d,a<")
(match_operand:SI 1 "general_src_operand" "daymSKT,n,i"))]

"!TARGET_COLDFIRE"
{
return output_move_simode (operands);
})

;; ColdFire variant of the fullword move.
(define_insn "*movsi_cf"
[(set (match_operand:SI 0 "nonimmediate_operand" "=r<Q>,g,U")
(match_operand:SI 1 "general_operand" "g,r<Q>,U"))]
"TARGET_COLDFIRE && !TARGET_CFV4"
"* return output_move_simode (operands);")

;; ColdFire V4 variant of the fullword move.
(define_insn "*movsi_cfv4"
[(set (match_operand:SI 0 "nonimmediate_operand" "=r<Q>,g,U")
(match_operand:SI 1 "general_operand" "Rg,Rr<Q>,U"))]
"TARGET_CFV4"
"* return output_move_simode (operands);")

;; Special case of fullword move, where we need to get a non-GOT PIC
;; reference into an address register.
(define_insn ""
[(set (match_operand:SI 0 "nonimmediate_operand" "=a<")
(match_operand:SI 1 "pcrel_address" ""))]
"TARGET_PCREL"
{
if (push_operand (operands[0], SImode))
return "pea %a1";
return "lea %a1,%0";
})
 
;; Halfword (HImode) moves.  The expander accepts everything; the insns
;; below split by target and delegate output to output_move_himode /
;; output_move_stricthi.
(define_expand "movhi"
[(set (match_operand:HI 0 "nonimmediate_operand" "")
(match_operand:HI 1 "general_operand" ""))]
""
"")

;; HImode move for classic 68k.
(define_insn ""
[(set (match_operand:HI 0 "nonimmediate_operand" "=g")
(match_operand:HI 1 "general_src_operand" "gS"))]
"!TARGET_COLDFIRE"
"* return output_move_himode (operands);")

;; HImode move for ColdFire (more restrictive addressing).
(define_insn ""
[(set (match_operand:HI 0 "nonimmediate_operand" "=r<Q>,g,U")
(match_operand:HI 1 "general_operand" "g,r<Q>,U"))]
"TARGET_COLDFIRE"
"* return output_move_himode (operands);")

;; Store into only the low 16 bits of the destination.
(define_expand "movstricthi"
[(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" ""))
(match_operand:HI 1 "general_src_operand" ""))]
""
"")

(define_insn ""
[(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" "+dm"))
(match_operand:HI 1 "general_src_operand" "rmSn"))]
"!TARGET_COLDFIRE"
"* return output_move_stricthi (operands);")

(define_insn ""
[(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" "+d,m"))
(match_operand:HI 1 "general_src_operand" "rmn,r"))]
"TARGET_COLDFIRE"
"* return output_move_stricthi (operands);")
 
;; Byte (QImode) moves, strict-low-part byte stores, and a byte push.
(define_expand "movqi"
[(set (match_operand:QI 0 "nonimmediate_operand" "")
(match_operand:QI 1 "general_src_operand" ""))]
""
"")

;; QImode move for classic 68k; output_move_qimode picks the insn.
(define_insn ""
[(set (match_operand:QI 0 "nonimmediate_operand" "=d,*a,m")
(match_operand:QI 1 "general_src_operand" "dmSi*a,di*a,dmSi"))]
"!TARGET_COLDFIRE"
"* return output_move_qimode (operands);")

;; QImode move for ColdFire.
(define_insn ""
[(set (match_operand:QI 0 "nonimmediate_operand" "=d<Q>,dm,U,d*a")
(match_operand:QI 1 "general_src_operand" "dmi,d<Q>,U,di*a"))]
"TARGET_COLDFIRE"
"* return output_move_qimode (operands);")

;; Store into only the low 8 bits of the destination.
(define_expand "movstrictqi"
[(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" ""))
(match_operand:QI 1 "general_src_operand" ""))]
""
"")

(define_insn ""
[(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+dm"))
(match_operand:QI 1 "general_src_operand" "dmSn"))]
"!TARGET_COLDFIRE"
"* return output_move_strictqi (operands);")

(define_insn ""
[(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+d,m"))
(match_operand:QI 1 "general_src_operand" "dmn,d"))]
"TARGET_COLDFIRE"
"* return output_move_strictqi (operands);")

;; Push one byte: decrement sp by 2 (keeping the stack word-aligned) and
;; store the byte at sp+1, i.e. the odd byte of the new word slot.
(define_expand "pushqi1"
[(set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int -2)))
(set (mem:QI (plus:SI (reg:SI SP_REG) (const_int 1)))
(match_operand:QI 0 "general_operand" ""))]
"!TARGET_COLDFIRE"
"")
 
;; Secondary-reload expanders for SFmode on ColdFire FPU.  Operand 2 is an
;; address-register scratch; emit_move_sequence may use it.  If it does not,
;; we emit a plain SET ourselves so the clobber never reaches the insn stream.
(define_expand "reload_insf"
[(set (match_operand:SF 0 "nonimmediate_operand" "=f")
(match_operand:SF 1 "general_operand" "mf"))
(clobber (match_operand:SI 2 "register_operand" "=&a"))]
"TARGET_COLDFIRE_FPU"
{
if (emit_move_sequence (operands, SFmode, operands[2]))
DONE;

/* We don't want the clobber emitted, so handle this ourselves. */
emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
DONE;
})

;; Mirror of reload_insf for the store (FP reg -> memory) direction.
(define_expand "reload_outsf"
[(set (match_operand:SF 0 "general_operand" "")
(match_operand:SF 1 "register_operand" "f"))
(clobber (match_operand:SI 2 "register_operand" "=&a"))]
"TARGET_COLDFIRE_FPU"
{
if (emit_move_sequence (operands, SFmode, operands[2]))
DONE;

/* We don't want the clobber emitted, so handle this ourselves. */
emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
DONE;
})
 
;; Single-precision float moves.
(define_expand "movsf"
[(set (match_operand:SF 0 "nonimmediate_operand" "")
(match_operand:SF 1 "general_operand" ""))]
""
"")

;; SFmode move for classic 68k.  FP-reg cases go through the 68881;
;; data/address register and memory cases use integer move.l, with special
;; fast forms (sub/lea/moveq/clr) for storing the constant 0.0.
(define_insn ""
[(set (match_operand:SF 0 "nonimmediate_operand" "=rmf")
(match_operand:SF 1 "general_operand" "rmfF"))]
"!TARGET_COLDFIRE"
{
if (FP_REG_P (operands[0]))
{
if (FP_REG_P (operands[1]))
return "f%$move%.x %1,%0";
else if (ADDRESS_REG_P (operands[1]))
/* No direct A-reg -> FP-reg move; bounce through the stack.  */
return "move%.l %1,%-\;f%$move%.s %+,%0";
else if (GET_CODE (operands[1]) == CONST_DOUBLE)
return output_move_const_single (operands);
return "f%$move%.s %f1,%0";
}
if (FP_REG_P (operands[1]))
{
if (ADDRESS_REG_P (operands[0]))
/* Likewise, FP-reg -> A-reg goes via the stack.  */
return "fmove%.s %1,%-\;move%.l %+,%0";
return "fmove%.s %f1,%0";
}
if (operands[1] == CONST0_RTX (SFmode)
/* clr insns on 68000 read before writing.
This isn't so on the 68010, but we have no TARGET_68010. */
&& ((TARGET_68020 || TARGET_COLDFIRE)
|| !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
{
if (ADDRESS_REG_P (operands[0]))
{
/* On the '040, 'subl an,an' takes 2 clocks while lea takes only 1 */
if (!TARGET_68040 && !TARGET_68060)
return "sub%.l %0,%0";
else
return MOTOROLA ? "lea 0.w,%0" : "lea 0:w,%0";
}
/* moveq is faster on the 68000. */
if (DATA_REG_P (operands[0]) && !(TARGET_68020 || TARGET_COLDFIRE))
{
return "moveq #0,%0";
}
return "clr%.l %0";
}
return "move%.l %1,%0";
})

;; SFmode move for ColdFire without an FPU: a plain 32-bit integer move.
(define_insn "movsf_cf_soft"
[(set (match_operand:SF 0 "nonimmediate_operand" "=r,g")
(match_operand:SF 1 "general_operand" "g,r"))]
"TARGET_COLDFIRE && !TARGET_COLDFIRE_FPU"
{
return "move%.l %1,%0";
})
 
;; SFmode move for ColdFire with FPU.  Alternatives 4 and 5 load a float
;; constant (F) by materializing its 32-bit image as an integer; the rest
;; use fmove/move.l directly.
(define_insn "movsf_cf_hard"
[(set (match_operand:SF 0 "nonimmediate_operand" "=r<Q>U, f, f,mr,f,r<Q>,f
,m")
(match_operand:SF 1 "general_operand" " f, r<Q>U,f,rm,F,F, m
,f"))]
"TARGET_COLDFIRE_FPU"
{
if (which_alternative == 4 || which_alternative == 5) {
/* Constant source: convert to its IEEE single bit pattern.  */
rtx xoperands[2];
REAL_VALUE_TYPE r;
long l;
REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
REAL_VALUE_TO_TARGET_SINGLE (r, l);
xoperands[0] = operands[0];
xoperands[1] = GEN_INT (l);
if (which_alternative == 5) {
/* Destination is a non-FP register or memory.  */
if (l == 0) {
if (ADDRESS_REG_P (xoperands[0]))
output_asm_insn ("sub%.l %0,%0", xoperands);
else
output_asm_insn ("clr%.l %0", xoperands);
} else
if (GET_CODE (operands[0]) == MEM
&& symbolic_operand (XEXP (operands[0], 0), SImode))
output_asm_insn ("move%.l %1,%-;move%.l %+,%0", xoperands);
else
output_asm_insn ("move%.l %1,%0", xoperands);
return "";
}
/* Destination is an FP register: push the image, then pop into it.  */
if (l != 0)
output_asm_insn ("move%.l %1,%-;fsmove%.s %+,%0", xoperands);
else
output_asm_insn ("clr%.l %-;fsmove%.s %+,%0", xoperands);
return "";
}
if (FP_REG_P (operands[0]))
{
if (ADDRESS_REG_P (operands[1]))
return "move%.l %1,%-;f%$smove%.s %+,%0";
if (FP_REG_P (operands[1]))
return "f%$move%.d %1,%0";
if (GET_CODE (operands[1]) == CONST_DOUBLE)
return output_move_const_single (operands);
return "f%$move%.s %f1,%0";
}
if (FP_REG_P (operands[1]))
{
if (ADDRESS_REG_P (operands[0]))
return "fmove%.s %1,%-;move%.l %+,%0";
return "fmove%.s %f1,%0";
}
if (operands[1] == CONST0_RTX (SFmode))
{
if (ADDRESS_REG_P (operands[0]))
return "sub%.l %0,%0";
return "clr%.l %0";
}
return "move%.l %1,%0";
})
 
;; Secondary-reload expanders for DFmode on ColdFire FPU; same scheme as
;; reload_insf/reload_outsf above (A-reg scratch, clobber suppressed when
;; emit_move_sequence does not need it).
(define_expand "reload_indf"
[(set (match_operand:DF 0 "nonimmediate_operand" "=f")
(match_operand:DF 1 "general_operand" "mf"))
(clobber (match_operand:SI 2 "register_operand" "=&a"))]
"TARGET_COLDFIRE_FPU"
{
if (emit_move_sequence (operands, DFmode, operands[2]))
DONE;

/* We don't want the clobber emitted, so handle this ourselves. */
emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
DONE;
})

(define_expand "reload_outdf"
[(set (match_operand:DF 0 "general_operand" "")
(match_operand:DF 1 "register_operand" "f"))
(clobber (match_operand:SI 2 "register_operand" "=&a"))]
"TARGET_COLDFIRE_FPU"
{
if (emit_move_sequence (operands, DFmode, operands[2]))
DONE;

/* We don't want the clobber emitted, so handle this ourselves. */
emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
DONE;
})

;; DFmode move expander; ColdFire FPU may need the emit_move_sequence path.
(define_expand "movdf"
[(set (match_operand:DF 0 "nonimmediate_operand" "")
(match_operand:DF 1 "general_operand" ""))]
""
{
if (TARGET_COLDFIRE_FPU)
if (emit_move_sequence (operands, DFmode, 0))
DONE;
})
 
;; DFmode move for classic 68k.  FP-reg cases use fmove; reg pairs go via
;; the stack (two 32-bit pushes); everything else is a double-word integer
;; move handled by output_move_double.
(define_insn ""
[(set (match_operand:DF 0 "nonimmediate_operand" "=rm,rf,rf,&rof<>")
(match_operand:DF 1 "general_operand" "*rf,m,0,*rofE<>"))]
; [(set (match_operand:DF 0 "nonimmediate_operand" "=rm,&rf,&rof<>")
; (match_operand:DF 1 "general_operand" "rf,m,rofF<>"))]
"!TARGET_COLDFIRE"
{
if (FP_REG_P (operands[0]))
{
if (FP_REG_P (operands[1]))
return "f%&move%.x %1,%0";
if (REG_P (operands[1]))
{
/* Integer register pair -> FP reg: push low word then high word,
then pop the double into the FP register.  */
rtx xoperands[2];
xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
output_asm_insn ("move%.l %1,%-", xoperands);
output_asm_insn ("move%.l %1,%-", operands);
return "f%&move%.d %+,%0";
}
if (GET_CODE (operands[1]) == CONST_DOUBLE)
return output_move_const_double (operands);
return "f%&move%.d %f1,%0";
}
else if (FP_REG_P (operands[1]))
{
if (REG_P (operands[0]))
{
/* FP reg -> integer register pair, again via the stack.  */
output_asm_insn ("fmove%.d %f1,%-\;move%.l %+,%0", operands);
operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
return "move%.l %+,%0";
}
else
return "fmove%.d %f1,%0";
}
return output_move_double (operands);
})

;; DFmode move for ColdFire without FPU: a plain double-word integer move.
(define_insn "movdf_cf_soft"
[(set (match_operand:DF 0 "nonimmediate_operand" "=r,g")
(match_operand:DF 1 "general_operand" "g,r"))]
"TARGET_COLDFIRE && !TARGET_COLDFIRE_FPU"
{
return output_move_double (operands);
})
 
;; DFmode move for ColdFire with FPU.  Register pairs and memory use
;; move.l / output_move_double; alternative 7 materializes a double
;; constant (E) from its two 32-bit words via the stack.
(define_insn "movdf_cf_hard"
[(set (match_operand:DF 0 "nonimmediate_operand" "=f, <Q>U,r,f,r,r,m,f")
(match_operand:DF 1 "general_operand" " f<Q>U,f, f,r,r,m,r,E"))]
"TARGET_COLDFIRE_FPU"
{
rtx xoperands[3];
REAL_VALUE_TYPE r;
long l[2];

switch (which_alternative)
{
default:
return "fmove%.d %1,%0";
case 2:
/* FP reg -> register pair via the stack.  */
return "fmove%.d %1,%-;move%.l %+,%0;move%.l %+,%R0";
case 3:
/* Register pair -> FP reg via the stack.  */
return "move%.l %R1,%-;move%.l %1,%-;f%&move%.d %+,%0";
case 4:
return "move%.l %1,%0;move%.l %R1,%R0";
case 5: case 6:
return output_move_double (operands);
case 7:
/* Double constant -> FP reg: push the two target-format words
(clearing when a word is zero), then pop into the FP reg.  */
REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
REAL_VALUE_TO_TARGET_DOUBLE (r, l);
xoperands[0] = operands[0];
xoperands[1] = GEN_INT (l[0]);
xoperands[2] = GEN_INT (l[1]);
if (operands[1] == CONST0_RTX (DFmode))
output_asm_insn ("clr%.l %-;clr%.l %-;fdmove%.d %+,%0",
xoperands);
else
if (l[1] == 0)
output_asm_insn ("clr%.l %-;move%.l %1,%-;fdmove%.d %+,%0",
xoperands);
else
output_asm_insn ("move%.l %2,%-;move%.l %1,%-;fdmove%.d %+,%0",
xoperands);
return "";
}
})
 
;; ??? The XFmode patterns are inconsistent about whether constants are
;; allowed. Most but not all have predicates and constraints that disallow
;; constants. Most but not all have output templates that handle constants.
;; See also LEGITIMATE_CONSTANT_P.
 
;; Extended-precision (XFmode) move expander.  Constants are forced into
;; the constant pool, and PC-relative destinations are rewritten, before
;; reload only.
(define_expand "movxf"
[(set (match_operand:XF 0 "nonimmediate_operand" "")
(match_operand:XF 1 "general_operand" ""))]
""
{
/* We can't rewrite operands during reload. */
if (! reload_in_progress)
{
if (CONSTANT_P (operands[1]))
{
operands[1] = force_const_mem (XFmode, operands[1]);
if (! memory_address_p (XFmode, XEXP (operands[1], 0)))
operands[1] = adjust_address (operands[1], XFmode, 0);
}
if (flag_pic && TARGET_PCREL)
{
/* Don't allow writes to memory except via a register; the
m68k doesn't consider PC-relative addresses to be writable. */
if (GET_CODE (operands[0]) == MEM
&& symbolic_operand (XEXP (operands[0], 0), SImode))
operands[0] = gen_rtx_MEM (XFmode,
force_reg (SImode, XEXP (operands[0], 0)));
}
}
})
 
;; XFmode move with a 68881.  XFmode occupies three 32-bit words, so
;; transfers between FP and integer registers go through the stack with
;; three move.l's.
(define_insn ""
[(set (match_operand:XF 0 "nonimmediate_operand" "=f,m,f,!r,!f,!r,m,!r")
(match_operand:XF 1 "nonimmediate_operand" "m,f,f,f,r,!r,!r,m"))]
"TARGET_68881"
{
if (FP_REG_P (operands[0]))
{
if (FP_REG_P (operands[1]))
return "fmove%.x %1,%0";
if (REG_P (operands[1]))
{
/* Push the three words (highest register number first) and pop
the extended value into the FP register.  */
rtx xoperands[2];
xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 2);
output_asm_insn ("move%.l %1,%-", xoperands);
xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
output_asm_insn ("move%.l %1,%-", xoperands);
output_asm_insn ("move%.l %1,%-", operands);
return "fmove%.x %+,%0";
}
if (GET_CODE (operands[1]) == CONST_DOUBLE)
return "fmove%.x %1,%0";
return "fmove%.x %f1,%0";
}
if (FP_REG_P (operands[1]))
{
if (REG_P (operands[0]))
{
/* FP register -> three consecutive integer registers.  */
output_asm_insn ("fmove%.x %f1,%-\;move%.l %+,%0", operands);
operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
output_asm_insn ("move%.l %+,%0", operands);
operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
return "move%.l %+,%0";
}
/* Must be memory destination. */
return "fmove%.x %f1,%0";
}
return output_move_double (operands);
})
 
;; XFmode move without a 68881.  NOTE(review): the FP_REG_P branches here
;; mirror the 68881 pattern above but should be unreachable when there is
;; no FPU -- presumably kept for safety; confirm before removing.
(define_insn ""
[(set (match_operand:XF 0 "nonimmediate_operand" "=rm,rf,&rof<>")
(match_operand:XF 1 "nonimmediate_operand" "rf,m,rof<>"))]
"! TARGET_68881 && ! TARGET_COLDFIRE"
{
if (FP_REG_P (operands[0]))
{
if (FP_REG_P (operands[1]))
return "fmove%.x %1,%0";
if (REG_P (operands[1]))
{
rtx xoperands[2];
xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 2);
output_asm_insn ("move%.l %1,%-", xoperands);
xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
output_asm_insn ("move%.l %1,%-", xoperands);
output_asm_insn ("move%.l %1,%-", operands);
return "fmove%.x %+,%0";
}
if (GET_CODE (operands[1]) == CONST_DOUBLE)
return "fmove%.x %1,%0";
return "fmove%.x %f1,%0";
}
if (FP_REG_P (operands[1]))
{
if (REG_P (operands[0]))
{
output_asm_insn ("fmove%.x %f1,%-\;move%.l %+,%0", operands);
operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
output_asm_insn ("move%.l %+,%0", operands);
operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
return "move%.l %+,%0";
}
else
return "fmove%.x %f1,%0";
}
return output_move_double (operands);
})

;; XFmode move on FPU-less ColdFire: a multi-word integer move.
(define_insn ""
[(set (match_operand:XF 0 "nonimmediate_operand" "=r,g")
(match_operand:XF 1 "nonimmediate_operand" "g,r"))]
"! TARGET_68881 && TARGET_COLDFIRE"
"* return output_move_double (operands);")
 
;; 64-bit integer (DImode) moves.
(define_expand "movdi"
;; Let's see if it really still needs to handle fp regs, and, if so, why.
[(set (match_operand:DI 0 "nonimmediate_operand" "")
(match_operand:DI 1 "general_operand" ""))]
""
"")

;; movdi can apply to fp regs in some cases
(define_insn ""
;; Let's see if it really still needs to handle fp regs, and, if so, why.
[(set (match_operand:DI 0 "nonimmediate_operand" "=rm,r,&ro<>")
(match_operand:DI 1 "general_operand" "rF,m,roi<>F"))]
; [(set (match_operand:DI 0 "nonimmediate_operand" "=rm,&r,&ro<>,!&rm,!&f")
; (match_operand:DI 1 "general_operand" "r,m,roi<>,fF"))]
; [(set (match_operand:DI 0 "nonimmediate_operand" "=rm,&rf,&ro<>,!&rm,!&f")
; (match_operand:DI 1 "general_operand" "r,m,roi<>,fF,rfF"))]
"!TARGET_COLDFIRE"
{
if (FP_REG_P (operands[0]))
{
if (FP_REG_P (operands[1]))
return "fmove%.x %1,%0";
if (REG_P (operands[1]))
{
/* Register pair -> FP reg via the stack, as in movdf.  */
rtx xoperands[2];
xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
output_asm_insn ("move%.l %1,%-", xoperands);
output_asm_insn ("move%.l %1,%-", operands);
return "fmove%.d %+,%0";
}
if (GET_CODE (operands[1]) == CONST_DOUBLE)
return output_move_const_double (operands);
return "fmove%.d %f1,%0";
}
else if (FP_REG_P (operands[1]))
{
if (REG_P (operands[0]))
{
output_asm_insn ("fmove%.d %f1,%-\;move%.l %+,%0", operands);
operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
return "move%.l %+,%0";
}
else
return "fmove%.d %f1,%0";
}
return output_move_double (operands);
})

;; DImode move for ColdFire: a plain double-word integer move.
(define_insn ""
[(set (match_operand:DI 0 "nonimmediate_operand" "=r,g")
(match_operand:DI 1 "general_operand" "g,r"))]
"TARGET_COLDFIRE"
"* return output_move_double (operands);")
 
;; This goes after the move instructions
;; because the move instructions are better (require no spilling)
;; when they can apply. It goes before the add/sub insns
;; so we will prefer it to them.
 
;; Push an address onto the stack in one insn using pea.
(define_insn "pushasi"
[(set (match_operand:SI 0 "push_operand" "=m")
(match_operand:SI 1 "address_operand" "p"))]
""
"pea %a1")
;; truncation instructions
;; Truncate SImode to QImode.  For a register destination we copy the
;; whole long; for memory we move only the low byte (offset 3,
;; big-endian).
(define_insn "truncsiqi2"
[(set (match_operand:QI 0 "nonimmediate_operand" "=dm,d")
(truncate:QI
(match_operand:SI 1 "general_src_operand" "doJS,i")))]
""
{
if (GET_CODE (operands[0]) == REG)
{
/* Must clear condition codes, since the move.l bases them on
the entire 32 bits, not just the desired 8 bits. */
CC_STATUS_INIT;
return "move%.l %1,%0";
}
if (GET_CODE (operands[1]) == MEM)
operands[1] = adjust_address (operands[1], QImode, 3);
return "move%.b %1,%0";
})
 
;; Truncate HImode to QImode.  Register destinations get a word or long
;; move (cc must then be invalidated); a memory destination gets a byte
;; move from the low byte (offset 1, big-endian).
(define_insn "trunchiqi2"
[(set (match_operand:QI 0 "nonimmediate_operand" "=dm,d")
(truncate:QI
(match_operand:HI 1 "general_src_operand" "doJS,i")))]
""
{
if (GET_CODE (operands[0]) == REG
&& (GET_CODE (operands[1]) == MEM
|| GET_CODE (operands[1]) == CONST_INT))
{
/* Must clear condition codes, since the move.w bases them on
the entire 16 bits, not just the desired 8 bits. */
CC_STATUS_INIT;
return "move%.w %1,%0";
}
if (GET_CODE (operands[0]) == REG)
{
/* Must clear condition codes, since the move.l bases them on
the entire 32 bits, not just the desired 8 bits. */
CC_STATUS_INIT;
return "move%.l %1,%0";
}
if (GET_CODE (operands[1]) == MEM)
operands[1] = adjust_address (operands[1], QImode, 1);
return "move%.b %1,%0";
})
 
;; Truncate SImode to HImode; register destinations copy the whole long,
;; memory destinations move the low word (offset 2, big-endian).
(define_insn "truncsihi2"
[(set (match_operand:HI 0 "nonimmediate_operand" "=dm,d")
(truncate:HI
(match_operand:SI 1 "general_src_operand" "roJS,i")))]
""
{
if (GET_CODE (operands[0]) == REG)
{
/* Must clear condition codes, since the move.l bases them on
the entire 32 bits, not just the desired 16 bits. */
CC_STATUS_INIT;
return "move%.l %1,%0";
}
if (GET_CODE (operands[1]) == MEM)
/* NOTE(review): QImode here looks like a copy-paste from truncsiqi2;
later GCC uses HImode for this adjust_address -- confirm.  */
operands[1] = adjust_address (operands[1], QImode, 2);
return "move%.w %1,%0";
})
;; zero extension instructions

;; two special patterns to match various post_inc/pre_dec patterns
;; Zero-extend into a post-increment destination twice the source width:
;; first store zero (the high part), then the value (the low part).
(define_insn_and_split "*zero_extend_inc"
[(set (match_operand 0 "post_inc_operand" "")
(zero_extend (match_operand 1 "register_operand" "")))]
"GET_MODE_CLASS (GET_MODE (operands[0])) == MODE_INT &&
GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT &&
GET_MODE_SIZE (GET_MODE (operands[0])) == GET_MODE_SIZE (GET_MODE (operands[1])) * 2"
"#"
""
[(set (match_dup 0)
(const_int 0))
(set (match_dup 0)
(match_dup 1))]
{
operands[0] = adjust_address (operands[0], GET_MODE (operands[1]), 0);
})

;; Pre-decrement counterpart: the stores happen in the opposite order
;; (value first, then zero).  Excluded for HImode pushes through sp.
(define_insn_and_split "*zero_extend_dec"
[(set (match_operand 0 "pre_dec_operand" "")
(zero_extend (match_operand 1 "register_operand" "")))]
"(GET_MODE (operands[0]) != HImode || XEXP (XEXP (operands[0], 0), 0) != stack_pointer_rtx) &&
GET_MODE_CLASS (GET_MODE (operands[0])) == MODE_INT &&
GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT &&
GET_MODE_SIZE (GET_MODE (operands[0])) == GET_MODE_SIZE (GET_MODE (operands[1])) * 2"
"#"
""
[(set (match_dup 0)
(match_dup 1))
(set (match_dup 0)
(const_int 0))]
{
operands[0] = adjust_address (operands[0], GET_MODE (operands[1]), 0);
})
 
;; Zero extensions to DImode: split into a zero-extend (or copy) of the
;; low SImode half plus clearing the high half.
(define_insn_and_split "zero_extendqidi2"
[(set (match_operand:DI 0 "register_operand" "")
(zero_extend:DI (match_operand:QI 1 "nonimmediate_src_operand" "")))]
""
"#"
""
[(set (match_dup 2)
(zero_extend:SI (match_dup 1)))
(set (match_dup 3)
(const_int 0))]
{
operands[2] = gen_lowpart (SImode, operands[0]);
operands[3] = gen_highpart (SImode, operands[0]);
})

(define_insn_and_split "zero_extendhidi2"
[(set (match_operand:DI 0 "register_operand" "")
(zero_extend:DI (match_operand:HI 1 "nonimmediate_src_operand" "")))]
""
"#"
""
[(set (match_dup 2)
(zero_extend:SI (match_dup 1)))
(set (match_dup 3)
(const_int 0))]
{
operands[2] = gen_lowpart (SImode, operands[0]);
operands[3] = gen_highpart (SImode, operands[0]);
})

;; mem->mem needs the source in a register first.
(define_expand "zero_extendsidi2"
[(set (match_operand:DI 0 "nonimmediate_operand" "")
(zero_extend:DI (match_operand:SI 1 "nonimmediate_src_operand" "")))]
""
{
if (GET_CODE (operands[0]) == MEM
&& GET_CODE (operands[1]) == MEM)
operands[1] = force_reg (SImode, operands[1]);
})

(define_insn_and_split "*zero_extendsidi2"
[(set (match_operand:DI 0 "nonimmediate_operand" "")
(zero_extend:DI (match_operand:SI 1 "nonimmediate_src_operand" "")))]
"GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM"
"#"
""
[(set (match_dup 2)
(match_dup 1))
(set (match_dup 3)
(const_int 0))]
{
operands[2] = gen_lowpart (SImode, operands[0]);
operands[3] = gen_highpart (SImode, operands[0]);
})
 
;; HI->SI and QI->SI/HI zero extensions.  ColdFire V4 has the single-insn
;; mvz; everywhere else the generic patterns emit "#" and are handled by
;; the catch-all splits further below.
(define_insn "*zero_extendhisi2_cf"
[(set (match_operand:SI 0 "register_operand" "=d")
(zero_extend:SI (match_operand:HI 1 "nonimmediate_src_operand" "rmS")))]
"TARGET_CFV4"
"mvz%.w %1,%0")

(define_insn "zero_extendhisi2"
[(set (match_operand:SI 0 "register_operand" "=d")
(zero_extend:SI (match_operand:HI 1 "nonimmediate_src_operand" "rmS")))]
""
"#")

(define_expand "zero_extendqihi2"
[(set (match_operand:HI 0 "register_operand" "")
(zero_extend:HI (match_operand:QI 1 "nonimmediate_src_operand" "")))]
"!TARGET_COLDFIRE"
"")

(define_insn "*zero_extendqihi2"
[(set (match_operand:HI 0 "register_operand" "=d")
(zero_extend:HI (match_operand:QI 1 "nonimmediate_src_operand" "dmS")))]
"!TARGET_COLDFIRE"
"#")

(define_insn "*zero_extendqisi2_cfv4"
[(set (match_operand:SI 0 "register_operand" "=d")
(zero_extend:SI (match_operand:QI 1 "nonimmediate_src_operand" "dmS")))]
"TARGET_CFV4"
"mvz%.b %1,%0")

(define_insn "zero_extendqisi2"
[(set (match_operand:SI 0 "register_operand" "=d")
(zero_extend:SI (match_operand:QI 1 "nonimmediate_src_operand" "dmS")))]
""
"#")
 
;; these two patterns split everything else which isn't matched by
;; something else above
;; If the destination overlaps the source, write the low part first and
;; then mask off the high bits with AND.
(define_split
[(set (match_operand 0 "register_operand" "")
(zero_extend (match_operand 1 "nonimmediate_src_operand" "")))]
"!TARGET_CFV4 && reload_completed && reg_mentioned_p (operands[0], operands[1])"
[(set (strict_low_part (match_dup 2))
(match_dup 1))
(set (match_dup 0)
(match_op_dup 4 [(match_dup 0) (match_dup 3)]))]
{
operands[2] = gen_lowpart (GET_MODE (operands[1]), operands[0]);
operands[3] = GEN_INT (GET_MODE_MASK (GET_MODE (operands[1])));
operands[4] = gen_rtx_AND (GET_MODE (operands[0]), operands[0], operands[3]);
})

;; Otherwise clear the whole destination, then store into the low part.
(define_split
[(set (match_operand 0 "register_operand" "")
(zero_extend (match_operand 1 "nonimmediate_src_operand" "")))]
"!TARGET_CFV4 && reload_completed"
[(set (match_dup 0)
(const_int 0))
(set (strict_low_part (match_dup 2))
(match_dup 1))]
{
operands[2] = gen_lowpart (GET_MODE (operands[1]), operands[0]);
})
;; sign extension instructions

;; Sign-extend QImode to DImode: extend into the low word (operand 3,
;; the register after operand 0), then fill the high word with the sign
;; via smi (all-ones if negative) followed by extb.
(define_insn "extendqidi2"
[(set (match_operand:DI 0 "nonimmediate_operand" "=d")
(sign_extend:DI (match_operand:QI 1 "general_src_operand" "rmS")))]
""
{
CC_STATUS_INIT;
operands[2] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
if (TARGET_CFV4)
return "mvs%.b %1,%2\;smi %0\;extb%.l %0";
if (TARGET_68020 || TARGET_COLDFIRE)
{
if (ADDRESS_REG_P (operands[1]))
return "move%.w %1,%2\;extb%.l %2\;smi %0\;extb%.l %0";
else
return "move%.b %1,%2\;extb%.l %2\;smi %0\;extb%.l %0";
}
else
{
/* Pre-68020 has no extb; extend in two steps (ext.w then ext.l).  */
if (ADDRESS_REG_P (operands[1]))
return "move%.w %1,%2\;ext%.w %2\;ext%.l %2\;move%.l %2,%0\;smi %0";
else
return "move%.b %1,%2\;ext%.w %2\;ext%.l %2\;move%.l %2,%0\;smi %0";
}
})
 
;; Sign-extend HImode to DImode; same scheme as extendqidi2.
(define_insn "extendhidi2"
[(set (match_operand:DI 0 "nonimmediate_operand" "=d")
(sign_extend:DI
(match_operand:HI 1 "general_src_operand" "rmS")))]
""
{
CC_STATUS_INIT;
operands[2] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
if (TARGET_CFV4)
return "mvs%.w %1,%2\;smi %0\;extb%.l %0";
if (TARGET_68020 || TARGET_COLDFIRE)
return "move%.w %1,%2\;ext%.l %2\;smi %0\;extb%.l %0";
else
return "move%.w %1,%2\;ext%.l %2\;smi %0\;ext%.w %0\;ext%.l %0";
})

;; Sign-extend SImode to DImode: copy the low word, then derive the high
;; word from the sign with smi + ext.
(define_insn "extendsidi2"
[(set (match_operand:DI 0 "nonimmediate_operand" "=d")
(sign_extend:DI
(match_operand:SI 1 "general_operand" "rm")))]
""
{
CC_STATUS_INIT;
operands[2] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
if (TARGET_68020 || TARGET_COLDFIRE)
return "move%.l %1,%2\;smi %0\;extb%.l %0";
else
return "move%.l %1,%2\;smi %0\;ext%.w %0\;ext%.l %0";
})
 
;; Special case when one can avoid register clobbering, copy and test
;; Maybe there is a way to make that the general case, by forcing the
;; result of the SI tree to be in the lower register of the DI target
(define_insn "extendplussidi"
[(set (match_operand:DI 0 "register_operand" "=d")
(sign_extend:DI (plus:SI (match_operand:SI 1 "general_operand" "%rmn")
(match_operand:SI 2 "general_operand" "rmn"))))]
""
{
CC_STATUS_INIT;
operands[3] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
/* NOTE(review): constants > 8 are swapped into the second add operand,
presumably so a small constant can stay eligible for addq -- confirm. */
if (GET_CODE (operands[1]) == CONST_INT
&& (unsigned) INTVAL (operands[1]) > 8)
{
rtx tmp = operands[1];

operands[1] = operands[2];
operands[2] = tmp;
}
if (GET_CODE (operands[1]) == REG
&& REGNO (operands[1]) == REGNO (operands[3]))
output_asm_insn ("add%.l %2,%3", operands);
else
output_asm_insn ("move%.l %2,%3\;add%.l %1,%3", operands);
if (TARGET_68020 || TARGET_COLDFIRE)
return "smi %0\;extb%.l %0";
else
return "smi %0\;ext%.w %0\;ext%.l %0";
})
 
;; HI->SI and QI->SI/HI sign extensions.  ColdFire V4 uses the
;; single-insn mvs; classic 68k uses ext/extb (data regs only) or move.w
;; into an address register (which sign-extends implicitly).
(define_expand "extendhisi2"
[(set (match_operand:SI 0 "nonimmediate_operand" "")
(sign_extend:SI
(match_operand:HI 1 "nonimmediate_src_operand" "")))]
""
"")

(define_insn "*cfv4_extendhisi2"
[(set (match_operand:SI 0 "nonimmediate_operand" "=d")
(sign_extend:SI
(match_operand:HI 1 "nonimmediate_src_operand" "rmS")))]
"TARGET_CFV4"
"mvs%.w %1,%0")

(define_insn "*68k_extendhisi2"
[(set (match_operand:SI 0 "nonimmediate_operand" "=*d,a")
(sign_extend:SI
(match_operand:HI 1 "nonimmediate_src_operand" "0,rmS")))]
"!TARGET_CFV4"
{
if (ADDRESS_REG_P (operands[0]))
return "move%.w %1,%0";
return "ext%.l %0";
})

(define_insn "extendqihi2"
[(set (match_operand:HI 0 "nonimmediate_operand" "=d")
(sign_extend:HI (match_operand:QI 1 "nonimmediate_operand" "0")))]
""
"ext%.w %0")

;; extb requires 68020+ or ColdFire.
(define_expand "extendqisi2"
[(set (match_operand:SI 0 "nonimmediate_operand" "")
(sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))]
"TARGET_68020 || TARGET_COLDFIRE"
"")

(define_insn "*cfv4_extendqisi2"
[(set (match_operand:SI 0 "nonimmediate_operand" "=d")
(sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "rms")))]
"TARGET_CFV4"
"mvs%.b %1,%0")

(define_insn "*68k_extendqisi2"
[(set (match_operand:SI 0 "nonimmediate_operand" "=d")
(sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "0")))]
"TARGET_68020 || (TARGET_COLDFIRE && !TARGET_CFV4)"
"extb%.l %0")
;; Conversions between float and double.

(define_expand "extendsfdf2"
[(set (match_operand:DF 0 "nonimmediate_operand" "")
(float_extend:DF
(match_operand:SF 1 "general_operand" "")))]
"TARGET_HARD_FLOAT"
"")

;; SF->DF on a 68881.  Extension within the same FP reg is a no-op;
;; FP reg -> data-reg pair goes via the stack.
(define_insn ""
[(set (match_operand:DF 0 "nonimmediate_operand" "=*fdm,f")
(float_extend:DF
(match_operand:SF 1 "general_operand" "f,dmF")))]
"TARGET_68881"
{
if (FP_REG_P (operands[0]) && FP_REG_P (operands[1]))
{
if (REGNO (operands[0]) == REGNO (operands[1]))
{
/* Extending float to double in an fp-reg is a no-op.
NOTICE_UPDATE_CC has already assumed that the
cc will be set. So cancel what it did. */
cc_status = cc_prev_status;
return "";
}
return "f%&move%.x %1,%0";
}
if (FP_REG_P (operands[0]))
return "f%&move%.s %f1,%0";
if (DATA_REG_P (operands[0]) && FP_REG_P (operands[1]))
{
output_asm_insn ("fmove%.d %f1,%-\;move%.l %+,%0", operands);
operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
return "move%.l %+,%0";
}
return "fmove%.d %f1,%0";
})

;; SF->DF on ColdFire FPU; destination is always an FP register.
(define_insn "extendsfdf2_cf"
[(set (match_operand:DF 0 "nonimmediate_operand" "=f,f")
(float_extend:DF
(match_operand:SF 1 "general_operand" "f,<Q>U")))]
"TARGET_COLDFIRE_FPU"
{
if (FP_REG_P (operands[0]) && FP_REG_P (operands[1]))
{
if (REGNO (operands[0]) == REGNO (operands[1]))
{
/* Extending float to double in an fp-reg is a no-op.
NOTICE_UPDATE_CC has already assumed that the
cc will be set. So cancel what it did. */
cc_status = cc_prev_status;
return "";
}
return "f%&move%.d %1,%0";
}
return "f%&move%.s %f1,%0";
})
 
;; This cannot output into an f-reg because there is no way to be
;; sure of truncating in that case.
(define_expand "truncdfsf2"
[(set (match_operand:SF 0 "nonimmediate_operand" "")
(float_truncate:SF
(match_operand:DF 1 "general_operand" "")))]
"TARGET_HARD_FLOAT"
"")

;; On the '040 we can truncate in a register accurately and easily.
(define_insn ""
[(set (match_operand:SF 0 "nonimmediate_operand" "=f")
(float_truncate:SF
(match_operand:DF 1 "general_operand" "fmG")))]
"TARGET_68881 && TARGET_68040_ONLY"
{
if (FP_REG_P (operands[1]))
return "f%$move%.x %1,%0";
return "f%$move%.d %f1,%0";
})

;; DF->SF on ColdFire FPU: single-precision fmove into reg or memory.
(define_insn "truncdfsf2_cf"
[(set (match_operand:SF 0 "nonimmediate_operand" "=f,d<Q>U")
(float_truncate:SF
(match_operand:DF 1 "general_operand" "<Q>U,f")))]
"TARGET_COLDFIRE_FPU"
"@
f%$move%.d %1,%0
fmove%.s %1,%0")

;; Generic 68881 DF->SF: store single-precision to a non-FP destination.
(define_insn ""
[(set (match_operand:SF 0 "nonimmediate_operand" "=dm")
(float_truncate:SF
(match_operand:DF 1 "general_operand" "f")))]
"TARGET_68881"
"fmove%.s %f1,%0")
;; Conversion between fixed point and floating point.
;; Note that among the fix-to-float insns
;; the ones that start with SImode come first.
;; That is so that an operand that is a CONST_INT
;; (and therefore lacks a specific machine mode)
;; will be recognized as SImode (which is always valid)
;; rather than as QImode or HImode.
 
;; Integer -> float conversions, iterated over FP modes (SF/DF/XF) and
;; split by FPU (68881 vs ColdFire).
(define_expand "floatsi<mode>2"
[(set (match_operand:FP 0 "nonimmediate_operand" "")
(float:FP (match_operand:SI 1 "general_operand" "")))]
"TARGET_HARD_FLOAT"
"")

(define_insn "floatsi<mode>2_68881"
[(set (match_operand:FP 0 "nonimmediate_operand" "=f")
(float:FP (match_operand:SI 1 "general_operand" "dmi")))]
"TARGET_68881"
"f<FP:round>move%.l %1,%0")

(define_insn "floatsi<mode>2_cf"
[(set (match_operand:FP 0 "nonimmediate_operand" "=f")
(float:FP (match_operand:SI 1 "general_operand" "d<Q>U")))]
"TARGET_COLDFIRE_FPU"
"f<FP:prec>move%.l %1,%0")


(define_expand "floathi<mode>2"
[(set (match_operand:FP 0 "nonimmediate_operand" "")
(float:FP (match_operand:HI 1 "general_operand" "")))]
"TARGET_HARD_FLOAT"
"")

(define_insn "floathi<mode>2_68881"
[(set (match_operand:FP 0 "nonimmediate_operand" "=f")
(float:FP (match_operand:HI 1 "general_operand" "dmn")))]
"TARGET_68881"
"fmove%.w %1,%0")

(define_insn "floathi<mode>2_cf"
[(set (match_operand:FP 0 "nonimmediate_operand" "=f")
(float:FP (match_operand:HI 1 "general_operand" "d<Q>U")))]
"TARGET_COLDFIRE_FPU"
"fmove%.w %1,%0")


(define_expand "floatqi<mode>2"
[(set (match_operand:FP 0 "nonimmediate_operand" "")
(float:FP (match_operand:QI 1 "general_operand" "")))]
"TARGET_HARD_FLOAT"
"")

(define_insn "floatqi<mode>2_68881"
[(set (match_operand:FP 0 "nonimmediate_operand" "=f")
(float:FP (match_operand:QI 1 "general_operand" "dmn")))]
"TARGET_68881"
"fmove%.b %1,%0")

(define_insn "floatqi<mode>2_cf"
[(set (match_operand:FP 0 "nonimmediate_operand" "=f")
(float:FP (match_operand:QI 1 "general_operand" "d<Q>U")))]
"TARGET_COLDFIRE_FPU"
"fmove%.b %1,%0")
 
 
;; New routines to convert floating-point values to integers
;; to be used on the '040. These should be faster than trapping
;; into the kernel to emulate fintrz. They should also be faster
;; than calling the subroutines fixsfsi or fixdfsi.
;; The sequence saves the FP control register (%!), presumably sets the
;; rounding mode to round-toward-zero, does the fmove conversion, then
;; restores the original control register -- confirm the bit twiddling
;; (#16 / #-33) against the 68040 FPCR layout.

(define_insn "fix_truncdfsi2"
[(set (match_operand:SI 0 "nonimmediate_operand" "=dm")
(fix:SI (fix:DF (match_operand:DF 1 "register_operand" "f"))))
(clobber (match_scratch:SI 2 "=d"))
(clobber (match_scratch:SI 3 "=d"))]
"TARGET_68881 && TARGET_68040"
{
CC_STATUS_INIT;
return "fmovem%.l %!,%2\;moveq #16,%3\;or%.l %2,%3\;and%.w #-33,%3\;fmovem%.l %3,%!\;fmove%.l %1,%0\;fmovem%.l %2,%!";
})

(define_insn "fix_truncdfhi2"
[(set (match_operand:HI 0 "nonimmediate_operand" "=dm")
(fix:HI (fix:DF (match_operand:DF 1 "register_operand" "f"))))
(clobber (match_scratch:SI 2 "=d"))
(clobber (match_scratch:SI 3 "=d"))]
"TARGET_68881 && TARGET_68040"
{
CC_STATUS_INIT;
return "fmovem%.l %!,%2\;moveq #16,%3\;or%.l %2,%3\;and%.w #-33,%3\;fmovem%.l %3,%!\;fmove%.w %1,%0\;fmovem%.l %2,%!";
})

(define_insn "fix_truncdfqi2"
[(set (match_operand:QI 0 "nonimmediate_operand" "=dm")
(fix:QI (fix:DF (match_operand:DF 1 "register_operand" "f"))))
(clobber (match_scratch:SI 2 "=d"))
(clobber (match_scratch:SI 3 "=d"))]
"TARGET_68881 && TARGET_68040"
{
CC_STATUS_INIT;
return "fmovem%.l %!,%2\;moveq #16,%3\;or%.l %2,%3\;and%.w #-33,%3\;fmovem%.l %3,%!\;fmove%.b %1,%0\;fmovem%.l %2,%!";
})
 
;; Convert a float to a float whose value is an integer.
;; This is the first stage of converting it to an integer type.
;; Uses fintrz, which the '040 lacks in hardware (hence !TARGET_68040).

(define_expand "ftrunc<mode>2"
[(set (match_operand:FP 0 "nonimmediate_operand" "")
(fix:FP (match_operand:FP 1 "general_operand" "")))]
"TARGET_HARD_FLOAT && !TARGET_68040"
"")

(define_insn "ftrunc<mode>2_68881"
[(set (match_operand:FP 0 "nonimmediate_operand" "=f")
(fix:FP (match_operand:FP 1 "general_operand" "f<FP:dreg>m")))]
"TARGET_68881 && !TARGET_68040"
{
if (FP_REG_P (operands[1]))
return "fintrz%.x %f1,%0";
return "fintrz%.<FP:prec> %f1,%0";
})

(define_insn "ftrunc<mode>2_cf"
[(set (match_operand:FP 0 "nonimmediate_operand" "=f")
(fix:FP (match_operand:FP 1 "general_operand" "f<FP:dreg><Q>U")))]
"TARGET_COLDFIRE_FPU"
{
if (FP_REG_P (operands[1]))
return "fintrz%.d %f1,%0";
return "fintrz%.<FP:prec> %f1,%0";
})
 
;; Convert a float whose value is an integer
;; to an actual integer. Second stage of converting float to integer type.
;; One expander + 68881/ColdFire insn pair per integer width.
(define_expand "fix<mode>qi2"
[(set (match_operand:QI 0 "nonimmediate_operand" "")
(fix:QI (match_operand:FP 1 "general_operand" "")))]
"TARGET_HARD_FLOAT"
"")

(define_insn "fix<mode>qi2_68881"
[(set (match_operand:QI 0 "nonimmediate_operand" "=dm")
(fix:QI (match_operand:FP 1 "general_operand" "f")))]
"TARGET_68881"
"fmove%.b %1,%0")

(define_insn "fix<mode>qi2_cf"
[(set (match_operand:QI 0 "nonimmediate_operand" "=d<Q>U")
(fix:QI (match_operand:FP 1 "general_operand" "f")))]
"TARGET_COLDFIRE_FPU"
"fmove%.b %1,%0")

(define_expand "fix<mode>hi2"
[(set (match_operand:HI 0 "nonimmediate_operand" "")
(fix:HI (match_operand:FP 1 "general_operand" "")))]
"TARGET_HARD_FLOAT"
"")

(define_insn "fix<mode>hi2_68881"
[(set (match_operand:HI 0 "nonimmediate_operand" "=dm")
(fix:HI (match_operand:FP 1 "general_operand" "f")))]
"TARGET_68881"
"fmove%.w %1,%0")

(define_insn "fix<mode>hi2_cf"
[(set (match_operand:HI 0 "nonimmediate_operand" "=d<Q>U")
(fix:HI (match_operand:FP 1 "general_operand" "f")))]
"TARGET_COLDFIRE_FPU"
"fmove%.w %1,%0")

(define_expand "fix<mode>si2"
[(set (match_operand:SI 0 "nonimmediate_operand" "")
(fix:SI (match_operand:FP 1 "general_operand" "")))]
"TARGET_HARD_FLOAT"
"")

(define_insn "fix<mode>si2_68881"
[(set (match_operand:SI 0 "nonimmediate_operand" "=dm")
(fix:SI (match_operand:FP 1 "general_operand" "f")))]
"TARGET_68881"
"fmove%.l %1,%0")

(define_insn "fix<mode>si2_cf"
[(set (match_operand:SI 0 "nonimmediate_operand" "=d<Q>U")
(fix:SI (match_operand:FP 1 "general_operand" "f")))]
"TARGET_COLDFIRE_FPU"
"fmove%.l %1,%0")
 
;; add instructions

;; Compute op1 + (op1 >> 63), i.e. add the sign bit of a 64-bit value
;; to the value itself.  operands[3]/[4] are synthesized in the C body
;; as the low SImode words of operands[0]/[1].  The scratch %2 holds
;; the 0/-1 sign extension produced by smi + ext; the final
;; sub/subx pair adds the sign bit via double-word subtraction of -1/0.
;; The POST_INC/PRE_DEC cases reorder the low-word move so the
;; auto-modified address is read in the right order.
(define_insn "adddi_lshrdi_63"
  [(set (match_operand:DI 0 "nonimmediate_operand" "=d")
(plus:DI (lshiftrt:DI (match_operand:DI 1 "general_operand" "rm")
(const_int 63))
(match_dup 1)))
(clobber (match_scratch:SI 2 "=d"))]
""
{
operands[3] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
if (REG_P (operands[1]) && REGNO (operands[1]) == REGNO (operands[0]))
return
"move%.l %1,%2\;add%.l %2,%2\;subx%.l %2,%2\;sub%.l %2,%3\;subx%.l %2,%0";
if (GET_CODE (operands[1]) == REG)
operands[4] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
else if (GET_CODE (XEXP (operands[1], 0)) == POST_INC
|| GET_CODE (XEXP (operands[1], 0)) == PRE_DEC)
operands[4] = operands[1];
else
operands[4] = adjust_address (operands[1], SImode, 4);
if (GET_CODE (operands[1]) == MEM
&& GET_CODE (XEXP (operands[1], 0)) == PRE_DEC)
output_asm_insn ("move%.l %4,%3", operands);
output_asm_insn ("move%.l %1,%0\;smi %2", operands);
if (TARGET_68020 || TARGET_COLDFIRE)
output_asm_insn ("extb%.l %2", operands);
else
output_asm_insn ("ext%.w %2\;ext%.l %2", operands);
if (GET_CODE (operands[1]) != MEM
|| GET_CODE (XEXP (operands[1], 0)) != PRE_DEC)
output_asm_insn ("move%.l %4,%3", operands);
return "sub%.l %2,%3\;subx%.l %2,%0";
})
 
;; Add (sign_extend:DI HI) << 32 to a DImode value: only the high word
;; of the destination is affected.  For an address-register destination
;; a 16-bit add%.w suffices (address adds sign-extend); otherwise the
;; HI source is widened in the scratch first.
(define_insn "adddi_sexthishl32"
  [(set (match_operand:DI 0 "nonimmediate_operand" "=o,a,*d,*d")
(plus:DI (ashift:DI (sign_extend:DI
(match_operand:HI 1 "general_operand" "rm,rm,rm,rm"))
(const_int 32))
(match_operand:DI 2 "general_operand" "0,0,0,0")))
(clobber (match_scratch:SI 3 "=&d,X,a,?d"))]
"!TARGET_COLDFIRE"
{
CC_STATUS_INIT;
if (ADDRESS_REG_P (operands[0]))
return "add%.w %1,%0";
else if (ADDRESS_REG_P (operands[3]))
return "move%.w %1,%3\;add%.l %3,%0";
else
return "move%.w %1,%3\;ext%.l %3\;add%.l %3,%0";
})

;; Add the high word of a DImode value (op1 >> 32) to a DImode value.
;; The C body rewrites operands[2] to the low SImode word of the
;; destination.  The in-template ;; lines below are the historical
;; commuted form of the pattern, kept as a comment.
(define_insn "adddi_dilshr32"
  [(set (match_operand:DI 0 "nonimmediate_operand" "=d,o")
;; (plus:DI (match_operand:DI 2 "general_operand" "%0")
;; (lshiftrt:DI (match_operand:DI 1 "general_operand" "ro")
;; (const_int 32))))]
(plus:DI (lshiftrt:DI (match_operand:DI 1 "general_operand" "ro,d")
(const_int 32))
(match_operand:DI 2 "general_operand" "0,0")))]
""
{
CC_STATUS_INIT;
if (GET_CODE (operands[0]) == REG)
operands[2] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
else
operands[2] = adjust_address (operands[0], SImode, 4);
return "add%.l %1,%2\;negx%.l %0\;neg%.l %0";
})

;; Add (op1 << 32): only the high word of the destination changes, so a
;; single add%.l of op1's high word into %0's high word is enough.
(define_insn "adddi_dishl32"
  [(set (match_operand:DI 0 "nonimmediate_operand" "=r,o")
;; (plus:DI (match_operand:DI 2 "general_operand" "%0")
;; (ashift:DI (match_operand:DI 1 "general_operand" "ro")
;; (const_int 32))))]
(plus:DI (ashift:DI (match_operand:DI 1 "general_operand" "ro,d")
(const_int 32))
(match_operand:DI 2 "general_operand" "0,0")))]
""
{
CC_STATUS_INIT;
if (GET_CODE (operands[1]) == REG)
operands[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
else
operands[1] = adjust_address (operands[1], SImode, 4);
return "add%.l %1,%0";
})
 
;; 64-bit add.  Double-word arithmetic: add%.l on the low words then
;; addx%.l (add with extend/carry) on the high words.  %R0/%R2 print
;; the low SImode word of a DImode operand.  The C body handles:
;;   - data-register destinations (register, post-inc memory, constant
;;     or plain memory source, with addq/subq shortcuts for small
;;     constants in the low word),
;;   - memory destinations, including push (pre-dec), pop (post-inc)
;;     and plain addressable memory.
;; NOTE(review): the split_double case loads the high word into the
;; scratch %3 first (via output_move_simode), then combines with
;; add/addx — order matters for the carry; left byte-identical.
(define_insn "adddi3"
  [(set (match_operand:DI 0 "nonimmediate_operand" "=<,o<>,d,d,d")
(plus:DI (match_operand:DI 1 "general_operand" "%0,0,0,0,0")
(match_operand:DI 2 "general_operand" "<,d,no>,d,a")))
(clobber (match_scratch:SI 3 "=X,&d,&d,X,&d"))]
""
{
if (DATA_REG_P (operands[0]))
{
if (DATA_REG_P (operands[2]))
return "add%.l %R2,%R0\;addx%.l %2,%0";
else if (GET_CODE (operands[2]) == MEM
&& GET_CODE (XEXP (operands[2], 0)) == POST_INC)
return "move%.l %2,%3\;add%.l %2,%R0\;addx%.l %3,%0";
else
{
rtx high, low;
rtx xoperands[2];

if (GET_CODE (operands[2]) == REG)
{
low = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
high = operands[2];
}
else if (CONSTANT_P (operands[2]))
split_double (operands[2], &high, &low);
else
{
low = adjust_address (operands[2], SImode, 4);
high = operands[2];
}

operands[1] = low, operands[2] = high;
xoperands[0] = operands[3];
if (GET_CODE (operands[1]) == CONST_INT
&& INTVAL (operands[1]) >= -8 && INTVAL (operands[1]) < 0)
xoperands[1] = GEN_INT (-INTVAL (operands[2]) - 1);
else
xoperands[1] = operands[2];

output_asm_insn (output_move_simode (xoperands), xoperands);
if (GET_CODE (operands[1]) == CONST_INT)
{
if (INTVAL (operands[1]) > 0 && INTVAL (operands[1]) <= 8)
return "addq%.l %1,%R0\;addx%.l %3,%0";
else if (INTVAL (operands[1]) >= -8 && INTVAL (operands[1]) < 0)
{
operands[1] = GEN_INT (-INTVAL (operands[1]));
return "subq%.l %1,%R0\;subx%.l %3,%0";
}
}
return "add%.l %1,%R0\;addx%.l %3,%0";
}
}
else
{
gcc_assert (GET_CODE (operands[0]) == MEM);
if (GET_CODE (operands[2]) == MEM
&& GET_CODE (XEXP (operands[2], 0)) == PRE_DEC)
return "add%.l %2,%0\;addx%.l %2,%0";
CC_STATUS_INIT;
if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
{
operands[1] = gen_rtx_MEM (SImode,
plus_constant (XEXP(operands[0], 0), -8));
return "move%.l %0,%3\;add%.l %R2,%0\;addx%.l %2,%3\;move%.l %3,%1";
}
else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
{
operands[1] = XEXP(operands[0], 0);
return "add%.l %R2,%0\;move%.l %0,%3\;addx%.l %2,%3\;move%.l %3,%1";
}
else
{
operands[1] = adjust_address (operands[0], SImode, 4);
return "add%.l %R2,%1\;move%.l %0,%3\;addx%.l %2,%3\;move%.l %3,%0";
}
}
})
 
;; Compute op1 + (op1 >> 31), i.e. add 1 exactly when op1 is negative.
;; Emitted as: copy op1 to the destination, branch over the increment
;; if the copy set the N flag clear (jbpl/jpl), else addq #1.  A fresh
;; code label is generated in the C body; for auto-inc/dec memory the
;; second reference (%2) is rewritten so the address is not modified
;; twice.
(define_insn "addsi_lshrsi_31"
  [(set (match_operand:SI 0 "nonimmediate_operand" "=dm")
(plus:SI (lshiftrt:SI (match_operand:SI 1 "general_operand" "rm")
(const_int 31))
(match_dup 1)))]
""
{
operands[2] = operands[0];
operands[3] = gen_label_rtx();
if (GET_CODE (operands[0]) == MEM)
{
if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
operands[0] = gen_rtx_MEM (SImode, XEXP (XEXP (operands[0], 0), 0));
else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
operands[2] = gen_rtx_MEM (SImode, XEXP (XEXP (operands[0], 0), 0));
}
output_asm_insn ("move%.l %1,%0", operands);
output_asm_insn (MOTOROLA ? "jbpl %l3" : "jpl %l3", operands);
output_asm_insn ("addq%.l #1,%2", operands);
(*targetm.asm_out.internal_label) (asm_out_file, "L",
CODE_LABEL_NUMBER (operands[3]));
return "";
})

;; Generic 32-bit add expander; matched by the *addsi3_internal /
;; *addsi3_5200 insns below.
(define_expand "addsi3"
  [(set (match_operand:SI 0 "nonimmediate_operand" "")
(plus:SI (match_operand:SI 1 "general_operand" "")
(match_operand:SI 2 "general_src_operand" "")))]
""
"")
 
;; Note that the middle two alternatives are near-duplicates
;; in order to handle insns generated by reload.
;; This is needed since they are not themselves reloaded,
;; so commutativity won't apply to them.

;; Classic 68k 32-bit add; the assembler text is chosen by
;; output_addsi3 (lea/addq/subq/add depending on operands).
(define_insn "*addsi3_internal"
  [(set (match_operand:SI 0 "nonimmediate_operand" "=m,?a,?a,d,a")
(plus:SI (match_operand:SI 1 "general_operand" "%0,a,rJK,0,0")
(match_operand:SI 2 "general_src_operand" "dIKLT,rJK,a,mSrIKLT,mSrIKLs")))]


"! TARGET_COLDFIRE"
"* return output_addsi3 (operands);")

;; ColdFire variant with the reduced constraint set of that ISA.
(define_insn "*addsi3_5200"
  [(set (match_operand:SI 0 "nonimmediate_operand" "=m,?a,?a,r")
(plus:SI (match_operand:SI 1 "general_operand" "%0,a,rJK,0")
(match_operand:SI 2 "general_src_operand" "dIL,rJK,a,mrIKLi")))]
"TARGET_COLDFIRE"
"* return output_addsi3 (operands);")

;; add%.w into an address register sign-extends its HImode source, so
;; the sign_extend can be folded into a single instruction.
(define_insn ""
  [(set (match_operand:SI 0 "nonimmediate_operand" "=a")
(plus:SI (match_operand:SI 1 "general_operand" "0")
(sign_extend:SI
(match_operand:HI 2 "nonimmediate_src_operand" "rmS"))))]
"!TARGET_COLDFIRE"
"add%.w %2,%0")
 
;; 16-bit add.  Constant sources get strength-reduced:
;;   [1,8]      -> addq%.w       [-8,-1]   -> subq%.w (negated)
;;   (8,16]     -> two addq on CPU32 regs  [-16,-8)  -> two subq
;;   other, address-reg dest, pre-68040    -> lea with displacement
;; Everything else falls through to a plain add%.w.
(define_insn "addhi3"
  [(set (match_operand:HI 0 "nonimmediate_operand" "=m,r")
(plus:HI (match_operand:HI 1 "general_operand" "%0,0")
(match_operand:HI 2 "general_src_operand" "dn,rmSn")))]
"!TARGET_COLDFIRE"
{
if (GET_CODE (operands[2]) == CONST_INT)
{
/* If the constant would be a negative number when interpreted as
HImode, make it negative. This is usually, but not always, done
elsewhere in the compiler. First check for constants out of range,
which could confuse us. */

if (INTVAL (operands[2]) >= 32768)
operands[2] = GEN_INT (INTVAL (operands[2]) - 65536);

if (INTVAL (operands[2]) > 0
&& INTVAL (operands[2]) <= 8)
return "addq%.w %2,%0";
if (INTVAL (operands[2]) < 0
&& INTVAL (operands[2]) >= -8)
{
operands[2] = GEN_INT (- INTVAL (operands[2]));
return "subq%.w %2,%0";
}
/* On the CPU32 it is faster to use two addqw instructions to
add a small integer (8 < N <= 16) to a register.
Likewise for subqw. */
if (TARGET_CPU32 && REG_P (operands[0]))
{
if (INTVAL (operands[2]) > 8
&& INTVAL (operands[2]) <= 16)
{
operands[2] = GEN_INT (INTVAL (operands[2]) - 8);
return "addq%.w #8,%0\;addq%.w %2,%0";
}
if (INTVAL (operands[2]) < -8
&& INTVAL (operands[2]) >= -16)
{
operands[2] = GEN_INT (- INTVAL (operands[2]) - 8);
return "subq%.w #8,%0\;subq%.w %2,%0";
}
}
if (ADDRESS_REG_P (operands[0]) && !TARGET_68040)
return MOTOROLA ? "lea (%c2,%0),%0" : "lea %0@(%c2),%0";
}
return "add%.w %2,%0";
})
 
;; These insns must use MATCH_DUP instead of the more expected
;; use of a matching constraint because the "output" here is also
;; an input, so you can't use the matching constraint. That also means
;; that you can't use the "%", so you need patterns with the matched
;; operand in both positions.

;; strict_low_part HI add, destination as first addend.  Constant
;; handling mirrors addhi3 above (addq/subq, CPU32 double-addq, lea).
(define_insn ""
  [(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" "+m,d"))
(plus:HI (match_dup 0)
(match_operand:HI 1 "general_src_operand" "dn,rmSn")))]
"!TARGET_COLDFIRE"
{
if (GET_CODE (operands[1]) == CONST_INT)
{
/* If the constant would be a negative number when interpreted as
HImode, make it negative. This is usually, but not always, done
elsewhere in the compiler. First check for constants out of range,
which could confuse us. */

if (INTVAL (operands[1]) >= 32768)
operands[1] = GEN_INT (INTVAL (operands[1]) - 65536);

if (INTVAL (operands[1]) > 0
&& INTVAL (operands[1]) <= 8)
return "addq%.w %1,%0";
if (INTVAL (operands[1]) < 0
&& INTVAL (operands[1]) >= -8)
{
operands[1] = GEN_INT (- INTVAL (operands[1]));
return "subq%.w %1,%0";
}
/* On the CPU32 it is faster to use two addqw instructions to
add a small integer (8 < N <= 16) to a register.
Likewise for subqw. */
if (TARGET_CPU32 && REG_P (operands[0]))
{
if (INTVAL (operands[1]) > 8
&& INTVAL (operands[1]) <= 16)
{
operands[1] = GEN_INT (INTVAL (operands[1]) - 8);
return "addq%.w #8,%0\;addq%.w %1,%0";
}
if (INTVAL (operands[1]) < -8
&& INTVAL (operands[1]) >= -16)
{
operands[1] = GEN_INT (- INTVAL (operands[1]) - 8);
return "subq%.w #8,%0\;subq%.w %1,%0";
}
}
if (ADDRESS_REG_P (operands[0]) && !TARGET_68040)
return MOTOROLA ? "lea (%c1,%0),%0" : "lea %0@(%c1),%0";
}
return "add%.w %1,%0";
})

;; Same as the previous pattern with the addends commuted (see the
;; MATCH_DUP note above for why both orderings are spelled out).
(define_insn ""
  [(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" "+m,d"))
(plus:HI (match_operand:HI 1 "general_src_operand" "dn,rmSn")
(match_dup 0)))]
"!TARGET_COLDFIRE"
{
if (GET_CODE (operands[1]) == CONST_INT)
{
/* If the constant would be a negative number when interpreted as
HImode, make it negative. This is usually, but not always, done
elsewhere in the compiler. First check for constants out of range,
which could confuse us. */

if (INTVAL (operands[1]) >= 32768)
operands[1] = GEN_INT (INTVAL (operands[1]) - 65536);

if (INTVAL (operands[1]) > 0
&& INTVAL (operands[1]) <= 8)
return "addq%.w %1,%0";
if (INTVAL (operands[1]) < 0
&& INTVAL (operands[1]) >= -8)
{
operands[1] = GEN_INT (- INTVAL (operands[1]));
return "subq%.w %1,%0";
}
/* On the CPU32 it is faster to use two addqw instructions to
add a small integer (8 < N <= 16) to a register.
Likewise for subqw. */
if (TARGET_CPU32 && REG_P (operands[0]))
{
if (INTVAL (operands[1]) > 8
&& INTVAL (operands[1]) <= 16)
{
operands[1] = GEN_INT (INTVAL (operands[1]) - 8);
return "addq%.w #8,%0\;addq%.w %1,%0";
}
if (INTVAL (operands[1]) < -8
&& INTVAL (operands[1]) >= -16)
{
operands[1] = GEN_INT (- INTVAL (operands[1]) - 8);
return "subq%.w #8,%0\;subq%.w %1,%0";
}
}
if (ADDRESS_REG_P (operands[0]) && !TARGET_68040)
return MOTOROLA ? "lea (%c1,%0),%0" : "lea %0@(%c1),%0";
}
return "add%.w %1,%0";
})
 
;; 8-bit add.  Constants are normalized to signed QImode range, then
;; [1,8] becomes addq%.b and [-8,-1] becomes subq%.b; no lea/CPU32
;; shortcuts at byte width.
(define_insn "addqi3"
  [(set (match_operand:QI 0 "nonimmediate_operand" "=m,d")
(plus:QI (match_operand:QI 1 "general_operand" "%0,0")
(match_operand:QI 2 "general_src_operand" "dn,dmSn")))]
"!TARGET_COLDFIRE"
{
if (GET_CODE (operands[2]) == CONST_INT)
{
if (INTVAL (operands[2]) >= 128)
operands[2] = GEN_INT (INTVAL (operands[2]) - 256);

if (INTVAL (operands[2]) > 0
&& INTVAL (operands[2]) <= 8)
return "addq%.b %2,%0";
if (INTVAL (operands[2]) < 0 && INTVAL (operands[2]) >= -8)
{
operands[2] = GEN_INT (- INTVAL (operands[2]));
return "subq%.b %2,%0";
}
}
return "add%.b %2,%0";
})

;; strict_low_part QI add, destination first (MATCH_DUP form, see the
;; comment above the HI strict_low_part patterns).
(define_insn ""
  [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+m,d"))
(plus:QI (match_dup 0)
(match_operand:QI 1 "general_src_operand" "dn,dmSn")))]
"!TARGET_COLDFIRE"
{
if (GET_CODE (operands[1]) == CONST_INT)
{
if (INTVAL (operands[1]) >= 128)
operands[1] = GEN_INT (INTVAL (operands[1]) - 256);

if (INTVAL (operands[1]) > 0
&& INTVAL (operands[1]) <= 8)
return "addq%.b %1,%0";
if (INTVAL (operands[1]) < 0 && INTVAL (operands[1]) >= -8)
{
operands[1] = GEN_INT (- INTVAL (operands[1]));
return "subq%.b %1,%0";
}
}
return "add%.b %1,%0";
})

;; Commuted form of the previous strict_low_part QI add.
(define_insn ""
  [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+m,d"))
(plus:QI (match_operand:QI 1 "general_src_operand" "dn,dmSn")
(match_dup 0)))]
"!TARGET_COLDFIRE"
{
if (GET_CODE (operands[1]) == CONST_INT)
{
if (INTVAL (operands[1]) >= 128)
operands[1] = GEN_INT (INTVAL (operands[1]) - 256);

if (INTVAL (operands[1]) > 0
&& INTVAL (operands[1]) <= 8)
return "addq%.b %1,%0";
if (INTVAL (operands[1]) < 0 && INTVAL (operands[1]) >= -8)
{
operands[1] = GEN_INT (- INTVAL (operands[1]));
return "subq%.b %1,%0";
}
}
return "add%.b %1,%0";
})
 
;; Floating-point add for all FP modes.
(define_expand "add<mode>3"
  [(set (match_operand:FP 0 "nonimmediate_operand" "")
(plus:FP (match_operand:FP 1 "general_operand" "")
(match_operand:FP 2 "general_operand" "")))]
"TARGET_HARD_FLOAT"
"")

;; 68881 add with an integral operand folded in: the fadd variants can
;; convert an SImode/HImode/QImode source on the fly (%.l/%.w/%.b).
;; <FP:round> selects the rounding prefix for the result mode.
(define_insn "add<mode>3_floatsi_68881"
  [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
(plus:FP (float:FP (match_operand:SI 2 "general_operand" "dmi"))
(match_operand:FP 1 "general_operand" "0")))]
"TARGET_68881"
"f<FP:round>add%.l %2,%0")

(define_insn "add<mode>3_floathi_68881"
  [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
(plus:FP (float:FP (match_operand:HI 2 "general_operand" "dmn"))
(match_operand:FP 1 "general_operand" "0")))]
"TARGET_68881"
"f<FP:round>add%.w %2,%0")

(define_insn "add<mode>3_floatqi_68881"
  [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
(plus:FP (float:FP (match_operand:QI 2 "general_operand" "dmn"))
(match_operand:FP 1 "general_operand" "0")))]
"TARGET_68881"
"f<FP:round>add%.b %2,%0")

;; Plain 68881 FP add: extended precision between FP registers,
;; otherwise the memory operand's own precision.
(define_insn "add<mode>3_68881"
  [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
(plus:FP (match_operand:FP 1 "general_operand" "%0")
(match_operand:FP 2 "general_operand" "f<FP:dreg>m<FP:const>")))]
"TARGET_68881"
{
if (FP_REG_P (operands[2]))
return "f<FP:round>add%.x %2,%0";
return "f<FP:round>add%.<FP:prec> %f2,%0";
})

;; ColdFire FPU add: FP registers hold doubles, hence %.d.
(define_insn "add<mode>3_cf"
  [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
(plus:FP (match_operand:FP 1 "general_operand" "%0")
(match_operand:FP 2 "general_operand" "f<FP:dreg><Q>U")))]
"TARGET_COLDFIRE_FPU"
{
if (FP_REG_P (operands[2]))
return "f<FP:round>add%.d %2,%0";
return "f<FP:round>add%.<FP:prec> %2,%0";
})
;; subtract instructions

;; Subtract (sign_extend:DI HI) << 32: only the high word of the
;; destination changes.  Mirrors adddi_sexthishl32 with sub in place
;; of add.
(define_insn "subdi_sexthishl32"
  [(set (match_operand:DI 0 "nonimmediate_operand" "=o,a,*d,*d")
(minus:DI (match_operand:DI 1 "general_operand" "0,0,0,0")
(ashift:DI (sign_extend:DI (match_operand:HI 2 "general_operand" "rm,rm,rm,rm"))
(const_int 32))))
(clobber (match_scratch:SI 3 "=&d,X,a,?d"))]
"!TARGET_COLDFIRE"
{
CC_STATUS_INIT;
if (ADDRESS_REG_P (operands[0]))
return "sub%.w %2,%0";
else if (ADDRESS_REG_P (operands[3]))
return "move%.w %2,%3\;sub%.l %3,%0";
else
return "move%.w %2,%3\;ext%.l %3\;sub%.l %3,%0";
})

;; Subtract (op1 << 32): a single sub%.l of op1's high word from the
;; destination's high word.  Mirrors adddi_dishl32.
(define_insn "subdi_dishl32"
  [(set (match_operand:DI 0 "nonimmediate_operand" "+ro")
(minus:DI (match_dup 0)
(ashift:DI (match_operand:DI 1 "general_operand" "ro")
(const_int 32))))]
""
{
CC_STATUS_INIT;
if (GET_CODE (operands[1]) == REG)
operands[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
else
operands[1] = adjust_address (operands[1], SImode, 4);
return "sub%.l %1,%0";
})
 
;; 64-bit subtract: the exact mirror of adddi3 above, using
;; sub%.l on the low words and subx%.l (subtract with extend/borrow)
;; on the high words.  Same case analysis: data-register destinations
;; (register / post-inc / constant / memory sources, with subq/addq
;; shortcuts for small low-word constants) and memory destinations
;; (push, pop, plain).  %R prints the low SImode word of a DImode
;; operand.
(define_insn "subdi3"
  [(set (match_operand:DI 0 "nonimmediate_operand" "=<,o<>,d,d,d")
(minus:DI (match_operand:DI 1 "general_operand" "0,0,0,0,0")
(match_operand:DI 2 "general_operand" "<,d,no>,d,a")))
(clobber (match_scratch:SI 3 "=X,&d,&d,X,&d"))]
""
{
if (DATA_REG_P (operands[0]))
{
if (DATA_REG_P (operands[2]))
return "sub%.l %R2,%R0\;subx%.l %2,%0";
else if (GET_CODE (operands[2]) == MEM
&& GET_CODE (XEXP (operands[2], 0)) == POST_INC)
{
return "move%.l %2,%3\;sub%.l %2,%R0\;subx%.l %3,%0";
}
else
{
rtx high, low;
rtx xoperands[2];

if (GET_CODE (operands[2]) == REG)
{
low = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
high = operands[2];
}
else if (CONSTANT_P (operands[2]))
split_double (operands[2], &high, &low);
else
{
low = adjust_address (operands[2], SImode, 4);
high = operands[2];
}

operands[1] = low, operands[2] = high;
xoperands[0] = operands[3];
if (GET_CODE (operands[1]) == CONST_INT
&& INTVAL (operands[1]) >= -8 && INTVAL (operands[1]) < 0)
xoperands[1] = GEN_INT (-INTVAL (operands[2]) - 1);
else
xoperands[1] = operands[2];

output_asm_insn (output_move_simode (xoperands), xoperands);
if (GET_CODE (operands[1]) == CONST_INT)
{
if (INTVAL (operands[1]) > 0 && INTVAL (operands[1]) <= 8)
return "subq%.l %1,%R0\;subx%.l %3,%0";
else if (INTVAL (operands[1]) >= -8 && INTVAL (operands[1]) < 0)
{
operands[1] = GEN_INT (-INTVAL (operands[1]));
return "addq%.l %1,%R0\;addx%.l %3,%0";
}
}
return "sub%.l %1,%R0\;subx%.l %3,%0";
}
}
else
{
gcc_assert (GET_CODE (operands[0]) == MEM);
if (GET_CODE (operands[2]) == MEM
&& GET_CODE (XEXP (operands[2], 0)) == PRE_DEC)
return "sub%.l %2,%0\;subx%.l %2,%0";
CC_STATUS_INIT;
if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
{
operands[1]
= gen_rtx_MEM (SImode, plus_constant (XEXP (operands[0], 0), -8));
return "move%.l %0,%3\;sub%.l %R2,%0\;subx%.l %2,%3\;move%.l %3,%1";
}
else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
{
operands[1] = XEXP(operands[0], 0);
return "sub%.l %R2,%0\;move%.l %0,%3\;subx%.l %2,%3\;move%.l %3,%1";
}
else
{
operands[1] = adjust_address (operands[0], SImode, 4);
return "sub%.l %R2,%1\;move%.l %0,%3\;subx%.l %2,%3\;move%.l %3,%0";
}
}
})
 
;; Integer subtract, 32/16/8 bits.  Unlike the add patterns there is no
;; constant strength-reduction here — a plain sub of the matching width
;; in every case.
(define_insn "subsi3"
  [(set (match_operand:SI 0 "nonimmediate_operand" "=m,d,a")
(minus:SI (match_operand:SI 1 "general_operand" "0,0,0")
(match_operand:SI 2 "general_src_operand" "dT,mSrT,mSrs")))]
""
"sub%.l %2,%0")

;; sub%.w into an address register sign-extends the HImode source,
;; folding the sign_extend into one instruction.
(define_insn ""
  [(set (match_operand:SI 0 "nonimmediate_operand" "=a")
(minus:SI (match_operand:SI 1 "general_operand" "0")
(sign_extend:SI
(match_operand:HI 2 "nonimmediate_src_operand" "rmS"))))]
"!TARGET_COLDFIRE"
"sub%.w %2,%0")

(define_insn "subhi3"
  [(set (match_operand:HI 0 "nonimmediate_operand" "=m,r")
(minus:HI (match_operand:HI 1 "general_operand" "0,0")
(match_operand:HI 2 "general_src_operand" "dn,rmSn")))]
"!TARGET_COLDFIRE"
"sub%.w %2,%0")

;; strict_low_part HI subtract (MATCH_DUP form; see the comment above
;; the strict_low_part add patterns).
(define_insn ""
  [(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" "+m,d"))
(minus:HI (match_dup 0)
(match_operand:HI 1 "general_src_operand" "dn,rmSn")))]
"!TARGET_COLDFIRE"
"sub%.w %1,%0")

(define_insn "subqi3"
  [(set (match_operand:QI 0 "nonimmediate_operand" "=m,d")
(minus:QI (match_operand:QI 1 "general_operand" "0,0")
(match_operand:QI 2 "general_src_operand" "dn,dmSn")))]
"!TARGET_COLDFIRE"
"sub%.b %2,%0")

;; strict_low_part QI subtract.
(define_insn ""
  [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+m,d"))
(minus:QI (match_dup 0)
(match_operand:QI 1 "general_src_operand" "dn,dmSn")))]
"!TARGET_COLDFIRE"
"sub%.b %1,%0")
 
;; Floating-point subtract: structured exactly like the FP add family
;; above (expander, integral-operand 68881 variants, plain 68881,
;; ColdFire FPU), with fsub in place of fadd.  minus is not
;; commutative, so operand 1 must be the destination register.
(define_expand "sub<mode>3"
  [(set (match_operand:FP 0 "nonimmediate_operand" "")
(minus:FP (match_operand:FP 1 "general_operand" "")
(match_operand:FP 2 "general_operand" "")))]
"TARGET_HARD_FLOAT"
"")

(define_insn "sub<mode>3_floatsi_68881"
  [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
(minus:FP (match_operand:FP 1 "general_operand" "0")
(float:FP (match_operand:SI 2 "general_operand" "dmi"))))]
"TARGET_68881"
"f<FP:round>sub%.l %2,%0")

(define_insn "sub<mode>3_floathi_68881"
  [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
(minus:FP (match_operand:FP 1 "general_operand" "0")
(float:FP (match_operand:HI 2 "general_operand" "dmn"))))]
"TARGET_68881"
"f<FP:round>sub%.w %2,%0")

(define_insn "sub<mode>3_floatqi_68881"
  [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
(minus:FP (match_operand:FP 1 "general_operand" "0")
(float:FP (match_operand:QI 2 "general_operand" "dmn"))))]
"TARGET_68881"
"f<FP:round>sub%.b %2,%0")

(define_insn "sub<mode>3_68881"
  [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
(minus:FP (match_operand:FP 1 "general_operand" "0")
(match_operand:FP 2 "general_operand" "f<FP:dreg>m<FP:const>")))]
"TARGET_68881"
{
if (FP_REG_P (operands[2]))
return "f<FP:round>sub%.x %2,%0";
return "f<FP:round>sub%.<FP:prec> %f2,%0";
})

(define_insn "sub<mode>3_cf"
  [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
(minus:FP (match_operand:FP 1 "general_operand" "0")
(match_operand:FP 2 "general_operand" "f<FP:dreg><Q>U")))]
"TARGET_COLDFIRE_FPU"
{
if (FP_REG_P (operands[2]))
return "f<FP:round>sub%.d %2,%0";
return "f<FP:round>sub%.<FP:prec> %2,%0";
})
;; multiply instructions

;; 16x16 -> 16 signed multiply.  MOTOROLA selects the size-suffixed
;; mnemonic spelling; the MIT syntax omits the suffix.
(define_insn "mulhi3"
  [(set (match_operand:HI 0 "nonimmediate_operand" "=d")
(mult:HI (match_operand:HI 1 "general_operand" "%0")
(match_operand:HI 2 "general_src_operand" "dmSn")))]
""
{
return MOTOROLA ? "muls%.w %2,%0" : "muls %2,%0";
})

;; 16x16 -> 32 signed widening multiply (muls.w produces a 32-bit
;; result in a data register).
(define_insn "mulhisi3"
  [(set (match_operand:SI 0 "nonimmediate_operand" "=d")
(mult:SI (sign_extend:SI
(match_operand:HI 1 "nonimmediate_operand" "%0"))
(sign_extend:SI
(match_operand:HI 2 "nonimmediate_src_operand" "dmS"))))]
""
{
return MOTOROLA ? "muls%.w %2,%0" : "muls %2,%0";
})

;; Same, with a constant second operand that fits in signed 16 bits.
(define_insn ""
  [(set (match_operand:SI 0 "nonimmediate_operand" "=d")
(mult:SI (sign_extend:SI
(match_operand:HI 1 "nonimmediate_operand" "%0"))
(match_operand:SI 2 "const_int_operand" "n")))]
"INTVAL (operands[2]) >= -0x8000 && INTVAL (operands[2]) <= 0x7fff"
{
return MOTOROLA ? "muls%.w %2,%0" : "muls %2,%0";
})

;; 32-bit multiply needs muls.l, available from the 68020 and on
;; ColdFire.
(define_expand "mulsi3"
  [(set (match_operand:SI 0 "nonimmediate_operand" "")
(mult:SI (match_operand:SI 1 "general_operand" "")
(match_operand:SI 2 "general_operand" "")))]
"TARGET_68020 || TARGET_COLDFIRE"
"")

(define_insn ""
  [(set (match_operand:SI 0 "nonimmediate_operand" "=d")
(mult:SI (match_operand:SI 1 "general_operand" "%0")
(match_operand:SI 2 "general_src_operand" "dmSTK")))]

"TARGET_68020"
"muls%.l %2,%0")

(define_insn ""
  [(set (match_operand:SI 0 "nonimmediate_operand" "=d")
(mult:SI (match_operand:SI 1 "general_operand" "%0")
(match_operand:SI 2 "general_operand" "d<Q>")))]
"TARGET_COLDFIRE"
"muls%.l %2,%0")

;; 16x16 -> 32 unsigned widening multiply, plus the constant-operand
;; form restricted to the unsigned 16-bit range.
(define_insn "umulhisi3"
  [(set (match_operand:SI 0 "nonimmediate_operand" "=d")
(mult:SI (zero_extend:SI
(match_operand:HI 1 "nonimmediate_operand" "%0"))
(zero_extend:SI
(match_operand:HI 2 "nonimmediate_src_operand" "dmS"))))]
""
{
return MOTOROLA ? "mulu%.w %2,%0" : "mulu %2,%0";
})

(define_insn ""
  [(set (match_operand:SI 0 "nonimmediate_operand" "=d")
(mult:SI (zero_extend:SI
(match_operand:HI 1 "nonimmediate_operand" "%0"))
(match_operand:SI 2 "const_int_operand" "n")))]
"INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) <= 0xffff"
{
return MOTOROLA ? "mulu%.w %2,%0" : "mulu %2,%0";
})
 
;; We need a separate DEFINE_EXPAND for u?mulsidi3 to be able to use the
;; proper matching constraint. This is because the matching is between
;; the high-numbered word of the DImode operand[0] and operand[1].
;; These use the 64-bit forms mulu.l/muls.l %2,%hi:%lo, which exist on
;; the 68020..68040 but not the 68060 or ColdFire — hence the
;; !TARGET_68060 && !TARGET_COLDFIRE conditions throughout.
(define_expand "umulsidi3"
  [(parallel
[(set (subreg:SI (match_operand:DI 0 "register_operand" "") 4)
(mult:SI (match_operand:SI 1 "register_operand" "")
(match_operand:SI 2 "register_operand" "")))
(set (subreg:SI (match_dup 0) 0)
(truncate:SI (lshiftrt:DI (mult:DI (zero_extend:DI (match_dup 1))
(zero_extend:DI (match_dup 2)))
(const_int 32))))])]
"TARGET_68020 && !TARGET_68060 && !TARGET_COLDFIRE"
"")

;; mulu.l %2,%3:%0 — low 32 bits of the product land in %0, high 32
;; bits in %3.
(define_insn ""
  [(set (match_operand:SI 0 "register_operand" "=d")
(mult:SI (match_operand:SI 1 "register_operand" "%0")
(match_operand:SI 2 "nonimmediate_operand" "dm")))
(set (match_operand:SI 3 "register_operand" "=d")
(truncate:SI (lshiftrt:DI (mult:DI (zero_extend:DI (match_dup 1))
(zero_extend:DI (match_dup 2)))
(const_int 32))))]
"TARGET_68020 && !TARGET_68060 && !TARGET_COLDFIRE"
"mulu%.l %2,%3:%0")

; Match immediate case. For 2.4 only match things < 2^31.
; It's tricky with larger values in these patterns since we need to match
; values between the two parallel multiplies, between a CONST_DOUBLE and
; a CONST_INT.
(define_insn ""
  [(set (match_operand:SI 0 "register_operand" "=d")
(mult:SI (match_operand:SI 1 "register_operand" "%0")
(match_operand:SI 2 "const_int_operand" "n")))
(set (match_operand:SI 3 "register_operand" "=d")
(truncate:SI (lshiftrt:DI (mult:DI (zero_extend:DI (match_dup 1))
(match_dup 2))
(const_int 32))))]
"TARGET_68020 && !TARGET_68060 && !TARGET_COLDFIRE
&& (unsigned) INTVAL (operands[2]) <= 0x7fffffff"
"mulu%.l %2,%3:%0")

;; Signed 32x32 -> 64 multiply; same structure as umulsidi3 with
;; sign_extend and muls.l.
(define_expand "mulsidi3"
  [(parallel
[(set (subreg:SI (match_operand:DI 0 "register_operand" "") 4)
(mult:SI (match_operand:SI 1 "register_operand" "")
(match_operand:SI 2 "register_operand" "")))
(set (subreg:SI (match_dup 0) 0)
(truncate:SI (lshiftrt:DI (mult:DI (sign_extend:DI (match_dup 1))
(sign_extend:DI (match_dup 2)))
(const_int 32))))])]
"TARGET_68020 && !TARGET_68060 && !TARGET_COLDFIRE"
"")

(define_insn ""
  [(set (match_operand:SI 0 "register_operand" "=d")
(mult:SI (match_operand:SI 1 "register_operand" "%0")
(match_operand:SI 2 "nonimmediate_operand" "dm")))
(set (match_operand:SI 3 "register_operand" "=d")
(truncate:SI (lshiftrt:DI (mult:DI (sign_extend:DI (match_dup 1))
(sign_extend:DI (match_dup 2)))
(const_int 32))))]
"TARGET_68020 && !TARGET_68060 && !TARGET_COLDFIRE"
"muls%.l %2,%3:%0")

;; Signed immediate variant.  NOTE(review): unlike the unsigned
;; immediate pattern above, no range check on operands[2] here.
(define_insn ""
  [(set (match_operand:SI 0 "register_operand" "=d")
(mult:SI (match_operand:SI 1 "register_operand" "%0")
(match_operand:SI 2 "const_int_operand" "n")))
(set (match_operand:SI 3 "register_operand" "=d")
(truncate:SI (lshiftrt:DI (mult:DI (sign_extend:DI (match_dup 1))
(match_dup 2))
(const_int 32))))]
"TARGET_68020 && !TARGET_68060 && !TARGET_COLDFIRE"
"muls%.l %2,%3:%0")
 
;; High 32 bits of an unsigned 64-bit product; the low half goes into a
;; scratch that is simply clobbered.  A CONST_INT multiplier is widened
;; to an unsigned DImode CONST_DOUBLE and routed to the
;; const_umulsi3_highpart insn, whose operand order matches its
;; constraints.
(define_expand "umulsi3_highpart"
  [(parallel
[(set (match_operand:SI 0 "register_operand" "")
(truncate:SI
(lshiftrt:DI
(mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" ""))
(zero_extend:DI (match_operand:SI 2 "general_operand" "")))
(const_int 32))))
(clobber (match_dup 3))])]
"TARGET_68020 && !TARGET_68060 && !TARGET_COLDFIRE"
{
operands[3] = gen_reg_rtx (SImode);

if (GET_CODE (operands[2]) == CONST_INT)
{
operands[2] = immed_double_const (INTVAL (operands[2]) & 0xffffffff,
0, DImode);

/* We have to adjust the operand order for the matching constraints. */
emit_insn (gen_const_umulsi3_highpart (operands[0], operands[3],
operands[1], operands[2]));
DONE;
}
})

;; mulu.l %3,%0:%1 — high half in %0 (the result), low half in the
;; clobbered %1.
(define_insn ""
  [(set (match_operand:SI 0 "register_operand" "=d")
(truncate:SI
(lshiftrt:DI
(mult:DI (zero_extend:DI (match_operand:SI 2 "register_operand" "%1"))
(zero_extend:DI (match_operand:SI 3 "nonimmediate_operand" "dm")))
(const_int 32))))
(clobber (match_operand:SI 1 "register_operand" "=d"))]
"TARGET_68020 && !TARGET_68060 && !TARGET_COLDFIRE"
"mulu%.l %3,%0:%1")

(define_insn "const_umulsi3_highpart"
  [(set (match_operand:SI 0 "register_operand" "=d")
(truncate:SI
(lshiftrt:DI
(mult:DI (zero_extend:DI (match_operand:SI 2 "register_operand" "1"))
(match_operand:DI 3 "const_uint32_operand" "n"))
(const_int 32))))
(clobber (match_operand:SI 1 "register_operand" "=d"))]
"TARGET_68020 && !TARGET_68060 && !TARGET_COLDFIRE"
"mulu%.l %3,%0:%1")

;; Signed counterpart of umulsi3_highpart; CONST_INT multipliers go to
;; const_smulsi3_highpart without the unsigned masking step.
(define_expand "smulsi3_highpart"
  [(parallel
[(set (match_operand:SI 0 "register_operand" "")
(truncate:SI
(lshiftrt:DI
(mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" ""))
(sign_extend:DI (match_operand:SI 2 "general_operand" "")))
(const_int 32))))
(clobber (match_dup 3))])]
"TARGET_68020 && !TARGET_68060 && !TARGET_COLDFIRE"
{
operands[3] = gen_reg_rtx (SImode);
if (GET_CODE (operands[2]) == CONST_INT)
{
/* We have to adjust the operand order for the matching constraints. */
emit_insn (gen_const_smulsi3_highpart (operands[0], operands[3],
operands[1], operands[2]));
DONE;
}
})

(define_insn ""
  [(set (match_operand:SI 0 "register_operand" "=d")
(truncate:SI
(lshiftrt:DI
(mult:DI (sign_extend:DI (match_operand:SI 2 "register_operand" "%1"))
(sign_extend:DI (match_operand:SI 3 "nonimmediate_operand" "dm")))
(const_int 32))))
(clobber (match_operand:SI 1 "register_operand" "=d"))]
"TARGET_68020 && !TARGET_68060 && !TARGET_COLDFIRE"
"muls%.l %3,%0:%1")

(define_insn "const_smulsi3_highpart"
  [(set (match_operand:SI 0 "register_operand" "=d")
(truncate:SI
(lshiftrt:DI
(mult:DI (sign_extend:DI (match_operand:SI 2 "register_operand" "1"))
(match_operand:DI 3 "const_sint32_operand" "n"))
(const_int 32))))
(clobber (match_operand:SI 1 "register_operand" "=d"))]
"TARGET_68020 && !TARGET_68060 && !TARGET_COLDFIRE"
"muls%.l %3,%0:%1")
 
;; Floating-point multiply.  On pre-68040 parts the single-precision
;; fsglmul is used where exact single rounding is wanted
;; (<FP:round_mul>); the 68040-only build uses the plain rounding
;; prefix.
(define_expand "mul<mode>3"
  [(set (match_operand:FP 0 "nonimmediate_operand" "")
(mult:FP (match_operand:FP 1 "general_operand" "")
(match_operand:FP 2 "general_operand" "")))]
"TARGET_HARD_FLOAT"
"")

;; fmul with an on-the-fly converted SImode/HImode/QImode operand.
(define_insn "mul<mode>3_floatsi_68881"
  [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
(mult:FP (float:FP (match_operand:SI 2 "general_operand" "dmi"))
(match_operand:FP 1 "general_operand" "0")))]
"TARGET_68881"
{
return TARGET_68040_ONLY
? "f<FP:round>mul%.l %2,%0"
: "f<FP:round_mul>mul%.l %2,%0";
})

(define_insn "mul<mode>3_floathi_68881"
  [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
(mult:FP (float:FP (match_operand:HI 2 "general_operand" "dmn"))
(match_operand:FP 1 "general_operand" "0")))]
"TARGET_68881"
{
return TARGET_68040_ONLY
? "f<FP:round>mul%.w %2,%0"
: "f<FP:round_mul>mul%.w %2,%0";
})

(define_insn "mul<mode>3_floatqi_68881"
  [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
(mult:FP (float:FP (match_operand:QI 2 "general_operand" "dmn"))
(match_operand:FP 1 "general_operand" "0")))]
"TARGET_68881"
{
return TARGET_68040_ONLY
? "f<FP:round>mul%.b %2,%0"
: "f<FP:round_mul>mul%.b %2,%0";
})

;; Double multiply.  A constant power-of-two multiplier becomes
;; fscale%.l (exact scaling by 2^i) on parts where fscale is not
;; emulated (pre-68040/68060).
(define_insn "muldf_68881"
  [(set (match_operand:DF 0 "nonimmediate_operand" "=f")
(mult:DF (match_operand:DF 1 "general_operand" "%0")
(match_operand:DF 2 "general_operand" "fmG")))]
"TARGET_68881"
{
if (GET_CODE (operands[2]) == CONST_DOUBLE
&& floating_exact_log2 (operands[2]) && !TARGET_68040 && !TARGET_68060)
{
int i = floating_exact_log2 (operands[2]);
operands[2] = GEN_INT (i);
return "fscale%.l %2,%0";
}
if (REG_P (operands[2]))
return "f%&mul%.x %2,%0";
return "f%&mul%.d %f2,%0";
})

;; Single multiply: fsmul on 68040-only builds, fsglmul elsewhere
;; (both deliver a correctly rounded single-precision product).
(define_insn "mulsf_68881"
  [(set (match_operand:SF 0 "nonimmediate_operand" "=f")
(mult:SF (match_operand:SF 1 "general_operand" "%0")
(match_operand:SF 2 "general_operand" "fdmF")))]
"TARGET_68881"
{
if (FP_REG_P (operands[2]))
return (TARGET_68040_ONLY
? "fsmul%.x %2,%0"
: "fsglmul%.x %2,%0");
return (TARGET_68040_ONLY
? "fsmul%.s %f2,%0"
: "fsglmul%.s %f2,%0");
})

;; Extended-precision multiply: always the plain fmul.x.
(define_insn "mulxf3_68881"
  [(set (match_operand:XF 0 "nonimmediate_operand" "=f")
(mult:XF (match_operand:XF 1 "nonimmediate_operand" "%0")
(match_operand:XF 2 "nonimmediate_operand" "fm")))]
"TARGET_68881"
{
return "fmul%.x %f2,%0";
})

;; ColdFire FPU multiply; register operands use %.d (double registers).
(define_insn "fmul<mode>3_cf"
  [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
(mult:FP (match_operand:FP 1 "general_operand" "%0")
(match_operand:FP 2 "general_operand" "f<Q>U<FP:dreg>")))]
"TARGET_COLDFIRE_FPU"
{
if (FP_REG_P (operands[2]))
return "f<FP:prec>mul%.d %2,%0";
return "f<FP:prec>mul%.<FP:prec> %2,%0";
})
;; divide instructions

;; Generic FP divide expander; the target-specific define_insns below
;; are selected by their own conditions.
(define_expand "div<mode>3"
  [(set (match_operand:FP 0 "nonimmediate_operand" "")
	(div:FP (match_operand:FP 1 "general_operand" "")
		(match_operand:FP 2 "general_operand" "")))]
  "TARGET_HARD_FLOAT"
  "")

;; 68881 divide with an SImode divisor converted on the fly (fdiv.l).
;; The dividend (operand 1) must already be in the destination register.
(define_insn "div<mode>3_floatsi_68881"
  [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
	(div:FP (match_operand:FP 1 "general_operand" "0")
		(float:FP (match_operand:SI 2 "general_operand" "dmi"))))]
  "TARGET_68881"
{
  return TARGET_68040_ONLY
	 ? "f<FP:round>div%.l %2,%0"
	 : "f<FP:round_mul>div%.l %2,%0";
})

;; As above with an HImode divisor (fdiv.w).
(define_insn "div<mode>3_floathi_68881"
  [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
	(div:FP (match_operand:FP 1 "general_operand" "0")
		(float:FP (match_operand:HI 2 "general_operand" "dmn"))))]
  "TARGET_68881"
{
  return TARGET_68040_ONLY
	 ? "f<FP:round>div%.w %2,%0"
	 : "f<FP:round_mul>div%.w %2,%0";
})

;; As above with a QImode divisor (fdiv.b).
(define_insn "div<mode>3_floatqi_68881"
  [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
	(div:FP (match_operand:FP 1 "general_operand" "0")
		(float:FP (match_operand:QI 2 "general_operand" "dmn"))))]
  "TARGET_68881"
{
  return TARGET_68040_ONLY
	 ? "f<FP:round>div%.b %2,%0"
	 : "f<FP:round_mul>div%.b %2,%0";
})

;; 68881 FP/FP divide: .x for an FP-register divisor, otherwise the
;; mode's precision suffix; template variant chosen by TARGET_68040_ONLY.
(define_insn "div<mode>3_68881"
  [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
	(div:FP (match_operand:FP 1 "general_operand" "0")
		(match_operand:FP 2 "general_operand" "f<FP:dreg>m<FP:const>")))]
  "TARGET_68881"
{
  if (FP_REG_P (operands[2]))
    return (TARGET_68040_ONLY
	    ? "f<FP:round>div%.x %2,%0"
	    : "f<FP:round_mul>div%.x %2,%0");
  return (TARGET_68040_ONLY
	  ? "f<FP:round>div%.<FP:prec> %f2,%0"
	  : "f<FP:round_mul>div%.<FP:prec> %f2,%0");
})

;; ColdFire FPU divide, mirroring fmul<mode>3_cf above.
(define_insn "div<mode>3_cf"
  [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
	(div:FP (match_operand:FP 1 "general_operand" "0")
		(match_operand:FP 2 "general_operand" "f<Q>U<FP:dreg>")))]
  "TARGET_COLDFIRE_FPU"
{
  if (FP_REG_P (operands[2]))
    return "f<FP:prec>div%.d %2,%0";
  return "f<FP:prec>div%.<FP:prec> %2,%0";
})
;; Remainder instructions.

;; Combined signed divide+modulo.  Available when the hardware has a
;; 32-bit divide: 68020+ or ColdFire with hardware divide.
(define_expand "divmodsi4"
  [(parallel
    [(set (match_operand:SI 0 "nonimmediate_operand" "")
	  (div:SI (match_operand:SI 1 "general_operand" "")
		  (match_operand:SI 2 "general_src_operand" "")))
     (set (match_operand:SI 3 "nonimmediate_operand" "")
	  (mod:SI (match_dup 1) (match_dup 2)))])]
  "TARGET_68020 || TARGET_CF_HWDIV"
  "")

;; ColdFire hardware-divide form.  divs.l and rems.l each produce only
;; one of the two results, so when both quotient and remainder are live
;; the insn emits rems followed by divs.  REG_UNUSED notes tell us which
;; results are actually needed.  Operand 3 is earlyclobber because rems
;; writes it before the final divs reads operands 1/2.
(define_insn ""
  [(set (match_operand:SI 0 "nonimmediate_operand" "=d")
	(div:SI (match_operand:SI 1 "general_operand" "0")
		(match_operand:SI 2 "general_src_operand" "d<Q>U")))
   (set (match_operand:SI 3 "nonimmediate_operand" "=&d")
	(mod:SI (match_dup 1) (match_dup 2)))]
  "TARGET_CF_HWDIV"
{
  if (find_reg_note (insn, REG_UNUSED, operands[3]))
    return "divs%.l %2,%0";
  else if (find_reg_note (insn, REG_UNUSED, operands[0]))
    return "rems%.l %2,%3:%0";
  else
    return "rems%.l %2,%3:%0\;divs%.l %2,%0";
})

;; 68020 form: divsl.l delivers remainder and quotient in one
;; instruction via the %3:%0 register pair.
(define_insn ""
  [(set (match_operand:SI 0 "nonimmediate_operand" "=d")
	(div:SI (match_operand:SI 1 "general_operand" "0")
		(match_operand:SI 2 "general_src_operand" "dmSTK")))
   (set (match_operand:SI 3 "nonimmediate_operand" "=d")
	(mod:SI (match_dup 1) (match_dup 2)))]
  "TARGET_68020"
{
  if (find_reg_note (insn, REG_UNUSED, operands[3]))
    return "divs%.l %2,%0";
  else
    return "divsl%.l %2,%3:%0";
})

;; Unsigned counterpart of divmodsi4.
(define_expand "udivmodsi4"
  [(parallel
    [(set (match_operand:SI 0 "nonimmediate_operand" "=d")
	  (udiv:SI (match_operand:SI 1 "general_operand" "0")
		   (match_operand:SI 2 "general_src_operand" "dmSTK")))
     (set (match_operand:SI 3 "nonimmediate_operand" "=d")
	  (umod:SI (match_dup 1) (match_dup 2)))])]
  "TARGET_68020 || TARGET_CF_HWDIV"
  "")

;; ColdFire unsigned divide/remainder; same REG_UNUSED strategy as the
;; signed pattern above, using divu/remu.
(define_insn ""
  [(set (match_operand:SI 0 "nonimmediate_operand" "=d")
	(udiv:SI (match_operand:SI 1 "general_operand" "0")
		 (match_operand:SI 2 "general_src_operand" "d<Q>U")))
   (set (match_operand:SI 3 "nonimmediate_operand" "=&d")
	(umod:SI (match_dup 1) (match_dup 2)))]
  "TARGET_CF_HWDIV"
{
  if (find_reg_note (insn, REG_UNUSED, operands[3]))
    return "divu%.l %2,%0";
  else if (find_reg_note (insn, REG_UNUSED, operands[0]))
    return "remu%.l %2,%3:%0";
  else
    return "remu%.l %2,%3:%0\;divu%.l %2,%0";
})

;; 68020 unsigned divide/remainder via divul.l with a register pair.
(define_insn ""
  [(set (match_operand:SI 0 "nonimmediate_operand" "=d")
	(udiv:SI (match_operand:SI 1 "general_operand" "0")
		 (match_operand:SI 2 "general_src_operand" "dmSTK")))
   (set (match_operand:SI 3 "nonimmediate_operand" "=d")
	(umod:SI (match_dup 1) (match_dup 2)))]
  "TARGET_68020 && !TARGET_COLDFIRE"
{
  if (find_reg_note (insn, REG_UNUSED, operands[3]))
    return "divu%.l %2,%0";
  else
    return "divul%.l %2,%3:%0";
})

;; HImode signed divide+modulo: sign-extend the dividend, divs.w leaves
;; quotient in the low word and remainder in the high word; the
;; remainder is extracted with move/swap only when it is live.
(define_insn "divmodhi4"
  [(set (match_operand:HI 0 "nonimmediate_operand" "=d")
	(div:HI (match_operand:HI 1 "general_operand" "0")
		(match_operand:HI 2 "general_src_operand" "dmSKT")))
   (set (match_operand:HI 3 "nonimmediate_operand" "=d")
	(mod:HI (match_dup 1) (match_dup 2)))]
  "!TARGET_COLDFIRE || TARGET_CF_HWDIV"
{
  output_asm_insn (MOTOROLA ?
		     "ext%.l %0\;divs%.w %2,%0" :
		     "extl %0\;divs %2,%0",
		   operands);
  if (!find_reg_note(insn, REG_UNUSED, operands[3]))
    {
      CC_STATUS_INIT;
      return "move%.l %0,%3\;swap %3";
    }
  else
    return "";
})

;; HImode unsigned divide+modulo.  The dividend's upper bits are cleared
;; with mvz.w on CFV4 or an and.l mask elsewhere, then divu.w as above.
(define_insn "udivmodhi4"
  [(set (match_operand:HI 0 "nonimmediate_operand" "=d")
	(udiv:HI (match_operand:HI 1 "general_operand" "0")
		 (match_operand:HI 2 "general_src_operand" "dmSKT")))
   (set (match_operand:HI 3 "nonimmediate_operand" "=d")
	(umod:HI (match_dup 1) (match_dup 2)))]
  "!TARGET_COLDFIRE || TARGET_CF_HWDIV"
{
  if (TARGET_CFV4)
    output_asm_insn (MOTOROLA ?
		       "mvz%.w %0,%0\;divu%.w %2,%0" :
		       "mvz%.w %0,%0\;divu %2,%0",
		     operands);
  else
    output_asm_insn (MOTOROLA ?
		       "and%.l #0xFFFF,%0\;divu%.w %2,%0" :
		       "and%.l #0xFFFF,%0\;divu %2,%0",
		     operands);

  if (!find_reg_note(insn, REG_UNUSED, operands[3]))
    {
      CC_STATUS_INIT;
      return "move%.l %0,%3\;swap %3";
    }
  else
    return "";
})
;; logical-and instructions

;; "anddi3" is mainly here to help combine().
;; DImode AND, emitted as two SImode ANDs over the high and low words.
;; For a constant operand the halves are split with split_double and
;; each half is optimized: AND with 0 becomes clr.l, AND with -1 is
;; dropped entirely, anything else goes through output_andsi3.
;; CC_STATUS_INIT is required because the multi-insn sequence leaves the
;; condition codes in an unknown state.
(define_insn "anddi3"
  [(set (match_operand:DI 0 "nonimmediate_operand" "=o,d")
	(and:DI (match_operand:DI 1 "general_operand" "%0,0")
		(match_operand:DI 2 "general_operand" "dn,don")))]
  "!TARGET_COLDFIRE"
{
  CC_STATUS_INIT;
  /* We can get CONST_DOUBLE, but also const1_rtx etc.  */
  if (CONSTANT_P (operands[2]))
    {
      rtx hi, lo;

      split_double (operands[2], &hi, &lo);

      switch (INTVAL (hi))
	{
	  case 0 :
	    output_asm_insn ("clr%.l %0", operands);
	    break;
	  case -1 :
	    break;
	  default :
	    {
	      rtx xoperands[3];

	      xoperands[0] = operands[0];
	      xoperands[2] = hi;
	      output_asm_insn (output_andsi3 (xoperands), xoperands);
	    }
	}
      if (GET_CODE (operands[0]) == REG)
	operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
      else
	operands[0] = adjust_address (operands[0], SImode, 4);
      switch (INTVAL (lo))
	{
	  case 0 :
	    output_asm_insn ("clr%.l %0", operands);
	    break;
	  case -1 :
	    break;
	  default :
	    {
	      rtx xoperands[3];

	      xoperands[0] = operands[0];
	      xoperands[2] = lo;
	      output_asm_insn (output_andsi3 (xoperands), xoperands);
	    }
	}
      return "";
    }
  if (GET_CODE (operands[0]) != REG)
    {
      operands[1] = adjust_address (operands[0], SImode, 4);
      return "and%.l %2,%0\;and%.l %R2,%1";
    }
  if (GET_CODE (operands[2]) != REG)
    {
      operands[1] = adjust_address (operands[2], SImode, 4);
      return "and%.l %2,%0\;and%.l %1,%R0";
    }
  return "and%.l %2,%0\;and%.l %R2,%R0";
})
 
;; Prevent AND from being made with sp.  This doesn't exist in the machine
;; and reload will cause inefficient code.  Since sp is a FIXED_REG, we
;; can't allocate pseudos into it.

;; SImode AND expander; not_sp_operand on the destination enforces the
;; restriction described above.
(define_expand "andsi3"
  [(set (match_operand:SI 0 "not_sp_operand" "")
	(and:SI (match_operand:SI 1 "general_operand" "")
		(match_operand:SI 2 "general_src_operand" "")))]
  ""
  "")

;; produced by split operations after reload finished
;; Register-only AND with a constant mask, emitted via output_andsi3
;; (which can select and/bclr/mask idioms as appropriate).
(define_insn "*andsi3_split"
  [(set (match_operand:SI 0 "register_operand" "=d")
	(and:SI (match_operand:SI 1 "register_operand" "0")
		(match_operand:SI 2 "const_int_operand" "i")))]
  "reload_completed && !TARGET_COLDFIRE"
{
  return output_andsi3 (operands);
})

;; General SImode AND for classic 68k.
(define_insn "andsi3_internal"
  [(set (match_operand:SI 0 "not_sp_operand" "=m,d")
	(and:SI (match_operand:SI 1 "general_operand" "%0,0")
		(match_operand:SI 2 "general_src_operand" "dKT,dmSM")))]
  "!TARGET_COLDFIRE"
{
  return output_andsi3 (operands);
})

;; ColdFire SImode AND.  On CFV4, masking a data register with 0xff or
;; 0xffff is done with the zero-extending mvz.b/mvz.w instead.
(define_insn "andsi3_5200"
  [(set (match_operand:SI 0 "not_sp_operand" "=m,d")
	(and:SI (match_operand:SI 1 "general_operand" "%0,0")
		(match_operand:SI 2 "general_src_operand" "d,dmsK")))]
  "TARGET_COLDFIRE"
{
  if (TARGET_CFV4 && DATA_REG_P (operands[0])
      && GET_CODE (operands[2]) == CONST_INT)
    {
      if (INTVAL (operands[2]) == 0x000000ff)
	return "mvz%.b %0,%0";
      else if (INTVAL (operands[2]) == 0x0000ffff)
	return "mvz%.w %0,%0";
    }
  return output_andsi3 (operands);
})

;; HImode AND, plus two strict_low_part variants (operand order
;; commuted) that update only the low word of the destination.
(define_insn "andhi3"
  [(set (match_operand:HI 0 "nonimmediate_operand" "=m,d")
	(and:HI (match_operand:HI 1 "general_operand" "%0,0")
		(match_operand:HI 2 "general_src_operand" "dn,dmSn")))]
  "!TARGET_COLDFIRE"
  "and%.w %2,%0")

(define_insn ""
  [(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" "+m,d"))
	(and:HI (match_dup 0)
		(match_operand:HI 1 "general_src_operand" "dn,dmSn")))]
  "!TARGET_COLDFIRE"
  "and%.w %1,%0")

(define_insn ""
  [(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" "+m,d"))
	(and:HI (match_operand:HI 1 "general_src_operand" "dn,dmSn")
		(match_dup 0)))]
  "!TARGET_COLDFIRE"
  "and%.w %1,%0")

;; QImode AND and its strict_low_part variants, mirroring the HImode
;; patterns above.
(define_insn "andqi3"
  [(set (match_operand:QI 0 "nonimmediate_operand" "=m,d")
	(and:QI (match_operand:QI 1 "general_operand" "%0,0")
		(match_operand:QI 2 "general_src_operand" "dn,dmSn")))]
  "!TARGET_COLDFIRE"
  "and%.b %2,%0")

(define_insn ""
  [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+m,d"))
	(and:QI (match_dup 0)
		(match_operand:QI 1 "general_src_operand" "dn,dmSn")))]
  "!TARGET_COLDFIRE"
  "and%.b %1,%0")

(define_insn ""
  [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+m,d"))
	(and:QI (match_operand:QI 1 "general_src_operand" "dn,dmSn")
		(match_dup 0)))]
  "!TARGET_COLDFIRE"
  "and%.b %1,%0")
;; inclusive-or instructions

;; OR of a zero-extended narrow value into a DImode destination.  The
;; zero-extension means the high word is unaffected, so only the low
;; SImode word (or its low HI/QI part for narrower sources) is ORed.
(define_insn "iordi_zext"
  [(set (match_operand:DI 0 "nonimmediate_operand" "=o,d")
	(ior:DI (zero_extend:DI (match_operand 1 "general_operand" "dn,dmn"))
		(match_operand:DI 2 "general_operand" "0,0")))]
  "!TARGET_COLDFIRE"
{
  int byte_mode;

  CC_STATUS_INIT;
  /* Redirect operand 0 at the low word of the DImode value.  */
  if (GET_CODE (operands[0]) == REG)
    operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
  else
    operands[0] = adjust_address (operands[0], SImode, 4);
  if (GET_MODE (operands[1]) == SImode)
    return "or%.l %1,%0";
  /* HI/QI source: for a memory destination, address the low 2 or 1
     bytes of the low word directly.  */
  byte_mode = (GET_MODE (operands[1]) == QImode);
  if (GET_CODE (operands[0]) == MEM)
    operands[0] = adjust_address (operands[0], byte_mode ? QImode : HImode,
				  byte_mode ? 3 : 2);
  if (byte_mode)
    return "or%.b %1,%0";
  else
    return "or%.w %1,%0";
})
 
;; "iordi3" is mainly here to help combine().
;; DImode inclusive OR as two SImode ORs, structured exactly like
;; anddi3: constant halves are special-cased (OR with 0 is a no-op, OR
;; with -1 sets the word to all-ones via move.l #-1), other values go
;; through output_iorsi3.
(define_insn "iordi3"
  [(set (match_operand:DI 0 "nonimmediate_operand" "=o,d")
	(ior:DI (match_operand:DI 1 "general_operand" "%0,0")
		(match_operand:DI 2 "general_operand" "dn,don")))]
  "!TARGET_COLDFIRE"
{
  CC_STATUS_INIT;
  /* We can get CONST_DOUBLE, but also const1_rtx etc.  */
  if (CONSTANT_P (operands[2]))
    {
      rtx hi, lo;

      split_double (operands[2], &hi, &lo);

      switch (INTVAL (hi))
	{
	  case 0 :
	    break;
	  case -1 :
	    /* FIXME : a scratch register would be welcome here if operand[0]
	       is not a register */
	    output_asm_insn ("move%.l #-1,%0", operands);
	    break;
	  default :
	    {
	      rtx xoperands[3];

	      xoperands[0] = operands[0];
	      xoperands[2] = hi;
	      output_asm_insn (output_iorsi3 (xoperands), xoperands);
	    }
	}
      if (GET_CODE (operands[0]) == REG)
	operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
      else
	operands[0] = adjust_address (operands[0], SImode, 4);
      switch (INTVAL (lo))
	{
	  case 0 :
	    break;
	  case -1 :
	    /* FIXME : a scratch register would be welcome here if operand[0]
	       is not a register */
	    output_asm_insn ("move%.l #-1,%0", operands);
	    break;
	  default :
	    {
	      rtx xoperands[3];

	      xoperands[0] = operands[0];
	      xoperands[2] = lo;
	      output_asm_insn (output_iorsi3 (xoperands), xoperands);
	    }
	}
      return "";
    }
  if (GET_CODE (operands[0]) != REG)
    {
      operands[1] = adjust_address (operands[0], SImode, 4);
      return "or%.l %2,%0\;or%.l %R2,%1";
    }
  if (GET_CODE (operands[2]) != REG)
    {
      operands[1] = adjust_address (operands[2], SImode, 4);
      return "or%.l %2,%0\;or%.l %1,%R0";
    }
  return "or%.l %2,%0\;or%.l %R2,%R0";
})
 
;; SImode inclusive-OR expander; the classic-68k and ColdFire insns
;; below are selected by their conditions.
(define_expand "iorsi3"
  [(set (match_operand:SI 0 "nonimmediate_operand" "")
	(ior:SI (match_operand:SI 1 "general_operand" "")
		(match_operand:SI 2 "general_src_operand" "")))]
  ""
  "")

;; Classic-68k SImode OR via output_iorsi3 (which may pick or/bset
;; idioms).
(define_insn "iorsi3_internal"
  [(set (match_operand:SI 0 "nonimmediate_operand" "=m,d")
	(ior:SI (match_operand:SI 1 "general_operand" "%0,0")
		(match_operand:SI 2 "general_src_operand" "dKT,dmSMT")))]
  "! TARGET_COLDFIRE"
{
  return output_iorsi3 (operands);
})

;; ColdFire SImode OR with ColdFire-legal constraints.
(define_insn "iorsi3_5200"
  [(set (match_operand:SI 0 "nonimmediate_operand" "=m,d")
	(ior:SI (match_operand:SI 1 "general_operand" "%0,0")
		(match_operand:SI 2 "general_src_operand" "d,dmsK")))]
  "TARGET_COLDFIRE"
{
  return output_iorsi3 (operands);
})

;; HImode OR plus strict_low_part variants (both commuted operand
;; orders), as for the AND patterns.
(define_insn "iorhi3"
  [(set (match_operand:HI 0 "nonimmediate_operand" "=m,d")
	(ior:HI (match_operand:HI 1 "general_operand" "%0,0")
		(match_operand:HI 2 "general_src_operand" "dn,dmSn")))]
  "!TARGET_COLDFIRE"
  "or%.w %2,%0")

(define_insn ""
  [(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" "+m,d"))
	(ior:HI (match_dup 0)
		(match_operand:HI 1 "general_src_operand" "dn,dmSn")))]
  "!TARGET_COLDFIRE"
  "or%.w %1,%0")

(define_insn ""
  [(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" "+m,d"))
	(ior:HI (match_operand:HI 1 "general_src_operand" "dn,dmSn")
		(match_dup 0)))]
  "!TARGET_COLDFIRE"
  "or%.w %1,%0")

;; QImode OR and strict_low_part variants.
(define_insn "iorqi3"
  [(set (match_operand:QI 0 "nonimmediate_operand" "=m,d")
	(ior:QI (match_operand:QI 1 "general_operand" "%0,0")
		(match_operand:QI 2 "general_src_operand" "dn,dmSn")))]
  "!TARGET_COLDFIRE"
  "or%.b %2,%0")

(define_insn ""
  [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+m,d"))
	(ior:QI (match_dup 0)
		(match_operand:QI 1 "general_src_operand" "dn,dmSn")))]
  "!TARGET_COLDFIRE"
  "or%.b %1,%0")

(define_insn ""
  [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+m,d"))
	(ior:QI (match_operand:QI 1 "general_src_operand" "dn,dmSn")
		(match_dup 0)))]
  "!TARGET_COLDFIRE"
  "or%.b %1,%0")
 
;; On all 68k models, this makes faster code in a special case.
;; See also ashlsi_16, ashrsi_16 and lshrsi_16.

;; Build an SImode value from a 16-bit high part (operand 2 << 16) and a
;; zero-extended 16-bit low part (operand 1), using move.w/swap instead
;; of shifts.  Destination is earlyclobber since operand 1 is read after
;; the first write.
(define_insn "iorsi_zexthi_ashl16"
  [(set (match_operand:SI 0 "nonimmediate_operand" "=&d")
	(ior:SI (zero_extend:SI (match_operand:HI 1 "general_operand" "rmn"))
		(ashift:SI (match_operand:SI 2 "general_operand" "or")
			   (const_int 16))))]
  ""
{
  CC_STATUS_INIT;
  if (GET_CODE (operands[2]) != REG)
    operands[2] = adjust_address (operands[2], HImode, 2);
  if (GET_CODE (operands[2]) != REG
      || REGNO (operands[2]) != REGNO (operands[0]))
    output_asm_insn ("move%.w %2,%0", operands);
  return "swap %0\;mov%.w %1,%0";
})

;; OR a zero-extended HI/QI value into an SImode destination by
;; operating directly on the low 2 or 1 bytes (cf. iordi_zext above).
(define_insn "iorsi_zext"
  [(set (match_operand:SI 0 "nonimmediate_operand" "=o,d")
	(ior:SI (zero_extend:SI (match_operand 1 "general_operand" "dn,dmn"))
		(match_operand:SI 2 "general_operand" "0,0")))]
  "!TARGET_COLDFIRE"
{
  int byte_mode;

  CC_STATUS_INIT;
  byte_mode = (GET_MODE (operands[1]) == QImode);
  if (GET_CODE (operands[0]) == MEM)
    operands[0] = adjust_address (operands[0], byte_mode ? QImode : HImode,
				  byte_mode ? 3 : 2);
  if (byte_mode)
    return "or%.b %1,%0";
  else
    return "or%.w %1,%0";
})
;; xor instructions

;; "xordi3" is mainly here to help combine().
;; DImode XOR as two SImode XORs, like anddi3/iordi3.  Constant halves:
;; XOR with 0 is dropped, XOR with -1 becomes not.l, anything else goes
;; through output_xorsi3.
(define_insn "xordi3"
  [(set (match_operand:DI 0 "nonimmediate_operand" "=od")
	(xor:DI (match_operand:DI 1 "general_operand" "%0")
		(match_operand:DI 2 "general_operand" "dn")))]
  "!TARGET_COLDFIRE"
{
  CC_STATUS_INIT;
  /* We can get CONST_DOUBLE, but also const1_rtx etc.  */

  if (CONSTANT_P (operands[2]))
    {
      rtx hi, lo;

      split_double (operands[2], &hi, &lo);

      switch (INTVAL (hi))
	{
	  case 0 :
	    break;
	  case -1 :
	    output_asm_insn ("not%.l %0", operands);
	    break;
	  default :
	    /* FIXME : a scratch register would be welcome here if
	       -128 <= INTVAL (hi) < -1 */
	    {
	      rtx xoperands[3];

	      xoperands[0] = operands[0];
	      xoperands[2] = hi;
	      output_asm_insn (output_xorsi3 (xoperands), xoperands);
	    }
	}
      if (GET_CODE (operands[0]) == REG)
	operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
      else
	operands[0] = adjust_address (operands[0], SImode, 4);
      switch (INTVAL (lo))
	{
	  case 0 :
	    break;
	  case -1 :
	    output_asm_insn ("not%.l %0", operands);
	    break;
	  default :
	    /* FIXME : a scratch register would be welcome here if
	       -128 <= INTVAL (lo) < -1 */
	    operands[2] = lo;
	    /* FIXME : this should be merged with xorsi3 */
	    {
	      rtx xoperands[3];

	      xoperands[0] = operands[0];
	      xoperands[2] = lo;
	      output_asm_insn (output_xorsi3 (xoperands), xoperands);
	    }
	}
      return "";
    }
  if (GET_CODE (operands[0]) != REG)
    {
      operands[1] = adjust_address (operands[0], SImode, 4);
      return "eor%.l %2,%0\;eor%.l %R2,%1";
    }
  if (GET_CODE (operands[2]) != REG)
    {
      operands[1] = adjust_address (operands[2], SImode, 4);
      return "eor%.l %2,%0\;eor%.l %1,%R0";
    }
  return "eor%.l %2,%0\;eor%.l %R2,%R0";
})
 
;; SImode XOR expander; concrete insns below.
(define_expand "xorsi3"
  [(set (match_operand:SI 0 "nonimmediate_operand" "")
	(xor:SI (match_operand:SI 1 "general_operand" "")
		(match_operand:SI 2 "general_operand" "")))]
  ""
  "")

;; Classic-68k SImode XOR via output_xorsi3 (which may pick eor/bchg
;; idioms).
(define_insn "xorsi3_internal"
  [(set (match_operand:SI 0 "nonimmediate_operand" "=do,m")
	(xor:SI (match_operand:SI 1 "general_operand" "%0,0")
		(match_operand:SI 2 "general_operand" "di,dKT")))]

  "!TARGET_COLDFIRE"
{
  return output_xorsi3 (operands);
})

;; ColdFire SImode XOR.
(define_insn "xorsi3_5200"
  [(set (match_operand:SI 0 "nonimmediate_operand" "=dm,d")
	(xor:SI (match_operand:SI 1 "general_operand" "%0,0")
		(match_operand:SI 2 "general_operand" "d,Ks")))]
  "TARGET_COLDFIRE"
{
  return output_xorsi3 (operands);
})

;; HImode XOR plus strict_low_part variants (both commuted operand
;; orders).
(define_insn "xorhi3"
  [(set (match_operand:HI 0 "nonimmediate_operand" "=dm")
	(xor:HI (match_operand:HI 1 "general_operand" "%0")
		(match_operand:HI 2 "general_operand" "dn")))]
  "!TARGET_COLDFIRE"
  "eor%.w %2,%0")

(define_insn ""
  [(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" "+dm"))
	(xor:HI (match_dup 0)
		(match_operand:HI 1 "general_operand" "dn")))]
  "!TARGET_COLDFIRE"
  "eor%.w %1,%0")

(define_insn ""
  [(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" "+dm"))
	(xor:HI (match_operand:HI 1 "general_operand" "dn")
		(match_dup 0)))]
  "!TARGET_COLDFIRE"
  "eor%.w %1,%0")

;; QImode XOR and strict_low_part variants.
(define_insn "xorqi3"
  [(set (match_operand:QI 0 "nonimmediate_operand" "=dm")
	(xor:QI (match_operand:QI 1 "general_operand" "%0")
		(match_operand:QI 2 "general_operand" "dn")))]
  "!TARGET_COLDFIRE"
  "eor%.b %2,%0")

(define_insn ""
  [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+dm"))
	(xor:QI (match_dup 0)
		(match_operand:QI 1 "general_operand" "dn")))]
  "!TARGET_COLDFIRE"
  "eor%.b %1,%0")

(define_insn ""
  [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+dm"))
	(xor:QI (match_operand:QI 1 "general_operand" "dn")
		(match_dup 0)))]
  "!TARGET_COLDFIRE"
  "eor%.b %1,%0")
;; negation instructions

;; DImode negate expander dispatching on ColdFire vs. classic 68k.
(define_expand "negdi2"
  [(set (match_operand:DI 0 "nonimmediate_operand" "")
	(neg:DI (match_operand:DI 1 "general_operand" "")))]
  ""
{
  if (TARGET_COLDFIRE)
    emit_insn (gen_negdi2_5200 (operands[0], operands[1]));
  else
    emit_insn (gen_negdi2_internal (operands[0], operands[1]));
  DONE;
})

;; DImode negate: neg.l on the low word, negx.l (negate with extend) on
;; the high word.  Alternative 0 ("<", predecrement memory) can use the
;; same address for both words.  For an address-register destination the
;; words are bounced through d0 with exg, since neg does not accept
;; address registers.
(define_insn "negdi2_internal"
  [(set (match_operand:DI 0 "nonimmediate_operand" "=<,do,!*a")
	(neg:DI (match_operand:DI 1 "general_operand" "0,0,0")))]
  "!TARGET_COLDFIRE"
{
  if (which_alternative == 0)
    return "neg%.l %0\;negx%.l %0";
  if (GET_CODE (operands[0]) == REG)
    operands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
  else
    operands[1] = adjust_address (operands[0], SImode, 4);
  if (ADDRESS_REG_P (operands[0]))
    return "exg %/d0,%1\;neg%.l %/d0\;exg %/d0,%1\;exg %/d0,%0\;negx%.l %/d0\;exg %/d0,%0";
  else
    return "neg%.l %1\;negx%.l %0";
})

;; ColdFire DImode negate: register-pair only.
(define_insn "negdi2_5200"
  [(set (match_operand:DI 0 "nonimmediate_operand" "=d")
	(neg:DI (match_operand:DI 1 "general_operand" "0")))]
  "TARGET_COLDFIRE"
{
  operands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
  return "neg%.l %1\;negx%.l %0";
})

;; SImode negate expander and per-target insns.
(define_expand "negsi2"
  [(set (match_operand:SI 0 "nonimmediate_operand" "")
	(neg:SI (match_operand:SI 1 "general_operand" "")))]
  ""
{
  if (TARGET_COLDFIRE)
    emit_insn (gen_negsi2_5200 (operands[0], operands[1]));
  else
    emit_insn (gen_negsi2_internal (operands[0], operands[1]));
  DONE;
})

(define_insn "negsi2_internal"
  [(set (match_operand:SI 0 "nonimmediate_operand" "=dm")
	(neg:SI (match_operand:SI 1 "general_operand" "0")))]
  "!TARGET_COLDFIRE"
  "neg%.l %0")

(define_insn "negsi2_5200"
  [(set (match_operand:SI 0 "nonimmediate_operand" "=d")
	(neg:SI (match_operand:SI 1 "general_operand" "0")))]
  "TARGET_COLDFIRE"
  "neg%.l %0")

;; HImode and QImode negate, each with a strict_low_part variant.
(define_insn "neghi2"
  [(set (match_operand:HI 0 "nonimmediate_operand" "=dm")
	(neg:HI (match_operand:HI 1 "general_operand" "0")))]
  "!TARGET_COLDFIRE"
  "neg%.w %0")

(define_insn ""
  [(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" "+dm"))
	(neg:HI (match_dup 0)))]
  "!TARGET_COLDFIRE"
  "neg%.w %0")

(define_insn "negqi2"
  [(set (match_operand:QI 0 "nonimmediate_operand" "=dm")
	(neg:QI (match_operand:QI 1 "general_operand" "0")))]
  "!TARGET_COLDFIRE"
  "neg%.b %0")

(define_insn ""
  [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+dm"))
	(neg:QI (match_dup 0)))]
  "!TARGET_COLDFIRE"
  "neg%.b %0")
 
;; If using software floating point, just flip the sign bit.

;; SFmode negate.  Without hardware FP the sign bit of the single word
;; is flipped by XORing with 0x80000000 (written as -2147483647 - 1 to
;; stay within host int range).
(define_expand "negsf2"
  [(set (match_operand:SF 0 "nonimmediate_operand" "")
	(neg:SF (match_operand:SF 1 "general_operand" "")))]
  ""
{
  if (!TARGET_HARD_FLOAT)
    {
      rtx result;
      rtx target;

      target = operand_subword_force (operands[0], 0, SFmode);
      result = expand_binop (SImode, xor_optab,
			     operand_subword_force (operands[1], 0, SFmode),
			     GEN_INT (-2147483647 - 1), target, 0, OPTAB_WIDEN);
      gcc_assert (result);

      if (result != target)
	emit_move_insn (result, target);

      /* Make a place for REG_EQUAL.  */
      emit_move_insn (operands[0], operands[0]);
      DONE;
    }
})

;; DFmode negate.  Soft-float path flips the sign bit in word 0 and
;; copies word 1 unchanged, wrapped in emit_no_conflict_block so the
;; multi-word move is safe if source and destination overlap.
(define_expand "negdf2"
  [(set (match_operand:DF 0 "nonimmediate_operand" "")
	(neg:DF (match_operand:DF 1 "general_operand" "")))]
  ""
{
  if (!TARGET_HARD_FLOAT)
    {
      rtx result;
      rtx target;
      rtx insns;

      start_sequence ();
      target = operand_subword (operands[0], 0, 1, DFmode);
      result = expand_binop (SImode, xor_optab,
			     operand_subword_force (operands[1], 0, DFmode),
			     GEN_INT (-2147483647 - 1), target, 0, OPTAB_WIDEN);
      gcc_assert (result);

      if (result != target)
	emit_move_insn (result, target);

      emit_move_insn (operand_subword (operands[0], 1, 1, DFmode),
		      operand_subword_force (operands[1], 1, DFmode));

      insns = get_insns ();
      end_sequence ();

      emit_no_conflict_block (insns, operands[0], operands[1], 0, 0);
      DONE;
    }
})

;; XFmode negate: like negdf2 but copies two trailing words.  Keyed on
;; !TARGET_68881 (XFmode is a 68881 format).
(define_expand "negxf2"
  [(set (match_operand:XF 0 "nonimmediate_operand" "")
	(neg:XF (match_operand:XF 1 "nonimmediate_operand" "")))]
  ""
{
  if (!TARGET_68881)
    {
      rtx result;
      rtx target;
      rtx insns;

      start_sequence ();
      target = operand_subword (operands[0], 0, 1, XFmode);
      result = expand_binop (SImode, xor_optab,
			     operand_subword_force (operands[1], 0, XFmode),
			     GEN_INT (-2147483647 - 1), target, 0, OPTAB_WIDEN);
      gcc_assert (result);

      if (result != target)
	emit_move_insn (result, target);

      emit_move_insn (operand_subword (operands[0], 1, 1, XFmode),
		      operand_subword_force (operands[1], 1, XFmode));
      emit_move_insn (operand_subword (operands[0], 2, 1, XFmode),
		      operand_subword_force (operands[1], 2, XFmode));

      insns = get_insns ();
      end_sequence ();

      emit_no_conflict_block (insns, operands[0], operands[1], 0, 0);
      DONE;
    }
})

;; 68881 FP negate.  For an in-place data-register operand, flip the
;; sign bit (bit 31) with bchg; otherwise fneg (.x from an FP register,
;; the mode's precision suffix from memory).
(define_insn "neg<mode>2_68881"
  [(set (match_operand:FP 0 "nonimmediate_operand" "=f,d")
	(neg:FP (match_operand:FP 1 "general_operand" "f<FP:dreg>m<FP:const>,0")))]
  "TARGET_68881"
{
  if (DATA_REG_P (operands[0]))
    {
      operands[1] = GEN_INT (31);
      return "bchg %1,%0";
    }
  if (FP_REG_P (operands[1]))
    return "f<FP:round>neg%.x %1,%0";
  return "f<FP:round>neg%.<FP:prec> %f1,%0";
})

;; ColdFire FPU negate, same bchg trick for data registers.
(define_insn "neg<mode>2_cf"
  [(set (match_operand:FP 0 "nonimmediate_operand" "=f,d")
	(neg:FP (match_operand:FP 1 "general_operand" "f<FP:dreg><Q>U,0")))]
  "TARGET_COLDFIRE_FPU"
{
  if (DATA_REG_P (operands[0]))
    {
      operands[1] = GEN_INT (31);
      return "bchg %1,%0";
    }
  if (FP_REG_P (operands[1]))
    return "f<FP:prec>neg%.d %1,%0";
  return "f<FP:prec>neg%.<FP:prec> %1,%0";
})
;; Sqrt instruction for the 68881

;; FP square-root expander; hardware-FP only.
(define_expand "sqrt<mode>2"
  [(set (match_operand:FP 0 "nonimmediate_operand" "")
	(sqrt:FP (match_operand:FP 1 "general_operand" "")))]
  "TARGET_HARD_FLOAT"
  "")

;; 68881 fsqrt: .x from an FP register, precision suffix from memory.
(define_insn "sqrt<mode>2_68881"
  [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
	(sqrt:FP (match_operand:FP 1 "general_operand" "f<FP:dreg>m")))]
  "TARGET_68881"
{
  if (FP_REG_P (operands[1]))
    return "f<FP:round>sqrt%.x %1,%0";
  return "f<FP:round>sqrt%.<FP:prec> %1,%0";
})

;; ColdFire FPU fsqrt.
(define_insn "sqrt<mode>2_cf"
  [(set (match_operand:FP 0 "nonimmediate_operand" "=f")
	(sqrt:FP (match_operand:FP 1 "general_operand" "f<FP:dreg><Q>U")))]
  "TARGET_COLDFIRE_FPU"
{
  if (FP_REG_P (operands[1]))
    return "f<FP:prec>sqrt%.d %1,%0";
  return "f<FP:prec>sqrt%.<FP:prec> %1,%0";
})
;; Absolute value instructions
;; If using software floating point, just zero the sign bit.

;; SFmode abs: soft-float path ANDs the word with 0x7fffffff to clear
;; the sign bit (mirror of negsf2's XOR).
(define_expand "abssf2"
  [(set (match_operand:SF 0 "nonimmediate_operand" "")
	(abs:SF (match_operand:SF 1 "general_operand" "")))]
  ""
{
  if (!TARGET_HARD_FLOAT)
    {
      rtx result;
      rtx target;

      target = operand_subword_force (operands[0], 0, SFmode);
      result = expand_binop (SImode, and_optab,
			     operand_subword_force (operands[1], 0, SFmode),
			     GEN_INT (0x7fffffff), target, 0, OPTAB_WIDEN);
      gcc_assert (result);

      if (result != target)
	emit_move_insn (result, target);

      /* Make a place for REG_EQUAL.  */
      emit_move_insn (operands[0], operands[0]);
      DONE;
    }
})

;; DFmode abs: clear the sign bit in word 0, copy word 1; structured
;; like negdf2.
(define_expand "absdf2"
  [(set (match_operand:DF 0 "nonimmediate_operand" "")
	(abs:DF (match_operand:DF 1 "general_operand" "")))]
  ""
{
  if (!TARGET_HARD_FLOAT)
    {
      rtx result;
      rtx target;
      rtx insns;

      start_sequence ();
      target = operand_subword (operands[0], 0, 1, DFmode);
      result = expand_binop (SImode, and_optab,
			     operand_subword_force (operands[1], 0, DFmode),
			     GEN_INT (0x7fffffff), target, 0, OPTAB_WIDEN);
      gcc_assert (result);

      if (result != target)
	emit_move_insn (result, target);

      emit_move_insn (operand_subword (operands[0], 1, 1, DFmode),
		      operand_subword_force (operands[1], 1, DFmode));

      insns = get_insns ();
      end_sequence ();

      emit_no_conflict_block (insns, operands[0], operands[1], 0, 0);
      DONE;
    }
})

;; XFmode abs: like absdf2 with two trailing words; structured like
;; negxf2.
(define_expand "absxf2"
  [(set (match_operand:XF 0 "nonimmediate_operand" "")
	(abs:XF (match_operand:XF 1 "nonimmediate_operand" "")))]
  ""
{
  if (!TARGET_68881)
    {
      rtx result;
      rtx target;
      rtx insns;

      start_sequence ();
      target = operand_subword (operands[0], 0, 1, XFmode);
      result = expand_binop (SImode, and_optab,
			     operand_subword_force (operands[1], 0, XFmode),
			     GEN_INT (0x7fffffff), target, 0, OPTAB_WIDEN);
      gcc_assert (result);

      if (result != target)
	emit_move_insn (result, target);

      emit_move_insn (operand_subword (operands[0], 1, 1, XFmode),
		      operand_subword_force (operands[1], 1, XFmode));
      emit_move_insn (operand_subword (operands[0], 2, 1, XFmode),
		      operand_subword_force (operands[1], 2, XFmode));

      insns = get_insns ();
      end_sequence ();

      emit_no_conflict_block (insns, operands[0], operands[1], 0, 0);
      DONE;
    }
})

;; 68881 abs: in-place data register clears sign bit 31 with bclr;
;; otherwise fabs (cf. neg<mode>2_68881, which uses bchg/fneg).
(define_insn "abs<mode>2_68881"
  [(set (match_operand:FP 0 "nonimmediate_operand" "=f,d")
	(abs:FP (match_operand:FP 1 "general_operand" "f<FP:dreg>m<FP:const>,0")))]
  "TARGET_68881"
{
  if (DATA_REG_P (operands[0]))
    {
      operands[1] = GEN_INT (31);
      return "bclr %1,%0";
    }
  if (FP_REG_P (operands[1]))
    return "f<FP:round>abs%.x %1,%0";
  return "f<FP:round>abs%.<FP:prec> %f1,%0";
})

;; ColdFire FPU abs, same bclr trick for data registers.
(define_insn "abs<mode>2_cf"
  [(set (match_operand:FP 0 "nonimmediate_operand" "=f,d")
	(abs:FP (match_operand:FP 1 "general_operand" "f<FP:dreg><Q>U,0")))]
  "TARGET_COLDFIRE_FPU"
{
  if (DATA_REG_P (operands[0]))
    {
      operands[1] = GEN_INT (31);
      return "bclr %1,%0";
    }
  if (FP_REG_P (operands[1]))
    return "f<FP:prec>abs%.d %1,%0";
  return "f<FP:prec>abs%.<FP:prec> %1,%0";
})
;; one complement instructions

;; "one_cmpldi2" is mainly here to help combine().
;; DImode one's complement: not.l on each word.  For post-increment /
;; pre-decrement memory the same operand serves for both words; for
;; other memory the second word is addressed at offset 4.
(define_insn "one_cmpldi2"
  [(set (match_operand:DI 0 "nonimmediate_operand" "=dm")
	(not:DI (match_operand:DI 1 "general_operand" "0")))]
  "!TARGET_COLDFIRE"
{
  CC_STATUS_INIT;
  if (GET_CODE (operands[0]) == REG)
    operands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
  else if (GET_CODE (XEXP (operands[0], 0)) == POST_INC
	   || GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
    operands[1] = operands[0];
  else
    operands[1] = adjust_address (operands[0], SImode, 4);
  return "not%.l %1\;not%.l %0";
})

;; SImode one's complement expander and per-target insns.
(define_expand "one_cmplsi2"
  [(set (match_operand:SI 0 "nonimmediate_operand" "")
	(not:SI (match_operand:SI 1 "general_operand" "")))]
  ""
{
  if (TARGET_COLDFIRE)
    emit_insn (gen_one_cmplsi2_5200 (operands[0], operands[1]));
  else
    emit_insn (gen_one_cmplsi2_internal (operands[0], operands[1]));
  DONE;
})

(define_insn "one_cmplsi2_internal"
  [(set (match_operand:SI 0 "nonimmediate_operand" "=dm")
	(not:SI (match_operand:SI 1 "general_operand" "0")))]
  "!TARGET_COLDFIRE"
  "not%.l %0")

(define_insn "one_cmplsi2_5200"
  [(set (match_operand:SI 0 "nonimmediate_operand" "=d")
	(not:SI (match_operand:SI 1 "general_operand" "0")))]
  "TARGET_COLDFIRE"
  "not%.l %0")

;; HImode and QImode one's complement, each with a strict_low_part
;; variant.
(define_insn "one_cmplhi2"
  [(set (match_operand:HI 0 "nonimmediate_operand" "=dm")
	(not:HI (match_operand:HI 1 "general_operand" "0")))]
  "!TARGET_COLDFIRE"
  "not%.w %0")

(define_insn ""
  [(set (strict_low_part (match_operand:HI 0 "nonimmediate_operand" "+dm"))
	(not:HI (match_dup 0)))]
  "!TARGET_COLDFIRE"
  "not%.w %0")

(define_insn "one_cmplqi2"
  [(set (match_operand:QI 0 "nonimmediate_operand" "=dm")
	(not:QI (match_operand:QI 1 "general_operand" "0")))]
  "!TARGET_COLDFIRE"
  "not%.b %0")

(define_insn ""
  [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+dm"))
	(not:QI (match_dup 0)))]
  "!TARGET_COLDFIRE"
  "not%.b %0")
;; arithmetic shift instructions
;; We don't need the shift memory by 1 bit instruction

;; (extend SI) << 32: the source simply becomes the high word and the
;; low word is zeroed (sub.l on itself for address registers, which
;; cannot use clr.l here, otherwise clr.l).
(define_insn "ashldi_extsi"
  [(set (match_operand:DI 0 "nonimmediate_operand" "=ro")
	(ashift:DI
	  (match_operator:DI 2 "extend_operator"
	    [(match_operand:SI 1 "general_operand" "rm")])
	  (const_int 32)))]
  ""
{
  CC_STATUS_INIT;
  if (GET_CODE (operands[0]) == REG)
    operands[2] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
  else
    operands[2] = adjust_address (operands[0], SImode, 4);
  if (ADDRESS_REG_P (operands[0]))
    return "move%.l %1,%0\;sub%.l %2,%2";
  else
    return "move%.l %1,%0\;clr%.l %2";
})

;; (sign_extend HI) << 32.  Only the sign extension of the HI value
;; matters for the high word; the low word is zero.  An address-register
;; scratch is used for the memory alternatives; operands[3] is
;; synthesized in the C body for the plain-memory case.
(define_insn "ashldi_sexthi"
  [(set (match_operand:DI 0 "nonimmediate_operand" "=m,a*d")
	(ashift:DI (sign_extend:DI (match_operand:HI 1 "general_operand" "rm,rm"))
		   (const_int 32)))
   (clobber (match_scratch:SI 2 "=a,X"))]
  ""
{
  CC_STATUS_INIT;
  if (GET_CODE (operands[0]) == MEM)
    {
      if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
	return "clr%.l %0\;move%.w %1,%2\;move%.l %2,%0";
      else if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
	return "move%.w %1,%2\;move%.l %2,%0\;clr%.l %0";
      else
	{
	  operands[3] = adjust_address (operands[0], SImode, 4);
	  return "move%.w %1,%2\;move%.l %2,%0\;clr%.l %3";
	}
    }
  else if (DATA_REG_P (operands[0]))
    return "move%.w %1,%0\;ext%.l %0\;clr%.l %R0";
  else
    return "move%.w %1,%0\;sub%.l %R0,%R0";
})

;; DImode << 32: move the source's low word into the destination's high
;; word and zero the destination's low word, with special sequences for
;; pre-decrement / post-increment memory destinations.
(define_insn "ashldi_const32"
  [(set (match_operand:DI 0 "nonimmediate_operand" "=rm")
	(ashift:DI (match_operand:DI 1 "general_operand" "ro")
		   (const_int 32)))]
  ""
{
  CC_STATUS_INIT;
  if (GET_CODE (operands[1]) == REG)
    operands[3] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
  else
    operands[3] = adjust_address (operands[1], SImode, 4);
  if (GET_CODE (operands[0]) == REG)
    operands[2] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
  else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
    return "clr%.l %0\;move%.l %3,%0";
  else if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
    return "move%.l %3,%0\;clr%.l %0";
  else
    operands[2] = adjust_address (operands[0], SImode, 4);
  if (ADDRESS_REG_P (operands[2]))
    return "move%.l %3,%0\;sub%.l %2,%2";
  else
    return "move%.l %3,%0\;clr%.l %2";
})
 
;; The predicate below must be general_operand, because ashldi3 allows that
;; DImode left shift of a data-register pair by selected constant counts:
;; 1-3 (add/addx chains), 8 and 16 (rotate tricks), and 33-63 (shift the
;; low word and move it to the high word).  operands[1] is rewritten to
;; name the low register of the %0 pair.
(define_insn "ashldi_const"
[(set (match_operand:DI 0 "nonimmediate_operand" "=d")
(ashift:DI (match_operand:DI 1 "general_operand" "0")
(match_operand 2 "const_int_operand" "n")))]
"(!TARGET_COLDFIRE
&& ((INTVAL (operands[2]) >= 1 && INTVAL (operands[2]) <= 3)
|| INTVAL (operands[2]) == 8 || INTVAL (operands[2]) == 16
|| (INTVAL (operands[2]) > 32 && INTVAL (operands[2]) <= 63)))"
{
operands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
if (INTVAL (operands[2]) == 1)
return "add%.l %1,%1\;addx%.l %0,%0";
else if (INTVAL (operands[2]) == 8)
return "rol%.l #8,%1\;rol%.l #8,%0\;move%.b %1,%0\;clr%.b %1";
else if (INTVAL (operands[2]) == 16)
return "swap %1\;swap %0\;move%.w %1,%0\;clr%.w %1";
else if (INTVAL (operands[2]) == 48)
/* NOTE(review): "mov%.l" (not "move%.l") also appears below and in the
other DI-shift patterns; presumably accepted by the assembler — verify.  */
return "mov%.l %1,%0\;swap %0\;clr%.l %1\;clr%.w %0";
else if (INTVAL (operands[2]) == 2)
return "add%.l %1,%1\;addx%.l %0,%0\;add%.l %1,%1\;addx%.l %0,%0";
else if (INTVAL (operands[2]) == 3)
return "add%.l %1,%1\;addx%.l %0,%0\;add%.l %1,%1\;addx%.l %0,%0\;add%.l %1,%1\;addx%.l %0,%0";
else /* 32 < INTVAL (operands[2]) <= 63 */
{
/* Shift the low word by count-32, then promote it to the high word
and clear the low word.  */
operands[2] = GEN_INT (INTVAL (operands[2]) - 32);
output_asm_insn (INTVAL (operands[2]) <= 8 ? "asl%.l %2,%1" :
"moveq %2,%0\;asl%.l %0,%1", operands);
return "mov%.l %1,%0\;moveq #0,%1";
}
})
 
;; ashldi3 expander: only the constant counts handled by ashldi_extsi /
;; ashldi_const32 / ashldi_const are accepted; anything else FAILs so the
;; middle end falls back to a library/generic sequence.
(define_expand "ashldi3"
[(set (match_operand:DI 0 "nonimmediate_operand" "")
(ashift:DI (match_operand:DI 1 "general_operand" "")
(match_operand 2 "const_int_operand" "")))]
"!TARGET_COLDFIRE"
"
{
/* ??? This is a named pattern like this is not allowed to FAIL based
on its operands. */
if (GET_CODE (operands[2]) != CONST_INT
|| ((INTVAL (operands[2]) < 1 || INTVAL (operands[2]) > 3)
&& INTVAL (operands[2]) != 8 && INTVAL (operands[2]) != 16
&& (INTVAL (operands[2]) < 32 || INTVAL (operands[2]) > 63)))
FAIL;
} ")

;; On most 68k models, this makes faster code in a special case.

;; SImode left shift by exactly 16: swap halves, then clear the low word.
(define_insn "ashlsi_16"
[(set (match_operand:SI 0 "register_operand" "=d")
(ashift:SI (match_operand:SI 1 "register_operand" "0")
(const_int 16)))]
"!TARGET_68060"
{
CC_STATUS_INIT;
return "swap %0\;clr%.w %0";
})
 
;; ashift patterns : use lsl instead of asl, because lsl always clears the
;; overflow bit, so we must not set CC_NO_OVERFLOW.
 
;; On the 68000, this makes faster code in a special case.
 
;; SImode left shift by 17..24 on pre-68020, non-ColdFire targets:
;; shift the low word by count-16, swap halves, clear the low word.
(define_insn "ashlsi_17_24"
[(set (match_operand:SI 0 "register_operand" "=d")
(ashift:SI (match_operand:SI 1 "register_operand" "0")
(match_operand:SI 2 "const_int_operand" "n")))]
"(! TARGET_68020 && !TARGET_COLDFIRE
&& INTVAL (operands[2]) > 16 && INTVAL (operands[2]) <= 24)"
{
CC_STATUS_INIT;

operands[2] = GEN_INT (INTVAL (operands[2]) - 16);
return "lsl%.w %2,%0\;swap %0\;clr%.w %0";
})

;; General SImode left shift.  lsl is used instead of asl because lsl
;; always clears the overflow bit (see comment above); a shift by 1 is
;; done with add, which sets the full condition codes.
(define_insn "ashlsi3"
[(set (match_operand:SI 0 "register_operand" "=d")
(ashift:SI (match_operand:SI 1 "register_operand" "0")
(match_operand:SI 2 "general_operand" "dI")))]
""
{
if (operands[2] == const1_rtx)
{
cc_status.flags = CC_NO_OVERFLOW;
return "add%.l %0,%0";
}
return "lsl%.l %2,%0";
})

;; HImode left shift (not on ColdFire).
(define_insn "ashlhi3"
[(set (match_operand:HI 0 "register_operand" "=d")
(ashift:HI (match_operand:HI 1 "register_operand" "0")
(match_operand:HI 2 "general_operand" "dI")))]
"!TARGET_COLDFIRE"
"lsl%.w %2,%0")

;; strict_low_part variant of ashlhi3; the count is operand 1 here.
(define_insn ""
[(set (strict_low_part (match_operand:HI 0 "register_operand" "+d"))
(ashift:HI (match_dup 0)
(match_operand:HI 1 "general_operand" "dI")))]
"!TARGET_COLDFIRE"
"lsl%.w %1,%0")

;; QImode left shift (not on ColdFire).
(define_insn "ashlqi3"
[(set (match_operand:QI 0 "register_operand" "=d")
(ashift:QI (match_operand:QI 1 "register_operand" "0")
(match_operand:QI 2 "general_operand" "dI")))]
"!TARGET_COLDFIRE"
"lsl%.b %2,%0")

;; strict_low_part variant of ashlqi3; the count is operand 1 here.
(define_insn ""
[(set (strict_low_part (match_operand:QI 0 "register_operand" "+d"))
(ashift:QI (match_dup 0)
(match_operand:QI 1 "general_operand" "dI")))]
"!TARGET_COLDFIRE"
"lsl%.b %1,%0")
 
;; On most 68k models, this makes faster code in a special case.
 
;; SImode arithmetic right shift by exactly 16: swap halves and
;; sign-extend the low word.
(define_insn "ashrsi_16"
[(set (match_operand:SI 0 "register_operand" "=d")
(ashiftrt:SI (match_operand:SI 1 "register_operand" "0")
(const_int 16)))]
"!TARGET_68060"
"swap %0\;ext%.l %0")

;; On the 68000, this makes faster code in a special case.

;; SImode arithmetic right shift by 17..24 on pre-68020, non-ColdFire:
;; swap, shift the (former high) word by count-16, sign-extend.
(define_insn ""
[(set (match_operand:SI 0 "register_operand" "=d")
(ashiftrt:SI (match_operand:SI 1 "register_operand" "0")
(match_operand:SI 2 "const_int_operand" "n")))]
"(! TARGET_68020 && !TARGET_COLDFIRE
&& INTVAL (operands[2]) > 16 && INTVAL (operands[2]) <= 24)"
{
operands[2] = GEN_INT (INTVAL (operands[2]) - 16);
return "swap %0\;asr%.w %2,%0\;ext%.l %0";
})

;; HImode subreg of a DI >> 32: just the low 16 bits of the source's
;; high word (offset 2 within the DImode memory).
(define_insn "subreghi1ashrdi_const32"
[(set (match_operand:HI 0 "nonimmediate_operand" "=rm")
(subreg:HI (ashiftrt:DI (match_operand:DI 1 "general_operand" "ro")
(const_int 32)) 6))]
""
{
if (GET_CODE (operands[1]) != REG)
operands[1] = adjust_address (operands[1], HImode, 2);
return "move%.w %1,%0";
})

;; SImode subreg of a DI >> 32: exactly the source's high word.
(define_insn "subregsi1ashrdi_const32"
[(set (match_operand:SI 0 "nonimmediate_operand" "=rm")
(subreg:SI (ashiftrt:DI (match_operand:DI 1 "general_operand" "ro")
(const_int 32)) 4))]
""
{
return "move%.l %1,%0";
})
 
;; DImode arithmetic right shift by exactly 32: low word of the result is
;; the source's high word; the new high word is its sign, materialized
;; with smi + extension.  Alternative 0 writes a data-register pair
;; directly; alternatives 1/2 (memory / pre-dec) go through a scratch.
(define_insn "ashrdi_const32"
[(set (match_operand:DI 0 "nonimmediate_operand" "=d,o,<")
(ashiftrt:DI (match_operand:DI 1 "general_operand" "ro,ro,ro")
(const_int 32)))
(clobber (match_scratch:SI 2 "=X,d,d"))]
""
{
CC_STATUS_INIT;
if (which_alternative == 0)
{
operands[2] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
/* 68020+ has extb.l (byte -> long); older parts need two ext steps.  */
if (TARGET_68020)
return "move%.l %1,%2\;smi %0\;extb%.l %0";
else
return "move%.l %1,%2\;smi %0\;ext%.w %0\;ext%.l %0";
}
else
{
/* operands[3] = where the low word of the result goes.  */
if (which_alternative == 2)
operands[3] = operands[0];
else if (which_alternative == 1)
operands[3] = adjust_address (operands[0], SImode, 4);
if (TARGET_68020)
return "move%.l %1,%3\;smi %2\;extb%.l %2\;move%.l %2,%0";
else
return "move%.l %1,%3\;smi %2\;ext%.w %2\;ext%.l %2\;move%.l %2,%0";
}
})
 
;; The predicate below must be general_operand, because ashrdi3 allows that
;; DImode arithmetic right shift of a data-register pair by selected
;; constant counts: 1-3 (asr/roxr chains), 8, 16, 31, and 33-63.
;; operands[1] is rewritten to name the low register of the %0 pair.
(define_insn "ashrdi_const"
[(set (match_operand:DI 0 "nonimmediate_operand" "=d")
(ashiftrt:DI (match_operand:DI 1 "general_operand" "0")
(match_operand 2 "const_int_operand" "n")))
(clobber (match_scratch:SI 3 "=X"))]
"(!TARGET_COLDFIRE
&& ((INTVAL (operands[2]) >= 1 && INTVAL (operands[2]) <= 3)
|| INTVAL (operands[2]) == 8 || INTVAL (operands[2]) == 16
|| INTVAL (operands[2]) == 31
|| (INTVAL (operands[2]) > 32 && INTVAL (operands[2]) <= 63)))"
{
operands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
/* Count 63: both words become the sign of the high word.  */
if (INTVAL (operands[2]) == 63)
return "add%.l %0,%0\;subx%.l %0,%0\;move%.l %0,%1";
CC_STATUS_INIT;
if (INTVAL (operands[2]) == 1)
return "asr%.l #1,%0\;roxr%.l #1,%1";
else if (INTVAL (operands[2]) == 8)
return "move%.b %0,%1\;asr%.l #8,%0\;ror%.l #8,%1";
else if (INTVAL (operands[2]) == 16)
return "move%.w %0,%1\;swap %0\;ext%.l %0\;swap %1";
else if (INTVAL (operands[2]) == 48)
return "swap %0\;ext%.l %0\;move%.l %0,%1\;smi %0\;ext%.w %0";
else if (INTVAL (operands[2]) == 31)
return "add%.l %1,%1\;addx%.l %0,%0\;move%.l %0,%1\;subx%.l %0,%0";
else if (INTVAL (operands[2]) == 2)
return "asr%.l #1,%0\;roxr%.l #1,%1\;asr%.l #1,%0\;roxr%.l #1,%1";
else if (INTVAL (operands[2]) == 3)
return "asr%.l #1,%0\;roxr%.l #1,%1\;asr%.l #1,%0\;roxr%.l #1,%1\;asr%.l #1,%0\;roxr%.l #1,%1";
else /* 32 < INTVAL (operands[2]) <= 63 */
{
/* Shift the high word by count-32, move it down, then sign-fill %0.  */
operands[2] = GEN_INT (INTVAL (operands[2]) - 32);
output_asm_insn (INTVAL (operands[2]) <= 8 ? "asr%.l %2,%0" :
"moveq %2,%1\;asr%.l %1,%0", operands);
output_asm_insn ("mov%.l %0,%1\;smi %0", operands);
/* NOTE(review): "%d0" in the >=15 branch looks like a typo for "%0"
(there is no obvious 'd' operand-modifier use here) — confirm against
this port's print_operand before relying on that path.  */
return INTVAL (operands[2]) >= 15 ? "ext%.w %d0" :
TARGET_68020 ? "extb%.l %0" : "ext%.w %0\;ext%.l %0";
}
})
 
;; ashrdi3 expander: accept only the constant counts the ashrdi_const /
;; ashrdi_const32 patterns handle; otherwise FAIL to the generic code.
(define_expand "ashrdi3"
[(parallel [(set (match_operand:DI 0 "nonimmediate_operand" "")
(ashiftrt:DI (match_operand:DI 1 "general_operand" "")
(match_operand 2 "const_int_operand" "")))
(clobber (match_scratch:SI 3 ""))])]
"!TARGET_COLDFIRE"
"
{
/* ??? This is a named pattern like this is not allowed to FAIL based
on its operands. */
if (GET_CODE (operands[2]) != CONST_INT
|| ((INTVAL (operands[2]) < 1 || INTVAL (operands[2]) > 3)
&& INTVAL (operands[2]) != 8 && INTVAL (operands[2]) != 16
&& (INTVAL (operands[2]) < 31 || INTVAL (operands[2]) > 63)))
FAIL;
operands[3] = gen_rtx_SCRATCH (SImode);
} ")

;; On all 68k models, this makes faster code in a special case.

;; SImode arithmetic right shift by 31: every result bit is the sign bit,
;; produced by add (sign -> carry) then subx (subtract with extend).
(define_insn "ashrsi_31"
[(set (match_operand:SI 0 "register_operand" "=d")
(ashiftrt:SI (match_operand:SI 1 "register_operand" "0")
(const_int 31)))]
""
{
return "add%.l %0,%0\;subx%.l %0,%0";
})
 
;; General SImode arithmetic right shift.
(define_insn "ashrsi3"
[(set (match_operand:SI 0 "register_operand" "=d")
(ashiftrt:SI (match_operand:SI 1 "register_operand" "0")
(match_operand:SI 2 "general_operand" "dI")))]
""
"asr%.l %2,%0")

;; HImode arithmetic right shift (not on ColdFire).
(define_insn "ashrhi3"
[(set (match_operand:HI 0 "register_operand" "=d")
(ashiftrt:HI (match_operand:HI 1 "register_operand" "0")
(match_operand:HI 2 "general_operand" "dI")))]
"!TARGET_COLDFIRE"
"asr%.w %2,%0")

;; strict_low_part variant of ashrhi3; the count is operand 1 here.
(define_insn ""
[(set (strict_low_part (match_operand:HI 0 "register_operand" "+d"))
(ashiftrt:HI (match_dup 0)
(match_operand:HI 1 "general_operand" "dI")))]
"!TARGET_COLDFIRE"
"asr%.w %1,%0")

;; QImode arithmetic right shift (not on ColdFire).
(define_insn "ashrqi3"
[(set (match_operand:QI 0 "register_operand" "=d")
(ashiftrt:QI (match_operand:QI 1 "register_operand" "0")
(match_operand:QI 2 "general_operand" "dI")))]
"!TARGET_COLDFIRE"
"asr%.b %2,%0")

;; strict_low_part variant of ashrqi3; the count is operand 1 here.
(define_insn ""
[(set (strict_low_part (match_operand:QI 0 "register_operand" "+d"))
(ashiftrt:QI (match_dup 0)
(match_operand:QI 1 "general_operand" "dI")))]
"!TARGET_COLDFIRE"
"asr%.b %1,%0")
;; logical shift instructions
 
;; commented out because of reload problems in 950612-1.c
;;(define_insn ""
;; [(set (cc0)
;; (subreg:SI (lshiftrt:DI (match_operand:DI 0 "general_operand" "ro")
;; (const_int 32)) 4))
;; (set (match_operand:SI 1 "nonimmediate_operand" "=dm")
;; (subreg:SI (lshiftrt:DI (match_dup 0)
;; (const_int 32)) 4))]
;; ""
;;{
;; return "move%.l %0,%1";
;;})
;;
;;(define_insn ""
;; [(set (cc0)
;; (subreg:SI (lshiftrt:DI (match_operand:DI 0 "general_operand" "ro")
;; (const_int 32)) 0))
;; (set (match_operand:DI 1 "nonimmediate_operand" "=do")
;; (lshiftrt:DI (match_dup 0)
;; (const_int 32)))]
;; ""
;;{
;; if (GET_CODE (operands[1]) == REG)
;; operands[2] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
;; else
;; operands[2] = adjust_address (operands[1], SImode, 4);
;; return "move%.l %0,%2\;clr%.l %1";
;;})
 
;; SImode subreg of a DI logical >> 32: exactly the source's high word.
(define_insn "subreg1lshrdi_const32"
[(set (match_operand:SI 0 "nonimmediate_operand" "=rm")
(subreg:SI (lshiftrt:DI (match_operand:DI 1 "general_operand" "ro")
(const_int 32)) 4))]
""
{
return "move%.l %1,%0";
})

;; DImode logical right shift by exactly 32: move the source's high word
;; into the destination's low word and zero the high word.  Alternatives
;; 1/2 handle pre-dec / post-inc destinations by ordering the two stores.
(define_insn "lshrdi_const32"
[(set (match_operand:DI 0 "nonimmediate_operand" "=ro,<,>")
(lshiftrt:DI (match_operand:DI 1 "general_operand" "ro,ro,ro")
(const_int 32)))]
""
{
CC_STATUS_INIT;
if (which_alternative == 1)
return "move%.l %1,%0\;clr%.l %0";
if (which_alternative == 2)
return "clr%.l %0\;move%.l %1,%0";
/* operands[2] = low word of the destination.  */
if (GET_CODE (operands[0]) == REG)
operands[2] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
else
operands[2] = adjust_address (operands[0], SImode, 4);
/* operands[3] = low word of the source (unused in the templates below,
which read the high word through %1).  */
if (GET_CODE (operands[1]) == REG)
operands[3] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
else
operands[3] = adjust_address (operands[1], SImode, 4);
/* clr cannot target an address register; zero it with sub instead.  */
if (ADDRESS_REG_P (operands[0]))
return "move%.l %1,%2\;sub%.l %0,%0";
else
return "move%.l %1,%2\;clr%.l %0";
})
 
;; The predicate below must be general_operand, because lshrdi3 allows that
;; DImode logical right shift of a data-register pair by selected constant
;; counts: 1-3 (lsr/roxr chains), 8, 16, and 33-63.  operands[1] is
;; rewritten to name the low register of the %0 pair.
(define_insn "lshrdi_const"
[(set (match_operand:DI 0 "nonimmediate_operand" "=d")
(lshiftrt:DI (match_operand:DI 1 "general_operand" "0")
(match_operand 2 "const_int_operand" "n")))]
"(!TARGET_COLDFIRE
&& ((INTVAL (operands[2]) >= 1 && INTVAL (operands[2]) <= 3)
|| INTVAL (operands[2]) == 8 || INTVAL (operands[2]) == 16
|| (INTVAL (operands[2]) > 32 && INTVAL (operands[2]) <= 63)))"
{
operands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
/* Count 63: result is just the old sign bit, in the low word.  */
if (INTVAL (operands[2]) == 63)
return "add%.l %0,%0\;clr%.l %0\;clr%.l %1\;addx%.l %1,%1";
CC_STATUS_INIT;
if (INTVAL (operands[2]) == 1)
return "lsr%.l #1,%0\;roxr%.l #1,%1";
else if (INTVAL (operands[2]) == 8)
return "move%.b %0,%1\;lsr%.l #8,%0\;ror%.l #8,%1";
else if (INTVAL (operands[2]) == 16)
return "move%.w %0,%1\;clr%.w %0\;swap %1\;swap %0";
else if (INTVAL (operands[2]) == 48)
return "move%.l %0,%1\;clr%.w %1\;clr%.l %0\;swap %1";
else if (INTVAL (operands[2]) == 2)
return "lsr%.l #1,%0\;roxr%.l #1,%1\;lsr%.l #1,%0\;roxr%.l #1,%1";
else if (INTVAL (operands[2]) == 3)
return "lsr%.l #1,%0\;roxr%.l #1,%1\;lsr%.l #1,%0\;roxr%.l #1,%1\;lsr%.l #1,%0\;roxr%.l #1,%1";
else /* 32 < INTVAL (operands[2]) <= 63 */
{
/* Shift the high word by count-32, move it down, zero the high word.  */
operands[2] = GEN_INT (INTVAL (operands[2]) - 32);
output_asm_insn (INTVAL (operands[2]) <= 8 ? "lsr%.l %2,%0" :
"moveq %2,%1\;lsr%.l %1,%0", operands);
return "mov%.l %0,%1\;moveq #0,%0";
}
})
 
;; lshrdi3 expander: accept only the constant counts the lshrdi_const /
;; lshrdi_const32 patterns handle; otherwise FAIL to the generic code.
(define_expand "lshrdi3"
[(set (match_operand:DI 0 "nonimmediate_operand" "")
(lshiftrt:DI (match_operand:DI 1 "general_operand" "")
(match_operand 2 "const_int_operand" "")))]
"!TARGET_COLDFIRE"
{
/* ??? This is a named pattern like this is not allowed to FAIL based
on its operands. */
if (GET_CODE (operands[2]) != CONST_INT
|| ((INTVAL (operands[2]) < 1 || INTVAL (operands[2]) > 3)
&& INTVAL (operands[2]) != 8 && INTVAL (operands[2]) != 16
&& (INTVAL (operands[2]) < 32 || INTVAL (operands[2]) > 63)))
FAIL;
})

;; On all 68k models, this makes faster code in a special case.

;; SImode logical right shift by 31: extract the sign bit as 0/1.
(define_insn "lshrsi_31"
[(set (match_operand:SI 0 "register_operand" "=d")
(lshiftrt:SI (match_operand:SI 1 "register_operand" "0")
(const_int 31)))]
""
{
return "add%.l %0,%0\;subx%.l %0,%0\;neg%.l %0";
})

;; On most 68k models, this makes faster code in a special case.

;; SImode logical right shift by exactly 16: clear the low word, swap.
(define_insn "lshrsi_16"
[(set (match_operand:SI 0 "register_operand" "=d")
(lshiftrt:SI (match_operand:SI 1 "register_operand" "0")
(const_int 16)))]
"!TARGET_68060"
{
CC_STATUS_INIT;
return "clr%.w %0\;swap %0";
})
 
;; On the 68000, this makes faster code in a special case.
 
;; SImode logical right shift by 17..24 on pre-68020, non-ColdFire:
;; clear low word, swap, shift the remaining word by count-16.
(define_insn "lshrsi_17_24"
[(set (match_operand:SI 0 "register_operand" "=d")
(lshiftrt:SI (match_operand:SI 1 "register_operand" "0")
(match_operand:SI 2 "const_int_operand" "n")))]
"(! TARGET_68020 && !TARGET_COLDFIRE
&& INTVAL (operands[2]) > 16 && INTVAL (operands[2]) <= 24)"
{
/* I think lsr%.w sets the CC properly. */
operands[2] = GEN_INT (INTVAL (operands[2]) - 16);
return "clr%.w %0\;swap %0\;lsr%.w %2,%0";
})

;; General SImode logical right shift.
(define_insn "lshrsi3"
[(set (match_operand:SI 0 "register_operand" "=d")
(lshiftrt:SI (match_operand:SI 1 "register_operand" "0")
(match_operand:SI 2 "general_operand" "dI")))]
""
"lsr%.l %2,%0")

;; HImode logical right shift (not on ColdFire).
(define_insn "lshrhi3"
[(set (match_operand:HI 0 "register_operand" "=d")
(lshiftrt:HI (match_operand:HI 1 "register_operand" "0")
(match_operand:HI 2 "general_operand" "dI")))]
"!TARGET_COLDFIRE"
"lsr%.w %2,%0")

;; strict_low_part variant of lshrhi3; the count is operand 1 here.
(define_insn ""
[(set (strict_low_part (match_operand:HI 0 "register_operand" "+d"))
(lshiftrt:HI (match_dup 0)
(match_operand:HI 1 "general_operand" "dI")))]
"!TARGET_COLDFIRE"
"lsr%.w %1,%0")

;; QImode logical right shift (not on ColdFire).
(define_insn "lshrqi3"
[(set (match_operand:QI 0 "register_operand" "=d")
(lshiftrt:QI (match_operand:QI 1 "register_operand" "0")
(match_operand:QI 2 "general_operand" "dI")))]
"!TARGET_COLDFIRE"
"lsr%.b %2,%0")

;; strict_low_part variant of lshrqi3; the count is operand 1 here.
(define_insn ""
[(set (strict_low_part (match_operand:QI 0 "register_operand" "+d"))
(lshiftrt:QI (match_dup 0)
(match_operand:QI 1 "general_operand" "dI")))]
"!TARGET_COLDFIRE"
"lsr%.b %1,%0")
;; rotate instructions
 
;; SImode rotate left.  A count of 16 is a swap; counts > 16 are emitted
;; as the complementary rotate right (32 - count).
(define_insn "rotlsi3"
[(set (match_operand:SI 0 "register_operand" "=d")
(rotate:SI (match_operand:SI 1 "register_operand" "0")
(match_operand:SI 2 "general_operand" "dINO")))]
"!TARGET_COLDFIRE"
{
if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) == 16)
return "swap %0";
else if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) >= 16)
{
operands[2] = GEN_INT (32 - INTVAL (operands[2]));
return "ror%.l %2,%0";
}
else
return "rol%.l %2,%0";
})

;; HImode rotate left; counts >= 8 become the complementary rotate right.
(define_insn "rotlhi3"
[(set (match_operand:HI 0 "register_operand" "=d")
(rotate:HI (match_operand:HI 1 "register_operand" "0")
(match_operand:HI 2 "general_operand" "dIP")))]
"!TARGET_COLDFIRE"
{
if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) >= 8)
{
operands[2] = GEN_INT (16 - INTVAL (operands[2]));
return "ror%.w %2,%0";
}
else
return "rol%.w %2,%0";
})
 
;; strict_low_part variant of rotlhi3: rotate the low 16 bits of a data
;; register in place.  Note the rotate count is operand 1 in this pattern
;; (operand 0 is the destination); the previous code mistakenly read the
;; nonexistent operands[2] and emitted "%2", which GCC later fixed
;; upstream by using operand 1 throughout.  As in rotlhi3, a constant
;; count >= 8 is emitted as the complementary rotate right (16 - count).
(define_insn ""
[(set (strict_low_part (match_operand:HI 0 "register_operand" "+d"))
(rotate:HI (match_dup 0)
(match_operand:HI 1 "general_operand" "dIP")))]
"!TARGET_COLDFIRE"
{
if (GET_CODE (operands[1]) == CONST_INT && INTVAL (operands[1]) >= 8)
{
operands[1] = GEN_INT (16 - INTVAL (operands[1]));
return "ror%.w %1,%0";
}
else
return "rol%.w %1,%0";
})
 
;; QImode rotate left; constant counts >= 4 become the complementary
;; rotate right (8 - count).
(define_insn "rotlqi3"
[(set (match_operand:QI 0 "register_operand" "=d")
(rotate:QI (match_operand:QI 1 "register_operand" "0")
(match_operand:QI 2 "general_operand" "dI")))]
"!TARGET_COLDFIRE"
{
if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) >= 4)
{
operands[2] = GEN_INT (8 - INTVAL (operands[2]));
return "ror%.b %2,%0";
}
else
return "rol%.b %2,%0";
})
 
;; strict_low_part variant of rotlqi3: rotate the low byte of a data
;; register in place.  The rotate count is operand 1 in this pattern;
;; the previous code mistakenly read the nonexistent operands[2] and
;; emitted "%2" (same defect as the HI lowpart rotate, fixed upstream
;; in later GCC).  A constant count >= 4 is emitted as the
;; complementary rotate right (8 - count).
(define_insn ""
[(set (strict_low_part (match_operand:QI 0 "register_operand" "+d"))
(rotate:QI (match_dup 0)
(match_operand:QI 1 "general_operand" "dI")))]
"!TARGET_COLDFIRE"
{
if (GET_CODE (operands[1]) == CONST_INT && INTVAL (operands[1]) >= 4)
{
operands[1] = GEN_INT (8 - INTVAL (operands[1]));
return "ror%.b %1,%0";
}
else
return "rol%.b %1,%0";
})
 
;; SImode rotate right.
(define_insn "rotrsi3"
[(set (match_operand:SI 0 "register_operand" "=d")
(rotatert:SI (match_operand:SI 1 "register_operand" "0")
(match_operand:SI 2 "general_operand" "dI")))]
"!TARGET_COLDFIRE"
"ror%.l %2,%0")

;; HImode rotate right.
(define_insn "rotrhi3"
[(set (match_operand:HI 0 "register_operand" "=d")
(rotatert:HI (match_operand:HI 1 "register_operand" "0")
(match_operand:HI 2 "general_operand" "dI")))]
"!TARGET_COLDFIRE"
"ror%.w %2,%0")

;; strict_low_part variant of rotrhi3; the count is operand 1 here.
(define_insn ""
[(set (strict_low_part (match_operand:HI 0 "register_operand" "+d"))
(rotatert:HI (match_dup 0)
(match_operand:HI 1 "general_operand" "dI")))]
"!TARGET_COLDFIRE"
"ror%.w %1,%0")

;; QImode rotate right.
(define_insn "rotrqi3"
[(set (match_operand:QI 0 "register_operand" "=d")
(rotatert:QI (match_operand:QI 1 "register_operand" "0")
(match_operand:QI 2 "general_operand" "dI")))]
"!TARGET_COLDFIRE"
"ror%.b %2,%0")

;; strict_low_part variant of rotrqi3; the count is operand 1 here.
(define_insn ""
[(set (strict_low_part (match_operand:QI 0 "register_operand" "+d"))
(rotatert:QI (match_dup 0)
(match_operand:QI 1 "general_operand" "dI")))]
"!TARGET_COLDFIRE"
"ror%.b %1,%0")
 
;; Bit set/clear in memory byte.
 
;; set bit, bit number is int
;; Set a single bit in a memory byte; the bit number comes from a data
;; register (only the low 3 bits matter for a byte-sized bset).
(define_insn "bsetmemqi"
[(set (match_operand:QI 0 "memory_operand" "+m")
(ior:QI (subreg:QI (ashift:SI (const_int 1)
(match_operand:SI 1 "general_operand" "d")) 3)
(match_dup 0)))]
""
{
CC_STATUS_INIT;
return "bset %1,%0";
})

;; set bit, bit number is (sign/zero)_extended from HImode/QImode
(define_insn ""
[(set (match_operand:QI 0 "memory_operand" "+m")
(ior:QI (subreg:QI (ashift:SI (const_int 1)
(match_operator:SI 2 "extend_operator"
[(match_operand 1 "general_operand" "d")])) 3)
(match_dup 0)))]
""
{
CC_STATUS_INIT;
return "bset %1,%0";
})

;; Clear a single bit in a memory byte; the (7 - n) in the RTL reflects
;; that zero_extract counts from the MSB while bclr counts from the LSB.
(define_insn "bclrmemqi"
[(set (zero_extract:SI (match_operand:QI 0 "memory_operand" "+m")
(const_int 1)
(minus:SI (const_int 7)
(match_operand:SI 1 "general_operand" "d")))
(const_int 0))]
""
{
CC_STATUS_INIT;
return "bclr %1,%0";
})

;; clear bit, bit number is (sign/zero)_extended from HImode/QImode
(define_insn ""
[(set (zero_extract:SI (match_operand:QI 0 "memory_operand" "+m")
(const_int 1)
(minus:SI (const_int 7)
(match_operator:SI 2 "extend_operator"
[(match_operand 1 "general_operand" "d")])))
(const_int 0))]
""
{
CC_STATUS_INIT;
return "bclr %1,%0";
})
 
;; Special cases of bit-field insns which we should
;; recognize in preference to the general case.
;; These handle aligned 8-bit and 16-bit fields,
;; which can usually be done with move instructions.
 
;
; Special case for 32-bit field in memory. This only occurs when 32-bit
; alignment of structure members is specified.
;
; The move is allowed to be odd byte aligned, because that's still faster
; than an odd byte aligned bit-field instruction.
;
;; Store a 32-bit byte-aligned field in memory with a plain move.l
;; instead of a bit-field insn.
(define_insn ""
[(set (zero_extract:SI (match_operand:QI 0 "memory_operand" "+o")
(const_int 32)
(match_operand:SI 1 "const_int_operand" "n"))
(match_operand:SI 2 "general_src_operand" "rmSi"))]
"TARGET_68020 && TARGET_BITFIELD
&& (INTVAL (operands[1]) % 8) == 0
&& ! mode_dependent_address_p (XEXP (operands[0], 0))"
{
/* Convert the bit offset to a byte offset and do a full-word move.  */
operands[0]
= adjust_address (operands[0], SImode, INTVAL (operands[1]) / 8);

return "move%.l %2,%0";
})

;; Store an aligned 8- or 16-bit field.  In a register, a field that
;; does not reach the low end still needs bfins; in memory (or the
;; low end of a register) a byte/word move suffices.
(define_insn ""
[(set (zero_extract:SI (match_operand:SI 0 "register_operand" "+do")
(match_operand:SI 1 "const_int_operand" "n")
(match_operand:SI 2 "const_int_operand" "n"))
(match_operand:SI 3 "register_operand" "d"))]
"TARGET_68020 && TARGET_BITFIELD
&& (INTVAL (operands[1]) == 8 || INTVAL (operands[1]) == 16)
&& INTVAL (operands[2]) % INTVAL (operands[1]) == 0
&& (GET_CODE (operands[0]) == REG
|| ! mode_dependent_address_p (XEXP (operands[0], 0)))"
{
if (REG_P (operands[0]))
{
if (INTVAL (operands[1]) + INTVAL (operands[2]) != 32)
return "bfins %3,%0{%b2:%b1}";
}
else
operands[0] = adjust_address (operands[0],
INTVAL (operands[1]) == 8 ? QImode : HImode,
INTVAL (operands[2]) / 8);

/* If the source is in memory, address its low byte/word instead.  */
if (GET_CODE (operands[3]) == MEM)
operands[3] = adjust_address (operands[3],
INTVAL (operands[1]) == 8 ? QImode : HImode,
(32 - INTVAL (operands[1])) / 8);

if (INTVAL (operands[1]) == 8)
return "move%.b %3,%0";
return "move%.w %3,%0";
})
 
 
;
; Special case for 32-bit field in memory. This only occurs when 32-bit
; alignment of structure members is specified.
;
; The move is allowed to be odd byte aligned, because that's still faster
; than an odd byte aligned bit-field instruction.
;
;; Load a 32-bit byte-aligned field from memory with a plain move.l.
(define_insn ""
[(set (match_operand:SI 0 "nonimmediate_operand" "=rm")
(zero_extract:SI (match_operand:QI 1 "memory_src_operand" "oS")
(const_int 32)
(match_operand:SI 2 "const_int_operand" "n")))]
"TARGET_68020 && TARGET_BITFIELD
&& (INTVAL (operands[2]) % 8) == 0
&& ! mode_dependent_address_p (XEXP (operands[1], 0))"
{
operands[1]
= adjust_address (operands[1], SImode, INTVAL (operands[2]) / 8);

return "move%.l %1,%0";
})

;; Zero-extend an aligned 8- or 16-bit field: bfextu when the field
;; does not reach the low end of a register; otherwise clr.l + move.
(define_insn ""
[(set (match_operand:SI 0 "nonimmediate_operand" "=&d")
(zero_extract:SI (match_operand:SI 1 "register_operand" "do")
(match_operand:SI 2 "const_int_operand" "n")
(match_operand:SI 3 "const_int_operand" "n")))]
"TARGET_68020 && TARGET_BITFIELD
&& (INTVAL (operands[2]) == 8 || INTVAL (operands[2]) == 16)
&& INTVAL (operands[3]) % INTVAL (operands[2]) == 0
&& (GET_CODE (operands[1]) == REG
|| ! mode_dependent_address_p (XEXP (operands[1], 0)))"
{
cc_status.flags |= CC_NOT_NEGATIVE;
if (REG_P (operands[1]))
{
if (INTVAL (operands[2]) + INTVAL (operands[3]) != 32)
return "bfextu %1{%b3:%b2},%0";
}
else
operands[1]
= adjust_address (operands[1], SImode, INTVAL (operands[3]) / 8);

output_asm_insn ("clr%.l %0", operands);
/* NOTE(review): this MEM branch is unreachable with the "=&d"
constraint on operand 0, and the INTVAL (operands[1]) here (after
operands[1] may have been replaced by a MEM above) looks wrong —
compare the parallel insv pattern, which uses the width operand.  */
if (GET_CODE (operands[0]) == MEM)
operands[0] = adjust_address (operands[0],
INTVAL (operands[2]) == 8 ? QImode : HImode,
(32 - INTVAL (operands[1])) / 8);

if (INTVAL (operands[2]) == 8)
return "move%.b %1,%0";
return "move%.w %1,%0";
})
 
;
; Special case for 32-bit field in memory. This only occurs when 32-bit
; alignment of structure members is specified.
;
; The move is allowed to be odd byte aligned, because that's still faster
; than an odd byte aligned bit-field instruction.
;
;; Load a 32-bit byte-aligned signed field from memory with a plain move.l.
(define_insn ""
[(set (match_operand:SI 0 "nonimmediate_operand" "=rm")
(sign_extract:SI (match_operand:QI 1 "memory_src_operand" "oS")
(const_int 32)
(match_operand:SI 2 "const_int_operand" "n")))]
"TARGET_68020 && TARGET_BITFIELD
&& (INTVAL (operands[2]) % 8) == 0
&& ! mode_dependent_address_p (XEXP (operands[1], 0))"
{
operands[1]
= adjust_address (operands[1], SImode, INTVAL (operands[2]) / 8);

return "move%.l %1,%0";
})

;; Sign-extend an aligned 8- or 16-bit field: bfexts when the field
;; does not reach the low end of a register; otherwise move + extend.
(define_insn ""
[(set (match_operand:SI 0 "nonimmediate_operand" "=d")
(sign_extract:SI (match_operand:SI 1 "register_operand" "do")
(match_operand:SI 2 "const_int_operand" "n")
(match_operand:SI 3 "const_int_operand" "n")))]
"TARGET_68020 && TARGET_BITFIELD
&& (INTVAL (operands[2]) == 8 || INTVAL (operands[2]) == 16)
&& INTVAL (operands[3]) % INTVAL (operands[2]) == 0
&& (GET_CODE (operands[1]) == REG
|| ! mode_dependent_address_p (XEXP (operands[1], 0)))"
{
if (REG_P (operands[1]))
{
if (INTVAL (operands[2]) + INTVAL (operands[3]) != 32)
return "bfexts %1{%b3:%b2},%0";
}
else
operands[1]
= adjust_address (operands[1],
INTVAL (operands[2]) == 8 ? QImode : HImode,
INTVAL (operands[3]) / 8);

if (INTVAL (operands[2]) == 8)
return "move%.b %1,%0\;extb%.l %0";
return "move%.w %1,%0\;ext%.l %0";
})
;; Bit-field instructions, general cases.
;; "o,d" constraint causes a nonoffsettable memref to match the "o"
;; so that its address is reloaded.
 
;; extv: sign-extracting bit-field load (68020+ with bit-field insns only).
(define_expand "extv"
[(set (match_operand:SI 0 "nonimmediate_operand" "")
(sign_extract:SI (match_operand:SI 1 "general_operand" "")
(match_operand:SI 2 "general_operand" "")
(match_operand:SI 3 "general_operand" "")))]
"TARGET_68020 && TARGET_BITFIELD"
"")

;; General sign-extracting bit-field load from memory.
(define_insn ""
[(set (match_operand:SI 0 "nonimmediate_operand" "=d")
(sign_extract:SI (match_operand:QI 1 "memory_operand" "o")
(match_operand:SI 2 "general_operand" "dn")
(match_operand:SI 3 "general_operand" "dn")))]
"TARGET_68020 && TARGET_BITFIELD"
"bfexts %1{%b3:%b2},%0")

;; extzv: zero-extracting bit-field load.
(define_expand "extzv"
[(set (match_operand:SI 0 "nonimmediate_operand" "")
(zero_extract:SI (match_operand:SI 1 "general_operand" "")
(match_operand:SI 2 "general_operand" "")
(match_operand:SI 3 "general_operand" "")))]
"TARGET_68020 && TARGET_BITFIELD"
"")

;; General zero-extracting bit-field load from memory.
(define_insn ""
[(set (match_operand:SI 0 "nonimmediate_operand" "=d,d")
(zero_extract:SI (match_operand:QI 1 "memory_operand" "o,d")
(match_operand:SI 2 "general_operand" "dn,dn")
(match_operand:SI 3 "general_operand" "dn,dn")))]
"TARGET_68020 && TARGET_BITFIELD"
{
/* A zero-extended field of width < 32 is known non-negative; record
that so a following sign test can be optimized.  */
if (GET_CODE (operands[2]) == CONST_INT)
{
if (INTVAL (operands[2]) != 32)
cc_status.flags |= CC_NOT_NEGATIVE;
}
else
{
CC_STATUS_INIT;
}
return "bfextu %1{%b3:%b2},%0";
})
 
;; XOR of a memory bit-field with an all-ones mask: a single bfchg.
(define_insn ""
[(set (zero_extract:SI (match_operand:QI 0 "memory_operand" "+o")
(match_operand:SI 1 "general_operand" "dn")
(match_operand:SI 2 "general_operand" "dn"))
(xor:SI (zero_extract:SI (match_dup 0) (match_dup 1) (match_dup 2))
(match_operand 3 "const_int_operand" "n")))]
"TARGET_68020 && TARGET_BITFIELD
&& (INTVAL (operands[3]) == -1
|| (GET_CODE (operands[1]) == CONST_INT
&& (~ INTVAL (operands[3]) & ((1 << INTVAL (operands[1]))- 1)) == 0))"
{
CC_STATUS_INIT;
return "bfchg %0{%b2:%b1}";
})

;; Clear a memory bit-field.
(define_insn ""
[(set (zero_extract:SI (match_operand:QI 0 "memory_operand" "+o")
(match_operand:SI 1 "general_operand" "dn")
(match_operand:SI 2 "general_operand" "dn"))
(const_int 0))]
"TARGET_68020 && TARGET_BITFIELD"
{
CC_STATUS_INIT;
return "bfclr %0{%b2:%b1}";
})

;; Set a memory bit-field to all ones.
(define_insn ""
[(set (zero_extract:SI (match_operand:QI 0 "memory_operand" "+o")
(match_operand:SI 1 "general_operand" "dn")
(match_operand:SI 2 "general_operand" "dn"))
(const_int -1))]
"TARGET_68020 && TARGET_BITFIELD"
{
CC_STATUS_INIT;
return "bfset %0{%b2:%b1}";
})

;; insv: bit-field store (68020+ with bit-field insns only).
(define_expand "insv"
[(set (zero_extract:SI (match_operand:SI 0 "nonimmediate_operand" "")
(match_operand:SI 1 "general_operand" "")
(match_operand:SI 2 "general_operand" ""))
(match_operand:SI 3 "register_operand" ""))]
"TARGET_68020 && TARGET_BITFIELD"
"")

;; General bit-field store to memory.
(define_insn ""
[(set (zero_extract:SI (match_operand:QI 0 "memory_operand" "+o")
(match_operand:SI 1 "general_operand" "dn")
(match_operand:SI 2 "general_operand" "dn"))
(match_operand:SI 3 "register_operand" "d"))]
"TARGET_68020 && TARGET_BITFIELD"
"bfins %3,%0{%b2:%b1}")
 
;; Now recognize bit-field insns that operate on registers
;; (or at least were intended to do so).
 
;; Sign-extracting bit-field load from a data register.
(define_insn ""
[(set (match_operand:SI 0 "nonimmediate_operand" "=d")
(sign_extract:SI (match_operand:SI 1 "register_operand" "d")
(match_operand:SI 2 "general_operand" "dn")
(match_operand:SI 3 "general_operand" "dn")))]
"TARGET_68020 && TARGET_BITFIELD"
"bfexts %1{%b3:%b2},%0")

;; Zero-extracting bit-field load from a data register.
(define_insn ""
[(set (match_operand:SI 0 "nonimmediate_operand" "=d")
(zero_extract:SI (match_operand:SI 1 "register_operand" "d")
(match_operand:SI 2 "general_operand" "dn")
(match_operand:SI 3 "general_operand" "dn")))]
"TARGET_68020 && TARGET_BITFIELD"
{
/* Width < 32 means the zero-extended result is known non-negative.  */
if (GET_CODE (operands[2]) == CONST_INT)
{
if (INTVAL (operands[2]) != 32)
cc_status.flags |= CC_NOT_NEGATIVE;
}
else
{
CC_STATUS_INIT;
}
return "bfextu %1{%b3:%b2},%0";
})

;; Clear a bit-field in a data register.
(define_insn ""
[(set (zero_extract:SI (match_operand:SI 0 "register_operand" "+d")
(match_operand:SI 1 "general_operand" "dn")
(match_operand:SI 2 "general_operand" "dn"))
(const_int 0))]
"TARGET_68020 && TARGET_BITFIELD"
{
CC_STATUS_INIT;
return "bfclr %0{%b2:%b1}";
})

;; Set a bit-field in a data register to all ones.
(define_insn ""
[(set (zero_extract:SI (match_operand:SI 0 "register_operand" "+d")
(match_operand:SI 1 "general_operand" "dn")
(match_operand:SI 2 "general_operand" "dn"))
(const_int -1))]
"TARGET_68020 && TARGET_BITFIELD"
{
CC_STATUS_INIT;
return "bfset %0{%b2:%b1}";
})

;; Bit-field store into a data register.
(define_insn ""
[(set (zero_extract:SI (match_operand:SI 0 "register_operand" "+d")
(match_operand:SI 1 "general_operand" "dn")
(match_operand:SI 2 "general_operand" "dn"))
(match_operand:SI 3 "register_operand" "d"))]
"TARGET_68020 && TARGET_BITFIELD"
{
#if 0
/* These special cases are now recognized by a specific pattern. */
if (GET_CODE (operands[1]) == CONST_INT && GET_CODE (operands[2]) == CONST_INT
&& INTVAL (operands[1]) == 16 && INTVAL (operands[2]) == 16)
return "move%.w %3,%0";
if (GET_CODE (operands[1]) == CONST_INT && GET_CODE (operands[2]) == CONST_INT
&& INTVAL (operands[1]) == 24 && INTVAL (operands[2]) == 8)
return "move%.b %3,%0";
#endif
return "bfins %3,%0{%b2:%b1}";
})
;; Special patterns for optimizing bit-field instructions.
 
;; Test a bit-field in memory and set cc0.  A one-bit field at a constant
;; position is handled with btst via output_btst.
(define_insn ""
[(set (cc0)
(zero_extract:SI (match_operand:QI 0 "memory_operand" "o")
(match_operand:SI 1 "const_int_operand" "n")
(match_operand:SI 2 "general_operand" "dn")))]
"TARGET_68020 && TARGET_BITFIELD"
{
if (operands[1] == const1_rtx
&& GET_CODE (operands[2]) == CONST_INT)
{
int width = GET_CODE (operands[0]) == REG ? 31 : 7;
return output_btst (operands,
GEN_INT (width - INTVAL (operands[2])),
operands[0], insn, 1000);
/* Pass 1000 as SIGNPOS argument so that btst will
not think we are testing the sign bit for an `and'
and assume that nonzero implies a negative result. */
}
if (INTVAL (operands[1]) != 32)
cc_status.flags = CC_NOT_NEGATIVE;
return "bftst %0{%b2:%b1}";
})


;;; now handle the register cases
;; Test a bit-field in a data register and set cc0; same structure as
;; the memory variant above.
(define_insn ""
[(set (cc0)
(zero_extract:SI (match_operand:SI 0 "register_operand" "d")
(match_operand:SI 1 "const_int_operand" "n")
(match_operand:SI 2 "general_operand" "dn")))]
"TARGET_68020 && TARGET_BITFIELD"
{
if (operands[1] == const1_rtx
&& GET_CODE (operands[2]) == CONST_INT)
{
int width = GET_CODE (operands[0]) == REG ? 31 : 7;
return output_btst (operands, GEN_INT (width - INTVAL (operands[2])),
operands[0], insn, 1000);
/* Pass 1000 as SIGNPOS argument so that btst will
not think we are testing the sign bit for an `and'
and assume that nonzero implies a negative result. */
}
if (INTVAL (operands[1]) != 32)
cc_status.flags = CC_NOT_NEGATIVE;
return "bftst %0{%b2:%b1}";
})
;; Store the result of a DImode comparison against zero as a byte.
;; output_scc_di emits the complete compare-and-set sequence.
(define_insn "scc0_di"
  [(set (match_operand:QI 0 "nonimmediate_operand" "=dm")
	(match_operator 1 "valid_dbcc_comparison_p"
	  [(match_operand:DI 2 "general_operand" "ro") (const_int 0)]))]
  "! TARGET_COLDFIRE"
{
  return output_scc_di (operands[1], operands[2], const0_rtx, operands[0]);
})

;; ColdFire variant of scc0_di; destination restricted to a data register.
(define_insn "scc0_di_5200"
  [(set (match_operand:QI 0 "nonimmediate_operand" "=d")
	(match_operator 1 "valid_dbcc_comparison_p"
	  [(match_operand:DI 2 "general_operand" "ro") (const_int 0)]))]
  "TARGET_COLDFIRE"
{
  return output_scc_di (operands[1], operands[2], const0_rtx, operands[0]);
})

;; Store the result of a DImode/DImode comparison as a byte.
(define_insn "scc_di"
  [(set (match_operand:QI 0 "nonimmediate_operand" "=dm,dm")
	(match_operator 1 "valid_dbcc_comparison_p"
	  [(match_operand:DI 2 "general_operand" "ro,r")
	   (match_operand:DI 3 "general_operand" "r,ro")]))]
  "! TARGET_COLDFIRE"
{
  return output_scc_di (operands[1], operands[2], operands[3], operands[0]);
})

;; ColdFire variant of scc_di; destination restricted to a data register.
(define_insn "scc_di_5200"
  [(set (match_operand:QI 0 "nonimmediate_operand" "=d,d")
	(match_operator 1 "valid_dbcc_comparison_p"
	  [(match_operand:DI 2 "general_operand" "ro,r")
	   (match_operand:DI 3 "general_operand" "r,ro")]))]
  "TARGET_COLDFIRE"
{
  return output_scc_di (operands[1], operands[2], operands[3], operands[0]);
})
 
;; Note that operand 0 of an SCC insn is supported in the hardware as
;; memory, but we cannot allow it to be in memory in case the address
;; needs to be reloaded.
 
;; seq: set a byte register to the result of (cc0) == 0.  The expander
;; FAILs when the previous compare had FP operands on 68060/ColdFire FPU,
;; so generic code can synthesize the operation another way.
(define_expand "seq"
  [(set (match_operand:QI 0 "register_operand" "")
	(eq:QI (cc0) (const_int 0)))]
  ""
{
  if ((TARGET_68060 || TARGET_COLDFIRE_FPU)
      && m68k_last_compare_had_fp_operands)
    {
      m68k_last_compare_had_fp_operands = 0;
      FAIL;
    }
})

;; OUTPUT_JUMP selects between the integer, 68881, and third templates;
;; the condition codes of the preceding compare are reused unchanged.
(define_insn ""
  [(set (match_operand:QI 0 "register_operand" "=d")
	(eq:QI (cc0) (const_int 0)))]
  ""
{
  cc_status = cc_prev_status;
  OUTPUT_JUMP ("seq %0", "fseq %0", "seq %0");
})

;; sne: set a byte register to the result of (cc0) != 0.  Same FP caveat
;; as seq above.
(define_expand "sne"
  [(set (match_operand:QI 0 "register_operand" "")
	(ne:QI (cc0) (const_int 0)))]
  ""
{
  if ((TARGET_68060 || TARGET_COLDFIRE_FPU)
      && m68k_last_compare_had_fp_operands)
    {
      m68k_last_compare_had_fp_operands = 0;
      FAIL;
    }
})

(define_insn ""
  [(set (match_operand:QI 0 "register_operand" "=d")
	(ne:QI (cc0) (const_int 0)))]
  ""
{
  cc_status = cc_prev_status;
  OUTPUT_JUMP ("sne %0", "fsne %0", "sne %0");
})
 
;; sgt: set byte on signed greater-than.  FAILs for FP compares on
;; 68060/ColdFire FPU (same mechanism as seq).
(define_expand "sgt"
  [(set (match_operand:QI 0 "register_operand" "")
	(gt:QI (cc0) (const_int 0)))]
  ""
{
  if ((TARGET_68060 || TARGET_COLDFIRE_FPU)
      && m68k_last_compare_had_fp_operands)
    {
      m68k_last_compare_had_fp_operands = 0;
      FAIL;
    }
})

;; NOTE(review): the third OUTPUT_JUMP template is 0 here -- apparently
;; no valid form exists for that case; confirm against OUTPUT_JUMP's
;; definition in the m68k headers.
(define_insn ""
  [(set (match_operand:QI 0 "register_operand" "=d")
	(gt:QI (cc0) (const_int 0)))]
  ""
{
  cc_status = cc_prev_status;
  OUTPUT_JUMP ("sgt %0", "fsgt %0", 0);
})

;; sgtu: unsigned greater-than; a plain "shi" (set if high), no FP form.
(define_expand "sgtu"
  [(set (match_operand:QI 0 "register_operand" "")
	(gtu:QI (cc0) (const_int 0)))]
  ""
  "")

(define_insn ""
  [(set (match_operand:QI 0 "register_operand" "=d")
	(gtu:QI (cc0) (const_int 0)))]
  ""
{
  cc_status = cc_prev_status;
  return "shi %0";
})

;; slt: signed less-than; the third template uses "smi" (minus flag).
(define_expand "slt"
  [(set (match_operand:QI 0 "register_operand" "")
	(lt:QI (cc0) (const_int 0)))]
  ""
{
  if ((TARGET_68060 || TARGET_COLDFIRE_FPU)
      && m68k_last_compare_had_fp_operands)
    {
      m68k_last_compare_had_fp_operands = 0;
      FAIL;
    }
})

(define_insn ""
  [(set (match_operand:QI 0 "register_operand" "=d")
	(lt:QI (cc0) (const_int 0)))]
  ""
{
  cc_status = cc_prev_status;
  OUTPUT_JUMP ("slt %0", "fslt %0", "smi %0");
})

;; sltu: unsigned less-than; "scs" (set on carry set), no FP form.
(define_expand "sltu"
  [(set (match_operand:QI 0 "register_operand" "")
	(ltu:QI (cc0) (const_int 0)))]
  ""
  "")

(define_insn ""
  [(set (match_operand:QI 0 "register_operand" "=d")
	(ltu:QI (cc0) (const_int 0)))]
  ""
{
  cc_status = cc_prev_status;
  return "scs %0";
})
 
;; sge: signed greater-or-equal.  FAILs for FP compares on
;; 68060/ColdFire FPU (same mechanism as seq).
(define_expand "sge"
  [(set (match_operand:QI 0 "register_operand" "")
	(ge:QI (cc0) (const_int 0)))]
  ""
{
  if ((TARGET_68060 || TARGET_COLDFIRE_FPU)
      && m68k_last_compare_had_fp_operands)
    {
      m68k_last_compare_had_fp_operands = 0;
      FAIL;
    }
})

;; Third template uses "spl" (set on plus/sign clear).
(define_insn ""
  [(set (match_operand:QI 0 "register_operand" "=d")
	(ge:QI (cc0) (const_int 0)))]
  ""
{
  cc_status = cc_prev_status;
  OUTPUT_JUMP ("sge %0", "fsge %0", "spl %0");
})

;; sgeu: unsigned greater-or-equal; "scc" (set on carry clear).
(define_expand "sgeu"
  [(set (match_operand:QI 0 "register_operand" "")
	(geu:QI (cc0) (const_int 0)))]
  ""
  "")

(define_insn ""
  [(set (match_operand:QI 0 "register_operand" "=d")
	(geu:QI (cc0) (const_int 0)))]
  ""
{
  cc_status = cc_prev_status;
  return "scc %0";
})

;; sle: signed less-or-equal.  Same FP caveat as seq; the third
;; OUTPUT_JUMP template is 0 (no form for that case, as in sgt).
(define_expand "sle"
  [(set (match_operand:QI 0 "register_operand" "")
	(le:QI (cc0) (const_int 0)))]
  ""
{
  if ((TARGET_68060 || TARGET_COLDFIRE_FPU)
      && m68k_last_compare_had_fp_operands)
    {
      m68k_last_compare_had_fp_operands = 0;
      FAIL;
    }
})

(define_insn ""
  [(set (match_operand:QI 0 "register_operand" "=d")
	(le:QI (cc0) (const_int 0)))]
  ""
{
  cc_status = cc_prev_status;
  OUTPUT_JUMP ("sle %0", "fsle %0", 0);
})

;; sleu: unsigned less-or-equal; "sls" (set on low-or-same).
(define_expand "sleu"
  [(set (match_operand:QI 0 "register_operand" "")
	(leu:QI (cc0) (const_int 0)))]
  ""
  "")

(define_insn ""
  [(set (match_operand:QI 0 "register_operand" "=d")
	(leu:QI (cc0) (const_int 0)))]
  ""
{
  cc_status = cc_prev_status;
  return "sls %0";
})
 
;; sordered: set byte if the last 68881 compare found the operands
;; ordered (neither was a NaN).  Only valid after an FP compare -- the
;; expander asserts this.  Not used on the 68060.
(define_expand "sordered"
  [(set (match_operand:QI 0 "register_operand" "")
	(ordered:QI (cc0) (const_int 0)))]
  "TARGET_68881 && !TARGET_68060"
{
  gcc_assert (m68k_last_compare_had_fp_operands);
  m68k_last_compare_had_fp_operands = 0;
})

(define_insn "*sordered_1"
  [(set (match_operand:QI 0 "register_operand" "=d")
	(ordered:QI (cc0) (const_int 0)))]
  "TARGET_68881 && !TARGET_68060"
{
  cc_status = cc_prev_status;
  return "fsor %0";
})

;; sunordered: set byte if the operands compared unordered (NaN present).
(define_expand "sunordered"
  [(set (match_operand:QI 0 "register_operand" "")
	(unordered:QI (cc0) (const_int 0)))]
  "TARGET_68881 && !TARGET_68060"
{
  gcc_assert (m68k_last_compare_had_fp_operands);
  m68k_last_compare_had_fp_operands = 0;
})

(define_insn "*sunordered_1"
  [(set (match_operand:QI 0 "register_operand" "=d")
	(unordered:QI (cc0) (const_int 0)))]
  "TARGET_68881 && !TARGET_68060"
{
  cc_status = cc_prev_status;
  return "fsun %0";
})

;; suneq: set byte on "unordered or equal" (fsueq).
(define_expand "suneq"
  [(set (match_operand:QI 0 "register_operand" "")
	(uneq:QI (cc0) (const_int 0)))]
  "TARGET_68881 && !TARGET_68060"
{
  gcc_assert (m68k_last_compare_had_fp_operands);
  m68k_last_compare_had_fp_operands = 0;
})

(define_insn "*suneq_1"
  [(set (match_operand:QI 0 "register_operand" "=d")
	(uneq:QI (cc0) (const_int 0)))]
  "TARGET_68881 && !TARGET_68060"
{
  cc_status = cc_prev_status;
  return "fsueq %0";
})
 
;; Remaining IEEE "unordered-or-relation" set-on-condition patterns.
;; All require a preceding 68881 FP compare (asserted in each expander)
;; and are disabled on the 68060.
(define_expand "sunge"
  [(set (match_operand:QI 0 "register_operand" "")
	(unge:QI (cc0) (const_int 0)))]
  "TARGET_68881 && !TARGET_68060"
{
  gcc_assert (m68k_last_compare_had_fp_operands);
  m68k_last_compare_had_fp_operands = 0;
})

(define_insn "*sunge_1"
  [(set (match_operand:QI 0 "register_operand" "=d")
	(unge:QI (cc0) (const_int 0)))]
  "TARGET_68881 && !TARGET_68060"
{
  cc_status = cc_prev_status;
  return "fsuge %0";
})

(define_expand "sungt"
  [(set (match_operand:QI 0 "register_operand" "")
	(ungt:QI (cc0) (const_int 0)))]
  "TARGET_68881 && !TARGET_68060"
{
  gcc_assert (m68k_last_compare_had_fp_operands);
  m68k_last_compare_had_fp_operands = 0;
})

(define_insn "*sungt_1"
  [(set (match_operand:QI 0 "register_operand" "=d")
	(ungt:QI (cc0) (const_int 0)))]
  "TARGET_68881 && !TARGET_68060"
{
  cc_status = cc_prev_status;
  return "fsugt %0";
})

(define_expand "sunle"
  [(set (match_operand:QI 0 "register_operand" "")
	(unle:QI (cc0) (const_int 0)))]
  "TARGET_68881 && !TARGET_68060"
{
  gcc_assert (m68k_last_compare_had_fp_operands);
  m68k_last_compare_had_fp_operands = 0;
})

(define_insn "*sunle_1"
  [(set (match_operand:QI 0 "register_operand" "=d")
	(unle:QI (cc0) (const_int 0)))]
  "TARGET_68881 && !TARGET_68060"
{
  cc_status = cc_prev_status;
  return "fsule %0";
})

(define_expand "sunlt"
  [(set (match_operand:QI 0 "register_operand" "")
	(unlt:QI (cc0) (const_int 0)))]
  "TARGET_68881 && !TARGET_68060"
{
  gcc_assert (m68k_last_compare_had_fp_operands);
  m68k_last_compare_had_fp_operands = 0;
})

(define_insn "*sunlt_1"
  [(set (match_operand:QI 0 "register_operand" "=d")
	(unlt:QI (cc0) (const_int 0)))]
  "TARGET_68881 && !TARGET_68060"
{
  cc_status = cc_prev_status;
  return "fsult %0";
})

;; sltgt: ordered and not equal ("ogl" -- ordered greater or less).
(define_expand "sltgt"
  [(set (match_operand:QI 0 "register_operand" "")
	(ltgt:QI (cc0) (const_int 0)))]
  "TARGET_68881 && !TARGET_68060"
{
  gcc_assert (m68k_last_compare_had_fp_operands);
  m68k_last_compare_had_fp_operands = 0;
})

(define_insn "*sltgt_1"
  [(set (match_operand:QI 0 "register_operand" "=d")
	(ltgt:QI (cc0) (const_int 0)))]
  "TARGET_68881 && !TARGET_68060"
{
  cc_status = cc_prev_status;
  return "fsogl %0";
})
;; Ordered FP set-on-condition patterns expressed as the negation of the
;; corresponding unordered condition: !(unle) = ogt, !(unlt) = oge,
;; !(unge) = olt, !(ungt) = ole.
(define_insn "*fsogt_1"
  [(set (match_operand:QI 0 "register_operand" "=d")
	(not:QI (unle:QI (cc0) (const_int 0))))]
  "TARGET_68881 && !TARGET_68060"
{
  cc_status = cc_prev_status;
  return "fsogt %0";
})

(define_insn "*fsoge_1"
  [(set (match_operand:QI 0 "register_operand" "=d")
	(not:QI (unlt:QI (cc0) (const_int 0))))]
  "TARGET_68881 && !TARGET_68060"
{
  cc_status = cc_prev_status;
  return "fsoge %0";
})

(define_insn "*fsolt_1"
  [(set (match_operand:QI 0 "register_operand" "=d")
	(not:QI (unge:QI (cc0) (const_int 0))))]
  "TARGET_68881 && !TARGET_68060"
{
  cc_status = cc_prev_status;
  return "fsolt %0";
})

(define_insn "*fsole_1"
  [(set (match_operand:QI 0 "register_operand" "=d")
	(not:QI (ungt:QI (cc0) (const_int 0))))]
  "TARGET_68881 && !TARGET_68060"
{
  cc_status = cc_prev_status;
  return "fsole %0";
})
;; Basic conditional jump instructions.
 
;; Branch if a DImode value is zero.  The two 32-bit words are either
;; OR'ed into the scratch register (operand 2) or tested one after the
;; other with a local skip label.
(define_insn "beq0_di"
  [(set (pc)
    (if_then_else (eq (match_operand:DI 0 "general_operand" "d*ao,<>")
            (const_int 0))
        (label_ref (match_operand 1 "" ","))
        (pc)))
   (clobber (match_scratch:SI 2 "=d,d"))]
  ""
{
  CC_STATUS_INIT;
  /* Alternative 1 is an auto-inc/dec address: read both words through
     the same operand and OR them in the scratch register.  */
  if (which_alternative == 1)
    {
      if (MOTOROLA)
	return "move%.l %0,%2\;or%.l %0,%2\;jbeq %l1";
      else
	return "move%.l %0,%2\;or%.l %0,%2\;jeq %l1";
    }
  /* If the flags already describe operand 0, reuse them.  */
  if ((cc_prev_status.value1
      && rtx_equal_p (cc_prev_status.value1, operands[0]))
    || (cc_prev_status.value2
      && rtx_equal_p (cc_prev_status.value2, operands[0])))
    {
      cc_status = cc_prev_status;
      return MOTOROLA ? "jbeq %l1" : "jeq %l1";
    }
  /* operands[3] becomes the second SImode word of the value.  */
  if (GET_CODE (operands[0]) == REG)
    operands[3] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
  else
    operands[3] = adjust_address (operands[0], SImode, 4);
  if (! ADDRESS_REG_P (operands[0]))
    {
      /* When the scratch reg happens to overlap the value, one move
	 can be saved.  */
      if (reg_overlap_mentioned_p (operands[2], operands[0]))
	{
	  if (reg_overlap_mentioned_p (operands[2], operands[3]))
	    {
	      if (MOTOROLA)
		return "or%.l %0,%2\;jbeq %l1";
	      else
		return "or%.l %0,%2\;jeq %l1";
	    }
	  else
	    {
	      if (MOTOROLA)
		return "or%.l %3,%2\;jbeq %l1";
	      else
		return "or%.l %3,%2\;jeq %l1";
	    }
	}
      if (MOTOROLA)
	return "move%.l %0,%2\;or%.l %3,%2\;jbeq %l1";
      else
	return "move%.l %0,%2\;or%.l %3,%2\;jeq %l1";
    }
  /* Address registers cannot be OR'ed: test each word and skip over
     the branch with a freshly generated local label (operand 4).  */
  operands[4] = gen_label_rtx();
  if (TARGET_68020 || TARGET_COLDFIRE)
    {
      if (MOTOROLA)
	output_asm_insn ("tst%.l %0\;jbne %l4\;tst%.l %3\;jbeq %l1", operands);
      else
	output_asm_insn ("tst%.l %0\;jne %l4\;tst%.l %3\;jeq %l1", operands);
    }
  else
    {
      /* Pre-68020: tst.l does not accept address registers; use cmp.w.  */
      if (MOTOROLA)
	output_asm_insn ("cmp%.w #0,%0\;jbne %l4\;cmp%.w #0,%3\;jbeq %l1", operands);
      else
	output_asm_insn ("cmp%.w #0,%0\;jne %l4\;cmp%.w #0,%3\;jeq %l1", operands);
    }
  (*targetm.asm_out.internal_label) (asm_out_file, "L",
				     CODE_LABEL_NUMBER (operands[4]));
  return "";
})
 
;; Branch if a DImode value is nonzero.  Unlike beq0_di no skip label is
;; needed: each word can branch to the target directly.
(define_insn "bne0_di"
  [(set (pc)
    (if_then_else (ne (match_operand:DI 0 "general_operand" "do,*a")
            (const_int 0))
        (label_ref (match_operand 1 "" ","))
        (pc)))
   (clobber (match_scratch:SI 2 "=d,X"))]
  ""
{
  /* If the flags already describe operand 0, reuse them.  */
  if ((cc_prev_status.value1
      && rtx_equal_p (cc_prev_status.value1, operands[0]))
    || (cc_prev_status.value2
      && rtx_equal_p (cc_prev_status.value2, operands[0])))
    {
      cc_status = cc_prev_status;
      return MOTOROLA ? "jbne %l1" : "jne %l1";
    }
  CC_STATUS_INIT;
  /* operands[3] becomes the second SImode word of the value.  */
  if (GET_CODE (operands[0]) == REG)
    operands[3] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
  else
    operands[3] = adjust_address (operands[0], SImode, 4);
  if (!ADDRESS_REG_P (operands[0]))
    {
      /* When the scratch reg overlaps the value, one move can be saved.  */
      if (reg_overlap_mentioned_p (operands[2], operands[0]))
	{
	  if (reg_overlap_mentioned_p (operands[2], operands[3]))
	    {
	      if (MOTOROLA)
		return "or%.l %0,%2\;jbne %l1";
	      else
		return "or%.l %0,%2\;jne %l1";
	    }
	  else
	    {
	      if (MOTOROLA)
		return "or%.l %3,%2\;jbne %l1";
	      else
		return "or%.l %3,%2\;jne %l1";
	    }
	}
      if (MOTOROLA)
	return "move%.l %0,%2\;or%.l %3,%2\;jbne %l1";
      else
	return "move%.l %0,%2\;or%.l %3,%2\;jne %l1";
    }
  if (TARGET_68020 || TARGET_COLDFIRE)
    {
      if (MOTOROLA)
	return "tst%.l %0\;jbne %l1\;tst%.l %3\;jbne %l1";
      else
	return "tst%.l %0\;jne %l1\;tst%.l %3\;jne %l1";
    }
  else
    {
      /* Pre-68020: tst.l does not accept address registers; use cmp.w.  */
      if (MOTOROLA)
	return "cmp%.w #0,%0\;jbne %l1\;cmp%.w #0,%3\;jbne %l1";
      else
	return "cmp%.w #0,%0\;jne %l1\;cmp%.w #0,%3\;jne %l1";
    }
})
 
;; Branch if a DImode value is >= 0.  Only one word is tested -- the
;; sign of the value is determined by %0 alone (presumably the most
;; significant word of the DImode operand; confirm via print_operand).
(define_insn "bge0_di"
  [(set (pc)
    (if_then_else (ge (match_operand:DI 0 "general_operand" "ro")
            (const_int 0))
        (label_ref (match_operand 1 "" ""))
        (pc)))]
  ""
{
  if ((cc_prev_status.value1
      && rtx_equal_p (cc_prev_status.value1, operands[0]))
    || (cc_prev_status.value2
      && rtx_equal_p (cc_prev_status.value2, operands[0])))
    {
      /* Flags already describe operand 0; honor a reversed compare.  */
      cc_status = cc_prev_status;
      if (cc_status.flags & CC_REVERSED)
	{
	  return MOTOROLA ? "jble %l1" : "jle %l1";
	}
      else
	{
	  return MOTOROLA ? "jbpl %l1" : "jpl %l1";
	}
    }
  CC_STATUS_INIT;
  if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (operands[0]))
    output_asm_insn("tst%.l %0", operands);
  else
    {
      /* On an address reg, cmpw may replace cmpl.  */
      output_asm_insn("cmp%.w #0,%0", operands);
    }
  return MOTOROLA ? "jbpl %l1" : "jpl %l1";
})
 
;; Branch if a DImode value is < 0; sign test of %0 only (see bge0_di).
(define_insn "blt0_di"
  [(set (pc)
    (if_then_else (lt (match_operand:DI 0 "general_operand" "ro")
            (const_int 0))
        (label_ref (match_operand 1 "" ""))
        (pc)))]
  ""
{
  if ((cc_prev_status.value1
      && rtx_equal_p (cc_prev_status.value1, operands[0]))
    || (cc_prev_status.value2
      && rtx_equal_p (cc_prev_status.value2, operands[0])))
    {
      /* Flags already describe operand 0; honor a reversed compare.  */
      cc_status = cc_prev_status;
      if (cc_status.flags & CC_REVERSED)
	{
	  return MOTOROLA ? "jbgt %l1" : "jgt %l1";
	}
      else
	{
	  return MOTOROLA ? "jbmi %l1" : "jmi %l1";
	}
    }
  CC_STATUS_INIT;
  if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (operands[0]))
    output_asm_insn("tst%.l %0", operands);
  else
    {
      /* On an address reg, cmpw may replace cmpl.  */
      output_asm_insn("cmp%.w #0,%0", operands);
    }

  return MOTOROLA ? "jbmi %l1" : "jmi %l1";
})
 
;; Basic conditional branches on cc0.  OUTPUT_JUMP picks the integer,
;; 68881, or third template; MOTOROLA vs MIT syntax differ in mnemonics.
(define_insn "beq"
  [(set (pc)
	(if_then_else (eq (cc0)
			  (const_int 0))
		      (label_ref (match_operand 0 "" ""))
		      (pc)))]
  ""
{
  if (MOTOROLA)
    OUTPUT_JUMP ("jbeq %l0", "fbeq %l0", "jbeq %l0");
  else
    OUTPUT_JUMP ("jeq %l0", "fjeq %l0", "jeq %l0");
})

(define_insn "bne"
  [(set (pc)
	(if_then_else (ne (cc0)
			  (const_int 0))
		      (label_ref (match_operand 0 "" ""))
		      (pc)))]
  ""
{
  if (MOTOROLA)
    OUTPUT_JUMP ("jbne %l0", "fbne %l0", "jbne %l0");
  else
    OUTPUT_JUMP ("jne %l0", "fjne %l0", "jne %l0");
})

;; Signed greater-than; no third template (as with sgt).
(define_insn "bgt"
  [(set (pc)
	(if_then_else (gt (cc0)
			  (const_int 0))
		      (label_ref (match_operand 0 "" ""))
		      (pc)))]
  ""
{
  if (MOTOROLA)
    OUTPUT_JUMP ("jbgt %l0", "fbgt %l0", 0);
  else
    OUTPUT_JUMP ("jgt %l0", "fjgt %l0", 0);
})

;; Unsigned greater-than: branch if "high".
(define_insn "bgtu"
  [(set (pc)
	(if_then_else (gtu (cc0)
			   (const_int 0))
		      (label_ref (match_operand 0 "" ""))
		      (pc)))]
  ""
{
  return MOTOROLA ? "jbhi %l0" : "jhi %l0";
})
 
;; Remaining basic conditional branches on cc0.  Unsigned conditions map
;; to carry-based branches (jcs/jcc/jls); signed conditions fall back to
;; sign-flag branches (jmi/jpl) in the third OUTPUT_JUMP template.
(define_insn "blt"
  [(set (pc)
	(if_then_else (lt (cc0)
			  (const_int 0))
		      (label_ref (match_operand 0 "" ""))
		      (pc)))]
  ""
{
  if (MOTOROLA)
    OUTPUT_JUMP ("jblt %l0", "fblt %l0", "jbmi %l0");
  else
    OUTPUT_JUMP ("jlt %l0", "fjlt %l0", "jmi %l0");
})

;; Unsigned less-than: branch on carry set.
(define_insn "bltu"
  [(set (pc)
	(if_then_else (ltu (cc0)
			   (const_int 0))
		      (label_ref (match_operand 0 "" ""))
		      (pc)))]
  ""
{
  return MOTOROLA ? "jbcs %l0" : "jcs %l0";
})

(define_insn "bge"
  [(set (pc)
	(if_then_else (ge (cc0)
			  (const_int 0))
		      (label_ref (match_operand 0 "" ""))
		      (pc)))]
  ""
{
  if (MOTOROLA)
    OUTPUT_JUMP ("jbge %l0", "fbge %l0", "jbpl %l0");
  else
    OUTPUT_JUMP ("jge %l0", "fjge %l0", "jpl %l0");
})

;; Unsigned greater-or-equal: branch on carry clear.
(define_insn "bgeu"
  [(set (pc)
	(if_then_else (geu (cc0)
			   (const_int 0))
		      (label_ref (match_operand 0 "" ""))
		      (pc)))]
  ""
{
  return MOTOROLA ? "jbcc %l0" : "jcc %l0";
})

;; Signed less-or-equal; no third template (as with sle).
(define_insn "ble"
  [(set (pc)
	(if_then_else (le (cc0)
			  (const_int 0))
		      (label_ref (match_operand 0 "" ""))
		      (pc)))]
  ""
{
  if (MOTOROLA)
    OUTPUT_JUMP ("jble %l0", "fble %l0", 0);
  else
    OUTPUT_JUMP ("jle %l0", "fjle %l0", 0);
})

;; Unsigned less-or-equal: branch if "low or same".
(define_insn "bleu"
  [(set (pc)
	(if_then_else (leu (cc0)
			   (const_int 0))
		      (label_ref (match_operand 0 "" ""))
		      (pc)))]
  ""
{
  return MOTOROLA ? "jbls %l0" : "jls %l0";
})
 
;; IEEE ordered/unordered conditional branches.  These are 68881-only and
;; each asserts that the previous compare set the FP condition codes
;; (CC_IN_68881).
(define_insn "bordered"
  [(set (pc)
	(if_then_else (ordered (cc0) (const_int 0))
		      (label_ref (match_operand 0 "" ""))
		      (pc)))]
  "TARGET_68881"
{
  gcc_assert (cc_prev_status.flags & CC_IN_68881);
  return MOTOROLA ? "fbor %l0" : "fjor %l0";
})

(define_insn "bunordered"
  [(set (pc)
	(if_then_else (unordered (cc0) (const_int 0))
		      (label_ref (match_operand 0 "" ""))
		      (pc)))]
  "TARGET_68881"
{
  gcc_assert (cc_prev_status.flags & CC_IN_68881);
  return MOTOROLA ? "fbun %l0" : "fjun %l0";
})

(define_insn "buneq"
  [(set (pc)
	(if_then_else (uneq (cc0) (const_int 0))
		      (label_ref (match_operand 0 "" ""))
		      (pc)))]
  "TARGET_68881"
{
  gcc_assert (cc_prev_status.flags & CC_IN_68881);
  return MOTOROLA ? "fbueq %l0" : "fjueq %l0";
})

(define_insn "bunge"
  [(set (pc)
	(if_then_else (unge (cc0) (const_int 0))
		      (label_ref (match_operand 0 "" ""))
		      (pc)))]
  "TARGET_68881"
{
  gcc_assert (cc_prev_status.flags & CC_IN_68881);
  return MOTOROLA ? "fbuge %l0" : "fjuge %l0";
})

(define_insn "bungt"
  [(set (pc)
	(if_then_else (ungt (cc0) (const_int 0))
		      (label_ref (match_operand 0 "" ""))
		      (pc)))]
  "TARGET_68881"
{
  gcc_assert (cc_prev_status.flags & CC_IN_68881);
  return MOTOROLA ? "fbugt %l0" : "fjugt %l0";
})

(define_insn "bunle"
  [(set (pc)
	(if_then_else (unle (cc0) (const_int 0))
		      (label_ref (match_operand 0 "" ""))
		      (pc)))]
  "TARGET_68881"
{
  gcc_assert (cc_prev_status.flags & CC_IN_68881);
  return MOTOROLA ? "fbule %l0" : "fjule %l0";
})

(define_insn "bunlt"
  [(set (pc)
	(if_then_else (unlt (cc0) (const_int 0))
		      (label_ref (match_operand 0 "" ""))
		      (pc)))]
  "TARGET_68881"
{
  gcc_assert (cc_prev_status.flags & CC_IN_68881);
  return MOTOROLA ? "fbult %l0" : "fjult %l0";
})

;; ltgt: ordered and not equal ("ogl").
(define_insn "bltgt"
  [(set (pc)
	(if_then_else (ltgt (cc0) (const_int 0))
		      (label_ref (match_operand 0 "" ""))
		      (pc)))]
  "TARGET_68881"
{
  gcc_assert (cc_prev_status.flags & CC_IN_68881);
  return MOTOROLA ? "fbogl %l0" : "fjogl %l0";
})
;; Negated conditional jump instructions.
 
;; Negated conditional jumps: the (pc) arm comes first, so each pattern
;; emits the branch for the OPPOSITE condition of its rtl comparison.
;; eq negated -> branch on ne.
(define_insn ""
  [(set (pc)
	(if_then_else (eq (cc0)
			  (const_int 0))
		      (pc)
		      (label_ref (match_operand 0 "" ""))))]
  ""
{
  if (MOTOROLA)
    OUTPUT_JUMP ("jbne %l0", "fbne %l0", "jbne %l0");
  else
    OUTPUT_JUMP ("jne %l0", "fjne %l0", "jne %l0");
})

;; ne negated -> branch on eq.
(define_insn ""
  [(set (pc)
	(if_then_else (ne (cc0)
			  (const_int 0))
		      (pc)
		      (label_ref (match_operand 0 "" ""))))]
  ""
{
  if (MOTOROLA)
    OUTPUT_JUMP ("jbeq %l0", "fbeq %l0", "jbeq %l0");
  else
    OUTPUT_JUMP ("jeq %l0", "fjeq %l0", "jeq %l0");
})

;; gt negated -> le (integer) / "not greater than" (FP).
(define_insn ""
  [(set (pc)
	(if_then_else (gt (cc0)
			  (const_int 0))
		      (pc)
		      (label_ref (match_operand 0 "" ""))))]
  ""
{
  if (MOTOROLA)
    OUTPUT_JUMP ("jble %l0", "fbngt %l0", 0);
  else
    OUTPUT_JUMP ("jle %l0", "fjngt %l0", 0);
})

;; gtu negated -> branch if "low or same".
(define_insn ""
  [(set (pc)
	(if_then_else (gtu (cc0)
			   (const_int 0))
		      (pc)
		      (label_ref (match_operand 0 "" ""))))]
  ""
{
  return MOTOROLA ? "jbls %l0" : "jls %l0";
})

;; lt negated -> ge / "not less than" / plus.
(define_insn ""
  [(set (pc)
	(if_then_else (lt (cc0)
			  (const_int 0))
		      (pc)
		      (label_ref (match_operand 0 "" ""))))]
  ""
{
  if (MOTOROLA)
    OUTPUT_JUMP ("jbge %l0", "fbnlt %l0", "jbpl %l0");
  else
    OUTPUT_JUMP ("jge %l0", "fjnlt %l0", "jpl %l0");
})

;; ltu negated -> branch on carry clear.
(define_insn ""
  [(set (pc)
	(if_then_else (ltu (cc0)
			   (const_int 0))
		      (pc)
		      (label_ref (match_operand 0 "" ""))))]
  ""
{
  return MOTOROLA ? "jbcc %l0" : "jcc %l0";
})

;; ge negated -> lt / "not greater or equal" / minus.
(define_insn ""
  [(set (pc)
	(if_then_else (ge (cc0)
			  (const_int 0))
		      (pc)
		      (label_ref (match_operand 0 "" ""))))]
  ""
{
  if (MOTOROLA)
    OUTPUT_JUMP ("jblt %l0", "fbnge %l0", "jbmi %l0");
  else
    OUTPUT_JUMP ("jlt %l0", "fjnge %l0", "jmi %l0");
})

;; geu negated -> branch on carry set.
(define_insn ""
  [(set (pc)
	(if_then_else (geu (cc0)
			   (const_int 0))
		      (pc)
		      (label_ref (match_operand 0 "" ""))))]
  ""
{
  return MOTOROLA ? "jbcs %l0" : "jcs %l0";
})

;; le negated -> gt / "not less or equal".
(define_insn ""
  [(set (pc)
	(if_then_else (le (cc0)
			  (const_int 0))
		      (pc)
		      (label_ref (match_operand 0 "" ""))))]
  ""
{
  if (MOTOROLA)
    OUTPUT_JUMP ("jbgt %l0", "fbnle %l0", 0);
  else
    OUTPUT_JUMP ("jgt %l0", "fjnle %l0", 0);
})

;; leu negated -> branch if "high".
(define_insn ""
  [(set (pc)
	(if_then_else (leu (cc0)
			   (const_int 0))
		      (pc)
		      (label_ref (match_operand 0 "" ""))))]
  ""
{
  return MOTOROLA ? "jbhi %l0" : "jhi %l0";
})
 
;; Reversed (fall-through) forms of the IEEE branches: each emits the
;; branch for the logical complement of its rtl condition, e.g.
;; !ordered -> unordered, !uneq -> ogl, !unge -> olt.
(define_insn "*bordered_rev"
  [(set (pc)
	(if_then_else (ordered (cc0) (const_int 0))
		      (pc)
		      (label_ref (match_operand 0 "" ""))))]
  "TARGET_68881"
{
  gcc_assert (cc_prev_status.flags & CC_IN_68881);
  return MOTOROLA ? "fbun %l0" : "fjun %l0";
})

(define_insn "*bunordered_rev"
  [(set (pc)
	(if_then_else (unordered (cc0) (const_int 0))
		      (pc)
		      (label_ref (match_operand 0 "" ""))))]
  "TARGET_68881"
{
  gcc_assert (cc_prev_status.flags & CC_IN_68881);
  return MOTOROLA ? "fbor %l0" : "fjor %l0";
})

(define_insn "*buneq_rev"
  [(set (pc)
	(if_then_else (uneq (cc0) (const_int 0))
		      (pc)
		      (label_ref (match_operand 0 "" ""))))]
  "TARGET_68881"
{
  gcc_assert (cc_prev_status.flags & CC_IN_68881);
  return MOTOROLA ? "fbogl %l0" : "fjogl %l0";
})

(define_insn "*bunge_rev"
  [(set (pc)
	(if_then_else (unge (cc0) (const_int 0))
		      (pc)
		      (label_ref (match_operand 0 "" ""))))]
  "TARGET_68881"
{
  gcc_assert (cc_prev_status.flags & CC_IN_68881);
  return MOTOROLA ? "fbolt %l0" : "fjolt %l0";
})

(define_insn "*bungt_rev"
  [(set (pc)
	(if_then_else (ungt (cc0) (const_int 0))
		      (pc)
		      (label_ref (match_operand 0 "" ""))))]
  "TARGET_68881"
{
  gcc_assert (cc_prev_status.flags & CC_IN_68881);
  return MOTOROLA ? "fbole %l0" : "fjole %l0";
})

(define_insn "*bunle_rev"
  [(set (pc)
	(if_then_else (unle (cc0) (const_int 0))
		      (pc)
		      (label_ref (match_operand 0 "" ""))))]
  "TARGET_68881"
{
  gcc_assert (cc_prev_status.flags & CC_IN_68881);
  return MOTOROLA ? "fbogt %l0" : "fjogt %l0";
})

(define_insn "*bunlt_rev"
  [(set (pc)
	(if_then_else (unlt (cc0) (const_int 0))
		      (pc)
		      (label_ref (match_operand 0 "" ""))))]
  "TARGET_68881"
{
  gcc_assert (cc_prev_status.flags & CC_IN_68881);
  return MOTOROLA ? "fboge %l0" : "fjoge %l0";
})

(define_insn "*bltgt_rev"
  [(set (pc)
	(if_then_else (ltgt (cc0) (const_int 0))
		      (pc)
		      (label_ref (match_operand 0 "" ""))))]
  "TARGET_68881"
{
  gcc_assert (cc_prev_status.flags & CC_IN_68881);
  return MOTOROLA ? "fbueq %l0" : "fjueq %l0";
})
;; Unconditional and other jump instructions
;; Unconditional jump to a label.
(define_insn "jump"
  [(set (pc)
	(label_ref (match_operand 0 "" "")))]
  ""
{
  return MOTOROLA ? "jbra %l0" : "jra %l0";
})

;; Jump through a dispatch table.  With PC-relative case vectors the
;; table entries are HImode offsets, so the address is rewritten as
;; pc + sign_extend(entry) to match the relative-dispatch insn below.
(define_expand "tablejump"
  [(parallel [(set (pc) (match_operand 0 "" ""))
	      (use (label_ref (match_operand 1 "" "")))])]
  ""
{
#ifdef CASE_VECTOR_PC_RELATIVE
    operands[0] = gen_rtx_PLUS (SImode, pc_rtx,
				gen_rtx_SIGN_EXTEND (SImode, operands[0]));
#endif
})
 
;; Jump to variable address from dispatch table of absolute addresses.
;; Jump to variable address from dispatch table of absolute addresses.
(define_insn ""
  [(set (pc) (match_operand:SI 0 "register_operand" "a"))
   (use (label_ref (match_operand 1 "" "")))]
  ""
{
  return MOTOROLA ? "jmp (%0)" : "jmp %0@";
})

;; Jump to variable address from dispatch table of relative addresses.
;; The +2 displacement skips presumably the table-base adjustment made
;; where the vector is emitted -- confirm against ASM_OUTPUT_CASE code.
(define_insn ""
  [(set (pc)
	(plus:SI (pc)
		 (sign_extend:SI (match_operand:HI 0 "register_operand" "r"))))
   (use (label_ref (match_operand 1 "" "")))]
  ""
{
#ifdef ASM_RETURN_CASE_JUMP
  ASM_RETURN_CASE_JUMP;
#else
  if (TARGET_COLDFIRE)
    {
      /* ColdFire lacks word-index addressing: extend the index first
	 unless it is already in an address register.  */
      if (ADDRESS_REG_P (operands[0]))
	return MOTOROLA ? "jmp (2,pc,%0.l)" : "jmp pc@(2,%0:l)";
      else if (MOTOROLA)
	return "ext%.l %0\;jmp (2,pc,%0.l)";
      else
	return "extl %0\;jmp pc@(2,%0:l)";
    }
  else
    return MOTOROLA ? "jmp (2,pc,%0.w)" : "jmp pc@(2,%0:w)";
#endif
})
 
;; Decrement-and-branch insns.
;; Decrement-and-branch: branch if the counter was nonzero before the
;; decrement.  A HImode data register counter maps directly onto dbra;
;; other operands use explicit subq/compare sequences.
(define_insn ""
  [(set (pc)
	(if_then_else
	 (ne (match_operand:HI 0 "nonimmediate_operand" "+d*g")
	     (const_int 0))
	 (label_ref (match_operand 1 "" ""))
	 (pc)))
   (set (match_dup 0)
	(plus:HI (match_dup 0)
		 (const_int -1)))]
  "!TARGET_COLDFIRE"
{
  CC_STATUS_INIT;
  if (DATA_REG_P (operands[0]))
    return "dbra %0,%l1";
  if (GET_CODE (operands[0]) == MEM)
    return MOTOROLA ?
      "subq%.w #1,%0\;jbcc %l1" :
      "subqw #1,%0\;jcc %l1";
  return MOTOROLA ?
    "subq%.w #1,%0\;cmp%.w #-1,%0\;jbne %l1" :
    "subqw #1,%0\;cmpw #-1,%0\;jne %l1";
})

;; SImode variant: dbra only decrements a 16-bit counter, so the data
;; register case repairs the low word and re-tests for the full 32 bits.
(define_insn ""
  [(set (pc)
	(if_then_else
	 (ne (match_operand:SI 0 "nonimmediate_operand" "+d*g")
	     (const_int 0))
	 (label_ref (match_operand 1 "" ""))
	 (pc)))
   (set (match_dup 0)
	(plus:SI (match_dup 0)
		 (const_int -1)))]
  "!TARGET_COLDFIRE"
{
  CC_STATUS_INIT;
  if (DATA_REG_P (operands[0]))
    return MOTOROLA ?
      "dbra %0,%l1\;clr%.w %0\;subq%.l #1,%0\;jbcc %l1" :
      "dbra %0,%l1\;clr%.w %0\;subq%.l #1,%0\;jcc %l1";
  if (GET_CODE (operands[0]) == MEM)
    return MOTOROLA ?
      "subq%.l #1,%0\;jbcc %l1" :
      "subq%.l #1,%0\;jcc %l1";
  return MOTOROLA ?
    "subq.l #1,%0\;cmp.l #-1,%0\;jbne %l1" :
    "subql #1,%0\;cmpl #-1,%0\;jne %l1";
})
 
;; Two dbra patterns that use REG_NOTES info generated by strength_reduce.
 
;; dbra pattern applicable only when a REG_NONNEG note (generated by
;; strength reduction) guarantees the counter is non-negative, so
;; "counter-1 >= 0" matches dbra's "branch until -1" semantics.
(define_insn ""
  [(set (pc)
	(if_then_else
	 (ge (plus:HI (match_operand:HI 0 "nonimmediate_operand" "+d*am")
		      (const_int -1))
	     (const_int 0))
	 (label_ref (match_operand 1 "" ""))
	 (pc)))
   (set (match_dup 0)
	(plus:HI (match_dup 0)
		 (const_int -1)))]
  "!TARGET_COLDFIRE && find_reg_note (insn, REG_NONNEG, 0)"
{
  CC_STATUS_INIT;
  if (DATA_REG_P (operands[0]))
    return "dbra %0,%l1";
  if (GET_CODE (operands[0]) == MEM)
    return MOTOROLA ?
      "subq%.w #1,%0\;jbcc %l1" :
      "subq%.w #1,%0\;jcc %l1";
  return MOTOROLA ?
    "subq.w #1,%0\;cmp.w #-1,%0\;jbne %l1" :
    "subqw #1,%0\;cmpw #-1,%0\;jne %l1";
})

;; Named expander for the canonical looping idiom; matched by the
;; REG_NONNEG insn patterns in this file.
(define_expand "decrement_and_branch_until_zero"
  [(parallel [(set (pc)
		   (if_then_else
		    (ge (plus:SI (match_operand:SI 0 "nonimmediate_operand" "")
				 (const_int -1))
			(const_int 0))
		    (label_ref (match_operand 1 "" ""))
		    (pc)))
	      (set (match_dup 0)
		   (plus:SI (match_dup 0)
			    (const_int -1)))])]
  ""
  "")
 
;; SImode REG_NONNEG decrement-and-branch; the data register case uses
;; dbra for the low word, then repairs and re-tests the full 32 bits.
(define_insn ""
  [(set (pc)
	(if_then_else
	 (ge (plus:SI (match_operand:SI 0 "nonimmediate_operand" "+d*am")
		      (const_int -1))
	     (const_int 0))
	 (label_ref (match_operand 1 "" ""))
	 (pc)))
   (set (match_dup 0)
	(plus:SI (match_dup 0)
		 (const_int -1)))]
  "!TARGET_COLDFIRE && find_reg_note (insn, REG_NONNEG, 0)"
{
  CC_STATUS_INIT;
  if (DATA_REG_P (operands[0]))
    return MOTOROLA ?
      "dbra %0,%l1\;clr%.w %0\;subq%.l #1,%0\;jbcc %l1" :
      "dbra %0,%l1\;clr%.w %0\;subql #1,%0\;jcc %l1";
  if (GET_CODE (operands[0]) == MEM)
    return MOTOROLA ?
      "subq%.l #1,%0\;jbcc %l1" :
      "subql #1,%0\;jcc %l1";
  return MOTOROLA ?
    "subq.l #1,%0\;cmp.l #-1,%0\;jbne %l1" :
    "subql #1,%0\;cmpl #-1,%0\;jne %l1";
})
 
 
;; For PIC calls, in order to be able to support
;; dynamic linker LAZY BINDING, all the procedure calls need to go
;; through the PLT (Procedure Linkage Table) section in PIC mode.
;;
;; PIC calls are handled by loading the address of the function into a
;; register (via movsi), then emitting a register indirect call using
;; the "jsr" function call syntax.
;;
;; When outputting MIT syntax (e.g. on Suns), we add a bogus extra
;; operand to the jbsr statement to indicate that this call should
;; go through the PLT (why? because this is the way that Sun does it).
;;
;; We have different patterns for PIC calls and non-PIC calls. The
;; different patterns are only used to choose the right syntax.
;;
;; The svr4 m68k assembler recognizes this syntax: `bsr FUNC@PLTPC' and it
;; will create the correct relocation entry (R_68K_PLT32) for `FUNC',
;; which tells the link editor to create an entry for `FUNC' in the PLT
;; section at link time.  However, all references to global objects are
;; still made using `OBJ@GOT'.  So the goal here is to output the function
;; call operand as `FUNC@PLTPC', but to output object operands as `OBJ@GOT'.
;; We need to have a way to differentiate these two different operands.
;;
;; The strategy I use here is to use SYMBOL_REF_FLAG to differentiate
;; these two different operands. The macro LEGITIMATE_PIC_OPERAND_P needs
;; to be changed to recognize function calls symbol_ref operand as a valid
;; PIC operand (by checking whether SYMBOL_REF_FLAG is set).  This keeps
;; the compiler from loading this symbol_ref operand into a register.
;; Remember, the operand "foo@PLTPC" cannot be called via jsr directly
;; since the value is a PC relative offset, not a real address.
;;
;; All global objects are treated in a similar way as on SUN3.  The only
;; difference is: on m68k svr4, the reference of such global object needs
;; to end with a suffix "@GOT" so the assembler and linker know to create
;; an entry for it in GOT (Global Offset Table) section. This is done in
;; m68k.c.
 
;; Call subroutine with no return value.
;; Call subroutine with no return value.
(define_expand "call"
  [(call (match_operand:QI 0 "memory_operand" "")
	 (match_operand:SI 1 "general_operand" ""))]
  ;; Operand 1 not really used on the m68000.

  ""
{
  /* For PIC, flag the symbol so later code emits it via the PLT (see
     the PIC-call commentary earlier in this file).  */
  if (flag_pic && GET_CODE (XEXP (operands[0], 0)) == SYMBOL_REF)
    SYMBOL_REF_FLAG (XEXP (operands[0], 0)) = 1;
})

;; This is a normal (non-PIC) call sequence.
(define_insn ""
  [(call (match_operand:QI 0 "memory_operand" "o")
	 (match_operand:SI 1 "general_operand" "g"))]
  ;; Operand 1 not really used on the m68000.

  "! flag_pic"
{
#if MOTOROLA && !defined (USE_GAS)
  return "jsr %0";
#else
  return "jbsr %0";
#endif
})

;; This is a PIC call sequence; m68k_output_pic_call emits the code.
(define_insn ""
  [(call (match_operand:QI 0 "memory_operand" "o")
	 (match_operand:SI 1 "general_operand" "g"))]
  ;; Operand 1 not really used on the m68000.

  "flag_pic"
{
  m68k_output_pic_call(operands[0]);
  return "";
})
 
;; Call subroutine, returning value in operand 0
;; (which must be a hard register).
;; See comments before "call" regarding PIC calls.
;; Call subroutine, returning value in operand 0
;; (which must be a hard register).
;; See comments before "call" regarding PIC calls.
(define_expand "call_value"
  [(set (match_operand 0 "" "")
	(call (match_operand:QI 1 "memory_operand" "")
	      (match_operand:SI 2 "general_operand" "")))]
  ;; Operand 2 not really used on the m68000.
  ""
{
  /* For PIC, flag the symbol so later code emits it via the PLT.  */
  if (flag_pic && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF)
    SYMBOL_REF_FLAG (XEXP (operands[1], 0)) = 1;
})

;; This is a normal (non-PIC) call_value.
(define_insn ""
  [(set (match_operand 0 "" "=rf")
	(call (match_operand:QI 1 "memory_operand" "o")
	      (match_operand:SI 2 "general_operand" "g")))]
  ;; Operand 2 not really used on the m68000.
  "! flag_pic"
{
#if MOTOROLA && !defined (USE_GAS)
  return "jsr %1";
#else
  return "jbsr %1";
#endif
})

;; This is a PIC call_value; m68k_output_pic_call emits the code.
(define_insn ""
  [(set (match_operand 0 "" "=rf")
	(call (match_operand:QI 1 "memory_operand" "o")
	      (match_operand:SI 2 "general_operand" "g")))]
  ;; Operand 2 not really used on the m68000.
  "flag_pic"
{
  m68k_output_pic_call(operands[1]);
  return "";
})
 
;; Call subroutine returning any type.
 
;; Call a subroutine whose return type is unknown: emit the call, then
;; copy every possible result register (described by operand 2's vector
;; of SETs) into the result block, followed by a blockage.
(define_expand "untyped_call"
  [(parallel [(call (match_operand 0 "" "")
		    (const_int 0))
	      (match_operand 1 "" "")
	      (match_operand 2 "" "")])]
  "NEEDS_UNTYPED_CALL"
{
  int i;

  emit_call_insn (GEN_CALL (operands[0], const0_rtx, NULL, const0_rtx));

  for (i = 0; i < XVECLEN (operands[2], 0); i++)
    {
      rtx set = XVECEXP (operands[2], 0, i);
      emit_move_insn (SET_DEST (set), SET_SRC (set));
    }

  /* The optimizer does not know that the call sets the function value
     registers we stored in the result block.  We avoid problems by
     claiming that all hard registers are used and clobbered at this
     point.  */
  emit_insn (gen_blockage ());

  DONE;
})
 
;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
;; all of memory. This blocks insns from being moved across this point.
 
(define_insn "blockage"
[(unspec_volatile [(const_int 0)] UNSPECV_BLOCKAGE)]
""
"")
 
(define_insn "nop"
[(const_int 0)]
""
"nop")
 
;; Used for frameless functions which save no regs and allocate no locals.
(define_insn "return"
[(return)]
"USE_RETURN_INSN"
{
if (current_function_pops_args == 0)
return "rts";
operands[0] = GEN_INT (current_function_pops_args);
return "rtd %0";
})
 
(define_insn "indirect_jump"
[(set (pc) (match_operand:SI 0 "address_operand" "p"))]
""
"jmp %a0")
;; This should not be used unless the add/sub insns can't be.
 
(define_insn ""
[(set (match_operand:SI 0 "nonimmediate_operand" "=a")
(match_operand:QI 1 "address_operand" "p"))]
""
{
/* Recognize an insn that refers to a table of offsets. Such an insn will
need to refer to a label on the insn. So output one. Use the
label-number of the table of offsets to generate this label. This code,
and similar code above, assumes that there will be at most one reference
to each table. */
if (GET_CODE (operands[1]) == PLUS
&& GET_CODE (XEXP (operands[1], 1)) == LABEL_REF
&& GET_CODE (XEXP (operands[1], 0)) != PLUS)
{
rtx labelref = XEXP (operands[1], 1);
if (MOTOROLA)
asm_fprintf (asm_out_file, "\\t.set %LLI%d,.+2\\n",
CODE_LABEL_NUMBER (XEXP (labelref, 0)));
else
(*targetm.asm_out.internal_label) (asm_out_file, "LI",
CODE_LABEL_NUMBER (XEXP (labelref, 0)));
}
return "lea %a1,%0";
})
;; This is the first machine-dependent peephole optimization.
;; It is useful when a floating value is returned from a function call
;; and then is moved into an FP register.
;; But it is mainly intended to test the support for these optimizations.
 
(define_peephole
[(set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG) (const_int 4)))
(set (match_operand:DF 0 "register_operand" "=f")
(match_operand:DF 1 "register_operand" "ad"))]
"FP_REG_P (operands[0]) && ! FP_REG_P (operands[1])"
{
rtx xoperands[2];
/* The low word of the DF value is in the next integer register.  */
xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
/* Store the low word over the slot freed by the matched 4-byte stack
   pop, push the high word, then pop both as one double into the FP
   register -- folding away the separate stack adjustment.  */
output_asm_insn ("move%.l %1,%@", xoperands);
output_asm_insn ("move%.l %1,%-", operands);
return "fmove%.d %+,%0";
})
 
;; Optimize a stack-adjust followed by a push of an argument.
;; This is said to happen frequently with -msoft-float
;; when there are consecutive library calls.
 
(define_peephole
[(set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG)
(match_operand:SI 0 "const_int_operand" "n")))
(set (match_operand:SF 1 "push_operand" "=m")
(match_operand:SF 2 "general_operand" "rmfF"))]
"INTVAL (operands[0]) >= 4
&& ! reg_mentioned_p (stack_pointer_rtx, operands[2])"
{
if (INTVAL (operands[0]) > 4)
{
rtx xoperands[2];
xoperands[0] = stack_pointer_rtx;
xoperands[1] = GEN_INT (INTVAL (operands[0]) - 4);
if (INTVAL (xoperands[1]) <= 8)
{
if (!TARGET_COLDFIRE)
output_asm_insn ("addq%.w %1,%0", xoperands);
else
output_asm_insn ("addq%.l %1,%0", xoperands);
}
else if (TARGET_CPU32 && INTVAL (xoperands[1]) <= 16)
{
xoperands[1] = GEN_INT (INTVAL (xoperands[1]) - 8);
output_asm_insn ("addq%.w #8,%0\;addq%.w %1,%0", xoperands);
}
else if (INTVAL (xoperands[1]) <= 0x7FFF)
{
if (TARGET_68040)
output_asm_insn ("add%.w %1,%0", xoperands);
else if (MOTOROLA)
output_asm_insn ("lea (%c1,%0),%0", xoperands);
else
output_asm_insn ("lea %0@(%c1),%0", xoperands);
}
else
output_asm_insn ("add%.l %1,%0", xoperands);
}
if (FP_REG_P (operands[2]))
return "fmove%.s %2,%@";
return "move%.l %2,%@";
})
 
;; Speed up stack adjust followed by a fullword fixedpoint push.
 
(define_peephole
[(set (reg:SI SP_REG) (plus:SI (reg:SI SP_REG)
(match_operand:SI 0 "const_int_operand" "n")))
(set (match_operand:SI 1 "push_operand" "=m")
(match_operand:SI 2 "general_operand" "g"))]
"INTVAL (operands[0]) >= 4
&& ! reg_mentioned_p (stack_pointer_rtx, operands[2])"
{
if (INTVAL (operands[0]) > 4)
{
rtx xoperands[2];
xoperands[0] = stack_pointer_rtx;
xoperands[1] = GEN_INT (INTVAL (operands[0]) - 4);
if (INTVAL (xoperands[1]) <= 8)
{
if (!TARGET_COLDFIRE)
output_asm_insn ("addq%.w %1,%0", xoperands);
else
output_asm_insn ("addq%.l %1,%0", xoperands);
}
else if (TARGET_CPU32 && INTVAL (xoperands[1]) <= 16)
{
xoperands[1] = GEN_INT (INTVAL (xoperands[1]) - 8);
output_asm_insn ("addq%.w #8,%0\;addq%.w %1,%0", xoperands);
}
else if (INTVAL (xoperands[1]) <= 0x7FFF)
{
if (TARGET_68040)
output_asm_insn ("add%.w %1,%0", xoperands);
else if (MOTOROLA)
output_asm_insn ("lea (%c1,%0),%0", xoperands);
else
output_asm_insn ("lea %0@(%c1),%0", xoperands);
}
else
output_asm_insn ("add%.l %1,%0", xoperands);
}
if (operands[2] == const0_rtx)
return "clr%.l %@";
return "move%.l %2,%@";
})
 
;; Speed up pushing a single byte but leaving four bytes of space.
 
(define_peephole
[(set (mem:QI (pre_dec:SI (reg:SI SP_REG)))
(match_operand:QI 1 "general_operand" "dami"))
(set (reg:SI SP_REG) (minus:SI (reg:SI SP_REG) (const_int 2)))]
"! reg_mentioned_p (stack_pointer_rtx, operands[1])"
{
rtx xoperands[4];
 
if (GET_CODE (operands[1]) == REG)
return "move%.l %1,%-";
 
xoperands[1] = operands[1];
xoperands[2]
= gen_rtx_MEM (QImode, plus_constant (stack_pointer_rtx, 3));
xoperands[3] = stack_pointer_rtx;
if (!TARGET_COLDFIRE)
output_asm_insn ("subq%.w #4,%3\;move%.b %1,%2", xoperands);
else
output_asm_insn ("subq%.l #4,%3\;move%.b %1,%2", xoperands);
return "";
})
 
(define_peephole
[(set (match_operand:SI 0 "register_operand" "=d")
(const_int 0))
(set (strict_low_part (subreg:HI (match_dup 0) 2))
(match_operand:HI 1 "general_operand" "rmn"))]
"strict_low_part_peephole_ok (HImode, prev_nonnote_insn (insn), operands[0])"
{
if (GET_CODE (operands[1]) == CONST_INT)
{
if (operands[1] == const0_rtx
&& (DATA_REG_P (operands[0])
|| GET_CODE (operands[0]) == MEM)
/* clr insns on 68000 read before writing.
This isn't so on the 68010, but we have no TARGET_68010. */
&& ((TARGET_68020 || TARGET_COLDFIRE)
|| !(GET_CODE (operands[0]) == MEM
&& MEM_VOLATILE_P (operands[0]))))
return "clr%.w %0";
}
return "move%.w %1,%0";
})
 
;; dbCC peepholes
;;
;; Turns
;; loop:
;; [ ... ]
;; jCC label ; abnormal loop termination
;; dbra dN, loop ; normal loop termination
;;
;; Into
;; loop:
;; [ ... ]
;; dbCC dN, loop
;; jCC label
;;
;; Which moves the jCC condition outside the inner loop for free.
;;
 
(define_peephole
[(set (pc) (if_then_else (match_operator 3 "valid_dbcc_comparison_p"
[(cc0) (const_int 0)])
(label_ref (match_operand 2 "" ""))
(pc)))
(parallel
[(set (pc)
(if_then_else
(ne (match_operand:HI 0 "register_operand" "")
(const_int 0))
(label_ref (match_operand 1 "" ""))
(pc)))
(set (match_dup 0)
(plus:HI (match_dup 0)
(const_int -1)))])]
"!TARGET_COLDFIRE && DATA_REG_P (operands[0]) && ! flags_in_68881 ()"
{
CC_STATUS_INIT;
output_dbcc_and_branch (operands);
return "";
})
 
(define_peephole
[(set (pc) (if_then_else (match_operator 3 "valid_dbcc_comparison_p"
[(cc0) (const_int 0)])
(label_ref (match_operand 2 "" ""))
(pc)))
(parallel
[(set (pc)
(if_then_else
(ne (match_operand:SI 0 "register_operand" "")
(const_int 0))
(label_ref (match_operand 1 "" ""))
(pc)))
(set (match_dup 0)
(plus:SI (match_dup 0)
(const_int -1)))])]
"!TARGET_COLDFIRE && DATA_REG_P (operands[0]) && ! flags_in_68881 ()"
{
CC_STATUS_INIT;
output_dbcc_and_branch (operands);
return "";
})
 
(define_peephole
[(set (pc) (if_then_else (match_operator 3 "valid_dbcc_comparison_p"
[(cc0) (const_int 0)])
(label_ref (match_operand 2 "" ""))
(pc)))
(parallel
[(set (pc)
(if_then_else
(ge (plus:HI (match_operand:HI 0 "register_operand" "")
(const_int -1))
(const_int 0))
(label_ref (match_operand 1 "" ""))
(pc)))
(set (match_dup 0)
(plus:HI (match_dup 0)
(const_int -1)))])]
"!TARGET_COLDFIRE && DATA_REG_P (operands[0]) && ! flags_in_68881 ()"
{
CC_STATUS_INIT;
output_dbcc_and_branch (operands);
return "";
})
 
(define_peephole
[(set (pc) (if_then_else (match_operator 3 "valid_dbcc_comparison_p"
[(cc0) (const_int 0)])
(label_ref (match_operand 2 "" ""))
(pc)))
(parallel
[(set (pc)
(if_then_else
(ge (plus:SI (match_operand:SI 0 "register_operand" "")
(const_int -1))
(const_int 0))
(label_ref (match_operand 1 "" ""))
(pc)))
(set (match_dup 0)
(plus:SI (match_dup 0)
(const_int -1)))])]
"!TARGET_COLDFIRE && DATA_REG_P (operands[0]) && ! flags_in_68881 ()"
{
CC_STATUS_INIT;
output_dbcc_and_branch (operands);
return "";
})
 
(define_insn "extendsfxf2"
[(set (match_operand:XF 0 "nonimmediate_operand" "=fm,f")
(float_extend:XF (match_operand:SF 1 "general_operand" "f,rmF")))]
"TARGET_68881"
{
if (FP_REG_P (operands[0]) && FP_REG_P (operands[1]))
{
if (REGNO (operands[0]) == REGNO (operands[1]))
{
/* Extending float to double in an fp-reg is a no-op.
NOTICE_UPDATE_CC has already assumed that the
cc will be set. So cancel what it did. */
cc_status = cc_prev_status;
return "";
}
return "f%$move%.x %1,%0";
}
if (FP_REG_P (operands[0]))
{
if (FP_REG_P (operands[1]))
return "f%$move%.x %1,%0";
else if (ADDRESS_REG_P (operands[1]))
return "move%.l %1,%-\;f%$move%.s %+,%0";
else if (GET_CODE (operands[1]) == CONST_DOUBLE)
return output_move_const_single (operands);
return "f%$move%.s %f1,%0";
}
return "fmove%.x %f1,%0";
})
 
 
(define_insn "extenddfxf2"
[(set (match_operand:XF 0 "nonimmediate_operand" "=fm,f")
(float_extend:XF
(match_operand:DF 1 "general_operand" "f,rmE")))]
"TARGET_68881"
{
if (FP_REG_P (operands[0]) && FP_REG_P (operands[1]))
{
if (REGNO (operands[0]) == REGNO (operands[1]))
{
/* Extending float to double in an fp-reg is a no-op.
NOTICE_UPDATE_CC has already assumed that the
cc will be set. So cancel what it did. */
cc_status = cc_prev_status;
return "";
}
return "fmove%.x %1,%0";
}
if (FP_REG_P (operands[0]))
{
if (REG_P (operands[1]))
{
rtx xoperands[2];
xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
output_asm_insn ("move%.l %1,%-", xoperands);
output_asm_insn ("move%.l %1,%-", operands);
return "f%&move%.d %+,%0";
}
if (GET_CODE (operands[1]) == CONST_DOUBLE)
return output_move_const_double (operands);
return "f%&move%.d %f1,%0";
}
return "fmove%.x %f1,%0";
})
 
(define_insn "truncxfdf2"
[(set (match_operand:DF 0 "nonimmediate_operand" "=m,!r")
(float_truncate:DF
(match_operand:XF 1 "general_operand" "f,f")))]
"TARGET_68881"
{
if (REG_P (operands[0]))
{
output_asm_insn ("fmove%.d %f1,%-\;move%.l %+,%0", operands);
operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
return "move%.l %+,%0";
}
return "fmove%.d %f1,%0";
})
 
(define_insn "truncxfsf2"
[(set (match_operand:SF 0 "nonimmediate_operand" "=dm")
(float_truncate:SF
(match_operand:XF 1 "general_operand" "f")))]
"TARGET_68881"
"fmove%.s %f1,%0")
 
(define_insn "sin<mode>2"
[(set (match_operand:FP 0 "nonimmediate_operand" "=f")
(unspec:FP
[(match_operand:FP 1 "general_operand" "f<FP:dreg>m")] UNSPEC_SIN))]
"TARGET_68881 && flag_unsafe_math_optimizations"
{
if (FP_REG_P (operands[1]))
return "fsin%.x %1,%0";
else
return "fsin%.<FP:prec> %1,%0";
})
 
(define_insn "cos<mode>2"
[(set (match_operand:FP 0 "nonimmediate_operand" "=f")
(unspec:FP
[(match_operand:FP 1 "general_operand" "f<FP:dreg>m")] UNSPEC_COS))]
"TARGET_68881 && flag_unsafe_math_optimizations"
{
if (FP_REG_P (operands[1]))
return "fcos%.x %1,%0";
else
return "fcos%.<FP:prec> %1,%0";
})
 
(define_insn "trap"
[(trap_if (const_int -1) (const_int 7))]
""
"trap #7")
 
(define_insn "conditional_trap"
[(trap_if (match_operator 0 "valid_dbcc_comparison_p"
[(cc0) (const_int 0)])
(match_operand:SI 1 "const_int_operand" "I"))]
"TARGET_68020 && ! flags_in_68881 ()"
{
switch (GET_CODE (operands[0]))
{
case EQ: return "trapeq";
case NE: return "trapne";
case GT: return "trapgt";
case GTU: return "traphi";
case LT: return "traplt";
case LTU: return "trapcs";
case GE: return "trapge";
case GEU: return "trapcc";
case LE: return "traple";
case LEU: return "trapls";
default: gcc_unreachable ();
}
})
/linux.h
0,0 → 1,293
/* Definitions for Motorola 68k running Linux-based GNU systems with
ELF format.
Copyright (C) 1995, 1996, 1997, 1998, 1999, 2000, 2002, 2003, 2004, 2006,
2007 Free Software Foundation, Inc.
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
 
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
 
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
 
#undef TARGET_VERSION
#define TARGET_VERSION fprintf (stderr, " (68k GNU/Linux with ELF)");
 
/* Default target comes from config.gcc. */
 
#undef TARGET_DEFAULT
#ifdef TARGET_CPU_DEFAULT
#define TARGET_DEFAULT TARGET_CPU_DEFAULT
#else
#define TARGET_DEFAULT (MASK_BITFIELD|MASK_68881|MASK_68020)
#endif
 
/* for 68k machines this only needs to be TRUE for the 68000 */
 
#undef STRICT_ALIGNMENT
#define STRICT_ALIGNMENT 0
 
/* Here are four prefixes that are used by asm_fprintf to
facilitate customization for alternate assembler syntaxes.
Machines with no likelihood of an alternate syntax need not
define these and need not use asm_fprintf. */
 
/* The prefix for register names. Note that REGISTER_NAMES
is supposed to include this prefix. Also note that this is NOT an
fprintf format string, it is a literal string */
 
#undef REGISTER_PREFIX
#define REGISTER_PREFIX "%"
 
/* The prefix for local (compiler generated) labels.
These labels will not appear in the symbol table. */
 
#undef LOCAL_LABEL_PREFIX
#define LOCAL_LABEL_PREFIX "."
 
/* The prefix to add to user-visible assembler symbols. */
 
#undef USER_LABEL_PREFIX
#define USER_LABEL_PREFIX ""
 
#define ASM_COMMENT_START "|"
 
#undef SIZE_TYPE
#define SIZE_TYPE "unsigned int"
 
#undef PTRDIFF_TYPE
#define PTRDIFF_TYPE "int"
 
#undef WCHAR_TYPE
#define WCHAR_TYPE "long int"
 
#undef WCHAR_TYPE_SIZE
#define WCHAR_TYPE_SIZE BITS_PER_WORD
 
/* Target OS builtins. */
#define TARGET_OS_CPP_BUILTINS() \
do \
{ \
LINUX_TARGET_OS_CPP_BUILTINS(); \
builtin_define_std ("mc68000"); \
builtin_define_std ("mc68020"); \
} \
while (0)
 
#define TARGET_OBJFMT_CPP_BUILTINS() \
do \
{ \
builtin_define ("__ELF__"); \
} \
while (0)
 
#undef CPP_SPEC
#if TARGET_DEFAULT & MASK_68881
#define CPP_SPEC \
"%{!msoft-float:-D__HAVE_68881__} %{posix:-D_POSIX_SOURCE} %{pthread:-D_REENTRANT}"
#else
#define CPP_SPEC \
"%{m68881:-D__HAVE_68881__} %{posix:-D_POSIX_SOURCE} %{pthread:-D_REENTRANT}"
#endif
 
/* We override the ASM_SPEC from svr4.h because we must pass -m68040 down
to the assembler. */
#undef ASM_SPEC
#define ASM_SPEC \
"%{v:-V} %{Qy:} %{!Qn:-Qy} %{n} %{T} %{Ym,*} %{Yd,*} %{Wa,*:%*} \
%{m68040} %{m68060:-m68040}"
 
/* Provide a LINK_SPEC appropriate for GNU/Linux. Here we provide support
for the special GCC options -static and -shared, which allow us to
link things in one of these three modes by applying the appropriate
combinations of options at link-time. We would like to support here
as many of the other GNU linker options as possible. But I don't
have the time to search for those flags. I am not sure how to add
support for -soname shared_object_name. H.J.
 
I took out %{v:%{!V:-V}}. It is too much :-(. They can use
-Wl,-V.
 
When the -shared link option is used a final link is not being
done. */
 
/* If ELF is the default format, we should not use /lib/elf. */
 
#define GLIBC_DYNAMIC_LINKER "/lib/ld.so.1"
 
#undef LINK_SPEC
#define LINK_SPEC "-m m68kelf %{shared} \
%{!shared: \
%{!static: \
%{rdynamic:-export-dynamic} \
%{!dynamic-linker*:-dynamic-linker " LINUX_DYNAMIC_LINKER "}} \
%{static}}"
 
/* For compatibility with linux/a.out */
 
#undef PCC_BITFIELD_TYPE_MATTERS
 
/* Currently, JUMP_TABLES_IN_TEXT_SECTION must be defined in order to
keep switch tables in the text section. */
#define JUMP_TABLES_IN_TEXT_SECTION 1
 
/* Use the default action for outputting the case label. */
#undef ASM_OUTPUT_CASE_LABEL
#define ASM_RETURN_CASE_JUMP \
do { \
if (TARGET_COLDFIRE) \
{ \
if (ADDRESS_REG_P (operands[0])) \
return "jmp %%pc@(2,%0:l)"; \
else \
return "ext%.l %0\n\tjmp %%pc@(2,%0:l)"; \
} \
else \
return "jmp %%pc@(2,%0:w)"; \
} while (0)
 
/* This is how to output an assembler line that says to advance the
location counter to a multiple of 2**LOG bytes. */
 
#undef ASM_OUTPUT_ALIGN
#define ASM_OUTPUT_ALIGN(FILE,LOG) \
if ((LOG) > 0) \
fprintf ((FILE), "%s%u\n", ALIGN_ASM_OP, 1 << (LOG));
 
/* If defined, a C expression whose value is a string containing the
assembler operation to identify the following data as uninitialized global
data. */
 
#define BSS_SECTION_ASM_OP "\t.section\t.bss"
 
/* A C statement (sans semicolon) to output to the stdio stream
FILE the assembler definition of uninitialized global DECL named
NAME whose size is SIZE bytes and alignment is ALIGN bytes.
Try to use asm_output_aligned_bss to implement this macro. */
 
#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \
asm_output_aligned_bss (FILE, DECL, NAME, SIZE, ALIGN)
 
/* Output assembler code to FILE to increment profiler label # LABELNO
for profiling a function entry. */
 
#undef FUNCTION_PROFILER
#define FUNCTION_PROFILER(FILE, LABELNO) \
{ \
asm_fprintf (FILE, "\tlea (%LLP%d,%Rpc),%Ra1\n", (LABELNO)); \
if (flag_pic) \
fprintf (FILE, "\tbsr.l _mcount@PLTPC\n"); \
else \
fprintf (FILE, "\tjbsr _mcount\n"); \
}
 
/* How to renumber registers for dbx and gdb.
On the Sun-3, the floating point registers have numbers
18 to 25, not 16 to 23 as they do in the compiler. */
 
#define DBX_REGISTER_NUMBER(REGNO) ((REGNO) < 16 ? (REGNO) : (REGNO) + 2)
 
/* Do not break .stabs pseudos into continuations. */
 
#define DBX_CONTIN_LENGTH 0
 
/* 1 if N is a possible register number for a function value. For
m68k/SVR4 allow d0, a0, or fp0 as return registers, for integral,
pointer, or floating types, respectively. Reject fp0 if not using
a 68881 coprocessor. */
 
#undef FUNCTION_VALUE_REGNO_P
#define FUNCTION_VALUE_REGNO_P(N) \
((N) == 0 || (N) == 8 || (TARGET_68881 && (N) == 16))
 
/* Define this to be true when FUNCTION_VALUE_REGNO_P is true for
more than one register. */
 
#undef NEEDS_UNTYPED_CALL
#define NEEDS_UNTYPED_CALL 1
 
/* Define how to generate (in the callee) the output value of a
function and how to find (in the caller) the value returned by a
function. VALTYPE is the data type of the value (as a tree). If
the precise function being called is known, FUNC is its
FUNCTION_DECL; otherwise, FUNC is 0. For m68k/SVR4 generate the
result in d0, a0, or fp0 as appropriate. */
 
#undef FUNCTION_VALUE
#define FUNCTION_VALUE(VALTYPE, FUNC) \
m68k_function_value (VALTYPE, FUNC)
 
/* For compatibility with the large body of existing code which does
not always properly declare external functions returning pointer
types, the m68k/SVR4 convention is to copy the value returned for
pointer functions from a0 to d0 in the function epilogue, so that
callers that have neglected to properly declare the callee can
still find the correct return value. */
 
#define FUNCTION_EXTRA_EPILOGUE(FILE, SIZE) \
do { \
if (current_function_returns_pointer \
&& ! find_equiv_reg (0, get_last_insn (), 0, 0, 0, 8, Pmode)) \
asm_fprintf (FILE, "\tmove.l %Ra0,%Rd0\n"); \
} while (0);
 
/* Define how to find the value returned by a library function
assuming the value has mode MODE.
For m68k/SVR4 look for integer values in d0, pointer values in d0
(returned in both d0 and a0), and floating values in fp0. */
 
#undef LIBCALL_VALUE
#define LIBCALL_VALUE(MODE) \
m68k_libcall_value (MODE)
 
/* For m68k SVR4, structures are returned using the reentrant
technique. */
#undef PCC_STATIC_STRUCT_RETURN
#define DEFAULT_PCC_STRUCT_RETURN 0
 
/* Finalize the trampoline by flushing the insn cache. */
 
#undef FINALIZE_TRAMPOLINE
#define FINALIZE_TRAMPOLINE(TRAMP) \
emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__clear_cache"), \
0, VOIDmode, 2, TRAMP, Pmode, \
plus_constant (TRAMP, TRAMPOLINE_SIZE), Pmode);
 
/* Clear the instruction cache from `beg' to `end'. This makes an
inline system call to SYS_cacheflush. The arguments are as
follows:
 
cacheflush (addr, scope, cache, len)
 
addr - the start address for the flush
scope - the scope of the flush (see the cpush insn)
cache - which cache to flush (see the cpush insn)
len - a factor relating to the number of flushes to perform:
len/16 lines, or len/4096 pages. */
 
#define CLEAR_INSN_CACHE(BEG, END) \
{ \
register unsigned long _beg __asm ("%d1") = (unsigned long) (BEG); \
unsigned long _end = (unsigned long) (END); \
register unsigned long _len __asm ("%d4") = (_end - _beg + 32); \
__asm __volatile \
("move%.l #123, %/d0\n\t" /* system call nr */ \
"move%.l #1, %/d2\n\t" /* clear lines */ \
"move%.l #3, %/d3\n\t" /* insn+data caches */ \
"trap #0" \
: /* no outputs */ \
: "d" (_beg), "d" (_len) \
: "%d0", "%d2", "%d3"); \
}
 
#define TARGET_ASM_FILE_END file_end_indicate_exec_stack
/m68020-elf.h
0,0 → 1,32
/* Definitions of target machine for GNU compiler. "naked" 68020,
elf object files and debugging, version.
Copyright (C) 1987, 1988, 1992, 1995, 1996, 2007 Free Software Foundation, Inc.
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
 
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
 
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
 
/* This comment is here to see if it will keep Sun's cpp from dying. */
 
/* We need to override the default specs from elfos.h. This suppresses the
loading of crt0.o by gcc's default linker spec. For embedded targets crt0
now comes from the linker script. */
 
#undef STARTFILE_SPEC
#define STARTFILE_SPEC "crtbegin.o%s"
 
#define LIB_SPEC "-lc"
 
/* end of m68020-elf.h */
/m68k.opt
0,0 → 1,152
; Options for the Motorola 68000 port of the compiler.
 
; Copyright (C) 2005, 2006, 2007 Free Software Foundation, Inc.
;
; This file is part of GCC.
;
; GCC is free software; you can redistribute it and/or modify it under
; the terms of the GNU General Public License as published by the Free
; Software Foundation; either version 3, or (at your option) any later
; version.
;
; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
; WARRANTY; without even the implied warranty of MERCHANTABILITY or
; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
; for more details.
;
; You should have received a copy of the GNU General Public License
; along with GCC; see the file COPYING3. If not see
; <http://www.gnu.org/licenses/>.
 
m5200
Target RejectNegative Mask(5200)
Generate code for a 520X
 
m5206e
Target RejectNegative Mask(CF_HWDIV)
Generate code for a 5206e
 
m528x
Target RejectNegative Mask(528x)
Generate code for a 528x
 
m5307
Target RejectNegative Mask(CFV3)
Generate code for a 5307
 
m5407
Target RejectNegative Mask(CFV4)
Generate code for a 5407
 
mcfv4e
Target RejectNegative Mask(CFV4E)
Generate code for a ColdFire v4e
 
m68000
Target RejectNegative
Generate code for a 68000
 
m68020
Target RejectNegative Mask(68020)
Generate code for a 68020
 
m68020-40
Target RejectNegative Mask(68040)
Generate code for a 68040, without any new instructions
 
m68020-60
Target RejectNegative Mask(68060)
Generate code for a 68060, without any new instructions
 
m68030
Target RejectNegative Mask(68030)
Generate code for a 68030
 
m68040
Target RejectNegative Mask(68040_ONLY)
Generate code for a 68040
 
m68060
Target RejectNegative
Generate code for a 68060
 
m68302
Target RejectNegative
Generate code for a 68302
 
m68332
Target RejectNegative
Generate code for a 68332
 
; Has no effect on gcc
m68851
Target
Generate code for a 68851
 
m68881
Target RejectNegative Mask(68881)
Generate code that uses 68881 floating-point instructions
 
malign-int
Target Report Mask(ALIGN_INT)
Align variables on a 32-bit boundary
 
mbitfield
Target Report RejectNegative Mask(BITFIELD)
Use the bit-field instructions
 
mc68000
Target RejectNegative
Generate code for a 68000
 
mc68020
Target RejectNegative
Generate code for a 68020
 
mcpu32
Target RejectNegative
Generate code for a cpu32
 
mid-shared-library
Target Report Mask(ID_SHARED_LIBRARY)
Enable ID based shared library
 
mnobitfield
Target RejectNegative InverseMask(BITFIELD)
Do not use the bit-field instructions
 
mnortd
Target RejectNegative InverseMask(RTD)
Use normal calling convention
 
mnoshort
Target RejectNegative InverseMask(SHORT)
Consider type 'int' to be 32 bits wide
 
mpcrel
Target Report Mask(PCREL)
Generate pc-relative code
 
mrtd
Target Report RejectNegative Mask(RTD)
Use different calling convention using 'rtd'
 
msep-data
Target Report Mask(SEP_DATA)
Enable separate data segment
 
mshared-library-id=
Target RejectNegative Joined UInteger
ID of shared library to build
 
mshort
Target Report RejectNegative Mask(SHORT)
Consider type 'int' to be 16 bits wide
 
msoft-float
Target RejectNegative InverseMask(68881)
Generate code with library calls for floating point
 
mstrict-align
Target Report Mask(STRICT_ALIGNMENT)
Do not use unaligned memory references
/fpgnulib.c
0,0 → 1,572
/* This is a stripped down version of floatlib.c. It supplies only those
functions which exist in libgcc, but for which there are no assembly
language versions in m68k/lb1sf68.asm.
 
It also includes simplistic support for extended floats (by working in
double precision). You must compile this file again with -DEXTFLOAT
to get this support. */
 
/*
** gnulib support for software floating point.
** Copyright (C) 1991 by Pipeline Associates, Inc. All rights reserved.
** Permission is granted to do *anything* you want with this file,
** commercial or otherwise, provided this message remains intact. So there!
** I would appreciate receiving any updates/patches/changes that anyone
** makes, and am willing to be the repository for said changes (am I
** making a big mistake?).
**
** Pat Wood
** Pipeline Associates, Inc.
** pipeline!phw@motown.com or
** sun!pipeline!phw or
** uunet!motown!pipeline!phw
**
** 05/01/91 -- V1.0 -- first release to gcc mailing lists
** 05/04/91 -- V1.1 -- added float and double prototypes and return values
** -- fixed problems with adding and subtracting zero
** -- fixed rounding in truncdfsf2
** -- fixed SWAP define and tested on 386
*/
 
/*
** The following are routines that replace the gnulib soft floating point
** routines that are called automatically when -msoft-float is selected.
** They support single and double precision IEEE format, with provisions
** for byte-swapped machines (tested on 386). Some of the double-precision
** routines work at full precision, but most of the hard ones simply punt
** and call the single precision routines, producing a loss of accuracy.
** long long support is not assumed or included.
** Overall accuracy is close to IEEE (actually 68882) for single-precision
** arithmetic. I think there may still be a 1 in 1000 chance of a bit
** being rounded the wrong way during a multiply. I'm not fussy enough to
** bother with it, but if anyone is, knock yourself out.
**
** Efficiency has only been addressed where it was obvious that something
** would make a big difference. Anyone who wants to do this right for
** best speed should go in and rewrite in assembler.
**
** I have tested this only on a 68030 workstation and 386/ix integrated
** in with -msoft-float.
*/
 
/* the following deal with IEEE single-precision numbers */
#define EXCESS 126L
#define SIGNBIT 0x80000000L
#define HIDDEN (1L << 23L)
#define SIGN(fp) ((fp) & SIGNBIT)
#define EXP(fp) (((fp) >> 23L) & 0xFF)
#define MANT(fp) (((fp) & 0x7FFFFFL) | HIDDEN)
#define PACK(s,e,m) ((s) | ((e) << 23L) | (m))
 
/* the following deal with IEEE double-precision numbers */
#define EXCESSD 1022L
#define HIDDEND (1L << 20L)
#define EXPDBITS 11
#define EXPDMASK 0x7FFL
#define EXPD(fp) (((fp.l.upper) >> 20L) & 0x7FFL)
#define SIGND(fp) ((fp.l.upper) & SIGNBIT)
#define MANTD(fp) (((((fp.l.upper) & 0xFFFFF) | HIDDEND) << 10) | \
(fp.l.lower >> 22))
#define MANTDMASK 0xFFFFFL /* mask of upper part */
 
/* the following deal with IEEE extended-precision numbers */
#define EXCESSX 16382L
#define HIDDENX (1L << 31L)
#define EXPXBITS 15
#define EXPXMASK 0x7FFF
#define EXPX(fp) (((fp.l.upper) >> 16) & EXPXMASK)
#define SIGNX(fp) ((fp.l.upper) & SIGNBIT)
#define MANTXMASK 0x7FFFFFFFL /* mask of upper part */
 
/* Overlay a double with two 32-bit words for bit-level access
   (upper word holds sign/exponent, per the EXPD/SIGND macros above).  */
union double_long
{
double d;
struct {
long upper;           /* sign bit, exponent, high mantissa bits */
unsigned long lower;  /* low 32 mantissa bits */
} l;
};

/* Overlay a float with a single 32-bit word.  */
union float_long {
float f;
long l;
};

/* Overlay a long double with three 32-bit words (see the EXPX/SIGNX
   extended-precision macros above).  */
union long_double_long
{
long double ld;
struct
{
long upper;
unsigned long middle;
unsigned long lower;
} l;
};
#ifndef EXTFLOAT
 
int
__unordsf2(float a, float b)
{
union float_long fl;
 
fl.f = a;
if (EXP(fl.l) == EXP(~0u) && (MANT(fl.l) & ~HIDDEN) != 0)
return 1;
fl.f = b;
if (EXP(fl.l) == EXP(~0u) && (MANT(fl.l) & ~HIDDEN) != 0)
return 1;
return 0;
}
 
int
__unorddf2(double a, double b)
{
union double_long dl;
 
dl.d = a;
if (EXPD(dl) == EXPDMASK
&& ((dl.l.upper & MANTDMASK) != 0 || dl.l.lower != 0))
return 1;
dl.d = b;
if (EXPD(dl) == EXPDMASK
&& ((dl.l.upper & MANTDMASK) != 0 || dl.l.lower != 0))
return 1;
return 0;
}
 
/* Convert the unsigned 32-bit integer A1 to a double.  */
double
__floatunsidf (unsigned long a1)
{
  union double_long dl;
  long exp;

  if (a1 == 0)
    {
      dl.l.upper = dl.l.lower = 0;
      return dl.d;
    }

  /* Normalize: shift the value left until bit 31 is set, adjusting the
     biased exponent to match.  Coarse 4-bit steps first, then single bits.  */
  exp = 32 + EXCESSD;
  for (; a1 < 0x2000000L; exp -= 4)
    a1 <<= 4;
  for (; a1 < 0x80000000L; exp--)
    a1 <<= 1;

  /* Pack: exponent into bits 20-30 of the upper word, the mantissa split
     across both words with the hidden bit masked off.  */
  dl.l.upper = exp << 20L;
  dl.l.upper |= (a1 >> 11L) & ~HIDDEND;
  dl.l.lower = a1 << 21L;

  return dl.d;
}
 
/* convert int to double */
double
__floatsidf (long a1)
{
long sign = 0, exp = 31 + EXCESSD;
union double_long dl;

if (!a1)
{
dl.l.upper = dl.l.lower = 0;
return dl.d;
}

if (a1 < 0)
{
sign = SIGNBIT;
/* Negate through unsigned arithmetic to avoid signed overflow.  */
a1 = (long)-(unsigned long)a1;
if (a1 < 0)
{
/* Still negative: A1 was the most negative 32-bit value, whose
   magnitude (2^31) is encoded here directly.  */
dl.l.upper = SIGNBIT | ((32 + EXCESSD) << 20L);
dl.l.lower = 0;
return dl.d;
}
}

/* Normalize the magnitude, coarse 4-bit steps first, then single bits.  */
while (a1 < 0x1000000L)
{
a1 <<= 4;
exp -= 4;
}

while (a1 < 0x40000000L)
{
a1 <<= 1;
exp--;
}

/* pack up and go home */
dl.l.upper = sign;
dl.l.upper |= exp << 20L;
dl.l.upper |= (a1 >> 10L) & ~HIDDEND;
dl.l.lower = a1 << 22L;

return dl.d;
}
 
/* Convert the unsigned 32-bit integer L to a float by converting to
   double first and narrowing the result.  */
float
__floatunsisf (unsigned long l)
{
  return (float) __floatunsidf (l);
}
 
/* Convert the signed 32-bit integer L to a float by converting to
   double first and narrowing the result.  */
float
__floatsisf (long l)
{
  return (float) __floatsidf (l);
}
 
/* convert float to double */
double
__extendsfdf2 (float a1)
{
  register union float_long fl1;
  register union double_long dl;
  register long exp;
  register long mant;

  fl1.f = a1;

  /* Copy the sign bit; for +/-0 only the rest needs clearing.  */
  dl.l.upper = SIGN (fl1.l);
  if ((fl1.l & ~SIGNBIT) == 0)
    {
      dl.l.lower = 0;
      return dl.d;
    }

  exp = EXP(fl1.l);
  mant = MANT (fl1.l) & ~HIDDEN;
  if (exp == 0)
    {
      /* Denormal: renormalize by shifting the mantissa up until the
	 hidden-bit position is reached, decrementing the (biased)
	 exponent from 1 as we go.  */
      exp = 1;
      while (!(mant & HIDDEN))
	{
	  mant <<= 1;
	  exp--;
	}
      mant &= ~HIDDEN;
    }
  /* Re-bias the exponent from single to double precision.  */
  exp = exp - EXCESS + EXCESSD;
  /* Split the 23-bit mantissa: top 20 bits go below the exponent in
     the upper word, the low 3 bits into the top of the lower word.  */
  dl.l.upper |= exp << 20;
  dl.l.upper |= mant >> 3;
  dl.l.lower = mant << 29;
  return dl.d;
}
 
/* convert double to float */
float
__truncdfsf2 (double a1)
{
  register long exp;
  register long mant;
  register union float_long fl;
  register union double_long dl1;

  dl1.d = a1;

  /* +/-0: just propagate the sign bit.  */
  if ((dl1.l.upper & ~SIGNBIT) == 0 && !dl1.l.lower)
    {
      fl.l = SIGND(dl1);
      return fl.f;
    }

  /* Re-bias the exponent from double to single precision.  */
  exp = EXPD (dl1) - EXCESSD + EXCESS;

  /* shift double mantissa 6 bits so we can round */
  mant = MANTD (dl1) >> 6;

  /* Check for underflow and denormals. */
  if (exp <= 0)
    {
      if (exp < -24)
	mant = 0;		/* too small even for a denormal */
      else
	mant >>= 1 - exp;	/* shift down to make a denormal */
      exp = 0;
    }
  /* now round and shift down */
  mant += 1;
  mant >>= 1;

  /* did the round overflow? */
  if (mant & 0xFF000000L)
    {
      mant >>= 1;
      exp++;
    }

  /* Drop the (now implicit) leading one-bit.  */
  mant &= ~HIDDEN;

  /* pack up and go home */
  fl.l = PACK (SIGND (dl1), exp, mant);
  return (fl.f);
}
 
/* convert double to int */
long
__fixdfsi (double a1)
{
  register union double_long dl1;
  register long exp;
  register long l;

  dl1.d = a1;

  if (!dl1.l.upper && !dl1.l.lower)
    return 0;

  /* Exponent relative to a mantissa already shifted to bit 31.  */
  exp = EXPD (dl1) - EXCESSD - 31;
  l = MANTD (dl1);

  if (exp > 0)
    {
      /* Return largest integer. */
      /* NOTE(review): positive overflow also yields 0x7fffffff here;
	 values like -2^31 saturate to 0x80000000.  */
      return SIGND (dl1) ? 0x80000000L : 0x7fffffffL;
    }

  /* Magnitude has no integer bits: truncates to zero.  */
  if (exp <= -32)
    return 0;

  /* shift down until exp = 0 */
  if (exp < 0)
    l >>= -exp;

  return (SIGND (dl1) ? -l : l);
}
 
/* Convert float to int: widen to double and reuse __fixdfsi.  */
long
__fixsfsi (float a1)
{
  return __fixdfsi ((double) a1);
}
#else /* EXTFLOAT */
 
/* Primitive extended precision floating point support.

   We assume all numbers are normalized, don't do any rounding, etc. */

/* Prototypes for the above in case we use them. */
/* (These are the single/double routines from the !EXTFLOAT half of
   this file; the EXTFLOAT build below layers XFmode support on top
   of them.) */
double __floatunsidf (unsigned long);
double __floatsidf (long);
float __floatsisf (long);
double __extendsfdf2 (float);
float __truncdfsf2 (double);
long __fixdfsi (double);
long __fixsfsi (float);
 
int
__unordxf2(long double a, long double b)
{
union long_double_long ldl;
 
ldl.ld = a;
if (EXPX(ldl) == EXPXMASK
&& ((ldl.l.middle & MANTXMASK) != 0 || ldl.l.lower != 0))
return 1;
ldl.ld = b;
if (EXPX(ldl) == EXPXMASK
&& ((ldl.l.middle & MANTXMASK) != 0 || ldl.l.lower != 0))
return 1;
return 0;
}
 
/* convert double to long double */
/* NOTE(review): denormal inputs are not handled here; the header
   comment above states all numbers are assumed normalized.  */
long double
__extenddfxf2 (double d)
{
  register union double_long dl;
  register union long_double_long ldl;
  register long exp;

  dl.d = d;
  /*printf ("dfxf in: %g\n", d);*/

  /* Copy the sign; +/-0 needs only the mantissa words cleared.  */
  ldl.l.upper = SIGND (dl);
  if ((dl.l.upper & ~SIGNBIT) == 0 && !dl.l.lower)
    {
      ldl.l.middle = 0;
      ldl.l.lower = 0;
      return ldl.ld;
    }

  /* Re-bias the exponent; it sits above bit 16 of the upper word.  */
  exp = EXPD (dl) - EXCESSD + EXCESSX;
  ldl.l.upper |= exp << 16;
  /* Extended precision has an explicit leading integer bit.  */
  ldl.l.middle = HIDDENX;
  /* 31-20: # mantissa bits in ldl.l.middle - # mantissa bits in dl.l.upper */
  ldl.l.middle |= (dl.l.upper & MANTDMASK) << (31 - 20);
  /* 1+20: explicit-integer-bit + # mantissa bits in dl.l.upper */
  ldl.l.middle |= dl.l.lower >> (1 + 20);
  /* 32 - 21: # bits of dl.l.lower in ldl.l.middle */
  ldl.l.lower = dl.l.lower << (32 - 21);

  /*printf ("dfxf out: %s\n", dumpxf (ldl.ld));*/
  return ldl.ld;
}
 
/* convert long double to double */
/* NOTE(review): no rounding is performed (see header comment), and
   overflow is only crudely clamped below.  */
double
__truncxfdf2 (long double ld)
{
  register long exp;
  register union double_long dl;
  register union long_double_long ldl;

  ldl.ld = ld;
  /*printf ("xfdf in: %s\n", dumpxf (ld));*/

  /* Copy the sign; +/-0 needs only the lower word cleared.  */
  dl.l.upper = SIGNX (ldl);
  if ((ldl.l.upper & ~SIGNBIT) == 0 && !ldl.l.middle && !ldl.l.lower)
    {
      dl.l.lower = 0;
      return dl.d;
    }

  exp = EXPX (ldl) - EXCESSX + EXCESSD;
  /* ??? quick and dirty: keep `exp' sane */
  if (exp >= EXPDMASK)
    exp = EXPDMASK - 1;
  dl.l.upper |= exp << (32 - (EXPDBITS + 1));
  /* +1-1: add one for sign bit, but take one off for explicit-integer-bit */
  dl.l.upper |= (ldl.l.middle & MANTXMASK) >> (EXPDBITS + 1 - 1);
  dl.l.lower = (ldl.l.middle & MANTXMASK) << (32 - (EXPDBITS + 1 - 1));
  dl.l.lower |= ldl.l.lower >> (EXPDBITS + 1 - 1);

  /*printf ("xfdf out: %g\n", dl.d);*/
  return dl.d;
}
 
/* Convert a float to a long double: widen in two steps,
   float -> double -> extended.  */
long double
__extendsfxf2 (float f)
{
  return __extenddfxf2 (__extendsfdf2 (f));
}

/* Convert a long double to a float: narrow in two steps,
   extended -> double -> float.  */
float
__truncxfsf2 (long double ld)
{
  return __truncdfsf2 (__truncxfdf2 (ld));
}

/* Convert an int to a long double; the double result is widened to
   extended precision by the return conversion.  */
long double
__floatsixf (long l)
{
  return __floatsidf (l);
}

/* Convert an unsigned int to a long double, likewise via double.  */
long double
__floatunsixf (unsigned long l)
{
  return __floatunsidf (l);
}

/* Convert a long double to an int by narrowing to double first.  */
long
__fixxfsi (long double ld)
{
  return __fixdfsi ((double) ld);
}
 
/* The remaining routines provide crude extended-precision math
   support by carrying out the operation in double precision and
   widening the result back on return.  */

long double
__addxf3 (long double x1, long double x2)
{
  double u = x1, v = x2;

  return u + v;
}

long double
__subxf3 (long double x1, long double x2)
{
  double u = x1, v = x2;

  return u - v;
}

long double
__mulxf3 (long double x1, long double x2)
{
  double u = x1, v = x2;

  return u * v;
}

long double
__divxf3 (long double x1, long double x2)
{
  double u = x1, v = x2;

  return u / v;
}

long double
__negxf2 (long double x1)
{
  double u = x1;

  return -u;
}
 
/* Extended-precision comparisons, all delegated to the
   double-precision __cmpdf2 after narrowing both operands.  */

long
__cmpxf2 (long double x1, long double x2)
{
  double u = x1, v = x2;

  return __cmpdf2 (u, v);
}

long
__eqxf2 (long double x1, long double x2)
{
  double u = x1, v = x2;

  return __cmpdf2 (u, v);
}

long
__nexf2 (long double x1, long double x2)
{
  double u = x1, v = x2;

  return __cmpdf2 (u, v);
}

long
__ltxf2 (long double x1, long double x2)
{
  double u = x1, v = x2;

  return __cmpdf2 (u, v);
}

long
__lexf2 (long double x1, long double x2)
{
  double u = x1, v = x2;

  return __cmpdf2 (u, v);
}

long
__gtxf2 (long double x1, long double x2)
{
  double u = x1, v = x2;

  return __cmpdf2 (u, v);
}

long
__gexf2 (long double x1, long double x2)
{
  double u = x1, v = x2;

  return __cmpdf2 (u, v);
}
 
#endif /* EXTFLOAT */
/m68k-modes.def
0,0 → 1,21
/* M68k extra machine modes.
Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
 
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
 
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
 
/* 80-bit floating point (IEEE extended, in a 96-bit field) */
/* Declares XFmode: 80 significant bits stored in 12 bytes, using the
   Motorola extended-precision layout.  */
FRACTIONAL_FLOAT_MODE (XF, 80, 12, ieee_extended_motorola_format);
/t-uclinux
0,0 → 1,24
# Assembly libgcc support: integer mul/div/mod plus the soft-float
# entry points from lb1sf68.asm.
LIB1ASMSRC = m68k/lb1sf68.asm
LIB1ASMFUNCS = _mulsi3 _udivsi3 _divsi3 _umodsi3 _modsi3 \
	_double _float _floatex \
	_eqdf2 _nedf2 _gtdf2 _gedf2 _ltdf2 _ledf2 \
	_eqsf2 _nesf2 _gtsf2 _gesf2 _ltsf2 _lesf2

# C soft-float fallbacks compiled into libgcc.
LIB2FUNCS_EXTRA = fpgnulib.c xfgnulib.c

fpgnulib.c: $(srcdir)/config/m68k/fpgnulib.c
	cp $(srcdir)/config/m68k/fpgnulib.c fpgnulib.c
# xfgnulib.c is fpgnulib.c recompiled with EXTFLOAT defined, which
# selects the XFmode (long double) half of that file.
xfgnulib.c: $(srcdir)/config/m68k/fpgnulib.c
	echo '#define EXTFLOAT' > xfgnulib.c
	cat $(srcdir)/config/m68k/fpgnulib.c >> xfgnulib.c

# One libgcc multilib per CPU, crossed with the uClinux code-model
# options; MULTILIB_EXCEPTIONS prunes combinations not built.
MULTILIB_OPTIONS = m68000/m5200/m5206e/m528x/m5307/m5407/mcpu32 msep-data/mid-shared-library
MULTILIB_DIRNAMES =
# Map equivalent -m option spellings onto the same multilib.
MULTILIB_MATCHES = m68000=mc68000 m68000=m68302 mcpu32=m68332 m5206e=m5272
MULTILIB_EXCEPTIONS = m68000/msep-data* m68000/mid-shared-library* msep-data* mid-shared-library*

LIBGCC = stmp-multilib
INSTALL_LIBGCC = install-multilib

# We don't use crtbegin.o and crtend.o
EXTRA_MULTILIB_PARTS=
/t-rtems
0,0 → 1,7
# Custom multilibs for RTEMS

# One libgcc per CPU, crossed with the FPU choice (68881 vs. soft-float).
MULTILIB_OPTIONS = m68000/m68020/m5200/mcpu32/m68030/m68040/m68060 m68881/msoft-float
MULTILIB_DIRNAMES =
# Map equivalent -m option spellings onto the same multilib.
MULTILIB_MATCHES = m68000=mc68000 m68000=m68302 mcpu32=m68332 m68020=mc68020 m68030=mc68030
MULTILIB_MATCHES += m5200=m528x
# Prune CPU/FPU combinations that are not built (presumably because the
# CPU has no 68881 pairing or soft-float makes no sense there).
MULTILIB_EXCEPTIONS = m68000/msoft-float m5200/m68881 m5200/msoft-float mcpu32/m68881 mcpu32/msoft-float m68040/m68881 m68060/m68881
/t-openbsd
0,0 → 1,5
# gdb gets confused if PIC code is linked with non-PIC.
# We cope by building all variants of libgcc.
MULTILIB_OPTIONS = fpic/fPIC
# Build and install every multilib variant of libgcc.
LIBGCC = stmp-multilib
INSTALL_LIBGCC = install-multilib
/m68kemb.h
0,0 → 1,56
/* Definitions of target machine for GNU compiler. "embedded" 68XXX.
This is meant to be included after m68k.h.
Copyright (C) 1994, 1995, 1998, 1999, 2004, 2006
Free Software Foundation, Inc. */
 
/* Override the SVR4 ABI for this target. */

#define PTRDIFF_TYPE "long int"
#define SIZE_TYPE "long unsigned int"

/* In order for bitfields to work on a 68000, or with -mnobitfield, we must
   define either PCC_BITFIELD_TYPE_MATTERS or STRUCTURE_SIZE_BOUNDARY.
   Defining STRUCTURE_SIZE_BOUNDARY results in structure packing problems,
   so we define PCC_BITFIELD_TYPE_MATTERS. */
#define PCC_BITFIELD_TYPE_MATTERS 1

/* Undef PCC_STATIC_STRUCT_RETURN so that we get a re-entrant calling
   convention. */
#undef PCC_STATIC_STRUCT_RETURN

/* Don't default to pcc-struct-return, so that we can return small structures
   and unions in registers, which is slightly more efficient. */
#define DEFAULT_PCC_STRUCT_RETURN 0

#undef FUNCTION_VALUE
#define FUNCTION_VALUE(VALTYPE,FUNC) LIBCALL_VALUE (TYPE_MODE (VALTYPE))

#undef LIBCALL_VALUE
#define LIBCALL_VALUE(MODE)			\
  m68k_libcall_value (MODE)

/* Register 0, plus register 16 when a 68881 is available --
   presumably d0 and fp0; verify against the register numbering
   in m68k.h.  */
#undef FUNCTION_VALUE_REGNO_P
#define FUNCTION_VALUE_REGNO_P(N) ((N) == 0 || (TARGET_68881 && (N) == 16))

#undef NEEDS_UNTYPED_CALL
#define NEEDS_UNTYPED_CALL 1

/* Target OS builtins. */
#define TARGET_OS_CPP_BUILTINS()		\
  do						\
    {						\
      builtin_define ("__embedded__");		\
    }						\
  while (0)

/* Override the default LIB_SPEC from gcc.c.  We don't currently support
   profiling, or libg.a. */

#undef LIB_SPEC
#define LIB_SPEC "-lc"

/* Make this be null, since we want the crt0.o to come from the linker
   script */

#undef STARTFILE_SPEC
#define STARTFILE_SPEC ""
/m68k-protos.h
0,0 → 1,64
/* Definitions of target machine for GNU compiler. Sun 68000/68020 version.
Copyright (C) 2000, 2002, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
 
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
 
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
 
/* Define functions defined in aux-output.c and used in templates. */

/* These prototypes need RTL types, so they are only visible when
   rtl.h has been included.  */
#ifdef RTX_CODE
extern HOST_WIDE_INT m68k_initial_elimination_offset (int from, int to);

/* Instruction-output helpers for the move/arithmetic patterns in
   m68k.md; each returns the assembler template to emit.  */
extern const char *output_move_const_into_data_reg (rtx *);
extern int valid_mov3q_const (rtx);
extern const char *output_move_simode_const (rtx *);
extern const char *output_move_simode (rtx *);
extern const char *output_move_himode (rtx *);
extern const char *output_move_qimode (rtx *);
extern const char *output_move_stricthi (rtx *);
extern const char *output_move_strictqi (rtx *);
extern const char *output_move_double (rtx *);
extern const char *output_move_const_single (rtx *);
extern const char *output_move_const_double (rtx *);
extern const char *output_btst (rtx *, rtx, rtx, rtx, int);
extern const char *output_scc_di (rtx, rtx, rtx, rtx);
extern const char *output_addsi3 (rtx *);
extern const char *output_andsi3 (rtx *);
extern const char *output_iorsi3 (rtx *);
extern const char *output_xorsi3 (rtx *);
extern void m68k_output_pic_call (rtx dest);
extern void output_dbcc_and_branch (rtx *);
extern int floating_exact_log2 (rtx);
extern bool strict_low_part_peephole_ok (enum machine_mode mode, rtx first_insn, rtx target);

/* Functions from m68k.c used in macros. */
extern int standard_68881_constant_p (rtx);
extern void print_operand_address (FILE *, rtx);
extern void print_operand (FILE *, rtx, int);
extern void notice_update_cc (rtx, rtx);
extern rtx legitimize_pic_address (rtx, enum machine_mode, rtx);
extern int valid_dbcc_comparison_p_2 (rtx, enum machine_mode);
extern rtx m68k_libcall_value (enum machine_mode);
extern rtx m68k_function_value (tree, tree);
extern int emit_move_sequence (rtx *, enum machine_mode, rtx);

#endif /* RTX_CODE */

/* Prototypes that do not require RTL types.  */
extern bool m68k_regno_mode_ok (int, enum machine_mode);
extern int flags_in_68881 (void);
extern bool use_return_insn (void);
extern void override_options (void);
extern void init_68881_table (void);
extern int m68k_hard_regno_rename_ok(unsigned int, unsigned int);
/t-crtstuff
0,0 → 1,10
# Startup/teardown objects built for every multilib.
EXTRA_MULTILIB_PARTS=crtbegin.o crtend.o crti.o crtn.o

# Add flags here as required.
CRTSTUFF_T_CFLAGS =

# Assemble startup files.
# $(T) is the multilib subdirectory prefix supplied by the build system.
$(T)crti.o: $(srcdir)/config/m68k/crti.s $(GCC_PASSES)
	$(GCC_FOR_TARGET) $(MULTILIB_CFLAGS) -c -o $(T)crti.o $(srcdir)/config/m68k/crti.s
$(T)crtn.o: $(srcdir)/config/m68k/crtn.s $(GCC_PASSES)
	$(GCC_FOR_TARGET) $(MULTILIB_CFLAGS) -c -o $(T)crtn.o $(srcdir)/config/m68k/crtn.s
/crti.s
0,0 → 1,47
/* Specialized code needed to support construction and destruction of
file-scope objects in C++ and Java code, and to support exception handling.
Copyright (C) 1999 Free Software Foundation, Inc.
Contributed by Charles-Antoine Gauthier (charles.gauthier@iit.nrc.ca).
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
 
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
 
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA. */
 
/* As a special exception, if you link this library with files
compiled with GCC to produce an executable, this does not cause
the resulting executable to be covered by the GNU General Public License.
This exception does not however invalidate any other reasons why
the executable file might be covered by the GNU General Public License. */
 
/*
 * This file just supplies function prologues for the .init and .fini
 * sections.  It is linked in before crtbegin.o.
 */

	.file		"crti.o"
	.ident		"GNU C crti.o"

/* Open the _init function: set up a frame; the body is contributed by
   other objects linked into .init.  NOTE(review): the matching unlk/rts
   epilogues presumably come from crtn.s (see t-crtstuff) -- confirm.  */
	.section .init
	.globl	_init
	.type	_init,@function
_init:
	linkw %fp,#0

/* Likewise open the _fini function in the .fini section.  */
	.section .fini
	.globl	_fini
	.type	_fini,@function
_fini:
	linkw %fp,#0
/m68kelf.h
0,0 → 1,243
/* m68kelf support, derived from m68kv4.h */
 
/* Target definitions for GNU compiler for mc680x0 running System V.4
Copyright (C) 1991, 1993, 2000, 2002, 2003, 2004, 2007
Free Software Foundation, Inc.
 
Written by Ron Guilmette (rfg@netcom.com) and Fred Fish (fnf@cygnus.com).
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
 
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
 
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
 
 
/* Assembler directive that precedes a switch (jump) table; may be
   overridden by an including header.  */
#ifndef SWBEG_ASM_OP
#define SWBEG_ASM_OP "\t.swbeg\t"
#endif

/* Here are four prefixes that are used by asm_fprintf to
   facilitate customization for alternate assembler syntaxes.
   Machines with no likelihood of an alternate syntax need not
   define these and need not use asm_fprintf.  */

/* The prefix for register names.  Note that REGISTER_NAMES
   is supposed to include this prefix.  Also note that this is NOT an
   fprintf format string, it is a literal string */

#undef REGISTER_PREFIX
#define REGISTER_PREFIX "%"

/* The prefix for local (compiler generated) labels.
   These labels will not appear in the symbol table. */

#undef LOCAL_LABEL_PREFIX
#define LOCAL_LABEL_PREFIX "."

/* The prefix to add to user-visible assembler symbols. */

#undef USER_LABEL_PREFIX
#define USER_LABEL_PREFIX ""

/* The prefix for immediate operands. */

#undef IMMEDIATE_PREFIX
#define IMMEDIATE_PREFIX "#"

/* In the machine description we can't use %R, because it will not be seen
   by ASM_FPRINTF.  (Isn't that a design bug?). */

#undef REGISTER_PREFIX_MD
#define REGISTER_PREFIX_MD "%%"

/* config/m68k.md has an explicit reference to the program counter,
   prefix this by the register prefix. */

#define ASM_RETURN_CASE_JUMP				\
  do {							\
    if (TARGET_COLDFIRE)				\
      {							\
	if (ADDRESS_REG_P (operands[0]))		\
	  return "jmp %%pc@(2,%0:l)";			\
	else						\
	  return "ext%.l %0\n\tjmp %%pc@(2,%0:l)";	\
      }							\
    else						\
      return "jmp %%pc@(2,%0:w)";			\
  } while (0)
 
/* This is how to output an assembler line that says to advance the
   location counter to a multiple of 2**LOG bytes.  */

#undef ASM_OUTPUT_ALIGN
#define ASM_OUTPUT_ALIGN(FILE,LOG)				\
  do {								\
    if ((LOG) > 0)						\
      fprintf ((FILE), "%s%u\n", ALIGN_ASM_OP, 1 << (LOG));	\
  } while (0)

/* Use proper assembler syntax for these macros. */
#undef ASM_OUTPUT_REG_PUSH
#define ASM_OUTPUT_REG_PUSH(FILE,REGNO)				\
  asm_fprintf (FILE, "\t%Omove.l %s,-(%Rsp)\n", reg_names[REGNO])

#undef ASM_OUTPUT_REG_POP
#define ASM_OUTPUT_REG_POP(FILE,REGNO)				\
  asm_fprintf (FILE, "\t%Omove.l (%Rsp)+,%s\n", reg_names[REGNO])

/* Override the definition of NO_DOLLAR_IN_LABEL in svr4.h, for special
   g++ assembler names.  When this is defined, g++ uses embedded '.'
   characters and some m68k assemblers have problems with this.  The
   chances are much greater that any particular assembler will permit
   embedded '$' characters. */

#undef NO_DOLLAR_IN_LABEL

/* Define PCC_STATIC_STRUCT_RETURN if the convention on the target machine
   is to use the nonreentrant technique for returning structure and union
   values, as commonly implemented by the AT&T Portable C Compiler (PCC).
   When defined, the gcc option -fpcc-struct-return can be used to cause
   this form to be generated.  When undefined, the option does nothing.
   For m68k SVR4, the convention is to use a reentrant technique compatible
   with the gcc default, so override the definition of this macro in m68k.h */

#undef PCC_STATIC_STRUCT_RETURN

/* Local common symbols are declared to the assembler with ".lcomm" rather
   than ".bss", so override the definition in svr4.h */

#undef BSS_ASM_OP
#define BSS_ASM_OP "\t.lcomm\t"

/* Register in which address to store a structure value is passed to a
   function.  The default in m68k.h is a1.  For m68k/SVR4 it is a0. */

#undef M68K_STRUCT_VALUE_REGNUM
#define M68K_STRUCT_VALUE_REGNUM 8

#define ASM_COMMENT_START "|"

/* Define how the m68k registers should be numbered for Dwarf output.
   The numbering provided here should be compatible with the native
   SVR4 SDB debugger in the m68k/SVR4 reference port, where d0-d7
   are 0-7, a0-a7 are 8-15, and fp0-fp7 are 16-23. */

#undef DBX_REGISTER_NUMBER
#define DBX_REGISTER_NUMBER(REGNO) (REGNO)

/* The ASM_OUTPUT_SKIP macro is first defined in m68k.h, using ".skip".
   It is then overridden by m68k/sgs.h to use ".space", and again by svr4.h
   to use ".zero".  The m68k/SVR4 assembler uses ".space", so repeat the
   definition from m68k/sgs.h here.  Note that ASM_NO_SKIP_IN_TEXT is
   defined in m68k/sgs.h, so we don't have to repeat it here. */

#undef ASM_OUTPUT_SKIP
#define ASM_OUTPUT_SKIP(FILE,SIZE) \
  fprintf (FILE, "%s%u\n", SPACE_ASM_OP, (int)(SIZE))

#if 0
/* The SVR4 m68k assembler chokes on `comm i,1,1', which asks for
   1-byte alignment.  Not generating alignment for COMMON seems to be
   safer until the assembler is fixed. */
#undef ASM_OUTPUT_ALIGNED_COMMON
/* Same problem with this one. */
#undef ASM_OUTPUT_ALIGNED_LOCAL
#endif
 
/* The `string' directive on m68k svr4 does not handle strings with
   escape chars (i.e., `\') right, so outputting ASCII data byte by
   byte seems safer.  */
#undef ASM_OUTPUT_ASCII
#define ASM_OUTPUT_ASCII(FILE,PTR,LEN)				\
  do {								\
    register int sp = 0, ch;					\
    fputs (integer_asm_op (1, TRUE), (FILE));			\
    do {							\
	ch = (PTR)[sp];						\
	if (ch > ' ' && ! (ch & 0x80) && ch != '\\')		\
	  {							\
	    fprintf ((FILE), "'%c", ch);			\
	  }							\
	else							\
	  {							\
	    fprintf ((FILE), "0x%x", ch);			\
	  }							\
	if (++sp < (LEN))					\
	  {							\
	    if ((sp % 10) == 0)					\
	      {							\
		fprintf ((FILE), "\n%s", integer_asm_op (1, TRUE)); \
	      }							\
	    else						\
	      {							\
		putc (',', (FILE));				\
	      }							\
	  }							\
    } while (sp < (LEN));					\
    putc ('\n', (FILE));					\
  } while (0)

#undef ASM_OUTPUT_COMMON
#undef ASM_OUTPUT_LOCAL
#define ASM_OUTPUT_COMMON(FILE, NAME, SIZE, ROUNDED)	\
( fputs (".comm ", (FILE)),				\
  assemble_name ((FILE), (NAME)),			\
  fprintf ((FILE), ",%u\n", (int)(SIZE)))

#define ASM_OUTPUT_LOCAL(FILE, NAME, SIZE, ROUNDED)	\
( fputs (".lcomm ", (FILE)),				\
  assemble_name ((FILE), (NAME)),			\
  fprintf ((FILE), ",%u\n", (int)(SIZE)))

/* Currently, JUMP_TABLES_IN_TEXT_SECTION must be defined in order to
   keep switch tables in the text section. */
#define JUMP_TABLES_IN_TEXT_SECTION 1

/* Override the definition in svr4.h.  In m68k svr4, using swbeg is the
   standard way to do switch tables. */
#undef ASM_OUTPUT_BEFORE_CASE_LABEL
#define ASM_OUTPUT_BEFORE_CASE_LABEL(FILE,PREFIX,NUM,TABLE) \
  fprintf ((FILE), "%s&%d\n", SWBEG_ASM_OP, XVECLEN (PATTERN (TABLE), 1));
/* end of stuff from m68kv4.h */

#undef ENDFILE_SPEC
#define ENDFILE_SPEC "crtend.o%s"

#undef STARTFILE_SPEC
#define STARTFILE_SPEC "crtbegin.o%s"

/* If defined, a C expression whose value is a string containing the
   assembler operation to identify the following data as
   uninitialized global data.  If not defined, and neither
   `ASM_OUTPUT_BSS' nor `ASM_OUTPUT_ALIGNED_BSS' are defined,
   uninitialized global data will be output in the data section if
   `-fno-common' is passed, otherwise `ASM_OUTPUT_COMMON' will be
   used. */
#ifndef BSS_SECTION_ASM_OP
#define BSS_SECTION_ASM_OP "\t.section\t.bss"
#endif

/* Like `ASM_OUTPUT_BSS' except takes the required alignment as a
   separate, explicit argument.  If you define this macro, it is used
   in place of `ASM_OUTPUT_BSS', and gives you more flexibility in
   handling the required alignment of the variable.  The alignment is
   specified as the number of bits.

   Try to use function `asm_output_aligned_bss' defined in file
   `varasm.c' when defining this macro. */
#ifndef ASM_OUTPUT_ALIGNED_BSS
#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \
  asm_output_aligned_bss (FILE, DECL, NAME, SIZE, ALIGN)
#endif
/coff.h
0,0 → 1,94
/* Definitions of target machine for GNU compiler.
m68k series COFF object files and debugging, version.
Copyright (C) 1994, 1996, 1997, 2000, 2002, 2003, 2004, 2007
Free Software Foundation, Inc.
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
 
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
 
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
 
/* This file is included after m68k.h by CPU COFF specific files.  It
   is not a complete target itself. */

/* Used in m68k.c to include required support code. */

#define M68K_TARGET_COFF 1

/* Generate sdb debugging information. */

#define SDB_DEBUGGING_INFO 1

/* COFF symbols don't start with an underscore. */

#undef USER_LABEL_PREFIX
#define USER_LABEL_PREFIX ""

/* Use a prefix for local labels, just to be on the safe side. */

#undef LOCAL_LABEL_PREFIX
#define LOCAL_LABEL_PREFIX "."

/* Use a register prefix to avoid clashes with external symbols (classic
   example: `extern char PC;' in termcap). */

#undef REGISTER_PREFIX
#define REGISTER_PREFIX "%"

/* In the machine description we can't use %R, because it will not be seen
   by ASM_FPRINTF.  (Isn't that a design bug?). */

#undef REGISTER_PREFIX_MD
#define REGISTER_PREFIX_MD "%%"

/* config/m68k.md has an explicit reference to the program counter,
   prefix this by the register prefix. */

#define ASM_RETURN_CASE_JUMP				\
  do {							\
    if (TARGET_COLDFIRE)				\
      {							\
	if (ADDRESS_REG_P (operands[0]))		\
	  return "jmp %%pc@(2,%0:l)";			\
	else						\
	  return "ext%.l %0\n\tjmp %%pc@(2,%0:l)";	\
      }							\
    else						\
      return "jmp %%pc@(2,%0:w)";			\
  } while (0)

#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true

/* If defined, a C expression whose value is a string containing the
   assembler operation to identify the following data as uninitialized global
   data. */

#define BSS_SECTION_ASM_OP "\t.section\t.bss"

/* A C statement (sans semicolon) to output to the stdio stream
   FILE the assembler definition of uninitialized global DECL named
   NAME whose size is SIZE bytes and alignment is ALIGN bytes.
   Try to use asm_output_aligned_bss to implement this macro. */

#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \
  asm_output_aligned_bss ((FILE), (DECL), (NAME), (SIZE), (ALIGN))

/* Switch into a generic section. */
#undef TARGET_ASM_NAMED_SECTION
#define TARGET_ASM_NAMED_SECTION m68k_coff_asm_named_section

/* Don't assume anything about startfiles. */

#undef STARTFILE_SPEC
#define STARTFILE_SPEC ""
/lb1sf68.asm
0,0 → 1,4031
/* libgcc routines for 68000 w/o floating-point hardware.
Copyright (C) 1994, 1996, 1997, 1998 Free Software Foundation, Inc.
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.
 
In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file with other programs, and to distribute
those programs without any restriction coming from the use of this
file. (The General Public License restrictions do apply in other
respects; for example, they cover modification of the file, and
distribution when not linked into another program.)
 
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
 
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING. If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA. */
 
/* As a special exception, if you link this library with files
compiled with GCC to produce an executable, this does not cause
the resulting executable to be covered by the GNU General Public License.
This exception does not however invalidate any other reasons why
the executable file might be covered by the GNU General Public License. */
 
/* Use this one for any 680x0; assumes no floating point hardware.
The trailing " '" appearing on some lines is for ANSI preprocessors. Yuk.
Some of this code comes from MINIX, via the folks at ericsson.
D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992
*/
 
/* These are predefined by new versions of GNU cpp. */

#ifndef __USER_LABEL_PREFIX__
#define __USER_LABEL_PREFIX__ _
#endif

#ifndef __REGISTER_PREFIX__
#define __REGISTER_PREFIX__
#endif

#ifndef __IMMEDIATE_PREFIX__
#define __IMMEDIATE_PREFIX__ #
#endif

/* ANSI concatenation macros. */

#define CONCAT1(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a ## b

/* Use the right prefix for global labels. */

#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)

/* Use the right prefix for registers. */

#define REG(x) CONCAT1 (__REGISTER_PREFIX__, x)

/* Use the right prefix for immediate values. */

#define IMM(x) CONCAT1 (__IMMEDIATE_PREFIX__, x)

/* Shorthand so the assembly below can name registers directly,
   whatever the assembler's register prefix turns out to be. */
#define d0 REG (d0)
#define d1 REG (d1)
#define d2 REG (d2)
#define d3 REG (d3)
#define d4 REG (d4)
#define d5 REG (d5)
#define d6 REG (d6)
#define d7 REG (d7)
#define a0 REG (a0)
#define a1 REG (a1)
#define a2 REG (a2)
#define a3 REG (a3)
#define a4 REG (a4)
#define a5 REG (a5)
#define a6 REG (a6)
#define fp REG (fp)
#define sp REG (sp)
#define pc REG (pc)
 
/* Provide a few macros to allow for PIC code support.
 * With PIC, data is stored A5 relative so we've got to take a bit of special
 * care to ensure that all loads of global data is via A5.  PIC also requires
 * jumps and subroutine calls to be PC relative rather than absolute.  We cheat
 * a little on this and in the PIC case, we use short offset branches and
 * hope that the final object code is within range (which it should be).
 */
#ifndef __PIC__

	/* Non PIC (absolute/relocatable) versions */

	/* Call / jump to an absolute address.  */
	.macro PICCALL addr
	jbsr	\addr
	.endm

	.macro PICJUMP addr
	jmp	\addr
	.endm

	/* Load the address of SYM into REG / push it on the stack.  */
	.macro PICLEA sym, reg
	lea	\sym, \reg
	.endm

	.macro PICPEA sym, areg
	pea	\sym
	.endm

#else /* __PIC__ */

	/* Common for -mid-shared-libary and -msep-data:
	   PC-relative call and jump.  */

	.macro PICCALL addr
	bsr	\addr
	.endm

	.macro PICJUMP addr
	bra	\addr
	.endm

# if defined(__ID_SHARED_LIBRARY__)

	/* -mid-shared-library versions: indirect through the
	   library's GOT, found via a5.  */

	.macro PICLEA sym, reg
	movel	a5@(_current_shared_library_a5_offset_), \reg
	movel	\sym@GOT(\reg), \reg
	.endm

	.macro PICPEA sym, areg
	movel	a5@(_current_shared_library_a5_offset_), \areg
	movel	\sym@GOT(\areg), sp@-
	.endm

# else /* !__ID_SHARED_LIBRARY__ */

	/* Versions for -msep-data: the GOT is addressed
	   directly off a5.  */

	.macro PICLEA sym, reg
	movel	\sym@GOT(a5), \reg
	.endm

	.macro PICPEA sym, areg
	movel	\sym@GOT(a5), sp@-
	.endm

# endif /* !__ID_SHARED_LIBRARY__ */
#endif /* __PIC__ */
 
 
#ifdef L_floatex

| This is an attempt at a decent floating point (single, double and
| extended double) code for the GNU C compiler. It should be easy to
| adapt to other compilers (but beware of the local labels!).

| Starting date: 21 October, 1990

| It is convenient to introduce the notation (s,e,f) for a floating point
| number, where s=sign, e=exponent, f=fraction. We will call a floating
| point number fpn to abbreviate, independently of the precision.
| Let MAX_EXP be in each case the maximum exponent (255 for floats, 1023
| for doubles and 16383 for long doubles). We then have the following
| different cases:
|  1. Normalized fpns have 0 < e < MAX_EXP. They correspond to
|     (-1)^s x 1.f x 2^(e-bias-1).
|  2. Denormalized fpns have e=0. They correspond to numbers of the form
|     (-1)^s x 0.f x 2^(-bias).
|  3. +/-INFINITY have e=MAX_EXP, f=0.
|  4. Quiet NaN (Not a Number) have all bits set.
|  5. Signaling NaN (Not a Number) have s=0, e=MAX_EXP, f=1.

|=============================================================================
| exceptions
|=============================================================================

| This is the floating point condition code register (_fpCCR):
|
| struct {
|     short _exception_bits;
|     short _trap_enable_bits;
|     short _sticky_bits;
|     short _rounding_mode;
|     short _format;
|     short _last_operation;
|     union {
|         float sf;
|         double df;
|     } _operand1;
|     union {
|         float sf;
|         double df;
|     } _operand2;
| } _fpCCR;

| The data laid out below must match the struct sketched above,
| field for field.
	.data
	.even

	.globl	SYM (_fpCCR)

SYM (_fpCCR):
__exception_bits:
	.word	0
__trap_enable_bits:
	.word	0
__sticky_bits:
	.word	0
__rounding_mode:
	.word	ROUND_TO_NEAREST
__format:
	.word	NIL
__last_operation:
	.word	NOOP
__operand1:
	.long	0
	.long	0
__operand2:
	.long	0
	.long	0

| Offsets of each field from the start of _fpCCR, for aN@(OFFSET)
| addressing in the routines below:
EBITS = __exception_bits - SYM (_fpCCR)
TRAPE = __trap_enable_bits - SYM (_fpCCR)
STICK = __sticky_bits - SYM (_fpCCR)
ROUND = __rounding_mode - SYM (_fpCCR)
FORMT = __format - SYM (_fpCCR)
LASTO = __last_operation - SYM (_fpCCR)
OPER1 = __operand1 - SYM (_fpCCR)
OPER2 = __operand2 - SYM (_fpCCR)

| The following exception types are supported:
INEXACT_RESULT		= 0x0001
UNDERFLOW		= 0x0002
OVERFLOW		= 0x0004
DIVIDE_BY_ZERO		= 0x0008
INVALID_OPERATION	= 0x0010

| The allowed rounding modes are:
UNKNOWN           = -1
ROUND_TO_NEAREST  = 0 | round result to nearest representable value
ROUND_TO_ZERO     = 1 | round result towards zero
ROUND_TO_PLUS     = 2 | round result towards plus infinity
ROUND_TO_MINUS    = 3 | round result towards minus infinity

| The allowed values of format are:
NIL          = 0
SINGLE_FLOAT = 1
DOUBLE_FLOAT = 2
LONG_FLOAT   = 3

| The allowed values for the last operation are:
NOOP         = 0
ADD          = 1
MULTIPLY     = 2
DIVIDE       = 3
NEGATE       = 4
COMPARE      = 5
EXTENDSFDF   = 6
TRUNCDFSF    = 7
 
|=============================================================================
| __clear_sticky_bits
|=============================================================================

| The sticky bits are normally not cleared (thus the name), whereas the
| exception type and exception value reflect the last computation.
| This routine is provided to clear them (you can also write to _fpCCR,
| since it is globally visible).

| NOTE(review): the banner and prototype comment say __clear_sticky_bits,
| but the symbol actually exported below is __clear_sticky_bit (no final
| "s") -- confirm which name callers expect before renaming anything.

	.globl	SYM (__clear_sticky_bit)

	.text
	.even

| void __clear_sticky_bits(void);
SYM (__clear_sticky_bit):
	PICLEA	SYM (_fpCCR),a0
#ifndef __mcoldfire__
	movew	IMM (0),a0@(STICK)
#else
	| ColdFire lacks movew-immediate-to-memory; use clr.w instead.
	clr.w	a0@(STICK)
#endif
	rts
 
|=============================================================================
| $_exception_handler
|=============================================================================
 
.globl $_exception_handler
 
.text
.even
 
| This is the common exit point if an exception occurs.
| NOTE: it is NOT callable from C!
| It expects the exception type in d7, the format (SINGLE_FLOAT,
| DOUBLE_FLOAT or LONG_FLOAT) in d6, and the last operation code in d5.
| It sets the corresponding exception and sticky bits, and the format.
| Depending on the format it fills the corresponding slots for the
| operands which produced the exception (all this information is provided
| so if you write your own exception handlers you have enough information
| to deal with the problem).
| Then checks to see if the corresponding exception is trap-enabled,
| in which case it pushes the address of _fpCCR and traps through
| trap FPTRAP (15 for the moment).
| It is jumped to (not called) with the faulting routine's a6 frame still
| live: the original operands are read back from a6@(8) onwards.
 
FPTRAP = 15
 
$_exception_handler:
PICLEA SYM (_fpCCR),a0
movew d7,a0@(EBITS) | set __exception_bits
#ifndef __mcoldfire__
orw d7,a0@(STICK) | and __sticky_bits
#else
movew a0@(STICK),d4
orl d7,d4
movew d4,a0@(STICK)
#endif
movew d6,a0@(FORMT) | and __format
movew d5,a0@(LASTO) | and __last_operation
 
| Now put the operands in place:
| (doubles occupy two longs each on the caller's frame; floats one)
#ifndef __mcoldfire__
cmpw IMM (SINGLE_FLOAT),d6
#else
cmpl IMM (SINGLE_FLOAT),d6
#endif
beq 1f
movel a6@(8),a0@(OPER1)
movel a6@(12),a0@(OPER1+4)
movel a6@(16),a0@(OPER2)
movel a6@(20),a0@(OPER2+4)
bra 2f
1: movel a6@(8),a0@(OPER1)
movel a6@(12),a0@(OPER2)
2:
| And check whether the exception is trap-enabled:
#ifndef __mcoldfire__
andw a0@(TRAPE),d7 | is exception trap-enabled?
#else
clrl d6
movew a0@(TRAPE),d6
andl d6,d7
#endif
beq 1f | no, exit
PICPEA SYM (_fpCCR),a1 | yes, push address of _fpCCR
trap IMM (FPTRAP) | and trap
| Epilogue: restore the d2-d7 saved by the faulting routine's prologue
| and tear down its frame — this code returns on behalf of that routine.
#ifndef __mcoldfire__
1: moveml sp@+,d2-d7 | restore data registers
#else
1: moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6 | and return
rts
#endif /* L_floatex */
 
#ifdef L_mulsi3
| unsigned long __mulsi3 (unsigned long x, unsigned long y);
| 32x32->32 multiply built from three 16x16->32 muluw partial products.
| Writing x = x0:x1 and y = y0:y1 (high:low 16-bit halves, big-endian
| stack layout), the result is ((x0*y1 + x1*y0) << 16) + x1*y1 — the
| x0*y0 term would only affect bits above 32 and is dropped.
.text
.proc
.globl SYM (__mulsi3)
SYM (__mulsi3):
movew sp@(4), d0 /* x0 -> d0 */
muluw sp@(10), d0 /* x0*y1 */
movew sp@(6), d1 /* x1 -> d1 */
muluw sp@(8), d1 /* x1*y0 */
#ifndef __mcoldfire__
addw d1, d0
#else
addl d1, d0
#endif
swap d0
clrw d0
movew sp@(6), d1 /* x1 -> d1 */
muluw sp@(10), d1 /* x1*y1 */
addl d1, d0
 
rts
#endif /* L_mulsi3 */
 
#ifdef L_udivsi3
| unsigned long __udivsi3 (unsigned long dividend, unsigned long divisor);
| Returns dividend / divisor in d0.  Two implementations:
|  - 68k: uses the hardware 32/16 divu directly when the divisor fits in
|    16 bits, otherwise shifts both operands down and corrects a
|    tentative quotient that may be off by one.
|  - ColdFire: bit-at-a-time non-restoring division (see comment below).
.text
.proc
.globl SYM (__udivsi3)
SYM (__udivsi3):
#ifndef __mcoldfire__
movel d2, sp@-
movel sp@(12), d1 /* d1 = divisor */
movel sp@(8), d0 /* d0 = dividend */
 
cmpl IMM (0x10000), d1 /* divisor >= 2 ^ 16 ? */
jcc L3 /* then try next algorithm */
movel d0, d2
clrw d2
swap d2
divu d1, d2 /* high quotient in lower word */
movew d2, d0 /* save high quotient */
swap d0
movew sp@(10), d2 /* get low dividend + high rest */
divu d1, d2 /* low quotient */
movew d2, d0
jra L6
 
L3: movel d1, d2 /* use d2 as divisor backup */
L4: lsrl IMM (1), d1 /* shift divisor */
lsrl IMM (1), d0 /* shift dividend */
cmpl IMM (0x10000), d1 /* still divisor >= 2 ^ 16 ? */
jcc L4
divu d1, d0 /* now we have 16-bit divisor */
andl IMM (0xffff), d0 /* mask out garbage, ignore remainder */
 
/* Multiply the 16-bit tentative quotient with the 32-bit divisor. Because of
the operand ranges, this might give a 33-bit product. If this product is
greater than the dividend, the tentative quotient was too large. */
movel d2, d1
mulu d0, d1 /* low part, 32 bits */
swap d2
mulu d0, d2 /* high part, at most 17 bits */
swap d2 /* align high part with low part */
tstw d2 /* high part 17 bits? */
jne L5 /* if 17 bits, quotient was too large */
addl d2, d1 /* add parts */
jcs L5 /* if sum is 33 bits, quotient was too large */
cmpl sp@(8), d1 /* compare the sum with the dividend */
jls L6 /* if sum > dividend, quotient was too large */
L5: subql IMM (1), d0 /* adjust quotient */
 
L6: movel sp@+, d2
rts
 
#else /* __mcoldfire__ */
 
/* ColdFire implementation of non-restoring division algorithm from
Hennessy & Patterson, Appendix A. */
link a6,IMM (-12)
moveml d2-d4,sp@
movel a6@(8),d0
movel a6@(12),d1
clrl d2 | clear p
moveq IMM (31),d4
L1: addl d0,d0 | shift reg pair (p,a) one bit left
addxl d2,d2
movl d2,d3 | subtract b from p, store in tmp.
subl d1,d3
jcs L2 | if no carry,
bset IMM (0),d0 | set the low order bit of a to 1,
movl d3,d2 | and store tmp in p.
L2: subql IMM (1),d4
jcc L1
moveml sp@,d2-d4 | restore data registers
unlk a6 | and return
rts
#endif /* __mcoldfire__ */
 
#endif /* L_udivsi3 */
 
#ifdef L_divsi3
| long __divsi3 (long dividend, long divisor);
| Signed truncating division.  The sign of the result is accumulated in
| d2 (+1 or -1) while the operands are replaced by their absolute
| values, the unsigned quotient is computed by __udivsi3, and the
| result is negated if the operand signs differed.
.text
.proc
.globl SYM (__divsi3)
SYM (__divsi3):
movel d2, sp@-
 
moveq IMM (1), d2 /* sign of result stored in d2 (=1 or =-1) */
movel sp@(12), d1 /* d1 = divisor */
jpl L1
negl d1
#ifndef __mcoldfire__
negb d2 /* change sign because divisor <0 */
#else
negl d2 /* change sign because divisor <0 */
#endif
L1: movel sp@(8), d0 /* d0 = dividend */
jpl L2
negl d0
#ifndef __mcoldfire__
negb d2
#else
negl d2
#endif
 
L2: movel d1, sp@-
movel d0, sp@-
PICCALL SYM (__udivsi3) /* divide abs(dividend) by abs(divisor) */
addql IMM (8), sp
 
tstb d2
jpl L3
negl d0
 
L3: movel sp@+, d2
rts
#endif /* L_divsi3 */
 
#ifdef L_umodsi3
| unsigned long __umodsi3 (unsigned long dividend, unsigned long divisor);
| Remainder computed as a - (a/b)*b using __udivsi3 (and __mulsi3 on
| plain 68000, which lacks a 32x32 multiply; ColdFire uses mulsl).
.text
.proc
.globl SYM (__umodsi3)
SYM (__umodsi3):
movel sp@(8), d1 /* d1 = divisor */
movel sp@(4), d0 /* d0 = dividend */
movel d1, sp@-
movel d0, sp@-
PICCALL SYM (__udivsi3)
addql IMM (8), sp
movel sp@(8), d1 /* d1 = divisor */
#ifndef __mcoldfire__
movel d1, sp@-
movel d0, sp@-
PICCALL SYM (__mulsi3) /* d0 = (a/b)*b */
addql IMM (8), sp
#else
mulsl d1,d0
#endif
movel sp@(4), d1 /* d1 = dividend */
subl d0, d1 /* d1 = a - (a/b)*b */
movel d1, d0
rts
#endif /* L_umodsi3 */
 
#ifdef L_modsi3
| long __modsi3 (long dividend, long divisor);
| Signed remainder computed as a - (a/b)*b with __divsi3 doing the
| truncating division, so the result carries the sign of the dividend.
.text
.proc
.globl SYM (__modsi3)
SYM (__modsi3):
movel sp@(8), d1 /* d1 = divisor */
movel sp@(4), d0 /* d0 = dividend */
movel d1, sp@-
movel d0, sp@-
PICCALL SYM (__divsi3)
addql IMM (8), sp
movel sp@(8), d1 /* d1 = divisor */
#ifndef __mcoldfire__
movel d1, sp@-
movel d0, sp@-
PICCALL SYM (__mulsi3) /* d0 = (a/b)*b */
addql IMM (8), sp
#else
mulsl d1,d0
#endif
movel sp@(4), d1 /* d1 = dividend */
subl d0, d1 /* d1 = a - (a/b)*b */
movel d1, d0
rts
#endif /* L_modsi3 */
 
 
#ifdef L_double
 
.globl SYM (_fpCCR)
.globl $_exception_handler
 
| Pattern stored in both result words for a NaN return (exponent all
| ones, non-zero fraction) — see Ld$inop below.
QUIET_NaN = 0xffffffff
 
| IEEE double parameters.  D_BIAS is the exponent bias as used by the
| normalization code in this file (note it is 1022, not 1023, because
| the routines keep the hidden bit explicit).
D_MAX_EXP = 0x07ff
D_BIAS = 1022
DBL_MAX_EXP = D_MAX_EXP - D_BIAS
DBL_MIN_EXP = 1 - D_BIAS
DBL_MANT_DIG = 53
 
| Exception bit masks (must agree with the L_floatex definitions):
INEXACT_RESULT = 0x0001
UNDERFLOW = 0x0002
OVERFLOW = 0x0004
DIVIDE_BY_ZERO = 0x0008
INVALID_OPERATION = 0x0010
 
DOUBLE_FLOAT = 2
 
| Operation codes reported in _fpCCR.__last_operation:
NOOP = 0
ADD = 1
MULTIPLY = 2
DIVIDE = 3
NEGATE = 4
COMPARE = 5
EXTENDSFDF = 6
TRUNCDFSF = 7
 
UNKNOWN = -1
ROUND_TO_NEAREST = 0 | round result to nearest representable value
ROUND_TO_ZERO = 1 | round result towards zero
ROUND_TO_PLUS = 2 | round result towards plus infinity
ROUND_TO_MINUS = 3 | round result towards minus infinity
 
| Entry points:
 
.globl SYM (__adddf3)
.globl SYM (__subdf3)
.globl SYM (__muldf3)
.globl SYM (__divdf3)
.globl SYM (__negdf2)
.globl SYM (__cmpdf2)
.globl SYM (__cmpdf2_internal)
 
.text
.even
 
| These are common routines to return and signal exceptions.
| Each stub loads the double result into d0-d1, the exception mask into
| d7 and the format into d6, then jumps to $_exception_handler (the
| caller has already set the operation code in d5 and, where a sign is
| needed, left the sign bit in d7 on entry).
 
Ld$den:
| Return and signal a denormalized number
orl d7,d0 | d7 carries the result's sign bit on entry; merge it in
movew IMM (INEXACT_RESULT+UNDERFLOW),d7
moveq IMM (DOUBLE_FLOAT),d6
PICJUMP $_exception_handler
 
Ld$infty:
Ld$overflow:
| Return a properly signed INFINITY and set the exception flags
movel IMM (0x7ff00000),d0
movel IMM (0),d1
orl d7,d0
movew IMM (INEXACT_RESULT+OVERFLOW),d7
moveq IMM (DOUBLE_FLOAT),d6
PICJUMP $_exception_handler
 
Ld$underflow:
| Return 0 and set the exception flags
movel IMM (0),d0
movel d0,d1
movew IMM (INEXACT_RESULT+UNDERFLOW),d7
moveq IMM (DOUBLE_FLOAT),d6
PICJUMP $_exception_handler
 
Ld$inop:
| Return a quiet NaN and set the exception flags
movel IMM (QUIET_NaN),d0
movel d0,d1
movew IMM (INEXACT_RESULT+INVALID_OPERATION),d7
moveq IMM (DOUBLE_FLOAT),d6
PICJUMP $_exception_handler
 
Ld$div$0:
| Return a properly signed INFINITY and set the exception flags
movel IMM (0x7ff00000),d0
movel IMM (0),d1
orl d7,d0
movew IMM (INEXACT_RESULT+DIVIDE_BY_ZERO),d7
moveq IMM (DOUBLE_FLOAT),d6
PICJUMP $_exception_handler
 
|=============================================================================
|=============================================================================
| double precision routines
|=============================================================================
|=============================================================================
 
| A double precision floating point number (double) has the format:
|
| struct _double {
| unsigned int sign : 1; /* sign bit */
| unsigned int exponent : 11; /* exponent, biased (D_BIAS = 1022 here) */
| unsigned int fraction : 52; /* fraction */
| } double;
|
| Thus sizeof(double) = 8 (64 bits).
|
| All the routines are callable from C programs, and return the result
| in the register pair d0-d1. They also preserve all registers except
| d0-d1 and a0-a1.
 
|=============================================================================
| __subdf3
|=============================================================================
 
| double __subdf3(double, double);
| Implemented by flipping the sign bit of the second operand where it
| sits on the stack, then falling straight through into __adddf3.
SYM (__subdf3):
bchg IMM (31),sp@(12) | change sign of second operand
| and fall through, so we always add
|=============================================================================
| __adddf3
|=============================================================================
 
| double __adddf3(double, double);
| Result returned in d0-d1; d2-d7 are saved/restored by the prologue and
| the shared Ladddf$ret epilogue.  The fractions are kept shifted left
| one bit throughout so an extra bit of precision is available for
| rounding.
SYM (__adddf3):
#ifndef __mcoldfire__
link a6,IMM (0) | everything will be done in registers
moveml d2-d7,sp@- | save all data registers (but d0-d1)
#else
link a6,IMM (-24)
moveml d2-d7,sp@
#endif
movel a6@(8),d0 | get first operand
movel a6@(12),d1 |
movel a6@(16),d2 | get second operand
movel a6@(20),d3 |
 
movel d0,d7 | get d0's sign bit in d7 '
addl d1,d1 | check and clear sign bit of a, and gain one
addxl d0,d0 | bit of extra precision
beq Ladddf$b | if zero return second operand
 
movel d2,d6 | save sign in d6
addl d3,d3 | get rid of sign bit and gain one bit of
addxl d2,d2 | extra precision
beq Ladddf$a | if zero return first operand
 
andl IMM (0x80000000),d7 | isolate a's sign bit '
swap d6 | and also b's sign bit '
#ifndef __mcoldfire__
andw IMM (0x8000),d6 |
orw d6,d7 | and combine them into d7, so that a's sign '
| bit is in the high word and b's is in the '
| low word, so d6 is free to be used
#else
andl IMM (0x8000),d6
orl d6,d7
#endif
movel d7,a0 | now save d7 into a0, so d7 is free to
| be used also
 
| Get the exponents and check for denormalized and/or infinity.
| (the masks are for the left-shifted layout: fraction now occupies
| bits 0-20 of the high word, hidden bit at bit 21)
 
movel IMM (0x001fffff),d6 | mask for the fraction
movel IMM (0x00200000),d7 | mask to put hidden bit back
 
movel d0,d4 |
andl d6,d0 | get fraction in d0
notl d6 | make d6 into mask for the exponent
andl d6,d4 | get exponent in d4
beq Ladddf$a$den | branch if a is denormalized
cmpl d6,d4 | check for INFINITY or NaN
beq Ladddf$nf |
orl d7,d0 | and put hidden bit back
Ladddf$1:
swap d4 | shift right exponent so that it starts
#ifndef __mcoldfire__
lsrw IMM (5),d4 | in bit 0 and not bit 20
#else
lsrl IMM (5),d4 | in bit 0 and not bit 20
#endif
| Now we have a's exponent in d4 and fraction in d0-d1 '
movel d2,d5 | save b to get exponent
andl d6,d5 | get exponent in d5
beq Ladddf$b$den | branch if b is denormalized
cmpl d6,d5 | check for INFINITY or NaN
beq Ladddf$nf
notl d6 | make d6 into mask for the fraction again
andl d6,d2 | and get fraction in d2
orl d7,d2 | and put hidden bit back
Ladddf$2:
swap d5 | shift right exponent so that it starts
#ifndef __mcoldfire__
lsrw IMM (5),d5 | in bit 0 and not bit 20
#else
lsrl IMM (5),d5 | in bit 0 and not bit 20
#endif
 
| Now we have b's exponent in d5 and fraction in d2-d3. '
 
| The situation now is as follows: the signs are combined in a0, the
| numbers are in d0-d1 (a) and d2-d3 (b), and the exponents in d4 (a)
| and d5 (b). To do the rounding correctly we need to keep all the
| bits until the end, so we need to use d0-d1-d2-d3 for the first number
| and d4-d5-d6-d7 for the second. To do this we store (temporarily) the
| exponents in a2-a3.
 
#ifndef __mcoldfire__
moveml a2-a3,sp@- | save the address registers
#else
movel a2,sp@-
movel a3,sp@-
movel a4,sp@-
#endif
 
movel d4,a2 | save the exponents
movel d5,a3 |
 
movel IMM (0),d7 | and move the numbers around
movel d7,d6 |
movel d3,d5 |
movel d2,d4 |
movel d7,d3 |
movel d7,d2 |
 
| Here we shift the numbers until the exponents are the same, and put
| the largest exponent in a2.
#ifndef __mcoldfire__
exg d4,a2 | get exponents back
exg d5,a3 |
cmpw d4,d5 | compare the exponents
#else
movel d4,a4 | get exponents back
movel a2,d4
movel a4,a2
movel d5,a4
movel a3,d5
movel a4,a3
cmpl d4,d5 | compare the exponents
#endif
beq Ladddf$3 | if equal don't shift '
bhi 9f | branch if second exponent is higher
 
| Here we have a's exponent larger than b's, so we have to shift b. We do
| this by using as counter d2:
1: movew d4,d2 | move largest exponent to d2
#ifndef __mcoldfire__
subw d5,d2 | and subtract second exponent
exg d4,a2 | get back the longs we saved
exg d5,a3 |
#else
subl d5,d2 | and subtract second exponent
movel d4,a4 | get back the longs we saved
movel a2,d4
movel a4,a2
movel d5,a4
movel a3,d5
movel a4,a3
#endif
| if difference is too large we don't shift (actually, we can just exit) '
#ifndef __mcoldfire__
cmpw IMM (DBL_MANT_DIG+2),d2
#else
cmpl IMM (DBL_MANT_DIG+2),d2
#endif
bge Ladddf$b$small
#ifndef __mcoldfire__
cmpw IMM (32),d2 | if difference >= 32, shift by longs
#else
cmpl IMM (32),d2 | if difference >= 32, shift by longs
#endif
bge 5f
2:
#ifndef __mcoldfire__
cmpw IMM (16),d2 | if difference >= 16, shift by words
#else
cmpl IMM (16),d2 | if difference >= 16, shift by words
#endif
bge 6f
bra 3f | enter dbra loop
 
| bit-at-a-time right shift of the 128-bit value d4-d5-d6-d7; ColdFire
| lacks roxrl so the carry propagation is done by hand with btst/bset
4:
#ifndef __mcoldfire__
lsrl IMM (1),d4
roxrl IMM (1),d5
roxrl IMM (1),d6
roxrl IMM (1),d7
#else
lsrl IMM (1),d7
btst IMM (0),d6
beq 10f
bset IMM (31),d7
10: lsrl IMM (1),d6
btst IMM (0),d5
beq 11f
bset IMM (31),d6
11: lsrl IMM (1),d5
btst IMM (0),d4
beq 12f
bset IMM (31),d5
12: lsrl IMM (1),d4
#endif
3:
#ifndef __mcoldfire__
dbra d2,4b
#else
subql IMM (1),d2
bpl 4b
#endif
movel IMM (0),d2
movel d2,d3
bra Ladddf$4
5:
movel d6,d7
movel d5,d6
movel d4,d5
movel IMM (0),d4
#ifndef __mcoldfire__
subw IMM (32),d2
#else
subl IMM (32),d2
#endif
bra 2b
6:
movew d6,d7
swap d7
movew d5,d6
swap d6
movew d4,d5
swap d5
movew IMM (0),d4
swap d4
#ifndef __mcoldfire__
subw IMM (16),d2
#else
subl IMM (16),d2
#endif
bra 3b
| Symmetric case: b's exponent is larger, so a (d0-d3) is shifted right
| by the exponent difference (kept in d6).
9:
#ifndef __mcoldfire__
exg d4,d5
movew d4,d6
subw d5,d6 | keep d5 (largest exponent) in d4
exg d4,a2
exg d5,a3
#else
movel d5,d6
movel d4,d5
movel d6,d4
subl d5,d6
movel d4,a4
movel a2,d4
movel a4,a2
movel d5,a4
movel a3,d5
movel a4,a3
#endif
| if difference is too large we don't shift (actually, we can just exit) '
#ifndef __mcoldfire__
cmpw IMM (DBL_MANT_DIG+2),d6
#else
cmpl IMM (DBL_MANT_DIG+2),d6
#endif
bge Ladddf$a$small
#ifndef __mcoldfire__
cmpw IMM (32),d6 | if difference >= 32, shift by longs
#else
cmpl IMM (32),d6 | if difference >= 32, shift by longs
#endif
bge 5f
2:
#ifndef __mcoldfire__
cmpw IMM (16),d6 | if difference >= 16, shift by words
#else
cmpl IMM (16),d6 | if difference >= 16, shift by words
#endif
bge 6f
bra 3f | enter dbra loop
 
4:
#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
roxrl IMM (1),d2
roxrl IMM (1),d3
#else
lsrl IMM (1),d3
btst IMM (0),d2
beq 10f
bset IMM (31),d3
10: lsrl IMM (1),d2
btst IMM (0),d1
beq 11f
bset IMM (31),d2
11: lsrl IMM (1),d1
btst IMM (0),d0
beq 12f
bset IMM (31),d1
12: lsrl IMM (1),d0
#endif
3:
#ifndef __mcoldfire__
dbra d6,4b
#else
subql IMM (1),d6
bpl 4b
#endif
movel IMM (0),d7
movel d7,d6
bra Ladddf$4
5:
movel d2,d3
movel d1,d2
movel d0,d1
movel IMM (0),d0
#ifndef __mcoldfire__
subw IMM (32),d6
#else
subl IMM (32),d6
#endif
bra 2b
6:
movew d2,d3
swap d3
movew d1,d2
swap d2
movew d0,d1
swap d1
movew IMM (0),d0
swap d0
#ifndef __mcoldfire__
subw IMM (16),d6
#else
subl IMM (16),d6
#endif
bra 3b
| Exponents were equal: just fetch them back from a2/a3 (no shifting).
Ladddf$3:
#ifndef __mcoldfire__
exg d4,a2
exg d5,a3
#else
movel d4,a4
movel a2,d4
movel a4,a2
movel d5,a4
movel a3,d5
movel a4,a3
#endif
Ladddf$4:
| Now we have the numbers in d0--d3 and d4--d7, the exponent in a2, and
| the signs in a0.
 
| Here we have to decide whether to add or subtract the numbers:
#ifndef __mcoldfire__
exg d7,a0 | get the signs
exg d6,a3 | a3 is free to be used
#else
movel d7,a4
movel a0,d7
movel a4,a0
movel d6,a4
movel a3,d6
movel a4,a3
#endif
movel d7,d6 |
movew IMM (0),d7 | get a's sign in d7 '
swap d6 |
movew IMM (0),d6 | and b's sign in d6 '
eorl d7,d6 | compare the signs
bmi Lsubdf$0 | if the signs are different we have
| to subtract
#ifndef __mcoldfire__
exg d7,a0 | else we add the numbers
exg d6,a3 |
#else
movel d7,a4
movel a0,d7
movel a4,a0
movel d6,a4
movel a3,d6
movel a4,a3
#endif
| 128-bit add of the aligned fractions, carry rippling through addxl:
addl d7,d3 |
addxl d6,d2 |
addxl d5,d1 |
addxl d4,d0 |
 
movel a2,d4 | return exponent to d4
movel a0,d7 |
andl IMM (0x80000000),d7 | d7 now has the sign
 
#ifndef __mcoldfire__
moveml sp@+,a2-a3
#else
movel sp@+,a4
movel sp@+,a3
movel sp@+,a2
#endif
 
| Before rounding normalize so bit #DBL_MANT_DIG is set (we will consider
| the case of denormalized numbers in the rounding routine itself).
| As in the addition (not in the subtraction!) we could have set
| one more bit we check this:
btst IMM (DBL_MANT_DIG+1),d0
beq 1f
#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
roxrl IMM (1),d2
roxrl IMM (1),d3
addw IMM (1),d4
#else
lsrl IMM (1),d3
btst IMM (0),d2
beq 10f
bset IMM (31),d3
10: lsrl IMM (1),d2
btst IMM (0),d1
beq 11f
bset IMM (31),d2
11: lsrl IMM (1),d1
btst IMM (0),d0
beq 12f
bset IMM (31),d1
12: lsrl IMM (1),d0
addl IMM (1),d4
#endif
1:
| Dispatch to the shared rounding code; it returns to Ladddf$5 via a0.
lea pc@(Ladddf$5),a0 | to return from rounding routine
PICLEA SYM (_fpCCR),a1 | check the rounding mode
#ifdef __mcoldfire__
clrl d6
#endif
movew a1@(6),d6 | rounding mode in d6
beq Lround$to$nearest
#ifndef __mcoldfire__
cmpw IMM (ROUND_TO_PLUS),d6
#else
cmpl IMM (ROUND_TO_PLUS),d6
#endif
bhi Lround$to$minus
blt Lround$to$zero
bra Lround$to$plus
Ladddf$5:
| Put back the exponent and check for overflow
#ifndef __mcoldfire__
cmpw IMM (0x7ff),d4 | is the exponent big?
#else
cmpl IMM (0x7ff),d4 | is the exponent big?
#endif
bge 1f
bclr IMM (DBL_MANT_DIG-1),d0
#ifndef __mcoldfire__
lslw IMM (4),d4 | put exponent back into position
#else
lsll IMM (4),d4 | put exponent back into position
#endif
swap d0 |
#ifndef __mcoldfire__
orw d4,d0 |
#else
orl d4,d0 |
#endif
swap d0 |
bra Ladddf$ret
1:
moveq IMM (ADD),d5
bra Ld$overflow
 
Lsubdf$0:
| Here we do the subtraction.
#ifndef __mcoldfire__
exg d7,a0 | put sign back in a0
exg d6,a3 |
#else
movel d7,a4
movel a0,d7
movel a4,a0
movel d6,a4
movel a3,d6
movel a4,a3
#endif
| 128-bit subtract with borrow rippling through subxl:
subl d7,d3 |
subxl d6,d2 |
subxl d5,d1 |
subxl d4,d0 |
beq Ladddf$ret$1 | if zero just exit
bpl 1f | if positive skip the following
movel a0,d7 |
bchg IMM (31),d7 | change sign bit in d7
movel d7,a0 |
negl d3 |
negxl d2 |
negxl d1 | and negate result
negxl d0 |
1:
movel a2,d4 | return exponent to d4
movel a0,d7
andl IMM (0x80000000),d7 | isolate sign bit
#ifndef __mcoldfire__
moveml sp@+,a2-a3 |
#else
movel sp@+,a4
movel sp@+,a3
movel sp@+,a2
#endif
 
| Before rounding normalize so bit #DBL_MANT_DIG is set (we will consider
| the case of denormalized numbers in the rounding routine itself).
| As in the addition (not in the subtraction!) we could have set
| one more bit we check this:
btst IMM (DBL_MANT_DIG+1),d0
beq 1f
#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
roxrl IMM (1),d2
roxrl IMM (1),d3
addw IMM (1),d4
#else
lsrl IMM (1),d3
btst IMM (0),d2
beq 10f
bset IMM (31),d3
10: lsrl IMM (1),d2
btst IMM (0),d1
beq 11f
bset IMM (31),d2
11: lsrl IMM (1),d1
btst IMM (0),d0
beq 12f
bset IMM (31),d1
12: lsrl IMM (1),d0
addl IMM (1),d4
#endif
1:
lea pc@(Lsubdf$1),a0 | to return from rounding routine
PICLEA SYM (_fpCCR),a1 | check the rounding mode
#ifdef __mcoldfire__
clrl d6
#endif
movew a1@(6),d6 | rounding mode in d6
beq Lround$to$nearest
#ifndef __mcoldfire__
cmpw IMM (ROUND_TO_PLUS),d6
#else
cmpl IMM (ROUND_TO_PLUS),d6
#endif
bhi Lround$to$minus
blt Lround$to$zero
bra Lround$to$plus
Lsubdf$1:
| Put back the exponent and sign (we don't have overflow). '
bclr IMM (DBL_MANT_DIG-1),d0
#ifndef __mcoldfire__
lslw IMM (4),d4 | put exponent back into position
#else
lsll IMM (4),d4 | put exponent back into position
#endif
swap d0 |
#ifndef __mcoldfire__
orw d4,d0 |
#else
orl d4,d0 |
#endif
swap d0 |
bra Ladddf$ret
 
| If one of the numbers was too small (difference of exponents >=
| DBL_MANT_DIG+1) we return the other (and now we don't have to '
| check for finiteness or zero).
Ladddf$a$small:
#ifndef __mcoldfire__
moveml sp@+,a2-a3
#else
movel sp@+,a4
movel sp@+,a3
movel sp@+,a2
#endif
movel a6@(16),d0
movel a6@(20),d1
PICLEA SYM (_fpCCR),a0
movew IMM (0),a0@
#ifndef __mcoldfire__
moveml sp@+,d2-d7 | restore data registers
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6 | and return
rts
 
Ladddf$b$small:
#ifndef __mcoldfire__
moveml sp@+,a2-a3
#else
movel sp@+,a4
movel sp@+,a3
movel sp@+,a2
#endif
movel a6@(8),d0
movel a6@(12),d1
PICLEA SYM (_fpCCR),a0
movew IMM (0),a0@
#ifndef __mcoldfire__
moveml sp@+,d2-d7 | restore data registers
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6 | and return
rts
 
| Denormalized operands: use the minimum exponent (the 0x00200000 still
| in d7 doubles as the value 1 in the shifted exponent field).
Ladddf$a$den:
movel d7,d4 | d7 contains 0x00200000
bra Ladddf$1
 
Ladddf$b$den:
movel d7,d5 | d7 contains 0x00200000
notl d6
bra Ladddf$2
 
| One operand is zero: return the other, taking care not to produce -0
| by accident and to propagate NaN/INFINITY correctly.
Ladddf$b:
| Return b (if a is zero)
movel d2,d0
movel d3,d1
bne 1f | Check if b is -0
cmpl IMM (0x80000000),d0
bne 1f
andl IMM (0x80000000),d7 | Use the sign of a
clrl d0
bra Ladddf$ret
Ladddf$a:
movel a6@(8),d0
movel a6@(12),d1
1:
moveq IMM (ADD),d5
| Check for NaN and +/-INFINITY.
movel d0,d7 |
andl IMM (0x80000000),d7 |
bclr IMM (31),d0 |
cmpl IMM (0x7ff00000),d0 |
bge 2f |
movel d0,d0 | check for zero, since we don't '
bne Ladddf$ret | want to return -0 by mistake
bclr IMM (31),d7 |
bra Ladddf$ret |
2:
andl IMM (0x000fffff),d0 | check for NaN (nonzero fraction)
orl d1,d0 |
bne Ld$inop |
bra Ld$infty |
Ladddf$ret$1:
#ifndef __mcoldfire__
moveml sp@+,a2-a3 | restore regs and exit
#else
movel sp@+,a4
movel sp@+,a3
movel sp@+,a2
#endif
 
Ladddf$ret:
| Normal exit.
PICLEA SYM (_fpCCR),a0
movew IMM (0),a0@
orl d7,d0 | put sign bit back
#ifndef __mcoldfire__
moveml sp@+,d2-d7
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6
rts
 
Ladddf$ret$den:
| Return a denormalized number.
#ifndef __mcoldfire__
lsrl IMM (1),d0 | shift right once more
roxrl IMM (1),d1 |
#else
lsrl IMM (1),d1
btst IMM (0),d0
beq 10f
bset IMM (31),d1
10: lsrl IMM (1),d0
#endif
bra Ladddf$ret
 
| At least one operand is NaN or +/-INFINITY.
Ladddf$nf:
moveq IMM (ADD),d5
| This could be faster but it is not worth the effort, since it is not
| executed very often. We sacrifice speed for clarity here.
movel a6@(8),d0 | get the numbers back (remember that we
movel a6@(12),d1 | did some processing already)
movel a6@(16),d2 |
movel a6@(20),d3 |
movel IMM (0x7ff00000),d4 | useful constant (INFINITY)
movel d0,d7 | save sign bits
movel d2,d6 |
bclr IMM (31),d0 | clear sign bits
bclr IMM (31),d2 |
| We know that one of them is either NaN of +/-INFINITY
| Check for NaN (if either one is NaN return NaN)
cmpl d4,d0 | check first a (d0)
bhi Ld$inop | if d0 > 0x7ff00000 or equal and
bne 2f
tstl d1 | d1 > 0, a is NaN
bne Ld$inop |
2: cmpl d4,d2 | check now b (d2)
bhi Ld$inop |
bne 3f
tstl d3 |
bne Ld$inop |
3:
| Now comes the check for +/-INFINITY. We know that both are (maybe not
| finite) numbers, but we have to check if both are infinite whether we
| are adding or subtracting them.
eorl d7,d6 | to check sign bits
bmi 1f
andl IMM (0x80000000),d7 | get (common) sign bit
bra Ld$infty
1:
| We know one (or both) are infinite, so we test for equality between the
| two numbers (if they are equal they have to be infinite both, so we
| return NaN).
cmpl d2,d0 | are both infinite?
bne 1f | if d0 <> d2 they are not equal
cmpl d3,d1 | if d0 == d2 test d3 and d1
beq Ld$inop | if equal return NaN
1:
andl IMM (0x80000000),d7 | get a's sign bit '
cmpl d4,d0 | test now for infinity
beq Ld$infty | if a is INFINITY return with this sign
bchg IMM (31),d7 | else we know b is INFINITY and has
bra Ld$infty | the opposite sign
 
|=============================================================================
| __muldf3
|=============================================================================
 
| double __muldf3(double, double);
| Result in d0-d1.  Entry code classifies the operands (zero, NaN,
| INFINITY, denormalized) and extracts sign (saved in a0), exponents
| (d4/d5) and fractions with the hidden bit restored.
SYM (__muldf3):
#ifndef __mcoldfire__
link a6,IMM (0)
moveml d2-d7,sp@-
#else
link a6,IMM (-24)
moveml d2-d7,sp@
#endif
movel a6@(8),d0 | get a into d0-d1
movel a6@(12),d1 |
movel a6@(16),d2 | and b into d2-d3
movel a6@(20),d3 |
movel d0,d7 | d7 will hold the sign of the product
eorl d2,d7 |
andl IMM (0x80000000),d7 |
movel d7,a0 | save sign bit into a0
movel IMM (0x7ff00000),d7 | useful constant (+INFINITY)
movel d7,d6 | another (mask for fraction)
notl d6 |
bclr IMM (31),d0 | get rid of a's sign bit '
movel d0,d4 |
orl d1,d4 |
beq Lmuldf$a$0 | branch if a is zero
movel d0,d4 |
bclr IMM (31),d2 | get rid of b's sign bit '
movel d2,d5 |
orl d3,d5 |
beq Lmuldf$b$0 | branch if b is zero
movel d2,d5 |
cmpl d7,d0 | is a big?
bhi Lmuldf$inop | if a is NaN return NaN
beq Lmuldf$a$nf | we still have to check d1 and b ...
cmpl d7,d2 | now compare b with INFINITY
bhi Lmuldf$inop | is b NaN?
beq Lmuldf$b$nf | we still have to check d3 ...
| Here we have both numbers finite and nonzero (and with no sign bit).
| Now we get the exponents into d4 and d5.
andl d7,d4 | isolate exponent in d4
beq Lmuldf$a$den | if exponent zero, have denormalized
andl d6,d0 | isolate fraction
orl IMM (0x00100000),d0 | and put hidden bit back
swap d4 | I like exponents in the first byte
#ifndef __mcoldfire__
lsrw IMM (4),d4 |
#else
lsrl IMM (4),d4 |
#endif
Lmuldf$1:
andl d7,d5 |
beq Lmuldf$b$den |
andl d6,d2 |
orl IMM (0x00100000),d2 | and put hidden bit back
swap d5 |
#ifndef __mcoldfire__
lsrw IMM (4),d5 |
#else
lsrl IMM (4),d5 |
#endif
Lmuldf$2: |
#ifndef __mcoldfire__
addw d5,d4 | add exponents
subw IMM (D_BIAS+1),d4 | and subtract bias (plus one)
#else
addl d5,d4 | add exponents
subl IMM (D_BIAS+1),d4 | and subtract bias (plus one)
#endif
 
| We are now ready to do the multiplication. The situation is as follows:
| both a and b have bit 52 ( bit 20 of d0 and d2) set (even if they were
| denormalized to start with!), which means that in the product bit 104
| (which will correspond to bit 8 of the fourth long) is set.
 
| Here we have to do the product.
| To do it we have to juggle the registers back and forth, as there are not
| enough to keep everything in them. So we use the address registers to keep
| some intermediate data.
 
#ifndef __mcoldfire__
moveml a2-a3,sp@- | save a2 and a3 for temporary use
#else
movel a2,sp@-
movel a3,sp@-
movel a4,sp@-
#endif
movel IMM (0),a2 | a2 is a null register
movel d4,a3 | and a3 will preserve the exponent
 
| First, shift d2-d3 so bit 20 becomes bit 31:
#ifndef __mcoldfire__
rorl IMM (5),d2 | rotate d2 5 places right
swap d2 | and swap it
rorl IMM (5),d3 | do the same thing with d3
swap d3 |
movew d3,d6 | get the rightmost 11 bits of d3
andw IMM (0x07ff),d6 |
orw d6,d2 | and put them into d2
andw IMM (0xf800),d3 | clear those bits in d3
#else
moveq IMM (11),d7 | left shift d2 11 bits
lsll d7,d2
movel d3,d6 | get a copy of d3
lsll d7,d3 | left shift d3 11 bits
andl IMM (0xffe00000),d6 | get the top 11 bits of d3
moveq IMM (21),d7 | right shift them 21 bits
lsrl d7,d6
orl d6,d2 | stick them at the end of d2
#endif
 
movel d2,d6 | move b into d6-d7
movel d3,d7 | move a into d4-d5
movel d0,d4 | and clear d0-d1-d2-d3 (to put result)
movel d1,d5 |
movel IMM (0),d3 |
movel d3,d2 |
movel d3,d1 |
movel d3,d0 |
 
| Classic shift-and-add long multiplication: on each of the
| DBL_MANT_DIG iterations the 128-bit partial sum d0-d3 is shifted left
| and, if the next multiplier bit (shifted out of d6-d7) is set, the
| multiplicand d4-d5 is added in.
| We use a1 as counter:
movel IMM (DBL_MANT_DIG-1),a1
#ifndef __mcoldfire__
exg d7,a1
#else
movel d7,a4
movel a1,d7
movel a4,a1
#endif
 
1:
#ifndef __mcoldfire__
exg d7,a1 | put counter back in a1
#else
movel d7,a4
movel a1,d7
movel a4,a1
#endif
addl d3,d3 | shift sum once left
addxl d2,d2 |
addxl d1,d1 |
addxl d0,d0 |
addl d7,d7 |
addxl d6,d6 |
bcc 2f | if bit clear skip the following
#ifndef __mcoldfire__
exg d7,a2 |
#else
movel d7,a4
movel a2,d7
movel a4,a2
#endif
addl d5,d3 | else add a to the sum
addxl d4,d2 |
addxl d7,d1 |
addxl d7,d0 |
#ifndef __mcoldfire__
exg d7,a2 |
#else
movel d7,a4
movel a2,d7
movel a4,a2
#endif
2:
#ifndef __mcoldfire__
exg d7,a1 | put counter in d7
dbf d7,1b | decrement and branch
#else
movel d7,a4
movel a1,d7
movel a4,a1
subql IMM (1),d7
bpl 1b
#endif
 
movel a3,d4 | restore exponent
#ifndef __mcoldfire__
moveml sp@+,a2-a3
#else
movel sp@+,a4
movel sp@+,a3
movel sp@+,a2
#endif
 
| Now we have the product in d0-d1-d2-d3, with bit 8 of d0 set. The
| first thing to do now is to normalize it so bit 8 becomes bit
| DBL_MANT_DIG-32 (to do the rounding); later we will shift right.
| First a 16-bit left shift via word moves/swaps, then three 1-bit
| right shifts: net shift so the mantissa lands where Lround$* expects.
swap d0
swap d1
movew d1,d0
swap d2
movew d2,d1
swap d3
movew d3,d2
movew IMM (0),d3
#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
roxrl IMM (1),d2
roxrl IMM (1),d3
lsrl IMM (1),d0
roxrl IMM (1),d1
roxrl IMM (1),d2
roxrl IMM (1),d3
lsrl IMM (1),d0
roxrl IMM (1),d1
roxrl IMM (1),d2
roxrl IMM (1),d3
#else
moveq IMM (29),d6
lsrl IMM (3),d3
movel d2,d7
lsll d6,d7
orl d7,d3
lsrl IMM (3),d2
movel d1,d7
lsll d6,d7
orl d7,d2
lsrl IMM (3),d1
movel d0,d7
lsll d6,d7
orl d7,d1
lsrl IMM (3),d0
#endif
| Now round, check for over- and underflow, and exit.
movel a0,d7 | get sign bit back into d7
moveq IMM (MULTIPLY),d5
 
btst IMM (DBL_MANT_DIG+1-32),d0
beq Lround$exit
#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
addw IMM (1),d4
#else
lsrl IMM (1),d1
btst IMM (0),d0
beq 10f
bset IMM (31),d1
10: lsrl IMM (1),d0
addl IMM (1),d4
#endif
bra Lround$exit
 
| Special-case exits: NaN operands, INFINITY operands, zero operands.
Lmuldf$inop:
moveq IMM (MULTIPLY),d5
bra Ld$inop
 
Lmuldf$b$nf:
moveq IMM (MULTIPLY),d5
movel a0,d7 | get sign bit back into d7
tstl d3 | we know d2 == 0x7ff00000, so check d3
bne Ld$inop | if d3 <> 0 b is NaN
bra Ld$overflow | else we have overflow (since a is finite)
 
Lmuldf$a$nf:
moveq IMM (MULTIPLY),d5
movel a0,d7 | get sign bit back into d7
tstl d1 | we know d0 == 0x7ff00000, so check d1
bne Ld$inop | if d1 <> 0 a is NaN
bra Ld$overflow | else signal overflow
 
| If either number is zero return zero, unless the other is +/-INFINITY or
| NaN, in which case we return NaN.
Lmuldf$b$0:
moveq IMM (MULTIPLY),d5
#ifndef __mcoldfire__
exg d2,d0 | put b (==0) into d0-d1
exg d3,d1 | and a (with sign bit cleared) into d2-d3
movel a0,d0 | set result sign
#else
movel d0,d2 | put a into d2-d3
movel d1,d3
movel a0,d0 | put result zero into d0-d1
| NOTE(review): `movq IMM(0),d1` — spelled differently from the
| `moveq IMM (0),...` used elsewhere in this file; confirm the
| assembler accepts both before normalizing.
movq IMM(0),d1
#endif
bra 1f
Lmuldf$a$0:
movel a0,d0 | set result sign
movel a6@(16),d2 | put b into d2-d3 again
movel a6@(20),d3 |
bclr IMM (31),d2 | clear sign bit
1: cmpl IMM (0x7ff00000),d2 | check for non-finiteness
bge Ld$inop | in case NaN or +/-INFINITY return NaN
PICLEA SYM (_fpCCR),a0
movew IMM (0),a0@
#ifndef __mcoldfire__
moveml sp@+,d2-d7
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6
rts
 
| If a number is denormalized we put an exponent of 1 but do not put the
| hidden bit back into the fraction; instead we shift left until bit 21
| (the hidden bit) is set, adjusting the exponent accordingly. We do this
| to ensure that the product of the fractions is close to 1.
Lmuldf$a$den:
movel IMM (1),d4
andl d6,d0
1: addl d1,d1 | shift a left until bit 20 is set
addxl d0,d0 |
#ifndef __mcoldfire__
subw IMM (1),d4 | and adjust exponent
#else
subl IMM (1),d4 | and adjust exponent
#endif
btst IMM (20),d0 |
bne Lmuldf$1 |
bra 1b
 
Lmuldf$b$den:
movel IMM (1),d5
andl d6,d2
1: addl d3,d3 | shift b left until bit 20 is set
addxl d2,d2 |
#ifndef __mcoldfire__
subw IMM (1),d5 | and adjust exponent
#else
subql IMM (1),d5 | and adjust exponent
#endif
btst IMM (20),d2 |
bne Lmuldf$2 |
bra 1b
 
 
|=============================================================================
| __divdf3
|=============================================================================

| double __divdf3(double, double);
| Entry: dividend a at a6@(8..12), divisor b at a6@(16..20).
| Unpacks both operands, dispatches the zero/NaN/INFINITY special cases,
| then isolates the fractions (hidden bit restored) and computes the
| biased result exponent in d4.  Throughout, d7 = 0x7ff00000 (+INFINITY
| pattern) and d6 = ~d7 (fraction mask); a0 holds the result sign.
SYM (__divdf3):
#ifndef __mcoldfire__
link a6,IMM (0)
moveml d2-d7,sp@-
#else
link a6,IMM (-24)
moveml d2-d7,sp@
#endif
movel a6@(8),d0 | get a into d0-d1
movel a6@(12),d1 |
movel a6@(16),d2 | and b into d2-d3
movel a6@(20),d3 |
movel d0,d7 | d7 will hold the sign of the result
eorl d2,d7 |
andl IMM (0x80000000),d7
movel d7,a0 | save sign into a0
movel IMM (0x7ff00000),d7 | useful constant (+INFINITY)
movel d7,d6 | another (mask for fraction)
notl d6 |
bclr IMM (31),d0 | get rid of a's sign bit '
movel d0,d4 |
orl d1,d4 |
beq Ldivdf$a$0 | branch if a is zero
movel d0,d4 |
bclr IMM (31),d2 | get rid of b's sign bit '
movel d2,d5 |
orl d3,d5 |
beq Ldivdf$b$0 | branch if b is zero
movel d2,d5
cmpl d7,d0 | is a big?
bhi Ldivdf$inop | if a is NaN return NaN
beq Ldivdf$a$nf | if d0 == 0x7ff00000 we check d1
cmpl d7,d2 | now compare b with INFINITY
bhi Ldivdf$inop | if b is NaN return NaN
beq Ldivdf$b$nf | if d2 == 0x7ff00000 we check d3
| Here we have both numbers finite and nonzero (and with no sign bit).
| Now we get the exponents into d4 and d5 and normalize the numbers to
| ensure that the ratio of the fractions is around 1. We do this by
| making sure that both numbers have bit #DBL_MANT_DIG-32-1 (hidden bit)
| set, even if they were denormalized to start with.
| Thus, the result will satisfy: 2 > result > 1/2.
andl d7,d4 | and isolate exponent in d4
beq Ldivdf$a$den | if exponent is zero we have a denormalized
andl d6,d0 | and isolate fraction
orl IMM (0x00100000),d0 | and put hidden bit back
swap d4 | I like exponents in the first byte
#ifndef __mcoldfire__
lsrw IMM (4),d4 |
#else
lsrl IMM (4),d4 |
#endif
Ldivdf$1: |
andl d7,d5 |
beq Ldivdf$b$den |
andl d6,d2 |
orl IMM (0x00100000),d2
swap d5 |
#ifndef __mcoldfire__
lsrw IMM (4),d5 |
#else
lsrl IMM (4),d5 |
#endif
Ldivdf$2: |
#ifndef __mcoldfire__
subw d5,d4 | subtract exponents
addw IMM (D_BIAS),d4 | and add bias
#else
subl d5,d4 | subtract exponents
addl IMM (D_BIAS),d4 | and add bias
#endif
 
| We are now ready to do the division. We have prepared things in such a way
| that the ratio of the fractions will be less than 2 but greater than 1/2.
| At this point the registers in use are:
| d0-d1 hold a (first operand, bit DBL_MANT_DIG-32=0, bit
| DBL_MANT_DIG-1-32=1)
| d2-d3 hold b (second operand, bit DBL_MANT_DIG-32=1)
| d4 holds the difference of the exponents, corrected by the bias
| a0 holds the sign of the ratio

| To do the rounding correctly we need to keep information about the
| nonsignificant bits. One way to do this would be to do the division
| using four registers; another is to use two registers (as originally
| I did), but use a sticky bit to preserve information about the
| fractional part. Note that we can keep that info in a1, which is not
| used.
movel IMM (0),d6 | d6-d7 will hold the result
movel d6,d7 |
movel IMM (0),a1 | and a1 will hold the sticky bit

| First quotient loop: restoring shift-and-subtract division producing the
| high-long quotient bits in d6 (a in d0-d1 is the running remainder).
movel IMM (DBL_MANT_DIG-32+1),d5
1: cmpl d0,d2 | is a < b?
bhi 3f | if b > a skip the following
beq 4f | if d0==d2 check d1 and d3
2: subl d3,d1 |
subxl d2,d0 | a <-- a - b
bset d5,d6 | set the corresponding bit in d6
3: addl d1,d1 | shift a by 1
addxl d0,d0 |
#ifndef __mcoldfire__
dbra d5,1b | and branch back
#else
subql IMM (1), d5
bpl 1b
#endif
bra 5f
4: cmpl d1,d3 | here d0==d2, so check d1 and d3
bhi 3b | if d3 > d1 (so b > a) skip the subtraction
bra 2b | else go do it
5:
| Here we have to start setting the bits in the second long.
movel IMM (31),d5 | again d5 is counter

1: cmpl d0,d2 | is a < b?
bhi 3f | if b > a skip the following
beq 4f | if d0==d2 check d1 and d3
2: subl d3,d1 |
subxl d2,d0 | a <-- a - b
bset d5,d7 | set the corresponding bit in d7
3: addl d1,d1 | shift a by 1
addxl d0,d0 |
#ifndef __mcoldfire__
dbra d5,1b | and branch back
#else
subql IMM (1), d5
bpl 1b
#endif
bra 5f
4: cmpl d1,d3 | here d0==d2, so check d1 and d3
bhi 3b | if d3 > d1 (so b > a) skip the subtraction
bra 2b | else go do it
5:
| Now go ahead checking until we hit a one, which we store in d2.
movel IMM (DBL_MANT_DIG),d5
1: cmpl d2,d0 | is a < b?
bhi 4f | if b < a, exit
beq 3f | if d0==d2 check d1 and d3
2: addl d1,d1 | shift a by 1
addxl d0,d0 |
#ifndef __mcoldfire__
dbra d5,1b | and branch back
#else
subql IMM (1), d5
bpl 1b
#endif
movel IMM (0),d2 | here no sticky bit was found
movel d2,d3
bra 5f
3: cmpl d1,d3 | here d0==d2, so check d1 and d3
bhi 2b | if d3 > d1 go back
4:
| Here put the sticky bit in d2-d3 (in the position which actually corresponds
| to it; if you don't do this the algorithm loses in some cases). '
movel IMM (0),d2
movel d2,d3
#ifndef __mcoldfire__
subw IMM (DBL_MANT_DIG),d5
addw IMM (63),d5
cmpw IMM (31),d5
#else
subl IMM (DBL_MANT_DIG),d5
addl IMM (63),d5
cmpl IMM (31),d5
#endif
bhi 2f
1: bset d5,d3
bra 5f
| NOTE(review): the subw/subl IMM (32),d5 below looks unreachable -- the
| bhi 2f above jumps past it and the path through label 1 branches to 5f
| first.  Since bset with the bit number in a data register takes the
| number modulo 32, the bset at 2 still sets the intended bit of d2;
| confirm against the ISA manual before restructuring.
#ifndef __mcoldfire__
subw IMM (32),d5
#else
subl IMM (32),d5
#endif
2: bset d5,d2
5:
 
| Here we have finished the division, with the result in d0-d1-d2-d3, with
| 2^21 <= d0 < 2^23. Thus bit 23 is not set, but bit 22 could be set.
| If it is not, then definitely bit 21 is set. Normalize so bit 22 is
| not set:
btst IMM (DBL_MANT_DIG-32+1),d0
beq 1f
#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
roxrl IMM (1),d2
roxrl IMM (1),d3
addw IMM (1),d4
#else
| (the ColdFire sequence below emulates the 68k roxrl carry chain one
| long at a time, propagating each shifted-out low bit by hand)
lsrl IMM (1),d3
btst IMM (0),d2
beq 10f
bset IMM (31),d3
10: lsrl IMM (1),d2
btst IMM (0),d1
beq 11f
bset IMM (31),d2
11: lsrl IMM (1),d1
btst IMM (0),d0
beq 12f
bset IMM (31),d1
12: lsrl IMM (1),d0
addl IMM (1),d4
#endif
1:
| Now round, check for over- and underflow, and exit.
movel a0,d7 | restore sign bit to d7
moveq IMM (DIVIDE),d5
bra Lround$exit
 
| Invalid operation in __divdf3: tag the operation code in d5 and take the
| common double-precision NaN return path.
Ldivdf$inop:
moveq IMM (DIVIDE),d5
bra Ld$inop
 
| __divdf3 special case: the dividend a is zero (b in d2-d3, sign cleared).
Ldivdf$a$0:
| If a is zero check to see whether b is zero also. In that case return
| NaN; then check if b is NaN, and return NaN also in that case. Else
| return a properly signed zero.
moveq IMM (DIVIDE),d5
bclr IMM (31),d2 |
movel d2,d4 |
orl d3,d4 |
beq Ld$inop | if b is also zero return NaN
cmpl IMM (0x7ff00000),d2 | check for NaN
bhi Ld$inop |
blt 1f | finite b: return zero
tstl d3 | d2 == 0x7ff00000: d3 decides NaN vs INFINITY
bne Ld$inop |
1: movel a0,d0 | else return signed zero
moveq IMM(0),d1 |
PICLEA SYM (_fpCCR),a0 | clear exception flags
movew IMM (0),a0@ |
#ifndef __mcoldfire__
moveml sp@+,d2-d7 |
#else
moveml sp@,d2-d7 |
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6 |
rts |
 
| __divdf3 special case: the divisor b is zero (a nonzero, in d0-d1 with
| the sign bit already cleared; a0 holds the result sign = sign(a)^sign(b)).
Ldivdf$b$0:
moveq IMM (DIVIDE),d5
| If we got here a is not zero. Check if a is NaN; in that case return NaN,
| else return +/-INFINITY. Remember that a is in d0 with the sign bit
| cleared already.
| Fix: the original fell through to the tstl for ANY d0 <= 0x7ff00000, so a
| finite dividend with a nonzero low mantissa word (e.g. 1.1) was wrongly
| treated as NaN.  The blt guard mirrors the identical checks in
| Ldivdf$a$0 and __negdf2: d1 only matters when d0 == 0x7ff00000.
movel a0,d7 | put the result sign bit in d7
cmpl IMM (0x7ff00000),d0 | compare d0 with INFINITY
bhi Ld$inop | if larger it is NaN
blt 1f | if smaller, a is finite
tstl d1 | d0 == 0x7ff00000: d1 decides NaN vs INFINITY
bne Ld$inop |
1: bra Ld$div$0 | else signal DIVIDE_BY_ZERO
 
| Divisor b has the maximal exponent: distinguish NaN from +/-INFINITY.
| A finite number divided by INFINITY yields zero, reported via the
| underflow exit.
Ldivdf$b$nf:
moveq IMM (DIVIDE),d5
| If d2 == 0x7ff00000 we have to check d3.
tstl d3 |
bne Ld$inop | if d3 <> 0, b is NaN
bra Ld$underflow | else b is +/-INFINITY, so signal underflow
 
| Dividend a has the maximal exponent: NaN, or INFINITY divided by a
| finite b (overflow) or by INFINITY/NaN (invalid).
Ldivdf$a$nf:
moveq IMM (DIVIDE),d5
| If d0 == 0x7ff00000 we have to check d1.
tstl d1 |
bne Ld$inop | if d1 <> 0, a is NaN
| If a is INFINITY we have to check b
cmpl d7,d2 | compare b with INFINITY
bge Ld$inop | if b is NaN or INFINITY return NaN
tstl d3 |
bne Ld$inop |
bra Ld$overflow | else return overflow
 
| If a number is denormalized we put an exponent of 1 but do not put the
| bit back into the fraction.
| Normalizes a denormalized dividend: shift a left (exponent in d4
| adjusted down from 1) until the hidden-bit position is set, then rejoin
| the main path at Ldivdf$1.
Ldivdf$a$den:
movel IMM (1),d4
andl d6,d0
1: addl d1,d1 | shift a left until bit 20 is set
addxl d0,d0
#ifndef __mcoldfire__
subw IMM (1),d4 | and adjust exponent
#else
subl IMM (1),d4 | and adjust exponent
#endif
btst IMM (DBL_MANT_DIG-32-1),d0
bne Ldivdf$1
bra 1b
 
| Same normalization for a denormalized divisor b (exponent tracked in d5);
| rejoins the main path at Ldivdf$2.
Ldivdf$b$den:
movel IMM (1),d5
andl d6,d2
1: addl d3,d3 | shift b left until bit 20 is set
addxl d2,d2
#ifndef __mcoldfire__
subw IMM (1),d5 | and adjust exponent
#else
subql IMM (1),d5 | and adjust exponent
#endif
btst IMM (DBL_MANT_DIG-32-1),d2
bne Ldivdf$2
bra 1b
 
Lround$exit:
| This is a common exit point for __muldf3 and __divdf3. When they enter
| this point the sign of the result is in d7, the result in d0-d1, normalized
| so that 2^21 <= d0 < 2^22, and the exponent is in the lower byte of d4.
| Guard/sticky bits ride along in d2-d3; d5 carries the operation code.

| First check for underflow in the exponent:
#ifndef __mcoldfire__
cmpw IMM (-DBL_MANT_DIG-1),d4
#else
cmpl IMM (-DBL_MANT_DIG-1),d4
#endif
blt Ld$underflow
| It could happen that the exponent is less than 1, in which case the
| number is denormalized. In this case we shift right and adjust the
| exponent until it becomes 1 or the fraction is zero (in the latter case
| we signal underflow and return zero).
movel d7,a0 |
movel IMM (0),d6 | use d6-d7 to collect bits flushed right
movel d6,d7 | use d6-d7 to collect bits flushed right
#ifndef __mcoldfire__
cmpw IMM (1),d4 | if the exponent is less than 1 we
#else
cmpl IMM (1),d4 | if the exponent is less than 1 we
#endif
bge 2f | have to shift right (denormalize)
1:
#ifndef __mcoldfire__
addw IMM (1),d4 | adjust the exponent
lsrl IMM (1),d0 | shift right once
roxrl IMM (1),d1 |
roxrl IMM (1),d2 |
roxrl IMM (1),d3 |
roxrl IMM (1),d6 |
roxrl IMM (1),d7 |
cmpw IMM (1),d4 | is the exponent 1 already?
#else
addl IMM (1),d4 | adjust the exponent
lsrl IMM (1),d7
btst IMM (0),d6
beq 13f
bset IMM (31),d7
13: lsrl IMM (1),d6
btst IMM (0),d3
beq 14f
bset IMM (31),d6
14: lsrl IMM (1),d3
btst IMM (0),d2
beq 10f
bset IMM (31),d3
10: lsrl IMM (1),d2
btst IMM (0),d1
beq 11f
bset IMM (31),d2
11: lsrl IMM (1),d1
btst IMM (0),d0
beq 12f
bset IMM (31),d1
12: lsrl IMM (1),d0
cmpl IMM (1),d4 | is the exponent 1 already?
#endif
beq 2f | leave the loop once the exponent reaches 1
bra 1b |
bra Ld$underflow | safety check, shouldn't execute '
2: orl d6,d2 | this is a trick so we don't lose '
orl d7,d3 | the bits which were flushed right
movel a0,d7 | get back sign bit into d7
| Now call the rounding routine (which takes care of denormalized numbers):
lea pc@(Lround$0),a0 | to return from rounding routine
PICLEA SYM (_fpCCR),a1 | check the rounding mode
#ifdef __mcoldfire__
clrl d6
#endif
movew a1@(6),d6 | rounding mode in d6
beq Lround$to$nearest
#ifndef __mcoldfire__
cmpw IMM (ROUND_TO_PLUS),d6
#else
cmpl IMM (ROUND_TO_PLUS),d6
#endif
bhi Lround$to$minus
blt Lround$to$zero
bra Lround$to$plus
| Return point from the rounding routines: pack exponent and sign back
| into d0-d1 and exit, or divert to the overflow/denormal reporters.
Lround$0:
| Here we have a correctly rounded result (either normalized or denormalized).

| Here we should have either a normalized number or a denormalized one, and
| the exponent is necessarily larger or equal to 1 (so we don't have to '
| check again for underflow!). We have to check for overflow or for a
| denormalized number (which also signals underflow).
| Check for overflow (i.e., exponent >= 0x7ff).
#ifndef __mcoldfire__
cmpw IMM (0x07ff),d4
#else
cmpl IMM (0x07ff),d4
#endif
bge Ld$overflow
| Now check for a denormalized number (exponent==0):
movew d4,d4 | (a move to itself just sets the condition codes)
beq Ld$den
1:
| Put back the exponents and sign and return.
#ifndef __mcoldfire__
lslw IMM (4),d4 | exponent back to fourth byte
#else
lsll IMM (4),d4 | exponent back to fourth byte
#endif
bclr IMM (DBL_MANT_DIG-32-1),d0
swap d0 | and put back exponent
#ifndef __mcoldfire__
orw d4,d0 |
#else
orl d4,d0 |
#endif
swap d0 |
orl d7,d0 | and sign also

PICLEA SYM (_fpCCR),a0
movew IMM (0),a0@
#ifndef __mcoldfire__
moveml sp@+,d2-d7
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6
rts
 
|=============================================================================
| __negdf2
|=============================================================================

| double __negdf2(double, double);
| (only the first argument, a6@(8..12), is read)
SYM (__negdf2):
#ifndef __mcoldfire__
link a6,IMM (0)
moveml d2-d7,sp@-
#else
link a6,IMM (-24)
moveml d2-d7,sp@
#endif
moveq IMM (NEGATE),d5
movel a6@(8),d0 | get number to negate in d0-d1
movel a6@(12),d1 |
bchg IMM (31),d0 | negate
movel d0,d2 | make a positive copy (for the tests)
bclr IMM (31),d2 |
movel d2,d4 | check for zero
orl d1,d4 |
beq 2f | if zero (either sign) return +zero
cmpl IMM (0x7ff00000),d2 | compare to +INFINITY
blt 1f | if finite, return
bhi Ld$inop | if larger (fraction not zero) is NaN
tstl d1 | if d2 == 0x7ff00000 check d1
bne Ld$inop |
movel d0,d7 | else get sign and return INFINITY
andl IMM (0x80000000),d7
bra Ld$infty
1: PICLEA SYM (_fpCCR),a0
movew IMM (0),a0@
#ifndef __mcoldfire__
moveml sp@+,d2-d7
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6
rts
2: bclr IMM (31),d0
bra 1b
 
|=============================================================================
| __cmpdf2
|=============================================================================

GREATER = 1
LESS = -1
EQUAL = 0

| int __cmpdf2_internal(double, double, int);
| Returns GREATER/EQUAL/LESS.  The third argument (a6@(24)) is the value
| to hand back when an operand is a NaN detected via Lcmpd$inop.
SYM (__cmpdf2_internal):
#ifndef __mcoldfire__
link a6,IMM (0)
moveml d2-d7,sp@- | save registers
#else
link a6,IMM (-24)
moveml d2-d7,sp@
#endif
moveq IMM (COMPARE),d5
movel a6@(8),d0 | get first operand
movel a6@(12),d1 |
movel a6@(16),d2 | get second operand
movel a6@(20),d3 |
| First check if a and/or b are (+/-) zero and in that case clear
| the sign bit.
movel d0,d6 | copy signs into d6 (a) and d7(b)
bclr IMM (31),d0 | and clear signs in d0 and d2
movel d2,d7 |
bclr IMM (31),d2 |
cmpl IMM (0x7ff00000),d0 | check for a == NaN
bhi Lcmpd$inop | if d0 > 0x7ff00000, a is NaN
beq Lcmpdf$a$nf | if equal can be INFINITY, so check d1
movel d0,d4 | copy into d4 to test for zero
orl d1,d4 |
beq Lcmpdf$a$0 |
Lcmpdf$0:
cmpl IMM (0x7ff00000),d2 | check for b == NaN
bhi Lcmpd$inop | if d2 > 0x7ff00000, b is NaN
beq Lcmpdf$b$nf | if equal can be INFINITY, so check d3
movel d2,d4 |
orl d3,d4 |
beq Lcmpdf$b$0 |
Lcmpdf$1:
| Check the signs
eorl d6,d7
bpl 1f
| If the signs are not equal check if a >= 0
tstl d6
bpl Lcmpdf$a$gt$b | if (a >= 0 && b < 0) => a > b
bmi Lcmpdf$b$gt$a | if (a < 0 && b >= 0) => a < b
1:
| If the signs are equal check for < 0
tstl d6
bpl 1f
| If both are negative exchange them
#ifndef __mcoldfire__
exg d0,d2
exg d1,d3
#else
movel d0,d7
movel d2,d0
movel d7,d2
movel d1,d7
movel d3,d1
movel d7,d3
#endif
1:
| Now that they are positive we just compare them as longs (does this also
| work for denormalized numbers?).
cmpl d0,d2
bhi Lcmpdf$b$gt$a | |b| > |a|
bne Lcmpdf$a$gt$b | |b| < |a|
| If we got here d0 == d2, so we compare d1 and d3.
cmpl d1,d3
bhi Lcmpdf$b$gt$a | |b| > |a|
bne Lcmpdf$a$gt$b | |b| < |a|
| If we got here a == b.
movel IMM (EQUAL),d0
#ifndef __mcoldfire__
moveml sp@+,d2-d7 | put back the registers
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6
rts
Lcmpdf$a$gt$b:
movel IMM (GREATER),d0
#ifndef __mcoldfire__
moveml sp@+,d2-d7 | put back the registers
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6
rts
Lcmpdf$b$gt$a:
movel IMM (LESS),d0
#ifndef __mcoldfire__
moveml sp@+,d2-d7 | put back the registers
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6
rts

| Zero operands: forget the (possibly negative) sign so -0 compares
| equal to +0.
Lcmpdf$a$0:
bclr IMM (31),d6
bra Lcmpdf$0
Lcmpdf$b$0:
bclr IMM (31),d7
bra Lcmpdf$1

| Maximal-exponent operands: nonzero low word means NaN.
| NOTE(review): these branch to Ld$inop (which returns the NaN bit
| pattern through the exception handler) rather than Lcmpd$inop (which
| returns the caller-supplied default) -- looks inconsistent with the
| high-word NaN checks above; confirm against upstream before changing.
Lcmpdf$a$nf:
tstl d1
bne Ld$inop
bra Lcmpdf$0

Lcmpdf$b$nf:
tstl d3
bne Ld$inop
bra Lcmpdf$1
 
| NaN operand in a comparison: return the caller-supplied third argument
| (the "unordered" default) in d0 and report INVALID_OPERATION.
Lcmpd$inop:
movl a6@(24),d0
moveq IMM (INEXACT_RESULT+INVALID_OPERATION),d7
moveq IMM (DOUBLE_FLOAT),d6
PICJUMP $_exception_handler
 
| int __cmpdf2(double, double);
| Public entry: re-pushes both doubles plus a default result of 1 (pea 1
| pushes the constant 1 as the third argument, so a NaN operand makes the
| comparison read as "greater") and calls the internal routine.  The
| pushed arguments are reclaimed by unlk, which restores sp from a6.
SYM (__cmpdf2):
link a6,IMM (0)
pea 1
movl a6@(20),sp@-
movl a6@(16),sp@-
movl a6@(12),sp@-
movl a6@(8),sp@-
bsr SYM (__cmpdf2_internal)
unlk a6
rts
 
|=============================================================================
| rounding routines
|=============================================================================

| The rounding routines expect the number to be normalized in registers
| d0-d1-d2-d3, with the exponent in register d4. They assume that the
| exponent is larger or equal to 1. They return a properly normalized number
| if possible, and a denormalized number otherwise. The exponent is returned
| in d4.  The return address is supplied by the caller in a0.

Lround$to$nearest:
| We now normalize as suggested by D. Knuth ("Seminumerical Algorithms"):
| Here we assume that the exponent is not too small (this should be checked
| before entering the rounding routine), but the number could be denormalized.

| Check for denormalized numbers:
1: btst IMM (DBL_MANT_DIG-32),d0
bne 2f | if set the number is normalized
| Normalize shifting left until bit #DBL_MANT_DIG-32 is set or the exponent
| is one (remember that a denormalized number corresponds to an
| exponent of -D_BIAS+1).
#ifndef __mcoldfire__
cmpw IMM (1),d4 | remember that the exponent is at least one
#else
cmpl IMM (1),d4 | remember that the exponent is at least one
#endif
beq 2f | an exponent of one means denormalized
addl d3,d3 | else shift and adjust the exponent
addxl d2,d2 |
addxl d1,d1 |
addxl d0,d0 |
#ifndef __mcoldfire__
dbra d4,1b |
#else
subql IMM (1), d4
bpl 1b
#endif
2:
| Now round: we do it as follows: after the shifting we can write the
| fraction part as f + delta, where 1 < f < 2^25, and 0 <= delta <= 2.
| If delta < 1, do nothing. If delta > 1, add 1 to f.
| If delta == 1, we make sure the rounded number will be even (odd?)
| (after shifting).
btst IMM (0),d1 | is delta < 1?
beq 2f | if so, do not do anything
orl d2,d3 | is delta == 1?
bne 1f | delta > 1: go add one (fall through = tie, round to even)
movel d1,d3 |
andl IMM (2),d3 | bit 1 is the last significant bit
movel IMM (0),d2 |
addl d3,d1 |
addxl d2,d0 |
bra 2f |
1: movel IMM (1),d3 | else add 1
movel IMM (0),d2 |
addl d3,d1 |
addxl d2,d0
| Shift right once (because we used bit #DBL_MANT_DIG-32!).
2:
#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
#else
lsrl IMM (1),d1
btst IMM (0),d0
beq 10f
bset IMM (31),d1
10: lsrl IMM (1),d0
#endif

| Now check again bit #DBL_MANT_DIG-32 (rounding could have produced a
| 'fraction overflow' ...).
btst IMM (DBL_MANT_DIG-32),d0
beq 1f
#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
addw IMM (1),d4
#else
lsrl IMM (1),d1
btst IMM (0),d0
beq 10f
bset IMM (31),d1
10: lsrl IMM (1),d0
addl IMM (1),d4
#endif
1:
| If bit #DBL_MANT_DIG-32-1 is clear we have a denormalized number, so we
| have to put the exponent to zero and return a denormalized number.
btst IMM (DBL_MANT_DIG-32-1),d0
beq 1f
jmp a0@
1: movel IMM (0),d4
jmp a0@

| The directed modes are not implemented separately: the guard bits are
| simply dropped, i.e. the result is truncated.
Lround$to$zero:
Lround$to$plus:
Lround$to$minus:
jmp a0@
#endif /* L_double */
 
#ifdef L_float
 
.globl SYM (_fpCCR)
.globl $_exception_handler
 
| Bit patterns and parameters for the single-precision (L_float) routines.
QUIET_NaN = 0xffffffff
SIGNL_NaN = 0x7f800001
INFINITY = 0x7f800000

F_MAX_EXP = 0xff
| NOTE(review): F_BIAS is 126 rather than the textbook IEEE 127; the
| routines below are written against this internal convention -- do not
| "fix" it in isolation.
F_BIAS = 126
FLT_MAX_EXP = F_MAX_EXP - F_BIAS
FLT_MIN_EXP = 1 - F_BIAS
FLT_MANT_DIG = 24

| Exception flag bits (combined in d7 for $_exception_handler).
INEXACT_RESULT = 0x0001
UNDERFLOW = 0x0002
OVERFLOW = 0x0004
DIVIDE_BY_ZERO = 0x0008
INVALID_OPERATION = 0x0010

| Format code passed to the exception handler in d6.
SINGLE_FLOAT = 1

| Operation codes: each routine keeps its code in d5 while running.
NOOP = 0
ADD = 1
MULTIPLY = 2
DIVIDE = 3
NEGATE = 4
COMPARE = 5
EXTENDSFDF = 6
TRUNCDFSF = 7

| Rounding modes read from _fpCCR.
UNKNOWN = -1
ROUND_TO_NEAREST = 0 | round result to nearest representable value
ROUND_TO_ZERO = 1 | round result towards zero
ROUND_TO_PLUS = 2 | round result towards plus infinity
ROUND_TO_MINUS = 3 | round result towards minus infinity
 
| Entry points:
 
.globl SYM (__addsf3)
.globl SYM (__subsf3)
.globl SYM (__mulsf3)
.globl SYM (__divsf3)
.globl SYM (__negsf2)
.globl SYM (__cmpsf2)
.globl SYM (__cmpsf2_internal)
 
| These are common routines to return and signal exceptions.
 
.text
.even
 
Lf$den:
| Return and signal a denormalized number
| (on entry d0 holds the fraction and d7 the sign bit; the flag mask and
| format code go to the handler in d7/d6 as everywhere in this file).
orl d7,d0
moveq IMM (INEXACT_RESULT+UNDERFLOW),d7
moveq IMM (SINGLE_FLOAT),d6
PICJUMP $_exception_handler
 
| Common single-precision INFINITY exit (sign expected in d7).
Lf$infty:
Lf$overflow:
| Return a properly signed INFINITY and set the exception flags
movel IMM (INFINITY),d0
orl d7,d0
moveq IMM (INEXACT_RESULT+OVERFLOW),d7
moveq IMM (SINGLE_FLOAT),d6
PICJUMP $_exception_handler
 
Lf$underflow:
| Return 0 and set the exception flags
moveq IMM (0),d0
moveq IMM (INEXACT_RESULT+UNDERFLOW),d7
moveq IMM (SINGLE_FLOAT),d6
PICJUMP $_exception_handler
 
Lf$inop:
| Return a quiet NaN and set the exception flags
movel IMM (QUIET_NaN),d0
moveq IMM (INEXACT_RESULT+INVALID_OPERATION),d7
moveq IMM (SINGLE_FLOAT),d6
PICJUMP $_exception_handler
 
Lf$div$0:
| Return a properly signed INFINITY and set the exception flags
| (division by zero; sign expected in d7).
movel IMM (INFINITY),d0
orl d7,d0
moveq IMM (INEXACT_RESULT+DIVIDE_BY_ZERO),d7
moveq IMM (SINGLE_FLOAT),d6
PICJUMP $_exception_handler
 
|=============================================================================
|=============================================================================
| single precision routines
|=============================================================================
|=============================================================================
 
| A single precision floating point number (float) has the format:
|
| struct _float {
| unsigned int sign : 1; /* sign bit */
| unsigned int exponent : 8; /* exponent, shifted by 126 */
| unsigned int fraction : 23; /* fraction */
| } float;
|
| Thus sizeof(float) = 4 (32 bits).
|
| All the routines are callable from C programs, and return the result
| in the single register d0. They also preserve all registers except
| d0-d1 and a0-a1.
 
|=============================================================================
| __subsf3
|=============================================================================

| float __subsf3(float, float);
| a - b is computed as a + (-b): flip the sign bit of b in place on the
| stack, then fall into __addsf3.
SYM (__subsf3):
bchg IMM (31),sp@(8) | change sign of second operand
| and fall through
|=============================================================================
| __addsf3
|=============================================================================

| float __addsf3(float, float);
| Unpack both operands; zero, denormalized and INFINITY/NaN operands
| branch to the labels further down.  a0 and a1 keep copies of a and b
| from which the sign bits are extracted later.
SYM (__addsf3):
#ifndef __mcoldfire__
link a6,IMM (0) | everything will be done in registers
moveml d2-d7,sp@- | save all data registers but d0-d1
#else
link a6,IMM (-24)
moveml d2-d7,sp@
#endif
movel a6@(8),d0 | get first operand
movel a6@(12),d1 | get second operand
movel d0,a0 | get d0's sign bit '
addl d0,d0 | check and clear sign bit of a
beq Laddsf$b | if zero return second operand
movel d1,a1 | save b's sign bit '
addl d1,d1 | get rid of sign bit
beq Laddsf$a | if zero return first operand

| Get the exponents and check for denormalized and/or infinity.

movel IMM (0x00ffffff),d4 | mask to get fraction
movel IMM (0x01000000),d5 | mask to put hidden bit back

movel d0,d6 | save a to get exponent
andl d4,d0 | get fraction in d0
notl d4 | make d4 into a mask for the exponent
andl d4,d6 | get exponent in d6
beq Laddsf$a$den | branch if a is denormalized
cmpl d4,d6 | check for INFINITY or NaN
beq Laddsf$nf
swap d6 | put exponent into first word
orl d5,d0 | and put hidden bit back
Laddsf$1:
| Now we have a's exponent in d6 (second byte) and the mantissa in d0. '
movel d1,d7 | get exponent in d7
andl d4,d7 |
beq Laddsf$b$den | branch if b is denormalized
cmpl d4,d7 | check for INFINITY or NaN
beq Laddsf$nf
swap d7 | put exponent into first word
notl d4 | make d4 into a mask for the fraction
andl d4,d1 | get fraction in d1
orl d5,d1 | and put hidden bit back
Laddsf$2:
| Now we have b's exponent in d7 (second byte) and the mantissa in d1. '

| Note that the hidden bit corresponds to bit #FLT_MANT_DIG-1, and we
| shifted left once, so bit #FLT_MANT_DIG is set (so we have one extra
| bit).

movel d1,d2 | move b to d2, since we want to use
| two registers to do the sum
movel IMM (0),d1 | and clear the new ones
movel d1,d3 |
 
| Here we shift the numbers in registers d0 and d1 so the exponents are the
| same, and put the largest exponent in d6. Note that we are using two
| registers for each number (see the discussion by D. Knuth in "Seminumerical
| Algorithms").
#ifndef __mcoldfire__
cmpw d6,d7 | compare exponents
#else
cmpl d6,d7 | compare exponents
#endif
beq Laddsf$3 | if equal don't shift '
bhi 5f | branch if second exponent largest
1:
subl d6,d7 | keep the largest exponent
negl d7
#ifndef __mcoldfire__
lsrw IMM (8),d7 | put difference in lower byte
#else
lsrl IMM (8),d7 | put difference in lower byte
#endif
| if difference is too large we don't shift (actually, we can just exit) '
#ifndef __mcoldfire__
cmpw IMM (FLT_MANT_DIG+2),d7
#else
cmpl IMM (FLT_MANT_DIG+2),d7
#endif
bge Laddsf$b$small
#ifndef __mcoldfire__
cmpw IMM (16),d7 | if difference >= 16 swap
#else
cmpl IMM (16),d7 | if difference >= 16 swap
#endif
bge 4f
2:
#ifndef __mcoldfire__
subw IMM (1),d7
#else
subql IMM (1), d7
#endif
3:
#ifndef __mcoldfire__
lsrl IMM (1),d2 | shift right second operand
roxrl IMM (1),d3
dbra d7,3b
#else
lsrl IMM (1),d3
btst IMM (0),d2
beq 10f
bset IMM (31),d3
10: lsrl IMM (1),d2
subql IMM (1), d7
bpl 3b
#endif
bra Laddsf$3
| 4: word-swap trick: shift b (d2-d3) right 16 places at once, then the
| bne below finishes the remaining difference one bit at a time at 2.
4:
movew d2,d3
swap d3
movew d3,d2
swap d2
#ifndef __mcoldfire__
subw IMM (16),d7
#else
subl IMM (16),d7
#endif
bne 2b | if still more bits, go back to normal case
bra Laddsf$3
5:
#ifndef __mcoldfire__
exg d6,d7 | exchange the exponents
#else
eorl d6,d7
eorl d7,d6
eorl d6,d7
#endif
subl d6,d7 | keep the largest exponent
negl d7 |
#ifndef __mcoldfire__
lsrw IMM (8),d7 | put difference in lower byte
#else
lsrl IMM (8),d7 | put difference in lower byte
#endif
| if difference is too large we don't shift (and exit!) '
#ifndef __mcoldfire__
cmpw IMM (FLT_MANT_DIG+2),d7
#else
cmpl IMM (FLT_MANT_DIG+2),d7
#endif
bge Laddsf$a$small
#ifndef __mcoldfire__
cmpw IMM (16),d7 | if difference >= 16 swap
#else
cmpl IMM (16),d7 | if difference >= 16 swap
#endif
bge 8f
6:
#ifndef __mcoldfire__
subw IMM (1),d7
#else
subl IMM (1),d7
#endif
7:
#ifndef __mcoldfire__
lsrl IMM (1),d0 | shift right first operand
roxrl IMM (1),d1
dbra d7,7b
#else
lsrl IMM (1),d1
btst IMM (0),d0
beq 10f
bset IMM (31),d1
10: lsrl IMM (1),d0
subql IMM (1),d7
bpl 7b
#endif
bra Laddsf$3
| 8: same word-swap trick for a (d0-d1).
8:
movew d0,d1
swap d1
movew d1,d0
swap d0
#ifndef __mcoldfire__
subw IMM (16),d7
#else
subl IMM (16),d7
#endif
bne 6b | if still more bits, go back to normal case
| otherwise we fall through
 
| Now we have a in d0-d1, b in d2-d3, and the largest exponent in d6 (the
| signs are stored in a0 and a1).

Laddsf$3:
| Here we have to decide whether to add or subtract the numbers
#ifndef __mcoldfire__
exg d6,a0 | get signs back
exg d7,a1 | and save the exponents
#else
movel d6,d4
movel a0,d6
movel d4,a0
movel d7,d4
movel a1,d7
movel d4,a1
#endif
eorl d6,d7 | combine sign bits
bmi Lsubsf$0 | if negative a and b have opposite
| sign so we actually subtract the
| numbers

| Here we have both positive or both negative
#ifndef __mcoldfire__
exg d6,a0 | now we have the exponent in d6
#else
movel d6,d4
movel a0,d6
movel d4,a0
#endif
movel a0,d7 | and sign in d7
andl IMM (0x80000000),d7
| Here we do the addition.
addl d3,d1
addxl d2,d0
| Note: now we have d2, d3, d4 and d5 to play with!

| Put the exponent, in the first byte, in d2, to use the "standard" rounding
| routines:
movel d6,d2
#ifndef __mcoldfire__
lsrw IMM (8),d2
#else
lsrl IMM (8),d2
#endif

| Before rounding normalize so bit #FLT_MANT_DIG is set (we will consider
| the case of denormalized numbers in the rounding routine itself).
| As in the addition (not in the subtraction!) we could have set
| one more bit we check this:
btst IMM (FLT_MANT_DIG+1),d0
beq 1f
#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
#else
lsrl IMM (1),d1
btst IMM (0),d0
beq 10f
bset IMM (31),d1
10: lsrl IMM (1),d0
#endif
addl IMM (1),d2
1:
| Dispatch to the rounding routine selected by the mode word stored at
| _fpCCR+6; a0 carries the return address.
lea pc@(Laddsf$4),a0 | to return from rounding routine
PICLEA SYM (_fpCCR),a1 | check the rounding mode
#ifdef __mcoldfire__
clrl d6
#endif
movew a1@(6),d6 | rounding mode in d6
beq Lround$to$nearest
#ifndef __mcoldfire__
cmpw IMM (ROUND_TO_PLUS),d6
#else
cmpl IMM (ROUND_TO_PLUS),d6
#endif
bhi Lround$to$minus
blt Lround$to$zero
bra Lround$to$plus
Laddsf$4:
| Put back the exponent, but check for overflow.
#ifndef __mcoldfire__
cmpw IMM (0xff),d2
#else
cmpl IMM (0xff),d2
#endif
bhi 1f
bclr IMM (FLT_MANT_DIG-1),d0
#ifndef __mcoldfire__
lslw IMM (7),d2
#else
lsll IMM (7),d2
#endif
swap d2
orl d2,d0
bra Laddsf$ret
1:
moveq IMM (ADD),d5
bra Lf$overflow
 
| Magnitude-subtraction path of __addsf3 (operands had opposite signs).
Lsubsf$0:
| We are here if a > 0 and b < 0 (sign bits cleared).
| Here we do the subtraction.
movel d6,d7 | put sign in d7
andl IMM (0x80000000),d7

subl d3,d1 | result in d0-d1
subxl d2,d0 |
| (subx clears Z on a nonzero result and leaves it otherwise, so beq here
| tests the full 64-bit result for zero)
beq Laddsf$ret | if zero just exit
bpl 1f | if positive skip the following
bchg IMM (31),d7 | change sign bit in d7
negl d1
negxl d0
1:
#ifndef __mcoldfire__
exg d2,a0 | now we have the exponent in d2
lsrw IMM (8),d2 | put it in the first byte
#else
movel d2,d4
movel a0,d2
movel d4,a0
lsrl IMM (8),d2 | put it in the first byte
#endif

| Now d0-d1 is positive and the sign bit is in d7.

| Note that we do not have to normalize, since in the subtraction bit
| #FLT_MANT_DIG+1 is never set, and denormalized numbers are handled by
| the rounding routines themselves.
lea pc@(Lsubsf$1),a0 | to return from rounding routine
PICLEA SYM (_fpCCR),a1 | check the rounding mode
#ifdef __mcoldfire__
clrl d6
#endif
movew a1@(6),d6 | rounding mode in d6
beq Lround$to$nearest
#ifndef __mcoldfire__
cmpw IMM (ROUND_TO_PLUS),d6
#else
cmpl IMM (ROUND_TO_PLUS),d6
#endif
bhi Lround$to$minus
blt Lround$to$zero
bra Lround$to$plus
Lsubsf$1:
| Put back the exponent (we can't have overflow!). '
bclr IMM (FLT_MANT_DIG-1),d0
#ifndef __mcoldfire__
lslw IMM (7),d2
#else
lsll IMM (7),d2
#endif
swap d2
orl d2,d0
bra Laddsf$ret
 
| If one of the numbers was too small (difference of exponents >=
| FLT_MANT_DIG+2) we return the other (and now we don't have to '
| check for finiteness or zero).
| a$small: a was negligible, so return b (the second argument, a6@(12)).
Laddsf$a$small:
movel a6@(12),d0
PICLEA SYM (_fpCCR),a0
movew IMM (0),a0@
#ifndef __mcoldfire__
moveml sp@+,d2-d7 | restore data registers
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6 | and return
rts

| b$small: b was negligible, so return a (the first argument, a6@(8)).
Laddsf$b$small:
movel a6@(8),d0
PICLEA SYM (_fpCCR),a0
movew IMM (0),a0@
#ifndef __mcoldfire__
moveml sp@+,d2-d7 | restore data registers
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6 | and return
rts
 
| If the numbers are denormalized remember to put exponent equal to 1.
| (d5 still holds 0x01000000, so the swap leaves 0x0100 in the exponent
| word, i.e. an exponent of 1 in the representation used above.)

Laddsf$a$den:
movel d5,d6 | d5 contains 0x01000000
swap d6
bra Laddsf$1

Laddsf$b$den:
movel d5,d7
swap d7
notl d4 | make d4 into a mask for the fraction
| (this was not executed after the jump)
bra Laddsf$2
 
| The rest is mainly code for the different results which can be
| returned (checking always for +/-INFINITY and NaN).

Laddsf$b:
| Return b (if a is zero).
movel a6@(12),d0
cmpl IMM (0x80000000),d0 | Check if b is -0
bne 1f
movel a0,d7
andl IMM (0x80000000),d7 | Use the sign of a
clrl d0
bra Laddsf$ret
Laddsf$a:
| Return a (if b is zero).
movel a6@(8),d0
1:
moveq IMM (ADD),d5
| We have to check for NaN and +/-infty.
movel d0,d7
andl IMM (0x80000000),d7 | put sign in d7
bclr IMM (31),d0 | clear sign
cmpl IMM (INFINITY),d0 | check for infty or NaN
bge 2f
movel d0,d0 | check for zero (we do this because we don't '
bne Laddsf$ret | want to return -0 by mistake
bclr IMM (31),d7 | if zero be sure to clear sign
bra Laddsf$ret | if everything OK just return
2:
| The value to be returned is either +/-infty or NaN
andl IMM (0x007fffff),d0 | check for NaN
bne Lf$inop | if mantissa not zero is NaN
bra Lf$infty
 
Laddsf$ret:
| Normal exit (a and b nonzero, result is not NaN nor +/-infty).
| We have to clear the exception flags (just the exception type).
| Expects the packed magnitude in d0 and the sign bit in d7.
PICLEA SYM (_fpCCR),a0
movew IMM (0),a0@
orl d7,d0 | put sign bit
#ifndef __mcoldfire__
moveml sp@+,d2-d7 | restore data registers
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6 | and return
rts

Laddsf$ret$den:
| Return a denormalized number (for addition we don't signal underflow) '
lsrl IMM (1),d0 | remember to shift right back once
bra Laddsf$ret | and return
 
| Note: when adding two floats of the same sign if either one is
| NaN we return NaN without regard to whether the other is finite or
| not. When subtracting them (i.e., when adding two numbers of
| opposite signs) things are more complicated: if both are INFINITY
| we return NaN, if only one is INFINITY and the other is NaN we return
| NaN, but if it is finite we return INFINITY with the corresponding sign.
 
Laddsf$nf:
moveq IMM (ADD),d5
| This could be faster but it is not worth the effort, since it is not
| executed very often. We sacrifice speed for clarity here.
movel a6@(8),d0 | get the numbers back (remember that we
movel a6@(12),d1 | did some processing already)
movel IMM (INFINITY),d4 | useful constant (INFINITY)
movel d0,d2 | save sign bits
movel d1,d3
bclr IMM (31),d0 | clear sign bits
bclr IMM (31),d1
| We know that one of them is either NaN or +/-INFINITY
| Check for NaN (if either one is NaN return NaN)
cmpl d4,d0 | check first a (d0)
bhi Lf$inop
cmpl d4,d1 | check now b (d1)
bhi Lf$inop
| Now comes the check for +/-INFINITY. We know that both are (maybe not
| finite) numbers, but we have to check if both are infinite whether we
| are adding or subtracting them.
eorl d3,d2 | to check sign bits
bmi 1f
movel d0,d7
andl IMM (0x80000000),d7 | get (common) sign bit
bra Lf$infty
1:
| We know one (or both) are infinite, so we test for equality between the
| two numbers (if they are equal they have to be infinite both, so we
| return NaN).
cmpl d1,d0 | are both infinite?
beq Lf$inop | if so return NaN
 
movel d0,d7
andl IMM (0x80000000),d7 | get a's sign bit '
cmpl d4,d0 | test now for infinity
beq Lf$infty | if a is INFINITY return with this sign
bchg IMM (31),d7 | else we know b is INFINITY and has
bra Lf$infty | the opposite sign
 
|=============================================================================
| __mulsf3
|=============================================================================

| float __mulsf3(float, float);
| Single-precision soft-float multiply.  Operands are taken from the frame
| (a at 8(a6), b at 12(a6)); the packed result is returned in d0.  The
| routine unpacks both operands, forms the 24x24-bit fraction product with
| a shift-and-add loop, and then jumps to the shared Lround$exit path which
| rounds, checks for overflow/underflow and repacks the result.
SYM (__mulsf3):
#ifndef __mcoldfire__
link a6,IMM (0)
moveml d2-d7,sp@-
#else
| ColdFire has no predecrement moveml; reserve 24 bytes and store flat.
link a6,IMM (-24)
moveml d2-d7,sp@
#endif
movel a6@(8),d0 | get a into d0
movel a6@(12),d1 | and b into d1
movel d0,d7 | d7 will hold the sign of the product
eorl d1,d7 |
andl IMM (0x80000000),d7
movel IMM (INFINITY),d6 | useful constant (+INFINITY)
movel d6,d5 | another (mask for fraction)
notl d5 |
movel IMM (0x00800000),d4 | this is to put hidden bit back
| Screen out the special operands (zero, NaN, INFINITY) first.
bclr IMM (31),d0 | get rid of a's sign bit '
movel d0,d2 |
beq Lmulsf$a$0 | branch if a is zero
bclr IMM (31),d1 | get rid of b's sign bit '
movel d1,d3 |
beq Lmulsf$b$0 | branch if b is zero
cmpl d6,d0 | is a big?
bhi Lmulsf$inop | if a is NaN return NaN
beq Lmulsf$inf | if a is INFINITY we have to check b
cmpl d6,d1 | now compare b with INFINITY
bhi Lmulsf$inop | is b NaN?
beq Lmulsf$overflow | is b INFINITY?
| Here we have both numbers finite and nonzero (and with no sign bit).
| Now we get the exponents into d2 and d3.
andl d6,d2 | and isolate exponent in d2
beq Lmulsf$a$den | if exponent is zero we have a denormalized
andl d5,d0 | and isolate fraction
orl d4,d0 | and put hidden bit back
swap d2 | I like exponents in the first byte
#ifndef __mcoldfire__
lsrw IMM (7),d2 |
#else
lsrl IMM (7),d2 |
#endif
Lmulsf$1: | number
| Same unpacking for b: exponent to d3, fraction (with hidden bit) in d1.
andl d6,d3 |
beq Lmulsf$b$den |
andl d5,d1 |
orl d4,d1 |
swap d3 |
#ifndef __mcoldfire__
lsrw IMM (7),d3 |
#else
lsrl IMM (7),d3 |
#endif
Lmulsf$2: |
#ifndef __mcoldfire__
addw d3,d2 | add exponents
subw IMM (F_BIAS+1),d2 | and subtract bias (plus one)
#else
addl d3,d2 | add exponents
subl IMM (F_BIAS+1),d2 | and subtract bias (plus one)
#endif

| We are now ready to do the multiplication. The situation is as follows:
| both a and b have bit FLT_MANT_DIG-1 set (even if they were
| denormalized to start with!), which means that in the product
| bit 2*(FLT_MANT_DIG-1) (that is, bit 2*FLT_MANT_DIG-2-32 of the
| high long) is set.

| To do the multiplication let us move the number a little bit around ...
movel d1,d6 | second operand in d6
movel d0,d5 | first operand in d4-d5
movel IMM (0),d4
movel d4,d1 | the sums will go in d0-d1
movel d4,d0

| now bit FLT_MANT_DIG-1 becomes bit 31:
lsll IMM (31-FLT_MANT_DIG+1),d6

| Start the loop (we loop #FLT_MANT_DIG times):
| Classic shift-and-add: each iteration doubles the 64-bit accumulator
| d0-d1 and conditionally adds the multiplicand d4-d5 when the next
| multiplier bit (shifted out of d6 into the carry) is set.
moveq IMM (FLT_MANT_DIG-1),d3
1: addl d1,d1 | shift sum
addxl d0,d0
lsll IMM (1),d6 | get bit bn
bcc 2f | if not set skip sum
addl d5,d1 | add a
addxl d4,d0
2:
#ifndef __mcoldfire__
dbf d3,1b | loop back
#else
subql IMM (1),d3
bpl 1b
#endif

| Now we have the product in d0-d1, with bit (FLT_MANT_DIG - 1) + FLT_MANT_DIG
| (mod 32) of d0 set. The first thing to do now is to normalize it so bit
| FLT_MANT_DIG is set (to do the rounding).
| The two variants below perform the same 10-bit left shift of the 64-bit
| product; the ColdFire path avoids the rotate/word ops it lacks.
#ifndef __mcoldfire__
rorl IMM (6),d1
swap d1
movew d1,d3
andw IMM (0x03ff),d3
andw IMM (0xfd00),d1
#else
movel d1,d3
lsll IMM (8),d1
addl d1,d1
addl d1,d1
moveq IMM (22),d5
lsrl d5,d3
orl d3,d1
andl IMM (0xfffffd00),d1
#endif
lsll IMM (8),d0
addl d0,d0
addl d0,d0
#ifndef __mcoldfire__
orw d3,d0
#else
orl d3,d0
#endif

moveq IMM (MULTIPLY),d5
btst IMM (FLT_MANT_DIG+1),d0
beq Lround$exit
| Product was in [2,4): shift right one place and bump the exponent so
| bit FLT_MANT_DIG ends up set before rounding.
#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
addw IMM (1),d2
#else
lsrl IMM (1),d1
btst IMM (0),d0
beq 10f
bset IMM (31),d1
10: lsrl IMM (1),d0
addql IMM (1),d2
#endif
bra Lround$exit

Lmulsf$inop:
moveq IMM (MULTIPLY),d5
bra Lf$inop

Lmulsf$overflow:
moveq IMM (MULTIPLY),d5
bra Lf$overflow

Lmulsf$inf:
moveq IMM (MULTIPLY),d5
| If either is NaN return NaN; else both are (maybe infinite) numbers, so
| return INFINITY with the correct sign (which is in d7).
cmpl d6,d1 | is b NaN?
bhi Lf$inop | if so return NaN
bra Lf$overflow | else return +/-INFINITY

| If either number is zero return zero, unless the other is +/-INFINITY,
| or NaN, in which case we return NaN.
Lmulsf$b$0:
| Here d1 (==b) is zero.
movel a6@(8),d1 | get a again to check for non-finiteness
bra 1f
Lmulsf$a$0:
movel a6@(12),d1 | get b again to check for non-finiteness
1: bclr IMM (31),d1 | clear sign bit
cmpl IMM (INFINITY),d1 | and check for a large exponent
bge Lf$inop | if b is +/-INFINITY or NaN return NaN
movel d7,d0 | else return signed zero
PICLEA SYM (_fpCCR),a0 |
movew IMM (0),a0@ |
#ifndef __mcoldfire__
moveml sp@+,d2-d7 |
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6 |
rts |

| If a number is denormalized we put an exponent of 1 but do not put the
| hidden bit back into the fraction; instead we shift left until bit 23
| (the hidden bit) is set, adjusting the exponent accordingly. We do this
| to ensure that the product of the fractions is close to 1.
Lmulsf$a$den:
movel IMM (1),d2
andl d5,d0
1: addl d0,d0 | shift a left (until bit 23 is set)
#ifndef __mcoldfire__
subw IMM (1),d2 | and adjust exponent
#else
subql IMM (1),d2 | and adjust exponent
#endif
btst IMM (FLT_MANT_DIG-1),d0
bne Lmulsf$1 |
bra 1b | else loop back

Lmulsf$b$den:
movel IMM (1),d3
andl d5,d1
1: addl d1,d1 | shift b left until bit 23 is set
#ifndef __mcoldfire__
subw IMM (1),d3 | and adjust exponent
#else
subql IMM (1),d3 | and adjust exponent
#endif
btst IMM (FLT_MANT_DIG-1),d1
bne Lmulsf$2 |
bra 1b | else loop back
 
|=============================================================================
| __divsf3
|=============================================================================

| float __divsf3(float, float);
| Single-precision soft-float divide (a / b).  Operands come from the frame
| (a at 8(a6), b at 12(a6)); the packed quotient is returned in d0.  After
| unpacking, the fraction quotient is produced by restoring long division
| (one bit per iteration), then control joins the shared Lround$exit path.
SYM (__divsf3):
#ifndef __mcoldfire__
link a6,IMM (0)
moveml d2-d7,sp@-
#else
| ColdFire has no predecrement moveml; reserve 24 bytes and store flat.
link a6,IMM (-24)
moveml d2-d7,sp@
#endif
movel a6@(8),d0 | get a into d0
movel a6@(12),d1 | and b into d1
movel d0,d7 | d7 will hold the sign of the result
eorl d1,d7 |
andl IMM (0x80000000),d7 |
movel IMM (INFINITY),d6 | useful constant (+INFINITY)
movel d6,d5 | another (mask for fraction)
notl d5 |
movel IMM (0x00800000),d4 | this is to put hidden bit back
| Screen out the special operands (zero, NaN, INFINITY) first.
bclr IMM (31),d0 | get rid of a's sign bit '
movel d0,d2 |
beq Ldivsf$a$0 | branch if a is zero
bclr IMM (31),d1 | get rid of b's sign bit '
movel d1,d3 |
beq Ldivsf$b$0 | branch if b is zero
cmpl d6,d0 | is a big?
bhi Ldivsf$inop | if a is NaN return NaN
beq Ldivsf$inf | if a is INFINITY we have to check b
cmpl d6,d1 | now compare b with INFINITY
bhi Ldivsf$inop | if b is NaN return NaN
beq Ldivsf$underflow | finite / INFINITY underflows to zero
| Here we have both numbers finite and nonzero (and with no sign bit).
| Now we get the exponents into d2 and d3 and normalize the numbers to
| ensure that the ratio of the fractions is close to 1. We do this by
| making sure that bit #FLT_MANT_DIG-1 (hidden bit) is set.
andl d6,d2 | and isolate exponent in d2
beq Ldivsf$a$den | if exponent is zero we have a denormalized
andl d5,d0 | and isolate fraction
orl d4,d0 | and put hidden bit back
swap d2 | I like exponents in the first byte
#ifndef __mcoldfire__
lsrw IMM (7),d2 |
#else
lsrl IMM (7),d2 |
#endif
Ldivsf$1: |
| Same unpacking for b: exponent to d3, fraction (with hidden bit) in d1.
andl d6,d3 |
beq Ldivsf$b$den |
andl d5,d1 |
orl d4,d1 |
swap d3 |
#ifndef __mcoldfire__
lsrw IMM (7),d3 |
#else
lsrl IMM (7),d3 |
#endif
Ldivsf$2: |
#ifndef __mcoldfire__
subw d3,d2 | subtract exponents
addw IMM (F_BIAS),d2 | and add bias
#else
subl d3,d2 | subtract exponents
addl IMM (F_BIAS),d2 | and add bias
#endif
| We are now ready to do the division. We have prepared things in such a way
| that the ratio of the fractions will be less than 2 but greater than 1/2.
| At this point the registers in use are:
| d0 holds a (first operand, bit FLT_MANT_DIG=0, bit FLT_MANT_DIG-1=1)
| d1 holds b (second operand, bit FLT_MANT_DIG=1)
| d2 holds the difference of the exponents, corrected by the bias
| d7 holds the sign of the ratio
| d4, d5, d6 hold some constants
movel d7,a0 | d6-d7 will hold the ratio of the fractions
movel IMM (0),d6 |
movel d6,d7

| Restoring division: FLT_MANT_DIG+2 iterations, one quotient bit each,
| subtracting b whenever the running remainder is >= b.
moveq IMM (FLT_MANT_DIG+1),d3
1: cmpl d0,d1 | is a < b?
bhi 2f |
bset d3,d6 | set a bit in d6
subl d1,d0 | if a >= b a <-- a-b
beq 3f | if a is zero, exit
2: addl d0,d0 | multiply a by 2
#ifndef __mcoldfire__
dbra d3,1b
#else
subql IMM (1),d3
bpl 1b
#endif

| Now we keep going to set the sticky bit ...
moveq IMM (FLT_MANT_DIG),d3
1: cmpl d0,d1
ble 2f
addl d0,d0
#ifndef __mcoldfire__
dbra d3,1b
#else
subql IMM(1),d3
bpl 1b
#endif
movel IMM (0),d1
bra 3f
2: movel IMM (0),d1
#ifndef __mcoldfire__
subw IMM (FLT_MANT_DIG),d3
addw IMM (31),d3
#else
subl IMM (FLT_MANT_DIG),d3
addl IMM (31),d3
#endif
bset d3,d1
3:
movel d6,d0 | put the ratio in d0-d1
movel a0,d7 | get sign back

| Because of the normalization we did before we are guaranteed that
| d0 is smaller than 2^26 but larger than 2^24. Thus bit 26 is not set,
| bit 25 could be set, and if it is not set then bit 24 is necessarily set.
btst IMM (FLT_MANT_DIG+1),d0
beq 1f | if it is not set, then bit 24 is set
lsrl IMM (1),d0 |
#ifndef __mcoldfire__
addw IMM (1),d2 |
#else
addl IMM (1),d2 |
#endif
1:
| Now round, check for over- and underflow, and exit.
moveq IMM (DIVIDE),d5
bra Lround$exit

Ldivsf$inop:
moveq IMM (DIVIDE),d5
bra Lf$inop

Ldivsf$overflow:
moveq IMM (DIVIDE),d5
bra Lf$overflow

Ldivsf$underflow:
moveq IMM (DIVIDE),d5
bra Lf$underflow

Ldivsf$a$0:
moveq IMM (DIVIDE),d5
| If a is zero check to see whether b is zero also. In that case return
| NaN; then check if b is NaN, and return NaN also in that case. Else
| return a properly signed zero.
andl IMM (0x7fffffff),d1 | clear sign bit and test b
beq Lf$inop | if b is also zero return NaN
cmpl IMM (INFINITY),d1 | check for NaN
bhi Lf$inop |
movel d7,d0 | else return signed zero
PICLEA SYM (_fpCCR),a0 |
movew IMM (0),a0@ |
#ifndef __mcoldfire__
moveml sp@+,d2-d7 |
#else
moveml sp@,d2-d7 |
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6 |
rts |
Ldivsf$b$0:
moveq IMM (DIVIDE),d5
| If we got here a is not zero. Check if a is NaN; in that case return NaN,
| else return +/-INFINITY. Remember that a is in d0 with the sign bit
| cleared already.
cmpl IMM (INFINITY),d0 | compare d0 with INFINITY
bhi Lf$inop | if larger it is NaN
bra Lf$div$0 | else signal DIVIDE_BY_ZERO

Ldivsf$inf:
moveq IMM (DIVIDE),d5
| If a is INFINITY we have to check b
cmpl IMM (INFINITY),d1 | compare b with INFINITY
bge Lf$inop | if b is NaN or INFINITY return NaN
bra Lf$overflow | else return overflow

| If a number is denormalized we put an exponent of 1 but do not put the
| bit back into the fraction.
Ldivsf$a$den:
movel IMM (1),d2
andl d5,d0
1: addl d0,d0 | shift a left until bit FLT_MANT_DIG-1 is set
#ifndef __mcoldfire__
subw IMM (1),d2 | and adjust exponent
#else
subl IMM (1),d2 | and adjust exponent
#endif
btst IMM (FLT_MANT_DIG-1),d0
bne Ldivsf$1
bra 1b

Ldivsf$b$den:
movel IMM (1),d3
andl d5,d1
1: addl d1,d1 | shift b left until bit FLT_MANT_DIG is set
#ifndef __mcoldfire__
subw IMM (1),d3 | and adjust exponent
#else
subl IMM (1),d3 | and adjust exponent
#endif
btst IMM (FLT_MANT_DIG-1),d1
bne Ldivsf$2
bra 1b
 
Lround$exit:
| This is a common exit point for __mulsf3 and __divsf3.
| On entry: unrounded result in d0-d1 (fraction with guard/sticky bits),
| biased exponent in d2, sign in d7, operation code in d5.

| First check for underflow in the exponent:
#ifndef __mcoldfire__
cmpw IMM (-FLT_MANT_DIG-1),d2
#else
cmpl IMM (-FLT_MANT_DIG-1),d2
#endif
blt Lf$underflow
| It could happen that the exponent is less than 1, in which case the
| number is denormalized. In this case we shift right and adjust the
| exponent until it becomes 1 or the fraction is zero (in the latter case
| we signal underflow and return zero).
movel IMM (0),d6 | d6 is used temporarily
#ifndef __mcoldfire__
cmpw IMM (1),d2 | if the exponent is less than 1 we
#else
cmpl IMM (1),d2 | if the exponent is less than 1 we
#endif
bge 2f | have to shift right (denormalize)
1:
#ifndef __mcoldfire__
addw IMM (1),d2 | adjust the exponent
lsrl IMM (1),d0 | shift right once
roxrl IMM (1),d1 |
roxrl IMM (1),d6 | d6 collects bits we would lose otherwise
cmpw IMM (1),d2 | is the exponent 1 already?
#else
| ColdFire lacks roxrl: emulate the 96-bit right shift of d0:d1:d6 by
| shifting each long and propagating the dropped bit by hand.
addql IMM (1),d2 | adjust the exponent
lsrl IMM (1),d6
btst IMM (0),d1
beq 11f
bset IMM (31),d6
11: lsrl IMM (1),d1
btst IMM (0),d0
beq 10f
bset IMM (31),d1
10: lsrl IMM (1),d0
cmpl IMM (1),d2 | is the exponent 1 already?
#endif
beq 2f | if not loop back
bra 1b |
bra Lf$underflow | safety check, shouldn't execute '
2: orl d6,d1 | this is a trick so we don't lose '
| the extra bits which were flushed right
| Now call the rounding routine (which takes care of denormalized numbers):
lea pc@(Lround$0),a0 | to return from rounding routine
PICLEA SYM (_fpCCR),a1 | check the rounding mode
#ifdef __mcoldfire__
clrl d6
#endif
movew a1@(6),d6 | rounding mode in d6
beq Lround$to$nearest
#ifndef __mcoldfire__
cmpw IMM (ROUND_TO_PLUS),d6
#else
cmpl IMM (ROUND_TO_PLUS),d6
#endif
bhi Lround$to$minus
blt Lround$to$zero
bra Lround$to$plus
Lround$0:
| Here we have a correctly rounded result (either normalized or denormalized).

| Here we should have either a normalized number or a denormalized one, and
| the exponent is necessarily larger or equal to 1 (so we don't have to '
| check again for underflow!). We have to check for overflow or for a
| denormalized number (which also signals underflow).
| Check for overflow (i.e., exponent >= 255).
#ifndef __mcoldfire__
cmpw IMM (0x00ff),d2
#else
cmpl IMM (0x00ff),d2
#endif
bge Lf$overflow
| Now check for a denormalized number (exponent==0).
movew d2,d2 | move-to-self just sets the condition codes
beq Lf$den
1:
| Put back the exponents and sign and return.
#ifndef __mcoldfire__
lslw IMM (7),d2 | exponent back to fourth byte
#else
lsll IMM (7),d2 | exponent back to fourth byte
#endif
bclr IMM (FLT_MANT_DIG-1),d0 | clear the hidden bit before packing
swap d0 | and put back exponent
#ifndef __mcoldfire__
orw d2,d0 |
#else
orl d2,d0
#endif
swap d0 |
orl d7,d0 | and sign also

PICLEA SYM (_fpCCR),a0
movew IMM (0),a0@
#ifndef __mcoldfire__
moveml sp@+,d2-d7
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6
rts
 
|=============================================================================
| __negsf2
|=============================================================================

| This is trivial and could be shorter if we didn't bother checking for NaN '
| and +/-INFINITY.

| float __negsf2(float);
| Negates a single-precision float: flips the sign bit, then special-cases
| zero (always returns +0), INFINITY (returns signed INFINITY via Lf$infty)
| and NaN (signals invalid operation via Lf$inop).
SYM (__negsf2):
#ifndef __mcoldfire__
link a6,IMM (0)
moveml d2-d7,sp@-
#else
link a6,IMM (-24)
moveml d2-d7,sp@
#endif
moveq IMM (NEGATE),d5
movel a6@(8),d0 | get number to negate in d0
bchg IMM (31),d0 | negate
movel d0,d1 | make a positive copy
bclr IMM (31),d1 |
tstl d1 | check for zero
beq 2f | if zero (either sign) return +zero
cmpl IMM (INFINITY),d1 | compare to +INFINITY
blt 1f |
bhi Lf$inop | if larger (fraction not zero) is NaN
movel d0,d7 | else get sign and return INFINITY
andl IMM (0x80000000),d7
bra Lf$infty
1: PICLEA SYM (_fpCCR),a0
movew IMM (0),a0@
#ifndef __mcoldfire__
moveml sp@+,d2-d7
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6
rts
2: bclr IMM (31),d0
bra 1b
 
|=============================================================================
| __cmpsf2
|=============================================================================

| Return codes for the comparison routines.
GREATER = 1
LESS = -1
EQUAL = 0

| int __cmpsf2_internal(float, float, int);
| Compares two single-precision floats and returns GREATER/LESS/EQUAL.
| The third argument (at 16(a6)) is the value to hand back when either
| operand is NaN (see Lcmpf$inop below); the public wrappers choose +1 or
| -1 so that ordered-comparison semantics come out right for unordered
| operands.
SYM (__cmpsf2_internal):
#ifndef __mcoldfire__
link a6,IMM (0)
moveml d2-d7,sp@- | save registers
#else
link a6,IMM (-24)
moveml d2-d7,sp@
#endif
moveq IMM (COMPARE),d5
movel a6@(8),d0 | get first operand
movel a6@(12),d1 | get second operand
| Check if either is NaN, and in that case return garbage and signal
| INVALID_OPERATION. Check also if either is zero, and clear the signs
| if necessary.
movel d0,d6
andl IMM (0x7fffffff),d0
beq Lcmpsf$a$0
cmpl IMM (0x7f800000),d0
bhi Lcmpf$inop
Lcmpsf$1:
movel d1,d7
andl IMM (0x7fffffff),d1
beq Lcmpsf$b$0
cmpl IMM (0x7f800000),d1
bhi Lcmpf$inop
Lcmpsf$2:
| Check the signs
eorl d6,d7
bpl 1f
| If the signs are not equal check if a >= 0
tstl d6
bpl Lcmpsf$a$gt$b | if (a >= 0 && b < 0) => a > b
bmi Lcmpsf$b$gt$a | if (a < 0 && b >= 0) => a < b
1:
| If the signs are equal check for < 0
tstl d6
bpl 1f
| If both are negative exchange them
#ifndef __mcoldfire__
exg d0,d1
#else
movel d0,d7
movel d1,d0
movel d7,d1
#endif
1:
| Now that they are positive we just compare them as longs (does this also
| work for denormalized numbers?).
cmpl d0,d1
bhi Lcmpsf$b$gt$a | |b| > |a|
bne Lcmpsf$a$gt$b | |b| < |a|
| If we got here a == b.
movel IMM (EQUAL),d0
#ifndef __mcoldfire__
moveml sp@+,d2-d7 | put back the registers
#else
moveml sp@,d2-d7
#endif
unlk a6
rts
Lcmpsf$a$gt$b:
movel IMM (GREATER),d0
#ifndef __mcoldfire__
moveml sp@+,d2-d7 | put back the registers
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6
rts
Lcmpsf$b$gt$a:
movel IMM (LESS),d0
#ifndef __mcoldfire__
moveml sp@+,d2-d7 | put back the registers
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6
rts

| Zero operands: clear the sign copy so -0 compares equal to +0.
Lcmpsf$a$0:
bclr IMM (31),d6
bra Lcmpsf$1
Lcmpsf$b$0:
bclr IMM (31),d7
bra Lcmpsf$2

| NaN operand: return the caller-supplied third argument and raise
| INVALID_OPERATION through the exception handler.
Lcmpf$inop:
movl a6@(16),d0
moveq IMM (INEXACT_RESULT+INVALID_OPERATION),d7
moveq IMM (SINGLE_FLOAT),d6
PICJUMP $_exception_handler
 
| int __cmpsf2(float, float);
| Public entry point: pushes 1 as the unordered-result argument and calls
| the internal comparison routine.
| Fixed: the call previously read "bsr (__cmpsf2_internal)", which bypasses
| both the SYM() name-mangling macro and the PICCALL macro that every other
| caller of __cmpsf2_internal in this file uses (e.g. __eqsf2 below) --
| wrong symbol on targets where SYM prepends an underscore, and a non-PIC
| call in PIC builds.
SYM (__cmpsf2):
link a6,IMM (0)
pea 1
movl a6@(12),sp@-
movl a6@(8),sp@-
PICCALL SYM (__cmpsf2_internal)
unlk a6
rts
 
|=============================================================================
| rounding routines
|=============================================================================

| The rounding routines expect the number to be normalized in registers
| d0-d1, with the exponent in register d2. They assume that the
| exponent is larger or equal to 1. They return a properly normalized number
| if possible, and a denormalized number otherwise. The exponent is returned
| in d2.
| The return address is supplied by the caller in a0 (see Lround$exit).

Lround$to$nearest:
| We now normalize as suggested by D. Knuth ("Seminumerical Algorithms"):
| Here we assume that the exponent is not too small (this should be checked
| before entering the rounding routine), but the number could be denormalized.

| Check for denormalized numbers:
1: btst IMM (FLT_MANT_DIG),d0
bne 2f | if set the number is normalized
| Normalize shifting left until bit #FLT_MANT_DIG is set or the exponent
| is one (remember that a denormalized number corresponds to an
| exponent of -F_BIAS+1).
#ifndef __mcoldfire__
cmpw IMM (1),d2 | remember that the exponent is at least one
#else
cmpl IMM (1),d2 | remember that the exponent is at least one
#endif
beq 2f | an exponent of one means denormalized
addl d1,d1 | else shift and adjust the exponent
addxl d0,d0 |
#ifndef __mcoldfire__
dbra d2,1b |
#else
subql IMM (1),d2
bpl 1b
#endif
2:
| Now round: we do it as follows: after the shifting we can write the
| fraction part as f + delta, where 1 < f < 2^25, and 0 <= delta <= 2.
| If delta < 1, do nothing. If delta > 1, add 1 to f.
| If delta == 1, we make sure the rounded number will be even (odd?)
| (after shifting).
| (bit 0 of d0 is the guard bit, d1 holds the sticky bits; d1 == 0 with
| guard set is the tie case, which falls through to the round-to-even code)
btst IMM (0),d0 | is delta < 1?
beq 2f | if so, do not do anything
tstl d1 | is delta == 1?
bne 1f | if so round to even
movel d0,d1 |
andl IMM (2),d1 | bit 1 is the last significant bit
addl d1,d0 |
bra 2f |
1: movel IMM (1),d1 | else add 1
addl d1,d0 |
| Shift right once (because we used bit #FLT_MANT_DIG!).
2: lsrl IMM (1),d0
| Now check again bit #FLT_MANT_DIG (rounding could have produced a
| 'fraction overflow' ...).
btst IMM (FLT_MANT_DIG),d0
beq 1f
lsrl IMM (1),d0
#ifndef __mcoldfire__
addw IMM (1),d2
#else
addql IMM (1),d2
#endif
1:
| If bit #FLT_MANT_DIG-1 is clear we have a denormalized number, so we
| have to put the exponent to zero and return a denormalized number.
btst IMM (FLT_MANT_DIG-1),d0
beq 1f
jmp a0@
1: movel IMM (0),d2
jmp a0@

| Truncating modes: the value is already correct, just return.
Lround$to$zero:
Lround$to$plus:
Lround$to$minus:
jmp a0@
#endif /* L_float */
 
| gcc expects the routines __eqdf2, __nedf2, __gtdf2, __gedf2,
| __ledf2, __ltdf2 to all return the same value as a direct call to
| __cmpdf2 would. In this implementation, each of these routines
| simply calls __cmpdf2. It would be more efficient to give the
| __cmpdf2 routine several names, but separating them out will make it
| easier to write efficient versions of these routines someday.
| If the operands compare unordered, __gtdf2 and __gedf2 return -1.
| The other routines return 1.
| The value pushed with pea below is the result the internal comparison
| routine returns when an operand is NaN (see Lcmpf$inop for the
| single-precision analogue).

#ifdef L_eqdf2
.text
.proc
.globl SYM (__eqdf2)
SYM (__eqdf2):
link a6,IMM (0)
pea 1
movl a6@(20),sp@-
movl a6@(16),sp@-
movl a6@(12),sp@-
movl a6@(8),sp@-
PICCALL SYM (__cmpdf2_internal)
unlk a6
rts
#endif /* L_eqdf2 */

#ifdef L_nedf2
.text
.proc
.globl SYM (__nedf2)
SYM (__nedf2):
link a6,IMM (0)
pea 1
movl a6@(20),sp@-
movl a6@(16),sp@-
movl a6@(12),sp@-
movl a6@(8),sp@-
PICCALL SYM (__cmpdf2_internal)
unlk a6
rts
#endif /* L_nedf2 */

#ifdef L_gtdf2
.text
.proc
.globl SYM (__gtdf2)
SYM (__gtdf2):
link a6,IMM (0)
pea -1
movl a6@(20),sp@-
movl a6@(16),sp@-
movl a6@(12),sp@-
movl a6@(8),sp@-
PICCALL SYM (__cmpdf2_internal)
unlk a6
rts
#endif /* L_gtdf2 */

#ifdef L_gedf2
.text
.proc
.globl SYM (__gedf2)
SYM (__gedf2):
link a6,IMM (0)
pea -1
movl a6@(20),sp@-
movl a6@(16),sp@-
movl a6@(12),sp@-
movl a6@(8),sp@-
PICCALL SYM (__cmpdf2_internal)
unlk a6
rts
#endif /* L_gedf2 */

#ifdef L_ltdf2
.text
.proc
.globl SYM (__ltdf2)
SYM (__ltdf2):
link a6,IMM (0)
pea 1
movl a6@(20),sp@-
movl a6@(16),sp@-
movl a6@(12),sp@-
movl a6@(8),sp@-
PICCALL SYM (__cmpdf2_internal)
unlk a6
rts
#endif /* L_ltdf2 */

#ifdef L_ledf2
.text
.proc
.globl SYM (__ledf2)
SYM (__ledf2):
link a6,IMM (0)
pea 1
movl a6@(20),sp@-
movl a6@(16),sp@-
movl a6@(12),sp@-
movl a6@(8),sp@-
PICCALL SYM (__cmpdf2_internal)
unlk a6
rts
#endif /* L_ledf2 */
 
| The comments above about __eqdf2, et. al., also apply to __eqsf2,
| et. al., except that the latter call __cmpsf2 rather than __cmpdf2.
| The value pushed with pea is what __cmpsf2_internal returns for NaN
| operands (see Lcmpf$inop): -1 for __gtsf2/__gesf2, 1 for the rest.

#ifdef L_eqsf2
.text
.proc
.globl SYM (__eqsf2)
SYM (__eqsf2):
link a6,IMM (0)
pea 1
movl a6@(12),sp@-
movl a6@(8),sp@-
PICCALL SYM (__cmpsf2_internal)
unlk a6
rts
#endif /* L_eqsf2 */

#ifdef L_nesf2
.text
.proc
.globl SYM (__nesf2)
SYM (__nesf2):
link a6,IMM (0)
pea 1
movl a6@(12),sp@-
movl a6@(8),sp@-
PICCALL SYM (__cmpsf2_internal)
unlk a6
rts
#endif /* L_nesf2 */

#ifdef L_gtsf2
.text
.proc
.globl SYM (__gtsf2)
SYM (__gtsf2):
link a6,IMM (0)
pea -1
movl a6@(12),sp@-
movl a6@(8),sp@-
PICCALL SYM (__cmpsf2_internal)
unlk a6
rts
#endif /* L_gtsf2 */

#ifdef L_gesf2
.text
.proc
.globl SYM (__gesf2)
SYM (__gesf2):
link a6,IMM (0)
pea -1
movl a6@(12),sp@-
movl a6@(8),sp@-
PICCALL SYM (__cmpsf2_internal)
unlk a6
rts
#endif /* L_gesf2 */

#ifdef L_ltsf2
.text
.proc
.globl SYM (__ltsf2)
SYM (__ltsf2):
link a6,IMM (0)
pea 1
movl a6@(12),sp@-
movl a6@(8),sp@-
PICCALL SYM (__cmpsf2_internal)
unlk a6
rts
#endif /* L_ltsf2 */

#ifdef L_lesf2
.text
.proc
.globl SYM (__lesf2)
SYM (__lesf2):
link a6,IMM (0)
pea 1
movl a6@(12),sp@-
movl a6@(8),sp@-
PICCALL SYM (__cmpsf2_internal)
unlk a6
rts
#endif /* L_lesf2 */
/t-slibgcc-elf-ver
0,0 → 1,3
# Bump the version number of the shared libgcc library
 
SHLIB_SOVERSION = 2
/m68k.c
0,0 → 1,3690
/* Subroutines for insn-output.c for Motorola 68000 family.
Copyright (C) 1987, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
2001, 2003, 2004, 2005, 2006, 2007
Free Software Foundation, Inc.
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
 
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
 
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
 
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "function.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "recog.h"
#include "toplev.h"
#include "expr.h"
#include "reload.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "debug.h"
#include "flags.h"
 
/* Register class for each hard register number: eight DATA_REGS (d0-d7),
   eight ADDR_REGS (a0-a7), eight FP_REGS (fp0-fp7).  The trailing
   ADDR_REGS entry is register 24 -- presumably the fake argument
   pointer; confirm against REGISTER_NAMES/FIRST_PSEUDO_REGISTER.  */
enum reg_class regno_reg_class[] =
{
DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
FP_REGS, FP_REGS, FP_REGS, FP_REGS,
FP_REGS, FP_REGS, FP_REGS, FP_REGS,
ADDR_REGS
};
 
 
/* The ASM_DOT macro allows easy string pasting to handle the differences
between MOTOROLA and MIT syntaxes in asm_fprintf(), which doesn't
support the %. option. */
#if MOTOROLA
# define ASM_DOT "."
# define ASM_DOTW ".w"
# define ASM_DOTL ".l"
#else
# define ASM_DOT ""
# define ASM_DOTW ""
# define ASM_DOTL ""
#endif
 
 
/* Structure describing stack frame layout.  Filled in by
   m68k_compute_frame_layout() and cached in current_frame.  */
struct m68k_frame
{
/* Stack pointer to frame pointer offset. */
HOST_WIDE_INT offset;
 
/* Offset of FPU registers. */
HOST_WIDE_INT foffset;
 
/* Frame size in bytes (rounded up). */
HOST_WIDE_INT size;
 
/* Data and address register. */
/* NOTE(review): reg_no looks like the count of saved d/a registers and
   reg_mask/reg_rev_mask the forward and reversed movem masks -- confirm
   in m68k_compute_frame_layout.  */
int reg_no;
unsigned int reg_mask;
unsigned int reg_rev_mask;
 
/* FPU registers. */
int fpu_no;
unsigned int fpu_mask;
unsigned int fpu_rev_mask;
 
/* Offsets relative to ARG_POINTER. */
HOST_WIDE_INT frame_pointer_offset;
HOST_WIDE_INT stack_pointer_offset;
 
/* Function which the above information refers to. */
int funcdef_no;
};
 
/* Current frame information calculated by m68k_compute_frame_layout(). */
static struct m68k_frame current_frame;
 
static bool m68k_handle_option (size_t, const char *, int);
static rtx find_addr_reg (rtx);
static const char *singlemove_string (rtx *);
static void m68k_output_function_prologue (FILE *, HOST_WIDE_INT);
static void m68k_output_function_epilogue (FILE *, HOST_WIDE_INT);
#ifdef M68K_TARGET_COFF
static void m68k_coff_asm_named_section (const char *, unsigned int, tree);
#endif /* M68K_TARGET_COFF */
static void m68k_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
HOST_WIDE_INT, tree);
static rtx m68k_struct_value_rtx (tree, int);
static bool m68k_interrupt_function_p (tree func);
static tree m68k_handle_fndecl_attribute (tree *node, tree name,
tree args, int flags,
bool *no_add_attrs);
static void m68k_compute_frame_layout (void);
static bool m68k_save_reg (unsigned int regno, bool interrupt_handler);
static int const_int_cost (rtx);
static bool m68k_rtx_costs (rtx, int, int, int *);
 
/* Specify the identification number of the library being built */
const char *m68k_library_id_string = "_current_shared_library_a5_offset_";
 
/* Nonzero if the last compare/test insn had FP operands. The
sCC expanders peek at this to determine what to do for the
68060, which has no fsCC instructions. */
int m68k_last_compare_had_fp_operands;
/* Initialize the GCC target structure. */
 
#if INT_OP_GROUP == INT_OP_DOT_WORD
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
#endif
 
#if INT_OP_GROUP == INT_OP_NO_DOT
#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tbyte\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tshort\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tlong\t"
#endif
 
#if INT_OP_GROUP == INT_OP_DC
#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tdc.b\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tdc.w\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tdc.l\t"
#endif
 
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
 
#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE m68k_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE m68k_output_function_epilogue
 
#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK m68k_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall
 
#undef TARGET_ASM_FILE_START_APP_OFF
#define TARGET_ASM_FILE_START_APP_OFF true
 
#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | MASK_STRICT_ALIGNMENT)
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION m68k_handle_option
 
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS m68k_rtx_costs
 
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE m68k_attribute_table
 
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
 
#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX m68k_struct_value_rtx
 
/* Machine-specific attribute table.  "interrupt_handler" takes no
   arguments, must be attached to a declaration (decl_req == true), and is
   validated by m68k_handle_fndecl_attribute.  The all-NULL entry
   terminates the table.  */
static const struct attribute_spec m68k_attribute_table[] =
{
/* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
{ "interrupt_handler", 0, 0, true, false, false, m68k_handle_fndecl_attribute },
{ NULL, 0, 0, false, false, false, NULL }
};
 
struct gcc_target targetm = TARGET_INITIALIZER;
/* These bits are controlled by all CPU selection options. Many options
also control MASK_68881, but some (notably -m68020) leave it alone. */
 
#define MASK_ALL_CPU_BITS \
(MASK_COLDFIRE | MASK_CF_HWDIV | MASK_68060 | MASK_68040 \
| MASK_68040_ONLY | MASK_68030 | MASK_68020 | MASK_BITFIELD)
 
/* Implement TARGET_HANDLE_OPTION.

   CODE is the OPT_* enumerator for the option being processed, ARG its
   textual argument (if any) and VALUE its numeric argument.  Each CPU
   selection option first clears all CPU bits (and, for CPUs without an
   on-chip 68881, MASK_68881 too) and then sets the bits for the chosen
   CPU.  Returns true to accept the option.  */

static bool
m68k_handle_option (size_t code, const char *arg, int value)
{
  switch (code)
    {
    case OPT_m5200:
      target_flags &= ~(MASK_ALL_CPU_BITS | MASK_68881);
      target_flags |= MASK_5200;
      return true;

    case OPT_m5206e:
      /* 5206e: ColdFire 5200 family plus hardware divide.  */
      target_flags &= ~(MASK_ALL_CPU_BITS | MASK_68881);
      target_flags |= MASK_5200 | MASK_CF_HWDIV;
      return true;

    case OPT_m528x:
      target_flags &= ~(MASK_ALL_CPU_BITS | MASK_68881);
      target_flags |= MASK_528x | MASK_CF_HWDIV;
      return true;

    case OPT_m5307:
      target_flags &= ~(MASK_ALL_CPU_BITS | MASK_68881);
      target_flags |= MASK_CFV3 | MASK_CF_HWDIV;
      return true;

    case OPT_m5407:
      target_flags &= ~(MASK_ALL_CPU_BITS | MASK_68881);
      target_flags |= MASK_CFV4 | MASK_CF_HWDIV;
      return true;

    case OPT_mcfv4e:
      target_flags &= ~(MASK_ALL_CPU_BITS | MASK_68881);
      target_flags |= MASK_CFV4 | MASK_CF_HWDIV | MASK_CFV4E;
      return true;

    case OPT_m68000:
    case OPT_mc68000:
      target_flags &= ~(MASK_ALL_CPU_BITS | MASK_68881);
      return true;

    case OPT_m68020:
    case OPT_mc68020:
      /* -m68020 deliberately leaves MASK_68881 as the user set it.  */
      target_flags &= ~MASK_ALL_CPU_BITS;
      target_flags |= MASK_68020 | MASK_BITFIELD;
      return true;

    case OPT_m68020_40:
      target_flags &= ~MASK_ALL_CPU_BITS;
      target_flags |= MASK_BITFIELD | MASK_68881 | MASK_68020 | MASK_68040;
      return true;

    case OPT_m68020_60:
      target_flags &= ~MASK_ALL_CPU_BITS;
      target_flags |= (MASK_BITFIELD | MASK_68881 | MASK_68020
		       | MASK_68040 | MASK_68060);
      return true;

    case OPT_m68030:
      target_flags &= ~MASK_ALL_CPU_BITS;
      target_flags |= MASK_68020 | MASK_68030 | MASK_BITFIELD;
      return true;

    case OPT_m68040:
      target_flags &= ~MASK_ALL_CPU_BITS;
      target_flags |= (MASK_68020 | MASK_68881 | MASK_BITFIELD
		       | MASK_68040_ONLY | MASK_68040);
      return true;

    case OPT_m68060:
      target_flags &= ~MASK_ALL_CPU_BITS;
      target_flags |= (MASK_68020 | MASK_68881 | MASK_BITFIELD
		       | MASK_68040_ONLY | MASK_68060);
      return true;

    case OPT_m68302:
      target_flags &= ~(MASK_ALL_CPU_BITS | MASK_68881);
      return true;

    case OPT_m68332:
    case OPT_mcpu32:
      target_flags &= ~(MASK_ALL_CPU_BITS | MASK_68881);
      target_flags |= MASK_68020;
      return true;

    case OPT_mshared_library_id_:
      /* Translate library id N into the string "-4*(N+1)", the offset
	 used to address the library's entry via a5.  */
      if (value > MAX_LIBRARY_ID)
	error ("-mshared-library-id=%s is not between 0 and %d",
	       arg, MAX_LIBRARY_ID);
      else
	asprintf ((char **) &m68k_library_id_string, "%d", (value * -4) - 4);
      return true;

    default:
      /* Unknown codes fall through to generic option handling.  */
      return true;
    }
}
 
/* Sometimes certain combinations of command options do not make
   sense on a particular target machine.  You can define a macro
   `OVERRIDE_OPTIONS' to take account of this.  This macro, if
   defined, is executed once just after all the command options have
   been parsed.

   Don't use this macro to turn on various extra optimizations for
   `-O'.  That is what `OPTIMIZATION_OPTIONS' is for.  */

void
override_options (void)
{
  /* Sanity check to ensure that msep-data and mid-shared-library are not
   * both specified together.  Doing so simply doesn't make sense.
   */
  if (TARGET_SEP_DATA && TARGET_ID_SHARED_LIBRARY)
    error ("cannot specify both -msep-data and -mid-shared-library");

  /* If we're generating code for a separate A5 relative data segment,
   * we've got to enable -fPIC as well.  This might be relaxable to
   * -fpic but it hasn't been tested properly.
   */
  if (TARGET_SEP_DATA || TARGET_ID_SHARED_LIBRARY)
    flag_pic = 2;

  /* -fPIC uses 32-bit pc-relative displacements, which don't exist
     until the 68020.  */
  if (!TARGET_68020 && !TARGET_COLDFIRE && (flag_pic == 2))
    error ("-fPIC is not currently supported on the 68000 or 68010");

  /* ??? A historic way of turning on pic, or is this intended to
     be an embedded thing that doesn't have the same name binding
     significance that it does on hosted ELF systems?  */
  if (TARGET_PCREL && flag_pic == 0)
    flag_pic = 1;

  /* Turn off function cse if we are doing PIC.  We always want function call
     to be done as `bsr foo@PLTPC', so it will force the assembler to create
     the PLT entry for `foo'.  Doing function cse will cause the address of
     `foo' to be loaded into a register, which is exactly what we want to
     avoid when we are doing PIC on svr4 m68k.  */
  if (flag_pic)
    flag_no_function_cse = 1;

  SUBTARGET_OVERRIDE_OPTIONS;
}
/* Return true if FUNC is a FUNCTION_DECL carrying the
   "interrupt_handler" attribute.  */
static bool
m68k_interrupt_function_p (tree func)
{
  if (TREE_CODE (func) != FUNCTION_DECL)
    return false;

  return lookup_attribute ("interrupt_handler",
			   DECL_ATTRIBUTES (func)) != NULL_TREE;
}
 
/* Attribute handler for attributes that are only valid on function
   declarations; arguments as in struct attribute_spec.handler.  When
   *NODE is not a FUNCTION_DECL, warn and suppress the attribute.  */
static tree
m68k_handle_fndecl_attribute (tree *node, tree name,
			      tree args ATTRIBUTE_UNUSED,
			      int flags ATTRIBUTE_UNUSED,
			      bool *no_add_attrs)
{
  bool applies = (TREE_CODE (*node) == FUNCTION_DECL);

  if (!applies)
    {
      warning (OPT_Wattributes, "%qs attribute only applies to functions",
	       IDENTIFIER_POINTER (name));
      *no_add_attrs = true;
    }

  return NULL_TREE;
}
 
/* Compute the layout of the current function's stack frame — the local
   frame size and the sets of integer and FP registers that must be
   saved — and cache it in the global CURRENT_FRAME.  */
static void
m68k_compute_frame_layout (void)
{
  int regno, saved;
  unsigned int mask, rmask;
  bool interrupt_handler = m68k_interrupt_function_p (current_function_decl);

  /* Only compute the frame once per function.
     Don't cache information until reload has been completed.  */
  if (current_frame.funcdef_no == current_function_funcdef_no
      && reload_completed)
    return;

  /* Round the local frame size up to a multiple of 4 bytes.  */
  current_frame.size = (get_frame_size () + 3) & -4;

  /* Scan the sixteen integer registers.  Build both a forward mask and
     a bit-reversed mask; the latter is the encoding used by the
     predecrement form of movem.  */
  mask = rmask = saved = 0;
  for (regno = 0; regno < 16; regno++)
    if (m68k_save_reg (regno, interrupt_handler))
      {
	mask |= 1 << regno;
	rmask |= 1 << (15 - regno);
	saved++;
      }
  current_frame.offset = saved * 4;
  current_frame.reg_no = saved;
  current_frame.reg_mask = mask;
  current_frame.reg_rev_mask = rmask;

  /* Likewise for the eight FP registers (hard regnos 16-23), only when
     hard-float code is being generated.  */
  current_frame.foffset = 0;
  mask = rmask = saved = 0;
  if (TARGET_HARD_FLOAT)
    {
      for (regno = 16; regno < 24; regno++)
	if (m68k_save_reg (regno, interrupt_handler))
	  {
	    mask |= 1 << (regno - 16);
	    rmask |= 1 << (23 - regno);
	    saved++;
	  }
      current_frame.foffset = saved * TARGET_FP_REG_SIZE;
      current_frame.offset += current_frame.foffset;
    }
  current_frame.fpu_no = saved;
  current_frame.fpu_mask = mask;
  current_frame.fpu_rev_mask = rmask;

  /* Remember what function this frame refers to.  */
  current_frame.funcdef_no = current_function_funcdef_no;
}
 
/* Return the offset to add when eliminating register FROM in favor of
   register TO.  Only arg-pointer/frame-pointer eliminations to the
   frame or stack pointer are valid.  */
HOST_WIDE_INT
m68k_initial_elimination_offset (int from, int to)
{
  int argptr_offset;

  /* The arg pointer points 8 bytes before the start of the arguments,
     as defined by FIRST_PARM_OFFSET.  This makes it coincident with the
     frame pointer in most frames.  */
  argptr_offset = frame_pointer_needed ? 0 : UNITS_PER_WORD;
  if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
    return argptr_offset;

  m68k_compute_frame_layout ();

  gcc_assert (to == STACK_POINTER_REGNUM);
  if (from == ARG_POINTER_REGNUM)
    return current_frame.offset + current_frame.size - argptr_offset;
  if (from == FRAME_POINTER_REGNUM)
    return current_frame.offset + current_frame.size;

  gcc_unreachable ();
}
 
/* Refer to the array `regs_ever_live' to determine which registers
   to save; `regs_ever_live[I]' is nonzero if register number I
   is ever used in the function.  This function is responsible for
   knowing which registers should not be saved even if used.
   Return true if we need to save REGNO.

   NOTE: the ordering of the checks below is significant; e.g. the EH
   return data registers are accepted before the fixed_regs check.  */

static bool
m68k_save_reg (unsigned int regno, bool interrupt_handler)
{
  /* The PIC register must be preserved when it is live, or when a
     non-leaf function is built with -mid-shared-library (the prologue
     reloads it in that case).  */
  if (flag_pic && regno == PIC_OFFSET_TABLE_REGNUM)
    {
      if (current_function_uses_pic_offset_table)
	return true;
      if (!current_function_is_leaf && TARGET_ID_SHARED_LIBRARY)
	return true;
    }

  /* __builtin_eh_return needs its data registers saved.  */
  if (current_function_calls_eh_return)
    {
      unsigned int i;
      for (i = 0; ; i++)
	{
	  unsigned int test = EH_RETURN_DATA_REGNO (i);
	  if (test == INVALID_REGNUM)
	    break;
	  if (test == regno)
	    return true;
	}
    }

  /* Fixed regs we never touch.  */
  if (fixed_regs[regno])
    return false;

  /* The frame pointer (if it is such) is handled specially.  */
  if (regno == FRAME_POINTER_REGNUM && frame_pointer_needed)
    return false;

  /* Interrupt handlers must also save call_used_regs
     if they are live or when calling nested functions.  */
  if (interrupt_handler)
    {
      if (regs_ever_live[regno])
	return true;

      if (!current_function_is_leaf && call_used_regs[regno])
	return true;
    }

  /* Never need to save registers that aren't touched.  */
  if (!regs_ever_live[regno])
    return false;

  /* Otherwise save everything that isn't call-clobbered.  */
  return !call_used_regs[regno];
}
 
/* This function generates the assembly code for function entry.
   STREAM is a stdio stream to output the code to.
   SIZE is an int: how many units of temporary storage to allocate.

   The prologue: optionally checks the stack limit, allocates the frame
   (with link when a frame pointer is needed), saves FP and integer
   registers, emits the matching dwarf2 CFI, and finally loads the PIC
   base register if required.  */

static void
m68k_output_function_prologue (FILE *stream,
			       HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT fsize_with_regs;
  HOST_WIDE_INT cfa_offset = INCOMING_FRAME_SP_OFFSET;

  m68k_compute_frame_layout ();

  /* If the stack limit is a symbol, we can check it here,
     before actually allocating the space.  */
  if (current_function_limit_stack
      && GET_CODE (stack_limit_rtx) == SYMBOL_REF)
    asm_fprintf (stream, "\tcmp" ASM_DOT "l %I%s+%wd,%Rsp\n\ttrapcs\n",
		 XSTR (stack_limit_rtx, 0), current_frame.size + 4);

  /* On ColdFire add register save into initial stack frame setup, if possible.  */
  fsize_with_regs = current_frame.size;
  if (TARGET_COLDFIRE)
    {
      if (current_frame.reg_no > 2)
	fsize_with_regs += current_frame.reg_no * 4;
      if (current_frame.fpu_no)
	fsize_with_regs += current_frame.fpu_no * 8;
    }

  if (frame_pointer_needed)
    {
      if (current_frame.size == 0 && TARGET_68040)
	/* on the 68040, pea + move is faster than link.w 0 */
	fprintf (stream, (MOTOROLA
			  ? "\tpea (%s)\n\tmove.l %s,%s\n"
			  : "\tpea %s@\n\tmovel %s,%s\n"),
		 M68K_REGNAME (FRAME_POINTER_REGNUM),
		 M68K_REGNAME (STACK_POINTER_REGNUM),
		 M68K_REGNAME (FRAME_POINTER_REGNUM));
      else if (fsize_with_regs < 0x8000)
	asm_fprintf (stream, "\tlink" ASM_DOTW " %s,%I%wd\n",
		     M68K_REGNAME (FRAME_POINTER_REGNUM), -fsize_with_regs);
      else if (TARGET_68020)
	asm_fprintf (stream, "\tlink" ASM_DOTL " %s,%I%wd\n",
		     M68K_REGNAME (FRAME_POINTER_REGNUM), -fsize_with_regs);
      else
	/* Adding negative number is faster on the 68040.  */
	asm_fprintf (stream,
		     "\tlink" ASM_DOTW " %s,%I0\n"
		     "\tadd" ASM_DOT "l %I%wd,%Rsp\n",
		     M68K_REGNAME (FRAME_POINTER_REGNUM), -fsize_with_regs);
    }
  else if (fsize_with_regs) /* !frame_pointer_needed */
    {
      if (fsize_with_regs < 0x8000)
	{
	  if (fsize_with_regs <= 8)
	    {
	      if (!TARGET_COLDFIRE)
		asm_fprintf (stream, "\tsubq" ASM_DOT "w %I%wd,%Rsp\n",
			     fsize_with_regs);
	      else
		asm_fprintf (stream, "\tsubq" ASM_DOT "l %I%wd,%Rsp\n",
			     fsize_with_regs);
	    }
	  else if (fsize_with_regs <= 16 && TARGET_CPU32)
	    /* On the CPU32 it is faster to use two subqw instructions to
	       subtract a small integer (8 < N <= 16) from a register.  */
	    asm_fprintf (stream,
			 "\tsubq" ASM_DOT "w %I8,%Rsp\n"
			 "\tsubq" ASM_DOT "w %I%wd,%Rsp\n",
			 fsize_with_regs - 8);
	  else if (TARGET_68040)
	    /* Adding negative number is faster on the 68040.  */
	    asm_fprintf (stream, "\tadd" ASM_DOT "w %I%wd,%Rsp\n",
			 -fsize_with_regs);
	  else
	    asm_fprintf (stream, (MOTOROLA
				  ? "\tlea (%wd,%Rsp),%Rsp\n"
				  : "\tlea %Rsp@(%wd),%Rsp\n"),
			 -fsize_with_regs);
	}
      else /* fsize_with_regs >= 0x8000 */
	asm_fprintf (stream, "\tadd" ASM_DOT "l %I%wd,%Rsp\n",
		     -fsize_with_regs);
    } /* !frame_pointer_needed */

  /* Emit dwarf2 CFI describing the frame allocation just performed.  */
  if (dwarf2out_do_frame ())
    {
      if (frame_pointer_needed)
	{
	  char *l;
	  l = (char *) dwarf2out_cfi_label ();
	  cfa_offset += 4;
	  dwarf2out_reg_save (l, FRAME_POINTER_REGNUM, -cfa_offset);
	  dwarf2out_def_cfa (l, FRAME_POINTER_REGNUM, cfa_offset);
	  cfa_offset += current_frame.size;
	}
      else
	{
	  cfa_offset += current_frame.size;
	  dwarf2out_def_cfa ("", STACK_POINTER_REGNUM, cfa_offset);
	}
    }

  /* Save any live floating-point registers.  */
  if (current_frame.fpu_mask)
    {
      if (TARGET_68881)
	{
	  asm_fprintf (stream, (MOTOROLA
				? "\tfmovm %I0x%x,-(%Rsp)\n"
				: "\tfmovem %I0x%x,%Rsp@-\n"),
		       current_frame.fpu_mask);
	}
      else
	{
	  int offset;

	  /* stack already has registers in it.  Find the offset from
	     the bottom of stack to where the FP registers go */
	  if (current_frame.reg_no <= 2)
	    offset = 0;
	  else
	    offset = current_frame.reg_no * 4;
	  if (offset)
	    asm_fprintf (stream,
			 "\tfmovem %I0x%x,%d(%Rsp)\n",
			 current_frame.fpu_rev_mask,
			 offset);
	  else
	    asm_fprintf (stream,
			 "\tfmovem %I0x%x,(%Rsp)\n",
			 current_frame.fpu_rev_mask);
	}

      if (dwarf2out_do_frame ())
	{
	  char *l = (char *) dwarf2out_cfi_label ();
	  int n_regs, regno;

	  cfa_offset += current_frame.fpu_no * TARGET_FP_REG_SIZE;
	  if (! frame_pointer_needed)
	    dwarf2out_def_cfa (l, STACK_POINTER_REGNUM, cfa_offset);
	  for (regno = 16, n_regs = 0; regno < 24; regno++)
	    if (current_frame.fpu_mask & (1 << (regno - 16)))
	      dwarf2out_reg_save (l, regno, -cfa_offset
				  + n_regs++ * TARGET_FP_REG_SIZE);
	}
    }

  /* If the stack limit is not a symbol, check it here.
     This has the disadvantage that it may be too late...  */
  if (current_function_limit_stack)
    {
      if (REG_P (stack_limit_rtx))
	asm_fprintf (stream, "\tcmp" ASM_DOT "l %s,%Rsp\n\ttrapcs\n",
		     M68K_REGNAME (REGNO (stack_limit_rtx)));
      else if (GET_CODE (stack_limit_rtx) != SYMBOL_REF)
	warning (0, "stack limit expression is not supported");
    }

  if (current_frame.reg_no <= 2)
    {
      /* Store each separately in the same order moveml uses.
	 Using two movel instructions instead of a single moveml
	 is about 15% faster for the 68020 and 68030 at no expense
	 in code size.  */

      int i;

      for (i = 0; i < 16; i++)
	if (current_frame.reg_rev_mask & (1 << i))
	  {
	    asm_fprintf (stream, (MOTOROLA
				  ? "\t%Omove.l %s,-(%Rsp)\n"
				  : "\tmovel %s,%Rsp@-\n"),
			 M68K_REGNAME (15 - i));
	    if (dwarf2out_do_frame ())
	      {
		char *l = (char *) dwarf2out_cfi_label ();

		cfa_offset += 4;
		if (! frame_pointer_needed)
		  dwarf2out_def_cfa (l, STACK_POINTER_REGNUM, cfa_offset);
		dwarf2out_reg_save (l, 15 - i, -cfa_offset);
	      }
	  }
    }
  else if (current_frame.reg_rev_mask)
    {
      if (TARGET_COLDFIRE)
	/* The ColdFire does not support the predecrement form of the
	   MOVEM instruction, so we must adjust the stack pointer and
	   then use the plain address register indirect mode.
	   The required register save space was combined earlier with
	   the fsize_with_regs amount.  */

	asm_fprintf (stream, (MOTOROLA
			      ? "\tmovm.l %I0x%x,(%Rsp)\n"
			      : "\tmoveml %I0x%x,%Rsp@\n"),
		     current_frame.reg_mask);
      else
	asm_fprintf (stream, (MOTOROLA
			      ? "\tmovm.l %I0x%x,-(%Rsp)\n"
			      : "\tmoveml %I0x%x,%Rsp@-\n"),
		     current_frame.reg_rev_mask);
      if (dwarf2out_do_frame ())
	{
	  char *l = (char *) dwarf2out_cfi_label ();
	  int n_regs, regno;

	  cfa_offset += current_frame.reg_no * 4;
	  if (! frame_pointer_needed)
	    dwarf2out_def_cfa (l, STACK_POINTER_REGNUM, cfa_offset);
	  for (regno = 0, n_regs = 0; regno < 16; regno++)
	    if (current_frame.reg_mask & (1 << regno))
	      dwarf2out_reg_save (l, regno, -cfa_offset + n_regs++ * 4);
	}
    }
  /* Finally, load the PIC base register when this function needs it.  */
  if (!TARGET_SEP_DATA && flag_pic
      && (current_function_uses_pic_offset_table
	  || (!current_function_is_leaf && TARGET_ID_SHARED_LIBRARY)))
    {
      if (TARGET_ID_SHARED_LIBRARY)
	{
	  asm_fprintf (stream, "\tmovel %s@(%s), %s\n",
		       M68K_REGNAME (PIC_OFFSET_TABLE_REGNUM),
		       m68k_library_id_string,
		       M68K_REGNAME (PIC_OFFSET_TABLE_REGNUM));
	}
      else
	{
	  if (MOTOROLA)
	    asm_fprintf (stream,
			 "\t%Olea (%Rpc, %U_GLOBAL_OFFSET_TABLE_@GOTPC), %s\n",
			 M68K_REGNAME (PIC_OFFSET_TABLE_REGNUM));
	  else
	    {
	      asm_fprintf (stream, "\tmovel %I%U_GLOBAL_OFFSET_TABLE_, %s\n",
			   M68K_REGNAME (PIC_OFFSET_TABLE_REGNUM));
	      asm_fprintf (stream, "\tlea %Rpc@(0,%s:l),%s\n",
			   M68K_REGNAME (PIC_OFFSET_TABLE_REGNUM),
			   M68K_REGNAME (PIC_OFFSET_TABLE_REGNUM));
	    }
	}
    }
}
/* Return true if this function's epilogue can be output as RTL.
   That is possible only after reload, when there is no frame pointer,
   no local frame, and no registers to restore.  */

bool
use_return_insn (void)
{
  if (!reload_completed || frame_pointer_needed || get_frame_size () != 0)
    return false;

  /* We can output the epilogue as RTL only if no registers need to be
     restored.  */
  m68k_compute_frame_layout ();
  return current_frame.reg_no == 0;
}
 
/* This function generates the assembly code for function exit,
   on machines that need it.

   The function epilogue should not depend on the current stack pointer!
   It should use the frame pointer only, if there is a frame pointer.
   This is mandatory because of alloca; we also take advantage of it to
   omit stack adjustments before returning.  */

static void
m68k_output_function_epilogue (FILE *stream,
			       HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT fsize, fsize_with_regs;
  /* BIG is set when restore displacements don't fit in 16 bits and
     must be materialized in a1 first.  */
  bool big = false;
  bool restore_from_sp = false;
  rtx insn = get_last_insn ();

  m68k_compute_frame_layout ();

  /* If the last insn was a BARRIER, we don't have to write any code.  */
  if (GET_CODE (insn) == NOTE)
    insn = prev_nonnote_insn (insn);
  if (insn && GET_CODE (insn) == BARRIER)
    {
      /* Output just a no-op so that debuggers don't get confused
	 about which function the pc is in at this address.  */
      fprintf (stream, "\tnop\n");
      return;
    }

#ifdef FUNCTION_EXTRA_EPILOGUE
  FUNCTION_EXTRA_EPILOGUE (stream, size);
#endif

  fsize = current_frame.size;

  /* FIXME: leaf_function_p below is too strong.
     What we really need to know there is if there could be pending
     stack adjustment needed at that point.  */
  restore_from_sp
    = (! frame_pointer_needed
       || (! current_function_calls_alloca && leaf_function_p ()));

  /* fsize_with_regs is the size we need to adjust the sp when
     popping the frame.  */
  fsize_with_regs = fsize;

  /* Because the ColdFire doesn't support moveml with
     complex address modes, we must adjust the stack manually
     after restoring registers. When the frame pointer isn't used,
     we can merge movem adjustment into frame unlinking
     made immediately after it.  */
  if (TARGET_COLDFIRE && restore_from_sp)
    {
      if (current_frame.reg_no > 2)
	fsize_with_regs += current_frame.reg_no * 4;
      if (current_frame.fpu_no)
	fsize_with_regs += current_frame.fpu_no * 8;
    }

  if (current_frame.offset + fsize >= 0x8000
      && ! restore_from_sp
      && (current_frame.reg_mask || current_frame.fpu_mask))
    {
      /* Because the ColdFire doesn't support moveml with
	 complex address modes we make an extra correction here.  */
      if (TARGET_COLDFIRE)
	fsize += current_frame.offset;

      asm_fprintf (stream, "\t%Omove" ASM_DOT "l %I%wd,%Ra1\n", -fsize);
      fsize = 0, big = true;
    }
  if (current_frame.reg_no <= 2)
    {
      /* Restore each separately in the same order moveml does.
	 Using two movel instructions instead of a single moveml
	 is about 15% faster for the 68020 and 68030 at no expense
	 in code size.  */

      int i;
      HOST_WIDE_INT offset = current_frame.offset + fsize;

      for (i = 0; i < 16; i++)
	if (current_frame.reg_mask & (1 << i))
	  {
	    if (big)
	      {
		if (MOTOROLA)
		  asm_fprintf (stream, "\t%Omove.l -%wd(%s,%Ra1.l),%s\n",
			       offset,
			       M68K_REGNAME (FRAME_POINTER_REGNUM),
			       M68K_REGNAME (i));
		else
		  asm_fprintf (stream, "\tmovel %s@(-%wd,%Ra1:l),%s\n",
			       M68K_REGNAME (FRAME_POINTER_REGNUM),
			       offset,
			       M68K_REGNAME (i));
	      }
	    else if (restore_from_sp)
	      asm_fprintf (stream, (MOTOROLA
				    ? "\t%Omove.l (%Rsp)+,%s\n"
				    : "\tmovel %Rsp@+,%s\n"),
			   M68K_REGNAME (i));
	    else
	      {
		if (MOTOROLA)
		  asm_fprintf (stream, "\t%Omove.l -%wd(%s),%s\n",
			       offset,
			       M68K_REGNAME (FRAME_POINTER_REGNUM),
			       M68K_REGNAME (i));
		else
		  asm_fprintf (stream, "\tmovel %s@(-%wd),%s\n",
			       M68K_REGNAME (FRAME_POINTER_REGNUM),
			       offset,
			       M68K_REGNAME (i));
	      }
	    offset -= 4;
	  }
    }
  else if (current_frame.reg_mask)
    {
      /* The ColdFire requires special handling due to its limited moveml
	 insn.  */
      if (TARGET_COLDFIRE)
	{
	  if (big)
	    {
	      asm_fprintf (stream, "\tadd" ASM_DOT "l %s,%Ra1\n",
			   M68K_REGNAME (FRAME_POINTER_REGNUM));
	      asm_fprintf (stream, (MOTOROLA
				    ? "\tmovm.l (%Ra1),%I0x%x\n"
				    : "\tmoveml %Ra1@,%I0x%x\n"),
			   current_frame.reg_mask);
	    }
	  else if (restore_from_sp)
	    asm_fprintf (stream, (MOTOROLA
				  ? "\tmovm.l (%Rsp),%I0x%x\n"
				  : "\tmoveml %Rsp@,%I0x%x\n"),
			 current_frame.reg_mask);
	  else
	    {
	      if (MOTOROLA)
		asm_fprintf (stream, "\tmovm.l -%wd(%s),%I0x%x\n",
			     current_frame.offset + fsize,
			     M68K_REGNAME (FRAME_POINTER_REGNUM),
			     current_frame.reg_mask);
	      else
		asm_fprintf (stream, "\tmoveml %s@(-%wd),%I0x%x\n",
			     M68K_REGNAME (FRAME_POINTER_REGNUM),
			     current_frame.offset + fsize,
			     current_frame.reg_mask);
	    }
	}
      else /* !TARGET_COLDFIRE */
	{
	  if (big)
	    {
	      if (MOTOROLA)
		asm_fprintf (stream, "\tmovm.l -%wd(%s,%Ra1.l),%I0x%x\n",
			     current_frame.offset + fsize,
			     M68K_REGNAME (FRAME_POINTER_REGNUM),
			     current_frame.reg_mask);
	      else
		asm_fprintf (stream, "\tmoveml %s@(-%wd,%Ra1:l),%I0x%x\n",
			     M68K_REGNAME (FRAME_POINTER_REGNUM),
			     current_frame.offset + fsize,
			     current_frame.reg_mask);
	    }
	  else if (restore_from_sp)
	    {
	      asm_fprintf (stream, (MOTOROLA
				    ? "\tmovm.l (%Rsp)+,%I0x%x\n"
				    : "\tmoveml %Rsp@+,%I0x%x\n"),
			   current_frame.reg_mask);
	    }
	  else
	    {
	      if (MOTOROLA)
		asm_fprintf (stream, "\tmovm.l -%wd(%s),%I0x%x\n",
			     current_frame.offset + fsize,
			     M68K_REGNAME (FRAME_POINTER_REGNUM),
			     current_frame.reg_mask);
	      else
		asm_fprintf (stream, "\tmoveml %s@(-%wd),%I0x%x\n",
			     M68K_REGNAME (FRAME_POINTER_REGNUM),
			     current_frame.offset + fsize,
			     current_frame.reg_mask);
	    }
	}
    }
  /* Restore any saved floating-point registers.  */
  if (current_frame.fpu_rev_mask)
    {
      if (big)
	{
	  if (TARGET_COLDFIRE)
	    {
	      if (current_frame.reg_no)
		asm_fprintf (stream, MOTOROLA ?
			     "\tfmovem.d %d(%Ra1),%I0x%x\n" :
			     "\tfmovmd (%d,%Ra1),%I0x%x\n",
			     current_frame.reg_no * 4,
			     current_frame.fpu_rev_mask);
	      else
		asm_fprintf (stream, MOTOROLA ?
			     "\tfmovem.d (%Ra1),%I0x%x\n" :
			     "\tfmovmd (%Ra1),%I0x%x\n",
			     current_frame.fpu_rev_mask);
	    }
	  else if (MOTOROLA)
	    asm_fprintf (stream, "\tfmovm -%wd(%s,%Ra1.l),%I0x%x\n",
			 current_frame.foffset + fsize,
			 M68K_REGNAME (FRAME_POINTER_REGNUM),
			 current_frame.fpu_rev_mask);
	  else
	    asm_fprintf (stream, "\tfmovem %s@(-%wd,%Ra1:l),%I0x%x\n",
			 M68K_REGNAME (FRAME_POINTER_REGNUM),
			 current_frame.foffset + fsize,
			 current_frame.fpu_rev_mask);
	}
      else if (restore_from_sp)
	{
	  if (TARGET_COLDFIRE)
	    {
	      int offset;

	      /* Stack already has registers in it.  Find the offset from
		 the bottom of stack to where the FP registers go.  */
	      if (current_frame.reg_no <= 2)
		offset = 0;
	      else
		offset = current_frame.reg_no * 4;
	      if (offset)
		asm_fprintf (stream,
			     "\tfmovem %Rsp@(%d), %I0x%x\n",
			     offset, current_frame.fpu_rev_mask);
	      else
		asm_fprintf (stream,
			     "\tfmovem %Rsp@, %I0x%x\n",
			     current_frame.fpu_rev_mask);
	    }
	  else
	    asm_fprintf (stream, MOTOROLA ?
			 "\tfmovm (%Rsp)+,%I0x%x\n" :
			 "\tfmovem %Rsp@+,%I0x%x\n",
			 current_frame.fpu_rev_mask);
	}
      else
	{
	  if (MOTOROLA && !TARGET_COLDFIRE)
	    asm_fprintf (stream, "\tfmovm -%wd(%s),%I0x%x\n",
			 current_frame.foffset + fsize,
			 M68K_REGNAME (FRAME_POINTER_REGNUM),
			 current_frame.fpu_rev_mask);
	  else
	    asm_fprintf (stream, "\tfmovem %s@(-%wd),%I0x%x\n",
			 M68K_REGNAME (FRAME_POINTER_REGNUM),
			 current_frame.foffset + fsize,
			 current_frame.fpu_rev_mask);
	}
    }
  /* Unlink the frame (or pop it manually), then return.  */
  if (frame_pointer_needed)
    fprintf (stream, "\tunlk %s\n", M68K_REGNAME (FRAME_POINTER_REGNUM));
  else if (fsize_with_regs)
    {
      if (fsize_with_regs <= 8)
	{
	  if (!TARGET_COLDFIRE)
	    asm_fprintf (stream, "\taddq" ASM_DOT "w %I%wd,%Rsp\n",
			 fsize_with_regs);
	  else
	    asm_fprintf (stream, "\taddq" ASM_DOT "l %I%wd,%Rsp\n",
			 fsize_with_regs);
	}
      else if (fsize_with_regs <= 16 && TARGET_CPU32)
	{
	  /* On the CPU32 it is faster to use two addqw instructions to
	     add a small integer (8 < N <= 16) to a register.  */
	  asm_fprintf (stream,
		       "\taddq" ASM_DOT "w %I8,%Rsp\n"
		       "\taddq" ASM_DOT "w %I%wd,%Rsp\n",
		       fsize_with_regs - 8);
	}
      else if (fsize_with_regs < 0x8000)
	{
	  if (TARGET_68040)
	    asm_fprintf (stream, "\tadd" ASM_DOT "w %I%wd,%Rsp\n",
			 fsize_with_regs);
	  else
	    asm_fprintf (stream, (MOTOROLA
				  ? "\tlea (%wd,%Rsp),%Rsp\n"
				  : "\tlea %Rsp@(%wd),%Rsp\n"),
			 fsize_with_regs);
	}
      else
	asm_fprintf (stream, "\tadd" ASM_DOT "l %I%wd,%Rsp\n", fsize_with_regs);
    }
  /* __builtin_eh_return adds the stack adjustment held in a0.  */
  if (current_function_calls_eh_return)
    asm_fprintf (stream, "\tadd" ASM_DOT "l %Ra0,%Rsp\n");
  /* Interrupt handlers return with rte; functions that pop their own
     arguments with rtd; everything else with rts.  */
  if (m68k_interrupt_function_p (current_function_decl))
    fprintf (stream, "\trte\n");
  else if (current_function_pops_args)
    asm_fprintf (stream, "\trtd %I%d\n", current_function_pops_args);
  else
    fprintf (stream, "\trts\n");
}
/* Return true if X is a valid comparison operator for the dbcc
instruction.
 
Note it rejects floating point comparison operators.
(In the future we could use Fdbcc).
 
It also rejects some comparisons when CC_NO_OVERFLOW is set. */
/* Predicate body for valid dbcc comparisons (see the comment above).  */
int
valid_dbcc_comparison_p_2 (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  enum rtx_code code = GET_CODE (x);

  /* Equality and unsigned orderings are always representable.  */
  if (code == EQ || code == NE || code == GTU || code == LTU
      || code == GEU || code == LEU)
    return 1;

  /* Reject signed orderings when CC_NO_OVERFLOW is set.  This may be
     over conservative.  */
  if (code == GT || code == LT || code == GE || code == LE)
    return ! (cc_prev_status.flags & CC_NO_OVERFLOW);

  return 0;
}
 
/* Return nonzero if flags are currently in the 68881 flag register.
   Note: returns the raw masked flag bits, not a normalized 0/1.  */
int
flags_in_68881 (void)
{
  /* We could add support for these in the future */
  return cc_status.flags & CC_IN_68881;
}
 
/* Output a BSR instruction suitable for PIC code.
   DEST is the call target; non-symbolic targets get a plain jsr.  */
void
m68k_output_pic_call (rtx dest)
{
  const char *out;

  if (!(GET_CODE (dest) == MEM && GET_CODE (XEXP (dest, 0)) == SYMBOL_REF))
    out = "jsr %0";
  /* We output a BSR instruction if we're building for a target that
     supports long branches.  Otherwise we generate one of two sequences:
     a shorter one that uses a GOT entry or a longer one that doesn't.
     We'll use the -Os command-line flag to decide which to generate.
     Both sequences take the same time to execute on the ColdFire.  */
  else if (TARGET_PCREL)
    out = "bsr.l %o0";
  else if (TARGET_68020)
#if defined(USE_GAS)
    out = "bsr.l %0@PLTPC";
#else
    out = "bsr %0@PLTPC";
#endif
  else if (optimize_size || TARGET_ID_SHARED_LIBRARY)
    out = "move.l %0@GOT(%%a5), %%a1\n\tjsr (%%a1)";
  else
    out = "lea %0-.-8,%%a1\n\tjsr 0(%%pc,%%a1)";

  output_asm_insn (out, &dest);
}
 
/* Output a dbCC; jCC sequence.  Note we do not handle the
   floating point version of this sequence (Fdbcc).  We also
   do not handle alternative conditions when CC_NO_OVERFLOW is
   set.  It is assumed that valid_dbcc_comparison_p and flags_in_68881 will
   kick those out before we get here.

   operands[0] is the loop counter, operands[1] the loop-top label,
   operands[2] the exit label and operands[3] the comparison rtx.  */

void
output_dbcc_and_branch (rtx *operands)
{
  switch (GET_CODE (operands[3]))
    {
    case EQ:
      output_asm_insn (MOTOROLA
		       ? "dbeq %0,%l1\n\tjbeq %l2"
		       : "dbeq %0,%l1\n\tjeq %l2",
		       operands);
      break;

    case NE:
      output_asm_insn (MOTOROLA
		       ? "dbne %0,%l1\n\tjbne %l2"
		       : "dbne %0,%l1\n\tjne %l2",
		       operands);
      break;

    case GT:
      output_asm_insn (MOTOROLA
		       ? "dbgt %0,%l1\n\tjbgt %l2"
		       : "dbgt %0,%l1\n\tjgt %l2",
		       operands);
      break;

    case GTU:
      output_asm_insn (MOTOROLA
		       ? "dbhi %0,%l1\n\tjbhi %l2"
		       : "dbhi %0,%l1\n\tjhi %l2",
		       operands);
      break;

    case LT:
      output_asm_insn (MOTOROLA
		       ? "dblt %0,%l1\n\tjblt %l2"
		       : "dblt %0,%l1\n\tjlt %l2",
		       operands);
      break;

    case LTU:
      output_asm_insn (MOTOROLA
		       ? "dbcs %0,%l1\n\tjbcs %l2"
		       : "dbcs %0,%l1\n\tjcs %l2",
		       operands);
      break;

    case GE:
      output_asm_insn (MOTOROLA
		       ? "dbge %0,%l1\n\tjbge %l2"
		       : "dbge %0,%l1\n\tjge %l2",
		       operands);
      break;

    case GEU:
      output_asm_insn (MOTOROLA
		       ? "dbcc %0,%l1\n\tjbcc %l2"
		       : "dbcc %0,%l1\n\tjcc %l2",
		       operands);
      break;

    case LE:
      output_asm_insn (MOTOROLA
		       ? "dble %0,%l1\n\tjble %l2"
		       : "dble %0,%l1\n\tjle %l2",
		       operands);
      break;

    case LEU:
      output_asm_insn (MOTOROLA
		       ? "dbls %0,%l1\n\tjbls %l2"
		       : "dbls %0,%l1\n\tjls %l2",
		       operands);
      break;

    default:
      gcc_unreachable ();
    }

  /* If the decrement is to be done in SImode, then we have
     to compensate for the fact that dbcc decrements in HImode.  */
  switch (GET_MODE (operands[0]))
    {
    case SImode:
      output_asm_insn (MOTOROLA
		       ? "clr%.w %0\n\tsubq%.l #1,%0\n\tjbpl %l1"
		       : "clr%.w %0\n\tsubq%.l #1,%0\n\tjpl %l1",
		       operands);
      break;

    case HImode:
      break;

    default:
      gcc_unreachable ();
    }
}
 
/* Output the assembly for a DImode scc operation: compare OPERAND1
   against OPERAND2 according to OP and store the comparison result in
   DEST with an s<cc> instruction.  All output goes through
   output_asm_insn, so the returned template is "".  */
const char *
output_scc_di (rtx op, rtx operand1, rtx operand2, rtx dest)
{
  rtx loperands[7];
  enum rtx_code op_code = GET_CODE (op);

  /* This does not produce a useful cc.  */
  CC_STATUS_INIT;

  /* The m68k cmp.l instruction requires operand1 to be a reg as used
     below.  Swap the operands and change the op if these requirements
     are not fulfilled.  */
  if (GET_CODE (operand2) == REG && GET_CODE (operand1) != REG)
    {
      rtx tmp = operand1;

      operand1 = operand2;
      operand2 = tmp;
      op_code = swap_condition (op_code);
    }
  /* loperands[0]/[1]: first and second SImode halves of operand1.  */
  loperands[0] = operand1;
  if (GET_CODE (operand1) == REG)
    loperands[1] = gen_rtx_REG (SImode, REGNO (operand1) + 1);
  else
    loperands[1] = adjust_address (operand1, SImode, 4);
  if (operand2 != const0_rtx)
    {
      /* loperands[2]/[3]: likewise for operand2 (skipped for a
	 comparison against zero, which uses tst instead).  */
      loperands[2] = operand2;
      if (GET_CODE (operand2) == REG)
	loperands[3] = gen_rtx_REG (SImode, REGNO (operand2) + 1);
      else
	loperands[3] = adjust_address (operand2, SImode, 4);
    }
  /* loperands[4]: label reached when the first-half comparison already
     decides the result.  */
  loperands[4] = gen_label_rtx ();
  if (operand2 != const0_rtx)
    {
      output_asm_insn (MOTOROLA
		       ? "cmp%.l %2,%0\n\tjbne %l4\n\tcmp%.l %3,%1"
		       : "cmp%.l %2,%0\n\tjne %l4\n\tcmp%.l %3,%1",
		       loperands);
    }
  else
    {
      /* tst.l does not accept address registers before the 68020;
	 fall back to cmp.w #0 there.  */
      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[0]))
	output_asm_insn ("tst%.l %0", loperands);
      else
	output_asm_insn ("cmp%.w #0,%0", loperands);

      output_asm_insn (MOTOROLA ? "jbne %l4" : "jne %l4", loperands);

      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[1]))
	output_asm_insn ("tst%.l %1", loperands);
      else
	output_asm_insn ("cmp%.w #0,%1", loperands);
    }

  /* loperands[5]: destination of the s<cc>.  Ordered comparisons also
     need loperands[6], a label skipping the second s<cc>.  */
  loperands[5] = dest;

  switch (op_code)
    {
    case EQ:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("seq %5", loperands);
      break;

    case NE:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sne %5", loperands);
      break;

    case GT:
      loperands[6] = gen_label_rtx ();
      output_asm_insn (MOTOROLA ? "shi %5\n\tjbra %l6" : "shi %5\n\tjra %l6",
		       loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sgt %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case GTU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("shi %5", loperands);
      break;

    case LT:
      loperands[6] = gen_label_rtx ();
      output_asm_insn (MOTOROLA ? "scs %5\n\tjbra %l6" : "scs %5\n\tjra %l6",
		       loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("slt %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case LTU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("scs %5", loperands);
      break;

    case GE:
      loperands[6] = gen_label_rtx ();
      output_asm_insn (MOTOROLA ? "scc %5\n\tjbra %l6" : "scc %5\n\tjra %l6",
		       loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sge %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case GEU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("scc %5", loperands);
      break;

    case LE:
      loperands[6] = gen_label_rtx ();
      output_asm_insn (MOTOROLA ? "sls %5\n\tjbra %l6" : "sls %5\n\tjra %l6",
		       loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sle %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case LEU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sls %5", loperands);
      break;

    default:
      gcc_unreachable ();
    }
  return "";
}
 
/* Output a bit-test instruction testing bit COUNTOP of DATAOP;
   SIGNPOS is the sign-bit position of the storage unit in use
   (7, 15 or 31).  Sets cc_status to match the emitted insn and
   returns the instruction template.  */
const char *
output_btst (rtx *operands, rtx countop, rtx dataop, rtx insn, int signpos)
{
  operands[0] = countop;
  operands[1] = dataop;

  if (GET_CODE (countop) == CONST_INT)
    {
      int bitnum = INTVAL (countop);

      /* If the bit number is bigger than the size of the storage unit
	 in use, advance to the containing unit of the same size.  */
      if (bitnum > signpos)
	{
	  int offset = (bitnum & ~signpos) / 8;
	  bitnum &= signpos;
	  operands[1] = dataop = adjust_address (dataop, QImode, offset);
	}

      cc_status.flags = ((bitnum == signpos
			  ? CC_NOT_POSITIVE : CC_NOT_NEGATIVE)
			 | CC_Z_IN_NOT_N);

      /* Testing the sign bit of a whole unit reduces to a plain tst
	 when the following insn only checks (in)equality.  (These used
	 to use next_insns_test_no..., but this does the same job.)  */
      if ((bitnum == 31 || bitnum == 15 || bitnum == 7)
	  && next_insn_tests_no_inequality (insn))
	return bitnum == 31 ? "tst%.l %1"
	       : bitnum == 15 ? "tst%.w %1" : "tst%.b %1";

      cc_status.flags = CC_NOT_NEGATIVE;
    }
  return "btst %0,%1";
}
/* Legitimize PIC addresses. If the address is already
position-independent, we return ORIG. Newly generated
position-independent addresses go to REG. If we need more
than one register, we lose.
 
An address is legitimized by making an indirect reference
through the Global Offset Table with the name of the symbol
used as an offset.
 
The assembler and linker are responsible for placing the
address of the symbol in the GOT. The function prologue
is responsible for initializing a5 to the starting address
of the GOT.
 
The assembler is also responsible for translating a symbol name
into a constant displacement from the start of the GOT.
 
A quick example may make things a little clearer:
 
When not generating PIC code to store the value 12345 into _foo
we would generate the following code:
 
movel #12345, _foo
 
When generating PIC two transformations are made. First, the compiler
loads the address of foo into a register. So the first transformation makes:
 
lea _foo, a0
movel #12345, a0@
 
The code in movsi will intercept the lea instruction and call this
routine which will transform the instructions into:
 
movel a5@(_foo:w), a0
movel #12345, a0@
 
That (in a nutshell) is how *all* symbol and label references are
handled. */
 
rtx
legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
			rtx reg)
{
  rtx pic_ref = orig;

  /* First handle a simple SYMBOL_REF or LABEL_REF: load its GOT slot,
     i.e. a5-relative memory, into REG and return REG.  */
  if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
    {
      gcc_assert (reg);

      pic_ref = gen_rtx_MEM (Pmode,
			     gen_rtx_PLUS (Pmode,
					   pic_offset_table_rtx, orig));
      current_function_uses_pic_offset_table = 1;
      /* The GOT entry itself is never written at runtime.  */
      MEM_READONLY_P (pic_ref) = 1;
      emit_move_insn (reg, pic_ref);
      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      /* Make sure this has not already been legitimized.  */
      if (GET_CODE (XEXP (orig, 0)) == PLUS
	  && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
	return orig;

      gcc_assert (reg);

      /* legitimize both operands of the PLUS; the second recursion must
	 not clobber REG if the first one already lives there.  */
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
				     base == reg ? 0 : reg);

      /* A constant offset folds directly into the base address.  */
      if (GET_CODE (orig) == CONST_INT)
	return plus_constant (base, INTVAL (orig));
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }
  return pic_ref;
}
 
/* Strategies for loading a 32-bit integer constant into a data
   register, as selected by const_method below.  */
typedef enum { MOVL, SWAP, NEGW, NOTW, NOTB, MOVQ, MVS, MVZ } CONST_METHOD;

static CONST_METHOD const_method (rtx);

/* Nonzero if I fits the moveq immediate range, -128..127.  */
#define USE_MOVQ(i) ((unsigned) ((i) + 128) <= 255)
 
/* Choose the cheapest instruction sequence able to materialize the
   CONST_INT CONSTANT in a data register.  The checks run from cheapest
   to most expensive, so the first match wins.  */
static CONST_METHOD
const_method (rtx constant)
{
  int val = INTVAL (constant);
  unsigned swapped;

  if (USE_MOVQ (val))
    return MOVQ;

  /* The ColdFire doesn't have byte or word operations.  */
  /* FIXME: This may not be useful for the m68060 either.  */
  if (!TARGET_COLDFIRE)
    {
      /* A value in -256..255 that moveq cannot reach directly can be
	 built as moveq #VAL^0xff followed by not.b.  */
      if (USE_MOVQ (val ^ 0xff))
	return NOTB;
      /* The same trick one size up, finishing with not.w.  */
      if (USE_MOVQ (val ^ 0xffff))
	return NOTW;
      /* -65408 is the one value for which neg.w pays off.  */
      if (val == -65408)
	return NEGW;
    }

  /* See whether exchanging the halfwords lands in moveq range.  */
  swapped = val;
  if (USE_MOVQ ((swapped >> 16) | (swapped << 16)))
    return SWAP;

  if (TARGET_CFV4)
    {
      /* ColdFire V4 can zero- or sign-extend an immediate via mvz/mvs.  */
      if (val >= 0 && val <= 65535)
	return MVZ;
      if (val >= -32768 && val <= 32767)
	return MVS;
    }

  /* Nothing shorter applies; emit a full move.l.  */
  return MOVL;
}
 
/* Relative cost of loading the CONST_INT CONSTANT, derived from the
   loading strategy const_method picks for it.  */
static int
const_int_cost (rtx constant)
{
  CONST_METHOD method = const_method (constant);

  /* moveq alone handles these, so they are essentially free.  */
  if (method == MOVQ)
    return 0;
  /* A full move.l with a 32-bit immediate.  */
  if (method == MOVL)
    return 2;
  /* Everything else is moveq plus one fix-up insn
     (mvz/mvs/not.b/not.w/neg.w/swap).  */
  if (method == MVZ || method == MVS || method == NOTB
      || method == NOTW || method == NEGW || method == SWAP)
    return 1;
  gcc_unreachable ();
}
 
/* Implement TARGET_RTX_COSTS: estimate the cost of expression X (rtx
   code CODE) appearing inside an expression of kind OUTER_CODE.  On a
   handled case the cost is stored in *TOTAL and true is returned;
   returning false defers to the generic costing code.  */
static bool
m68k_rtx_costs (rtx x, int code, int outer_code, int *total)
{
  switch (code)
    {
    case CONST_INT:
      /* Constant zero is super cheap due to clr instruction.  */
      if (x == const0_rtx)
	*total = 0;
      else
	*total = const_int_cost (x);
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 3;
      return true;

    case CONST_DOUBLE:
      /* Make 0.0 cheaper than other floating constants to
	 encourage creating tstsf and tstdf insns.  */
      if (outer_code == COMPARE
	  && (x == CONST0_RTX (SFmode) || x == CONST0_RTX (DFmode)))
	*total = 4;
      else
	*total = 5;
      return true;

    /* These are vaguely right for a 68020.  */
    /* The costs for long multiply have been adjusted to work properly
       in synth_mult on the 68020, relative to an average of the time
       for add and the time for shift, taking away a little more because
       sometimes move insns are needed.  */
    /* div?.w is relatively cheaper on 68000 counted in COSTS_N_INSNS
       terms.  */
#define MULL_COST (TARGET_68060 ? 2 : TARGET_68040 ? 5 \
		   : (TARGET_COLDFIRE && !TARGET_5200) ? 3 \
		   : TARGET_COLDFIRE ? 10 : 13)
#define MULW_COST (TARGET_68060 ? 2 : TARGET_68040 ? 3 : TARGET_68020 ? 8 \
		   : (TARGET_COLDFIRE && !TARGET_5200) ? 2 : 5)
#define DIVW_COST (TARGET_68020 ? 27 : TARGET_CF_HWDIV ? 11 : 12)

    case PLUS:
      /* An lea costs about three times as much as a simple add.  */
      if (GET_MODE (x) == SImode
	  && GET_CODE (XEXP (x, 1)) == REG
	  && GET_CODE (XEXP (x, 0)) == MULT
	  && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
	  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
	  && (INTVAL (XEXP (XEXP (x, 0), 1)) == 2
	      || INTVAL (XEXP (XEXP (x, 0), 1)) == 4
	      || INTVAL (XEXP (XEXP (x, 0), 1)) == 8))
	{
	  /* lea an@(dx:l:i),am */
	  *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 2 : 3);
	  return true;
	}
      return false;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      if (TARGET_68060)
	{
	  *total = COSTS_N_INSNS(1);
	  return true;
	}
      if (! TARGET_68020 && ! TARGET_COLDFIRE)
	{
	  /* Pre-68020 shifters run one bit at a time, so the cost
	     grows with the shift count.  */
	  if (GET_CODE (XEXP (x, 1)) == CONST_INT)
	    {
	      if (INTVAL (XEXP (x, 1)) < 16)
		*total = COSTS_N_INSNS (2) + INTVAL (XEXP (x, 1)) / 2;
	      else
		/* We're using clrw + swap for these cases.  */
		*total = COSTS_N_INSNS (4) + (INTVAL (XEXP (x, 1)) - 16) / 2;
	    }
	  else
	    *total = COSTS_N_INSNS (10); /* Worst case.  */
	  return true;
	}
      /* A shift by a big integer takes an extra instruction.  */
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && (INTVAL (XEXP (x, 1)) == 16))
	{
	  *total = COSTS_N_INSNS (2);	 /* clrw;swap */
	  return true;
	}
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && !(INTVAL (XEXP (x, 1)) > 0
	       && INTVAL (XEXP (x, 1)) <= 8))
	{
	  *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 1 : 3);	/* lsr #i,dn */
	  return true;
	}
      return false;

    case MULT:
      /* Widening multiplies of extended operands use the word form, as
	 do QImode/HImode multiplies; only full SImode pays MULL_COST.  */
      if ((GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
	   || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
	  && GET_MODE (x) == SImode)
	*total = COSTS_N_INSNS (MULW_COST);
      else if (GET_MODE (x) == QImode || GET_MODE (x) == HImode)
	*total = COSTS_N_INSNS (MULW_COST);
      else
	*total = COSTS_N_INSNS (MULL_COST);
      return true;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (GET_MODE (x) == QImode || GET_MODE (x) == HImode)
	*total = COSTS_N_INSNS (DIVW_COST);	/* div.w */
      else if (TARGET_CF_HWDIV)
	*total = COSTS_N_INSNS (18);
      else
	*total = COSTS_N_INSNS (43);		/* div.l */
      return true;

    default:
      return false;
    }
}
 
/* Emit the best sequence for loading the CONST_INT operands[1] into the
   data register operands[0], according to const_method.  May rewrite
   operands[1] to the intermediate immediate the sequence needs.  */
const char *
output_move_const_into_data_reg (rtx *operands)
{
  int ival = INTVAL (operands[1]);

  switch (const_method (operands[1]))
    {
    case MOVQ:
      return "moveq %1,%0";
    case MVZ:
      return "mvzw %1,%0";
    case MVS:
      return "mvsw %1,%0";
    case NOTB:
      /* Load the byte-complemented value, then flip the low byte.  */
      CC_STATUS_INIT;
      operands[1] = GEN_INT (ival ^ 0xff);
      return "moveq %1,%0\n\tnot%.b %0";
    case NOTW:
      /* Same idea one size up.  */
      CC_STATUS_INIT;
      operands[1] = GEN_INT (ival ^ 0xffff);
      return "moveq %1,%0\n\tnot%.w %0";
    case NEGW:
      CC_STATUS_INIT;
      return "moveq #-128,%0\n\tneg%.w %0";
    case SWAP:
      {
	unsigned uval = ival;

	operands[1] = GEN_INT ((uval << 16) | (uval >> 16));
	return "moveq %1,%0\n\tswap %0";
      }
    case MOVL:
      return "move%.l %1,%0";
    default:
      gcc_unreachable ();
    }
}
 
/* Return 1 if 'constant' is an immediate the ColdFire V4 mov3q
   instruction can materialize (-1, or 1 through 7), 0 otherwise.  */
int
valid_mov3q_const (rtx constant)
{
  int v;

  if (!TARGET_CFV4 || GET_CODE (constant) != CONST_INT)
    return 0;
  v = INTVAL (constant);
  return (v == -1 || (v >= 1 && v <= 7)) ? 1 : 0;
}
 
 
/* Select the assembler template for moving the CONST_INT operands[1]
   into the SImode destination operands[0].  */
const char *
output_move_simode_const (rtx *operands)
{
  rtx dest = operands[0];
  rtx src = operands[1];

  /* Zero into a data register or memory: clr is shortest.  Skip it for
     volatile memory on plain 68000, whose clr reads before writing
     (the 68010 is fine, but there is no TARGET_68010 to test).  */
  if (src == const0_rtx
      && (DATA_REG_P (dest) || GET_CODE (dest) == MEM)
      && ((TARGET_68020 || TARGET_COLDFIRE)
	  || !(GET_CODE (dest) == MEM && MEM_VOLATILE_P (dest))))
    return "clr%.l %0";

  if (GET_MODE (dest) == SImode && valid_mov3q_const (src))
    return "mov3q%.l %1,%0";

  /* Zero an address register by subtracting it from itself.  */
  if (src == const0_rtx && ADDRESS_REG_P (dest))
    return "sub%.l %0,%0";

  if (DATA_REG_P (dest))
    return output_move_const_into_data_reg (operands);

  /* 16-bit constants into an address register sign-extend, so a word
     move suffices.  */
  if (ADDRESS_REG_P (dest)
      && INTVAL (src) >= -0x8000
      && INTVAL (src) < 0x8000)
    {
      if (valid_mov3q_const (src))
	return "mov3q%.l %1,%0";
      return "move%.w %1,%0";
    }

  /* Pushing a 16-bit constant: pea encodes it as a displacement.  */
  if (GET_CODE (dest) == MEM
      && GET_CODE (XEXP (dest, 0)) == PRE_DEC
      && REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
      && INTVAL (src) >= -0x8000
      && INTVAL (src) < 0x8000)
    {
      if (valid_mov3q_const (src))
	return "mov3q%.l %1,%-";
      return "pea %a1";
    }

  return "move%.l %1,%0";
}
 
/* Select the assembler template for an SImode move from operands[1]
   to operands[0].  */
const char *
output_move_simode (rtx *operands)
{
  rtx src = operands[1];

  if (GET_CODE (src) == CONST_INT)
    return output_move_simode_const (operands);

  if (GET_CODE (src) == SYMBOL_REF || GET_CODE (src) == CONST)
    {
      /* Pushing an address: pea does it in a single instruction.  */
      if (push_operand (operands[0], SImode))
	return "pea %a1";
      /* Loading an address register: lea is the natural choice.  */
      if (ADDRESS_REG_P (operands[0]))
	return "lea %a1,%0";
    }
  return "move%.l %1,%0";
}
 
/* Select the assembler template for a HImode move from operands[1] to
   operands[0].  When the source is the jump-table access before a
   tablejump, this also emits the label the table offsets are computed
   against.  */
const char *
output_move_himode (rtx *operands)
{
  if (GET_CODE (operands[1]) == CONST_INT)
    {
      if (operands[1] == const0_rtx
	  && (DATA_REG_P (operands[0])
	      || GET_CODE (operands[0]) == MEM)
	  /* clr insns on 68000 read before writing.
	     This isn't so on the 68010, but we have no TARGET_68010.  */
	  && ((TARGET_68020 || TARGET_COLDFIRE)
	      || !(GET_CODE (operands[0]) == MEM
		   && MEM_VOLATILE_P (operands[0]))))
	return "clr%.w %0";
      else if (operands[1] == const0_rtx
	       && ADDRESS_REG_P (operands[0]))
	return "sub%.l %0,%0";
      else if (DATA_REG_P (operands[0])
	       && INTVAL (operands[1]) < 128
	       && INTVAL (operands[1]) >= -128)
	return "moveq %1,%0";
      else if (INTVAL (operands[1]) < 0x8000
	       && INTVAL (operands[1]) >= -0x8000)
	return "move%.w %1,%0";
      /* CONST_INTs outside the 16-bit range fall through to the final
	 move%.w below.  */
    }
  else if (CONSTANT_P (operands[1]))
    return "move%.l %1,%0";
  /* Recognize the insn before a tablejump, one that refers
     to a table of offsets.  Such an insn will need to refer
     to a label on the insn.  So output one.  Use the label-number
     of the table of offsets to generate this label.  This code,
     and similar code below, assumes that there will be at most one
     reference to each table.  */
  if (GET_CODE (operands[1]) == MEM
      && GET_CODE (XEXP (operands[1], 0)) == PLUS
      && GET_CODE (XEXP (XEXP (operands[1], 0), 1)) == LABEL_REF
      && GET_CODE (XEXP (XEXP (operands[1], 0), 0)) != PLUS)
    {
      rtx labelref = XEXP (XEXP (operands[1], 0), 1);
      /* Motorola syntax defines the label as an assembler alias for
	 ".+2"; MIT syntax emits a real local label.  */
      if (MOTOROLA)
	asm_fprintf (asm_out_file, "\t.set %LLI%d,.+2\n",
		     CODE_LABEL_NUMBER (XEXP (labelref, 0)));
      else
	(*targetm.asm_out.internal_label) (asm_out_file, "LI",
					   CODE_LABEL_NUMBER (XEXP (labelref, 0)));
    }
  return "move%.w %1,%0";
}
 
/* Select the assembler template for a QImode move from operands[1] to
   operands[0].  */
const char *
output_move_qimode (rtx *operands)
{
  rtx dest = operands[0];
  rtx src = operands[1];

  /* Byte pushes are handled by the pushqi1 pattern: the 68k family
     always moves the stack pointer by at least 2 even for a byte push
     (the 5200 ColdFire does not), so this case must not reach here.  */
  gcc_assert (!(GET_CODE (dest) == MEM
		&& GET_CODE (XEXP (dest, 0)) == PRE_DEC
		&& XEXP (XEXP (dest, 0), 0) == stack_pointer_rtx
		&& ! ADDRESS_REG_P (src)
		&& ! TARGET_COLDFIRE));

  /* clr and st on the 68000 read the destination before writing, so
     avoid them for volatile memory (the 68010 is safe, but there is no
     TARGET_68010 to check).  */
  if (!ADDRESS_REG_P (dest)
      && ((TARGET_68020 || TARGET_COLDFIRE)
	  || !(GET_CODE (dest) == MEM && MEM_VOLATILE_P (dest))))
    {
      if (src == const0_rtx)
	return "clr%.b %0";
      /* All-ones low byte: st sets every bit.  */
      if ((!TARGET_COLDFIRE || DATA_REG_P (dest))
	  && GET_CODE (src) == CONST_INT
	  && (INTVAL (src) & 255) == 255)
	{
	  CC_STATUS_INIT;
	  return "st %0";
	}
    }
  if (GET_CODE (src) == CONST_INT
      && DATA_REG_P (dest)
      && INTVAL (src) < 128
      && INTVAL (src) >= -128)
    return "moveq %1,%0";
  if (src == const0_rtx && ADDRESS_REG_P (dest))
    return "sub%.l %0,%0";
  if (GET_CODE (src) != CONST_INT && CONSTANT_P (src))
    return "move%.l %1,%0";
  /* Byte moves to or from address registers are not supported anywhere
     in the 68k family (including the 5200 ColdFire); use a word move.  */
  if (ADDRESS_REG_P (dest) || ADDRESS_REG_P (src))
    return "move%.w %1,%0";
  return "move%.b %1,%0";
}
 
/* Select the assembler template for a strict-low-part HImode move.  */
const char *
output_move_stricthi (rtx *operands)
{
  if (operands[1] != const0_rtx)
    return "move%.w %1,%0";

  /* clr on the 68000 reads the destination before writing, which is
     unsafe for volatile memory (the 68010 is fine, but there is no
     TARGET_68010); fall back to an explicit move there.  */
  if (!TARGET_68020 && !TARGET_COLDFIRE
      && GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))
    return "move%.w %1,%0";

  return "clr%.w %0";
}
 
/* Select the assembler template for a strict-low-part QImode move.  */
const char *
output_move_strictqi (rtx *operands)
{
  if (operands[1] != const0_rtx)
    return "move%.b %1,%0";

  /* clr on the 68000 reads the destination before writing, which is
     unsafe for volatile memory (the 68010 is fine, but there is no
     TARGET_68010); fall back to an explicit move there.  */
  if (!TARGET_68020 && !TARGET_COLDFIRE
      && GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))
    return "move%.b %1,%0";

  return "clr%.b %0";
}
 
/* Return the best assembler insn template for moving operands[1] into
   operands[0] as one full 32-bit word.  */
static const char *
singlemove_string (rtx *operands)
{
  return (GET_CODE (operands[1]) == CONST_INT
	  ? output_move_simode_const (operands)
	  : "move%.l %1,%0");
}
 
 
/* Output assembler code to perform a doubleword move insn
   with operands OPERANDS.  The move is split into two SImode word
   moves (8-byte modes) or three (12-byte XFmode).  Earlier words are
   emitted via output_asm_insn; the template for the final word is
   returned.  */

const char *
output_move_double (rtx *operands)
{
  enum
    {
      REGOP, OFFSOP, MEMOP, PUSHOP, POPOP, CNSTOP, RNDOP
    } optype0, optype1;
  rtx latehalf[2];
  rtx middlehalf[2];
  rtx xops[2];
  rtx addreg0 = 0, addreg1 = 0;
  int dest_overlapped_low = 0;
  int size = GET_MODE_SIZE (GET_MODE (operands[0]));

  middlehalf[0] = 0;
  middlehalf[1] = 0;

  /* First classify both operands.  */

  if (REG_P (operands[0]))
    optype0 = REGOP;
  else if (offsettable_memref_p (operands[0]))
    optype0 = OFFSOP;
  else if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
    optype0 = POPOP;
  else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
    optype0 = PUSHOP;
  else if (GET_CODE (operands[0]) == MEM)
    optype0 = MEMOP;
  else
    optype0 = RNDOP;

  if (REG_P (operands[1]))
    optype1 = REGOP;
  else if (CONSTANT_P (operands[1]))
    optype1 = CNSTOP;
  else if (offsettable_memref_p (operands[1]))
    optype1 = OFFSOP;
  else if (GET_CODE (XEXP (operands[1], 0)) == POST_INC)
    optype1 = POPOP;
  else if (GET_CODE (XEXP (operands[1], 0)) == PRE_DEC)
    optype1 = PUSHOP;
  else if (GET_CODE (operands[1]) == MEM)
    optype1 = MEMOP;
  else
    optype1 = RNDOP;

  /* Check for the cases that the operand constraints are not supposed
     to allow to happen.  Generating code for these cases is
     painful.  */
  gcc_assert (optype0 != RNDOP && optype1 != RNDOP);

  /* If one operand is decrementing and one is incrementing
     decrement the former register explicitly
     and change that operand into ordinary indexing.  */

  if (optype0 == PUSHOP && optype1 == POPOP)
    {
      operands[0] = XEXP (XEXP (operands[0], 0), 0);
      if (size == 12)
	output_asm_insn ("sub%.l #12,%0", operands);
      else
	output_asm_insn ("subq%.l #8,%0", operands);
      if (GET_MODE (operands[1]) == XFmode)
	operands[0] = gen_rtx_MEM (XFmode, operands[0]);
      else if (GET_MODE (operands[0]) == DFmode)
	operands[0] = gen_rtx_MEM (DFmode, operands[0]);
      else
	operands[0] = gen_rtx_MEM (DImode, operands[0]);
      optype0 = OFFSOP;
    }
  if (optype0 == POPOP && optype1 == PUSHOP)
    {
      operands[1] = XEXP (XEXP (operands[1], 0), 0);
      if (size == 12)
	output_asm_insn ("sub%.l #12,%1", operands);
      else
	output_asm_insn ("subq%.l #8,%1", operands);
      if (GET_MODE (operands[1]) == XFmode)
	operands[1] = gen_rtx_MEM (XFmode, operands[1]);
      else if (GET_MODE (operands[1]) == DFmode)
	operands[1] = gen_rtx_MEM (DFmode, operands[1]);
      else
	operands[1] = gen_rtx_MEM (DImode, operands[1]);
      optype1 = OFFSOP;
    }

  /* If an operand is an unoffsettable memory ref, find a register
     we can increment temporarily to make it refer to the second word.  */

  if (optype0 == MEMOP)
    addreg0 = find_addr_reg (XEXP (operands[0], 0));

  if (optype1 == MEMOP)
    addreg1 = find_addr_reg (XEXP (operands[1], 0));

  /* Ok, we can do one word at a time.
     Normally we do the low-numbered word first,
     but if either operand is autodecrementing then we
     do the high-numbered word first.

     In either case, set up in LATEHALF the operands to use
     for the high-numbered word and in some cases alter the
     operands in OPERANDS to be suitable for the low-numbered word.  */

  if (size == 12)
    {
      if (optype0 == REGOP)
	{
	  latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 2);
	  middlehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
	}
      else if (optype0 == OFFSOP)
	{
	  middlehalf[0] = adjust_address (operands[0], SImode, 4);
	  latehalf[0] = adjust_address (operands[0], SImode, size - 4);
	}
      else
	{
	  middlehalf[0] = operands[0];
	  latehalf[0] = operands[0];
	}

      if (optype1 == REGOP)
	{
	  latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 2);
	  middlehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
	}
      else if (optype1 == OFFSOP)
	{
	  middlehalf[1] = adjust_address (operands[1], SImode, 4);
	  latehalf[1] = adjust_address (operands[1], SImode, size - 4);
	}
      else if (optype1 == CNSTOP)
	{
	  if (GET_CODE (operands[1]) == CONST_DOUBLE)
	    {
	      /* Split a long-double constant into its three target
		 words.  */
	      REAL_VALUE_TYPE r;
	      long l[3];

	      REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
	      REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
	      operands[1] = GEN_INT (l[0]);
	      middlehalf[1] = GEN_INT (l[1]);
	      latehalf[1] = GEN_INT (l[2]);
	    }
	  else
	    {
	      /* No non-CONST_DOUBLE constant should ever appear
		 here.  */
	      gcc_assert (!CONSTANT_P (operands[1]));
	    }
	}
      else
	{
	  middlehalf[1] = operands[1];
	  latehalf[1] = operands[1];
	}
    }
  else
    /* size is not 12: */
    {
      if (optype0 == REGOP)
	latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
      else if (optype0 == OFFSOP)
	latehalf[0] = adjust_address (operands[0], SImode, size - 4);
      else
	latehalf[0] = operands[0];

      if (optype1 == REGOP)
	latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
      else if (optype1 == OFFSOP)
	latehalf[1] = adjust_address (operands[1], SImode, size - 4);
      else if (optype1 == CNSTOP)
	split_double (operands[1], &operands[1], &latehalf[1]);
      else
	latehalf[1] = operands[1];
    }

  /* If insn is effectively movd N(sp),-(sp) then we will do the
     high word first.  We should use the adjusted operand 1 (which is N+4(sp))
     for the low word as well, to compensate for the first decrement of sp.  */
  if (optype0 == PUSHOP
      && REGNO (XEXP (XEXP (operands[0], 0), 0)) == STACK_POINTER_REGNUM
      && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
    operands[1] = middlehalf[1] = latehalf[1];

  /* For (set (reg:DI N) (mem:DI ... (reg:SI N) ...)),
     if the upper part of reg N does not appear in the MEM, arrange to
     emit the move late-half first.  Otherwise, compute the MEM address
     into the upper part of N and use that as a pointer to the memory
     operand.  */
  if (optype0 == REGOP
      && (optype1 == OFFSOP || optype1 == MEMOP))
    {
      rtx testlow = gen_rtx_REG (SImode, REGNO (operands[0]));

      if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
	  && reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
	{
	  /* If both halves of dest are used in the src memory address,
	     compute the address into latehalf of dest.
	     Note that this can't happen if the dest is two data regs.  */
	compadr:
	  xops[0] = latehalf[0];
	  xops[1] = XEXP (operands[1], 0);
	  output_asm_insn ("lea %a1,%0", xops);
	  if (GET_MODE (operands[1]) == XFmode )
	    {
	      operands[1] = gen_rtx_MEM (XFmode, latehalf[0]);
	      middlehalf[1] = adjust_address (operands[1], DImode, size - 8);
	      latehalf[1] = adjust_address (operands[1], DImode, size - 4);
	    }
	  else
	    {
	      operands[1] = gen_rtx_MEM (DImode, latehalf[0]);
	      latehalf[1] = adjust_address (operands[1], DImode, size - 4);
	    }
	}
      else if (size == 12
	       && reg_overlap_mentioned_p (middlehalf[0],
					   XEXP (operands[1], 0)))
	{
	  /* Check for two regs used by both source and dest.
	     Note that this can't happen if the dest is all data regs.
	     It can happen if the dest is d6, d7, a0.
	     But in that case, latehalf is an addr reg, so
	     the code at compadr does ok.  */

	  if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
	      || reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
	    goto compadr;

	  /* JRV says this can't happen: */
	  gcc_assert (!addreg0 && !addreg1);

	  /* Only the middle reg conflicts; simply put it last.  */
	  output_asm_insn (singlemove_string (operands), operands);
	  output_asm_insn (singlemove_string (latehalf), latehalf);
	  output_asm_insn (singlemove_string (middlehalf), middlehalf);
	  return "";
	}
      else if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0)))
	/* If the low half of dest is mentioned in the source memory
	   address, the arrange to emit the move late half first.  */
	dest_overlapped_low = 1;
    }

  /* If one or both operands autodecrementing,
     do the two words, high-numbered first.  */

  /* Likewise,  the first move would clobber the source of the second one,
     do them in the other order.  This happens only for registers;
     such overlap can't happen in memory unless the user explicitly
     sets it up, and that is an undefined circumstance.  */

  if (optype0 == PUSHOP || optype1 == PUSHOP
      || (optype0 == REGOP && optype1 == REGOP
	  && ((middlehalf[1] && REGNO (operands[0]) == REGNO (middlehalf[1]))
	      || REGNO (operands[0]) == REGNO (latehalf[1])))
      || dest_overlapped_low)
    {
      /* Make any unoffsettable addresses point at high-numbered word.  */
      if (addreg0)
	{
	  if (size == 12)
	    output_asm_insn ("addq%.l #8,%0", &addreg0);
	  else
	    output_asm_insn ("addq%.l #4,%0", &addreg0);
	}
      if (addreg1)
	{
	  if (size == 12)
	    output_asm_insn ("addq%.l #8,%0", &addreg1);
	  else
	    output_asm_insn ("addq%.l #4,%0", &addreg1);
	}

      /* Do that word.  */
      output_asm_insn (singlemove_string (latehalf), latehalf);

      /* Undo the adds we just did.  */
      if (addreg0)
	output_asm_insn ("subq%.l #4,%0", &addreg0);
      if (addreg1)
	output_asm_insn ("subq%.l #4,%0", &addreg1);

      if (size == 12)
	{
	  output_asm_insn (singlemove_string (middlehalf), middlehalf);
	  if (addreg0)
	    output_asm_insn ("subq%.l #4,%0", &addreg0);
	  if (addreg1)
	    output_asm_insn ("subq%.l #4,%0", &addreg1);
	}

      /* Do low-numbered word.  */
      return singlemove_string (operands);
    }

  /* Normal case: do the two words, low-numbered first.  */

  output_asm_insn (singlemove_string (operands), operands);

  /* Do the middle one of the three words for long double */
  if (size == 12)
    {
      if (addreg0)
	output_asm_insn ("addq%.l #4,%0", &addreg0);
      if (addreg1)
	output_asm_insn ("addq%.l #4,%0", &addreg1);

      output_asm_insn (singlemove_string (middlehalf), middlehalf);
    }

  /* Make any unoffsettable addresses point at high-numbered word.  */
  if (addreg0)
    output_asm_insn ("addq%.l #4,%0", &addreg0);
  if (addreg1)
    output_asm_insn ("addq%.l #4,%0", &addreg1);

  /* Do that word.  */
  output_asm_insn (singlemove_string (latehalf), latehalf);

  /* Undo the adds we just did.  */
  if (addreg0)
    {
      if (size == 12)
	output_asm_insn ("subq%.l #8,%0", &addreg0);
      else
	output_asm_insn ("subq%.l #4,%0", &addreg0);
    }
  if (addreg1)
    {
      if (size == 12)
	output_asm_insn ("subq%.l #8,%0", &addreg1);
      else
	output_asm_insn ("subq%.l #4,%0", &addreg1);
    }

  return "";
}
 
 
/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
   new rtx with the correct mode.  Only hard registers may be remodeled
   this way, since the register number is reused directly.  */

static rtx
force_mode (enum machine_mode mode, rtx orig)
{
  if (mode == GET_MODE (orig))
    return orig;

  /* Use the file's standard checked-assertion idiom instead of a bare
     abort(), so a failure reports file/line like every other internal
     check in this backend.  */
  gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);

  return gen_rtx_REG (mode, REGNO (orig));
}
 
/* Predicate: nonzero when OP is a floating-point register.  Only
   meaningful once register allocation has run and reg_renumber is
   available.  */
static int
fp_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  if (reg_renumber == 0)
    return 0;
  return FP_REG_P (op) ? 1 : 0;
}
 
/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.  */

int
emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* During reload, substitute stack-slot equivalents for pseudos that
     did not get hard registers, so the secondary-reload cases below
     see the actual memory operands.  */
  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem[REGNO (operand0)];
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand0) == SUBREG
	   && GET_CODE (SUBREG_REG (operand0)) == REG
	   && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	 the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
				 reg_equiv_mem [REGNO (SUBREG_REG (operand0))],
				 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp);
    }

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem[REGNO (operand1)];
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand1) == SUBREG
	   && GET_CODE (SUBREG_REG (operand1)) == REG
	   && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	 the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
				 reg_equiv_mem [REGNO (SUBREG_REG (operand1))],
				 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp);
    }

  /* Apply any address replacements reload has recorded for the memory
     operands.  */
  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
	  != XEXP (operand0, 0)))
    operand0 = gen_rtx_MEM (GET_MODE (operand0), tem);
  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
	  != XEXP (operand1, 0)))
    operand1 = gen_rtx_MEM (GET_MODE (operand1), tem);

  /* Handle secondary reloads for loads/stores of FP registers where
     the address is symbolic by using the scratch register */
  if (fp_reg_operand (operand0, mode)
      && ((GET_CODE (operand1) == MEM
	   && ! memory_address_p (DFmode, XEXP (operand1, 0)))
	  || ((GET_CODE (operand1) == SUBREG
	       && GET_CODE (XEXP (operand1, 0)) == MEM
	       && !memory_address_p (DFmode, XEXP (XEXP (operand1, 0), 0)))))
      && scratch_reg)
    {
      if (GET_CODE (operand1) == SUBREG)
	operand1 = XEXP (operand1, 0);

      /* SCRATCH_REG will hold an address.  We want
	 it in SImode regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      /* NOTE(review): the "14 bits" wording appears inherited from the
	 HP-PA port this code was adapted from; on m68k the test below is
	 simply whether the address is directly usable — confirm.  */
      if (!memory_address_p (Pmode, XEXP (operand1, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
						       Pmode,
						       XEXP (XEXP (operand1, 0), 0),
						       scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand1, 0));
      emit_insn (gen_rtx_SET (VOIDmode, operand0,
			      gen_rtx_MEM (mode, scratch_reg)));
      return 1;
    }
  else if (fp_reg_operand (operand1, mode)
	   && ((GET_CODE (operand0) == MEM
		&& ! memory_address_p (DFmode, XEXP (operand0, 0)))
	       || ((GET_CODE (operand0) == SUBREG)
		   && GET_CODE (XEXP (operand0, 0)) == MEM
		   && !memory_address_p (DFmode, XEXP (XEXP (operand0, 0), 0))))
	   && scratch_reg)
    {
      if (GET_CODE (operand0) == SUBREG)
	operand0 = XEXP (operand0, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in SIMODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand0, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
								       0)),
						       Pmode,
						       XEXP (XEXP (operand0, 0),
							     0),
						       scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand0, 0));
      emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (mode, scratch_reg),
			      operand1));
      return 1;
    }
  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.

     use scratch_reg to hold the address of the memory location.

     The proper fix is to change PREFERRED_RELOAD_CLASS to return
     NO_REGS when presented with a const_int and an register class
     containing only FP registers.  Doing so unfortunately creates
     more problems than it solves.   Fix this for 2.5.  */
  else if (fp_reg_operand (operand0, mode)
	   && CONSTANT_P (operand1)
	   && scratch_reg)
    {
      rtx xoperands[2];

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in SIMODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* Force the constant into memory and put the address of the
	 memory location into scratch_reg.  */
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (force_const_mem (mode, operand1), 0);
      emit_insn (gen_rtx_SET (mode, scratch_reg, xoperands[1]));

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (mode, operand0,
			      gen_rtx_MEM (mode, scratch_reg)));
      return 1;
    }

  /* Now have insn-emit do whatever it normally does.  */
  return 0;
}
 
/* Return a REG that occurs in ADDR with coefficient 1, i.e. a register
   whose increment effectively increments ADDR.  Walks down nested PLUS
   expressions, discarding constant terms.  */
static rtx
find_addr_reg (rtx addr)
{
  while (GET_CODE (addr) == PLUS)
    {
      rtx lhs = XEXP (addr, 0);
      rtx rhs = XEXP (addr, 1);

      if (GET_CODE (lhs) == REG)
	addr = lhs;
      else if (GET_CODE (rhs) == REG)
	addr = rhs;
      else if (CONSTANT_P (lhs))
	addr = rhs;
      else if (CONSTANT_P (rhs))
	addr = lhs;
      else
	gcc_unreachable ();
    }
  gcc_assert (GET_CODE (addr) == REG);
  return addr;
}
 
/* Output assembler code to perform a 32-bit 3-operand add:
   operands[0] = operands[1] + operands[2].  When the destination does
   not match the first source, an lea-based form is used; operands may
   be swapped or rewritten in the process.  */

const char *
output_addsi3 (rtx *operands)
{
  if (! operands_match_p (operands[0], operands[1]))
    {
      /* Put the address register (if any) into %1 so it can serve as
	 the lea base.  */
      if (!ADDRESS_REG_P (operands[1]))
	{
	  rtx swap = operands[1];

	  operands[1] = operands[2];
	  operands[2] = swap;
	}

      /* Reloads that reach stack slots more than 64k from the frame
	 pointer produce displacements lea cannot encode; use an
	 explicit move + add instead.  */
      if (GET_CODE (operands[2]) == CONST_INT
	  && (INTVAL (operands[2]) < -32768 || INTVAL (operands[2]) > 32767))
	return "move%.l %2,%0\n\tadd%.l %1,%0";
      if (GET_CODE (operands[2]) == REG)
	return MOTOROLA ? "lea (%1,%2.l),%0" : "lea %1@(0,%2:l),%0";
      return MOTOROLA ? "lea (%c2,%1),%0" : "lea %1@(%c2),%0";
    }

  if (GET_CODE (operands[2]) == CONST_INT)
    {
      int delta = INTVAL (operands[2]);

      if (delta > 0 && delta <= 8)
	return "addq%.l %2,%0";
      if (delta < 0 && delta >= -8)
	{
	  operands[2] = GEN_INT (-delta);
	  return "subq%.l %2,%0";
	}
      /* On the CPU32 a pair of addql (or subql) instructions is faster
	 than the general form for 8 < |N| <= 16.  */
      if (TARGET_CPU32 && REG_P (operands[0]))
	{
	  if (delta > 8 && delta <= 16)
	    {
	      operands[2] = GEN_INT (delta - 8);
	      return "addq%.l #8,%0\n\taddq%.l %2,%0";
	    }
	  if (delta < -8 && delta >= -16)
	    {
	      operands[2] = GEN_INT (-delta - 8);
	      return "subq%.l #8,%0\n\tsubq%.l %2,%0";
	    }
	}
      if (ADDRESS_REG_P (operands[0])
	  && delta >= -0x8000
	  && delta < 0x8000)
	{
	  /* Word adds sign-extend on address registers; the 68040
	     prefers add.w, other parts use lea.  */
	  if (TARGET_68040)
	    return "add%.w %2,%0";
	  return MOTOROLA ? "lea (%c2,%0),%0" : "lea %0@(%c2),%0";
	}
    }
  return "add%.l %2,%0";
}
/* Store in cc_status the expressions that the condition codes will
describe after execution of an instruction whose pattern is EXP.
Do not alter them if the instruction would not alter the cc's. */

/* On the 68000, all the insns to store in an address register fail to
set the cc's. However, in some cases these instructions can make it
possibly invalid to use the saved cc's. In those cases we clear out
some or all of the saved cc's so they won't be used. */

void
notice_update_cc (rtx exp, rtx insn)
{
if (GET_CODE (exp) == SET)
{
/* A call clobbers the condition codes entirely. */
if (GET_CODE (SET_SRC (exp)) == CALL)
CC_STATUS_INIT;
else if (ADDRESS_REG_P (SET_DEST (exp)))
{
/* Address-register stores leave the cc's alone, but may
invalidate the values the saved cc's describe. */
if (cc_status.value1 && modified_in_p (cc_status.value1, insn))
cc_status.value1 = 0;
if (cc_status.value2 && modified_in_p (cc_status.value2, insn))
cc_status.value2 = 0;
}
/* FP-to-integer moves and FP conversions leave no usable
integer cc's. */
else if (!FP_REG_P (SET_DEST (exp))
&& SET_DEST (exp) != cc0_rtx
&& (FP_REG_P (SET_SRC (exp))
|| GET_CODE (SET_SRC (exp)) == FIX
|| GET_CODE (SET_SRC (exp)) == FLOAT_TRUNCATE
|| GET_CODE (SET_SRC (exp)) == FLOAT_EXTEND))
CC_STATUS_INIT;
/* A pair of move insns doesn't produce a useful overall cc. */
else if (!FP_REG_P (SET_DEST (exp))
&& !FP_REG_P (SET_SRC (exp))
&& GET_MODE_SIZE (GET_MODE (SET_SRC (exp))) > 4
&& (GET_CODE (SET_SRC (exp)) == REG
|| GET_CODE (SET_SRC (exp)) == MEM
|| GET_CODE (SET_SRC (exp)) == CONST_DOUBLE))
CC_STATUS_INIT;
else if (SET_DEST (exp) != pc_rtx)
{
/* Ordinary set: the cc's now describe the destination
(value1) compared against the source (value2). */
cc_status.flags = 0;
cc_status.value1 = SET_DEST (exp);
cc_status.value2 = SET_SRC (exp);
}
}
else if (GET_CODE (exp) == PARALLEL
&& GET_CODE (XVECEXP (exp, 0, 0)) == SET)
{
/* For a PARALLEL, only the first SET is considered. */
rtx dest = SET_DEST (XVECEXP (exp, 0, 0));
rtx src = SET_SRC (XVECEXP (exp, 0, 0));

if (ADDRESS_REG_P (dest))
CC_STATUS_INIT;
else if (dest != pc_rtx)
{
cc_status.flags = 0;
cc_status.value1 = dest;
cc_status.value2 = src;
}
}
else
CC_STATUS_INIT;
/* Discard a QImode value held in an address register; address
registers cannot hold bytes (see m68k_regno_mode_ok). */
if (cc_status.value2 != 0
&& ADDRESS_REG_P (cc_status.value2)
&& GET_MODE (cc_status.value2) == QImode)
CC_STATUS_INIT;
if (cc_status.value2 != 0)
switch (GET_CODE (cc_status.value2))
{
case ASHIFT: case ASHIFTRT: case LSHIFTRT:
case ROTATE: case ROTATERT:
/* These instructions always clear the overflow bit, and set
the carry to the bit shifted out. */
/* ??? We don't currently have a way to signal carry not valid,
nor do we check for it in the branch insns. */
CC_STATUS_INIT;
break;

case PLUS: case MINUS: case MULT:
case DIV: case UDIV: case MOD: case UMOD: case NEG:
if (GET_MODE (cc_status.value2) != VOIDmode)
cc_status.flags |= CC_NO_OVERFLOW;
break;
case ZERO_EXTEND:
/* (SET r1 (ZERO_EXTEND r2)) on this machine
ends with a move insn moving r2 in r2's mode.
Thus, the cc's are set for r2.
This can set N bit spuriously. */
cc_status.flags |= CC_NOT_NEGATIVE;
/* Deliberate fall through to the default case. */

default:
break;
}
/* If value2 mentions the register just set (value1), the value it
describes has been overwritten and is no longer usable. */
if (cc_status.value1 && GET_CODE (cc_status.value1) == REG
&& cc_status.value2
&& reg_overlap_mentioned_p (cc_status.value1, cc_status.value2))
cc_status.value2 = 0;
/* If either tracked value lives in an FP register, the relevant
condition codes are the 68881's, not the integer unit's. */
if (((cc_status.value1 && FP_REG_P (cc_status.value1))
|| (cc_status.value2 && FP_REG_P (cc_status.value2))))
cc_status.flags = CC_IN_68881;
}
/* Return the assembler template for loading the CONST_DOUBLE in
   operands[1] (DFmode) into the FP register operands[0].  Uses the
   68881's fmovecr constant ROM when the value is available there.  */
const char *
output_move_const_double (rtx *operands)
{
  static char buf[40];		/* Returned template; static on purpose.  */
  int rom_code = standard_68881_constant_p (operands[1]);

  /* Not a ROM constant: emit an ordinary double-precision fmove.  */
  if (rom_code == 0)
    return "fmove%.d %1,%0";

  sprintf (buf, "fmovecr #0x%x,%%0", rom_code & 0xff);
  return buf;
}
 
/* Return the assembler template for loading the CONST_DOUBLE in
   operands[1] (SFmode) into the FP register operands[0].  Uses the
   68881's fmovecr constant ROM when the value is available there.  */
const char *
output_move_const_single (rtx *operands)
{
  static char buf[40];		/* Returned template; static on purpose.  */
  int rom_code = standard_68881_constant_p (operands[1]);

  /* Not a ROM constant: emit an ordinary single-precision fmove.  */
  if (rom_code == 0)
    return "fmove%.s %f1,%0";

  sprintf (buf, "fmovecr #0x%x,%%0", rom_code & 0xff);
  return buf;
}
 
/* Return nonzero if X, a CONST_DOUBLE, has a value that we can get
from the "fmovecr" instruction.
The value, anded with 0xff, gives the code to use in fmovecr
to get the desired constant. */

/* This code has been fixed for cross-compilation. */

/* Nonzero once values_68881[] below has been filled in by
init_68881_table. */
static int inited_68881_table = 0;

/* Decimal images of the constants reachable via fmovecr; kept as
strings so the table is independent of the host's FP format. */
static const char *const strings_68881[7] = {
"0.0",
"1.0",
"10.0",
"100.0",
"10000.0",
"1e8",
"1e16"
};

/* fmovecr ROM offsets, in 1:1 correspondence with strings_68881. */
static const int codes_68881[7] = {
0x0f,
0x32,
0x33,
0x34,
0x35,
0x36,
0x37
};

/* Binary forms of strings_68881, computed lazily by init_68881_table. */
REAL_VALUE_TYPE values_68881[7];
 
/* Set up values_68881 array by converting the decimal values
strings_68881 to binary. */
 
void
init_68881_table (void)
{
int i;
REAL_VALUE_TYPE r;
enum machine_mode mode;
 
mode = SFmode;
for (i = 0; i < 7; i++)
{
if (i == 6)
mode = DFmode;
r = REAL_VALUE_ATOF (strings_68881[i], mode);
values_68881[i] = r;
}
inited_68881_table = 1;
}
 
/* If X, a CONST_DOUBLE, matches one of the 68881's ROM constants,
   return its fmovecr code (nonzero); otherwise return 0.  */
int
standard_68881_constant_p (rtx x)
{
  REAL_VALUE_TYPE r;
  int i;

  /* fmovecr must be emulated on the 68040 and 68060, so it shouldn't be
     used at all on those chips.  */
  if (TARGET_68040 || TARGET_68060)
    return 0;

  if (!inited_68881_table)
    init_68881_table ();

  REAL_VALUE_FROM_CONST_DOUBLE (r, x);

  /* Scan the first six table entries.  Use REAL_VALUES_IDENTICAL
     instead of REAL_VALUES_EQUAL so that -0.0 is rejected.  */
  for (i = 0; i < 6; i++)
    if (REAL_VALUES_IDENTICAL (r, values_68881[i]))
      return codes_68881[i];

  /* The 1e16 entry is only considered for wider-than-SFmode values.  */
  if (GET_MODE (x) != SFmode
      && REAL_VALUES_EQUAL (r, values_68881[6]))
    return codes_68881[6];

  /* Larger powers of ten in the constant ROM are not used because
     they are not equal to a `double' C constant.  */
  return 0;
}
 
/* If X is a floating-point constant that is an exact power of 2 (and
   at least 1.0), return its base-2 logarithm; otherwise return 0.  */

int
floating_exact_log2 (rtx x)
{
  REAL_VALUE_TYPE r, pow2;
  int exp;

  REAL_VALUE_FROM_CONST_DOUBLE (r, x);

  /* Values below 1.0 (including zero and negatives) never qualify.  */
  if (REAL_VALUES_LESS (r, dconst1))
    return 0;

  /* X is a power of two exactly when it equals 2**(its exponent).  */
  exp = real_exponent (&r);
  real_2expN (&pow2, exp);

  return REAL_VALUES_EQUAL (pow2, r) ? exp : 0;
}
/* A C compound statement to output to stdio stream STREAM the
assembler syntax for an instruction operand X. X is an RTL
expression.

CODE is a value that can be used to specify one of several ways
of printing the operand. It is used when identical operands
must be printed differently depending on the context. CODE
comes from the `%' specification that was used to request
printing of the operand. If the specification was just `%DIGIT'
then CODE is 0; if the specification was `%LTR DIGIT' then CODE
is the ASCII code for LTR.

If X is a register, this macro should print the register's name.
The names can be found in an array `reg_names' whose type is
`char *[]'. `reg_names' is initialized from `REGISTER_NAMES'.

When the machine description has a specification `%PUNCT' (a `%'
followed by a punctuation character), this macro is called with
a null pointer for X and the punctuation character for CODE.

The m68k specific codes are:

'.' for dot needed in Motorola-style opcode names.
'-' for an operand pushing on the stack:
sp@-, -(sp) or -(%sp) depending on the style of syntax.
'+' for an operand pushing on the stack:
sp@+, (sp)+ or (%sp)+ depending on the style of syntax.
'@' for a reference to the top word on the stack:
sp@, (sp) or (%sp) depending on the style of syntax.
'#' for an immediate operand prefix (# in MIT and Motorola syntax
but & in SGS syntax).
'!' for the cc register (used in an `and to cc' insn).
'$' for the letter `s' in an op code, but only on the 68040.
'&' for the letter `d' in an op code, but only on the 68040.
'/' for register prefix needed by longlong.h.

'b' for byte insn (no effect, on the Sun; this is for the ISI).
'd' to force memory addressing to be absolute, not relative.
'f' for float insn (print a CONST_DOUBLE as a float rather than in hex)
'o' for operands to go directly to output_operand_address (bypassing
print_operand_address--used only for SYMBOL_REFs under TARGET_PCREL)
'x' for float insn (print a CONST_DOUBLE as a float rather than in hex),
or print pair of registers as rx:ry.

*/

void
print_operand (FILE *file, rtx op, int letter)
{
/* Punctuation codes come first: for these OP may be null and we emit
fixed, syntax-dependent text only. */
if (letter == '.')
{
/* Motorola syntax separates size suffixes with '.'; MIT does not. */
if (MOTOROLA)
fprintf (file, ".");
}
else if (letter == '#')
asm_fprintf (file, "%I");
else if (letter == '-')
asm_fprintf (file, MOTOROLA ? "-(%Rsp)" : "%Rsp@-");
else if (letter == '+')
asm_fprintf (file, MOTOROLA ? "(%Rsp)+" : "%Rsp@+");
else if (letter == '@')
asm_fprintf (file, MOTOROLA ? "(%Rsp)" : "%Rsp@");
else if (letter == '!')
asm_fprintf (file, "%Rfpcr");
else if (letter == '$')
{
if (TARGET_68040_ONLY)
fprintf (file, "s");
}
else if (letter == '&')
{
if (TARGET_68040_ONLY)
fprintf (file, "d");
}
else if (letter == '/')
asm_fprintf (file, "%R");
else if (letter == 'o')
{
/* This is only for direct addresses with TARGET_PCREL */
gcc_assert (GET_CODE (op) == MEM
&& GET_CODE (XEXP (op, 0)) == SYMBOL_REF
&& TARGET_PCREL);
output_addr_const (file, XEXP (op, 0));
}
else if (GET_CODE (op) == REG)
{
if (letter == 'R')
/* Print out the second register name of a register pair.
I.e., R (6) => 7. */
fputs (M68K_REGNAME(REGNO (op) + 1), file);
else
fputs (M68K_REGNAME(REGNO (op)), file);
}
else if (GET_CODE (op) == MEM)
{
output_address (XEXP (op, 0));
/* 'd': append an explicit long-size suffix to force absolute
addressing, except for small constant addresses that use the
short form. */
if (letter == 'd' && ! TARGET_68020
&& CONSTANT_ADDRESS_P (XEXP (op, 0))
&& !(GET_CODE (XEXP (op, 0)) == CONST_INT
&& INTVAL (XEXP (op, 0)) < 0x8000
&& INTVAL (XEXP (op, 0)) >= -0x8000))
fprintf (file, MOTOROLA ? ".l" : ":l");
}
/* Floating-point constants are printed as decimal immediates, one
case per mode. */
else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == SFmode)
{
REAL_VALUE_TYPE r;
REAL_VALUE_FROM_CONST_DOUBLE (r, op);
ASM_OUTPUT_FLOAT_OPERAND (letter, file, r);
}
else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == XFmode)
{
REAL_VALUE_TYPE r;
REAL_VALUE_FROM_CONST_DOUBLE (r, op);
ASM_OUTPUT_LONG_DOUBLE_OPERAND (file, r);
}
else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == DFmode)
{
REAL_VALUE_TYPE r;
REAL_VALUE_FROM_CONST_DOUBLE (r, op);
ASM_OUTPUT_DOUBLE_OPERAND (file, r);
}
else
{
/* Use `print_operand_address' instead of `output_addr_const'
to ensure that we print relevant PIC stuff. */
asm_fprintf (file, "%I");
if (TARGET_PCREL
&& (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST))
print_operand_address (file, op);
else
output_addr_const (file, op);
}
}
 
/* A C compound statement to output to stdio stream STREAM the
assembler syntax for an instruction operand that is a memory
reference whose address is ADDR. ADDR is an RTL expression.

Note that this contains a kludge that knows that the only reason
we have an address (plus (label_ref...) (reg...)) when not generating
PIC code is in the insn before a tablejump, and we know that m68k.md
generates a label LInnn: on such an insn.

It is possible for PIC to generate a (plus (label_ref...) (reg...))
and we handle that just like we would a (plus (symbol_ref...) (reg...)).

Some SGS assemblers have a bug such that "Lnnn-LInnn-2.b(pc,d0.l*2)"
fails to assemble. Luckily "Lnnn(pc,d0.l*2)" produces the results
we want. This difference can be accommodated by using an assembler
define such "LDnnn" to be either "Lnnn-LInnn-2.b", "Lnnn", or any other
string, as necessary. This is accomplished via the ASM_OUTPUT_CASE_END
macro. See m68k/sgs.h for an example; for versions without the bug.
Some assemblers refuse all the above solutions. The workaround is to
emit "K(pc,d0.l*2)" with K being a small constant known to give the
right behavior.

They also do not like things like "pea 1.w", so we simple leave off
the .w on small constants.

This routine is responsible for distinguishing between -fpic and -fPIC
style relocations in an address. When generating -fpic code the
offset is output in word mode (e.g. movel a5@(_foo:w), a0). When generating
-fPIC code the offset is output in long mode (e.g. movel a5@(_foo:l), a0) */

/* Emit the pc-relative prefix of a jump-table fetch for case label
LABELNO, using base register REGNAME; the caller appends the index
size and closing paren. */
#if MOTOROLA
# define ASM_OUTPUT_CASE_FETCH(file, labelno, regname) \
asm_fprintf (file, "%LL%d-%LLI%d.b(%Rpc,%s.", labelno, labelno, regname)
#else /* !MOTOROLA */
# define ASM_OUTPUT_CASE_FETCH(file, labelno, regname) \
asm_fprintf (file, "%Rpc@(%LL%d-%LLI%d-2:b,%s:", labelno, labelno, regname)
#endif /* !MOTOROLA */

void
print_operand_address (FILE *file, rtx addr)
{
register rtx reg1, reg2, breg, ireg;
rtx offset;

switch (GET_CODE (addr))
{
case REG:
/* Simple register indirect. */
fprintf (file, MOTOROLA ? "(%s)" : "%s@", M68K_REGNAME (REGNO (addr)));
break;
case PRE_DEC:
fprintf (file, MOTOROLA ? "-(%s)" : "%s@-",
M68K_REGNAME (REGNO (XEXP (addr, 0))));
break;
case POST_INC:
fprintf (file, MOTOROLA ? "(%s)+" : "%s@+",
M68K_REGNAME (REGNO (XEXP (addr, 0))));
break;
case PLUS:
/* Decompose ADDR into an optional constant OFFSET, a base
register BREG and an index IREG (possibly a MULT for scaling
or a SIGN_EXTEND for a word index). */
reg1 = reg2 = ireg = breg = offset = 0;
/* First peel off a constant term, if any. */
if (CONSTANT_ADDRESS_P (XEXP (addr, 0)))
{
offset = XEXP (addr, 0);
addr = XEXP (addr, 1);
}
else if (CONSTANT_ADDRESS_P (XEXP (addr, 1)))
{
offset = XEXP (addr, 1);
addr = XEXP (addr, 0);
}
/* If two register-like terms remain, peel off the first into
reg1, preferring index-shaped terms (SIGN_EXTEND, MULT). */
if (GET_CODE (addr) != PLUS)
{
;
}
else if (GET_CODE (XEXP (addr, 0)) == SIGN_EXTEND)
{
reg1 = XEXP (addr, 0);
addr = XEXP (addr, 1);
}
else if (GET_CODE (XEXP (addr, 1)) == SIGN_EXTEND)
{
reg1 = XEXP (addr, 1);
addr = XEXP (addr, 0);
}
else if (GET_CODE (XEXP (addr, 0)) == MULT)
{
reg1 = XEXP (addr, 0);
addr = XEXP (addr, 1);
}
else if (GET_CODE (XEXP (addr, 1)) == MULT)
{
reg1 = XEXP (addr, 1);
addr = XEXP (addr, 0);
}
else if (GET_CODE (XEXP (addr, 0)) == REG)
{
reg1 = XEXP (addr, 0);
addr = XEXP (addr, 1);
}
else if (GET_CODE (XEXP (addr, 1)) == REG)
{
reg1 = XEXP (addr, 1);
addr = XEXP (addr, 0);
}
/* Whatever register-like term is left goes into reg1 or reg2. */
if (GET_CODE (addr) == REG || GET_CODE (addr) == MULT
|| GET_CODE (addr) == SIGN_EXTEND)
{
if (reg1 == 0)
reg1 = addr;
else
reg2 = addr;
addr = 0;
}
#if 0 /* for OLD_INDEXING */
else if (GET_CODE (addr) == PLUS)
{
if (GET_CODE (XEXP (addr, 0)) == REG)
{
reg2 = XEXP (addr, 0);
addr = XEXP (addr, 1);
}
else if (GET_CODE (XEXP (addr, 1)) == REG)
{
reg2 = XEXP (addr, 1);
addr = XEXP (addr, 0);
}
}
#endif
/* ADDR now carries the remaining symbolic/constant part. */
if (offset != 0)
{
gcc_assert (!addr);
addr = offset;
}
/* Classify reg1/reg2 into base (breg) and index (ireg): scaled
or sign-extended terms must be the index. */
if ((reg1 && (GET_CODE (reg1) == SIGN_EXTEND
|| GET_CODE (reg1) == MULT))
|| (reg2 != 0 && REGNO_OK_FOR_BASE_P (REGNO (reg2))))
{
breg = reg2;
ireg = reg1;
}
else if (reg1 != 0 && REGNO_OK_FOR_BASE_P (REGNO (reg1)))
{
breg = reg1;
ireg = reg2;
}
/* label + index register, non-PIC: the jump-table fetch kludge
described above. */
if (ireg != 0 && breg == 0 && GET_CODE (addr) == LABEL_REF
&& ! (flag_pic && ireg == pic_offset_table_rtx))
{
int scale = 1;
if (GET_CODE (ireg) == MULT)
{
scale = INTVAL (XEXP (ireg, 1));
ireg = XEXP (ireg, 0);
}
if (GET_CODE (ireg) == SIGN_EXTEND)
{
ASM_OUTPUT_CASE_FETCH (file,
CODE_LABEL_NUMBER (XEXP (addr, 0)),
M68K_REGNAME (REGNO (XEXP (ireg, 0))));
fprintf (file, "w");
}
else
{
ASM_OUTPUT_CASE_FETCH (file,
CODE_LABEL_NUMBER (XEXP (addr, 0)),
M68K_REGNAME (REGNO (ireg)));
fprintf (file, "l");
}
if (scale != 1)
fprintf (file, MOTOROLA ? "*%d" : ":%d", scale);
putc (')', file);
break;
}
/* label + base register, non-PIC: same kludge, long index. */
if (breg != 0 && ireg == 0 && GET_CODE (addr) == LABEL_REF
&& ! (flag_pic && breg == pic_offset_table_rtx))
{
ASM_OUTPUT_CASE_FETCH (file,
CODE_LABEL_NUMBER (XEXP (addr, 0)),
M68K_REGNAME (REGNO (breg)));
fprintf (file, "l)");
break;
}
/* General base + optional displacement + optional (scaled)
index form; also emits @GOT / :w / :l PIC decorations. */
if (ireg != 0 || breg != 0)
{
int scale = 1;
gcc_assert (breg);
gcc_assert (flag_pic || !addr || GET_CODE (addr) != LABEL_REF);
if (MOTOROLA)
{
if (addr != 0)
{
output_addr_const (file, addr);
if (flag_pic && (breg == pic_offset_table_rtx))
{
fprintf (file, "@GOT");
if (flag_pic == 1)
fprintf (file, ".w");
}
}
fprintf (file, "(%s", M68K_REGNAME (REGNO (breg)));
if (ireg != 0)
putc (',', file);
}
else /* !MOTOROLA */
{
fprintf (file, "%s@(", M68K_REGNAME (REGNO (breg)));
if (addr != 0)
{
output_addr_const (file, addr);
if (breg == pic_offset_table_rtx)
switch (flag_pic)
{
case 1:
fprintf (file, ":w");
break;
case 2:
fprintf (file, ":l");
break;
default:
break;
}
if (ireg != 0)
putc (',', file);
}
} /* !MOTOROLA */
if (ireg != 0 && GET_CODE (ireg) == MULT)
{
scale = INTVAL (XEXP (ireg, 1));
ireg = XEXP (ireg, 0);
}
if (ireg != 0 && GET_CODE (ireg) == SIGN_EXTEND)
fprintf (file, MOTOROLA ? "%s.w" : "%s:w",
M68K_REGNAME (REGNO (XEXP (ireg, 0))));
else if (ireg != 0)
fprintf (file, MOTOROLA ? "%s.l" : "%s:l",
M68K_REGNAME (REGNO (ireg)));
if (scale != 1)
fprintf (file, MOTOROLA ? "*%d" : ":%d", scale);
putc (')', file);
break;
}
else if (reg1 != 0 && GET_CODE (addr) == LABEL_REF
&& ! (flag_pic && reg1 == pic_offset_table_rtx))
{
ASM_OUTPUT_CASE_FETCH (file,
CODE_LABEL_NUMBER (XEXP (addr, 0)),
M68K_REGNAME (REGNO (reg1)));
fprintf (file, "l)");
break;
}
/* FALL-THROUGH (is this really what we want?) */
default:
/* Constant or symbolic address. Small constants get the short
(.w) form; see the "pea 1.w" note above for why large ones
get no suffix. */
if (GET_CODE (addr) == CONST_INT
&& INTVAL (addr) < 0x8000
&& INTVAL (addr) >= -0x8000)
{
fprintf (file, MOTOROLA ? "%d.w" : "%d:w", (int) INTVAL (addr));
}
else if (GET_CODE (addr) == CONST_INT)
{
fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (addr));
}
else if (TARGET_PCREL)
{
/* pc-relative reference; width follows the -fpic/-fPIC rule
described in the header comment. */
fputc ('(', file);
output_addr_const (file, addr);
if (flag_pic == 1)
asm_fprintf (file, ":w,%Rpc)");
else
asm_fprintf (file, ":l,%Rpc)");
}
else
{
/* Special case for SYMBOL_REF if the symbol name ends in
`.<letter>', this can be mistaken as a size suffix. Put
the name in parentheses. */
if (GET_CODE (addr) == SYMBOL_REF
&& strlen (XSTR (addr, 0)) > 2
&& XSTR (addr, 0)[strlen (XSTR (addr, 0)) - 2] == '.')
{
putc ('(', file);
output_addr_const (file, addr);
putc (')', file);
}
else
output_addr_const (file, addr);
}
break;
}
}
/* Check for cases where a clr insn can be omitted from code using
   strict_low_part sets.  For example, the second clrl here is not needed:
   clrl d0; movw a0@+,d0; use d0; clrl d0; movw a0@+; use d0; ...

   MODE is the mode of this STRICT_LOW_PART set.  FIRST_INSN is the clear
   insn we are checking for redundancy.  TARGET is the register set by the
   clear insn.  */

bool
strict_low_part_peephole_ok (enum machine_mode mode, rtx first_insn,
			     rtx target)
{
  rtx p;

  /* Walk backwards over the preceding insns looking for the most
     recent write to TARGET.  */
  for (p = prev_nonnote_insn (first_insn); p; p = prev_nonnote_insn (p))
    {
      /* Anything other than an ordinary insn ends the search.  */
      if (GET_CODE (p) != INSN)
	return false;

      if (reg_set_p (target, p))
	{
	  rtx set = single_set (p);
	  rtx dest;

	  /* Give up on insns we cannot analyze as a single set.  */
	  if (set == 0)
	    return false;

	  dest = SET_DEST (set);

	  /* A full clear of TARGET makes FIRST_INSN redundant.  */
	  if (rtx_equal_p (dest, target) && SET_SRC (set) == const0_rtx)
	    return true;

	  /* A strict_low_part store into TARGET no wider than MODE
	     leaves the high part intact, so keep searching.  Anything
	     else disturbs TARGET and defeats the peephole.  */
	  if (! (GET_CODE (dest) == STRICT_LOW_PART
		 && GET_CODE (XEXP (dest, 0)) == REG
		 && REGNO (XEXP (dest, 0)) == REGNO (target)
		 && (GET_MODE_SIZE (GET_MODE (XEXP (dest, 0)))
		     <= GET_MODE_SIZE (mode))))
	    return false;
	}
    }

  /* Ran off the start of the sequence without finding a clear.  */
  return false;
}
 
/* Operand predicates for implementing asymmetric pc-relative addressing
on m68k. The m68k supports pc-relative addressing (mode 7, register 2)
when used as a source operand, but not as a destination operand.
 
We model this by restricting the meaning of the basic predicates
(general_operand, memory_operand, etc) to forbid the use of this
addressing mode, and then define the following predicates that permit
this addressing mode. These predicates can then be used for the
source operands of the appropriate instructions.
 
n.b. While it is theoretically possible to change all machine patterns
to use this addressing more where permitted by the architecture,
it has only been implemented for "common" cases: SImode, HImode, and
QImode operands, and only for the principle operations that would
require this addressing mode: data movement and simple integer operations.
 
In parallel with these new predicates, two new constraint letters
were defined: 'S' and 'T'. 'S' is the -mpcrel analog of 'm'.
'T' replaces 's' in the non-pcrel case. It is a no-op in the pcrel case.
In the pcrel case 's' is only valid in combination with 'a' registers.
See addsi3, subsi3, cmpsi, and movsi patterns for a better understanding
of how these constraints are used.
 
The use of these predicates is strictly optional, though patterns that
don't will cause an extra reload register to be allocated where one
was not necessary:
 
lea (abc:w,%pc),%a0 ; need to reload address
moveq &1,%d1 ; since write to pc-relative space
movel %d1,%a0@ ; is not allowed
...
lea (abc:w,%pc),%a1 ; no need to reload address here
movel %a1@,%d0 ; since "movel (abc:w,%pc),%d0" is ok
 
For more info, consult tiemann@cygnus.com.
 
 
All of the ugliness with predicates and constraints is due to the
simple fact that the m68k does not allow a pc-relative addressing
mode as a destination. gcc does not distinguish between source and
destination addresses. Hence, if we claim that pc-relative address
modes are valid, e.g. GO_IF_LEGITIMATE_ADDRESS accepts them, then we
end up with invalid code. To get around this problem, we left
pc-relative modes as invalid addresses, and then added special
predicates and constraints to accept them.
 
A cleaner way to handle this is to modify gcc to distinguish
between source and destination addresses. We can then say that
pc-relative is a valid source address but not a valid destination
address, and hopefully avoid a lot of the predicate and constraint
hackery. Unfortunately, this would be a pretty big change. It would
be a useful change for a number of ports, but there aren't any current
plans to undertake this.
 
***************************************************************************/
 
 
/* Output assembler code for an SImode AND of operands[0] (also the
first input) with operands[2].  May narrow to a word operation or a
single bclr when the mask permits; clears the tracked cc state when
the shortened form sets the flags differently. */
const char *
output_andsi3 (rtx *operands)
{
int logval;
/* Mask keeps the upper 16 bits intact ((val | 0xffff) == -1): only a
word-sized AND of the low half is needed. */
if (GET_CODE (operands[2]) == CONST_INT
&& (INTVAL (operands[2]) | 0xffff) == -1
&& (DATA_REG_P (operands[0])
|| offsettable_memref_p (operands[0]))
&& !TARGET_COLDFIRE)
{
/* For memory, address the low word directly (offset 2, big-endian). */
if (GET_CODE (operands[0]) != REG)
operands[0] = adjust_address (operands[0], HImode, 2);
operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffff);
/* Do not delete a following tstl %0 insn; that would be incorrect. */
CC_STATUS_INIT;
if (operands[2] == const0_rtx)
return "clr%.w %0";
return "and%.w %2,%0";
}
/* Mask clears exactly one bit: use bclr on that bit. */
if (GET_CODE (operands[2]) == CONST_INT
&& (logval = exact_log2 (~ INTVAL (operands[2]))) >= 0
&& (DATA_REG_P (operands[0])
|| offsettable_memref_p (operands[0])))
{
if (DATA_REG_P (operands[0]))
operands[1] = GEN_INT (logval);
else
{
/* For memory, bclr works on a byte: address the byte holding the
bit and use the bit number within that byte. */
operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
operands[1] = GEN_INT (logval % 8);
}
/* This does not set condition codes in a standard way. */
CC_STATUS_INIT;
return "bclr %1,%0";
}
/* General case. */
return "and%.l %2,%0";
}
 
/* Output assembler code for an SImode inclusive OR of operands[0]
(also the first input) with operands[2].  May narrow to a word
operation or a single bset when the constant permits. */
const char *
output_iorsi3 (rtx *operands)
{
register int logval;
/* Constant fits in the low 16 bits: a word-sized OR suffices. */
if (GET_CODE (operands[2]) == CONST_INT
&& INTVAL (operands[2]) >> 16 == 0
&& (DATA_REG_P (operands[0])
|| offsettable_memref_p (operands[0]))
&& !TARGET_COLDFIRE)
{
/* For memory, address the low word directly (offset 2, big-endian). */
if (GET_CODE (operands[0]) != REG)
operands[0] = adjust_address (operands[0], HImode, 2);
/* Do not delete a following tstl %0 insn; that would be incorrect. */
CC_STATUS_INIT;
/* OR with 0xffff sets the whole word: a move is shorter. */
if (INTVAL (operands[2]) == 0xffff)
return "mov%.w %2,%0";
return "or%.w %2,%0";
}
/* Constant sets exactly one bit: use bset on that bit. */
if (GET_CODE (operands[2]) == CONST_INT
&& (logval = exact_log2 (INTVAL (operands[2]))) >= 0
&& (DATA_REG_P (operands[0])
|| offsettable_memref_p (operands[0])))
{
if (DATA_REG_P (operands[0]))
operands[1] = GEN_INT (logval);
else
{
/* For memory, bset works on a byte: address the byte holding the
bit and use the bit number within that byte. */
operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
operands[1] = GEN_INT (logval % 8);
}
/* bset does not set the cc's in the standard way. */
CC_STATUS_INIT;
return "bset %1,%0";
}
/* General case. */
return "or%.l %2,%0";
}
 
/* Output assembler code for an SImode exclusive OR of operands[0]
(also the first input) with operands[2].  May narrow to a word
operation or a single bchg when the constant permits. */
const char *
output_xorsi3 (rtx *operands)
{
register int logval;
/* Constant fits in the low 16 bits: a word-sized EOR suffices. */
if (GET_CODE (operands[2]) == CONST_INT
&& INTVAL (operands[2]) >> 16 == 0
&& (offsettable_memref_p (operands[0]) || DATA_REG_P (operands[0]))
&& !TARGET_COLDFIRE)
{
/* For memory, address the low word directly (offset 2, big-endian). */
if (! DATA_REG_P (operands[0]))
operands[0] = adjust_address (operands[0], HImode, 2);
/* Do not delete a following tstl %0 insn; that would be incorrect. */
CC_STATUS_INIT;
/* XOR with 0xffff flips the whole word: not.w is shorter. */
if (INTVAL (operands[2]) == 0xffff)
return "not%.w %0";
return "eor%.w %2,%0";
}
/* Constant flips exactly one bit: use bchg on that bit. */
if (GET_CODE (operands[2]) == CONST_INT
&& (logval = exact_log2 (INTVAL (operands[2]))) >= 0
&& (DATA_REG_P (operands[0])
|| offsettable_memref_p (operands[0])))
{
if (DATA_REG_P (operands[0]))
operands[1] = GEN_INT (logval);
else
{
/* For memory, bchg works on a byte: address the byte holding the
bit and use the bit number within that byte. */
operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
operands[1] = GEN_INT (logval % 8);
}
/* bchg does not set the cc's in the standard way. */
CC_STATUS_INIT;
return "bchg %1,%0";
}
/* General case. */
return "eor%.l %2,%0";
}
 
#ifdef M68K_TARGET_COFF

/* Output assembly to switch to section NAME with attribute FLAGS.
   Writable sections are marked 'd' (data); all others 'x'.  DECL is
   ignored.  */

static void
m68k_coff_asm_named_section (const char *name, unsigned int flags,
			     tree decl ATTRIBUTE_UNUSED)
{
  char flagchar = (flags & SECTION_WRITE) ? 'd' : 'x';

  fprintf (asm_out_file, "\t.section\t%s,\"%c\"\n", name, flagchar);
}

#endif /* M68K_TARGET_COFF */
 
/* Worker for TARGET_ASM_OUTPUT_MI_THUNK: emit the assembly body of a
`this'-adjusting thunk.  Adds DELTA to the word at 4(%sp) -- the
stacked first argument -- then tail-jumps to FUNCTION.  VCALL_OFFSET
is unused (not supported by this implementation). */
static void
m68k_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
HOST_WIDE_INT delta,
HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
tree function)
{
rtx xops[1];
const char *fmt;

/* Apply DELTA to 4(%sp).  addq/subq cover immediates 1..8. */
if (delta > 0 && delta <= 8)
asm_fprintf (file, (MOTOROLA
? "\taddq.l %I%d,4(%Rsp)\n"
: "\taddql %I%d,%Rsp@(4)\n"),
(int) delta);
else if (delta < 0 && delta >= -8)
asm_fprintf (file, (MOTOROLA
? "\tsubq.l %I%d,4(%Rsp)\n"
: "\tsubql %I%d,%Rsp@(4)\n"),
(int) -delta);
else if (TARGET_COLDFIRE)
{
/* ColdFire can't add/sub a constant to memory unless it is in
the range of addq/subq. So load the value into %d0 and
then add it to 4(%sp). */
if (delta >= -128 && delta <= 127)
asm_fprintf (file, (MOTOROLA
? "\tmoveq.l %I%wd,%Rd0\n"
: "\tmoveql %I%wd,%Rd0\n"),
delta);
else
asm_fprintf (file, (MOTOROLA
? "\tmove.l %I%wd,%Rd0\n"
: "\tmovel %I%wd,%Rd0\n"),
delta);
asm_fprintf (file, (MOTOROLA
? "\tadd.l %Rd0,4(%Rsp)\n"
: "\taddl %Rd0,%Rsp@(4)\n"));
}
else
asm_fprintf (file, (MOTOROLA
? "\tadd.l %I%wd,4(%Rsp)\n"
: "\taddl %I%wd,%Rsp@(4)\n"),
delta);

xops[0] = DECL_RTL (function);

/* Emit the tail jump to FUNCTION, choosing the branch form by PIC
mode, syntax and assembler, as the call patterns do. */
/* Logic taken from call patterns in m68k.md. */
if (flag_pic)
{
if (TARGET_PCREL)
fmt = "bra.l %o0";
else if (flag_pic == 1 || TARGET_68020)
{
if (MOTOROLA)
{
#if defined (USE_GAS)
fmt = "bra.l %0@PLTPC";
#else
fmt = "bra %0@PLTPC";
#endif
}
else /* !MOTOROLA */
{
#ifdef USE_GAS
fmt = "bra.l %0";
#else
fmt = "jra %0,a1";
#endif
}
}
else if (optimize_size || TARGET_ID_SHARED_LIBRARY)
fmt = "move.l %0@GOT(%%a5), %%a1\n\tjmp (%%a1)";
else
fmt = "lea %0-.-8,%%a1\n\tjmp 0(%%pc,%%a1)";
}
else
{
#if MOTOROLA && !defined (USE_GAS)
fmt = "jmp %0";
#else
fmt = "jra %0";
#endif
}

output_asm_insn (fmt, xops);
}
 
/* Worker function for TARGET_STRUCT_VALUE_RTX: where the hidden
   aggregate-return address is passed.  Both arguments are ignored --
   the m68k uses one fixed register in every case.  */

static rtx
m68k_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
		       int incoming ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (Pmode, M68K_STRUCT_VALUE_REGNUM);
}
 
/* Return nonzero if register OLD_REG can be renamed to register NEW_REG.  */
int
m68k_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
			   unsigned int new_reg)
{
  /* Outside an interrupt handler any rename is acceptable.  */
  if (!m68k_interrupt_function_p (current_function_decl))
    return 1;

  /* Interrupt functions can only use registers that have already been
     saved by the prologue, even if they would normally be
     call-clobbered.  */
  return regs_ever_live[new_reg] ? 1 : 0;
}
 
/* Value is true if hard register REGNO can hold a value of machine-mode
   MODE.  On the 68000, the cpu registers can hold any mode except bytes
   in address registers, but the 68881 registers can hold only SFmode or
   DFmode.  */
bool
m68k_regno_mode_ok (int regno, enum machine_mode mode)
{
  /* Data registers (0-7): any value, provided it fits within the
     bank at 4 bytes per register.  */
  if (regno < 8)
    return regno + GET_MODE_SIZE (mode) / 4 <= 8;

  /* Address registers (8-15): no byte values; otherwise any value
     that fits within the bank.  */
  if (regno < 16)
    return (GET_MODE_SIZE (mode) != 1
	    && regno + GET_MODE_SIZE (mode) / 4 <= 16);

  /* FPU registers (16-23): scalar or complex float whose unit is no
     wider than an FP register.  */
  if (regno < 24)
    return ((GET_MODE_CLASS (mode) == MODE_FLOAT
	     || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
	    && GET_MODE_UNIT_SIZE (mode) <= TARGET_FP_REG_SIZE);

  return false;
}
 
/* Return floating point values in a 68881 register.  This makes 68881 code
   a little bit faster.  It also makes -msoft-float code incompatible with
   hard-float code, so people have to be careful not to mix the two.
   For ColdFire it was decided the ABI incompatibility is undesirable.
   If there is need for a hard-float ABI it is probably worth doing it
   properly and also passing function arguments in FP registers.  */
rtx
m68k_libcall_value (enum machine_mode mode)
{
  /* With a 68881, FP libcall results come back in fp0 (hard reg 16).  */
  if (TARGET_68881
      && (mode == SFmode || mode == DFmode || mode == XFmode))
    return gen_rtx_REG (mode, 16);

  /* Everything else is returned in d0.  */
  return gen_rtx_REG (mode, 0);
}
 
/* Return the rtx for the register holding a function result of type
   VALTYPE.  FUNC is unused.  FP values use fp0 when a 68881 is
   available; pointers use %a0; everything else uses %d0.  */
rtx
m68k_function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
{
  enum machine_mode mode = TYPE_MODE (valtype);

  /* Floating-point results come back in fp0 (hard reg 16) on 68881
     targets.  */
  if (TARGET_68881
      && (mode == SFmode || mode == DFmode || mode == XFmode))
    return gen_rtx_REG (mode, 16);

  /* If the function returns a pointer, push that into %a0 (hard reg 8);
     otherwise use %d0 (hard reg 0).  */
  return gen_rtx_REG (mode, POINTER_TYPE_P (valtype) ? 8 : 0);
}
/crtn.s
0,0 → 1,43
/* Specialized code needed to support construction and destruction of
file-scope objects in C++ and Java code, and to support exception handling.
Copyright (C) 1999 Free Software Foundation, Inc.
Contributed by Charles-Antoine Gauthier (charles.gauthier@iit.nrc.ca).
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
 
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
 
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA. */
 
/* As a special exception, if you link this library with files
compiled with GCC to produce an executable, this does not cause
the resulting executable to be covered by the GNU General Public License.
This exception does not however invalidate any other reasons why
the executable file might be covered by the GNU General Public License. */
 
/*
* This file supplies function epilogues for the .init and .fini sections.
* It is linked in after all other files.
*/
 
.file "crtn.o"
.ident "GNU C crtn.o"
 
.section .init
unlk %fp
rts
 
.section .fini
unlk %fp
rts
/rtemself.h
0,0 → 1,33
/* Definitions for rtems targeting a Motorola m68k using elf.
Copyright (C) 1999, 2000, 2002 National Research Council of Canada.
Copyright (C) 2007 Free Software Foundation, Inc.
Contributed by Charles-Antoine Gauthier (charles.gauthier@nrc.ca).
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
 
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
 
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
 
 
/* Target OS builtins. */
#undef TARGET_OS_CPP_BUILTINS /* Defined in m68kemb.h. */
#define TARGET_OS_CPP_BUILTINS() \
do \
{ \
builtin_define_std ("mc68000"); \
builtin_define ("__USE_INIT_FINI__"); \
builtin_define ("__rtems__"); \
builtin_assert ("system=rtems"); \
} \
while (0)
/m68k-none.h
0,0 → 1,130
/* Definitions of target machine for GNU compiler. "naked" 68020.
Copyright (C) 1994, 1996, 2003, 2006, 2007 Free Software Foundation, Inc.
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
 
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
 
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
 
/* Default to m68k (m68020).  */
#ifndef TARGET_CPU_DEFAULT
#define TARGET_CPU_DEFAULT M68K_CPU_m68k
#endif

/* These are values set by the configure script in TARGET_CPU_DEFAULT.
   They are (sequential integer + (desired value for TARGET_DEFAULT) << 4):
   the low 4 bits identify the CPU, the remaining bits carry the
   target_flags mask that CPU implies.  */
#define M68K_CPU_m68k	(0 + ((MASK_68020|MASK_68881|MASK_BITFIELD)<<4))
#define M68K_CPU_m68000 (1 + (0 << 4))
#define M68K_CPU_m68010 (1 + (0 << 4))	/* make same as m68000 */
#define M68K_CPU_m68020 (2 + ((MASK_68020|MASK_68881|MASK_BITFIELD) << 4))
#define M68K_CPU_m68030	(3 + ((MASK_68030|MASK_68020|MASK_68881|MASK_BITFIELD) << 4))
#define M68K_CPU_m68040 (4 + ((MASK_68040_ONLY|MASK_68020|MASK_68881|MASK_BITFIELD) << 4))
#define M68K_CPU_m68302 (5 + (0 << 4))
#define M68K_CPU_m68332 (6 + (MASK_68020 << 4))

/* This is tested for below, so if a target wants to override this, it
   just sets this first in its cover file.  */
#ifndef TARGET_DEFAULT
#define TARGET_DEFAULT (TARGET_CPU_DEFAULT >> 4)
#endif

/* Defaults for the various specs below.
   These are collected here so we only test TARGET_CPU_DEFAULT once.  */
/* ??? CC1_CPU_DEFAULT_SPEC was copied over from the earlier version of
   this file.  However, it's not used anywhere here because it doesn't
   seem to be necessary.  */
#if TARGET_CPU_DEFAULT == M68K_CPU_m68k || TARGET_CPU_DEFAULT == M68K_CPU_m68020
#define ASM_CPU_DEFAULT_SPEC "-mc68020"
#define CC1_CPU_DEFAULT_SPEC "-m68020"
#else
#if TARGET_CPU_DEFAULT == M68K_CPU_m68000
#define ASM_CPU_DEFAULT_SPEC "-mc68000"
#define CC1_CPU_DEFAULT_SPEC "-m68000"
#else
#if TARGET_CPU_DEFAULT == M68K_CPU_m68030
#define ASM_CPU_DEFAULT_SPEC "-mc68030"
#define CC1_CPU_DEFAULT_SPEC "-m68030"
#else
#if TARGET_CPU_DEFAULT == M68K_CPU_m68040
#define ASM_CPU_DEFAULT_SPEC "-mc68040"
#define CC1_CPU_DEFAULT_SPEC "-m68040"
#else
#if TARGET_CPU_DEFAULT == M68K_CPU_m68302
#define ASM_CPU_DEFAULT_SPEC "-mc68302"
#define CC1_CPU_DEFAULT_SPEC "-m68302"
#else
#if TARGET_CPU_DEFAULT == M68K_CPU_m68332
#define ASM_CPU_DEFAULT_SPEC "-mc68332"
#define CC1_CPU_DEFAULT_SPEC "-m68332"
#else
/* Deliberately not a directive: any other TARGET_CPU_DEFAULT value
   makes this line a compile-time syntax error.  */
Unrecognized value in TARGET_CPU_DEFAULT.
#endif
#endif
#endif
#endif
#endif
#endif
 
/* Pass flags to gas indicating which type of processor we have.
   Each %{mXXX} forwards the flag unchanged; the nested %{!...} chain
   falls back to %(asm_cpu_default) only when no CPU flag was given.  */

#undef ASM_SPEC
#define ASM_SPEC "\
%{m68851}%{mno-68851}%{m68881}%{mno-68881}%{msoft-float:-mno-68881} %{m68000}%{m68302}%{mc68000}%{m68010}%{m68020}%{mc68020}%{m68030}%{m68040}%{m68020-40:-mc68040} %{m68020-60:-mc68040} %{m68060}%{mcpu32}%{m68332}%{m5200}%{m5206e}%{m528x}%{m5307}%{m5407}%{mcfv4e}%{!mc68000:%{!m68000:%{!m68302:%{!m68010:%{!mc68020:%{!m68020:%{!m68030:%{!m68040:%{!m68020-40:%{!m68020-60:%{!m68060:%{!mcpu32:%{!m68332:%{!m5200:%{!m5206e:%{!m528x:%{!m5307:%{!m5407:%{!mcfv4e:%(asm_cpu_default)}}}}}}}}}}}}}}}}}}} \
%{fPIC:--pcrel} %{fpic:--pcrel} %{msep-data:--pcrel} %{mid-shared-library:--pcrel} \
"

/* cc1/cc1plus always receives all the -m flags.  If the specs strings above
   are consistent with the flags in m68k.opt, there should be no need for
   any further cc1/cc1plus specs.  */

#undef CC1_SPEC
#define CC1_SPEC ""

/* This macro defines names of additional specifications to put in the specs
   that can be used in various specifications like CC1_SPEC.  Its definition
   is an initializer with a subgrouping for each command option.

   Each subgrouping contains a string constant, that defines the
   specification name, and a string constant that is used by the GCC driver
   program.

   Do not define this macro if it does not need to do anything.  */

#define EXTRA_SPECS \
{ "asm_cpu_default", ASM_CPU_DEFAULT_SPEC }, \
{ "cc1_cpu_default", CC1_CPU_DEFAULT_SPEC }, \
SUBTARGET_EXTRA_SPECS

#define CPP_SUBTARGET_SPEC ""
/* Hook for subtargets to append their own named specs; empty here.  */
#define SUBTARGET_EXTRA_SPECS
/* Avoid building multilib libraries for the defaults.
   For targets not handled here, just build the full set of multilibs.
   The default is m68k 99.9% of the time anyway.
   Each MULTILIB_DEFAULTS entry names the option spellings that match
   the configured default, so that combination maps to the top-level
   library directory.  */

#if TARGET_CPU_DEFAULT == M68K_CPU_m68k || TARGET_CPU_DEFAULT == M68K_CPU_m68020
#if TARGET_DEFAULT & MASK_68881
#define MULTILIB_DEFAULTS { "m68020", "m68881" }
#else
#define MULTILIB_DEFAULTS { "m68020", "msoft-float" }
#endif
#endif

#if TARGET_CPU_DEFAULT == M68K_CPU_m68000 || TARGET_CPU_DEFAULT == M68K_CPU_m68302
#if TARGET_DEFAULT & MASK_68881
#define MULTILIB_DEFAULTS { "m68000", "m68881" }
#else
#define MULTILIB_DEFAULTS { "m68000", "msoft-float" }
#endif
#endif
/ieee.opt
0,0 → 1,24
; Extra IEEE options for the Motorola 68000 port of the compiler.
 
; Copyright (C) 2005, 2007 Free Software Foundation, Inc.
;
; This file is part of GCC.
;
; GCC is free software; you can redistribute it and/or modify it under
; the terms of the GNU General Public License as published by the Free
; Software Foundation; either version 3, or (at your option) any later
; version.
;
; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
; WARRANTY; without even the implied warranty of MERCHANTABILITY or
; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
; for more details.
;
; You should have received a copy of the GNU General Public License
; along with GCC; see the file COPYING3. If not see
; <http://www.gnu.org/licenses/>.
 
; This option is ignored by gcc; the record below merely accepts the
; flag on the command line.  RejectNegative: no -mno-ieee-fp form.
mieee-fp
Target RejectNegative
Use IEEE math for fp comparisons
/netbsd-elf.h
0,0 → 1,409
/* Definitions of target machine for GNU compiler,
for m68k (including m68010) NetBSD platforms using the
ELF object format.
Copyright (C) 2002, 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
Contributed by Wasabi Systems. Inc.
 
This file is derived from <m68k/m68kv4.h>, <m68k/m68kelf.h>,
and <m68k/linux.h>.
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
 
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
 
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
 
/* OS predefines for NetBSD/m68k ELF.  */
#define TARGET_OS_CPP_BUILTINS()		\
  do						\
    {						\
      NETBSD_OS_CPP_BUILTINS_ELF();		\
      builtin_define ("__m68k__");		\
      builtin_define ("__SVR4_ABI__");		\
      builtin_define ("__motorola__");		\
    }						\
  while (0)

/* Default target comes from config.gcc.  */
#undef TARGET_DEFAULT
#define TARGET_DEFAULT TARGET_CPU_DEFAULT


/* Don't try using XFmode on the 68010.  */
#undef LONG_DOUBLE_TYPE_SIZE
#define LONG_DOUBLE_TYPE_SIZE			\
  ((TARGET_68020 || TARGET_68040 || TARGET_68040_ONLY ||	\
    TARGET_68060) ? 80 : 64)

/* When libgcc2 itself is compiled, pick its long-double size from the
   predefines of the compiler building it.  */
#ifdef __mc68010__
#define LIBGCC2_LONG_DOUBLE_TYPE_SIZE 64
#else
#define LIBGCC2_LONG_DOUBLE_TYPE_SIZE 80
#endif

/* Named sub-specs referenced from the spec strings below.  */
#define EXTRA_SPECS \
{ "cpp_cpu_default_spec", CPP_CPU_DEFAULT_SPEC }, \
{ "cpp_cpu_spec", CPP_CPU_SPEC }, \
{ "cpp_fpu_spec", CPP_FPU_SPEC }, \
{ "asm_default_spec", ASM_DEFAULT_SPEC }, \
{ "netbsd_cpp_spec", NETBSD_CPP_SPEC }, \
{ "netbsd_entry_point", NETBSD_ENTRY_POINT },


/* Define the __mcXXXXX__ macro matching the CPU option given.  */
#define CPP_CPU_SPEC \
"%{m68010:-D__mc68010__} \
%{m68020:-D__mc68020__} \
%{m68030:-D__mc68030__} \
%{m68040:-D__mc68040__} \
%(cpp_cpu_default_spec)"


/* When no -m680x0 flag is given, fall back to the configured default
   CPU for both the preprocessor macro and the gas flag.  */
#undef TARGET_VERSION
#if TARGET_DEFAULT & MASK_68020
#define TARGET_VERSION fprintf (stderr, " (NetBSD/m68k ELF)");
#define CPP_CPU_DEFAULT_SPEC "%{!m680*:-D__mc68020__}"
#define ASM_DEFAULT_SPEC "%{!m680*:-m68020}"
#else
#define TARGET_VERSION fprintf (stderr, " (NetBSD/68010 ELF)");
#define CPP_CPU_DEFAULT_SPEC "%{!m680*:-D__mc68010__}"
#define ASM_DEFAULT_SPEC "%{!m680*:-m68010}"
#endif


/* FPU macros: on by default unless -msoft-float when the default has
   a 68881, otherwise only with an explicit -m68881.  */
#if TARGET_DEFAULT & MASK_68881
#define CPP_FPU_SPEC "%{!msoft-float:-D__HAVE_68881__ -D__HAVE_FPU__}"
#else
#define CPP_FPU_SPEC "%{m68881:-D__HAVE_68881__ -D__HAVE_FPU__}"
#endif
 
 
/* Provide a CPP_SPEC appropriate for NetBSD m68k targets.  Currently we
   deal with the GCC option '-posix', as well as an indication as to
   whether or not use of the FPU is allowed.  */

#undef CPP_SPEC
#define CPP_SPEC \
"%(netbsd_cpp_spec) %(cpp_cpu_spec) %(cpp_fpu_spec)"


/* Provide an ASM_SPEC appropriate for NetBSD m68k ELF targets.  We pass
   on some CPU options, as well as PIC code generation options
   (-k/-K are gas m68k PIC flags -- see the binutils documentation).  */

#undef ASM_SPEC
#define ASM_SPEC \
"%(asm_default_spec) \
%{m68010} %{m68020} %{m68030} %{m68040} %{m68060} \
%{fpic|fpie:-k} %{fPIC|fPIE:-k -K}"

#define AS_NEEDS_DASH_FOR_PIPED_INPUT

/* Provide a LINK_SPEC appropriate for a NetBSD/m68k ELF target.  */

#undef LINK_SPEC
#define LINK_SPEC NETBSD_LINK_SPEC_ELF

#define NETBSD_ENTRY_POINT "_start"

/* Output assembler code to FILE to increment profiler label # LABELNO
   for profiling a function only.  The label address is loaded into %a1
   and __mcount is called -- via the PLT when generating PIC.  */

#undef FUNCTION_PROFILER
#define FUNCTION_PROFILER(FILE, LABELNO)				\
do									\
  {									\
    asm_fprintf (FILE, "\tlea (%LLP%d,%Rpc),%Ra1\n", (LABELNO));	\
    if (flag_pic)							\
      fprintf (FILE, "\tbsr.l __mcount@PLTPC\n");			\
    else								\
      fprintf (FILE, "\tjbsr __mcount\n");				\
  }									\
while (0)


/* Make gcc agree with <machine/ansi.h>.  */

#undef SIZE_TYPE
#define SIZE_TYPE "unsigned int"

#undef PTRDIFF_TYPE
#define PTRDIFF_TYPE "int"
 
 
/* XXX
   Here is a bunch of stuff lifted from m68kelf.h.  We don't use that
   file directly, because it has a lot of baggage we don't want.  */


/* The prefix for register names.  Note that REGISTER_NAMES
   is supposed to include this prefix.  Also note that this is NOT an
   fprintf format string, it is a literal string.  */

#undef REGISTER_PREFIX
#define REGISTER_PREFIX "%"


/* The prefix for local (compiler generated) labels.
   These labels will not appear in the symbol table.  */

#undef LOCAL_LABEL_PREFIX
#define LOCAL_LABEL_PREFIX "."


/* The prefix to add to user-visible assembler symbols.  */

#undef USER_LABEL_PREFIX
#define USER_LABEL_PREFIX ""


/* The prefix for immediate operands.  */

#undef IMMEDIATE_PREFIX
#define IMMEDIATE_PREFIX "#"


#undef ASM_COMMENT_START
#define ASM_COMMENT_START "|"


/* Currently, JUMP_TABLES_IN_TEXT_SECTION must be defined in order to
   keep switch tables in the text section.  */

#undef JUMP_TABLES_IN_TEXT_SECTION
#define JUMP_TABLES_IN_TEXT_SECTION 1


/* Use the default action for outputting the case label.  */
#undef ASM_OUTPUT_CASE_LABEL
/* Emit the dispatch jump for a case table.  ColdFire scales the index
   long (:l), sign-extending it first when it is in a data register;
   plain 680x0 scales it word (:w).  */
#define ASM_RETURN_CASE_JUMP				\
  do {							\
    if (TARGET_COLDFIRE)				\
      {							\
	if (ADDRESS_REG_P (operands[0]))		\
	  return "jmp %%pc@(2,%0:l)";			\
	else						\
	  return "ext%.l %0\n\tjmp %%pc@(2,%0:l)";	\
      }							\
    else						\
      return "jmp %%pc@(2,%0:w)";			\
  } while (0)
 
 
/* This is how to output an assembler line that says to advance the
   location counter to a multiple of 2**LOG bytes.  LOG == 0 needs
   no directive at all, hence the guard.  */

#undef ASM_OUTPUT_ALIGN
#define ASM_OUTPUT_ALIGN(FILE,LOG)				\
do								\
  {								\
    if ((LOG) > 0)						\
      fprintf ((FILE), "%s%u\n", ALIGN_ASM_OP, 1 << (LOG));	\
  }								\
while (0)


/* If defined, a C expression whose value is a string containing the
   assembler operation to identify the following data as uninitialized global
   data.  */

#define BSS_SECTION_ASM_OP	".section\t.bss"


/* Like `ASM_OUTPUT_BSS' except takes the required alignment as a
   separate, explicit argument.  If you define this macro, it is used
   in place of `ASM_OUTPUT_BSS', and gives you more flexibility in
   handling the required alignment of the variable.  The alignment is
   specified as the number of bits.

   Try to use function `asm_output_aligned_bss' defined in file
   `varasm.c' when defining this macro.  */

#undef ASM_OUTPUT_ALIGNED_BSS
#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN)	\
  asm_output_aligned_bss (FILE, DECL, NAME, SIZE, ALIGN)


/* Emit a common symbol: ".comm NAME,SIZE".  ROUNDED is unused.  */
#undef ASM_OUTPUT_COMMON
#define ASM_OUTPUT_COMMON(FILE, NAME, SIZE, ROUNDED)	\
( fputs (".comm ", (FILE)),				\
  assemble_name ((FILE), (NAME)),			\
  fprintf ((FILE), ",%u\n", (int)(SIZE)))

/* Emit a local common symbol: ".lcomm NAME,SIZE".  ROUNDED is unused.  */
#undef ASM_OUTPUT_LOCAL
#define ASM_OUTPUT_LOCAL(FILE, NAME, SIZE, ROUNDED)	\
( fputs (".lcomm ", (FILE)),				\
  assemble_name ((FILE), (NAME)),			\
  fprintf ((FILE), ",%u\n", (int)(SIZE)))
 
 
/* XXX
   This is the end of the chunk lifted from m68kelf.h.  */


/* XXX
   The following chunk is more or less lifted from m68kv4.h.
   We'd like to just #include that file, but it has not yet
   been converted to the new include style.

   Should there be a m68kv4-abi.h ??  */


/* Register in which address to store a structure value is passed to a
   function.  The default in m68k.h is a1.  For m68k/SVR4 it is a0
   (hard register 8).  */

#undef M68K_STRUCT_VALUE_REGNUM
#define M68K_STRUCT_VALUE_REGNUM 8


/* Register in which static-chain is passed to a function.  The
   default in m68k.h is a0, but that is already the struct value
   regnum.  Make it a1 (hard register 9) instead.  */

#undef STATIC_CHAIN_REGNUM
#define STATIC_CHAIN_REGNUM 9


/* Now to renumber registers for dbx and gdb.
   We use the Sun-3 convention, which is:
   floating point registers have numbers 18 to 25, not
   16 to 23 as they do in the compiler.  */

#undef DBX_REGISTER_NUMBER
#define DBX_REGISTER_NUMBER(REGNO) ((REGNO) < 16 ? (REGNO) : (REGNO) + 2)


/* 1 if N is a possible register number for a function value.  For
   m68k/SVR4 allow d0, a0, or fp0 as return registers, for integral,
   pointer, or floating types, respectively.  Reject fp0 if not using
   a 68881 coprocessor.  */

#undef FUNCTION_VALUE_REGNO_P
#define FUNCTION_VALUE_REGNO_P(N) \
  ((N) == 0 || (N) == 8 || (TARGET_68881 && (N) == 16))


/* Define this to be true when FUNCTION_VALUE_REGNO_P is true for
   more than one register.  */

#undef NEEDS_UNTYPED_CALL
#define NEEDS_UNTYPED_CALL 1
 
 
/* Define how to generate (in the callee) the output value of a
   function and how to find (in the caller) the value returned by a
   function.  VALTYPE is the data type of the value (as a tree).  If
   the precise function being called is known, FUNC is its
   FUNCTION_DECL; otherwise, FUNC is 0.  For m68k/SVR4 generate the
   result in d0, a0, or fp0 as appropriate.  */

#undef FUNCTION_VALUE
#define FUNCTION_VALUE(VALTYPE, FUNC) \
  m68k_function_value (VALTYPE, FUNC)


/* For compatibility with the large body of existing code which does
   not always properly declare external functions returning pointer
   types, the m68k/SVR4 convention is to copy the value returned for
   pointer functions from a0 to d0 in the function epilogue, so that
   callers that have neglected to properly declare the callee can
   still find the correct return value.  */

extern int current_function_returns_pointer;
#define FUNCTION_EXTRA_EPILOGUE(FILE, SIZE)				\
do									\
  {									\
    /* Skip the copy when find_equiv_reg shows it is redundant.  */	\
    if (current_function_returns_pointer				\
	&& ! find_equiv_reg (0, get_last_insn (), 0, 0, 0, 8, Pmode))	\
      asm_fprintf (FILE, "\tmove.l %Ra0,%Rd0\n");			\
  }									\
while (0)


/* Define how to find the value returned by a library function
   assuming the value has mode MODE.
   For m68k/SVR4 look for integer values in d0, pointer values in d0
   (returned in both d0 and a0), and floating values in fp0.  */

#undef LIBCALL_VALUE
#define LIBCALL_VALUE(MODE) \
  m68k_libcall_value (MODE)


/* Boundary (in *bits*) on which stack pointer should be aligned.
   The m68k/SVR4 convention is to keep the stack pointer longword aligned.  */

#undef STACK_BOUNDARY
#define STACK_BOUNDARY 32


/* Alignment of field after `int : 0' in a structure.
   For m68k/SVR4, this is the next longword boundary.  */

#undef EMPTY_FIELD_BOUNDARY
#define EMPTY_FIELD_BOUNDARY 32


/* No data type wants to be aligned rounder than this.
   For m68k/SVR4, some types (doubles for example) are aligned on 8 byte
   boundaries.  */

#undef BIGGEST_ALIGNMENT
#define BIGGEST_ALIGNMENT 64


/* For m68k SVR4, structures are returned using the reentrant
   technique, so the PCC static-area convention is switched off.  */

#undef PCC_STATIC_STRUCT_RETURN


/* The svr4 ABI for the m68k says that records and unions are returned
   in memory.  */

#undef DEFAULT_PCC_STRUCT_RETURN
#define DEFAULT_PCC_STRUCT_RETURN 1
 
/* Output assembler code for a block containing the constant parts
   of a trampoline, leaving space for the variable parts.  */

/* On m68k svr4, the trampoline is different from the generic version
   in that we use a1 as the static call chain.  */

#undef TRAMPOLINE_TEMPLATE
#define TRAMPOLINE_TEMPLATE(FILE)					\
{									\
  /* 0x227a = movea.l (d16,%pc),%a1 -- loads the static chain.  */	\
  assemble_aligned_integer (2, GEN_INT (0x227a));			\
  assemble_aligned_integer (2, GEN_INT (8));				\
  /* 0x2f3a = move.l (d16,%pc),-(%sp) -- pushes the target address.  */	\
  assemble_aligned_integer (2, GEN_INT (0x2f3a));			\
  assemble_aligned_integer (2, GEN_INT (8));				\
  /* 0x4e75 = rts -- "returns" into the pushed target address.  */	\
  assemble_aligned_integer (2, GEN_INT (0x4e75));			\
  /* Two longword slots patched at run time (chain, function).  */	\
  assemble_aligned_integer (4, const0_rtx);				\
  assemble_aligned_integer (4, const0_rtx);				\
}

/* Redefine since we are using a different trampoline.  */
#undef TRAMPOLINE_SIZE
#define TRAMPOLINE_SIZE 18

/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNADDR is an RTX for the address of the function's pure code.
   CXT is an RTX for the static chain value for the function.
   Offsets 10 and 14 are the two longword slots left in the template.  */

#undef INITIALIZE_TRAMPOLINE
#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT)			\
{									\
  emit_move_insn (gen_rtx_MEM (SImode, plus_constant (TRAMP, 10)), CXT); \
  emit_move_insn (gen_rtx_MEM (SImode, plus_constant (TRAMP, 14)), FNADDR); \
}


/* XXX
   This is the end of the chunk lifted from m68kv4.h.  */
/m68k.h
0,0 → 1,1070
/* Definitions of target machine for GCC for Motorola 680x0/ColdFire.
Copyright (C) 1987, 1988, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
 
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
 
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
 
/* We need to have MOTOROLA always defined (either 0 or 1) because we use
   if-statements and ?: on it.  This way we have compile-time error checking
   for both the MOTOROLA and MIT code paths.  We do rely on the host compiler
   to optimize away all constant tests.  */
#ifdef MOTOROLA
# undef MOTOROLA
# define MOTOROLA 1  /* Use the Motorola assembly syntax.  */
# define TARGET_VERSION fprintf (stderr, " (68k, Motorola syntax)")
#else
# define TARGET_VERSION fprintf (stderr, " (68k, MIT syntax)")
# define MOTOROLA 0  /* Use the MIT assembly syntax.  */
#endif
 
/* Note that some other tm.h files include this one and then override
   many of the definitions that relate to assembler syntax.  */

/* CPU predefines.  Each family macro is also defined for every earlier
   family the selected CPU is upward-compatible with (see the cascading
   else-if chain below).  */
#define TARGET_CPU_CPP_BUILTINS()		\
  do						\
    {						\
      builtin_define ("__m68k__");		\
      builtin_define_std ("mc68000");		\
      if (TARGET_68040_ONLY)			\
	{					\
	  if (TARGET_68060)			\
	    builtin_define_std ("mc68060");	\
	  else					\
	    builtin_define_std ("mc68040");	\
	}					\
      else if (TARGET_68060) /* -m68020-60 */	\
	{					\
	  builtin_define_std ("mc68060");	\
	  builtin_define_std ("mc68040");	\
	  builtin_define_std ("mc68030");	\
	  builtin_define_std ("mc68020");	\
	}					\
      else if (TARGET_68040) /* -m68020-40 */	\
	{					\
	  builtin_define_std ("mc68040");	\
	  builtin_define_std ("mc68030");	\
	  builtin_define_std ("mc68020");	\
	}					\
      else if (TARGET_68030)			\
	builtin_define_std ("mc68030");		\
      else if (TARGET_68020)			\
	builtin_define_std ("mc68020");		\
      if (TARGET_68881)				\
	builtin_define ("__HAVE_68881__");	\
      if (TARGET_CPU32)				\
	{					\
	  builtin_define_std ("mc68332");	\
	  builtin_define_std ("mcpu32");	\
	}					\
      /* ColdFire variants: the family macro plus per-core macros;	\
	 528x and CFV3/CFV4 also define their base-core macros.  */	\
      if (TARGET_COLDFIRE)			\
	builtin_define ("__mcoldfire__");	\
      if (TARGET_5200)				\
	builtin_define ("__mcf5200__");		\
      if (TARGET_528x)				\
	{					\
	  builtin_define ("__mcf528x__");	\
	  builtin_define ("__mcf5200__");	\
	}					\
      if (TARGET_CFV3)				\
	{					\
	  builtin_define ("__mcf5300__");	\
	  builtin_define ("__mcf5307__");	\
	}					\
      if (TARGET_CFV4)				\
	{					\
	  builtin_define ("__mcf5400__");	\
	  builtin_define ("__mcf5407__");	\
	}					\
      if (TARGET_CFV4E)				\
	{					\
	  builtin_define ("__mcfv4e__");	\
	}					\
      if (TARGET_CF_HWDIV)			\
	builtin_define ("__mcfhwdiv__");	\
      builtin_assert ("cpu=m68k");		\
      builtin_assert ("machine=m68k");		\
    }						\
  while (0)
 
/* Classify the groups of pseudo-ops used to assemble QI, HI and SI
   quantities.  */
#define INT_OP_STANDARD	0	/* .byte, .short, .long */
#define INT_OP_DOT_WORD	1	/* .byte, .word, .long */
#define INT_OP_NO_DOT	2	/* byte, short, long */
#define INT_OP_DC	3	/* dc.b, dc.w, dc.l */

/* Set the default.  */
#define INT_OP_GROUP INT_OP_DOT_WORD

/* Compile for a CPU32.  A 68020 without bitfields is a good
   heuristic for a CPU32.  */
#define TARGET_CPU32	(TARGET_68020 && !TARGET_BITFIELD)

/* Is the target a ColdFire?  */
#define MASK_COLDFIRE	\
  (MASK_5200 | MASK_528x | MASK_CFV3 | MASK_CFV4 | MASK_CFV4E)
#define TARGET_COLDFIRE	((target_flags & MASK_COLDFIRE) != 0)

/* ColdFire FPU support is tied to the V4e variant.  */
#define TARGET_COLDFIRE_FPU	TARGET_CFV4E

/* Hardware float: either a 68881 or a ColdFire FPU.  */
#define TARGET_HARD_FLOAT	(TARGET_68881 || TARGET_COLDFIRE_FPU)
/* Size (in bytes) of FPU registers.  */
#define TARGET_FP_REG_SIZE	(TARGET_COLDFIRE ? 8 : 12)
 
 
#define OVERRIDE_OPTIONS   override_options()

/* These are meant to be redefined in the host dependent files.  */
#define SUBTARGET_OVERRIDE_OPTIONS

/* Target machine storage layout.  */

/* 68881 extended precision occupies 80 significant bits.  */
#define LONG_DOUBLE_TYPE_SIZE 80

/* Set the value of FLT_EVAL_METHOD in float.h.  When using 68040 fp
   instructions, we get proper intermediate rounding, otherwise we
   get extended precision results.  */
#define TARGET_FLT_EVAL_METHOD ((TARGET_68040_ONLY || ! TARGET_68881) ? 0 : 2)

#define BITS_BIG_ENDIAN 1
#define BYTES_BIG_ENDIAN 1
#define WORDS_BIG_ENDIAN 1

#define UNITS_PER_WORD 4

/* -mshort narrows parameter slots (and int, below) to 16 bits.  */
#define PARM_BOUNDARY (TARGET_SHORT ? 16 : 32)
#define STACK_BOUNDARY 16
#define FUNCTION_BOUNDARY 16
#define EMPTY_FIELD_BOUNDARY 16

/* No data type wants to be aligned rounder than this.
   Most published ABIs say that ints should be aligned on 16 bit
   boundaries, but CPUs with 32-bit busses get better performance
   aligned on 32-bit boundaries.  ColdFires without a misalignment
   module require 32-bit alignment.  */
#define BIGGEST_ALIGNMENT (TARGET_ALIGN_INT ? 32 : 16)

#define STRICT_ALIGNMENT (TARGET_STRICT_ALIGNMENT)

#define INT_TYPE_SIZE (TARGET_SHORT ? 16 : 32)

/* Define these to avoid dependence on meaning of `int'.  */
#define WCHAR_TYPE "long int"
#define WCHAR_TYPE_SIZE 32

/* Maximum number of library IDs we permit with -mid-shared-library.  */
#define MAX_LIBRARY_ID 255
 
/* Standard register usage.  */

/* For the m68k, we give the data registers numbers 0-7,
   the address registers numbers 010-017 (8-15),
   and the 68881 floating point registers numbers 020-027 (16-23).
   We also have a fake `arg-pointer' register 030 (24) used for
   register elimination.  */
#define FIRST_PSEUDO_REGISTER 25

/* All m68k targets (except AmigaOS) use %a5 (register 13) as the PIC
   register.  */
#define PIC_OFFSET_TABLE_REGNUM (flag_pic ? 13 : INVALID_REGNUM)

/* 1 for registers that have pervasive standard uses
   and are not available for the register allocator.
   On the m68k, only the stack pointer (%a7, last address slot) is such.
   Our fake arg-pointer is obviously fixed as well.  */
#define FIXED_REGISTERS        \
 {/* Data registers.  */       \
  0, 0, 0, 0, 0, 0, 0, 0,      \
                               \
  /* Address registers.  */    \
  0, 0, 0, 0, 0, 0, 0, 1,      \
                               \
  /* Floating point registers  \
     (if available).  */       \
  0, 0, 0, 0, 0, 0, 0, 0,      \
                               \
  /* Arg pointer.  */          \
  1 }

/* 1 for registers not available across function calls.
   These must include the FIXED_REGISTERS and also any
   registers that can be used without being saved.
   The latter must include the registers where values are returned
   and the register where structure-value addresses are passed.
   Aside from that, you can include as many other registers as you like.
   Call-clobbered here: d0/d1, a0/a1, fp0/fp1 (plus %sp and the fake
   arg pointer).  */
#define CALL_USED_REGISTERS    \
 {/* Data registers.  */       \
  1, 1, 0, 0, 0, 0, 0, 0,      \
                               \
  /* Address registers.  */    \
  1, 1, 0, 0, 0, 0, 0, 1,      \
                               \
  /* Floating point registers  \
     (if available).  */       \
  1, 1, 0, 0, 0, 0, 0, 0,      \
                               \
  /* Arg pointer.  */          \
  1 }

/* Prefer the call-clobbered d0/d1/a0/a1 first, then the remaining data
   and address registers, FP registers last.  */
#define REG_ALLOC_ORDER		\
{ /* d0/d1/a0/a1 */		\
  0, 1, 8, 9,			\
  /* d2-d7 */			\
  2, 3, 4, 5, 6, 7,		\
  /* a2-a7/arg */		\
  10, 11, 12, 13, 14, 15, 24,	\
  /* fp0-fp7 */			\
  16, 17, 18, 19, 20, 21, 22, 23\
}


/* Make sure everything's fine if we *don't* have a given processor.
   This assumes that putting a register in fixed_regs will keep the
   compiler's mitts completely off it.  We don't bother to zero it out
   of register classes.  */
#define CONDITIONAL_REGISTER_USAGE				\
{								\
  int i;							\
  HARD_REG_SET x;						\
  /* Without hardware float, all FP registers are off limits.  */ \
  if (!TARGET_HARD_FLOAT)					\
    {								\
      COPY_HARD_REG_SET (x, reg_class_contents[(int)FP_REGS]);	\
      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)		\
        if (TEST_HARD_REG_BIT (x, i))				\
	  fixed_regs[i] = call_used_regs[i] = 1;		\
    }								\
  /* Reserve the PIC register when generating PIC code.  */	\
  if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)		\
    fixed_regs[PIC_OFFSET_TABLE_REGNUM]				\
      = call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;		\
}
 
/* On the m68k, ordinary registers hold 32 bits worth;
   for the 68881 registers (16 and up), a single register is always
   enough for anything that can be stored in them at all.  */
#define HARD_REGNO_NREGS(REGNO, MODE)   \
  ((REGNO) >= 16 ? GET_MODE_NUNITS (MODE)	\
   : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))

/* A C expression that is nonzero if hard register NEW_REG can be
   considered for use as a rename register for OLD_REG register.  */

#define HARD_REGNO_RENAME_OK(OLD_REG, NEW_REG) \
  m68k_hard_regno_rename_ok (OLD_REG, NEW_REG)

/* Value is true if hard register REGNO can hold a value of machine-mode MODE.
   On the 68000, the cpu registers can hold any mode except bytes in
   address registers, the 68881 registers can hold only SFmode or DFmode.  */

#define HARD_REGNO_MODE_OK(REGNO, MODE) \
  m68k_regno_mode_ok ((REGNO), (MODE))

/* Two modes are tieable unless exactly one of them is a (complex)
   float mode; without hardware float there is no restriction.  */
#define MODES_TIEABLE_P(MODE1, MODE2)			\
  (! TARGET_HARD_FLOAT					\
   || ((GET_MODE_CLASS (MODE1) == MODE_FLOAT		\
	|| GET_MODE_CLASS (MODE1) == MODE_COMPLEX_FLOAT)	\
       == (GET_MODE_CLASS (MODE2) == MODE_FLOAT		\
	   || GET_MODE_CLASS (MODE2) == MODE_COMPLEX_FLOAT)))

/* Specify the registers used for certain standard purposes.
   The values of these macros are register numbers.  */

/* %a7 is the stack pointer.  */
#define STACK_POINTER_REGNUM 15

/* Most m68k targets use %a6 as a frame pointer.  The AmigaOS
   ABI uses %a6 for shared library calls, therefore the frame
   pointer is shifted to %a5 on this target.  */
#define FRAME_POINTER_REGNUM 14

#define FRAME_POINTER_REQUIRED 0

/* Base register for access to arguments of the function.
 * This isn't a hardware register.  It will be eliminated to the
 * stack pointer or frame pointer.
 */
#define ARG_POINTER_REGNUM 24

/* %a0 carries the static chain.  */
#define STATIC_CHAIN_REGNUM 8

/* Register in which address to store a structure value
   is passed to a function (%a1 here; SVR4 targets override to %a0).  */
#define M68K_STRUCT_VALUE_REGNUM 9
 
 
/* The m68k has three kinds of registers, so eight classes would be
   a complete set.  One of them is not needed.  */
enum reg_class {
  NO_REGS, DATA_REGS,
  ADDR_REGS, FP_REGS,
  GENERAL_REGS, DATA_OR_FP_REGS,
  ADDR_OR_FP_REGS, ALL_REGS,
  LIM_REG_CLASSES };

#define N_REG_CLASSES (int) LIM_REG_CLASSES

#define REG_CLASS_NAMES \
 { "NO_REGS", "DATA_REGS",		\
   "ADDR_REGS", "FP_REGS",		\
   "GENERAL_REGS", "DATA_OR_FP_REGS",	\
   "ADDR_OR_FP_REGS", "ALL_REGS" }

/* Hard-register bit masks: bits 0-7 data regs, 8-15 address regs,
   16-23 FP regs, bit 24 (0x01000000) the fake arg pointer, which is
   grouped with the address registers.  */
#define REG_CLASS_CONTENTS			\
{						\
  {0x00000000},  /* NO_REGS */			\
  {0x000000ff},  /* DATA_REGS */		\
  {0x0100ff00},  /* ADDR_REGS */		\
  {0x00ff0000},  /* FP_REGS */			\
  {0x0100ffff},  /* GENERAL_REGS */		\
  {0x00ff00ff},  /* DATA_OR_FP_REGS */		\
  {0x01ffff00},  /* ADDR_OR_FP_REGS */		\
  {0x01ffffff},  /* ALL_REGS */			\
}

extern enum reg_class regno_reg_class[];
#define REGNO_REG_CLASS(REGNO) (regno_reg_class[(REGNO)])
#define INDEX_REG_CLASS GENERAL_REGS
#define BASE_REG_CLASS ADDR_REGS
 
/* We do a trick here to modify the effective constraints on the
   machine description; we zorch the constraint letters that aren't
   appropriate for a specific target.  This allows us to guarantee
   that a specific kind of register will not be used for a given target
   without fiddling with the register classes above.
   ('f' degrades to NO_REGS when there is no hardware float.)  */
#define REG_CLASS_FROM_LETTER(C) \
  ((C) == 'a' ? ADDR_REGS :			\
   ((C) == 'd' ? DATA_REGS :			\
    ((C) == 'f' ? (TARGET_HARD_FLOAT ?		\
		   FP_REGS : NO_REGS) :		\
     NO_REGS)))

/* For the m68k, `I' is used for the range 1 to 8
   allowed as immediate shift counts and in addq.
   `J' is used for the range of signed numbers that fit in 16 bits.
   `K' is for numbers that moveq can't handle.
   `L' is for range -8 to -1, range of values that can be added with subq.
   `M' is for numbers that moveq+notb can't handle.
   'N' is for range 24 to 31, rotatert:SI 8 to 1 expressed as rotate.
   'O' is for 16 (for rotate using swap).
   'P' is for range 8 to 15, rotatert:HI 8 to 1 expressed as rotate.  */
#define CONST_OK_FOR_LETTER_P(VALUE, C) \
  ((C) == 'I' ? (VALUE) > 0 && (VALUE) <= 8 : \
   (C) == 'J' ? (VALUE) >= -0x8000 && (VALUE) <= 0x7FFF : \
   (C) == 'K' ? (VALUE) < -0x80 || (VALUE) >= 0x80 : \
   (C) == 'L' ? (VALUE) < 0 && (VALUE) >= -8 : \
   (C) == 'M' ? (VALUE) < -0x100 || (VALUE) >= 0x100 : \
   (C) == 'N' ? (VALUE) >= 24 && (VALUE) <= 31 : \
   (C) == 'O' ? (VALUE) == 16 : \
   (C) == 'P' ? (VALUE) >= 8 && (VALUE) <= 15 : 0)

/* "G" defines all of the floating constants that are *NOT* 68881
   constants.  This is so 68881 constants get reloaded and the
   fpmovecr is used.  */
#define CONST_DOUBLE_OK_FOR_LETTER_P(VALUE, C) \
  ((C) == 'G' ? ! (TARGET_68881 && standard_68881_constant_p (VALUE)) : 0 )

/* `Q' means address register indirect addressing mode.
   `S' is for operands that satisfy 'm' when -mpcrel is in effect.
   `T' is for operands that satisfy 's' when -mpcrel is not in effect.
   `U' is for register offset addressing.  */
#define EXTRA_CONSTRAINT(OP,CODE)			\
  (((CODE) == 'S')					\
   ? /* MEM of a symbolic address, -mpcrel only.  */	\
     (TARGET_PCREL					\
      && GET_CODE (OP) == MEM				\
      && (GET_CODE (XEXP (OP, 0)) == SYMBOL_REF		\
	  || GET_CODE (XEXP (OP, 0)) == LABEL_REF	\
	  || GET_CODE (XEXP (OP, 0)) == CONST))		\
   :							\
  (((CODE) == 'T')					\
   ? /* Bare symbolic operand, not -mpcrel.  */		\
     ( !TARGET_PCREL					\
      && (GET_CODE (OP) == SYMBOL_REF			\
	  || GET_CODE (OP) == LABEL_REF			\
	  || GET_CODE (OP) == CONST))			\
   :							\
  (((CODE) == 'Q')					\
   ? /* (mem (reg)) -- plain register indirect.  */	\
     (GET_CODE (OP) == MEM				\
      && GET_CODE (XEXP (OP, 0)) == REG)		\
   :							\
  (((CODE) == 'U')					\
   ? /* (mem (plus (reg) (const_int))).  */		\
     (GET_CODE (OP) == MEM				\
      && GET_CODE (XEXP (OP, 0)) == PLUS		\
      && GET_CODE (XEXP (XEXP (OP, 0), 0)) == REG	\
      && GET_CODE (XEXP (XEXP (OP, 0), 1)) == CONST_INT) \
   :							\
   0))))
 
/* On the m68k, use a data reg if possible when the
   value is a constant in the range where moveq could be used
   and we ensure that QImodes are reloaded into data regs.
   Float constants go to FP_REGS only with hardware float; symbolic
   operands under -mpcrel must end up in address registers.  */
#define PREFERRED_RELOAD_CLASS(X,CLASS)  \
  ((GET_CODE (X) == CONST_INT			\
    && (unsigned) (INTVAL (X) + 0x80) < 0x100	\
    && (CLASS) != ADDR_REGS)			\
   ? DATA_REGS					\
   : (GET_MODE (X) == QImode && (CLASS) != ADDR_REGS)		\
   ? DATA_REGS					\
   : (GET_CODE (X) == CONST_DOUBLE					\
      && GET_MODE_CLASS (GET_MODE (X)) == MODE_FLOAT)			\
   ? (TARGET_HARD_FLOAT && (CLASS == FP_REGS || CLASS == DATA_OR_FP_REGS) \
      ? FP_REGS : NO_REGS)						\
   : (TARGET_PCREL				\
      && (GET_CODE (X) == SYMBOL_REF || GET_CODE (X) == CONST \
	  || GET_CODE (X) == LABEL_REF))	\
   ? ADDR_REGS					\
   : (CLASS))

/* Force QImode output reloads from subregs to be allocated to data regs,
   since QImode stores from address regs are not supported.  We make the
   assumption that if the class is not ADDR_REGS, then it must be a superset
   of DATA_REGS.  */
#define LIMIT_RELOAD_CLASS(MODE, CLASS) \
  (((MODE) == QImode && (CLASS) != ADDR_REGS)	\
   ? DATA_REGS					\
   : (CLASS))

/* On the m68k, this is the size of MODE in words,
   except in the FP regs, where a single reg is always enough.  */
#define CLASS_MAX_NREGS(CLASS, MODE)	\
  ((CLASS) == FP_REGS ? 1 \
   : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))

/* Moves between fp regs and other regs are two insns.  */
#define REGISTER_MOVE_COST(MODE, CLASS1, CLASS2)	\
  (((CLASS1) == FP_REGS && (CLASS2) != FP_REGS)		\
    || ((CLASS2) == FP_REGS && (CLASS1) != FP_REGS)	\
    ? 4 : 2)
/* Stack layout; function entry, exit and calling. */
 
#define STACK_GROWS_DOWNWARD
#define FRAME_GROWS_DOWNWARD 1
#define STARTING_FRAME_OFFSET 0
 
/* On the 680x0, sp@- in a byte insn really pushes a word.
On the ColdFire, sp@- in a byte insn pushes just a byte. */
/* Hence byte pushes are rounded up to 2 bytes except on ColdFire.  */
#define PUSH_ROUNDING(BYTES) (TARGET_COLDFIRE ? BYTES : ((BYTES) + 1) & ~1)
 
/* First parameter is 8 bytes above the frame pointer: saved FP + return
   address.  */
#define FIRST_PARM_OFFSET(FNDECL) 8
 
/* On the 68000, the RTS insn cannot pop anything.
On the 68010, the RTD insn may be used to pop them if the number
of args is fixed, but if the number is variable then the caller
must pop them all. RTD can't be used for library calls now
because the library is compiled with the Unix compiler.
Use of RTD is a selectable option, since it is incompatible with
standard Unix calling sequences. If the option is not selected,
the caller must always pop the args. */
/* Pops SIZE bytes only under -mrtd, for non-library calls whose
   argument list is fixed (ends in void_type_node or is absent).  */
#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) \
((TARGET_RTD && (!(FUNDECL) || TREE_CODE (FUNDECL) != IDENTIFIER_NODE) \
&& (TYPE_ARG_TYPES (FUNTYPE) == 0 \
|| (TREE_VALUE (tree_last (TYPE_ARG_TYPES (FUNTYPE))) \
== void_type_node))) \
? (SIZE) : 0)
 
/* On the m68k the return value is always in D0. */
#define FUNCTION_VALUE(VALTYPE, FUNC) \
gen_rtx_REG (TYPE_MODE (VALTYPE), 0)
 
/* On the m68k the return value is always in D0. */
#define LIBCALL_VALUE(MODE) gen_rtx_REG (MODE, 0)
 
/* On the m68k, D0 is the only register used. */
#define FUNCTION_VALUE_REGNO_P(N) ((N) == 0)
 
/* Define this to be true when FUNCTION_VALUE_REGNO_P is true for
more than one register.
XXX This macro is m68k specific and used only for m68kemb.h. */
#define NEEDS_UNTYPED_CALL 0
 
/* Aggregates are returned via a hidden pointer, PCC-style.  */
#define PCC_STATIC_STRUCT_RETURN
 
/* On the m68k, all arguments are usually pushed on the stack. */
#define FUNCTION_ARG_REGNO_P(N) 0
/* On the m68k, this is a single integer, which is a number of bytes
of arguments scanned so far. */
#define CUMULATIVE_ARGS int
 
/* On the m68k, the offset starts at 0. */
#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT, N_NAMED_ARGS) \
((CUM) = 0)
 
/* Advance past one argument: its size rounded up to a 4-byte multiple.  */
#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
((CUM) += ((MODE) != BLKmode \
? (GET_MODE_SIZE (MODE) + 3) & ~3 \
: (int_size_in_bytes (TYPE) + 3) & ~3))
 
/* On the m68k all args are always pushed. */
#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) 0
 
/* Emit the mcount profiling call, loading the label address into %a0.  */
#define FUNCTION_PROFILER(FILE, LABELNO) \
asm_fprintf (FILE, "\tlea %LLP%d,%Ra0\n\tjsr mcount\n", (LABELNO))
 
#define EXIT_IGNORE_STACK 1
 
/* Determine if the epilogue should be output as RTL.
You should override this if you define FUNCTION_EXTRA_EPILOGUE.
 
XXX This macro is m68k-specific and only used in m68k.md. */
#define USE_RETURN_INSN use_return_insn ()
 
/* Output assembler code for a block containing the constant parts
of a trampoline, leaving space for the variable parts.
 
On the m68k, the trampoline looks like this:
movl #STATIC,a0
jmp FUNCTION
 
WARNING: Targets that may run on 68040+ cpus must arrange for
the instruction cache to be flushed. Previous incarnations of
the m68k trampoline code attempted to get around this by either
using an out-of-line transfer function or pc-relative data, but
the fact remains that the code to jump to the transfer function
or the code to load the pc-relative data needs to be flushed
just as much as the "variable" portion of the trampoline.
Recognizing that a cache flush is going to be required anyway,
dispense with such notions and build a smaller trampoline.
 
Since more instructions are required to move a template into
place than to create it on the spot, don't use a template. */
 
/* 2 + 4 + 2 + 4 bytes: two opcodes and two 32-bit immediates.  */
#define TRAMPOLINE_SIZE 12
#define TRAMPOLINE_ALIGNMENT 16
 
/* Targets redefine this to invoke code to either flush the cache,
or enable stack execution (or both). */
#ifndef FINALIZE_TRAMPOLINE
#define FINALIZE_TRAMPOLINE(TRAMP)
#endif
 
/* We generate a two-instructions program at address TRAMP :
movea.l &CXT,%a0
jmp FNADDR */
/* 0x207C is the movea.l-immediate opcode, 0x4EF9 the jmp-absolute
   opcode; the CXT and FNADDR words follow each opcode.  */
#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \
{ \
emit_move_insn (gen_rtx_MEM (HImode, TRAMP), GEN_INT(0x207C)); \
emit_move_insn (gen_rtx_MEM (SImode, plus_constant (TRAMP, 2)), CXT); \
emit_move_insn (gen_rtx_MEM (HImode, plus_constant (TRAMP, 6)), \
GEN_INT(0x4EF9)); \
emit_move_insn (gen_rtx_MEM (SImode, plus_constant (TRAMP, 8)), FNADDR); \
FINALIZE_TRAMPOLINE(TRAMP); \
}
 
/* This is the library routine that is used to transfer control from the
trampoline to the actual nested function. It is defined for backward
compatibility, for linking with object code that used the old trampoline
definition.
 
A colon is used with no explicit operands to cause the template string
to be scanned for %-constructs.
 
The function name __transfer_from_trampoline is not actually used.
The function definition just permits use of "asm with operands"
(though the operand list is empty). */
#define TRANSFER_FROM_TRAMPOLINE \
void \
__transfer_from_trampoline () \
{ \
register char *a0 asm ("%a0"); \
asm (GLOBAL_ASM_OP "___trampoline"); \
asm ("___trampoline:"); \
asm volatile ("move%.l %0,%@" : : "m" (a0[22])); \
asm volatile ("move%.l %1,%0" : "=a" (a0) : "m" (a0[18])); \
asm ("rts":); \
}
/* There are two registers that can always be eliminated on the m68k.
The frame pointer and the arg pointer can be replaced by either the
hard frame pointer or to the stack pointer, depending upon the
circumstances. The hard frame pointer is not used before reload and
so it is not eligible for elimination. */
#define ELIMINABLE_REGS \
{{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM }, \
{ ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM }, \
{ FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM }}
 
/* Elimination to the stack pointer is possible only when no frame
   pointer is needed; elimination to the frame pointer always is.  */
#define CAN_ELIMINATE(FROM, TO) \
((TO) == STACK_POINTER_REGNUM ? ! frame_pointer_needed : 1)
 
/* The actual offsets are computed in m68k.c.  */
#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
(OFFSET) = m68k_initial_elimination_offset(FROM, TO)
/* Addressing modes, and classification of registers for them. */
 
#define HAVE_POST_INCREMENT 1
#define HAVE_PRE_DECREMENT 1
 
/* Macros to check register numbers against specific register classes. */
/* Register numbering: 0-7 data regs, 8-15 address regs, 16-23 FP regs.
   The octal XOR tricks below test those ranges: REGNO ^ 010 < 8 selects
   8..15, REGNO ^ 020 < 8 selects 16..23.  Hard regs are checked directly;
   pseudos are checked through reg_renumber.  */
 
#define REGNO_OK_FOR_INDEX_P(REGNO) \
((REGNO) < 16 || (unsigned) reg_renumber[REGNO] < 16)
#define REGNO_OK_FOR_BASE_P(REGNO) \
(((REGNO) ^ 010) < 8 || (unsigned) (reg_renumber[REGNO] ^ 010) < 8)
#define REGNO_OK_FOR_DATA_P(REGNO) \
((REGNO) < 8 || (unsigned) reg_renumber[REGNO] < 8)
#define REGNO_OK_FOR_FP_P(REGNO) \
(((REGNO) ^ 020) < 8 || (unsigned) (reg_renumber[REGNO] ^ 020) < 8)
 
/* Now macros that check whether X is a register and also,
strictly, whether it is in a specified class.
 
These macros are specific to the m68k, and may be used only
in code for printing assembler insns and in conditions for
define_optimization. */
 
/* 1 if X is a data register. */
#define DATA_REG_P(X) (REG_P (X) && REGNO_OK_FOR_DATA_P (REGNO (X)))
 
/* 1 if X is an fp register. */
#define FP_REG_P(X) (REG_P (X) && REGNO_OK_FOR_FP_P (REGNO (X)))
 
/* 1 if X is an address register */
#define ADDRESS_REG_P(X) (REG_P (X) && REGNO_OK_FOR_BASE_P (REGNO (X)))
 
#define MAX_REGS_PER_ADDRESS 2
 
#define CONSTANT_ADDRESS_P(X) \
(GET_CODE (X) == LABEL_REF || GET_CODE (X) == SYMBOL_REF \
|| GET_CODE (X) == CONST_INT || GET_CODE (X) == CONST \
|| GET_CODE (X) == HIGH)
 
/* Nonzero if the constant value X is a legitimate general operand.
It is given that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
#define LEGITIMATE_CONSTANT_P(X) (GET_MODE (X) != XFmode)
 
#ifndef REG_OK_STRICT
#define PCREL_GENERAL_OPERAND_OK 0
#else
#define PCREL_GENERAL_OPERAND_OK (TARGET_PCREL)
#endif
 
#define LEGITIMATE_PIC_OPERAND_P(X) \
(! symbolic_operand (X, VOIDmode) \
|| (GET_CODE (X) == SYMBOL_REF && SYMBOL_REF_FLAG (X)) \
|| PCREL_GENERAL_OPERAND_OK)
 
#ifndef REG_OK_STRICT
 
/* Nonzero if X is a hard reg that can be used as an index
or if it is a pseudo reg. */
/* Non-strict: accept anything except a hard FP reg (16..23).  */
#define REG_OK_FOR_INDEX_P(X) ((REGNO (X) ^ 020) >= 8)
/* Nonzero if X is a hard reg that can be used as a base reg
or if it is a pseudo reg. */
/* Non-strict: rejects hard data regs (0..7) and FP regs (16..23);
   accepts address regs and pseudos.  */
#define REG_OK_FOR_BASE_P(X) ((REGNO (X) & ~027) != 0)
 
#else
 
/* Nonzero if X is a hard reg that can be used as an index. */
#define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X))
/* Nonzero if X is a hard reg that can be used as a base reg. */
#define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
 
#endif
/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
that is a valid memory address for an instruction.
The MODE argument is the machine mode for the MEM expression
that wants to use this address.
 
When generating PIC, an address involving a SYMBOL_REF is legitimate
if and only if it is the sum of pic_offset_table_rtx and the SYMBOL_REF.
We use LEGITIMATE_PIC_OPERAND_P to throw out the illegitimate addresses,
and we explicitly check for the sum of pic_offset_table_rtx and a SYMBOL_REF.
 
Likewise for a LABEL_REF when generating PIC.
 
The other macros defined here are used only in GO_IF_LEGITIMATE_ADDRESS. */
 
/* Allow SUBREG everywhere we allow REG. This results in better code. It
also makes function inlining work when inline functions are called with
arguments that are SUBREGs. */
 
#define LEGITIMATE_BASE_REG_P(X) \
((GET_CODE (X) == REG && REG_OK_FOR_BASE_P (X)) \
|| (GET_CODE (X) == SUBREG \
&& GET_CODE (SUBREG_REG (X)) == REG \
&& REG_OK_FOR_BASE_P (SUBREG_REG (X))))
 
/* Non-indexed address: a constant, a base register, predec/postinc on a
   base register, base + 16-bit displacement (any displacement on 68020+),
   or PIC-register + symbol/label.  */
#define INDIRECTABLE_1_ADDRESS_P(X) \
((CONSTANT_ADDRESS_P (X) && (!flag_pic || LEGITIMATE_PIC_OPERAND_P (X))) \
|| LEGITIMATE_BASE_REG_P (X) \
|| ((GET_CODE (X) == PRE_DEC || GET_CODE (X) == POST_INC) \
&& LEGITIMATE_BASE_REG_P (XEXP (X, 0))) \
|| (GET_CODE (X) == PLUS \
&& LEGITIMATE_BASE_REG_P (XEXP (X, 0)) \
&& GET_CODE (XEXP (X, 1)) == CONST_INT \
&& (TARGET_68020 \
|| ((unsigned) INTVAL (XEXP (X, 1)) + 0x8000) < 0x10000)) \
|| (GET_CODE (X) == PLUS && XEXP (X, 0) == pic_offset_table_rtx \
&& flag_pic && GET_CODE (XEXP (X, 1)) == SYMBOL_REF) \
|| (GET_CODE (X) == PLUS && XEXP (X, 0) == pic_offset_table_rtx \
&& flag_pic && GET_CODE (XEXP (X, 1)) == LABEL_REF))
 
#define GO_IF_NONINDEXED_ADDRESS(X, ADDR) \
{ if (INDIRECTABLE_1_ADDRESS_P (X)) goto ADDR; }
 
/* Only labels on dispatch tables are valid for indexing from. */
#define GO_IF_INDEXABLE_BASE(X, ADDR) \
{ rtx temp; \
if (GET_CODE (X) == LABEL_REF \
&& (temp = next_nonnote_insn (XEXP (X, 0))) != 0 \
&& GET_CODE (temp) == JUMP_INSN \
&& (GET_CODE (PATTERN (temp)) == ADDR_VEC \
|| GET_CODE (PATTERN (temp)) == ADDR_DIFF_VEC)) \
goto ADDR; \
if (LEGITIMATE_BASE_REG_P (X)) goto ADDR; }
 
/* base + index, with the index on either side of the PLUS.  */
#define GO_IF_INDEXING(X, ADDR) \
{ if (GET_CODE (X) == PLUS && LEGITIMATE_INDEX_P (XEXP (X, 0))) \
{ GO_IF_INDEXABLE_BASE (XEXP (X, 1), ADDR); } \
if (GET_CODE (X) == PLUS && LEGITIMATE_INDEX_P (XEXP (X, 1))) \
{ GO_IF_INDEXABLE_BASE (XEXP (X, 0), ADDR); } }
 
/* Indexed address, optionally with an 8-bit displacement (any
   displacement on 68020+) on either side of the outer PLUS.  */
#define GO_IF_INDEXED_ADDRESS(X, ADDR) \
{ GO_IF_INDEXING (X, ADDR); \
if (GET_CODE (X) == PLUS) \
{ if (GET_CODE (XEXP (X, 1)) == CONST_INT \
&& (TARGET_68020 || (unsigned) INTVAL (XEXP (X, 1)) + 0x80 < 0x100)) \
{ rtx go_temp = XEXP (X, 0); GO_IF_INDEXING (go_temp, ADDR); } \
if (GET_CODE (XEXP (X, 0)) == CONST_INT \
&& (TARGET_68020 || (unsigned) INTVAL (XEXP (X, 0)) + 0x80 < 0x100)) \
{ rtx go_temp = XEXP (X, 1); GO_IF_INDEXING (go_temp, ADDR); } } }
 
/* ColdFire/5200 does not allow HImode index registers. */
#define LEGITIMATE_INDEX_REG_P(X) \
((GET_CODE (X) == REG && REG_OK_FOR_INDEX_P (X)) \
|| (! TARGET_COLDFIRE \
&& GET_CODE (X) == SIGN_EXTEND \
&& GET_CODE (XEXP (X, 0)) == REG \
&& GET_MODE (XEXP (X, 0)) == HImode \
&& REG_OK_FOR_INDEX_P (XEXP (X, 0))) \
|| (GET_CODE (X) == SUBREG \
&& GET_CODE (SUBREG_REG (X)) == REG \
&& REG_OK_FOR_INDEX_P (SUBREG_REG (X))))
 
/* Index register, optionally scaled by 2, 4 or 8 on 68020/ColdFire
   (scale 8 requires CFV4E or a non-ColdFire CPU).  */
#define LEGITIMATE_INDEX_P(X) \
(LEGITIMATE_INDEX_REG_P (X) \
|| ((TARGET_68020 || TARGET_COLDFIRE) && GET_CODE (X) == MULT \
&& LEGITIMATE_INDEX_REG_P (XEXP (X, 0)) \
&& GET_CODE (XEXP (X, 1)) == CONST_INT \
&& (INTVAL (XEXP (X, 1)) == 2 \
|| INTVAL (XEXP (X, 1)) == 4 \
|| (INTVAL (XEXP (X, 1)) == 8 \
&& (TARGET_CFV4E || !TARGET_COLDFIRE)))))
 
/* Coldfire FPU only accepts addressing modes 2-5 */
#define GO_IF_COLDFIRE_FPU_LEGITIMATE_ADDRESS(MODE, X, ADDR) \
{ if (LEGITIMATE_BASE_REG_P (X) \
|| ((GET_CODE (X) == PRE_DEC || GET_CODE (X) == POST_INC) \
&& LEGITIMATE_BASE_REG_P (XEXP (X, 0))) \
|| ((GET_CODE (X) == PLUS) && LEGITIMATE_BASE_REG_P (XEXP (X, 0)) \
&& (GET_CODE (XEXP (X, 1)) == CONST_INT) \
&& ((((unsigned) INTVAL (XEXP (X, 1)) + 0x8000) < 0x10000)))) \
goto ADDR;}
 
/* If pic, we accept INDEX+LABEL, which is what do_tablejump makes. */
#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \
{ if (TARGET_COLDFIRE_FPU && (GET_MODE_CLASS (MODE) == MODE_FLOAT)) \
{ \
GO_IF_COLDFIRE_FPU_LEGITIMATE_ADDRESS (MODE, X, ADDR); \
} \
else \
{ \
GO_IF_NONINDEXED_ADDRESS (X, ADDR); \
GO_IF_INDEXED_ADDRESS (X, ADDR); \
if (flag_pic && MODE == CASE_VECTOR_MODE && GET_CODE (X) == PLUS \
&& LEGITIMATE_INDEX_P (XEXP (X, 0)) \
&& GET_CODE (XEXP (X, 1)) == LABEL_REF) \
goto ADDR; \
}}
 
/* Don't call memory_address_noforce for the address to fetch
the switch offset. This address is ok as it stands (see above),
but memory_address_noforce would alter it. */
#define PIC_CASE_VECTOR_ADDRESS(index) index
/* For the 68000, we handle X+REG by loading X into a register R and
using R+REG. R will go in an address reg and indexing will be used.
However, if REG is a broken-out memory address or multiplication,
nothing needs to be done because REG can certainly go in an address reg. */
/* COPY_ONCE copies X at most once before it is modified, and records
   the change in CH so the faster legitimacy re-check is taken.  */
#define COPY_ONCE(Y) if (!copied) { Y = copy_rtx (Y); copied = ch = 1; }
#define LEGITIMIZE_ADDRESS(X,OLDX,MODE,WIN) \
{ register int ch = (X) != (OLDX); \
if (GET_CODE (X) == PLUS) \
{ int copied = 0; \
if (GET_CODE (XEXP (X, 0)) == MULT) \
{ COPY_ONCE (X); XEXP (X, 0) = force_operand (XEXP (X, 0), 0);} \
if (GET_CODE (XEXP (X, 1)) == MULT) \
{ COPY_ONCE (X); XEXP (X, 1) = force_operand (XEXP (X, 1), 0);} \
if (ch && GET_CODE (XEXP (X, 1)) == REG \
&& GET_CODE (XEXP (X, 0)) == REG) \
{ if (TARGET_CFV4E && GET_MODE_CLASS (MODE) == MODE_FLOAT) \
{ COPY_ONCE (X); X = force_operand (X, 0);} \
goto WIN; } \
if (ch) { GO_IF_LEGITIMATE_ADDRESS (MODE, X, WIN); } \
if (GET_CODE (XEXP (X, 0)) == REG \
|| (GET_CODE (XEXP (X, 0)) == SIGN_EXTEND \
&& GET_CODE (XEXP (XEXP (X, 0), 0)) == REG \
&& GET_MODE (XEXP (XEXP (X, 0), 0)) == HImode)) \
{ register rtx temp = gen_reg_rtx (Pmode); \
register rtx val = force_operand (XEXP (X, 1), 0); \
emit_move_insn (temp, val); \
COPY_ONCE (X); \
XEXP (X, 1) = temp; \
if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (MODE) == MODE_FLOAT \
&& GET_CODE (XEXP (X, 0)) == REG) \
X = force_operand (X, 0); \
goto WIN; } \
else if (GET_CODE (XEXP (X, 1)) == REG \
|| (GET_CODE (XEXP (X, 1)) == SIGN_EXTEND \
&& GET_CODE (XEXP (XEXP (X, 1), 0)) == REG \
&& GET_MODE (XEXP (XEXP (X, 1), 0)) == HImode)) \
{ register rtx temp = gen_reg_rtx (Pmode); \
register rtx val = force_operand (XEXP (X, 0), 0); \
emit_move_insn (temp, val); \
COPY_ONCE (X); \
XEXP (X, 0) = temp; \
if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (MODE) == MODE_FLOAT \
&& GET_CODE (XEXP (X, 1)) == REG) \
X = force_operand (X, 0); \
goto WIN; }}}
 
/* On the 68000, only predecrement and postincrement address depend thus
(the amount of decrement or increment being the length of the operand). */
#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL) \
if (GET_CODE (ADDR) == POST_INC || GET_CODE (ADDR) == PRE_DEC) goto LABEL
/* Jump-table entries are 16-bit pc-relative offsets.  */
#define CASE_VECTOR_MODE HImode
#define CASE_VECTOR_PC_RELATIVE 1
 
#define DEFAULT_SIGNED_CHAR 1
#define MOVE_MAX 4
#define SLOW_BYTE_ACCESS 0
 
#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
 
/* Scc instructions produce all-ones for "true".  */
#define STORE_FLAG_VALUE (-1)
 
#define Pmode SImode
#define FUNCTION_MODE QImode
 
/* Tell final.c how to eliminate redundant test instructions. */
 
/* Here we define machine-dependent flags and fields in cc_status
(see `conditions.h'). */
 
/* Set if the cc value is actually in the 68881, so a floating point
conditional branch must be output. */
#define CC_IN_68881 04000
 
/* On the 68000, all the insns to store in an address register fail to
set the cc's. However, in some cases these instructions can make it
possibly invalid to use the saved cc's. In those cases we clear out
some or all of the saved cc's so they won't be used. */
#define NOTICE_UPDATE_CC(EXP,INSN) notice_update_cc (EXP, INSN)
 
/* Select the FLOAT, NO_OV or NORMAL branch template depending on how
   the preceding comparison left the condition codes.  */
#define OUTPUT_JUMP(NORMAL, FLOAT, NO_OV) \
do { if (cc_prev_status.flags & CC_IN_68881) \
return FLOAT; \
if (cc_prev_status.flags & CC_NO_OVERFLOW) \
return NO_OV; \
return NORMAL; } while (0)
/* Control the assembler format that we output. */
 
#define ASM_APP_ON "#APP\n"
#define ASM_APP_OFF "#NO_APP\n"
#define TEXT_SECTION_ASM_OP "\t.text"
#define DATA_SECTION_ASM_OP "\t.data"
#define GLOBAL_ASM_OP "\t.globl\t"
#define REGISTER_PREFIX ""
#define LOCAL_LABEL_PREFIX ""
#define USER_LABEL_PREFIX "_"
#define IMMEDIATE_PREFIX "#"
 
/* Names indexed by hard register number: d0-d7, a0-a6, sp, fp0-fp7,
   plus the fake argument pointer.  */
#define REGISTER_NAMES \
{REGISTER_PREFIX"d0", REGISTER_PREFIX"d1", REGISTER_PREFIX"d2", \
REGISTER_PREFIX"d3", REGISTER_PREFIX"d4", REGISTER_PREFIX"d5", \
REGISTER_PREFIX"d6", REGISTER_PREFIX"d7", \
REGISTER_PREFIX"a0", REGISTER_PREFIX"a1", REGISTER_PREFIX"a2", \
REGISTER_PREFIX"a3", REGISTER_PREFIX"a4", REGISTER_PREFIX"a5", \
REGISTER_PREFIX"a6", REGISTER_PREFIX"sp", \
REGISTER_PREFIX"fp0", REGISTER_PREFIX"fp1", REGISTER_PREFIX"fp2", \
REGISTER_PREFIX"fp3", REGISTER_PREFIX"fp4", REGISTER_PREFIX"fp5", \
REGISTER_PREFIX"fp6", REGISTER_PREFIX"fp7", REGISTER_PREFIX"argptr" }
 
#define M68K_FP_REG_NAME REGISTER_PREFIX"fp"
 
/* Return a register name by index, handling %fp nicely.
We don't replace %fp for targets that don't map it to %a6
since it may confuse GAS. */
#define M68K_REGNAME(r) ( \
((FRAME_POINTER_REGNUM == 14) \
&& ((r) == FRAME_POINTER_REGNUM) \
&& frame_pointer_needed) ? \
M68K_FP_REG_NAME : reg_names[(r)])
 
/* On the Sun-3, the floating point registers have numbers
18 to 25, not 16 to 23 as they do in the compiler. */
#define DBX_REGISTER_NUMBER(REGNO) ((REGNO) < 16 ? (REGNO) : (REGNO) + 2)
 
/* Before the prologue, RA is at 0(%sp). */
#define INCOMING_RETURN_ADDR_RTX \
gen_rtx_MEM (VOIDmode, gen_rtx_REG (VOIDmode, STACK_POINTER_REGNUM))
 
/* After the prologue, RA is at 4(AP) in the current frame. */
#define RETURN_ADDR_RTX(COUNT, FRAME) \
((COUNT) == 0 \
? gen_rtx_MEM (Pmode, plus_constant (arg_pointer_rtx, UNITS_PER_WORD)) \
: gen_rtx_MEM (Pmode, plus_constant (FRAME, UNITS_PER_WORD)))
 
/* We must not use the DBX register numbers for the DWARF 2 CFA column
numbers because that maps to numbers beyond FIRST_PSEUDO_REGISTER.
Instead use the identity mapping. */
#define DWARF_FRAME_REGNUM(REG) REG
 
/* Before the prologue, the top of the frame is at 4(%sp). */
#define INCOMING_FRAME_SP_OFFSET 4
 
/* Describe how we implement __builtin_eh_return. */
/* Two EH data registers; the stack adjustment lives in %a0 (reg 8).  */
#define EH_RETURN_DATA_REGNO(N) \
((N) < 2 ? (N) : INVALID_REGNUM)
#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, 8)
#define EH_RETURN_HANDLER_RTX \
gen_rtx_MEM (Pmode, \
gen_rtx_PLUS (Pmode, arg_pointer_rtx, \
plus_constant (EH_RETURN_STACKADJ_RTX, \
UNITS_PER_WORD)))
 
/* Select a format to encode pointers in exception handling data. CODE
is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
true if the symbol may be affected by dynamic relocations. */
#define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL) \
(flag_pic \
? ((GLOBAL) ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | DW_EH_PE_sdata4 \
: DW_EH_PE_absptr)
 
#define ASM_OUTPUT_LABELREF(FILE,NAME) \
asm_fprintf (FILE, "%U%s", NAME)
 
#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM) \
sprintf (LABEL, "*%s%s%ld", LOCAL_LABEL_PREFIX, PREFIX, (long)(NUM))
 
#define ASM_OUTPUT_REG_PUSH(FILE,REGNO) \
asm_fprintf (FILE, "\tmovel %s,%Rsp@-\n", reg_names[REGNO])
#define ASM_OUTPUT_REG_POP(FILE,REGNO) \
asm_fprintf (FILE, "\tmovel %Rsp@+,%s\n", reg_names[REGNO])
 
/* The m68k does not use absolute case-vectors, but we must define this macro
anyway. */
#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \
asm_fprintf (FILE, "\t.long %LL%d\n", VALUE)
 
/* Emit one pc-relative jump-table entry: the 16-bit difference between
   the case label and the table's base label.  BODY is unused here.  */
#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
asm_fprintf (FILE, "\t.word %LL%d-%LL%d\n", VALUE, REL)
 
/* We don't have a way to align to more than a two-byte boundary, so do the
best we can and don't complain. */
#define ASM_OUTPUT_ALIGN(FILE,LOG) \
if ((LOG) >= 1) \
fprintf (FILE, "\t.even\n");
 
#define ASM_OUTPUT_SKIP(FILE,SIZE) \
fprintf (FILE, "\t.skip %u\n", (int)(SIZE))
 
#define ASM_OUTPUT_COMMON(FILE, NAME, SIZE, ROUNDED) \
( fputs (".comm ", (FILE)), \
assemble_name ((FILE), (NAME)), \
fprintf ((FILE), ",%u\n", (int)(ROUNDED)))
 
#define ASM_OUTPUT_LOCAL(FILE, NAME, SIZE, ROUNDED) \
( fputs (".lcomm ", (FILE)), \
assemble_name ((FILE), (NAME)), \
fprintf ((FILE), ",%u\n", (int)(ROUNDED)))
 
/* Output a float value (represented as a C double) as an immediate operand.
This macro is m68k-specific. */
/* Code 'f' prints a decimal immediate (#0r...); otherwise the value is
   printed as its 32-bit single-precision bit pattern in hex.  */
#define ASM_OUTPUT_FLOAT_OPERAND(CODE,FILE,VALUE) \
do { \
if (CODE == 'f') \
{ \
char dstr[30]; \
real_to_decimal (dstr, &(VALUE), sizeof (dstr), 9, 0); \
asm_fprintf ((FILE), "%I0r%s", dstr); \
} \
else \
{ \
long l; \
REAL_VALUE_TO_TARGET_SINGLE (VALUE, l); \
asm_fprintf ((FILE), "%I0x%lx", l); \
} \
} while (0)
 
/* Output a double value (represented as a C double) as an immediate operand.
This macro is m68k-specific. */
#define ASM_OUTPUT_DOUBLE_OPERAND(FILE,VALUE) \
do { char dstr[30]; \
real_to_decimal (dstr, &(VALUE), sizeof (dstr), 0, 1); \
asm_fprintf (FILE, "%I0r%s", dstr); \
} while (0)
 
/* Note, long double immediate operands are not actually
generated by m68k.md. */
#define ASM_OUTPUT_LONG_DOUBLE_OPERAND(FILE,VALUE) \
do { char dstr[30]; \
real_to_decimal (dstr, &(VALUE), sizeof (dstr), 0, 1); \
asm_fprintf (FILE, "%I0r%s", dstr); \
} while (0)
 
/* On the 68000, we use several CODE characters:
'.' for dot needed in Motorola-style opcode names.
'-' for an operand pushing on the stack:
sp@-, -(sp) or -(%sp) depending on the style of syntax.
'+' for an operand pushing on the stack:
sp@+, (sp)+ or (%sp)+ depending on the style of syntax.
'@' for a reference to the top word on the stack:
sp@, (sp) or (%sp) depending on the style of syntax.
'#' for an immediate operand prefix (# in MIT and Motorola syntax
but & in SGS syntax).
'!' for the fpcr register (used in some float-to-fixed conversions).
'$' for the letter `s' in an op code, but only on the 68040.
'&' for the letter `d' in an op code, but only on the 68040.
'/' for register prefix needed by longlong.h.
 
'b' for byte insn (no effect, on the Sun; this is for the ISI).
'd' to force memory addressing to be absolute, not relative.
'f' for float insn (print a CONST_DOUBLE as a float rather than in hex)
'o' for operands to go directly to output_operand_address (bypassing
print_operand_address--used only for SYMBOL_REFs under TARGET_PCREL)
'x' for float insn (print a CONST_DOUBLE as a float rather than in hex),
or print pair of registers as rx:ry. */
 
#define PRINT_OPERAND_PUNCT_VALID_P(CODE) \
((CODE) == '.' || (CODE) == '#' || (CODE) == '-' \
|| (CODE) == '+' || (CODE) == '@' || (CODE) == '!' \
|| (CODE) == '$' || (CODE) == '&' || (CODE) == '/')
 
 
/* See m68k.c for the m68k specific codes. */
#define PRINT_OPERAND(FILE, X, CODE) print_operand (FILE, X, CODE)
 
#define PRINT_OPERAND_ADDRESS(FILE, ADDR) print_operand_address (FILE, ADDR)
 
/* Variables in m68k.c */
extern const char *m68k_library_id_string;
extern int m68k_last_compare_had_fp_operands;
/math-68881.h
0,0 → 1,529
/******************************************************************\
* *
* <math-68881.h> last modified: 23 May 1992. *
* *
* Copyright (C) 1989 by Matthew Self. *
* You may freely distribute verbatim copies of this software *
* provided that this copyright notice is retained in all copies. *
* You may distribute modifications to this software under the *
* conditions above if you also clearly note such modifications *
* with their author and date. *
* *
* Note: errno is not set to EDOM when domain errors occur for *
* most of these functions. Rather, it is assumed that the *
* 68881's OPERR exception will be enabled and handled *
* appropriately by the operating system. Similarly, overflow *
* and underflow do not set errno to ERANGE. *
* *
* Send bugs to Matthew Self (self@bayes.arc.nasa.gov). *
* *
\******************************************************************/
 
/* This file is NOT a part of GCC, just distributed with it. */
 
/* If you find this in GCC,
please send bug reports to bug-gcc@prep.ai.mit.edu. */
 
/* Changed by Richard Stallman:
May 1993, add conditional to prevent multiple inclusion.
% inserted before a #.
New function `hypot' added.
Nans written in hex to avoid 0rnan.
May 1992, use %! for fpcr register. Break lines before function names.
December 1989, add parens around `&' in pow.
November 1990, added alternate definition of HUGE_VAL for Sun. */
 
/* Changed by Jim Wilson:
September 1993, Use #undef before HUGE_VAL instead of #ifdef/#endif. */
 
/* Changed by Ian Lance Taylor:
September 1994, use extern inline instead of static inline. */
 
/* Inclusion guard for math-68881.h.  */
#ifndef __math_68881
#define __math_68881
 
#include <errno.h>
 
/* Redefine HUGE_VAL as positive infinity (IEEE bit pattern
   0x7ff00000_00000000).  */
#undef HUGE_VAL
#ifdef __sun__
/* The Sun assembler fails to handle the hex constant in the usual defn. */
#define HUGE_VAL \
({ \
static union { int i[2]; double d; } u = { {0x7ff00000, 0} }; \
u.d; \
})
#else
/* Load the infinity bit pattern directly into an FP register.  */
#define HUGE_VAL \
({ \
double huge_val; \
\
__asm ("fmove%.d #0x7ff0000000000000,%0" /* Infinity */ \
: "=f" (huge_val) \
: /* no inputs */); \
huge_val; \
})
#endif
 
/* Each of the following wraps a single 68881 floating-point
   instruction (fsin, fcos, ftan, fasin, facos, fatan); the argument
   goes in via an "f" (FP register) constraint and the result comes
   back the same way.  No errno handling here -- see the file header.  */

__inline extern double
sin (double x)
{
double value;
 
__asm ("fsin%.x %1,%0"
: "=f" (value)
: "f" (x));
return value;
}
 
__inline extern double
cos (double x)
{
double value;
 
__asm ("fcos%.x %1,%0"
: "=f" (value)
: "f" (x));
return value;
}
 
__inline extern double
tan (double x)
{
double value;
 
__asm ("ftan%.x %1,%0"
: "=f" (value)
: "f" (x));
return value;
}
 
__inline extern double
asin (double x)
{
double value;
 
__asm ("fasin%.x %1,%0"
: "=f" (value)
: "f" (x));
return value;
}
 
__inline extern double
acos (double x)
{
double value;
 
__asm ("facos%.x %1,%0"
: "=f" (value)
: "f" (x));
return value;
}
 
__inline extern double
atan (double x)
{
double value;
 
__asm ("fatan%.x %1,%0"
: "=f" (value)
: "f" (x));
return value;
}
 
/* atan2 (y, x): quadrant-correct arc tangent of y/x.  Loads pi from
   the 68881 constant ROM (fmovecr #0), halves it with fscale #-1,
   then reduces each quadrant to a call of atan on a ratio <= 1 in
   magnitude.  atan2 (0, 0) sets errno = EDOM and returns a quiet NaN
   built by loading the NaN bit pattern into an FP register.  */
__inline extern double
atan2 (double y, double x)
{
double pi, pi_over_2;
 
__asm ("fmovecr%.x #0,%0" /* extended precision pi */
: "=f" (pi)
: /* no inputs */ );
__asm ("fscale%.b #-1,%0" /* no loss of accuracy */
: "=f" (pi_over_2)
: "0" (pi));
if (x > 0)
{
if (y > 0)
{
if (x > y)
return atan (y / x);
else
return pi_over_2 - atan (x / y);
}
else
{
if (x > -y)
return atan (y / x);
else
return - pi_over_2 - atan (x / y);
}
}
else
{
if (y < 0)
{
if (-x > -y)
return - pi + atan (y / x);
else
return - pi_over_2 - atan (x / y);
}
else
{
if (-x > y)
return pi + atan (y / x);
else if (y > 0)
return pi_over_2 - atan (x / y);
else
{
double value;
 
errno = EDOM;
__asm ("fmove%.d #0x7fffffffffffffff,%0" /* quiet NaN */
: "=f" (value)
: /* no inputs */);
return value;
}
}
}
}
 
/* Hyperbolic functions: each wraps one 68881 instruction
   (fsinh, fcosh, ftanh, fatanh).  */

__inline extern double
sinh (double x)
{
double value;
 
__asm ("fsinh%.x %1,%0"
: "=f" (value)
: "f" (x));
return value;
}
 
__inline extern double
cosh (double x)
{
double value;
 
__asm ("fcosh%.x %1,%0"
: "=f" (value)
: "f" (x));
return value;
}
 
__inline extern double
tanh (double x)
{
double value;
 
__asm ("ftanh%.x %1,%0"
: "=f" (value)
: "f" (x));
return value;
}
 
__inline extern double
atanh (double x)
{
double value;
 
__asm ("fatanh%.x %1,%0"
: "=f" (value)
: "f" (x));
return value;
}
 
/* Exponential and logarithm family: one 68881 instruction each
   (fetox, fetoxm1, flogn, flognp1, flog10).  */

__inline extern double
exp (double x)
{
double value;
 
__asm ("fetox%.x %1,%0"
: "=f" (value)
: "f" (x));
return value;
}
 
__inline extern double
expm1 (double x)
{
double value;
 
__asm ("fetoxm1%.x %1,%0"
: "=f" (value)
: "f" (x));
return value;
}
 
__inline extern double
log (double x)
{
double value;
 
__asm ("flogn%.x %1,%0"
: "=f" (value)
: "f" (x));
return value;
}
 
__inline extern double
log1p (double x)
{
double value;
 
__asm ("flognp1%.x %1,%0"
: "=f" (value)
: "f" (x));
return value;
}
 
__inline extern double
log10 (double x)
{
double value;
 
__asm ("flog10%.x %1,%0"
: "=f" (value)
: "f" (x));
return value;
}
 
/* Square root via the 68881 fsqrt instruction.  */
__inline extern double
sqrt (double x)
{
double value;
 
__asm ("fsqrt%.x %1,%0"
: "=f" (value)
: "f" (x));
return value;
}
 
__inline extern double
hypot (double x, double y)
{
return sqrt (x*x + y*y);
}
 
/* pow (x, y) = x^y, built from exp and log:
     x > 0:  exp (y * log (x));
     x == 0: 0 for y > 0, otherwise EDOM and quiet NaN;
     x < 0:  defined only for integral y (checked with fintrz);
             exp (y * log (-x)) with the sign flipped for odd y,
             otherwise EDOM and quiet NaN.  */
__inline extern double
pow (double x, double y)
{
if (x > 0)
return exp (y * log (x));
else if (x == 0)
{
if (y > 0)
return 0.0;
else
{
double value;
 
errno = EDOM;
__asm ("fmove%.d #0x7fffffffffffffff,%0" /* quiet NaN */
: "=f" (value)
: /* no inputs */);
return value;
}
}
else
{
double temp;
 
__asm ("fintrz%.x %1,%0"
: "=f" (temp) /* integer-valued float */
: "f" (y));
if (y == temp)
{
int i = (int) y;
 
if ((i & 1) == 0) /* even */
return exp (y * log (-x));
else
return - exp (y * log (-x));
}
else
{
double value;
 
errno = EDOM;
__asm ("fmove%.d #0x7fffffffffffffff,%0" /* quiet NaN */
: "=f" (value)
: /* no inputs */);
return value;
}
}
}
 
/* Absolute value via the 68881 fabs instruction.  */
__inline extern double
fabs (double x)
{
double value;
 
__asm ("fabs%.x %1,%0"
: "=f" (value)
: "f" (x));
return value;
}
 
/* ceil/floor/rint share one pattern: read the FPCR (via the %! punct
   code), force the rounding-mode bits (0x30 mask: 0x30 = toward
   +infinity, 0x20 = toward -infinity, 0x00 = to nearest), run fint,
   then restore the saved FPCR.  The asms are volatile so the
   save/modify/round/restore sequence cannot be reordered or
   eliminated.  */

__inline extern double
ceil (double x)
{
int rounding_mode, round_up;
double value;
 
__asm volatile ("fmove%.l %!,%0"
: "=dm" (rounding_mode)
: /* no inputs */ );
round_up = rounding_mode | 0x30;
__asm volatile ("fmove%.l %0,%!"
: /* no outputs */
: "dmi" (round_up));
__asm volatile ("fint%.x %1,%0"
: "=f" (value)
: "f" (x));
__asm volatile ("fmove%.l %0,%!"
: /* no outputs */
: "dmi" (rounding_mode));
return value;
}
 
__inline extern double
floor (double x)
{
int rounding_mode, round_down;
double value;
 
__asm volatile ("fmove%.l %!,%0"
: "=dm" (rounding_mode)
: /* no inputs */ );
round_down = (rounding_mode & ~0x10)
| 0x20;
__asm volatile ("fmove%.l %0,%!"
: /* no outputs */
: "dmi" (round_down));
__asm volatile ("fint%.x %1,%0"
: "=f" (value)
: "f" (x));
__asm volatile ("fmove%.l %0,%!"
: /* no outputs */
: "dmi" (rounding_mode));
return value;
}
 
__inline extern double
rint (double x)
{
int rounding_mode, round_nearest;
double value;
 
__asm volatile ("fmove%.l %!,%0"
: "=dm" (rounding_mode)
: /* no inputs */ );
round_nearest = rounding_mode & ~0x30;
__asm volatile ("fmove%.l %0,%!"
: /* no outputs */
: "dmi" (round_nearest));
__asm volatile ("fint%.x %1,%0"
: "=f" (value)
: "f" (x));
__asm volatile ("fmove%.l %0,%!"
: /* no outputs */
: "dmi" (rounding_mode));
return value;
}
 
/* fmod: remainder of x/y via the 68881 fmod instruction; x is tied
   to the output register with the "0" constraint.  */
__inline extern double
fmod (double x, double y)
{
double value;
 
__asm ("fmod%.x %2,%0"
: "=f" (value)
: "0" (x),
"f" (y));
return value;
}
 
/* drem: IEEE remainder via the 68881 frem instruction.  */
__inline extern double
drem (double x, double y)
{
double value;
 
__asm ("frem%.x %2,%0"
: "=f" (value)
: "0" (x),
"f" (y));
return value;
}
 
/* scalb: x * 2^n via the 68881 fscale instruction.  */
__inline extern double
scalb (double x, int n)
{
double value;
 
__asm ("fscale%.l %2,%0"
: "=f" (value)
: "0" (x),
"dmi" (n));
return value;
}
 
/* logb: unbiased exponent of x, via fgetexp.  */
__inline extern double
logb (double x)
{
double exponent;
 
__asm ("fgetexp%.x %1,%0"
: "=f" (exponent)
: "f" (x));
return exponent;
}
 
/* ldexp: x * 2^n via fscale (same implementation as scalb above).  */
__inline extern double
ldexp (double x, int n)
{
double value;
 
__asm ("fscale%.l %2,%0"
: "=f" (value)
: "0" (x),
"dmi" (n));
return value;
}
 
/* frexp: split x into mantissa in [0.5, 1) and power-of-two exponent.
   fgetexp/fgetman yield exponent and mantissa in [1.0, 2.0); the
   nonzero case then halves the mantissa and bumps the exponent so the
   conventional [0.5, 1) normalization is returned.  */
__inline extern double
frexp (double x, int *exp)
{
double float_exponent;
int int_exponent;
double mantissa;
 
__asm ("fgetexp%.x %1,%0"
: "=f" (float_exponent) /* integer-valued float */
: "f" (x));
int_exponent = (int) float_exponent;
__asm ("fgetman%.x %1,%0"
: "=f" (mantissa) /* 1.0 <= mantissa < 2.0 */
: "f" (x));
if (mantissa != 0)
{
__asm ("fscale%.b #-1,%0"
: "=f" (mantissa) /* mantissa /= 2.0 */
: "0" (mantissa));
int_exponent += 1;
}
*exp = int_exponent;
return mantissa;
}
 
/* modf: store the integral part of x (truncated toward zero with
   fintrz) in *ip and return the fractional part.  */
__inline extern double
modf (double x, double *ip)
{
double temp;
 
__asm ("fintrz%.x %1,%0"
: "=f" (temp) /* integer-valued float */
: "f" (x));
*ip = temp;
return x - temp;
}
 
#endif /* not __math_68881 */
/t-m68kelf
0,0 → 1,30
# Assembly-language libgcc support routines: integer multiply/divide/
# modulo plus software floating-point conversion and comparison helpers,
# all drawn from lb1sf68.asm.
LIB1ASMSRC = m68k/lb1sf68.asm
LIB1ASMFUNCS = _mulsi3 _udivsi3 _divsi3 _umodsi3 _modsi3 \
_double _float _floatex \
_eqdf2 _nedf2 _gtdf2 _gedf2 _ltdf2 _ledf2 \
_eqsf2 _nesf2 _gtsf2 _gesf2 _ltsf2 _lesf2
 
# Extra C sources compiled into libgcc: the soft-float support file,
# built once as-is and once with EXTFLOAT defined (xfgnulib.c) for the
# extended-precision variant.
LIB2FUNCS_EXTRA = fpgnulib.c xfgnulib.c
 
# Copy the shared source into the build directory under each name.
fpgnulib.c: $(srcdir)/config/m68k/fpgnulib.c
cp $(srcdir)/config/m68k/fpgnulib.c fpgnulib.c
# xfgnulib.c is the same source prefixed with "#define EXTFLOAT".
xfgnulib.c: $(srcdir)/config/m68k/fpgnulib.c
echo '#define EXTFLOAT' > xfgnulib.c
cat $(srcdir)/config/m68k/fpgnulib.c >> xfgnulib.c
 
# Multilib matrix: one CPU option crossed with the FPU choice.
# MATCHES folds CPU aliases onto canonical names; EXCEPTIONS removes
# combinations that make no sense (e.g. ColdFire cores with -m68881,
# or CPUs whose only sensible float model is already the default).
MULTILIB_OPTIONS = m68000/m68020/m5200/m5206e/m528x/m5307/m5407/mcfv4e/mcpu32/m68040/m68060 m68881/msoft-float
MULTILIB_DIRNAMES =
MULTILIB_MATCHES = m68000=mc68000 m68000=m68302 mcpu32=m68332 m68020=mc68020 m5206e=m5272
MULTILIB_EXCEPTIONS = m68000/msoft-float m5200/m68881 m5200/msoft-float \
m5206e/m68881 m5206e/msoft-float m528x/m68881 m528x/msoft-float \
m5307/m68881 m5307/msoft-float m5407/m68881 m5407/msoft-float \
mcpu32/m68881 mcpu32/msoft-float m68040/m68881 m68040/msoft-float \
m68060/m68881 m68060/msoft-float \
mcfv4e/msoft-float mcfv4e/m68881
# Build and install libgcc once per multilib combination.
LIBGCC = stmp-multilib
INSTALL_LIBGCC = install-multilib
 
# from ../t-svr4
EXTRA_MULTILIB_PARTS=crtbegin.o crtend.o
# no pic for now
#CRTSTUFF_T_CFLAGS=-fpic
/uclinux.h
0,0 → 1,64
/* Definitions of target machine for GCC. m68k/ColdFire based uClinux system
using ELF objects with special linker post-processing to produce FLAT
executables.
 
Copyright (C) 2003, 2007 Free Software Foundation, Inc.
 
This file is part of GCC.
 
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
 
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
 
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
 
 
/* Undo the definition of STARTFILE_SPEC from m68kelf.h so we'll
pick the default from gcc.c (just link crt0.o from multilib dir). */
#undef STARTFILE_SPEC
 
/* Override the default LIB_SPEC from gcc.c. We don't currently support
profiling, or libg.a.  With -mid-shared-library, link against libc.gdb
and pass the elf2flt shared-library options to the linker. */
#undef LIB_SPEC
#define LIB_SPEC "\
%{mid-shared-library:-R libc.gdb%s -elf2flt -shared-lib-id 0} -lc \
"
 
/* we don't want a .eh_frame section (FLAT binaries have no separate
read-only segment for it). */
#define EH_FRAME_IN_DATA_SECTION
 
/* ??? Quick hack to get constructors working. Make this look more like a
COFF target, so the existing dejagnu/libgloss support works. A better
solution would be to make the necessary dejagnu and libgloss changes so
that we can use normal the ELF constructor mechanism. */
#undef INIT_SECTION_ASM_OP
#undef FINI_SECTION_ASM_OP
#undef ENDFILE_SPEC
#define ENDFILE_SPEC ""
/* Bring in standard linux defines */
#undef TARGET_OS_CPP_BUILTINS
#define TARGET_OS_CPP_BUILTINS() \
do \
{ \
builtin_define_std ("mc68000"); \
builtin_define ("__uClinux__"); \
builtin_define_std ("linux"); \
builtin_define_std ("unix"); \
builtin_define ("__gnu_linux__"); \
builtin_assert ("system=linux"); \
builtin_assert ("system=unix"); \
builtin_assert ("system=posix"); \
/* Advertise ID-based shared-library support when enabled. */ \
if (TARGET_ID_SHARED_LIBRARY) \
builtin_define ("__ID_SHARED_LIBRARY__"); \
} \
while (0)
 

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.