URL
https://opencores.org/ocsvn/zipcpu/zipcpu/trunk
Subversion Repositories zipcpu
[/] [zipcpu/] [trunk/] [sw/] [gcc-zippatch.patch] - Rev 162
Go to most recent revision | Compare with Previous | Blame | View Log
diff -Naur '--exclude=*.swp' gcc-5.3.0-original/config.sub gcc-5.3.0-zip/config.sub --- gcc-5.3.0-original/config.sub 2015-01-02 04:30:21.000000000 -0500 +++ gcc-5.3.0-zip/config.sub 2016-01-30 12:27:56.023073747 -0500 @@ -316,7 +316,7 @@ | visium \ | we32k \ | x86 | xc16x | xstormy16 | xtensa \ - | z8k | z80) + | z8k | z80 | zip) basic_machine=$basic_machine-unknown ;; c54x) @@ -1547,6 +1547,9 @@ # system, and we'll never get to this point. case $basic_machine in + zip-*) + os=-elf + ;; score-*) os=-elf ;; diff -Naur '--exclude=*.swp' gcc-5.3.0-original/configure gcc-5.3.0-zip/configure --- gcc-5.3.0-original/configure 2015-05-03 13:29:57.000000000 -0400 +++ gcc-5.3.0-zip/configure 2016-01-30 16:19:48.264867231 -0500 @@ -3927,6 +3927,8 @@ vax-*-*) noconfigdirs="$noconfigdirs target-newlib target-libgloss" ;; + zip*) + noconfigdirs="$noconfigdirs target-libffi target-boehm-gc gdb gprof" esac # If we aren't building newlib, then don't build libgloss, since libgloss diff -Naur '--exclude=*.swp' gcc-5.3.0-original/configure.ac gcc-5.3.0-zip/configure.ac --- gcc-5.3.0-original/configure.ac 2015-05-03 13:29:57.000000000 -0400 +++ gcc-5.3.0-zip/configure.ac 2016-02-12 10:47:23.847194843 -0500 @@ -1274,6 +1274,10 @@ vax-*-*) noconfigdirs="$noconfigdirs target-newlib target-libgloss" ;; + zip*) + noconfigdirs="$noconfigdirs target-libffi target-boehm-gc gdb gprof" + unsupported_languages="$unsupported_languages fortran java" + ;; esac # If we aren't building newlib, then don't build libgloss, since libgloss diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/cfgexpand.c gcc-5.3.0-zip/gcc/cfgexpand.c --- gcc-5.3.0-original/gcc/cfgexpand.c 2015-07-23 06:39:26.000000000 -0400 +++ gcc-5.3.0-zip/gcc/cfgexpand.c 2016-04-01 06:40:17.288326711 -0400 @@ -108,6 +108,14 @@ #include "tree-chkp.h" #include "rtl-chkp.h" +#ifdef DO_ZIP_DEBUGS +#include <stdio.h> +#define ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s:%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0) +extern 
void zip_debug_rtx(const_rtx); +#else +#define ZIP_DEBUG_LINE(STR,RTX) +#endif + /* Some systems use __main in a way incompatible with its use in gcc, in these cases use the macros NAME__MAIN to give a quoted symbol and SYMBOL__MAIN to give the same symbol without quotes for an alternative entry point. You diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/cgraphbuild.c gcc-5.3.0-zip/gcc/cgraphbuild.c --- gcc-5.3.0-original/gcc/cgraphbuild.c 2015-01-09 15:18:42.000000000 -0500 +++ gcc-5.3.0-zip/gcc/cgraphbuild.c 2016-03-24 22:13:24.815287808 -0400 @@ -62,6 +62,13 @@ #include "ipa-prop.h" #include "ipa-inline.h" +#ifdef DO_ZIP_DEBUGS +extern void zip_debug_rtx(const_rtx); +#define ZIP_DEBUG_LINE(STR,RTX) do { fprintf(stderr, "%s:%d/%s\n", __FILE__,__LINE__,STR); zip_debug_rtx(RTX); } while(0) +#else +#define ZIP_DEBUG_LINE(STR,RTX) +#endif + /* Context of record_reference. */ struct record_reference_ctx { diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/common/config/zip/zip-common.c gcc-5.3.0-zip/gcc/common/config/zip/zip-common.c --- gcc-5.3.0-original/gcc/common/config/zip/zip-common.c 1969-12-31 19:00:00.000000000 -0500 +++ gcc-5.3.0-zip/gcc/common/config/zip/zip-common.c 2016-02-14 00:54:31.821055716 -0500 @@ -0,0 +1,52 @@ +//////////////////////////////////////////////////////////////////////////////// +// +// Filename: common/config/zip/zip-common.c +// +// Project: Zip CPU backend for the GNU Compiler Collection +// +// Purpose: To eliminate the frame register automatically. +// +// Creator: Dan Gisselquist, Ph.D. +// Gisselquist Technology, LLC +// +//////////////////////////////////////////////////////////////////////////////// +// +// Copyright (C) 2016, Gisselquist Technology, LLC +// +// This program is free software (firmware): you can redistribute it and/or +// modify it under the terms of the GNU General Public License as published +// by the Free Software Foundation, either version 3 of the License, or (at +// your option) any later version. 
+// +// This program is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// for more details. +// +// You should have received a copy of the GNU General Public License along +// with this program. (It's in the $(ROOT)/doc directory, run make with no +// target there if the PDF file isn't present.) If not, see +// <http://www.gnu.org/licenses/> for a copy. +// +// License: GPL, v3, as defined and found on www.gnu.org, +// http://www.gnu.org/licenses/gpl.html +// +// +//////////////////////////////////////////////////////////////////////////////// +#include "config.h" +#include "system.h" +#include "coretypes.h" +#include "tm.h" +#include "common/common-target.h" +#include "common/common-target-def.h" + +static const struct default_options zip_option_optimization_table[] = + { + { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 }, + { OPT_LEVELS_NONE, 0, NULL, 0 } + }; + +#undef TARGET_OPTION_OPTIMIZATION_TABLE +#define TARGET_OPTION_OPTIMIZATION_TABLE zip_option_optimization_table + +struct gcc_targetm_common targetm_common = TARGETM_COMMON_INITIALIZER; diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/config/aarch64/aarch64-linux.h gcc-5.3.0-zip/gcc/config/aarch64/aarch64-linux.h --- gcc-5.3.0-original/gcc/config/aarch64/aarch64-linux.h 2016-05-12 21:52:06.137764804 -0400 +++ gcc-5.3.0-zip/gcc/config/aarch64/aarch64-linux.h 2015-07-24 12:00:26.000000000 -0400 @@ -21,7 +21,7 @@ #ifndef GCC_AARCH64_LINUX_H #define GCC_AARCH64_LINUX_H -#define GLIBC_DYNAMIC_LINKER "/tools/lib/ld-linux-aarch64%{mbig-endian:_be}%{mabi=ilp32:_ilp32}.so.1" +#define GLIBC_DYNAMIC_LINKER "/lib/ld-linux-aarch64%{mbig-endian:_be}%{mabi=ilp32:_ilp32}.so.1" #undef ASAN_CC1_SPEC #define ASAN_CC1_SPEC "%{%:sanitize(address):-funwind-tables}" diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/config/alpha/linux-elf.h 
gcc-5.3.0-zip/gcc/config/alpha/linux-elf.h --- gcc-5.3.0-original/gcc/config/alpha/linux-elf.h 2016-05-12 21:52:06.141764778 -0400 +++ gcc-5.3.0-zip/gcc/config/alpha/linux-elf.h 2015-01-05 07:33:28.000000000 -0500 @@ -23,8 +23,8 @@ #define EXTRA_SPECS \ { "elf_dynamic_linker", ELF_DYNAMIC_LINKER }, -#define GLIBC_DYNAMIC_LINKER "/tools/lib/ld-linux.so.2" -#define UCLIBC_DYNAMIC_LINKER "/tools/lib/ld-uClibc.so.0" +#define GLIBC_DYNAMIC_LINKER "/lib/ld-linux.so.2" +#define UCLIBC_DYNAMIC_LINKER "/lib/ld-uClibc.so.0" #if DEFAULT_LIBC == LIBC_UCLIBC #define CHOOSE_DYNAMIC_LINKER(G, U) "%{mglibc:" G ";:" U "}" #elif DEFAULT_LIBC == LIBC_GLIBC diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/config/arm/linux-eabi.h gcc-5.3.0-zip/gcc/config/arm/linux-eabi.h --- gcc-5.3.0-original/gcc/config/arm/linux-eabi.h 2016-05-12 21:52:06.141764778 -0400 +++ gcc-5.3.0-zip/gcc/config/arm/linux-eabi.h 2015-01-05 07:33:28.000000000 -0500 @@ -68,8 +68,8 @@ GLIBC_DYNAMIC_LINKER_DEFAULT and TARGET_DEFAULT_FLOAT_ABI. 
*/ #undef GLIBC_DYNAMIC_LINKER -#define GLIBC_DYNAMIC_LINKER_SOFT_FLOAT "/tools/lib/ld-linux.so.3" -#define GLIBC_DYNAMIC_LINKER_HARD_FLOAT "/tools/lib/ld-linux-armhf.so.3" +#define GLIBC_DYNAMIC_LINKER_SOFT_FLOAT "/lib/ld-linux.so.3" +#define GLIBC_DYNAMIC_LINKER_HARD_FLOAT "/lib/ld-linux-armhf.so.3" #define GLIBC_DYNAMIC_LINKER_DEFAULT GLIBC_DYNAMIC_LINKER_SOFT_FLOAT #define GLIBC_DYNAMIC_LINKER \ diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/config/arm/linux-elf.h gcc-5.3.0-zip/gcc/config/arm/linux-elf.h --- gcc-5.3.0-original/gcc/config/arm/linux-elf.h 2016-05-12 21:52:06.141764778 -0400 +++ gcc-5.3.0-zip/gcc/config/arm/linux-elf.h 2015-06-23 05:26:54.000000000 -0400 @@ -62,7 +62,7 @@ #define LIBGCC_SPEC "%{mfloat-abi=soft*:-lfloat} -lgcc" -#define GLIBC_DYNAMIC_LINKER "/tools/lib/ld-linux.so.2" +#define GLIBC_DYNAMIC_LINKER "/lib/ld-linux.so.2" #define LINUX_TARGET_LINK_SPEC "%{h*} \ %{static:-Bstatic} \ diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/config/bfin/linux.h gcc-5.3.0-zip/gcc/config/bfin/linux.h --- gcc-5.3.0-original/gcc/config/bfin/linux.h 2016-05-12 21:52:06.141764778 -0400 +++ gcc-5.3.0-zip/gcc/config/bfin/linux.h 2015-01-05 07:33:28.000000000 -0500 @@ -45,7 +45,7 @@ %{shared:-G -Bdynamic} \ %{!shared: %{!static: \ %{rdynamic:-export-dynamic} \ - -dynamic-linker /tools/lib/ld-uClibc.so.0} \ + -dynamic-linker /lib/ld-uClibc.so.0} \ %{static}} -init __init -fini __fini" #undef TARGET_SUPPORTS_SYNC_CALLS diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/config/cris/linux.h gcc-5.3.0-zip/gcc/config/cris/linux.h --- gcc-5.3.0-original/gcc/config/cris/linux.h 2016-05-12 21:52:06.141764778 -0400 +++ gcc-5.3.0-zip/gcc/config/cris/linux.h 2015-01-05 07:33:28.000000000 -0500 @@ -102,7 +102,7 @@ #undef CRIS_DEFAULT_CPU_VERSION #define CRIS_DEFAULT_CPU_VERSION CRIS_CPU_NG -#define GLIBC_DYNAMIC_LINKER "/tools/lib/ld.so.1" +#define GLIBC_DYNAMIC_LINKER "/lib/ld.so.1" #undef CRIS_LINK_SUBTARGET_SPEC #define CRIS_LINK_SUBTARGET_SPEC \ diff -Naur 
'--exclude=*.swp' gcc-5.3.0-original/gcc/config/freebsd-spec.h gcc-5.3.0-zip/gcc/config/freebsd-spec.h --- gcc-5.3.0-original/gcc/config/freebsd-spec.h 2016-05-12 21:52:06.141764778 -0400 +++ gcc-5.3.0-zip/gcc/config/freebsd-spec.h 2015-06-25 13:53:14.000000000 -0400 @@ -129,9 +129,9 @@ #endif #if FBSD_MAJOR < 6 -#define FBSD_DYNAMIC_LINKER "/tools/libexec/ld-elf.so.1" +#define FBSD_DYNAMIC_LINKER "/usr/libexec/ld-elf.so.1" #else -#define FBSD_DYNAMIC_LINKER "/tools/libexec/ld-elf.so.1" +#define FBSD_DYNAMIC_LINKER "/libexec/ld-elf.so.1" #endif /* NOTE: The freebsd-spec.h header is included also for various diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/config/frv/linux.h gcc-5.3.0-zip/gcc/config/frv/linux.h --- gcc-5.3.0-original/gcc/config/frv/linux.h 2016-05-12 21:52:06.141764778 -0400 +++ gcc-5.3.0-zip/gcc/config/frv/linux.h 2015-01-05 07:33:28.000000000 -0500 @@ -34,7 +34,7 @@ #define ENDFILE_SPEC \ "%{shared|pie:crtendS.o%s;:crtend.o%s} crtn.o%s" -#define GLIBC_DYNAMIC_LINKER "/tools/lib/ld.so.1" +#define GLIBC_DYNAMIC_LINKER "/lib/ld.so.1" #undef LINK_SPEC #define LINK_SPEC "\ diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/config/i386/gnu.h gcc-5.3.0-zip/gcc/config/i386/gnu.h --- gcc-5.3.0-original/gcc/config/i386/gnu.h 2016-05-12 21:52:06.141764778 -0400 +++ gcc-5.3.0-zip/gcc/config/i386/gnu.h 2015-01-05 07:33:28.000000000 -0500 @@ -22,7 +22,7 @@ #define GNU_USER_LINK_EMULATION "elf_i386" #undef GNU_USER_DYNAMIC_LINKER -#define GNU_USER_DYNAMIC_LINKER "/tools/lib/ld.so" +#define GNU_USER_DYNAMIC_LINKER "/lib/ld.so" #undef STARTFILE_SPEC #if defined HAVE_LD_PIE diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/config/i386/kfreebsd-gnu64.h gcc-5.3.0-zip/gcc/config/i386/kfreebsd-gnu64.h --- gcc-5.3.0-original/gcc/config/i386/kfreebsd-gnu64.h 2016-05-12 21:52:06.141764778 -0400 +++ gcc-5.3.0-zip/gcc/config/i386/kfreebsd-gnu64.h 2015-01-05 07:33:28.000000000 -0500 @@ -22,6 +22,6 @@ #define GNU_USER_LINK_EMULATION64 "elf_x86_64_fbsd" #define 
GNU_USER_LINK_EMULATIONX32 "elf32_x86_64_fbsd" -#define GLIBC_DYNAMIC_LINKER32 "/tools/lib/ld.so.1" -#define GLIBC_DYNAMIC_LINKER64 "/tools/lib/ld-kfreebsd-x86-64.so.1" -#define GLIBC_DYNAMIC_LINKERX32 "/tools/lib/ld-kfreebsd-x32.so.1" +#define GLIBC_DYNAMIC_LINKER32 "/lib/ld.so.1" +#define GLIBC_DYNAMIC_LINKER64 "/lib/ld-kfreebsd-x86-64.so.1" +#define GLIBC_DYNAMIC_LINKERX32 "/lib/ld-kfreebsd-x32.so.1" diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/config/i386/kfreebsd-gnu.h gcc-5.3.0-zip/gcc/config/i386/kfreebsd-gnu.h --- gcc-5.3.0-original/gcc/config/i386/kfreebsd-gnu.h 2016-05-12 21:52:06.141764778 -0400 +++ gcc-5.3.0-zip/gcc/config/i386/kfreebsd-gnu.h 2015-01-05 07:33:28.000000000 -0500 @@ -19,4 +19,4 @@ <http://www.gnu.org/licenses/>. */ #define GNU_USER_LINK_EMULATION "elf_i386_fbsd" -#define GLIBC_DYNAMIC_LINKER "/tools/lib/ld.so.1" +#define GLIBC_DYNAMIC_LINKER "/lib/ld.so.1" diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/config/i386/linux64.h gcc-5.3.0-zip/gcc/config/i386/linux64.h --- gcc-5.3.0-original/gcc/config/i386/linux64.h 2016-05-12 21:52:06.141764778 -0400 +++ gcc-5.3.0-zip/gcc/config/i386/linux64.h 2015-01-05 07:33:28.000000000 -0500 @@ -27,6 +27,6 @@ #define GNU_USER_LINK_EMULATION64 "elf_x86_64" #define GNU_USER_LINK_EMULATIONX32 "elf32_x86_64" -#define GLIBC_DYNAMIC_LINKER32 "/tools/lib/ld-linux.so.2" -#define GLIBC_DYNAMIC_LINKER64 "/tools/lib64/ld-linux-x86-64.so.2" -#define GLIBC_DYNAMIC_LINKERX32 "/tools/libx32/ld-linux-x32.so.2" +#define GLIBC_DYNAMIC_LINKER32 "/lib/ld-linux.so.2" +#define GLIBC_DYNAMIC_LINKER64 "/lib64/ld-linux-x86-64.so.2" +#define GLIBC_DYNAMIC_LINKERX32 "/libx32/ld-linux-x32.so.2" diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/config/i386/linux.h gcc-5.3.0-zip/gcc/config/i386/linux.h --- gcc-5.3.0-original/gcc/config/i386/linux.h 2016-05-12 21:52:06.141764778 -0400 +++ gcc-5.3.0-zip/gcc/config/i386/linux.h 2015-01-05 07:33:28.000000000 -0500 @@ -20,4 +20,4 @@ <http://www.gnu.org/licenses/>. 
*/ #define GNU_USER_LINK_EMULATION "elf_i386" -#define GLIBC_DYNAMIC_LINKER "/tools/lib/ld-linux.so.2" +#define GLIBC_DYNAMIC_LINKER "/lib/ld-linux.so.2" diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/config/ia64/linux.h gcc-5.3.0-zip/gcc/config/ia64/linux.h --- gcc-5.3.0-original/gcc/config/ia64/linux.h 2016-05-12 21:52:06.141764778 -0400 +++ gcc-5.3.0-zip/gcc/config/ia64/linux.h 2015-01-05 07:33:28.000000000 -0500 @@ -55,7 +55,7 @@ /* Define this for shared library support because it isn't in the main linux.h file. */ -#define GLIBC_DYNAMIC_LINKER "/tools/lib/ld-linux-ia64.so.2" +#define GLIBC_DYNAMIC_LINKER "/lib/ld-linux-ia64.so.2" #undef LINK_SPEC #define LINK_SPEC "\ diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/config/knetbsd-gnu.h gcc-5.3.0-zip/gcc/config/knetbsd-gnu.h --- gcc-5.3.0-original/gcc/config/knetbsd-gnu.h 2016-05-12 21:52:06.141764778 -0400 +++ gcc-5.3.0-zip/gcc/config/knetbsd-gnu.h 2015-01-05 07:33:28.000000000 -0500 @@ -32,4 +32,4 @@ #undef GNU_USER_DYNAMIC_LINKER -#define GNU_USER_DYNAMIC_LINKER "/tools/lib/ld.so.1" +#define GNU_USER_DYNAMIC_LINKER "/lib/ld.so.1" diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/config/kopensolaris-gnu.h gcc-5.3.0-zip/gcc/config/kopensolaris-gnu.h --- gcc-5.3.0-original/gcc/config/kopensolaris-gnu.h 2016-05-12 21:52:06.141764778 -0400 +++ gcc-5.3.0-zip/gcc/config/kopensolaris-gnu.h 2015-01-05 07:33:28.000000000 -0500 @@ -31,5 +31,4 @@ while (0) #undef GNU_USER_DYNAMIC_LINKER -#define GNU_USER_DYNAMIC_LINKER "/tools/lib/ld.so.1" - +#define GNU_USER_DYNAMIC_LINKER "/lib/ld.so.1" diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/config/linux.h gcc-5.3.0-zip/gcc/config/linux.h --- gcc-5.3.0-original/gcc/config/linux.h 2016-05-12 21:52:06.141764778 -0400 +++ gcc-5.3.0-zip/gcc/config/linux.h 2015-01-05 07:33:28.000000000 -0500 @@ -73,10 +73,10 @@ GLIBC_DYNAMIC_LINKER must be defined for each target using them, or GLIBC_DYNAMIC_LINKER32 and GLIBC_DYNAMIC_LINKER64 for targets supporting both 32-bit and 
64-bit compilation. */ -#define UCLIBC_DYNAMIC_LINKER "/tools/lib/ld-uClibc.so.0" -#define UCLIBC_DYNAMIC_LINKER32 "/tools/lib/ld-uClibc.so.0" -#define UCLIBC_DYNAMIC_LINKER64 "/tools/lib/ld64-uClibc.so.0" -#define UCLIBC_DYNAMIC_LINKERX32 "/tools/lib/ldx32-uClibc.so.0" +#define UCLIBC_DYNAMIC_LINKER "/lib/ld-uClibc.so.0" +#define UCLIBC_DYNAMIC_LINKER32 "/lib/ld-uClibc.so.0" +#define UCLIBC_DYNAMIC_LINKER64 "/lib/ld64-uClibc.so.0" +#define UCLIBC_DYNAMIC_LINKERX32 "/lib/ldx32-uClibc.so.0" #define BIONIC_DYNAMIC_LINKER "/system/bin/linker" #define BIONIC_DYNAMIC_LINKER32 "/system/bin/linker" #define BIONIC_DYNAMIC_LINKER64 "/system/bin/linker64" diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/config/lm32/uclinux-elf.h gcc-5.3.0-zip/gcc/config/lm32/uclinux-elf.h --- gcc-5.3.0-original/gcc/config/lm32/uclinux-elf.h 2016-05-12 21:52:06.141764778 -0400 +++ gcc-5.3.0-zip/gcc/config/lm32/uclinux-elf.h 2015-01-05 07:33:28.000000000 -0500 @@ -67,7 +67,7 @@ %{shared:-shared} \ %{symbolic:-Bsymbolic} \ %{rdynamic:-export-dynamic} \ - -dynamic-linker /tools/lib/ld-linux.so.2" + -dynamic-linker /lib/ld-linux.so.2" #define TARGET_OS_CPP_BUILTINS() GNU_USER_TARGET_OS_CPP_BUILTINS() diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/config/m68k/linux.h gcc-5.3.0-zip/gcc/config/m68k/linux.h --- gcc-5.3.0-original/gcc/config/m68k/linux.h 2016-05-12 21:52:06.141764778 -0400 +++ gcc-5.3.0-zip/gcc/config/m68k/linux.h 2015-01-05 07:33:28.000000000 -0500 @@ -71,7 +71,7 @@ When the -shared link option is used a final link is not being done. 
*/ -#define GLIBC_DYNAMIC_LINKER "/tools/lib/ld.so.1" +#define GLIBC_DYNAMIC_LINKER "/lib/ld.so.1" #undef LINK_SPEC #define LINK_SPEC "-m m68kelf %{shared} \ diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/config/microblaze/linux.h gcc-5.3.0-zip/gcc/config/microblaze/linux.h --- gcc-5.3.0-original/gcc/config/microblaze/linux.h 2016-05-12 21:52:06.141764778 -0400 +++ gcc-5.3.0-zip/gcc/config/microblaze/linux.h 2015-05-28 10:08:19.000000000 -0400 @@ -28,7 +28,7 @@ #undef TLS_NEEDS_GOT #define TLS_NEEDS_GOT 1 -#define DYNAMIC_LINKER "/tools/lib/ld.so.1" +#define DYNAMIC_LINKER "/lib/ld.so.1" #undef SUBTARGET_EXTRA_SPECS #define SUBTARGET_EXTRA_SPECS \ { "dynamic_linker", DYNAMIC_LINKER } diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/config/mips/linux.h gcc-5.3.0-zip/gcc/config/mips/linux.h --- gcc-5.3.0-original/gcc/config/mips/linux.h 2016-05-12 21:52:06.141764778 -0400 +++ gcc-5.3.0-zip/gcc/config/mips/linux.h 2015-01-05 07:33:28.000000000 -0500 @@ -22,20 +22,20 @@ #define GNU_USER_LINK_EMULATIONN32 "elf32%{EB:b}%{EL:l}tsmipn32" #define GLIBC_DYNAMIC_LINKER32 \ - "%{mnan=2008:/tools/lib/ld-linux-mipsn8.so.1;:/tools/lib/ld.so.1}" + "%{mnan=2008:/lib/ld-linux-mipsn8.so.1;:/lib/ld.so.1}" #define GLIBC_DYNAMIC_LINKER64 \ - "%{mnan=2008:/tools/lib64/ld-linux-mipsn8.so.1;:/tools/lib64/ld.so.1}" + "%{mnan=2008:/lib64/ld-linux-mipsn8.so.1;:/lib64/ld.so.1}" #define GLIBC_DYNAMIC_LINKERN32 \ - "%{mnan=2008:/tools/lib32/ld-linux-mipsn8.so.1;:/tools/lib32/ld.so.1}" + "%{mnan=2008:/lib32/ld-linux-mipsn8.so.1;:/lib32/ld.so.1}" #undef UCLIBC_DYNAMIC_LINKER32 #define UCLIBC_DYNAMIC_LINKER32 \ - "%{mnan=2008:/tools/lib/ld-uClibc-mipsn8.so.0;:/tools/lib/ld-uClibc.so.0}" + "%{mnan=2008:/lib/ld-uClibc-mipsn8.so.0;:/lib/ld-uClibc.so.0}" #undef UCLIBC_DYNAMIC_LINKER64 #define UCLIBC_DYNAMIC_LINKER64 \ - "%{mnan=2008:/tools/lib/ld64-uClibc-mipsn8.so.0;:/tools/lib/ld64-uClibc.so.0}" + "%{mnan=2008:/lib/ld64-uClibc-mipsn8.so.0;:/lib/ld64-uClibc.so.0}" #define 
UCLIBC_DYNAMIC_LINKERN32 \ - "%{mnan=2008:/tools/lib32/ld-uClibc-mipsn8.so.0;:/tools/lib32/ld-uClibc.so.0}" + "%{mnan=2008:/lib32/ld-uClibc-mipsn8.so.0;:/lib32/ld-uClibc.so.0}" #define BIONIC_DYNAMIC_LINKERN32 "/system/bin/linker32" #define GNU_USER_DYNAMIC_LINKERN32 \ diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/config/mn10300/linux.h gcc-5.3.0-zip/gcc/config/mn10300/linux.h --- gcc-5.3.0-original/gcc/config/mn10300/linux.h 2016-05-12 21:52:06.141764778 -0400 +++ gcc-5.3.0-zip/gcc/config/mn10300/linux.h 2015-01-05 07:33:28.000000000 -0500 @@ -32,7 +32,7 @@ #undef ASM_SPEC #define ASM_SPEC "" -#define GLIBC_DYNAMIC_LINKER "/tools/lib/ld.so.1" +#define GLIBC_DYNAMIC_LINKER "/lib/ld.so.1" #undef LINK_SPEC #define LINK_SPEC "%{mrelax:--relax} %{shared:-shared} \ diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/config/pa/pa-linux.h gcc-5.3.0-zip/gcc/config/pa/pa-linux.h --- gcc-5.3.0-original/gcc/config/pa/pa-linux.h 2016-05-12 21:52:06.141764778 -0400 +++ gcc-5.3.0-zip/gcc/config/pa/pa-linux.h 2015-09-24 20:04:26.000000000 -0400 @@ -37,7 +37,7 @@ /* Define this for shared library support because it isn't in the main linux.h file. 
*/ -#define GLIBC_DYNAMIC_LINKER "/tools/lib/ld.so.1" +#define GLIBC_DYNAMIC_LINKER "/lib/ld.so.1" #undef LINK_SPEC #define LINK_SPEC "\ diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/config/rs6000/linux64.h gcc-5.3.0-zip/gcc/config/rs6000/linux64.h --- gcc-5.3.0-original/gcc/config/rs6000/linux64.h 2016-05-12 21:52:06.141764778 -0400 +++ gcc-5.3.0-zip/gcc/config/rs6000/linux64.h 2015-03-09 19:18:57.000000000 -0400 @@ -357,14 +357,14 @@ #undef LINK_OS_DEFAULT_SPEC #define LINK_OS_DEFAULT_SPEC "%(link_os_linux)" -#define GLIBC_DYNAMIC_LINKER32 "/tools/lib/ld.so.1" +#define GLIBC_DYNAMIC_LINKER32 "/lib/ld.so.1" #ifdef LINUX64_DEFAULT_ABI_ELFv2 -#define GLIBC_DYNAMIC_LINKER64 "%{mabi=elfv1:/tools/lib64/ld64.so.1;:/tools/lib64/ld64.so.2}" +#define GLIBC_DYNAMIC_LINKER64 "%{mabi=elfv1:/lib64/ld64.so.1;:/lib64/ld64.so.2}" #else -#define GLIBC_DYNAMIC_LINKER64 "%{mabi=elfv2:/tools/lib64/ld64.so.2;:/tools/lib64/ld64.so.1}" +#define GLIBC_DYNAMIC_LINKER64 "%{mabi=elfv2:/lib64/ld64.so.2;:/lib64/ld64.so.1}" #endif -#define UCLIBC_DYNAMIC_LINKER32 "/tools/lib/ld-uClibc.so.0" -#define UCLIBC_DYNAMIC_LINKER64 "/tools/lib/ld64-uClibc.so.0" +#define UCLIBC_DYNAMIC_LINKER32 "/lib/ld-uClibc.so.0" +#define UCLIBC_DYNAMIC_LINKER64 "/lib/ld64-uClibc.so.0" #if DEFAULT_LIBC == LIBC_UCLIBC #define CHOOSE_DYNAMIC_LINKER(G, U) "%{mglibc:" G ";:" U "}" #elif DEFAULT_LIBC == LIBC_GLIBC diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/config/rs6000/sysv4.h gcc-5.3.0-zip/gcc/config/rs6000/sysv4.h --- gcc-5.3.0-original/gcc/config/rs6000/sysv4.h 2016-05-12 21:52:06.141764778 -0400 +++ gcc-5.3.0-zip/gcc/config/rs6000/sysv4.h 2015-09-24 09:46:45.000000000 -0400 @@ -757,8 +757,8 @@ #define LINK_START_LINUX_SPEC "" -#define GLIBC_DYNAMIC_LINKER "/tools/lib/ld.so.1" -#define UCLIBC_DYNAMIC_LINKER "/tools/lib/ld-uClibc.so.0" +#define GLIBC_DYNAMIC_LINKER "/lib/ld.so.1" +#define UCLIBC_DYNAMIC_LINKER "/lib/ld-uClibc.so.0" #if DEFAULT_LIBC == LIBC_UCLIBC #define CHOOSE_DYNAMIC_LINKER(G, U) 
"%{mglibc:" G ";:" U "}" #elif !defined (DEFAULT_LIBC) || DEFAULT_LIBC == LIBC_GLIBC diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/config/s390/linux.h gcc-5.3.0-zip/gcc/config/s390/linux.h --- gcc-5.3.0-original/gcc/config/s390/linux.h 2016-05-12 21:52:06.141764778 -0400 +++ gcc-5.3.0-zip/gcc/config/s390/linux.h 2015-05-11 03:14:10.000000000 -0400 @@ -60,8 +60,8 @@ #define MULTILIB_DEFAULTS { "m31" } #endif -#define GLIBC_DYNAMIC_LINKER32 "/tools/lib/ld.so.1" -#define GLIBC_DYNAMIC_LINKER64 "/tools/lib/ld64.so.1" +#define GLIBC_DYNAMIC_LINKER32 "/lib/ld.so.1" +#define GLIBC_DYNAMIC_LINKER64 "/lib/ld64.so.1" #undef LINK_SPEC #define LINK_SPEC \ diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/config/sh/linux.h gcc-5.3.0-zip/gcc/config/sh/linux.h --- gcc-5.3.0-original/gcc/config/sh/linux.h 2016-05-12 21:52:06.141764778 -0400 +++ gcc-5.3.0-zip/gcc/config/sh/linux.h 2015-01-05 07:33:28.000000000 -0500 @@ -43,7 +43,7 @@ #define TARGET_ASM_FILE_END file_end_indicate_exec_stack -#define GLIBC_DYNAMIC_LINKER "/tools/lib/ld-linux.so.2" +#define GLIBC_DYNAMIC_LINKER "/lib/ld-linux.so.2" #undef SUBTARGET_LINK_EMUL_SUFFIX #define SUBTARGET_LINK_EMUL_SUFFIX "_linux" diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/config/sparc/linux64.h gcc-5.3.0-zip/gcc/config/sparc/linux64.h --- gcc-5.3.0-original/gcc/config/sparc/linux64.h 2016-05-12 21:52:06.141764778 -0400 +++ gcc-5.3.0-zip/gcc/config/sparc/linux64.h 2015-01-05 07:33:28.000000000 -0500 @@ -84,8 +84,8 @@ When the -shared link option is used a final link is not being done. 
*/ -#define GLIBC_DYNAMIC_LINKER32 "/tools/lib/ld-linux.so.2" -#define GLIBC_DYNAMIC_LINKER64 "/tools/lib64/ld-linux.so.2" +#define GLIBC_DYNAMIC_LINKER32 "/lib/ld-linux.so.2" +#define GLIBC_DYNAMIC_LINKER64 "/lib64/ld-linux.so.2" #ifdef SPARC_BI_ARCH @@ -193,7 +193,7 @@ #else /* !SPARC_BI_ARCH */ #undef LINK_SPEC -#define LINK_SPEC "-m elf64_sparc -Y P,%R/tools/lib64 %{shared:-shared} \ +#define LINK_SPEC "-m elf64_sparc -Y P,%R/usr/lib64 %{shared:-shared} \ %{!shared: \ %{!static: \ %{rdynamic:-export-dynamic} \ diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/config/sparc/linux.h gcc-5.3.0-zip/gcc/config/sparc/linux.h --- gcc-5.3.0-original/gcc/config/sparc/linux.h 2016-05-12 21:52:06.141764778 -0400 +++ gcc-5.3.0-zip/gcc/config/sparc/linux.h 2015-01-05 07:33:28.000000000 -0500 @@ -83,7 +83,7 @@ When the -shared link option is used a final link is not being done. */ -#define GLIBC_DYNAMIC_LINKER "/tools/lib/ld-linux.so.2" +#define GLIBC_DYNAMIC_LINKER "/lib/ld-linux.so.2" #undef LINK_SPEC #define LINK_SPEC "-m elf32_sparc %{shared:-shared} \ diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/config/vax/linux.h gcc-5.3.0-zip/gcc/config/vax/linux.h --- gcc-5.3.0-original/gcc/config/vax/linux.h 2016-05-12 21:52:06.141764778 -0400 +++ gcc-5.3.0-zip/gcc/config/vax/linux.h 2015-01-05 07:33:28.000000000 -0500 @@ -41,7 +41,7 @@ %{!shared: \ %{!static: \ %{rdynamic:-export-dynamic} \ - -dynamic-linker /tools/lib/ld.so.1} \ + -dynamic-linker /lib/ld.so.1} \ %{static:-static}}" #undef WCHAR_TYPE diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/config/xtensa/linux.h gcc-5.3.0-zip/gcc/config/xtensa/linux.h --- gcc-5.3.0-original/gcc/config/xtensa/linux.h 2016-05-12 21:52:06.141764778 -0400 +++ gcc-5.3.0-zip/gcc/config/xtensa/linux.h 2015-01-05 07:33:28.000000000 -0500 @@ -44,7 +44,7 @@ %{mlongcalls:--longcalls} \ %{mno-longcalls:--no-longcalls}" -#define GLIBC_DYNAMIC_LINKER "/tools/lib/ld.so.1" +#define GLIBC_DYNAMIC_LINKER "/lib/ld.so.1" #undef LINK_SPEC #define 
LINK_SPEC \ diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/config/zip/netbsd.h gcc-5.3.0-zip/gcc/config/zip/netbsd.h --- gcc-5.3.0-original/gcc/config/zip/netbsd.h 1969-12-31 19:00:00.000000000 -0500 +++ gcc-5.3.0-zip/gcc/config/zip/netbsd.h 2016-01-30 15:04:14.796899050 -0500 @@ -0,0 +1,82 @@ +//////////////////////////////////////////////////////////////////////////////// +// +// Filename: netbsd.h +// +// Project: Zip CPU backend for the GNU Compiler Collection +// +// Purpose: +// +// Creator: Dan Gisselquist, Ph.D. +// Gisselquist Technology, LLC +// +//////////////////////////////////////////////////////////////////////////////// +// +// Copyright (C) 2016, Gisselquist Technology, LLC +// +// This program is free software (firmware): you can redistribute it and/or +// modify it under the terms of the GNU General Public License as published +// by the Free Software Foundation, either version 3 of the License, or (at +// your option) any later version. +// +// This program is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// for more details. +// +// You should have received a copy of the GNU General Public License along +// with this program. (It's in the $(ROOT)/doc directory, run make with no +// target there if the PDF file isn't present.) If not, see +// <http://www.gnu.org/licenses/> for a copy. +// +// License: GPL, v3, as defined and found on www.gnu.org, +// http://www.gnu.org/licenses/gpl.html +// +// +//////////////////////////////////////////////////////////////////////////////// +#ifndef ZIP_NETBSD_H +#define ZIP_NETBSD_H + +/* Define default target values. 
*/ + +#undef MACHINE_TYPE +#define MACHINE_TYPE "NetBSD/Zip ELF" + +#undef TARGET_OS_CPP_BUILTINS +#define TARGET_OS_CPP_BUILTINS() \ + do { NETBSD_OS_CPP_BUILTINS_ELF(); \ + builtin_define("__ZIPCPU__"); \ + builtin_assert("cpu=zip"); \ + builtin_assert("machine=zip"); \ + } while(0); + +#undef CPP_SPEC +#define CPP_SPEC NETBSD_CPP_SPEC + +#undef STARTFILE_SPEC +#define STARTFILE_SPEC NETBSD_STARTFILE_SPEC + +#undef ENDFILE_SPEC +#define ENDFILE_SPEC NETBSD_ENDFILE_SPEC + +#undef LIB_SPEC +#define LIB_SPEC NETBSD_LIB_SPEC + +#undef TARGET_VERSION +#define TARGET_VERSION fprintf(stderr, " (%s)", MACHINE_TYPE); + +/* Make gcc agree with <machine/ansi.h> */ + +#undef WCHAR_TYPE +#define WCHAR_TYPE "int" + +#undef WCHAR_TYPE_SIZE +#define WCHAR_TYPE_SIZE 32 + +#undef WINT_TYPE +#define WINT_TYPE "int" + +/* Clean up after the generic Zip/ELF configuration. */ +#undef MD_EXEC_PREFIX +#undef MD_STARTFILE_PREFIX + +#endif /* ZIP_NETBSD_H */ diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/config/zip/t-zip gcc-5.3.0-zip/gcc/config/zip/t-zip --- gcc-5.3.0-original/gcc/config/zip/t-zip 1969-12-31 19:00:00.000000000 -0500 +++ gcc-5.3.0-zip/gcc/config/zip/t-zip 2016-02-04 19:00:59.939652587 -0500 @@ -0,0 +1,47 @@ +################################################################################ +## +## Filename: t-zip +## +## Project: Zip CPU backend for the GNU Compiler Collection +## +## Purpose: +## +## Creator: Dan Gisselquist, Ph.D. +## Gisselquist Technology, LLC +## +################################################################################ +## +## Copyright (C) 2016, Gisselquist Technology, LLC +## +## This program is free software (firmware): you can redistribute it and/or +## modify it under the terms of the GNU General Public License as published +## by the Free Software Foundation, either version 3 of the License, or (at +## your option) any later version. 
+## +## This program is distributed in the hope that it will be useful, but WITHOUT +## ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY or +## FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +## for more details. +## +## You should have received a copy of the GNU General Public License along +## with this program. (It's in the $(ROOT)/doc directory, run make with no +## target there if the PDF file isn't present.) If not, see +## <http://www.gnu.org/licenses/> for a copy. +## +## License: GPL, v3, as defined and found on www.gnu.org, +## http://www.gnu.org/licenses/gpl.html +## +## +################################################################################ + +FPBIT = fp-bit.c +DPBIT = dp-bit.c + +# dp-bit.c: $(srcdir)/config/fp-bit.c + # cat $(srcdir)/config/fp-bit.c > dp-bit.c +# +# fp-bit.c: $(srcdir)/config/fp-bit.c + # echo '#define FLOAT" > fp-bit.c + # cat $(srcdir)/config/fp-bit.c >> fp-bit.c + + diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/config/zip/zip.c gcc-5.3.0-zip/gcc/config/zip/zip.c --- gcc-5.3.0-original/gcc/config/zip/zip.c 1969-12-31 19:00:00.000000000 -0500 +++ gcc-5.3.0-zip/gcc/config/zip/zip.c 2016-05-09 11:40:35.637861735 -0400 @@ -0,0 +1,2286 @@ +//////////////////////////////////////////////////////////////////////////////// +// +// Filename: zip.c +// +// Project: Zip CPU backend for the GNU Compiler Collection +// +// Purpose: +// +// Creator: Dan Gisselquist, Ph.D. +// Gisselquist Technology, LLC +// +//////////////////////////////////////////////////////////////////////////////// +// +// Copyright (C) 2016, Gisselquist Technology, LLC +// +// This program is free software (firmware): you can redistribute it and/or +// modify it under the terms of the GNU General Public License as published +// by the Free Software Foundation, either version 3 of the License, or (at +// your option) any later version. 
+// +// This program is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// for more details. +// +// You should have received a copy of the GNU General Public License along +// with this program. (It's in the $(ROOT)/doc directory, run make with no +// target there if the PDF file isn't present.) If not, see +// <http://www.gnu.org/licenses/> for a copy. +// +// License: GPL, v3, as defined and found on www.gnu.org, +// http://www.gnu.org/licenses/gpl.html +// +// +//////////////////////////////////////////////////////////////////////////////// +#include "config.h" +#include "system.h" +#include "coretypes.h" +#include "tm.h" +#include "rtl.h" +#include "dominance.h" +#include "cfg.h" +#include "cfgrtl.h" +#include "cfganal.h" +#include "lcm.h" +#include "cfgbuild.h" +#include "cfgcleanup.h" +#include "predict.h" +#include "basic-block.h" +#include "df.h" +#include "hashtab.h" +#include "hash-set.h" +#include "machmode.h" +#include "symtab.h" +#include "rtlhash.h" +#include "tree.h" +#include "regs.h" +#include "hard-reg-set.h" +#include "real.h" +#include "insn-config.h" +#include "conditions.h" +#include "output.h" +#include "insn-attr.h" +#include "flags.h" +#include "expr.h" +#include "function.h" +#include "recog.h" +#include "toplev.h" +#include "ggc.h" +#include "builtins.h" +#include "calls.h" +#include "langhooks.h" +#include "optabs.h" +#include "explow.h" +#include "emit-rtl.h" +#include "ifcvt.h" + +// #include "tmp_p.h" +#include "target.h" +#include "target-def.h" +// #include "tm-constrs.h" +#include "tm-preds.h" + +#include "diagnostic.h" +// #include "integrate.h" + +// static int zip_arg_partial_bytes(CUMULATIVE_ARGS *, enum machine_mode, tree, bool); +// static bool zip_pass_by_reference(CUMULATIVE_ARGS *, enum machine_mode, const_tree, bool); +static bool zip_return_in_memory(const_tree, 
const_tree); +static bool zip_frame_pointer_required(void); + +static void zip_function_arg_advance(cumulative_args_t ca, enum machine_mode mode, + const_tree type, bool named); +static rtx zip_function_arg(cumulative_args_t ca, enum machine_mode mode, const_tree type, bool named); + +static void zip_asm_trampoline_template(FILE *); +static void zip_trampoline_init(rtx, tree, rtx); +static void zip_init_builtins(void); +static tree zip_builtin_decl(unsigned, bool); +// static void zip_asm_output_anchor(rtx x); + void zip_asm_output_def(FILE *s, const char *n, const char *v); +static rtx zip_expand_builtin(tree exp, rtx target, rtx subtarget, + enum machine_mode tmode, int ignore); +static bool zip_scalar_mode_supported_p(enum machine_mode mode); +static bool zip_libgcc_floating_mode_supported_p(enum machine_mode mode); +static int zip_address_cost(rtx addr, enum machine_mode mode, addr_space_t as, bool spd); +static bool zip_mode_dependent_address_p(const_rtx addr, addr_space_t); +static unsigned HOST_WIDE_INT zip_const_anchor = 0x20000; +static HOST_WIDE_INT zip_min_opb_imm = -0x20000; +static HOST_WIDE_INT zip_max_opb_imm = 0x1ffff; +static HOST_WIDE_INT zip_min_anchor_offset = -0x2000; +static HOST_WIDE_INT zip_max_anchor_offset = 0x1fff; +static HOST_WIDE_INT zip_min_mov_offset = -0x1000; +static HOST_WIDE_INT zip_max_mov_offset = 0x0fff; +static int zip_sched_issue_rate(void) { return 1; } +static bool zip_legitimate_address_p(machine_mode, rtx, bool); +static bool zip_legitimate_move_operand_p(machine_mode, rtx, bool); + void zip_debug_rtx_pfx(const char *, const_rtx x); + void zip_debug_rtx(const_rtx x); +static void zip_override_options(void); +static bool zip_can_eliminate(int from ATTRIBUTE_UNUSED, int to); +static int zip_memory_move_cost(machine_mode, reg_class_t, bool); +static rtx zip_legitimize_address(rtx x, rtx oldx, machine_mode mode); +static bool zip_cannot_modify_jumps_p(void); +#ifdef HAVE_cc0 + void zip_update_cc_notice(rtx exp, rtx_insn 
*insn); +#error "We're not supposed to have CC0 anymore" +#else +static bool zip_fixed_condition_code_regs(unsigned int *a, unsigned int *b); +#endif + + +#define ALL_DEBUG_OFF false +#define ALL_DEBUG_ON false + +enum ZIP_BUILTIN_ID_CODE { + ZIP_BUILTIN_RTU, + ZIP_BUILTIN_HALT, + ZIP_BUILTIN_IDLE, + ZIP_BUILTIN_SYSCALL, + ZIP_BUILTIN_SAVE_CONTEXT, + ZIP_BUILTIN_RESTORE_CONTEXT, + ZIP_BUILTIN_BITREV, + ZIP_BUILTIN_CC, + ZIP_BUILTIN_UCC, + ZIP_BUILTIN_MAX +}; + +static GTY (()) tree zip_builtins[(int)ZIP_BUILTIN_MAX]; +static enum insn_code zip_builtins_icode[(int)ZIP_BUILTIN_MAX]; + + +#include "gt-zip.h" + +/* The Global 'targetm' Variable. */ +struct gcc_target targetm = TARGET_INITIALIZER; + + +enum reg_class zip_reg_class(int); + +#define LOSE_AND_RETURN(msgid, x) \ + do { \ + zip_operand_lossage(msgid, x); \ + return; \ + } while(0) + +/* Per-function machine data. */ +struct GTY(()) machine_function +{ + /* number of pretented arguments for varargs */ + int pretend_size; + + /* Number of bytes saved on the stack for local variables. */ + int local_vars_size; + + /* Number of bytes saved on stack for register save area */ + int saved_reg_size; + int save_ret; + + int sp_fp_offset; + bool fp_needed; + int size_for_adjusting_sp; +}; + +/* Allocate a chunk of memory for per-function machine-dependent data. */ + +static struct machine_function * +zip_init_machine_status(void) { + return ggc_cleared_alloc<machine_function>(); +} + +static void +zip_override_options(void) +{ + init_machine_status = zip_init_machine_status; +} + +enum reg_class +zip_reg_class(int regno) +{ + if (is_ZIP_GENERAL_REG(regno)) { + return GENERAL_REGS; + } else if (is_ZIP_REG(regno)) { + return ALL_REGS; + } return NO_REGS; +} + +/* Worker function for TARGET_RETURN_IN_MEMORY. 
*/ +static bool +zip_return_in_memory(const_tree type, const_tree fntype ATTRIBUTE_UNUSED) { + const HOST_WIDE_INT size = int_size_in_bytes(type); + return (size == -1)||(size > UNITS_PER_WORD); +} + +/* Emit an error emssage when we're in an asm, and a fatal error for "normal" + * insn. Formatted output isn't easily implemented, since we use output operand + * lossage to output the actual message and handle the categorization of the + * error. */ + +static void +zip_operand_lossage(const char *msgid, rtx op) { + fprintf(stderr, "Operand lossage??\n"); + debug_rtx(op); + zip_debug_rtx(op); + output_operand_lossage("%s", msgid); +} + +/* The PRINT_OPERAND_ADDRESS worker. */ +void +zip_print_operand_address(FILE *file, rtx x) { + const bool dbg = ((ALL_DEBUG_ON)||(false))&&(!ALL_DEBUG_OFF); + + if (dbg) zip_debug_rtx(x); + switch(GET_CODE(x)) { + case REG: + gcc_assert(is_ZIP_REG(REGNO(x))); + fprintf(file, "(%s)", reg_names[REGNO(x)]); + break; + case SYMBOL_REF: + fprintf(file, "%s", XSTR(x,0)); + break; + case LABEL_REF: + x = LABEL_REF_LABEL(x); + case CODE_LABEL: + { char buf[256]; + ASM_GENERATE_INTERNAL_LABEL(buf, "L", CODE_LABEL_NUMBER(x)); +#ifdef ASM_OUTPUT_LABEL_REF + ASM_OUTPUT_LABEL_REF(file, buf); +#else + assemble_name(file, buf); +#endif + } + break; + case PLUS: + if (!REG_P(XEXP(x, 0))) { + fprintf(stderr, "Unsupported address construct\n"); + zip_debug_rtx(x); + abort(); + } gcc_assert(is_ZIP_REG(REGNO(XEXP(x,0)))); + if (CONST_INT_P(XEXP(x, 1))) { + if (INTVAL(XEXP(x,1))!=0) { + fprintf(file, "%ld(%s)", + (long)INTVAL(XEXP(x, 1)), + reg_names[REGNO(XEXP(x, 0))]); + } else { + fprintf(file, "(%s)", + reg_names[REGNO(XEXP(x, 0))]); + } + } else if (GET_CODE(XEXP(x,1)) == SYMBOL_REF) { + fprintf(file, "%s(%s)", XSTR(x,0), + reg_names[REGNO(XEXP(x, 0))]); + } else if ((GET_CODE(XEXP(x, 1)) == MINUS) + && (GET_CODE(XEXP(XEXP(x, 1), 0))==SYMBOL_REF) + && (GET_CODE(XEXP(XEXP(x, 1), 1))==SYMBOL_REF)) { + fprintf(file, "%s-%s(%s)", + XSTR(XEXP(XEXP(x, 
1),0),0), + XSTR(XEXP(XEXP(x, 1),1),0), + reg_names[REGNO(XEXP(x, 0))]); + } else + fprintf(file, "#INVALID(%s)", + reg_names[REGNO(XEXP(x, 0))]); + /* + else if (GET_CODE(XEXP(addr, 1)) == LABEL) + fprintf(file, "%s(%s)", + GET_CODE(XEXP(addr, 1)), + reg_names[REGNO(GET_CODE(XEXP(addr, 0)))]); + else if ((GET_CODE(XEXP(addr, 1)) == MINUS) + && (GET_CODE(XEXP(GET_CODE(XEXP(addr, 1)), 0))==LABEL) + && (GET_CODE(XEXP(GET_CODE(XEXP(addr, 1)), 1))==LABEL)) { + fprintf(file, "%s-%s(%s)", + reg_names[REGNO(GET_CODE(XEXP(addr, 0)))]); + reg_names[REGNO(GET_CODE(XEXP(addr, 0)))]); + reg_names[REGNO(GET_CODE(XEXP(addr, 0)))]); + } + */ + break; + // We don't support direct memory addressing within our + // instruction set, even though the instructions themselves + // would support direct memory addressing of the lower 18 bits + // of memory space. + case MEM: + if (dbg) zip_debug_rtx(x); + zip_print_operand_address(file, XEXP(x, 0)); + break; + case CONST_INT: + fprintf(file, "%ld",(long)INTVAL(x)); + break; + default: + fprintf(stderr, "Unknown address format\n"); + zip_debug_rtx(x); + abort(); break; + // output_addr_const(file, x); + break; + } +} + +/* The PRINT_OPERAND worker. */ + +void +zip_print_operand(FILE *file, rtx x, int code) +{ + rtx operand = x; + int rgoff = 0; + + // fprintf(file, "Print Operand!\n"); + + /* New code entries should just be added to the switch below. If + * handling is finished, just return. If handling was just a + * modification of the operand, the modified operand should be put in + * "operand", and then do a break to let default handling + * (zero-modifier) output the operand. + */ + switch(code) { + case 0: + /* No code, print as usual. */ + break; + case 'L': + /* Lower of two registers, print one up */ + rgoff = 1; + break; + case 'R': + case 'H': + /* Higher of a register pair, print normal */ + break; + + default: + LOSE_AND_RETURN("invalid operand modifier letter", x); + } + + /* Print an operand as without a modifier letter. 
*/ + switch (GET_CODE(operand)) { + case REG: + if (REGNO(operand)+rgoff >= FIRST_PSEUDO_REGISTER) + internal_error("internal error: bad register: %d", REGNO(operand)); + fprintf(file, "%s", reg_names[REGNO(operand)+rgoff]); + return; + case SCRATCH: + LOSE_AND_RETURN("Need a scratch register", x); + return; + + case CODE_LABEL: + case LABEL_REF: + case SYMBOL_REF: + case PLUS: + PRINT_OPERAND_ADDRESS(file, operand); + return; + case MEM: + PRINT_OPERAND_ADDRESS(file, XEXP(operand, 0)); + return; + + default: + /* No need to handle all strange variants, let + * output_addr_const do it for us. + */ + if (CONSTANT_P(operand)) { + output_addr_const(file, operand); + return; + } + + LOSE_AND_RETURN("unexpected operand", x); + } +} + +static bool +zip_frame_pointer_required(void) +{ + // This should really depend upon whether we have variable sized + // arguments in our frame or not. Once this fails, let's look + // at what the problem was and then whether or not we can detect + // it. + // + // Use a GCC global to determine our answer + if (cfun->calls_alloca) + return true; + return (frame_pointer_needed); +/* +*/ +} + +/* Determine whether or not a register needs to be saved on the stack or not. + */ +static bool +zip_save_reg(int regno) { + if (regno == 0) + return ((!crtl->is_leaf) + ||((df_regs_ever_live_p(0))&&(!call_used_regs[0]))); + else if ((regno == zip_GOT)&&(!ZIP_PIC)) + return ((df_regs_ever_live_p(regno)) + &&(!call_used_regs[regno])); + else if (regno == zip_FP) + return((zip_frame_pointer_required())||((df_regs_ever_live_p(regno)) + &&(!call_used_regs[regno]))); + else if (regno < zip_FP) + return ((df_regs_ever_live_p(regno)) + &&(!call_used_regs[regno])); + return false; +} + +/* Compute the size of the local area and the size to be adjusted by the + * prologue and epilogue. + * + * Here's what we are looking at (top is the current, bottom is the last ...) 
+ * + * Stack Pointer -> + * Outgoing arguments + * Local variables (could be variable size) + * Frame Pointer -> (= Stack Pointer + sp_fp_offset) + * Saved return address, if saved + * Other Saved registers + * Saved frame pointer (if used) + * Saved R12, if used + * (Stack pointer is not saved) + * Original stack pointer -> (= Stack_Pointer +size_for_adjusting_sp) + * Called arguments (not passed in registers) + * Return arguments (not R1, args.pretend_args_size) + * (Prior function's stack frame ... ) + * + */ +static void +zip_compute_frame(void) { + int regno; + int args_size; + const bool dbg = ((ALL_DEBUG_ON)||(false))&&(!ALL_DEBUG_OFF); + + if (dbg) fprintf(stderr, "ZIP-COMPUTE-FRAME\n"); + // gcc_assert(crtl); + gcc_assert(cfun); + gcc_assert(cfun->machine); + + args_size=(ACCUMULATE_OUTGOING_ARGS ? crtl->outgoing_args_size : 0); + + if(crtl->args.pretend_args_size > 0) { + args_size += crtl->args.pretend_args_size; + // printf("%s pretend_args_size : %d\n", current_function_name(), + // crtl->args.pretend_args_size); + cfun->machine->pretend_size = crtl->args.pretend_args_size; + } + + cfun->machine->local_vars_size = get_frame_size(); + + // Save callee-saved registers. 
+ cfun->machine->saved_reg_size = 0; + for(regno=0; regno < FIRST_PSEUDO_REGISTER; regno++) { + if (zip_save_reg(regno)) + cfun->machine->saved_reg_size ++; + } + + cfun->machine->fp_needed = (zip_frame_pointer_required()); + + if ((cfun->machine->fp_needed)&& + (!df_regs_ever_live_p(zip_FP))) { + cfun->machine->saved_reg_size ++; + } + + cfun->machine->sp_fp_offset = args_size + cfun->machine->local_vars_size; + cfun->machine->size_for_adjusting_sp = cfun->machine->local_vars_size + + cfun->machine->saved_reg_size + + args_size; + if(dbg) { + fprintf(stderr, "\tFRAME-POINTR: %s\n", + cfun->machine->fp_needed?"Yes":"No"); + fprintf(stderr, "\tARGS-SIZE : %d\n", + args_size); + fprintf(stderr, "\tLOCALS-SIZE : %d\n", + cfun->machine->local_vars_size); + fprintf(stderr, "\tREGISTERS : %d\n", + cfun->machine->saved_reg_size); + fprintf(stderr, "\tSP_FP_OFFSET: %d\n", + cfun->machine->sp_fp_offset); + fprintf(stderr, "\tSP-ADJUSTMNT: %d\n", + cfun->machine->size_for_adjusting_sp); + } +} + +void +zip_expand_prologue(void) { + rtx insn; + + const bool dbg = ((ALL_DEBUG_ON)||(false))&&(!ALL_DEBUG_OFF); + zip_compute_frame(); + + if (dbg) fprintf(stderr, "PROLOGUE: Computing Prologue instructions\n"); + if (dbg) fprintf(stderr, "PROLOGUE: SP-FP offset is %d\n", + cfun->machine->sp_fp_offset); + if (cfun->machine->size_for_adjusting_sp != 0) { + insn = emit_insn(gen_subsi3_reg_clobber(stack_pointer_rtx, + stack_pointer_rtx, + gen_int_mode(cfun->machine->size_for_adjusting_sp, + SImode))); + // cfun->machine->sp_fp_offset + + RTX_FRAME_RELATED_P(insn) = 1; + } + + { + int offset = 0, regno; + for(regno=0; regno < FIRST_PSEUDO_REGISTER; regno++) { + if (zip_save_reg(regno)) { + if (dbg) fprintf(stderr, + "PROLOGUE: Saving R%d in %d+%d(SP)\n", + regno, cfun->machine->sp_fp_offset, + offset); + insn=emit_insn(gen_movsi_sto_off( + stack_pointer_rtx, + GEN_INT(cfun->machine->sp_fp_offset + +offset++), + gen_rtx_REG(SImode, regno))); + RTX_FRAME_RELATED_P(insn) = 1; + } + } + if 
(dbg) fprintf(stderr, "%d registers saved%s\n", offset, + (crtl->saves_all_registers)?", should be all of them":", less than all"); + } + + if (cfun->machine->fp_needed) { + if (dbg) zip_debug_rtx(stack_pointer_rtx); + if (dbg) zip_debug_rtx(frame_pointer_rtx); + insn = emit_insn(gen_movsi_reg_off(frame_pointer_rtx, + stack_pointer_rtx, + GEN_INT(cfun->machine->sp_fp_offset))); + RTX_FRAME_RELATED_P(insn) = 1; + if (dbg) fprintf(stderr, "sp_fp_offset is %d\n", cfun->machine->sp_fp_offset); + } +} + +bool +zip_use_return_insn(void) +{ + if ((!reload_completed)||(cfun->machine->fp_needed) + ||(get_frame_size()!=0)) { + // If R0 ever gets pushed to the stack, then we cannot + // use a master return from anywhere. We need to clean up the + // stack first. + if ((!crtl->is_leaf)||((df_regs_ever_live_p(0)) + &&(!call_used_regs[0]))) { + return false; + } + } + zip_compute_frame(); + return (cfun->machine->size_for_adjusting_sp == 0); +} + +/* As per the notes in M68k.c, quote the function epilogue should not depend + * upon the current stack pointer. It should use the frame poitner only, + * if there is a frame pointer. This is mandatory because of alloca; we also + * take advantage of it to omit stack adjustments before returning ... + * + * Let's see if we can use their approach here. + * + * We can't. Consider our choices: + * LOD (FP),R0 + * LOD 1(FP),R4 + * LOD 2(FP),R5 + * LOD 3(FP),R6 + * LOD 4(FP),FP + * ... Then what is the stack pointer? + * or + * LOD (FP),R0 + * LOD 1(FP),R4 + * LOD 2(FP),R5 + * LOD 3(FP),R6 + * MOV FP,SP + * LOD 4(SP),FP + * ... Which suffers unnecessary pipeline stalls, and certainly doesn't + * exploit our pipeline memory function + * or + * MOV FP,SP + * LOD (SP),R0 + * LOD 1(SP),R4 + * LOD 2(SP),R5 + * LOD 3(SP),R6 + * LOD 4(SP),FP + * Which will be our choice. Note that we do use the stack pointer, eventually. 
+ * + */ +void +zip_expand_epilogue(void) { + int regno, offset; + const bool dbg = ((ALL_DEBUG_ON)||(false))&&(!ALL_DEBUG_OFF); + rtx insn; + + zip_compute_frame(); + + if (dbg) fprintf(stderr, "EPILOG::\n"); + if (cfun->machine->fp_needed) { + // This is done special--if you can't trust the stack pointer + // enough so that you must have a frame pointer, then you can't + // trust its offset enough to restore from it. Hence, we start + // by moving the frame pointer to the stack pointer to recover + // the stack pointer back to a usable value. + if (dbg) fprintf(stderr, "EPILOG::Moving frame pointer to stack register\n"); + insn = emit_insn(gen_movsi_reg(stack_pointer_rtx, frame_pointer_rtx)); + RTX_FRAME_RELATED_P(insn) = 1; + } + + if (cfun->machine->saved_reg_size != 0) { + if (cfun->machine->fp_needed) + offset = 0; + else + offset = cfun->machine->sp_fp_offset; + if (dbg) fprintf(stderr, "EPILOG::Saved_REG_Size = %d\n", cfun->machine->saved_reg_size); + for(regno=0; regno < FIRST_PSEUDO_REGISTER; regno++) { + if (zip_save_reg(regno)) { + if (dbg) fprintf(stderr, "EPILOG::RESTORING R%d\n", regno); + rtx reg = gen_rtx_REG(SImode, regno); + insn = emit_insn(gen_movsi_lod_off( + reg, + stack_pointer_rtx, + GEN_INT(offset++))); + add_reg_note(insn, REG_CFA_RESTORE, reg); + RTX_FRAME_RELATED_P(insn) = 1; + } + } + } + + if (cfun->machine->fp_needed) { + // Restore the stack pointer back to the original, the + // difference being the difference from the frame pointer + // to the original stack + insn = emit_insn(gen_addsi3_reg_clobber(stack_pointer_rtx, + stack_pointer_rtx, + GEN_INT(cfun->machine->size_for_adjusting_sp + -cfun->machine->sp_fp_offset))); + RTX_FRAME_RELATED_P(insn) = 1; + } else { + // else now the difference is between the stack pointer and + // the original stack pointer. 
+		if (dbg) fprintf(stderr, "EPILOG::ADDSI3(StackPtr, %d)\n",
+			cfun->machine->size_for_adjusting_sp);
+		insn = emit_insn(gen_addsi3_reg_clobber(stack_pointer_rtx,
+			stack_pointer_rtx,
+			GEN_INT(cfun->machine->size_for_adjusting_sp)));
+		RTX_FRAME_RELATED_P(insn) = 1;
+	}
+	if (dbg) fprintf(stderr, "EPILOG::EMITTING-RETURN\n");
+
+	// The return RTX is not allowed to be frame related
+	insn = emit_jump_insn(ret_rtx);
+	// RTX_FRAME_RELATED_P(insn) = 1;
+}
+
+/* Implement RETURN_ADDR_RTX(COUNT, FRAMEADDR).
+ *
+ * We currently only support calculating the return address for the current
+ * frame.
+ */
+
+/*
+rtx
+zip_return_addr_rtx(int count, rtx frame ATTRIBUTE_UNUSED)
+{
+	if (count)
+		return NULL_RTX;
+
+	zip_compute_frame();
+
+	// saved return address for current function is at fp - 1
+	if (cfun->machine->save_ret)
+		return gen_rtx_MEM(Pmode, plus_constant(frame_pointer_rtx,
+			-UNITS_PER_WORD));
+	return get_hard_reg_initial_val(Pmode, RETURN_ADDRESS_REGNUM);
+}
+*/
+
+/* Implements the macro INITIAL_ELIMINATION_OFFSET: returns the OFFSET
+ * between the given pair of eliminable registers, per zip_compute_frame().
+ */
+int
+zip_initial_elimination_offset(int from, int to) {
+	int	ret = 0;
+	zip_compute_frame();
+
+	if (((from) == FRAME_POINTER_REGNUM)&&((to) == STACK_POINTER_REGNUM)) {
+		ret = cfun->machine->sp_fp_offset;
+	} else if (((from)==ARG_POINTER_REGNUM)&&((to)==STACK_POINTER_REGNUM)) {	// Fixed: was '=' (assignment)
+		ret = cfun->machine->sp_fp_offset;
+	} else if (((from)==ARG_POINTER_REGNUM)&&((to)==FRAME_POINTER_REGNUM)) {	// Fixed: was '=' (assignment)
+		// ret = cfun->machine->local_vars_size;
+		ret = 0;
+	} else {
+		abort();
+	}
+
+	return ret;
+}
+
+/*
+ * Code taken from m68k ...
+ */ +static bool +zip_can_eliminate(int from, int to) +{ + // fprintf(stderr, "CAN_ELIMINATE::QUERYING(%d,%d)\n", from, to); + if ((from == zip_FP)&&(to == zip_SP)) + return !cfun->machine->fp_needed; + return true; +} + +/* +static void +zip_basic_check(void) +{ + gcc_assert(mode_base_align[SImode]==4); + if ((BITS_PER_UNIT != 32) + ||(GET_MODE_SIZE(SImode)!=1) + ||(GET_MODE_SIZE(DImode)!=1) + ||(HARD_REGNO_NREGS(0,SImode)!=1)) { + printf("SIZEOF(SIMode) == %d\n", GET_MODE_SIZE(SImode)); + printf("BITS_PER_UNIT == %d\n", BITS_PER_UNIT); + gcc_assert(BITS_PER_UNIT==32); + gcc_assert(GET_MODE_SIZE(SImode)==1); + gcc_assert(HARD_REGNO_NREGS(0,SImode)==1); + } +} +*/ + +#define zip_basic_check() + +/* Compute the number of word sized regiters needed to hold a function + * argument of mode INT_MODE and tree type TYPE. + */ +int +zip_num_arg_regs(enum machine_mode mode, const_tree type) { + int size; + + zip_basic_check(); + + if (targetm.calls.must_pass_in_stack(mode, type)) + return 0; + + if ((type)&&(mode == BLKmode)) + size = int_size_in_bytes(type); + else + size = GET_MODE_SIZE(mode); + + return (size + UNITS_PER_WORD - 1)/UNITS_PER_WORD; +} + +/* pushed in function prologue */ +/* +static int +zip_arg_partial_bytes(CUMULATIVE_ARGS *cum, enum machine_mode mode, + tree type, bool name ATTRIBUTE_UNUSED) { + int words; + unsigned int regs = zip_num_arg_regs(mode, type); + + if (*cum >= ZIP_LAST_ARG_REGNO + 1) + words = 0; + else if ((*cum + regs) > ZIP_LAST_ARG_REGNO + 1) + words = (*cum + regs) - ZIP_LAST_ARG_REGNO + 1; + else + words = 0; + + return words * UNITS_PER_WORD; +} +*/ + +static void +zip_function_arg_advance(cumulative_args_t ca, machine_mode mode, + const_tree type, bool named ATTRIBUTE_UNUSED) { + CUMULATIVE_ARGS *cum; + int nreg; + + zip_basic_check(); + + cum = get_cumulative_args(ca); + nreg = zip_num_arg_regs(mode, type); + if (((*cum)+nreg) > NUM_ARG_REGS) + (*cum) = NUM_ARG_REGS; + else + (*cum) += nreg; +} + +static rtx 
+zip_function_arg(cumulative_args_t ca, machine_mode mode, + const_tree type ATTRIBUTE_UNUSED, bool named) { + CUMULATIVE_ARGS *cum; + + zip_basic_check(); + + + if (!named) + return NULL_RTX; + //if (targetm.calls.must_pass_in_stack(mode, type)) + //return NULL_RTX; + cum = get_cumulative_args(ca); + + if ((*cum) >= NUM_ARG_REGS) + return NULL_RTX; + return + gen_rtx_REG(mode, (*cum)+1); +} + +#ifdef HAVE_cc0 +/* NOTICE_UPDATE_CC sends us here + */ +void +zip_update_cc_notice(rtx exp, rtx_insn *insn) +{ +#error "The CC0 code was supposed to be removed" + const bool dbg = ((ALL_DEBUG_ON)||(false))&&(!ALL_DEBUG_OFF); + enum attr_ccresult ccr; + enum attr_conditional conditionally_executed; + + // The default is that nothing has changed. + // cc_status = cc_status_prev; + rtx src, dest; + + if (dbg) fprintf(stderr, "CC-NOTICE ...\n"); + if (dbg) zip_debug_rtx_pfx("CC :", exp); + if (dbg) debug_rtx(exp); + + ccr = get_attr_ccresult(insn); + if (ccr == CCRESULT_UNKNOWN) { + CC_STATUS_INIT; + if (dbg) fprintf(stderr, "\tINIT-CC\n"); + return; + } + + if ((GET_CODE(exp) == PARALLEL)&&(GET_CODE(XVECEXP(exp, 0, 0))==SET)) { + // This works up and until we add cc0 parallel instructions + // to our instruction set. + dest = SET_DEST(XVECEXP(exp, 0, 0)); + src = SET_SRC (XVECEXP(exp, 0, 0)); + } else if (GET_CODE(exp) == SET) { + dest = SET_DEST(exp); + src = SET_SRC (exp); + } else { + // First, do nothing if we haven't touched the condition codes. + // Condition codes can only be changed as a result of a set + // expression ...? + if (dbg) fprintf(stderr, "Non-set expression, doesn\'t touch condition codes\n"); + return; + } + + + if (ccr == CCRESULT_UNCHANGED) { + if (dbg) fprintf(stderr, "\tUnchanged CC\n"); + + // We can't just run away here ... even though the CC result + // hasn't changed, GCC's ability to recognize it as a valid + // result has changed. 
In other words, if we just 'set' a + // value contained within either value1 or value2, then we'll + // need to update those values so that they are no longer looked + // upon as potentially containing the current CC values. + + if (dest) { + if (dest == cc0_rtx) + CC_STATUS_INIT; + else if ((REG_P(dest))&&(dest != pc_rtx)) { + // An example here might be a load instruction + if (reg_mentioned_p(dest, cc_status.value1)) + cc_status.value1 = NULL_RTX; + if (reg_mentioned_p(dest, cc_status.value2)) + cc_status.value2 = NULL_RTX; + } + } + return; + } + + // Gotta wait on this test, until we know whether or not the + // conditionally executed instruction was designed to set the + // CC0 register. + conditionally_executed = get_attr_conditional(insn); + if ((conditionally_executed == CONDITIONAL_YES)&&(dest != cc0_rtx)) { + // cc_status is unchanged + // However, GCC's vision of it may have changed + // + // Initialize CC_STATUS + if (dbg) fprintf(stderr, "\tCC -- unchanged (conditional exec)\n"); + CC_STATUS_INIT; + return; + } else if (GET_CODE(src)==IF_THEN_ELSE) { + // Same thing as above + CC_STATUS_INIT; + return; + } + + if (ccr == CCRESULT_VALIDZN) + cc_status.flags = CC_NO_OVERFLOW; + else + cc_status.flags = 0; + cc_status.value1 = dest; + if (dest == cc0_rtx) + cc_status.value2 = src; + else if((REG_P(dest))&&(!reg_mentioned_p(dest, src))) + cc_status.value2 = src; + else if((SUBREG_P(dest))&&(!reg_mentioned_p(XEXP(dest,0), src))) + cc_status.value2 = src; + else + cc_status.value2 = 0; + if (dbg) fprintf(stderr, "\tCC -- Set flags for\n"); + if (dbg) zip_debug_rtx_pfx("V1: ", dest); + if ((dbg)&&(cc_status.value2)) zip_debug_rtx_pfx("V2: ", src); + else if (dbg) fprintf(stderr, "V2: (No SRC)\n"); + if ((dbg)&&(REG_P(dest))) fprintf(stderr, "src refers to dest ?? 
%s\n", + refers_to_regno_p(REGNO(dest),REGNO(dest),src,NULL)?"Yes":"No"); + if ((dbg)&&(REG_P(dest))) fprintf(stderr, "Occurrs %d times\n", + count_occurrences(dest,src,0)); + if ((dbg)&&(REG_P(dest))) fprintf(stderr, "%s mentioned\n", + reg_mentioned_p(dest,src)?"Is":"Is not"); + if ((dbg)&&(REG_P(dest))) fprintf(stderr, "%s referenced\n", + reg_referenced_p(dest,src)?"Is":"Is not"); + +// +// These results are only used in final.c, where they are used to remove +// compare instructions if the optimizer is on. If I produce nothing, no +// compare instructions will be removed. If I produce something, a smart +// decision may be made to remove compare instructions. +// +// cc_status will be compared with subsequent +// (set (cc0) (something)) (i.e. compare only) instructions +// +// (set (cc0) (compare (x) (y))) +// dst = cc0 -- the destination of the set is ignored, save that it must be +// cc0 +// src1 = (compare (x) (y)) +// if (src1 == compare)&&(y == (const_int 0)) +// src2 = (x) +// else +// src2 = null +// +// Four conditions: +// 1. if (val1)&&(src1 == val1) +// This would be true if I had seen a (set (val1) (src1)) insn +// If I have seen a (set (val1) (src1)) +// or equivalently a (set (val1) (compare (x) (y))) +// or +// 2. if (val2)&&(src1 == val2) +// This would be true if I had seen a (set (val1) (src1)) insn, +// and only if val2 was still valid. +// or +// 3. if (src2)&&(value1)&&(src2 == value1) +// This would be true if we are comparing against zero, and the +// number we are comparing against zero is value 1 +// or +// 4. if (src2)&&(value2)&&(src2 == value2) +// ... or value2. This is the common ZipCPU case. +// +// then delete the compare. 
+// +} +#else + +void zip_canonicalize_comparison(int *code, rtx *op0, rtx *op1, + bool preserve_op0) +{ + const bool dbg = ((ALL_DEBUG_ON)||(false))&&(!ALL_DEBUG_OFF); + + if (dbg) fprintf(stderr, "CANONICALIZE ...%s\n", (preserve_op0)?"(Preserve Op0)":""); + if (dbg) zip_debug_rtx_pfx("CODE", gen_rtx_fmt_ee((rtx_code)*code, VOIDmode, gen_rtx_REG(CCmode,zip_CC), const0_rtx)); + if (dbg) zip_debug_rtx_pfx("OP0 ", *op0); + if (dbg) zip_debug_rtx_pfx("OP1 ", *op1); + + if ((!preserve_op0)&&((*code == LE)||(*code == GTU)||(*code == GEU))) { + rtx tem = *op0; + *op0 = *op1; + *op1 = tem; + *code = (int)swap_condition((enum rtx_code)*code); + } + + if ((*code == LE)||(*code == LEU)||(*code == GTU)) { + int offset = 1; // (*code == GTU) ? 1 : -1; + bool swap = false; + + if (CONST_INT_P(*op1)) { + *op1 = GEN_INT(INTVAL(*op1)+offset); + swap = true; + } else if (REG_P(*op1)) { + *op1 = plus_constant(GET_MODE(*op1), *op1, offset, true); + swap = true; + } else if ((GET_CODE(*op1)==PLUS)&&(CONST_INT_P(XEXP(*op1,1)))){ + *op1 = plus_constant(GET_MODE(*op1),XEXP(*op1,0), + INTVAL(XEXP(*op1,1))+offset); + swap = true; + } if (swap) { + if (*code == LE) + (*code)= LT; + else if (*code == LEU) + (*code)= LTU; + else // (*code == GTU) + (*code) = GEU; + } + } +} + +static bool +zip_fixed_condition_code_regs(unsigned int *a, unsigned int *b) { + *a = zip_CC; + *b = INVALID_REGNUM; + return true; +} + +#endif + + +/* totally buggy - we can't return pointers to nested functions */ +static void +zip_asm_trampoline_template(FILE *f) { + // Whereas at one time I thought I wouldn't need it, now I know I + // need this trampoline function, although it is for a completely + // different purpose than the one I was familiar with. + fprintf(f, "\tbrev\t0,r1\n"); + fprintf(f, "\tldilo\t0,r1\n"); + fprintf(f, "\tjmp r1\n"); +} + +/* Worker function for TARGET_TRAMPOLINE_INIT. 
*/ +static void +zip_trampoline_init(rtx m_tramp ATTRIBUTE_UNUSED, + tree fndecl ATTRIBUTE_UNUSED, + rtx chain_value ATTRIBUTE_UNUSED) { +// #warning "This needs to be filled out" + abort(); +} + +static tree +def_builtin(const char *name, enum insn_code icode, enum ZIP_BUILTIN_ID_CODE code, + tree type) +{ + tree t = add_builtin_function(name,type,code,BUILT_IN_MD, NULL, NULL_TREE); + zip_basic_check(); + + if(t) { + zip_builtins[code] = t; + zip_builtins_icode[code] = icode; + } + + return t; + +} + +void zip_init_builtins(void) { + zip_basic_check(); + + tree void_ftype_void = build_function_type_list(void_type_node, NULL_TREE); +#ifdef HAVE_zip_rtu + def_builtin("zip_rtu", CODE_FOR_zip_rtu, ZIP_BUILTIN_RTU, void_ftype_void); +#endif +#ifdef HAVE_zip_halt + def_builtin("zip_halt", CODE_FOR_zip_halt, ZIP_BUILTIN_HALT, void_ftype_void); +#endif +#ifdef HAVE_zip_idle + def_builtin("zip_idle", CODE_FOR_zip_idle, ZIP_BUILTIN_IDLE, void_ftype_void); +#endif + +#ifdef HAVE_zip_syscall +// Support int SYSCALL(callID, int a, int b, int c); + def_builtin("zip_syscall", CODE_FOR_zip_syscall, ZIP_BUILTIN_SYSCALL, + build_function_type_list(void_type_node, NULL_TREE)); +#endif + +#ifdef HAVE_zip_save_context + def_builtin("zip_save_context", CODE_FOR_zip_save_context, ZIP_BUILTIN_SAVE_CONTEXT, + build_function_type_list(void_type_node, ptr_type_node, 0)); +#endif + +#ifdef HAVE_zip_restore_context + def_builtin("zip_restore_context", CODE_FOR_zip_restore_context, ZIP_BUILTIN_RESTORE_CONTEXT, + build_function_type_list(void_type_node, ptr_type_node, 0)); +#endif + +#ifdef HAVE_zip_bitrev + def_builtin("zip_bitrev", CODE_FOR_zip_bitrev, ZIP_BUILTIN_BITREV, + build_function_type_list(unsigned_type_node, unsigned_type_node, + NULL_TREE)); +#endif + +#ifdef HAVE_zip_cc + def_builtin("zip_cc", CODE_FOR_zip_cc, ZIP_BUILTIN_CC, + build_function_type_list(unsigned_type_node, NULL_TREE)); +#endif + +#ifdef HAVE_zip_ucc + def_builtin("zip_ucc", CODE_FOR_zip_ucc, ZIP_BUILTIN_UCC, + 
build_function_type_list(unsigned_type_node, NULL_TREE)); +#endif + +} + +static tree +zip_builtin_decl(unsigned zip_builtin_code, bool initialize_p ATTRIBUTE_UNUSED) +{ + if (zip_builtin_code >= ZIP_BUILTIN_MAX) + return error_mark_node; + + return zip_builtins[zip_builtin_code]; +} + +static rtx +zip_expand_builtin(tree exp, rtx target, + rtx subtarget ATTRIBUTE_UNUSED, + machine_mode tmode ATTRIBUTE_UNUSED, + int ignore ATTRIBUTE_UNUSED) { + + tree fndecl = TREE_OPERAND(CALL_EXPR_FN(exp), 0); + bool nonvoid = (TREE_TYPE(TREE_TYPE(fndecl)) != void_type_node); + enum ZIP_BUILTIN_ID_CODE code=(enum ZIP_BUILTIN_ID_CODE)DECL_FUNCTION_CODE(fndecl); + enum insn_code icode = zip_builtins_icode[code]; + rtx pat, op[5]; + call_expr_arg_iterator iter; + tree arg; + + if ((code == ZIP_BUILTIN_SAVE_CONTEXT) + ||(code == ZIP_BUILTIN_RESTORE_CONTEXT)) { + arg = first_call_expr_arg(exp, &iter); + if (arg == error_mark_node) + return NULL_RTX; + op[0] = expand_normal(arg); + if (GET_CODE(op[0]) != REG) + op[0] = force_reg(Pmode, op[0]); + pat = GEN_FCN(icode)(op[0]); + } else if (code == ZIP_BUILTIN_BITREV) { + arg = first_call_expr_arg(exp, &iter); + if (arg == error_mark_node) { + return NULL_RTX; + } + op[0] = expand_normal(arg); + if (!target) + target = gen_reg_rtx(SImode); + pat = GEN_FCN(icode)(target, op[0]); + } else if ((code == ZIP_BUILTIN_CC)||(code == ZIP_BUILTIN_UCC)) { + if (!target) + target = gen_reg_rtx(SImode); + pat = GEN_FCN(icode)(target); + } else // RTU, HALT, IDLE + pat = GEN_FCN(icode)(); + if (!pat) + return NULL_RTX; + emit_insn(pat); + return (nonvoid ? 
target : const0_rtx); +} + +static bool +zip_scalar_mode_supported_p(enum machine_mode mode) { + zip_basic_check(); + + return ((mode)==SImode)||((mode)==DImode); // ||((mode)==SFmode); +} + +static bool +zip_libgcc_floating_mode_supported_p(enum machine_mode mode) { + return ((mode)==SFmode)||((mode)==DFmode); +} + +static int +zip_address_cost(rtx addr ATTRIBUTE_UNUSED, + enum machine_mode mode ATTRIBUTE_UNUSED, + addr_space_t as ATTRIBUTE_UNUSED, bool spd ATTRIBUTE_UNUSED) { + return 1; +} + +static bool +zip_mode_dependent_address_p(const_rtx addr ATTRIBUTE_UNUSED, + addr_space_t as ATTRIBUTE_UNUSED) { + return false; +} + +/* +static void +zip_asm_output_anchor(rtx x) { + printf("ANCHOR: OP(%d)\n", GET_CODE(x)); +} +*/ + +static void +zip_debug_print(const char *pfx, int lvl, const char *str) { + int i; + i = lvl; + if ((true)||(lvl == 0)) + fprintf(stderr, "%s", pfx); + else + i += strlen(pfx); + while(i-->0) + fprintf(stderr, " "); + fprintf(stderr, "%s\n", str); +} + +static void +zip_debug_print_m(const char *pfx, int lvl, const char *str, enum machine_mode m) { + int i; + + i = lvl; + if ((true)||(lvl == 0)) + fprintf(stderr, "%s", pfx); + else + i = lvl+strlen(pfx); + while(i-->0) + fprintf(stderr, " "); + switch(m) { + case VOIDmode: + fprintf(stderr, "%s:V\n", str); + break; + case BLKmode: + fprintf(stderr, "%s:BLK\n", str); + break; + case BImode: + fprintf(stderr, "%s:BI\n", str); + break; +#ifdef HAVE_QImode + case QImode: + fprintf(stderr, "%s:QI\n", str); + break; +#endif +#ifdef HAVE_HImode + case HImode: + fprintf(stderr, "%s:HI\n", str); + break; +#endif + case SImode: + fprintf(stderr, "%s:SI\n", str); + break; + case CCmode: + fprintf(stderr, "%s:CC\n", str); + break; + case DImode: + fprintf(stderr, "%s:DI\n", str); + break; + default: + fprintf(stderr, "%s:?\n", str); + } +} + +static void +zip_debug_rtx_1(const char *pfx, const_rtx x, int lvl) { + if (x == NULL_RTX) { + zip_debug_print(pfx, lvl, "(NULL-RTX)"); + return; + } else if 
(GET_CODE(x) > NUM_RTX_CODE) { + char buf[64]; + sprintf(buf, "(BAD-RTX-CODE %d)", GET_CODE(x)); + zip_debug_print(pfx, lvl, buf); + gcc_assert(0 && "Bad RTX Code"); + return; + } switch(GET_CODE(x)) { // rtl.def + case PARALLEL: + zip_debug_print(pfx, lvl, "(PARALLEL"); + for(int j=0; j<XVECLEN(x,0);j++) + zip_debug_rtx_1(pfx, XVECEXP(x,0,j), lvl+1); + zip_debug_print(pfx, lvl, ")"); + debug_rtx(x); + break; + case INT_LIST: zip_debug_print(pfx, lvl, "(INT-LIST"); break; + case SEQUENCE: + zip_debug_print(pfx, lvl, "(SEQUENCE"); + for(int j=0; j<XVECLEN(x,0);j++) + zip_debug_rtx_1(pfx, XVECEXP(x,0,j), lvl+1); + zip_debug_print(pfx, lvl, ")"); + debug_rtx(x); + break; + case ADDRESS: zip_debug_print(pfx, lvl, "(ADDRESS"); break; + case DEBUG_INSN: zip_debug_print(pfx, lvl, "(DEBUG-INSN"); break; + case INSN: + zip_debug_print(pfx, lvl, "(INSN"); + /* + { const rtx_insn *tmp_rtx; + for(tmp_rtx = as_a <const rtx_insn *>(x); tmp_rtx != 0; tmp_rtx = NEXT_INSN(tmp_rtx)) { + zip_debug_rtx_1(tmp_rtx, lvl+1); + }} + */ + zip_debug_rtx_1(pfx, PATTERN(x), lvl+1); + zip_debug_print(pfx, lvl, ")"); + debug_rtx(x); + break; + case JUMP_INSN: zip_debug_print(pfx, lvl, "(JUMP-INSN"); + zip_debug_rtx_1(pfx, PATTERN(x), lvl+1); + zip_debug_print(pfx, lvl, ")"); + /* + if (JUMP_LABEL(x)) { + if (GET_CODE(JUMP_LABEL(x)) == LABEL_REF) { + char buf[64]; + sprintf(buf, "(LABEL *.L%d))", CODE_LABEL_NUMBER(LABEL_REF_LABEL(JUMP_LABEL(x)))); + zip_debug_print(pfx, lvl+1, buf); + } else if (GET_CODE(JUMP_LABEL(x))==CODE_LABEL) { + char buf[64]; + sprintf(buf, "(CODE_LABEL *.L%d))", CODE_LABEL_NUMBER(JUMP_LABEL(x))); + zip_debug_print(pfx, lvl+1, buf); + } else + zip_debug_print(pfx, lvl+1, "(w/Label))"); + } else + zip_debug_print(pfx, lvl+1, "(NO label))"); + debug_rtx(x); + */ + break; + case CALL: + zip_debug_print(pfx, lvl, "(CALL (Adr) (Args)"); + zip_debug_rtx_1(pfx, XEXP(x,0), lvl+1); + zip_debug_rtx_1(pfx, XEXP(x,1), lvl+1); + zip_debug_print(pfx, lvl, ")"); + break; + case 
CALL_INSN: zip_debug_print(pfx, lvl, "(CALL-INSN"); + debug_rtx(x); + break; + case BARRIER: zip_debug_print(pfx, lvl, "(BARRIER)"); break; + case RETURN: zip_debug_print(pfx, lvl, "(RETURN)"); break; + case NOTE: + { char buf[128]; + sprintf(buf, "(NOTE %s)", GET_REG_NOTE_NAME(GET_MODE(x))); + zip_debug_print(pfx, lvl, buf); + }break; + case COND_EXEC: zip_debug_print(pfx, lvl, "(COND_EXEC)"); + debug_rtx(x); + break; + case ASM_INPUT: zip_debug_print(pfx, lvl, "(ASM INPUT)"); break; + case ASM_OPERANDS: zip_debug_print(pfx, lvl, "(ASM OPERANDS)"); break; + case UNSPEC: zip_debug_print(pfx, lvl, "(UNSPEC)"); break; + case UNSPEC_VOLATILE: zip_debug_print(pfx, lvl, "(UNSPEC_VOLATILE)"); break; + case CODE_LABEL: + { + char buf[64]; + sprintf(buf, "(CODE_LABEL *.L%d)", CODE_LABEL_NUMBER(x)); + zip_debug_print_m(pfx, lvl, buf, GET_MODE(x)); + } break; + case SET: + zip_debug_print_m(pfx, lvl, "(SET", GET_MODE(x)); + zip_debug_rtx_1(pfx, SET_DEST(x),lvl+1); + zip_debug_rtx_1(pfx, SET_SRC(x),lvl+1); + zip_debug_print(pfx, lvl, ")"); + debug_rtx(x); + break; + case REG: { + char buf[25], mstr[4]; + mstr[0] = '\0'; + if (GET_MODE(x) == SImode) + strcpy(mstr, ":SI"); + else if (GET_MODE(x) == DImode) + strcpy(mstr, ":DI"); + else if (GET_MODE(x) == VOIDmode) + strcpy(mstr, ":V"); + if (REGNO(x) == zip_PC) + sprintf(buf, "(PC%s)", mstr); + else if (REGNO(x) == zip_CC) + sprintf(buf, "(CC%s)", mstr); + else if (REGNO(x) == zip_SP) + sprintf(buf, "(SP%s)", mstr); + else if (REGNO(x) == zip_FP) + sprintf(buf, "(REG%s FP)", mstr); + else if (REGNO(x) == zip_GOT) + sprintf(buf, "(REG%s GBL)", mstr); + else if (FUNCTION_VALUE_REGNO_P(REGNO(x))) + sprintf(buf, "(REG%s RTN-VL)", mstr); + else if (REGNO(x) == RETURN_ADDRESS_REGNUM) + sprintf(buf, "(REG%s RTN-AD)", mstr); + else + sprintf(buf, "(REG%s %d)", mstr, REGNO(x)); + if (mstr[0]) + zip_debug_print(pfx, lvl, buf); + else + zip_debug_print_m(pfx, lvl, buf, GET_MODE(x)); + } break; + case IF_THEN_ELSE: // 51 + 
zip_debug_print(pfx, lvl, "(IF-THEN-ELSE"); + zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1); + zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1); + zip_debug_rtx_1(pfx, XEXP(x,2),lvl+1); + zip_debug_print(pfx, lvl, ")"); + break; + case PC: + zip_debug_print(pfx, lvl, "(PC)"); + break; + case CC0: + zip_debug_print(pfx, lvl, "(CC0)"); + break; + case COMPARE: + zip_debug_print_m(pfx, lvl, "(COMPARE", GET_MODE(x)); + zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1); + zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1); + zip_debug_print(pfx, lvl, ")"); + break; + case CONST: + zip_debug_print_m(pfx, lvl, "(CONST", GET_MODE(x)); + zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1); + zip_debug_print(pfx, lvl, ")"); + break; + case CONST_INT: + { char buf[25]; + if (GET_MODE(x)==SImode) + sprintf(buf, "(CONST_INT:SI %ld)", (long)INTVAL(x)); + else if (GET_MODE(x)==VOIDmode) + sprintf(buf, "(CONST_INT:V %ld)", (long)INTVAL(x)); + else + sprintf(buf, "(CONST_INT:? %ld)", (long)INTVAL(x)); + zip_debug_print(pfx, lvl, buf); + } break; + case LABEL_REF: + { char buf[256]; + sprintf(buf, "(LABEL *.L%d)", CODE_LABEL_NUMBER(LABEL_REF_LABEL(x))); + zip_debug_print(pfx, lvl, buf); + } + break; + case SYMBOL_REF: + { + char buf[64]; + sprintf(buf, "(SYMBOL: %s)", XSTR(x,0)); + // fprintf(file, "%s", XSTR(x,0)); + zip_debug_print(pfx, lvl, buf); + } + break; + case MEM: + zip_debug_print_m(pfx, lvl, "(MEM", GET_MODE(x)); + zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1); + zip_debug_print(pfx, lvl, ")"); + break; + /* + case VALUE: + { + char buf[64]; + sprintf(buf, "(VALUE: %d)", INTVAL(XEXP,0)); + zip_debug_print_m(pfx, lvl, "buf", GET_MODE(x)); + } + break; + */ + case PLUS: + zip_debug_print_m(pfx, lvl, "(PLUS", GET_MODE(x)); + zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1); + zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1); + zip_debug_print(pfx, lvl, ")"); + break; + case MINUS: + zip_debug_print_m(pfx, lvl, "(MINUS", GET_MODE(x)); + zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1); + zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1); + zip_debug_print(pfx, lvl, ")"); + break; + 
case AND: + zip_debug_print_m(pfx, lvl, "(AND", GET_MODE(x)); + zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1); + zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1); + zip_debug_print(pfx, lvl, ")"); + break; + case IOR: + zip_debug_print_m(pfx, lvl, "(OR", GET_MODE(x)); + zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1); + zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1); + zip_debug_print(pfx, lvl, ")"); + break; + case XOR: + zip_debug_print_m(pfx, lvl, "(XOR", GET_MODE(x)); + zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1); + zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1); + zip_debug_print(pfx, lvl, ")"); + break; + case MULT: + zip_debug_print_m(pfx, lvl, "(MULT", GET_MODE(x)); + zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1); + zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1); + zip_debug_print(pfx, lvl, ")"); + break; + case EQ: // + zip_debug_print_m(pfx, lvl, "(EQ", GET_MODE(x)); + zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1); + zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1); + zip_debug_print(pfx, lvl, ")"); + break; + case NE: // + zip_debug_print_m(pfx, lvl, "(NE", GET_MODE(x)); + zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1); + zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1); + zip_debug_print(pfx, lvl, ")"); + break; + case GE: // + zip_debug_print_m(pfx, lvl, "(GE", GET_MODE(x)); + zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1); + zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1); + zip_debug_print(pfx, lvl, ")"); + break; + case GT: // + zip_debug_print_m(pfx, lvl, "(GT", GET_MODE(x)); + zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1); + zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1); + zip_debug_print(pfx, lvl, ")"); + break; + case LE: // + zip_debug_print_m(pfx, lvl, "(LE", GET_MODE(x)); + zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1); + zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1); + zip_debug_print(pfx, lvl, ")"); + break; + case LT: // + zip_debug_print_m(pfx, lvl, "(LT", GET_MODE(x)); + zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1); + zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1); + zip_debug_print(pfx, lvl, ")"); + break; + case GEU: // + zip_debug_print_m(pfx, lvl, "(GEU", GET_MODE(x)); + zip_debug_rtx_1(pfx, 
XEXP(x,0),lvl+1); + zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1); + zip_debug_print(pfx, lvl, ")"); + break; + case GTU: // + zip_debug_print_m(pfx, lvl, "(GTU", GET_MODE(x)); + zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1); + zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1); + zip_debug_print(pfx, lvl, ")"); + break; + case LEU: // + zip_debug_print_m(pfx, lvl, "(LEU", GET_MODE(x)); + zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1); + zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1); + zip_debug_print(pfx, lvl, ")"); + break; + case LTU: // + zip_debug_print_m(pfx, lvl, "(LTU", GET_MODE(x)); + zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1); + zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1); + zip_debug_print(pfx, lvl, ")"); + break; + case SCRATCH: // + zip_debug_print_m(pfx, lvl, "(SCRATCH)", GET_MODE(x)); + break; + case SUBREG: + { char buf[25]; + if (REG_P(XEXP(x,0))) { + sprintf(buf, "(SUBREG %d/%d)", REGNO(XEXP(x,0)), + SUBREG_BYTE(x)); + zip_debug_print(pfx, lvl, buf); + } else if (MEM_P(XEXP(x,0))) { + sprintf(buf, "(SUBREG /%d", SUBREG_BYTE(x)); + zip_debug_print(pfx, lvl, buf); + zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1); + zip_debug_print(pfx, lvl, ")"); + } else { + sprintf(buf, "(SUBREG UNK /%d", SUBREG_BYTE(x)); + zip_debug_print(pfx, lvl, buf); + zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1); + zip_debug_print(pfx, lvl, ")"); + }} + break; + case ASHIFT: + zip_debug_print_m(pfx, lvl, "(ASHIFT", GET_MODE(x)); + zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1); + zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1); + zip_debug_print(pfx, lvl, ")"); + break; + case ASHIFTRT: + zip_debug_print_m(pfx, lvl, "(ASHIFTRT", GET_MODE(x)); + zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1); + zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1); + zip_debug_print(pfx, lvl, ")"); + break; + case LSHIFTRT: + zip_debug_print_m(pfx, lvl, "(LSHIFTRT", GET_MODE(x)); + zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1); + zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1); + zip_debug_print(pfx, lvl, ")"); + break; + default: + { char buf[128]; + sprintf(buf, "(? 
= %d) -- calling DEBUG-RTX", GET_CODE(x)); + zip_debug_print(pfx, lvl, buf); + debug_rtx(x); + } break; + } +} + +void +zip_debug_rtx_pfx(const char *pfx, const_rtx x) { + zip_debug_rtx_1(pfx, x, 0); +} + +void +zip_debug_rtx(const_rtx x) { + zip_debug_rtx_pfx("", x); +} + +void +zip_debug_ccode(int ccode) { + switch(ccode) { + case EQ: fprintf(stderr, "EQ"); break; + case NE: fprintf(stderr, "NE"); break; + case GT: fprintf(stderr, "GT"); break; + case GE: fprintf(stderr, "GE"); break; + case LT: fprintf(stderr, "LT"); break; + case LE: fprintf(stderr, "LE"); break; + case GTU: fprintf(stderr, "GTU"); break; + case GEU: fprintf(stderr, "GEU"); break; + case LTU: fprintf(stderr, "LTU"); break; + case LEU: fprintf(stderr, "LEU"); break; + default: + fprintf(stderr, "%d", ccode); break; + } +} + +void +zip_debug_insn(rtx_insn *insn ATTRIBUTE_UNUSED) { +} + +void +zip_debug_bb(basic_block bb) { + rtx_insn *insn; + + fprintf(stderr, "************ BASIC-BLOCK ***************\n"); + FOR_BB_INSNS(bb, insn) + { + zip_debug_rtx(insn); + } +} + + +static bool +zip_legitimate_opb(rtx x, bool strict) +{ + const bool dbg = ((ALL_DEBUG_ON)||(false))&&(!ALL_DEBUG_OFF); + + if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB\n"); + if (dbg) zip_debug_rtx_pfx("Test: ", x); + + if (NULL_RTX == x) + return false; + else if ((GET_MODE(x) != SImode)&&(GET_MODE(x) != VOIDmode)) { + if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB -> Mode failure\n"); + return false; + } else if ((strict)&&(REG_P(x))) { + if (REGNO(x)<zip_CC) { + if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB -> (Reg)\n"); + return true; + } else return false; + } else if (register_operand(x, GET_MODE(x))) { + // This also handles subregs + if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB -> (Reg)\n"); + return true; + } else if ((CONST_INT_P(x)) + &&(INTVAL(x) >= zip_min_opb_imm) + &&(INTVAL(x) <= zip_max_opb_imm)) { + if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB -> YES! 
(Const) %ld <= %ld <= %ld\n", (long)zip_min_opb_imm, (long)INTVAL(x), (long)zip_max_opb_imm); + return true; + // } else if ((GET_CODE(x) == LABEL_REF)||(GET_CODE(x)==CODE_LABEL)) { + // return true; + } else if (GET_CODE(x) == PLUS) { + // Is it a valid register? + if ((!strict)&&(!register_operand((rtx)XEXP((rtx)x,0), GET_MODE(x)))) { + if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB -> No (No reg in +%s)\n", + (GET_CODE(XEXP(x,1))==REG)?", reg in op[1]":""); + return false; + } else if ((strict)&&((!REG_P(XEXP(x,0)))||(REGNO(XEXP(x,0))>=zip_CC))) { + return false; + } if ((GET_CODE(XEXP(x, 1)) == CONST_INT) + &&(INTVAL(XEXP(x, 1)) <= zip_max_anchor_offset) + &&(INTVAL(XEXP(x, 1)) >= zip_min_anchor_offset)) { + if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB -> YES! (reg+int)\n"); + // if((INTVAL(XEXP(x,1))<0)&&(REGNO(XEXP(x,0))==zip_SP)) + // gcc_unreachable(); + return true; + } if ((GET_CODE(XEXP(x, 1)) == LABEL_REF) + ||(GET_CODE(XEXP(x, 1)) == CODE_LABEL) + ||(GET_CODE(XEXP(x, 1)) == SYMBOL_REF)) { + // While we can technically support this, the problem + // is that the symbol address could be anywhere, and we + // have no way of recovering if it's outside of our + // 14 allowable bits. + if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB -> No. (reg+lbl)\n"); + return false; + } + } + + if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB -> No\n"); + if (dbg) zip_debug_rtx(x); + return false; +} + +static bool +zip_legitimate_move_operand_p(machine_mode mode ATTRIBUTE_UNUSED, rtx x, bool strict) { + const bool dbg = ((ALL_DEBUG_ON)||(false))&&(!ALL_DEBUG_OFF); + + if (dbg) fprintf(stderr, "ZIP-VALID-MOVE-OPERAND\n"); + if (dbg) zip_debug_rtx_pfx("VMov?: ", x); + + if (!zip_legitimate_opb(x, strict)) + return false; + else if ((GET_CODE(x)==PLUS)&&(CONST_INT_P(XEXP(x,1)))) { + if ((INTVAL(XEXP(x, 1)) > zip_max_mov_offset) + ||(INTVAL(XEXP(x, 1)) < zip_min_mov_offset)) { + if (dbg) fprintf(stderr, "ZIP-VALID-MOVE-OPERAND -> NO! 
(reg+int), int out of bounds: %ld\n", (long)INTVAL(XEXP(x,1))); + return false; + } + } + + if (dbg) fprintf(stderr, "ZIP-VALID-MOVE-OPERAND -> Yes\n"); + if (dbg) zip_debug_rtx(x); + return true; +} + +int +zip_pd_mov_operand(rtx op) +{ + const bool dbg = ((ALL_DEBUG_ON)||(false))&&(!ALL_DEBUG_OFF); + + if (dbg) fprintf(stderr, "ZIP-VALID-MOV(predicate) for OPERAND\n"); + return zip_legitimate_move_operand_p(VOIDmode, op, !can_create_pseudo_p()); +} + +int +zip_pd_mvimm_operand(rtx op) +{ + const bool dbg = ((ALL_DEBUG_ON)||(false))&&(!ALL_DEBUG_OFF); + + if (dbg) fprintf(stderr, "ZIP-VALID-MVIMM(predicate) for OPERAND\n"); + if (!CONST_INT_P(op)) + return false; + if (INTVAL(op) > zip_max_mov_offset) + return false; + if (INTVAL(op) < zip_min_mov_offset) + return false; + return true; +} + +int +zip_pd_imm_operand(rtx op) +{ + const bool dbg = ((ALL_DEBUG_ON)||(false))&&(!ALL_DEBUG_OFF); + + if (dbg) fprintf(stderr, "ZIP-VALID-IMM(predicate) for OPERAND\n"); + if (!CONST_INT_P(op)) + return false; + if (INTVAL(op) > zip_max_anchor_offset) + return false; + if (INTVAL(op) < zip_min_anchor_offset) + return false; + return true; +} + +int +zip_address_operand(rtx op) +{ + const bool dbg = ((ALL_DEBUG_ON)||(false))&&(!ALL_DEBUG_OFF); + + if (dbg) fprintf(stderr, "ZIP-ADDRESS for OPERAND\n"); + if ((REG_P(op))&&(REGNO(op)==zip_CC)) + return false; + else if ((GET_CODE(op) == PLUS)&&(REG_P(XEXP(op,0))) + &&(REGNO(XEXP(op,0))==zip_CC)) + return false; + else + return zip_legitimate_opb(op, !can_create_pseudo_p()); +} + +int +zip_pd_opb_operand(rtx op) +{ + const bool dbg = ((ALL_DEBUG_ON)||(false))&&(!ALL_DEBUG_OFF); + + if (dbg) fprintf(stderr, "ZIP-OPB(predicate) for OPERAND\n"); + return zip_legitimate_opb(op, false); //, !can_create_pseudo_p()); +} + +int +zip_ct_address_operand(rtx op) +{ + const bool dbg = ((ALL_DEBUG_ON)||(false))&&(!ALL_DEBUG_OFF); + + if (dbg) fprintf(stderr, "ZIP-ADDRESS(constraint) for OPERAND\n"); + return zip_legitimate_opb(op, 
!can_create_pseudo_p()); +} + +int +zip_const_address_operand(rtx x) { + const bool dbg = ((ALL_DEBUG_ON)||(false))&&(!ALL_DEBUG_OFF); + + if (dbg) fprintf(stderr, "is ZIP-CONST-ADDRESS?\n"); + if (dbg) zip_debug_rtx(x); + if ((GET_MODE(x) != SImode)&&(GET_MODE(x) != VOIDmode)) { + fprintf(stderr, "is ZIP-CONST-ADDRESS? -> NO, BAD MODE\n"); + return false; + } + if ((GET_CODE(x) == LABEL_REF) + ||(GET_CODE(x) == CODE_LABEL) + ||(GET_CODE(x) == SYMBOL_REF)) { + if (dbg) fprintf(stderr, "is ZIP-CONST-ADDRESS? -> YES! (LBL)\n"); + return true; + } else if (CONST_INT_P(x)) { + if (dbg) fprintf(stderr, "is ZIP-CONST-ADDRESS? -> YES! (INT)\n"); + return true; + } else if (GET_CODE(x) == PLUS) { + if (dbg) fprintf(stderr, "is ZIP-CONST-ADDRESS(PLUS)\n"); + return ((zip_const_address_operand(XEXP(x,0))) + &&(CONST_INT_P(XEXP(x,1)))); + } else if (GET_CODE(x) == MINUS) { + if (dbg) fprintf(stderr, "is ZIP-CONST-ADDRESS(MINUS)\n"); + return ((zip_const_address_operand(XEXP(x,0))) + &&(zip_const_address_operand(XEXP(x,1)))); + } + + if (dbg) fprintf(stderr, "is ZIP-CONST-ADDRESS? 
-> No\n"); + if (dbg) zip_debug_rtx(x); + return false; +} + +int +zip_ct_const_address_operand(rtx x) { + const bool dbg = ((ALL_DEBUG_ON)||(false))&&(!ALL_DEBUG_OFF); + + if (dbg) fprintf(stderr, "ZIP-CONST-ADDRESS(constraint)\n"); + return zip_const_address_operand(x); +} + +int +zip_pd_const_address_operand(rtx x) { + const bool dbg = ((ALL_DEBUG_ON)||(false))&&(!ALL_DEBUG_OFF); + + if (dbg) fprintf(stderr, "ZIP-CONST-ADDRESS(predicate)\n"); + return zip_const_address_operand(x); +} + + +static bool +zip_legitimate_address_p(machine_mode mode ATTRIBUTE_UNUSED, rtx x, bool strict) +{ + const bool dbg = ((ALL_DEBUG_ON)||(false))&&(!ALL_DEBUG_OFF); + + if (dbg) fprintf(stderr, "Zip-LEGITIMATE-ADDRESS-P\n"); + if (dbg) zip_debug_rtx(x); + + // Only insist the register be a valid register if strict is true + if (zip_legitimate_opb(x, strict)) + return true; + // else if (zip_const_address_operand(x)) + // return true; + + return false; +} + +static rtx +zip_legitimize_address(rtx x, rtx oldx ATTRIBUTE_UNUSED, machine_mode mode ATTRIBUTE_UNUSED) { + const bool dbg = ((ALL_DEBUG_ON)||(false))&&(!ALL_DEBUG_OFF); + + if (dbg) zip_debug_rtx_pfx("LEGITIMIZE: ", x); + if (zip_legitimate_address_p(mode, x, !can_create_pseudo_p())) + return x; + + if (GET_CODE(x)==PLUS) { + if (!REG_P(XEXP(x,0))) + XEXP(x,0) = force_reg(GET_MODE(x),XEXP(x,0)); + if ((!zip_legitimate_address_p(mode, x, !can_create_pseudo_p())) + &&(!CONST_INT_P(XEXP(x,1)))) + x = force_reg(GET_MODE(x),x); + } else if (MEM_P(x)) + x = force_reg(GET_MODE(x),x); + + if (dbg) zip_debug_rtx_pfx("LEGITIMATE: ", x); + return x; +} + +void +zip_asm_output_def(FILE *stream, const char *name, const char *value) +{ + assemble_name(stream, name); + fprintf(stream, "\t.equ "); + assemble_name(stream, value); + fputc('\n', stream); +} + +#define USE_SUBREG +#ifdef USE_SUBREG +#define SREG_P(RTX) ((SUBREG_P(RTX))&&(REG_P(XEXP(RTX,0)))) +#define SMEM_P(RTX) ((SUBREG_P(RTX))&&(MEM_P(XEXP(RTX,0)))) +#else +#define SREG_P(RTX) 
false +#define SMEM_P(RTX) false +#endif + +const char *zip_set_zero_or_one(rtx condition, rtx dst) { + const bool dbg = ((ALL_DEBUG_ON)||(false))&&(!ALL_DEBUG_OFF); + if (dbg) fprintf(stderr, "ZIP::SET-ZERO-OR-ONE\n"); + if (dbg) zip_debug_rtx_pfx("CND", condition); + if (dbg) zip_debug_rtx_pfx("REG", dst); + switch(GET_CODE(condition)) { + case EQ: return "LDI\t0,%0\n\tLDILO.Z\t1,%0"; + case NE: return "LDI\t0,%0\n\tLDILO.NZ\t1,%0"; + case LT: return "LDI\t0,%0\n\tLDILO.LT\t1,%0"; + case GT: return "LDI\t0,%0\n\tLDILO.GT\t1,%0"; + case LE: return "LDI\t1,%0\n\tLDILO.GT\t0,%0"; + case GE: return "LDI\t0,%0\n\tLDILO.GE\t1,%0"; + case LTU: return "LDI\t0,%0\n\tLDILO.C\t1,%0"; + case GTU: return "LDI\t1,%0\n\tLDILO.C\t0,%0\n\tLDILO.Z\t0,%0"; + case LEU: return "LDI\t0,%0\n\tLDILO.C\t1,%0\n\tLDILO.Z\t1,%0"; + case GEU: return "LDI\t1,%0\n\tLDILO.C\t0,%0"; + default: + zip_debug_rtx(condition); + internal_error("CSTORE Unsupported condition"); + return NULL; + } +} + +/* +const char *zip_binary_movsicc(rtx_code condition, const char *op, const int opno) { + static char result[64] = ""; + switch(condition) { + // + // Result already exists in the iffalse register + // Can't change it. Therefore, on the + // condition ... move true register to the + // destination + // + case EQ: sprintf(result, "%s.Z\t%%%d,%%0", op, opno); break; + case NE: sprintf(result, "%s.NZ\t%%%d,%%0", op, opno); break; + case LT: sprintf(result, "%s.LT\t%%%d,%%0", op, opno); break; + case GT: sprintf(result, "%s.GT\t%%%d,%%0", op, opno); break; + // .LE doesn't exist on Zip CPU--turn this into two instructions + case LE: sprintf(result, "%s.LT\t%%%d,%%0\n\t%s.Z\t%%%d,%%0", op, opno, op, opno); break; + case GE: sprintf(result, "%s.GE\t%%%d,%%0", op, opno); break; + case LTU: sprintf(result, "%s.C\t%%%d,%%0", op, opno); break; + // + // .GTU doesn't exist on the Zip CPU either. We also note that + // .C will never be set on an equal condition. 
Therefore, we + // turn this into a XOR.NZ 2,CC, which will set the .C condition + // as long as .Z wasn't true. We then undo this when we're + // done. This is possible since none of these instructions + // (LDI/MOV/Lod conditional, nor Xor conditional) will ever set + // the condition codes. + // + // This is obviously not very optimal. Avoid this by all means + // if you can + case GTU: sprintf(result, "XOR.NZ\t2,CC\n%s.C\t%%%d,%%0\n\tXOR.NZ\t2,CC", op, opno); break; + // .LEU doesn't exist on Zip CPU either--turn this into another + // two instructions + case LEU: sprintf(result, "%s.C\t%%%d,%%0\n\t%s.Z\t%%%d,%%0", op, opno, op, opno); break; + // + // .GEU doesn't exist on Zip CPU. Implementing it her is + // painful. We can change the condition codes to make it so, + // but the instruction requires the condition codes not be + // changed. Hence, we must change them back if we do so. + // + // .C will be set on less than but not equal. Hence !.C will + // be true on greater than or equal. 
+ case GEU: sprintf(result, "XOR\t2,CC\n%s.C\t%%%d,%%0\n\tXOR\t2,CC", op, opno); break; + default: + internal_error("MOVSICC(BINARY) Unsupported condition"); + return NULL; + } return result; +} +*/ + +bool +zip_supported_condition(int c) { + switch(c) { + case NE: case LT: case EQ: case GT: case GE: case LTU: + return true; + break; + default: + break; + } return false; +} + +bool +zip_signed_comparison(int c) { + switch(c) { + case NE: case LT: case EQ: case GT: case GE: + return true; + default: + break; + } return false; +} + +bool +zip_expand_movsicc(rtx dst, rtx condition, rtx iftrue, rtx iffalse) { + rtx_insn *insn; + const bool dbg = ((ALL_DEBUG_ON)||(false))&&(!ALL_DEBUG_OFF); + if (dbg) fprintf(stderr, "ZIP::MOVSICC\n"); + if (dbg) zip_debug_rtx_pfx("DST", dst); + if (dbg) zip_debug_rtx_pfx("CND", condition); + if (dbg) zip_debug_rtx_pfx("TRU", iftrue); + if (dbg) zip_debug_rtx_pfx("FAL", iffalse); + + // Start with the condition + rtx cmpa = XEXP(condition,0), cmpb=XEXP(condition,1); + enum rtx_code cmpcode = GET_CODE(condition); + + // Want to always do the false expression, and only sometimes the + // true expression. If, however, the false is a constant and the + // true and destination are the same thing, this doesn't work. + if (rtx_equal_p(dst, iftrue)) { + // If the true value is the same as the destination already, + // then swap so we only do the condition on true + rtx tem = iffalse; + iffalse = iftrue; + iftrue = tem; + cmpcode = reverse_condition(cmpcode); + } + + //; Do we need to swap or adjust the condition? 
+ if (zip_supported_condition((int)cmpcode)) { + // Keep everything as is + if (dbg) fprintf(stderr, "ZIP::MOVSICC -- Condition is supported\n"); + } else if ((zip_supported_condition(reverse_condition(cmpcode))) + &&(!MEM_P(iffalse)) + &&(!rtx_equal_p(dst,iffalse))) { + rtx tem = iffalse; + iffalse = iftrue; + iftrue = tem; + + cmpcode = reverse_condition(cmpcode); + } else if ((zip_supported_condition((int)swap_condition(cmpcode))) + &&((REG_P(cmpb))||(can_create_pseudo_p()))) { + rtx tem = cmpa; + cmpa = cmpb; + cmpa = tem; + cmpcode = swap_condition(cmpcode); + + if ((GET_CODE(cmpa)==PLUS)&&(zip_signed_comparison((int)cmpcode)) + &&(REG_P(XEXP(cmpa,0))) + &&(CONST_INT_P(XEXP(cmpa,1))) + &&(abs(INTVAL(XEXP(cmpa,1)))<(1<<17))) { + + // If we were doing CMP x(Rb),Ra + // and we just changed it to CMP Ra,x(Rb) + // adjust it to CMP -x(Ra),Rb + cmpb = plus_constant(SImode, cmpb, -INTVAL(XEXP(cmpa,1))); + cmpa = XEXP(cmpa,0); + } else if (!REG_P(cmpa)) { + // Otherwise, if we had anything else in Rb other than + // a register ... such as a constant, then load it into + // a register before comparing it. So + // CMP x,Ra + // became + // CMP Ra,x + // now becomes + // LDI x,Rt + // CMP Ra,Rt + // (We already tested for can_create_pseudo_p() above..) + tem = gen_reg_rtx(SImode); + emit_move_insn(tem, cmpa); + cmpa = tem; + } + } else { + // Here's our last chance. 
+ // This will adjust for less than equal types of stuff + int cod = (int)cmpcode; + zip_canonicalize_comparison(&cod, &cmpa, &cmpb, false); + cmpcode = (enum rtx_code)cod; + } + + if (dbg) fprintf(stderr, "ZIP::MOVSICC -- Post-Modes\n"); + if (dbg) zip_debug_rtx_pfx("DST-P: ", dst); + if (dbg) zip_debug_rtx_pfx("CND-P: ", condition); + if (dbg) zip_debug_rtx_pfx("TRU-P: ", iftrue); + if (dbg) zip_debug_rtx_pfx("FAL-P: ", iffalse); + + if (!zip_supported_condition((int)cmpcode)) { + if (dbg) { + fprintf(stderr, "ZIP::MOVSICC -- Unsupported condition: "); + zip_debug_ccode(cmpcode); + fprintf(stderr, "\n"); + } + return false; + } + gcc_assert(zip_supported_condition((int)cmpcode)); + + //; Always do the default move + bool conditionally_do_false = false; + conditionally_do_false = (MEM_P(iffalse)) + &&(!rtx_equal_p(dst,iffalse)) + &&(zip_supported_condition(reverse_condition(cmpcode))); + conditionally_do_false = conditionally_do_false || (rtx_equal_p(dst,iftrue)); + if ((conditionally_do_false)&&(!zip_supported_condition(reverse_condition(cmpcode)))) { + if (dbg) { + fprintf(stderr, "ZIP::MOVSICC -- Cant support the reverse condition: "); + zip_debug_ccode(cmpcode); + fprintf(stderr, "\n"); + } + return false; + } + + if ((!rtx_equal_p(dst, iffalse))&&(!conditionally_do_false)) { + if (dbg) + fprintf(stderr, "ZIP::MOVSICC -- EMITTING MOVE FALSE->DST\n"); + insn = emit_move_insn(dst, iffalse); + if (dbg) zip_debug_rtx_pfx("BARE-U: ", insn); + } + + rtx cc_rtx = gen_rtx_REG(CCmode, zip_CC); + + //; Now let's get our comparison right + if (dbg) fprintf(stderr, "ZIP::MOVSICC -- EMITTING COMPARISON\n"); + insn = emit_insn(gen_rtx_SET(VOIDmode, cc_rtx, + gen_rtx_COMPARE(CCmode, cmpa, cmpb))); + if (dbg) zip_debug_rtx_pfx("BARE-C: ", insn); + + //; Finally, let's load the value on true + if (!rtx_equal_p(dst, iftrue)) { + if (dbg) fprintf(stderr, "ZIP::MOVSICC -- EMITTING BARE\n"); + insn=emit_insn(gen_movsicc_bare(dst, + gen_rtx_fmt_ee(cmpcode, SImode, NULL_RTX, 
NULL_RTX), + iftrue, dst)); + if (dbg) zip_debug_rtx_pfx("BARE-T: ", insn); + } + + if (conditionally_do_false) { + gcc_assert(zip_supported_condition(reverse_condition(cmpcode))); + insn=emit_insn(gen_movsicc_bare(dst, + gen_rtx_fmt_ee(reverse_condition(cmpcode), SImode, + NULL_RTX, NULL_RTX), iffalse, dst)); + if (dbg) zip_debug_rtx_pfx("BARE-F: ", insn); + } + + // Return true on success + return true; +} + +const char *zip_addsicc(rtx dst, rtx condition, rtx ifsrc, rtx addv ATTRIBUTE_UNUSED) { + // We know upon entry that REG_P(dst) must be true + if (!REG_P(dst)) + internal_error("%s","ADDSICC into something other than register"); + if ((REG_P(ifsrc))&&(REGNO(dst)==REGNO(ifsrc))) { + switch (GET_CODE(condition)) { + case EQ: return "ADD.Z\t%3,%0"; + case NE: return "ADD.NZ\t%3,%0"; + case LT: return "ADD.LT\t%3,%0"; + case GT: return "ADD.GT\t%3,%0"; + case LE: return "ADD.LT\t%3,%0\n\tADD.Z\t%3,%0"; + case GE: return "ADD.GE\t%3,%0"; + case LTU: return "ADD.C\t%3,%0"; + case LEU: return "ADD.C\t%3,%0\n\tADD.Z\t%3,%0"; + case GEU: return "XOR\t2,CC\n\tADD.C\t%3,%0\n\tXOR\t2,CC"; + // Can do a GEU comparison, and then undo on the Zero condition + case GTU: return "XOR\t2,CC\n\tADD.C\t%3,%0\n\tSUB.Z\t%3,%0\n\tXOR\t2,CC"; + default: + internal_error("%s", "Zip/No usable addsi expansion"); + break; + } + } else { + // MOV A+REG,REG + switch (GET_CODE(condition)) { + case EQ: return "MOV.Z\t%3+%2,%0"; + case NE: return "MOV.NZ\t%3+%2,%0"; + case LT: return "MOV.LT\t%3+%2,%0"; + case GT: return "MOV.GT\t%3+%2,%0"; + case LE: return "MOV.LT\t%3+%2,%0\n\tMOV.Z\t%3+%2,%0"; + case GE: return "MOV.GE\t%3+%2,%0"; + case LTU: return "MOV.C\t%3+%2,%0"; + case LEU: return "MOV.C\t%3+%2,%0\n\tMOV.Z\t%3+%2,%0"; + case GEU: return "XOR\t2,CC\n\tMOV.C\t%3+%2,%0\n\tXOR\t2,CC"; + // Can do a GEU comparison, and then undo on the Zero condition + // EXCEPT: with a move instruction, what's there to undo? We + // just clobbered our register! 
+ // case GTU: return "XOR\t2,CC\n\tMOV.C\t%3,%0\n\tSUB.Z\t%3,%0XOR\t2,CC"; + default: + internal_error("%s", "Zip/No usable addsi(reg,reg) expansion"); + break; + } + } + + return "BREAK"; +} + +static int zip_memory_move_cost(machine_mode mode, reg_class_t ATTRIBUTE_UNUSED, bool in ATTRIBUTE_UNUSED) { + int rv = 14; + if ((mode == DImode)||(mode == DFmode)) + rv += 2; + return rv; +} + +// #warning "How do we tell the compiler LDI label is expensive as 2 ops"? +static bool zip_cannot_modify_jumps_p(void) { + // Let's try their suggested approach, keeping us from modifying jumps + // after reload. This should also allow our peephole2 optimizations + // to adjust things back to what they need to be if necessary. + return (reload_completed || reload_in_progress); +} + +rtx_insn *zip_ifcvt_info; + +void +zip_ifcvt_modify_tests(ce_if_block *ce_info ATTRIBUTE_UNUSED, rtx *true_expr, rtx *false_expr) { + const bool dbg = ((ALL_DEBUG_ON)||(false))&&(!ALL_DEBUG_OFF); + if (dbg) fprintf(stderr, "IFCVT-MODIFY-TESTS\n"); + if (*true_expr) switch(GET_CODE(*true_expr)) { + case LE: + case GTU: + case GEU: + case LEU: + if (dbg) fprintf(stderr, "TRUE, missing expr\n"); + if (dbg) zip_debug_rtx(*true_expr); + *true_expr = NULL_RTX; + break; + default: // LT, GT, GTE, LTU, NE, EQ + break; + } + + if (*false_expr) switch(GET_CODE(*false_expr)) { + case LE: + case GTU: + case GEU: + case LEU: + if (dbg) fprintf(stderr, "FALSE, missing expr\n"); + if (dbg) zip_debug_rtx(*false_expr); + *false_expr = NULL_RTX; + default: + break; + } + if ((dbg)&&((!*true_expr)||(!*false_expr))) + fprintf(stderr, "IFCVT-MODIFY-TESTS -- FAIL\n"); +} + +void +zip_ifcvt_machdep_init(struct ce_if_block *ceinfo ATTRIBUTE_UNUSED) { +/* +if (!ceinfo->then_bb) + return; +rtx_insn *insn; +FOR_BB_INSNS(ceinfo->then_bb, insn) { + fprintf(stderr, "IFCVT -- INIT\n"); + zip_debug_rtx_pfx("INIT-BB", insn); +} +*/ +/* + zip_ifcvt_info = NULL; + rtx_insn *insn, *ifinsn = NULL; + FOR_BB_INSNS(ceinfo->test_bb, insn) { 
+ rtx p; + p = single_set(insn); + if (!p) continue; + if (SET_DEST(p)==pc_rtx) { + ifinsn = insn; + } + if (!REG_P(SET_DEST(p))) + continue; + if (GET_MODE(SET_DEST(p))!=CCmode) + continue; + if (REGNO(SET_DEST(p))!=zip_CC) + continue; + zip_ifcvt_info = insn; + } + + if (zip_ifcvt_info) + zip_debug_rtx_pfx("PUTATIVE-CMP",zip_ifcvt_info); + if (ifinsn) + zip_debug_rtx_pfx("PRIOR-JMP",ifinsn); +*/ +} + +void +zip_ifcvt_modify_insn(struct ce_if_block *ceinfo ATTRIBUTE_UNUSED, + rtx pattern ATTRIBUTE_UNUSED, + rtx_insn *insn ATTRIBUTE_UNUSED) { + // zip_debug_rtx_pfx("MODIFY-INSN: ", insn); +} + +void +zip_ifcvt_modify_cancel(struct ce_if_block *ceinfo ATTRIBUTE_UNUSED) { +/* + fprintf(stderr, "IFCVT -- CANCEL\n"); + zip_ifcvt_info = NULL; +*/ +} + +void +zip_ifcvt_modify_final(struct ce_if_block *ceinfo ATTRIBUTE_UNUSED) { +/* +rtx_insn *insn; +FOR_BB_INSNS(ceinfo->test_bb, insn) { + fprintf(stderr, "IFCVT -- FINAL\n"); + zip_debug_rtx_pfx("FINAL-TEST-BB", insn); +} + zip_ifcvt_info = NULL; +*/ +} + + +int zip_insn_sets_cc(rtx_insn *insn) { + return (get_attr_ccresult(insn)==CCRESULT_SET); +} + +int zip_is_conditional(rtx_insn *insn) { + return (get_attr_conditional(insn)==CONDITIONAL_YES); +} diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/config/zip/zip.h gcc-5.3.0-zip/gcc/config/zip/zip.h --- gcc-5.3.0-original/gcc/config/zip/zip.h 1969-12-31 19:00:00.000000000 -0500 +++ gcc-5.3.0-zip/gcc/config/zip/zip.h 2016-05-12 15:20:14.000702915 -0400 @@ -0,0 +1,4077 @@ +//////////////////////////////////////////////////////////////////////////////// +// +// Filename: gcc/config/zip/zip.h +// +// Project: Zip CPU backend for the GNU Compiler Collection +// +// Purpose: +// +// Creator: Dan Gisselquist, Ph.D. 
+// Gisselquist Technology, LLC +// +//////////////////////////////////////////////////////////////////////////////// +// +// Copyright (C) 2016, Gisselquist Technology, LLC +// +// This program is free software (firmware): you can redistribute it and/or +// modify it under the terms of the GNU General Public License as published +// by the Free Software Foundation, either version 3 of the License, or (at +// your option) any later version. +// +// This program is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// for more details. +// +// You should have received a copy of the GNU General Public License along +// with this program. (It's in the $(ROOT)/doc directory, run make with no +// target there if the PDF file isn't present.) If not, see +// <http://www.gnu.org/licenses/> for a copy. +// +// License: GPL, v3, as defined and found on www.gnu.org, +// http://www.gnu.org/licenses/gpl.html +// +// +//////////////////////////////////////////////////////////////////////////////// +#ifndef GCC_ZIP_H +#define GCC_ZIP_H + + +// +// +// Zip CPU configuration defines +// +// +#define ZIP_USER 0 // Assume we are in supervisor mode +#define ZIP_MULTIPLY 1 // Assume we have multiply instructions +#define ZIP_DIVIDE 1 // Assume we have divide instructions +#define ZIP_FPU 0 // Assume we have no floating point instructions +#define ZIP_PIPELINED 1 // Assume our instructions are pipelined +#define ZIP_VLIW 1 // Assume we have the VLIW feature +#define ZIP_ATOMIC ((ZIP_PIPELINED)&&(ZIP_VLIW)) +#define ZIP_PIC 0 // Attempting to produce PIC code, with GOT +#define ZIP_HAS_DI 1 +// Should we use the peephole optimizations? +#define ZIP_PEEPHOLE 1 // 0 means no peephole optimizations. +// How about the new long multiply instruction set? 
+#define ZIP_LONGMPY 1 // 0 means use the old instruction set + +// Zip has 16 registers in each user mode. +// Register 15 is the program counter (PC) +// Register 14 is the condition codes (CC) +// Register 13 is the stack pointer (SP) +// Register 12 (may be) the Global Offset Table pointer (GOT) +// Register 0 (may be) the return address pointer +// Registers 16-31 may only be used in supervisor mode. +#define is_ZIP_GENERAL_REG(REGNO) ((REGNO)<13) +#define is_ZIP_REG(REGNO) ((REGNO)<16) + +// #define zip_FP_PSEUDO 16 +#define zip_PC 15 +#define zip_CC 14 +#define zip_SP 13 +#define zip_FP 12 +#define zip_GOT 11 +#define zip_AP 10 +#define zip_R1 1 +#define zip_R0 0 + +#define ZIP_FIRST_ARG_REGNO 1 +#define ZIP_LAST_ARG_REGNO 5 +#define NUM_ARG_REGS (ZIP_LAST_ARG_REGNO-ZIP_FIRST_ARG_REGNO+1) +#define MAX_PARM_REGS (ZIP_LAST_ARG_REGNO-ZIP_FIRST_ARG_REGNO+1) + +/* The overall framework of an assembler file */ + +#define ASM_COMMENT_START ";" +#define ASM_APP_ON "" +#define ASM_APP_OFF "" + +#define FILE_ASM_OP "\t.file\n" + +/* Output and Generation of Labels */ +#define GLOBAL_ASM_OP "\t.global\t" + +#undef BITS_PER_UNIT +#define BITS_PER_UNIT (32) + +/* Assembler Commands for Alignment */ +#define ASM_OUTPUT_ALIGN(STREAM,POWER) \ + { int pwr = POWER; fprintf(STREAM, "\t.p2align %d\n", (pwr<2)?2:pwr); } + + +/* A C compound statement to output to stdio stream STREAM the assembler syntax + * for an instruction operand X. */ +#define PRINT_OPERAND(STREAM, X, CODE) zip_print_operand(STREAM, X, CODE) +#define PRINT_OPERAND_ADDRESS(STREAM, X) zip_print_operand_address(STREAM, X) + +/* Passing arguments in registers */ +#define FUNCTION_VALUE_REGNO_P(REGNO) ((REGNO)==zip_R1) + +/* Define how to find the value returned by a function. VALTYPE is the data + * type of the value (as a tree). If the precise function being called is known + * FUNC is its FUNCTION_DECL; otherwise, FUNC is 0. 
*/ +#define FUNCTION_VALUE(VALTYPE, FUNC) gen_rtx_REG(TYPE_MODE(VALTYPE), zip_R1) + +/* Define how to find the value returned by a library function assuming the + * value has mode MODE. + */ +#define LIBCALL_VALUE(MODE) gen_rtx_REG(MODE, zip_R1) + + +/* STACK AND CALLING */ + + +/* Define this macro as a C expression that is nonzero for registers that are + * used by the epilogue or the return pattern. The stack and frame pointer + * registers are already assumed to be used as needed. + */ +#define EPILOGUE_USES(R) (R == RETURN_ADDRESS_REGNUM) + + +/* The best alignment to use in cases where we have a choice. */ +#define FASTEST_ALIGNMENT BITS_PER_WORD + +/* MAX_FIXED_MODE_SIZE -- An integer expression for the size in bits of the + * largest integer machine mode that should actually be used. All integer + * machine modes of this size and smaller can be used for structures and unions + * with the appropriate sizes. If this macro is undefined, + * GET_MODE_BITSIZE(DImode) is assumed. + * + * ZipCPU -- The default looks good enough for us. + */ + +/* Generate Code for Profiling + */ +#define FUNCTION_PROFILER(FILE,LABELNO) (abort(), 0) + + +/* A C expression which is nonzero if register number NUM is suitable for use + * as an index register in operand addresses. + */ +#define REGNO_OK_FOR_INDEX_P(NUM) 0 + + +/* A C compound statement with a conditional 'goto LABEL;' executed if X + * (an RTX) is a legitimate memory address on the target machine for a memory + * operand of mode MODE. + */ +/* 17.03 Controlling the Compilation Driver, 'gcc' */ +// DRIVER_SELF_SPECS +// OPTION_DEFAULT_SPECS +// CPP_SPEC +// CPLUSPLUS_CPP_SPEC +// CC1_SPEC +// CC1PLUS_SPEC +/* ASM_SPEC ... A C string constant that tells the GCC driver program options + * to pass to the assembler. It can also specify how to translate options you + * give to GCC into options for GCC to pass to the assembler. See the file + * 'sun3.h' for an example of this. 
+ * + * Do not define thismacro if it does not need to do anything. + */ +// #undef ASM_SPEC +// ASM_FINAL_SPEC +// ASM_NEEDS_DASH_FOR_PIPED_INPUT + +/* LINK_SPEC ... A C string constant that tells the GCC driver program options + * to pass to the linker. It can also specify how to translate options you give + * to GCC into options for GCC to pass to the linker. + * + * Do not define this macro if it does not need to do anything. + */ + +/* LIB_SPEC ... Another C string constant very much like LINK_SPEC. The + * difference between the two is that LIB_SPEC is used at the end of the + * command given to the linker. + * + * If this macro is not defined, a default is provided that loads the standard + * C library from the usual place. See 'gcc.c'. + */ +#undef LIB_SPEC +// #define LIB_SPEC "%{!g:-lc} %{g:-lg} -lzip" +#define LIB_SPEC "" + +/* LIBGCC_SPEC ... Another C string constant that tells the GCC driver program + * hoow and when to place a reference to 'libgcc.a' into the linker command + * line. This constant is placed both before and after the value of LIB_SPEC. + * + * If this macro is not defined, the GCC driver provides a default that passes + * the string '-lgcc' to the linker. + */ +#undef LIBGCC_SPEC +#define LIBGCC_SPEC "" + +/* REAL_LIBGCC_SPEC ... By default, if ENABLE_SHARED_LIBGCC is defined, the + * LIBGCC_SPEC is not directly used by the driver program but is instead + * modified to refer to different versions of 'libgcc.a' depending on the + * values of the command line flags '-static', '-shared', '-static-libgcc', + * and '-shared-libgcc'. On targets where these modifications are + * inappropriate, define REAL_LIBGCC_SPEC instead. REAL_LIBGCC_SPEC tells the + * driver how to place a reference to 'libgcc' on the link command line, but + * unlike LIBGCC_SPEC, it is used unmodified. + */ +#define REAL_LIBGCC_SPEC "" + +// USE_LD_AS_NEEDED +// LINK_EH_SPEC + +/* STARTFILE_SPEC ... Another C string constant used much like LINK_SPEC. 
The + * difference between the two is that STARTFILE_SPEC is used at the very + * beginning of the command given to the linker. + * + * If this macro is not defined, a default is provided that loads the standard + * C startup file from the usual place. See 'gcc.c' + */ +#undef STARTFILE_SPEC +#define STARTFILE_SPEC "" + +/* ENDFILE_SPEC ... Another C string constant used much like LINK_SPEC. The + * difference between the two is that ENDFILE_SPEC is used at the very end + * of the command given to the linker. + * + * Do not define this macro if it does not do anything. + */ +// #undef ENDFILE_SPEC +// #define ENDFILE_SPEC "" + +// THREAD_MODEL_SPEC +// SYSROOT_SUFFIX_SPEC +// SYSROOT_HEADERS_SUFFIX_SPEC +// EXTRA_SPECS +// LINK_LIBGCC_SPECIAL_1 +// LINK_GCC_C_SEQUENCE_SPEC +// LINK_COMMAND_SPEC +// TARGET_ALWAYS_STRIP_DOTDOT +// MULTILIB_DEFAULTS +// RELATIVE_PREFIX_NOT_LINKDIR +// MD_EXEC_PREFIX +// STANDARD_STARTFILE_PREFIX +// STANDARD_STARTFILE_PREFIX_1 +// STANDARD_STARTFILE_PREFIX_2 +// MD_STARTFILE_PREFIX +// MD_STARTFILE_PREFIX_1 +// INIT_ENVIRONMENT +// LOCAL_INCLUDE_DIR +#undef LOCAL_INCLUDE_DIR + +// NATIVE_SYSTEM_HEADER_COMPONENT +// INCLUDE_DEFAULTS + +/* 17.03 Run-time Target Specification */ + +/* TARGET_CPU_CPP_BUILTINS() ... This function-like macro expands to a block of + * code that defines built-in preprocessor macros and assertions for the target + * CPU, using the functions builtin_define, builtin_define_std, and + * builtin_assert. When the front end calls this macro it provides a trailing + * semicolon, and since it has finished command line option proccessing your + * code can use those results freely. + * + * ZipCPU --- We should probably capture in this macro what capabilities the + * command line parameters we've been given indicate that our CPU has. That + * way, code can be adjusted depending upon the CPU's capabilities. 
+ */ +#define TARGET_CPU_CPP_BUILTINS() \ + { builtin_define("__ZIPCPU__"); \ + if (ZIP_FPU) builtin_define("__ZIPFPU__"); \ + if (ZIP_ATOMIC) builtin_define("__ZIPATOMIC__"); \ + } + // If (zip_param_has_fpu) builtin_define("__ZIPFPU__"); + // If (zip_param_has_div) builtin_define("__ZIPDIV__"); + // If (zip_param_has_mpy) builtin_define("__ZIPMPY__"); + // If (zip_param_has_lock) builtin_define("__ZIPLOCK__"); + // If (zip_param_supervisor) builtin_define("__ZIPUREGS__"); + // If (we support int64s) builtin_define("___int64_t_defined"); + +/* TARGET_OS_CPP_BUILTINS() ... Similarly to TARGET_CPU_CPP_BUILTINS but this + * macro is optional and is used for the target operating system instead. + */ + +/* Option macros: (we need to define these eventually ... ) + * + * TARGET_HANDLE_OPTION + * TARGET_HANDLE_C_OPTION + * TARGET_OBJ_CONSTRUCT_STRING_OBJECT + * TARGET_OBJ_DECLARE_UNRESOLVED_CLASS_REFERENCE + * TARGET_OBJ_DECLARE_CLASS_DEFINITION + * TARGET_STRING_OBJECT_REF_TYPE_P + * TARGET_CHECK_STRING_OBJECT_FORMAT_ARG + * TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE(VOID) + * C_COMMON_OVERRIDE_OTPTIONS + * TARGET_OPTION_OPTIMIZATION_TABLE + * TARGET_OPTION_INIT_STRUCT + * TARGET_OPTION_DEFAULT_PARAMS + */ + +/* SWITCHABLE_TARGET + * + * Zip CPU doesn't need this, so it defaults to zero. No need to change it + * here. + */ + +/* TARGET_FLOAT_EXCEPTIONS_ROUNDING_SUPPORTED_P(VOID) ... Returns true if the + * target supports IEEE 754 floating-point exceptions and rounding modes, false + * otherwise. This is intended to relate to the float and double types, but not + * necessarily "long double". By default, returns true if the adddf3 + * instruction pattern is available and false otherwise, on the assumption that + * hardware floating point supports exceptions and rounding modes but software + * floating point does not. 
+ * + * ZipCPU floating point is barely going to be functional, I doubt it will + * support all of these bells and whistles when full functionality is even + * achieved. Therefore, we won't support these modes. However, we can't just + * set this to zero, so let's come back to this. + */ +// #warning "Wrong answer encoded to date" +// #undef TARGET_FLOAT_EXCEPTIONS_ROUNDING_SUPPORTED_P +// #define TARGET_FLOAT_EXCEPTIONS_ROUNDING_SUPPORTED_P(X) 0 + +/* 17.04 Defining data structures for per-function information */ + +/* INIT_EXPANDERS ... Macro called to initialize any target specific + * information. This macro is called once per function, before generation of + * any RTL has begun. The intention is to allow the initialization of the + * function pointer init_machine_status. + */ +// #warning "I may need to define this to handle function return addresses ..." + +/* 17.05 Storage Layout */ + +/* Storage Layout */ +#define BITS_BIG_ENDIAN 0 // MSB has highest number +#define BYTES_BIG_ENDIAN 1 // 1 if MSB is lowest number +#define WORDS_BIG_ENDIAN 1 // 1 if MSW is lowest number +#define FLOAT_WORDS_BIG_ENDIAN 1 +#define BITS_PER_WORD 32 +// #define MAX_BITS_PER_WORD // defaults to BITS_PER_WORD +#define UNITS_PER_WORD 1 // Storage units in a word, pwr of 2:1-8 +#define MIN_UNITS_PER_WORD 1 // Default is UNITS_PER_WORD +/* POINTER_SIZE ... Width of a pointer in bits. You must specify a value no + * wider than the width of Pmode. If it is not equal to the width of Pmode, + * you must define POINTERS_EXTEND_UNSIGNED. If you do not specify a value the + * default is BITS_PER_WORD. + * + * ZipCPU --- All of our pointers are 32-bits, the width of our address bus. + */ +#define POINTER_SIZE 32 // Ptr width in bits +/* POINTERS_EXTEND_UNSIGNED ... A C expression that determines how pointers + * should be extended from ptr_mode to either Pmode or word_mode. 
It is greater + * than zero if pointers should be zero-extended, zero if they should be sign + * extended, and negative if some other conversion is needed. In the last case, + * the extension is done by the target's ptr_extend instruction. + * + * You need not define this macro if the ptr_mode, Pmode, and word_mode are all + * the same width. + * + * ZipCPU --- While we shouldn't need this, QImode and HImode have the same + * number of bits as SImode. Therefore, one might wish to convert between the + * two. Hence, we specify how we would do that here. + */ +#define POINTERS_EXTEND_UNSIGNED 1 + +/* PROMOTE_MODE(m,unsignedp,type) ... A macro to update m and unsignedp when an + * object whose type is type and which has he specified mode and signedness is + * to be stored in a register. This macro is only called when type is a scalar + * type. + * + * On most RISC machines, which only have operations that operate on a full + * register, define this macro to set m to word_mode if m is an integer mode + * narrower than BITS_PER_WORD. In most cases, only integer modes should be + * widened because wider precision floating-point operations are usually more + * expensive than their narrower counterparts. + * + * For most machines, the macro definition does not change unsigndep. However, + * some machines, have instructions that preferentially handle either signed or + * unsigned quantities of certain modes. For example, on the DEC Alpha, 32-bit + * loads from memory and 32-bit add instructions sign-extend the result to + * 64-bits. On such machines, set unsignedp according to which kind of extension + * is more efficient. + * + * Do not define this macro if it would never modify m. + * + * ZipCPU --- We need to always (if possible) promote everything to SImode where + * we can handle things. HImode and QImode just don't make sense on this CPU. 
+ */ +#define PROMOTE_MODE(M,U,T) if ((GET_MODE_CLASS(M)==MODE_INT)&&(GET_MODE_SIZE(M)<2)) (M)=SImode; + +// TARGET_PROMOTE_FUNCTION_MODE +/* PARM_BOUNDARY ... Normal alignment required for function parameters on the + * stack, in bits. All stack parameters receive at least this much alignment + * regardless of data type. On most machines, this is the same as the size of + * an integer. + */ +#define PARM_BOUNDARY 32 + +/* STACK_BOUNDARY ... Define this macro to the minimum alignment enforced by + * hardware for the stack pointer on this machine. The definition is a C + * expression for the desired alignment (measured in bits). This value is used + * as a default if PREFERRED_STACK_BOUNDARY is not defined. On most machines, + * this should be the same as PARM_BOUNDARY. + */ +#define STACK_BOUNDARY PARM_BOUNDARY + +/* PREFERRED_STACK_BOUNDARY ... Define this ... */ +#define PREFERRED_STACK_BOUNDARY STACK_BOUNDARY + +/* INCOMING_STACK_BOUNDARY ... Define this macro if the incoming stack boundary + * may be different from PREFERRED_STACK_BOUNDARY. This macro must evaluate + * to a value equal to or larger than STACK_BOUNDARY. + */ +#define INCOMING_STACK_BOUNDARY STACK_BOUNDARY + +/* FUNCTION_BOUNDARY ... Alignment required for a function entry point, in bits. + */ +#define FUNCTION_BOUNDARY 32 + +/* BIGGEST_ALIGNMENT ... Biggest alignment that any data type can require on + * this machine, in bits. Note that this is not the biggest alignment that is + * supported, just the biggest alignment that, when violated, may cause a fault. + */ +#define BIGGEST_ALIGNMENT 32 + +/* MALLOC_ABI_ALIGNMENT + */ + +/* ATTRIBUTE_ALIGNED_VALUE + */ + +/* MINIMUM_ATOMIC_ALIGNMENT ... If defined, the smallest alignment, that can be + * given to an object that can be referenced in one operation, without + * disturbing any nearby object. Normally, this is BITS_PER_UNIT, but may be + * larger on machines that don't have byte or halfword store operations. 
+ */
+#define MINIMUM_ATOMIC_ALIGNMENT BITS_PER_UNIT
+
+/* BIGGEST_FIELD_ALIGNMENT ... Biggest alignment that any structure or union
+ * field can require on this machine, in bits. If defined, this overrides
+ * BIGGEST_ALIGNMENT for structure and union fields only, unless the field
+ * alignment has been set by the __attribute__((aligned(n))) construct.
+ */
+#define BIGGEST_FIELD_ALIGNMENT BITS_PER_UNIT
+
+/* ADJUST_FIELD_ALIGN
+ */
+#define ADJUST_FIELD_ALIGN(A,B) BITS_PER_WORD
+
+/* MAX_STACK_ALIGNMENT
+ */
+#define MAX_STACK_ALIGNMENT BITS_PER_WORD
+
+/* MAX_OFILE_ALIGNMENT
+ */
+
+/* DATA_ALIGNMENT(TYPE, BASIC-ALIGN) ... If defined, a C expression to compute
+ * the alignment for a variable in the static store. TYPE is the data type, and
+ * BASIC-ALIGN is the alignment that the object would ordinarily have. The
+ * value of this macro is used instead of that alignment to align the object.
+ *
+ * If this macro is not defined, then BASIC-ALIGN is used.
+ *
+ * ZipCPU -- in hindsight, if this macro is not defined then the compiler is
+ * broken. So we define it to be our fastest alignment, or 32-bits.
+ */
+#define DATA_ALIGNMENT(TYPE, ALIGN) BITS_PER_WORD
+
+
+/* DATA_ABI_ALIGNMENT(TYPE,BASIC-ALIGN)
+ */
+
+/* CONSTANT_ALIGNMENT(CONST, BASIC-ALIGN) ... If defined, a C expression to
+ * compute the alignment given to a constant that is being placed in memory.
+ * CONST is the constant and BASIC-ALIGN is the alignment that the object
+ * would ordinarily have. The value of this macro is used instead of that
+ * alignment to align the object.
+ *
+ * If this macro is not defined, then BASIC-ALIGN is used.
+ *
+ * ZipCPU -- in hindsight, if this macro is not defined then the compiler is
+ * broken. We'll define it as above.
+ *
+ */
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) BITS_PER_WORD
+
+/* LOCAL_ALIGNMENT(TYPE,BASIC-ALIGN) ... If defined ...
+ */
+#define LOCAL_ALIGNMENT(TYP,ALIGN) BITS_PER_WORD
+
+/* TARGET_VECTOR_ALIGNMENT
+ */
+
+/* STACK_SLOT_ALIGNMENT
+ */
+#define STACK_SLOT_ALIGNMENT(T,M,B) BITS_PER_WORD
+
+/* LOCAL_DECL_ALIGNMENT(DECL)
+ */
+#define LOCAL_DECL_ALIGNMENT(DECL) BITS_PER_WORD
+
+/* MINIMUM_ALIGNMENT
+ */
+#define MINIMUM_ALIGNMENT(EXP,MOD,ALIGN) BITS_PER_WORD
+
+/* EMPTY_FIELD_BOUNDARY
+ * Alignment of field after 'int : 0' in a structure.
+ */
+#define EMPTY_FIELD_BOUNDARY BITS_PER_WORD
+
+/* STRUCTURE_SIZE_BOUNDARY
+ * ZipCPU -- Every structure's size must be a multiple of 32-bits.
+ */
+#define STRUCTURE_SIZE_BOUNDARY BITS_PER_WORD
+
+/* STRICT_ALIGNMENT ... Set this nonzero if move instructions will actually
+ * fail to work when given unaligned data. If instructions will merely go
+ * slower in that case, define this macro as 0.
+ *
+ * ZipCPU -- Since we have defined our smallest addressable unit to be a 32-bit
+ * word (one byte, on our machine), and since reading any amount of 32-bit words
+ * is easy, then there really are no instructions that will ever fail.
+ */
+#define STRICT_ALIGNMENT 0
+
+/* PCC_BITFIELD_TYPE_MATTERS -- define this if you wish to imitate the way
+ * other C compilers handle alignment of bit-fields and the structures that
+ * contain them.
+ *
+ * The behavior is that the type written for a named bit-field (int, short, or
+ * other integer type) imposes an alignment for the entire structure, as if the
+ * structure really did contain an ordinary field of that type. In addition,
+ * the bit-field is placed within the structure so that it would fit within
+ * such a field, not crossing a boundary for it.
+ *
+ * Thus, on most machines, a named bit-field whose type is written as int would
+ * not cross a four-byte boundary, and would force four-byte alignment for the
+ * whole structure. (The alignment used may not be four bytes; it is controlled
+ * by other alignment parameters.)
+ * + * An unnamed bit-field will not affect the alignment of the containing + * structure. + * + * If the macro is defined, its definition should be a C expression, a non + * zero value for the expression enables this behavior. + * Look at the fundamental type that is used for a bit-field and use that to + * impose alignment on the enclosing structure. struct s{int a:8}; should + * have the same alignment as 'int', not 'char'. + */ +#undef PCC_BITFIELD_TYPE_MATTERS +#define PCC_BITFIELD_TYPE_MATTERS 0 + +/* MAX_FIXED_MODE_SIZE ... An integer expression for the size in bits of the + * largest integer machine mode that should actually be used. All integer + * machine modes of this size or smaller can be used for structures and unions + * with the appropriate sizes. If this macro is undefined, + * GET_MODE_BITSIZE(DImode) is assumed. + * + * ZipCPU ... Get_MOD_BITSIZE(DImode) will be 64, and this is really not the + * size on bits of the largest integer machine mode. However, that's the case + * with most DI implementations: A long is two words, spliced together. We'd + * like to support that eventually, but we need to get there. Hence, let's use + * compile time flag (ZIP_HAS_DI) that we can enable when we're ready. 
+ */ +#if (ZIP_HAS_DI != 0) +#define MAX_FIXED_MODE_SIZE 64 +#else +#define MAX_FIXED_MODE_SIZE 32 +#endif + + +/* 17.06 Layout of Source Language Data Types */ + +#undef CHAR_TYPE_SIZE +#undef SHORT_TYPE_SIZE +#undef INT_TYPE_SIZE +#undef LONG_TYPE_SIZE +#undef LONG_LONG_TYPE_SIZE +// +#define CHAR_TYPE_SIZE 32 +#define SHORT_TYPE_SIZE 32 +#define INT_TYPE_SIZE 32 +#define LONG_TYPE_SIZE 32 +#define LONG_LONG_TYPE_SIZE 64 +// BOOL_TYPE_SIZE defaults to CHAR_TYPE_SIZE +#undef FLOAT_TYPE_SIZE +#undef DOUBLE_TYPE_SIZE +#undef LONG_DOUBLE_TYPE_SIZE +#define FLOAT_TYPE_SIZE 32 +#define DOUBLE_TYPE_SIZE FLOAT_TYPE_SIZE // Zip CPU doesn't support dbls +#define LONG_DOUBLE_TYPE_SIZE 64 // This'll need to be done via emulation +// SHORT_FRAC_TYPE_SIZE +// LONG_FFRACT_TYPE_SIZE +// LONG_LONG_FRACT_TIME_SIZE +#undef SHORT_ACCUM_TYPE_SIZE +#undef ACCUM_TYPE_SIZE +#undef LONG_ACCUM_TYPE_SIZE +#define SHORT_ACCUM_TYPE_SIZE SHORT_TYPE_SIZE +#define ACCUM_TYPE_SIZE INT_TYPE_SIZE +#define LONG_ACCUM_TYPE_SIZE LONG_TYPE_SIZE + +/* LIBGCC2_GNU_PREFIX ... This macro corresponds to the TARGET_GNU_PREFIX target + * hook and should be defined if that hook is overriden to be true. It causes + * function names in libgcc to be changed to use a __gnu_ prefix for their name + * rather than the default __. A port which uses this macro should also arrange + * to use t-gnu-prefix in the libgcc config.host. + * + * ZipCPU -- I see no reason to define and therefore change this behavior. + */ + +/* TARGET_FLT_EVAL_METHOD ... A C expression for the value for FLT_EVAL_METHOD + * in float.h,, assuming, if applicable, that the floating-point control word + * is in its default state. If you do not define this macro the value of + * FLT_EVAL_METHOD will be zero. + * + * ZipCPU --- ??? + */ + +/* WIDEST_HARDWARE_FP_SIZE ... A C expression for the size in bits of the widest + * floating-point format supported by the hardware. 
If you define this macro, + * you must specify a value less than or equal to the value of LONG_DOUBLE_... + * If you do not define this macro, the value of LONG_DOUBLE_TYPE_SIZE is the + * default. + * + * ZipCPU supports 32-bit IEEE floats--IF THE SUPPORT IS COMPILED IN! This + * really needs to be determined, then, based upon a compile time parameter + * where the one compiling the code states whether or not the H/W even has + * floating point support. + * + * For now, we'll assume it does--but once we implement GCC parameters, we'll + * need to change this. + */ +#undef WIDEST_HARDWARE_FP_SIZE +// #warning "Definition needs to change if no FPU present" +#define WIDEST_HARDWARE_FP_SIZE FLOAT_TYPE_SIZE + +/* DEFAULT_SIGNED_CHAR ... An expression whose value is 1 or 0, according to + * whether the type char should be signed or unsigned by default. The user + * can always override this default with the options -fsigned-char and + * -funsigned-char. + * + * ZipCPU--let's go with the default behavior. + */ +#define DEFAULT_SIGNED_CHAR 1 + +/* TARGET_DEFAULT_SHORT_ENUMS(VOID) ... This target hook should return true if + * the compiler should give an enum type only as many bytes as it takes to + * represent the range of possible values of that type. It should return + * false if all enum types should be allocated like int. + * + * The default is to return false. This is what the ZipCPU needs, so we won't + * override it. + */ + +/* SIZE_TYPE ... A C expression for a string describing the name of the data + * type to use for size values. The typedef name size_t is defined using the + * contents of the string. + * + * If you don't define this macro, the default is "long unsigned int". Since + * on the ZipCPU this is a 32-bit number, and all ZipCPU values are 32-bits, + * the default seems perfect for us. + */ +#define SIZE_TYPE "unsigned int" + +/* SIZETYPE ... GCC defines internal types () for expressions dealing with size. 
+ * This macro is a C expression for a string describing the name of the data + * type from which the precision of sizetype is extracted. The string has the + * same restrictions as SIZE_TYPE string. If you don't define this macro, the + * default is SIZE_TYPE --- which seems good enough for us. + */ + +/* PTRDIFF_TYPE ... A C expression for a string describing the name of the data + * type to use for the result of subtracting two pointers. The typedef name + * ptrdiff_t is defined using the contents of the string. See SIZE_TYPE for + * more information. + * + * The default is "long int" which for the ZipCPU is 32-bits---still good enough + * for us. + */ +#define PTRDIFF_TYPE "int" + +/* WCHAR_TYPE ... A C expression for a string describing the name of the data + * type to use for wide characters. The typedef name wchar_t is defined using + * the contents of the string. If you don't define this macro, the default is + * 'int'--good enough for ZipCPU. + */ + +/* WCHAR_TYPE_SIZE ... A C expression for the size in bits of the data type for + * wide characters. This is used in cpp, which cannot make use of WCHAR_TYPE. + */ +#undef WCHAR_TYPE_SIZE +#define WCHAR_TYPE_SIZE 32 + +/* WINT_TYPE ... A C expression for a string describing the name of the data + * type to use for wide characters passed to printf and returned from getwc. + * The typedef name wint_t is defined using the contents of the string. See + * + * ZipCPU -- If you don't define this macro, the default is "unsigned int"--also + * best for us again. + */ + +/* INTMAX_TYPE ... A C expression for a string describing the name of the + * data type that can represent any value of any standard or extended signed + * integer type. The typedef name intmax_t is defined using the contents of + * the string. + * + * If you don't define this macro, the default is the first of "int", "long int" + * or "long long int" that has as much precision as "long long int". + */ + +/* UINTMAX_TYPE ... 
same as INTMAX_TYPE, but for unsigned + */ + +#undef SIG_ATOMIC_TYPE +#if (ZIP_ATOMIC != 0) +#define SIG_ATOMIC_TYPE "int" +#else +#define SIG_ATOMIC_TYPE NULL // We have no atomic types, but registers +#endif +#undef INT8_TYPE +#define INT8_TYPE NULL // We have no 8-bit integer type +#undef INT16_TYPE +#define INT16_TYPE NULL +#undef INT32_TYPE +#define INT32_TYPE "int" +#undef UINT8_TYPE +#define UINT8_TYPE NULL +#undef UINT16_TYPE +#define UINT16_TYPE NULL +#undef UINT32_TYPE +#define UINT32_TYPE "unsigned int" +#undef INT_LEAST8_TYPE +#define INT_LEAST8_TYPE "int" +#undef INT_LEAST16_TYPE +#define INT_LEAST16_TYPE "int" +#undef INT_LEAST32_TYPE +#define INT_LEAST32_TYPE "int" +#undef UINT_LEAST8_TYPE +#define UINT_LEAST8_TYPE "unsigned int" +#undef UINT_LEAST16_TYPE +#define UINT_LEAST16_TYPE "unsigned int" +#undef UINT_LEAST32_TYPE +#define UINT_LEAST32_TYPE "unsigned int" +#undef INT_FAST8_TYPE +#define INT_FAST8_TYPE "int" +#undef INT_FAST16_TYPE +#define INT_FAST16_TYPE "int" +#undef INT_FAST32_TYPE +#define INT_FAST32_TYPE "int" +#undef UINT_FAST8_TYPE +#define UINT_FAST8_TYPE "unsigned int" +#undef UINT_FAST16_TYPE +#define UINT_FAST16_TYPE "unsigned int" +#undef UINT_FAST32_TYPE +#define UINT_FAST32_TYPE "unsigned int" +#undef INTPTR_TYPE +#define INTPTR_TYPE "unsigned int" +#undef UINTPTR_TYPE +#define UINTPTR_TYPE "unsigned int" + +#undef INT64_TYPE +#undef UINT64_TYPE +#undef INT_LEAST64_TYPE +#undef UINT_LEAST64_TYPE +#undef INT_FAST64_TYPE +#undef UINT_FAST64_TYPE + +#if (ZIP_HAS_DI != 0) +#define INT64_TYPE "long int" +#define UINT64_TYPE "long unsigned int" +#define INT_LEAST64_TYPE "long int" +#define UINT_LEAST64_TYPE "long unsigned int" +#define INT_FAST64_TYPE "long int" +#define UINT_FAST64_TYPE "long unsigned int" +#else +#define INT64_TYPE NULL +#define UINT64_TYPE NULL +#define INT_LEAST64_TYPE NULL +#define UINT_LEAST64_TYPE NULL +#define INT_FAST64_TYPE NULL +#define UINT_FAST64_TYPE NULL +#endif + +#define TARGET_PTRMEMFUNC_VBI_LOCATION 
ptrmemfunc_vbit_in_pfn + + +/* 17.07 Register Usage / Register definitions */ + +/* FIRST_PSEUDO_REGISTER ... Number of hardware registers known to the compiler. + * They receive numbers 0 through FIRST_PSEUDO_REGISTER-1; thus the first + * pseudo register's numbrer really is assigned the number + * FIRST_PSEUDO_REGISTER. + * + * ZipCPU---There are 16 registers in the ZipCPU, numbered 0-15 with the CC + * and PC register being numbered 14 and 15 respectively. Therefore, the + * compiler can take register number 16 and above and do whatever it wants + * with it. + */ +#ifdef DEFINE_USER_REGS +# define FIRST_PSEUDO_REGISTER 32 +#else +# ifdef zip_FP_PSEUDO +# define FIRST_PSEUDO_REGISTER (zip_FP_PSEUDO+1) +# else +# define FIRST_PSEUDO_REGISTER 16 +# endif +#endif + +/* FIXED_REGISTERS ... An initializer that says which registers are used for + * fixed purposes all throughout the compiled code and are therefore not + * available for general allocation. These would include the stack pointer, the + * frame pointer (except on machines where that can be used as a general + * register when no frame pointer is needed), the program counter on machines + * where that is considered one of the addressable registers, and any other + * numbered register with a standard use. + * + * This information is expressed as a sequence of numbers, separated by commas, + * and surrounded by braces. The nth number is 1 if register n is fixed, 0 + * otherwise. + * + * For the Zip CPU, we have three fixed registers that are not available for + * general allocation: + * + * SP The stack pointer + * CC The condition codes and CPU state register + * PC The program counter + * + * Other registers, such as FP (the frame pointer) or GBL (the global offset + * table pointer) are registers that we hope will not be so fixed. 
+ */
+#ifdef DEFINE_USER_REGS
+# define FIXED_REGISTERS { 0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1 }
+#else
+# ifdef zip_FP_PSEUDO
+# define FIXED_REGISTERS { 0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1 }
+# else
+# define FIXED_REGISTERS { 0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1 }
+# endif
+#endif
+
+/* CALL_USED_REGISTERS ... like FIXED_REGISTERS but has 1 for each register
+ * that is clobbered (in general) by function calls as well as for fixed
+ * registers. This macro therefore identifies the registers that are not
+ * available for general allocation of values that must live across function
+ * calls.
+ *
+ * If a register has 0 in CALL_USED_REGISTERS, the compiler automatically saves
+ * it on function entry and restores it on function exit, if the register is
+ * used within the function.
+ *
+ * On the Zip CPU, we must save R0 (the return address), and (let's pick) any
+ * register above R5.
+ */
+#ifdef DEFINE_USER_REGS
+# define CALL_USED_REGISTERS { 0,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1 }
+#else
+# ifdef zip_FP_PSEUDO
+# define CALL_USED_REGISTERS { 0,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1 }
+# else
+# define CALL_USED_REGISTERS { 0,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1 }
+# endif
+#endif
+
+/* CALL_REALLY_USED_REGISTERS ... optional macro that, if not defined, defaults
+ * to the value of CALL_USED_REGISTERS.
+ */
+
+/* HARD_REGNO_CALL_PART_CLOBBERED(REGNO,MODE) ... A C expression that is nonzero
+ * if it is not permissible to store a value of mode MODE in hard register REGNO
+ * across a call without some part of it being clobbered. For most machines,
+ * this macro need not be defined. It is only required for machines that do
+ * not preserve the entire contents of a register across a call.
+ *
+ * ZipCPU--Always preserves the entire contents of those registers that are
+ * preserved across calls, so this shouldn't need to be defined.
+ */ +// #define HARD_REGNO_CALL_PART_CLOBBERED(REGNO,MODE) (REGNO==0) + +/* TARGET_CONDITIONAL_REGISTER_USAGE(VOID) ... This hook may conditionally + * modify five variables fixed_regs, call_used_regs, global_regs, reg_names, and + * reg_class_contents, to take into account any dependence of these register + * sets on target flags. The first three of these are of type char[] + * (interpreted as Boolean vectors). global_regs is a const char *[] and + * reg_class_contents is a HARD_REG_SET. Before the macro is called, + * fixed_regs, call_used_regs, reg_class_contents, and reg_names have been + * initialized from FIXED_REGISTERS, CALL_USED_REGISTERS, REG_CLASS_CONTENTS, + * and REGISTER_NAMES, respectively. global_regs has been cleared, and any + * -ffixed-reg, -fcall-used-reg, and -fcall-saved-reg command options have been + * applied. + * + * ZipCPU -- I may need to return and define this depending upon how FP and + * GBL register allocation go. But for now, we'll leave this at its default + * value. + */ +// #warning "Revisit me after FP and GBL allocation" + +/* INCOMING_REGNO(out) ... Define this macro if the target machine has register + * windows. ... + * + * Zip CPU has no register windows. + */ + +/* OUTGOING_REGNO ... same thing. + */ + +/* LOCAL_REGNO ... same thing. + */ + +/* PC_REGNUM ... If the program counter has a register number, define this as + * that register number. Otherwise do not define it. + */ +#define PC_REGNUM zip_PC + + +/* REG_ALLOC_ORDER ... If defined, an initializer for a vector of integers, + * containing the number of hard registers in the order in which GCC should + * prefer to use them (from most preferred to least. + * + * If this macro is not defined, registers are used lowest numbered first (all + * else being equal). + * + * Since the default is the ZipCPU desired case, we won't define this here. + */ + +/* ADJUST_REG_ALLOC_ORDER ... on most machines it is not necessary to define + * this macro, so we won't either. 
+ */ + +/* HONOR_REG_ALLOC_ORDER ... + */ + +/* HONOR_REG_ALLOC_ORDER ... on most machines it is not necessary to define + * this macro, so we won't either. + */ + +/* HARD_REGNO_NREGS(REGNO, MODE) ... A C expression for the number of + * consecutive hard registers, starting at register number REGNO, required to + * hold a value of mode MODE. + * + * On a machine where all registers are exactly one word, a suitable definition + * is given of ((GET_MODE_SIZE(MODE)+UNITS_PER_WORD-1)/UNITS_PER_WORD. + * + * On ZipCPU, we might do + * ((((MODE)==DImode)||((MODE)==DFmode))?2:1) + * but I think the default (above) code should work as well. Hence, let's stick + * with the default, lest someone try to create larger modes (TImode, OImode, + * XImode) and expect us to follow them properly some how. + * + * Okay, now in hind sight, we know that the default doesn't work for our + * architecture, since GET_MODE_SIZE(SImode)=4, not 1. Thus, let's rearrange + * this expression to work in bits rather than in bytes and we'll know more + * of what we are doing. + */ +#undef HARD_REGNO_NREGS +#define HARD_REGNO_NREGS(REGNO, MODE) ((GET_MODE_SIZE(MODE)+UNITS_PER_WORD-1)\ + / (UNITS_PER_WORD)) + +/* HARD_REGNO_NREGS_HAS_PADDING(REGNO,MODE) ... A C expression that is nonzero + * if a value of mode MODE, stored in memory, ends with padding that causes it + * to take up more space than in registers starting at register number REGNO + * (as determined by multiplying GCC's notion of the size of the register when + * containing this mode by the number of registers returned by HARD_REGNO_NREGS) + * By default this is zero. + * + * Zip CPU --- The default looks good enough to me. + */ + +/* HARD_REGNO_NREGS_WITH_PADDING(REGNO, MODE) + * + * ZipCPU --- + */ + +/* REGMODE_NATURAL_SIZE(MODE) -- Define this macro if the natural size of + * registers that hold values of mode mode is not the word size. It is a C + * expression that should give the natural size in bytes for the specified mode. 
+ * It is used by the register allocator to try to optimize its results.
+ *
+ * ZipCPU ---
+ */
+// #define REGMODE_NATURAL_SIZE(MODE) (((MODE)==DImode)?2:1)
+
+/* HARD_REGNO_MODE_OK ... A C expression that is nonzero if it is permissible
+ * to store a value of mode MODE in a hard register number REGNO (or in several
+ * registers starting with that one). For a machine where all registers are
+ * equivalent, a suitable definition is '1'. You need not include code to check
+ * for the numbers of fixed registers, because the allocation mechanism
+ * considered them to be always occupied.
+ *
+ * ZipCPU --- As long as you are already avoiding the fixed registers, the
+ * suitable default definition mentioned above should be sufficient.
+ */
+#undef HARD_REGNO_MODE_OK
+#define HARD_REGNO_MODE_OK(R,M) (R<zip_CC)
+
+/* HARD_REGNO_RENAME_OK(FROM,TO) ... A C expression that is nonzero if it is
+ * okay to rename a hard register FROM to another hard register TO. One common
+ * use of this macro is to prevent renaming of a register to another register
+ * that is not saved by a prologue in an interrupt handler. The default is
+ * always nonzero.
+ *
+ * ZipCPU --- The default looks good enough to us.
+ */
+#undef HARD_REGNO_RENAME_OK
+#define HARD_REGNO_RENAME_OK(FROM,TO) ((is_ZIP_GENERAL_REG(FROM))&&(is_ZIP_GENERAL_REG(TO)))
+
+
+/* MODES_TIEABLE_P(M1, M2) ... A C expression that is nonzero if a value of mode
+ * M1 is accessible in mode M2 without copying.
+ *
+ * ZipCPU --- well, that's true for us (although we support scant few modes) ...
+ * so let's set it to one.
+ */
+#define MODES_TIEABLE_P(M1,M2) 1
+
+/* TARGET_HARD_REGNO_SCRATCH_OK(REGNO)
+ * This target hook should return true if it is OK to use a hard register
+ * REGNO as a scratch register in peephole2. One common use of this macro is
+ * to prevent using of a register that is not saved by a prologue in an
+ * interrupt handler. The default version of this hook always returns true.
+ * + * ZipCPU --- the default works for us as well. If you are in an interrupt + * context, you have an entirely new set of registers (the supervisor set), so + * this is a non-issue. + */ + +/* AVOID_CCMODE_COPIES ... define this macro if the compiler should avoid + * copies to/from CCmode register(s). You should only define this macro if + * support for copying to/from CCmode is incomplete. + * + * ZipCPU --- CCmode register copies work like any other, so we'll keep with the + * default definition. + */ + +/* STACK_REGS ... Define this if the machine has any stack-like registers. + * + * Zip CPU has no stack-like registers, as their definition is different from + * the ZipCPU stack pointer register. + */ + +// #define ZIP_REG_BYTE_SIZE 1 + +/* 17.08 Register Classes */ + +/* enum reg_class ... An enumerate type that must be defined with all the + * register class names as enumerated values. NO_REGS must be first. ALL_REGS + * must be the last register class, followed by one more enumerated value, + * LIM_REG_CLASSES, which is not a register class but rather tells how many + * classes there are. + * + * ZipCPU --- We'll defined register 0-13 as general registers, 14-15 in + * all_regs, and go from there. + */ +enum reg_class { + NO_REGS, GENERAL_REGS, +#ifdef DEFINE_USER_REGS + USER_REGS, +#endif + ALL_REGS, LIM_REG_CLASSES +}; + +/* N_REG_CLASSES ... the number of distinct register classes, defined as follows + */ +#define N_REG_CLASSES (int)LIM_REG_CLASSES + +/* REG_CLASS_NAMES ... An initializer containing the names of the register + * classes as C string constants. These names are used in writing some of the + * debugging dumps. + */ +#ifdef DEFINE_USER_REGS +# define REG_CLASS_NAMES { "NO_REGS", "GENERAL_REGS", "USER_REGS", "ALL_REGS" } +#else +# define REG_CLASS_NAMES { "NO_REGS", "GENERAL_REGS", "ALL_REGS" } +#endif + +/* REG_CLASS_CONTENTS ... An initializer containing the contents of the register + * classes, as integers which are bit masks. 
The nth integer specifies the + * contents of class n. That way the integer mask is interpreted as that + * register r is in the class if (mask&(1<<r)) is 1. + * + * When the machine has more than 32 registers ... that's not us. + * + * ZipCPU --- This is straight forward, three register classes, etc. + */ +#ifdef DEFINE_USER_REGS +# define REG_CLASS_CONTENTS { { 0x000000000}, {0x00003fff}, {0x0ffff0000l}, {0x0ffffffffl} } +#else +# ifdef zip_FP_PSEUDO +# define REG_CLASS_CONTENTS { { 0x00000}, {0x13fff}, {0x1ffff} } +# else +# define REG_CLASS_CONTENTS { { 0x00000}, {0x03fff}, {0x0ffff} } +# endif +#endif + +/* REGNO_REG_CLASS ... A C expression whose value is a register class + * containing hard register REGNO. In general there is more than one such + * class; Choose a class which is minimal, meaning that no smaller class also + * contains the register. + */ +#undef REGNO_REG_CLASS +#ifdef zip_FP_PSEUDO +#define REGNO_REG_CLASS(R) (is_ZIP_REG(R)?((((R)<=13)||((R)==zip_FP_PSEUDO))?GENERAL_REGS:ALL_REGS):NO_REGS) +#else +#define REGNO_REG_CLASS(R) (is_ZIP_REG(R)?((R<=13)?GENERAL_REGS:ALL_REGS):NO_REGS) +#endif + +/* BASE_REG_CLASS ... A macro whose definition is the name of the class to which + * a valid base register must belong. A base register is one used in an address + * which is the register value plus a displacement. + */ +#undef BASE_REG_CLASS +#define BASE_REG_CLASS GENERAL_REGS + +/* MODE_BASE_CLASS(MODE) ... This is a variation of the BASE_REG_CLASS macro + * which allows the selection of a bse register in a mode dependent manner. If + * mode is VOIDmode then it should return the same value as BASE_REG_CLASS. + */ +#undef MODE_BASE_CLASS +#define MODE_BASE_CLASS(MODE) GENERAL_REGS + +/* MODE_BASE_REG_REG_CLASS(MODE) ... A C expression whose value is the register + * class to which a valid base register must belong in order to be used in a + * base plus index register address. 
You should define this macro if base plus + * index addresses have different requirements than other base register uses. + * + * Zip CPU does not support the base plus index addressing mode, thus ... + */ +// #undef MODE_BASE_REG_REG_CLASS +// #define MODE_BASE_REG_REG_CLASS(MODE) NO_REGS + +/* INDEX_REG_CLASS ... A macro whose definition is the name of the class to + * which a valid index register must belong. An index register is one used in + * an address where its value is either multiplied by a scale factor or added + * to another register (as well as added to a displacement). + * + * ZipCPU -- Has no index registers. + */ +#undef INDEX_REG_CLASS +#define INDEX_REG_CLASS NO_REGS + +/* REGNO_OK_FOR_BASE_P(NUM) ... A C expression which is nonzero if register + * number num is suitable for use as a base register in operand addresses. + */ +#undef REGNO_OK_FOR_BASE_P +# define REGNO_OK_FOR_BASE_P(NUM) ((NUM>=FIRST_PSEUDO_REGISTER)||(NUM != zip_CC)) + +/* REGNO_MODE_OK_FOR_BASE_P ... A C expressison that is just like + * REGNO_OK_FOR_BASE_P, except that that expression may examine the mode of the + * memory reference in MODE. You should define this macro if the mode of the + * memory reference affects whether a register may be used as a base register. + * + * ZipCPU --- the mode doesn't affect anything, so we don't define this. + */ + +/* REGNO_MODE_OK_FOR_REG_BASE_P(NUM, MODE) ... base plus index operand + * addresses, accessing memory in mode mode. + * + * Use of this macro is deprecated. + */ + +/* REGNO_MODE_CODE_OK_FOR_BASE_P(N,M,AS,OC,IC) ... A C expression which is + * nonzero if a register number N is suitable for use as a base register in + * operand addresses, accessing memory in mode M in address space AS. This is + * similar to REGNO_MODE_OK_FOR_BASE_P, except that the expression may examine + * the context in which the register appears in the memory reference. + * + * ZipCPU---We aren't specific in how we use our registers. 
+ */ +#define REGNO_MODE_CODE_OK_FOR_BASE_P(N,M,AS,OC,IC) REGNO_OK_FOR_BASE_P(N) + +/* REGNO_OK_FOR_INDEX_P(REGNO) ... A C expression which is nonzero if register + * num is suitable for use as an index register in opernad addressess. It may + * be either a suitable hard register or a pseudo register that has been + * allocated such as a hard register. + * + * ZipCPU has no index registers, therefore we declare this to be zero. + */ +#undef REGNO_OK_FOR_INDEX_P +#define REGNO_OK_FOR_INDEX_P(REGNO) 0 + +/* TARGET_PREFERRED_RENAME_CLASS(RCLASS) ... A target hook that places + * additional preference on the register class to use when it is necessary to + * rename a register in class RCLASS to another class, or perhaps NO_REGS, if no + * preferred register class is found or hook preferred_rename_class is not + * implemented. SOmething returning a more restrictive class makes better code. + * For example, on ARM, thumb-2 instructions using LO_REGS may be smaller than + * instructions using GENERIC_REGS. By returning LO_REGS from + * preferred_rename_class, code size can be reduced. + */ +// #undef TARGET_PREFERRED_RENAME_CLASS +// #define TARGET_PREFERRED_RENAME_CLASS(RCLASS) RCLASS + +/* TARGET_PREFERRED_RELOAD_CLASS(X,RC) ... A target hook that places additional + * restri tions on the register class to use when it is necessary to copy value + * X into a register in class RC. The value is a register class; rehaps RC, or + * perhaps a smaller class. + * + * The default fversion of this hook always returns value of RC argument, which + * sounds quite appropriate for the ZipCPU. + */ + +/* PREFERRED_RELOAD_CLASS(X,CLASS) ... A C expression that places additional + * restrictions on the register class to use when it is necessary to copy + * value X into a register in class CLASS. On many machines, the following + * definition is safe: PREFERRED_RELOAD_CLASS(X,CLASS) (CLASS) + * Sometimes returning a more restrictive class makes better code. 
For example, + * on the 68k, when x is an integer constant that is in range for a moveq + * instruction, the value of this macro is always DATA_REGS as long as CLASS + * includes the data registers. Requiring a data register guarantees that a + * 'moveq' will be used. + * + * ZipCPU --- you can't load certain values into all members of ALL_REGS. For + * example, loading (sleep and !gie) into the CC register could halt the CPU. + * Hence, we only allow loads into the GENERAL_REG class. + */ +#define PREFERRED_RELOAD_CLASS(X, CLASS) GENERAL_REGS + +/* TARGET_PREFERRED_OUTPUT_RELOAD_CLASS(RTX,RCLASS) ... Like TARGET_PREFERRED_.. + * RELOAD_CLASS, but for output instead of input reloads. + * + * ZipCPU --- there's gotta be a valid default behaviour for this. + */ + +/* LIMIT_RELOAD_CLASS(MODE, CL) ... + * + * Don't define this macro unless the target machine has limitations which + * require the macro to do something nontrivial. ZipCPU doesn't, so we won't. + */ + +/* TARGET_SECONDARY_RELOAD + * SECONDARY_ ... + * Don't think we need these ... + */ + +/* CLASS_MAX_NREGS(CLASS,MODE) ... A C expression for the maximum number of + * consecutive registers of class CLASS needed to hold a value of mode MODE. + * + * This is closely related to the macro HARD_REGNO_NREGS. In fact, the value + * of the macro CLASS_MAX_REGS(CL,M) should be the maximum value of + * HARD_REGNO_NREGS(REGNO,MODE) for all REGNO values in the class CLASS. + * + * This macro helps control the handling of multiple word values in the reload + * pass. + * + * ZipCPU --- We'll just use HARDNO_REGNO_NREGS, since CLASS is independent for + * us. We'll also choose register R0, since ... well, since it simply doesn't + * matter. (HARD_REGNO_NREGS ignores this anyway) + */ +#define CLASS_MAX_NREGS(CLASS, MODE) HARD_REGNO_NREGS(0,MODE) + +/* CANNOT_CHANGE_MODE_CLASS + * ??? + */ + +/* TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS + */ + +/* TARRGET_LRA_P + * Default looks good. 
+ */ + +/* TARGET_REGISTER_PRIORITY(INT) ... A target hook which returns the register + * priority number to which the register HARD_REGNO belongs to. The bigger the + * number + * + * The default version of this target hook returns always zero---good enough for + * the ZipCPU. + */ + +/* TARGET_REGISTER_USAGE_LEVELING_P(VOID) ... A target hook which returns true + * if we need register usage leveling. That means if a few hard registers are + * equally good for the assignment, we choose the least used hard register. The + * register usage leveling may be profitable for some targets. Don't use usage + * leveling for targets with conditional execution or targets with big register + * files as it hurts if-conversion and cross-jumping optimizations. The default + * version of this target hook returns always false. + * + * ZipCPU --- Default is the right answer. + */ + +/* TARGET_DIFFERENT_ADDR_DISPLACEMENT_P ... + * Default looks good. + */ + +/* TARGET_CANNOT_SUBSTITUTE_MEM_EQUIV_P ... + * Default looks good. + */ + +/* TARGET_LEGITIMIZE_ADDRESS_DISPLACEMENT .... + */ + +/* TARGET_SPILL_CLASS + * + * ZipCPU --- If we were running in supervisor mode only, this might be the + * user set of registers. However, we're not building for that mode (now), + * so we'll leave this at the default of NO_REGS. + */ + +/* TARGET_CSTORE_MODE(ICODE) ... Defines the machine mode to use for the + * boolean result of conditional store patterns. The OCIDE argument is the + * instruction code for the cstore being performed. Not defining this hook is + * the same as accepting the mode encoded into operand 0 of the cstore expander + * patterns. + * + * ??? ZipCPU --- I don't follow this documentation. We'll leave this at the + * default therefore. + */ + +/* 17.09 Stack Layout and Calling Conventions */ + + +/* STACK_GROWS_DOWNWARD ... Define this macro if pushing a word onto the stack + * moves the stack pointer to a smaller address, and false otherwise. + * + * ZipCPU ... 
well, our stack does grow downward, but it doesn't do so auto- + * magically. We have to move the stack pointer ourselves. However, since this + * is our convention, we'll define it as such. + */ +#undef STACK_GROWS_DOWNWARD +#define STACK_GROWS_DOWNWARD 1 + +/* STACK_PUSH_CODE ... This macro defines the operation used when something is + * pushed on the stack. In RTL, a push operation will be + * (set (mem( STACK_PUSH_CODE(reg sp))) ...) The choiecs are PRE_DEC, POST_DEC, + * PRE_INC, and POST_INC. Which of these is correct depends on the stack + * direction and on whether the stack pointer points to the last item on the + * stack or whether it points to the space for the next item on the stack. + * The default is PRE_DECC when STACK_GROWS_DOWNWARD is true, which is almost + * always right, and PRE_INC otherwise, which is often wrong. + * + * ZipCPU --- None of these is right, so let's leave this at the default and + * see how badly we get mangled. In particular, ZipCPU doesn't have any of the + * PRE_DEC, POST_DEC, PRE_INC, or POST_INC addressing modes used here. + */ + +/* FRAME_GROWS_DOWNWARD ... Define this macro to nonzero if the addresses of + * local variable slots are at negative offsets from the frame pointer. + * + * ZipCPU --- If the frame pointer is defined as the stack pointer upon the + * start of function execution, and that stack pointer grows downward, then + * this should be the case as well. + */ +#undef FRAME_GROWS_DOWNWARD +#define FRAME_GROWS_DOWNWARD 1 +// #define FRAME_GROWS_DOWNWARD 0 // This was ECO32's value + + +/* ARGS_GROW_DOWNWARD ... Define this macro if successive arguments to a + * function occupy decreasing addresses on the stack. + * + * ZipCPU -- we can leave this up to the compiler's preferred implementation, + * it is of no consequence to the hardware. + */ + +/* STARTING_FRAME_OFFSET ... Offset from the frame pointer to the first local + * variable slot to be allocated. 
If FRAME_GROWS_DOWNWARD, find the next slot's + * offset by subtracting the firstt slot's length from STARTING_FRAME_OFFSET. + * Otherwise it is found by adding the length of the first slot to the value + * START_FRAME_OFFSET. + * + * ZipCPU --- I'm not certain on this, let's come back after we look at how + * the code is getting generated. However, the ECO32 code I am copying from + * suggests that 0 is the right value, so we'll use that here. + */ +// #warning "Re-evaluate me" +#define STARTING_FRAME_OFFSET 0 + +/* STACK_ALIGNMENT_NEEDED ... Define to zero to disable final alignment of the + * stack during reload. The nonzero default for this macro is suitable for most + * ports. + * + * ZipCPU --- we'll leave this at the default, although if any alignment code + * shows up on the stack we may need to adjust it. + */ + +/* STACK_POINTER_OFFSET ... Offset from the SP register to the first location at + * which outgoing arguments are placed. If not specified, the default value + * of zero is used. This is the proper value for most machines. + */ +#define STACK_POINTER_OFFSET 0 + +/* FIRST_PARM_OFFSET ... Offset from the argument pointer register to the first + * argument's address. On some machines it may depend on the data type of the + * function. + */ +#define FIRST_PARM_OFFSET(F) 0 + +/* STACK_DYNAMIC_OFFSET(F) ... Offset from the stack pointer register to an item + * dynamically allocated on the stack, e.g., by alloca. The default value for + * this macro is STACK_POINTER_OFFSET plus the length of the outgoing arguments. + * The default is correct for most machines, ... + * + * ZipCPU --- so we'll use it for the ZipCPU. + */ + +/* INITIAL_FRAME_ADDRESS_RTX ... A C expression whose value is RTL representing + * the address of the initial stack frame. This address is passed to + * RETURN_ADDR_RTX and DYNAMIC_CHAIN_ADDRESS. If you don't define this macro, + * a reasonable default value will be used. 
Define this macro in order to make + * frame pointer elimination work in the presence of __builtin_frame_address(C) + * and __builtin_return_address(C) for (C) not equal to zero. + * + * ZipCPU --- Let's try the reasonable default and see what happens. + */ + +/* SETUP_FRAME_ADDRESSES ... A C expression that produces the machine-specific + * code to setup the stack so that arbitrary frames can be accessed. For + * example, on the SPARC, we must flush all of the register windows to the stack + * before we can access arbitrary stack frames. You will seldom need to define + * this macro. The default is to do nothing. + * + * ZipCPU --- which is what we shall do here. + */ + +/* TARGET_BUILTIN_SETJMP_FRAME_VALUE(VOID) ... This target hook should return + * an RTX that is used to store the address of the current frame into the + * builtin setjmp buffer. The default value, virtual_stack_vars_rtx, is correct + * for most machines. One reason you may need to define this target hook is if + * hard_frame_pointer_rtx is the appropriate value on your machine. + * + * ZipCPU --- leave this undefined, since the default value should be correct + * for "most" machines. + */ + +/* FRAME_ADDR_RTX ... most machines do not need to define it. + */ + +/* RETURN_ADDR_RTX(COUNT,FRAMEADDR) ... A C expression whose value is RTL + * representing the value of the return address for the frame COUNT steps up + * from the current frame, after the prologue. FRAMEADDR is the frame pointer + * of the COUNT frame, or the frame pointer of the COUNT-1 frame if + * RETURN_ADDR_IN_PREVIOUS_FRAME is nonzero. The value of the expression must + * always be the correct address when COUNT is nonzero, but may be NULL_RTX if + * there is no way to determine the return address of other frames. + * + * ZipCPU --- I have no idea how we'd do this, so let's just return NULL_RTX. + */ +#undef RETURN_ADDR_RTX +#define RETURN_ADDR_RTX(COUNT,FRAMEADDR) NULL_RTX + +/* RETURN_ADDR_IN_PREVIOUS_FRAME ... 
Define this macro to nonzero value if the + * return address of a particular stack frame is accessed from the frame pointer + * of the previous stack frame. The zero default for this macro is suitable + * for most ports. + * + * ZipCPU---Default works here as well. + */ + +/* INCOMING_RETURN_ADDR_RTX ... A C expression whose value is RTL representing + * the location of the incoming return address at the beginning of any function, + * before the prologue. This RTL is either a REG, indicating that the return + * value is saved in 'REG', or a MEM representing the location in the stack. + * If this RTL is a REG, you should define DWARF_RETURN_COLUMN to + * DWARF_FRAME_REGNUM(REGNO). + * + * ZipCPU --- While our incoming return address could theoretically be in any + * register, our machine description file is going to place it into register + * R0, so that's what we return here. + */ +#undef INCOMING_RETURN_ADDR_RTX +#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG(SImode, zip_R0) + + +/* DWARF_ALT_FRAME_RETURN_COLUMN + */ + +/* DWARF_ZERO_REG ... A C exrpession whose value is an integer giving a DWARF2 + * register number that is considered to always have the value zero. This + * should only be defined if the target has an architected zero register (ZipCPU + * does not), and someone decided it was a good idea to use that register number + * to terminate the stack backtrace. New ports should avoid this (so the + * ZipCPU port will avoid it as well). + * + */ + +/* TARGET_DWARF_HANDLE_FRAME_UNSPEC + */ + +/* INCOMING_FRAME_SP_OFFSET + */ +#define INCOMING_FRAME_SP_OFFSET 0 + +/* ARG_POINTER_CFA_OFFSET + */ + +/* FRAME_POINTER_CFA_OFFSET + */ + +/* CFA_FRAME_BASE_OFFSET + */ + +/* 17.09.02 Exception handling support */ + +/* EH_RETURN_DATA_REGNO(N) ... A C expression whose value is the Nth register + * number used for data by exception handlers, or INVALID_REGNUM if fewer than + * N registers are usable. 
The exception handling library routines communicate + * with the exception handlers via a set of agreed upon registers. Ideally + * these registers should be call clobbered; it is possible to use call-saved + * registers, but may negatively impact code size. The target must support at + * least 2 data registers, but should define 4 if their are enough free + * registers. + * + * You must define this macro if you want to support call frame exception + * handling like that provided by DWARF 2. + */ +#define EH_RETURN_DATA_REGNO(N) (((N<ZIP_FIRST_ARG_REGNO)||(N>ZIP_LAST_ARG_REGNO))?(N-1):INVALID_REGNUM) + +/* EH_RETURN_STACKADJ_RTX ... A C expression whose value is RTL representing + * a location in which to store a stack adjustment to be applied before function + * return. This is used to unwind the stack to an exception handler's call + * frame. It will be assigned zero on code paths that return normally. + * + * Do not define this macro if the stack pointer is saved and restored by the + * regular prolog and epilog code in the call frame itself (which it is for the + * ZipCPU); in this case, the exception handling library routines will update + * the stack location to be restored in place. Otherwise, you must define this + * macro if you want to support call frame exception handling like that provided + * by DWARF 2. + * + */ + +/* EH_RETURN_HANDLER_RTX ... A C expression whose value is RTL representing a + * location in which to store the address of an exception handler to which we + * should return. It will not be assigned on code paths that return normally. + * + * Typcally this is the location in the call frame at which the normal return + * address is stored. For targets that return by popping an address of the + * stack, this might be a memory address just below the target callf rame + * rather than inside the current call frame. 
If defined, + * EH_RETURN_STACKADJ_RTX will have already been assigned, so it may be used + * to calculate the location of the target call frame. + * + * If you want to support call frame exception handling, you must define either + * this macro or the eh_return instruction pattern. + */ +// #warning "I don't know what to do here." + +/* + * + * + * + * REST OF SECTION SKIPPED ... + * + * + * + */ + +/* 17.09.03 Specifying how stack checking is done */ + +/* STACK_CHECK_BUILTIN ... a non-zero value if stack checking is done by the + * configuration files in a machine-dependent manner. You should define this + * macro if stack checking is required by the ABI of your machine or if you + * would like to do stack checking in some more efficient way than the generic + * appraoch. The default value of this macro is zero. + * + * ZipCPU --- The default makes sense for us. + */ +// #define STACK_CHECK_BUILTIN 0 + +/* STACK_CHECK_STATIC_BUILTIN ... A nonzero value if static stack checking is + * done by the configuration files in a machine-dependent manner. You should + * define this macro if you would like to do static stack checking in some more + * efficient way than the generic approach. The default value of this macro + * is zero. + * + * ZipCPU --- The default makes sense for us. + */ + +/* STACK_CHECK_PROBE_INTERVAL_EXP ... An integer specifying the interval at + * which GCC must generate stack probe instructions, defined as 2 raised to this + * interval. You will normally define this macro so that the interval is no + * larger than the size of the "guard pages" at the end of a stack area. The + * default value of 12 (4096-byte interval) is suitable for most systems. + * + * ZipCPU --- Default. + */ + +/* STACK_CHECK_MOVING_SP ... An integer which is non-zero if GCC should move + * the stack pointer page by page when doing probes. 
This can be necessary + * on systems where the stack pointer contains the bottom address of the memory + * area accessible to the executing thread at any point in time. In this + * situation, an alternate signal stack is required in order to be able to + * recover from a stack overflow. The default value of this macro is zero. + * + * ZipCPU -- Default. + */ + +/* STACK_CHECK_PROTECT + */ +/* STACK_CHECK_MAX_FRAME_SIZE + * ... you should normally not change the default value of this macro. + */ +/* STACK_CHECK_FIXED_FRAME_SIZE + * ... you ... will normally use the default of four words. + */ + +/* STACK_CHECK_MAX_VAR_SIZE + * ... you will normally not need to override that default. + */ + +/* 17.09.04 Registers that Address the Stack Frame*/ + +/* STACK_POINTER_REGNUM ... The register number of the stack pointer register, + * which must also be a fixed register according to FIXED_REGISTERS. On most + * machines, the hardware determines which register this is. + */ +#undef STACK_POINTER_REGNUM +#define STACK_POINTER_REGNUM zip_SP + +/* FRAME_POINTER_REGNUM ... The register number of the frame pointer register, + * which is used to access certain automatic variables in the stack frame. On + * some machines, the hardware determines which register this is. On other + * machines you can choose any register you wish for this purpose. + * + * ZipCPU --- While I'd like to dump this pointer, since I don't really see + * a need for it, alloca() requires it. Therefore let's assine a register to + * this purpose and watch what the compiler does with it. + */ +#ifdef zip_FP_PSEUDO +#define FRAME_POINTER_REGNUM zip_FP_PSEUDO +#else +#define FRAME_POINTER_REGNUM zip_FP +#endif + +/* HARD_FRAME_POINTER_REGNUM ... On some machines the offset between the frame + * pointer and starting offset of the automatic variables is not known until + * after register allocation has been done (for example, because the saved + * registers are between these two locations). 
On those machines, define + * FRAME_POINTER_REGNUM the number of a special, fixed register to be used + * internally until the offset is known, and define HARD_FRAME_POINTER_REGNUM + * to be the actual hard register number used for the frame pointer. + * + * Do not define this macro if it would be the same as FRAME_POINTER_REGNUM + * + * ZipCPU --- we do not define this macro. + */ +#if (zip_FP == FRAME_POINTER_REGNUM) +#define HARD_FRAME_POINTER_REGNUM zip_FP +#endif + +/* ARG_POINTER_REGNUM ... The register number of the arg pointer register, which + * is used to access the function's argument list. On some machines, this is + * the same as the frame pointer register. On some machines, the hardware + * determines which register this is. On other machines, you can choose any + * register you wish for this purpose. If this is not the same register as the + * frame pointer register, then you must mark it as a fixed register according + * to FIXED_REGISTERs, or arrange to be able to eliminate it. + * + * ZipCPU --- We really don't want to lose another register to something + * pointless, so let's set this to be the frame pointer register. Especially + * given the ZipCPU's ease of accessing things via offsets of registers, this + * should work for a rather large stack frame. + */ +#define ARG_POINTER_REGNUM FRAME_POINTER_REGNUM + +/* HARD_FRAME_POINTER_IS_FRAME_POINTER ... define this to be a preprocessor + * constant that is nonzero if hard_frame_pointer_rtx and frame_pointer_rtx + * should be the same. The default definition is sufficient for us. + */ + +/* HARD_FRAME_POINTER_IS_ARG_POINTER ... + * ZipCPU doesn't need this macro + */ + +/* RETURN_ADDRESS_POINTER_REGNUM ... The register number of the return address + * pointer register, which is used to access the current function's return + * address from the stack. On some machines, the return address is not at a + * fixed offset from the frame pointer or stack pointer or argument pointer. 
+ * This register can be defined to point to the return address on the stack, and + * then to be converted by ELIMINABLE_REGS into either the frame pointer or the + * stack pointer. + * + * Do not define this macro unless there is no other way to get the return + * address from the stack. + * + * ZipCPU---we need this. + */ +#define RETURN_ADDRESS_REGNUM zip_R0 + + +/* STATIC_CHAIN_REGNUM ... Register numbers used for passing a function's + * static chain pointer. If register windows are used, the register number as + * seen by the called function is STATIC_CHAIN_INCOMING_REGNUM, while the + * register number as seen by the calling function is STATIC_CHAIN_REGNUM. If + * these register are the same, STATIC_CHAIN_INCOMING_REGNUM need not be + * defined. + * + * ZipCPU doesn't have register windows, so we don't need to define this. + */ +// #warning "I have no reason to believe this will even work" +#define STATIC_CHAIN_REGNUM zip_GOT + +/* TARGET_STATIC_CHAIN ... This hook replaces the use of STATIC_CHAIN_REGNUM et + * al for targets that may use different static chain locations for different + * nested functions. This may be required if the target has function attributes + * that affect the calling conventions of the function and those calling + * conventions use different static chain locations. + * + * ZipCPU --- don't need this. + */ +// #define STATIC_CHAIN_REGNUM zip_R11 + + +/* DWARF_FRAME_REGISTERS ... This macro specifies the maximum number of hard + * registers that can be saved in a call frame. This is used to size data + * structures used in DWARF2 exception handling. + * + * Prior to GCC 3.0, this macro was needed in order to establish a stable + * exception handling ABI in the face of adding new hard registers for ISA + * extensions. In GCC 3.0 and later, the EH ABI is insulated from changes in + * the number of hard registers. 
Nevertheless, this macro can still be used to + * reduce the runtime memory requirements of the exception handling routines, + * which can be substantial if the ISA contains a lot of registers that are not + * call-saved. + * + * If this macro is not defined, it defaults to FIRST_PSEUDO_REGISTER. + * + * ZipCPU --- The default is not sufficient. The CC and PC registers need to + * be saved and examined as well in any debug/exception context. Hence, we + * define this to be all of our registers. + */ +#undef DWARF_FRAME_REGISTERS +#define DWARF_FRAME_REGISTERS 16 + +/* PRE_GCC3_DWARF_FRAME_REGISTERS ... This macro is similar to DWARF_FRAME_REG.. + * but is provided for backward compatibility in pre GCC 3.0 compiled code. + * + * If not defined, it defaults to DWARF_FRAME_REGISTERS---which is perfect for + * the ZipCPU. + */ + +/* DWARF_REG_TO_UNWIND_COLUMN(REGNO) ... Define this macro if the target's + * representation for dwarf registers is different than the internal + * representation for unwind column. Given a dwarf register, this macro should + * return the unwind column number to use instead. + * + * ... ??? + */ + +/* DWARF_FRAME_REGNUM(REGNO) ... Define this macro is the target's + * representation for dwarf registers used in .eh_frame or .debug_frame is + * different from that used in other debug info sections. Given a GCC hard + * register number, this macro should return the .eh_frame register number. + * The default is DBX_REGISTER_NUMBER(REGNO). + * + * ZipCPU --- provided we define DBX_REGISTER_NUMBER(REGNO) well, this default + * should still work for us. + */ + +/* DWARF2_FRAME_REG_OUT(REGNO, FOR_EH) ... Define this macro to map register + * numbers held in the call frame info that GCC has collected using + * DWARF_FRAME_REGNO to those that should be output in .debug_frame (for_eh is + * zero) and .eh_frame (for_eh is non-zero). The default is to return REGNO. + * + * ZipCPU --- Default is good enough. + */ + +/* REG_VALUE_IN_UNWIND_CONTEXT ... 
Define this macro if the target stores + * register values as _Unwind_Word type in unwind context. It should be defined + * if target register size is larger than the size of void *. The default + * is to store register values as void *type. + * + * ZipCPU --- Default is what we need. + */ + +/* ASSUME_EXTENDED_UNWIND_CONTEXT ... Define this macro to be 1 if the target + * always uses extended unwind context with version, args_size, and by_value + * fields. If it is undefined, it will always be defined to 1 when REG_VALUE_IN_UNWIND_CONTEXT is defined and 0 otherwise. + * + */ + + +/* 17.09.05 Eliminating Frame Pointer and Arg Pointer */ + +/* TARGET_FRAME_POINTER_REQUIRED(VOID) ... This target hook should return true + * if a function must have and use a frame pointer. This target hook is + * called in the reload pass. If its return value is true, the function will + * have a frame pointer. + * + * This target hook can in principle examine the current function and decide + * according to the facts, but on most machines the constant false or the + * constant true suffices. Use false when the machine allows code to be + * generated with no frame pointer, and doing so saves some time or space. + * Use true when there is no possible advantage to avoiding a frame pointer. + * + * ZipCPU---if we add in a frame pointer, we become register starved. Hence, + * we'll treat this as a constant false--which is also the default value. + */ +#define target_frame_pointer_required zip_frame_pointer_required + +/* INITIAL_FRAME_POINTER_OFFSET ... A C statement to store in the variable + * depth-var the difference between the frame pointer and the stack pointer + * values immediately after the function prologue. The value would be computed + * from information such as the result of get_frame_size() and the tables of + * registers regs_ever_live and call_used_regs. + * + * If ELIMINABLE_REGS is defined, this macro will not be used and need not be + * defined. 
Otherwise, it must be defined even if TARGET_FRAME_POINTER_REQD + * always returns true; in that case you may set depth-var to anything. + * + * ZipCPU --- we intend to set ELIMINABLE_REGS, so this is not necessary. + */ +// #define INITIAL_FRAME_POINTER_OFFSET(DEPTH) (DEPTH) = 0 + + +/* ELIMINABLE_REGS ... If defined, this macro specifies a table of register + * pairs used to eliminate unneeded registers that point into the stack frame. + * If it is not defined, the only elimination attempted by the compiler is to + * replace references to the frame pointer with references to the stack pointer. + * + * On some machines, the position of the argument pointer is not known until + * the compilation is completed. In such a case, a separate hard register + * must be used for the argument pointer. This register can be eliminated by + * replacing it with either the frame pointer or the argument pointer, + * depending on whether or not the frame pointer has been eliminated. + * + * ZipCPU we'll take their suggestion and define this as: + */ +#undef ELIMINABLE_REGS +#ifdef zip_FP_PSEUDO +#define ELIMINABLE_REGS \ + {{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \ + { ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \ + { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \ + { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}} +#else +# if (ARG_POINTER_REGNUM == FRAME_POINTER_REGNUM) +# define ELIMINABLE_REGS \ + {{ FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}} +# else +# define ELIMINABLE_REGS \ + {{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM }, \ + { ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM }, \ + { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}} +# endif +#endif + +/* bool TARGET_CAN_ELIMINATE(FROM,TO) ... This target function should return + * true if the compiler is allowed to try to replace register number FROM with + * register number TO. 
This target hook need only be defined if ELIMINABLE_REGS + * is defined, and will usually return true since most of the cases preventing + * register elimination are things that the compiler already knows about. + * + * ZipCPU ... does the compiler know about my decision as to whether or not + * the frame pointer was needed? Yes it does, but it's kept separately. We'll + * just say everything can be eliminated. + */ +#define TARGET_CAN_ELIMINATE zip_can_eliminate + +/* INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) ... This macro is similar to + * INITIAL_FRAME_POINTER_OFFSET. It specifies the initial difference between + * the specified pair of registers. This macro must be defined if + * ELIMINABLE_REGS is defined. + * + * ZipCPU---We had at one time set this to a default offset of 0. This didn't + * work. It turns out that this is not only the *initial* elimination offset, + * but also the offset along the way. Hence, when a variable needs to be + * spilled to the stack, this offset must change. Reload goes and checks for + * this, and adjusts registers if the offset has changed. Hence, without this, + * we get negative (i.e. illegal) stack offsets. + */ +#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \ + do { (OFFSET) = zip_initial_elimination_offset((FROM), (TO)); } \ + while(0) \ + +/* 17.09.06 Passing function arguments on the stack */ + +/* TARGET_PROMOTE_PROTOTYPES ... Returns true if an argument declared in a + * prototype as an integral type smaller than int should actually be + * passed as an int. In addition to avoiding errors in certain cases of + * mismatch, it also makes for better code on certain machines. The default is + * to not promote prototypes. + * + * Since everything is an int on the ZipCPU, let's promote anything smaller + * (which should still be an int) up to an int anyway. + */ +#undef TARGET_PROMOTE_PROTOTYPES +#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true + +/* PUSH_ARGS ... A C expression. 
If nonzero, push instructions will be used to + * pass outgoing arguments. If the target machine does not have a push + * instruction, set it to zero. That directs GCC to use an alternate strategy: + * to allocate the entire argument block and then store the arguments into it. + * When PUSH_ARGS is nonzero, PUSH_ROUNDING must be defined too. + * + * ZipCPU does not have a push instruction, so we set this to zero. + */ +#undef PUSH_ARGS +#define PUSH_ARGS 0 + +/* PUSH_ARGS_REVERSED ... A C expression. If nonzero, function arguments will + * be evaluated last to first, rather than first to last. If this macro is + * not defined, it defaults to PUSH_ARGS on targets where the stack and args + * grow in opposite directions, and zero otherwise. + * + * ZipCPU---Let's evaluate our arguments first to last. + */ +#define PUSH_ARGS_REVERSED 1 + +/* PUSH_ROUNDING(NPUSHED) ... A C expression that is the number of bytes + * actually pushed onto the stack when an instruction attempts to push + * (NPUSHED) bytes. + * + * ZipCPU---We cannot push bytes. Let's leave this undefined and see what + * happens. + */ +// #warning "No appropriate definition seemed right." + +/* ACCUMULATE_OUTGOING_ARGS ... A C expression. If non-zero, the maximum amount + * of space required for outgoing arguments will be computed and placed into + * crtl->outgoing_args_size. No space will be pushed onto the stack for each call; instead the function prologue should increase the stack frame size by this + * amount. + * + * ZipCPU---This is *cool* and so necessary---it saves an extra two instructions + * each time we try to call a function/routine. Yes, we want and *need* this + * for good performance. I mean, think of it, free performance increase? Who + * could argue with that? + */ +#undef ACCUMULATE_OUTGOING_ARGS +#define ACCUMULATE_OUTGOING_ARGS 1 + + +/* REG_PARM_STACK_SPACCE(FN) ... 
Define this macro if functions should assume
+ * that stack space has been allocated for arguments even when their values
+ * are passed in registers. The value of this macro is the size, in bytes, of
+ * the area reserved for arguments passed in registers for the function
+ * represented by FN, which can be zero if GCC is calling a library function.
+ * The argument FN can be the FUNCTION_DECL, or the type itself of the function.
+ *
+ * This space can be allocated by the caller, or be part of the machine
+ * dependent stack frame: OUTGOING_REG_PARM_STACK_SPACE says which.
+ *
+ * ZipCPU --- Why allocate space you won't use? Let's leave this undefined
+ * therefore.
+ */
+// #undef REG_PARM_STACK_SPACE
+
+
+
+/* INCOMING_REG_PARM_STACK_SPACE(FN) ... Like REG_PARM_STACK_SPACE, but for
+ * incoming register arguments. Define this macro if space guaranteed when
+ * compiling a function body is different to space required when making a call,
+ * a situation that can arise with K&R style function definitions.
+ *
+ */
+
+/* OUTGOING_REG_PARM_STACK_SPACE(FN) ... Define this to a nonzero value if it
+ * is the responsibility of the caller to allocate the area reserved for
+ * arguments passed in registers when calling a function of FN. FN may be NULL
+ * if the function called is a library function.
+ *
+ * ZipCPU---Why allocate space you don't need?
+ */
+#define OUTGOING_REG_PARM_STACK_SPACE(FNTYPE) 0
+
+
+/* STACK_PARMS_IN_REG_PARM_AREA ... Define this macro if REG_PARM_STACK_SPACE
+ * is defined, but the stack parameters don't skip the area specified by it.
+ *
+ * ZipCPU---We didn't define REG_PARM_STACK_SPACE, so we won't define this.
+ */
+
+/* TARGET_RETURN_POPS_ARGS(DECL,FNTYPE,SZ) ... This target hook returns the
+ * number of bytes of its own arguments that a function pops on returning, or 0
+ * if the function pops no arguments and the caller must therefore pop them all
+ * after the function returns. 
+ * + * ZipCPU --- If we define this, we'll lose our gain from + * ACCUMULATE_OUTOING_ARGS. Thus, we leave this undefined. + */ + +/* CALL_POPS_ARGS(CUM) ... A C expression that should indicate the number of + * bytes a call sequence pops off of the stack. It is added to the value of + * RETURN_POPS_ARGS when compiling a function call. CUM is the variable in + * which all arguments to the function have been accumulated. + * + * ZipCPU---The call sequence, by itself, doesn't touch the stack. Therefore + * this is zero. + */ +#undef CALL_POPS_ARGS +#define CALL_POPS_ARGS(CUM) 0 + + +/* 17.09.07 Passing arguments in registers */ + +/* TARGET_FUNCTION_ARG ... Return an RTX indicating whether a function argument + * is passed in a register, and if so, which register. + */ +/* + * This has been poisoned ... so let's not define it anymore and look for + * a better way to do this ... + * + * #define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) (((NAMED) == 0) ? NULL_RTX + * : targetm.calls.must_pass_in_stack(MODE, TYPE) ? NULL_RTX + * : (CUM) > ZIP_LAST_ARG_REGNO ? NULL_RTX + * : gen_rtx_REG(MODE, CUM)) + */ +#define TARGET_FUNCTION_ARG zip_function_arg + + +/* TARGET_MUST_PASS_IN_STACK ... + */ +// #undef TARGET_MUST_PASS_IN_STACK +// #define TARGET_MUST_PASS_IN_STACK zip_must_pass_in_stack + +/* TARGET_FUNCTION_INCOMING_ARG ... Define this hook if the target machine + * has register windows, ... which ZipCPU does not have. + */ + +/* TARGET_USE_PSEUDO_PIC_REG(void) ... This hook should return 1 in case + * pseudo register should be created for pic_offset_table_rtx during function + * expand. + * + * This should be defined by global parameters, isn't it? + */ + +/* TARGET_INIT_PIC_REG(v) ... Perform a target dependent initialization of + * pic_offset_table_rtx. This hook is called at the start of register + * allocation. + * + * ZipCPU---Let's revisit this. + */ +// #warning "Come back and relook at relocations" + +/* TARGET_ARG_PARTIAL_BYTES ... 
This target hook returns the number of bytes
+ * at the beginning of an argument that must be put in registers. The value
+ * must be zero for arguments that are passed entirely in registers or that
+ * are entirely pushed on the stack.
+ */
+// #undef TARGET_ARG_PARTIAL_BYTES
+// #define TARGET_ARG_PARTIAL_BYTES zip_arg_partial_bytes
+
+/* TARGET_PASS_BY_REFERENCE(CUM,MOD,TREE,NAMED) ... This target hook should
+ * return true if an argument at the position indicated by CUM should be passed
+ * by reference. This predicate is queried after target independent reasons
+ * for being passed by reference, such as TREE_ADDRESSABLE(TREE).
+ *
+ */
+// #undef TARGET_PASS_BY_REFERENCE
+// #define TARGET_PASS_BY_REFERENCE zip_pass_by_reference
+
+/* CUMULATIVE_ARGS ... A C type for declaring a variable that is used as the
+ * first argument of 'FUNCTION_ARG' and other related values.
+ *
+ * ZipCPU---We're in trouble if an 'int' won't work, so let's just use that.
+ */
+#define CUMULATIVE_ARGS int
+
+/*
+ * OVERRIDE_ABI_FORMAT
+ */
+
+/* INIT_CUMULATIVE_ARGS ... A C statement (sans semicolon) for initializing the
+ * variable CUM for the state at the beginning of the argument list.
+ *
+ *
+ * ZipCPU---The first argument is passed in register ZIP_FIRST_ARG_REGNO, or
+ * R1 (unless it has been redefined above ...)
+ */
+#define INIT_CUMULATIVE_ARGS(CUM,FNTYPE,LIBNAME,FNDECL,N_NAMED_ARGS) (CUM = 0)
+
+/* INIT_CUMULATIVE_LIBCALL_ARGS
+ * INIT_CUMULATIVE_INCOMING_ARGS
+ *
+ * These default to the last INIT_CUM_ARGS value above.
+ */
+
+/* TARGET_FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) ... This hook updates
+ * the summarizer variable pointed to by CUM to advance past an argument in
+ * the argument list. The values MODE, TYPE, and NAMED describe that
+ * argument. Once this is done, the variable CUM is suitable for analyzing the
+ * following argument with TARGET_FUNCTION_ARG, etc. This hook need not do
+ * anything if the argument in question was passed on the stack. 
The compiler + * knows how to track the amount of stack space used for arguments without + * any special help. + * + * ZipCPU---Here we simply copy from ECO32. + */ +#define TARGET_FUNCTION_ARG_ADVANCE zip_function_arg_advance + +/* + * TARGET_ARG_OFFSET --- not necessary + * FUNCTION_ARG_PADDING --- not necessary, since we shouldn't be padding + * PAD_VARARGS_DOWN --- not necessary, since we shouldn't be padding + * BLOCK_REG_PADDING + * TARGET_FUNCTION_ARG_BOUNDARY + * TARGET_FUNCTION_ARG_ROUND_BOUNDARY + */ + +/* FUNCTION_ARG_REGNO_P(REGNO) ... A C expression that is nonzero if REGNO is + * the number of a hard register in which function arguments are sometimes + * passed. This does not include implicit arguments such as the static chain + * and the structure-value address. On many machines, no registers can be used + * for this purpose since all function arguments are pushed on the stack. + */ +#define FUNCTION_ARG_REGNO_P(r) ((r >= ZIP_FIRST_ARG_REGNO)&&(r<=ZIP_LAST_ARG_REGNO)) + +/* TARGET_SPLIT_COMPLEX_ARG(TYPE) ... This hook should return true if parameter + * of type TYPE are passed as two scalar parameters. By default, GCC will + * attempt to pack complex arguments into the target's word size. Some ABI's + * require complex arguments to be split and treated as their individual + * components. + * + * The default value of this hook is NULL, which is treated as always false, + * and which should be good enough for ZipCPU--which can go either way. + */ + +/* TARGET_BUILD_BUILTIN_VA_LIST ... This hook returns a type node for va_list + * for the target. The default version of the hook returns void*. + * + */ + +/* TARGET_ENUM_VA_LIST_P + */ + +/* TARGET_FN_ABI_VA_LIST ... This hook returns the va_list type of the calling + * convention specified by FN. The default version of this returns va_list_type_node. 
+ */ + +/* TARGET_FN_ABI_VA_LIST + */ + +/* TARGET_CANONICAL_VA_LIST_TYPE + */ + +/* TARGET_GIMPLIFY_VA_ARG_EXPR + */ + +/* TARGET_VALID_POINTER_MODE(MODE) ... Define this to return nonzero if the + * port can handle pointers with machine mode MODE. The default version of this + * hook returns true for both ptr_mode and Pmode. + * + * ZipCPU---if Pmode is properly defined (above, and I think it is), then the + * default behavior is quite appropriate. + */ + +/* TARGET_REF_MAY_ALIAS_ERRNO(REFP) ... Define this to return nonzero if the + * memory reference REF may alias with the system C library errno location. + * The default version of this hook assumes the system C library errno location + * is either a declaration of type int or accessed by dereferencing a pointer + * to int. + * + * ZipCPU --- Default sounds good to me. + */ + + +/* TARGET_SCALAR_MODE_SUPPORTED_P(MODE) ... Define this to return nonzero if + * the port is prepared to handl instructions involving scalar mode MODE. For + * a scalar mode to be considered supported, all the basic arithmetic and + * comparisons must work. + * + * The default version of this hook returns true for any mode required to + * handle the basic C types (as defined by the port). Included here are the + * double-word arithmetic supported by the code in optabs.c. + */ +#undef TARGET_SCALAR_MODE_SUPPORTED_P +#define TARGET_SCALAR_MODE_SUPPORTED_P zip_scalar_mode_supported_p + +/* TARGET_VECTOR_MODE_SUPPORTED_P(MODE) ... Define this to return nonzero if the + * port is prepared to handle instructions involving vector mode MODE. At the + * very least, it must have move patterns for this mode. + * + * ZipCPU---does not support any vector modes. + */ +#undef TARGET_VECTOR_MODE_SUPPORTED_P +#define TARGET_VECTOR_MODE_SUPPORTED_P hook_bool_mode_false + +/* TARGET_ARRAY_MODE_SUPPORTED_P(MODE, NELEMS) ... 
Return true if GCC should + * try to use a scalar mode to store an array of NELEMS elements, given that + * each element has mode MODE. Returning true here overrides the usual MAX_FIXED_MODE limit and allows GCC to use any defined integer mode. + * + * ZipCPU---Sounds good. + */ +// #undef TARGET_ARRAY_MODE_SUPPORTED_P +// #define TARGET_ARRAY_MODE_SUPPORTED_P zip_array_mode_supported_p + +/* TARGET_LIBGCC_FLOATING_MODE_SUPPORTED_P(MODE) ... Define this to return + * nonzero if libgcc provides support for the floating-point mode MODE, which is + * known to pass TARGET_SCALAR_MODE_SUPPORTED_P. The default version of this + * hook returns true for all of SFmode, DFmode, XFmode, and TFmode, if such + * modes exist. + * + * ZipCPU---We only support SFmode and DFmode, but for now only in emulation + * (if we can). Let's allow both of those and see how far we get. + */ +#undef TARGET_LIBGCC_FLOATING_MODE_SUPPORTED_P +#define TARGET_LIBGCC_FLOATING_MODE_SUPPORTED_P zip_libgcc_floating_mode_supported_p + +/* TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P(MODE) ... Define this to return + * nonzero for machine modes for which the port has small register classes. If + * target hook returns nonzero for a given MODE, the compiler will try to + * minimize the lifetime of registers in MODE. The hook may be called with + * VOIDmode as an argument. In this case, the hook is expected to return + * nonzero if it returns nonzero for any mode. + * + * The default version of this hook returns false for any mode. + * + * ZipCPU---Default sounds good. + */ + +/* 17.09.08 How scalar function values are returned */ + +/* TARGET_FUNCTION_VALUE + */ + +/* LIBCALL_VALUE + */ + + +/* 17.09.09 How large values are returned */ + +/* TARGET_RETURN_IN_MEMORY(TYP,FNTYP) ... This target hook should return a + * nonzero value to say to return the function value in memory, just as large + * structures are always returned. 
Here type will be the data type of the value
+ * and FNTYP will be the type of the function doing the returning, or NULL
+ * for libcalls.
+ *
+ */
+#undef TARGET_RETURN_IN_MEMORY
+#define TARGET_RETURN_IN_MEMORY zip_return_in_memory
+
+/* DEFAULT_PCC_STRUCT_RETURN
+ * TARGET_STRUCT_VALUE_RTX
+ * PCC_STATIC_STRUCT_RETURN
+ * TARGET_GET_RAW_RESULT_MODE
+ * TARGET_GET_RAW_ARG_MODE
+ */
+
+
+/* 17.09.10 Caller-Saves Register Allocation */
+/* 17.09.11 Function Entry and Exit */
+// TARGET_ASM_FUNCTION_PROLOGUE
+// TARGET_ASM_FUNCTION_END_PROLOGUE
+// TARGET_ASM_FUNCTION_BEGIN_EPILOGUE
+// TARGET_ASM_FUNCTION_EPILOGUE
+/* EXIT_IGNORE_STACK ... Define this macro as a C expression that is nonzero
+ * if the return instruction or the function epilogue ignores the value of the
+ * stack pointer; in other words, if it is safe to delete an instruction to
+ * adjust the stack pointer before a return from the function.
+ *
+ * The default is 0.
+ *
+ * Note that this macro's value is relevant only for functions for which frame
+ * pointers are maintained. It is never safe to delete a final stack adjustment
+ * in a function that has no frame pointer, and the compiler knows this
+ * regardless of EXIT_IGNORE_STACK.
+ *
+ * ZipCPU -- Thanks to the example of the m68k, and a careful selection of what
+ * our options otherwise could have been, our epilogue code does not use the
+ * stack register at all, but rather starts by moving the frame register into
+ * the stack register.
+ */
+#define EXIT_IGNORE_STACK 1
+// EPILOGUE_USES(regno)
+// EH_USES(regno)
+// TARGET_ASM_OUTPUT_MI_THUNK
+// TARGET_ASM_CAN_OUTPUT_MI_THUNK
+
+/* 17.09.12 Generating code for profiling */
+// FUNCTION_PROFILER
+// PROFILE_HOOK
+// NO_PROFILE_COUNTERS
+// PROFILE_BEFORE_PROLOGUE
+// TARGET_KEEP_LEAF_WHEN_PROFILED
+
+/* 17.09.13 Permitting tail calls */
+
+/* TARGET_FUNCTION_OK_FOR_SIBCALL(DECL,EXP) ... True if it is OK to do sibling
+ * call optimizations for the specified call expression EXP. 
DECL will be the + * called function, or NULL if this is an indirect call. + * + * It is not uncommon for limitations of calling conventions to prevent tail + * calls to functions outside the current unit of translation, or during PIC + * compilation. The hook is used to enforce these restrictions, as the sibcall + * md pattern can not fail, or fall over to a 'normal' call. The criteria for + * successful sibling call optimization may vary greatly between different + * architectures. + * + * ?? What's a sibling call? + */ + +// TARGET_EXTRA_LIVE_ON_ENTRY +// TARGET_SET_UP_BY_PROLOGUE +// TARGET_WARN_FUNC_RETURN + +/* 17.09.14 Stack smashing protection */ +// TARGET_STACK_PROTECT_GUARD +// TARGET_STACK_PROTECT_FAIL +// TARGET_SUPPORTS_SPLIT_STACK + +/* 17.09.15 Miscellaneous register hooks */ + +// TARGET_CALL_FUSAGE_CONTAINS_NON_CALLEE_CLOBBERS + +/* TARGET_CALL_FUSAGE_CONTAINS_NON_CALLEE_CLOBBERS + * ZipCPU --- default is good enough for us. + */ + +/* 17.10 Implementing VARARGS MACROS */ + +/* ... + */ + +/* void TARGET_SETUP_INCOMING_VARARGS(A,M,T,I,S) ... This target hook offers an + * alternative to using __builtin_saveregs and defining the hook TARGET_EXPAND.. + * _BUILTIN_SAVEREGS. Use it to store the anonymous register arguments into the + * stack so that all the arguments appear to have been passed consecutively + * on the stack. Once this is done, you can use the standard implementation + * of varargs that works for machines that pass all their arguments on the + * stack. + */ +// #undef TARGET_SETUP_INCOMING_VARARGS +// #define TARGET_SETUP_INCOMING_VARARGS zip_setup_incoming_varargs + +/* ... + */ + +/* 17.11 Trampolines for Nested Functions */ + +/* TARGET_ASM_TRAMPOLINE_TEMPLATE ... This hook is called by + * assemble_trampoline_template to output, on the stream f, assembler code for + * a block of data that contains the constant parts of a trampoline. This code + * should not include a label--the label is taken care of automatically. 
+ *
+ * ZipCPU -- looks like we need to do this.
+ */
+#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
+#define TARGET_ASM_TRAMPOLINE_TEMPLATE zip_asm_trampoline_template
+
+/* TRAMPOLINE_SECTION ... Return the section into which the trampoline template
+ * is to be placed. The default value is readonly_data_section.
+ *
+ * ZipCPU--default should be good enough.
+ */
+
+/* TRAMPOLINE_SIZE ... A C expression for the size (in bytes) of the trampoline
+ * as an integer.
+ *
+ * ZipCPU--it's three instructions, or 96 bits. However, 32-bits is our minimal
+ * addressable unit, so what size do we offer here? We'll stick with the number
+ * of bytes, but we may need to change this later.
+ *
+ */
+// #warning "May need to redefine trampoline_size in words, not bytes"
+#undef TRAMPOLINE_SIZE
+#define TRAMPOLINE_SIZE 3
+
+/* TRAMPOLINE_ALIGNMENT ... alignment required for trampolines, in bits.
+ *
+ * Well that's well known in ZipCPU --- 32-bits.
+ */
+#undef TRAMPOLINE_ALIGNMENT
+#define TRAMPOLINE_ALIGNMENT 32
+
+/* void TARGET_TRAMPOLINE_INIT(RTX,TREE,RTX CH) ... This hook is called to
+ * initialize a trampoline. m_tramp is an RTX for the memory block for the
+ * trampoline; TREE is the FUNCTION_DECL for the nested function; CH is an
+ * rtx for the static chain value that should be passed to the function when
+ * it is called.
+ *
+ * ZipCPU ... Can we get by without this?
+ */
+#undef TARGET_TRAMPOLINE_INIT
+#define TARGET_TRAMPOLINE_INIT zip_trampoline_init
+
+/* TARGET_TRAMPOLINE_ADJUST_ADDRESS(RTX) ... This hook should perform any
+ * machine-specific adjustment in the address of the trampoline. Its argument
+ * contains the address of the memory block that was passed to
+ * TARGET_TRAMPOLINE_INIT. In case the address to be used for a function call
+ * should be different from the address at which the template was stored, the
+ * different address should be returned; otherwise addr should be returned
+ * unchanged. 
If the hook is not defined, RTX (addr) will be used for function
+ * calls.
+ *
+ * ZipCPU--works for us!
+ */
+
+/* CLEAR_INSN_CACHE(BEG,END) ... If defined, expands to a C expression clearing
+ * the instruction cache in the specified interval. The definition of this
+ * macro would typically be a series of asm statements. Both BEG and END are
+ * pointer expressions.
+ *
+ * ZipCPU --- Ouch! We have no way to do this (yet)!
+ */
+
+/* TRANSFER_FROM_TRAMPOLINE ... Define this macro if trampolines need a special
+ * subroutine to do their work. The macro should expand to a series of asm
+ * statements which will be compiled with GCC. They go in a library function
+ * named __transfer_from_trampoline.
+ *
+ * We may need to rethink trampolines on ZipCPU.
+ */
+
+
+/* 17.12 Implicit Calls to Library Routines */
+
+/* DECLARE_LIBRARY_RENAMES
+ *
+ * ZipCPU: Don't need it.
+ */
+
+/* TARGET_INIT_LIBFUNCS(VOID) ... This hook should declare additional library
+ * routines or rename existing ones, using the functions set_optab_libfunc and
+ * init_one_libfunc defined in optabs.c. init_optabs calls this macro after
+ * initializing all the normal library routines.
+ *
+ * Most ports don't need to define this hook, so we won't either.
+ */
+
+/* TARGET_LIBFUNC_GNU_PREFIX ... If false (the default), internal library
+ * routines start with two underscores. If set to true, these routines start
+ * with __gnu_ instead.
+ *
+ * ZipCPU: No change necessary.
+ */
+
+/* FLOAT_LIB_COMPARE_RETURNS_BOOL(MODE,COMPARISON) ... This macro should return
+ * true if the library routine that implements the floating point comparison
+ * operator comparison in mode mode will return a boolean and false if it will
+ * return a tristate.
+ *
+ * Most ports don't need to define this macro, so Zip CPU won't either.
+ */
+
+/* TARGET_HAS_NO_HW_DIVIDE ... This macro should be defined if the target has no
+ * hardware divide instructions. 
If this macro is defined, GCC will use an + * algorithm which makes use of simple logical and arithmetic operations for + * 64-bit division. If the macro is not defined, GCC will use an algorithm + * which makes use of a 64-bit by 32-bit divide primitive. + * + * Zip CPU, though, doesn't have the 64-bit by 32-bit divide primitive, thus + * we have no HW DIVIDE (for now). + */ +#define TARGET_HAS_NO_HW_DIVIDE + +/* TARGET_EDOM ... The value of EDOM on the target machine, as a C integer + * expression. If you don't define this macro, GCC does not attempt to deposit + * the value of EDOM into errno directly. Look in /usr/include/errno.h to find + * the value of EDOM on your system. + * + * EDOM is the error created when a math argument is out of the domain of the + * function. + * + * ZipCPU: Don't need it---I don't think. + */ + +/* GEN_ERRNO_RTX ... Define this macro as a C exrpession to create an rtl + * expression that refers to the global "variable" errno. (On certain + * systems, errno may not actually be a variable.) If you don't define this + * macro, a reasonable default is used. + * + * ZipCPU --- if a reasonable default is used, we'll use that--especially since + * I doubt we'll be using errno for a while. + */ + +/* NEXT_OBJC_RUNTIME ... Set this macro to 1 to use the "NeXT" Objective-C + * message sending conventions by default. This calling convention involves + * passing the object, the selector and the method arguments all at once to the + * method-lookup library function. This is the usual setting when targetting + * Darwin/Mac OS X systems, which have the NeXT runtime installed. + * + * If the macro is set to 0, ... + * + * Doesn't look relevant (yet) for the Zip CPU--especially since we don't have + * an O/S yet. + */ + + + +/* 17.13 Addressing Modes */ + +/* C expressions that are nonzero if the machine supports pre-increment, + * pre-decrement, post-increment, or post-decrement addressing respectively. 
+ */ +#define HAVE_PRE_INCREMENT (0) +#define HAVE_PRE_DECREMENT (0) +#define HAVE_POST_INCREMENT (0) +#define HAVE_POST_DECREMENT (0) + +/* C expression that is nonzero if the machine supports pre- or post- address + * side-effect generation involving constants other than the size of the memory + * operand. + */ +#define HAVE_PRE_MODIFY_DISP (0) +#define HAVE_POST_MODIFY_DISP (0) + +/* C expression that is non-zero if the machine supports pre- or post-address + * side-effect generation involving a register displacement. + */ +#define HAVE_PRE_MODIFY_REG (0) +#define HAVE_POST_MODIFY_REG (0) + +/* CONSTANT_ADDRESS_P(X) ... A C expression that is 1 if the RTX X is a constant + * which is a valid address. On most machines the default definition ... is + * acceptable, but a few machines are more restrictive as to which constant + * addresses are supported. + * + * Zip CPU is designed for offset addresses, not constant addresses. Although + * the CPU will support 18-bit signed constant addresses, the assembler and + * general programming model do not. Further, without knowing where the final + * address will be located, this is an unusable model. Therefore we will + * define this as not supported. + * + * In hindsight, this isn't true--labels and symbols are valid addresses, and + * they are also constant addresses. Hence, we leave this at its default. + */ +// #undef CONSTANT_ADDRESS_P +// #define CONSTANT_ADDRESS_P(X) (0) + +/* CONSTANT_P(X) ... CONSTANT_P, which is defined by target-independent code, + * accepts integer values expressions whose values are not explicitly known, + * such as symbol_ref, label_ref, and high expressions and const arithmetic + * expressions, in addition to const_int and const_double expressions. + * + * Huh??? + */ +// #define CONSTANT_P(X) ??? + +/* MAX_REGS_PER_ADDRESS ... A number, the maximum number of registers that can + * appear in a valid memory address. 
Note that it is up to you to specify a + * value equal to the maximum number that TARGET_LEGITIMATE_ADDRESS_P would + * ever accept. + */ +#define MAX_REGS_PER_ADDRESS 1 + +/* TARGET_LEGITIMATE_ADDRESS_P(MODE,RTX,STRICT) ... A function that returns + * whether RTX is a legitimate memory address on the target machine for a + * memory operation of mode MODE. + */ +#undef TARGET_LEGITIMATE_ADDRESS_P +#define TARGET_LEGITIMATE_ADDRESS_P zip_legitimate_address_p + +/* TARGET_MEM_CONSTRAINT ... A single character to be used instead of the + * default 'm' character for general memory addresses. This defines the + * constraint letter which matches the memory addresses accepted by + * TARGET_LEGITIMATE_ADDRESS_P. Define this macro if you want to support new + * address format in your back end without changing the semantics of the 'm' + * constraint. This is necessary in order to preserve functionality of inline + * assembly constructs using the 'm' constraint. + * + * ZipCPU--doesn't look like we need to define this at all. + */ + +/* FIND_BASE_TERM(X) ... A C expression to determine the base term of address + * X or to provide a simplified version of X from which alias.c can easily find + * the base term. This macro is used in only two places: find_base_value and + * find_base_term in alias.c. + * + * It is always safe for this macro to not be defined. It exists so that + * alias analysis can understand machine-dependent addresses. + * + * ZipCPU: We'll skip this then. + */ + +/* TARGET_LEGITIMIZE_ADDRESS(RTX,OLD,MODE) ... This hook is given an invalid + * memory address RTX for an operand of mode MODE and should try to return a + * valid memory address. RTX will always be the result of a call to + * break_out_memory_refs, and OLD will be the operand that was given to that + * function to produce RTX. + * + * ZipCPU -- + */ +#undef TARGET_LEGITIMIZE_ADDRESS +#define TARGET_LEGITIMIZE_ADDRESS zip_legitimize_address + +/* LEGITIMIZE_RELOAD_ADDRESS(X,MODE,OP,TYPE,IND,WIN) ... 
A C compound statement + * that attempts to replace X, which is an address that needs reloading, with + * a valid memory address for an operand of mode MODE. WIN will be a C + * statement label elsewhere in the code. It is not necessary to define this + * macro, but it might be useful for performance reasons. + * + * ZipCPU: This is worth coming back to, according to the notes page, but it + * may also be a difficult macro to use. Look at other implementations before + * we dive into this. + */ +// #undef LEGITIMIZE_RELOAD_ADDRESS +// #define LEGITIMIZE_RELOAD_ADDRESS + +/* TARGET_MODE_DEPENDENT_ADDRESS_P(ADDR,SPACE) ... This hook returns true + * if memory address addr in address space addrspace can have different meanings + * depending on the machine mode of the memory reference it is used for or if + * the address is valid for some modes but not others. + */ +#undef TARGET_MODE_DEPENDENT_ADDRESS_P +#define TARGET_MODE_DEPENDENT_ADDRESS_P zip_mode_dependent_address_p + +/* TARGET_LEGITIMATE_CONSTANT_P(MODE,RTX) ... This hook returns true if x is a + * legitimate constant for a MODE-mode immediate operand on the target machine. + * You can assume the RTX satisfies CONSTANT_P, so you need not check this. + * + * The default definition returns true. + */ + +/* TARGET_DELIGITIMIZE_ADDRESS(RTX) + */ + +/* TARGET_CONST_NOT_OK_FOR_DEBUG_P(RTX) ... This hook should return true if RTX + * should not be emitted into debug sections. + */ + +/* TARGET_CANNOT_FORCE_CONST_MEM(MODE,RTX) ... This hook should return true if + * RTX is a form that cannot (or should not) be spilled to the constant pool. + * MODE is the mode of X. The default version returns false. + */ +// #define TARGET_CANNOT_FORCE_CONST_MEM hook_bool_mode_rtx_false + +/* TARGET_USE_BLOCKS_FOR_CONSTANT_P(MODE,RTX) ... This hook should return true + * if pool entries for constant RTX can be placed in an object_block structure. + * MODE is the mode of X. The default version returns false for all constants. 
+ * + *???? + */ +// #warning "Huh?" + +/* TARGET_USE_BLOCKS_FOR_DECL_P(DECL) ... This hook should return true if pool + * entries for DECL should be placed in an object_block structure. The default + * version returns true for all DECL's. + * + * Sounds good. + */ + +/* TARGET_BUILTIN_RECIPROCAL(TREE) ... This hook should return the DECL of a + * function that implements the reciprocal of the machine specific builtin + * function fndecl, or NULL_TREE if such a function is not available. + */ + +/* TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD(VOID) ... This hook should return the + * DECL of a function f that given an address addr as an argument returns a mask + * m that can be used to extrract from two vectors the relevant data that + * resides in addr in case addr is not properly aligned. + * + * Zip CPU does not support vectorization. + */ + +/* Other vector, SIMD, and GOACC macros skipped as Zip CPU doesn't support + * such data accesses and manipulation. + */ + +/* 17.14 Anchored Addresses */ + +/* TARGET_MIN_ANCHOR_OFFSET ... The minimum offset that should be applied to + * a section anchor. On most targets, it should be the smallest offset that + * can be applied to a base register while still giving a legitimate address for + * every mode. The default value is 0. + * + * On the Zip CPU, this is the minimum operand B offset to a LOD or STO + * operation, which would be a signed 14 bit number. + */ +#undef TARGET_MIN_ANCHOR_OFFSET +#define TARGET_MIN_ANCHOR_OFFSET zip_min_anchor_offset + +/* TARGET_MAX_ANCHOR_OFFSET ... Like TARGET_MIN_ANCHOR_OFFSET, but the maximum + * (inclusive) offset that should be applied to section anchors. The default + * value is 0. + */ +#undef TARGET_MAX_ANCHOR_OFFSET +#define TARGET_MAX_ANCHOR_OFFSET zip_max_anchor_offset + +/* TARGET_ASM_OUTPUT_ANCHOR(RTX) ... Write the assembly code to define section + * anchor RTX, which is a SYMBOL_REF for which 'SYMBOL_REF_ANCHOR_P(RTL) is + * true. 
The hook is called with the assembly output position set to the + * beginning of SYMBOL_REF_BLOCK(X). + * + * If ASM_OUTPUT_DEF is available, the hook's default definition uses it to + * define the symbol as '. + SYMBOL_REF_BLOCK_OFFSET(RTL)'. If ASM_OUTPUT_DEF + * is not available, the hook's default definition is NULL, which disables the + * use of section anchors altogether. + * + * Section anchors will be very valuable in Zip CPU assembly, therefore we + * must define this hook. + */ +// #undef TARGET_ASM_OUTPUT_ANCHOR +// #define TARGET_ASM_OUTPUT_ANCHOR zip_asm_output_anchor + +/* TARGET_USE_ANCHORS_FOR_SYMBOL_P(RTX) ... Return true if GCC should attempt + * to use anchors to access SYMBOL_REF X. You can assume SYMBOL_REF_HAS_BLOCK_INFO_P(X) and !SYMBOL_REF_ANCHOR_P(X). + * + * The default version is correct for most targets, but you might need to intercept this hook to handle things like target specific attributes or target-specific sections. + * + * Not knowing anything more, we'll leave the default as is for the Zip CPU. + */ +// #undef TARGET_USE_ANCHORS_FOR_SYMBOL_P +// #define TARGET_USE_ANCHORS_FOR_SYMBOL_P zip_use_anchors_for_symbol_p + +/* 17.15 Condition Code Status */ + +/* 17.15.1 Representation of condition codes using (cc0) --- that's us */ + +/* CC_STATUS_MDEP ... C code for a data type which is used for declaring + * the mdep component of cc_status. It defaults to int. + * + * ZipCPU---Int is good for us. + */ + +/* CC_STATUS_MDEP_INIT ... A C expression to initialize the mdep field to + * "empty". The default definition does nothing, since most machines don't + * use the field anyway. If you want to use the field, you should probably + * define this macro to initialize it. + */ + +/* NOTICE_UPDATE_CC(EXP, INSN) ... A C compound statement to set the components + * of cc_status appropriately for an insn insn whose body is exp. 
It is this + * macro's responsibility to recognize insns that set the condition code as + * a byproduct of other activity as well as those that explicitly set (cc0). + * + * ZipCPU --- We need this, as not all expressions set (cc0). + * + */ +#ifdef HAVE_cc0 +#define NOTICE_UPDATE_CC(EXP, INSN) zip_update_cc_notice(EXP, INSN) +#endif + + +/* 17.15.2 Representation of condition codes using registers */ +/* ... which the ZipCPU doesn't have. The ZipCPU has a CC0 register, and hence + * this section isn't supposed to apply. + */ + +/* SELECT_CC_MODE(op, x, y) ... On many machines, the condition code may be + * produced by other instructions than compares, for example the branch can use + * directly the condition code set by a subtract instruction. However, on some + * machines when the condition code is set this way some bits (such as the + * overflow bit) are not set in the same way as a test instruction, so that a + * different branch instruction must be used for some conditional branches. + * When this happens, use the machine mode of the condition code register to + * record different formats of the condition code register. Modes can also be + * used to record which compare instruction (e.g. a signed or an unsigned + * comparison) produced the condition codes. + * + * If other modes than CCmode are required, add them to 'machine-modes.def' and + * define SELECT_CC_MODE to choose a mode given an operand of a compare. This + * is needed because the modes have to be chosen not only during RTL generation + * but also, for example, by instruction combination. The result of + * SELECT_CC_MODE should be consistent with the mode used in the patterns; ... + * + * ZipCPU ... We have only one CC Mode, so we'll use the CCmode defined in + * machine-modes.def and should be fine with it. Hence, this doesn't need + * to be defined. + */ + +/* TARGET_CANONICALIZE_COMPARISON(int,rtx *, rtx *, bool) ... 
On some machines + * (such as the ZipCPU) not all possible comparisons are defined, but you can + * convert an invalid comparison into a valid one. For example, the Alpha + * does not have a GT comparison, but you can use an LT comparison instead and + * swap the order of the operands. + * + * On such machines, implement this hook to do any required conversions: code + * is the initial comparison code and op0 and op1 are the left and right + * operands of the comparison, respectively. If op0_preserve_value is true the + * implementation is not allowed to change the value of op0 since the value + * might be used in RTXs which aren't comparisons. E.g. the implementation is + * not allowed to swap operands in that case. + * + * GCC will not assume that the comparison resulting from this macro is valid + * but will see if the resulting insn matches a pattern in the 'md' file. + * + * You need not implement this hook if it would never change the comparison + * code or operands. + * + * In the case of the ZipCPU, the ZipCPU only keeps track of 8 possible + * comparisons, and bastardizing other comparisons into those 8 is extremely + * painful. Therefore, we *need* this capability to make certain we can use + * our comparisons successfully. + * + * The only problem is ... this hook appears to only be called on non-CC0 + * machines. Hence, defining it hasn't done anything for us. + */ +#define TARGET_CANONICALIZE_COMPARISON zip_canonicalize_comparison + +/* REVERSIBLE_CC_MODE(MODE) ... A C expression whose value is one if it is + * always safe to reverse a comparison whose mode is MODE. If SELECT_CC_MODE + * can ever return MODE for a floating-point inequality comparison, than + * REVERSIBLE_CC_MODE(MODE) must be zero. + * + * You need not define this macro if it would always return zero or if the + * floating-point format is anything other than IEEE_FLOAT_FORMAT. For example, + * here ... + * + * ZipCPU -- We'll always return zero, so this need not be defined. 
+ */ + +/* REVERSE_CONDITION(CODE,MODE) ... A C expression whose value is reversed + * condition code of thecode for comparison done in CC_MODE MODE. This macro + * is used only in case REVERSIBLE_CC_MODE(MODE) is nonzero. ... + * + * ZipCPU ... Since REVERSIBLE_CC_MODE(MODE) will always be zero, we'll leave + * this undefined. + */ + +/* bool TARGET_FIXED_CONDITION_CODE_REGS(int *, int *) ... On targets which do + * not use (cc0), and which use a hard register rather than a pseudo-register + * to hold condition codes, the regular CSE passes are often not able to + * identify cases in which the hard register is set to a common value. Use this + * hook to enable a small pass which optimizes such cases. This hook should + * return true to enable this pass, and it should set the integers to which its + * arguments point to the hard register numbers used for condition codes. When + * there is only one such register, as is true on most systems, the integer + * pointed to by p2 should be set to INVALID_REGNUM. + * + * The default version of this hook returns false. + * + * ZipCPU --- I like the idea of enabling optimizations. Let's return + * something other than false. + */ +#define TARGET_FIXED_CONDITION_CODE_REGS zip_fixed_condition_code_regs + +/* machine_mode TARGET_CC_MODES_COMPATIBLE(M1,M2) .. On targets which use + * multiple condition code modes in class MODE_CC, it is sometimes the case + * that a comparison can be validly done in more than one mode. On such a + * system, define this target hook to take two mode arguments and to return a + * mode in which both comparisons may be validly done. If there is no such + * mode, return VOIDmode. + * + * The default version of this hook checks whether the modes are the same. If + * they are, it returns that mode. If they are different, it returns VOIDmode. + * + * ZipCPU--Given that we only have the one CCmode, the default definition works + * well enough for us. + */ + +/* unsigned int TARGET_FLAGS_REGNUM ... 
If the target has a dedicated flags + * register, and it needs to use the post-reload comparison elimination pass, + * then this value should be set appropriately. + * + * ZipCPU---Looks like we can set this easily enough without any problems. + */ +#undef TARGET_FLAGS_REGNUM +#define TARGET_FLAGS_REGNUM zip_CC + +/* 17.16 Relative costs of operations */ + + +// #define REGISTER_MOVE_COST(MODE,FROM,TO) ((MODE==DImode)||(MODE==DFmode))?4:2 +// #define TARGET_REGISTER_MOVE_COST +// #define MEMORY_MOVE_COST(MODE, CLASS, IN) ((MODE==DImode)||(MODE==DFmode))?8:7 +/* TARGET_REGISTER_MOVE_COST(M,FRM,TO) ... This target hook should return the + * cost of moving data of mode M from a register in class FRM to one in class + * TO. The classes are expressed using the enumeration values such as + * GENERAL_REGS. A value of 2 is the default; other values are interpreted + * relative to that. + * + * It is not required that the cost always equal 2 when FROM is the same as TO; + * on some machines it is expensive to move between registers if they are not + * general registers. + * + * If reload sees ... + * + * ZipCPU ... We can leave this at its default value of 2. + */ + +/* TARGET_MEMORY_MOVE_COST(MOD,CL,IN) ... This target hook should return the + * cost of moving data of mode MOD between a register of class CL and memory. + * IN is false if the value is to be written to memory, true if it is to be + * read in. This cost is relative to those in TARGET_REGISTER_MOVE_COST. + * If moving between registers and memory is more expensive that between two + * registers, you should add this target hook to express the relative cost. + * + * If you do not add this target hook, GCC uses a default cost of 4 plus the + * cost of copying via a secondary reload register, if one is needed. 
If your + * machine requires a secondary reload register to copy between memory and a + * register of CL but the reload mechanism is more complex than copying via + * an intermediate, use this target hook to reflect the actual cost of the + * move. + * + * ZipCPU --- Memory moves are more expensive than twice the cost of register + * moves, so let's make certain this is defined. + */ +#define TARGET_MEMORY_MOVE_COST zip_memory_move_cost + +// #warning "This needs to be double checked, and annotated" +#define BRANCH_COST(SPEED,PREDICTABLE) ((PREDICTABLE)?2:5) + +/* Define this macro as a C expression which is nonzero if accessing less than + * a word of memory (i.e. a 'char' or a 'short') is no faster than accessing + * a word of memory. + */ +#define SLOW_BYTE_ACCESS 1 + +/* MOVE_RATIO(SPD) ... The threshold of number of scalar memory-to-memory move + * instructions, below which a sequence of instructions should be generated + * instead of a string move instruction or a library call. Increasing the + * value will always make code faster, but eventually incurs high cost in + * increased code size. + */ +#define MOVE_RATIO(SPD) 5 + +/* TARGET_USE_BY_PIECES_INFRASTRUCTURE_P(SZ,ALGN,OP,SPD) ... + */ +// #undef TARGET_USE_BY_PIECES_INFRASTRUCTURE_P(S,A,OP,SPD) +// #define TARGET_USE_BY_PIECES_INFRASTRUCTURE_P(S,A,OP,SPD)// needs hook + +/* CLEAR_RATIO(SPD) ... The threshold number of scalar move instructions, below + * which a sequence of instructions should be generated to clear memory instead + * of a string clear instruction or a library call. Increasing the value will + * always make the code faster, but eventually incurs high cost in increased + * code size. + */ +#define CLEAR_RATIO(SPD) MOVE_RATIO(SPD) + +/* NO_FUNCTION_CSE ... Define this macro to be true if it is as good or better + * to call a constant function address than to call an address kept in a + * register. 
+ * + * On the Zip CPU, constant function addresses--especially relative ones, + * can be optimized into a single cycle delay. Register jumps will always + * stall the whole (5-stage) pipeline. + */ +#define NO_FUNCTION_CSE + +/* TARGET_RTX_COSTS(X,CODE,OUTER,OPNO,TOTAL,SPD) ... This target hook describes + * the relative costs of RTL expressions. + * + * The cost may depend on the precise form of the expression, which is avaialble + * for examination in X, and the fact that X appears as operand OPNO of an + * expression with rtx code OUTER. That is, the hook can assume that there is + * some RTX Y such that GET_CODE(Y)==OUTER and such that either (a) XEXP(Y,OPNO) + * == X or (b) XVEC(Y,OPNO) contains X. + * + * ... + * The hook returns true when all subexpressions of x have been processed and + * false when rtx_cost should recurse. + */ + +/* TARGET_ADDRESS_COST(ADDR,MODE,AS, SPD) ... This hook computes the cost of an + * addressing mode that contains ADDR. If not defined, the cost is computed + * from the ADDR expression and the TARGET_RTX_COST hook. In cases where more + * than one form of an address is known, the form with the lowest cost will be + * used. If multiple forms have the same, lowest, cost, the one that is the + * most complex will be used. + * + * ZipCPU really has only one address cost, the only type of address it + * supports. Sure, index addressing would cost us more, but we don't support + * that so ... I think we're okay defining this as a constant. Indeed, the + * docs state that, "On RISC amchines, all instructions normally have the same + * length and execution time. Hence all addresses will have equal costs." + */ +#undef TARGET_ADDRESS_COST +#define TARGET_ADDRESS_COST zip_address_cost + + +/* TARGET_NO_SPECULATION_IN_DELAY_SLOTS_P ... This predicate controls the use + * of the eager delay slot filler to disallow speculatively executed + * instructions being placed in delay slots. 
Targets such as certain MIPS + * architectures possess both branches with and without delay slots. As the + * eager delay slot filler can decrease performance, disabling it is beneficial + * when ordinary branches are available. Use of delay slot branches filled + * using basic filler is often still desirable as the delay slot can hide a + * pipeline bubble. + */ +// How should Zip CPU define this--we have no delay slots. + + +/* 17.17 Instruction Scheduler */ + +#define TARGET_SCHED_ISSUE_RATE zip_sched_issue_rate + +/* 17.18 Dividing the Output into Sections */ + +/* Switch to the text or data segment. */ +#define TEXT_SECTION_ASM_OP "\t.text" +#define DATA_SECTION_ASM_OP "\t.data" + +// #undef TARGET_LIBGCC_SDATA_SECTION +// #define TARGET_LIBGCC_SDATA_SECTION ".sdata" + + +/* 17.19 Position Independent Code */ + +#define PIC_OFFSET_TABLE_REGNUM zip_GOT +#define PIC_OFFSET_TABLE_REG_CALL_CLOBBERED 0 +// #define LEGITIMATE_PIC_OPERAND_P(X) should evaluate to X(GOT) only + +/* 17.20 Defining the Output Assembler Language */ + +/* 17.20.4 Output of Data */ + +#undef TARGET_ASM_ALIGNED_HI_OP +#undef TARGET_ASM_ALIGNED_SI_OP +#define TARGET_ASM_UNALIGNED_HI_OP "\t.byte\t" +#define TARGET_ASM_UNALIGNED_SI_OP "\t.byte\t" +/* These hooks (above) specify assembly directives for creating certain kinds + * of integer objects. The TARGET_ASM_BYTE_OP directive creates a byte-sized + * object. The TARGET_ASM_ALIGNED_HI_OP one creates an aligned two-byte object + * and so on. Any of the hooks may be NULL, indicating that no suitable + * directive is available. + * + * The compiler will print these strings at the start of a new line, followed + * immediately by the object's initial value. In most cases, the string should + * contain a tab, a pseudo op, and then another tab. + */ + +/* 17.20.4 Output and Generation of Labels */ + +/* ASM_OUTPUT_LABEL + * ... A default definition of this macro is provided which is correct for + * most systems. 
 + */ + +/* ASM_OUTPUT_FUNCTION_LABEL + * ... if not defined, then the function name is defined in the usual manner + * as a label. + */ + +/* ASM_OUTPUT_INTERNAL_LABEL ... Identical to ASM_OUTPUT_LABEL, except that name + * is known to refer to a compiler-generated label. The default definition + * uses assemble_name_raw, which is like assemble_name except that it is more + * efficient. + */ + +/* SIZE_ASM_OP ... A C string containing the appropriate assembler directive + * to specify the size of a symbol, without any arguments. On systems that + * use ELF, the default is "\t.size\t"; on other systems, the default is not to + * define this macro. + * + * Define this macro only if it is correct to use the default definitions of + * ASM_OUTPUT_SIZE_DIRECTIVE and ASM_OUTPUT_MEASURED_SIZE for your system. + * If you need your own custom definitions of those macros, or if you do not + * need explicit symbol sizes at all, do not define this macro. + */ + +/* ASM_OUTPUT_SIZE_DIRECTIVE + * ASM_OUTPUT_MEASURED_SIZE + */ + +/* NO_DOLLAR_IN_LABEL ... Define this macro if the assembler does not accept + * the character '$' in label names. By default constructors and destructors + * in G++ have "$" in the identifiers. If this macro is defined, '.' is + * used instead. + */ + +/* NO_DOT_IN_LABEL ... Define this macro if the assembler does not accept the + * character '.' in label names. By default constructors and destructors in + * G++ have names that use '.'. If this macro is defined, these names are + * rewritten to avoid '.'. + */ + +/* TYPE_ASM_OP ... A C string containing the appropriate assembler directive to + * specify the type of a symbol, without any arguments. On systems that use + * ELF the default in config/elfos.h is "\t.type\t"; on other systems, the default is not to define this macro. + * + * Define this macro only if it is correct to use the default definition of + * ASM_OUTPUT_TYPE_DIRECTIVE for your system. 
If you need your own custom + * definition of this macro, or if you do not need explicit symbol types at all, + * do not define this macro. + */ + +/* TYPE_OPERAND_FMT ... A + */ + +/* ASM_OUTPUT_TYPE_DIRECTIVE + */ + +/* ASM_DECLARE_FUNCTION_NAME(STREAM, NAME, DECL) ... + * if this macro is not defined, then the function name is defined in the usual + * manner as a label (by means of ASM_OUTPUT_FUNCTION_LABEL). + */ + +/* ASM_DECLARE_FUNCTION_SIZE + * ASM_DECLARE_COLD_FUNCTION_NAME + * ASM_DECLARE_COLD_FUNCTION_SIZE + * ASM_DECLARE_OBJECT_NAME + * TARGET_ASM_DECLARE_CONSTANT_NAME + */ +/* ASM_DECLARE_REGISTER_GLOBAL(STREAM, DECL, REGNO, NAME) ... A C statement + * (sans semicolon) to output to the stdio stream STREAM any text necessary for + * claiming a register REGNO for a global variable DECL with name NAME. + * + * If you don't define this macro, that is equivalent to defining it to do + * nothing. + */ + +/* ASM_FINISH_DECLARE_OBJECT + * TARGET_ASM_GLOBALIZE_LABEL + * TARGET_ASM_GLOBALIZE_DECL_NAME + * TARGET_ASM_ASSEMBLE_UNDEFINED_DECL + * ASM_WEAKEN_LABEL + * ASM_WEAKEN_DECL + * ASM_OUTPUT_WEAKREF + * SUPPORTS_WEAK + * TARGET_SUPPORTS_WEAK + * MAKE_DECL_ONE_ONLY + * SUPPORTS_ONE_ONLY + * TARGET_ASM_ASSEMBLE_VISIBILITY + * TARGET_WEAK_NOT_IN_ARCHIVE_TOC + * ASM_OUTPUT_EXTERNAL + * TARGET_ASM_EXTERNAL_LIBCALL + * TARGET_ASM_MARK_DECL_PRESERVED + * ASM_OUTPUT_LABELREF + * TARGET_MANGLE_ASSEMBLER_NAME + * ASM_OUTPUT_SYMBOL_REF + * ASM_OUTPUT_LABEL_REF + * TARGET_ASM_INTERNAL_LABEL + * ASM_OUTPUT_DEBUG_LABEL + * ASM_GENERATE_INTERNAL_LABEL + * ASM_FORMAT_PRIVATE_NAME + */ + +/* ASM_OUTPUT_DEF ... A C statement to output to the stdio stream STREAM + * assembler code which defines (equates) the symbol NAME to have the value + * VALUE. + * + * ZipCPU---So many other things that we need depend upon this, that we need + * to implement a non-default version. 
+ */ +#define ASM_OUTPUT_DEF zip_asm_output_def + +/* ASM_OUTPUT_DEF_FROM_DECLS + * TARGET_DEFERRED_OUTPUT_DEFS + * ASM_OUTPUT_WEAK_ALIAS + * OBJ_GEN_METHOD_LABEL + */ + + +/* 17.20.7 Output of Assembler Instructions */ + +#define REGISTER_NAMES { "R0","R1","R2","R3","R4","R5","R6","R7","R8","R9", \ + "R10","R11","R12","SP","CC","PC" } + +/* REGISTER_PREFIX (Undefined by default) + * LOCAL_LABEL_PREFIX (Undefined by default) + * USER_LABEL_PREFIX defaults to "*" + * IMMEDIATE_PREFIX (Undefined by default) + * + * If defined, C string expressions to be used for the '%R', '%L', '%U', and + * '%I' options of asm_fprintf (see 'final.c'). These are useful when a single + * 'md' file must support multiple assembler formats. In that case, the various + * 'tm.h' files can define these macros differently. + */ +// #define USER_LABEL_PREFIX "*" + +/* Defining memory operand address formats is in this section. */ + +/* 17.20.10 Assembler Commands for Alignment */ + +/* JUMP_ALIGN(label) ... The alignment (log base 2) to put in front of label, + * which is a common destination of jumps and has no fallthru incoming + * edge. This macro need not be defined if you don't want any special alignment + * to be done at such a time. Most machine descriptions do not currently define + * this macro. + * + * ZipCPU---The assembler should automatically deal with label alignment, so + * let's not do anything about it here. + */ + +/* TARGET_ASM_JUMP_ALIGN_MAX_SKIP + */ + +/* LABEL_ALIGN_AFTER_BARRIER + * TARGET_ASM_LABEL_ALIGN_AFTER_BARRIER_MAX_SKIP + */ + +/* LOOP_ALIGN(label) + * TARGET_ASM_LOOP_ALIGN_MAX_SKIP + * LABEL_ALIGN + * TARGET_ASM_LABEL_ALIGN_MAX_SKIP + */ + +/* ASM_OUTPUT_SKIP(STREAM, NBYTES) A C statement to output to the stdio + * stream an assembler instruction to advance the location counter by nbytes + * bytes. 
 + */ + +/* TARGET_ASM_LABEL_ALIGN */ +/* Assembler Commands for Alignment */ +#define ASM_OUTPUT_ALIGN(STREAM,POWER) \ + { int pwr = POWER; fprintf(STREAM, "\t.p2align %d\n", (pwr<2)?2:pwr); } + + + +/* 17.21 Controlling Debugging Information Format */ +/* 17.22 Cross Compilation and Floating Point */ + +// REAL_VALUE_TYPE +// REAL_VALUES_EQUAL +// REAL_VALUES_LESS ... Tests whether x is less than y +/* REAL_VALUE_FIX ... Truncates x to an unsigned integer, rounding toward zero. + * If x is negative, returns zero. + */ +// REAL_VALUE_ATOF +// REAL_VALUE_NEGATIVE +// REAL_VALUE_ISINF +// REAL_VALUE_ISNAN +/* REAL_ARITHMETIC(OUT,CODE,X,Y) ... (Macro) Calculates an arithmetic operation + * on two floating point values X and Y, storing the result in OUT (which must + * be a variable). + * + * The operation to be performed is specified by CODE. Only the following + * codes are supported: PLUS_EXPR, MINUS_EXPR, MULT_EXPR, RDIV_EXPR, MAX_EXPR, + * MIN_EXPR. + * + * If REAL_ARITHMETIC is asked to evaluate division by zero and the target's + * floating point format cannot represent infinity, it will call abort(). + * Callers should check for this situation first, using MODE_HAS_INFINITIES. + */ +/* REAL_VALUE_NEGATE(X) ... [Macro] Returns the negative of the floating point + * value X. + */ +/* REAL_VALUE_ABS(X) ... [Macro] Returns the absolute value of X. + */ +/* 17.23 Mode switching instructions */ +/* 17.24 Defining target-specific uses of __attribute__ */ +#undef TARGET_OPTION_OVERRIDE +#define TARGET_OPTION_OVERRIDE zip_override_options + +/* 17.25 Emulating TLS */ +/* 17.26 Defining coprocessor specifics for MIPS targets */ + + // ZipCPU isn't MIPS. + +/* 17.27 Parameters for Precompiled Header Validity Checking */ +/* 17.28 C++ ABI parameters */ +/* 17.29 Adding support for named address spaces */ +/* 17.30 Miscellaneous Parameters */ + +/* HAS_LONG_COND_BRANCH ... 
Define this boolean macro to indicate whether or + * not your architecture has conditional branches that can span all of memory. + * It is used in conjunction with an optimization that partitions hot and + * cold basic blocks into separate sections of the executable. If this macro + * is set to false, gcc will convert any conditional branches that attempt to + * cross between sections into unconditional branches or indirect jumps. + * + * ZipCPU --- The assembler renders long unconditional branch code without + * problems, so we can pretend that such long branches exist. + */ +#define HAS_LONG_COND_BRANCH true + +/* HAS_LONG_UNCOND_BRANCH ... Define this boolean macro to indicate whether + * or not your architecture has unconditional branches that can span all of + * memory. (ZipCPU does ... via the LOD (PC),PC instruction.) It is used in + * conjunction with an optimization that partitions hot and cold basic blocks + * into separate sections of the executable. If this macro is set to false, + * gcc will convert any unconditional branches that attempt to cross between + * sections into indirect jumps. + * + * ZipCPU has the LOD (PC),PC instruction which can be used to implement a long + * jump. + */ +#define HAS_LONG_UNCOND_BRANCH true + +/* CASE_VECTOR_MODE ... An alias for a machine mode name. This is the machine + * mode that eleemnts of a jump-table should have. + * + */ +#define CASE_VECTOR_MODE SImode + +/* CASE_VECTOR_SHORTEN_MODE(MIN,MAX,BODY) ... Optional: return the preferred + * mode for an addr_diff_vec when the minimum and maximum offset are known. + * If you define this, it enables extra code in branch shortening to deal with + * addr_diff_vec. To make this work, you also have to define INSN_ALIGN and + * make the alignment for addr_diff_vec explicit. The body argument is provided so that the offset_unsigned and scale flags can be updated. + * + * ZipCPU---No advantage here. + */ + +/* CASE_VECTOR_PC_RELATIVE ... 
Define this expression
+ */ +#undef SHORT_IMMEDIATES_SIGN_EXTEND +#define SHORT_IMMEDIATES_SIGN_EXTEND 1 + +/* TARGET_MIN_DIVISIONS_FOR_RECIP_MUL + */ + +/* MOVE_MAX ... The maximum number of bytes that a single instruction can move + * quickly between memory and registers or between two memory locations. + * + * ZipCPU --- Although we can move 32-bits at a time, and most people would call + * this 4-bytes, the compiler defines a byte as the minimum addressable unit. + * Therefore, this is defined to be one. + */ +#define MOVE_MAX 1 + +/* MAX_MOVE_MAX ... The maximum number of bytes that a single instruction can + * move quickly between memory and registers or between two memory ... + * + * ZipCPU --- this sounds just the same as MOVE_MAX, which is the default + * definition of this. + */ + +/* SHIFT_COUNT_TRUNCATED ... A C expression that is nonzero if on this machine + * the number of bits actually used for the count of a shift operation is equal + * to the number of bits needed to represent the size of the object being + * shifted. + * + * You need not define this macro if it would have the value of zero. + * + * ZipCPU---A shift of 33 (or more) in either direction will wipe out the + * value in the register, therefore this value should be zero, the default. + */ + +/* TARGET_SHIFT_TRUNCATION_MASK(MODE) ... This function describes how the + * standard shift patterns for MODE deal with shifts by negative amounts or by + * more than the width of the mode. + * + * ZipCPU---The default is zero, since we didn't define SHIFT_COUNT_TRUNCATED. + * This is the case for the ZipCPU as well. + */ + +/* TRULY_NOOP_TRUNCATION(OUTPREC,INPREC) ... A C expression which is nonzero + * if on this machine it is safe to "convert" an integer of INPREC bits to one + * of OUTPREC bits (where OUTPREC is smaller than INPREC) by merely operating on + * it as if it had OUTPREC bist. On many machines, this expression can be 1. + * + * ZiPCPU ... If both values are 32-bit, what conversion takes place? 
If one is + * 64-bit and the other 32-bit ... I suppose it would then be safe. + */ +#undef TRULY_NOOP_TRUNCATION +#define TRULY_NOOP_TRUNCATION(O,I) 1 + +/* TARGET_MODE_REP_EXTENDED(MODE,REPMODE) ... The representation of an integral + * mode can be such that the values are always extended to a wider integral + * mode. Return SIGN_EXTEND if values of MODE are represented in sign-extended + * form to REPMODE. Return UNKNOWN otherwise. (Currently none of the targets + * use zero-extended. + * + */ +// #undef TARGET_MODE_REP_EXTENDED +// #define TARGET_MODE_REP_EXTENDED(R,M) SIGN_EXTEND + +/* STORE_FLAG_VALUE ... A C expression describing the value returned by a + * comparison operator with an integral mode and stored by a store-flag + * instruction (cstoremode4) when the condition is true. This description + * must apply to all the cstoremode4 patterns and all the comparison operators + * whose results have MODE_INT mode. + * + * ZipCPU---Doesn't really have a STORE_FLAG_VALUE instruction ... + */ + +/* FLOAT_STORE_FLAG_VALUE + * + * ZipCPU + */ + +/* VECTOR_STORE_FLAG_VALUE ... define this macro on machines that have vector + * comparison operations that return a vector result ... + * + * ZipCPU---Doesn't support vector operations. + */ + +/* CLZ_DEFINED_VALUE_AT_ZERO(MODE, VAL) + * CTZ_DEFINED_VALUE_AT_ZERO(MODE, VAL) + * + * A C expression that indicates whetther the architecture defines a value for + * clz or ctz with a zero operand. A result of 0 indicates the value is + * undefined. If the value is defined for only the RTL expression, the macro should evaluate to 1. If the value also applies to the corresponding optab + * entry, then the macro should evaluate to 2. In cases where the value is + * defined, value should be set to this value. + * If this macro is not defined, the value of clz or ctz at zero is assumed to + * be undefined. + * + * ZipCPU---Has neither clz nor ctz instructions, so we don't need this. + */ + +/* Pmode ... 
An alias for the machine mode for pointers. On most machines, + * define this to be the integer mode corresponding to the width of a + * hardware pointer. SImode on 32-bits machines, or DImode on 64-bit machines. + * On some machines you must define this to be one of the partial + * integer modes, such as PSImode. + */ +#undef Pmode +#define Pmode SImode + +/* FUNCTION_MODE ... An alais for the machine mode used for memory references to + * function being called, in call RTL expressions. On most CISC machines, where + * an instruction can begin at any byte address, this should be QImode. On most + * RISC machines, where all instructions have fixed size and alignment, this + * should be a mode with the same size and alignment as the machine instruction + * words--typically SImode or HImode. + * + * ZipCPU---Definitely SImode, as with Pmode. (All words are 32-bits, including + * addresses on the ZipCPU. + */ +#undef FUNCTION_MODE +#define FUNCTION_MODE SImode + +/* STDC_0_IN_SYSTEM_HEADERS + */ + +/* TARGET_C_PREINCLUDE(V) ... Define this hook to return the name of a header + * file to be included at the start of all compilations, as if it had been + * included with #include <file>. If this hook returns NULL, or is not defined, + * or if the header is not found, or if the user specifies -ffreestanding or + * -nostdinc, no header is included. + * + * ZipCPU --- We don't have a standard library defined yet, so we'll leave this + * as NULL. + */ +#undef TARGET_C_PREINCLUDE +#define TARGET_C_PREINCLUDE NULL + +/* TARGET_CXX_IMPLICIT_EXTERN_C(CONST CHAR *) ... Define this hook to add target + * specific C++ implicit extern C functions. If this function returns true + * for the name of a file-scope function, that function implicitly gets extern + * "C" linkage rather than whatever linkage the declaration would normally have. + * An example of such function is WinMain on Win32 targets. + * + * ZipCPU---Not ready to deal with this yet. + */ + +/* NO_IMPLICIT_EXTERN_C ... 
Define this macro if the system header files + * support C++ as well as C. This macro inhibits the usual method of using + * system header files in C++, which is to pretend that the file's contents + * are enclosed in 'extern "C" {...}'. + * + * + * ZipCPU --- Don't have either C or C++ headers, so let's skip this for now. + * Eventually, though, I think ZipCPU and C++ would go very well together. + */ + +/* REGISTER_TARGET_PRAGMAS ... Define this macro if you want to implement any + * target specific pragmas. + * + * ZipCPU --- let's not. + */ + +/* HANDLE_PRAGMA_PACK_WITH_EXPANSION ... Define this macro if macros should be + * expanded in the arguments of #pragma pack(). + * + * ZipCPU ... why? + */ + +/* TARGET_DEFAULT_PACK_STRUCT ... If your target requires a struct packing + * default other than 0 (meaning the machine default), define this macro to + * the necessary value (in bytes). This must be a value that would also be + * valid to use with #pragma pack() (that is a small power of two. + */ + +/* DOLLARS_IN_IDENTIFIERS + * ZipCPU --- Default (not changing C) + */ + +/* INSN_SETS_ARE_DELAYED(INSN) ... Define this macro as a C expression that + * is nonzero if it is safe for the delay slot schedule to place instructions + * in the delay slot of INSN, even if they appear to use a resource set or + * clobbered in INSN. INSN is always a ... + * + * ZipCPU --- You need not define this macro if it would always return zero. + */ + +/* INSN_REFERENCES_ARE_DELAYED(INSN) ... Define this macro as a C expression + * that is nonzero if it is safe for the delay slot schedule to place + * instructions in the delay slot of INSN, even if they appear to set or clobber + * a resource referenced in INSN. INSN is always a jump_insn or an insn. On + * machines where some insn or jump_insn is really a function call and ... + * + * ZipCPU --- You need not define this macro if it would always return zero. + */ + +/* MULTIPLE_SYMBOL_SPACES ... 
Define this macro as a C expression that is + * nonzero if, in some cases, global symbols from one translation unit may not + * be bound to undefined symbols in another translation unit without user + * intervention. For instance, under Microsoft Windows symbols must be + * explicitly imported from shared libraries (DLLs). + * + * ZipCPU---You need not define this macro if it would always evaluate to zero, + * so we won't. + */ + +/* TARGET_MD_ASM_ADJUST + */ +/* MATH_LIBRARY ... Define this macro as a C constant ... you only need to + * define this macro if the default of "m" is wrong. + * + * ZipCPU --- as we don't have a math library yet, building one such that "m" + * works doesn't sound like a problem. Let's not define this. + */ + +/* LIBRARY_PATH_ENV ... Define this as a C string constant for the environment + * variable that specifies where the linker should look for libraries. + * + * Just in case we want to add libraries for ZipCPU, let's place them in + * /usr/local/zip/lib, so as not to confuse them with our local systems + * libraries. + */ +#define LIBRARY_PATH_ENV "/usr/local/zip/lib" + +/* TARGET_POSIX_IO ... Define this macro if the target supports the following + * POSIX file fucntions: access, mkdir, and file locking with fcntl/F_SETLKW. + * + * ZipCPU does not. + */ + +/* MAX_CONDITIONAL_EXECUTE ... A C expression for the maximum number of + * instructions to execute via conditional execution instructions instead of a + * branch. A value of BRANCH_COST+1 is the default if the machine does not use + * cc0 and 1 if it does use cc0. + * + * ZipCPU---This sounds good enough for the ZipCPU as well--as long as we have + * BRANCH_COST defined. However, BRANCH_COST is defined as conditionally to + * something else, so let's keep looking into this. + */ + +/* IFCVT_MODIFY_TESTS(CEINFO,TRUE,FALSE) ... 
Used if the target needs to + * perform machine-dependent modifications on the conditionals used for turning + * basic blocks into conditionally executed code. CEINFO points to a data + * structure, struct ce_if_block, which contains information about the currently + * processed blocks. TRUE and FALSE are the tests that are used for + * converting the then-block and the else-block, respectively. Set either TRUE + * or FALSE to a null pointer if the tests cannot be converted. + * + * ZipCPU --- I need to set this to properly take advantage of our conditional + * execution and conditional testing capabilities. + */ +#define IFCVT_MODIFY_TESTS(CI,TR,FL) zip_ifcvt_modify_tests(CI,&TR,&FL) + +/* IFCVT_MODIFY_MULTIPLE_TESTS(CEINFO, BB, TRUE, FALSE) ... Like + * IFCVT_MODIFY_TESTS, but used when converting more complicated if-statements + * into conditions combined by and and or operations. BB contains the basic + * block that contains the test that is currently being processed and about to + * be turned into a condition. + * + * + * ZipCPU --- I need to set this to properly take advantage of our conditional + * execution and conditional testing capabilities. + */ +// #warning "Need to come back to this." +#define IFCVT_MODIFY_MULTIPLE_TESTS(CI,BB,TR,FL) TR=NULL_RTX + + +/* IFCVT_MODIFY_INSN(CEINFO, PATTERN, INSN) ... A C expression to modify the + * PATTERN of an INSN that is to be converted to conditional execution format. + * CEINFO points to a data structure, struct ce_if_block, which contains + * information about the currently processed blocks. + * + * + * ZipCPU --- I need to set this to properly take advantage of our conditional + * execution and conditional testing capabilities. + */ +#define IFCVT_MODIFY_INSN(CE,PATRN,INSN) zip_ifcvt_modify_insn(CE,PATRN,INSN) + + +/* IFCVT_MODIFY_FINAL(CEINFO) ... A C expression to perform any final + * machine dependent modifications in converting code to conditional + * execution. 
The involved basic blocks can be found in struct ce_if_block + * structure pointed to be CEINFO. + * + * + * ZipCPU --- I need to set this to properly take advantage of our conditional + * execution and conditional testing capabilities. + */ +// #warning "Need to come back to this." +#define IFCVT_MODIFY_FINAL(CEINFO) zip_ifcvt_modify_final(CEINFO) + + +/* IFCVT_MODIFY_CANCEL(CEINFO) ... A C expression to cancel any machine + * dependent modifications in converting code to conditional execution. The + * involved basic blocks can be found in the struct ce_if_block structure that + * is pointed to by CEINFO. + * + * + * ZipCPU --- I need to set this to properly take advantage of our conditional + * execution and conditional testing capabilities. + */ +// #warning "Need to come back to this." +#define IFCVT_MODIFY_CANCEL(CEINFO) zip_ifcvt_modify_cancel(CEINFO) + + +/* IFCVT_MACHDEP_INIT(CEINFO) ... A C expression to initialize any machine + * specific data for if-conversion of the if-block in the CEINFO block structure + * that is pointed by CEINFO. + * + * + * ZipCPU --- I need to set this to properly take advantage of our conditional + * execution and conditional testing capabilities. + */ +// #warning "Need to come back to this." +#define IFCVT_MACHDEP_INIT(CEINFO) zip_ifcvt_machdep_init(CEINFO) + + +/* TARGET_MACHINE_DEPENDENT_REORG(VOID) ... If non-null, this hook performs a + * target specific pass over the instruction stream. The compiler will run it + * at all optimization levels, just before the point at which it normally does + * delayed branch scheduling. + * + * You need not implement the hook if it has nothing to do. + * + * ZipCPU---This may be part of a later upgrade, but shouldn't be needed to + * just get us started. + */ + + +/* TARGET_INIT_BUILTINS(VOID) ... Define this hook if you ahve any machine + * specific builtin functions that need to be defined. It should be a function + * that performs the necessary setup. 
Machine specific builtin functions can be + * useful to expand special machine instructions that would otherwise not + * normally be generated because they have no equivalent in the source language. + * + * To create a built in function, call the function lang_hooks.builtin_function + * which is defined by the language front end. You can use any type nodes + * set up by build_common_tree_nodes; only language front ends that use those + * two functions will call "TARGET_INIT_BUILTINS". + * + * ZipCPU---We need to come back to this. We should have several built-ins + * defined: rtu(), wait(), halt(), save_context(cstackregno), and + * restore_context(cstackregno). + * + */ +#undef TARGET_INIT_BUILTINS +#define TARGET_INIT_BUILTINS zip_init_builtins + +/* TARGET_BUILTIN_DECL(CODE,INITP) ... Define this hook if you have any + * machine specific builtin functions that need to be defined. It should be a + * function that returns the builtin function declaration for the builtin + * function code code. If there is no such builtin and it cannot be initialized + * at this time if INITP is true the function should return NULL_TREE. If + * CODE is out of range the fucntion should return error-mark_node. + * + * ZipCPU ... needs to be done, don't know how to do it yet. + */ +#undef TARGET_BUILTIN_DECL +#define TARGET_BUILTIN_DECL zip_builtin_decl + + +/* TARGET_EXPAND_BUILTIN(TREE,TGT,SUB,MODE,IGNORE) ... Expand a call to a + * machine specific built-in function that was set up by TARGET_INIT_BUILTINS. + * TREE is the expression for the function call; the result should go to + * TGT if that is convenient, and have mode MODE if that is convenient. SUB + * may be used as the target for computing one of EXP's operands. IGNORE is + * non-zero if the value is to be ignored. This function should return the + * result of the call to the built-in function. + * + * ZipCPU ... 
needs to do it, just to get our special intrinsic functions + */ +#define TARGET_EXPAND_BUILTIN zip_expand_builtin + + +/* TARGET_BUILTIN_CHKP_FUNCTION(FCODE) ... Allows the target to redefine + * builtin functions used by Pointer Bounds Checker for code instrumentation. + * + * ZipCPU --- not interested. + */ +/* TARGET_CHKP_BOUND_TYPE + * TARGET_CHKP_MAKE_BOUNDS_CONSTANT + * TARGET_CHKP_INITIALIZE_BOUNDS + * + * ZipCPU --- Same as last one. + */ + + +/* TARGET_RESOLVE_OVERLOADED_BUILTIN(LOC, FN, ARGS) ... Select a replacement + * for a machine specific built-in function that was set up by + * TARGET_INIT_BUILTINS. + * + * ZipCPU --- If I go to the trouble to create a builtin, why would I want + * to then overload it? + */ + +/* TARGET_FOLD_BUILTIN(FN,NARGS,ARGP,IGNORE) ... Fold a call to a machine + * specific built-in function that was set up by 'TARGET_INIT_BUILTINS' FN + * is the declaration of the built-in function. NARGS is the number of + * arguments passed to the function; the arguments themselves are pointed to by + * ARGP. The result is another tree, valid for both GIMPLE and GENERIC, + * containing as simplified expression for the call's result. If IGNORE is + * true the value will be ignored. + * + * ZipCPU --- You know, this and the previous couple sound like something + * whereby I might be able replace bit-reversal code with my bit reverse + * instruction. That would be very useful, but not necessary to get me + * started. + */ + +/* TARGET_GIMPLE_FOLD_BUILTIN + * TARGET_COMPARE_VERSION_PRIORITY + * TARGET_GET_FUNCTION_VERSIONS_DISPATCHER + * TARGET_GENERATE_VERSION_DISPATCHER_BODY + * TARGET_CAN_USE_DOLOOP_P + * TARGET_INVALID_WITHIN_DOOLOOP + * TARGET_LEGITIMATE_COMBINED_INSN + * TARGET_CAN_FOLLOW_JUMP + * TARGET_COMMUTATIVE_P + */ + +/* TARGET_ALLOCATE_INITIAL_VALUE(REGNO) ... When the initial value of a hard + * register has been copied in a pseudo register, it is often not necessary + * ... + */ +/* TARGET_UNSPEC_MAY_TRAP_P(RTX,FLAGS) ... 
This target hook returns nonzero in + * RTX, un unspec or unspec_volatile operation, might cause a trap. Targets + * can use this hook to enhance precision of analysis for unspec and + * unspec_volatile operations. You may call may_trap_p_1 to analyze inner + * elements of RTX in which case flags should be passed along. + */ + +/* TARGET_SET_CURRENT_FUNCTION(TREE) The compiler invokes this hook whenever + * it changes its current function context (CFUN). You can define this + * function if the back end needs to perform any initialization or reset + * actions on a per-function basis. For example, it may be used to implement + * function attributes that affect register usage or code generation patterns. + */ + +/* TARGET_OBJECT_SUFFIX ... Define this macro to be a C string representing the + * suffix for object files on your target machine. If you do not define this + * macro, GCC will use ".o" as the suffix for object files. + */ +#define TARGET_OBJECT_SUFFIX ".o" + +/* TARGET_EXECUTABLE_SUFFIX + */ +#define TARGET_EXECUTABLE_SUFFIX "" + +/* COLLECT_EXPORT_LIST ... If defined, collect2 will scan the individual object + * files specified on its command line and create an export list for the linker. + * Define this macro for systems like AIX, where the linker discards object + * files that are not referenced from main and uses export lists. + * + * ZipCPU --- shoudln't need this. + */ + +/* MODIFY_JNI_METHOD_CALL(MDECL) ... Define this macro to a C expression + * representing a variant of the method call mdecl, if Java Native Interface + * (JNI) methods must be invoked differently from other methods on your + * target. For example, on 32-bit MSWindows, JNI methods must be invoked + * using the stdcall calling convention and this macro is then ... + * + * ZipCPU----Don't need this. (yet) + */ + + +/* TARGET_CANNOT_MODIFY_JUMPS_P ... This target hook returns true past the + * point in which a new jump instructions could be created. 
On machines that + * require a register for every jump such as the SHmedia ISA of SH5, this point + * would typically be reload, so thiss target hook should be defined to a + * function such as: + * + * ZipCPU --- I don't get what this is for. + * Actually, in hind sight, ZipCPU needs this. Without this, the + * compiler will try to reorder basic blocks, shuffling logic around and so + * fortch, preventing our comparison optimizations from being used. By setting + * this function appropriately, we can prevent it from reversing conditions into + * conditions we don't support. + */ +#define TARGET_CANNOT_MODIFY_JUMPS_P zip_cannot_modify_jumps_p + +/* TARGET_BRANCH_TARGET_REGISTER_CLASS ... This target hook returns a register + * class for which branch target register optimizations should be applied. All + * registers in this class should be usable interchangably. After reload, + * registers in this class will be re-allocated and loads will be hoisted out of + * loops and be subjected to inter-block scheduling. + * + * ZipCPU---GENERAL_REGS, but this should be a default already ... + */ + + +/* TARGET_BRANCH_TARGET_REGISTER_CALLEE_SAVED ... Branch target register + * optimization will by default exclude callee-saved registers that are not + * already live during the current function. If this target hook returns true, + * they will be included. The target code must then make sure that all target + * registers in the class returned by TARGET_BRANCH_REGISTER_CLASS that might + * be saved are saaved. + * + * ZipCPU--- + */ + + +/* TARGET_HAVE_CONDITIONAL_EXECUTION(VOID) ... This target hook returns true + * if the target supports conditional execution. This target hook is required + * only when the target has several different modes and they have different + * conditional execution capability, such as ARM. + * + * ZipCPU---Yes! All instructions may be conditionally executed (except the + * long version load immediate ...) 
+ */ +#define TARGET_HAVE_CONDITIONAL_EXECUTION hook_bool_void_true + +/* TARGET_GEN_CCMP_FIRST(PREP,GEN,CODE,OP0,OP1) .. This function prepares to + * emit a comparison instruction for the first compare in a sequence of + * conditional comparisons. It returns an appropriate comparison with CC for + * passing to gen_ccmp_next or cbranch_optab. The instructions to prepare the + * compare are saved in prep_seq and the compare instructions are saved in + * gen_seq. They will be emitted when all the compares in the conditional + * comparison are generated without error. CODE is the rtx_code of the compare + * for op0 and op1. + * + * + * ZipCPU---??? + */ + +/* TARGET_GEN_CCMP_NEXT(PREP,GEN,PREV,CMP,OP0,OP1,BITCODE) ... This function + * prepares to emit a conditional comparison within a sequence of conditional + * comparisons. It returns an appropriate comparison with CC for passing to + * gen_ccmp_next or cbranch_optab. The insn to prepare the compare are saved + * in prep_seq and the compare instructions are saved in gen_seq. They will be + * emitted when all the compares in the conditional comparison are generated + * without error. The pPREV expression is the result of a prior call to either + * gen_ccmp_first or gen_ccmp_next. It may return NULL if the combination of + * PREV and this comparison is not supported, otherwise the result must be the + * appropriate for passing to gen_ccmp_next or cbranch_optab. CODE is the RTX + * code of the compare for op0 and op1. BITCODE is AND or IOR, which is the op + * on the compares. + * + * + * ZipCPU --- ??? + */ + +/* TARGET_LOOP_UNROLL_ADJUST(NUNROLL, LOOP) ... This target hook returns a new + * value for the number of times loop should be unrolled. The parameter NUNROLL + * is the number of times the loop is to be unrolled. The parameter loop is a + * pointer to the loop, which is going to be checked for unrolling. 
The target + * hook is required only when the target has special constraints like maximum number of memory accesses. + * + * + * ZipCPU -- ??? + */ + + +/* POWI_MAX_MULTS ... If defined, this macro is interpreted as a signed integer + * C expression that specifies the maximum number of floating point + * multiplications that should be emitted when expanding exponentiation by an + * integer constant inline. When this value is defined, exponentiation + * requiring more than this number of multiplications is implemented by calling + * the system library's pow, powf, or powl routines. The default value + places no upper bound on the multiplication count. + * + * ZipCPU---As we have no system library pow() routine (yet) ... we're not + * ready for this macro. + */ + + +/* TARGET_EXTRA_INCLUDES(SYSROOT, PFX, STDINC) ... This target hook should + * register any extra include files for the target. The parameter stdinc + * indicates if normal include files are present. The parameter SYSROOT is the + * system root directory. The parameter PFX is the prefix for the GCC + * directoiry. + * + * + * ZipCPU---None yet. + */ + +/* TARGET_EXTRA_PRE_INCLUDES(SYSROOT, PFX, STDINC) ... This target hook should + * register any extrra include files for the target before any standard headers. + * The parameter stdinc indicates if normal include files are present. + * + * ZipCPU --- None. + */ + +/* TARGET_OPTF(PATH) ... This target hook should register special include paths + * for the target. The parameter path is the integer to register. On Darwin + * systems, this is used for Framework includes, which have semantics that are + * different from -I. + * + * + * ZipCPU --- None. + */ + +/* TARGET_USE_LOCAL_THUNK_ALIAS_P(FN) ... This target macro returns if it is + * safe to use a local alias for a virtual function FN when constructing + * thunks, false otherwise. By default, the macro returns true for all + * functions, if a target supports aliases (i.e. 
defines ASM_OUTPUT_DEF), + * false otherwise. + * + * + * ZipCPU --- ??? + */ +// #warning "ASM_OUTPUT_DEF's definition has not been considered" + + +/* TARGET_FORMAT_TYPES ... If defined, this macro is the name of a global + * variable containing target-specific format checking information for the + * -Wformat option. The default is to have no target-specific format checks. + * + * ZipCPU --- Default + */ + +/* TARGET_N_FORMAT_TYPES + * + * ZipCPU --- Default + */ + +/* TARGET_OVERRIDES_FORMAT_ATTRIBUTES ... If defined, this macro is the name of + * a global variable containing target-specific format overrides for the + * -Wformat option. The default is to have no target specific format overrides. + * + * ZipCPU --- Default + */ + +/* TARGET_OVERRIDEES_FORMAT_ATTRIBUTES + * TARGET_OVERRIDEES_FORMAT_ATTRIBUTES_COUNT + * + * If defined, the (first) macro is the name of a global variable containing + * target-specific format overrides for the -Wformat option. + */ +/* TARGET_OVERRIDES_FORMAT_INIT ... If defined, this macro specifies the + * optional initialization routine for target specific customizations of the +* system printf and scanf formatter settings. + */ + +/* TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN(TLIST,FN,VAL) ... If defined, this + * macro returns the diagnostic message when it is illegal to pass an argument + * VAL to function FN with prototype TLIST. + * + * ZipCPU---Default. + */ + +/* TARGET_INVALID_CONVERSION + * TARGET_INVALID_UNARY_OP + * TARGET_INVALID_BINARY_OP + * TARGET_INVALID_PARAMETER_TYPE + * TARGET_INVALID_RETURN_TYPE + * TARGET_PROMOTED_TYPE + * TARGET_CONVERT_TO_TYPE + * TARGET_USE_JCR_SECTION_TYPE + * OBJC_JBLEN + * LIBGCC2_UNWIND_ATTRIBUTE + * TARGET_UPDATE_STACK_BOUNDARY + * TARGET_GET_DRAP_RTX + * TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS + */ +/* TARGET_CONST_ANCHOR ... On some architectures it can take multiple + * instructions to synthesize a constant. 
If there is another constant already + * in a register that is close enough in value then it is preferable that the + * new constant is computed from the register using immediate addition or + * subtraction. We accomplish this through CSE. Besides the value of the + * constant we also add a lower and an upper constant anchor to the available + * expressions. These are then queried when encountering new constants. The + * anchors are computed by rounding the constant up and down to a multiple of + * the value of TARGET_CONST_ANCHOR. TARGET_CONST_ANCHOR should be the maximum + * positive value accepted by immediate-add plus one. We currently assume that + * the value of TARGET_CONST_ANCHOR is a poewr of 2. For example, on MIPS, + * where add-immediate takes a 16-bit signed value, TARGET_CONST_ANCHOR is set + * to 0x8000. The default value is zero, which disables this optimization. + * + * ZipCPU---One of the great strengths of the ZipCPU ISA is its ability to + * access registers plus immediates. To use this, we *need* this capability. + * So, we define it here. (to 0x20000, or 2^17 because we can handle 18-bits of + * signed immediate offsets) + * + * On ZipCPU---2^17 + */ +#define TARGET_CONST_ANCHOR zip_const_anchor + +/* TARGET_ASAN_SHADOW_OFFSET ... Return the offset bitwise ored into shifted + * address to get corresponding Address Sanitizer shadow memory address. NULL + * if address Sanitizer is not supported by the target. + */ +#define TARGET_ASAN_SHADOW_OFFSET NULL + +/* TARGET_MEMMODEL_CHECK + */ +/* TARGET_ATOMIC_TEST_AND_SET_TRUEVAL ... This value should be set if the result + * written by atomic test and set is not exactly 1, i.e. the bool true. + */ +/* TARGET_HAS_IFUNC_P ... It returns true if the target supports GNU indirect + * functions. The support includes the assembler, linker, and dynamic linker. + * The default value of this hook is based on target's libc. 
+ */ +#define TARGET_HAS_IFUNC_P hook_bool_void_true + +/* TARGET_ATOMIC_ALIGN_FOR_MODE(MODE) ... If defined, this function returns + * an appropriate alignment in bits for an atomic object of machine mode + * MODE. If 0 is returned then the default alignment for the specified mode + * is used. + * + * ZipCPU---Both default and 2 would be valid. We'll stick to the default. + */ + +/* TARGET_ATOMIC_ASSIGN_EXPAND_FENV --- ISO C11 requires atomic compound + * assignments that may raise floating-point exceptions to raise exceptions + * corresponding to the arithmetic operation whose result was successfully + * stored in a compare-and-exchange sequence. This requires code equivalent to + * calls to feholdexcept, feclearexcept and feupdateenv to be generated at + * appropriate points in the compare-and-exchange sequence. This hook should + * set *hold to an expression equivalent + * + * ZipCPU --- ??? + */ + +/* TARGET_RECORD_OFFLOAD_SYMBOL ... Used when offloaded functions are seen in + * the compilation unit and no named sections are available. It is called once + * for each symbol that must be recorded in the offload function and variable + * table. + * + * ZipCPU --- Offloaded functions? + */ + +/* TARGET_OFFLOAD_OPTIONS + * + * ZipCPU---none defined + */ + +/* TARGET_SUPPORTS_WIDE_INT ... On older ports, large integers are stored + * in CONST_DOUBLE rtl objects. Newer ports define TARGET_SUPPORTS_WIDE_INT + * to be nonzero to indicate that large integers are stored in CONST_WIDE_INT + * rtl objects. The CONST_WIDE_INT allows very large integer constants to be + * represented. CONST_DOUBLE is limited to twice the size of the hosts + * HOST_WIDE_INT representation. + * + * ZipCPU---We don't need these yet, so this isn't yet relevant. (These ints + * are wider than DImode ...) 
+ */ +#define TARGET_SUPPORTS_WIDE_INT 0 + + +/* Now, for the prototype functions ...*/ +// These have been moved to zip-protos.h + +// extern void zip_init_builtins(void); +// extern void zip_asm_output_anchor(rtx x); +// extern bool zip_legitimate_address_p(enum machine_mode mode, rtx x, bool string); +// extern void zip_asm_trampoline_template(FILE *); +// extern void zip_initial_elimination_offset(int from, int to); +// extern void zip_print_operand(FILE *stream, rtx *x, int code); +// extern void zip_print_operand_address(FILE *stream, rtx *x); +// extern void zip_asm_output_def(FILE *s, const char *n, const char *v); +// extern void zip_update_cc_notice(rtx exp, rtx_insn *insn); +// extern int zip_address_operand(rtx op); +// extern int zip_const_address_operand(rtx op); +// extern void zip_expand_prologue(void); +// extern void zip_expand_epilogue(void); +// extern bool zip_gen_move_rtl(rtx, rtx); +// extern bool zip_load_address_lod(rtx, rtx); +// extern bool zip_load_address_sto(rtx, rtx); +// extern void zip_print_operand(FILE *fp, rtx x, int code); +// extern void zip_print_operand_address(FILE *fp, rtx x); +// extern bool zip_use_return_insn(void); + +#define UQQmode USQmode +#define UHQmode USQmode +#define UHAmode USAmode +#define QQmode SQmode +#define HQmode SQmode +#define QImode SImode +#define HImode SImode +#define QAmode SAmode +#define HAmode SAmode + +#include "insn-modes.h" +#include "zip-protos.h" + +#endif /* GCC_ZIP_H */ + diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/config/zip/zip.md gcc-5.3.0-zip/gcc/config/zip/zip.md --- gcc-5.3.0-original/gcc/config/zip/zip.md 1969-12-31 19:00:00.000000000 -0500 +++ gcc-5.3.0-zip/gcc/config/zip/zip.md 2016-05-12 15:59:38.583777436 -0400 @@ -0,0 +1,3238 @@ +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Filename: zip.md +;; +;; Project: Zip CPU -- a small, lightweight, RISC CPU soft core +;; +;; Purpose: This is the machine description of the Zip CPU as 
needed by the +;; GNU compiler collection (GCC). +;; +;; +;; Creator: Dan Gisselquist, Ph.D. +;; Gisselquist Technology, LLC +;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Copyright (C) 2015, Gisselquist Technology, LLC +;; +;; This program is free software (firmware): you can redistribute it and/or +;; modify it under the terms of the GNU General Public License as published +;; by the Free Software Foundation, either version 3 of the License, or (at +;; your option) any later version. +;; +;; This program is distributed in the hope that it will be useful, but WITHOUT +;; ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY or +;; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +;; for more details. +;; +;; License: GPL, v3, as defined and found on www.gnu.org, +;; http://www.gnu.org/licenses/gpl.html +;; +;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; +;; - Immediate integer operand constraints +;; 'I' -2^22 ... 2^22-1, or -4194304 .. 4194303 (LDI insn) +;; 'M' -2^12 ... 2^12-1, or -4096 ... 4095 (MOV offset) +;; 'N' -2^14 ... 2^14-1, or -16384 ... 16383 (OpB offset) +;; 'O' -2^17 ... 2^17-1, or -131072 ... 
131071 (OpB Immediate) +;; 'R' 0...31 (Shift value) +;; - Memory constraints +;; "Q" Op-B capable references to memory +;; "S" References to constant memory +;; - Address constraints +;; "U" Op-B capable address that references to memory +;; "T" Constant memory addresses +(define_constraint "M" + "A 13-bit signed immediate such as a MOV instruction can handle" + (and (match_code "const_int") + (match_test "(ival < 0x1000) && (ival >= -0x1000)"))) +(define_constraint "N" + "A 14-bit signed immediate offset such as an Op-B register offset" + (and (match_code "const_int") + (match_test "(ival < 0x2000) && (ival >= -0x2000)"))) +(define_constraint "O" + "An 18-bit signed immediate such as an Op-B Immediate can handle" + (and (match_code "const_int") + (match_test "(ival < 0x20000) && (ival >= -0x20000)"))) +(define_constraint "R" + "Bits that a value may be shifted" + (and (match_code "const_int") + (match_test "(ival < 32) && (ival >= 0)"))) +;; +; +; +; Our builtin functions, by identifier +; +(define_constants + [(UNSPEC_RTU 1) + (UNSPEC_HALT 2) + (UNSPEC_IDLE 3) + (UNSPEC_SYSCALL 4) + (UNSPEC_SAVE_CONTEXT 5) + (UNSPEC_RESTORE_CONTEXT 6) + (UNSPEC_BITREV 7) + (UNSPEC_GETUCC 8) + (UNSPEC_GETCC 9) + (UNSPEC_LDILO 10) + ; (UNSPEC_RAW_CALL 11) + ]) +; +; +; Registers by name +(define_constants + [(RTN_REG 0) ; Return address register + (RTNV_REG 1) ; Subroutine return value register + (AP_REG 10) ; Hopefully never used + (GBL_REG 11) ; Hopefully never used, but just in case ... 
+ (FP_REG 12) + (SP_REG 13) + (CC_REG 14) + (PC_REG 15) + ]) +; +; +; + +;; Predicates +(define_predicate "zip_const_address_operand_p" + (match_code "symbol_ref,const,label_ref,code_label") +{ + return zip_const_address_operand(op); +}) + +(define_predicate "zip_address_operand_p" + (match_code "reg,plus") +{ + return zip_pd_opb_operand(op); +}) + +(define_predicate "zip_opb_operand_p" + (match_code "reg,plus,const_int,subreg") +{ + return zip_pd_opb_operand(op); +}) + +(define_predicate "zip_opb_immv_p" + (match_code "const_int") +{ + return (INTVAL(op)<((1<<13)-1))&&(INTVAL(op)>=-((1<<13))); +}) + +(define_predicate "zip_opb_single_operand_p" + (match_code "reg,subreg,const_int") +{ + return zip_pd_opb_operand(op); +}) + +(define_predicate "zip_mov_operand_p" + (match_code "reg,plus") +{ + return zip_pd_mov_operand(op); +}) + +(define_predicate "zip_memory_operand_p" + (match_code "mem") +{ + return zip_pd_opb_operand(XEXP(op,0)); +}) + +(define_predicate "zip_imm_operand_p" + (match_code "const_int") +{ + return zip_pd_imm_operand(op); +}) + +(define_predicate "zip_mvimm_operand_p" + (match_code "const_int") +{ + return zip_pd_mvimm_operand(op); +}) + +(define_predicate "zip_movdst_operand_p" + (match_code "mem,reg,subreg") +{ + if (MEM_P(op)) // Check for valid store address + return zip_pd_opb_operand(XEXP(op,0)); + else if (SUBREG_P(op)) + return 1; + else if ((REG_P(op))||(SUBREG_P(op))) + return register_operand(op, GET_MODE(op)); + return 1; +}) + +(define_predicate "zip_movsrc_operand_p" + (match_code "mem,reg,subreg,const_int,const,symbol_ref,label_ref,code_label") +{ + if (MEM_P(op)) + return zip_pd_opb_operand(XEXP(op,0)); + else if (GET_CODE(op)==PLUS) + return zip_pd_opb_operand(op); + else if (SUBREG_P(op)) { + //; As far as predicates are concerned, subregs must be valid. + //; The details of them are settled within the constraints. 
+ return 1; + } else if ((REG_P(op))||(SUBREG_P(op))) + return register_operand(op,SImode); + else if (CONST_INT_P(op)) + return 1; + return 1; +}) + +;; Constraints +; +(define_memory_constraint "S" + "Any memory referenced by a constant address, possibly unknown at compile time" + (and (match_code "mem") + (match_test "zip_ct_const_address_operand(XEXP(op,0))"))) +(define_memory_constraint "Q" + "Any memory addressed suitably for a load or store instruction" + (and (match_code "mem") + (match_test "zip_ct_address_operand(XEXP(op,0))"))) +(define_address_constraint "U" + "An address suitable for a load or store instruction" + (and (match_code "reg,plus") + (match_test "zip_ct_address_operand(op)"))) +(define_address_constraint "T" + "Any constant address, to include those made by symbols unknown at compile time" + (and (match_code "label_ref,code_label,symbol_ref,const") + (match_test "zip_ct_const_address_operand(op)"))) +; +; +;; Attributes +; +(define_attr "predicable" "no,yes" (const_string "yes")) +(define_attr "conditional" "no,yes" (const_string "no")) +(define_attr "ccresult" "set,unknown,unchanged,validzn" (const_string "set")) +; +; Mode attributes +; (define_mode_iterator ZI [QI HI SI]) +; (define_mode_attr zipa [(QI "") (HI "") (SI "")]) +(define_mode_iterator ZI [SI]) +(define_mode_attr zipa [(SI "")]) +; +; +; +;; Instructions +; +; (define_insn +; optional name +; RTL template -- a vector of incomplete RTL expressions describing the +; semantics of the instruction. 
It is incomplete because it may +; contain match_operand, match_operator, and match_dup expressions +; The condition --- contains a C expression, may be an empty string +; output template or output statement--fragment of C code returning a str +; Attributes -- +; ) +; +; (match_operand:m n predicate constraint) +; Placeholder for operand #n of the instruction +; Predicate string that is the name of a function w/ 2 arguments: +; (expression, machine mode) +; we can build functions: +; "isregister" to describe a register +; "isimmediate" to describe an immediate +; "offsetreg" to describe a register plus offset +; "anyregister" to describe *ANY* register (uRx or Rx) +; But ... functions "address_operand", "immediate_operand", +; "register_operand", "indirect_operand" +; "comparison_operator" and "ordered_comparison_operator" +; are also available--be aware, they include more comparisons +; than Zip CPU can do. +; +; +; +; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Move instructions: both +; (arbitrary) from variables to variables, but this gets +; expanded into: +; from registers to registers +; from immediates to registers +;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; +; +; +(define_expand "mov<mode>" + [(set (match_operand:ZI 0 "nonimmediate_operand" "") + (match_operand:ZI 1 "general_operand" ""))] + "" + {//; Everything except mem=const or mem=mem can be done easily + //; extern void zip_debug_rtx_pfx(const char *, const_rtx); + //; fprintf(stderr, "ZIP-GEN-MOVE\n"); + //; zip_debug_rtx_pfx("FROM: ", operands[1]); + //; zip_debug_rtx_pfx("TO : ", operands[0]); + + //; Need to load into a register between memory slots + if ((MEM_P(operands[0]))&&(MEM_P(operands[1]))) { + //; fprintf(stderr, "GEN-MOVSI: MEM -> MEM\n"); + if (can_create_pseudo_p()) { + rtx tmp = gen_reg_rtx(GET_MODE(operands[1])); + emit_insn(gen_movsi(tmp,operands[1])); + operands[1] = tmp; + } + } + + //; Op[0] has a bad address, need to legitimize it + if 
((MEM_P(operands[0]))&& + ((zip_const_address_operand(XEXP(operands[0],0))) + ||(!zip_pd_opb_operand(XEXP(operands[0],0)))) + ) + { + //; fprintf(stderr, "GEN-MOVSI: Not to a MEM(REG)\n"); + if (can_create_pseudo_p()) { + rtx tmp = gen_reg_rtx(Pmode); + //; Load the address into a register + emit_insn(gen_movsi(tmp,XEXP(operands[0],0))); + XEXP(operands[0],0) = tmp; + mark_reg_pointer(tmp,1); + } + } + //; Op[1] is a constant. Need to load into a register before we can + //; place it into memory. + if ((MEM_P(operands[0]))&& + ((CONSTANT_P(operands[1])) + ||(CONST_INT_P(operands[1])))) { + //; fprintf(stderr, "GEN-MOVSI: CONST -> MEM\n"); + //; zip_debug_rtx_pfx("MEM : ", operands[0]); + //; zip_debug_rtx_pfx("CONST: ", operands[1]); + if (can_create_pseudo_p()) { + rtx tmp = gen_reg_rtx(GET_MODE(operands[0])); + emit_insn(gen_movsi(tmp,operands[1])); + operands[1] = tmp; + } + } + //; Op[1] has a bad address, need to legitimize it + if ((MEM_P(operands[1]))&& + //; (!REG_P(XEXP(operands[1],0))) + ((zip_const_address_operand(XEXP(operands[1],0))) + ||(!zip_pd_opb_operand(XEXP(operands[1],0))))) { + //; fprintf(stderr, "GEN-MOVSI: Not from a MEM(REG)\n"); + if (can_create_pseudo_p()) { + rtx tmp = gen_reg_rtx(Pmode); + emit_insn(gen_movsi(tmp,XEXP(operands[1],0))); + XEXP(operands[1],0) = tmp; + } else if (REG_P(operands[0])) { //; Can we steal Op[0]'s reg? 
+ rtx tmp = operands[0]; + emit_insn(gen_movsi(tmp,XEXP(operands[1],0))); + XEXP(operands[1],0) = tmp; + } + } + } + [(set_attr "ccresult" "unchanged")]) +(define_insn "movsi_raw" + [(set (match_operand:SI 0 "zip_movdst_operand_p" "=r,Q,r,r") + (match_operand:SI 1 "zip_movsrc_operand_p" "r,r,Q,i"))] + "(register_operand(operands[0],SImode))||(register_operand(operands[1],SImode))" + "@ + MOV\t%1,%0 + STO\t%1,%0 + LOD\t%1,%0 + LDI\t%1,%0" + [(set_attr "ccresult" "unchanged")]) +(define_insn "mov<mode>_reg" ; Register to register move + [(set (match_operand:ZI 0 "register_operand" "=r") + (match_operand:ZI 1 "register_operand" "r"))] + "" + "MOV %1,%0" + [(set_attr "ccresult" "unchanged")]) +(define_insn "mov<mode>_reg_off" ; Register to register move, used by prologue + [(set (match_operand:ZI 0 "register_operand" "=r") + (plus:ZI (match_operand:ZI 1 "register_operand" "r") + (match_operand:ZI 2 "zip_mvimm_operand_p" "M"))) + ] + "" + "MOV %2(%1),%0" + [(set_attr "ccresult" "unchanged")]) +;(define_insn "mov<mode>_lod" ; Load from memory +; [(set (match_operand:ZI 0 "register_operand" "=r") +; (match_operand:ZI 1 "zip_memory_operand_p" "Q"))] +; "" +; "LOD %1,%0" +; [(set_attr "ccresult" "unchanged")]) +;(define_insn "mov<mode>_sto" ; Store into memory +; [(set (match_operand:ZI 0 "zip_memory_operand_p" "=Q") +; (match_operand:ZI 1 "register_operand" "r"))] +; "" +; "STO %1,%0" +; [(set_attr "ccresult" "unchanged")]) +(define_expand "mov<mode>_lod" ; Load from memory + [(set (match_operand:ZI 0 "register_operand" "=r") + (match_operand:ZI 1 "zip_memory_operand_p" "Q"))] + "") +(define_insn "*movsi_lod" + [(set (match_operand:SI 0 "register_operand" "=r") + (match_operand:SI 1 "zip_memory_operand_p" ""))] + "" + "LOD\t%1,%0" + [(set_attr "predicable" "yes") (set_attr "ccresult" "unchanged")]) +(define_insn "movsi_lod_off" ; used by epilogue code + [(set (match_operand:SI 0 "register_operand" "=r") + (mem:SI (plus:SI (match_operand:SI 1 "register_operand" "r") + 
(match_operand:SI 2 "const_int_operand" "N"))))] + "" + "LOD\t%2(%1),%0" + [(set_attr "predicable" "yes") (set_attr "ccresult" "unchanged")]) +(define_expand "mov<mode>_sto" ; Store into memory + [(set (match_operand:ZI 0 "zip_memory_operand_p" "=Q") + (match_operand:ZI 1 "register_operand" "r"))] + "") +(define_insn "*movsi_sto" + [(set (match_operand:SI 0 "zip_memory_operand_p" "=Q") + (match_operand:SI 1 "register_operand" "r"))] + "" + "STO\t%1,%0" + [(set_attr "predicable" "yes") (set_attr "ccresult" "unchanged")]) +(define_insn "movsi_sto_off" ; used by prologue code + [(set (mem:SI (plus:SI + (match_operand:SI 0 "register_operand" "r") + (match_operand:SI 1 "const_int_operand" "N"))) + (match_operand:SI 2 "register_operand" "r"))] + "" + "STO\t%2,%1(%0)" + [(set_attr "predicable" "yes") (set_attr "ccresult" "unchanged")]) +(define_insn "mov<mode>_ldi" ; Load immediate + [(set (match_operand:ZI 0 "register_operand" "=r") + (match_operand:ZI 1 "immediate_operand" "ipU"))] + "" + "LDI %1,%0" + [(set_attr "ccresult" "unchanged")]) +; +; +; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Load and store multiple values +;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; +; So far, from the code I've seen from GCC's output, +; these instructions do not appear to be necessary. +; +;(define_insn "load_multiple" +; for(a=0; a<%2; a++) +; LOD a(%1),%0+a +;(define_insn "store_multiple" +; for(a=0; a<%2; a++) +; STO %0+a,a(%1) +; pushsi -- Do not define, compiler will work around it nicely w/o our help +; +; +; +; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Substitution Pattern +;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; +; +(define_subst "cc_substitution" + ; The pattern may not have any match_dup expressions. 
+ [(set (match_operand:SI 0 "" "") (match_operand:SI 1 "" "")) + (clobber (reg:CC CC_REG))] + "" + [(set (match_dup 0) (match_dup 1)) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0))) + ]) +; +(define_subst_attr "cc_subst" "cc_substitution" "_raw" "_clobber") +; +; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; General arithmetic instructions +;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; +; +; +; +(define_expand "add<mode>3" ; Fastest/best instruction always goes first + [(set (match_operand:ZI 0 "register_operand" "=r") + (plus:ZI (match_operand:ZI 1 "register_operand" "0") + (match_operand:ZI 2 "zip_opb_operand_p" ""))) + ]) +(define_insn_and_split "add<mode>3_split_reg" + [(set (match_operand:ZI 0 "register_operand" "=r") + (plus:ZI (match_operand:ZI 1 "register_operand" "0") + (match_operand:ZI 2 "zip_opb_single_operand_p" "rO")))] + "" + "#" ; This code means the instruction *must* be split + "(reload_completed)&&(REG_P(operands[0]))&&(REG_P(operands[1]))&&(REGNO(operands[0])==REGNO(operands[1]))" + [(parallel [(set (match_dup 0) (plus:ZI (match_dup 1) (match_dup 2))) + (clobber (reg:CC CC_REG))])] + "" + [(set_attr "predicable" "yes")]) +(define_insn_and_split "add<mode>3_split_off" + [(set (match_operand:ZI 0 "register_operand" "=r") + (plus:ZI (match_operand:ZI 1 "register_operand" "0") + (plus:ZI (match_operand:ZI 2 "register_operand" "r") + (match_operand:ZI 3 "zip_opb_immv_p" "N"))))] + "" + "#" ; This code means the instruction *must* be split + "(reload_completed)&&(REG_P(operands[0]))&&(REG_P(operands[1]))&&(REGNO(operands[0])==REGNO(operands[1]))" + [(parallel [(set (match_dup 0) (plus:ZI (match_dup 1) + (plus:ZI (match_dup 2) (match_dup 3)))) + (clobber (reg:CC CC_REG))])] + "" + [(set_attr "predicable" "yes")]) +(define_insn "addsi3_reg_clobber" + [(set (match_operand:SI 0 "register_operand" "=r") + (plus:SI (match_operand:SI 1 "register_operand" "0") + (match_operand:SI 2 "zip_opb_single_operand_p" 
"rO"))) + (clobber (reg:CC CC_REG))] + "" + "ADD %2,%0" + [(set_attr "ccresult" "set")]) +(define_insn "addsi3_reg_raw" + [(set (match_operand:SI 0 "register_operand" "=r") + (plus:SI (match_operand:SI 1 "register_operand" "0") + (match_operand:SI 2 "zip_opb_single_operand_p" "rO"))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))] + "" + "ADD %2,%0" + [(set_attr "ccresult" "set")]) +(define_insn "add<mode>3_off_raw" + [(set (match_operand:ZI 0 "register_operand" "=r") + (plus:ZI (match_operand:ZI 1 "register_operand" "0") + (plus:ZI (match_operand:ZI 2 "register_operand" "r") + (match_operand:ZI 3 "zip_opb_immv_p" "N")))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))] + "" + "ADD %3+%2,%0" + [(set_attr "ccresult" "set")]) +(define_insn "add<mode>3_off_clobber" + [(set (match_operand:ZI 0 "register_operand" "=r") + (plus:ZI (match_operand:ZI 1 "register_operand" "0") + (plus:ZI (match_operand:ZI 2 "register_operand" "r") + (match_operand:ZI 3 "zip_opb_immv_p" "N")))) + (clobber (reg:CC CC_REG))] + "" + "ADD %3+%2,%0" + [(set_attr "ccresult" "set")]) +; +; +; +(define_expand "sub<mode>3" + [(set (match_operand:ZI 0 "register_operand" "=r") + (minus:ZI (match_operand:ZI 1 "register_operand" "0") + (match_operand:ZI 2 "zip_opb_operand_p" "")))]) +(define_insn_and_split "sub<mode>3_split_reg" + [(set (match_operand:ZI 0 "register_operand" "=r") + (minus:ZI (match_operand:ZI 1 "register_operand" "0") + (match_operand:ZI 2 "zip_opb_single_operand_p" "rO")))] + "" + "#" + "(reload_completed)" + [(parallel [(set (match_dup 0) (minus:ZI (match_dup 1) (match_dup 2))) + (clobber (reg:CC CC_REG))])] + "" + [(set_attr "ccresult" "set") (set_attr "predicable" "yes")]) +(define_insn "sub<mode>3_reg_raw" + [(set (match_operand:ZI 0 "register_operand" "=r") + (minus:ZI (match_operand:ZI 1 "register_operand" "0") + (match_operand:ZI 2 "zip_opb_single_operand_p" "rO"))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))] + "" + "SUB 
%2,%0" + [(set_attr "ccresult" "set")]) +(define_insn "sub<mode>3_reg_clobber" + [(set (match_operand:ZI 0 "register_operand" "=r") + (minus:ZI (match_operand:ZI 1 "register_operand" "0") + (match_operand:ZI 2 "zip_opb_single_operand_p" "rO"))) + (clobber (reg:CC CC_REG))] + "" + "SUB %2,%0" + [(set_attr "ccresult" "set")]) +(define_insn_and_split "sub<mode>3_off_split" + [(set (match_operand:ZI 0 "register_operand" "=r") + (minus:ZI (match_operand:ZI 1 "register_operand" "0") + (plus:ZI (match_operand:ZI 2 "register_operand" "%r") + (match_operand:ZI 3 "zip_opb_immv_p" "N"))))] + "" + "#" + "(reload_completed)" + [(parallel [(set (match_dup 0) (minus:ZI (match_dup 1) + (plus:ZI (match_dup 2) (match_dup 3)))) + (clobber (reg:CC CC_REG))])] + "" + [(set_attr "ccresult" "set")]) +(define_insn "sub<mode>3_off_raw" + [(set (match_operand:ZI 0 "register_operand" "=r") + (minus:ZI (match_operand:ZI 1 "register_operand" "0") + (plus:ZI (match_operand:ZI 2 "register_operand" "%r") + (match_operand:ZI 3 "zip_opb_immv_p" "N")))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))] + "" + "SUB %3+%2,%0" + [(set_attr "ccresult" "set")]) +(define_insn "sub<mode>3_off_clobber" + [(set (match_operand:ZI 0 "register_operand" "=r") + (minus:ZI (match_operand:ZI 1 "register_operand" "0") + (plus:ZI (match_operand:ZI 2 "register_operand" "%r") + (match_operand:ZI 3 "zip_opb_immv_p" "N")))) + (clobber (reg:CC CC_REG))] + "" + "SUB %3+%2,%0" + [(set_attr "ccresult" "set")]) +(define_insn "mul<mode>3_oldstyle" + [(set (match_operand:ZI 0 "register_operand" "=r") + (mult:ZI (match_operand:ZI 1 "register_operand" "%r") + (match_operand:ZI 2 "register_operand" "r"))) + (clobber (match_scratch:ZI 3 "=r")) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))] + ; "(R0 != R1)&&(R0 != R2)&&(R0!=R3)&&(R1!=R2)&&(R1=R3)&&(R2!=R3)" + "(!ZIP_LONGMPY)" + "MOV %1,%0 + MPYS %2,%0 + MOV %1,%3 + ROL 16,%3 + MPYS %2,%3 + ROL 16,%3 + AND 0x0ffff,%3 + ADD %3,%0 + MOV %2,%3 + ROL 
16,%3 + MPYS %1,%3 + ROL 16,%3 + AND 0x0ffff,%3 + ADD %3,%0" + [(set_attr "ccresult" "unknown")]) +; +; +(define_expand "mul<mode>3" + [(set (match_operand:ZI 0 "register_operand" "=r") + (mult:ZI (match_operand:ZI 1 "register_operand" "0") + (match_operand:ZI 2 "zip_opb_operand_p" "")))] + "(ZIP_LONGMPY)") +(define_insn_and_split "mul<mode>3_split_reg" + [(set (match_operand:ZI 0 "register_operand" "=r") + (mult:ZI (match_operand:ZI 1 "register_operand" "0") + (match_operand:ZI 2 "zip_opb_single_operand_p" "rO")))] + "(ZIP_LONGMPY)" + "#" + "(reload_completed)" + [(parallel [(set (match_dup 0) (mult:ZI (match_dup 1) (match_dup 2))) + (clobber (reg:CC CC_REG))])] + "" + [(set_attr "ccresult" "set")]) +(define_insn_and_split "mul<mode>3_split_off" + [(set (match_operand:ZI 0 "register_operand" "=r") + (mult:ZI (match_operand:ZI 1 "register_operand" "0") + (plus:ZI (match_operand:ZI 2 "register_operand" "r") + (match_operand:ZI 3 "zip_opb_immv_p" "N"))))] + "(ZIP_LONGMPY)" + "#" + "(reload_completed)" + [(parallel [(set (match_dup 0) (mult:ZI (match_dup 1) + (plus:ZI (match_dup 2) (match_dup 3)))) + (clobber (reg:CC CC_REG))])] + "" + [(set_attr "ccresult" "set")]) +(define_insn "mul<mode>3_reg_clobber" + [(set (match_operand:ZI 0 "register_operand" "=r") + (mult:ZI (match_operand:ZI 1 "register_operand" "0") + (match_operand:ZI 2 "zip_opb_single_operand_p" "rO"))) + (clobber (reg:CC CC_REG))] + "(ZIP_LONGMPY)" + "MPY\t%2,%0" + [(set_attr "ccresult" "set")]) +(define_insn "mul<mode>3_reg_raw" + [(set (match_operand:ZI 0 "register_operand" "=r") + (mult:ZI (match_operand:ZI 1 "register_operand" "0") + (match_operand:ZI 2 "zip_opb_single_operand_p" "rO"))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))] + "(ZIP_LONGMPY)" + "MPY\t%2,%0" + [(set_attr "ccresult" "set")]) +(define_insn "mul<mode>3_off_raw" + [(set (match_operand:ZI 0 "register_operand" "=r") + (mult:ZI (match_operand:ZI 1 "register_operand" "0") + (plus:ZI (match_operand:ZI 2 
"register_operand" "r") + (match_operand:ZI 3 "zip_opb_immv_p" "N")))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))] + "(ZIP_LONGMPY)" + "MPY\t%3+%2,%0" + [(set_attr "ccresult" "set")]) +(define_insn "mul<mode>3_off_clobber" + [(set (match_operand:ZI 0 "register_operand" "=r") + (mult:ZI (match_operand:ZI 1 "register_operand" "0") + (plus:ZI (match_operand:ZI 2 "register_operand" "r") + (match_operand:ZI 3 "zip_opb_immv_p" "N")))) + (clobber (reg:CC CC_REG))] + "(ZIP_LONGMPY)" + "MPY\t%3+%2,%0" + [(set_attr "ccresult" "set")]) +; +; +(define_expand "smulsi3_highpart" + [(set (match_operand:SI 0 "register_operand" "=r") + (truncate:SI (ashiftrt:DI (mult:DI + (sign_extend:DI (match_operand:SI 1 "register_operand" "0")) + (sign_extend:DI (match_operand:SI 2 "zip_opb_operand_p" ""))) + (const_int 32))))] + "(ZIP_LONGMPY)") +(define_insn_and_split "smulsi3_highpart_split_reg" + [(set (match_operand:SI 0 "register_operand" "=r") + (truncate:SI (ashiftrt:DI (mult:DI + (sign_extend:DI (match_operand:SI 1 "register_operand" "0")) + (sign_extend:DI (match_operand:SI 2 "zip_opb_single_operand_p" "rO"))) + (const_int 32))))] + "(ZIP_LONGMPY)" + "#" + "(reload_completed)" + [(parallel [(set (match_dup 0) + (truncate:SI (ashiftrt:DI + (mult:DI + (sign_extend:DI (match_dup 1)) + (sign_extend:DI (match_dup 2))) + (const_int 32)))) + (clobber (reg:CC CC_REG))])] + "" + [(set_attr "ccresult" "set")]) +(define_insn_and_split "smulsi3_highpart_split_off" + [(set (match_operand:SI 0 "register_operand" "=r") + (truncate:SI (ashift:DI (mult:DI + (sign_extend:DI (match_operand:SI 1 "register_operand" "0")) + (sign_extend:DI + (plus:SI (match_operand:SI 2 "register_operand" "r") + (match_operand:SI 3 "zip_opb_immv_p" "N")))) + (const_int 32))))] + "(ZIP_LONGMPY)" + "#" + "(reload_completed)" + [(parallel [(set (match_dup 0) + (truncate:SI (ashiftrt:DI + (mult:SI + (sign_extend:DI (match_dup 1)) + (sign_extend:DI + (plus:SI (match_dup 2) (match_dup 3)))) + (const_int 
32)))) + (clobber (reg:CC CC_REG))])] + "" + [(set_attr "ccresult" "set")]) +(define_insn "smulsi3_highpart_reg_clobber" + [(set (match_operand:SI 0 "register_operand" "=r") + (truncate:SI (ashiftrt:DI + (mult:SI + (sign_extend:DI (match_operand:SI 1 "register_operand" "0")) + (sign_extend:DI (match_operand:SI 2 "zip_opb_single_operand_p" "rO"))) + (const_int 32)))) + (clobber (reg:CC CC_REG))] + "(ZIP_LONGMPY)" + "MPYSHI\t%2,%0" + [(set_attr "ccresult" "set")]) +(define_insn "smulsi3_highpart_reg_raw" + [(set (match_operand:SI 0 "register_operand" "=r") + (truncate:SI (ashiftrt:DI + (mult:SI + (sign_extend:DI (match_operand:SI 1 "register_operand" "0")) + (sign_extend:DI (match_operand:SI 2 "zip_opb_single_operand_p" "rO"))) + (const_int 32)))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))] + "(ZIP_LONGMPY)" + "MPYSHI\t%2,%0" + [(set_attr "ccresult" "set")]) +(define_insn "smulsi3_highpart_off_raw" + [(set (match_operand:SI 0 "register_operand" "=r") + (truncate:SI (ashiftrt:DI + (mult:SI + (sign_extend:DI (match_operand:SI 1 "register_operand" "0")) + (sign_extend:DI (plus:SI + (match_operand:SI 2 "register_operand" "r") + (match_operand:SI 3 "zip_opb_immv_p" "N")))) + (const_int 32)))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))] + "(ZIP_LONGMPY)" + "MPYSHI\t%3+%2,%0" + [(set_attr "ccresult" "set")]) +(define_insn "smulsi3_highpart_off_clobber" + [(set (match_operand:SI 0 "register_operand" "=r") + (truncate:SI (ashiftrt:DI + (mult:SI + (sign_extend:DI (match_operand:SI 1 "register_operand" "0")) + (sign_extend:DI (plus:SI + (match_operand:SI 2 "register_operand" "r") + (match_operand:SI 3 "zip_opb_immv_p" "N")))) + (const_int 32)))) + (clobber (reg:CC CC_REG))] + "(ZIP_LONGMPY)" + "MPYSHI\t%3+%2,%0" + [(set_attr "ccresult" "set")]) +; +; +(define_expand "umulsi3_highpart" + [(set (match_operand:SI 0 "register_operand" "=r") + (truncate:SI (ashiftrt:DI (mult:DI + (zero_extend:DI (match_operand:SI 1 "register_operand" "0")) 
+ (zero_extend:DI (match_operand:SI 2 "zip_opb_operand_p" ""))) + (const_int 32))))] + "(ZIP_LONGMPY)") +(define_insn_and_split "umulsi3_highpart_split_reg" + [(set (match_operand:SI 0 "register_operand" "=r") + (truncate:SI (ashiftrt:DI (mult:DI + (zero_extend:DI (match_operand:SI 1 "register_operand" "0")) + (zero_extend:DI (match_operand:SI 2 "zip_opb_single_operand_p" "rO"))) + (const_int 32))))] + "(ZIP_LONGMPY)" + "#" + "(reload_completed)" + [(parallel [(set (match_dup 0) + (truncate:SI (ashiftrt:DI + (mult:SI + (zero_extend:DI (match_dup 1)) + (zero_extend:DI (match_dup 2))) + (const_int 32)))) + (clobber (reg:CC CC_REG))])] + "" + [(set_attr "ccresult" "set")]) +(define_insn_and_split "umulsi3_highpart_split_off" + [(set (match_operand:SI 0 "register_operand" "=r") + (truncate:SI (ashiftrt:DI + (mult:DI + (zero_extend:DI (match_operand:SI 1 "register_operand" "0")) + (zero_extend:DI + (plus:SI (match_operand:SI 2 "register_operand" "r") + (match_operand:SI 3 "zip_opb_immv_p" "N")))) + (const_int 32))))] + "(ZIP_LONGMPY)" + "#" + "(reload_completed)" + [(parallel [(set (match_dup 0) + (truncate:SI (ashiftrt:DI + (mult:DI + (zero_extend:DI (match_dup 1)) + (zero_extend:DI + (plus:SI (match_dup 2) (match_dup 3)))) + (const_int 32)))) + (clobber (reg:CC CC_REG))])] + "" + [(set_attr "ccresult" "set")]) +(define_insn "umulsi3_highpart_reg_clobber" + [(set (match_operand:SI 0 "register_operand" "=r") + (truncate:SI (ashiftrt:DI + (mult:DI + (zero_extend:DI (match_operand:SI 1 "register_operand" "0")) + (zero_extend:DI (match_operand:SI 2 "zip_opb_single_operand_p" "rO"))) + (const_int 32)))) + (clobber (reg:CC CC_REG))] + "(ZIP_LONGMPY)" + "MPYSHI\t%2,%0" + [(set_attr "ccresult" "set")]) +(define_insn "umulsi3_highpart_reg_raw" + [(set (match_operand:SI 0 "register_operand" "=r") + (truncate:SI (ashiftrt:DI + (mult:DI + (zero_extend:DI (match_operand:SI 1 "register_operand" "0")) + (zero_extend:DI (match_operand:SI 2 "zip_opb_single_operand_p" "rO"))) + 
(const_int 32)))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))] + "(ZIP_LONGMPY)" + "MPYSHI\t%2,%0" + [(set_attr "ccresult" "set")]) +(define_insn "umulsi3_highpart_off_raw" + [(set (match_operand:SI 0 "register_operand" "=r") + (truncate:SI (ashiftrt:DI + (mult:DI + (zero_extend:DI (match_operand:SI 1 "register_operand" "0")) + (zero_extend:DI (plus:SI + (match_operand:SI 2 "register_operand" "r") + (match_operand:DI 3 "zip_opb_immv_p" "N")))) + (const_int 32)))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))] + "(ZIP_LONGMPY)" + "MPYSHI\t%3+%2,%0" + [(set_attr "ccresult" "set")]) +(define_insn "umulsi3_highpart_off_clobber" + [(set (match_operand:SI 0 "register_operand" "=r") + (truncate:SI (ashiftrt:DI + (mult:DI + (zero_extend:DI (match_operand:SI 1 "register_operand" "0")) + (zero_extend:DI (plus:SI + (match_operand:SI 2 "register_operand" "r") + (match_operand:DI 3 "zip_opb_immv_p" "N")))) + (const_int 32)))) + (clobber (reg:CC CC_REG))] + "(ZIP_LONGMPY)" + "MPYSHI\t%3+%2,%0" + [(set_attr "ccresult" "set")]) +; +; +(define_expand "div<mode>3" + [(parallel [(set (match_operand:ZI 0 "register_operand" "=r") + (div:ZI (match_operand:ZI 1 "register_operand" "0") + (match_operand:ZI 2 "zip_opb_operand_p" ""))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])] + "(ZIP_DIVIDE)") +(define_insn "div<mode>3_reg" + [(set (match_operand:ZI 0 "register_operand" "=r") + (div:ZI (match_operand:ZI 1 "register_operand" "0") + (match_operand:ZI 2 "zip_opb_single_operand_p" "rO"))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))] + "(ZIP_DIVIDE)" + "DIVS %2,%0" + [(set_attr "ccresult" "set")]) +(define_insn "div<mode>3_off" + [(set (match_operand:ZI 0 "register_operand" "=r") + (div:ZI (match_operand:ZI 1 "register_operand" "0") + (plus:ZI (match_operand:ZI 2 "register_operand" "r") + (match_operand:ZI 3 "zip_opb_immv_p" "N")))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))] + 
"(ZIP_DIVIDE)" + "DIVS %3+%2,%0" + [(set_attr "ccresult" "set")]) +(define_expand "udiv<mode>3" + [(parallel [(set (match_operand:ZI 0 "register_operand" "=r") + (udiv:ZI (match_operand:ZI 1 "register_operand" "0") + (match_operand:ZI 2 "zip_opb_operand_p" ""))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])] + "(ZIP_DIVIDE)") +(define_insn "udiv<mode>3_reg" + [(set (match_operand:ZI 0 "register_operand" "=r") + (udiv:ZI (match_operand:ZI 1 "register_operand" "0") + (match_operand:ZI 2 "zip_opb_single_operand_p" "rO"))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))] + "(ZIP_DIVIDE)" + "DIVU %2,%0" + [(set_attr "ccresult" "set")]) +(define_insn "udiv<mode>3_off" + [(set (match_operand:ZI 0 "register_operand" "=r") + (udiv:ZI (match_operand:ZI 1 "register_operand" "0") + (plus:ZI (match_operand:ZI 2 "register_operand" "r") + (match_operand:ZI 3 "zip_opb_immv_p" "N")))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))] + "(ZIP_DIVIDE)" + "DIVU %3+%2,%0" + [(set_attr "ccresult" "set")]) +;; +;; modsi3 +;; umodsi3 +;; +(define_insn "umin<mode>3" + [(set (match_operand:ZI 0 "register_operand" "=r") + (umin:ZI (match_operand:ZI 1 "register_operand" "%0") + (match_operand:ZI 2 "register_operand" "r"))) + (clobber (reg:CC CC_REG)) + ] + "" + "CMP %0,%2 + MOV.C %2,%0" + [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")]) +(define_insn "umax<mode>3" + [(set (match_operand:ZI 0 "register_operand" "=r") + (umax:ZI (match_operand:ZI 1 "register_operand" "%0") + (match_operand:ZI 2 "register_operand" "r"))) + (clobber (reg:CC CC_REG)) + ] + "" + "CMP %2,%0 + MOV.C %2,%0" + [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")]) +(define_insn "smin<mode>3" + [(set (match_operand:ZI 0 "register_operand" "=r") + (smin:ZI (match_operand:ZI 1 "register_operand" "%0") + (match_operand:ZI 2 "register_operand" "r"))) + (clobber (reg:CC CC_REG)) + ] + "" + "CMP %2,%0 + MOV.GT %2,%0" + [(set_attr "predicable" "no") 
(set_attr "ccresult" "unknown")]) +(define_insn "smax<mode>3" + [(set (match_operand:ZI 0 "register_operand" "=r") + (smax:ZI (match_operand:ZI 1 "register_operand" "%0") + (match_operand:ZI 2 "register_operand" "r"))) + (clobber (reg:CC CC_REG)) + ] + "" + "CMP %0,%2 + MOV.LT %2,%0" + [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")]) +; +; +(define_expand "and<mode>3" + [(set (match_operand:ZI 0 "register_operand" "=r") + (and:ZI (match_operand:ZI 1 "register_operand" "%0") + (match_operand:ZI 2 "zip_opb_operand_p" "")))]) +(define_insn_and_split "and<mode>3_reg_split" + [(set (match_operand:ZI 0 "register_operand" "=r") + (and:ZI (match_operand:ZI 1 "register_operand" "%0") + (match_operand:ZI 2 "zip_opb_single_operand_p" "rO")))] + "" + "#" + "(reload_completed)" + [(parallel [(set (match_dup 0) (and:ZI (match_dup 1) (match_dup 2))) + (clobber (reg:CC CC_REG))])] + "" + [(set_attr "ccresult" "set")]) +(define_insn "and<mode>3_reg_raw" + [(set (match_operand:ZI 0 "register_operand" "=r") + (and:ZI (match_operand:ZI 1 "register_operand" "%0") + (match_operand:ZI 2 "zip_opb_single_operand_p" "rO"))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))] + "" + "AND %2,%0" + [(set_attr "ccresult" "set")]) +(define_insn "and<mode>3_reg_clobber" + [(set (match_operand:ZI 0 "register_operand" "=r") + (and:ZI (match_operand:ZI 1 "register_operand" "%0") + (match_operand:ZI 2 "zip_opb_single_operand_p" "rO"))) + (clobber (reg:CC CC_REG))] + "" + "AND %2,%0" + [(set_attr "ccresult" "set")]) +(define_insn_and_split "and<mode>3_off_split" + [(set (match_operand:ZI 0 "register_operand" "=r") + (and:ZI (match_operand:ZI 1 "register_operand" "0") + (plus:ZI (match_operand:ZI 2 "register_operand" "r") + (match_operand:ZI 3 "zip_opb_immv_p" "N"))))] + "" + "#" + "(reload_completed)" + [(parallel [(set (match_dup 0) (and:ZI (match_dup 1) + (plus:ZI (match_dup 2) (match_dup 3)))) + (clobber (reg:CC CC_REG))])] + "" + [(set_attr "ccresult" "set")]) 
+(define_insn "and<mode>3_off_raw" + [(set (match_operand:ZI 0 "register_operand" "=r") + (and:ZI (match_operand:ZI 1 "register_operand" "0") + (plus:ZI (match_operand:ZI 2 "register_operand" "r") + (match_operand:ZI 3 "zip_opb_immv_p" "N")))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))] + "" + "AND %3+%2,%0" + [(set_attr "ccresult" "set")]) +(define_insn "and<mode>3_off_clobber" + [(set (match_operand:ZI 0 "register_operand" "=r") + (and:ZI (match_operand:ZI 1 "register_operand" "0") + (plus:ZI (match_operand:ZI 2 "register_operand" "r") + (match_operand:ZI 3 "zip_opb_immv_p" "N")))) + (clobber (reg:CC CC_REG))] + "" + "AND %3+%2,%0" + [(set_attr "ccresult" "set")]) +; +; +(define_expand "ior<mode>3" + [(set (match_operand:ZI 0 "register_operand" "=r") + (ior:ZI (match_operand:ZI 1 "register_operand" "%0") + (match_operand:ZI 2 "zip_opb_operand_p" "")))]) +(define_insn_and_split "ior<mode>3_reg_split" + [(set (match_operand:ZI 0 "register_operand" "=r") + (ior:ZI (match_operand:ZI 1 "register_operand" "%0") + (match_operand:ZI 2 "zip_opb_single_operand_p" "rO")))] + "" + "#" + "(reload_completed)" + [(parallel [(set (match_dup 0) (ior:ZI (match_dup 1) (match_dup 2))) + (clobber (reg:CC CC_REG))])] + "" + [(set_attr "ccresult" "set")]) +(define_insn "ior<mode>3_reg_raw" + [(set (match_operand:ZI 0 "register_operand" "=r") + (ior:ZI (match_operand:ZI 1 "register_operand" "%0") + (match_operand:ZI 2 "zip_opb_single_operand_p" "rO"))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))] + "" + "OR %2,%0" + [(set_attr "ccresult" "set")]) +(define_insn "ior<mode>3_reg_clobber" + [(set (match_operand:ZI 0 "register_operand" "=r") + (ior:ZI (match_operand:ZI 1 "register_operand" "%0") + (match_operand:ZI 2 "zip_opb_single_operand_p" "rO"))) + (clobber (reg:CC CC_REG))] + "" + "OR %2,%0" + [(set_attr "ccresult" "set")]) +(define_insn_and_split "ior<mode>3_off_split" + [(set (match_operand:ZI 0 "register_operand" "=r") + (ior:ZI 
(match_operand:ZI 1 "register_operand" "0") + (plus:ZI (match_operand:ZI 2 "register_operand" "r") + (match_operand:ZI 3 "zip_opb_immv_p" "N"))))] + "" + "#" + "(reload_completed)" + [(parallel [(set (match_dup 0) (ior:ZI (match_dup 1) + (plus:ZI (match_dup 2) (match_dup 3)))) + (clobber (reg:CC CC_REG))])] + "" + [(set_attr "ccresult" "set")]) +(define_insn "ior<mode>3_off_raw" + [(set (match_operand:ZI 0 "register_operand" "=r") + (ior:ZI (match_operand:ZI 1 "register_operand" "0") + (plus:ZI (match_operand:ZI 2 "register_operand" "r") + (match_operand:ZI 3 "zip_opb_immv_p" "N")))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))] + "" + "OR %3+%2,%0" + [(set_attr "ccresult" "set")]) +(define_insn "ior<mode>3_off_clobber" + [(set (match_operand:ZI 0 "register_operand" "=r") + (ior:ZI (match_operand:ZI 1 "register_operand" "0") + (plus:ZI (match_operand:ZI 2 "register_operand" "r") + (match_operand:ZI 3 "zip_opb_immv_p" "N")))) + (clobber (reg:CC CC_REG))] + "" + "OR %3+%2,%0" + [(set_attr "ccresult" "set")]) +; +; +; +(define_expand "xor<mode>3" + [(parallel [(set (match_operand:ZI 0 "register_operand" "=r") + (xor:ZI (match_operand:ZI 1 "register_operand" "%0") + (match_operand:ZI 2 "zip_opb_operand_p" ""))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])] + "") +(define_insn_and_split "xor<mode>3_reg_split" + [(set (match_operand:ZI 0 "register_operand" "=r") + (xor:ZI (match_operand:ZI 1 "register_operand" "%0") + (match_operand:ZI 2 "zip_opb_single_operand_p" "rO")))] + "" + "#" + "(reload_completed)" + [(parallel [(set (match_dup 0) (xor:ZI (match_dup 1) (match_dup 2))) + (clobber (reg:CC CC_REG))])] + "" + [(set_attr "ccresult" "set")]) +(define_insn "xor<mode>3_reg_raw" + [(set (match_operand:ZI 0 "register_operand" "=r") + (xor:ZI (match_operand:ZI 1 "register_operand" "%0") + (match_operand:ZI 2 "zip_opb_single_operand_p" "rO"))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))] + "" + "XOR %2,%0" + 
[(set_attr "ccresult" "set")]) +(define_insn "xor<mode>3_reg_clobber" + [(set (match_operand:ZI 0 "register_operand" "=r") + (xor:ZI (match_operand:ZI 1 "register_operand" "%0") + (match_operand:ZI 2 "zip_opb_single_operand_p" "rO"))) + (clobber (reg:CC CC_REG))] + "" + "XOR %2,%0" + [(set_attr "ccresult" "set")]) +(define_insn_and_split "xor<mode>3_off_split" + [(set (match_operand:ZI 0 "register_operand" "=r") + (xor:ZI (match_operand:ZI 1 "register_operand" "0") + (plus:ZI (match_operand:ZI 2 "register_operand" "r") + (match_operand:ZI 3 "zip_opb_immv_p" "N"))))] + "" + "#" + "(reload_completed)" + [(parallel [(set (match_dup 0) (xor:ZI (match_dup 1) + (plus:ZI (match_dup 2) (match_dup 3)))) + (clobber (reg:CC CC_REG))])] + "" + [(set_attr "ccresult" "set")]) +(define_insn "xor<mode>3_off_raw" + [(set (match_operand:ZI 0 "register_operand" "=r") + (xor:ZI (match_operand:ZI 1 "register_operand" "0") + (plus:ZI (match_operand:ZI 2 "register_operand" "r") + (match_operand:ZI 3 "zip_opb_immv_p" "N")))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))] + "" + "XOR %3+%2,%0" + [(set_attr "ccresult" "set")]) +(define_insn "xor<mode>3_off_clobber" + [(set (match_operand:ZI 0 "register_operand" "=r") + (xor:ZI (match_operand:ZI 1 "register_operand" "0") + (plus:ZI (match_operand:ZI 2 "register_operand" "r") + (match_operand:ZI 3 "zip_opb_immv_p" "N")))) + (clobber (reg:CC CC_REG))] + "" + "XOR %3+%2,%0" + [(set_attr "ccresult" "set")]) +;(define_insn "addv<mode>4" + ;[(set (match_operand:ZI 0 "register_operand" "=r") + ;(plus:ZI (match_operand:ZI 1 "register_operand" "%r") + ;(match_operand:ZI 2 "general_operand" "rO"))) + ;(set (pc) (if_then_else (eq (reg:CC CC_REG) (const_int 0)) + ;(label_ref (match_operand 3)) + ;(pc)))] + ;"" + ;"MOV %1,%0 + ;ADD %2,%0 + ;BV %3" + ;[(set_attr "predicable" "no") (set_attr "ccresult" "set")]) +;;(define_insn "subvsi4" +;; MOV %1,%0 +;; SUB %2,%0 +;; BV %3 +;;(mulvsi4) +;;(define_insn "uaddvsi4" +;; ADD %2,%0 +;; BC %3 
+;;(define_insn "usubvsi4" +;; MOV %1,%0 +;; SUB %2,%0 +;; BC %3 +;; +;; (define_insn "umulvsi4" +;; ... ???) +;; +(define_expand "ashr<mode>3" + [(set (match_operand:ZI 0 "register_operand" "=r") + (ashiftrt:ZI (match_operand:ZI 1 "register_operand" "0") + (match_operand:ZI 2 "zip_opb_single_operand_p" "rR")))]) +(define_insn_and_split "ashr<mode>3_split" + [(set (match_operand:ZI 0 "register_operand" "=r") + (ashiftrt:ZI (match_operand:ZI 1 "register_operand" "0") + (match_operand:ZI 2 "zip_opb_single_operand_p" "rR")))] + "" + "#" + "(reload_completed)" + [(parallel [(set (match_dup 0) (ashiftrt:ZI (match_dup 1) (match_dup 2))) + (clobber (reg:CC CC_REG))])] + "" + [(set_attr "ccresult" "set")]) +(define_insn "ashr<mode>3_raw" + [(set (match_operand:ZI 0 "register_operand" "=r") + (ashiftrt:ZI (match_operand:ZI 1 "register_operand" "0") + (match_operand:ZI 2 "zip_opb_single_operand_p" "rR"))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))] + "" + "ASR %2,%0" + [(set_attr "ccresult" "set")]) +(define_insn "ashr<mode>3_clobber" + [(set (match_operand:ZI 0 "register_operand" "=r") + (ashiftrt:ZI (match_operand:ZI 1 "register_operand" "0") + (match_operand:ZI 2 "zip_opb_single_operand_p" "rR"))) + (clobber (reg:CC CC_REG))] + "" + "ASR %2,%0" + [(set_attr "ccresult" "set")]) +; +; +(define_expand "ashl<mode>3" + [(set (match_operand:ZI 0 "register_operand" "=r") + (ashift:ZI (match_operand:ZI 1 "register_operand" "0") + (match_operand:ZI 2 "zip_opb_single_operand_p" "rR")))]) +(define_insn_and_split "ashl<mode>3_split" + [(set (match_operand:ZI 0 "register_operand" "=r") + (ashift:ZI (match_operand:ZI 1 "register_operand" "0") + (match_operand:ZI 2 "zip_opb_single_operand_p" "rR")))] + "" + "#" + "(reload_completed)" + [(parallel [(set (match_dup 0) (ashift:ZI (match_dup 1) (match_dup 2))) + (clobber (reg:CC CC_REG))])] + "" + [(set_attr "ccresult" "set")]) +(define_insn "ashl<mode>3_raw" + [(set (match_operand:ZI 0 "register_operand" "=r") + 
(ashift:ZI (match_operand:ZI 1 "register_operand" "0") + (match_operand:ZI 2 "zip_opb_single_operand_p" "rR"))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))] + "" + "LSL %2,%0" + [(set_attr "ccresult" "set")]) +(define_insn "ashl<mode>3_clobber" + [(set (match_operand:ZI 0 "register_operand" "=r") + (ashift:ZI (match_operand:ZI 1 "register_operand" "0") + (match_operand:ZI 2 "zip_opb_single_operand_p" "rR"))) + (clobber (reg:CC CC_REG))] + "" + "LSL %2,%0" + [(set_attr "ccresult" "set")]) +; +; +(define_expand "lshr<mode>3" + [(set (match_operand:ZI 0 "register_operand" "=r") + (lshiftrt:ZI (match_operand:ZI 1 "register_operand" "0") + (match_operand:ZI 2 "zip_opb_single_operand_p" "rR")))]) +(define_insn_and_split "lshr<mode>3_split" + [(set (match_operand:ZI 0 "register_operand" "=r") + (lshiftrt:ZI (match_operand:ZI 1 "register_operand" "0") + (match_operand:ZI 2 "zip_opb_single_operand_p" "rR")))] + "" + "#" + "(reload_completed)" + [(parallel [(set (match_dup 0) (lshiftrt:ZI (match_dup 1) (match_dup 2))) + (clobber (reg:CC CC_REG))])] + "" + [(set_attr "ccresult" "set")]) +(define_insn "lshr<mode>3_raw" + [(set (match_operand:ZI 0 "register_operand" "=r") + (lshiftrt:ZI (match_operand:ZI 1 "register_operand" "0") + (match_operand:ZI 2 "zip_opb_single_operand_p" "rR"))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))] + "" + "LSR %2,%0" + [(set_attr "ccresult" "set")]) +(define_insn "lshr<mode>3_clobber" + [(set (match_operand:ZI 0 "register_operand" "=r") + (lshiftrt:ZI (match_operand:ZI 1 "register_operand" "0") + (match_operand:ZI 2 "zip_opb_single_operand_p" "rR"))) + (clobber (reg:CC CC_REG))] + "" + "LSR %2,%0" + [(set_attr "ccresult" "set")]) +; +; +(define_expand "rotl<mode>3" + [(set (match_operand:ZI 0 "register_operand" "=r") + (rotate:ZI (match_operand:ZI 1 "register_operand" "0") + (match_operand:ZI 2 "zip_opb_single_operand_p" "rR")))]) +(define_insn_and_split "rotl<mode>3_split" + [(set (match_operand:ZI 0 
"register_operand" "=r") + (rotate:ZI (match_operand:ZI 1 "register_operand" "0") + (match_operand:ZI 2 "zip_opb_single_operand_p" "rR")))] + "" + "#" + "(reload_completed)" + [(parallel [(set (match_dup 0) (rotate:ZI (match_dup 1) (match_dup 2))) + (clobber (reg:CC CC_REG))])] + "" + [(set_attr "ccresult" "set")]) +(define_insn "rotl<mode>3_raw" + [(set (match_operand:ZI 0 "register_operand" "=r") + (rotate:ZI (match_operand:ZI 1 "register_operand" "0") + (match_operand:ZI 2 "zip_opb_single_operand_p" "rR"))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))] + "" + "ROL %2,%0" + [(set_attr "ccresult" "set")]) +(define_insn "rotl<mode>3_clobber" + [(set (match_operand:ZI 0 "register_operand" "=r") + (rotate:ZI (match_operand:ZI 1 "register_operand" "0") + (match_operand:ZI 2 "zip_opb_single_operand_p" "rR"))) + (clobber (reg:CC CC_REG))] + "" + "ROL %2,%0" + [(set_attr "ccresult" "set")]) +; +; +; +(define_insn "neg<mode>2" + [(set (match_operand:ZI 0 "register_operand" "=r") + (neg:ZI (match_operand:ZI 1 "register_operand" "r"))) + (clobber (reg:CC CC_REG))] + "" + "NEG %1,%0" + [(set_attr "ccresult" "validzn")]) +(define_insn "abs<mode>2" + [(set (match_operand:ZI 0 "register_operand" "=r") + (abs:ZI (match_operand:ZI 1 "register_operand" "0"))) + (clobber (reg:CC CC_REG))] + "" + "TEST %0 + NEG.LT %0" + [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")]) +(define_insn "popcount<mode>2" + [(set (match_operand:ZI 0 "register_operand" "=r") + (popcount:ZI (match_operand:ZI 1 "register_operand" "r"))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))] + "" + "POPC %1,%0" + [(set_attr "ccresult" "set")]) +(define_expand "parity<mode>2" + [(parallel [(set (match_operand:ZI 0 "register_operand" "=r") + (popcount:ZI (match_operand:ZI 1 "register_operand" "r"))) + (clobber (reg:CC CC_REG))]) + (parallel [ + (set (match_dup 0) (and:ZI (match_dup 0) (const_int -2))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 
0)))]) + ]) +(define_insn "one_cmpl<mode>2" + [(set (match_operand:ZI 0 "register_operand" "=r") + (not:ZI (match_operand:ZI 1 "register_operand" "0"))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))] + "" + "XOR -1,%0" + [(set_attr "ccresult" "set")]) +; +; +; +; +; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; General arithmetic instructions -- double words +;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; +; +; +(define_expand "movdi3" + [(set (match_operand:DI 0 "nonimmediate_operand" "") + (match_operand:DI 1 "general_operand" ""))]) +(define_insn "movdi_lod" + [(set (match_operand:DI 0 "register_operand" "=r") + (mem:DI (match_operand:SI 1 "zip_opb_operand_p" "")))] + "" + { + if (REG_P(operands[1])) + return "LOD\t(%1),%H0\n\tLOD\t1(%1),%L0"; + else if (GET_CODE(operands[1])==PLUS) { + if ((REG_P(XEXP(operands[1],0))) + &&(CONST_INT_P(XEXP(operands[1],1)))) { + static char buf[64]; + sprintf(buf, + "LOD\t%ld(%%1),%%H0\n\tLOD\t%ld(%%1),%%L0", + (long)INTVAL(XEXP(operands[1],1)), + (long)INTVAL(XEXP(operands[1],1)+1)); + return buf; + } + } return "BREAK"; + } + [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")]) +(define_insn "movdi_sto" + [(set (mem:DI (match_operand:SI 0 "zip_opb_operand_p" "")) + (match_operand:DI 1 "register_operand" "r"))] + "" + { + if (REG_P(operands[0])) + return "STO\t%H0,(%1)\n\tSTO\t%L0,1(%1)"; + else if (GET_CODE(operands[0])==PLUS) { + if ((REG_P(XEXP(operands[0],0))) + &&(CONST_INT_P(XEXP(operands[0],1)))) { + static char buf[64]; + sprintf(buf, + "STO\t%%H0,%ld(%%1)\n\tSTO\t%%L0,%ld(%%1)", + (long)INTVAL(XEXP(operands[0],1)), + (long)INTVAL(XEXP(operands[0],1)+1)); + return buf; + } + } return "BREAK"; + } + [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")]) +(define_insn "movdi_ldi" + [(set (match_operand:DI 0 "register_operand" "=r") + (match_operand:DI 1 "immediate_operand" "i"))] + "" + "LDI\t%H1,%H0\n\tLDI\t%L1,%L0" + [(set_attr "predicable" 
"no") (set_attr "ccresult" "unchanged")]) +(define_insn "adddi3" ; Fastest/best instruction always goes first + [(set (match_operand:DI 0 "register_operand" "=r") + (plus:DI (match_operand:DI 1 "register_operand" "0") + (match_operand:DI 2 "register_operand" "r"))) + (clobber (reg:CC CC_REG)) + ] + "" + "ADD %L2,%L0\n\tADD.C\t1,%H0\n\tADD\t%H2,%H0" + [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")]) +; +(define_insn "subdi3" + [(set (match_operand:DI 0 "register_operand" "=r") + (minus:DI (match_operand:DI 1 "register_operand" "0") + (match_operand:DI 2 "register_operand" "r"))) + (clobber (reg:CC CC_REG)) + ] + "" + "SUB %L2,%L0\n\tSUB.C\t1,%H0\n\tSUB\t%H2,%H0" + [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")]) +; +(define_insn "anddi3" + [(set (match_operand:DI 0 "register_operand" "=r") + (and:DI (match_operand:DI 1 "register_operand" "%0") + (match_operand:DI 2 "register_operand" "r"))) + (clobber (reg:CC CC_REG)) + ] + "" + "AND %L2,%L0\n\tAND\t%H2,%H0" + [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")]) +; +(define_insn "iordi3" + [(set (match_operand:DI 0 "register_operand" "=r") + (ior:DI (match_operand:DI 1 "register_operand" "%0") + (match_operand:DI 2 "register_operand" "r"))) + (clobber (reg:CC CC_REG)) + ] + "" + "OR %2,%0\n\tOR\t%H2,%H0" + [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")]) +; +(define_insn "xordi3" + [(set (match_operand:DI 0 "register_operand" "=r") + (xor:DI (match_operand:DI 1 "register_operand" "%0") + (match_operand:DI 2 "register_operand" "r"))) + (clobber (reg:CC CC_REG)) + ] + "" + "XOR %2,%0\n\tXOR\t%H2,%H0" + [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")]) +; +(define_insn "negdi2" + [(set (match_operand:DI 0 "register_operand" "=r") + (neg:DI (match_operand:DI 1 "register_operand" "0"))) + (clobber (reg:CC CC_REG)) + ] + "" + "XOR -1,%L0\n\tXOR\t-1,%H0\n\tADD\t1,%L0\n\tADD.C\t1,%H0" + [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")]) +; 
+(define_insn "absdi2" + [(set (match_operand:DI 0 "register_operand" "=r") + (abs:DI (match_operand:DI 1 "register_operand" "0"))) + (clobber (match_scratch:SI 2 "=r")) + (clobber (reg:CC CC_REG)) + ] + "" + "CLR %2 + TEST %H0 ; Problem, we can't tell conditions + LDILO.LT 1,%2 + XOR.LT -1,%L0 + XOR.LT -1,%H0 + ADD %2,%L0 + ADD.C 1,%H0" + [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")]) +(define_insn "one_cmpldi2" + [(set (match_operand:DI 0 "register_operand" "=r") + (not:DI (match_operand:DI 1 "register_operand" "0"))) + (clobber (reg:CC CC_REG)) + ] + "" + "XOR -1,%L0\n\tXOR\t-1,%H0" + [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")]) +(define_insn "umindi3" + [(set (match_operand:DI 0 "register_operand" "=r") + (umin:DI (match_operand:DI 1 "register_operand" "%0") + (match_operand:DI 2 "register_operand" "r"))) + (clobber (reg:CC CC_REG)) + ] + "" + "CMP %H0,%H2 + CMP.Z %L0,%L2 + MOV.C %H2,%H0 + MOV.C %L2,%L0" + [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")]) +(define_insn "umaxdi3" + [(set (match_operand:DI 0 "register_operand" "=r") + (umax:DI (match_operand:DI 1 "register_operand" "%0") + (match_operand:DI 2 "register_operand" "r"))) + (clobber (reg:CC CC_REG)) + ] + "" + "CMP %H2,%H0 + CMP.Z %L2,%L0 + MOV.C %H2,%H0 + MOV.C %L2,%L0" + [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")]) +(define_insn "popcountdi2" + [(set (match_operand:SI 0 "register_operand" "=r") + (popcount:SI (match_operand:DI 1 "register_operand" "r"))) + (clobber (match_scratch:SI 2 "=r")) + (clobber (reg:CC CC_REG)) + ] + "" + "POPC %L1,%0 + POPC %H1,%2 + ADD %2,%0" + [(set_attr "predicable" "no") (set_attr "ccresult" "set")]) +(define_expand "paritydi2" + [(set (match_operand:SI 0 "register_operand" "=r") + (popcount (match_operand:DI 1 "register_operand" "r"))) + (set (match_dup 0) (and:SI (match_dup 0) (const_int -2))) + ]) +;(define_insn "extendsidi2" +; [(set (match_operand:DI 0 "register_operand" "=r") +; (sign_extend:DI 
(match_operand:SI 0 "register_operand" "r")))] +; "" +; "TEST\t%1\nMOV\t%1,%L0\nCLR\t%L1\nLDI.LT\t-1,%L1" +; [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")]) +;(define_insn "mulsidi3" +; [(set (match_operand:DI 0 "register_operand" "=r") +; (mult:SI (match_operand:SI 1 "register_operand" "%r") +; (match_operand:SI 2 "register_operand" "r"))) +; (clobber (match_scratch:SI 3 "=r"))] +; ; "(R0 != R1)&&(R0 != R2)&&(R0!=R3)&&(R1!=R2)&&(R1=R3)&&(R2!=R3)" +; "" +; "MOV %1,%L0 +; MPYS %2,%L0 ; L0 = R2 * R1 +; MOV %1,%3 ; R3 = R1 +; ROL 16,%3 ; R3 = (R1 <<< 16) +; MPYS %2,%3 ; R3 = (R1 <<< 16) * R2 +; ROL 16,%3 ; R3 = upper bits of (R1<<<16)*R2 +; AND 0x0ffff,%3 +; ADD %3,%L0 ; L0 = L0 + R3 = L0 + (R1>>16)*R2 +; MOV %2,%3 +; ROL 16,%3 +; MPYS %1,%3 +; ROL 16,%3 +; AND 0x0ffff,%3 +; ADD %3,%0" +; [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")]) + +; +; Still missing DI instructions for smin:DI, smax:DI, movdicc, adddicc, +; mult:di, div:di, divu:di +; +; +; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Conditional arithmetic instructions +;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; +; +; +; +(define_expand "cstore<mode>4" ; Store 0 or 1 in %0 based on cmp between %2&%3 + [(set (reg:CC CC_REG) (compare:CC (match_operand:ZI 2 "register_operand" "r") + (match_operand:ZI 3 "zip_opb_operand_p" "rO"))) + (set (match_operand:ZI 0 "register_operand" "=r") (if_then_else:ZI + (match_operator 1 "ordered_comparison_operator" + [(reg:CC CC_REG) (const_int 0)]) + (const_int 1) (const_int 0)))] + "" + ) +(define_insn "cstoredi4" ; Store 0 or 1 in %0 based on cmp between %2&%3 + [(set (match_operand:SI 0 "register_operand" "=r") + (if_then_else:SI (match_operator 1 "ordered_comparison_operator" + [(match_operand:DI 2 "register_operand" "r") + (match_operand:DI 3 "register_operand" "r")]) + (const_int 1) (const_int 0))) + (clobber (reg:CC CC_REG))] + "" + { + switch(GET_CODE(operands[1])) { + case EQ: return 
"CLR\t%0\n\tCMP\t%H3,%H2\n\tCMP.Z\t%L3,%L2\n\tLDILO.Z\t1,%0\n"; + case NE: return "CLR\t%0\n\tCMP\t%H3,%H2\n\tCMP.NZ\t%L3,%L2\n\tLDILO.NZ\t1,%0\n"; + case LTU: return "CLR\t%0\n\tCMP\t%H3,%H2\n\tCMP.Z\t%L3,%L2\n\tLDILO.C\t1,%0\n"; + case LEU: return "CLR\t%0\n\tCMP\t%H3,%H2\n\tCMP.Z\t%L3,%L2\n\tLDILO.C\t1,%0\n\tLDILO.Z\t1,%0\n"; + case GTU: return "CLR\t%0\n\tCMP\t%H2,%H3\n\tCMP.Z\t%L2,%L3\n\tLDILO.C\t1,%0\n"; + case GEU: return "CLR\t%0\n\tCMP\t%H2,%H3\n\tCMP.Z\t%L2,%L3\n\tLDILO.C\t1,%0\n\tLDILO.Z\t1,%0\n"; + default: + gcc_unreachable(); + } + } + [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")]) +; +; +; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Comparison instructions, both compare and test +;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; +; +; +;; This will only work so well, since the direction of the compare is +;; important in unsigned compares. +;; +(define_expand "cmp<mode>" + [(set (reg:CC CC_REG) (compare:CC + (match_operand:ZI 0 "register_operand" "r") + (match_operand:ZI 1 "nonmemory_operand" "")))] + "" + { + if (!zip_opb_operand_p(operands[1],SImode)) { + if (can_create_pseudo_p()) { + //; fprintf(stderr, "Generating pseudo register for compare\n"); + rtx tmp = gen_reg_rtx(SImode); + emit_insn(gen_movsi(tmp,operands[1])); + operands[1] = tmp; + } else FAIL; + } + }) +(define_insn "cmp<mode>_reg" + [(set (reg:CC CC_REG) (compare:CC + (match_operand:ZI 0 "register_operand" "r") + (match_operand:ZI 1 "zip_opb_single_operand_p" "rO")))] + "" + "CMP\t%1,%0" + [(set_attr "ccresult" "set")]) +(define_insn "cmp<mode>_off" + [(set (reg:CC CC_REG) (compare:CC + (match_operand:ZI 0 "register_operand" "r") + (plus:ZI (match_operand:ZI 1 "register_operand" "r") + (match_operand 2 "zip_opb_immv_p" "N"))))] + "" + "CMP\t%2+%1,%0" + [(set_attr "ccresult" "set")]) +(define_insn "test<mode>" + [(set (reg:CC CC_REG) (compare:CC (and:ZI (match_operand:ZI 0 "register_operand" "r") + (match_operand:ZI 1 
"zip_opb_single_operand_p" "rO")) + (const_int 0)))] + "" + "TEST %1,%0" + [(set_attr "ccresult" "set")]) +(define_insn "test<mode>_off" + [(set (reg:CC CC_REG) (compare:CC + (and:ZI (match_operand:ZI 0 "register_operand" "r") + (plus:ZI + (match_operand:ZI 1 "register_operand" "r") + (match_operand:ZI 2 "zip_opb_immv_p" "N"))) + (const_int 0)))] + "" + "TEST %2+%1,%0" + [(set_attr "ccresult" "set")]) +(define_insn "nop" + [(const_int 0)] + "" + "NOOP" + [(set_attr "ccresult" "unchanged")]) +; +; +; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Conditional execution predicates +;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; +; Sadly, these aren't complete like they should be. Although these are all of +; the conditional execution prefixes that the Zip CPU supports, GCC looks for +; other conditions then these. That is, (cond_exec ...) is not as well +; recognized as (if_then_else ...). So we have to duplicate things to support +; both methods. +; +(define_cond_exec + [(ne (reg:CC CC_REG) (const_int 0))] + "" + "(NZ)" + [(set_attr "conditional" "yes")]) +(define_cond_exec + [(lt (reg:CC CC_REG) (const_int 0))] + "" + "(LT)" + [(set_attr "conditional" "yes")]) +(define_cond_exec + [(eq (reg:CC CC_REG) (const_int 0))] + "" + "(Z)" + [(set_attr "conditional" "yes")]) +(define_cond_exec + [(gt (reg:CC CC_REG) (const_int 0))] + "" + "(GT)" + [(set_attr "conditional" "yes")]) +(define_cond_exec + [(ge (reg:CC CC_REG) (const_int 0))] + "" + "(GE)" + [(set_attr "conditional" "yes")]) +(define_cond_exec + [(ltu (reg:CC CC_REG) (const_int 0))] + "" + "(C)" + [(set_attr "conditional" "yes")]) +; +; +; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Conditional move instructions, since these won't accept conditional +;; execution RTL +;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; +; // Look for #define HAVE_conditional_move to understand how these might be +; // used. 
+; +(define_insn "set_zero_or_one<mode>" + [(set (match_operand:ZI 0 "register_operand" "=r") (if_then_else:ZI + (match_operator 1 "ordered_comparison_operator" + [(reg:CC CC_REG) (const_int 0)]) + (const_int 1) (const_int 0)))] + "" + { return (zip_set_zero_or_one(operands[1], operands[0])); + } + [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")]) +(define_expand "mov<mode>cc" + [(set (match_operand:ZI 0 "register_operand" "=r") + (if_then_else:ZI (match_operand 1 "comparison_operator") + (match_operand:ZI 2 "general_operand" "rio") + (match_operand:ZI 3 "nonmemory_operand" "rio")))] + "" + { + if (zip_expand_movsicc(operands[0], operands[1], operands[2], operands[3])) + DONE; + else + FAIL; + }) +(define_insn_and_split "movsicc_bare" + [(set (match_operand:SI 0 "register_operand" "=r") + (if_then_else (match_operator 1 "ordered_comparison_operator" + [(reg:CC CC_REG) (const_int 0)]) + (match_operand:SI 2 "general_operand" "rio") + (match_operand:SI 3 "register_operand" "0")))] + "(zip_supported_condition(GET_CODE(operands[1])))" + "#" + "(reload_completed)" + [(cond_exec (match_operator 1 "ordered_comparison_operator" + [(reg:CC CC_REG) (const_int 0)]) + (set (match_dup 0) (match_dup 2)))] + "" [(set_attr "predicable" "no")]) +(define_insn "add<mode>cc" + [(set (match_operand:ZI 0 "register_operand" "=r,r") + (if_then_else:ZI (match_operator 1 "ordered_comparison_operator" + [(reg:CC CC_REG) (const_int 0)]) + (plus:ZI (match_operand:ZI 2 "register_operand" "0,r") + (match_operand:ZI 3 "nonmemory_operand" "rO,M")) + (match_dup 0)))] + "" + { + return zip_addsicc(operands[0], operands[1], operands[2], operands[3]); + } + [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")]) +; +; +; +; While an interesting approach, the following suffers from problems when the +; move amount is constant. At anything less than four, moves should not require +; the movmemSI instruction. 
At anything greater, if constant, the initial tests +; are not required and should result in a hardcoded result. Practically, +; though, this should really be a define_expand instruction, calling on the +; RTX's of all the respective subinstructions found below. Or, perhaps, it is +; better as a subroutine? +; +;(define_insn "movmemSI" +; [(parallel [(set (mem:BLK (match_operand 0 "register_operand" "+r"));Dst +; (mem:BLK (match_operand 1 "register_operand" "+r")));Src +; (use (match_operand:SI 2 "register_operand" "+r"))]); Length +; (match_operand 3 "" "") ;Alignment +; (clobber (match_scratch:SI 4 "=r")) +; (clobber (match_scratch:SI 5 "=r")) +; (clobber (match_scratch:SI 6 "=r")) +; (clobber (match_scratch:SI 7 "=r"))] +; "" +; "TEST\t1,%2 +; LOD.NZ\t%1,%4 +; STO.NZ\t%4,%0 +; ADD.NZ\t1,%0 +; ADD.NZ\t1,%1 +; TEST\t2,%2 +; LOD.NZ\t%1,%4 +; LOD.NZ\t1(%1),%5 +; STO.NZ\t%4,(%0) +; STO.NZ\t%4,1(%0) +; ADD.NZ\t2,%0 +; ADD.NZ\t2,%1 +; AND\t-4,%2 +; BZ\t.Lskp%=\n.Ltop%=: +; LOD\t(%1),%4 +; LOD\t1(%1),%5 +; LOD\t2(%1,%6 +; LOD\t3(%1),%7 +; STO\t%4,(%1) +; STO\t%5,1(%1) +; STO\t%6,2(%1) +; STO\t%7,3(%1) +; SUB\t4,%2 +; BZ\t%.Lskp%= +; BRA\t.Ltop%=\n.Lskp%=:" +; [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")]) +;(define_insn "setmemsi" +; [(parallel +; [(set (mem:BLK +; (match_operand 0 "register_operand" "+r")); Destination +; (match_operand:SI 2 "register_operand" "r")) ; Source +; (use (match_operand:SI 1 "register_operand" "+r"))]) ; Length +; (match_operand 3 "" "")] +; "" +; "TEST\t1,%1 +; STO.NZ\t%2,(%0) +; ADD.NZ\t1,%0 +; TEST\t2,%1 +; STO.NZ\t%2,(%0) +; STO.NZ\t%2,1(%0) +; ADD.NZ\t2,%0 +; AND\t-4,%1 +; BZ\t.Lskp%=\n.Ltop%=:\n +; STO\t%2,(%0) +; STO\t%2,1(%0) +; STO\t%2,2(%0) +; STO\t%2,3(%0) +; SUB\t%4,%0 +; BZ\t.Lskp%= +; BRA\t.Ltop%=\n.Lskp%=:" +; [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")]) +;; +; +; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Control flow instructions +;; 
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; +; +; +(define_expand "jump" + [(set (pc) + (label_ref (match_operand 0 "" "")))]); // Was general-op, "mro" +(define_insn "jump_const" + [(set (pc) + (match_operand:SI 0 "zip_const_address_operand_p" ""))] + "" + "BRA %0" + [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")]) +(define_insn "jump_label" ; Must be modeless, VOIDmode, not SI or any othr + [(set (pc) ; Otherwise it won't accept jumps to labels + (label_ref (match_operand 0 "" "")))] + "" + "BRA %0" + [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")]) +; +; This is really the same thing as an indirect jump ... the big difference +; is that the zip_address_operand_p checks for an "N" type condition, not an +; "M" type condition ... a bug, but one that works for now. (The assembler +; should be able to catch and except on it ...) +; +; #warning "This predicate is appropriate for non-moves, but not for JMPs" +(define_insn "jump_variable" + [(set (pc) + (match_operand:SI 0 "zip_address_operand_p" ""))] + "" + "JMP %0" + [(set_attr "ccresult" "unchanged")]) +; +; Indirect jumps ... both to registers, and registers plus offsets +; +(define_insn "indirect_jump" + [(set (pc) + (match_operand:SI 0 "register_operand" "r"))] + "" + "JMP %0" + [(set_attr "ccresult" "unchanged")]) +(define_insn "indirect_jump_mem" + [(set (pc) (match_operand:SI 0 "zip_memory_operand_p" "o"))] + "" + "LOD %0,PC" + [(set_attr "ccresult" "unchanged")]) +(define_insn "indirect_jump_off" + [(set (pc) + (plus:SI (match_operand:SI 0 "register_operand" "r") + (match_operand:SI 1 "const_int_operand" "M")))] + "" + "JMP %1(%0)" + [(set_attr "ccresult" "unchanged")]) +;; +; cbranchsi4 +;; Op 0 = the comparison operator (le,lt,eq,ne,gt,ge,and usgn ltu,geu,etc.) +;; Op 1&2 the operands of the compare instruction +;; Op 3 is the jump label +;; +;; +;; #warning Need to adjust this so that the "LT" code doesnt get generated ... 
+;; +(define_expand "cbranch<mode>4" + [(set (reg:CC CC_REG) (compare:CC (match_operand:ZI 1 "register_operand" "r") + (match_operand:ZI 2 "zip_opb_operand_p" "rO"))) + (set (pc) (if_then_else (match_operator 0 "ordered_comparison_operator" + [(reg:CC CC_REG) (const_int 0)]) + (label_ref (match_operand 3 "" "")) + (pc)))] + "" + { + if (true) { + extern void zip_debug_rtx_pfx(const char *, const_rtx); + //; Two branches give us no end of difficulty when implementing. + //; Let's check for these two branch codes, and swap the + //; comparison to simplify them. + //; fprintf(stderr, "CBRANCH\n"); + //; zip_debug_rtx_pfx("- CMP: ", operands[0]); + //; zip_debug_rtx_pfx("- A : ", operands[1]); + //; zip_debug_rtx_pfx("- B : ", operands[2]); + //; zip_debug_rtx_pfx("- JMP: ", operands[3]); + //; Can we do better if we reverse some compares? + if ((GET_CODE(operands[0])==GTU)&&(REG_P(operands[2]))) { + //; fprintf(stderr, "CBRANCH:(GTU,?,REG,?)\n"); + emit_insn(gen_cmpsi(operands[2], operands[1])); + emit_jump_insn(gen_cbranch_jmp_ltu(operands[3])); + DONE; + } else if((GET_CODE(operands[0])==GEU)&&(REG_P(operands[2]))) { + //; fprintf(stderr, "CBRANCH:(GEU,?,REG,?)\n"); + emit_insn(gen_cmpsi_off(operands[2], operands[1], + GEN_INT(1))); + emit_jump_insn(gen_cbranch_jmp_ltu(operands[3])); + DONE; + } else if ((GET_CODE(operands[0])==LE)&&(REG_P(operands[2]))) { + //; fprintf(stderr, "CBRANCH:(LE,?,REG,?)\n"); + //; Swap operands, turn into a GTE compare + emit_insn(gen_cmpsi(operands[2], operands[1])); + emit_jump_insn(gen_cbranch_jmp_ge(operands[3])); + DONE; + } // ; Otherwise ... just handle the branch normally + + //; Except ... we can do better for some instructions, such as + //; LE. While we could exchange CMP Rx,Ry into -1(Rx),Ry, it + //; would be difficult to explain to users why MIN_INT didn't + //; compare properly. Hence we only adjust constant integers. 
+ //; + if (GET_CODE(operands[0])==LE) { + if ((CONST_INT_P(operands[2])) + &&(INTVAL(operands[2])<(1<<17)-2)) { + //; fprintf(stderr, "CBRANCH:(LE,?,#,?)\n"); + emit_insn(gen_cmpsi(operands[1], + GEN_INT(INTVAL(operands[2])+1))); + emit_jump_insn(gen_cbranch_jmp_lt(operands[3])); + DONE; + //; Now for the controversial ones--where we add one + //; when it may or may not be permissable. For now, we + //; just do it anyway and postpone the philosophical + //; discussion for later. + } else if (REG_P(operands[2])) { + emit_insn(gen_cmpsi_off(operands[1], + operands[2],GEN_INT(1))); + emit_jump_insn(gen_cbranch_jmp_lt(operands[3])); + DONE; + } else if ((GET_CODE(operands[2])==PLUS) + &&(REG_P(XEXP(operands[2],0))) + &&(CONST_INT_P(XEXP(operands[2],1))) + &&(INTVAL(XEXP(operands[2],1))<((1<<13)-2))) { + emit_insn(gen_cmpsi_off(operands[1], + XEXP(operands[2],0), + GEN_INT(INTVAL(XEXP(operands[2],1))+1))); + emit_jump_insn(gen_cbranch_jmp_lt(operands[3])); + DONE; + } + } else if (GET_CODE(operands[0])==LEU) { + if ((CONST_INT_P(operands[2])) + &&(INTVAL(operands[2])<(1<<17)-2)) { + //; fprintf(stderr, "CBRANCH:(LEU,?,#,?)\n"); + emit_insn(gen_cmpsi(operands[1], + GEN_INT(INTVAL(operands[2])+1))); + emit_jump_insn(gen_cbranch_jmp_ltu(operands[3])); + DONE; + //; Now for the controversial ones--this time having + //; to do with unsigned compares. 
+ } else if (REG_P(operands[2])) { + emit_insn(gen_cmpsi_off(operands[1], + operands[2],GEN_INT(1))); + emit_jump_insn(gen_cbranch_jmp_ltu(operands[3])); + DONE; + } else if ((GET_CODE(operands[2])==PLUS) + &&(REG_P(XEXP(operands[2],0))) + &&(CONST_INT_P(XEXP(operands[2],1))) + &&(INTVAL(XEXP(operands[2],1))<((1<<13)-2))) { + emit_insn(gen_cmpsi_off(operands[1], + XEXP(operands[2],0), + GEN_INT(INTVAL(XEXP(operands[2],1))+1))); + emit_jump_insn(gen_cbranch_jmp_ltu(operands[3])); + DONE; + } + }} + }) +(define_insn "cbranch_jmp_eq" + [(set (pc) (if_then_else (eq (reg:CC CC_REG) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "BZ\t%0" + [(set_attr "predicable" "no") + (set_attr "conditional" "yes") + (set_attr "ccresult" "unchanged")]) +(define_insn "cbranch_jmp_neq" + [(set (pc) (if_then_else (ne (reg:CC CC_REG) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "BNZ\t%0" + [(set_attr "predicable" "no") + (set_attr "conditional" "yes") + (set_attr "ccresult" "unchanged")]) +(define_insn "cbranch_jmp_lt" + [(set (pc) (if_then_else (lt (reg:CC CC_REG) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "BLT\t%0" + [(set_attr "predicable" "no") + (set_attr "conditional" "yes") + (set_attr "ccresult" "unchanged")]) +(define_insn "cbranch_jmp_le" + [(set (pc) (if_then_else (le (reg:CC CC_REG) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "BLT\t%0 + BZ\t%0" + [(set_attr "predicable" "no") + (set_attr "conditional" "yes") + (set_attr "ccresult" "unchanged")]) +(define_insn "cbranch_jmp_gt" + [(set (pc) (if_then_else (gt (reg:CC CC_REG) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "BGT\t%0" + [(set_attr "predicable" "no") + (set_attr "conditional" "yes") + (set_attr "ccresult" "unchanged")]) +(define_insn "cbranch_jmp_ge" + [(set (pc) (if_then_else (ge (reg:CC CC_REG) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "BGE\t%0" + [(set_attr 
"predicable" "no") + (set_attr "conditional" "yes") + (set_attr "ccresult" "unchanged")]) +(define_insn "cbranch_jmp_ltu" + [(set (pc) (if_then_else (ltu (reg:CC CC_REG) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "BC\t%0" + [(set_attr "predicable" "no") + (set_attr "conditional" "yes") + (set_attr "ccresult" "unchanged")]) +(define_insn "cbranch_jmp_gtu" + [(set (pc) (if_then_else (gtu (reg:CC CC_REG) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc))) + ;(clobber (reg:CC CC_REG)) + ] + "" ; Flip the condition, and then we can jump + "BC\t.Lgtu%=\n\tBZ\t.Lgtu%=\n\tBRA\t%0\n.Lgtu%=:" + [(set_attr "predicable" "no") + (set_attr "conditional" "yes") + (set_attr "ccresult" "unknown")]) +(define_insn "cbranch_jmp_leu" + [(set (pc) (if_then_else (leu (reg:CC CC_REG) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" ; Need to check for both LTU (i.e. C) and Z + "BC\t%0 + BZ\t%0" + [(set_attr "predicable" "no") + (set_attr "conditional" "yes") + (set_attr "ccresult" "unchanged")]) +(define_insn "cbranch_jmp_geu" + [(set (pc) (if_then_else (geu (reg:CC CC_REG) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc))) + ;(clobber (reg:CC CC_REG)) + ] + "" + "BC\t.Lgeu%=\n\tBRA\t%0\n.Lgeu%=:" + [(set_attr "predicable" "no") + (set_attr "conditional" "yes") + (set_attr "ccresult" "unknown")]) +(define_insn "cbranchdi4" + [(set (pc) (if_then_else + (match_operator 0 "ordered_comparison_operator" + [(match_operand:DI 1 "register_operand" "r") + (match_operand:DI 2 "register_operand" "r")]) + (label_ref (match_operand 3 "" "")) + (pc))) + (clobber (reg:CC CC_REG))] + "" + { + switch(GET_CODE(operands[0])) { + case EQ: + return "CMP\t%H2,%H1\n\tCMP.Z\t%L2,%L1\n\tBZ\t%3"; + case NE: + return "CMP\t%H2,%H1\n\tCMP.NZ\t%L2,%L1\n\tBNZ\t%3"; + case LE: + return "CMP\t%H2,%H1\n\tBLT\t%3\n\tCMP.Z\t%L2,%L1\n\tBC\t%3\n\tBZ\t%3"; + case GT: + return "CMP\t%H1,%H2\n\tBLT\t%3\n\tBNZ\t.Ldi%=\n\tCMP\t%L1,%L2\n\tBC\t%3\n.Ldi%=:"; + case 
LT: + return "CMP\t%H2,%H1\n\tBLT\t%3\n\tBNZ\t.Ldi%=\n\tCMP\t%L2,%L1\n\tBC\t%3\n.Ldi%=:"; + case GE: + return "CMP\t%H1,%H2\n\tBLT\t%3\n\tBNZ\t.Ldi%=\n\tCMP\t%L1,%L2\n\tBC\t%3\nBZ\t%3\n.Ldi%=:"; + case LTU: + return "CMP\t%H2,%H1\n\tCMP.Z\t%L2,%L1\n\tBC\t%3\n"; + case LEU: + return "CMP\t%H2,%H1\n\tCMP.Z\t%L2,%L1\n\tBC\t%3\n\tBZ\t%3"; + case GTU: + return "CMP\t%H1,%H2\n\tCMP.Z\t%L1,%L2\n\tBC\t%3\n"; + case GEU: + return "CMP\t%H1,%H2\n\tCMP.Z\t%L1,%L2\n\tBC\t%3\nBZ\t%3"; + default: + gcc_unreachable(); + } + } + [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")]) +; +; +; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Subroutine call +;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; +; +; There are two types of calls: "call" and "call_value". +; +; Each of these types of calls are then expanded into one of: +; +; _const - A call to a constant address, such as a symbol +; reference or a fixed location +; +; _label - This should be the same as _const, except that for +; some reason the RTL and matching rules are separate. +; Hence we have a separate rule for this. +; +; _mem - The memory address we wish to jump to is stored in +; memory somewhere, and we have only a pointer. In this +; case, we load that pointer straight to the PC and go. +; +; _var - The address to jump to is given as an offset to a +; register, such as X+R3. This is an indirect jump. +; Although we support it, it does require different RTL +; code. 
+; +(define_expand "call" + [(call (match_operand 0 "" "") + (match_operand 1 "" ""))] + "" + { + if (MEM_P(operands[0])) { + // This should always be the case + rtx addr = XEXP(operands[0],0); + if (zip_const_address_operand_p(addr, SImode)) { + //; fprintf(stderr, "Generating gen_void_call_const()\n"); + emit_call_insn(gen_void_call_const(addr, + operands[1])); + } else if ((MEM_P(addr))&&(zip_address_operand( + XEXP(addr,0)))) { + emit_call_insn(gen_void_call_mem(XEXP(addr,0), + operands[1])); + } else { + emit_call_insn(gen_void_call_var(addr, + operands[1])); + } + DONE; + } + }) +; +; +; +; How do we want to do this better? +; Replace the RTL w/ +; return_label= gen_label_rtx(); +; emit_movsi(gen_rtx_REG(zip_R0),plus_constant( +; gen_rtx_REG(zip_PC),return_label)); +; emit_jump(label_rtx( +; +; emit_label(return_label); +; +; The problem is: we can't! GCC distinguishes between jumps and calls when +; optimizing, and it doesn't see the need to keep the label around. Thus, the +; label gets removed and the call gets lost. Hence we do it this way (below). +; I'll probably bastardize a means of getting a new codelabel that GCC doesn't +; recognize as such, but for now we'll use .Lcall# as our label. 
+; +(define_insn "void_call_const" + [(call (mem:SI (match_operand:SI 0 "zip_const_address_operand_p" "")) + (match_operand 1 "const_int_operand" "n")) + (clobber (reg:SI RTN_REG)) + (clobber (reg:CC CC_REG))] + "" + "MOV .Lcall%=(PC),R0\;BRA\t%0\n.Lcall%=:" + [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")]) +(define_insn "void_call_mem" + [(call (mem:SI (match_operand:SI 0 "zip_memory_operand_p" "Q")) + (match_operand 1 "const_int_operand" "n")) + (clobber (reg:SI RTN_REG)) + (clobber (reg:CC CC_REG))] + "" + "MOV .Lcall%=(PC),R0\;LOD\t%0,PC\n.Lcall%=:" + [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")]) +; +; #warning "This predicate is appropriate for non-moves, but not for JMPs" +(define_insn "void_call_var" + [(call (mem:SI (match_operand:SI 0 "zip_address_operand_p" "")) + (match_operand 1 "const_int_operand" "n")) + (clobber (reg:SI RTN_REG)) + (clobber (reg:CC CC_REG))] + "" + "MOV .Lcall%=(PC),R0\;JMP\t%0\n.Lcall%=:" + [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")]) +(define_expand "call_value" + [(parallel [(set (reg:SI RTNV_REG) + (call (match_operand:SI 1 "" "") + (match_operand 2 "const_int_operand" "n"))) + (set (match_operand:SI 0 "register_operand" "=r") (reg:SI RTNV_REG)) + (clobber (reg:CC CC_REG))])] + "" + { + //; extern void zip_debug_rtx(const_rtx); + //; fprintf(stderr, "ZIP.MD::CALL-VALUE()\n"); + //; zip_debug_rtx(operands[1]); + if (MEM_P(operands[1])) { + //; fprintf(stderr, "ZIP.MD::CALL-VALUE() MEM_P\n"); + //; zip_debug_rtx(operands[1]); + //; This should always be the case + rtx addr = XEXP(operands[1],0); + if (zip_const_address_operand_p(addr, SImode)) { + //; fprintf(stderr, "Generating gen_reg_call_const()\n"); + emit_call_insn(gen_reg_call_const(addr, operands[2])); + } else if ((MEM_P(addr))&&(zip_address_operand(XEXP(addr,0)))) { + //; fprintf(stderr, "ZIP.MD::CALL-VALUE() INDIRECT\n"); + emit_call_insn(gen_reg_call_mem(XEXP(addr,0), operands[2])); + } else { + //; fprintf(stderr, 
"ZIP.MD::CALL-VALUE() INDIRECT\n"); + emit_call_insn(gen_reg_call_var(addr, operands[2])); + } + DONE; + } + }) +(define_insn "reg_call_const" + [(set (reg:SI RTNV_REG) + (call (mem:SI (match_operand:SI 0 "zip_const_address_operand_p" "")) + (match_operand 1 "const_int_operand" "n"))) + (clobber (reg:SI RTN_REG)) + (clobber (reg:CC CC_REG))] + "" + "MOV .Lcall%=(PC),R0\;BRA\t%0\n.Lcall%=:" + [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")]) +(define_insn "reg_call_mem" + [(set (reg:SI RTNV_REG) + (call (mem:SI (match_operand:SI 0 "zip_memory_operand_p" "Q")) + (match_operand 1 "const_int_operand" "n"))) + (clobber (reg:SI RTN_REG)) + (clobber (reg:CC CC_REG))] + "" + "MOV .Lcall%=(PC),R0\n\tLOD\t%0,PC\n.Lcall%=:" + [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")]) +; +; #warning "This predicate is appropriate for non-moves, but not for JMPs" +(define_insn "reg_call_var" + [(set (reg:SI RTNV_REG) + (call (mem:SI (match_operand:SI 0 "zip_address_operand_p" "")) + (match_operand 1 "const_int_operand" "n"))) + (clobber (reg:SI RTN_REG)) + (clobber (reg:CC CC_REG))] + "" + "MOV .Lcall%=(PC),R0\n\tJMP\t%0\n.Lcall%=:" + [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")]) +; +; +; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Frame manipulation RTX +;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; +; +; +(define_expand "prologue" + [(const_int 0)] + "" + "{ zip_expand_prologue(); DONE; }") +(define_expand "epilogue" + [(return)] + "" + "{ zip_expand_epilogue(); DONE; }") +(define_expand "return" ; In order to use the function predicate, this *must* + [(return)] ; be a define_expand + "zip_use_return_insn()") + ; "JMP R0" + ; [(set_attr "ccresult" "unchanged")]) +(define_insn "*return" ; A "*" -- means it cannot be called from C + [(return)] + "" + "JMP R0" + [(set_attr "ccresult" "unchanged")]) +(define_insn "simple_return" ; A "*" -- means it cannot be called from C + [(simple_return)] + "" + "JMP R0" 
+ [(set_attr "ccresult" "unchanged")]) +; +; +; +;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Zip Builtin Functions +;; +;;;;;;;;;;;;;;;;;;;;;;;;;; +; +; +; +(define_insn "zip_rtu" + [(unspec_volatile [(reg:SI CC_REG)] UNSPEC_RTU) + (clobber (reg:CC CC_REG))] + "(!ZIP_USER)" + "RTU" + [(set_attr "ccresult" "unknown")]) +(define_insn "zip_halt" ; Needs to be unspec_volatile, or optimizer will opt out + [(unspec_volatile [(reg:SI CC_REG)] UNSPEC_HALT) + (clobber (reg:CC CC_REG))] + "(!ZIP_USER)" + "HALT" + [(set_attr "ccresult" "unknown")]) +(define_insn "zip_idle" + [(unspec_volatile [(reg:SI CC_REG)] UNSPEC_IDLE) + (clobber (reg:CC CC_REG))] + "" + "WAIT" + [(set_attr "ccresult" "unknown")]) +(define_insn "zip_syscall" + [(unspec_volatile [(reg:SI CC_REG)] UNSPEC_SYSCALL)] + "" + "CLR\tCC" + [(set_attr "ccresult" "unknown")]) +; +; +; Operator "save_context" +; +; Okay, so we're not really reading and writing operand 0, %0, however +; if we don't list it as a "+r" register, the compiler may allocate it +; among the other registers, thus we clobber it in the middle of the +; operation before the task is complete. 
+; +(define_insn "zip_save_context" + [(unspec_volatile + [ (match_operand:SI 0 "register_operand" "+r") ] + UNSPEC_SAVE_CONTEXT) + (clobber (match_scratch:SI 1 "=r")) + (clobber (match_scratch:SI 2 "=r")) + (clobber (match_scratch:SI 3 "=r")) + (clobber (match_scratch:SI 4 "=r"))] + "(!ZIP_USER)" + "MOV\tuR0,%1 + MOV\tuR1,%2 + MOV\tuR2,%3 + MOV\tuR3,%4 + STO\t%1,%0 + STO\t%2,1(%0) + STO\t%3,2(%0) + STO\t%4,3(%0) + MOV\tuR4,%1 + MOV\tuR5,%2 + MOV\tuR6,%3 + MOV\tuR7,%4 + STO\t%1,4(%0) + STO\t%2,5(%0) + STO\t%3,6(%0) + STO\t%4,7(%0) + MOV\tuR8,%1 + MOV\tuR9,%2 + MOV\tuR10,%3 + MOV\tuR11,%4 + STO\t%1,8(%0) + STO\t%2,9(%0) + STO\t%3,10(%0) + STO\t%4,11(%0) + MOV\tuR12,%1 + MOV\tuSP,%2 + MOV\tuCC,%3 + MOV\tuPC,%4 + STO\t%1,12(%0) + STO\t%2,13(%0) + STO\t%3,14(%0) + STO\t%4,15(%0)" + [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")]) +; +; See the comment above about why operand 0, %0, *must* be a "+r" operand, +; even though we don't really read (or change) its value throughout this +; operation. 
+; +(define_insn "zip_restore_context" + [(unspec_volatile [ + (match_operand:SI 0 "register_operand" "+r")] UNSPEC_RESTORE_CONTEXT) + (clobber (match_scratch:SI 1 "=r")) + (clobber (match_scratch:SI 2 "=r")) + (clobber (match_scratch:SI 3 "=r")) + (clobber (match_scratch:SI 4 "=r"))] + "(!ZIP_USER)" + "LOD\t0(%0),%1 + LOD\t1(%0),%2 + LOD\t2(%0),%3 + LOD\t3(%0),%4 + MOV\t%1,uR0 + MOV\t%2,uR1 + MOV\t%3,uR2 + MOV\t%4,uR3 + LOD\t4(%0),%1 + LOD\t5(%0),%2 + LOD\t6(%0),%3 + LOD\t7(%0),%4 + MOV\t%1,uR4 + MOV\t%2,uR5 + MOV\t%3,uR6 + MOV\t%4,uR7 + LOD\t8(%0),%1 + LOD\t9(%0),%2 + LOD\t10(%0),%3 + LOD\t11(%0),%4 + MOV\t%1,uR8 + MOV\t%2,uR9 + MOV\t%3,uR10 + MOV\t%4,uR11 + LOD\t12(%0),%1 + LOD\t13(%0),%2 + LOD\t14(%0),%3 + LOD\t15(%0),%4 + MOV\t%1,uR12 + MOV\t%2,uSP + MOV\t%3,uCC + MOV\t%4,uPC" + [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")]) +(define_insn "zip_bitrev" + [(set (match_operand:SI 0 "register_operand" "=r") + (unspec:SI [(match_operand:SI 1 "register_operand" "r")] UNSPEC_BITREV)) + ] + "" + "BREV\t%1,%0" + [(set_attr "ccresult" "unchanged")]) +(define_insn "zip_cc" + [(set (match_operand:SI 0 "register_operand" "=r") + (unspec:SI [(reg:SI CC_REG)] UNSPEC_GETCC))] + "" + "MOV\tCC,%0" + [(set_attr "ccresult" "unchanged")]) +(define_insn "zip_ucc" + [(set (match_operand:SI 0 "register_operand" "=r") + (unspec_volatile:SI [(reg:SI CC_REG)] UNSPEC_GETUCC))] + "" + "MOV\tuCC,%0" + [(set_attr "ccresult" "unchanged")]) +(define_insn "zip_cc_sto" + [(set (mem:SI (match_operand:SI 0 "register_operand" "r")) + (unspec_volatile:SI [(reg:SI CC_REG)] UNSPEC_GETCC))] + "" + "STO\tCC,(%0)" + [(set_attr "ccresult" "unchanged")]) +(define_insn "zip_cc_sto_off" + [(set (mem:SI (plus:SI + (match_operand:SI 0 "register_operand" "r") + (match_operand:SI 1 "const_int_operand" "N"))) + (unspec_volatile:SI [(reg:SI CC_REG)] UNSPEC_GETCC))] + "" + "STO\tCC,%1(%0)" + [(set_attr "ccresult" "unchanged")]) +(define_insn "ldilo" + [(set (match_operand:SI 0 "register_operand" 
"=r") + (unspec:SI [(match_operand:SI 1 "immediate_operand" "")] UNSPEC_LDILO))] + "" + "LDILO %1,%0" + [(set_attr "predicable" "yes") (set_attr "ccresult" "unchanged")]) + +; +; +; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Floating point Op-codes +;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; +; +; +(define_insn "addsf3" + [(set (match_operand:SF 0 "register_operand" "=r") + (plus:SF (match_operand:SF 1 "register_operand" "0") + (match_operand:SF 2 "register_operand" "r"))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))] + "(ZIP_FPU)" + "FPADD %2,%0" + [(set_attr "ccresult" "unknown")]) +(define_insn "subsf3" + [(set (match_operand:SF 0 "register_operand" "=r") + (minus:SF (match_operand:SF 1 "register_operand" "0") + (match_operand:SF 2 "register_operand" "r"))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))] + "(ZIP_FPU)" + "FPSUB %2,%0" + [(set_attr "ccresult" "unknown")]) +(define_insn "mulsf3" + [(set (match_operand:SF 0 "register_operand" "=r") + (mult:SF (match_operand:SF 1 "register_operand" "0") + (match_operand:SF 2 "register_operand" "r"))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))] + "(ZIP_FPU)" + "FPMUL %2,%0" + [(set_attr "ccresult" "unknown")]) +(define_insn "divsf3" + [(set (match_operand:SF 0 "register_operand" "=r") + (div:SF (match_operand:SF 1 "register_operand" "0") + (match_operand:SF 2 "register_operand" "r"))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))] + "(ZIP_FPU)" + "FPDIV %2,%0" + [(set_attr "ccresult" "unknown")]) +(define_expand "negsf2" + [(set (match_operand:SF 0 "register_operand" "=r") + (neg:SF (match_operand:SF 1 "register_operand" "0"))) + ] + "" + { + operands[0] = gen_rtx_SUBREG(SImode, operands[0], 0); + if (can_create_pseudo_p()) { + rtx tmp = gen_reg_rtx(SImode); + emit_insn(gen_movsi_ldi(tmp,gen_int_mode(0x80000000,SImode))); + emit_insn(gen_xorsi3(operands[0], operands[0], tmp)); + DONE; + } else { + 
emit_insn(gen_zip_bitrev(operands[0],operands[0])); + emit_insn(gen_iorsi3(operands[0], operands[0], + gen_int_mode(1,SImode))); + emit_insn(gen_zip_bitrev(operands[0],operands[0])); + DONE; + } + }) +(define_expand "abssf2" + [(set (match_operand:SF 0 "register_operand" "=r") + (abs:SF (match_operand:SF 1 "register_operand" "0"))) + ] + "" + { + operands[0] = gen_rtx_SUBREG(SImode, operands[0], 0); + if (can_create_pseudo_p()) { + rtx tmp = gen_reg_rtx(SImode); + emit_insn(gen_movsi_ldi(tmp,gen_int_mode(0x7fffffff,SImode))); + emit_insn(gen_andsi3(operands[0], operands[0], tmp)); + DONE; + } else { + emit_insn(gen_zip_bitrev(operands[0],operands[0])); + emit_insn(gen_andsi3(operands[0], operands[0], + gen_int_mode(-2,SImode))); + emit_insn(gen_zip_bitrev(operands[0],operands[0])); + DONE; + } + }) +; +; +; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Trap Instruction +;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; +; +; The ZipCPU doesn't really have a "trap" instruction per se. The goal is that +; *nothing* should ever trap, and so we should never get here. However, the +; compiler seems to want a trap instruction for some reason. (It keeps us +; from calling the abort() function, if we don't define these ...) So let's +; just grab onto the break instruction and declare it to be a trap instruction +; for our purposes. Alternatively, we might've used a syscall, but ... this +; will work for both user and system instructions. 
+; +(define_insn "trap" + [(trap_if (const_int 1) (const_int 0))] + "" + "BREAK" + [(set_attr "predicable" "yes") (set_attr "ccresult" "unchanged")]) +; +(define_expand "ctrap<mode>4" + [(set (reg:CC CC_REG) (compare:CC + (match_operand:ZI 1 "register_operand" "r") + (match_operand:ZI 2 "zip_opb_single_operand_p" "rO"))) + (trap_if (match_operator 0 "ordered_comparison_operator" + [(reg:CC CC_REG) (const_int 0)]) + (match_operand 3 "const_int_operand" "O"))] + "" + ) +(define_insn "trapif" + [(trap_if (match_operator 0 "ordered_comparison_operator" + [(reg:CC CC_REG) (const_int 0)]) + (match_operand 1 "const_int_operand" "O"))] + "" + "BREAK\t%1" + [(set_attr "predicable" "no")]) +; +; +; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; +;; Unimplemented (or not yet implemented) RTL Codes +;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; +; +; +;(define_insn "sync_compare_and_swapsi" +; [(set ... +; )] +; "(ZIP_ATMOC)" +; LOCK (alu) // Hmmm ... need to modify if I will +; LOD %1,%0 OP-VALID // operate on the value before the store +; CMP %0,%2 DCD-valid +; STO.Z %2,%1 PF-valid +; +;(define_insn "sync_lock_test_and_setsi" +; LOCK +; LOD %1,%0 +; STO %0,%1 +; +;(define_insn "sync_lock_releasesi" +; STO %1,%0 +; +; +;(define_insn "negvsi3" +; "MOV %1,%0 +; XOR -1,%0 +; ADD 1,%0 +; BV %2" +; "") +; +; Match: +; CMP R1,R0 +; BGTU lbl +; Transform to: +; CMP R0,R1 +; BC lbl +; +(define_peephole2 + [(set (reg:CC CC_REG) (compare:CC + (match_operand:SI 0 "register_operand") + (match_operand:SI 1 "register_operand"))) + (set (pc) (if_then_else (gtu (reg:CC CC_REG) (const_int 0)) + (label_ref (match_operand 2 "")) + (pc)))] + "(ZIP_PEEPHOLE)" + [(set (reg:CC CC_REG) (compare:CC (match_dup 1) (match_dup 0))) + (set (pc) (if_then_else (ltu (reg:CC CC_REG) (const_int 0)) + (label_ref (match_dup 2)) + (pc)))] + "") +(define_peephole2 + [(match_scratch:SI 3 "=r") + (set (reg:CC CC_REG) (compare:CC + (match_operand:SI 0 "register_operand") + (match_operand 
1 "const_int_operand"))) + (match_dup 3) + (set (pc) (if_then_else (gtu (reg:CC CC_REG) (const_int 0)) + (label_ref (match_operand 2 "")) + (pc)))] + "(ZIP_PEEPHOLE)" + [(set (match_dup 3) (match_dup 1)) + (set (reg:CC CC_REG) (compare:CC (match_dup 3) (match_dup 0))) + (set (pc) (if_then_else (ltu (reg:CC CC_REG) (const_int 0)) + (label_ref (match_dup 2)) + (pc)))] + "") +;(define_peephole2 +; [(set (reg:CC CC_REG) (compare:CC +; (match_operand:SI 0 "register_operand") +; (match_operand 1 "const_int_operand"))) +; (set (pc) (if_then_else (gtu (reg:CC CC_REG) (const_int 0)) +; (label_ref (match_operand 2 "")) +; (pc)))] +; "" +; [(set (reg:CC CC_REG) (compare:CC (match_dup 0) (match_dup 1))) +; (set (pc) (if_then_else (geu (reg:CC CC_REG) (const_int 0)) +; (label_ref (match_dup 2)) +; (pc)))] +; "operands[1] = GEN_INT(INTVAL(operands[1])-1);") +; +; +; Match: +; CMP R1,R0 +; BGEU lbl +; Transform to: +; CMP 1(R0),R1 +; BC lbl +; +(define_peephole2 + [(set (reg:CC CC_REG) (compare:CC + (match_operand:SI 0 "register_operand") + (match_operand:SI 1 "register_operand"))) + (set (pc) (if_then_else (geu (reg:CC CC_REG) (const_int 0)) + (label_ref (match_operand 2 "")) + (pc)))] + "(ZIP_PEEPHOLE)" + [(set (reg:CC CC_REG) (compare:CC + (match_dup 1) (plus:SI (match_dup 0) (const_int 1)))) + (set (pc) (if_then_else (ltu (reg:CC CC_REG) (const_int 0)) + (label_ref (match_dup 2)) + (pc)))] + "") +; +; +; Match: +; CMP R1,R0 +; BGE lbl +; Transform to: +; CMP 1(R0),R1 +; BLT lbl +; ... why? when we support a BGE instruction? 
+;(define_peephole2 + ;[(set (reg:CC CC_REG) (compare:CC + ;(match_operand:SI 0 "register_operand") + ;(match_operand:SI 1 "register_operand"))) + ;(set (pc) (if_then_else (ge (reg:CC CC_REG) (const_int 0)) + ;(label_ref (match_operand 2 "")) + ;(pc)))] + ;"(ZIP_PEEPHOLE)" + ;[(set (reg:CC CC_REG) (compare:CC (match_dup 1) + ;(plus:SI (match_dup 0) (const_int 1)))) + ;(set (pc) (if_then_else (lt (reg:CC CC_REG) (const_int 0)) + ;(label_ref (match_dup 2)) + ;(pc)))] + ;"") +; +; +; Match: +; CMP R1,R0 +; BLEU lbl +; Transform to: +; CMP 1(R1),R0 +; BC lbl +; +(define_peephole2 + [(set (reg:CC CC_REG) (compare:CC + (match_operand:SI 0 "register_operand" "") + (match_operand:SI 1 "register_operand" ""))) + (set (pc) (if_then_else (leu (reg:CC CC_REG) (const_int 0)) + (label_ref (match_operand 2 "" "")) + (pc)))] + "(ZIP_PEEPHOLE)" + [(set (reg:CC CC_REG) (compare:CC (match_dup 0) + (plus:SI (match_dup 1) (const_int 1)))) + (set (pc) (if_then_else (ltu (reg:CC CC_REG) (const_int 0)) + (label_ref (match_dup 2)) + (pc)))] + "") +; +; +; +; Match: +; CMP R1,R0 +; BLE lbl +; Transform to: +; CMP 1(R1),R0 +; BLT lbl +; +(define_peephole2 + [(set (reg:CC CC_REG) + (compare:CC (match_operand:SI 0 "register_operand" "") + (match_operand:SI 1 "const_int_operand" ""))) + (set (pc) (if_then_else (le (reg:CC CC_REG) (const_int 0)) + (label_ref (match_operand 2 "" "")) + (pc)))] + "(ZIP_PEEPHOLE)&&(INTVAL(operands[1])<((1<<17)-2))" + [(set (reg:CC CC_REG) (compare:CC (match_dup 0) (match_dup 1))) + (set (pc) (if_then_else (lt (reg:CC CC_REG) (const_int 0)) + (label_ref (match_dup 2)) + (pc)))] + "operands[1] = GEN_INT(INTVAL(operands[1])+1);") +; +; Match: +; CMP R1,R0 +; BLEU lbl +; Transform to: +; CMP 1(R1),R0 +; BC(LTU) lbl +; +(define_peephole2 + [(set (reg:CC CC_REG) + (compare:CC (match_operand:SI 0 "register_operand" "") + (match_operand:SI 1 "const_int_operand" ""))) + (set (pc) (if_then_else (leu (reg:CC CC_REG) (const_int 0)) + (label_ref (match_operand 2 "" "")) + 
(pc)))] + "(ZIP_PEEPHOLE)&&(INTVAL(operands[1])<((1<<17)-2))" + [(set (reg:CC CC_REG) (compare:CC (match_dup 0) (match_dup 1))) + (set (pc) (if_then_else (ltu (reg:CC CC_REG) (const_int 0)) + (label_ref (match_dup 2)) + (pc)))] + "operands[1] = GEN_INT(INTVAL(operands[1])+1);") +; +; +; +; +; Match: +; (parallel [(set () ()) (clobber (CC))]) +; (compare () ()) +; Transform to: +; (parallel [(set () ()) (set (CC) (0))] +; (compare () ()) +; +(define_peephole2 + [(parallel [(set (match_operand:SI 0 "") (match_operand:SI 1 "")) + (clobber (reg:CC CC_REG))]) + (set (reg:CC CC_REG) (compare:CC (match_operand:SI 2 "") + (match_operand:SI 3 "")))] + "(ZIP_PEEPHOLE)&&zip_insn_sets_cc(insn)" + [(parallel [(set (match_dup 0) (match_dup 1)) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]) + (set (reg:CC CC_REG) (compare:CC (match_dup 2) (match_dup 3)))] + "") +; +; +; +; Match: +; (parallel [(set () ()) (clobber (CC))]) +; (set () ()) +; (compare () ()) +; Transform to: +; (parallel [(set () ()) (set (CC) (0))] +; (set () ()) +; (compare () ()) +; +(define_peephole2 + [(parallel [(set (match_operand:SI 0 "") (match_operand:SI 1 "")) + (clobber (reg:CC CC_REG))]) + (set (match_operand 2 "") (match_operand 3 "")) + (set (reg:CC CC_REG) (compare:CC (match_operand:SI 4 "") + (match_operand:SI 5 "")))] + "(ZIP_PEEPHOLE)&&(zip_insn_sets_cc(insn))&&((!REG_P(operands[2]))||(REGNO(operands[2])!=CC_REG))" + [(parallel [(set (match_dup 0) (match_dup 1)) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]) + (set (match_dup 2) (match_dup 3)) + (set (reg:CC CC_REG) (compare:CC (match_dup 4) (match_dup 5)))] + "") +; +; +; +; Match: +; MOV A(R1),R3 +; CMP R3,R0 +; (R3 is dead) +; Transform to: +; CMP A(R1),R0 +; +(define_peephole2 + [(set (match_operand:SI 3 "register_operand") + (plus:SI (match_operand:SI 1 "register_operand") + (match_operand:SI 2 "zip_mvimm_operand_p"))) + (set (reg:CC CC_REG) + (compare:CC (match_operand:SI 0 "register_operand") + 
(match_dup 3)))] + "(ZIP_PEEPHOLE)&&peep2_regno_dead_p(2, REGNO(operands[3]))" + [(set (reg:CC CC_REG) (compare:CC (match_dup 0) + (plus:SI (match_dup 1) (match_dup 2))))] + "") +; +; +; Match: +; ALU OpB,R0 +; CMP 0,R0 +; Transform to: +; ALU OpB,R0 +; +(define_peephole2 + [(parallel [(set (match_operand:SI 0 "register_operand") + (match_operand:SI 1 "")) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))] + "(ZIP_PEEPHOLE)" + [(parallel [(set (match_dup 0) (match_dup 1)) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]) + ]) +; +; +; Match: +; ALU OpB,R0 +; MOV R1,R2 // Can be LDI, LOD, STO, etc. +; CMP 0,R0 +; Transform to: +; ALU OpB,R0 +; MOV R0,R1 +; +(define_peephole2 + [(parallel [(set (match_operand:SI 0 "register_operand") + (match_operand:SI 1 "")) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]) + (set (match_operand:SI 2 "nonimmediate_operand") (match_operand:SI 3 "")) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))] + "(ZIP_PEEPHOLE)&&((!REG_P(operands[2]))||((REGNO(operands[2])!=REGNO(operands[0]))&&((REGNO(operands[2])>=FIRST_PSEUDO_REGISTER)||(REGNO(operands[2])<CC_REG))))" + [(parallel [(set (match_dup 0) (match_dup 1)) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]) + (set (match_dup 2) (match_dup 3)) + ]) +; +; +; Match: +; ALU OpB,R0 +; MOV R0,R1 +; CMP 0,R1 +; Transform to: +; ALU OpB,R0 +; MOV R0,R1 +; +(define_peephole2 + [(parallel [(set (match_operand:SI 0 "register_operand") + (match_operand:SI 1 "")) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]) + (set (match_operand:SI 2 "register_operand") (match_dup 0)) + (set (reg:CC CC_REG) (compare:CC (match_dup 2) (const_int 0)))] + "(ZIP_PEEPHOLE)" + [(parallel [(set (match_dup 0) (match_dup 1)) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]) + (set (match_dup 2) (match_dup 3)) + ]) +; +; +; Match: +; MOV 
R1,R0 +; ADD $x,R0 +; (CCREG is dead, and x is within range ...) +; Transform to: +; MOV $x(R1),R0 +(define_peephole2 + [(set (match_operand:SI 0 "register_operand") + (match_operand:SI 1 "register_operand")) + (parallel [(set (match_dup 0) (plus:SI (match_dup 0) + (match_operand 2 "zip_mvimm_operand_p"))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]) + ] + "(ZIP_PEEPHOLE)&&(peep2_regno_dead_p(2,CC_REG))" + [(set (match_dup 0) (plus:SI (match_dup 1) (match_dup 2)))]) +; +; Match: +; MOV A(R0),R0 +; ADD $x,R1 +; (CCREG is dead, and (A+x) is within range ...) +; Transform to: +; MOV $x(R1),R0 +; +(define_peephole2 + [(set (match_operand:SI 0 "register_operand") + (plus:SI (match_operand:SI 1 "register_operand") + (match_operand 2 "zip_mvimm_operand_p"))) + (parallel [(set (match_dup 0) (plus:SI (match_dup 0) + (match_operand 3 "zip_mvimm_operand_p"))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]) + ] + "(ZIP_PEEPHOLE)&&(peep2_regno_dead_p(2,CC_REG)) + &&(INTVAL(operands[2])+INTVAL(operands[3])<((1<<17))) + &&(INTVAL(operands[2])+INTVAL(operands[3])>=-(1<<17))" + [(set (match_dup 0) (plus:SI (match_dup 1) (match_dup 2)))] + "operands[2]=GEN_INT(INTVAL(operands[2])+INTVAL(operands[3]));") +; +; +; +; Match: +; ADD $x,R0 +; MOV R0,R1 +; (CCREG is dead, and R0 is dead) +; Transform to: +; MOV (A+$x)(R0),R1 +; ... again, how do I build this plus? 
+; +(define_peephole2 + [(parallel [(set (match_operand:SI 0 "register_operand") + (plus:SI (match_dup 0) + (match_operand 1 "zip_mvimm_operand_p"))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]) + (set (match_operand:SI 2 "register_operand") (match_dup 0))] + "(ZIP_PEEPHOLE)&&(peep2_regno_dead_p(2, REGNO(operands[0])))&&(peep2_regno_dead_p(2,CC_REG))" + [(set (match_dup 2) (plus:SI (match_dup 0) (match_dup 1)))]) +; +; +; +; Match: +; ADD $x,R0 +; MOV A(R0),R1 +; (CCREG is dead, and R0 is dead) +; Transform to: +; MOV (A+$x)(R0),R1 +; +(define_peephole2 + [(parallel [ + (set (match_operand:SI 0 "register_operand") + (plus:SI (match_dup 0) + (match_operand 1 "zip_mvimm_operand_p"))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]) + (set (match_operand:SI 2 "register_operand") + (plus:SI (match_dup 0) + (match_operand 3 "zip_mvimm_operand_p"))) + ] + "(ZIP_PEEPHOLE)&&(peep2_regno_dead_p(2,CC_REG)) + &&(peep2_regno_dead_p(1,REGNO(operands[0]))) + &&(INTVAL(operands[1])+INTVAL(operands[3])<((1<<17))) + &&(INTVAL(operands[1])+INTVAL(operands[3])>=-(1<<17))" + [(set (match_dup 0) (plus:SI (match_dup 2) (match_dup 3)))] + "operands[3]=GEN_INT(INTVAL(operands[1])+INTVAL(operands[3]));") +; +; +; +; Match: +; ADD $x,R0 +; ADD R0,Rn +; (R0 is dead, if R0 is not Rn) +; Transform to: +; ADD $x(R0),Rn +; +(define_peephole2 + [(parallel [(set (match_operand:SI 0 "register_operand") + (plus:SI (match_dup 0) + (match_operand 1 "zip_opb_immv_p"))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]) + (parallel [(set (match_operand:SI 2 "register_operand") + (plus:SI (match_dup 2) (match_dup 0))) + (set (reg:CC CC_REG) (compare:CC (match_dup 2) (const_int 0)))]) + ] + "(ZIP_PEEPHOLE)&&(REGNO(operands[0])!=REGNO(operands[2]))&&(peep2_regno_dead_p(2, REGNO(operands[0])))" + [(parallel [(set (match_dup 2) + (plus:SI (match_dup 2) + (plus:SI (match_dup 0) + (match_dup 1)))) + (set (reg:CC CC_REG) (compare:CC (match_dup 2) 
(const_int 0)))]) + ]) +; +; Match: +; ADD $x,R0 +; LOD -x(R0),R1 +; Transform to: +; LOD (R0),R1 +; ADD $x,R0 +; +(define_peephole2 + [(parallel [(set (match_operand:SI 0 "register_operand") + (plus:SI (match_dup 0) + (match_operand 1 "zip_opb_immv_p"))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]) + (set (match_operand:SI 3 "register_operand") + (mem:SI (plus:SI (match_dup 0) + (match_operand 2 "zip_opb_immv_p")))) + ] + "(ZIP_PEEPHOLE)&&(REGNO(operands[0])!=REGNO(operands[1]))&&(INTVAL(operands[1])==-INTVAL(operands[2]))" + [(set (match_dup 3) (mem:SI (match_dup 0))) + (parallel [(set (match_dup 0) (plus:SI (match_dup 0) (match_dup 1))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]) + ]) +(define_peephole2 + [(parallel [(set (match_operand:SI 0 "register_operand") + (plus:SI (match_dup 0) + (match_operand 1 "zip_opb_immv_p"))) + (clobber (reg:CC CC_REG))]) + (set (match_operand:SI 3 "register_operand") + (mem:SI (plus:SI (match_dup 0) + (match_operand 2 "zip_opb_immv_p")))) + ] + "(ZIP_PEEPHOLE)&&(REGNO(operands[0])!=REGNO(operands[1]))&&(INTVAL(operands[1])==-INTVAL(operands[2]))" + [(set (match_dup 3) (mem:SI (match_dup 0))) + (parallel [(set (match_dup 0) (plus:SI (match_dup 0) (match_dup 1))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]) + ]) +; +; +; +; Match: +; ADD $x,R0 +; STO R1,-x(R0) +; Transform to: +; STO R1,(R0) +; ADD $x,R0 +; +(define_peephole2 + [(parallel [(set (match_operand:SI 0 "register_operand") + (plus:SI (match_dup 0) + (match_operand 1 "zip_opb_immv_p"))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]) + (set (mem:SI (plus:SI (match_dup 0) (match_operand 2 "zip_opb_immv_p"))) + (match_operand:SI 3 "register_operand")) + ] + "(ZIP_PEEPHOLE)&&(REGNO(operands[0])!=REGNO(operands[1]))&&(INTVAL(operands[1])==-INTVAL(operands[2]))" + [(set (mem:SI (match_dup 0)) (match_dup 3)) + (parallel [(set (match_dup 0) (plus:SI (match_dup 0) (match_dup 1))) + (set (reg:CC 
CC_REG) (compare:CC (match_dup 0) (const_int 0)))]) + ]) +(define_peephole2 + [(parallel [(set (match_operand:SI 0 "register_operand") + (plus:SI (match_dup 0) + (match_operand 1 "zip_opb_immv_p"))) + (clobber (reg:CC CC_REG))]) + (set (mem:SI (plus:SI (match_dup 0) (match_operand 2 "zip_opb_immv_p"))) + (match_operand:SI 3 "register_operand")) + ] + "(ZIP_PEEPHOLE)&&(REGNO(operands[0])!=REGNO(operands[1]))&&(INTVAL(operands[1])==-INTVAL(operands[2]))" + [(set (mem:SI (match_dup 0)) (match_dup 3)) + (parallel [(set (match_dup 0) (plus:SI (match_dup 0) (match_dup 1))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]) + ]) +; +; +; Match: +; ADD $x,R0 +; ANY R1,R2 (destination is not R0, source does not reference R0) +; ADD R0,Rn (could be 1 or 2, not 0) +; (R0 is dead) +; Transform to: +; ANY R1,R2 +; ADD $x(R0),Rn +; +; +; +; Match: +; MOV R1,R0 +; AND #/R2,R0 +; (Ry dead ...) +; Transform to: +; TEST #/Rz,Rx +; +(define_peephole2 + [(set (match_operand:SI 0 "register_operand") + (match_operand:SI 1 "register_operand")) + (parallel [(set (match_dup 0) + (and:SI (match_dup 0) + (match_operand:SI 2 "zip_opb_single_operand_p"))) + (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]) + ] + "((1)||(ZIP_PEEPHOLE))&&(peep2_regno_dead_p(2, REGNO(operands[0])))" + [(set (reg:CC CC_REG) (compare:CC (and:ZI (match_dup 1) (match_dup 2)) + (const_int 0)))]) +; +; Match: +; (call ... +; (set (pc) (label)) +; or (in asm) +; MOV .Lcallx(PC),R0 +; BRA (somewhere) +; .Lcallx +; BRA (somewhere-else) +; Transform to: +; +; (sequence [(call ... +; (set (pc) (label))]) +; or (in asm) +; "LDI (somewhere-else),R0 +; BRA subroutine" +; +; While the following looks good, it doesnt work. My guess is that the reason +; why it doesnt work is that the jump at the end crosses basic block boundaries. 
+; +;(define_insn "void_call_mem_unspec" +; [(call (unspec:SI [(mem:SI (match_operand:VOID 0 "zip_const_address_operand_p" ""))] UNSPEC_RAW_CALL) +; (match_operand 1 "const_int_operand" "n")) +; (clobber (reg:SI RTN_REG)) +; (clobber (reg:CC CC_REG))] +; "" +; "BRA\t%0,PC" +; [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")]) +;(define_peephole2 +; [(parallel [(call (mem:SI (match_operand:VOID 0 "zip_const_address_operand_p")) +; (match_operand 1 "const_int_operand")) +; (clobber (reg:SI RTN_REG)) +; (clobber (reg:CC CC_REG))]) +; ; The match operand for the (set (pc) ...) cannot have anything but +; ; VOIDmode, or it wont match. +; (set (pc) (match_operand:VOID 2 "zip_const_address_operand_p"))] +; "" +; [(set (reg:SI RTN_REG) (match_dup 2)) +; (call (unspec:SI [(mem:SI (match_operand:VOID 0 "zip_const_address_operand_p"))] UNSPEC_RAW_CALL) +; (match_operand 1 "const_int_operand")) +; (use (reg:SI RTN_REG)) +; (clobber (reg:SI RTN_REG)) +; (clobber (reg:CC CC_REG))] +; "fprintf(stderr, \"CALL-JUMP Matched\");") +; +; +; +; So, the following *should* have worked as well. However, this falls apart +; because the 'final' routine can't tell if we are calling a subroutine in this +; function or not. +; +;(define_peephole + ;[(parallel [(call (mem:SI (match_operand:SI 0 "zip_const_address_operand_p")) + ;(match_operand 1 "const_int_operand")) + ;(clobber (reg:SI RTN_REG)) + ;(clobber (reg:CC CC_REG))]) + ;(set (pc) (label_ref (match_operand 2 "")))] + ;"" + ;"LDI\t%2,R0\;BRA\t%0" + ;[(set_attr "predicable" "no") (set_attr "ccresult" "unknown")]) +; +; and for +; BRA target +; BRA target ; two branches to the same identical target in a row ... +; +; +; +; STILL MISSING: +; SYSCALL(ID) +; MOV %ID,R0 +; CLR CC +; cmove ... 
the conditional move, created from a +; (set (match_op 0 "" "r") (if_then_else (condition) (a) (reg X)))) +; pattern diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/config/zip/zip-modes.def gcc-5.3.0-zip/gcc/config/zip/zip-modes.def --- gcc-5.3.0-original/gcc/config/zip/zip-modes.def 1969-12-31 19:00:00.000000000 -0500 +++ gcc-5.3.0-zip/gcc/config/zip/zip-modes.def 2016-03-08 12:10:21.982586940 -0500 @@ -0,0 +1,23 @@ +/* + * Commends in C-long comment form + * class + * Mode = "SI" + * PRECISION, BYTESIZE, COUNT ?? + * FORMAT + * EXPR + * + * The manual says I need to define BITS_PER_UNIT here. + */ +// INT_MODE(QI, 1); +// INT_MODE(HI, 1); +// INT_MODE(SI, 1); +// INT_MODE(DI, 2); + +// FLOAT_MODE(SF, 1, ieee_single_format); +// FLOAT_MODE(DF, 2, ieee_single_format); + +// We cannot override machmodes.def from here. Thus, even though our QI, +// HI, and SI modes are all 1-byte, we cant set them that way here. The +// change needed to be made in machmodes.def. Hence, here is a target +// configuration change--in machmodes.def--that properly belonged in the +// config directory. diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/config/zip/zip-protos.h gcc-5.3.0-zip/gcc/config/zip/zip-protos.h --- gcc-5.3.0-original/gcc/config/zip/zip-protos.h 1969-12-31 19:00:00.000000000 -0500 +++ gcc-5.3.0-zip/gcc/config/zip/zip-protos.h 2016-05-09 11:16:54.734258166 -0400 @@ -0,0 +1,83 @@ +//////////////////////////////////////////////////////////////////////////////// +// +// Filename: zip-protos.h +// +// Project: Zip CPU backend for the GNU Compiler Collection +// +// Purpose: +// +// Creator: Dan Gisselquist, Ph.D. 
+// Gisselquist Technology, LLC +// +//////////////////////////////////////////////////////////////////////////////// +// +// Copyright (C) 2016, Gisselquist Technology, LLC +// +// This program is free software (firmware): you can redistribute it and/or +// modify it under the terms of the GNU General Public License as published +// by the Free Software Foundation, either version 3 of the License, or (at +// your option) any later version. +// +// This program is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +// for more details. +// +// You should have received a copy of the GNU General Public License along +// with this program. (It's in the $(ROOT)/doc directory, run make with no +// target there if the PDF file isn't present.) If not, see +// <http://www.gnu.org/licenses/> for a copy. +// +// License: GPL, v3, as defined and found on www.gnu.org, +// http://www.gnu.org/licenses/gpl.html +// +// +//////////////////////////////////////////////////////////////////////////////// +#ifndef ZIP_PROTOS_H +#define ZIP_PROTOS_H + +extern bool zip_supported_condition(int c); +extern void zip_expand_prologue(void); +extern void zip_expand_epilogue(void); +extern bool zip_expand_movsicc(rtx,rtx,rtx,rtx); +extern int zip_initial_elimination_offset(int, int); +extern void zip_print_operand(FILE *, rtx, int); +extern void zip_print_operand_address(FILE *, rtx); +extern enum reg_class zip_reg_class(int); +extern rtx zip_return_addr_rtx(int, rtx); +extern int zip_num_arg_regs(enum machine_mode, tree); + +extern void zip_asm_output_def(FILE *s, const char *n, const char *v); + +#ifdef HAVE_cc0 +extern void zip_update_cc_notice(rtx exp, rtx_insn *insn); +#else +extern void zip_canonicalize_comparison(int *, rtx *, rtx *, bool); +#endif +extern int zip_address_operand(rtx op); +extern int zip_const_address_operand(rtx op); 
+extern bool zip_gen_move_rtl(rtx, rtx); +extern bool zip_use_return_insn(void); +extern const char *zip_set_zero_or_one(rtx, rtx); +extern const char *zip_movsicc(rtx, rtx, rtx, rtx); + +extern int zip_insn_sets_cc(rtx_insn *insn); +extern int zip_is_conditional(rtx_insn *insn); +extern int zip_ct_address_operand(rtx op); +extern int zip_pd_opb_operand(rtx op); +extern int zip_pd_mov_operand(rtx op); +extern int zip_pd_imm_operand(rtx op); +extern int zip_pd_mvimm_operand(rtx op); +extern int zip_ct_const_address_operand(rtx op); +extern int zip_pd_const_address_operand(rtx op); +extern const char *zip_movsicc(rtx, rtx, rtx, rtx); +extern const char *zip_addsicc(rtx, rtx, rtx, rtx); + +extern void zip_ifcvt_machdep_init(struct ce_if_block *ceinfo); +extern void zip_ifcvt_modify_cancel(struct ce_if_block *ceinfo); +extern void zip_ifcvt_modify_final(struct ce_if_block *ceinfo); +extern void zip_ifcvt_modify_tests(struct ce_if_block *ceinfo, rtx *true_expr, rtx *false_expr); +extern void zip_ifcvt_modify_insn(struct ce_if_block *ceinfo, rtx pattern, rtx_insn *insn); + +#endif + diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/config.gcc gcc-5.3.0-zip/gcc/config.gcc --- gcc-5.3.0-original/gcc/config.gcc 2015-09-10 10:17:53.000000000 -0400 +++ gcc-5.3.0-zip/gcc/config.gcc 2016-02-14 00:53:37.389411987 -0500 @@ -479,6 +479,10 @@ tilepro*-*-*) cpu_type=tilepro ;; +zip*) + cpu_type=zip + tmake_file=zip/t-zip + ;; esac tm_file=${cpu_type}/${cpu_type}.h @@ -2972,6 +2976,15 @@ c_target_objs="m32c-pragma.o" cxx_target_objs="m32c-pragma.o" ;; +zip-*-netbsd*) + tm_file="${tm_file} elfos.h netbsd.h netbsd-elf.h zip/netbsd.h" + tmake_file="${tmake_file} zip/t-zip" + ;; +zip*) + target_has_targetm_common=yes + tm_file="elfos.h newlib-stdint.h ${tm_file}" + tmake_file="${tmake_file} zip/t-zip" + ;; *) echo "*** Configuration ${target} not supported" 1>&2 exit 1 diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/cse.c gcc-5.3.0-zip/gcc/cse.c --- gcc-5.3.0-original/gcc/cse.c 
2015-02-03 15:41:38.000000000 -0500 +++ gcc-5.3.0-zip/gcc/cse.c 2016-05-04 11:07:38.874831028 -0400 @@ -70,6 +70,7 @@ #include "dbgcnt.h" #include "rtl-iter.h" + /* The basic idea of common subexpression elimination is to go through the code, keeping a record of expressions that would have the same value at the current scan point, and replacing @@ -634,6 +635,16 @@ /* Nonzero if X has the form (PLUS frame-pointer integer). */ +// #define DO_ZIP_DEBUGS +#ifdef DO_ZIP_DEBUGS +#include <stdio.h> +extern void zip_debug_rtx(const_rtx); +extern void zip_debug_rtx_pfx(char *, const_rtx); +#define ZIP_DEBUG_LINE(STR,RTX) do { fprintf(stderr, "%s\n", STR); zip_debug_rtx(RTX); } while(0) +#else +#define ZIP_DEBUG_LINE(STR,RTX) +#endif + static bool fixed_base_plus_p (rtx x) { @@ -2898,6 +2909,7 @@ validate_canon_reg (&XVECEXP (x, i, j), insn); } + ZIP_DEBUG_LINE("CANON-REG-RTN", x); return x; } @@ -3125,6 +3137,8 @@ rtx new_rtx = 0; int changed = 0; +ZIP_DEBUG_LINE("CSE:FOLD-RTX", insn); + /* Operands of X. */ /* Workaround -Wmaybe-uninitialized false positive during profiledbootstrap by initializing them. */ @@ -3144,14 +3158,16 @@ if (x == 0) return x; + ZIP_DEBUG_LINE("FOLD-RTX", x); /* Try to perform some initial simplifications on X. */ code = GET_CODE (x); switch (code) { case MEM: case SUBREG: - if ((new_rtx = equiv_constant (x)) != NULL_RTX) - return new_rtx; + if ((new_rtx = equiv_constant (x)) != NULL_RTX) { + ZIP_DEBUG_LINE("FOLD-RTX-NEW", new_rtx); + return new_rtx; } return x; case CONST: @@ -3208,6 +3224,8 @@ rtx folded_arg = XEXP (x, i), const_arg; machine_mode mode_arg = GET_MODE (folded_arg); + ZIP_DEBUG_LINE("FOLD-RTX-ARG = ", folded_arg); + switch (GET_CODE (folded_arg)) { case MEM: @@ -3317,6 +3335,7 @@ } apply_change_group (); + ZIP_DEBUG_LINE("FOLD-RTX-CANONICALIZED = ", insn); } /* If X is an arithmetic operation, see if we can simplify it. 
*/ @@ -4203,6 +4222,7 @@ { rtx dest = SET_DEST (set); rtx src = SET_SRC (set); + ZIP_DEBUG_LINE("TRY-BACK-SUBSTITUTE-REG", insn); if (REG_P (dest) && REG_P (src) && ! HARD_REGISTER_P (src) @@ -4258,6 +4278,7 @@ } } } + ZIP_DEBUG_LINE("TRY-BACK-SUBSTITUTE-REG, done", insn); } /* Record all the SETs in this instruction into SETS_PTR, @@ -4351,6 +4372,7 @@ rtx tem; rtx x = PATTERN (insn); int i; + ZIP_DEBUG_LINE("CANONICALIZE-INSN", insn); if (CALL_P (insn)) { @@ -4364,6 +4386,7 @@ canon_reg (SET_SRC (x), insn); apply_change_group (); fold_rtx (SET_SRC (x), insn); + ZIP_DEBUG_LINE("CANONICALIZE-INSN, was set:", insn); } else if (GET_CODE (x) == CLOBBER) { @@ -4400,6 +4423,7 @@ canon_reg (PATTERN (insn), insn); else if (GET_CODE (x) == PARALLEL) { + ZIP_DEBUG_LINE("CANONICALIZE-INSN/parallel", insn); for (i = XVECLEN (x, 0) - 1; i >= 0; i--) { rtx y = XVECEXP (x, 0, i); @@ -4491,6 +4515,7 @@ The result of apply_change_group can be ignored; see canon_reg. */ + ZIP_DEBUG_LINE("CANONICALIZE-INSN/done", insn); apply_change_group (); } diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/defaults.h gcc-5.3.0-zip/gcc/defaults.h --- gcc-5.3.0-original/gcc/defaults.h 2015-03-03 10:04:02.000000000 -0500 +++ gcc-5.3.0-zip/gcc/defaults.h 2016-02-06 16:57:53.939410173 -0500 @@ -480,6 +480,8 @@ #define LOG2_BITS_PER_UNIT 3 #elif BITS_PER_UNIT == 16 #define LOG2_BITS_PER_UNIT 4 +#elif BITS_PER_UNIT == 32 +#define LOG2_BITS_PER_UNIT 5 #else #error Unknown BITS_PER_UNIT #endif diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/df-scan.c gcc-5.3.0-zip/gcc/df-scan.c --- gcc-5.3.0-original/gcc/df-scan.c 2015-01-09 15:18:42.000000000 -0500 +++ gcc-5.3.0-zip/gcc/df-scan.c 2016-04-14 16:12:03.614777002 -0400 @@ -61,6 +61,14 @@ typedef struct df_mw_hardreg *df_mw_hardreg_ptr; +// #define DO_ZIP_DEBUGS +#ifdef DO_ZIP_DEBUGS +extern void zip_debug_rtx(const_rtx); +#define ZIP_DEBUG_LINE(STR,RTX) do { fprintf(stderr, "%s:%d/%s\n", __FILE__,__LINE__,STR); zip_debug_rtx(RTX); } while(0) +#else 
+#define ZIP_DEBUG_LINE(STR,RTX) +#endif + #ifndef HAVE_epilogue #define HAVE_epilogue 0 diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/doc/gcc.log gcc-5.3.0-zip/gcc/doc/gcc.log --- gcc-5.3.0-original/gcc/doc/gcc.log 1969-12-31 19:00:00.000000000 -0500 +++ gcc-5.3.0-zip/gcc/doc/gcc.log 2016-01-30 15:18:43.262724969 -0500 @@ -0,0 +1,214 @@ +This is pdfTeX, Version 3.1415926-2.5-1.40.14 (TeX Live 2013/Debian) (format=pdfetex 2014.5.7) 30 JAN 2016 15:17 +entering extended mode + restricted \write18 enabled. + file:line:error style messages enabled. + %&-line parsing enabled. +**\catcode126=12 \def\normaltilde{~}\catcode126=13 \let~\normaltilde \input ./ +gcc.texi +(./gcc.texi (/usr/share/texmf/tex/texinfo/texinfo.tex +Loading texinfo [version 2013-09-11.11]: +\bindingoffset=\dimen16 +\normaloffset=\dimen17 +\pagewidth=\dimen18 +\pageheight=\dimen19 +\outerhsize=\dimen20 +\outervsize=\dimen21 +\cornerlong=\dimen22 +\cornerthick=\dimen23 +\topandbottommargin=\dimen24 +\headlinebox=\box16 +\footlinebox=\box17 +\margin=\insert252 +\EMsimple=\toks13 +\groupbox=\box18 +\groupinvalidhelp=\toks14 +\mil=\dimen25 +\exdentamount=\skip18 +\inmarginspacing=\skip19 +\centerpenalty=\count27 + pdf, +\tempnum=\count28 +\lnkcount=\count29 +\filename=\toks15 +\filenamelength=\count30 +\pgn=\count31 +\toksA=\toks16 +\toksB=\toks17 +\toksC=\toks18 +\toksD=\toks19 +\boxA=\box19 +\countA=\count32 +\nopdfimagehelp=\toks20 + fonts, +\sffam=\fam8 +\textleading=\dimen26 + markup, +\fontdepth=\count33 + glyphs, +\errorbox=\box20 + +page headings, +\titlepagetopglue=\skip20 +\titlepagebottomglue=\skip21 +\evenheadline=\toks21 +\oddheadline=\toks22 +\evenfootline=\toks23 +\oddfootline=\toks24 + tables, +\tableindent=\dimen27 +\itemindent=\dimen28 +\itemmargin=\dimen29 +\itemmax=\dimen30 +\itemno=\count34 +\multitableparskip=\skip22 +\multitableparindent=\skip23 +\multitablecolspace=\dimen31 +\multitablelinespace=\skip24 +\colcount=\count35 +\everytab=\toks25 + conditionals, 
+\doignorecount=\count36 + indexing, +\whatsitskip=\skip25 +\whatsitpenalty=\count37 +\secondaryindent=\skip26 +\partialpage=\box21 +\doublecolumnhsize=\dimen32 + sectioning, +\unnumberedno=\count38 +\chapno=\count39 +\secno=\count40 +\subsecno=\count41 +\subsubsecno=\count42 +\appendixno=\count43 +\absseclevel=\count44 +\secbase=\count45 +\chapheadingskip=\skip27 +\secheadingskip=\skip28 +\subsecheadingskip=\skip29 + toc, +\tocfile=\write0 +\contentsrightmargin=\skip30 +\savepageno=\count46 +\lastnegativepageno=\count47 +\tocindent=\dimen33 + environments, +\lispnarrowing=\skip31 +\envskipamount=\skip32 +\circthick=\dimen34 +\cartouter=\dimen35 +\cartinner=\dimen36 +\normbskip=\skip33 +\normpskip=\skip34 +\normlskip=\skip35 +\lskip=\skip36 +\rskip=\skip37 +\nonfillparindent=\dimen37 +\tabw=\dimen38 +\verbbox=\box22 + +defuns, +\defbodyindent=\skip38 +\defargsindent=\skip39 +\deflastargmargin=\skip40 +\defunpenalty=\count48 +\parencount=\count49 +\brackcount=\count50 + macros, +\paramno=\count51 +\macname=\toks26 + cross references, +\auxfile=\write1 +\savesfregister=\count52 +\toprefbox=\box23 +\printedrefnamebox=\box24 +\infofilenamebox=\box25 +\printedmanualbox=\box26 + insertions, +\footnoteno=\count53 +\SAVEfootins=\box27 +\SAVEmargin=\box28 + +(/usr/share/texlive/texmf-dist/tex/generic/epsf/epsf.tex +This is `epsf.tex' v2.7.4 <14 February 2011> +\epsffilein=\read1 +\epsfframemargin=\dimen39 +\epsfframethickness=\dimen40 +\epsfrsize=\dimen41 +\epsftmp=\dimen42 +\epsftsize=\dimen43 +\epsfxsize=\dimen44 +\epsfysize=\dimen45 +\pspoints=\dimen46 +) +\noepsfhelp=\toks27 + localization, +\nolanghelp=\toks28 +\countUTFx=\count54 +\countUTFy=\count55 +\countUTFz=\count56 + formatting, +\defaultparindent=\dimen47 + and turning on texinfo input format.) +\openout1 = `gcc.aux'. 
+ +@cpindfile=@write2 +@fnindfile=@write3 +@vrindfile=@write4 +@tpindfile=@write5 +@kyindfile=@write6 +@pgindfile=@write7 +texinfo.tex: doing @include of gcc-common.texi + + +./gcc.texi:25: I can't find file `gcc-common.texi'. +@temp ->@input gcc-common.texi + +@includezzz ...and @input #1 }@expandafter }@temp + @popthisfilestack +l.25 @include gcc-common.texi + +(Press Enter to retry, or Control-D to exit) +Please type another input file name: include/gcc-common.texi +(./include/gcc-common.texi +texinfo.tex: doing @include of gcc-vers.texi + + +./include/gcc-common.texi:11: I can't find file `gcc-vers.texi'. +@temp ->@input gcc-vers.texi + +@includezzz ...and @input #1 }@expandafter }@temp + @popthisfilestack +l.11 @include gcc-vers.texi + +(Press Enter to retry, or Control-D to exit) +Please type another input file name: include/gcc-vers.texi +./include/gcc-common.texi:11: I can't find file `include/gcc-vers.texi'. +@temp ->@input gcc-vers.texi + +@includezzz ...and @input #1 }@expandafter }@temp + @popthisfilestack +l.11 @include gcc-vers.texi + +(Press Enter to retry, or Control-D to exit) +Please type another input file name: +./include/gcc-common.texi:11: I can't find file `include/gcc-vers.texi'. +@temp ->@input gcc-vers.texi + +@includezzz ...and @input #1 }@expandafter }@temp + @popthisfilestack +l.11 @include gcc-vers.texi + +(Press Enter to retry, or Control-D to exit) +Please type another input file name: +./include/gcc-common.texi:11: Emergency stop. +@temp ->@input gcc-vers.texi + +@includezzz ...and @input #1 }@expandafter }@temp + @popthisfilestack +l.11 @include gcc-vers.texi + +End of file on the terminal! + +./include/gcc-common.texi:11: ==> Fatal error occurred, no output PDF file pro +duced! 
diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/emit-rtl.c gcc-5.3.0-zip/gcc/emit-rtl.c --- gcc-5.3.0-original/gcc/emit-rtl.c 2015-08-05 07:20:59.000000000 -0400 +++ gcc-5.3.0-zip/gcc/emit-rtl.c 2016-05-02 07:48:47.925017436 -0400 @@ -81,6 +81,15 @@ #include "builtins.h" #include "rtl-iter.h" +// #define DO_ZIP_DEBUGS +#include <stdio.h> +#ifdef DO_ZIP_DEBUGS +#define ZIP_DEBUG_LINE(STR,RTX) do { fprintf(stderr, "%s:%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX); } while(0) +extern void zip_debug_rtx(const_rtx); +#else +#define ZIP_DEBUG_LINE(STR,RTX) +#endif + struct target_rtl default_target_rtl; #if SWITCHABLE_TARGET struct target_rtl *this_target_rtl = &default_target_rtl; @@ -2979,6 +2988,8 @@ break; } +ZIP_DEBUG_LINE("Before RTX_FLAG",x); + /* This rtx may not be shared. If it has already been seen, replace it with a copy of itself. */ @@ -2989,6 +3000,8 @@ } RTX_FLAG (x, used) = 1; +ZIP_DEBUG_LINE("Post RTX_FLAG",x); + /* Now scan the subexpressions recursively. We can store any replaced subexpressions directly into X since we know X is not shared! Any vectors in X @@ -3665,7 +3678,9 @@ split_branch_probability = XINT (note, 0); probability = split_branch_probability; +ZIP_DEBUG_LINE("Before split", trial); seq = safe_as_a <rtx_insn *> (split_insns (pat, trial)); +ZIP_DEBUG_LINE("After split", seq); split_branch_probability = -1; @@ -3834,6 +3849,7 @@ if (! tem->deleted () && INSN_P (tem)) tem = try_split (PATTERN (tem), tem, 1); + /* Return either the first or the last insn, depending on which was requested. */ return last diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/expr.c gcc-5.3.0-zip/gcc/expr.c --- gcc-5.3.0-original/gcc/expr.c 2015-04-07 10:34:06.000000000 -0400 +++ gcc-5.3.0-zip/gcc/expr.c 2016-03-08 04:07:01.426335724 -0500 @@ -7999,6 +7999,8 @@ the back of the caller. The normal operating mode is to pass FALSE for this parameter. 
*/ +#include "print-tree.h" + rtx expand_expr_real (tree exp, rtx target, machine_mode tmode, enum expand_modifier modifier, rtx *alt_rtl, diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/final.c gcc-5.3.0-zip/gcc/final.c --- gcc-5.3.0-original/gcc/final.c 2015-01-15 08:28:42.000000000 -0500 +++ gcc-5.3.0-zip/gcc/final.c 2016-04-20 20:52:07.186056061 -0400 @@ -109,6 +109,14 @@ #include "wide-int-print.h" #include "rtl-iter.h" +// #define DO_ZIP_DEBUGS +#ifdef DO_ZIP_DEBUGS +extern void zip_debug_rtx(const_rtx); +#define ZIP_DEBUG_LINE(STR,RTX) do { fprintf(stderr, "%s:%d/%s\n", __FILE__,__LINE__,STR); zip_debug_rtx(RTX); } while(0) +#else +#define ZIP_DEBUG_LINE(STR,RTX) +#endif + #ifdef XCOFF_DEBUGGING_INFO #include "xcoffout.h" /* Needed for external data declarations for e.g. AIX 4.x. */ @@ -2071,6 +2079,8 @@ /* Output the insns. */ for (insn = first; insn;) { + ZIP_DEBUG_LINE("final()\n", insn); + if (HAVE_ATTR_length) { if ((unsigned) INSN_UID (insn) >= INSN_ADDRESSES_SIZE ()) diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/genmodes.c gcc-5.3.0-zip/gcc/genmodes.c --- gcc-5.3.0-original/gcc/genmodes.c 2015-01-05 07:33:28.000000000 -0500 +++ gcc-5.3.0-zip/gcc/genmodes.c 2016-03-04 21:27:49.669147699 -0500 @@ -330,7 +330,8 @@ the size of a CC mode is four units. */ validate_mode (m, UNSET, UNSET, UNSET, UNSET, UNSET); - m->bytesize = 4; + /* For the ZipCPU, however, it is only one unit */ + m->bytesize = 1; m->ncomponents = 1; m->component = 0; break; @@ -766,11 +767,12 @@ /* So put the default value unless the target needs a non standard value. */ -#ifdef BITS_PER_UNIT - bits_per_unit = BITS_PER_UNIT; -#else - bits_per_unit = 8; -#endif +// #ifdef BITS_PER_UNIT + // bits_per_unit = BITS_PER_UNIT; +// #else + bits_per_unit = 32; +#warning "Is there a more automated way to set bits per unit?" 
+// #endif #ifdef MAX_BITSIZE_MODE_ANY_INT max_bitsize_mode_any_int = MAX_BITSIZE_MODE_ANY_INT; @@ -1083,7 +1085,7 @@ first = modes[c]; last = 0; for (m = first; m; last = m, m = m->next) - ; + if ((m->next)&&(m->next->bytesize == m->bytesize)) first = m; /* Don't use BImode for MIN_MODE_INT, since otherwise the middle end will try to use it for bitfields in structures and the @@ -1268,7 +1270,7 @@ continue; if (m->precision != (unsigned int) -1) { - if (m2->precision != 2 * m->precision) + if (m2->precision < 2 * m->precision) continue; } else @@ -1323,7 +1325,6 @@ tagged_printf ("MODE_MASK (%u)", m->precision, m->name); else tagged_printf ("MODE_MASK (%u*BITS_PER_UNIT)", m->bytesize, m->name); - puts ("#undef MODE_MASK"); print_closer (); } @@ -1351,12 +1352,23 @@ int c; struct mode_data *m; + puts( +"\n\n/* This is a rather strange conundrum. Alignment is used by the host in\n" +" * the assembly file, whereas the size is used by the target. Thus, for\n" +" * now, to align to a single target word means to align to 4 8-bit bytes in\n" +" * assembly. If you get it wrong, the assembler will try to help. Thus,\n" +" * aligning to anything less than 4 (1 target word) will cause an alignment\n" +" * of the target word in size. 
However, this tries to do a little something\n" +" * teach our compiler what we are doing.\n" +" */\n"); print_maybe_const_decl ("%sunsigned char", "mode_base_align", "NUM_MACHINE_MODES", alignment); for_all_modes (c, m) - tagged_printf ("%u", m->alignment, m->name); + tagged_printf ("%u", 4*m->bytesize, + // m->alignment, + m->name); print_closer (); } diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/jump.c gcc-5.3.0-zip/gcc/jump.c --- gcc-5.3.0-original/gcc/jump.c 2015-01-27 04:19:30.000000000 -0500 +++ gcc-5.3.0-zip/gcc/jump.c 2016-04-06 14:06:50.207563805 -0400 @@ -80,6 +80,14 @@ #include "target.h" #include "rtl-iter.h" +// #define DO_ZIP_DEBUGS +#ifdef DO_ZIP_DEBUGS +#include <stdio.h> +#define ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s:%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX); }while(0) +extern void zip_debug_rtx(const_rtx); +#else +#define ZIP_DEBUG_LINE(STR,RTX) +#endif /* Optimize jump y; x: ... y: jumpif... x? Don't know if it is worth bothering with. */ /* Optimize two cases of conditional jump to conditional jump? @@ -1136,6 +1144,7 @@ int i; const char *fmt; +ZIP_DEBUG_LINE("Mark jump label",insn); switch (code) { case PC: @@ -1248,6 +1257,8 @@ break; } +ZIP_DEBUG_LINE("Post case",insn); + fmt = GET_RTX_FORMAT (code); /* The primary target of a tablejump is the label of the ADDR_VEC, diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/machmode.def gcc-5.3.0-zip/gcc/machmode.def --- gcc-5.3.0-original/gcc/machmode.def 2015-01-05 07:33:28.000000000 -0500 +++ gcc-5.3.0-zip/gcc/machmode.def 2016-03-08 11:56:45.375491523 -0500 @@ -184,11 +184,11 @@ larger types, then corresponding modes must be added here. The name OI is reserved for a 256-bit type (needed by some back ends). */ -INT_MODE (QI, 1); -INT_MODE (HI, 2); -INT_MODE (SI, 4); -INT_MODE (DI, 8); -INT_MODE (TI, 16); +// INT_MODE (QI, 1); +// INT_MODE (HI, 1); +INT_MODE (SI, 1); +INT_MODE (DI, 2); +INT_MODE (TI, 4); /* No partial integer modes are defined by default. 
*/ @@ -206,35 +206,35 @@ These are the IEEE mappings. They can be overridden with RESET_FLOAT_FORMAT or at runtime (in TARGET_OPTION_OVERRIDE). */ -FLOAT_MODE (SF, 4, ieee_single_format); -FLOAT_MODE (DF, 8, ieee_double_format); +FLOAT_MODE (SF, 1, ieee_single_format); +FLOAT_MODE (DF, 2, ieee_double_format); /* Basic CC modes. FIXME define this only for targets that need it. */ CC_MODE (CC); /* Fixed-point modes. */ -FRACT_MODE (QQ, 1, 7); /* s.7 */ -FRACT_MODE (HQ, 2, 15); /* s.15 */ -FRACT_MODE (SQ, 4, 31); /* s.31 */ -FRACT_MODE (DQ, 8, 63); /* s.63 */ -FRACT_MODE (TQ, 16, 127); /* s.127 */ - -UFRACT_MODE (UQQ, 1, 8); /* .8 */ -UFRACT_MODE (UHQ, 2, 16); /* .16 */ -UFRACT_MODE (USQ, 4, 32); /* .32 */ -UFRACT_MODE (UDQ, 8, 64); /* .64 */ -UFRACT_MODE (UTQ, 16, 128); /* .128 */ - -ACCUM_MODE (HA, 2, 8, 7); /* s8.7 */ -ACCUM_MODE (SA, 4, 16, 15); /* s16.15 */ -ACCUM_MODE (DA, 8, 32, 31); /* s32.31 */ -ACCUM_MODE (TA, 16, 64, 63); /* s64.63 */ - -UACCUM_MODE (UHA, 2, 8, 8); /* 8.8 */ -UACCUM_MODE (USA, 4, 16, 16); /* 16.16 */ -UACCUM_MODE (UDA, 8, 32, 32); /* 32.32 */ -UACCUM_MODE (UTA, 16, 64, 64); /* 64.64 */ +/* FRACT_MODE (QQ, 1, 7); /* s.7 */ +/* FRACT_MODE (HQ, 1, 15); /* s.15 */ +FRACT_MODE (SQ, 1, 31); /* s.31 */ +FRACT_MODE (DQ, 2, 63); /* s.63 */ +FRACT_MODE (TQ, 4, 127); /* s.127 */ + +/* UFRACT_MODE (UQQ, 1, 8); /* .8 */ +/* UFRACT_MODE (UHQ, 1, 16); /* .16 */ +UFRACT_MODE (USQ, 1, 32); /* .32 */ +UFRACT_MODE (UDQ, 2, 64); /* .64 */ +UFRACT_MODE (UTQ, 4, 128); /* .128 */ + +/* ACCUM_MODE (HA, 2, 8, 7); /* s8.7 */ +ACCUM_MODE (SA, 1, 16, 15); /* s16.15 */ +ACCUM_MODE (DA, 2, 32, 31); /* s32.31 */ +ACCUM_MODE (TA, 4, 64, 63); /* s64.63 */ + +/* UACCUM_MODE (UHA, 2, 8, 8); /* 8.8 */ +UACCUM_MODE (USA, 1, 16, 16); /* 16.16 */ +UACCUM_MODE (UDA, 2, 32, 32); /* 32.32 */ +UACCUM_MODE (UTA, 4, 64, 64); /* 64.64 */ /* Allow the target to specify additional modes of various kinds. 
*/ #if HAVE_EXTRA_MODES @@ -246,9 +246,9 @@ COMPLEX_MODES (FLOAT); /* Decimal floating point modes. */ -DECIMAL_FLOAT_MODE (SD, 4, decimal_single_format); -DECIMAL_FLOAT_MODE (DD, 8, decimal_double_format); -DECIMAL_FLOAT_MODE (TD, 16, decimal_quad_format); +DECIMAL_FLOAT_MODE (SD, 1, decimal_single_format); +DECIMAL_FLOAT_MODE (DD, 2, decimal_double_format); +DECIMAL_FLOAT_MODE (TD, 4, decimal_quad_format); /* The symbol Pmode stands for one of the above machine modes (usually SImode). The tm.h file specifies which one. It is not a distinct mode. */ diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/machmode.h gcc-5.3.0-zip/gcc/machmode.h --- gcc-5.3.0-original/gcc/machmode.h 2015-01-05 07:33:28.000000000 -0500 +++ gcc-5.3.0-zip/gcc/machmode.h 2016-02-06 17:21:49.592924065 -0500 @@ -180,13 +180,7 @@ /* Get the size in bytes and bits of an object of mode MODE. */ extern CONST_MODE_SIZE unsigned char mode_size[NUM_MACHINE_MODES]; -#if GCC_VERSION >= 4001 -#define GET_MODE_SIZE(MODE) \ - ((unsigned short) (__builtin_constant_p (MODE) \ - ? 
mode_size_inline (MODE) : mode_size[MODE])) -#else #define GET_MODE_SIZE(MODE) ((unsigned short) mode_size[MODE]) -#endif #define GET_MODE_BITSIZE(MODE) \ ((unsigned short) (GET_MODE_SIZE (MODE) * BITS_PER_UNIT)) diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/objc/objc-encoding.c gcc-5.3.0-zip/gcc/objc/objc-encoding.c --- gcc-5.3.0-original/gcc/objc/objc-encoding.c 2015-01-09 15:18:42.000000000 -0500 +++ gcc-5.3.0-zip/gcc/objc/objc-encoding.c 2016-03-04 22:53:40.431902505 -0500 @@ -765,10 +765,14 @@ { switch (TYPE_MODE (type)) { +#ifdef HAVE_QImode case QImode: charType = 'C'; break; +#endif +#ifdef HAVE_HImode case HImode: charType = 'S'; break; +#endif case SImode: { if (type == long_unsigned_type_node) @@ -788,10 +792,14 @@ { switch (TYPE_MODE (type)) { +#ifdef HAVE_QImode case QImode: charType = 'c'; break; +#endif +#ifdef HAVE_HImode case HImode: charType = 's'; break; +#endif case SImode: { if (type == long_integer_type_node) diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/postreload.c gcc-5.3.0-zip/gcc/postreload.c --- gcc-5.3.0-original/gcc/postreload.c 2015-07-17 09:50:38.000000000 -0400 +++ gcc-5.3.0-zip/gcc/postreload.c 2016-03-24 21:30:23.402590035 -0400 @@ -71,6 +71,14 @@ #include "df.h" #include "dbgcnt.h" +// #define DO_ZIP_DEBUGS +#ifdef DO_ZIP_DEBUGS +extern void zip_debug_rtx(const_rtx); +#define ZIP_DEBUG_LINE(STR,RTX) do { fprintf(stderr, "%s:%d/%s\n", __FILE__,__LINE__,STR); zip_debug_rtx(RTX); } while(0) +#else +#define ZIP_DEBUG_LINE(STR,RTX) +#endif + static int reload_cse_noop_set_p (rtx); static bool reload_cse_simplify (rtx_insn *, rtx); static void reload_cse_regs_1 (void); @@ -120,6 +128,8 @@ basic_block insn_bb = BLOCK_FOR_INSN (insn); unsigned insn_bb_succs = EDGE_COUNT (insn_bb->succs); + ZIP_DEBUG_LINE("RELOAD-CSE-SIMPLIFY:",insn); + if (GET_CODE (body) == SET) { int count = 0; @@ -147,6 +157,7 @@ apply_change_group (); else reload_cse_simplify_operands (insn, testreg); + ZIP_DEBUG_LINE("End of 
reload_cse_simplify_operands", insn); } else if (GET_CODE (body) == PARALLEL) { @@ -205,6 +216,7 @@ apply_change_group (); else reload_cse_simplify_operands (insn, testreg); + ZIP_DEBUG_LINE("End of reload_cse_simplify_operands", insn); } done: @@ -246,6 +258,7 @@ cfg_changed |= reload_cse_simplify (insn, testreg); cselib_process_insn (insn); + ZIP_DEBUG_LINE("End-CSE-REGS-1:",insn); } /* Clean up. */ @@ -276,6 +289,8 @@ #endif bool speed = optimize_bb_for_speed_p (BLOCK_FOR_INSN (insn)); + + ZIP_DEBUG_LINE("RELOAD:Attempting to simplify set",set); dreg = true_regnum (SET_DEST (set)); if (dreg < 0) return 0; @@ -427,6 +442,7 @@ /* Array of alternatives, sorted in order of decreasing desirability. */ int *alternative_order; + ZIP_DEBUG_LINE("Simplify-Operands", insn); extract_constrain_insn (insn); if (recog_data.n_alternatives == 0 || recog_data.n_operands == 0) @@ -519,6 +535,7 @@ SET_HARD_REG_BIT (equiv_regs[i], REGNO (l->loc)); } + ZIP_DEBUG_LINE("Simplify-Operands - A", insn); alternative_mask preferred = get_preferred_alternatives (insn); for (i = 0; i < recog_data.n_operands; i++) { @@ -617,6 +634,7 @@ } } } + ZIP_DEBUG_LINE("Simplify-Operands - B", insn); /* Record all alternatives which are better or equal to the currently matching one in the alternative_order array. 
*/ @@ -666,6 +684,7 @@ validate_change (insn, recog_data.operand_loc[i], gen_rtx_REG (mode, op_alt_regno[i][j]), 1); } + ZIP_DEBUG_LINE("Simplify-Operands - C", insn); for (i = recog_data.n_dups - 1; i >= 0; i--) { @@ -679,6 +698,7 @@ gen_rtx_REG (mode, op_alt_regno[op][j]), 1); } + ZIP_DEBUG_LINE("Simplify-Operands - C", insn); return apply_change_group (); } diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/recog.c gcc-5.3.0-zip/gcc/recog.c --- gcc-5.3.0-original/gcc/recog.c 2015-03-20 02:07:30.000000000 -0400 +++ gcc-5.3.0-zip/gcc/recog.c 2016-04-14 23:19:07.630839483 -0400 @@ -68,6 +68,15 @@ #include "df.h" #include "insn-codes.h" +// #define DO_ZIP_DEBUGS +#ifdef DO_ZIP_DEBUGS +extern void zip_debug_rtx(const_rtx); +#define ZIP_DEBUG_LINE(STR,RTX) do { fprintf(stderr, "%s:%d/%s\n", __FILE__,__LINE__,STR); zip_debug_rtx(RTX); } while(0) +#else +#define ZIP_DEBUG_LINE(STR,RTX) +#endif + + #ifndef STACK_PUSH_CODE #ifdef STACK_GROWS_DOWNWARD #define STACK_PUSH_CODE PRE_DEC @@ -2283,6 +2292,7 @@ recog_data.n_dups = 0; recog_data.is_asm = false; +ZIP_DEBUG_LINE("Extract-insn", insn); switch (GET_CODE (body)) { case USE: @@ -3671,7 +3681,6 @@ break; /* The buffer filled to the current maximum, so try to match. 
*/ - pos = peep2_buf_position (peep2_current + peep2_current_count); peep2_insn_data[pos].insn = PEEP2_EOB; COPY_REG_SET (peep2_insn_data[pos].live_before, live); @@ -3704,6 +3713,7 @@ rebuild_jump_labels (get_insns ()); if (peep2_do_cleanup_cfg) cleanup_cfg (CLEANUP_CFG_CHANGED); + } #endif /* HAVE_peephole2 */ diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/reload1.c gcc-5.3.0-zip/gcc/reload1.c --- gcc-5.3.0-original/gcc/reload1.c 2015-01-15 08:28:42.000000000 -0500 +++ gcc-5.3.0-zip/gcc/reload1.c 2016-04-20 20:51:38.590252867 -0400 @@ -72,6 +72,14 @@ #include "dumpfile.h" #include "rtl-iter.h" +// #define DO_ZIP_DEBUGS +#ifdef DO_ZIP_DEBUGS +extern void zip_debug_rtx(const_rtx); +#define ZIP_DEBUG_LINE(STR,RTX) do { fprintf(stderr, "%s:%d/%s\n", __FILE__,__LINE__,STR); zip_debug_rtx(RTX); } while(0) +#else +#define ZIP_DEBUG_LINE(STR,RTX) +#endif + /* This file contains the reload pass of the compiler, which is run after register allocation has been done. It checks that each insn is valid (operands required to be in registers really @@ -794,6 +802,20 @@ basic_block bb; bool inserted; +#ifdef DO_ZIP_DEBUGS + { + fprintf(stderr, "BEFORE-RELOAD\n"); + int total_count = 0, current_count = 0; + for (insn = first; insn; insn = NEXT_INSN (insn)) + total_count++; + for (insn = first; insn; insn = NEXT_INSN (insn)) { + fprintf(stderr, "B %3d/%3d", current_count++, total_count); + zip_debug_rtx(insn); + } + fprintf(stderr, "BEFORE-RELOAD -- END OF INSTRUCTION LIST\n"); + } +#endif + /* Make sure even insns with volatile mem refs are recognizable. 
*/ init_recog (); @@ -1366,6 +1388,20 @@ reload_completed = !failure; +#ifdef DO_ZIP_DEBUGS + { + fprintf(stderr, "AFTER-RELOAD\n"); + int total_count = 0, current_count = 0; + for (insn = first; insn; insn = NEXT_INSN (insn)) + total_count++; + for (insn = first; insn; insn = NEXT_INSN (insn)) { + fprintf(stderr, "A %3d/%3d", current_count++, total_count); + zip_debug_rtx(insn); + } + fprintf(stderr, "AFTER-RELOAD -- END OF INSTRUCTION LIST\n"); + } +#endif + return need_dce; } diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/reload.c gcc-5.3.0-zip/gcc/reload.c --- gcc-5.3.0-original/gcc/reload.c 2015-01-15 08:28:42.000000000 -0500 +++ gcc-5.3.0-zip/gcc/reload.c 2016-04-06 17:49:33.418613170 -0400 @@ -136,6 +136,15 @@ #include "target.h" #include "ira.h" +// #define DO_ZIP_DEBUGS +#ifdef DO_ZIP_DEBUGS +extern void zip_debug_rtx(const_rtx); +#define ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr, "%s:%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX); }while(0) +#else +#define ZIP_DEBUG_LINE(STR,RTX) +#endif + + /* True if X is a constant that can be forced into the constant pool. MODE is the mode of the operand, or VOIDmode if not known. */ #define CONST_POOL_OK_P(MODE, X) \ @@ -2700,6 +2709,8 @@ hard_regs_live_known = live_known; static_reload_reg_p = reload_reg_p; +ZIP_DEBUG_LINE("Find reloads\n", insn); + /* JUMP_INSNs and CALL_INSNs are not allowed to have any output reloads; neither are insns that SET cc0. Insns that use CC0 are not allowed to have any input reloads. */ @@ -2707,8 +2718,17 @@ no_output_reloads = 1; #ifdef HAVE_cc0 + // If the instruction depends upon cc0, such as a branch, if_then_else, or + // cond_exec instruction, we cannot change the input so that the instruction + // relies on another register--cc0 is specific. This requries that the + // references be only cc0 and (const_int 0), rather than allowing other + // registers here as well. 
if (reg_referenced_p (cc0_rtx, PATTERN (insn))) no_input_reloads = 1; + // If the result of an instruction is the cc0 register, that cannot + // be changed, therefore no output reloading is allowed. This only + // works if instructions *only* set the cc0 register, and not multiple + // registers. if (reg_set_p (cc0_rtx, PATTERN (insn))) no_output_reloads = 1; #endif diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/target-def.h gcc-5.3.0-zip/gcc/target-def.h --- gcc-5.3.0-original/gcc/target-def.h 2015-01-05 07:33:28.000000000 -0500 +++ gcc-5.3.0-zip/gcc/target-def.h 2016-05-12 15:20:06.896745740 -0400 @@ -29,24 +29,44 @@ rather than the .c file, then we need to wrap the default definition in a #ifndef, since files include tm.h before this one. */ +#ifndef TARGET_ASM_ALIGNED_HI_OP #define TARGET_ASM_ALIGNED_HI_OP "\t.short\t" +#endif +#ifndef TARGET_ASM_ALIGNED_SI_OP #define TARGET_ASM_ALIGNED_SI_OP "\t.long\t" +#endif #define TARGET_ASM_ALIGNED_DI_OP NULL #define TARGET_ASM_ALIGNED_TI_OP NULL /* GAS and SYSV4 assemblers accept these. 
*/ #if defined (OBJECT_FORMAT_ELF) -#define TARGET_ASM_UNALIGNED_HI_OP "\t.2byte\t" -#define TARGET_ASM_UNALIGNED_SI_OP "\t.4byte\t" -#define TARGET_ASM_UNALIGNED_DI_OP "\t.8byte\t" -#define TARGET_ASM_UNALIGNED_TI_OP NULL +# ifndef TARGET_ASM_UNALIGNED_HI_OP +# define TARGET_ASM_UNALIGNED_HI_OP "\t.2byte\t" +# endif +# ifndef TARGET_ASM_UNALIGNED_SI_OP +# define TARGET_ASM_UNALIGNED_SI_OP "\t.4byte\t" +# endif +# ifndef TARGET_ASM_UNALIGNED_DI_OP +# define TARGET_ASM_UNALIGNED_DI_OP "\t.8byte\t" +# endif +# define TARGET_ASM_UNALIGNED_TI_OP NULL #else -#define TARGET_ASM_UNALIGNED_HI_OP NULL -#define TARGET_ASM_UNALIGNED_SI_OP NULL -#define TARGET_ASM_UNALIGNED_DI_OP NULL -#define TARGET_ASM_UNALIGNED_TI_OP NULL +# ifndef TARGET_ASM_UNALIGNED_HI_OP +# define TARGET_ASM_UNALIGNED_HI_OP NULL +# endif +# ifndef TARGET_ASM_UNALIGNED_SI_OP +# define TARGET_ASM_UNALIGNED_SI_OP NULL +# endif +# ifndef TARGET_ASM_UNALIGNED_DI_OP +# define TARGET_ASM_UNALIGNED_DI_OP NULL +# endif +# define TARGET_ASM_UNALIGNED_TI_OP NULL #endif /* OBJECT_FORMAT_ELF */ +#ifndef TARGET_ASM_UNALIGNED_TI_OP +#define TARGET_ASM_UNALIGNED_TI_OP NULL +#endif + #if !defined(TARGET_ASM_CONSTRUCTOR) && !defined(USE_COLLECT2) # ifdef CTORS_SECTION_ASM_OP # define TARGET_ASM_CONSTRUCTOR default_ctor_section_asm_out_constructor diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/testsuite/lib/target-supports.exp gcc-5.3.0-zip/gcc/testsuite/lib/target-supports.exp --- gcc-5.3.0-original/gcc/testsuite/lib/target-supports.exp 2015-11-26 05:10:58.000000000 -0500 +++ gcc-5.3.0-zip/gcc/testsuite/lib/target-supports.exp 2016-01-30 15:14:21.620586694 -0500 @@ -503,6 +503,11 @@ return 0 } + # Zip CPU doesn't support profiling (yet) + if { [istarget zip*] } { + return 0 + } + # MinGW does not support -p. 
if { [istarget *-*-mingw*] && $test_what == "-p" } { return 0 @@ -986,6 +991,12 @@ }] } + # No real hardware FPU support for ZipCPU yet--even though the instruction + # set supports it, the CPU just isn't ready yet. + if { [istarget zip*-*-*] } { + return 0 + } + # This proc is actually checking the availabilty of FPU # support for doubles, so on the RX we must fail if the # 64-bit double multilib has been selected. diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/tree-ssa-math-opts.c gcc-5.3.0-zip/gcc/tree-ssa-math-opts.c --- gcc-5.3.0-original/gcc/tree-ssa-math-opts.c 2015-08-11 03:58:07.000000000 -0400 +++ gcc-5.3.0-zip/gcc/tree-ssa-math-opts.c 2016-02-12 11:21:11.309149239 -0500 @@ -972,7 +972,7 @@ { if (val & 1) { - digit = val & ((1 << POWI_WINDOW_SIZE) - 1); + digit = val & ((1l << POWI_WINDOW_SIZE) - 1); result += powi_lookup_cost (digit, cache) + POWI_WINDOW_SIZE + 1; val >>= POWI_WINDOW_SIZE; @@ -1012,7 +1012,7 @@ } else if (n & 1) { - digit = n & ((1 << POWI_WINDOW_SIZE) - 1); + digit = n & ((1l << POWI_WINDOW_SIZE) - 1); op0 = powi_as_mults_1 (gsi, loc, type, n - digit, cache); op1 = powi_as_mults_1 (gsi, loc, type, digit, cache); } @@ -1651,7 +1651,7 @@ }; #define BITS_PER_MARKER 8 -#define MARKER_MASK ((1 << BITS_PER_MARKER) - 1) +#define MARKER_MASK ((1l << BITS_PER_MARKER) - 1) #define MARKER_BYTE_UNKNOWN MARKER_MASK #define HEAD_MARKER(n, size) \ ((n) & ((uint64_t) MARKER_MASK << (((size) - 1) * BITS_PER_MARKER))) @@ -1687,7 +1687,7 @@ /* Zero out the extra bits of N in order to avoid them being shifted into the significant bits. */ if (size < 64 / BITS_PER_MARKER) - n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1; + n->n &= ((uint64_t) 1l << (size * BITS_PER_MARKER)) - 1; switch (code) { @@ -1714,7 +1714,7 @@ } /* Zero unused bits for size. 
*/ if (size < 64 / BITS_PER_MARKER) - n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1; + n->n &= ((uint64_t) 1l << (size * BITS_PER_MARKER)) - 1; return true; } @@ -1761,7 +1761,7 @@ n->n = CMPNOP; if (size < 64 / BITS_PER_MARKER) - n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1; + n->n &= ((uint64_t) 1l << (size * BITS_PER_MARKER)) - 1; return true; } @@ -2020,7 +2020,7 @@ { int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT; uint64_t val = int_cst_value (rhs2), mask = 0; - uint64_t tmp = (1 << BITS_PER_UNIT) - 1; + uint64_t tmp = (1l << BITS_PER_UNIT) - 1; /* Only constants masking full bytes are allowed. */ for (i = 0; i < size; i++, tmp <<= BITS_PER_UNIT) @@ -2064,7 +2064,7 @@ { /* If STMT casts to a smaller type mask out the bits not belonging to the target type. */ - n->n &= ((uint64_t) 1 << (type_size * BITS_PER_MARKER)) - 1; + n->n &= ((uint64_t) 1l << (type_size * BITS_PER_MARKER)) - 1; } n->type = type; if (!n->base_addr) @@ -2177,7 +2177,7 @@ { uint64_t mask; - mask = ((uint64_t) 1 << (n->range * BITS_PER_MARKER)) - 1; + mask = ((uint64_t) 1l << (n->range * BITS_PER_MARKER)) - 1; cmpxchg >>= (64 / BITS_PER_MARKER - n->range) * BITS_PER_MARKER; cmpnop &= mask; } diff -Naur '--exclude=*.swp' gcc-5.3.0-original/gcc/var-tracking.c gcc-5.3.0-zip/gcc/var-tracking.c --- gcc-5.3.0-original/gcc/var-tracking.c 2015-03-26 09:19:00.000000000 -0400 +++ gcc-5.3.0-zip/gcc/var-tracking.c 2016-03-31 18:55:03.584197958 -0400 @@ -143,6 +143,13 @@ #include "rtl-iter.h" #include "fibonacci_heap.h" +#ifdef DO_ZIP_DEBUGS +#include <stdio.h> +extern void zip_debug_rtx(const_rtx); +#define ZIP_DEBUG_LINE(STR,RTX) do {fprintf(stderr,"%s:%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX); } while(0) +#else +#define ZIP_DEBUG_LINE(STR,RTX) +#endif typedef fibonacci_heap <long, basic_block_def> bb_heap_t; typedef fibonacci_node <long, basic_block_def> bb_heap_node_t; @@ -6356,6 +6363,7 @@ machine_mode indmode = TYPE_MODE (TREE_TYPE (argtype)); rtx mem = 
gen_rtx_MEM (indmode, x); + ZIP_DEBUG_LINE("Var-tracking mem-ref", mem); cselib_val *val = cselib_lookup (mem, indmode, 0, VOIDmode); if (val && cselib_preserved_value_p (val)) { @@ -6462,12 +6470,14 @@ machine_mode mode = TYPE_MODE (TREE_TYPE (OBJ_TYPE_REF_EXPR (obj_type_ref))); rtx clobbered = gen_rtx_MEM (mode, this_arg); + ZIP_DEBUG_LINE("Var-tracking mem-ref/clobbered", clobbered); HOST_WIDE_INT token = tree_to_shwi (OBJ_TYPE_REF_TOKEN (obj_type_ref)); if (token) clobbered = plus_constant (mode, clobbered, token * GET_MODE_SIZE (mode)); clobbered = gen_rtx_MEM (mode, clobbered); + ZIP_DEBUG_LINE("Var-tracking mem-ref/clobbered-2", clobbered); x = gen_rtx_CONCAT (mode, gen_rtx_CLOBBER (VOIDmode, pc_rtx), clobbered); call_arguments = gen_rtx_EXPR_LIST (VOIDmode, x, call_arguments); @@ -9790,6 +9800,7 @@ machine_mode indmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (parm))); rtx mem = gen_rtx_MEM (indmode, incoming); + ZIP_DEBUG_LINE("Var-tracking mem-ref/incoming", incoming); cselib_val *val = cselib_lookup_from_insn (mem, indmode, true, VOIDmode, get_insns ()); diff -Naur '--exclude=*.swp' gcc-5.3.0-original/libgcc/config.host gcc-5.3.0-zip/libgcc/config.host --- gcc-5.3.0-original/libgcc/config.host 2015-10-01 08:01:18.000000000 -0400 +++ gcc-5.3.0-zip/libgcc/config.host 2016-01-30 15:16:00.459883558 -0500 @@ -195,6 +195,9 @@ tic6x-*-*) cpu_type=c6x ;; +zip*) + cpu_type=zip + ;; esac # Common parts for widely ported systems. @@ -1300,6 +1303,9 @@ echo "*** Configuration ${host} not supported" 1>&2 exit 1 ;; +zip*) + tmake_file="${tmake_file} t-softfp-sfdf t-softfp" + ;; esac case ${host} in diff -Naur '--exclude=*.swp' gcc-5.3.0-original/libgomp/configure.tgt gcc-5.3.0-zip/libgomp/configure.tgt --- gcc-5.3.0-original/libgomp/configure.tgt 2015-03-13 06:57:07.000000000 -0400 +++ gcc-5.3.0-zip/libgomp/configure.tgt 2016-01-30 15:16:51.323521641 -0500 @@ -150,6 +150,9 @@ # Need to link with -lpthread so libgomp.so is self-contained. 
XLDFLAGS="${XLDFLAGS} -lpthread" ;; + zip*) + config_path="bsd posix" + ;; *) ;;
Go to most recent revision | Compare with Previous | Blame | View Log