OpenCores
URL https://opencores.org/ocsvn/zipcpu/zipcpu/trunk

Subversion Repositories zipcpu

[/] [zipcpu/] [trunk/] [sw/] [gcc-zippatch.patch] - Rev 206

Go to most recent revision | Compare with Previous | Blame | View Log

diff -Naur '--exclude=*.swp' gcc-6.2.0/config.sub gcc-6.2.0-zip/config.sub
--- gcc-6.2.0/config.sub	2015-12-31 16:13:28.000000000 -0500
+++ gcc-6.2.0-zip/config.sub	2017-01-11 11:07:21.116065311 -0500
@@ -355,6 +355,14 @@
 	xscaleel)
 		basic_machine=armel-unknown
 		;;
+	zip-*-linux*)
+		basic_machine=zip
+		os=-linux
+		;;
+	zip*)
+		basic_machine=zip-unknown
+		os=-none
+		;;
 
 	# We use `pc' rather than `unknown'
 	# because (1) that's what they normally are, and
diff -Naur '--exclude=*.swp' gcc-6.2.0/configure gcc-6.2.0-zip/configure
--- gcc-6.2.0/configure	2016-03-17 18:54:19.000000000 -0400
+++ gcc-6.2.0-zip/configure	2017-02-06 21:54:22.244807700 -0500
@@ -3548,6 +3548,44 @@
   ft32-*-*)
     noconfigdirs="$noconfigdirs ${libgcj}"
     ;;
+  zip*)
+    noconfigdirs="$noconfigdirs ${libgcj}"
+    noconfigdirs="$noconfigdirs target-boehm-gc"
+    noconfigdirs="$noconfigdirs target-libgfortran"
+    # noconfigdirs="$noconfigdirs target-libsanitizer"
+    # noconfigdirs="$noconfigdirs target-libada"
+    # noconfigdirs="$noconfigdirs target-libatomic"
+    # noconfigdirs="$noconfigdirs target-libcilkrts"
+    # noconfigdirs="$noconfigdirs target-libitm"
+    # noconfigdirs="$noconfigdirs target-libquadmath"
+    # noconfigdirs="$noconfigdirs target-libstdc++-v3"
+    # noconfigdirs="$noconfigdirs target-libssp"
+    # noconfigdirs="$noconfigdirs target-libgo"
+    # noconfigdirs="$noconfigdirs target-libgomp"
+    # noconfigdirs="$noconfigdirs target-libvtv"
+    # noconfigdirs="$noconfigdirs target-libobjc"
+	# target-libgcc
+	#	target-liboffloadmic
+	#	target-libmpx	# Only gets enabled by request
+	#	target-libbacktrace
+	#	${libgcj}
+	#	target-boehm-gc
+	#	target-libada
+	#	target-libatomic
+	#	target-libcilkrts
+	#	target-libgfortran
+	#	target-libgo
+	#	target-libgomp
+	#	target-libitm
+	#	target-libobjc
+	#	target-libquadmath
+	#	target-libsanitizer
+	#	target-libstdc++-v3
+	#	target-libssp
+	#	target-libvtv
+	# target-libgloss
+	# target-newlib
+    ;;
   *-*-lynxos*)
     noconfigdirs="$noconfigdirs ${libgcj}"
     ;;
@@ -3575,6 +3613,9 @@
     *-*-aix*)
 	noconfigdirs="$noconfigdirs target-libgo"
 	;;
+    zip*)
+	noconfigdirs="$noconfigdirs target-libgo"
+	;;
     esac
 fi
 
@@ -3971,6 +4012,9 @@
   vax-*-*)
     noconfigdirs="$noconfigdirs target-newlib target-libgloss"
     ;;
+  zip*)
+    noconfigdirs="$noconfigdirs target-libffi target-boehm-gc gdb gprof"
+    ;;
 esac
 
 # If we aren't building newlib, then don't build libgloss, since libgloss
@@ -6785,16 +6829,16 @@
 # CFLAGS_FOR_TARGET and CXXFLAGS_FOR_TARGET.
 if test "x$CFLAGS_FOR_TARGET" = x; then
   if test "x${is_cross_compiler}" = xyes; then
-    CFLAGS_FOR_TARGET="-g -O2"
+    CFLAGS_FOR_TARGET="-O3"
   else
     CFLAGS_FOR_TARGET=$CFLAGS
     case " $CFLAGS " in
-      *" -O2 "*) ;;
-      *) CFLAGS_FOR_TARGET="-O2 $CFLAGS_FOR_TARGET" ;;
+      *" -O3 "*) ;;
+      *) CFLAGS_FOR_TARGET="-O3 $CFLAGS_FOR_TARGET" ;;
     esac
     case " $CFLAGS " in
       *" -g "* | *" -g3 "*) ;;
-      *) CFLAGS_FOR_TARGET="-g $CFLAGS_FOR_TARGET" ;;
+      *) CFLAGS_FOR_TARGET="$CFLAGS_FOR_TARGET" ;;
     esac
   fi
 fi
@@ -6802,16 +6846,16 @@
 
 if test "x$CXXFLAGS_FOR_TARGET" = x; then
   if test "x${is_cross_compiler}" = xyes; then
-    CXXFLAGS_FOR_TARGET="-g -O2"
+    CXXFLAGS_FOR_TARGET="-O3"
   else
     CXXFLAGS_FOR_TARGET=$CXXFLAGS
     case " $CXXFLAGS " in
-      *" -O2 "*) ;;
-      *) CXXFLAGS_FOR_TARGET="-O2 $CXXFLAGS_FOR_TARGET" ;;
+      *" -O3 "*) ;;
+      *) CXXFLAGS_FOR_TARGET="-O3 $CXXFLAGS_FOR_TARGET" ;;
     esac
     case " $CXXFLAGS " in
       *" -g "* | *" -g3 "*) ;;
-      *) CXXFLAGS_FOR_TARGET="-g $CXXFLAGS_FOR_TARGET" ;;
+      *) CXXFLAGS_FOR_TARGET="$CXXFLAGS_FOR_TARGET" ;;
     esac
   fi
 fi
diff -Naur '--exclude=*.swp' gcc-6.2.0/configure.ac gcc-6.2.0-zip/configure.ac
--- gcc-6.2.0/configure.ac	2016-03-17 18:54:19.000000000 -0400
+++ gcc-6.2.0-zip/configure.ac	2017-01-10 12:43:23.819301273 -0500
@@ -884,6 +884,9 @@
   ft32-*-*)
     noconfigdirs="$noconfigdirs ${libgcj}"
     ;;
+  zip*)
+    noconfigdirs="$noconfigdirs ${libgcj}"
+    ;;
   *-*-lynxos*)
     noconfigdirs="$noconfigdirs ${libgcj}"
     ;;
@@ -911,6 +914,9 @@
     *-*-aix*)
 	noconfigdirs="$noconfigdirs target-libgo"
 	;;
+    zip*)
+	noconfigdirs="$noconfigdirs target-libgo"
+	;;
     esac
 fi
 
@@ -1307,6 +1313,10 @@
   vax-*-*)
     noconfigdirs="$noconfigdirs target-newlib target-libgloss"
     ;;
+  zip*)
+    noconfigdirs="$noconfigdirs target-libffi target-boehm-gc gdb gprof ${libgcj}"
+    unsupported_languages="$unsupported_languages fortran"
+    ;;
 esac
 
 # If we aren't building newlib, then don't build libgloss, since libgloss
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/cfgexpand.c gcc-6.2.0-zip/gcc/cfgexpand.c
--- gcc-6.2.0/gcc/cfgexpand.c	2016-04-27 08:23:50.000000000 -0400
+++ gcc-6.2.0-zip/gcc/cfgexpand.c	2016-12-31 16:38:36.195534819 -0500
@@ -74,6 +74,15 @@
 #include "tree-chkp.h"
 #include "rtl-chkp.h"
 
+
+#ifdef	DO_ZIP_DEBUGS
+#include <stdio.h>
+#define	ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s;%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+extern	void	zip_debug_rtx(const_rtx);
+#else
+#define	ZIP_DEBUG_LINE(STR,RTX)
+#endif
+
 /* Some systems use __main in a way incompatible with its use in gcc, in these
    cases use the macros NAME__MAIN to give a quoted symbol and SYMBOL__MAIN to
    give the same symbol without quotes for an alternative entry point.  You
@@ -1172,7 +1181,7 @@
 		base_align = crtl->max_used_stack_slot_alignment;
 	      else
 		base_align = MAX (crtl->max_used_stack_slot_alignment,
-				  GET_MODE_ALIGNMENT (SImode)
+				  GET_MODE_ALIGNMENT (word_mode)
 				  << ASAN_SHADOW_SHIFT);
 	    }
 	  else
@@ -2225,7 +2234,7 @@
 	  data.asan_vec.safe_push (offset);
 	  /* Leave space for alignment if STRICT_ALIGNMENT.  */
 	  if (STRICT_ALIGNMENT)
-	    alloc_stack_frame_space ((GET_MODE_ALIGNMENT (SImode)
+	    alloc_stack_frame_space ((GET_MODE_ALIGNMENT (word_mode)
 				      << ASAN_SHADOW_SHIFT)
 				     / BITS_PER_UNIT, 1);
 
@@ -5745,7 +5754,7 @@
       && (last = get_last_insn ())
       && JUMP_P (last))
     {
-      rtx dummy = gen_reg_rtx (SImode);
+      rtx dummy = gen_reg_rtx (word_mode);
       emit_insn_after_noloc (gen_move_insn (dummy, dummy), last, NULL);
     }
 
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/cgraphbuild.c gcc-6.2.0-zip/gcc/cgraphbuild.c
--- gcc-6.2.0/gcc/cgraphbuild.c	2016-01-04 09:30:50.000000000 -0500
+++ gcc-6.2.0-zip/gcc/cgraphbuild.c	2016-12-31 16:39:44.963107994 -0500
@@ -32,6 +32,15 @@
 #include "ipa-utils.h"
 #include "except.h"
 
+
+#ifdef	DO_ZIP_DEBUGS
+#include <stdio.h>
+#define	ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s;%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+extern	void	zip_debug_rtx(const_rtx);
+#else
+#define	ZIP_DEBUG_LINE(STR,RTX)
+#endif
+
 /* Context of record_reference.  */
 struct record_reference_ctx
 {
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/combine.c gcc-6.2.0-zip/gcc/combine.c
--- gcc-6.2.0/gcc/combine.c	2016-08-08 06:06:15.000000000 -0400
+++ gcc-6.2.0-zip/gcc/combine.c	2017-02-03 09:25:19.676720321 -0500
@@ -103,6 +103,15 @@
 #include "rtl-iter.h"
 #include "print-rtl.h"
 
+#define	DO_ZIP_DEBUGS
+#ifdef	DO_ZIP_DEBUGS
+#include <stdio.h>
+#define	ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s:%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+extern	void	zip_debug_rtx(const_rtx);
+#else
+#define	ZIP_DEBUG_LINE(STR,RTX)
+#endif
+
 #ifndef LOAD_EXTEND_OP
 #define LOAD_EXTEND_OP(M) UNKNOWN
 #endif
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/common/config/zip/zip-common.c gcc-6.2.0-zip/gcc/common/config/zip/zip-common.c
--- gcc-6.2.0/gcc/common/config/zip/zip-common.c	1969-12-31 19:00:00.000000000 -0500
+++ gcc-6.2.0-zip/gcc/common/config/zip/zip-common.c	2017-01-11 09:41:34.483106099 -0500
@@ -0,0 +1,52 @@
+////////////////////////////////////////////////////////////////////////////////
+//
+// Filename: 	common/config/zip/zip-common.c
+//
+// Project:	Zip CPU backend for the GNU Compiler Collection
+//
+// Purpose:	To eliminate the frame register automatically.
+//
+// Creator:	Dan Gisselquist, Ph.D.
+//		Gisselquist Technology, LLC
+//
+////////////////////////////////////////////////////////////////////////////////
+//
+// Copyright (C) 2016-2017, Gisselquist Technology, LLC
+//
+// This program is free software (firmware): you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as published
+// by the Free Software Foundation, either version 3 of the License, or (at
+// your option) any later version.
+//
+// This program is distributed in the hope that it will be useful, but WITHOUT
+// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this program.  (It's in the $(ROOT)/doc directory, run make with no
+// target there if the PDF file isn't present.)  If not, see
+// <http://www.gnu.org/licenses/> for a copy.
+//
+// License:	GPL, v3, as defined and found on www.gnu.org,
+//		http://www.gnu.org/licenses/gpl.html
+//
+//
+////////////////////////////////////////////////////////////////////////////////
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "common/common-target.h"
+#include "common/common-target-def.h"
+
+static const struct default_options zip_option_optimization_table[] =
+  {
+    { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
+    { OPT_LEVELS_NONE, 0, NULL, 0 }
+  };
+
+#undef	TARGET_OPTION_OPTIMIZATION_TABLE
+#define	TARGET_OPTION_OPTIMIZATION_TABLE	zip_option_optimization_table
+
+struct gcc_targetm_common	targetm_common = TARGETM_COMMON_INITIALIZER;
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/config/zip/genzipops.c gcc-6.2.0-zip/gcc/config/zip/genzipops.c
--- gcc-6.2.0/gcc/config/zip/genzipops.c	1969-12-31 19:00:00.000000000 -0500
+++ gcc-6.2.0-zip/gcc/config/zip/genzipops.c	2017-03-07 12:03:59.062584503 -0500
@@ -0,0 +1,444 @@
+////////////////////////////////////////////////////////////////////////////////
+//
+// Filename:	genzipops.c
+//
+// Project:	Zip CPU -- a small, lightweight, RISC CPU soft core
+//
+// Purpose:	This program generates the zip-ops.md machine description file.
+//
+//	While I understand that this is not GCC's preferred method of generating
+//	machine description files, there were just so many instructions to
+//	generate, and so many forms of them, and the GCC infrastructure didn't
+//	support the conditional execution model of the ZipCPU that ... I built
+//	it this way.
+//
+//	As of this writing, building zip-ops.md is not an automatic part of
+//	making GCC.  To build genzipops, just type:
+//
+//	g++ genzipops.c -o genzipops
+//
+//	And to run it, type:
+//
+//	genzipops > zip-ops.md
+//
+//	genzipops takes no arguments, and does nothing but write the machine
+//	descriptions to the standard output.
+//
+//
+// Creator:	Dan Gisselquist, Ph.D.
+//		Gisselquist Technology, LLC
+//
+////////////////////////////////////////////////////////////////////////////////
+//
+// Copyright (C) 2017, Gisselquist Technology, LLC
+//
+// This program is free software (firmware): you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as published
+// by the Free Software Foundation, either version 3 of the License, or (at
+// your option) any later version.
+//
+// This program is distributed in the hope that it will be useful, but WITHOUT
+// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this program.  (It's in the $(ROOT)/doc directory.  Run make with no
+// target there if the PDF file isn't present.)  If not, see
+// <http://www.gnu.org/licenses/> for a copy.
+//
+// License:	GPL, v3, as defined and found on www.gnu.org,
+//		http://www.gnu.org/licenses/gpl.html
+//
+//
+////////////////////////////////////////////////////////////////////////////////
+//
+//
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+void	legal(FILE *fp) {
+	fprintf(fp, ""
+";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;\n"
+";;\n"
+";; Filename:	zip-ops.md\n"
+";;\n"
+";; Project:	Zip CPU -- a small, lightweight, RISC CPU soft core\n"
+";;\n"
+";; Purpose:	This is a computer generated machine description of the\n"
+";;		ZipCPU\'s operations.  It is computer generated simply for\n"
+";;	two reasons.  First, I can\'t seem to find a way to generate this\n"
+";;	information within GCC\'s current constructs.  Specifically, the\n"
+";;	CPU\'s instructions normally set the condition codes, unless they\n"
+";;	are conditional instructions when they don\'t.  Second, the ZipCPU is\n"
+";;	actually quite regular.  Almost all of the instructions have the same\n"
+";;	form.  This form turns into many, many RTL instructions.  Because the\n"
+";;	CPU doesn\'t match any of the others within GCC, that means either\n"
+";;	I have a *lot* of cut, copy, paste, and edit to do to create the file\n"
+";;	and upon any and every edit, or I need to build a program to generate\n"
+";;	the remaining .md constructs.  Hence, I chose the latter to minimize\n"
+";;	the amount of work I needed to do.\n"
+";;\n"
+";;\n"
+";; Creator:	Dan Gisselquist, Ph.D.\n"
+";;		Gisselquist Technology, LLC\n"
+";;\n"
+";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;\n"
+";;\n"
+";; Copyright (C) 2017, Gisselquist Technology, LLC\n"
+";;\n"
+";; This program is free software (firmware): you can redistribute it and/or\n"
+";; modify it under the terms of  the GNU General Public License as published\n"
+";; by the Free Software Foundation, either version 3 of the License, or (at\n"
+";; your option) any later version.\n"
+";;\n"
+";; This program is distributed in the hope that it will be useful, but WITHOUT\n"
+";; ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY or\n"
+";; FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License\n"
+";; for more details.\n"
+";;\n"
+";; License:	GPL, v3, as defined and found on www.gnu.org,\n"
+";;		http://www.gnu.org/licenses/gpl.html\n"
+";;\n"
+";;\n"
+";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;\n"
+";;\n"
+";;\n");
+}
+
+void	gen_heading(FILE *fp, const char *heading) {
+	fprintf(fp, ";\n;\n; %s\n;\n;\n", heading);
+}
+
+void	genzip_condop(FILE *fp, const char *md_opname,
+		const char *rtxstr, const char *insn_cond,
+		const char *zip_op,
+		const char *rtx_cond, const char *zip_cond) {
+
+	fprintf(fp, "(define_insn \"%s_%s\"\n"
+		"\t[(cond_exec (%s (reg:CC CC_REG) (const_int 0))\n"
+		"\t\t\t%s)]\n"
+		"\t\"%s\"\t; Condition\n"
+		"\t\"%s.%s\\t%%1,%%0\t; genzip, conditional operator\"\t; Template\n"
+		"\t[(set_attr \"predicable\" \"no\") "
+		"(set_attr \"ccresult\" \"unchanged\")])\n;\n;\n",
+		md_opname, rtx_cond, rtx_cond, rtxstr, insn_cond, zip_op, zip_cond);
+
+}
+
+void	genzipop_long(FILE *fp, const char *md_opname, const char *uncond_rtx, const char *insn_cond, const char *split_rtx, const char *dup_rtx, const char *zip_op) {
+	char	heading[128];
+	sprintf(heading, "%s (genzipop_long)", zip_op);
+	fprintf(fp, ";\n;\n;\n; %s (genzipop_long)\n;\n;\n;\n", zip_op);
+
+	fprintf(fp, "(define_insn \"%s\"\n"
+"\t[%s\n"
+"\t(clobber (reg:CC CC_REG))]\n"
+"\t\"%s\"\n"
+"\t\"%s\\t%%2,%%0\t; %s\"\n"
+"\t[(set_attr \"predicable\" \"no\") (set_attr \"ccresult\" \"set\")])\n;\n;\n",
+		md_opname, uncond_rtx, insn_cond, zip_op, md_opname);
+
+
+	fprintf(fp, "(define_insn \"%s_raw\"\n"
+"\t[%s\n"
+"\t(set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]\n"
+"\t\"%s\"\n"
+"\t\"%s\\t%%1,%%0\t; %s_raw\"\n"
+"\t[(set_attr \"predicable\" \"no\") (set_attr \"ccresult\" \"set\")])\n;\n;\n",
+	md_opname, dup_rtx, insn_cond, zip_op, md_opname);
+
+	genzip_condop(fp, md_opname, dup_rtx, insn_cond, zip_op, "eq", "Z");
+	genzip_condop(fp, md_opname, dup_rtx, insn_cond, zip_op, "ne", "NZ");
+	genzip_condop(fp, md_opname, dup_rtx, insn_cond, zip_op, "lt", "LT");
+	genzip_condop(fp, md_opname, dup_rtx, insn_cond, zip_op, "ge", "GE");
+	genzip_condop(fp, md_opname, dup_rtx, insn_cond, zip_op, "ltu", "C");
+	genzip_condop(fp, md_opname, dup_rtx, insn_cond, zip_op, "geu", "NC");
+}
+
+void	genzipop(FILE *fp, const char *md_opname, const char *rtx_name, const char *insn_cond, const char *zip_op) {
+	char	rtxstr[512], splitstr[512], dupstr[512], altname[64];
+
+	sprintf(rtxstr, 
+"(set (match_operand:SI 0 \"register_operand\" \"=r\")\n"
+"\t\t(%s (match_operand:SI 1 \"register_operand\" \"0\")\n"
+"\t\t\t(match_operand:SI 2 \"zip_opb_single_operand_p\" \"rO\")))", rtx_name);
+	sprintf(splitstr,
+	    "(set (match_dup 0) (%s (match_dup 0) (match_dup 2)))", rtx_name);
+
+	sprintf(dupstr, 
+"(set (match_operand:SI 0 \"register_operand\" \"=r\")\n"
+"\t\t(%s (match_dup 0)\n"
+"\t\t\t(match_operand:SI 1 \"zip_opb_single_operand_p\" \"rO\")))", rtx_name);
+
+	genzipop_long(fp, md_opname, rtxstr, insn_cond, splitstr, dupstr, zip_op);
+
+	sprintf(rtxstr, 
+"(set (match_operand:SI 0 \"register_operand\" \"=r\")\n"
+"\t\t(%s (match_operand:SI 1 \"register_operand\" \"0\")\n"
+"\t\t\t(plus:SI (match_operand:SI 2 \"register_operand\" \"r\")\n"
+"\t\t\t\t(match_operand:SI 3 \"const_int_operand\" \"N\"))))", rtx_name);
+	sprintf(splitstr,
+	    "(set (match_dup 0) (%s (match_dup 0)\n"
+"\t\t\t(plus:SI (match_dup 2) (match_dup 3))))", rtx_name);
+
+	sprintf(dupstr, 
+"(set (match_operand:SI 0 \"register_operand\" \"=r\")\n"
+"\t\t(%s (match_dup 0)\n"
+"\t\t\t(plus:SI (match_operand:SI 1 \"register_operand\" \"r\")\n"
+"\t\t\t\t(match_operand:SI 2 \"const_int_operand\" \"N\"))))", rtx_name);
+
+	sprintf(altname, "%s_off", md_opname);
+
+	genzipop_long(fp, altname, rtxstr, insn_cond, splitstr, dupstr, zip_op);
+}
+
+void	gencmov(FILE *fp, const char *md_opname, const char *md_cond, const char *zip_cond) {
+	fprintf(fp, ";\n;\n"
+"(define_insn \"%s_%s\"\n"
+	"\t[(set (match_operand:SI 0 \"register_operand\" \"=r,r,r,Q\")\n"
+		"\t\t(if_then_else:SI (%s (reg:CC CC_REG) (const_int 0))\n"
+		"\t\t\t(match_operand:SI 1 \"general_operand\" \"r,Q,i,r\")\n"
+		"\t\t\t(match_dup 0)))]\n"
+	"\t\"\"\n"
+	"\t\"@\n"
+	"\tMOV.%s\t%%1,%%0\t; cmov\n"
+	"\tLW.%s\t%%1,%%0\t; cmov\n"
+	"\tLDI.%s\t%%1,%%0\t; cmov\n"
+	"\tSW.%s\t%%1,%%0\t; cmov\"\n"
+	"\t[(set_attr \"predicable\" \"no\") (set_attr \"ccresult\" \"unchanged\")])\n",
+	md_opname, md_cond, md_cond, zip_cond, zip_cond, zip_cond, zip_cond);
+
+}
+
+void	gencadd(FILE *fp, const char *md_opname, const char *md_cond, const char *zip_cond) {
+	fprintf(fp, ";\n;\n"
+"(define_insn \"%s_%s\"\n"
+	"\t[(set (match_operand:SI 0 \"register_operand\" \"=r\")\n"
+		"\t\t(if_then_else:SI (%s (reg:CC CC_REG) (const_int 0))\n"
+			"\t\t\t(plus:SI (match_dup 0)\n"
+				"\t\t\t\t(match_operand:SI 1 \"zip_opb_single_operand_p\" \"rO\"))\n"
+			"\t\t\t(match_dup 0)))]\n"
+	"\t\"\"\n"
+	"\t\"ADD.%s\t%%1,%%0\t; cadd\"\n"
+	"\t[(set_attr \"predicable\" \"no\") (set_attr \"ccresult\" \"unchanged\")])\n",
+	md_opname, md_cond, md_cond, zip_cond);
+}
+
+void	gencnot(FILE *fp, const char *md_opname, const char *md_cond, const char *zip_cond) {
+	fprintf(fp, ";\n;\n"
+"(define_insn \"%s_%s\"\n"
+	"\t[(set (match_operand:SI 0 \"register_operand\" \"=r\")\n"
+		"\t\t(if_then_else:SI (%s (reg:CC CC_REG) (const_int 0))\n"
+			"\t\t\t(xor:SI (match_dup 0)\n"
+				"\t\t\t\t(const_int -1))\n"
+			"\t\t\t(match_dup 0)))]\n"
+	"\t\"\"\n"
+	"\t\"NOT.%s\t%%0\t; cnot\"\n"
+	"\t[(set_attr \"predicable\" \"no\") (set_attr \"ccresult\" \"unchanged\")])\n",
+	md_opname, md_cond, md_cond, zip_cond);
+}
+
+void	gencneg(FILE *fp, const char *md_opname, const char *md_cond, const char *zip_cond) {
+	fprintf(fp, ";\n;\n"
+"(define_insn \"%s_%s\"\n"
+	"\t[(set (match_operand:SI 0 \"register_operand\" \"+r\")\n"
+		"\t\t(if_then_else:SI (%s (reg:CC CC_REG) (const_int 0))\n"
+			"\t\t\t(neg:SI (match_dup 0))\n"
+			"\t\t\t(match_dup 0)))]\n"
+	"\t\"\"\n"
+	"\t\"NEG.%s\t%%0\t; cneg\"\n"
+	"\t[(set_attr \"predicable\" \"no\") (set_attr \"ccresult\" \"unchanged\")])\n",
+	md_opname, md_cond, md_cond, zip_cond);
+}
+
+
+void	gencand(FILE *fp, const char *md_opname, const char *md_cond, const char *zip_cond) {
+	fprintf(fp, ";\n;\n"
+"(define_insn \"%s_%s\"\n"
+	"\t[(set (match_operand:SI 0 \"register_operand\" \"+r\")\n"
+		"\t\t(if_then_else:SI (%s (reg:CC CC_REG) (const_int 0))\n"
+			"\t\t\t(and:SI (match_dup 0) (match_operand:SI 1 \"zip_opb_single_operand_p\" \"rO\"))\n"
+			"\t\t\t(match_dup 0)))]\n"
+	"\t\"\"\n"
+	"\t\"AND.%s\t%%1,%%0\t; cand\"\n"
+	"\t[(set_attr \"predicable\" \"no\") (set_attr \"ccresult\" \"unchanged\")])\n",
+	md_opname, md_cond, md_cond, zip_cond);
+}
+
+
+void	gencior(FILE *fp, const char *md_opname, const char *md_cond, const char *zip_cond) {
+	fprintf(fp, ";\n;\n"
+"(define_insn \"%s_%s\"\n"
+	"\t[(set (match_operand:SI 0 \"register_operand\" \"+r\")\n"
+		"\t\t(if_then_else:SI (%s (reg:CC CC_REG) (const_int 0))\n"
+			"\t\t\t(ior:SI (match_dup 0) (match_operand:SI 1 \"zip_opb_single_operand_p\" \"rO\"))\n"
+			"\t\t\t(match_dup 0)))]\n"
+	"\t\"\"\n"
+	"\t\"OR.%s\t%%1,%%0\t; cior\"\n"
+	"\t[(set_attr \"predicable\" \"no\") (set_attr \"ccresult\" \"unchanged\")])\n",
+	md_opname, md_cond, md_cond, zip_cond);
+}
+
+void	gencxor(FILE *fp, const char *md_opname, const char *md_cond, const char *zip_cond) {
+	fprintf(fp, ";\n;\n"
+"(define_insn \"%s_%s\"\n"
+	"\t[(set (match_operand:SI 0 \"register_operand\" \"+r\")\n"
+		"\t\t(if_then_else:SI (%s (reg:CC CC_REG) (const_int 0))\n"
+			"\t\t\t(xor:SI (match_dup 0) (match_operand:SI 1 \"zip_opb_single_operand_p\" \"rO\"))\n"
+			"\t\t\t(match_dup 0)))]\n"
+	"\t\"\"\n"
+	"\t\"XOR.%s\t%%1,%%0\t; cxor\"\n"
+	"\t[(set_attr \"predicable\" \"no\") (set_attr \"ccresult\" \"unchanged\")])\n",
+	md_opname, md_cond, md_cond, zip_cond);
+}
+
+void	usage(void) {
+	printf("USAGE: genzipops <new-zip-ops.md filename>\n");
+}
+
+const	char	*TMPPATH = ".zip-ops.md";
+const	char	*TAILPATH = "zip-ops.md";
+
+int main(int argc, char **argv) {
+	FILE	*fp = fopen(TMPPATH, "w");
+	const char	*newname = TAILPATH;
+
+	if ((argc>1)&&(argv[1][0] == '-')) {
+		usage();
+		exit(EXIT_FAILURE);
+	}
+
+	if (argc>1) {
+		if ((strlen(argv[1])>=strlen(TAILPATH))
+			&&(strcmp(&argv[1][strlen(argv[1])-strlen(TAILPATH)],
+				TAILPATH)==0)
+			&&(access(argv[1], F_OK)==0))
+				unlink(argv[1]);
+		newname = argv[1];
+	}
+
+	legal(fp);
+	genzipop(fp, "addsi3",  "plus:SI",    "",             "ADD");
+	genzipop(fp, "subsi3",  "minus:SI",   "",             "SUB");
+	genzipop(fp, "mulsi3",  "mult:SI",    "",             "MPY");
+	genzipop(fp, "divsi3",  "div:SI",     "(ZIP_DIVIDE)", "DIVS");
+	genzipop(fp, "udivsi3", "udiv:SI",    "(ZIP_DIVIDE)", "DIVU");
+	genzipop(fp, "andsi3",  "and:SI",     "",             "AND");
+	genzipop(fp, "iorsi3",  "ior:SI",     "",             "OR");
+	genzipop(fp, "xorsi3",  "xor:SI",     "",             "XOR");
+	genzipop(fp, "ashrsi3", "ashiftrt:SI","",             "ASR");
+	genzipop(fp, "ashlsi3", "ashift:SI",  "",             "LSL");
+	genzipop(fp, "lshrsi3", "lshiftrt:SI","",             "LSR");
+
+	genzipop_long(fp, "smulsi_highpart",
+		"(set (match_operand:SI 0 \"register_operand\" \"=r\")\n"
+		"\t\t(truncate:SI (ashiftrt:DI (mult:DI\n"
+		"\t\t\t(sign_extend:DI (match_operand:SI 1 \"register_operand\" \"0\"))\n"
+		"\t\t\t(sign_extend:DI (match_operand:SI 2 \"zip_opb_operand_p\" \"rO\")))\n"
+		"\t\t\t(const_int 32))))",
+		"(ZIP_HAS_DI)",
+		"(set (match_dup 0)\n"
+		"\t\t(truncate:SI (ashiftrt:DI (mult:DI\n"
+		"\t\t\t(sign_extend:DI (match_dup 1))\n"
+		"\t\t\t(sign_extend:DI (match_dup 2)))\n"
+		"\t\t\t(const_int 32))))",
+		//
+		"(set (match_operand:SI 0 \"register_operand\" \"=r\")\n"
+		"\t\t(truncate:SI (ashiftrt:DI (mult:DI\n"
+		"\t\t\t(sign_extend:DI (match_dup 0))\n"
+		"\t\t\t(sign_extend:DI (match_operand:SI 1 \"zip_opb_operand_p\" \"rO\")))\n"
+		"\t\t\t(const_int 32))))",
+		"MPYSHI");
+	genzipop_long(fp, "umulsi_highpart",
+		"(set (match_operand:SI 0 \"register_operand\" \"=r\")\n"
+		"\t\t(truncate:SI (ashiftrt:DI (mult:DI\n"
+		"\t\t\t(zero_extend:DI (match_operand:SI 1 \"register_operand\" \"0\"))\n"
+		"\t\t\t(zero_extend:DI (match_operand:SI 2 \"zip_opb_operand_p\" \"rO\")))\n"
+		"\t\t\t(const_int 32))))",
+		"(ZIP_HAS_DI)",
+		"(set (match_dup 0)\n"
+		"\t\t(truncate:SI (ashiftrt:DI (mult:DI\n"
+		"\t\t\t(zero_extend:DI (match_dup 1))\n"
+		"\t\t\t(zero_extend:DI (match_dup 2)))\n"
+		"\t\t\t(const_int 32))))",
+		//
+		"(set (match_operand:SI 0 \"register_operand\" \"=r\")\n"
+		"\t\t(truncate:SI (ashiftrt:DI (mult:DI\n"
+		"\t\t\t(zero_extend:DI (match_dup 0))\n"
+		"\t\t\t(zero_extend:DI (match_operand:SI 1 \"zip_opb_operand_p\" \"rO\")))\n"
+		"\t\t\t(const_int 32))))",
+		"MPYUHI");
+
+	gen_heading(fp, "Conditional move instructions");
+
+	gencmov(fp, "cmov", "eq", "Z");
+	gencmov(fp, "cmov", "ne", "NZ");
+	gencmov(fp, "cmov", "lt", "LT");
+	gencmov(fp, "cmov", "ge", "GE");
+	gencmov(fp, "cmov", "ltu", "C");
+	gencmov(fp, "cmov", "geu", "NC");
+
+	gen_heading(fp, "Conditional add instructions");
+
+	gencadd(fp, "cadd", "eq", "Z");
+	gencadd(fp, "cadd", "ne", "NZ");
+	gencadd(fp, "cadd", "lt", "LT");
+	gencadd(fp, "cadd", "ge", "GE");
+	gencadd(fp, "cadd", "ltu", "C");
+	gencadd(fp, "cadd", "geu", "NC");
+
+	gen_heading(fp, "Conditional negate instructions");
+
+	gencneg(fp, "cneg", "eq", "Z");
+	gencneg(fp, "cneg", "ne", "NZ");
+	gencneg(fp, "cneg", "lt", "LT");
+	gencneg(fp, "cneg", "ge", "GE");
+	gencneg(fp, "cneg", "ltu", "C");
+	gencneg(fp, "cneg", "geu", "NC");
+
+	gen_heading(fp, "Conditional not instructions");
+
+	gencnot(fp, "cnot", "eq", "Z");
+	gencnot(fp, "cnot", "ne", "NZ");
+	gencnot(fp, "cnot", "lt", "LT");
+	gencnot(fp, "cnot", "ge", "GE");
+	gencnot(fp, "cnot", "ltu", "C");
+	gencnot(fp, "cnot", "geu", "NC");
+
+	gen_heading(fp, "Conditional and instructions");
+
+	gencand(fp, "cand", "eq", "Z");
+	gencand(fp, "cand", "ne", "NZ");
+	gencand(fp, "cand", "lt", "LT");
+	gencand(fp, "cand", "ge", "GE");
+	gencand(fp, "cand", "ltu", "C");
+	gencand(fp, "cand", "geu", "NC");
+
+	gen_heading(fp, "Conditional ior instructions");
+
+	gencior(fp, "cior", "eq", "Z");
+	gencior(fp, "cior", "ne", "NZ");
+	gencior(fp, "cior", "lt", "LT");
+	gencior(fp, "cior", "ge", "GE");
+	gencior(fp, "cior", "ltu", "C");
+	gencior(fp, "cior", "geu", "NC");
+
+	gen_heading(fp, "Conditional xor instructions");
+
+	gencxor(fp, "cxor", "eq", "Z");
+	gencxor(fp, "cxor", "ne", "NZ");
+	gencxor(fp, "cxor", "lt", "LT");
+	gencxor(fp, "cxor", "ge", "GE");
+	gencxor(fp, "cxor", "ltu", "C");
+	gencxor(fp, "cxor", "geu", "NC");
+
+	fclose(fp);
+
+	if (rename(TMPPATH, newname) != 0) {
+		fprintf(stderr, "ERR: Could not create %s, leaving results in %s\n", newname, TMPPATH);
+		exit(EXIT_FAILURE);
+	} exit(EXIT_SUCCESS);
+}
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/config/zip/zip.c gcc-6.2.0-zip/gcc/config/zip/zip.c
--- gcc-6.2.0/gcc/config/zip/zip.c	1969-12-31 19:00:00.000000000 -0500
+++ gcc-6.2.0-zip/gcc/config/zip/zip.c	2017-03-07 12:03:18.566583672 -0500
@@ -0,0 +1,2679 @@
+////////////////////////////////////////////////////////////////////////////////
+//
+// Filename: 	zip.c
+//
+// Project:	Zip CPU backend for the GNU Compiler Collection
+//
+// Purpose:	
+//
+// Creator:	Dan Gisselquist, Ph.D.
+//		Gisselquist Technology, LLC
+//
+////////////////////////////////////////////////////////////////////////////////
+//
+// Copyright (C) 2016-2017, Gisselquist Technology, LLC
+//
+// This program is free software (firmware): you can redistribute it and/or
+// modify it under the terms of  the GNU General Public License as published
+// by the Free Software Foundation, either version 3 of the License, or (at
+// your option) any later version.
+//
+// This program is distributed in the hope that it will be useful, but WITHOUT
+// ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY or
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this program.  (It's in the $(ROOT)/doc directory, run make with no
+// target there if the PDF file isn't present.)  If not, see
+// <http://www.gnu.org/licenses/> for a copy.
+//
+// License:	GPL, v3, as defined and found on www.gnu.org,
+//		http://www.gnu.org/licenses/gpl.html
+//
+//
+////////////////////////////////////////////////////////////////////////////////
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "rtl.h"
+#include "dominance.h"
+#include "cfg.h"
+#include "cfgrtl.h"
+#include "cfganal.h"
+#include "lcm.h"
+#include "cfgbuild.h"
+#include "cfgcleanup.h"
+#include "predict.h"
+#include "basic-block.h"
+#include "bitmap.h"
+#include "df.h"
+#include "hashtab.h"
+#include "hash-set.h"
+#include "machmode.h"
+#include "symtab.h"
+#include "rtlhash.h"
+#include "tree.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "real.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "output.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "expr.h"
+#include "function.h"
+#include "recog.h"
+#include "toplev.h"
+#include "ggc.h"
+#include "builtins.h"
+#include "calls.h"
+#include "langhooks.h"
+#include "optabs.h"
+#include "explow.h"
+#include "emit-rtl.h"
+#include "ifcvt.h"
+#include "genrtl.h"
+
+// #include "tmp_p.h"
+#include "target.h"
+#include "target-def.h"
+// #include "tm-constrs.h"
+#include "tm-preds.h"
+
+#include "diagnostic.h"
+// #include "integrate.h"
+
+#include "zip-protos.h"
+
+static	bool	zip_return_in_memory(const_tree, const_tree);
+static	bool	zip_frame_pointer_required(void);
+
+static void zip_function_arg_advance(cumulative_args_t ca, enum machine_mode mode,
+		const_tree type, bool named);
+static rtx zip_function_arg(cumulative_args_t ca, enum machine_mode mode, const_tree type, bool named);
+
+static	void	zip_asm_trampoline_template(FILE *);
+static	void	zip_trampoline_init(rtx, tree, rtx);
+static	void	zip_init_builtins(void);
+static	tree	zip_builtin_decl(unsigned, bool);
+// static void	zip_asm_output_anchor(rtx x);
+	void 	zip_asm_output_def(FILE *s, const char *n, const char *v);
+static	rtx	zip_expand_builtin(tree exp, rtx target, rtx subtarget,
+			enum machine_mode tmode, int	ignore);
+static	bool	zip_scalar_mode_supported_p(enum machine_mode mode);
+static	bool	zip_libgcc_floating_mode_supported_p(enum machine_mode mode);
+static	int	zip_address_cost(rtx addr, enum machine_mode mode, addr_space_t as, bool spd);
+static	bool	zip_mode_dependent_address_p(const_rtx addr, addr_space_t);
+static	unsigned HOST_WIDE_INT	zip_const_anchor = 0x20000;
+static		 HOST_WIDE_INT	zip_min_opb_imm = -0x20000;
+static		 HOST_WIDE_INT	zip_max_opb_imm =  0x1ffff;
+static		 HOST_WIDE_INT	zip_min_anchor_offset = -0x2000;
+static		 HOST_WIDE_INT	zip_max_anchor_offset =  0x1fff;
+static		 HOST_WIDE_INT	zip_min_mov_offset = -0x1000;
+static		 HOST_WIDE_INT	zip_max_mov_offset =  0x0fff;
+static	int	zip_sched_issue_rate(void) { return 1; }
+static	bool	zip_legitimate_address_p(machine_mode, rtx, bool);
+static	bool	zip_legitimate_move_operand_p(machine_mode, rtx, bool);
+	void	zip_debug_rtx_pfx(const char *, const_rtx x);
+	void	zip_debug_rtx(const_rtx x);
+static	void	zip_override_options(void);
+static	bool	zip_can_eliminate(int from ATTRIBUTE_UNUSED, int to);
+static	int	zip_memory_move_cost(machine_mode, reg_class_t, bool);
+static	rtx	zip_legitimize_address(rtx x, rtx oldx, machine_mode mode);
+static	bool	zip_cannot_modify_jumps_p(void);
+static bool	zip_fixed_condition_code_regs(unsigned int *a, unsigned int *b);
+
+
+#define	ZIP_ALL_DEBUG_OFF	false
+#define	ZIP_ALL_DEBUG_ON	false
+#define	ZIPDEBUGFLAG(A,B)	const bool A = 			\
+		((ZIP_ALL_DEBUG_ON)||(B))&&(!ZIP_ALL_DEBUG_OFF)
+
+enum ZIP_BUILTIN_ID_CODE {
+	ZIP_BUILTIN_RTU,
+	ZIP_BUILTIN_HALT,
+	ZIP_BUILTIN_IDLE,
+	ZIP_BUILTIN_SYSCALL,
+	ZIP_BUILTIN_SAVE_CONTEXT,
+	ZIP_BUILTIN_RESTORE_CONTEXT,
+	ZIP_BUILTIN_BITREV,
+	ZIP_BUILTIN_CC,
+	ZIP_BUILTIN_UCC,
+	ZIP_BUILTIN_BUSY,
+	ZIP_BUILTIN_MAX
+};
+
+static	GTY (()) tree	zip_builtins[(int)ZIP_BUILTIN_MAX];
+static	enum insn_code	zip_builtins_icode[(int)ZIP_BUILTIN_MAX];
+
+#undef	TARGET_ASM_ALIGNED_HI_OP
+#undef	TARGET_ASM_ALIGNED_SI_OP
+#undef	TARGET_ASM_ALIGNED_DI_OP
+#define	TARGET_ASM_ALIGNED_HI_OP	"\t.short\t"
+#define	TARGET_ASM_ALIGNED_SI_OP	"\t.int\t"
+#define	TARGET_ASM_ALIGNED_DI_OP	"\t.quad\t"
+
+#undef	TARGET_ASM_UNALIGNED_HI_OP
+#undef	TARGET_ASM_UNALIGNED_SI_OP
+#undef	TARGET_ASM_UNALIGNED_DI_OP
+#define	TARGET_ASM_UNALIGNED_HI_OP	TARGET_ASM_ALIGNED_HI_OP
+#define	TARGET_ASM_UNALIGNED_SI_OP	TARGET_ASM_ALIGNED_SI_OP
+#define	TARGET_ASM_UNALIGNED_DI_OP	TARGET_ASM_ALIGNED_DI_OP
+
+#include "gt-zip.h"
+
+/* The Global 'targetm' Variable. */
+struct	gcc_target	targetm = TARGET_INITIALIZER;
+
+
+enum	reg_class zip_reg_class(int);
+
+#define	LOSE_AND_RETURN(msgid, x)		\
+	do {					\
+		zip_operand_lossage(msgid, x);	\
+		return;				\
+	} while(0)
+
+/* Per-function machine data. */
+struct GTY(()) machine_function
+{
+	/* Size of pretend arguments for varargs; from crtl->args.pretend_args_size */
+	int	pretend_size;
+
+	/* Number of bytes saved on the stack for local variables. */
+	int	local_vars_size;
+
+	/* Number of bytes saved on stack for register save area */
+	int	saved_reg_size;
+	int	save_ret;	/* NOTE(review): only read by commented-out code below */
+
+	int	sp_fp_offset;	/* Offset from SP up to the frame-pointer location */
+	bool	fp_needed;	/* True if a frame pointer must be kept; see zip_compute_frame() */
+	int	size_for_adjusting_sp;	/* Total SP adjustment made by the prologue */
+};
+
+/* Allocate a chunk of memory for per-function machine-dependent data. */
+
+static struct machine_function *
+zip_init_machine_status(void) {
+	return ggc_cleared_alloc<machine_function>();
+}
+
+static	void
+zip_override_options(void)
+{
+	init_machine_status = zip_init_machine_status;
+}
+
+enum	reg_class
+zip_reg_class(int regno)
+{
+	if (is_ZIP_GENERAL_REG(regno)) {
+		return GENERAL_REGS;
+	} else if (is_ZIP_REG(regno)) {
+		return ALL_REGS;
+	} return NO_REGS;
+}
+
+/* Worker function for TARGET_RETURN_IN_MEMORY. */
+static	bool
+zip_return_in_memory(const_tree type, const_tree fntype ATTRIBUTE_UNUSED) {
+	const	HOST_WIDE_INT size = int_size_in_bytes(type);
+	return (size == -1)||(size > 2*UNITS_PER_WORD);
+}
+
+/* Emit an error message when we're in an asm, and a fatal error for "normal"
+ * insns.  Formatted output isn't easily implemented, since we use
+ * output_operand_lossage to output the actual message and handle the
+ * categorization of the error.  */
+
+static void
+zip_operand_lossage(const char *msgid, rtx op) {
+	debug_rtx(op);
+	zip_debug_rtx(op);
+	output_operand_lossage("%s", msgid);
+}
+
+/* The PRINT_OPERAND_ADDRESS worker.   */
+void
+zip_print_operand_address(FILE *file, rtx x) {
+	ZIPDEBUGFLAG(dbg, false);
+
+	if (dbg) zip_debug_rtx(x);
+	switch(GET_CODE(x)) {
+		case REG:
+			gcc_assert(is_ZIP_REG(REGNO(x)));
+			gcc_assert(REGNO(x) < 16);
+			fprintf(file, "(%s)", reg_names[REGNO(x)]);
+			break;
+		case SYMBOL_REF:
+			fprintf(file, "%s", XSTR(x,0));
+			break;
+		case LABEL_REF:
+			x = LABEL_REF_LABEL(x);	// fall through to CODE_LABEL
+		case CODE_LABEL:
+			{ char buf[256];
+			ASM_GENERATE_INTERNAL_LABEL(buf, "L", CODE_LABEL_NUMBER(x));
+#ifdef	ASM_OUTPUT_LABEL_REF
+			ASM_OUTPUT_LABEL_REF(file, buf);
+#else
+			assemble_name(file, buf);
+#endif
+			}
+			break;
+		case PLUS:
+			if (!REG_P(XEXP(x, 0))) {
+				fprintf(stderr, "Unsupported address construct\n");
+				zip_debug_rtx(x);
+				abort();
+			} gcc_assert(is_ZIP_REG(REGNO(XEXP(x,0))));
+			gcc_assert(REGNO(XEXP(x,0))<16);
+			if (CONST_INT_P(XEXP(x, 1))) {
+				if (INTVAL(XEXP(x,1))!=0) {
+					fprintf(file, "%ld(%s)",
+					(long)INTVAL(XEXP(x, 1)),
+					reg_names[REGNO(XEXP(x, 0))]);
+				} else {
+					fprintf(file, "(%s)",
+					reg_names[REGNO(XEXP(x, 0))]);
+				}
+			} else if (GET_CODE(XEXP(x,1)) == SYMBOL_REF) {
+				fprintf(file, "%s(%s)", XSTR(XEXP(x,1),0),
+					reg_names[REGNO(XEXP(x, 0))]);
+			} else if ((GET_CODE(XEXP(x, 1)) == MINUS)
+				&& (GET_CODE(XEXP(XEXP(x, 1), 0))==SYMBOL_REF)
+				&& (GET_CODE(XEXP(XEXP(x, 1), 1))==SYMBOL_REF)) {
+				fprintf(file, "%s-%s(%s)",
+					XSTR(XEXP(XEXP(x, 1),0),0),
+					XSTR(XEXP(XEXP(x, 1),1),0),
+					reg_names[REGNO(XEXP(x, 0))]);
+			} else
+				fprintf(file, "#INVALID(%s)",
+					reg_names[REGNO(XEXP(x, 0))]);
+			/*
+			else if (GET_CODE(XEXP(addr, 1)) == LABEL)
+				fprintf(file, "%s(%s)",
+					GET_CODE(XEXP(addr, 1)),
+					reg_names[REGNO(GET_CODE(XEXP(addr, 0)))]);
+			else if ((GET_CODE(XEXP(addr, 1)) == MINUS)
+				&& (GET_CODE(XEXP(GET_CODE(XEXP(addr, 1)), 0))==LABEL)
+				&& (GET_CODE(XEXP(GET_CODE(XEXP(addr, 1)), 1))==LABEL)) {
+				fprintf(file, "%s-%s(%s)",
+					reg_names[REGNO(GET_CODE(XEXP(addr, 0)))]);
+					reg_names[REGNO(GET_CODE(XEXP(addr, 0)))]);
+					reg_names[REGNO(GET_CODE(XEXP(addr, 0)))]);
+			} 
+			*/
+			break;
+		// We don't support direct memory addressing within our
+		// instruction set, even though the instructions themselves
+		// would support direct memory addressing of the lower 18 bits
+		// of memory space.
+		case MEM:
+			if (dbg) zip_debug_rtx(x);
+			zip_print_operand_address(file, XEXP(x, 0));
+			break;
+		case CONST_INT:
+			fprintf(file, "%ld",(long)INTVAL(x));
+			break;
+		default:
+			fprintf(stderr, "Unknown address format\n");
+			zip_debug_rtx(x);
+			abort(); break;
+			// output_addr_const(file, x);
+		break;
+	}
+}
+
+/* The PRINT_OPERAND worker. */
+
+void
+zip_print_operand(FILE *file, rtx x, int code)
+{
+	rtx operand = x;
+	int	rgoff = 0;
+
+	// fprintf(file, "Print Operand!\n");
+
+	/* New code entries should just be added to the switch below.  If
+	 * handling is finished, just return.  If handling was just a
+	 * modification of the operand, the modified operand should be put in
+	 * "operand", and then do a break to let default handling
+	 * (zero-modifier) output the operand.
+	 */
+	switch(code) {
+		case 0:
+			/* No code, print as usual. */
+			break;
+		case 'L':
+			/* Lower of two registers, print one up */
+			rgoff = 1;
+			break;
+		case 'R':
+		case 'H':
+			/* Higher of a register pair, print normal */
+			break;
+
+		default:
+			LOSE_AND_RETURN("invalid operand modifier letter", x);
+	}
+
+	/* Print an operand as without a modifier letter. */
+	switch (GET_CODE(operand)) {
+	case REG:
+		if (REGNO(operand)+rgoff >= FIRST_PSEUDO_REGISTER)
+			internal_error("internal error: bad register: %d", REGNO(operand));
+		fprintf(file, "%s", reg_names[REGNO(operand)+rgoff]);
+		return;
+	case SCRATCH:
+		LOSE_AND_RETURN("Need a scratch register", x);
+		return;
+
+	case CODE_LABEL:
+	case LABEL_REF:
+	case SYMBOL_REF:
+	case PLUS:
+		PRINT_OPERAND_ADDRESS(file, operand);
+		return;
+	case MEM:
+		PRINT_OPERAND_ADDRESS(file, XEXP(operand, 0));
+		return;
+
+	default:
+		/* No need to handle all strange variants, let
+		 * output_addr_const do it for us.
+	 	 */
+		if (CONSTANT_P(operand)) {
+			output_addr_const(file, operand);
+			return;
+		}
+
+		zip_debug_rtx(x);
+		LOSE_AND_RETURN("unexpected operand", x);
+	}
+}
+
+static bool
+zip_frame_pointer_required(void)
+{
+	// This should really depend upon whether we have variable sized
+	// arguments in our frame or not.  Once this fails, let's look
+	// at what the problem was and then whether or not we can detect
+	// it.
+	//
+	// Use a GCC global to determine our answer
+	if (cfun->calls_alloca)
+		return true;
+
+	// If the stack frame is too large to access saved registers with
+	// immediate offsets, then we *must* use a frame pointer
+	unsigned stack_size = 36;
+	stack_size += (ACCUMULATE_OUTGOING_ARGS ? crtl->outgoing_args_size : 0);
+
+	//
+	// if cfun->machine->size_for_adjusting_sp might ever be larger than
+	//	 zip_max_anchor_offset, then we MUST have a frame pointer.
+	//
+	// cfun->machine->size_for_adjusting_sp =
+	//		get_frame_size
+	//		+ saved_reg_size (will always be <= 36)
+	//		+ outgoing_args_size;
+	//		+ pretend_args_size;
+
+	if(crtl->args.pretend_args_size > 0)
+		stack_size += crtl->args.pretend_args_size;
+	stack_size += get_frame_size();
+	// Align our attempted stack size
+	stack_size = ((stack_size+3)&-4);
+
+	// Now here's our test
+	if (stack_size >= zip_max_anchor_offset)
+		return true;
+	return (frame_pointer_needed);
+/*
+*/
+}
+
+/* Determine whether or not a register needs to be saved on the stack or not.
+ */
+static bool
+zip_save_reg(int regno) {
+	if (regno == 0)
+		return ((!crtl->is_leaf)
+			||((df_regs_ever_live_p(0))&&(!call_used_regs[0])));
+	else if ((regno == zip_GOT)&&(!ZIP_PIC))
+		return	((df_regs_ever_live_p(regno))
+				&&(!call_used_regs[regno]));
+	else if (regno == zip_FP)
+		return((zip_frame_pointer_required())||((df_regs_ever_live_p(regno))
+				&&(!call_used_regs[regno])));
+	else if (regno < zip_FP)
+		return	((df_regs_ever_live_p(regno))
+				&&(!call_used_regs[regno]));
+	return false;
+}
+
+/* Compute the size of the local area and the size to be adjusted by the
+ * prologue and epilogue.
+ *
+ * Here's what we are looking at (top is the current, bottom is the last ...)
+ *
+ *	Stack Pointer ->
+ *			Outgoing arguments
+ *			Local variables (could be variable size)
+ *	Frame Pointer ->	(= Stack Pointer + sp_fp_offset)
+ *			Saved return address, if saved
+ *			Other Saved registers
+ *			Saved frame pointer (if used)
+ *			Saved R12, if used
+ *			(Stack pointer is not saved)
+ *			(PRETEND-ARGS)
+ *	Original stack pointer ->	(= Stack_Pointer +size_for_adjusting_sp)
+ *			Called arguments (not passed in registers)
+ *			Return arguments (not R1, args.pretend_args_size)
+ *		(Prior function's stack frame ... )
+ *
+ */
+static void
+zip_compute_frame(void) {
+	int	regno;
+	int	args_size;
+	ZIPDEBUGFLAG(dbg, false);
+
+	if (dbg) fprintf(stderr, "ZIP-COMPUTE-FRAME: %s\n", current_function_name());
+	// gcc_assert(crtl);
+	gcc_assert(cfun);
+	gcc_assert(cfun->machine);
+
+	args_size=(ACCUMULATE_OUTGOING_ARGS ? crtl->outgoing_args_size : 0);
+
+	if(crtl->args.pretend_args_size > 0) {
+		args_size += crtl->args.pretend_args_size;
+		if (dbg) fprintf(stderr, "%s pretend_args_size : %d\n", current_function_name(),
+			crtl->args.pretend_args_size);
+		cfun->machine->pretend_size = crtl->args.pretend_args_size;
+	}
+
+	cfun->machine->local_vars_size = get_frame_size();
+
+	// Force frame alignment of the local variable section
+	cfun->machine->local_vars_size += 3;
+	cfun->machine->local_vars_size &= -4;
+
+	// Save callee-saved registers. 
+	cfun->machine->saved_reg_size = 0;
+	for(regno=0; regno < FIRST_PSEUDO_REGISTER; regno++) {
+		if (zip_save_reg(regno))
+			cfun->machine->saved_reg_size += 4;
+	}
+
+	cfun->machine->fp_needed = (zip_frame_pointer_required());
+
+	if ((cfun->machine->fp_needed)&&
+			(!df_regs_ever_live_p(zip_FP))) {
+		cfun->machine->saved_reg_size += 4;
+	}
+
+	cfun->machine->sp_fp_offset = crtl->outgoing_args_size
+				+ cfun->machine->local_vars_size;
+	cfun->machine->size_for_adjusting_sp = cfun->machine->local_vars_size
+			+ cfun->machine->saved_reg_size
+			+ args_size;
+	if(dbg) {
+		fprintf(stderr, "\t---- STACK PTR ----\n");
+		fprintf(stderr, "\tOUTGOIN-SIZE: %d\n",
+			crtl->outgoing_args_size);
+		fprintf(stderr, "\tLOCALS-SIZE : %d\n",
+			cfun->machine->local_vars_size);
+		fprintf(stderr, "\t---- FRAME PTR ----%s\n",
+			cfun->machine->fp_needed?"":" (Eliminated)");
+		fprintf(stderr, "\tREGISTERS   : %d\n",
+			cfun->machine->saved_reg_size);
+		fprintf(stderr, "\tPRETEND SIZE: %d\n",
+			crtl->args.pretend_args_size);
+		fprintf(stderr, "\t---- ARG PTR (Original SP, should be eliminated) ----\n");
+		fprintf(stderr, "\t----\n");
+		fprintf(stderr, "\tARGS-SIZE   : %d\n", args_size);
+		fprintf(stderr, "\tSP_FP_OFFSET: %d\n",
+			cfun->machine->sp_fp_offset);
+		fprintf(stderr, "\tSP-ADJUSTMNT: %d\n",
+			cfun->machine->size_for_adjusting_sp);
+	}
+}
+
+void
+zip_save_registers(rtx basereg_rtx, int sp_offset_to_first_register) {
+	rtx	insn;
+	ZIPDEBUGFLAG(dbg, false);
+
+	// Compute Frame has already been calculated before coming into here
+	//
+	// zip_compute_frame();
+	if (dbg)  fprintf(stderr, "PROLOGUE::SAVE-REGISTER\n");
+
+	int offset = 0, regno;
+	for(regno=0; regno < FIRST_PSEUDO_REGISTER; regno++) {
+		if (zip_save_reg(regno)) {
+			if (dbg) fprintf(stderr,
+				"PROLOGUE::SAVE-REGISTER Saving R%d in %d+%d(SP)\n",
+				regno, sp_offset_to_first_register, offset);
+			insn=emit_insn(gen_movsi_sto_off(
+				basereg_rtx,
+				GEN_INT(sp_offset_to_first_register +offset),
+				gen_rtx_REG(SImode, regno)));
+			RTX_FRAME_RELATED_P(insn) = 1;
+			offset += 4;
+		}
+	} if (dbg)  fprintf(stderr, "%d registers saved%s\n", offset,
+		(crtl->saves_all_registers)?", should be all of them":", less than all");
+
+}
+
+/*
+ * zip_expand_small_prologue()
+ *
+ * To be used when size_for_adjusting_sp is less than zip_max_anchor_offset.
+ *
+ *
+ * Approach:
+ *	SUB size_for_adjusting_sp,SP
+ *	SW REG,0(SP)
+ *	SW REG,4(SP)
+ *	SW REG,8(SP)
+ *	....
+ *	SW REG,#(SP)
+ *
+ * and if we need a frame register, we'll either do ...
+ *	MOV sp_fp_offset+SP,FP
+ * or if the offset is too large, we'll do ...
+ *	MOV SP,FP
+ *	ADD sp_fp_offset,FP
+ *
+ */
+void
+zip_expand_small_prologue(void) {
+	ZIPDEBUGFLAG(dbg, false);
+	rtx	insn;
+
+	zip_compute_frame();
+
+	if (dbg)  fprintf(stderr, "PROLOGUE:::EXPAND-SMALL-PROLOGUE(SP-FP offset is %d)\n",
+		cfun->machine->sp_fp_offset);
+
+	insn = emit_insn(gen_subsi3(stack_pointer_rtx, stack_pointer_rtx,
+			gen_int_mode(cfun->machine->size_for_adjusting_sp,
+				SImode)));
+	RTX_FRAME_RELATED_P(insn) = 1;
+
+	zip_save_registers(stack_pointer_rtx, cfun->machine->sp_fp_offset);
+
+	if (cfun->machine->fp_needed) {
+		if (dbg)  fprintf(stderr, "PROLOGUE:::EXPAND-SMALL-PROLOGUE(FP-NEEDED)\n");
+		if (dbg) zip_debug_rtx(stack_pointer_rtx);
+		if (dbg) zip_debug_rtx(frame_pointer_rtx);
+		if (cfun->machine->sp_fp_offset < zip_max_mov_offset) {
+			if (dbg)  fprintf(stderr,
+				"PROLOGUE:::EXPAND-SMALL-PROLOGUE() "
+				"gen_movsi_reg_off(FP, SP, %d), %d < %ld\n",
+				cfun->machine->sp_fp_offset,
+				cfun->machine->sp_fp_offset,
+				zip_max_mov_offset);
+			insn = emit_insn(gen_movsi_reg_off(frame_pointer_rtx,
+				stack_pointer_rtx,
+				GEN_INT(cfun->machine->sp_fp_offset)));
+			RTX_FRAME_RELATED_P(insn) = 1;
+		} else {
+			rtx	fp_rtx;
+
+			fp_rtx = gen_rtx_REG(SImode, zip_FP);
+
+			insn = emit_insn(gen_movsi(fp_rtx, stack_pointer_rtx));
+			RTX_FRAME_RELATED_P(insn) = 1;
+
+			insn = emit_insn(gen_addsi3(fp_rtx, fp_rtx,
+				GEN_INT(cfun->machine->sp_fp_offset)));
+			RTX_FRAME_RELATED_P(insn) = 1;
+		}
+	}
+}
+
+/*
+ * zip_expand_large_prologue()
+ *
+ * The prologue function will be called when the size_for_adjusting_sp is too
+ * large to fit into a single OPB-immediate as part of a subtract.
+ *
+ * Approach:
+ *	SUB (size_for_adjusting_sp-sp_fp_offset),SP
+ *	SW R0,(SP)
+ *	SW R5,4(SP)
+ *	SW R6,8SP)
+ *	SW R7,(SP)
+ *	...
+ *	SW FP,(SP)
+ *
+ *	LDI sp_fp_offset,FP
+ *	SUB FP,SP
+ *	ADD SP,FP
+ */
+void
+zip_expand_large_prologue(void) {
+	ZIPDEBUGFLAG(dbg, false);
+	rtx	insn, fp_rtx;
+
+	gcc_assert(cfun->machine->fp_needed);
+
+	if (dbg)	fprintf(stderr, "PROLOGUE::expand-large(%d-%d)\n",
+				cfun->machine->size_for_adjusting_sp,
+				cfun->machine->sp_fp_offset);
+	insn = emit_insn(gen_subsi3(stack_pointer_rtx, stack_pointer_rtx,
+		gen_int_mode(cfun->machine->size_for_adjusting_sp
+				-cfun->machine->sp_fp_offset, SImode)));
+	RTX_FRAME_RELATED_P(insn) = 1;
+
+	zip_save_registers(stack_pointer_rtx, 0);
+
+	fp_rtx = gen_rtx_REG(SImode, zip_FP);
+
+	insn = emit_insn(gen_movsi(fp_rtx,
+		gen_int_mode(cfun->machine->sp_fp_offset, SImode)));
+	RTX_FRAME_RELATED_P(insn) = 1;
+
+	insn = emit_insn(gen_subsi3(stack_pointer_rtx, stack_pointer_rtx,
+			fp_rtx));
+	RTX_FRAME_RELATED_P(insn) = 1;
+
+	insn = emit_insn(gen_addsi3(fp_rtx, fp_rtx, stack_pointer_rtx));
+	RTX_FRAME_RELATED_P(insn) = 1;
+}
+
+void
+zip_expand_prologue(void) {
+	ZIPDEBUGFLAG(dbg, false);
+
+	zip_compute_frame();
+
+	if (dbg)  fprintf(stderr, "PROLOGUE: Computing Prologue instructions\n");
+	if (dbg)  fprintf(stderr, "PROLOGUE: SP-FP offset is %d\n", 
+			cfun->machine->sp_fp_offset);
+	if (cfun->machine->size_for_adjusting_sp != 0) {
+		if (cfun->machine->size_for_adjusting_sp <= zip_max_anchor_offset) {
+			if (dbg)  fprintf(stderr, "PROLOGUE: "
+					"%d <= %ld, so going small\n",
+					cfun->machine->size_for_adjusting_sp,
+					zip_max_anchor_offset);
+			zip_expand_small_prologue();
+		} else {
+			zip_expand_large_prologue();
+		}
+	}
+}
+
+int
+zip_use_return_insn(void)
+{
+	zip_compute_frame();	// ensure cfun->machine->fp_needed et al. are current
+	if ((!reload_completed)||(cfun->machine->fp_needed)
+			||(get_frame_size()!=0)) {
+		// If R0 ever gets pushed to the stack, then we cannot 
+		// use a master return from anywhere.  We need to clean up the
+		// stack first.
+		if ((!crtl->is_leaf)||((df_regs_ever_live_p(0))
+						&&(!call_used_regs[0]))) {
+			return 0;
+		}
+	}
+	return (cfun->machine->size_for_adjusting_sp == 0)?1:0;
+}
+
+/* As per the notes in M68k.c, quote the function epilogue should not depend
+ * upon the current stack pointer.  It should use the frame pointer only,
+ * if there is a frame pointer.  This is mandatory because of alloca; we also
+ * take advantage of it to omit stack adjustments before returning ...
+ *
+ * Let's see if we can use their approach here.
+ *
+ * We can't.  Consider our choices:
+ *	LW (FP),R0
+ *	LW 4(FP),R4
+ *	LW 8(FP),R5
+ *	LW 12(FP),R6
+ *	LW 16(FP),FP
+ *	... Then what is the stack pointer?
+ * or 
+ *	LW (FP),R0
+ *	LW 4(FP),R4
+ *	LW 8(FP),R5
+ *	LW 12(FP),R6
+ *	MOV FP,SP
+ *	LW 16(SP),FP
+ *	... Which suffers unnecessary pipeline stalls, and certainly doesn't
+ *	exploit our pipeline memory function
+ * or
+ *	MOV FP,SP
+ *	LW (SP),R0
+ *	LW 4(SP),R4
+ *	LW 8(SP),R5
+ *	LW 12(SP),R6
+ *	LW 16(SP),FP
+ * Which will be our choice.  Note that we do use the stack pointer, eventually.
+ *
+ */
+void
+zip_expand_epilogue(void) {
+	int	regno, offset;
+	ZIPDEBUGFLAG(dbg, false);
+	rtx	insn;
+
+	zip_compute_frame();
+
+	if (dbg) fprintf(stderr, "EPILOG::\n");
+	if (cfun->machine->fp_needed) {
+		// This is done special--if you can't trust the stack pointer
+		// enough so that you must have a frame pointer, then you can't
+		// trust its offset enough to restore from it.  Hence, we start
+		// by moving the frame pointer to the stack pointer to recover
+		// the stack pointer back to a usable value.
+		if (dbg) fprintf(stderr, "EPILOG::Moving frame pointer to stack register\n");
+		insn = emit_insn(gen_movsi_raw(stack_pointer_rtx, frame_pointer_rtx));
+		RTX_FRAME_RELATED_P(insn) = 1;
+	}
+
+	if (cfun->machine->saved_reg_size != 0) {
+		if (cfun->machine->fp_needed)
+			offset = 0;
+		else
+			offset = cfun->machine->sp_fp_offset;
+		if (dbg) fprintf(stderr, "EPILOG::Saved_REG_Size = %d\n", cfun->machine->saved_reg_size);
+		for(regno=0; regno < FIRST_PSEUDO_REGISTER; regno++) {
+			if (zip_save_reg(regno)) {
+				if (dbg) fprintf(stderr, "EPILOG::RESTORING R%d from SP+%d\n", regno, offset);
+				rtx reg = gen_rtx_REG(SImode, regno);
+				insn = emit_insn(gen_movsi_lod_off(
+						reg,
+						stack_pointer_rtx,
+						GEN_INT(offset)));
+				add_reg_note(insn, REG_CFA_RESTORE, reg);
+				RTX_FRAME_RELATED_P(insn) = 1;
+				offset += 4;
+			}
+		}
+	}
+
+	if (cfun->machine->fp_needed) {
+		// Restore the stack pointer back to the original, the
+		// difference being the difference from the frame pointer
+		// to the original stack
+		insn = emit_insn(gen_addsi3(stack_pointer_rtx,
+			stack_pointer_rtx,
+			GEN_INT(cfun->machine->size_for_adjusting_sp
+				-cfun->machine->sp_fp_offset)));
+		RTX_FRAME_RELATED_P(insn) = 1;
+	} else {
+		// else now the difference is between the stack pointer and
+		// the original stack pointer.
+		if (dbg) fprintf(stderr, "EPILOG::ADDSI3(StackPtr, %d)\n",
+				cfun->machine->size_for_adjusting_sp);
+		insn = emit_insn(gen_addsi3(stack_pointer_rtx, stack_pointer_rtx,
+			GEN_INT(cfun->machine->size_for_adjusting_sp)));
+		RTX_FRAME_RELATED_P(insn) = 1;
+	}
+	if (dbg) fprintf(stderr, "EPILOG::EMITTING-RETURN\n");
+
+	// The return RTX is not allowed to be frame related
+	insn = emit_jump_insn(ret_rtx);
+	// RTX_FRAME_RELATED_P(insn) = 1;
+}
+
+void
+zip_sibcall_epilogue(void) {
+	int	regno, offset;
+	ZIPDEBUGFLAG(dbg, false);
+	rtx	insn;
+
+	zip_compute_frame();
+
+	if (dbg) fprintf(stderr, "EPILOG::\n");
+	if (cfun->machine->fp_needed) {
+		// This is done special--if you can't trust the stack pointer
+		// enough so that you must have a frame pointer, then you can't
+		// trust its offset enough to restore from it.  Hence, we start
+		// by moving the frame pointer to the stack pointer to recover
+		// the stack pointer back to a usable value.
+		if (dbg) fprintf(stderr, "SIBCALL-EPILOG::Moving frame pointer to stack register\n");
+		insn = emit_insn(gen_movsi_raw(stack_pointer_rtx, frame_pointer_rtx));
+		RTX_FRAME_RELATED_P(insn) = 1;
+	}
+
+	if (cfun->machine->saved_reg_size != 0) {
+		if (cfun->machine->fp_needed)
+			offset = 0;
+		else
+			offset = cfun->machine->sp_fp_offset;
+		if (dbg) fprintf(stderr, "SIBCALL-EPILOG::Saved_REG_Size = %d\n", cfun->machine->saved_reg_size);
+		for(regno=0; regno < FIRST_PSEUDO_REGISTER; regno++) {
+			if (zip_save_reg(regno)) {
+				if (dbg) fprintf(stderr, "SIBCALL-EPILOG::RESTORING R%d\n", regno);
+				rtx reg = gen_rtx_REG(SImode, regno);
+				insn = emit_insn(gen_movsi_lod_off(
+						reg,
+						stack_pointer_rtx,
+						GEN_INT(offset)));
+				add_reg_note(insn, REG_CFA_RESTORE, reg);
+				RTX_FRAME_RELATED_P(insn) = 1;
+				offset += 4;
+			}
+		}
+	}
+
+	if (cfun->machine->fp_needed) {
+		// Restore the stack pointer back to the original, the
+		// difference being the difference from the frame pointer
+		// to the original stack
+		insn = emit_insn(gen_addsi3(stack_pointer_rtx, stack_pointer_rtx,
+			GEN_INT(cfun->machine->size_for_adjusting_sp
+				-cfun->machine->sp_fp_offset)));
+		RTX_FRAME_RELATED_P(insn) = 1;
+	} else {
+		// else now the difference is between the stack pointer and
+		// the original stack pointer.
+		if (dbg) fprintf(stderr, "SIBCALL-EPILOG::ADDSI3(StackPtr, %d)\n",
+				cfun->machine->size_for_adjusting_sp);
+		insn = emit_insn(gen_addsi3(stack_pointer_rtx,stack_pointer_rtx,
+			GEN_INT(cfun->machine->size_for_adjusting_sp)));
+		RTX_FRAME_RELATED_P(insn) = 1;
+	}
+}
+
+rtx
+zip_return_addr_rtx(int count, rtx frame ATTRIBUTE_UNUSED)
+{
+	//
+	// Don't try to compute anything other than frame zero.
+	//
+	if (count != 0)
+		return NULL_RTX;
+
+	// Make sure we've computed our frame, do we need to save registers?
+	zip_compute_frame();
+
+	if (zip_save_reg(zip_LR)) {
+		if (cfun->machine->fp_needed)
+			return gen_rtx_MEM(SImode, frame_pointer_rtx);
+		else
+			return gen_rtx_MEM(SImode, gen_rtx_PLUS(Pmode,
+					stack_pointer_rtx,
+					GEN_INT(cfun->machine->sp_fp_offset)));
+	} else {
+		return gen_rtx_REG(Pmode, zip_LR);
+
+	}
+}
+
+/* Implement RETURN_ADDR_RTX(COUNT, FRAMEADDR).
+ *
+ * We currently only support calculating the return address for the current
+ * frame.
+ */
+
+/*
+rtx
+zip_return_addr_rtx(int count, rtx frame ATTRIBUTE_UNUSED)
+{
+	if (count)
+		return NULL_RTX;
+
+	zip_compute_frame();
+
+	// saved return address for current function is at fp - 1
+	if (cfun->machine->save_ret)
+		return gen_rtx_MEM(Pmode, plus_constant(frame_pointer_rtx,
+				-UNITS_PER_WORD));
+	return get_hard_reg_initial_val(Pmode, RETURN_ADDRESS_REGNUM);
+}
+*/
+
+/* Implements the macro INITIAL_ELIMINATION_OFFSET,
+ * return the OFFSET.
+ */
+int
+zip_initial_elimination_offset(int from, int to) {
+	int	ret = 0;
+	zip_compute_frame();
+
+/*
+	if (((from) == FRAME_POINTER_REGNUM)&&((to) == STACK_POINTER_REGNUM)) {
+		ret = cfun->machine->sp_fp_offset;
+	} else if (((from)==ARG_POINTER_REGNUM)&&((to)==STACK_POINTER_REGNUM)) {
+		// Since the ARG_POINTER_REGNUM is defined to be identical
+		// to the FRAME_POINTER_REGNUM, this "if" will never ever
+		// get called.
+		ret = cfun->machine->sp_fp_offset;
+	} else if (((from)==ARG_POINTER_REGNUM)&&((to)==FRAME_POINTER_REGNUM)) {
+		// Since we define ARG_POINTER_REGNUM to be FRAME_POINTER_REGNUM
+		// we're asked for the offset between the frame pointer and
+		// itself.  The result had better be zero.
+		//
+		ret = 0;
+	} else {
+		abort();
+	}
+*/
+
+	// Let's try using an ARG_POINTER != FRAME_POINTER
+	if (((from) == FRAME_POINTER_REGNUM)&&((to) == STACK_POINTER_REGNUM)) {
+		ret = cfun->machine->sp_fp_offset;
+	} else if (((from)==ARG_POINTER_REGNUM)&&((to)==STACK_POINTER_REGNUM)) {
+		// From the (virtual) argument pointer down to the stack
+		// pointer is the entire locally allocated frame: the full
+		// stack-pointer adjustment made by the prologue.
+		ret = cfun->machine->size_for_adjusting_sp;
+	} else if (((from)==ARG_POINTER_REGNUM)&&((to)==FRAME_POINTER_REGNUM)) {
+		ret = cfun->machine->size_for_adjusting_sp
+			- cfun->machine->sp_fp_offset;
+	} else {
+		abort();
+	}
+
+	return ret;
+}
+
+/*
+ * Code taken from m68k ...
+ */
+static bool
+zip_can_eliminate(int from, int to)
+{
+	// fprintf(stderr, "CAN_ELIMINATE::QUERYING(%d,%d)\n", from, to);
+	if ((from == zip_FP)&&(to == zip_SP))
+		return !cfun->machine->fp_needed;
+	return true;
+}
+
+/* Compute the number of word sized registers needed to hold a function 
+ * argument of mode INT_MODE and tree type TYPE.
+ */
+int
+zip_num_arg_regs(enum machine_mode mode, const_tree type) {
+	int	size;
+
+	if (targetm.calls.must_pass_in_stack(mode, type))
+		return 0;
+
+	if ((type)&&(mode == BLKmode))
+		size = int_size_in_bytes(type);
+	else
+		size = GET_MODE_SIZE(mode);
+
+	return (size + UNITS_PER_WORD - 1)/UNITS_PER_WORD;
+}
+
+static void
+zip_function_arg_advance(cumulative_args_t ca, machine_mode mode,
+		const_tree type, bool named ATTRIBUTE_UNUSED) {
+	CUMULATIVE_ARGS *cum;
+	int	nreg;
+
+	cum = get_cumulative_args(ca);
+	nreg = zip_num_arg_regs(mode, type);
+	if (((*cum)+nreg) > NUM_ARG_REGS)
+		(*cum) = NUM_ARG_REGS;
+	else
+		(*cum) += nreg;
+}
+
+static rtx
+zip_function_arg(cumulative_args_t ca, machine_mode mode,
+		const_tree type ATTRIBUTE_UNUSED, bool named) {
+	CUMULATIVE_ARGS *cum;
+
+	if (!named)
+		return NULL_RTX;
+	cum = get_cumulative_args(ca);
+
+	if ((*cum) >= NUM_ARG_REGS)
+		return NULL_RTX;
+	return
+		gen_rtx_REG(mode, (*cum)+1);
+}
+
+/* DECL is the declaration of the function being targeted by the call,
+ * and EXP is the CALL_EXPR representing the call.
+ *
+ * A sibling call must be refused whenever the callee receives an
+ * argument in a register that is *not* call-clobbered: our prologue
+ * would have saved (and our epilogue restored) that register, so the
+ * sibling could never see the value placed in it.  Walk the callee's
+ * arguments and check which registers they land in.
+ */
+bool	zip_function_ok_for_sibcall(ATTRIBUTE_UNUSED tree decl, tree exp) {
+	// calls.c already checks whether or not the parameter stack space
+	// is identical, so only the register usage needs checking here.
+	//
+	// Example: func(R1,..R4) cannot be followed by func2(R1,..,R5):
+	//	we would blow R5 away in our prologue even if it were
+	//	properly set.  func(R1,..R5) followed by func2(R1,..R4) is
+	//	fine: func2 may save R5 needlessly, but harmlessly.
+	//
+	CUMULATIVE_ARGS	cum_v;
+	cumulative_args_t	cum;
+	tree		parameter;
+	machine_mode	mode;
+	tree		ttype;
+	rtx		parm_rtx;
+	int		i;
+	static const char zip_call_used_register[] = CALL_USED_REGISTERS;
+
+	INIT_CUMULATIVE_ARGS(cum_v, NULL, NULL, 0,0);
+	cum = pack_cumulative_args(&cum_v);
+	for (i=0; i<call_expr_nargs(exp); i++) {
+
+		parameter = CALL_EXPR_ARG(exp, i);
+
+		// Malformed argument: be permissive and let the generic
+		// code deal with it.
+		if ((!parameter) || (TREE_CODE(parameter)==ERROR_MARK))
+			return true;
+		ttype = TREE_TYPE(parameter);
+		gcc_assert(ttype);
+		// Use the TYPE_MODE accessor rather than reaching into
+		// type_common directly; the macro also handles vector
+		// types correctly.
+		mode = TYPE_MODE(ttype);
+
+		if (pass_by_reference(&cum_v, mode, ttype, true)) {
+			mode = Pmode;
+			ttype = build_pointer_type(ttype);
+		}
+
+		parm_rtx = zip_function_arg(cum, mode, ttype, 0);
+		zip_function_arg_advance(cum, mode, ttype, 0);
+		if (!parm_rtx)
+			continue;
+
+		// If the argument is passed in a register that is NOT a
+		// call-used register, our prologue/epilogue would clobber
+		// it before the sibling reads it -- refuse the sibcall.
+		if (REG_P(parm_rtx)&&(REGNO(parm_rtx))
+				&&(REGNO(parm_rtx)<sizeof(zip_call_used_register))
+				&&(!zip_call_used_register[REGNO(parm_rtx)]))
+			return false;
+	}
+
+	return true;
+
+	// We also need to check if the return types are the same ... or
+	// will GCC handle that for us?
+}
+
+/* Canonicalize a comparison for the ZipCPU, which natively supports
+ * only EQ, NE, LT, GE, LTU and GEU.  The remaining codes are rewritten
+ * either by bumping a constant second operand or by swapping operands.
+ *
+ * NOTE(review): the PRESERVE_OP0 argument is currently ignored -- the
+ * operand-swapping paths below run regardless of it; verify callers.
+ */
+void	zip_canonicalize_comparison(int *code, rtx *op0, rtx *op1,
+		bool preserve_op0)
+{
+	ZIPDEBUGFLAG(dbg, false);
+	bool	reverse = false;
+
+	if (dbg) fprintf(stderr, "CANONICALIZE ...%s\n", (preserve_op0)?"(Preserve Op0)":"");
+	if (dbg) zip_debug_rtx_pfx("CODE", gen_rtx_fmt_ee((rtx_code)*code, VOIDmode, gen_rtx_REG(CCmode,zip_CC), const0_rtx));
+	if (dbg) zip_debug_rtx_pfx("OP0 ", *op0);
+	if (dbg) zip_debug_rtx_pfx("OP1 ", *op1);
+
+	// Native mappings onto the condition-code bits:
+	// Z	->	Z
+	// NZ	->	!Z
+	// LT	->	N
+	// GE	->	!N
+	// LTU	->	C
+	// GEU	->	!C
+	//
+	// The rest need rewriting:
+	// LTE	->	GTE w/ swapped operands
+	// GT	->	LT  w/ swapped operands
+	// GTU	->	LTU w/ swapped operands
+	// LEU	->	GEU w/ swapped operands
+	//
+
+	// First move any constant (or PLUS expression) into operand one.
+	if ((CONST_INT_P(*op0))||(GET_CODE(*op0) == PLUS)) {
+		rtx	tmp = *op0;
+		*op0 = *op1;
+		*op1 = tmp;
+		*code = (int)swap_condition((enum rtx_code)*code);
+	}
+
+	if (*code == GTU) {
+		if (REG_P(*op1)) {
+			//; Swap the operands below
+			reverse = true;
+		} else if (CONST_INT_P(*op1)) {
+			//; A >u B  ==  A >=u B+1
+			//; Add one to the integer constant,
+			//; and use a GEU comparison.
+			//; NOTE(review): wraps if B is the maximum value.
+			*code = GEU;
+			*op1 = GEN_INT(INTVAL(*op1)+1);
+		} else {
+			//; Swap the operands below
+			reverse = true;
+		}
+	} else if (*code == LEU) {
+		if (REG_P(*op1)) {
+			reverse = true;
+		} else if (CONST_INT_P(*op1)) {
+			//; A <=u B  ==  A <u B+1
+			//; Add one to the integer constant,
+			//; and use an LTU comparison.
+			*op1 = GEN_INT(INTVAL(*op1)+1);
+			*code = LTU;
+		} else {
+			reverse = true;
+		}
+	} else if (*code == LE) {
+		if (REG_P(*op1)) {
+			reverse = true;
+		} else if (CONST_INT_P(*op1)) {
+			//; A <= B  ==  A < B+1
+			//; Add one to the integer constant,
+			//; and use an LT comparison.  (The original code
+			//; subtracted one, testing A < B-1 == A <= B-2.)
+			*op1 = GEN_INT(INTVAL(*op1)+1);
+			*code = LT;
+		} else {
+			reverse = true;
+		}
+	} else if (*code == GT) {
+		if (REG_P(*op1)) {
+			//; Swap the operands below
+			reverse = true;
+		} else if (CONST_INT_P(*op1)) {
+			//; A >  B  ==  A >= B+1
+			//; Add one to the integer constant,
+			//; and use a GE comparison.
+			*op1 = GEN_INT(INTVAL(*op1)+1);
+			*code = GE;
+		} else {
+			reverse = true;
+		}
+	}
+
+	if (reverse) {
+		// Swap the operands and flip the sense of the comparison.
+		rtx tem = *op0;
+		*op0 = *op1;
+		*op1 = tem;
+		*code = (int)swap_condition((enum rtx_code)*code);
+	}
+}
+
+/* TARGET_FIXED_CONDITION_CODE_REGS: the ZipCPU has exactly one,
+ * fixed condition-code register (zip_CC) and no second CC register. */
+static bool
+zip_fixed_condition_code_regs(unsigned int *a, unsigned int *b) {
+	*b = INVALID_REGNUM;	// No secondary CC register
+	*a = zip_CC;
+	return true;
+}
+
+
+/* Emit the trampoline code template.  Totally buggy -- we can't really
+ * return pointers to nested functions on this target (and
+ * zip_trampoline_init() aborts). */
+static void
+zip_asm_trampoline_template(FILE *f)
+{
+	static const char *const	insns[] = {
+		"\tbrev\t0,r1\n",
+		"\tldilo\t0,r1\n",
+		"\tjmp r1\n"
+	};
+
+	for(unsigned k=0; k<sizeof(insns)/sizeof(insns[0]); k++)
+		fprintf(f, "%s", insns[k]);
+}
+
+/* Worker function for TARGET_TRAMPOLINE_INIT.  Trampolines (runtime
+ * pointers to nested functions) are unimplemented on the ZipCPU, so
+ * abort if one is ever requested. */
+static void
+zip_trampoline_init(rtx m_tramp ATTRIBUTE_UNUSED,
+	tree fndecl ATTRIBUTE_UNUSED,
+	rtx chain_value ATTRIBUTE_UNUSED) {
+	abort();	// Not yet implemented
+}
+
+/* Register one ZipCPU builtin NAME of type TYPE, remembering both its
+ * declaration and the insn ICODE used to expand it, so that
+ * zip_builtin_decl() and zip_expand_builtin() can look them up. */
+static tree
+def_builtin(const char *name, enum insn_code icode, enum ZIP_BUILTIN_ID_CODE code,
+	tree type)
+{
+	tree	decl;
+
+	decl = add_builtin_function(name, type, code, BUILT_IN_MD,
+			NULL, NULL_TREE);
+	if (decl != NULL_TREE) {
+		zip_builtins[code] = decl;
+		zip_builtins_icode[code] = icode;
+	}
+
+	return decl;
+}
+
+/* TARGET_INIT_BUILTINS: register the ZipCPU machine-specific builtins.
+ * Each builtin is registered only when the corresponding insn pattern
+ * exists (the HAVE_zip_* macros come from the generated insn codes). */
+void	zip_init_builtins(void) {
+
+  tree void_ftype_void = build_function_type_list(void_type_node, NULL_TREE);
+#ifdef	HAVE_zip_rtu
+  def_builtin("zip_rtu", CODE_FOR_zip_rtu, ZIP_BUILTIN_RTU, void_ftype_void);
+#endif
+#ifdef	HAVE_zip_halt
+  def_builtin("zip_halt",  CODE_FOR_zip_halt,  ZIP_BUILTIN_HALT, void_ftype_void);
+#endif
+#ifdef	HAVE_zip_busy
+  def_builtin("zip_busy",  CODE_FOR_zip_busy,  ZIP_BUILTIN_BUSY, void_ftype_void);
+#endif
+#ifdef	HAVE_zip_idle
+  def_builtin("zip_idle", CODE_FOR_zip_idle, ZIP_BUILTIN_IDLE, void_ftype_void);
+#endif
+
+#ifdef	HAVE_zip_syscall
+// Support int SYSCALL(callID, int a, int b, int c);
+// NOTE(review): the type built below is void(void), which does not
+// match the four-argument signature the comment above describes --
+// verify which is intended.
+  def_builtin("zip_syscall", CODE_FOR_zip_syscall, ZIP_BUILTIN_SYSCALL,
+  			build_function_type_list(void_type_node, NULL_TREE));
+#endif
+
+#ifdef	HAVE_zip_save_context
+  // void zip_save_context(void *): dump the CPU context to a buffer
+  def_builtin("zip_save_context", CODE_FOR_zip_save_context, ZIP_BUILTIN_SAVE_CONTEXT, 
+  		build_function_type_list(void_type_node, ptr_type_node, 0));
+#endif
+
+#ifdef	HAVE_zip_restore_context
+  // void zip_restore_context(void *): reload a context from a buffer
+  def_builtin("zip_restore_context", CODE_FOR_zip_restore_context, ZIP_BUILTIN_RESTORE_CONTEXT,
+  	build_function_type_list(void_type_node, ptr_type_node, 0));
+#endif
+
+#ifdef	HAVE_zip_bitrev
+  // unsigned zip_bitrev(unsigned): bit-reverse a word
+  def_builtin("zip_bitrev", CODE_FOR_zip_bitrev, ZIP_BUILTIN_BITREV,
+  	build_function_type_list(unsigned_type_node, unsigned_type_node,
+		NULL_TREE));
+#endif
+
+#ifdef	HAVE_zip_cc
+  // unsigned zip_cc(void): read the CC register
+  def_builtin("zip_cc", CODE_FOR_zip_cc, ZIP_BUILTIN_CC,
+  	build_function_type_list(unsigned_type_node, NULL_TREE));
+#endif
+
+#ifdef	HAVE_zip_ucc
+  // unsigned zip_ucc(void): read the user CC register
+  def_builtin("zip_ucc", CODE_FOR_zip_ucc, ZIP_BUILTIN_UCC,
+  	build_function_type_list(unsigned_type_node, NULL_TREE));
+#endif
+
+}
+
+/* TARGET_BUILTIN_DECL: map a builtin code back onto the declaration
+ * recorded by def_builtin(), or error_mark_node if out of range. */
+static tree
+zip_builtin_decl(unsigned zip_builtin_code, bool initialize_p ATTRIBUTE_UNUSED)
+{
+  if (zip_builtin_code < ZIP_BUILTIN_MAX)
+    return zip_builtins[zip_builtin_code];
+  return error_mark_node;
+}
+
+/* TARGET_EXPAND_BUILTIN: expand a call EXP to one of the builtins
+ * registered in zip_init_builtins() into RTL.  TARGET is a suggested
+ * result register (may be NULL).  Returns the result rtx for
+ * value-returning builtins, const0_rtx for void ones, and NULL_RTX on
+ * a malformed argument. */
+static rtx
+zip_expand_builtin(tree exp, rtx target,
+		rtx subtarget ATTRIBUTE_UNUSED,
+		machine_mode tmode ATTRIBUTE_UNUSED,
+		int	ignore ATTRIBUTE_UNUSED)
+{
+	tree	fndecl = TREE_OPERAND(CALL_EXPR_FN(exp), 0);
+	bool	nonvoid = (TREE_TYPE(TREE_TYPE(fndecl)) != void_type_node);
+	enum	ZIP_BUILTIN_ID_CODE code=(enum ZIP_BUILTIN_ID_CODE)DECL_FUNCTION_CODE(fndecl);
+	enum	insn_code icode = zip_builtins_icode[code];
+	rtx	pat, op[5];	// only op[0] is used below
+	call_expr_arg_iterator	iter;
+	tree	arg;
+
+	// save/restore context take a single pointer argument,
+	// which must live in a register.
+	if ((code == ZIP_BUILTIN_SAVE_CONTEXT)
+			||(code == ZIP_BUILTIN_RESTORE_CONTEXT)) {
+		arg = first_call_expr_arg(exp, &iter);
+		if (arg == error_mark_node)
+			return NULL_RTX;
+		op[0] = expand_normal(arg);
+		if (GET_CODE(op[0]) != REG)
+			op[0] = force_reg(Pmode, op[0]);
+		pat = GEN_FCN(icode)(op[0]);
+	} else if (code == ZIP_BUILTIN_BITREV) {
+		// One input operand, one result register.
+		arg = first_call_expr_arg(exp, &iter);
+		if (arg == error_mark_node) {
+			return NULL_RTX;
+		}
+		op[0] = expand_normal(arg);
+		if (!target)
+			target = gen_reg_rtx(SImode);
+		pat = GEN_FCN(icode)(target, op[0]);
+	} else if ((code == ZIP_BUILTIN_CC)||(code == ZIP_BUILTIN_UCC)) {
+		// No inputs, one result register.
+		if (!target)
+			target = gen_reg_rtx(SImode);
+		pat = GEN_FCN(icode)(target);
+	} else // RTU, HALT, IDLE
+		pat = GEN_FCN(icode)();
+	if (!pat)
+		return NULL_RTX;
+	emit_insn(pat);
+	return (nonvoid ? target : const0_rtx);
+}
+
+/* TARGET_SCALAR_MODE_SUPPORTED_P.  QI/HI/SI are native; DI only when
+ * ZIP_HAS_DI; SF and DF are claimed as supported but may be emulated
+ * in software. */
+static	bool
+zip_scalar_mode_supported_p(enum machine_mode mode)
+{
+	if ((mode==QImode)||(mode==HImode)||(mode==SImode))
+		return true;	// Native integer sizes
+	if (mode == DImode)
+		return (ZIP_HAS_DI)?true:false;
+	// SFmode is emulated unless an FPU is present;
+	// DFmode must always be emulated.
+	return ((mode==SFmode)||(mode==DFmode));
+}
+
+/* TARGET_LIBGCC_FLOATING_MODE_SUPPORTED_P: libgcc provides (possibly
+ * software-emulated) support for single and double precision only. */
+static	bool
+zip_libgcc_floating_mode_supported_p(enum machine_mode mode)
+{
+	if (mode == SFmode)
+		return true;
+	return (mode == DFmode);
+}
+
+/* TARGET_ADDRESS_COST: every legitimate ZipCPU address (register, or
+ * register plus small immediate) costs the same, so report a constant. */
+static	int
+zip_address_cost(rtx addr ATTRIBUTE_UNUSED,
+	enum machine_mode mode ATTRIBUTE_UNUSED,
+	addr_space_t as ATTRIBUTE_UNUSED, bool spd ATTRIBUTE_UNUSED) {
+	return 1;	// All addressing modes are equally cheap
+}
+
+/* TARGET_MODE_DEPENDENT_ADDRESS_P: no ZipCPU addressing form changes
+ * validity or meaning with the mode of the access. */
+static	bool
+zip_mode_dependent_address_p(const_rtx addr ATTRIBUTE_UNUSED,
+	addr_space_t as ATTRIBUTE_UNUSED) {
+	return false;	// Addresses are mode independent
+}
+
+/* Debug helper: print STR to stderr, preceded by the prefix PFX and
+ * indented two spaces per nesting level LVL. */
+static void
+zip_debug_print(const char *pfx, int lvl, const char *str) {
+	int	i;
+
+	// The original guarded the prefix with ((true)||(lvl==0)), whose
+	// else branch was unreachable; the prefix is always printed.
+	fprintf(stderr, "%s", pfx);
+	for(i=0; i<lvl; i++)
+		fprintf(stderr, "  ");
+	fprintf(stderr, "%s\n", str);
+}
+
+/* Debug helper: like zip_debug_print(), but suffix STR with a short
+ * name for the machine mode M. */
+static void
+zip_debug_print_m(const char *pfx, int lvl, const char *str, enum machine_mode m) {
+	int	i;
+
+	// As in zip_debug_print(), the unreachable no-prefix branch of
+	// the original ((true)||(lvl==0)) test has been removed.
+	fprintf(stderr, "%s", pfx);
+	for(i=0; i<lvl; i++)
+		fprintf(stderr, "  ");
+	switch(m) {
+		case VOIDmode:
+			fprintf(stderr, "%s:V\n", str);
+			break;
+		case BLKmode:
+			fprintf(stderr, "%s:BLK\n", str);
+			break;
+		case BImode:
+			fprintf(stderr, "%s:BI\n", str);
+			break;
+		case QImode:
+			fprintf(stderr, "%s:QI\n", str);
+			break;
+		case HImode:
+			fprintf(stderr, "%s:HI\n", str);
+			break;
+#ifdef	HAVE_SImode
+		case SImode:
+			fprintf(stderr, "%s:SI\n", str);
+			break;
+#endif
+#ifdef	HAVE_DImode
+		case DImode:
+			fprintf(stderr, "%s:DI\n", str);
+			break;
+#endif
+		case CCmode:
+			fprintf(stderr, "%s:CC\n", str);
+			break;
+		default:
+			fprintf(stderr, "%s:?\n", str);
+	}
+}
+
+/* Recursive worker for zip_debug_rtx()/zip_debug_rtx_pfx(): pretty
+ * print the rtx X to stderr, each line preceded by PFX and indented by
+ * nesting level LVL.  Codes not handled here fall back on debug_rtx().
+ */
+static	void
+zip_debug_rtx_1(const char *pfx, const_rtx x, int lvl) {
+	if (x == NULL_RTX) {
+		zip_debug_print(pfx, lvl, "(NULL-RTX)");
+		return;
+	} else if (GET_CODE(x) >= NUM_RTX_CODE) {
+		// Valid codes are 0 .. NUM_RTX_CODE-1, so test with >=
+		// (the original used > and missed the boundary value).
+		char	buf[64];
+		sprintf(buf, "(BAD-RTX-CODE %d)", GET_CODE(x));
+		zip_debug_print(pfx, lvl, buf);
+		gcc_assert(0 && "Bad RTX Code");
+		return;
+	} switch(GET_CODE(x)) { // rtl.def
+	case PARALLEL:
+		zip_debug_print(pfx, lvl, "(PARALLEL");
+		if (XVEC(x,0) != NULL)
+			for(int j=0; j<XVECLEN(x,0);j++)
+				zip_debug_rtx_1(pfx, XVECEXP(x,0,j), lvl+1);
+		zip_debug_print(pfx, lvl, ")");
+		debug_rtx(x);
+		break;
+	case INT_LIST: zip_debug_print(pfx, lvl, "(INT-LIST"); break;
+	case SEQUENCE:
+		zip_debug_print(pfx, lvl, "(SEQUENCE");
+		for(int j=0; j<XVECLEN(x,0);j++)
+			zip_debug_rtx_1(pfx, XVECEXP(x,0,j), lvl+1);
+		zip_debug_print(pfx, lvl, ")");
+		debug_rtx(x);
+		break;
+	case ADDRESS: zip_debug_print(pfx, lvl, "(ADDRESS"); break;
+	case DEBUG_INSN: zip_debug_print(pfx, lvl, "(DEBUG-INSN"); break;
+	case INSN:
+		zip_debug_print(pfx, lvl, "(INSN");
+		/*
+		{ const rtx_insn *tmp_rtx;
+		for(tmp_rtx = as_a <const rtx_insn *>(x); tmp_rtx != 0; tmp_rtx = NEXT_INSN(tmp_rtx)) {
+			zip_debug_rtx_1(tmp_rtx, lvl+1);
+		}}
+		*/
+		zip_debug_rtx_1(pfx, PATTERN(x), lvl+1);
+		zip_debug_print(pfx, lvl, ")");
+		debug_rtx(x);
+		break;
+	case JUMP_INSN: zip_debug_print(pfx, lvl, "(JUMP-INSN");
+		zip_debug_rtx_1(pfx, PATTERN(x), lvl+1);
+		zip_debug_print(pfx, lvl, ")");
+		/*
+		if (JUMP_LABEL(x)) {
+			if (GET_CODE(JUMP_LABEL(x)) == LABEL_REF) {
+				char	buf[64];
+				sprintf(buf, "(LABEL *.L%d))", CODE_LABEL_NUMBER(LABEL_REF_LABEL(JUMP_LABEL(x))));
+				zip_debug_print(pfx, lvl+1, buf);
+			} else if (GET_CODE(JUMP_LABEL(x))==CODE_LABEL) {
+				char	buf[64];
+				sprintf(buf, "(CODE_LABEL *.L%d))", CODE_LABEL_NUMBER(JUMP_LABEL(x)));
+				zip_debug_print(pfx, lvl+1, buf);
+			} else
+			zip_debug_print(pfx, lvl+1, "(w/Label))");
+		} else
+			zip_debug_print(pfx, lvl+1, "(NO label))");
+		debug_rtx(x);
+		*/
+		break;
+	case CALL:
+		zip_debug_print(pfx, lvl, "(CALL (Adr) (Args)");
+		zip_debug_rtx_1(pfx, XEXP(x,0), lvl+1);
+		zip_debug_rtx_1(pfx, XEXP(x,1), lvl+1);
+		zip_debug_print(pfx, lvl, ")");
+		break;
+	case CALL_INSN: zip_debug_print(pfx, lvl, "(CALL-INSN");
+		debug_rtx(x);
+		break;
+	case BARRIER: zip_debug_print(pfx, lvl, "(BARRIER)"); break;
+	case RETURN: zip_debug_print(pfx, lvl, "(RETURN)"); break;
+	case NOTE:
+		{	char buf[128];
+			// NOTE(review): insn notes are normally named via
+			// GET_NOTE_INSN_NAME(NOTE_KIND(x)); reading the
+			// mode through GET_REG_NOTE_NAME looks suspect --
+			// verify before trusting this output.
+			sprintf(buf, "(NOTE %s)", GET_REG_NOTE_NAME(GET_MODE(x)));
+			zip_debug_print(pfx, lvl, buf);
+		}break;
+	case COND_EXEC: zip_debug_print(pfx, lvl, "(COND_EXEC)");
+		debug_rtx(x);
+		break;
+	case ASM_INPUT: zip_debug_print(pfx, lvl, "(ASM INPUT)"); break;
+	case ASM_OPERANDS: zip_debug_print(pfx, lvl, "(ASM OPERANDS)"); break;
+	case UNSPEC: zip_debug_print(pfx, lvl, "(UNSPEC)"); break;
+	case UNSPEC_VOLATILE: zip_debug_print(pfx, lvl, "(UNSPEC_VOLATILE)"); break;
+	case CODE_LABEL:
+		{
+			char	buf[128];
+			sprintf(buf, "(CODE_LABEL *.L%d)", CODE_LABEL_NUMBER(x));
+			zip_debug_print_m(pfx, lvl, buf, GET_MODE(x));
+		} break;
+	case SET:
+		zip_debug_print_m(pfx, lvl, "(SET", GET_MODE(x));
+		zip_debug_rtx_1(pfx, SET_DEST(x),lvl+1);
+		zip_debug_rtx_1(pfx, SET_SRC(x),lvl+1);
+		zip_debug_print(pfx, lvl, ")");
+		debug_rtx(x);
+		break;
+	case REG: {
+		char buf[25], mstr[4];
+		mstr[0] = '\0';
+		if (GET_MODE(x) == QImode)
+			strcpy(mstr, ":QI");
+		else if (GET_MODE(x) == HImode)
+			strcpy(mstr, ":HI");
+		else if (GET_MODE(x) == VOIDmode)
+			strcpy(mstr, ":V");
+		if (REGNO(x) == zip_PC)
+			sprintf(buf, "(PC%s)", mstr);
+		else if (REGNO(x) == zip_CC)
+			sprintf(buf, "(CC%s)", mstr);
+		else if (REGNO(x) == zip_SP)
+			sprintf(buf, "(SP%s)", mstr);
+		else if (REGNO(x) == zip_FP)
+			sprintf(buf, "(REG%s FP)", mstr);
+		else if (REGNO(x) == zip_GOT)
+			sprintf(buf, "(REG%s GBL)", mstr);
+		else if (FUNCTION_VALUE_REGNO_P(REGNO(x)))
+			sprintf(buf, "(REG%s RTN-VL)", mstr);
+		else if (REGNO(x) == RETURN_ADDRESS_REGNUM)
+			sprintf(buf, "(REG%s RTN-AD)", mstr);
+		else
+			sprintf(buf, "(REG%s %d)", mstr, REGNO(x));
+		if (mstr[0])
+			zip_debug_print(pfx, lvl, buf);
+		else
+			zip_debug_print_m(pfx, lvl, buf, GET_MODE(x));
+		} break;
+	case IF_THEN_ELSE: // 51
+		zip_debug_print(pfx, lvl, "(IF-THEN-ELSE");
+		zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+		zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+		zip_debug_rtx_1(pfx, XEXP(x,2),lvl+1);
+		zip_debug_print(pfx, lvl, ")");
+		break;
+	case PC:
+		zip_debug_print(pfx, lvl, "(PC)");
+		break;
+	case CC0:
+		zip_debug_print(pfx, lvl, "(CC0)");
+		break;
+	case COMPARE:
+		zip_debug_print_m(pfx, lvl, "(COMPARE", GET_MODE(x));
+		zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+		zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+		zip_debug_print(pfx, lvl, ")");
+		break;
+	case CONST:
+		zip_debug_print_m(pfx, lvl, "(CONST", GET_MODE(x));
+		zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+		zip_debug_print(pfx, lvl, ")");
+		break;
+	case CONST_INT:
+		{ char buf[128];
+		if (GET_MODE(x)==QImode)
+			sprintf(buf, "(CONST_INT:QI %ld)", (long)INTVAL(x));
+		else if (GET_MODE(x)==VOIDmode)
+			sprintf(buf, "(CONST_INT:V %ld, %016lx)", (long)INTVAL(x),
+				(unsigned long)INTVAL(x));
+		else
+			sprintf(buf, "(CONST_INT:? %ld)", (long)INTVAL(x));
+		zip_debug_print(pfx, lvl, buf);
+		} break;
+	case LABEL_REF:
+		{ char buf[256];
+		sprintf(buf, "(LABEL *.L%d)", CODE_LABEL_NUMBER(LABEL_REF_LABEL(x)));
+		zip_debug_print(pfx, lvl, buf);
+		}
+		break;
+	case SYMBOL_REF:
+		{
+			char buf[1024];
+			sprintf(buf, "(SYMBOL: %s)", XSTR(x,0));
+			// fprintf(file, "%s", XSTR(x,0));
+			zip_debug_print(pfx, lvl, buf);
+		}
+		break;
+	case MEM:
+		zip_debug_print_m(pfx, lvl, "(MEM", GET_MODE(x));
+		zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+		zip_debug_print(pfx, lvl, ")");
+		break;
+	/*
+	case VALUE:
+		{
+			char buf[64];
+			sprintf(buf, "(VALUE: %d)", INTVAL(XEXP,0));
+			zip_debug_print_m(pfx, lvl, "buf", GET_MODE(x));
+		}
+		break;
+	*/
+	case PLUS:
+		zip_debug_print_m(pfx, lvl, "(PLUS", GET_MODE(x));
+		zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+		zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+		zip_debug_print(pfx, lvl, ")");
+		break;
+	case MINUS:
+		zip_debug_print_m(pfx, lvl, "(MINUS", GET_MODE(x));
+		zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+		zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+		zip_debug_print(pfx, lvl, ")");
+		break;
+	case AND:
+		zip_debug_print_m(pfx, lvl, "(AND", GET_MODE(x));
+		zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+		zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+		zip_debug_print(pfx, lvl, ")");
+		break;
+	case IOR:
+		zip_debug_print_m(pfx, lvl, "(OR", GET_MODE(x));
+		zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+		zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+		zip_debug_print(pfx, lvl, ")");
+		break;
+	case XOR:
+		zip_debug_print_m(pfx, lvl, "(XOR", GET_MODE(x));
+		zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+		zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+		zip_debug_print(pfx, lvl, ")");
+		break;
+	case MULT:
+		zip_debug_print_m(pfx, lvl, "(MULT", GET_MODE(x));
+		zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+		zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+		zip_debug_print(pfx, lvl, ")");
+		break;
+	case EQ:	// 
+		zip_debug_print_m(pfx, lvl, "(EQ", GET_MODE(x));
+		zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+		zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+		zip_debug_print(pfx, lvl, ")");
+		break;
+	case NE:	// 
+		zip_debug_print_m(pfx, lvl, "(NE", GET_MODE(x));
+		zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+		zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+		zip_debug_print(pfx, lvl, ")");
+		break;
+	case GE:	//
+		zip_debug_print_m(pfx, lvl, "(GE", GET_MODE(x));
+		zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+		zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+		zip_debug_print(pfx, lvl, ")");
+		break;
+	case GT:	//
+		zip_debug_print_m(pfx, lvl, "(GT", GET_MODE(x));
+		zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+		zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+		zip_debug_print(pfx, lvl, ")");
+		break;
+	case LE:	// 
+		zip_debug_print_m(pfx, lvl, "(LE", GET_MODE(x));
+		zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+		zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+		zip_debug_print(pfx, lvl, ")");
+		break;
+	case LT:	// 
+		zip_debug_print_m(pfx, lvl, "(LT", GET_MODE(x));
+		zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+		zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+		zip_debug_print(pfx, lvl, ")");
+		break;
+	case GEU:	// 
+		zip_debug_print_m(pfx, lvl, "(GEU", GET_MODE(x));
+		zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+		zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+		zip_debug_print(pfx, lvl, ")");
+		break;
+	case GTU:	// 
+		zip_debug_print_m(pfx, lvl, "(GTU", GET_MODE(x));
+		zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+		zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+		zip_debug_print(pfx, lvl, ")");
+		break;
+	case LEU:	// 
+		zip_debug_print_m(pfx, lvl, "(LEU", GET_MODE(x));
+		zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+		zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+		zip_debug_print(pfx, lvl, ")");
+		break;
+	case LTU:	// 
+		zip_debug_print_m(pfx, lvl, "(LTU", GET_MODE(x));
+		zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+		zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+		zip_debug_print(pfx, lvl, ")");
+		break;
+	case SCRATCH:	// 
+		zip_debug_print_m(pfx, lvl, "(SCRATCH)", GET_MODE(x));
+		break;
+	case SUBREG:
+		{ char buf[64], mstr[8];
+		if (GET_MODE(x) == QImode)
+			strcpy(mstr, ":QI");
+		else if (GET_MODE(x) == HImode)
+			strcpy(mstr, ":HI");
+		else if (GET_MODE(x) == SImode)
+			strcpy(mstr, ":SI");
+		else if (GET_MODE(x) == VOIDmode)
+			strcpy(mstr, ":V");
+		else
+			strcpy(mstr, ":?");
+		if (REG_P(XEXP(x,0))) {
+			int hreg = REGNO(XEXP(x,0)), mod = GET_MODE(XEXP(x,0)),
+				sb = SUBREG_BYTE(x);
+			if (mod==QImode)
+			sprintf(buf,"(SUBREG%s (REG:QI %d)/%d)",mstr,hreg, sb);
+			else if (mod==HImode)
+			sprintf(buf,"(SUBREG%s (REG:HI %d)/%d)",mstr,hreg, sb);
+			// Was a duplicated QImode test; SImode was intended.
+			else if (mod==SImode)
+			sprintf(buf,"(SUBREG%s (REG:SI %d)/%d)",mstr,hreg, sb);
+			else if (mod==VOIDmode)
+			sprintf(buf,"(SUBREG%s (REG:V %d)/%d)",mstr,hreg, sb);
+			else
+			sprintf(buf,"(SUBREG%s %d:?/%d)",mstr,hreg, sb);
+			zip_debug_print(pfx, lvl, buf);
+		} else if (MEM_P(XEXP(x,0))) {
+			sprintf(buf, "(SUBREG%s /%d", mstr,SUBREG_BYTE(x));
+			zip_debug_print(pfx, lvl, buf);
+			zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+			zip_debug_print(pfx, lvl, ")");
+		} else {
+			sprintf(buf, "(SUBREG%s UNK /%d", mstr,SUBREG_BYTE(x));
+			zip_debug_print(pfx, lvl, buf);
+			zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+			zip_debug_print(pfx, lvl, ")");
+		}}
+		break;
+	case ASHIFT:
+		zip_debug_print_m(pfx, lvl, "(ASHIFT", GET_MODE(x));
+		zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+		zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+		zip_debug_print(pfx, lvl, ")");
+		break;
+	case ASHIFTRT:
+		zip_debug_print_m(pfx, lvl, "(ASHIFTRT", GET_MODE(x));
+		zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+		zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+		zip_debug_print(pfx, lvl, ")");
+		break;
+	case LSHIFTRT:
+		zip_debug_print_m(pfx, lvl, "(LSHIFTRT", GET_MODE(x));
+		zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+		zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+		zip_debug_print(pfx, lvl, ")");
+		break;
+	case ZERO_EXTRACT:
+		zip_debug_print_m(pfx, lvl, "(ZERO_EXTRACT", GET_MODE(x));
+		zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+		zip_debug_print(pfx, lvl, ")");
+		break;
+	case ZERO_EXTEND:
+		zip_debug_print_m(pfx, lvl, "(ZERO_EXTEND", GET_MODE(x));
+		zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+		zip_debug_print(pfx, lvl, ")");
+		break;
+	default:
+		{ char buf[128];
+		sprintf(buf, "(? = %d) -- calling DEBUG-RTX", GET_CODE(x));
+		zip_debug_print(pfx, lvl, buf);
+		debug_rtx(x);
+		} break;
+	}
+}
+
+/* Dump rtx X to stderr, every line preceded by the prefix PFX. */
+void
+zip_debug_rtx_pfx(const char *pfx, const_rtx x) {
+	zip_debug_rtx_1(pfx, x, 0);
+}
+
+/* Dump rtx X to stderr with no prefix. */
+void
+zip_debug_rtx(const_rtx x) {
+	zip_debug_rtx_pfx("", x);
+}
+
+/* Print a human-readable name for condition code CCODE to stderr.
+ * Conditions tagged [!] have no direct ZipCPU encoding; anything
+ * unrecognized prints as a raw number. */
+void
+zip_debug_ccode(int ccode) {
+	const char	*name = NULL;
+
+	switch(ccode) {
+	case	EQ:	name = "EQ";	break;
+	case	NE:	name = "NE";	break;
+	case	GE:	name = "GE";	break;
+	case	LT:	name = "LT";	break;
+	case	LTU:	name = "LTU";	break;
+	case	GEU:	name = "GEU";	break;
+	case	GT:	name = "GT[!]";	break;
+	case	LE:	name = "LE[!]";	break;
+	case	GTU:	name = "GTU[!]";	break;
+	case	LEU:	name = "LEU[!]";	break;
+	default:	break;
+	}
+
+	if (name)
+		fprintf(stderr, "%s", name);
+	else
+		fprintf(stderr, "%d", ccode);
+}
+
+/* Per-insn debug dump hook; currently an intentional no-op stub. */
+void
+zip_debug_insn(rtx_insn *insn ATTRIBUTE_UNUSED) {
+}
+
+/* Dump every insn of basic block BB to stderr, preceded by a banner. */
+void
+zip_debug_bb(basic_block bb) {
+	rtx_insn	*insn;
+
+	fprintf(stderr, "************ BASIC-BLOCK ***************\n");
+	FOR_BB_INSNS(bb, insn)
+		zip_debug_rtx(insn);
+}
+
+
+/* Return true if X may serve as the B operand of a ZipCPU instruction:
+ * a register, a small integer immediate, or register-plus-offset.
+ * With STRICT set, only hard registers numbered below zip_CC are
+ * accepted as registers/bases. */
+static	bool
+zip_legitimate_opb(rtx x, bool strict)
+{
+	ZIPDEBUGFLAG(dbg, false);
+
+	if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB\n");
+	if (dbg) zip_debug_rtx_pfx("Test: ", x);
+
+	if (NULL_RTX == x)
+		return false;
+	else if ((GET_MODE(x) != QImode)
+			&&(GET_MODE(x) != HImode)
+			&&(GET_MODE(x) != SImode)
+			&&(GET_MODE(x) != VOIDmode)) {
+		// Only the native integer modes (or modeless constants)
+		// can appear here.
+		if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB -> Mode failure\n");
+		return false;
+	} else if ((strict)&&(REG_P(x))) {
+		// Strict: must be a real hard register below CC
+		// (presumably the general registers -- CC/PC excluded).
+		if (REGNO(x)<zip_CC) {
+			if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB -> (Reg)\n");
+			return true;
+		} else return false;
+	} else if (register_operand(x, GET_MODE(x))) {
+		// This also handles subregs
+		if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB -> (Reg)\n");
+		return true;
+	} else if ((CONST_INT_P(x))
+		&&(INTVAL(x) >= zip_min_opb_imm)
+		&&(INTVAL(x) <= zip_max_opb_imm)) {
+		// An immediate within the operand-B range.
+		if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB -> YES! (Const) %ld <= %ld <= %ld\n", (long)zip_min_opb_imm, (long)INTVAL(x), (long)zip_max_opb_imm);
+		return true;
+	// } else if ((GET_CODE(x) == LABEL_REF)||(GET_CODE(x)==CODE_LABEL)) {
+		// return true;
+	} else if (GET_CODE(x) == PLUS) {
+		// Is it a valid register?
+		rtx	regrtx = XEXP(x, 0);
+		if ((!strict)&&(!REG_P(regrtx))) {
+			if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB -> No (No reg in +%s)\n",
+			(GET_CODE(XEXP(x,1))==REG)?", reg in op[1]":"");
+			return false;
+		} else if ((strict)&&((!REG_P(XEXP(x,0)))||(REGNO(XEXP(x,0))>=zip_CC))) {
+			return false;
+		} if ((GET_CODE(XEXP(x, 1)) == CONST_INT)
+			&&(INTVAL(XEXP(x, 1)) <= zip_max_anchor_offset)
+			&&(INTVAL(XEXP(x, 1)) >= zip_min_anchor_offset)) {
+			// Register plus an in-range constant offset.
+			if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB -> YES! (reg+int)\n");
+			// if((INTVAL(XEXP(x,1))<0)&&(REGNO(XEXP(x,0))==zip_SP))
+				// gcc_unreachable();
+			return true;
+		} if ((GET_CODE(XEXP(x, 1)) == LABEL_REF)
+			||(GET_CODE(XEXP(x, 1)) == CODE_LABEL)
+			||(GET_CODE(XEXP(x, 1)) == SYMBOL_REF)) {
+			// While we can technically support this, the problem
+			// is that the symbol address could be anywhere, and we
+			// have no way of recovering if it's outside of our
+			// 14 allowable bits.
+			if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB -> No. (reg+lbl)\n");
+			return false;
+		}
+	}
+
+	if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB -> No\n");
+	if (dbg) zip_debug_rtx(x);
+	return false;
+}
+
+/* Like zip_legitimate_opb(), but for MOV instructions, whose
+ * register-plus-offset immediate range ([zip_min_mov_offset,
+ * zip_max_mov_offset]) is narrower than the general operand-B range. */
+static	bool
+zip_legitimate_move_operand_p(machine_mode mode ATTRIBUTE_UNUSED, rtx x, bool strict) {
+	const bool	dbg = ((ZIP_ALL_DEBUG_ON)||(false))&&(!ZIP_ALL_DEBUG_OFF);
+
+	if (dbg) fprintf(stderr, "ZIP-VALID-MOVE-OPERAND\n");
+	if (dbg) zip_debug_rtx_pfx("VMov?: ", x);
+
+	if (!zip_legitimate_opb(x, strict))
+		return false;
+	else if ((GET_CODE(x)==PLUS)&&(CONST_INT_P(XEXP(x,1)))) {
+		// Re-check reg+offset forms against the tighter MOV range.
+		if ((INTVAL(XEXP(x, 1)) > zip_max_mov_offset)
+			||(INTVAL(XEXP(x, 1)) < zip_min_mov_offset)) {
+			if (dbg) fprintf(stderr, "ZIP-VALID-MOVE-OPERAND -> NO! (reg+int), int out of bounds: %ld\n", (long)INTVAL(XEXP(x,1)));
+			return false;
+		}
+	}
+
+	if (dbg) fprintf(stderr, "ZIP-VALID-MOVE-OPERAND -> Yes\n");
+	if (dbg) zip_debug_rtx(x);
+	return true;
+}
+
+/* Predicate wrapper: MOV-class operand check, strict only once pseudos
+ * can no longer be created (i.e., during or after reload). */
+int
+zip_pd_mov_operand(rtx op)
+{
+	const bool	dbg = ((ZIP_ALL_DEBUG_ON)||(false))&&(!ZIP_ALL_DEBUG_OFF);
+
+	if (dbg) fprintf(stderr, "ZIP-VALID-MOV(predicate) for OPERAND\n");
+	return zip_legitimate_move_operand_p(VOIDmode, op, !can_create_pseudo_p());
+}
+
+/* Predicate: OP is a constant integer within the MOV-immediate range
+ * [zip_min_mov_offset, zip_max_mov_offset]. */
+int
+zip_pd_mvimm_operand(rtx op)
+{
+	const bool	dbg = ((ZIP_ALL_DEBUG_ON)||(false))&&(!ZIP_ALL_DEBUG_OFF);
+
+	if (dbg) fprintf(stderr, "ZIP-VALID-MVIMM(predicate) for OPERAND\n");
+	return ((CONST_INT_P(op))
+		&&(INTVAL(op) <= zip_max_mov_offset)
+		&&(INTVAL(op) >= zip_min_mov_offset));
+}
+
+/* Predicate: OP is a constant integer within the general immediate
+ * range [zip_min_anchor_offset, zip_max_anchor_offset]. */
+int
+zip_pd_imm_operand(rtx op)
+{
+	const bool	dbg = ((ZIP_ALL_DEBUG_ON)||(false))&&(!ZIP_ALL_DEBUG_OFF);
+
+	if (dbg) fprintf(stderr, "ZIP-VALID-IMM(predicate) for OPERAND\n");
+	return ((CONST_INT_P(op))
+		&&(INTVAL(op) <= zip_max_anchor_offset)
+		&&(INTVAL(op) >= zip_min_anchor_offset));
+}
+
+/* Address predicate: accept any legitimate operand-B expression except
+ * one based on the condition-code register. */
+int
+zip_address_operand(rtx op)
+{
+	const bool	dbg = ((ZIP_ALL_DEBUG_ON)||(false))&&(!ZIP_ALL_DEBUG_OFF);
+
+	if (dbg) fprintf(stderr, "ZIP-ADDRESS for OPERAND\n");
+	if ((REG_P(op))&&(REGNO(op)==zip_CC))
+		return false;	// CC itself is not addressable
+	else if ((GET_CODE(op) == PLUS)&&(REG_P(XEXP(op,0)))
+			&&(REGNO(XEXP(op,0))==zip_CC))
+		return false;	// ... nor may it serve as a base register
+	else
+		return zip_legitimate_opb(op, !can_create_pseudo_p());
+}
+
+/* Predicate wrapper around zip_legitimate_opb() for instruction
+ * operands.  Deliberately non-strict: pseudos remain acceptable. */
+int
+zip_pd_opb_operand(rtx op)
+{
+	ZIPDEBUGFLAG(dbg, false);
+
+	if (dbg) fprintf(stderr, "ZIP-OPB(predicate) for OPERAND\n");
+	return zip_legitimate_opb(op, false); //, !can_create_pseudo_p());
+}
+
+/* Constraint-side wrapper: is OP a legitimate address operand?  Strict
+ * once pseudos can no longer be created. */
+int
+zip_ct_address_operand(rtx op)
+{
+	ZIPDEBUGFLAG(dbg, false);
+
+	if (dbg) fprintf(stderr, "ZIP-ADDRESS(constraint) for OPERAND\n");
+	return zip_legitimate_opb(op, !can_create_pseudo_p());
+}
+
+/* Return true if X is a link-time constant address: a label, symbol,
+ * or integer constant, or a PLUS/MINUS combination of such constants. */
+int
+zip_const_address_operand(rtx x) {
+	ZIPDEBUGFLAG(dbg, false);
+
+	if (dbg) fprintf(stderr, "is ZIP-CONST-ADDRESS?\n");
+	if (dbg) zip_debug_rtx(x);
+	if ((GET_MODE(x) != SImode)&&(GET_MODE(x) != VOIDmode)) {
+		// This diagnostic was printed unconditionally; guard it
+		// with the debug flag like every other message here.
+		if (dbg) fprintf(stderr, "is ZIP-CONST-ADDRESS? -> NO, BAD MODE\n");
+		return false;
+	}
+	if ((GET_CODE(x) == LABEL_REF)
+			||(GET_CODE(x) == CODE_LABEL)
+			||(GET_CODE(x) == SYMBOL_REF)) {
+		if (dbg) fprintf(stderr, "is ZIP-CONST-ADDRESS? -> YES! (LBL)\n");
+		return true;
+	} else if (CONST_INT_P(x)) {
+		if (dbg) fprintf(stderr, "is ZIP-CONST-ADDRESS? -> YES! (INT)\n");
+		return true;
+	} else if (GET_CODE(x) == PLUS) {
+		// constant + integer offset
+		if (dbg) fprintf(stderr, "is ZIP-CONST-ADDRESS(PLUS)\n");
+		return ((zip_const_address_operand(XEXP(x,0)))
+			&&(CONST_INT_P(XEXP(x,1))));
+	} else if (GET_CODE(x) == MINUS) {
+		// difference of two constants
+		if (dbg) fprintf(stderr, "is ZIP-CONST-ADDRESS(MINUS)\n");
+		return ((zip_const_address_operand(XEXP(x,0)))
+			&&(zip_const_address_operand(XEXP(x,1))));
+	}
+
+	if (dbg) fprintf(stderr, "is ZIP-CONST-ADDRESS? -> No\n");
+	if (dbg) zip_debug_rtx(x);
+	return false;
+}
+
+/* Constraint-side wrapper around zip_const_address_operand(). */
+int
+zip_ct_const_address_operand(rtx x) {
+	ZIPDEBUGFLAG(dbg, false);
+
+	if (dbg) fprintf(stderr, "ZIP-CONST-ADDRESS(constraint)\n");
+	return zip_const_address_operand(x);
+}
+
+/* Predicate-side wrapper around zip_const_address_operand(). */
+int
+zip_pd_const_address_operand(rtx x) {
+	ZIPDEBUGFLAG(dbg, false);
+
+	if (dbg) fprintf(stderr, "ZIP-CONST-ADDRESS(predicate)\n");
+	return zip_const_address_operand(x);
+}
+
+
+/* TARGET_LEGITIMATE_ADDRESS_P: only register and register-plus-offset
+ * forms are accepted; constant addresses are (currently) rejected --
+ * see the disabled branch below. */
+static	bool
+zip_legitimate_address_p(machine_mode mode ATTRIBUTE_UNUSED, rtx x, bool strict)
+{
+	ZIPDEBUGFLAG(dbg, false);
+
+	if (dbg) fprintf(stderr, "Zip-LEGITIMATE-ADDRESS-P\n");
+	if (dbg) zip_debug_rtx(x);
+
+	// Only insist the register be a valid register if strict is true
+	if (zip_legitimate_opb(x, strict))
+		return true;
+	// else if (zip_const_address_operand(x))
+		// return true;
+
+	return false;
+}
+
+/* TARGET_LEGITIMIZE_ADDRESS: coerce X into a legitimate address by
+ * forcing any illegitimate sub-expression into a register.  Returns X
+ * unchanged when it is already acceptable. */
+static	rtx
+zip_legitimize_address(rtx x, rtx oldx ATTRIBUTE_UNUSED, machine_mode mode ATTRIBUTE_UNUSED) {
+	ZIPDEBUGFLAG(dbg, false);
+
+
+	if (dbg) zip_debug_rtx_pfx("LEGITIMIZE: ", x);
+	if (zip_legitimate_address_p(mode, x, !can_create_pseudo_p()))
+		return x;
+
+	if (dbg) zip_debug_rtx_pfx("ILLEGITIMATE: ", x);
+	if (GET_CODE(x)==PLUS) {
+		// if ((zip_legitimate_address_p(mode, XEXP(x,0),
+		//		!can_create_pseudo_p()))
+		//	&&(GETMODE(XEXP(x,1))==CONST_INT)) {
+		//}
+		// Force a non-register base into a register; if the sum
+		// still isn't legitimate, force the whole thing.
+		if (!REG_P(XEXP(x,0)))
+			XEXP(x,0) = force_reg(Pmode,XEXP(x,0));
+		if ((!zip_legitimate_address_p(mode, x, !can_create_pseudo_p()))
+			&&(!CONST_INT_P(XEXP(x,1))))
+			x = force_reg(GET_MODE(x),x);
+	} else if (MEM_P(x))
+		x = force_reg(GET_MODE(x),x);
+
+	if (dbg) zip_debug_rtx_pfx("LEGITIMATE: ", x);
+	return x;
+}
+
+/* Emit an assembler .equ directive defining NAME to be VALUE. */
+void
+zip_asm_output_def(FILE *stream, const char *name, const char *value)
+{
+	fprintf(stream, "\t.equ %s, %s\n", name, value);
+}
+
+/* Return the assembly template setting %0 to one when CONDITION holds
+ * and zero otherwise, assuming the flags are already set.  Conditions
+ * without a direct CC encoding are built from two conditional LDILOs. */
+const char *zip_set_zero_or_one(rtx condition, rtx dst) {
+	ZIPDEBUGFLAG(dbg, false);
+
+	if (dbg) fprintf(stderr, "ZIP::SET-ZERO-OR-ONE\n");
+	if (dbg) zip_debug_rtx_pfx("CND", condition);
+	if (dbg) zip_debug_rtx_pfx("REG", dst);
+	switch(GET_CODE(condition)) {
+	case EQ:	return "LDI\t0,%0\n\tLDILO.Z\t1,%0\t; set01_eq";
+	case NE:	return "LDI\t0,%0\n\tLDILO.NZ\t1,%0\t; set01_ne";
+	case LT:	return "LDI\t0,%0\n\tLDILO.LT\t1,%0\t; set01_lt";
+	// GT == !(LT||Z): start at one, clear on LT or Z.  (The original
+	// loaded one in both conditional LDILOs, so the result was one
+	// unconditionally; compare the correct GTU template below.)
+	case GT:	return "LDI\t1,%0\n\tLDILO.LT\t0,%0\n\tLDILO.Z\t0,%0\t; set01_gt";
+	case LE:	return "LDI\t0,%0\n\tLDILO.LT\t1,%0\n\tLDILO.Z\t1,%0\t; set01_le";
+	case GE:	return "LDI\t0,%0\n\tLDILO.GE\t1,%0\t; set01_ge";
+	case LTU:	return "LDI\t0,%0\n\tLDILO.C\t1,%0\t; set01_ltu";
+	case GEU:	return "LDI\t0,%0\n\tLDILO.NC\t1,%0\t; set01_geu";
+	case GTU:	return "LDI\t1,%0\n\tLDILO.C\t0,%0\n\tLDILO.Z\t0,%0\t; set01_gtu";
+	case LEU:	return "LDI\t0,%0\n\tLDILO.C\t1,%0\n\tLDILO.Z\t1,%0\t; set01_leu";
+	default:
+		zip_debug_rtx(condition);
+		internal_error("CSTORE Unsupported condition");
+		return NULL;
+	}
+}
+
+/* Return 1 when condition C maps directly onto a ZipCPU condition
+ * code, 0 when it must first be canonicalized (see
+ * zip_canonicalize_comparison). */
+int
+zip_supported_condition(int c) {
+	switch(c) {
+	case EQ: case NE: case LT: case GE: case LTU: case GEU:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+/* True when C is one of the signed (or equality) comparison codes. */
+bool
+zip_signed_comparison(int c) {
+	return ((c == EQ)||(c == NE)||(c == LT)||(c == GE));
+}
+
+int
+zip_expand_movdi(rtx dst, rtx src) {
+	ZIPDEBUGFLAG(dbg, false);
+
+	if (dbg) fprintf(stderr, "\nZIP::MOVDI\n");
+	if (dbg) zip_debug_rtx_pfx("DST", dst);
+	if (dbg) zip_debug_rtx_pfx("SRC", src);
+
+	// MOV !REG->!REG
+	if ((!REG_P(dst))&&(!REG_P(src))&&(can_create_pseudo_p())) {
+		// This includes:
+		//	MOV MEM->MEM
+		//	MOV IMM->MEM
+		if (dbg) fprintf(stderr, "ZIP::MOVDI -- !REG->!REG\n");
+
+		rtx tmp = gen_reg_rtx(DImode);
+		emit_insn(gen_movdi(tmp, src));
+		emit_insn(gen_movdi(dst, tmp));
+		return 1;
+	}
+
+	// MOV REG->REG
+	if ((REG_P(dst))&&(REG_P(src))) {
+		if (dbg) fprintf(stderr, "ZIP::MOVDI -- REG->REG\n");
+
+		emit_insn(gen_movdi_raw(dst, src));
+		return 1;
+	}
+
+	// MOV REG->MEM (a store instruction)
+	if ((MEM_P(dst))&&(REG_P(src))) {
+		rtx	addr = XEXP(dst,0);
+		long	offset = 0;
+		if ((GET_CODE(addr)==PLUS)&&(CONST_INT_P(XEXP(addr,1))))
+			offset = INTVAL(XEXP(addr,1));
+
+		if (dbg) fprintf(stderr, "ZIP::MOVDI -- REG->MEM\n");
+		if (REG_P(addr)) {
+			emit_insn(gen_movdi_raw(dst, src));
+			return 1;
+		} else if ((GET_CODE(addr)==PLUS)
+			&&(REG_P(XEXP(addr,0)))
+			&&(CONST_INT_P(XEXP(addr,1)))
+			&&(offset>=(long)zip_min_anchor_offset)
+			&&(offset+4<(long)zip_max_anchor_offset)) {
+			// Demonstrated and works
+			emit_insn(gen_movdi_raw(dst, src));
+			return 1;
+		} else if (can_create_pseudo_p()) {
+			rtx tmp = gen_reg_rtx(Pmode);
+			emit_insn(gen_movsi(tmp, addr));
+			emit_insn(gen_movdi_raw(gen_rtx_MEM(DImode, tmp), src));
+			return 1;
+		}
+	}
+
+	// MOV MEM->REG (a load instruction)
+	if ((REG_P(dst))&&(MEM_P(src))) {
+		rtx addr = XEXP(src,0);
+		long	offset = 0;
+		if ((GET_CODE(addr)==PLUS)&&(CONST_INT_P(XEXP(addr,1))))
+			offset = INTVAL(XEXP(addr,1));
+
+		if (dbg) fprintf(stderr, "ZIP::MOVDI -- MEM->REG\n");
+		if (REG_P(addr)) {
+			if (dbg) fprintf(stderr, "ZIP::MOVDI -- MEM[R]->REG\n");
+			emit_insn(gen_movdi_raw(dst, src));
+			return 1;
+		} else if ((GET_CODE(addr)==PLUS)
+			&&(REG_P(XEXP(addr,0)))
+			&&(CONST_INT_P(XEXP(addr,1)))
+			&&(offset>=(long)zip_min_anchor_offset)
+			&&(offset+4<(long)zip_max_anchor_offset)) {
+			if (dbg) fprintf(stderr, "ZIP::MOVDI -- MEM[#+R]->REG -- DONE\n");
+			emit_insn(gen_movdi_raw(dst, src));
+			return 1;
+		} else if (can_create_pseudo_p()) {
+			if (dbg) fprintf(stderr, "ZIP::MOVDI -- LDI #,R, MEM[R]->REG\n");
+			rtx tmp = gen_reg_rtx(Pmode);
+			emit_insn(gen_movsi(tmp, addr));
+			emit_insn(gen_movdi_raw(dst,
+				gen_rtx_MEM(DImode, tmp)));
+			return 1;
+		} else if (dbg)
+			fprintf(stderr, "ZIP::MOVDI -- MEM[?]->REG (no match)\n");
+	}
+
+	// MOV #->REG (An LDI instruction, but for DIwords)
+	if ((CONST_INT_P(src))&&(REG_P(dst))) {
+		if (dbg) fprintf(stderr, "ZIP::MOVDI -- IMM->REG\n");
+		emit_insn(gen_movdi_raw(dst, src));
+		return 1;
+	}
+
+	return 0;
+}
+
+const char *
+zip_addsicc(rtx dst, rtx condition, rtx ifsrc, rtx addv) {
+	// We know upon entry that REG_P(dst) must be true
+	if (!REG_P(dst))
+		internal_error("%s","ADDSICC into something other than register");
+
+	if ((REG_P(dst))&&(REG_P(ifsrc))&&(REG_P(addv))
+		&&(REGNO(dst)!=REGNO(ifsrc))) {
+		switch (GET_CODE(condition)) {
+		case EQ: return "MOV.Z\t%2,%0\n\tADD.Z\t%3,%0";
+		case NE: return "MOV.NZ\t%2,%0\n\tADD.NZ\t%3,%0";
+		case LT: return "MOV.LT\t%2,%0\n\tADD.LT\t%3,%0";
+
+		case LE: return "MOV.LT\t%2,%0\n\tMOV.Z\t%2,%0\n\tADD.LT\t%3,%0\n\tADD.Z\t%3,%0";
+		case GE: return "MOV.GE\t%2,%0\n\tADD.GE\t%3,%0";
+
+		case GT: return "BLT\t.Laddsi%=\n\tBZ\t.Laddsi%=\n\tMOV\t%2,%0\n\tADD\t%3,%0\n.Laddsi%=:";
+		case LTU: return "MOV.C\t%2,%0\n\tADD.C\t%3,%0";
+
+		case LEU: return "MOV.C\t%2,%0\n\tMOV.Z\t%2,%0\n\tADD.C\t%3,%0\n\tADD.Z\t%3,%0";
+		case GEU: return "MOV.NC\t%2,%0\n\tADD.NC\t%3,%0";
+		case GTU: return "BZ\t.Laddsi%=\n\tMOV.NC\t%2,%0\n\tADD.NC\t%3,%0\n.Laddsi%=:";
+		default:
+			internal_error("%s", "Zip/No usable addsi expansion");
+			break;
+		}
+	}
+
+	if ((REG_P(ifsrc))&&(REGNO(dst)==REGNO(ifsrc))) {
+		switch (GET_CODE(condition)) {
+		case EQ: return "ADD.Z\t%3,%0";
+		case NE: return "ADD.NZ\t%3,%0";
+		case LT: return "ADD.LT\t%3,%0";
+		case LE: return "ADD.LT\t%3,%0\n\tADD.Z\t%3,%0";
+		case GE: return "ADD.GE\t%3,%0";
+		case GT: return "ADD.GE\t%3,%0\n\tSUB.Z\t%3,%0";
+		case LTU: return "ADD.C\t%3,%0";
+		case LEU: return "ADD.C\t%3,%0\n\tADD.Z\t%3,%0";
+		case GEU: return "ADD.NC\t%3,%0";
+		case GTU: return "SUB.Z\t%3,%0\n\tADD.NC\t%3,%0";
+		default:
+			internal_error("%s", "Zip/No usable addsi expansion");
+			break;
+		}
+	} else {
+		// MOV A+REG,REG
+		switch (GET_CODE(condition)) {
+		case EQ: return "MOV.Z\t%3+%2,%0";
+		case NE: return "MOV.NZ\t%3+%2,%0";
+		case LT: return "MOV.LT\t%3+%2,%0";
+		case GT: return "BLT\t.Laddcc%=\n\tBZ\t.Laddcc%=\n\tMOV\t%3+%2,%0\n.Laddcc%=:";
+		case LE: return "MOV.LT\t%3+%2,%0\n\tMOV.Z\t%3+%2,%0";
+		case GE: return "MOV.GE\t%3+%2,%0";
+		case LTU: return "MOV.C\t%3+%2,%0";
+		case LEU: return "MOV.C\t%3+%2,%0\n\tMOV.Z\t%3+%2,%0";
+		case GEU: return "MOV.NC\t%3+%2,%0";
+		case GTU: return "BZ\t.Laddcc%=\n\tMOV.NC\t%3+%2,%0\n.Laddcc%=:";
+		default:
+			internal_error("%s", "Zip/No usable addsi(reg,reg) expansion");
+			break;
+		}
+	}
+
+	return "BREAK";
+}
+
+static	int	zip_memory_move_cost(machine_mode mode, reg_class_t ATTRIBUTE_UNUSED, bool in ATTRIBUTE_UNUSED) {
+	int	rv = 14;
+	if ((mode == DImode)||(mode == DFmode))
+		rv += 2;
+	return rv;
+}
+
+// #warning "How do we tell the compiler LDI label is expensive as 2 ops"?
+static	bool	zip_cannot_modify_jumps_p(void) {
+	// Let's try their suggested approach, keeping us from modifying jumps
+	// after reload.  This should also allow our peephole2 optimizations
+	// to adjust things back to what they need to be if necessary.
+	return (reload_completed || reload_in_progress);
+}
+
+rtx_insn	*zip_ifcvt_info;
+
+void
+zip_ifcvt_modify_tests(ce_if_block *ce_info ATTRIBUTE_UNUSED, rtx *true_expr, rtx *false_expr) {
+	const bool	dbg = ((ZIP_ALL_DEBUG_ON)||(false))&&(!ZIP_ALL_DEBUG_OFF);
+	if (dbg) fprintf(stderr, "IFCVT-MODIFY-TESTS\n");
+	if (*true_expr) switch(GET_CODE(*true_expr)) {
+		// These are our unsupported conditions
+		case LE:
+		case GT:
+		case LEU:
+		case GTU:
+			if (dbg) fprintf(stderr, "TRUE, missing expr\n");
+			if (dbg) zip_debug_rtx(*true_expr);
+			*true_expr = NULL_RTX;
+			break;
+		default: // LT, GT, GTE, LTU, NE, EQ
+			break;
+	}
+
+	if (*false_expr) switch(GET_CODE(*false_expr)) {
+		case LE:
+		case GT:
+		case LEU:
+		case GTU:
+			if (dbg) fprintf(stderr, "FALSE, missing expr\n");
+			if (dbg) zip_debug_rtx(*false_expr);
+			*false_expr = NULL_RTX;
+		default:
+			break;
+	}
+	if ((dbg)&&((!*true_expr)||(!*false_expr)))
+		fprintf(stderr, "IFCVT-MODIFY-TESTS -- FAIL\n");
+}
+
+void
+zip_ifcvt_machdep_init(struct ce_if_block *ceinfo ATTRIBUTE_UNUSED) {
+/*
+if (!ceinfo->then_bb)
+	return;
+rtx_insn *insn;
+FOR_BB_INSNS(ceinfo->then_bb, insn) {
+	fprintf(stderr, "IFCVT -- INIT\n");
+	zip_debug_rtx_pfx("INIT-BB", insn);
+}
+*/
+/*
+	zip_ifcvt_info = NULL;
+	rtx_insn *insn, *ifinsn = NULL;
+	FOR_BB_INSNS(ceinfo->test_bb, insn) {
+		rtx	p;
+		p = single_set(insn);
+		if (!p) continue;
+		if (SET_DEST(p)==pc_rtx) {
+			ifinsn = insn;
+		}
+		if (!REG_P(SET_DEST(p)))
+			continue;
+		if (GET_MODE(SET_DEST(p))!=CCmode)
+			continue;
+		if (REGNO(SET_DEST(p))!=zip_CC)
+			continue;
+		zip_ifcvt_info = insn;
+	}
+
+	if (zip_ifcvt_info)
+		zip_debug_rtx_pfx("PUTATIVE-CMP",zip_ifcvt_info);
+	if (ifinsn)
+		zip_debug_rtx_pfx("PRIOR-JMP",ifinsn);
+*/
+}
+
+void
+zip_ifcvt_modify_insn(struct ce_if_block *ceinfo ATTRIBUTE_UNUSED,
+		rtx pattern ATTRIBUTE_UNUSED,
+		rtx_insn *insn ATTRIBUTE_UNUSED) {
+	// zip_debug_rtx_pfx("MODIFY-INSN: ", insn);
+}
+
+void
+zip_ifcvt_modify_cancel(struct ce_if_block *ceinfo ATTRIBUTE_UNUSED) {
+/*
+	fprintf(stderr, "IFCVT -- CANCEL\n");
+	zip_ifcvt_info = NULL;
+*/
+}
+
+void
+zip_ifcvt_modify_final(struct ce_if_block *ceinfo ATTRIBUTE_UNUSED) {
+/*
+rtx_insn *insn;
+FOR_BB_INSNS(ceinfo->test_bb, insn) {
+	fprintf(stderr, "IFCVT -- FINAL\n");
+	zip_debug_rtx_pfx("FINAL-TEST-BB", insn);
+}
+	zip_ifcvt_info = NULL;
+*/
+}
+
+
+int	zip_insn_sets_cc(rtx_insn *insn) {
+	return (get_attr_ccresult(insn)==CCRESULT_SET);
+}
+
+const char *
+zip_cbranchdi_const(rtx comparison,
+		rtx a ATTRIBUTE_UNUSED,
+		rtx b,
+		rtx label ATTRIBUTE_UNUSED) {
+	gcc_assert(CONST_INT_P(b));
+	long value = INTVAL(b);
+
+	// Look into the combine routines to find out why this routine never
+	// gets called.
+
+	switch(GET_CODE(comparison)) {
+	case EQ:
+		if (value < 0)
+		  return "CMP\t-1,%H1\t; cbranchdi/# EQ (neg)\n\tCMP.Z\t%2,%L1\n\tBZ\t%3";
+		else
+		  return "CMP\t0,%H1\t; cbranchdi/# EQ\n\tCMP.Z\t%2,%L1\n\tBZ\t%3";
+	case NE:
+		if (value < 0)
+		  return "CMP\t-1,%H1\t; cbranchdi/# NE (neg)\n\tCMP.Z\t%2,%L1\n\tBNZ\t%3";
+		else
+		  return "CMP\t0,%H1\t; cbranchdi/# NE\n\tCMP.Z\t%2,%L1\n\tBNZ\t%3";
+	case LE:
+		if (value == 0)
+			return "CMP\t0,%H1\t; cbranchdi/# LE 0\n\tBLT\t%3\n\tCMP.Z\t0,%L1\n\tBZ\t%3";
+		else if (value == -1)
+			return "CMP\t0,%H1\t; cbranchdi/# LE -1\n\tBLT\t%3";
+		else if (value < 0) {
+			char	tmp[128];
+			sprintf(tmp, "CMP\t-1,%%H1\t; cbranchdi/# LE (neg)\n"
+				"\tBLT\t.Lcmpdile%%=\n"
+				"\tBNZ\t%%3\n"
+				"\tCMP\t%ld,%%L1\n"
+				"\tBC\t%%3\n.Lcmpdile%%=:", (value+1l)&0x0ffffffff);
+			return ggc_alloc_string(tmp, -1);
+		} else { //; value > 0
+			char	tmp[128];
+			sprintf(tmp, "CMP\t0,%%H1\t; cbranchdi/# LE\n"
+				"\tBLT\t%%3\n"
+				"\tBNZ\t.Lcmple%%=\n"
+				"\tCMP\t%ld,%%L1\n"
+				"\tBC\t%%3\n"
+				".Lcmple%%=:", value-1);
+			return ggc_alloc_string(tmp, -1);
+		}
+	case LT:
+		if (value == 0)
+			return "CMP\t0,%H1\t; cbranchdi/# LT 0\n\tBLT\t%3";
+		else if (value < 0)
+			return "CMP\t-1,%H1\t; cbranchdi/# LT neg\n\tCMP.Z\t%2,%L1\n\tBC\t%3";
+		else
+			return "CMP\t0,%H1\t; cbranchdi/# LT\n"
+				"\tBLT\t%3\n"
+				"\tBNZ\t.Lcmplt%=\n"
+				"\tCMP\t%2,%L1\n"
+				"\tBC\t%3\n"
+				".Lcmplt%=:";
+	case GT:
+		if (value == 0)
+			return "CMP\t1,%H1\t; cbranchdi/# GT 0\n"
+				"\tBGE\t%3\n"
+				"\tBNZ\t.Lcmpgt%=\n"
+				"\tCMP\t0,%L1\n"
+				"\tBNZ\t%3\n"
+				".Lcmpgt%=:";
+		else if (value == -1)
+			return "CMP\t0,%H1\t; cbranchdi/# GT -1\n"
+				"\tBGE\t%3\n";
+		else if (value < 0) {
+			char	tmp[128];
+			sprintf(tmp, "CMP\t-1,%%H1\t; cbranchdi/# GT neg\n"
+				"\tBLT\t.Lcmpgt%%=\n"
+				"\tBNZ\t%%3\n"
+				"\tCMP\t%ld,%%L1\n"
+				"\tBNC\t%%3\n"
+				".Lcmpgt%%=:", value+1l);
+			return ggc_alloc_string(tmp, -1);
+		} else {
+			char	tmp[128];
+			sprintf(tmp, "CMP\t0,%%H1\t; cbranchdi/# GT\n"
+				"\tBLT\t.Lcmpgt%%=\n"
+				"\tBNZ\t%%3\n"
+				"\tCMP\t%ld,%%L1\n"
+				"\tBNC\t%%3\n"
+				".Lcmpgt%%=:", value+1l);
+			return ggc_alloc_string(tmp, -1);
+		}
+	case GE:
+		if (value == 0)
+			return "CMP\t0,%H1\t; cbranchdi/# GE 0\n"
+				"\tBLT\t.Lcmpge%=\n"
+				"\tBNZ\t%3\n"
+				"\tCMP\t0,%L1\n"
+				"\tBNC\t%3\n"
+				".Lcmpge%=:";
+		else if (value == -1)
+			return "CMP\t-1,%H1\t; cbranchdi/# GE -1\n"
+				"\tBLT\t.Lcmpge%=\n"
+				"\tBNZ\t%3\n"
+				"\tCMP\t-1,%L1\n"
+				"\tBZ\t%3\n"
+				".Lcmpge%=:";
+		else if (value < 0)
+			return "CMP\t-1,%H1\t; cbranchdi/# GE <\n"
+				"\tBLT\t.Lcmpge%=\n"
+				"\tBNZ\t%3\n"
+				"\tCMP\t%2,%L1\n"
+				"\tBNC\t%3\n"
+				".Lcmpge%=:";
+		else
+			return "CMP\t0,%H1\t; cbranchdi/# GE\n"
+				"\tBLT\t.Lcmpge%=\n"
+				"\tBNZ\t%3\n"
+				"\tCMP\t%2,%L1\n"
+				"\tBNC\t%3\n"
+				".Lcmpge%=:";
+	case LTU:
+		if (value == 0) { //; Impossible, cannot be < 0 unsignd
+			return "; cbranchdi/# LTU 0 (Impossible!)";
+		} else
+			return "CMP\t0,%H1\t; cbranchdi/#\n\tCMP.Z\t%2,%L1\n\tBC\t%3\n";
+	case LEU:
+		if (value == 0) { //; Only possible if == 0
+			return "CMP\t0,%H1\t; cbranchdi/# LEU 0\n"
+				"\tCMP.Z\t0,%L1\n"
+				"\tBZ\t%3";
+		} else {
+			//; Subtract one, and LTU works
+			char	tmp[128];
+			sprintf(tmp, "CMP\t0,%%H1\t; cbranchdi/# LEU\n"
+				"\tCMP.Z\t%ld,%%L1\n"
+				"\tBC\t%%3\n", value-1);
+			return ggc_alloc_string(tmp, -1);
+		}
+	case GTU:
+		if (value == 0) {
+			//; Equivalent to not equal to zero
+			return "CMP\t0,%H1\t; cbranchdi/# GTU 0\n\tCMP.Z\t0,%L1\n\tBNZ\t%3";
+		} else {
+			char	tmp[128];
+			sprintf(tmp, 
+				"CMP\t0,%%H1\t; cbranchdi/# GTU\n"
+				"\tBNZ\t%%3\n"
+				"\tCMP\t%ld,%%L1\n"
+				"\tBNC\t%%3\n", value+1);
+			return ggc_alloc_string(tmp, -1);
+		}
+	case GEU:
+		if (value == 0) //; Unsigned, always true
+			return "BRA\t%3\t; cbranchdi/# GEU 0";
+		else
+			return "CMP\t0,%H1\t; cbranchdi/# GEU\n"
+				"\tBNZ\t%3\n"
+				"\tCMP\t%2,%L1\n"
+				"\tBNC\t%3";
+	default:
+		gcc_unreachable();
+	}
+}
+
+const char *
+zip_cbranchdi_reg(rtx comparison,
+		rtx a ATTRIBUTE_UNUSED,
+		rtx b ATTRIBUTE_UNUSED,
+		rtx label ATTRIBUTE_UNUSED) {
+
+	switch(GET_CODE(comparison)) {
+		case EQ:
+			return "CMP\t%H2,%H1\t; cbranchdi/r EQ\n\tCMP.Z\t%L2,%L1\n\tBZ\t%3";
+		case NE:
+			return "CMP\t%H2,%H1\t; cbranchdi/r NE\n\tCMP.Z\t%L2,%L1\n\tBNZ\t%3";
+		case LE:
+			return "CMP\t%H2,%H1\t; cbranchdi/r LE\n"
+				"\tBLT\t%3\n"
+				"\tBNZ\t.Ldi%=\n"
+				"\tCMP\t%L1,%L2\n"
+				"\tBNC\t%3\n"
+				".Ldi%=:";
+		case GT:
+			return "CMP\t%H1,%H2\t; cbranchdi/r GT\n"
+				"\tBLT\t%3\n"
+				"\tBNZ\t.Ldi%=\n"
+				"\tCMP\t%L1,%L2\n"
+				"\tBC\t%3\n"
+				".Ldi%=:";
+		case LT:
+			return "CMP\t%H2,%H1\t; cbranchdi/r LT\n"
+				"\tBLT\t%3\n"
+				"\tBNZ\t.Ldi%=\n"
+				"\tCMP\t%L2,%L1\n"
+				"\tBC\t%3\n"
+				".Ldi%=:";
+		case GE:
+			return "CMP\t%H1,%H2\t; cbranchdi/r GE\n"
+				"\tBLT\t%3\n"
+				"\tBNZ\t.Ldi%=\n"
+				"\tCMP\t%L2,%L1\n"
+				"\tBNC\t%3\n"
+				".Ldi%=:";
+		case LTU:
+			return "CMP\t%H2,%H1\t; cbranchdi/r LTU\n"
+				"\tCMP.Z\t%L2,%L1\n"
+				"\tBC\t%3\n";
+		case LEU:
+			return "CMP\t%H1,%H2\t; cbranchdi/r LEU\n"
+				"\tBC\t.Ldi%=\n"	//; H1 > H2, skip
+				"\tCMP.Z\t%L1,%L2\n"	//; (H1==H2) test L1-L2
+				"\tBNC\t%3\n"		//; If (L1>=L2)||(H1>H2)
+				".Ldi%=:";
+		case GTU:
+			return "CMP\t%H1,%H2\t; cbranchdi/r GTU\n"
+				"\tCMP.Z\t%L1,%L2\n"
+				"\tBC\t%3";
+		case GEU:
+			return "CMP\t%H2,%H1\t; cbranchdi/r GEU\n"
+				"\tBC\t.Ldi%=\n"
+				"\tCMP.Z\t%L2,%L1\n"
+				"\tBNC\t%3\n"
+				".Ldi%=:";
+		default:
+			gcc_unreachable();
+	}
+}
+
+const char *
+zip_cbranchdi(rtx comparison, rtx a, rtx b, rtx label) {
+	if (REG_P(b))
+		return zip_cbranchdi_reg(comparison, a, b, label);
+	else
+		return zip_cbranchdi_const(comparison, a, b, label);
+}
+
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/config/zip/zipdbg.h gcc-6.2.0-zip/gcc/config/zip/zipdbg.h
--- gcc-6.2.0/gcc/config/zip/zipdbg.h	1969-12-31 19:00:00.000000000 -0500
+++ gcc-6.2.0-zip/gcc/config/zip/zipdbg.h	2017-02-17 16:47:25.727651898 -0500
@@ -0,0 +1,8 @@
+#define	DO_ZIP_DEBUGS
+#ifdef	DO_ZIP_DEBUGS
+#include <stdio.h>
+#define	ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s:%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+extern	void	zip_debug_rtx(const_rtx);
+#else
+#define	ZIP_DEBUG_LINE(STR,RTX)
+#endif
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/config/zip/zip-di.md gcc-6.2.0-zip/gcc/config/zip/zip-di.md
--- gcc-6.2.0/gcc/config/zip/zip-di.md	1969-12-31 19:00:00.000000000 -0500
+++ gcc-6.2.0-zip/gcc/config/zip/zip-di.md	2017-02-22 15:56:17.195319460 -0500
@@ -0,0 +1,528 @@
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Filename:	zip-di.md
+;;
+;; Project:	Zip CPU -- a small, lightweight, RISC CPU soft core
+;;
+;; Purpose:	This is the machine description of the Zip CPU as needed by the
+;;		GNU compiler collection (GCC).  Specifically, this is the
+;;	section of the description associated with 64-bit values and
+;;	arithmetic.
+;;
+;;
+;; Creator:	Dan Gisselquist, Ph.D.
+;;		Gisselquist Technology, LLC
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Copyright (C) 2015, Gisselquist Technology, LLC
+;;
+;; This program is free software (firmware): you can redistribute it and/or
+;; modify it under the terms of  the GNU General Public License as published
+;; by the Free Software Foundation, either version 3 of the License, or (at
+;; your option) any later version.
+;;
+;; This program is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY or
+;; FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+;; for more details.
+;;
+;; License:	GPL, v3, as defined and found on www.gnu.org,
+;;		http://www.gnu.org/licenses/gpl.html
+;;
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;
+;
+;
+(define_expand "movdi"
+	[(set (match_operand:DI 0 "nonimmediate_operand" "")
+		(match_operand:DI 1 "general_operand" ""))]
+	"(ZIP_HAS_DI)"
+	{
+		if (zip_expand_movdi(operands[0], operands[1]))
+			DONE;
+		FAIL;
+	}
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
+;
+;
+;
+(define_insn "movdi_raw"
+	[(set (match_operand:DI 0 "nonimmediate_operand" "=r,Q,r,r")
+		(match_operand:DI 1 "general_operand" "r,r,Q,i"))]
+	"(ZIP_HAS_DI)"
+	{
+		if ((REG_P(operands[0]))&&(REG_P(operands[1])))
+			return	"MOV %H1,%H0\t; MOV:DI\n\tMOV %L1,%L0";
+		else if (MEM_P(operands[0]))	//; StoreDI
+			return	"SW %H1,%0\t; Store:DI\n\tSW %L1,4+%0";
+		else if (MEM_P(operands[1]))	//; LoadDI
+			return	"LW %1,%H0\t; Load:DI\n\tLW 4+%1,%L0";
+		else if (CONST_INT_P(operands[1])) {
+			char	tmp[128];
+			HOST_WIDE_INT	v = INTVAL(operands[1]);
+			sprintf(tmp, "LDI\t0x%08x,%%H0\t; LDI #:DI,%%H0\n\tLDI\t0x%08x,%%L0",
+				(unsigned)(v>>32),
+				(unsigned)(v));
+			return ggc_alloc_string(tmp, -1);
+		} else
+			gcc_unreachable();
+	}
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
+;
+;
+;
+; ADD
+;
+;
+(define_insn "adddi3" ; Fastest/best instruction always goes first
+	[(set (match_operand:DI 0 "register_operand" "=r")
+		(plus:DI (match_operand:DI 1 "register_operand" "0")
+			(match_operand:DI 2 "register_operand" "r")))
+	(clobber (reg:CC CC_REG))
+	]
+	"(ZIP_HAS_DI)"
+	"ADD	%L2,%L0\n\tADD.C\t1,%H0\n\tADD\t%H2,%H0"
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
+;
+; SUB
+;
+;
+(define_insn "subdi3"
+	[(set (match_operand:DI 0 "register_operand" "=r")
+		(minus:DI (match_operand:DI 1 "register_operand" "0")
+			(match_operand:DI 2 "register_operand" "r")))
+	(clobber (reg:CC CC_REG))
+	]
+	"(ZIP_HAS_DI)"
+	"SUB	%L2,%L0\n\tSUB.C\t1,%H0\n\tSUB\t%H2,%H0"
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
+;
+; AND
+;
+;
+(define_insn "anddi3"
+	[(set (match_operand:DI 0 "register_operand" "=r")
+		(and:DI (match_operand:DI 1 "register_operand" "%0")
+			(match_operand:DI 2 "register_operand" "r")))
+	(clobber (reg:CC CC_REG))
+	]
+	"(ZIP_HAS_DI)"
+	"AND	%L2,%L0\t; AND:DI\n\tAND\t%H2,%H0"
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
+;
+; iOR
+;
+;
+(define_insn "iordi3"
+	[(set (match_operand:DI 0 "register_operand" "=r")
+		(ior:DI (match_operand:DI 1 "register_operand" "%0")
+			(match_operand:DI 2 "register_operand" "r")))
+	(clobber (reg:CC CC_REG))
+	]
+	"(ZIP_HAS_DI)"
+	"OR	%L2,%L0\t; OR:DI\n\tOR\t%H2,%H0"
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
+;
+; XOR
+;
+;
+(define_insn "xordi3"
+	[(set (match_operand:DI 0 "register_operand" "=r")
+		(xor:DI (match_operand:DI 1 "register_operand" "%0")
+			(match_operand:DI 2 "register_operand" "r")))
+	(clobber (reg:CC CC_REG))
+	]
+	"(ZIP_HAS_DI)"
+	"XOR	%L2,%L0\t; XOR:DI\n\tXOR\t%H2,%H0"
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
+; NEG
+;
+;
+(define_insn "negdi2"
+	[(set (match_operand:DI 0 "register_operand" "=r")
+		(neg:DI (match_operand:DI 1 "register_operand" "0")))
+	(clobber (reg:CC CC_REG))
+	]
+	"(ZIP_HAS_DI)"
+	"XOR	-1,%L0\t; NEG:DI\n\tXOR\t-1,%H0\n\tADD\t1,%L0\n\tADD.C\t1,%H0"
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
+;
+; ABS
+;
+;
+(define_insn "absdi2"
+	[(set (match_operand:DI 0 "register_operand" "=r")
+		(abs:DI (match_operand:DI 1 "register_operand" "0")))
+	(clobber (match_scratch:SI 2 "=r"))
+	(clobber (reg:CC CC_REG))
+	]
+	"(ZIP_HAS_DI)"
+	"CLR	%2	; ABSDI
+	TEST	%H0
+	LDILO.LT	1,%2
+	XOR.LT	-1,%L0
+	XOR.LT	-1,%H0
+	ADD	%2,%L0
+	ADD.C	1,%H0"
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
+; NOT
+;
+;
+(define_insn "one_cmpldi2"
+	[(set (match_operand:DI 0 "register_operand" "=r")
+		(not:DI (match_operand:DI 1 "register_operand" "0")))
+	(clobber (reg:CC CC_REG))
+	]
+	"(ZIP_HAS_DI)"
+	"XOR	-1,%L0\t; NOT:DI\n\tXOR\t-1,%H0"
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
+; Unsigned min/max
+;
+;
+(define_insn "umindi3"
+	[(set (match_operand:DI 0 "register_operand" "=r")
+		(umin:DI (match_operand:DI 1 "register_operand" "%0")
+			(match_operand:DI 2 "register_operand" "r")))
+	(clobber (reg:CC CC_REG))
+	]
+	"(ZIP_HAS_DI)"
+	"CMP	%H0,%H2	; umin:DI
+	CMP.Z	%L0,%L2
+	MOV.C	%H2,%H0
+	MOV.C	%L2,%L0"
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+(define_insn "umaxdi3"
+	[(set (match_operand:DI 0 "register_operand" "=r")
+		(umax:DI (match_operand:DI 1 "register_operand" "%0")
+			(match_operand:DI 2 "register_operand" "r")))
+	(clobber (reg:CC CC_REG))
+	]
+	"(ZIP_HAS_DI)"
+	"CMP	%H2,%H0	; umax:DI
+	CMP.Z	%L2,%L0
+	MOV.C	%H2,%H0
+	MOV.C	%L2,%L0"
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
+; Multiply
+;
+;
+(define_expand "muldi3"
+	[(parallel [(set (match_operand:DI 0 "register_operand" "=r")
+		(mult:DI (match_operand:DI 1 "register_operand" "r")
+			(match_operand:DI 2 "register_operand" "r")))
+	(clobber (match_dup 1))
+	(clobber (match_dup 2))
+	(clobber (match_scratch:SI 3 "=r"))
+	(clobber (reg:CC CC_REG))])]
+	"(ZIP_HAS_DI)")
+;
+(define_insn "muldi3_raw"
+	[(set (match_operand:DI 0 "register_operand" "=r")
+		(mult:DI (match_operand:DI 1 "register_operand" "r")
+			(match_operand:DI 2 "register_operand" "r")))
+	(clobber (match_dup 1))
+	(clobber (match_dup 2))
+	(clobber (match_scratch:SI 3 "=r"))
+	(clobber (reg:CC CC_REG))]
+	"(ZIP_HAS_DI)"
+	{
+		int	regno[3];
+		regno[0] = REGNO(operands[0]);
+		regno[1] = REGNO(operands[1]);
+		regno[2] = REGNO(operands[2]);
+		//; We need to adjust what we are doing based upon which
+		//; registers are in common.  We have a couple of cases:
+		//;
+		if ((regno[0] == regno[1])&&(regno[0] == regno[2])) {
+			//; RA = RA * RA
+			//;
+			//; (H0:L0) = (H0:L0) * (H0:L0)
+			//; (H0:L0) = (H0*2^32 + L0) * (H0 * 2^32 + L0)
+			//; (H0:L0) = (H0*H0*2^64 + (H0*L0+L0*H0)*2^32 + L0 *L0)
+			//;	= (H0*L0+L0*H1):(L0*L0)
+			//;    :L0  = LOPART(L0 * L0)
+			//;  H0     = HIPART(L0 * L0)
+			//;  H0    += LOPART(H0 * L0)
+			//;  H0    += LOPART(L0 * H0)
+			//;
+			//;  Rx = L0
+			//;  H0 *= L0  ( =   LOPART( HI * LO )
+			//;  H0 <<= 1  ( = 2*LOPART( HI * LO ) )
+			//;  Rx *= L0  ( =   HIPART( LO * LO )
+			//;  L0 *= L0  ( =   LOPART( LO * LO )
+			//;  H0 += Rx  ( = 2*LOPART( HI * LO ) + HIPART( LO *LO)
+			//;
+			return "; muldi3_raw/A (%H0:%L0) = (%H1:%L1) * (%H2:%L2)\n"
+				"\tMOV\t%L0,%3\n"
+				"\tMPY\t%L0,%H0\n"
+				"\tLSL\t1,%H0\n"
+				"\tMPYUHI\t%L0,%3\n"
+				"\tMPY\t%L0,%L0\n"
+				"\tADD\t%3,%H0";
+		} else if ((regno[0] != regno[1])&&(regno[1] == regno[2])) {
+			//; RA = RB * RB
+			//;
+			//; (H0:L0) = (H1:L1) * (H1:L1)
+			//; (H0:L0) = (H1*2^32 + L1) * (H1 * 2^32 + L1)
+			//; (H0:L0) = (H1*H1*2^64 + (H1*L1+L1*H1)*2^32 + L1 * L1)
+			//;	= (H1*L1+L1*H1):(L1*L1)
+			//;    :L0  = LOPART(L1 * L1)
+			//;  H0     = HIPART(L1 * L1)
+			//;  H0    += LOPART(H1 * L1)
+			//;  H0    += LOPART(L1 * H1)
+			//;
+			//; -------------------
+			//;     L0  = L1
+			//;     L0  = LOPART(L0 * L1)
+			//;     H0  = H1
+			//;     H0  = LOPART(H0 * L1)
+			//;     H0 <<= 1;	i.e. *= 2
+			//;     L1  = HIPART(L1 * L1)
+			//;     H0 += L1
+			//;
+			return "; muldi3_raw/B (%H0:%L0) = (%H1:%L1) * (%H2:%L2)\n"
+			"\tMOV\t%L1,%L0\n"
+			"\tMPY\t%L1,%L0\n"
+			"\tMOV\t%H1,%H0\n"
+			"\tMPY\t%L1,%H0\n"
+			"\tLSL\t1,%H0\n"
+			"\tMPYUHI\t%L1,%L1\n"
+			"\tADD\t%L2,%H0";
+		} else if ((regno[0] == regno[1])&&(regno[1] != regno[2])) {
+			//; RA = RA * RB, with scratch Rx
+			//;
+			//; (H0:L0) = (H0:L0) * (H1:L1)
+			//; (H0:L0) = (H0*2^32 + L0) * (H1 * 2^32 + L1)
+			//; (H0:L0) = (H0*H1*2^64 + (H0*L1+L0*H1)*2^32 + L0 *L1)
+			//;	= (H0*L1+L0*H1):(L0*L1)
+			//;     Rx  = L0
+			//;    :L0  = LOPART(L1 * R0)
+			//;  H0     = LOPART(H0 * L1)
+			//;  H0    += H1 = LOPART(Rx * H1)
+			//;  H0    += HIPART(L1 * Rx)
+			//;
+			return "; muldi3_raw/C (%H0:%L0) = (%H1:%L1) * (%H2:%L2)\n"
+			"\tMOV\t%L0,%3\n"
+			"\tMPY\t%L1,%L0\n"
+			"\tMOV\t%L1,%H0\n"
+			"\tMPY\t%H1,%H0\n"
+			"\tMPY\t%3,%H1\n"
+			"\tADD\t%H1,%H0\n"
+			"\tMPY\t%3,%L1\n"
+			"\tADD\t%L1,%H0";
+		} else {
+			//; RA = RB * RC
+			//;
+			//; (H0:L0) = (H1:L1) * (H2:L2)
+			//; (H0:L0) = (H1*2^32 + L1) * (H2 * 2^32 + L2)
+			//; (H0:L0) = (H1*H2*2^64 + (H1*L2+L1*H2)*2^32 + L1 *L2)
+			//;	= (H1*L2+L1*H2):(L1*L2)
+			//;    :L0  = LOPART(L1 * L2)
+			//;  H0     = HIPART(L1 * L2)
+			//;  H0    += LOPART(H1 * L2)
+			//;  H0    += LOPART(L1 * H2)
+			//;
+			//; We can re-order this to try to save some registers
+			//;
+			//;     H1 *= L0		// Was H1 * L2
+			//;    :L0  = LOPART(L1 * L2)
+			//;  H0     = LOPART(L1 * R1)
+			//;  H0    += HIPART(L1 * H2)
+			//;  H0    += H1
+			//;
+		     return "; muldi3_raw/D (%H0:%L0) = (%H1:%L1) * (%H2:%L2)\n"
+			"\tMPY	%L2,%H1	; H1 = H1 * L2\n"
+			"\tMPY	%L1,%H2	; H2 = L1 * L2\n"
+			"\tMOV	%L2,%L0	; H0:L0 = L1 * L2\n"
+			"\tMOV	%L2,%H0\n"
+			"\tMPY	%L1,%L0\n"
+			"\tMPYUHI	%L1,%H0\n"
+			"\tADD	%H2,%H0	; H0 += (H2 = L1 * H2)\n"
+			"\tADD	%H1,%H0	; H0 += (H1 = H1 * L2)";
+		}
+	}
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
+; Still missing DI instructions for smin:DI, smax:DI, movdicc, adddicc,
+;	div:di, divu:di (library routine)
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Conditional arithmetic instructions
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+;
+;
+(define_insn "cstoredi4" ; Store 0 or 1 in %0 based on cmp between %2&%3
+	[(set (match_operand:SI 0 "register_operand" "=r")
+		(if_then_else:SI (match_operator 1 "ordered_comparison_operator" 
+			[(match_operand:DI 2 "register_operand" "r")
+				(match_operand:DI 3 "register_operand" "r")])
+			(const_int 1) (const_int 0)))
+	(clobber (reg:CC CC_REG))]
+	"(ZIP_HAS_DI)&&(0)"
+	{
+		switch(GET_CODE(operands[1])) {
+		case EQ:	return "CLR\t%0\t; CSTORE-EQ\n\tCMP\t%H3,%H2\n\tCMP.Z\t%L3,%L2\n\tLDILO.Z\t1,%0\n";
+		case NE:	return "CLR\t%0\t; CSTORE-NE\n\tCMP\t%H3,%H2\n\tCMP.Z\t%L3,%L2\n\tLDILO.NZ\t1,%0\n";
+		//; Missing LT
+		//; Missing LE
+		//; Missing GT
+		//; Missing GE
+		case LTU:	return "CLR\t%0\t; CSTORE-LTU\n\tCMP\t%H3,%H2\n\tCMP.Z\t%L3,%L2\n\tLDILO.C\t1,%0\n";
+		case LEU:
+			return "CLR\t%0\t; CSTORE-LEU\n\tCMP\t%H2,%H3\n\tCMP.Z\t%L2,%L3\n\tLDILO.NC\t1,%0\n";
+		case GTU:	return "CLR\t%0\t; CSTORE-GTU\n\tCMP\t%H2,%H3\n\tCMP.Z\t%L2,%L3\n\tLDILO.C\t1,%0\n";
+		case GEU:
+			return "CLR\t%0\t; CSTORE-GEU\n\tCMP\t%H3,%H2\n\tCMP.Z\t%L3,%L2\n\tLDILO.NC\t1,%0\n";
+		default:
+			gcc_unreachable();
+		}
+	}
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Comparison instructions, both compare and test
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+;
+(define_expand "cmpdi"
+	[(set (reg:CC CC_REG) (compare:CC
+		(match_operand:DI 0 "register_operand" "r")
+		(match_operand:DI 1 "nonmemory_operand" "")))]
+	""
+	{
+		if (!REG_P(operands[1])) {
+			if (can_create_pseudo_p()) {
+				//; fprintf(stderr, "Generating pseudo register for compare\n");
+				rtx tmp = gen_reg_rtx(DImode);
+				emit_insn(gen_movdi(tmp,operands[1]));
+				operands[1] = tmp;
+				emit_insn(gen_cmpdi_reg(operands[0],tmp));
+				DONE;
+			} else FAIL;
+		}
+	})
+(define_insn "cmpdi_reg"
+	[(set (reg:CC CC_REG) (compare:CC
+		(match_operand:DI 0 "register_operand" "r")
+		(match_operand:DI 1 "register_operand" "r")))]
+	""
+	"CMP\t%H1,%H0
+	CMP.Z\t%L1,%L0"
+	[(set_attr "ccresult" "set")])
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Conditional move instructions, since these won't accept conditional
+;;	execution RTL
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+(define_expand "cbranchdi4"
+	[(set (pc) (if_then_else
+		(match_operator 0 "ordered_comparison_operator"
+			[(match_operand:DI 1 "register_operand" "r")
+				(match_operand:DI 2 "nonimmediate_operand" "")])
+			(label_ref (match_operand 3 "" ""))
+			(pc)))
+	(clobber (reg:CC CC_REG))]
+	"(ZIP_HAS_DI)"
+	{
+		if (!REG_P(operands[2])) {
+			if ((CONST_INT_P(operands[2]))
+				&&(INTVAL(operands[2])> -(1l<<17))
+				&&(INTVAL(operands[2])<(1l<<17)-1)) {
+				emit_jump_insn(gen_cbranchdi4_internal(operands[0],
+					operands[1], operands[2], operands[3]));
+				DONE;
+			} if (can_create_pseudo_p()) {
+				rtx tmp = gen_reg_rtx(DImode);
+				emit_insn(gen_movdi(tmp, operands[2]));
+				operands[2] = tmp;
+			}
+		}
+
+		if (REG_P(operands[2])) {
+			emit_jump_insn(gen_cbranchdi4_internal(operands[0],
+				operands[1], operands[2], operands[3]));
+			DONE;
+		}
+	})
+(define_insn "cbranchdi4_internal"
+	[(set (pc) (if_then_else
+		(match_operator 0 "ordered_comparison_operator"
+			[(match_operand:DI 1 "register_operand" "r,r,r")
+				(match_operand:DI 2 "nonmemory_operand" "K,x,r")])
+			(label_ref (match_operand 3 "" ""))
+			(pc)))
+	(clobber (reg:CC CC_REG))]
+	"(ZIP_HAS_DI)"
+	{
+		return zip_cbranchdi(operands[0], operands[1], operands[2], operands[3]);
+	}
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Unimplemented (or not yet implemented) RTL Codes
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+;
+;
+;(define_insn "addvdi4"
+;	)
+;(define_insn "subvdi4"
+;	)
+;(define_insn "mulvdi4"
+;	)
+;(define_insn "umulvdi4"
+;	)
+;(define_insn "umulvdi4"
+;	)
+;(define_insn "negvdi3"
+;	)
+;
+;(define_insn "maddsidi4"
+;(define_insn "umaddsidi4"
+;(define_insn "msubsidi4"
+;(define_insn "umsubsidi4"
+;
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/config/zip/zip-float.md gcc-6.2.0-zip/gcc/config/zip/zip-float.md
--- gcc-6.2.0/gcc/config/zip/zip-float.md	1969-12-31 19:00:00.000000000 -0500
+++ gcc-6.2.0-zip/gcc/config/zip/zip-float.md	2017-01-10 14:01:42.029341062 -0500
@@ -0,0 +1,138 @@
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Filename:	zip-float.md
+;;
+;; Project:	Zip CPU -- a small, lightweight, RISC CPU soft core
+;;
+;; Purpose:	This is the machine description of the ZipCPU floating point
+;;		unit (if installed).
+;;
+;;
+;; Creator:	Dan Gisselquist, Ph.D.
+;;		Gisselquist Technology, LLC
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Copyright (C) 2015,2017, Gisselquist Technology, LLC
+;;
+;; This program is free software (firmware): you can redistribute it and/or
+;; modify it under the terms of  the GNU General Public License as published
+;; by the Free Software Foundation, either version 3 of the License, or (at
+;; your option) any later version.
+;;
+;; This program is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY or
+;; FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+;; for more details.
+;;
+;; License:	GPL, v3, as defined and found on www.gnu.org,
+;;		http://www.gnu.org/licenses/gpl.html
+;;
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Floating point Op-codes
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+;
+(define_insn "addsf3"
+	[(set (match_operand:SF 0 "register_operand" "=r")
+		(plus:SF (match_operand:SF 1 "register_operand" "0")
+			(match_operand:SF 2 "register_operand" "r")))
+	(set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
+	"(ZIP_FPU)"
+	"FPADD	%2,%0"
+	[(set_attr "ccresult" "unknown")])
+(define_insn "subsf3"
+	[(set (match_operand:SF 0 "register_operand" "=r")
+		(minus:SF (match_operand:SF 1 "register_operand" "0")
+			(match_operand:SF 2 "register_operand" "r")))
+	(set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
+	"(ZIP_FPU)"
+	"FPSUB	%2,%0"
+	[(set_attr "ccresult" "unknown")])
+(define_insn "mulsf3"
+	[(set (match_operand:SF 0 "register_operand" "=r")
+		(mult:SF (match_operand:SF 1 "register_operand" "0")
+			(match_operand:SF 2 "register_operand" "r")))
+	(set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
+	"(ZIP_FPU)"
+	"FPMUL	%2,%0"
+	[(set_attr "ccresult" "unknown")])
+(define_insn "divsf3"
+	[(set (match_operand:SF 0 "register_operand" "=r")
+		(div:SF (match_operand:SF 1 "register_operand" "0")
+			(match_operand:SF 2 "register_operand" "r")))
+	(set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
+	"(ZIP_FPU)"
+	"FPDIV	%2,%0"
+	[(set_attr "ccresult" "unknown")])
+; (define_insn "floatsisf2"
+;	[(set (match_operand:SF 0 "register_operand" "=r"
+;		(float:QI (match_operand:SF 1 "register_operand" "r"))))
+;	(set (reg:CC CC_REG) (compare:CC (match_dup 1) (const_int 0)))]
+;	"(ZIP_FPU)"
+;	"FPI2F	%1,%0")
+; (define_insn "floatunssisf2" ... ?)
+; (define_insn "fix_truncsfsi2"
+;	[(set (match_operand:QI 0 "register_operand" "=r"
+;		(float:SF (match_operand:SF 1 "register_operand" "r"))))
+;	(set (reg:CC CC_REG) (compare:CC (match_dup 1) (const_int 0)))]
+;	"(ZIP_FPU)"
+;	"FPI2F	%1,%0")
+; (define_insn "nearbyintsf2" ... ?)
+; (define_insn "truncsfsi2" ... ?)
+(define_expand "negsf2"
+	[(set (match_operand:SF 0 "register_operand" "=r")
+		(neg:SF (match_operand:SF 1 "register_operand" "0")))
+	]
+	""
+	{
+		operands[0] = gen_rtx_SUBREG(SImode, operands[0], 0);
+		if (can_create_pseudo_p()) {
+			rtx tmp = gen_reg_rtx(SImode);
+			emit_insn(gen_movsi_ldi(tmp,gen_int_mode(0x80000000,SImode)));
+			emit_insn(gen_xorsi3(operands[0], operands[0], tmp));
+			DONE;
+		} else {
+			emit_insn(gen_zip_bitrev(operands[0],operands[0]));
+			emit_insn(gen_iorsi3(operands[0], operands[0],
+				gen_int_mode(1,SImode)));
+			emit_insn(gen_zip_bitrev(operands[0],operands[0]));
+			DONE;
+		}
+	})
+(define_expand "abssf2"
+	[(set (match_operand:SF 0 "register_operand" "=r")
+		(abs:SF (match_operand:SF 1 "register_operand" "0")))
+	]
+	""
+	{
+		operands[0] = gen_rtx_SUBREG(SImode, operands[0], 0);
+		if (can_create_pseudo_p()) {
+			rtx tmp = gen_reg_rtx(SImode);
+			emit_insn(gen_movsi_ldi(tmp,gen_int_mode(0x7fffffff,SImode)));
+			emit_insn(gen_andsi3(operands[0], operands[0], tmp));
+			DONE;
+		} else {
+			emit_insn(gen_zip_bitrev(operands[0],operands[0]));
+			emit_insn(gen_andsi3(operands[0], operands[0],
+				gen_int_mode(-2,SImode)));
+			emit_insn(gen_zip_bitrev(operands[0],operands[0]));
+			DONE;
+		}
+	})
+;
+;
+; STILL MISSING:
+;
+;
+;
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/config/zip/zip.h gcc-6.2.0-zip/gcc/config/zip/zip.h
--- gcc-6.2.0/gcc/config/zip/zip.h	1969-12-31 19:00:00.000000000 -0500
+++ gcc-6.2.0-zip/gcc/config/zip/zip.h	2017-03-03 09:30:57.671304970 -0500
@@ -0,0 +1,4114 @@
+////////////////////////////////////////////////////////////////////////////////
+//
+// Filename: 	gcc/config/zip/zip.h
+//
+// Project:	Zip CPU backend for the GNU Compiler Collection
+//
+// Purpose:	
+//
+// Creator:	Dan Gisselquist, Ph.D.
+//		Gisselquist Technology, LLC
+//
+////////////////////////////////////////////////////////////////////////////////
+//
+// Copyright (C) 2016-2017, Gisselquist Technology, LLC
+//
+// This program is free software (firmware): you can redistribute it and/or
+// modify it under the terms of  the GNU General Public License as published
+// by the Free Software Foundation, either version 3 of the License, or (at
+// your option) any later version.
+//
+// This program is distributed in the hope that it will be useful, but WITHOUT
+// ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY or
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this program.  (It's in the $(ROOT)/doc directory, run make with no
+// target there if the PDF file isn't present.)  If not, see
+// <http://www.gnu.org/licenses/> for a copy.
+//
+// License:	GPL, v3, as defined and found on www.gnu.org,
+//		http://www.gnu.org/licenses/gpl.html
+//
+//
+////////////////////////////////////////////////////////////////////////////////
+#ifndef	GCC_ZIP_H
+#define	GCC_ZIP_H
+
+
+//
+//
+// Zip CPU configuration defines
+//
+//
+#define	ZIP_USER	0	// Assume we are in supervisor mode
+#define	ZIP_MULTIPLY	1	// Assume we have multiply instructions
+#define	ZIP_DIVIDE	1	// Assume we have divide instructions
+#define	ZIP_FPU		0	// Assume we have no floating point instructions
+#define	ZIP_PIPELINED	1	// Assume our instructions are pipelined
+#define	ZIP_THUMB	1	// Assume we have the THUMB feature
+#define	ZIP_ATOMIC	(ZIP_PIPELINED)
+#define	ZIP_PIC		0	// Attempting to produce PIC code, with GOT
+#define	ZIP_HAS_DI	1
+// Should we use the peephole optimizations?
+#define	ZIP_PEEPHOLE	1	// 0 means no peephole optimizations.
+#define	ZIP_NOT_AN_INSTRUCTION	"NAI\t;// This is not an instruction.  Getting here implies a compiler error.  Please contact help support\n"
+
+// Zip has 16 registers in each user mode.
+//	Register 15 is the program counter (PC)
+//	Register 14 is the condition codes (CC)
+//	Register 13 is the stack pointer   (SP)
+//	Register 12 (may be) the Global Offset Table pointer (GOT)
+//	Register  0 (may be) the return address pointer
+// Registers 16-31 may only be used in supervisor mode.
+#define	is_ZIP_GENERAL_REG(REGNO)	((REGNO)<13)
+#define	is_ZIP_REG(REGNO)		((REGNO)<33)
+
+#define	zip_AP_PSEUDO	32
+#define	zip_PC		15
+#define	zip_CC		14
+#define	zip_SP		13
+#define	zip_FP		12
+#define	zip_GOT		11
+// #define	zip_AP		10	// We're using a PSEUDO REG instead
+#define	zip_R5		5	// Used for the static chain, if it exists
+#define	zip_R1		1
+#define	zip_R0		0
+#define	zip_LR		zip_R0	// Link Register is also R0
+
+#define	ZIP_FIRST_ARG_REGNO	1
+#define	ZIP_LAST_ARG_REGNO	5
+#define	NUM_ARG_REGS		(ZIP_LAST_ARG_REGNO-ZIP_FIRST_ARG_REGNO+1)
+#define	MAX_PARM_REGS		(ZIP_LAST_ARG_REGNO-ZIP_FIRST_ARG_REGNO+1)
+
+/* The overall framework of an assembler file */
+
+#define	ASM_COMMENT_START	";"
+#define	ASM_APP_ON		""
+#define	ASM_APP_OFF		""
+
+#define	FILE_ASM_OP		"\t.file\n"
+
+/* Output and Generation of Labels */
+#define	GLOBAL_ASM_OP		"\t.global\t"
+
+#define	BITS_PER_WORD		32
+
+
+/* A C compound statement to output to stdio stream STREAM the assembler syntax
+ * for an instruction operand X. */
+#define	PRINT_OPERAND(STREAM, X, CODE)	zip_print_operand(STREAM, X, CODE)
+#define	PRINT_OPERAND_ADDRESS(STREAM, X) zip_print_operand_address(STREAM, X)
+
+/* Passing arguments in registers */
+#define	FUNCTION_VALUE_REGNO_P(REGNO)	((REGNO)==zip_R1)
+
+/* Define how to find the value returned by a function.  VALTYPE is the data
+ * type of the value (as a tree).  If the precise function being called is known
+ * FUNC is its FUNCTION_DECL; otherwise, FUNC is 0. */
+#define	FUNCTION_VALUE(VALTYPE, FUNC) gen_rtx_REG(TYPE_MODE(VALTYPE), zip_R1)
+
+/* Define how to find the value returned by a library function assuming the
+ * value has mode MODE.
+ */
+#define	LIBCALL_VALUE(MODE)	gen_rtx_REG(MODE, zip_R1)
+
+
+/* STACK AND CALLING */
+
+
+/* Define this macro as a C expression that is nonzero for registers that are
+ * used by the epilogue or the return pattern.  The stack and frame pointer
+ * registers are already assumed to be used as needed.
+ */
+#define	EPILOGUE_USES(R)	(R == RETURN_ADDRESS_REGNUM)
+
+
+/* The best alignment to use in cases where we have a choice. */
+#define	FASTEST_ALIGNMENT	BITS_PER_WORD
+
+/* Generate Code for Profiling
+ */
+#define	FUNCTION_PROFILER(FILE,LABELNO)		(abort(), 0)
+
+
+/* A C expression which is nonzero if register number NUM is suitable for use
+ * as an index register in operand addresses.
+ */
+#define	REGNO_OK_FOR_INDEX_P(NUM)	0
+
+
+/* A C compound statement with a conditional 'goto LABEL;' executed if X
+ * (an RTX) is a legitimate memory address on the target machine for a memory
+ * operand of mode MODE.
+ */
+/* 17.03 Controlling the Compilation Driver, 'gcc' */
+// DRIVER_SELF_SPECS
+// OPTION_DEFAULT_SPECS
+// CPP_SPEC
+// CPLUSPLUS_CPP_SPEC
+// CC1_SPEC
+// CC1PLUS_SPEC
+/* ASM_SPEC ... A C string constant that tells the GCC driver program options
+ * to pass to the assembler.  It can also specify how to translate options you
+ * give to GCC into options for GCC to pass to the assembler.  See the file
+ * 'sun3.h' for an example of this.
+ *
+ * Do not define this macro if it does not need to do anything.
+ */
+// #undef	ASM_SPEC
+// ASM_FINAL_SPEC
+// ASM_NEEDS_DASH_FOR_PIPED_INPUT
+
+/* LINK_SPEC ... A C string constant that tells the GCC driver program options
+ * to pass to the linker.  It can also specify how to translate options you give
+ * to GCC into options for GCC to pass to the linker.
+ *
+ * Do not define this macro if it does not need to do anything.
+ */
+
+/* LIB_SPEC ... Another C string constant very much like LINK_SPEC.  The
+ * difference between the two is that LIB_SPEC is used at the end of the 
+ * command given to the linker.
+ *
+ * If this macro is not defined, a default is provided that loads the standard
+ * C library from the usual place.  See 'gcc.c'.
+ *
+ * ZipCPU ... We need this at its default value.  It is necessary to build
+ * the various GCC libraries that depend upon one another and newlib.  Hence,
+ * as an example we *must* include the library containing strnlen or libgfortran
+ * will not.  Alternatively, we might figure out how to pass arguments to the
+ * compiler via the configure process ... but we'll just allow this to have its
+ * default value for now.
+ */
+// #undef	LIB_SPEC
+// #define	LIB_SPEC	"%{!g:-lc} %{g:-lg} -lzip"
+// #define	LIB_SPEC	""
+
+/* LIBGCC_SPEC ... Another C string constant that tells the GCC driver program
+ * how and when to place a reference to 'libgcc.a' into the linker command
+ * line.  This constant is placed both before and after the value of LIB_SPEC.
+ *
+ * If this macro is not defined, the GCC driver provides a default that passes
+ * the string '-lgcc' to the linker.
+ */
+#undef	LIBGCC_SPEC
+#define	LIBGCC_SPEC	""
+
+/* REAL_LIBGCC_SPEC ... By default, if ENABLE_SHARED_LIBGCC is defined, the
+ * LIBGCC_SPEC is not directly used by the driver program but is instead
+ * modified to refer to different versions of 'libgcc.a' depending on the 
+ * values of the command line flags '-static', '-shared', '-static-libgcc',
+ * and '-shared-libgcc'.  On targets where these modifications are
+ * inappropriate, define REAL_LIBGCC_SPEC instead.  REAL_LIBGCC_SPEC tells the
+ * driver how to place a reference to 'libgcc' on the link command line, but
+ * unlike LIBGCC_SPEC, it is used unmodified.
+ */
+#define	REAL_LIBGCC_SPEC	""
+
+// USE_LD_AS_NEEDED
+// LINK_EH_SPEC
+
+/* STARTFILE_SPEC ... Another C string constant used much like LINK_SPEC.  The
+ * difference between the two is that STARTFILE_SPEC is used at the very
+ * beginning of the command given to the linker.
+ *
+ * If this macro is not defined, a default is provided that loads the standard
+ * C startup file from the usual place.  See 'gcc.c'
+ */
+#undef	STARTFILE_SPEC
+#define	STARTFILE_SPEC	""
+
+/* ENDFILE_SPEC ... Another C string constant used much like LINK_SPEC.  The
+ * difference between the two is that ENDFILE_SPEC is used at the very end
+ * of the command given to the linker. 
+ *
+ * Do not define this macro if it does not do anything.
+ */
+// #undef	ENDFILE_SPEC
+// #define	ENDFILE_SPEC	""
+
+// THREAD_MODEL_SPEC
+// SYSROOT_SUFFIX_SPEC
+// SYSROOT_HEADERS_SUFFIX_SPEC
+// EXTRA_SPECS
+// LINK_LIBGCC_SPECIAL_1
+// LINK_GCC_C_SEQUENCE_SPEC
+// LINK_COMMAND_SPEC
+// TARGET_ALWAYS_STRIP_DOTDOT
+// MULTILIB_DEFAULTS
+// RELATIVE_PREFIX_NOT_LINKDIR
+// MD_EXEC_PREFIX
+// STANDARD_STARTFILE_PREFIX
+// STANDARD_STARTFILE_PREFIX_1
+// STANDARD_STARTFILE_PREFIX_2
+// MD_STARTFILE_PREFIX
+// MD_STARTFILE_PREFIX_1
+// INIT_ENVIRONMENT
+// LOCAL_INCLUDE_DIR
+#undef	LOCAL_INCLUDE_DIR
+
+// NATIVE_SYSTEM_HEADER_COMPONENT
+// INCLUDE_DEFAULTS
+
+/* 17.03 Run-time Target Specification */
+
+/* TARGET_CPU_CPP_BUILTINS() ... This function-like macro expands to a block of
+ * code that defines built-in preprocessor macros and assertions for the target
+ * CPU, using the functions builtin_define, builtin_define_std, and
+ * builtin_assert.  When the front end calls this macro it provides a trailing
+ * semicolon, and since it has finished command line option processing your
+ * code can use those results freely.
+ *
+ * ZipCPU --- We should probably capture in this macro what capabilities the
+ * command line parameters we've been given indicate that our CPU has.  That
+ * way, code can be adjusted depending upon the CPU's capabilities.
+ */
+#define	TARGET_CPU_CPP_BUILTINS()				\
+	{ builtin_define("__ZIPCPU__");				\
+	builtin_define("__IEEE_BIG_ENDIAN");			\
+	builtin_define("_LDBL_EQ_DBL");				\
+	if (ZIP_FPU) builtin_define("__ZIPFPU__");		\
+	else builtin_define("_SOFT_FLOAT");			\
+	if (ZIP_ATOMIC) builtin_define("__ZIPATOMIC__");	\
+	}
+	// If (zip_param_has_fpu)  builtin_define("__ZIPFPU__");
+	// If (zip_param_has_div)  builtin_define("__ZIPDIV__");
+	// If (zip_param_has_mpy)  builtin_define("__ZIPMPY__");
+	// If (zip_param_has_lock) builtin_define("__ZIPLOCK__");
+	// If (zip_param_supervisor) builtin_define("__ZIPUREGS__");
+	// If (we support int64s) builtin_define("___int64_t_defined");
+
+/* TARGET_OS_CPP_BUILTINS() ... Similarly to TARGET_CPU_CPP_BUILTINS but this
+ * macro is optional and is used for the target operating system instead.
+ */
+
+/* Option macros: (we need to define these eventually ... )
+ *
+ *	TARGET_HANDLE_OPTION
+ *	TARGET_HANDLE_C_OPTION
+ *	TARGET_OBJ_CONSTRUCT_STRING_OBJECT
+ *	TARGET_OBJ_DECLARE_UNRESOLVED_CLASS_REFERENCE
+ *	TARGET_OBJ_DECLARE_CLASS_DEFINITION
+ *	TARGET_STRING_OBJECT_REF_TYPE_P
+ *	TARGET_CHECK_STRING_OBJECT_FORMAT_ARG
+ *	TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE(VOID)
+ *	C_COMMON_OVERRIDE_OPTIONS
+ *	TARGET_OPTION_OPTIMIZATION_TABLE
+ *	TARGET_OPTION_INIT_STRUCT
+ *	TARGET_OPTION_DEFAULT_PARAMS
+ */
+
+/* SWITCHABLE_TARGET
+ *
+ * Zip CPU doesn't need this, so it defaults to zero.  No need to change it
+ * here.
+ */
+
+/* TARGET_FLOAT_EXCEPTIONS_ROUNDING_SUPPORTED_P(VOID) ... Returns true if the
+ * target supports IEEE 754 floating-point exceptions and rounding modes, false
+ * otherwise.  This is intended to relate to the float and double types, but not
+ * necessarily "long double".  By default, returns true if the adddf3
+ * instruction pattern is available and false otherwise, on the assumption that
+ * hardware floating point supports exceptions and rounding modes but software
+ * floating point does not.
+ *
+ * ZipCPU floating point is barely going to be functional, I doubt it will
+ * support all of these bells and whistles when full functionality is even 
+ * achieved.  Therefore, we won't support these modes.  However, we can't just
+ * set this to zero, so let's come back to this.
+ */
+// #warning "Wrong answer encoded to date"
+// #undef	TARGET_FLOAT_EXCEPTIONS_ROUNDING_SUPPORTED_P
+// #define	TARGET_FLOAT_EXCEPTIONS_ROUNDING_SUPPORTED_P(X)	0
+
+/* 17.04 Defining data structures for per-function information */
+
+/* INIT_EXPANDERS ... Macro called to initialize any target specific
+ * information.  This macro is called once per function, before generation of
+ * any RTL has begun.  The intention is to allow the initialization of the 
+ * function pointer init_machine_status.
+ */
+// #warning "I may need to define this to handle function return addresses ..."
+
+
+/* 17.05 Storage Layout */
+
+
+/* Storage Layout */
+#define	BITS_BIG_ENDIAN		0	// MSB has highest number
+#define	BYTES_BIG_ENDIAN	1	// 1 if MSB is lowest number
+#define	WORDS_BIG_ENDIAN	1	// 1 if MSW is lowest number
+#define	FLOAT_WORDS_BIG_ENDIAN	1
+#define	UNITS_PER_WORD		4	// Storage units in a word, pwr of 2:1-8
+/* POINTER_SIZE ... Width of a pointer in bits.  You must specify a value no
+ * wider than the width of Pmode.  If it is not equal to the width of Pmode,
+ * you must define POINTERS_EXTEND_UNSIGNED. If you do not specify a value the
+ * default is BITS_PER_WORD.
+ *
+ * ZipCPU --- All of our pointers are 32-bits, the width of our address bus.
+ */
+#define	POINTER_SIZE		32	// Ptr width in bits
+
+/* POINTERS_EXTEND_UNSIGNED ... A C expression that determines how pointers
+ * should be extended from ptr_mode to either Pmode or word_mode.  It is greater
+ * than zero if pointers should be zero-extended, zero if they should be sign
+ * extended, and negative if some other conversion is needed.  In the last case,
+ * the extension is done by the target's ptr_extend instruction.
+ *
+ * You need not define this macro if the ptr_mode, Pmode, and word_mode are all
+ * the same width.
+ *
+ * ZipCPU --- We don't need to define this macro, since PMode and ptr_mode, and
+ * our word_mode (SImode) all have the same width.
+ */
+// #define	POINTERS_EXTEND_UNSIGNED	1
+
+/* PROMOTE_MODE(m,unsignedp,type) ... A macro to update m and unsignedp when an
+ * object whose type is type and which has the specified mode and signedness is
+ * to be stored in a register.  This macro is only called when type is a scalar
+ * type.
+ *
+ * On most RISC machines, which only have operations that operate on a full
+ * register, define this macro to set m to word_mode if m is an integer mode
+ * narrower than BITS_PER_WORD.  In most cases, only integer modes should be
+ * widened because wider precision floating-point operations are usually more
+ * expensive than their narrower counterparts.
+ *
+ * For most machines, the macro definition does not change unsignedp.  However,
+ * some machines, have instructions that preferentially handle either signed or
+ * unsigned quantities of certain modes.  For example, on the DEC Alpha, 32-bit
+ * loads from memory and 32-bit add instructions sign-extend the result to 
+ * 64-bits. On such machines, set unsignedp according to which kind of extension
+ * is more efficient.
+ *
+ * Do not define this macro if it would never modify m.
+ *
+ * ZipCPU --- 
+ */
+#define	PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \
+	do {							\
+		if ((GET_MODE_CLASS(MODE) == MODE_INT)		\
+				&& (GET_MODE_SIZE(MODE) < 4)) {	\
+			(MODE) = SImode;			\
+			(UNSIGNEDP) = 1;			\
+		}						\
+	} while(0)
+
+// TARGET_PROMOTE_FUNCTION_MODE
+#define	TARGET_PROMOTE_FUNCTION_MODE	default_promote_function_mode_always_promote
+
+/* PARM_BOUNDARY ... Normal alignment required for function parameters on the
+ * stack, in bits.  All stack parameters receive at least this much alignment
+ * regardless of data type.  On most machines, this is the same as the size of
+ * an integer.
+ */
+#define	PARM_BOUNDARY	32
+
+/* STACK_BOUNDARY ... Define this macro to the minimum alignment enforced by
+ * hardware for the stack pointer on this machine.  The definition is a C 
+ * expression for the desired alignment (measured in bits).  This value is used
+ * as a default if PREFERRED_STACK_BOUNDARY is not defined.  On most machines,
+ * this should be the same as PARM_BOUNDARY.
+ */
+#define	STACK_BOUNDARY	PARM_BOUNDARY
+
+/* PREFERRED_STACK_BOUNDARY ... Define this ... */
+#define	PREFERRED_STACK_BOUNDARY	STACK_BOUNDARY
+
+/* INCOMING_STACK_BOUNDARY ... Define this macro if the incoming stack boundary
+ * may be different from PREFERRED_STACK_BOUNDARY.  This macro must evaluate
+ * to a value equal to or larger than STACK_BOUNDARY.
+ */
+#define	INCOMING_STACK_BOUNDARY	STACK_BOUNDARY
+
+/* FUNCTION_BOUNDARY ... Alignment required for a function entry point, in bits.
+ */
+#define	FUNCTION_BOUNDARY	32
+
+/* BIGGEST_ALIGNMENT ... Biggest alignment that any data type can require on
+ * this machine, in bits.  Note that this is not the biggest alignment that is
+ * supported, just the biggest alignment that, when violated, may cause a fault.
+ */
+#define BIGGEST_ALIGNMENT	32
+
+/* MALLOC_ABI_ALIGNMENT
+ */
+
+/* ATTRIBUTE_ALIGNED_VALUE
+ */
+
+/* MINIMUM_ATOMIC_ALIGNMENT ... If defined, the smallest alignment, that can be
+ * given to an object that can be referenced in one operation, without
+ * disturbing any nearby object.  Normally, this is BITS_PER_UNIT, but may be
+ * larger on machines that don't have byte or halfword store operations.
+ */
+#define	MINIMUM_ATOMIC_ALIGNMENT	BITS_PER_UNIT
+
+/* BIGGEST_FIELD_ALIGNMENT ... Biggest alignment that any structure or union
+ * field can require on this machine, in bits.  If defined, this overrides
+ * BIGGEST_ALIGNMENT for structure and union fields only, unless the field 
+ * alignment has been set by the __attribute__((aligned(n))) construct.
+ */
+#define	BIGGEST_FIELD_ALIGNMENT	BITS_PER_WORD
+
+/* ADJUST_FIELD_ALIGN(FIELD, COMPUTED) ... An expression for the alignment of
+ * a structure field FIELD if the alignment computed in the usual way (including
+ * applying BIGGEST_ALIGNMENT and BIGGEST_FIELD_ALIGNMENT) is COMPUTED.  
+ */
+// #define	ADJUST_FIELD_ALIGN(A,B)	BITS_PER_WORD
+
+/* MAX_STACK_ALIGNMENT ... Biggest stack alignment guaranteed by the backend.
+ * Use this macro to specify the maximum alignment of a variable on the stack.
+ *
+ * If not defined, the default value is STACK_BOUNDARY
+ */
+// #define	MAX_STACK_ALIGNMENT	BITS_PER_WORD
+
+/* MAX_OFILE_ALIGNMENT
+ */
+
+/* DATA_ALIGNMENT(TYPE, BASIC-ALIGN) ... If defined, a C expression to compute
+ * the alignment for a variable in the static store.  TYPE is the data type, and
+ * BASIC-ALIGN is the alignment that the object would ordinarily have.  The 
+ * value of this macro is used instead of that alignment to align the object.
+ *
+ * If this macro is not defined, then BASIC-ALIGN is used.
+ *
+ * ZipCPU -- 
+ */
+// #define	DATA_ALIGNMENT(TYPE, ALIGN)	BITS_PER_WORD
+
+
+/* DATA_ABI_ALIGNMENT(TYPE,BASIC-ALIGN)
+ */
+
+/* CONSTANT_ALIGNMENT(CONST, BASIC-ALIGN) ... If defined, a C expression to
+ * compute the alignment given to a constant that is being placed in memory.
+ * CONST is the constant and BASIC-ALIGN is the alignment that the object
+ * would ordinarily have.  The value of this macro is used instead of that 
+ * alignment to align the object.  
+ *
+ * If this macro is not defined, then BASIC-ALIGN is used.
+ *
+ * ZipCPU -- in hindsight, if this macro is not defined then the compiler is
+ * broken.  We'll define it as above.
+ * 
+ */
+#define	CONSTANT_ALIGNMENT(EXP, ALIGN)	BITS_PER_WORD
+
+/* LOCAL_ALIGNMENT(TYPE,BASIC-ALIGN) ... If defined ...
+ */
+// #define	LOCAL_ALIGNMENT(TYP,ALIGN)	BITS_PER_WORD
+
+/* TARGET_VECTOR_ALIGNMENT
+ */
+
+/* STACK_SLOT_ALIGNMENT
+ */
+#define	STACK_SLOT_ALIGNMENT(T,M,B)	BITS_PER_WORD
+
+/* LOCAL_DECL_ALIGNMENT(DECL)
+ */
+// #define	LOCAL_DECL_ALIGNMENT(DECL)	BITS_PER_WORD
+
+/* MINIMUM_ALIGNMENT
+ */
+// #define	MINIMUM_ALIGNMENT(EXP,MOD,ALIGN)	BITS_PER_WORD
+
+/* EMPTY_FIELD_BOUNDARY
+ * Alignment of field after 'int : 0' in a structure.
+ */
+#define	EMPTY_FIELD_BOUNDARY	BITS_PER_WORD
+
+/* STRUCTURE_SIZE_BOUNDARY
+ * ZipCPU -- Every structure's size must be a multiple of 32-bits.
+ */
+#define	STRUCTURE_SIZE_BOUNDARY	BITS_PER_WORD
+
+/* STRICT_ALIGNMENT ... Set this nonzero if move instructions will actually
+ * fail to work when given unaligned data.  If instructions will merely go
+ * slower in that case, define this macro as 0.
+ *
+ * ZipCPU -- 
+ */
+#define	STRICT_ALIGNMENT	1
+
+/* PCC_BITFIELD_TYPE_MATTERS -- define this if you wish to imitate the way 
+ * other C compilers handle alignment of bit-fields and the structures that
+ * contain them.  
+ *
+ * The behavior is that the type written for a named bit-field (int, short, or
+ * other integer type) imposes an alignment for the entire structure, as if the
+ * structure really did contain an ordinary field of that type.  In addition,
+ * the bit-field is placed within the structure so that it would fit within
+ * such a field, not crossing a boundary for it.
+ * 
+ * Thus, on most machines, a named bit-field whose type is written as int would
+ * not cross a four-byte boundary, and would force four-byte alignment for the
+ * whole structure.  (The alignment used may not be four bytes; it is controlled
+ * by other alignment parameters.)
+ * 
+ * An unnamed bit-field will not affect the alignment of the containing
+ * structure.
+ *
+ * If the macro is defined, its definition should be a C expression, a non
+ * zero value for the expression enables this behavior.
+ * Look at the fundamental type that is used for a bit-field and use that to
+ * impose alignment on the enclosing structure.  struct s{int a:8}; should
+ * have the same alignment as 'int', not 'char'.
+ */
+#undef	PCC_BITFIELD_TYPE_MATTERS
+#define	PCC_BITFIELD_TYPE_MATTERS	0
+
+/* MAX_FIXED_MODE_SIZE ... An integer expression for the size in bits of the
+ * largest integer machine mode that should actually be used.  All integer
+ * machine modes of this size or smaller can be used for structures and unions
+ * with the appropriate sizes.  If this macro is undefined,
+ * GET_MODE_BITSIZE(DImode) is assumed.
+ *
+ * ZipCPU ... Get_MODE_BITSIZE(DImode) will be 64, and this really is the
+ * size in bits of the largest integer machine mode.  However, that's the case
+ * with most DI implementations: A long is two words, spliced together.  We'd
+ * like to support that eventually, but we need to get there.  Hence, let's use
+ * compile time flag (ZIP_HAS_DI) that we can enable when we're ready.
+ */
+#undef	MAX_FIXED_MODE_SIZE
+#if	ZIP_HAS_DI	// Test the VALUE: ZIP_HAS_DI is always #defined (0 or 1) above, so #ifdef would always pick DImode
+# define MAX_FIXED_MODE_SIZE	GET_MODE_BITSIZE(DImode)
+#else
+# define MAX_FIXED_MODE_SIZE	GET_MODE_BITSIZE(SImode)
+#endif
+
+
+
+/* 17.06 Layout of Source Language Data Types */
+
+#undef	LONG_TYPE_SIZE
+#undef	LONG_LONG_TYPE_SIZE
+//
+#define	LONG_TYPE_SIZE	64
+#define	LONG_LONG_TYPE_SIZE	64
+// SHORT_FRAC_TYPE_SIZE
+// LONG_FRACT_TYPE_SIZE
+// LONG_LONG_FRACT_TYPE_SIZE
+
+/* LIBGCC2_GNU_PREFIX ... This macro corresponds to the TARGET_GNU_PREFIX target
+ * hook and should be defined if that hook is overridden to be true.  It causes
+ * function names in libgcc to be changed to use a __gnu_ prefix for their name
+ * rather than the default __.  A port which uses this macro should also arrange
+ * to use t-gnu-prefix in the libgcc config.host.
+ *
+ * ZipCPU -- I see no reason to define and therefore change this behavior.
+ */
+
+/* TARGET_FLT_EVAL_METHOD ... A C expression for the value for FLT_EVAL_METHOD
+ * in float.h, assuming, if applicable, that the floating-point control word
+ * is in its default state.  If you do not define this macro the value of
+ * FLT_EVAL_METHOD will be zero.
+ * 
+ * ZipCPU --- ???
+ */
+
+/* WIDEST_HARDWARE_FP_SIZE ... A C expression for the size in bits of the widest
+ * floating-point format supported by the hardware.  If you define this macro,
+ * you must specify a value less than or equal to the value of LONG_DOUBLE_...
+ * If you do not define this macro, the value of LONG_DOUBLE_TYPE_SIZE is the
+ * default.
+ *
+ * ZipCPU supports 32-bit IEEE floats--IF THE SUPPORT IS COMPILED IN!  This
+ * really needs to be determined, then, based upon a compile time parameter
+ * where the one compiling the code states whether or not the H/W even has
+ * floating point support.
+ *
+ * For now, we'll assume it does--but once we implement GCC parameters, we'll
+ * need to change this.
+ */
+#undef	WIDEST_HARDWARE_FP_SIZE
+// #warning "Definition needs to change if no FPU present"
+#define	WIDEST_HARDWARE_FP_SIZE	FLOAT_TYPE_SIZE
+
+/* DEFAULT_SIGNED_CHAR ... An expression whose value is 1 or 0, according to 
+ * whether the type char should be signed or unsigned by default.  The user
+ * can always override this default with the options -fsigned-char and
+ * -funsigned-char.
+ *
+ * ZipCPU--Our hardware produces unsigned characters (and shorts) by default,
+ * so let's stick to that.
+ */
+#define	DEFAULT_SIGNED_CHAR	0
+
+/* TARGET_DEFAULT_SHORT_ENUMS(VOID) ... This target hook should return true if
+ * the compiler should give an enum type only as many bytes as it takes to 
+ * represent the range of possible values of that type.  It should return
+ * false if all enum types should be allocated like int.
+ *
+ * The default is to return false.  This is what the ZipCPU needs, so we won't
+ * override it.
+ */
+
+/* SIZE_TYPE ... A C expression for a string describing the name of the data
+ * type to use for size values.  The typedef name size_t is defined using the
+ * contents of the string.
+ * 
+ * If you don't define this macro, the default is "long unsigned int".  Since
+ * on the ZipCPU this is a 32-bit number, and all ZipCPU values are 32-bits,
+ * the default seems perfect for us.
+ */
+#define	SIZE_TYPE	"unsigned int"
+
+/* SIZETYPE ... GCC defines internal types () for expressions dealing with size.
+ * This macro is a C expression for a string describing the name of the data 
+ * type from which the precision of sizetype is extracted.  The string has the 
+ * same restrictions as SIZE_TYPE string.  If you don't define this macro, the
+ * default is SIZE_TYPE --- which seems good enough for us.
+ */
+
+/* PTRDIFF_TYPE ... A C expression for a string describing the name of the data
+ * type to use for the result of subtracting two pointers.  The typedef name
+ * ptrdiff_t is defined using the contents of the string.  See SIZE_TYPE for
+ * more information.
+ *
+ * The default is "long int" which for the ZipCPU is 32-bits---still good enough
+ * for us.
+ */
+#define	PTRDIFF_TYPE	"int"
+
+/* WCHAR_TYPE ... A C expression for a string describing the name of the data
+ * type to use for wide characters.  The typedef name wchar_t is defined using
+ * the contents of  the string.  If you don't define this macro, the default is
+ * 'int'--good enough for ZipCPU.
+ */
+// #define	WCHAR_TYPE	"int"
+
+/* WCHAR_TYPE_SIZE ... A C expression for the size in bits of the data type for
+ * wide characters.  This is used in cpp, which cannot make use of WCHAR_TYPE.
+ *
+ * ZipCPU -- This defaults to INT_TYPE_SIZE, which will work for us
+ */
+// #define	WCHAR_TYPE_SIZE	32
+
+/* WINT_TYPE ... A C expression for a string describing the name of the data
+ * type to use for wide characters passed to printf and returned from getwc.
+ * The typedef name wint_t is defined using the contents of the string.  See
+ *
+ * ZipCPU -- The default should work well enough for us.
+ */
+// #define	WINT_TYPE	"int"
+
+/* INTMAX_TYPE ... A C expression for a string describing the name of the 
+ * data type that can represent any value of any standard or extended signed
+ * integer type.  The typedef name intmax_t is defined using the contents of
+ * the string.
+ *
+ * If you don't define this macro, the default is the first of "int", "long int"
+ * or "long long int" that has as much precision as "long long int".
+ */
+
+/* UINTMAX_TYPE ... same as INTMAX_TYPE, but for unsigned 
+ */
+
+#undef	SIG_ATOMIC_TYPE
+#if (ZIP_ATOMIC != 0)
+#define	SIG_ATOMIC_TYPE	"int"
+#else
+#define	SIG_ATOMIC_TYPE	NULL	// We have no atomic types, but registers
+#endif
+#undef	INT8_TYPE
+#define	INT8_TYPE		"char"
+#undef	INT16_TYPE
+#define	INT16_TYPE		"short int"
+#undef	INT32_TYPE
+#define	INT32_TYPE		"int"
+#undef	UINT8_TYPE
+#define	UINT8_TYPE		"unsigned char"
+#undef	UINT16_TYPE
+#define	UINT16_TYPE		"short unsigned int"
+#undef	UINT32_TYPE
+#define	UINT32_TYPE		"unsigned int"
+#undef	INT_LEAST8_TYPE	
+#define	INT_LEAST8_TYPE		"char"
+#undef	INT_LEAST16_TYPE
+#define	INT_LEAST16_TYPE	"short int"
+#undef	INT_LEAST32_TYPE
+#define	INT_LEAST32_TYPE	"int"
+#undef	UINT_LEAST8_TYPE
+#define	UINT_LEAST8_TYPE	"unsigned char"
+#undef	UINT_LEAST16_TYPE
+#define	UINT_LEAST16_TYPE	"short unsigned int"
+#undef	UINT_LEAST32_TYPE
+#define	UINT_LEAST32_TYPE	"unsigned int"
+#undef	INT_FAST8_TYPE
+#define	INT_FAST8_TYPE		"char"
+#undef	INT_FAST16_TYPE
+#define	INT_FAST16_TYPE		"short int"
+#undef	INT_FAST32_TYPE
+#define	INT_FAST32_TYPE		"int"
+#undef	UINT_FAST8_TYPE
+#define	UINT_FAST8_TYPE		"unsigned char"
+#undef	UINT_FAST16_TYPE
+#define	UINT_FAST16_TYPE	"short unsigned int"
+#undef	UINT_FAST32_TYPE
+#define	UINT_FAST32_TYPE	"unsigned int"
+#undef	INTPTR_TYPE
+#define	INTPTR_TYPE		"unsigned int"
+#undef	UINTPTR_TYPE
+#define	UINTPTR_TYPE		"unsigned int"
+
+#undef	INT64_TYPE
+#undef	UINT64_TYPE
+#undef	INT_LEAST64_TYPE
+#undef	UINT_LEAST64_TYPE
+#undef	INT_FAST64_TYPE
+#undef	UINT_FAST64_TYPE
+
+#if (ZIP_HAS_DI != 0)
+#define	INT64_TYPE		"long int"
+#define	UINT64_TYPE		"long unsigned int"
+#define	INT_LEAST64_TYPE	"long int"
+#define	UINT_LEAST64_TYPE	"long unsigned int"
+#define	INT_FAST64_TYPE		"long int"
+#define	UINT_FAST64_TYPE	"long unsigned int"
+#else
+#define	INT64_TYPE		NULL
+#define	UINT64_TYPE		NULL
+#define	INT_LEAST64_TYPE	NULL
+#define	UINT_LEAST64_TYPE	NULL
+#define	INT_FAST64_TYPE		NULL
+#define	UINT_FAST64_TYPE	NULL
+#endif
+
+#define	TARGET_PTRMEMFUNC_VBI_LOCATION	ptrmemfunc_vbit_in_pfn
+
+
+/* 17.07 Register Usage / Register definitions */
+
+/* FIRST_PSEUDO_REGISTER ... Number of hardware registers known to the compiler.
+ * They receive numbers 0 through FIRST_PSEUDO_REGISTER-1; thus the first
+ * pseudo register's number really is assigned the number
+ * FIRST_PSEUDO_REGISTER.
+ *
+ * ZipCPU---There are 16 registers in the ZipCPU, numbered 0-15 with the CC
+ * and PC register being numbered 14 and 15 respectively.  The ZipCPU has
+ * another 16 registers, identical to the first, but user mode registers.  These
+ * are numbered the same as the first (0-15) in user mode, but numbered (16-31)
+ * in supervisor mode.  In addition, we create a pretend argument pointer
+ * register, zip_AP_PSEUDO, to refer to our arguments.  This final register,
+ * although it gets a valid number, will be eliminated in optimization.
+ */
+#define	FIRST_PSEUDO_REGISTER	(zip_AP_PSEUDO+1)
+
+/* FIXED_REGISTERS ... An initializer that says which registers are used for
+ * fixed purposes all throughout the compiled code and are therefore not
+ * available for general allocation.  These would include the stack pointer, the
+ * frame pointer (except on machines where that can be used as a general
+ * register when no frame pointer is needed), the program counter on machines
+ * where that is considered one of the addressable registers, and any other
+ * numbered register with a standard use.
+ *
+ * This information is expressed as a sequence of numbers, separated by commas,
+ * and surrounded by braces.  The nth number is 1 if register n is fixed, 0
+ * otherwise.
+ *
+ * For the Zip CPU, we have three fixed registers that are not available for
+ * general allocation:
+ *
+ * 	SP	The stack pointer
+ * 	CC	The condition codes and CPU state register
+ *	PC	The program counter
+ *
+ * Other registers, such as FP (the frame pointer) or GBL (the global offset
+ * table pointer) are registers that we hope will not be so fixed.
+ *
+ * Okay, just updated this process.  We now have more registers that are not
+ * available for general allocation:
+ *	uR0-uPC		User registers
+ *	PSEUDO-AP	The pseudo arg pointer
+ */
+#define	FIXED_REGISTERS		{ 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1, 1 }
+
+/* CALL_USED_REGISTERS ... like FIXED_REGISTERS but has 1 for each register
+ * that is clobbered (in general) by function calls as well as for fixed
+ * registers.  This macro therefore identifies the registers that are not
+ * available for general allocation of values that must live across function
+ * calls.
+ * 
+ * If a register has 0 in CALL_USED_REGISTERS, the compiler automatically saves
+ * it on function entry and restores it on function exit, if the register is
+ * used within the function.
+ *
+ * On the Zip CPU, we must save R0 (the return address), and (let's pick) any
+ * register above R5.
+ */
+#define	CALL_USED_REGISTERS	{ 0,1,1,1, 1,0,0,0, 0,0,0,0, 0,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1,  1 }
+
+/* CALL_REALLY_USED_REGISTERS ...  optional macro that, if not defined, defaults
+ * to the value of CALL_USED_REGISTERS.
+ */
+
+/* HARD_REGNO_CALL_PART_CLOBBERED(REGNO,MODE) ... A C expression that is nonzero
+ * if it is not permissible to store a value of mode MODE in hard register REGNO
+ * across a call without some part of it being clobbered.  For most machines,
+ * this macro need not be defined.  It is only required for machines that do
+ * not preserve the entire contents of a register across a call.
+ *
+ * ZipCPU--Always preserves the entire contents of those registers that are
+ * preserved across calls, so this shouldn't need to be defined.
+ */
+// #define	HARD_REGNO_CALL_PART_CLOBBERED(REGNO,MODE)	(REGNO==0)
+
+/* TARGET_CONDITIONAL_REGISTER_USAGE(VOID) ... This hook may conditionally 
+ * modify five variables fixed_regs, call_used_regs, global_regs, reg_names, and
+ * reg_class_contents, to take into account any dependence of these register
+ * sets on target flags.  The first three of these are of type char[]
+ * (interpreted as Boolean vectors).  global_regs is a const char *[] and
+ * reg_class_contents is a HARD_REG_SET.  Before the macro is called,
+ * fixed_regs, call_used_regs, reg_class_contents, and reg_names have been
+ * initialized from FIXED_REGISTERS, CALL_USED_REGISTERS, REG_CLASS_CONTENTS,
+ * and REGISTER_NAMES, respectively.  global_regs has been cleared, and any
+ * -ffixed-reg, -fcall-used-reg, and -fcall-saved-reg command options have been
+ * applied.
+ *
+ * ZipCPU -- I may need to return and define this depending upon how the
+ * GBL register allocation goes.  But for now, we'll leave this at its default
+ * value.
+ */
+// #warning "Revisit me after FP and GBL allocation"
+
+/* INCOMING_REGNO(out) ... Define this macro if the target machine has register
+ * windows. ...
+ *
+ * Zip CPU has no register windows.
+ */
+
+/* OUTGOING_REGNO ... same thing.
+ * LOCAL_REGNO ... same thing.
+ */
+
+/* PC_REGNUM ... If the program counter has a register number, define this as
+ * that register number.  Otherwise do not define it.
+ */
+#define	PC_REGNUM	zip_PC
+
+
+/* REG_ALLOC_ORDER ... If defined, an initializer for a vector of integers, 
+ * containing the number of hard registers in the order in which GCC should
+ * prefer to use them (from most preferred to least).
+ *
+ * If this macro is not defined, registers are used lowest numbered first (all
+ * else being equal).
+ *
+ * Since the default is the ZipCPU desired case, we won't define this here.
+ */
+
+/* ADJUST_REG_ALLOC_ORDER ... on most machines it is not necessary to define
+ * this macro, so we won't either.
+ */
+
+/* HONOR_REG_ALLOC_ORDER ... 
+ */
+
+/* HONOR_REG_ALLOC_ORDER ... on most machines it is not necessary to define
+ * this macro, so we won't either.
+ */
+
+/* HARD_REGNO_NREGS(REGNO, MODE) ... A C expression for the number of
+ * consecutive hard registers, starting at register number REGNO, required to
+ * hold a value of mode MODE.
+ *
+ * On a machine where all registers are exactly one word, a suitable definition
+ * is given of ((GET_MODE_SIZE(MODE)+UNITS_PER_WORD-1)/UNITS_PER_WORD).
+ *
+ */
+#undef	HARD_REGNO_NREGS
+#define	HARD_REGNO_NREGS(REGNO, MODE)	((GET_MODE_SIZE(MODE)+UNITS_PER_WORD-1)\
+		/ (UNITS_PER_WORD))
+
+/* HARD_REGNO_NREGS_HAS_PADDING(REGNO,MODE) ... A C expression that is nonzero
+ * if a value of mode MODE, stored in memory, ends with padding that causes it
+ * to take up more space than in registers starting at register number REGNO
+ * (as determined by multiplying GCC's notion of the size of the register when
+ * containing this mode by the number of registers returned by HARD_REGNO_NREGS)
+ * By default this is zero.
+ *
+ * Zip CPU --- The default looks good enough to me.
+ */
+
+/* HARD_REGNO_NREGS_WITH_PADDING(REGNO, MODE)
+ *
+ * ZipCPU ---
+ */
+
+/* REGMODE_NATURAL_SIZE(MODE) -- Define this macro if the natural size of 
+ * registers that hold values of mode mode is not the word size.  It is a C
+ * expression that should give the natural size in bytes for the specified mode.
+ * It is used by the register allocator to try to optimize its results.
+ *
+ * ZipCPU ---
+ */
+// #define	REGMODE_NATURAL_SIZE(MODE)	(((MODE)==DImode)?2:1)
+
+/* HARD_REGNO_MODE_OK ... A C expression that is nonzero if it is permissible
+ * to store a value of mode MODE in a hard register number REGNO (or in several
+ * registers starting with that one).  For a machine where all registers are
+ * equivalent, a suitable definition is '1'.  You need not include code to check
+ * for the numbers of fixed registers, because the allocation mechanism
+ * considered them to be always occupied.
+ *
+ * ZipCPU --- As long as you are already avoiding the fixed registers, the
+ * suitable default definition mentioned above should be sufficient.
+ */
+#undef	HARD_REGNO_MODE_OK
+#define	HARD_REGNO_MODE_OK(R,M)	(R<zip_CC)
+
+/* HARD_REGNO_RENAME_OK(FROM,TO) ... A C expression that is nonzero if it is
+ * okay to rename a hard register FROM to another hard register TO.  One common
+ * use of this macro is to prevent renaming of a register to another register
+ * that is not saved by a prologue in an interrupt handler.  The default is
+ * always nonzero.
+ *
+ * ZipCPU --- The default looks good enough to us.
+ */
+#undef	HARD_REGNO_RENAME_OK
+#define	HARD_REGNO_RENAME_OK(FROM,TO)	((is_ZIP_GENERAL_REG(FROM))&&(is_ZIP_GENERAL_REG(TO)))
+
+
+/* MODES_TIEABLE_P(M1, M2) ... A C expression that is nonzero if a value of mode
+ * M1 is accessible in mode M2 without copying.
+ *
+ * ZipCPU --- well, that's true for us (although we support scant few modes) ...
+ * so let's set it to one.
+ */
+#define	MODES_TIEABLE_P(M1,M2)	1
+
+/* TARGET_HARD_REGNO_SCRATCH_OK(REGNO)
+ * This target hook should return true if it is OK to use a hard register
+ * REGNO as a scratch register in peephole2.  One common use of this macro is
+ * to prevent using of a register that is not saved by a prologue in an 
+ * interrupt handler.  The default version of this hook always returns true.
+ *
+ * ZipCPU --- the default works for us as well.  If you are in an interrupt
+ * context, you have an entirely new set of registers (the supervisor set), so
+ * this is a non-issue.
+ */
+ 
+/* AVOID_CCMODE_COPIES ... define this macro if the compiler should avoid
+ * copies to/from CCmode register(s).  You should only define this macro if
+ * support for copying to/from CCmode is incomplete.
+ *
+ * ZipCPU --- CCmode register copies work like any other, so we'll keep with the
+ * default definition.
+ */
+
+/* STACK_REGS ... Define this if the machine has any stack-like registers.
+ *
+ * Zip CPU has no stack-like registers, as their definition is different from
+ * the ZipCPU stack pointer register.
+ */
+
+/* 17.08 Register Classes */
+
+/* enum reg_class ... An enumerate type that must be defined with all the 
+ * register class names as enumerated values.  NO_REGS must be first.  ALL_REGS
+ * must be the last register class, followed by one more enumerated value,
+ * LIM_REG_CLASSES, which is not a register class but rather tells how many
+ * classes there are.
+ *
+ * ZipCPU --- We'll define registers 0-13 as general registers, 14-15 in
+ * all_regs, and go from there.
+ */
+enum	reg_class {
+	NO_REGS, GENERAL_REGS,
+	USER_REGS,
+	ALL_REGS, LIM_REG_CLASSES
+};
+
+/* N_REG_CLASSES ... the number of distinct register classes, defined as follows
+ */
+#define	N_REG_CLASSES	(int)LIM_REG_CLASSES
+
+/* REG_CLASS_NAMES ... An initializer containing the names of the register
+ * classes as C string constants.  These names are used in writing some of the
+ * debugging dumps.
+ */
+#define	REG_CLASS_NAMES { "NO_REGS", "GENERAL_REGS", "USER_REGS", "ALL_REGS" }
+
+/* REG_CLASS_CONTENTS ... An initializer containing the contents of the register
+ * classes, as integers which are bit masks.  The nth integer specifies the
+ * contents of class n.  That way the integer mask is interpreted as that
+ * register r is in the class if (mask&(1<<r)) is 1.
+ *
+ * When the machine has more than 32 registers, an integer does not suffice.  
+ * Then the integers are replaced by sub-initializers, braced groupings
+ * containing several integers.  Each sub-initializer must be suitable as an
+ * initializer for the type HARD_REG_SET which is defined in 'hard-reg-set.h'.
+ * In this situation, the first integer in each subinitializer corresponds to
+ * registers 0-31, the second integer to registers 32-63, and so on.
+ *
+ * ZipCPU --- This is straight forward, three register classes, etc.
+ */
+#define	REG_CLASS_CONTENTS { { 0x000000000, 0}, {0x00003fff, 0}, {0x0ffff0000, 0}, {0x0ffffffff, 1} }
+
+/* REGNO_REG_CLASS ... A C expression whose value is a register class
+ * containing hard register REGNO.  In general there is more than one such
+ * class;  Choose a class which is minimal, meaning that no smaller class also
+ * contains the register.
+ */
+#define	REGNO_REG_CLASS(R)	(is_ZIP_REG(R)?(((R)<=13)?GENERAL_REGS:ALL_REGS):NO_REGS)
+
+/* BASE_REG_CLASS ... A macro whose definition is the name of the class to which
+ * a valid base register must belong.  A base register is one used in an address
+ * which is the register value plus a displacement.
+ */
+#undef	BASE_REG_CLASS
+#define	BASE_REG_CLASS	GENERAL_REGS
+
+/* MODE_BASE_CLASS(MODE) ... This is a variation of the BASE_REG_CLASS macro
+ * which allows the selection of a bse register in a mode dependent manner.  If
+ * mode is VOIDmode then it should return the same value as BASE_REG_CLASS.
+ */
+#undef	MODE_BASE_CLASS
+#define	MODE_BASE_CLASS(MODE)	GENERAL_REGS
+
+/* MODE_BASE_REG_REG_CLASS(MODE) ... A C expression whose value is the register
+ * class to which a valid base register must belong in order to be used in a
+ * base plus index register address.  You should define this macro if base plus
+ * index addresses have different requirements than other base register uses.
+ *
+ * Zip CPU does not support the base plus index addressing mode, thus ...
+ */
+// #undef	MODE_BASE_REG_REG_CLASS
+// #define	MODE_BASE_REG_REG_CLASS(MODE)	NO_REGS
+
+/* INDEX_REG_CLASS ... A macro whose definition is the name of the class to
+ * which a valid index register must belong.  An index register is one used in
+ * an address where its value is either multiplied by a scale factor or added
+ * to another register (as well as added to a displacement).
+ *
+ * ZipCPU -- Has no index registers.
+ */
+#undef	INDEX_REG_CLASS
+#define	INDEX_REG_CLASS	NO_REGS
+
+/* REGNO_OK_FOR_BASE_P(NUM) ... A C expression which is nonzero if register
+ * number num is suitable for use as a base register in operand addresses.
+ */
+#undef	REGNO_OK_FOR_BASE_P
+# define REGNO_OK_FOR_BASE_P(NUM)	((NUM>=FIRST_PSEUDO_REGISTER)||(NUM != zip_CC))
+
+/* REGNO_MODE_OK_FOR_BASE_P ... A C expression that is just like
+ * REGNO_OK_FOR_BASE_P, except that the expression may examine the mode of the
+ * memory reference in MODE.  You should define this macro if the mode of the
+ * memory reference affects whether a register may be used as a base register.
+ *
+ * ZipCPU --- the mode doesn't affect anything, so we don't define this.
+ */
+
+/* REGNO_MODE_OK_FOR_REG_BASE_P(NUM, MODE) ... base plus index operand
+ * addresses, accessing memory in mode mode.
+ *
+ * Use of this macro is deprecated.
+ */
+
+/* REGNO_MODE_CODE_OK_FOR_BASE_P(N,M,AS,OC,IC) ... A C expression which is
+ * nonzero if a register number N is suitable for use as a base register in 
+ * operand addresses, accessing memory in mode M in address space AS.  This is
+ * similar to REGNO_MODE_OK_FOR_BASE_P, except that the expression may examine
+ * the context in which the register appears in the memory reference.  
+ *
+ * ZipCPU---We aren't specific in how we use our registers.
+ */
+#define	REGNO_MODE_CODE_OK_FOR_BASE_P(N,M,AS,OC,IC) REGNO_OK_FOR_BASE_P(N)
+
+/* REGNO_OK_FOR_INDEX_P(REGNO) ... A C expression which is nonzero if register
+ * num is suitable for use as an index register in operand addresses.  It may
+ * be either a suitable hard register or a pseudo register that has been
+ * allocated such as a hard register.
+ *
+ * ZipCPU has no index registers, therefore we declare this to be zero.
+ */
+#undef	REGNO_OK_FOR_INDEX_P
+#define	REGNO_OK_FOR_INDEX_P(REGNO)	0
+
+/* TARGET_PREFERRED_RENAME_CLASS(RCLASS) ... A target hook that places
+ * additional preference on the register class to use when it is necessary to 
+ * rename a register in class RCLASS to another class, or perhaps NO_REGS, if no
+ * preferred register class is found or hook preferred_rename_class is not
+ * implemented.  Something returning a more restrictive class makes better code.
+ * For example, on ARM, thumb-2 instructions using LO_REGS may be smaller than
+ * instructions using GENERIC_REGS.  By returning LO_REGS from
+ * preferred_rename_class, code size can be reduced.
+ */
+// #undef TARGET_PREFERRED_RENAME_CLASS
+// #define	TARGET_PREFERRED_RENAME_CLASS(RCLASS)	RCLASS
+
+/* TARGET_PREFERRED_RELOAD_CLASS(X,RC) ... A target hook that places additional
+ * restrictions on the register class to use when it is necessary to copy value
+ * X into a register in class RC.  The value is a register class; perhaps RC, or
+ * perhaps a smaller class.
+ *
+ * The default version of this hook always returns the value of the RC argument, which
+ * sounds quite appropriate for the ZipCPU.
+ */
+
+/* PREFERRED_RELOAD_CLASS(X,CLASS) ... A C expression that places additional
+ * restrictions on the register class to use when it is necessary to copy
+ * value X into a register in class CLASS.  On many machines, the following
+ * definition is safe: PREFERRED_RELOAD_CLASS(X,CLASS) (CLASS) 
+ * Sometimes returning a more restrictive class makes better code.  For example,
+ * on the 68k, when x is an integer constant that is in range for a moveq
+ * instruction, the value of this macro is always DATA_REGS as long as CLASS
+ * includes the data registers.  Requiring a data register guarantees that a
+ * 'moveq' will be used.
+ *
+ * ZipCPU --- you can't load certain values into all members of ALL_REGS.  For
+ * example, loading (sleep and !gie) into the CC register could halt the CPU.
+ * Hence, we only allow loads into the GENERAL_REG class.
+ */
+#define	PREFERRED_RELOAD_CLASS(X, CLASS)	GENERAL_REGS
+
+/* TARGET_PREFERRED_OUTPUT_RELOAD_CLASS(RTX,RCLASS) ... Like TARGET_PREFERRED_..
+ * RELOAD_CLASS, but for output instead of input reloads.
+ *
+ * ZipCPU --- there's gotta be a valid default behaviour for this.
+ */
+
+/* LIMIT_RELOAD_CLASS(MODE, CL) ... 
+ *
+ * Don't define this macro unless the target machine has limitations which
+ * require the macro to do something nontrivial.  ZipCPU doesn't, so we won't.
+ */
+
+/* TARGET_SECONDARY_RELOAD
+ * SECONDARY_ ...
+ * Don't think we need these ...
+ */
+
+/* CLASS_MAX_NREGS(CLASS,MODE) ... A C expression for the maximum number of
+ * consecutive registers of class CLASS needed to hold a value of mode MODE.
+ *
+ * This is closely related to the macro HARD_REGNO_NREGS.  In fact, the value
+ * of the macro CLASS_MAX_NREGS(CL,M) should be the maximum value of
+ * HARD_REGNO_NREGS(REGNO,MODE) for all REGNO values in the class CLASS.
+ *
+ * This macro helps control the handling of multiple word values in the reload
+ * pass.
+ * 
+ * ZipCPU --- We'll just use HARD_REGNO_NREGS, since CLASS is independent for
+ * us.  We'll also choose register R0, since ... well, since it simply doesn't
+ * matter.  (HARD_REGNO_NREGS ignores this anyway)
+ */
+#define	CLASS_MAX_NREGS(CLASS, MODE)	HARD_REGNO_NREGS(0,MODE)
+
+/* CANNOT_CHANGE_MODE_CLASS
+ * ???
+ */
+
+/* TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS
+ */
+
+/* TARGET_LRA_P
+ * Default looks good.
+ */
+
+/* TARGET_REGISTER_PRIORITY(INT) ... A target hook which returns the register
+ * priority number to which the register HARD_REGNO belongs to.  The bigger the
+ * number
+ *
+ * The default version of this target hook returns always zero---good enough for
+ * the ZipCPU.
+ */
+
+/* TARGET_REGISTER_USAGE_LEVELING_P(VOID) ... A target hook which returns true
+ * if we need register usage leveling.  That means if a few hard registers are
+ * equally good for the assignment, we choose the least used hard register.  The
+ * register usage leveling may be profitable for some targets.  Don't use usage
+ * leveling for targets with conditional execution or targets with big register
+ * files as it hurts if-conversion and cross-jumping optimizations.  The default
+ * version of this target hook returns always false.
+ *
+ * ZipCPU --- Default is the right answer.
+ */
+
+/* TARGET_DIFFERENT_ADDR_DISPLACEMENT_P ...
+ * Default looks good.
+ */
+
+/* TARGET_CANNOT_SUBSTITUTE_MEM_EQUIV_P ... 
+ * Default looks good.
+ */
+
+/* TARGET_LEGITIMIZE_ADDRESS_DISPLACEMENT ....
+ */
+
+/* TARGET_SPILL_CLASS
+ *
+ * ZipCPU --- If we were running in supervisor mode only, this might be the
+ * user set of registers.  However, we're not building for that mode (now),
+ * so we'll leave this at the default of NO_REGS.
+ */
+
+/* TARGET_CSTORE_MODE(ICODE) ... Defines the machine mode to use for the
+ * boolean result of conditional store patterns.  The ICODE argument is the 
+ * instruction code for the cstore being performed.  Not defining this hook is
+ * the same as accepting the mode encoded into operand 0 of the cstore expander
+ * patterns.
+ *
+ * ??? ZipCPU --- I don't follow this documentation.  We'll leave this at the
+ * default therefore.
+ */
+
+/* 17.09 Stack Layout and Calling Conventions */
+
+
+/* STACK_GROWS_DOWNWARD ... Define this macro if pushing a word onto the stack
+ * moves the stack pointer to a smaller address, and false otherwise.
+ *
+ * ZipCPU ... well, our stack does grow downward, but it doesn't do so auto-
+ * magically.  We have to move the stack pointer ourselves.  However, since this
+ * is our convention, we'll define it as such.
+ */
+#undef	STACK_GROWS_DOWNWARD
+#define	STACK_GROWS_DOWNWARD	1
+
+/* STACK_PUSH_CODE ... This macro defines the operation used when something is 
+ * pushed on the stack.  In RTL, a push operation will be
+ * (set (mem( STACK_PUSH_CODE(reg sp))) ...) The choices are PRE_DEC, POST_DEC,
+ * PRE_INC, and POST_INC.  Which of these is correct depends on the stack 
+ * direction and on whether the stack pointer points to the last item on the
+ * stack or whether it points to the space for the next item on the stack.
+ * The default is PRE_DEC when STACK_GROWS_DOWNWARD is true, which is almost
+ * always right, and PRE_INC otherwise, which is often wrong.
+ *
+ * ZipCPU --- None of these is right, so let's leave this at the default and
+ * see how badly we get mangled.  In particular, ZipCPU doesn't have any of the
+ * PRE_DEC, POST_DEC, PRE_INC, or POST_INC addressing modes used here.
+ */
+
+/* FRAME_GROWS_DOWNWARD ... Define this macro to nonzero if the addresses of
+ * local variable slots are at negative offsets from the frame pointer.
+ *
+ * ZipCPU --- If the frame pointer is defined as the stack pointer upon the
+ * start of function execution, and that stack pointer grows downward, then
+ * this should be the case as well.
+ */
+#undef	FRAME_GROWS_DOWNWARD
+#define	FRAME_GROWS_DOWNWARD	1
+// #define	FRAME_GROWS_DOWNWARD	0	// This was ECO32's value
+
+
+/* ARGS_GROW_DOWNWARD ... Define this macro if successive arguments to a
+ * function occupy decreasing addresses on the stack.
+ *
+ * ZipCPU -- we can leave this up to the compiler's preferred implementation,
+ * it is of no consequence to the hardware.
+ */
+
+/* STARTING_FRAME_OFFSET ... Offset from the frame pointer to the first local
+ * variable slot to be allocated.  If FRAME_GROWS_DOWNWARD, find the next slot's
+ * offset by subtracting the first slot's length from STARTING_FRAME_OFFSET.
+ * Otherwise it is found by adding the length of the first slot to the value
+ * START_FRAME_OFFSET.
+ *
+ * ZipCPU --- I'm not certain on this, let's come back after we look at how
+ * the code is getting generated.  However, the ECO32 code I am copying from
+ * suggests that 0 is the right value, so we'll use that here.
+ */
+// #warning "Re-evaluate me" --- I did.  This still looks good.
+#define	STARTING_FRAME_OFFSET	0
+
+/* STACK_ALIGNMENT_NEEDED ... Define to zero to disable final alignment of the
+ * stack during reload.  The nonzero default for this macro is suitable for most
+ * ports.
+ *
+ * ZipCPU --- Yes, our stack needs to be aligned.  The default should work
+ * nicely.
+ */
+
+/* STACK_POINTER_OFFSET ... Offset from the SP register to the first location at
+ * which outgoing arguments are placed.  If not specified, the default value
+ * of zero is used.  This is the proper value for most machines.
+ */
+#define	STACK_POINTER_OFFSET	0
+
+/* FIRST_PARM_OFFSET ... Offset from the argument pointer register to the first
+ * argument's address.  On some machines it may depend on the data type of the
+ * function.
+ */
+#define	FIRST_PARM_OFFSET(F)	0
+
+/* STACK_DYNAMIC_OFFSET(F) ... Offset from the stack pointer register to an item
+ * dynamically allocated on the stack, e.g., by alloca.  The default value for
+ * this macro is STACK_POINTER_OFFSET plus the length of the outgoing arguments.
+ * The default is correct for most machines, ...
+ *
+ * ZipCPU --- so we'll use it for the ZipCPU.
+ */
+
+/* INITIAL_FRAME_ADDRESS_RTX ... A C expression whose value is RTL representing
+ * the address of the initial stack frame.  This address is passed to
+ * RETURN_ADDR_RTX and DYNAMIC_CHAIN_ADDRESS.  If you don't define this macro,
+ * a reasonable default value will be used.  Define this macro in order to make
+ * frame pointer elimination work in the presence of __builtin_frame_address(C)
+ * and __builtin_return_address(C) for (C) not equal to zero.
+ *
+ * ZipCPU --- Let's try the reasonable default and see what happens.
+ */
+
+/* SETUP_FRAME_ADDRESSES ... A C expression that produces the machine-specific
+ * code to setup the stack so that arbitrary frames can be accessed.  For
+ * example, on the SPARC, we must flush all of the register windows to the stack
+ * before we can access arbitrary stack frames.  You will seldom need to define
+ * this macro.  The default is to do nothing.
+ *
+ * ZipCPU --- which is what we shall do here.
+ */
+
+/* TARGET_BUILTIN_SETJMP_FRAME_VALUE(VOID) ... This target hook should return
+ * an RTX that is used to store the address of the current frame into the
+ * builtin setjmp buffer.  The default value, virtual_stack_vars_rtx, is correct
+ * for most machines.  One reason you may need to define this target hook is if
+ * hard_frame_pointer_rtx is the appropriate value on your machine.
+ *
+ * ZipCPU --- leave this undefined, since the default value should be correct
+ * for "most" machines.
+ */
+
+/* FRAME_ADDR_RTX ... most machines do not need to define it.
+ */
+
+/* RETURN_ADDR_RTX(COUNT,FRAMEADDR) ... A C expression whose value is RTL
+ * representing the value of the return address for the frame COUNT steps up
+ * from the current frame, after the prologue.  FRAMEADDR is the frame pointer
+ * of the COUNT frame, or the frame pointer of the COUNT-1 frame if 
+ * RETURN_ADDR_IN_PREVIOUS_FRAME is nonzero.  The value of the expression must
+ * always be the correct address when COUNT is nonzero, but may be NULL_RTX if
+ * there is no way to determine the return address of other frames.
+ *
+ * ZipCPU --- Our answer for the current frame is ... it depends.  If we can
+ * force the use of a frame in every debug context, we could compute this for
+ * COUNT != 0.  For now, we'll just look at the registers we save and return
+ * where the return address is in the current frame.  To do that, though, we
+ * need some help from C.
+ */
+#undef	RETURN_ADDR_RTX
+#define	RETURN_ADDR_RTX(COUNT,FRAMEADDR)	zip_return_addr_rtx(COUNT,FRAMEADDR)
+
+/* RETURN_ADDR_IN_PREVIOUS_FRAME ... Define this macro to nonzero value if the
+ * return address of a particular stack frame is accessed from the frame pointer
+ * of the previous stack frame.  The zero default for this macro is suitable
+ * for most ports.
+ *
+ * ZipCPU---Default works here as well.
+ */
+
+/* INCOMING_RETURN_ADDR_RTX ... A C expression whose value is RTL representing
+ * the location of the incoming return address at the beginning of any function,
+ * before the prologue.  This RTL is either a REG, indicating that the return
+ * value is saved in 'REG', or a MEM representing the location in the stack.  
+ * If this RTL is a REG, you should define DWARF_RETURN_COLUMN to
+ * DWARF_FRAME_REGNUM(REGNO).
+ *
+ * ZipCPU --- While our incoming return address could theoretically be in any
+ * register, our machine description file is going to place it into register
+ * R0, so that's what we return here.
+ */
+#undef	INCOMING_RETURN_ADDR_RTX
+#define	INCOMING_RETURN_ADDR_RTX	gen_rtx_REG(SImode, zip_LR)
+
+
+/* DWARF_ALT_FRAME_RETURN_COLUMN
+ */
+
+/* DWARF_ZERO_REG ... A C expression whose value is an integer giving a DWARF2
+ * register number that is considered to always have the value zero.  This
+ * should only be defined if the target has an architected zero register (ZipCPU
+ * does not), and someone decided it was a good idea to use that register number
+ * to terminate the stack backtrace.  New ports should avoid this (so the
+ * ZipCPU port will avoid it as well).
+ *
+ */
+
+/* TARGET_DWARF_HANDLE_FRAME_UNSPEC
+ */
+
+/* INCOMING_FRAME_SP_OFFSET ... A C expression whose value is an integer giving
+ * the offset, in bytes, from the value of the stack pointer register to the
+ * top of the stack frame at the beginning of any function, before the prologue.
+ * The top of the frame is defined to be the value of the stack pointer in the
+ * previous frame, just before the call instruction.
+ *
+ * You only need to define this macro if you want to support call frame 
+ * debugging information like that provided by DWARF 2.
+ *
+ * ZipCPU---Our value is zero.
+ */
+#define	INCOMING_FRAME_SP_OFFSET	0
+
+/* ARG_POINTER_CFA_OFFSET
+ */
+
+/* FRAME_POINTER_CFA_OFFSET
+ */
+
+/* CFA_FRAME_BASE_OFFSET
+ */
+
+/* 17.09.02 Exception handling support */
+
+/* EH_RETURN_DATA_REGNO(N) ... A C expression whose value is the Nth register
+ * number used for data by exception handlers, or INVALID_REGNUM if fewer than
+ * N registers are usable.  The exception handling library routines communicate
+ * with the exception handlers via a set of agreed upon registers.  Ideally
+ * these registers should be call clobbered; it is possible to use call-saved
+ * registers, but may negatively impact code size.  The target must support at 
+ * least 2 data registers, but should define 4 if there are enough free
+ * registers.  
+ *
+ * You must define this macro if you want to support call frame exception 
+ * handling like that provided by DWARF 2.
+ *
+ * ZipCPU -- We copy much of our definition from Moxie.
+ */
+#define	EH_RETURN_DATA_REGNO(N)	((N<3)?(N+ZIP_FIRST_ARG_REGNO):INVALID_REGNUM)
+
+/* EH_RETURN_STACKADJ_RTX ... A C expression whose value is RTL representing
+ * a location in which to store a stack adjustment to be applied before function
+ * return.  This is used to unwind the stack to an exception handler's call
+ * frame.  It will be assigned zero on code paths that return normally.
+ *
+ * Do not define this macro if the stack pointer is saved and restored by the
+ * regular prolog and epilog code in the call frame itself (which it is for the
+ * ZipCPU); in this case, the exception handling library routines will update 
+ * the stack location to be restored in place.  Otherwise, you must define this
+ * macro if you want to support call frame exception handling like that provided
+ * by DWARF 2.
+ *
+ */
+
+/* EH_RETURN_HANDLER_RTX ... A C expression whose value is RTL representing a
+ * location in which to store the address of an exception handler to which we
+ * should return.  It will not be assigned on code paths that return normally.
+ *
+ * Typically this is the location in the call frame at which the normal return
+ * address is stored.  For targets that return by popping an address of the 
+ * stack, this might be a memory address just below the target call frame
+ * rather than inside the current call frame.  If defined,
+ * EH_RETURN_STACKADJ_RTX will have already been assigned, so it may be used
+ * to calculate the location of the target call frame.
+ *
+ * If you want to support call frame exception handling, you must define either
+ * this macro or the eh_return instruction pattern.
+ *
+ * ZipCPU --- We again copy from Moxie
+ */
+#define	EH_RETURN_HANDLER_RTX	\
+	gen_frame_mem(Pmode, plus_constant(Pmode, frame_pointer_rtx, UNITS_PER_WORD))
+
+/*
+ *
+ *
+ *
+ *   REST OF SECTION SKIPPED ...
+ *
+ *
+ *
+ */
+
+/* 17.09.03 Specifying how stack checking is done */
+
+/* STACK_CHECK_BUILTIN ... a non-zero value if stack checking is done by the
+ * configuration files in a machine-dependent manner.  You should define this
+ * macro if stack checking is required by the ABI of your machine or if you
+ * would like to do stack checking in some more efficient way than the generic
+ * approach.  The default value of this macro is zero.
+ *
+ * ZipCPU --- The default makes sense for us.
+ */
+// #define STACK_CHECK_BUILTIN	0
+
+/* STACK_CHECK_STATIC_BUILTIN ... A nonzero value if static stack checking is 
+ * done by the configuration files in a machine-dependent manner.  You should
+ * define this macro if you would like to do static stack checking in some more
+ * efficient way than the generic approach.  The default value of this macro
+ * is zero.
+ *
+ * ZipCPU --- The default makes sense for us.
+ */
+
+/* STACK_CHECK_PROBE_INTERVAL_EXP ...  An integer specifying the interval at
+ * which GCC must generate stack probe instructions, defined as 2 raised to this
+ * interval.  You will normally define this macro so that the interval is no
+ * larger than the size of the "guard pages" at the end of a stack area.  The
+ * default value of 12 (4096-byte interval) is suitable for most systems.
+ *
+ * ZipCPU --- Default.
+ */
+
+/* STACK_CHECK_MOVING_SP ... An integer which is non-zero if GCC should move
+ * the stack pointer page by page when doing probes.  This can be necessary
+ * on systems where the stack pointer contains the bottom address of the memory
+ * area accessible to the executing thread at any point in time.  In this
+ * situation, an alternate signal stack is required in order to be able to
+ * recover from a stack overflow.  The default value of this macro is zero.
+ *
+ * ZipCPU -- Default.
+ */
+
+/* STACK_CHECK_PROTECT
+ */
+/* STACK_CHECK_MAX_FRAME_SIZE
+ * ... you should normally not change the default value of this macro.
+ */
+/* STACK_CHECK_FIXED_FRAME_SIZE
+ * ... you ... will normally use the default of four words.
+ */
+
+/* STACK_CHECK_MAX_VAR_SIZE
+ * ... you will normally not need to override that default.
+ */
+
+/* 17.09.04 Registers that Address the Stack Frame */
+
+/* STACK_POINTER_REGNUM ... The register number of the stack pointer register,
+ * which must also be a fixed register according to FIXED_REGISTERS.  On most
+ * machines, the hardware determines which register this is.
+ */
+#undef STACK_POINTER_REGNUM
+#define	STACK_POINTER_REGNUM	zip_SP
+
+/* FRAME_POINTER_REGNUM ... The register number of the frame pointer register,
+ * which is used to access certain automatic variables in the stack frame.  On
+ * some machines, the hardware determines which register this is.  On other
+ * machines you can choose any register you wish for this purpose.
+ *
+ * ZipCPU --- While I'd like to dump this pointer, since I don't really see
+ * a need for it, alloca() requires it.  Therefore let's assign a register to
+ * this purpose and watch what the compiler does with it.
+ */
+#define	FRAME_POINTER_REGNUM	zip_FP
+
+/* HARD_FRAME_POINTER_REGNUM ... On some machines the offset between the frame
+ * pointer and starting offset of the automatic variables is not known until 
+ * after register allocation has been done (for example, because the saved 
+ * registers are between these two locations).  On those machines, define 
+ * FRAME_POINTER_REGNUM the number of a special, fixed register to be used
+ * internally until the offset is known, and define HARD_FRAME_POINTER_REGNUM
+ * to be the actual hard register number used for the frame pointer.
+ *
+ * Do not define this macro if it would be the same as FRAME_POINTER_REGNUM
+ *
+ * ZipCPU --- note: contrary to the advice above, it IS defined below anyway.
+ */
+#define HARD_FRAME_POINTER_REGNUM	zip_FP
+
+/* ARG_POINTER_REGNUM ... The register number of the arg pointer register, which
+ * is used to access the function's argument list.  On some machines, this is
+ * the same as the frame pointer register.  On some machines, the hardware
+ * determines which register this is.  On other machines, you can choose any
+ * register you wish for this purpose.  If this is not the same register as the
+ * frame pointer register, then you must mark it as a fixed register according
+ * to FIXED_REGISTERs, or arrange to be able to eliminate it.
+ *
+ * ZipCPU --- We really don't want to lose another register to something
+ * pointless, so let's set this to be the frame pointer register.  Especially
+ * given the ZipCPU's ease of accessing things via offsets of registers, this
+ * should work for a rather large stack frame.
+ *
+ * However ... we had problems with passing 6+ variables on the stack, so let's
+ * try creating a pseudo register for this, and appropriately adjusting the
+ * offset between this pseudo register and the stack pointer ...
+ */
+#define	ARG_POINTER_REGNUM	zip_AP_PSEUDO
+
+/* HARD_FRAME_POINTER_IS_FRAME_POINTER ... define this to be a preprocessor
+ * constant that is nonzero if hard_frame_pointer_rtx and frame_pointer_rtx
+ * should be the same.  The default definition is sufficient for us.
+ */
+
+/* HARD_FRAME_POINTER_IS_ARG_POINTER ... 
+ * ZipCPU doesn't need this macro
+ */
+
+/* RETURN_ADDRESS_POINTER_REGNUM ... The register number of the return address
+ * pointer register, which is used to access the current function's return
+ * address from the stack.  On some machines, the return address is not at a 
+ * fixed offset from the frame pointer or stack pointer or argument pointer.
+ * This register can be defined to point to the return address on the stack, and
+ * then to be converted by ELIMINABLE_REGS into either the frame pointer or the
+ * stack pointer.
+ *
+ * Do not define this macro unless there is no other way to get the return 
+ * address from the stack.
+ *
+ * ZipCPU---we need this.  (Note: the define below spells RETURN_ADDRESS_REGNUM,
+ * not GCC's documented RETURN_ADDRESS_POINTER_REGNUM -- confirm intent.) */
+#define	RETURN_ADDRESS_REGNUM	zip_LR
+
+
+/* STATIC_CHAIN_REGNUM ... Register numbers used for passing a function's
+ * static chain pointer.  If register windows are used, the register number as
+ * seen by the called function is STATIC_CHAIN_INCOMING_REGNUM, while the
+ * register number as seen by the calling function is STATIC_CHAIN_REGNUM.  If
+ * these register are the same, STATIC_CHAIN_INCOMING_REGNUM need not be
+ * defined.
+ *
+ * ZipCPU --- even without register windows, we still need to pick an
+ * (arbitrary) register to pass the pointer to the static chain in the case of
+ * nested functions.  Let's arbitrarily pick R5, and ... see how that works for
+ * us.
+ */
+#define	STATIC_CHAIN_REGNUM	zip_R5
+
+/* TARGET_STATIC_CHAIN ... This hook replaces the use of STATIC_CHAIN_REGNUM et
+ * al for targets that may use different static chain locations for different
+ * nested functions.  This may be required if the target has function attributes
+ * that affect the calling conventions of the function and those calling
+ * conventions use different static chain locations.
+ *
+ * ZipCPU --- don't need this.
+ */
+// #define	TARGET_STATIC_CHAIN	zip_R11
+
+
+/* DWARF_FRAME_REGISTERS ... This macro specifies  the maximum number of hard
+ * registers that can be saved in a call frame.  This is used to size data
+ * structures used in DWARF2 exception handling.  
+ *
+ * Prior to GCC 3.0, this macro was needed in order to establish a stable
+ * exception handling ABI in the face of adding new hard registers for ISA
+ * extensions.  In GCC 3.0 and later, the EH ABI is insulated from changes in
+ * the number of hard registers.  Nevertheless, this macro can still be used to
+ * reduce the runtime memory requirements of the exception handling routines,
+ * which can be substantial if the ISA contains a lot of registers that are not
+ * call-saved.
+ *
+ * If this macro is not defined, it defaults to FIRST_PSEUDO_REGISTER.
+ *
+ * ZipCPU --- The default is not sufficient.  The CC and PC registers need to 
+ * be saved and examined as well in any debug/exception context.  Hence, we
+ * define this to be all of our registers.
+ */
+#undef DWARF_FRAME_REGISTERS
+#define	DWARF_FRAME_REGISTERS	16
+
+/* PRE_GCC3_DWARF_FRAME_REGISTERS ... This macro is similar to DWARF_FRAME_REG..
+ * but is provided for backward compatibility in pre GCC 3.0 compiled code.
+ *
+ * If not defined, it defaults to DWARF_FRAME_REGISTERS---which is perfect for
+ * the ZipCPU.
+ */
+
+/* DWARF_REG_TO_UNWIND_COLUMN(REGNO) ... Define this macro if the target's
+ * representation for dwarf registers is different than the internal 
+ * representation for unwind column.  Given a dwarf register, this macro should
+ * return the unwind column number to use instead.
+ *
+ * ... ???
+ */
+
+/* DWARF_FRAME_REGNUM(REGNO) ... Define this macro is the target's
+ * representation for dwarf registers used in .eh_frame or .debug_frame is
+ * different from that used in other debug info sections.  Given a GCC hard
+ * register number, this macro should return the .eh_frame register number.
+ * The default is DBX_REGISTER_NUMBER(REGNO).
+ *
+ * ZipCPU --- provided we define DBX_REGISTER_NUMBER(REGNO) well, this default
+ * should still work for us.
+ */
+
+/* DWARF2_FRAME_REG_OUT(REGNO, FOR_EH) ... Define this macro to map register
+ * numbers held in the call frame info that GCC has collected using
+ * DWARF_FRAME_REGNO to those that should be output in .debug_frame (for_eh is
+ * zero) and .eh_frame (for_eh is non-zero). The default is to return REGNO.
+ *
+ * ZipCPU --- Default is good enough.
+ */
+
+/* REG_VALUE_IN_UNWIND_CONTEXT ... Define this macro if the target stores
+ * register values as _Unwind_Word type in unwind context.  It should be defined
+ * if target register size is larger than the size of void *.  The default
+ * is to store register values as void *type.
+ *
+ * ZipCPU --- Default is what we need.
+ */
+
+/* ASSUME_EXTENDED_UNWIND_CONTEXT ... Define this macro to be 1 if the target 
+ * always uses extended unwind context with version, args_size, and by_value
+ * fields.  If it is undefined, it will always be defined to 1 when REG_VALUE_IN_UNWIND_CONTEXT is defined and 0 otherwise.
+ *
+ */
+
+
+/* 17.09.05 Eliminating Frame Pointer and Arg Pointer */
+
+/* TARGET_FRAME_POINTER_REQUIRED(VOID) ... This target hook should return true 
+ * if a function must have and use a frame pointer.  This target hook is
+ * called in the reload pass.  If its return value is true, the function will
+ * have a frame pointer.
+ *
+ * This target hook can in principle examine the current function and decide
+ * according to the facts, but on most machines the constant false or the
+ * constant true suffices.  Use false when the machine allows code to be
+ * generated with no frame pointer, and doing so saves some time or space. 
+ * Use true when there is no possible advantage to avoiding a frame pointer.
+ *
+ * ZipCPU---if we add in a frame pointer, we become register starved.  Hence,
+ * we'll treat this as a constant false--which is also the default value.
+ */
+#define	target_frame_pointer_required	zip_frame_pointer_required
+
+/* INITIAL_FRAME_POINTER_OFFSET ... A C statement to store in the variable
+ * depth-var the difference between the frame pointer and the stack pointer
+ * values immediately after the function prologue.  The value would be computed
+ * from information such as the result of get_frame_size() and the tables of 
+ * registers regs_ever_live and call_used_regs.
+ *
+ * If ELIMINABLE_REGS is defined, this macro will not be used and need not be
+ * defined.  Otherwise, it must be defined even if TARGET_FRAME_POINTER_REQD
+ * always returns true; in that case you may set depth-var to anything.
+ *
+ * ZipCPU --- we intend to set ELIMINABLE_REGS, so this is not necessary.
+ */
+// #define	INITIAL_FRAME_POINTER_OFFSET(DEPTH)	(DEPTH) = 0
+
+
+/* ELIMINABLE_REGS ... If defined, this macro specifies a table of register
+ * pairs used to eliminate unneeded registers that point into the stack frame.
+ * If it is not defined, the only elimination attempted by the compiler is to
+ * replace references to the frame pointer with references to the stack pointer.
+ *
+ * On some machines, the position of the argument pointer is not known until
+ * the compilation is completed.  In such a case, a separate hard register
+ * must be used for the argument pointer.  This register can be eliminated by
+ * replacing it with either the frame pointer or the argument pointer,
+ * depending on whether or not the frame pointer has been eliminated.
+ *
+ * ZipCPU we'll take their suggestion and define this as:
+ */
+#undef ELIMINABLE_REGS
+#define	ELIMINABLE_REGS	\
+	{{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM },	\
+	 { ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM },	\
+	 { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}}
+
+/* bool TARGET_CAN_ELIMINATE(FROM,TO) ... This target function should return
+ * true if the compiler is allowed to try to replace register number FROM with
+ * register number TO.  This target hook need only be defined if ELIMINABLE_REGS
+ * is defined, and will usually return true since most of the cases preventing
+ * register elimination are things that the compiler  already knows about.
+ * 
+ * ZipCPU ... does the compiler  know about my decision as to whether or not
+ * the frame pointer was needed?  Yes it does, but it's kept separately.  We'll
+ * just say everything can be eliminated.
+ */
+#define TARGET_CAN_ELIMINATE	zip_can_eliminate
+
+/* INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) ... This macro is similar to 
+ * INITIAL_FRAME_POINTER_OFFSET.  It specifies the initial difference between
+ * the specified pair of registers.  This macro must be defined if
+ * ELIMINABLE_REGS is defined.
+ *
+ * ZipCPU---We had at one time set this to a default offset of 0.  This didn't
+ * work.  It turns out that this is not only the *initial* elimination offset,
+ * but also the offset along the way.  Hence, when a variable needs to be
+ * spilled to the stack, this offset must change.  Reload goes and checks for
+ * this, and adjusts registers if the offset has changed.  Hence, without this,
+ * we get negative (i.e. illegal) stack offsets.
+ */
+#define	INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET)		\
+	do { (OFFSET) = zip_initial_elimination_offset((FROM), (TO)); } \
+	while(0)					\
+
+/* 17.09.06 Passing function arguments on the stack */
+
+/* TARGET_PROMOTE_PROTOTYPES ... Returns true if an argument declared in a 
+ * prototype as an integral type smaller than int should actually be
+ * passed as an int.  In addition to avoiding errors in certain cases of
+ * mismatch, it also makes for better code on certain machines.  The default is
+ * to not promote prototypes.
+ *
+ * Since everything is an int on the ZipCPU, let's promote anything smaller
+ * (which should still be an int) up to an int anyway.
+ */
+#undef	TARGET_PROMOTE_PROTOTYPES
+#define	TARGET_PROMOTE_PROTOTYPES	hook_bool_const_tree_true
+
+/* PUSH_ARGS ... A C expression.  If nonzero, push instructions will be used to
+ * pass outgoing arguments.  If the target machine does not have a push
+ * instruction, set it to zero.  That directs GCC to use an alternate strategy:
+ * to allocate the entire argument block and then store the arguments into it.
+ * When PUSH_ARGS is nonzero, PUSH_ROUNDING must be defined too.
+ *
+ * ZipCPU does not have a push instruction, so we set this to zero.
+ */
+#undef	PUSH_ARGS
+#define	PUSH_ARGS	0
+
+/* PUSH_ARGS_REVERSED ... A C expression.  If nonzero, function arguments will
+ * be evaluated last to first, rather than first to last.  If this macro is
+ * not defined, it defaults to PUSH_ARGS on targets where the stack and args
+ * grow in opposite directions, and zero otherwise.
+ *
+ * ZipCPU---Let's evaluate our arguments first to last.
+ */
+#define	PUSH_ARGS_REVERSED	1
+
+/* PUSH_ROUNDING(NPUSHED) ... A C expression that is the number of bytes
+ * actually pushed onto the stack when an instruction attempts to push
+ * (NPUSHED) bytes.
+ *
+ * ZipCPU---We cannot push bytes.  Let's leave this undefined and see what
+ * happens.
+ */
+// #warning "No appropriate definition seemed right."
+
+/* ACCUMULATE_OUTGOING_ARGS ... A C expression.  If non-zero, the maximum amount
+ * of space required for outgoing arguments will be computed and placed into
+ * crtl->outgoing_args_size.  No space will be pushed onto the stack for each
+ * call; instead the function prologue should increase the stack frame size by
+ * this amount.
+ *
+ * ZipCPU---This is *cool* and so necessary---it saves an extra two instructions
+ * each time we try to call a function/routine.  Yes, we want and *need* this
+ * for good performance.  I mean, think of it, free performance increase?  Who
+ * could argue with that?
+ */
+#undef	ACCUMULATE_OUTGOING_ARGS
+#define	ACCUMULATE_OUTGOING_ARGS	1
+
+
+/* REG_PARM_STACK_SPACE(FN) ... Define this macro if functions should assume
+ * that stack space has been allocated for arguments even when their values
+ * are passed in registers.  The value of this macro is the size, in bytes, of
+ * the area reserved for arguments passed in registers for the function
+ * represented by FN, which can be zero if GCC is calling a library function.
+ * The argument FN can be the FUNCTION_DECL, or the type itself of the function.
+ *
+ * This space can be allocated by the caller, or be part of the machine
+ * dependent stack frame: OUTGOING_REG_PARM_STACK_SPACE says which.
+ *
+ * ZipCPU --- Why allocate space you won't use?  Let's leave this undefined
+ * therefore.
+ */
+// #undef	REG_PARM_STACK_SPACE
+
+
+
+/* INCOMING_REG_PARM_STACK_SPACE(FN) ... Like REG_PARM_STACK_SPACE, but for
+ * incoming register arguments.  Define this macro if space guaranteed when
+ * compiling a function body is different to space required when making a call,
+ * a situation that can arise with K&R style function definitions.
+ *
+ */
+
+/* OUTGOING_REG_PARM_STACK_SPACE(FN) ... Define this to a nonzero value if it 
+ * is the responsibility of the caller to allocate the area reserved for
+ * arguments passed in registers when calling a function of FN.  FN may be NULL
+ * if the function called is a library function.
+ *
+ * ZipCPU---Why allocate space you don't need?
+ */
+#define	OUTGOING_REG_PARM_STACK_SPACE(FNTYPE)	0
+
+
+/* STACK_PARMS_IN_REG_PARM_AREA ... Define this macro if REG_PARM_STACK_SPACE
+ * is defined, but the stack parameters don't skip the area specified by it.
+ *
+ * ZipCPU---We didn't define REG_PARM_STACK_SPACE, so we won't define this.
+ */
+
+/* TARGET_RETURN_POPS_ARGS(DECL,FNTYPE,SZ) ... This target hook returns the
+ * number of bytes of its own arguments that a function pops on returning, or 0
+ * if the function pops no arguments and the caller must therefore pop them all
+ * after the function returns.
+ *
+ * ZipCPU --- If we define this, we'll lose our gain from
+ * ACCUMULATE_OUTGOING_ARGS.  Thus, we leave this undefined.
+ */
+
+/* CALL_POPS_ARGS(CUM) ... A C expression that should indicate the number of
+ * bytes a call sequence pops off of the stack.  It is added to the value of
+ * RETURN_POPS_ARGS when compiling a function call.  CUM is the variable in
+ * which all arguments to the function have been accumulated.
+ *
+ * ZipCPU---The call sequence, by itself, doesn't touch the stack.  Therefore
+ * this is zero.
+ */
+#undef CALL_POPS_ARGS
+#define	CALL_POPS_ARGS(CUM)	0
+
+
+/* 17.09.07 Passing arguments in registers */
+
+/* TARGET_FUNCTION_ARG ... Return an RTX indicating whether a function argument
+ * is passed in a register, and if so, which register.
+ */
+/* 
+ * This has been poisoned ... so let's not define it anymore and look for
+ * a better way to do this ...
+ *
+ * #define	FUNCTION_ARG(CUM, MODE, TYPE, NAMED) (((NAMED) == 0) ? NULL_RTX
+ * 	: targetm.calls.must_pass_in_stack(MODE, TYPE)	? NULL_RTX
+ * 	: (CUM) > ZIP_LAST_ARG_REGNO			? NULL_RTX
+ * 	: gen_rtx_REG(MODE, CUM))
+ */
+#define	TARGET_FUNCTION_ARG	zip_function_arg
+
+
+/* TARGET_MUST_PASS_IN_STACK(MODE, TYPE) ... This target hook should return
+ * true if we should not pass TYPE solely in registers.  The file 'expr.h'
+ * defines a definition that is usually appropriate, refer to 'expr.h' for 
+ * additional documentation.
+ *
+ * ZipCPU ... Ok, so I looked into expr.h and didn't find anything that looked
+ * like this.  So ... I don't know.
+ */
+// #undef	TARGET_MUST_PASS_IN_STACK
+// #define	TARGET_MUST_PASS_IN_STACK	zip_must_pass_in_stack
+
+/* TARGET_FUNCTION_INCOMING_ARG ... Define this hook if the target machine
+ * has register windows, ... which ZipCPU does not have.
+ */
+
+/* TARGET_USE_PSEUDO_PIC_REG(void) ... This hook should return 1 in case
+ * pseudo register should be created for pic_offset_table_rtx during function
+ * expand.
+ *
+ * This should be determined by the global PIC parameters, shouldn't it?
+ */
+
+/* TARGET_INIT_PIC_REG(v) ... Perform a target dependent initialization of 
+ * pic_offset_table_rtx.  This hook is called at the start of register
+ * allocation.
+ *
+ * ZipCPU---Let's revisit this.
+ */
+// #warning "Come back and relook at relocations"
+
+/* TARGET_ARG_PARTIAL_BYTES ... This target hook returns the number of bytes
+ * at the beginning of an argument that must be put in registers.  The value
+ * must be zero for arguments that are passed entirely in registers or that
+ * are entirely pushed on the stack.
+ */
+// #undef	TARGET_ARG_PARTIAL_BYTES
+// #define	TARGET_ARG_PARTIAL_BYTES	zip_arg_partial_bytes
+
+/* TARGET_PASS_BY_REFERENCE(CUM,MOD,TREE,NAMED) ... This target hook should
+ * return true if an argument at the position indicated by CUM should be passed
+ * by reference.  This predicate is queried after target independent reasons
+ * for being passed by reference, such as TREE_ADDRESSABLE(TREE).
+ *
+ */
+// #undef	TARGET_PASS_BY_REFERENCE
+// #define	TARGET_PASS_BY_REFERENCE	zip_pass_by_reference
+
+/* CUMULATIVE ARGS ...  A C type for declaring a variable that is used as the
+ * first argument of 'FUNCTION_ARG' and other related values.
+ *
+ * ZipCPU---We're in trouble if an 'int' won't work, so let's just use that.
+ */
+#define	CUMULATIVE_ARGS	int
+
+/*
+ * OVERRIDE_ABI_FORMAT
+ */
+
+/* INIT_CUMULATIVE_ARGS ... A C statement (sans semicolon) for initializing the
+ * variable CUM for the state at the beginning of the argument list. 
+ *
+ *
+ * ZipCPU---The first argument is passed in register ZIP_FIRST_ARG_REGNO, or
+ * R1 (unless it has been redefined above ...)
+ */
+#define	INIT_CUMULATIVE_ARGS(CUM,FNTYPE,LIBNAME,FNDECL,N_NAMED_ARGS) (CUM = 0)
+
+/* INIT_CUMULATIVE_LIBCALL_ARGS
+ * INIT_CUMULATIVE_INCOMING_ARGS
+ *
+ * These default to the last INIT_CUM_ARGS value above.
+ */
+
+/* TARGET_FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) .. This hook updates
+ * the summarizer variable pointed to by CUM to advance past an argument in 
+ * the argument list.  The values MODE, TYPE, and NAMED describe that
+ * argument.  Once this is done, the variable CUM is suitable for analyzing the
+ * following argument with TARGET_FUNCTION_ARG, etc.  This hook need not do
+ * anything if the argument in question was passed on the stack.  The compiler
+ * knows how to track the amount of stack space used for arguments without
+ * any special help.
+ *
+ * ZipCPU---Here we simply copy from ECO32.
+ */
+#define	TARGET_FUNCTION_ARG_ADVANCE	zip_function_arg_advance
+
+/*
+ * TARGET_ARG_OFFSET(MODE, TYPE) ... If defined, a C expression that is the
+ * number of bytes to add to the offset of the argument passed in memory. 
+ * This is needed for the SPU, which passes char and short arguments in the
+ * preferred slot that is in the middle of the quad word instead of starting
+ * at the top.
+ *
+ * ZipCPU -- sounds like the default would be (more) appropriate.
+ */
+/*
+ * FUNCTION_ARG_PADDING	--- not necessary, since we shouldn't be padding
+ * PAD_VARARGS_DOWN	--- not necessary, since we shouldn't be padding
+ * BLOCK_REG_PADDING
+ * TARGET_FUNCTION_ARG_BOUNDARY
+ * TARGET_FUNCTION_ARG_ROUND_BOUNDARY
+ */
+
+/* FUNCTION_ARG_REGNO_P(REGNO) ... A C expression that is nonzero if REGNO is
+ * the number of a hard register in which function arguments are sometimes
+ * passed.  This does not include implicit arguments such as the static chain
+ * and the structure-value address.  On many machines, no registers can be used
+ * for this purpose since all function arguments are pushed on the stack.
+ */
+#define	FUNCTION_ARG_REGNO_P(r) ((r >= ZIP_FIRST_ARG_REGNO)&&(r<=ZIP_LAST_ARG_REGNO))
+
+/* TARGET_SPLIT_COMPLEX_ARG(TYPE) ... This hook should return true if parameter
+ * of type TYPE are passed as two scalar parameters.  By default, GCC will
+ * attempt to pack complex arguments into the target's word size.  Some ABI's
+ * require complex arguments to be split and treated as their individual
+ * components.  
+ *
+ * The default value of this hook is NULL, which is treated as always false,
+ * and which should be good enough for ZipCPU--which can go either way.
+ */
+
+/* TARGET_BUILD_BUILTIN_VA_LIST ... This hook returns a type node for va_list
+ * for the target.  The default version of the hook returns void*.
+ *
+ */
+
+/* TARGET_ENUM_VA_LIST_P
+ */
+
+/* TARGET_FN_ABI_VA_LIST ... This hook returns the va_list type of the calling
+ * convention specified by FN.  The default version of this returns va_list_type_node.
+ */
+
+/* TARGET_FN_ABI_VA_LIST
+ */
+
+/* TARGET_CANONICAL_VA_LIST_TYPE
+ */
+
+/* TARGET_GIMPLIFY_VA_ARG_EXPR
+ */
+
+/* TARGET_VALID_POINTER_MODE(MODE) ... Define this to return nonzero if the
+ * port can handle pointers with machine mode MODE.  The default version of this
+ * hook returns true for both ptr_mode and Pmode.
+ *
+ * ZipCPU---if Pmode is properly defined (above, and I think it is), then the
+ * default behavior is quite appropriate.
+ */
+
+/* TARGET_REF_MAY_ALIAS_ERRNO(REFP) ... Define this to return nonzero if the
+ * memory reference REF may alias with the system C library errno location.
+ * The default version of this hook assumes the system C library errno location
+ * is either a declaration of type int or accessed by dereferencing a pointer
+ * to int.
+ *
+ * ZipCPU --- Default sounds good to me.
+ */
+
+
+/* TARGET_SCALAR_MODE_SUPPORTED_P(MODE) ... Define this to return nonzero if
+ * the port is prepared to handle instructions involving scalar mode MODE.  For
+ * a scalar mode to be considered supported, all the basic arithmetic and
+ * comparisons must work.
+ *
+ * The default version of this hook returns true for any mode required to
+ * handle the basic C types (as defined by the port).  Included here are the
+ * double-word arithmetic supported by the code in optabs.c.
+ *
+ * ZipCPU --- This controls whether a data type of the given mode can even be
+ * declared in C/C++.  Without support for such a mode, you can't even declare
+ * a data type of this type.  Hence, we should support SFmode and DFmode, even
+ * though the hardware *may* support SFmode, and it will *never* support DFmode.
+ */
+#undef	TARGET_SCALAR_MODE_SUPPORTED_P
+#define	TARGET_SCALAR_MODE_SUPPORTED_P	zip_scalar_mode_supported_p
+
+/* TARGET_VECTOR_MODE_SUPPORTED_P(MODE) ... Define this to return nonzero if the
+ * port is prepared to handle instructions involving vector mode MODE.  At the
+ * very least, it must have move patterns for this mode.
+ *
+ * ZipCPU---does not support any vector modes.
+ */
+#undef TARGET_VECTOR_MODE_SUPPORTED_P
+#define	TARGET_VECTOR_MODE_SUPPORTED_P	hook_bool_mode_false
+
+/* TARGET_ARRAY_MODE_SUPPORTED_P(MODE, NELEMS) ... Return true if GCC should 
+ * try to use a scalar mode to store an array of NELEMS elements, given that
+ * each element has mode MODE.  Returning true here overrides the usual
+ * MAX_FIXED_MODE limit and allows GCC to use any defined integer mode.
+ *
+ * ZipCPU---Sounds good.
+ */
+// #undef	TARGET_ARRAY_MODE_SUPPORTED_P
+// #define	TARGET_ARRAY_MODE_SUPPORTED_P	zip_array_mode_supported_p
+
+/* TARGET_LIBGCC_FLOATING_MODE_SUPPORTED_P(MODE) ... Define this to return
+ * nonzero if libgcc provides support for the floating-point mode MODE, which is
+ * known to pass TARGET_SCALAR_MODE_SUPPORTED_P.  The default version of this
+ * hook returns true for all of SFmode, DFmode, XFmode, and TFmode, if such
+ * modes exist.
+ *
+ * ZipCPU---We only support SFmode and DFmode, but for now only in emulation
+ * (if we can).  Let's allow both of those and see how far we get.
+ */
+#undef	TARGET_LIBGCC_FLOATING_MODE_SUPPORTED_P
+#define	TARGET_LIBGCC_FLOATING_MODE_SUPPORTED_P	zip_libgcc_floating_mode_supported_p
+
+/* TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P(MODE) ... Define this to return
+ * nonzero for machine modes for which the port has small register classes.  If
+ * target hook returns nonzero for a given MODE, the compiler will try to 
+ * minimize the lifetime of registers in MODE.  The hook may be called with
+ * VOIDmode as an argument.  In this case, the hook is expected to return 
+ * nonzero if it returns nonzero for any mode.
+ *
+ * The default version of this hook returns false for any mode.
+ *
+ * ZipCPU---Default sounds good.
+ */
+
+/* 17.09.08 How scalar function values are returned */
+
+/* TARGET_FUNCTION_VALUE
+ */
+
+/* LIBCALL_VALUE
+ */
+
+
+/* 17.09.09 How large values are returned */
+
+/* TARGET_RETURN_IN_MEMORY(TYP,FNTYP) ... This target hook should return a
+ * nonzero value to say to return the function value in memory, just as large
+ * structures are always returned.  Here type will be the data type of the value
+ * and FNTYP will be the type of the function doing the returning, or NULL
+ * for libcalls.
+ *
+ */
+#undef	TARGET_RETURN_IN_MEMORY
+#define	TARGET_RETURN_IN_MEMORY	zip_return_in_memory
+
+/* DEFAULT_PCC_STRUCT_RETURN
+ * TARGET_STRUCT_VALUE_RTX
+ * PCC_STATIC_STRUCT_RETURN
+ * TARGET_GET_RAW_RESULT_MODE
+ * TARGET_GET_RAW_ARG_MODE
+ */
+
+
+/* 17.09.10 Caller-Saves Register Allocation */
+/* 17.09.11 Function Entry and Exit */
+// TARGET_ASM_FUNCTION_PROLOGUE
+// TARGET_ASM_FUNCTION_END_PROLOGUE
+// TARGET_ASM_FUNCTION_BEGIN_EPILOGUE
+// TARGET_ASM_FUNCTION_EPILOGUE
+/* EXIT_IGNORE_STACK ... Define this macro as a C expression that is nonzero
+ * if the return instruction or the function epilogue ignores the value of the
+ * stack pointer; in other words, if it is safe to delete an instruction to 
+ * adjust the stack pointer before a return from the function.
+ *
+ * The default is 0.
+ *
+ * Note that this macro's value is relevant only for functions for which frame
+ * pointers are maintained.  It is never safe to delete a final stack adjustment
+ * in a function that has no frame pointer, and the compiler knows this
+ * regardless of EXIT_IGNORE_STACK.
+ *
+ * ZipCPU -- Thanks to the example of the m68k, and a careful selection of what
+ * our options otherwise could have been, our epilogue code does not use the
+ * stack register at all, but rather starts by moving the frame register into
+ * the stack register.
+ */
+#define EXIT_IGNORE_STACK	1
+// EPILOGUE_USES(regno)
+// EH_USES(regno)
+// TARGET_ASM_OUTPUT_MI_THUNK
+// TARGET_ASM_CAN_OUTPUT_MI_THUNK
+
+/* 17.09.12 Generating code for profiling */
+// FUNCTION_PROFILER
+// PROFILE_HOOK
+// NO_PROFILE_COUNTERS
+// PROFILE_BEFORE_PROLOGUE
+// TARGET_KEEP_LEAF_WHEN_PROFILED
+
+/* 17.09.13 Permitting tail calls*/
+
+/* TARGET_FUNCTION_OK_FOR_SIBCALL(DECL,EXP) ... True if it is OK to do sibling
+ * call optimizations for the specified call expression EXP.  DECL will be the
+ * called function, or NULL if this is an indirect call.
+ *
+ * It is not uncommon for limitations of calling conventions to prevent tail
+ * calls to functions outside the current unit of translation, or during PIC
+ * compilation.  The hook is used to enforce these restrictions, as the sibcall
+ * md pattern can not fail, or fall over to a 'normal' call.  The criteria for
+ * successful sibling call optimization may vary greatly between different
+ * architectures.
+ *
+ * What's a sibling call?  "Sibling calls or tail calls terminate the function
+ * in a non-standard way and thus an edge to the exit must be present.  
+ * EDGE_SIBCALL and EDGE_ABNORMAL are set in such case(s).  These edges only
+ * exist in the RTL representation.
+ *
+ * So, basically, a sibling call is a function call at the end of one function.
+ * Rather than setting up a new stack frame, return address, etc, it is 
+ * possible to just jump to this new function, leaving the return address for
+ * the prior function as the (now) return address for this one.
+ *
+ * ZipCPU --- These are good things.  We wish to support them.  We will require,
+ * though, that the sibling require no more stack space than the original.
+ * We might go even stricter, requiring that the sibling require no stack space
+ * at all--and only register variables.
+ */
+#define	TARGET_FUNCTION_OK_FOR_SIBCALL	zip_function_ok_for_sibcall
+
+/* TARGET_EXTRA_LIVE_ON_ENTRY(REGS) ... Add any hard registers to regs that are
+ * live on entry to the function.  This hook only needs to be defined to provide
+ * registers that cannot be found by examination of FUNCTION_ARG_REGNO_P, the
+ * ... and the ...
+ *
+ * ZipCPU -- the default should be good enough for us.
+ */
+/* TARGET_SET_UP_BY_PROLOGUE(CONTAINER) ... This hook should add additional
+ * registers that are computed by the prologue to the hard register set for
+ * shrink-wrapping optimization purposes.
+ *
+ * ??
+ */
+
+/* TARGET_WARN_FUNC_RETURN(TREE) ... True if a function's return statements
+ * should be checked for matching the function's return type.  This includes
+ * checking for falling off the end of a non-void function.  Return false if
+ * no such check should be made.
+ *
+ * ZipCPU--the default should be good enough for us.
+ */
+
+/* 17.09.14 Stack smashing protection */
+// TARGET_STACK_PROTECT_GUARD
+// TARGET_STACK_PROTECT_FAIL
+// TARGET_SUPPORTS_SPLIT_STACK
+
+/* 17.09.15 Miscellaneous register hooks */
+
+// TARGET_CALL_FUSAGE_CONTAINS_NON_CALLEE_CLOBBERS
+
+/* TARGET_CALL_FUSAGE_CONTAINS_NON_CALLEE_CLOBBERS
+ * ZipCPU --- default is good enough for us.
+ */
+
+/* 17.10 Implementing VARARGS MACROS */
+
+/* ...
+ */
+
+/* void TARGET_SETUP_INCOMING_VARARGS(A,M,T,I,S) ... This target hook offers an
+ * alternative to using __builtin_saveregs and defining the hook TARGET_EXPAND..
+ * _BUILTIN_SAVEREGS.  Use it to store the anonymous register arguments into the
+ * stack so that all the arguments appear to have been passed consecutively
+ * on the stack.  Once this is done, you can use the standard implementation
+ * of varargs that works for machines that pass all their arguments on the
+ * stack.
+ */
+// #undef	TARGET_SETUP_INCOMING_VARARGS
+// #define	TARGET_SETUP_INCOMING_VARARGS	zip_setup_incoming_varargs
+
+/* ...
+ */
+
+/* 17.11 Trampolines for Nested Functions */
+
+/* TARGET_ASM_TRAMPOLINE_TEMPLATE ... This hook is called by
+ * assemble_trampoline_template to output, on the stream f, assembler code for
+ * a block of data that contains the constant parts of a trampoline.  This code
+ * should not include a label--the label is taken care of automatically.
+ *
+ * ZipCPU -- looks like we need to do this.
+ */
+#undef	TARGET_ASM_TRAMPOLINE_TEMPLATE
+#define	TARGET_ASM_TRAMPOLINE_TEMPLATE	zip_asm_trampoline_template
+
+/* TRAMPOLINE_SECTION ... Return the section into which the trampoline template
+ * is to be placed.  The default value is readonly_data_section.
+ *
+ * ZipCPU--default should be good enough.
+ */
+
+/* TRAMPOLINE_SIZE ... A C expression for the size (in bytes) of the trampoline
+ * as an integer.
+ *
+ * ZipCPU--it's three instructions, or 96 bits: BREV, LDILO, and JMP
+ *
+ */
+// #warning "May need to redefine trampoline_size in words, not bytes"
+#undef	TRAMPOLINE_SIZE
+#define	TRAMPOLINE_SIZE	3*UNITS_PER_WORD
+
+/* TRAMPOLINE_ALIGNMENT ... alignment required for trampolines, in bits.
+ *
+ * Well that's well known in ZipCPU --- 32-bits.
+ */
+#undef	TRAMPOLINE_ALIGNMENT
+#define	TRAMPOLINE_ALIGNMENT	UNITS_PER_WORD
+
+/* void TARGET_TRAMPOLINE_INIT(RTX,TREE,RTX CH) ... This hook is called to
+ * initialize a trampoline.  m_tramp is an RTX for the memory block for the
+ * trampoline; TREE is the FUNCTION_DECL for the nested function;  CH is an
+ * rtx for the static chain value that should be passed to the function when
+ * it is called.
+ *
+ * ZipCPU ... Can we get by without this?
+ */
+#undef	TARGET_TRAMPOLINE_INIT
+#define	TARGET_TRAMPOLINE_INIT	zip_trampoline_init
+
+/* TARGET_TRAMPOLINE_ADJUST_ADDRESS(RTX) ... This hook should perform any
+ * machine-specific adjustment in the address of the trampoline.  Its argument
+ * contains the address of the memory block that was passed to
+ * TARGET_TRAMPOLINE_INIT.  In case the address to be used for a function call
+ * should be different from the address at which the template was stored, the
+ * different address should be returned; otherwise addr should be returned 
+ * unchanged.  If the hook is not defined, RTX (addr) will be used for function
+ * calls.
+ * 
+ * ZipCPU--works for us!
+ */
+
+/* CLEAR_INSN_CACHE(BEG,END) ... If defined, expands to a C expression clearing
+ * the instruction cache in the specified interval.  The definition of this
+ * macro would typically be a series of asm statements.   Both BEG and END are
+ * pointer expressions.
+ *
+ * ZipCPU --- Ouch!  We have no way to do this (yet)!
+ */
+#define	CLEAR_INSN_CACHE(BEG,END)	gcc_assert(0);
+
+/* TRANSFER_FROM_TRAMPOLINE ... Define this macro if trampolines need a special
+ * subroutine to do their work.  The macro should expand to a series of asm
+ * statements which will be compiled with GCC.  They go in a library function
+ * named __transfer_from_trampoline.
+ *
+ * We may need to rethink trampolines on ZipCPU.
+ */
+
+
+/* 17.12 Implicit Calls to Library Routines */
+
+/* DECLARE_LIBRARY_RENAMES
+ *
+ * ZipCPU: Don't need it.
+ */
+
+/* TARGET_INIT_LIBFUNCS(VOID) ... This hook should declare additional library
+ * routines or rename existing ones, using the functions set_optab_libfunc and
+ * init_one_libfunc defined in optabs.c.  init_optabs calls this macro after
+ * initializing all the normal library routines.
+ *
+ * Most ports don't need to define this hook, so we won't either.
+ */
+
+/* TARGET_LIBFUNC_GNU_PREFIX ... If false (the default), internal library
+ * routines start with two underscores.  If set to true, these routines start
+ * with __gnu_ instead.
+ *
+ * ZipCPU: No change necessary.
+ */
+
+/* FLOAT_LIB_COMPARE_RETURNS_BOOL(MODE,COMPARISON) ... This macro should return
+ * true if the library routine that implements the floating point comparison
+ * operator comparison in mode mode will return a boolean and false if it will
+ * return a tristate.  
+ *
+ * Most ports don't need to define this macro, so Zip CPU won't either.
+ */
+
+/* TARGET_HAS_NO_HW_DIVIDE ... This macro should be defined if the target has no
+ * hardware divide instructions.  If this macro is defined, GCC will use an
+ * algorithm which makes use of simple logical and arithmetic operations for
+ * 64-bit division.  If the macro is not defined, GCC will use an algorithm
+ * which makes use of a 64-bit by 32-bit divide primitive.
+ *
+ * Zip CPU, though, doesn't have the 64-bit by 32-bit divide primitive, thus
+ * we have no HW DIVIDE (for now).
+ */
+#define	TARGET_HAS_NO_HW_DIVIDE
+
+/* TARGET_EDOM ... The value of EDOM on the target machine, as a C integer 
+ * expression.  If you don't define this macro, GCC does not attempt to deposit
+ * the value of EDOM into errno directly.  Look in /usr/include/errno.h to find
+ * the value of EDOM on your system.
+ *
+ * EDOM is the error created when a math argument is out of the domain of the
+ * function.
+ *
+ * ZipCPU: Don't need it---I don't think.
+ */
+
+/* GEN_ERRNO_RTX ... Define this macro as a C expression to create an rtl
+ * expression that refers to the global "variable" errno.  (On certain
+ * systems, errno may not actually be a variable.)  If you don't define this
+ * macro, a reasonable default is used.
+ *
+ * ZipCPU --- if a reasonable default is used, we'll use that--especially since
+ * I doubt we'll be using errno for a while.
+ */
+
+/* NEXT_OBJC_RUNTIME ... Set this macro to 1 to use the "NeXT" Objective-C
+ * message sending conventions by default.  This calling convention involves
+ * passing the object, the selector and the method arguments all at once to the
+ * method-lookup library function.  This is the usual setting when targeting
+ * Darwin/Mac OS X systems, which have the NeXT runtime installed.
+ *
+ * If the macro is set to 0, ...
+ * 
+ * Doesn't look relevant (yet) for the Zip CPU--especially since we don't have
+ * an O/S yet.
+ */
+
+
+
+/* 17.13 Addressing Modes */
+
+/* C expressions that are nonzero if the machine supports pre-increment,
+ * pre-decrement, post-increment, or post-decrement addressing respectively.
+ */
+#define	HAVE_PRE_INCREMENT	(0)
+#define	HAVE_PRE_DECREMENT	(0)
+#define	HAVE_POST_INCREMENT	(0)
+#define	HAVE_POST_DECREMENT	(0)
+
+/* C expression that is nonzero if the machine supports pre- or post- address
+ * side-effect generation involving constants other than the size of the memory
+ * operand.
+ */
+#define	HAVE_PRE_MODIFY_DISP	(0)
+#define	HAVE_POST_MODIFY_DISP	(0)
+
+/* C expression that is non-zero if the machine supports pre- or post-address
+ * side-effect generation involving a register displacement.
+ */
+#define	HAVE_PRE_MODIFY_REG	(0)
+#define	HAVE_POST_MODIFY_REG	(0)
+
+/* CONSTANT_ADDRESS_P(X) ... A C expression that is 1 if the RTX X is a constant
+ * which is a valid address.  On most machines the default definition ... is
+ * acceptable, but a few machines are more restrictive as to which constant
+ * addresses are supported.
+ *
+ * Zip CPU is designed for offset addresses, not constant addresses.  Although
+ * the CPU will support 18-bit signed constant addresses, the assembler and
+ * general programming model do not.  Further, without knowing where the final
+ * address will be located, this is an unusable model.  Therefore we will
+ * define this as not supported.
+ *
+ * In hindsight, this isn't true--labels and symbols are valid addresses, and
+ * they are also constant addresses.  Hence, we leave this at its default.
+ */
+// #undef	CONSTANT_ADDRESS_P
+// #define	CONSTANT_ADDRESS_P(X)	(0)
+
+/* CONSTANT_P(X) ... CONSTANT_P, which is defined by target-independent code,
+ * accepts integer values expressions whose values are not explicitly known,
+ * such as symbol_ref, label_ref, and high expressions and const arithmetic
+ * expressions, in addition to const_int and const_double expressions.
+ *
+ * Huh???
+ */
+// #define CONSTANT_P(X) ???
+
+/* MAX_REGS_PER_ADDRESS ... A number, the maximum number of registers that can
+ * appear in a valid memory address.  Note that it is up to you to specify a 
+ * value equal to the maximum number that TARGET_LEGITIMATE_ADDRESS_P would
+ * ever accept.
+ */
+#define	MAX_REGS_PER_ADDRESS	1
+
+/* TARGET_LEGITIMATE_ADDRESS_P(MODE,RTX,STRICT) ... A function that returns
+ * whether RTX is a legitimate memory address on the target machine for a 
+ * memory operation of mode MODE.
+ */
+#undef TARGET_LEGITIMATE_ADDRESS_P
+#define TARGET_LEGITIMATE_ADDRESS_P	zip_legitimate_address_p
+
+/* TARGET_MEM_CONSTRAINT ... A single character to be used instead of the
+ * default 'm' character for general memory addresses.  This defines the
+ * constraint letter which matches the memory addresses accepted by
+ * TARGET_LEGITIMATE_ADDRESS_P.  Define this macro if you want to support new
+ * address format in your back end without changing the semantics of the 'm'
+ * constraint.  This is necessary in order to preserve functionality of inline
+ * assembly constructs using the 'm' constraint.
+ *
+ * ZipCPU--doesn't look like we need to define this at all.
+ */
+
+/* FIND_BASE_TERM(X) ... A C expression to determine the base term of address
+ * X or to provide a simplified version of X from which alias.c can easily find
+ * the base term.  This macro is used in only two places: find_base_value and
+ * find_base_term in alias.c.
+ *
+ * It is always safe for this macro  to not be defined.  It exists so that
+ * alias analysis can understand machine-dependent addresses.
+ *
+ * ZipCPU: We'll skip this then.
+ */
+
+/* TARGET_LEGITIMIZE_ADDRESS(RTX,OLD,MODE) ... This hook is given an invalid
+ * memory address RTX for an operand of mode MODE and should try to return a 
+ * valid memory address.  RTX will always be the result of a call to 
+ * break_out_memory_refs, and OLD will be the operand that was given to that
+ * function to produce RTX.
+ *
+ * ZipCPU -- 
+ */
+#undef	TARGET_LEGITIMIZE_ADDRESS
+#define	TARGET_LEGITIMIZE_ADDRESS	zip_legitimize_address
+
+/* LEGITIMIZE_RELOAD_ADDRESS(X,MODE,OP,TYPE,IND,WIN) ... A C compound statement
+ * that attempts to replace X, which is an address that needs reloading, with
+ * a valid memory address for an operand of mode MODE.  WIN will be a C
+ * statement label elsewhere in the code.  It is not necessary to define this
+ * macro, but it might be useful for performance reasons.
+ *
+ * ZipCPU: This is worth coming back to, according to the notes page, but it
+ * may also be a difficult macro to use.  Look at other implementations before
+ * we dive into this.
+ */
+// #undef LEGITIMIZE_RELOAD_ADDRESS
+// #define LEGITIMIZE_RELOAD_ADDRESS
+
+/* TARGET_MODE_DEPENDENT_ADDRESS_P(ADDR,SPACE) ... This hook returns true
+ * if memory address addr in address space addrspace can have different meanings
+ * depending on the machine mode of the memory reference it is used for or if
+ * the address is valid for some modes but not others.
+ */
+#undef TARGET_MODE_DEPENDENT_ADDRESS_P
+#define	TARGET_MODE_DEPENDENT_ADDRESS_P		zip_mode_dependent_address_p
+
+/* TARGET_LEGITIMATE_CONSTANT_P(MODE,RTX) ... This hook returns true if x is a
+ * legitimate constant for a MODE-mode immediate operand on the target machine.
+ * You can assume the RTX satisfies CONSTANT_P, so you need not check this.
+ * 
+ * The default definition returns true.
+ */
+
+/* TARGET_DELEGITIMIZE_ADDRESS(RTX)
+ */
+
+/* TARGET_CONST_NOT_OK_FOR_DEBUG_P(RTX) ... This hook should return true if RTX
+ * should not be emitted into debug sections.
+ */
+
+/* TARGET_CANNOT_FORCE_CONST_MEM(MODE,RTX) ... This hook should return true if
+ * RTX is a form that cannot (or should not) be spilled to the constant pool.
+ * MODE is the mode of X.  The default version returns false.
+ */
+// #define	TARGET_CANNOT_FORCE_CONST_MEM	hook_bool_mode_rtx_false
+
+/* TARGET_USE_BLOCKS_FOR_CONSTANT_P(MODE,RTX) ... This hook should return true
+ * if pool entries for constant RTX can be placed in an object_block structure.
+ * MODE is the mode of X.  The default version returns false for all constants.
+ *
+ *????
+ */
+// #warning "Huh?"
+
+/* TARGET_USE_BLOCKS_FOR_DECL_P(DECL) ... This hook should return true if pool
+ * entries for DECL should be placed in an object_block structure.  The default
+ * version returns true for all DECL's.
+ *
+ * Sounds good.
+ */
+
+/* TARGET_BUILTIN_RECIPROCAL(TREE) ... This hook should return the DECL of a
+ * function that implements the reciprocal of the machine specific builtin
+ * function fndecl, or NULL_TREE if such a function is not available.
+ */
+
+/* TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD(VOID) ... This hook should return the
+ * DECL of a function f that given an address addr as an argument returns a mask
+ * m that can be used to extract from two vectors the relevant data that
+ * resides in addr in case addr is not properly aligned.
+ *
+ * Zip CPU does not support vectorization.
+ */
+
+/* Other vector, SIMD, and GOACC macros skipped as Zip CPU doesn't support
+ * such data accesses and manipulation.
+ */
+
+/* 17.14 Anchored Addresses */
+
+/* TARGET_MIN_ANCHOR_OFFSET ... The minimum offset that should be applied to
+ * a section anchor.  On most targets, it should be the smallest offset that
+ * can be applied to a base register while still giving a legitimate address for
+ * every mode.  The default value is 0.
+ *
+ * On the Zip CPU, this is the minimum operand B offset to a LW or SW
+ * operation, which would be a signed 14 bit number.
+ */
+#undef	TARGET_MIN_ANCHOR_OFFSET
+#define TARGET_MIN_ANCHOR_OFFSET	zip_min_anchor_offset
+
+/* TARGET_MAX_ANCHOR_OFFSET ... Like TARGET_MIN_ANCHOR_OFFSET, but the maximum
+ * (inclusive) offset that should be applied to section anchors.  The default
+ * value is 0.
+ */
+#undef	TARGET_MAX_ANCHOR_OFFSET
+#define TARGET_MAX_ANCHOR_OFFSET	zip_max_anchor_offset
+
+/* TARGET_ASM_OUTPUT_ANCHOR(RTX) ... Write the assembly code to define section
+ * anchor RTX, which is a SYMBOL_REF for which 'SYMBOL_REF_ANCHOR_P(RTL) is
+ * true.  The hook is called with the assembly output position set to the
+ * beginning of SYMBOL_REF_BLOCK(X).
+ *
+ * If ASM_OUTPUT_DEF is available, the hook's default definition uses it to
+ * define the symbol as '. + SYMBOL_REF_BLOCK_OFFSET(RTL)'.  If ASM_OUTPUT_DEF
+ * is not available, the hook's default definition is NULL, which disables the
+ * use of section anchors altogether.
+ *
+ * Section anchors would be very valuable in Zip CPU assembly, therefore we
+ * must define this hook.  However ... no one else seems to ever define these
+ * hooks, so I really dont have much of an example to work with
+ */
+// #warning "Come back to this"
+// #undef	TARGET_ASM_OUTPUT_ANCHOR
+// #define	TARGET_ASM_OUTPUT_ANCHOR	zip_asm_output_anchor
+
+/* TARGET_USE_ANCHORS_FOR_SYMBOL_P(RTX) ... Return true if GCC should attempt
+ * to use anchors to access SYMBOL_REF X.  You can assume
+ * SYMBOL_REF_HAS_BLOCK_INFO_P(X) and !SYMBOL_REF_ANCHOR_P(X).
+ *
+ * The default version is correct for most targets, but you might need to
+ * intercept this hook to handle things like target specific attributes or
+ * target-specific sections.
+ *
+ * Not knowing anything more, we'll leave the default as is for the Zip CPU.
+ */
+// #undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
+// #define TARGET_USE_ANCHORS_FOR_SYMBOL_P	zip_use_anchors_for_symbol_p
+
+/* 17.15 Condition Code Status */
+
+/* 17.15.1 Representation of condition codes using (cc0) --- that's us */
+
+/* CC_STATUS_MDEP ... C code for a data type which is used for declaring
+ * the mdep component of cc_status.  It defaults to int.
+ *
+ * ZipCPU---Int is good for us.
+ */
+
+/* CC_STATUS_MDEP_INIT ... A C expression to initialize the mdep field to
+ * "empty".  The default definition does nothing, since most machines don't
+ * use the field anyway.  If you want to use the field, you should probably
+ * define  this macro to initialize it.
+ */
+
+/* NOTICE_UPDATE_CC(EXP, INSN) ... A C compound statement to set the components
+ * of cc_status appropriately for an insn insn whose body is exp.  It is this
+ * macro's responsibility to recognize insns that set the condition code as
+ * a byproduct of other activity as well as those that explicitly set (cc0).
+ *
+ * ZipCPU --- We need this, as not all expressions set (cc0).
+ *
+ * In hind sight, this is the *old* and unsupported way of doing business within
+ * GCC.  To switch to the new way, all instruction definitions within our .md
+ * file have been adjusted to either change or clobber the CC register.
+ *
+ */
+#ifdef	HAVE_cc0
+// #define	NOTICE_UPDATE_CC(EXP, INSN)	zip_update_cc_notice(EXP, INSN)
+#error "ZipCPU no longer has CC0"
+#endif
+
+
+/* 17.15.2 Representation of condition codes using registers */
+/* ... which the ZipCPU doesn't have.  The ZipCPU has a CC0 register, and hence
+ * this section isn't supposed to apply.
+ */
+
+/* SELECT_CC_MODE(op, x, y) ... On many machines, the condition code may be
+ * produced by other instructions than compares, for example the branch can use
+ * directly the condition code set by a subtract instruction.  However, on some
+ * machines when the condition code is set this way some bits (such as the 
+ * overflow bit) are not set in the same way as a test instruction, so that a
+ * different branch instruction must be used for some conditional branches.
+ * When this happens, use the machine mode of the condition code register to
+ * record different formats of the condition code register.  Modes can also be
+ * used to record which compare instruction (e.g. a signed or an unsigned
+ * comparison) produced the condition codes.  
+ *
+ * If other modes than CCmode are required, add them to 'machine-modes.def' and
+ * define SELECT_CC_MODE to choose a mode given an operand of a compare.  This
+ * is needed because the modes have to be chosen not only during RTL generation
+ * but also, for example, by instruction combination.  The result of 
+ * SELECT_CC_MODE should be consistent with the mode used in the patterns; ...
+ *
+ * ZipCPU ... We have only one CC Mode, so we'll use the CCmode defined in
+ * machine-modes.def and should be fine with it.  Hence, this doesn't need
+ * to be defined.
+ */
+
+/* TARGET_CANONICALIZE_COMPARISON(int,rtx *, rtx *, bool) ... On some machines
+ * (such as the ZipCPU) not all possible comparisons are defined, but you can
+ * convert an invalid comparison into a valid one.  For example, the Alpha
+ * does not have a GT comparison, but you can use an LT comparison instead and
+ * swap the order of the operands.
+ *
+ * On such machines, implement this hook to do any required conversions:  code
+ * is the initial comparison code and op0 and op1 are the left and right
+ * operands of the comparison, respectively.  If op0_preserve_value is true the
+ * implementation is not allowed to change the value of op0 since the value 
+ * might be used in RTXs which aren't comparisons.  E.g. the implementation is
+ * not allowed to swap operands in that case.
+ *
+ * GCC will not assume that the comparison resulting from this macro is valid
+ * but will see if the resulting insn matches a pattern in the 'md' file.
+ *
+ * You need not implement this hook if it would never change the comparison
+ * code or operands.
+ *
+ * In the case of the ZipCPU, the ZipCPU only keeps track of 8 possible
+ * comparisons, and bastardizing other comparisons into those 8 is extremely
+ * painful.  Therefore, we *need* this capability to make certain we can use
+ * our comparisons successfully.
+ *
+ * The only problem is ... this hook appears to only be called on non-CC0
+ * machines.  Hence, defining it hasn't done anything for us.
+ */
+#define	TARGET_CANONICALIZE_COMPARISON	zip_canonicalize_comparison
+
+/* REVERSIBLE_CC_MODE(MODE) ... A C expression whose value is one if it is 
+ * always safe to reverse a comparison whose mode is MODE.  If SELECT_CC_MODE
+ * can ever return MODE for a floating-point inequality comparison, than 
+ * REVERSIBLE_CC_MODE(MODE) must be zero.
+ *
+ * You need not define this macro if it would always return zero or if the
+ * floating-point format is anything other than IEEE_FLOAT_FORMAT.  For example,
+ * here ...
+ *
+ * ZipCPU -- We'll always return zero, so this need not be defined.
+ */
+
+/* REVERSE_CONDITION(CODE,MODE) ... A C expression whose value is reversed
+ * condition code of the code for comparison done in CC_MODE MODE.  This macro
+ * is used only in case REVERSIBLE_CC_MODE(MODE) is nonzero. ...
+ *
+ * ZipCPU ... Since REVERSIBLE_CC_MODE(MODE) will always be zero, we'll leave
+ * this undefined.
+ */
+
+/* bool TARGET_FIXED_CONDITION_CODE_REGS(int *, int *) ... On targets which do
+ * not use (cc0), and which use a hard register rather than a pseudo-register
+ * to hold condition codes, the regular CSE passes are often not able to 
+ * identify cases in which the hard register is set to a common value.  Use this
+ * hook to enable a small pass which optimizes such cases.  This hook should 
+ * return true to enable this pass, and it should set the integers to which its
+ * arguments point to the hard register numbers used for condition codes.  When
+ * there is only one such register, as is true on most systems, the integer
+ * pointed to by p2 should  be set to INVALID_REGNUM.
+ *
+ * The default version of this hook returns false.
+ *
+ * ZipCPU --- I like the idea of enabling optimizations.  Let's return
+ * something other than false.
+ */
+#define	TARGET_FIXED_CONDITION_CODE_REGS	zip_fixed_condition_code_regs
+
+/* machine_mode TARGET_CC_MODES_COMPATIBLE(M1,M2) .. On targets which use
+ * multiple condition code modes in class MODE_CC, it is sometimes the case
+ * that a comparison can be validly done in more than one mode.  On such a 
+ * system, define this target hook to take two mode arguments and to return a 
+ * mode in which both comparisons may be validly done.  If there is no such
+ * mode, return VOIDmode.
+ *
+ * The default version of this hook checks whether the modes are the same.  If
+ * they are, it returns that mode.  If they are different, it returns VOIDmode.
+ *
+ * ZipCPU--Given that we only have the one CCmode, the default definition works
+ * well enough for us.
+ */
+
+/* unsigned int TARGET_FLAGS_REGNUM ... If the target has a dedicated flags
+ * register, and it needs to use the post-reload comparison elimination pass,
+ * then this value should be set appropriately.
+ *
+ * ZipCPU---Looks like we can set this easily enough without any problems.
+ */
+#undef	TARGET_FLAGS_REGNUM
+#define	TARGET_FLAGS_REGNUM	zip_CC
+
+/* 17.16 Relative costs of operations */
+
+
+// #define	REGISTER_MOVE_COST(MODE,FROM,TO)	((MODE==DImode)||(MODE==DFmode))?4:2
+// #define	TARGET_REGISTER_MOVE_COST
+// #define	MEMORY_MOVE_COST(MODE, CLASS, IN)	((MODE==DImode)||(MODE==DFmode))?8:7
+/* TARGET_REGISTER_MOVE_COST(M,FRM,TO) ... This target hook should return the
+ * cost of moving data of mode M from a register in class FRM to one in class
+ * TO.  The classes are expressed using the enumeration values such as 
+ * GENERAL_REGS.  A value of 2 is the default; other values are interpreted
+ * relative to that. 
+ *
+ * It is not required that the cost always equal 2 when FROM is the same as TO;
+ * on some machines it is expensive to move between registers if they are not
+ * general registers.
+ *
+ * If reload sees ...
+ *
+ * ZipCPU ... We can leave this at its default value of 2.
+ */
+
+/* TARGET_MEMORY_MOVE_COST(MOD,CL,IN) ... This target hook should return the 
+ * cost of moving data of mode MOD between a register of class CL and memory.
+ * IN is false if the value is to be written to memory, true if it is to be
+ * read in.  This cost is relative to those in TARGET_REGISTER_MOVE_COST.
+ * If moving between registers and memory is more expensive that between two
+ * registers, you should add this target hook to express the relative cost.
+ *
+ * If you do not add this target hook, GCC uses a default cost of 4 plus the
+ * cost of copying via a secondary reload register, if one is needed.  If your
+ * machine requires a secondary reload register to copy between memory and a
+ * register of CL but the reload mechanism is more complex than copying via
+ * an intermediate, use this target hook to reflect the actual cost of the
+ * move.
+ *
+ * ZipCPU --- Memory moves are more expensive than twice the cost of register
+ * moves, so let's make certain this is defined.
+ */
+#define	TARGET_MEMORY_MOVE_COST	zip_memory_move_cost
+
+// #warning "This needs to be double checked, and annotated"
+#define	BRANCH_COST(SPEED,PREDICTABLE)		((PREDICTABLE)?2:5)
+
+/* Define this macro as a C expression which is nonzero if accessing less than 
+ * a word of memory (i.e. a 'char' or a 'short') is no faster than accessing
+ * a word of memory.
+ */
+#define	SLOW_BYTE_ACCESS	1
+
+/* MOVE_RATIO(SPD) ... The threshold of number of scalar memory-to-memory move
+ * instructions, below which a sequence of instructions should be generated 
+ * instead of a string move instruction or a library call.  Increasing the
+ * value will always make code faster, but eventually incurs high cost in
+ * increased code size.
+ */
+#define	MOVE_RATIO(SPD)	5
+
+/* TARGET_USE_BY_PIECES_INFRASTRUCTURE_P(SZ,ALGN,OP,SPD) ...
+ */
+// #undef	TARGET_USE_BY_PIECES_INFRASTRUCTURE_P(S,A,OP,SPD)
+// #define	TARGET_USE_BY_PIECES_INFRASTRUCTURE_P(S,A,OP,SPD)// needs hook
+
+/* CLEAR_RATIO(SPD) ... The threshold number of scalar move instructions, below
+ * which a sequence of instructions should be generated to clear memory instead
+ * of a string clear instruction or a library call.  Increasing the value will
+ * always make the code faster, but eventually incurs high cost in increased
+ * code size.
+ */
+#define	CLEAR_RATIO(SPD)	MOVE_RATIO(SPD)
+
+/* NO_FUNCTION_CSE ... Define this macro to be true if it is as good or better
+ * to call a constant function address than to call an address kept in a 
+ * register.
+ * 
+ * On the Zip CPU, constant function addresses--especially relative ones,
+ * can be optimized into a single cycle delay.  Register jumps will always
+ * stall the whole (5-stage) pipeline.
+ */
+#define	NO_FUNCTION_CSE	true
+
+/* TARGET_RTX_COSTS(X,CODE,OUTER,OPNO,TOTAL,SPD) ... This target hook describes
+ * the relative costs of RTL expressions.
+ *
+ * The cost may depend on the precise form of the expression, which is available
+ * for examination in X, and the fact that X appears as operand OPNO of an 
+ * expression with rtx code OUTER.  That is, the hook can assume that there is
+ * some RTX Y such that GET_CODE(Y)==OUTER and such that either (a) XEXP(Y,OPNO)
+ * == X or (b) XVEC(Y,OPNO) contains X.
+ *
+ * ...
+ * The hook returns true when all subexpressions of x have been processed and
+ * false when rtx_cost should recurse.
+ */
+
+/* TARGET_ADDRESS_COST(ADDR,MODE,AS, SPD) ... This hook computes the cost of an
+ * addressing mode that contains ADDR.  If not defined, the cost is computed
+ * from the ADDR expression and the TARGET_RTX_COST hook.  In cases where more
+ * than one form of an address is known, the form with the lowest cost will be
+ * used.  If multiple forms have the same, lowest, cost, the one that is the
+ * most complex will be used.
+ *
+ * ZipCPU really has only one address cost, the only type of address it
+ * supports.  Sure, index addressing would cost us more, but we don't support
+ * that so ... I think we're okay defining this as a constant.  Indeed, the
+ * docs state that, "On RISC machines, all instructions normally have the same
+ * length and execution time.  Hence all addresses will have equal costs."
+ */
+#undef TARGET_ADDRESS_COST
+#define	TARGET_ADDRESS_COST	zip_address_cost
+
+
+/* TARGET_NO_SPECULATION_IN_DELAY_SLOTS_P ... This predicate controls the use 
+ * of the eager delay slot filler to disallow speculatively executed
+ * instructions being placed in delay slots.  Targets such as certain MIPS
+ * architectures possess both branches with and without delay slots.  As the
+ * eager delay slot filler can decrease performance, disabling it is beneficial
+ * when ordinary branches are available.  Use of delay slot branches filled
+ * using basic filler is often still desirable as the delay slot can hide a
+ * pipeline bubble.
+ */
+// How should Zip CPU define this--we have no delay slots.
+
+
+/* 17.17 Instruction Scheduler */
+
+#define	TARGET_SCHED_ISSUE_RATE	zip_sched_issue_rate
+
+/* 17.18 Dividing the Output into Sections */
+
+/* Switch to the text or data segment. */
+#define	TEXT_SECTION_ASM_OP	"\t.text"
+#define	DATA_SECTION_ASM_OP	"\t.data"
+
+// #undef	TARGET_LIBGCC_SDATA_SECTION
+// #define	TARGET_LIBGCC_SDATA_SECTION	".sdata"
+
+
+/* 17.19 Position Independent Code */
+
+#define	PIC_OFFSET_TABLE_REGNUM			zip_GOT
+#define	PIC_OFFSET_TABLE_REG_CALL_CLOBBERED	0
+// #define LEGITIMATE_PIC_OPERAND_P(X) should evaluate to X(GOT) only
+
+/* 17.20 Defining the Output Assembler Language */
+
+/* 17.20.2 Output of Data */
+
+/* These hooks (above) specify assembly directives for creating certain kinds
+ * of integer objects.  The TARGET_ASM_BYTE_OP directive creates a byte-sized
+ * object.  The TARGET_ASM_ALIGNED_HI_OP one creates an aligned two-byte object
+ * and so on.  Any of the hooks may be NULL, indicating that no suitable
+ * directive is available.
+ *
+ * The compiler will print these strings at the start of a new line, followed
+ * immediately by the object's initial value.  In most cases, the string should
+ * contain a tab, a pseudo op, and then another tab.
+ */
+
+#undef	TARGET_ASM_ALIGNED_HI_OP
+#undef	TARGET_ASM_ALIGNED_SI_OP
+// #undef	TARGET_ASM_ALIGNED_DI_OP
+#define	TARGET_ASM_ALIGNED_HI_OP	".short"
+// The assembler is set up to call a 4-byte integer a long.  This definition of
+// a long isn't consistent with the compilers definition.  For this reason,
+// the ZipCPU backend for the GNU Assembler defines a long as a 64-bit number,
+// and an int as a 32-bit number.
+#define	TARGET_ASM_ALIGNED_SI_OP	".int"
+// #define	TARGET_ASM_ALIGNED_DI_OP	".long"	
+
+
+/* 17.20.4 Output and Generation of Labels */
+
+/* ASM_OUTPUT_LABEL
+ * ... A default definition of this macro is provided which is correct for 
+ * most systems.
+ */
+
+/* ASM_OUTPUT_FUNCTION_LABEL
+ * ... if not defined, then the function name is defined in the usual manner
+ * as a label.
+ */
+
+/* ASM_OUTPUT_INTERNAL_LABEL ... Identical to ASM_OUTPUT_LABEL, except that name
+ * is known to refer to a compiler-generated label.  The default definition
+ * uses assemble_name_raw, which is like assemble_name except that it is more
+ * efficient.
+ */
+
+/* SIZE_ASM_OP ... A C string containing the appropriate assembler directive
+ * to specify the size of a symbol, without any arguments.  On systems that
+ * use ELF, the default is "\t.size\t"; on other systems, the default is not to
+ * define this macro.
+ *
+ * Define this macro only if it is correct to use the default definitions of
+ * ASM_OUTPUT_SIZE_DIRECTIVE and ASM_OUTPUT_MEASURED_SIZE for your system.
+ * If you need your own custom definitions of those macros, or if you do not
+ * need explicit symbol sizes at all, do not define this macro.
+ */
+
+/* ASM_OUTPUT_SIZE_DIRECTIVE
+ * ASM_OUTPUT_MEASURED_SIZE
+ */
+
+/* NO_DOLLAR_IN_LABEL ... Define this macro if the assembler does not accept
+ * the character '$' in label names.  By default constructors and destructors
+ * in G++ have "$" in the identifiers.  If this label is defined, '.' is
+ * used instead.
+ */
+
+/* NO_DOT_IN_LABEL ... Define this macro if the assembler does not accept the
+ * character '.' in label names.  By default constructors and destructors in
+ * G++ have names that use '.'.  If this macro is defined, these names are
+ * rewritten to avoid '.'.
+ */
+
+/* TYPE_ASM_OP ... A C string containing the appropriate assembler directive to
+ * specify the type of a symbol, without any arguments.  On systems that use
+ * ELF the default in config/elfos.h is "\t.type\t"; on other systems, the default is not to define this macro.
+ *
+ * Define this macro only if it is correct to use the default definition of
+ * ASM_OUTPUT_TYPE_DIRECTIVE for your system.  If you need your own custom
+ * definition of this macro, or if you do not need explicit symbol types at all,
+ * do not define this macro.
+ */
+
+/* TYPE_OPERAND_FMT ... A
+ */
+
+/* ASM_OUTPUT_TYPE_DIRECTIVE
+ */
+
+/* ASM_DECLARE_FUNCTION_NAME(STREAM, NAME, DECL) ... 
+ * if this macro is not defined, then the function name is defined in the usual
+ * manner as a label (by means of ASM_OUTPUT_FUNCTION_LABEL).
+ */
+
+/* ASM_DECLARE_FUNCTION_SIZE
+ * ASM_DECLARE_COLD_FUNCTION_NAME
+ * ASM_DECLARE_COLD_FUNCTION_SIZE
+ * ASM_DECLARE_OBJECT_NAME
+ * TARGET_ASM_DECLARE_CONSTANT_NAME
+ */
+/* ASM_DECLARE_REGISTER_GLOBAL(STREAM, DECL, REGNO, NAME) ... A C statement
+ * (sans semicolon) to output to the stdio stream STREAM any text necessary for
+ * claiming a register REGNO for a global variable DECL with name NAME.
+ *
+ * If you don't define this macro, that is equivalent to defining it to do
+ * nothing.
+ */
+
+/* ASM_FINISH_DECLARE_OBJECT
+ * TARGET_ASM_GLOBALIZE_LABEL
+ * TARGET_ASM_GLOBALIZE_DECL_NAME
+ * TARGET_ASM_ASSEMBLE_UNDEFINED_DECL
+ * ASM_WEAKEN_LABEL
+ * ASM_WEAKEN_DECL
+ * ASM_OUTPUT_WEAKREF
+ * SUPPORTS_WEAK
+ * TARGET_SUPPORTS_WEAK
+ * MAKE_DECL_ONE_ONLY
+ * SUPPORTS_ONE_ONLY
+ * TARGET_ASM_ASSEMBLE_VISIBILITY
+ * TARGET_WEAK_NOT_IN_ARCHIVE_TOC
+ * ASM_OUTPUT_EXTERNAL
+ * TARGET_ASM_EXTERNAL_LIBCALL
+ * TARGET_ASM_MARK_DECL_PRESERVED
+ * ASM_OUTPUT_LABELREF
+ * TARGET_MANGLE_ASSEMBLER_NAME
+ * ASM_OUTPUT_SYMBOL_REF
+ * ASM_OUTPUT_LABEL_REF
+ * TARGET_ASM_INTERNAL_LABEL
+ * ASM_OUTPUT_DEBUG_LABEL
+ * ASM_GENERATE_INTERNAL_LABEL
+ * ASM_FORMAT_PRIVATE_NAME
+ */
+
+/* ASM_OUTPUT_DEF ... A C statement to output to the stdio stream STREAM
+ * assembler code which defines (equates) the symbol NAME to have the value
+ * VALUE.
+ *
+ * ZipCPU---So many other things that we need depend upon this, that we need
+ * to implement a non-default version.
+ */
+#define	ASM_OUTPUT_DEF	zip_asm_output_def
+
+/* ASM_OUTPUT_DEF_FROM_DECLS
+ * TARGET_DEFERRED_OUTPUT_DEFS
+ * ASM_OUTPUT_WEAK_ALIAS
+ * OBJ_GEN_METHOD_LABEL
+ */
+
+
+/* 17.20.7 Output of Assembler Instructions */
+
+#define	REGISTER_NAMES {					\
+	"R0", "R1", "R2",  "R3",  "R4",  "R5", "R6", "R7",	\
+	"R8", "R9", "R10", "R11", "R12", "SP", "CC", "PC",	\
+	"uR0","uR1","uR2", "uR3", "uR4", "uR5","uR6","uR7",	\
+	"uR8","uR9","uR10","uR11","uR12","uSP","uCC","uPC",	\
+	"PSEUDO-AP" }
+
+/* REGISTER_PREFIX	(Undefined by default)
+ * LOCAL_LABEL_PREFIX	(Undefined by default)
+ * USER_LABEL_PREFIX	defaults to "*"
+ * IMMEDIATE_PREFIX	(Undefined by default)
+ *
+ * If defined, C string expressions to be used for the '%R', '%L', '%U', and
+ * '%I' options of asm_fprintf (see 'final.c').  These are useful when a single
+ * 'md' file must support multiple assembler formats.  In that case, the various
+ * 'tm.h' files can define these macros differently.
+ */ 
+// #define	USER_LABEL_PREFIX	"*"
+
+/* Defining memory operand address formats is in this section. */
+
+/* 17.20.10 Assembler Commands for Alignment */
+
+/* JUMP_ALIGN(label) ... The alignment (log base 2) to put in front of label,
+ * which is a common destination of jumps and has no fallthru incoming
+ * edge.  This macro need not be defined if you don't want any special alignment
+ * to be done at such a time.  Most machine descriptions do not currently define
+ * this macro.  
+ *
+ * ZipCPU---The assembler should automatically deal with label alignment, so
+ * let's not do anything about it here.
+ */
+
+/* TARGET_ASM_JUMP_ALIGN_MAX_SKIP
+ */
+
+/* LABEL_ALIGN_AFTER_BARRIER
+ * TARGET_ASM_LABEL_ALIGN_AFTER_BARRIER_MAX_SKIP
+ */
+
+/* LOOP_ALIGN(label)
+ * TARGET_ASM_LOOP_ALIGN_MAX_SKIP
+ * LABEL_ALIGN
+ * TARGET_ASM_LABEL_ALIGN_MAX_SKIP
+ */
+
+/* ASM_OUTPUT_SKIP(STREAM, NBYTES) A C statement to output to the stdio
+ * stream an assembler instruction to advance the location counter by nbytes
+ * bytes.  
+ */
+
+/* TARGET_ASM_LABEL_ALIGN */
+/* Assembler Commands for Alignment */
+#define	ASM_OUTPUT_ALIGN(STREAM,POWER)	\
+	do { fprintf(STREAM, "\t.align\t%d\n", POWER); } while (0)
+
+
+/* 17.21 Controlling Debugging Information Format */
+/* 17.22 Cross Compilation and Floating Point */
+
+// REAL_VALUE_TYPE
+// REAL_VALUES_EQUAL
+// REAL_VALUES_LESS ... Tests whether x is less than y
+/* REAL_VALUE_FIX ... Truncates x to an unsigned integer, rounding toward zero.
+ * If x is negative, returns zero.
+ */
+// REAL_VALUE_ATOF
+// REAL_VALUE_NEGATIVE
+// REAL_VALUE_ISINF
+// REAL_VALUE_ISNAN
+/* REAL_ARITHMETIC(OUT,CODE,X,Y) ... (Macro) Calculates an arithmetic operation
+ * on two floating point values X and Y, storing the result in OUT (which must
+ * be a variable).
+ *
+ * The operation to be performed is specified by CODE.  Only the following
+ * codes are supported: PLUS_EXPR, MINUS_EXPR, MULT_EXPR, RDIV_EXPR, MAX_EXPR,
+ * MIN_EXPR.
+ *
+ * If REAL_ARITHMETIC is asked to evaluate division by zero and the target's
+ * floating point format cannot represent infinity, it will call abort().  
+ * Callers should check for this situation first, using MODE_HAS_INFINITIES.
+ */
+/* REAL_VALUE_NEGATE(X) ... [Macro] Returns the negative of the floating point 
+ * value X.
+ */
+/* REAL_VALUE_ABS(X) ... [Macro] Returns the absolute value of X.
+ */
+/* 17.23 Mode switching instructions */
+/* 17.24 Defining target-specific uses of __attribute__ */
+#undef	TARGET_OPTION_OVERRIDE
+#define	TARGET_OPTION_OVERRIDE	zip_override_options
+
+/* 17.25 Emulating TLS */
+/* 17.26 Defining coprocessor specifics for MIPS targets*/
+
+ // ZipCPU isn't MIPS.
+
+/* 17.27 Parameters for Precompiled Header Validity Checking */
+/* 17.28 C++ ABI parameters */
+/* 17.29 Adding support for named address spaces */
+/* 17.30 Miscellaneous Parameters */
+
+/* HAS_LONG_COND_BRANCH ... Define this boolean macro to indicate whether or
+ * not your architecture has conditional branches that can span all of memory.
+ * It is used in conjunction with an optimization that partitions hot and
+ * cold basic blocks into separate sections of the executable.  If this macro
+ * is set to false, gcc will convert any conditional branches that attempt to
+ * cross between sections into unconditional branches or indirect jumps.
+ *
+ * ZipCPU --- The assembler renders long unconditional branch code without
+ * problems, so we can pretend that such long branches exist.
+ */
+#define	HAS_LONG_COND_BRANCH true
+
+/* HAS_LONG_UNCOND_BRANCH ... Define this boolean macro to indicate whether
+ * or not your architecture has unconditional branches that can span all of 
+ * memory.  (ZipCPU does ... via the LW (PC),PC instruction.)  It is used in
+ * conjunction with an optimization that partitions hot and cold basic blocks
+ * into separate sections of the executable.  If this macro is set to false,
+ * gcc will convert any unconditional branches that attempt to cross between
+ * sections into indirect jumps.
+ *
+ * ZipCPU has the LW (PC),PC instruction which can be used to implement a long
+ * jump.
+ */
+#define	HAS_LONG_UNCOND_BRANCH	true
+
+/* CASE_VECTOR_MODE ... An alias for a machine mode name.  This is the machine
+ * mode that elements of a jump-table should have.
+ *
+ */
+#define	CASE_VECTOR_MODE	SImode
+
+/* CASE_VECTOR_SHORTEN_MODE(MIN,MAX,BODY) ... Optional: return the preferred
+ * mode for an addr_diff_vec when the minimum and maximum offset are known.
+ * If you define this, it enables extra code in branch shortening to deal with
+ * addr_diff_vec.  To make this work, you also have to define INSN_ALIGN and
+ * make the alignment for addr_diff_vec explicit.  The body argument is provided so that the offset_unsigned and scale flags can be updated.
+ *
+ * ZipCPU---No advantage here.
+ */
+
+/* CASE_VECTOR_PC_RELATIVE ... Define this expression to indicate when
+ * jump-tables should contain relative addresses.  You need not define this
+ * macro if jump-tables never contain relative addresses, or jump-tables
+ * should contain relative addresses only when -fpic or -fPIC is in effect.
+ *
+ * ZipCPU---No advantage in PC-Relative jump tables--except in PIC relative
+ * code.
+ */
+
+/* TARGET_CASE_VALUES_THRESHOLD(VOID) ... This function returns the smallest 
+ * number of different values for which it is best to use a jump-table instead
+ * of a tree of conditional branches.  The default is four for machines with a
+ * casesi instruction and five otherwise.  This is best for most machines.
+ *
+ * ZipCPU---Leave at the default.
+ */
+
+/* WORD_REGISTER_OPERATIONS ... Define this macro to 1 if operations between
+ * registers with integral mode smaller than a word are always performed on the
+ * entire register.  Most RISC machines have this property and most CISC
+ * machines do not.
+ *
+ * ZipCPU---We have the property, 'cause we're fairly risc.
+ */
+#undef	WORD_REGISTER_OPERATIONS
+#define	WORD_REGISTER_OPERATIONS	1
+
+/* LOAD_EXTEND_OP(MEMMODE) ... Define this macro to be a C expression indicating
+ * when insns that read memory in MEMMODE, an integral mode narrower than a
+ * word, set the bits outside of MEMMODE to be either the sign extension or
+ * zero-extension of the data read.  Return SIGN_EXTEND for values of MEMMODE
+ * for which the insn sign-extends, ZERO_EXTEND for which it zero-extends, and
+ * UNKNOWN for other modes.
+ * 
+ * Do not define this macro if it would always return UNKNOWN.  
+ *
+ * ZipCPU---Our memory unit zero extends registers, so we'll zero extend here.
+ */
+#undef	LOAD_EXTEND_OP
+#define	LOAD_EXTEND_OP(MEM)	ZERO_EXTEND
+
+/* SHORT_IMMEDIATES_SIGN_EXTEND ... Define this macro to 1 if loading short
+ * immediate values into registers sign extends.
+ *
+ * ZipCPU---All immediates are sign extended, so yes.
+ */
+#undef	SHORT_IMMEDIATES_SIGN_EXTEND
+#define	SHORT_IMMEDIATES_SIGN_EXTEND	1
+
+/* TARGET_MIN_DIVISIONS_FOR_RECIP_MUL
+ */
+
+/* MOVE_MAX ... The maximum number of bytes that a single instruction can move
+ * quickly between memory and registers or between two memory locations.
+ *
+ * ZipCPU --- Although we can move 32-bits at a time, and most people would call
+ * this 4-bytes, the compiler defines a byte as the minimum addressable unit.
+ * Therefore, this is defined to be one.
+ */
+#define	MOVE_MAX	UNITS_PER_WORD
+
+/* MAX_MOVE_MAX ... The maximum number of bytes that a single instruction can
+ * move quickly between memory and registers or between two memory ...
+ *
+ * ZipCPU --- this sounds just the same as MOVE_MAX, which is the default
+ * definition of this.
+ */
+
+/* SHIFT_COUNT_TRUNCATED ... A C expression that is nonzero if on this machine
+ * the number of bits actually used for the count of a shift operation is equal
+ * to the number of bits needed to represent the size of the object being
+ * shifted.  
+ *
+ * You need not define this macro if it would have the value of zero.
+ *
+ * ZipCPU---A shift of 33 (or more) in either direction will wipe out the
+ * value in the register, therefore this value should be zero, the default.
+ */
+
+/* TARGET_SHIFT_TRUNCATION_MASK(MODE) ... This function describes how the
+ * standard shift patterns for MODE deal with shifts by negative amounts or by
+ * more than the width of the mode.  
+ *
+ * ZipCPU---The default is zero, since we didn't define SHIFT_COUNT_TRUNCATED.
+ * This is the case for the ZipCPU as well.
+ */
+
+/* TRULY_NOOP_TRUNCATION(OUTPREC,INPREC) ... A C expression which is nonzero
+ * if on this machine it is safe to "convert" an integer of INPREC bits to one
+ * of OUTPREC bits (where OUTPREC is smaller than INPREC) by merely operating on
+ * it as if it had OUTPREC bits.  On many machines, this expression can be 1.
+ *
+ * ZipCPU ...
+ */
+#undef	TRULY_NOOP_TRUNCATION
+#define TRULY_NOOP_TRUNCATION(O,I)	1
+
+/* TARGET_MODE_REP_EXTENDED(MODE,REPMODE) ... The representation of an integral
+ * mode can be such that the values are always extended to a wider integral
+ * mode.  Return SIGN_EXTEND if values of MODE are represented in sign-extended
+ * form to REPMODE.  Return UNKNOWN otherwise.  (Currently none of the targets
+ * use zero-extended this way so unlike LOAD_EXTEND_OP, TARGET_MODE_REP_EXTENDED
+ * is expected to return either SIGN_EXTEND or UNKNOWN.  Also, no target extends
+ * MODE to REP_MODE so that REP_MODE is not the next widest integral mode and
+ * we currently take advantage of this fact.)  
+ *
+ * Similarly to LOAD_EXTEND_OP you may return a non-UNKNOWN value even if the
+ * extension is not performed on certain hard registers as long as for the
+ * REGNO_REG_CLASS of these hard registers CANNOT_CHANGE_MODE_CLASS returns
+ * zero.
+ *
+ * Note that TARGET_MODE_REP_EXTENDED and LOAD_EXTEND_OP describe two related
+ * properties.  If you define TARGET_MODE_REP_EXTENDED(mode,wordmode) you
+ * probably also want to define LOAD_EXTEND_OP(mode) to return the same type
+ * of extension.
+ *
+ * In order to enforce the representation of mode, TRULY_NOOP_TRUNCATION should
+ * return false when truncating to MODE.
+ *
+ * ZipCPU ... ???
+ */
+// #undef	TARGET_MODE_REP_EXTENDED
+// #define	TARGET_MODE_REP_EXTENDED(R,M)	UNKNOWN
+
+/* STORE_FLAG_VALUE ... A C expression describing the value returned by a
+ * comparison operator with an integral mode and stored by a store-flag
+ * instruction (cstoremode4) when the condition is true.  This description
+ * must apply to all the cstoremode4 patterns and all the comparison operators
+ * whose results have MODE_INT mode.
+ *
+ * ZipCPU---Doesn't really have a STORE_FLAG_VALUE instruction ...
+ */
+
+/* FLOAT_STORE_FLAG_VALUE
+ *
+ * ZipCPU
+ */
+
+/* VECTOR_STORE_FLAG_VALUE ... define this macro on machines that have vector
+ * comparison operations that return a vector result ...
+ *
+ * ZipCPU---Doesn't support vector operations.
+ */
+
+/* CLZ_DEFINED_VALUE_AT_ZERO(MODE, VAL)
+ * CTZ_DEFINED_VALUE_AT_ZERO(MODE, VAL)
+ *
+ * A C expression that indicates whether the architecture defines a value for
+ * clz or ctz with a zero operand.  A result of 0 indicates the value is
+ * undefined.  If the value is defined for only the RTL expression, the macro
+ * should evaluate to 1.  If the value also applies to the corresponding optab
+ * entry, then the macro should evaluate to 2.  In cases where the value is
+ * defined, value should be set to this value.
+ * If this macro is not defined, the value of clz or ctz at zero is assumed to 
+ * be undefined.
+ *
+ * ZipCPU---Has neither clz nor ctz instructions, so we don't need this.
+ */
+
+/* Pmode ... An alias for the machine mode for pointers.  On most machines, 
+ * define this to be the integer mode corresponding to the width of a 
+ * hardware pointer.  SImode on 32-bits machines, or DImode on 64-bit machines.
+ * On some machines you must define this to be one of the partial
+ * integer modes, such as PSImode.
+ *
+ * ZipCPU--the machine mode for pointers is one word (32-bits).  The one word
+ * mode is the SImode, so that's what we use here.
+ */
+#undef	Pmode
+#define	Pmode	SImode
+
+/* FUNCTION_MODE ... An alias for the machine mode used for memory references to
+ * function being called, in call RTL expressions.  On most CISC machines, where
+ * an instruction can begin at any byte address, this should be QImode.  On most
+ * RISC machines, where all instructions have fixed size and alignment, this
+ * should be a mode with the same size and alignment as the machine instruction
+ * words--typically SImode or HImode.
+ *
+ * ZipCPU---Definitely SImode, as with Pmode.  (All words are 32-bits, including
+ * addresses on the ZipCPU.)
+ */
+#undef	FUNCTION_MODE
+#define	FUNCTION_MODE	SImode
+
+/* STDC_0_IN_SYSTEM_HEADERS
+ */
+
+/* TARGET_C_PREINCLUDE(V) ... Define this hook to return the name of  a header
+ * file to be included at the start of all compilations, as if it had been
+ * included with #include <file>.  If this hook returns NULL, or is not defined,
+ * or if the header is not found, or if the user specifies -ffreestanding or
+ * -nostdinc, no header is included.
+ *
+ * ZipCPU --- We don't have a standard library defined yet, so we'll leave this
+ * as NULL.
+ */
+#undef	TARGET_C_PREINCLUDE
+#define	TARGET_C_PREINCLUDE	NULL
+
+/* TARGET_CXX_IMPLICIT_EXTERN_C(CONST CHAR *) ... Define this hook to add target
+ * specific C++ implicit extern C functions.  If this function returns true
+ * for the name of a file-scope function, that function implicitly gets extern
+ * "C" linkage rather than whatever linkage the declaration would normally have.
+ * An example of such function is WinMain on Win32 targets.
+ *
+ * ZipCPU---Not ready to deal with this yet.
+ */
+
+/* NO_IMPLICIT_EXTERN_C ... Define this macro if the system header files
+ * support C++ as well as C.  This macro inhibits the usual method of using
+ * system header files in C++, which is to pretend that the file's contents
+ * are enclosed in 'extern "C" {...}'.
+ *
+ *
+ * ZipCPU --- Don't have either C or C++ headers, so let's skip this for now.
+ * Eventually, though, I think ZipCPU and C++ would go very well together.
+ */
+
+/* REGISTER_TARGET_PRAGMAS ... Define this macro if you want to implement any
+ * target specific pragmas.
+ *
+ * ZipCPU --- let's not.
+ */
+
+/* HANDLE_PRAGMA_PACK_WITH_EXPANSION ... Define this macro if macros should be
+ * expanded in the arguments of #pragma pack().
+ *
+ * ZipCPU ... why?
+ */
+
+/* TARGET_DEFAULT_PACK_STRUCT ... If your target requires a struct packing
+ * default other than 0 (meaning the machine default), define this macro to
+ * the necessary value (in bytes).  This must be a value that would also be
+ * valid to use with #pragma pack() (that is, a small power of two).
+ */
+
+/* DOLLARS_IN_IDENTIFIERS
+ * ZipCPU --- Default (not changing C)
+ */
+
+/* INSN_SETS_ARE_DELAYED(INSN) ... Define this macro as a C expression that
+ * is nonzero if it is safe for the delay slot schedule to place instructions
+ * in the delay slot of INSN, even if they appear to use a resource set or
+ * clobbered in INSN.  INSN is always a ...
+ *
+ * ZipCPU --- You need not define this macro if it would always return zero.
+ */
+ 
+/* INSN_REFERENCES_ARE_DELAYED(INSN) ... Define this macro as a C expression
+ * that is nonzero if it is safe for the delay slot schedule to place
+ * instructions in the delay slot of INSN, even if they appear to set or clobber
+ * a resource referenced in INSN.  INSN is always a jump_insn or an insn.  On
+ * machines where some insn or jump_insn is really a function call and ...
+ *
+ * ZipCPU --- You need not define this macro if it would always return zero.
+ */
+
+/* MULTIPLE_SYMBOL_SPACES ... Define this macro as a C expression that is
+ * nonzero if, in some cases, global symbols from one translation unit may not
+ * be bound to undefined symbols in another translation unit without user
+ * intervention.  For instance, under Microsoft Windows symbols must be
+ * explicitly imported from shared libraries (DLLs).  
+ *
+ * ZipCPU---You need not define this macro if it would always evaluate to zero,
+ * so we won't.
+ */
+
+/* TARGET_MD_ASM_ADJUST
+ */
+/* MATH_LIBRARY ... Define this macro as a C constant ... you only need to
+ * define this macro if the default of "m" is wrong.
+ *
+ * ZipCPU --- as we don't have a math library yet, building one such that "m"
+ * works doesn't sound like a problem.  Let's not define this.
+ */
+
+/* LIBRARY_PATH_ENV ... Define this as a C string constant for the environment
+ * variable that specifies where the linker should look for libraries.
+ *
+ * Just in case we want to add libraries for ZipCPU, let's place them in
+ * /usr/local/zip/lib, so as not to confuse them with our local systems 
+ * libraries.
+ */
+#define	LIBRARY_PATH_ENV	"/usr/local/zip/lib"
+
+/* TARGET_POSIX_IO ... Define this macro if the target supports the following
+ * POSIX file functions: access, mkdir, and file locking with fcntl/F_SETLKW.
+ *
+ * ZipCPU does not.
+ */
+
+/* MAX_CONDITIONAL_EXECUTE ... A C expression for the maximum number of 
+ * instructions to execute via conditional execution instructions instead of a 
+ * branch.  A value of BRANCH_COST+1 is the default if the machine does not use
+ * cc0 and 1 if it does use cc0.
+ *
+ * ZipCPU---This sounds good enough for the ZipCPU as well--as long as we have
+ * BRANCH_COST defined.  However, BRANCH_COST is defined as conditionally to
+ * something else, so let's keep looking into this.
+ */
+
+/* IFCVT_MODIFY_TESTS(CEINFO,TRUE,FALSE) ... Used if the target needs to 
+ * perform machine-dependent modifications on the conditionals used for turning
+ * basic blocks into conditionally executed code.  CEINFO points to a data
+ * structure, struct ce_if_block, which contains information about the currently
+ * processed blocks.  TRUE and FALSE are the tests that are used for
+ * converting the then-block and the else-block, respectively.  Set either TRUE
+ * or FALSE to a null pointer if the tests cannot be converted.
+ *
+ * ZipCPU --- I need to set this to properly take advantage of our conditional
+ * execution and conditional testing capabilities.
+ */
+#define	IFCVT_MODIFY_TESTS(CI,TR,FL)	zip_ifcvt_modify_tests(CI,&TR,&FL)
+
+/* IFCVT_MODIFY_MULTIPLE_TESTS(CEINFO, BB, TRUE, FALSE) ... Like
+ * IFCVT_MODIFY_TESTS, but used when converting more complicated if-statements
+ * into conditions combined by and and or operations.  BB contains the basic
+ * block that contains the test that is currently being processed and about to
+ * be turned into a condition.
+ *
+ *
+ * ZipCPU --- I need to set this to properly take advantage of our conditional
+ * execution and conditional testing capabilities.
+ */
+// #warning "Need to come back to this."
+#define	IFCVT_MODIFY_MULTIPLE_TESTS(CI,BB,TR,FL) TR=NULL_RTX
+
+
+/* IFCVT_MODIFY_INSN(CEINFO, PATTERN, INSN) ... A C expression to modify the
+ * PATTERN of an INSN that is to be converted to conditional execution format.
+ * CEINFO points to a data structure, struct ce_if_block, which contains
+ * information about the currently processed blocks.
+ *
+ *
+ * ZipCPU --- I need to set this to properly take advantage of our conditional
+ * execution and conditional testing capabilities.
+ */
+#define	IFCVT_MODIFY_INSN(CE,PATRN,INSN) zip_ifcvt_modify_insn(CE,PATRN,INSN)
+
+
+/* IFCVT_MODIFY_FINAL(CEINFO) ... A C expression to perform any final
+ * machine dependent modifications in converting code to conditional 
+ * execution.  The involved basic blocks can be found in struct ce_if_block
+ * structure pointed to be CEINFO.
+ *
+ *
+ * ZipCPU --- I need to set this to properly take advantage of our conditional
+ * execution and conditional testing capabilities.
+ */
+// #warning "Need to come back to this."
+#define	IFCVT_MODIFY_FINAL(CEINFO)	zip_ifcvt_modify_final(CEINFO)
+
+
+/* IFCVT_MODIFY_CANCEL(CEINFO) ... A C expression to cancel any machine
+ * dependent modifications in converting code to conditional execution.  The
+ * involved basic blocks can be found in the struct ce_if_block structure that
+ * is pointed to by CEINFO.
+ *
+ *
+ * ZipCPU --- I need to set this to properly take advantage of our conditional
+ * execution and conditional testing capabilities.
+ */
+// #warning "Need to come back to this."
+#define	IFCVT_MODIFY_CANCEL(CEINFO)	zip_ifcvt_modify_cancel(CEINFO)
+
+
+/* IFCVT_MACHDEP_INIT(CEINFO) ... A C expression to initialize any machine
+ * specific data for if-conversion of the if-block in the CEINFO block structure
+ * that is pointed by CEINFO.
+ *
+ *
+ * ZipCPU --- I need to set this to properly take advantage of our conditional
+ * execution and conditional testing capabilities.
+ */
+// #warning "Need to come back to this."
+#define	IFCVT_MACHDEP_INIT(CEINFO)	zip_ifcvt_machdep_init(CEINFO)
+
+
+/* TARGET_MACHINE_DEPENDENT_REORG(VOID) ... If non-null, this hook performs a
+ * target specific pass over the instruction stream.  The compiler will run it
+ * at all optimization levels, just before the point at which it normally does
+ * delayed branch scheduling.  
+ *
+ * You need not implement the hook if it has nothing to do.
+ *
+ * ZipCPU---This may be part of a later upgrade, but shouldn't be needed to
+ * just get us started.
+ */
+
+
+/* TARGET_INIT_BUILTINS(VOID) ... Define this hook if you have any machine
+ * specific builtin functions that need to be defined.  It should be a function
+ * that performs the necessary setup.  Machine specific builtin functions can be
+ * useful to expand special machine instructions that would otherwise not
+ * normally be generated because they have no equivalent in the source language.
+ *
+ * To create a built in function, call the function lang_hooks.builtin_function
+ * which is defined by the language front end.  You can use any type nodes
+ * set up by build_common_tree_nodes; only language front ends that use those
+ * two functions will call "TARGET_INIT_BUILTINS".
+ * 
+ * ZipCPU---We need to come back to this.  We should have several built-ins
+ * defined: rtu(), wait(), halt(), save_context(cstackregno), and 
+ * restore_context(cstackregno).
+ *
+ */
+#undef	TARGET_INIT_BUILTINS
+#define	TARGET_INIT_BUILTINS	zip_init_builtins
+
+/* TARGET_BUILTIN_DECL(CODE,INITP) ... Define this hook if you have any
+ * machine specific builtin functions that need to be defined.  It should be a
+ * function that returns the builtin function declaration for the builtin 
+ * function code code.  If there is no such builtin and it cannot be initialized
+ * at this time if INITP is true the function should return NULL_TREE.  If 
+ * CODE is out of range the function should return error_mark_node.
+ *
+ * ZipCPU ... needs to be done, don't know how to do it yet.
+ */
+#undef	TARGET_BUILTIN_DECL
+#define	TARGET_BUILTIN_DECL	zip_builtin_decl
+
+
+/* TARGET_EXPAND_BUILTIN(TREE,TGT,SUB,MODE,IGNORE) ... Expand a call to a
+ * machine specific built-in function that was set up by TARGET_INIT_BUILTINS.
+ * TREE is the expression for the function call; the result should go to
+ * TGT if that is convenient, and have mode MODE if that is convenient.  SUB
+ * may be used as the target for computing one of EXP's operands.  IGNORE is
+ * non-zero if the value is to be ignored.  This function should return the
+ * result of the call to the built-in function. 
+ *
+ * ZipCPU ... needs to do it, just to get our special intrinsic functions
+ */
+#define	TARGET_EXPAND_BUILTIN	zip_expand_builtin
+
+
+/* TARGET_BUILTIN_CHKP_FUNCTION(FCODE) ... Allows the target to redefine
+ * builtin functions used by Pointer Bounds Checker for code instrumentation.
+ *
+ * ZipCPU --- not interested.
+ */
+/* TARGET_CHKP_BOUND_TYPE
+ * TARGET_CHKP_MAKE_BOUNDS_CONSTANT
+ * TARGET_CHKP_INITIALIZE_BOUNDS
+ *
+ * ZipCPU --- Same as last one.
+ */
+
+
+/* TARGET_RESOLVE_OVERLOADED_BUILTIN(LOC, FN, ARGS) ... Select a replacement
+ * for a machine specific built-in function that was set up by
+ * TARGET_INIT_BUILTINS.
+ * 
+ * ZipCPU --- If I go to the trouble to create a builtin, why would I want
+ * to then overload it?
+ */
+
+/* TARGET_FOLD_BUILTIN(FN,NARGS,ARGP,IGNORE) ... Fold a call to a machine
+ * specific built-in function that was set up by 'TARGET_INIT_BUILTINS'  FN
+ * is the declaration of the built-in function.  NARGS is the number of
+ * arguments passed to the function; the arguments themselves are pointed to by
+ * ARGP.  The result is another tree, valid for both GIMPLE and GENERIC,
+ * containing as simplified expression for the call's result.  If IGNORE is
+ * true the value will be ignored.
+ *
+ * ZipCPU --- You know, this and the previous couple sound like something
+ * whereby I might be able replace bit-reversal code with my bit reverse
+ * instruction.  That would be very useful, but not necessary to get me
+ * started.
+ */
+
+/* TARGET_GIMPLE_FOLD_BUILTIN
+ * TARGET_COMPARE_VERSION_PRIORITY
+ * TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
+ * TARGET_GENERATE_VERSION_DISPATCHER_BODY
+ * TARGET_CAN_USE_DOLOOP_P
+ * TARGET_INVALID_WITHIN_DOLOOP
+ * TARGET_LEGITIMATE_COMBINED_INSN
+ * TARGET_CAN_FOLLOW_JUMP
+ * TARGET_COMMUTATIVE_P
+ */
+
+/* TARGET_ALLOCATE_INITIAL_VALUE(REGNO)  ... When the initial value of a hard
+ * register has been copied in a pseudo register, it is often not necessary
+ * ...
+ */
+/* TARGET_UNSPEC_MAY_TRAP_P(RTX,FLAGS)  ... This target hook returns nonzero if
+ * RTX, an unspec or unspec_volatile operation, might cause a trap.  Targets
+ * can use this hook to enhance precision of analysis for unspec and
+ * unspec_volatile operations.  You may call may_trap_p_1 to analyze inner
+ * elements of RTX in which case flags should be passed along.
+ */
+
+/* TARGET_SET_CURRENT_FUNCTION(TREE)  The compiler invokes this hook whenever
+ * it changes its current function context (CFUN).  You can define this 
+ * function if the back end needs to perform any initialization or reset
+ * actions on a per-function basis.  For example, it may be used to implement
+ * function attributes that affect register usage or code generation patterns.
+ */
+
+/* TARGET_OBJECT_SUFFIX ... Define this macro to be a C string representing the
+ * suffix for object files on your target machine.  If you do not define this
+ * macro, GCC will use ".o" as the suffix for object files.
+ */
+#define	TARGET_OBJECT_SUFFIX	".o"
+
+/* TARGET_EXECUTABLE_SUFFIX
+ */
+#define	TARGET_EXECUTABLE_SUFFIX	""
+
+/* COLLECT_EXPORT_LIST ... If defined, collect2 will scan the individual object
+ * files specified on its command line and create an export list for the linker.
+ * Define this macro for systems like AIX, where the linker discards object
+ * files that are not referenced from main and uses export lists.
+ *
+ * ZipCPU --- shouldn't need this.
+ */
+
+/* MODIFY_JNI_METHOD_CALL(MDECL)  ... Define this macro to a C expression
+ * representing a variant of the method call mdecl, if Java Native Interface
+ * (JNI) methods must be invoked differently from other methods on your
+ * target.  For example, on 32-bit MSWindows, JNI methods must be invoked
+ * using the stdcall calling convention and this macro is then ...
+ *
+ * ZipCPU----Don't need this.  (yet)
+ */
+
+
+/* TARGET_CANNOT_MODIFY_JUMPS_P ... This target hook returns true past the
+ * point at which new jump instructions could be created.  On machines that
+ * require a register for every jump such as the SHmedia ISA of SH5, this point
+ * would typically be reload, so this target hook should be defined to a 
+ * function such as:
+ *
+ * ZipCPU --- I don't get what this is for.  
+ * 	Actually, in hind sight, ZipCPU needs this.  Without this, the 
+ * compiler will try to reorder basic blocks, shuffling logic around and so
+ * forth, preventing our comparison optimizations from being used.  By setting
+ * this function appropriately, we can prevent it from reversing conditions into
+ * conditions we don't support.
+ */
+#define	TARGET_CANNOT_MODIFY_JUMPS_P	zip_cannot_modify_jumps_p
+
+/* TARGET_BRANCH_TARGET_REGISTER_CLASS ... This target hook returns a register
+ * class for which branch target register optimizations should be applied.  All
+ * registers in this class should be usable interchangeably.  After reload,
+ * registers in this class will be re-allocated and loads will be hoisted out of
+ * loops and be subjected to inter-block scheduling.
+ *
+ * ZipCPU---GENERAL_REGS, but this should be a default already ...
+ */
+
+
+/* TARGET_BRANCH_TARGET_REGISTER_CALLEE_SAVED ...  Branch target register
+ * optimization will by default exclude callee-saved registers that are not
+ * already live during the current function.  If this target hook returns true,
+ * they will be included.  The target code must then make sure that all target
+ * registers in the class returned by TARGET_BRANCH_REGISTER_CLASS that might
+ * be saved are saved.  
+ *
+ * ZipCPU---
+ */
+
+
+/* TARGET_HAVE_CONDITIONAL_EXECUTION(VOID) ... This target hook returns true
+ * if the target supports conditional execution.  This target hook is required
+ * only when the target has several different modes and they have different
+ * conditional execution capability, such as ARM.
+ *
+ * ZipCPU---Yes!  All instructions may be conditionally executed (except the
+ * long version load immediate ...)
+ */
+#define	TARGET_HAVE_CONDITIONAL_EXECUTION	hook_bool_void_true
+
+/* TARGET_GEN_CCMP_FIRST(PREP,GEN,CODE,OP0,OP1) .. This function prepares to
+ * emit a comparison instruction for the first compare in a sequence of
+ * conditional comparisons.  It returns an appropriate comparison with CC for
+ * passing to gen_ccmp_next or cbranch_optab.  The instructions to prepare the
+ * compare are saved in prep_seq and the compare instructions are saved in
+ * gen_seq.  They will be emitted when all the compares in the conditional
+ * comparison are generated without error.  CODE is the rtx_code of the compare
+ * for op0 and op1.
+ *
+ *
+ * ZipCPU---???
+ */
+
+/* TARGET_GEN_CCMP_NEXT(PREP,GEN,PREV,CMP,OP0,OP1,BITCODE) ... This function
+ * prepares to emit a conditional comparison within a sequence of conditional
+ * comparisons.  It returns an appropriate comparison with CC for passing to
+ * gen_ccmp_next or cbranch_optab.  The insn to prepare the compare are saved
+ * in prep_seq and the compare instructions are saved in gen_seq.  They will be
+ * emitted when all the compares in the conditional comparison are generated
+ * without error.  The pPREV expression is the result of a prior call to either
+ * gen_ccmp_first or gen_ccmp_next.  It may return NULL if the combination of
+ * PREV and this comparison is not supported, otherwise the result must be the
+ * appropriate for passing to gen_ccmp_next or cbranch_optab.  CODE is the RTX
+ * code of the compare for op0 and op1.  BITCODE is AND or IOR, which is the op
+ * on the compares.
+ * 
+ *
+ * ZipCPU --- ???
+ */
+
+/* TARGET_LOOP_UNROLL_ADJUST(NUNROLL, LOOP) ... This target hook returns a new
+ * value for the number of times loop should be unrolled.  The parameter NUNROLL
+ * is the number of times the loop is to be unrolled.  The parameter loop is a
+ * pointer to the loop, which is going to be checked for unrolling.  The target 
+ * hook is required only when the target has special constraints like maximum number of memory accesses.
+ *
+ *
+ * ZipCPU -- ???
+ */
+
+
+/* POWI_MAX_MULTS ... If defined, this macro is interpreted as a signed integer
+ * C expression that specifies the maximum number of floating point
+ * multiplications that should be emitted when expanding exponentiation by an
+ * integer constant inline.  When this value is defined, exponentiation
+ * requiring more than this number of multiplications is implemented by calling
+ * the system library's pow, powf, or powl routines.  The default value
+ * places no upper bound on the multiplication count.
+ *
+ * ZipCPU---As we have no system library pow() routine (yet) ... we're not
+ * ready for this macro.
+ */
+
+
+/* TARGET_EXTRA_INCLUDES(SYSROOT, PFX, STDINC) ... This target hook should 
+ * register any extra include files for the target.  The parameter stdinc
+ * indicates if normal include files are present.  The parameter SYSROOT is the
+ * system root directory.  The parameter PFX is the prefix for the GCC
+ * directory.
+ *
+ *
+ * ZipCPU---None yet.
+ */
+
+/* TARGET_EXTRA_PRE_INCLUDES(SYSROOT, PFX, STDINC) ... This target hook should
+ * register any extra include files for the target before any standard headers.
+ * The parameter stdinc indicates if normal include files are present.
+ *
+ * ZipCPU --- None.
+ */
+
+/* TARGET_OPTF(PATH) ... This target hook should register special include paths
+ * for the target.  The parameter path is the include path to register.  On Darwin
+ * systems, this is used for Framework includes, which have semantics that are
+ * different from -I.
+ *
+ *
+ * ZipCPU --- None.
+ */
+
+/* TARGET_USE_LOCAL_THUNK_ALIAS_P(FN) ... This target macro returns if it is 
+ * safe to use a local alias for a virtual function FN when constructing 
+ * thunks, false otherwise.  By default, the macro returns true for all 
+ * functions, if a target supports aliases (i.e. defines ASM_OUTPUT_DEF),
+ * false otherwise.
+ *
+ *
+ * ZipCPU --- ???
+ */
+// #warning "ASM_OUTPUT_DEF's definition has not been considered"
+
+
+/* TARGET_FORMAT_TYPES ... If defined, this macro is the name of a global 
+ * variable containing target-specific format checking information for the
+ * -Wformat option.  The default is to have no target-specific format checks.
+ *
+ * ZipCPU --- Default
+ */
+
+/* TARGET_N_FORMAT_TYPES
+ *
+ * ZipCPU --- Default
+ */
+
+/* TARGET_OVERRIDES_FORMAT_ATTRIBUTES ... If defined, this macro is the name of 
+ * a global variable containing target-specific format overrides for the 
+ * -Wformat option.  The default is to have no target specific format overrides.
+ *
+ * ZipCPU --- Default
+ */
+
+/* TARGET_OVERRIDES_FORMAT_ATTRIBUTES
+ * TARGET_OVERRIDES_FORMAT_ATTRIBUTES_COUNT
+ *
+ * If defined, the (first) macro is the name of a global variable containing
+ * target-specific format overrides for the -Wformat option.
+ */
+/* TARGET_OVERRIDES_FORMAT_INIT ... If defined, this macro specifies the
+ * optional initialization routine for target specific customizations of the 
+ * system printf and scanf formatter settings.
+ */
+ 
+/* TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN(TLIST,FN,VAL) ... If defined, this
+ * macro returns the diagnostic message when it is illegal to pass an argument
+ * VAL to function FN with prototype TLIST.
+ *
+ * ZipCPU---Default.
+ */
+
+/* TARGET_INVALID_CONVERSION
+ * TARGET_INVALID_UNARY_OP
+ * TARGET_INVALID_BINARY_OP
+ * TARGET_INVALID_PARAMETER_TYPE
+ * TARGET_INVALID_RETURN_TYPE
+ * TARGET_PROMOTED_TYPE
+ * TARGET_CONVERT_TO_TYPE
+ * TARGET_USE_JCR_SECTION_TYPE
+ * OBJC_JBLEN
+ * LIBGCC2_UNWIND_ATTRIBUTE
+ * TARGET_UPDATE_STACK_BOUNDARY
+ * TARGET_GET_DRAP_RTX
+ * TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS
+ */
+/* TARGET_CONST_ANCHOR ... On some architectures it can take multiple
+ * instructions to synthesize a constant. If there is another constant already
+ * in a register that is close enough in value then it is preferable that the
+ * new constant is computed from the register using immediate addition or
+ * subtraction.  We accomplish this through CSE.  Besides the value of the
+ * constant we also add a lower and an upper constant anchor to the available
+ * expressions.  These are then queried when encountering new constants.  The
+ * anchors are computed by rounding the constant up and down to a multiple of
+ * the value of TARGET_CONST_ANCHOR.  TARGET_CONST_ANCHOR should be the maximum
+ * positive value accepted by immediate-add plus one.  We currently assume that
+ * the value of TARGET_CONST_ANCHOR is a power of 2.  For example, on MIPS, 
+ * where add-immediate takes a 16-bit signed value, TARGET_CONST_ANCHOR is set
+ * to 0x8000.  The default value is zero, which disables this optimization.
+ *
+ * ZipCPU---One of the great strengths of the ZipCPU ISA is its ability to 
+ * access registers plus immediates.  To use this, we *need* this capability.
+ * So, we define it here. (to 0x20000, or 2^17 because we can handle 18-bits of
+ * signed immediate offsets)
+ *
+ * On ZipCPU---2^17
+ */
+#define	TARGET_CONST_ANCHOR	zip_const_anchor
+
+/* TARGET_ASAN_SHADOW_OFFSET ... Return the offset bitwise ored into shifted
+ * address to get corresponding Address Sanitizer shadow memory address.  NULL
+ * if address Sanitizer is not supported by the target.
+ */
+#define	TARGET_ASAN_SHADOW_OFFSET	NULL
+
+/* TARGET_MEMMODEL_CHECK
+ */
+/* TARGET_ATOMIC_TEST_AND_SET_TRUEVAL ... This value should be set if the result
+ * written by atomic test and set is not exactly 1, i.e. the bool true.
+ */
+/* TARGET_HAS_IFUNC_P ... It returns true if the target supports GNU indirect
+ * functions.  The support includes the assembler, linker, and dynamic linker.
+ * The default value of this hook is based on target's libc.
+ */
+#define	TARGET_HAS_IFUNC_P	hook_bool_void_true
+
+/* TARGET_ATOMIC_ALIGN_FOR_MODE(MODE) ... If defined, this function returns
+ * an appropriate alignment in bits for an atomic object of machine mode
+ * MODE.  If 0 is returned then the default alignment for the specified mode
+ * is used.
+ * 
+ * ZipCPU---Both default and 2 would be valid.  We'll stick to the default.
+ */
+
+/* TARGET_ATOMIC_ASSIGN_EXPAND_FENV --- ISO C11 requires atomic compound 
+ * assignments that may raise floating-point exceptions to raise exceptions
+ * corresponding to the arithmetic operation whose result was successfully 
+ * stored in a compare-and-exchange sequence.  This requires code equivalent to
+ * calls to feholdexcept, feclearexcept and feupdateenv to be generated at
+ * appropriate points in the compare-and-exchange sequence.  This hook should
+ * set *hold to an expression equivalent 
+ *
+ * ZipCPU --- ???
+ */
+
+/* TARGET_RECORD_OFFLOAD_SYMBOL ... Used when offloaded functions are seen in
+ * the compilation unit and no named sections are available.  It is called once
+ * for each symbol that must be recorded in the offload function and variable
+ * table.
+ * 
+ * ZipCPU --- Offloaded functions?
+ */
+
+/* TARGET_OFFLOAD_OPTIONS
+ *
+ * ZipCPU---none defined
+ */
+
+/* TARGET_SUPPORTS_WIDE_INT ... On older ports, large integers are stored
+ * in CONST_DOUBLE rtl objects.  Newer ports define TARGET_SUPPORTS_WIDE_INT
+ * to be nonzero to indicate that large integers are stored in CONST_WIDE_INT
+ * rtl objects.  The CONST_WIDE_INT allows very large integer constants to be
+ * represented.  CONST_DOUBLE is limited to twice the size of the host's
+ * HOST_WIDE_INT representation.
+ *
+ * ZipCPU---We don't need these yet, so this isn't yet relevant.  (These ints
+ * are wider than DImode ...)
+ */
+#define	TARGET_SUPPORTS_WIDE_INT	0
+
+
+/* Now, for the prototype functions ...*/
+// These have been moved to zip-protos.h
+
+// extern void zip_init_builtins(void);
+// extern void zip_asm_output_anchor(rtx x);
+// extern bool zip_legitimate_address_p(enum machine_mode mode, rtx x, bool string);
+// extern void zip_asm_trampoline_template(FILE *);
+// extern void zip_initial_elimination_offset(int from, int to);
+// extern void zip_print_operand(FILE *stream, rtx *x, int code);
+// extern void zip_print_operand_address(FILE *stream, rtx *x);
+// extern void zip_asm_output_def(FILE *s, const char *n, const char *v);
+// extern void zip_update_cc_notice(rtx exp, rtx_insn *insn);
+// extern	int zip_address_operand(rtx op);
+// extern	int zip_const_address_operand(rtx op);
+// extern void zip_expand_prologue(void);
+// extern void zip_expand_epilogue(void);
+// extern bool zip_gen_move_rtl(rtx, rtx);
+// extern bool zip_load_address_lod(rtx, rtx);
+// extern bool zip_load_address_sto(rtx, rtx);
+// extern void zip_print_operand(FILE *fp, rtx x, int code);
+// extern void zip_print_operand_address(FILE *fp, rtx x);
+extern int zip_use_return_insn(void);
+
+#include "insn-modes.h"
+// #include "zip-protos.h"	// Cant include this here!
+
+#endif	/* GCC_ZIP_H */
+
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/config/zip/zip.md gcc-6.2.0-zip/gcc/config/zip/zip.md
--- gcc-6.2.0/gcc/config/zip/zip.md	1969-12-31 19:00:00.000000000 -0500
+++ gcc-6.2.0-zip/gcc/config/zip/zip.md	2017-03-07 12:02:29.862582673 -0500
@@ -0,0 +1,1968 @@
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Filename:	zip.md
+;;
+;; Project:	Zip CPU -- a small, lightweight, RISC CPU soft core
+;;
+;; Purpose:	This is the machine description of the Zip CPU as needed by the
+;;		GNU compiler collection (GCC).
+;;
+;;
+;; Creator:	Dan Gisselquist, Ph.D.
+;;		Gisselquist Technology, LLC
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Copyright (C) 2015-2017, Gisselquist Technology, LLC
+;;
+;; This program is free software (firmware): you can redistribute it and/or
+;; modify it under the terms of  the GNU General Public License as published
+;; by the Free Software Foundation, either version 3 of the License, or (at
+;; your option) any later version.
+;;
+;; This program is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY or
+;; FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+;; for more details.
+;;
+;; License:	GPL, v3, as defined and found on www.gnu.org,
+;;		http://www.gnu.org/licenses/gpl.html
+;;
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;; - Immediate integer operand constraints
+;;	'I'	-2^22 ... 2^22-1, or -4194304 .. 4194303	(LDI insn)
+;;	'x'	-2^17 ... 2^17-1,				(DI const)
+;;	'K'	0...2^17-2					(ucmp offset)
+;;	'M'	-2^12 ... 2^12-1, or -4096 ... 4095		(MOV offset)
+;;	'N'	-2^14 ... 2^14-1, or -16384 ... 16383		(OpB offset)
+;;	'O'	-2^17 ... 2^17-1, or -131072 ... 131071		(OpB Immediate)
+;;	'R'	0...31						(Shift value)
+;; - Memory constraints
+;;	"Q"	Op-B capable references to memory
+;;	"S"	References to constant memory
+;; - Address constraints
+;;	"U"	Op-B capable address that references to memory
+;;	"T"	Constant memory addresses
+(define_constraint "x"
+  "An 17-bit signed immediate such as a CMP:DI instruction can handle"
+  (and (match_code "const_wide_int")
+       (match_test "(ival < 0x20000l) && (ival >= -0x20000l)")))
+(define_constraint "K"
+  "An 17-bit signed immediate such as a CMP:DI instruction can handle"
+  (and (match_code "const_int")
+       (match_test "(ival < 0x20000) && (ival >= -0x20000)")))
+(define_constraint "M"
+  "An 13-bit signed immediate such as a MOV instruction can handle"
+  (and (match_code "const_int")
+       (match_test "(ival < 0x1000) && (ival >= -0x1000)")))
+(define_constraint "N"
+  "An 14-bit signed immediate offset such as an Op-B register offset"
+  (and (match_code "const_int")
+       (match_test "(ival < 0x2000) && (ival >= -0x2000)")))
+(define_constraint "O"
+  "An 18-bit signed immediate such as an Op-B Immediate can handle"
+  (and (match_code "const_int")
+       (match_test "(ival < 0x20000) && (ival >= -0x20000)")))
+(define_constraint "R"
+  "Bits that a value may be shifted"
+  (and (match_code "const_int")
+       (match_test "(ival < 32) && (ival >= 0)")))
+;;
+;
+;
+; Our builtin functions, by identifier
+;
+(define_constants
+	[(UNSPEC_RTU		 1)
+	(UNSPEC_HALT		 2)
+	(UNSPEC_IDLE		 3)
+	(UNSPEC_SYSCALL		 4)
+	(UNSPEC_SAVE_CONTEXT	 5)
+	(UNSPEC_RESTORE_CONTEXT	 6)
+	(UNSPEC_BITREV		 7)
+	(UNSPEC_GETUCC		 8)
+	(UNSPEC_GETCC		 9)
+	(UNSPEC_LDILO		10)
+	; (UNSPEC_RAW_CALL	11)
+	])
+;
+;
+; Registers by name
+(define_constants
+  [(RTN_REG		0)	; Return address register
+   (RTNV_REG		1)	; Subroutine return value register
+   (AP_REG		10)	; Hopefully never used
+   (GBL_REG		11)	; Hopefully never used, but just in case ...
+   (FP_REG		12)
+   (SP_REG		13)
+   (CC_REG		14)
+   (PC_REG		15)
+  ])
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Mode iterator
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+(define_mode_iterator ZI [QI HI SI])
+(define_mode_attr sz [(QI "B") (HI "H") (SI "W")])
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Predicates
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+;
+(define_predicate "zip_const_address_operand_p"
+	(match_code "symbol_ref,const,label_ref,code_label")
+{
+	return zip_const_address_operand(op);
+})
+
+(define_predicate "zip_address_operand_p"
+	(match_code "reg,plus")
+{
+	return zip_pd_opb_operand(op);
+})
+
+(define_predicate "zip_opb_operand_p"
+	(match_code "reg,plus,const_int,subreg")
+{
+	return zip_pd_opb_operand(op);
+})
+
+(define_predicate "zip_opb_immv_p"
+	(match_code "const_int")
+{
+	return (INTVAL(op)<((1<<13)-1))&&(INTVAL(op)>=-((1<<13)));
+})
+
+(define_predicate "zip_opb_single_operand_p"
+	(match_code "reg,subreg,const_int")
+{
+	return zip_pd_opb_operand(op);
+})
+
+(define_predicate "zip_mov_operand_p"
+	(match_code "reg,plus")
+{
+	return zip_pd_mov_operand(op);
+})
+
+(define_predicate "zip_memory_operand_p"
+	(match_code "mem")
+{
+	return zip_pd_opb_operand(XEXP(op,0));
+})
+
+(define_predicate "zip_imm_operand_p"
+	(match_code "const_int")
+{
+	return zip_pd_imm_operand(op);
+})
+
+(define_predicate "zip_mvimm_operand_p"
+	(match_code "const_int")
+{
+	return zip_pd_mvimm_operand(op);
+})
+
+;
+; zip_movdst_operand_p and zip_movsrc_operand_p are no longer necessary, and
+; are being deprecated.
+;
+;(define_predicate "zip_movdst_operand_p"
+;	(match_code "mem,reg,subreg")
+;{
+;	if (MEM_P(op)) // Check for valid store address
+;		return zip_pd_opb_operand(XEXP(op,0));
+;	else if ((SUBREG_P(op))&&(REG_P(XEXP(op,0))))
+;		return 1;
+;	else if (REG_P(op))
+;		return register_operand(op, GET_MODE(op));
+;	return 1;
+;})
+
+;(define_predicate "zip_movsrc_operand_p"
+;	(match_code "mem,reg,subreg,const_int,const,symbol_ref,label_ref,code_label")
+;{
+;	if (MEM_P(op))
+;		return zip_pd_opb_operand(XEXP(op,0));
+;	else if (GET_CODE(op)==PLUS)
+;		return zip_pd_opb_operand(op);
+;	else if ((SUBREG_P(op))&&(REG_P(XEXP(op,0)))) {
+;		//; As far as predicates are concerned, subregs must be valid.
+;		//; The details of them are settled within the constraints.
+;		return 1;
+;	} else if ((REG_P(op))||(SUBREG_P(op)))
+;		return register_operand(op,SImode);
+;	else if (CONST_INT_P(op))
+;		return 1;
+;	return 1;
+;})
+
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Constraints
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+;
+(define_memory_constraint "S"
+	"Any memory referenced by a constant address, possibly unknown at compile time"
+	(and (match_code "mem")
+		(match_test "zip_ct_const_address_operand(XEXP(op,0))")))
+(define_memory_constraint "Q"
+	"Any memory addressed suitably for a load or store instruction"
+	(and (match_code "mem")
+		(match_test "zip_ct_address_operand(XEXP(op,0))")))
+(define_address_constraint "U"
+	"An address suitable for a load or store instruction"
+	(and (match_code "reg,plus")
+		(match_test "zip_ct_address_operand(op)")))
+(define_address_constraint "T"
+	"Any constant address, to include those made by symbols unknown at compile time"
+	(and (match_code "label_ref,code_label,symbol_ref,const")
+		(match_test "zip_ct_const_address_operand(op)")))
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Attributes
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+;
+;
+;
+(define_attr "predicable"  "no,yes" (const_string "yes"))
+(define_attr "ccresult" "set,unknown,unchanged,validzn" (const_string "set"))
+;
+;
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Instructions
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+;
+;; Instructions
+;
+; (define_insn
+;	optional name
+;	RTL template -- a vector of incomplete RTL expressions describing the
+;		semantics of the instruction.  It is incomplete because it may
+;		contain match_operand, match_operator, and match_dup expressions
+;	The condition --- contains a C expression, may be an empty string
+;	output template or output statement--fragment of C code returning a str
+;	Attributes -- 
+;	)
+;
+; (match_operand:m n predicate constraint)
+;	Placeholder for operand #n of the instruction
+;	Predicate	string that is the name of a function w/ 2 arguments:
+;				(expression, machine mode)
+;		we can build functions:
+;			"isregister"	to describe a register
+;			"isimmediate"	to describe an immediate
+;			"offsetreg"	to describe a register plus offset
+;			"anyregister"	to describe *ANY* register (uRx or Rx)
+;		But ... functions "address_operand", "immediate_operand",
+;			"register_operand", "indirect_operand"
+;		"comparison_operator" and "ordered_comparison_operator"
+;		are also available--be aware, they include more comparisons
+;		than Zip CPU can do.
+;
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Move instructions: both
+;;	(arbitrary) from variables to variables, but this gets
+;;		expanded into:
+;;	from registers to registers
+;;	from immediates to registers
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+;
+(define_expand "mov<mode>"
+	[(set (match_operand:ZI 0 "nonimmediate_operand" "")
+		(match_operand:ZI 1 "general_operand" ""))]
+	""
+	{//; Everything except mem=const or mem=mem can be done easily
+	//; extern void zip_debug_rtx_pfx(const char *, const_rtx);
+	//; fprintf(stderr, "ZIP-GEN-MOVE\n");
+	//; zip_debug_rtx_pfx("FROM: ", operands[1]);
+	//; zip_debug_rtx_pfx("TO  : ", operands[0]);
+
+	//; Need to load into a register between memory slots
+	if ((MEM_P(operands[0]))&&(MEM_P(operands[1]))) {
+		//; fprintf(stderr, "GEN-MOVSI: MEM -> MEM\n");
+		if (can_create_pseudo_p()) {
+			rtx tmp = gen_reg_rtx(GET_MODE(operands[1]));
+			if (GET_MODE(operands[1])==QImode)
+				emit_insn(gen_movqi(tmp,operands[1]));
+			else if (GET_MODE(operands[1])==HImode)
+				emit_insn(gen_movhi(tmp,operands[1]));
+			else
+				emit_insn(gen_movsi(tmp,operands[1]));
+			operands[1] = tmp;
+		}
+	}}
+	[(set_attr "ccresult" "unchanged")])
+(define_insn "mov<mode>_raw"
+	[(set (match_operand:ZI 0 "nonimmediate_operand" "=r,Q,r,r")
+		(match_operand:ZI 1 "general_operand" "r,r,Q,i"))]
+	""
+	"@
+	MOV\t%1,%0
+	S<sz>\t%1,%0
+	L<sz>\t%1,%0
+	LDI\t%1,%0"
+	[(set_attr "ccresult" "unchanged")])
+(define_insn "movsi_reg_off" ; Register to register move, used by prologue
+	[(set (match_operand:SI 0 "register_operand" "=r")
+		(plus:SI (match_operand:SI 1 "register_operand" "r")
+			(match_operand:SI 2 "zip_mvimm_operand_p" "M")))
+		]
+	""
+	"MOV	%2(%1),%0"
+	[(set_attr "ccresult" "unchanged")])
+(define_insn "mov<mode>_lod"	; Load from memory
+	[(set (match_operand:ZI 0 "register_operand" "=r")
+		(match_operand:ZI 1 "zip_memory_operand_p" "Q"))]
+	""
+	"L<sz>\t%1,%0"
+	[(set_attr "predicable" "yes") (set_attr "ccresult" "unchanged")])
+(define_insn "mov<mode>_lod_off" ; used by epilogue code
+	[(set (match_operand:ZI 0 "register_operand" "=r")
+		(mem:ZI (plus:SI (match_operand:SI 1 "register_operand" "r")
+			(match_operand:SI 2 "zip_opb_immv_p" "N"))))]
+	""
+	"L<sz>\t%2(%1),%0"
+	[(set_attr "predicable" "yes") (set_attr "ccresult" "unchanged")])
+(define_insn "mov<mode>_sto"	; Store into memory
+	[(set (match_operand:ZI 0 "zip_memory_operand_p" "=Q")
+		(match_operand:ZI 1 "register_operand" "r"))]
+	""
+	"S<sz>\t%1,%0"
+	[(set_attr "predicable" "yes") (set_attr "ccresult" "unchanged")])
+(define_insn "mov<mode>_sto_off" ; used by prologue code
+	[(set (mem:ZI (plus:SI
+			(match_operand:SI 0 "register_operand" "r")
+			(match_operand:SI 1 "zip_opb_immv_p" "N")))
+		(match_operand:ZI 2 "register_operand" "r"))]
+	""
+	"S<sz>\t%2,%1(%0)"
+	[(set_attr "predicable" "yes") (set_attr "ccresult" "unchanged")])
+(define_insn "mov<mode>_ldi"	; Load immediate
+	[(set (match_operand:ZI 0 "register_operand" "=r")
+		(match_operand:ZI 1 "immediate_operand" "ipU"))]
+	""
+	"LDI	%1,%0"
+	[(set_attr "ccresult" "unchanged")])
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Load and store multiple values
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+; So far, from the code I've seen from GCC's output,
+; these instructions do not appear to be necessary.
+;
+;(define_insn "load_multiple"
+;	for(a=0; a<%2; a++)
+;		LW a(%1),%0+a
+;(define_insn "store_multiple"
+;	for(a=0; a<%2; a++)
+;		SW %0+a,a(%1)
+; pushsi -- Do not define, compiler will work around it nicely w/o our help
+;
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Substitution Pattern
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+(define_subst "cc_substitution"
+	; The pattern may not have any match_dup expressions.
+	[(set (match_operand:SI 0 "" "") (match_operand:SI 1 "" ""))
+		(clobber (reg:CC CC_REG))]
+	""
+	[(set (match_dup 0) (match_dup 1))
+		(set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))
+	])
+;
+(define_subst_attr "cc_subst" "cc_substitution" "_raw" "_clobber")
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Mode conversions
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+(define_insn "zero_extendqisi2"
+	[(set (match_operand:SI 0 "register_operand" "=r")
+		(zero_extend:SI
+			(match_operand:QI 1 "register_operand" "0")))
+	(clobber (reg:CC CC_REG))]
+	""
+	"AND\t255,%0	; zero_extendqisi2 ... reg"
+	[(set_attr "predicable" "no") (set_attr "ccresult" "set")])
+
+(define_insn "zero_extendqisi2_raw"
+	[(set (match_operand:SI 0 "register_operand" "=r")
+		(zero_extend:SI
+			(match_operand:QI 1 "register_operand" "0")))
+	(set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
+	""
+	"AND\t255,%0	; zero_extendqisi2 ... raw/set CC"
+	[(set_attr "predicable" "no") (set_attr "ccresult" "set")])
+
+(define_insn "zero_extendqisi2_mem"
+	[(set (match_operand:SI 0 "register_operand" "=r")
+		(zero_extend:SI
+			(match_operand:QI 1 "memory_operand" "Q")))]
+	""
+	"LB\t%1,%0\t; Zero-Extend:QI"
+	[(set_attr "predicable" "yes") (set_attr "ccresult" "unchanged")])
+;
+;
+;
+;
+(define_insn "zero_extendhisi2"
+	[(set (match_operand:SI 0 "register_operand" "=r")
+		(zero_extend:SI
+			(match_operand:HI 1 "register_operand" "0")))
+	(clobber (reg:CC CC_REG))]
+	""
+	"AND\t65535,%0	; zero_extendhisi2 ... reg"
+	[(set_attr "predicable" "no") (set_attr "ccresult" "set")])
+(define_insn "zero_extendhisi2_raw"
+	[(set (match_operand:SI 0 "register_operand" "=r")
+		(zero_extend:SI
+			(match_operand:HI 1 "register_operand" "0")))
+	(set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
+	""
+	"AND\t65535,%0	; zero_extendhisi2 ... reg"
+	[(set_attr "predicable" "no") (set_attr "ccresult" "set")])
+(define_insn "zero_extendhisi2_mem"
+	[(set (match_operand:SI 0 "register_operand" "=r")
+		(zero_extend:SI
+			(match_operand:HI 1 "memory_operand" "Q")))
+	]
+	""
+	"LH\t%1,%0\t; Zero-Extend:HI"
+	[(set_attr "predicable" "yes") (set_attr "ccresult" "set")])
+;
+;
+;
+;
+(define_insn "extendqisi2"
+	[(set (match_operand:SI 0 "register_operand" "=r")
+		(sign_extend:SI
+			(match_operand:QI 1 "register_operand" "0")))
+	(clobber (reg:CC CC_REG))]
+	""
+	"SEXTB\t%0\t; SEXTB"
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+(define_insn "extendhisi2"
+	[(set (match_operand:SI 0 "register_operand" "=r")
+		(sign_extend:SI
+			(match_operand:HI 1 "register_operand" "0")))
+	(clobber (reg:CC CC_REG))]
+	""
+	"SEXTH\t%0\t; SEXTH"
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; General arithmetic instructions
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+;
+;;
+;; modsi3
+;; umodsi3
+;;
+(define_insn "uminsi3"
+	[(set (match_operand:SI 0 "register_operand" "=r")
+		(umin:SI (match_operand:SI 1 "register_operand" "%0")
+			(match_operand:SI 2 "register_operand" "r")))
+	(clobber (reg:CC CC_REG))
+	]
+	""
+	"CMP	%0,%2
+	MOV.C	%2,%0"
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+(define_insn "umaxsi3"
+	[(set (match_operand:SI 0 "register_operand" "=r")
+		(umax:SI (match_operand:SI 1 "register_operand" "%0")
+			(match_operand:SI 2 "register_operand" "r")))
+	(clobber (reg:CC CC_REG))
+	]
+	""
+	"CMP	%2,%0
+	MOV.C	%2,%0"
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+(define_insn "sminsi3"
+	[(set (match_operand:SI 0 "register_operand" "=r")
+		(smin:SI (match_operand:SI 1 "register_operand" "%0")
+			(match_operand:SI 2 "register_operand" "r")))
+	(clobber (reg:CC CC_REG))
+	]
+	""
+	"CMP	%2,%0
+	MOV.GE	%2,%0"
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+(define_insn "smaxsi3"
+	[(set (match_operand:SI 0 "register_operand" "=r")
+		(smax:SI (match_operand:SI 1 "register_operand" "%0")
+			(match_operand:SI 2 "register_operand" "r")))
+	(clobber (reg:CC CC_REG))
+	]
+	""
+	"CMP	%2,%0
+	MOV.LT	%2,%0"
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
+; AND
+; iOR
+; XOR
+;
+;
+;
+;(define_insn "addvsi4"
+	;[(set (match_operand:SI 0 "register_operand" "=r")
+		;(plus:SI (match_operand:SI 1 "register_operand" "%r")
+			;(match_operand:SI 2 "general_operand" "rO")))
+	;(set (pc) (if_then_else (eq (reg:CC CC_REG) (const_int 0))
+			;(label_ref (match_operand 3))
+			;(pc)))]
+	;""
+	;"MOV	%1,%0
+	;ADD	%2,%0
+	;BV	%3"
+	;[(set_attr "predicable" "no") (set_attr "ccresult" "set")])
+;;(define_insn "subvsi4"
+;;	MOV	%1,%0
+;;	SUB	%2,%0
+;;	BV	%3
+;;(mulvsi4)
+;;(define_insn "uaddvsi4"
+;;	ADD	%2,%0
+;;	BC	%3
+;;(define_insn "usubvsi4"
+;;	MOV	%1,%0
+;;	SUB	%2,%0
+;;	BC	%3
+;;
+;; (define_insn "umulvsi4"
+;;	... ???)
+;;
+;
+;
+; ASR
+; LSL
+; LSR
+;
+;
+;
+;
+;
+; Others:  NEG, TEST, POPC, NOT
+;
+;
+(define_insn "negsi2"
+	[(set (match_operand:SI 0 "register_operand" "=r")
+		(neg:SI (match_operand:SI 1 "register_operand" "r")))
+	(clobber (reg:CC CC_REG))]
+	""
+	"NEG	%1,%0"	;//; = MOV -1(%1),%0, XOR -1,%0
+	[(set_attr "ccresult" "validzn")])
+(define_insn "abssi2"
+	[(set (match_operand:SI 0 "register_operand" "=r")
+		(abs:SI (match_operand:SI 1 "register_operand" "0")))
+	(clobber (reg:CC CC_REG))]
+	""
+	"TEST\t%0\n\tNEG.LT\t%0"
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+(define_insn_and_split "one_cmplsi2"
+	[(set (match_operand:SI 0 "register_operand" "=r")
+		(not:SI (match_operand:SI 1 "register_operand" "0")))
+	(clobber (reg:CC CC_REG))]
+	""
+	"#"
+	""
+	[(parallel [(set (match_dup 0) (xor:SI (match_dup 1) (const_int -1)))
+		(clobber (reg:CC CC_REG))])]
+	""
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Conditional arithmetic instructions
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+;
+;
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Comparison instructions, both compare and test
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+;
+;; This will only work so well, since the direction of the compare is
+;; important in unsigned compares.
+;;
+(define_expand "cmpsi"
+	[(set (reg:CC CC_REG) (compare:CC
+		(match_operand:SI 0 "register_operand" "r")
+		(match_operand:SI 1 "nonmemory_operand" "")))]
+	""
+	{
+		if (!zip_opb_operand_p(operands[1],SImode)) {
+			if (can_create_pseudo_p()) {
+				//; fprintf(stderr, "Generating pseudo register for compare\n");
+				rtx tmp = gen_reg_rtx(SImode);
+				emit_insn(gen_movsi(tmp,operands[1]));
+				operands[1] = tmp;
+			} else FAIL;
+		}
+	})
+(define_insn "cmpsi_reg"
+	[(set (reg:CC CC_REG) (compare:CC
+		(match_operand:SI 0 "register_operand" "r")
+		(match_operand:SI 1 "zip_opb_single_operand_p" "rO")))]
+	""
+	"CMP\t%1,%0"
+	[(set_attr "ccresult" "set")])
+(define_insn "cmpsi_off"
+	[(set (reg:CC CC_REG) (compare:CC
+		(match_operand:SI 0 "register_operand" "r")
+		(plus:SI (match_operand:SI 1 "register_operand" "r")
+			(match_operand 2 "zip_opb_immv_p" "N"))))]
+	""
+	"CMP\t%2+%1,%0"
+	[(set_attr "ccresult" "set")])
+(define_insn "testsi"
+	[(set (reg:CC CC_REG) (compare:CC (and:SI (match_operand:SI 0 "register_operand" "r")
+				(match_operand:SI 1 "zip_opb_single_operand_p" "rO"))
+			(const_int 0)))]
+	""
+	"TEST	%1,%0"
+	[(set_attr "ccresult" "set")])
+(define_insn "testsi_off"
+	[(set (reg:CC CC_REG) (compare:CC
+		(and:SI (match_operand:SI 0 "register_operand" "r")
+			(plus:SI
+				(match_operand:SI 1 "register_operand" "r")
+				(match_operand:SI 2 "zip_opb_immv_p" "N")))
+		(const_int 0)))]
+	""
+	"TEST	%2+%1,%0"
+	[(set_attr "ccresult" "set")])
+(define_insn "nop"
+	[(const_int 0)]
+	""
+	"NOOP"
+	[(set_attr "ccresult" "unchanged")])
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Conditional execution predicates
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+; Sadly, these aren't complete like they should be.  Although these are all of
+; the conditional execution prefixes that the Zip CPU supports, GCC looks for
+; other conditions than these.  That is, (cond_exec ...) is not as well
+; recognized as (if_then_else ...).  So we have to duplicate things to support
+; both methods.
+;
+(define_cond_exec
+	[(eq  (reg:CC CC_REG) (const_int 0))] "" "[Z]")
+(define_cond_exec
+	[(ne  (reg:CC CC_REG) (const_int 0))] "" "[NZ]")
+(define_cond_exec
+	[(lt  (reg:CC CC_REG) (const_int 0))] "" "[LT]")
+(define_cond_exec
+	[(ge  (reg:CC CC_REG) (const_int 0))] "" "[GE]")
+(define_cond_exec
+	[(ltu (reg:CC CC_REG) (const_int 0))] "" "[C]")
+(define_cond_exec
+	[(geu (reg:CC CC_REG) (const_int 0))] "" "[NC]")
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Conditional move instructions, since these won't accept conditional
+;;	execution RTL
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+; // Look for #define HAVE_conditional_move to understand how these might be
+; // used.
+;
+; set_zero_or_one_si
+; movsicc
+(define_expand "movsicc"
+	[(set (match_operand:SI 0 "nonimmediate_operand" "")
+		(if_then_else:SI (match_operand 1 "comparison_operator")
+			(match_operand:SI 2 "general_operand" "")
+			(match_operand:SI 3 "general_operand" "")))]
+	""
+	{
+	//; extern void zip_debug_rtx_pfx(const char *, const_rtx);
+	//; fprintf(stderr, "MOVSICC\n");
+	//; zip_debug_rtx_pfx("- DST: ", operands[0]);
+	//; zip_debug_rtx_pfx("- CMP: ", operands[1]);
+	//; zip_debug_rtx_pfx("- NEW: ", operands[2]);
+	//; zip_debug_rtx_pfx("- DEF: ", operands[3]);
+
+	if (!REG_P(operands[2]))
+		operands[2] = force_reg(SImode, operands[2]);
+
+	if ((!REG_P(operands[3]))||(REGNO(operands[0]) != REGNO(operands[3])))
+		emit_insn(gen_movsi(operands[0], operands[3]));
+	operands[3] = operands[0];
+
+
+	rtx_code	ccode = GET_CODE(operands[1]);
+	rtx	cmpop0 = copy_rtx(XEXP(operands[1], 0));
+	rtx	cmpop1 = copy_rtx(XEXP(operands[1], 1));
+
+	zip_canonicalize_comparison((int *)&ccode, &cmpop0, &cmpop1, true);
+	emit_insn(gen_cmpsi(cmpop0, cmpop1));
+
+	operands[1] = gen_rtx_fmt_ee(ccode, VOIDmode,
+			gen_rtx_REG(CCmode, 14), const0_rtx);
+	})
+;
+;
+;
+(define_expand "addsicc"
+	[(set (match_operand:SI 0 "register_operand" "=r")
+		(if_then_else:SI (match_operand 1 "comparison_operator")
+			(plus:SI (match_operand:SI 2 "register_operand" "0")
+				(match_operand:SI 3 "zip_opb_single_operand_p" "rO"))
+			(match_dup 2)))]
+	""
+	{
+		//; extern void zip_debug_rtx_pfx(const char *, const_rtx);
+		//; fprintf(stderr, "ADDSICC\n");
+		//; zip_debug_rtx_pfx("- DST: ", operands[0]);
+		//; zip_debug_rtx_pfx("- CMP: ", operands[1]);
+		//; zip_debug_rtx_pfx("- OLD: ", operands[2]);
+		//; zip_debug_rtx_pfx("- INC: ", operands[3]);
+
+		if (!REG_P(operands[2]))
+			operands[2] = force_reg(SImode, operands[2]);
+		if (REGNO(operands[0]) != REGNO(operands[2]))
+			emit_insn(gen_movsi(operands[0], operands[2]));
+		operands[2] = operands[0];
+
+		rtx_code	ccode = GET_CODE(operands[1]);
+		rtx	cmpop0 = copy_rtx(XEXP(operands[1], 0));
+		rtx	cmpop1 = copy_rtx(XEXP(operands[1], 1));
+
+		zip_canonicalize_comparison((int *)&ccode, &cmpop0, &cmpop1, true);
+		emit_insn(gen_cmpsi(cmpop0, cmpop1));
+
+		operands[1] = gen_rtx_fmt_ee(ccode, VOIDmode,
+			gen_rtx_REG(CCmode, 14), const0_rtx);
+	}
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
+;
+;
+;
+(define_expand "notsicc"
+	[(set (match_operand:SI 0 "register_operand" "=r")
+		(if_then_else:SI (match_operand 1 "comparison_operator")
+			(xor:SI (match_operand:SI 2 "register_operand" "0")
+				(const_int -1))
+			(match_dup 2)))]
+	""
+	{
+		extern void zip_debug_rtx_pfx(const char *, const_rtx);
+		//; fprintf(stderr, "NOTSICC\n");
+		//; zip_debug_rtx_pfx("- DST: ", operands[0]);
+		//; zip_debug_rtx_pfx("- CMP: ", operands[1]);
+		//; zip_debug_rtx_pfx("- NOT: ", operands[2]);
+		//; zip_debug_rtx_pfx("- OLD: ", operands[3]);
+
+		if (!REG_P(operands[2]))
+			operands[2] = force_reg(SImode, operands[2]);
+		if (REGNO(operands[0]) != REGNO(operands[2]))
+			emit_insn(gen_movsi(operands[0], operands[2]));
+
+		rtx_code	ccode = GET_CODE(operands[1]);
+		rtx	cmpop0 = copy_rtx(XEXP(operands[1], 0));
+		rtx	cmpop1 = copy_rtx(XEXP(operands[1], 1));
+
+		zip_canonicalize_comparison((int *)&ccode,&cmpop0,&cmpop1,true);
+		emit_insn(gen_cmpsi(cmpop0, cmpop1));
+
+		operands[1] = gen_rtx_fmt_ee(ccode, VOIDmode,
+			gen_rtx_REG(CCmode, 14), const0_rtx);
+	}
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
+;
+;
+(define_expand "negsicc"
+	[(set (match_operand:SI 0 "register_operand" "+r")
+		(if_then_else:SI (match_operand 1 "comparison_operator")
+			(neg:SI (match_operand:SI 2 "register_operand" "0"))
+			(match_operand:SI 3 "register_operand" "0")))]
+	""
+	{
+		//; extern void zip_debug_rtx_pfx(const char *, const_rtx);
+		//; fprintf(stderr, "NEGSICC\n");
+		//; zip_debug_rtx_pfx("- DST: ", operands[0]);
+		//; zip_debug_rtx_pfx("- CMP: ", operands[1]);
+		//; zip_debug_rtx_pfx("- NOT: ", operands[2]);
+
+		if (!REG_P(operands[2]))
+			operands[2] = force_reg(SImode, operands[2]);
+		if (REGNO(operands[0]) != REGNO(operands[3]))
+			emit_insn(gen_movsi(operands[0], operands[3]));
+
+		rtx_code	ccode = GET_CODE(operands[1]);
+		rtx	cmpop0 = copy_rtx(XEXP(operands[1], 0));
+		rtx	cmpop1 = copy_rtx(XEXP(operands[1], 1));
+
+		zip_canonicalize_comparison((int *)&ccode,&cmpop0,&cmpop1,true);
+		emit_insn(gen_cmpsi(cmpop0, cmpop1));
+
+		operands[1] = gen_rtx_fmt_ee(ccode, VOIDmode,
+			gen_rtx_REG(CCmode, 14), const0_rtx);
+
+		if (REGNO(operands[0]) != REGNO(operands[2]))
+			emit_insn(gen_movsicc(operands[0], operands[1], operands[2], operands[0]));
+	}
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
+;
+;
+(define_expand "cstoresi4"
+	[(set (reg:CC CC_REG) (compare:CC (match_operand:SI 2 "register_operand" "r")
+		(match_operand:SI 3 "zip_opb_operand_p" "rO")))
+	(set (match_operand:SI 0 "register_operand" "=r")
+		(if_then_else:SI
+			(match_operator 1 "ordered_comparison_operator"
+				[(reg:CC CC_REG) (const_int 0)])
+			(const_int 1) (const_int 0)))]
+	""
+	{
+		//; extern void zip_debug_rtx_pfx(const char *, const_rtx);
+		//; fprintf(stderr, "CSTORESI4\n");
+		//; zip_debug_rtx_pfx("- DST: ", operands[0]);
+		//; zip_debug_rtx_pfx("- TST: ", operands[1]);
+		//; zip_debug_rtx_pfx("- A  : ", operands[2]);
+		//; zip_debug_rtx_pfx("-  -B: ", operands[3]);
+
+		rtx_code	ccode = GET_CODE(operands[1]);
+
+		zip_canonicalize_comparison((int *)&ccode,&operands[2],&operands[3],true);
+		emit_insn(gen_cmpsi(operands[2], operands[3]));
+		emit_insn(gen_movsi(operands[0], const0_rtx));
+		switch(ccode) {
+		case EQ:
+			emit_insn(gen_cmov_eq(operands[0], const1_rtx));
+			break;
+		case NE:
+			emit_insn(gen_cmov_ne(operands[0], const1_rtx));
+			break;
+		case LT:
+			emit_insn(gen_cmov_lt(operands[0], const1_rtx));
+			break;
+		case GE:
+			emit_insn(gen_cmov_ge(operands[0], const1_rtx));
+			break;
+		case LTU:
+			emit_insn(gen_cmov_ltu(operands[0], const1_rtx));
+			break;
+		case GEU:
+			emit_insn(gen_cmov_geu(operands[0], const1_rtx));
+			break;
+		default:
+			FAIL;
+		} DONE;
+	}
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
+;
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Control flow instructions
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+;
+(define_expand "jump"
+	[(set (pc)
+		(label_ref (match_operand 0 "" "")))])
+(define_insn "jump_const"
+	[(set (pc)
+		(match_operand:SI 0 "zip_const_address_operand_p" ""))]
+	""
+	"BRA	%0"
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
+(define_insn "jump_label"	; Must be modeless, VOIDmode, not SI or any othr
+	[(set (pc)	; Otherwise it won't accept jumps to labels
+		(label_ref (match_operand 0 "" "")))]
+	""
+	"BRA	%0"
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
+;
+; This is really the same thing as an indirect jump ... the big difference
+; is that the zip_address_operand_p checks for an "N" type condition, not an
+; "M" type condition ... a bug, but one that works for now.  (The assembler
+; should be able to catch it and raise an error on it ...)
+;
+; #warning "This predicate is appropriate for non-moves, but not for JMPs"
+(define_insn "jump_variable"
+	[(set (pc)
+		(match_operand:SI 0 "zip_address_operand_p" ""))]
+	""
+	"JMP	%0"
+	[(set_attr "ccresult" "unchanged")])
+;
+; Indirect jumps ... both to registers, and registers plus offsets
+;
+(define_insn "indirect_jump"
+	[(set (pc)
+		(match_operand:SI 0 "register_operand" "r"))]
+	""
+	"JMP	%0"
+	[(set_attr "ccresult" "unchanged")])
+(define_insn "indirect_jump_mem"
+	[(set (pc) (match_operand:SI 0 "zip_memory_operand_p" "o"))]
+	""
+	"LW	%0,PC"
+	[(set_attr "ccresult" "unchanged")])
+(define_insn "indirect_jump_off"
+	[(set (pc)
+		(plus:SI (match_operand:SI 0 "register_operand" "r")
+			(match_operand:SI 1 "const_int_operand" "M")))]
+	""
+	"JMP	%1(%0)"
+	[(set_attr "ccresult" "unchanged")])
+;;
+; cbranchsi4
+;;	Op 0 = the comparison operator (le,lt,eq,ne,gt,ge,and usgn ltu,geu,etc.)
+;;	Op 1&2 the operands of the compare instruction
+;;	Op 3 is the jump label
+;;
+;;
+;;
+(define_expand "cbranchsi4"
+	[(set (reg:CC CC_REG) (compare:CC (match_operand:SI 1 "register_operand" "r")
+		(match_operand:SI 2 "zip_opb_operand_p" "rO")))
+	(set (pc) (if_then_else (match_operator 0 "ordered_comparison_operator"
+			[(reg:CC CC_REG) (const_int 0)])
+			(label_ref (match_operand 3 "" ""))
+			(pc)))]
+	""
+	{
+		if (true) {
+		//; extern void zip_debug_rtx_pfx(const char *, const_rtx);
+		//; Two branches give us no end of difficulty when implementing.
+		//; Let's check for these branch codes, and swap the
+		//; comparison to simplify them.
+		//; fprintf(stderr, "CBRANCH\n");
+		//; zip_debug_rtx_pfx("- CMP: ", operands[0]);
+		//; zip_debug_rtx_pfx("- A  : ", operands[1]);
+		//; zip_debug_rtx_pfx("- B  : ", operands[2]);
+		//; zip_debug_rtx_pfx("- JMP: ", operands[3]);
+		//; Can we do better if we reverse some compares?
+		//;
+		//; We have GE, LT, LTU, and GEU conditions
+		//; Figure out how to create the other conditions from
+		//; these.
+		if (GET_CODE(operands[0])==GTU) {
+			if (REG_P(operands[2])) {
+				//; Reverse the comparison
+				emit_insn(gen_cmpsi(operands[2],operands[1]));
+				emit_jump_insn(gen_cbranch_jmp_ltu(operands[3]));
+				DONE;
+			} else if ((CONST_INT_P(operands[2]))
+				&&(INTVAL(operands[2])>-(1<<17)+2)) {
+				//; A >  B
+				//; A >= B+1
+				//; Add one to the integer constant,
+				//; And use a GEU comparison
+				emit_insn(gen_cmpsi(operands[1],
+					GEN_INT(INTVAL(operands[2])+1)));
+				emit_jump_insn(gen_cbranch_jmp_geu(operands[3]));
+				DONE;
+			} else if ((CONST_INT_P(operands[2]))
+				&&(can_create_pseudo_p())) {
+					rtx tmp = gen_reg_rtx(GET_MODE(operands[1]));
+					emit_insn(gen_movsi(tmp,operands[2]));
+					emit_insn(gen_cmpsi(tmp,operands[1]));
+					emit_jump_insn(gen_cbranch_jmp_ltu(operands[3]));
+
+				DONE;
+				
+			}
+		} else if (GET_CODE(operands[0]) == LEU) {
+			if (REG_P(operands[2])) {
+				//; Reverse the comparison
+				emit_insn(gen_cmpsi(operands[2],operands[1]));
+				emit_jump_insn(gen_cbranch_jmp_geu(operands[3]));
+				DONE;
+			} else if ((CONST_INT_P(operands[2]))
+				&&(INTVAL(operands[2])<(1<<17)-2)) {
+				//; A <= B
+				//; A <  B+1
+				//; Add one to the integer constant,
+				//; And use an LTU comparison
+				emit_insn(gen_cmpsi(operands[1],
+					GEN_INT(INTVAL(operands[2])+1)));
+				emit_jump_insn(gen_cbranch_jmp_ltu(operands[3]));
+				DONE;
+			} else if ((CONST_INT_P(operands[2]))
+				&&(can_create_pseudo_p())) {
+					rtx tmp = gen_reg_rtx(GET_MODE(operands[1]));
+					emit_insn(gen_movsi(tmp,operands[2]));
+					emit_insn(gen_cmpsi(tmp,operands[1]));
+					emit_jump_insn(gen_cbranch_jmp_geu(operands[3]));
+				DONE;
+
+			}
+		} else if (GET_CODE(operands[0]) == LE) {
+			if (REG_P(operands[2])) {
+				//; Reverse the comparison
+				emit_insn(gen_cmpsi(operands[2],operands[1]));
+				emit_jump_insn(gen_cbranch_jmp_gte(operands[3]));
+				DONE;
+			} else if ((CONST_INT_P(operands[2]))
+				&&(INTVAL(operands[2])<(1<<17)-2)) {
+				//; A <= B
+				//; A <  B+1
+				//; Add one to the integer constant,
+				//; And use a (signed) LT comparison
+				emit_insn(gen_cmpsi(operands[1],
+					GEN_INT(INTVAL(operands[2])+1)));
+				emit_jump_insn(gen_cbranch_jmp_lt(operands[3]));
+				DONE;
+			} else if ((CONST_INT_P(operands[2]))
+				&&(can_create_pseudo_p())) {
+				rtx tmp = gen_reg_rtx(GET_MODE(operands[1]));
+					emit_insn(gen_movsi(tmp,operands[2]));
+					emit_insn(gen_cmpsi(tmp,operands[1]));
+					emit_jump_insn(gen_cbranch_jmp_gte(operands[3]));
+				DONE;
+
+			}
+		} else if (GET_CODE(operands[0]) == GT) {
+			if (REG_P(operands[2])) {
+				//; Reverse the comparison
+				emit_insn(gen_cmpsi(operands[2],operands[1]));
+				emit_jump_insn(gen_cbranch_jmp_lt(operands[3]));
+				DONE;
+			} else if ((CONST_INT_P(operands[2]))
+				&&(INTVAL(operands[2])<(1<<17)-2)) {
+				//; A >  B
+				//; A >= B+1
+				//; Add one to the integer constant,
+				//; And use a (signed) GE comparison
+				emit_insn(gen_cmpsi(operands[1],
+					GEN_INT(INTVAL(operands[2])+1)));
+				emit_jump_insn(gen_cbranch_jmp_gte(operands[3]));
+				DONE;
+			} else if ((CONST_INT_P(operands[2]))
+					&&(can_create_pseudo_p())) {
+				rtx tmp = gen_reg_rtx(GET_MODE(operands[1]));
+				emit_insn(gen_movsi(tmp,operands[2]));
+				emit_insn(gen_cmpsi(tmp,operands[1]));
+				emit_jump_insn(gen_cbranch_jmp_lt(operands[3]));
+
+				DONE;
+			}
+		}
+	}})
+(define_insn "cbranch_jmp_eq"
+	[(set (pc) (if_then_else (eq (reg:CC CC_REG) (const_int 0))
+		 (label_ref (match_operand 0 "" ""))
+		 (pc)))]
+	""
+	"BZ\t%0"
+	[(set_attr "predicable" "no")
+		(set_attr "ccresult" "unchanged")])
+(define_insn "cbranch_jmp_neq"
+	[(set (pc) (if_then_else (ne (reg:CC CC_REG) (const_int 0))
+		 (label_ref (match_operand 0 "" ""))
+		 (pc)))]
+	""
+	"BNZ\t%0"
+	[(set_attr "predicable" "no")
+		(set_attr "ccresult" "unchanged")])
+(define_insn "cbranch_jmp_lt"
+	[(set (pc) (if_then_else (lt (reg:CC CC_REG) (const_int 0))
+		 (label_ref (match_operand 0 "" ""))
+		 (pc)))]
+	""
+	"BLT\t%0"
+	[(set_attr "predicable" "no")
+		(set_attr "ccresult" "unchanged")])
+(define_insn "cbranch_jmp_le"
+	[(set (pc) (if_then_else (le (reg:CC CC_REG) (const_int 0))
+		 (label_ref (match_operand 0 "" ""))
+		 (pc)))]
+	""
+	"BLT\t%0\n\tBZ\t%0"
+	[(set_attr "predicable" "no")
+		(set_attr "ccresult" "unchanged")])
+(define_insn "cbranch_jmp_gt"
+	[(set (pc) (if_then_else (gt (reg:CC CC_REG) (const_int 0))
+		 (label_ref (match_operand 0 "" ""))
+		 (pc)))]
+	""
+	"BZ\t.Lgt%=\n\tBGE\t%0\n\t.Lgt%=:"
+	[(set_attr "predicable" "no")
+		(set_attr "ccresult" "unchanged")])
+(define_insn "cbranch_jmp_gte"
+	[(set (pc) (if_then_else (ge (reg:CC CC_REG) (const_int 0))
+		 (label_ref (match_operand 0 "" ""))
+		 (pc)))]
+	""
+	"BGE\t%0"
+	[(set_attr "predicable" "no")
+		(set_attr "ccresult" "unchanged")])
+(define_insn "cbranch_jmp_ltu"
+	[(set (pc) (if_then_else (ltu (reg:CC CC_REG) (const_int 0))
+		 (label_ref (match_operand 0 "" ""))
+		 (pc)))]
+	""
+	"BC\t%0"
+	[(set_attr "predicable" "no")
+		(set_attr "ccresult" "unchanged")])
+(define_insn "cbranch_jmp_gtu"
+	[(set (pc) (if_then_else (gtu (reg:CC CC_REG) (const_int 0))
+		 (label_ref (match_operand 0 "" ""))
+		 (pc)))]
+	""
+	;// We could flip the condition code, and then be able to jump.
+	;// The problem is that doing this adjusts the condition code, and
+	;// we aren't allowed to do that here.
+	;//
+	;// The problem here is the equals.  What do you do if A=B?  Our new
+	;// condition tests for A>=B, not A>B.  So ... how do you get rid of
+	;// the equals?  We do so here by branching around. (sigh)
+	"BZ\t.Lgtu%=\n\tBNC\t%0\n.Lgtu%=:"
+	[(set_attr "predicable" "no")
+		(set_attr "ccresult" "unknown")])
+(define_insn "cbranch_jmp_leu"
+	[(set (pc) (if_then_else (leu (reg:CC CC_REG) (const_int 0))
+		 (label_ref (match_operand 0 "" ""))
+		 (pc)))]
+	""	; Need to check for both LTU (i.e. C) and Z
+	"BC\t%0
+	BZ\t%0"
+	[(set_attr "predicable" "no")
+		(set_attr "ccresult" "unchanged")])
+(define_insn "cbranch_jmp_geu"
+	[(set (pc) (if_then_else (geu (reg:CC CC_REG) (const_int 0))
+		 (label_ref (match_operand 0 "" ""))
+		 (pc)))]
+	""
+	"BNC\t%0"
+	[(set_attr "predicable" "no")
+		(set_attr "ccresult" "unchanged")])
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Looping constructs
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+; 
+(define_insn "decrement_and_branch_until_zero"
+	[(set (pc) (if_then_else
+		(ge (plus:SI (match_operand:SI 0 "register_operand" "+r,Q")
+			(const_int -1)) (const_int 0))
+		(label_ref (match_operand 1 "" ""))
+		(pc)))
+	(set (match_dup 0) (plus:SI (match_dup 0) (const_int -1)))
+	;(set (reg:CC CC_REG)
+		;(compare:CC (minus:SI (match_dup 0) (const_int 1))
+ 			;(const_int 0)))
+	(clobber (match_scratch:SI 2 "=r,r"))
+	(clobber (reg:CC CC_REG))]
+	""
+	{
+		if (MEM_P(operands[0])) {
+			//; We could also go searching for dead regs if
+			//; necessary
+			return "LW %0,%2"
+				"\t; decrement_and_branch_until_zero(MEM)\n"
+				"\tADD\t-1,%2\t\n"
+				"\tSW %2,%0\n"
+				"\tBLT\t.Ldec%=\n"
+				"\tBRA\t%1\n"
+				".Ldec%=:";
+		}
+		return "ADD\t-1,%0\t; decrement_and_branch_until_zero (REG)\n"
+			"\tBLT\t.Ldec%=\n"
+			"\tBRA\t%1\n"
+			".Ldec%=:";
+	}
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
+; Requires TARGET_CAN_USE_DOLOOP_P to be set appropriately in order to use
+;
+;
+;(define_insn "doloop_end"
+	;[(set (pc)
+		;(if_then_else
+			;(ne (plus:SI (match_operand:SI 0 "register_operand" "+r")
+				;;(const_int -1)) (const_int 0))
+			;(label_ref (match_operand 1 "" ""))
+			;(pc)))
+	;(set (match_dup 0) (plus:SI (match_dup 0) (const_int -1)))
+	;; (set (reg:CC CC_REG)
+		;; (compare:CC (minus:SI (match_dup 0) (const_int 1))
+ 			;; (const_int 0)))
+	;(clobber (reg:CC CC_REG))]
+	;"(reload_completed)"
+	;"ADD\t-1,%0\t; doloop_end\n\tBZ\t.Lloop%=\n\tBRA\t%1\n.Lloop%=:"
+	;[(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+; Since we have a doloop_end, we must also have a doloop_begin.  Since the 
+; ZipCPU has no special begin looping instruction, we'll simply define this
+; as a null instruction.
+;
+; (define_expand "doloop_begin" [(const_int 0)] "(0)")
+;
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Subroutine call
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+; There are two types of calls: "call" and "call_value".
+;
+; Each of these types of calls are then expanded into one of:
+;
+;	_const		- A call to a constant address, such as a symbol
+;			reference or a fixed location
+;
+;	_label		- This should be the same as _const, except that for
+;			some reason the RTL and matching rules are separate.
+;			Hence we have a separate rule for this.
+;
+;	_mem		- The memory address we wish to jump to is stored in
+;			memory somewhere, and we have only a pointer.  In this
+;			case, we load that pointer straight to the PC and go.
+;
+;	_var		- The address to jump to is given as an offset to a 
+;			register, such as X+R3.  This is an indirect jump.  
+;			Although we support it, it does require different RTL
+;			code.
+;
+(define_expand "call"
+	[(call (match_operand 0 "" "")
+		(match_operand 1 "" ""))]
+	""
+	{
+		if (MEM_P(operands[0])) {
+			;// extern void zip_debug_rtx(const_rtx);
+			;//
+			;// fprintf(stderr, "CALL: ");
+			;// zip_debug_rtx(operands[0]);
+			;//
+			;//
+			;// This should always be the case
+			rtx addr = XEXP(operands[0],0);
+			if (zip_const_address_operand_p(addr, SImode)) {
+				//; fprintf(stderr, "Generating gen_void_call_const()\n");
+				emit_call_insn(gen_void_call_const(addr,
+						operands[1]));
+			} else if ((MEM_P(addr))&&(zip_address_operand(
+							XEXP(addr,0)))) {
+				fprintf(stderr, "ERR: ZIP.MD::CALL INDIRECT\n");
+				emit_call_insn(gen_void_call_mem(XEXP(addr,0),
+								 operands[1]));
+				gcc_assert(0);
+			} else {
+				emit_call_insn(gen_void_call_var(operands[0],
+								 operands[1]));
+			}
+			DONE;
+		} else FAIL;
+	})
+;
+(define_expand "sibcall"
+	[(call (mem:SI (match_operand 0 "zip_const_address_operand_p" ""))
+		(match_operand 1 "" ""))
+	(use (match_operand 2 "" ""))
+	(use (reg:SI RTN_REG))
+	(simple_return)]
+	""
+	{
+		if (MEM_P(operands[0])) {
+			;// extern void zip_debug_rtx(const_rtx);
+			;//
+			;// fprintf(stderr, "CALL: ");
+			;// zip_debug_rtx(operands[0]);
+			;//
+			;//
+			;// This should always be the case
+			rtx addr = XEXP(operands[0],0);
+			if (zip_const_address_operand_p(addr, SImode)) {
+				//; fprintf(stderr, "Generating gen_void_call_const()\n");
+				emit_call_insn(gen_void_sibcall_const(addr,
+						operands[1]));
+			} else if ((MEM_P(addr))&&(zip_address_operand(
+							XEXP(addr,0)))) {
+				fprintf(stderr, "ERR: ZIP.MD::SIBCALL INDIRECT\n");
+				emit_call_insn(gen_void_sibcall_mem(XEXP(addr,0),
+								 operands[1]));
+				gcc_assert(0);
+			} else {
+				emit_call_insn(gen_void_sibcall_var(operands[0],
+								 operands[1]));
+			}
+			DONE;
+		} else FAIL;
+	}) ; "BAR\t%0\n"
+;
+(define_insn "void_sibcall_const"
+	[(call (mem:SI (match_operand:SI 0 "zip_const_address_operand_p" ""))
+			(match_operand 1 "const_int_operand" "n"))
+		(use (reg:SI RTN_REG))
+		(clobber (reg:CC CC_REG))
+		(simple_return)]
+	""
+	"BRA\t%0"
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+(define_insn "void_sibcall_mem"
+	[(call (mem:SI (match_operand:SI 0 "zip_memory_operand_p" "Q"))
+			(match_operand 1 "const_int_operand" "n"))
+		(use (reg:SI RTN_REG))
+		(clobber (reg:CC CC_REG))
+		(simple_return)]
+	""
+	"LW\t%0,PC"
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+; #warning "This predicate is appropriate for non-moves, but not for JMPs"
+(define_insn "void_sibcall_var"
+	[(call (match_operand:SI 0 "zip_memory_operand_p" "")
+			(match_operand 1 "const_int_operand" "n"))
+		(use (reg:SI RTN_REG))
+		(clobber (reg:CC CC_REG))
+		(simple_return)]
+	""
+	"JMP\t%0"
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+(define_expand "sibcall_value"
+	[(set (match_operand 0 "register_operand" "")
+		(call (mem:SI
+			(match_operand 1 "zip_const_address_operand_p" ""))
+		(match_operand 2 "" "")))
+	(use (match_operand 3 "" ""))
+	(use (reg:SI RTN_REG))
+	(clobber (reg:CC CC_REG))
+	(simple_return)]
+	""
+	{
+		if (MEM_P(operands[1])) {
+			;// extern void zip_debug_rtx(const_rtx);
+			;//
+			;// fprintf(stderr, "SIBCALL/V: ");
+			;// zip_debug_rtx(operands[1]);
+			;//
+			;//
+			;// This should always be the case
+			rtx addr = XEXP(operands[1],0);
+			if (zip_const_address_operand_p(addr, SImode)) {
+				emit_call_insn(gen_reg_sibcall_const(operands[0], addr, operands[2]));
+			} else if ((MEM_P(addr))&&(zip_address_operand(XEXP(addr,0)))) {
+				fprintf(stderr, "ERR: ZIP.MD::SIBCALL-VALUE() INDIRECT\n");
+				emit_call_insn(gen_reg_sibcall_mem(operands[0], XEXP(addr,0), operands[2]));
+				gcc_assert(0);
+			} else {
+				emit_call_insn(gen_reg_sibcall_var(operands[0], operands[1], operands[2]));
+			}
+			DONE;
+		} else FAIL;
+	})
+;
+;
+;
+;
+; How do we want to do this better?
+;	Replace the RTL w/
+;		return_label= gen_label_rtx();
+;		emit_movsi(gen_rtx_REG(zip_R0),plus_constant(
+;			gen_rtx_REG(zip_PC),return_label));
+;		emit_jump(label_rtx(
+;		
+;		emit_label(return_label);
+;
+; The problem is: we can't!  GCC distinguishes between jumps and calls when
+; optimizing, and it doesn't see the need to keep the label around.  Thus, the
+; label gets removed and the call gets lost.  Hence we do it this way (below).
+; I'll probably bastardize a means of getting a new codelabel that GCC doesn't
+; recognize as such, but for now we'll use .Lcall# as our label. 
+;
+(define_insn "void_call_const"
+	[(call (mem:SI (match_operand:SI 0 "zip_const_address_operand_p" ""))
+			(match_operand 1 "const_int_operand" "n"))
+		(clobber (reg:SI RTN_REG))
+		(clobber (reg:CC CC_REG))]
+	""
+	"JSR\t%0"
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+(define_insn "void_call_mem"
+	[(call (mem:SI (match_operand:SI 0 "zip_memory_operand_p" "Q"))
+			(match_operand 1 "const_int_operand" "n"))
+		(clobber (reg:SI RTN_REG))
+		(clobber (reg:CC CC_REG))]
+	""
+	"MOV	.Lcall%=(PC),R0\;LW\t%0,PC\n.Lcall%=:"
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+; #warning "This predicate is appropriate for non-moves, but not for JMPs"
+(define_insn "void_call_var"
+	[(call (match_operand:SI 0 "zip_memory_operand_p" "")
+			(match_operand 1 "const_int_operand" "n"))
+		(clobber (reg:SI RTN_REG))
+		(clobber (reg:CC CC_REG))]
+	""
+	{
+		if (REG_P(operands[0]))
+			return "JSR\t(%0)";
+		else
+			return "JSR\t%0";
+	}
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
+(define_expand "call_value"
+	[(parallel [(set (match_operand 0 "register_operand")
+		(call (match_operand:SI 1 "" "")
+			(match_operand 2 "const_int_operand" "n")))
+	(clobber (reg:SI RTN_REG))
+	(clobber (reg:CC CC_REG))])]
+	""
+	{
+		if (MEM_P(operands[1])) {
+			;// extern void zip_debug_rtx(const_rtx);
+			;//
+			;// fprintf(stderr, "CALL/V: ");
+			;// zip_debug_rtx(operands[1]);
+			;//
+			;//
+			//; This should always be the case
+			rtx addr = XEXP(operands[1],0);
+			if (zip_const_address_operand_p(addr, SImode)) {
+				//; fprintf(stderr, "Generating gen_reg_call_const()\n");
+				emit_call_insn(gen_reg_call_const(operands[0], addr, operands[2]));
+			} else if ((MEM_P(addr))&&(zip_address_operand(XEXP(addr,0)))) {
+				fprintf(stderr, "ERR: ZIP.MD::CALL-VALUE() INDIRECT\n");
+				emit_call_insn(gen_reg_call_mem(operands[0], XEXP(addr,0), operands[2]));
+				gcc_assert(0);
+			} else {
+				//; fprintf(stderr, "ZIP.MD::CALL-VALUE() INDIRECT\n");
+				emit_call_insn(gen_reg_call_var(operands[0], operands[1], operands[2]));
+			}
+			DONE;
+		} else FAIL;
+	})
+(define_insn "reg_call_const"
+	[(set (match_operand 0 "register_operand" "")
+		(call (mem:SI (match_operand:SI 1 "zip_const_address_operand_p" ""))
+			(match_operand 2 "const_int_operand" "n")))
+		(clobber (reg:SI RTN_REG))
+		(clobber (reg:CC CC_REG))]
+	""
+	"JSR\t%1"
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+(define_insn "reg_call_mem"
+	[(set (match_operand 0 "register_operand" "")
+		(call (mem:SI (match_operand:SI 1 "zip_memory_operand_p" "Q"))
+			(match_operand 2 "const_int_operand" "n")))
+		(clobber (reg:SI RTN_REG))
+		(clobber (reg:CC CC_REG))]
+	""
+	"MOV	.Lcall%=(PC),R0\t; CALL MEM (untested)\n\tLW\t%1,PC\n.Lcall%=:"
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+; #warning "This predicate is appropriate for non-moves, but not for JMPs"
+(define_insn "reg_call_var"
+	[(set (match_operand 0 "register_operand" "")
+		(call (match_operand:SI 1 "zip_memory_operand_p" "")
+			(match_operand 2 "const_int_operand" "n")))
+		(clobber (reg:SI RTN_REG))
+		(clobber (reg:CC CC_REG))]
+	""
+	{
+		;// extern void zip_debug_rtx(const_rtx);
+
+		;// fprintf(stderr, "CALL-V/REG: ");
+		;// zip_debug_rtx(operands[0]);
+
+		if (REG_P(operands[1]))
+			return "JSR\t(%1)";
+		else
+			return "JSR\t%1";
+	}
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
+;
+(define_insn "reg_sibcall_const"
+	[(set (match_operand 0 "register_operand" "")
+		(call (mem:SI (match_operand:SI 1 "zip_const_address_operand_p" ""))
+			(match_operand 2 "const_int_operand" "n")))
+		(use (reg:SI RTN_REG))
+		(clobber (reg:CC CC_REG))
+		(simple_return)]
+	""
+	"BRA\t%1"
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+(define_insn "reg_sibcall_mem"
+	[(set (match_operand 0 "register_operand" "")
+		(call (mem:SI (match_operand:SI 1 "zip_memory_operand_p" "Q"))
+			(match_operand 2 "const_int_operand" "n")))
+		(use (reg:SI RTN_REG))
+		(clobber (reg:CC CC_REG))
+		(simple_return)]
+	""
+	"LW\t%1,PC"
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+; #warning "This predicate is appropriate for non-moves, but not for JMPs"
+(define_insn "reg_sibcall_var"
+	[(set (match_operand 0 "register_operand" "")
+		(call (match_operand:SI 1 "zip_memory_operand_p" "")
+			(match_operand 2 "const_int_operand" "n")))
+		(use (reg:SI RTN_REG))
+		(clobber (reg:CC CC_REG))
+		(simple_return)]
+	""
+	{
+		if (REG_P(operands[1]))
+			return "JMP\t(%1); REG_SIBCALL_VAR";
+		else
+			return "JMP\t%1";
+	}
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Frame manipulation RTX
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+;
+(define_expand "prologue"
+	[(const_int 0)]
+	""
+	"{ zip_expand_prologue(); DONE; }")
+(define_expand "sibcall_epilogue"
+	[(return)]
+	""
+	"{ zip_sibcall_epilogue(); DONE; }")
+(define_expand "epilogue"
+	[(return)]
+	""
+	"{ zip_expand_epilogue(); DONE; }")
+(define_expand "return" ; In order to use the function predicate, this *must*
+	[(return)]	; be a define_expand
+	"zip_use_return_insn()")
+	; "JMP	R0"
+	; [(set_attr "ccresult" "unchanged")])
+(define_insn "*return"	; A "*" -- means it cannot be called from C
+	[(return)]
+	""
+	"RETN"
+	[(set_attr "ccresult" "unchanged")])
+(define_insn "simple_return"	; A "*" -- means it cannot be called from C
+	[(simple_return)]
+	""
+	"RETN"
+	[(set_attr "ccresult" "unchanged")])
+(define_insn "return_if_eq"
+	[(set (pc) (if_then_else (eq (reg:CC CC_REG) (const_int 0))
+			(return) (pc)))]
+	"zip_use_return_insn()"
+	"RETN.Z"
+	[(set_attr "ccresult" "unchanged") (set_attr "predicable" "no")])
+(define_insn "return_if_ne"
+	[(set (pc) (if_then_else (ne (reg:CC CC_REG) (const_int 0))
+			(return) (pc)))]
+	"zip_use_return_insn()"
+	"RETN.NZ"
+	[(set_attr "ccresult" "unchanged") (set_attr "predicable" "no")])
+(define_insn "return_if_lt"
+	[(set (pc) (if_then_else (lt (reg:CC CC_REG) (const_int 0))
+			(return) (pc)))]
+	"zip_use_return_insn()"
+	"RETN.LT"
+	[(set_attr "ccresult" "unchanged") (set_attr "predicable" "no")])
+(define_insn "return_if_gte"
+	[(set (pc) (if_then_else (ge (reg:CC CC_REG) (const_int 0))
+			(return) (pc)))]
+	"(zip_use_return_insn())"
+	"RETN.GTE"
+	[(set_attr "ccresult" "unchanged") (set_attr "predicable" "no")])
+(define_insn "return_if_ltu"
+	[(set (pc) (if_then_else (ltu (reg:CC CC_REG) (const_int 0))
+			(return) (pc)))]
+	"zip_use_return_insn()"
+	"RETN.C"
+	[(set_attr "ccresult" "unchanged") (set_attr "predicable" "no")])
+(define_insn "return_if_geu"
+	[(set (pc) (if_then_else (geu (reg:CC CC_REG) (const_int 0))
+			(return) (pc)))]
+	"(zip_use_return_insn())"
+	"RETN.NC"
+	[(set_attr "ccresult" "unchanged") (set_attr "predicable" "no")])
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Zip Builtin Functions
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+;
+(define_insn "zip_rtu"
+	[(unspec_volatile [(reg:SI CC_REG)] UNSPEC_RTU)
+	(clobber (reg:CC CC_REG))]
+	"(!ZIP_USER)"
+	"RTU"
+	[(set_attr "ccresult" "unknown")])
+(define_insn "zip_busy"
+	[(set (pc) (minus:SI (pc) (const_int 1)))]
+	""
+	"BUSY"
+	[(set_attr "predicable" "yes") (set_attr "ccresult" "unchanged")])
+(define_insn "zip_halt" ; Needs to be unspec_volatile, or optimizer will opt out
+	[(unspec_volatile [(reg:SI CC_REG)] UNSPEC_HALT)
+	(clobber (reg:CC CC_REG))]
+	"(!ZIP_USER)"
+	"HALT"
+	[(set_attr "ccresult" "unknown")])
+(define_insn "zip_idle"
+	[(unspec_volatile [(reg:SI CC_REG)] UNSPEC_IDLE)
+	(clobber (reg:CC CC_REG))]
+	""
+	"WAIT"
+	[(set_attr "ccresult" "unknown")])
+(define_insn "zip_syscall"
+	[(unspec_volatile [(reg:SI CC_REG)] UNSPEC_SYSCALL)]
+	""
+	"CLR\tCC"
+	[(set_attr "ccresult" "unknown")])
+;
+;
+; Operator "save_context"
+;
+;	Okay, so we're not really reading and writing operand 0, %0, however
+;	if we don't list it as a "+r" register, the compiler may allocate it
+;	among the other registers, thus we clobber it in the middle of the
+;	operation before the task is complete.
+;
+(define_insn "zip_save_context"
+	[(unspec_volatile
+			[ (match_operand:SI 0 "register_operand" "+r") ]
+			UNSPEC_SAVE_CONTEXT)
+		(clobber (match_scratch:SI 1 "=r"))
+		(clobber (match_scratch:SI 2 "=r"))
+		(clobber (match_scratch:SI 3 "=r"))
+		(clobber (match_scratch:SI 4 "=r"))]
+	"(!ZIP_USER)"
+	"MOV\tuR0,%1
+	MOV\tuR1,%2
+	MOV\tuR2,%3
+	MOV\tuR3,%4
+	SW\t%1,%0
+	SW\t%2,4(%0)
+	SW\t%3,8(%0)
+	SW\t%4,12(%0)
+	MOV\tuR4,%1
+	MOV\tuR5,%2
+	MOV\tuR6,%3
+	MOV\tuR7,%4
+	SW\t%1,16(%0)
+	SW\t%2,20(%0)
+	SW\t%3,24(%0)
+	SW\t%4,28(%0)
+	MOV\tuR8,%1
+	MOV\tuR9,%2
+	MOV\tuR10,%3
+	MOV\tuR11,%4
+	SW\t%1,32(%0)
+	SW\t%2,36(%0)
+	SW\t%3,40(%0)
+	SW\t%4,44(%0)
+	MOV\tuR12,%1
+	MOV\tuSP,%2
+	MOV\tuCC,%3
+	MOV\tuPC,%4
+	SW\t%1,48(%0)
+	SW\t%2,52(%0)
+	SW\t%3,56(%0)
+	SW\t%4,60(%0)"
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
+;
+; See the comment above about why operand 0, %0, *must* be a "+r" operand,
+; even though we don't really read (or change) its value throughout this
+; operation.
+;
+(define_insn "zip_restore_context"
+	[(unspec_volatile [
+		(match_operand:SI 0 "register_operand" "+r")] UNSPEC_RESTORE_CONTEXT)
+	(clobber (match_scratch:SI 1 "=r"))
+	(clobber (match_scratch:SI 2 "=r"))
+	(clobber (match_scratch:SI 3 "=r"))
+	(clobber (match_scratch:SI 4 "=r"))]
+	"(!ZIP_USER)"
+	"LW\t0(%0),%1
+	LW\t4(%0),%2
+	LW\t8(%0),%3
+	LW\t12(%0),%4
+	MOV\t%1,uR0
+	MOV\t%2,uR1
+	MOV\t%3,uR2
+	MOV\t%4,uR3
+	LW\t16(%0),%1
+	LW\t20(%0),%2
+	LW\t24(%0),%3
+	LW\t28(%0),%4
+	MOV\t%1,uR4
+	MOV\t%2,uR5
+	MOV\t%3,uR6
+	MOV\t%4,uR7
+	LW\t32(%0),%1
+	LW\t36(%0),%2
+	LW\t40(%0),%3
+	LW\t44(%0),%4
+	MOV\t%1,uR8
+	MOV\t%2,uR9
+	MOV\t%3,uR10
+	MOV\t%4,uR11
+	LW\t48(%0),%1
+	LW\t52(%0),%2
+	LW\t56(%0),%3
+	LW\t60(%0),%4
+	MOV\t%1,uR12
+	MOV\t%2,uSP
+	MOV\t%3,uCC
+	MOV\t%4,uPC"
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
+(define_insn "zip_bitrev"
+	[(set (match_operand:SI 0 "register_operand" "=r")
+		(unspec:SI [(match_operand:SI 1 "register_operand" "r")] UNSPEC_BITREV))
+	]
+	""
+	"BREV\t%1,%0"
+	[(set_attr "ccresult" "unchanged")])
+(define_insn "zip_cc"
+	[(set (match_operand:SI 0 "register_operand" "=r")
+		(unspec:SI [(reg:SI CC_REG)] UNSPEC_GETCC))]
+	""
+	"MOV\tCC,%0"
+	[(set_attr "ccresult" "unchanged")])
+(define_insn "zip_ucc"
+	[(set (match_operand:SI 0 "register_operand" "=r")
+		(unspec_volatile:SI [(reg:SI CC_REG)] UNSPEC_GETUCC))]
+	""
+	"MOV\tuCC,%0"
+	[(set_attr "ccresult" "unchanged")])
+(define_insn "zip_cc_sto"
+	[(set (mem:SI (match_operand:SI 0 "register_operand" "r"))
+		(unspec_volatile:SI [(reg:SI CC_REG)] UNSPEC_GETCC))]
+	""
+	"SW\tCC,(%0)"
+	[(set_attr "ccresult" "unchanged")])
+(define_insn "zip_cc_sto_off"
+	[(set (mem:SI (plus:SI
+			(match_operand:SI 0 "register_operand" "r")
+			(match_operand:SI 1 "const_int_operand" "N")))
+		(unspec_volatile:SI [(reg:SI CC_REG)] UNSPEC_GETCC))]
+	""
+	"SW\tCC,%1(%0)"
+	[(set_attr "ccresult" "unchanged")])
+(define_insn "ldilo"
+	[(set (match_operand:SI 0 "register_operand" "=r")
+		(unspec:SI [(match_operand:SI 1 "immediate_operand" "")] UNSPEC_LDILO))]
+	""
+	"LDILO	%1,%0"
+	[(set_attr "predicable" "yes") (set_attr "ccresult" "unchanged")])
+
+;
+;
+; Missing still: zip_break(idno)
+; Would also be nice to have a zip_reg builtin, allowing us to read or write
+; a register, as in zip_reg(5)=40;.  Not sure what this means, though, when the
+; number placed into this is not constant, or how to specify that it must *only*
+; be constant.  That's actually the problem with both proposals, zip_break(id)
+; and zip_reg(regno)--both depend upon a compile time constant to work.
+;
+;
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Trap Instruction
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+; The ZipCPU doesn't really have a "trap" instruction per se.  The goal is that
+; *nothing* should ever trap, and so we should never get here.  However, the
+; compiler seems to want a trap instruction for some reason.  (It keeps us
+; from calling the abort() function, if we don't define these ...)  So let's
+; just grab onto the break instruction and declare it to be a trap instruction
+; for our purposes.  Alternatively, we might've used a syscall, but ... this
+; will work for both user and system instructions.
+;
+(define_insn "trap"
+	[(trap_if (const_int 1) (const_int 0))]
+	""
+	"BREAK"
+	[(set_attr "predicable" "yes") (set_attr "ccresult" "unchanged")])
+;
+(define_expand "ctrapsi4"
+	[(set (reg:CC CC_REG) (compare:CC
+		(match_operand:SI 1 "register_operand" "r")
+		(match_operand:SI 2 "zip_opb_single_operand_p" "rO")))
+	(trap_if (match_operator 0 "ordered_comparison_operator"
+			[(reg:CC CC_REG) (const_int 0)])
+			(match_operand 3 "const_int_operand" "O"))]
+	""
+	)
+(define_insn "trapif"
+	[(trap_if (match_operator 0 "ordered_comparison_operator"
+			[(reg:CC CC_REG) (const_int 0)])
+			(match_operand 1 "const_int_operand" "O"))]
+	""
+	"BREAK\t%1"
+	[(set_attr "predicable" "no")])
+;
+;
+;
+;
+(include "zip-di.md")
+(include "zip-ops.md")
+(include "zip-float.md")
+(include "zip-sync.md")
+(include "zip-peephole.md")
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Unimplemented (or not yet implemented) RTL Codes
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+;
+;
+;(define_insn "addvsi4"
+;	)
+;(define_insn "subvsi4"
+;	)
+;(define_insn "mulvsi4"
+;	)
+;(define_insn "umulvsi4"
+;	)
+;(define_insn "umulvsi4"
+;	)
+;(define_insn "negvsi3"
+;	"MOV	%1,%0
+;	XOR	-1,%0
+;	ADD	1,%0
+;	BV	%2"
+;	)
+;
+;(define_insn "ssum_widen
+;(define_insn "usum_widen
+;(define_insn "udot_prod"
+;(define_insn "maddsidi4"
+;(define_insn "umaddsidi4"
+;(define_insn "msubsidi4"
+;(define_insn "umsubsidi4"
+;
+;
+; STILL MISSING:
+;	SYSCALL(ID)
+;		MOV %ID,R0
+;		CLR	CC
+;	cmove	... the conditional move, created from a
+;	(set (match_op 0 "" "r") (if_then_else (condition) (a) (reg X))))
+;	pattern
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/config/zip/zip-modes.def gcc-6.2.0-zip/gcc/config/zip/zip-modes.def
--- gcc-6.2.0/gcc/config/zip/zip-modes.def	1969-12-31 19:00:00.000000000 -0500
+++ gcc-6.2.0-zip/gcc/config/zip/zip-modes.def	2017-01-10 12:46:54.791966242 -0500
@@ -0,0 +1 @@
+#define	BITS_PER_UNIT	8
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/config/zip/zip-peephole.md gcc-6.2.0-zip/gcc/config/zip/zip-peephole.md
--- gcc-6.2.0/gcc/config/zip/zip-peephole.md	1969-12-31 19:00:00.000000000 -0500
+++ gcc-6.2.0-zip/gcc/config/zip/zip-peephole.md	2017-03-01 15:46:02.440221158 -0500
@@ -0,0 +1,768 @@
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Filename:	zip-peephole.md
+;;
+;; Project:	Zip CPU -- a small, lightweight, RISC CPU soft core
+;;
+;; Purpose:	This is a machine description of a variety of peephole
+;;		optimizations which can be applied to the ZipCPU RTL
+;;	representation.
+;;
+;;
+;; Creator:	Dan Gisselquist, Ph.D.
+;;		Gisselquist Technology, LLC
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Copyright (C) 2015,2017, Gisselquist Technology, LLC
+;;
+;; This program is free software (firmware): you can redistribute it and/or
+;; modify it under the terms of  the GNU General Public License as published
+;; by the Free Software Foundation, either version 3 of the License, or (at
+;; your option) any later version.
+;;
+;; This program is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+;; FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+;; for more details.
+;;
+;; License:	GPL, v3, as defined and found on www.gnu.org,
+;;		http://www.gnu.org/licenses/gpl.html
+;;
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Peephole optimizations
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+;
+;
+;
+; Match:
+;	CMP	R1,R0
+;	BGTU	lbl
+; Transform to:
+;	CMP	R0,R1
+;	BC	lbl
+;
+(define_peephole2
+	[(set (reg:CC CC_REG) (compare:CC
+		(match_operand:SI 0 "register_operand")
+		(match_operand:SI 1 "register_operand")))
+	(set (pc) (if_then_else (gtu (reg:CC CC_REG) (const_int 0))
+			(label_ref (match_operand 2 ""))
+			(pc)))]
+	"(ZIP_PEEPHOLE)"
+	[(set (reg:CC CC_REG) (compare:CC (match_dup 1) (match_dup 0)))
+	(set (pc) (if_then_else (ltu (reg:CC CC_REG) (const_int 0))
+			(label_ref (match_dup 2))
+			(pc)))]
+	"")
+(define_peephole2
+	[(match_scratch:SI 3 "=r")
+	(set (reg:CC CC_REG) (compare:CC
+		(match_operand:SI 0 "register_operand")
+		(match_operand 1 "const_int_operand")))
+	(match_dup 3)
+	(set (pc) (if_then_else (gtu (reg:CC CC_REG) (const_int 0))
+			(label_ref (match_operand 2 ""))
+			(pc)))]
+	"(ZIP_PEEPHOLE)"
+	[(set (match_dup 3) (match_dup 1))
+	(set (reg:CC CC_REG) (compare:CC (match_dup 3) (match_dup 0)))
+	(set (pc) (if_then_else (ltu (reg:CC CC_REG) (const_int 0))
+			(label_ref (match_dup 2))
+			(pc)))]
+	"")
+;(define_peephole2
+;	[(set (reg:CC CC_REG) (compare:CC
+;		(match_operand:SI 0 "register_operand")
+;		(match_operand 1 "const_int_operand")))
+;	(set (pc) (if_then_else (gtu (reg:CC CC_REG) (const_int 0))
+;			(label_ref (match_operand 2 ""))
+;			(pc)))]
+;	""
+;	[(set (reg:CC CC_REG) (compare:CC (match_dup 0) (match_dup 1)))
+;	(set (pc) (if_then_else (geu (reg:CC CC_REG) (const_int 0))
+;			(label_ref (match_dup 2))
+;			(pc)))]
+;	"operands[1] = GEN_INT(INTVAL(operands[1])-1);")
+;
+;
+; Match:
+;	CMP	R1,R0
+;	BGEU	lbl
+; Transform to:
+;	CMP	1(R0),R1
+;	BC	lbl
+;
+(define_peephole2
+	[(set (reg:CC CC_REG) (compare:CC
+		(match_operand:SI 0 "register_operand")
+		(match_operand:SI 1 "register_operand")))
+	(set (pc) (if_then_else (geu (reg:CC CC_REG) (const_int 0))
+			(label_ref (match_operand 2 ""))
+			(pc)))]
+	"(ZIP_PEEPHOLE)"
+	[(set (reg:CC CC_REG) (compare:CC
+		(match_dup 1) (plus:SI (match_dup 0) (const_int 1))))
+	(set (pc) (if_then_else (ltu (reg:CC CC_REG) (const_int 0))
+			(label_ref (match_dup 2))
+			(pc)))]
+	"")
+;
+;
+; Match:
+;	CMP	R1,R0
+;	BGE	lbl
+; Transform to:
+;	CMP	1(R0),R1
+;	BLT	lbl
+; ... why?  when we support a BGE instruction?
+;(define_peephole2
+	;[(set (reg:CC CC_REG) (compare:CC
+		;(match_operand:SI 0 "register_operand")
+		;(match_operand:SI 1 "register_operand")))
+	;(set (pc) (if_then_else (ge (reg:CC CC_REG) (const_int 0))
+			;(label_ref (match_operand 2 ""))
+			;(pc)))]
+	;"(ZIP_PEEPHOLE)"
+	;[(set (reg:CC CC_REG) (compare:CC (match_dup 1)
+			;(plus:SI (match_dup 0) (const_int 1))))
+	;(set (pc) (if_then_else (lt (reg:CC CC_REG) (const_int 0))
+			;(label_ref (match_dup 2))
+			;(pc)))]
+	;"")
+;
+;
+; Match:
+;	CMP	R1,R0
+;	BLEU	lbl
+; Transform to:
+;	CMP	1(R1),R0
+;	BC	lbl
+;
+(define_peephole2
+	[(set (reg:CC CC_REG) (compare:CC
+		(match_operand:SI 0 "register_operand" "")
+		(match_operand:SI 1 "register_operand" "")))
+	(set (pc) (if_then_else (leu (reg:CC CC_REG) (const_int 0))
+			(label_ref (match_operand 2 "" ""))
+			(pc)))]
+	"(ZIP_PEEPHOLE)"
+	[(set (reg:CC CC_REG) (compare:CC (match_dup 0)
+			(plus:SI (match_dup 1) (const_int 1))))
+	(set (pc) (if_then_else (ltu (reg:CC CC_REG) (const_int 0))
+			(label_ref (match_dup 2))
+			(pc)))]
+	"")
+; 
+;
+;
+; Match:
+;	CMP	R1,R0
+;	BLE	lbl
+; Transform to:
+;	CMP	1(R1),R0
+;	BLT	lbl
+;
+(define_peephole2
+	[(set (reg:CC CC_REG)
+		(compare:CC (match_operand:SI 0 "register_operand" "")
+			(match_operand:SI 1 "const_int_operand" "")))
+	(set (pc) (if_then_else (le (reg:CC CC_REG) (const_int 0))
+			(label_ref (match_operand 2 "" ""))
+			(pc)))]
+	"(ZIP_PEEPHOLE)&&(INTVAL(operands[1])<((1<<17)-2))"
+	[(set (reg:CC CC_REG) (compare:CC (match_dup 0) (match_dup 1)))
+	(set (pc) (if_then_else (lt (reg:CC CC_REG) (const_int 0))
+			(label_ref (match_dup 2))
+			(pc)))]
+	"operands[1] = GEN_INT(INTVAL(operands[1])+1);")
+;
+; Match:
+;	CMP	R1,R0
+;	BLEU	lbl
+; Transform to:
+;	CMP	1(R1),R0
+;	BC(LTU)	lbl
+;
+(define_peephole2
+	[(set (reg:CC CC_REG)
+		(compare:CC (match_operand:SI 0 "register_operand" "")
+			(match_operand:SI 1 "const_int_operand" "")))
+	(set (pc) (if_then_else (leu (reg:CC CC_REG) (const_int 0))
+			(label_ref (match_operand 2 "" ""))
+			(pc)))]
+	"(ZIP_PEEPHOLE)&&(INTVAL(operands[1])<((1<<17)-2))"
+	[(set (reg:CC CC_REG) (compare:CC (match_dup 0) (match_dup 1)))
+	(set (pc) (if_then_else (ltu (reg:CC CC_REG) (const_int 0))
+			(label_ref (match_dup 2))
+			(pc)))]
+	"operands[1] = GEN_INT(INTVAL(operands[1])+1);")
+;
+;
+;
+;
+; Match:
+;	(parallel [(set () ()) (clobber (CC))])
+;	(compare () ())
+; Transform to:
+;	(parallel [(set () ()) (set (CC) (0))]
+;	(compare () ())
+;
+(define_peephole2
+	[(parallel [(set (match_operand:SI 0 "") (match_operand:SI 1 ""))
+		(clobber (reg:CC CC_REG))])
+	(set (reg:CC CC_REG) (compare:CC (match_operand:SI 2 "")
+			(match_operand:SI 3 "")))]
+	"(ZIP_PEEPHOLE)&&zip_insn_sets_cc(insn)"
+	[(parallel [(set (match_dup 0) (match_dup 1))
+		(set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+	(set (reg:CC CC_REG) (compare:CC (match_dup 2) (match_dup 3)))]
+	"")
+;
+;
+;
+; Match:
+;	(parallel [(set () ()) (clobber (CC))])
+;	(set () ())
+;	(compare () ())
+; Transform to:
+;	(parallel [(set () ()) (set (CC) (0))]
+;	(set () ())
+;	(compare () ())
+;
+(define_peephole2
+	[(parallel [(set (match_operand:SI 0 "") (match_operand:SI 1 ""))
+		(clobber (reg:CC CC_REG))])
+	(set (match_operand 2 "") (match_operand 3 ""))
+	(set (reg:CC CC_REG) (compare:CC (match_operand:SI 4 "")
+			(match_operand:SI 5 "")))]
+	"(ZIP_PEEPHOLE)&&(zip_insn_sets_cc(insn))&&((!REG_P(operands[2]))||(REGNO(operands[2])!=CC_REG))"
+	[(parallel [(set (match_dup 0) (match_dup 1))
+		(set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+	(set (match_dup 2) (match_dup 3))
+	(set (reg:CC CC_REG) (compare:CC (match_dup 4) (match_dup 5)))]
+	"")
+;
+;
+;
+; Match:
+;	MOV A(R1),R3
+;	CMP R3,R0
+;	(R3 is dead)
+; Transform to:
+;	CMP A(R1),R0
+;
+(define_peephole2
+	[(set (match_operand:SI 3 "register_operand")
+		(plus:SI (match_operand:SI 1 "register_operand")
+			(match_operand:SI 2 "zip_mvimm_operand_p")))
+	(set (reg:CC CC_REG)
+		(compare:CC (match_operand:SI 0 "register_operand")
+			(match_dup 3)))]
+	"(ZIP_PEEPHOLE)&&peep2_regno_dead_p(2, REGNO(operands[3]))"
+	[(set (reg:CC CC_REG) (compare:CC (match_dup 0)
+		(plus:SI (match_dup 1) (match_dup 2))))]
+	"")
+;
+;
+; Match:
+;	ALU OpB,R0
+;	CMP 0,R0
+; Transform to:
+;	ALU OpB,R0
+;
+(define_peephole2
+	[(parallel [(set (match_operand:SI 0 "register_operand")
+			(match_operand:SI 1 ""))
+		(set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+	(set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
+	"(ZIP_PEEPHOLE)"
+	[(parallel [(set (match_dup 0) (match_dup 1))
+		(set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+	])
+;
+;
+; Match:
+;	ALU OpB,R0
+;	MOV R1,R2	// Can be LDI, LOD, STO, etc.
+;	CMP 0,R0
+; Transform to:
+;	ALU OpB,R0
+;	MOV R0,R1
+;
+(define_peephole2
+	[(parallel [(set (match_operand:SI 0 "register_operand")
+			(match_operand:SI 1 ""))
+		(set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+	(set (match_operand:SI 2 "nonimmediate_operand") (match_operand:SI 3 ""))
+	(set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
+	"(ZIP_PEEPHOLE)&&((!REG_P(operands[2]))||((REGNO(operands[2])!=REGNO(operands[0]))&&((REGNO(operands[2])>=FIRST_PSEUDO_REGISTER)||(REGNO(operands[2])<CC_REG))))"
+	[(parallel [(set (match_dup 0) (match_dup 1))
+		(set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+	(set (match_dup 2) (match_dup 3))
+	])
+;
+;
+; Match:
+;	ALU OpB,R0
+;	MOV R0,R1
+;	CMP 0,R1
+; Transform to:
+;	ALU OpB,R0
+;	MOV R0,R1
+;
+(define_peephole2
+	[(parallel [(set (match_operand:SI 0 "register_operand")
+			(match_operand:SI 1 ""))
+		(set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+	(set (match_operand:SI 2 "register_operand") (match_dup 0))
+	(set (reg:CC CC_REG) (compare:CC (match_dup 2) (const_int 0)))]
+	"(ZIP_PEEPHOLE)"
+	[(parallel [(set (match_dup 0) (match_dup 1))
+		(set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+	(set (match_dup 2) (match_dup 0))	; keep the MOV %0,%2; was (match_dup 3), but no operand 3 exists
+	])
+;
+;	
+; Match:
+;	MOV R1,R0
+;	ADD $x,R0
+;	(CCREG is dead, and x is within range ...)
+; Transform to:
+;	MOV $x(R1),R0
+(define_peephole2
+	[(set (match_operand:SI 0 "register_operand")
+		(match_operand:SI 1 "register_operand"))
+	(parallel [(set (match_dup 0) (plus:SI (match_dup 0)
+				(match_operand 2 "zip_mvimm_operand_p")))
+		(set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+	]
+	"(ZIP_PEEPHOLE)&&(peep2_regno_dead_p(2,CC_REG))"
+	[(set (match_dup 0) (plus:SI (match_dup 1) (match_dup 2)))])
+;
+; Match:
+;	MOV A(R0),R0
+;	ADD $x,R1
+;	(CCREG is dead, and (A+x) is within range ...)
+; Transform to:
+;	MOV $x(R1),R0
+;
+(define_peephole2
+	[(set (match_operand:SI 0 "register_operand")
+		(plus:SI (match_operand:SI 1 "register_operand")
+			(match_operand 2 "zip_mvimm_operand_p")))
+	(parallel [(set (match_dup 0) (plus:SI (match_dup 0)
+				(match_operand 3 "zip_mvimm_operand_p")))
+		(set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+	]
+	"(ZIP_PEEPHOLE)&&(peep2_regno_dead_p(2,CC_REG))
+		&&(INTVAL(operands[2])+INTVAL(operands[3])<((1<<17)))
+		&&(INTVAL(operands[2])+INTVAL(operands[3])>=-(1<<17))"
+	[(set (match_dup 0) (plus:SI (match_dup 1) (match_dup 2)))]
+	"operands[2]=GEN_INT(INTVAL(operands[2])+INTVAL(operands[3]));")
+;
+;
+;
+; Match:
+;	ADD $x,R0
+;	MOV R0,R1
+;	(CCREG is dead, and R0 is dead)
+; Transform to:
+;	MOV (A+$x)(R0),R1
+; ... again, how do I build this plus?
+;
+(define_peephole2
+	[(parallel [(set (match_operand:SI 0 "register_operand")
+			(plus:SI (match_dup 0)
+				(match_operand 1 "zip_mvimm_operand_p")))
+		(set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+	(set (match_operand:SI 2 "register_operand") (match_dup 0))]
+	"(ZIP_PEEPHOLE)&&(peep2_regno_dead_p(2, REGNO(operands[0])))&&(peep2_regno_dead_p(2,CC_REG))"
+	[(set (match_dup 2) (plus:SI (match_dup 0) (match_dup 1)))])
+;
+;
+;
+; Match:
+;	ADD $x,R0
+;	MOV A(R0),R1
+;	(CCREG is dead, and R0 is dead)
+; Transform to:
+;	MOV (A+$x)(R0),R1
+;
+(define_peephole2
+	[(parallel [
+		(set (match_operand:SI 0 "register_operand")
+			(plus:SI (match_dup 0)
+				(match_operand 1 "zip_mvimm_operand_p")))
+		(set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+	(set (match_operand:SI 2 "register_operand")
+		(plus:SI (match_dup 0)
+			(match_operand 3 "zip_mvimm_operand_p")))
+	]
+	"(ZIP_PEEPHOLE)&&(peep2_regno_dead_p(2,CC_REG))
+		&&(peep2_regno_dead_p(1,REGNO(operands[0])))
+		&&(INTVAL(operands[1])+INTVAL(operands[3])<((1<<17)))
+		&&(INTVAL(operands[1])+INTVAL(operands[3])>=-(1<<17))"
+	[(set (match_dup 0) (plus:SI (match_dup 2) (match_dup 3)))]
+	"operands[3]=GEN_INT(INTVAL(operands[1])+INTVAL(operands[3]));")
+;
+;
+;
+; Match:
+;	ADD	$x,R0
+;	ADD	R0,Rn
+;	(R0 is dead, if R0 is not Rn)
+; Transform to:
+;	ADD	$x(R0),Rn
+;
+(define_peephole2
+	[(parallel [(set (match_operand:SI 0 "register_operand")
+			(plus:SI (match_dup 0)
+				(match_operand 1 "zip_opb_immv_p")))
+		(set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+	(parallel [(set (match_operand:SI 2 "register_operand")
+			(plus:SI (match_dup 2) (match_dup 0)))
+		(set (reg:CC CC_REG) (compare:CC (match_dup 2) (const_int 0)))])
+	]
+	"(ZIP_PEEPHOLE)&&(REGNO(operands[0])!=REGNO(operands[2]))&&(peep2_regno_dead_p(2, REGNO(operands[0])))"
+	[(parallel [(set (match_dup 2)
+			(plus:SI (match_dup 2)
+				(plus:SI (match_dup 0)
+					(match_dup 1))))
+		(set (reg:CC CC_REG) (compare:CC (match_dup 2) (const_int 0)))])
+	])
+;
+; Match:
+;	ADD	$x,R0
+;	LOD	-x(R0),R1
+; Transform to:
+;	LOD	(R0),R1
+;	ADD	$x,R0
+;
+(define_peephole2
+	[(parallel [(set (match_operand:SI 0 "register_operand")
+			(plus:SI (match_dup 0)
+				(match_operand 1 "zip_opb_immv_p")))
+		(set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+	(set (match_operand:SI 3 "register_operand")
+		(mem:SI (plus:SI (match_dup 0)
+			(match_operand 2 "zip_opb_immv_p"))))
+	]
+	"(ZIP_PEEPHOLE)&&(REGNO(operands[0])!=REGNO(operands[3]))&&(INTVAL(operands[1])==-INTVAL(operands[2]))" ; operands[1] is a CONST_INT (see INTVAL use); the register that must differ from %0 is the load destination %3
+	[(set (match_dup 3) (mem:SI (match_dup 0)))
+	(parallel [(set (match_dup 0) (plus:SI (match_dup 0) (match_dup 1)))
+		(set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+	])
+(define_peephole2
+	[(parallel [(set (match_operand:SI 0 "register_operand")
+			(plus:SI (match_dup 0)
+				(match_operand 1 "zip_opb_immv_p")))
+		(clobber (reg:CC CC_REG))])
+	(set (match_operand:SI 3 "register_operand")
+		(mem:SI (plus:SI (match_dup 0)
+			(match_operand 2 "zip_opb_immv_p"))))
+	]
+	"(ZIP_PEEPHOLE)&&(REGNO(operands[0])!=REGNO(operands[3]))&&(INTVAL(operands[1])==-INTVAL(operands[2]))" ; operands[1] is a CONST_INT (see INTVAL use); the register that must differ from %0 is the load destination %3
+	[(set (match_dup 3) (mem:SI (match_dup 0)))
+	(parallel [(set (match_dup 0) (plus:SI (match_dup 0) (match_dup 1)))
+		(set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+	])
+;
+;
+;
+; Match:
+;	ADD	$x,R0
+;	STO	R1,-x(R0)
+; Transform to:
+;	STO	R1,(R0)
+;	ADD	$x,R0
+;
+(define_peephole2
+	[(parallel [(set (match_operand:SI 0 "register_operand")
+			(plus:SI (match_dup 0)
+				(match_operand 1 "zip_opb_immv_p")))
+		(set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+	(set (mem:SI (plus:SI (match_dup 0) (match_operand 2 "zip_opb_immv_p")))
+		(match_operand:SI 3 "register_operand"))
+	]
+	"(ZIP_PEEPHOLE)&&(REGNO(operands[0])!=REGNO(operands[3]))&&(INTVAL(operands[1])==-INTVAL(operands[2]))" ; operands[1] is a CONST_INT (see INTVAL use); the register that must differ from %0 is the stored register %3
+	[(set (mem:SI (match_dup 0)) (match_dup 3))
+	(parallel [(set (match_dup 0) (plus:SI (match_dup 0) (match_dup 1)))
+		(set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+	])
+(define_peephole2
+	[(parallel [(set (match_operand:SI 0 "register_operand")
+			(plus:SI (match_dup 0)
+				(match_operand 1 "zip_opb_immv_p")))
+		(clobber (reg:CC CC_REG))])
+	(set (mem:SI (plus:SI (match_dup 0) (match_operand 2 "zip_opb_immv_p")))
+		(match_operand:SI 3 "register_operand"))
+	]
+	"(ZIP_PEEPHOLE)&&(REGNO(operands[0])!=REGNO(operands[3]))&&(INTVAL(operands[1])==-INTVAL(operands[2]))" ; operands[1] is a CONST_INT (see INTVAL use); the register that must differ from %0 is the stored register %3
+	[(set (mem:SI (match_dup 0)) (match_dup 3))
+	(parallel [(set (match_dup 0) (plus:SI (match_dup 0) (match_dup 1)))
+		(set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+	])
+;
+;
+; Match:
+;	ADD	$x,R0
+;	ANY	R1,R2 (destination is not R0, source does not reference R0)
+;	ADD	R0,Rn (could be 1 or 2, not 0)
+;	(R0 is dead)
+; Transform to:
+;	ANY	R1,R2
+;	ADD	$x(R0),Rn
+;
+;
+;
+; Match:
+;	MOV	R1,R0
+;	AND	#/R2,R0
+;	(Ry dead ...)
+; Transform to:
+;	TEST	#/Rz,Rx
+;
+(define_peephole2
+	[(set (match_operand:SI 0 "register_operand")
+		(match_operand:SI 1 "register_operand"))
+	(parallel [(set (match_operand:SI 3 "register_operand")
+			(and:SI (match_dup 0)
+				(match_operand:SI 2 "zip_opb_single_operand_p")))
+		(set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+	]
+	"((1)||(ZIP_PEEPHOLE))&&(peep2_regno_dead_p(2, REGNO(operands[0])))&&(peep2_regno_dead_p(2, REGNO(operands[3])))"
+	[(set (reg:CC CC_REG) (compare:CC (and:SI (match_dup 1) (match_dup 2))
+			(const_int 0)))])
+;
+;
+; Match:
+;	LB OpB,Rx
+;	AND 255,Rx	(in form of zero_extend)
+; Transform to:
+;	LB OpB,Rx
+;
+;
+(define_peephole2
+	[(set (match_operand:QI 0 "register_operand")
+		(match_operand:QI 1 "memory_operand"))
+	(parallel [(set (match_operand:SI 2 "register_operand")
+			(zero_extend:SI (match_dup 0)))
+		(clobber (reg:CC CC_REG))])]
+	"((1)||(ZIP_PEEPHOLE))"
+	[(parallel [(set (match_dup 2) (zero_extend:SI (match_dup 1)))
+		(clobber (reg:CC CC_REG))])])
+;
+;
+;
+; Match:
+;	LW OpB,Rx
+;	AND 65535,Rx
+; Transform to:
+;	LW OpB,Rx
+;
+;
+(define_peephole2
+	[(set (match_operand:HI 0 "register_operand")
+		(match_operand:HI 1 "memory_operand"))
+	(parallel [(set (match_operand:SI 2 "register_operand")
+			(zero_extend:SI (match_dup 0)))
+		(clobber (reg:CC CC_REG))])]
+	"((1)||(ZIP_PEEPHOLE))
+		&&(REG_P(operands[0]))
+		&&(REG_P(operands[2]))
+		&&(REGNO(operands[0])==REGNO(operands[2]))"
+	[(parallel [(set (match_dup 2) (zero_extend:SI (match_dup 1)))
+		(clobber (reg:CC CC_REG))])])
+;
+;
+;
+; Match:
+;	LDI 0,Rx
+;	LDI.y #,Rx
+;	Add Rx,Ry
+; Transform to:
+;	Add.y #,Ry
+;
+;
+(define_peephole2
+	[(set (match_operand:SI 0 "register_operand") (const_int 0))
+	(set (match_dup 0)
+		(if_then_else:SI
+			(match_operator 1 "ordered_comparison_operator"
+				[(reg:CC CC_REG) (const_int 0)])
+			(match_operand:SI 2 "zip_opb_single_operand_p") (match_dup 0)))
+	(parallel [
+		(set (match_operand:SI 3 "register_operand")
+			(plus:SI (match_dup 3) (match_dup 0)))
+		(clobber (reg:CC CC_REG))
+		])]
+	"((1)||(ZIP_PEEPHOLE))
+		&&(peep2_regno_dead_p(3, REGNO(operands[0])))"
+	[(set (match_dup 3)
+		(if_then_else:SI
+			(match_op_dup 1 [(reg:CC CC_REG) (const_int 0)])
+			(plus:SI (match_dup 3) (match_dup 2))
+			(match_dup 3)))])
+;
+;
+; Match:
+;	LDI	0,Rx
+;	LDI.y	#,Rx
+;	XOR	Rx,Rc
+; Transform to:
+;	XOR.y #,Ry
+;
+;
+(define_peephole2
+	[(set (match_operand:SI 0 "register_operand") (const_int 0))
+	(set (match_dup 0)
+		(if_then_else:SI
+			(match_operator 1 "ordered_comparison_operator"
+				[(reg:CC CC_REG) (const_int 0)])
+			(match_operand:SI 2 "zip_opb_single_operand_p") (match_dup 0)))
+	(parallel [
+		(set (match_operand:SI 3 "register_operand")
+			(xor:SI (match_dup 3) (match_dup 0)))
+		(clobber (reg:CC CC_REG))
+		])]
+	"((1)||(ZIP_PEEPHOLE))
+		&&(peep2_regno_dead_p(3, REGNO(operands[0])))"
+	[(set (match_dup 3)
+		(if_then_else:SI
+			(match_op_dup 1 [(reg:CC CC_REG) (const_int 0)])
+			(xor:SI (match_dup 3) (match_dup 2))
+			(match_dup 3)))])
+;
+;
+;
+;
+;
+; Match:
+;	LDI	0,Rx
+;	LDI.y	#,Rx
+;	OR	Rx,Rc
+; Transform to:
+;	OR.y #,Ry
+;
+;
+;
+(define_peephole2
+	[(set (match_operand:SI 0 "register_operand") (const_int 0))
+	(set (match_dup 0)
+		(if_then_else:SI (match_operator 1 "ordered_comparison_operator"
+				[(reg:CC CC_REG) (const_int 0)])
+			(match_operand:SI 2 "zip_opb_single_operand_p") (match_dup 0)))
+	(parallel [(set (match_operand:SI 3 "register_operand")
+			(ior:SI (match_dup 3) (match_dup 0)))
+		(clobber (reg:CC CC_REG))])]
+	"((1)||(ZIP_PEEPHOLE))
+		&&(peep2_regno_dead_p(3, REGNO(operands[0])))"
+	[(set (match_dup 3)
+		(if_then_else:SI
+			(match_op_dup 1 [(reg:CC CC_REG) (const_int 0)])
+			(ior:SI (match_dup 3) (match_dup 2))
+			(match_dup 3)))])
+;
+;
+;
+; Match:
+;	AND 255,Rx
+;	SB OpB,Rx
+;	(AND Rx is DEAD)
+; Transform to:
+;	SB OpB,Rx
+;
+;
+;(define_peephole2
+	;[(set (match_operand:SI 2 "register_operand")
+		;(zero_extend:SI (match_operand:SI 0)))
+	;[(set (match_operand:QI 0 "memory_operand")
+		;(match_operand:QI 1 "memory_operand"))
+	;"((1)||(ZIP_PEEPHOLE))"
+	;[(set (match_dup 2) (zero_extend:SI (match_dup 1)))])
+;
+;
+;
+;
+;
+; Match:
+;	(call ...
+;	(set (pc) (label))
+;  or (in asm)
+;	MOV	.Lcallx(PC),R0
+;	BRA	(somewhere)
+; .Lcallx
+;	BRA	(somewhere-else)
+; Transform to:
+;	
+;	(sequence [(call ...
+;		(set (pc) (label))])
+;   or (in asm)
+;	"LDI	(somewhere-else),R0
+;	BRA	subroutine"
+;
+; While the following looks good, it doesn't work.  My guess is that the reason
+; why it doesn't work is that the jump at the end crosses basic block boundaries.
+;
+;(define_insn "void_call_mem_unspec"
+;	[(call (unspec:SI [(mem:SI (match_operand:VOID 0 "zip_const_address_operand_p" ""))] UNSPEC_RAW_CALL)
+;			(match_operand 1 "const_int_operand" "n"))
+;		(clobber (reg:SI RTN_REG))
+;		(clobber (reg:CC CC_REG))]
+;	""
+;	"BRA\t%0,PC"
+;	[(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;(define_peephole2
+;	[(parallel [(call (mem:SI (match_operand:VOID 0 "zip_const_address_operand_p"))
+;			(match_operand 1 "const_int_operand"))
+;		(clobber (reg:SI RTN_REG))
+;		(clobber (reg:CC CC_REG))])
+;	; The match operand for the (set (pc) ...) cannot have anything but
+	; VOIDmode, or it won't match.
+;	(set (pc) (match_operand:VOID 2 "zip_const_address_operand_p"))]
+;	""
+;	[(set (reg:SI RTN_REG) (match_dup 2))
+;	(call (unspec:SI [(mem:SI (match_operand:VOID 0 "zip_const_address_operand_p"))] UNSPEC_RAW_CALL)
+;			(match_operand 1 "const_int_operand"))
+;		(use (reg:SI RTN_REG))
+;		(clobber (reg:SI RTN_REG))
+;		(clobber (reg:CC CC_REG))]
+;	"fprintf(stderr, \"CALL-JUMP Matched\");")
+;
+;
+;
+; So, the following *should* have worked as well.  However, this falls apart
+; because the 'final' routine can't tell if we are calling a subroutine in this
+; function or not.
+;
+;(define_peephole
+	;[(parallel [(call (mem:SI (match_operand:SI 0 "zip_const_address_operand_p"))
+			;(match_operand 1 "const_int_operand"))
+		;(clobber (reg:SI RTN_REG))
+		;(clobber (reg:CC CC_REG))])
+	;(set (pc) (label_ref (match_operand 2 "")))]
+	;""
+	;"LDI\t%2,R0\;BRA\t%0"
+	;[(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+; and for
+;	BRA target
+;	BRA target ; two branches to the same identical target in a row ...
+;
+;
+;
+; STILL MISSING:
+;
+;
+;
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/config/zip/zip-protos.h gcc-6.2.0-zip/gcc/config/zip/zip-protos.h
--- gcc-6.2.0/gcc/config/zip/zip-protos.h	1969-12-31 19:00:00.000000000 -0500
+++ gcc-6.2.0-zip/gcc/config/zip/zip-protos.h	2017-02-17 16:45:53.264117439 -0500
@@ -0,0 +1,82 @@
+////////////////////////////////////////////////////////////////////////////////
+//
+// Filename: 	zip-protos.h
+//
+// Project:	Zip CPU backend for the GNU Compiler Collection
+//
+// Purpose:	
+//
+// Creator:	Dan Gisselquist, Ph.D.
+//		Gisselquist Technology, LLC
+//
+////////////////////////////////////////////////////////////////////////////////
+//
+// Copyright (C) 2016, Gisselquist Technology, LLC
+//
+// This program is free software (firmware): you can redistribute it and/or
+// modify it under the terms of  the GNU General Public License as published
+// by the Free Software Foundation, either version 3 of the License, or (at
+// your option) any later version.
+//
+// This program is distributed in the hope that it will be useful, but WITHOUT
+// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this program.  (It's in the $(ROOT)/doc directory, run make with no
+// target there if the PDF file isn't present.)  If not, see
+// <http://www.gnu.org/licenses/> for a copy.
+//
+// License:	GPL, v3, as defined and found on www.gnu.org,
+//		http://www.gnu.org/licenses/gpl.html
+//
+//
+////////////////////////////////////////////////////////////////////////////////
+#ifndef	ZIP_PROTOS_H
+#define	ZIP_PROTOS_H
+
+extern	int	zip_supported_condition(int c);
+extern	void	zip_expand_prologue(void);
+extern	void	zip_expand_epilogue(void);
+extern	void	zip_sibcall_epilogue(void);
+extern	int	zip_expand_movdi(rtx,rtx);
+extern	int	zip_expand_movsicc(rtx,rtx,rtx,rtx);
+extern	int	zip_initial_elimination_offset(int, int);
+extern	void	zip_print_operand(FILE *, rtx, int);
+extern	void	zip_print_operand_address(FILE *, rtx);
+extern	enum	reg_class	zip_reg_class(int);
+extern	rtx	zip_return_addr_rtx(int, rtx);
+extern	int	zip_num_arg_regs(enum machine_mode, tree);
+
+extern	void	zip_asm_output_def(FILE *s, const char *n, const char *v);
+
+extern	void	zip_canonicalize_comparison(int *, rtx *, rtx *, bool);
+extern	bool	zip_function_ok_for_sibcall(tree, tree);
+extern	int	zip_address_operand(rtx op);
+extern	int	zip_const_address_operand(rtx op);
+extern	int	zip_use_return_insn(void);
+extern const char *zip_set_zero_or_one(rtx, rtx);
+extern const char *zip_movsicc(rtx, rtx, rtx, rtx);
+
+extern	int	zip_insn_sets_cc(rtx_insn *insn);
+extern	int	zip_is_conditional(rtx_insn *insn);
+extern	int	zip_ct_address_operand(rtx op);
+extern	int	zip_pd_opb_operand(rtx op);
+extern	int	zip_pd_mov_operand(rtx op);
+extern	int	zip_pd_imm_operand(rtx op);
+extern	int	zip_pd_mvimm_operand(rtx op);
+extern	int	zip_ct_const_address_operand(rtx op);
+extern	int	zip_pd_const_address_operand(rtx op);
+extern	const char *zip_movsicc(rtx, rtx, rtx, rtx);
+extern	const char *zip_addqics(rtx, rtx, rtx, rtx);
+extern	const char *zip_cbranchdi(rtx, rtx, rtx, rtx);
+
+extern	void	zip_ifcvt_machdep_init(struct ce_if_block *ceinfo);
+extern	void	zip_ifcvt_modify_cancel(struct ce_if_block *ceinfo);
+extern	void	zip_ifcvt_modify_final(struct ce_if_block *ceinfo);
+extern	void	zip_ifcvt_modify_tests(struct ce_if_block *ceinfo, rtx *true_expr, rtx *false_expr);
+extern	void	zip_ifcvt_modify_insn(struct ce_if_block *ceinfo, rtx pattern, rtx_insn *insn);
+
+#endif
+
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/config/zip/zip-sync.md gcc-6.2.0-zip/gcc/config/zip/zip-sync.md
--- gcc-6.2.0/gcc/config/zip/zip-sync.md	1969-12-31 19:00:00.000000000 -0500
+++ gcc-6.2.0-zip/gcc/config/zip/zip-sync.md	2017-02-22 18:03:26.740198685 -0500
@@ -0,0 +1,415 @@
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Filename:	zip-sync.md
+;;
+;; Project:	Zip CPU -- a small, lightweight, RISC CPU soft core
+;;
+;; Purpose:	This is that portion of the machine description of the Zip CPU
+;;		which is focused on atomic operations.
+;;
+;;
+;; Creator:	Dan Gisselquist, Ph.D.
+;;		Gisselquist Technology, LLC
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Copyright (C) 2015,2017, Gisselquist Technology, LLC
+;;
+;; This program is free software (firmware): you can redistribute it and/or
+;; modify it under the terms of  the GNU General Public License as published
+;; by the Free Software Foundation, either version 3 of the License, or (at
+;; your option) any later version.
+;;
+;; This program is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+;; FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+;; for more details.
+;;
+;; License:	GPL, v3, as defined and found on www.gnu.org,
+;;		http://www.gnu.org/licenses/gpl.html
+;;
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Atomic access Op-codes
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+;
+; First, the basic atomic_ operators, add, sub, ior, and, and xor
+;
+(define_insn "atomic_addsi"
+	[(set (match_operand:SI 0 "memory_operand" "+Q")
+		(plus:SI (match_dup 0)
+			(match_operand:SI 1 "zip_opb_single_operand_p" "rO")))
+	(match_operand 2 "const_int_operand" "")	; Memory model used
+	(clobber (match_scratch:SI 3 "=r"))	; Scratch register
+	(clobber (reg:CC CC_REG))]
+	"(ZIP_ATOMIC)"
+	"LOCK\n\tLW %0,%3\n\tADD %1,%3\n\tSW %3,%0"
+	[(set_attr "predicable" "no")])
+(define_insn "atomic_subsi"
+	[(set (match_operand:SI 0 "memory_operand" "+Q")
+		(minus:SI (match_dup 0)
+			(match_operand:SI 1 "zip_opb_single_operand_p" "rO")))
+	(match_operand 2 "const_int_operand" "")	; Memory model used
+	(clobber (match_scratch:SI 3 "=r"))	; Scratch register
+	(clobber (reg:CC CC_REG))]
+	"(ZIP_ATOMIC)"
+	"LOCK\n\tLW %0,%3\n\tSUB %1,%3\n\tSW %3,%0"
+	[(set_attr "predicable" "no")])
+(define_insn "atomic_iorsi"
+	[(set (match_operand:SI 0 "memory_operand" "+Q")
+		(ior:SI (match_dup 0)
+			(match_operand:SI 1 "zip_opb_single_operand_p" "rO")))
+	(match_operand 2 "const_int_operand" "")	; Memory model used
+	(clobber (match_scratch:SI 3 "=r"))	; Scratch register
+	(clobber (reg:CC CC_REG))]
+	"(ZIP_ATOMIC)"
+	"LOCK\n\tLW %0,%3\n\tOR %1,%3\n\tSW %3,%0"
+	[(set_attr "predicable" "no")])
+(define_expand "atomic_andsi"
+	[(match_operand:SI 0 "memory_operand" "+Q")
+	(match_operand:SI 1 "zip_opb_single_operand_p" "rO")
+	(match_operand 2 "" "")			; Memory model used
+	(clobber (reg:CC CC_REG))]
+	"(ZIP_ATOMIC)"
+	{
+		emit_insn(gen_reissue_atomic_andsi(operands[0], operands[1]));
+		DONE;
+	})
+(define_insn "reissue_atomic_andsi"
+	[(set (match_operand:SI 0 "memory_operand" "+Q")
+		(and:SI (match_dup 0)
+			(match_operand:SI 1 "zip_opb_single_operand_p" "rO")))
+	(clobber (match_scratch:SI 2 "=r"))	; Scratch register
+	(clobber (reg:CC CC_REG))]
+	"(ZIP_ATOMIC)"
+	"LOCK\n\tLW %0,%2\n\tAND %1,%2\n\tSW %2,%0"
+	[(set_attr "predicable" "no")])
+(define_expand "atomic_xorsi"
+	[(match_operand:SI 0 "memory_operand" "+Q")
+	(match_operand:SI 1 "zip_opb_single_operand_p" "rO")
+	(match_operand 2 "" "")			; Memory model used
+	(clobber (reg:CC CC_REG))]
+	"(ZIP_ATOMIC)"
+	{
+		emit_insn(gen_reissue_atomic_xorsi(operands[0], operands[1]));
+		DONE;
+	})
+(define_insn "reissue_atomic_xorsi"
+	[(set (match_operand:SI 0 "memory_operand" "+Q")
+		(xor:SI (match_dup 0)
+			(match_operand:SI 1 "zip_opb_single_operand_p" "rO")))
+	(clobber (match_scratch:SI 2 "=r"))	; Scratch register
+	(clobber (reg:CC CC_REG))]
+	"(ZIP_ATOMIC)"
+	"LOCK\n\tLW %0,%2\n\tXOR %1,%2\n\tSW %2,%0"
+	[(set_attr "predicable" "no")])
+;
+;
+; Given how the ZipCPU is put together, all SI LODs and STOs are atomic.  DI
+; loads and stores need the LOCK command, to keep from breaking within them.
+; Further, GCC expects that anything <= the word size can use a normal load
+; or store instruction.  Hence we don't need anything but the DI load and
+; stores.
+;
+(define_insn "atomic_loaddi"
+	[(set (match_operand:DI 0 "register_operand" "=r")
+		(match_operand:DI 1 "memory_operand" "Q"))
+	(match_operand 2 "const_int_operand" "")]
+	"(ZIP_HAS_DI)&&(ZIP_ATOMIC)"
+	"LOCK\n\tLW %1,%H0\t; Atomic Load:DI\n\tLW 4+%1,%L0"
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
+;
+(define_insn "atomic_storedi"
+	[(set (match_operand:DI 0 "memory_operand" "=Q")
+		(match_operand:DI 1 "register_operand" "r"))
+	(match_operand 2 "const_int_operand" "")]
+	"(ZIP_HAS_DI)&&(ZIP_ATOMIC)"
+	"LOCK\n\tSW %H1,%0\t; Atomic Store:DI\n\tSW %L1,4+%0"
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
+;
+;
+;
+;
+(define_insn "atomic_exchangesi"
+	[(set (match_operand:SI 0 "register_operand" "=r")
+			(match_operand:SI 1 "memory_operand" "+Q"))
+		(set (match_dup 1) (match_operand:SI 2 "register_operand" "r"))
+	(match_operand 3 "const_int_operand" "")]
+	"(ZIP_ATOMIC)"
+	"LOCK\n\tLW %1,%0\n\tSW %2,%1"
+	[(set_attr "predicable" "no")])
+;
+;
+;
+; Here's another set of the atomic operators, this time those that leave their
+; result in operand zero.
+;
+(define_expand "atomic_add_fetchsi"
+	[(match_operand:SI 0 "register_operand" "=r")
+	(match_operand:SI 1 "memory_operand" "+Q")
+	(match_operand:SI 2 "zip_opb_single_operand_p" "rO")
+	(match_operand 3 "const_int_operand" "")	;// Memory model used
+	(clobber (reg:CC CC_REG))]
+	"(ZIP_ATOMIC)"
+	{
+		emit_insn(gen_reissue_atomic_add_fetchsi(operands[0],
+			operands[1], operands[2]));
+		DONE;
+	})
+(define_insn "reissue_atomic_add_fetchsi"
+	[(set (match_operand:SI 0 "register_operand" "=r")
+		(plus:SI (match_operand:SI 1 "memory_operand" "+Q")
+			(match_operand:SI 2 "zip_opb_single_operand_p" "rO")))
+	(set (match_dup 1) (plus:SI (match_dup 1) (match_dup 2)))
+	(clobber (reg:CC CC_REG))]
+	"(ZIP_ATOMIC)"
+	"LOCK\n\tLW %1,%0\n\tADD %2,%0\n\tSW %0,%1"
+	[(set_attr "predicable" "no")])
+(define_expand "atomic_sub_fetchsi"
+	[(match_operand:SI 0 "register_operand" "=r")
+	(match_operand:SI 1 "memory_operand" "+Q")
+	(match_operand:SI 2 "zip_opb_single_operand_p" "rO")
+	(match_operand 3 "const_int_operand" "")
+	(clobber (reg:CC CC_REG))]
+	"(ZIP_ATOMIC)"
+	{
+		emit_insn(gen_reissue_atomic_sub_fetchsi(
+			operands[0], operands[1], operands[2]));
+		DONE;
+	})
+(define_insn "reissue_atomic_sub_fetchsi"
+	[(set (match_operand:SI 0 "register_operand" "=r")
+		(minus:SI (match_operand:SI 1 "memory_operand" "+Q")
+			(match_operand:SI 2 "zip_opb_single_operand_p" "rO")))
+	(set (match_dup 1) (minus:SI (match_dup 1) (match_dup 2)))
+	(clobber (reg:CC CC_REG))]
+	"(ZIP_ATOMIC)"
+	"LOCK\n\tLW %1,%0\n\tSUB %2,%0\n\tSW %0,%1"
+	[(set_attr "predicable" "no")])
+(define_expand "atomic_or_fetchsi"
+	[(match_operand:SI 0 "register_operand" "=r")
+	(match_operand:SI 1 "memory_operand" "+Q")
+	(match_operand:SI 2 "zip_opb_single_operand_p" "rO")
+	(match_operand 3 "const_int_operand" "")
+	(clobber (reg:CC CC_REG))]
+	"(ZIP_ATOMIC)"
+	{
+		emit_insn(gen_reissue_atomic_or_fetchsi(
+			operands[0], operands[1], operands[2]));
+		DONE;
+	})
+(define_insn "reissue_atomic_or_fetchsi"
+	[(set (match_operand:SI 0 "register_operand" "=r")
+		(ior:SI (match_operand:SI 1 "memory_operand" "+Q")
+			(match_operand:SI 2 "zip_opb_single_operand_p" "rO")))
+	(set (match_dup 1) (ior:SI (match_dup 1) (match_dup 2)))
+	(clobber (reg:CC CC_REG))]
+	"(ZIP_ATOMIC)"
+	"LOCK\n\tLW %1,%0\n\tOR %2,%0\n\tSW %0,%1"
+	[(set_attr "predicable" "no")])
+(define_expand "atomic_and_fetchsi"
+	[(match_operand:SI 0 "register_operand" "=r")
+	(match_operand:SI 1 "memory_operand" "+Q")
+	(match_operand:SI 2 "zip_opb_single_operand_p" "rO")
+	(match_operand 3 "const_int_operand" "")
+	(clobber (reg:CC CC_REG))]
+	"(ZIP_ATOMIC)"
+	{
+		emit_insn(gen_reissue_atomic_and_fetchsi(
+			operands[0], operands[1], operands[2]));
+		DONE;
+	})
+(define_insn "reissue_atomic_and_fetchsi"
+	[(set (match_operand:SI 0 "register_operand" "=r")
+		(and:SI (match_operand:SI 1 "memory_operand" "+Q")
+			(match_operand:SI 2 "zip_opb_single_operand_p" "rO")))
+	(set (match_dup 1) (and:SI (match_dup 1) (match_dup 2)))
+	(clobber (reg:CC CC_REG))]
+	"(ZIP_ATOMIC)"
+	"LOCK\n\tLW %1,%0\n\tAND %2,%0\n\tSW %0,%1"
+	[(set_attr "predicable" "no")])
+(define_expand "atomic_xor_fetchsi"
+	[(match_operand:SI 0 "register_operand" "=r")
+	(match_operand:SI 1 "memory_operand" "+Q")
+	(match_operand:SI 2 "zip_opb_single_operand_p" "rO")
+	(match_operand 3 "const_int_operand" "")			;// Memory model
+	(clobber (reg:CC CC_REG))]
+	"(ZIP_ATOMIC)"
+	{
+		emit_insn(gen_reissue_atomic_xor_fetchsi(
+			operands[0], operands[1], operands[2]));
+		DONE;
+	})
+(define_insn "reissue_atomic_xor_fetchsi"
+	[(set (match_operand:SI 0 "register_operand" "=r")
+		(xor:SI (match_operand:SI 1 "memory_operand" "+Q")
+			(match_operand:SI 2 "zip_opb_single_operand_p" "rO")))
+	(set (match_dup 1) (xor:SI (match_dup 1) (match_dup 2)))
+	(clobber (reg:CC CC_REG))]
+	"(ZIP_ATOMIC)"
+	"LOCK\n\tLW %1,%0\n\tXOR %2,%0\n\tSW %0,%1"
+	[(set_attr "predicable" "no")])
+;
+;
+;
+;
+(define_expand "atomic_fetch_addsi"
+	[(set (match_operand:SI 1 "memory_operand" "+Q")
+		(plus:SI (match_dup 1)
+			(match_operand:SI 2 "register_operand" "=r")))
+	(set (match_operand:SI 0 "register_operand" "=r")
+		(match_dup 1))
+	(set (match_dup 2) (plus:SI (match_dup 1) (match_dup 2)))
+	(match_operand 3 "const_int_operand" "")				; Memory model used
+	(clobber (reg:CC CC_REG))]
+	"(ZIP_ATOMIC)"
+	{
+		emit_insn(gen_reissue_atomic_fetch_addsi(
+			operands[0], operands[1], operands[2]));
+		DONE;
+	})
+(define_insn "reissue_atomic_fetch_addsi"
+	[(set (match_operand:SI 1 "memory_operand" "+Q")
+		(plus:SI (match_dup 1)
+			(match_operand:SI 2 "register_operand" "=r")))
+	(set (match_operand:SI 0 "register_operand" "=r")
+		(match_dup 1))
+	(set (match_dup 2) (plus:SI (match_dup 1) (match_dup 2)))
+	(clobber (reg:CC CC_REG))]
+	"(ZIP_ATOMIC)"
+	"LOCK\n\tLW %1,%0\n\tADD %0,%2\n\tSW %2,%1"
+	[(set_attr "predicable" "no")])
+(define_expand "atomic_fetch_subsi"
+	[(match_operand:SI 0 "register_operand" "=r")
+	(match_operand:SI 1 "memory_operand" "+Q")
+	(match_operand:SI 2 "zip_opb_single_operand_p" "rO")
+	(clobber (reg:CC CC_REG))]
+	"(ZIP_ATOMIC)"
+	{
+		emit_insn(gen_reissue_atomic_fetch_subsi(
+			operands[0], operands[1], operands[2]));
+		DONE;
+	})
+(define_insn "reissue_atomic_fetch_subsi"
+	[(set (match_operand:SI 0 "register_operand" "=r")
+		(minus:SI (match_operand:SI 1 "memory_operand" "+Q")
+			(match_operand:SI 2 "zip_opb_single_operand_p" "rO")))
+	(set (match_dup 1) (minus:SI (match_dup 1) (match_dup 2)))
+	(clobber (match_scratch:SI 3 "=r"))	; Scratch register
+	(clobber (reg:CC CC_REG))]
+	"(ZIP_ATOMIC)"
+	"NEG %2,%3\n\tLOCK\n\tLW %1,%0\n\tADD %0,%3\n\tSW %3,%1"
+	[(set_attr "predicable" "no")])
+(define_insn "atomic_fetch_orsi"
+	[(set (match_operand:SI 1 "memory_operand" "+Q")
+		(ior:SI (match_dup 1)
+			(match_operand:SI 2 "register_operand" "=r")))
+	(set (match_operand:SI 0 "register_operand" "=r")
+		(match_dup 1))
+	(set (match_dup 2) (ior:SI (match_dup 1) (match_dup 2)))
+	(match_operand 3 "const_int_operand" "")	; Memory model used
+	(clobber (reg:CC CC_REG))]
+	"(ZIP_ATOMIC)"
+	"LOCK\n\tLW %1,%0\n\tOR %0,%2\n\tSW %2,%1"
+	[(set_attr "predicable" "no")])
+(define_insn "atomic_fetch_andsi"
+	[(set (match_operand:SI 1 "memory_operand" "+Q")
+		(and:SI (match_dup 1)
+			(match_operand:SI 2 "register_operand" "=r")))
+	(set (match_operand:SI 0 "register_operand" "=r")
+		(match_dup 1))
+	(set (match_dup 2) (and:SI (match_dup 1) (match_dup 2)))
+	(match_operand 3 "const_int_operand" "")	; Memory model used
+	(clobber (reg:CC CC_REG))]
+	"(ZIP_ATOMIC)"
+	"LOCK\n\tLW %1,%0\n\tAND %0,%2\n\tSW %2,%1"
+	[(set_attr "predicable" "no")])
+(define_insn "atomic_fetch_xorsi"
+	[(set (match_operand:SI 1 "memory_operand" "+Q")
+		(xor:SI (match_dup 1)
+			(match_operand:SI 2 "register_operand" "=r")))
+	(set (match_operand:SI 0 "register_operand" "=r")
+		(match_dup 1))
+	(set (match_dup 2) (xor:SI (match_dup 1) (match_dup 2)))
+	(match_operand 3 "const_int_operand" "")	; Memory model used
+	(clobber (reg:CC CC_REG))]
+	"(ZIP_ATOMIC)"
+	"LOCK\n\tLW %1,%0\n\tXOR %0,%2\n\tSW %2,%1"
+	[(set_attr "predicable" "no")])
+;
+;
+;
+;
+(define_insn "atomic_test_and_set"
+	[(set (match_operand:SI 0 "register_operand" "=r")
+		(zero_extend:SI
+			(match_operand:QI 1 "memory_operand" "+Q")))
+	(set (match_dup 1) (const_int 1))
+	(match_operand 2 "const_int_operand" "")	; Memory model used
+	(clobber (match_scratch:SI 3 "=r"))]	; Scratch register
+	"(ZIP_ATOMIC)"
+	"LDI	1,%3
+	LOCK
+	LB	%1,%0
+	SB	%3,%1"
+	[(set_attr "predicable" "no")])
+;
+;
+;
+(define_expand "atomic_compare_and_swapsi"
+	[(match_operand:SI 0 "register_operand" "=r")	;; bool output
+	(match_operand:SI 1 "register_operand" "=r")	;; val output
+	(match_operand:SI 2 "memory_operand" "+Q")	;; memory
+	(match_operand:SI 3 "zip_opb_single_operand_p" "rO") ;; Expected
+	(match_operand:SI 4 "register_operand" "r")	;; Desired
+	(match_operand 5 "const_int_operand" "")	; is_weak
+	(match_operand 6 "const_int_operand" "")	; mem model on success
+	(match_operand 7 "const_int_operand" "")	; mem model on failure
+	]
+	"(ZIP_ATOMIC)"
+	{
+		emit_insn(gen_reissue_atomic_compare_and_swapsi(
+			operands[0], operands[1],
+			operands[2], operands[3],
+			operands[4]));
+		DONE;
+	})
+
+(define_insn "reissue_atomic_compare_and_swapsi"
+	[(set (match_operand:SI 0 "register_operand" "=r")
+		(if_then_else
+			(eq (match_operand:SI 2 "memory_operand" "+Q")
+				(match_operand:SI 3 "zip_opb_single_operand_p" "rO"))
+			(const_int 1)
+			(const_int 0)))
+	(set (match_operand:SI 1 "register_operand" "=r") (match_dup 2))
+	(set (match_dup 2) (if_then_else
+			(eq (match_dup 2) (match_dup 3))
+				(match_operand:SI 4 "register_operand" "r")
+				(match_dup 0)))]
+	"(ZIP_ATOMIC)"
+	"CLR %0
+	LOCK
+	LW %2,%1
+	CMP %3,%1
+	SW %4,%1
+	LDI.Z 1,%0"
+	[(set_attr "predicable" "no")])
+;
+;
+;
+;
+; STILL MISSING:
+;
+;	deprecated sync_* atomic functions
+;
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/config.gcc gcc-6.2.0-zip/gcc/config.gcc
--- gcc-6.2.0/gcc/config.gcc	2016-06-08 09:34:25.000000000 -0400
+++ gcc-6.2.0-zip/gcc/config.gcc	2016-12-31 16:41:06.258602919 -0500
@@ -493,6 +493,10 @@
 tilepro*-*-*)
 	cpu_type=tilepro
 	;;
+zip*)
+	cpu_type=zip
+	tmake_file=zip/t-zip
+	;;
 esac
 
 tm_file=${cpu_type}/${cpu_type}.h
@@ -3042,6 +3046,11 @@
 	c_target_objs="m32c-pragma.o"
 	cxx_target_objs="m32c-pragma.o"
  	;;
+zip*)
+	target_has_targetm_common=yes
+	tm_file="elfos.h newlib-stdint.h ${tm_file}"
+	tmake_file="${tmake_file} zip/t-zip"
+	;;
 *)
 	echo "*** Configuration ${target} not supported" 1>&2
 	exit 1
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/cse.c gcc-6.2.0-zip/gcc/cse.c
--- gcc-6.2.0/gcc/cse.c	2016-02-04 04:56:13.000000000 -0500
+++ gcc-6.2.0-zip/gcc/cse.c	2017-02-06 21:46:10.525049918 -0500
@@ -42,6 +42,16 @@
 #include "dbgcnt.h"
 #include "rtl-iter.h"
 
+
+// #define	DO_ZIP_DEBUGS
+#ifdef	DO_ZIP_DEBUGS
+#include <stdio.h>
+#define	ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s;%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+extern	void	zip_debug_rtx(const_rtx);
+#else
+#define	ZIP_DEBUG_LINE(STR,RTX)
+#endif
+
 #ifndef LOAD_EXTEND_OP
 #define LOAD_EXTEND_OP(M) UNKNOWN
 #endif
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/defaults.h gcc-6.2.0-zip/gcc/defaults.h
--- gcc-6.2.0/gcc/defaults.h	2016-01-04 09:30:50.000000000 -0500
+++ gcc-6.2.0-zip/gcc/defaults.h	2016-12-31 16:42:29.094087738 -0500
@@ -495,6 +495,8 @@
 #define LOG2_BITS_PER_UNIT 3
 #elif BITS_PER_UNIT == 16
 #define LOG2_BITS_PER_UNIT 4
+#elif BITS_PER_UNIT == 32
+#define LOG2_BITS_PER_UNIT 5
 #else
 #error Unknown BITS_PER_UNIT
 #endif
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/df-scan.c gcc-6.2.0-zip/gcc/df-scan.c
--- gcc-6.2.0/gcc/df-scan.c	2016-01-04 09:30:50.000000000 -0500
+++ gcc-6.2.0-zip/gcc/df-scan.c	2016-12-31 16:43:04.557867015 -0500
@@ -35,6 +35,14 @@
 #include "dumpfile.h"
 
 
+#ifdef	DO_ZIP_DEBUGS
+#include <stdio.h>
+#define	ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s;%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+extern	void	zip_debug_rtx(const_rtx);
+#else
+#define	ZIP_DEBUG_LINE(STR,RTX)
+#endif
+
 /* The set of hard registers in eliminables[i].from. */
 
 static HARD_REG_SET elim_reg_set;
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/emit-rtl.c gcc-6.2.0-zip/gcc/emit-rtl.c
--- gcc-6.2.0/gcc/emit-rtl.c	2016-02-16 18:12:19.000000000 -0500
+++ gcc-6.2.0-zip/gcc/emit-rtl.c	2016-12-31 16:43:35.065677060 -0500
@@ -59,6 +59,15 @@
 #include "stor-layout.h"
 #include "opts.h"
 
+
+#ifdef	DO_ZIP_DEBUGS
+#include <stdio.h>
+#define	ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s;%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+extern	void	zip_debug_rtx(const_rtx);
+#else
+#define	ZIP_DEBUG_LINE(STR,RTX)
+#endif
+
 struct target_rtl default_target_rtl;
 #if SWITCHABLE_TARGET
 struct target_rtl *this_target_rtl = &default_target_rtl;
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/final.c gcc-6.2.0-zip/gcc/final.c
--- gcc-6.2.0/gcc/final.c	2016-01-22 11:44:10.000000000 -0500
+++ gcc-6.2.0-zip/gcc/final.c	2017-02-06 15:35:15.410085646 -0500
@@ -79,6 +79,16 @@
 #include "rtl-iter.h"
 #include "print-rtl.h"
 
+
+// #define	DO_ZIP_DEBUGS
+#ifdef	DO_ZIP_DEBUGS
+#include <stdio.h>
+#define	ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s:%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+extern	void	zip_debug_rtx(const_rtx);
+#else
+#define	ZIP_DEBUG_LINE(STR,RTX)
+#endif
+
 #ifdef XCOFF_DEBUGGING_INFO
 #include "xcoffout.h"		/* Needed for external data declarations.  */
 #endif
@@ -2944,6 +2954,8 @@
 
 	current_output_insn = debug_insn = insn;
 
+ZIP_DEBUG_LINE("FINAL-INSN", insn);
+
 	/* Find the proper template for this insn.  */
 	templ = get_insn_template (insn_code_number, insn);
 
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/fold-const.c gcc-6.2.0-zip/gcc/fold-const.c
--- gcc-6.2.0/gcc/fold-const.c	2016-07-08 16:20:23.000000000 -0400
+++ gcc-6.2.0-zip/gcc/fold-const.c	2016-12-31 16:47:49.000093249 -0500
@@ -1247,7 +1247,7 @@
 	    wide_int w2 = arg2;
 	    f2.data.high = w2.elt (1);
 	    f2.data.low = w2.elt (0);
-	    f2.mode = SImode;
+	    f2.mode = word_mode;
 	  }
 	  break;
 
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/jump.c gcc-6.2.0-zip/gcc/jump.c
--- gcc-6.2.0/gcc/jump.c	2016-01-04 09:30:50.000000000 -0500
+++ gcc-6.2.0-zip/gcc/jump.c	2016-12-31 16:49:10.675582836 -0500
@@ -50,6 +50,15 @@
 #include "cfgrtl.h"
 #include "rtl-iter.h"
 
+
+#ifdef	DO_ZIP_DEBUGS
+#include <stdio.h>
+#define	ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s;%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+extern	void	zip_debug_rtx(const_rtx);
+#else
+#define	ZIP_DEBUG_LINE(STR,RTX)
+#endif
+
 /* Optimize jump y; x: ... y: jumpif... x?
    Don't know if it is worth bothering with.  */
 /* Optimize two cases of conditional jump to conditional jump?
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/loop-doloop.c gcc-6.2.0-zip/gcc/loop-doloop.c
--- gcc-6.2.0/gcc/loop-doloop.c	2016-01-14 18:12:53.000000000 -0500
+++ gcc-6.2.0-zip/gcc/loop-doloop.c	2016-12-31 16:50:27.099104820 -0500
@@ -37,6 +37,15 @@
 #include "regs.h"
 #include "df.h"
 
+
+#ifdef	DO_ZIP_DEBUGS
+#include <stdio.h>
+#define	ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s;%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+extern	void	zip_debug_rtx(const_rtx);
+#else
+#define	ZIP_DEBUG_LINE(STR,RTX)
+#endif
+
 /* This module is used to modify loops with a determinable number of
    iterations to use special low-overhead looping instructions.
 
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/loop-iv.c gcc-6.2.0-zip/gcc/loop-iv.c
--- gcc-6.2.0/gcc/loop-iv.c	2016-01-04 09:30:50.000000000 -0500
+++ gcc-6.2.0-zip/gcc/loop-iv.c	2016-12-31 16:52:42.034259845 -0500
@@ -1715,8 +1715,8 @@
   if (op0 != XEXP (cond, 0)
       || op1 != XEXP (cond, 1)
       || code != GET_CODE (cond)
-      || GET_MODE (cond) != SImode)
-    cond = gen_rtx_fmt_ee (code, SImode, op0, op1);
+      || GET_MODE (cond) != word_mode)
+    cond = gen_rtx_fmt_ee (code, word_mode, op0, op1);
 
   return cond;
 }
@@ -2083,9 +2083,9 @@
   rtx mmin, mmax, cond_over, cond_under;
 
   get_mode_bounds (mode, signed_p, iv->extend_mode, &mmin, &mmax);
-  cond_under = simplify_gen_relational (LT, SImode, iv->extend_mode,
+  cond_under = simplify_gen_relational (LT, word_mode, iv->extend_mode,
 					iv->base, mmin);
-  cond_over = simplify_gen_relational (GT, SImode, iv->extend_mode,
+  cond_over = simplify_gen_relational (GT, word_mode, iv->extend_mode,
 				       iv->base, mmax);
 
   switch (cond)
@@ -2464,7 +2464,7 @@
 	if (iv0.step == const0_rtx)
 	  {
 	    tmp = lowpart_subreg (mode, iv0.base, comp_mode);
-	    assumption = simplify_gen_relational (EQ, SImode, mode, tmp,
+	    assumption = simplify_gen_relational (EQ, word_mode, mode, tmp,
 						  mode_mmax);
 	    if (assumption == const_true_rtx)
 	      goto zero_iter_simplify;
@@ -2474,7 +2474,7 @@
 	else
 	  {
 	    tmp = lowpart_subreg (mode, iv1.base, comp_mode);
-	    assumption = simplify_gen_relational (EQ, SImode, mode, tmp,
+	    assumption = simplify_gen_relational (EQ, word_mode, mode, tmp,
 						  mode_mmin);
 	    if (assumption == const_true_rtx)
 	      goto zero_iter_simplify;
@@ -2561,10 +2561,10 @@
 	      bound = simplify_gen_binary (MINUS, comp_mode, bound, delta);
 	      bound = lowpart_subreg (mode, bound, comp_mode);
 	      tmp = lowpart_subreg (mode, iv0.base, comp_mode);
-	      may_xform = simplify_gen_relational (cond, SImode, mode,
+	      may_xform = simplify_gen_relational (cond, word_mode, mode,
 						   bound, tmp);
 	      may_not_xform = simplify_gen_relational (reverse_condition (cond),
-						       SImode, mode,
+						       word_mode, mode,
 						       bound, tmp);
 	    }
 	  else
@@ -2573,10 +2573,10 @@
 	      bound = simplify_gen_binary (PLUS, comp_mode, bound, delta);
 	      bound = lowpart_subreg (mode, bound, comp_mode);
 	      tmp = lowpart_subreg (mode, iv1.base, comp_mode);
-	      may_xform = simplify_gen_relational (cond, SImode, mode,
+	      may_xform = simplify_gen_relational (cond, word_mode, mode,
 						   tmp, bound);
 	      may_not_xform = simplify_gen_relational (reverse_condition (cond),
-						       SImode, mode,
+						       word_mode, mode,
 						       tmp, bound);
 	    }
 	}
@@ -2629,7 +2629,7 @@
 	  tmp0 = lowpart_subreg (mode, iv0.base, comp_mode);
 	  tmp1 = lowpart_subreg (mode, iv1.base, comp_mode);
 	  assumption = simplify_gen_relational (reverse_condition (cond),
-						SImode, mode, tmp0, tmp1);
+						word_mode, mode, tmp0, tmp1);
 	  if (assumption == const_true_rtx)
 	    goto zero_iter_simplify;
 	  else if (assumption != const0_rtx)
@@ -2671,7 +2671,7 @@
 
       tmp1 = lowpart_subreg (mode, iv1.base, comp_mode);
       tmp = simplify_gen_binary (UMOD, mode, tmp1, gen_int_mode (d, mode));
-      assumption = simplify_gen_relational (NE, SImode, mode, tmp, const0_rtx);
+      assumption = simplify_gen_relational (NE, word_mode, mode, tmp, const0_rtx);
       desc->infinite = alloc_EXPR_LIST (0, assumption, desc->infinite);
 
       tmp = simplify_gen_binary (UDIV, mode, tmp1, gen_int_mode (d, mode));
@@ -2703,19 +2703,19 @@
 	      /* If s is power of 2, we know that the loop is infinite if
 		 a % s <= b % s and b + s overflows.  */
 	      assumption = simplify_gen_relational (reverse_condition (cond),
-						    SImode, mode,
+						    word_mode, mode,
 						    tmp1, bound);
 
 	      t0 = simplify_gen_binary (UMOD, mode, copy_rtx (tmp0), step);
 	      t1 = simplify_gen_binary (UMOD, mode, copy_rtx (tmp1), step);
-	      tmp = simplify_gen_relational (cond, SImode, mode, t0, t1);
-	      assumption = simplify_gen_binary (AND, SImode, assumption, tmp);
+	      tmp = simplify_gen_relational (cond, word_mode, mode, t0, t1);
+	      assumption = simplify_gen_binary (AND, word_mode, assumption, tmp);
 	      desc->infinite =
 		      alloc_EXPR_LIST (0, assumption, desc->infinite);
 	    }
 	  else
 	    {
-	      assumption = simplify_gen_relational (cond, SImode, mode,
+	      assumption = simplify_gen_relational (cond, word_mode, mode,
 						    tmp1, bound);
 	      desc->assumptions =
 		      alloc_EXPR_LIST (0, assumption, desc->assumptions);
@@ -2724,7 +2724,7 @@
 	  tmp = simplify_gen_binary (PLUS, comp_mode, iv1.base, iv0.step);
 	  tmp = lowpart_subreg (mode, tmp, comp_mode);
 	  assumption = simplify_gen_relational (reverse_condition (cond),
-						SImode, mode, tmp0, tmp);
+						word_mode, mode, tmp0, tmp);
 
 	  delta = simplify_gen_binary (PLUS, mode, tmp1, step);
 	  delta = simplify_gen_binary (MINUS, mode, delta, tmp0);
@@ -2747,19 +2747,19 @@
 	      /* If s is power of 2, we know that the loop is infinite if
 		 a % s <= b % s and a - s overflows.  */
 	      assumption = simplify_gen_relational (reverse_condition (cond),
-						    SImode, mode,
+						    word_mode, mode,
 						    bound, tmp0);
 
 	      t0 = simplify_gen_binary (UMOD, mode, copy_rtx (tmp0), step);
 	      t1 = simplify_gen_binary (UMOD, mode, copy_rtx (tmp1), step);
-	      tmp = simplify_gen_relational (cond, SImode, mode, t0, t1);
-	      assumption = simplify_gen_binary (AND, SImode, assumption, tmp);
+	      tmp = simplify_gen_relational (cond, word_mode, mode, t0, t1);
+	      assumption = simplify_gen_binary (AND, word_mode, assumption, tmp);
 	      desc->infinite =
 		      alloc_EXPR_LIST (0, assumption, desc->infinite);
 	    }
 	  else
 	    {
-	      assumption = simplify_gen_relational (cond, SImode, mode,
+	      assumption = simplify_gen_relational (cond, word_mode, mode,
 						    bound, tmp0);
 	      desc->assumptions =
 		      alloc_EXPR_LIST (0, assumption, desc->assumptions);
@@ -2768,7 +2768,7 @@
 	  tmp = simplify_gen_binary (PLUS, comp_mode, iv0.base, iv1.step);
 	  tmp = lowpart_subreg (mode, tmp, comp_mode);
 	  assumption = simplify_gen_relational (reverse_condition (cond),
-						SImode, mode,
+						word_mode, mode,
 						tmp, tmp1);
 	  delta = simplify_gen_binary (MINUS, mode, tmp0, step);
 	  delta = simplify_gen_binary (MINUS, mode, tmp1, delta);
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/optabs.c gcc-6.2.0-zip/gcc/optabs.c
--- gcc-6.2.0/gcc/optabs.c	2016-02-16 10:15:40.000000000 -0500
+++ gcc-6.2.0-zip/gcc/optabs.c	2017-03-01 15:46:15.660221429 -0500
@@ -43,6 +43,17 @@
 #include "optabs-tree.h"
 #include "libfuncs.h"
 
+
+// #define	DO_ZIP_DEBUGS
+#ifdef	DO_ZIP_DEBUGS
+#include <stdio.h>
+#define	ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s:%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+extern	void	zip_debug_rtx(const_rtx);
+extern	void	zip_debug_rtx_pfx(const char *,const_rtx);
+#else
+#define	ZIP_DEBUG_LINE(STR,RTX)
+#endif
+
 static void prepare_float_lib_cmp (rtx, rtx, enum rtx_code, rtx *,
 				   machine_mode *);
 static rtx expand_unop_direct (machine_mode, optab, rtx, rtx, int);
@@ -6985,6 +6996,15 @@
 maybe_gen_insn (enum insn_code icode, unsigned int nops,
 		struct expand_operand *ops)
 {
+#ifdef	DO_ZIP_DEBUGS
+fprintf(stderr, "ICODE = %d\n", icode);
+fprintf(stderr, "NOPS  = %d\n", nops);
+for(int i=0; i<nops; i++) {
+	char	str[10];
+	sprintf(str, "Op %d: ", i);
+	zip_debug_rtx_pfx(str, ops[i].value);
+}
+#endif
   gcc_assert (nops == (unsigned int) insn_data[(int) icode].n_generator_args);
   if (!maybe_legitimize_operands (icode, 0, nops, ops))
     return NULL;
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/recog.c gcc-6.2.0-zip/gcc/recog.c
--- gcc-6.2.0/gcc/recog.c	2016-01-29 13:47:17.000000000 -0500
+++ gcc-6.2.0-zip/gcc/recog.c	2017-02-06 15:47:48.493946049 -0500
@@ -40,6 +40,16 @@
 #include "reload.h"
 #include "tree-pass.h"
 
+
+// #define	DO_ZIP_DEBUGS
+#ifdef	DO_ZIP_DEBUGS
+#include <stdio.h>
+#define	ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s:%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+extern	void	zip_debug_rtx(const_rtx);
+#else
+#define	ZIP_DEBUG_LINE(STR,RTX)
+#endif
+
 #ifndef STACK_POP_CODE
 #if STACK_GROWS_DOWNWARD
 #define STACK_POP_CODE POST_INC
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/reload1.c gcc-6.2.0-zip/gcc/reload1.c
--- gcc-6.2.0/gcc/reload1.c	2016-03-18 04:25:57.000000000 -0400
+++ gcc-6.2.0-zip/gcc/reload1.c	2017-02-06 15:54:21.067740343 -0500
@@ -42,6 +42,15 @@
 #include "dumpfile.h"
 #include "rtl-iter.h"
 
+#ifdef	DO_ZIP_DEBUGS
+#include <stdio.h>
+#define	ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s:%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+extern	void	zip_debug_rtx(const_rtx);
+#else
+#define	ZIP_DEBUG_LINE(STR,RTX)
+#endif
+
+
 /* This file contains the reload pass of the compiler, which is
    run after register allocation has been done.  It checks that
    each insn is valid (operands required to be in registers really
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/reload.c gcc-6.2.0-zip/gcc/reload.c
--- gcc-6.2.0/gcc/reload.c	2016-02-13 20:37:40.000000000 -0500
+++ gcc-6.2.0-zip/gcc/reload.c	2017-03-01 15:46:26.784221658 -0500
@@ -106,6 +106,15 @@
 #include "addresses.h"
 #include "params.h"
 
+// #define	DO_ZIP_DEBUGS
+#ifdef	DO_ZIP_DEBUGS
+#include <stdio.h>
+#define	ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s:%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+extern	void	zip_debug_rtx(const_rtx);
+#else
+#define	ZIP_DEBUG_LINE(STR,RTX)
+#endif
+
 /* True if X is a constant that can be forced into the constant pool.
    MODE is the mode of the operand, or VOIDmode if not known.  */
 #define CONST_POOL_OK_P(MODE, X)		\
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/testsuite/lib/target-supports.exp gcc-6.2.0-zip/gcc/testsuite/lib/target-supports.exp
--- gcc-6.2.0/gcc/testsuite/lib/target-supports.exp	2016-07-05 13:54:02.000000000 -0400
+++ gcc-6.2.0-zip/gcc/testsuite/lib/target-supports.exp	2016-12-31 16:59:52.719091392 -0500
@@ -545,6 +545,11 @@
 	return 0
     }
 
+    # ZipCPU doesn't support profiling (yet).
+    if { [istarget zip*] } {
+	return 0
+    }
+
     # cygwin does not support -p.
     if { [istarget *-*-cygwin*] && $test_what == "-p" } {
 	return 0
@@ -1090,6 +1095,12 @@
 	}]
     }
 
+    # No real hardware FPU support for ZipCPU yet --- even though the
+    # instruction set supports it, the CPU just isn't ready (yet).
+    if { [istarget zip*] } {
+        return 0
+    }
+
     # This proc is actually checking the availabilty of FPU
     # support for doubles, so on the RX we must fail if the
     # 64-bit double multilib has been selected.
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/tree-ssa-math-opts.c gcc-6.2.0-zip/gcc/tree-ssa-math-opts.c
--- gcc-6.2.0/gcc/tree-ssa-math-opts.c	2016-04-04 11:42:19.000000000 -0400
+++ gcc-6.2.0-zip/gcc/tree-ssa-math-opts.c	2016-12-31 17:02:24.405602214 -0500
@@ -983,7 +983,7 @@
     {
       if (val & 1)
 	{
-	  digit = val & ((1 << POWI_WINDOW_SIZE) - 1);
+	  digit = val & ((1l << POWI_WINDOW_SIZE) - 1);
 	  result += powi_lookup_cost (digit, cache)
 		    + POWI_WINDOW_SIZE + 1;
 	  val >>= POWI_WINDOW_SIZE;
@@ -1023,7 +1023,7 @@
     }
   else if (n & 1)
     {
-      digit = n & ((1 << POWI_WINDOW_SIZE) - 1);
+      digit = n & ((1l << POWI_WINDOW_SIZE) - 1);
       op0 = powi_as_mults_1 (gsi, loc, type, n - digit, cache);
       op1 = powi_as_mults_1 (gsi, loc, type, digit, cache);
     }
@@ -1957,7 +1957,7 @@
 };
 
 #define BITS_PER_MARKER 8
-#define MARKER_MASK ((1 << BITS_PER_MARKER) - 1)
+#define MARKER_MASK ((1l << BITS_PER_MARKER) - 1)
 #define MARKER_BYTE_UNKNOWN MARKER_MASK
 #define HEAD_MARKER(n, size) \
   ((n) & ((uint64_t) MARKER_MASK << (((size) - 1) * BITS_PER_MARKER)))
@@ -1993,7 +1993,7 @@
   /* Zero out the extra bits of N in order to avoid them being shifted
      into the significant bits.  */
   if (size < 64 / BITS_PER_MARKER)
-    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
+    n->n &= ((uint64_t) 1l << (size * BITS_PER_MARKER)) - 1;
 
   switch (code)
     {
@@ -2020,7 +2020,7 @@
     }
   /* Zero unused bits for size.  */
   if (size < 64 / BITS_PER_MARKER)
-    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
+    n->n &= ((uint64_t) 1l << (size * BITS_PER_MARKER)) - 1;
   return true;
 }
 
@@ -2067,7 +2067,7 @@
   n->n = CMPNOP;
 
   if (size < 64 / BITS_PER_MARKER)
-    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
+    n->n &= ((uint64_t) 1l << (size * BITS_PER_MARKER)) - 1;
 
   return true;
 }
@@ -2372,7 +2372,7 @@
 	      {
 		/* If STMT casts to a smaller type mask out the bits not
 		   belonging to the target type.  */
-		n->n &= ((uint64_t) 1 << (type_size * BITS_PER_MARKER)) - 1;
+		n->n &= ((uint64_t) 1l << (type_size * BITS_PER_MARKER)) - 1;
 	      }
 	    n->type = type;
 	    if (!n->base_addr)
@@ -2489,7 +2489,7 @@
     {
       uint64_t mask;
 
-      mask = ((uint64_t) 1 << (n->range * BITS_PER_MARKER)) - 1;
+      mask = ((uint64_t) 1l << (n->range * BITS_PER_MARKER)) - 1;
       cmpxchg >>= (64 / BITS_PER_MARKER - n->range) * BITS_PER_MARKER;
       cmpnop &= mask;
     }
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/varasm.c gcc-6.2.0-zip/gcc/varasm.c
--- gcc-6.2.0/gcc/varasm.c	2016-03-31 11:30:33.000000000 -0400
+++ gcc-6.2.0-zip/gcc/varasm.c	2016-12-31 17:03:08.629193673 -0500
@@ -2771,7 +2771,7 @@
 
   /* Put out the first word with the specified alignment.  */
   if (reverse)
-    elt = flip_storage_order (SImode, gen_int_mode (data[nelts - 1], SImode));
+    elt = flip_storage_order (word_mode, gen_int_mode (data[nelts - 1], SImode));
   else
     elt = GEN_INT (data[0]);
   assemble_integer (elt, MIN (nunits, units_per), align, 1);
@@ -2783,7 +2783,7 @@
   for (int i = 1; i < nelts; i++)
     {
       if (reverse)
-	elt = flip_storage_order (SImode,
+	elt = flip_storage_order (word_mode,
 				  gen_int_mode (data[nelts - 1 - i], SImode));
       else
 	elt = GEN_INT (data[i]);
diff -Naur '--exclude=*.swp' gcc-6.2.0/libgcc/config/zip/sfp-machine.h gcc-6.2.0-zip/libgcc/config/zip/sfp-machine.h
--- gcc-6.2.0/libgcc/config/zip/sfp-machine.h	1969-12-31 19:00:00.000000000 -0500
+++ gcc-6.2.0-zip/libgcc/config/zip/sfp-machine.h	2017-01-27 12:27:05.094921759 -0500
@@ -0,0 +1,53 @@
+#ifndef	ZIP_SFP_MACHINE_H
+#define	ZIP_SFP_MACHINE_H
+
+#define	__BIG_ENDIAN	4321
+#define	__BYTE_ORDER	__BIG_ENDIAN
+
+#define	_FP_W_TYPE_SIZE	32
+#define	_FP_W_TYPE	unsigned int
+#define	_FP_WS_TYPE	signed int
+#define	_FP_I_TYPE	long
+
+#define	_FP_MUL_MEAT_S(R,X,Y) _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm)
+#define	_FP_MUL_MEAT_D(R,X,Y) _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
+
+#define	_FP_DIV_MEAT_S(R,X,Y)	_FP_DIV_MEAT_1_loop(S,R,X,Y)
+#define	_FP_DIV_MEAT_D(R,X,Y)	_FP_DIV_MEAT_2_udiv(D,R,X,Y)
+
+#define	_FP_NANFRAC_S	((_FP_QNANBIT_S<<1)-1)
+#define	_FP_NANFRAC_D	((_FP_QNANBIT_D<<1)-1), -1
+
+#define	_FP_QNANNEGATEDP	0
+#define	_FP_NANSIGN_S		0
+#define	_FP_NANSIGN_D		0
+#define	_FP_KEEPNANFRACP	1
+
+/* Someone please check this.  --- copied from one of many other places  */
+#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP)                      \
+  do {                                                          \
+    if ((_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs)          \
+        && !(_FP_FRAC_HIGH_RAW_##fs(Y) & _FP_QNANBIT_##fs))     \
+      {                                                         \
+        R##_s = Y##_s;                                          \
+        _FP_FRAC_COPY_##wc(R,Y);                                \
+      }                                                         \
+    else                                                        \
+      {                                                         \
+        R##_s = X##_s;                                          \
+        _FP_FRAC_COPY_##wc(R,X);                                \
+      }                                                         \
+    R##_c = FP_CLS_NAN;                                         \
+  } while (0)
+
+/* Not checked.  */
+#define _FP_TININESS_AFTER_ROUNDING 0
+
+#ifndef	__BYTE_ORDER
+#define	__BYTE_ORDER	__BIG_ENDIAN
+#endif
+
+#define	strong_alias(name, aliasname) _strong_alias(name, aliasname)
+#define	_strong_alias(name, aliasname)	\
+	extern __typeof(name) aliasname __attribute__ ((alias (#name)));
+#endif
diff -Naur '--exclude=*.swp' gcc-6.2.0/libgcc/config.host gcc-6.2.0-zip/libgcc/config.host
--- gcc-6.2.0/libgcc/config.host	2016-05-17 02:15:52.000000000 -0400
+++ gcc-6.2.0-zip/libgcc/config.host	2017-01-27 12:28:31.470410459 -0500
@@ -197,6 +197,9 @@
 tic6x-*-*)
 	cpu_type=c6x
 	;;
+zip*)
+	cpu_type=zip
+	;;
 esac
 
 # Common parts for widely ported systems.
@@ -1328,6 +1331,10 @@
 	tmake_file="$tmake_file nvptx/t-nvptx"
 	extra_parts="crt0.o"
 	;;
+zip*)
+	tmake_file="$tmake_file t-softfp-sfdf t-softfp-excl t-softfp"
+	# extra_parts="crt0.o"
+	;;
 *)
 	echo "*** Configuration ${host} not supported" 1>&2
 	exit 1
diff -Naur '--exclude=*.swp' gcc-6.2.0/libgcc/configure gcc-6.2.0-zip/libgcc/configure
--- gcc-6.2.0/libgcc/configure	2016-04-11 15:45:35.000000000 -0400
+++ gcc-6.2.0-zip/libgcc/configure	2017-01-27 15:53:43.141531475 -0500
@@ -3805,13 +3805,13 @@
   CFLAGS=$ac_save_CFLAGS
 elif test $ac_cv_prog_cc_g = yes; then
   if test "$GCC" = yes; then
-    CFLAGS="-g -O2"
+    CFLAGS="-O3"
   else
     CFLAGS="-g"
   fi
 else
   if test "$GCC" = yes; then
-    CFLAGS="-O2"
+    CFLAGS="-O3"
   else
     CFLAGS=
   fi
diff -Naur '--exclude=*.swp' gcc-6.2.0/libgcc/Makefile.in gcc-6.2.0-zip/libgcc/Makefile.in
--- gcc-6.2.0/libgcc/Makefile.in	2016-02-25 07:23:52.000000000 -0500
+++ gcc-6.2.0-zip/libgcc/Makefile.in	2017-01-27 15:54:32.241240828 -0500
@@ -229,8 +229,8 @@
 
 # Options to use when compiling libgcc2.a.
 #
-LIBGCC2_DEBUG_CFLAGS = -g
-LIBGCC2_CFLAGS = -O2 $(LIBGCC2_INCLUDES) $(GCC_CFLAGS) $(HOST_LIBGCC2_CFLAGS) \
+LIBGCC2_DEBUG_CFLAGS =
+LIBGCC2_CFLAGS = -O3 $(LIBGCC2_INCLUDES) $(GCC_CFLAGS) $(HOST_LIBGCC2_CFLAGS) \
 		 $(LIBGCC2_DEBUG_CFLAGS) -DIN_LIBGCC2 \
 		 -fbuilding-libgcc -fno-stack-protector \
 		 $(INHIBIT_LIBC_CFLAGS)
@@ -284,7 +284,7 @@
 		  $(INCLUDES) @set_have_cc_tls@ @set_use_emutls@
 
 # Options to use when compiling crtbegin/end.
-CRTSTUFF_CFLAGS = -O2 $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -g0 \
+CRTSTUFF_CFLAGS = -O3 $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -g0 \
   $(NO_PIE_CFLAGS) -finhibit-size-directive -fno-inline -fno-exceptions \
   -fno-zero-initialized-in-bss -fno-toplevel-reorder -fno-tree-vectorize \
   -fbuilding-libgcc -fno-stack-protector $(FORCE_EXPLICIT_EH_REGISTRY) \
diff -Naur '--exclude=*.swp' gcc-6.2.0/libgomp/configure.tgt gcc-6.2.0-zip/libgomp/configure.tgt
--- gcc-6.2.0/libgomp/configure.tgt	2015-09-03 12:20:35.000000000 -0400
+++ gcc-6.2.0-zip/libgomp/configure.tgt	2016-12-31 17:06:26.795473062 -0500
@@ -161,6 +161,9 @@
 	    config_path="rtems posix"
 	fi
 	;;
+  zip*)
+	config_path="bsd posix"
+	;;
 
   *)
 	;;
 

Go to most recent revision | Compare with Previous | Blame | View Log

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.