OpenCores
URL https://opencores.org/ocsvn/zipcpu/zipcpu/trunk

Subversion Repositories zipcpu

zipcpu/trunk/sw/gcc-zippatch.patch - Diff between revs 202 and 209


Rev 202 vs Rev 209 (where the two revisions differ, the Rev 202 line appears immediately above the corresponding Rev 209 line)
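For orientation: a patch of this form is applied to an unpacked GCC 6.2.0 source tree before configuring the ZipCPU cross-compiler. The sketch below is only an illustration of that workflow; the archive name, install prefix, and configure options are assumptions for the example rather than anything recorded in this patch (only the gcc-6.2.0 directory names and the zip target come from the hunks that follow).

    # Hypothetical application of the patch (options here are assumptions, not part of the patch)
    tar xf gcc-6.2.0.tar.bz2
    cd gcc-6.2.0 && patch -p1 < ../gcc-zippatch.patch && cd ..
    mkdir build-gcc && cd build-gcc
    ../gcc-6.2.0/configure --target=zip --prefix=/opt/zipcpu \
            --enable-languages=c --without-headers --disable-nls
    make all-gcc && make install-gcc

With the config.sub entries added below, a bare --target=zip canonicalizes to zip-unknown-none, which is the triplet the later configure and configure.ac hunks match with their zip*) cases.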
diff -Naur '--exclude=*.swp' gcc-6.2.0/config.sub gcc-6.2.0-zip/config.sub
--- gcc-6.2.0/config.sub        2015-12-31 16:13:28.000000000 -0500
+++ gcc-6.2.0-zip/config.sub    2017-01-11 11:07:21.116065311 -0500
@@ -355,6 +355,14 @@
        xscaleel)
                basic_machine=armel-unknown
                ;;
+       zip-*-linux*)
+               basic_machine=zip
+               os=-linux
+               ;;
+       zip*)
+               basic_machine=zip-unknown
+               os=-none
+               ;;
 
        # We use `pc' rather than `unknown'
        # because (1) that's what they normally are, and
diff -Naur '--exclude=*.swp' gcc-6.2.0/configure gcc-6.2.0-zip/configure
--- gcc-6.2.0/configure 2016-03-17 18:54:19.000000000 -0400
+++ gcc-6.2.0-zip/configure     2017-02-06 21:54:22.244807700 -0500
@@ -3548,6 +3548,44 @@
  ft32-*-*)
    noconfigdirs="$noconfigdirs ${libgcj}"
    ;;
+  zip*)
+    noconfigdirs="$noconfigdirs ${libgcj}"
+    noconfigdirs="$noconfigdirs target-boehm-gc"
+    noconfigdirs="$noconfigdirs target-libgfortran"
+    # noconfigdirs="$noconfigdirs target-libsanitizer"
+    # noconfigdirs="$noconfigdirs target-libada"
+    # noconfigdirs="$noconfigdirs target-libatomic"
+    # noconfigdirs="$noconfigdirs target-libcilkrts"
+    # noconfigdirs="$noconfigdirs target-libitm"
+    # noconfigdirs="$noconfigdirs target-libquadmath"
+    # noconfigdirs="$noconfigdirs target-libstdc++-v3"
+    # noconfigdirs="$noconfigdirs target-libssp"
+    # noconfigdirs="$noconfigdirs target-libgo"
+    # noconfigdirs="$noconfigdirs target-libgomp"
+    # noconfigdirs="$noconfigdirs target-libvtv"
+    # noconfigdirs="$noconfigdirs target-libobjc"
+       # target-libgcc
+       #       target-liboffloadmic
+       #       target-libmpx   # Only gets enabled by request
+       #       target-libbacktrace
+       #       ${libgcj}
+       #       target-boehm-gc
+       #       target-libada
+       #       target-libatomic
+       #       target-libcilkrts
+       #       target-libgfortran
+       #       target-libgo
+       #       target-libgomp
+       #       target-libitm
+       #       target-libobjc
+       #       target-libquadmath
+       #       target-libsanitizer
+       #       target-libstdc++-v3
+       #       target-libssp
+       #       target-libvtv
+       # target-libgloss
+       # target-newlib
+    ;;
   *-*-lynxos*)
    noconfigdirs="$noconfigdirs ${libgcj}"
    ;;
@@ -3575,6 +3613,9 @@
    *-*-aix*)
       noconfigdirs="$noconfigdirs target-libgo"
       ;;
+    zip*)
+       noconfigdirs="$noconfigdirs target-libgo"
+       ;;
    esac
 fi
 
@@ -3971,6 +4012,9 @@
  vax-*-*)
    noconfigdirs="$noconfigdirs target-newlib target-libgloss"
    ;;
+  zip*)
+    noconfigdirs="$noconfigdirs target-libffi target-boehm-gc gdb gprof"
+    ;;
 esac
 
 # If we aren't building newlib, then don't build libgloss, since libgloss
@@ -6785,16 +6829,16 @@
 # CFLAGS_FOR_TARGET and CXXFLAGS_FOR_TARGET.
 if test "x$CFLAGS_FOR_TARGET" = x; then
   if test "x${is_cross_compiler}" = xyes; then
-    CFLAGS_FOR_TARGET="-g -O2"
+    CFLAGS_FOR_TARGET="-O3"
   else
     CFLAGS_FOR_TARGET=$CFLAGS
     case " $CFLAGS " in
-      *" -O2 "*) ;;
-      *) CFLAGS_FOR_TARGET="-O2 $CFLAGS_FOR_TARGET" ;;
+      *" -O3 "*) ;;
+      *) CFLAGS_FOR_TARGET="-O3 $CFLAGS_FOR_TARGET" ;;
     esac
     case " $CFLAGS " in
       *" -g "* | *" -g3 "*) ;;
-      *) CFLAGS_FOR_TARGET="-g $CFLAGS_FOR_TARGET" ;;
+      *) CFLAGS_FOR_TARGET="$CFLAGS_FOR_TARGET" ;;
     esac
   fi
 fi
@@ -6802,16 +6846,16 @@
 
 if test "x$CXXFLAGS_FOR_TARGET" = x; then
   if test "x${is_cross_compiler}" = xyes; then
-    CXXFLAGS_FOR_TARGET="-g -O2"
+    CXXFLAGS_FOR_TARGET="-O3"
   else
     CXXFLAGS_FOR_TARGET=$CXXFLAGS
     case " $CXXFLAGS " in
-      *" -O2 "*) ;;
-      *) CXXFLAGS_FOR_TARGET="-O2 $CXXFLAGS_FOR_TARGET" ;;
+      *" -O3 "*) ;;
+      *) CXXFLAGS_FOR_TARGET="-O3 $CXXFLAGS_FOR_TARGET" ;;
     esac
     case " $CXXFLAGS " in
       *" -g "* | *" -g3 "*) ;;
-      *) CXXFLAGS_FOR_TARGET="-g $CXXFLAGS_FOR_TARGET" ;;
+      *) CXXFLAGS_FOR_TARGET="$CXXFLAGS_FOR_TARGET" ;;
     esac
   fi
 fi
diff -Naur '--exclude=*.swp' gcc-6.2.0/configure.ac gcc-6.2.0-zip/configure.ac
--- gcc-6.2.0/configure.ac      2016-03-17 18:54:19.000000000 -0400
+++ gcc-6.2.0-zip/configure.ac  2017-01-10 12:43:23.819301273 -0500
@@ -884,6 +884,9 @@
  ft32-*-*)
    noconfigdirs="$noconfigdirs ${libgcj}"
    ;;
+  zip*)
+    noconfigdirs="$noconfigdirs ${libgcj}"
+    ;;
   *-*-lynxos*)
    noconfigdirs="$noconfigdirs ${libgcj}"
    ;;
@@ -911,6 +914,9 @@
    *-*-aix*)
       noconfigdirs="$noconfigdirs target-libgo"
       ;;
+    zip*)
+       noconfigdirs="$noconfigdirs target-libgo"
+       ;;
    esac
 fi
 
@@ -1307,6 +1313,10 @@
  vax-*-*)
    noconfigdirs="$noconfigdirs target-newlib target-libgloss"
    ;;
+  zip*)
+    noconfigdirs="$noconfigdirs target-libffi target-boehm-gc gdb gprof ${libgcj}"
+    unsupported_languages="$unsupported_languages fortran"
+    ;;
 esac
 
 # If we aren't building newlib, then don't build libgloss, since libgloss
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/cfgexpand.c gcc-6.2.0-zip/gcc/cfgexpand.c
--- gcc-6.2.0/gcc/cfgexpand.c   2016-04-27 08:23:50.000000000 -0400
+++ gcc-6.2.0-zip/gcc/cfgexpand.c       2016-12-31 16:38:36.195534819 -0500
+++ gcc-6.2.0-zip/gcc/cfgexpand.c       2018-06-05 21:18:19.438798488 -0400
@@ -74,6 +74,15 @@
@@ -74,6 +74,16 @@
 #include "tree-chkp.h"
 #include "rtl-chkp.h"
 
+
+// #define     DO_ZIP_DEBUGS
+#ifdef DO_ZIP_DEBUGS
+#include <stdio.h>
+#define        ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s;%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+extern void    zip_debug_rtx(const_rtx);
+#else
+#define        ZIP_DEBUG_LINE(STR,RTX)
+#endif
+
 /* Some systems use __main in a way incompatible with its use in gcc, in these
    cases use the macros NAME__MAIN to give a quoted symbol and SYMBOL__MAIN to
    give the same symbol without quotes for an alternative entry point.  You
@@ -1172,7 +1181,7 @@
@@ -1172,7 +1182,7 @@
                base_align = crtl->max_used_stack_slot_alignment;
              else
                base_align = MAX (crtl->max_used_stack_slot_alignment,
-                                 GET_MODE_ALIGNMENT (SImode)
+                                 GET_MODE_ALIGNMENT (word_mode)
                                  << ASAN_SHADOW_SHIFT);
            }
          else
@@ -2225,7 +2234,7 @@
@@ -2225,7 +2235,7 @@
          data.asan_vec.safe_push (offset);
          /* Leave space for alignment if STRICT_ALIGNMENT.  */
          if (STRICT_ALIGNMENT)
-           alloc_stack_frame_space ((GET_MODE_ALIGNMENT (SImode)
+           alloc_stack_frame_space ((GET_MODE_ALIGNMENT (word_mode)
                                      << ASAN_SHADOW_SHIFT)
                                     / BITS_PER_UNIT, 1);
 
@@ -5745,7 +5754,7 @@
@@ -5745,7 +5755,7 @@
       && (last = get_last_insn ())
       && JUMP_P (last))
     {
-      rtx dummy = gen_reg_rtx (SImode);
+      rtx dummy = gen_reg_rtx (word_mode);
       emit_insn_after_noloc (gen_move_insn (dummy, dummy), last, NULL);
     }
 
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/cgraphbuild.c gcc-6.2.0-zip/gcc/cgraphbuild.c
--- gcc-6.2.0/gcc/cgraphbuild.c 2016-01-04 09:30:50.000000000 -0500
+++ gcc-6.2.0-zip/gcc/cgraphbuild.c     2016-12-31 16:39:44.963107994 -0500
@@ -32,6 +32,15 @@
 #include "ipa-utils.h"
 #include "except.h"
 
+
+#ifdef DO_ZIP_DEBUGS
+#include <stdio.h>
+#define        ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s;%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+extern void    zip_debug_rtx(const_rtx);
+#else
+#define        ZIP_DEBUG_LINE(STR,RTX)
+#endif
+
 /* Context of record_reference.  */
 struct record_reference_ctx
 {
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/combine.c gcc-6.2.0-zip/gcc/combine.c
--- gcc-6.2.0/gcc/combine.c     2016-08-08 06:06:15.000000000 -0400
+++ gcc-6.2.0-zip/gcc/combine.c 2017-02-03 09:25:19.676720321 -0500
@@ -103,6 +103,15 @@
 #include "rtl-iter.h"
 #include "print-rtl.h"
 
+#define        DO_ZIP_DEBUGS
+#ifdef DO_ZIP_DEBUGS
+#include <stdio.h>
+#define        ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s:%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+extern void    zip_debug_rtx(const_rtx);
+#else
+#define        ZIP_DEBUG_LINE(STR,RTX)
+#endif
+
 #ifndef LOAD_EXTEND_OP
 #define LOAD_EXTEND_OP(M) UNKNOWN
 #endif
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/common/config/zip/zip-common.c gcc-6.2.0-zip/gcc/common/config/zip/zip-common.c
--- gcc-6.2.0/gcc/common/config/zip/zip-common.c        1969-12-31 19:00:00.000000000 -0500
+++ gcc-6.2.0-zip/gcc/common/config/zip/zip-common.c    2017-01-11 09:41:34.483106099 -0500
@@ -0,0 +1,52 @@
+////////////////////////////////////////////////////////////////////////////////
+//
+// Filename:   common/config/zip/zip-common.c
+//
+// Project:    Zip CPU backend for the GNU Compiler Collection
+//
+// Purpose:    To eliminate the frame register automatically.
+//
+// Creator:    Dan Gisselquist, Ph.D.
+//             Gisselquist Technology, LLC
+//
+////////////////////////////////////////////////////////////////////////////////
+//
+// Copyright (C) 2016-2017, Gisselquist Technology, LLC
+//
+// This program is free software (firmware): you can redistribute it and/or
+// modify it under the terms of  the GNU General Public License as published
+// by the Free Software Foundation, either version 3 of the License, or (at
+// your option) any later version.
+//
+// This program is distributed in the hope that it will be useful, but WITHOUT
+// ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY or
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this program.  (It's in the $(ROOT)/doc directory, run make with no
+// target there if the PDF file isn't present.)  If not, see
+// <http://www.gnu.org/licenses/> for a copy.
+//
+// License:    GPL, v3, as defined and found on www.gnu.org,
+//             http://www.gnu.org/licenses/gpl.html
+//
+//
+////////////////////////////////////////////////////////////////////////////////
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "common/common-target.h"
+#include "common/common-target-def.h"
+
+static const struct default_options zip_option_optimization_table[] =
+  {
+    { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
+    { OPT_LEVELS_NONE, 0, NULL, 0 }
+  };
+
+#undef TARGET_OPTION_OPTIMIZATION_TABLE
+#define        TARGET_OPTION_OPTIMIZATION_TABLE        zip_option_optimization_table
+
+struct gcc_targetm_common      targetm_common = TARGETM_COMMON_INITIALIZER;
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/config/zip/genzipops gcc-6.2.0-zip/gcc/config/zip/genzipops
--- gcc-6.2.0/gcc/config/zip/genzipops  1969-12-31 19:00:00.000000000 -0500
+++ gcc-6.2.0-zip/gcc/config/zip/genzipops      2018-03-22 18:33:09.823726748 -0400
@@ -0,0 +1,201 @@
[Rev 209 only: this hunk adds the compiled genzipops ELF executable itself to the patch. Its 201 lines of raw binary data, with the zip-ops.md comment banner and define_insn template strings embedded as literals, are not reproduced here.]
\ No newline at end of file
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/config/zip/genzipops.c gcc-6.2.0-zip/gcc/config/zip/genzipops.c
--- gcc-6.2.0/gcc/config/zip/genzipops.c        1969-12-31 19:00:00.000000000 -0500
+++ gcc-6.2.0-zip/gcc/config/zip/genzipops.c    2017-03-07 12:03:59.062584503 -0500
@@ -0,0 +1,444 @@
+////////////////////////////////////////////////////////////////////////////////
+//
+// Filename:   genzipops.c
+//
+// Project:    Zip CPU -- a small, lightweight, RISC CPU soft core
+//
+// Purpose:    This program generates the zip-ops.md machine description file.
+//
+//     While I understand that this is not GCC's preferred method of generating
+//     machine description files, there were just so many instructions to
+//     generate, and so many forms of them, and the GCC infrastructure didn't
+//     support the conditional execution model of the ZipCPU that ... I built
+//     it this way.
+//
+//     As of this writing, building zip-ops.md is not an automatic part of
+//     making GCC.  To build genzipops, just type:
+//
+//     g++ genzipops.c -o genzipops
+//
+//     And to run it, type:
+//
+//     genzipops > zip-ops.md
+//
+//     genzipops takes no arguments, and does nothing but write the machine
+//     descriptions to the standard output.
+//
+//
+// Creator:    Dan Gisselquist, Ph.D.
+//             Gisselquist Technology, LLC
+//
+////////////////////////////////////////////////////////////////////////////////
+//
+// Copyright (C) 2017, Gisselquist Technology, LLC
+//
+// This program is free software (firmware): you can redistribute it and/or
+// modify it under the terms of  the GNU General Public License as published
+// by the Free Software Foundation, either version 3 of the License, or (at
+// your option) any later version.
+//
+// This program is distributed in the hope that it will be useful, but WITHOUT
+// ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY or
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this program.  (It's in the $(ROOT)/doc directory.  Run make with no
+// target there if the PDF file isn't present.)  If not, see
+// <http://www.gnu.org/licenses/> for a copy.
+//
+// License:    GPL, v3, as defined and found on www.gnu.org,
+//             http://www.gnu.org/licenses/gpl.html
+//
+//
+////////////////////////////////////////////////////////////////////////////////
+//
+//
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+void   legal(FILE *fp) {
+       fprintf(fp, ""
+";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;\n"
+";;\n"
+";; Filename:  zip-ops.md\n"
+";;\n"
+";; Project:   Zip CPU -- a small, lightweight, RISC CPU soft core\n"
+";;\n"
+";; Purpose:   This is a computer generated machine description of the\n"
+";;            ZipCPU\'s operations.  It is computer generated simply for\n"
+";;    two reasons.  First, I can\'t seem to find a way to generate this\n"
+";;    information within GCC\'s current constructs.  Specifically, the\n"
+";;    CPU\'s instructions normally set the condition codes, unless they\n"
+";;    are conditional instructions when they don\'t.  Second, the ZipCPU is\n"
+";;    actually quite regular.  Almost all of the instructions have the same\n"
+";;    form.  This form turns into many, many RTL instructions.  Because the\n"
+";;    CPU doesn\'t match any of the others within GCC, that means either\n"
+";;    I have a *lot* of cut, copy, paste, and edit to do to create the file\n"
+";;    and upon any and every edit, or I need to build a program to generate\n"
+";;    the remaining .md constructs.  Hence, I chose the latter to minimize\n"
+";;    the amount of work I needed to do.\n"
+";;\n"
+";;\n"
+";; Creator:   Dan Gisselquist, Ph.D.\n"
+";;            Gisselquist Technology, LLC\n"
+";;\n"
+";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;\n"
+";;\n"
+";; Copyright (C) 2017, Gisselquist Technology, LLC\n"
+";;\n"
+";; This program is free software (firmware): you can redistribute it and/or\n"
+";; modify it under the terms of  the GNU General Public License as published\n"
+";; by the Free Software Foundation, either version 3 of the License, or (at\n"
+";; your option) any later version.\n"
+";;\n"
+";; This program is distributed in the hope that it will be useful, but WITHOUT\n"
+";; ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY or\n"
+";; FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License\n"
+";; for more details.\n"
+";;\n"
+";; License:   GPL, v3, as defined and found on www.gnu.org,\n"
+";;            http://www.gnu.org/licenses/gpl.html\n"
+";;\n"
+";;\n"
+";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;\n"
+";;\n"
+";;\n");
+}
+
+void   gen_heading(FILE *fp, const char *heading) {
+       fprintf(fp, ";\n;\n; %s\n;\n;\n", heading);
+}
+
+void   genzip_condop(FILE *fp, const char *md_opname,
+               const char *rtxstr, const char *insn_cond,
+               const char *zip_op,
+               const char *rtx_cond, const char *zip_cond) {
+
+       fprintf(fp, "(define_insn \"%s_%s\"\n"
+               "\t[(cond_exec (%s (reg:CC CC_REG) (const_int 0))\n"
+               "\t\t\t%s)]\n"
+               "\t\"%s\"\t; Condition\n"
+               "\t\"%s.%s\\t%%1,%%0\t; genzip, conditional operator\"\t; Template\n"
+               "\t[(set_attr \"predicable\" \"no\") "
+               "(set_attr \"ccresult\" \"unchanged\")])\n;\n;\n",
+               md_opname, rtx_cond, rtx_cond, rtxstr, insn_cond, zip_op, zip_cond);
+
+}
+
+void   genzipop_long(FILE *fp, const char *md_opname, const char *uncond_rtx, const char *insn_cond, const char *split_rtx, const char *dup_rtx, const char *zip_op) {
+       char    heading[128];
+       sprintf(heading, "%s (genzipop_long)", zip_op);
+       fprintf(fp, ";\n;\n;\n; %s (genzipop_long)\n;\n;\n;\n", zip_op);
+
+       fprintf(fp, "(define_insn \"%s\"\n"
+"\t[%s\n"
+"\t(clobber (reg:CC CC_REG))]\n"
+"\t\"%s\"\n"
+"\t\"%s\\t%%2,%%0\t; %s\"\n"
+"\t[(set_attr \"predicable\" \"no\") (set_attr \"ccresult\" \"set\")])\n;\n;\n",
+               md_opname, uncond_rtx, insn_cond, zip_op, md_opname);
+
+
+       fprintf(fp, "(define_insn \"%s_raw\"\n"
+"\t[%s\n"
+"\t(set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]\n"
+"\t\"%s\"\n"
+"\t\"%s\\t%%1,%%0\t; %s_raw\"\n"
+"\t[(set_attr \"predicable\" \"no\") (set_attr \"ccresult\" \"set\")])\n;\n;\n",
+       md_opname, dup_rtx, insn_cond, zip_op, md_opname);
+
+       genzip_condop(fp, md_opname, dup_rtx, insn_cond, zip_op, "eq", "Z");
+       genzip_condop(fp, md_opname, dup_rtx, insn_cond, zip_op, "ne", "NZ");
+       genzip_condop(fp, md_opname, dup_rtx, insn_cond, zip_op, "lt", "LT");
+       genzip_condop(fp, md_opname, dup_rtx, insn_cond, zip_op, "ge", "GE");
+       genzip_condop(fp, md_opname, dup_rtx, insn_cond, zip_op, "ltu", "C");
+       genzip_condop(fp, md_opname, dup_rtx, insn_cond, zip_op, "geu", "NC");
+}
+
+void   genzipop(FILE *fp, const char *md_opname, const char *rtx_name, const char *insn_cond, const char *zip_op) {
+       char    rtxstr[512], splitstr[512], dupstr[512], altname[64];
+
+       sprintf(rtxstr,
+"(set (match_operand:SI 0 \"register_operand\" \"=r\")\n"
+"\t\t(%s (match_operand:SI 1 \"register_operand\" \"0\")\n"
+"\t\t\t(match_operand:SI 2 \"zip_opb_single_operand_p\" \"rO\")))", rtx_name);
+       sprintf(splitstr,
+           "(set (match_dup 0) (%s (match_dup 0) (match_dup 2)))", rtx_name);
+
+       sprintf(dupstr,
+"(set (match_operand:SI 0 \"register_operand\" \"=r\")\n"
+"\t\t(%s (match_dup 0)\n"
+"\t\t\t(match_operand:SI 1 \"zip_opb_single_operand_p\" \"rO\")))", rtx_name);
+
+       genzipop_long(fp, md_opname, rtxstr, insn_cond, splitstr, dupstr, zip_op);
+
+       sprintf(rtxstr,
+"(set (match_operand:SI 0 \"register_operand\" \"=r\")\n"
+"\t\t(%s (match_operand:SI 1 \"register_operand\" \"0\")\n"
+"\t\t\t(plus:SI (match_operand:SI 2 \"register_operand\" \"r\")\n"
+"\t\t\t\t(match_operand:SI 3 \"const_int_operand\" \"N\"))))", rtx_name);
+       sprintf(splitstr,
+           "(set (match_dup 0) (%s (match_dup 0)\n"
+"\t\t\t(plus:SI (match_dup 2) (match_dup 3))))", rtx_name);
+
+       sprintf(dupstr,
+"(set (match_operand:SI 0 \"register_operand\" \"=r\")\n"
+"\t\t(%s (match_dup 0)\n"
+"\t\t\t(plus:SI (match_operand:SI 1 \"register_operand\" \"r\")\n"
+"\t\t\t\t(match_operand:SI 2 \"const_int_operand\" \"N\"))))", rtx_name);
+
+       sprintf(altname, "%s_off", md_opname);
+
+       genzipop_long(fp, altname, rtxstr, insn_cond, splitstr, dupstr, zip_op);
+}
+
+void   gencmov(FILE *fp, const char *md_opname, const char *md_cond, const char *zip_cond) {
+       fprintf(fp, ";\n;\n"
+"(define_insn \"%s_%s\"\n"
+       "\t[(set (match_operand:SI 0 \"register_operand\" \"=r,r,r,Q\")\n"
+               "\t\t(if_then_else:SI (%s (reg:CC CC_REG) (const_int 0))\n"
+               "\t\t\t(match_operand:SI 1 \"general_operand\" \"r,Q,i,r\")\n"
+               "\t\t\t(match_dup 0)))]\n"
+       "\t\"\"\n"
+       "\t\"@\n"
+       "\tMOV.%s\t%%1,%%0\t; cmov\n"
+       "\tLW.%s\t%%1,%%0\t; cmov\n"
+       "\tLDI.%s\t%%1,%%0\t; cmov\n"
+       "\tSW.%s\t%%1,%%0\t; cmov\"\n"
+       "\t[(set_attr \"predicable\" \"no\") (set_attr \"ccresult\" \"unchanged\")])\n",
+       md_opname, md_cond, md_cond, zip_cond, zip_cond, zip_cond, zip_cond);
+
+}
+
+void   gencadd(FILE *fp, const char *md_opname, const char *md_cond, const char *zip_cond) {
+       fprintf(fp, ";\n;\n"
+"(define_insn \"%s_%s\"\n"
+       "\t[(set (match_operand:SI 0 \"register_operand\" \"=r\")\n"
+               "\t\t(if_then_else:SI (%s (reg:CC CC_REG) (const_int 0))\n"
+                       "\t\t\t(plus:SI (match_dup 0)\n"
+                               "\t\t\t\t(match_operand:SI 1 \"zip_opb_single_operand_p\" \"rO\"))\n"
+                       "\t\t\t(match_dup 0)))]\n"
+       "\t\"\"\n"
+       "\t\"ADD.%s\t%%1,%%0\t; cadd\"\n"
+       "\t[(set_attr \"predicable\" \"no\") (set_attr \"ccresult\" \"unchanged\")])\n",
+       md_opname, md_cond, md_cond, zip_cond);
+}
+
+void   gencnot(FILE *fp, const char *md_opname, const char *md_cond, const char *zip_cond) {
+       fprintf(fp, ";\n;\n"
+"(define_insn \"%s_%s\"\n"
+       "\t[(set (match_operand:SI 0 \"register_operand\" \"=r\")\n"
+               "\t\t(if_then_else:SI (%s (reg:CC CC_REG) (const_int 0))\n"
+                       "\t\t\t(xor:SI (match_dup 0)\n"
+                               "\t\t\t\t(const_int -1))\n"
+                       "\t\t\t(match_dup 0)))]\n"
+       "\t\"\"\n"
+       "\t\"NOT.%s\t%%0\t; cnot\"\n"
+       "\t[(set_attr \"predicable\" \"no\") (set_attr \"ccresult\" \"unchanged\")])\n",
+       md_opname, md_cond, md_cond, zip_cond);
+}
+
+void   gencneg(FILE *fp, const char *md_opname, const char *md_cond, const char *zip_cond) {
+       fprintf(fp, ";\n;\n"
+"(define_insn \"%s_%s\"\n"
+       "\t[(set (match_operand:SI 0 \"register_operand\" \"+r\")\n"
+               "\t\t(if_then_else:SI (%s (reg:CC CC_REG) (const_int 0))\n"
+                       "\t\t\t(neg:SI (match_dup 0))\n"
+                       "\t\t\t(match_dup 0)))]\n"
+       "\t\"\"\n"
+       "\t\"NEG.%s\t%%0\t; cneg\"\n"
+       "\t[(set_attr \"predicable\" \"no\") (set_attr \"ccresult\" \"unchanged\")])\n",
+       "\t[(set_attr \"predicable\" \"no\") (set_attr \"ccresult\" \"unchanged\")])\n",
+       md_opname, md_cond, md_cond, zip_cond);
+       md_opname, md_cond, md_cond, zip_cond);
+}
+}
+
+
+
+
+void   gencand(FILE *fp, const char *md_opname, const char *md_cond, const char *zip_cond) {
+void   gencand(FILE *fp, const char *md_opname, const char *md_cond, const char *zip_cond) {
+       fprintf(fp, ";\n;\n"
+       fprintf(fp, ";\n;\n"
+"(define_insn \"%s_%s\"\n"
+"(define_insn \"%s_%s\"\n"
+       "\t[(set (match_operand:SI 0 \"register_operand\" \"+r\")\n"
+       "\t[(set (match_operand:SI 0 \"register_operand\" \"+r\")\n"
+               "\t\t(if_then_else:SI (%s (reg:CC CC_REG) (const_int 0))\n"
+               "\t\t(if_then_else:SI (%s (reg:CC CC_REG) (const_int 0))\n"
+                       "\t\t\t(and:SI (match_dup 0) (match_operand:SI 1 \"zip_opb_single_operand_p\" \"rO\"))\n"
+                       "\t\t\t(and:SI (match_dup 0) (match_operand:SI 1 \"zip_opb_single_operand_p\" \"rO\"))\n"
+                       "\t\t\t(match_dup 0)))]\n"
+                       "\t\t\t(match_dup 0)))]\n"
+       "\t\"\"\n"
+       "\t\"\"\n"
+       "\t\"AND.%s\t%%1,%%0\t; cand\"\n"
+       "\t\"AND.%s\t%%1,%%0\t; cand\"\n"
+       "\t[(set_attr \"predicable\" \"no\") (set_attr \"ccresult\" \"unchanged\")])\n",
+       "\t[(set_attr \"predicable\" \"no\") (set_attr \"ccresult\" \"unchanged\")])\n",
+       md_opname, md_cond, md_cond, zip_cond);
+       md_opname, md_cond, md_cond, zip_cond);
+}
+}
+
+
+
+
+void   gencior(FILE *fp, const char *md_opname, const char *md_cond, const char *zip_cond) {
+void   gencior(FILE *fp, const char *md_opname, const char *md_cond, const char *zip_cond) {
+       fprintf(fp, ";\n;\n"
+       fprintf(fp, ";\n;\n"
+"(define_insn \"%s_%s\"\n"
+"(define_insn \"%s_%s\"\n"
+       "\t[(set (match_operand:SI 0 \"register_operand\" \"+r\")\n"
+       "\t[(set (match_operand:SI 0 \"register_operand\" \"+r\")\n"
+               "\t\t(if_then_else:SI (%s (reg:CC CC_REG) (const_int 0))\n"
+               "\t\t(if_then_else:SI (%s (reg:CC CC_REG) (const_int 0))\n"
+                       "\t\t\t(ior:SI (match_dup 0) (match_operand:SI 1 \"zip_opb_single_operand_p\" \"rO\"))\n"
+                       "\t\t\t(ior:SI (match_dup 0) (match_operand:SI 1 \"zip_opb_single_operand_p\" \"rO\"))\n"
+                       "\t\t\t(match_dup 0)))]\n"
+                       "\t\t\t(match_dup 0)))]\n"
+       "\t\"\"\n"
+       "\t\"\"\n"
+       "\t\"OR.%s\t%%1,%%0\t; cior\"\n"
+       "\t\"OR.%s\t%%1,%%0\t; cior\"\n"
+       "\t[(set_attr \"predicable\" \"no\") (set_attr \"ccresult\" \"unchanged\")])\n",
+       "\t[(set_attr \"predicable\" \"no\") (set_attr \"ccresult\" \"unchanged\")])\n",
+       md_opname, md_cond, md_cond, zip_cond);
+       md_opname, md_cond, md_cond, zip_cond);
+}
+}
+
+
+void   gencxor(FILE *fp, const char *md_opname, const char *md_cond, const char *zip_cond) {
+void   gencxor(FILE *fp, const char *md_opname, const char *md_cond, const char *zip_cond) {
+       fprintf(fp, ";\n;\n"
+       fprintf(fp, ";\n;\n"
+"(define_insn \"%s_%s\"\n"
+"(define_insn \"%s_%s\"\n"
+       "\t[(set (match_operand:SI 0 \"register_operand\" \"+r\")\n"
+       "\t[(set (match_operand:SI 0 \"register_operand\" \"+r\")\n"
+               "\t\t(if_then_else:SI (%s (reg:CC CC_REG) (const_int 0))\n"
+               "\t\t(if_then_else:SI (%s (reg:CC CC_REG) (const_int 0))\n"
+                       "\t\t\t(xor:SI (match_dup 0) (match_operand:SI 1 \"zip_opb_single_operand_p\" \"rO\"))\n"
+                       "\t\t\t(xor:SI (match_dup 0) (match_operand:SI 1 \"zip_opb_single_operand_p\" \"rO\"))\n"
+                       "\t\t\t(match_dup 0)))]\n"
+                       "\t\t\t(match_dup 0)))]\n"
+       "\t\"\"\n"
+       "\t\"\"\n"
+       "\t\"XOR.%s\t%%1,%%0\t; cxor\"\n"
+       "\t\"XOR.%s\t%%1,%%0\t; cxor\"\n"
+       "\t[(set_attr \"predicable\" \"no\") (set_attr \"ccresult\" \"unchanged\")])\n",
+       "\t[(set_attr \"predicable\" \"no\") (set_attr \"ccresult\" \"unchanged\")])\n",
+       md_opname, md_cond, md_cond, zip_cond);
+       md_opname, md_cond, md_cond, zip_cond);
+}
+}
+
+
+void   usage(void) {
+void   usage(void) {
+       printf("USAGE: genzipops <new-zip-ops.md filename>\n");
+       printf("USAGE: genzipops <new-zip-ops.md filename>\n");
+}
+}
+
+
+const  char    *TMPPATH = ".zip-ops.md";
+const  char    *TMPPATH = ".zip-ops.md";
+const  char    *TAILPATH = "zip-ops.md";
+const  char    *TAILPATH = "zip-ops.md";
+
+
+int main(int argc, char **argv) {
+int main(int argc, char **argv) {
+       FILE    *fp = fopen(TMPPATH, "w");
+       FILE    *fp = fopen(TMPPATH, "w");
+       const char      *newname = TAILPATH;
+       const char      *newname = TAILPATH;
+
+
+       if ((argc>1)&&(argv[1][0] == '-')) {
+       if ((argc>1)&&(argv[1][0] == '-')) {
+               usage();
+               usage();
+               exit(EXIT_FAILURE);
+               exit(EXIT_FAILURE);
+       }
+       }
+
+
+       if (argc>1) {
+       if (argc>1) {
+               if ((strlen(argv[1])>=strlen(TAILPATH))
+               if ((strlen(argv[1])>=strlen(TAILPATH))
+                       &&(strcmp(&argv[1][strlen(argv[1])-strlen(TAILPATH)],
+                       &&(strcmp(&argv[1][strlen(argv[1])-strlen(TAILPATH)],
+                               TAILPATH)==0)
+                               TAILPATH)==0)
+                       &&(access(argv[1], F_OK)==0))
+                       &&(access(argv[1], F_OK)==0))
+                               unlink(argv[1]);
+                               unlink(argv[1]);
+               newname = argv[1];
+               newname = argv[1];
+       }
+       }
+
+
+       legal(fp);
+       legal(fp);
+       genzipop(fp, "addsi3",  "plus:SI",    "",             "ADD");
+       genzipop(fp, "addsi3",  "plus:SI",    "",             "ADD");
+       genzipop(fp, "subsi3",  "minus:SI",   "",             "SUB");
+       genzipop(fp, "subsi3",  "minus:SI",   "",             "SUB");
+       genzipop(fp, "mulsi3",  "mult:SI",    "",             "MPY");
+       genzipop(fp, "mulsi3",  "mult:SI",    "",             "MPY");
+       genzipop(fp, "divsi3",  "div:SI",     "(ZIP_DIVIDE)", "DIVS");
+       genzipop(fp, "divsi3",  "div:SI",     "(ZIP_DIVIDE)", "DIVS");
+       genzipop(fp, "udivsi3", "udiv:SI",    "(ZIP_DIVIDE)", "DIVU");
+       genzipop(fp, "udivsi3", "udiv:SI",    "(ZIP_DIVIDE)", "DIVU");
+       genzipop(fp, "andsi3",  "and:SI",     "",             "AND");
+       genzipop(fp, "andsi3",  "and:SI",     "",             "AND");
+       genzipop(fp, "iorsi3",  "ior:SI",     "",             "OR");
+       genzipop(fp, "iorsi3",  "ior:SI",     "",             "OR");
+       genzipop(fp, "xorsi3",  "xor:SI",     "",             "XOR");
+       genzipop(fp, "xorsi3",  "xor:SI",     "",             "XOR");
+       genzipop(fp, "ashrsi3", "ashiftrt:SI","",             "ASR");
+       genzipop(fp, "ashrsi3", "ashiftrt:SI","",             "ASR");
+       genzipop(fp, "ashlsi3", "ashift:SI",  "",             "LSL");
+       genzipop(fp, "ashlsi3", "ashift:SI",  "",             "LSL");
+       genzipop(fp, "lshrsi3", "lshiftrt:SI","",             "LSR");
+       genzipop(fp, "lshrsi3", "lshiftrt:SI","",             "LSR");
+
+
+       genzipop_long(fp, "smulsi_highpart",
+       genzipop_long(fp, "smulsi_highpart",
+               "(set (match_operand:SI 0 \"register_operand\" \"=r\")\n"
+               "(set (match_operand:SI 0 \"register_operand\" \"=r\")\n"
+               "\t\t(truncate:SI (ashiftrt:DI (mult:DI\n"
+               "\t\t(truncate:SI (ashiftrt:DI (mult:DI\n"
+               "\t\t\t(sign_extend:DI (match_operand:SI 1 \"register_operand\" \"0\"))\n"
+               "\t\t\t(sign_extend:DI (match_operand:SI 1 \"register_operand\" \"0\"))\n"
+               "\t\t\t(sign_extend:DI (match_operand:SI 2 \"zip_opb_operand_p\" \"rO\")))\n"
+               "\t\t\t(sign_extend:DI (match_operand:SI 2 \"zip_opb_operand_p\" \"rO\")))\n"
+               "\t\t\t(const_int 32))))",
+               "\t\t\t(const_int 32))))",
+               "(ZIP_HAS_DI)",
+               "(ZIP_HAS_DI)",
+               "(set (match_dup 0)\n"
+               "(set (match_dup 0)\n"
+               "\t\t(truncate:SI (ashiftrt:DI (mult:DI\n"
+               "\t\t(truncate:SI (ashiftrt:DI (mult:DI\n"
+               "\t\t\t(sign_extend:DI (match_dup 1))\n"
+               "\t\t\t(sign_extend:DI (match_dup 1))\n"
+               "\t\t\t(sign_extend:DI (match_dup 2)))\n"
+               "\t\t\t(sign_extend:DI (match_dup 2)))\n"
+               "\t\t\t(const_int 32))))",
+               "\t\t\t(const_int 32))))",
+               //
+               //
+               "(set (match_operand:SI 0 \"register_operand\" \"=r\")\n"
+               "(set (match_operand:SI 0 \"register_operand\" \"=r\")\n"
+               "\t\t(truncate:SI (ashiftrt:DI (mult:DI\n"
+               "\t\t(truncate:SI (ashiftrt:DI (mult:DI\n"
+               "\t\t\t(sign_extend:DI (match_dup 0))\n"
+               "\t\t\t(sign_extend:DI (match_dup 0))\n"
+               "\t\t\t(sign_extend:DI (match_operand:SI 1 \"zip_opb_operand_p\" \"rO\")))\n"
+               "\t\t\t(sign_extend:DI (match_operand:SI 1 \"zip_opb_operand_p\" \"rO\")))\n"
+               "\t\t\t(const_int 32))))",
+               "\t\t\t(const_int 32))))",
+               "MPYSHI");
+               "MPYSHI");
+       genzipop_long(fp, "umulsi_highpart",
+       genzipop_long(fp, "umulsi_highpart",
+               "(set (match_operand:SI 0 \"register_operand\" \"=r\")\n"
+               "(set (match_operand:SI 0 \"register_operand\" \"=r\")\n"
+               "\t\t(truncate:SI (ashiftrt:DI (mult:DI\n"
+               "\t\t(truncate:SI (ashiftrt:DI (mult:DI\n"
+               "\t\t\t(zero_extend:DI (match_operand:SI 1 \"register_operand\" \"0\"))\n"
+               "\t\t\t(zero_extend:DI (match_operand:SI 1 \"register_operand\" \"0\"))\n"
+               "\t\t\t(zero_extend:DI (match_operand:SI 2 \"zip_opb_operand_p\" \"rO\")))\n"
+               "\t\t\t(zero_extend:DI (match_operand:SI 2 \"zip_opb_operand_p\" \"rO\")))\n"
+               "\t\t\t(const_int 32))))",
+               "\t\t\t(const_int 32))))",
+               "(ZIP_HAS_DI)",
+               "(ZIP_HAS_DI)",
+               "(set (match_dup 0)\n"
+               "(set (match_dup 0)\n"
+               "\t\t(truncate:SI (ashiftrt:DI (mult:DI\n"
+               "\t\t(truncate:SI (ashiftrt:DI (mult:DI\n"
+               "\t\t\t(zero_extend:DI (match_dup 1))\n"
+               "\t\t\t(zero_extend:DI (match_dup 1))\n"
+               "\t\t\t(zero_extend:DI (match_dup 2)))\n"
+               "\t\t\t(zero_extend:DI (match_dup 2)))\n"
+               "\t\t\t(const_int 32))))",
+               "\t\t\t(const_int 32))))",
+               //
+               //
+               "(set (match_operand:SI 0 \"register_operand\" \"=r\")\n"
+               "(set (match_operand:SI 0 \"register_operand\" \"=r\")\n"
+               "\t\t(truncate:SI (ashiftrt:DI (mult:DI\n"
+               "\t\t(truncate:SI (ashiftrt:DI (mult:DI\n"
+               "\t\t\t(zero_extend:DI (match_dup 0))\n"
+               "\t\t\t(zero_extend:DI (match_dup 0))\n"
+               "\t\t\t(zero_extend:DI (match_operand:SI 1 \"zip_opb_operand_p\" \"rO\")))\n"
+               "\t\t\t(zero_extend:DI (match_operand:SI 1 \"zip_opb_operand_p\" \"rO\")))\n"
+               "\t\t\t(const_int 32))))",
+               "\t\t\t(const_int 32))))",
+               "MPYUHI");
+               "MPYUHI");
+
+
+       gen_heading(fp, "Conditional move instructions");
+       gen_heading(fp, "Conditional move instructions");
+
+
+       gencmov(fp, "cmov", "eq", "Z");
+       gencmov(fp, "cmov", "eq", "Z");
+       gencmov(fp, "cmov", "ne", "NZ");
+       gencmov(fp, "cmov", "ne", "NZ");
+       gencmov(fp, "cmov", "lt", "LT");
+       gencmov(fp, "cmov", "lt", "LT");
+       gencmov(fp, "cmov", "ge", "GE");
+       gencmov(fp, "cmov", "ge", "GE");
+       gencmov(fp, "cmov", "ltu", "C");
+       gencmov(fp, "cmov", "ltu", "C");
+       gencmov(fp, "cmov", "geu", "NC");
+       gencmov(fp, "cmov", "geu", "NC");
+
+
+       gen_heading(fp, "Conditional add instructions");
+       gen_heading(fp, "Conditional add instructions");
+
+
+       gencadd(fp, "cadd", "eq", "Z");
+       gencadd(fp, "cadd", "eq", "Z");
+       gencadd(fp, "cadd", "ne", "NZ");
+       gencadd(fp, "cadd", "ne", "NZ");
+       gencadd(fp, "cadd", "lt", "LT");
+       gencadd(fp, "cadd", "lt", "LT");
+       gencadd(fp, "cadd", "ge", "GE");
+       gencadd(fp, "cadd", "ge", "GE");
+       gencadd(fp, "cadd", "ltu", "C");
+       gencadd(fp, "cadd", "ltu", "C");
+       gencadd(fp, "cadd", "geu", "NC");
+       gencadd(fp, "cadd", "geu", "NC");
+
+
+       gen_heading(fp, "Conditional negate instructions");
+       gen_heading(fp, "Conditional negate instructions");
+
+
+       gencneg(fp, "cneg", "eq", "Z");
+       gencneg(fp, "cneg", "eq", "Z");
+       gencneg(fp, "cneg", "ne", "NZ");
+       gencneg(fp, "cneg", "ne", "NZ");
+       gencneg(fp, "cneg", "lt", "LT");
+       gencneg(fp, "cneg", "lt", "LT");
+       gencneg(fp, "cneg", "ge", "GE");
+       gencneg(fp, "cneg", "ge", "GE");
+       gencneg(fp, "cneg", "ltu", "C");
+       gencneg(fp, "cneg", "ltu", "C");
+       gencneg(fp, "cneg", "geu", "NC");
+       gencneg(fp, "cneg", "geu", "NC");
+
+
+       gen_heading(fp, "Conditional not instructions");
+       gen_heading(fp, "Conditional not instructions");
+
+
+       gencnot(fp, "cnot", "eq", "Z");
+       gencnot(fp, "cnot", "eq", "Z");
+       gencnot(fp, "cnot", "ne", "NZ");
+       gencnot(fp, "cnot", "ne", "NZ");
+       gencnot(fp, "cnot", "lt", "LT");
+       gencnot(fp, "cnot", "lt", "LT");
+       gencnot(fp, "cnot", "ge", "GE");
+       gencnot(fp, "cnot", "ge", "GE");
+       gencnot(fp, "cnot", "ltu", "C");
+       gencnot(fp, "cnot", "ltu", "C");
+       gencnot(fp, "cnot", "geu", "NC");
+       gencnot(fp, "cnot", "geu", "NC");
+
+
+       gen_heading(fp, "Conditional and instructions");
+       gen_heading(fp, "Conditional and instructions");
+
+
+       gencand(fp, "cand", "eq", "Z");
+       gencand(fp, "cand", "eq", "Z");
+       gencand(fp, "cand", "ne", "NZ");
+       gencand(fp, "cand", "ne", "NZ");
+       gencand(fp, "cand", "lt", "LT");
+       gencand(fp, "cand", "lt", "LT");
+       gencand(fp, "cand", "ge", "GE");
+       gencand(fp, "cand", "ge", "GE");
+       gencand(fp, "cand", "ltu", "C");
+       gencand(fp, "cand", "ltu", "C");
+       gencand(fp, "cand", "geu", "NC");
+       gencand(fp, "cand", "geu", "NC");
+
+
+       gen_heading(fp, "Conditional ior instructions");
+       gen_heading(fp, "Conditional ior instructions");
+
+
+       gencior(fp, "cior", "eq", "Z");
+       gencior(fp, "cior", "eq", "Z");
+       gencior(fp, "cior", "ne", "NZ");
+       gencior(fp, "cior", "ne", "NZ");
+       gencior(fp, "cior", "lt", "LT");
+       gencior(fp, "cior", "lt", "LT");
+       gencior(fp, "cior", "ge", "GE");
+       gencior(fp, "cior", "ge", "GE");
+       gencior(fp, "cior", "ltu", "C");
+       gencior(fp, "cior", "ltu", "C");
+       gencior(fp, "cior", "geu", "NC");
+       gencior(fp, "cior", "geu", "NC");
+
+
+       gen_heading(fp, "Conditional xor instructions");
+       gen_heading(fp, "Conditional xor instructions");
+
+
+       gencxor(fp, "cxor", "eq", "Z");
+       gencxor(fp, "cxor", "eq", "Z");
+       gencxor(fp, "cxor", "ne", "NZ");
+       gencxor(fp, "cxor", "ne", "NZ");
+       gencxor(fp, "cxor", "lt", "LT");
+       gencxor(fp, "cxor", "lt", "LT");
+       gencxor(fp, "cxor", "ge", "GE");
+       gencxor(fp, "cxor", "ge", "GE");
+       gencxor(fp, "cxor", "ltu", "C");
+       gencxor(fp, "cxor", "ltu", "C");
+       gencxor(fp, "cxor", "geu", "NC");
+       gencxor(fp, "cxor", "geu", "NC");
+
+
+       fclose(fp);
+       fclose(fp);
+
+
+       if (rename(TMPPATH, newname) != 0) {
+       if (rename(TMPPATH, newname) != 0) {
+               fprintf(stderr, "ERR: Could not create %s, leaving results in %s\n", newname, TMPPATH);
+               fprintf(stderr, "ERR: Could not create %s, leaving results in %s\n", newname, TMPPATH);
+               exit(EXIT_FAILURE);
+               exit(EXIT_FAILURE);
+       } exit(EXIT_SUCCESS);
+       } exit(EXIT_SUCCESS);
+}
+}
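Judging from usage() and the rename logic above, the generator is presumably invoked during the build as something like "genzipops zip-ops.md": it writes every pattern to the temporary file .zip-ops.md and, on success, renames that file onto the requested output name (an already-existing output is unlinked first, but only when the supplied name ends in zip-ops.md).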
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/config/zip/zip.c gcc-6.2.0-zip/gcc/config/zip/zip.c
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/config/zip/zip.c gcc-6.2.0-zip/gcc/config/zip/zip.c
--- gcc-6.2.0/gcc/config/zip/zip.c      1969-12-31 19:00:00.000000000 -0500
--- gcc-6.2.0/gcc/config/zip/zip.c      1969-12-31 19:00:00.000000000 -0500
+++ gcc-6.2.0-zip/gcc/config/zip/zip.c  2017-03-07 12:03:18.566583672 -0500
+++ gcc-6.2.0-zip/gcc/config/zip/zip.c  2018-06-05 21:17:24.151098201 -0400
@@ -0,0 +1,2679 @@
@@ -0,0 +1,2711 @@
+////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+//
+//
+// Filename:   zip.c
+// Filename:   zip.c
+//
+//
+// Project:    Zip CPU backend for the GNU Compiler Collection
+// Project:    Zip CPU backend for the GNU Compiler Collection
+//
+//
+// Purpose:
+// Purpose:
+//
+//
+// Creator:    Dan Gisselquist, Ph.D.
+// Creator:    Dan Gisselquist, Ph.D.
+//             Gisselquist Technology, LLC
+//             Gisselquist Technology, LLC
+//
+//
+////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+//
+//
+// Copyright (C) 2016-2017, Gisselquist Technology, LLC
+// Copyright (C) 2016-2017, Gisselquist Technology, LLC
+//
+//
+// This program is free software (firmware): you can redistribute it and/or
+// This program is free software (firmware): you can redistribute it and/or
+// modify it under the terms of  the GNU General Public License as published
+// modify it under the terms of  the GNU General Public License as published
+// by the Free Software Foundation, either version 3 of the License, or (at
+// by the Free Software Foundation, either version 3 of the License, or (at
+// your option) any later version.
+// your option) any later version.
+//
+//
+// This program is distributed in the hope that it will be useful, but WITHOUT
+// This program is distributed in the hope that it will be useful, but WITHOUT
+// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// for more details.
+// for more details.
+//
+//
+// You should have received a copy of the GNU General Public License along
+// You should have received a copy of the GNU General Public License along
+// with this program.  (It's in the $(ROOT)/doc directory, run make with no
+// with this program.  (It's in the $(ROOT)/doc directory, run make with no
+// target there if the PDF file isn't present.)  If not, see
+// target there if the PDF file isn't present.)  If not, see
+// <http://www.gnu.org/licenses/> for a copy.
+// <http://www.gnu.org/licenses/> for a copy.
+//
+//
+// License:    GPL, v3, as defined and found on www.gnu.org,
+// License:    GPL, v3, as defined and found on www.gnu.org,
+//             http://www.gnu.org/licenses/gpl.html
+//             http://www.gnu.org/licenses/gpl.html
+//
+//
+//
+//
+////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+#include "config.h"
+#include "config.h"
+#include "system.h"
+#include "system.h"
+#include "coretypes.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "tm.h"
+#include "rtl.h"
+#include "rtl.h"
+#include "dominance.h"
+#include "dominance.h"
+#include "cfg.h"
+#include "cfg.h"
+#include "cfgrtl.h"
+#include "cfgrtl.h"
+#include "cfganal.h"
+#include "cfganal.h"
+#include "lcm.h"
+#include "lcm.h"
+#include "cfgbuild.h"
+#include "cfgbuild.h"
+#include "cfgcleanup.h"
+#include "cfgcleanup.h"
+#include "predict.h"
+#include "predict.h"
+#include "basic-block.h"
+#include "basic-block.h"
+#include "bitmap.h"
+#include "bitmap.h"
+#include "df.h"
+#include "df.h"
+#include "hashtab.h"
+#include "hashtab.h"
+#include "hash-set.h"
+#include "hash-set.h"
+#include "machmode.h"
+#include "machmode.h"
+#include "symtab.h"
+#include "symtab.h"
+#include "rtlhash.h"
+#include "rtlhash.h"
+#include "tree.h"
+#include "tree.h"
+#include "regs.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "hard-reg-set.h"
+#include "real.h"
+#include "real.h"
+#include "insn-config.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "conditions.h"
+#include "output.h"
+#include "output.h"
+#include "insn-attr.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "flags.h"
+#include "expr.h"
+#include "expr.h"
+#include "function.h"
+#include "function.h"
+#include "recog.h"
+#include "recog.h"
+#include "toplev.h"
+#include "toplev.h"
+#include "ggc.h"
+#include "ggc.h"
+#include "builtins.h"
+#include "builtins.h"
+#include "calls.h"
+#include "calls.h"
+#include "langhooks.h"
+#include "langhooks.h"
+#include "optabs.h"
+#include "optabs.h"
+#include "explow.h"
+#include "explow.h"
+#include "emit-rtl.h"
+#include "emit-rtl.h"
+#include "ifcvt.h"
+#include "ifcvt.h"
+#include "genrtl.h"
+#include "genrtl.h"
+
+
+// #include "tmp_p.h"
+// #include "tmp_p.h"
+#include "target.h"
+#include "target.h"
+#include "target-def.h"
+#include "target-def.h"
+// #include "tm-constrs.h"
+// #include "tm-constrs.h"
+#include "tm-preds.h"
+#include "tm-preds.h"
+
+
+#include "diagnostic.h"
+#include "diagnostic.h"
+// #include "integrate.h"
+// #include "integrate.h"
+
+
+#include "zip-protos.h"
+#include "zip-protos.h"
+
+
+static bool    zip_return_in_memory(const_tree, const_tree);
+static bool    zip_return_in_memory(const_tree, const_tree);
+static bool    zip_frame_pointer_required(void);
+static bool    zip_frame_pointer_required(void);
+
+
+static void zip_function_arg_advance(cumulative_args_t ca, enum machine_mode mode,
+static void zip_function_arg_advance(cumulative_args_t ca, enum machine_mode mode,
+               const_tree type, bool named);
+               const_tree type, bool named);
+static rtx zip_function_arg(cumulative_args_t ca, enum machine_mode mode, const_tree type, bool named);
+static rtx zip_function_arg(cumulative_args_t ca, enum machine_mode mode, const_tree type, bool named);
+
+
+static void    zip_asm_trampoline_template(FILE *);
+static void    zip_asm_trampoline_template(FILE *);
+static void    zip_trampoline_init(rtx, tree, rtx);
+static void    zip_trampoline_init(rtx, tree, rtx);
+static void    zip_init_builtins(void);
+static void    zip_init_builtins(void);
+static tree    zip_builtin_decl(unsigned, bool);
+static tree    zip_builtin_decl(unsigned, bool);
+// static void zip_asm_output_anchor(rtx x);
+// static void zip_asm_output_anchor(rtx x);
+       void    zip_asm_output_def(FILE *s, const char *n, const char *v);
+       void    zip_asm_output_def(FILE *s, const char *n, const char *v);
+static rtx     zip_expand_builtin(tree exp, rtx target, rtx subtarget,
+static rtx     zip_expand_builtin(tree exp, rtx target, rtx subtarget,
+                       enum machine_mode tmode, int    ignore);
+                       enum machine_mode tmode, int    ignore);
+static bool    zip_scalar_mode_supported_p(enum machine_mode mode);
+static bool    zip_scalar_mode_supported_p(enum machine_mode mode);
+static bool    zip_libgcc_floating_mode_supported_p(enum machine_mode mode);
+static bool    zip_libgcc_floating_mode_supported_p(enum machine_mode mode);
+static int     zip_address_cost(rtx addr, enum machine_mode mode, addr_space_t as, bool spd);
+static int     zip_address_cost(rtx addr, enum machine_mode mode, addr_space_t as, bool spd);
+static bool    zip_mode_dependent_address_p(const_rtx addr, addr_space_t);
+static bool    zip_mode_dependent_address_p(const_rtx addr, addr_space_t);
+static unsigned HOST_WIDE_INT  zip_const_anchor = 0x20000;
+static unsigned HOST_WIDE_INT  zip_const_anchor = 0x20000;
+static          HOST_WIDE_INT  zip_min_opb_imm = -0x20000;
+static          HOST_WIDE_INT  zip_min_opb_imm = -0x20000;
+static          HOST_WIDE_INT  zip_max_opb_imm =  0x1ffff;
+static          HOST_WIDE_INT  zip_max_opb_imm =  0x1ffff;
+static          HOST_WIDE_INT  zip_min_anchor_offset = -0x2000;
+static          HOST_WIDE_INT  zip_min_anchor_offset = -0x2000;
+static          HOST_WIDE_INT  zip_max_anchor_offset =  0x1fff;
+static          HOST_WIDE_INT  zip_max_anchor_offset =  0x1fff;
+static          HOST_WIDE_INT  zip_min_mov_offset = -0x1000;
+static          HOST_WIDE_INT  zip_min_mov_offset = -0x1000;
+static          HOST_WIDE_INT  zip_max_mov_offset =  0x0fff;
+static          HOST_WIDE_INT  zip_max_mov_offset =  0x0fff;
+static int     zip_sched_issue_rate(void) { return 1; }
+static int     zip_sched_issue_rate(void) { return 1; }
+static bool    zip_legitimate_address_p(machine_mode, rtx, bool);
+static bool    zip_legitimate_address_p(machine_mode, rtx, bool);
+static bool    zip_legitimate_move_operand_p(machine_mode, rtx, bool);
+static bool    zip_legitimate_move_operand_p(machine_mode, rtx, bool);
+       void    zip_debug_rtx_pfx(const char *, const_rtx x);
+       void    zip_debug_rtx_pfx(const char *, const_rtx x);
+       void    zip_debug_rtx(const_rtx x);
+       void    zip_debug_rtx(const_rtx x);
+static void    zip_override_options(void);
+static void    zip_override_options(void);
+static bool    zip_can_eliminate(int from ATTRIBUTE_UNUSED, int to);
+static bool    zip_can_eliminate(int from ATTRIBUTE_UNUSED, int to);
+static int     zip_memory_move_cost(machine_mode, reg_class_t, bool);
+static int     zip_memory_move_cost(machine_mode, reg_class_t, bool);
+static rtx     zip_legitimize_address(rtx x, rtx oldx, machine_mode mode);
+static rtx     zip_legitimize_address(rtx x, rtx oldx, machine_mode mode);
+static bool    zip_cannot_modify_jumps_p(void);
+static bool    zip_cannot_modify_jumps_p(void);
+static bool    zip_fixed_condition_code_regs(unsigned int *a, unsigned int *b);
+static bool    zip_fixed_condition_code_regs(unsigned int *a, unsigned int *b);
+
+
+
+
+#define        ZIP_ALL_DEBUG_OFF       false
+#define        ZIP_ALL_DEBUG_OFF       false
+#define        ZIP_ALL_DEBUG_ON        false
+#define        ZIP_ALL_DEBUG_ON        false
+#define        ZIPDEBUGFLAG(A,B)       const bool A =                  \
+#define        ZIPDEBUGFLAG(A,B)       const bool A =                  \
+               ((ZIP_ALL_DEBUG_ON)||(B))&&(!ZIP_ALL_DEBUG_OFF)
+               ((ZIP_ALL_DEBUG_ON)||(B))&&(!ZIP_ALL_DEBUG_OFF)
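/* A minimal sketch of how this macro expands at a use site such as the
 *   ZIPDEBUGFLAG(dbg, false);
 * lines later in this file: the preprocessor turns it into
 *   const bool dbg = ((ZIP_ALL_DEBUG_ON)||(false))&&(!ZIP_ALL_DEBUG_OFF);
 * so ZIP_ALL_DEBUG_ON forces every such flag on, ZIP_ALL_DEBUG_OFF forces
 * every such flag off, and otherwise the per-site default (here false) wins.
 */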
+
+
+enum ZIP_BUILTIN_ID_CODE {
+enum ZIP_BUILTIN_ID_CODE {
+       ZIP_BUILTIN_RTU,
+       ZIP_BUILTIN_RTU,
+       ZIP_BUILTIN_HALT,
+       ZIP_BUILTIN_HALT,
+       ZIP_BUILTIN_IDLE,
+       ZIP_BUILTIN_IDLE,
+       ZIP_BUILTIN_SYSCALL,
+       ZIP_BUILTIN_SYSCALL,
+       ZIP_BUILTIN_SAVE_CONTEXT,
+       ZIP_BUILTIN_SAVE_CONTEXT,
+       ZIP_BUILTIN_RESTORE_CONTEXT,
+       ZIP_BUILTIN_RESTORE_CONTEXT,
+       ZIP_BUILTIN_BITREV,
+       ZIP_BUILTIN_BITREV,
+       ZIP_BUILTIN_CC,
+       ZIP_BUILTIN_CC,
+       ZIP_BUILTIN_UCC,
+       ZIP_BUILTIN_UCC,
+       ZIP_BUILTIN_BUSY,
+       ZIP_BUILTIN_BUSY,
+       ZIP_BUILTIN_MAX
+       ZIP_BUILTIN_MAX
+};
+};
+
+
+static GTY (()) tree   zip_builtins[(int)ZIP_BUILTIN_MAX];
+static GTY (()) tree   zip_builtins[(int)ZIP_BUILTIN_MAX];
+static enum insn_code  zip_builtins_icode[(int)ZIP_BUILTIN_MAX];
+static enum insn_code  zip_builtins_icode[(int)ZIP_BUILTIN_MAX];
+
+
+#undef TARGET_ASM_ALIGNED_HI_OP
+#undef TARGET_ASM_ALIGNED_HI_OP
+#undef TARGET_ASM_ALIGNED_SI_OP
+#undef TARGET_ASM_ALIGNED_SI_OP
+#undef TARGET_ASM_ALIGNED_DI_OP
+#undef TARGET_ASM_ALIGNED_DI_OP
+#define        TARGET_ASM_ALIGNED_HI_OP        "\t.short\t"
+#define        TARGET_ASM_ALIGNED_HI_OP        "\t.short\t"
+#define        TARGET_ASM_ALIGNED_SI_OP        "\t.int\t"
+#define        TARGET_ASM_ALIGNED_SI_OP        "\t.int\t"
+#define        TARGET_ASM_ALIGNED_DI_OP        "\t.quad\t"
+#define        TARGET_ASM_ALIGNED_DI_OP        "\t.quad\t"
+
+
+#undef TARGET_ASM_UNALIGNED_HI_OP
+#undef TARGET_ASM_UNALIGNED_HI_OP
+#undef TARGET_ASM_UNALIGNED_SI_OP
+#undef TARGET_ASM_UNALIGNED_SI_OP
+#undef TARGET_ASM_UNALIGNED_DI_OP
+#undef TARGET_ASM_UNALIGNED_DI_OP
+#define        TARGET_ASM_UNALIGNED_HI_OP      TARGET_ASM_ALIGNED_HI_OP
+#define        TARGET_ASM_UNALIGNED_HI_OP      TARGET_ASM_ALIGNED_HI_OP
+#define        TARGET_ASM_UNALIGNED_SI_OP      TARGET_ASM_ALIGNED_SI_OP
+#define        TARGET_ASM_UNALIGNED_SI_OP      TARGET_ASM_ALIGNED_SI_OP
+#define        TARGET_ASM_UNALIGNED_DI_OP      TARGET_ASM_ALIGNED_DI_OP
+#define        TARGET_ASM_UNALIGNED_DI_OP      TARGET_ASM_ALIGNED_DI_OP
+
+
+#include "gt-zip.h"
+#include "gt-zip.h"
+
+
+/* The Global 'targetm' Variable. */
+/* The Global 'targetm' Variable. */
+struct gcc_target      targetm = TARGET_INITIALIZER;
+struct gcc_target      targetm = TARGET_INITIALIZER;
+
+
+
+
+enum   reg_class zip_reg_class(int);
+enum   reg_class zip_reg_class(int);
+
+
+#define        LOSE_AND_RETURN(msgid, x)               \
+#define        LOSE_AND_RETURN(msgid, x)               \
+       do {                                    \
+       do {                                    \
+               zip_operand_lossage(msgid, x);  \
+               zip_operand_lossage(msgid, x);  \
+               return;                         \
+               return;                         \
+       } while(0)
+       } while(0)
+
+
+/* Per-function machine data. */
+/* Per-function machine data. */
+struct GTY(()) machine_function
+struct GTY(()) machine_function
+{
+{
+       /* number of pretended arguments for varargs */
+       /* number of pretended arguments for varargs */
+       int     pretend_size;
+       int     pretend_size;
+
+
+       /* Number of bytes saved on the stack for local variables. */
+       /* Number of bytes saved on the stack for local variables. */
+       int     local_vars_size;
+       int     local_vars_size;
+
+
+       /* Number of bytes saved on stack for register save area */
+       /* Number of bytes saved on stack for register save area */
+       int     saved_reg_size;
+       int     saved_reg_size;
+       int     save_ret;
+       int     save_ret;
+
+
+       int     sp_fp_offset;
+       int     sp_fp_offset;
+       bool    fp_needed;
+       bool    fp_needed;
+       int     size_for_adjusting_sp;
+       int     size_for_adjusting_sp;
+};
+};
+
+
+/* Allocate a chunk of memory for per-function machine-dependent data. */
+/* Allocate a chunk of memory for per-function machine-dependent data. */
+
+
+static struct machine_function *
+static struct machine_function *
+zip_init_machine_status(void) {
+zip_init_machine_status(void) {
+       return ggc_cleared_alloc<machine_function>();
+       return ggc_cleared_alloc<machine_function>();
+}
+}
+
+
+static void
+static void
+zip_override_options(void)
+zip_override_options(void)
+{
+{
+       init_machine_status = zip_init_machine_status;
+       init_machine_status = zip_init_machine_status;
+}
+}
+
+
+enum   reg_class
+enum   reg_class
+zip_reg_class(int regno)
+zip_reg_class(int regno)
+{
+{
+       if (is_ZIP_GENERAL_REG(regno)) {
+       if (is_ZIP_GENERAL_REG(regno)) {
+               return GENERAL_REGS;
+               return GENERAL_REGS;
+       } else if (is_ZIP_REG(regno)) {
+       } else if (is_ZIP_REG(regno)) {
+               return ALL_REGS;
+               return ALL_REGS;
+       } return NO_REGS;
+       } return NO_REGS;
+}
+}
+
+
+/* Worker function for TARGET_RETURN_IN_MEMORY. */
+/* Worker function for TARGET_RETURN_IN_MEMORY. */
+static bool
+static bool
+zip_return_in_memory(const_tree type, const_tree fntype ATTRIBUTE_UNUSED) {
+zip_return_in_memory(const_tree type, const_tree fntype ATTRIBUTE_UNUSED) {
+       const   HOST_WIDE_INT size = int_size_in_bytes(type);
+       const   HOST_WIDE_INT size = int_size_in_bytes(type);
+       return (size == -1)||(size > 2*UNITS_PER_WORD);
+       return (size == -1)||(size > 2*UNITS_PER_WORD);
+}
+}
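/* A minimal illustration (assuming UNITS_PER_WORD is 4 on this 32-bit target,
 * so the cutoff above is 2*4 = 8 bytes; the type names are hypothetical):
 *   struct two_words   { int a, b;    };   // 8 bytes:  returned in registers
 *   struct three_words { int a, b, c; };   // 12 bytes: returned in memory
 * The first comes back in registers, the second via memory, as does anything
 * whose size int_size_in_bytes() reports as -1 (variable-sized).
 */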
+
+
+/* Emit an error message when we're in an asm, and a fatal error for "normal"
+/* Emit an error message when we're in an asm, and a fatal error for "normal"
+ * insn.  Formatted output isn't easily implemented, since we use output operand
+ * insn.  Formatted output isn't easily implemented, since we use output operand
+ * lossage to output the actual message and handle the categorization of the
+ * lossage to output the actual message and handle the categorization of the
+ * error.  */
+ * error.  */
+
+
+static void
+static void
+zip_operand_lossage(const char *msgid, rtx op) {
+zip_operand_lossage(const char *msgid, rtx op) {
+       debug_rtx(op);
+       debug_rtx(op);
+       zip_debug_rtx(op);
+       zip_debug_rtx(op);
+       output_operand_lossage("%s", msgid);
+       output_operand_lossage("%s", msgid);
+}
+}
+
+
+/* The PRINT_OPERAND_ADDRESS worker.   */
+/* The PRINT_OPERAND_ADDRESS worker.   */
+void
+void
+zip_print_operand_address(FILE *file, rtx x) {
+zip_print_operand_address(FILE *file, rtx x) {
+       ZIPDEBUGFLAG(dbg, false);
+       ZIPDEBUGFLAG(dbg, false);
+
+
+       if (dbg) zip_debug_rtx(x);
+       if (dbg) zip_debug_rtx(x);
+       switch(GET_CODE(x)) {
+       switch(GET_CODE(x)) {
+               case REG:
+               case REG:
+                       gcc_assert(is_ZIP_REG(REGNO(x)));
+                       gcc_assert(is_ZIP_REG(REGNO(x)));
+                       gcc_assert(REGNO(x) < 16);
+                       gcc_assert(REGNO(x) < 16);
+                       fprintf(file, "(%s)", reg_names[REGNO(x)]);
+                       fprintf(file, "(%s)", reg_names[REGNO(x)]);
+                       break;
+                       break;
+               case SYMBOL_REF:
+               case SYMBOL_REF:
+                       fprintf(file, "%s", XSTR(x,0));
+                       fprintf(file, "%s", XSTR(x,0));
+                       break;
+                       break;
+               case LABEL_REF:
+               case LABEL_REF:
+                       x = LABEL_REF_LABEL(x);
+                       x = LABEL_REF_LABEL(x);
+               case CODE_LABEL:
+               case CODE_LABEL:
+                       { char buf[256];
+                       { char buf[256];
+                       ASM_GENERATE_INTERNAL_LABEL(buf, "L", CODE_LABEL_NUMBER(x));
+                       ASM_GENERATE_INTERNAL_LABEL(buf, "L", CODE_LABEL_NUMBER(x));
+#ifdef ASM_OUTPUT_LABEL_REF
+#ifdef ASM_OUTPUT_LABEL_REF
+                       ASM_OUTPUT_LABEL_REF(file, buf);
+                       ASM_OUTPUT_LABEL_REF(file, buf);
+#else
+#else
+                       assemble_name(file, buf);
+                       assemble_name(file, buf);
+#endif
+#endif
+                       }
+                       }
+                       break;
+                       break;
+               case PLUS:
+               case PLUS:
+                       if (!REG_P(XEXP(x, 0))) {
+                       if (!REG_P(XEXP(x, 0))) {
+                               fprintf(stderr, "Unsupported address construct\n");
+                               fprintf(stderr, "Unsupported address construct\n");
+                               zip_debug_rtx(x);
+                               zip_debug_rtx(x);
+                               abort();
+                               abort();
+                       } gcc_assert(is_ZIP_REG(REGNO(XEXP(x,0))));
+                       } gcc_assert(is_ZIP_REG(REGNO(XEXP(x,0))));
+                       gcc_assert(REGNO(XEXP(x,0))<16);
+                       gcc_assert(REGNO(XEXP(x,0))<16);
+                       if (CONST_INT_P(XEXP(x, 1))) {
+                       if (CONST_INT_P(XEXP(x, 1))) {
+                               if (INTVAL(XEXP(x,1))!=0) {
+                               if (INTVAL(XEXP(x,1))!=0) {
+                                       fprintf(file, "%ld(%s)",
+                                       fprintf(file, "%ld(%s)",
+                                       (long)INTVAL(XEXP(x, 1)),
+                                       (long)INTVAL(XEXP(x, 1)),
+                                       reg_names[REGNO(XEXP(x, 0))]);
+                                       reg_names[REGNO(XEXP(x, 0))]);
+                               } else {
+                               } else {
+                                       fprintf(file, "(%s)",
+                                       fprintf(file, "(%s)",
+                                       reg_names[REGNO(XEXP(x, 0))]);
+                                       reg_names[REGNO(XEXP(x, 0))]);
+                               }
+                               }
+                       } else if (GET_CODE(XEXP(x,1)) == SYMBOL_REF) {
+                       } else if (GET_CODE(XEXP(x,1)) == SYMBOL_REF) {
+                               fprintf(file, "%s(%s)", XSTR(x,0),
+                               fprintf(file, "%s(%s)", XSTR(x,0),
+                                       reg_names[REGNO(XEXP(x, 0))]);
+                                       reg_names[REGNO(XEXP(x, 0))]);
+                       } else if ((GET_CODE(XEXP(x, 1)) == MINUS)
+                       } else if ((GET_CODE(XEXP(x, 1)) == MINUS)
+                               && (GET_CODE(XEXP(XEXP(x, 1), 0))==SYMBOL_REF)
+                               && (GET_CODE(XEXP(XEXP(x, 1), 0))==SYMBOL_REF)
+                               && (GET_CODE(XEXP(XEXP(x, 1), 1))==SYMBOL_REF)) {
+                               && (GET_CODE(XEXP(XEXP(x, 1), 1))==SYMBOL_REF)) {
+                               fprintf(file, "%s-%s(%s)",
+                               fprintf(file, "%s-%s(%s)",
+                                       XSTR(XEXP(XEXP(x, 1),0),0),
+                                       XSTR(XEXP(XEXP(x, 1),0),0),
+                                       XSTR(XEXP(XEXP(x, 1),1),0),
+                                       XSTR(XEXP(XEXP(x, 1),1),0),
+                                       reg_names[REGNO(XEXP(x, 0))]);
+                                       reg_names[REGNO(XEXP(x, 0))]);
+                       } else
+                       } else
+                               fprintf(file, "#INVALID(%s)",
+                               fprintf(file, "#INVALID(%s)",
+                                       reg_names[REGNO(XEXP(x, 0))]);
+                                       reg_names[REGNO(XEXP(x, 0))]);
+                       /*
+                       /*
+                       else if (GET_CODE(XEXP(addr, 1)) == LABEL)
+                       else if (GET_CODE(XEXP(addr, 1)) == LABEL)
+                               fprintf(file, "%s(%s)",
+                               fprintf(file, "%s(%s)",
+                                       GET_CODE(XEXP(addr, 1)),
+                                       GET_CODE(XEXP(addr, 1)),
+                                       reg_names[REGNO(GET_CODE(XEXP(addr, 0)))]);
+                                       reg_names[REGNO(GET_CODE(XEXP(addr, 0)))]);
+                       else if ((GET_CODE(XEXP(addr, 1)) == MINUS)
+                       else if ((GET_CODE(XEXP(addr, 1)) == MINUS)
+                               && (GET_CODE(XEXP(GET_CODE(XEXP(addr, 1)), 0))==LABEL)
+                               && (GET_CODE(XEXP(GET_CODE(XEXP(addr, 1)), 0))==LABEL)
+                               && (GET_CODE(XEXP(GET_CODE(XEXP(addr, 1)), 1))==LABEL)) {
+                               && (GET_CODE(XEXP(GET_CODE(XEXP(addr, 1)), 1))==LABEL)) {
+                               fprintf(file, "%s-%s(%s)",
+                               fprintf(file, "%s-%s(%s)",
+                                       reg_names[REGNO(GET_CODE(XEXP(addr, 0)))]);
+                                       reg_names[REGNO(GET_CODE(XEXP(addr, 0)))]);
+                                       reg_names[REGNO(GET_CODE(XEXP(addr, 0)))]);
+                                       reg_names[REGNO(GET_CODE(XEXP(addr, 0)))]);
+                                       reg_names[REGNO(GET_CODE(XEXP(addr, 0)))]);
+                                       reg_names[REGNO(GET_CODE(XEXP(addr, 0)))]);
+                       }
+                       }
+                       */
+                       */
+                       break;
+                       break;
+               // We don't support direct memory addressing within our
+               // We don't support direct memory addressing within our
+               // instruction set, even though the instructions themselves
+               // instruction set, even though the instructions themselves
+               // would support direct memory addressing of the lower 18 bits
+               // would support direct memory addressing of the lower 18 bits
+               // of memory space.
+               // of memory space.
+               case MEM:
+               case MEM:
+                       if (dbg) zip_debug_rtx(x);
+                       if (dbg) zip_debug_rtx(x);
+                       zip_print_operand_address(file, XEXP(x, 0));
+                       zip_print_operand_address(file, XEXP(x, 0));
+                       break;
+                       break;
+               case CONST_INT:
+               case CONST_INT:
+                       fprintf(file, "%ld",(long)INTVAL(x));
+                       fprintf(file, "%ld",(long)INTVAL(x));
+                       break;
+                       break;
+               default:
+               default:
+                       fprintf(stderr, "Unknown address format\n");
+                       fprintf(stderr, "Unknown address format\n");
+                       zip_debug_rtx(x);
+                       zip_debug_rtx(x);
+                       abort(); break;
+                       abort(); break;
+                       // output_addr_const(file, x);
+                       // output_addr_const(file, x);
+               break;
+               break;
+       }
+       }
+}
+}
+
+
+/* The PRINT_OPERAND worker. */
+/* The PRINT_OPERAND worker. */
+
+
+void
+void
+zip_print_operand(FILE *file, rtx x, int code)
+zip_print_operand(FILE *file, rtx x, int code)
+{
+{
+       rtx operand = x;
+       rtx operand = x;
+       int     rgoff = 0;
+       int     rgoff = 0;
+
+
+       // fprintf(file, "Print Operand!\n");
+       // fprintf(file, "Print Operand!\n");
+
+
+       /* New code entries should just be added to the switch below.  If
+       /* New code entries should just be added to the switch below.  If
+        * handling is finished, just return.  If handling was just a
+        * handling is finished, just return.  If handling was just a
+        * modification of the operand, the modified operand should be put in
+        * modification of the operand, the modified operand should be put in
+        * "operand", and then do a break to let default handling
+        * "operand", and then do a break to let default handling
+        * (zero-modifier) output the operand.
+        * (zero-modifier) output the operand.
+        */
+        */
+       switch(code) {
+       switch(code) {
+               case 0:
+               case 0:
+                       /* No code, print as usual. */
+                       /* No code, print as usual. */
+                       break;
+                       break;
+               case 'L':
+               case 'L':
+                       /* Lower of two registers, print one up */
+                       /* Lower of two registers, print one up */
+                       rgoff = 1;
+                       rgoff = 1;
+                       break;
+                       break;
+               case 'R':
+               case 'R':
+               case 'H':
+               case 'H':
+                       /* Higher of a register pair, print normal */
+                       /* Higher of a register pair, print normal */
+                       break;
+                       break;
+
+
+               default:
+               default:
+                       LOSE_AND_RETURN("invalid operand modifier letter", x);
+                       LOSE_AND_RETURN("invalid operand modifier letter", x);
+       }
+       }
+
+
+       /* Print an operand as without a modifier letter. */
+       /* Print an operand as without a modifier letter. */
+       switch (GET_CODE(operand)) {
+       switch (GET_CODE(operand)) {
+       case REG:
+       case REG:
+               if (REGNO(operand)+rgoff >= FIRST_PSEUDO_REGISTER)
+               if (REGNO(operand)+rgoff >= FIRST_PSEUDO_REGISTER)
+                       internal_error("internal error: bad register: %d", REGNO(operand));
+                       internal_error("internal error: bad register: %d", REGNO(operand));
+               fprintf(file, "%s", reg_names[REGNO(operand)+rgoff]);
+               fprintf(file, "%s", reg_names[REGNO(operand)+rgoff]);
+               return;
+               return;
+       case SCRATCH:
+       case SCRATCH:
+               LOSE_AND_RETURN("Need a scratch register", x);
+               LOSE_AND_RETURN("Need a scratch register", x);
+               return;
+               return;
+
+
+       case CODE_LABEL:
+       case CODE_LABEL:
+       case LABEL_REF:
+       case LABEL_REF:
+       case SYMBOL_REF:
+       case SYMBOL_REF:
+       case PLUS:
+       case PLUS:
+               PRINT_OPERAND_ADDRESS(file, operand);
+               PRINT_OPERAND_ADDRESS(file, operand);
+               return;
+               return;
+       case MEM:
+       case MEM:
+               PRINT_OPERAND_ADDRESS(file, XEXP(operand, 0));
+               PRINT_OPERAND_ADDRESS(file, XEXP(operand, 0));
+               return;
+               return;
+
+
+       default:
+       default:
+               /* No need to handle all strange variants, let
+               /* No need to handle all strange variants, let
+                * output_addr_const do it for us.
+                * output_addr_const do it for us.
+                */
+                */
+               if (CONSTANT_P(operand)) {
+               if (CONSTANT_P(operand)) {
+                       output_addr_const(file, operand);
+                       output_addr_const(file, operand);
+                       return;
+                       return;
+               }
+               }
+
+
+               zip_debug_rtx(x);
+               zip_debug_rtx(x);
+               LOSE_AND_RETURN("unexpected operand", x);
+               LOSE_AND_RETURN("unexpected operand", x);
+       }
+       }
+}
+}
+
+
+static bool
+static bool
+zip_frame_pointer_required(void)
+zip_frame_pointer_required(void)
+{
+{
+       // This should really depend upon whether we have variable sized
+       // This should really depend upon whether we have variable sized
+       // arguments in our frame or not.  Once this fails, let's look
+       // arguments in our frame or not.  Once this fails, let's look
+       // at what the problem was and then whether or not we can detect
+       // at what the problem was and then whether or not we can detect
+       // it.
+       // it.
+       //
+       //
+       // Use a GCC global to determine our answer
+       // Use a GCC global to determine our answer
+       if (cfun->calls_alloca)
+       if (cfun->calls_alloca)
+               return true;
+               return true;
+
+
+       // If the stack frame is too large to access saved registers with
+       // If the stack frame is too large to access saved registers with
+       // immediate offsets, then we *must* use a frame pointer
+       // immediate offsets, then we *must* use a frame pointer
+       unsigned stack_size = 36;
+       unsigned stack_size = 36;
+       stack_size += (ACCUMULATE_OUTGOING_ARGS ? crtl->outgoing_args_size : 0);
+       stack_size += (ACCUMULATE_OUTGOING_ARGS ? crtl->outgoing_args_size : 0);
+
+
+       //
+       //
+       // if cfun->machine->size_for_adjusting_sp might ever be larger than
+       // if cfun->machine->size_for_adjusting_sp might ever be larger than
+       //       zip_max_anchor_offset, then we MUST have a frame pointer.
+       //       zip_max_anchor_offset, then we MUST have a frame pointer.
+       //
+       //
+       // cfun->machine->size_for_adjusting_sp =
+       // cfun->machine->size_for_adjusting_sp =
+       //              get_frame_size
+       //              get_frame_size
+       //              + saved_reg_size (will always be <= 36)
+       //              + saved_reg_size (will always be <= 36)
+       //              + outgoing_args_size;
+       //              + outgoing_args_size;
+       //              + pretend_args_size;
+       //              + pretend_args_size;
+
+
+       if(crtl->args.pretend_args_size > 0)
+       if(crtl->args.pretend_args_size > 0)
+               stack_size += crtl->args.pretend_args_size;
+               stack_size += crtl->args.pretend_args_size;
+       stack_size += get_frame_size();
+       stack_size += get_frame_size();
+       // Align our attempted stack size
+       // Align our attempted stack size
+       stack_size = ((stack_size+3)&-4);
+       stack_size = ((stack_size+3)&-4);
+
+
+       // Now here's our test
+       // Now here's our test
+       if (stack_size >= zip_max_anchor_offset)
+       if (stack_size >= zip_max_anchor_offset)
+               return true;
+               return true;
+       return (frame_pointer_needed);
+       return (frame_pointer_needed);
+/*
+/*
+*/
+*/
+}
+}
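A quick worked example of the size test above (assuming no outgoing or pretend arguments): with 256 bytes of locals, stack_size = 36 + 256 = 292, well under zip_max_anchor_offset (0x1fff = 8191), so nothing here forces a frame pointer; with 8192 bytes of locals, stack_size = 36 + 8192 = 8228 >= 8191, the saved registers could no longer all be reached with immediate offsets, and a frame pointer is required.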
+
+
+/* Determine whether or not a register needs to be saved on the stack or not.
+/* Determine whether or not a register needs to be saved on the stack or not.
+ */
+ */
+static bool
+static bool
+zip_save_reg(int regno) {
+zip_save_reg(int regno) {
+       if (regno == 0)
+       if (regno == 0)
+               return ((!crtl->is_leaf)
+               return ((!crtl->is_leaf)
+                       ||((df_regs_ever_live_p(0))&&(!call_used_regs[0])));
+                       ||((df_regs_ever_live_p(0))&&(!call_used_regs[0])));
+       else if ((regno == zip_GOT)&&(!ZIP_PIC))
+       else if ((regno == zip_GOT)&&(!ZIP_PIC))
+               return  ((df_regs_ever_live_p(regno))
+               return  ((df_regs_ever_live_p(regno))
+                               &&(!call_used_regs[regno]));
+                               &&(!call_used_regs[regno]));
+       else if (regno == zip_FP)
+       else if (regno == zip_FP)
+               return((zip_frame_pointer_required())||((df_regs_ever_live_p(regno))
+               return((zip_frame_pointer_required())||((df_regs_ever_live_p(regno))
+                               &&(!call_used_regs[regno])));
+                               &&(!call_used_regs[regno])));
+       else if (regno < zip_FP)
+       else if (regno < zip_FP)
+               return  ((df_regs_ever_live_p(regno))
+               return  ((df_regs_ever_live_p(regno))
+                               &&(!call_used_regs[regno]));
+                               &&(!call_used_regs[regno]));
+       return false;
+       return false;
+}
+}
+
+
+/* Compute the size of the local area and the size to be adjusted by the
+/* Compute the size of the local area and the size to be adjusted by the
+ * prologue and epilogue.
+ * prologue and epilogue.
+ *
+ *
+ * Here's what we are looking at (top is the current, bottom is the last ...)
+ * Here's what we are looking at (top is the current, bottom is the last ...)
+ *
+ *
+ *     Stack Pointer ->
+ *     Stack Pointer ->
+ *                     Outgoing arguments
+ *                     Outgoing arguments
+ *                     Local variables (could be variable size)
+ *                     Local variables (could be variable size)
+ *     Frame Pointer ->        (= Stack Pointer + sp_fp_offset)
+ *     Frame Pointer ->        (= Stack Pointer + sp_fp_offset)
+ *                     Saved return address, if saved
+ *                     Saved return address, if saved
+ *                     Other Saved registers
+ *                     Other Saved registers
+ *                     Saved frame pointer (if used)
+ *                     Saved frame pointer (if used)
+ *                     Saved R12, if used
+ *                     Saved R12, if used
+ *                     (Stack pointer is not saved)
+ *                     (Stack pointer is not saved)
+ *                     (PRETEND-ARGS)
+ *                     (PRETEND-ARGS)
+ *     Original stack pointer ->       (= Stack_Pointer +size_for_adjusting_sp)
+ *     Original stack pointer ->       (= Stack_Pointer +size_for_adjusting_sp)
+ *                     Called arguments (not passed in registers)
+ *                     Called arguments (not passed in registers)
+ *                     Return arguments (not R1, args.pretend_args_size)
+ *                     Return arguments (not R1, args.pretend_args_size)
+ *             (Prior function's stack frame ... )
+ *             (Prior function's stack frame ... )
+ *
+ *
+ */
+ */
+static void
+static void
+zip_compute_frame(void) {
+zip_compute_frame(void) {
+       int     regno;
+       int     regno;
+       int     args_size;
+       int     args_size;
+       ZIPDEBUGFLAG(dbg, false);
+       ZIPDEBUGFLAG(dbg, false);
+
+
+       if (dbg) fprintf(stderr, "ZIP-COMPUTE-FRAME: %s\n", current_function_name());
+       if (dbg) fprintf(stderr, "ZIP-COMPUTE-FRAME: %s\n", current_function_name());
+       // gcc_assert(crtl);
+       // gcc_assert(crtl);
+       gcc_assert(cfun);
+       gcc_assert(cfun);
+       gcc_assert(cfun->machine);
+       gcc_assert(cfun->machine);
+
+
+       args_size=(ACCUMULATE_OUTGOING_ARGS ? crtl->outgoing_args_size : 0);
+       args_size=(ACCUMULATE_OUTGOING_ARGS ? crtl->outgoing_args_size : 0);
+
+
+       if(crtl->args.pretend_args_size > 0) {
+       if(crtl->args.pretend_args_size > 0) {
+               args_size += crtl->args.pretend_args_size;
+               args_size += crtl->args.pretend_args_size;
+               if (dbg) fprintf(stderr, "%s pretend_args_size : %d\n", current_function_name(),
+               if (dbg) fprintf(stderr, "%s pretend_args_size : %d\n", current_function_name(),
+                       crtl->args.pretend_args_size);
+                       crtl->args.pretend_args_size);
+               cfun->machine->pretend_size = crtl->args.pretend_args_size;
+               cfun->machine->pretend_size = crtl->args.pretend_args_size;
+       }
+       }
+
+
+       cfun->machine->local_vars_size = get_frame_size();
+       cfun->machine->local_vars_size = get_frame_size();
+
+
+       // Force frame alignment of the local variable section
+       // Force frame alignment of the local variable section
+       cfun->machine->local_vars_size += 3;
+       cfun->machine->local_vars_size += 3;
+       cfun->machine->local_vars_size &= -4;
+       cfun->machine->local_vars_size &= -4;
+
+
+       // Save callee-saved registers.
+       // Save callee-saved registers.
+       cfun->machine->saved_reg_size = 0;
+       cfun->machine->saved_reg_size = 0;
+       for(regno=0; regno < FIRST_PSEUDO_REGISTER; regno++) {
+       for(regno=0; regno < FIRST_PSEUDO_REGISTER; regno++) {
+               if (zip_save_reg(regno))
+               if (zip_save_reg(regno))
+                       cfun->machine->saved_reg_size += 4;
+                       cfun->machine->saved_reg_size += 4;
+       }
+       }
+
+
+       cfun->machine->fp_needed = (zip_frame_pointer_required());
+       cfun->machine->fp_needed = (zip_frame_pointer_required());
+
+
+       if ((cfun->machine->fp_needed)&&
+       if ((cfun->machine->fp_needed)&&
+                       (!df_regs_ever_live_p(zip_FP))) {
+                       (!df_regs_ever_live_p(zip_FP))) {
+               cfun->machine->saved_reg_size += 4;
+               cfun->machine->saved_reg_size += 4;
+       }
+       }
+
+
+       cfun->machine->sp_fp_offset = crtl->outgoing_args_size
+       cfun->machine->sp_fp_offset = crtl->outgoing_args_size
+                               + cfun->machine->local_vars_size;
+                               + cfun->machine->local_vars_size;
+       cfun->machine->size_for_adjusting_sp = cfun->machine->local_vars_size
+       cfun->machine->size_for_adjusting_sp = cfun->machine->local_vars_size
+                       + cfun->machine->saved_reg_size
+                       + cfun->machine->saved_reg_size
+                       + args_size;
+                       + args_size;
+       if(dbg) {
+       if(dbg) {
+               fprintf(stderr, "\t---- STACK PTR ----\n");
+               fprintf(stderr, "\t---- STACK PTR ----\n");
+               fprintf(stderr, "\tOUTGOIN-SIZE: %d\n",
+               fprintf(stderr, "\tOUTGOIN-SIZE: %d\n",
+                       crtl->outgoing_args_size);
+                       crtl->outgoing_args_size);
+               fprintf(stderr, "\tLOCALS-SIZE : %d\n",
+               fprintf(stderr, "\tLOCALS-SIZE : %d\n",
+                       cfun->machine->local_vars_size);
+                       cfun->machine->local_vars_size);
+               fprintf(stderr, "\t---- FRAME PTR ----%s\n",
+               fprintf(stderr, "\t---- FRAME PTR ----%s\n",
+                       cfun->machine->fp_needed?"":" (Eliminated)");
+                       cfun->machine->fp_needed?"":" (Eliminated)");
+               fprintf(stderr, "\tREGISTERS   : %d\n",
+               fprintf(stderr, "\tREGISTERS   : %d\n",
+                       cfun->machine->saved_reg_size);
+                       cfun->machine->saved_reg_size);
+               fprintf(stderr, "\tPRETEND SIZE: %d\n",
+               fprintf(stderr, "\tPRETEND SIZE: %d\n",
+                       crtl->args.pretend_args_size);
+                       crtl->args.pretend_args_size);
+               fprintf(stderr, "\t---- ARG PTR (Original SP, should be eliminated) ----\n");
+               fprintf(stderr, "\t---- ARG PTR (Original SP, should be eliminated) ----\n");
+               fprintf(stderr, "\t----\n");
+               fprintf(stderr, "\t----\n");
+               fprintf(stderr, "\tARGS-SIZE   : %d\n", args_size);
+               fprintf(stderr, "\tARGS-SIZE   : %d\n", args_size);
+               fprintf(stderr, "\tSP_FP_OFFSET: %d\n",
+               fprintf(stderr, "\tSP_FP_OFFSET: %d\n",
+                       cfun->machine->sp_fp_offset);
+                       cfun->machine->sp_fp_offset);
+               fprintf(stderr, "\tSP-ADJUSTMNT: %d\n",
+               fprintf(stderr, "\tSP-ADJUSTMNT: %d\n",
+                       cfun->machine->size_for_adjusting_sp);
+                       cfun->machine->size_for_adjusting_sp);
+       }
+       }
+}
+}
+
+
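+/* A worked example of the arithmetic above (all of the numbers here are
+ * hypothetical, chosen only for illustration): consider a non-leaf function
+ * with 21 bytes of local variables, 8 bytes of outgoing arguments, no pretend
+ * args, and two registers to save (R0, because the function is not a leaf,
+ * plus one callee-saved register that is live across a call).  The locals are
+ * rounded up to 24 bytes by the +3/&-4 step, so
+ *
+ *	sp_fp_offset          =  8 + 24     = 32
+ *	saved_reg_size        =  2 *  4     =  8
+ *	size_for_adjusting_sp = 24 + 8 + 8  = 40
+ *
+ * The prologue therefore drops SP by 40, the two saved registers land at
+ * 32(SP) and 36(SP), and the caller's stack pointer sits at SP+40.
+ */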
+void
+void
+zip_save_registers(rtx basereg_rtx, int sp_offset_to_first_register) {
+zip_save_registers(rtx basereg_rtx, int sp_offset_to_first_register) {
+       rtx     insn;
+       rtx     insn;
+       ZIPDEBUGFLAG(dbg, false);
+       ZIPDEBUGFLAG(dbg, false);
+
+
+	// The frame has already been computed before this function is called:
+	// The frame has already been computed before this function is called:
+       //
+       //
+       // zip_compute_frame();
+       // zip_compute_frame();
+       if (dbg)  fprintf(stderr, "PROLOGUE::SAVE-REGISTER\n");
+       if (dbg)  fprintf(stderr, "PROLOGUE::SAVE-REGISTER\n");
+
+
+       int offset = 0, regno;
+       int offset = 0, regno;
+       for(regno=0; regno < FIRST_PSEUDO_REGISTER; regno++) {
+       for(regno=0; regno < FIRST_PSEUDO_REGISTER; regno++) {
+               if (zip_save_reg(regno)) {
+               if (zip_save_reg(regno)) {
+                       if (dbg) fprintf(stderr,
+                       if (dbg) fprintf(stderr,
+                               "PROLOGUE::SAVE-REGISTER Saving R%d in %d+%d(SP)\n",
+                               "PROLOGUE::SAVE-REGISTER Saving R%d in %d+%d(SP)\n",
+                               regno, sp_offset_to_first_register, offset);
+                               regno, sp_offset_to_first_register, offset);
+                       insn=emit_insn(gen_movsi_sto_off(
+                       insn=emit_insn(gen_movsi_sto_off(
+                               basereg_rtx,
+                               basereg_rtx,
+                               GEN_INT(sp_offset_to_first_register +offset),
+                               GEN_INT(sp_offset_to_first_register +offset),
+                               gen_rtx_REG(SImode, regno)));
+                               gen_rtx_REG(SImode, regno)));
+                       RTX_FRAME_RELATED_P(insn) = 1;
+                       RTX_FRAME_RELATED_P(insn) = 1;
+                       offset += 4;
+                       offset += 4;
+               }
+               }
+	}
+	}
+	if (dbg)  fprintf(stderr, "%d bytes of registers saved%s\n", offset,
+	if (dbg)  fprintf(stderr, "%d bytes of registers saved%s\n", offset,
+		(crtl->saves_all_registers)?", should be all of them":", less than all");
+		(crtl->saves_all_registers)?", should be all of them":", less than all");
+
+
+}
+}
+
+
+/*
+/*
+ * zip_expand_small_prologue()
+ * zip_expand_small_prologue()
+ *
+ *
+ * To be used when the sp_fp_offset is less than zip_max_opb_offset.
+ * To be used when the sp_fp_offset is less than zip_max_opb_offset.
+ *
+ *
+ *
+ *
+ * Approach:
+ * Approach:
+ *     SUB size_for_adjusting_sp,SP
+ *     SUB size_for_adjusting_sp,SP
+ *     SW REG,0(SP)
+ *     SW REG,0(SP)
+ *     SW REG,4(SP)
+ *     SW REG,4(SP)
+ *     SW REG,8(SP)
+ *     SW REG,8(SP)
+ *     ....
+ *     ....
+ *     SW REG,#(SP)
+ *     SW REG,#(SP)
+ *
+ *
+ * and if we need a frame register, we'll either do ...
+ * and if we need a frame register, we'll either do ...
+ *     MOV sp_fp_offset+SP,FP
+ *     MOV sp_fp_offset+SP,FP
+ * or if the offset is too large, we'll do ...
+ * or if the offset is too large, we'll do ...
+ *     MOV SP,FP
+ *     MOV SP,FP
+ *     ADD sp_fp_offset,FP
+ *     ADD sp_fp_offset,FP
+ *
+ *
+ */
+ */
+void
+void
+zip_expand_small_prologue(void) {
+zip_expand_small_prologue(void) {
+       ZIPDEBUGFLAG(dbg, false);
+       ZIPDEBUGFLAG(dbg, false);
+       rtx     insn;
+       rtx     insn;
+
+
+       zip_compute_frame();
+       zip_compute_frame();
+
+
+       if (dbg)  fprintf(stderr, "PROLOGUE:::EXPAND-SMALL-PROLOGUE(SP-FP offset is %d)\n",
+       if (dbg)  fprintf(stderr, "PROLOGUE:::EXPAND-SMALL-PROLOGUE(SP-FP offset is %d)\n",
+               cfun->machine->sp_fp_offset);
+               cfun->machine->sp_fp_offset);
+
+
+       insn = emit_insn(gen_subsi3(stack_pointer_rtx, stack_pointer_rtx,
+       insn = emit_insn(gen_subsi3(stack_pointer_rtx, stack_pointer_rtx,
+                       gen_int_mode(cfun->machine->size_for_adjusting_sp,
+                       gen_int_mode(cfun->machine->size_for_adjusting_sp,
+                               SImode)));
+                               SImode)));
+       RTX_FRAME_RELATED_P(insn) = 1;
+       RTX_FRAME_RELATED_P(insn) = 1;
+
+
+       zip_save_registers(stack_pointer_rtx, cfun->machine->sp_fp_offset);
+       zip_save_registers(stack_pointer_rtx, cfun->machine->sp_fp_offset);
+
+
+       if (cfun->machine->fp_needed) {
+       if (cfun->machine->fp_needed) {
+               if (dbg)  fprintf(stderr, "PROLOGUE:::EXPAND-SMALL-PROLOGUE(FP-NEEDED)\n");
+               if (dbg)  fprintf(stderr, "PROLOGUE:::EXPAND-SMALL-PROLOGUE(FP-NEEDED)\n");
+               if (dbg) zip_debug_rtx(stack_pointer_rtx);
+               if (dbg) zip_debug_rtx(stack_pointer_rtx);
+               if (dbg) zip_debug_rtx(frame_pointer_rtx);
+               if (dbg) zip_debug_rtx(frame_pointer_rtx);
+               if (cfun->machine->sp_fp_offset < zip_max_mov_offset) {
+               if (cfun->machine->sp_fp_offset < zip_max_mov_offset) {
+                       if (dbg)  fprintf(stderr,
+                       if (dbg)  fprintf(stderr,
+                               "PROLOGUE:::EXPAND-SMALL-PROLOGUE() "
+                               "PROLOGUE:::EXPAND-SMALL-PROLOGUE() "
+                               "gen_movsi_reg_off(FP, SP, %d), %d < %ld\n",
+                               "gen_movsi_reg_off(FP, SP, %d), %d < %ld\n",
+                               cfun->machine->sp_fp_offset,
+                               cfun->machine->sp_fp_offset,
+                               cfun->machine->sp_fp_offset,
+                               cfun->machine->sp_fp_offset,
+                               zip_max_mov_offset);
+                               zip_max_mov_offset);
+                       insn = emit_insn(gen_movsi_reg_off(frame_pointer_rtx,
+                       insn = emit_insn(gen_movsi_reg_off(frame_pointer_rtx,
+                               stack_pointer_rtx,
+                               stack_pointer_rtx,
+                               GEN_INT(cfun->machine->sp_fp_offset)));
+                               GEN_INT(cfun->machine->sp_fp_offset)));
+                       RTX_FRAME_RELATED_P(insn) = 1;
+                       RTX_FRAME_RELATED_P(insn) = 1;
+               } else {
+               } else {
+                       rtx     fp_rtx;
+                       rtx     fp_rtx;
+
+
+                       fp_rtx = gen_rtx_REG(SImode, zip_FP);
+                       fp_rtx = gen_rtx_REG(SImode, zip_FP);
+
+
+                       insn = emit_insn(gen_movsi(fp_rtx, stack_pointer_rtx));
+                       insn = emit_insn(gen_movsi(fp_rtx, stack_pointer_rtx));
+                       RTX_FRAME_RELATED_P(insn) = 1;
+                       RTX_FRAME_RELATED_P(insn) = 1;
+
+
+                       insn = emit_insn(gen_addsi3(fp_rtx, fp_rtx,
+                       insn = emit_insn(gen_addsi3(fp_rtx, fp_rtx,
+                               GEN_INT(cfun->machine->sp_fp_offset)));
+                               GEN_INT(cfun->machine->sp_fp_offset)));
+                       RTX_FRAME_RELATED_P(insn) = 1;
+                       RTX_FRAME_RELATED_P(insn) = 1;
+               }
+               }
+       }
+       }
+}
+}
+
+
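+/* Continuing the hypothetical 40-byte frame sketched after
+ * zip_compute_frame() (illustrative only), and assuming a frame pointer is
+ * required and that 32 fits within a MOV immediate, the small prologue above
+ * would emit:
+ *
+ *	SUB	40,SP
+ *	SW	R0,32(SP)
+ *	SW	Rx,36(SP)
+ *	MOV	32+SP,FP
+ */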
+/*
+/*
+ * zip_expand_large_prologue()
+ * zip_expand_large_prologue()
+ *
+ *
+ * The prologue function will be called when the size_for_adjusting_sp is too
+ * The prologue function will be called when the size_for_adjusting_sp is too
+ * large to fit into a single OPB-immediate as part of a subtract.
+ * large to fit into a single OPB-immediate as part of a subtract.
+ *
+ *
+ * Approach:
+ * Approach:
+ *     SUB (size_for_adjusting_sp-sp_fp_offset),SP
+ *     SUB (size_for_adjusting_sp-sp_fp_offset),SP
+ *     SW R0,(SP)
+ *     SW R0,(SP)
+ *     SW R5,4(SP)
+ *     SW R5,4(SP)
+ *	SW R6,8(SP)
+ *	SW R6,8(SP)
+ *	SW R7,12(SP)
+ *	SW R7,12(SP)
+ *	...
+ *	...
+ *	SW FP,#(SP)
+ *	SW FP,#(SP)
+ *
+ *
+ *     LDI sp_fp_offset,FP
+ *     LDI sp_fp_offset,FP
+ *     SUB FP,SP
+ *     SUB FP,SP
+ *     ADD SP,FP
+ *     ADD SP,FP
+ */
+ */
+void
+void
+zip_expand_large_prologue(void) {
+zip_expand_large_prologue(void) {
+       ZIPDEBUGFLAG(dbg, false);
+       ZIPDEBUGFLAG(dbg, false);
+       rtx     insn, fp_rtx;
+       rtx     insn, fp_rtx;
+
+
+       gcc_assert(cfun->machine->fp_needed);
+       gcc_assert(cfun->machine->fp_needed);
+
+
+       if (dbg)        fprintf(stderr, "PROLOGUE::expand-large(%d-%d)\n",
+       if (dbg)        fprintf(stderr, "PROLOGUE::expand-large(%d-%d)\n",
+                               cfun->machine->size_for_adjusting_sp,
+                               cfun->machine->size_for_adjusting_sp,
+                               cfun->machine->sp_fp_offset);
+                               cfun->machine->sp_fp_offset);
+       insn = emit_insn(gen_subsi3(stack_pointer_rtx, stack_pointer_rtx,
+       insn = emit_insn(gen_subsi3(stack_pointer_rtx, stack_pointer_rtx,
+               gen_int_mode(cfun->machine->size_for_adjusting_sp
+               gen_int_mode(cfun->machine->size_for_adjusting_sp
+                               -cfun->machine->sp_fp_offset, SImode)));
+                               -cfun->machine->sp_fp_offset, SImode)));
+       RTX_FRAME_RELATED_P(insn) = 1;
+       RTX_FRAME_RELATED_P(insn) = 1;
+
+
+       zip_save_registers(stack_pointer_rtx, 0);
+       zip_save_registers(stack_pointer_rtx, 0);
+
+
+       fp_rtx = gen_rtx_REG(SImode, zip_FP);
+       fp_rtx = gen_rtx_REG(SImode, zip_FP);
+
+
+       insn = emit_insn(gen_movsi(fp_rtx,
+       insn = emit_insn(gen_movsi(fp_rtx,
+               gen_int_mode(cfun->machine->sp_fp_offset, SImode)));
+               gen_int_mode(cfun->machine->sp_fp_offset, SImode)));
+       RTX_FRAME_RELATED_P(insn) = 1;
+       RTX_FRAME_RELATED_P(insn) = 1;
+
+
+       insn = emit_insn(gen_subsi3(stack_pointer_rtx, stack_pointer_rtx,
+       insn = emit_insn(gen_subsi3(stack_pointer_rtx, stack_pointer_rtx,
+                       fp_rtx));
+                       fp_rtx));
+       RTX_FRAME_RELATED_P(insn) = 1;
+       RTX_FRAME_RELATED_P(insn) = 1;
+
+
+       insn = emit_insn(gen_addsi3(fp_rtx, fp_rtx, stack_pointer_rtx));
+       insn = emit_insn(gen_addsi3(fp_rtx, fp_rtx, stack_pointer_rtx));
+       RTX_FRAME_RELATED_P(insn) = 1;
+       RTX_FRAME_RELATED_P(insn) = 1;
+}
+}
+
+
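+/* To see that the sequence above leaves things in the same shape as the small
+ * prologue, take a hypothetical over-sized frame with
+ * size_for_adjusting_sp = 70000 and sp_fp_offset = 69000 (numbers are
+ * illustrative only):
+ *
+ *	SP' = SP - (70000-69000) = SP - 1000	; registers saved at 0(SP')
+ *	FP  = 69000
+ *	SP''= SP' - FP           = SP - 70000	; full frame now allocated
+ *	FP  = FP + SP''          = SP - 1000	; i.e. SP'' + sp_fp_offset
+ *
+ * so FP again ends up exactly sp_fp_offset above the final stack pointer,
+ * with the saved registers starting right at the frame pointer.
+ */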
+void
+void
+zip_expand_prologue(void) {
+zip_expand_prologue(void) {
+       ZIPDEBUGFLAG(dbg, false);
+       ZIPDEBUGFLAG(dbg, false);
+
+
+       zip_compute_frame();
+       zip_compute_frame();
+
+
+       if (dbg)  fprintf(stderr, "PROLOGUE: Computing Prologue instructions\n");
+       if (dbg)  fprintf(stderr, "PROLOGUE: Computing Prologue instructions\n");
+       if (dbg)  fprintf(stderr, "PROLOGUE: SP-FP offset is %d\n",
+       if (dbg)  fprintf(stderr, "PROLOGUE: SP-FP offset is %d\n",
+                       cfun->machine->sp_fp_offset);
+                       cfun->machine->sp_fp_offset);
+       if (cfun->machine->size_for_adjusting_sp != 0) {
+       if (cfun->machine->size_for_adjusting_sp != 0) {
+               if (cfun->machine->size_for_adjusting_sp <= zip_max_anchor_offset) {
+               if (cfun->machine->size_for_adjusting_sp <= zip_max_anchor_offset) {
+                       if (dbg)  fprintf(stderr, "PROLOGUE: "
+                       if (dbg)  fprintf(stderr, "PROLOGUE: "
+                                       "%d <= %ld, so going small\n",
+                                       "%d <= %ld, so going small\n",
+                                       cfun->machine->size_for_adjusting_sp,
+                                       cfun->machine->size_for_adjusting_sp,
+					zip_max_anchor_offset);
+					zip_max_anchor_offset);
+                       zip_expand_small_prologue();
+                       zip_expand_small_prologue();
+               } else {
+               } else {
+                       zip_expand_large_prologue();
+                       zip_expand_large_prologue();
+               }
+               }
+       }
+       }
+}
+}
+
+
+int
+int
+zip_use_return_insn(void)
+zip_use_return_insn(void)
+{
+{
+       if ((!reload_completed)||(cfun->machine->fp_needed)
+       if ((!reload_completed)||(cfun->machine->fp_needed)
+                       ||(get_frame_size()!=0)) {
+                       ||(get_frame_size()!=0)) {
+               // If R0 ever gets pushed to the stack, then we cannot
+               // If R0 ever gets pushed to the stack, then we cannot
+               // use a master return from anywhere.  We need to clean up the
+               // use a master return from anywhere.  We need to clean up the
+               // stack first.
+               // stack first.
+               if ((!crtl->is_leaf)||((df_regs_ever_live_p(0))
+               if ((!crtl->is_leaf)||((df_regs_ever_live_p(0))
+                                               &&(!call_used_regs[0]))) {
+                                               &&(!call_used_regs[0]))) {
+                       return 0;
+                       return 0;
+               }
+               }
+       }
+       }
+       zip_compute_frame();
+       zip_compute_frame();
+       return (cfun->machine->size_for_adjusting_sp == 0)?1:0;
+       return (cfun->machine->size_for_adjusting_sp == 0)?1:0;
+}
+}
+
+
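+/* In short, the bare return insn is only usable for a leaf function that
+ * needs no stack adjustment at all -- something along the lines of
+ * int add(int a, int b) { return a+b; } -- anything that builds a frame or
+ * spills R0 must go through the full epilogue expansion below.
+ */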
+/* As per the notes in m68k.c (quoting): the function epilogue should not depend
+/* As per the notes in m68k.c (quoting): the function epilogue should not depend
+ * upon the current stack pointer.  It should use the frame pointer only,
+ * upon the current stack pointer.  It should use the frame pointer only,
+ * if there is a frame pointer.  This is mandatory because of alloca; we also
+ * if there is a frame pointer.  This is mandatory because of alloca; we also
+ * take advantage of it to omit stack adjustments before returning ...
+ * take advantage of it to omit stack adjustments before returning ...
+ *
+ *
+ * Let's see if we can use their approach here.
+ * Let's see if we can use their approach here.
+ *
+ *
+ * We can't.  Consider our choices:
+ * We can't.  Consider our choices:
+ *     LW (FP),R0
+ *     LW (FP),R0
+ *     LW 4(FP),R4
+ *     LW 4(FP),R4
+ *     LW 8(FP),R5
+ *     LW 8(FP),R5
+ *     LW 12(FP),R6
+ *     LW 12(FP),R6
+ *     LW 16(FP),FP
+ *     LW 16(FP),FP
+ *     ... Then what is the stack pointer?
+ *     ... Then what is the stack pointer?
+ * or
+ * or
+ *     LW (FP),R0
+ *     LW (FP),R0
+ *     LW 4(FP),R4
+ *     LW 4(FP),R4
+ *     LW 8(FP),R5
+ *     LW 8(FP),R5
+ *     LW 12(FP),R6
+ *     LW 12(FP),R6
+ *     MOV FP,SP
+ *     MOV FP,SP
+ *     LW 16(SP),FP
+ *     LW 16(SP),FP
+ *     ... Which suffers unnecessary pipeline stalls, and certainly doesn't
+ *     ... Which suffers unnecessary pipeline stalls, and certainly doesn't
+ *     exploit our pipeline memory function
+ *     exploit our pipeline memory function
+ * or
+ * or
+ *     MOV FP,SP
+ *     MOV FP,SP
+ *     LW (SP),R0
+ *     LW (SP),R0
+ *     LW 4(SP),R4
+ *     LW 4(SP),R4
+ *     LW 8(SP),R5
+ *     LW 8(SP),R5
+ *     LW 12(SP),R6
+ *     LW 12(SP),R6
+ *     LW 16(SP),FP
+ *     LW 16(SP),FP
+ * Which will be our choice.  Note that we do use the stack pointer, eventually.
+ * Which will be our choice.  Note that we do use the stack pointer, eventually.
+ *
+ *
+ */
+ */
+void
+void
+zip_expand_epilogue(void) {
+zip_expand_epilogue(void) {
+       int     regno, offset;
+       int     regno, offset;
+       ZIPDEBUGFLAG(dbg, false);
+       ZIPDEBUGFLAG(dbg, false);
+       rtx     insn;
+       rtx     insn;
+
+
+       zip_compute_frame();
+       zip_compute_frame();
+
+
+       if (dbg) fprintf(stderr, "EPILOG::\n");
+       if (dbg) fprintf(stderr, "EPILOG::\n");
+       if (cfun->machine->fp_needed) {
+       if (cfun->machine->fp_needed) {
+		// This is handled specially--if you can't trust the stack pointer
+		// This is handled specially--if you can't trust the stack pointer
+               // enough so that you must have a frame pointer, then you can't
+               // enough so that you must have a frame pointer, then you can't
+               // trust its offset enough to restore from it.  Hence, we start
+               // trust its offset enough to restore from it.  Hence, we start
+               // by moving the frame pointer to the stack pointer to recover
+               // by moving the frame pointer to the stack pointer to recover
+               // the stack pointer back to a usable value.
+               // the stack pointer back to a usable value.
+               if (dbg) fprintf(stderr, "EPILOG::Moving frame pointer to stack register\n");
+               if (dbg) fprintf(stderr, "EPILOG::Moving frame pointer to stack register\n");
+               insn = emit_insn(gen_movsi_raw(stack_pointer_rtx, frame_pointer_rtx));
+               insn = emit_insn(gen_movsi_raw(stack_pointer_rtx, frame_pointer_rtx));
+               RTX_FRAME_RELATED_P(insn) = 1;
+               RTX_FRAME_RELATED_P(insn) = 1;
+       }
+       }
+
+
+       if (cfun->machine->saved_reg_size != 0) {
+       if (cfun->machine->saved_reg_size != 0) {
+               if (cfun->machine->fp_needed)
+               if (cfun->machine->fp_needed)
+                       offset = 0;
+                       offset = 0;
+               else
+               else
+                       offset = cfun->machine->sp_fp_offset;
+                       offset = cfun->machine->sp_fp_offset;
+               if (dbg) fprintf(stderr, "EPILOG::Saved_REG_Size = %d\n", cfun->machine->saved_reg_size);
+               if (dbg) fprintf(stderr, "EPILOG::Saved_REG_Size = %d\n", cfun->machine->saved_reg_size);
+               for(regno=0; regno < FIRST_PSEUDO_REGISTER; regno++) {
+               for(regno=0; regno < FIRST_PSEUDO_REGISTER; regno++) {
+                       if (zip_save_reg(regno)) {
+                       if (zip_save_reg(regno)) {
+                               if (dbg) fprintf(stderr, "EPILOG::RESTORING R%d from SP+%d\n", regno, offset);
+                               if (dbg) fprintf(stderr, "EPILOG::RESTORING R%d from SP+%d\n", regno, offset);
+                               rtx reg = gen_rtx_REG(SImode, regno);
+                               rtx reg = gen_rtx_REG(SImode, regno);
+                               insn = emit_insn(gen_movsi_lod_off(
+                               insn = emit_insn(gen_movsi_lod_off(
+                                               reg,
+                                               reg,
+                                               stack_pointer_rtx,
+                                               stack_pointer_rtx,
+                                               GEN_INT(offset)));
+                                               GEN_INT(offset)));
+                               add_reg_note(insn, REG_CFA_RESTORE, reg);
+                               add_reg_note(insn, REG_CFA_RESTORE, reg);
+                               RTX_FRAME_RELATED_P(insn) = 1;
+                               RTX_FRAME_RELATED_P(insn) = 1;
+                               offset += 4;
+                               offset += 4;
+                       }
+                       }
+               }
+               }
+       }
+       }
+
+
+       if (cfun->machine->fp_needed) {
+       if (cfun->machine->fp_needed) {
+		// Restore the stack pointer to its original value; the
+		// Restore the stack pointer to its original value; the
+		// remaining adjustment is the distance from the frame pointer
+		// remaining adjustment is the distance from the frame pointer
+		// to the original stack pointer.
+		// to the original stack pointer.
+               insn = emit_insn(gen_addsi3(stack_pointer_rtx,
+               insn = emit_insn(gen_addsi3(stack_pointer_rtx,
+                       stack_pointer_rtx,
+                       stack_pointer_rtx,
+                       GEN_INT(cfun->machine->size_for_adjusting_sp
+                       GEN_INT(cfun->machine->size_for_adjusting_sp
+                               -cfun->machine->sp_fp_offset)));
+                               -cfun->machine->sp_fp_offset)));
+               RTX_FRAME_RELATED_P(insn) = 1;
+               RTX_FRAME_RELATED_P(insn) = 1;
+       } else {
+       } else {
+               // else now the difference is between the stack pointer and
+               // else now the difference is between the stack pointer and
+               // the original stack pointer.
+               // the original stack pointer.
+               if (dbg) fprintf(stderr, "EPILOG::ADDSI3(StackPtr, %d)\n",
+               if (dbg) fprintf(stderr, "EPILOG::ADDSI3(StackPtr, %d)\n",
+                               cfun->machine->size_for_adjusting_sp);
+                               cfun->machine->size_for_adjusting_sp);
+               insn = emit_insn(gen_addsi3(stack_pointer_rtx, stack_pointer_rtx,
+               insn = emit_insn(gen_addsi3(stack_pointer_rtx, stack_pointer_rtx,
+                       GEN_INT(cfun->machine->size_for_adjusting_sp)));
+                       GEN_INT(cfun->machine->size_for_adjusting_sp)));
+               RTX_FRAME_RELATED_P(insn) = 1;
+               RTX_FRAME_RELATED_P(insn) = 1;
+       }
+       }
+       if (dbg) fprintf(stderr, "EPILOG::EMITTING-RETURN\n");
+       if (dbg) fprintf(stderr, "EPILOG::EMITTING-RETURN\n");
+
+
+       // The return RTX is not allowed to be frame related
+       // The return RTX is not allowed to be frame related
+       insn = emit_jump_insn(ret_rtx);
+       insn = emit_jump_insn(ret_rtx);
+       // RTX_FRAME_RELATED_P(insn) = 1;
+       // RTX_FRAME_RELATED_P(insn) = 1;
+}
+}
+
+
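+/* For the hypothetical 40-byte frame used in the earlier examples, with the
+ * frame pointer in use, the epilogue above produces (illustrative only):
+ *
+ *	MOV	FP,SP		; recover a trustworthy stack pointer
+ *	LW	(SP),R0
+ *	LW	4(SP),Rx
+ *	ADD	8,SP		; 40 - 32: back to the caller's stack pointer
+ *	(return via the link register)
+ *
+ * which is the third of the restore sequences discussed above.
+ */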
+void
+void
+zip_sibcall_epilogue(void) {
+zip_sibcall_epilogue(void) {
+       int     regno, offset;
+       int     regno, offset;
+       ZIPDEBUGFLAG(dbg, false);
+       ZIPDEBUGFLAG(dbg, false);
+       rtx     insn;
+       rtx     insn;
+
+
+       zip_compute_frame();
+       zip_compute_frame();
+
+
+       if (dbg) fprintf(stderr, "EPILOG::\n");
+       if (dbg) fprintf(stderr, "EPILOG::\n");
+       if (cfun->machine->fp_needed) {
+       if (cfun->machine->fp_needed) {
+		// This is handled specially--if you can't trust the stack pointer
+		// This is handled specially--if you can't trust the stack pointer
+               // enough so that you must have a frame pointer, then you can't
+               // enough so that you must have a frame pointer, then you can't
+               // trust its offset enough to restore from it.  Hence, we start
+               // trust its offset enough to restore from it.  Hence, we start
+               // by moving the frame pointer to the stack pointer to recover
+               // by moving the frame pointer to the stack pointer to recover
+               // the stack pointer back to a usable value.
+               // the stack pointer back to a usable value.
+               if (dbg) fprintf(stderr, "SIBCALL-EPILOG::Moving frame pointer to stack register\n");
+               if (dbg) fprintf(stderr, "SIBCALL-EPILOG::Moving frame pointer to stack register\n");
+               insn = emit_insn(gen_movsi_raw(stack_pointer_rtx, frame_pointer_rtx));
+               insn = emit_insn(gen_movsi_raw(stack_pointer_rtx, frame_pointer_rtx));
+               RTX_FRAME_RELATED_P(insn) = 1;
+               RTX_FRAME_RELATED_P(insn) = 1;
+       }
+       }
+
+
+       if (cfun->machine->saved_reg_size != 0) {
+       if (cfun->machine->saved_reg_size != 0) {
+               if (cfun->machine->fp_needed)
+               if (cfun->machine->fp_needed)
+                       offset = 0;
+                       offset = 0;
+               else
+               else
+                       offset = cfun->machine->sp_fp_offset;
+                       offset = cfun->machine->sp_fp_offset;
+               if (dbg) fprintf(stderr, "SIBCALL-EPILOG::Saved_REG_Size = %d\n", cfun->machine->saved_reg_size);
+               if (dbg) fprintf(stderr, "SIBCALL-EPILOG::Saved_REG_Size = %d\n", cfun->machine->saved_reg_size);
+               for(regno=0; regno < FIRST_PSEUDO_REGISTER; regno++) {
+               for(regno=0; regno < FIRST_PSEUDO_REGISTER; regno++) {
+                       if (zip_save_reg(regno)) {
+                       if (zip_save_reg(regno)) {
+                               if (dbg) fprintf(stderr, "SIBCALL-EPILOG::RESTORING R%d\n", regno);
+                               if (dbg) fprintf(stderr, "SIBCALL-EPILOG::RESTORING R%d\n", regno);
+                               rtx reg = gen_rtx_REG(SImode, regno);
+                               rtx reg = gen_rtx_REG(SImode, regno);
+                               insn = emit_insn(gen_movsi_lod_off(
+                               insn = emit_insn(gen_movsi_lod_off(
+                                               reg,
+                                               reg,
+                                               stack_pointer_rtx,
+                                               stack_pointer_rtx,
+                                               GEN_INT(offset)));
+                                               GEN_INT(offset)));
+                               add_reg_note(insn, REG_CFA_RESTORE, reg);
+                               add_reg_note(insn, REG_CFA_RESTORE, reg);
+                               RTX_FRAME_RELATED_P(insn) = 1;
+                               RTX_FRAME_RELATED_P(insn) = 1;
+                               offset += 4;
+                               offset += 4;
+                       }
+                       }
+               }
+               }
+       }
+       }
+
+
+       if (cfun->machine->fp_needed) {
+       if (cfun->machine->fp_needed) {
+		// Restore the stack pointer to its original value; the
+		// Restore the stack pointer to its original value; the
+		// remaining adjustment is the distance from the frame pointer
+		// remaining adjustment is the distance from the frame pointer
+		// to the original stack pointer.
+		// to the original stack pointer.
+               insn = emit_insn(gen_addsi3(stack_pointer_rtx, stack_pointer_rtx,
+               insn = emit_insn(gen_addsi3(stack_pointer_rtx, stack_pointer_rtx,
+                       GEN_INT(cfun->machine->size_for_adjusting_sp
+                       GEN_INT(cfun->machine->size_for_adjusting_sp
+                               -cfun->machine->sp_fp_offset)));
+                               -cfun->machine->sp_fp_offset)));
+               RTX_FRAME_RELATED_P(insn) = 1;
+               RTX_FRAME_RELATED_P(insn) = 1;
+       } else {
+       } else {
+               // else now the difference is between the stack pointer and
+               // else now the difference is between the stack pointer and
+               // the original stack pointer.
+               // the original stack pointer.
+               if (dbg) fprintf(stderr, "SIBCALL-EPILOG::ADDSI3(StackPtr, %d)\n",
+               if (dbg) fprintf(stderr, "SIBCALL-EPILOG::ADDSI3(StackPtr, %d)\n",
+                               cfun->machine->size_for_adjusting_sp);
+                               cfun->machine->size_for_adjusting_sp);
+               insn = emit_insn(gen_addsi3(stack_pointer_rtx,stack_pointer_rtx,
+               insn = emit_insn(gen_addsi3(stack_pointer_rtx,stack_pointer_rtx,
+                       GEN_INT(cfun->machine->size_for_adjusting_sp)));
+                       GEN_INT(cfun->machine->size_for_adjusting_sp)));
+               RTX_FRAME_RELATED_P(insn) = 1;
+               RTX_FRAME_RELATED_P(insn) = 1;
+       }
+       }
+}
+}
+
+
+rtx
+rtx
+zip_return_addr_rtx(int count, rtx frame ATTRIBUTE_UNUSED)
+zip_return_addr_rtx(int count, rtx frame ATTRIBUTE_UNUSED)
+{
+{
+       //
+       //
+       // Don't try to compute anything other than frame zero.
+       // Don't try to compute anything other than frame zero.
+       //
+       //
+       if (count != 0)
+       if (count != 0)
+               return NULL_RTX;
+               return NULL_RTX;
+
+
+	// Make sure we've computed our frame: do we need to save registers?
+	// Make sure we've computed our frame: do we need to save registers?
+       zip_compute_frame();
+       zip_compute_frame();
+
+
+       if (zip_save_reg(zip_LR)) {
+       if (zip_save_reg(zip_LR)) {
+               if (cfun->machine->fp_needed)
+               if (cfun->machine->fp_needed)
+                       return gen_rtx_MEM(SImode, frame_pointer_rtx);
+                       return gen_rtx_MEM(SImode, frame_pointer_rtx);
+               else
+               else
+                       return gen_rtx_MEM(SImode, gen_rtx_PLUS(Pmode,
+                       return gen_rtx_MEM(SImode, gen_rtx_PLUS(Pmode,
+                                       stack_pointer_rtx,
+                                       stack_pointer_rtx,
+                                       GEN_INT(cfun->machine->sp_fp_offset)));
+                                       GEN_INT(cfun->machine->sp_fp_offset)));
+       } else {
+       } else {
+               return gen_rtx_REG(Pmode, zip_LR);
+               return gen_rtx_REG(Pmode, zip_LR);
+
+
+       }
+       }
+}
+}
+
+
+/* Implement RETURN_ADDR_RTX(COUNT, FRAMEADDR).
+/* Implement RETURN_ADDR_RTX(COUNT, FRAMEADDR).
+ *
+ *
+ * We currently only support calculating the return address for the current
+ * We currently only support calculating the return address for the current
+ * frame.
+ * frame.
+ */
+ */
+
+
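+/* This is what backs __builtin_return_address(0) on this port: if the
+ * prologue spilled the link register, its saved copy is read back from the
+ * frame (at 0(FP), or at sp_fp_offset(SP) when no frame pointer is in use);
+ * otherwise the value still held in the link register itself is used.
+ */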
+/*
+/*
+rtx
+rtx
+zip_return_addr_rtx(int count, rtx frame ATTRIBUTE_UNUSED)
+zip_return_addr_rtx(int count, rtx frame ATTRIBUTE_UNUSED)
+{
+{
+       if (count)
+       if (count)
+               return NULL_RTX;
+               return NULL_RTX;
+
+
+       zip_compute_frame();
+       zip_compute_frame();
+
+
+       // saved return address for current function is at fp - 1
+       // saved return address for current function is at fp - 1
+       if (cfun->machine->save_ret)
+       if (cfun->machine->save_ret)
+               return gen_rtx_MEM(Pmode, plus_constant(frame_pointer_rtx,
+               return gen_rtx_MEM(Pmode, plus_constant(frame_pointer_rtx,
+                               -UNITS_PER_WORD));
+                               -UNITS_PER_WORD));
+       return get_hard_reg_initial_val(Pmode, RETURN_ADDRESS_REGNUM);
+       return get_hard_reg_initial_val(Pmode, RETURN_ADDRESS_REGNUM);
+}
+}
+*/
+*/
+
+
+/* Implements the macro INITIAL_ELIMINATION_OFFSET,
+/* Implements the macro INITIAL_ELIMINATION_OFFSET,
+ * return the OFFSET.
+ * return the OFFSET.
+ */
+ */
+int
+int
+zip_initial_elimination_offset(int from, int to) {
+zip_initial_elimination_offset(int from, int to) {
+       int     ret = 0;
+       int     ret = 0;
+       zip_compute_frame();
+       zip_compute_frame();
+
+
+/*
+/*
+       if (((from) == FRAME_POINTER_REGNUM)&&((to) == STACK_POINTER_REGNUM)) {
+       if (((from) == FRAME_POINTER_REGNUM)&&((to) == STACK_POINTER_REGNUM)) {
+               ret = cfun->machine->sp_fp_offset;
+               ret = cfun->machine->sp_fp_offset;
+       } else if (((from)=ARG_POINTER_REGNUM)&&((to)==STACK_POINTER_REGNUM)) {
+       } else if (((from)=ARG_POINTER_REGNUM)&&((to)==STACK_POINTER_REGNUM)) {
+               // Since the ARG_POINTER_REGNUM is defined to be identical
+               // Since the ARG_POINTER_REGNUM is defined to be identical
+               // to the FRAME_POINTER_REGNUM, this "if" will never ever
+               // to the FRAME_POINTER_REGNUM, this "if" will never ever
+               // get called.
+               // get called.
+               ret = cfun->machine->sp_fp_offset;
+               ret = cfun->machine->sp_fp_offset;
+       } else if (((from)=ARG_POINTER_REGNUM)&&((to)==FRAME_POINTER_REGNUM)) {
+       } else if (((from)=ARG_POINTER_REGNUM)&&((to)==FRAME_POINTER_REGNUM)) {
+               // Since we define ARG_POINTER_REGNUM to be FRAME_POINTER_REGNUM
+               // Since we define ARG_POINTER_REGNUM to be FRAME_POINTER_REGNUM
+               // we're asked for the offset between the frame pointer and
+               // we're asked for the offset between the frame pointer and
+               // itself.  The result had better be zero.
+               // itself.  The result had better be zero.
+               //
+               //
+               ret = 0;
+               ret = 0;
+       } else {
+       } else {
+               abort();
+               abort();
+       }
+       }
+*/
+*/
+
+
+       // Let's try using an ARG_POINTER != FRAME_POINTER
+       // Let's try using an ARG_POINTER != FRAME_POINTER
+       if (((from) == FRAME_POINTER_REGNUM)&&((to) == STACK_POINTER_REGNUM)) {
+       if (((from) == FRAME_POINTER_REGNUM)&&((to) == STACK_POINTER_REGNUM)) {
+               ret = cfun->machine->sp_fp_offset;
+               ret = cfun->machine->sp_fp_offset;
+	} else if (((from)==ARG_POINTER_REGNUM)&&((to)==STACK_POINTER_REGNUM)) {
+	} else if (((from)==ARG_POINTER_REGNUM)&&((to)==STACK_POINTER_REGNUM)) {
+		// The argument pointer sits at the caller's (original) stack
+		// The argument pointer sits at the caller's (original) stack
+		// pointer, a full size_for_adjusting_sp above the adjusted
+		// pointer, a full size_for_adjusting_sp above the adjusted
+		// stack pointer.
+		// stack pointer.
+               ret = cfun->machine->size_for_adjusting_sp;
+               ret = cfun->machine->size_for_adjusting_sp;
+	} else if (((from)==ARG_POINTER_REGNUM)&&((to)==FRAME_POINTER_REGNUM)) {
+	} else if (((from)==ARG_POINTER_REGNUM)&&((to)==FRAME_POINTER_REGNUM)) {
+               ret = cfun->machine->size_for_adjusting_sp
+               ret = cfun->machine->size_for_adjusting_sp
+                       - cfun->machine->sp_fp_offset;
+                       - cfun->machine->sp_fp_offset;
+       } else {
+       } else {
+               abort();
+               abort();
+       }
+       }
+
+
+       return ret;
+       return ret;
+}
+}
+
+
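+/* Sanity check on the three offsets above, using the hypothetical 40-byte
+ * frame from the earlier examples: FP->SP is 32, ARG->FP is 40-32 = 8, and
+ * ARG->SP is 40, so (ARG->FP) + (FP->SP) = 8 + 32 = 40 = (ARG->SP), exactly
+ * the consistency the register-elimination machinery expects.
+ */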
+/*
+/*
+ * Code taken from m68k ...
+ * Code taken from m68k ...
+ */
+ */
+static bool
+static bool
+zip_can_eliminate(int from, int to)
+zip_can_eliminate(int from, int to)
+{
+{
+       // fprintf(stderr, "CAN_ELIMINATE::QUERYING(%d,%d)\n", from, to);
+       // fprintf(stderr, "CAN_ELIMINATE::QUERYING(%d,%d)\n", from, to);
+       if ((from == zip_FP)&&(to == zip_SP))
+       if ((from == zip_FP)&&(to == zip_SP))
+               return !cfun->machine->fp_needed;
+               return !cfun->machine->fp_needed;
+       return true;
+       return true;
+}
+}
+
+
+/* Compute the number of word-sized registers needed to hold a function
+/* Compute the number of word-sized registers needed to hold a function
+ * argument of machine mode MODE and tree type TYPE.
+ * argument of machine mode MODE and tree type TYPE.
+ */
+ */
+int
+int
+zip_num_arg_regs(enum machine_mode mode, const_tree type) {
+zip_num_arg_regs(enum machine_mode mode, const_tree type) {
+       int     size;
+       int     size;
+
+
+       if (targetm.calls.must_pass_in_stack(mode, type))
+       if (targetm.calls.must_pass_in_stack(mode, type))
+               return 0;
+               return 0;
+
+
+       if ((type)&&(mode == BLKmode))
+       if ((type)&&(mode == BLKmode))
+               size = int_size_in_bytes(type);
+               size = int_size_in_bytes(type);
+       else
+       else
+               size = GET_MODE_SIZE(mode);
+               size = GET_MODE_SIZE(mode);
+
+
+       return (size + UNITS_PER_WORD - 1)/UNITS_PER_WORD;
+       return (size + UNITS_PER_WORD - 1)/UNITS_PER_WORD;
+}
+}
+
+
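+/* For example, assuming UNITS_PER_WORD is 4 (as the 4-byte register saves
+ * above suggest), an SImode argument needs (4+3)/4 = 1 register, while a
+ * 6-byte structure passed by value needs (6+3)/4 = 2 registers.
+ */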
+static void
+static void
+zip_function_arg_advance(cumulative_args_t ca, machine_mode mode,
+zip_function_arg_advance(cumulative_args_t ca, machine_mode mode,
+               const_tree type, bool named ATTRIBUTE_UNUSED) {
+               const_tree type, bool named ATTRIBUTE_UNUSED) {
+       CUMULATIVE_ARGS *cum;
+       CUMULATIVE_ARGS *cum;
+       int     nreg;
+       int     nreg;
+
+
+       cum = get_cumulative_args(ca);
+       cum = get_cumulative_args(ca);
+       nreg = zip_num_arg_regs(mode, type);
+       nreg = zip_num_arg_regs(mode, type);
+       if (((*cum)+nreg) > NUM_ARG_REGS)
+       if (((*cum)+nreg) > NUM_ARG_REGS)
+               (*cum) = NUM_ARG_REGS;
+               (*cum) = NUM_ARG_REGS;
+       else
+       else
+               (*cum) += nreg;
+               (*cum) += nreg;
+}
+}
+
+
+static rtx
+static rtx
+zip_function_arg(cumulative_args_t ca, machine_mode mode,
+zip_function_arg(cumulative_args_t ca, machine_mode mode,
+               const_tree type ATTRIBUTE_UNUSED, bool named) {
+               const_tree type ATTRIBUTE_UNUSED, bool named) {
+       CUMULATIVE_ARGS *cum;
+       CUMULATIVE_ARGS *cum;
+
+
+       if (!named)
+       if (!named)
+               return NULL_RTX;
+               return NULL_RTX;
+       cum = get_cumulative_args(ca);
+       cum = get_cumulative_args(ca);
+
+
+       if ((*cum) >= NUM_ARG_REGS)
+       if ((*cum) >= NUM_ARG_REGS)
+               return NULL_RTX;
+               return NULL_RTX;
+       return
+       return
+               gen_rtx_REG(mode, (*cum)+1);
+               gen_rtx_REG(mode, (*cum)+1);
+}
+}
+
+
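+/* Taken together, the two hooks above implement a simple register-passing
+ * convention.  As a sketch (the value of NUM_ARG_REGS is assumed here, not
+ * quoted from this port): with NUM_ARG_REGS == 5, a call f(a,b,c,d,e,g) with
+ * six named SImode arguments places a..e in R1..R5 as the cumulative count
+ * advances through 0,1,2,3,4, and returns NULL_RTX for g, pushing it onto the
+ * stack; unnamed (variadic) arguments always get NULL_RTX here and so always
+ * go on the stack.
+ */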
+/* DECL is the declaration of the function being targeted by the call, and EXP
+/* DECL is the declaration of the function being targeted by the call, and EXP
+ * is the CALL_EXPR representing the call.
+ * is the CALL_EXPR representing the call.
+ */
+ */
+bool   zip_function_ok_for_sibcall(ATTRIBUTE_UNUSED tree decl, tree exp) {
+bool   zip_function_ok_for_sibcall(ATTRIBUTE_UNUSED tree decl, tree exp) {
+       // calls.c already checks whether or not the parameter stack space
+       // calls.c already checks whether or not the parameter stack space
+       // is identical, so ... let's hope this all works and find out.
+       // is identical, so ... let's hope this all works and find out.
+
+
+       //
+       //
+	// Actually, this will fail:  If the sibling uses R5 to pass a parameter
+	// Actually, this will fail:  If the sibling uses R5 to pass a parameter
+	// in and we don't, then there will be no way to restore R5.  This is
+	// in and we don't, then there will be no way to restore R5.  This is
+       // true for the current configuration.  It will be true for future
+       // true for the current configuration.  It will be true for future
+       // configurations if the sibling ever uses a register that must be
+       // configurations if the sibling ever uses a register that must be
+       // saved as a parameter register.
+       // saved as a parameter register.
+       //
+       //
+       // We can check this ... if we can count how many registers the
+       // We can check this ... if we can count how many registers the
+       // sibling call will use.
+       // sibling call will use.
+       //
+       //
+       CUMULATIVE_ARGS cum_v;
+       CUMULATIVE_ARGS cum_v;
+       cumulative_args_t       cum;
+       cumulative_args_t       cum;
+       tree            parameter;
+       tree            parameter;
+       machine_mode    mode;
+       machine_mode    mode;
+       tree            ttype;
+       tree            ttype;
+       rtx             parm_rtx;
+       rtx             parm_rtx;
+       int             i;
+       int             i;
+       static const char zip_call_used_register[] = CALL_USED_REGISTERS;
+       static const char zip_call_used_register[] = CALL_USED_REGISTERS;
+
+
+       INIT_CUMULATIVE_ARGS(cum_v, NULL, NULL, 0,0);
+       INIT_CUMULATIVE_ARGS(cum_v, NULL, NULL, 0,0);
+       cum = pack_cumulative_args(&cum_v);
+       cum = pack_cumulative_args(&cum_v);
+       for (i=0; i<call_expr_nargs(exp); i++) {
+       for (i=0; i<call_expr_nargs(exp); i++) {
+
+
+               parameter = CALL_EXPR_ARG(exp, i);
+               parameter = CALL_EXPR_ARG(exp, i);
+
+
+               if ((!parameter) || (TREE_CODE(parameter)==ERROR_MARK))
+               if ((!parameter) || (TREE_CODE(parameter)==ERROR_MARK))
+                       return true;
+                       return true;
+               ttype = TREE_TYPE(parameter);
+               ttype = TREE_TYPE(parameter);
+               gcc_assert(ttype);
+               gcc_assert(ttype);
+               mode = ttype->type_common.mode;
+               mode = ttype->type_common.mode;
+
+
+               if (pass_by_reference(&cum_v, mode, ttype, true)) {
+               if (pass_by_reference(&cum_v, mode, ttype, true)) {
+                       mode = Pmode;
+                       mode = Pmode;
+                       ttype = build_pointer_type(ttype);
+                       ttype = build_pointer_type(ttype);
+               }
+               }
+
+
+               parm_rtx = zip_function_arg(cum, mode, ttype, 0);
+               parm_rtx = zip_function_arg(cum, mode, ttype, 0);
+               zip_function_arg_advance(cum, mode, ttype, 0);
+               zip_function_arg_advance(cum, mode, ttype, 0);
+               if (!parm_rtx)
+               if (!parm_rtx)
+                       continue;
+                       continue;
+
+
+               // If it is a register
+               // If it is a register
+               //      and it is *NOT* a CALL_USED_REGISTER
+               //      and it is *NOT* a CALL_USED_REGISTER
+               //      then we can't do this.
+               //      then we can't do this.
+               //
+               //
+               // Example: func(R1,..R4,R5)
+               // Example: func(R1,..R4,R5)
+               //      can be followed by func2(R1,.., up to R5)
+               //      can be followed by func2(R1,.., up to R5)
+               //      (not supported, though... just to simplify our test
+               //      (not supported, though... just to simplify our test
+               //      below)
+               //      below)
+               // Example: func(R1,..R4)
+               // Example: func(R1,..R4)
+               //      cannot be followed by func2(R1,..,R5)
+               //      cannot be followed by func2(R1,..,R5)
+               //      We would blow R5 away by our prologue, even if it was
+               //      We would blow R5 away by our prologue, even if it was
+               //      properly set.
+               //      properly set.
+               // Example: func(R1,..R5)
+               // Example: func(R1,..R5)
+               //      can be followed by func2(R1,.., up to R4)
+               //      can be followed by func2(R1,.., up to R4)
+               //      func2 may save R5 (which doesn't need saving) but that's
+               //      func2 may save R5 (which doesn't need saving) but that's
+               //              irrelevant
+               //              irrelevant
+               // Example: func(R1,..up to R4)
+               // Example: func(R1,..up to R4)
+               //      can be followed by func2(R1,.., up to R4)
+               //      can be followed by func2(R1,.., up to R4)
+               //
+               //
+               if (REG_P(parm_rtx)&&(REGNO(parm_rtx))
+               if (REG_P(parm_rtx)&&(REGNO(parm_rtx))
+                               &&(REGNO(parm_rtx)<sizeof(zip_call_used_register))
+                               &&(REGNO(parm_rtx)<sizeof(zip_call_used_register))
+                               &&(!zip_call_used_register[REGNO(parm_rtx)]))
+                               &&(!zip_call_used_register[REGNO(parm_rtx)]))
+                       return false;
+                       return false;
+       }
+       }
+
+
+       return true;
+       return true;
+
+
+       // We also need to check if the return types are the same ... or
+       // We also need to check if the return types are the same ... or
+       // will GCC handle that for us?
+       // will GCC handle that for us?
+}
+}
+
+
+void   zip_canonicalize_comparison(int *code, rtx *op0, rtx *op1,
+void   zip_canonicalize_comparison(int *code, rtx *op0, rtx *op1,
+               bool preserve_op0)
+               bool preserve_op0)
+{
+{
+       ZIPDEBUGFLAG(dbg, false);
+       ZIPDEBUGFLAG(dbg, false);
+       bool    reverse = false;
+       bool    reverse = false;
+
+
+       if (dbg) fprintf(stderr, "CANONICALIZE ...%s\n", (preserve_op0)?"(Preserve Op0)":"");
+       if (dbg) fprintf(stderr, "CANONICALIZE ...%s\n", (preserve_op0)?"(Preserve Op0)":"");
+       if (dbg) zip_debug_rtx_pfx("CODE", gen_rtx_fmt_ee((rtx_code)*code, VOIDmode, gen_rtx_REG(CCmode,zip_CC), const0_rtx));
+       if (dbg) zip_debug_rtx_pfx("CODE", gen_rtx_fmt_ee((rtx_code)*code, VOIDmode, gen_rtx_REG(CCmode,zip_CC), const0_rtx));
+       if (dbg) zip_debug_rtx_pfx("OP0 ", *op0);
+       if (dbg) zip_debug_rtx_pfx("OP0 ", *op0);
+       if (dbg) zip_debug_rtx_pfx("OP1 ", *op1);
+       if (dbg) zip_debug_rtx_pfx("OP1 ", *op1);
+
+
+       // Z    ->      Z
+       // Z    ->      Z
+       // NZ   ->      !Z
+       // NZ   ->      !Z
+       // LT   ->      N
+       // LT   ->      N
+       // GE   ->      !N
+       // GE   ->      !N
+       // LTU  ->      C
+       // LTU  ->      C
+       // GEU  ->      !C
+       // GEU  ->      !C
+       //
+       //
+       // LTE  ->      GTE w/ swapped operands
+       // LTE  ->      GTE w/ swapped operands
+       // GT   ->      LT  w/ swapped operands
+       // GT   ->      LT  w/ swapped operands
+       // GTU  ->      LTU w/ swapped operands
+       // GTU  ->      LTU w/ swapped operands
+       // LEU  ->      GEU w/ swapped operands
+       // LEU  ->      GEU w/ swapped operands
+       //
+       //
+
+
+       if ((CONST_INT_P(*op0))||(GET_CODE(*op0) == PLUS)) {
+       if ((CONST_INT_P(*op0))||(GET_CODE(*op0) == PLUS)) {
+               rtx     tmp = *op0;
+               rtx     tmp = *op0;
+               *op0 = *op1;
+               *op0 = *op1;
+               *op1 = tmp;
+               *op1 = tmp;
+               *code = (int)swap_condition((enum rtx_code)*code);
+               *code = (int)swap_condition((enum rtx_code)*code);
+       }
+       }
+
+
+       if (*code == GTU) {
+       if (*code == GTU) {
+               if (REG_P(*op1)) {
+               if (REG_P(*op1)) {
+                       //; Reverse the comparison
+                       //; Reverse the comparison
+                       reverse = true;
+                       reverse = true;
+               } else if (CONST_INT_P(*op1)) {
+               } else if (CONST_INT_P(*op1)) {
+                       //; A >  B
+                       //; A >  B
+                       //; A >= B+1
+                       //; A >= B+1
+                       //; Add one to the integer constant,
+                       //; Add one to the integer constant,
+                       //; And use a GEU comparison
+                       //; And use a GEU comparison
+                       *code = GEU;
+                       *code = GEU;
+                       *op1 = GEN_INT(INTVAL(*op1)+1);
+                       *op1 = GEN_INT(INTVAL(*op1)+1);
+               } else {
+               } else {
+                       //; Reverse the comparison
+                       //; Reverse the comparison
+                       reverse = true;
+                       reverse = true;
+               }
+               }
+       } else if (*code == LEU) {
+       } else if (*code == LEU) {
+               if (REG_P(*op1)) {
+               if (REG_P(*op1)) {
+                       reverse = true;
+                       reverse = true;
+               } else if (CONST_INT_P(*op1)) {
+               } else if (CONST_INT_P(*op1)) {
+                       //; A <= B
+                       //; A <= B
+                       //; A <  B+1
+                       //; A <  B+1
+                       //; Add one to the integer constant,
+                       //; Add one to the integer constant,
+			//; And use an LTU comparison
+			//; And use an LTU comparison
+                       *op1 = GEN_INT(INTVAL(*op1)+1);
+                       *op1 = GEN_INT(INTVAL(*op1)+1);
+                       *code = LTU;
+                       *code = LTU;
+               } else {
+               } else {
+                       reverse = true;
+                       reverse = true;
+               }
+               }
+       } else if (*code == LE) {
+       } else if (*code == LE) {
+               if (REG_P(*op1)) {
+               if (REG_P(*op1)) {
+                       reverse = true;
+                       reverse = true;
+               } else if (CONST_INT_P(*op1)) {
+               } else if (CONST_INT_P(*op1)) {
+			//; A <= B
+			//; A <= B
+			//; A <  B+1
+			//; A <  B+1
+			//; Add one to the integer constant,
+			//; Add one to the integer constant,
+			//; And use an LT comparison
+			//; And use an LT comparison
+			*op1 = GEN_INT(INTVAL(*op1)+1);
+			*op1 = GEN_INT(INTVAL(*op1)+1);
+                       *code = LT;
+                       *code = LT;
+               } else {
+               } else {
+                       reverse = true;
+                       reverse = true;
+               }
+               }
+       } else if (*code == GT) {
+       } else if (*code == GT) {
+               if (REG_P(*op1)) {
+               if (REG_P(*op1)) {
+                       //; Reverse the comparison
+                       //; Reverse the comparison
+                       reverse = true;
+                       reverse = true;
+               } else if (CONST_INT_P(*op1)) {
+               } else if (CONST_INT_P(*op1)) {
+                       //; A >  B
+                       //; A >  B
+                       //; A >= B+1
+                       //; A >= B+1
+                       //; Add one to the integer constant,
+                       //; Add one to the integer constant,
+			//; And use a GE comparison
+			//; And use a GE comparison
+                       *op1 = GEN_INT(INTVAL(*op1)+1);
+                       *op1 = GEN_INT(INTVAL(*op1)+1);
+                       *code = GE;
+                       *code = GE;
+               } else {
+               } else {
+                       reverse = true;
+                       reverse = true;
+               }
+               }
+       }
+       }
+
+
+       if (reverse) {
+       if (reverse) {
+               rtx tem = *op0;
+               rtx tem = *op0;
+               *op0 = *op1;
+               *op0 = *op1;
+               *op1 = tem;
+               *op1 = tem;
+               *code = (int)swap_condition((enum rtx_code)*code);
+               *code = (int)swap_condition((enum rtx_code)*code);
+       }
+       }
+}
+}
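
The constant adjustments above lean on two integer identities: for a constant B away from the type's extreme value, A <= B is equivalent to A < B+1, and A > B is equivalent to A >= B+1 (likewise for the unsigned LEU/LTU pair). A minimal stand-alone sketch, separate from the patch, that spot-checks those identities:

	#include <assert.h>

	/* Spot-check the comparison rewrites used by the hook above.
	 * The identities only break at the extreme constants (INT_MAX,
	 * UINT_MAX), which a production port would need to guard for. */
	int main(void) {
		for (int a = -3; a <= 3; a++)
			for (int b = -3; b <= 3; b++) {
				assert((a <= b) == (a <  b + 1));	/* LE  -> LT  */
				assert((a >  b) == (a >= b + 1));	/* GT  -> GE  */
			}
		for (unsigned a = 0; a < 8; a++)
			for (unsigned b = 0; b < 8; b++)
				assert((a <= b) == (a < b + 1u));	/* LEU -> LTU */
		return 0;
	}
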
+
+
+static bool
+static bool
+zip_fixed_condition_code_regs(unsigned int *a, unsigned int *b) {
+zip_fixed_condition_code_regs(unsigned int *a, unsigned int *b) {
+       *a = zip_CC;
+       *a = zip_CC;
+       *b = INVALID_REGNUM;
+       *b = INVALID_REGNUM;
+       return true;
+       return true;
+}
+}
+
+
+
+
+/* totally buggy - we can't return pointers to nested functions */
+/* totally buggy - we can't return pointers to nested functions */
+static void
+static void
+zip_asm_trampoline_template(FILE *f)
+zip_asm_trampoline_template(FILE *f)
+{
+{
+       fprintf(f, "\tbrev\t0,r1\n");
+       fprintf(f, "\tbrev\t0,r1\n");
+       fprintf(f, "\tldilo\t0,r1\n");
+       fprintf(f, "\tldilo\t0,r1\n");
+       fprintf(f, "\tjmp r1\n");
+       fprintf(f, "\tjmp r1\n");
+}
+}
+
+
+/* Worker function for TARGET_TRAMPOLINE_INIT. */
+/* Worker function for TARGET_TRAMPOLINE_INIT. */
+static void
+static void
+zip_trampoline_init(rtx m_tramp ATTRIBUTE_UNUSED,
+zip_trampoline_init(rtx m_tramp ATTRIBUTE_UNUSED,
+       tree fndecl ATTRIBUTE_UNUSED,
+       tree fndecl ATTRIBUTE_UNUSED,
+       rtx chain_value ATTRIBUTE_UNUSED) {
+       rtx chain_value ATTRIBUTE_UNUSED) {
+// #warning "This needs to be filled out"
+// #warning "This needs to be filled out"
+       abort();
+       abort();
+}
+}
+
+
+static tree
+static tree
+def_builtin(const char *name, enum insn_code icode, enum ZIP_BUILTIN_ID_CODE code,
+def_builtin(const char *name, enum insn_code icode, enum ZIP_BUILTIN_ID_CODE code,
+       tree type)
+       tree type)
+{
+{
+       tree t = add_builtin_function(name,type,code, BUILT_IN_MD, NULL, NULL_TREE);
+       tree t = add_builtin_function(name,type,code, BUILT_IN_MD, NULL, NULL_TREE);
+
+
+       if(t) {
+       if(t) {
+               zip_builtins[code] = t;
+               zip_builtins[code] = t;
+               zip_builtins_icode[code] = icode;
+               zip_builtins_icode[code] = icode;
+       }
+       }
+
+
+       return t;
+       return t;
+
+
+}
+}
+
+
+void   zip_init_builtins(void) {
+void   zip_init_builtins(void) {
+
+
+  tree void_ftype_void = build_function_type_list(void_type_node, NULL_TREE);
+  tree void_ftype_void = build_function_type_list(void_type_node, NULL_TREE);
+#ifdef HAVE_zip_rtu
+#ifdef HAVE_zip_rtu
+  def_builtin("zip_rtu", CODE_FOR_zip_rtu, ZIP_BUILTIN_RTU, void_ftype_void);
+  def_builtin("zip_rtu", CODE_FOR_zip_rtu, ZIP_BUILTIN_RTU, void_ftype_void);
+#endif
+#endif
+#ifdef HAVE_zip_halt
+#ifdef HAVE_zip_halt
+  def_builtin("zip_halt",  CODE_FOR_zip_halt,  ZIP_BUILTIN_HALT, void_ftype_void);
+  def_builtin("zip_halt",  CODE_FOR_zip_halt,  ZIP_BUILTIN_HALT, void_ftype_void);
+#endif
+#endif
+#ifdef HAVE_zip_busy
+#ifdef HAVE_zip_busy
+  def_builtin("zip_busy",  CODE_FOR_zip_busy,  ZIP_BUILTIN_BUSY, void_ftype_void);
+  def_builtin("zip_busy",  CODE_FOR_zip_busy,  ZIP_BUILTIN_BUSY, void_ftype_void);
+#endif
+#endif
+#ifdef HAVE_zip_idle
+#ifdef HAVE_zip_idle
+  def_builtin("zip_idle", CODE_FOR_zip_idle, ZIP_BUILTIN_IDLE, void_ftype_void);
+  def_builtin("zip_idle", CODE_FOR_zip_idle, ZIP_BUILTIN_IDLE, void_ftype_void);
+#endif
+#endif
+
+
+#ifdef HAVE_zip_syscall
+#ifdef HAVE_zip_syscall
+// Support int SYSCALL(callID, int a, int b, int c);
+// Support int SYSCALL(callID, int a, int b, int c);
+  def_builtin("zip_syscall", CODE_FOR_zip_syscall, ZIP_BUILTIN_SYSCALL,
+  def_builtin("zip_syscall", CODE_FOR_zip_syscall, ZIP_BUILTIN_SYSCALL,
+                       build_function_type_list(void_type_node, NULL_TREE));
+                       build_function_type_list(void_type_node, NULL_TREE));
+#endif
+#endif
+
+
+#ifdef HAVE_zip_save_context
+#ifdef HAVE_zip_save_context
+  def_builtin("zip_save_context", CODE_FOR_zip_save_context, ZIP_BUILTIN_SAVE_CONTEXT,
+  def_builtin("zip_save_context", CODE_FOR_zip_save_context, ZIP_BUILTIN_SAVE_CONTEXT,
+               build_function_type_list(void_type_node, ptr_type_node, 0));
+               build_function_type_list(void_type_node, ptr_type_node, 0));
+#endif
+#endif
+
+
+#ifdef HAVE_zip_restore_context
+#ifdef HAVE_zip_restore_context
+  def_builtin("zip_restore_context", CODE_FOR_zip_restore_context, ZIP_BUILTIN_RESTORE_CONTEXT,
+  def_builtin("zip_restore_context", CODE_FOR_zip_restore_context, ZIP_BUILTIN_RESTORE_CONTEXT,
+       build_function_type_list(void_type_node, ptr_type_node, 0));
+       build_function_type_list(void_type_node, ptr_type_node, 0));
+#endif
+#endif
+
+
+#ifdef HAVE_zip_bitrev
+#ifdef HAVE_zip_bitrev
+  def_builtin("zip_bitrev", CODE_FOR_zip_bitrev, ZIP_BUILTIN_BITREV,
+  def_builtin("zip_bitrev", CODE_FOR_zip_bitrev, ZIP_BUILTIN_BITREV,
+       build_function_type_list(unsigned_type_node, unsigned_type_node,
+       build_function_type_list(unsigned_type_node, unsigned_type_node,
+               NULL_TREE));
+               NULL_TREE));
+#endif
+#endif
+
+
+#ifdef HAVE_zip_cc
+#ifdef HAVE_zip_cc
+  def_builtin("zip_cc", CODE_FOR_zip_cc, ZIP_BUILTIN_CC,
+  def_builtin("zip_cc", CODE_FOR_zip_cc, ZIP_BUILTIN_CC,
+       build_function_type_list(unsigned_type_node, NULL_TREE));
+       build_function_type_list(unsigned_type_node, NULL_TREE));
+#endif
+#endif
+
+
+#ifdef HAVE_zip_ucc
+#ifdef HAVE_zip_ucc
+  def_builtin("zip_ucc", CODE_FOR_zip_ucc, ZIP_BUILTIN_UCC,
+  def_builtin("zip_ucc", CODE_FOR_zip_ucc, ZIP_BUILTIN_UCC,
+       build_function_type_list(unsigned_type_node, NULL_TREE));
+       build_function_type_list(unsigned_type_node, NULL_TREE));
+#endif
+#endif
+
+
+}
+}
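
zip_init_builtins registers each machine-specific builtin under a plain function name (zip_rtu, zip_idle, zip_cc, and so on), so ZipCPU sources can call them directly and the insn patterns above are emitted inline. A hedged usage sketch, assuming the patched zip-targeted compiler; the helper names and semantics below are only what the builtin names imply, not something spelled out in the patch:

	/* Hypothetical supervisor-side snippet; with the patched compiler the
	 * builtins registered above need no declarations and expand straight
	 * to the corresponding instructions. */
	void	wait_for_interrupt(void) {
		zip_idle();		/* sleep until the next interrupt */
	}

	unsigned	read_cc(void) {
		return zip_cc();	/* read the condition-code register */
	}
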
+
+
+static tree
+static tree
+zip_builtin_decl(unsigned zip_builtin_code, bool initialize_p ATTRIBUTE_UNUSED)
+zip_builtin_decl(unsigned zip_builtin_code, bool initialize_p ATTRIBUTE_UNUSED)
+{
+{
+  if (zip_builtin_code >= ZIP_BUILTIN_MAX)
+  if (zip_builtin_code >= ZIP_BUILTIN_MAX)
+    return error_mark_node;
+    return error_mark_node;
+
+
+  return zip_builtins[zip_builtin_code];
+  return zip_builtins[zip_builtin_code];
+}
+}
+
+
+static rtx
+static rtx
+zip_expand_builtin(tree exp, rtx target,
+zip_expand_builtin(tree exp, rtx target,
+               rtx subtarget ATTRIBUTE_UNUSED,
+               rtx subtarget ATTRIBUTE_UNUSED,
+               machine_mode tmode ATTRIBUTE_UNUSED,
+               machine_mode tmode ATTRIBUTE_UNUSED,
+               int     ignore ATTRIBUTE_UNUSED)
+               int     ignore ATTRIBUTE_UNUSED)
+{
+{
+       tree    fndecl = TREE_OPERAND(CALL_EXPR_FN(exp), 0);
+       tree    fndecl = TREE_OPERAND(CALL_EXPR_FN(exp), 0);
+       bool    nonvoid = (TREE_TYPE(TREE_TYPE(fndecl)) != void_type_node);
+       bool    nonvoid = (TREE_TYPE(TREE_TYPE(fndecl)) != void_type_node);
+       enum    ZIP_BUILTIN_ID_CODE code=(enum ZIP_BUILTIN_ID_CODE)DECL_FUNCTION_CODE(fndecl);
+       enum    ZIP_BUILTIN_ID_CODE code=(enum ZIP_BUILTIN_ID_CODE)DECL_FUNCTION_CODE(fndecl);
+       enum    insn_code icode = zip_builtins_icode[code];
+       enum    insn_code icode = zip_builtins_icode[code];
+       rtx     pat, op[5];
+       rtx     pat, op[5];
+       call_expr_arg_iterator  iter;
+       call_expr_arg_iterator  iter;
+       tree    arg;
+       tree    arg;
+
+
+       if ((code == ZIP_BUILTIN_SAVE_CONTEXT)
+       if ((code == ZIP_BUILTIN_SAVE_CONTEXT)
+                       ||(code == ZIP_BUILTIN_RESTORE_CONTEXT)) {
+                       ||(code == ZIP_BUILTIN_RESTORE_CONTEXT)) {
+               arg = first_call_expr_arg(exp, &iter);
+               arg = first_call_expr_arg(exp, &iter);
+               if (arg == error_mark_node)
+               if (arg == error_mark_node)
+                       return NULL_RTX;
+                       return NULL_RTX;
+               op[0] = expand_normal(arg);
+               op[0] = expand_normal(arg);
+               if (GET_CODE(op[0]) != REG)
+               if (GET_CODE(op[0]) != REG)
+                       op[0] = force_reg(Pmode, op[0]);
+                       op[0] = force_reg(Pmode, op[0]);
+               pat = GEN_FCN(icode)(op[0]);
+               pat = GEN_FCN(icode)(op[0]);
+       } else if (code == ZIP_BUILTIN_BITREV) {
+       } else if (code == ZIP_BUILTIN_BITREV) {
+               arg = first_call_expr_arg(exp, &iter);
+               arg = first_call_expr_arg(exp, &iter);
+               if (arg == error_mark_node) {
+               if (arg == error_mark_node) {
+                       return NULL_RTX;
+                       return NULL_RTX;
+               }
+               }
+               op[0] = expand_normal(arg);
+               op[0] = expand_normal(arg);
+               if (!target)
+               if (!target)
+                       target = gen_reg_rtx(SImode);
+                       target = gen_reg_rtx(SImode);
+               pat = GEN_FCN(icode)(target, op[0]);
+               pat = GEN_FCN(icode)(target, op[0]);
+       } else if ((code == ZIP_BUILTIN_CC)||(code == ZIP_BUILTIN_UCC)) {
+       } else if ((code == ZIP_BUILTIN_CC)||(code == ZIP_BUILTIN_UCC)) {
+               if (!target)
+               if (!target)
+                       target = gen_reg_rtx(SImode);
+                       target = gen_reg_rtx(SImode);
+               pat = GEN_FCN(icode)(target);
+               pat = GEN_FCN(icode)(target);
+       } else // RTU, HALT, BUSY, IDLE, SYSCALL
+       } else // RTU, HALT, BUSY, IDLE, SYSCALL
+               pat = GEN_FCN(icode)();
+               pat = GEN_FCN(icode)();
+       if (!pat)
+       if (!pat)
+               return NULL_RTX;
+               return NULL_RTX;
+       emit_insn(pat);
+       emit_insn(pat);
+       return (nonvoid ? target : const0_rtx);
+       return (nonvoid ? target : const0_rtx);
+}
+}
+
+
+static bool
+static bool
+zip_scalar_mode_supported_p(enum machine_mode mode)
+zip_scalar_mode_supported_p(enum machine_mode mode)
+{
+{
+       if ((ZIP_HAS_DI)&&(mode == DImode))
+       if ((ZIP_HAS_DI)&&(mode == DImode))
+               return true;
+               return true;
+       if ((mode==SImode)||(mode==HImode)||(mode==QImode))
+       if ((mode==SImode)||(mode==HImode)||(mode==QImode))
+               return true;
+               return true;
+       if (mode==SFmode)       // &&(ZIP_FPU)
+       if (mode==SFmode)       // &&(ZIP_FPU)
+               return true;    // If (!ZIP_FPU), will need to be emulated
+               return true;    // If (!ZIP_FPU), will need to be emulated
+       if (mode==DFmode)       // Must always be emulated
+       if (mode==DFmode)       // Must always be emulated
+               return true;
+               return true;
+       return false;
+       return false;
+}
+}
+
+
+static bool
+static bool
+zip_libgcc_floating_mode_supported_p(enum machine_mode mode)
+zip_libgcc_floating_mode_supported_p(enum machine_mode mode)
+{
+{
+       return ((mode)==SFmode)||((mode)==DFmode);
+       return ((mode)==SFmode)||((mode)==DFmode);
+}
+}
+
+
+static int
+static int
+zip_address_cost(rtx addr ATTRIBUTE_UNUSED,
+zip_address_cost(rtx addr ATTRIBUTE_UNUSED,
+       enum machine_mode mode ATTRIBUTE_UNUSED,
+       enum machine_mode mode ATTRIBUTE_UNUSED,
+       addr_space_t as ATTRIBUTE_UNUSED, bool spd ATTRIBUTE_UNUSED) {
+       addr_space_t as ATTRIBUTE_UNUSED, bool spd ATTRIBUTE_UNUSED) {
+       return 1;
+       return 1;
+}
+}
+
+
+static bool
+static bool
+zip_mode_dependent_address_p(const_rtx addr ATTRIBUTE_UNUSED,
+zip_mode_dependent_address_p(const_rtx addr ATTRIBUTE_UNUSED,
+       addr_space_t as ATTRIBUTE_UNUSED) {
+       addr_space_t as ATTRIBUTE_UNUSED) {
+       return false;
+       return false;
+}
+}
+
+
+static void
+static void
+zip_debug_print(const char *pfx, int lvl, const char *str) {
+zip_debug_print(const char *pfx, int lvl, const char *str) {
+       int     i;
+       int     i;
+       i = lvl;
+       i = lvl;
+       if ((true)||(lvl == 0))
+       if ((true)||(lvl == 0))
+               fprintf(stderr, "%s", pfx);
+               fprintf(stderr, "%s", pfx);
+       else
+       else
+               i += strlen(pfx);
+               i += strlen(pfx);
+       while(i-->0)
+       while(i-->0)
+               fprintf(stderr, "  ");
+               fprintf(stderr, "  ");
+       fprintf(stderr, "%s\n", str);
+       fprintf(stderr, "%s\n", str);
+}
+}
+
+
+static void
+static void
+zip_debug_print_m(const char *pfx, int lvl, const char *str, enum machine_mode m) {
+zip_debug_print_m(const char *pfx, int lvl, const char *str, enum machine_mode m) {
+       int     i;
+       int     i;
+
+
+       i = lvl;
+       i = lvl;
+       if ((true)||(lvl == 0))
+       if ((true)||(lvl == 0))
+               fprintf(stderr, "%s", pfx);
+               fprintf(stderr, "%s", pfx);
+       else
+       else
+               i = lvl+strlen(pfx);
+               i = lvl+strlen(pfx);
+       while(i-->0)
+       while(i-->0)
+               fprintf(stderr, "  ");
+               fprintf(stderr, "  ");
+       switch(m) {
+       switch(m) {
+               case VOIDmode:
+               case VOIDmode:
+                       fprintf(stderr, "%s:V\n", str);
+                       fprintf(stderr, "%s:V\n", str);
+                       break;
+                       break;
+               case BLKmode:
+               case BLKmode:
+                       fprintf(stderr, "%s:BLK\n", str);
+                       fprintf(stderr, "%s:BLK\n", str);
+                       break;
+                       break;
+               case BImode:
+               case BImode:
+                       fprintf(stderr, "%s:BI\n", str);
+                       fprintf(stderr, "%s:BI\n", str);
+                       break;
+                       break;
+               case QImode:
+               case QImode:
+                       fprintf(stderr, "%s:QI\n", str);
+                       fprintf(stderr, "%s:QI\n", str);
+                       break;
+                       break;
+               case HImode:
+               case HImode:
+                       fprintf(stderr, "%s:HI\n", str);
+                       fprintf(stderr, "%s:HI\n", str);
+                       break;
+                       break;
+#ifdef HAVE_SImode
+#ifdef HAVE_SImode
+               case SImode:
+               case SImode:
+                       fprintf(stderr, "%s:SI\n", str);
+                       fprintf(stderr, "%s:SI\n", str);
+                       break;
+                       break;
+#endif
+#endif
+#ifdef HAVE_DImode
+#ifdef HAVE_DImode
+               case DImode:
+               case DImode:
+                       fprintf(stderr, "%s:DI\n", str);
+                       fprintf(stderr, "%s:DI\n", str);
+                       break;
+                       break;
+#endif
+#endif
+               case CCmode:
+               case CCmode:
+                       fprintf(stderr, "%s:CC\n", str);
+                       fprintf(stderr, "%s:CC\n", str);
+                       break;
+                       break;
+               default:
+               default:
+                       fprintf(stderr, "%s:?\n", str);
+                       fprintf(stderr, "%s:?\n", str);
+       }
+       }
+}
+}
+
+
+static void
+static void
+zip_debug_rtx_1(const char *pfx, const_rtx x, int lvl) {
+zip_debug_rtx_1(const char *pfx, const_rtx x, int lvl) {
+       if (x == NULL_RTX) {
+       if (x == NULL_RTX) {
+               zip_debug_print(pfx, lvl, "(NULL-RTX)");
+               zip_debug_print(pfx, lvl, "(NULL-RTX)");
+               return;
+               return;
+       } else if (GET_CODE(x) > NUM_RTX_CODE) {
+       } else if (GET_CODE(x) > NUM_RTX_CODE) {
+               char    buf[64];
+               char    buf[64];
+               sprintf(buf, "(BAD-RTX-CODE %d)", GET_CODE(x));
+               sprintf(buf, "(BAD-RTX-CODE %d)", GET_CODE(x));
+               zip_debug_print(pfx, lvl, buf);
+               zip_debug_print(pfx, lvl, buf);
+               gcc_assert(0 && "Bad RTX Code");
+               gcc_assert(0 && "Bad RTX Code");
+               return;
+               return;
+       } switch(GET_CODE(x)) { // rtl.def
+       } switch(GET_CODE(x)) { // rtl.def
+       case PARALLEL:
+       case PARALLEL:
+               zip_debug_print(pfx, lvl, "(PARALLEL");
+               zip_debug_print(pfx, lvl, "(PARALLEL");
+               if (XVEC(x,0) != NULL)
+               if (XVEC(x,0) != NULL)
+                       for(int j=0; j<XVECLEN(x,0);j++)
+                       for(int j=0; j<XVECLEN(x,0);j++)
+                               zip_debug_rtx_1(pfx, XVECEXP(x,0,j), lvl+1);
+                               zip_debug_rtx_1(pfx, XVECEXP(x,0,j), lvl+1);
+               zip_debug_print(pfx, lvl, ")");
+               zip_debug_print(pfx, lvl, ")");
+               debug_rtx(x);
+               debug_rtx(x);
+               break;
+               break;
+       case INT_LIST: zip_debug_print(pfx, lvl, "(INT-LIST"); break;
+       case INT_LIST: zip_debug_print(pfx, lvl, "(INT-LIST"); break;
+       case SEQUENCE:
+       case SEQUENCE:
+               zip_debug_print(pfx, lvl, "(SEQUENCE");
+               zip_debug_print(pfx, lvl, "(SEQUENCE");
+               for(int j=0; j<XVECLEN(x,0);j++)
+               for(int j=0; j<XVECLEN(x,0);j++)
+                       zip_debug_rtx_1(pfx, XVECEXP(x,0,j), lvl+1);
+                       zip_debug_rtx_1(pfx, XVECEXP(x,0,j), lvl+1);
+               zip_debug_print(pfx, lvl, ")");
+               zip_debug_print(pfx, lvl, ")");
+               debug_rtx(x);
+               debug_rtx(x);
+               break;
+               break;
+       case ADDRESS: zip_debug_print(pfx, lvl, "(ADDRESS"); break;
+       case ADDRESS: zip_debug_print(pfx, lvl, "(ADDRESS"); break;
+       case DEBUG_INSN: zip_debug_print(pfx, lvl, "(DEBUG-INSN"); break;
+       case DEBUG_INSN: zip_debug_print(pfx, lvl, "(DEBUG-INSN"); break;
+       case INSN:
+       case INSN:
+               zip_debug_print(pfx, lvl, "(INSN");
+               zip_debug_print(pfx, lvl, "(INSN");
+               /*
+               /*
+               { const rtx_insn *tmp_rtx;
+               { const rtx_insn *tmp_rtx;
+               for(tmp_rtx = as_a <const rtx_insn *>(x); tmp_rtx != 0; tmp_rtx = NEXT_INSN(tmp_rtx)) {
+               for(tmp_rtx = as_a <const rtx_insn *>(x); tmp_rtx != 0; tmp_rtx = NEXT_INSN(tmp_rtx)) {
+                       zip_debug_rtx_1(tmp_rtx, lvl+1);
+                       zip_debug_rtx_1(tmp_rtx, lvl+1);
+               }}
+               }}
+               */
+               */
+               zip_debug_rtx_1(pfx, PATTERN(x), lvl+1);
+               zip_debug_rtx_1(pfx, PATTERN(x), lvl+1);
+               zip_debug_print(pfx, lvl, ")");
+               zip_debug_print(pfx, lvl, ")");
+               debug_rtx(x);
+               debug_rtx(x);
+               break;
+               break;
+       case JUMP_INSN: zip_debug_print(pfx, lvl, "(JUMP-INSN");
+       case JUMP_INSN: zip_debug_print(pfx, lvl, "(JUMP-INSN");
+               zip_debug_rtx_1(pfx, PATTERN(x), lvl+1);
+               zip_debug_rtx_1(pfx, PATTERN(x), lvl+1);
+               zip_debug_print(pfx, lvl, ")");
+               zip_debug_print(pfx, lvl, ")");
+               /*
+               /*
+               if (JUMP_LABEL(x)) {
+               if (JUMP_LABEL(x)) {
+                       if (GET_CODE(JUMP_LABEL(x)) == LABEL_REF) {
+                       if (GET_CODE(JUMP_LABEL(x)) == LABEL_REF) {
+                               char    buf[64];
+                               char    buf[64];
+                               sprintf(buf, "(LABEL *.L%d))", CODE_LABEL_NUMBER(LABEL_REF_LABEL(JUMP_LABEL(x))));
+                               sprintf(buf, "(LABEL *.L%d))", CODE_LABEL_NUMBER(LABEL_REF_LABEL(JUMP_LABEL(x))));
+                               zip_debug_print(pfx, lvl+1, buf);
+                               zip_debug_print(pfx, lvl+1, buf);
+                       } else if (GET_CODE(JUMP_LABEL(x))==CODE_LABEL) {
+                       } else if (GET_CODE(JUMP_LABEL(x))==CODE_LABEL) {
+                               char    buf[64];
+                               char    buf[64];
+                               sprintf(buf, "(CODE_LABEL *.L%d))", CODE_LABEL_NUMBER(JUMP_LABEL(x)));
+                               sprintf(buf, "(CODE_LABEL *.L%d))", CODE_LABEL_NUMBER(JUMP_LABEL(x)));
+                               zip_debug_print(pfx, lvl+1, buf);
+                               zip_debug_print(pfx, lvl+1, buf);
+                       } else
+                       } else
+                       zip_debug_print(pfx, lvl+1, "(w/Label))");
+                       zip_debug_print(pfx, lvl+1, "(w/Label))");
+               } else
+               } else
+                       zip_debug_print(pfx, lvl+1, "(NO label))");
+                       zip_debug_print(pfx, lvl+1, "(NO label))");
+               debug_rtx(x);
+               debug_rtx(x);
+               */
+               */
+               break;
+               break;
+       case CALL:
+       case CALL:
+               zip_debug_print(pfx, lvl, "(CALL (Adr) (Args)");
+               zip_debug_print(pfx, lvl, "(CALL (Adr) (Args)");
+               zip_debug_rtx_1(pfx, XEXP(x,0), lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,0), lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1), lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1), lvl+1);
+               zip_debug_print(pfx, lvl, ")");
+               zip_debug_print(pfx, lvl, ")");
+               break;
+               break;
+       case CALL_INSN: zip_debug_print(pfx, lvl, "(CALL-INSN");
+       case CALL_INSN: zip_debug_print(pfx, lvl, "(CALL-INSN");
+               debug_rtx(x);
+               debug_rtx(x);
+               break;
+               break;
+       case BARRIER: zip_debug_print(pfx, lvl, "(BARRIER)"); break;
+       case BARRIER: zip_debug_print(pfx, lvl, "(BARRIER)"); break;
+       case RETURN: zip_debug_print(pfx, lvl, "(RETURN)"); break;
+       case RETURN: zip_debug_print(pfx, lvl, "(RETURN)"); break;
+       case NOTE:
+       case NOTE:
+               {       char buf[128];
+               {       char buf[128];
+                       sprintf(buf, "(NOTE %s)", GET_REG_NOTE_NAME(GET_MODE(x)));
+                       sprintf(buf, "(NOTE %s)", GET_REG_NOTE_NAME(GET_MODE(x)));
+                       zip_debug_print(pfx, lvl, buf);
+                       zip_debug_print(pfx, lvl, buf);
+               }break;
+               }break;
+       case COND_EXEC: zip_debug_print(pfx, lvl, "(COND_EXEC)");
+       case COND_EXEC: zip_debug_print(pfx, lvl, "(COND_EXEC)");
+               debug_rtx(x);
+               debug_rtx(x);
+               break;
+               break;
+       case ASM_INPUT: zip_debug_print(pfx, lvl, "(ASM INPUT)"); break;
+       case ASM_INPUT: zip_debug_print(pfx, lvl, "(ASM INPUT)"); break;
+       case ASM_OPERANDS: zip_debug_print(pfx, lvl, "(ASM OPERANDS)"); break;
+       case ASM_OPERANDS: zip_debug_print(pfx, lvl, "(ASM OPERANDS)"); break;
+       case UNSPEC: zip_debug_print(pfx, lvl, "(UNSPEC)"); break;
+       case UNSPEC: zip_debug_print(pfx, lvl, "(UNSPEC)"); break;
+       case UNSPEC_VOLATILE: zip_debug_print(pfx, lvl, "(UNSPEC_VOLATILE)"); break;
+       case UNSPEC_VOLATILE: zip_debug_print(pfx, lvl, "(UNSPEC_VOLATILE)"); break;
+       case CODE_LABEL:
+       case CODE_LABEL:
+               {
+               {
+                       char    buf[128];
+                       char    buf[128];
+                       sprintf(buf, "(CODE_LABEL *.L%d)", CODE_LABEL_NUMBER(x));
+                       sprintf(buf, "(CODE_LABEL *.L%d)", CODE_LABEL_NUMBER(x));
+                       zip_debug_print_m(pfx, lvl, buf, GET_MODE(x));
+                       zip_debug_print_m(pfx, lvl, buf, GET_MODE(x));
+               } break;
+               } break;
+       case SET:
+       case SET:
+               zip_debug_print_m(pfx, lvl, "(SET", GET_MODE(x));
+               zip_debug_print_m(pfx, lvl, "(SET", GET_MODE(x));
+               zip_debug_rtx_1(pfx, SET_DEST(x),lvl+1);
+               zip_debug_rtx_1(pfx, SET_DEST(x),lvl+1);
+               zip_debug_rtx_1(pfx, SET_SRC(x),lvl+1);
+               zip_debug_rtx_1(pfx, SET_SRC(x),lvl+1);
+               zip_debug_print(pfx, lvl, ")");
+               zip_debug_print(pfx, lvl, ")");
+               debug_rtx(x);
+               debug_rtx(x);
+               break;
+               break;
+       case REG: {
+       case REG: {
+               char buf[25], mstr[4];
+               char buf[25], mstr[4];
+               mstr[0] = '\0';
+               mstr[0] = '\0';
+               if (GET_MODE(x) == QImode)
+               if (GET_MODE(x) == QImode)
+                       strcpy(mstr, ":QI");
+                       strcpy(mstr, ":QI");
+               else if (GET_MODE(x) == HImode)
+               else if (GET_MODE(x) == HImode)
+                       strcpy(mstr, ":HI");
+                       strcpy(mstr, ":HI");
+               else if (GET_MODE(x) == VOIDmode)
+               else if (GET_MODE(x) == VOIDmode)
+                       strcpy(mstr, ":V");
+                       strcpy(mstr, ":V");
+               if (REGNO(x) == zip_PC)
+               if (REGNO(x) == zip_PC)
+                       sprintf(buf, "(PC%s)", mstr);
+                       sprintf(buf, "(PC%s)", mstr);
+               else if (REGNO(x) == zip_CC)
+               else if (REGNO(x) == zip_CC)
+                       sprintf(buf, "(CC%s)", mstr);
+                       sprintf(buf, "(CC%s)", mstr);
+               else if (REGNO(x) == zip_SP)
+               else if (REGNO(x) == zip_SP)
+                       sprintf(buf, "(SP%s)", mstr);
+                       sprintf(buf, "(SP%s)", mstr);
+               else if (REGNO(x) == zip_FP)
+               else if (REGNO(x) == zip_FP)
+                       sprintf(buf, "(REG%s FP)", mstr);
+                       sprintf(buf, "(REG%s FP)", mstr);
+               else if (REGNO(x) == zip_GOT)
+               else if (REGNO(x) == zip_GOT)
+                       sprintf(buf, "(REG%s GBL)", mstr);
+                       sprintf(buf, "(REG%s GBL)", mstr);
+               else if (FUNCTION_VALUE_REGNO_P(REGNO(x)))
+               else if (FUNCTION_VALUE_REGNO_P(REGNO(x)))
+                       sprintf(buf, "(REG%s RTN-VL)", mstr);
+                       sprintf(buf, "(REG%s RTN-VL)", mstr);
+               else if (REGNO(x) == RETURN_ADDRESS_REGNUM)
+               else if (REGNO(x) == RETURN_ADDRESS_REGNUM)
+                       sprintf(buf, "(REG%s RTN-AD)", mstr);
+                       sprintf(buf, "(REG%s RTN-AD)", mstr);
+               else
+               else
+                       sprintf(buf, "(REG%s %d)", mstr, REGNO(x));
+                       sprintf(buf, "(REG%s %d)", mstr, REGNO(x));
+               if (mstr[0])
+               if (mstr[0])
+                       zip_debug_print(pfx, lvl, buf);
+                       zip_debug_print(pfx, lvl, buf);
+               else
+               else
+                       zip_debug_print_m(pfx, lvl, buf, GET_MODE(x));
+                       zip_debug_print_m(pfx, lvl, buf, GET_MODE(x));
+               } break;
+               } break;
+       case IF_THEN_ELSE: // 51
+       case IF_THEN_ELSE: // 51
+               zip_debug_print(pfx, lvl, "(IF-THEN-ELSE");
+               zip_debug_print(pfx, lvl, "(IF-THEN-ELSE");
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,2),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,2),lvl+1);
+               zip_debug_print(pfx, lvl, ")");
+               zip_debug_print(pfx, lvl, ")");
+               break;
+               break;
+       case PC:
+       case PC:
+               zip_debug_print(pfx, lvl, "(PC)");
+               zip_debug_print(pfx, lvl, "(PC)");
+               break;
+               break;
+       case CC0:
+       case CC0:
+               zip_debug_print(pfx, lvl, "(CC0)");
+               zip_debug_print(pfx, lvl, "(CC0)");
+               break;
+               break;
+       case COMPARE:
+       case COMPARE:
+               zip_debug_print_m(pfx, lvl, "(COMPARE", GET_MODE(x));
+               zip_debug_print_m(pfx, lvl, "(COMPARE", GET_MODE(x));
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+               zip_debug_print(pfx, lvl, ")");
+               zip_debug_print(pfx, lvl, ")");
+               break;
+               break;
+       case CONST:
+       case CONST:
+               zip_debug_print_m(pfx, lvl, "(CONST", GET_MODE(x));
+               zip_debug_print_m(pfx, lvl, "(CONST", GET_MODE(x));
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_print(pfx, lvl, ")");
+               zip_debug_print(pfx, lvl, ")");
+               break;
+               break;
+       case CONST_INT:
+       case CONST_INT:
+               { char buf[128];
+               { char buf[128];
+               if (GET_MODE(x)==QImode)
+               if (GET_MODE(x)==QImode)
+                       sprintf(buf, "(CONST_INT:QI %ld)", (long)INTVAL(x));
+                       sprintf(buf, "(CONST_INT:QI %ld)", (long)INTVAL(x));
+               else if (GET_MODE(x)==VOIDmode)
+               else if (GET_MODE(x)==VOIDmode)
+                       sprintf(buf, "(CONST_INT:V %ld, %016lx)", (long)INTVAL(x),
+                       sprintf(buf, "(CONST_INT:V %ld, %016lx)", (long)INTVAL(x),
+                               (unsigned long)INTVAL(x));
+                               (unsigned long)INTVAL(x));
+               else
+               else
+                       sprintf(buf, "(CONST_INT:? %ld)", (long)INTVAL(x));
+                       sprintf(buf, "(CONST_INT:? %ld)", (long)INTVAL(x));
+               zip_debug_print(pfx, lvl, buf);
+               zip_debug_print(pfx, lvl, buf);
+               } break;
+               } break;
+       case LABEL_REF:
+       case LABEL_REF:
+               { char buf[256];
+               { char buf[256];
+               sprintf(buf, "(LABEL *.L%d)", CODE_LABEL_NUMBER(LABEL_REF_LABEL(x)));
+               sprintf(buf, "(LABEL *.L%d)", CODE_LABEL_NUMBER(LABEL_REF_LABEL(x)));
+               zip_debug_print(pfx, lvl, buf);
+               zip_debug_print(pfx, lvl, buf);
+               }
+               }
+               break;
+               break;
+       case SYMBOL_REF:
+       case SYMBOL_REF:
+               {
+               {
+                       char buf[1024];
+                       char buf[1024];
+                       sprintf(buf, "(SYMBOL: %s)", XSTR(x,0));
+                       sprintf(buf, "(SYMBOL: %s)", XSTR(x,0));
+                       // fprintf(file, "%s", XSTR(x,0));
+                       // fprintf(file, "%s", XSTR(x,0));
+                       zip_debug_print(pfx, lvl, buf);
+                       zip_debug_print(pfx, lvl, buf);
+               }
+               }
+               break;
+               break;
+       case MEM:
+       case MEM:
+               zip_debug_print_m(pfx, lvl, "(MEM", GET_MODE(x));
+               zip_debug_print_m(pfx, lvl, "(MEM", GET_MODE(x));
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_print(pfx, lvl, ")");
+               zip_debug_print(pfx, lvl, ")");
+               break;
+               break;
+       /*
+       /*
+       case VALUE:
+       case VALUE:
+               {
+               {
+                       char buf[64];
+                       char buf[64];
+                       sprintf(buf, "(VALUE: %d)", INTVAL(XEXP,0));
+                       sprintf(buf, "(VALUE: %d)", INTVAL(XEXP,0));
+                       zip_debug_print_m(pfx, lvl, "buf", GET_MODE(x));
+                       zip_debug_print_m(pfx, lvl, "buf", GET_MODE(x));
+               }
+               }
+               break;
+               break;
+       */
+       */
+       case PLUS:
+       case PLUS:
+               zip_debug_print_m(pfx, lvl, "(PLUS", GET_MODE(x));
+               zip_debug_print_m(pfx, lvl, "(PLUS", GET_MODE(x));
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+               zip_debug_print(pfx, lvl, ")");
+               zip_debug_print(pfx, lvl, ")");
+               break;
+               break;
+       case MINUS:
+       case MINUS:
+               zip_debug_print_m(pfx, lvl, "(MINUS", GET_MODE(x));
+               zip_debug_print_m(pfx, lvl, "(MINUS", GET_MODE(x));
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+               zip_debug_print(pfx, lvl, ")");
+               zip_debug_print(pfx, lvl, ")");
+               break;
+               break;
+       case AND:
+       case AND:
+               zip_debug_print_m(pfx, lvl, "(AND", GET_MODE(x));
+               zip_debug_print_m(pfx, lvl, "(AND", GET_MODE(x));
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+               zip_debug_print(pfx, lvl, ")");
+               zip_debug_print(pfx, lvl, ")");
+               break;
+               break;
+       case IOR:
+       case IOR:
+               zip_debug_print_m(pfx, lvl, "(OR", GET_MODE(x));
+               zip_debug_print_m(pfx, lvl, "(OR", GET_MODE(x));
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+               zip_debug_print(pfx, lvl, ")");
+               zip_debug_print(pfx, lvl, ")");
+               break;
+               break;
+       case XOR:
+       case XOR:
+               zip_debug_print_m(pfx, lvl, "(XOR", GET_MODE(x));
+               zip_debug_print_m(pfx, lvl, "(XOR", GET_MODE(x));
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+               zip_debug_print(pfx, lvl, ")");
+               zip_debug_print(pfx, lvl, ")");
+               break;
+               break;
+       case MULT:
+       case MULT:
+               zip_debug_print_m(pfx, lvl, "(MULT", GET_MODE(x));
+               zip_debug_print_m(pfx, lvl, "(MULT", GET_MODE(x));
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+               zip_debug_print(pfx, lvl, ")");
+               zip_debug_print(pfx, lvl, ")");
+               break;
+               break;
+       case EQ:        //
+       case EQ:        //
+               zip_debug_print_m(pfx, lvl, "(EQ", GET_MODE(x));
+               zip_debug_print_m(pfx, lvl, "(EQ", GET_MODE(x));
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+               zip_debug_print(pfx, lvl, ")");
+               zip_debug_print(pfx, lvl, ")");
+               break;
+               break;
+       case NE:        //
+       case NE:        //
+               zip_debug_print_m(pfx, lvl, "(NE", GET_MODE(x));
+               zip_debug_print_m(pfx, lvl, "(NE", GET_MODE(x));
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+               zip_debug_print(pfx, lvl, ")");
+               zip_debug_print(pfx, lvl, ")");
+               break;
+               break;
+       case GE:        //
+       case GE:        //
+               zip_debug_print_m(pfx, lvl, "(GE", GET_MODE(x));
+               zip_debug_print_m(pfx, lvl, "(GE", GET_MODE(x));
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+               zip_debug_print(pfx, lvl, ")");
+               zip_debug_print(pfx, lvl, ")");
+               break;
+               break;
+       case GT:        //
+       case GT:        //
+               zip_debug_print_m(pfx, lvl, "(GT", GET_MODE(x));
+               zip_debug_print_m(pfx, lvl, "(GT", GET_MODE(x));
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+               zip_debug_print(pfx, lvl, ")");
+               zip_debug_print(pfx, lvl, ")");
+               break;
+               break;
+       case LE:        //
+       case LE:        //
+               zip_debug_print_m(pfx, lvl, "(LE", GET_MODE(x));
+               zip_debug_print_m(pfx, lvl, "(LE", GET_MODE(x));
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+               zip_debug_print(pfx, lvl, ")");
+               zip_debug_print(pfx, lvl, ")");
+               break;
+               break;
+       case LT:        //
+       case LT:        //
+               zip_debug_print_m(pfx, lvl, "(LT", GET_MODE(x));
+               zip_debug_print_m(pfx, lvl, "(LT", GET_MODE(x));
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+               zip_debug_print(pfx, lvl, ")");
+               zip_debug_print(pfx, lvl, ")");
+               break;
+               break;
+       case GEU:       //
+       case GEU:       //
+               zip_debug_print_m(pfx, lvl, "(GEU", GET_MODE(x));
+               zip_debug_print_m(pfx, lvl, "(GEU", GET_MODE(x));
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+               zip_debug_print(pfx, lvl, ")");
+               zip_debug_print(pfx, lvl, ")");
+               break;
+               break;
+       case GTU:       //
+       case GTU:       //
+               zip_debug_print_m(pfx, lvl, "(GTU", GET_MODE(x));
+               zip_debug_print_m(pfx, lvl, "(GTU", GET_MODE(x));
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+               zip_debug_print(pfx, lvl, ")");
+               zip_debug_print(pfx, lvl, ")");
+               break;
+               break;
+       case LEU:       //
+       case LEU:       //
+               zip_debug_print_m(pfx, lvl, "(LEU", GET_MODE(x));
+               zip_debug_print_m(pfx, lvl, "(LEU", GET_MODE(x));
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+               zip_debug_print(pfx, lvl, ")");
+               zip_debug_print(pfx, lvl, ")");
+               break;
+               break;
+       case LTU:       //
+       case LTU:       //
+               zip_debug_print_m(pfx, lvl, "(LTU", GET_MODE(x));
+               zip_debug_print_m(pfx, lvl, "(LTU", GET_MODE(x));
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+               zip_debug_print(pfx, lvl, ")");
+               zip_debug_print(pfx, lvl, ")");
+               break;
+               break;
+       case SCRATCH:   //
+       case SCRATCH:   //
+               zip_debug_print_m(pfx, lvl, "(SCRATCH)", GET_MODE(x));
+               zip_debug_print_m(pfx, lvl, "(SCRATCH)", GET_MODE(x));
+               break;
+               break;
+       case SUBREG:
+       case SUBREG:
+               { char buf[64], mstr[8];
+               { char buf[64], mstr[8];
+               if (GET_MODE(x) == QImode)
+               if (GET_MODE(x) == QImode)
+                       strcpy(mstr, ":QI");
+                       strcpy(mstr, ":QI");
+               else if (GET_MODE(x) == HImode)
+               else if (GET_MODE(x) == HImode)
+                       strcpy(mstr, ":HI");
+                       strcpy(mstr, ":HI");
+               else if (GET_MODE(x) == SImode)
+               else if (GET_MODE(x) == SImode)
+                       strcpy(mstr, ":SI");
+                       strcpy(mstr, ":SI");
 
+               else if (GET_MODE(x) == DImode)
 
+                       strcpy(mstr, ":DI");
 
+               else if (GET_MODE(x) == SFmode)
 
+                       strcpy(mstr, ":SF");
 
+               else if (GET_MODE(x) == DFmode)
 
+                       strcpy(mstr, ":DF");
+               else if (GET_MODE(x) == VOIDmode)
+               else if (GET_MODE(x) == VOIDmode)
+                       strcpy(mstr, ":V");
+                       strcpy(mstr, ":V");
+               else
+               else
+                       strcpy(mstr, ":?");
+                       strcpy(mstr, ":?");
+               if (REG_P(XEXP(x,0))) {
+               if (REG_P(XEXP(x,0))) {
+                       int hreg = REGNO(XEXP(x,0)), mod = GET_MODE(XEXP(x,0)),
+                       int hreg = REGNO(XEXP(x,0)), mod = GET_MODE(XEXP(x,0)),
+                               sb = SUBREG_BYTE(x);
+                               sb = SUBREG_BYTE(x);
+                       if (mod==QImode)
+                       if (mod==SFmode)
+                       sprintf(buf,"(SUBREG%s (REG:QI %d)/%d)",mstr,hreg, sb);
+                       sprintf(buf,"(SUBREG%s (REG:SF %d)/%d)",mstr,hreg, sb);
 
+                       else if (mod==DFmode)
 
+                       sprintf(buf,"(SUBREG%s (REG:DF %d)/%d)",mstr,hreg, sb);
 
+                       else if (mod==DImode)
 
+                       sprintf(buf,"(SUBREG%s (REG:DI %d)/%d)",mstr,hreg, sb);
 
+                       else if (mod==SImode)
 
+                       sprintf(buf,"(SUBREG%s (REG:SI %d)/%d)",mstr,hreg, sb);
+                       else if (mod==HImode)
+                       else if (mod==HImode)
+                       sprintf(buf,"(SUBREG%s (REG:HI %d)/%d)",mstr,hreg, sb);
+                       sprintf(buf,"(SUBREG%s (REG:HI %d)/%d)",mstr,hreg, sb);
+                       else if (mod==QImode)
+                       else if (mod==QImode)
+                       sprintf(buf,"(SUBREG%s (REG:QI %d)/%d)",mstr,hreg, sb);
+                       sprintf(buf,"(SUBREG%s (REG:QI %d)/%d)",mstr,hreg, sb);
+                       else if (mod==VOIDmode)
+                       else if (mod==VOIDmode)
+                       sprintf(buf,"(SUBREG%s (REG:V %d)/%d)",mstr,hreg, sb);
+                       sprintf(buf,"(SUBREG%s (REG:V %d)/%d)",mstr,hreg, sb);
+                       else
+                       else
+                       sprintf(buf,"(SUBREG%s %d:?/%d)",mstr,hreg, sb);
+                       sprintf(buf,"(SUBREG%s %d:?/%d)",mstr,hreg, sb);
+                       zip_debug_print(pfx, lvl, buf);
+                       zip_debug_print(pfx, lvl, buf);
+               } else if (MEM_P(XEXP(x,0))) {
+               } else if (MEM_P(XEXP(x,0))) {
+                       sprintf(buf, "(SUBREG%s /%d", mstr,SUBREG_BYTE(x));
+                       sprintf(buf, "(SUBREG%s /%d", mstr,SUBREG_BYTE(x));
+                       zip_debug_print(pfx, lvl, buf);
+                       zip_debug_print(pfx, lvl, buf);
+                       zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+                       zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+                       zip_debug_print(pfx, lvl, ")");
+                       zip_debug_print(pfx, lvl, ")");
+               } else {
+               } else {
+                       sprintf(buf, "(SUBREG%s UNK /%d", mstr,SUBREG_BYTE(x));
+                       sprintf(buf, "(SUBREG%s UNK /%d", mstr,SUBREG_BYTE(x));
+                       zip_debug_print(pfx, lvl, buf);
+                       zip_debug_print(pfx, lvl, buf);
+                       zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+                       zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+                       zip_debug_print(pfx, lvl, ")");
+                       zip_debug_print(pfx, lvl, ")");
+               }}
+               }}
+               break;
+               break;
+       case ASHIFT:
+       case ASHIFT:
+               zip_debug_print_m(pfx, lvl, "(ASHIFT", GET_MODE(x));
+               zip_debug_print_m(pfx, lvl, "(ASHIFT", GET_MODE(x));
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+               zip_debug_print(pfx, lvl, ")");
+               zip_debug_print(pfx, lvl, ")");
+               break;
+               break;
+       case ASHIFTRT:
+       case ASHIFTRT:
+               zip_debug_print_m(pfx, lvl, "(ASHIFTRT", GET_MODE(x));
+               zip_debug_print_m(pfx, lvl, "(ASHIFTRT", GET_MODE(x));
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+               zip_debug_print(pfx, lvl, ")");
+               zip_debug_print(pfx, lvl, ")");
+               break;
+               break;
+       case LSHIFTRT:
+       case LSHIFTRT:
+               zip_debug_print_m(pfx, lvl, "(LSHIFTRT", GET_MODE(x));
+               zip_debug_print_m(pfx, lvl, "(LSHIFTRT", GET_MODE(x));
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+               zip_debug_print(pfx, lvl, ")");
+               zip_debug_print(pfx, lvl, ")");
+               break;
+               break;
+       case ZERO_EXTRACT:
+       case ZERO_EXTRACT:
+               zip_debug_print_m(pfx, lvl, "(ZERO_EXTRACT", GET_MODE(x));
+               zip_debug_print_m(pfx, lvl, "(ZERO_EXTRACT", GET_MODE(x));
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_print(pfx, lvl, ")");
+               zip_debug_print(pfx, lvl, ")");
+               break;
+               break;
+       case ZERO_EXTEND:
+       case ZERO_EXTEND:
+               zip_debug_print_m(pfx, lvl, "(ZERO_EXTEND", GET_MODE(x));
+               zip_debug_print_m(pfx, lvl, "(ZERO_EXTEND", GET_MODE(x));
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+               zip_debug_print(pfx, lvl, ")");
+               zip_debug_print(pfx, lvl, ")");
+               break;
+               break;
+       default:
+       default:
+               { char buf[128];
+               { char buf[128];
+               sprintf(buf, "(? = %d) -- calling DEBUG-RTX", GET_CODE(x));
+               sprintf(buf, "(? = %d) -- calling DEBUG-RTX", GET_CODE(x));
+               zip_debug_print(pfx, lvl, buf);
+               zip_debug_print(pfx, lvl, buf);
+               debug_rtx(x);
+               debug_rtx(x);
+               } break;
+               } break;
+       }
+       }
+}
+}
+
+
+void
+void
+zip_debug_rtx_pfx(const char *pfx, const_rtx x) {
+zip_debug_rtx_pfx(const char *pfx, const_rtx x) {
+       zip_debug_rtx_1(pfx, x, 0);
+       zip_debug_rtx_1(pfx, x, 0);
+}
+}
+
+
+void
+void
+zip_debug_rtx(const_rtx x) {
+zip_debug_rtx(const_rtx x) {
+       zip_debug_rtx_pfx("", x);
+       zip_debug_rtx_pfx("", x);
+}
+}
+
+
+void
+void
+zip_debug_ccode(int ccode) {
+zip_debug_ccode(int ccode) {
+       switch(ccode) {
+       switch(ccode) {
+       case    EQ: fprintf(stderr, "EQ"); break;
+       case    EQ: fprintf(stderr, "EQ"); break;
+       case    NE: fprintf(stderr, "NE"); break;
+       case    NE: fprintf(stderr, "NE"); break;
+       case    GE: fprintf(stderr, "GE"); break;
+       case    GE: fprintf(stderr, "GE"); break;
+       case    LT: fprintf(stderr, "LT"); break;
+       case    LT: fprintf(stderr, "LT"); break;
+       case    LTU: fprintf(stderr, "LTU"); break;
+       case    LTU: fprintf(stderr, "LTU"); break;
+       case    GEU: fprintf(stderr, "GEU"); break;
+       case    GEU: fprintf(stderr, "GEU"); break;
+       case    GT: fprintf(stderr, "GT[!]"); break;
+       case    GT: fprintf(stderr, "GT[!]"); break;
+       case    LE: fprintf(stderr, "LE[!]"); break;
+       case    LE: fprintf(stderr, "LE[!]"); break;
+       case    GTU: fprintf(stderr, "GTU[!]"); break;
+       case    GTU: fprintf(stderr, "GTU[!]"); break;
+       case    LEU: fprintf(stderr, "LEU[!]"); break;
+       case    LEU: fprintf(stderr, "LEU[!]"); break;
+       default:
+       default:
+               fprintf(stderr, "%d", ccode); break;
+               fprintf(stderr, "%d", ccode); break;
+       }
+       }
+}
+}
+
+
+void
+void
+zip_debug_insn(rtx_insn *insn ATTRIBUTE_UNUSED) {
+zip_debug_insn(rtx_insn *insn ATTRIBUTE_UNUSED) {
+}
+}
+
+
+void
+void
+zip_debug_bb(basic_block bb) {
+zip_debug_bb(basic_block bb) {
+       rtx_insn        *insn;
+       rtx_insn        *insn;
+
+
+       fprintf(stderr, "************ BASIC-BLOCK ***************\n");
+       fprintf(stderr, "************ BASIC-BLOCK ***************\n");
+       FOR_BB_INSNS(bb, insn)
+       FOR_BB_INSNS(bb, insn)
+       {
+       {
+               zip_debug_rtx(insn);
+               zip_debug_rtx(insn);
+       }
+       }
+}
+}
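
The helpers above print an indented, S-expression style rendering of RTL to stderr, which is handy while bringing up the port. A small sketch of how they might be wired into another hook in this same file; this is an illustration only, not something the patch itself does:

	/* Hypothetical bring-up aid: dump both sides of a move before
	 * deciding how to expand it.  dbg would be one of the ZIPDEBUGFLAG
	 * style switches used elsewhere in this file. */
	static void	zip_debug_move(rtx dst, rtx src, bool dbg) {
		if (!dbg)
			return;
		zip_debug_rtx_pfx("MOV-DST: ", dst);
		zip_debug_rtx_pfx("MOV-SRC: ", src);
	}
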
+
+
+
+
+static bool
+static bool
+zip_legitimate_opb(rtx x, bool strict)
+zip_legitimate_opb(rtx x, bool strict)
+{
+{
+       ZIPDEBUGFLAG(dbg, false);
+       ZIPDEBUGFLAG(dbg, false);
+
+
+       if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB\n");
+       if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB\n");
+       if (dbg) zip_debug_rtx_pfx("Test: ", x);
+       if (dbg) zip_debug_rtx_pfx("Test: ", x);
+
+
+       if (NULL_RTX == x)
+       if (NULL_RTX == x)
+               return false;
+               return false;
+       else if ((GET_MODE(x) != QImode)
+       else if ((GET_MODE(x) != QImode)
+                       &&(GET_MODE(x) != HImode)
+                       &&(GET_MODE(x) != HImode)
+                       &&(GET_MODE(x) != SImode)
+                       &&(GET_MODE(x) != SImode)
+                       &&(GET_MODE(x) != VOIDmode)) {
+                       &&(GET_MODE(x) != VOIDmode)) {
+               if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB -> Mode failure\n");
+               if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB -> Mode failure\n");
+               return false;
+               return false;
+       } else if ((strict)&&(REG_P(x))) {
+       } else if ((strict)&&(REG_P(x))) {
+               if (REGNO(x)<zip_CC) {
+               if (REGNO(x)<zip_CC) {
+                       if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB -> (Reg)\n");
+                       if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB -> (Reg)\n");
+                       return true;
+                       return true;
+               } else return false;
+               } else return false;
+       } else if (register_operand(x, GET_MODE(x))) {
+       } else if (register_operand(x, GET_MODE(x))) {
+               // This also handles subregs
+               // This also handles subregs
+               if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB -> (Reg)\n");
+               if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB -> (Reg)\n");
+               return true;
+               return true;
+       } else if ((CONST_INT_P(x))
+       } else if ((CONST_INT_P(x))
+               &&(INTVAL(x) >= zip_min_opb_imm)
+               &&(INTVAL(x) >= zip_min_opb_imm)
+               &&(INTVAL(x) <= zip_max_opb_imm)) {
+               &&(INTVAL(x) <= zip_max_opb_imm)) {
+               if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB -> YES! (Const) %ld <= %ld <= %ld\n", (long)zip_min_opb_imm, (long)INTVAL(x), (long)zip_max_opb_imm);
+               if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB -> YES! (Const) %ld <= %ld <= %ld\n", (long)zip_min_opb_imm, (long)INTVAL(x), (long)zip_max_opb_imm);
+               return true;
+               return true;
+       // } else if ((GET_CODE(x) == LABEL_REF)||(GET_CODE(x)==CODE_LABEL)) {
+       // } else if ((GET_CODE(x) == LABEL_REF)||(GET_CODE(x)==CODE_LABEL)) {
+               // return true;
+               // return true;
+       } else if (GET_CODE(x) == PLUS) {
+       } else if (GET_CODE(x) == PLUS) {
+               // Is it a valid register?
+               // Is it a valid register?
+               rtx     regrtx = XEXP(x, 0);
+               rtx     regrtx = XEXP(x, 0);
+               if ((!strict)&&(!REG_P(regrtx))) {
+               if ((!strict)&&(!REG_P(regrtx))) {
+                       if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB -> No (No reg in +%s)\n",
+                       if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB -> No (No reg in +%s)\n",
+                       (GET_CODE(XEXP(x,1))==REG)?", reg in op[1]":"");
+                       (GET_CODE(XEXP(x,1))==REG)?", reg in op[1]":"");
+                       return false;
+                       return false;
+               } else if ((strict)&&((!REG_P(XEXP(x,0)))||(REGNO(XEXP(x,0))>=zip_CC))) {
+               } else if ((strict)&&((!REG_P(XEXP(x,0)))||(REGNO(XEXP(x,0))>=zip_CC))) {
+                       return false;
+                       return false;
+               } if ((GET_CODE(XEXP(x, 1)) == CONST_INT)
+               } if ((GET_CODE(XEXP(x, 1)) == CONST_INT)
+                       &&(INTVAL(XEXP(x, 1)) <= zip_max_anchor_offset)
+                       &&(INTVAL(XEXP(x, 1)) <= zip_max_anchor_offset)
+                       &&(INTVAL(XEXP(x, 1)) >= zip_min_anchor_offset)) {
+                       &&(INTVAL(XEXP(x, 1)) >= zip_min_anchor_offset)) {
+                       if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB -> YES! (reg+int)\n");
+                       if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB -> YES! (reg+int)\n");
+                       // if((INTVAL(XEXP(x,1))<0)&&(REGNO(XEXP(x,0))==zip_SP))
+                       // if((INTVAL(XEXP(x,1))<0)&&(REGNO(XEXP(x,0))==zip_SP))
+                               // gcc_unreachable();
+                               // gcc_unreachable();
+                       return true;
+                       return true;
+               } if ((GET_CODE(XEXP(x, 1)) == LABEL_REF)
+               } if ((GET_CODE(XEXP(x, 1)) == LABEL_REF)
+                       ||(GET_CODE(XEXP(x, 1)) == CODE_LABEL)
+                       ||(GET_CODE(XEXP(x, 1)) == CODE_LABEL)
+                       ||(GET_CODE(XEXP(x, 1)) == SYMBOL_REF)) {
+                       ||(GET_CODE(XEXP(x, 1)) == SYMBOL_REF)) {
+                       // While we can technically support this, the problem
+                       // While we can technically support this, the problem
+                       // is that the symbol address could be anywhere, and we
+                       // is that the symbol address could be anywhere, and we
+                       // have no way of recovering if it's outside of our
+                       // have no way of recovering if it's outside of our
+                       // 14 allowable bits.
+                       // 14 allowable bits.
+                       if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB -> No. (reg+lbl)\n");
+                       if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB -> No. (reg+lbl)\n");
+                       return false;
+                       return false;
+               }
+               }
+       }
+       }
+
+
+       if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB -> No\n");
+       if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB -> No\n");
+       if (dbg) zip_debug_rtx(x);
+       if (dbg) zip_debug_rtx(x);
+       return false;
+       return false;
+}
+}
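
The reg+symbol rejection above is ultimately about the width of the operand-B immediate field: only displacements that fit the signed window bounded by zip_min_anchor_offset and zip_max_anchor_offset can be encoded. A minimal, self-contained sketch of that range test, assuming the 14-bit signed field the comment above refers to (the real bounds come from the zip_* globals, not these literals):

#include <stdbool.h>

/* Illustrative only: does a displacement fit the assumed 14-bit signed
 * operand-B immediate?  The -8192..8191 window is an assumption drawn
 * from the "14 allowable bits" comment, not from the patch itself. */
static bool fits_opb_displacement(long offset)
{
	return (offset >= -(1L << 13)) && (offset <= (1L << 13) - 1);
}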
+
+
+static bool
+static bool
+zip_legitimate_move_operand_p(machine_mode mode ATTRIBUTE_UNUSED, rtx x, bool strict) {
+zip_legitimate_move_operand_p(machine_mode mode ATTRIBUTE_UNUSED, rtx x, bool strict) {
+       const bool      dbg = ((ZIP_ALL_DEBUG_ON)||(false))&&(!ZIP_ALL_DEBUG_OFF);
+       const bool      dbg = ((ZIP_ALL_DEBUG_ON)||(false))&&(!ZIP_ALL_DEBUG_OFF);
+
+
+       if (dbg) fprintf(stderr, "ZIP-VALID-MOVE-OPERAND\n");
+       if (dbg) fprintf(stderr, "ZIP-VALID-MOVE-OPERAND\n");
+       if (dbg) zip_debug_rtx_pfx("VMov?: ", x);
+       if (dbg) zip_debug_rtx_pfx("VMov?: ", x);
+
+
+       if (!zip_legitimate_opb(x, strict))
+       if (!zip_legitimate_opb(x, strict))
+               return false;
+               return false;
+       else if ((GET_CODE(x)==PLUS)&&(CONST_INT_P(XEXP(x,1)))) {
+       else if ((GET_CODE(x)==PLUS)&&(CONST_INT_P(XEXP(x,1)))) {
+               if ((INTVAL(XEXP(x, 1)) > zip_max_mov_offset)
+               if ((INTVAL(XEXP(x, 1)) > zip_max_mov_offset)
+                       ||(INTVAL(XEXP(x, 1)) < zip_min_mov_offset)) {
+                       ||(INTVAL(XEXP(x, 1)) < zip_min_mov_offset)) {
+                       if (dbg) fprintf(stderr, "ZIP-VALID-MOVE-OPERAND -> NO! (reg+int), int out of bounds: %ld\n", (long)INTVAL(XEXP(x,1)));
+                       if (dbg) fprintf(stderr, "ZIP-VALID-MOVE-OPERAND -> NO! (reg+int), int out of bounds: %ld\n", (long)INTVAL(XEXP(x,1)));
+                       return false;
+                       return false;
+               }
+               }
+       }
+       }
+
+
+       if (dbg) fprintf(stderr, "ZIP-VALID-MOVE-OPERAND -> Yes\n");
+       if (dbg) fprintf(stderr, "ZIP-VALID-MOVE-OPERAND -> Yes\n");
+       if (dbg) zip_debug_rtx(x);
+       if (dbg) zip_debug_rtx(x);
+       return true;
+       return true;
+}
+}
+
+
+int
+int
+zip_pd_mov_operand(rtx op)
+zip_pd_mov_operand(rtx op)
+{
+{
+       const bool      dbg = ((ZIP_ALL_DEBUG_ON)||(false))&&(!ZIP_ALL_DEBUG_OFF);
+       const bool      dbg = ((ZIP_ALL_DEBUG_ON)||(false))&&(!ZIP_ALL_DEBUG_OFF);
+
+
+       if (dbg) fprintf(stderr, "ZIP-VALID-MOV(predicate) for OPERAND\n");
+       if (dbg) fprintf(stderr, "ZIP-VALID-MOV(predicate) for OPERAND\n");
+       return zip_legitimate_move_operand_p(VOIDmode, op, !can_create_pseudo_p());
+       return zip_legitimate_move_operand_p(VOIDmode, op, !can_create_pseudo_p());
+}
+}
+
+
+int
+int
+zip_pd_mvimm_operand(rtx op)
+zip_pd_mvimm_operand(rtx op)
+{
+{
+       const bool      dbg = ((ZIP_ALL_DEBUG_ON)||(false))&&(!ZIP_ALL_DEBUG_OFF);
+       const bool      dbg = ((ZIP_ALL_DEBUG_ON)||(false))&&(!ZIP_ALL_DEBUG_OFF);
+
+
+       if (dbg) fprintf(stderr, "ZIP-VALID-MVIMM(predicate) for OPERAND\n");
+       if (dbg) fprintf(stderr, "ZIP-VALID-MVIMM(predicate) for OPERAND\n");
+       if (!CONST_INT_P(op))
+       if (!CONST_INT_P(op))
+               return false;
+               return false;
+       if (INTVAL(op) > zip_max_mov_offset)
+       if (INTVAL(op) > zip_max_mov_offset)
+               return false;
+               return false;
+       if (INTVAL(op) < zip_min_mov_offset)
+       if (INTVAL(op) < zip_min_mov_offset)
+               return false;
+               return false;
+       return true;
+       return true;
+}
+}
+
+
+int
+int
+zip_pd_imm_operand(rtx op)
+zip_pd_imm_operand(rtx op)
+{
+{
+       const bool      dbg = ((ZIP_ALL_DEBUG_ON)||(false))&&(!ZIP_ALL_DEBUG_OFF);
+       const bool      dbg = ((ZIP_ALL_DEBUG_ON)||(false))&&(!ZIP_ALL_DEBUG_OFF);
+
+
+       if (dbg) fprintf(stderr, "ZIP-VALID-IMM(predicate) for OPERAND\n");
+       if (dbg) fprintf(stderr, "ZIP-VALID-IMM(predicate) for OPERAND\n");
+       if (!CONST_INT_P(op))
+       if (!CONST_INT_P(op))
+               return false;
+               return false;
+       if (INTVAL(op) > zip_max_anchor_offset)
+       if (INTVAL(op) > zip_max_anchor_offset)
+               return false;
+               return false;
+       if (INTVAL(op) < zip_min_anchor_offset)
+       if (INTVAL(op) < zip_min_anchor_offset)
+               return false;
+               return false;
+       return true;
+       return true;
+}
+}
+
+
+int
+int
+zip_address_operand(rtx op)
+zip_address_operand(rtx op)
+{
+{
+       const bool      dbg = ((ZIP_ALL_DEBUG_ON)||(false))&&(!ZIP_ALL_DEBUG_OFF);
+       const bool      dbg = ((ZIP_ALL_DEBUG_ON)||(false))&&(!ZIP_ALL_DEBUG_OFF);
+
+
+       if (dbg) fprintf(stderr, "ZIP-ADDRESS for OPERAND\n");
+       if (dbg) fprintf(stderr, "ZIP-ADDRESS for OPERAND\n");
+       if ((REG_P(op))&&(REGNO(op)==zip_CC))
+       if ((REG_P(op))&&(REGNO(op)==zip_CC))
+               return false;
+               return false;
+       else if ((GET_CODE(op) == PLUS)&&(REG_P(XEXP(op,0)))
+       else if ((GET_CODE(op) == PLUS)&&(REG_P(XEXP(op,0)))
+                       &&(REGNO(XEXP(op,0))==zip_CC))
+                       &&(REGNO(XEXP(op,0))==zip_CC))
+               return false;
+               return false;
+       else
+       else
+               return zip_legitimate_opb(op, !can_create_pseudo_p());
+               return zip_legitimate_opb(op, !can_create_pseudo_p());
+}
+}
+
+
+int
+int
+zip_pd_opb_operand(rtx op)
+zip_pd_opb_operand(rtx op)
+{
+{
+       ZIPDEBUGFLAG(dbg, false);
+       ZIPDEBUGFLAG(dbg, false);
+
+
+       if (dbg) fprintf(stderr, "ZIP-OPB(predicate) for OPERAND\n");
+       if (dbg) fprintf(stderr, "ZIP-OPB(predicate) for OPERAND\n");
+       return zip_legitimate_opb(op, false); //, !can_create_pseudo_p());
+       return zip_legitimate_opb(op, false); //, !can_create_pseudo_p());
+}
+}
+
+
+int
+int
+zip_ct_address_operand(rtx op)
+zip_ct_address_operand(rtx op)
+{
+{
+       ZIPDEBUGFLAG(dbg, false);
+       ZIPDEBUGFLAG(dbg, false);
+
+
+       if (dbg) fprintf(stderr, "ZIP-ADDRESS(constraint) for OPERAND\n");
+       if (dbg) fprintf(stderr, "ZIP-ADDRESS(constraint) for OPERAND\n");
+       return zip_legitimate_opb(op, !can_create_pseudo_p());
+       return zip_legitimate_opb(op, !can_create_pseudo_p());
+}
+}
+
+
+int
+int
+zip_const_address_operand(rtx x) {
+zip_const_address_operand(rtx x) {
+       ZIPDEBUGFLAG(dbg, false);
+       ZIPDEBUGFLAG(dbg, false);
+
+
+       if (dbg) fprintf(stderr, "is ZIP-CONST-ADDRESS?\n");
+       if (dbg) fprintf(stderr, "is ZIP-CONST-ADDRESS?\n");
+       if (dbg) zip_debug_rtx(x);
+       if (dbg) zip_debug_rtx(x);
+       if ((GET_MODE(x) != SImode)&&(GET_MODE(x) != VOIDmode)) {
+       if ((GET_MODE(x) != SImode)&&(GET_MODE(x) != VOIDmode)) {
+               if (dbg) fprintf(stderr, "is ZIP-CONST-ADDRESS? -> NO, BAD MODE\n");
+               if (dbg) fprintf(stderr, "is ZIP-CONST-ADDRESS? -> NO, BAD MODE\n");
+               return false;
+               return false;
+       }
+       }
+       if ((GET_CODE(x) == LABEL_REF)
+       if ((GET_CODE(x) == LABEL_REF)
+                       ||(GET_CODE(x) == CODE_LABEL)
+                       ||(GET_CODE(x) == CODE_LABEL)
+                       ||(GET_CODE(x) == SYMBOL_REF)) {
+                       ||(GET_CODE(x) == SYMBOL_REF)) {
+               if (dbg) fprintf(stderr, "is ZIP-CONST-ADDRESS? -> YES! (LBL)\n");
+               if (dbg) fprintf(stderr, "is ZIP-CONST-ADDRESS? -> YES! (LBL)\n");
+               return true;
+               return true;
+       } else if (CONST_INT_P(x)) {
+       } else if (CONST_INT_P(x)) {
+               if (dbg) fprintf(stderr, "is ZIP-CONST-ADDRESS? -> YES! (INT)\n");
+               if (dbg) fprintf(stderr, "is ZIP-CONST-ADDRESS? -> YES! (INT)\n");
+               return true;
+               return true;
+       } else if (GET_CODE(x) == PLUS) {
+       } else if (GET_CODE(x) == PLUS) {
+               if (dbg) fprintf(stderr, "is ZIP-CONST-ADDRESS(PLUS)\n");
+               if (dbg) fprintf(stderr, "is ZIP-CONST-ADDRESS(PLUS)\n");
+               return ((zip_const_address_operand(XEXP(x,0)))
+               return ((zip_const_address_operand(XEXP(x,0)))
+                       &&(CONST_INT_P(XEXP(x,1))));
+                       &&(CONST_INT_P(XEXP(x,1))));
+       } else if (GET_CODE(x) == MINUS) {
+       } else if (GET_CODE(x) == MINUS) {
+               if (dbg) fprintf(stderr, "is ZIP-CONST-ADDRESS(MINUS)\n");
+               if (dbg) fprintf(stderr, "is ZIP-CONST-ADDRESS(MINUS)\n");
+               return ((zip_const_address_operand(XEXP(x,0)))
+               return ((zip_const_address_operand(XEXP(x,0)))
+                       &&(zip_const_address_operand(XEXP(x,1))));
+                       &&(zip_const_address_operand(XEXP(x,1))));
+       }
+       }
+
+
+       if (dbg) fprintf(stderr, "is ZIP-CONST-ADDRESS? -> No\n");
+       if (dbg) fprintf(stderr, "is ZIP-CONST-ADDRESS? -> No\n");
+       if (dbg) zip_debug_rtx(x);
+       if (dbg) zip_debug_rtx(x);
+       return false;
+       return false;
+}
+}
+
+
+int
+int
+zip_ct_const_address_operand(rtx x) {
+zip_ct_const_address_operand(rtx x) {
+       ZIPDEBUGFLAG(dbg, false);
+       ZIPDEBUGFLAG(dbg, false);
+
+
+       if (dbg) fprintf(stderr, "ZIP-CONST-ADDRESS(constraint)\n");
+       if (dbg) fprintf(stderr, "ZIP-CONST-ADDRESS(constraint)\n");
+       return zip_const_address_operand(x);
+       return zip_const_address_operand(x);
+}
+}
+
+
+int
+int
+zip_pd_const_address_operand(rtx x) {
+zip_pd_const_address_operand(rtx x) {
+       ZIPDEBUGFLAG(dbg, false);
+       ZIPDEBUGFLAG(dbg, false);
+
+
+       if (dbg) fprintf(stderr, "ZIP-CONST-ADDRESS(predicate)\n");
+       if (dbg) fprintf(stderr, "ZIP-CONST-ADDRESS(predicate)\n");
+       return zip_const_address_operand(x);
+       return zip_const_address_operand(x);
+}
+}
+
+
+
+
+static bool
+static bool
+zip_legitimate_address_p(machine_mode mode ATTRIBUTE_UNUSED, rtx x, bool strict)
+zip_legitimate_address_p(machine_mode mode ATTRIBUTE_UNUSED, rtx x, bool strict)
+{
+{
+       ZIPDEBUGFLAG(dbg, false);
+       ZIPDEBUGFLAG(dbg, false);
+
+
+       if (dbg) fprintf(stderr, "Zip-LEGITIMATE-ADDRESS-P\n");
+       if (dbg) fprintf(stderr, "Zip-LEGITIMATE-ADDRESS-P\n");
+       if (dbg) zip_debug_rtx(x);
+       if (dbg) zip_debug_rtx(x);
+
+
+       // Only insist the register be a valid register if strict is true
+       // Only insist the register be a valid register if strict is true
+       if (zip_legitimate_opb(x, strict))
+       if (zip_legitimate_opb(x, strict))
+               return true;
+               return true;
+       // else if (zip_const_address_operand(x))
+       // else if (zip_const_address_operand(x))
+               // return true;
+               // return true;
+
+
+       return false;
+       return false;
+}
+}
+
+
+static rtx
+static rtx
+zip_legitimize_address(rtx x, rtx oldx ATTRIBUTE_UNUSED, machine_mode mode ATTRIBUTE_UNUSED) {
+zip_legitimize_address(rtx x, rtx oldx ATTRIBUTE_UNUSED, machine_mode mode ATTRIBUTE_UNUSED) {
+       ZIPDEBUGFLAG(dbg, false);
+       ZIPDEBUGFLAG(dbg, false);
+
+
+
+
+       if (dbg) zip_debug_rtx_pfx("LEGITIMIZE: ", x);
+       if (dbg) zip_debug_rtx_pfx("LEGITIMIZE: ", x);
+       if (zip_legitimate_address_p(mode, x, !can_create_pseudo_p()))
+       if (zip_legitimate_address_p(mode, x, !can_create_pseudo_p()))
+               return x;
+               return x;
+
+
+       if (dbg) zip_debug_rtx_pfx("ILLEGITIMATE: ", x);
+       if (dbg) zip_debug_rtx_pfx("ILLEGITIMATE: ", x);
+       if (GET_CODE(x)==PLUS) {
+       if (GET_CODE(x)==PLUS) {
+               // if ((zip_legitimate_address_p(mode, XEXP(x,0),
+               // if ((zip_legitimate_address_p(mode, XEXP(x,0),
+               //              !can_create_pseudo_p()))
+               //              !can_create_pseudo_p()))
+               //      &&(GETMODE(XEXP(x,1))==CONST_INT)) {
+               //      &&(GETMODE(XEXP(x,1))==CONST_INT)) {
+               //}
+               //}
+               if (!REG_P(XEXP(x,0)))
+               if (!REG_P(XEXP(x,0)))
+                       XEXP(x,0) = force_reg(Pmode,XEXP(x,0));
+                       XEXP(x,0) = force_reg(Pmode,XEXP(x,0));
+               if ((!zip_legitimate_address_p(mode, x, !can_create_pseudo_p()))
+               if ((!zip_legitimate_address_p(mode, x, !can_create_pseudo_p()))
+                       &&(!CONST_INT_P(XEXP(x,1))))
+                       &&(!CONST_INT_P(XEXP(x,1))))
+                       x = force_reg(GET_MODE(x),x);
+                       x = force_reg(GET_MODE(x),x);
+       } else if (MEM_P(x))
+       } else if (MEM_P(x))
+               x = force_reg(GET_MODE(x),x);
+               x = force_reg(GET_MODE(x),x);
+
+
+       if (dbg) zip_debug_rtx_pfx("LEGITIMATE: ", x);
+       if (dbg) zip_debug_rtx_pfx("LEGITIMATE: ", x);
+       return x;
+       return x;
+}
+}
+
+
+void
+void
+zip_asm_output_def(FILE *stream, const char *name, const char *value)
+zip_asm_output_def(FILE *stream, const char *name, const char *value)
+{
+{
+       fprintf(stream, "\t.equ %s, %s\n", name, value);
+       fprintf(stream, "\t.equ %s, %s\n", name, value);
+}
+}
+
+
+const char *zip_set_zero_or_one(rtx condition, rtx dst) {
+const char *zip_set_zero_or_one(rtx condition, rtx dst) {
+       ZIPDEBUGFLAG(dbg, false);
+       ZIPDEBUGFLAG(dbg, false);
+
+
+       if (dbg) fprintf(stderr, "ZIP::SET-ZERO-OR-ONE\n");
+       if (dbg) fprintf(stderr, "ZIP::SET-ZERO-OR-ONE\n");
+       if (dbg) zip_debug_rtx_pfx("CND", condition);
+       if (dbg) zip_debug_rtx_pfx("CND", condition);
+       if (dbg) zip_debug_rtx_pfx("REG", dst);
+       if (dbg) zip_debug_rtx_pfx("REG", dst);
+       switch(GET_CODE(condition)) {
+       switch(GET_CODE(condition)) {
+       case EQ:        return "LDI\t0,%0\n\tLDILO.Z\t1,%0\t; set01_eq";
+       case EQ:        return "LDI\t0,%0\n\tLDILO.Z\t1,%0\t; set01_eq";
+       case NE:        return "LDI\t0,%0\n\tLDILO.NZ\t1,%0\t; set01_ne";
+       case NE:        return "LDI\t0,%0\n\tLDILO.NZ\t1,%0\t; set01_ne";
+       case LT:        return "LDI\t0,%0\n\tLDILO.LT\t1,%0\t; set01_lt";
+       case LT:        return "LDI\t0,%0\n\tLDILO.LT\t1,%0\t; set01_lt";
+       case GT:        return "LDI\t1,%0\n\tLDILO.LT\t0,%0\n\tLDILO.Z\t0,%0\t; set01_gt";
+       case GT:        return "LDI\t1,%0\n\tLDILO.LT\t0,%0\n\tLDILO.Z\t0,%0\t; set01_gt";
+       case LE:        return "LDI\t0,%0\n\tLDILO.LT\t1,%0\n\tLDILO.Z\t1,%0\t; set01_le";
+       case LE:        return "LDI\t0,%0\n\tLDILO.LT\t1,%0\n\tLDILO.Z\t1,%0\t; set01_le";
+       case GE:        return "LDI\t0,%0\n\tLDILO.GE\t1,%0\t; set01_ge";
+       case GE:        return "LDI\t0,%0\n\tLDILO.GE\t1,%0\t; set01_ge";
+       case LTU:       return "LDI\t0,%0\n\tLDILO.C\t1,%0\t; set01_ltu";
+       case LTU:       return "LDI\t0,%0\n\tLDILO.C\t1,%0\t; set01_ltu";
+       case GEU:       return "LDI\t0,%0\n\tLDILO.NC\t1,%0\t; set01_geu";
+       case GEU:       return "LDI\t0,%0\n\tLDILO.NC\t1,%0\t; set01_geu";
+       case GTU:       return "LDI\t1,%0\n\tLDILO.C\t0,%0\n\tLDILO.Z\t0,%0\t; set01_gtu";
+       case GTU:       return "LDI\t1,%0\n\tLDILO.C\t0,%0\n\tLDILO.Z\t0,%0\t; set01_gtu";
+       case LEU:       return "LDI\t0,%0\n\tLDILO.C\t1,%0\n\tLDILO.Z\t1,%0\t; set01_leu";
+       case LEU:       return "LDI\t0,%0\n\tLDILO.C\t1,%0\n\tLDILO.Z\t1,%0\t; set01_leu";
+       default:
+       default:
+               zip_debug_rtx(condition);
+               zip_debug_rtx(condition);
+               internal_error("CSTORE Unsupported condition");
+               internal_error("CSTORE Unsupported condition");
+               return NULL;
+               return NULL;
+       }
+       }
+}
+}
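
Each template above builds a 0-or-1 result without branching: an unconditional LDI seeds the default value and one or two predicated LDILO instructions overwrite it when the tested flags hold. A rough C model of the EQ and GE rows, where the cc struct is a hypothetical stand-in for the Z and LT flags left by the preceding compare:

/* Sketch of the branchless set-0-or-1 pattern; not GCC code. */
struct cc { int z, lt; };	/* assumed flag bits from the earlier CMP */

static int set01_eq(struct cc f)
{
	int r = 0;		/* LDI     0,%0 */
	if (f.z) r = 1;		/* LDILO.Z 1,%0 */
	return r;
}

static int set01_ge(struct cc f)
{
	int r = 0;		/* LDI      0,%0 */
	if (!f.lt) r = 1;	/* LDILO.GE 1,%0 */
	return r;
}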
+
+
+int
+int
+zip_supported_condition(int c) {
+zip_supported_condition(int c) {
+       switch(c) {
+       switch(c) {
+       case EQ: case NE: case LT: case GE: case LTU: case GEU:
+       case EQ: case NE: case LT: case GE: case LTU: case GEU:
+               return 1;
+               return 1;
+               break;
+               break;
+       default:
+       default:
+               break;
+               break;
+       } return 0;
+       } return 0;
+}
+}
+
+
+bool
+bool
+zip_signed_comparison(int c) {
+zip_signed_comparison(int c) {
+       switch(c) {
+       switch(c) {
+       case NE: case LT: case EQ: case GE:
+       case NE: case LT: case EQ: case GE:
+               return true;
+               return true;
+       default:
+       default:
+               break;
+               break;
+       } return false;
+       } return false;
+}
+}
+
+
+int
+int
+zip_expand_movdi(rtx dst, rtx src) {
+zip_expand_movdi(rtx dst, rtx src) {
+       ZIPDEBUGFLAG(dbg, false);
+       ZIPDEBUGFLAG(dbg, false);
+
+
+       if (dbg) fprintf(stderr, "\nZIP::MOVDI\n");
+       if (dbg) fprintf(stderr, "\nZIP::MOVDI\n");
+       if (dbg) zip_debug_rtx_pfx("DST", dst);
+       if (dbg) zip_debug_rtx_pfx("  DST", dst);
+       if (dbg) zip_debug_rtx_pfx("SRC", src);
+       if (dbg) zip_debug_rtx_pfx("  SRC", src);
+
+
+       // MOV !REG->!REG
+       // MOV !REG->!REG
+       if ((!REG_P(dst))&&(!REG_P(src))&&(can_create_pseudo_p())) {
+       if ((!REG_P(dst))&&(!REG_P(src))&&(can_create_pseudo_p())) {
+               // This includes:
+               // This includes:
+               //      MOV MEM->MEM
+               //      MOV MEM->MEM
+               //      MOV IMM->MEM
+               //      MOV IMM->MEM
+               if (dbg) fprintf(stderr, "ZIP::MOVDI -- !REG->!REG\n");
+               if (dbg) fprintf(stderr, "ZIP::MOVDI -- !REG->!REG\n");
+
+
+               rtx tmp = gen_reg_rtx(DImode);
+               rtx tmp = gen_reg_rtx(DImode);
+               emit_insn(gen_movdi(tmp, src));
+               emit_insn(gen_movdi(tmp, src));
+               emit_insn(gen_movdi(dst, tmp));
+               emit_insn(gen_movdi(dst, tmp));
+               return 1;
+               return 1;
+       }
+       }
+
+
+       // MOV REG->REG
+       // MOV REG->REG
+       if ((REG_P(dst))&&(REG_P(src))) {
+       if ((REG_P(dst))&&(REG_P(src))) {
+               if (dbg) fprintf(stderr, "ZIP::MOVDI -- REG->REG\n");
+               if (dbg) fprintf(stderr, "ZIP::MOVDI -- REG->REG\n");
+
+
+               emit_insn(gen_movdi_raw(dst, src));
+               emit_insn(gen_movdi_raw(dst, src));
+               return 1;
+               return 1;
+       }
+       }
+
+
 
+       // MOV SUBREG->REG
 
+       if ((REG_P(dst))&&(SUBREG_P(src))) {
 
+               if (dbg) fprintf(stderr, "ZIP::MOVDI -- SUBREG->REG\n");
 
+
 
+               if (GET_MODE(src)==DImode) {
 
+                       emit_insn(gen_movdi_raw(dst, src));
 
+                       return 1;
 
+               }
 
+       }
 
+
 
+       // MOV REG->SUBREG
 
+       if ((SUBREG_P(dst))&&(REG_P(src))) {
 
+               if (dbg) fprintf(stderr, "ZIP::MOVDI -- REG->SUBREG\n");
 
+
 
+               if (GET_MODE(dst)==DImode) {
 
+                       emit_insn(gen_movdi_raw(dst, src));
 
+                       return 1;
 
+               }
 
+       }
 
+
+       // MOV REG->MEM (a store instruction)
+       // MOV REG->MEM (a store instruction)
+       if ((MEM_P(dst))&&(REG_P(src))) {
+       if ((MEM_P(dst))&&(REG_P(src))) {
+               rtx     addr = XEXP(dst,0);
+               rtx     addr = XEXP(dst,0);
+               long    offset = 0;
+               long    offset = 0;
+               if ((GET_CODE(addr)==PLUS)&&(CONST_INT_P(XEXP(addr,1))))
+               if ((GET_CODE(addr)==PLUS)&&(CONST_INT_P(XEXP(addr,1))))
+                       offset = INTVAL(XEXP(addr,1));
+                       offset = INTVAL(XEXP(addr,1));
+
+
+               if (dbg) fprintf(stderr, "ZIP::MOVDI -- REG->MEM\n");
+               if (dbg) fprintf(stderr, "ZIP::MOVDI -- REG->MEM\n");
+               if (REG_P(addr)) {
+               if (REG_P(addr)) {
+                       emit_insn(gen_movdi_raw(dst, src));
+                       emit_insn(gen_movdi_raw(dst, src));
+                       return 1;
+                       return 1;
+               } else if ((GET_CODE(addr)==PLUS)
+               } else if ((GET_CODE(addr)==PLUS)
+                       &&(REG_P(XEXP(addr,0)))
+                       &&(REG_P(XEXP(addr,0)))
+                       &&(CONST_INT_P(XEXP(addr,1)))
+                       &&(CONST_INT_P(XEXP(addr,1)))
+                       &&(offset>=(long)zip_min_anchor_offset)
+                       &&(offset>=(long)zip_min_anchor_offset)
+                       &&(offset+4<(long)zip_max_anchor_offset)) {
+                       &&(offset+4<(long)zip_max_anchor_offset)) {
+                       // Demonstrated and works
+                       // Demonstrated and works
+                       emit_insn(gen_movdi_raw(dst, src));
+                       emit_insn(gen_movdi_raw(dst, src));
+                       return 1;
+                       return 1;
+               } else if (can_create_pseudo_p()) {
+               } else if (can_create_pseudo_p()) {
+                       rtx tmp = gen_reg_rtx(Pmode);
+                       rtx tmp = gen_reg_rtx(Pmode);
+                       emit_insn(gen_movsi(tmp, addr));
+                       emit_insn(gen_movsi(tmp, addr));
+                       emit_insn(gen_movdi_raw(gen_rtx_MEM(DImode, tmp), src));
+                       emit_insn(gen_movdi_raw(gen_rtx_MEM(DImode, tmp), src));
+                       return 1;
+                       return 1;
+               }
+               }
+       }
+       }
+
+
+       // MOV MEM->REG (a load instruction)
+       // MOV MEM->REG (a load instruction)
+       if ((REG_P(dst))&&(MEM_P(src))) {
+       if ((REG_P(dst))&&(MEM_P(src))) {
+               rtx addr = XEXP(src,0);
+               rtx addr = XEXP(src,0);
+               long    offset = 0;
+               long    offset = 0;
+               if ((GET_CODE(addr)==PLUS)&&(CONST_INT_P(XEXP(addr,1))))
+               if ((GET_CODE(addr)==PLUS)&&(CONST_INT_P(XEXP(addr,1))))
+                       offset = INTVAL(XEXP(addr,1));
+                       offset = INTVAL(XEXP(addr,1));
+
+
+               if (dbg) fprintf(stderr, "ZIP::MOVDI -- MEM->REG\n");
+               if (dbg) fprintf(stderr, "ZIP::MOVDI -- MEM->REG\n");
+               if (REG_P(addr)) {
+               if (REG_P(addr)) {
+                       if (dbg) fprintf(stderr, "ZIP::MOVDI -- MEM[R]->REG\n");
+                       if (dbg) fprintf(stderr, "ZIP::MOVDI -- MEM[R]->REG\n");
+                       emit_insn(gen_movdi_raw(dst, src));
+                       emit_insn(gen_movdi_raw(dst, src));
+                       return 1;
+                       return 1;
+               } else if ((GET_CODE(addr)==PLUS)
+               } else if ((GET_CODE(addr)==PLUS)
+                       &&(REG_P(XEXP(addr,0)))
+                       &&(REG_P(XEXP(addr,0)))
+                       &&(CONST_INT_P(XEXP(addr,1)))
+                       &&(CONST_INT_P(XEXP(addr,1)))
+                       &&(offset>=(long)zip_min_anchor_offset)
+                       &&(offset>=(long)zip_min_anchor_offset)
+                       &&(offset+4<(long)zip_max_anchor_offset)) {
+                       &&(offset+4<(long)zip_max_anchor_offset)) {
+                       if (dbg) fprintf(stderr, "ZIP::MOVDI -- MEM[#+R]->REG -- DONE\n");
+                       if (dbg) fprintf(stderr, "ZIP::MOVDI -- MEM[#+R]->REG -- DONE\n");
+                       emit_insn(gen_movdi_raw(dst, src));
+                       emit_insn(gen_movdi_raw(dst, src));
+                       return 1;
+                       return 1;
+               } else if (can_create_pseudo_p()) {
+               } else if (can_create_pseudo_p()) {
+                       if (dbg) fprintf(stderr, "ZIP::MOVDI -- LDI #,R, MEM[R]->REG\n");
+                       if (dbg) fprintf(stderr, "ZIP::MOVDI -- LDI #,R, MEM[R]->REG\n");
+                       rtx tmp = gen_reg_rtx(Pmode);
+                       rtx tmp = gen_reg_rtx(Pmode);
+                       emit_insn(gen_movsi(tmp, addr));
+                       emit_insn(gen_movsi(tmp, addr));
+                       emit_insn(gen_movdi_raw(dst,
+                       emit_insn(gen_movdi_raw(dst,
+                               gen_rtx_MEM(DImode, tmp)));
+                               gen_rtx_MEM(DImode, tmp)));
+                       return 1;
+                       return 1;
+               } else if (dbg)
+               } else if (dbg)
+                       fprintf(stderr, "ZIP::MOVDI -- MEM[?]->REG (no match)\n");
+                       fprintf(stderr, "ZIP::MOVDI -- MEM[?]->REG (no match)\n");
+       }
+       }
+
+
+       // MOV #->REG (An LDI instruction, but for DIwords)
+       // MOV #->REG (An LDI instruction, but for DIwords)
+       if ((CONST_INT_P(src))&&(REG_P(dst))) {
+       if ((CONST_INT_P(src))&&(REG_P(dst))) {
+               if (dbg) fprintf(stderr, "ZIP::MOVDI -- IMM->REG\n");
+               if (dbg) fprintf(stderr, "ZIP::MOVDI -- IMM->REG\n");
+               emit_insn(gen_movdi_raw(dst, src));
+               emit_insn(gen_movdi_raw(dst, src));
+               return 1;
+               return 1;
+       }
+       }
+
+
+       return 0;
+       return 0;
+}
+}
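
The reg+offset paths above use a single raw load or store only when both 32-bit halves of the DImode value are reachable from the same base register; the offset+4 in the guard assumes the second word sits four address units above the first. A small sketch of that check, with the bounds as stand-ins for the zip_*_anchor_offset globals used above:

/* Sketch: both words of a 64-bit access must fall inside the
 * load/store displacement window.  The +4 mirrors the guard above. */
static int di_offset_ok(long offset, long min_ofs, long max_ofs)
{
	return (offset >= min_ofs) && (offset + 4 < max_ofs);
}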
+
+
+const char *
+const char *
+zip_addsicc(rtx dst, rtx condition, rtx ifsrc, rtx addv) {
+zip_addsicc(rtx dst, rtx condition, rtx ifsrc, rtx addv) {
+       // We know upon entry that REG_P(dst) must be true
+       // We know upon entry that REG_P(dst) must be true
+       if (!REG_P(dst))
+       if (!REG_P(dst))
+               internal_error("%s","ADDSICC into something other than register");
+               internal_error("%s","ADDSICC into something other than register");
+
+
+       if ((REG_P(dst))&&(REG_P(ifsrc))&&(REG_P(addv))
+       if ((REG_P(dst))&&(REG_P(ifsrc))&&(REG_P(addv))
+               &&(REGNO(dst)!=REGNO(ifsrc))) {
+               &&(REGNO(dst)!=REGNO(ifsrc))) {
+               switch (GET_CODE(condition)) {
+               switch (GET_CODE(condition)) {
+               case EQ: return "MOV.Z\t%2,%0\n\tADD.Z\t%3,%0";
+               case EQ: return "MOV.Z\t%2,%0\n\tADD.Z\t%3,%0";
+               case NE: return "MOV.NZ\t%2,%0\n\tADD.NZ\t%3,%0";
+               case NE: return "MOV.NZ\t%2,%0\n\tADD.NZ\t%3,%0";
+               case LT: return "MOV.LT\t%2,%0\n\tADD.LT\t%3,%0";
+               case LT: return "MOV.LT\t%2,%0\n\tADD.LT\t%3,%0";
+
+
+               case LE: return "MOV.LT\t%2,%0\n\tMOV.Z\t%2,%0\n\tADD.LT\t%3,%0\n\tADD.Z\t%3,%0";
+               case LE: return "MOV.LT\t%2,%0\n\tMOV.Z\t%2,%0\n\tADD.LT\t%3,%0\n\tADD.Z\t%3,%0";
+               case GE: return "MOV.GE\t%2,%0\n\tADD.GE\t%3,%0";
+               case GE: return "MOV.GE\t%2,%0\n\tADD.GE\t%3,%0";
+
+
+               case GT: return "BLT\t.Laddsi%=\n\tBZ\t.Laddsi%=\n\tMOV\t%2,%0\n\tADD\t%3,%0\n.Laddsi%=:";
+               case GT: return "BLT\t.Laddsi%=\n\tBZ\t.Laddsi%=\n\tMOV\t%2,%0\n\tADD\t%3,%0\n.Laddsi%=:";
+               case LTU: return "MOV.C\t%2,%0\n\tADD.C\t%3,%0";
+               case LTU: return "MOV.C\t%2,%0\n\tADD.C\t%3,%0";
+
+
+               case LEU: return "MOV.C\t%2,%0\n\tMOV.Z\t%2,%0\n\tADD.C\t%3,%0\n\tADD.Z\t%3,%0";
+               case LEU: return "MOV.C\t%2,%0\n\tMOV.Z\t%2,%0\n\tADD.C\t%3,%0\n\tADD.Z\t%3,%0";
+               case GEU: return "MOV.NC\t%2,%0\n\tADD.NC\t%3,%0";
+               case GEU: return "MOV.NC\t%2,%0\n\tADD.NC\t%3,%0";
+               case GTU: return "BZ\t.Laddsi%=\n\tMOV.NC\t%2,%0\n\tADD.NC\t%3,%0\n.Laddsi%=:";
+               case GTU: return "BZ\t.Laddsi%=\n\tMOV.NC\t%2,%0\n\tADD.NC\t%3,%0\n.Laddsi%=:";
+               default:
+               default:
+                       internal_error("%s", "Zip/No usable addsi expansion");
+                       internal_error("%s", "Zip/No usable addsi expansion");
+                       break;
+                       break;
+               }
+               }
+       }
+       }
+
+
+       if ((REG_P(ifsrc))&&(REGNO(dst)==REGNO(ifsrc))) {
+       if ((REG_P(ifsrc))&&(REGNO(dst)==REGNO(ifsrc))) {
+               switch (GET_CODE(condition)) {
+               switch (GET_CODE(condition)) {
+               case EQ: return "ADD.Z\t%3,%0";
+               case EQ: return "ADD.Z\t%3,%0";
+               case NE: return "ADD.NZ\t%3,%0";
+               case NE: return "ADD.NZ\t%3,%0";
+               case LT: return "ADD.LT\t%3,%0";
+               case LT: return "ADD.LT\t%3,%0";
+               case LE: return "ADD.LT\t%3,%0\n\tADD.Z\t%3,%0";
+               case LE: return "ADD.LT\t%3,%0\n\tADD.Z\t%3,%0";
+               case GE: return "ADD.GE\t%3,%0";
+               case GE: return "ADD.GE\t%3,%0";
+               case GT: return "ADD.GE\t%3,%0\n\tSUB.Z\t%3,%0";
+               case GT: return "ADD.GE\t%3,%0\n\tSUB.Z\t%3,%0";
+               case LTU: return "ADD.C\t%3,%0";
+               case LTU: return "ADD.C\t%3,%0";
+               case LEU: return "ADD.C\t%3,%0\n\tADD.Z\t%3,%0";
+               case LEU: return "ADD.C\t%3,%0\n\tADD.Z\t%3,%0";
+               case GEU: return "ADD.NC\t%3,%0";
+               case GEU: return "ADD.NC\t%3,%0";
+               case GTU: return "SUB.Z\t%3,%0\n\tADD.NC\t%3,%0";
+               case GTU: return "SUB.Z\t%3,%0\n\tADD.NC\t%3,%0";
+               default:
+               default:
+                       internal_error("%s", "Zip/No usable addsi expansion");
+                       internal_error("%s", "Zip/No usable addsi expansion");
+                       break;
+                       break;
+               }
+               }
+       } else {
+       } else {
+               // MOV A+REG,REG
+               // MOV A+REG,REG
+               switch (GET_CODE(condition)) {
+               switch (GET_CODE(condition)) {
+               case EQ: return "MOV.Z\t%3+%2,%0";
+               case EQ: return "MOV.Z\t%3+%2,%0";
+               case NE: return "MOV.NZ\t%3+%2,%0";
+               case NE: return "MOV.NZ\t%3+%2,%0";
+               case LT: return "MOV.LT\t%3+%2,%0";
+               case LT: return "MOV.LT\t%3+%2,%0";
+               case GT: return "BLT\t.Laddcc%=\n\tBZ\t.Laddcc%=\n\tMOV\t%3+%2,%0\n.Laddcc%=:";
+               case GT: return "BLT\t.Laddcc%=\n\tBZ\t.Laddcc%=\n\tMOV\t%3+%2,%0\n.Laddcc%=:";
+               case LE: return "MOV.LT\t%3+%2,%0\n\tMOV.Z\t%3+%2,%0";
+               case LE: return "MOV.LT\t%3+%2,%0\n\tMOV.Z\t%3+%2,%0";
+               case GE: return "MOV.GE\t%3+%2,%0";
+               case GE: return "MOV.GE\t%3+%2,%0";
+               case LTU: return "MOV.C\t%3+%2,%0";
+               case LTU: return "MOV.C\t%3+%2,%0";
+               case LEU: return "MOV.C\t%3+%2,%0\n\tMOV.Z\t%3+%2,%0";
+               case LEU: return "MOV.C\t%3+%2,%0\n\tMOV.Z\t%3+%2,%0";
+               case GEU: return "MOV.NC\t%3+%2,%0";
+               case GEU: return "MOV.NC\t%3+%2,%0";
+               case GTU: return "BZ\t.Laddcc%=\n\tMOV.NC\t%3+%2,%0\n\t.Laddcc%=:";
+               case GTU: return "BZ\t.Laddcc%=\n\tMOV.NC\t%3+%2,%0\n\t.Laddcc%=:";
+               default:
+               default:
+                       internal_error("%s", "Zip/No usable addsi(reg,reg) expansion");
+                       internal_error("%s", "Zip/No usable addsi(reg,reg) expansion");
+                       break;
+                       break;
+               }
+               }
+       }
+       }
+
+
+       return "BREAK";
+       return "BREAK";
+}
+}
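
All three operand shapes above expand the same conditional add: when the comparison holds, the destination receives ifsrc + addv, otherwise it keeps its previous value (in the dst == ifsrc case only the ADD needs to be predicated). A behavioural sketch in plain C, with cond standing in for the already-computed flag test:

/* Semantics the predicated MOV/ADD sequences aim for; not GCC code. */
static int addsicc_model(int cond, int dst, int ifsrc, int addv)
{
	if (cond)
		return ifsrc + addv;	/* MOV.cc %2,%0 ; ADD.cc %3,%0 */
	return dst;			/* condition false: %0 unchanged */
}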
+
+
+static int     zip_memory_move_cost(machine_mode mode, reg_class_t ATTRIBUTE_UNUSED, bool in ATTRIBUTE_UNUSED) {
+static int     zip_memory_move_cost(machine_mode mode, reg_class_t ATTRIBUTE_UNUSED, bool in ATTRIBUTE_UNUSED) {
+       int     rv = 14;
+       int     rv = 14;
+       if ((mode == DImode)||(mode == DFmode))
+       if ((mode == DImode)||(mode == DFmode))
+               rv += 2;
+               rv += 2;
+       return rv;
+       return rv;
+}
+}
+
+
+// #warning "How do we tell the compiler LDI label is expensive as 2 ops"?
+// #warning "How do we tell the compiler LDI label is expensive as 2 ops"?
+static bool    zip_cannot_modify_jumps_p(void) {
+static bool    zip_cannot_modify_jumps_p(void) {
+       // Let's try their suggested approach, keeping us from modifying jumps
+       // Let's try their suggested approach, keeping us from modifying jumps
+       // after reload.  This should also allow our peephole2 optimizations
+       // after reload.  This should also allow our peephole2 optimizations
+       // to adjust things back to what they need to be if necessary.
+       // to adjust things back to what they need to be if necessary.
+       return (reload_completed || reload_in_progress);
+       return (reload_completed || reload_in_progress);
+}
+}
+
+
+rtx_insn       *zip_ifcvt_info;
+rtx_insn       *zip_ifcvt_info;
+
+
+void
+void
+zip_ifcvt_modify_tests(ce_if_block *ce_info ATTRIBUTE_UNUSED, rtx *true_expr, rtx *false_expr) {
+zip_ifcvt_modify_tests(ce_if_block *ce_info ATTRIBUTE_UNUSED, rtx *true_expr, rtx *false_expr) {
+       const bool      dbg = ((ZIP_ALL_DEBUG_ON)||(false))&&(!ZIP_ALL_DEBUG_OFF);
+       const bool      dbg = ((ZIP_ALL_DEBUG_ON)||(false))&&(!ZIP_ALL_DEBUG_OFF);
+       if (dbg) fprintf(stderr, "IFCVT-MODIFY-TESTS\n");
+       if (dbg) fprintf(stderr, "IFCVT-MODIFY-TESTS\n");
+       if (*true_expr) switch(GET_CODE(*true_expr)) {
+       if (*true_expr) switch(GET_CODE(*true_expr)) {
+               // These are our unsupported conditions
+               // These are our unsupported conditions
+               case LE:
+               case LE:
+               case GT:
+               case GT:
+               case LEU:
+               case LEU:
+               case GTU:
+               case GTU:
+                       if (dbg) fprintf(stderr, "TRUE, missing expr\n");
+                       if (dbg) fprintf(stderr, "TRUE, missing expr\n");
+                       if (dbg) zip_debug_rtx(*true_expr);
+                       if (dbg) zip_debug_rtx(*true_expr);
+                       *true_expr = NULL_RTX;
+                       *true_expr = NULL_RTX;
+                       break;
+                       break;
+               default: // LT, GT, GTE, LTU, NE, EQ
+               default: // LT, GT, GTE, LTU, NE, EQ
+                       break;
+                       break;
+       }
+       }
+
+
+       if (*false_expr) switch(GET_CODE(*false_expr)) {
+       if (*false_expr) switch(GET_CODE(*false_expr)) {
+               case LE:
+               case LE:
+               case GT:
+               case GT:
+               case LEU:
+               case LEU:
+               case GTU:
+               case GTU:
+                       if (dbg) fprintf(stderr, "FALSE, missing expr\n");
+                       if (dbg) fprintf(stderr, "FALSE, missing expr\n");
+                       if (dbg) zip_debug_rtx(*false_expr);
+                       if (dbg) zip_debug_rtx(*false_expr);
+                       *false_expr = NULL_RTX;
+                       *false_expr = NULL_RTX;
+               default:
+               default:
+                       break;
+                       break;
+       }
+       }
+       if ((dbg)&&((!*true_expr)||(!*false_expr)))
+       if ((dbg)&&((!*true_expr)||(!*false_expr)))
+               fprintf(stderr, "IFCVT-MODIFY-TESTS -- FAIL\n");
+               fprintf(stderr, "IFCVT-MODIFY-TESTS -- FAIL\n");
+}
+}
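
The hook above simply withholds a test from if-conversion when the comparison is one the predicated instructions cannot express with a single flag test (LE, GT, LEU, GTU). For reference, each refused code is the operand-swapped form of a supported one, a rewrite ifcvt cannot perform at this point; a plain-C illustration:

/* Illustration only: the refused codes expressed via the accepted ones. */
static int gt_as_lt(int a, int b)             { return b <  a; }  /* GT  */
static int le_as_ge(int a, int b)             { return b >= a; }  /* LE  */
static int gtu_as_ltu(unsigned a, unsigned b) { return b <  a; }  /* GTU */
static int leu_as_geu(unsigned a, unsigned b) { return b >= a; }  /* LEU */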
+
+
+void
+void
+zip_ifcvt_machdep_init(struct ce_if_block *ceinfo ATTRIBUTE_UNUSED) {
+zip_ifcvt_machdep_init(struct ce_if_block *ceinfo ATTRIBUTE_UNUSED) {
+/*
+/*
+if (!ceinfo->then_bb)
+if (!ceinfo->then_bb)
+       return;
+       return;
+rtx_insn *insn;
+rtx_insn *insn;
+FOR_BB_INSNS(ceinfo->then_bb, insn) {
+FOR_BB_INSNS(ceinfo->then_bb, insn) {
+       fprintf(stderr, "IFCVT -- INIT\n");
+       fprintf(stderr, "IFCVT -- INIT\n");
+       zip_debug_rtx_pfx("INIT-BB", insn);
+       zip_debug_rtx_pfx("INIT-BB", insn);
+}
+}
+*/
+*/
+/*
+/*
+       zip_ifcvt_info = NULL;
+       zip_ifcvt_info = NULL;
+       rtx_insn *insn, *ifinsn = NULL;
+       rtx_insn *insn, *ifinsn = NULL;
+       FOR_BB_INSNS(ceinfo->test_bb, insn) {
+       FOR_BB_INSNS(ceinfo->test_bb, insn) {
+               rtx     p;
+               rtx     p;
+               p = single_set(insn);
+               p = single_set(insn);
+               if (!p) continue;
+               if (!p) continue;
+               if (SET_DEST(p)==pc_rtx) {
+               if (SET_DEST(p)==pc_rtx) {
+                       ifinsn = insn;
+                       ifinsn = insn;
+               }
+               }
+               if (!REG_P(SET_DEST(p)))
+               if (!REG_P(SET_DEST(p)))
+                       continue;
+                       continue;
+               if (GET_MODE(SET_DEST(p))!=CCmode)
+               if (GET_MODE(SET_DEST(p))!=CCmode)
+                       continue;
+                       continue;
+               if (REGNO(SET_DEST(p))!=zip_CC)
+               if (REGNO(SET_DEST(p))!=zip_CC)
+                       continue;
+                       continue;
+               zip_ifcvt_info = insn;
+               zip_ifcvt_info = insn;
+       }
+       }
+
+
+       if (zip_ifcvt_info)
+       if (zip_ifcvt_info)
+               zip_debug_rtx_pfx("PUTATIVE-CMP",zip_ifcvt_info);
+               zip_debug_rtx_pfx("PUTATIVE-CMP",zip_ifcvt_info);
+       if (ifinsn)
+       if (ifinsn)
+               zip_debug_rtx_pfx("PRIOR-JMP",ifinsn);
+               zip_debug_rtx_pfx("PRIOR-JMP",ifinsn);
+*/
+*/
+}
+}
+
+
+void
+void
+zip_ifcvt_modify_insn(struct ce_if_block *ceinfo ATTRIBUTE_UNUSED,
+zip_ifcvt_modify_insn(struct ce_if_block *ceinfo ATTRIBUTE_UNUSED,
+               rtx pattern ATTRIBUTE_UNUSED,
+               rtx pattern ATTRIBUTE_UNUSED,
+               rtx_insn *insn ATTRIBUTE_UNUSED) {
+               rtx_insn *insn ATTRIBUTE_UNUSED) {
+       // zip_debug_rtx_pfx("MODIFY-INSN: ", insn);
+       // zip_debug_rtx_pfx("MODIFY-INSN: ", insn);
+}
+}
+
+
+void
+void
+zip_ifcvt_modify_cancel(struct ce_if_block *ceinfo ATTRIBUTE_UNUSED) {
+zip_ifcvt_modify_cancel(struct ce_if_block *ceinfo ATTRIBUTE_UNUSED) {
+/*
+/*
+       fprintf(stderr, "IFCVT -- CANCEL\n");
+       fprintf(stderr, "IFCVT -- CANCEL\n");
+       zip_ifcvt_info = NULL;
+       zip_ifcvt_info = NULL;
+*/
+*/
+}
+}
+
+
+void
+void
+zip_ifcvt_modify_final(struct ce_if_block *ceinfo ATTRIBUTE_UNUSED) {
+zip_ifcvt_modify_final(struct ce_if_block *ceinfo ATTRIBUTE_UNUSED) {
+/*
+/*
+rtx_insn *insn;
+rtx_insn *insn;
+FOR_BB_INSNS(ceinfo->test_bb, insn) {
+FOR_BB_INSNS(ceinfo->test_bb, insn) {
+       fprintf(stderr, "IFCVT -- FINAL\n");
+       fprintf(stderr, "IFCVT -- FINAL\n");
+       zip_debug_rtx_pfx("FINAL-TEST-BB", insn);
+       zip_debug_rtx_pfx("FINAL-TEST-BB", insn);
+}
+}
+       zip_ifcvt_info = NULL;
+       zip_ifcvt_info = NULL;
+*/
+*/
+}
+}
+
+
+
+
+int    zip_insn_sets_cc(rtx_insn *insn) {
+int    zip_insn_sets_cc(rtx_insn *insn) {
+       return (get_attr_ccresult(insn)==CCRESULT_SET);
+       return (get_attr_ccresult(insn)==CCRESULT_SET);
+}
+}
+
+
+const char *
+const char *
+zip_cbranchdi_const(rtx comparison,
+zip_cbranchdi_const(rtx comparison,
+               rtx a ATTRIBUTE_UNUSED,
+               rtx a ATTRIBUTE_UNUSED,
+               rtx b,
+               rtx b,
+               rtx label ATTRIBUTE_UNUSED) {
+               rtx label ATTRIBUTE_UNUSED) {
+       gcc_assert(CONST_INT_P(b));
+       gcc_assert(CONST_INT_P(b));
+       long value = INTVAL(b);
+       long value = INTVAL(b);
+
+
+       // Look into the combine routines to find out why this routine never
+       // Look into the combine routines to find out why this routine never
+       // gets called.
+       // gets called.
+
+
+       switch(GET_CODE(comparison)) {
+       switch(GET_CODE(comparison)) {
+       case EQ:
+       case EQ:
+               if (value < 0)
+               if (value < 0)
+                 return "CMP\t-1,%H1\t; cbranchdi/# EQ (neg)\n\tCMP.Z\t%2,%L1\n\tBZ\t%3";
+                 return "CMP\t-1,%H1\t; cbranchdi/# EQ (neg)\n\tCMP.Z\t%2,%L1\n\tBZ\t%3";
+               else
+               else
+                 return "CMP\t0,%H1\t; cbranchdi/# EQ\n\tCMP.Z\t%2,%L1\n\tBZ\t%3";
+                 return "CMP\t0,%H1\t; cbranchdi/# EQ\n\tCMP.Z\t%2,%L1\n\tBZ\t%3";
+       case NE:
+       case NE:
+               if (value < 0)
+               if (value < 0)
+                 return "CMP\t-1,%H1\t; cbranchdi/# NE (neg)\n\tCMP.Z\t%2,%L1\n\tBNZ\t%3";
+                 return "CMP\t-1,%H1\t; cbranchdi/# NE (neg)\n\tCMP.Z\t%2,%L1\n\tBNZ\t%3";
+               else
+               else
+                 return "CMP\t0,%H1\t; cbranchdi/# NE\n\tCMP.Z\t%2,%L1\n\tBNZ\t%3";
+                 return "CMP\t0,%H1\t; cbranchdi/# NE\n\tCMP.Z\t%2,%L1\n\tBNZ\t%3";
+       case LE:
+       case LE:
+               if (value == 0)
+               if (value == 0)
+                       return "CMP\t0,%H1\t; cbranchdi/# LE 0\n\tBLT\t%3\n\tCMP.Z\t0,%L1\n\tBZ\t%3";
+                       return "CMP\t0,%H1\t; cbranchdi/# LE 0\n\tBLT\t%3\n\tCMP.Z\t0,%L1\n\tBZ\t%3";
+               else if (value == -1)
+               else if (value == -1)
+                       return "CMP\t0,%H1\t; cbranchdi/# LE -1\n\tBLT\t%3";
+                       return "CMP\t0,%H1\t; cbranchdi/# LE -1\n\tBLT\t%3";
+               else if (value < 0) {
+               else if (value < 0) {
+                       char    tmp[128];
+                       char    tmp[128];
+                       sprintf(tmp, "CMP\t-1,%%H1\t; cbranchdi/# LE (neg)\n"
+                       sprintf(tmp, "CMP\t-1,%%H1\t; cbranchdi/# LE (neg)\n"
+                               "\tBLT\t%%3\n"
+                               "\tBLT\t%%3\n"
+                               "\tBNZ\t.Lcmpdile%%=\n"
+                               "\tBNZ\t.Lcmpdile%%=\n"
+                               "\tCMP\t%ld,%%L1\n"
+                               "\tCMP\t%ld,%%L1\n"
+                               "\tBC\t%%3\n.Lcmpdile%%=:", (value+1l)&0x0ffffffff);
+                               "\tBC\t%%3\n.Lcmpdile%%=:", (value+1l)&0x0ffffffff);
+                       return ggc_alloc_string(tmp, -1);
+                       return ggc_alloc_string(tmp, -1);
+               } else { //; value > 0
+               } else { //; value > 0
+                       char    tmp[128];
+                       char    tmp[128];
+                       sprintf(tmp, "CMP\t0,%%H1\t; cbranchdi/# LE\n"
+                       sprintf(tmp, "CMP\t0,%%H1\t; cbranchdi/# LE\n"
+                               "\tBLT\t%%3\n"
+                               "\tBLT\t%%3\n"
+                               "\tBNZ\t.Lcmple%%=\n"
+                               "\tBNZ\t.Lcmple%%=\n"
+                               "\tCMP\t%ld,%%L1\n"
+                               "\tCMP\t%ld,%%L1\n"
+                               "\tBC\t%%3\n"
+                               "\tBC\t%%3\n"
+                               ".Lcmple%%=:", value-1);
+                               ".Lcmple%%=:", value-1);
+                       return ggc_alloc_string(tmp, -1);
+                       return ggc_alloc_string(tmp, -1);
+               }
+               }
+       case LT:
+       case LT:
+               if (value == 0)
+               if (value == 0)
+                       return "CMP\t0,%H1\t; cbranchdi/# LT 0\n\tBLT\t%3";
+                       return "CMP\t0,%H1\t; cbranchdi/# LT 0\n\tBLT\t%3";
+               else if (value < 0)
+               else if (value < 0)
+                       return "CMP\t-1,%H1\t; cbranchdi/# LT neg\n\tCMP.Z\t%2,%L1\n\tBC\t%3";
+                       return "CMP\t-1,%H1\t; cbranchdi/# LT neg\n\tCMP.Z\t%2,%L1\n\tBC\t%3";
+               else
+               else
+                       return "CMP\t0,%H1\t; cbranchdi/# LT\n"
+                       return "CMP\t0,%H1\t; cbranchdi/# LT\n"
+                               "\tBLT\t%3\n"
+                               "\tBLT\t%3\n"
+                               "\tBNZ\t.Lcmplt%=\n"
+                               "\tBNZ\t.Lcmplt%=\n"
+                               "\tCMP\t%2,%L1\n"
+                               "\tCMP\t%2,%L1\n"
+                               "\tBC\t%3\n"
+                               "\tBC\t%3\n"
+                               ".Lcmplt%=:";
+                               ".Lcmplt%=:";
+       case GT:
+       case GT:
+               if (value == 0)
+               if (value == 0)
+                       return "CMP\t1,%H1\t; cbranchdi/# GT 0\n"
+                       return "CMP\t1,%H1\t; cbranchdi/# GT 0\n"
+                               "\tBGE\t%3\n"
+                               "\tBGE\t%3\n"
+                               "\tBNZ\t.Lcmpgt%=\n"
+                               "\tBNZ\t.Lcmpgt%=\n"
+                               "\tCMP\t0,%L1\n"
+                               "\tCMP\t0,%L1\n"
+                               "\tBNZ\t%3\n"
+                               "\tBNZ\t%3\n"
+                               ".Lcmpgt%=:";
+                               ".Lcmpgt%=:";
+               else if (value == -1)
+               else if (value == -1)
+                       return "CMP\t0,%H1\t; cbranchdi/# GT -1\n"
+                       return "CMP\t0,%H1\t; cbranchdi/# GT -1\n"
+                               "\tBGE\t%3\n";
+                               "\tBGE\t%3\n";
+               else if (value < 0) {
+               else if (value < 0) {
+                       char    tmp[128];
+                       char    tmp[128];
+                       sprintf(tmp, "CMP\t-1,%%H1\t; cbranchdi/# GT neg\n"
+                       sprintf(tmp, "CMP\t-1,%%H1\t; cbranchdi/# GT neg\n"
+                               "\tBLT\t.Lcmpgt%%=\n"
+                               "\tBLT\t.Lcmpgt%%=\n"
+                               "\tBNZ\t%%3\n"
+                               "\tBNZ\t%%3\n"
+                               "\tCMP\t%ld,%%L1\n"
+                               "\tCMP\t%ld,%%L1\n"
+                               "\tBNC\t%%3\n"
+                               "\tBNC\t%%3\n"
+                               ".Lcmpgt%%=:", value+1l);
+                               ".Lcmpgt%%=:", value+1l);
+                       return ggc_alloc_string(tmp, -1);
+                       return ggc_alloc_string(tmp, -1);
+               } else {
+               } else {
+                       char    tmp[128];
+                       char    tmp[128];
+                       sprintf(tmp, "CMP\t0,%%H1\t; cbranchdi/# GT\n"
+                       sprintf(tmp, "CMP\t0,%%H1\t; cbranchdi/# GT\n"
+                               "\tBLT\t.Lcmpgt%%=\n"
+                               "\tBLT\t.Lcmpgt%%=\n"
+                               "\tBNZ\t%%3\n"
+                               "\tBNZ\t%%3\n"
+                               "\tCMP\t%ld,%%L1\n"
+                               "\tCMP\t%ld,%%L1\n"
+                               "\tBNC\t%%3\n"
+                               "\tBNC\t%%3\n"
+                               ".Lcmpgt%%=:", value+1l);
+                               ".Lcmpgt%%=:", value+1l);
+                       return ggc_alloc_string(tmp, -1);
+                       return ggc_alloc_string(tmp, -1);
+               }
+               }
+       case GE:
+       case GE:
+               if (value == 0)
+               if (value == 0)
+                       return "CMP\t0,%H1\t; cbranchdi/# GE 0\n"
+                       return "CMP\t0,%H1\t; cbranchdi/# GE 0\n"
+                               "\tBLT\t.Lcmpge%=\n"
+                               "\tBLT\t.Lcmpge%=\n"
+                               "\tBNZ\t%3\n"
+                               "\tBNZ\t%3\n"
+                               "\tCMP\t0,%L1\n"
+                               "\tCMP\t0,%L1\n"
+                               "\tBNC\t%3\n"
+                               "\tBNC\t%3\n"
+                               ".Lcmpge%=:";
+                               ".Lcmpge%=:";
+               else if (value == -1)
+               else if (value == -1)
+                       return "CMP\t-1,%H1\t; cbranchdi/# GE -1\n"
+                       return "CMP\t-1,%H1\t; cbranchdi/# GE -1\n"
+                               "\tBLT\t.Lcmpge%=\n"
+                               "\tBLT\t.Lcmpge%=\n"
+                               "\tBNZ\t%3\n"
+                               "\tBNZ\t%3\n"
+                               "\tCMP\t-1,%L1\n"
+                               "\tCMP\t-1,%L1\n"
+                               "\tBZ\t%3\n"
+                               "\tBZ\t%3\n"
+                               ".Lcmpge%=:";
+                               ".Lcmpge%=:";
+               else if (value < 0)
+               else if (value < 0)
+                       return "CMP\t-1,%H1\t; cbranchdi/# GE <\n"
+                       return "CMP\t-1,%H1\t; cbranchdi/# GE <\n"
+                               "\tBLT\t.Lcmpge%=\n"
+                               "\tBLT\t.Lcmpge%=\n"
+                               "\tBNZ\t%3\n"
+                               "\tBNZ\t%3\n"
+                               "\tCMP\t%2,%L1\n"
+                               "\tCMP\t%2,%L1\n"
+                               "\tBNC\t%3\n"
+                               "\tBNC\t%3\n"
+                               ".Lcmpge%=:";
+                               ".Lcmpge%=:";
+               else
+               else
+                       return "CMP\t0,%H1\t; cbranchdi/# GE\n"
+                       return "CMP\t0,%H1\t; cbranchdi/# GE\n"
+                               "\tBLT\t.Lcmpge%=\n"
+                               "\tBLT\t.Lcmpge%=\n"
+                               "\tBNZ\t%3\n"
+                               "\tBNZ\t%3\n"
+                               "\tCMP\t%2,%L1\n"
+                               "\tCMP\t%2,%L1\n"
+                               "\tBNC\t%3\n"
+                               "\tBNC\t%3\n"
+                               ".Lcmpge%=:";
+                               ".Lcmpge%=:";
+       case LTU:
+       case LTU:
+               if (value == 0) { //; Impossible, cannot be < 0 unsignd
+               if (value == 0) { //; Impossible, cannot be < 0 unsignd
+               if (value == 0) { //; Impossible, cannot be < 0 unsigned
+               if (value == 0) { //; Impossible, cannot be < 0 unsigned
+               } else
+               } else
+                       return "CMP\t0,%H1\t; cbranchdi/#\n\tCMP.Z\t%2,%L1\n\tBC\t%3\n";
+                       return "CMP\t0,%H1\t; cbranchdi/#\n\tCMP.Z\t%2,%L1\n\tBC\t%3\n";
+       case LEU:
+       case LEU:
+               if (value == 0) { //; Only possible if == 0
+               if (value == 0) { //; Only possible if == 0
+                       return "CMP\t0,%H1\t; cbranchdi/# LEU 0\n"
+                       return "CMP\t0,%H1\t; cbranchdi/# LEU 0\n"
+                               "\tCMP.Z\t0,%L1\n"
+                               "\tCMP.Z\t0,%L1\n"
+                               "\tBZ\t%3";
+                               "\tBZ\t%3";
+               } else {
+               } else {
+                       //; Subtract one, and LTU works
+                       //; Subtract one, and LTU works
+                       char    tmp[128];
+                       char    tmp[128];
+                       sprintf(tmp, "CMP\t0,%%H1\t; cbranchdi/# LEU\n"
+                       sprintf(tmp, "CMP\t0,%%H1\t; cbranchdi/# LEU\n"
+                               "\tCMP.Z\t%ld,%%L1\n"
+                               "\tCMP.Z\t%ld,%%L1\n"
+                               "\tBC\t%%3\n", value-1);
+                               "\tBC\t%%3\n", value-1);
+                       return ggc_alloc_string(tmp, -1);
+                       return ggc_alloc_string(tmp, -1);
+               }
+               }
+       case GTU:
+       case GTU:
+               if (value == 0) {
+               if (value == 0) {
+                       //; Equivalent to not equal to zero
+                       //; Equivalent to not equal to zero
+                       return "CMP\t0,%H1\t; cbranchdi/# GTU 0\n\tCMP.Z\t0,%L1\n\tBNZ\t%3";
+                       return "CMP\t0,%H1\t; cbranchdi/# GTU 0\n\tCMP.Z\t0,%L1\n\tBNZ\t%3";
+               } else {
+               } else {
+                       char    tmp[128];
+                       char    tmp[128];
+                       sprintf(tmp,
+                       sprintf(tmp,
+                               "CMP\t0,%%H1\t; cbranchdi/# GTU\n"
+                               "CMP\t0,%%H1\t; cbranchdi/# GTU\n"
+                               "\tBNZ\t%%3\n"
+                               "\tBNZ\t%%3\n"
+                               "\tCMP\t%ld,%%L1\n"
+                               "\tCMP\t%ld,%%L1\n"
+                               "\tBNC\t%%3\n", value+1);
+                               "\tBNC\t%%3\n", value+1);
+                       return ggc_alloc_string(tmp, -1);
+                       return ggc_alloc_string(tmp, -1);
+               }
+               }
+       case GEU:
+       case GEU:
+               if (value == 0) //; Unsigned, always true
+               if (value == 0) //; Unsigned, always true
+                       return "BRA\t%3\t; cbranchdi/# GEU 0";
+                       return "BRA\t%3\t; cbranchdi/# GEU 0";
+               else
+               else
+                       return "CMP\t0,%H1\t; cbranchdi/# GEU\n"
+                       return "CMP\t0,%H1\t; cbranchdi/# GEU\n"
+                               "\tBNZ\t%3\n"
+                               "\tBNZ\t%3\n"
+                               "\tCMP\t%2,%L1\n"
+                               "\tCMP\t%2,%L1\n"
+                               "\tBNC\t%3";
+                               "\tBNC\t%3";
+       default:
+       default:
+               gcc_unreachable();
+               gcc_unreachable();
+       }
+       }
+}
+}
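
All of the constant-comparison templates above are instances of the usual two-word compare: the high words decide unless they are equal, in which case the low words are compared as unsigned values. A plain-C sketch of the signed and unsigned less-than forms, assuming the value is split into a signed high word and an unsigned low word:

#include <stdint.h>

/* Sketch of the 64-bit comparison the CMP / CMP.Z pairs implement. */
static int di_lt_signed(int32_t ahi, uint32_t alo, int32_t bhi, uint32_t blo)
{
	if (ahi != bhi)
		return ahi < bhi;	/* high words differ: they decide */
	return alo < blo;		/* equal highs: unsigned low compare */
}

static int di_ltu(uint32_t ahi, uint32_t alo, uint32_t bhi, uint32_t blo)
{
	if (ahi != bhi)
		return ahi < bhi;
	return alo < blo;
}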
+
+
+const char *
+const char *
+zip_cbranchdi_reg(rtx comparison,
+zip_cbranchdi_reg(rtx comparison,
+               rtx a ATTRIBUTE_UNUSED,
+               rtx a ATTRIBUTE_UNUSED,
+               rtx b ATTRIBUTE_UNUSED,
+               rtx b ATTRIBUTE_UNUSED,
+               rtx label ATTRIBUTE_UNUSED) {
+               rtx label ATTRIBUTE_UNUSED) {
+
+
+       switch(GET_CODE(comparison)) {
+       switch(GET_CODE(comparison)) {
+               case EQ:
+               case EQ:
+                       return "CMP\t%H2,%H1\t; cbranchdi/r EQ\n\tCMP.Z\t%L2,%L1\n\tBZ\t%3";
+                       return "CMP\t%H2,%H1\t; cbranchdi/r EQ\n\tCMP.Z\t%L2,%L1\n\tBZ\t%3";
+               case NE:
+               case NE:
+                       return "CMP\t%H2,%H1\t; cbranchdi/r NE\n\tCMP.Z\t%L2,%L1\n\tBNZ\t%3";
+                       return "CMP\t%H2,%H1\t; cbranchdi/r NE\n\tCMP.Z\t%L2,%L1\n\tBNZ\t%3";
+               case LE:
+               case LE:
+                       return "CMP\t%H2,%H1\t; cbranchdi/r LE\n"
+                       return "CMP\t%H2,%H1\t; cbranchdi/r LE\n"
+                               "\tBLT\t%3\n"
+                               "\tBLT\t%3\n"
+                               "\tBNZ\t.Ldi%=\n"
+                               "\tBNZ\t.Ldi%=\n"
+                               "\tCMP\t%L1,%L2\n"
+                               "\tCMP\t%L1,%L2\n"
+                               "\tBNC\t%3\n"
+                               "\tBNC\t%3\n"
+                               ".Ldi%=:";
+                               ".Ldi%=:";
+               case GT:
+               case GT:
+                       return "CMP\t%H1,%H2\t; cbranchdi/r GT\n"
+                       return "CMP\t%H1,%H2\t; cbranchdi/r GT\n"
+                               "\tBLT\t%3\n"
+                               "\tBLT\t%3\n"
+                               "\tBNZ\t.Ldi%=\n"
+                               "\tBNZ\t.Ldi%=\n"
+                               "\tCMP\t%L1,%L2\n"
+                               "\tCMP\t%L1,%L2\n"
+                               "\tBC\t%3\n"
+                               "\tBC\t%3\n"
+                               ".Ldi%=:";
+                               ".Ldi%=:";
+               case LT:
+               case LT:
+                       return "CMP\t%H2,%H1\t; cbranchdi/r LT\n"
+                       return "CMP\t%H2,%H1\t; cbranchdi/r LT\n"
+                               "\tBLT\t%3\n"
+                               "\tBLT\t%3\n"
+                               "\tBNZ\t.Ldi%=\n"
+                               "\tBNZ\t.Ldi%=\n"
+                               "\tCMP\t%L2,%L1\n"
+                               "\tCMP\t%L2,%L1\n"
+                               "\tBC\t%3\n"
+                               "\tBC\t%3\n"
+                               ".Ldi%=:";
+                               ".Ldi%=:";
+               case GE:
+               case GE:
+                       return "CMP\t%H1,%H2\t; cbranchdi/r GE\n"
+                       return "CMP\t%H1,%H2\t; cbranchdi/r GE\n"
+                               "\tBLT\t%3\n"
+                               "\tBLT\t%3\n"
+                               "\tBNZ\t.Ldi%=\n"
+                               "\tBNZ\t.Ldi%=\n"
+                               "\tCMP\t%L2,%L1\n"
+                               "\tCMP\t%L2,%L1\n"
+                               "\tBNC\t%3\n"
+                               "\tBNC\t%3\n"
+                               ".Ldi%=:";
+                               ".Ldi%=:";
+               case LTU:
+               case LTU:
+                       return "CMP\t%H2,%H1\t; cbranchdi/r LTU\n"
+                       return "CMP\t%H2,%H1\t; cbranchdi/r LTU\n"
+                               "\tCMP.Z\t%L2,%L1\n"
+                               "\tCMP.Z\t%L2,%L1\n"
+                               "\tBC\t%3\n";
+                               "\tBC\t%3\n";
+               case LEU:
+               case LEU:
+                       return "CMP\t%H1,%H2\t; cbranchdi/r LEU\n"
+                       return "CMP\t%H1,%H2\t; cbranchdi/r LEU\n"
+                               "\tBC\t.Ldi%=\n"        //; H1 > H2, skip
+                               "\tBC\t.Ldi%=\n"        //; H1 > H2, skip
+                               "\tCMP.Z\t%L1,%L2\n"    //; (H1==H2) test L1-L2
+                               "\tCMP.Z\t%L1,%L2\n"    //; (H1==H2) test L1-L2
+                               "\tBNC\t%3\n"           //; If (H1<H2)||((H1==H2)&&(L1<=L2))
+                               "\tBNC\t%3\n"           //; If (H1<H2)||((H1==H2)&&(L1<=L2))
+                               ".Ldi%=:";
+                               ".Ldi%=:";
+               case GTU:
+               case GTU:
+                       return "CMP\t%H1,%H2\t; cbranchdi/r GTU\n"
+                       return "CMP\t%H1,%H2\t; cbranchdi/r GTU\n"
+                               "\tCMP.Z\t%L1,%L2\n"
+                               "\tCMP.Z\t%L1,%L2\n"
+                               "\tBC\t%3";
+                               "\tBC\t%3";
+               case GEU:
+               case GEU:
+                       return "CMP\t%H2,%H1\t; cbranchdi/r GEU\n"
+                       return "CMP\t%H2,%H1\t; cbranchdi/r GEU\n"
+                               "\tBC\t.Ldi%=\n"
+                               "\tBC\t.Ldi%=\n"
+                               "\tCMP.Z\t%L2,%L1\n"
+                               "\tCMP.Z\t%L2,%L1\n"
+                               "\tBNC\t%3\n"
+                               "\tBNC\t%3\n"
+                               ".Ldi%=:";
+                               ".Ldi%=:";
+               default:
+               default:
+                       gcc_unreachable();
+                       gcc_unreachable();
+       }
+       }
+}
+}
+
+
+const char *
+const char *
+zip_cbranchdi(rtx comparison, rtx a, rtx b, rtx label) {
+zip_cbranchdi(rtx comparison, rtx a, rtx b, rtx label) {
+       if (REG_P(b))
+       if (REG_P(b))
+               return zip_cbranchdi_reg(comparison, a, b, label);
+               return zip_cbranchdi_reg(comparison, a, b, label);
+       else
+       else
+               return zip_cbranchdi_const(comparison, a, b, label);
+               return zip_cbranchdi_const(comparison, a, b, label);
+}
+}
+
+
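Every one of the branch helpers above reduces a 64-bit comparison to two 32-bit compares: decide on the high words first, and only let the low words break the tie when the high words are equal. A minimal C sketch of that word-wise reduction follows; it assumes 32-bit words, and the helper names (split64, ltu64) are illustrative only, not part of the patch.

    #include <stdint.h>

    /* Split a 64-bit value into the high/low words the templates call %H and %L. */
    static void split64(uint64_t v, uint32_t *hi, uint32_t *lo)
    {
        *hi = (uint32_t)(v >> 32);
        *lo = (uint32_t)v;
    }

    /* Word-wise LTU, matching the CMP %H2,%H1 / CMP.Z %L2,%L1 / BC sequence:
     * the high words decide when they differ, otherwise the low words do. */
    static int ltu64(uint64_t a, uint64_t b)
    {
        uint32_t ah, al, bh, bl;
        split64(a, &ah, &al);
        split64(b, &bh, &bl);
        if (ah != bh)
            return ah < bh;
        return al < bl;
    }

The signed cases differ only in that the high-word test is signed while the low-word tie-break stays unsigned, which is why those templates pair BLT on the high words with BC/BNC on the low words.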
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/config/zip/zipdbg.h gcc-6.2.0-zip/gcc/config/zip/zipdbg.h
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/config/zip/zipdbg.h gcc-6.2.0-zip/gcc/config/zip/zipdbg.h
--- gcc-6.2.0/gcc/config/zip/zipdbg.h   1969-12-31 19:00:00.000000000 -0500
--- gcc-6.2.0/gcc/config/zip/zipdbg.h   1969-12-31 19:00:00.000000000 -0500
+++ gcc-6.2.0-zip/gcc/config/zip/zipdbg.h       2017-02-17 16:47:25.727651898 -0500
+++ gcc-6.2.0-zip/gcc/config/zip/zipdbg.h       2017-02-17 16:47:25.727651898 -0500
@@ -0,0 +1,8 @@
@@ -0,0 +1,8 @@
+#define        DO_ZIP_DEBUGS
+#define        DO_ZIP_DEBUGS
+#ifdef DO_ZIP_DEBUGS
+#ifdef DO_ZIP_DEBUGS
+#include <stdio.h>
+#include <stdio.h>
+#define        ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s:%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+#define        ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s:%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+extern void    zip_debug_rtx(const_rtx);
+extern void    zip_debug_rtx(const_rtx);
+#else
+#else
+#define        ZIP_DEBUG_LINE(STR,RTX)
+#define        ZIP_DEBUG_LINE(STR,RTX)
+#endif
+#endif
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/config/zip/zip-di.md gcc-6.2.0-zip/gcc/config/zip/zip-di.md
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/config/zip/zip-di.md gcc-6.2.0-zip/gcc/config/zip/zip-di.md
--- gcc-6.2.0/gcc/config/zip/zip-di.md  1969-12-31 19:00:00.000000000 -0500
--- gcc-6.2.0/gcc/config/zip/zip-di.md  1969-12-31 19:00:00.000000000 -0500
+++ gcc-6.2.0-zip/gcc/config/zip/zip-di.md      2017-02-22 15:56:17.195319460 -0500
+++ gcc-6.2.0-zip/gcc/config/zip/zip-di.md      2018-06-05 18:57:29.386399718 -0400
@@ -0,0 +1,528 @@
@@ -0,0 +1,548 @@
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;; Filename:   zip-di.md
+;; Filename:   zip-di.md
+;;
+;;
+;; Project:    Zip CPU -- a small, lightweight, RISC CPU soft core
+;; Project:    Zip CPU -- a small, lightweight, RISC CPU soft core
+;;
+;;
+;; Purpose:    This is the machine description of the Zip CPU as needed by the
+;; Purpose:    This is the machine description of the Zip CPU as needed by the
+;;             GNU compiler collection (GCC).  Specifically, this is the
+;;             GNU compiler collection (GCC).  Specifically, this is the
+;;     section of the description associated with 64-bit values and
+;;     section of the description associated with 64-bit values and
+;;     arithmetic.
+;;     arithmetic.
+;;
+;;
+;;
+;;
+;; Creator:    Dan Gisselquist, Ph.D.
+;; Creator:    Dan Gisselquist, Ph.D.
+;;             Gisselquist Technology, LLC
+;;             Gisselquist Technology, LLC
+;;
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;; Copyright (C) 2015, Gisselquist Technology, LLC
+;; Copyright (C) 2015-2018, Gisselquist Technology, LLC
+;;
+;;
+;; This program is free software (firmware): you can redistribute it and/or
+;; This program is free software (firmware): you can redistribute it and/or
+;; modify it under the terms of  the GNU General Public License as published
+;; modify it under the terms of  the GNU General Public License as published
+;; by the Free Software Foundation, either version 3 of the License, or (at
+;; by the Free Software Foundation, either version 3 of the License, or (at
+;; your option) any later version.
+;; your option) any later version.
+;;
+;;
+;; This program is distributed in the hope that it will be useful, but WITHOUT
+;; This program is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+;; FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+;; FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+;; for more details.
+;; for more details.
+;;
+;;
+;; License:    GPL, v3, as defined and found on www.gnu.org,
+;; License:    GPL, v3, as defined and found on www.gnu.org,
+;;             http://www.gnu.org/licenses/gpl.html
+;;             http://www.gnu.org/licenses/gpl.html
+;;
+;;
+;;
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;;
+;;
+;
+;
+;
+;
+;
+;
+(define_expand "movdi"
+(define_expand "movdi"
+       [(set (match_operand:DI 0 "nonimmediate_operand" "")
+       [(set (match_operand:DI 0 "nonimmediate_operand" "")
+               (match_operand:DI 1 "general_operand" ""))]
+               (match_operand:DI 1 "general_operand" ""))]
+       "(ZIP_HAS_DI)"
+       "(ZIP_HAS_DI)"
+       {
+       {
+               if (zip_expand_movdi(operands[0], operands[1]))
+               if (zip_expand_movdi(operands[0], operands[1]))
+                       DONE;
+                       DONE;
+               FAIL;
+               FAIL;
+       }
+       }
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
+;
+;
+;
+;
+;
+;
+(define_insn "movdi_raw"
+(define_insn "movdi_raw"
+       [(set (match_operand:DI 0 "nonimmediate_operand" "=r,Q,r,r")
+       [(set (match_operand:DI 0 "nonimmediate_operand" "=r,Q,r,r")
+               (match_operand:DI 1 "general_operand" "r,r,Q,i"))]
+               (match_operand:DI 1 "general_operand" "r,r,Q,i"))]
+       "(ZIP_HAS_DI)"
+       "(ZIP_HAS_DI)"
+       {
+       {
+               if ((REG_P(operands[0]))&&(REG_P(operands[1])))
+               if ((REG_P(operands[0]))&&(REG_P(operands[1])))
+                       return  "MOV %H1,%H0\t; MOV:DI\n\tMOV %L1,%L0";
+                       return  "MOV %H1,%H0\t; MOV:DI\n\tMOV %L1,%L0";
+               else if (MEM_P(operands[0]))    //; StoreDI
+               else if (MEM_P(operands[0]))    //; StoreDI
+                       return  "SW %H1,%0\t; Store:DI\n\tSW %L1,4+%0";
+                       return  "SW %H1,%0\t; Store:DI\n\tSW %L1,4+%0";
+               else if (MEM_P(operands[1]))    //; LoadDI
+               else if (MEM_P(operands[1])) {  //; LoadDI
 
+                       //; Deal with the case of
 
+                       //;     LOD (R0),R0
 
+                       //;     LOD 4(R0),R1
 
+                       //; By reversing the order of the operands, to
 
+                       //;     LOD 4(R0),R1
 
+                       //;     LOD (R0),R0
 
+                       //; This isn't efficient, so let's do whatever we can to
 
+                       //; avoid this, still ... if we do it, we can make it
 
+                       //; work
 
+                       rtx     address = XEXP(operands[1],0);
 
+                       int     hazard = 0;
 
+                       if ( (REG_P(address))
 
+                               &&((REGNO(address))==(REGNO(operands[0]))) )
 
+                               hazard = 1;
 
+                       else if ( (PLUS == (GET_CODE(address)))
 
+                               &&(REGNO(XEXP(address,0))==(REGNO(operands[0]))) )
 
+                               hazard = 1;
 
+                       if (hazard)
 
+                               return  "LW 4+%1,%L0\t; Load:DI\n\tLW %1,%H0";
 
+                       else
+                       return  "LW %1,%H0\t; Load:DI\n\tLW 4+%1,%L0";
+                       return  "LW %1,%H0\t; Load:DI\n\tLW 4+%1,%L0";
+               else if (CONST_INT_P(operands[1])) {
+               } else if (CONST_INT_P(operands[1])) {
+                       char    tmp[128];
+                       char    tmp[128];
+                       HOST_WIDE_INT   v = INTVAL(operands[1]);
+                       HOST_WIDE_INT   v = INTVAL(operands[1]);
+                       sprintf(tmp, "LDI\t0x%08x,%%H0\t; LDI #:DI,%%H0\n\tLDI\t0x%08x,%%L0",
+                       sprintf(tmp, "LDI\t0x%08x,%%H0\t; LDI #:DI,%%H0\n\tLDI\t0x%08x,%%L0",
+                               (unsigned)(v>>32),
+                               (unsigned)(v>>32),
+                               (unsigned)(v));
+                               (unsigned)(v));
+                       return ggc_alloc_string(tmp, -1);
+                       return ggc_alloc_string(tmp, -1);
+               } else
+               } else
+                       gcc_unreachable();
+                       gcc_unreachable();
+       }
+       }
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
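The rev 209 change above exists because a DI load is split into two 32-bit LW instructions, and the first of them may overwrite the base register before the second executes. A small C sketch of that ordering decision, with hypothetical helper names and 32-bit words assumed:

    /* A DI load becomes two SI loads.  If the base register of the address is
     * also the destination of the first (high-word) load, loading the high
     * word first would corrupt the address, so emit the low-word load first. */
    struct di_load { const char *first, *second; };

    static struct di_load order_di_load(int base_regno, int dest_hi_regno)
    {
        struct di_load seq;
        if (base_regno == dest_hi_regno) {
            seq.first  = "LW 4+%1,%L0";   /* low word while the base is intact */
            seq.second = "LW %1,%H0";     /* high word last, clobbers the base */
        } else {
            seq.first  = "LW %1,%H0";
            seq.second = "LW 4+%1,%L0";
        }
        return seq;
    }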
+;
+;
+;
+;
+;
+;
+; ADD
+; ADD
+;
+;
+;
+;
+(define_insn "adddi3" ; Fastest/best instruction always goes first
+(define_insn "adddi3" ; Fastest/best instruction always goes first
+       [(set (match_operand:DI 0 "register_operand" "=r")
+       [(set (match_operand:DI 0 "register_operand" "=r")
+               (plus:DI (match_operand:DI 1 "register_operand" "0")
+               (plus:DI (match_operand:DI 1 "register_operand" "0")
+                       (match_operand:DI 2 "register_operand" "r")))
+                       (match_operand:DI 2 "register_operand" "r")))
+       (clobber (reg:CC CC_REG))
+       (clobber (reg:CC CC_REG))
+       ]
+       ]
+       "(ZIP_HAS_DI)"
+       "(ZIP_HAS_DI)"
+       "ADD    %L2,%L0\n\tADD.C\t1,%H0\n\tADD\t%H2,%H0"
+       "ADD    %L2,%L0\n\tADD.C\t1,%H0\n\tADD\t%H2,%H0"
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
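The adddi3 template is the standard add-with-carry split: add the low words, conditionally bump the high word when that add carried, then add the high words. In C, assuming 32-bit words:

    #include <stdint.h>

    /* 64-bit add from 32-bit halves, mirroring ADD %L2,%L0 / ADD.C 1,%H0 / ADD %H2,%H0. */
    static void add64(uint32_t *hi0, uint32_t *lo0, uint32_t hi2, uint32_t lo2)
    {
        uint32_t sum = *lo0 + lo2;
        if (sum < *lo0)     /* the low-word add carried */
            *hi0 += 1;      /* ADD.C 1,%H0 */
        *lo0 = sum;
        *hi0 += hi2;
    }

The subdi3 pattern below is the mirror image, borrowing one from the high word when the low-word subtract underflows.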
+;
+;
+;
+;
+;
+;
+; SUB
+; SUB
+;
+;
+;
+;
+(define_insn "subdi3"
+(define_insn "subdi3"
+       [(set (match_operand:DI 0 "register_operand" "=r")
+       [(set (match_operand:DI 0 "register_operand" "=r")
+               (minus:DI (match_operand:DI 1 "register_operand" "0")
+               (minus:DI (match_operand:DI 1 "register_operand" "0")
+                       (match_operand:DI 2 "register_operand" "r")))
+                       (match_operand:DI 2 "register_operand" "r")))
+       (clobber (reg:CC CC_REG))
+       (clobber (reg:CC CC_REG))
+       ]
+       ]
+       "(ZIP_HAS_DI)"
+       "(ZIP_HAS_DI)"
+       "SUB    %L2,%L0\n\tSUB.C\t1,%H0\n\tSUB\t%H2,%H0"
+       "SUB    %L2,%L0\n\tSUB.C\t1,%H0\n\tSUB\t%H2,%H0"
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
+;
+;
+;
+;
+; AND
+; AND
+;
+;
+;
+;
+(define_insn "anddi3"
+(define_insn "anddi3"
+       [(set (match_operand:DI 0 "register_operand" "=r")
+       [(set (match_operand:DI 0 "register_operand" "=r")
+               (and:DI (match_operand:DI 1 "register_operand" "%0")
+               (and:DI (match_operand:DI 1 "register_operand" "%0")
+                       (match_operand:DI 2 "register_operand" "r")))
+                       (match_operand:DI 2 "register_operand" "r")))
+       (clobber (reg:CC CC_REG))
+       (clobber (reg:CC CC_REG))
+       ]
+       ]
+       "(ZIP_HAS_DI)"
+       "(ZIP_HAS_DI)"
+       "AND    %L2,%L0\t; AND:DI\n\tAND\t%H2,%H0"
+       "AND    %L2,%L0\t; AND:DI\n\tAND\t%H2,%H0"
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
+;
+;
+;
+;
+; iOR
+; iOR
+;
+;
+;
+;
+(define_insn "iordi3"
+(define_insn "iordi3"
+       [(set (match_operand:DI 0 "register_operand" "=r")
+       [(set (match_operand:DI 0 "register_operand" "=r")
+               (ior:DI (match_operand:DI 1 "register_operand" "%0")
+               (ior:DI (match_operand:DI 1 "register_operand" "%0")
+                       (match_operand:DI 2 "register_operand" "r")))
+                       (match_operand:DI 2 "register_operand" "r")))
+       (clobber (reg:CC CC_REG))
+       (clobber (reg:CC CC_REG))
+       ]
+       ]
+       "(ZIP_HAS_DI)"
+       "(ZIP_HAS_DI)"
+       "OR     %L2,%L0\t; OR:DI\n\tOR\t%H2,%H0"
+       "OR     %L2,%L0\t; OR:DI\n\tOR\t%H2,%H0"
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
+;
+;
+;
+;
+; XOR
+; XOR
+;
+;
+;
+;
+(define_insn "xordi3"
+(define_insn "xordi3"
+       [(set (match_operand:DI 0 "register_operand" "=r")
+       [(set (match_operand:DI 0 "register_operand" "=r")
+               (xor:DI (match_operand:DI 1 "register_operand" "%0")
+               (xor:DI (match_operand:DI 1 "register_operand" "%0")
+                       (match_operand:DI 2 "register_operand" "r")))
+                       (match_operand:DI 2 "register_operand" "r")))
+       (clobber (reg:CC CC_REG))
+       (clobber (reg:CC CC_REG))
+       ]
+       ]
+       "(ZIP_HAS_DI)"
+       "(ZIP_HAS_DI)"
+       "XOR    %L2,%L0\t; XOR:DI\n\tXOR\t%H2,%H0"
+       "XOR    %L2,%L0\t; XOR:DI\n\tXOR\t%H2,%H0"
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
+;
+;
+; NEG
+; NEG
+;
+;
+;
+;
+(define_insn "negdi2"
+(define_insn "negdi2"
+       [(set (match_operand:DI 0 "register_operand" "=r")
+       [(set (match_operand:DI 0 "register_operand" "=r")
+               (neg:DI (match_operand:DI 1 "register_operand" "0")))
+               (neg:DI (match_operand:DI 1 "register_operand" "0")))
+       (clobber (reg:CC CC_REG))
+       (clobber (reg:CC CC_REG))
+       ]
+       ]
+       "(ZIP_HAS_DI)"
+       "(ZIP_HAS_DI)"
+       "XOR    -1,%L0\t; NEG:DI\n\tXOR\t-1,%H0\n\tADD\t1,%L0\n\tADD.C\t1,%H0"
+       "XOR    -1,%L0\t; NEG:DI\n\tXOR\t-1,%H0\n\tADD\t1,%L0\n\tADD.C\t1,%H0"
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
+;
+;
+;
+;
+; ABS
+; ABS
+;
+;
+;
+;
+(define_insn "absdi2"
+(define_insn "absdi2"
+       [(set (match_operand:DI 0 "register_operand" "=r")
+       [(set (match_operand:DI 0 "register_operand" "=r")
+               (abs:DI (match_operand:DI 1 "register_operand" "0")))
+               (abs:DI (match_operand:DI 1 "register_operand" "0")))
+       (clobber (match_scratch:SI 2 "=r"))
+       (clobber (match_scratch:SI 2 "=r"))
+       (clobber (reg:CC CC_REG))
+       (clobber (reg:CC CC_REG))
+       ]
+       ]
+       "(ZIP_HAS_DI)"
+       "(ZIP_HAS_DI)"
+       "CLR    %2      ; ABSDI
+       "CLR    %2      ; ABSDI
+       TEST    %H0
+       TEST    %H0
+       LDILO.LT        1,%2
+       LDILO.LT        1,%2
+       XOR.LT  -1,%L0
+       XOR.LT  -1,%L0
+       XOR.LT  -1,%H0
+       XOR.LT  -1,%H0
+       ADD     %2,%L0
+       ADD     %2,%L0
+       ADD.C   1,%H0"
+       ADD.C   1,%H0"
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
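Both negdi2 and absdi2 lean on the identity -x = ~x + 1 applied across the word pair: invert both words, add one to the low word, and propagate the carry into the high word. A C sketch, 32-bit words assumed and helper names illustrative:

    #include <stdint.h>

    /* Two's-complement negate of a double word: XOR -1 on both halves, ADD 1
     * to the low half, and propagate the carry (ADD.C 1) into the high half. */
    static void neg64(uint32_t *hi, uint32_t *lo)
    {
        *hi = ~*hi;
        *lo = ~*lo;
        *lo += 1;
        if (*lo == 0)   /* the +1 wrapped, so carry into the high word */
            *hi += 1;
    }

    /* absdi2 performs the same steps conditionally, only when the high word
     * tests negative (TEST %H0 with the .LT predicated ops). */
    static void abs64(uint32_t *hi, uint32_t *lo)
    {
        if ((int32_t)*hi < 0)
            neg64(hi, lo);
    }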
+;
+;
+;
+;
+; NOT
+; NOT
+;
+;
+;
+;
+(define_insn "one_cmpldi2"
+(define_insn "one_cmpldi2"
+       [(set (match_operand:DI 0 "register_operand" "=r")
+       [(set (match_operand:DI 0 "register_operand" "=r")
+               (not:DI (match_operand:DI 1 "register_operand" "0")))
+               (not:DI (match_operand:DI 1 "register_operand" "0")))
+       (clobber (reg:CC CC_REG))
+       (clobber (reg:CC CC_REG))
+       ]
+       ]
+       "(ZIP_HAS_DI)"
+       "(ZIP_HAS_DI)"
+       "XOR    -1,%L0\t; NOT:DI\n\tXOR\t-1,%H0"
+       "XOR    -1,%L0\t; NOT:DI\n\tXOR\t-1,%H0"
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
+;
+;
+; Unsigned min/max
+; Unsigned min/max
+;
+;
+;
+;
+(define_insn "umindi3"
+(define_insn "umindi3"
+       [(set (match_operand:DI 0 "register_operand" "=r")
+       [(set (match_operand:DI 0 "register_operand" "=r")
+               (umin:DI (match_operand:DI 1 "register_operand" "%0")
+               (umin:DI (match_operand:DI 1 "register_operand" "%0")
+                       (match_operand:DI 2 "register_operand" "r")))
+                       (match_operand:DI 2 "register_operand" "r")))
+       (clobber (reg:CC CC_REG))
+       (clobber (reg:CC CC_REG))
+       ]
+       ]
+       "(ZIP_HAS_DI)"
+       "(ZIP_HAS_DI)"
+       "CMP    %H0,%H2 ; umin:DI
+       "CMP    %H0,%H2 ; umin:DI
+       CMP.Z   %L0,%L2
+       CMP.Z   %L0,%L2
+       MOV.C   %H2,%H0
+       MOV.C   %H2,%H0
+       MOV.C   %L2,%L0"
+       MOV.C   %L2,%L0"
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+(define_insn "umaxdi3"
+(define_insn "umaxdi3"
+       [(set (match_operand:DI 0 "register_operand" "=r")
+       [(set (match_operand:DI 0 "register_operand" "=r")
+               (umax:DI (match_operand:DI 1 "register_operand" "%0")
+               (umax:DI (match_operand:DI 1 "register_operand" "%0")
+                       (match_operand:DI 2 "register_operand" "r")))
+                       (match_operand:DI 2 "register_operand" "r")))
+       (clobber (reg:CC CC_REG))
+       (clobber (reg:CC CC_REG))
+       ]
+       ]
+       "(ZIP_HAS_DI)"
+       "(ZIP_HAS_DI)"
+       "CMP    %H2,%H0 ; umax:DI
+       "CMP    %H2,%H0 ; umax:DI
+       CMP.Z   %L2,%L0
+       CMP.Z   %L2,%L0
+       MOV.C   %H2,%H0
+       MOV.C   %H2,%H0
+       MOV.C   %L2,%L0"
+       MOV.C   %L2,%L0"
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
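The unsigned min/max patterns reuse the same lexicographic compare as the branches: compare the high words, let CMP.Z re-decide on the low words only when the high words tie, then conditionally move the other operand over the destination. Roughly, in C (names illustrative):

    #include <stdint.h>

    /* umindi3: keep (hi0:lo0) unless (hi2:lo2) is smaller, in which case the
     * MOV.C pair copies it into the destination.  umaxdi3 swaps the compare. */
    static void umin64(uint32_t *hi0, uint32_t *lo0, uint32_t hi2, uint32_t lo2)
    {
        int take_other = (hi2 < *hi0) || (hi2 == *hi0 && lo2 < *lo0);
        if (take_other) {
            *hi0 = hi2;
            *lo0 = lo2;
        }
    }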
+;
+;
+;
+;
+; Multiply
+; Multiply
+;
+;
+;
+;
+(define_expand "muldi3"
+(define_expand "muldi3"
+       [(parallel [(set (match_operand:DI 0 "register_operand" "=r")
+       [(parallel [(set (match_operand:DI 0 "register_operand" "=r")
+               (mult:DI (match_operand:DI 1 "register_operand" "r")
+               (mult:DI (match_operand:DI 1 "register_operand" "r")
+                       (match_operand:DI 2 "register_operand" "r")))
+                       (match_operand:DI 2 "register_operand" "r")))
+       (clobber (match_dup 1))
+       (clobber (match_dup 1))
+       (clobber (match_dup 2))
+       (clobber (match_dup 2))
+       (clobber (match_scratch:SI 3 "=r"))
+       (clobber (match_scratch:SI 3 "=r"))
+       (clobber (reg:CC CC_REG))])]
+       (clobber (reg:CC CC_REG))])]
+       "(ZIP_HAS_DI)")
+       "(ZIP_HAS_DI)")
+;
+;
+(define_insn "muldi3_raw"
+(define_insn "muldi3_raw"
+       [(set (match_operand:DI 0 "register_operand" "=r")
+       [(set (match_operand:DI 0 "register_operand" "=r")
+               (mult:DI (match_operand:DI 1 "register_operand" "r")
+               (mult:DI (match_operand:DI 1 "register_operand" "r")
+                       (match_operand:DI 2 "register_operand" "r")))
+                       (match_operand:DI 2 "register_operand" "r")))
+       (clobber (match_dup 1))
+       (clobber (match_dup 1))
+       (clobber (match_dup 2))
+       (clobber (match_dup 2))
+       (clobber (match_scratch:SI 3 "=r"))
+       (clobber (match_scratch:SI 3 "=r"))
+       (clobber (reg:CC CC_REG))]
+       (clobber (reg:CC CC_REG))]
+       "(ZIP_HAS_DI)"
+       "(ZIP_HAS_DI)"
+       {
+       {
+               int     regno[3];
+               int     regno[3];
+               regno[0] = REGNO(operands[0]);
+               regno[0] = REGNO(operands[0]);
+               regno[1] = REGNO(operands[1]);
+               regno[1] = REGNO(operands[1]);
+               regno[2] = REGNO(operands[2]);
+               regno[2] = REGNO(operands[2]);
+               //; We need to adjust what we are doing based upon which
+               //; We need to adjust what we are doing based upon which
+               //; registers are in common.  We have a couple of cases:
+               //; registers are in common.  We have a couple of cases:
+               //;
+               //;
+               if ((regno[0] == regno[1])&&(regno[0] == regno[2])) {
+               if ((regno[0] == regno[1])&&(regno[0] == regno[2])) {
+                       //; RA = RA * RA
+                       //; RA = RA * RA
+                       //;
+                       //;
+                       //; (H0:L0) = (H0:L0) * (H0:L0)
+                       //; (H0:L0) = (H0:L0) * (H0:L0)
+                       //; (H0:L0) = (H0*2^32 + L0) * (H0 * 2^32 + L0)
+                       //; (H0:L0) = (H0*2^32 + L0) * (H0 * 2^32 + L0)
+                       //; (H0:L0) = (H0*H0*2^64 + (H0*L0+L0*H0)*2^32 + L0 *L0)
+                       //; (H0:L0) = (H0*H0*2^64 + (H0*L0+L0*H0)*2^32 + L0 *L0)
+                       //;     = (H0*L0+L0*H1):(L0*L0)
+                       //;     = (H0*L0+L0*H1):(L0*L0)
+                       //;    :L0  = LOPART(L0 * L0)
+                       //;    :L0  = LOPART(L0 * L0)
+                       //;  H0     = HIPART(L0 * L0)
+                       //;  H0     = HIPART(L0 * L0)
+                       //;  H0    += LOPART(H0 * L0)
+                       //;  H0    += LOPART(H0 * L0)
+                       //;  H0    += LOPART(L0 * H0)
+                       //;  H0    += LOPART(L0 * H0)
+                       //;
+                       //;
+                       //;  Rx = L0
+                       //;  Rx = L0
+                       //;  H0 *= L0  ( =   LOPART( HI * LO )
+                       //;  H0 *= L0  ( =   LOPART( HI * LO )
+                       //;  H0 <<= 1  ( = 2*LOPART( HI * LO ) )
+                       //;  H0 <<= 1  ( = 2*LOPART( HI * LO ) )
+                       //;  Rx *= L0  ( =   HIPART( LO * LO )
+                       //;  Rx *= L0  ( =   HIPART( LO * LO )
+                       //;  L0 *= L0  ( =   LOPART( LO * LO )
+                       //;  L0 *= L0  ( =   LOPART( LO * LO )
+                       //;  H0 += Rx  ( = 2*LOPART( HI * LO ) + HIPART( LO *LO)
+                       //;  H0 += Rx  ( = 2*LOPART( HI * LO ) + HIPART( LO *LO)
+                       //;
+                       //;
+                       return "; muldi3_raw/A (%H0:%L0) = (%H1:%L1) * (%H2:%L2)\n"
+                       return "; muldi3_raw/A (%H0:%L0) = (%H1:%L1) * (%H2:%L2)\n"
+                               "\tMOV\t%L0,%3\n"
+                               "\tMOV\t%L0,%3\n"
+                               "\tMPY\t%L0,%H0\n"
+                               "\tMPY\t%L0,%H0\n"
+                               "\tLSL\t1,%H0\n"
+                               "\tLSL\t1,%H0\n"
+                               "\tMPYUHI\t%L0,%3\n"
+                               "\tMPYUHI\t%L0,%3\n"
+                               "\tMPY\t%L0,%L0\n"
+                               "\tMPY\t%L0,%L0\n"
+                               "\tADD\t%3,%H0";
+                               "\tADD\t%3,%H0";
+               } else if ((regno[0] != regno[1])&&(regno[1] == regno[2])) {
+               } else if ((regno[0] != regno[1])&&(regno[1] == regno[2])) {
+                       //; RA = RB * RB
+                       //; RA = RB * RB
+                       //;
+                       //;
+                       //; (H0:L0) = (H1:L1) * (H1:L1)
+                       //; (H0:L0) = (H1:L1) * (H1:L1)
+                       //; (H0:L0) = (H1*2^32 + L1) * (H1 * 2^32 + L1)
+                       //; (H0:L0) = (H1*2^32 + L1) * (H1 * 2^32 + L1)
+                       //; (H0:L0) = (H1*H1*2^64 + (H1*L1+L1*H1)*2^32 + L1 * L1)
+                       //; (H0:L0) = (H1*H1*2^64 + (H1*L1+L1*H1)*2^32 + L1 * L1)
+                       //;     = (H1*L1+L1*H1):(L1*L1)
+                       //;     = (H1*L1+L1*H1):(L1*L1)
+                       //;    :L0  = LOPART(L1 * L1)
+                       //;    :L0  = LOPART(L1 * L1)
+                       //;  H0     = HIPART(L1 * L1)
+                       //;  H0     = HIPART(L1 * L1)
+                       //;  H0    += LOPART(H1 * L1)
+                       //;  H0    += LOPART(H1 * L1)
+                       //;  H0    += LOPART(L1 * H1)
+                       //;  H0    += LOPART(L1 * H1)
+                       //;
+                       //;
+                       //; -------------------
+                       //; -------------------
+                       //;     L0  = L1
+                       //;     L0  = L1
+                       //;     L0  = LOPART(L0 * L1)
+                       //;     L0  = LOPART(L0 * L1)
+                       //;     H0  = H1
+                       //;     H0  = H1
+                       //;     H0  = LOPART(H0 * L1)
+                       //;     H0  = LOPART(H0 * L1)
+                       //;     H0 <<= 1;       i.e. *= 2
+                       //;     H0 <<= 1;       i.e. *= 2
+                       //;     L1  = HIPART(L1 * L1)
+                       //;     L1  = HIPART(L1 * L1)
+                       //;     H0 += L1
+                       //;     H0 += L1
+                       //;
+                       //;
+                       return "; muldi3_raw/B (%H0:%L0) = (%H1:%L1) * (%H2:%L2)\n"
+                       return "; muldi3_raw/B (%H0:%L0) = (%H1:%L1) * (%H2:%L2)\n"
+                       "\tMOV\t%L1,%L0\n"
+                       "\tMOV\t%L1,%L0\n"
+                       "\tMPY\t%L1,%L0\n"
+                       "\tMPY\t%L1,%L0\n"
+                       "\tMOV\t%H1,%H0\n"
+                       "\tMOV\t%H1,%H0\n"
+                       "\tMPY\t%L1,%H0\n"
+                       "\tMPY\t%L1,%H0\n"
+                       "\tLSL\t1,%H0\n"
+                       "\tLSL\t1,%H0\n"
+                       "\tMPYUHI\t%L1,%L1\n"
+                       "\tMPYUHI\t%L1,%L1\n"
+                       "\tADD\t%L2,%H0";
+                       "\tADD\t%L2,%H0";
+               } else if ((regno[0] == regno[1])&&(regno[1] != regno[2])) {
+               } else if ((regno[0] == regno[1])&&(regno[1] != regno[2])) {
+                       //; RA = RA * RB, with scratch Rx
+                       //; RA = RA * RB, with scratch Rx
+                       //;
+                       //;
+                       //; (H0:L0) = (H0:L0) * (H1:L1)
+                       //; (H0:L0) = (H0:L0) * (H1:L1)
+                       //; (H0:L0) = (H0*2^32 + L0) * (H1 * 2^32 + L1)
+                       //; (H0:L0) = (H0*2^32 + L0) * (H1 * 2^32 + L1)
+                       //; (H0:L0) = (H0*H1*2^64 + (H0*L1+L0*H1)*2^32 + L0 *L1)
+                       //; (H0:L0) = (H0*H1*2^64 + (H0*L1+L0*H1)*2^32 + L0 *L1)
+                       //;     = (H0*L1+L0*H1):(L0*L1)
+                       //;     = (H0*L1+L0*H1):(L0*L1)
+                       //;     Rx  = L0
+                       //;     Rx  = L0
+                       //;    :L0  = LOPART(L1 * R0)
+                       //;    :L0  = LOPART(L1 * R0)
+                       //;  H0     = LOPART(H0 * L1)
+                       //;  H0     = LOPART(H0 * L1)
+                       //;  H0    += H1 = LOPART(Rx * H1)
+                       //;  H0    += H1 = LOPART(Rx * H1)
+                       //;  H0    += HIPART(L1 * Rx)
+                       //;  H0    += HIPART(L1 * Rx)
+                       //;
+                       //;
+                       return "; muldi3_raw/C (%H0:%L0) = (%H1:%L1) * (%H2:%L2)\n"
+                       return "; muldi3_raw/C (%H0:%L0) = (%H1:%L1) * (%H2:%L2)\n"
+                       "\tMOV\t%L0,%3\n"
+                       "\tMOV\t%L0,%3\n"
+                       "\tMPY\t%L1,%L0\n"
+                       "\tMPY\t%L1,%L0\n"
+                       "\tMOV\t%L1,%H0\n"
+                       "\tMOV\t%L1,%H0\n"
+                       "\tMPY\t%H1,%H0\n"
+                       "\tMPY\t%H1,%H0\n"
+                       "\tMPY\t%3,%H1\n"
+                       "\tMPY\t%3,%H1\n"
+                       "\tADD\t%H1,%H0\n"
+                       "\tADD\t%H1,%H0\n"
+                       "\tMPY\t%3,%L1\n"
+                       "\tMPY\t%3,%L1\n"
+                       "\tADD\t%L1,%H0";
+                       "\tADD\t%L1,%H0";
+               } else {
+               } else {
+                       //; RA = RB * RC
+                       //; RA = RB * RC
+                       //;
+                       //;
+                       //; (H0:L0) = (H1:L1) * (H2:L2)
+                       //; (H0:L0) = (H1:L1) * (H2:L2)
+                       //; (H0:L0) = (H1*2^32 + L1) * (H2 * 2^32 + L2)
+                       //; (H0:L0) = (H1*2^32 + L1) * (H2 * 2^32 + L2)
+                       //; (H0:L0) = (H1*H2*2^64 + (H1*L2+L1*H2)*2^32 + L1 *L2)
+                       //; (H0:L0) = (H1*H2*2^64 + (H1*L2+L1*H2)*2^32 + L1 *L2)
+                       //;     = (H1*L2+L1*H2):(L1*L2)
+                       //;     = (H1*L2+L1*H2):(L1*L2)
+                       //;    :L0  = LOPART(L1 * L2)
+                       //;    :L0  = LOPART(L1 * L2)
+                       //;  H0     = HIPART(L1 * L2)
+                       //;  H0     = HIPART(L1 * L2)
+                       //;  H0    += LOPART(H1 * L2)
+                       //;  H0    += LOPART(H1 * L2)
+                       //;  H0    += LOPART(L1 * H2)
+                       //;  H0    += LOPART(L1 * H2)
+                       //;
+                       //;
+                       //; We can re-order this to try to save some registers
+                       //; We can re-order this to try to save some registers
+                       //;
+                       //;
+                       //;     H1  = LOPART(H1 * L2)
+                       //;     H1  = LOPART(H1 * L2)
+                       //;     H2  = LOPART(L1 * H2)
+                       //;     H2  = LOPART(L1 * H2)
+                       //;    :L0  = LOPART(L1 * L2)
+                       //;    :L0  = LOPART(L1 * L2)
+                       //;  H0     = HIPART(L1 * L2)
+                       //;  H0     = HIPART(L1 * L2)
+                       //;  H0    += H2
+                       //;  H0    += H2
+                       //;  H0    += H1
+                       //;  H0    += H1
+                       //;
+                       //;
+                    return "; muldi3_raw/D (%H0:%L0) = (%H1:%L1) * (%H2:%L2)\n"
+                    return "; muldi3_raw/D (%H0:%L0) = (%H1:%L1) * (%H2:%L2)\n"
+                       "\tMPY  %L2,%H1 ; H1 = H1 * L2\n"
+                       "\tMPY  %L2,%H1 ; H1 = H1 * L2\n"
+                       "\tMPY  %L1,%H2 ; H2 = L1 * H2\n"
+                       "\tMPY  %L1,%H2 ; H2 = L1 * H2\n"
+                       "\tMOV  %L2,%L0 ; H0:L0 = L1 * L2\n"
+                       "\tMOV  %L2,%L0 ; H0:L0 = L1 * L2\n"
+                       "\tMOV  %L2,%H0\n"
+                       "\tMOV  %L2,%H0\n"
+                       "\tMPY  %L1,%L0\n"
+                       "\tMPY  %L1,%L0\n"
+                       "\tMPYUHI       %L1,%H0\n"
+                       "\tMPYUHI       %L1,%H0\n"
+                       "\tADD  %H2,%H0 ; H0 += (H2 = L1 * H2)\n"
+                       "\tADD  %H2,%H0 ; H0 += (H2 = L1 * H2)\n"
+                       "\tADD  %H1,%H0 ; H0 += (H1 = H1 * L2)";
+                       "\tADD  %H1,%H0 ; H0 += (H1 = H1 * L2)";
+               }
+               }
+       }
+       }
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
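All four register-allocation cases above implement the same schoolbook identity: with 32-bit halves, (H1:L1)*(H2:L2) taken modulo 2^64 has low word LO(L1*L2) and high word HI(L1*L2) + LO(H1*L2) + LO(L1*H2); the H1*H2 term falls entirely outside the low 64 bits and is dropped. A C sketch of that decomposition (MPY corresponds to the low 32 bits of a product, MPYUHI to the high 32 bits; the helper name is illustrative):

    #include <stdint.h>

    /* 64x64 -> 64 multiply from 32-bit halves, the identity behind muldi3_raw. */
    static void mul64(uint32_t *hi0, uint32_t *lo0,
            uint32_t h1, uint32_t l1, uint32_t h2, uint32_t l2)
    {
        uint64_t ll = (uint64_t)l1 * l2;    /* MPY + MPYUHI of the low words */
        uint32_t lo = (uint32_t)ll;
        uint32_t hi = (uint32_t)(ll >> 32);
        hi += h1 * l2;                      /* LO(H1*L2), a single MPY */
        hi += l1 * h2;                      /* LO(L1*H2), a single MPY */
        *hi0 = hi;
        *lo0 = lo;
    }

The four assembly variants differ only in how they dodge the overlap between the destination pair and the source pairs while computing these three partial products.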
+;
+;
+;
+;
+; Still missing DI instructions for smin:DI, smax:DI, movdicc, adddicc,
+; Still missing DI instructions for smin:DI, smax:DI, movdicc, adddicc,
+;      div:di, divu:di (library routine)
+;      div:di, divu:di (library routine)
+;
+;
+;
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;; Conditional arithmetic instructions
+;; Conditional arithmetic instructions
+;;
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+;
+;
+;
+;
+;
+;
+(define_insn "cstoredi4" ; Store 0 or 1 in %0 based on cmp between %2&%3
+(define_insn "cstoredi4" ; Store 0 or 1 in %0 based on cmp between %2&%3
+       [(set (match_operand:SI 0 "register_operand" "=r")
+       [(set (match_operand:SI 0 "register_operand" "=r")
+               (if_then_else:SI (match_operator 1 "ordered_comparison_operator"
+               (if_then_else:SI (match_operator 1 "ordered_comparison_operator"
+                       [(match_operand:DI 2 "register_operand" "r")
+                       [(match_operand:DI 2 "register_operand" "r")
+                               (match_operand:DI 3 "register_operand" "r")])
+                               (match_operand:DI 3 "register_operand" "r")])
+                       (const_int 1) (const_int 0)))
+                       (const_int 1) (const_int 0)))
+       (clobber (reg:CC CC_REG))]
+       (clobber (reg:CC CC_REG))]
+       "(ZIP_HAS_DI)&&(0)"
+       "(ZIP_HAS_DI)&&(0)"
+       {
+       {
+               switch(GET_CODE(operands[1])) {
+               switch(GET_CODE(operands[1])) {
+               case EQ:        return "CLR\t%0\t; CSTORE-EQ\n\tCMP\t%H3,%H2\n\tCMP.Z\t%L3,%L2\n\tLDILO.Z\t1,%0\n";
+               case EQ:        return "CLR\t%0\t; CSTORE-EQ\n\tCMP\t%H3,%H2\n\tCMP.Z\t%L3,%L2\n\tLDILO.Z\t1,%0\n";
+               case NE:        return "CLR\t%0\t; CSTORE-NE\n\tCMP\t%H3,%H2\n\tCMP.Z\t%L3,%L2\n\tLDILO.NZ\t1,%0\n";
+               case NE:        return "CLR\t%0\t; CSTORE-NE\n\tCMP\t%H3,%H2\n\tCMP.Z\t%L3,%L2\n\tLDILO.NZ\t1,%0\n";
+               //; Missing LT
+               //; Missing LT
+               //; Missing LE
+               //; Missing LE
+               //; Missing GT
+               //; Missing GT
+               //; Missing GE
+               //; Missing GE
+               case LTU:       return "CLR\t%0\t; CSTORE-LTU\n\tCMP\t%H3,%H2\n\tCMP.Z\t%L3,%L2\n\tLDILO.C\t1,%0\n";
+               case LTU:       return "CLR\t%0\t; CSTORE-LTU\n\tCMP\t%H3,%H2\n\tCMP.Z\t%L3,%L2\n\tLDILO.C\t1,%0\n";
+               case LEU:
+               case LEU:
+                       return "CLR\t%0\t; CSTORE-LEU\n\tCMP\t%H2,%H3\n\tCMP.Z\t%L2,%L3\n\tLDILO.NC\t1,%0\n";
+                       return "CLR\t%0\t; CSTORE-LEU\n\tCMP\t%H2,%H3\n\tCMP.Z\t%L2,%L3\n\tLDILO.NC\t1,%0\n";
+               case GTU:       return "CLR\t%0\t; CSTORE-GTU\n\tCMP\t%H2,%H3\n\tCMP.Z\t%L2,%L3\n\tLDILO.C\t1,%0\n";
+               case GTU:       return "CLR\t%0\t; CSTORE-GTU\n\tCMP\t%H2,%H3\n\tCMP.Z\t%L2,%L3\n\tLDILO.C\t1,%0\n";
+               case GEU:
+               case GEU:
+                       return "CLR\t%0\t; CSTORE-GEU\n\tCMP\t%H3,%H2\n\tCMP.Z\t%L3,%L2\n\tLDILO.NC\t1,%0\n";
+                       return "CLR\t%0\t; CSTORE-GEU\n\tCMP\t%H3,%H2\n\tCMP.Z\t%L3,%L2\n\tLDILO.NC\t1,%0\n";
+               default:
+               default:
+                       gcc_unreachable();
+                       gcc_unreachable();
+               }
+               }
+       }
+       }
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
+;
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;; Comparison instructions, both compare and test
+;; Comparison instructions, both compare and test
+;;
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+;
+;
+;
+;
+(define_expand "cmpdi"
+(define_expand "cmpdi"
+       [(set (reg:CC CC_REG) (compare:CC
+       [(set (reg:CC CC_REG) (compare:CC
+               (match_operand:DI 0 "register_operand" "r")
+               (match_operand:DI 0 "register_operand" "r")
+               (match_operand:DI 1 "nonmemory_operand" "")))]
+               (match_operand:DI 1 "nonmemory_operand" "")))]
+       ""
+       ""
+       {
+       {
+               if (!REG_P(operands[1])) {
+               if (!REG_P(operands[1])) {
+                       if (can_create_pseudo_p()) {
+                       if (can_create_pseudo_p()) {
+                               //; fprintf(stderr, "Generating pseudo register for compare\n");
+                               //; fprintf(stderr, "Generating pseudo register for compare\n");
+                               rtx tmp = gen_reg_rtx(DImode);
+                               rtx tmp = gen_reg_rtx(DImode);
+                               emit_insn(gen_movdi(tmp,operands[1]));
+                               emit_insn(gen_movdi(tmp,operands[1]));
+                               operands[1] = tmp;
+                               operands[1] = tmp;
+                               emit_insn(gen_cmpdi_reg(operands[0],tmp));
+                               emit_insn(gen_cmpdi_reg(operands[0],tmp));
+                               DONE;
+                               DONE;
+                       } else FAIL;
+                       } else FAIL;
+               }
+               }
+       })
+       })
+(define_insn "cmpdi_reg"
+(define_insn "cmpdi_reg"
+       [(set (reg:CC CC_REG) (compare:CC
+       [(set (reg:CC CC_REG) (compare:CC
+               (match_operand:DI 0 "register_operand" "r")
+               (match_operand:DI 0 "register_operand" "r")
+               (match_operand:DI 1 "register_operand" "r")))]
+               (match_operand:DI 1 "register_operand" "r")))]
+       ""
+       ""
+       "CMP\t%H1,%H0
+       "CMP\t%H1,%H0
+       CMP.Z\t%L1,%L0"
+       CMP.Z\t%L1,%L0"
+       [(set_attr "ccresult" "set")])
+       [(set_attr "ccresult" "set")])
+;
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;; Conditional move instructions, since these won't accept conditional
+;; Conditional move instructions, since these won't accept conditional
+;;     execution RTL
+;;     execution RTL
+;;
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+(define_expand "cbranchdi4"
+(define_expand "cbranchdi4"
+       [(set (pc) (if_then_else
+       [(set (pc) (if_then_else
+               (match_operator 0 "ordered_comparison_operator"
+               (match_operator 0 "ordered_comparison_operator"
+                       [(match_operand:DI 1 "register_operand" "r")
+                       [(match_operand:DI 1 "register_operand" "r")
+                               (match_operand:DI 2 "nonimmediate_operand" "")])
+                               (match_operand:DI 2 "nonimmediate_operand" "")])
+                       (label_ref (match_operand 3 "" ""))
+                       (label_ref (match_operand 3 "" ""))
+                       (pc)))
+                       (pc)))
+       (clobber (reg:CC CC_REG))]
+       (clobber (reg:CC CC_REG))]
+       "(ZIP_HAS_DI)"
+       "(ZIP_HAS_DI)"
+       {
+       {
+               if (!REG_P(operands[2])) {
+               if (!REG_P(operands[2])) {
+                       if ((CONST_INT_P(operands[2]))
+                       if ((CONST_INT_P(operands[2]))
+                               &&(INTVAL(operands[2])> -(1l<<17))
+                               &&(INTVAL(operands[2])> -(1l<<17))
+                               &&(INTVAL(operands[2])<(1l<<17)-1)) {
+                               &&(INTVAL(operands[2])<(1l<<17)-1)) {
+                               emit_jump_insn(gen_cbranchdi4_internal(operands[0],
+                               emit_jump_insn(gen_cbranchdi4_internal(operands[0],
+                                       operands[1], operands[2], operands[3]));
+                                       operands[1], operands[2], operands[3]));
+                               DONE;
+                               DONE;
+                       } if (can_create_pseudo_p()) {
+                       } if (can_create_pseudo_p()) {
+                               rtx tmp = gen_reg_rtx(DImode);
+                               rtx tmp = gen_reg_rtx(DImode);
+                               emit_insn(gen_movdi(tmp, operands[2]));
+                               emit_insn(gen_movdi(tmp, operands[2]));
+                               operands[2] = tmp;
+                               operands[2] = tmp;
+                       }
+                       }
+               }
+               }
+
+
+               if (REG_P(operands[2])) {
+               if (REG_P(operands[2])) {
+                       emit_jump_insn(gen_cbranchdi4_internal(operands[0],
+                       emit_jump_insn(gen_cbranchdi4_internal(operands[0],
+                               operands[1], operands[2], operands[3]));
+                               operands[1], operands[2], operands[3]));
+                       DONE;
+                       DONE;
+               }
+               }
+       })
+       })
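The expander keeps an immediate second operand only when it fits the 18-bit signed window tested here (apparently the same range the K constraint in cbranchdi4_internal covers); anything wider is first copied into a fresh DI pseudo so the register form of the branch handles it. The range test, restated in C:

    #include <stdint.h>

    /* True when an immediate fits the 18-bit signed operand field, matching
     * the (> -(1<<17)) && (< (1<<17)-1) test in the expander. */
    static int fits_zip_imm18(int64_t v)
    {
        return (v > -(1L << 17)) && (v < (1L << 17) - 1);
    }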
+(define_insn "cbranchdi4_internal"
+(define_insn "cbranchdi4_internal"
+       [(set (pc) (if_then_else
+       [(set (pc) (if_then_else
+               (match_operator 0 "ordered_comparison_operator"
+               (match_operator 0 "ordered_comparison_operator"
+                       [(match_operand:DI 1 "register_operand" "r,r,r")
+                       [(match_operand:DI 1 "register_operand" "r,r,r")
+                               (match_operand:DI 2 "nonmemory_operand" "K,x,r")])
+                               (match_operand:DI 2 "nonmemory_operand" "K,x,r")])
+                       (label_ref (match_operand 3 "" ""))
+                       (label_ref (match_operand 3 "" ""))
+                       (pc)))
+                       (pc)))
+       (clobber (reg:CC CC_REG))]
+       (clobber (reg:CC CC_REG))]
+       "(ZIP_HAS_DI)"
+       "(ZIP_HAS_DI)"
+       {
+       {
+               return zip_cbranchdi(operands[0], operands[1], operands[2], operands[3]);
+               return zip_cbranchdi(operands[0], operands[1], operands[2], operands[3]);
+       }
+       }
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
+;
+;
+;
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;; Unimplemented (or not yet implemented) RTL Codes
+;; Unimplemented (or not yet implemented) RTL Codes
+;;
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+;
+;
+;
+;
+;
+;
+;(define_insn "addvdi4"
+;(define_insn "addvdi4"
+;      )
+;      )
+;(define_insn "subvdi4"
+;(define_insn "subvdi4"
+;      )
+;      )
+;(define_insn "mulvdi4"
+;(define_insn "mulvdi4"
+;      )
+;      )
+;(define_insn "umulvdi4"
+;(define_insn "umulvdi4"
+;      )
+;      )
+;(define_insn "umulvdi4"
+;(define_insn "umulvdi4"
+;      )
+;      )
+;(define_insn "negvdi3"
+;(define_insn "negvdi3"
+;      )
+;      )
+;
+;
+;(define_insn "maddsidi4"
+;(define_insn "maddsidi4"
+;(define_insn "umaddsidi4"
+;(define_insn "umaddsidi4"
+;(define_insn "msubsidi4"
+;(define_insn "msubsidi4"
+;(define_insn "umsubsidi4"
+;(define_insn "umsubsidi4"
+;
+;
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/config/zip/zip-float.md gcc-6.2.0-zip/gcc/config/zip/zip-float.md
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/config/zip/zip-float.md gcc-6.2.0-zip/gcc/config/zip/zip-float.md
--- gcc-6.2.0/gcc/config/zip/zip-float.md       1969-12-31 19:00:00.000000000 -0500
--- gcc-6.2.0/gcc/config/zip/zip-float.md       1969-12-31 19:00:00.000000000 -0500
+++ gcc-6.2.0-zip/gcc/config/zip/zip-float.md   2017-01-10 14:01:42.029341062 -0500
+++ gcc-6.2.0-zip/gcc/config/zip/zip-float.md   2017-01-10 14:01:42.029341062 -0500
@@ -0,0 +1,138 @@
@@ -0,0 +1,138 @@
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;; Filename:   zip-float.md
+;; Filename:   zip-float.md
+;;
+;;
+;; Project:    Zip CPU -- a small, lightweight, RISC CPU soft core
+;; Project:    Zip CPU -- a small, lightweight, RISC CPU soft core
+;;
+;;
+;; Purpose:    This is the machine description of the ZipCPU floating point
+;; Purpose:    This is the machine description of the ZipCPU floating point
+;;             unit (if installed).
+;;             unit (if installed).
+;;
+;;
+;;
+;;
+;; Creator:    Dan Gisselquist, Ph.D.
+;; Creator:    Dan Gisselquist, Ph.D.
+;;             Gisselquist Technology, LLC
+;;             Gisselquist Technology, LLC
+;;
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;; Copyright (C) 2015,2017, Gisselquist Technology, LLC
+;; Copyright (C) 2015,2017, Gisselquist Technology, LLC
+;;
+;;
+;; This program is free software (firmware): you can redistribute it and/or
+;; This program is free software (firmware): you can redistribute it and/or
+;; modify it under the terms of  the GNU General Public License as published
+;; modify it under the terms of  the GNU General Public License as published
+;; by the Free Software Foundation, either version 3 of the License, or (at
+;; by the Free Software Foundation, either version 3 of the License, or (at
+;; your option) any later version.
+;; your option) any later version.
+;;
+;;
+;; This program is distributed in the hope that it will be useful, but WITHOUT
+;; This program is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+;; FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+;; FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+;; for more details.
+;; for more details.
+;;
+;;
+;; License:    GPL, v3, as defined and found on www.gnu.org,
+;; License:    GPL, v3, as defined and found on www.gnu.org,
+;;             http://www.gnu.org/licenses/gpl.html
+;;             http://www.gnu.org/licenses/gpl.html
+;;
+;;
+;;
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;;
+;;
+;
+;
+;
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;; Floating point Op-codes
+;; Floating point Op-codes
+;;
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+;
+;
+;
+;
+(define_insn "addsf3"
+(define_insn "addsf3"
+       [(set (match_operand:SF 0 "register_operand" "=r")
+       [(set (match_operand:SF 0 "register_operand" "=r")
+               (plus:SF (match_operand:SF 1 "register_operand" "0")
+               (plus:SF (match_operand:SF 1 "register_operand" "0")
+                       (match_operand:SF 2 "register_operand" "r")))
+                       (match_operand:SF 2 "register_operand" "r")))
+       (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
+       (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
+       "(ZIP_FPU)"
+       "(ZIP_FPU)"
+       "FPADD  %2,%0"
+       "FPADD  %2,%0"
+       [(set_attr "ccresult" "unknown")])
+       [(set_attr "ccresult" "unknown")])
+(define_insn "subsf3"
+(define_insn "subsf3"
+       [(set (match_operand:SF 0 "register_operand" "=r")
+       [(set (match_operand:SF 0 "register_operand" "=r")
+               (minus:SF (match_operand:SF 1 "register_operand" "0")
+               (minus:SF (match_operand:SF 1 "register_operand" "0")
+                       (match_operand:SF 2 "register_operand" "r")))
+                       (match_operand:SF 2 "register_operand" "r")))
+       (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
+       (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
+       "(ZIP_FPU)"
+       "(ZIP_FPU)"
+       "FPSUB  %2,%0"
+       "FPSUB  %2,%0"
+       [(set_attr "ccresult" "unknown")])
+       [(set_attr "ccresult" "unknown")])
+(define_insn "mulsf3"
+(define_insn "mulsf3"
+       [(set (match_operand:SF 0 "register_operand" "=r")
+       [(set (match_operand:SF 0 "register_operand" "=r")
+               (mult:SF (match_operand:SF 1 "register_operand" "0")
+               (mult:SF (match_operand:SF 1 "register_operand" "0")
+                       (match_operand:SF 2 "register_operand" "r")))
+                       (match_operand:SF 2 "register_operand" "r")))
+       (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
+       (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
+       "(ZIP_FPU)"
+       "(ZIP_FPU)"
+       "FPMUL  %2,%0"
+       "FPMUL  %2,%0"
+       [(set_attr "ccresult" "unknown")])
+       [(set_attr "ccresult" "unknown")])
+(define_insn "divsf3"
+(define_insn "divsf3"
+       [(set (match_operand:SF 0 "register_operand" "=r")
+       [(set (match_operand:SF 0 "register_operand" "=r")
+               (div:SF (match_operand:SF 1 "register_operand" "0")
+               (div:SF (match_operand:SF 1 "register_operand" "0")
+                       (match_operand:SF 2 "register_operand" "r")))
+                       (match_operand:SF 2 "register_operand" "r")))
+       (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
+       (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
+       "(ZIP_FPU)"
+       "(ZIP_FPU)"
+       "FPDIV  %2,%0"
+       "FPDIV  %2,%0"
+       [(set_attr "ccresult" "unknown")])
+       [(set_attr "ccresult" "unknown")])
+; (define_insn "floatsisf2"
+; (define_insn "floatsisf2"
+;      [(set (match_operand:SF 0 "register_operand" "=r"
+;      [(set (match_operand:SF 0 "register_operand" "=r"
+;              (float:QI (match_operand:SF 1 "register_operand" "r"))))
+;              (float:QI (match_operand:SF 1 "register_operand" "r"))))
+;      (set (reg:CC CC_REG) (compare:CC (match_dup 1) (const_int 0)))]
+;      (set (reg:CC CC_REG) (compare:CC (match_dup 1) (const_int 0)))]
+;      "(ZIP_FPU)"
+;      "(ZIP_FPU)"
+;      "FPI2F  %1,%0")
+;      "FPI2F  %1,%0")
+; (define_insn "floatunssisf2" ... ?)
+; (define_insn "floatunssisf2" ... ?)
+; (define_insn "fix_truncsfsi2"
+; (define_insn "fix_truncsfsi2"
+;      [(set (match_operand:QI 0 "register_operand" "=r"
+;      [(set (match_operand:QI 0 "register_operand" "=r"
+;              (float:SF (match_operand:SF 1 "register_operand" "r"))))
+;              (float:SF (match_operand:SF 1 "register_operand" "r"))))
+;      (set (reg:CC CC_REG) (compare:CC (match_dup 1) (const_int 0)))]
+;      (set (reg:CC CC_REG) (compare:CC (match_dup 1) (const_int 0)))]
+;      "(ZIP_FPU)"
+;      "(ZIP_FPU)"
+;      "FPI2F  %1,%0")
+;      "FPI2F  %1,%0")
+; (define_insn "nearbyintsf2" ... ?)
+; (define_insn "nearbyintsf2" ... ?)
+; (define_insn "truncsfsi2" ... ?)
+; (define_insn "truncsfsi2" ... ?)
+(define_expand "negsf2"
+(define_expand "negsf2"
+       [(set (match_operand:SF 0 "register_operand" "=r")
+       [(set (match_operand:SF 0 "register_operand" "=r")
+               (neg:SF (match_operand:SF 1 "register_operand" "0")))
+               (neg:SF (match_operand:SF 1 "register_operand" "0")))
+       ]
+       ]
+       ""
+       ""
+       {
+       {
+               operands[0] = gen_rtx_SUBREG(SImode, operands[0], 0);
+               operands[0] = gen_rtx_SUBREG(SImode, operands[0], 0);
+               if (can_create_pseudo_p()) {
+               if (can_create_pseudo_p()) {
+                       rtx tmp = gen_reg_rtx(SImode);
+                       rtx tmp = gen_reg_rtx(SImode);
+                       emit_insn(gen_movsi_ldi(tmp,gen_int_mode(0x80000000,SImode)));
+                       emit_insn(gen_movsi_ldi(tmp,gen_int_mode(0x80000000,SImode)));
+                       emit_insn(gen_xorsi3(operands[0], operands[0], tmp));
+                       emit_insn(gen_xorsi3(operands[0], operands[0], tmp));
+                       DONE;
+                       DONE;
+               } else {
+               } else {
+                       emit_insn(gen_zip_bitrev(operands[0],operands[0]));
+                       emit_insn(gen_zip_bitrev(operands[0],operands[0]));
+                       emit_insn(gen_xorsi3(operands[0], operands[0],
+                       emit_insn(gen_xorsi3(operands[0], operands[0],
+                               gen_int_mode(1,SImode)));
+                               gen_int_mode(1,SImode)));
+                       emit_insn(gen_zip_bitrev(operands[0],operands[0]));
+                       emit_insn(gen_zip_bitrev(operands[0],operands[0]));
+                       DONE;
+                       DONE;
+               }
+               }
+       })
+       })
+(define_expand "abssf2"
+(define_expand "abssf2"
+       [(set (match_operand:SF 0 "register_operand" "=r")
+       [(set (match_operand:SF 0 "register_operand" "=r")
+               (abs:SF (match_operand:SF 1 "register_operand" "0")))
+               (abs:SF (match_operand:SF 1 "register_operand" "0")))
+       ]
+       ]
+       ""
+       ""
+       {
+       {
+               operands[0] = gen_rtx_SUBREG(SImode, operands[0], 0);
+               operands[0] = gen_rtx_SUBREG(SImode, operands[0], 0);
+               if (can_create_pseudo_p()) {
+               if (can_create_pseudo_p()) {
+                       rtx tmp = gen_reg_rtx(SImode);
+                       rtx tmp = gen_reg_rtx(SImode);
+                       emit_insn(gen_movsi_ldi(tmp,gen_int_mode(0x7fffffff,SImode)));
+                       emit_insn(gen_movsi_ldi(tmp,gen_int_mode(0x7fffffff,SImode)));
+                       emit_insn(gen_andsi3(operands[0], operands[0], tmp));
+                       emit_insn(gen_andsi3(operands[0], operands[0], tmp));
+                       DONE;
+                       DONE;
+               } else {
+               } else {
+                       emit_insn(gen_zip_bitrev(operands[0],operands[0]));
+                       emit_insn(gen_zip_bitrev(operands[0],operands[0]));
+                       emit_insn(gen_andsi3(operands[0], operands[0],
+                       emit_insn(gen_andsi3(operands[0], operands[0],
+                               gen_int_mode(-2,SImode)));
+                               gen_int_mode(-2,SImode)));
+                       emit_insn(gen_zip_bitrev(operands[0],operands[0]));
+                       emit_insn(gen_zip_bitrev(operands[0],operands[0]));
+                       DONE;
+                       DONE;
+               }
+               }
+       })
+       })
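The two expanders above implement single-precision negation and absolute value with plain integer bit operations on the IEEE-754 sign bit, so neither needs the FPU: the pseudo-register path of negsf2 XORs the word with 0x80000000, while abssf2 ANDs it with 0x7fffffff. A minimal host-side C sketch of the same bit manipulation (illustrative only; the helper names here are made up):

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    /* Flip the IEEE-754 sign bit, as the negsf2 pseudo-register path does
     * with MOV 0x80000000 + XOR. */
    static float sf_neg(float f) {
            uint32_t u;
            memcpy(&u, &f, sizeof u);       /* reinterpret the float's bits */
            u ^= 0x80000000u;
            memcpy(&f, &u, sizeof f);
            return f;
    }

    /* Clear the IEEE-754 sign bit, as abssf2 does with AND 0x7fffffff. */
    static float sf_abs(float f) {
            uint32_t u;
            memcpy(&u, &f, sizeof u);
            u &= 0x7fffffffu;
            memcpy(&f, &u, sizeof f);
            return f;
    }

    int main(void) {
            printf("%f %f\n", sf_neg(1.5f), sf_abs(-2.25f));   /* -1.500000 2.250000 */
            return 0;
    }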
+;
+;
+;
+;
+; STILL MISSING:
+; STILL MISSING:
+;
+;
+;
+;
+;
+;
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/config/zip/zip.h gcc-6.2.0-zip/gcc/config/zip/zip.h
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/config/zip/zip.h gcc-6.2.0-zip/gcc/config/zip/zip.h
--- gcc-6.2.0/gcc/config/zip/zip.h      1969-12-31 19:00:00.000000000 -0500
--- gcc-6.2.0/gcc/config/zip/zip.h      1969-12-31 19:00:00.000000000 -0500
+++ gcc-6.2.0-zip/gcc/config/zip/zip.h  2017-03-03 09:30:57.671304970 -0500
+++ gcc-6.2.0-zip/gcc/config/zip/zip.h  2018-06-05 22:22:38.581639662 -0400
@@ -0,0 +1,4114 @@
@@ -0,0 +1,4114 @@
+////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+//
+//
+// Filename:   gcc/config/zip/zip.h
+// Filename:   gcc/config/zip/zip.h
+//
+//
+// Project:    Zip CPU backend for the GNU Compiler Collection
+// Project:    Zip CPU backend for the GNU Compiler Collection
+//
+//
+// Purpose:
+// Purpose:
+//
+//
+// Creator:    Dan Gisselquist, Ph.D.
+// Creator:    Dan Gisselquist, Ph.D.
+//             Gisselquist Technology, LLC
+//             Gisselquist Technology, LLC
+//
+//
+////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+//
+//
+// Copyright (C) 2016-2017, Gisselquist Technology, LLC
+// Copyright (C) 2016-2017, Gisselquist Technology, LLC
+//
+//
+// This program is free software (firmware): you can redistribute it and/or
+// This program is free software (firmware): you can redistribute it and/or
+// modify it under the terms of  the GNU General Public License as published
+// modify it under the terms of  the GNU General Public License as published
+// by the Free Software Foundation, either version 3 of the License, or (at
+// by the Free Software Foundation, either version 3 of the License, or (at
+// your option) any later version.
+// your option) any later version.
+//
+//
+// This program is distributed in the hope that it will be useful, but WITHOUT
+// This program is distributed in the hope that it will be useful, but WITHOUT
+// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// for more details.
+// for more details.
+//
+//
+// You should have received a copy of the GNU General Public License along
+// You should have received a copy of the GNU General Public License along
+// with this program.  (It's in the $(ROOT)/doc directory, run make with no
+// with this program.  (It's in the $(ROOT)/doc directory, run make with no
+// target there if the PDF file isn't present.)  If not, see
+// target there if the PDF file isn't present.)  If not, see
+// <http://www.gnu.org/licenses/> for a copy.
+// <http://www.gnu.org/licenses/> for a copy.
+//
+//
+// License:    GPL, v3, as defined and found on www.gnu.org,
+// License:    GPL, v3, as defined and found on www.gnu.org,
+//             http://www.gnu.org/licenses/gpl.html
+//             http://www.gnu.org/licenses/gpl.html
+//
+//
+//
+//
+////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+#ifndef        GCC_ZIP_H
+#ifndef        GCC_ZIP_H
+#define        GCC_ZIP_H
+#define        GCC_ZIP_H
+
+
+
+
+//
+//
+//
+//
+// Zip CPU configuration defines
+// Zip CPU configuration defines
+//
+//
+//
+//
+#define        ZIP_USER        0        // Assume we are in supervisor mode
+#define        ZIP_USER        0        // Assume we are in supervisor mode
+#define        ZIP_MULTIPLY    1       // Assume we have multiply instructions
+#define        ZIP_MULTIPLY    1       // Assume we have multiply instructions
+#define        ZIP_DIVIDE      1       // Assume we have divide instructions
+#define        ZIP_DIVIDE      1       // Assume we have divide instructions
+#define        ZIP_FPU         0        // Assume we have no floating point instructions
+#define        ZIP_FPU         0        // Assume we have no floating point instructions
+#define        ZIP_PIPELINED   1       // Assume our instructions are pipelined
+#define        ZIP_PIPELINED   1       // Assume our instructions are pipelined
+#define        ZIP_THUMB       1       // Assume we have the THUMB feature
+#define        ZIP_THUMB       1       // Assume we have the THUMB feature
+#define        ZIP_ATOMIC      (ZIP_PIPELINED)
+#define        ZIP_ATOMIC      (ZIP_PIPELINED)
+#define        ZIP_PIC         0        // Attempting to produce PIC code, with GOT
+#define        ZIP_PIC         0        // Attempting to produce PIC code, with GOT
+#define        ZIP_HAS_DI      1
+#define        ZIP_HAS_DI      1
+// Should we use the peephole optimizations?
+// Should we use the peephole optimizations?
+#define        ZIP_PEEPHOLE    1       // 0 means no peephole optimizations.
+#define        ZIP_PEEPHOLE    1       // 0 means no peephole optimizations.
+#define        ZIP_NOT_AN_INSTRUCTION  "NAI\t;// This is not an instruction.  Getting here implies a compiler error.  Please contact support.\n"
+#define        ZIP_NOT_AN_INSTRUCTION  "NAI\t;// This is not an instruction.  Getting here implies a compiler error.  Please contact support.\n"
+
+
+// Zip has 16 registers in each of its two modes (supervisor and user).
+// Zip has 16 registers in each of its two modes (supervisor and user).
+//     Register 15 is the program counter (PC)
+//     Register 15 is the program counter (PC)
+//     Register 14 is the condition codes (CC)
+//     Register 14 is the condition codes (CC)
+//     Register 13 is the stack pointer   (SP)
+//     Register 13 is the stack pointer   (SP)
+//     Register 12 (may be) the Global Offset Table pointer (GOT)
+//     Register 12 (may be) the Global Offset Table pointer (GOT)
+//     Register  0 (may be) the return address pointer
+//     Register  0 (may be) the return address pointer
+// Registers 16-31 may only be used in supervisor mode.
+// Registers 16-31 may only be used in supervisor mode.
+#define        is_ZIP_GENERAL_REG(REGNO)       ((REGNO)<13)
+#define        is_ZIP_GENERAL_REG(REGNO)       ((REGNO)<13)
+#define        is_ZIP_REG(REGNO)               ((REGNO)<33)
+#define        is_ZIP_REG(REGNO)               ((REGNO)<33)
+
+
+#define        zip_AP_PSEUDO   32
+#define        zip_AP_PSEUDO   32
+#define        zip_PC          15
+#define        zip_PC          15
+#define        zip_CC          14
+#define        zip_CC          14
+#define        zip_SP          13
+#define        zip_SP          13
+#define        zip_FP          12
+#define        zip_FP          12
+#define        zip_GOT         11
+#define        zip_GOT         11
+// #define     zip_AP          10      // We're using a PSEUDO REG instead
+// #define     zip_AP          10      // We're using a PSEUDO REG instead
+#define        zip_R5          5       // Used for the static chain, if it exists
+#define        zip_R5          5       // Used for the static chain, if it exists
+#define        zip_R1          1
+#define        zip_R1          1
+#define        zip_R0          0
+#define        zip_R0          0
+#define        zip_LR          zip_R0  // Link Register is also R0
+#define        zip_LR          zip_R0  // Link Register is also R0
+
+
+#define        ZIP_FIRST_ARG_REGNO     1
+#define        ZIP_FIRST_ARG_REGNO     1
+#define        ZIP_LAST_ARG_REGNO      5
+#define        ZIP_LAST_ARG_REGNO      5
+#define        NUM_ARG_REGS            (ZIP_LAST_ARG_REGNO-ZIP_FIRST_ARG_REGNO+1)
+#define        NUM_ARG_REGS            (ZIP_LAST_ARG_REGNO-ZIP_FIRST_ARG_REGNO+1)
+#define        MAX_PARM_REGS           (ZIP_LAST_ARG_REGNO-ZIP_FIRST_ARG_REGNO+1)
+#define        MAX_PARM_REGS           (ZIP_LAST_ARG_REGNO-ZIP_FIRST_ARG_REGNO+1)
+
+
+/* The overall framework of an assembler file */
+/* The overall framework of an assembler file */
+
+
+#define        ASM_COMMENT_START       ";"
+#define        ASM_COMMENT_START       ";"
+#define        ASM_APP_ON              ""
+#define        ASM_APP_ON              ""
+#define        ASM_APP_OFF             ""
+#define        ASM_APP_OFF             ""
+
+
+#define        FILE_ASM_OP             "\t.file\n"
+#define        FILE_ASM_OP             "\t.file\n"
+
+
+/* Output and Generation of Labels */
+/* Output and Generation of Labels */
+#define        GLOBAL_ASM_OP           "\t.global\t"
+#define        GLOBAL_ASM_OP           "\t.global\t"
+
+
+#define        BITS_PER_WORD           32
+#define        BITS_PER_WORD           32
+
+
+
+
+/* A C compound statement to output to stdio stream STREAM the assembler syntax
+/* A C compound statement to output to stdio stream STREAM the assembler syntax
+ * for an instruction operand X. */
+ * for an instruction operand X. */
+#define        PRINT_OPERAND(STREAM, X, CODE)  zip_print_operand(STREAM, X, CODE)
+#define        PRINT_OPERAND(STREAM, X, CODE)  zip_print_operand(STREAM, X, CODE)
+#define        PRINT_OPERAND_ADDRESS(STREAM, X) zip_print_operand_address(STREAM, X)
+#define        PRINT_OPERAND_ADDRESS(STREAM, X) zip_print_operand_address(STREAM, X)
+
+
+/* Passing arguments in registers */
+/* Passing arguments in registers */
+#define        FUNCTION_VALUE_REGNO_P(REGNO)   ((REGNO)==zip_R1)
+#define        FUNCTION_VALUE_REGNO_P(REGNO)   ((REGNO)==zip_R1)
+
+
+/* Define how to find the value returned by a function.  VALTYPE is the data
+/* Define how to find the value returned by a function.  VALTYPE is the data
+ * type of the value (as a tree).  If the precise function being called is known
+ * type of the value (as a tree).  If the precise function being called is known
+ * FUNC is its FUNCTION_DECL; otherwise, FUNC is 0. */
+ * FUNC is its FUNCTION_DECL; otherwise, FUNC is 0. */
+#define        FUNCTION_VALUE(VALTYPE, FUNC) gen_rtx_REG(TYPE_MODE(VALTYPE), zip_R1)
+#define        FUNCTION_VALUE(VALTYPE, FUNC) gen_rtx_REG(TYPE_MODE(VALTYPE), zip_R1)
+
+
+/* Define how to find the value returned by a library function assuming the
+/* Define how to find the value returned by a library function assuming the
+ * value has mode MODE.
+ * value has mode MODE.
+ */
+ */
+#define        LIBCALL_VALUE(MODE)     gen_rtx_REG(MODE, zip_R1)
+#define        LIBCALL_VALUE(MODE)     gen_rtx_REG(MODE, zip_R1)
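Together with ZIP_FIRST_ARG_REGNO/ZIP_LAST_ARG_REGNO above, these macros describe a convention in which up to five scalar arguments travel in R1-R5 and the result comes back in R1. A hedged C illustration of what that means for a caller (the register assignments in the comment are inferred from the defines above; the sixth argument would presumably spill to the stack):

    /* With ZIP_FIRST_ARG_REGNO = 1 and ZIP_LAST_ARG_REGNO = 5, a call like the
     * one below would pass a..e in R1..R5, pass f outside the argument
     * registers (on the stack), and read the return value back out of R1,
     * the register named by FUNCTION_VALUE and FUNCTION_VALUE_REGNO_P. */
    extern int combine(int a, int b, int c, int d, int e, int f);

    int caller(void) {
            return combine(1, 2, 3, 4, 5, 6);
    }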
+
+
+
+
+/* STACK AND CALLING */
+/* STACK AND CALLING */
+
+
+
+
+/* Define this macro as a C expression that is nonzero for registers that are
+/* Define this macro as a C expression that is nonzero for registers that are
+ * used by the epilogue or the return pattern.  The stack and frame pointer
+ * used by the epilogue or the return pattern.  The stack and frame pointer
+ * registers are already assumed to be used as needed.
+ * registers are already assumed to be used as needed.
+ */
+ */
+#define        EPILOGUE_USES(R)        (R == RETURN_ADDRESS_REGNUM)
+#define        EPILOGUE_USES(R)        (R == RETURN_ADDRESS_REGNUM)
+
+
+
+
+/* The best alignment to use in cases where we have a choice. */
+/* The best alignment to use in cases where we have a choice. */
+#define        FASTEST_ALIGNMENT       BITS_PER_WORD
+#define        FASTEST_ALIGNMENT       BITS_PER_WORD
+
+
+/* Generate Code for Profiling
+/* Generate Code for Profiling
+ */
+ */
+#define        FUNCTION_PROFILER(FILE,LABELNO)         (abort(), 0)
+#define        FUNCTION_PROFILER(FILE,LABELNO)         (abort(), 0)
+
+
+
+
+/* A C expression which is nonzero if register number NUM is suitable for use
+/* A C expression which is nonzero if register number NUM is suitable for use
+ * as an index register in operand addresses.
+ * as an index register in operand addresses.
+ */
+ */
+#define        REGNO_OK_FOR_INDEX_P(NUM)       0
+#define        REGNO_OK_FOR_INDEX_P(NUM)       0
+
+
+
+
+/* A C compound statement with a conditional 'goto LABEL;' executed if X
+/* A C compound statement with a conditional 'goto LABEL;' executed if X
+ * (an RTX) is a legitimate memory address on the target machine for a memory
+ * (an RTX) is a legitimate memory address on the target machine for a memory
+ * operand of mode MODE.
+ * operand of mode MODE.
+ */
+ */
+/* 17.03 Controlling the Compilation Driver, 'gcc' */
+/* 17.03 Controlling the Compilation Driver, 'gcc' */
+// DRIVER_SELF_SPECS
+// DRIVER_SELF_SPECS
+// OPTION_DEFAULT_SPECS
+// OPTION_DEFAULT_SPECS
+// CPP_SPEC
+// CPP_SPEC
+// CPLUSPLUS_CPP_SPEC
+// CPLUSPLUS_CPP_SPEC
+// CC1_SPEC
+// CC1_SPEC
+// CC1PLUS_SPEC
+// CC1PLUS_SPEC
+/* ASM_SPEC ... A C string constant that tells the GCC driver program options
+/* ASM_SPEC ... A C string constant that tells the GCC driver program options
+ * to pass to the assembler.  It can also specify how to translate options you
+ * to pass to the assembler.  It can also specify how to translate options you
+ * give to GCC into options for GCC to pass to the assembler.  See the file
+ * give to GCC into options for GCC to pass to the assembler.  See the file
+ * 'sun3.h' for an example of this.
+ * 'sun3.h' for an example of this.
+ *
+ *
+ * Do not define this macro if it does not need to do anything.
+ * Do not define this macro if it does not need to do anything.
+ */
+ */
+// #undef      ASM_SPEC
+// #undef      ASM_SPEC
+// ASM_FINAL_SPEC
+// ASM_FINAL_SPEC
+// ASM_NEEDS_DASH_FOR_PIPED_INPUT
+// ASM_NEEDS_DASH_FOR_PIPED_INPUT
+
+
+/* LINK_SPEC ... A C string constant that tells the GCC driver program options
+/* LINK_SPEC ... A C string constant that tells the GCC driver program options
+ * to pass to the linker.  It can also specify how to translate options you give
+ * to pass to the linker.  It can also specify how to translate options you give
+ * to GCC into options for GCC to pass to the linker.
+ * to GCC into options for GCC to pass to the linker.
+ *
+ *
+ * Do not define this macro if it does not need to do anything.
+ * Do not define this macro if it does not need to do anything.
+ */
+ */
+
+
+/* LIB_SPEC ... Another C string constant very much like LINK_SPEC.  The
+/* LIB_SPEC ... Another C string constant very much like LINK_SPEC.  The
+ * difference between the two is that LIB_SPEC is used at the end of the
+ * difference between the two is that LIB_SPEC is used at the end of the
+ * command given to the linker.
+ * command given to the linker.
+ *
+ *
+ * If this macro is not defined, a default is provided that loads the standard
+ * If this macro is not defined, a default is provided that loads the standard
+ * C library from the usual place.  See 'gcc.c'.
+ * C library from the usual place.  See 'gcc.c'.
+ *
+ *
+ * ZipCPU ... We need this at its default value.  It is necessary to build
+ * ZipCPU ... We need this at its default value.  It is necessary to build
+ * the various GCC libraries that depend upon one another and newlib.  Hence,
+ * the various GCC libraries that depend upon one another and newlib.  Hence,
+ * as an example we *must* include the library containing strnlen, or libgfortran
+ * as an example we *must* include the library containing strnlen, or libgfortran
+ * will not build.  Alternatively, we might figure out how to pass arguments to the
+ * will not build.  Alternatively, we might figure out how to pass arguments to the
+ * compiler via the configure process ... but we'll just allow this to have its
+ * compiler via the configure process ... but we'll just allow this to have its
+ * default value for now.
+ * default value for now.
+ */
+ */
+// #undef      LIB_SPEC
+// #undef      LIB_SPEC
+// #define     LIB_SPEC        "%{!g:-lc} %{g:-lg} -lzip"
+// #define     LIB_SPEC        "%{!g:-lc} %{g:-lg} -lzip"
+// #define     LIB_SPEC        ""
+// #define     LIB_SPEC        ""
+
+
+/* LIBGCC_SPEC ... Another C string constant that tells the GCC driver program
+/* LIBGCC_SPEC ... Another C string constant that tells the GCC driver program
+ * how and when to place a reference to 'libgcc.a' into the linker command
+ * how and when to place a reference to 'libgcc.a' into the linker command
+ * line.  This constant is placed both before and after the value of LIB_SPEC.
+ * line.  This constant is placed both before and after the value of LIB_SPEC.
+ *
+ *
+ * If this macro is not defined, the GCC driver provides a default that passes
+ * If this macro is not defined, the GCC driver provides a default that passes
+ * the string '-lgcc' to the linker.
+ * the string '-lgcc' to the linker.
+ */
+ */
+#undef LIBGCC_SPEC
+// #undef      LIBGCC_SPEC
+#define        LIBGCC_SPEC     ""
+// #define     LIBGCC_SPEC     ""
+
+
+/* REAL_LIBGCC_SPEC ... By default, if ENABLE_SHARED_LIBGCC is defined, the
+/* REAL_LIBGCC_SPEC ... By default, if ENABLE_SHARED_LIBGCC is defined, the
+ * LIBGCC_SPEC is not directly used by the driver program but is instead
+ * LIBGCC_SPEC is not directly used by the driver program but is instead
+ * modified to refer to different versions of 'libgcc.a' depending on the
+ * modified to refer to different versions of 'libgcc.a' depending on the
+ * values of the command line flags '-static', '-shared', '-static-libgcc',
+ * values of the command line flags '-static', '-shared', '-static-libgcc',
+ * and '-shared-libgcc'.  On targets where these modifications are
+ * and '-shared-libgcc'.  On targets where these modifications are
+ * inappropriate, define REAL_LIBGCC_SPEC instead.  REAL_LIBGCC_SPEC tells the
+ * inappropriate, define REAL_LIBGCC_SPEC instead.  REAL_LIBGCC_SPEC tells the
+ * driver how to place a reference to 'libgcc' on the link command line, but
+ * driver how to place a reference to 'libgcc' on the link command line, but
+ * unlike LIBGCC_SPEC, it is used unmodified.
+ * unlike LIBGCC_SPEC, it is used unmodified.
+ */
+ */
+#define        REAL_LIBGCC_SPEC        ""
+#define        REAL_LIBGCC_SPEC        ""
+
+
+// USE_LD_AS_NEEDED
+// USE_LD_AS_NEEDED
+// LINK_EH_SPEC
+// LINK_EH_SPEC
+
+
+/* STARTFILE_SPEC ... Another C string constant used much like LINK_SPEC.  The
+/* STARTFILE_SPEC ... Another C string constant used much like LINK_SPEC.  The
+ * difference between the two is that STARTFILE_SPEC is used at the very
+ * difference between the two is that STARTFILE_SPEC is used at the very
+ * beginning of the command given to the linker.
+ * beginning of the command given to the linker.
+ *
+ *
+ * If this macro is not defined, a default is provided that loads the standard
+ * If this macro is not defined, a default is provided that loads the standard
+ * C startup file from the usual place.  See 'gcc.c'
+ * C startup file from the usual place.  See 'gcc.c'
+ */
+ */
+#undef STARTFILE_SPEC
+#undef STARTFILE_SPEC
+#define        STARTFILE_SPEC  ""
+#define        STARTFILE_SPEC  ""
+
+
+/* ENDFILE_SPEC ... Another C string constant used much like LINK_SPEC.  The
+/* ENDFILE_SPEC ... Another C string constant used much like LINK_SPEC.  The
+ * difference between the two is that ENDFILE_SPEC is used at the very end
+ * difference between the two is that ENDFILE_SPEC is used at the very end
+ * of the command given to the linker.
+ * of the command given to the linker.
+ *
+ *
+ * Do not define this macro if it does not do anything.
+ * Do not define this macro if it does not do anything.
+ */
+ */
+// #undef      ENDFILE_SPEC
+// #undef      ENDFILE_SPEC
+// #define     ENDFILE_SPEC    ""
+// #define     ENDFILE_SPEC    ""
+
+
+// THREAD_MODEL_SPEC
+// THREAD_MODEL_SPEC
+// SYSROOT_SUFFIX_SPEC
+// SYSROOT_SUFFIX_SPEC
+// SYSROOT_HEADERS_SUFFIX_SPEC
+// SYSROOT_HEADERS_SUFFIX_SPEC
+// EXTRA_SPECS
+// EXTRA_SPECS
+// LINK_LIBGCC_SPECIAL_1
+// LINK_LIBGCC_SPECIAL_1
+// LINK_GCC_C_SEQUENCE_SPEC
+// LINK_GCC_C_SEQUENCE_SPEC
+// LINK_COMMAND_SPEC
+// LINK_COMMAND_SPEC
+// TARGET_ALWAYS_STRIP_DOTDOT
+// TARGET_ALWAYS_STRIP_DOTDOT
+// MULTILIB_DEFAULTS
+// MULTILIB_DEFAULTS
+// RELATIVE_PREFIX_NOT_LINKDIR
+// RELATIVE_PREFIX_NOT_LINKDIR
+// MD_EXEC_PREFIX
+// MD_EXEC_PREFIX
+// STANDARD_STARTFILE_PREFIX
+// STANDARD_STARTFILE_PREFIX
+// STANDARD_STARTFILE_PREFIX_1
+// STANDARD_STARTFILE_PREFIX_1
+// STANDARD_STARTFILE_PREFIX_2
+// STANDARD_STARTFILE_PREFIX_2
+// MD_STARTFILE_PREFIX
+// MD_STARTFILE_PREFIX
+// MD_STARTFILE_PREFIX_1
+// MD_STARTFILE_PREFIX_1
+// INIT_ENVIRONMENT
+// INIT_ENVIRONMENT
+// LOCAL_INCLUDE_DIR
+// LOCAL_INCLUDE_DIR
+#undef LOCAL_INCLUDE_DIR
+#undef LOCAL_INCLUDE_DIR
+
+
+// NATIVE_SYSTEM_HEADER_COMPONENT
+// NATIVE_SYSTEM_HEADER_COMPONENT
+// INCLUDE_DEFAULTS
+// INCLUDE_DEFAULTS
+
+
+/* 17.03 Run-time Target Specification */
+/* 17.03 Run-time Target Specification */
+
+
+/* TARGET_CPU_CPP_BUILTINS() ... This function-like macro expands to a block of
+/* TARGET_CPU_CPP_BUILTINS() ... This function-like macro expands to a block of
+ * code that defines built-in preprocessor macros and assertions for the target
+ * code that defines built-in preprocessor macros and assertions for the target
+ * CPU, using the functions builtin_define, builtin_define_std, and
+ * CPU, using the functions builtin_define, builtin_define_std, and
+ * builtin_assert.  When the front end calls this macro it provides a trailing
+ * builtin_assert.  When the front end calls this macro it provides a trailing
+ * semicolon, and since it has finished command line option processing your
+ * semicolon, and since it has finished command line option processing your
+ * code can use those results freely.
+ * code can use those results freely.
+ *
+ *
+ * ZipCPU --- We should probably capture in this macro what capabilities the
+ * ZipCPU --- We should probably capture in this macro what capabilities the
+ * command line parameters we've been given indicate that our CPU has.  That
+ * command line parameters we've been given indicate that our CPU has.  That
+ * way, code can be adjusted depending upon the CPU's capabilities.
+ * way, code can be adjusted depending upon the CPU's capabilities.
+ */
+ */
+#define        TARGET_CPU_CPP_BUILTINS()                               \
+#define        TARGET_CPU_CPP_BUILTINS()                               \
+       { builtin_define("__ZIPCPU__");                         \
+       { builtin_define("__ZIPCPU__");                         \
 
+       builtin_define("__BIG_ENDIAN__");                       \
+       builtin_define("__IEEE_BIG_ENDIAN");                    \
+       builtin_define("__IEEE_BIG_ENDIAN");                    \
+       builtin_define("_LDBL_EQ_DBL");                         \
+       builtin_define("_LDBL_EQ_DBL");                         \
+       if (ZIP_FPU) builtin_define("__ZIPFPU__");              \
+       if (ZIP_FPU) builtin_define("__ZIPFPU__");              \
+       else builtin_define("_SOFT_FLOAT");                     \
+       else builtin_define("_SOFT_FLOAT");                     \
+       if (ZIP_ATOMIC) builtin_define("__ZIPATOMIC__");        \
+       if (ZIP_ATOMIC) builtin_define("__ZIPATOMIC__");        \
+       }
+       }
+       // If (zip_param_has_fpu)  builtin_define("__ZIPFPU__");
+       // If (zip_param_has_fpu)  builtin_define("__ZIPFPU__");
+       // If (zip_param_has_div)  builtin_define("__ZIPDIV__");
+       // If (zip_param_has_div)  builtin_define("__ZIPDIV__");
+       // If (zip_param_has_mpy)  builtin_define("__ZIPMPY__");
+       // If (zip_param_has_mpy)  builtin_define("__ZIPMPY__");
+       // If (zip_param_has_lock) builtin_define("__ZIPLOCK__");
+       // If (zip_param_has_lock) builtin_define("__ZIPLOCK__");
+       // If (zip_param_supervisor) builtin_define("__ZIPUREGS__");
+       // If (zip_param_supervisor) builtin_define("__ZIPUREGS__");
+       // If (we support int64s) builtin_define("___int64_t_defined");
+       // If (we support int64s) builtin_define("___int64_t_defined");
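Code compiled for this target can key off the macros predefined above. A small, hedged example of selecting between an FPU path and a soft-float fallback (the arithmetic itself is only illustrative):

    #include <stdio.h>

    /* __ZIPCPU__ is always predefined by TARGET_CPU_CPP_BUILTINS above;
     * __ZIPFPU__ only appears when the port is built with ZIP_FPU enabled,
     * otherwise _SOFT_FLOAT is defined instead. */
    float halve(float x) {
    #if defined(__ZIPCPU__) && defined(__ZIPFPU__)
            return x * 0.5f;        /* expect a hardware FP multiply */
    #else
            return x / 2.0f;        /* soft-float: becomes a library call */
    #endif
    }

    int main(void) {
            printf("%f\n", halve(3.0f));
            return 0;
    }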
+
+
+/* TARGET_OS_CPP_BUILTINS() ... Similarly to TARGET_CPU_CPP_BUILTINS but this
+/* TARGET_OS_CPP_BUILTINS() ... Similarly to TARGET_CPU_CPP_BUILTINS but this
+ * macro is optional and is used for the target operating system instead.
+ * macro is optional and is used for the target operating system instead.
+ */
+ */
+
+
+/* Option macros: (we need to define these eventually ... )
+/* Option macros: (we need to define these eventually ... )
+ *
+ *
+ *     TARGET_HANDLE_OPTION
+ *     TARGET_HANDLE_OPTION
+ *     TARGET_HANDLE_C_OPTION
+ *     TARGET_HANDLE_C_OPTION
+ *     TARGET_OBJ_CONSTRUCT_STRING_OBJECT
+ *     TARGET_OBJ_CONSTRUCT_STRING_OBJECT
+ *     TARGET_OBJ_DECLARE_UNRESOLVED_CLASS_REFERENCE
+ *     TARGET_OBJ_DECLARE_UNRESOLVED_CLASS_REFERENCE
+ *     TARGET_OBJ_DECLARE_CLASS_DEFINITION
+ *     TARGET_OBJ_DECLARE_CLASS_DEFINITION
+ *     TARGET_STRING_OBJECT_REF_TYPE_P
+ *     TARGET_STRING_OBJECT_REF_TYPE_P
+ *     TARGET_CHECK_STRING_OBJECT_FORMAT_ARG
+ *     TARGET_CHECK_STRING_OBJECT_FORMAT_ARG
+ *     TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE(VOID)
+ *     TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE(VOID)
+ *     C_COMMON_OVERRIDE_OPTIONS
+ *     C_COMMON_OVERRIDE_OPTIONS
+ *     TARGET_OPTION_OPTIMIZATION_TABLE
+ *     TARGET_OPTION_OPTIMIZATION_TABLE
+ *     TARGET_OPTION_INIT_STRUCT
+ *     TARGET_OPTION_INIT_STRUCT
+ *     TARGET_OPTION_DEFAULT_PARAMS
+ *     TARGET_OPTION_DEFAULT_PARAMS
+ */
+ */
+
+
+/* SWITCHABLE_TARGET
+/* SWITCHABLE_TARGET
+ *
+ *
+ * Zip CPU doesn't need this, so it defaults to zero.  No need to change it
+ * Zip CPU doesn't need this, so it defaults to zero.  No need to change it
+ * here.
+ * here.
+ */
+ */
+
+
+/* TARGET_FLOAT_EXCEPTIONS_ROUNDING_SUPPORTED_P(VOID) ... Returns true if the
+/* TARGET_FLOAT_EXCEPTIONS_ROUNDING_SUPPORTED_P(VOID) ... Returns true if the
+ * target supports IEEE 754 floating-point exceptions and rounding modes, false
+ * target supports IEEE 754 floating-point exceptions and rounding modes, false
+ * otherwise.  This is intended to relate to the float and double types, but not
+ * otherwise.  This is intended to relate to the float and double types, but not
+ * necessarily "long double".  By default, returns true if the adddf3
+ * necessarily "long double".  By default, returns true if the adddf3
+ * instruction pattern is available and false otherwise, on the assumption that
+ * instruction pattern is available and false otherwise, on the assumption that
+ * hardware floating point supports exceptions and rounding modes but software
+ * hardware floating point supports exceptions and rounding modes but software
+ * floating point does not.
+ * floating point does not.
+ *
+ *
+ * ZipCPU floating point is barely going to be functional; I doubt it will
+ * ZipCPU floating point is barely going to be functional; I doubt it will
+ * support all of these bells and whistles when full functionality is even
+ * support all of these bells and whistles when full functionality is even
+ * achieved.  Therefore, we won't support these modes.  However, we can't just
+ * achieved.  Therefore, we won't support these modes.  However, we can't just
+ * set this to zero, so let's come back to this.
+ * set this to zero, so let's come back to this.
+ */
+ */
+// #warning "Wrong answer encoded to date"
 
+// #undef      TARGET_FLOAT_EXCEPTIONS_ROUNDING_SUPPORTED_P
+// #undef      TARGET_FLOAT_EXCEPTIONS_ROUNDING_SUPPORTED_P
+// #define     TARGET_FLOAT_EXCEPTIONS_ROUNDING_SUPPORTED_P(X) 0
+// #define     TARGET_FLOAT_EXCEPTIONS_ROUNDING_SUPPORTED_P(X) 0
+
+
+/* 17.04 Defining data structures for per-function information */
+/* 17.04 Defining data structures for per-function information */
+
+
+/* INIT_EXPANDERS ... Macro called to initialize any target specific
+/* INIT_EXPANDERS ... Macro called to initialize any target specific
+ * information.  This macro is called once per function, before generation of
+ * information.  This macro is called once per function, before generation of
+ * any RTL has begun.  The intention is to allow the initialization of the
+ * any RTL has begun.  The intention is to allow the initialization of the
+ * function pointer init_machine_status.
+ * function pointer init_machine_status.
+ */
+ */
+// #warning "I may need to define this to handle function return addresses ..."
+// #warning "I may need to define this to handle function return addresses ..."
+
+
+
+
+/* 17.05 Storage Layout */
+/* 17.05 Storage Layout */
+
+
+
+
+/* Storage Layout */
+/* Storage Layout */
+#define        BITS_BIG_ENDIAN         0        // MSB has highest number
+#define        BITS_BIG_ENDIAN         0        // MSB has highest number
+#define        BYTES_BIG_ENDIAN        1       // 1 if MSB is lowest number
+#define        BYTES_BIG_ENDIAN        1       // 1 if MSB is lowest number
+#define        WORDS_BIG_ENDIAN        1       // 1 if MSW is lowest number
+#define        WORDS_BIG_ENDIAN        1       // 1 if MSW is lowest number
+#define        FLOAT_WORDS_BIG_ENDIAN  1
+#define        FLOAT_WORDS_BIG_ENDIAN  1
+#define        UNITS_PER_WORD          4       // Storage units in a word, pwr of 2:1-8
+#define        UNITS_PER_WORD          4       // Storage units in a word, pwr of 2:1-8
+/* POINTER_SIZE ... Width of a pointer in bits.  You must specify a value no
+/* POINTER_SIZE ... Width of a pointer in bits.  You must specify a value no
+ * wider than the width of Pmode.  If it is not equal to the width of Pmode,
+ * wider than the width of Pmode.  If it is not equal to the width of Pmode,
+ * you must define POINTERS_EXTEND_UNSIGNED. If you do not specify a value the
+ * you must define POINTERS_EXTEND_UNSIGNED. If you do not specify a value the
+ * default is BITS_PER_WORD.
+ * default is BITS_PER_WORD.
+ *
+ *
+ * ZipCPU --- All of our pointers are 32-bits, the width of our address bus.
+ * ZipCPU --- All of our pointers are 32-bits, the width of our address bus.
+ */
+ */
+#define        POINTER_SIZE            32      // Ptr width in bits
+#define        POINTER_SIZE            32      // Ptr width in bits
+
+
+/* POINTERS_EXTEND_UNSIGNED ... A C expression that determines how pointers
+/* POINTERS_EXTEND_UNSIGNED ... A C expression that determines how pointers
+ * should be extended from ptr_mode to either Pmode or word_mode.  It is greater
+ * should be extended from ptr_mode to either Pmode or word_mode.  It is greater
+ * than zero if pointers should be zero-extended, zero if they should be sign
+ * than zero if pointers should be zero-extended, zero if they should be sign
+ * extended, and negative if some other conversion is needed.  In the last case,
+ * extended, and negative if some other conversion is needed.  In the last case,
+ * the extension is done by the target's ptr_extend instruction.
+ * the extension is done by the target's ptr_extend instruction.
+ *
+ *
+ * You need not define this macro if the ptr_mode, Pmode, and word_mode are all
+ * You need not define this macro if the ptr_mode, Pmode, and word_mode are all
+ * the same width.
+ * the same width.
+ *
+ *
+ * ZipCPU --- We don't need to define this macro, since Pmode, ptr_mode, and
+ * ZipCPU --- We don't need to define this macro, since Pmode, ptr_mode, and
+ * our word_mode (SImode) all have the same width.
+ * our word_mode (SImode) all have the same width.
+ */
+ */
+// #define     POINTERS_EXTEND_UNSIGNED        1
+// #define     POINTERS_EXTEND_UNSIGNED        1
+
+
+/* PROMOTE_MODE(m,unsignedp,type) ... A macro to update m and unsignedp when an
+/* PROMOTE_MODE(m,unsignedp,type) ... A macro to update m and unsignedp when an
+ * object whose type is type and which has the specified mode and signedness is
+ * object whose type is type and which has the specified mode and signedness is
+ * to be stored in a register.  This macro is only called when type is a scalar
+ * to be stored in a register.  This macro is only called when type is a scalar
+ * type.
+ * type.
+ *
+ *
+ * On most RISC machines, which only have operations that operate on a full
+ * On most RISC machines, which only have operations that operate on a full
+ * register, define this macro to set m to word_mode if m is an integer mode
+ * register, define this macro to set m to word_mode if m is an integer mode
+ * narrower than BITS_PER_WORD.  In most cases, only integer modes should be
+ * narrower than BITS_PER_WORD.  In most cases, only integer modes should be
+ * widened because wider precision floating-point operations are usually more
+ * widened because wider precision floating-point operations are usually more
+ * expensive than their narrower counterparts.
+ * expensive than their narrower counterparts.
+ *
+ *
+ * For most machines, the macro definition does not change unsignedp.  However,
+ * For most machines, the macro definition does not change unsignedp.  However,
+ * some machines have instructions that preferentially handle either signed or
+ * some machines have instructions that preferentially handle either signed or
+ * unsigned quantities of certain modes.  For example, on the DEC Alpha, 32-bit
+ * unsigned quantities of certain modes.  For example, on the DEC Alpha, 32-bit
+ * loads from memory and 32-bit add instructions sign-extend the result to
+ * loads from memory and 32-bit add instructions sign-extend the result to
+ * 64-bits. On such machines, set unsignedp according to which kind of extension
+ * 64-bits. On such machines, set unsignedp according to which kind of extension
+ * is more efficient.
+ * is more efficient.
+ *
+ *
+ * Do not define this macro if it would never modify m.
+ * Do not define this macro if it would never modify m.
+ *
+ *
+ * ZipCPU ---
+ * ZipCPU ---
+ */
+ */
+#define        PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \
+#define        PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \
+       do {                                                    \
+       do {                                                    \
+               if ((GET_MODE_CLASS(MODE) == MODE_INT)          \
+               if ((GET_MODE_CLASS(MODE) == MODE_INT)          \
+                               && (GET_MODE_SIZE(MODE) < 4)) { \
+                               && (GET_MODE_SIZE(MODE) < 4)) { \
+                       (MODE) = SImode;                        \
+                       (MODE) = SImode;                        \
+                       (UNSIGNEDP) = 1;                        \
+                       (UNSIGNEDP) = 1;                        \
+               }                                               \
+               }                                               \
+       } while(0)
+       } while(0)
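The practical effect of this definition is that sub-word integers are held in full 32-bit registers and widened as unsigned values, so narrow arithmetic is done in SImode and only narrowed again at the final store. A brief illustrative C example of the kind of code this applies to:

    #include <stdint.h>

    /* Under the PROMOTE_MODE above, a and b live in 32-bit registers,
     * zero-extended; the addition happens in SImode and only the final
     * conversion truncates the result back to 16 bits. */
    uint16_t add16(uint16_t a, uint16_t b) {
            return (uint16_t)(a + b);
    }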
+
+
+// TARGET_PROMOTE_FUNCTION_MODE
+// TARGET_PROMOTE_FUNCTION_MODE
+#define        TARGET_PROMOTE_FUNCTION_MODE    default_promote_function_mode_always_promote
+#define        TARGET_PROMOTE_FUNCTION_MODE    default_promote_function_mode_always_promote
+
+
+/* PARM_BOUNDARY ... Normal alignment required for function parameters on the
+/* PARM_BOUNDARY ... Normal alignment required for function parameters on the
+ * stack, in bits.  All stack parameters receive at least this much alignment
+ * stack, in bits.  All stack parameters receive at least this much alignment
+ * regardless of data type.  On most machines, this is the same as the size of
+ * regardless of data type.  On most machines, this is the same as the size of
+ * an integer.
+ * an integer.
+ */
+ */
+#define        PARM_BOUNDARY   32
+#define        PARM_BOUNDARY   32
+
+
+/* STACK_BOUNDARY ... Define this macro to the minimum alignment enforced by
+/* STACK_BOUNDARY ... Define this macro to the minimum alignment enforced by
+ * hardware for the stack pointer on this machine.  The definition is a C
+ * hardware for the stack pointer on this machine.  The definition is a C
+ * expression for the desired alignment (measured in bits).  This value is used
+ * expression for the desired alignment (measured in bits).  This value is used
+ * as a default if PREFERRED_STACK_BOUNDARY is not defined.  On most machines,
+ * as a default if PREFERRED_STACK_BOUNDARY is not defined.  On most machines,
+ * this should be the same as PARM_BOUNDARY.
+ * this should be the same as PARM_BOUNDARY.
+ */
+ */
+#define        STACK_BOUNDARY  PARM_BOUNDARY
+#define        STACK_BOUNDARY  PARM_BOUNDARY
+
+
+/* PREFERRED_STACK_BOUNDARY ... Define this ... */
+/* PREFERRED_STACK_BOUNDARY ... Define this ... */
+#define        PREFERRED_STACK_BOUNDARY        STACK_BOUNDARY
+#define        PREFERRED_STACK_BOUNDARY        STACK_BOUNDARY
+
+
+/* INCOMING_STACK_BOUNDARY ... Define this macro if the incoming stack boundary
+/* INCOMING_STACK_BOUNDARY ... Define this macro if the incoming stack boundary
+ * may be different from PREFERRED_STACK_BOUNDARY.  This macro must evaluate
+ * may be different from PREFERRED_STACK_BOUNDARY.  This macro must evaluate
+ * to a value equal to or larger than STACK_BOUNDARY.
+ * to a value equal to or larger than STACK_BOUNDARY.
+ */
+ */
+#define        INCOMING_STACK_BOUNDARY STACK_BOUNDARY
+#define        INCOMING_STACK_BOUNDARY STACK_BOUNDARY
+
+
+/* FUNCTION_BOUNDARY ... Alignment required for a function entry point, in bits.
+/* FUNCTION_BOUNDARY ... Alignment required for a function entry point, in bits.
+ */
+ */
+#define        FUNCTION_BOUNDARY       32
+#define        FUNCTION_BOUNDARY       32
+
+
+/* BIGGEST_ALIGNMENT ... Biggest alignment that any data type can require on
+/* BIGGEST_ALIGNMENT ... Biggest alignment that any data type can require on
+ * this machine, in bits.  Note that this is not the biggest alignment that is
+ * this machine, in bits.  Note that this is not the biggest alignment that is
+ * supported, just the biggest alignment that, when violated, may cause a fault.
+ * supported, just the biggest alignment that, when violated, may cause a fault.
+ */
+ */
+#define BIGGEST_ALIGNMENT      32
+#define BIGGEST_ALIGNMENT      32
+
+
+/* MALLOC_ABI_ALIGNMENT
+/* MALLOC_ABI_ALIGNMENT
+ */
+ */
+
+
+/* ATTRIBUTE_ALIGNED_VALUE
+/* ATTRIBUTE_ALIGNED_VALUE
+ */
+ */
+
+
+/* MINIMUM_ATOMIC_ALIGNMENT ... If defined, the smallest alignment, that can be
+/* MINIMUM_ATOMIC_ALIGNMENT ... If defined, the smallest alignment, that can be
+ * given to an object that can be referenced in one operation, without
+ * given to an object that can be referenced in one operation, without
+ * disturbing any nearby object.  Normally, this is BITS_PER_UNIT, but may be
+ * disturbing any nearby object.  Normally, this is BITS_PER_UNIT, but may be
+ * larger on machines that don't have byte or halfword store operations.
+ * larger on machines that don't have byte or halfword store operations.
+ */
+ */
+#define        MINIMUM_ATOMIC_ALIGNMENT        BITS_PER_UNIT
+#define        MINIMUM_ATOMIC_ALIGNMENT        BITS_PER_UNIT
+
+
+/* BIGGEST_FIELD_ALIGNMENT ... Biggest alignment that any structure or union
+/* BIGGEST_FIELD_ALIGNMENT ... Biggest alignment that any structure or union
+ * field can require on this machine, in bits.  If defined, this overrides
+ * field can require on this machine, in bits.  If defined, this overrides
+ * BIGGEST_ALIGNMENT for structure and union fields only, unless the field
+ * BIGGEST_ALIGNMENT for structure and union fields only, unless the field
+ * alignment has been set by the __attribute__((aligned(n))) construct.
+ * alignment has been set by the __attribute__((aligned(n))) construct.
+ */
+ */
+#define        BIGGEST_FIELD_ALIGNMENT BITS_PER_WORD
+#define        BIGGEST_FIELD_ALIGNMENT BITS_PER_WORD
+
+
+/* ADJUST_FIELD_ALIGN(FIELD, COMPUTED) ... An expression for the alignment of
+/* ADJUST_FIELD_ALIGN(FIELD, COMPUTED) ... An expression for the alignment of
+ * a structure field FIELD if the alignment computed in the usual way (including
+ * a structure field FIELD if the alignment computed in the usual way (including
+ * applying BIGGEST_ALIGNMENT and BIGGEST_FIELD_ALIGNMENT) is COMPUTED.
+ * applying BIGGEST_ALIGNMENT and BIGGEST_FIELD_ALIGNMENT) is COMPUTED.
+ */
+ */
+// #define     ADJUST_FIELD_ALIGN(A,B) BITS_PER_WORD
+// #define     ADJUST_FIELD_ALIGN(A,B) BITS_PER_WORD
+
+
+/* MAX_STACK_ALIGNMENT ... Biggest stack alignment guaranteed by the backend.
+/* MAX_STACK_ALIGNMENT ... Biggest stack alignment guaranteed by the backend.
+ * Use this macro to specify the maximum alignment of a variable on the stack.
+ * Use this macro to specify the maximum alignment of a variable on the stack.
+ *
+ *
+ * If not defined, the default value is STACK_BOUNDARY
+ * If not defined, the default value is STACK_BOUNDARY
+ */
+ */
+// #define     MAX_STACK_ALIGNMENT     BITS_PER_WORD
+// #define     MAX_STACK_ALIGNMENT     BITS_PER_WORD
+
+
+/* MAX_OFILE_ALIGNMENT
+/* MAX_OFILE_ALIGNMENT
+ */
+ */
+
+
+/* DATA_ALIGNMENT(TYPE, BASIC-ALIGN) ... If defined, a C expression to compute
+/* DATA_ALIGNMENT(TYPE, BASIC-ALIGN) ... If defined, a C expression to compute
+ * the alignment for a variable in the static store.  TYPE is the data type, and
+ * the alignment for a variable in the static store.  TYPE is the data type, and
+ * BASIC-ALIGN is the alignment that the object would ordinarily have.  The
+ * BASIC-ALIGN is the alignment that the object would ordinarily have.  The
+ * value of this macro is used instead of that alignment to align the object.
+ * value of this macro is used instead of that alignment to align the object.
+ *
+ *
+ * If this macro is not defined, then BASIC-ALIGN is used.
+ * If this macro is not defined, then BASIC-ALIGN is used.
+ *
+ *
+ * ZipCPU --
+ * ZipCPU --
+ */
+ */
+// #define     DATA_ALIGNMENT(TYPE, ALIGN)     BITS_PER_WORD
+// #define     DATA_ALIGNMENT(TYPE, ALIGN)     BITS_PER_WORD
+
+
+
+
+/* DATA_ABI_ALIGNMENT(TYPE,BASIC-ALIGN)
+/* DATA_ABI_ALIGNMENT(TYPE,BASIC-ALIGN)
+ */
+ */
+
+
+/* CONSTANT_ALIGNMENT(CONST, BASIC-ALIGN) ... If defined, a C expression to
+/* CONSTANT_ALIGNMENT(CONST, BASIC-ALIGN) ... If defined, a C expression to
+ * compute the alignment given to a constant that is being placed in memory.
+ * compute the alignment given to a constant that is being placed in memory.
+ * CONST is the constant and BASIC-ALIGN is the alignment that the object
+ * CONST is the constant and BASIC-ALIGN is the alignment that the object
+ * would ordinarily have.  The value of this macro is used instead of that
+ * would ordinarily have.  The value of this macro is used instead of that
+ * alignment to align the object.
+ * alignment to align the object.
+ *
+ *
+ * If this macro is not defined, then BASIC-ALIGN is used.
+ * If this macro is not defined, then BASIC-ALIGN is used.
+ *
+ *
+ * ZipCPU -- in hindsight, if this macro is not defined then the compiler is
+ * ZipCPU -- in hindsight, if this macro is not defined then the compiler is
+ * broken.  We'll define it as above.
+ * broken.  We'll define it as above.
+ *
+ *
+ */
+ */
+#define        CONSTANT_ALIGNMENT(EXP, ALIGN)  BITS_PER_WORD
+#define        CONSTANT_ALIGNMENT(EXP, ALIGN)  BITS_PER_WORD
+
+
+/* LOCAL_ALIGNMENT(TYPE,BASIC-ALIGN) ... If defined ...
+/* LOCAL_ALIGNMENT(TYPE,BASIC-ALIGN) ... If defined ...
+ */
+ */
+// #define     LOCAL_ALIGNMENT(TYP,ALIGN)      BITS_PER_WORD
+// #define     LOCAL_ALIGNMENT(TYP,ALIGN)      BITS_PER_WORD
+
+
+/* TARGET_VECTOR_ALIGNMENT
+/* TARGET_VECTOR_ALIGNMENT
+ */
+ */
+
+
+/* STACK_SLOT_ALIGNMENT
+/* STACK_SLOT_ALIGNMENT
+ */
+ */
+#define        STACK_SLOT_ALIGNMENT(T,M,B)     BITS_PER_WORD
+#define        STACK_SLOT_ALIGNMENT(T,M,B)     BITS_PER_WORD
+
+
+/* LOCAL_DECL_ALIGNMENT(DECL)
+/* LOCAL_DECL_ALIGNMENT(DECL)
+ */
+ */
+// #define     LOCAL_DECL_ALIGNMENT(DECL)      BITS_PER_WORD
+// #define     LOCAL_DECL_ALIGNMENT(DECL)      BITS_PER_WORD
+
+
+/* MINIMUM_ALIGNMENT
+/* MINIMUM_ALIGNMENT
+ */
+ */
+// #define     MINIMUM_ALIGNMENT(EXP,MOD,ALIGN)        BITS_PER_WORD
+// #define     MINIMUM_ALIGNMENT(EXP,MOD,ALIGN)        BITS_PER_WORD
+
+
+/* EMPTY_FIELD_BOUNDARY
+/* EMPTY_FIELD_BOUNDARY
+ * Alignment of field after 'int : 0' in a structure.
+ * Alignment of field after 'int : 0' in a structure.
+ */
+ */
+#define        EMPTY_FIELD_BOUNDARY    BITS_PER_WORD
+#define        EMPTY_FIELD_BOUNDARY    BITS_PER_WORD
+
+
+/* STRUCTURE_SIZE_BOUNDARY
+/* STRUCTURE_SIZE_BOUNDARY
+ * ZipCPU -- Every structure's size must be a multiple of 32 bits.
+ * ZipCPU -- Every structure's size must be a multiple of 32 bits.
+ */
+ */
+#define        STRUCTURE_SIZE_BOUNDARY BITS_PER_WORD
+#define        STRUCTURE_SIZE_BOUNDARY BITS_PER_WORD
+
+
+/* STRICT_ALIGNMENT ... Set this nonzero if move instructions will actually
+/* STRICT_ALIGNMENT ... Set this nonzero if move instructions will actually
+ * fail to work when given unaligned data.  If instructions will merely go
+ * fail to work when given unaligned data.  If instructions will merely go
+ * slower in that case, define this macro as 0.
+ * slower in that case, define this macro as 0.
+ *
+ *
+ * ZipCPU --
+ * ZipCPU --
+ */
+ */
+#define        STRICT_ALIGNMENT        1
+#define        STRICT_ALIGNMENT        1
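Because STRICT_ALIGNMENT is 1, the compiler must assume a misaligned SImode access can fault rather than merely run slowly, so portable code should not cast a byte pointer to a wider type. A short, hedged sketch of the safe pattern:

    #include <stdint.h>
    #include <string.h>

    /* Read a 32-bit value from an arbitrarily aligned buffer.  memcpy lets
     * the compiler pick an access sequence that is legal for a strict-
     * alignment target, instead of a possibly faulting misaligned load. */
    uint32_t load_u32(const unsigned char *p) {
            uint32_t v;
            memcpy(&v, p, sizeof v);
            return v;
    }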
+
+
+/* PCC_BITFIELD_TYPE_MATTERS -- define this if you wish to imitate the way
+/* PCC_BITFIELD_TYPE_MATTERS -- define this if you wish to imitate the way
+ * other C compilers handle alignment of bit-fields and the structures that
+ * other C compilers handle alignment of bit-fields and the structures that
+ * contain them.
+ * contain them.
+ *
+ *
+ * The behavior is that the type written for a named bit-field (int, short, or
+ * The behavior is that the type written for a named bit-field (int, short, or
+ * other integer type) imposes an alignment for the entire structure, as if the
+ * other integer type) imposes an alignment for the entire structure, as if the
+ * structure really did contain an ordinary field of that type.  In addition,
+ * structure really did contain an ordinary field of that type.  In addition,
+ * the bit-field is placed within the structure so that it would fit within
+ * the bit-field is placed within the structure so that it would fit within
+ * such a field, not crossing a boundary for it.
+ * such a field, not crossing a boundary for it.
+ *
+ *
+ * Thus, on most machines, a named bit-field whose type is written as int would
+ * Thus, on most machines, a named bit-field whose type is written as int would
+ * not cross a four-byte boundary, and would force four-byte alignment for the
+ * not cross a four-byte boundary, and would force four-byte alignment for the
+ * whole structure.  (The alignment used may not be four bytes; it is controlled
+ * whole structure.  (The alignment used may not be four bytes; it is controlled
+ * by other alignment parameters.)
+ * by other alignment parameters.)
+ *
+ *
+ * An unnamed bit-field will not affect the alignment of the containing
+ * An unnamed bit-field will not affect the alignment of the containing
+ * structure.
+ * structure.
+ *
+ *
+ * If the macro is defined, its definition should be a C expression; a nonzero
+ * If the macro is defined, its definition should be a C expression; a nonzero
+ * value for the expression enables this behavior.
+ * value for the expression enables this behavior.
+ * Look at the fundamental type that is used for a bit-field and use that to
+ * Look at the fundamental type that is used for a bit-field and use that to
+ * impose alignment on the enclosing structure.  struct s{int a:8}; should
+ * impose alignment on the enclosing structure.  struct s{int a:8}; should
+ * have the same alignment as 'int', not 'char'.
+ * have the same alignment as 'int', not 'char'.
+ */
+ */
+#undef PCC_BITFIELD_TYPE_MATTERS
+#undef PCC_BITFIELD_TYPE_MATTERS
+#define        PCC_BITFIELD_TYPE_MATTERS       0
+#define        PCC_BITFIELD_TYPE_MATTERS       0
+
+
+/* MAX_FIXED_MODE_SIZE ... An integer expression for the size in bits of the
+/* MAX_FIXED_MODE_SIZE ... An integer expression for the size in bits of the
+ * largest integer machine mode that should actually be used.  All integer
+ * largest integer machine mode that should actually be used.  All integer
+ * machine modes of this size or smaller can be used for structures and unions
+ * machine modes of this size or smaller can be used for structures and unions
+ * with the appropriate sizes.  If this macro is undefined,
+ * with the appropriate sizes.  If this macro is undefined,
+ * GET_MODE_BITSIZE(DImode) is assumed.
+ * GET_MODE_BITSIZE(DImode) is assumed.
+ *
+ *
+ * ZipCPU ... GET_MODE_BITSIZE(DImode) will be 64, and this really is the
+ * ZipCPU ... GET_MODE_BITSIZE(DImode) will be 64, and this really is the
+ * size in bits of the largest integer machine mode.  However, that's the case
+ * size in bits of the largest integer machine mode.  However, that's the case
+ * with most DI implementations: A long is two words, spliced together.  We'd
+ * with most DI implementations: A long is two words, spliced together.  We'd
+ * like to support that eventually, but we need to get there.  Hence, let's use
+ * like to support that eventually, but we need to get there.  Hence, let's use
+ * a compile-time flag (ZIP_HAS_DI) that we can enable when we're ready.
+ * a compile-time flag (ZIP_HAS_DI) that we can enable when we're ready.
+ */
+ */
+#undef MAX_FIXED_MODE_SIZE
+#undef MAX_FIXED_MODE_SIZE
+#if ZIP_HAS_DI
+#if ZIP_HAS_DI
+# define MAX_FIXED_MODE_SIZE   GET_MODE_BITSIZE(DImode)
+# define MAX_FIXED_MODE_SIZE   GET_MODE_BITSIZE(DImode)
+#else
+#else
+# define MAX_FIXED_MODE_SIZE   GET_MODE_BITSIZE(SImode)
+# define MAX_FIXED_MODE_SIZE   GET_MODE_BITSIZE(SImode)
+#endif
+#endif
+
+
+
+
+
+
+/* 17.06 Layout of Source Language Data Types */
+/* 17.06 Layout of Source Language Data Types */
+
+
+#undef LONG_TYPE_SIZE
+#undef LONG_TYPE_SIZE
+#undef LONG_LONG_TYPE_SIZE
+#undef LONG_LONG_TYPE_SIZE
+//
+//
+#define        LONG_TYPE_SIZE  64
+#define        LONG_TYPE_SIZE  64
+#define        LONG_LONG_TYPE_SIZE     64
+#define        LONG_LONG_TYPE_SIZE     64
+// SHORT_FRACT_TYPE_SIZE
+// SHORT_FRACT_TYPE_SIZE
+// LONG_FRACT_TYPE_SIZE
+// LONG_FRACT_TYPE_SIZE
+// LONG_LONG_FRACT_TYPE_SIZE
+// LONG_LONG_FRACT_TYPE_SIZE
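With LONG_TYPE_SIZE and LONG_LONG_TYPE_SIZE both set to 64, and assuming int keeps its BITS_PER_WORD (32-bit) default and BITS_PER_UNIT its 8-bit default, the basic integer sizes come out as below; a purely illustrative compile-time check:

    /* Sizes implied by the definitions above, in 8-bit units. */
    _Static_assert(sizeof(int) == 4,       "int is one 32-bit word");
    _Static_assert(sizeof(long) == 8,      "LONG_TYPE_SIZE is 64");
    _Static_assert(sizeof(long long) == 8, "LONG_LONG_TYPE_SIZE is 64");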
+
+
+/* LIBGCC2_GNU_PREFIX ... This macro corresponds to the TARGET_GNU_PREFIX target
+/* LIBGCC2_GNU_PREFIX ... This macro corresponds to the TARGET_GNU_PREFIX target
+ * hook and should be defined if that hook is overridden to be true.  It causes
+ * hook and should be defined if that hook is overridden to be true.  It causes
+ * function names in libgcc to be changed to use a __gnu_ prefix for their name
+ * function names in libgcc to be changed to use a __gnu_ prefix for their name
+ * rather than the default __.  A port which uses this macro should also arrange
+ * rather than the default __.  A port which uses this macro should also arrange
+ * to use t-gnu-prefix in the libgcc config.host.
+ * to use t-gnu-prefix in the libgcc config.host.
+ *
+ *
+ * ZipCPU -- I see no reason to define and therefore change this behavior.
+ * ZipCPU -- I see no reason to define and therefore change this behavior.
+ */
+ */
+
+
+/* TARGET_FLT_EVAL_METHOD ... A C expression for the value for FLT_EVAL_METHOD
+/* TARGET_FLT_EVAL_METHOD ... A C expression for the value for FLT_EVAL_METHOD
+ * in float.h, assuming, if applicable, that the floating-point control word
+ * in float.h, assuming, if applicable, that the floating-point control word
+ * is in its default state.  If you do not define this macro the value of
+ * is in its default state.  If you do not define this macro the value of
+ * FLT_EVAL_METHOD will be zero.
+ * FLT_EVAL_METHOD will be zero.
+ *
+ *
+ * ZipCPU --- ???
+ * ZipCPU --- ???
+ */
+ */
+
+
+/* WIDEST_HARDWARE_FP_SIZE ... A C expression for the size in bits of the widest
+/* WIDEST_HARDWARE_FP_SIZE ... A C expression for the size in bits of the widest
+ * floating-point format supported by the hardware.  If you define this macro,
+ * floating-point format supported by the hardware.  If you define this macro,
+ * you must specify a value less than or equal to the value of LONG_DOUBLE_...
+ * you must specify a value less than or equal to the value of LONG_DOUBLE_...
+ * If you do not define this macro, the value of LONG_DOUBLE_TYPE_SIZE is the
+ * If you do not define this macro, the value of LONG_DOUBLE_TYPE_SIZE is the
+ * default.
+ * default.
+ *
+ *
+ * ZipCPU supports 32-bit IEEE floats--IF THE SUPPORT IS COMPILED IN!  This
+ * ZipCPU supports 32-bit IEEE floats--IF THE SUPPORT IS COMPILED IN!  This
+ * really needs to be determined, then, based upon a compile time parameter
+ * really needs to be determined, then, based upon a compile time parameter
+ * where the one compiling the code states whether or not the H/W even has
+ * where the one compiling the code states whether or not the H/W even has
+ * floating point support.
+ * floating point support.
+ *
+ *
+ * For now, we'll assume it does--but once we implement GCC parameters, we'll
+ * For now, we'll assume it does--but once we implement GCC parameters, we'll
+ * need to change this.
+ * need to change this.
+ */
+ */
+#undef WIDEST_HARDWARE_FP_SIZE
+#undef WIDEST_HARDWARE_FP_SIZE
+// #warning "Definition needs to change if no FPU present"
+// #warning "Definition needs to change if no FPU present"
+#define        WIDEST_HARDWARE_FP_SIZE FLOAT_TYPE_SIZE
+#define        WIDEST_HARDWARE_FP_SIZE FLOAT_TYPE_SIZE
+
+
+/* DEFAULT_SIGNED_CHAR ... An expression whose value is 1 or 0, according to
+/* DEFAULT_SIGNED_CHAR ... An expression whose value is 1 or 0, according to
+ * whether the type char should be signed or unsigned by default.  The user
+ * whether the type char should be signed or unsigned by default.  The user
+ * can always override this default with the options -fsigned-char and
+ * can always override this default with the options -fsigned-char and
+ * -funsigned-char.
+ * -funsigned-char.
+ *
+ *
+ * ZipCPU--Our hardware produces unsigned characters (and shorts) by default,
+ * ZipCPU--Our hardware produces unsigned characters (and shorts) by default,
+ * so let's stick to that.
+ * so let's stick to that.
+ */
+ */
+#define        DEFAULT_SIGNED_CHAR     0
+#define        DEFAULT_SIGNED_CHAR     0
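Since DEFAULT_SIGNED_CHAR is 0, a plain char behaves like unsigned char unless the user passes -fsigned-char. A tiny example whose output depends on that default:

    #include <stdio.h>

    int main(void) {
            char c = (char)0xFF;
            /* With this target's unsigned plain char, c is 255 and the test is
             * false; compiled with -fsigned-char, c would be -1 instead. */
            if (c < 0)
                    printf("char is signed here\n");
            else
                    printf("char is unsigned here\n");
            return 0;
    }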
+
+
+/* TARGET_DEFAULT_SHORT_ENUMS(VOID) ... This target hook should return true if
+/* TARGET_DEFAULT_SHORT_ENUMS(VOID) ... This target hook should return true if
+ * the compiler should give an enum type only as many bytes as it takes to
+ * the compiler should give an enum type only as many bytes as it takes to
+ * represent the range of possible values of that type.  It should return
+ * represent the range of possible values of that type.  It should return
+ * false if all enum types should be allocated like int.
+ * false if all enum types should be allocated like int.
+ *
+ *
+ * The default is to return false.  This is what the ZipCPU needs, so we won't
+ * The default is to return false.  This is what the ZipCPU needs, so we won't
+ * override it.
+ * override it.
+ */
+ */
+
+
+/* SIZE_TYPE ... A C expression for a string describing the name of the data
+/* SIZE_TYPE ... A C expression for a string describing the name of the data
+ * type to use for size values.  The typedef name size_t is defined using the
+ * type to use for size values.  The typedef name size_t is defined using the
+ * contents of the string.
+ * contents of the string.
+ *
+ *
+ * If you don't define this macro, the default is "long unsigned int".  Since
+ * If you don't define this macro, the default is "long unsigned int".  Since
+ * long is 64 bits on the ZipCPU while all native values are 32 bits,
+ * long is 64 bits on the ZipCPU while all native values are 32 bits,
+ * we override the default with "unsigned int" below.
+ * we override the default with "unsigned int" below.
+ */
+ */
+#define        SIZE_TYPE       "unsigned int"
+#define        SIZE_TYPE       "unsigned int"
+
+
+/* SIZETYPE ... GCC defines internal types for expressions dealing with size.
+/* SIZETYPE ... GCC defines internal types for expressions dealing with size.
+ * This macro is a C expression for a string describing the name of the data
+ * This macro is a C expression for a string describing the name of the data
+ * type from which the precision of sizetype is extracted.  The string has the
+ * type from which the precision of sizetype is extracted.  The string has the
+ * same restrictions as SIZE_TYPE string.  If you don't define this macro, the
+ * same restrictions as SIZE_TYPE string.  If you don't define this macro, the
+ * default is SIZE_TYPE --- which seems good enough for us.
+ * default is SIZE_TYPE --- which seems good enough for us.
+ */
+ */
+
+
+/* PTRDIFF_TYPE ... A C expression for a string describing the name of the data
+/* PTRDIFF_TYPE ... A C expression for a string describing the name of the data
+ * type to use for the result of subtracting two pointers.  The typedef name
+ * type to use for the result of subtracting two pointers.  The typedef name
+ * ptrdiff_t is defined using the contents of the string.  See SIZE_TYPE for
+ * ptrdiff_t is defined using the contents of the string.  See SIZE_TYPE for
+ * more information.
+ * more information.
+ *
+ *
+ * The default is "long int" which for the ZipCPU is 32-bits---still good enough
+ * The default is "long int" which for the ZipCPU is 32-bits---still good enough
+ * for us.
+ * for us.
+ */
+ */
+#define        PTRDIFF_TYPE    "int"
+#define        PTRDIFF_TYPE    "int"
+
+
+/* WCHAR_TYPE ... A C expression for a string describing the name of the data
+/* WCHAR_TYPE ... A C expression for a string describing the name of the data
+ * type to use for wide characters.  The typedef name wchar_t is defined using
+ * type to use for wide characters.  The typedef name wchar_t is defined using
+ * the contents of  the string.  If you don't define this macro, the default is
+ * the contents of  the string.  If you don't define this macro, the default is
+ * 'int'--good enough for ZipCPU.
+ * 'int'--good enough for ZipCPU.
+ */
+ */
+// #define     WCHAR_TYPE      "int"
+// #define     WCHAR_TYPE      "int"
+
+
+/* WCHAR_TYPE_SIZE ... A C expression for the size in bits of the data type for
+/* WCHAR_TYPE_SIZE ... A C expression for the size in bits of the data type for
+ * wide characters.  This is used in cpp, which cannot make use of WCHAR_TYPE.
+ * wide characters.  This is used in cpp, which cannot make use of WCHAR_TYPE.
+ *
+ *
+ * ZipCPU -- This defaults to INT_TYPE_SIZE, which will work for us
+ * ZipCPU -- This defaults to INT_TYPE_SIZE, which will work for us
+ */
+ */
+// #define     WCHAR_TYPE_SIZE 32
+// #define     WCHAR_TYPE_SIZE 32
+
+
+/* WINT_TYPE ... A C expression for a string describing the name of the data
+/* WINT_TYPE ... A C expression for a string describing the name of the data
+ * type to use for wide characters passed to printf and returned from getwc.
+ * type to use for wide characters passed to printf and returned from getwc.
+ * The typedef name wint_t is defined using the contents of the string.  See
+ * The typedef name wint_t is defined using the contents of the string.  See
+ * SIZE_TYPE for more information.
+ * SIZE_TYPE for more information.
+ *
+ *
+ * ZipCPU -- The default should work well enough for us.
+ * ZipCPU -- The default should work well enough for us.
+ */
+ */
+// #define     WINT_TYPE       "int"
+// #define     WINT_TYPE       "int"
+
+
+/* INTMAX_TYPE ... A C expression for a string describing the name of the
+/* INTMAX_TYPE ... A C expression for a string describing the name of the
+ * data type that can represent any value of any standard or extended signed
+ * data type that can represent any value of any standard or extended signed
+ * integer type.  The typedef name intmax_t is defined using the contents of
+ * integer type.  The typedef name intmax_t is defined using the contents of
+ * the string.
+ * the string.
+ *
+ *
+ * If you don't define this macro, the default is the first of "int", "long int"
+ * If you don't define this macro, the default is the first of "int", "long int"
+ * or "long long int" that has as much precision as "long long int".
+ * or "long long int" that has as much precision as "long long int".
+ */
+ */
+
+
+/* UINTMAX_TYPE ... same as INTMAX_TYPE, but for unsigned
+/* UINTMAX_TYPE ... same as INTMAX_TYPE, but for unsigned
+ */
+ */
+
+
+#undef SIG_ATOMIC_TYPE
+#undef SIG_ATOMIC_TYPE
+#if (ZIP_ATOMIC != 0)
+#if (ZIP_ATOMIC != 0)
+#define        SIG_ATOMIC_TYPE "int"
+#define        SIG_ATOMIC_TYPE "int"
+#else
+#else
+#define        SIG_ATOMIC_TYPE NULL    // We have no atomic types, but registers
+#define        SIG_ATOMIC_TYPE NULL    // We have no atomic types, but registers
+#endif
+#endif
+#undef INT8_TYPE
+#undef INT8_TYPE
+#define        INT8_TYPE               "char"
+#define        INT8_TYPE               "char"
+#undef INT16_TYPE
+#undef INT16_TYPE
+#define        INT16_TYPE              "short int"
+#define        INT16_TYPE              "short int"
+#undef INT32_TYPE
+#undef INT32_TYPE
+#define        INT32_TYPE              "int"
+#define        INT32_TYPE              "int"
+#undef UINT8_TYPE
+#undef UINT8_TYPE
+#define        UINT8_TYPE              "unsigned char"
+#define        UINT8_TYPE              "unsigned char"
+#undef UINT16_TYPE
+#undef UINT16_TYPE
+#define        UINT16_TYPE             "short unsigned int"
+#define        UINT16_TYPE             "short unsigned int"
+#undef UINT32_TYPE
+#undef UINT32_TYPE
+#define        UINT32_TYPE             "unsigned int"
+#define        UINT32_TYPE             "unsigned int"
+#undef INT_LEAST8_TYPE
+#undef INT_LEAST8_TYPE
+#define        INT_LEAST8_TYPE         "char"
+#define        INT_LEAST8_TYPE         "char"
+#undef INT_LEAST16_TYPE
+#undef INT_LEAST16_TYPE
+#define        INT_LEAST16_TYPE        "short int"
+#define        INT_LEAST16_TYPE        "short int"
+#undef INT_LEAST32_TYPE
+#undef INT_LEAST32_TYPE
+#define        INT_LEAST32_TYPE        "int"
+#define        INT_LEAST32_TYPE        "int"
+#undef UINT_LEAST8_TYPE
+#undef UINT_LEAST8_TYPE
+#define        UINT_LEAST8_TYPE        "unsigned char"
+#define        UINT_LEAST8_TYPE        "unsigned char"
+#undef UINT_LEAST16_TYPE
+#undef UINT_LEAST16_TYPE
+#define        UINT_LEAST16_TYPE       "short unsigned int"
+#define        UINT_LEAST16_TYPE       "short unsigned int"
+#undef UINT_LEAST32_TYPE
+#undef UINT_LEAST32_TYPE
+#define        UINT_LEAST32_TYPE       "unsigned int"
+#define        UINT_LEAST32_TYPE       "unsigned int"
+#undef INT_FAST8_TYPE
+#undef INT_FAST8_TYPE
+#define        INT_FAST8_TYPE          "char"
+#define        INT_FAST8_TYPE          "char"
+#undef INT_FAST16_TYPE
+#undef INT_FAST16_TYPE
+#define        INT_FAST16_TYPE         "short int"
+#define        INT_FAST16_TYPE         "short int"
+#undef INT_FAST32_TYPE
+#undef INT_FAST32_TYPE
+#define        INT_FAST32_TYPE         "int"
+#define        INT_FAST32_TYPE         "int"
+#undef UINT_FAST8_TYPE
+#undef UINT_FAST8_TYPE
+#define        UINT_FAST8_TYPE         "unsigned char"
+#define        UINT_FAST8_TYPE         "unsigned char"
+#undef UINT_FAST16_TYPE
+#undef UINT_FAST16_TYPE
+#define        UINT_FAST16_TYPE        "short unsigned int"
+#define        UINT_FAST16_TYPE        "short unsigned int"
+#undef UINT_FAST32_TYPE
+#undef UINT_FAST32_TYPE
+#define        UINT_FAST32_TYPE        "unsigned int"
+#define        UINT_FAST32_TYPE        "unsigned int"
+#undef INTPTR_TYPE
+#undef INTPTR_TYPE
+#define        INTPTR_TYPE             "unsigned int"
+#define        INTPTR_TYPE             "unsigned int"
+#undef UINTPTR_TYPE
+#undef UINTPTR_TYPE
+#define        UINTPTR_TYPE            "unsigned int"
+#define        UINTPTR_TYPE            "unsigned int"
+
+
+#undef INT64_TYPE
+#undef INT64_TYPE
+#undef UINT64_TYPE
+#undef UINT64_TYPE
+#undef INT_LEAST64_TYPE
+#undef INT_LEAST64_TYPE
+#undef UINT_LEAST64_TYPE
+#undef UINT_LEAST64_TYPE
+#undef INT_FAST64_TYPE
+#undef INT_FAST64_TYPE
+#undef UINT_FAST64_TYPE
+#undef UINT_FAST64_TYPE
+
+
+#if (ZIP_HAS_DI != 0)
+#if (ZIP_HAS_DI != 0)
+#define        INT64_TYPE              "long int"
+#define        INT64_TYPE              "long int"
+#define        UINT64_TYPE             "long unsigned int"
+#define        UINT64_TYPE             "long unsigned int"
+#define        INT_LEAST64_TYPE        "long int"
+#define        INT_LEAST64_TYPE        "long int"
+#define        UINT_LEAST64_TYPE       "long unsigned int"
+#define        UINT_LEAST64_TYPE       "long unsigned int"
+#define        INT_FAST64_TYPE         "long int"
+#define        INT_FAST64_TYPE         "long int"
+#define        UINT_FAST64_TYPE        "long unsigned int"
+#define        UINT_FAST64_TYPE        "long unsigned int"
+#else
+#else
+#define        INT64_TYPE              NULL
+#define        INT64_TYPE              NULL
+#define        UINT64_TYPE             NULL
+#define        UINT64_TYPE             NULL
+#define        INT_LEAST64_TYPE        NULL
+#define        INT_LEAST64_TYPE        NULL
+#define        UINT_LEAST64_TYPE       NULL
+#define        UINT_LEAST64_TYPE       NULL
+#define        INT_FAST64_TYPE         NULL
+#define        INT_FAST64_TYPE         NULL
+#define        UINT_FAST64_TYPE        NULL
+#define        UINT_FAST64_TYPE        NULL
+#endif
+#endif
+
+
+#define        TARGET_PTRMEMFUNC_VBI_LOCATION  ptrmemfunc_vbit_in_pfn
+#define        TARGET_PTRMEMFUNC_VBI_LOCATION  ptrmemfunc_vbit_in_pfn
+
+
+
+
+/* 17.07 Register Usage / Register definitions */
+/* 17.07 Register Usage / Register definitions */
+
+
+/* FIRST_PSEUDO_REGISTER ... Number of hardware registers known to the compiler.
+/* FIRST_PSEUDO_REGISTER ... Number of hardware registers known to the compiler.
+ * They receive numbers 0 through FIRST_PSEUDO_REGISTER-1; thus the first
+ * They receive numbers 0 through FIRST_PSEUDO_REGISTER-1; thus the first
+ * pseudo register's number really is assigned the number
+ * pseudo register's number really is assigned the number
+ * FIRST_PSEUDO_REGISTER.
+ * FIRST_PSEUDO_REGISTER.
+ *
+ *
+ * ZipCPU---There are 16 registers in the ZipCPU, numbered 0-15 with the CC
+ * ZipCPU---There are 16 registers in the ZipCPU, numbered 0-15 with the CC
+ * and PC register being numbered 14 and 15 respectively.  The ZipCPU has
+ * and PC register being numbered 14 and 15 respectively.  The ZipCPU has
+ * another 16 registers, identical to the first, but user mode registers.  These
+ * another 16 registers, identical to the first, but user mode registers.  These
+ * are numbered the same as the first (0-15) in user mode, but numbered (16-31)
+ * are numbered the same as the first (0-15) in user mode, but numbered (16-31)
+ * in supervisor mode.  In addition, we create a pretend argument pointer
+ * in supervisor mode.  In addition, we create a pretend argument pointer
+ * register, zip_AP_PSEUDO, to refer to our arguments.  This final register,
+ * register, zip_AP_PSEUDO, to refer to our arguments.  This final register,
+ * although it gets a valid number, will be eliminated in optimization.
+ * although it gets a valid number, will be eliminated in optimization.
+ */
+ */
+#define        FIRST_PSEUDO_REGISTER   (zip_AP_PSEUDO+1)
+#define        FIRST_PSEUDO_REGISTER   (zip_AP_PSEUDO+1)
+
+
+/* FIXED_REGISTERS ... An initializer that says which registers are used for
+/* FIXED_REGISTERS ... An initializer that says which registers are used for
+ * fixed purposes all throughout the compiled code and are therefore not
+ * fixed purposes all throughout the compiled code and are therefore not
+ * available for general allocation.  These would include the stack pointer, the
+ * available for general allocation.  These would include the stack pointer, the
+ * frame pointer (except on machines where that can be used as a general
+ * frame pointer (except on machines where that can be used as a general
+ * register when no frame pointer is needed), the program counter on machines
+ * register when no frame pointer is needed), the program counter on machines
+ * where that is considered one of the addressable registers, and any other
+ * where that is considered one of the addressable registers, and any other
+ * numbered register with a standard use.
+ * numbered register with a standard use.
+ *
+ *
+ * This information is expressed as a sequence of numbers, separated by commas,
+ * This information is expressed as a sequence of numbers, separated by commas,
+ * and surrounded by braces.  The nth number is 1 if register n is fixed, 0
+ * and surrounded by braces.  The nth number is 1 if register n is fixed, 0
+ * otherwise.
+ * otherwise.
+ *
+ *
+ * For the Zip CPU, we have three fixed registers that are not available for
+ * For the Zip CPU, we have three fixed registers that are not available for
+ * general allocation:
+ * general allocation:
+ *
+ *
+ *     SP      The stack pointer
+ *     SP      The stack pointer
+ *     CC      The condition codes and CPU state register
+ *     CC      The condition codes and CPU state register
+ *     PC      The program counter
+ *     PC      The program counter
+ *
+ *
+ * Other registers, such as FP (the frame pointer) or GBL (the global offset
+ * Other registers, such as FP (the frame pointer) or GBL (the global offset
+ * table pointer) are registers that we hope will not be so fixed.
+ * table pointer) are registers that we hope will not be so fixed.
+ *
+ *
+ * Okay, just updated this process.  We now have more registers that are not
+ * Okay, just updated this process.  We now have more registers that are not
+ * available for general allocation:
+ * available for general allocation:
+ *     uR0-uPC         User registers
+ *     uR0-uPC         User registers
+ *     PSEUDO-AP       The pseudo arg pointer
+ *     PSEUDO-AP       The pseudo arg pointer
+ */
+ */
+#define        FIXED_REGISTERS         { 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1, 1 }
+#define        FIXED_REGISTERS         { 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1, 1 }
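As a reading aid only (an editorial sketch, not part of the patch), the 33 entries above can be grouped by the roles listed in the comment, assuming SP, CC and PC sit in slots 13-15 with the user registers and the pseudo argument pointer behind them:

#define	FIXED_REGISTERS_SKETCH	{ \
	/* R0-R12, allocatable    */ 0,0,0,0, 0,0,0,0, 0,0,0,0, 0, \
	/* SP, CC, PC, all fixed  */ 1,1,1, \
	/* uR0-uPC, fixed         */ 1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1, \
	/* pseudo arg pointer     */ 1 }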
+
+
+/* CALL_USED_REGISTERS ... like FIXED_REGISTERS but has 1 for each register
+/* CALL_USED_REGISTERS ... like FIXED_REGISTERS but has 1 for each register
+ * that is clobbered (in general) by function calls as well as for fixed
+ * that is clobbered (in general) by function calls as well as for fixed
+ * registers.  This macro therefore identifies the registers that are not
+ * registers.  This macro therefore identifies the registers that are not
+ * available for general allocation of values that must live across function
+ * available for general allocation of values that must live across function
+ * calls.
+ * calls.
+ *
+ *
+ * If a register has 0 in CALL_USED_REGISTERS, the compiler automatically saves
+ * If a register has 0 in CALL_USED_REGISTERS, the compiler automatically saves
+ * it on function entry and restores it on function exit, if the register is
+ * it on function entry and restores it on function exit, if the register is
+ * used within the function.
+ * used within the function.
+ *
+ *
+ * On the Zip CPU, we must save R0 (the return address), and (let's pick) any
+ * On the Zip CPU, we must save R0 (the return address), and (let's pick) any
+ * register above R5.
+ * register above R5.
+ */
+ */
+#define        CALL_USED_REGISTERS     { 0,1,1,1, 1,0,0,0, 0,0,0,0, 0,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1,  1 }
+#define        CALL_USED_REGISTERS     { 0,1,1,1, 1,0,0,0, 0,0,0,0, 0,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1,  1 }
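Read the same way (again purely editorial), this vector marks R1-R4 as call-clobbered while R0 and R5-R12 remain callee-saved, with every fixed and user-mode register flagged as well:

#define	CALL_USED_REGISTERS_SKETCH	{ \
	/* R0 saved, R1-R4 clobbered */ 0,1,1,1, 1, \
	/* R5-R12 saved              */ 0,0,0,0, 0,0,0,0, \
	/* SP, CC, PC                */ 1,1,1, \
	/* uR0-uPC and pseudo AP     */ 1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1, 1 }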
+
+
+/* CALL_REALLY_USED_REGISTERS ...  optional macro that, if not defined, defaults
+/* CALL_REALLY_USED_REGISTERS ...  optional macro that, if not defined, defaults
+ * to the value of CALL_USED_REGISTERS.
+ * to the value of CALL_USED_REGISTERS.
+ */
+ */
+
+
+/* HARD_REGNO_CALL_PART_CLOBBERED(REGNO,MODE) ... A C expression that is nonzero
+/* HARD_REGNO_CALL_PART_CLOBBERED(REGNO,MODE) ... A C expression that is nonzero
+ * if it is not permissible to store a value of mode MODE in hard register REGNO
+ * if it is not permissible to store a value of mode MODE in hard register REGNO
+ * across a call without some part of it being clobbered.  For most machines,
+ * across a call without some part of it being clobbered.  For most machines,
+ * this macro need not be defined.  It is only required for machines that do
+ * this macro need not be defined.  It is only required for machines that do
+ * not preserve the entire contents of a register across a call.
+ * not preserve the entire contents of a register across a call.
+ *
+ *
+ * ZipCPU--Always preserves the entire contents of those registers that are
+ * ZipCPU--Always preserves the entire contents of those registers that are
+ * preserved across calls, so this shouldn't need to be defined.
+ * preserved across calls, so this shouldn't need to be defined.
+ */
+ */
+// #define     HARD_REGNO_CALL_PART_CLOBBERED(REGNO,MODE)      (REGNO==0)
+// #define     HARD_REGNO_CALL_PART_CLOBBERED(REGNO,MODE)      (REGNO==0)
+
+
+/* TARGET_CONDITIONAL_REGISTER_USAGE(VOID) ... This hook may conditionally
+/* TARGET_CONDITIONAL_REGISTER_USAGE(VOID) ... This hook may conditionally
+ * modify five variables fixed_regs, call_used_regs, global_regs, reg_names, and
+ * modify five variables fixed_regs, call_used_regs, global_regs, reg_names, and
+ * reg_class_contents, to take into account any dependence of these register
+ * reg_class_contents, to take into account any dependence of these register
+ * sets on target flags.  The first three of these are of type char[]
+ * sets on target flags.  The first three of these are of type char[]
+ * (interpreted as Boolean vectors).  reg_names is a const char *[] and
+ * (interpreted as Boolean vectors).  reg_names is a const char *[] and
+ * reg_class_contents is a HARD_REG_SET.  Before the macro is called,
+ * reg_class_contents is a HARD_REG_SET.  Before the macro is called,
+ * fixed_regs, call_used_regs, reg_class_contents, and reg_names have been
+ * fixed_regs, call_used_regs, reg_class_contents, and reg_names have been
+ * initialized from FIXED_REGISTERS, CALL_USED_REGISTERS, REG_CLASS_CONTENTS,
+ * initialized from FIXED_REGISTERS, CALL_USED_REGISTERS, REG_CLASS_CONTENTS,
+ * and REGISTER_NAMES, respectively.  global_regs has been cleared, and any
+ * and REGISTER_NAMES, respectively.  global_regs has been cleared, and any
+ * -ffixed-reg, -fcall-used-reg, and -fcall-saved-reg command options have been
+ * -ffixed-reg, -fcall-used-reg, and -fcall-saved-reg command options have been
+ * applied.
+ * applied.
+ *
+ *
+ * ZipCPU -- I may need to return and define this depending upon how the
+ * ZipCPU -- I may need to return and define this depending upon how the
+ * GBL register allocation goes.  But for now, we'll leave this at its default
+ * GBL register allocation goes.  But for now, we'll leave this at its default
+ * value.
+ * value.
+ */
+ */
+// #warning "Revisit me after FP and GBL allocation"
+// #warning "Revisit me after FP and GBL allocation"
+
+
+/* INCOMING_REGNO(out) ... Define this macro if the target machine has register
+/* INCOMING_REGNO(out) ... Define this macro if the target machine has register
+ * windows. ...
+ * windows. ...
+ *
+ *
+ * Zip CPU has no register windows.
+ * Zip CPU has no register windows.
+ */
+ */
+
+
+/* OUTGOING_REGNO ... same thing.
+/* OUTGOING_REGNO ... same thing.
+ * LOCAL_REGNO ... same thing.
+ * LOCAL_REGNO ... same thing.
+ */
+ */
+
+
+/* PC_REGNUM ... If the program counter has a register number, define this as
+/* PC_REGNUM ... If the program counter has a register number, define this as
+ * that register number.  Otherwise do not define it.
+ * that register number.  Otherwise do not define it.
+ */
+ */
+#define        PC_REGNUM       zip_PC
+#define        PC_REGNUM       zip_PC
+
+
+
+
+/* REG_ALLOC_ORDER ... If defined, an initializer for a vector of integers,
+/* REG_ALLOC_ORDER ... If defined, an initializer for a vector of integers,
+ * containing the number of hard registers in the order in which GCC should
+ * containing the number of hard registers in the order in which GCC should
+ * prefer to use them (from most preferred to least).
+ * prefer to use them (from most preferred to least).
+ *
+ *
+ * If this macro is not defined, registers are used lowest numbered first (all
+ * If this macro is not defined, registers are used lowest numbered first (all
+ * else being equal).
+ * else being equal).
+ *
+ *
+ * Since the default is what the ZipCPU wants anyway, we won't define this here.
+ * Since the default is what the ZipCPU wants anyway, we won't define this here.
+ */
+ */
+
+
+/* ADJUST_REG_ALLOC_ORDER ... on most machines it is not necessary to define
+/* ADJUST_REG_ALLOC_ORDER ... on most machines it is not necessary to define
+ * this macro, so we won't either.
+ * this macro, so we won't either.
+ */
+ */
+
+
+/* HONOR_REG_ALLOC_ORDER ... on most machines it is not necessary to define
+/* HONOR_REG_ALLOC_ORDER ... on most machines it is not necessary to define
+ * this macro, so we won't either.
+ * this macro, so we won't either.
+ */
+ */
+
+
+/* HARD_REGNO_NREGS(REGNO, MODE) ... A C expression for the number of
+/* HARD_REGNO_NREGS(REGNO, MODE) ... A C expression for the number of
+ * consecutive hard registers, starting at register number REGNO, required to
+ * consecutive hard registers, starting at register number REGNO, required to
+ * hold a value of mode MODE.
+ * hold a value of mode MODE.
+ *
+ *
+ * On a machine where all registers are exactly one word, a suitable definition
+ * On a machine where all registers are exactly one word, a suitable definition
+ * is ((GET_MODE_SIZE(MODE)+UNITS_PER_WORD-1)/UNITS_PER_WORD).
+ * is ((GET_MODE_SIZE(MODE)+UNITS_PER_WORD-1)/UNITS_PER_WORD).
+ *
+ *
+ */
+ */
+#undef HARD_REGNO_NREGS
+#undef HARD_REGNO_NREGS
+#define        HARD_REGNO_NREGS(REGNO, MODE)   ((GET_MODE_SIZE(MODE)+UNITS_PER_WORD-1)\
+#define        HARD_REGNO_NREGS(REGNO, MODE)   ((GET_MODE_SIZE(MODE)+UNITS_PER_WORD-1)\
+               / (UNITS_PER_WORD))
+               / (UNITS_PER_WORD))
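A worked illustration of that arithmetic (editorial; the exact byte sizes depend on how UNITS_PER_WORD and the mode sizes end up being defined for this port):

/* Assuming a register holds exactly one word, a one-word mode such as SImode
 * needs a single register and a two-word mode such as DImode needs two:
 *   (GET_MODE_SIZE(SImode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD == 1
 *   (GET_MODE_SIZE(DImode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD == 2
 */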
+
+
+/* HARD_REGNO_NREGS_HAS_PADDING(REGNO,MODE) ... A C expression that is nonzero
+/* HARD_REGNO_NREGS_HAS_PADDING(REGNO,MODE) ... A C expression that is nonzero
+ * if a value of mode MODE, stored in memory, ends with padding that causes it
+ * if a value of mode MODE, stored in memory, ends with padding that causes it
+ * to take up more space than in registers starting at register number REGNO
+ * to take up more space than in registers starting at register number REGNO
+ * (as determined by multiplying GCC's notion of the size of the register when
+ * (as determined by multiplying GCC's notion of the size of the register when
+ * containing this mode by the number of registers returned by HARD_REGNO_NREGS)
+ * containing this mode by the number of registers returned by HARD_REGNO_NREGS)
+ * By default this is zero.
+ * By default this is zero.
+ *
+ *
+ * Zip CPU --- The default looks good enough to me.
+ * Zip CPU --- The default looks good enough to me.
+ */
+ */
+
+
+/* HARD_REGNO_NREGS_WITH_PADDING(REGNO, MODE)
+/* HARD_REGNO_NREGS_WITH_PADDING(REGNO, MODE)
+ *
+ *
+ * ZipCPU ---
+ * ZipCPU ---
+ */
+ */
+
+
+/* REGMODE_NATURAL_SIZE(MODE) -- Define this macro if the natural size of
+/* REGMODE_NATURAL_SIZE(MODE) -- Define this macro if the natural size of
+ * registers that hold values of mode mode is not the word size.  It is a C
+ * registers that hold values of mode mode is not the word size.  It is a C
+ * expression that should give the natural size in bytes for the specified mode.
+ * expression that should give the natural size in bytes for the specified mode.
+ * It is used by the register allocator to try to optimize its results.
+ * It is used by the register allocator to try to optimize its results.
+ *
+ *
+ * ZipCPU ---
+ * ZipCPU ---
+ */
+ */
+// #define     REGMODE_NATURAL_SIZE(MODE)      (((MODE)==DImode)?2:1)
+// #define     REGMODE_NATURAL_SIZE(MODE)      (((MODE)==DImode)?2:1)
+
+
+/* HARD_REGNO_MODE_OK ... A C expression that is nonzero if it is permissible
+/* HARD_REGNO_MODE_OK ... A C expression that is nonzero if it is permissible
+ * to store a value of mode MODE in a hard register number REGNO (or in several
+ * to store a value of mode MODE in a hard register number REGNO (or in several
+ * registers starting with that one).  For a machine where all registers are
+ * registers starting with that one).  For a machine where all registers are
+ * equivalent, a suitable definition is '1'.  You need not include code to check
+ * equivalent, a suitable definition is '1'.  You need not include code to check
+ * for the numbers of fixed registers, because the allocation mechanism
+ * for the numbers of fixed registers, because the allocation mechanism
+ * considered them to be always occupied.
+ * considered them to be always occupied.
+ *
+ *
+ * ZipCPU --- As long as you are already avoiding the fixed registers, the
+ * ZipCPU --- As long as you are already avoiding the fixed registers, the
+ * suitable default definition mentioned above should be sufficient.
+ * suitable default definition mentioned above should be sufficient.
+ */
+ */
+#undef HARD_REGNO_MODE_OK
+#undef HARD_REGNO_MODE_OK
+#define        HARD_REGNO_MODE_OK(R,M) (R<zip_CC)
+#define        HARD_REGNO_MODE_OK(R,M) (R<zip_CC)
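In other words (an editorial expansion, relying on the CC register being numbered 14 as described earlier), only R0-R13 are admitted:

/* Illustrative only, assuming zip_CC == 14:
 *   HARD_REGNO_MODE_OK( 5, SImode) -> ( 5 < 14) -> 1   general register
 *   HARD_REGNO_MODE_OK(14, SImode) -> (14 < 14) -> 0   the CC register
 *   HARD_REGNO_MODE_OK(15, SImode) -> (15 < 14) -> 0   the PC
 */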
+
+
+/* HARD_REGNO_RENAME_OK(FROM,TO) ... A C expression that is nonzero if it is
+/* HARD_REGNO_RENAME_OK(FROM,TO) ... A C expression that is nonzero if it is
+ * okay to rename a hard register FROM to another hard register TO.  One common
+ * okay to rename a hard register FROM to another hard register TO.  One common
+ * use of this macro is to prevent renaming of a register to another register
+ * use of this macro is to prevent renaming of a register to another register
+ * that is not saved by a prologue in an interrupt handler.  The default is
+ * that is not saved by a prologue in an interrupt handler.  The default is
+ * always nonzero.
+ * always nonzero.
+ *
+ *
+ * ZipCPU --- The default looks good enough to us.
+ * ZipCPU --- The default looks good enough to us.
+ */
+ */
+#undef HARD_REGNO_RENAME_OK
+#undef HARD_REGNO_RENAME_OK
+#define        HARD_REGNO_RENAME_OK(FROM,TO)   ((is_ZIP_GENERAL_REG(FROM))&&(is_ZIP_GENERAL_REG(TO)))
+#define        HARD_REGNO_RENAME_OK(FROM,TO)   ((is_ZIP_GENERAL_REG(FROM))&&(is_ZIP_GENERAL_REG(TO)))
+
+
+
+
+/* MODES_TIEABLE_P(M1, M2) ... A C expression that is nonzero if a value of mode
+/* MODES_TIEABLE_P(M1, M2) ... A C expression that is nonzero if a value of mode
+ * M1 is accessible in mode M2 without copying.
+ * M1 is accessible in mode M2 without copying.
+ *
+ *
+ * ZipCPU --- well, that's true for us (although we support scant few modes) ...
+ * ZipCPU --- well, that's true for us (although we support scant few modes) ...
+ * so let's set it to one.
+ * so let's set it to one.
+ */
+ */
+#define        MODES_TIEABLE_P(M1,M2)  1
+#define        MODES_TIEABLE_P(M1,M2)  1
+
+
+/* TARGET_HARD_REGNO_SCRATCH_OK(REGNO)
+/* TARGET_HARD_REGNO_SCRATCH_OK(REGNO)
+ * This target hook should return true if it is OK to use a hard register
+ * This target hook should return true if it is OK to use a hard register
+ * REGNO as a scratch register in peephole2.  One common use of this macro is
+ * REGNO as a scratch register in peephole2.  One common use of this macro is
+ * to prevent using of a register that is not saved by a prologue in an
+ * to prevent using of a register that is not saved by a prologue in an
+ * interrupt handler.  The default version of this hook always returns true.
+ * interrupt handler.  The default version of this hook always returns true.
+ *
+ *
+ * ZipCPU --- the default works for us as well.  If you are in an interrupt
+ * ZipCPU --- the default works for us as well.  If you are in an interrupt
+ * context, you have an entirely new set of registers (the supervisor set), so
+ * context, you have an entirely new set of registers (the supervisor set), so
+ * this is a non-issue.
+ * this is a non-issue.
+ */
+ */
+
+
+/* AVOID_CCMODE_COPIES ... define this macro if the compiler should avoid
+/* AVOID_CCMODE_COPIES ... define this macro if the compiler should avoid
+ * copies to/from CCmode register(s).  You should only define this macro if
+ * copies to/from CCmode register(s).  You should only define this macro if
+ * support for copying to/from CCmode is incomplete.
+ * support for copying to/from CCmode is incomplete.
+ *
+ *
+ * ZipCPU --- CCmode register copies work like any other, so we'll keep with the
+ * ZipCPU --- CCmode register copies work like any other, so we'll keep with the
+ * default definition.
+ * default definition.
+ */
+ */
+
+
+/* STACK_REGS ... Define this if the machine has any stack-like registers.
+/* STACK_REGS ... Define this if the machine has any stack-like registers.
+ *
+ *
+ * Zip CPU has no stack-like registers, as their definition is different from
+ * Zip CPU has no stack-like registers, as their definition is different from
+ * the ZipCPU stack pointer register.
+ * the ZipCPU stack pointer register.
+ */
+ */
+
+
+/* 17.08 Register Classes */
+/* 17.08 Register Classes */
+
+
+/* enum reg_class ... An enumerate type that must be defined with all the
+/* enum reg_class ... An enumerate type that must be defined with all the
+ * register class names as enumerated values.  NO_REGS must be first.  ALL_REGS
+ * register class names as enumerated values.  NO_REGS must be first.  ALL_REGS
+ * must be the last register class, followed by one more enumerated value,
+ * must be the last register class, followed by one more enumerated value,
+ * LIM_REG_CLASSES, which is not a register class but rather tells how many
+ * LIM_REG_CLASSES, which is not a register class but rather tells how many
+ * classes there are.
+ * classes there are.
+ *
+ *
+ * ZipCPU --- We'll define registers 0-13 as general registers, put 14-15 in
+ * ZipCPU --- We'll define registers 0-13 as general registers, put 14-15 in
+ * ALL_REGS, and go from there.
+ * ALL_REGS, and go from there.
+ */
+ */
+enum   reg_class {
+enum   reg_class {
+       NO_REGS, GENERAL_REGS,
+       NO_REGS, GENERAL_REGS,
+       USER_REGS,
+       USER_REGS,
+       ALL_REGS, LIM_REG_CLASSES
+       ALL_REGS, LIM_REG_CLASSES
+};
+};
+
+
+/* N_REG_CLASSES ... the number of distinct register classes, defined as follows
+/* N_REG_CLASSES ... the number of distinct register classes, defined as follows
+ */
+ */
+#define        N_REG_CLASSES   (int)LIM_REG_CLASSES
+#define        N_REG_CLASSES   (int)LIM_REG_CLASSES
+
+
+/* REG_CLASS_NAMES ... An initializer containing the names of the register
+/* REG_CLASS_NAMES ... An initializer containing the names of the register
+ * classes as C string constants.  These names are used in writing some of the
+ * classes as C string constants.  These names are used in writing some of the
+ * debugging dumps.
+ * debugging dumps.
+ */
+ */
+#define        REG_CLASS_NAMES { "NO_REGS", "GENERAL_REGS", "USER_REGS", "ALL_REGS" }
+#define        REG_CLASS_NAMES { "NO_REGS", "GENERAL_REGS", "USER_REGS", "ALL_REGS" }
+
+
+/* REG_CLASS_CONTENTS ... An initializer containing the contents of the register
+/* REG_CLASS_CONTENTS ... An initializer containing the contents of the register
+ * classes, as integers which are bit masks.  The nth integer specifies the
+ * classes, as integers which are bit masks.  The nth integer specifies the
+ * contents of class n.  The mask is interpreted so that register r is in the
+ * contents of class n.  The mask is interpreted so that register r is in the
+ * class if (mask&(1<<r)) is 1.
+ * class if (mask&(1<<r)) is 1.
+ *
+ *
+ * When the machine has more than 32 registers, an integer does not suffice.
+ * When the machine has more than 32 registers, an integer does not suffice.
+ * Then the integers are replaced by sub-initializers, braced groupings
+ * Then the integers are replaced by sub-initializers, braced groupings
+ * containing several integers.  Each sub-initializer must be suitable as an
+ * containing several integers.  Each sub-initializer must be suitable as an
+ * initializer for the type HARD_REG_SET which is defined in 'hard-reg-set.h'.
+ * initializer for the type HARD_REG_SET which is defined in 'hard-reg-set.h'.
+ * In this situation, the first integer in each subinitializer corresponds to
+ * In this situation, the first integer in each subinitializer corresponds to
+ * registers 0-31, the second integer to registers 32-63, and so on.
+ * registers 0-31, the second integer to registers 32-63, and so on.
+ *
+ *
+ * ZipCPU --- This is straightforward: three register classes, etc.
+ * ZipCPU --- This is straightforward: three register classes, etc.
+ */
+ */
+#define        REG_CLASS_CONTENTS { { 0x000000000, 0}, {0x00003fff, 0}, {0x0ffff0000, 0}, {0x0ffffffff, 1} }
+#define        REG_CLASS_CONTENTS { { 0x000000000, 0}, {0x00003fff, 0}, {0x0ffff0000, 0}, {0x0ffffffff, 1} }
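Decoding those masks (editorial note): with hard registers 0-31 in the first word of each pair and the pseudo argument pointer as bit 0 of the second, the initializer above works out to:

/* { 0x00000000, 0 }  NO_REGS       empty
 * { 0x00003fff, 0 }  GENERAL_REGS  R0-R13
 * { 0xffff0000, 0 }  USER_REGS     uR0-uPC (hard registers 16-31)
 * { 0xffffffff, 1 }  ALL_REGS      every hard register plus the pseudo AP
 */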
+
+
+/* REGNO_REG_CLASS ... A C expression whose value is a register class
+/* REGNO_REG_CLASS ... A C expression whose value is a register class
+ * containing hard register REGNO.  In general there is more than one such
+ * containing hard register REGNO.  In general there is more than one such
+ * class;  Choose a class which is minimal, meaning that no smaller class also
+ * class;  Choose a class which is minimal, meaning that no smaller class also
+ * contains the register.
+ * contains the register.
+ */
+ */
+#define        REGNO_REG_CLASS(R)      (is_ZIP_REG(R)?(((R)<=13)?GENERAL_REGS:ALL_REGS):NO_REGS)
+#define        REGNO_REG_CLASS(R)      (is_ZIP_REG(R)?(((R)<=13)?GENERAL_REGS:ALL_REGS):NO_REGS)
+
+
+/* BASE_REG_CLASS ... A macro whose definition is the name of the class to which
+/* BASE_REG_CLASS ... A macro whose definition is the name of the class to which
+ * a valid base register must belong.  A base register is one used in an address
+ * a valid base register must belong.  A base register is one used in an address
+ * which is the register value plus a displacement.
+ * which is the register value plus a displacement.
+ */
+ */
+#undef BASE_REG_CLASS
+#undef BASE_REG_CLASS
+#define        BASE_REG_CLASS  GENERAL_REGS
+#define        BASE_REG_CLASS  GENERAL_REGS
+
+
+/* MODE_BASE_CLASS(MODE) ... This is a variation of the BASE_REG_CLASS macro
+/* MODE_BASE_CLASS(MODE) ... This is a variation of the BASE_REG_CLASS macro
+ * which allows the selection of a base register in a mode-dependent manner.  If
+ * which allows the selection of a base register in a mode-dependent manner.  If
+ * mode is VOIDmode then it should return the same value as BASE_REG_CLASS.
+ * mode is VOIDmode then it should return the same value as BASE_REG_CLASS.
+ */
+ */
+#undef MODE_BASE_CLASS
+#undef MODE_BASE_CLASS
+#define        MODE_BASE_CLASS(MODE)   GENERAL_REGS
+#define        MODE_BASE_CLASS(MODE)   GENERAL_REGS
+
+
+/* MODE_BASE_REG_REG_CLASS(MODE) ... A C expression whose value is the register
+/* MODE_BASE_REG_REG_CLASS(MODE) ... A C expression whose value is the register
+ * class to which a valid base register must belong in order to be used in a
+ * class to which a valid base register must belong in order to be used in a
+ * base plus index register address.  You should define this macro if base plus
+ * base plus index register address.  You should define this macro if base plus
+ * index addresses have different requirements than other base register uses.
+ * index addresses have different requirements than other base register uses.
+ *
+ *
+ * Zip CPU does not support the base plus index addressing mode, thus ...
+ * Zip CPU does not support the base plus index addressing mode, thus ...
+ */
+ */
+// #undef      MODE_BASE_REG_REG_CLASS
+// #undef      MODE_BASE_REG_REG_CLASS
+// #define     MODE_BASE_REG_REG_CLASS(MODE)   NO_REGS
+// #define     MODE_BASE_REG_REG_CLASS(MODE)   NO_REGS
+
+
+/* INDEX_REG_CLASS ... A macro whose definition is the name of the class to
+/* INDEX_REG_CLASS ... A macro whose definition is the name of the class to
+ * which a valid index register must belong.  An index register is one used in
+ * which a valid index register must belong.  An index register is one used in
+ * an address where its value is either multiplied by a scale factor or added
+ * an address where its value is either multiplied by a scale factor or added
+ * to another register (as well as added to a displacement).
+ * to another register (as well as added to a displacement).
+ *
+ *
+ * ZipCPU -- Has no index registers.
+ * ZipCPU -- Has no index registers.
+ */
+ */
+#undef INDEX_REG_CLASS
+#undef INDEX_REG_CLASS
+#define        INDEX_REG_CLASS NO_REGS
+#define        INDEX_REG_CLASS NO_REGS
+
+
+/* REGNO_OK_FOR_BASE_P(NUM) ... A C expression which is nonzero if register
+/* REGNO_OK_FOR_BASE_P(NUM) ... A C expression which is nonzero if register
+ * number num is suitable for use as a base register in operand addresses.
+ * number num is suitable for use as a base register in operand addresses.
+ */
+ */
+#undef REGNO_OK_FOR_BASE_P
+#undef REGNO_OK_FOR_BASE_P
+# define REGNO_OK_FOR_BASE_P(NUM)      ((NUM>=FIRST_PSEUDO_REGISTER)||(NUM != zip_CC))
+# define REGNO_OK_FOR_BASE_P(NUM)      ((NUM>=FIRST_PSEUDO_REGISTER)||(NUM != zip_CC))
+
+
+/* REGNO_MODE_OK_FOR_BASE_P ... A C expression that is just like
+/* REGNO_MODE_OK_FOR_BASE_P ... A C expression that is just like
+ * REGNO_OK_FOR_BASE_P, except that the expression may examine the mode of the
+ * REGNO_OK_FOR_BASE_P, except that the expression may examine the mode of the
+ * memory reference in MODE.  You should define this macro if the mode of the
+ * memory reference in MODE.  You should define this macro if the mode of the
+ * memory reference affects whether a register may be used as a base register.
+ * memory reference affects whether a register may be used as a base register.
+ *
+ *
+ * ZipCPU --- the mode doesn't affect anything, so we don't define this.
+ * ZipCPU --- the mode doesn't affect anything, so we don't define this.
+ */
+ */
+
+
+/* REGNO_MODE_OK_FOR_REG_BASE_P(NUM, MODE) ... A C expression which is nonzero
+/* REGNO_MODE_OK_FOR_REG_BASE_P(NUM, MODE) ... A C expression which is nonzero
+ * if register number NUM is suitable for use as a base register in base plus
+ * if register number NUM is suitable for use as a base register in base plus
+ * index operand addresses, accessing memory in mode MODE.
+ * index operand addresses, accessing memory in mode MODE.
+ *
+ *
+ * Use of this macro is deprecated.
+ * Use of this macro is deprecated.
+ */
+ */
+
+
+/* REGNO_MODE_CODE_OK_FOR_BASE_P(N,M,AS,OC,IC) ... A C expression which is
+/* REGNO_MODE_CODE_OK_FOR_BASE_P(N,M,AS,OC,IC) ... A C expression which is
+ * nonzero if a register number N is suitable for use as a base register in
+ * nonzero if a register number N is suitable for use as a base register in
+ * operand addresses, accessing memory in mode M in address space AS.  This is
+ * operand addresses, accessing memory in mode M in address space AS.  This is
+ * similar to REGNO_MODE_OK_FOR_BASE_P, except that the expression may examine
+ * similar to REGNO_MODE_OK_FOR_BASE_P, except that the expression may examine
+ * the context in which the register appears in the memory reference.
+ * the context in which the register appears in the memory reference.
+ *
+ *
+ * ZipCPU---We aren't specific in how we use our registers.
+ * ZipCPU---We aren't specific in how we use our registers.
+ */
+ */
+#define        REGNO_MODE_CODE_OK_FOR_BASE_P(N,M,AS,OC,IC) REGNO_OK_FOR_BASE_P(N)
+#define        REGNO_MODE_CODE_OK_FOR_BASE_P(N,M,AS,OC,IC) REGNO_OK_FOR_BASE_P(N)
+
+
+/* REGNO_OK_FOR_INDEX_P(REGNO) ... A C expression which is nonzero if register
+/* REGNO_OK_FOR_INDEX_P(REGNO) ... A C expression which is nonzero if register
+ * num is suitable for use as an index register in operand addresses.  It may
+ * num is suitable for use as an index register in operand addresses.  It may
+ * be either a suitable hard register or a pseudo register that has been
+ * be either a suitable hard register or a pseudo register that has been
+ * allocated such a hard register.
+ * allocated such a hard register.
+ *
+ *
+ * ZipCPU has no index registers, therefore we declare this to be zero.
+ * ZipCPU has no index registers, therefore we declare this to be zero.
+ */
+ */
+#undef REGNO_OK_FOR_INDEX_P
+#undef REGNO_OK_FOR_INDEX_P
+#define        REGNO_OK_FOR_INDEX_P(REGNO)     0
+#define        REGNO_OK_FOR_INDEX_P(REGNO)     0
+
+
+/* TARGET_PREFERRED_RENAME_CLASS(RCLASS) ... A target hook that places
+/* TARGET_PREFERRED_RENAME_CLASS(RCLASS) ... A target hook that places
+ * additional preference on the register class to use when it is necessary to
+ * additional preference on the register class to use when it is necessary to
+ * rename a register in class RCLASS to another class, or perhaps NO_REGS, if no
+ * rename a register in class RCLASS to another class, or perhaps NO_REGS, if no
+ * preferred register class is found or hook preferred_rename_class is not
+ * preferred register class is found or hook preferred_rename_class is not
+ * implemented.  Sometimes returning a more restrictive class makes better code.
+ * implemented.  Sometimes returning a more restrictive class makes better code.
+ * For example, on ARM, thumb-2 instructions using LO_REGS may be smaller than
+ * For example, on ARM, thumb-2 instructions using LO_REGS may be smaller than
+ * instructions using GENERIC_REGS.  By returning LO_REGS from
+ * instructions using GENERIC_REGS.  By returning LO_REGS from
+ * preferred_rename_class, code size can be reduced.
+ * preferred_rename_class, code size can be reduced.
+ */
+ */
+// #undef TARGET_PREFERRED_RENAME_CLASS
+// #undef TARGET_PREFERRED_RENAME_CLASS
+// #define     TARGET_PREFERRED_RENAME_CLASS(RCLASS)   RCLASS
+// #define     TARGET_PREFERRED_RENAME_CLASS(RCLASS)   RCLASS
+
+
+/* TARGET_PREFERRED_RELOAD_CLASS(X,RC) ... A target hook that places additional
+/* TARGET_PREFERRED_RELOAD_CLASS(X,RC) ... A target hook that places additional
+ * restrictions on the register class to use when it is necessary to copy value
+ * restrictions on the register class to use when it is necessary to copy value
+ * X into a register in class RC.  The value is a register class; perhaps RC, or
+ * X into a register in class RC.  The value is a register class; perhaps RC, or
+ * perhaps a smaller class.
+ * perhaps a smaller class.
+ *
+ *
+ * The default version of this hook always returns the value of RC, which
+ * The default version of this hook always returns the value of RC, which
+ * sounds quite appropriate for the ZipCPU.
+ * sounds quite appropriate for the ZipCPU.
+ */
+ */
+
+
+/* PREFERRED_RELOAD_CLASS(X,CLASS) ... A C expression that places additional
+/* PREFERRED_RELOAD_CLASS(X,CLASS) ... A C expression that places additional
+ * restrictions on the register class to use when it is necessary to copy
+ * restrictions on the register class to use when it is necessary to copy
+ * value X into a register in class CLASS.  On many machines, the following
+ * value X into a register in class CLASS.  On many machines, the following
+ * definition is safe: PREFERRED_RELOAD_CLASS(X,CLASS) (CLASS)
+ * definition is safe: PREFERRED_RELOAD_CLASS(X,CLASS) (CLASS)
+ * Sometimes returning a more restrictive class makes better code.  For example,
+ * Sometimes returning a more restrictive class makes better code.  For example,
+ * on the 68k, when x is an integer constant that is in range for a moveq
+ * on the 68k, when x is an integer constant that is in range for a moveq
+ * instruction, the value of this macro is always DATA_REGS as long as CLASS
+ * instruction, the value of this macro is always DATA_REGS as long as CLASS
+ * includes the data registers.  Requiring a data register guarantees that a
+ * includes the data registers.  Requiring a data register guarantees that a
+ * 'moveq' will be used.
+ * 'moveq' will be used.
+ *
+ *
+ * ZipCPU --- you can't load certain values into all members of ALL_REGS.  For
+ * ZipCPU --- you can't load certain values into all members of ALL_REGS.  For
+ * example, loading (sleep and !gie) into the CC register could halt the CPU.
+ * example, loading (sleep and !gie) into the CC register could halt the CPU.
+ * Hence, we only allow loads into the GENERAL_REG class.
+ * Hence, we only allow loads into the GENERAL_REG class.
+ */
+ */
+#define        PREFERRED_RELOAD_CLASS(X, CLASS)        GENERAL_REGS
+#define        PREFERRED_RELOAD_CLASS(X, CLASS)        GENERAL_REGS
+
+
+/* TARGET_PREFERRED_OUTPUT_RELOAD_CLASS(RTX,RCLASS) ... Like TARGET_PREFERRED_..
+/* TARGET_PREFERRED_OUTPUT_RELOAD_CLASS(RTX,RCLASS) ... Like TARGET_PREFERRED_..
+ * RELOAD_CLASS, but for output instead of input reloads.
+ * RELOAD_CLASS, but for output instead of input reloads.
+ *
+ *
+ * ZipCPU --- there's gotta be a valid default behaviour for this.
+ * ZipCPU --- there's gotta be a valid default behaviour for this.
+ */
+ */
+
+
+/* LIMIT_RELOAD_CLASS(MODE, CL) ...
+/* LIMIT_RELOAD_CLASS(MODE, CL) ...
+ *
+ *
+ * Don't define this macro unless the target machine has limitations which
+ * Don't define this macro unless the target machine has limitations which
+ * require the macro to do something nontrivial.  ZipCPU doesn't, so we won't.
+ * require the macro to do something nontrivial.  ZipCPU doesn't, so we won't.
+ */
+ */
+
+
+/* TARGET_SECONDARY_RELOAD
+/* TARGET_SECONDARY_RELOAD
+ * SECONDARY_ ...
+ * SECONDARY_ ...
+ * Don't think we need these ...
+ * Don't think we need these ...
+ */
+ */
+
+
+/* CLASS_MAX_NREGS(CLASS,MODE) ... A C expression for the maximum number of
+/* CLASS_MAX_NREGS(CLASS,MODE) ... A C expression for the maximum number of
+ * consecutive registers of class CLASS needed to hold a value of mode MODE.
+ * consecutive registers of class CLASS needed to hold a value of mode MODE.
+ *
+ *
+ * This is closely related to the macro HARD_REGNO_NREGS.  In fact, the value
+ * This is closely related to the macro HARD_REGNO_NREGS.  In fact, the value
+ * of the macro CLASS_MAX_NREGS(CL,M) should be the maximum value of
+ * of the macro CLASS_MAX_NREGS(CL,M) should be the maximum value of
+ * HARD_REGNO_NREGS(REGNO,MODE) for all REGNO values in the class CLASS.
+ * HARD_REGNO_NREGS(REGNO,MODE) for all REGNO values in the class CLASS.
+ *
+ *
+ * This macro helps control the handling of multiple word values in the reload
+ * This macro helps control the handling of multiple word values in the reload
+ * pass.
+ * pass.
+ *
+ *
+ * ZipCPU --- We'll just use HARD_REGNO_NREGS, since the result is independent of CLASS for
+ * ZipCPU --- We'll just use HARD_REGNO_NREGS, since the result is independent of CLASS for
+ * us.  We'll also choose register R0, since ... well, since it simply doesn't
+ * us.  We'll also choose register R0, since ... well, since it simply doesn't
+ * matter.  (HARD_REGNO_NREGS ignores this anyway)
+ * matter.  (HARD_REGNO_NREGS ignores this anyway)
+ */
+ */
+#define        CLASS_MAX_NREGS(CLASS, MODE)    HARD_REGNO_NREGS(0,MODE)
+#define        CLASS_MAX_NREGS(CLASS, MODE)    HARD_REGNO_NREGS(0,MODE)
+
+
+/* CANNOT_CHANGE_MODE_CLASS
+/* CANNOT_CHANGE_MODE_CLASS
+ * ???
+ * ???
+ */
+ */
+
+
+/* TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS
+/* TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS
+ */
+ */
+
+
+/* TARGET_LRA_P
+/* TARGET_LRA_P
+ * Default looks good.
+ * Default looks good.
+ */
+ */
+
+
+/* TARGET_REGISTER_PRIORITY(INT) ... A target hook which returns the register
+/* TARGET_REGISTER_PRIORITY(INT) ... A target hook which returns the register
+ * priority number to which the register HARD_REGNO belongs.  The bigger the
+ * priority number to which the register HARD_REGNO belongs.  The bigger the
+ * number, the more preferable the register.
+ * number, the more preferable the register.
+ *
+ *
+ * The default version of this target hook returns always zero---good enough for
+ * The default version of this target hook returns always zero---good enough for
+ * the ZipCPU.
+ * the ZipCPU.
+ */
+ */
+
+
+/* TARGET_REGISTER_USAGE_LEVELING_P(VOID) ... A target hook which returns true
+/* TARGET_REGISTER_USAGE_LEVELING_P(VOID) ... A target hook which returns true
+ * if we need register usage leveling.  That means if a few hard registers are
+ * if we need register usage leveling.  That means if a few hard registers are
+ * equally good for the assignment, we choose the least used hard register.  The
+ * equally good for the assignment, we choose the least used hard register.  The
+ * register usage leveling may be profitable for some targets.  Don't use usage
+ * register usage leveling may be profitable for some targets.  Don't use usage
+ * leveling for targets with conditional execution or targets with big register
+ * leveling for targets with conditional execution or targets with big register
+ * files as it hurts if-conversion and cross-jumping optimizations.  The default
+ * files as it hurts if-conversion and cross-jumping optimizations.  The default
+ * version of this target hook returns always false.
+ * version of this target hook returns always false.
+ *
+ *
+ * ZipCPU --- Default is the right answer.
+ * ZipCPU --- Default is the right answer.
+ */
+ */
+
+
+/* TARGET_DIFFERENT_ADDR_DISPLACEMENT_P ...
+/* TARGET_DIFFERENT_ADDR_DISPLACEMENT_P ...
+ * Default looks good.
+ * Default looks good.
+ */
+ */
+
+
+/* TARGET_CANNOT_SUBSTITUTE_MEM_EQUIV_P ...
+/* TARGET_CANNOT_SUBSTITUTE_MEM_EQUIV_P ...
+ * Default looks good.
+ * Default looks good.
+ */
+ */
+
+
+/* TARGET_LEGITIMIZE_ADDRESS_DISPLACEMENT ....
+/* TARGET_LEGITIMIZE_ADDRESS_DISPLACEMENT ....
+ */
+ */
+
+
+/* TARGET_SPILL_CLASS
+/* TARGET_SPILL_CLASS
+ *
+ *
+ * ZipCPU --- If we were running in supervisor mode only, this might be the
+ * ZipCPU --- If we were running in supervisor mode only, this might be the
+ * user set of registers.  However, we're not building for that mode (now),
+ * user set of registers.  However, we're not building for that mode (now),
+ * so we'll leave this at the default of NO_REGS.
+ * so we'll leave this at the default of NO_REGS.
+ */
+ */
+
+
+/* TARGET_CSTORE_MODE(ICODE) ... Defines the machine mode to use for the
+/* TARGET_CSTORE_MODE(ICODE) ... Defines the machine mode to use for the
+ * boolean result of conditional store patterns.  The ICODE argument is the
+ * boolean result of conditional store patterns.  The ICODE argument is the
+ * instruction code for the cstore being performed.  Not defining this hook is
+ * instruction code for the cstore being performed.  Not defining this hook is
+ * the same as accepting the mode encoded into operand 0 of the cstore expander
+ * the same as accepting the mode encoded into operand 0 of the cstore expander
+ * patterns.
+ * patterns.
+ *
+ *
+ * ??? ZipCPU --- I don't follow this documentation.  We'll leave this at the
+ * ??? ZipCPU --- I don't follow this documentation.  We'll leave this at the
+ * default therefore.
+ * default therefore.
+ */
+ */
+
+
+/* 17.09 Stack Layout and Calling Conventions */
+/* 17.09 Stack Layout and Calling Conventions */
+
+
+
+
+/* STACK_GROWS_DOWNWARD ... Define this macro to be nonzero if pushing a word
+/* STACK_GROWS_DOWNWARD ... Define this macro to be nonzero if pushing a word
+ * onto the stack moves the stack pointer to a smaller address, and zero otherwise.
+ * onto the stack moves the stack pointer to a smaller address, and zero otherwise.
+ *
+ *
+ * ZipCPU ... well, our stack does grow downward, but it doesn't do so auto-
+ * ZipCPU ... well, our stack does grow downward, but it doesn't do so auto-
+ * magically.  We have to move the stack pointer ourselves.  However, since this
+ * magically.  We have to move the stack pointer ourselves.  However, since this
+ * is our convention, we'll define it as such.
+ * is our convention, we'll define it as such.
+ */
+ */
+#undef STACK_GROWS_DOWNWARD
+#undef STACK_GROWS_DOWNWARD
+#define        STACK_GROWS_DOWNWARD    1
+#define        STACK_GROWS_DOWNWARD    1
+
+
+/* STACK_PUSH_CODE ... This macro defines the operation used when something is
+/* STACK_PUSH_CODE ... This macro defines the operation used when something is
+ * pushed on the stack.  In RTL, a push operation will be
+ * pushed on the stack.  In RTL, a push operation will be
+ * (set (mem (STACK_PUSH_CODE (reg sp))) ...).  The choices are PRE_DEC, POST_DEC,
+ * (set (mem (STACK_PUSH_CODE (reg sp))) ...).  The choices are PRE_DEC, POST_DEC,
+ * PRE_INC, and POST_INC.  Which of these is correct depends on the stack
+ * PRE_INC, and POST_INC.  Which of these is correct depends on the stack
+ * direction and on whether the stack pointer points to the last item on the
+ * direction and on whether the stack pointer points to the last item on the
+ * stack or whether it points to the space for the next item on the stack.
+ * stack or whether it points to the space for the next item on the stack.
+ * The default is PRE_DEC when STACK_GROWS_DOWNWARD is true, which is almost
+ * The default is PRE_DEC when STACK_GROWS_DOWNWARD is true, which is almost
+ * always right, and PRE_INC otherwise, which is often wrong.
+ * always right, and PRE_INC otherwise, which is often wrong.
+ *
+ *
+ * ZipCPU --- None of these is right, so let's leave this at the default and
+ * ZipCPU --- None of these is right, so let's leave this at the default and
+ * see how badly we get mangled.  In particular, ZipCPU doesn't have any of the
+ * see how badly we get mangled.  In particular, ZipCPU doesn't have any of the
+ * PRE_DEC, POST_DEC, PRE_INC, or POST_INC addressing modes used here.
+ * PRE_DEC, POST_DEC, PRE_INC, or POST_INC addressing modes used here.
+ */
+ */
+
+
+/* FRAME_GROWS_DOWNWARD ... Define this macro to nonzero if the addresses of
+/* FRAME_GROWS_DOWNWARD ... Define this macro to nonzero if the addresses of
+ * local variable slots are at negative offsets from the frame pointer.
+ * local variable slots are at negative offsets from the frame pointer.
+ *
+ *
+ * ZipCPU --- If the frame pointer is defined as the stack pointer upon the
+ * ZipCPU --- If the frame pointer is defined as the stack pointer upon the
+ * start of function execution, and that stack pointer grows downward, then
+ * start of function execution, and that stack pointer grows downward, then
+ * this should be the case as well.
+ * this should be the case as well.
+ */
+ */
+#undef FRAME_GROWS_DOWNWARD
+#undef FRAME_GROWS_DOWNWARD
+#define        FRAME_GROWS_DOWNWARD    1
+#define        FRAME_GROWS_DOWNWARD    1
+// #define     FRAME_GROWS_DOWNWARD    0        // This was ECO32's value
+// #define     FRAME_GROWS_DOWNWARD    0        // This was ECO32's value
+
+
+
+
+/* ARGS_GROW_DOWNWARD ... Define this macro if successive arguments to a
+/* ARGS_GROW_DOWNWARD ... Define this macro if successive arguments to a
+ * function occupy decreasing addresses on the stack.
+ * function occupy decreasing addresses on the stack.
+ *
+ *
+ * ZipCPU -- we can leave this up to the compiler's preferred implementation,
+ * ZipCPU -- we can leave this up to the compiler's preferred implementation,
+ * it is of no consequence to the hardware.
+ * it is of no consequence to the hardware.
+ */
+ */
+
+
+/* STARTING_FRAME_OFFSET ... Offset from the frame pointer to the first local
+/* STARTING_FRAME_OFFSET ... Offset from the frame pointer to the first local
+ * variable slot to be allocated.  If FRAME_GROWS_DOWNWARD, find the next slot's
+ * variable slot to be allocated.  If FRAME_GROWS_DOWNWARD, find the next slot's
+ * offset by subtracting the first slot's length from STARTING_FRAME_OFFSET.
+ * offset by subtracting the first slot's length from STARTING_FRAME_OFFSET.
+ * Otherwise it is found by adding the length of the first slot to the value
+ * Otherwise it is found by adding the length of the first slot to the value
+ * STARTING_FRAME_OFFSET.
+ * STARTING_FRAME_OFFSET.
+ *
+ *
+ * ZipCPU --- I'm not certain on this, let's come back after we look at how
+ * ZipCPU --- I'm not certain on this, let's come back after we look at how
+ * the code is getting generated.  However, the ECO32 code I am copying from
+ * the code is getting generated.  However, the ECO32 code I am copying from
+ * suggests that 0 is the right value, so we'll use that here.
+ * suggests that 0 is the right value, so we'll use that here.
+ */
+ */
+// #warning "Re-evaluate me" --- I did.  This still looks good.
+// #warning "Re-evaluate me" --- I did.  This still looks good.
+#define        STARTING_FRAME_OFFSET   0
+#define        STARTING_FRAME_OFFSET   0
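+
+/* Worked example (illustration only, not from the GCC documentation): with
+ * FRAME_GROWS_DOWNWARD set to 1 and STARTING_FRAME_OFFSET set to 0, a first
+ * local slot of size S is placed at offset
+ *
+ *     0 - S = -S              // i.e. it occupies bytes [fp-S, fp)
+ *
+ * and a second slot of size S lands at -2*S, so the local variable area
+ * simply extends downward from the (soft) frame pointer.
+ */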
+
+/* STACK_ALIGNMENT_NEEDED ... Define to zero to disable final alignment of the
+ * stack during reload.  The nonzero default for this macro is suitable for most
+ * ports.
+ *
+ * ZipCPU --- Yes, our stack needs to be aligned.  The default should work
+ * nicely.
+ */
+
+/* STACK_POINTER_OFFSET ... Offset from the SP register to the first location at
+ * which outgoing arguments are placed.  If not specified, the default value
+ * of zero is used.  This is the proper value for most machines.
+ */
+#define        STACK_POINTER_OFFSET    0
+
+/* FIRST_PARM_OFFSET ... Offset from the argument pointer register to the first
+ * argument's address.  On some machines it may depend on the data type of the
+ * function.
+ */
+#define        FIRST_PARM_OFFSET(F)    0
+
+/* STACK_DYNAMIC_OFFSET(F) ... Offset from the stack pointer register to an item
+ * dynamically allocated on the stack, e.g., by alloca.  The default value for
+ * this macro is STACK_POINTER_OFFSET plus the length of the outgoing arguments.
+ * The default is correct for most machines, ...
+ *
+ * ZipCPU --- so we'll use it for the ZipCPU.
+ */
+
+/* INITIAL_FRAME_ADDRESS_RTX ... A C expression whose value is RTL representing
+ * the address of the initial stack frame.  This address is passed to
+ * RETURN_ADDR_RTX and DYNAMIC_CHAIN_ADDRESS.  If you don't define this macro,
+ * a reasonable default value will be used.  Define this macro in order to make
+ * frame pointer elimination work in the presence of __builtin_frame_address(C)
+ * and __builtin_return_address(C) for (C) not equal to zero.
+ *
+ * ZipCPU --- Let's try the reasonable default and see what happens.
+ */
+
+/* SETUP_FRAME_ADDRESSES ... A C expression that produces the machine-specific
+ * code to setup the stack so that arbitrary frames can be accessed.  For
+ * example, on the SPARC, we must flush all of the register windows to the stack
+ * before we can access arbitrary stack frames.  You will seldom need to define
+ * this macro.  The default is to do nothing.
+ *
+ * ZipCPU --- which is what we shall do here.
+ */
+
+/* TARGET_BUILTIN_SETJMP_FRAME_VALUE(VOID) ... This target hook should return
+ * an RTX that is used to store the address of the current frame into the
+ * builtin setjmp buffer.  The default value, virtual_stack_vars_rtx, is correct
+ * for most machines.  One reason you may need to define this target hook is if
+ * hard_frame_pointer_rtx is the appropriate value on your machine.
+ *
+ * ZipCPU --- leave this undefined, since the default value should be correct
+ * for "most" machines.
+ */
+
+/* FRAME_ADDR_RTX ... most machines do not need to define it.
+ */
+
+/* RETURN_ADDR_RTX(COUNT,FRAMEADDR) ... A C expression whose value is RTL
+ * representing the value of the return address for the frame COUNT steps up
+ * from the current frame, after the prologue.  FRAMEADDR is the frame pointer
+ * of the COUNT frame, or the frame pointer of the COUNT-1 frame if
+ * RETURN_ADDR_IN_PREVIOUS_FRAME is nonzero.  The value of the expression must
+ * always be the correct address when COUNT is nonzero, but may be NULL_RTX if
+ * there is no way to determine the return address of other frames.
+ *
+ * ZipCPU --- Our answer for the current frame is ... it depends.  If we can
+ * force the use of a frame in every debug context, we could compute this for
+ * COUNT != 0.  For now, we'll just look at the registers we save and return
+ * where the return address is in the current frame.  To do that, though, we
+ * need some help from C.
+ */
+#undef RETURN_ADDR_RTX
+#define        RETURN_ADDR_RTX(COUNT,FRAMEADDR)        zip_return_addr_rtx(COUNT,FRAMEADDR)
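+
+/* The real zip_return_addr_rtx() lives in zip.c.  As a minimal sketch only
+ * (not the actual implementation), the behavior described above---answer for
+ * the current frame, give up on outer frames---might look roughly like:
+ *
+ *   rtx zip_return_addr_rtx(int count, rtx frame ATTRIBUTE_UNUSED) {
+ *           if (count != 0)
+ *                   return NULL_RTX;  // no reliable answer for outer frames
+ *           // The value the link register held on entry to this function
+ *           return get_hard_reg_initial_val(Pmode, zip_LR);
+ *   }
+ *
+ * get_hard_reg_initial_val() is the standard GCC helper other ports use for
+ * exactly this purpose; the details here are a sketch, not the port's code.
+ */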
+
+/* RETURN_ADDR_IN_PREVIOUS_FRAME ... Define this macro to nonzero value if the
+ * return address of a particular stack frame is accessed from the frame pointer
+ * of the previous stack frame.  The zero default for this macro is suitable
+ * for most ports.
+ *
+ * ZipCPU---Default works here as well.
+ */
+
+/* INCOMING_RETURN_ADDR_RTX ... A C expression whose value is RTL representing
+ * the location of the incoming return address at the beginning of any function,
+ * before the prologue.  This RTL is either a REG, indicating that the return
+ * address is saved in 'REG', or a MEM representing the location in the stack.
+ * If this RTL is a REG, you should define DWARF_RETURN_COLUMN to
+ * DWARF_FRAME_REGNUM(REGNO).
+ *
+ * ZipCPU --- While our incoming return address could theoretically be in any
+ * register, our machine description file is going to place it into register
+ * R0, so that's what we return here.
+ */
+#undef INCOMING_RETURN_ADDR_RTX
+#define        INCOMING_RETURN_ADDR_RTX        gen_rtx_REG(SImode, zip_LR)
+
+
+/* DWARF_ALT_FRAME_RETURN_COLUMN
+ */
+
+/* DWARF_ZERO_REG ... A C expression whose value is an integer giving a DWARF2
+ * register number that is considered to always have the value zero.  This
+ * should only be defined if the target has an architected zero register (ZipCPU
+ * does not), and someone decided it was a good idea to use that register number
+ * to terminate the stack backtrace.  New ports should avoid this (so the
+ * ZipCPU port will avoid it as well).
+ *
+ */
+
+/* TARGET_DWARF_HANDLE_FRAME_UNSPEC
+ */
+
+/* INCOMING_FRAME_SP_OFFSET ... A C expression whose value is an integer giving
+ * the offset, in bytes, from the value of the stack pointer register to the
+ * top of the stack frame at the beginning of any function, before the prologue.
+ * The top of the frame is defined to be the value of the stack pointer in the
+ * previous frame, just before the call instruction.
+ *
+ * You only need to define this macro if you want to support call frame
+ * debugging information like that provided by DWARF 2.
+ *
+ * ZipCPU---Our value is zero.
+ */
+#define        INCOMING_FRAME_SP_OFFSET        0
+
+/* ARG_POINTER_CFA_OFFSET
+ */
+
+/* FRAME_POINTER_CFA_OFFSET
+ */
+
+/* CFA_FRAME_BASE_OFFSET
+ */
+
+/* 17.09.02 Exception handling support */
+
+/* EH_RETURN_DATA_REGNO(N) ... A C expression whose value is the Nth register
+ * number used for data by exception handlers, or INVALID_REGNUM if fewer than
+ * N registers are usable.  The exception handling library routines communicate
+ * with the exception handlers via a set of agreed upon registers.  Ideally
+ * these registers should be call clobbered; it is possible to use call-saved
+ * registers, but may negatively impact code size.  The target must support at
+ * least 2 data registers, but should define 4 if there are enough free
+ * registers.
+ *
+ * You must define this macro if you want to support call frame exception
+ * handling like that provided by DWARF 2.
+ *
+ * ZipCPU -- We copy much of our definition from Moxie.
+ */
+#define        EH_RETURN_DATA_REGNO(N) ((N<3)?(N+ZIP_FIRST_ARG_REGNO):INVALID_REGNUM)
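+
+/* For clarity, the definition above expands to three exception-handling data
+ * registers (one more than the required minimum of two):
+ *
+ *   EH_RETURN_DATA_REGNO(0) == ZIP_FIRST_ARG_REGNO
+ *   EH_RETURN_DATA_REGNO(1) == ZIP_FIRST_ARG_REGNO + 1
+ *   EH_RETURN_DATA_REGNO(2) == ZIP_FIRST_ARG_REGNO + 2
+ *   EH_RETURN_DATA_REGNO(3) == INVALID_REGNUM
+ */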
+
+/* EH_RETURN_STACKADJ_RTX ... A C expression whose value is RTL representing
+ * a location in which to store a stack adjustment to be applied before function
+ * return.  This is used to unwind the stack to an exception handler's call
+ * frame.  It will be assigned zero on code paths that return normally.
+ *
+ * Do not define this macro if the stack pointer is saved and restored by the
+ * regular prolog and epilog code in the call frame itself (which it is for the
+ * ZipCPU); in this case, the exception handling library routines will update
+ * the stack location to be restored in place.  Otherwise, you must define this
+ * macro if you want to support call frame exception handling like that provided
+ * by DWARF 2.
+ *
+ */
+
+/* EH_RETURN_HANDLER_RTX ... A C expression whose value is RTL representing a
+ * location in which to store the address of an exception handler to which we
+ * should return.  It will not be assigned on code paths that return normally.
+ *
+ * Typically this is the location in the call frame at which the normal return
+ * address is stored.  For targets that return by popping an address off the
+ * stack, this might be a memory address just below the target call frame
+ * rather than inside the current call frame.  If defined,
+ * EH_RETURN_STACKADJ_RTX will have already been assigned, so it may be used
+ * to calculate the location of the target call frame.
+ *
+ * If you want to support call frame exception handling, you must define either
+ * this macro or the eh_return instruction pattern.
+ *
+ * ZipCPU --- We again copy from Moxie.
+ */
+#define        EH_RETURN_HANDLER_RTX   \
+       gen_frame_mem(Pmode, plus_constant(Pmode, frame_pointer_rtx, UNITS_PER_WORD))
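+
+/* In RTL terms, the expression above builds a frame-based memory reference
+ * roughly of the form
+ *
+ *   (mem (plus (reg frame_pointer) (const_int UNITS_PER_WORD)))
+ *
+ * i.e. the word just above the (soft) frame pointer, presumed to be the slot
+ * in which the call frame holds the normal return address.
+ */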
+
+/*
+ *
+ *
+ *
+ *   REST OF SECTION SKIPPED ...
+ *
+ *
+ *
+ */
+
+/* 17.09.03 Specifying how stack checking is done */
+
+/* STACK_CHECK_BUILTIN ... a non-zero value if stack checking is done by the
+ * configuration files in a machine-dependent manner.  You should define this
+ * macro if stack checking is required by the ABI of your machine or if you
+ * would like to do stack checking in some more efficient way than the generic
+ * approach.  The default value of this macro is zero.
+ *
+ * ZipCPU --- The default makes sense for us.
+ */
+// #define STACK_CHECK_BUILTIN 0
+
+/* STACK_CHECK_STATIC_BUILTIN ... A nonzero value if static stack checking is
+ * done by the configuration files in a machine-dependent manner.  You should
+ * define this macro if you would like to do static stack checking in some more
+ * efficient way than the generic approach.  The default value of this macro
+ * is zero.
+ *
+ * ZipCPU --- The default makes sense for us.
+ */
+
+/* STACK_CHECK_PROBE_INTERVAL_EXP ...  An integer specifying the interval at
+ * which GCC must generate stack probe instructions, defined as 2 raised to this
+ * interval.  You will normally define this macro so that the interval is no
+ * larger than the size of the "guard pages" at the end of a stack area.  The
+ * default value of 12 (4096-byte interval) is suitable for most systems.
+ *
+ * ZipCPU --- Default.
+ */
+
+/* STACK_CHECK_MOVING_SP ... An integer which is non-zero if GCC should move
+ * the stack pointer page by page when doing probes.  This can be necessary
+ * on systems where the stack pointer contains the bottom address of the memory
+ * area accessible to the executing thread at any point in time.  In this
+ * situation, an alternate signal stack is required in order to be able to
+ * recover from a stack overflow.  The default value of this macro is zero.
+ *
+ * ZipCPU -- Default.
+ */
+
+/* STACK_CHECK_PROTECT
+ */
+/* STACK_CHECK_MAX_FRAME_SIZE
+ * ... you should normally not change the default value of this macro.
+ */
+/* STACK_CHECK_FIXED_FRAME_SIZE
+ * ... you ... will normally use the default of four words.
+ */
+
+/* STACK_CHECK_MAX_VAR_SIZE
+ * ... you will normally not need to override that default.
+ */
+
+/* 17.09.04 Registers that Address the Stack Frame */
+
+/* STACK_POINTER_REGNUM ... The register number of the stack pointer register,
+ * which must also be a fixed register according to FIXED_REGISTERS.  On most
+ * machines, the hardware determines which register this is.
+ */
+#undef STACK_POINTER_REGNUM
+#define        STACK_POINTER_REGNUM    zip_SP
+
+/* FRAME_POINTER_REGNUM ... The register number of the frame pointer register,
+ * which is used to access certain automatic variables in the stack frame.  On
+ * some machines, the hardware determines which register this is.  On other
+ * machines you can choose any register you wish for this purpose.
+ *
+ * ZipCPU --- While I'd like to dump this pointer, since I don't really see
+ * a need for it, alloca() requires it.  Therefore let's assign a register to
+ * this purpose and watch what the compiler does with it.
+ */
+#define        FRAME_POINTER_REGNUM    zip_FP
+
+/* HARD_FRAME_POINTER_REGNUM ... On some machines the offset between the frame
+ * pointer and starting offset of the automatic variables is not known until
+ * after register allocation has been done (for example, because the saved
+ * registers are between these two locations).  On those machines, define
+ * FRAME_POINTER_REGNUM to be the number of a special, fixed register to be used
+ * internally until the offset is known, and define HARD_FRAME_POINTER_REGNUM
+ * to be the actual hard register number used for the frame pointer.
+ *
+ * Do not define this macro if it would be the same as FRAME_POINTER_REGNUM.
+ *
+ * ZipCPU --- we define this to the same frame pointer register.
+ */
+#define HARD_FRAME_POINTER_REGNUM      zip_FP
+
+/* ARG_POINTER_REGNUM ... The register number of the arg pointer register, which
+ * is used to access the function's argument list.  On some machines, this is
+ * the same as the frame pointer register.  On some machines, the hardware
+ * determines which register this is.  On other machines, you can choose any
+ * register you wish for this purpose.  If this is not the same register as the
+ * frame pointer register, then you must mark it as a fixed register according
+ * to FIXED_REGISTERS, or arrange to be able to eliminate it.
+ *
+ * ZipCPU --- We really don't want to lose another register to something
+ * pointless, so let's set this to be the frame pointer register.  Especially
+ * given the ZipCPU's ease of accessing things via offsets of registers, this
+ * should work for a rather large stack frame.
+ *
+ * However ... we had problems with passing 6+ variables on the stack, so let's
+ * try creating a pseudo register for this, and appropriately adjusting the
+ * offset between this pseudo register and the stack pointer ...
+ */
+#define        ARG_POINTER_REGNUM      zip_AP_PSEUDO
+
+/* HARD_FRAME_POINTER_IS_FRAME_POINTER ... define this to be a preprocessor
+ * constant that is nonzero if hard_frame_pointer_rtx and frame_pointer_rtx
+ * should be the same.  The default definition is sufficient for us.
+ */
+
+/* HARD_FRAME_POINTER_IS_ARG_POINTER ...
+ * ZipCPU doesn't need this macro.
+ */
+
+/* RETURN_ADDRESS_POINTER_REGNUM ... The register number of the return address
+ * pointer register, which is used to access the current function's return
+ * address from the stack.  On some machines, the return address is not at a
+ * fixed offset from the frame pointer or stack pointer or argument pointer.
+ * This register can be defined to point to the return address on the stack, and
+ * then to be converted by ELIMINABLE_REGS into either the frame pointer or the
+ * stack pointer.
+ *
+ * Do not define this macro unless there is no other way to get the return
+ * address from the stack.
+ *
+ * ZipCPU---we need this.
+ */
+#define        RETURN_ADDRESS_REGNUM   zip_LR
+
+
+/* STATIC_CHAIN_REGNUM ... Register numbers used for passing a function's
+ * static chain pointer.  If register windows are used, the register number as
+ * seen by the called function is STATIC_CHAIN_INCOMING_REGNUM, while the
+ * register number as seen by the calling function is STATIC_CHAIN_REGNUM.  If
+ * these registers are the same, STATIC_CHAIN_INCOMING_REGNUM need not be
+ * defined.
+ *
+ * ZipCPU --- even without register windows, we still need to pick an
+ * (arbitrary) register to pass the pointer to the static chain in the case of
+ * nested functions.  Let's arbitrarily pick R5, and ... see how that works for
+ * us.
+ */
+#define        STATIC_CHAIN_REGNUM     zip_R5
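+
+/* To make the static chain concrete, the classic case that needs it is a GNU C
+ * nested function referring to its parent's locals (illustration only):
+ *
+ *   int outer(int x) {
+ *           int inner(int y) { return x + y; }  // 'inner' reads outer's 'x'
+ *           return inner(1);
+ *   }
+ *
+ * When inner() is called, the address of outer's frame travels in the static
+ * chain register chosen above (R5), so inner() can reach 'x'.
+ */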
+
+/* TARGET_STATIC_CHAIN ... This hook replaces the use of STATIC_CHAIN_REGNUM et
+ * al for targets that may use different static chain locations for different
+ * nested functions.  This may be required if the target has function attributes
+ * that affect the calling conventions of the function and those calling
+ * conventions use different static chain locations.
+ *
+ * ZipCPU --- don't need this.
+ */
+// #define     TARGET_STATIC_CHAIN     zip_R11
+
+
+/* DWARF_FRAME_REGISTERS ... This macro specifies the maximum number of hard
+ * registers that can be saved in a call frame.  This is used to size data
+ * structures used in DWARF2 exception handling.
+ *
+ * Prior to GCC 3.0, this macro was needed in order to establish a stable
+ * exception handling ABI in the face of adding new hard registers for ISA
+ * extensions.  In GCC 3.0 and later, the EH ABI is insulated from changes in
+ * the number of hard registers.  Nevertheless, this macro can still be used to
+ * reduce the runtime memory requirements of the exception handling routines,
+ * which can be substantial if the ISA contains a lot of registers that are not
+ * call-saved.
+ *
+ * If this macro is not defined, it defaults to FIRST_PSEUDO_REGISTER.
+ *
+ * ZipCPU --- The default is not sufficient.  The CC and PC registers need to
+ * be saved and examined as well in any debug/exception context.  Hence, we
+ * define this to be all of our registers.
+ */
+#undef DWARF_FRAME_REGISTERS
+#define        DWARF_FRAME_REGISTERS   16
+
+/* PRE_GCC3_DWARF_FRAME_REGISTERS ... This macro is similar to DWARF_FRAME_REG..
+ * but is provided for backward compatibility in pre GCC 3.0 compiled code.
+ *
+ * If not defined, it defaults to DWARF_FRAME_REGISTERS---which is perfect for
+ * the ZipCPU.
+ */
+
+/* DWARF_REG_TO_UNWIND_COLUMN(REGNO) ... Define this macro if the target's
+ * representation for dwarf registers is different than the internal
+ * representation for unwind column.  Given a dwarf register, this macro should
+ * return the unwind column number to use instead.
+ *
+ * ... ???
+ */
+
+/* DWARF_FRAME_REGNUM(REGNO) ... Define this macro if the target's
+ * representation for dwarf registers used in .eh_frame or .debug_frame is
+ * different from that used in other debug info sections.  Given a GCC hard
+ * register number, this macro should return the .eh_frame register number.
+ * The default is DBX_REGISTER_NUMBER(REGNO).
+ *
+ * ZipCPU --- provided we define DBX_REGISTER_NUMBER(REGNO) well, this default
+ * should still work for us.
+ */
+
+/* DWARF2_FRAME_REG_OUT(REGNO, FOR_EH) ... Define this macro to map register
+ * numbers held in the call frame info that GCC has collected using
+ * DWARF_FRAME_REGNUM to those that should be output in .debug_frame (for_eh is
+ * zero) and .eh_frame (for_eh is non-zero).  The default is to return REGNO.
+ *
+ * ZipCPU --- Default is good enough.
+ */
+
+/* REG_VALUE_IN_UNWIND_CONTEXT ... Define this macro if the target stores
+ * register values as _Unwind_Word type in unwind context.  It should be defined
+ * if target register size is larger than the size of void *.  The default
+ * is to store register values as void * type.
+ *
+ * ZipCPU --- Default is what we need.
+ */
+
+/* ASSUME_EXTENDED_UNWIND_CONTEXT ... Define this macro to be 1 if the target
+ * always uses extended unwind context with version, args_size, and by_value
+ * fields.  If it is undefined, it will always be defined to 1 when
+ * REG_VALUE_IN_UNWIND_CONTEXT is defined and 0 otherwise.
+ *
+ */
+
+
+/* 17.09.05 Eliminating Frame Pointer and Arg Pointer */
+
+/* TARGET_FRAME_POINTER_REQUIRED(VOID) ... This target hook should return true
+ * if a function must have and use a frame pointer.  This target hook is
+ * called in the reload pass.  If its return value is true, the function will
+ * have a frame pointer.
+ *
+ * This target hook can in principle examine the current function and decide
+ * according to the facts, but on most machines the constant false or the
+ * constant true suffices.  Use false when the machine allows code to be
+ * generated with no frame pointer, and doing so saves some time or space.
+ * Use true when there is no possible advantage to avoiding a frame pointer.
+ *
+ * ZipCPU---if we add in a frame pointer, we become register starved.  Hence,
+ * we'll treat this as a constant false--which is also the default value.
+ */
+#define        TARGET_FRAME_POINTER_REQUIRED   zip_frame_pointer_required
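+
+/* The real zip_frame_pointer_required() is implemented in zip.c.  A minimal
+ * sketch consistent with the comments in this file (a frame pointer only when
+ * something like alloca() forces one) might read---sketch only, not the
+ * actual port code:
+ *
+ *   static bool zip_frame_pointer_required(void) {
+ *           // Variable-sized frames (alloca) are the case that needs one
+ *           return (cfun->calls_alloca != 0);
+ *   }
+ */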
+
+/* INITIAL_FRAME_POINTER_OFFSET ... A C statement to store in the variable
+ * depth-var the difference between the frame pointer and the stack pointer
+ * values immediately after the function prologue.  The value would be computed
+ * from information such as the result of get_frame_size() and the tables of
+ * registers regs_ever_live and call_used_regs.
+ *
+ * If ELIMINABLE_REGS is defined, this macro will not be used and need not be
+ * defined.  Otherwise, it must be defined even if TARGET_FRAME_POINTER_REQUIRED
+ * always returns true; in that case you may set depth-var to anything.
+ *
+ * ZipCPU --- we intend to set ELIMINABLE_REGS, so this is not necessary.
+ */
+// #define     INITIAL_FRAME_POINTER_OFFSET(DEPTH)     (DEPTH) = 0
+
+
+/* ELIMINABLE_REGS ... If defined, this macro specifies a table of register
+ * pairs used to eliminate unneeded registers that point into the stack frame.
+ * If it is not defined, the only elimination attempted by the compiler is to
+ * replace references to the frame pointer with references to the stack pointer.
+ *
+ * On some machines, the position of the argument pointer is not known until
+ * the compilation is completed.  In such a case, a separate hard register
+ * must be used for the argument pointer.  This register can be eliminated by
+ * replacing it with either the frame pointer or the argument pointer,
+ * depending on whether or not the frame pointer has been eliminated.
+ *
+ * ZipCPU --- we'll take their suggestion and define this as:
+ */
+#undef ELIMINABLE_REGS
+#define        ELIMINABLE_REGS \
+       {{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM },  \
+        { ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM },  \
+        { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}}
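+
+/* What this buys us, concretely: once the final frame layout is known, a
+ * reference such as (plus (reg argp) (const_int 8)) can be rewritten by the
+ * compiler as
+ *
+ *   (plus (reg sp) (const_int 8 + INITIAL_ELIMINATION_OFFSET(argp, sp)))
+ *
+ * so neither the argument pointer pseudo nor (usually) the frame pointer has
+ * to occupy a real register at run time.
+ */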
+
+/* bool TARGET_CAN_ELIMINATE(FROM,TO) ... This target function should return
+ * true if the compiler is allowed to try to replace register number FROM with
+ * register number TO.  This target hook need only be defined if ELIMINABLE_REGS
+ * is defined, and will usually return true since most of the cases preventing
+ * register elimination are things that the compiler already knows about.
+ *
+ * ZipCPU ... does the compiler know about my decision as to whether or not
+ * the frame pointer was needed?  Yes it does, but it's kept separately.  We'll
+ * just say everything can be eliminated.
+ */
+#define TARGET_CAN_ELIMINATE   zip_can_eliminate
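+
+/* The real zip_can_eliminate() lives in zip.c; a sketch that matches the
+ * "everything can be eliminated" policy above would simply be:
+ *
+ *   static bool zip_can_eliminate(const int from ATTRIBUTE_UNUSED,
+ *                                 const int to   ATTRIBUTE_UNUSED) {
+ *           return true;
+ *   }
+ */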
+
+/* INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) ... This macro is similar to
+ * INITIAL_FRAME_POINTER_OFFSET.  It specifies the initial difference between
+ * the specified pair of registers.  This macro must be defined if
+ * ELIMINABLE_REGS is defined.
+ *
+ * ZipCPU---We had at one time set this to a default offset of 0.  This didn't
+ * work.  It turns out that this is not only the *initial* elimination offset,
+ * but also the offset along the way.  Hence, when a variable needs to be
+ * spilled to the stack, this offset must change.  Reload goes and checks for
+ * this, and adjusts registers if the offset has changed.  Hence, without this,
+ * we get negative (i.e. illegal) stack offsets.
+ */
+#define        INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET)            \
+       do { (OFFSET) = zip_initial_elimination_offset((FROM), (TO)); } \
+       while(0)
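+
+/* The real zip_initial_elimination_offset() is defined in zip.c.  As a rough,
+ * hypothetical sketch of its shape (the helper name zip_saved_reg_space() is
+ * made up here purely for illustration):
+ *
+ *   int zip_initial_elimination_offset(int from, int to) {
+ *           // Locals + outgoing argument block + callee-saved register area
+ *           int frame = get_frame_size() + crtl->outgoing_args_size
+ *                           + zip_saved_reg_space();
+ *           if ((from == ARG_POINTER_REGNUM) && (to == STACK_POINTER_REGNUM))
+ *                   return frame;   // incoming args sit above the whole frame
+ *           return 0;               // other pairs: compute as appropriate
+ *   }
+ *
+ * The important point, per the note above, is that the result must track the
+ * *current* frame size, not just the size at the start of compilation.
+ */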
+
+/* 17.09.06 Passing function arguments on the stack */
+
+/* TARGET_PROMOTE_PROTOTYPES ... Returns true if an argument declared in a
+ * prototype as an integral type smaller than int should actually be
+ * passed as an int.  In addition to avoiding errors in certain cases of
+ * mismatch, it also makes for better code on certain machines.  The default is
+ * to not promote prototypes.
+ *
+ * Since everything is an int on the ZipCPU, let's promote anything smaller
+ * (which should still be an int) up to an int anyway.
+ */
+#undef TARGET_PROMOTE_PROTOTYPES
+#define        TARGET_PROMOTE_PROTOTYPES       hook_bool_const_tree_true
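+
+/* Illustration of the effect (not ZipCPU-specific): given a prototype such as
+ *
+ *   char inc(char c);
+ *
+ * a call inc(x) now passes its argument widened to an int.  Since every ZipCPU
+ * register is 32 bits wide anyway, the promotion costs nothing.
+ */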
+
+/* PUSH_ARGS ... A C expression.  If nonzero, push instructions will be used to
+ * pass outgoing arguments.  If the target machine does not have a push
+ * instruction, set it to zero.  That directs GCC to use an alternate strategy:
+ * to allocate the entire argument block and then store the arguments into it.
+ * When PUSH_ARGS is nonzero, PUSH_ROUNDING must be defined too.
+ *
+ * ZipCPU does not have a push instruction, so we set this to zero.
+ */
+#undef PUSH_ARGS
+#define        PUSH_ARGS       0
+
+/* PUSH_ARGS_REVERSED ... A C expression.  If nonzero, function arguments will
+ * be evaluated last to first, rather than first to last.  If this macro is
+ * not defined, it defaults to PUSH_ARGS on targets where the stack and args
+ * grow in opposite directions, and zero otherwise.
+ *
+ * ZipCPU---Let's evaluate our arguments last to first.
+ */
+#define        PUSH_ARGS_REVERSED      1
+
+/* PUSH_ROUNDING(NPUSHED) ... A C expression that is the number of bytes
+ * actually pushed onto the stack when an instruction attempts to push
+ * (NPUSHED) bytes.
+ *
+ * ZipCPU---We cannot push bytes.  Let's leave this undefined and see what
+ * happens.
+ */
+// #warning "No appropriate definition seemed right."
+
+/* ACCUMULATE_OUTGOING_ARGS ... A C expression.  If non-zero, the maximum amount
+ * of space required for outgoing arguments will be computed and placed into
+ * crtl->outgoing_args_size.  No space will be pushed onto the stack for each
+ * call; instead the function prologue should increase the stack frame size by
+ * this amount.
+ *
+ * ZipCPU---This is *cool* and so necessary---it saves an extra two instructions
+ * each time we try to call a function/routine.  Yes, we want and *need* this
+ * for good performance.  I mean, think of it, free performance increase?  Who
+ * could argue with that?
+ */
+#undef ACCUMULATE_OUTGOING_ARGS
+#define        ACCUMULATE_OUTGOING_ARGS        1
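+
+/* To see the instructions being saved, compare the two strategies in schematic
+ * pseudo-assembly (not exact ZipCPU output):
+ *
+ *   without ACCUMULATE_OUTGOING_ARGS, around *every* call:
+ *         SUB  <argspace>,SP
+ *         ... store any stack arguments ...
+ *         JSR  callee
+ *         ADD  <argspace>,SP
+ *
+ *   with it, the prologue grows the frame once by crtl->outgoing_args_size,
+ *   and each call site simply stores its arguments into that fixed area.
+ */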
+
+
+
+
+/* REG_PARM_STACK_SPACE(FN) ... Define this macro if functions should assume
+/* REG_PARM_STACK_SPACE(FN) ... Define this macro if functions should assume
+ * that stack space has been allocated for arguments even when their values
+ * that stack space has been allocated for arguments even when their values
+ * are passed in registers.  The value of this macro is the size, in bytes, of
+ * are passed in registers.  The value of this macro is the size, in bytes, of
+ * the area reserved for arguments passed in registers for the function
+ * the area reserved for arguments passed in registers for the function
+ * represented by FN, which can be zero if GCC is calling a library function.
+ * represented by FN, which can be zero if GCC is calling a library function.
+ * The argument FN can be the FUNCTION_DECL, or the type itself of the function.
+ * The argument FN can be the FUNCTION_DECL, or the type itself of the function.
+ *
+ *
+ * This space can be allocated by the caller, or be part of the machine
+ * This space can be allocated by the caller, or be part of the machine
+ * dependent stack frame: OUTGOING_REG_PARM_STACK_SPACE says which.
+ * dependent stack frame: OUTGOING_REG_PARM_STACK_SPACE says which.
+ *
+ *
+ * ZipCPU --- Why allocate space you won't use?  Let's leave this undefined
+ * ZipCPU --- Why allocate space you won't use?  Let's leave this undefined
+ * therefore.
+ * therefore.
+ */
+ */
+// #undef      REG_PARM_STACK_SPACE
+// #undef      REG_PARM_STACK_SPACE
+
+
+
+
+
+
+/* INCOMING_REG_PARM_STACK_SPACE(FN) ... Like REG_PARM_STACK_SPACE, but for
+/* INCOMING_REG_PARM_STACK_SPACE(FN) ... Like REG_PARM_STACK_SPACE, but for
+ * incoming register arguments.  Define this macro if space guaranteed when
+ * incoming register arguments.  Define this macro if space guaranteed when
+ * compiling a function body is different to space required when making a call,
+ * compiling a function body is different to space required when making a call,
+ * a situation that can arise with K&R style function definitions.
+ * a situation that can arise with K&R style function definitions.
+ *
+ *
+ */
+ */
+
+
+/* OUTGOING_REG_PARM_STACK_SPACE(FN) ... Define this to a nonzero value if it
+/* OUTGOING_REG_PARM_STACK_SPACE(FN) ... Define this to a nonzero value if it
+ * is the responsibility of the caller to allocate the area reserved for
+ * is the responsibility of the caller to allocate the area reserved for
+ * arguments passed in registers when calling a function of FN.  FN may be NULL
+ * arguments passed in registers when calling a function of FN.  FN may be NULL
+ * if the function called is a library function.
+ * if the function called is a library function.
+ *
+ *
+ * ZipCPU---Why allocate space you don't need?
+ * ZipCPU---Why allocate space you don't need?
+ */
+ */
+#define        OUTGOING_REG_PARM_STACK_SPACE(FNTYPE)   0
+#define        OUTGOING_REG_PARM_STACK_SPACE(FNTYPE)   0
+
+
+
+
+/* STACK_PARMS_IN_REG_PARM_AREA ... Define this macro if REG_PARM_STACK_SPACE
+/* STACK_PARMS_IN_REG_PARM_AREA ... Define this macro if REG_PARM_STACK_SPACE
+ * is defined, buyt the stack parameters don't skip the area specified by it.
+ * is defined, buyt the stack parameters don't skip the area specified by it.
+ *
+ *
+ * ZipCPU---We didn't define REG_PARM_STACK_SPACE, so we won't define this.
+ * ZipCPU---We didn't define REG_PARM_STACK_SPACE, so we won't define this.
+ */
+ */
+
+
+/* TARGET_RETURN_POPS_ARGS(DECL,FNTYPE,SZ) ... This target hook returns the
+/* TARGET_RETURN_POPS_ARGS(DECL,FNTYPE,SZ) ... This target hook returns the
+ * number of bytes of its own arguments that a function pops on returning, or 0
+ * number of bytes of its own arguments that a function pops on returning, or 0
+ * if the function pops no arguments and the caller must therefore pop them all
+ * if the function pops no arguments and the caller must therefore pop them all
+ * after the function returns.
+ * after the function returns.
+ *
+ *
+ * ZipCPU --- If we define this, we'll lose our gain from
+ * ZipCPU --- If we define this, we'll lose our gain from
+ * ACCUMULATE_OUTOING_ARGS.  Thus, we leave this undefined.
+ * ACCUMULATE_OUTOING_ARGS.  Thus, we leave this undefined.
+ */
+ */
+
+
+/* CALL_POPS_ARGS(CUM) ... A C expression that should indicate the number of
+/* CALL_POPS_ARGS(CUM) ... A C expression that should indicate the number of
+ * bytes a call sequence pops off of the stack.  It is added to the value of
+ * bytes a call sequence pops off of the stack.  It is added to the value of
+ * RETURN_POPS_ARGS when compiling a function call.  CUM is the variable in
+ * RETURN_POPS_ARGS when compiling a function call.  CUM is the variable in
+ * which all arguments to the function have been accumulated.
+ * which all arguments to the function have been accumulated.
+ *
+ *
+ * ZipCPU---The call sequence, by itself, doesn't touch the stack.  Therefore
+ * ZipCPU---The call sequence, by itself, doesn't touch the stack.  Therefore
+ * this is zero.
+ * this is zero.
+ */
+ */
+#undef CALL_POPS_ARGS
+#undef CALL_POPS_ARGS
+#define        CALL_POPS_ARGS(CUM)     0
+#define        CALL_POPS_ARGS(CUM)     0
+
+
+
+
+/* 17.09.07 Passing arguments in registers */
+/* 17.09.07 Passing arguments in registers */
+
+
+/* TARGET_FUNCTION_ARG ... Return an RTX indicating whether a function argument
+/* TARGET_FUNCTION_ARG ... Return an RTX indicating whether a function argument
+ * is passed in a register, and if so, which register.
+ * is passed in a register, and if so, which register.
+ */
+ */
+/*
+/*
+ * This has been poisoned ... so let's not define it anymore and look for
+ * This has been poisoned ... so let's not define it anymore and look for
+ * a better way to do this ...
+ * a better way to do this ...
+ *
+ *
+ * #define     FUNCTION_ARG(CUM, MODE, TYPE, NAMED) (((NAMED) == 0) ? NULL_RTX
+ * #define     FUNCTION_ARG(CUM, MODE, TYPE, NAMED) (((NAMED) == 0) ? NULL_RTX
+ *     : targetm.calls.must_pass_in_stack(MODE, TYPE)  ? NULL_RTX
+ *     : targetm.calls.must_pass_in_stack(MODE, TYPE)  ? NULL_RTX
+ *     : (CUM) > ZIP_LAST_ARG_REGNO                    ? NULL_RTX
+ *     : (CUM) > ZIP_LAST_ARG_REGNO                    ? NULL_RTX
+ *     : gen_rtx_REG(MODE, CUM))
+ *     : gen_rtx_REG(MODE, CUM))
+ */
+ */
+#define        TARGET_FUNCTION_ARG     zip_function_arg
+#define        TARGET_FUNCTION_ARG     zip_function_arg
+
+
+
+
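+/* As an aside, and purely for illustration (the real zip_function_arg lives in
+ * zip.c and may well differ), the hook form of the poisoned macro above might
+ * look something like the following, assuming the int-valued CUMULATIVE_ARGS
+ * below simply counts argument registers already used:
+ *
+ *	static rtx
+ *	zip_function_arg(cumulative_args_t cum_v, machine_mode mode,
+ *			const_tree type, bool named)
+ *	{
+ *		CUMULATIVE_ARGS	*cum = get_cumulative_args(cum_v);
+ *
+ *		if ((!named) || (targetm.calls.must_pass_in_stack(mode, type)))
+ *			return NULL_RTX;	// Argument goes on the stack
+ *		if (ZIP_FIRST_ARG_REGNO + *cum > ZIP_LAST_ARG_REGNO)
+ *			return NULL_RTX;	// Out of argument registers
+ *		return gen_rtx_REG(mode, ZIP_FIRST_ARG_REGNO + *cum);
+ *	}
+ */
+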
+/* TARGET_MUST_PASS_IN_STACK(MODE, TYPE) ... This target hook should return
+/* TARGET_MUST_PASS_IN_STACK(MODE, TYPE) ... This target hook should return
+ * true if we should not pass TYPE solely in registers.  The file 'expr.h'
+ * true if we should not pass TYPE solely in registers.  The file 'expr.h'
+ * defines a definition that is usually appropriate, refer to 'expr.h' for
+ * defines a definition that is usually appropriate, refer to 'expr.h' for
+ * additional documentation.
+ * additional documentation.
+ *
+ *
+ * ZipCPU ... Ok, so I looked into expr.h and didn't find anything that looked
+ * ZipCPU ... Ok, so I looked into expr.h and didn't find anything that looked
+ * like this.  So ... I don't know.
+ * like this.  So ... I don't know.
+ */
+ */
+// #undef      TARGET_MUST_PASS_IN_STACK
+// #undef      TARGET_MUST_PASS_IN_STACK
+// #define     TARGET_MUST_PASS_IN_STACK       zip_must_pass_in_stack
+// #define     TARGET_MUST_PASS_IN_STACK       zip_must_pass_in_stack
+
+
+/* TARGET_FUNCTION_INCOMING_ARG ... Define this hook if the target machine
+/* TARGET_FUNCTION_INCOMING_ARG ... Define this hook if the target machine
+ * has register windows, ... which ZipCPU does not have.
+ * has register windows, ... which ZipCPU does not have.
+ */
+ */
+
+
+/* TARGET_USE_PSEUDO_PIC_REG(void) ... This hook should return 1 in case
+/* TARGET_USE_PSEUDO_PIC_REG(void) ... This hook should return 1 in case
+ * pseudo register should be created for pic_offset_table_rtx during function
+ * pseudo register should be created for pic_offset_table_rtx during function
+ * expand.
+ * expand.
+ *
+ *
+ * Shouldn't this be determined by global parameters?
+ * Shouldn't this be determined by global parameters?
+ */
+ */
+
+
+/* TARGET_INIT_PIC_REG(v) ... Perform a target dependent initialization of
+/* TARGET_INIT_PIC_REG(v) ... Perform a target dependent initialization of
+ * pic_offset_table_rtx.  This hook is called at the start of register
+ * pic_offset_table_rtx.  This hook is called at the start of register
+ * allocation.
+ * allocation.
+ *
+ *
+ * ZipCPU---Let's revisit this.
+ * ZipCPU---Let's revisit this.
+ */
+ */
+// #warning "Come back and relook at relocations"
+// #warning "Come back and relook at relocations"
+
+
+/* TARGET_ARG_PARTIAL_BYTES ... This target hook returns the number of bytes
+/* TARGET_ARG_PARTIAL_BYTES ... This target hook returns the number of bytes
+ * at the beginning of an argument that must be put in registers.  The value
+ * at the beginning of an argument that must be put in registers.  The value
+ * must be zero for arguments that are passed entirely in registers or that
+ * must be zero for arguments that are passed entirely in registers or that
+ * are entirely pushed on the stack.
+ * are entirely pushed on the stack.
+ */
+ */
+// #undef      TARGET_ARG_PARTIAL_BYTES
+// #undef      TARGET_ARG_PARTIAL_BYTES
+// #define     TARGET_ARG_PARTIAL_BYTES        zip_arg_partial_bytes
+// #define     TARGET_ARG_PARTIAL_BYTES        zip_arg_partial_bytes
+
+
+/* TARGET_PASS_BY_REFERENCE(CUM,MOD,TREE,NAMED) ... This target hook should
+/* TARGET_PASS_BY_REFERENCE(CUM,MOD,TREE,NAMED) ... This target hook should
+ * return true if an argument at the position indicated by CUM should be passed
+ * return true if an argument at the position indicated by CUM should be passed
+ * by reference.  This predicate is queried after target independent reasons
+ * by reference.  This predicate is queried after target independent reasons
+ * for being passed by reference, such as TREE_ADDRESSABLE(TREE).
+ * for being passed by reference, such as TREE_ADDRESSABLE(TREE).
+ *
+ *
+ */
+ */
+// #undef      TARGET_PASS_BY_REFERENCE
+// #undef      TARGET_PASS_BY_REFERENCE
+// #define     TARGET_PASS_BY_REFERENCE        zip_pass_by_reference
+// #define     TARGET_PASS_BY_REFERENCE        zip_pass_by_reference
+
+
+/* CUMULATIVE_ARGS ...  A C type for declaring a variable that is used as the
+/* CUMULATIVE_ARGS ...  A C type for declaring a variable that is used as the
+ * first argument of 'FUNCTION_ARG' and other related values.
+ * first argument of 'FUNCTION_ARG' and other related values.
+ *
+ *
+ * ZipCPU---We're in trouble if an 'int' won't work, so let's just use that.
+ * ZipCPU---We're in trouble if an 'int' won't work, so let's just use that.
+ */
+ */
+#define        CUMULATIVE_ARGS int
+#define        CUMULATIVE_ARGS int
+
+
+/*
+/*
+ * OVERRIDE_ABI_FORMAT
+ * OVERRIDE_ABI_FORMAT
+ */
+ */
+
+
+/* INIT_CUMULATIVE_ARGS ... A C statement (sans semicolon) for initializing the
+/* INIT_CUMULATIVE_ARGS ... A C statement (sans semicolon) for initializing the
+ * variable CUM for the state at the beginning of the argument list.
+ * variable CUM for the state at the beginning of the argument list.
+ *
+ *
+ *
+ *
+ * ZipCPU---The first argument is passed in register ZIP_FIRST_ARG_REGNO, or
+ * ZipCPU---The first argument is passed in register ZIP_FIRST_ARG_REGNO, or
+ * R1 (unless it has been redefined above ...)
+ * R1 (unless it has been redefined above ...)
+ */
+ */
+#define        INIT_CUMULATIVE_ARGS(CUM,FNTYPE,LIBNAME,FNDECL,N_NAMED_ARGS) (CUM = 0)
+#define        INIT_CUMULATIVE_ARGS(CUM,FNTYPE,LIBNAME,FNDECL,N_NAMED_ARGS) (CUM = 0)
+
+
+/* INIT_CUMULATIVE_LIBCALL_ARGS
+/* INIT_CUMULATIVE_LIBCALL_ARGS
+ * INIT_CUMULATIVE_INCOMING_ARGS
+ * INIT_CUMULATIVE_INCOMING_ARGS
+ *
+ *
+ * These default to the INIT_CUMULATIVE_ARGS value above.
+ * These default to the INIT_CUMULATIVE_ARGS value above.
+ */
+ */
+
+
+/* TARGET_FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) ... This hook updates
+/* TARGET_FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) ... This hook updates
+ * the summarizer variable pointed to by CUM to advance past an argument in
+ * the summarizer variable pointed to by CUM to advance past an argument in
+ * the argument list.  The values MODE, TYPE, and NAMED describe that
+ * the argument list.  The values MODE, TYPE, and NAMED describe that
+ * argument.  Once this is done, the variable CUM is suitable for analyzing the
+ * argument.  Once this is done, the variable CUM is suitable for analyzing the
+ * following argument with TARGET_FUNCTION_ARG, etc.  This hook need not do
+ * following argument with TARGET_FUNCTION_ARG, etc.  This hook need not do
+ * anything if the argument in question was passed on the stack.  The compiler
+ * anything if the argument in question was passed on the stack.  The compiler
+ * knows how to track the amount of stack space used for arguments without
+ * knows how to track the amount of stack space used for arguments without
+ * any special help.
+ * any special help.
+ *
+ *
+ * ZipCPU---Here we simply copy from ECO32.
+ * ZipCPU---Here we simply copy from ECO32.
+ */
+ */
+#define        TARGET_FUNCTION_ARG_ADVANCE     zip_function_arg_advance
+#define        TARGET_FUNCTION_ARG_ADVANCE     zip_function_arg_advance
+
+
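+/* And, again purely as an illustration under the same assumptions, the
+ * matching advance hook could be little more than a counter bump (multi-word
+ * arguments, which would need more than one register, are ignored here):
+ *
+ *	static void
+ *	zip_function_arg_advance(cumulative_args_t cum_v, machine_mode mode,
+ *			const_tree type, bool named)
+ *	{
+ *		CUMULATIVE_ARGS	*cum = get_cumulative_args(cum_v);
+ *
+ *		if ((named) && (!targetm.calls.must_pass_in_stack(mode, type)))
+ *			*cum += 1;	// One more argument register consumed
+ *	}
+ */
+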
+/*
+/*
+ * TARGET_ARG_OFFSET(MODE, TYPE) ... If defined, a C expression that is the
+ * TARGET_ARG_OFFSET(MODE, TYPE) ... If defined, a C expression that is the
+ * number of bytes to add to the offset of the argument passed in memory.
+ * number of bytes to add to the offset of the argument passed in memory.
+ * This is needed for the SPU, which passes char and short arguments in the
+ * This is needed for the SPU, which passes char and short arguments in the
+ * preferred slot that is in the middle of the quad word instead of starting
+ * preferred slot that is in the middle of the quad word instead of starting
+ * at the top.
+ * at the top.
+ *
+ *
+ * ZipCPU -- sounds like the default would be (more) appropriate.
+ * ZipCPU -- sounds like the default would be (more) appropriate.
+ */
+ */
+/*
+/*
+ * FUNCTION_ARG_PADDING        --- not necessary, since we shouldn't be padding
+ * FUNCTION_ARG_PADDING        --- not necessary, since we shouldn't be padding
+ * PAD_VARARGS_DOWN    --- not necessary, since we shouldn't be padding
+ * PAD_VARARGS_DOWN    --- not necessary, since we shouldn't be padding
+ * BLOCK_REG_PADDING
+ * BLOCK_REG_PADDING
+ * TARGET_FUNCTION_ARG_BOUNDARY
+ * TARGET_FUNCTION_ARG_BOUNDARY
+ * TARGET_FUNCTION_ARG_ROUND_BOUNDARY
+ * TARGET_FUNCTION_ARG_ROUND_BOUNDARY
+ */
+ */
+
+
+/* FUNCTION_ARG_REGNO_P(REGNO) ... A C expression that is nonzero if REGNO is
+/* FUNCTION_ARG_REGNO_P(REGNO) ... A C expression that is nonzero if REGNO is
+ * the number of a hard register in which function arguments are sometimes
+ * the number of a hard register in which function arguments are sometimes
+ * passed.  This does not include implicit arguments such as the static chain
+ * passed.  This does not include implicit arguments such as the static chain
+ * and the structure-value address.  On many machines, no registers can be used
+ * and the structure-value address.  On many machines, no registers can be used
+ * for this purpose since all function arguments are pushed on the stack.
+ * for this purpose since all function arguments are pushed on the stack.
+ */
+ */
+#define        FUNCTION_ARG_REGNO_P(r) ((r >= ZIP_FIRST_ARG_REGNO)&&(r<=ZIP_LAST_ARG_REGNO))
+#define        FUNCTION_ARG_REGNO_P(r) ((r >= ZIP_FIRST_ARG_REGNO)&&(r<=ZIP_LAST_ARG_REGNO))
+
+
+/* TARGET_SPLIT_COMPLEX_ARG(TYPE) ... This hook should return true if parameters
+/* TARGET_SPLIT_COMPLEX_ARG(TYPE) ... This hook should return true if parameters
+ * of type TYPE are passed as two scalar parameters.  By default, GCC will
+ * of type TYPE are passed as two scalar parameters.  By default, GCC will
+ * attempt to pack complex arguments into the target's word size.  Some ABI's
+ * attempt to pack complex arguments into the target's word size.  Some ABI's
+ * require complex arguments to be split and treated as their individual
+ * require complex arguments to be split and treated as their individual
+ * components.
+ * components.
+ *
+ *
+ * The default value of this hook is NULL, which is treated as always false,
+ * The default value of this hook is NULL, which is treated as always false,
+ * and which should be good enough for ZipCPU--which can go either way.
+ * and which should be good enough for ZipCPU--which can go either way.
+ */
+ */
+
+
+/* TARGET_BUILD_BUILTIN_VA_LIST ... This hook returns a type node for va_list
+/* TARGET_BUILD_BUILTIN_VA_LIST ... This hook returns a type node for va_list
+ * for the target.  The default version of the hook returns void*.
+ * for the target.  The default version of the hook returns void*.
+ *
+ *
+ */
+ */
+
+
+/* TARGET_ENUM_VA_LIST_P
+/* TARGET_ENUM_VA_LIST_P
+ */
+ */
+
+
+/* TARGET_FN_ABI_VA_LIST ... This hook returns the va_list type of the calling
+/* TARGET_FN_ABI_VA_LIST ... This hook returns the va_list type of the calling
+ * convention specified by FN.  The default version of this returns va_list_type_node.
+ * convention specified by FN.  The default version of this returns va_list_type_node.
+ */
+ */
+
+
+/* TARGET_FN_ABI_VA_LIST
+/* TARGET_FN_ABI_VA_LIST
+ */
+ */
+
+
+/* TARGET_CANONICAL_VA_LIST_TYPE
+/* TARGET_CANONICAL_VA_LIST_TYPE
+ */
+ */
+
+
+/* TARGET_GIMPLIFY_VA_ARG_EXPR
+/* TARGET_GIMPLIFY_VA_ARG_EXPR
+ */
+ */
+
+
+/* TARGET_VALID_POINTER_MODE(MODE) ... Define this to return nonzero if the
+/* TARGET_VALID_POINTER_MODE(MODE) ... Define this to return nonzero if the
+ * port can handle pointers with machine mode MODE.  The default version of this
+ * port can handle pointers with machine mode MODE.  The default version of this
+ * hook returns true for both ptr_mode and Pmode.
+ * hook returns true for both ptr_mode and Pmode.
+ *
+ *
+ * ZipCPU---if Pmode is properly defined (above, and I think it is), then the
+ * ZipCPU---if Pmode is properly defined (above, and I think it is), then the
+ * default behavior is quite appropriate.
+ * default behavior is quite appropriate.
+ */
+ */
+
+
+/* TARGET_REF_MAY_ALIAS_ERRNO(REFP) ... Define this to return nonzero if the
+/* TARGET_REF_MAY_ALIAS_ERRNO(REFP) ... Define this to return nonzero if the
+ * memory reference REF may alias with the system C library errno location.
+ * memory reference REF may alias with the system C library errno location.
+ * The default version of this hook assumes the system C library errno location
+ * The default version of this hook assumes the system C library errno location
+ * is either a declaration of type int or accessed by dereferencing a pointer
+ * is either a declaration of type int or accessed by dereferencing a pointer
+ * to int.
+ * to int.
+ *
+ *
+ * ZipCPU --- Default sounds good to me.
+ * ZipCPU --- Default sounds good to me.
+ */
+ */
+
+
+
+
+/* TARGET_SCALAR_MODE_SUPPORTED_P(MODE) ... Define this to return nonzero if
+/* TARGET_SCALAR_MODE_SUPPORTED_P(MODE) ... Define this to return nonzero if
+ * the port is prepared to handle instructions involving scalar mode MODE.  For
+ * the port is prepared to handle instructions involving scalar mode MODE.  For
+ * a scalar mode to be considered supported, all the basic arithmetic and
+ * a scalar mode to be considered supported, all the basic arithmetic and
+ * comparisons must work.
+ * comparisons must work.
+ *
+ *
+ * The default version of this hook returns true for any mode required to
+ * The default version of this hook returns true for any mode required to
+ * handle the basic C types (as defined by the port).  Included here is the
+ * handle the basic C types (as defined by the port).  Included here is the
+ * double-word arithmetic supported by the code in optabs.c.
+ * double-word arithmetic supported by the code in optabs.c.
+ *
+ *
+ * ZipCPU --- This controls whether a data type of the given mode can even be
+ * ZipCPU --- This controls whether a data type of the given mode can even be
+ * declared in C/C++.  Without support for such a mode, you can't even declare
+ * declared in C/C++.  Without support for such a mode, you can't even declare
+ * a data type of this type.  Hence, we should support SFmode and DFmode, even
+ * a data type of this type.  Hence, we should support SFmode and DFmode, even
+ * though the hardware only *may* support SFmode and will *never* support DFmode.
+ * though the hardware only *may* support SFmode and will *never* support DFmode.
+ */
+ */
+#undef TARGET_SCALAR_MODE_SUPPORTED_P
+#undef TARGET_SCALAR_MODE_SUPPORTED_P
+#define        TARGET_SCALAR_MODE_SUPPORTED_P  zip_scalar_mode_supported_p
+#define        TARGET_SCALAR_MODE_SUPPORTED_P  zip_scalar_mode_supported_p
+
+
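+/* To show the shape of the test (illustration only--the particular list of
+ * modes here is an assumption, not the port's definition): accept the integer
+ * modes plus the two float modes we intend to emulate, reject everything else.
+ *
+ *	static bool
+ *	zip_scalar_mode_supported_p(machine_mode mode)
+ *	{
+ *		switch (mode) {
+ *		case SImode: case DImode:	// Native and double-word integers
+ *		case SFmode: case DFmode:	// Floats, emulated in libgcc
+ *			return true;
+ *		default:
+ *			return false;
+ *		}
+ *	}
+ */
+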
+/* TARGET_VECTOR_MODE_SUPPORTED_P(MODE) ... Define this to return nonzero if the
+/* TARGET_VECTOR_MODE_SUPPORTED_P(MODE) ... Define this to return nonzero if the
+ * port is prepared to handle instructions involving vector mode MODE.  At the
+ * port is prepared to handle instructions involving vector mode MODE.  At the
+ * very least, it must have move patterns for this mode.
+ * very least, it must have move patterns for this mode.
+ *
+ *
+ * ZipCPU---does not support any vector modes.
+ * ZipCPU---does not support any vector modes.
+ */
+ */
+#undef TARGET_VECTOR_MODE_SUPPORTED_P
+#undef TARGET_VECTOR_MODE_SUPPORTED_P
+#define        TARGET_VECTOR_MODE_SUPPORTED_P  hook_bool_mode_false
+#define        TARGET_VECTOR_MODE_SUPPORTED_P  hook_bool_mode_false
+
+
+/* TARGET_ARRAY_MODE_SUPPORTED_P(MODE, NELEMS) ... Return true if GCC should
+/* TARGET_ARRAY_MODE_SUPPORTED_P(MODE, NELEMS) ... Return true if GCC should
+ * try to use a scalar mode to store an array of NELEMS elements, given that
+ * try to use a scalar mode to store an array of NELEMS elements, given that
+ * each element has mode MODE.  Returning true here overrides the usual
+ * each element has mode MODE.  Returning true here overrides the usual
+ * MAX_FIXED_MODE limit and allows GCC to use any defined integer mode.
+ * MAX_FIXED_MODE limit and allows GCC to use any defined integer mode.
+ *
+ *
+ * ZipCPU---Sounds good.
+ * ZipCPU---Sounds good.
+ */
+ */
+// #undef      TARGET_ARRAY_MODE_SUPPORTED_P
+// #undef      TARGET_ARRAY_MODE_SUPPORTED_P
+// #define     TARGET_ARRAY_MODE_SUPPORTED_P   zip_array_mode_supported_p
+// #define     TARGET_ARRAY_MODE_SUPPORTED_P   zip_array_mode_supported_p
+
+
+/* TARGET_LIBGCC_FLOATING_MODE_SUPPORTED_P(MODE) ... Define this to return
+/* TARGET_LIBGCC_FLOATING_MODE_SUPPORTED_P(MODE) ... Define this to return
+ * nonzero if libgcc provides support for the floating-point mode MODE, which is
+ * nonzero if libgcc provides support for the floating-point mode MODE, which is
+ * known to pass TARGET_SCALAR_MODE_SUPPORTED_P.  The default version of this
+ * known to pass TARGET_SCALAR_MODE_SUPPORTED_P.  The default version of this
+ * hook returns true for all of SFmode, DFmode, XFmode, and TFmode, if such
+ * hook returns true for all of SFmode, DFmode, XFmode, and TFmode, if such
+ * modes exist.
+ * modes exist.
+ *
+ *
+ * ZipCPU---We only support SFmode and DFmode, but for now only in emulation
+ * ZipCPU---We only support SFmode and DFmode, but for now only in emulation
+ * (if we can).  Let's allow both of those and see how far we get.
+ * (if we can).  Let's allow both of those and see how far we get.
+ */
+ */
+#undef TARGET_LIBGCC_FLOATING_MODE_SUPPORTED_P
+#undef TARGET_LIBGCC_FLOATING_MODE_SUPPORTED_P
+#define        TARGET_LIBGCC_FLOATING_MODE_SUPPORTED_P zip_libgcc_floating_mode_supported_p
+#define        TARGET_LIBGCC_FLOATING_MODE_SUPPORTED_P zip_libgcc_floating_mode_supported_p
+
+
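+/* The corresponding check can be a one-liner; as a sketch, assuming only the
+ * two emulated modes matter:
+ *
+ *	static bool
+ *	zip_libgcc_floating_mode_supported_p(machine_mode mode)
+ *	{
+ *		return ((mode == SFmode) || (mode == DFmode));
+ *	}
+ */
+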
+/* TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P(MODE) ... Define this to return
+/* TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P(MODE) ... Define this to return
+ * nonzero for machine modes for which the port has small register classes.  If
+ * nonzero for machine modes for which the port has small register classes.  If
+ * the target hook returns nonzero for a given MODE, the compiler will try to
+ * the target hook returns nonzero for a given MODE, the compiler will try to
+ * minimize the lifetime of registers in MODE.  The hook may be called with
+ * minimize the lifetime of registers in MODE.  The hook may be called with
+ * VOIDmode as an argument.  In this case, the hook is expected to return
+ * VOIDmode as an argument.  In this case, the hook is expected to return
+ * nonzero if it returns nonzero for any mode.
+ * nonzero if it returns nonzero for any mode.
+ *
+ *
+ * The default version of this hook returns false for any mode.
+ * The default version of this hook returns false for any mode.
+ *
+ *
+ * ZipCPU---Default sounds good.
+ * ZipCPU---Default sounds good.
+ */
+ */
+
+
+/* 17.09.08 How scalar function values are returned */
+/* 17.09.08 How scalar function values are returned */
+
+
+/* TARGET_FUNCTION_VALUE
+/* TARGET_FUNCTION_VALUE
+ */
+ */
+
+
+/* LIBCALL_VALUE
+/* LIBCALL_VALUE
+ */
+ */
+
+
+
+
+/* 17.09.09 How large values are returned */
+/* 17.09.09 How large values are returned */
+
+
+/* TARGET_RETURN_IN_MEMORY(TYP,FNTYP) ... This target hook should return a
+/* TARGET_RETURN_IN_MEMORY(TYP,FNTYP) ... This target hook should return a
+ * nonzero value to say to return the function value in memory, just as large
+ * nonzero value to say to return the function value in memory, just as large
+ * structures are always returned.  Here TYP will be the data type of the value
+ * structures are always returned.  Here TYP will be the data type of the value
+ * and FNTYP will be the type of the function doing the returning, or NULL
+ * and FNTYP will be the type of the function doing the returning, or NULL
+ * for libcalls.
+ * for libcalls.
+ *
+ *
+ */
+ */
+#undef TARGET_RETURN_IN_MEMORY
+#undef TARGET_RETURN_IN_MEMORY
+#define        TARGET_RETURN_IN_MEMORY zip_return_in_memory
+#define        TARGET_RETURN_IN_MEMORY zip_return_in_memory
+
+
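+/* For illustration, one plausible shape for this hook--treat anything larger
+ * than a single word, or of variable size, as a memory return.  The one-word
+ * cutoff here is an assumption, not a statement of the actual ZipCPU ABI:
+ *
+ *	static bool
+ *	zip_return_in_memory(const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
+ *	{
+ *		const HOST_WIDE_INT size = int_size_in_bytes(type);
+ *		return ((size == -1)	// Variable-sized types go in memory
+ *			|| (size > (HOST_WIDE_INT)UNITS_PER_WORD));
+ *	}
+ */
+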
+/* DEFAULT_PCC_STRUCT_RETURN
+/* DEFAULT_PCC_STRUCT_RETURN
+ * TARGET_STRUCT_VALUE_RTX
+ * TARGET_STRUCT_VALUE_RTX
+ * PCC_STATIC_STRUCT_RETURN
+ * PCC_STATIC_STRUCT_RETURN
+ * TARGET_GET_RAW_RESULT_MODE
+ * TARGET_GET_RAW_RESULT_MODE
+ * TARGET_GET_RAW_ARG_MODE
+ * TARGET_GET_RAW_ARG_MODE
+ */
+ */
+
+
+
+
+/* 17.09.10 Caller-Saves Register Allocation */
+/* 17.09.10 Caller-Saves Register Allocation */
+/* 17.09.11 Function Entry and Exit */
+/* 17.09.11 Function Entry and Exit */
+// TARGET_ASM_FUNCTION_PROLOGUE
+// TARGET_ASM_FUNCTION_PROLOGUE
+// TARGET_ASM_FUNCTION_END_PROLOGUE
+// TARGET_ASM_FUNCTION_END_PROLOGUE
+// TARGET_ASM_FUNCTION_BEGIN_EPILOGUE
+// TARGET_ASM_FUNCTION_BEGIN_EPILOGUE
+// TARGET_ASM_FUNCTION_EPILOGUE
+// TARGET_ASM_FUNCTION_EPILOGUE
+/* EXIT_IGNORE_STACK ... Define this macro as a C expression that is nonzero
+/* EXIT_IGNORE_STACK ... Define this macro as a C expression that is nonzero
+ * if the return instruction or the function epilogue ignores the value of the
+ * if the return instruction or the function epilogue ignores the value of the
+ * stack pointer; in other words, if it is safe to delete an instruction to
+ * stack pointer; in other words, if it is safe to delete an instruction to
+ * adjust the stack pointer before a return from the function.
+ * adjust the stack pointer before a return from the function.
+ *
+ *
+ * The default is 0.
+ * The default is 0.
+ *
+ *
+ * Note that this macro's value is relevant only for functions for which frame
+ * Note that this macro's value is relevant only for functions for which frame
+ * pointers are maintained.  It is never safe to delete a final stack adjustment
+ * pointers are maintained.  It is never safe to delete a final stack adjustment
+ * in a function that has no frame pointer, and the compiler knows this
+ * in a function that has no frame pointer, and the compiler knows this
+ * regardless of EXIT_IGNORE_STACK.
+ * regardless of EXIT_IGNORE_STACK.
+ *
+ *
+ * ZipCPU -- Thanks to the example of the m68k, and a careful selection of what
+ * ZipCPU -- Thanks to the example of the m68k, and a careful selection of what
+ * our options otherwise could have been, our epilogue code never relies on the
+ * our options otherwise could have been, our epilogue code never relies on the
+ * incoming stack pointer; rather, it starts by moving the frame register into
+ * incoming stack pointer; rather, it starts by moving the frame register into
+ * the stack register.
+ * the stack register.
+ */
+ */
+#define EXIT_IGNORE_STACK      1
+#define EXIT_IGNORE_STACK      1
+// EPILOGUE_USES(regno)
+// EPILOGUE_USES(regno)
+// EH_USES(regno)
+// EH_USES(regno)
+// TARGET_ASM_OUTPUT_MI_THUNK
+// TARGET_ASM_OUTPUT_MI_THUNK
+// TARGET_ASM_CAN_OUTPUT_MI_THUNK
+// TARGET_ASM_CAN_OUTPUT_MI_THUNK
+
+
+/* 17.09.12 Generating code for profiling */
+/* 17.09.12 Generating code for profiling */
+// FUNCTION_PROFILER
+// FUNCTION_PROFILER
+// PROFILE_HOOK
+// PROFILE_HOOK
+// NO_PROFILE_COUNTERS
+// NO_PROFILE_COUNTERS
+// PROFILE_BEFORE_PROLOGUE
+// PROFILE_BEFORE_PROLOGUE
+// TARGET_KEEP_LEAF_WHEN_PROFILED
+// TARGET_KEEP_LEAF_WHEN_PROFILED
+
+
+/* 17.09.13 Permitting tail calls */
+/* 17.09.13 Permitting tail calls */
+
+
+/* TARGET_FUNCTION_OK_FOR_SIBCALL(DECL,EXP) ... True if it is OK to do sibling
+/* TARGET_FUNCTION_OK_FOR_SIBCALL(DECL,EXP) ... True if it is OK to do sibling
+ * call optimizations for the specified call expression EXP.  DECL will be the
+ * call optimizations for the specified call expression EXP.  DECL will be the
+ * called function, or NULL if this is an indirect call.
+ * called function, or NULL if this is an indirect call.
+ *
+ *
+ * It is not uncommon for limitations of calling conventions to prevent tail
+ * It is not uncommon for limitations of calling conventions to prevent tail
+ * calls to functions outside the current unit of translation, or during PIC
+ * calls to functions outside the current unit of translation, or during PIC
+ * compilation.  The hook is used to enforce these restrictions, as the sibcall
+ * compilation.  The hook is used to enforce these restrictions, as the sibcall
+ * md pattern can not fail, or fall over to a 'normal' call.  The criteria for
+ * md pattern can not fail, or fall over to a 'normal' call.  The criteria for
+ * successful sibling call optimization may vary greatly between different
+ * successful sibling call optimization may vary greatly between different
+ * architectures.
+ * architectures.
+ *
+ *
+ * What's a sibling call?  "Sibling calls or tail calls terminate the function
+ * What's a sibling call?  "Sibling calls or tail calls terminate the function
+ * in a non-standard way and thus an edge to the exit must be present.
+ * in a non-standard way and thus an edge to the exit must be present.
+ * EDGE_SIBCALL and EDGE_ABNORMAL are set in such case(s).  These edges only
+ * EDGE_SIBCALL and EDGE_ABNORMAL are set in such case(s).  These edges only
+ * exist in the RTL representation."
+ * exist in the RTL representation."
+ *
+ *
+ * So, basically, a sibling call is a function call at the end of one function.
+ * So, basically, a sibling call is a function call at the end of one function.
+ * Rather than setting up a new stack frame, return address, etc, it is
+ * Rather than setting up a new stack frame, return address, etc, it is
+ * possible to just jump to this new function, leaving the return address for
+ * possible to just jump to this new function, leaving the return address for
+ * the prior function as the (now) return address for this one.
+ * the prior function as the (now) return address for this one.
+ *
+ *
+ * ZipCPU --- These are good things.  We wish to support them.  We will require,
+ * ZipCPU --- These are good things.  We wish to support them.  We will require,
+ * though, that the sibling require no more stack space than the original.
+ * though, that the sibling require no more stack space than the original.
+ * We might go even stricter, requiring that the sibling require no stack space
+ * We might go even stricter, requiring that the sibling require no stack space
+ * at all--and only register variables.
+ * at all--and only register variables.
+ */
+ */
+#define        TARGET_FUNCTION_OK_FOR_SIBCALL  zip_function_ok_for_sibcall
+#define        TARGET_FUNCTION_OK_FOR_SIBCALL  zip_function_ok_for_sibcall
+
+
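+/* A sketch of the stricter policy floated above (illustrative only): permit
+ * the sibling call only when the current function needed no stack frame of
+ * its own, so there is nothing on the stack the sibling could clobber.
+ *
+ *	static bool
+ *	zip_function_ok_for_sibcall(tree decl ATTRIBUTE_UNUSED,
+ *			tree exp ATTRIBUTE_UNUSED)
+ *	{
+ *		return (get_frame_size() == 0);
+ *	}
+ */
+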
+/* TARGET_EXTRA_LIVE_ON_ENTRY(REGS) ... Add any hard registers to regs that are
+/* TARGET_EXTRA_LIVE_ON_ENTRY(REGS) ... Add any hard registers to regs that are
+ * live on entry to the function.  This hook only needs to be defined to provide
+ * live on entry to the function.  This hook only needs to be defined to provide
+ * registers that cannot be found by examination of FUNCTION_ARG_REGNO_P, the
+ * registers that cannot be found by examination of FUNCTION_ARG_REGNO_P, the
+ * ... and the ...
+ * ... and the ...
+ *
+ *
+ * ZipCPU -- the default should be good enough for us.
+ * ZipCPU -- the default should be good enough for us.
+ */
+ */
+/* TARGET_SET_UP_BY_PROLOGUE(CONTAINER) ... This hook should add additional
+/* TARGET_SET_UP_BY_PROLOGUE(CONTAINER) ... This hook should add additional
+ * registers that are computed by the prologue to the hard register set for
+ * registers that are computed by the prologue to the hard register set for
+ * shrink-wrapping optimization purposes.
+ * shrink-wrapping optimization purposes.
+ *
+ *
+ * ??
+ * ??
+ */
+ */
+
+
+/* TARGET_WARN_FUNC_RETURN(TREE) ... True if a function's return statements
+/* TARGET_WARN_FUNC_RETURN(TREE) ... True if a function's return statements
+ * should be checked for matching the function's return type.  This includes
+ * should be checked for matching the function's return type.  This includes
+ * checking for falling off the end of a non-void function.  Return false if
+ * checking for falling off the end of a non-void function.  Return false if
+ * no such check should be made.
+ * no such check should be made.
+ *
+ *
+ * ZipCPU--the default should be good enough for us.
+ * ZipCPU--the default should be good enough for us.
+ */
+ */
+
+
+/* 17.09.14 Stack smashing protection */
+/* 17.09.14 Stack smashing protection */
+// TARGET_STACK_PROTECT_GUARD
+// TARGET_STACK_PROTECT_GUARD
+// TARGET_STACK_PROTECT_FAIL
+// TARGET_STACK_PROTECT_FAIL
+// TARGET_SUPPORTS_SPLIT_STACK
+// TARGET_SUPPORTS_SPLIT_STACK
+
+
+/* 17.09.15 Miscellaneous register hooks */
+/* 17.09.15 Miscellaneous register hooks */
+
+
+// TARGET_CALL_FUSAGE_CONTAINS_NON_CALLEE_CLOBBERS
+// TARGET_CALL_FUSAGE_CONTAINS_NON_CALLEE_CLOBBERS
+
+
+/* TARGET_CALL_FUSAGE_CONTAINS_NON_CALLEE_CLOBBERS
+/* TARGET_CALL_FUSAGE_CONTAINS_NON_CALLEE_CLOBBERS
+ * ZipCPU --- default is good enough for us.
+ * ZipCPU --- default is good enough for us.
+ */
+ */
+
+
+/* 17.10 Implementing VARARGS MACROS */
+/* 17.10 Implementing VARARGS MACROS */
+
+
+/* ...
+/* ...
+ */
+ */
+
+
+/* void TARGET_SETUP_INCOMING_VARARGS(A,M,T,I,S) ... This target hook offers an
+/* void TARGET_SETUP_INCOMING_VARARGS(A,M,T,I,S) ... This target hook offers an
+ * alternative to using __builtin_saveregs and defining the hook TARGET_EXPAND_
+ * alternative to using __builtin_saveregs and defining the hook TARGET_EXPAND_
+ * BUILTIN_SAVEREGS.  Use it to store the anonymous register arguments into the
+ * BUILTIN_SAVEREGS.  Use it to store the anonymous register arguments into the
+ * stack so that all the arguments appear to have been passed consecutively
+ * stack so that all the arguments appear to have been passed consecutively
+ * on the stack.  Once this is done, you can use the standard implementation
+ * on the stack.  Once this is done, you can use the standard implementation
+ * of varargs that works for machines that pass all their arguments on the
+ * of varargs that works for machines that pass all their arguments on the
+ * stack.
+ * stack.
+ */
+ */
+// #undef      TARGET_SETUP_INCOMING_VARARGS
+// #undef      TARGET_SETUP_INCOMING_VARARGS
+// #define     TARGET_SETUP_INCOMING_VARARGS   zip_setup_incoming_varargs
+// #define     TARGET_SETUP_INCOMING_VARARGS   zip_setup_incoming_varargs
+
+
+/* ...
+/* ...
+ */
+ */
+
+
+/* 17.11 Trampolines for Nested Functions */
+/* 17.11 Trampolines for Nested Functions */
+
+
+/* TARGET_ASM_TRAMPOLINE_TEMPLATE ... This hook is called by
+/* TARGET_ASM_TRAMPOLINE_TEMPLATE ... This hook is called by
+ * assemble_trampoline_template to output, on the stream f, assembler code for
+ * assemble_trampoline_template to output, on the stream f, assembler code for
+ * a block of data that contains the constant parts of a trampoline.  This code
+ * a block of data that contains the constant parts of a trampoline.  This code
+ * should not include a label--the label is taken care of automatically.
+ * should not include a label--the label is taken care of automatically.
+ *
+ *
+ * ZipCPU -- looks like we need to do this.
+ * ZipCPU -- looks like we need to do this.
+ */
+ */
+#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
+#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
+#define        TARGET_ASM_TRAMPOLINE_TEMPLATE  zip_asm_trampoline_template
+#define        TARGET_ASM_TRAMPOLINE_TEMPLATE  zip_asm_trampoline_template
+
+
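+/* As a sketch of what the template hook does (the mnemonics come from the
+ * TRAMPOLINE_SIZE note below; the operands and register choice here are
+ * placeholders, not the actual ZipCPU trampoline):
+ *
+ *	static void
+ *	zip_asm_trampoline_template(FILE *f)
+ *	{
+ *		// Constant part only: the immediates are patched in later
+ *		// by TARGET_TRAMPOLINE_INIT.
+ *		fprintf(f, "\tBREV\t0,R1\n");
+ *		fprintf(f, "\tLDILO\t0,R1\n");
+ *		fprintf(f, "\tJMP\tR1\n");
+ *	}
+ */
+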
+/* TRAMPOLINE_SECTION ... Return the section into which the trampoline template
+/* TRAMPOLINE_SECTION ... Return the section into which the trampoline template
+ * is to be placed.  The default value is readonly_data_section.
+ * is to be placed.  The default value is readonly_data_section.
+ *
+ *
+ * ZipCPU--default should be good enough.
+ * ZipCPU--default should be good enough.
+ */
+ */
+
+
+/* TRAMPOLINE_SIZE ... A C expression for the size (in bytes) of the trampoline
+/* TRAMPOLINE_SIZE ... A C expression for the size (in bytes) of the trampoline
+ * as an integer.
+ * as an integer.
+ *
+ *
+ * ZipCPU--it's three instructions, or 96 bits: BREV, LDILO, and JMP
+ * ZipCPU--it's three instructions, or 96 bits: BREV, LDILO, and JMP
+ *
+ *
+ */
+ */
+// #warning "May need to redefine trampoline_size in words, not bytes"
+// #warning "May need to redefine trampoline_size in words, not bytes"
+#undef TRAMPOLINE_SIZE
+#undef TRAMPOLINE_SIZE
+#define        TRAMPOLINE_SIZE 3*UNITS_PER_WORD
+#define        TRAMPOLINE_SIZE 3*UNITS_PER_WORD
+
+
+/* TRAMPOLINE_ALIGNMENT ... alignment required for trampolines, in bits.
+/* TRAMPOLINE_ALIGNMENT ... alignment required for trampolines, in bits.
+ *
+ *
+ * Well that's well known in ZipCPU --- 32-bits.
+ * Well that's well known in ZipCPU --- 32-bits.
+ */
+ */
+#undef TRAMPOLINE_ALIGNMENT
+#undef TRAMPOLINE_ALIGNMENT
+#define        TRAMPOLINE_ALIGNMENT    UNITS_PER_WORD
+#define        TRAMPOLINE_ALIGNMENT    UNITS_PER_WORD
+
+
+/* void TARGET_TRAMPOLINE_INIT(RTX,TREE,RTX CH) ... This hook is called to
+/* void TARGET_TRAMPOLINE_INIT(RTX,TREE,RTX CH) ... This hook is called to
+ * initialize a trampoline.  m_tramp is an RTX for the memory block for the
+ * initialize a trampoline.  m_tramp is an RTX for the memory block for the
+ * trampoline; TREE is the FUNCTION_DECL for the nested function;  CH is an
+ * trampoline; TREE is the FUNCTION_DECL for the nested function;  CH is an
+ * rtx for the static chain value that should be passed to the function when
+ * rtx for the static chain value that should be passed to the function when
+ * it is called.
+ * it is called.
+ *
+ *
+ * ZipCPU ... Can we get by without this?
+ * ZipCPU ... Can we get by without this?
+ */
+ */
+#undef TARGET_TRAMPOLINE_INIT
+#undef TARGET_TRAMPOLINE_INIT
+#define        TARGET_TRAMPOLINE_INIT  zip_trampoline_init
+#define        TARGET_TRAMPOLINE_INIT  zip_trampoline_init
+
+
+/* TARGET_TRAMPOLINE_ADJUST_ADDRESS(RTX) ... This hook should perform any
+/* TARGET_TRAMPOLINE_ADJUST_ADDRESS(RTX) ... This hook should perform any
+ * machine-specific adjustment in the address of the trampoline.  Its argument
+ * machine-specific adjustment in the address of the trampoline.  Its argument
+ * contains the address of the memory block that was passed to
+ * contains the address of the memory block that was passed to
+ * TARGET_TRAMPOLINE_INIT.  In case the address to be used for a function call
+ * TARGET_TRAMPOLINE_INIT.  In case the address to be used for a function call
+ * should be different from the address at which the template was stored, the
+ * should be different from the address at which the template was stored, the
+ * different address should be returned; otherwise addr should be returned
+ * different address should be returned; otherwise addr should be returned
+ * unchanged.  If the hook is not defined, RTX (addr) will be used for function
+ * unchanged.  If the hook is not defined, RTX (addr) will be used for function
+ * calls.
+ * calls.
+ *
+ *
+ * ZipCPU--works for us!
+ * ZipCPU--works for us!
+ */
+ */
+
+
+/* CLEAR_INSN_CACHE(BEG,END) ... If defined, expands to a C expression clearing
+/* CLEAR_INSN_CACHE(BEG,END) ... If defined, expands to a C expression clearing
+ * the instruction cache in the specified interval.  The definition of this
+ * the instruction cache in the specified interval.  The definition of this
+ * macro would typically be a series of asm statements.   Both BEG and END are
+ * macro would typically be a series of asm statements.   Both BEG and END are
+ * pointer expressions.
+ * pointer expressions.
+ *
+ *
+ * ZipCPU --- Ouch!  We have no way to do this (yet)!
+ * ZipCPU --- Ouch!  We have no way to do this (yet)!
+ */
+ */
+#define        CLEAR_INSN_CACHE(BEG,END)       gcc_assert(0);
+#define        CLEAR_INSN_CACHE(BEG,END)       gcc_assert(0);
+
+
+/* TRANSFER_FROM_TRAMPOLINE ... Define this macro if trampolines need a special
+/* TRANSFER_FROM_TRAMPOLINE ... Define this macro if trampolines need a special
+ * subroutine to do their work.  The macro should expand to a series of asm
+ * subroutine to do their work.  The macro should expand to a series of asm
+ * statements which will be compiled with GCC.  They go in a library function
+ * statements which will be compiled with GCC.  They go in a library function
+ * named __transfer_from_trampoline.
+ * named __transfer_from_trampoline.
+ *
+ *
+ * We may need to rethink trampolines on ZipCPU.
+ * We may need to rethink trampolines on ZipCPU.
+ */
+ */
+
+
+
+
+/* 17.12 Implicit Calls to Library Routines */
+/* 17.12 Implicit Calls to Library Routines */
+
+
+/* DECLARE_LIBRARY_RENAMES
+/* DECLARE_LIBRARY_RENAMES
+ *
+ *
+ * ZipCPU: Don't need it.
+ * ZipCPU: Don't need it.
+ */
+ */
+
+
+/* TARGET_INIT_LIBFUNCS(VOID) ... This hook should declare additional library
+/* TARGET_INIT_LIBFUNCS(VOID) ... This hook should declare additional library
+ * routines or rename existing ones, using the functions set_optab_libfunc and
+ * routines or rename existing ones, using the functions set_optab_libfunc and
+ * init_one_libfunc defined in optabs.c.  init_optabs calls this macro after
+ * init_one_libfunc defined in optabs.c.  init_optabs calls this macro after
+ * initializing all the normal library routines.
+ * initializing all the normal library routines.
+ *
+ *
+ * Most ports don't need to define this hook, so we won't either.
+ * Most ports don't need to define this hook, so we won't either.
+ */
+ */
+
+
+/* TARGET_LIBFUNC_GNU_PREFIX ... If false (the default), internal library
+/* TARGET_LIBFUNC_GNU_PREFIX ... If false (the default), internal library
+ * routines start with two underscores.  If set to true, these routines start
+ * routines start with two underscores.  If set to true, these routines start
+ * with __gnu_ instead.
+ * with __gnu_ instead.
+ *
+ *
+ * ZipCPU: No change necessary.
+ * ZipCPU: No change necessary.
+ */
+ */
+
+
+/* FLOAT_LIB_COMPARE_RETURNS_BOOL(MODE,COMPARISON) ... This macro should return
+/* FLOAT_LIB_COMPARE_RETURNS_BOOL(MODE,COMPARISON) ... This macro should return
+ * true if the library routine that implements the floating point comparison
+ * true if the library routine that implements the floating point comparison
+ * operator COMPARISON in mode MODE will return a boolean and false if it will
+ * operator COMPARISON in mode MODE will return a boolean and false if it will
+ * return a tristate.
+ * return a tristate.
+ *
+ *
+ * Most ports don't need to define this macro, so Zip CPU won't either.
+ * Most ports don't need to define this macro, so Zip CPU won't either.
+ */
+ */
+
+
+/* TARGET_HAS_NO_HW_DIVIDE ... This macro should be defined if the target has no
+/* TARGET_HAS_NO_HW_DIVIDE ... This macro should be defined if the target has no
+ * hardware divide instructions.  If this macro is defined, GCC will use an
+ * hardware divide instructions.  If this macro is defined, GCC will use an
+ * algorithm which makes use of simple logical and arithmetic operations for
+ * algorithm which makes use of simple logical and arithmetic operations for
+ * 64-bit division.  If the macro is not defined, GCC will use an algorithm
+ * 64-bit division.  If the macro is not defined, GCC will use an algorithm
+ * which makes use of a 64-bit by 32-bit divide primitive.
+ * which makes use of a 64-bit by 32-bit divide primitive.
+ *
+ *
+ * Zip CPU, though, doesn't have the 64-bit by 32-bit divide primitive, thus
+ * Zip CPU, though, doesn't have the 64-bit by 32-bit divide primitive, thus
+ * we have no HW DIVIDE (for now).
+ * we have no HW DIVIDE (for now).
+ */
+ */
+#define        TARGET_HAS_NO_HW_DIVIDE
+#define        TARGET_HAS_NO_HW_DIVIDE
+
+
+/* TARGET_EDOM ... The value of EDOM on the target machine, as a C integer
+/* TARGET_EDOM ... The value of EDOM on the target machine, as a C integer
+ * expression.  If you don't define this macro, GCC does not attempt to deposit
+ * expression.  If you don't define this macro, GCC does not attempt to deposit
+ * the value of EDOM into errno directly.  Look in /usr/include/errno.h to find
+ * the value of EDOM into errno directly.  Look in /usr/include/errno.h to find
+ * the value of EDOM on your system.
+ * the value of EDOM on your system.
+ *
+ *
+ * EDOM is the error created when a math argument is out of the domain of the
+ * EDOM is the error created when a math argument is out of the domain of the
+ * function.
+ * function.
+ *
+ *
+ * ZipCPU: Don't need it---I don't think.
+ * ZipCPU: Don't need it---I don't think.
+ */
+ */
+
+
+/* GEN_ERRNO_RTX ... Define this macro as a C expression to create an rtl
+/* GEN_ERRNO_RTX ... Define this macro as a C expression to create an rtl
+ * expression that refers to the global "variable" errno.  (On certain
+ * expression that refers to the global "variable" errno.  (On certain
+ * systems, errno may not actually be a variable.)  If you don't define this
+ * systems, errno may not actually be a variable.)  If you don't define this
+ * macro, a reasonable default is used.
+ * macro, a reasonable default is used.
+ *
+ *
+ * ZipCPU --- if a reasonable default is used, we'll use that--especially since
+ * ZipCPU --- if a reasonable default is used, we'll use that--especially since
+ * I doubt we'll be using errno for a while.
+ * I doubt we'll be using errno for a while.
+ */
+ */
+
+
+/* NEXT_OBJC_RUNTIME ... Set this macro to 1 to use the "NeXT" Objective-C
+/* NEXT_OBJC_RUNTIME ... Set this macro to 1 to use the "NeXT" Objective-C
+ * message sending conventions by default.  This calling convention involves
+ * message sending conventions by default.  This calling convention involves
+ * passing the object, the selector and the method arguments all at once to the
+ * passing the object, the selector and the method arguments all at once to the
+ * method-lookup library function.  This is the usual setting when targeting
+ * method-lookup library function.  This is the usual setting when targeting
+ * Darwin/Mac OS X systems, which have the NeXT runtime installed.
+ * Darwin/Mac OS X systems, which have the NeXT runtime installed.
+ *
+ *
+ * If the macro is set to 0, ...
+ * If the macro is set to 0, ...
+ *
+ *
+ * Doesn't look relevant (yet) for the Zip CPU--especially since we don't have
+ * Doesn't look relevant (yet) for the Zip CPU--especially since we don't have
+ * an O/S yet.
+ * an O/S yet.
+ */
+ */
+
+
+
+
+
+
+/* 17.13 Addressing Modes */
+/* 17.13 Addressing Modes */
+
+
+/* C expressions that are nonzero if the machine supports pre-increment,
+/* C expressions that are nonzero if the machine supports pre-increment,
+ * pre-decrement, post-increment, or post-decrement addressing respectively.
+ * pre-decrement, post-increment, or post-decrement addressing respectively.
+ */
+ */
+#define        HAVE_PRE_INCREMENT      (0)
+#define        HAVE_PRE_INCREMENT      (0)
+#define        HAVE_PRE_DECREMENT      (0)
+#define        HAVE_PRE_DECREMENT      (0)
+#define        HAVE_POST_INCREMENT     (0)
+#define        HAVE_POST_INCREMENT     (0)
+#define        HAVE_POST_DECREMENT     (0)
+#define        HAVE_POST_DECREMENT     (0)
+
+
+/* C expression that is nonzero if the machine supports pre- or post- address
+/* C expression that is nonzero if the machine supports pre- or post- address
+ * side-effect generation involving constants other than the size of the memory
+ * side-effect generation involving constants other than the size of the memory
+ * operand.
+ * operand.
+ */
+ */
+#define        HAVE_PRE_MODIFY_DISP    (0)
+#define        HAVE_PRE_MODIFY_DISP    (0)
+#define        HAVE_POST_MODIFY_DISP   (0)
+#define        HAVE_POST_MODIFY_DISP   (0)
+
+
+/* C expression that is non-zero if the machine supports pre- or post-address
+/* C expression that is non-zero if the machine supports pre- or post-address
+ * side-effect generation involving a register displacement.
+ * side-effect generation involving a register displacement.
+ */
+ */
+#define        HAVE_PRE_MODIFY_REG     (0)
+#define        HAVE_PRE_MODIFY_REG     (0)
+#define        HAVE_POST_MODIFY_REG    (0)
+#define        HAVE_POST_MODIFY_REG    (0)
+
+
+/* CONSTANT_ADDRESS_P(X) ... A C expression that is 1 if the RTX X is a constant
+/* CONSTANT_ADDRESS_P(X) ... A C expression that is 1 if the RTX X is a constant
+ * which is a valid address.  On most machines the default definition ... is
+ * which is a valid address.  On most machines the default definition ... is
+ * acceptable, but a few machines are more restrictive as to which constant
+ * acceptable, but a few machines are more restrictive as to which constant
+ * addresses are supported.
+ * addresses are supported.
+ *
+ *
+ * Zip CPU is designed for offset addresses, not constant addresses.  Although
+ * Zip CPU is designed for offset addresses, not constant addresses.  Although
+ * the CPU will support 18-bit signed constant addresses, the assembler and
+ * the CPU will support 18-bit signed constant addresses, the assembler and
+ * general programming model do not.  Further, without knowing where the final
+ * general programming model do not.  Further, without knowing where the final
+ * address will be located, this is an unusable model.  Therefore we will
+ * address will be located, this is an unusable model.  Therefore we will
+ * define this as not supported.
+ * define this as not supported.
+ *
+ *
+ * In hindsight, this isn't true--labels and symbols are valid addresses, and
+ * In hindsight, this isn't true--labels and symbols are valid addresses, and
+ * they are also constant addresses.  Hence, we leave this at its default.
+ * they are also constant addresses.  Hence, we leave this at its default.
+ */
+ */
+// #undef      CONSTANT_ADDRESS_P
+// #undef      CONSTANT_ADDRESS_P
+// #define     CONSTANT_ADDRESS_P(X)   (0)
+// #define     CONSTANT_ADDRESS_P(X)   (0)
+
+
+/* CONSTANT_P(X) ... CONSTANT_P, which is defined by target-independent code,
+/* CONSTANT_P(X) ... CONSTANT_P, which is defined by target-independent code,
+ * accepts integer-valued expressions whose values are not explicitly known,
+ * accepts integer-valued expressions whose values are not explicitly known,
+ * such as symbol_ref, label_ref, and high expressions and const arithmetic
+ * such as symbol_ref, label_ref, and high expressions and const arithmetic
+ * expressions, in addition to const_int and const_double expressions.
+ * expressions, in addition to const_int and const_double expressions.
+ *
+ *
+ * Huh???
+ * Huh???
+ */
+ */
+// #define CONSTANT_P(X) ???
+// #define CONSTANT_P(X) ???
+
+
+/* MAX_REGS_PER_ADDRESS ... A number, the maximum number of registers that can
+/* MAX_REGS_PER_ADDRESS ... A number, the maximum number of registers that can
+ * appear in a valid memory address.  Note that it is up to you to specify a
+ * appear in a valid memory address.  Note that it is up to you to specify a
+ * value equal to the maximum number that TARGET_LEGITIMATE_ADDRESS_P would
+ * value equal to the maximum number that TARGET_LEGITIMATE_ADDRESS_P would
+ * ever accept.
+ * ever accept.
+ */
+ */
+#define        MAX_REGS_PER_ADDRESS    1
+#define        MAX_REGS_PER_ADDRESS    1
+
+
+/* TARGET_LEGITIMATE_ADDRESS_P(MODE,RTX,STRICT) ... A function that returns
+/* TARGET_LEGITIMATE_ADDRESS_P(MODE,RTX,STRICT) ... A function that returns
+ * whether RTX is a legitimate memory address on the target machine for a
+ * whether RTX is a legitimate memory address on the target machine for a
+ * memory operation of mode MODE.
+ * memory operation of mode MODE.
+ */
+ */
+#undef TARGET_LEGITIMATE_ADDRESS_P
+#undef TARGET_LEGITIMATE_ADDRESS_P
+#define TARGET_LEGITIMATE_ADDRESS_P    zip_legitimate_address_p
+#define TARGET_LEGITIMATE_ADDRESS_P    zip_legitimate_address_p
+
+
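+/* The shape of that test, as an illustration (the signed 14-bit limit is taken
+ * from the LW/SW offset field mentioned under the anchor macros below; strict
+ * vs. non-strict handling of pseudo registers is glossed over):
+ *
+ *	static bool
+ *	zip_legitimate_address_p(machine_mode mode ATTRIBUTE_UNUSED,
+ *			rtx x, bool strict ATTRIBUTE_UNUSED)
+ *	{
+ *		if (REG_P(x))			// (Rb)
+ *			return true;
+ *		if ((GET_CODE(x) == PLUS)	// (Rb) + constant offset
+ *				&& (REG_P(XEXP(x, 0)))
+ *				&& (CONST_INT_P(XEXP(x, 1))))
+ *			return (IN_RANGE(INTVAL(XEXP(x, 1)), -(1<<13), (1<<13)-1));
+ *		return false;
+ *	}
+ */
+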
+/* TARGET_MEM_CONSTRAINT ... A single character to be used instead of the
+/* TARGET_MEM_CONSTRAINT ... A single character to be used instead of the
+ * default 'm' character for general memory addresses.  This defines the
+ * default 'm' character for general memory addresses.  This defines the
+ * constraint letter which matches the memory addresses accepted by
+ * constraint letter which matches the memory addresses accepted by
+ * TARGET_LEGITIMATE_ADDRESS_P.  Define this macro if you want to support new
+ * TARGET_LEGITIMATE_ADDRESS_P.  Define this macro if you want to support new
+ * address format in your back end without changing the semantics of the 'm'
+ * address format in your back end without changing the semantics of the 'm'
+ * constraint.  This is necessary in order to preserve functionality of inline
+ * constraint.  This is necessary in order to preserve functionality of inline
+ * assembly constructs using the 'm' constraint.
+ * assembly constructs using the 'm' constraint.
+ *
+ *
+ * ZipCPU--doesn't look like we need to define this at all.
+ * ZipCPU--doesn't look like we need to define this at all.
+ */
+ */
+
+
+/* FIND_BASE_TERM(X) ... A C expression to determine the base term of address
+/* FIND_BASE_TERM(X) ... A C expression to determine the base term of address
+ * X or to provide a simplified version of X from which alias.c can easily find
+ * X or to provide a simplified version of X from which alias.c can easily find
+ * the base term.  This macro is used in only two places: find_base_value and
+ * the base term.  This macro is used in only two places: find_base_value and
+ * find_base_term in alias.c.
+ * find_base_term in alias.c.
+ *
+ *
+ * It is always safe for this macro not to be defined.  It exists so that
+ * It is always safe for this macro not to be defined.  It exists so that
+ * alias analysis can understand machine-dependent addresses.
+ * alias analysis can understand machine-dependent addresses.
+ *
+ *
+ * ZipCPU: We'll skip this then.
+ * ZipCPU: We'll skip this then.
+ */
+ */
+
+
+/* TARGET_LEGITIMIZE_ADDRESS(RTX,OLD,MODE) ... This hook is given an invalid
+/* TARGET_LEGITIMIZE_ADDRESS(RTX,OLD,MODE) ... This hook is given an invalid
+ * memory address RTX for an operand of mode MODE and should try to return a
+ * memory address RTX for an operand of mode MODE and should try to return a
+ * valid memory address.  RTX will always be the result of a call to
+ * valid memory address.  RTX will always be the result of a call to
+ * break_out_memory_refs, and OLD will be the operand that was given to that
+ * break_out_memory_refs, and OLD will be the operand that was given to that
+ * function to produce RTX.
+ * function to produce RTX.
+ *
+ *
+ * ZipCPU --
+ * ZipCPU --
+ */
+ */
+#undef TARGET_LEGITIMIZE_ADDRESS
+#undef TARGET_LEGITIMIZE_ADDRESS
+#define        TARGET_LEGITIMIZE_ADDRESS       zip_legitimize_address
+#define        TARGET_LEGITIMIZE_ADDRESS       zip_legitimize_address
+
+
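+/* As an illustration of the usual trick (the 14-bit range and the split point
+ * are assumptions here, not the port's actual numbers): when the constant
+ * offset is too large for a LW/SW immediate, move base-plus-high-part into a
+ * fresh register and keep only a small offset in the address.
+ *
+ *	static rtx
+ *	zip_legitimize_address(rtx x, rtx oldx ATTRIBUTE_UNUSED,
+ *			machine_mode mode ATTRIBUTE_UNUSED)
+ *	{
+ *		if ((GET_CODE(x) == PLUS) && (CONST_INT_P(XEXP(x, 1)))
+ *			&& (!IN_RANGE(INTVAL(XEXP(x, 1)), -(1<<13), (1<<13)-1))) {
+ *			HOST_WIDE_INT	ofs = INTVAL(XEXP(x, 1));
+ *			HOST_WIDE_INT	lo  = ofs & ((1<<13)-1);
+ *			rtx	base = force_reg(Pmode, plus_constant(Pmode,
+ *					XEXP(x, 0), ofs - lo));
+ *			return plus_constant(Pmode, base, lo);
+ *		}
+ *		return x;
+ *	}
+ */
+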
+/* LEGITIMIZE_RELOAD_ADDRESS(X,MODE,OP,TYPE,IND,WIN) ... A C compound statement
+/* LEGITIMIZE_RELOAD_ADDRESS(X,MODE,OP,TYPE,IND,WIN) ... A C compound statement
+ * that attempts to replace X, which is an address that needs reloading, with
+ * that attempts to replace X, which is an address that needs reloading, with
+ * a valid memory address for an operand of mode MODE.  WIN will be a C
+ * a valid memory address for an operand of mode MODE.  WIN will be a C
+ * statement label elsewhere in the code.  It is not necessary to define this
+ * statement label elsewhere in the code.  It is not necessary to define this
+ * macro, but it might be useful for performance reasons.
+ * macro, but it might be useful for performance reasons.
+ *
+ *
+ * ZipCPU: This is worth coming back to, according to the notes page, but it
+ * ZipCPU: This is worth coming back to, according to the notes page, but it
+ * may also be a difficult macro to use.  Look at other implementations before
+ * may also be a difficult macro to use.  Look at other implementations before
+ * we dive into this.
+ * we dive into this.
+ */
+ */
+// #undef LEGITIMIZE_RELOAD_ADDRESS
+// #undef LEGITIMIZE_RELOAD_ADDRESS
+// #define LEGITIMIZE_RELOAD_ADDRESS
+// #define LEGITIMIZE_RELOAD_ADDRESS
+
+
+/* TARGET_MODE_DEPENDENT_ADDRESS_P(ADDR,SPACE) ... This hook returns true
+/* TARGET_MODE_DEPENDENT_ADDRESS_P(ADDR,SPACE) ... This hook returns true
+ * if memory address addr in address space addrspace can have different meanings
+ * if memory address addr in address space addrspace can have different meanings
+ * depending on the machine mode of the memory reference it is used for or if
+ * depending on the machine mode of the memory reference it is used for or if
+ * the address is valid for some modes but not others.
+ * the address is valid for some modes but not others.
+ */
+ */
+#undef TARGET_MODE_DEPENDENT_ADDRESS_P
+#undef TARGET_MODE_DEPENDENT_ADDRESS_P
+#define        TARGET_MODE_DEPENDENT_ADDRESS_P         zip_mode_dependent_address_p
+#define        TARGET_MODE_DEPENDENT_ADDRESS_P         zip_mode_dependent_address_p
+
+
+/* TARGET_LEGITIMATE_CONSTANT_P(MODE,RTX) ... This hook returns true if x is a
+/* TARGET_LEGITIMATE_CONSTANT_P(MODE,RTX) ... This hook returns true if x is a
+ * legitimate constant for a MODE-mode immediate operand on the target machine.
+ * legitimate constant for a MODE-mode immediate operand on the target machine.
+ * You can assume the RTX satisfies CONSTANT_P, so you need not check this.
+ * You can assume the RTX satisfies CONSTANT_P, so you need not check this.
+ *
+ *
+ * The default definition returns true.
+ * The default definition returns true.
+ */
+ */
+
+
+/* TARGET_DELEGITIMIZE_ADDRESS(RTX)
+/* TARGET_DELEGITIMIZE_ADDRESS(RTX)
+ */
+ */
+
+
+/* TARGET_CONST_NOT_OK_FOR_DEBUG_P(RTX) ... This hook should return true if RTX
+/* TARGET_CONST_NOT_OK_FOR_DEBUG_P(RTX) ... This hook should return true if RTX
+ * should not be emitted into debug sections.
+ * should not be emitted into debug sections.
+ */
+ */
+
+
+/* TARGET_CANNOT_FORCE_CONST_MEM(MODE,RTX) ... This hook should return true if
+/* TARGET_CANNOT_FORCE_CONST_MEM(MODE,RTX) ... This hook should return true if
+ * RTX is a form that cannot (or should not) be spilled to the constant pool.
+ * RTX is a form that cannot (or should not) be spilled to the constant pool.
+ * MODE is the mode of X.  The default version returns false.
+ * MODE is the mode of X.  The default version returns false.
+ */
+ */
+// #define     TARGET_CANNOT_FORCE_CONST_MEM   hook_bool_mode_rtx_false
+// #define     TARGET_CANNOT_FORCE_CONST_MEM   hook_bool_mode_rtx_false
+
+
+/* TARGET_USE_BLOCKS_FOR_CONSTANT_P(MODE,RTX) ... This hook should return true
+/* TARGET_USE_BLOCKS_FOR_CONSTANT_P(MODE,RTX) ... This hook should return true
+ * if pool entries for constant RTX can be placed in an object_block structure.
+ * if pool entries for constant RTX can be placed in an object_block structure.
+ * MODE is the mode of X.  The default version returns false for all constants.
+ * MODE is the mode of X.  The default version returns false for all constants.
+ *
+ *
+ *????
+ *????
+ */
+ */
+// #warning "Huh?"
+// #warning "Huh?"
+
+
+/* TARGET_USE_BLOCKS_FOR_DECL_P(DECL) ... This hook should return true if pool
+/* TARGET_USE_BLOCKS_FOR_DECL_P(DECL) ... This hook should return true if pool
+ * entries for DECL should be placed in an object_block structure.  The default
+ * entries for DECL should be placed in an object_block structure.  The default
+ * version returns true for all DECL's.
+ * version returns true for all DECL's.
+ *
+ *
+ * Sounds good.
+ * Sounds good.
+ */
+ */
+
+
+/* TARGET_BUILTIN_RECIPROCAL(TREE) ... This hook should return the DECL of a
+/* TARGET_BUILTIN_RECIPROCAL(TREE) ... This hook should return the DECL of a
+ * function that implements the reciprocal of the machine specific builtin
+ * function that implements the reciprocal of the machine specific builtin
+ * function fndecl, or NULL_TREE if such a function is not available.
+ * function fndecl, or NULL_TREE if such a function is not available.
+ */
+ */
+
+
+/* TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD(VOID) ... This hook should return the
+/* TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD(VOID) ... This hook should return the
+ * DECL of a function f that given an address addr as an argument returns a mask
+ * DECL of a function f that given an address addr as an argument returns a mask
+ * m that can be used to extract from two vectors the relevant data that
+ * m that can be used to extract from two vectors the relevant data that
+ * resides in addr in case addr is not properly aligned.
+ * resides in addr in case addr is not properly aligned.
+ *
+ *
+ * Zip CPU does not support vectorization.
+ * Zip CPU does not support vectorization.
+ */
+ */
+
+
+/* Other vector, SIMD, and GOACC macros skipped as Zip CPU doesn't support
+/* Other vector, SIMD, and GOACC macros skipped as Zip CPU doesn't support
+ * such data accesses and manipulation.
+ * such data accesses and manipulation.
+ */
+ */
+
+
+/* 17.14 Anchored Addresses */
+/* 17.14 Anchored Addresses */
+
+
+/* TARGET_MIN_ANCHOR_OFFSET ... The minimum offset that should be applied to
+/* TARGET_MIN_ANCHOR_OFFSET ... The minimum offset that should be applied to
+ * a section anchor.  On most targets, it should be the smallest offset that
+ * a section anchor.  On most targets, it should be the smallest offset that
+ * can be applied to a base register while still giving a legitimate address for
+ * can be applied to a base register while still giving a legitimate address for
+ * every mode.  The default value is 0.
+ * every mode.  The default value is 0.
+ *
+ *
+ * On the Zip CPU, this is the minimum operand B offset to a LW or SW
+ * On the Zip CPU, this is the minimum operand B offset to a LW or SW
+ * operation, which would be a signed 14 bit number.
+ * operation, which would be a signed 14 bit number.
+ */
+ */
+#undef TARGET_MIN_ANCHOR_OFFSET
+#undef TARGET_MIN_ANCHOR_OFFSET
+#define TARGET_MIN_ANCHOR_OFFSET       zip_min_anchor_offset
+#define TARGET_MIN_ANCHOR_OFFSET       zip_min_anchor_offset
+
+
+/* TARGET_MAX_ANCHOR_OFFSET ... Like TARGET_MIN_ANCHOR_OFFSET, but the maximum
+/* TARGET_MAX_ANCHOR_OFFSET ... Like TARGET_MIN_ANCHOR_OFFSET, but the maximum
+ * (inclusive) offset that should be applied to section anchors.  The default
+ * (inclusive) offset that should be applied to section anchors.  The default
+ * value is 0.
+ * value is 0.
+ */
+ */
+#undef TARGET_MAX_ANCHOR_OFFSET
+#undef TARGET_MAX_ANCHOR_OFFSET
+#define TARGET_MAX_ANCHOR_OFFSET       zip_max_anchor_offset
+#define TARGET_MAX_ANCHOR_OFFSET       zip_max_anchor_offset
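+
+/* Illustration only: the definitions behind these two symbols are not part of
+ * this hunk, but given the "signed 14-bit" note above they could plausibly be
+ * as simple as
+ *
+ *	#define	zip_min_anchor_offset	(-(1<<13))	// -8192
+ *	#define	zip_max_anchor_offset	((1<<13)-1)	//  8191
+ *
+ * i.e. the offset range reachable by the operand B immediate of LW/SW.
+ */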
+
+/* TARGET_ASM_OUTPUT_ANCHOR(RTX) ... Write the assembly code to define section
+ * anchor RTX, which is a SYMBOL_REF for which 'SYMBOL_REF_ANCHOR_P(RTL)' is
+ * true.  The hook is called with the assembly output position set to the
+ * beginning of SYMBOL_REF_BLOCK(X).
+ *
+ * If ASM_OUTPUT_DEF is available, the hook's default definition uses it to
+ * define the symbol as '. + SYMBOL_REF_BLOCK_OFFSET(RTL)'.  If ASM_OUTPUT_DEF
+ * is not available, the hook's default definition is NULL, which disables the
+ * use of section anchors altogether.
+ *
+ * Section anchors would be very valuable in Zip CPU assembly; therefore, we
+ * must define this hook.  However ... no one else seems to ever define these
+ * hooks, so I really don't have much of an example to work with.
+ */
+// #warning "Come back to this"
+// #undef      TARGET_ASM_OUTPUT_ANCHOR
+// #define     TARGET_ASM_OUTPUT_ANCHOR        zip_asm_output_anchor
+
+/* TARGET_USE_ANCHORS_FOR_SYMBOL_P(RTX) ... Return true if GCC should attempt
+ * to use anchors to access SYMBOL_REF X.  You can assume
+ * SYMBOL_REF_HAS_BLOCK_INFO_P(X) and !SYMBOL_REF_ANCHOR_P(X).
+ *
+ * The default version is correct for most targets, but you might need to
+ * intercept this hook to handle things like target-specific attributes or
+ * target-specific sections.
+ *
+ * Not knowing anything more, we'll leave the default as is for the Zip CPU.
+ */
+// #undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
+// #define TARGET_USE_ANCHORS_FOR_SYMBOL_P     zip_use_anchors_for_symbol_p
+
+/* 17.15 Condition Code Status */
+
+/* 17.15.1 Representation of condition codes using (cc0) --- that's us */
+
+/* CC_STATUS_MDEP ... C code for a data type which is used for declaring
+ * the mdep component of cc_status.  It defaults to int.
+ *
+ * ZipCPU---Int is good for us.
+ */
+
+/* CC_STATUS_MDEP_INIT ... A C expression to initialize the mdep field to
+ * "empty".  The default definition does nothing, since most machines don't
+ * use the field anyway.  If you want to use the field, you should probably
+ * define this macro to initialize it.
+ */
+
+/* NOTICE_UPDATE_CC(EXP, INSN) ... A C compound statement to set the components
+ * of cc_status appropriately for an insn INSN whose body is EXP.  It is this
+ * macro's responsibility to recognize insns that set the condition code as
+ * a byproduct of other activity as well as those that explicitly set (cc0).
+ *
+ * ZipCPU --- We need this, as not all expressions set (cc0).
+ *
+ * In hindsight, this is the *old* and unsupported way of doing business within
+ * GCC.  To switch to the new way, all instruction definitions within our .md
+ * file have been adjusted to either change or clobber the CC register.
+ *
+ */
+#ifdef HAVE_cc0
+// #define     NOTICE_UPDATE_CC(EXP, INSN)     zip_update_cc_notice(EXP, INSN)
+#error "ZipCPU no longer has CC0"
+#endif
+
+
+/* 17.15.2 Representation of condition codes using registers */
+/* ... which the ZipCPU doesn't have.  The ZipCPU has a CC0 register, and hence
+ * this section isn't supposed to apply.
+ */
+
+/* SELECT_CC_MODE(op, x, y) ... On many machines, the condition code may be
+ * produced by other instructions than compares, for example the branch can use
+ * directly the condition code set by a subtract instruction.  However, on some
+ * machines when the condition code is set this way some bits (such as the
+ * overflow bit) are not set in the same way as a test instruction, so that a
+ * different branch instruction must be used for some conditional branches.
+ * When this happens, use the machine mode of the condition code register to
+ * record different formats of the condition code register.  Modes can also be
+ * used to record which compare instruction (e.g. a signed or an unsigned
+ * comparison) produced the condition codes.
+ *
+ * If other modes than CCmode are required, add them to 'machine-modes.def' and
+ * define SELECT_CC_MODE to choose a mode given an operand of a compare.  This
+ * is needed because the modes have to be chosen not only during RTL generation
+ * but also, for example, by instruction combination.  The result of
+ * SELECT_CC_MODE should be consistent with the mode used in the patterns; ...
+ *
+ * ZipCPU ... We have only one CC Mode, so we'll use the CCmode defined in
+ * machine-modes.def and should be fine with it.  Hence, this doesn't need
+ * to be defined.
+ */
+
+/* TARGET_CANONICALIZE_COMPARISON(int,rtx *, rtx *, bool) ... On some machines
+ * (such as the ZipCPU) not all possible comparisons are defined, but you can
+ * convert an invalid comparison into a valid one.  For example, the Alpha
+ * does not have a GT comparison, but you can use an LT comparison instead and
+ * swap the order of the operands.
+ *
+ * On such machines, implement this hook to do any required conversions:  code
+ * is the initial comparison code and op0 and op1 are the left and right
+ * operands of the comparison, respectively.  If op0_preserve_value is true the
+ * implementation is not allowed to change the value of op0 since the value
+ * might be used in RTXs which aren't comparisons.  E.g. the implementation is
+ * not allowed to swap operands in that case.
+ *
+ * GCC will not assume that the comparison resulting from this macro is valid
+ * but will see if the resulting insn matches a pattern in the 'md' file.
+ *
+ * You need not implement this hook if it would never change the comparison
+ * code or operands.
+ *
+ * The ZipCPU only keeps track of 8 possible
+ * comparisons, and bastardizing other comparisons into those 8 is extremely
+ * painful.  Therefore, we *need* this capability to make certain we can use
+ * our comparisons successfully.
+ *
+ * The only problem is ... this hook appears to only be called on non-CC0
+ * machines.  Hence, defining it hasn't done anything for us.
+ */
+#define        TARGET_CANONICALIZE_COMPARISON  zip_canonicalize_comparison
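+
+/* As an illustration of the shape such a hook takes (this sketch is NOT the
+ * actual zip_canonicalize_comparison from zip.c; the swap below is just the
+ * classic GT->LT rewrite described in the paragraph above):
+ *
+ *	static void
+ *	zip_canonicalize_comparison(int *code, rtx *op0, rtx *op1,
+ *			bool op0_preserve_value)
+ *	{
+ *		if (((*code == GT)||(*code == GTU)) && (!op0_preserve_value))
+ *		{
+ *			rtx	tem = *op0;	// Swap the two operands ...
+ *			*op0 = *op1;
+ *			*op1 = tem;
+ *			// ... and flip the comparison code to match
+ *			*code = (int)swap_condition((enum rtx_code)*code);
+ *		}
+ *	}
+ */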
+
+/* REVERSIBLE_CC_MODE(MODE) ... A C expression whose value is one if it is
+ * always safe to reverse a comparison whose mode is MODE.  If SELECT_CC_MODE
+ * can ever return MODE for a floating-point inequality comparison, then
+ * REVERSIBLE_CC_MODE(MODE) must be zero.
+ *
+ * You need not define this macro if it would always return zero or if the
+ * floating-point format is anything other than IEEE_FLOAT_FORMAT.  For example,
+ * here ...
+ *
+ * ZipCPU -- We'll always return zero, so this need not be defined.
+ */
+
+/* REVERSE_CONDITION(CODE,MODE) ... A C expression whose value is the reversed
+ * condition code of CODE for a comparison done in CC_MODE MODE.  This macro
+ * is used only in case REVERSIBLE_CC_MODE(MODE) is nonzero. ...
+ *
+ * ZipCPU ... Since REVERSIBLE_CC_MODE(MODE) will always be zero, we'll leave
+ * this undefined.
+ */
+
+/* bool TARGET_FIXED_CONDITION_CODE_REGS(int *, int *) ... On targets which do
+ * not use (cc0), and which use a hard register rather than a pseudo-register
+ * to hold condition codes, the regular CSE passes are often not able to
+ * identify cases in which the hard register is set to a common value.  Use this
+ * hook to enable a small pass which optimizes such cases.  This hook should
+ * return true to enable this pass, and it should set the integers to which its
+ * arguments point to the hard register numbers used for condition codes.  When
+ * there is only one such register, as is true on most systems, the integer
+ * pointed to by p2 should be set to INVALID_REGNUM.
+ *
+ * The default version of this hook returns false.
+ *
+ * ZipCPU --- I like the idea of enabling optimizations.  Let's return
+ * something other than false.
+ */
+#define        TARGET_FIXED_CONDITION_CODE_REGS        zip_fixed_condition_code_regs
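+
+/* A minimal sketch of what such a hook looks like (illustrative only; the real
+ * zip_fixed_condition_code_regs lives in zip.c).  With a single flags register
+ * it reduces to:
+ *
+ *	static bool
+ *	zip_fixed_condition_code_regs(unsigned int *p1, unsigned int *p2)
+ *	{
+ *		*p1 = zip_CC;		// our one condition-code register
+ *		*p2 = INVALID_REGNUM;	// there is no second CC register
+ *		return true;		// enable the post-CSE cleanup pass
+ *	}
+ */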
+
+/* machine_mode TARGET_CC_MODES_COMPATIBLE(M1,M2) ... On targets which use
+ * multiple condition code modes in class MODE_CC, it is sometimes the case
+ * that a comparison can be validly done in more than one mode.  On such a
+ * system, define this target hook to take two mode arguments and to return a
+ * mode in which both comparisons may be validly done.  If there is no such
+ * mode, return VOIDmode.
+ *
+ * The default version of this hook checks whether the modes are the same.  If
+ * they are, it returns that mode.  If they are different, it returns VOIDmode.
+ *
+ * ZipCPU--Given that we only have the one CCmode, the default definition works
+ * well enough for us.
+ */
+
+/* unsigned int TARGET_FLAGS_REGNUM ... If the target has a dedicated flags
+ * register, and it needs to use the post-reload comparison elimination pass,
+ * then this value should be set appropriately.
+ *
+ * ZipCPU---Looks like we can set this easily enough without any problems.
+ */
+#undef TARGET_FLAGS_REGNUM
+#define        TARGET_FLAGS_REGNUM     zip_CC
+
+/* 17.16 Relative costs of operations */
+
+
+// #define     REGISTER_MOVE_COST(MODE,FROM,TO)        ((MODE==DImode)||(MODE==DFmode))?4:2
+// #define     TARGET_REGISTER_MOVE_COST
+// #define     MEMORY_MOVE_COST(MODE, CLASS, IN)       ((MODE==DImode)||(MODE==DFmode))?8:7
+/* TARGET_REGISTER_MOVE_COST(M,FRM,TO) ... This target hook should return the
+ * cost of moving data of mode M from a register in class FRM to one in class
+ * TO.  The classes are expressed using the enumeration values such as
+ * GENERAL_REGS.  A value of 2 is the default; other values are interpreted
+ * relative to that.
+ *
+ * It is not required that the cost always equal 2 when FROM is the same as TO;
+ * on some machines it is expensive to move between registers if they are not
+ * general registers.
+ *
+ * If reload sees ...
+ *
+ * ZipCPU ... We can leave this at its default value of 2.
+ */
+
+/* TARGET_MEMORY_MOVE_COST(MOD,CL,IN) ... This target hook should return the
+ * cost of moving data of mode MOD between a register of class CL and memory.
+ * IN is false if the value is to be written to memory, true if it is to be
+ * read in.  This cost is relative to those in TARGET_REGISTER_MOVE_COST.
+ * If moving between registers and memory is more expensive than between two
+ * registers, you should add this target hook to express the relative cost.
+ *
+ * If you do not add this target hook, GCC uses a default cost of 4 plus the
+ * cost of copying via a secondary reload register, if one is needed.  If your
+ * machine requires a secondary reload register to copy between memory and a
+ * register of CL but the reload mechanism is more complex than copying via
+ * an intermediate, use this target hook to reflect the actual cost of the
+ * move.
+ *
+ * ZipCPU --- Memory moves are more expensive than twice the cost of register
+ * moves, so let's make certain this is defined.
+ */
+#define        TARGET_MEMORY_MOVE_COST zip_memory_move_cost
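+
+/* Illustrative sketch only: the real zip_memory_move_cost lives in zip.c.  The
+ * shape of the hook, echoing the commented-out MEMORY_MOVE_COST numbers above,
+ * would be roughly
+ *
+ *	static int
+ *	zip_memory_move_cost(machine_mode mode, reg_class_t rclass, bool in)
+ *	{
+ *		(void)rclass; (void)in;
+ *		// Two bus transfers for DImode/DFmode, one for everything else
+ *		return ((mode == DImode)||(mode == DFmode)) ? 8 : 7;
+ *	}
+ */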
+
+// #warning "This needs to be double checked, and annotated"
+#define        BRANCH_COST(SPEED,PREDICTABLE)          ((PREDICTABLE)?2:5)
+
+/* Define this macro as a C expression which is nonzero if accessing less than
+ * a word of memory (i.e. a 'char' or a 'short') is no faster than accessing
+ * a word of memory.
+ */
+#define        SLOW_BYTE_ACCESS        1
+
+/* MOVE_RATIO(SPD) ... The threshold number of scalar memory-to-memory move
+ * instructions, below which a sequence of instructions should be generated
+ * instead of a string move instruction or a library call.  Increasing the
+ * value will always make code faster, but eventually incurs high cost in
+ * increased code size.
+ */
+#define        MOVE_RATIO(SPD) 5
+
+/* TARGET_USE_BY_PIECES_INFRASTRUCTURE_P(SZ,ALGN,OP,SPD) ...
+ */
+// #undef      TARGET_USE_BY_PIECES_INFRASTRUCTURE_P(S,A,OP,SPD)
+// #define     TARGET_USE_BY_PIECES_INFRASTRUCTURE_P(S,A,OP,SPD)// needs hook
+
+/* CLEAR_RATIO(SPD) ... The threshold number of scalar move instructions, below
+ * which a sequence of instructions should be generated to clear memory instead
+ * of a string clear instruction or a library call.  Increasing the value will
+ * always make the code faster, but eventually incurs high cost in increased
+ * code size.
+ */
+#define        CLEAR_RATIO(SPD)        MOVE_RATIO(SPD)
+
+/* NO_FUNCTION_CSE ... Define this macro to be true if it is as good or better
+ * to call a constant function address than to call an address kept in a
+ * register.
+ *
+ * On the Zip CPU, constant function addresses--especially relative ones--
+ * can be optimized into a single cycle delay.  Register jumps will always
+ * stall the whole (5-stage) pipeline.
+ */
+#define        NO_FUNCTION_CSE true
+
+/* TARGET_RTX_COSTS(X,CODE,OUTER,OPNO,TOTAL,SPD) ... This target hook describes
+ * the relative costs of RTL expressions.
+ *
+ * The cost may depend on the precise form of the expression, which is available
+ * for examination in X, and the fact that X appears as operand OPNO of an
+ * expression with rtx code OUTER.  That is, the hook can assume that there is
+ * some RTX Y such that GET_CODE(Y)==OUTER and such that either (a) XEXP(Y,OPNO)
+ * == X or (b) XVEC(Y,OPNO) contains X.
+ *
+ * ...
+ * The hook returns true when all subexpressions of x have been processed and
+ * false when rtx_cost should recurse.
+ */
+
+/* TARGET_ADDRESS_COST(ADDR,MODE,AS,SPD) ... This hook computes the cost of an
+ * addressing mode that contains ADDR.  If not defined, the cost is computed
+ * from the ADDR expression and the TARGET_RTX_COSTS hook.  In cases where more
+ * than one form of an address is known, the form with the lowest cost will be
+ * used.  If multiple forms have the same, lowest, cost, the one that is the
+ * most complex will be used.
+ *
+ * ZipCPU really has only one address cost, the only type of address it
+ * supports.  Sure, index addressing would cost us more, but we don't support
+ * that so ... I think we're okay defining this as a constant.  Indeed, the
+ * docs state that, "On RISC machines, all instructions normally have the same
+ * length and execution time.  Hence all addresses will have equal costs."
+ */
+#undef TARGET_ADDRESS_COST
+#define        TARGET_ADDRESS_COST     zip_address_cost
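+
+/* Since every supported address costs the same, the hook can collapse to a
+ * constant.  (Sketch only; the actual zip_address_cost is defined in zip.c.)
+ *
+ *	static int
+ *	zip_address_cost(rtx addr, machine_mode mode, addr_space_t as, bool speed)
+ *	{
+ *		(void)addr; (void)mode; (void)as; (void)speed;
+ *		return 1;	// register + immediate is the only form we have
+ *	}
+ */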
+
+
+/* TARGET_NO_SPECULATION_IN_DELAY_SLOTS_P ... This predicate controls the use
+ * of the eager delay slot filler to disallow speculatively executed
+ * instructions being placed in delay slots.  Targets such as certain MIPS
+ * architectures possess both branches with and without delay slots.  As the
+ * eager delay slot filler can decrease performance, disabling it is beneficial
+ * when ordinary branches are available.  Use of delay slot branches filled
+ * using the basic filler is often still desirable as the delay slot can hide a
+ * pipeline bubble.
+ */
+// How should the Zip CPU define this?  We have no delay slots.
+
+
+/* 17.17 Instruction Scheduler */
+
+#define        TARGET_SCHED_ISSUE_RATE zip_sched_issue_rate
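+
+/* The issue-rate hook simply reports how many instructions can be issued per
+ * cycle.  For a single-issue pipeline like the ZipCPU the sketch is trivial
+ * (illustrative only; the actual zip_sched_issue_rate is in zip.c):
+ *
+ *	static int
+ *	zip_sched_issue_rate(void)
+ *	{
+ *		return 1;	// at most one instruction issued per cycle
+ *	}
+ */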
+
+/* 17.18 Dividing the Output into Sections */
+
+/* Switch to the text or data segment. */
+#define        TEXT_SECTION_ASM_OP     "\t.text"
+#define        DATA_SECTION_ASM_OP     "\t.data"
+
+// #undef      TARGET_LIBGCC_SDATA_SECTION
+// #define     TARGET_LIBGCC_SDATA_SECTION     ".sdata"
+
+
+/* 17.19 Position Independent Code */
+
+#define        PIC_OFFSET_TABLE_REGNUM                 zip_GOT
+#define        PIC_OFFSET_TABLE_REG_CALL_CLOBBERED     0
+// #define LEGITIMATE_PIC_OPERAND_P(X) should evaluate to X(GOT) only
+
+/* 17.20 Defining the Output Assembler Language */
+
+/* 17.20.2 Output of Data */
+
+/* These hooks (above) specify assembly directives for creating certain kinds
+ * of integer objects.  The TARGET_ASM_BYTE_OP directive creates a byte-sized
+ * object.  The TARGET_ASM_ALIGNED_HI_OP one creates an aligned two-byte object
+ * and so on.  Any of the hooks may be NULL, indicating that no suitable
+ * directive is available.
+ *
+ * The compiler will print these strings at the start of a new line, followed
+ * immediately by the object's initial value.  In most cases, the string should
+ * contain a tab, a pseudo op, and then another tab.
+ */
+
+#undef TARGET_ASM_ALIGNED_HI_OP
+#undef TARGET_ASM_ALIGNED_SI_OP
+// #undef      TARGET_ASM_ALIGNED_DI_OP
+#define        TARGET_ASM_ALIGNED_HI_OP        ".short"
+// The assembler is set up to call a 4-byte integer a long.  This definition of
+// a long isn't consistent with the compiler's definition.  For this reason,
+// the ZipCPU backend for the GNU Assembler defines a long as a 64-bit number,
+// and an int as a 32-bit number.
+#define        TARGET_ASM_ALIGNED_SI_OP        ".int"
+// #define     TARGET_ASM_ALIGNED_DI_OP        ".long"
+
+
+/* 17.20.4 Output and Generation of Labels */
+
+/* ASM_OUTPUT_LABEL
+ * ... A default definition of this macro is provided which is correct for
+ * most systems.
+ */
+
+/* ASM_OUTPUT_FUNCTION_LABEL
+ * ... if not defined, then the function name is defined in the usual manner
+ * as a label.
+ */
+
+/* ASM_OUTPUT_INTERNAL_LABEL ... Identical to ASM_OUTPUT_LABEL, except that name
+ * is known to refer to a compiler-generated label.  The default definition
+ * uses assemble_name_raw, which is like assemble_name except that it is more
+ * efficient.
+ */
+
+/* SIZE_ASM_OP ... A C string containing the appropriate assembler directive
+ * to specify the size of a symbol, without any arguments.  On systems that
+ * use ELF, the default is "\t.size\t"; on other systems, the default is not to
+ * define this macro.
+ *
+ * Define this macro only if it is correct to use the default definitions of
+ * ASM_OUTPUT_SIZE_DIRECTIVE and ASM_OUTPUT_MEASURED_SIZE for your system.
+ * If you need your own custom definitions of those macros, or if you do not
+ * need explicit symbol sizes at all, do not define this macro.
+ */
+
+/* ASM_OUTPUT_SIZE_DIRECTIVE
+ * ASM_OUTPUT_MEASURED_SIZE
+ */
+
+/* NO_DOLLAR_IN_LABEL ... Define this macro if the assembler does not accept
+ * the character '$' in label names.  By default constructors and destructors
+ * in G++ have "$" in the identifiers.  If this macro is defined, '.' is
+ * used instead.
+ */
+
+/* NO_DOT_IN_LABEL ... Define this macro if the assembler does not accept the
+ * character '.' in label names.  By default constructors and destructors in
+ * G++ have names that use '.'.  If this macro is defined, these names are
+ * rewritten to avoid '.'.
+ */
+
+/* TYPE_ASM_OP ... A C string containing the appropriate assembler directive to
+ * specify the type of a symbol, without any arguments.  On systems that use
+ * ELF, the default in config/elfos.h is "\t.type\t"; on other systems, the
+ * default is not to define this macro.
+ *
+ * Define this macro only if it is correct to use the default definition of
+ * ASM_OUTPUT_TYPE_DIRECTIVE for your system.  If you need your own custom
+ * definition of this macro, or if you do not need explicit symbol types at all,
+ * do not define this macro.
+ */
+
+/* TYPE_OPERAND_FMT ... A
+ */
+
+/* ASM_OUTPUT_TYPE_DIRECTIVE
+ */
+
+/* ASM_DECLARE_FUNCTION_NAME(STREAM, NAME, DECL) ...
+ * If this macro is not defined, then the function name is defined in the usual
+ * manner as a label (by means of ASM_OUTPUT_FUNCTION_LABEL).
+ */
+
+/* ASM_DECLARE_FUNCTION_SIZE
+ * ASM_DECLARE_COLD_FUNCTION_NAME
+ * ASM_DECLARE_COLD_FUNCTION_SIZE
+ * ASM_DECLARE_OBJECT_NAME
+ * TARGET_ASM_DECLARE_CONSTANT_NAME
+ */
+/* ASM_DECLARE_REGISTER_GLOBAL(STREAM, DECL, REGNO, NAME) ... A C statement
+ * (sans semicolon) to output to the stdio stream STREAM any text necessary for
+ * claiming a register REGNO for a global variable DECL with name NAME.
+ *
+ * If you don't define this macro, that is equivalent to defining it to do
+ * nothing.
+ */
+
+/* ASM_FINISH_DECLARE_OBJECT
+ * TARGET_ASM_GLOBALIZE_LABEL
+ * TARGET_ASM_GLOBALIZE_DECL_NAME
+ * TARGET_ASM_ASSEMBLE_UNDEFINED_DECL
+ * ASM_WEAKEN_LABEL
+ * ASM_WEAKEN_DECL
+ * ASM_OUTPUT_WEAKREF
+ * SUPPORTS_WEAK
+ * TARGET_SUPPORTS_WEAK
+ * MAKE_DECL_ONE_ONLY
+ * SUPPORTS_ONE_ONLY
+ * TARGET_ASM_ASSEMBLE_VISIBILITY
+ * TARGET_WEAK_NOT_IN_ARCHIVE_TOC
+ * ASM_OUTPUT_EXTERNAL
+ * TARGET_ASM_EXTERNAL_LIBCALL
+ * TARGET_ASM_MARK_DECL_PRESERVED
+ * ASM_OUTPUT_LABELREF
+ * TARGET_MANGLE_ASSEMBLER_NAME
+ * ASM_OUTPUT_SYMBOL_REF
+ * ASM_OUTPUT_LABEL_REF
+ * TARGET_ASM_INTERNAL_LABEL
+ * ASM_OUTPUT_DEBUG_LABEL
+ * ASM_GENERATE_INTERNAL_LABEL
+ * ASM_FORMAT_PRIVATE_NAME
+ */
+
+/* ASM_OUTPUT_DEF ... A C statement to output to the stdio stream STREAM
+ * assembler code which defines (equates) the symbol NAME to have the value
+ * VALUE.
+ *
+ * ZipCPU---So many other things that we need depend upon this, that we need
+ * to implement a non-default version.
+ */
+#define        ASM_OUTPUT_DEF  zip_asm_output_def
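+
+/* Sketch of the usual shape of such a routine (illustrative only; the real
+ * zip_asm_output_def in zip.c may emit a different directive):
+ *
+ *	static void
+ *	zip_asm_output_def(FILE *stream, const char *name, const char *value)
+ *	{
+ *		fputs("\t.equ\t", stream);
+ *		assemble_name(stream, name);	// handles any label prefixing
+ *		fputs(",", stream);
+ *		assemble_name(stream, value);
+ *		fputc('\n', stream);
+ *	}
+ */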
+
+/* ASM_OUTPUT_DEF_FROM_DECLS
+ * TARGET_DEFERRED_OUTPUT_DEFS
+ * ASM_OUTPUT_WEAK_ALIAS
+ * OBJ_GEN_METHOD_LABEL
+ */
+
+
+/* 17.20.7 Output of Assembler Instructions */
+
+#define        REGISTER_NAMES {                                        \
+       "R0", "R1", "R2",  "R3",  "R4",  "R5", "R6", "R7",      \
+       "R8", "R9", "R10", "R11", "R12", "SP", "CC", "PC",      \
+       "uR0","uR1","uR2", "uR3", "uR4", "uR5","uR6","uR7",     \
+       "uR8","uR9","uR10","uR11","uR12","uSP","uCC","uPC",     \
+       "PSEUDO-AP" }
+
+/* REGISTER_PREFIX     (Undefined by default)
+ * LOCAL_LABEL_PREFIX  (Undefined by default)
+ * USER_LABEL_PREFIX   defaults to "*"
+ * IMMEDIATE_PREFIX    (Undefined by default)
+ *
+ * If defined, C string expressions to be used for the '%R', '%L', '%U', and
+ * '%I' options of asm_fprintf (see 'final.c').  These are useful when a single
+ * 'md' file must support multiple assembler formats.  In that case, the various
+ * 'tm.h' files can define these macros differently.
+ */
+// #define     USER_LABEL_PREFIX       "*"
+
+/* Defining memory operand address formats also belongs in this section. */
+
+/* 17.20.10 Assembler Commands for Alignment */
+
+/* JUMP_ALIGN(label) ... The alignment (log base 2) to put in front of label,
+ * which is a common destination of jumps and has no fallthru incoming
+ * edge.  This macro need not be defined if you don't want any special alignment
+ * to be done at such a time.  Most machine descriptions do not currently define
+ * this macro.
+ *
+ * ZipCPU---The assembler should automatically deal with label alignment, so
+ * let's not do anything about it here.
+ */
+
+/* TARGET_ASM_JUMP_ALIGN_MAX_SKIP
+ */
+
+/* LABEL_ALIGN_AFTER_BARRIER
+ * TARGET_ASM_LABEL_ALIGN_AFTER_BARRIER_MAX_SKIP
+ */
+
+/* LOOP_ALIGN(label)
+ * TARGET_ASM_LOOP_ALIGN_MAX_SKIP
+ * LABEL_ALIGN
+ * TARGET_ASM_LABEL_ALIGN_MAX_SKIP
+ */
+
+/* ASM_OUTPUT_SKIP(STREAM, NBYTES) ... A C statement to output to the stdio
+ * stream an assembler instruction to advance the location counter by NBYTES
+ * bytes.
+ */
+
+/* TARGET_ASM_LABEL_ALIGN */
+/* Assembler Commands for Alignment */
+#define        ASM_OUTPUT_ALIGN(STREAM,POWER)  \
+       do { fprintf(STREAM, "\t.align\t%d\n", POWER); } while (0)
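+
+/* For example, a call such as ASM_OUTPUT_ALIGN(stream, 2) with the definition
+ * above simply prints "\t.align\t2"; the POWER argument is the log-2 alignment
+ * the compiler is requesting.
+ */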
+
+
+
+
+/* 17.21 Controlling Debugging Information Format */
+/* 17.21 Controlling Debugging Information Format */
+/* 17.22 Cross Compilation and Floating Point */
+/* 17.22 Cross Compilation and Floating Point */
+
+
+// REAL_VALUE_TYPE
+// REAL_VALUE_TYPE
+// REAL_VALUES_EQUAL
+// REAL_VALUES_EQUAL
+// REAL_VALUES_LESS ... Tess whether x is less than y
+// REAL_VALUES_LESS ... Tess whether x is less than y
+/* REAL_VALUE_FIX ... Truncates x to an unsigned integer, rouding toward zero.
+/* REAL_VALUE_FIX ... Truncates x to an unsigned integer, rouding toward zero.
+ * If x is negative, returns zero.
+ * If x is negative, returns zero.
+ */
+ */
+// REAL_VALUE_ATOF
+// REAL_VALUE_ATOF
+// REAL_VALUE_NEGATIVE
+// REAL_VALUE_NEGATIVE
+// REAL_VALUE_ISINF
+// REAL_VALUE_ISINF
+// REAL_VALUE_ISNAN
+// REAL_VALUE_ISNAN
+/* REAL_ARITHMETIC(OUT,CODE,X,Y) ... (Macro) Calculates an arithmetic operation
+/* REAL_ARITHMETIC(OUT,CODE,X,Y) ... (Macro) Calculates an arithmetic operation
+ * on two floating point values X and Y, storing the result in OUT (which must
+ * on two floating point values X and Y, storing the result in OUT (which must
+ * be a variable).
+ * be a variable).
+ *
+ *
+ * The operation to be performed is specified by CODE.  Only the following
+ * The operation to be performed is specified by CODE.  Only the following
+ * codes are supported: PLUS_EXPR, MINUS_EXPR, MULT_EXPR, RDIV_EXPR, MAX_EXPR,
+ * codes are supported: PLUS_EXPR, MINUS_EXPR, MULT_EXPR, RDIV_EXPR, MAX_EXPR,
+ * MIN_EXPR.
+ * MIN_EXPR.
+ *
+ *
+ * If REAL_ARITHMETIC is asked to evaluate division by zero and the target's
+ * If REAL_ARITHMETIC is asked to evaluate division by zero and the target's
+ * floating point format cannot represent infinity, it will call abort().
+ * floating point format cannot represent infinity, it will call abort().
+ * Callers shoudl check for this situation first, using MODE_HAS_INFINITIES.
+ * Callers shoudl check for this situation first, using MODE_HAS_INFINITIES.
+ */
+ */
+/* REAL_VALUE_NEGATE(X) ... [Macro] Returns the negative of the floating point
+/* REAL_VALUE_NEGATE(X) ... [Macro] Returns the negative of the floating point
+ * value X.
+ * value X.
+ */
+ */
+/* REAL_VALUE_ABS(X) ... [Macro] Returns the absolute value of X.
+/* REAL_VALUE_ABS(X) ... [Macro] Returns the absolute value of X.
+ */
+ */
+/* 17.23 Mode switching instructions */
+/* 17.23 Mode switching instructions */
+/* 17.24 Defining target-specific uses of __attribute__ */
+/* 17.24 Defining target-specific uses of __attribute__ */
+#undef TARGET_OPTION_OVERRIDE
+#undef TARGET_OPTION_OVERRIDE
+#define        TARGET_OPTION_OVERRIDE  zip_override_options
+#define        TARGET_OPTION_OVERRIDE  zip_override_options
+
+
+/* 17.25 Emulating TLS */
+/* 17.25 Emulating TLS */
+/* 17.26 Defining coprocessor specifics for MIPS targets*/
+/* 17.26 Defining coprocessor specifics for MIPS targets*/
+
+
+ // ZipCPU isn't MIPS.
+ // ZipCPU isn't MIPS.
+
+
+/* 17.27 Parameters for Precompiled Header Validity Checking */
+/* 17.27 Parameters for Precompiled Header Validity Checking */
+/* 17.28 C++ ABI parameters */
+/* 17.28 C++ ABI parameters */
+/* 17.29 Adding support for named address spaces */
+/* 17.29 Adding support for named address spaces */
+/* 17.30 Miscellaneous Parameters */
+/* 17.30 Miscellaneous Parameters */
+
+
+/* HAS_LONG_COND_BRANCH ... Define this boolean macro to indicate whether or
+/* HAS_LONG_COND_BRANCH ... Define this boolean macro to indicate whether or
+ * not your architecture has conditional branches that can span all of memory.
+ * not your architecture has conditional branches that can span all of memory.
+ * It is used in conjunction with an optimization that partitions hot and
+ * It is used in conjunction with an optimization that partitions hot and
+ * cold basic blocks into separate sections of the executable.  If this macro
+ * cold basic blocks into separate sections of the executable.  If this macro
+ * is set to false, gcc will convert any conditional branches that attempt to
+ * is set to false, gcc will convert any conditional branches that attempt to
+ * cross between sections into unconditional branches or indirect jumps.
+ * cross between sections into unconditional branches or indirect jumps.
+ *
+ *
+ * ZipCPU --- The assembler renders long unconditional branch code without
+ * ZipCPU --- The assembler renders long unconditional branch code without
+ * problems, so we can pretend that such long branches exist.
+ * problems, so we can pretend that such long branches exist.
+ */
+ */
+#define        HAS_LONG_COND_BRANCH true
+#define        HAS_LONG_COND_BRANCH true
+
+
+/* HAS_LONG_UNCOND_BRANCH ... Define this boolean macro to indicate whether
+/* HAS_LONG_UNCOND_BRANCH ... Define this boolean macro to indicate whether
+ * or not your architecture has unconditional branches that can span all of
+ * or not your architecture has unconditional branches that can span all of
+ * memory.  (ZipCPU does ... via the LW (PC),PC instruction.)  It is used in
+ * memory.  (ZipCPU does ... via the LW (PC),PC instruction.)  It is used in
+ * conjunction with an optimization that partitions hot and cold basic blocks
+ * conjunction with an optimization that partitions hot and cold basic blocks
+ * into separate sections of the executable.  If this macro is set to false,
+ * into separate sections of the executable.  If this macro is set to false,
+ * gcc will convert any unconditional branches that attempt to cross between
+ * gcc will convert any unconditional branches that attempt to cross between
+ * sections into indirect jumps.
+ * sections into indirect jumps.
+ *
+ *
+ * ZipCPU has the LW (PC),PC instruction which can be used to implement a long
+ * ZipCPU has the LW (PC),PC instruction which can be used to implement a long
+ * jump.
+ * jump.
+ */
+ */
+#define        HAS_LONG_UNCOND_BRANCH  true
+#define        HAS_LONG_UNCOND_BRANCH  true
+
+
+/* CASE_VECTOR_MODE ... An alias for a machine mode name.  This is the machine
+/* CASE_VECTOR_MODE ... An alias for a machine mode name.  This is the machine
+ * mode that elements of a jump-table should have.
+ * mode that elements of a jump-table should have.
+ *
+ *
+ */
+ */
+#define        CASE_VECTOR_MODE        SImode
+#define        CASE_VECTOR_MODE        SImode
+
+
+/* CASE_VECTOR_SHORTEN_MODE(MIN,MAX,BODY) ... Optional: return the preferred
+/* CASE_VECTOR_SHORTEN_MODE(MIN,MAX,BODY) ... Optional: return the preferred
+ * mode for an addr_diff_vec when the minimum and maximum offset are known.
+ * mode for an addr_diff_vec when the minimum and maximum offset are known.
+ * If you define this, it enables extra code in branch shortening to deal with
+ * If you define this, it enables extra code in branch shortening to deal with
+ * addr_diff_vec.  To make this work, you also have to define INSN_ALIGN and
+ * addr_diff_vec.  To make this work, you also have to define INSN_ALIGN and
+ * make the alignment for addr_diff_vec explicit.  The body argument is provided so that the offset_unsigned and scale flags can be updated.
+ * make the alignment for addr_diff_vec explicit.  The body argument is provided so that the offset_unsigned and scale flags can be updated.
+ *
+ *
+ * ZipCPU---No advantage here.
+ * ZipCPU---No advantage here.
+ */
+ */
+
+
+/* CASE_VECTOR_PC_RELATIVE ... Define this exrpession to indicate when
+/* CASE_VECTOR_PC_RELATIVE ... Define this exrpession to indicate when
+ * jump-tables should contain relative addresses.  You need not define this
+ * jump-tables should contain relative addresses.  You need not define this
+ * macro if jump-tables never contain relative addresses, or jump-tables
+ * macro if jump-tables never contain relative addresses, or jump-tables
+ * should contain relative addresses only when -fPIC or -FPIC is in effect.
+ * should contain relative addresses only when -fPIC or -FPIC is in effect.
+ *
+ *
+ * ZipCPU---No advantage in PC-Relative jump tables--except in PIC relative
+ * ZipCPU---No advantage in PC-Relative jump tables--except in PIC relative
+ * code.
+ * code.
+ */
+ */
+
+
+/* TARGET_CASE_VALUES_THRESHOLD(VOID) ... This function returns the smallest
+/* TARGET_CASE_VALUES_THRESHOLD(VOID) ... This function returns the smallest
+ * number of different values for which it is best to use a jump-table instead
+ * number of different values for which it is best to use a jump-table instead
+ * of a tree of conditional branches.  The default is four for machines with a
+ * of a tree of conditional branches.  The default is four for machines with a
+ * casesi instruction and five otherwise.  This is best for most machines.
+ * casesi instruction and five otherwise.  This is best for most machines.
+ *
+ *
+ * ZipCPU---Leave at the default.
+ * ZipCPU---Leave at the default.
+ */
+ */
+
+
+/* WORD_REGISTER_OPERATIONS ... Define this macro to 1 if operations between
+/* WORD_REGISTER_OPERATIONS ... Define this macro to 1 if operations between
+ * registers with integral mode smaller than a word are always performed on the
+ * registers with integral mode smaller than a word are always performed on the
+ * entire register.  Most RISC machines have this property and most CISC
+ * entire register.  Most RISC machines have this property and most CISC
+ * machines do not.
+ * machines do not.
+ *
+ *
+ * ZipCPU---We have this property, since we're a fairly RISC machine.
+ * ZipCPU---We have this property, since we're a fairly RISC machine.
+ */
+ */
+#undef WORD_REGISTER_OPERATIONS
+#undef WORD_REGISTER_OPERATIONS
+#define        WORD_REGISTER_OPERATIONS        1
+#define        WORD_REGISTER_OPERATIONS        1
+
+
+/* LOAD_EXTEND_OP(MEMMODE) ... Define this macro to be a C expression indicating
+/* LOAD_EXTEND_OP(MEMMODE) ... Define this macro to be a C expression indicating
+ * when insns that read memory in MEMMODE, an integral mode narrower than a
+ * when insns that read memory in MEMMODE, an integral mode narrower than a
+ * word, set the bits outside of MEMMODE to be either the sign extension or
+ * word, set the bits outside of MEMMODE to be either the sign extension or
+ * zero-extension of the data read.  Return SIGN_EXTEND for values of MEMMODE
+ * zero-extension of the data read.  Return SIGN_EXTEND for values of MEMMODE
+ * for which the insn sign-extends, ZERO_EXTEND for which it zero-extends, and
+ * for which the insn sign-extends, ZERO_EXTEND for which it zero-extends, and
+ * UNKNOWN for other modes.
+ * UNKNOWN for other modes.
+ *
+ *
+ * Do not define this macro if it would always return UNKNOWN.
+ * Do not define this macro if it would always return UNKNOWN.
+ *
+ *
+ * ZipCPU---Our memory unit zero extends registers, so we'll zero extend here.
+ * ZipCPU---Our memory unit zero extends registers, so we'll zero extend here.
+ */
+ */
+#undef LOAD_EXTEND_OP
+#undef LOAD_EXTEND_OP
+#define        LOAD_EXTEND_OP(MEM)     ZERO_EXTEND
+#define        LOAD_EXTEND_OP(MEM)     ZERO_EXTEND
+
+
+/* SHORT_IMMEDIATES_SIGN_EXTEND ... Define this macro to 1 if loading short
+/* SHORT_IMMEDIATES_SIGN_EXTEND ... Define this macro to 1 if loading short
+ * immediate values into registers sign extends.
+ * immediate values into registers sign extends.
+ *
+ *
+ * ZipCPU---All immediates are sign extended, so yes.
+ * ZipCPU---All immediates are sign extended, so yes.
+ */
+ */
+#undef SHORT_IMMEDIATES_SIGN_EXTEND
+#undef SHORT_IMMEDIATES_SIGN_EXTEND
+#define        SHORT_IMMEDIATES_SIGN_EXTEND    1
+#define        SHORT_IMMEDIATES_SIGN_EXTEND    1
+
+
+/* TARGET_MIN_DIVISIONS_FOR_RECIP_MUL
+/* TARGET_MIN_DIVISIONS_FOR_RECIP_MUL
+ */
+ */
+
+
+/* MOVE_MAX ... The maximum number of bytes that a single instruction can move
+/* MOVE_MAX ... The maximum number of bytes that a single instruction can move
+ * quickly between memory and registers or between two memory locations.
+ * quickly between memory and registers or between two memory locations.
+ *
+ *
+ * ZipCPU --- Although we can move 32 bits at a time, and most people would call
+ * ZipCPU --- Although we can move 32 bits at a time, and most people would call
+ * this 4 bytes, the compiler defines a byte as the minimum addressable unit.
+ * this 4 bytes, the compiler defines a byte as the minimum addressable unit.
+ * Therefore, this is defined to be one.
+ * Therefore, this is defined to be one.
+ */
+ */
+#define        MOVE_MAX        UNITS_PER_WORD
+#define        MOVE_MAX        UNITS_PER_WORD
+
+
+/* MAX_MOVE_MAX ... The maximum number of bytes that a single instruction can
+/* MAX_MOVE_MAX ... The maximum number of bytes that a single instruction can
+ * move quickly between memory and registers or between two memory ...
+ * move quickly between memory and registers or between two memory ...
+ *
+ *
+ * ZipCPU --- this sounds just the same as MOVE_MAX, which is the default
+ * ZipCPU --- this sounds just the same as MOVE_MAX, which is the default
+ * definition of this.
+ * definition of this.
+ */
+ */
+
+
+/* SHIFT_COUNT_TRUNCATED ... A C expression that is nonzero if on this machine
+/* SHIFT_COUNT_TRUNCATED ... A C expression that is nonzero if on this machine
+ * the number of bits actually used for the count of a shift operation is equal
+ * the number of bits actually used for the count of a shift operation is equal
+ * to the number of bits needed to represent the size of the object being
+ * to the number of bits needed to represent the size of the object being
+ * shifted.
+ * shifted.
+ *
+ *
+ * You need not define this macro if it would have the value of zero.
+ * You need not define this macro if it would have the value of zero.
+ *
+ *
+ * ZipCPU---A shift of 33 (or more) in either direction will wipe out the
+ * ZipCPU---A shift of 33 (or more) in either direction will wipe out the
+ * value in the register, therefore this value should be zero, the default.
+ * value in the register, therefore this value should be zero, the default.
+ */
+ */
+
+
+/* TARGET_SHIFT_TRUNCATION_MASK(MODE) ... This function describes how the
+/* TARGET_SHIFT_TRUNCATION_MASK(MODE) ... This function describes how the
+ * standard shift patterns for MODE deal with shifts by negative amounts or by
+ * standard shift patterns for MODE deal with shifts by negative amounts or by
+ * more than the width of the mode.
+ * more than the width of the mode.
+ *
+ *
+ * ZipCPU---The default is zero, since we didn't define SHIFT_COUNT_TRUNCATED.
+ * ZipCPU---The default is zero, since we didn't define SHIFT_COUNT_TRUNCATED.
+ * This is the case for the ZipCPU as well.
+ * This is the case for the ZipCPU as well.
+ */
+ */
+
+
+/* TRULY_NOOP_TRUNCATION(OUTPREC,INPREC) ... A C expression which is nonzero
+/* TRULY_NOOP_TRUNCATION(OUTPREC,INPREC) ... A C expression which is nonzero
+ * if on this machine it is safe to "convert" an integer of INPREC bits to one
+ * if on this machine it is safe to "convert" an integer of INPREC bits to one
+ * of OUTPREC bits (where OUTPREC is smaller than INPREC) by merely operating on
+ * of OUTPREC bits (where OUTPREC is smaller than INPREC) by merely operating on
+ * it as if it had OUTPREC bits.  On many machines, this expression can be 1.
+ * it as if it had OUTPREC bits.  On many machines, this expression can be 1.
+ *
+ *
+ * ZipCPU ...
+ * ZipCPU ...
+ */
+ */
+#undef TRULY_NOOP_TRUNCATION
+#undef TRULY_NOOP_TRUNCATION
+#define TRULY_NOOP_TRUNCATION(O,I)     1
+#define TRULY_NOOP_TRUNCATION(O,I)     1
+
+
+/* TARGET_MODE_REP_EXTENDED(MODE,REPMODE) ... The representation of an integral
+/* TARGET_MODE_REP_EXTENDED(MODE,REPMODE) ... The representation of an integral
+ * mode can be such that the values are always extended to a wider integral
+ * mode can be such that the values are always extended to a wider integral
+ * mode.  Return SIGN_EXTEND if values of MODE are represented in sign-extended
+ * mode.  Return SIGN_EXTEND if values of MODE are represented in sign-extended
+ * form to REPMODE.  Return UNKNOWN otherwise.  (Currently none of the targets
+ * form to REPMODE.  Return UNKNOWN otherwise.  (Currently none of the targets
+ * use zero-extension this way, so unlike LOAD_EXTEND_OP, TARGET_MODE_REP_EXTENDED
+ * use zero-extension this way, so unlike LOAD_EXTEND_OP, TARGET_MODE_REP_EXTENDED
+ * is expected to return either SIGN_EXTEND or UNKNOWN.  Also, no target extends
+ * is expected to return either SIGN_EXTEND or UNKNOWN.  Also, no target extends
+ * MODE to REP_MODE so that REP_MODE is not the next widest integral mode and
+ * MODE to REP_MODE so that REP_MODE is not the next widest integral mode and
+ * we currently take advantage of this fact.)
+ * we currently take advantage of this fact.)
+ *
+ *
+ * Similarly to LOAD_EXTEND_OP you may return a non_UNKNOWN value even if the
+ * Similarly to LOAD_EXTEND_OP you may return a non_UNKNOWN value even if the
+ * extension is not performed on certain hard registers as long as for the
+ * extension is not performed on certain hard registers as long as for the
+ * REGNO_REG_CLASS of these hard registers CANNOT_CHANGE_MODE_CLASS returns
+ * REGNO_REG_CLASS of these hard registers CANNOT_CHANGE_MODE_CLASS returns
+ * zero.
+ * zero.
+ *
+ *
+ * Note that TARGET_MODE_REP_EXTENDED and LOAD_EXTEND_OP describe two related
+ * Note that TARGET_MODE_REP_EXTENDED and LOAD_EXTEND_OP describe two related
+ * properties.  If you define TARGET_MODE_REP_EXTENDED(mode,wordmode) you
+ * properties.  If you define TARGET_MODE_REP_EXTENDED(mode,wordmode) you
+ * probably also want to define LOAD_EXTEND_OP(mode) to return the same type
+ * probably also want to define LOAD_EXTEND_OP(mode) to return the same type
+ * of extension.
+ * of extension.
+ *
+ *
+ * In order to enforce the representation of mode, TRULY_NOOP_TRUNCATION should
+ * In order to enforce the representation of mode, TRULY_NOOP_TRUNCATION should
+ * return false when truncating to MODE.
+ * return false when truncating to MODE.
+ *
+ *
+ * ZipCPU ... ???
+ * ZipCPU ... ???
+ */
+ */
+// #undef      TARGET_MODE_REP_EXTENDED
+// #undef      TARGET_MODE_REP_EXTENDED
+// #define     TARGET_MODE_REP_EXTENDED(R,M)   UNKNOWN
+// #define     TARGET_MODE_REP_EXTENDED(R,M)   UNKNOWN
+
+
+/* STORE_FLAG_VALUE ... A C expression describing the value returned by a
+/* STORE_FLAG_VALUE ... A C expression describing the value returned by a
+ * comparison operator with an integral mode and stored by a store-flag
+ * comparison operator with an integral mode and stored by a store-flag
+ * instruction (cstoremode4) when the condition is true.  This description
+ * instruction (cstoremode4) when the condition is true.  This description
+ * must apply to all the cstoremode4 patterns and all the comparison operators
+ * must apply to all the cstoremode4 patterns and all the comparison operators
+ * whose results have MODE_INT mode.
+ * whose results have MODE_INT mode.
+ *
+ *
+ * ZipCPU---Doesn't really have a STORE_FLAG_VALUE instruction ...
+ * ZipCPU---Doesn't really have a STORE_FLAG_VALUE instruction ...
+ */
+ */
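+/* Sketch only: if a cstoresi4 pattern producing plain 0/1 results were ever
+ * added, the conventional (assumed) definition would simply be:
+ */
+// #define     STORE_FLAG_VALUE        1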
+
+
+/* FLOAT_STORE_FLAG_VALUE
+/* FLOAT_STORE_FLAG_VALUE
+ *
+ *
+ * ZipCPU
+ * ZipCPU
+ */
+ */
+
+
+/* VECTOR_STORE_FLAG_VALUE ... define this macro on machines that have vector
+/* VECTOR_STORE_FLAG_VALUE ... define this macro on machines that have vector
+ * comparison operations that return a vector result ...
+ * comparison operations that return a vector result ...
+ *
+ *
+ * ZipCPU---Doesn't support vector operations.
+ * ZipCPU---Doesn't support vector operations.
+ */
+ */
+
+
+/* CLZ_DEFINED_VALUE_AT_ZERO(MODE, VAL)
+/* CLZ_DEFINED_VALUE_AT_ZERO(MODE, VAL)
+ * CTZ_DEFINED_VALUE_AT_ZERO(MODE, VAL)
+ * CTZ_DEFINED_VALUE_AT_ZERO(MODE, VAL)
+ *
+ *
+ * A C expression that indicates whether the architecture defines a value for
+ * A C expression that indicates whether the architecture defines a value for
+ * clz or ctz with a zero operand.  A result of 0 indicates the value is
+ * clz or ctz with a zero operand.  A result of 0 indicates the value is
+ * undefined.  If the value is defined for only the RTL expression, the macro
+ * undefined.  If the value is defined for only the RTL expression, the macro
+ * should evaluate to 1.  If the value also applies to the corresponding optab
+ * should evaluate to 1.  If the value also applies to the corresponding optab
+ * entry, then the macro should evaluate to 2.  In cases where the value is
+ * entry, then the macro should evaluate to 2.  In cases where the value is
+ * defined, value should be set to this value.
+ * defined, value should be set to this value.
+ * If this macro is not defined, the value of clz or ctz at zero is assumed to
+ * If this macro is not defined, the value of clz or ctz at zero is assumed to
+ * be undefined.
+ * be undefined.
+ *
+ *
+ * ZipCPU---Has neither clz nor ctz instructions, so we don't need this.
+ * ZipCPU---Has neither clz nor ctz instructions, so we don't need this.
+ */
+ */
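+/* Sketch only: were a count-leading-zeros instruction ever added, the common
+ * idiom (assumed here, not taken from the port) defines the value at zero to
+ * be the operand width and applies it to the optab as well:
+ */
+// #define     CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
+//                     ((VALUE) = GET_MODE_BITSIZE(MODE), 2)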
+
+
+/* Pmode ... An alias for the machine mode for pointers.  On most machines,
+/* Pmode ... An alias for the machine mode for pointers.  On most machines,
+ * define this to be the integer mode corresponding to the width of a
+ * define this to be the integer mode corresponding to the width of a
+ * hardware pointer.  SImode on 32-bit machines, or DImode on 64-bit machines.
+ * hardware pointer.  SImode on 32-bit machines, or DImode on 64-bit machines.
+ * On some machines you must define this to be one of the partial
+ * On some machines you must define this to be one of the partial
+ * integer modes, such as PSImode.
+ * integer modes, such as PSImode.
+ *
+ *
+ * ZipCPU--the machine mode for pointers is one word (32-bits).  The one word
+ * ZipCPU--the machine mode for pointers is one word (32-bits).  The one word
+ * mode is the SImode, so that's what we use here.
+ * mode is the SImode, so that's what we use here.
+ */
+ */
+#undef Pmode
+#undef Pmode
+#define        Pmode   SImode
+#define        Pmode   SImode
+
+
+/* FUNCTION_MODE ... An alias for the machine mode used for memory references to
+/* FUNCTION_MODE ... An alias for the machine mode used for memory references to
+ * functions being called, in call RTL expressions.  On most CISC machines, where
+ * functions being called, in call RTL expressions.  On most CISC machines, where
+ * an instruction can begin at any byte address, this should be QImode.  On most
+ * an instruction can begin at any byte address, this should be QImode.  On most
+ * RISC machines, where all instructions have fixed size and alignment, this
+ * RISC machines, where all instructions have fixed size and alignment, this
+ * should be a mode with the same size and alignment as the machine instruction
+ * should be a mode with the same size and alignment as the machine instruction
+ * words--typically SImode or HImode.
+ * words--typically SImode or HImode.
+ *
+ *
+ * ZipCPU---Definitely SImode, as with Pmode.  (All words are 32-bits, including
+ * ZipCPU---Definitely SImode, as with Pmode.  (All words are 32-bits, including
+ * addresses on the ZipCPU.)
+ * addresses on the ZipCPU.)
+ */
+ */
+#undef FUNCTION_MODE
+#undef FUNCTION_MODE
+#define        FUNCTION_MODE   SImode
+#define        FUNCTION_MODE   SImode
+
+
+/* STDC_0_IN_SYSTEM_HEADERS
+/* STDC_0_IN_SYSTEM_HEADERS
+ */
+ */
+
+
+/* TARGET_C_PREINCLUDE(V) ... Define this hook to return the name of  a header
+/* TARGET_C_PREINCLUDE(V) ... Define this hook to return the name of  a header
+ * file to be included at the start of all compilations, as if it had been
+ * file to be included at the start of all compilations, as if it had been
+ * included with #include <file>.  If this hook returns NULL, or is not defined,
+ * included with #include <file>.  If this hook returns NULL, or is not defined,
+ * or if the header is not found, or if the user specifies -ffreestanding or
+ * or if the header is not found, or if the user specifies -ffreestanding or
+ * -nostdinc, no header is included.
+ * -nostdinc, no header is included.
+ *
+ *
+ * ZipCPU --- We don't have a standard library defined yet, so we'll leave this
+ * ZipCPU --- We don't have a standard library defined yet, so we'll leave this
+ * as NULL.
+ * as NULL.
+ */
+ */
+#undef TARGET_C_PREINCLUDE
+#undef TARGET_C_PREINCLUDE
+#define        TARGET_C_PREINCLUDE     NULL
+#define        TARGET_C_PREINCLUDE     NULL
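+/* Sketch only: once a C library exists, the hook could instead return the
+ * name of a pre-included header.  zip_c_preinclude is hypothetical:
+ *
+ *     static const char *zip_c_preinclude(void) { return "stdc-predef.h"; }
+ */
+// #define     TARGET_C_PREINCLUDE     zip_c_preinclude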
+
+
+/* TARGET_CXX_IMPLICIT_EXTERN_C(CONST CHAR *) ... Define this hook to add target
+/* TARGET_CXX_IMPLICIT_EXTERN_C(CONST CHAR *) ... Define this hook to add target
+ * specific C++ implicit extern C functions.  If this function returns true
+ * specific C++ implicit extern C functions.  If this function returns true
+ * for the name of a file-scope function, that function implicitly gets extern
+ * for the name of a file-scope function, that function implicitly gets extern
+ * "C" linkage rather than whatever linkage the declaration would normally have.
+ * "C" linkage rather than whatever linkage the declaration would normally have.
+ * An example of such a function is WinMain on Win32 targets.
+ * An example of such a function is WinMain on Win32 targets.
+ *
+ *
+ * ZipCPU---Not ready to deal with this yet.
+ * ZipCPU---Not ready to deal with this yet.
+ */
+ */
+
+
+/* NO_IMPLICIT_EXTERN_C ... Define this macro if the system header files
+/* NO_IMPLICIT_EXTERN_C ... Define this macro if the system header files
+ * support C++ as well as C.  This macro inhibits the usual method of using
+ * support C++ as well as C.  This macro inhibits the usual method of using
+ * system header files in C++, which is to pretend that the file's contents
+ * system header files in C++, which is to pretend that the file's contents
+ * are enclosed in 'extern "C" {...}'.
+ * are enclosed in 'extern "C" {...}'.
+ *
+ *
+ *
+ *
+ * ZipCPU --- Don't have either C or C++ headers, so let's skip this for now.
+ * ZipCPU --- Don't have either C or C++ headers, so let's skip this for now.
+ * Eventually, though, I think ZipCPU and C++ would go very well together.
+ * Eventually, though, I think ZipCPU and C++ would go very well together.
+ */
+ */
+
+
+/* REGISTER_TARGET_PRAGMAS ... Define this macro if you want to implement any
+/* REGISTER_TARGET_PRAGMAS ... Define this macro if you want to implement any
+ * target specific pragmas.
+ * target specific pragmas.
+ *
+ *
+ * ZipCPU --- let's not.
+ * ZipCPU --- let's not.
+ */
+ */
+
+
+/* HANDLE_PRAGMA_PACK_WITH_EXPANSION ... Define this macro if macros should be
+/* HANDLE_PRAGMA_PACK_WITH_EXPANSION ... Define this macro if macros should be
+ * expanded in the arguments of #pragma pack().
+ * expanded in the arguments of #pragma pack().
+ *
+ *
+ * ZipCPU ... why?
+ * ZipCPU ... why?
+ */
+ */
+
+
+/* TARGET_DEFAULT_PACK_STRUCT ... If your target requires a struct packing
+/* TARGET_DEFAULT_PACK_STRUCT ... If your target requires a struct packing
+ * default other than 0 (meaning the machine default), define this macro to
+ * default other than 0 (meaning the machine default), define this macro to
+ * the necessary value (in bytes).  This must be a value that would also be
+ * the necessary value (in bytes).  This must be a value that would also be
+ * valid to use with #pragma pack() (that is, a small power of two).
+ * valid to use with #pragma pack() (that is, a small power of two).
+ */
+ */
+
+
+/* DOLLARS_IN_IDENTIFIERS
+/* DOLLARS_IN_IDENTIFIERS
+ * ZipCPU --- Default (not changing C)
+ * ZipCPU --- Default (not changing C)
+ */
+ */
+
+
+/* INSN_SETS_ARE_DELAYED(INSN) ... Define this macro as a C expression that
+/* INSN_SETS_ARE_DELAYED(INSN) ... Define this macro as a C expression that
+ * is nonzero if it is safe for the delay slot scheduler to place instructions
+ * is nonzero if it is safe for the delay slot scheduler to place instructions
+ * in the delay slot of INSN, even if they appear to use a resource set or
+ * in the delay slot of INSN, even if they appear to use a resource set or
+ * clobbered in INSN.  INSN is always a ...
+ * clobbered in INSN.  INSN is always a ...
+ *
+ *
+ * ZipCPU --- You need not define this macro if it would always return zero.
+ * ZipCPU --- You need not define this macro if it would always return zero.
+ */
+ */
+
+
+/* INSN_REFERENCES_ARE_DELAYED(INSN) ... Define this macro as a C expression
+/* INSN_REFERENCES_ARE_DELAYED(INSN) ... Define this macro as a C expression
+ * that is nonzero if it is safe for the delay slot scheduler to place
+ * that is nonzero if it is safe for the delay slot scheduler to place
+ * instructions in the delay slot of INSN, even if they appear to set or clobber
+ * instructions in the delay slot of INSN, even if they appear to set or clobber
+ * a resource referenced in INSN.  INSN is always a jump_insn or an insn.  On
+ * a resource referenced in INSN.  INSN is always a jump_insn or an insn.  On
+ * machines where some insn or jump_insn is really a function call and ...
+ * machines where some insn or jump_insn is really a function call and ...
+ *
+ *
+ * ZipCPU --- You need not define this macro if it would always return zero.
+ * ZipCPU --- You need not define this macro if it would always return zero.
+ */
+ */
+
+
+/* MULTIPLE_SYMBOL_SPACES ... Define this macro as a C expression that is
+/* MULTIPLE_SYMBOL_SPACES ... Define this macro as a C expression that is
+ * nonzero if, in some cases, global symbols from one translation unit may not
+ * nonzero if, in some cases, global symbols from one translation unit may not
+ * be bound to undefined symbols in another translation unit without user
+ * be bound to undefined symbols in another translation unit without user
+ * intervention.  For instance, under Microsoft Windows symbols must be
+ * intervention.  For instance, under Microsoft Windows symbols must be
+ * explicitly imported from shared libraries (DLLs).
+ * explicitly imported from shared libraries (DLLs).
+ *
+ *
+ * ZipCPU---You need not define this macro if it would always evaluate to zero,
+ * ZipCPU---You need not define this macro if it would always evaluate to zero,
+ * so we won't.
+ * so we won't.
+ */
+ */
+
+
+/* TARGET_MD_ASM_ADJUST
+/* TARGET_MD_ASM_ADJUST
+ */
+ */
+/* MATH_LIBRARY ... Define this macro as a C constant ... you only need to
+/* MATH_LIBRARY ... Define this macro as a C constant ... you only need to
+ * define this macro if the default of "m" is wrong.
+ * define this macro if the default of "m" is wrong.
+ *
+ *
+ * ZipCPU --- as we don't have a math library yet, building one such that "m"
+ * ZipCPU --- as we don't have a math library yet, building one such that "m"
+ * works doesn't sound like a problem.  Let's not define this.
+ * works doesn't sound like a problem.  Let's not define this.
+ */
+ */
+
+
+/* LIBRARY_PATH_ENV ... Define this as a C string constant for the environment
+/* LIBRARY_PATH_ENV ... Define this as a C string constant for the environment
+ * variable that specifies where the linker should look for libraries.
+ * variable that specifies where the linker should look for libraries.
+ *
+ *
+ * Just in case we want to add libraries for ZipCPU, let's place them in
+ * Just in case we want to add libraries for ZipCPU, let's place them in
+ * /usr/local/zip/lib, so as not to confuse them with our local system's
+ * /usr/local/zip/lib, so as not to confuse them with our local system's
+ * libraries.
+ * libraries.
+ */
+ */
+#define        LIBRARY_PATH_ENV        "/usr/local/zip/lib"
+#define        LIBRARY_PATH_ENV        "/usr/local/zip/lib"
+
+
+/* TARGET_POSIX_IO ... Define this macro if the target supports the following
+/* TARGET_POSIX_IO ... Define this macro if the target supports the following
+ * POSIX file functions: access, mkdir, and file locking with fcntl/F_SETLKW.
+ * POSIX file functions: access, mkdir, and file locking with fcntl/F_SETLKW.
+ *
+ *
+ * ZipCPU does not.
+ * ZipCPU does not.
+ */
+ */
+
+
+/* MAX_CONDITIONAL_EXECUTE ... A C expression for the maximum number of
+/* MAX_CONDITIONAL_EXECUTE ... A C expression for the maximum number of
+ * instructions to execute via conditional execution instructions instead of a
+ * instructions to execute via conditional execution instructions instead of a
+ * branch.  A value of BRANCH_COST+1 is the default if the machine does not use
+ * branch.  A value of BRANCH_COST+1 is the default if the machine does not use
+ * cc0 and 1 if it does use cc0.
+ * cc0 and 1 if it does use cc0.
+ *
+ *
+ * ZipCPU---This sounds good enough for the ZipCPU as well--as long as we have
+ * ZipCPU---This sounds good enough for the ZipCPU as well--as long as we have
+ * BRANCH_COST defined.  However, BRANCH_COST is defined conditionally on
+ * BRANCH_COST defined.  However, BRANCH_COST is defined conditionally on
+ * something else, so let's keep looking into this.
+ * something else, so let's keep looking into this.
+ */
+ */
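+/* For reference only (a paraphrase of GCC's own default from ifcvt.c, not a
+ * definition made here): the default works out to roughly
+ *
+ *     BRANCH_COST(optimize_function_for_speed_p(cfun), false) + 1
+ *
+ * which is why BRANCH_COST needs a sane definition before leaning on it.
+ */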
+
+
+/* IFCVT_MODIFY_TESTS(CEINFO,TRUE,FALSE) ... Used if the target needs to
+/* IFCVT_MODIFY_TESTS(CEINFO,TRUE,FALSE) ... Used if the target needs to
+ * perform machine-dependent modifications on the conditionals used for turning
+ * perform machine-dependent modifications on the conditionals used for turning
+ * basic blocks into conditionally executed code.  CEINFO points to a data
+ * basic blocks into conditionally executed code.  CEINFO points to a data
+ * structure, struct ce_if_block, which contains information about the currently
+ * structure, struct ce_if_block, which contains information about the currently
+ * processed blocks.  TRUE and FALSE are the tests that are used for
+ * processed blocks.  TRUE and FALSE are the tests that are used for
+ * converting the then-block and the else-block, respectively.  Set either TRUE
+ * converting the then-block and the else-block, respectively.  Set either TRUE
+ * or FALSE to a null pointer if the tests cannot be converted.
+ * or FALSE to a null pointer if the tests cannot be converted.
+ *
+ *
+ * ZipCPU --- I need to set this to properly take advantage of our conditional
+ * ZipCPU --- I need to set this to properly take advantage of our conditional
+ * execution and conditional testing capabilities.
+ * execution and conditional testing capabilities.
+ */
+ */
+#define        IFCVT_MODIFY_TESTS(CI,TR,FL)    zip_ifcvt_modify_tests(CI,&TR,&FL)
+#define        IFCVT_MODIFY_TESTS(CI,TR,FL)    zip_ifcvt_modify_tests(CI,&TR,&FL)
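+/* Judging from the invocation above, zip_ifcvt_modify_tests is presumably
+ * declared elsewhere (e.g. in zip-protos.h) with a signature along these
+ * lines; the exact prototype lives in the zip sources, this is only a sketch:
+ *
+ *     extern void zip_ifcvt_modify_tests(struct ce_if_block *ce_info,
+ *                     rtx *true_expr, rtx *false_expr);
+ */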
+
+
+/* IFCVT_MODIFY_MULTIPLE_TESTS(CEINFO, BB, TRUE, FALSE) ... Like
+/* IFCVT_MODIFY_MULTIPLE_TESTS(CEINFO, BB, TRUE, FALSE) ... Like
+ * IFCVT_MODIFY_TESTS, but used when converting more complicated if-statements
+ * IFCVT_MODIFY_TESTS, but used when converting more complicated if-statements
+ * into conditions combined by and and or operations.  BB contains the basic
+ * into conditions combined by and and or operations.  BB contains the basic
+ * block that contains the test that is currently being processed and about to
+ * block that contains the test that is currently being processed and about to
+ * be turned into a condition.
+ * be turned into a condition.
+ *
+ *
+ *
+ *
+ * ZipCPU --- I need to set this to properly take advantage of our conditional
+ * ZipCPU --- I need to set this to properly take advantage of our conditional
+ * execution and conditional testing capabilities.
+ * execution and conditional testing capabilities.
+ */
+ */
+// #warning "Need to come back to this."
+// #warning "Need to come back to this."
+#define        IFCVT_MODIFY_MULTIPLE_TESTS(CI,BB,TR,FL) TR=NULL_RTX
+#define        IFCVT_MODIFY_MULTIPLE_TESTS(CI,BB,TR,FL) TR=NULL_RTX
+
+
+
+
+/* IFCVT_MODIFY_INSN(CEINFO, PATTERN, INSN) ... A C expression to modify the
+/* IFCVT_MODIFY_INSN(CEINFO, PATTERN, INSN) ... A C expression to modify the
+ * PATTERN of an INSN that is to be converted to conditional execution format.
+ * PATTERN of an INSN that is to be converted to conditional execution format.
+ * CEINFO points to a data structure, struct ce_if_block, which contains
+ * CEINFO points to a data structure, struct ce_if_block, which contains
+ * information about the currently processed blocks.
+ * information about the currently processed blocks.
+ *
+ *
+ *
+ *
+ * ZipCPU --- I need to set this to properly take advantage of our conditional
+ * ZipCPU --- I need to set this to properly take advantage of our conditional
+ * execution and conditional testing capabilities.
+ * execution and conditional testing capabilities.
+ */
+ */
+#define        IFCVT_MODIFY_INSN(CE,PATRN,INSN) zip_ifcvt_modify_insn(CE,PATRN,INSN)
+#define        IFCVT_MODIFY_INSN(CE,PATRN,INSN) zip_ifcvt_modify_insn(CE,PATRN,INSN)
+
+
+
+
+/* IFCVT_MODIFY_FINAL(CEINFO) ... A C expression to perform any final
+/* IFCVT_MODIFY_FINAL(CEINFO) ... A C expression to perform any final
+ * machine dependent modifications in converting code to conditional
+ * machine dependent modifications in converting code to conditional
+ * execution.  The involved basic blocks can be found in struct ce_if_block
+ * execution.  The involved basic blocks can be found in struct ce_if_block
+ * structure pointed to by CEINFO.
+ * structure pointed to by CEINFO.
+ *
+ *
+ *
+ *
+ * ZipCPU --- I need to set this to properly take advantage of our conditional
+ * ZipCPU --- I need to set this to properly take advantage of our conditional
+ * execution and conditional testing capabilities.
+ * execution and conditional testing capabilities.
+ */
+ */
+// #warning "Need to come back to this."
+// #warning "Need to come back to this."
+#define        IFCVT_MODIFY_FINAL(CEINFO)      zip_ifcvt_modify_final(CEINFO)
+#define        IFCVT_MODIFY_FINAL(CEINFO)      zip_ifcvt_modify_final(CEINFO)
+
+
+
+
+/* IFCVT_MODIFY_CANCEL(CEINFO) ... A C expression to cancel any machine
+/* IFCVT_MODIFY_CANCEL(CEINFO) ... A C expression to cancel any machine
+ * dependent modifications in converting code to conditional execution.  The
+ * dependent modifications in converting code to conditional execution.  The
+ * involved basic blocks can be found in the struct ce_if_block structure that
+ * involved basic blocks can be found in the struct ce_if_block structure that
+ * is pointed to by CEINFO.
+ * is pointed to by CEINFO.
+ *
+ *
+ *
+ *
+ * ZipCPU --- I need to set this to properly take advantage of our conditional
+ * ZipCPU --- I need to set this to properly take advantage of our conditional
+ * execution and conditional testing capabilities.
+ * execution and conditional testing capabilities.
+ */
+ */
+// #warning "Need to come back to this."
+// #warning "Need to come back to this."
+#define        IFCVT_MODIFY_CANCEL(CEINFO)     zip_ifcvt_modify_cancel(CEINFO)
+#define        IFCVT_MODIFY_CANCEL(CEINFO)     zip_ifcvt_modify_cancel(CEINFO)
+
+
+
+
+/* IFCVT_MACHDEP_INIT(CEINFO) ... A C expression to initialize any machine
+/* IFCVT_MACHDEP_INIT(CEINFO) ... A C expression to initialize any machine
+ * specific data for if-conversion of the if-block in the CEINFO block structure
+ * specific data for if-conversion of the if-block in the CEINFO block structure
+ * that is pointed to by CEINFO.
+ * that is pointed to by CEINFO.
+ *
+ *
+ *
+ *
+ * ZipCPU --- I need to set this to properly take advantage of our conditional
+ * ZipCPU --- I need to set this to properly take advantage of our conditional
+ * execution and conditional testing capabilities.
+ * execution and conditional testing capabilities.
+ */
+ */
+// #warning "Need to come back to this."
+// #warning "Need to come back to this."
+#define        IFCVT_MACHDEP_INIT(CEINFO)      zip_ifcvt_machdep_init(CEINFO)
+#define        IFCVT_MACHDEP_INIT(CEINFO)      zip_ifcvt_machdep_init(CEINFO)
+
+
+
+
+/* TARGET_MACHINE_DEPENDENT_REORG(VOID) ... If non-null, this hook performs a
+/* TARGET_MACHINE_DEPENDENT_REORG(VOID) ... If non-null, this hook performs a
+ * target specific pass over the instruction stream.  The compiler will run it
+ * target specific pass over the instruction stream.  The compiler will run it
+ * at all optimization levels, just before the point at which it normally does
+ * at all optimization levels, just before the point at which it normally does
+ * delayed branch scheduling.
+ * delayed branch scheduling.
+ *
+ *
+ * You need not implement the hook if it has nothing to do.
+ * You need not implement the hook if it has nothing to do.
+ *
+ *
+ * ZipCPU---This may be part of a later upgrade, but shouldn't be needed to
+ * ZipCPU---This may be part of a later upgrade, but shouldn't be needed to
+ * just get us started.
+ * just get us started.
+ */
+ */
+
+
+
+
+/* TARGET_INIT_BUILTINS(VOID) ... Define this hook if you have any machine
+/* TARGET_INIT_BUILTINS(VOID) ... Define this hook if you have any machine
+ * specific builtin functions that need to be defined.  It should be a function
+ * specific builtin functions that need to be defined.  It should be a function
+ * that performs the necessary setup.  Machine specific builtin functions can be
+ * that performs the necessary setup.  Machine specific builtin functions can be
+ * useful to expand special machine instructions that would otherwise not
+ * useful to expand special machine instructions that would otherwise not
+ * normally be generated because they have no equivalent in the source language.
+ * normally be generated because they have no equivalent in the source language.
+ *
+ *
+ * To create a built in function, call the function lang_hooks.builtin_function
+ * To create a built in function, call the function lang_hooks.builtin_function
+ * which is defined by the language front end.  You can use any type nodes
+ * which is defined by the language front end.  You can use any type nodes
+ * set up by build_common_tree_nodes; only language front ends that use those
+ * set up by build_common_tree_nodes; only language front ends that use those
+ * two functions will call "TARGET_INIT_BUILTINS".
+ * two functions will call "TARGET_INIT_BUILTINS".
+ *
+ *
+ * ZipCPU---We need to come back to this.  We should have several built-ins
+ * ZipCPU---We need to come back to this.  We should have several built-ins
+ * defined: rtu(), wait(), halt(), save_context(cstackregno), and
+ * defined: rtu(), wait(), halt(), save_context(cstackregno), and
+ * restore_context(cstackregno).
+ * restore_context(cstackregno).
+ *
+ *
+ */
+ */
+#undef TARGET_INIT_BUILTINS
+#undef TARGET_INIT_BUILTINS
+#define        TARGET_INIT_BUILTINS    zip_init_builtins
+#define        TARGET_INIT_BUILTINS    zip_init_builtins
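+/* Sketch of the usual registration pattern (the real zip_init_builtins lives
+ * in zip.c; the builtin code ZIP_BUILTIN_RTU below is hypothetical):
+ *
+ *     static void zip_init_builtins(void) {
+ *             tree vfn = build_function_type_list(void_type_node, NULL_TREE);
+ *             add_builtin_function("__builtin_zip_rtu", vfn,
+ *                     ZIP_BUILTIN_RTU, BUILT_IN_MD, NULL, NULL_TREE);
+ *     }
+ */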
+
+
+/* TARGET_BUILTIN_DECL(CODE,INITP) ... Define this hook if you have any
+/* TARGET_BUILTIN_DECL(CODE,INITP) ... Define this hook if you have any
+ * machine specific builtin functions that need to be defined.  It should be a
+ * machine specific builtin functions that need to be defined.  It should be a
+ * function that returns the builtin function declaration for the builtin
+ * function that returns the builtin function declaration for the builtin
+ * function code CODE.  If there is no such builtin and it cannot be initialized
+ * function code CODE.  If there is no such builtin and it cannot be initialized
+ * at this time if INITP is true the function should return NULL_TREE.  If
+ * at this time if INITP is true the function should return NULL_TREE.  If
+ * CODE is out of range the function should return error_mark_node.
+ * CODE is out of range the function should return error_mark_node.
+ *
+ *
+ * ZipCPU ... needs to be done, don't know how to do it yet.
+ * ZipCPU ... needs to be done, don't know how to do it yet.
+ */
+ */
+#undef TARGET_BUILTIN_DECL
+#undef TARGET_BUILTIN_DECL
+#define        TARGET_BUILTIN_DECL     zip_builtin_decl
+#define        TARGET_BUILTIN_DECL     zip_builtin_decl
+
+
+
+
+/* TARGET_EXPAND_BUILTIN(TREE,TGT,SUB,MODE,IGNORE) ... Expand a call to a
+/* TARGET_EXPAND_BUILTIN(TREE,TGT,SUB,MODE,IGNORE) ... Expand a call to a
+ * machine specific built-in function that was set up by TARGET_INIT_BUILTINS.
+ * machine specific built-in function that was set up by TARGET_INIT_BUILTINS.
+ * TREE is the expression for the function call; the result should go to
+ * TREE is the expression for the function call; the result should go to
+ * TGT if that is convenient, and have mode MODE if that is convenient.  SUB
+ * TGT if that is convenient, and have mode MODE if that is convenient.  SUB
+ * may be used as the target for computing one of EXP's operands.  IGNORE is
+ * may be used as the target for computing one of EXP's operands.  IGNORE is
+ * non-zero if the value is to be ignored.  This function should return the
+ * non-zero if the value is to be ignored.  This function should return the
+ * result of the call to the built-in function.
+ * result of the call to the built-in function.
+ *
+ *
+ * ZipCPU ... needs to do it, just to get our special intrinsic functions
+ * ZipCPU ... needs to do it, just to get our special intrinsic functions
+ */
+ */
+#define        TARGET_EXPAND_BUILTIN   zip_expand_builtin
+#define        TARGET_EXPAND_BUILTIN   zip_expand_builtin
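+/* For reference, the hook's documented signature (the actual body is in the
+ * zip sources; this is only a reminder of the interface):
+ *
+ *     static rtx zip_expand_builtin(tree exp, rtx target, rtx subtarget,
+ *                     machine_mode mode, int ignore);
+ */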
+
+
+
+
+/* TARGET_BUILTIN_CHKP_FUNCTION(FCODE) ... Allows the target to redefine
+/* TARGET_BUILTIN_CHKP_FUNCTION(FCODE) ... Allows the target to redefine
+ * builtin functions used by Pointer Bounds Checker for code instrumentation.
+ * builtin functions used by Pointer Bounds Checker for code instrumentation.
+ *
+ *
+ * ZipCPU --- not interested.
+ * ZipCPU --- not interested.
+ */
+ */
+/* TARGET_CHKP_BOUND_TYPE
+/* TARGET_CHKP_BOUND_TYPE
+ * TARGET_CHKP_MAKE_BOUNDS_CONSTANT
+ * TARGET_CHKP_MAKE_BOUNDS_CONSTANT
+ * TARGET_CHKP_INITIALIZE_BOUNDS
+ * TARGET_CHKP_INITIALIZE_BOUNDS
+ *
+ *
+ * ZipCPU --- Same as last one.
+ * ZipCPU --- Same as last one.
+ */
+ */
+
+
+
+
+/* TARGET_RESOLVE_OVERLOADED_BUILTIN(LOC, FN, ARGS) ... Select a replacement
+/* TARGET_RESOLVE_OVERLOADED_BUILTIN(LOC, FN, ARGS) ... Select a replacement
+ * for a machine specific built-in function that was set up by
+ * for a machine specific built-in function that was set up by
+ * TARGET_INIT_BUILTINS.
+ * TARGET_INIT_BUILTINS.
+ *
+ *
+ * ZipCPU --- If I go to the trouble to create a builtin, why would I want
+ * ZipCPU --- If I go to the trouble to create a builtin, why would I want
+ * to then overload it?
+ * to then overload it?
+ */
+ */
+
+
+/* TARGET_FOLD_BUILTIN(FN,NARGS,ARGP,IGNORE) ... Fold a call to a machine
+/* TARGET_FOLD_BUILTIN(FN,NARGS,ARGP,IGNORE) ... Fold a call to a machine
+ * specific built-in function that was set up by 'TARGET_INIT_BUILTINS'  FN
+ * specific built-in function that was set up by 'TARGET_INIT_BUILTINS'  FN
+ * is the declaration of the built-in function.  NARGS is the number of
+ * is the declaration of the built-in function.  NARGS is the number of
+ * arguments passed to the function; the arguments themselves are pointed to by
+ * arguments passed to the function; the arguments themselves are pointed to by
+ * ARGP.  The result is another tree, valid for both GIMPLE and GENERIC,
+ * ARGP.  The result is another tree, valid for both GIMPLE and GENERIC,
+ * containing a simplified expression for the call's result.  If IGNORE is
+ * containing a simplified expression for the call's result.  If IGNORE is
+ * true the value will be ignored.
+ * true the value will be ignored.
+ *
+ *
+ * ZipCPU --- You know, this and the previous couple sound like something
+ * ZipCPU --- You know, this and the previous couple sound like something
+ * whereby I might be able to replace bit-reversal code with my bit reverse
+ * whereby I might be able to replace bit-reversal code with my bit reverse
+ * instruction.  That would be very useful, but not necessary to get me
+ * instruction.  That would be very useful, but not necessary to get me
+ * started.
+ * started.
+ */
+ */
+
+
+/* TARGET_GIMPLE_FOLD_BUILTIN
+/* TARGET_GIMPLE_FOLD_BUILTIN
+ * TARGET_COMPARE_VERSION_PRIORITY
+ * TARGET_COMPARE_VERSION_PRIORITY
+ * TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
+ * TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
+ * TARGET_GENERATE_VERSION_DISPATCHER_BODY
+ * TARGET_GENERATE_VERSION_DISPATCHER_BODY
+ * TARGET_CAN_USE_DOLOOP_P
+ * TARGET_CAN_USE_DOLOOP_P
+ * TARGET_INVALID_WITHIN_DOLOOP
+ * TARGET_INVALID_WITHIN_DOLOOP
+ * TARGET_LEGITIMATE_COMBINED_INSN
+ * TARGET_LEGITIMATE_COMBINED_INSN
+ * TARGET_CAN_FOLLOW_JUMP
+ * TARGET_CAN_FOLLOW_JUMP
+ * TARGET_COMMUTATIVE_P
+ * TARGET_COMMUTATIVE_P
+ */
+ */
+
+
+/* TARGET_ALLOCATE_INITIAL_VALUE(REGNO)  ... When the initial value of a hard
+/* TARGET_ALLOCATE_INITIAL_VALUE(REGNO)  ... When the initial value of a hard
+ * register has been copied in a pseudo register, it is often not necessary
+ * register has been copied in a pseudo register, it is often not necessary
+ * ...
+ * ...
+ */
+ */
+/* TARGET_UNSPEC_MAY_TRAP_P(RTX,FLAGS)  ... This target hook returns nonzero if
+/* TARGET_UNSPEC_MAY_TRAP_P(RTX,FLAGS)  ... This target hook returns nonzero if
+ * RTX, an unspec or unspec_volatile operation, might cause a trap.  Targets
+ * RTX, an unspec or unspec_volatile operation, might cause a trap.  Targets
+ * can use this hook to enhance precision of analysis for unspec and
+ * can use this hook to enhance precision of analysis for unspec and
+ * unspec_volatile operations.  You may call may_trap_p_1 to analyze inner
+ * unspec_volatile operations.  You may call may_trap_p_1 to analyze inner
+ * elements of RTX in which case flags should be passed along.
+ * elements of RTX in which case flags should be passed along.
+ */
+ */
+
+
+/* TARGET_SET_CURRENT_FUNCTION(TREE)  The compiler invokes this hook whenever
+/* TARGET_SET_CURRENT_FUNCTION(TREE)  The compiler invokes this hook whenever
+ * it changes its current function context (CFUN).  You can define this
+ * it changes its current function context (CFUN).  You can define this
+ * function if the back end needs to perform any initialization or reset
+ * function if the back end needs to perform any initialization or reset
+ * actions on a per-function basis.  For example, it may be used to implement
+ * actions on a per-function basis.  For example, it may be used to implement
+ * function attributes that affect register usage or code generation patterns.
+ * function attributes that affect register usage or code generation patterns.
+ */
+ */
+
+
+/* TARGET_OBJECT_SUFFIX ... Define this macro to be a C string representing the
+/* TARGET_OBJECT_SUFFIX ... Define this macro to be a C string representing the
+ * suffix for object files on your target machine.  If you do not define this
+ * suffix for object files on your target machine.  If you do not define this
+ * macro, GCC will use ".o" as the suffix for object files.
+ * macro, GCC will use ".o" as the suffix for object files.
+ */
+ */
+#define        TARGET_OBJECT_SUFFIX    ".o"
+#define        TARGET_OBJECT_SUFFIX    ".o"
+
+
+/* TARGET_EXECUTABLE_SUFFIX
+/* TARGET_EXECUTABLE_SUFFIX
+ */
+ */
+#define        TARGET_EXECUTABLE_SUFFIX        ""
+#define        TARGET_EXECUTABLE_SUFFIX        ""
+
+
+/* COLLECT_EXPORT_LIST ... If defined, collect2 will scan the individual object
+/* COLLECT_EXPORT_LIST ... If defined, collect2 will scan the individual object
+ * files specified on its command line and create an export list for the linker.
+ * files specified on its command line and create an export list for the linker.
+ * Define this macro for systems like AIX, where the linker discards object
+ * Define this macro for systems like AIX, where the linker discards object
+ * files that are not referenced from main and uses export lists.
+ * files that are not referenced from main and uses export lists.
+ *
+ *
+ * ZipCPU --- shouldn't need this.
+ * ZipCPU --- shouldn't need this.
+ */
+ */
+
+
+/* MODIFY_JNI_METHOD_CALL(MDECL)  ... Define this macro to a C expression
+/* MODIFY_JNI_METHOD_CALL(MDECL)  ... Define this macro to a C expression
+ * representing a variant of the method call mdecl, if Java Native Interface
+ * representing a variant of the method call mdecl, if Java Native Interface
+ * (JNI) methods must be invoked differently from other methods on your
+ * (JNI) methods must be invoked differently from other methods on your
+ * target.  For example, on 32-bit MSWindows, JNI methods must be invoked
+ * target.  For example, on 32-bit MSWindows, JNI methods must be invoked
+ * using the stdcall calling convention and this macro is then ...
+ * using the stdcall calling convention and this macro is then ...
+ *
+ *
+ * ZipCPU----Don't need this.  (yet)
+ * ZipCPU----Don't need this.  (yet)
+ */
+ */
+
+
+
+
+/* TARGET_CANNOT_MODIFY_JUMPS_P ... This target hook returns true past the
+/* TARGET_CANNOT_MODIFY_JUMPS_P ... This target hook returns true past the
+ * point in which new jump instructions could be created.  On machines that
+ * point in which new jump instructions could be created.  On machines that
+ * require a register for every jump such as the SHmedia ISA of SH5, this point
+ * require a register for every jump such as the SHmedia ISA of SH5, this point
+ * would typically be reload, so this target hook should be defined to a
+ * would typically be reload, so this target hook should be defined to a
+ * function such as:
+ * function such as:
+ *
+ *
+ * ZipCPU --- I don't get what this is for.
+ * ZipCPU --- I don't get what this is for.
+ *     Actually, in hindsight, ZipCPU needs this.  Without this, the
+ *     Actually, in hindsight, ZipCPU needs this.  Without this, the
+ * compiler will try to reorder basic blocks, shuffling logic around and so
+ * compiler will try to reorder basic blocks, shuffling logic around and so
+ * forth, preventing our comparison optimizations from being used.  By setting
+ * forth, preventing our comparison optimizations from being used.  By setting
+ * this function appropriately, we can prevent it from reversing conditions into
+ * this function appropriately, we can prevent it from reversing conditions into
+ * conditions we don't support.
+ * conditions we don't support.
+ */
+ */
+#define        TARGET_CANNOT_MODIFY_JUMPS_P    zip_cannot_modify_jumps_p
+#define        TARGET_CANNOT_MODIFY_JUMPS_P    zip_cannot_modify_jumps_p
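+/* The GCC manual's example body for this hook, shown for reference only (the
+ * actual zip_cannot_modify_jumps_p may differ), refuses new jumps once
+ * reload has begun:
+ *
+ *     static bool zip_cannot_modify_jumps_p(void)
+ *     { return (reload_completed || reload_in_progress); }
+ */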
+
+
+/* TARGET_BRANCH_TARGET_REGISTER_CLASS ... This target hook returns a register
+/* TARGET_BRANCH_TARGET_REGISTER_CLASS ... This target hook returns a register
+ * class for which branch target register optimizations should be applied.  All
+ * class for which branch target register optimizations should be applied.  All
+ * registers in this class should be usable interchangeably.  After reload,
+ * registers in this class should be usable interchangeably.  After reload,
+ * registers in this class will be re-allocated and loads will be hoisted out of
+ * registers in this class will be re-allocated and loads will be hoisted out of
+ * loops and be subjected to inter-block scheduling.
+ * loops and be subjected to inter-block scheduling.
+ *
+ *
+ * ZipCPU---GENERAL_REGS, but this should be a default already ...
+ * ZipCPU---GENERAL_REGS, but this should be a default already ...
+ */
+ */
+
+
+
+
+/* TARGET_BRANCH_TARGET_REGISTER_CALLEE_SAVED ...  Branch target register
+/* TARGET_BRANCH_TARGET_REGISTER_CALLEE_SAVED ...  Branch target register
+ * optimization will by default exclude callee-saved registers that are not
+ * optimization will by default exclude callee-saved registers that are not
+ * already live during the current function.  If this target hook returns true,
+ * already live during the current function.  If this target hook returns true,
+ * they will be included.  The target code must then make sure that all target
+ * they will be included.  The target code must then make sure that all target
+ * registers in the class returned by TARGET_BRANCH_TARGET_REGISTER_CLASS that might
+ * registers in the class returned by TARGET_BRANCH_TARGET_REGISTER_CLASS that might
+ * be saved are saved.
+ * be saved are saved.
+ *
+ *
+ * ZipCPU---
+ * ZipCPU---
+ */
+ */
+
+
+
+
+/* TARGET_HAVE_CONDITIONAL_EXECUTION(VOID) ... This target hook returns true
+/* TARGET_HAVE_CONDITIONAL_EXECUTION(VOID) ... This target hook returns true
+ * if the target supports conditional execution.  This target hook is required
+ * if the target supports conditional execution.  This target hook is required
+ * only when the target has several different modes and they have different
+ * only when the target has several different modes and they have different
+ * conditional execution capability, such as ARM.
+ * conditional execution capability, such as ARM.
+ *
+ *
+ * ZipCPU---Yes!  All instructions may be conditionally executed (except the
+ * ZipCPU---Yes!  All instructions may be conditionally executed (except the
+ * long version load immediate ...)
+ * long version load immediate ...)
+ */
+ */
+#define        TARGET_HAVE_CONDITIONAL_EXECUTION       hook_bool_void_true
+#define        TARGET_HAVE_CONDITIONAL_EXECUTION       hook_bool_void_true
+
+
+/* TARGET_GEN_CCMP_FIRST(PREP,GEN,CODE,OP0,OP1) .. This function prepares to
+/* TARGET_GEN_CCMP_FIRST(PREP,GEN,CODE,OP0,OP1) .. This function prepares to
+ * emit a comparison instruction for the first compare in a sequence of
+ * emit a comparison instruction for the first compare in a sequence of
+ * conditional comparisons.  It returns an appropriate comparison with CC for
+ * conditional comparisons.  It returns an appropriate comparison with CC for
+ * passing to gen_ccmp_next or cbranch_optab.  The instructions to prepare the
+ * passing to gen_ccmp_next or cbranch_optab.  The instructions to prepare the
+ * compare are saved in prep_seq and the compare instructions are saved in
+ * compare are saved in prep_seq and the compare instructions are saved in
+ * gen_seq.  They will be emitted when all the compares in the conditional
+ * gen_seq.  They will be emitted when all the compares in the conditional
+ * comparison are generated without error.  CODE is the rtx_code of the compare
+ * comparison are generated without error.  CODE is the rtx_code of the compare
+ * for op0 and op1.
+ * for op0 and op1.
+ *
+ *
+ *
+ *
+ * ZipCPU---???
+ * ZipCPU---???
+ */
+ */
+
+
+/* TARGET_GEN_CCMP_NEXT(PREP,GEN,PREV,CMP,OP0,OP1,BITCODE) ... This function
+/* TARGET_GEN_CCMP_NEXT(PREP,GEN,PREV,CMP,OP0,OP1,BITCODE) ... This function
+ * prepares to emit a conditional comparison within a sequence of conditional
+ * prepares to emit a conditional comparison within a sequence of conditional
+ * comparisons.  It returns an appropriate comparison with CC for passing to
+ * comparisons.  It returns an appropriate comparison with CC for passing to
+ * gen_ccmp_next or cbranch_optab.  The insn to prepare the compare are saved
+ * gen_ccmp_next or cbranch_optab.  The insn to prepare the compare are saved
+ * in prep_seq and the compare instructions are saved in gen_seq.  They will be
+ * in prep_seq and the compare instructions are saved in gen_seq.  They will be
+ * emitted when all the compares in the conditional comparison are generated
+ * emitted when all the compares in the conditional comparison are generated
+ * without error.  The PREV expression is the result of a prior call to either
+ * without error.  The PREV expression is the result of a prior call to either
+ * gen_ccmp_first or gen_ccmp_next.  It may return NULL if the combination of
+ * gen_ccmp_first or gen_ccmp_next.  It may return NULL if the combination of
+ * PREV and this comparison is not supported, otherwise the result must be
+ * PREV and this comparison is not supported, otherwise the result must be
+ * appropriate for passing to gen_ccmp_next or cbranch_optab.  CODE is the RTX
+ * appropriate for passing to gen_ccmp_next or cbranch_optab.  CODE is the RTX
+ * code of the compare for op0 and op1.  BITCODE is AND or IOR, which is the op
+ * code of the compare for op0 and op1.  BITCODE is AND or IOR, which is the op
+ * on the compares.
+ * on the compares.
+ *
+ *
+ *
+ *
+ * ZipCPU --- ???
+ * ZipCPU --- ???
+ */
+ */
+
+
+/* TARGET_LOOP_UNROLL_ADJUST(NUNROLL, LOOP) ... This target hook returns a new
+/* TARGET_LOOP_UNROLL_ADJUST(NUNROLL, LOOP) ... This target hook returns a new
+ * value for the number of times loop should be unrolled.  The parameter NUNROLL
+ * value for the number of times loop should be unrolled.  The parameter NUNROLL
+ * is the number of times the loop is to be unrolled.  The parameter loop is a
+ * is the number of times the loop is to be unrolled.  The parameter loop is a
+ * pointer to the loop, which is going to be checked for unrolling.  The target
+ * pointer to the loop, which is going to be checked for unrolling.  The target
+ * hook is required only when the target has special constraints like maximum number of memory accesses.
+ * hook is required only when the target has special constraints like maximum number of memory accesses.
+ *
+ *
+ *
+ *
+ * ZipCPU -- ???
+ * ZipCPU -- ???
+ */
+ */
+
+
+
+
+/* POWI_MAX_MULTS ... If defined, this macro is interpreted as a signed integer
+/* POWI_MAX_MULTS ... If defined, this macro is interpreted as a signed integer
+ * C expression that specifies the maximum number of floating point
+ * C expression that specifies the maximum number of floating point
+ * multiplications that should be emitted when expanding exponentiation by an
+ * multiplications that should be emitted when expanding exponentiation by an
+ * integer constant inline.  When this value is defined, exponentiation
+ * integer constant inline.  When this value is defined, exponentiation
+ * requiring more than this number of multiplications is implemented by calling
+ * requiring more than this number of multiplications is implemented by calling
+ * the system library's pow, powf, or powl routines.  The default value
+ * the system library's pow, powf, or powl routines.  The default value
+ * places no upper bound on the multiplication count.
+ * places no upper bound on the multiplication count.
+ *
+ *
+ * ZipCPU---As we have no system library pow() routine (yet) ... we're not
+ * ZipCPU---As we have no system library pow() routine (yet) ... we're not
+ * ready for this macro.
+ * ready for this macro.
+ */
+ */
+
+
+
+
+/* TARGET_EXTRA_INCLUDES(SYSROOT, PFX, STDINC) ... This target hook should
+/* TARGET_EXTRA_INCLUDES(SYSROOT, PFX, STDINC) ... This target hook should
+ * register any extra include files for the target.  The parameter stdinc
+ * register any extra include files for the target.  The parameter stdinc
+ * indicates if normal include files are present.  The parameter SYSROOT is the
+ * indicates if normal include files are present.  The parameter SYSROOT is the
+ * system root directory.  The parameter PFX is the prefix for the GCC
+ * system root directory.  The parameter PFX is the prefix for the GCC
+ * directory.
+ * directory.
+ *
+ *
+ *
+ *
+ * ZipCPU---None yet.
+ * ZipCPU---None yet.
+ */
+ */
+
+
+/* TARGET_EXTRA_PRE_INCLUDES(SYSROOT, PFX, STDINC) ... This target hook should
+/* TARGET_EXTRA_PRE_INCLUDES(SYSROOT, PFX, STDINC) ... This target hook should
+ * register any extra include files for the target before any standard headers.
+ * register any extra include files for the target before any standard headers.
+ * The parameter stdinc indicates if normal include files are present.
+ * The parameter stdinc indicates if normal include files are present.
+ *
+ *
+ * ZipCPU --- None.
+ * ZipCPU --- None.
+ */
+ */
+
+
+/* TARGET_OPTF(PATH) ... This target hook should register special include paths
+/* TARGET_OPTF(PATH) ... This target hook should register special include paths
+ * for the target.  The parameter path is the integer to register.  On Darwin
+ * for the target.  The parameter path is the integer to register.  On Darwin
+ * systems, this is used for Framework includes, which have semantics that are
+ * systems, this is used for Framework includes, which have semantics that are
+ * different from -I.
+ * different from -I.
+ *
+ *
+ *
+ *
+ * ZipCPU --- None.
+ * ZipCPU --- None.
+ */
+ */
+
+
+/* TARGET_USE_LOCAL_THUNK_ALIAS_P(FN) ... This target macro returns true if it is
+/* TARGET_USE_LOCAL_THUNK_ALIAS_P(FN) ... This target macro returns true if it is
+ * safe to use a local alias for a virtual function FN when constructing
+ * safe to use a local alias for a virtual function FN when constructing
+ * thunks, false otherwise.  By default, the macro returns true for all
+ * thunks, false otherwise.  By default, the macro returns true for all
+ * functions, if a target supports aliases (i.e. defines ASM_OUTPUT_DEF),
+ * functions, if a target supports aliases (i.e. defines ASM_OUTPUT_DEF),
+ * false otherwise.
+ * false otherwise.
+ *
+ *
+ *
+ *
+ * ZipCPU --- ???
+ * ZipCPU --- ???
+ */
+ */
+// #warning "ASM_OUTPUT_DEF's definition has not been considered"
+// #warning "ASM_OUTPUT_DEF's definition has not been considered"
+
+
+
+
+/* TARGET_FORMAT_TYPES ... If defined, this macro is the name of a global
+/* TARGET_FORMAT_TYPES ... If defined, this macro is the name of a global
+ * variable containing target-specific format checking information for the
+ * variable containing target-specific format checking information for the
+ * -Wformat option.  The default is to have no target-specific format checks.
+ * -Wformat option.  The default is to have no target-specific format checks.
+ *
+ *
+ * ZipCPU --- Default
+ * ZipCPU --- Default
+ */
+ */
+
+
+/* TARGET_N_FORMAT_TYPES
+/* TARGET_N_FORMAT_TYPES
+ *
+ *
+ * ZipCPU --- Default
+ * ZipCPU --- Default
+ */
+ */
+
+
+/* TARGET_OVERRIDES_FORMAT_ATTRIBUTES ... If defined, this macro is the name of
+/* TARGET_OVERRIDES_FORMAT_ATTRIBUTES ... If defined, this macro is the name of
+ * a global variable containing target-specific format overrides for the
+ * a global variable containing target-specific format overrides for the
+ * -Wformat option.  The default is to have no target specific format overrides.
+ * -Wformat option.  The default is to have no target specific format overrides.
+ *
+ *
+ * ZipCPU --- Default
+ * ZipCPU --- Default
+ */
+ */
+
+
+/* TARGET_OVERRIDES_FORMAT_ATTRIBUTES
+/* TARGET_OVERRIDES_FORMAT_ATTRIBUTES
+ * TARGET_OVERRIDES_FORMAT_ATTRIBUTES_COUNT
+ * TARGET_OVERRIDES_FORMAT_ATTRIBUTES_COUNT
+ *
+ *
+ * If defined, the (first) macro is the name of a global variable containing
+ * If defined, the (first) macro is the name of a global variable containing
+ * target-specific format overrides for the -Wformat option.
+ * target-specific format overrides for the -Wformat option.
+ */
+ */
+/* TARGET_OVERRIDES_FORMAT_INIT ... If defined, this macro specifies the
+/* TARGET_OVERRIDES_FORMAT_INIT ... If defined, this macro specifies the
+ * optional initialization routine for target specific customizations of the
+ * optional initialization routine for target specific customizations of the
+ * system printf and scanf formatter settings.
+ * system printf and scanf formatter settings.
+ */
+ */
+
+
+/* TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN(TLIST,FN,VAL) ... If defined, this
+/* TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN(TLIST,FN,VAL) ... If defined, this
+ * macro returns the diagnostic message when it is illegal to pass an argument
+ * macro returns the diagnostic message when it is illegal to pass an argument
+ * VAL to function FN with prototype TLIST.
+ * VAL to function FN with prototype TLIST.
+ *
+ *
+ * ZipCPU---Default.
+ * ZipCPU---Default.
+ */
+ */
+
+
+/* TARGET_INVALID_CONVERSION
+/* TARGET_INVALID_CONVERSION
+ * TARGET_INVALID_UNARY_OP
+ * TARGET_INVALID_UNARY_OP
+ * TARGET_INVALID_BINARY_OP
+ * TARGET_INVALID_BINARY_OP
+ * TARGET_INVALID_PARAMETER_TYPE
+ * TARGET_INVALID_PARAMETER_TYPE
+ * TARGET_INVALID_RETURN_TYPE
+ * TARGET_INVALID_RETURN_TYPE
+ * TARGET_PROMOTED_TYPE
+ * TARGET_PROMOTED_TYPE
+ * TARGET_CONVERT_TO_TYPE
+ * TARGET_CONVERT_TO_TYPE
+ * TARGET_USE_JCR_SECTION_TYPE
+ * TARGET_USE_JCR_SECTION_TYPE
+ * OBJC_JBLEN
+ * OBJC_JBLEN
+ * LIBGCC2_UNWIND_ATTRIBUTE
+ * LIBGCC2_UNWIND_ATTRIBUTE
+ * TARGET_UPDATE_STACK_BOUNDARY
+ * TARGET_UPDATE_STACK_BOUNDARY
+ * TARGET_GET_DRAP_RTX
+ * TARGET_GET_DRAP_RTX
+ * TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS
+ * TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS
+ */
+ */
+/* TARGET_CONST_ANCHOR ... On some architectures it can take multiple
+/* TARGET_CONST_ANCHOR ... On some architectures it can take multiple
+ * instructions to synthesize a constant. If there is another constant already
+ * instructions to synthesize a constant. If there is another constant already
+ * in a register that is close enough in value then it is preferable that the
+ * in a register that is close enough in value then it is preferable that the
+ * new constant is computed from the register using immediate addition or
+ * new constant is computed from the register using immediate addition or
+ * subtraction.  We accomplish this through CSE.  Besides the value of the
+ * subtraction.  We accomplish this through CSE.  Besides the value of the
+ * constant we also add a lower and an upper constant anchor to the available
+ * constant we also add a lower and an upper constant anchor to the available
+ * expressions.  These are then queried when encountering new constants.  The
+ * expressions.  These are then queried when encountering new constants.  The
+ * anchors are computed by rounding the constant up and down to a multiple of
+ * anchors are computed by rounding the constant up and down to a multiple of
+ * the value of TARGET_CONST_ANCHOR.  TARGET_CONST_ANCHOR should be the maximum
+ * the value of TARGET_CONST_ANCHOR.  TARGET_CONST_ANCHOR should be the maximum
+ * positive value accepted by immediate-add plus one.  We currently assume that
+ * positive value accepted by immediate-add plus one.  We currently assume that
+ * the value of TARGET_CONST_ANCHOR is a power of 2.  For example, on MIPS,
+ * the value of TARGET_CONST_ANCHOR is a power of 2.  For example, on MIPS,
+ * where add-immediate takes a 16-bit signed value, TARGET_CONST_ANCHOR is set
+ * where add-immediate takes a 16-bit signed value, TARGET_CONST_ANCHOR is set
+ * to 0x8000.  The default value is zero, which disables this optimization.
+ * to 0x8000.  The default value is zero, which disables this optimization.
+ *
+ *
+ * ZipCPU---One of the great strengths of the ZipCPU ISA is its ability to
+ * ZipCPU---One of the great strengths of the ZipCPU ISA is its ability to
+ * access registers plus immediates.  To use this, we *need* this capability.
+ * access registers plus immediates.  To use this, we *need* this capability.
+ * So we define it here as 0x20000 (2^17), since the ISA can handle 18 bits of
+ * So we define it here as 0x20000 (2^17), since the ISA can handle 18 bits of
+ * signed immediate offset.
+ * signed immediate offset.
+ *
+ *
+ * On ZipCPU---2^17
+ * On ZipCPU---2^17
+ */
+ */
+#define        TARGET_CONST_ANCHOR     zip_const_anchor
+#define        TARGET_CONST_ANCHOR     zip_const_anchor
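A minimal C sketch of the anchoring arithmetic described above, assuming zip_const_anchor evaluates to 0x20000 as the comment suggests; the helper names and the example value are illustrative only and are not part of the patch:

    #define ZIP_ANCHOR 0x20000L   /* assumed value of zip_const_anchor (2^17) */

    /* Round a constant down/up to the nearest multiple of the anchor; CSE then
     * records both anchors and reaches nearby constants with an immediate
     * addition or subtraction instead of re-synthesizing them. */
    static long zip_lower_anchor(long c) { return c & ~(ZIP_ANCHOR - 1); }
    static long zip_upper_anchor(long c) { return zip_lower_anchor(c) + ZIP_ANCHOR; }

    /* Example: c = 0x24680 gives anchors 0x20000 and 0x40000, with offsets
     * +0x4680 and -0x1B980, both inside the 18-bit signed Op-B range. */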
+
+
+/* TARGET_ASAN_SHADOW_OFFSET ... Return the offset bitwise ored into shifted
+/* TARGET_ASAN_SHADOW_OFFSET ... Return the offset bitwise ored into shifted
+ * address to get corresponding Address Sanitizer shadow memory address.  NULL
+ * address to get corresponding Address Sanitizer shadow memory address.  NULL
+ * if address Sanitizer is not supported by the target.
+ * if address Sanitizer is not supported by the target.
+ */
+ */
+#define        TARGET_ASAN_SHADOW_OFFSET       NULL
+#define        TARGET_ASAN_SHADOW_OFFSET       NULL
+
+
+/* TARGET_MEMMODEL_CHECK
+/* TARGET_MEMMODEL_CHECK
+ */
+ */
+/* TARGET_ATOMIC_TEST_AND_SET_TRUEVAL ... This value should be set if the result
+/* TARGET_ATOMIC_TEST_AND_SET_TRUEVAL ... This value should be set if the result
+ * written by atomic test and set is not exactly 1, i.e. the bool true.
+ * written by atomic test and set is not exactly 1, i.e. the bool true.
+ */
+ */
+/* TARGET_HAS_IFUNC_P ... It returns true if the target supports GNU indirect
+/* TARGET_HAS_IFUNC_P ... It returns true if the target supports GNU indirect
+ * functions.  The support includes the assembler, linker, and dynamic linker.
+ * functions.  The support includes the assembler, linker, and dynamic linker.
+ * The default value of this hook is based on target's libc.
+ * The default value of this hook is based on target's libc.
+ */
+ */
+#define        TARGET_HAS_IFUNC_P      hook_bool_void_true
+#define        TARGET_HAS_IFUNC_P      hook_bool_void_true
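For reference, a small GNU C sketch of what an indirect function looks like at the source level; this only illustrates what the hook advertises, and whether the rest of the ZipCPU toolchain (assembler, linker, dynamic linker) actually supports it is not established by this patch:

    static int add_plain(int a, int b) { return a + b; }

    /* The resolver runs at load time and picks the implementation to bind. */
    static int (*resolve_add(void))(int, int) { return add_plain; }

    /* Calls to add() are routed through whatever resolve_add() returned. */
    int add(int a, int b) __attribute__((ifunc("resolve_add")));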
+
+
+/* TARGET_ATOMIC_ALIGN_FOR_MODE(MODE) ... If defined, this function returns
+/* TARGET_ATOMIC_ALIGN_FOR_MODE(MODE) ... If defined, this function returns
+ * an appropriate alignment in bits for an atomic object of machine mode
+ * an appropriate alignment in bits for an atomic object of machine mode
+ * MODE.  If 0 is returned then the default alignment for the specified mode
+ * MODE.  If 0 is returned then the default alignment for the specified mode
+ * is used.
+ * is used.
+ *
+ *
+ * ZipCPU---Both default and 2 would be valid.  We'll stick to the default.
+ * ZipCPU---Both default and 2 would be valid.  We'll stick to the default.
+ */
+ */
+
+
+/* TARGET_ATOMIC_ASSIGN_EXPAND_FENV --- ISO C11 requires atomic compound
+/* TARGET_ATOMIC_ASSIGN_EXPAND_FENV --- ISO C11 requires atomic compound
+ * assignments that may raise floating-point exceptions to raise exceptions
+ * assignments that may raise floating-point exceptions to raise exceptions
+ * corresponding to the arithmetic operation whose result was successfully
+ * corresponding to the arithmetic operation whose result was successfully
+ * stored in a compare-and-exchange sequence.  This requires code equivalent to
+ * stored in a compare-and-exchange sequence.  This requires code equivalent to
+ * calls to feholdexcept, feclearexcept and feupdateenv to be generated at
+ * calls to feholdexcept, feclearexcept and feupdateenv to be generated at
+ * appropriate points in the compare-and-exchange sequence.  This hook should
+ * appropriate points in the compare-and-exchange sequence.  This hook should
+ * set *hold, *clear and *update to expressions equivalent to those calls.
+ * set *hold, *clear and *update to expressions equivalent to those calls.
+ *
+ *
+ * ZipCPU --- ???
+ * ZipCPU --- ???
+ */
+ */
+
+
+/* TARGET_RECORD_OFFLOAD_SYMBOL ... Used when offloaded functions are seen in
+/* TARGET_RECORD_OFFLOAD_SYMBOL ... Used when offloaded functions are seen in
+ * the compilation unit and no named sections are available.  It is called once
+ * the compilation unit and no named sections are available.  It is called once
+ * for each symbol that must be recorded in the offload function and variable
+ * for each symbol that must be recorded in the offload function and variable
+ * table.
+ * table.
+ *
+ *
+ * ZipCPU --- Offloaded functions?
+ * ZipCPU --- Offloaded functions?
+ */
+ */
+
+
+/* TARGET_OFFLOAD_OPTIONS
+/* TARGET_OFFLOAD_OPTIONS
+ *
+ *
+ * ZipCPU---none defined
+ * ZipCPU---none defined
+ */
+ */
+
+
+/* TARGET_SUPPORTS_WIDE_INT ... On older ports, large integers are stored
+/* TARGET_SUPPORTS_WIDE_INT ... On older ports, large integers are stored
+ * in CONST_DOUBLE rtl objects.  Newer ports define TARGET_SUPPORTS_WIDE_INT
+ * in CONST_DOUBLE rtl objects.  Newer ports define TARGET_SUPPORTS_WIDE_INT
+ * to be nonzero to indicate that large integers are stored in CONST_WIDE_INT
+ * to be nonzero to indicate that large integers are stored in CONST_WIDE_INT
+ * rtl objects.  The CONST_WIDE_INT allows very large integer constants to be
+ * rtl objects.  The CONST_WIDE_INT allows very large integer constants to be
+ * represented.  CONST_DOUBLE is limited to twice the size of the host's
+ * represented.  CONST_DOUBLE is limited to twice the size of the host's
+ * HOST_WIDE_INT representation.
+ * HOST_WIDE_INT representation.
+ *
+ *
+ * ZipCPU---We don't need these yet, so this isn't yet relevant.  (These ints
+ * ZipCPU---We don't need these yet, so this isn't yet relevant.  (These ints
+ * are wider than DImode ...)
+ * are wider than DImode ...)
+ */
+ */
+#define        TARGET_SUPPORTS_WIDE_INT        0
+#define        TARGET_SUPPORTS_WIDE_INT        0
+
+
+
+
+/* Now, for the prototype functions ...*/
+/* Now, for the prototype functions ...*/
+// These have been moved to zip-protos.h
+// These have been moved to zip-protos.h
+
+
+// extern void zip_init_builtins(void);
+// extern void zip_init_builtins(void);
+// extern void zip_asm_output_anchor(rtx x);
+// extern void zip_asm_output_anchor(rtx x);
+// extern bool zip_legitimate_address_p(enum machine_mode mode, rtx x, bool string);
+// extern bool zip_legitimate_address_p(enum machine_mode mode, rtx x, bool string);
+// extern void zip_asm_trampoline_template(FILE *);
+// extern void zip_asm_trampoline_template(FILE *);
+// extern void zip_initial_elimination_offset(int from, int to);
+// extern void zip_initial_elimination_offset(int from, int to);
+// extern void zip_print_operand(FILE *stream, rtx *x, int code);
+// extern void zip_print_operand(FILE *stream, rtx *x, int code);
+// extern void zip_print_operand_address(FILE *stream, rtx *x);
+// extern void zip_print_operand_address(FILE *stream, rtx *x);
+// extern void zip_asm_output_def(FILE *s, const char *n, const char *v);
+// extern void zip_asm_output_def(FILE *s, const char *n, const char *v);
+// extern void zip_update_cc_notice(rtx exp, rtx_insn *insn);
+// extern void zip_update_cc_notice(rtx exp, rtx_insn *insn);
+// extern      int zip_address_operand(rtx op);
+// extern      int zip_address_operand(rtx op);
+// extern      int zip_const_address_operand(rtx op);
+// extern      int zip_const_address_operand(rtx op);
+// extern void zip_expand_prologue(void);
+// extern void zip_expand_prologue(void);
+// extern void zip_expand_epilogue(void);
+// extern void zip_expand_epilogue(void);
+// extern bool zip_gen_move_rtl(rtx, rtx);
+// extern bool zip_gen_move_rtl(rtx, rtx);
+// extern bool zip_load_address_lod(rtx, rtx);
+// extern bool zip_load_address_lod(rtx, rtx);
+// extern bool zip_load_address_sto(rtx, rtx);
+// extern bool zip_load_address_sto(rtx, rtx);
+// extern void zip_print_operand(FILE *fp, rtx x, int code);
+// extern void zip_print_operand(FILE *fp, rtx x, int code);
+// extern void zip_print_operand_address(FILE *fp, rtx x);
+// extern void zip_print_operand_address(FILE *fp, rtx x);
+extern int zip_use_return_insn(void);
+extern int zip_use_return_insn(void);
+
+
+#include "insn-modes.h"
+#include "insn-modes.h"
+// #include "zip-protos.h"     // Cant include this here!
+// #include "zip-protos.h"     // Cant include this here!
+
+
+#endif /* GCC_ZIP_H */
+#endif /* GCC_ZIP_H */
+
+
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/config/zip/zip.md gcc-6.2.0-zip/gcc/config/zip/zip.md
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/config/zip/zip.md gcc-6.2.0-zip/gcc/config/zip/zip.md
--- gcc-6.2.0/gcc/config/zip/zip.md     1969-12-31 19:00:00.000000000 -0500
--- gcc-6.2.0/gcc/config/zip/zip.md     1969-12-31 19:00:00.000000000 -0500
+++ gcc-6.2.0-zip/gcc/config/zip/zip.md 2017-03-07 12:02:29.862582673 -0500
+++ gcc-6.2.0-zip/gcc/config/zip/zip.md 2018-06-07 12:51:05.802342817 -0400
@@ -0,0 +1,1968 @@
@@ -0,0 +1,1897 @@
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;; Filename:   zip.md
+;; Filename:   zip.md
+;;
+;;
+;; Project:    Zip CPU -- a small, lightweight, RISC CPU soft core
+;; Project:    Zip CPU -- a small, lightweight, RISC CPU soft core
+;;
+;;
+;; Purpose:    This is the machine description of the Zip CPU as needed by the
+;; Purpose:    This is the machine description of the Zip CPU as needed by the
+;;             GNU compiler collection (GCC).
+;;             GNU compiler collection (GCC).
+;;
+;;
+;;
+;;
+;; Creator:    Dan Gisselquist, Ph.D.
+;; Creator:    Dan Gisselquist, Ph.D.
+;;             Gisselquist Technology, LLC
+;;             Gisselquist Technology, LLC
+;;
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;; Copyright (C) 2015-2017, Gisselquist Technology, LLC
+;; Copyright (C) 2015-2018, Gisselquist Technology, LLC
+;;
+;;
+;; This program is free software (firmware): you can redistribute it and/or
+;; This program is free software (firmware): you can redistribute it and/or
+;; modify it under the terms of  the GNU General Public License as published
+;; modify it under the terms of  the GNU General Public License as published
+;; by the Free Software Foundation, either version 3 of the License, or (at
+;; by the Free Software Foundation, either version 3 of the License, or (at
+;; your option) any later version.
+;; your option) any later version.
+;;
+;;
+;; This program is distributed in the hope that it will be useful, but WITHOUT
+;; This program is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+;; FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+;; FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+;; for more details.
+;; for more details.
+;;
+;;
+;; License:    GPL, v3, as defined and found on www.gnu.org,
+;; License:    GPL, v3, as defined and found on www.gnu.org,
+;;             http://www.gnu.org/licenses/gpl.html
+;;             http://www.gnu.org/licenses/gpl.html
+;;
+;;
+;;
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;;
+;;
+;; - Immediate integer operand constraints
+;; - Immediate integer operand constraints
+;;     'I'     -2^22 ... 2^22-1, or -4194304 .. 4194303        (LDI insn)
+;;     'I'     -2^22 ... 2^22-1, or -4194304 .. 4194303        (LDI insn)
+;;     'x'     -2^17 ... 2^17-1,                               (DI const)
+;;     'x'     -2^17 ... 2^17-1,                               (DI const)
+;;     'K'     0...2^17-2                                      (ucmp offset)
+;;     'K'     0...2^17-2                                      (ucmp offset)
+;;     'M'     -2^12 ... 2^12-1, or -4096 ... 4095             (MOV offset)
+;;     'M'     -2^12 ... 2^12-1, or -4096 ... 4095             (MOV offset)
+;;     'N'     -2^14 ... 2^14-1, or -16384 ... 16383           (OpB offset)
+;;     'N'     -2^14 ... 2^14-1, or -16384 ... 16383           (OpB offset)
+;;     'O'     -2^17 ... 2^17-1, or -131072 ... 131071         (OpB Immediate)
+;;     'O'     -2^17 ... 2^17-1, or -131072 ... 131071         (OpB Immediate)
+;;     'R'     0...31                                          (Shift value)
+;;     'R'     0...31                                          (Shift value)
+;; - Memory constraints
+;; - Memory constraints
+;;     "Q"     Op-B capable references to memory
+;;     "Q"     Op-B capable references to memory
+;;     "S"     References to constant memory
+;;     "S"     References to constant memory
+;; - Address constraints
+;; - Address constraints
+;;     "U"     Op-B capable address that references to memory
+;;     "U"     Op-B capable address that references to memory
+;;     "T"     Constant memory addresses
+;;     "T"     Constant memory addresses
+(define_constraint "x"
+(define_constraint "x"
+  "An 17-bit signed immediate such as a CMP:DI instruction can handle"
+  "An 17-bit signed immediate such as a CMP:DI instruction can handle"
+  (and (match_code "const_wide_int")
+  (and (match_code "const_wide_int")
+       (match_test "(ival < 0x20000l) && (ival >= -0x20000l)")))
+       (match_test "(ival < 0x20000l) && (ival >= -0x20000l)")))
+(define_constraint "K"
+(define_constraint "K"
+  "An 17-bit signed immediate such as a CMP:DI instruction can handle"
+  "An 17-bit signed immediate such as a CMP:DI instruction can handle"
+  (and (match_code "const_int")
+  (and (match_code "const_int")
+       (match_test "(ival < 0x20000) && (ival >= -0x20000)")))
+       (match_test "(ival < 0x20000) && (ival >= -0x20000)")))
+(define_constraint "M"
+(define_constraint "M"
+  "An 13-bit signed immediate such as a MOV instruction can handle"
+  "An 13-bit signed immediate such as a MOV instruction can handle"
+  (and (match_code "const_int")
+  (and (match_code "const_int")
+       (match_test "(ival < 0x1000) && (ival >= -0x1000)")))
+       (match_test "(ival < 0x1000) && (ival >= -0x1000)")))
+(define_constraint "N"
+(define_constraint "N"
+  "An 14-bit signed immediate offset such as an Op-B register offset"
+  "An 14-bit signed immediate offset such as an Op-B register offset"
+  (and (match_code "const_int")
+  (and (match_code "const_int")
+       (match_test "(ival < 0x2000) && (ival >= -0x2000)")))
+       (match_test "(ival < 0x2000) && (ival >= -0x2000)")))
+(define_constraint "O"
+(define_constraint "O"
+  "An 18-bit signed immediate such as an Op-B Immediate can handle"
+  "An 18-bit signed immediate such as an Op-B Immediate can handle"
+  (and (match_code "const_int")
+  (and (match_code "const_int")
+       (match_test "(ival < 0x20000) && (ival >= -0x20000)")))
+       (match_test "(ival < 0x20000) && (ival >= -0x20000)")))
+(define_constraint "R"
+(define_constraint "R"
+  "Bits that a value may be shifted"
+  "Bits that a value may be shifted"
+  (and (match_code "const_int")
+  (and (match_code "const_int")
+       (match_test "(ival < 32) && (ival >= 0)")))
+       (match_test "(ival < 32) && (ival >= 0)")))
+;;
+;;
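The same bounds written out as plain C range checks, for readability; these helpers are illustrative only and do not appear in the patch (the 'I' range is taken from the comment table above, the others mirror the define_constraint tests):

    static int fits_ldi_imm(long v) { return v >= -(1L << 22) && v < (1L << 22); } /* 'I' */
    static int fits_opb_imm(long v) { return v >= -0x20000L && v < 0x20000L; }     /* 'O' */
    static int fits_opb_off(long v) { return v >= -0x2000L  && v < 0x2000L;  }     /* 'N' */
    static int fits_mov_off(long v) { return v >= -0x1000L  && v < 0x1000L;  }     /* 'M' */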
+;
+;
+;
+;
+; Our builtin functions, by identifier
+; Our builtin functions, by identifier
+;
+;
+(define_constants
+(define_constants
+       [(UNSPEC_RTU             1)
+       [(UNSPEC_RTU             1)
+       (UNSPEC_HALT             2)
+       (UNSPEC_HALT             2)
+       (UNSPEC_IDLE             3)
+       (UNSPEC_IDLE             3)
+       (UNSPEC_SYSCALL          4)
+       (UNSPEC_SYSCALL          4)
+       (UNSPEC_SAVE_CONTEXT     5)
+       (UNSPEC_SAVE_CONTEXT     5)
+       (UNSPEC_RESTORE_CONTEXT  6)
+       (UNSPEC_RESTORE_CONTEXT  6)
+       (UNSPEC_BITREV           7)
+       (UNSPEC_BITREV           7)
+       (UNSPEC_GETUCC           8)
+       (UNSPEC_GETUCC           8)
+       (UNSPEC_GETCC            9)
+       (UNSPEC_GETCC            9)
+       (UNSPEC_LDILO           10)
+       (UNSPEC_LDILO           10)
+       ; (UNSPEC_RAW_CALL      11)
+       ; (UNSPEC_RAW_CALL      11)
+       ])
+       ])
+;
+;
+;
+;
+; Registers by name
+; Registers by name
+(define_constants
+(define_constants
+  [(RTN_REG            0)      ; Return address register
+  [(RTN_REG            0)      ; Return address register
+   (RTNV_REG           1)      ; Subroutine return value register
+   (RTNV_REG           1)      ; Subroutine return value register
+   (AP_REG             10)     ; Hopefully never used
+   (AP_REG             10)     ; Hopefully never used
+   (GBL_REG            11)     ; Hopefully never used, but just in case ...
+   (GBL_REG            11)     ; Hopefully never used, but just in case ...
+   (FP_REG             12)
+   (FP_REG             12)     ; Frame pointer register
+   (SP_REG             13)
+   (SP_REG             13)     ; Stack pointer register
+   (CC_REG             14)
+   (CC_REG             14)     ; Condition codes register
+   (PC_REG             15)
+   (PC_REG             15)     ; Program counter
+  ])
+  ])
+;
+;
+;
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;; Mode iterator
+;; Mode iterator
+;;
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+;
+;
+(define_mode_iterator ZI [QI HI SI])
+(define_mode_iterator ZI [QI HI SI])
+(define_mode_attr sz [(QI "B") (HI "H") (SI "W")])
+(define_mode_attr sz [(QI "B") (HI "H") (SI "W")])
+;
+;
+;
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;; Predicates
+;; Predicates
+;;
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+;
+;
+;
+;
+(define_predicate "zip_const_address_operand_p"
+(define_predicate "zip_const_address_operand_p"
+       (match_code "symbol_ref,const,label_ref,code_label")
+       (match_code "symbol_ref,const,label_ref,code_label")
+{
+{
+       return zip_const_address_operand(op);
+       return zip_const_address_operand(op);
+})
+})
+
+
+(define_predicate "zip_address_operand_p"
+(define_predicate "zip_address_operand_p"
+       (match_code "reg,plus")
+       (match_code "reg,plus")
+{
+{
+       return zip_pd_opb_operand(op);
+       return zip_pd_opb_operand(op);
+})
+})
+
+
+(define_predicate "zip_opb_operand_p"
+(define_predicate "zip_opb_operand_p"
+       (match_code "reg,plus,const_int,subreg")
+       (match_code "reg,plus,const_int,subreg")
+{
+{
+       return zip_pd_opb_operand(op);
+       return zip_pd_opb_operand(op);
+})
+})
+
+
+(define_predicate "zip_opb_immv_p"
+(define_predicate "zip_opb_immv_p"
+       (match_code "const_int")
+       (match_code "const_int")
+{
+{
+       return (INTVAL(op)<((1<<13)-1))&&(INTVAL(op)>=-((1<<13)));
+       return (INTVAL(op)<((1<<13)-1))&&(INTVAL(op)>=-((1<<13)));
+})
+})
+
+
+(define_predicate "zip_opb_single_operand_p"
+(define_predicate "zip_opb_single_operand_p"
+       (match_code "reg,subreg,const_int")
+       (match_code "reg,subreg,const_int")
+{
+{
+       return zip_pd_opb_operand(op);
+       return zip_pd_opb_operand(op);
+})
+})
+
+
+(define_predicate "zip_mov_operand_p"
+(define_predicate "zip_mov_operand_p"
+       (match_code "reg,plus")
+       (match_code "reg,plus")
+{
+{
+       return zip_pd_mov_operand(op);
+       return zip_pd_mov_operand(op);
+})
+})
+
+
+(define_predicate "zip_memory_operand_p"
+(define_predicate "zip_memory_operand_p"
+       (match_code "mem")
+       (match_code "mem")
+{
+{
+       return zip_pd_opb_operand(XEXP(op,0));
+       return zip_pd_opb_operand(XEXP(op,0));
+})
+})
+
+
+(define_predicate "zip_imm_operand_p"
+(define_predicate "zip_imm_operand_p"
+       (match_code "const_int")
+       (match_code "const_int")
+{
+{
+       return zip_pd_imm_operand(op);
+       return zip_pd_imm_operand(op);
+})
+})
+
+
+(define_predicate "zip_mvimm_operand_p"
+(define_predicate "zip_mvimm_operand_p"
+       (match_code "const_int")
+       (match_code "const_int")
+{
+{
+       return zip_pd_mvimm_operand(op);
+       return zip_pd_mvimm_operand(op);
+})
+})
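The zip_pd_* helpers called by these predicates live elsewhere in the patch (zip.c and zip-protos.h) and are not shown in this excerpt. As a rough, non-authoritative guess at their shape, a predicate such as zip_pd_imm_operand might simply check the LDI immediate range from the constraint table above; the real implementation may well differ:

    /* Hypothetical sketch only, not the patch's code. */
    int zip_pd_imm_operand(rtx op)
    {
            if (!CONST_INT_P(op))
                    return 0;
            /* 'I' range from the table above: -2^22 ... 2^22-1 (LDI insn) */
            return (INTVAL(op) >= -(1L << 22)) && (INTVAL(op) < (1L << 22));
    }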
+
+
+;
+;
+; zip_movdst_operand_p and zip_movsrc_operand_p are no longer necessary, and
 
+; are being deprecated.
 
+;
 
+;(define_predicate "zip_movdst_operand_p"
 
+;      (match_code "mem,reg,subreg")
 
+;{
 
+;      if (MEM_P(op)) // Check for valid store address
 
+;              return zip_pd_opb_operand(XEXP(op,0));
 
+;      else if ((SUBREG_P(op))&&(REG_P(XEXP(op,0))))
 
+;              return 1;
 
+;      else if (REG_P(op))
 
+;              return register_operand(op, GET_MODE(op));
 
+;      return 1;
 
+;})
 
+
 
+;(define_predicate "zip_movsrc_operand_p"
 
+;      (match_code "mem,reg,subreg,const_int,const,symbol_ref,label_ref,code_label")
 
+;{
 
+;      if (MEM_P(op))
 
+;              return zip_pd_opb_operand(XEXP(op,0));
 
+;      else if (GET_CODE(op)==PLUS)
 
+;              return zip_pd_opb_operand(op);
 
+;      else if ((SUBREG_P(op))&&(REG_P(XEXP(op,0)))) {
 
+;              //; As far as predicates are concerned, subregs must be valid.
 
+;              //; The details of them are settled within the constraints.
 
+;              return 1;
 
+;      } else if ((REG_P(op))||(SUBREG_P(op)))
 
+;              return register_operand(op,SImode);
 
+;      else if (CONST_INT_P(op))
 
+;              return 1;
 
+;      return 1;
 
+;})
 
+
 
+;
 
+;
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;; Constraints
+;; Constraints
+;;
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+;
+;
+;
+;
+(define_memory_constraint "S"
+(define_memory_constraint "S"
+       "Any memory referenced by a constant address, possibly unknown at compile time"
+       "Any memory referenced by a constant address, possibly unknown at compile time"
+       (and (match_code "mem")
+       (and (match_code "mem")
+               (match_test "zip_ct_const_address_operand(XEXP(op,0))")))
+               (match_test "zip_ct_const_address_operand(XEXP(op,0))")))
+(define_memory_constraint "Q"
+(define_memory_constraint "Q"
+       "Any memory addressed suitably for a load or store instruction"
+       "Any memory addressed suitably for a load or store instruction"
+       (and (match_code "mem")
+       (and (match_code "mem")
+               (match_test "zip_ct_address_operand(XEXP(op,0))")))
+               (match_test "zip_ct_address_operand(XEXP(op,0))")))
+(define_address_constraint "U"
+(define_address_constraint "U"
+       "An address suitable for a load or store instruction"
+       "An address suitable for a load or store instruction"
+       (and (match_code "reg,plus")
+       (and (match_code "reg,plus")
+               (match_test "zip_ct_address_operand(op)")))
+               (match_test "zip_ct_address_operand(op)")))
+(define_address_constraint "T"
+(define_address_constraint "T"
+       "Any constant address, to include those made by symbols unknown at compile time"
+       "Any constant address, to include those made by symbols unknown at compile time"
+       (and (match_code "label_ref,code_label,symbol_ref,const")
+       (and (match_code "label_ref,code_label,symbol_ref,const")
+               (match_test "zip_ct_const_address_operand(op)")))
+               (match_test "zip_ct_const_address_operand(op)")))
+;
+;
+;
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;; Attributes
+;; Attributes
+;;
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+;
+;
+;
+;
+;
+;
+;
+;
+(define_attr "predicable"  "no,yes" (const_string "yes"))
+(define_attr "predicable"  "no,yes" (const_string "yes"))
+(define_attr "ccresult" "set,unknown,unchanged,validzn" (const_string "set"))
+(define_attr "ccresult" "set,unknown,unchanged,validzn" (const_string "set"))
+;
+;
+;
+;
+;
+;
+;
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;; Instructions
+;; Instructions
+;;
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+;
+;
+;
+;
+;; Instructions
+;; Instructions
+;
+;
+; (define_insn
+; (define_insn
+;      optional name
+;      optional name
+;      RTL template -- a vector of incomplete RTL expressions describing the
+;      RTL template -- a vector of incomplete RTL expressions describing the
+;              semantics of the instruction.  It is incomplete because it may
+;              semantics of the instruction.  It is incomplete because it may
+;              contain match_operand, match_operator, and match_dup expressions
+;              contain match_operand, match_operator, and match_dup expressions
+;      The condition --- contains a C expression, may be an empty string
+;      The condition --- contains a C expression, may be an empty string
+;      output template or output statement--fragment of C code returning a string
+;      output template or output statement--fragment of C code returning a string
+;      Attributes --
+;      Attributes --
+;      )
+;      )
+;
+;
+; (match_operand:m n predicate constraint)
+; (match_operand:m n predicate constraint)
+;      Placeholder for operand #n of the instruction
+;      Placeholder for operand #n of the instruction
+;      Predicate       string that is the name of a function w/ 2 arguments:
+;      Predicate       string that is the name of a function w/ 2 arguments:
+;                              (expression, machine mode)
+;                              (expression, machine mode)
+;              we can build functions:
+;              we can build functions:
+;                      "isregister"    to describe a register
+;                      "isregister"    to describe a register
+;                      "isimmediate"   to describe an immediate
+;                      "isimmediate"   to describe an immediate
+;                      "offsetreg"     to describe a register plus offset
+;                      "offsetreg"     to describe a register plus offset
+;                      "anyregister"   to describe *ANY* register (uRx or Rx)
+;                      "anyregister"   to describe *ANY* register (uRx or Rx)
+;              But ... functions "address_operand", "immediate_operand",
+;              But ... functions "address_operand", "immediate_operand",
+;                      "register_operand", "indirect_operand"
+;                      "register_operand", "indirect_operand"
+;              "comparison_operator" and "ordered_comparison_operator"
+;              "comparison_operator" and "ordered_comparison_operator"
+;              are also available--be aware, they include more comparisons
+;              are also available--be aware, they include more comparisons
+;              than Zip CPU can do.
+;              than Zip CPU can do.
+;
+;
+;
+;
+;
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;; Move instructions: both
+;; Move instructions: both
+;;     (arbitrary) from variables to variables, but this gets
+;;     (arbitrary) from variables to variables, but this gets
+;;             expanded into:
+;;             expanded into:
+;;     from registers to registers
+;;     from registers to registers
+;;     from immediates to registers
+;;     from immediates to registers
+;;
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+;
+;
+;
+;
+(define_expand "mov<mode>"
+(define_expand "mov<mode>"
+       [(set (match_operand:ZI 0 "nonimmediate_operand" "")
+       [(set (match_operand:ZI 0 "nonimmediate_operand" "")
+               (match_operand:ZI 1 "general_operand" ""))]
+               (match_operand:ZI 1 "general_operand" ""))]
+       ""
+       ""
+       {//; Everything except mem=const or mem=mem can be done easily
+       {//; Everything except mem=const or mem=mem can be done easily
+       //; extern void zip_debug_rtx_pfx(const char *, const_rtx);
+       //; extern void zip_debug_rtx_pfx(const char *, const_rtx);
+       //; fprintf(stderr, "ZIP-GEN-MOVE\n");
+       //; fprintf(stderr, "ZIP-GEN-MOVE\n");
+       //; zip_debug_rtx_pfx("FROM: ", operands[1]);
+       //; zip_debug_rtx_pfx("FROM: ", operands[1]);
+       //; zip_debug_rtx_pfx("TO  : ", operands[0]);
+       //; zip_debug_rtx_pfx("TO  : ", operands[0]);
+
+
+       //; Need to load into a register between memory slots
+       //; Need to load into a register between memory slots
+       if ((MEM_P(operands[0]))&&(MEM_P(operands[1]))) {
+       if ((MEM_P(operands[0]))&&(MEM_P(operands[1]))) {
+               //; fprintf(stderr, "GEN-MOVSI: MEM -> MEM\n");
+               //; fprintf(stderr, "GEN-MOVSI: MEM -> MEM\n");
+               if (can_create_pseudo_p()) {
+               if (can_create_pseudo_p()) {
+                       rtx tmp = gen_reg_rtx(GET_MODE(operands[1]));
+                       rtx tmp = gen_reg_rtx(GET_MODE(operands[1]));
+                       if (GET_MODE(operands[1])==QImode)
+                       if (GET_MODE(operands[1])==QImode)
+                               emit_insn(gen_movqi(tmp,operands[1]));
+                               emit_insn(gen_movqi(tmp,operands[1]));
+                       else if (GET_MODE(operands[1])==HImode)
+                       else if (GET_MODE(operands[1])==HImode)
+                               emit_insn(gen_movhi(tmp,operands[1]));
+                               emit_insn(gen_movhi(tmp,operands[1]));
+                       else
+                       else
+                               emit_insn(gen_movsi(tmp,operands[1]));
+                               emit_insn(gen_movsi(tmp,operands[1]));
+                       operands[1] = tmp;
+                       operands[1] = tmp;
+               }
+               }
+       }}
+       }}
+       [(set_attr "ccresult" "unchanged")])
+       [(set_attr "ccresult" "unchanged")])
+(define_insn "mov<mode>_raw"
+(define_insn "mov<mode>_raw"
+       [(set (match_operand:ZI 0 "nonimmediate_operand" "=r,Q,r,r")
+       [(set (match_operand:ZI 0 "nonimmediate_operand" "=r,Q,r,r")
+               (match_operand:ZI 1 "general_operand" "r,r,Q,i"))]
+               (match_operand:ZI 1 "general_operand" "r,r,Q,i"))]
+       ""
+       ""
+       "@
+       "@
+       MOV\t%1,%0
+       MOV\t%1,%0
+       S<sz>\t%1,%0
+       S<sz>\t%1,%0
+       L<sz>\t%1,%0
+       L<sz>\t%1,%0
+       LDI\t%1,%0"
+       LDI\t%1,%0"
+       [(set_attr "ccresult" "unchanged")])
+       [(set_attr "ccresult" "unchanged")])
+(define_insn "movsi_reg_off" ; Register to register move, used by prologue
+(define_insn "movsi_reg_off" ; Register to register move, used by prologue
+       [(set (match_operand:SI 0 "register_operand" "=r")
+       [(set (match_operand:SI 0 "register_operand" "=r")
+               (plus:SI (match_operand:SI 1 "register_operand" "r")
+               (plus:SI (match_operand:SI 1 "register_operand" "r")
+                       (match_operand:SI 2 "zip_mvimm_operand_p" "M")))
+                       (match_operand:SI 2 "zip_mvimm_operand_p" "M")))
+               ]
+               ]
+       ""
+       ""
+       "MOV    %2(%1),%0"
+       "MOV    %2(%1),%0"
+       [(set_attr "ccresult" "unchanged")])
+       [(set_attr "ccresult" "unchanged")])
+(define_insn "mov<mode>_lod"   ; Load from memory
+(define_insn "mov<mode>_lod"   ; Load from memory
+       [(set (match_operand:ZI 0 "register_operand" "=r")
+       [(set (match_operand:ZI 0 "register_operand" "=r")
+               (match_operand:ZI 1 "zip_memory_operand_p" "Q"))]
+               (match_operand:ZI 1 "zip_memory_operand_p" "Q"))]
+       ""
+       ""
+       "L<sz>\t%1,%0"
+       "L<sz>\t%1,%0"
+       [(set_attr "predicable" "yes") (set_attr "ccresult" "unchanged")])
+       [(set_attr "predicable" "yes") (set_attr "ccresult" "unchanged")])
+(define_insn "mov<mode>_lod_off" ; used by epilogue code
+(define_insn "mov<mode>_lod_off" ; used by epilogue code
+       [(set (match_operand:ZI 0 "register_operand" "=r")
+       [(set (match_operand:ZI 0 "register_operand" "=r")
+               (mem:ZI (plus:SI (match_operand:SI 1 "register_operand" "r")
+               (mem:ZI (plus:SI (match_operand:SI 1 "register_operand" "r")
+                       (match_operand:SI 2 "zip_opb_immv_p" "N"))))]
+                       (match_operand:SI 2 "zip_opb_immv_p" "N"))))]
+       ""
+       ""
+       "L<sz>\t%2(%1),%0"
+       "L<sz>\t%2(%1),%0"
+       [(set_attr "predicable" "yes") (set_attr "ccresult" "unchanged")])
+       [(set_attr "predicable" "yes") (set_attr "ccresult" "unchanged")])
+(define_insn "mov<mode>_sto"   ; Store into memory
+(define_insn "mov<mode>_sto"   ; Store into memory
+       [(set (match_operand:ZI 0 "zip_memory_operand_p" "=Q")
+       [(set (match_operand:ZI 0 "zip_memory_operand_p" "=Q")
+               (match_operand:ZI 1 "register_operand" "r"))]
+               (match_operand:ZI 1 "register_operand" "r"))]
+       ""
+       ""
+       "S<sz>\t%1,%0"
+       "S<sz>\t%1,%0"
+       [(set_attr "predicable" "yes") (set_attr "ccresult" "unchanged")])
+       [(set_attr "predicable" "yes") (set_attr "ccresult" "unchanged")])
+(define_insn "mov<mode>_sto_off" ; used by prologue code
+(define_insn "mov<mode>_sto_off" ; used by prologue code
+       [(set (mem:ZI (plus:SI
+       [(set (mem:ZI (plus:SI
+                       (match_operand:SI 0 "register_operand" "r")
+                       (match_operand:SI 0 "register_operand" "r")
+                       (match_operand:SI 1 "zip_opb_immv_p" "N")))
+                       (match_operand:SI 1 "zip_opb_immv_p" "N")))
+               (match_operand:ZI 2 "register_operand" "r"))]
+               (match_operand:ZI 2 "register_operand" "r"))]
+       ""
+       ""
+       "S<sz>\t%2,%1(%0)"
+       "S<sz>\t%2,%1(%0)"
+       [(set_attr "predicable" "yes") (set_attr "ccresult" "unchanged")])
+       [(set_attr "predicable" "yes") (set_attr "ccresult" "unchanged")])
+(define_insn "mov<mode>_ldi"   ; Load immediate
+(define_insn "mov<mode>_ldi"   ; Load immediate
+       [(set (match_operand:ZI 0 "register_operand" "=r")
+       [(set (match_operand:ZI 0 "register_operand" "=r")
+               (match_operand:ZI 1 "immediate_operand" "ipU"))]
+               (match_operand:ZI 1 "immediate_operand" "ipU"))]
+       ""
+       ""
+       "LDI    %1,%0"
+       "LDI    %1,%0"
+       [(set_attr "ccresult" "unchanged")])
+       [(set_attr "ccresult" "unchanged")])
+;
+;
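A small C example of why the mov<mode> expander above forces memory-to-memory moves through a register; the example itself is not part of the patch. A plain assignment between two dereferenced pointers has no single-instruction form on the ZipCPU, so the expander loads the source into a pseudo (mov<mode>_lod, an L<sz> instruction) and then stores it (mov<mode>_sto, an S<sz> instruction):

    void copy_word(int *dst, const int *src)
    {
            *dst = *src;    /* mem = mem: load into a temporary register, then store */
    }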
+;
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;; Load and store multiple values
 
+;;
 
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
+;
 
+; So far, from the code I've seen from GCC's output,
 
+; these instructions do not appear to be necessary.
 
+;
 
+;(define_insn "load_multiple"
 
+;      for(a=0; a<%2; a++)
 
+;              LW a(%1),%0+a
 
+;(define_insn "store_multiple"
 
+;      for(a=0; a<%2; a++)
 
+;              SW %0+a,a(%1)
 
+; pushsi -- Do not define, compiler will work around it nicely w/o our help
 
+;
 
+;
 
+;
 
+;
 
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
+;;
 
+;; Substitution Pattern
+;; Substitution Pattern
+;;
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+;
+;
+(define_subst "cc_substitution"
+(define_subst "cc_substitution"
+       ; The pattern may not have any match_dup expressions.
+       ; The pattern may not have any match_dup expressions.
+       [(set (match_operand:SI 0 "" "") (match_operand:SI 1 "" ""))
+       [(set (match_operand:SI 0 "" "") (match_operand:SI 1 "" ""))
+               (clobber (reg:CC CC_REG))]
+               (clobber (reg:CC CC_REG))]
+       ""
+       ""
+       [(set (match_dup 0) (match_dup 1))
+       [(set (match_dup 0) (match_dup 1))
+               (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))
+               (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))
+       ])
+       ])
+;
+;
+(define_subst_attr "cc_subst" "cc_substitution" "_raw" "_clobber")
+(define_subst_attr "cc_subst" "cc_substitution" "_raw" "_clobber")
+;
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;; Mode conversions
+;; Mode conversions
+;;
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+;
+;
+(define_insn "zero_extendqisi2"
+(define_insn "zero_extendqisi2"
+       [(set (match_operand:SI 0 "register_operand" "=r")
+       [(set (match_operand:SI 0 "register_operand" "=r")
+               (zero_extend:SI
+               (zero_extend:SI
+                       (match_operand:QI 1 "register_operand" "0")))
+                       (match_operand:QI 1 "register_operand" "0")))
+       (clobber (reg:CC CC_REG))]
+       (clobber (reg:CC CC_REG))]
+       ""
+       ""
+       "AND\t255,%0    ; zero_extendqisi2 ... reg"
+       "AND\t255,%0    ; zero_extendqisi2 ... reg"
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
+
+
+(define_insn "zero_extendqisi2_raw"
+(define_insn "zero_extendqisi2_raw"
+       [(set (match_operand:SI 0 "register_operand" "=r")
+       [(set (match_operand:SI 0 "register_operand" "=r")
+               (zero_extend:SI
+               (zero_extend:SI
+                       (match_operand:QI 1 "register_operand" "0")))
+                       (match_operand:QI 1 "register_operand" "0")))
+       (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
+       (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
+       ""
+       ""
+       "AND\t255,%0    ; zero_extendqisi2 ... raw/set CC"
+       "AND\t255,%0    ; zero_extendqisi2 ... raw/set CC"
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
+
+
+(define_insn "zero_extendqisi2_mem"
+(define_insn "zero_extendqisi2_mem"
+       [(set (match_operand:SI 0 "register_operand" "=r")
+       [(set (match_operand:SI 0 "register_operand" "=r")
+               (zero_extend:SI
+               (zero_extend:SI
+                       (match_operand:QI 1 "memory_operand" "Q")))]
+                       (match_operand:QI 1 "memory_operand" "Q")))]
+       ""
+       ""
+       "LB\t%1,%0\t; Zero-Extend:QI"
+       "LB\t%1,%0\t; Zero-Extend:QI"
+       [(set_attr "predicable" "yes") (set_attr "ccresult" "unchanged")])
+       [(set_attr "predicable" "yes") (set_attr "ccresult" "unchanged")])
+;
+;
+;
+;
+;
+;
+;
+;
+(define_insn "zero_extendhisi2"
+(define_insn "zero_extendhisi2"
+       [(set (match_operand:SI 0 "register_operand" "=r")
+       [(set (match_operand:SI 0 "register_operand" "=r")
+               (zero_extend:SI
+               (zero_extend:SI
+                       (match_operand:HI 1 "register_operand" "0")))
+                       (match_operand:HI 1 "register_operand" "0")))
+       (clobber (reg:CC CC_REG))]
+       (clobber (reg:CC CC_REG))]
+       ""
+       ""
+       "AND\t65535,%0  ; zero_extendhisi2 ... reg"
+       "AND\t65535,%0  ; zero_extendhisi2 ... reg"
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
+(define_insn "zero_extendhisi2_raw"
+(define_insn "zero_extendhisi2_raw"
+       [(set (match_operand:SI 0 "register_operand" "=r")
+       [(set (match_operand:SI 0 "register_operand" "=r")
+               (zero_extend:SI
+               (zero_extend:SI
+                       (match_operand:HI 1 "register_operand" "0")))
+                       (match_operand:HI 1 "register_operand" "0")))
+       (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
+       (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
+       ""
+       ""
+       "AND\t65535,%0  ; zero_extendhisi2 ... reg"
+       "AND\t65535,%0  ; zero_extendhisi2 ... reg"
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
+(define_insn "zero_extendhisi2_mem"
+(define_insn "zero_extendhisi2_mem"
+       [(set (match_operand:SI 0 "register_operand" "=r")
+       [(set (match_operand:SI 0 "register_operand" "=r")
+               (zero_extend:SI
+               (zero_extend:SI
+                       (match_operand:HI 1 "memory_operand" "Q")))
+                       (match_operand:HI 1 "memory_operand" "Q")))
+       ]
+       ]
+       ""
+       ""
+       "LH\t%1,%0\t; Zero-Extend:HI"
+       "LH\t%1,%0\t; Zero-Extend:HI"
+       [(set_attr "predicable" "yes") (set_attr "ccresult" "set")])
+       [(set_attr "predicable" "yes") (set_attr "ccresult" "set")])
+;
+;
+;
+;
+;
+;
+;
+;
+(define_insn "extendqisi2"
+(define_insn "extendqisi2"
+       [(set (match_operand:SI 0 "register_operand" "=r")
+       [(set (match_operand:SI 0 "register_operand" "=r")
+               (sign_extend:SI
+               (sign_extend:SI
+                       (match_operand:QI 1 "register_operand" "0")))
+                       (match_operand:QI 1 "register_operand" "0")))
+       (clobber (reg:CC CC_REG))]
+       (clobber (reg:CC CC_REG))]
+       ""
+       ""
+       "SEXTB\t%0\t; SEXTB"
+       "SEXTB\t%0\t; SEXTB"
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+(define_insn "extendhisi2"
+(define_insn "extendhisi2"
+       [(set (match_operand:SI 0 "register_operand" "=r")
+       [(set (match_operand:SI 0 "register_operand" "=r")
+               (sign_extend:SI
+               (sign_extend:SI
+                       (match_operand:HI 1 "register_operand" "0")))
+                       (match_operand:HI 1 "register_operand" "0")))
+       (clobber (reg:CC CC_REG))]
+       (clobber (reg:CC CC_REG))]
+       ""
+       ""
+       "SEXTH\t%0\t; SEXTH"
+       "SEXTH\t%0\t; SEXTH"
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
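A few C conversions that would normally exercise the extension patterns above, assuming char maps to QImode and short to HImode on this target; the example is illustrative and not part of the patch:

    unsigned int zext_byte(unsigned char c)  { return c; }  /* AND 255,%0   (zero_extendqisi2) */
    unsigned int zext_half(unsigned short h) { return h; }  /* AND 65535,%0 (zero_extendhisi2) */
    int          sext_byte(signed char c)    { return c; }  /* SEXTB %0     (extendqisi2)      */
    int          sext_half(short h)          { return h; }  /* SEXTH %0     (extendhisi2)      */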
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;; General arithmetic instructions
+;; General arithmetic instructions
+;;
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+;
+;
+;
+;
+;;
+;;
+;; modsi3
+;; modsi3
+;; umodsi3
+;; umodsi3
+;;
+;;
+(define_insn "uminsi3"
+(define_insn "uminsi3"
+       [(set (match_operand:SI 0 "register_operand" "=r")
+       [(set (match_operand:SI 0 "register_operand" "=r")
+               (umin:SI (match_operand:SI 1 "register_operand" "%0")
+               (umin:SI (match_operand:SI 1 "register_operand" "%0")
+                       (match_operand:SI 2 "register_operand" "r")))
+                       (match_operand:SI 2 "register_operand" "r")))
+       (clobber (reg:CC CC_REG))
+       (clobber (reg:CC CC_REG))
+       ]
+       ]
+       ""
+       ""
+       "CMP    %0,%2
+       "CMP    %0,%2
+       MOV.C   %2,%0"
+       MOV.C   %2,%0"
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+(define_insn "umaxsi3"
+(define_insn "umaxsi3"
+       [(set (match_operand:SI 0 "register_operand" "=r")
+       [(set (match_operand:SI 0 "register_operand" "=r")
+               (umax:SI (match_operand:SI 1 "register_operand" "%0")
+               (umax:SI (match_operand:SI 1 "register_operand" "%0")
+                       (match_operand:SI 2 "register_operand" "r")))
+                       (match_operand:SI 2 "register_operand" "r")))
+       (clobber (reg:CC CC_REG))
+       (clobber (reg:CC CC_REG))
+       ]
+       ]
+       ""
+       ""
+       "CMP    %2,%0
+       "CMP    %2,%0
+       MOV.C   %2,%0"
+       MOV.C   %2,%0"
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+(define_insn "sminsi3"
+(define_insn "sminsi3"
+       [(set (match_operand:SI 0 "register_operand" "=r")
+       [(set (match_operand:SI 0 "register_operand" "=r")
+               (smin:SI (match_operand:SI 1 "register_operand" "%0")
+               (smin:SI (match_operand:SI 1 "register_operand" "%0")
+                       (match_operand:SI 2 "register_operand" "r")))
+                       (match_operand:SI 2 "register_operand" "r")))
+       (clobber (reg:CC CC_REG))
+       (clobber (reg:CC CC_REG))
+       ]
+       ]
+       ""
+       ""
+       "CMP    %2,%0
+       "CMP    %2,%0
+       MOV.GE  %2,%0"
+       MOV.GE  %2,%0"
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+(define_insn "smaxsi3"
+(define_insn "smaxsi3"
+       [(set (match_operand:SI 0 "register_operand" "=r")
+       [(set (match_operand:SI 0 "register_operand" "=r")
+               (smax:SI (match_operand:SI 1 "register_operand" "%0")
+               (smax:SI (match_operand:SI 1 "register_operand" "%0")
+                       (match_operand:SI 2 "register_operand" "r")))
+                       (match_operand:SI 2 "register_operand" "r")))
+       (clobber (reg:CC CC_REG))
+       (clobber (reg:CC CC_REG))
+       ]
+       ]
+       ""
+       ""
+       "CMP    %2,%0
+       "CMP    %2,%0
+       MOV.LT  %2,%0"
+       MOV.LT  %2,%0"
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
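Source-level forms that the min/max patterns above are intended to catch, each reducing to a CMP followed by a conditional MOV; illustrative only, since whether GCC actually selects these patterns depends on the optimization settings:

    unsigned int umin(unsigned int a, unsigned int b) { return (a < b) ? a : b; } /* uminsi3: CMP, MOV.C  */
    int          smax(int a, int b)                   { return (a > b) ? a : b; } /* smaxsi3: CMP, MOV.LT */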
+;
+;
+; AND
+; AND
+; iOR
+; iOR
+; XOR
+; XOR
+;
+;
+;
+;
+;
+;
+;(define_insn "addvsi4"
+;(define_insn "addvsi4"
+       ;[(set (match_operand:SI 0 "register_operand" "=r")
+       ;[(set (match_operand:SI 0 "register_operand" "=r")
+               ;(plus:SI (match_operand:SI 1 "register_operand" "%r")
+               ;(plus:SI (match_operand:SI 1 "register_operand" "%r")
+                       ;(match_operand:SI 2 "general_operand" "rO")))
+                       ;(match_operand:SI 2 "general_operand" "rO")))
+       ;(set (pc) (if_then_else (eq (reg:CC CC_REG) (const_int 0))
+       ;(set (pc) (if_then_else (eq (reg:CC CC_REG) (const_int 0))
+                       ;(label_ref (match_operand 3))
+                       ;(label_ref (match_operand 3))
+                       ;(pc)))]
+                       ;(pc)))]
+       ;""
+       ;""
+       ;"MOV   %1,%0
+       ;"MOV   %1,%0
+       ;ADD    %2,%0
+       ;ADD    %2,%0
+       ;BV     %3"
+       ;BV     %3"
+       ;[(set_attr "predicable" "no") (set_attr "ccresult" "set")])
+       ;[(set_attr "predicable" "no") (set_attr "ccresult" "set")])
+;;(define_insn "subvsi4"
+;;(define_insn "subvsi4"
+;;     MOV     %1,%0
+;;     MOV     %1,%0
+;;     SUB     %2,%0
+;;     SUB     %2,%0
+;;     BV      %3
+;;     BV      %3
+;;(mulvsi4)
+;;(mulvsi4)
+;;(define_insn "uaddvsi4"
+;;(define_insn "uaddvsi4"
+;;     ADD     %2,%0
+;;     ADD     %2,%0
+;;     BC      %3
+;;     BC      %3
+;;(define_insn "usubvsi4"
+;;(define_insn "usubvsi4"
+;;     MOV     %1,%0
+;;     MOV     %1,%0
+;;     SUB     %2,%0
+;;     SUB     %2,%0
+;;     BC      %3
+;;     BC      %3
+;;
+;;
+;; (define_insn "umulvsi4"
+;; (define_insn "umulvsi4"
+;;     ... ???)
+;;     ... ???)
+;;
+;;
+;
+;
+;
+;
+; ASR
 
+; LSL
 
+; LSR
 
+;
 
+;
+;
+;
+;
+;
+;
+;
+;
+; Others:  NEG, TEST, POPC, NOT
+; Others:  NEG, TEST, POPC, NOT
+;
+;
+;
+;
+(define_insn "negsi2"
+(define_insn "negsi2"
+       [(set (match_operand:SI 0 "register_operand" "=r")
+       [(set (match_operand:SI 0 "register_operand" "=r")
+               (neg:SI (match_operand:SI 1 "register_operand" "r")))
+               (neg:SI (match_operand:SI 1 "register_operand" "r")))
+       (clobber (reg:CC CC_REG))]
+       (clobber (reg:CC CC_REG))]
+       ""
+       ""
+       "NEG    %1,%0"  ;//; = MOV -1(%1),%0, XOR -1,%0
+       "NEG    %1,%0"  ;//; = MOV -1(%1),%0, XOR -1,%0
+       [(set_attr "ccresult" "validzn")])
+       [(set_attr "ccresult" "validzn")])
+(define_insn "abssi2"
+(define_insn "abssi2"
+       [(set (match_operand:SI 0 "register_operand" "=r")
+       [(set (match_operand:SI 0 "register_operand" "=r")
+               (abs:SI (match_operand:SI 1 "register_operand" "0")))
+               (abs:SI (match_operand:SI 1 "register_operand" "0")))
+       (clobber (reg:CC CC_REG))]
+       (clobber (reg:CC CC_REG))]
+       ""
+       ""
+       "TEST\t%0\n\tNEG.LT\t%0"
+       "TEST\t%0\n\tNEG.LT\t%0"
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+(define_insn_and_split "one_cmplsi2"
+(define_insn_and_split "one_cmplsi2"
+       [(set (match_operand:SI 0 "register_operand" "=r")
+       [(set (match_operand:SI 0 "register_operand" "=r")
+               (not:SI (match_operand:SI 1 "register_operand" "0")))
+               (not:SI (match_operand:SI 1 "register_operand" "0")))
+       (clobber (reg:CC CC_REG))]
+       (clobber (reg:CC CC_REG))]
+       ""
+       ""
+       "#"
+       "#"
+       ""
+       ""
+       [(parallel [(set (match_dup 0) (xor:SI (match_dup 1) (const_int -1)))
+       [(parallel [(set (match_dup 0) (xor:SI (match_dup 1) (const_int -1)))
+               (clobber (reg:CC CC_REG))])]
+               (clobber (reg:CC CC_REG))])]
+       ""
+       ""
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
+;
+;
+;
+;
+;
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;; Conditional arithmetic instructions
 
+;;
 
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
+;
 
+;
 
+;
 
+;
 
+;
 
+;
 
+;
 
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
+;;
 
+;; Comparison instructions, both compare and test
+;; Comparison instructions, both compare and test
+;;
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+;
+;
+;
+;
+;; This will only work so well, since the direction of the compare is
+;; This will only work so well, since the direction of the compare is
+;; important in unsigned compares.
+;; important in unsigned compares.
+;;
+;;
+(define_expand "cmpsi"
+(define_expand "cmpsi"
+       [(set (reg:CC CC_REG) (compare:CC
+       [(set (reg:CC CC_REG) (compare:CC
+               (match_operand:SI 0 "register_operand" "r")
+               (match_operand:SI 0 "register_operand" "r")
+               (match_operand:SI 1 "nonmemory_operand" "")))]
+               (match_operand:SI 1 "nonmemory_operand" "")))]
+       ""
+       ""
+       {
+       {
+               if (!zip_opb_operand_p(operands[1],SImode)) {
+               if (!zip_opb_operand_p(operands[1],SImode)) {
+                       if (can_create_pseudo_p()) {
+                       if (can_create_pseudo_p()) {
+                               //; fprintf(stderr, "Generating pseudo register for compare\n");
+                               //; fprintf(stderr, "Generating pseudo register for compare\n");
+                               rtx tmp = gen_reg_rtx(SImode);
+                               rtx tmp = gen_reg_rtx(SImode);
+                               emit_insn(gen_movsi(tmp,operands[1]));
+                               emit_insn(gen_movsi(tmp,operands[1]));
+                               operands[1] = tmp;
+                               operands[1] = tmp;
+                       } else FAIL;
+                       } else FAIL;
+               }
+               }
+       })
+       })
+(define_insn "cmpsi_reg"
+(define_insn "cmpsi_reg"
+       [(set (reg:CC CC_REG) (compare:CC
+       [(set (reg:CC CC_REG) (compare:CC
+               (match_operand:SI 0 "register_operand" "r")
+               (match_operand:SI 0 "register_operand" "r")
+               (match_operand:SI 1 "zip_opb_single_operand_p" "rO")))]
+               (match_operand:SI 1 "zip_opb_single_operand_p" "rO")))]
+       ""
+       ""
+       "CMP\t%1,%0"
+       "CMP\t%1,%0"
+       [(set_attr "ccresult" "set")])
+       [(set_attr "ccresult" "set")])
+(define_insn "cmpsi_off"
+(define_insn "cmpsi_off"
+       [(set (reg:CC CC_REG) (compare:CC
+       [(set (reg:CC CC_REG) (compare:CC
+               (match_operand:SI 0 "register_operand" "r")
+               (match_operand:SI 0 "register_operand" "r")
+               (plus:SI (match_operand:SI 1 "register_operand" "r")
+               (plus:SI (match_operand:SI 1 "register_operand" "r")
+                       (match_operand 2 "zip_opb_immv_p" "N"))))]
+                       (match_operand 2 "zip_opb_immv_p" "N"))))]
+       ""
+       ""
+       "CMP\t%2+%1,%0"
+       "CMP\t%2+%1,%0"
+       [(set_attr "ccresult" "set")])
+       [(set_attr "ccresult" "set")])
+(define_insn "testsi"
+(define_insn "testsi"
+       [(set (reg:CC CC_REG) (compare:CC (and:SI (match_operand:SI 0 "register_operand" "r")
+       [(set (reg:CC CC_REG) (compare:CC (and:SI (match_operand:SI 0 "register_operand" "r")
+                               (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))
+                               (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))
+                       (const_int 0)))]
+                       (const_int 0)))]
+       ""
+       ""
+       "TEST   %1,%0"
+       "TEST   %1,%0"
+       [(set_attr "ccresult" "set")])
+       [(set_attr "ccresult" "set")])
+(define_insn "testsi_off"
+(define_insn "testsi_off"
+       [(set (reg:CC CC_REG) (compare:CC
+       [(set (reg:CC CC_REG) (compare:CC
+               (and:SI (match_operand:SI 0 "register_operand" "r")
+               (and:SI (match_operand:SI 0 "register_operand" "r")
+                       (plus:SI
+                       (plus:SI
+                               (match_operand:SI 1 "register_operand" "r")
+                               (match_operand:SI 1 "register_operand" "r")
+                               (match_operand:SI 2 "zip_opb_immv_p" "N")))
+                               (match_operand:SI 2 "zip_opb_immv_p" "N")))
+               (const_int 0)))]
+               (const_int 0)))]
+       ""
+       ""
+       "TEST   %2+%1,%0"
+       "TEST   %2+%1,%0"
+       [(set_attr "ccresult" "set")])
+       [(set_attr "ccresult" "set")])
+(define_insn "nop"
+(define_insn "nop"
+       [(const_int 0)]
+       [(const_int 0)]
+       ""
+       ""
+       "NOOP"
+       "NOOP"
+       [(set_attr "ccresult" "unchanged")])
+       [(set_attr "ccresult" "unchanged")])
+;
+;
+;
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;; Conditional execution predicates
+;; Conditional execution predicates
+;;
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+; Sadly, these aren't as complete as they should be.  Although these are all of
+; Sadly, these aren't as complete as they should be.  Although these are all of
+; the conditional execution prefixes that the Zip CPU supports, GCC looks for
+; the conditional execution prefixes that the Zip CPU supports, GCC looks for
+; other conditions than these.  That is, (cond_exec ...) is not as well
+; other conditions than these.  That is, (cond_exec ...) is not as well
+; recognized as (if_then_else ...).  So we have to duplicate things to support
+; recognized as (if_then_else ...).  So we have to duplicate things to support
+; both methods.
+; both methods.
+;
+;
+(define_cond_exec
+(define_cond_exec
+       [(eq  (reg:CC CC_REG) (const_int 0))] "" "[Z]")
+       [(eq  (reg:CC CC_REG) (const_int 0))] "" "[Z]")
+(define_cond_exec
+(define_cond_exec
+       [(ne  (reg:CC CC_REG) (const_int 0))] "" "[NZ]")
+       [(ne  (reg:CC CC_REG) (const_int 0))] "" "[NZ]")
+(define_cond_exec
+(define_cond_exec
+       [(lt  (reg:CC CC_REG) (const_int 0))] "" "[LT]")
+       [(lt  (reg:CC CC_REG) (const_int 0))] "" "[LT]")
+(define_cond_exec
+(define_cond_exec
+       [(ge  (reg:CC CC_REG) (const_int 0))] "" "[GE]")
+       [(ge  (reg:CC CC_REG) (const_int 0))] "" "[GE]")
+(define_cond_exec
+(define_cond_exec
+       [(ltu (reg:CC CC_REG) (const_int 0))] "" "[C]")
+       [(ltu (reg:CC CC_REG) (const_int 0))] "" "[C]")
+(define_cond_exec
+(define_cond_exec
+       [(geu (reg:CC CC_REG) (const_int 0))] "" "[NC]")
+       [(geu (reg:CC CC_REG) (const_int 0))] "" "[NC]")
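To make the duplication described above concrete, here is a hedged sketch of C code that the if-conversion passes can turn into predicated instructions using these prefixes; the exact instruction selection is illustrative, not guaranteed.

/* Hypothetical example: a small if/else diamond that if-conversion can
 * collapse into a compare followed by two predicated operations (one
 * executed under [LT], the other under [GE]), with no branches. */
int adjust(int x, int a, int b)
{
	if (a < b)
		x -= 1;
	else
		x += 1;
	return x;
}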
+;
+;
+;
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;; Conditional move instructions, since these won't accept conditional
+;; Conditional move instructions, since these won't accept conditional
+;;     execution RTL
+;;     execution RTL
+;;
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+; // Look for #define HAVE_conditional_move to understand how these might be
+; // Look for #define HAVE_conditional_move to understand how these might be
+; // used.
+; // used.
+;
+;
+; set_zero_or_one_si
+; set_zero_or_one_si
+; movsicc
+; movsicc
+(define_expand "movsicc"
+(define_expand "movsicc"
+       [(set (match_operand:SI 0 "nonimmediate_operand" "")
+       [(set (match_operand:SI 0 "nonimmediate_operand" "")
+               (if_then_else:SI (match_operand 1 "comparison_operator")
+               (if_then_else:SI (match_operand 1 "comparison_operator")
+                       (match_operand:SI 2 "general_operand" "")
+                       (match_operand:SI 2 "general_operand" "")
+                       (match_operand:SI 3 "general_operand" "")))]
+                       (match_operand:SI 3 "general_operand" "")))]
+       ""
+       ""
+       {
+       {
+       //; extern void zip_debug_rtx_pfx(const char *, const_rtx);
+       //; extern void zip_debug_rtx_pfx(const char *, const_rtx);
+       //; fprintf(stderr, "MOVSICC\n");
+       //; fprintf(stderr, "MOVSICC\n");
+       //; zip_debug_rtx_pfx("- DST: ", operands[0]);
+       //; zip_debug_rtx_pfx("- DST: ", operands[0]);
+       //; zip_debug_rtx_pfx("- CMP: ", operands[1]);
+       //; zip_debug_rtx_pfx("- CMP: ", operands[1]);
+       //; zip_debug_rtx_pfx("- NEW: ", operands[2]);
+       //; zip_debug_rtx_pfx("- NEW: ", operands[2]);
+       //; zip_debug_rtx_pfx("- DEF: ", operands[3]);
+       //; zip_debug_rtx_pfx("- DEF: ", operands[3]);
+
+
+       if (!REG_P(operands[2]))
+       if (!REG_P(operands[2]))
+               operands[2] = force_reg(SImode, operands[2]);
+               operands[2] = force_reg(SImode, operands[2]);
+
+
+       if ((!REG_P(operands[3]))||(REGNO(operands[0]) != REGNO(operands[3])))
+       if ((!REG_P(operands[3]))||(REGNO(operands[0]) != REGNO(operands[3])))
+               emit_insn(gen_movsi(operands[0], operands[3]));
+               emit_insn(gen_movsi(operands[0], operands[3]));
+       operands[3] = operands[0];
+       operands[3] = operands[0];
+
+
+
+
+       rtx_code        ccode = GET_CODE(operands[1]);
+       rtx_code        ccode = GET_CODE(operands[1]);
+       rtx     cmpop0 = copy_rtx(XEXP(operands[1], 0));
+       rtx     cmpop0 = copy_rtx(XEXP(operands[1], 0));
+       rtx     cmpop1 = copy_rtx(XEXP(operands[1], 1));
+       rtx     cmpop1 = copy_rtx(XEXP(operands[1], 1));
+
+
+       zip_canonicalize_comparison((int *)&ccode, &cmpop0, &cmpop1, true);
+       zip_canonicalize_comparison((int *)&ccode, &cmpop0, &cmpop1, true);
+       emit_insn(gen_cmpsi(cmpop0, cmpop1));
+       emit_insn(gen_cmpsi(cmpop0, cmpop1));
+
+
+       operands[1] = gen_rtx_fmt_ee(ccode, VOIDmode,
+       operands[1] = gen_rtx_fmt_ee(ccode, VOIDmode,
+                       gen_rtx_REG(CCmode, 14), const0_rtx);
+                       gen_rtx_REG(CCmode, 14), const0_rtx);
+       })
+       })
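For orientation, this is the sort of source construct the middle end routes through movsicc when conditional moves are advertised; the expander above copies the fall-through value into the destination and then predicates a move of the other value on the canonicalized condition. Names are illustrative only.

/* Hypothetical example: with movsicc available, GCC can expand this
 * ternary without a branch -- compare a against b, move d into the
 * result register, then conditionally move c into it. */
int pick(int a, int b, int c, int d)
{
	return (a < b) ? c : d;
}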
+;
+;
+;
+;
+;
+;
+(define_expand "addsicc"
+(define_expand "addsicc"
+       [(set (match_operand:SI 0 "register_operand" "=r")
+       [(set (match_operand:SI 0 "register_operand" "=r")
+               (if_then_else:SI (match_operand 1 "comparison_operator")
+               (if_then_else:SI (match_operand 1 "comparison_operator")
+                       (plus:SI (match_operand:SI 2 "register_operand" "0")
+                       (plus:SI (match_operand:SI 2 "register_operand" "0")
+                               (match_operand:SI 3 "zip_opb_single_operand_p" "rO"))
+                               (match_operand:SI 3 "zip_opb_single_operand_p" "rO"))
+                       (match_dup 2)))]
+                       (match_dup 2)))]
+       ""
+       ""
+       {
+       {
+               //; extern void zip_debug_rtx_pfx(const char *, const_rtx);
+               //; extern void zip_debug_rtx_pfx(const char *, const_rtx);
+               //; fprintf(stderr, "ADDSICC\n");
+               //; fprintf(stderr, "ADDSICC\n");
+               //; zip_debug_rtx_pfx("- DST: ", operands[0]);
+               //; zip_debug_rtx_pfx("- DST: ", operands[0]);
+               //; zip_debug_rtx_pfx("- CMP: ", operands[1]);
+               //; zip_debug_rtx_pfx("- CMP: ", operands[1]);
+               //; zip_debug_rtx_pfx("- OLD: ", operands[2]);
+               //; zip_debug_rtx_pfx("- OLD: ", operands[2]);
+               //; zip_debug_rtx_pfx("- INC: ", operands[3]);
+               //; zip_debug_rtx_pfx("- INC: ", operands[3]);
+
+
+               if (!REG_P(operands[2]))
+               if (!REG_P(operands[2]))
+                       operands[2] = force_reg(SImode, operands[2]);
+                       operands[2] = force_reg(SImode, operands[2]);
+               if (REGNO(operands[0]) != REGNO(operands[2]))
+               if (REGNO(operands[0]) != REGNO(operands[2]))
+                       emit_insn(gen_movsi(operands[0], operands[2]));
+                       emit_insn(gen_movsi(operands[0], operands[2]));
+               operands[2] = operands[0];
+               operands[2] = operands[0];
+
+
+               rtx_code        ccode = GET_CODE(operands[1]);
+               rtx_code        ccode = GET_CODE(operands[1]);
+               rtx     cmpop0 = copy_rtx(XEXP(operands[1], 0));
+               rtx     cmpop0 = copy_rtx(XEXP(operands[1], 0));
+               rtx     cmpop1 = copy_rtx(XEXP(operands[1], 1));
+               rtx     cmpop1 = copy_rtx(XEXP(operands[1], 1));
+
+
+               zip_canonicalize_comparison((int *)&ccode, &cmpop0, &cmpop1, true);
+               zip_canonicalize_comparison((int *)&ccode, &cmpop0, &cmpop1, true);
+               emit_insn(gen_cmpsi(cmpop0, cmpop1));
+               emit_insn(gen_cmpsi(cmpop0, cmpop1));
+
+
+               operands[1] = gen_rtx_fmt_ee(ccode, VOIDmode,
+               operands[1] = gen_rtx_fmt_ee(ccode, VOIDmode,
+                       gen_rtx_REG(CCmode, 14), const0_rtx);
+                       gen_rtx_REG(CCmode, 14), const0_rtx);
+       }
+       }
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
+;
+;
+;
+;
+;
+;
+(define_expand "notsicc"
+(define_expand "notsicc"
+       [(set (match_operand:SI 0 "register_operand" "=r")
+       [(set (match_operand:SI 0 "register_operand" "=r")
+               (if_then_else:SI (match_operand 1 "comparison_operator")
+               (if_then_else:SI (match_operand 1 "comparison_operator")
+                       (xor:SI (match_operand:SI 2 "register_operand" "0")
+                       (xor:SI (match_operand:SI 2 "register_operand" "0")
+                               (const_int -1))
+                               (const_int -1))
+                       (match_dup 2)))]
+                       (match_operand:SI 3 "register_operand" "0")))]
+       ""
+       ""
+       {
+       {
+               extern void zip_debug_rtx_pfx(const char *, const_rtx);
+               //; extern void zip_debug_rtx_pfx(const char *, const_rtx);
+               //; fprintf(stderr, "NOTSICC\n");
+               //; fprintf(stderr, "NOTSICC\n");
+               //; zip_debug_rtx_pfx("- DST: ", operands[0]);
+               //; zip_debug_rtx_pfx("- DST: ", operands[0]);
+               //; zip_debug_rtx_pfx("- CMP: ", operands[1]);
+               //; zip_debug_rtx_pfx("- CMP: ", operands[1]);
+               //; zip_debug_rtx_pfx("- NOT: ", operands[2]);
+               //; zip_debug_rtx_pfx("- NOT: ", operands[2]);
+               //; zip_debug_rtx_pfx("- OLD: ", operands[3]);
+               //; zip_debug_rtx_pfx("- OLD: ", operands[3]);
+
+
+               if (!REG_P(operands[2]))
 
+                       operands[2] = force_reg(SImode, operands[2]);
 
+               if (REGNO(operands[0]) != REGNO(operands[2]))
 
+                       emit_insn(gen_movsi(operands[0], operands[2]));
 
+
+
+               rtx_code        ccode = GET_CODE(operands[1]);
+               rtx_code        ccode = GET_CODE(operands[1]);
+               rtx     cmpop0 = copy_rtx(XEXP(operands[1], 0));
+               rtx     cmpop0 = copy_rtx(XEXP(operands[1], 0));
+               rtx     cmpop1 = copy_rtx(XEXP(operands[1], 1));
+               rtx     cmpop1 = copy_rtx(XEXP(operands[1], 1));
+
+
+               zip_canonicalize_comparison((int *)&ccode,&cmpop0,&cmpop1,true);
+               zip_canonicalize_comparison((int *)&ccode,&cmpop0,&cmpop1,true);
+               emit_insn(gen_cmpsi(cmpop0, cmpop1));
+               emit_insn(gen_cmpsi(cmpop0, cmpop1));
+
+
+               operands[1] = gen_rtx_fmt_ee(ccode, VOIDmode,
+               operands[1] = gen_rtx_fmt_ee(ccode, VOIDmode,
+                       gen_rtx_REG(CCmode, 14), const0_rtx);
+                       gen_rtx_REG(CCmode, 14), const0_rtx);
+       }
+       }
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
+;
+;
+;
+;
+(define_expand "negsicc"
+(define_expand "negsicc"
+       [(set (match_operand:SI 0 "register_operand" "+r")
+       [(set (match_operand:SI 0 "register_operand" "+r")
+               (if_then_else:SI (match_operand 1 "comparison_operator")
+               (if_then_else:SI (match_operand 1 "comparison_operator")
+                       (neg:SI (match_operand:SI 2 "register_operand" "0"))
+                       (neg:SI (match_operand:SI 2 "register_operand" "0"))
+                       (match_operand:SI 3 "register_operand" "0")))]
+                       (match_operand:SI 3 "register_operand" "0")))]
+       ""
+       ""
+       {
+       {
+               //; extern void zip_debug_rtx_pfx(const char *, const_rtx);
+               //; extern void zip_debug_rtx_pfx(const char *, const_rtx);
+               //; fprintf(stderr, "NEGSICC\n");
+               //; fprintf(stderr, "NEGSICC\n");
+               //; zip_debug_rtx_pfx("- DST: ", operands[0]);
+               //; zip_debug_rtx_pfx("- DST: ", operands[0]);
+               //; zip_debug_rtx_pfx("- CMP: ", operands[1]);
+               //; zip_debug_rtx_pfx("- CMP: ", operands[1]);
+               //; zip_debug_rtx_pfx("- NOT: ", operands[2]);
+               //; zip_debug_rtx_pfx("- NOT: ", operands[2]);
+
+
+               if (!REG_P(operands[2]))
+               if (!REG_P(operands[2]))
+                       operands[2] = force_reg(SImode, operands[2]);
+                       operands[2] = force_reg(SImode, operands[2]);
+               if (REGNO(operands[0]) != REGNO(operands[3]))
+               if (REGNO(operands[0]) != REGNO(operands[3]))
+                       emit_insn(gen_movsi(operands[0], operands[3]));
+                       emit_insn(gen_movsi(operands[0], operands[3]));
+
+
+               rtx_code        ccode = GET_CODE(operands[1]);
+               rtx_code        ccode = GET_CODE(operands[1]);
+               rtx     cmpop0 = copy_rtx(XEXP(operands[1], 0));
+               rtx     cmpop0 = copy_rtx(XEXP(operands[1], 0));
+               rtx     cmpop1 = copy_rtx(XEXP(operands[1], 1));
+               rtx     cmpop1 = copy_rtx(XEXP(operands[1], 1));
+
+
+               zip_canonicalize_comparison((int *)&ccode,&cmpop0,&cmpop1,true);
+               zip_canonicalize_comparison((int *)&ccode,&cmpop0,&cmpop1,true);
+               emit_insn(gen_cmpsi(cmpop0, cmpop1));
+               emit_insn(gen_cmpsi(cmpop0, cmpop1));
+
+
+               operands[1] = gen_rtx_fmt_ee(ccode, VOIDmode,
+               operands[1] = gen_rtx_fmt_ee(ccode, VOIDmode,
+                       gen_rtx_REG(CCmode, 14), const0_rtx);
+                       gen_rtx_REG(CCmode, 14), const0_rtx);
+
+
+               if (REGNO(operands[0]) != REGNO(operands[2]))
+               if (REGNO(operands[0]) != REGNO(operands[2]))
+                       emit_insn(gen_movsicc(operands[0], operands[1], operands[2], operands[0]));
+                       emit_insn(gen_movsicc(operands[0], operands[1], operands[2], operands[0]));
+       }
+       }
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
+;
+;
+;
+;
+(define_expand "cstoresi4"
+(define_expand "cstoresi4"
+       [(set (reg:CC CC_REG) (compare:CC (match_operand:SI 2 "register_operand" "r")
+       [(set (reg:CC CC_REG) (compare:CC (match_operand:SI 2 "register_operand" "r")
+               (match_operand:SI 3 "zip_opb_operand_p" "rO")))
+               (match_operand:SI 3 "zip_opb_operand_p" "rO")))
+       (set (match_operand:SI 0 "register_operand" "=r")
+       (set (match_operand:SI 0 "register_operand" "=r")
+               (if_then_else:SI
+               (if_then_else:SI
+                       (match_operator 1 "ordered_comparison_operator"
+                       (match_operator 1 "ordered_comparison_operator"
+                               [(reg:CC CC_REG) (const_int 0)])
+                               [(reg:CC CC_REG) (const_int 0)])
+                       (const_int 1) (const_int 0)))]
+                       (const_int 1) (const_int 0)))]
+       ""
+       ""
+       {
+       {
+               //; extern void zip_debug_rtx_pfx(const char *, const_rtx);
+               //; extern void zip_debug_rtx_pfx(const char *, const_rtx);
+               //; fprintf(stderr, "CSTORESI4\n");
+               //; fprintf(stderr, "CSTORESI4\n");
+               //; zip_debug_rtx_pfx("- DST: ", operands[0]);
+               //; zip_debug_rtx_pfx("- DST: ", operands[0]);
+               //; zip_debug_rtx_pfx("- TST: ", operands[1]);
+               //; zip_debug_rtx_pfx("- TST: ", operands[1]);
+               //; zip_debug_rtx_pfx("- A  : ", operands[2]);
+               //; zip_debug_rtx_pfx("- A  : ", operands[2]);
+               //; zip_debug_rtx_pfx("-  -B: ", operands[3]);
+               //; zip_debug_rtx_pfx("-  -B: ", operands[3]);
+
+
+               rtx_code        ccode = GET_CODE(operands[1]);
+               rtx_code        ccode = GET_CODE(operands[1]);
+
+
+               zip_canonicalize_comparison((int *)&ccode,&operands[2],&operands[3],true);
+               zip_canonicalize_comparison((int *)&ccode,&operands[2],&operands[3],true);
+               emit_insn(gen_cmpsi(operands[2], operands[3]));
+               emit_insn(gen_cmpsi(operands[2], operands[3]));
+               emit_insn(gen_movsi(operands[0], const0_rtx));
+               emit_insn(gen_movsi(operands[0], const0_rtx));
+               switch(ccode) {
+               switch(ccode) {
+               case EQ:
+               case EQ:
+                       emit_insn(gen_cmov_eq(operands[0], const1_rtx));
+                       emit_insn(gen_cmov_eq(operands[0], const1_rtx));
+                       break;
+                       break;
+               case NE:
+               case NE:
+                       emit_insn(gen_cmov_ne(operands[0], const1_rtx));
+                       emit_insn(gen_cmov_ne(operands[0], const1_rtx));
+                       break;
+                       break;
+               case LT:
+               case LT:
+                       emit_insn(gen_cmov_lt(operands[0], const1_rtx));
+                       emit_insn(gen_cmov_lt(operands[0], const1_rtx));
+                       break;
+                       break;
+               case GE:
+               case GE:
+                       emit_insn(gen_cmov_ge(operands[0], const1_rtx));
+                       emit_insn(gen_cmov_ge(operands[0], const1_rtx));
+                       break;
+                       break;
+               case LTU:
+               case LTU:
+                       emit_insn(gen_cmov_ltu(operands[0], const1_rtx));
+                       emit_insn(gen_cmov_ltu(operands[0], const1_rtx));
+                       break;
+                       break;
+               case GEU:
+               case GEU:
+                       emit_insn(gen_cmov_geu(operands[0], const1_rtx));
+                       emit_insn(gen_cmov_geu(operands[0], const1_rtx));
+                       break;
+                       break;
+               default:
+               default:
+                       FAIL;
+                       FAIL;
+               } DONE;
+               } DONE;
+       }
+       }
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
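A short, hedged illustration of what cstoresi4 is for: materializing a comparison result as 0 or 1 without a branch, exactly as the switch above does with a compare, a move of zero, and a conditional move of one.

/* Hypothetical example: the expander above would emit roughly a CMP of
 * b against a, load 0 into the result, then conditionally move 1 into
 * it under the LT condition. */
int is_less(int a, int b)
{
	return a < b;
}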
+;
+;
+;
+;
+;
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;; Control flow instructions
+;; Control flow instructions
+;;
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+;
+;
+;
+;
+(define_expand "jump"
+(define_expand "jump"
+       [(set (pc)
+       [(set (pc)
+               (label_ref (match_operand 0 "" "")))])
+               (label_ref (match_operand 0 "" "")))])
+(define_insn "jump_const"
+(define_insn "jump_const"
+       [(set (pc)
+       [(set (pc)
+               (match_operand:SI 0 "zip_const_address_operand_p" ""))]
+               (match_operand:SI 0 "zip_const_address_operand_p" ""))]
+       ""
+       ""
+       "BRA    %0"
+       "BRA    %0"
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
+(define_insn "jump_label"      ; Must be modeless, VOIDmode, not SI or any othr
+(define_insn "jump_label"      ; Must be modeless, VOIDmode, not SI or any othr
+       [(set (pc)      ; Otherwise it won't accept jumps to labels
+       [(set (pc)      ; Otherwise it won't accept jumps to labels
+               (label_ref (match_operand 0 "" "")))]
+               (label_ref (match_operand 0 "" "")))]
+       ""
+       ""
+       "BRA    %0"
+       "BRA    %0"
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
+;
+;
+; This is really the same thing as an indirect jump ... the big difference
+; This is really the same thing as an indirect jump ... the big difference
+; is that the zip_address_operand_p checks for an "N" type condition, not an
+; is that the zip_address_operand_p checks for an "N" type condition, not an
+; "M" type condition ... a bug, but one that works for now.  (The assembler
+; "M" type condition ... a bug, but one that works for now.  (The assembler
+; should be able to catch it and raise an error on it ...)
+; should be able to catch it and raise an error on it ...)
+;
+;
+; #warning "This predicate is appropriate for non-moves, but not for JMPs"
+; #warning "This predicate is appropriate for non-moves, but not for JMPs"
+(define_insn "jump_variable"
+(define_insn "jump_variable"
+       [(set (pc)
+       [(set (pc)
+               (match_operand:SI 0 "zip_address_operand_p" ""))]
+               (match_operand:SI 0 "zip_address_operand_p" ""))]
+       ""
+       ""
+       "JMP    %0"
+       "JMP    %0"
+       [(set_attr "ccresult" "unchanged")])
+       [(set_attr "ccresult" "unchanged")])
+;
+;
+; Indirect jumps ... both to registers, and registers plus offsets
+; Indirect jumps ... both to registers, and registers plus offsets
+;
+;
+(define_insn "indirect_jump"
+(define_insn "indirect_jump"
+       [(set (pc)
+       [(set (pc)
+               (match_operand:SI 0 "register_operand" "r"))]
+               (match_operand:SI 0 "register_operand" "r"))]
+       ""
+       ""
+       "JMP    %0"
+       "JMP    %0"
+       [(set_attr "ccresult" "unchanged")])
+       [(set_attr "ccresult" "unchanged")])
+(define_insn "indirect_jump_mem"
+(define_insn "indirect_jump_mem"
+       [(set (pc) (match_operand:SI 0 "zip_memory_operand_p" "o"))]
+       [(set (pc) (match_operand:SI 0 "zip_memory_operand_p" "o"))]
+       ""
+       ""
+       "LW     %0,PC"
+       "LW     %0,PC"
+       [(set_attr "ccresult" "unchanged")])
+       [(set_attr "ccresult" "unchanged")])
+(define_insn "indirect_jump_off"
+(define_insn "indirect_jump_off"
+       [(set (pc)
+       [(set (pc)
+               (plus:SI (match_operand:SI 0 "register_operand" "r")
+               (plus:SI (match_operand:SI 0 "register_operand" "r")
+                       (match_operand:SI 1 "const_int_operand" "M")))]
+                       (match_operand:SI 1 "const_int_operand" "M")))]
+       ""
+       ""
+       "JMP    %1(%0)"
+       "JMP    %1(%0)"
+       [(set_attr "ccresult" "unchanged")])
+       [(set_attr "ccresult" "unchanged")])
+;;
+;;
+; cbranchsi4
+; cbranchsi4
+;;     Op 0 = the comparison operator (le,lt,eq,ne,gt,ge, and unsigned ltu,geu,etc.)
+;;     Op 0 = the comparison operator (le,lt,eq,ne,gt,ge, and unsigned ltu,geu,etc.)
+;;     Op 1&2 the operands of the compare instruction
+;;     Op 1&2 the operands of the compare instruction
+;;     Op 3 is the jump label
+;;     Op 3 is the jump label
+;;
+;;
+;;
+;;
+;;
+;;
+(define_expand "cbranchsi4"
+(define_expand "cbranchsi4"
+       [(set (reg:CC CC_REG) (compare:CC (match_operand:SI 1 "register_operand" "r")
+       [(set (reg:CC CC_REG) (compare:CC (match_operand:SI 1 "register_operand" "r")
+               (match_operand:SI 2 "zip_opb_operand_p" "rO")))
+               (match_operand:SI 2 "zip_opb_operand_p" "rO")))
+       (set (pc) (if_then_else (match_operator 0 "ordered_comparison_operator"
+       (set (pc) (if_then_else (match_operator 0 "ordered_comparison_operator"
+                       [(reg:CC CC_REG) (const_int 0)])
+                       [(reg:CC CC_REG) (const_int 0)])
+                       (label_ref (match_operand 3 "" ""))
+                       (label_ref (match_operand 3 "" ""))
+                       (pc)))]
+                       (pc)))]
+       ""
+       ""
+       {
+       {
+               if (true) {
+               if (true) {
+               //; extern void zip_debug_rtx_pfx(const char *, const_rtx);
+               //; extern void zip_debug_rtx_pfx(const char *, const_rtx);
+               //; A few of the branch codes give us no end of difficulty when
+               //; A few of the branch codes give us no end of difficulty when
+               //; implementing.  Let's check for those codes, and swap the
+               //; implementing.  Let's check for those codes, and swap the
+               //; comparison to simplify them.
+               //; comparison to simplify them.
+               //; fprintf(stderr, "CBRANCH\n");
+               //; fprintf(stderr, "CBRANCH\n");
+               //; zip_debug_rtx_pfx("- CMP: ", operands[0]);
+               //; zip_debug_rtx_pfx("- CMP: ", operands[0]);
+               //; zip_debug_rtx_pfx("- A  : ", operands[1]);
+               //; zip_debug_rtx_pfx("- A  : ", operands[1]);
+               //; zip_debug_rtx_pfx("- B  : ", operands[2]);
+               //; zip_debug_rtx_pfx("- B  : ", operands[2]);
+               //; zip_debug_rtx_pfx("- JMP: ", operands[3]);
+               //; zip_debug_rtx_pfx("- JMP: ", operands[3]);
+               //; Can we do better if we reverse some compares?
+               //; Can we do better if we reverse some compares?
+               //;
+               //;
+               //; We have GE, LT, LTU, and GEU conditions
+               //; We have GE, LT, LTU, and GEU conditions
+               //; Figure out how to create the other conditions from
+               //; Figure out how to create the other conditions from
+               //; these.
+               //; these.
+               if (GET_CODE(operands[0])==GTU) {
+               if (GET_CODE(operands[0])==GTU) {
+                       if (REG_P(operands[2])) {
+                       if (REG_P(operands[2])) {
+                               //; Reverse the comparison
+                               //; Reverse the comparison
+                               emit_insn(gen_cmpsi(operands[2],operands[1]));
+                               emit_insn(gen_cmpsi(operands[2],operands[1]));
+                               emit_jump_insn(gen_cbranch_jmp_ltu(operands[3]));
+                               emit_jump_insn(gen_cbranch_jmp_ltu(operands[3]));
+                               DONE;
+                               DONE;
+                       } else if ((CONST_INT_P(operands[2]))
+                       } else if ((CONST_INT_P(operands[2]))
+                               &&(INTVAL(operands[2])>-(1<<17)+2)) {
+                               &&(INTVAL(operands[2])>-(1<<17)+2)) {
+                               //; A >  B
+                               //; A >  B
+                               //; A >= B+1
+                               //; A >= B+1
+                               //; Add one to the integer constant,
+                               //; Add one to the integer constant,
+                               //; And use a GEU comparison
+                               //; And use a GEU comparison
+                               emit_insn(gen_cmpsi(operands[1],
+                               emit_insn(gen_cmpsi(operands[1],
+                                       GEN_INT(INTVAL(operands[2])+1)));
+                                       GEN_INT(INTVAL(operands[2])+1)));
+                               emit_jump_insn(gen_cbranch_jmp_geu(operands[3]));
+                               emit_jump_insn(gen_cbranch_jmp_geu(operands[3]));
+                               DONE;
+                               DONE;
+                       } else if ((CONST_INT_P(operands[2]))
+                       } else if ((CONST_INT_P(operands[2]))
+                               &&(can_create_pseudo_p())) {
+                               &&(can_create_pseudo_p())) {
+                                       rtx tmp = gen_reg_rtx(GET_MODE(operands[1]));
+                                       rtx tmp = gen_reg_rtx(GET_MODE(operands[1]));
+                                       emit_insn(gen_movsi(tmp,operands[2]));
+                                       emit_insn(gen_movsi(tmp,operands[2]));
+                                       emit_insn(gen_cmpsi(tmp,operands[1]));
+                                       emit_insn(gen_cmpsi(tmp,operands[1]));
+                                       emit_jump_insn(gen_cbranch_jmp_ltu(operands[3]));
+                                       emit_jump_insn(gen_cbranch_jmp_ltu(operands[3]));
+
+
+                               DONE;
+                               DONE;
+
+
+                       }
+                       }
+               } else if (GET_CODE(operands[0]) == LEU) {
+               } else if (GET_CODE(operands[0]) == LEU) {
+                       if (REG_P(operands[2])) {
+                       if (REG_P(operands[2])) {
+                               //; Reverse the comparison
+                               //; Reverse the comparison
+                               emit_insn(gen_cmpsi(operands[2],operands[1]));
+                               emit_insn(gen_cmpsi(operands[2],operands[1]));
+                               emit_jump_insn(gen_cbranch_jmp_geu(operands[3]));
+                               emit_jump_insn(gen_cbranch_jmp_geu(operands[3]));
+                               DONE;
+                               DONE;
+                       } else if ((CONST_INT_P(operands[2]))
+                       } else if ((CONST_INT_P(operands[2]))
+                               &&(INTVAL(operands[2])<(1<<17)-2)) {
+                               &&(INTVAL(operands[2])<(1<<17)-2)) {
+                               //; A <= B
+                               //; A <= B
+                               //; A <  B+1
+                               //; A <  B+1
+                               //; Add one to the integer constant,
+                               //; Add one to the integer constant,
+                               //; And use an LTU comparison
+                               //; And use an LTU comparison
+                               emit_insn(gen_cmpsi(operands[1],
+                               emit_insn(gen_cmpsi(operands[1],
+                                       GEN_INT(INTVAL(operands[2])+1)));
+                                       GEN_INT(INTVAL(operands[2])+1)));
+                               emit_jump_insn(gen_cbranch_jmp_ltu(operands[3]));
+                               emit_jump_insn(gen_cbranch_jmp_ltu(operands[3]));
+                               DONE;
+                               DONE;
+                       } else if ((CONST_INT_P(operands[2]))
+                       } else if ((CONST_INT_P(operands[2]))
+                               &&(can_create_pseudo_p())) {
+                               &&(can_create_pseudo_p())) {
+                                       rtx tmp = gen_reg_rtx(GET_MODE(operands[1]));
+                                       rtx tmp = gen_reg_rtx(GET_MODE(operands[1]));
+                                       emit_insn(gen_movsi(tmp,operands[2]));
+                                       emit_insn(gen_movsi(tmp,operands[2]));
+                                       emit_insn(gen_cmpsi(tmp,operands[1]));
+                                       emit_insn(gen_cmpsi(tmp,operands[1]));
+                                       emit_jump_insn(gen_cbranch_jmp_geu(operands[3]));
+                                       emit_jump_insn(gen_cbranch_jmp_geu(operands[3]));
+                               DONE;
+                               DONE;
+
+
+                       }
+                       }
+               } else if (GET_CODE(operands[0]) == LE) {
+               } else if (GET_CODE(operands[0]) == LE) {
+                       if (REG_P(operands[2])) {
+                       if (REG_P(operands[2])) {
+                               //; Reverse the comparison
+                               //; Reverse the comparison
+                               emit_insn(gen_cmpsi(operands[2],operands[1]));
+                               emit_insn(gen_cmpsi(operands[2],operands[1]));
+                               emit_jump_insn(gen_cbranch_jmp_gte(operands[3]));
+                               emit_jump_insn(gen_cbranch_jmp_gte(operands[3]));
+                               DONE;
+                               DONE;
+                       } else if ((CONST_INT_P(operands[2]))
+                       } else if ((CONST_INT_P(operands[2]))
+                               &&(INTVAL(operands[2])<(1<<17)-2)) {
+                               &&(INTVAL(operands[2])<(1<<17)-2)) {
+                               //; A <= B
+                               //; A <= B
+                               //; A <  B+1
+                               //; A <  B+1
+                               //; Add one to the integer constant,
+                               //; Add one to the integer constant,
+                               //; And use an LT comparison
+                               //; And use an LT comparison
+                               emit_insn(gen_cmpsi(operands[1],
+                               emit_insn(gen_cmpsi(operands[1],
+                                       GEN_INT(INTVAL(operands[2])+1)));
+                                       GEN_INT(INTVAL(operands[2])+1)));
+                               emit_jump_insn(gen_cbranch_jmp_lt(operands[3]));
+                               emit_jump_insn(gen_cbranch_jmp_lt(operands[3]));
+                               DONE;
+                               DONE;
+                       } else if ((CONST_INT_P(operands[2]))
+                       } else if ((CONST_INT_P(operands[2]))
+                               &&(can_create_pseudo_p())) {
+                               &&(can_create_pseudo_p())) {
+                               rtx tmp = gen_reg_rtx(GET_MODE(operands[1]));
+                               rtx tmp = gen_reg_rtx(GET_MODE(operands[1]));
+                                       emit_insn(gen_movsi(tmp,operands[2]));
+                                       emit_insn(gen_movsi(tmp,operands[2]));
+                                       emit_insn(gen_cmpsi(tmp,operands[1]));
+                                       emit_insn(gen_cmpsi(tmp,operands[1]));
+                                       emit_jump_insn(gen_cbranch_jmp_gte(operands[3]));
+                                       emit_jump_insn(gen_cbranch_jmp_gte(operands[3]));
+                               DONE;
+                               DONE;
+
+
+                       }
+                       }
+               } else if (GET_CODE(operands[0]) == GT) {
+               } else if (GET_CODE(operands[0]) == GT) {
+                       if (REG_P(operands[2])) {
+                       if (REG_P(operands[2])) {
+                               //; Reverse the comparison
+                               //; Reverse the comparison
+                               emit_insn(gen_cmpsi(operands[2],operands[1]));
+                               emit_insn(gen_cmpsi(operands[2],operands[1]));
+                               emit_jump_insn(gen_cbranch_jmp_lt(operands[3]));
+                               emit_jump_insn(gen_cbranch_jmp_lt(operands[3]));
+                               DONE;
+                               DONE;
+                       } else if ((CONST_INT_P(operands[2]))
+                       } else if ((CONST_INT_P(operands[2]))
+                               &&(INTVAL(operands[2])<(1<<17)-2)) {
+                               &&(INTVAL(operands[2])<(1<<17)-2)) {
+                               //; A >  B
+                               //; A >  B
+                               //; A >= B+1
+                               //; A >= B+1
+                               //; Add one to the integer constant,
+                               //; Add one to the integer constant,
+                               //; And use a GE comparison
+                               //; And use a GE comparison
+                               emit_insn(gen_cmpsi(operands[1],
+                               emit_insn(gen_cmpsi(operands[1],
+                                       GEN_INT(INTVAL(operands[2])+1)));
+                                       GEN_INT(INTVAL(operands[2])+1)));
+                               emit_jump_insn(gen_cbranch_jmp_gte(operands[3]));
+                               emit_jump_insn(gen_cbranch_jmp_gte(operands[3]));
+                               DONE;
+                               DONE;
+                       } else if ((CONST_INT_P(operands[2]))
+                       } else if ((CONST_INT_P(operands[2]))
+                                       &&(can_create_pseudo_p())) {
+                                       &&(can_create_pseudo_p())) {
+                               rtx tmp = gen_reg_rtx(GET_MODE(operands[1]));
+                               rtx tmp = gen_reg_rtx(GET_MODE(operands[1]));
+                               emit_insn(gen_movsi(tmp,operands[2]));
+                               emit_insn(gen_movsi(tmp,operands[2]));
+                               emit_insn(gen_cmpsi(tmp,operands[1]));
+                               emit_insn(gen_cmpsi(tmp,operands[1]));
+                               emit_jump_insn(gen_cbranch_jmp_lt(operands[3]));
+                               emit_jump_insn(gen_cbranch_jmp_lt(operands[3]));
+
+
+                               DONE;
+                               DONE;
+                       }
+                       }
+               }
+               }
+       }})
+       }})
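To see the constant rewrites above in action, take a signed greater-than against a small constant: there is no direct greater-than branch, so the expander bumps the constant by one and branches on GE instead. A hedged sketch (taken() is a placeholder function):

/* Hypothetical worked example: "a > 5" is rewritten as "a >= 6", so the
 * expander emits roughly CMP 6,Ra followed by BGE to the label. */
extern void taken(void);
void branch_demo(int a)
{
	if (a > 5)
		taken();
}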
+(define_insn "cbranch_jmp_eq"
+(define_insn "cbranch_jmp_eq"
+       [(set (pc) (if_then_else (eq (reg:CC CC_REG) (const_int 0))
+       [(set (pc) (if_then_else (eq (reg:CC CC_REG) (const_int 0))
+                (label_ref (match_operand 0 "" ""))
+                (label_ref (match_operand 0 "" ""))
+                (pc)))]
+                (pc)))]
+       ""
+       ""
+       "BZ\t%0"
+       "BZ\t%0"
+       [(set_attr "predicable" "no")
+       [(set_attr "predicable" "no")
+               (set_attr "ccresult" "unchanged")])
+               (set_attr "ccresult" "unchanged")])
+(define_insn "cbranch_jmp_neq"
+(define_insn "cbranch_jmp_neq"
+       [(set (pc) (if_then_else (ne (reg:CC CC_REG) (const_int 0))
+       [(set (pc) (if_then_else (ne (reg:CC CC_REG) (const_int 0))
+                (label_ref (match_operand 0 "" ""))
+                (label_ref (match_operand 0 "" ""))
+                (pc)))]
+                (pc)))]
+       ""
+       ""
+       "BNZ\t%0"
+       "BNZ\t%0"
+       [(set_attr "predicable" "no")
+       [(set_attr "predicable" "no")
+               (set_attr "ccresult" "unchanged")])
+               (set_attr "ccresult" "unchanged")])
+(define_insn "cbranch_jmp_lt"
+(define_insn "cbranch_jmp_lt"
+       [(set (pc) (if_then_else (lt (reg:CC CC_REG) (const_int 0))
+       [(set (pc) (if_then_else (lt (reg:CC CC_REG) (const_int 0))
+                (label_ref (match_operand 0 "" ""))
+                (label_ref (match_operand 0 "" ""))
+                (pc)))]
+                (pc)))]
+       ""
+       ""
+       "BLT\t%0"
+       "BLT\t%0"
+       [(set_attr "predicable" "no")
+       [(set_attr "predicable" "no")
+               (set_attr "ccresult" "unchanged")])
+               (set_attr "ccresult" "unchanged")])
+(define_insn "cbranch_jmp_le"
+(define_insn "cbranch_jmp_le"
+       [(set (pc) (if_then_else (le (reg:CC CC_REG) (const_int 0))
+       [(set (pc) (if_then_else (le (reg:CC CC_REG) (const_int 0))
+                (label_ref (match_operand 0 "" ""))
+                (label_ref (match_operand 0 "" ""))
+                (pc)))]
+                (pc)))]
+       ""
+       ""
+       "BLT\t%0\n\tBZ\t%0"
+       "BLT\t%0\n\tBZ\t%0"
+       [(set_attr "predicable" "no")
+       [(set_attr "predicable" "no")
+               (set_attr "ccresult" "unchanged")])
+               (set_attr "ccresult" "unchanged")])
+(define_insn "cbranch_jmp_gt"
+(define_insn "cbranch_jmp_gt"
+       [(set (pc) (if_then_else (gt (reg:CC CC_REG) (const_int 0))
+       [(set (pc) (if_then_else (gt (reg:CC CC_REG) (const_int 0))
+                (label_ref (match_operand 0 "" ""))
+                (label_ref (match_operand 0 "" ""))
+                (pc)))]
+                (pc)))]
+       ""
+       ""
+       "BZ\t.Lgt%=\n\tBGE\t%0\n\t.Lgt%=:"
+       "BZ\t.Lgt%=\n\tBGE\t%0\n\t.Lgt%=:"
+       [(set_attr "predicable" "no")
+       [(set_attr "predicable" "no")
+               (set_attr "ccresult" "unchanged")])
+               (set_attr "ccresult" "unchanged")])
+(define_insn "cbranch_jmp_gte"
+(define_insn "cbranch_jmp_gte"
+       [(set (pc) (if_then_else (ge (reg:CC CC_REG) (const_int 0))
+       [(set (pc) (if_then_else (ge (reg:CC CC_REG) (const_int 0))
+                (label_ref (match_operand 0 "" ""))
+                (label_ref (match_operand 0 "" ""))
+                (pc)))]
+                (pc)))]
+       ""
+       ""
+       "BGE\t%0"
+       "BGE\t%0"
+       [(set_attr "predicable" "no")
+       [(set_attr "predicable" "no")
+               (set_attr "ccresult" "unchanged")])
+               (set_attr "ccresult" "unchanged")])
+(define_insn "cbranch_jmp_ltu"
+(define_insn "cbranch_jmp_ltu"
+       [(set (pc) (if_then_else (ltu (reg:CC CC_REG) (const_int 0))
+       [(set (pc) (if_then_else (ltu (reg:CC CC_REG) (const_int 0))
+                (label_ref (match_operand 0 "" ""))
+                (label_ref (match_operand 0 "" ""))
+                (pc)))]
+                (pc)))]
+       ""
+       ""
+       "BC\t%0"
+       "BC\t%0"
+       [(set_attr "predicable" "no")
+       [(set_attr "predicable" "no")
+               (set_attr "ccresult" "unchanged")])
+               (set_attr "ccresult" "unchanged")])
+(define_insn "cbranch_jmp_gtu"
+(define_insn "cbranch_jmp_gtu"
+       [(set (pc) (if_then_else (gtu (reg:CC CC_REG) (const_int 0))
+       [(set (pc) (if_then_else (gtu (reg:CC CC_REG) (const_int 0))
+                (label_ref (match_operand 0 "" ""))
+                (label_ref (match_operand 0 "" ""))
+                (pc)))]
+                (pc)))]
+       ""
+       ""
+       ;// We could flip the condition code, and then be able to jump.
+       ;// We could flip the condition code, and then be able to jump.
+       ;// The problem is that doing this adjusts the condition code, and
+       ;// The problem is that doing this adjusts the condition code, and
+       ;// we aren't allowed to do that here.
+       ;// we aren't allowed to do that here.
+       ;//
+       ;//
+       ;// The problem here is the equals.  What do you do if A=B?  Our new
+       ;// The problem here is the equals.  What do you do if A=B?  Our new
+       ;// condition tests for A>=B, not A>B.  So ... how do you get rid of
+       ;// condition tests for A>=B, not A>B.  So ... how do you get rid of
+       ;// the equals?  We do so here by branching around. (sigh)
+       ;// the equals?  We do so here by branching around. (sigh)
+       "BZ\t.Lgtu%=\n\tBNC\t%0\n.Lgtu%=:"
+       "BZ\t.Lgtu%=\n\tBNC\t%0\n.Lgtu%=:"
+       [(set_attr "predicable" "no")
+       [(set_attr "predicable" "no")
+               (set_attr "ccresult" "unknown")])
+               (set_attr "ccresult" "unknown")])
+(define_insn "cbranch_jmp_leu"
+(define_insn "cbranch_jmp_leu"
+       [(set (pc) (if_then_else (leu (reg:CC CC_REG) (const_int 0))
+       [(set (pc) (if_then_else (leu (reg:CC CC_REG) (const_int 0))
+                (label_ref (match_operand 0 "" ""))
+                (label_ref (match_operand 0 "" ""))
+                (pc)))]
+                (pc)))]
+       ""      ; Need to check for both LTU (i.e. C) and Z
+       ""      ; Need to check for both LTU (i.e. C) and Z
+       "BC\t%0
+       "BC\t%0
+       BZ\t%0"
+       BZ\t%0"
+       [(set_attr "predicable" "no")
+       [(set_attr "predicable" "no")
+               (set_attr "ccresult" "unchanged")])
+               (set_attr "ccresult" "unchanged")])
+(define_insn "cbranch_jmp_geu"
+(define_insn "cbranch_jmp_geu"
+       [(set (pc) (if_then_else (geu (reg:CC CC_REG) (const_int 0))
+       [(set (pc) (if_then_else (geu (reg:CC CC_REG) (const_int 0))
+                (label_ref (match_operand 0 "" ""))
+                (label_ref (match_operand 0 "" ""))
+                (pc)))]
+                (pc)))]
+       ""
+       ""
+       "BNC\t%0"
+       "BNC\t%0"
+       [(set_attr "predicable" "no")
+       [(set_attr "predicable" "no")
+               (set_attr "ccresult" "unchanged")])
+               (set_attr "ccresult" "unchanged")])
+;
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;; Looping constructs
+;; Looping constructs
+;;
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+;
+;
+;
+;
+(define_insn "decrement_and_branch_until_zero"
+(define_insn "decrement_and_branch_until_zero"
+       [(set (pc) (if_then_else
+       [(set (pc) (if_then_else
+               (ge (plus:SI (match_operand:SI 0 "register_operand" "+r,Q")
+               (ge (plus:SI (match_operand:SI 0 "register_operand" "+r,Q")
+                       (const_int -1)) (const_int 0))
+                       (const_int -1)) (const_int 0))
+               (label_ref (match_operand 1 "" ""))
+               (label_ref (match_operand 1 "" ""))
+               (pc)))
+               (pc)))
+       (set (match_dup 0) (plus:SI (match_dup 0) (const_int -1)))
+       (set (match_dup 0) (plus:SI (match_dup 0) (const_int -1)))
+       ;(set (reg:CC CC_REG)
+       ;(set (reg:CC CC_REG)
+               ;(compare:CC (minus:SI (match_dup 0) (const_int 1))
+               ;(compare:CC (minus:SI (match_dup 0) (const_int 1))
+                       ;(const_int 0)))
+                       ;(const_int 0)))
+       (clobber (match_scratch:SI 2 "=r,r"))
+       (clobber (match_scratch:SI 2 "=r,r"))
+       (clobber (reg:CC CC_REG))]
+       (clobber (reg:CC CC_REG))]
+       ""
+       ""
+       {
+       {
+               if (MEM_P(operands[0])) {
+               if (MEM_P(operands[0])) {
+                       //; We could also go searching for dead regs if
+                       //; We could also go searching for dead regs if
+                       //; necessary
+                       //; necessary
+                       return "LW %0,%2"
+                       return "LW %0,%2"
+                               "\t; decrement_and_branch_until_zero(MEM)\n"
+                               "\t; decrement_and_branch_until_zero(MEM)\n"
+                               "\tADD\t-1,%2\t\n"
+                               "\tADD\t-1,%2\t\n"
+                               "\tSW %2,%0\n"
+                               "\tSW %2,%0\n"
+                               "\tBLT\t.Ldec%=\n"
+                               "\tBLT\t.Ldec%=\n"
+                               "\tBRA\t%1\n"
+                               "\tBRA\t%1\n"
+                               ".Ldec%=:";
+                               ".Ldec%=:";
+               }
+               }
+               return "ADD\t-1,%0\t; decrement_and_branch_until_zero (REG)\n"
+               return "ADD\t-1,%0\t; decrement_and_branch_until_zero (REG)\n"
+                       "\tBLT\t.Ldec%=\n"
+                       "\tBLT\t.Ldec%=\n"
+                       "\tBRA\t%1\n"
+                       "\tBRA\t%1\n"
+                       ".Ldec%=:";
+                       ".Ldec%=:";
+       }
+       }
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
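For reference, a hedged sketch of the kind of count-down loop this dbra-style pattern is meant to match; whether the loop passes actually select it depends on their configuration.

/* Hypothetical example: the back edge of this loop can be expressed as
 * "subtract one and branch back while the result is still >= 0". */
extern void body(int);
void run(int n)
{
	int i;
	for (i = n; i >= 0; i--)
		body(i);
}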
+;
+;
+;
+;
+; Requires TARGET_CAN_USE_DOLOOP_P to be set appropriately in order to use
+; Requires TARGET_CAN_USE_DOLOOP_P to be set appropriately in order to use
+;
+;
+;
+;
+;(define_insn "doloop_end"
+;(define_insn "doloop_end"
+       ;[(set (pc)
+       ;[(set (pc)
+               ;(if_then_else
+               ;(if_then_else
+                       ;(ne (plus:SI (match_operand:SI 0 "register_operand" "+r")
+                       ;(ne (plus:SI (match_operand:SI 0 "register_operand" "+r")
+                               ;;(const_int -1)) (const_int 0))
+                               ;;(const_int -1)) (const_int 0))
+                       ;(label_ref (match_operand 1 "" ""))
+                       ;(label_ref (match_operand 1 "" ""))
+                       ;(pc)))
+                       ;(pc)))
+       ;(set (match_dup 0) (plus:SI (match_dup 0) (const_int -1)))
+       ;(set (match_dup 0) (plus:SI (match_dup 0) (const_int -1)))
+       ;; (set (reg:CC CC_REG)
+       ;; (set (reg:CC CC_REG)
+               ;; (compare:CC (minus:SI (match_dup 0) (const_int 1))
+               ;; (compare:CC (minus:SI (match_dup 0) (const_int 1))
+                       ;; (const_int 0)))
+                       ;; (const_int 0)))
+       ;(clobber (reg:CC CC_REG))]
+       ;(clobber (reg:CC CC_REG))]
+       ;"(reload_completed)"
+       ;"(reload_completed)"
+       ;"ADD\t-1,%0\t; doloop_end\n\tBZ\t.Lloop%=\n\tBRA\t%1\n.Lloop%=:"
+       ;"ADD\t-1,%0\t; doloop_end\n\tBZ\t.Lloop%=\n\tBRA\t%1\n.Lloop%=:"
+       ;[(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+       ;[(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
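If the commented-out doloop_end above were re-enabled, the port would also have to opt in through the target hook the comment mentions. A minimal sketch for the port's C file, assuming GCC's stock can_use_doloop_if_innermost helper (declared in targhooks.h) is acceptable here:

/* Hypothetical zip.c fragment: allow the doloop transformation only for
 * innermost loops, using the generic helper shipped with GCC. */
#undef	TARGET_CAN_USE_DOLOOP_P
#define	TARGET_CAN_USE_DOLOOP_P	can_use_doloop_if_innermost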
+;
+;
+; Since we have a doloop_end, we must also have a doloop_begin.  Since the
+; Since we have a doloop_end, we must also have a doloop_begin.  Since the
+; ZipCPU has no special begin looping instruction, we'll simply define this
+; ZipCPU has no special begin looping instruction, we'll simply define this
+; as a null instruction.
+; as a null instruction.
+;
+;
+; (define_expand "doloop_begin" [(const_int 0)] "(0)")
+; (define_expand "doloop_begin" [(const_int 0)] "(0)")
+;
+;
+;
+;
+;
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;; Subroutine call
+;; Subroutine call
+;;
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+;
+;
+; There are two types of calls: "call" and "call_value".
+; There are two types of calls: "call" and "call_value".
+;
+;
+; Each of these types of calls are then expanded into one of:
+; Each of these types of calls are then expanded into one of:
+;
+;
+;      _const          - A call to a constant address, such as a symbol
+;      _const          - A call to a constant address, such as a symbol
+;                      reference or a fixed location
+;                      reference or a fixed location
+;
+;
+;      _label          - This should be the same as _const, except that for
+;      _label          - This should be the same as _const, except that for
+;                      some reason the RTL and matching rules are separate.
+;                      some reason the RTL and matching rules are separate.
+;                      Hence we have a separate rule for this.
+;                      Hence we have a separate rule for this.
+;
+;
+;      _mem            - The memory address we wish to jump to is stored in
+;      _mem            - The memory address we wish to jump to is stored in
+;                      memory somewhere, and we have only a pointer.  In this
+;                      memory somewhere, and we have only a pointer.  In this
+;                      case, we load that pointer straight to the PC and go.
+;                      case, we load that pointer straight to the PC and go.
+;
+;
+;      _var            - The address to jump to is given as an offset to a
+;      _var            - The address to jump to is given as an offset to a
+;                      register, such as X+R3.  This is an indirect jump.
+;                      register, such as X+R3.  This is an indirect jump.
+;                      Although we support it, it does require different RTL
+;                      Although we support it, it does require different RTL
+;                      code.
+;                      code.
+;
+;
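Hedged C examples of the call shapes the expander below has to sort into those cases; which path each form actually takes depends on how the address is legitimized, so the mapping in the comments is approximate.

/* Hypothetical examples (names are illustrative):
 *   direct()   - constant symbol address: the _const / _label case.
 *   (*fnptr)() - address already in a register: the _var case.
 *   table[i]() - address fetched from memory, which may hit the _mem
 *                path (currently flagged as an error by the expander). */
extern void direct(void);
void dispatch(void (**table)(void), void (*fnptr)(void), int i)
{
	direct();
	fnptr();
	table[i]();
}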
+(define_expand "call"
+(define_expand "call"
+       [(call (match_operand 0 "" "")
+       [(call (match_operand 0 "" "")
+               (match_operand 1 "" ""))]
+               (match_operand 1 "" ""))]
+       ""
+       ""
+       {
+       {
+               if (MEM_P(operands[0])) {
+               if (MEM_P(operands[0])) {
+                       ;// extern void zip_debug_rtx(const_rtx);
+                       ;// extern void zip_debug_rtx(const_rtx);
+                       ;//
+                       ;//
+                       ;// fprintf(stderr, "CALL: ");
+                       ;// fprintf(stderr, "CALL: ");
+                       ;// zip_debug_rtx(operands[0]);
+                       ;// zip_debug_rtx(operands[0]);
+                       ;//
+                       ;//
+                       ;//
+                       ;//
+                       ;// This should always be the case
+                       ;// This should always be the case
+                       rtx addr = XEXP(operands[0],0);
+                       rtx addr = XEXP(operands[0],0);
+                       if (zip_const_address_operand_p(addr, SImode)) {
+                       if (zip_const_address_operand_p(addr, SImode)) {
+                               //; fprintf(stderr, "Generating gen_void_call_const()\n");
+                               //; fprintf(stderr, "Generating gen_void_call_const()\n");
+                               emit_call_insn(gen_void_call_const(addr,
+                               emit_call_insn(gen_void_call_const(addr,
+                                               operands[1]));
+                                               operands[1]));
+                       } else if ((MEM_P(addr))&&(zip_address_operand(
+                       } else if ((MEM_P(addr))&&(zip_address_operand(
+                                                       XEXP(addr,0)))) {
+                                                       XEXP(addr,0)))) {
+                               fprintf(stderr, "ERR: ZIP.MD::CALL INDIRECT\n");
+                               fprintf(stderr, "ERR: ZIP.MD::CALL INDIRECT\n");
+                               emit_call_insn(gen_void_call_mem(XEXP(addr,0),
+                               emit_call_insn(gen_void_call_mem(XEXP(addr,0),
+                                                                operands[1]));
+                                                                operands[1]));
+                               gcc_assert(0);
+                               gcc_assert(0);
+                       } else {
+                       } else {
+                               emit_call_insn(gen_void_call_var(operands[0],
+                               emit_call_insn(gen_void_call_var(operands[0],
+                                                                operands[1]));
+                                                                operands[1]));
+                       }
+                       }
+                       DONE;
+                       DONE;
+               } else FAIL;
+               } else FAIL;
+       })
+       })
+;
+;
+(define_expand "sibcall"
+(define_expand "sibcall"
+       [(call (mem:SI (match_operand 0 "zip_const_address_operand_p" ""))
+       [(call (mem:SI (match_operand 0 "zip_const_address_operand_p" ""))
+               (match_operand 1 "" ""))
+               (match_operand 1 "" ""))
+       (use (match_operand 2 "" ""))
+       (use (match_operand 2 "" ""))
+       (use (reg:SI RTN_REG))
+       (use (reg:SI RTN_REG))
+       (simple_return)]
+       (simple_return)]
+       ""
+       ""
+       {
+       {
+               if (MEM_P(operands[0])) {
+               if (MEM_P(operands[0])) {
+                       ;// extern void zip_debug_rtx(const_rtx);
+                       ;// extern void zip_debug_rtx(const_rtx);
+                       ;//
+                       ;//
+                       ;// fprintf(stderr, "CALL: ");
+                       ;// fprintf(stderr, "CALL: ");
+                       ;// zip_debug_rtx(operands[0]);
+                       ;// zip_debug_rtx(operands[0]);
+                       ;//
+                       ;//
+                       ;//
+                       ;//
+                       ;// This should always be the case
+                       ;// This should always be the case
+                       rtx addr = XEXP(operands[0],0);
+                       rtx addr = XEXP(operands[0],0);
+                       if (zip_const_address_operand_p(addr, SImode)) {
+                       if (zip_const_address_operand_p(addr, SImode)) {
+                               //; fprintf(stderr, "Generating gen_void_call_const()\n");
+                               //; fprintf(stderr, "Generating gen_void_call_const()\n");
+                               emit_call_insn(gen_void_sibcall_const(addr,
+                               emit_call_insn(gen_void_sibcall_const(addr,
+                                               operands[1]));
+                                               operands[1]));
+                       } else if ((MEM_P(addr))&&(zip_address_operand(
+                       } else if ((MEM_P(addr))&&(zip_address_operand(
+                                                       XEXP(addr,0)))) {
+                                                       XEXP(addr,0)))) {
+                               fprintf(stderr, "ERR: ZIP.MD::SIBCALL INDIRECT\n");
+                               fprintf(stderr, "ERR: ZIP.MD::SIBCALL INDIRECT\n");
+                               emit_call_insn(gen_void_sibcall_mem(XEXP(addr,0),
+                               emit_call_insn(gen_void_sibcall_mem(XEXP(addr,0),
+                                                                operands[1]));
+                                                                operands[1]));
+                               gcc_assert(0);
+                               gcc_assert(0);
+                       } else {
+                       } else {
+                               emit_call_insn(gen_void_sibcall_var(operands[0],
+                               emit_call_insn(gen_void_sibcall_var(operands[0],
+                                                                operands[1]));
+                                                                operands[1]));
+                       }
+                       }
+                       DONE;
+                       DONE;
+               } else FAIL;
+               } else FAIL;
+       }) ; "BAR\t%0\n"
+       }) ; "BAR\t%0\n"
+;
+;
+(define_insn "void_sibcall_const"
+(define_insn "void_sibcall_const"
+       [(call (mem:SI (match_operand:SI 0 "zip_const_address_operand_p" ""))
+       [(call (mem:SI (match_operand:SI 0 "zip_const_address_operand_p" ""))
+                       (match_operand 1 "const_int_operand" "n"))
+                       (match_operand 1 "const_int_operand" "n"))
+               (use (reg:SI RTN_REG))
+               (use (reg:SI RTN_REG))
+               (clobber (reg:CC CC_REG))
+               (clobber (reg:CC CC_REG))
+               (simple_return)]
+               (simple_return)]
+       ""
+       ""
+       "BRA\t%0"
+       "BRA\t%0"
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+(define_insn "void_sibcall_mem"
+(define_insn "void_sibcall_mem"
+       [(call (mem:SI (match_operand:SI 0 "zip_memory_operand_p" "Q"))
+       [(call (mem:SI (match_operand:SI 0 "zip_memory_operand_p" "Q"))
+                       (match_operand 1 "const_int_operand" "n"))
+                       (match_operand 1 "const_int_operand" "n"))
+               (use (reg:SI RTN_REG))
+               (use (reg:SI RTN_REG))
+               (clobber (reg:CC CC_REG))
+               (clobber (reg:CC CC_REG))
+               (simple_return)]
+               (simple_return)]
+       ""
+       ""
+       "LW\t%0,PC"
+       "LW\t%0,PC"
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
+; #warning "This predicate is appropriate for non-moves, but not for JMPs"
+; #warning "This predicate is appropriate for non-moves, but not for JMPs"
+(define_insn "void_sibcall_var"
+(define_insn "void_sibcall_var"
+       [(call (match_operand:SI 0 "zip_memory_operand_p" "")
+       [(call (match_operand:SI 0 "zip_memory_operand_p" "")
+                       (match_operand 1 "const_int_operand" "n"))
+                       (match_operand 1 "const_int_operand" "n"))
+               (use (reg:SI RTN_REG))
+               (use (reg:SI RTN_REG))
+               (clobber (reg:CC CC_REG))
+               (clobber (reg:CC CC_REG))
+               (simple_return)]
+               (simple_return)]
+       ""
+       ""
+       "JMP\t%0"
+       "JMP\t%0"
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
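+;
+; (Editor's sketch, not part of the original patch.)  For a tail call such as
+;      extern int bar(int);
+;      int foo(int x) { return bar(x); }
+; these sibcall patterns let the final call be emitted as a plain
+;      BRA     bar
+; so that bar() returns directly to foo's caller, instead of a JSR bar
+; followed later by a RETN.
+;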
+;
+;
+(define_expand "sibcall_value"
+(define_expand "sibcall_value"
+       [(set (match_operand 0 "register_operand" "")
+       [(set (match_operand 0 "register_operand" "")
+               (call (mem:SI
+               (call (mem:SI
+                       (match_operand 1 "zip_const_address_operand_p" ""))
+                       (match_operand 1 "zip_const_address_operand_p" ""))
+               (match_operand 2 "" "")))
+               (match_operand 2 "" "")))
+       (use (match_operand 3 "" ""))
+       (use (match_operand 3 "" ""))
+       (use (reg:SI RTN_REG))
+       (use (reg:SI RTN_REG))
+       (clobber (reg:CC CC_REG))
+       (clobber (reg:CC CC_REG))
+       (simple_return)]
+       (simple_return)]
+       ""
+       ""
+       {
+       {
+               if (MEM_P(operands[1])) {
+               if (MEM_P(operands[1])) {
+                       ;// extern void zip_debug_rtx(const_rtx);
+                       ;// extern void zip_debug_rtx(const_rtx);
+                       ;//
+                       ;//
+                       ;// fprintf(stderr, "SIBCALL/V: ");
+                       ;// fprintf(stderr, "SIBCALL/V: ");
+                       ;// zip_debug_rtx(operands[1]);
+                       ;// zip_debug_rtx(operands[1]);
+                       ;//
+                       ;//
+                       ;//
+                       ;//
+                       ;// This should always be the case
+                       ;// This should always be the case
+                       rtx addr = XEXP(operands[1],0);
+                       rtx addr = XEXP(operands[1],0);
+                       if (zip_const_address_operand_p(addr, SImode)) {
+                       if (zip_const_address_operand_p(addr, SImode)) {
+                               emit_call_insn(gen_reg_sibcall_const(operands[0], addr, operands[2]));
+                               emit_call_insn(gen_reg_sibcall_const(operands[0], addr, operands[2]));
+                       } else if ((MEM_P(addr))&&(zip_address_operand(XEXP(addr,0)))) {
+                       } else if ((MEM_P(addr))&&(zip_address_operand(XEXP(addr,0)))) {
+                               fprintf(stderr, "ERR: ZIP.MD::SIBCALL-VALUE() INDIRECT\n");
+                               fprintf(stderr, "ERR: ZIP.MD::SIBCALL-VALUE() INDIRECT\n");
+                               emit_call_insn(gen_reg_sibcall_mem(operands[0], XEXP(addr,0), operands[2]));
+                               emit_call_insn(gen_reg_sibcall_mem(operands[0], XEXP(addr,0), operands[2]));
+                               gcc_assert(0);
+                               gcc_assert(0);
+                       } else {
+                       } else {
+                               emit_call_insn(gen_reg_sibcall_var(operands[0], operands[1], operands[2]));
+                               emit_call_insn(gen_reg_sibcall_var(operands[0], operands[1], operands[2]));
+                       }
+                       }
+                       DONE;
+                       DONE;
+               } else FAIL;
+               } else FAIL;
+       })
+       })
+;
+;
+;
+;
+;
+;
+;
+;
+; How do we want to do this better?
+; How do we want to do this better?
+;      Replace the RTL w/
+;      Replace the RTL w/
+;              return_label= gen_label_rtx();
+;              return_label= gen_label_rtx();
+;              emit_movsi(gen_rtx_REG(zip_R0),plus_constant(
+;              emit_movsi(gen_rtx_REG(zip_R0),plus_constant(
+;                      gen_rtx_REG(zip_PC),return_label));
+;                      gen_rtx_REG(zip_PC),return_label));
+;              emit_jump(label_rtx(
+;              emit_jump(label_rtx(
+;
+;
+;              emit_label(return_label);
+;              emit_label(return_label);
+;
+;
+; The problem is: we can't!  GCC distinguishes between jumps and calls when
+; The problem is: we can't!  GCC distinguishes between jumps and calls when
+; optimizing, and it doesn't see the need to keep the label around.  Thus, the
+; optimizing, and it doesn't see the need to keep the label around.  Thus, the
+; label gets removed and the call gets lost.  Hence we do it this way (below).
+; label gets removed and the call gets lost.  Hence we do it this way (below).
+; I'll probably bastardize a means of getting a new codelabel that GCC doesn't
+; I'll probably bastardize a means of getting a new codelabel that GCC doesn't
+; recognize as such, but for now we'll use .Lcall# as our label.
+; recognize as such, but for now we'll use .Lcall# as our label.
+;
+;
+(define_insn "void_call_const"
+(define_insn "void_call_const"
+       [(call (mem:SI (match_operand:SI 0 "zip_const_address_operand_p" ""))
+       [(call (mem:SI (match_operand:SI 0 "zip_const_address_operand_p" ""))
+                       (match_operand 1 "const_int_operand" "n"))
+                       (match_operand 1 "const_int_operand" "n"))
+               (clobber (reg:SI RTN_REG))
+               (clobber (reg:SI RTN_REG))
+               (clobber (reg:CC CC_REG))]
+               (clobber (reg:CC CC_REG))]
+       ""
+       ""
+       "JSR\t%0"
+       "JSR\t%0"
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+(define_insn "void_call_mem"
+(define_insn "void_call_mem"
+       [(call (mem:SI (match_operand:SI 0 "zip_memory_operand_p" "Q"))
+       [(call (mem:SI (match_operand:SI 0 "zip_memory_operand_p" "Q"))
+                       (match_operand 1 "const_int_operand" "n"))
+                       (match_operand 1 "const_int_operand" "n"))
+               (clobber (reg:SI RTN_REG))
+               (clobber (reg:SI RTN_REG))
+               (clobber (reg:CC CC_REG))]
+               (clobber (reg:CC CC_REG))]
+       ""
+       ""
+       "MOV    .Lcall%=(PC),R0\;LW\t%0,PC\n.Lcall%=:"
+       "MOV    .Lcall%=(PC),R0\;LW\t%0,PC\n.Lcall%=:"
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
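+;
+; (Editor's sketch; the 4(R12) source below is only an example.)  A call
+; through a code pointer still held in memory therefore expands to something
+; like:
+;
+;      MOV     .Lcall1(PC),R0          ; R0 <- the return address
+;      LW      4(R12),PC               ; load the target straight into the PC
+; .Lcall1:                             ; execution resumes here upon return
+;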
+;
+;
+; #warning "This predicate is appropriate for non-moves, but not for JMPs"
+; #warning "This predicate is appropriate for non-moves, but not for JMPs"
+(define_insn "void_call_var"
+(define_insn "void_call_var"
+       [(call (match_operand:SI 0 "zip_memory_operand_p" "")
+       [(call (match_operand:SI 0 "zip_memory_operand_p" "")
+                       (match_operand 1 "const_int_operand" "n"))
+                       (match_operand 1 "const_int_operand" "n"))
+               (clobber (reg:SI RTN_REG))
+               (clobber (reg:SI RTN_REG))
+               (clobber (reg:CC CC_REG))]
+               (clobber (reg:CC CC_REG))]
+       ""
+       ""
+       {
+       {
+               if (REG_P(operands[0]))
+               if (REG_P(operands[0]))
+                       return "JSR\t(%0)";
+                       return "JSR\t(%0)";
+               else
+               else
+                       return "JSR\t%0";
+                       return "JSR\t%0";
+       }
+       }
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
+;
+;
+(define_expand "call_value"
+(define_expand "call_value"
+       [(parallel [(set (match_operand 0 "register_operand")
+       [(parallel [(set (match_operand 0 "register_operand")
+               (call (match_operand:SI 1 "" "")
+               (call (match_operand:SI 1 "" "")
+                       (match_operand 2 "const_int_operand" "n")))
+                       (match_operand 2 "const_int_operand" "n")))
+       (clobber (reg:SI RTN_REG))
+       (clobber (reg:SI RTN_REG))
+       (clobber (reg:CC CC_REG))])]
+       (clobber (reg:CC CC_REG))])]
+       ""
+       ""
+       {
+       {
+               if (MEM_P(operands[1])) {
+               if (MEM_P(operands[1])) {
+                       ;// extern void zip_debug_rtx(const_rtx);
+                       ;// extern void zip_debug_rtx(const_rtx);
+                       ;//
+                       ;//
+                       ;// fprintf(stderr, "CALL/V: ");
+                       ;// fprintf(stderr, "CALL/V: ");
+                       ;// zip_debug_rtx(operands[1]);
+                       ;// zip_debug_rtx(operands[1]);
+                       ;//
+                       ;//
+                       ;//
+                       ;//
+                       //; This should always be the case
+                       //; This should always be the case
+                       rtx addr = XEXP(operands[1],0);
+                       rtx addr = XEXP(operands[1],0);
+                       if (zip_const_address_operand_p(addr, SImode)) {
+                       if (zip_const_address_operand_p(addr, SImode)) {
+                               //; fprintf(stderr, "Generating gen_reg_call_const()\n");
+                               //; fprintf(stderr, "Generating gen_reg_call_const()\n");
+                               emit_call_insn(gen_reg_call_const(operands[0], addr, operands[2]));
+                               emit_call_insn(gen_reg_call_const(operands[0], addr, operands[2]));
+                       } else if ((MEM_P(addr))&&(zip_address_operand(XEXP(addr,0)))) {
+                       } else if ((MEM_P(addr))&&(zip_address_operand(XEXP(addr,0)))) {
+                               fprintf(stderr, "ERR: ZIP.MD::CALL-VALUE() INDIRECT\n");
+                               fprintf(stderr, "ERR: ZIP.MD::CALL-VALUE() INDIRECT\n");
+                               emit_call_insn(gen_reg_call_mem(operands[0], XEXP(addr,0), operands[2]));
+                               emit_call_insn(gen_reg_call_mem(operands[0], XEXP(addr,0), operands[2]));
+                               gcc_assert(0);
+                               gcc_assert(0);
+                       } else {
+                       } else {
+                               //; fprintf(stderr, "ZIP.MD::CALL-VALUE() INDIRECT\n");
+                               //; fprintf(stderr, "ZIP.MD::CALL-VALUE() INDIRECT\n");
+                               emit_call_insn(gen_reg_call_var(operands[0], operands[1], operands[2]));
+                               emit_call_insn(gen_reg_call_var(operands[0], operands[1], operands[2]));
+                       }
+                       }
+                       DONE;
+                       DONE;
+               } else FAIL;
+               } else FAIL;
+       })
+       })
+(define_insn "reg_call_const"
+(define_insn "reg_call_const"
+       [(set (match_operand 0 "register_operand" "")
+       [(set (match_operand 0 "register_operand" "")
+               (call (mem:SI (match_operand:SI 1 "zip_const_address_operand_p" ""))
+               (call (mem:SI (match_operand:SI 1 "zip_const_address_operand_p" ""))
+                       (match_operand 2 "const_int_operand" "n")))
+                       (match_operand 2 "const_int_operand" "n")))
+               (clobber (reg:SI RTN_REG))
+               (clobber (reg:SI RTN_REG))
+               (clobber (reg:CC CC_REG))]
+               (clobber (reg:CC CC_REG))]
+       ""
+       ""
+       "JSR\t%1"
+       "JSR\t%1"
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+(define_insn "reg_call_mem"
+(define_insn "reg_call_mem"
+       [(set (match_operand 0 "register_operand" "")
+       [(set (match_operand 0 "register_operand" "")
+               (call (mem:SI (match_operand:SI 1 "zip_memory_operand_p" "Q"))
+               (call (mem:SI (match_operand:SI 1 "zip_memory_operand_p" "Q"))
+                       (match_operand 2 "const_int_operand" "n")))
+                       (match_operand 2 "const_int_operand" "n")))
+               (clobber (reg:SI RTN_REG))
+               (clobber (reg:SI RTN_REG))
+               (clobber (reg:CC CC_REG))]
+               (clobber (reg:CC CC_REG))]
+       ""
+       ""
+       "MOV    .Lcall%=(PC),R0\t; CALL MEM (untested)\n\tLW\t%1,PC\n.Lcall%=:"
+       "MOV    .Lcall%=(PC),R0\t; CALL MEM (untested)\n\tLW\t%1,PC\n.Lcall%=:"
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
+; #warning "This predicate is appropriate for non-moves, but not for JMPs"
+; #warning "This predicate is appropriate for non-moves, but not for JMPs"
+(define_insn "reg_call_var"
+(define_insn "reg_call_var"
+       [(set (match_operand 0 "register_operand" "")
+       [(set (match_operand 0 "register_operand" "")
+               (call (match_operand:SI 1 "zip_memory_operand_p" "")
+               (call (match_operand:SI 1 "zip_memory_operand_p" "")
+                       (match_operand 2 "const_int_operand" "n")))
+                       (match_operand 2 "const_int_operand" "n")))
+               (clobber (reg:SI RTN_REG))
+               (clobber (reg:SI RTN_REG))
+               (clobber (reg:CC CC_REG))]
+               (clobber (reg:CC CC_REG))]
+       ""
+       ""
+       {
+       {
+               ;// extern void zip_debug_rtx(const_rtx);
+               ;// extern void zip_debug_rtx(const_rtx);
+
+
+               ;// fprintf(stderr, "CALL-V/REG: ");
+               ;// fprintf(stderr, "CALL-V/REG: ");
+               ;// zip_debug_rtx(operands[0]);
+               ;// zip_debug_rtx(operands[0]);
+
+
+               if (REG_P(operands[1]))
+               if (REG_P(operands[1]))
+                       return "JSR\t(%1)";
+                       return "JSR\t(%1)";
+               else
+               else
+                       return "JSR\t%1";
+                       return "JSR\t%1";
+       }
+       }
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
+;
+;
+;
+;
+(define_insn "reg_sibcall_const"
+(define_insn "reg_sibcall_const"
+       [(set (match_operand 0 "register_operand" "")
+       [(set (match_operand 0 "register_operand" "")
+               (call (mem:SI (match_operand:SI 1 "zip_const_address_operand_p" ""))
+               (call (mem:SI (match_operand:SI 1 "zip_const_address_operand_p" ""))
+                       (match_operand 2 "const_int_operand" "n")))
+                       (match_operand 2 "const_int_operand" "n")))
+               (use (reg:SI RTN_REG))
+               (use (reg:SI RTN_REG))
+               (clobber (reg:CC CC_REG))
+               (clobber (reg:CC CC_REG))
+               (simple_return)]
+               (simple_return)]
+       ""
+       ""
+       "BRA\t%1"
+       "BRA\t%1"
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+(define_insn "reg_sibcall_mem"
+(define_insn "reg_sibcall_mem"
+       [(set (match_operand 0 "register_operand" "")
+       [(set (match_operand 0 "register_operand" "")
+               (call (mem:SI (match_operand:SI 1 "zip_memory_operand_p" "Q"))
+               (call (mem:SI (match_operand:SI 1 "zip_memory_operand_p" "Q"))
+                       (match_operand 2 "const_int_operand" "n")))
+                       (match_operand 2 "const_int_operand" "n")))
+               (use (reg:SI RTN_REG))
+               (use (reg:SI RTN_REG))
+               (clobber (reg:CC CC_REG))
+               (clobber (reg:CC CC_REG))
+               (simple_return)]
+               (simple_return)]
+       ""
+       ""
+       "LW\t%1,PC"
+       "LW\t%1,PC"
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
+; #warning "This predicate is appropriate for non-moves, but not for JMPs"
+; #warning "This predicate is appropriate for non-moves, but not for JMPs"
+(define_insn "reg_sibcall_var"
+(define_insn "reg_sibcall_var"
+       [(set (match_operand 0 "register_operand" "")
+       [(set (match_operand 0 "register_operand" "")
+               (call (match_operand:SI 1 "zip_memory_operand_p" "")
+               (call (match_operand:SI 1 "zip_memory_operand_p" "")
+                       (match_operand 2 "const_int_operand" "n")))
+                       (match_operand 2 "const_int_operand" "n")))
+               (use (reg:SI RTN_REG))
+               (use (reg:SI RTN_REG))
+               (clobber (reg:CC CC_REG))
+               (clobber (reg:CC CC_REG))
+               (simple_return)]
+               (simple_return)]
+       ""
+       ""
+       {
+       {
+               if (REG_P(operands[1]))
+               if (REG_P(operands[1]))
+                       return "JMP\t(%1); REG_SIBCALL_VAR";
+                       return "JMP\t(%1); REG_SIBCALL_VAR";
+               else
+               else
+                       return "JMP\t%1";
+                       return "JMP\t%1";
+       }
+       }
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
+;
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;; Frame manipulation RTX
+;; Frame manipulation RTX
+;;
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+;
+;
+;
+;
+(define_expand "prologue"
+(define_expand "prologue"
+       [(const_int 0)]
+       [(const_int 0)]
+       ""
+       ""
+       "{ zip_expand_prologue(); DONE; }")
+       "{ zip_expand_prologue(); DONE; }")
+(define_expand "sibcall_epilogue"
+(define_expand "sibcall_epilogue"
+       [(return)]
+       [(return)]
+       ""
+       ""
+       "{ zip_sibcall_epilogue(); DONE; }")
+       "{ zip_sibcall_epilogue(); DONE; }")
+(define_expand "epilogue"
+(define_expand "epilogue"
+       [(return)]
+       [(return)]
+       ""
+       ""
+       "{ zip_expand_epilogue(); DONE; }")
+       "{ zip_expand_epilogue(); DONE; }")
+(define_expand "return" ; In order to use the function predicate, this *must*
+(define_expand "return" ; In order to use the function predicate, this *must*
+       [(return)]      ; be a define_expand
+       [(return)]      ; be a define_expand
+       "zip_use_return_insn()")
+       "zip_use_return_insn()")
+       ; "JMP  R0"
+       ; "JMP  R0"
+       ; [(set_attr "ccresult" "unchanged")])
+       ; [(set_attr "ccresult" "unchanged")])
+(define_insn "*return" ; A "*" -- means it cannot be called from C
+(define_insn "*return" ; A "*" -- means it cannot be called from C
+       [(return)]
+       [(return)]
+       ""
+       ""
+       "RETN"
+       "RETN"
+       [(set_attr "ccresult" "unchanged")])
+       [(set_attr "ccresult" "unchanged")])
+(define_insn "simple_return"   ; A "*" -- means it cannot be called from C
+(define_insn "simple_return"   ; A "*" -- means it cannot be called from C
+       [(simple_return)]
+       [(simple_return)]
+       ""
+       ""
+       "RETN"
+       "RETN"
+       [(set_attr "ccresult" "unchanged")])
+       [(set_attr "ccresult" "unchanged")])
+(define_insn "return_if_eq"
+(define_insn "return_if_eq"
+       [(set (pc) (if_then_else (eq (reg:CC CC_REG) (const_int 0))
+       [(set (pc) (if_then_else (eq (reg:CC CC_REG) (const_int 0))
+                       (return) (pc)))]
+                       (return) (pc)))]
+       "zip_use_return_insn()"
+       "zip_use_return_insn()"
+       "RETN.Z"
+       "RETN.Z"
+       [(set_attr "ccresult" "unchanged") (set_attr "predicable" "no")])
+       [(set_attr "ccresult" "unchanged") (set_attr "predicable" "no")])
+(define_insn "return_if_ne"
+(define_insn "return_if_ne"
+       [(set (pc) (if_then_else (ne (reg:CC CC_REG) (const_int 0))
+       [(set (pc) (if_then_else (ne (reg:CC CC_REG) (const_int 0))
+                       (return) (pc)))]
+                       (return) (pc)))]
+       "zip_use_return_insn()"
+       "zip_use_return_insn()"
+       "RETN.NZ"
+       "RETN.NZ"
+       [(set_attr "ccresult" "unchanged") (set_attr "predicable" "no")])
+       [(set_attr "ccresult" "unchanged") (set_attr "predicable" "no")])
+(define_insn "return_if_lt"
+(define_insn "return_if_lt"
+       [(set (pc) (if_then_else (lt (reg:CC CC_REG) (const_int 0))
+       [(set (pc) (if_then_else (lt (reg:CC CC_REG) (const_int 0))
+                       (return) (pc)))]
+                       (return) (pc)))]
+       "zip_use_return_insn()"
+       "zip_use_return_insn()"
+       "RETN.LT"
+       "RETN.LT"
+       [(set_attr "ccresult" "unchanged") (set_attr "predicable" "no")])
+       [(set_attr "ccresult" "unchanged") (set_attr "predicable" "no")])
+(define_insn "return_if_gte"
+(define_insn "return_if_gte"
+       [(set (pc) (if_then_else (ge (reg:CC CC_REG) (const_int 0))
+       [(set (pc) (if_then_else (ge (reg:CC CC_REG) (const_int 0))
+                       (return) (pc)))]
+                       (return) (pc)))]
+       "(zip_use_return_insn())"
+       "(zip_use_return_insn())"
+       "RETN.GTE"
+       "RETN.GTE"
+       [(set_attr "ccresult" "unchanged") (set_attr "predicable" "no")])
+       [(set_attr "ccresult" "unchanged") (set_attr "predicable" "no")])
+(define_insn "return_if_ltu"
+(define_insn "return_if_ltu"
+       [(set (pc) (if_then_else (ltu (reg:CC CC_REG) (const_int 0))
+       [(set (pc) (if_then_else (ltu (reg:CC CC_REG) (const_int 0))
+                       (return) (pc)))]
+                       (return) (pc)))]
+       "zip_use_return_insn()"
+       "zip_use_return_insn()"
+       "RETN.C"
+       "RETN.C"
+       [(set_attr "ccresult" "unchanged") (set_attr "predicable" "no")])
+       [(set_attr "ccresult" "unchanged") (set_attr "predicable" "no")])
+(define_insn "return_if_geu"
+(define_insn "return_if_geu"
+       [(set (pc) (if_then_else (geu (reg:CC CC_REG) (const_int 0))
+       [(set (pc) (if_then_else (geu (reg:CC CC_REG) (const_int 0))
+                       (return) (pc)))]
+                       (return) (pc)))]
+       "(zip_use_return_insn())"
+       "(zip_use_return_insn())"
+       "RETN.NC"
+       "RETN.NC"
+       [(set_attr "ccresult" "unchanged") (set_attr "predicable" "no")])
+       [(set_attr "ccresult" "unchanged") (set_attr "predicable" "no")])
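+;
+; (Editor's sketch, not part of the original patch.)  Paired with a compare,
+; these allow an early exit such as
+;      if (x == 0) return;
+; to collapse, whenever zip_use_return_insn() holds, into
+;      CMP     0,R1            ; assuming x was placed in R1
+;      RETN.Z
+; rather than a conditional branch around a full epilogue.
+;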
+;
+;
+;
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;; Zip Builtin Functions
+;; Zip Builtin Functions
+;;
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+;
+;
+;
+;
+(define_insn "zip_rtu"
+(define_insn "zip_rtu"
+       [(unspec_volatile [(reg:SI CC_REG)] UNSPEC_RTU)
+       [(unspec_volatile [(reg:SI CC_REG)] UNSPEC_RTU)
+       (clobber (reg:CC CC_REG))]
+       (clobber (reg:CC CC_REG))]
+       "(!ZIP_USER)"
+       "(!ZIP_USER)"
+       "RTU"
+       "RTU"
+       [(set_attr "ccresult" "unknown")])
+       [(set_attr "ccresult" "unknown")])
+(define_insn "zip_busy"
+(define_insn "zip_busy"
+       [(set (pc) (minus:SI (pc) (const_int 1)))]
+       [(set (pc) (minus:SI (pc) (const_int 1)))]
+       ""
+       ""
+       "BUSY"
+       "BUSY"
+       [(set_attr "predicable" "yes") (set_attr "ccresult" "unchanged")])
+       [(set_attr "predicable" "yes") (set_attr "ccresult" "unchanged")])
+(define_insn "zip_halt" ; Needs to be unspec_volatile, or optimizer will opt out
+(define_insn "zip_halt" ; Needs to be unspec_volatile, or optimizer will opt out
+       [(unspec_volatile [(reg:SI CC_REG)] UNSPEC_HALT)
+       [(unspec_volatile [(reg:SI CC_REG)] UNSPEC_HALT)
+       (clobber (reg:CC CC_REG))]
+       (clobber (reg:CC CC_REG))]
+       "(!ZIP_USER)"
+       "(!ZIP_USER)"
+       "HALT"
+       "HALT"
+       [(set_attr "ccresult" "unknown")])
+       [(set_attr "ccresult" "unknown")])
+(define_insn "zip_idle"
+(define_insn "zip_idle"
+       [(unspec_volatile [(reg:SI CC_REG)] UNSPEC_IDLE)
+       [(unspec_volatile [(reg:SI CC_REG)] UNSPEC_IDLE)
+       (clobber (reg:CC CC_REG))]
+       (clobber (reg:CC CC_REG))]
+       ""
+       ""
+       "WAIT"
+       "WAIT"
+       [(set_attr "ccresult" "unknown")])
+       [(set_attr "ccresult" "unknown")])
+(define_insn "zip_syscall"
+(define_insn "zip_syscall"
+       [(unspec_volatile [(reg:SI CC_REG)] UNSPEC_SYSCALL)]
+       [(unspec_volatile [(reg:SI CC_REG)] UNSPEC_SYSCALL)]
+       ""
+       ""
+       "CLR\tCC"
+       "CLR\tCC"
+       [(set_attr "ccresult" "unknown")])
+       [(set_attr "ccresult" "unknown")])
+;
+;
+;
+;
+; Operator "save_context"
+; Operator "save_context"
+;
+;
+;      Okay, so we're not really reading and writing operand 0, %0.  However,
+;      Okay, so we're not really reading and writing operand 0, %0.  However,
+;      if we don't list it as a "+r" register, the compiler may allocate it
+;      if we don't list it as a "+r" register, the compiler may allocate it
+;      among the other scratch registers, and we would then clobber it in the
+;      among the other scratch registers, and we would then clobber it in the
+;      middle of the operation, before the task is complete.
+;      middle of the operation, before the task is complete.
+;
+;
+(define_insn "zip_save_context"
+(define_insn "zip_save_context"
+       [(unspec_volatile
+       [(unspec_volatile
+                       [ (match_operand:SI 0 "register_operand" "+r") ]
+                       [ (match_operand:SI 0 "register_operand" "+r") ]
+                       UNSPEC_SAVE_CONTEXT)
+                       UNSPEC_SAVE_CONTEXT)
+               (clobber (match_scratch:SI 1 "=r"))
+               (clobber (match_scratch:SI 1 "=r"))
+               (clobber (match_scratch:SI 2 "=r"))
+               (clobber (match_scratch:SI 2 "=r"))
+               (clobber (match_scratch:SI 3 "=r"))
+               (clobber (match_scratch:SI 3 "=r"))
+               (clobber (match_scratch:SI 4 "=r"))]
+               (clobber (match_scratch:SI 4 "=r"))]
+       "(!ZIP_USER)"
+       "(!ZIP_USER)"
+       "MOV\tuR0,%1
+       "MOV\tuR0,%1
+       MOV\tuR1,%2
+       MOV\tuR1,%2
+       MOV\tuR2,%3
+       MOV\tuR2,%3
+       MOV\tuR3,%4
+       MOV\tuR3,%4
+       SW\t%1,%0
+       SW\t%1,%0
+       SW\t%2,4(%0)
+       SW\t%2,4(%0)
+       SW\t%3,8(%0)
+       SW\t%3,8(%0)
+       SW\t%4,12(%0)
+       SW\t%4,12(%0)
+       MOV\tuR4,%1
+       MOV\tuR4,%1
+       MOV\tuR5,%2
+       MOV\tuR5,%2
+       MOV\tuR6,%3
+       MOV\tuR6,%3
+       MOV\tuR7,%4
+       MOV\tuR7,%4
+       SW\t%1,16(%0)
+       SW\t%1,16(%0)
+       SW\t%2,20(%0)
+       SW\t%2,20(%0)
+       SW\t%3,24(%0)
+       SW\t%3,24(%0)
+       SW\t%4,28(%0)
+       SW\t%4,28(%0)
+       MOV\tuR8,%1
+       MOV\tuR8,%1
+       MOV\tuR9,%2
+       MOV\tuR9,%2
+       MOV\tuR10,%3
+       MOV\tuR10,%3
+       MOV\tuR11,%4
+       MOV\tuR11,%4
+       SW\t%1,32(%0)
+       SW\t%1,32(%0)
+       SW\t%2,36(%0)
+       SW\t%2,36(%0)
+       SW\t%3,40(%0)
+       SW\t%3,40(%0)
+       SW\t%4,44(%0)
+       SW\t%4,44(%0)
+       MOV\tuR12,%1
+       MOV\tuR12,%1
+       MOV\tuSP,%2
+       MOV\tuSP,%2
+       MOV\tuCC,%3
+       MOV\tuCC,%3
+       MOV\tuPC,%4
+       MOV\tuPC,%4
+       SW\t%1,48(%0)
+       SW\t%1,48(%0)
+       SW\t%2,52(%0)
+       SW\t%2,52(%0)
+       SW\t%3,56(%0)
+       SW\t%3,56(%0)
+       SW\t%4,60(%0)"
+       SW\t%4,60(%0)"
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
+;
+;
+; See the comment above about why operand 0, %0, *must* be a "+r" operand,
+; See the comment above about why operand 0, %0, *must* be a "+r" operand,
+; even though we don't really read (or change) its value throughout this
+; even though we don't really read (or change) its value throughout this
+; operation.
+; operation.
+;
+;
+(define_insn "zip_restore_context"
+(define_insn "zip_restore_context"
+       [(unspec_volatile [
+       [(unspec_volatile [
+               (match_operand:SI 0 "register_operand" "+r")] UNSPEC_RESTORE_CONTEXT)
+               (match_operand:SI 0 "register_operand" "+r")] UNSPEC_RESTORE_CONTEXT)
+       (clobber (match_scratch:SI 1 "=r"))
+       (clobber (match_scratch:SI 1 "=r"))
+       (clobber (match_scratch:SI 2 "=r"))
+       (clobber (match_scratch:SI 2 "=r"))
+       (clobber (match_scratch:SI 3 "=r"))
+       (clobber (match_scratch:SI 3 "=r"))
+       (clobber (match_scratch:SI 4 "=r"))]
+       (clobber (match_scratch:SI 4 "=r"))]
+       "(!ZIP_USER)"
+       "(!ZIP_USER)"
+       "LW\t0(%0),%1
+       "LW\t0(%0),%1
+       LW\t4(%0),%2
+       LW\t4(%0),%2
+       LW\t8(%0),%3
+       LW\t8(%0),%3
+       LW\t12(%0),%4
+       LW\t12(%0),%4
+       MOV\t%1,uR0
+       MOV\t%1,uR0
+       MOV\t%2,uR1
+       MOV\t%2,uR1
+       MOV\t%3,uR2
+       MOV\t%3,uR2
+       MOV\t%4,uR3
+       MOV\t%4,uR3
+       LW\t16(%0),%1
+       LW\t16(%0),%1
+       LW\t20(%0),%2
+       LW\t20(%0),%2
+       LW\t24(%0),%3
+       LW\t24(%0),%3
+       LW\t28(%0),%4
+       LW\t28(%0),%4
+       MOV\t%1,uR4
+       MOV\t%1,uR4
+       MOV\t%2,uR5
+       MOV\t%2,uR5
+       MOV\t%3,uR6
+       MOV\t%3,uR6
+       MOV\t%4,uR7
+       MOV\t%4,uR7
+       LW\t32(%0),%1
+       LW\t32(%0),%1
+       LW\t36(%0),%2
+       LW\t36(%0),%2
+       LW\t40(%0),%3
+       LW\t40(%0),%3
+       LW\t44(%0),%4
+       LW\t44(%0),%4
+       MOV\t%1,uR8
+       MOV\t%1,uR8
+       MOV\t%2,uR9
+       MOV\t%2,uR9
+       MOV\t%3,uR10
+       MOV\t%3,uR10
+       MOV\t%4,uR11
+       MOV\t%4,uR11
+       LW\t48(%0),%1
+       LW\t48(%0),%1
+       LW\t52(%0),%2
+       LW\t52(%0),%2
+       LW\t56(%0),%3
+       LW\t56(%0),%3
+       LW\t60(%0),%4
+       LW\t60(%0),%4
+       MOV\t%1,uR12
+       MOV\t%1,uR12
+       MOV\t%2,uSP
+       MOV\t%2,uSP
+       MOV\t%3,uCC
+       MOV\t%3,uCC
+       MOV\t%4,uPC"
+       MOV\t%4,uPC"
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
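+;
+; (Editor's usage sketch; the C-side spellings and buffers below are
+; assumptions, not taken from the patch.)  A supervisor-mode task switch
+; built on these two patterns, plus zip_rtu above, might look like:
+;
+;      int     oldctx[16], newctx[16];         // uR0-uR12, uSP, uCC, uPC
+;      zip_save_context(oldctx);               // store the outgoing user regs
+;      zip_restore_context(newctx);            // load the incoming user regs
+;      zip_rtu();                              // resume the (new) user context
+;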
+(define_insn "zip_bitrev"
+(define_insn "zip_bitrev"
+       [(set (match_operand:SI 0 "register_operand" "=r")
+       [(set (match_operand:SI 0 "register_operand" "=r")
+               (unspec:SI [(match_operand:SI 1 "register_operand" "r")] UNSPEC_BITREV))
+               (unspec:SI [(match_operand:SI 1 "register_operand" "r")] UNSPEC_BITREV))
+       ]
+       ]
+       ""
+       ""
+       "BREV\t%1,%0"
+       "BREV\t%1,%0"
+       [(set_attr "ccresult" "unchanged")])
+       [(set_attr "ccresult" "unchanged")])
+(define_insn "zip_cc"
+(define_insn "zip_cc"
+       [(set (match_operand:SI 0 "register_operand" "=r")
+       [(set (match_operand:SI 0 "register_operand" "=r")
+               (unspec:SI [(reg:SI CC_REG)] UNSPEC_GETCC))]
+               (unspec:SI [(reg:SI CC_REG)] UNSPEC_GETCC))]
+       ""
+       ""
+       "MOV\tCC,%0"
+       "MOV\tCC,%0"
+       [(set_attr "ccresult" "unchanged")])
+       [(set_attr "ccresult" "unchanged")])
+(define_insn "zip_ucc"
+(define_insn "zip_ucc"
+       [(set (match_operand:SI 0 "register_operand" "=r")
+       [(set (match_operand:SI 0 "register_operand" "=r")
+               (unspec_volatile:SI [(reg:SI CC_REG)] UNSPEC_GETUCC))]
+               (unspec_volatile:SI [(reg:SI CC_REG)] UNSPEC_GETUCC))]
+       ""
+       ""
+       "MOV\tuCC,%0"
+       "MOV\tuCC,%0"
+       [(set_attr "ccresult" "unchanged")])
+       [(set_attr "ccresult" "unchanged")])
+(define_insn "zip_cc_sto"
+(define_insn "zip_cc_sto"
+       [(set (mem:SI (match_operand:SI 0 "register_operand" "r"))
+       [(set (mem:SI (match_operand:SI 0 "register_operand" "r"))
+               (unspec_volatile:SI [(reg:SI CC_REG)] UNSPEC_GETCC))]
+               (unspec_volatile:SI [(reg:SI CC_REG)] UNSPEC_GETCC))]
+       ""
+       ""
+       "SW\tCC,(%0)"
+       "SW\tCC,(%0)"
+       [(set_attr "ccresult" "unchanged")])
+       [(set_attr "ccresult" "unchanged")])
+(define_insn "zip_cc_sto_off"
+(define_insn "zip_cc_sto_off"
+       [(set (mem:SI (plus:SI
+       [(set (mem:SI (plus:SI
+                       (match_operand:SI 0 "register_operand" "r")
+                       (match_operand:SI 0 "register_operand" "r")
+                       (match_operand:SI 1 "const_int_operand" "N")))
+                       (match_operand:SI 1 "const_int_operand" "N")))
+               (unspec_volatile:SI [(reg:SI CC_REG)] UNSPEC_GETCC))]
+               (unspec_volatile:SI [(reg:SI CC_REG)] UNSPEC_GETCC))]
+       ""
+       ""
+       "SW\tCC,%1(%0)"
+       "SW\tCC,%1(%0)"
+       [(set_attr "ccresult" "unchanged")])
+       [(set_attr "ccresult" "unchanged")])
+(define_insn "ldilo"
+(define_insn "ldilo"
+       [(set (match_operand:SI 0 "register_operand" "=r")
+       [(set (match_operand:SI 0 "register_operand" "=r")
+               (unspec:SI [(match_operand:SI 1 "immediate_operand" "")] UNSPEC_LDILO))]
+               (unspec:SI [(match_operand:SI 1 "immediate_operand" "")] UNSPEC_LDILO))]
+       ""
+       ""
+       "LDILO  %1,%0"
+       "LDILO  %1,%0"
+       [(set_attr "predicable" "yes") (set_attr "ccresult" "unchanged")])
+       [(set_attr "predicable" "yes") (set_attr "ccresult" "unchanged")])
+
+
+;
+;
+;
+;
+; Missing still: zip_break(idno)
+; Missing still: zip_break(idno)
+; It would also be nice to have a zip_reg builtin, allowing us to read or write
+; It would also be nice to have a zip_reg builtin, allowing us to read or write
+; a register, as in zip_reg(5)=40;.  It's not clear what this means, though, when
+; a register, as in zip_reg(5)=40;.  It's not clear what this means, though, when
+; the number placed into it is not constant, or how to specify that it must *only*
+; the number placed into it is not constant, or how to specify that it must *only*
+; be constant.  That's actually the problem with both proposals: zip_break(id)
+; be constant.  That's actually the problem with both proposals: zip_break(id)
+; and zip_reg(regno) both depend upon a compile-time constant to work.
+; and zip_reg(regno) both depend upon a compile-time constant to work.
+;
+;
+;
+;
+;
+;
+;
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;; Trap Instruction
+;; Trap Instruction
+;;
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+;
+;
+; The ZipCPU doesn't really have a "trap" instruction per se.  The goal is that
+; The ZipCPU doesn't really have a "trap" instruction per se.  The goal is that
+; *nothing* should ever trap, and so we should never get here.  However, the
+; *nothing* should ever trap, and so we should never get here.  However, the
+; compiler seems to want a trap instruction for some reason.  (Defining it
+; compiler seems to want a trap instruction for some reason.  (Defining it
+; keeps the compiler from falling back to calls to abort().)  So let's
+; keeps the compiler from falling back to calls to abort().)  So let's
+; just grab onto the break instruction and declare it to be a trap instruction
+; just grab onto the break instruction and declare it to be a trap instruction
+; for our purposes.  Alternatively, we might've used a syscall, but ... the
+; for our purposes.  Alternatively, we might've used a syscall, but ... the
+; BREAK approach works from both user and supervisor code.
+; BREAK approach works from both user and supervisor code.
+;
+;
+(define_insn "trap"
+(define_insn "trap"
+       [(trap_if (const_int 1) (const_int 0))]
+       [(trap_if (const_int 1) (const_int 0))]
+       ""
+       ""
+       "BREAK"
+       "BREAK"
+       [(set_attr "predicable" "yes") (set_attr "ccresult" "unchanged")])
+       [(set_attr "predicable" "yes") (set_attr "ccresult" "unchanged")])
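+;
+; (Editor's note, not part of the original patch.)  With this pattern defined,
+; a C-level
+;      if (buf == NULL)
+;              __builtin_trap();
+; now emits a single BREAK rather than a library call to abort().
+;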
+;
+;
 
+;
+(define_expand "ctrapsi4"
+(define_expand "ctrapsi4"
+       [(set (reg:CC CC_REG) (compare:CC
+       [(set (reg:CC CC_REG) (compare:CC
+               (match_operand:SI 1 "register_operand" "r")
+               (match_operand:SI 1 "register_operand" "r")
+               (match_operand:SI 2 "zip_opb_single_operand_p" "rO")))
+               (match_operand:SI 2 "zip_opb_single_operand_p" "rO")))
+       (trap_if (match_operator 0 "ordered_comparison_operator"
+       (trap_if (match_operator 0 "ordered_comparison_operator"
+                       [(reg:CC CC_REG) (const_int 0)])
+                       [(reg:CC CC_REG) (const_int 0)])
+                       (match_operand 3 "const_int_operand" "O"))]
+                       (match_operand 3 "const_int_operand" "O"))]
+       ""
+       ""
+       )
+       )
 
+;
 
+;
+(define_insn "trapif"
+(define_insn "trapif"
+       [(trap_if (match_operator 0 "ordered_comparison_operator"
+       [(trap_if (match_operator 0 "ordered_comparison_operator"
+                       [(reg:CC CC_REG) (const_int 0)])
+                       [(reg:CC CC_REG) (const_int 0)])
+                       (match_operand 1 "const_int_operand" "O"))]
+                       (match_operand 1 "const_int_operand" "O"))]
+       ""
+       ""
+       "BREAK\t%1"
+       "BREAK\t%1"
+       [(set_attr "predicable" "no")])
+       [(set_attr "predicable" "no")])
+;
+;
+;
+;
+;
+;
+;
+;
+(include "zip-di.md")
+(include "zip-di.md")
+(include "zip-ops.md")
+(include "zip-ops.md")
+(include "zip-float.md")
+(include "zip-float.md")
+(include "zip-sync.md")
+(include "zip-sync.md")
+(include "zip-peephole.md")
+(include "zip-peephole.md")
+;
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;; Unimplemented (or not yet implemented) RTL Codes
+;; Unimplemented (or not yet implemented) RTL Codes
+;;
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+;
+;
+;
+;
+;
+;
+;(define_insn "addvsi4"
+;(define_insn "addvsi4"
+;      )
+;      )
+;(define_insn "subvsi4"
+;(define_insn "subvsi4"
+;      )
+;      )
+;(define_insn "mulvsi4"
+;(define_insn "mulvsi4"
+;      )
+;      )
+;(define_insn "umulvsi4"
+;(define_insn "umulvsi4"
+;      )
+;      )
+;(define_insn "umulvsi4"
+;(define_insn "umulvsi4"
+;      )
+;      )
+;(define_insn "negvsi3"
+;(define_insn "negvsi3"
+;      "MOV    %1,%0
+;      "MOV    %1,%0
+;      XOR     -1,%0
+;      XOR     -1,%0
+;      ADD     1,%0
+;      ADD     1,%0
+;      BV      %2"
+;      BV      %2"
+;      )
+;      )
+;
+;
+;(define_insn "ssum_widen
+;(define_insn "ssum_widen
+;(define_insn "usum_widen
+;(define_insn "usum_widen
+;(define_insn "udot_prod"
+;(define_insn "udot_prod"
+;(define_insn "maddsidi4"
+;(define_insn "maddsidi4"
+;(define_insn "umaddsidi4"
+;(define_insn "umaddsidi4"
+;(define_insn "msubsidi4"
+;(define_insn "msubsidi4"
+;(define_insn "umsubsidi4"
+;(define_insn "umsubsidi4"
+;
+;
+;
+;
+; STILL MISSING:
+; STILL MISSING:
+;      SYSCALL(ID)
+;      SYSCALL(ID)
+;              MOV %ID,R0
+;              MOV %ID,R0
+;              CLR     CC
+;              CLR     CC
+;      cmove   ... the conditional move, created from a
+;      cmove   ... the conditional move, created from a
+;      (set (match_op 0 "" "r") (if_then_else (condition) (a) (reg X))))
+;      (set (match_op 0 "" "r") (if_then_else (condition) (a) (reg X))))
+;      pattern
+;      pattern
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/config/zip/zip-modes.def gcc-6.2.0-zip/gcc/config/zip/zip-modes.def
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/config/zip/zip-ops.md gcc-6.2.0-zip/gcc/config/zip/zip-ops.md
--- gcc-6.2.0/gcc/config/zip/zip-modes.def      1969-12-31 19:00:00.000000000 -0500
--- gcc-6.2.0/gcc/config/zip/zip-ops.md 1969-12-31 19:00:00.000000000 -0500
+++ gcc-6.2.0-zip/gcc/config/zip/zip-modes.def  2017-01-10 12:46:54.791966242 -0500
+++ gcc-6.2.0-zip/gcc/config/zip/zip-ops.md     2018-03-22 18:33:11.175718614 -0400
@@ -0,0 +1 @@
@@ -0,0 +1,2744 @@
+#define        BITS_PER_UNIT   8
 
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/config/zip/zip-peephole.md gcc-6.2.0-zip/gcc/config/zip/zip-peephole.md
 
--- gcc-6.2.0/gcc/config/zip/zip-peephole.md    1969-12-31 19:00:00.000000000 -0500
 
+++ gcc-6.2.0-zip/gcc/config/zip/zip-peephole.md        2017-03-01 15:46:02.440221158 -0500
 
@@ -0,0 +1,768 @@
 
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;; Filename:   zip-peephole.md
+;; Filename:   zip-ops.md
+;;
+;;
+;; Project:    Zip CPU -- a small, lightweight, RISC CPU soft core
+;; Project:    Zip CPU -- a small, lightweight, RISC CPU soft core
+;;
+;;
+;; Purpose:    This is a machine description of a variety of peephole
+;; Purpose:    This is a computer generated machine description of the
+;;             optimizations which can be applied to the ZipCPU RTL
+;;             ZipCPU's operations.  It is computer generated simply for
+;;     representation.
+;;     two reasons.  First, I can't seem to find a way to generate this
 
+;;     information within GCC's current constructs.  Specifically, the
 
+;;     CPU's instructions normally set the condition codes, unless they
 
+;;     are conditional instructions when they don't.  Second, the ZipCPU is
 
+;;     actually quite regular.  Almost all of the instructions have the same
 
+;;     form.  This form turns into many, many RTL instructions.  Because the
 
+;;     CPU doesn't match any of the others within GCC, that means either
 
+;;     I have a *lot* of cut, copy, paste, and edit to do to create the file
 
+;;     and upon any and every edit, or I need to build a program to generate
 
+;;     the remaining .md constructs.  Hence, I chose the latter to minimize
 
+;;     the amount of work I needed to do.
+;;
+;;
+;;
+;;
+;; Creator:    Dan Gisselquist, Ph.D.
+;; Creator:    Dan Gisselquist, Ph.D.
+;;             Gisselquist Technology, LLC
+;;             Gisselquist Technology, LLC
+;;
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;; Copyright (C) 2015,2017, Gisselquist Technology, LLC
+;; Copyright (C) 2017, Gisselquist Technology, LLC
+;;
+;;
+;; This program is free software (firmware): you can redistribute it and/or
+;; This program is free software (firmware): you can redistribute it and/or
+;; modify it under the terms of  the GNU General Public License as published
+;; modify it under the terms of  the GNU General Public License as published
+;; by the Free Software Foundation, either version 3 of the License, or (at
+;; by the Free Software Foundation, either version 3 of the License, or (at
+;; your option) any later version.
+;; your option) any later version.
+;;
+;;
+;; This program is distributed in the hope that it will be useful, but WITHOUT
+;; This program is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY or
+;; ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY or
+;; FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+;; FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+;; for more details.
+;; for more details.
+;;
+;;
+;; License:    GPL, v3, as defined and found on www.gnu.org,
+;; License:    GPL, v3, as defined and found on www.gnu.org,
+;;             http://www.gnu.org/licenses/gpl.html
+;;             http://www.gnu.org/licenses/gpl.html
+;;
+;;
+;;
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
 
+;;
+;
+;
+;
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; ADD (genzipop_long)
+;;
 
+;; Peephole optimizations
 
+;;
 
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
+;
+;
+;
+;
+;
+;
 
+(define_insn "addsi3"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (plus:SI (match_operand:SI 1 "register_operand" "0")
 
+                       (match_operand:SI 2 "zip_opb_single_operand_p" "rO")))
 
+       (clobber (reg:CC CC_REG))]
 
+       ""
 
+       "ADD\t%2,%0     ; addsi3"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
+;
+;
+;
+;
+; Match:
+(define_insn "addsi3_raw"
+;      CMP     R1,R0
+       [(set (match_operand:SI 0 "register_operand" "=r")
+;      BGTU    lbl
+               (plus:SI (match_dup 0)
+; Transform to:
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO")))
+;      CMP     R0,R1
+       (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
+;      BC      lbl
+       ""
 
+       "ADD\t%1,%0     ; addsi3_raw"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
+;
+;
+(define_peephole2
 
+       [(set (reg:CC CC_REG) (compare:CC
 
+               (match_operand:SI 0 "register_operand")
 
+               (match_operand:SI 1 "register_operand")))
 
+       (set (pc) (if_then_else (gtu (reg:CC CC_REG) (const_int 0))
 
+                       (label_ref (match_operand 2 ""))
 
+                       (pc)))]
 
+       "(ZIP_PEEPHOLE)"
 
+       [(set (reg:CC CC_REG) (compare:CC (match_dup 1) (match_dup 0)))
 
+       (set (pc) (if_then_else (ltu (reg:CC CC_REG) (const_int 0))
 
+                       (label_ref (match_dup 2))
 
+                       (pc)))]
 
+       "")
 
+(define_peephole2
 
+       [(match_scratch:SI 3 "=r")
 
+       (set (reg:CC CC_REG) (compare:CC
 
+               (match_operand:SI 0 "register_operand")
 
+               (match_operand 1 "const_int_operand")))
 
+       (match_dup 3)
 
+       (set (pc) (if_then_else (gtu (reg:CC CC_REG) (const_int 0))
 
+                       (label_ref (match_operand 2 ""))
 
+                       (pc)))]
 
+       "(ZIP_PEEPHOLE)"
 
+       [(set (match_dup 3) (match_dup 1))
 
+       (set (reg:CC CC_REG) (compare:CC (match_dup 3) (match_dup 0)))
 
+       (set (pc) (if_then_else (ltu (reg:CC CC_REG) (const_int 0))
 
+                       (label_ref (match_dup 2))
 
+                       (pc)))]
 
+       "")
 
+;(define_peephole2
 
+;      [(set (reg:CC CC_REG) (compare:CC
 
+;              (match_operand:SI 0 "register_operand")
 
+;              (match_operand 1 "const_int_operand")))
 
+;      (set (pc) (if_then_else (gtu (reg:CC CC_REG) (const_int 0))
 
+;                      (label_ref (match_operand 2 ""))
 
+;                      (pc)))]
 
+;      ""
 
+;      [(set (reg:CC CC_REG) (compare:CC (match_dup 0) (match_dup 1)))
 
+;      (set (pc) (if_then_else (geu (reg:CC CC_REG) (const_int 0))
 
+;                      (label_ref (match_dup 2))
 
+;                      (pc)))]
 
+;      "operands[1] = GEN_INT(INTVAL(operands[1])-1);")
 
+;
+;
 
+(define_insn "addsi3_eq"
 
+       [(cond_exec (eq (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (plus:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "ADD.Z\t%1,%0   ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
+;
+;
+; Match:
+;	CMP	R1,R0
+;	BGEU	lbl
+; Transform to:
+;	CMP	1(R0),R1
+;	BC	lbl
+;
+;
+(define_peephole2
+	[(set (reg:CC CC_REG) (compare:CC
+		(match_operand:SI 0 "register_operand")
+		(match_operand:SI 1 "register_operand")))
+	(set (pc) (if_then_else (geu (reg:CC CC_REG) (const_int 0))
+			(label_ref (match_operand 2 ""))
+			(pc)))]
+	"(ZIP_PEEPHOLE)"
+	[(set (reg:CC CC_REG) (compare:CC
+		(match_dup 1) (plus:SI (match_dup 0) (const_int 1))))
+	(set (pc) (if_then_else (ltu (reg:CC CC_REG) (const_int 0))
+			(label_ref (match_dup 2))
+			(pc)))]
+	"")
+;
+;
+; Match:
+;	CMP	R1,R0
+;	BGE	lbl
+; Transform to:
+;	CMP	1(R0),R1
+;	BLT	lbl
+; ... why?  when we support a BGE instruction?
+;(define_peephole2
+	;[(set (reg:CC CC_REG) (compare:CC
+		;(match_operand:SI 0 "register_operand")
+		;(match_operand:SI 1 "register_operand")))
+	;(set (pc) (if_then_else (ge (reg:CC CC_REG) (const_int 0))
+			;(label_ref (match_operand 2 ""))
+			;(pc)))]
+	;"(ZIP_PEEPHOLE)"
+	;[(set (reg:CC CC_REG) (compare:CC (match_dup 1)
+;
+;
+(define_insn "addsi3_ne"
+	[(cond_exec (ne (reg:CC CC_REG) (const_int 0))
+			(set (match_operand:SI 0 "register_operand" "=r")
+		(plus:SI (match_dup 0)
+			(match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
+	""	; Condition
+	"ADD.NZ\t%1,%0	; genzip, conditional operator" ; Template
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
+;
+;
+(define_insn "addsi3_lt"
+	[(cond_exec (lt (reg:CC CC_REG) (const_int 0))
+			(set (match_operand:SI 0 "register_operand" "=r")
+		(plus:SI (match_dup 0)
+			(match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
+	""	; Condition
+	"ADD.LT\t%1,%0	; genzip, conditional operator" ; Template
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
+;
+;
+(define_insn "addsi3_ge"
+	[(cond_exec (ge (reg:CC CC_REG) (const_int 0))
+			(set (match_operand:SI 0 "register_operand" "=r")
+		(plus:SI (match_dup 0)
+			(match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
+	""	; Condition
+	"ADD.GE\t%1,%0	; genzip, conditional operator" ; Template
+	[(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
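The geu-to-ltu peephole above (the CMP R1,R0 / BGEU to CMP 1(R0),R1 / BC rewrite its comment describes) leans on the unsigned identity a >= b  <=>  b < a + 1, which holds whenever a + 1 does not wrap (i.e. for every a except UINT_MAX). A quick, illustrative C check of that identity, not part of the patch:

    #include <assert.h>

    /* a >= b (unsigned) tested as b < a+1, i.e. a branch on carry. */
    static int geu_as_ltu(unsigned int a, unsigned int b)
    {
            return b < a + 1u;
    }

    int main(void)
    {
            unsigned int a, b;
            for (a = 0; a < 100; a++)       /* stays clear of the wrap case */
                    for (b = 0; b < 100; b++)
                            assert((a >= b) == geu_as_ltu(a, b));
            return 0;
    }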
 
+;
 
+;
 
+(define_insn "addsi3_ltu"
 
+       [(cond_exec (ltu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (plus:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "ADD.C\t%1,%0   ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "addsi3_geu"
 
+       [(cond_exec (geu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (plus:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "ADD.NC\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+;
 
+;
 
+;
 
+; ADD (genzipop_long)
 
+;
 
+;
 
+;
 
+(define_insn "addsi3_off"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (plus:SI (match_operand:SI 1 "register_operand" "0")
 
+                       (plus:SI (match_operand:SI 2 "register_operand" "r")
 
+                               (match_operand:SI 3 "const_int_operand" "N"))))
 
+       (clobber (reg:CC CC_REG))]
 
+       ""
 
+       "ADD\t%2,%0     ; addsi3_off"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "addsi3_off_raw"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (plus:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N"))))
 
+       (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
 
+       ""
 
+       "ADD\t%1,%0     ; addsi3_off_raw"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
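Each operation in this file comes in a plain form, which only declares (clobber (reg:CC CC_REG)), and a _raw form, whose second set models CC_REG receiving a compare of the result against zero, so that a later conditional instruction may consume the flags. An illustrative C sketch of the distinction (the cc_sketch structure is hypothetical, not something the patch defines):

    /* Plain patterns: flags trashed, do not rely on them afterwards.
     * "_raw" patterns: flags describe the freshly computed result. */
    struct cc_sketch { int zero; int negative; };

    static unsigned int add_then_set_cc(unsigned int dst, unsigned int src,
                                        struct cc_sketch *cc)
    {
            dst += src;                      /* ADD %1,%0                 */
            cc->zero     = (dst == 0);       /* CC := compare(result, 0)  */
            cc->negative = (int)(dst >> 31); /* assuming 32-bit registers */
            return dst;
    }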
 
+;
 
+;
 
+(define_insn "addsi3_off_eq"
 
+       [(cond_exec (eq (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (plus:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "ADD.Z\t%1,%0   ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "addsi3_off_ne"
 
+       [(cond_exec (ne (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (plus:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "ADD.NZ\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "addsi3_off_lt"
 
+       [(cond_exec (lt (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (plus:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "ADD.LT\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "addsi3_off_ge"
 
+       [(cond_exec (ge (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (plus:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "ADD.GE\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "addsi3_off_ltu"
 
+       [(cond_exec (ltu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (plus:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "ADD.C\t%1,%0   ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "addsi3_off_geu"
 
+       [(cond_exec (geu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (plus:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "ADD.NC\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+;
 
+;
 
+;
 
+; SUB (genzipop_long)
 
+;
 
+;
 
+;
 
+(define_insn "subsi3"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (minus:SI (match_operand:SI 1 "register_operand" "0")
 
+                       (match_operand:SI 2 "zip_opb_single_operand_p" "rO")))
 
+       (clobber (reg:CC CC_REG))]
 
+       ""
 
+       "SUB\t%2,%0     ; subsi3"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "subsi3_raw"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (minus:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO")))
 
+       (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
 
+       ""
 
+       "SUB\t%1,%0     ; subsi3_raw"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "subsi3_eq"
 
+       [(cond_exec (eq (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (minus:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "SUB.Z\t%1,%0   ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "subsi3_ne"
 
+       [(cond_exec (ne (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (minus:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "SUB.NZ\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "subsi3_lt"
 
+       [(cond_exec (lt (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (minus:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "SUB.LT\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "subsi3_ge"
 
+       [(cond_exec (ge (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (minus:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "SUB.GE\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "subsi3_ltu"
 
+       [(cond_exec (ltu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (minus:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "SUB.C\t%1,%0   ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "subsi3_geu"
 
+       [(cond_exec (geu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (minus:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "SUB.NC\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+;
 
+;
 
+;
 
+; SUB (genzipop_long)
 
+;
 
+;
 
+;
 
+(define_insn "subsi3_off"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (minus:SI (match_operand:SI 1 "register_operand" "0")
 
+                       (plus:SI (match_operand:SI 2 "register_operand" "r")
 
+                               (match_operand:SI 3 "const_int_operand" "N"))))
 
+       (clobber (reg:CC CC_REG))]
 
+       ""
 
+       "SUB\t%2,%0     ; subsi3_off"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "subsi3_off_raw"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (minus:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N"))))
 
+       (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
 
+       ""
 
+       "SUB\t%1,%0     ; subsi3_off_raw"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "subsi3_off_eq"
 
+       [(cond_exec (eq (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (minus:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "SUB.Z\t%1,%0   ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "subsi3_off_ne"
 
+       [(cond_exec (ne (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (minus:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "SUB.NZ\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "subsi3_off_lt"
 
+       [(cond_exec (lt (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (minus:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "SUB.LT\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "subsi3_off_ge"
 
+       [(cond_exec (ge (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (minus:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "SUB.GE\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "subsi3_off_ltu"
 
+       [(cond_exec (ltu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (minus:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "SUB.C\t%1,%0   ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "subsi3_off_geu"
 
+       [(cond_exec (geu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (minus:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "SUB.NC\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+;
 
+;
 
+;
 
+; MPY (genzipop_long)
 
+;
 
+;
 
+;
 
+(define_insn "mulsi3"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (mult:SI (match_operand:SI 1 "register_operand" "0")
 
+                       (match_operand:SI 2 "zip_opb_single_operand_p" "rO")))
 
+       (clobber (reg:CC CC_REG))]
 
+       ""
 
+       "MPY\t%2,%0     ; mulsi3"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "mulsi3_raw"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (mult:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO")))
 
+       (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
 
+       ""
 
+       "MPY\t%1,%0     ; mulsi3_raw"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "mulsi3_eq"
 
+       [(cond_exec (eq (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (mult:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "MPY.Z\t%1,%0   ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "mulsi3_ne"
 
+       [(cond_exec (ne (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (mult:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "MPY.NZ\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "mulsi3_lt"
 
+       [(cond_exec (lt (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (mult:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "MPY.LT\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "mulsi3_ge"
 
+       [(cond_exec (ge (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (mult:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "MPY.GE\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "mulsi3_ltu"
 
+       [(cond_exec (ltu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (mult:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "MPY.C\t%1,%0   ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "mulsi3_geu"
 
+       [(cond_exec (geu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (mult:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "MPY.NC\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+;
 
+;
 
+;
 
+; MPY (genzipop_long)
 
+;
 
+;
 
+;
 
+(define_insn "mulsi3_off"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (mult:SI (match_operand:SI 1 "register_operand" "0")
 
+                       (plus:SI (match_operand:SI 2 "register_operand" "r")
 
+                               (match_operand:SI 3 "const_int_operand" "N"))))
 
+       (clobber (reg:CC CC_REG))]
 
+       ""
 
+       "MPY\t%2,%0     ; mulsi3_off"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "mulsi3_off_raw"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (mult:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N"))))
 
+       (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
 
+       ""
 
+       "MPY\t%1,%0     ; mulsi3_off_raw"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "mulsi3_off_eq"
 
+       [(cond_exec (eq (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (mult:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "MPY.Z\t%1,%0   ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "mulsi3_off_ne"
 
+       [(cond_exec (ne (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (mult:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "MPY.NZ\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "mulsi3_off_lt"
 
+       [(cond_exec (lt (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (mult:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "MPY.LT\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "mulsi3_off_ge"
 
+       [(cond_exec (ge (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (mult:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "MPY.GE\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "mulsi3_off_ltu"
 
+       [(cond_exec (ltu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (mult:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "MPY.C\t%1,%0   ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "mulsi3_off_geu"
 
+       [(cond_exec (geu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (mult:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "MPY.NC\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+;
 
+;
 
+;
 
+; DIVS (genzipop_long)
 
+;
 
+;
 
+;
 
+(define_insn "divsi3"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (div:SI (match_operand:SI 1 "register_operand" "0")
 
+                       (match_operand:SI 2 "zip_opb_single_operand_p" "rO")))
 
+       (clobber (reg:CC CC_REG))]
 
+       "(ZIP_DIVIDE)"
 
+       "DIVS\t%2,%0    ; divsi3"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "divsi3_raw"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (div:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO")))
 
+       (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
 
+       "(ZIP_DIVIDE)"
 
+       "DIVS\t%1,%0    ; divsi3_raw"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "divsi3_eq"
 
+       [(cond_exec (eq (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (div:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       "(ZIP_DIVIDE)"  ; Condition
 
+       "DIVS.Z\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "divsi3_ne"
 
+       [(cond_exec (ne (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (div:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       "(ZIP_DIVIDE)"  ; Condition
 
+       "DIVS.NZ\t%1,%0 ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "divsi3_lt"
 
+       [(cond_exec (lt (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (div:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       "(ZIP_DIVIDE)"  ; Condition
 
+       "DIVS.LT\t%1,%0 ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "divsi3_ge"
 
+       [(cond_exec (ge (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (div:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       "(ZIP_DIVIDE)"  ; Condition
 
+       "DIVS.GE\t%1,%0 ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "divsi3_ltu"
 
+       [(cond_exec (ltu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (div:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       "(ZIP_DIVIDE)"  ; Condition
 
+       "DIVS.C\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "divsi3_geu"
 
+       [(cond_exec (geu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (div:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       "(ZIP_DIVIDE)"  ; Condition
 
+       "DIVS.NC\t%1,%0 ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+;
 
+;
 
+;
 
+; DIVS (genzipop_long)
 
+;
 
+;
 
+;
 
+(define_insn "divsi3_off"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (div:SI (match_operand:SI 1 "register_operand" "0")
 
+                       (plus:SI (match_operand:SI 2 "register_operand" "r")
 
+                               (match_operand:SI 3 "const_int_operand" "N"))))
 
+       (clobber (reg:CC CC_REG))]
 
+       "(ZIP_DIVIDE)"
 
+       "DIVS\t%2,%0    ; divsi3_off"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "divsi3_off_raw"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (div:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N"))))
 
+       (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
 
+       "(ZIP_DIVIDE)"
 
+       "DIVS\t%1,%0    ; divsi3_off_raw"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "divsi3_off_eq"
 
+       [(cond_exec (eq (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (div:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       "(ZIP_DIVIDE)"  ; Condition
 
+       "DIVS.Z\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "divsi3_off_ne"
 
+       [(cond_exec (ne (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (div:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       "(ZIP_DIVIDE)"  ; Condition
 
+       "DIVS.NZ\t%1,%0 ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "divsi3_off_lt"
 
+       [(cond_exec (lt (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (div:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       "(ZIP_DIVIDE)"  ; Condition
 
+       "DIVS.LT\t%1,%0 ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "divsi3_off_ge"
 
+       [(cond_exec (ge (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (div:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       "(ZIP_DIVIDE)"  ; Condition
 
+       "DIVS.GE\t%1,%0 ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "divsi3_off_ltu"
 
+       [(cond_exec (ltu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (div:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       "(ZIP_DIVIDE)"  ; Condition
 
+       "DIVS.C\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "divsi3_off_geu"
 
+       [(cond_exec (geu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (div:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       "(ZIP_DIVIDE)"  ; Condition
 
+       "DIVS.NC\t%1,%0 ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+;
 
+;
 
+;
 
+; DIVU (genzipop_long)
 
+;
 
+;
 
+;
 
+(define_insn "udivsi3"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (udiv:SI (match_operand:SI 1 "register_operand" "0")
 
+                       (match_operand:SI 2 "zip_opb_single_operand_p" "rO")))
 
+       (clobber (reg:CC CC_REG))]
 
+       "(ZIP_DIVIDE)"
 
+       "DIVU\t%2,%0    ; udivsi3"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
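Both the signed (div:SI, DIVS) and unsigned (udiv:SI, DIVU) patterns carry the (ZIP_DIVIDE) condition, so presumably they are only offered to the compiler when the divide unit is configured in. The signed/unsigned split is visible at the source level; an illustrative C example, not part of the patch:

    #include <stdio.h>

    int main(void)
    {
            int          sa = -8;           /* 0xFFFFFFF8 on 32-bit two's complement */
            unsigned int ua = 0xFFFFFFF8u;

            printf("%d\n", sa / 2);         /* -4         : signed divide, DIVS  */
            printf("%u\n", ua / 2u);        /* 2147483644 : unsigned divide, DIVU */
            return 0;
    }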
 
+;
 
+;
 
+(define_insn "udivsi3_raw"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (udiv:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO")))
 
+       (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
 
+       "(ZIP_DIVIDE)"
 
+       "DIVU\t%1,%0    ; udivsi3_raw"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "udivsi3_eq"
 
+       [(cond_exec (eq (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (udiv:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       "(ZIP_DIVIDE)"  ; Condition
 
+       "DIVU.Z\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "udivsi3_ne"
 
+       [(cond_exec (ne (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (udiv:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       "(ZIP_DIVIDE)"  ; Condition
 
+       "DIVU.NZ\t%1,%0 ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "udivsi3_lt"
 
+       [(cond_exec (lt (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (udiv:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       "(ZIP_DIVIDE)"  ; Condition
 
+       "DIVU.LT\t%1,%0 ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "udivsi3_ge"
 
+       [(cond_exec (ge (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (udiv:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       "(ZIP_DIVIDE)"  ; Condition
 
+       "DIVU.GE\t%1,%0 ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "udivsi3_ltu"
 
+       [(cond_exec (ltu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (udiv:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       "(ZIP_DIVIDE)"  ; Condition
 
+       "DIVU.C\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "udivsi3_geu"
 
+       [(cond_exec (geu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (udiv:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       "(ZIP_DIVIDE)"  ; Condition
 
+       "DIVU.NC\t%1,%0 ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+;
 
+;
 
+;
 
+; DIVU (genzipop_long)
 
+;
 
+;
 
+;
 
+(define_insn "udivsi3_off"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (udiv:SI (match_operand:SI 1 "register_operand" "0")
 
+                       (plus:SI (match_operand:SI 2 "register_operand" "r")
 
+                               (match_operand:SI 3 "const_int_operand" "N"))))
 
+       (clobber (reg:CC CC_REG))]
 
+       "(ZIP_DIVIDE)"
 
+       "DIVU\t%2,%0    ; udivsi3_off"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "udivsi3_off_raw"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (udiv:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N"))))
 
+       (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
 
+       "(ZIP_DIVIDE)"
 
+       "DIVU\t%1,%0    ; udivsi3_off_raw"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "udivsi3_off_eq"
 
+       [(cond_exec (eq (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (udiv:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       "(ZIP_DIVIDE)"  ; Condition
 
+       "DIVU.Z\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "udivsi3_off_ne"
 
+       [(cond_exec (ne (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (udiv:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       "(ZIP_DIVIDE)"  ; Condition
 
+       "DIVU.NZ\t%1,%0 ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "udivsi3_off_lt"
 
+       [(cond_exec (lt (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (udiv:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       "(ZIP_DIVIDE)"  ; Condition
 
+       "DIVU.LT\t%1,%0 ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "udivsi3_off_ge"
 
+       [(cond_exec (ge (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (udiv:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       "(ZIP_DIVIDE)"  ; Condition
 
+       "DIVU.GE\t%1,%0 ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "udivsi3_off_ltu"
 
+       [(cond_exec (ltu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (udiv:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       "(ZIP_DIVIDE)"  ; Condition
 
+       "DIVU.C\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "udivsi3_off_geu"
 
+       [(cond_exec (geu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (udiv:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       "(ZIP_DIVIDE)"  ; Condition
 
+       "DIVU.NC\t%1,%0 ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+;
 
+;
 
+;
 
+; AND (genzipop_long)
 
+;
 
+;
 
+;
 
+(define_insn "andsi3"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (and:SI (match_operand:SI 1 "register_operand" "0")
 
+                       (match_operand:SI 2 "zip_opb_single_operand_p" "rO")))
 
+       (clobber (reg:CC CC_REG))]
 
+       ""
 
+       "AND\t%2,%0     ; andsi3"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "andsi3_raw"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (and:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO")))
 
+       (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
 
+       ""
 
+       "AND\t%1,%0     ; andsi3_raw"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "andsi3_eq"
 
+       [(cond_exec (eq (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (and:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "AND.Z\t%1,%0   ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "andsi3_ne"
 
+       [(cond_exec (ne (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (and:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "AND.NZ\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "andsi3_lt"
 
+       [(cond_exec (lt (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (and:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "AND.LT\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "andsi3_ge"
 
+       [(cond_exec (ge (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (and:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "AND.GE\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "andsi3_ltu"
 
+       [(cond_exec (ltu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (and:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "AND.C\t%1,%0   ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "andsi3_geu"
 
+       [(cond_exec (geu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (and:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "AND.NC\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+;
 
+;
 
+;
 
+; AND (genzipop_long)
 
+;
 
+;
 
+;
 
+(define_insn "andsi3_off"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (and:SI (match_operand:SI 1 "register_operand" "0")
 
+                       (plus:SI (match_operand:SI 2 "register_operand" "r")
 
+                               (match_operand:SI 3 "const_int_operand" "N"))))
 
+       (clobber (reg:CC CC_REG))]
 
+       ""
 
+       "AND\t%2,%0     ; andsi3_off"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "andsi3_off_raw"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (and:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N"))))
 
+       (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
 
+       ""
 
+       "AND\t%1,%0     ; andsi3_off_raw"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "andsi3_off_eq"
 
+       [(cond_exec (eq (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (and:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "AND.Z\t%1,%0   ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "andsi3_off_ne"
 
+       [(cond_exec (ne (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (and:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "AND.NZ\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "andsi3_off_lt"
 
+       [(cond_exec (lt (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (and:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "AND.LT\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "andsi3_off_ge"
 
+       [(cond_exec (ge (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (and:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "AND.GE\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "andsi3_off_ltu"
 
+       [(cond_exec (ltu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (and:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "AND.C\t%1,%0   ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "andsi3_off_geu"
 
+       [(cond_exec (geu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (and:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "AND.NC\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+;
 
+;
 
+;
 
+; OR (genzipop_long)
 
+;
 
+;
 
+;
 
+(define_insn "iorsi3"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (ior:SI (match_operand:SI 1 "register_operand" "0")
 
+                       (match_operand:SI 2 "zip_opb_single_operand_p" "rO")))
 
+       (clobber (reg:CC CC_REG))]
 
+       ""
 
+       "OR\t%2,%0      ; iorsi3"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "iorsi3_raw"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (ior:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO")))
 
+       (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
 
+       ""
 
+       "OR\t%1,%0      ; iorsi3_raw"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "iorsi3_eq"
 
+       [(cond_exec (eq (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (ior:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "OR.Z\t%1,%0    ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "iorsi3_ne"
 
+       [(cond_exec (ne (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (ior:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "OR.NZ\t%1,%0   ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "iorsi3_lt"
 
+       [(cond_exec (lt (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (ior:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "OR.LT\t%1,%0   ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "iorsi3_ge"
 
+       [(cond_exec (ge (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (ior:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "OR.GE\t%1,%0   ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "iorsi3_ltu"
 
+       [(cond_exec (ltu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (ior:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "OR.C\t%1,%0    ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "iorsi3_geu"
 
+       [(cond_exec (geu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (ior:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "OR.NC\t%1,%0   ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+;
 
+;
 
+;
 
+; OR (genzipop_long)
 
+;
 
+;
 
+;
 
+(define_insn "iorsi3_off"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (ior:SI (match_operand:SI 1 "register_operand" "0")
 
+                       (plus:SI (match_operand:SI 2 "register_operand" "r")
 
+                               (match_operand:SI 3 "const_int_operand" "N"))))
 
+       (clobber (reg:CC CC_REG))]
 
+       ""
 
+       "OR\t%2,%0      ; iorsi3_off"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "iorsi3_off_raw"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (ior:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N"))))
 
+       (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
 
+       ""
 
+       "OR\t%1,%0      ; iorsi3_off_raw"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "iorsi3_off_eq"
 
+       [(cond_exec (eq (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (ior:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "OR.Z\t%1,%0    ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "iorsi3_off_ne"
 
+       [(cond_exec (ne (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (ior:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "OR.NZ\t%1,%0   ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "iorsi3_off_lt"
 
+       [(cond_exec (lt (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (ior:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "OR.LT\t%1,%0   ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "iorsi3_off_ge"
 
+       [(cond_exec (ge (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (ior:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "OR.GE\t%1,%0   ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "iorsi3_off_ltu"
 
+       [(cond_exec (ltu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (ior:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "OR.C\t%1,%0    ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "iorsi3_off_geu"
 
+       [(cond_exec (geu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (ior:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "OR.NC\t%1,%0   ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+;
 
+;
 
+;
 
+; XOR (genzipop_long)
 
+;
 
+;
 
+;
 
+(define_insn "xorsi3"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (xor:SI (match_operand:SI 1 "register_operand" "0")
 
+                       (match_operand:SI 2 "zip_opb_single_operand_p" "rO")))
 
+       (clobber (reg:CC CC_REG))]
 
+       ""
 
+       "XOR\t%2,%0     ; xorsi3"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "xorsi3_raw"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (xor:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO")))
 
+       (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
 
+       ""
 
+       "XOR\t%1,%0     ; xorsi3_raw"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "xorsi3_eq"
 
+       [(cond_exec (eq (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (xor:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "XOR.Z\t%1,%0   ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "xorsi3_ne"
 
+       [(cond_exec (ne (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (xor:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "XOR.NZ\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "xorsi3_lt"
 
+       [(cond_exec (lt (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (xor:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "XOR.LT\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "xorsi3_ge"
 
+       [(cond_exec (ge (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (xor:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "XOR.GE\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "xorsi3_ltu"
 
+       [(cond_exec (ltu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (xor:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "XOR.C\t%1,%0   ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "xorsi3_geu"
 
+       [(cond_exec (geu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (xor:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "XOR.NC\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+;
 
+;
 
+;
 
+; XOR, register+offset operand-B form (genzipop_long)
 
+;
 
+;
 
+;
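+; The _off variants below differ only in their operand B, which is matched
+; as (plus (reg) (const_int)); this corresponds to the ZipCPU's
+; register-plus-immediate B-operand form.  The conditional forms mirror
+; the suffixed templates above.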
 
+(define_insn "xorsi3_off"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (xor:SI (match_operand:SI 1 "register_operand" "0")
 
+                       (plus:SI (match_operand:SI 2 "register_operand" "r")
 
+                               (match_operand:SI 3 "const_int_operand" "N"))))
 
+       (clobber (reg:CC CC_REG))]
 
+       ""
 
+       "XOR\t%2,%0     ; xorsi3_off"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "xorsi3_off_raw"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (xor:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N"))))
 
+       (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
 
+       ""
 
+       "XOR\t%1,%0     ; xorsi3_off_raw"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "xorsi3_off_eq"
 
+       [(cond_exec (eq (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (xor:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "XOR.Z\t%1,%0   ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "xorsi3_off_ne"
 
+       [(cond_exec (ne (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (xor:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "XOR.NZ\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "xorsi3_off_lt"
 
+       [(cond_exec (lt (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (xor:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "XOR.LT\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "xorsi3_off_ge"
 
+       [(cond_exec (ge (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (xor:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "XOR.GE\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "xorsi3_off_ltu"
 
+       [(cond_exec (ltu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (xor:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "XOR.C\t%1,%0   ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "xorsi3_off_geu"
 
+       [(cond_exec (geu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (xor:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "XOR.NC\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+;
 
+;
 
+;
 
+; ASR (genzipop_long)
 
+;
 
+;
 
+;
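+; ashrsi3 is the arithmetic (sign-replicating) right shift: for example,
+; shifting -8 right by one yields -4.  The same base/_raw/conditional
+; structure as the families above applies.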
 
+(define_insn "ashrsi3"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (ashiftrt:SI (match_operand:SI 1 "register_operand" "0")
 
+                       (match_operand:SI 2 "zip_opb_single_operand_p" "rO")))
 
+       (clobber (reg:CC CC_REG))]
 
+       ""
 
+       "ASR\t%2,%0     ; ashrsi3"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "ashrsi3_raw"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (ashiftrt:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO")))
 
+       (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
 
+       ""
 
+       "ASR\t%1,%0     ; ashrsi3_raw"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "ashrsi3_eq"
 
+       [(cond_exec (eq (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (ashiftrt:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "ASR.Z\t%1,%0   ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "ashrsi3_ne"
 
+       [(cond_exec (ne (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (ashiftrt:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "ASR.NZ\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "ashrsi3_lt"
 
+       [(cond_exec (lt (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (ashiftrt:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "ASR.LT\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "ashrsi3_ge"
 
+       [(cond_exec (ge (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (ashiftrt:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "ASR.GE\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "ashrsi3_ltu"
 
+       [(cond_exec (ltu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (ashiftrt:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "ASR.C\t%1,%0   ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "ashrsi3_geu"
 
+       [(cond_exec (geu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (ashiftrt:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "ASR.NC\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+;
 
+;
 
+;
 
+; ASR, register+offset operand-B form (genzipop_long)
 
+;
 
+;
 
+;
 
+(define_insn "ashrsi3_off"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (ashiftrt:SI (match_operand:SI 1 "register_operand" "0")
 
+                       (plus:SI (match_operand:SI 2 "register_operand" "r")
 
+                               (match_operand:SI 3 "const_int_operand" "N"))))
 
+       (clobber (reg:CC CC_REG))]
 
+       ""
 
+       "ASR\t%2,%0     ; ashrsi3_off"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "ashrsi3_off_raw"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (ashiftrt:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N"))))
 
+       (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
 
+       ""
 
+       "ASR\t%1,%0     ; ashrsi3_off_raw"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "ashrsi3_off_eq"
 
+       [(cond_exec (eq (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (ashiftrt:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "ASR.Z\t%1,%0   ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "ashrsi3_off_ne"
 
+       [(cond_exec (ne (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (ashiftrt:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "ASR.NZ\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "ashrsi3_off_lt"
 
+       [(cond_exec (lt (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (ashiftrt:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "ASR.LT\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "ashrsi3_off_ge"
 
+       [(cond_exec (ge (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (ashiftrt:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "ASR.GE\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "ashrsi3_off_ltu"
 
+       [(cond_exec (ltu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (ashiftrt:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "ASR.C\t%1,%0   ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "ashrsi3_off_geu"
 
+       [(cond_exec (geu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (ashiftrt:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "ASR.NC\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+;
 
+;
 
+;
 
+; LSL (genzipop_long)
 
+;
 
+;
 
+;
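+; ashlsi3 is the left shift, filling vacated low bits with zeros, so
+; shifting 3 left by two yields 12.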
 
+(define_insn "ashlsi3"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (ashift:SI (match_operand:SI 1 "register_operand" "0")
 
+                       (match_operand:SI 2 "zip_opb_single_operand_p" "rO")))
 
+       (clobber (reg:CC CC_REG))]
 
+       ""
 
+       "LSL\t%2,%0     ; ashlsi3"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "ashlsi3_raw"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (ashift:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO")))
 
+       (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
 
+       ""
 
+       "LSL\t%1,%0     ; ashlsi3_raw"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "ashlsi3_eq"
 
+       [(cond_exec (eq (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (ashift:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "LSL.Z\t%1,%0   ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "ashlsi3_ne"
 
+       [(cond_exec (ne (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (ashift:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "LSL.NZ\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "ashlsi3_lt"
 
+       [(cond_exec (lt (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (ashift:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "LSL.LT\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "ashlsi3_ge"
 
+       [(cond_exec (ge (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (ashift:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "LSL.GE\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "ashlsi3_ltu"
 
+       [(cond_exec (ltu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (ashift:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "LSL.C\t%1,%0   ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "ashlsi3_geu"
 
+       [(cond_exec (geu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (ashift:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "LSL.NC\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+;
 
+;
 
+;
 
+; LSL, register+offset operand-B form (genzipop_long)
 
+;
 
+;
 
+;
 
+(define_insn "ashlsi3_off"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (ashift:SI (match_operand:SI 1 "register_operand" "0")
 
+                       (plus:SI (match_operand:SI 2 "register_operand" "r")
 
+                               (match_operand:SI 3 "const_int_operand" "N"))))
 
+       (clobber (reg:CC CC_REG))]
 
+       ""
 
+       "LSL\t%2,%0     ; ashlsi3_off"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "ashlsi3_off_raw"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (ashift:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N"))))
 
+       (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
 
+       ""
 
+       "LSL\t%1,%0     ; ashlsi3_off_raw"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "ashlsi3_off_eq"
 
+       [(cond_exec (eq (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (ashift:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "LSL.Z\t%1,%0   ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "ashlsi3_off_ne"
 
+       [(cond_exec (ne (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (ashift:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "LSL.NZ\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "ashlsi3_off_lt"
 
+       [(cond_exec (lt (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (ashift:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "LSL.LT\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "ashlsi3_off_ge"
 
+       [(cond_exec (ge (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (ashift:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "LSL.GE\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "ashlsi3_off_ltu"
 
+       [(cond_exec (ltu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (ashift:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "LSL.C\t%1,%0   ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "ashlsi3_off_geu"
 
+       [(cond_exec (geu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (ashift:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "LSL.NC\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+;
 
+;
 
+;
 
+; LSR (genzipop_long)
 
+;
 
+;
 
+;
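+; lshrsi3 is the logical right shift, filling vacated high bits with
+; zeros: 0x80000000 shifted right by one yields 0x40000000.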
 
+(define_insn "lshrsi3"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (lshiftrt:SI (match_operand:SI 1 "register_operand" "0")
 
+                       (match_operand:SI 2 "zip_opb_single_operand_p" "rO")))
 
+       (clobber (reg:CC CC_REG))]
 
+       ""
 
+       "LSR\t%2,%0     ; lshrsi3"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "lshrsi3_raw"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (lshiftrt:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO")))
 
+       (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
 
+       ""
 
+       "LSR\t%1,%0     ; lshrsi3_raw"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "lshrsi3_eq"
 
+       [(cond_exec (eq (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (lshiftrt:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "LSR.Z\t%1,%0   ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "lshrsi3_ne"
 
+       [(cond_exec (ne (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (lshiftrt:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "LSR.NZ\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "lshrsi3_lt"
 
+       [(cond_exec (lt (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (lshiftrt:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "LSR.LT\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "lshrsi3_ge"
 
+       [(cond_exec (ge (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (lshiftrt:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "LSR.GE\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "lshrsi3_ltu"
 
+       [(cond_exec (ltu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (lshiftrt:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "LSR.C\t%1,%0   ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "lshrsi3_geu"
 
+       [(cond_exec (geu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (lshiftrt:SI (match_dup 0)
 
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))))]
 
+       ""      ; Condition
 
+       "LSR.NC\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+;
 
+;
 
+;
 
+; LSR, register+offset operand-B form (genzipop_long)
 
+;
 
+;
 
+;
 
+(define_insn "lshrsi3_off"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (lshiftrt:SI (match_operand:SI 1 "register_operand" "0")
 
+                       (plus:SI (match_operand:SI 2 "register_operand" "r")
 
+                               (match_operand:SI 3 "const_int_operand" "N"))))
 
+       (clobber (reg:CC CC_REG))]
 
+       ""
 
+       "LSR\t%2,%0     ; lshrsi3_off"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "lshrsi3_off_raw"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (lshiftrt:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N"))))
 
+       (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
 
+       ""
 
+       "LSR\t%1,%0     ; lshrsi3_off_raw"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "lshrsi3_off_eq"
 
+       [(cond_exec (eq (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (lshiftrt:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "LSR.Z\t%1,%0   ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "lshrsi3_off_ne"
 
+       [(cond_exec (ne (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (lshiftrt:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "LSR.NZ\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "lshrsi3_off_lt"
 
+       [(cond_exec (lt (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (lshiftrt:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "LSR.LT\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "lshrsi3_off_ge"
 
+       [(cond_exec (ge (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (lshiftrt:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "LSR.GE\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "lshrsi3_off_ltu"
 
+       [(cond_exec (ltu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (lshiftrt:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "LSR.C\t%1,%0   ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "lshrsi3_off_geu"
 
+       [(cond_exec (geu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (lshiftrt:SI (match_dup 0)
 
+                       (plus:SI (match_operand:SI 1 "register_operand" "r")
 
+                               (match_operand:SI 2 "const_int_operand" "N")))))]
 
+       ""      ; Condition
 
+       "LSR.NC\t%1,%0  ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+;
 
+;
 
+;
 
+; MPYSHI (genzipop_long)
 
+;
 
+;
 
+;
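+; smulsi_highpart returns the upper 32 bits of the signed 64-bit product
+; of its operands.  For example, the 64-bit product of -2 and 3 is
+; 0xfffffffffffffffa, so the high word produced here is 0xffffffff.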
 
+(define_insn "smulsi_highpart"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (truncate:SI (ashiftrt:DI (mult:DI
 
+                       (sign_extend:DI (match_operand:SI 1 "register_operand" "0"))
 
+                       (sign_extend:DI (match_operand:SI 2 "zip_opb_operand_p" "rO")))
 
+                       (const_int 32))))
 
+       (clobber (reg:CC CC_REG))]
 
+       "(ZIP_HAS_DI)"
 
+       "MPYSHI\t%2,%0  ; smulsi_highpart"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "smulsi_highpart_raw"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (truncate:SI (ashiftrt:DI (mult:DI
 
+                       (sign_extend:DI (match_dup 0))
 
+                       (sign_extend:DI (match_operand:SI 1 "zip_opb_operand_p" "rO")))
 
+                       (const_int 32))))
 
+       (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
 
+       "(ZIP_HAS_DI)"
 
+       "MPYSHI\t%1,%0  ; smulsi_highpart_raw"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "smulsi_highpart_eq"
 
+       [(cond_exec (eq (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (truncate:SI (ashiftrt:DI (mult:DI
 
+                       (sign_extend:DI (match_dup 0))
 
+                       (sign_extend:DI (match_operand:SI 1 "zip_opb_operand_p" "rO")))
 
+                       (const_int 32)))))]
 
+       "(ZIP_HAS_DI)"  ; Condition
 
+       "MPYSHI.Z\t%1,%0        ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "smulsi_highpart_ne"
 
+       [(cond_exec (ne (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (truncate:SI (ashiftrt:DI (mult:DI
 
+                       (sign_extend:DI (match_dup 0))
 
+                       (sign_extend:DI (match_operand:SI 1 "zip_opb_operand_p" "rO")))
 
+                       (const_int 32)))))]
 
+       "(ZIP_HAS_DI)"  ; Condition
 
+       "MPYSHI.NZ\t%1,%0       ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "smulsi_highpart_lt"
 
+       [(cond_exec (lt (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (truncate:SI (ashiftrt:DI (mult:DI
 
+                       (sign_extend:DI (match_dup 0))
 
+                       (sign_extend:DI (match_operand:SI 1 "zip_opb_operand_p" "rO")))
 
+                       (const_int 32)))))]
 
+       "(ZIP_HAS_DI)"  ; Condition
 
+       "MPYSHI.LT\t%1,%0       ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "smulsi_highpart_ge"
 
+       [(cond_exec (ge (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (truncate:SI (ashiftrt:DI (mult:DI
 
+                       (sign_extend:DI (match_dup 0))
 
+                       (sign_extend:DI (match_operand:SI 1 "zip_opb_operand_p" "rO")))
 
+                       (const_int 32)))))]
 
+       "(ZIP_HAS_DI)"  ; Condition
 
+       "MPYSHI.GE\t%1,%0       ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "smulsi_highpart_ltu"
 
+       [(cond_exec (ltu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (truncate:SI (ashiftrt:DI (mult:DI
 
+                       (sign_extend:DI (match_dup 0))
 
+                       (sign_extend:DI (match_operand:SI 1 "zip_opb_operand_p" "rO")))
 
+                       (const_int 32)))))]
 
+       "(ZIP_HAS_DI)"  ; Condition
 
+       "MPYSHI.C\t%1,%0        ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "smulsi_highpart_geu"
 
+       [(cond_exec (geu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (truncate:SI (ashiftrt:DI (mult:DI
 
+                       (sign_extend:DI (match_dup 0))
 
+                       (sign_extend:DI (match_operand:SI 1 "zip_opb_operand_p" "rO")))
 
+                       (const_int 32)))))]
 
+       "(ZIP_HAS_DI)"  ; Condition
 
+       "MPYSHI.NC\t%1,%0       ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+;
 
+;
 
+;
 
+; MPYUHI (genzipop_long)
 
+;
 
+;
 
+;
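+; umulsi_highpart returns the upper 32 bits of the unsigned 64-bit
+; product: 0x80000000 * 4 = 0x200000000, so the high word is 0x00000002.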
 
+(define_insn "umulsi_highpart"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (truncate:SI (ashiftrt:DI (mult:DI
 
+                       (zero_extend:DI (match_operand:SI 1 "register_operand" "0"))
 
+                       (zero_extend:DI (match_operand:SI 2 "zip_opb_operand_p" "rO")))
 
+                       (const_int 32))))
 
+       (clobber (reg:CC CC_REG))]
 
+       "(ZIP_HAS_DI)"
 
+       "MPYUHI\t%2,%0  ; umulsi_highpart"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "umulsi_highpart_raw"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (truncate:SI (ashiftrt:DI (mult:DI
 
+                       (zero_extend:DI (match_dup 0))
 
+                       (zero_extend:DI (match_operand:SI 1 "zip_opb_operand_p" "rO")))
 
+                       (const_int 32))))
 
+       (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
 
+       "(ZIP_HAS_DI)"
 
+       "MPYUHI\t%1,%0  ; umulsi_highpart_raw"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "set")])
 
+;
 
+;
 
+(define_insn "umulsi_highpart_eq"
 
+       [(cond_exec (eq (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (truncate:SI (ashiftrt:DI (mult:DI
 
+                       (zero_extend:DI (match_dup 0))
 
+                       (zero_extend:DI (match_operand:SI 1 "zip_opb_operand_p" "rO")))
 
+                       (const_int 32)))))]
 
+       "(ZIP_HAS_DI)"  ; Condition
 
+       "MPYUHI.Z\t%1,%0        ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "umulsi_highpart_ne"
 
+       [(cond_exec (ne (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (truncate:SI (ashiftrt:DI (mult:DI
 
+                       (zero_extend:DI (match_dup 0))
 
+                       (zero_extend:DI (match_operand:SI 1 "zip_opb_operand_p" "rO")))
 
+                       (const_int 32)))))]
 
+       "(ZIP_HAS_DI)"  ; Condition
 
+       "MPYUHI.NZ\t%1,%0       ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "umulsi_highpart_lt"
 
+       [(cond_exec (lt (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (truncate:SI (ashiftrt:DI (mult:DI
 
+                       (zero_extend:DI (match_dup 0))
 
+                       (zero_extend:DI (match_operand:SI 1 "zip_opb_operand_p" "rO")))
 
+                       (const_int 32)))))]
 
+       "(ZIP_HAS_DI)"  ; Condition
 
+       "MPYUHI.LT\t%1,%0       ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "umulsi_highpart_ge"
 
+       [(cond_exec (ge (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (truncate:SI (ashiftrt:DI (mult:DI
 
+                       (zero_extend:DI (match_dup 0))
 
+                       (zero_extend:DI (match_operand:SI 1 "zip_opb_operand_p" "rO")))
 
+                       (const_int 32)))))]
 
+       "(ZIP_HAS_DI)"  ; Condition
 
+       "MPYUHI.GE\t%1,%0       ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "umulsi_highpart_ltu"
 
+       [(cond_exec (ltu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (truncate:SI (ashiftrt:DI (mult:DI
 
+                       (zero_extend:DI (match_dup 0))
 
+                       (zero_extend:DI (match_operand:SI 1 "zip_opb_operand_p" "rO")))
 
+                       (const_int 32)))))]
 
+       "(ZIP_HAS_DI)"  ; Condition
 
+       "MPYUHI.C\t%1,%0        ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "umulsi_highpart_geu"
 
+       [(cond_exec (geu (reg:CC CC_REG) (const_int 0))
 
+                       (set (match_operand:SI 0 "register_operand" "=r")
 
+               (truncate:SI (ashiftrt:DI (mult:DI
 
+                       (zero_extend:DI (match_dup 0))
 
+                       (zero_extend:DI (match_operand:SI 1 "zip_opb_operand_p" "rO")))
 
+                       (const_int 32)))))]
 
+       "(ZIP_HAS_DI)"  ; Condition
 
+       "MPYUHI.NC\t%1,%0       ; genzip, conditional operator" ; Template
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+;
 
+;
 
+; Conditional move instructions
 
+;
 
+;
 
+;
 
+;
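+; These patterns give if-conversion a predicated register move, load,
+; immediate load, and store, so a branch around a single assignment (for
+; instance "if (a == 0) x = y;" at the C level) can collapse into a
+; compare followed by a single MOV.Z/LW.Z/LDI.Z/SW.Z.  The example is a
+; sketch only; actual register and memory operands depend on allocation.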
 
+(define_insn "cmov_eq"
 
+       [(set (match_operand:SI 0 "register_operand" "=r,r,r,Q")
 
+               (if_then_else:SI (eq (reg:CC CC_REG) (const_int 0))
 
+                       (match_operand:SI 1 "general_operand" "r,Q,i,r")
 
+                       (match_dup 0)))]
 
+       ""
 
+       "@
 
+       MOV.Z   %1,%0   ; cmov
 
+       LW.Z    %1,%0   ; cmov
 
+       LDI.Z   %1,%0   ; cmov
 
+       SW.Z    %1,%0   ; cmov"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "cmov_ne"
 
+       [(set (match_operand:SI 0 "register_operand" "=r,r,r,Q")
 
+               (if_then_else:SI (ne (reg:CC CC_REG) (const_int 0))
 
+                       (match_operand:SI 1 "general_operand" "r,Q,i,r")
 
+                       (match_dup 0)))]
 
+       ""
 
+       "@
 
+       MOV.NZ  %1,%0   ; cmov
 
+       LW.NZ   %1,%0   ; cmov
 
+       LDI.NZ  %1,%0   ; cmov
 
+       SW.NZ   %1,%0   ; cmov"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "cmov_lt"
 
+       [(set (match_operand:SI 0 "register_operand" "=r,r,r,Q")
 
+               (if_then_else:SI (lt (reg:CC CC_REG) (const_int 0))
 
+                       (match_operand:SI 1 "general_operand" "r,Q,i,r")
 
+                       (match_dup 0)))]
 
+       ""
 
+       "@
 
+       MOV.LT  %1,%0   ; cmov
 
+       LW.LT   %1,%0   ; cmov
 
+       LDI.LT  %1,%0   ; cmov
 
+       SW.LT   %1,%0   ; cmov"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "cmov_ge"
 
+       [(set (match_operand:SI 0 "register_operand" "=r,r,r,Q")
 
+               (if_then_else:SI (ge (reg:CC CC_REG) (const_int 0))
 
+                       (match_operand:SI 1 "general_operand" "r,Q,i,r")
 
+                       (match_dup 0)))]
 
+       ""
 
+       "@
 
+       MOV.GE  %1,%0   ; cmov
 
+       LW.GE   %1,%0   ; cmov
 
+       LDI.GE  %1,%0   ; cmov
 
+       SW.GE   %1,%0   ; cmov"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "cmov_ltu"
 
+       [(set (match_operand:SI 0 "register_operand" "=r,r,r,Q")
 
+               (if_then_else:SI (ltu (reg:CC CC_REG) (const_int 0))
 
+                       (match_operand:SI 1 "general_operand" "r,Q,i,r")
 
+                       (match_dup 0)))]
 
+       ""
 
+       "@
 
+       MOV.C   %1,%0   ; cmov
 
+       LW.C    %1,%0   ; cmov
 
+       LDI.C   %1,%0   ; cmov
 
+       SW.C    %1,%0   ; cmov"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "cmov_geu"
 
+       [(set (match_operand:SI 0 "register_operand" "=r,r,r,Q")
 
+               (if_then_else:SI (geu (reg:CC CC_REG) (const_int 0))
 
+                       (match_operand:SI 1 "general_operand" "r,Q,i,r")
 
+                       (match_dup 0)))]
 
+       ""
 
+       "@
 
+       MOV.NC  %1,%0   ; cmov
 
+       LW.NC   %1,%0   ; cmov
 
+       LDI.NC  %1,%0   ; cmov
 
+       SW.NC   %1,%0   ; cmov"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+; Conditional add instructions
 
+;
 
+;
 
+;
 
+;
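+; A conditional add folds "if (cond) x += y;" into one instruction, e.g.
+; "ADD.LT R2,R0" adds R2 into R0 only when the preceding compare found
+; signed less-than (sketch only; the registers are hypothetical).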
 
+(define_insn "cadd_eq"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (if_then_else:SI (eq (reg:CC CC_REG) (const_int 0))
 
+                       (plus:SI (match_dup 0)
 
+                               (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))
 
+                       (match_dup 0)))]
 
+       ""
 
+       "ADD.Z  %1,%0   ; cadd"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "cadd_ne"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (if_then_else:SI (ne (reg:CC CC_REG) (const_int 0))
 
+                       (plus:SI (match_dup 0)
 
+                               (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))
 
+                       (match_dup 0)))]
 
+       ""
 
+       "ADD.NZ %1,%0   ; cadd"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "cadd_lt"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (if_then_else:SI (lt (reg:CC CC_REG) (const_int 0))
 
+                       (plus:SI (match_dup 0)
 
+                               (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))
 
+                       (match_dup 0)))]
 
+       ""
 
+       "ADD.LT %1,%0   ; cadd"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "cadd_ge"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (if_then_else:SI (ge (reg:CC CC_REG) (const_int 0))
 
+                       (plus:SI (match_dup 0)
 
+                               (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))
 
+                       (match_dup 0)))]
 
+       ""
 
+       "ADD.GE %1,%0   ; cadd"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "cadd_ltu"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (if_then_else:SI (ltu (reg:CC CC_REG) (const_int 0))
 
+                       (plus:SI (match_dup 0)
 
+                               (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))
 
+                       (match_dup 0)))]
 
+       ""
 
+       "ADD.C  %1,%0   ; cadd"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "cadd_geu"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (if_then_else:SI (geu (reg:CC CC_REG) (const_int 0))
 
+                       (plus:SI (match_dup 0)
 
+                               (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))
 
+                       (match_dup 0)))]
 
+       ""
 
+       "ADD.NC %1,%0   ; cadd"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+; Conditional negate instructions
 
+;
 
+;
 
+;
 
+;
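+; A conditional negate enables branch-free sequences such as an absolute
+; value: compare a register against zero, then negate it with NEG.LT only
+; when it was negative (sketch; the compare itself is not shown here).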
 
+(define_insn "cneg_eq"
 
+       [(set (match_operand:SI 0 "register_operand" "+r")
 
+               (if_then_else:SI (eq (reg:CC CC_REG) (const_int 0))
 
+                       (neg:SI (match_dup 0))
 
+                       (match_dup 0)))]
 
+       ""
 
+       "NEG.Z  %0      ; cneg"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "cneg_ne"
 
+       [(set (match_operand:SI 0 "register_operand" "+r")
 
+               (if_then_else:SI (ne (reg:CC CC_REG) (const_int 0))
 
+                       (neg:SI (match_dup 0))
 
+                       (match_dup 0)))]
 
+       ""
 
+       "NEG.NZ %0      ; cneg"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "cneg_lt"
 
+       [(set (match_operand:SI 0 "register_operand" "+r")
 
+               (if_then_else:SI (lt (reg:CC CC_REG) (const_int 0))
 
+                       (neg:SI (match_dup 0))
 
+                       (match_dup 0)))]
 
+       ""
 
+       "NEG.LT %0      ; cneg"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "cneg_ge"
 
+       [(set (match_operand:SI 0 "register_operand" "+r")
 
+               (if_then_else:SI (ge (reg:CC CC_REG) (const_int 0))
 
+                       (neg:SI (match_dup 0))
 
+                       (match_dup 0)))]
 
+       ""
 
+       "NEG.GE %0      ; cneg"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "cneg_ltu"
 
+       [(set (match_operand:SI 0 "register_operand" "+r")
 
+               (if_then_else:SI (ltu (reg:CC CC_REG) (const_int 0))
 
+                       (neg:SI (match_dup 0))
 
+                       (match_dup 0)))]
 
+       ""
 
+       "NEG.C  %0      ; cneg"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "cneg_geu"
 
+       [(set (match_operand:SI 0 "register_operand" "+r")
 
+               (if_then_else:SI (geu (reg:CC CC_REG) (const_int 0))
 
+                       (neg:SI (match_dup 0))
 
+                       (match_dup 0)))]
 
+       ""
 
+       "NEG.NC %0      ; cneg"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+; Conditional not instructions
 
+;
 
+;
 
+;
 
+;
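+; A conditional NOT complements the register (XOR with -1) only when the
+; condition holds, leaving it untouched otherwise.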
 
+(define_insn "cnot_eq"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (if_then_else:SI (eq (reg:CC CC_REG) (const_int 0))
 
+                       (xor:SI (match_dup 0)
 
+                               (const_int -1))
 
+                       (match_dup 0)))]
 
+       ""
 
+       "NOT.Z  %0      ; cnot"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "cnot_ne"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (if_then_else:SI (ne (reg:CC CC_REG) (const_int 0))
 
+                       (xor:SI (match_dup 0)
 
+                               (const_int -1))
 
+                       (match_dup 0)))]
 
+       ""
 
+       "NOT.NZ %0      ; cnot"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "cnot_lt"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (if_then_else:SI (lt (reg:CC CC_REG) (const_int 0))
 
+                       (xor:SI (match_dup 0)
 
+                               (const_int -1))
 
+                       (match_dup 0)))]
 
+       ""
 
+       "NOT.LT %0      ; cnot"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "cnot_ge"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (if_then_else:SI (ge (reg:CC CC_REG) (const_int 0))
 
+                       (xor:SI (match_dup 0)
 
+                               (const_int -1))
 
+                       (match_dup 0)))]
 
+       ""
 
+       "NOT.GE %0      ; cnot"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "cnot_ltu"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (if_then_else:SI (ltu (reg:CC CC_REG) (const_int 0))
 
+                       (xor:SI (match_dup 0)
 
+                               (const_int -1))
 
+                       (match_dup 0)))]
 
+       ""
 
+       "NOT.C  %0      ; cnot"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "cnot_geu"
 
+       [(set (match_operand:SI 0 "register_operand" "=r")
 
+               (if_then_else:SI (geu (reg:CC CC_REG) (const_int 0))
 
+                       (xor:SI (match_dup 0)
 
+                               (const_int -1))
 
+                       (match_dup 0)))]
 
+       ""
 
+       "NOT.NC %0      ; cnot"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+; Conditional and instructions
 
+;
 
+;
 
+;
 
+;
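+; Conditional AND: the mask in operand 1 is applied to the destination
+; only when the condition holds.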
 
+(define_insn "cand_eq"
 
+       [(set (match_operand:SI 0 "register_operand" "+r")
 
+               (if_then_else:SI (eq (reg:CC CC_REG) (const_int 0))
 
+                       (and:SI (match_dup 0) (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))
 
+                       (match_dup 0)))]
 
+       ""
 
+       "AND.Z  %1,%0   ; cand"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "cand_ne"
 
+       [(set (match_operand:SI 0 "register_operand" "+r")
 
+               (if_then_else:SI (ne (reg:CC CC_REG) (const_int 0))
 
+                       (and:SI (match_dup 0) (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))
 
+                       (match_dup 0)))]
 
+       ""
 
+       "AND.NZ %1,%0   ; cand"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "cand_lt"
 
+       [(set (match_operand:SI 0 "register_operand" "+r")
 
+               (if_then_else:SI (lt (reg:CC CC_REG) (const_int 0))
 
+                       (and:SI (match_dup 0) (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))
 
+                       (match_dup 0)))]
 
+       ""
 
+       "AND.LT %1,%0   ; cand"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "cand_ge"
 
+       [(set (match_operand:SI 0 "register_operand" "+r")
 
+               (if_then_else:SI (ge (reg:CC CC_REG) (const_int 0))
 
+                       (and:SI (match_dup 0) (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))
 
+                       (match_dup 0)))]
 
+       ""
 
+       "AND.GE %1,%0   ; cand"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "cand_ltu"
 
+       [(set (match_operand:SI 0 "register_operand" "+r")
 
+               (if_then_else:SI (ltu (reg:CC CC_REG) (const_int 0))
 
+                       (and:SI (match_dup 0) (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))
 
+                       (match_dup 0)))]
 
+       ""
 
+       "AND.C  %1,%0   ; cand"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "cand_geu"
 
+       [(set (match_operand:SI 0 "register_operand" "+r")
 
+               (if_then_else:SI (geu (reg:CC CC_REG) (const_int 0))
 
+                       (and:SI (match_dup 0) (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))
 
+                       (match_dup 0)))]
 
+       ""
 
+       "AND.NC %1,%0   ; cand"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+; Conditional ior instructions
 
+;
 
+;
 
+;
 
+;
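+; Conditional OR: the bits of operand 1 are set in the destination only
+; when the condition holds.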
 
+(define_insn "cior_eq"
 
+       [(set (match_operand:SI 0 "register_operand" "+r")
 
+               (if_then_else:SI (eq (reg:CC CC_REG) (const_int 0))
 
+                       (ior:SI (match_dup 0) (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))
 
+                       (match_dup 0)))]
 
+       ""
 
+       "OR.Z   %1,%0   ; cior"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "cior_ne"
 
+       [(set (match_operand:SI 0 "register_operand" "+r")
 
+               (if_then_else:SI (ne (reg:CC CC_REG) (const_int 0))
 
+                       (ior:SI (match_dup 0) (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))
 
+                       (match_dup 0)))]
 
+       ""
 
+       "OR.NZ  %1,%0   ; cior"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "cior_lt"
 
+       [(set (match_operand:SI 0 "register_operand" "+r")
 
+               (if_then_else:SI (lt (reg:CC CC_REG) (const_int 0))
 
+                       (ior:SI (match_dup 0) (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))
 
+                       (match_dup 0)))]
 
+       ""
 
+       "OR.LT  %1,%0   ; cior"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "cior_ge"
 
+       [(set (match_operand:SI 0 "register_operand" "+r")
 
+               (if_then_else:SI (ge (reg:CC CC_REG) (const_int 0))
 
+                       (ior:SI (match_dup 0) (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))
 
+                       (match_dup 0)))]
 
+       ""
 
+       "OR.GE  %1,%0   ; cior"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "cior_ltu"
 
+       [(set (match_operand:SI 0 "register_operand" "+r")
 
+               (if_then_else:SI (ltu (reg:CC CC_REG) (const_int 0))
 
+                       (ior:SI (match_dup 0) (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))
 
+                       (match_dup 0)))]
 
+       ""
 
+       "OR.C   %1,%0   ; cior"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "cior_geu"
 
+       [(set (match_operand:SI 0 "register_operand" "+r")
 
+               (if_then_else:SI (geu (reg:CC CC_REG) (const_int 0))
 
+                       (ior:SI (match_dup 0) (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))
 
+                       (match_dup 0)))]
 
+       ""
 
+       "OR.NC  %1,%0   ; cior"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+; Conditional xor instructions
 
+;
 
+;
 
+;
 
+;
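+; Conditional XOR: the bits of operand 1 are toggled in the destination
+; only when the condition holds.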
 
+(define_insn "cxor_eq"
 
+       [(set (match_operand:SI 0 "register_operand" "+r")
 
+               (if_then_else:SI (eq (reg:CC CC_REG) (const_int 0))
 
+                       (xor:SI (match_dup 0) (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))
 
+                       (match_dup 0)))]
 
+       ""
 
+       "XOR.Z  %1,%0   ; cxor"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "cxor_ne"
 
+       [(set (match_operand:SI 0 "register_operand" "+r")
 
+               (if_then_else:SI (ne (reg:CC CC_REG) (const_int 0))
 
+                       (xor:SI (match_dup 0) (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))
 
+                       (match_dup 0)))]
 
+       ""
 
+       "XOR.NZ %1,%0   ; cxor"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "cxor_lt"
 
+       [(set (match_operand:SI 0 "register_operand" "+r")
 
+               (if_then_else:SI (lt (reg:CC CC_REG) (const_int 0))
 
+                       (xor:SI (match_dup 0) (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))
 
+                       (match_dup 0)))]
 
+       ""
 
+       "XOR.LT %1,%0   ; cxor"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "cxor_ge"
 
+       [(set (match_operand:SI 0 "register_operand" "+r")
 
+               (if_then_else:SI (ge (reg:CC CC_REG) (const_int 0))
 
+                       (xor:SI (match_dup 0) (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))
 
+                       (match_dup 0)))]
 
+       ""
 
+       "XOR.GE %1,%0   ; cxor"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "cxor_ltu"
 
+       [(set (match_operand:SI 0 "register_operand" "+r")
 
+               (if_then_else:SI (ltu (reg:CC CC_REG) (const_int 0))
 
+                       (xor:SI (match_dup 0) (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))
 
+                       (match_dup 0)))]
 
+       ""
 
+       "XOR.C  %1,%0   ; cxor"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
 
+;
 
+;
 
+(define_insn "cxor_geu"
 
+       [(set (match_operand:SI 0 "register_operand" "+r")
 
+               (if_then_else:SI (geu (reg:CC CC_REG) (const_int 0))
 
+                       (xor:SI (match_dup 0) (match_operand:SI 1 "zip_opb_single_operand_p" "rO"))
 
+                       (match_dup 0)))]
 
+       ""
 
+       "XOR.NC %1,%0   ; cxor"
 
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
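+;
+; As a rough illustration of how these conditional ALU patterns get used:
+; if-conversion can turn a short guarded update such as the C fragment
+;
+;      if (a == b)
+;              x |= mask;      /* names here are illustrative only */
+;
+; into a branch-free CMP a,b followed by OR.Z mask,x, leaving the flags
+; untouched afterwards, just as the "ccresult" attribute advertises.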
 
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/config/zip/zip-peephole.md gcc-6.2.0-zip/gcc/config/zip/zip-peephole.md
 
--- gcc-6.2.0/gcc/config/zip/zip-peephole.md    1969-12-31 19:00:00.000000000 -0500
 
+++ gcc-6.2.0-zip/gcc/config/zip/zip-peephole.md        2017-03-01 15:46:02.440221158 -0500
 
@@ -0,0 +1,768 @@
 
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
+;;
 
+;; Filename:   zip-peephole.md
 
+;;
 
+;; Project:    Zip CPU -- a small, lightweight, RISC CPU soft core
 
+;;
 
+;; Purpose:    This is a machine description of a variety of peephole
 
+;;             optimizations which can be applied to the ZipCPU RTL
 
+;;     representation.
 
+;;
 
+;;
 
+;; Creator:    Dan Gisselquist, Ph.D.
 
+;;             Gisselquist Technology, LLC
 
+;;
 
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
+;;
 
+;; Copyright (C) 2015,2017, Gisselquist Technology, LLC
 
+;;
 
+;; This program is free software (firmware): you can redistribute it and/or
 
+;; modify it under the terms of  the GNU General Public License as published
 
+;; by the Free Software Foundation, either version 3 of the License, or (at
 
+;; your option) any later version.
 
+;;
 
+;; This program is distributed in the hope that it will be useful, but WITHOUT
 
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 
+;; FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 
+;; for more details.
 
+;;
 
+;; License:    GPL, v3, as defined and found on www.gnu.org,
 
+;;             http://www.gnu.org/licenses/gpl.html
 
+;;
 
+;;
 
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
+;;
 
+;
 
+;
 
+;
 
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
+;;
 
+;; Peephole optimizations
 
+;;
 
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
+;
 
+;
 
+;
 
+;
 
+;
 
+; Match:
 
+;      CMP     R1,R0
 
+;      BGTU    lbl
 
+; Transform to:
 
+;      CMP     R0,R1
 
+;      BC      lbl
 
+;
 
+(define_peephole2
 
+       [(set (reg:CC CC_REG) (compare:CC
 
+               (match_operand:SI 0 "register_operand")
 
+               (match_operand:SI 1 "register_operand")))
 
+       (set (pc) (if_then_else (gtu (reg:CC CC_REG) (const_int 0))
 
+                       (label_ref (match_operand 2 ""))
 
+                       (pc)))]
 
+       "(ZIP_PEEPHOLE)"
 
+       [(set (reg:CC CC_REG) (compare:CC (match_dup 1) (match_dup 0)))
 
+       (set (pc) (if_then_else (ltu (reg:CC CC_REG) (const_int 0))
 
+                       (label_ref (match_dup 2))
 
+                       (pc)))]
 
+       "")
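+;
+; (Why the rewrite above is safe: for unsigned values R0 > R1 is the same
+; test as R1 < R0, so swapping the compare operands lets the branch use the
+; carry flag, BC, which this port uses for unsigned less-than.)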
 
+(define_peephole2
 
+       [(match_scratch:SI 3 "=r")
 
+       (set (reg:CC CC_REG) (compare:CC
 
+               (match_operand:SI 0 "register_operand")
 
+               (match_operand 1 "const_int_operand")))
 
+       (match_dup 3)
 
+       (set (pc) (if_then_else (gtu (reg:CC CC_REG) (const_int 0))
 
+                       (label_ref (match_operand 2 ""))
 
+                       (pc)))]
 
+       "(ZIP_PEEPHOLE)"
 
+       [(set (match_dup 3) (match_dup 1))
 
+       (set (reg:CC CC_REG) (compare:CC (match_dup 3) (match_dup 0)))
 
+       (set (pc) (if_then_else (ltu (reg:CC CC_REG) (const_int 0))
 
+                       (label_ref (match_dup 2))
 
+                       (pc)))]
 
+       "")
 
+;(define_peephole2
 
+;      [(set (reg:CC CC_REG) (compare:CC
 
+;              (match_operand:SI 0 "register_operand")
 
+;              (match_operand 1 "const_int_operand")))
 
+;      (set (pc) (if_then_else (gtu (reg:CC CC_REG) (const_int 0))
 
+;                      (label_ref (match_operand 2 ""))
 
+;                      (pc)))]
 
+;      ""
 
+;      [(set (reg:CC CC_REG) (compare:CC (match_dup 0) (match_dup 1)))
 
+;      (set (pc) (if_then_else (geu (reg:CC CC_REG) (const_int 0))
 
+;                      (label_ref (match_dup 2))
 
+;                      (pc)))]
 
+;      "operands[1] = GEN_INT(INTVAL(operands[1])-1);")
 
+;
 
+;
 
+; Match:
 
+;      CMP     R1,R0
 
+;      BGEU    lbl
 
+; Transform to:
 
+;      CMP     1(R0),R1
 
+;      BC      lbl
 
+;
 
+(define_peephole2
 
+       [(set (reg:CC CC_REG) (compare:CC
 
+               (match_operand:SI 0 "register_operand")
 
+               (match_operand:SI 1 "register_operand")))
 
+       (set (pc) (if_then_else (geu (reg:CC CC_REG) (const_int 0))
 
+                       (label_ref (match_operand 2 ""))
 
+                       (pc)))]
 
+       "(ZIP_PEEPHOLE)"
 
+       [(set (reg:CC CC_REG) (compare:CC
 
+               (match_dup 1) (plus:SI (match_dup 0) (const_int 1))))
 
+       (set (pc) (if_then_else (ltu (reg:CC CC_REG) (const_int 0))
 
+                       (label_ref (match_dup 2))
 
+                       (pc)))]
 
+       "")
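+;
+; (Here R0 >= R1, taken unsigned, is the same test as R1 < R0 + 1 so long
+; as R0 + 1 does not wrap, which is why the rewritten compare adds one and
+; the branch again tests the carry flag.)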
 
+;
 
+;
 
+; Match:
 
+;      CMP     R1,R0
 
+;      BGE     lbl
 
+; Transform to:
 
+;      CMP     1(R0),R1
 
+;      BLT     lbl
 
+; ... why, when we already support a BGE instruction?
 
+;(define_peephole2
 
+       ;[(set (reg:CC CC_REG) (compare:CC
 
+               ;(match_operand:SI 0 "register_operand")
 
+               ;(match_operand:SI 1 "register_operand")))
 
+       ;(set (pc) (if_then_else (ge (reg:CC CC_REG) (const_int 0))
 
+                       ;(label_ref (match_operand 2 ""))
 
+                       ;(pc)))]
 
+       ;"(ZIP_PEEPHOLE)"
 
+       ;[(set (reg:CC CC_REG) (compare:CC (match_dup 1)
+                       ;(plus:SI (match_dup 0) (const_int 1))))
+                       ;(plus:SI (match_dup 0) (const_int 1))))
+       ;(set (pc) (if_then_else (lt (reg:CC CC_REG) (const_int 0))
+       ;(set (pc) (if_then_else (lt (reg:CC CC_REG) (const_int 0))
+                       ;(label_ref (match_dup 2))
+                       ;(label_ref (match_dup 2))
+                       ;(pc)))]
+                       ;(pc)))]
+       ;"")
+       ;"")
+;
+;
+;
+;
+; Match:
+; Match:
+;      CMP     R1,R0
+;      CMP     R1,R0
+;      BLEU    lbl
+;      BLEU    lbl
+; Transform to:
+; Transform to:
+;      CMP     1(R1),R0
+;      CMP     1(R1),R0
+;      BC      lbl
+;      BC      lbl
+;
+;
+(define_peephole2
+(define_peephole2
+       [(set (reg:CC CC_REG) (compare:CC
+       [(set (reg:CC CC_REG) (compare:CC
+               (match_operand:SI 0 "register_operand" "")
+               (match_operand:SI 0 "register_operand" "")
+               (match_operand:SI 1 "register_operand" "")))
+               (match_operand:SI 1 "register_operand" "")))
+       (set (pc) (if_then_else (leu (reg:CC CC_REG) (const_int 0))
+       (set (pc) (if_then_else (leu (reg:CC CC_REG) (const_int 0))
+                       (label_ref (match_operand 2 "" ""))
+                       (label_ref (match_operand 2 "" ""))
+                       (pc)))]
+                       (pc)))]
+       "(ZIP_PEEPHOLE)"
+       "(ZIP_PEEPHOLE)"
+       [(set (reg:CC CC_REG) (compare:CC (match_dup 0)
+       [(set (reg:CC CC_REG) (compare:CC (match_dup 0)
+                       (plus:SI (match_dup 1) (const_int 1))))
+                       (plus:SI (match_dup 1) (const_int 1))))
+       (set (pc) (if_then_else (ltu (reg:CC CC_REG) (const_int 0))
+       (set (pc) (if_then_else (ltu (reg:CC CC_REG) (const_int 0))
+                       (label_ref (match_dup 2))
+                       (label_ref (match_dup 2))
+                       (pc)))]
+                       (pc)))]
+       "")
+       "")
+;
+;
+;
+;
+;
+;
+; Match:
+; Match:
+;      CMP     R1,R0
+;      CMP     R1,R0
+;      BLE     lbl
+;      BLE     lbl
+; Transform to:
+; Transform to:
+;      CMP     1(R1),R0
+;      CMP     1(R1),R0
+;      BLT     lbl
+;      BLT     lbl
+;
+;
+(define_peephole2
+(define_peephole2
+       [(set (reg:CC CC_REG)
+       [(set (reg:CC CC_REG)
+               (compare:CC (match_operand:SI 0 "register_operand" "")
+               (compare:CC (match_operand:SI 0 "register_operand" "")
+                       (match_operand:SI 1 "const_int_operand" "")))
+                       (match_operand:SI 1 "const_int_operand" "")))
+       (set (pc) (if_then_else (le (reg:CC CC_REG) (const_int 0))
+       (set (pc) (if_then_else (le (reg:CC CC_REG) (const_int 0))
+                       (label_ref (match_operand 2 "" ""))
+                       (label_ref (match_operand 2 "" ""))
+                       (pc)))]
+                       (pc)))]
+       "(ZIP_PEEPHOLE)&&(INTVAL(operands[1])<((1<<17)-2))"
+       "(ZIP_PEEPHOLE)&&(INTVAL(operands[1])<((1<<17)-2))"
+       [(set (reg:CC CC_REG) (compare:CC (match_dup 0) (match_dup 1)))
+       [(set (reg:CC CC_REG) (compare:CC (match_dup 0) (match_dup 1)))
+       (set (pc) (if_then_else (lt (reg:CC CC_REG) (const_int 0))
+       (set (pc) (if_then_else (lt (reg:CC CC_REG) (const_int 0))
+                       (label_ref (match_dup 2))
+                       (label_ref (match_dup 2))
+                       (pc)))]
+                       (pc)))]
+       "operands[1] = GEN_INT(INTVAL(operands[1])+1);")
+       "operands[1] = GEN_INT(INTVAL(operands[1])+1);")
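+;
+; (In this form the second compare operand is a constant c, and for
+; integers x <= c is the same test as x < c + 1, so the constant is bumped
+; by one and the branch becomes a plain less-than.  The (1<<17)-2 bound is
+; presumably there to keep c + 1 inside the immediate range this port
+; accepts; compare the +/-(1<<17) checks used further down in this file.)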
+;
+;
+; Match:
+; Match:
+;      CMP     R1,R0
+;      CMP     R1,R0
+;      BLEU    lbl
+;      BLEU    lbl
+; Transform to:
+; Transform to:
+;      CMP     1(R1),R0
+;      CMP     1(R1),R0
+;      BC(LTU) lbl
+;      BC(LTU) lbl
+;
+;
+(define_peephole2
+(define_peephole2
+       [(set (reg:CC CC_REG)
+       [(set (reg:CC CC_REG)
+               (compare:CC (match_operand:SI 0 "register_operand" "")
+               (compare:CC (match_operand:SI 0 "register_operand" "")
+                       (match_operand:SI 1 "const_int_operand" "")))
+                       (match_operand:SI 1 "const_int_operand" "")))
+       (set (pc) (if_then_else (leu (reg:CC CC_REG) (const_int 0))
+       (set (pc) (if_then_else (leu (reg:CC CC_REG) (const_int 0))
+                       (label_ref (match_operand 2 "" ""))
+                       (label_ref (match_operand 2 "" ""))
+                       (pc)))]
+                       (pc)))]
+       "(ZIP_PEEPHOLE)&&(INTVAL(operands[1])<((1<<17)-2))"
+       "(ZIP_PEEPHOLE)&&(INTVAL(operands[1])<((1<<17)-2))"
+       [(set (reg:CC CC_REG) (compare:CC (match_dup 0) (match_dup 1)))
+       [(set (reg:CC CC_REG) (compare:CC (match_dup 0) (match_dup 1)))
+       (set (pc) (if_then_else (ltu (reg:CC CC_REG) (const_int 0))
+       (set (pc) (if_then_else (ltu (reg:CC CC_REG) (const_int 0))
+                       (label_ref (match_dup 2))
+                       (label_ref (match_dup 2))
+                       (pc)))]
+                       (pc)))]
+       "operands[1] = GEN_INT(INTVAL(operands[1])+1);")
+       "operands[1] = GEN_INT(INTVAL(operands[1])+1);")
+;
+;
+;
+;
+;
+;
+;
+;
+; Match:
+; Match:
+;      (parallel [(set () ()) (clobber (CC))])
+;      (parallel [(set () ()) (clobber (CC))])
+;      (compare () ())
+;      (compare () ())
+; Transform to:
+; Transform to:
+;      (parallel [(set () ()) (set (CC) (0))]
+;      (parallel [(set () ()) (set (CC) (0))]
+;      (compare () ())
+;      (compare () ())
+;
+;
+(define_peephole2
+(define_peephole2
+       [(parallel [(set (match_operand:SI 0 "") (match_operand:SI 1 ""))
+       [(parallel [(set (match_operand:SI 0 "") (match_operand:SI 1 ""))
+               (clobber (reg:CC CC_REG))])
+               (clobber (reg:CC CC_REG))])
+       (set (reg:CC CC_REG) (compare:CC (match_operand:SI 2 "")
+       (set (reg:CC CC_REG) (compare:CC (match_operand:SI 2 "")
+                       (match_operand:SI 3 "")))]
+                       (match_operand:SI 3 "")))]
+       "(ZIP_PEEPHOLE)&&zip_insn_sets_cc(insn)"
+       "(ZIP_PEEPHOLE)&&zip_insn_sets_cc(insn)"
+       [(parallel [(set (match_dup 0) (match_dup 1))
+       [(parallel [(set (match_dup 0) (match_dup 1))
+               (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+               (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+       (set (reg:CC CC_REG) (compare:CC (match_dup 2) (match_dup 3)))]
+       (set (reg:CC CC_REG) (compare:CC (match_dup 2) (match_dup 3)))]
+       "")
+       "")
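+;
+; (The point of this rewrite: when the first instruction already leaves the
+; condition codes describing its result, per zip_insn_sets_cc, its bare CC
+; clobber is re-expressed as an explicit compare of the result with zero.
+; That makes the CC value visible to the peepholes below, which can then
+; delete a redundant CMP 0 against the same register.)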
+;
+;
+;
+;
+;
+;
+; Match:
+; Match:
+;      (parallel [(set () ()) (clobber (CC))])
+;      (parallel [(set () ()) (clobber (CC))])
+;      (set () ())
+;      (set () ())
+;      (compare () ())
+;      (compare () ())
+; Transform to:
+; Transform to:
+;      (parallel [(set () ()) (set (CC) (0))]
+;      (parallel [(set () ()) (set (CC) (0))]
+;      (set () ())
+;      (set () ())
+;      (compare () ())
+;      (compare () ())
+;
+;
+(define_peephole2
+(define_peephole2
+       [(parallel [(set (match_operand:SI 0 "") (match_operand:SI 1 ""))
+       [(parallel [(set (match_operand:SI 0 "") (match_operand:SI 1 ""))
+               (clobber (reg:CC CC_REG))])
+               (clobber (reg:CC CC_REG))])
+       (set (match_operand 2 "") (match_operand 3 ""))
+       (set (match_operand 2 "") (match_operand 3 ""))
+       (set (reg:CC CC_REG) (compare:CC (match_operand:SI 4 "")
+       (set (reg:CC CC_REG) (compare:CC (match_operand:SI 4 "")
+                       (match_operand:SI 5 "")))]
+                       (match_operand:SI 5 "")))]
+       "(ZIP_PEEPHOLE)&&(zip_insn_sets_cc(insn))&&((!REG_P(operands[2]))||(REGNO(operands[2])!=CC_REG))"
+       "(ZIP_PEEPHOLE)&&(zip_insn_sets_cc(insn))&&((!REG_P(operands[2]))||(REGNO(operands[2])!=CC_REG))"
+       [(parallel [(set (match_dup 0) (match_dup 1))
+       [(parallel [(set (match_dup 0) (match_dup 1))
+               (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+               (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+       (set (match_dup 2) (match_dup 3))
+       (set (match_dup 2) (match_dup 3))
+       (set (reg:CC CC_REG) (compare:CC (match_dup 4) (match_dup 5)))]
+       (set (reg:CC CC_REG) (compare:CC (match_dup 4) (match_dup 5)))]
+       "")
+       "")
+;
+;
+;
+;
+;
+;
+; Match:
+; Match:
+;      MOV A(R1),R3
+;      MOV A(R1),R3
+;      CMP R3,R0
+;      CMP R3,R0
+;      (R3 is dead)
+;      (R3 is dead)
+; Transform to:
+; Transform to:
+;      CMP A(R1),R0
+;      CMP A(R1),R0
+;
+;
+(define_peephole2
+(define_peephole2
+       [(set (match_operand:SI 3 "register_operand")
+       [(set (match_operand:SI 3 "register_operand")
+               (plus:SI (match_operand:SI 1 "register_operand")
+               (plus:SI (match_operand:SI 1 "register_operand")
+                       (match_operand:SI 2 "zip_mvimm_operand_p")))
+                       (match_operand:SI 2 "zip_mvimm_operand_p")))
+       (set (reg:CC CC_REG)
+       (set (reg:CC CC_REG)
+               (compare:CC (match_operand:SI 0 "register_operand")
+               (compare:CC (match_operand:SI 0 "register_operand")
+                       (match_dup 3)))]
+                       (match_dup 3)))]
+       "(ZIP_PEEPHOLE)&&peep2_regno_dead_p(2, REGNO(operands[3]))"
+       "(ZIP_PEEPHOLE)&&peep2_regno_dead_p(2, REGNO(operands[3]))"
+       [(set (reg:CC CC_REG) (compare:CC (match_dup 0)
+       [(set (reg:CC CC_REG) (compare:CC (match_dup 0)
+               (plus:SI (match_dup 1) (match_dup 2))))]
+               (plus:SI (match_dup 1) (match_dup 2))))]
+       "")
+       "")
+;
+;
+;
+;
+; Match:
+; Match:
+;      ALU OpB,R0
+;      ALU OpB,R0
+;      CMP 0,R0
+;      CMP 0,R0
+; Transform to:
+; Transform to:
+;      ALU OpB,R0
+;      ALU OpB,R0
+;
+;
+(define_peephole2
+(define_peephole2
+       [(parallel [(set (match_operand:SI 0 "register_operand")
+       [(parallel [(set (match_operand:SI 0 "register_operand")
+                       (match_operand:SI 1 ""))
+                       (match_operand:SI 1 ""))
+               (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+               (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+       (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
+       (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
+       "(ZIP_PEEPHOLE)"
+       "(ZIP_PEEPHOLE)"
+       [(parallel [(set (match_dup 0) (match_dup 1))
+       [(parallel [(set (match_dup 0) (match_dup 1))
+               (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+               (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+       ])
+       ])
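+;
+; (Once the ALU instruction is known to have left CC holding "result
+; compared against zero", an immediately following CMP 0 of the same
+; register adds nothing and is simply dropped.)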
+;
+;
+;
+;
+; Match:
+; Match:
+;      ALU OpB,R0
+;      ALU OpB,R0
+;      MOV R1,R2       // Can be LDI, LOD, STO, etc.
+;      MOV R1,R2       // Can be LDI, LOD, STO, etc.
+;      CMP 0,R0
+;      CMP 0,R0
+; Transform to:
+; Transform to:
+;      ALU OpB,R0
+;      ALU OpB,R0
+;      MOV R0,R1
+;      MOV R0,R1
+;
+;
+(define_peephole2
+(define_peephole2
+       [(parallel [(set (match_operand:SI 0 "register_operand")
+       [(parallel [(set (match_operand:SI 0 "register_operand")
+                       (match_operand:SI 1 ""))
+                       (match_operand:SI 1 ""))
+               (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+               (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+       (set (match_operand:SI 2 "nonimmediate_operand") (match_operand:SI 3 ""))
+       (set (match_operand:SI 2 "nonimmediate_operand") (match_operand:SI 3 ""))
+       (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
+       (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]
+       "(ZIP_PEEPHOLE)&&((!REG_P(operands[2]))||((REGNO(operands[2])!=REGNO(operands[0]))&&((REGNO(operands[2])>=FIRST_PSEUDO_REGISTER)||(REGNO(operands[2])<CC_REG))))"
+       "(ZIP_PEEPHOLE)&&((!REG_P(operands[2]))||((REGNO(operands[2])!=REGNO(operands[0]))&&((REGNO(operands[2])>=FIRST_PSEUDO_REGISTER)||(REGNO(operands[2])<CC_REG))))"
+       [(parallel [(set (match_dup 0) (match_dup 1))
+       [(parallel [(set (match_dup 0) (match_dup 1))
+               (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+               (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+       (set (match_dup 2) (match_dup 3))
+       (set (match_dup 2) (match_dup 3))
+       ])
+       ])
+;
+;
+;
+;
+; Match:
+; Match:
+;      ALU OpB,R0
+;      ALU OpB,R0
+;      MOV R0,R1
+;      MOV R0,R1
+;      CMP 0,R1
+;      CMP 0,R1
+; Transform to:
+; Transform to:
+;      ALU OpB,R0
+;      ALU OpB,R0
+;      MOV R0,R1
+;      MOV R0,R1
+;
+;
+(define_peephole2
+(define_peephole2
+       [(parallel [(set (match_operand:SI 0 "register_operand")
+       [(parallel [(set (match_operand:SI 0 "register_operand")
+                       (match_operand:SI 1 ""))
+                       (match_operand:SI 1 ""))
+               (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+               (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+       (set (match_operand:SI 2 "register_operand") (match_dup 0))
+       (set (match_operand:SI 2 "register_operand") (match_dup 0))
+       (set (reg:CC CC_REG) (compare:CC (match_dup 2) (const_int 0)))]
+       (set (reg:CC CC_REG) (compare:CC (match_dup 2) (const_int 0)))]
+       "(ZIP_PEEPHOLE)"
+       "(ZIP_PEEPHOLE)"
+       [(parallel [(set (match_dup 0) (match_dup 1))
+       [(parallel [(set (match_dup 0) (match_dup 1))
+               (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+               (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+       (set (match_dup 2) (match_dup 0))
+       (set (match_dup 2) (match_dup 0))
+       ])
+       ])
+;
+;
+;
+;
+; Match:
+; Match:
+;      MOV R1,R0
+;      MOV R1,R0
+;      ADD $x,R0
+;      ADD $x,R0
+;      (CCREG is dead, and x is within range ...)
+;      (CCREG is dead, and x is within range ...)
+; Transform to:
+; Transform to:
+;      MOV $x(R1),R0
+;      MOV $x(R1),R0
+(define_peephole2
+(define_peephole2
+       [(set (match_operand:SI 0 "register_operand")
+       [(set (match_operand:SI 0 "register_operand")
+               (match_operand:SI 1 "register_operand"))
+               (match_operand:SI 1 "register_operand"))
+       (parallel [(set (match_dup 0) (plus:SI (match_dup 0)
+       (parallel [(set (match_dup 0) (plus:SI (match_dup 0)
+                               (match_operand 2 "zip_mvimm_operand_p")))
+                               (match_operand 2 "zip_mvimm_operand_p")))
+               (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+               (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+       ]
+       ]
+       "(ZIP_PEEPHOLE)&&(peep2_regno_dead_p(2,CC_REG))"
+       "(ZIP_PEEPHOLE)&&(peep2_regno_dead_p(2,CC_REG))"
+       [(set (match_dup 0) (plus:SI (match_dup 1) (match_dup 2)))])
+       [(set (match_dup 0) (plus:SI (match_dup 1) (match_dup 2)))])
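+;
+; (In other words, the register copy and the constant add collapse into the
+; single register-plus-offset move, MOV $x(R1),R0; this is only done when
+; the condition codes the ADD would have set are known to be dead, per the
+; peep2_regno_dead_p(2,CC_REG) test above.)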
+;
+;
+; Match:
+; Match:
+;      MOV A(R0),R0
+;      MOV A(R0),R0
+;      ADD $x,R1
+;      ADD $x,R1
+;      (CCREG is dead, and (A+x) is within range ...)
+;      (CCREG is dead, and (A+x) is within range ...)
+; Transform to:
+; Transform to:
+;      MOV $x(R1),R0
+;      MOV $x(R1),R0
+;
+;
+(define_peephole2
+(define_peephole2
+       [(set (match_operand:SI 0 "register_operand")
+       [(set (match_operand:SI 0 "register_operand")
+               (plus:SI (match_operand:SI 1 "register_operand")
+               (plus:SI (match_operand:SI 1 "register_operand")
+                       (match_operand 2 "zip_mvimm_operand_p")))
+                       (match_operand 2 "zip_mvimm_operand_p")))
+       (parallel [(set (match_dup 0) (plus:SI (match_dup 0)
+       (parallel [(set (match_dup 0) (plus:SI (match_dup 0)
+                               (match_operand 3 "zip_mvimm_operand_p")))
+                               (match_operand 3 "zip_mvimm_operand_p")))
+               (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+               (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+       ]
+       ]
+       "(ZIP_PEEPHOLE)&&(peep2_regno_dead_p(2,CC_REG))
+       "(ZIP_PEEPHOLE)&&(peep2_regno_dead_p(2,CC_REG))
+               &&(INTVAL(operands[2])+INTVAL(operands[3])<((1<<17)))
+               &&(INTVAL(operands[2])+INTVAL(operands[3])<((1<<17)))
+               &&(INTVAL(operands[2])+INTVAL(operands[3])>=-(1<<17))"
+               &&(INTVAL(operands[2])+INTVAL(operands[3])>=-(1<<17))"
+       [(set (match_dup 0) (plus:SI (match_dup 1) (match_dup 2)))]
+       [(set (match_dup 0) (plus:SI (match_dup 1) (match_dup 2)))]
+       "operands[2]=GEN_INT(INTVAL(operands[2])+INTVAL(operands[3]));")
+       "operands[2]=GEN_INT(INTVAL(operands[2])+INTVAL(operands[3]));")
+;
+;
+;
+;
+;
+;
+; Match:
+; Match:
+;      ADD $x,R0
+;      ADD $x,R0
+;      MOV R0,R1
+;      MOV R0,R1
+;      (CCREG is dead, and R0 is dead)
+;      (CCREG is dead, and R0 is dead)
+; Transform to:
+; Transform to:
+;      MOV (A+$x)(R0),R1
+;      MOV (A+$x)(R0),R1
+; ... again, how do I build this plus?
+; ... again, how do I build this plus?
+;
+;
+(define_peephole2
+(define_peephole2
+       [(parallel [(set (match_operand:SI 0 "register_operand")
+       [(parallel [(set (match_operand:SI 0 "register_operand")
+                       (plus:SI (match_dup 0)
+                       (plus:SI (match_dup 0)
+                               (match_operand 1 "zip_mvimm_operand_p")))
+                               (match_operand 1 "zip_mvimm_operand_p")))
+               (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+               (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+       (set (match_operand:SI 2 "register_operand") (match_dup 0))]
+       (set (match_operand:SI 2 "register_operand") (match_dup 0))]
+       "(ZIP_PEEPHOLE)&&(peep2_regno_dead_p(2, REGNO(operands[0])))&&(peep2_regno_dead_p(2,CC_REG))"
+       "(ZIP_PEEPHOLE)&&(peep2_regno_dead_p(2, REGNO(operands[0])))&&(peep2_regno_dead_p(2,CC_REG))"
+       [(set (match_dup 2) (plus:SI (match_dup 0) (match_dup 1)))])
+       [(set (match_dup 2) (plus:SI (match_dup 0) (match_dup 1)))])
+;
+;
+;
+;
+;
+;
+; Match:
+; Match:
+;      ADD $x,R0
+;      ADD $x,R0
+;      MOV A(R0),R1
+;      MOV A(R0),R1
+;      (CCREG is dead, and R0 is dead)
+;      (CCREG is dead, and R0 is dead)
+; Transform to:
+; Transform to:
+;      MOV (A+$x)(R0),R1
+;      MOV (A+$x)(R0),R1
+;
+;
+(define_peephole2
+(define_peephole2
+       [(parallel [
+       [(parallel [
+               (set (match_operand:SI 0 "register_operand")
+               (set (match_operand:SI 0 "register_operand")
+                       (plus:SI (match_dup 0)
+                       (plus:SI (match_dup 0)
+                               (match_operand 1 "zip_mvimm_operand_p")))
+                               (match_operand 1 "zip_mvimm_operand_p")))
+               (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+               (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+       (set (match_operand:SI 2 "register_operand")
+       (set (match_operand:SI 2 "register_operand")
+               (plus:SI (match_dup 0)
+               (plus:SI (match_dup 0)
+                       (match_operand 3 "zip_mvimm_operand_p")))
+                       (match_operand 3 "zip_mvimm_operand_p")))
+       ]
+       ]
+       "(ZIP_PEEPHOLE)&&(peep2_regno_dead_p(2,CC_REG))
+       "(ZIP_PEEPHOLE)&&(peep2_regno_dead_p(2,CC_REG))
+               &&(peep2_regno_dead_p(1,REGNO(operands[0])))
+               &&(peep2_regno_dead_p(1,REGNO(operands[0])))
+               &&(INTVAL(operands[1])+INTVAL(operands[3])<((1<<17)))
+               &&(INTVAL(operands[1])+INTVAL(operands[3])<((1<<17)))
+               &&(INTVAL(operands[1])+INTVAL(operands[3])>=-(1<<17))"
+               &&(INTVAL(operands[1])+INTVAL(operands[3])>=-(1<<17))"
+       [(set (match_dup 0) (plus:SI (match_dup 2) (match_dup 3)))]
+       [(set (match_dup 0) (plus:SI (match_dup 2) (match_dup 3)))]
+       "operands[3]=GEN_INT(INTVAL(operands[1])+INTVAL(operands[3]));")
+       "operands[3]=GEN_INT(INTVAL(operands[1])+INTVAL(operands[3]));")
+;
+;
+;
+;
+;
+;
+; Match:
+; Match:
+;      ADD     $x,R0
+;      ADD     $x,R0
+;      ADD     R0,Rn
+;      ADD     R0,Rn
+;      (R0 is dead, if R0 is not Rn)
+;      (R0 is dead, if R0 is not Rn)
+; Transform to:
+; Transform to:
+;      ADD     $x(R0),Rn
+;      ADD     $x(R0),Rn
+;
+;
+(define_peephole2
+(define_peephole2
+       [(parallel [(set (match_operand:SI 0 "register_operand")
+       [(parallel [(set (match_operand:SI 0 "register_operand")
+                       (plus:SI (match_dup 0)
+                       (plus:SI (match_dup 0)
+                               (match_operand 1 "zip_opb_immv_p")))
+                               (match_operand 1 "zip_opb_immv_p")))
+               (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+               (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+       (parallel [(set (match_operand:SI 2 "register_operand")
+       (parallel [(set (match_operand:SI 2 "register_operand")
+                       (plus:SI (match_dup 2) (match_dup 0)))
+                       (plus:SI (match_dup 2) (match_dup 0)))
+               (set (reg:CC CC_REG) (compare:CC (match_dup 2) (const_int 0)))])
+               (set (reg:CC CC_REG) (compare:CC (match_dup 2) (const_int 0)))])
+       ]
+       ]
+       "(ZIP_PEEPHOLE)&&(REGNO(operands[0])!=REGNO(operands[2]))&&(peep2_regno_dead_p(2, REGNO(operands[0])))"
+       "(ZIP_PEEPHOLE)&&(REGNO(operands[0])!=REGNO(operands[2]))&&(peep2_regno_dead_p(2, REGNO(operands[0])))"
+       [(parallel [(set (match_dup 2)
+       [(parallel [(set (match_dup 2)
+                       (plus:SI (match_dup 2)
+                       (plus:SI (match_dup 2)
+                               (plus:SI (match_dup 0)
+                               (plus:SI (match_dup 0)
+                                       (match_dup 1))))
+                                       (match_dup 1))))
+               (set (reg:CC CC_REG) (compare:CC (match_dup 2) (const_int 0)))])
+               (set (reg:CC CC_REG) (compare:CC (match_dup 2) (const_int 0)))])
+       ])
+       ])
+;
+;
+; Match:
+; Match:
+;      ADD     $x,R0
+;      ADD     $x,R0
+;      LOD     -x(R0),R1
+;      LOD     -x(R0),R1
+; Transform to:
+; Transform to:
+;      LOD     (R0),R1
+;      LOD     (R0),R1
+;      ADD     $x,R0
+;      ADD     $x,R0
+;
+;
+(define_peephole2
+(define_peephole2
+       [(parallel [(set (match_operand:SI 0 "register_operand")
+       [(parallel [(set (match_operand:SI 0 "register_operand")
+                       (plus:SI (match_dup 0)
+                       (plus:SI (match_dup 0)
+                               (match_operand 1 "zip_opb_immv_p")))
+                               (match_operand 1 "zip_opb_immv_p")))
+               (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+               (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+       (set (match_operand:SI 3 "register_operand")
+       (set (match_operand:SI 3 "register_operand")
+               (mem:SI (plus:SI (match_dup 0)
+               (mem:SI (plus:SI (match_dup 0)
+                       (match_operand 2 "zip_opb_immv_p"))))
+                       (match_operand 2 "zip_opb_immv_p"))))
+       ]
+       ]
+       "(ZIP_PEEPHOLE)&&(REGNO(operands[0])!=REGNO(operands[1]))&&(INTVAL(operands[1])==-INTVAL(operands[2]))"
+       "(ZIP_PEEPHOLE)&&(REGNO(operands[0])!=REGNO(operands[1]))&&(INTVAL(operands[1])==-INTVAL(operands[2]))"
+       [(set (match_dup 3) (mem:SI (match_dup 0)))
+       [(set (match_dup 3) (mem:SI (match_dup 0)))
+       (parallel [(set (match_dup 0) (plus:SI (match_dup 0) (match_dup 1)))
+       (parallel [(set (match_dup 0) (plus:SI (match_dup 0) (match_dup 1)))
+               (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+               (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+       ])
+       ])
+(define_peephole2
+(define_peephole2
+       [(parallel [(set (match_operand:SI 0 "register_operand")
+       [(parallel [(set (match_operand:SI 0 "register_operand")
+                       (plus:SI (match_dup 0)
+                       (plus:SI (match_dup 0)
+                               (match_operand 1 "zip_opb_immv_p")))
+                               (match_operand 1 "zip_opb_immv_p")))
+               (clobber (reg:CC CC_REG))])
+               (clobber (reg:CC CC_REG))])
+       (set (match_operand:SI 3 "register_operand")
+       (set (match_operand:SI 3 "register_operand")
+               (mem:SI (plus:SI (match_dup 0)
+               (mem:SI (plus:SI (match_dup 0)
+                       (match_operand 2 "zip_opb_immv_p"))))
+                       (match_operand 2 "zip_opb_immv_p"))))
+       ]
+       ]
+       "(ZIP_PEEPHOLE)&&(REGNO(operands[0])!=REGNO(operands[1]))&&(INTVAL(operands[1])==-INTVAL(operands[2]))"
+       "(ZIP_PEEPHOLE)&&(REGNO(operands[0])!=REGNO(operands[1]))&&(INTVAL(operands[1])==-INTVAL(operands[2]))"
+       [(set (match_dup 3) (mem:SI (match_dup 0)))
+       [(set (match_dup 3) (mem:SI (match_dup 0)))
+       (parallel [(set (match_dup 0) (plus:SI (match_dup 0) (match_dup 1)))
+       (parallel [(set (match_dup 0) (plus:SI (match_dup 0) (match_dup 1)))
+               (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+               (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+       ])
+       ])
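+;
+; (Both forms above rely on the load displacement exactly cancelling the
+; increment, INTVAL(operands[1]) == -INTVAL(operands[2]), so the load can
+; be issued first from the un-incremented base and the ADD moved after it;
+; the second variant differs only in matching the clobber-CC form of the
+; ADD.)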
+;
+;
+;
+;
+;
+;
+; Match:
+; Match:
+;      ADD     $x,R0
+;      ADD     $x,R0
+;      STO     R1,-x(R0)
+;      STO     R1,-x(R0)
+; Transform to:
+; Transform to:
+;      STO     R1,(R0)
+;      STO     R1,(R0)
+;      ADD     $x,R0
+;      ADD     $x,R0
+;
+;
+(define_peephole2
+(define_peephole2
+       [(parallel [(set (match_operand:SI 0 "register_operand")
+       [(parallel [(set (match_operand:SI 0 "register_operand")
+                       (plus:SI (match_dup 0)
+                       (plus:SI (match_dup 0)
+                               (match_operand 1 "zip_opb_immv_p")))
+                               (match_operand 1 "zip_opb_immv_p")))
+               (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+               (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+       (set (mem:SI (plus:SI (match_dup 0) (match_operand 2 "zip_opb_immv_p")))
+       (set (mem:SI (plus:SI (match_dup 0) (match_operand 2 "zip_opb_immv_p")))
+               (match_operand:SI 3 "register_operand"))
+               (match_operand:SI 3 "register_operand"))
+       ]
+       ]
+       "(ZIP_PEEPHOLE)&&(REGNO(operands[0])!=REGNO(operands[1]))&&(INTVAL(operands[1])==-INTVAL(operands[2]))"
+       "(ZIP_PEEPHOLE)&&(REGNO(operands[0])!=REGNO(operands[1]))&&(INTVAL(operands[1])==-INTVAL(operands[2]))"
+       [(set (mem:SI (match_dup 0)) (match_dup 3))
+       [(set (mem:SI (match_dup 0)) (match_dup 3))
+       (parallel [(set (match_dup 0) (plus:SI (match_dup 0) (match_dup 1)))
+       (parallel [(set (match_dup 0) (plus:SI (match_dup 0) (match_dup 1)))
+               (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+               (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+       ])
+       ])
+(define_peephole2
+(define_peephole2
+       [(parallel [(set (match_operand:SI 0 "register_operand")
+       [(parallel [(set (match_operand:SI 0 "register_operand")
+                       (plus:SI (match_dup 0)
+                       (plus:SI (match_dup 0)
+                               (match_operand 1 "zip_opb_immv_p")))
+                               (match_operand 1 "zip_opb_immv_p")))
+               (clobber (reg:CC CC_REG))])
+               (clobber (reg:CC CC_REG))])
+       (set (mem:SI (plus:SI (match_dup 0) (match_operand 2 "zip_opb_immv_p")))
+       (set (mem:SI (plus:SI (match_dup 0) (match_operand 2 "zip_opb_immv_p")))
+               (match_operand:SI 3 "register_operand"))
+               (match_operand:SI 3 "register_operand"))
+       ]
+       ]
+       "(ZIP_PEEPHOLE)&&(REGNO(operands[0])!=REGNO(operands[1]))&&(INTVAL(operands[1])==-INTVAL(operands[2]))"
+       "(ZIP_PEEPHOLE)&&(REGNO(operands[0])!=REGNO(operands[1]))&&(INTVAL(operands[1])==-INTVAL(operands[2]))"
+       [(set (mem:SI (match_dup 0)) (match_dup 3))
+       [(set (mem:SI (match_dup 0)) (match_dup 3))
+       (parallel [(set (match_dup 0) (plus:SI (match_dup 0) (match_dup 1)))
+       (parallel [(set (match_dup 0) (plus:SI (match_dup 0) (match_dup 1)))
+               (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+               (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+       ])
+       ])
+;
+;
+;
+;
+; Match:
+; Match:
+;      ADD     $x,R0
+;      ADD     $x,R0
+;      ANY     R1,R2 (destination is not R0, source does not reference R0)
+;      ANY     R1,R2 (destination is not R0, source does not reference R0)
+;      ADD     R0,Rn (could be 1 or 2, not 0)
+;      ADD     R0,Rn (could be 1 or 2, not 0)
+;      (R0 is dead)
+;      (R0 is dead)
+; Transform to:
+; Transform to:
+;      ANY     R1,R2
+;      ANY     R1,R2
+;      ADD     $x(R0),Rn
+;      ADD     $x(R0),Rn
+;
+;
+;
+;
+;
+;
+; Match:
+; Match:
+;      MOV     R1,R0
+;      MOV     R1,R0
+;      AND     #/R2,R0
+;      AND     #/R2,R0
+;      (Ry dead ...)
+;      (Ry dead ...)
+; Transform to:
+; Transform to:
+;      TEST    #/Rz,Rx
+;      TEST    #/Rz,Rx
+;
+;
+(define_peephole2
+(define_peephole2
+       [(set (match_operand:SI 0 "register_operand")
+       [(set (match_operand:SI 0 "register_operand")
+               (match_operand:SI 1 "register_operand"))
+               (match_operand:SI 1 "register_operand"))
+       (parallel [(set (match_operand:SI 3 "register_operand")
+       (parallel [(set (match_operand:SI 3 "register_operand")
+                       (and:SI (match_dup 0)
+                       (and:SI (match_dup 0)
+                               (match_operand:SI 2 "zip_opb_single_operand_p")))
+                               (match_operand:SI 2 "zip_opb_single_operand_p")))
+               (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+               (set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))])
+       ]
+       ]
+       "((1)||(ZIP_PEEPHOLE))&&(peep2_regno_dead_p(2, REGNO(operands[0])))&&(peep2_regno_dead_p(2, REGNO(operands[3])))"
+       "((1)||(ZIP_PEEPHOLE))&&(peep2_regno_dead_p(2, REGNO(operands[0])))&&(peep2_regno_dead_p(2, REGNO(operands[3])))"
+       [(set (reg:CC CC_REG) (compare:CC (and:SI (match_dup 1) (match_dup 2))
+       [(set (reg:CC CC_REG) (compare:CC (and:SI (match_dup 1) (match_dup 2))
+                       (const_int 0)))])
+                       (const_int 0)))])
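+;
+; (A sketch of the kind of source this catches, with illustrative names:
+;
+;      if (flags & MASK)
+;              ...
+;
+; when neither the copied register nor the masked result is needed again,
+; the copy and the AND reduce to a single flag-setting TEST.)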
+;
+;
+;
+;
+; Match:
+; Match:
+;      LB OpB,Rx
+;      LB OpB,Rx
+;      AND 255,Rx      (in form of zero_extend)
+;      AND 255,Rx      (in form of zero_extend)
+; Transform to:
+; Transform to:
+;      LB OpB,Rx
+;      LB OpB,Rx
+;
+;
+;
+;
+(define_peephole2
+(define_peephole2
+       [(set (match_operand:QI 0 "register_operand")
+       [(set (match_operand:QI 0 "register_operand")
+               (match_operand:QI 1 "memory_operand"))
+               (match_operand:QI 1 "memory_operand"))
+       (parallel [(set (match_operand:SI 2 "register_operand")
+       (parallel [(set (match_operand:SI 2 "register_operand")
+                       (zero_extend:SI (match_dup 0)))
+                       (zero_extend:SI (match_dup 0)))
+               (clobber (reg:CC CC_REG))])]
+               (clobber (reg:CC CC_REG))])]
+       "((1)||(ZIP_PEEPHOLE))"
+       "((1)||(ZIP_PEEPHOLE))"
+       [(parallel [(set (match_dup 2) (zero_extend:SI (match_dup 1)))
+       [(parallel [(set (match_dup 2) (zero_extend:SI (match_dup 1)))
+               (clobber (reg:CC CC_REG))])])
+               (clobber (reg:CC CC_REG))])])
+;
+;
+;
+;
+;
+;
+; Match:
+; Match:
+;      LW OpB,Rx
+;      LW OpB,Rx
+;      AND 65535,Rx
+;      AND 65535,Rx
+; Transform to:
+; Transform to:
+;      LW OpB,Rx
+;      LW OpB,Rx
+;
+;
+;
+;
+(define_peephole2
+(define_peephole2
+       [(set (match_operand:HI 0 "register_operand")
+       [(set (match_operand:HI 0 "register_operand")
+               (match_operand:HI 1 "memory_operand"))
+               (match_operand:HI 1 "memory_operand"))
+       (parallel [(set (match_operand:SI 2 "register_operand")
+       (parallel [(set (match_operand:SI 2 "register_operand")
+                       (zero_extend:SI (match_dup 0)))
+                       (zero_extend:SI (match_dup 0)))
+               (clobber (reg:CC CC_REG))])]
+               (clobber (reg:CC CC_REG))])]
+       "((1)||(ZIP_PEEPHOLE))
+       "((1)||(ZIP_PEEPHOLE))
+               &&(REG_P(operands[0]))
+               &&(REG_P(operands[0]))
+               &&(REG_P(operands[2]))
+               &&(REG_P(operands[2]))
+               &&(REGNO(operands[0])==REGNO(operands[2]))"
+               &&(REGNO(operands[0])==REGNO(operands[2]))"
+       [(parallel [(set (match_dup 2) (zero_extend:SI (match_dup 1)))
+       [(parallel [(set (match_dup 2) (zero_extend:SI (match_dup 1)))
+               (clobber (reg:CC CC_REG))])])
+               (clobber (reg:CC CC_REG))])])
+;
+;
+;
+;
+;
+;
+; Match:
+; Match:
+;      LDI 0,Rx
+;      LDI 0,Rx
+;      LDI.y #,Rx
+;      LDI.y #,Rx
+;      Add Rx,Ry
+;      Add Rx,Ry
+; Transform to:
+; Transform to:
+;      Add.y #,Ry
+;      Add.y #,Ry
+;
+;
+;
+;
+(define_peephole2
+(define_peephole2
+       [(set (match_operand:SI 0 "register_operand") (const_int 0))
+       [(set (match_operand:SI 0 "register_operand") (const_int 0))
+       (set (match_dup 0)
+       (set (match_dup 0)
+               (if_then_else:SI
+               (if_then_else:SI
+                       (match_operator 1 "ordered_comparison_operator"
+                       (match_operator 1 "ordered_comparison_operator"
+                               [(reg:CC CC_REG) (const_int 0)])
+                               [(reg:CC CC_REG) (const_int 0)])
+                       (match_operand:SI 2 "zip_opb_single_operand_p") (match_dup 0)))
+                       (match_operand:SI 2 "zip_opb_single_operand_p") (match_dup 0)))
+       (parallel [
+       (parallel [
+               (set (match_operand:SI 3 "register_operand")
+               (set (match_operand:SI 3 "register_operand")
+                       (plus:SI (match_dup 3) (match_dup 0)))
+                       (plus:SI (match_dup 3) (match_dup 0)))
+               (clobber (reg:CC CC_REG))
+               (clobber (reg:CC CC_REG))
+               ])]
+               ])]
+       "((1)||(ZIP_PEEPHOLE))
+       "((1)||(ZIP_PEEPHOLE))
+               &&(peep2_regno_dead_p(3, REGNO(operands[0])))"
+               &&(peep2_regno_dead_p(3, REGNO(operands[0])))"
+       [(set (match_dup 3)
+       [(set (match_dup 3)
+               (if_then_else:SI
+               (if_then_else:SI
+                       (match_op_dup 1 [(reg:CC CC_REG) (const_int 0)])
+                       (match_op_dup 1 [(reg:CC CC_REG) (const_int 0)])
+                       (plus:SI (match_dup 3) (match_dup 2))
+                       (plus:SI (match_dup 3) (match_dup 2))
+                       (match_dup 3)))])
+                       (match_dup 3)))])
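+;
+; (Roughly, this is the sequence a conditional increment such as
+;
+;      r += cond ? imm : 0;    /* illustrative C only */
+;
+; would otherwise take through a scratch register; once the scratch is
+; dead, the whole thing becomes the single conditional Add.y #,Ry.)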
+;
+;
+;
+;
+; Match:
+; Match:
+;      LDI     0,Rx
+;      LDI     0,Rx
+;      LDI.y   #,Rx
+;      LDI.y   #,Rx
+;      XOR     Rx,Rc
+;      XOR     Rx,Rc
+; Transform to:
+; Transform to:
+;      XOR.y #,Ry
+;      XOR.y #,Ry
+;
+;
+;
+;
+(define_peephole2
+(define_peephole2
+       [(set (match_operand:SI 0 "register_operand") (const_int 0))
+       [(set (match_operand:SI 0 "register_operand") (const_int 0))
+       (set (match_dup 0)
+       (set (match_dup 0)
+               (if_then_else:SI
+               (if_then_else:SI
+                       (match_operator 1 "ordered_comparison_operator"
+                       (match_operator 1 "ordered_comparison_operator"
+                               [(reg:CC CC_REG) (const_int 0)])
+                               [(reg:CC CC_REG) (const_int 0)])
+                       (match_operand:SI 2 "zip_opb_single_operand_p") (match_dup 0)))
+                       (match_operand:SI 2 "zip_opb_single_operand_p") (match_dup 0)))
+       (parallel [
+       (parallel [
+               (set (match_operand:SI 3 "register_operand")
+               (set (match_operand:SI 3 "register_operand")
+                       (xor:SI (match_dup 3) (match_dup 0)))
+                       (xor:SI (match_dup 3) (match_dup 0)))
+               (clobber (reg:CC CC_REG))
+               (clobber (reg:CC CC_REG))
+               ])]
+               ])]
+       "((1)||(ZIP_PEEPHOLE))
+       "((1)||(ZIP_PEEPHOLE))
+               &&(peep2_regno_dead_p(3, REGNO(operands[0])))"
+               &&(peep2_regno_dead_p(3, REGNO(operands[0])))"
+       [(set (match_dup 3)
+       [(set (match_dup 3)
+               (if_then_else:SI
+               (if_then_else:SI
+                       (match_op_dup 1 [(reg:CC CC_REG) (const_int 0)])
+                       (match_op_dup 1 [(reg:CC CC_REG) (const_int 0)])
+                       (xor:SI (match_dup 3) (match_dup 2))
+                       (xor:SI (match_dup 3) (match_dup 2))
+                       (match_dup 3)))])
+                       (match_dup 3)))])
+;
+;
+;
+;
+;
+;
+;
+;
+;
+;
+; Match:
+; Match:
+;      LDI     0,Rx
+;      LDI     0,Rx
+;      LDI.y   #,Rx
+;      LDI.y   #,Rx
+;      OR      Rx,Rc
+;      OR      Rx,Rc
+; Transform to:
+; Transform to:
+;      OR.y #,Ry
+;      OR.y #,Ry
+;
+;
+;
+;
+;
+;
+(define_peephole2
+(define_peephole2
+       [(set (match_operand:SI 0 "register_operand") (const_int 0))
+       [(set (match_operand:SI 0 "register_operand") (const_int 0))
+       (set (match_dup 0)
+       (set (match_dup 0)
+               (if_then_else:SI (match_operator 1 "ordered_comparison_operator"
+               (if_then_else:SI (match_operator 1 "ordered_comparison_operator"
+                               [(reg:CC CC_REG) (const_int 0)])
+                               [(reg:CC CC_REG) (const_int 0)])
+                       (match_operand:SI 2 "zip_opb_single_operand_p") (match_dup 0)))
+                       (match_operand:SI 2 "zip_opb_single_operand_p") (match_dup 0)))
+       (parallel [(set (match_operand:SI 3 "register_operand")
+       (parallel [(set (match_operand:SI 3 "register_operand")
+                       (ior:SI (match_dup 3) (match_dup 0)))
+                       (ior:SI (match_dup 3) (match_dup 0)))
+               (clobber (reg:CC CC_REG))])]
+               (clobber (reg:CC CC_REG))])]
+       "((1)||(ZIP_PEEPHOLE))
+       "((1)||(ZIP_PEEPHOLE))
+               &&(peep2_regno_dead_p(3, REGNO(operands[0])))"
+               &&(peep2_regno_dead_p(3, REGNO(operands[0])))"
+       [(set (match_dup 3)
+       [(set (match_dup 3)
+               (if_then_else:SI
+               (if_then_else:SI
+                       (match_op_dup 1 [(reg:CC CC_REG) (const_int 0)])
+                       (match_op_dup 1 [(reg:CC CC_REG) (const_int 0)])
+                       (ior:SI (match_dup 3) (match_dup 2))
+                       (ior:SI (match_dup 3) (match_dup 2))
+                       (match_dup 3)))])
+                       (match_dup 3)))])
+;
+;
+;
+;
+;
+;
+; Match:
+; Match:
+;      AND 255,Rx
+;      AND 255,Rx
+;      SB OpB,Rx
+;      SB OpB,Rx
+;      (AND Rx is DEAD)
+;      (AND Rx is DEAD)
+; Transform to:
+; Transform to:
+;      SB OpB,Rx
+;      SB OpB,Rx
+;
+;
+;
+;
+;(define_peephole2
+;(define_peephole2
+       ;[(set (match_operand:SI 2 "register_operand")
+       ;[(set (match_operand:SI 2 "register_operand")
+               ;(zero_extend:SI (match_operand:SI 0)))
+               ;(zero_extend:SI (match_operand:SI 0)))
+       ;[(set (match_operand:QI 0 "memory_operand")
+       ;[(set (match_operand:QI 0 "memory_operand")
+               ;(match_operand:QI 1 "memory_operand"))
+               ;(match_operand:QI 1 "memory_operand"))
+       ;"((1)||(ZIP_PEEPHOLE))"
+       ;"((1)||(ZIP_PEEPHOLE))"
+       ;[(set (match_dup 2) (zero_extend:SI (match_dup 1)))])
+       ;[(set (match_dup 2) (zero_extend:SI (match_dup 1)))])
+;
+;
+;
+;
+;
+;
+;
+;
+;
+;
+; Match:
+; Match:
+;      (call ...
+;      (call ...
+;      (set (pc) (label))
+;      (set (pc) (label))
+;  or (in asm)
+;  or (in asm)
+;      MOV     .Lcallx(PC),R0
+;      MOV     .Lcallx(PC),R0
+;      BRA     (somewhere)
+;      BRA     (somewhere)
+; .Lcallx
+; .Lcallx
+;      BRA     (somewhere-else)
+;      BRA     (somewhere-else)
+; Transform to:
+; Transform to:
+;
+;
+;      (sequence [(call ...
+;      (sequence [(call ...
+;              (set (pc) (label))])
+;              (set (pc) (label))])
+;   or (in asm)
+;   or (in asm)
+;      "LDI    (somewhere-else),R0
+;      "LDI    (somewhere-else),R0
+;      BRA     subroutine"
+;      BRA     subroutine"
+;
+;
+; While the following looks good, it doesn't work.  My guess is that the reason
+; While the following looks good, it doesn't work.  My guess is that the reason
+; why it doesn't work is that the jump at the end crosses basic block boundaries.
+; why it doesn't work is that the jump at the end crosses basic block boundaries.
+;
+;
+;(define_insn "void_call_mem_unspec"
+;(define_insn "void_call_mem_unspec"
+;      [(call (unspec:SI [(mem:SI (match_operand:VOID 0 "zip_const_address_operand_p" ""))] UNSPEC_RAW_CALL)
+;      [(call (unspec:SI [(mem:SI (match_operand:VOID 0 "zip_const_address_operand_p" ""))] UNSPEC_RAW_CALL)
+;                      (match_operand 1 "const_int_operand" "n"))
+;                      (match_operand 1 "const_int_operand" "n"))
+;              (clobber (reg:SI RTN_REG))
+;              (clobber (reg:SI RTN_REG))
+;              (clobber (reg:CC CC_REG))]
+;              (clobber (reg:CC CC_REG))]
+;      ""
+;      ""
+;      "BRA\t%0,PC"
+;      "BRA\t%0,PC"
+;      [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;      [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;(define_peephole2
+;(define_peephole2
+;      [(parallel [(call (mem:SI (match_operand:VOID 0 "zip_const_address_operand_p"))
+;      [(parallel [(call (mem:SI (match_operand:VOID 0 "zip_const_address_operand_p"))
+;                      (match_operand 1 "const_int_operand"))
+;                      (match_operand 1 "const_int_operand"))
+;              (clobber (reg:SI RTN_REG))
+;              (clobber (reg:SI RTN_REG))
+;              (clobber (reg:CC CC_REG))])
+;              (clobber (reg:CC CC_REG))])
+;      ; The match operand for the (set (pc) ...) cannot have anything but
+;      ; The match operand for the (set (pc) ...) cannot have anything but
+;      ; VOIDmode, or it won't match.
+;      ; VOIDmode, or it won't match.
+;      (set (pc) (match_operand:VOID 2 "zip_const_address_operand_p"))]
+;      (set (pc) (match_operand:VOID 2 "zip_const_address_operand_p"))]
+;      ""
+;      ""
+;      [(set (reg:SI RTN_REG) (match_dup 2))
+;      [(set (reg:SI RTN_REG) (match_dup 2))
+;      (call (unspec:SI [(mem:SI (match_operand:VOID 0 "zip_const_address_operand_p"))] UNSPEC_RAW_CALL)
+;      (call (unspec:SI [(mem:SI (match_operand:VOID 0 "zip_const_address_operand_p"))] UNSPEC_RAW_CALL)
+;                      (match_operand 1 "const_int_operand"))
+;                      (match_operand 1 "const_int_operand"))
+;              (use (reg:SI RTN_REG))
+;              (use (reg:SI RTN_REG))
+;              (clobber (reg:SI RTN_REG))
+;              (clobber (reg:SI RTN_REG))
+;              (clobber (reg:CC CC_REG))]
+;              (clobber (reg:CC CC_REG))]
+;      "fprintf(stderr, \"CALL-JUMP Matched\");")
+;      "fprintf(stderr, \"CALL-JUMP Matched\");")
+;
+;
+;
+;
+;
+;
+; So, the following *should* have worked as well.  However, this falls apart
+; So, the following *should* have worked as well.  However, this falls apart
+; because the 'final' routine can't tell if we are calling a subroutine in this
+; because the 'final' routine can't tell if we are calling a subroutine in this
+; function or not.
+; function or not.
+;
+;
+;(define_peephole
+;(define_peephole
+       ;[(parallel [(call (mem:SI (match_operand:SI 0 "zip_const_address_operand_p"))
+       ;[(parallel [(call (mem:SI (match_operand:SI 0 "zip_const_address_operand_p"))
+                       ;(match_operand 1 "const_int_operand"))
+                       ;(match_operand 1 "const_int_operand"))
+               ;(clobber (reg:SI RTN_REG))
+               ;(clobber (reg:SI RTN_REG))
+               ;(clobber (reg:CC CC_REG))])
+               ;(clobber (reg:CC CC_REG))])
+       ;(set (pc) (label_ref (match_operand 2 "")))]
+       ;(set (pc) (label_ref (match_operand 2 "")))]
+       ;""
+       ;""
+       ;"LDI\t%2,R0\;BRA\t%0"
+       ;"LDI\t%2,R0\;BRA\t%0"
+       ;[(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+       ;[(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
+; and for
+; and for
+;      BRA target
+;      BRA target
+;      BRA target ; two branches to the same target in a row ...
+;      BRA target ; two branches to the same target in a row ...
+;
+;
+;
+;
+;
+;
+; STILL MISSING:
+; STILL MISSING:
+;
+;
+;
+;
+;
+;
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/config/zip/zip-protos.h gcc-6.2.0-zip/gcc/config/zip/zip-protos.h
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/config/zip/zip-protos.h gcc-6.2.0-zip/gcc/config/zip/zip-protos.h
--- gcc-6.2.0/gcc/config/zip/zip-protos.h       1969-12-31 19:00:00.000000000 -0500
--- gcc-6.2.0/gcc/config/zip/zip-protos.h       1969-12-31 19:00:00.000000000 -0500
+++ gcc-6.2.0-zip/gcc/config/zip/zip-protos.h   2017-02-17 16:45:53.264117439 -0500
+++ gcc-6.2.0-zip/gcc/config/zip/zip-protos.h   2017-02-17 16:45:53.264117439 -0500
@@ -0,0 +1,82 @@
@@ -0,0 +1,82 @@
+////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+//
+//
+// Filename:   zip-protos.h
+// Filename:   zip-protos.h
+//
+//
+// Project:    Zip CPU backend for the GNU Compiler Collection
+// Project:    Zip CPU backend for the GNU Compiler Collection
+//
+//
+// Purpose:
+// Purpose:
+//
+//
+// Creator:    Dan Gisselquist, Ph.D.
+// Creator:    Dan Gisselquist, Ph.D.
+//             Gisselquist Technology, LLC
+//             Gisselquist Technology, LLC
+//
+//
+////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+//
+//
+// Copyright (C) 2016, Gisselquist Technology, LLC
+// Copyright (C) 2016, Gisselquist Technology, LLC
+//
+//
+// This program is free software (firmware): you can redistribute it and/or
+// This program is free software (firmware): you can redistribute it and/or
+// modify it under the terms of  the GNU General Public License as published
+// modify it under the terms of  the GNU General Public License as published
+// by the Free Software Foundation, either version 3 of the License, or (at
+// by the Free Software Foundation, either version 3 of the License, or (at
+// your option) any later version.
+// your option) any later version.
+//
+//
+// This program is distributed in the hope that it will be useful, but WITHOUT
+// This program is distributed in the hope that it will be useful, but WITHOUT
+// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// for more details.
+// for more details.
+//
+//
+// You should have received a copy of the GNU General Public License along
+// You should have received a copy of the GNU General Public License along
+// with this program.  (It's in the $(ROOT)/doc directory, run make with no
+// with this program.  (It's in the $(ROOT)/doc directory, run make with no
+// target there if the PDF file isn't present.)  If not, see
+// target there if the PDF file isn't present.)  If not, see
+// <http://www.gnu.org/licenses/> for a copy.
+// <http://www.gnu.org/licenses/> for a copy.
+//
+//
+// License:    GPL, v3, as defined and found on www.gnu.org,
+// License:    GPL, v3, as defined and found on www.gnu.org,
+//             http://www.gnu.org/licenses/gpl.html
+//             http://www.gnu.org/licenses/gpl.html
+//
+//
+//
+//
+////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+#ifndef        ZIP_PROTOS_H
+#ifndef        ZIP_PROTOS_H
+#define        ZIP_PROTOS_H
+#define        ZIP_PROTOS_H
+
+
+extern int     zip_supported_condition(int c);
+extern int     zip_supported_condition(int c);
+extern void    zip_expand_prologue(void);
+extern void    zip_expand_prologue(void);
+extern void    zip_expand_epilogue(void);
+extern void    zip_expand_epilogue(void);
+extern void    zip_sibcall_epilogue(void);
+extern void    zip_sibcall_epilogue(void);
+extern int     zip_expand_movdi(rtx,rtx);
+extern int     zip_expand_movdi(rtx,rtx);
+extern int     zip_expand_movsicc(rtx,rtx,rtx,rtx);
+extern int     zip_expand_movsicc(rtx,rtx,rtx,rtx);
+extern int     zip_initial_elimination_offset(int, int);
+extern int     zip_initial_elimination_offset(int, int);
+extern void    zip_print_operand(FILE *, rtx, int);
+extern void    zip_print_operand(FILE *, rtx, int);
+extern void    zip_print_operand_address(FILE *, rtx);
+extern void    zip_print_operand_address(FILE *, rtx);
+extern enum    reg_class       zip_reg_class(int);
+extern enum    reg_class       zip_reg_class(int);
+extern rtx     zip_return_addr_rtx(int, rtx);
+extern rtx     zip_return_addr_rtx(int, rtx);
+extern int     zip_num_arg_regs(enum machine_mode, tree);
+extern int     zip_num_arg_regs(enum machine_mode, tree);
+
+
+extern void    zip_asm_output_def(FILE *s, const char *n, const char *v);
+extern void    zip_asm_output_def(FILE *s, const char *n, const char *v);
+
+
+extern void    zip_canonicalize_comparison(int *, rtx *, rtx *, bool);
+extern void    zip_canonicalize_comparison(int *, rtx *, rtx *, bool);
+extern bool    zip_function_ok_for_sibcall(tree, tree);
+extern bool    zip_function_ok_for_sibcall(tree, tree);
+extern int     zip_address_operand(rtx op);
+extern int     zip_address_operand(rtx op);
+extern int     zip_const_address_operand(rtx op);
+extern int     zip_const_address_operand(rtx op);
+extern int     zip_use_return_insn(void);
+extern int     zip_use_return_insn(void);
+extern const char *zip_set_zero_or_one(rtx, rtx);
+extern const char *zip_set_zero_or_one(rtx, rtx);
+extern const char *zip_movsicc(rtx, rtx, rtx, rtx);
+extern const char *zip_movsicc(rtx, rtx, rtx, rtx);
+
+
+extern int     zip_insn_sets_cc(rtx_insn *insn);
+extern int     zip_insn_sets_cc(rtx_insn *insn);
+extern int     zip_is_conditional(rtx_insn *insn);
+extern int     zip_is_conditional(rtx_insn *insn);
+extern int     zip_ct_address_operand(rtx op);
+extern int     zip_ct_address_operand(rtx op);
+extern int     zip_pd_opb_operand(rtx op);
+extern int     zip_pd_opb_operand(rtx op);
+extern int     zip_pd_mov_operand(rtx op);
+extern int     zip_pd_mov_operand(rtx op);
+extern int     zip_pd_imm_operand(rtx op);
+extern int     zip_pd_imm_operand(rtx op);
+extern int     zip_pd_mvimm_operand(rtx op);
+extern int     zip_pd_mvimm_operand(rtx op);
+extern int     zip_ct_const_address_operand(rtx op);
+extern int     zip_ct_const_address_operand(rtx op);
+extern int     zip_pd_const_address_operand(rtx op);
+extern int     zip_pd_const_address_operand(rtx op);
+extern const char *zip_movsicc(rtx, rtx, rtx, rtx);
+extern const char *zip_movsicc(rtx, rtx, rtx, rtx);
+extern const char *zip_addqics(rtx, rtx, rtx, rtx);
+extern const char *zip_addqics(rtx, rtx, rtx, rtx);
+extern const char *zip_cbranchdi(rtx, rtx, rtx, rtx);
+extern const char *zip_cbranchdi(rtx, rtx, rtx, rtx);
+
+
+extern void    zip_ifcvt_machdep_init(struct ce_if_block *ceinfo);
+extern void    zip_ifcvt_machdep_init(struct ce_if_block *ceinfo);
+extern void    zip_ifcvt_modify_cancel(struct ce_if_block *ceinfo);
+extern void    zip_ifcvt_modify_cancel(struct ce_if_block *ceinfo);
+extern void    zip_ifcvt_modify_final(struct ce_if_block *ceinfo);
+extern void    zip_ifcvt_modify_final(struct ce_if_block *ceinfo);
+extern void    zip_ifcvt_modify_tests(struct ce_if_block *ceinfo, rtx *true_expr, rtx *false_expr);
+extern void    zip_ifcvt_modify_tests(struct ce_if_block *ceinfo, rtx *true_expr, rtx *false_expr);
+extern void    zip_ifcvt_modify_insn(struct ce_if_block *ceinfo, rtx pattern, rtx_insn *insn);
+extern void    zip_ifcvt_modify_insn(struct ce_if_block *ceinfo, rtx pattern, rtx_insn *insn);
+
+
+#endif
+#endif
+
+
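
The header above is purely a list of prototypes; the functions themselves live in the port's zip.c and are reached through target macros and hooks defined in zip.h. Neither file appears in this hunk, so the sketch below is only an assumption about the usual GCC wiring conventions, not a quote from the actual zip.h:

        /* Hypothetical zip.h fragments (illustration only, not from this patch):
         * standard GCC target macros forwarding to the prototypes declared in
         * zip-protos.h above. */
        #define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
                ((OFFSET) = zip_initial_elimination_offset ((FROM), (TO)))

        #define RETURN_ADDR_RTX(COUNT, FRAME) \
                zip_return_addr_rtx ((COUNT), (FRAME))

        /* The zip_ifcvt_* prototypes pair up with ifcvt.c's optional
         * if-conversion target macros. */
        #define IFCVT_MACHDEP_INIT(CE_INFO)   zip_ifcvt_machdep_init (CE_INFO)
        #define IFCVT_MODIFY_FINAL(CE_INFO)   zip_ifcvt_modify_final (CE_INFO)
        #define IFCVT_MODIFY_CANCEL(CE_INFO)  zip_ifcvt_modify_cancel (CE_INFO)
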
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/config/zip/zip-sync.md gcc-6.2.0-zip/gcc/config/zip/zip-sync.md
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/config/zip/zip-sync.md gcc-6.2.0-zip/gcc/config/zip/zip-sync.md
--- gcc-6.2.0/gcc/config/zip/zip-sync.md        1969-12-31 19:00:00.000000000 -0500
--- gcc-6.2.0/gcc/config/zip/zip-sync.md        1969-12-31 19:00:00.000000000 -0500
+++ gcc-6.2.0-zip/gcc/config/zip/zip-sync.md    2017-02-22 18:03:26.740198685 -0500
+++ gcc-6.2.0-zip/gcc/config/zip/zip-sync.md    2017-02-22 18:03:26.740198685 -0500
@@ -0,0 +1,415 @@
@@ -0,0 +1,415 @@
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;; Filename:   zip-sync.md
+;; Filename:   zip-sync.md
+;;
+;;
+;; Project:    Zip CPU -- a small, lightweight, RISC CPU soft core
+;; Project:    Zip CPU -- a small, lightweight, RISC CPU soft core
+;;
+;;
+;; Purpose:    This is that portion of the machine description of the Zip CPU
+;; Purpose:    This is that portion of the machine description of the Zip CPU
+;;             which is focused on atomic operations.
+;;             which is focused on atomic operations.
+;;
+;;
+;;
+;;
+;; Creator:    Dan Gisselquist, Ph.D.
+;; Creator:    Dan Gisselquist, Ph.D.
+;;             Gisselquist Technology, LLC
+;;             Gisselquist Technology, LLC
+;;
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;; Copyright (C) 2015,2017, Gisselquist Technology, LLC
+;; Copyright (C) 2015,2017, Gisselquist Technology, LLC
+;;
+;;
+;; This program is free software (firmware): you can redistribute it and/or
+;; This program is free software (firmware): you can redistribute it and/or
+;; modify it under the terms of  the GNU General Public License as published
+;; modify it under the terms of  the GNU General Public License as published
+;; by the Free Software Foundation, either version 3 of the License, or (at
+;; by the Free Software Foundation, either version 3 of the License, or (at
+;; your option) any later version.
+;; your option) any later version.
+;;
+;;
+;; This program is distributed in the hope that it will be useful, but WITHOUT
+;; This program is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+;; FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+;; FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+;; for more details.
+;; for more details.
+;;
+;;
+;; License:    GPL, v3, as defined and found on www.gnu.org,
+;; License:    GPL, v3, as defined and found on www.gnu.org,
+;;             http://www.gnu.org/licenses/gpl.html
+;;             http://www.gnu.org/licenses/gpl.html
+;;
+;;
+;;
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;;
+;;
+;
+;
+;
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;; Atomic access Op-codes
+;; Atomic access Op-codes
+;;
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+;
+;
+;
+;
+;
+; First, the basic atomic_ operators, add, sub, ior, and, and xor
+; First, the basic atomic_ operators, add, sub, ior, and, and xor
+;
+;
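
These patterns serve GCC's __atomic built-ins when the value fetched from memory is not needed, in which case the compiler may pick the plain atomic_<op>si forms that follow (LOCK, LW into a scratch register, the ALU op, SW back). A minimal C sketch, with a hypothetical counter variable:

        #include <stdint.h>

        uint32_t counter;

        void bump (void)
        {
                /* Result discarded, so the plain atomic_addsi pattern below
                   (LOCK / LW / ADD / SW through a scratch register) suffices. */
                __atomic_fetch_add (&counter, 1, __ATOMIC_SEQ_CST);

                /* Likewise for the bit-wise forms (atomic_iorsi and the
                   reissue_atomic_andsi / reissue_atomic_xorsi insns below). */
                __atomic_fetch_or (&counter, 0x80, __ATOMIC_SEQ_CST);
                __atomic_fetch_and (&counter, ~0xffu, __ATOMIC_SEQ_CST);
        }
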
+(define_insn "atomic_addsi"
+(define_insn "atomic_addsi"
+       [(set (match_operand:SI 0 "memory_operand" "+Q")
+       [(set (match_operand:SI 0 "memory_operand" "+Q")
+               (plus:SI (match_dup 0)
+               (plus:SI (match_dup 0)
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO")))
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO")))
+       (match_operand 2 "const_int_operand" "")        ; Memory model used
+       (match_operand 2 "const_int_operand" "")        ; Memory model used
+       (clobber (match_scratch:SI 3 "=r"))     ; Scratch register
+       (clobber (match_scratch:SI 3 "=r"))     ; Scratch register
+       (clobber (reg:CC CC_REG))]
+       (clobber (reg:CC CC_REG))]
+       "(ZIP_ATOMIC)"
+       "(ZIP_ATOMIC)"
+       "LOCK\n\tLW %0,%3\n\tADD %1,%3\n\tSW %3,%0"
+       "LOCK\n\tLW %0,%3\n\tADD %1,%3\n\tSW %3,%0"
+       [(set_attr "predicable" "no")])
+       [(set_attr "predicable" "no")])
+(define_insn "atomic_subsi"
+(define_insn "atomic_subsi"
+       [(set (match_operand:SI 0 "memory_operand" "+Q")
+       [(set (match_operand:SI 0 "memory_operand" "+Q")
+               (minus:SI (match_dup 0)
+               (minus:SI (match_dup 0)
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO")))
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO")))
+       (match_operand 2 "const_int_operand" "")        ; Memory model used
+       (match_operand 2 "const_int_operand" "")        ; Memory model used
+       (clobber (match_scratch:SI 3 "=r"))     ; Scratch register
+       (clobber (match_scratch:SI 3 "=r"))     ; Scratch register
+       (clobber (reg:CC CC_REG))]
+       (clobber (reg:CC CC_REG))]
+       "(ZIP_ATOMIC)"
+       "(ZIP_ATOMIC)"
+       "LOCK\n\tLW %0,%3\n\tSUB %1,%3\n\tSW %3,%0"
+       "LOCK\n\tLW %0,%3\n\tSUB %1,%3\n\tSW %3,%0"
+       [(set_attr "predicable" "no")])
+       [(set_attr "predicable" "no")])
+(define_insn "atomic_iorsi"
+(define_insn "atomic_iorsi"
+       [(set (match_operand:SI 0 "memory_operand" "+Q")
+       [(set (match_operand:SI 0 "memory_operand" "+Q")
+               (ior:SI (match_dup 0)
+               (ior:SI (match_dup 0)
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO")))
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO")))
+       (match_operand 2 "const_int_operand" "")        ; Memory model used
+       (match_operand 2 "const_int_operand" "")        ; Memory model used
+       (clobber (match_scratch:SI 3 "=r"))     ; Scratch register
+       (clobber (match_scratch:SI 3 "=r"))     ; Scratch register
+       (clobber (reg:CC CC_REG))]
+       (clobber (reg:CC CC_REG))]
+       "(ZIP_ATOMIC)"
+       "(ZIP_ATOMIC)"
+       "LOCK\n\tLW %0,%3\n\tOR %1,%3\n\tSW %3,%0"
+       "LOCK\n\tLW %0,%3\n\tOR %1,%3\n\tSW %3,%0"
+       [(set_attr "predicable" "no")])
+       [(set_attr "predicable" "no")])
+(define_expand "atomic_andsi"
+(define_expand "atomic_andsi"
+       [(match_operand:SI 0 "memory_operand" "+Q")
+       [(match_operand:SI 0 "memory_operand" "+Q")
+       (match_operand:SI 1 "zip_opb_single_operand_p" "rO")
+       (match_operand:SI 1 "zip_opb_single_operand_p" "rO")
+       (match_operand 2 "" "")                 ; Memory model used
+       (match_operand 2 "" "")                 ; Memory model used
+       (clobber (reg:CC CC_REG))]
+       (clobber (reg:CC CC_REG))]
+       "(ZIP_ATOMIC)"
+       "(ZIP_ATOMIC)"
+       {
+       {
+               emit_insn(gen_reissue_atomic_andsi(operands[0], operands[1]));
+               emit_insn(gen_reissue_atomic_andsi(operands[0], operands[1]));
+               DONE;
+               DONE;
+       })
+       })
+(define_insn "reissue_atomic_andsi"
+(define_insn "reissue_atomic_andsi"
+       [(set (match_operand:SI 0 "memory_operand" "+Q")
+       [(set (match_operand:SI 0 "memory_operand" "+Q")
+               (and:SI (match_dup 0)
+               (and:SI (match_dup 0)
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO")))
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO")))
+       (clobber (match_scratch:SI 2 "=r"))     ; Scratch register
+       (clobber (match_scratch:SI 2 "=r"))     ; Scratch register
+       (clobber (reg:CC CC_REG))]
+       (clobber (reg:CC CC_REG))]
+       "(ZIP_ATOMIC)"
+       "(ZIP_ATOMIC)"
+       "LOCK\n\tLW %0,%2\n\tAND %1,%2\n\tSW %2,%0"
+       "LOCK\n\tLW %0,%2\n\tAND %1,%2\n\tSW %2,%0"
+       [(set_attr "predicable" "no")])
+       [(set_attr "predicable" "no")])
+(define_expand "atomic_xorsi"
+(define_expand "atomic_xorsi"
+       [(match_operand:SI 0 "memory_operand" "+Q")
+       [(match_operand:SI 0 "memory_operand" "+Q")
+       (match_operand:SI 1 "zip_opb_single_operand_p" "rO")
+       (match_operand:SI 1 "zip_opb_single_operand_p" "rO")
+       (match_operand 2 "" "")                 ; Memory model used
+       (match_operand 2 "" "")                 ; Memory model used
+       (clobber (reg:CC CC_REG))]
+       (clobber (reg:CC CC_REG))]
+       "(ZIP_ATOMIC)"
+       "(ZIP_ATOMIC)"
+       {
+       {
+               emit_insn(gen_reissue_atomic_xorsi(operands[0], operands[1]));
+               emit_insn(gen_reissue_atomic_xorsi(operands[0], operands[1]));
+               DONE;
+               DONE;
+       })
+       })
+(define_insn "reissue_atomic_xorsi"
+(define_insn "reissue_atomic_xorsi"
+       [(set (match_operand:SI 0 "memory_operand" "+Q")
+       [(set (match_operand:SI 0 "memory_operand" "+Q")
+               (xor:SI (match_dup 0)
+               (xor:SI (match_dup 0)
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO")))
+                       (match_operand:SI 1 "zip_opb_single_operand_p" "rO")))
+       (clobber (match_scratch:SI 2 "=r"))     ; Scratch register
+       (clobber (match_scratch:SI 2 "=r"))     ; Scratch register
+       (clobber (reg:CC CC_REG))]
+       (clobber (reg:CC CC_REG))]
+       "(ZIP_ATOMIC)"
+       "(ZIP_ATOMIC)"
+       "LOCK\n\tLW %0,%2\n\tXOR %1,%2\n\tSW %2,%0"
+       "LOCK\n\tLW %0,%2\n\tXOR %1,%2\n\tSW %2,%0"
+       [(set_attr "predicable" "no")])
+       [(set_attr "predicable" "no")])
+;
+;
+;
+;
+; Given how the ZipCPU is put together, all SI LODs and STOs are atomic.  DI
+; Given how the ZipCPU is put together, all SI LODs and STOs are atomic.  DI
+; loads and stores need the LOCK command, to keep from breaking within them.
+; loads and stores need the LOCK command, to keep from breaking within them.
+; Further, GCC expects that anything <= the word size can use a normal load
+; Further, GCC expects that anything <= the word size can use a normal load
+; or store instruction.  Hence we don't need anything but the DI load and
+; or store instruction.  Hence we don't need anything but the DI load and
+; stores.
+; stores.
+;
+;
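
The comment above is the rationale for providing only DImode atomic loads and stores: a single-word LW or SW is already indivisible, while a 64-bit access takes two bus operations that the LOCK prefix keeps together. A small, hedged C illustration (variable names are hypothetical):

        #include <stdint.h>

        uint64_t stamp;         /* double word: two LW/SW halves */

        uint64_t read_stamp (void)
        {
                /* Expected to expand through atomic_loaddi below: LOCK / LW / LW. */
                return __atomic_load_n (&stamp, __ATOMIC_RELAXED);
        }

        void write_stamp (uint64_t v)
        {
                /* Expected to expand through atomic_storedi below: LOCK / SW / SW. */
                __atomic_store_n (&stamp, v, __ATOMIC_RELAXED);
        }
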
+(define_insn "atomic_loaddi"
+(define_insn "atomic_loaddi"
+       [(set (match_operand:DI 0 "register_operand" "=r")
+       [(set (match_operand:DI 0 "register_operand" "=r")
+               (match_operand:DI 1 "memory_operand" "Q"))
+               (match_operand:DI 1 "memory_operand" "Q"))
+       (match_operand 2 "const_int_operand" "")]
+       (match_operand 2 "const_int_operand" "")]
+       "(ZIP_HAS_DI)&&(ZIP_ATOMIC)"
+       "(ZIP_HAS_DI)&&(ZIP_ATOMIC)"
+       "LOCK\n\tLW %1,%H0\t; Atomic Load:DI\n\tLW 4+%1,%L0"
+       "LOCK\n\tLW %1,%H0\t; Atomic Load:DI\n\tLW 4+%1,%L0"
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
+;
+;
+(define_insn "atomic_storedi"
+(define_insn "atomic_storedi"
+       [(set (match_operand:DI 0 "memory_operand" "=Q")
+       [(set (match_operand:DI 0 "memory_operand" "=Q")
+               (match_operand:DI 1 "register_operand" "r"))
+               (match_operand:DI 1 "register_operand" "r"))
+       (match_operand 2 "const_int_operand" "")]
+       (match_operand 2 "const_int_operand" "")]
+       "(ZIP_HAS_DI)&&(ZIP_ATOMIC)"
+       "(ZIP_HAS_DI)&&(ZIP_ATOMIC)"
+       "LOCK\n\tSW %H1,%0\t; Atomic Store:DI\n\tSW %L1,4+%0"
+       "LOCK\n\tSW %H1,%0\t; Atomic Store:DI\n\tSW %L1,4+%0"
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
+       [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
+;
+;
+;
+;
+;
+;
+;
+;
+(define_insn "atomic_exchangesi"
+(define_insn "atomic_exchangesi"
+       [(set (match_operand:SI 0 "register_operand" "=r")
+       [(set (match_operand:SI 0 "register_operand" "=r")
+                       (match_operand:SI 1 "memory_operand" "+Q"))
+                       (match_operand:SI 1 "memory_operand" "+Q"))
+               (set (match_dup 1) (match_operand:SI 2 "register_operand" "r"))
+               (set (match_dup 1) (match_operand:SI 2 "register_operand" "r"))
+       (match_operand 3 "const_int_operand" "")]
+       (match_operand 3 "const_int_operand" "")]
+       "(ZIP_ATOMIC)"
+       "(ZIP_ATOMIC)"
+       "LOCK\n\tLW %1,%0\n\tSW %2,%1"
+       "LOCK\n\tLW %1,%0\n\tSW %2,%1"
+       [(set_attr "predicable" "no")])
+       [(set_attr "predicable" "no")])
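
The exchange pattern above is what the __atomic_exchange_n built-in lowers to: one locked LW/SW pair that hands back the previous memory contents. A short sketch with a hypothetical mailbox word:

        #include <stdint.h>

        uint32_t mailbox;

        uint32_t post (uint32_t msg)
        {
                /* Swap in msg and get the old value back, per atomic_exchangesi. */
                return __atomic_exchange_n (&mailbox, msg, __ATOMIC_SEQ_CST);
        }
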
+;
+;
+;
+;
+;
+;
+; Here's another set of the atomic operators, this time those that leave their
+; Here's another set of the atomic operators, this time those that leave their
+; result in operand zero.
+; result in operand zero.
+;
+;
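
In GCC's naming, the "<op>_fetch" patterns return the new (post-operation) value, whereas the "fetch_<op>" patterns still further below return the old value. A brief sketch of the C-level difference, using a hypothetical reference counter:

        #include <stdint.h>

        uint32_t refs;

        uint32_t take_ref (void)
        {
                /* Returns the incremented value: maps onto atomic_add_fetchsi,
                   whose asm leaves memory + addend in operand 0. */
                return __atomic_add_fetch (&refs, 1, __ATOMIC_SEQ_CST);
        }

        uint32_t drop_ref (void)
        {
                /* Returns the value before the subtraction: handled by the
                   atomic_fetch_subsi pattern later in this file. */
                return __atomic_fetch_sub (&refs, 1, __ATOMIC_SEQ_CST);
        }
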
+(define_expand "atomic_add_fetchsi"
+(define_expand "atomic_add_fetchsi"
+       [(match_operand:SI 0 "register_operand" "=r")
+       [(match_operand:SI 0 "register_operand" "=r")
+       (match_operand:SI 1 "memory_operand" "+Q")
+       (match_operand:SI 1 "memory_operand" "+Q")
+       (match_operand:SI 2 "zip_opb_single_operand_p" "rO")
+       (match_operand:SI 2 "zip_opb_single_operand_p" "rO")
+       (match_operand 3 "const_int_operand" "")        ;// Memory model used
+       (match_operand 3 "const_int_operand" "")        ;// Memory model used
+       (clobber (reg:CC CC_REG))]
+       (clobber (reg:CC CC_REG))]
+       "(ZIP_ATOMIC)"
+       "(ZIP_ATOMIC)"
+       {
+       {
+               emit_insn(gen_reissue_atomic_add_fetchsi(operands[0],
+               emit_insn(gen_reissue_atomic_add_fetchsi(operands[0],
+                       operands[1], operands[2]));
+                       operands[1], operands[2]));
+               DONE;
+               DONE;
+       })
+       })
+(define_insn "reissue_atomic_add_fetchsi"
+(define_insn "reissue_atomic_add_fetchsi"
+       [(set (match_operand:SI 0 "register_operand" "=r")
+       [(set (match_operand:SI 0 "register_operand" "=r")
+               (plus:SI (match_operand:SI 1 "memory_operand" "+Q")
+               (plus:SI (match_operand:SI 1 "memory_operand" "+Q")
+                       (match_operand:SI 2 "zip_opb_single_operand_p" "rO")))
+                       (match_operand:SI 2 "zip_opb_single_operand_p" "rO")))
+       (set (match_dup 1) (plus:SI (match_dup 1) (match_dup 2)))
+       (set (match_dup 1) (plus:SI (match_dup 1) (match_dup 2)))
+       (clobber (reg:CC CC_REG))]
+       (clobber (reg:CC CC_REG))]
+       "(ZIP_ATOMIC)"
+       "(ZIP_ATOMIC)"
+       "LOCK\n\tLW %1,%0\n\tADD %2,%0\n\tSW %0,%1"
+       "LOCK\n\tLW %1,%0\n\tADD %2,%0\n\tSW %0,%1"
+       [(set_attr "predicable" "no")])
+       [(set_attr "predicable" "no")])
+(define_expand "atomic_sub_fetchsi"
+(define_expand "atomic_sub_fetchsi"
+       [(match_operand:SI 0 "register_operand" "=r")
+       [(match_operand:SI 0 "register_operand" "=r")
+       (match_operand:SI 1 "memory_operand" "+Q")
+       (match_operand:SI 1 "memory_operand" "+Q")
+       (match_operand:SI 2 "zip_opb_single_operand_p" "rO")
+       (match_operand:SI 2 "zip_opb_single_operand_p" "rO")
+       (match_operand 3 "const_int_operand" "")
+       (match_operand 3 "const_int_operand" "")
+       (clobber (reg:CC CC_REG))]
+       (clobber (reg:CC CC_REG))]
+       "(ZIP_ATOMIC)"
+       "(ZIP_ATOMIC)"
+       {
+       {
+               emit_insn(gen_reissue_atomic_sub_fetchsi(
+               emit_insn(gen_reissue_atomic_sub_fetchsi(
+                       operands[0], operands[1], operands[2]));
+                       operands[0], operands[1], operands[2]));
+               DONE;
+               DONE;
+       })
+       })
+(define_insn "reissue_atomic_sub_fetchsi"
+(define_insn "reissue_atomic_sub_fetchsi"
+       [(set (match_operand:SI 0 "register_operand" "=r")
+       [(set (match_operand:SI 0 "register_operand" "=r")
+               (minus:SI (match_operand:SI 1 "memory_operand" "+Q")
+               (minus:SI (match_operand:SI 1 "memory_operand" "+Q")
+                       (match_operand:SI 2 "zip_opb_single_operand_p" "rO")))
+                       (match_operand:SI 2 "zip_opb_single_operand_p" "rO")))
+       (set (match_dup 1) (minus:SI (match_dup 1) (match_dup 2)))
+       (set (match_dup 1) (minus:SI (match_dup 1) (match_dup 2)))
+       (clobber (reg:CC CC_REG))]
+       (clobber (reg:CC CC_REG))]
+       "(ZIP_ATOMIC)"
+       "(ZIP_ATOMIC)"
+       "LOCK\n\tLW %1,%0\n\tSUB %2,%0\n\tSW %0,%1"
+       "LOCK\n\tLW %1,%0\n\tSUB %2,%0\n\tSW %0,%1"
+       [(set_attr "predicable" "no")])
+       [(set_attr "predicable" "no")])
+(define_expand "atomic_or_fetchsi"
+(define_expand "atomic_or_fetchsi"
+       [(match_operand:SI 0 "register_operand" "=r")
+       [(match_operand:SI 0 "register_operand" "=r")
+       (match_operand:SI 1 "memory_operand" "+Q")
+       (match_operand:SI 1 "memory_operand" "+Q")
+       (match_operand:SI 2 "zip_opb_single_operand_p" "rO")
+       (match_operand:SI 2 "zip_opb_single_operand_p" "rO")
+       (match_operand 3 "const_int_operand" "")
+       (match_operand 3 "const_int_operand" "")
+       (clobber (reg:CC CC_REG))]
+       (clobber (reg:CC CC_REG))]
+       "(ZIP_ATOMIC)"
+       "(ZIP_ATOMIC)"
+       {
+       {
+               emit_insn(gen_reissue_atomic_or_fetchsi(
+               emit_insn(gen_reissue_atomic_or_fetchsi(
+                       operands[0], operands[1], operands[2]));
+                       operands[0], operands[1], operands[2]));
+               DONE;
+               DONE;
+       })
+       })
+(define_insn "reissue_atomic_or_fetchsi"
+(define_insn "reissue_atomic_or_fetchsi"
+       [(set (match_operand:SI 0 "register_operand" "=r")
+       [(set (match_operand:SI 0 "register_operand" "=r")
+               (ior:SI (match_operand:SI 1 "memory_operand" "+Q")
+               (ior:SI (match_operand:SI 1 "memory_operand" "+Q")
+                       (match_operand:SI 2 "zip_opb_single_operand_p" "rO")))
+                       (match_operand:SI 2 "zip_opb_single_operand_p" "rO")))
+       (set (match_dup 1) (ior:SI (match_dup 1) (match_dup 2)))
+       (set (match_dup 1) (ior:SI (match_dup 1) (match_dup 2)))
+       (clobber (reg:CC CC_REG))]
+       (clobber (reg:CC CC_REG))]
+       "(ZIP_ATOMIC)"
+       "(ZIP_ATOMIC)"
+       "LOCK\n\tLW %1,%0\n\tOR %2,%0\n\tSW %0,%1"
+       "LOCK\n\tLW %1,%0\n\tOR %2,%0\n\tSW %0,%1"
+       [(set_attr "predicable" "no")])
+       [(set_attr "predicable" "no")])
+(define_expand "atomic_and_fetchsi"
+(define_expand "atomic_and_fetchsi"
+       [(match_operand:SI 0 "register_operand" "=r")
+       [(match_operand:SI 0 "register_operand" "=r")
+       (match_operand:SI 1 "memory_operand" "+Q")
+       (match_operand:SI 1 "memory_operand" "+Q")
+       (match_operand:SI 2 "zip_opb_single_operand_p" "rO")
+       (match_operand:SI 2 "zip_opb_single_operand_p" "rO")
+       (match_operand 3 "const_int_operand" "")
+       (match_operand 3 "const_int_operand" "")
+       (clobber (reg:CC CC_REG))]
+       (clobber (reg:CC CC_REG))]
+       "(ZIP_ATOMIC)"
+       "(ZIP_ATOMIC)"
+       {
+       {
+               emit_insn(gen_reissue_atomic_and_fetchsi(
+               emit_insn(gen_reissue_atomic_and_fetchsi(
+                       operands[0], operands[1], operands[2]));
+                       operands[0], operands[1], operands[2]));
+               DONE;
+               DONE;
+       })
+       })
+(define_insn "reissue_atomic_and_fetchsi"
+(define_insn "reissue_atomic_and_fetchsi"
+       [(set (match_operand:SI 0 "register_operand" "=r")
+       [(set (match_operand:SI 0 "register_operand" "=r")
+               (and:SI (match_operand:SI 1 "memory_operand" "+Q")
+               (and:SI (match_operand:SI 1 "memory_operand" "+Q")
+                       (match_operand:SI 2 "zip_opb_single_operand_p" "rO")))
+                       (match_operand:SI 2 "zip_opb_single_operand_p" "rO")))
+       (set (match_dup 1) (and:SI (match_dup 1) (match_dup 2)))
+       (set (match_dup 1) (and:SI (match_dup 1) (match_dup 2)))
+       (clobber (reg:CC CC_REG))]
+       (clobber (reg:CC CC_REG))]
+       "(ZIP_ATOMIC)"
+       "(ZIP_ATOMIC)"
+       "LOCK\n\tLW %1,%0\n\tAND %2,%0\n\tSW %0,%1"
+       "LOCK\n\tLW %1,%0\n\tAND %2,%0\n\tSW %0,%1"
+       [(set_attr "predicable" "no")])
+       [(set_attr "predicable" "no")])
+(define_expand "atomic_xor_fetchsi"
+(define_expand "atomic_xor_fetchsi"
+       [(match_operand:SI 0 "register_operand" "=r")
+       [(match_operand:SI 0 "register_operand" "=r")
+       (match_operand:SI 1 "memory_operand" "+Q")
+       (match_operand:SI 1 "memory_operand" "+Q")
+       (match_operand:SI 2 "zip_opb_single_operand_p" "rO")
+       (match_operand:SI 2 "zip_opb_single_operand_p" "rO")
+       (match_operand 3 "const_int_operand" "")                        ;// Memory model
+       (match_operand 3 "const_int_operand" "")                        ;// Memory model
+       (clobber (reg:CC CC_REG))]
+       (clobber (reg:CC CC_REG))]
+       "(ZIP_ATOMIC)"
+       "(ZIP_ATOMIC)"
+       {
+       {
+               emit_insn(gen_reissue_atomic_xor_fetchsi(
+               emit_insn(gen_reissue_atomic_xor_fetchsi(
+                       operands[0], operands[1], operands[2]));
+                       operands[0], operands[1], operands[2]));
+               DONE;
+               DONE;
+       })
+       })
+(define_insn "reissue_atomic_xor_fetchsi"
+(define_insn "reissue_atomic_xor_fetchsi"
+       [(set (match_operand:SI 0 "register_operand" "=r")
+       [(set (match_operand:SI 0 "register_operand" "=r")
+               (xor:SI (match_operand:SI 1 "memory_operand" "+Q")
+               (xor:SI (match_operand:SI 1 "memory_operand" "+Q")
+                       (match_operand:SI 2 "zip_opb_single_operand_p" "rO")))
+                       (match_operand:SI 2 "zip_opb_single_operand_p" "rO")))
+       (set (match_dup 1) (xor:SI (match_dup 1) (match_dup 2)))
+       (set (match_dup 1) (xor:SI (match_dup 1) (match_dup 2)))
+       (clobber (reg:CC CC_REG))]
+       (clobber (reg:CC CC_REG))]
+       "(ZIP_ATOMIC)"
+       "(ZIP_ATOMIC)"
+       "LOCK\n\tLW %1,%0\n\tXOR %2,%0\n\tSW %0,%1"
+       "LOCK\n\tLW %1,%0\n\tXOR %2,%0\n\tSW %0,%1"
+       [(set_attr "predicable" "no")])
+       [(set_attr "predicable" "no")])
+;
+;
+;
+;
+;
+;
+;
+;
+(define_expand "atomic_fetch_addsi"
+(define_expand "atomic_fetch_addsi"
+       [(set (match_operand:SI 1 "memory_operand" "+Q")
+       [(set (match_operand:SI 1 "memory_operand" "+Q")
+               (plus:SI (match_dup 1)
+               (plus:SI (match_dup 1)
+                       (match_operand:SI 2 "register_operand" "=r")))
+                       (match_operand:SI 2 "register_operand" "=r")))
+       (set (match_operand:SI 0 "register_operand" "=r")
+       (set (match_operand:SI 0 "register_operand" "=r")
+               (match_dup 1))
+               (match_dup 1))
+       (set (match_dup 2) (plus:SI (match_dup 1) (match_dup 2)))
+       (set (match_dup 2) (plus:SI (match_dup 1) (match_dup 2)))
+       (match_operand 3 "const_int_operand" "")                                ; Memory model used
+       (match_operand 3 "const_int_operand" "")                                ; Memory model used
+       (clobber (reg:CC CC_REG))]
+       (clobber (reg:CC CC_REG))]
+       "(ZIP_ATOMIC)"
+       "(ZIP_ATOMIC)"
+       {
+       {
+               emit_insn(gen_reissue_atomic_fetch_addsi(
+               emit_insn(gen_reissue_atomic_fetch_addsi(
+                       operands[0], operands[1], operands[2]));
+                       operands[0], operands[1], operands[2]));
+               DONE;
+               DONE;
+       })
+       })
+(define_insn "reissue_atomic_fetch_addsi"
+(define_insn "reissue_atomic_fetch_addsi"
+       [(set (match_operand:SI 1 "memory_operand" "+Q")
+       [(set (match_operand:SI 1 "memory_operand" "+Q")
+               (plus:SI (match_dup 1)
+               (plus:SI (match_dup 1)
+                       (match_operand:SI 2 "register_operand" "=r")))
+                       (match_operand:SI 2 "register_operand" "=r")))
+       (set (match_operand:SI 0 "register_operand" "=r")
+       (set (match_operand:SI 0 "register_operand" "=r")
+               (match_dup 1))
+               (match_dup 1))
+       (set (match_dup 2) (plus:SI (match_dup 1) (match_dup 2)))
+       (set (match_dup 2) (plus:SI (match_dup 1) (match_dup 2)))
+       (clobber (reg:CC CC_REG))]
+       (clobber (reg:CC CC_REG))]
+       "(ZIP_ATOMIC)"
+       "(ZIP_ATOMIC)"
+       "LOCK\n\tLW %1,%0\n\tADD %0,%2\n\tSW %2,%1"
+       "LOCK\n\tLW %1,%0\n\tADD %0,%2\n\tSW %2,%1"
+       [(set_attr "predicable" "no")])
+       [(set_attr "predicable" "no")])
+(define_expand "atomic_fetch_subsi"
+(define_expand "atomic_fetch_subsi"
+       [(match_operand:SI 0 "register_operand" "=r")
+       [(match_operand:SI 0 "register_operand" "=r")
+       (match_operand:SI 1 "memory_operand" "+Q")
+       (match_operand:SI 1 "memory_operand" "+Q")
+       (match_operand:SI 2 "zip_opb_single_operand_p" "rO")
+       (match_operand:SI 2 "zip_opb_single_operand_p" "rO")
+       (clobber (reg:CC CC_REG))]
+       (clobber (reg:CC CC_REG))]
+       "(ZIP_ATOMIC)"
+       "(ZIP_ATOMIC)"
+       {
+       {
+               emit_insn(gen_reissue_atomic_fetch_subsi(
+               emit_insn(gen_reissue_atomic_fetch_subsi(
+                       operands[0], operands[1], operands[2]));
+                       operands[0], operands[1], operands[2]));
+               DONE;
+               DONE;
+       })
+       })
+(define_insn "reissue_atomic_fetch_subsi"
+(define_insn "reissue_atomic_fetch_subsi"
+       [(set (match_operand:SI 0 "register_operand" "=r")
+       [(set (match_operand:SI 0 "register_operand" "=r")
+               (minus:SI (match_operand:SI 1 "memory_operand" "+Q")
+               (minus:SI (match_operand:SI 1 "memory_operand" "+Q")
+                       (match_operand:SI 2 "zip_opb_single_operand_p" "rO")))
+                       (match_operand:SI 2 "zip_opb_single_operand_p" "rO")))
+       (set (match_dup 1) (minus:SI (match_dup 1) (match_dup 2)))
+       (set (match_dup 1) (minus:SI (match_dup 1) (match_dup 2)))
+       (clobber (match_scratch:SI 3 "=r"))     ; Scratch register
+       (clobber (match_scratch:SI 3 "=r"))     ; Scratch register
+       (clobber (reg:CC CC_REG))]
+       (clobber (reg:CC CC_REG))]
+       "(ZIP_ATOMIC)"
+       "(ZIP_ATOMIC)"
+       "NEG %2,%3\n\tLOCK\n\tLW %1,%0\n\tADD %0,%3\n\tSW %3,%1"
+       "NEG %2,%3\n\tLOCK\n\tLW %1,%0\n\tADD %0,%3\n\tSW %3,%1"
+       [(set_attr "predicable" "no")])
+       [(set_attr "predicable" "no")])
+(define_insn "atomic_fetch_orsi"
+(define_insn "atomic_fetch_orsi"
+       [(set (match_operand:SI 1 "memory_operand" "+Q")
+       [(set (match_operand:SI 1 "memory_operand" "+Q")
+               (ior:SI (match_dup 1)
+               (ior:SI (match_dup 1)
+                       (match_operand:SI 2 "register_operand" "=r")))
+                       (match_operand:SI 2 "register_operand" "=r")))
+       (set (match_operand:SI 0 "register_operand" "=r")
+       (set (match_operand:SI 0 "register_operand" "=r")
+               (match_dup 1))
+               (match_dup 1))
+       (set (match_dup 2) (ior:SI (match_dup 1) (match_dup 2)))
+       (set (match_dup 2) (ior:SI (match_dup 1) (match_dup 2)))
+       (match_operand 3 "const_int_operand" "")        ; Memory model used
+       (match_operand 3 "const_int_operand" "")        ; Memory model used
+       (clobber (reg:CC CC_REG))]
+       (clobber (reg:CC CC_REG))]
+       "(ZIP_ATOMIC)"
+       "(ZIP_ATOMIC)"
+       "LOCK\n\tLW %1,%0\n\tOR %0,%2\n\tSW %2,%1"
+       "LOCK\n\tLW %1,%0\n\tOR %0,%2\n\tSW %2,%1"
+       [(set_attr "predicable" "no")])
+       [(set_attr "predicable" "no")])
+(define_insn "atomic_fetch_andsi"
+(define_insn "atomic_fetch_andsi"
+       [(set (match_operand:SI 1 "memory_operand" "+Q")
+       [(set (match_operand:SI 1 "memory_operand" "+Q")
+               (and:SI (match_dup 1)
+               (and:SI (match_dup 1)
+                       (match_operand:SI 2 "register_operand" "=r")))
+                       (match_operand:SI 2 "register_operand" "=r")))
+       (set (match_operand:SI 0 "register_operand" "=r")
+       (set (match_operand:SI 0 "register_operand" "=r")
+               (match_dup 1))
+               (match_dup 1))
+       (set (match_dup 2) (and:SI (match_dup 1) (match_dup 2)))
+       (set (match_dup 2) (and:SI (match_dup 1) (match_dup 2)))
+       (match_operand 3 "const_int_operand" "")        ; Memory model used
+       (match_operand 3 "const_int_operand" "")        ; Memory model used
+       (clobber (reg:CC CC_REG))]
+       (clobber (reg:CC CC_REG))]
+       "(ZIP_ATOMIC)"
+       "(ZIP_ATOMIC)"
+       "LOCK\n\tLW %1,%0\n\tAND %0,%2\n\tSW %2,%1"
+       "LOCK\n\tLW %1,%0\n\tAND %0,%2\n\tSW %2,%1"
+       [(set_attr "predicable" "no")])
+       [(set_attr "predicable" "no")])
+(define_insn "atomic_fetch_xorsi"
+(define_insn "atomic_fetch_xorsi"
+       [(set (match_operand:SI 1 "memory_operand" "+Q")
+       [(set (match_operand:SI 1 "memory_operand" "+Q")
+               (xor:SI (match_dup 1)
+               (xor:SI (match_dup 1)
+                       (match_operand:SI 2 "register_operand" "=r")))
+                       (match_operand:SI 2 "register_operand" "=r")))
+       (set (match_operand:SI 0 "register_operand" "=r")
+       (set (match_operand:SI 0 "register_operand" "=r")
+               (match_dup 1))
+               (match_dup 1))
+       (set (match_dup 2) (xor:SI (match_dup 1) (match_dup 2)))
+       (set (match_dup 2) (xor:SI (match_dup 1) (match_dup 2)))
+       (match_operand 3 "const_int_operand" "")        ; Memory model used
+       (match_operand 3 "const_int_operand" "")        ; Memory model used
+       (clobber (reg:CC CC_REG))]
+       (clobber (reg:CC CC_REG))]
+       "(ZIP_ATOMIC)"
+       "(ZIP_ATOMIC)"
+       "LOCK\n\tLW %1,%0\n\tXOR %0,%2\n\tSW %2,%1"
+       "LOCK\n\tLW %1,%0\n\tXOR %0,%2\n\tSW %2,%1"
+       [(set_attr "predicable" "no")])
+       [(set_attr "predicable" "no")])
+;
+;
+;
+;
+;
+;
+;
+;
+(define_insn "atomic_test_and_set"
+(define_insn "atomic_test_and_set"
+       [(set (match_operand:SI 0 "register_operand" "=r")
+       [(set (match_operand:SI 0 "register_operand" "=r")
+               (zero_extend:SI
+               (zero_extend:SI
+                       (match_operand:QI 1 "memory_operand" "+Q")))
+                       (match_operand:QI 1 "memory_operand" "+Q")))
+       (set (match_dup 1) (const_int 1))
+       (set (match_dup 1) (const_int 1))
+       (match_operand 2 "const_int_operand" "")        ; Memory model used
+       (match_operand 2 "const_int_operand" "")        ; Memory model used
+       (clobber (match_scratch:SI 3 "=r"))]    ; Scratch register
+       (clobber (match_scratch:SI 3 "=r"))]    ; Scratch register
+       "(ZIP_ATOMIC)"
+       "(ZIP_ATOMIC)"
+       "LDI    1,%3
+       "LDI    1,%3
+       LOCK
+       LOCK
+       LB      %1,%0
+       LB      %1,%0
+       SB      %3,%1"
+       SB      %3,%1"
+       [(set_attr "predicable" "no")])
+       [(set_attr "predicable" "no")])
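
atomic_test_and_set is the pattern behind __atomic_test_and_set (and C11's atomic_flag): it fetches the old byte and unconditionally stores a 1. A minimal spin-lock sketch, under the assumption that a char-sized flag is used:

        char lock_flag;         /* 0 when free; the QImode operand above */

        void lock (void)
        {
                /* Spin until the locked LB/SB pair reads back a zero. */
                while (__atomic_test_and_set (&lock_flag, __ATOMIC_ACQUIRE))
                        ;
        }

        void unlock (void)
        {
                __atomic_clear (&lock_flag, __ATOMIC_RELEASE);
        }
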
+;
+;
+;
+;
+;
+;
+(define_expand "atomic_compare_and_swapsi"
+(define_expand "atomic_compare_and_swapsi"
+       [(match_operand:SI 0 "register_operand" "=r")   ;; bool output
+       [(match_operand:SI 0 "register_operand" "=r")   ;; bool output
+       (match_operand:SI 1 "register_operand" "=r")    ;; val output
+       (match_operand:SI 1 "register_operand" "=r")    ;; val output
+       (match_operand:SI 2 "memory_operand" "+Q")      ;; memory
+       (match_operand:SI 2 "memory_operand" "+Q")      ;; memory
+       (match_operand:SI 3 "zip_opb_single_operand_p" "rO") ;; Expected
+       (match_operand:SI 3 "zip_opb_single_operand_p" "rO") ;; Expected
+       (match_operand:SI 4 "register_operand" "r")     ;; Desired
+       (match_operand:SI 4 "register_operand" "r")     ;; Desired
+       (match_operand 5 "const_int_operand" "")        ; is_weak
+       (match_operand 5 "const_int_operand" "")        ; is_weak
+       (match_operand 6 "const_int_operand" "")        ; mem model on success
+       (match_operand 6 "const_int_operand" "")        ; mem model on success
+       (match_operand 7 "const_int_operand" "")        ; mem model on failure
+       (match_operand 7 "const_int_operand" "")        ; mem model on failure
+       ]
+       ]
+       "(ZIP_ATOMIC)"
+       "(ZIP_ATOMIC)"
+       {
+       {
+               emit_insn(gen_reissue_atomic_compare_and_swapsi(
+               emit_insn(gen_reissue_atomic_compare_and_swapsi(
+                       operands[0], operands[1],
+                       operands[0], operands[1],
+                       operands[2], operands[3],
+                       operands[2], operands[3],
+                       operands[4]));
+                       operands[4]));
+               DONE;
+               DONE;
+       })
+       })
+
+
+(define_insn "reissue_atomic_compare_and_swapsi"
+(define_insn "reissue_atomic_compare_and_swapsi"
+       [(set (match_operand:SI 0 "register_operand" "=r")
+       [(set (match_operand:SI 0 "register_operand" "=r")
+               (if_then_else
+               (if_then_else
+                       (eq (match_operand:SI 2 "memory_operand" "+Q")
+                       (eq (match_operand:SI 2 "memory_operand" "+Q")
+                               (match_operand:SI 3 "zip_opb_single_operand_p" "rO"))
+                               (match_operand:SI 3 "zip_opb_single_operand_p" "rO"))
+                       (const_int 1)
+                       (const_int 1)
+                       (const_int 0)))
+                       (const_int 0)))
+       (set (match_operand:SI 1 "register_operand" "=r") (match_dup 2))
+       (set (match_operand:SI 1 "register_operand" "=r") (match_dup 2))
+       (set (match_dup 2) (if_then_else
+       (set (match_dup 2) (if_then_else
+                       (eq (match_dup 2) (match_dup 3))
+                       (eq (match_dup 2) (match_dup 3))
+                               (match_operand:SI 4 "register_operand" "r")
+                               (match_operand:SI 4 "register_operand" "r")
+                               (match_dup 0)))]
+                               (match_dup 0)))]
+       "(ZIP_ATOMIC)"
+       "(ZIP_ATOMIC)"
+       "CLR %0
+       "CLR %0
+       LOCK
+       LOCK
+       LW %2,%1
+       LW %2,%1
+       CMP %3,%1
+       CMP %3,%1
+       SW %4,%1
+       SW %4,%1
+       LDI.Z 1,%0"
+       LDI.Z 1,%0"
+       [(set_attr "predicable" "no")])
+       [(set_attr "predicable" "no")])
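
This pair implements __atomic_compare_exchange: operand 0 receives the success flag, operand 1 the value read back, and operands 5 through 7 (is_weak plus the two memory models) are accepted by the expander even though the emitted sequence does not vary with them. A hedged usage sketch with hypothetical names:

        #include <stdint.h>
        #include <stdbool.h>

        uint32_t owner;

        /* Claim `owner` for `me` only if it currently holds 0. */
        bool try_claim (uint32_t me)
        {
                uint32_t expected = 0;
                return __atomic_compare_exchange_n (&owner, &expected, me,
                                                    false,              /* strong */
                                                    __ATOMIC_SEQ_CST,   /* success */
                                                    __ATOMIC_SEQ_CST);  /* failure */
        }
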
+;
+;
+;
+;
+;
+;
+;
+;
+; STILL MISSING:
+; STILL MISSING:
+;
+;
+;      deprecated sync_* atomic functions
+;      deprecated sync_* atomic functions
+;
+;
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/config.gcc gcc-6.2.0-zip/gcc/config.gcc
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/config.gcc gcc-6.2.0-zip/gcc/config.gcc
--- gcc-6.2.0/gcc/config.gcc    2016-06-08 09:34:25.000000000 -0400
--- gcc-6.2.0/gcc/config.gcc    2016-06-08 09:34:25.000000000 -0400
+++ gcc-6.2.0-zip/gcc/config.gcc        2016-12-31 16:41:06.258602919 -0500
+++ gcc-6.2.0-zip/gcc/config.gcc        2016-12-31 16:41:06.258602919 -0500
@@ -493,6 +493,10 @@
@@ -493,6 +493,10 @@
 tilepro*-*-*)
 tilepro*-*-*)
        cpu_type=tilepro
        cpu_type=tilepro
        ;;
        ;;
+zip*)
+zip*)
+       cpu_type=zip
+       cpu_type=zip
+       tmake_file=zip/t-zip
+       tmake_file=zip/t-zip
+       ;;
+       ;;
 esac
 esac
 
 
 tm_file=${cpu_type}/${cpu_type}.h
 tm_file=${cpu_type}/${cpu_type}.h
@@ -3042,6 +3046,11 @@
@@ -3042,6 +3046,11 @@
        c_target_objs="m32c-pragma.o"
        c_target_objs="m32c-pragma.o"
        cxx_target_objs="m32c-pragma.o"
        cxx_target_objs="m32c-pragma.o"
        ;;
        ;;
+zip*)
+zip*)
+       target_has_targetm_common=yes
+       target_has_targetm_common=yes
+       tm_file="elfos.h newlib-stdint.h ${tm_file}"
+       tm_file="elfos.h newlib-stdint.h ${tm_file}"
+       tmake_file="${tmake_file} zip/t-zip"
+       tmake_file="${tmake_file} zip/t-zip"
+       ;;
+       ;;
 *)
 *)
        echo "*** Configuration ${target} not supported" 1>&2
        echo "*** Configuration ${target} not supported" 1>&2
        exit 1
        exit 1
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/cse.c gcc-6.2.0-zip/gcc/cse.c
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/cse.c gcc-6.2.0-zip/gcc/cse.c
--- gcc-6.2.0/gcc/cse.c 2016-02-04 04:56:13.000000000 -0500
--- gcc-6.2.0/gcc/cse.c 2016-02-04 04:56:13.000000000 -0500
+++ gcc-6.2.0-zip/gcc/cse.c     2017-02-06 21:46:10.525049918 -0500
+++ gcc-6.2.0-zip/gcc/cse.c     2018-06-05 21:17:32.963050314 -0400
@@ -42,6 +42,16 @@
@@ -42,6 +42,16 @@
 #include "dbgcnt.h"
 #include "dbgcnt.h"
 #include "rtl-iter.h"
 #include "rtl-iter.h"
 
 
+
+
+// #define     DO_ZIP_DEBUGS
+// #define     DO_ZIP_DEBUGS
+#ifdef DO_ZIP_DEBUGS
+#ifdef DO_ZIP_DEBUGS
+#include <stdio.h>
+#include <stdio.h>
+#define        ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s;%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+#define        ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s;%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+extern void    zip_debug_rtx(const_rtx);
+extern void    zip_debug_rtx(const_rtx);
+#else
+#else
+#define        ZIP_DEBUG_LINE(STR,RTX)
+#define        ZIP_DEBUG_LINE(STR,RTX)
+#endif
+#endif
+
+
 #ifndef LOAD_EXTEND_OP
 #ifndef LOAD_EXTEND_OP
 #define LOAD_EXTEND_OP(M) UNKNOWN
 #define LOAD_EXTEND_OP(M) UNKNOWN
 #endif
 #endif
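
The same three-line DO_ZIP_DEBUGS block is dropped into several middle-end files below (df-scan.c, emit-rtl.c, final.c, jump.c, loop-doloop.c); it is purely a tracing aid and expands to nothing while the #define stays commented out. A sketch of the intended use, based on the final.c hunk further down:

        /* 1. Un-comment the define near the top of the file of interest: */
        #define DO_ZIP_DEBUGS

        /* 2. Drop trace points where needed, e.g. in final.c:
         *        ZIP_DEBUG_LINE ("FINAL-INSN", insn);
         * Each hit prints file;line/tag to stderr and then dumps the rtx
         * through zip_debug_rtx(), which the zip backend itself provides. */
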
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/defaults.h gcc-6.2.0-zip/gcc/defaults.h
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/defaults.h gcc-6.2.0-zip/gcc/defaults.h
--- gcc-6.2.0/gcc/defaults.h    2016-01-04 09:30:50.000000000 -0500
--- gcc-6.2.0/gcc/defaults.h    2016-01-04 09:30:50.000000000 -0500
+++ gcc-6.2.0-zip/gcc/defaults.h        2016-12-31 16:42:29.094087738 -0500
+++ gcc-6.2.0-zip/gcc/defaults.h        2016-12-31 16:42:29.094087738 -0500
@@ -495,6 +495,8 @@
@@ -495,6 +495,8 @@
 #define LOG2_BITS_PER_UNIT 3
 #define LOG2_BITS_PER_UNIT 3
 #elif BITS_PER_UNIT == 16
 #elif BITS_PER_UNIT == 16
 #define LOG2_BITS_PER_UNIT 4
 #define LOG2_BITS_PER_UNIT 4
+#elif BITS_PER_UNIT == 32
+#elif BITS_PER_UNIT == 32
+#define LOG2_BITS_PER_UNIT 5
+#define LOG2_BITS_PER_UNIT 5
 #else
 #else
 #error Unknown BITS_PER_UNIT
 #error Unknown BITS_PER_UNIT
 #endif
 #endif
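
The new branch simply extends the BITS_PER_UNIT table to targets whose smallest addressable unit is 32 bits wide: LOG2_BITS_PER_UNIT must be log2(BITS_PER_UNIT), and log2(32) is 5. A hypothetical compile-time check of that invariant (not part of the patch; the MY_ macros are stand-ins):

        #define MY_BITS_PER_UNIT      32
        #define MY_LOG2_BITS_PER_UNIT 5
        _Static_assert ((1 << MY_LOG2_BITS_PER_UNIT) == MY_BITS_PER_UNIT,
                        "LOG2_BITS_PER_UNIT must be log2 of BITS_PER_UNIT");
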
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/df-scan.c gcc-6.2.0-zip/gcc/df-scan.c
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/df-scan.c gcc-6.2.0-zip/gcc/df-scan.c
--- gcc-6.2.0/gcc/df-scan.c     2016-01-04 09:30:50.000000000 -0500
--- gcc-6.2.0/gcc/df-scan.c     2016-01-04 09:30:50.000000000 -0500
+++ gcc-6.2.0-zip/gcc/df-scan.c 2016-12-31 16:43:04.557867015 -0500
+++ gcc-6.2.0-zip/gcc/df-scan.c 2016-12-31 16:43:04.557867015 -0500
@@ -35,6 +35,14 @@
@@ -35,6 +35,14 @@
 #include "dumpfile.h"
 #include "dumpfile.h"
 
 
 
 
+#ifdef DO_ZIP_DEBUGS
+#ifdef DO_ZIP_DEBUGS
+#include <stdio.h>
+#include <stdio.h>
+#define        ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s;%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+#define        ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s;%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+extern void    zip_debug_rtx(const_rtx);
+extern void    zip_debug_rtx(const_rtx);
+#else
+#else
+#define        ZIP_DEBUG_LINE(STR,RTX)
+#define        ZIP_DEBUG_LINE(STR,RTX)
+#endif
+#endif
+
+
 /* The set of hard registers in eliminables[i].from. */
 /* The set of hard registers in eliminables[i].from. */
 
 
 static HARD_REG_SET elim_reg_set;
 static HARD_REG_SET elim_reg_set;
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/emit-rtl.c gcc-6.2.0-zip/gcc/emit-rtl.c
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/emit-rtl.c gcc-6.2.0-zip/gcc/emit-rtl.c
--- gcc-6.2.0/gcc/emit-rtl.c    2016-02-16 18:12:19.000000000 -0500
--- gcc-6.2.0/gcc/emit-rtl.c    2016-02-16 18:12:19.000000000 -0500
+++ gcc-6.2.0-zip/gcc/emit-rtl.c        2016-12-31 16:43:35.065677060 -0500
+++ gcc-6.2.0-zip/gcc/emit-rtl.c        2016-12-31 16:43:35.065677060 -0500
@@ -59,6 +59,15 @@
@@ -59,6 +59,15 @@
 #include "stor-layout.h"
 #include "stor-layout.h"
 #include "opts.h"
 #include "opts.h"
 
 
+
+
+#ifdef DO_ZIP_DEBUGS
+#ifdef DO_ZIP_DEBUGS
+#include <stdio.h>
+#include <stdio.h>
+#define        ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s;%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+#define        ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s;%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+extern void    zip_debug_rtx(const_rtx);
+extern void    zip_debug_rtx(const_rtx);
+#else
+#else
+#define        ZIP_DEBUG_LINE(STR,RTX)
+#define        ZIP_DEBUG_LINE(STR,RTX)
+#endif
+#endif
+
+
 struct target_rtl default_target_rtl;
 struct target_rtl default_target_rtl;
 #if SWITCHABLE_TARGET
 #if SWITCHABLE_TARGET
 struct target_rtl *this_target_rtl = &default_target_rtl;
 struct target_rtl *this_target_rtl = &default_target_rtl;
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/final.c gcc-6.2.0-zip/gcc/final.c
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/final.c gcc-6.2.0-zip/gcc/final.c
--- gcc-6.2.0/gcc/final.c       2016-01-22 11:44:10.000000000 -0500
--- gcc-6.2.0/gcc/final.c       2016-01-22 11:44:10.000000000 -0500
+++ gcc-6.2.0-zip/gcc/final.c   2017-02-06 15:35:15.410085646 -0500
+++ gcc-6.2.0-zip/gcc/final.c   2017-02-06 15:35:15.410085646 -0500
@@ -79,6 +79,16 @@
@@ -79,6 +79,16 @@
 #include "rtl-iter.h"
 #include "rtl-iter.h"
 #include "print-rtl.h"
 #include "print-rtl.h"
 
 
+
+
+// #define     DO_ZIP_DEBUGS
+// #define     DO_ZIP_DEBUGS
+#ifdef DO_ZIP_DEBUGS
+#ifdef DO_ZIP_DEBUGS
+#include <stdio.h>
+#include <stdio.h>
+#define        ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s:%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+#define        ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s:%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+extern void    zip_debug_rtx(const_rtx);
+extern void    zip_debug_rtx(const_rtx);
+#else
+#else
+#define        ZIP_DEBUG_LINE(STR,RTX)
+#define        ZIP_DEBUG_LINE(STR,RTX)
+#endif
+#endif
+
+
 #ifdef XCOFF_DEBUGGING_INFO
 #ifdef XCOFF_DEBUGGING_INFO
 #include "xcoffout.h"          /* Needed for external data declarations.  */
 #include "xcoffout.h"          /* Needed for external data declarations.  */
 #endif
 #endif
@@ -2944,6 +2954,8 @@
@@ -2944,6 +2954,8 @@
 
 
        current_output_insn = debug_insn = insn;
        current_output_insn = debug_insn = insn;
 
 
+ZIP_DEBUG_LINE("FINAL-INSN", insn);
+ZIP_DEBUG_LINE("FINAL-INSN", insn);
+
+
        /* Find the proper template for this insn.  */
        /* Find the proper template for this insn.  */
        templ = get_insn_template (insn_code_number, insn);
        templ = get_insn_template (insn_code_number, insn);
 
 
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/fold-const.c gcc-6.2.0-zip/gcc/fold-const.c
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/fold-const.c gcc-6.2.0-zip/gcc/fold-const.c
--- gcc-6.2.0/gcc/fold-const.c  2016-07-08 16:20:23.000000000 -0400
--- gcc-6.2.0/gcc/fold-const.c  2016-07-08 16:20:23.000000000 -0400
+++ gcc-6.2.0-zip/gcc/fold-const.c      2016-12-31 16:47:49.000093249 -0500
+++ gcc-6.2.0-zip/gcc/fold-const.c      2016-12-31 16:47:49.000093249 -0500
@@ -1247,7 +1247,7 @@
@@ -1247,7 +1247,7 @@
            wide_int w2 = arg2;
            wide_int w2 = arg2;
            f2.data.high = w2.elt (1);
            f2.data.high = w2.elt (1);
            f2.data.low = w2.elt (0);
            f2.data.low = w2.elt (0);
-           f2.mode = SImode;
-           f2.mode = SImode;
+           f2.mode = word_mode;
+           f2.mode = word_mode;
          }
          }
          break;
          break;
 
 
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/jump.c gcc-6.2.0-zip/gcc/jump.c
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/jump.c gcc-6.2.0-zip/gcc/jump.c
--- gcc-6.2.0/gcc/jump.c        2016-01-04 09:30:50.000000000 -0500
--- gcc-6.2.0/gcc/jump.c        2016-01-04 09:30:50.000000000 -0500
+++ gcc-6.2.0-zip/gcc/jump.c    2016-12-31 16:49:10.675582836 -0500
+++ gcc-6.2.0-zip/gcc/jump.c    2016-12-31 16:49:10.675582836 -0500
@@ -50,6 +50,15 @@
@@ -50,6 +50,15 @@
 #include "cfgrtl.h"
 #include "cfgrtl.h"
 #include "rtl-iter.h"
 #include "rtl-iter.h"
 
 
+
+
+#ifdef DO_ZIP_DEBUGS
+#ifdef DO_ZIP_DEBUGS
+#include <stdio.h>
+#include <stdio.h>
+#define        ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s;%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+#define        ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s;%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+extern void    zip_debug_rtx(const_rtx);
+extern void    zip_debug_rtx(const_rtx);
+#else
+#else
+#define        ZIP_DEBUG_LINE(STR,RTX)
+#define        ZIP_DEBUG_LINE(STR,RTX)
+#endif
+#endif
+
+
 /* Optimize jump y; x: ... y: jumpif... x?
 /* Optimize jump y; x: ... y: jumpif... x?
    Don't know if it is worth bothering with.  */
    Don't know if it is worth bothering with.  */
 /* Optimize two cases of conditional jump to conditional jump?
 /* Optimize two cases of conditional jump to conditional jump?
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/loop-doloop.c gcc-6.2.0-zip/gcc/loop-doloop.c
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/loop-doloop.c gcc-6.2.0-zip/gcc/loop-doloop.c
--- gcc-6.2.0/gcc/loop-doloop.c 2016-01-14 18:12:53.000000000 -0500
--- gcc-6.2.0/gcc/loop-doloop.c 2016-01-14 18:12:53.000000000 -0500
+++ gcc-6.2.0-zip/gcc/loop-doloop.c     2016-12-31 16:50:27.099104820 -0500
+++ gcc-6.2.0-zip/gcc/loop-doloop.c     2016-12-31 16:50:27.099104820 -0500
@@ -37,6 +37,15 @@
@@ -37,6 +37,15 @@
 #include "regs.h"
 #include "regs.h"
 #include "df.h"
 #include "df.h"
 
 
+
+
+#ifdef DO_ZIP_DEBUGS
+#ifdef DO_ZIP_DEBUGS
+#include <stdio.h>
+#include <stdio.h>
+#define        ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s;%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+#define        ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s;%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+extern void    zip_debug_rtx(const_rtx);
+extern void    zip_debug_rtx(const_rtx);
+#else
+#else
+#define        ZIP_DEBUG_LINE(STR,RTX)
+#define        ZIP_DEBUG_LINE(STR,RTX)
+#endif
+#endif
+
+
 /* This module is used to modify loops with a determinable number of
 /* This module is used to modify loops with a determinable number of
    iterations to use special low-overhead looping instructions.
    iterations to use special low-overhead looping instructions.
 
 
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/loop-iv.c gcc-6.2.0-zip/gcc/loop-iv.c
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/loop-iv.c gcc-6.2.0-zip/gcc/loop-iv.c
--- gcc-6.2.0/gcc/loop-iv.c     2016-01-04 09:30:50.000000000 -0500
--- gcc-6.2.0/gcc/loop-iv.c     2016-01-04 09:30:50.000000000 -0500
+++ gcc-6.2.0-zip/gcc/loop-iv.c 2016-12-31 16:52:42.034259845 -0500
+++ gcc-6.2.0-zip/gcc/loop-iv.c 2016-12-31 16:52:42.034259845 -0500
@@ -1715,8 +1715,8 @@
@@ -1715,8 +1715,8 @@
   if (op0 != XEXP (cond, 0)
   if (op0 != XEXP (cond, 0)
       || op1 != XEXP (cond, 1)
       || op1 != XEXP (cond, 1)
       || code != GET_CODE (cond)
       || code != GET_CODE (cond)
-      || GET_MODE (cond) != SImode)
-      || GET_MODE (cond) != SImode)
-    cond = gen_rtx_fmt_ee (code, SImode, op0, op1);
-    cond = gen_rtx_fmt_ee (code, SImode, op0, op1);
+      || GET_MODE (cond) != word_mode)
+      || GET_MODE (cond) != word_mode)
+    cond = gen_rtx_fmt_ee (code, word_mode, op0, op1);
+    cond = gen_rtx_fmt_ee (code, word_mode, op0, op1);
 
 
   return cond;
   return cond;
 }
 }
@@ -2083,9 +2083,9 @@
@@ -2083,9 +2083,9 @@
   rtx mmin, mmax, cond_over, cond_under;
   rtx mmin, mmax, cond_over, cond_under;
 
 
   get_mode_bounds (mode, signed_p, iv->extend_mode, &mmin, &mmax);
   get_mode_bounds (mode, signed_p, iv->extend_mode, &mmin, &mmax);
-  cond_under = simplify_gen_relational (LT, SImode, iv->extend_mode,
-  cond_under = simplify_gen_relational (LT, SImode, iv->extend_mode,
+  cond_under = simplify_gen_relational (LT, word_mode, iv->extend_mode,
+  cond_under = simplify_gen_relational (LT, word_mode, iv->extend_mode,
                                        iv->base, mmin);
                                        iv->base, mmin);
-  cond_over = simplify_gen_relational (GT, SImode, iv->extend_mode,
-  cond_over = simplify_gen_relational (GT, SImode, iv->extend_mode,
+  cond_over = simplify_gen_relational (GT, word_mode, iv->extend_mode,
+  cond_over = simplify_gen_relational (GT, word_mode, iv->extend_mode,
                                       iv->base, mmax);
                                       iv->base, mmax);
 
 
   switch (cond)
   switch (cond)
@@ -2464,7 +2464,7 @@
@@ -2464,7 +2464,7 @@
        if (iv0.step == const0_rtx)
        if (iv0.step == const0_rtx)
          {
          {
            tmp = lowpart_subreg (mode, iv0.base, comp_mode);
            tmp = lowpart_subreg (mode, iv0.base, comp_mode);
-           assumption = simplify_gen_relational (EQ, SImode, mode, tmp,
-           assumption = simplify_gen_relational (EQ, SImode, mode, tmp,
+           assumption = simplify_gen_relational (EQ, word_mode, mode, tmp,
+           assumption = simplify_gen_relational (EQ, word_mode, mode, tmp,
                                                  mode_mmax);
                                                  mode_mmax);
            if (assumption == const_true_rtx)
            if (assumption == const_true_rtx)
              goto zero_iter_simplify;
              goto zero_iter_simplify;
@@ -2474,7 +2474,7 @@
@@ -2474,7 +2474,7 @@
        else
        else
          {
          {
            tmp = lowpart_subreg (mode, iv1.base, comp_mode);
            tmp = lowpart_subreg (mode, iv1.base, comp_mode);
-           assumption = simplify_gen_relational (EQ, SImode, mode, tmp,
-           assumption = simplify_gen_relational (EQ, SImode, mode, tmp,
+           assumption = simplify_gen_relational (EQ, word_mode, mode, tmp,
+           assumption = simplify_gen_relational (EQ, word_mode, mode, tmp,
                                                  mode_mmin);
                                                  mode_mmin);
            if (assumption == const_true_rtx)
            if (assumption == const_true_rtx)
              goto zero_iter_simplify;
              goto zero_iter_simplify;
@@ -2561,10 +2561,10 @@
@@ -2561,10 +2561,10 @@
              bound = simplify_gen_binary (MINUS, comp_mode, bound, delta);
              bound = simplify_gen_binary (MINUS, comp_mode, bound, delta);
              bound = lowpart_subreg (mode, bound, comp_mode);
              bound = lowpart_subreg (mode, bound, comp_mode);
              tmp = lowpart_subreg (mode, iv0.base, comp_mode);
              tmp = lowpart_subreg (mode, iv0.base, comp_mode);
-             may_xform = simplify_gen_relational (cond, SImode, mode,
-             may_xform = simplify_gen_relational (cond, SImode, mode,
+             may_xform = simplify_gen_relational (cond, word_mode, mode,
+             may_xform = simplify_gen_relational (cond, word_mode, mode,
                                                   bound, tmp);
                                                   bound, tmp);
              may_not_xform = simplify_gen_relational (reverse_condition (cond),
              may_not_xform = simplify_gen_relational (reverse_condition (cond),
-                                                      SImode, mode,
-                                                      SImode, mode,
+                                                      word_mode, mode,
+                                                      word_mode, mode,
                                                       bound, tmp);
                                                       bound, tmp);
            }
            }
          else
          else
@@ -2573,10 +2573,10 @@
@@ -2573,10 +2573,10 @@
              bound = simplify_gen_binary (PLUS, comp_mode, bound, delta);
              bound = simplify_gen_binary (PLUS, comp_mode, bound, delta);
              bound = lowpart_subreg (mode, bound, comp_mode);
              bound = lowpart_subreg (mode, bound, comp_mode);
              tmp = lowpart_subreg (mode, iv1.base, comp_mode);
              tmp = lowpart_subreg (mode, iv1.base, comp_mode);
-             may_xform = simplify_gen_relational (cond, SImode, mode,
-             may_xform = simplify_gen_relational (cond, SImode, mode,
+             may_xform = simplify_gen_relational (cond, word_mode, mode,
+             may_xform = simplify_gen_relational (cond, word_mode, mode,
                                                   tmp, bound);
                                                   tmp, bound);
              may_not_xform = simplify_gen_relational (reverse_condition (cond),
              may_not_xform = simplify_gen_relational (reverse_condition (cond),
-                                                      SImode, mode,
-                                                      SImode, mode,
+                                                      word_mode, mode,
+                                                      word_mode, mode,
                                                       tmp, bound);
                                                       tmp, bound);
            }
            }
        }
        }
@@ -2629,7 +2629,7 @@
@@ -2629,7 +2629,7 @@
          tmp0 = lowpart_subreg (mode, iv0.base, comp_mode);
          tmp0 = lowpart_subreg (mode, iv0.base, comp_mode);
          tmp1 = lowpart_subreg (mode, iv1.base, comp_mode);
          tmp1 = lowpart_subreg (mode, iv1.base, comp_mode);
          assumption = simplify_gen_relational (reverse_condition (cond),
          assumption = simplify_gen_relational (reverse_condition (cond),
-                                               SImode, mode, tmp0, tmp1);
-                                               SImode, mode, tmp0, tmp1);
+                                               word_mode, mode, tmp0, tmp1);
+                                               word_mode, mode, tmp0, tmp1);
          if (assumption == const_true_rtx)
          if (assumption == const_true_rtx)
            goto zero_iter_simplify;
            goto zero_iter_simplify;
          else if (assumption != const0_rtx)
          else if (assumption != const0_rtx)
@@ -2671,7 +2671,7 @@
@@ -2671,7 +2671,7 @@
 
 
       tmp1 = lowpart_subreg (mode, iv1.base, comp_mode);
       tmp1 = lowpart_subreg (mode, iv1.base, comp_mode);
       tmp = simplify_gen_binary (UMOD, mode, tmp1, gen_int_mode (d, mode));
       tmp = simplify_gen_binary (UMOD, mode, tmp1, gen_int_mode (d, mode));
-      assumption = simplify_gen_relational (NE, SImode, mode, tmp, const0_rtx);
-      assumption = simplify_gen_relational (NE, SImode, mode, tmp, const0_rtx);
+      assumption = simplify_gen_relational (NE, word_mode, mode, tmp, const0_rtx);
+      assumption = simplify_gen_relational (NE, word_mode, mode, tmp, const0_rtx);
       desc->infinite = alloc_EXPR_LIST (0, assumption, desc->infinite);
       desc->infinite = alloc_EXPR_LIST (0, assumption, desc->infinite);
 
 
       tmp = simplify_gen_binary (UDIV, mode, tmp1, gen_int_mode (d, mode));
       tmp = simplify_gen_binary (UDIV, mode, tmp1, gen_int_mode (d, mode));
@@ -2703,19 +2703,19 @@
@@ -2703,19 +2703,19 @@
              /* If s is power of 2, we know that the loop is infinite if
              /* If s is power of 2, we know that the loop is infinite if
                 a % s <= b % s and b + s overflows.  */
                 a % s <= b % s and b + s overflows.  */
              assumption = simplify_gen_relational (reverse_condition (cond),
              assumption = simplify_gen_relational (reverse_condition (cond),
-                                                   SImode, mode,
-                                                   SImode, mode,
+                                                   word_mode, mode,
+                                                   word_mode, mode,
                                                    tmp1, bound);
                                                    tmp1, bound);
 
 
              t0 = simplify_gen_binary (UMOD, mode, copy_rtx (tmp0), step);
              t0 = simplify_gen_binary (UMOD, mode, copy_rtx (tmp0), step);
              t1 = simplify_gen_binary (UMOD, mode, copy_rtx (tmp1), step);
              t1 = simplify_gen_binary (UMOD, mode, copy_rtx (tmp1), step);
-             tmp = simplify_gen_relational (cond, SImode, mode, t0, t1);
-             tmp = simplify_gen_relational (cond, SImode, mode, t0, t1);
-             assumption = simplify_gen_binary (AND, SImode, assumption, tmp);
-             assumption = simplify_gen_binary (AND, SImode, assumption, tmp);
+             tmp = simplify_gen_relational (cond, word_mode, mode, t0, t1);
+             tmp = simplify_gen_relational (cond, word_mode, mode, t0, t1);
+             assumption = simplify_gen_binary (AND, word_mode, assumption, tmp);
+             assumption = simplify_gen_binary (AND, word_mode, assumption, tmp);
              desc->infinite =
              desc->infinite =
                      alloc_EXPR_LIST (0, assumption, desc->infinite);
                      alloc_EXPR_LIST (0, assumption, desc->infinite);
            }
            }
          else
          else
            {
            {
-             assumption = simplify_gen_relational (cond, SImode, mode,
-             assumption = simplify_gen_relational (cond, SImode, mode,
+             assumption = simplify_gen_relational (cond, word_mode, mode,
+             assumption = simplify_gen_relational (cond, word_mode, mode,
                                                    tmp1, bound);
                                                    tmp1, bound);
              desc->assumptions =
              desc->assumptions =
                      alloc_EXPR_LIST (0, assumption, desc->assumptions);
                      alloc_EXPR_LIST (0, assumption, desc->assumptions);
@@ -2724,7 +2724,7 @@
@@ -2724,7 +2724,7 @@
          tmp = simplify_gen_binary (PLUS, comp_mode, iv1.base, iv0.step);
          tmp = simplify_gen_binary (PLUS, comp_mode, iv1.base, iv0.step);
          tmp = lowpart_subreg (mode, tmp, comp_mode);
          tmp = lowpart_subreg (mode, tmp, comp_mode);
          assumption = simplify_gen_relational (reverse_condition (cond),
          assumption = simplify_gen_relational (reverse_condition (cond),
-                                               SImode, mode, tmp0, tmp);
-                                               SImode, mode, tmp0, tmp);
+                                               word_mode, mode, tmp0, tmp);
+                                               word_mode, mode, tmp0, tmp);
 
 
          delta = simplify_gen_binary (PLUS, mode, tmp1, step);
          delta = simplify_gen_binary (PLUS, mode, tmp1, step);
          delta = simplify_gen_binary (MINUS, mode, delta, tmp0);
          delta = simplify_gen_binary (MINUS, mode, delta, tmp0);
@@ -2747,19 +2747,19 @@
@@ -2747,19 +2747,19 @@
              /* If s is power of 2, we know that the loop is infinite if
              /* If s is power of 2, we know that the loop is infinite if
                 a % s <= b % s and a - s overflows.  */
                 a % s <= b % s and a - s overflows.  */
              assumption = simplify_gen_relational (reverse_condition (cond),
              assumption = simplify_gen_relational (reverse_condition (cond),
-                                                   SImode, mode,
-                                                   SImode, mode,
+                                                   word_mode, mode,
+                                                   word_mode, mode,
                                                    bound, tmp0);
                                                    bound, tmp0);
 
 
              t0 = simplify_gen_binary (UMOD, mode, copy_rtx (tmp0), step);
              t0 = simplify_gen_binary (UMOD, mode, copy_rtx (tmp0), step);
              t1 = simplify_gen_binary (UMOD, mode, copy_rtx (tmp1), step);
              t1 = simplify_gen_binary (UMOD, mode, copy_rtx (tmp1), step);
-             tmp = simplify_gen_relational (cond, SImode, mode, t0, t1);
-             tmp = simplify_gen_relational (cond, SImode, mode, t0, t1);
-             assumption = simplify_gen_binary (AND, SImode, assumption, tmp);
-             assumption = simplify_gen_binary (AND, SImode, assumption, tmp);
+             tmp = simplify_gen_relational (cond, word_mode, mode, t0, t1);
+             tmp = simplify_gen_relational (cond, word_mode, mode, t0, t1);
+             assumption = simplify_gen_binary (AND, word_mode, assumption, tmp);
+             assumption = simplify_gen_binary (AND, word_mode, assumption, tmp);
              desc->infinite =
              desc->infinite =
                      alloc_EXPR_LIST (0, assumption, desc->infinite);
                      alloc_EXPR_LIST (0, assumption, desc->infinite);
            }
            }
          else
          else
            {
            {
-             assumption = simplify_gen_relational (cond, SImode, mode,
-             assumption = simplify_gen_relational (cond, SImode, mode,
+             assumption = simplify_gen_relational (cond, word_mode, mode,
+             assumption = simplify_gen_relational (cond, word_mode, mode,
                                                    bound, tmp0);
                                                    bound, tmp0);
              desc->assumptions =
              desc->assumptions =
                      alloc_EXPR_LIST (0, assumption, desc->assumptions);
                      alloc_EXPR_LIST (0, assumption, desc->assumptions);
@@ -2768,7 +2768,7 @@
@@ -2768,7 +2768,7 @@
          tmp = simplify_gen_binary (PLUS, comp_mode, iv0.base, iv1.step);
          tmp = simplify_gen_binary (PLUS, comp_mode, iv0.base, iv1.step);
          tmp = lowpart_subreg (mode, tmp, comp_mode);
          tmp = lowpart_subreg (mode, tmp, comp_mode);
          assumption = simplify_gen_relational (reverse_condition (cond),
          assumption = simplify_gen_relational (reverse_condition (cond),
-                                               SImode, mode,
-                                               SImode, mode,
+                                               word_mode, mode,
+                                               word_mode, mode,
                                                tmp, tmp1);
                                                tmp, tmp1);
          delta = simplify_gen_binary (MINUS, mode, tmp0, step);
          delta = simplify_gen_binary (MINUS, mode, tmp0, step);
          delta = simplify_gen_binary (MINUS, mode, tmp1, delta);
          delta = simplify_gen_binary (MINUS, mode, tmp1, delta);
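
The loop-iv.c hunks above change only the result mode handed to simplify_gen_relational(): the hard-coded SImode becomes word_mode, presumably so the niter "assumption" RTL is built in whatever word mode the ZipCPU port actually uses. The modular-arithmetic fact those assumptions lean on (see the "If s is power of 2..." comments) can be checked on its own; the following stand-alone C program is a sketch for illustration only, not GCC code, and brute-forces that fact over uint8_t.

    #include <stdint.h>
    #include <stdio.h>

    /* Fact used by the niter code above: for an unsigned induction variable
       with a power-of-two step s, the loop "for (i = a; i != b; i += s)"
       terminates exactly when a % s == b % s, because stepping by s (with
       s dividing 2^N) never changes the residue of i modulo s. */
    static int terminates(uint8_t a, uint8_t b, uint8_t s)
    {
        uint8_t i = a;
        for (unsigned k = 0; k < 256; k++) {   /* 256 steps cover every value */
            if (i == b)
                return 1;
            i = (uint8_t)(i + s);
        }
        return 0;
    }

    int main(void)
    {
        const unsigned s = 8;                  /* power-of-two step */
        for (unsigned a = 0; a < 256; a++)
            for (unsigned b = 0; b < 256; b++)
                if (terminates((uint8_t)a, (uint8_t)b, (uint8_t)s)
                    != ((a % s) == (b % s)))
                    printf("mismatch: a=%u b=%u\n", a, b);
        printf("check done\n");                /* no mismatches expected */
        return 0;
    }
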
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/optabs.c gcc-6.2.0-zip/gcc/optabs.c
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/optabs.c gcc-6.2.0-zip/gcc/optabs.c
--- gcc-6.2.0/gcc/optabs.c      2016-02-16 10:15:40.000000000 -0500
--- gcc-6.2.0/gcc/optabs.c      2016-02-16 10:15:40.000000000 -0500
+++ gcc-6.2.0-zip/gcc/optabs.c  2017-03-01 15:46:15.660221429 -0500
+++ gcc-6.2.0-zip/gcc/optabs.c  2018-03-23 07:13:51.664427524 -0400
@@ -43,6 +43,17 @@
@@ -43,6 +43,17 @@
 #include "optabs-tree.h"
 #include "optabs-tree.h"
 #include "libfuncs.h"
 #include "libfuncs.h"
 
 
+
+
+// #define     DO_ZIP_DEBUGS
+// #define     DO_ZIP_DEBUGS
+#ifdef DO_ZIP_DEBUGS
+#ifdef DO_ZIP_DEBUGS
+#include <stdio.h>
+#include <stdio.h>
+#define        ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s:%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+#define        ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s:%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+extern void    zip_debug_rtx(const_rtx);
+extern void    zip_debug_rtx(const_rtx);
+extern void    zip_debug_rtx_pfx(const char *,const_rtx);
+extern void    zip_debug_rtx_pfx(const char *,const_rtx);
+#else
+#else
+#define        ZIP_DEBUG_LINE(STR,RTX)
+#define        ZIP_DEBUG_LINE(STR,RTX)
+#endif
+#endif
+
+
 static void prepare_float_lib_cmp (rtx, rtx, enum rtx_code, rtx *,
 static void prepare_float_lib_cmp (rtx, rtx, enum rtx_code, rtx *,
                                   machine_mode *);
                                   machine_mode *);
 static rtx expand_unop_direct (machine_mode, optab, rtx, rtx, int);
 static rtx expand_unop_direct (machine_mode, optab, rtx, rtx, int);
@@ -6985,6 +6996,15 @@
@@ -6985,6 +6996,15 @@
 maybe_gen_insn (enum insn_code icode, unsigned int nops,
 maybe_gen_insn (enum insn_code icode, unsigned int nops,
                struct expand_operand *ops)
                struct expand_operand *ops)
 {
 {
+#ifdef DO_ZIP_DEBUGS
+#ifdef DO_ZIP_DEBUGS
+fprintf(stderr, "ICODE = %d\n", icode);
+fprintf(stderr, "ICODE = %d\n", icode);
+fprintf(stderr, "NOPS  = %d\n", nops);
+fprintf(stderr, "NOPS  = %d\n", nops);
+for(int i=0; i<nops; i++) {
+for(unsigned i=0; i<nops; i++) {
+       char    str[10];
+       char    str[10];
+       sprintf(str, "Op %d: ", i);
+       sprintf(str, "Op %d: ", i);
+       zip_debug_rtx_pfx(str, ops[i].value);
+       zip_debug_rtx_pfx(str, ops[i].value);
+}
+}
+#endif
+#endif
   gcc_assert (nops == (unsigned int) insn_data[(int) icode].n_generator_args);
   gcc_assert (nops == (unsigned int) insn_data[(int) icode].n_generator_args);
   if (!maybe_legitimize_operands (icode, 0, nops, ops))
   if (!maybe_legitimize_operands (icode, 0, nops, ops))
     return NULL;
     return NULL;
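
The block added to maybe_gen_insn() above only dumps the icode, the operand count, and each operand via zip_debug_rtx_pfx() when DO_ZIP_DEBUGS is defined. The ZIP_DEBUG_LINE helper used in these files follows the usual do { ... } while (0) idiom, so the macro behaves like a single statement and compiles away when the switch is off; a minimal stand-alone sketch with made-up names (not part of the patch):

    #include <stdio.h>

    /* Uncomment to enable the debug output, mirroring DO_ZIP_DEBUGS above. */
    /* #define DO_DEMO_DEBUGS */

    #ifdef DO_DEMO_DEBUGS
    #define DEMO_DEBUG_LINE(STR) \
            do { fprintf(stderr, "%s:%d/%s\n", __FILE__, __LINE__, STR); } while (0)
    #else
    #define DEMO_DEBUG_LINE(STR) do { } while (0)   /* harmless single statement */
    #endif

    int main(void)
    {
        if (1)
            DEMO_DEBUG_LINE("inside the if");       /* safe even without braces */
        return 0;
    }
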
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/recog.c gcc-6.2.0-zip/gcc/recog.c
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/recog.c gcc-6.2.0-zip/gcc/recog.c
--- gcc-6.2.0/gcc/recog.c       2016-01-29 13:47:17.000000000 -0500
--- gcc-6.2.0/gcc/recog.c       2016-01-29 13:47:17.000000000 -0500
+++ gcc-6.2.0-zip/gcc/recog.c   2017-02-06 15:47:48.493946049 -0500
+++ gcc-6.2.0-zip/gcc/recog.c   2017-02-06 15:47:48.493946049 -0500
@@ -40,6 +40,16 @@
@@ -40,6 +40,16 @@
 #include "reload.h"
 #include "reload.h"
 #include "tree-pass.h"
 #include "tree-pass.h"
 
 
+
+
+// #define     DO_ZIP_DEBUGS
+// #define     DO_ZIP_DEBUGS
+#ifdef DO_ZIP_DEBUGS
+#ifdef DO_ZIP_DEBUGS
+#include <stdio.h>
+#include <stdio.h>
+#define        ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s:%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+#define        ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s:%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+extern void    zip_debug_rtx(const_rtx);
+extern void    zip_debug_rtx(const_rtx);
+#else
+#else
+#define        ZIP_DEBUG_LINE(STR,RTX)
+#define        ZIP_DEBUG_LINE(STR,RTX)
+#endif
+#endif
+
+
 #ifndef STACK_POP_CODE
 #ifndef STACK_POP_CODE
 #if STACK_GROWS_DOWNWARD
 #if STACK_GROWS_DOWNWARD
 #define STACK_POP_CODE POST_INC
 #define STACK_POP_CODE POST_INC
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/reload1.c gcc-6.2.0-zip/gcc/reload1.c
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/reload1.c gcc-6.2.0-zip/gcc/reload1.c
--- gcc-6.2.0/gcc/reload1.c     2016-03-18 04:25:57.000000000 -0400
--- gcc-6.2.0/gcc/reload1.c     2016-03-18 04:25:57.000000000 -0400
+++ gcc-6.2.0-zip/gcc/reload1.c 2017-02-06 15:54:21.067740343 -0500
+++ gcc-6.2.0-zip/gcc/reload1.c 2017-02-06 15:54:21.067740343 -0500
@@ -42,6 +42,15 @@
@@ -42,6 +42,15 @@
 #include "dumpfile.h"
 #include "dumpfile.h"
 #include "rtl-iter.h"
 #include "rtl-iter.h"
 
 
+#ifdef DO_ZIP_DEBUGS
+#ifdef DO_ZIP_DEBUGS
+#include <stdio.h>
+#include <stdio.h>
+#define        ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s:%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+#define        ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s:%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+extern void    zip_debug_rtx(const_rtx);
+extern void    zip_debug_rtx(const_rtx);
+#else
+#else
+#define        ZIP_DEBUG_LINE(STR,RTX)
+#define        ZIP_DEBUG_LINE(STR,RTX)
+#endif
+#endif
+
+
+
+
 /* This file contains the reload pass of the compiler, which is
 /* This file contains the reload pass of the compiler, which is
    run after register allocation has been done.  It checks that
    run after register allocation has been done.  It checks that
    each insn is valid (operands required to be in registers really
    each insn is valid (operands required to be in registers really
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/reload.c gcc-6.2.0-zip/gcc/reload.c
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/reload.c gcc-6.2.0-zip/gcc/reload.c
--- gcc-6.2.0/gcc/reload.c      2016-02-13 20:37:40.000000000 -0500
--- gcc-6.2.0/gcc/reload.c      2016-02-13 20:37:40.000000000 -0500
+++ gcc-6.2.0-zip/gcc/reload.c  2017-03-01 15:46:26.784221658 -0500
+++ gcc-6.2.0-zip/gcc/reload.c  2017-03-01 15:46:26.784221658 -0500
@@ -106,6 +106,15 @@
@@ -106,6 +106,15 @@
 #include "addresses.h"
 #include "addresses.h"
 #include "params.h"
 #include "params.h"
 
 
+// #define     DO_ZIP_DEBUGS
+// #define     DO_ZIP_DEBUGS
+#ifdef DO_ZIP_DEBUGS
+#ifdef DO_ZIP_DEBUGS
+#include <stdio.h>
+#include <stdio.h>
+#define        ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s:%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+#define        ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s:%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+extern void    zip_debug_rtx(const_rtx);
+extern void    zip_debug_rtx(const_rtx);
+#else
+#else
+#define        ZIP_DEBUG_LINE(STR,RTX)
+#define        ZIP_DEBUG_LINE(STR,RTX)
+#endif
+#endif
+
+
 /* True if X is a constant that can be forced into the constant pool.
 /* True if X is a constant that can be forced into the constant pool.
    MODE is the mode of the operand, or VOIDmode if not known.  */
    MODE is the mode of the operand, or VOIDmode if not known.  */
 #define CONST_POOL_OK_P(MODE, X)               \
 #define CONST_POOL_OK_P(MODE, X)               \
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/testsuite/lib/target-supports.exp gcc-6.2.0-zip/gcc/testsuite/lib/target-supports.exp
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/testsuite/lib/target-supports.exp gcc-6.2.0-zip/gcc/testsuite/lib/target-supports.exp
--- gcc-6.2.0/gcc/testsuite/lib/target-supports.exp     2016-07-05 13:54:02.000000000 -0400
--- gcc-6.2.0/gcc/testsuite/lib/target-supports.exp     2016-07-05 13:54:02.000000000 -0400
+++ gcc-6.2.0-zip/gcc/testsuite/lib/target-supports.exp 2016-12-31 16:59:52.719091392 -0500
+++ gcc-6.2.0-zip/gcc/testsuite/lib/target-supports.exp 2016-12-31 16:59:52.719091392 -0500
@@ -545,6 +545,11 @@
@@ -545,6 +545,11 @@
        return 0
        return 0
     }
     }
 
 
+    # ZipCPU doesn't support profiling (yet).
+    # ZipCPU doesn't support profiling (yet).
+    if { [istarget zip*] } {
+    if { [istarget zip*] } {
+       return 0
+       return 0
+    }
+    }
+
+
     # cygwin does not support -p.
     # cygwin does not support -p.
     if { [istarget *-*-cygwin*] && $test_what == "-p" } {
     if { [istarget *-*-cygwin*] && $test_what == "-p" } {
        return 0
        return 0
@@ -1090,6 +1095,12 @@
@@ -1090,6 +1095,12 @@
        }]
        }]
     }
     }
 
 
+    # No real hardware FPU support for ZipCPU yet --- even though the
+    # No real hardware FPU support for ZipCPU yet --- even though the
+    # instruction set supports it, the CPU just isn't ready (yet).
+    # instruction set supports it, the CPU just isn't ready (yet).
+    if { [istarget zip*] } {
+    if { [istarget zip*] } {
+        return 0
+        return 0
+    }
+    }
+
+
     # This proc is actually checking the availabilty of FPU
     # This proc is actually checking the availabilty of FPU
     # support for doubles, so on the RX we must fail if the
     # support for doubles, so on the RX we must fail if the
     # 64-bit double multilib has been selected.
     # 64-bit double multilib has been selected.
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/tree-ssa-math-opts.c gcc-6.2.0-zip/gcc/tree-ssa-math-opts.c
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/tree-ssa-math-opts.c gcc-6.2.0-zip/gcc/tree-ssa-math-opts.c
--- gcc-6.2.0/gcc/tree-ssa-math-opts.c  2016-04-04 11:42:19.000000000 -0400
--- gcc-6.2.0/gcc/tree-ssa-math-opts.c  2016-04-04 11:42:19.000000000 -0400
+++ gcc-6.2.0-zip/gcc/tree-ssa-math-opts.c      2016-12-31 17:02:24.405602214 -0500
+++ gcc-6.2.0-zip/gcc/tree-ssa-math-opts.c      2016-12-31 17:02:24.405602214 -0500
@@ -983,7 +983,7 @@
@@ -983,7 +983,7 @@
     {
     {
       if (val & 1)
       if (val & 1)
        {
        {
-         digit = val & ((1 << POWI_WINDOW_SIZE) - 1);
-         digit = val & ((1 << POWI_WINDOW_SIZE) - 1);
+         digit = val & ((1l << POWI_WINDOW_SIZE) - 1);
+         digit = val & ((1l << POWI_WINDOW_SIZE) - 1);
          result += powi_lookup_cost (digit, cache)
          result += powi_lookup_cost (digit, cache)
                    + POWI_WINDOW_SIZE + 1;
                    + POWI_WINDOW_SIZE + 1;
          val >>= POWI_WINDOW_SIZE;
          val >>= POWI_WINDOW_SIZE;
@@ -1023,7 +1023,7 @@
@@ -1023,7 +1023,7 @@
     }
     }
   else if (n & 1)
   else if (n & 1)
     {
     {
-      digit = n & ((1 << POWI_WINDOW_SIZE) - 1);
-      digit = n & ((1 << POWI_WINDOW_SIZE) - 1);
+      digit = n & ((1l << POWI_WINDOW_SIZE) - 1);
+      digit = n & ((1l << POWI_WINDOW_SIZE) - 1);
       op0 = powi_as_mults_1 (gsi, loc, type, n - digit, cache);
       op0 = powi_as_mults_1 (gsi, loc, type, n - digit, cache);
       op1 = powi_as_mults_1 (gsi, loc, type, digit, cache);
       op1 = powi_as_mults_1 (gsi, loc, type, digit, cache);
     }
     }
@@ -1957,7 +1957,7 @@
@@ -1957,7 +1957,7 @@
 };
 };
 
 
 #define BITS_PER_MARKER 8
 #define BITS_PER_MARKER 8
-#define MARKER_MASK ((1 << BITS_PER_MARKER) - 1)
-#define MARKER_MASK ((1 << BITS_PER_MARKER) - 1)
+#define MARKER_MASK ((1l << BITS_PER_MARKER) - 1)
+#define MARKER_MASK ((1l << BITS_PER_MARKER) - 1)
 #define MARKER_BYTE_UNKNOWN MARKER_MASK
 #define MARKER_BYTE_UNKNOWN MARKER_MASK
 #define HEAD_MARKER(n, size) \
 #define HEAD_MARKER(n, size) \
   ((n) & ((uint64_t) MARKER_MASK << (((size) - 1) * BITS_PER_MARKER)))
   ((n) & ((uint64_t) MARKER_MASK << (((size) - 1) * BITS_PER_MARKER)))
@@ -1993,7 +1993,7 @@
@@ -1993,7 +1993,7 @@
   /* Zero out the extra bits of N in order to avoid them being shifted
   /* Zero out the extra bits of N in order to avoid them being shifted
      into the significant bits.  */
      into the significant bits.  */
   if (size < 64 / BITS_PER_MARKER)
   if (size < 64 / BITS_PER_MARKER)
-    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
-    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
+    n->n &= ((uint64_t) 1l << (size * BITS_PER_MARKER)) - 1;
+    n->n &= ((uint64_t) 1l << (size * BITS_PER_MARKER)) - 1;
 
 
   switch (code)
   switch (code)
     {
     {
@@ -2020,7 +2020,7 @@
@@ -2020,7 +2020,7 @@
     }
     }
   /* Zero unused bits for size.  */
   /* Zero unused bits for size.  */
   if (size < 64 / BITS_PER_MARKER)
   if (size < 64 / BITS_PER_MARKER)
-    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
-    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
+    n->n &= ((uint64_t) 1l << (size * BITS_PER_MARKER)) - 1;
+    n->n &= ((uint64_t) 1l << (size * BITS_PER_MARKER)) - 1;
   return true;
   return true;
 }
 }
 
 
@@ -2067,7 +2067,7 @@
@@ -2067,7 +2067,7 @@
   n->n = CMPNOP;
   n->n = CMPNOP;
 
 
   if (size < 64 / BITS_PER_MARKER)
   if (size < 64 / BITS_PER_MARKER)
-    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
-    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
+    n->n &= ((uint64_t) 1l << (size * BITS_PER_MARKER)) - 1;
+    n->n &= ((uint64_t) 1l << (size * BITS_PER_MARKER)) - 1;
 
 
   return true;
   return true;
 }
 }
@@ -2372,7 +2372,7 @@
@@ -2372,7 +2372,7 @@
              {
              {
                /* If STMT casts to a smaller type mask out the bits not
                /* If STMT casts to a smaller type mask out the bits not
                   belonging to the target type.  */
                   belonging to the target type.  */
-               n->n &= ((uint64_t) 1 << (type_size * BITS_PER_MARKER)) - 1;
-               n->n &= ((uint64_t) 1 << (type_size * BITS_PER_MARKER)) - 1;
+               n->n &= ((uint64_t) 1l << (type_size * BITS_PER_MARKER)) - 1;
+               n->n &= ((uint64_t) 1l << (type_size * BITS_PER_MARKER)) - 1;
              }
              }
            n->type = type;
            n->type = type;
            if (!n->base_addr)
            if (!n->base_addr)
@@ -2489,7 +2489,7 @@
@@ -2489,7 +2489,7 @@
     {
     {
       uint64_t mask;
       uint64_t mask;
 
 
-      mask = ((uint64_t) 1 << (n->range * BITS_PER_MARKER)) - 1;
-      mask = ((uint64_t) 1 << (n->range * BITS_PER_MARKER)) - 1;
+      mask = ((uint64_t) 1l << (n->range * BITS_PER_MARKER)) - 1;
+      mask = ((uint64_t) 1l << (n->range * BITS_PER_MARKER)) - 1;
       cmpxchg >>= (64 / BITS_PER_MARKER - n->range) * BITS_PER_MARKER;
       cmpxchg >>= (64 / BITS_PER_MARKER - n->range) * BITS_PER_MARKER;
       cmpnop &= mask;
       cmpnop &= mask;
     }
     }
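
The tree-ssa-math-opts.c edits above only widen the shifted constant (1 -> 1l, or behind an explicit uint64_t cast) before it is used to build a mask up to 64 bits wide. A minimal stand-alone sketch of the class of bug this guards against (not GCC code):

    #include <stdint.h>
    #include <stdio.h>

    /* The constant 1 has type int, so "1 << n" is undefined as soon as n
       reaches the width of int; widening the constant first keeps a 64-bit
       mask well defined, which is what the 1l spellings above do. */
    int main(void)
    {
        unsigned n = 40;                        /* a shift count >= 32 */
        /* uint64_t bad = 1 << n;      -- undefined behaviour: int-width shift */
        uint64_t mask = ((uint64_t)1 << n) - 1; /* widened first: low 40 bits set */
        printf("mask = 0x%016llx\n", (unsigned long long)mask);
        return 0;
    }
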
 
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/ubsan.c gcc-6.2.0-zip/gcc/ubsan.c
 
--- gcc-6.2.0/gcc/ubsan.c       2016-08-12 15:57:35.000000000 -0400
 
+++ gcc-6.2.0-zip/gcc/ubsan.c   2017-08-23 09:29:11.196719450 -0400
 
@@ -1469,7 +1469,7 @@
 
 
 
   expanded_location xloc = expand_location (loc);
 
   if (xloc.file == NULL || strncmp (xloc.file, "\1", 2) == 0
 
-      || xloc.file == '\0' || xloc.file[0] == '\xff'
 
+      || xloc.file[0] == '\0' || xloc.file[0] == '\xff'
 
       || xloc.file[1] == '\xff')
 
     return false;
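
The one-character ubsan.c fix above turns a pointer comparison (xloc.file == '\0') into the intended empty-string test (xloc.file[0] == '\0'). A minimal stand-alone illustration of the difference, with hypothetical names (not GCC code):

    #include <stdio.h>

    int main(void)
    {
        const char *file = "";           /* non-null pointer to an empty string */
        /* "file == '\0'" compares the pointer against null ...               */
        printf("pointer test   : %d\n", file == (const char *)0);
        /* ... while the intent is to ask whether the string is empty.        */
        printf("first-char test: %d\n", file[0] == '\0');
        return 0;
    }
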
 
 
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/varasm.c gcc-6.2.0-zip/gcc/varasm.c
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/varasm.c gcc-6.2.0-zip/gcc/varasm.c
--- gcc-6.2.0/gcc/varasm.c      2016-03-31 11:30:33.000000000 -0400
--- gcc-6.2.0/gcc/varasm.c      2016-03-31 11:30:33.000000000 -0400
+++ gcc-6.2.0-zip/gcc/varasm.c  2016-12-31 17:03:08.629193673 -0500
+++ gcc-6.2.0-zip/gcc/varasm.c  2016-12-31 17:03:08.629193673 -0500
@@ -2771,7 +2771,7 @@
@@ -2771,7 +2771,7 @@
 
 
   /* Put out the first word with the specified alignment.  */
   /* Put out the first word with the specified alignment.  */
   if (reverse)
   if (reverse)
-    elt = flip_storage_order (SImode, gen_int_mode (data[nelts - 1], SImode));
-    elt = flip_storage_order (SImode, gen_int_mode (data[nelts - 1], SImode));
+    elt = flip_storage_order (word_mode, gen_int_mode (data[nelts - 1], SImode));
+    elt = flip_storage_order (word_mode, gen_int_mode (data[nelts - 1], SImode));
   else
   else
     elt = GEN_INT (data[0]);
     elt = GEN_INT (data[0]);
   assemble_integer (elt, MIN (nunits, units_per), align, 1);
   assemble_integer (elt, MIN (nunits, units_per), align, 1);
@@ -2783,7 +2783,7 @@
@@ -2783,7 +2783,7 @@
   for (int i = 1; i < nelts; i++)
   for (int i = 1; i < nelts; i++)
     {
     {
       if (reverse)
       if (reverse)
-       elt = flip_storage_order (SImode,
-       elt = flip_storage_order (SImode,
+       elt = flip_storage_order (word_mode,
+       elt = flip_storage_order (word_mode,
                                  gen_int_mode (data[nelts - 1 - i], SImode));
                                  gen_int_mode (data[nelts - 1 - i], SImode));
       else
       else
        elt = GEN_INT (data[i]);
        elt = GEN_INT (data[i]);
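
The varasm.c hunks above switch flip_storage_order() from a hard-coded SImode to word_mode when emitting the words of a scalar whose storage order is reversed. For one 32-bit word, flipping the storage order simply reverses its bytes; a minimal stand-alone sketch of that operation (illustration only, not GCC code):

    #include <stdint.h>
    #include <stdio.h>

    /* Reverse the bytes of a 32-bit value, which is what reversing the
       storage order of a single 32-bit word amounts to. */
    static uint32_t byte_reverse32(uint32_t x)
    {
        return (x >> 24) | ((x >> 8) & 0x0000ff00u)
             | ((x << 8) & 0x00ff0000u) | (x << 24);
    }

    int main(void)
    {
        uint32_t v = 0x11223344u;
        printf("0x%08x -> 0x%08x\n", v, byte_reverse32(v));   /* 0x44332211 */
        return 0;
    }
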
diff -Naur '--exclude=*.swp' gcc-6.2.0/libgcc/config/zip/sfp-machine.h gcc-6.2.0-zip/libgcc/config/zip/sfp-machine.h
diff -Naur '--exclude=*.swp' gcc-6.2.0/libgcc/config/zip/sfp-machine.h gcc-6.2.0-zip/libgcc/config/zip/sfp-machine.h
--- gcc-6.2.0/libgcc/config/zip/sfp-machine.h   1969-12-31 19:00:00.000000000 -0500
--- gcc-6.2.0/libgcc/config/zip/sfp-machine.h   1969-12-31 19:00:00.000000000 -0500
+++ gcc-6.2.0-zip/libgcc/config/zip/sfp-machine.h       2017-01-27 12:27:05.094921759 -0500
+++ gcc-6.2.0-zip/libgcc/config/zip/sfp-machine.h       2018-03-23 07:16:33.779338866 -0400
@@ -0,0 +1,53 @@
@@ -0,0 +1,55 @@
+#ifndef        ZIP_SFP_MACHINE_H
+#ifndef        ZIP_SFP_MACHINE_H
+#define        ZIP_SFP_MACHINE_H
+#define        ZIP_SFP_MACHINE_H
+
+
+#define        __BIG_ENDIAN    4321
+#define        __BIG_ENDIAN    4321
+#define        __BYTE_ORDER    __BIG_ENDIAN
+#define        __BYTE_ORDER    __BIG_ENDIAN
+
+
 
+#include "fp-bit.h"
 
+
+#define        _FP_W_TYPE_SIZE 32
+#define        _FP_W_TYPE_SIZE 32
+#define        _FP_W_TYPE      unsigned int
+#define        _FP_W_TYPE      unsigned int
+#define        _FP_WS_TYPE     signed int
+#define        _FP_WS_TYPE     signed int
+#define        _FP_I_TYPE      long
+#define        _FP_I_TYPE      long
+
+
+#define        _FP_MUL_MEAT_S(R,X,Y) _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm)
+#define        _FP_MUL_MEAT_S(R,X,Y) _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm)
+#define        _FP_MUL_MEAT_D(R,X,Y) _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
+#define        _FP_MUL_MEAT_D(R,X,Y) _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
+
+
+#define        _FP_DIV_MEAT_S(R,X,Y)   _FP_DIV_MEAT_1_loop(S,R,X,Y)
+#define        _FP_DIV_MEAT_S(R,X,Y)   _FP_DIV_MEAT_1_loop(S,R,X,Y)
+#define        _FP_DIV_MEAT_D(R,X,Y)   _FP_DIV_MEAT_2_udiv(D,R,X,Y)
+#define        _FP_DIV_MEAT_D(R,X,Y)   _FP_DIV_MEAT_2_udiv(D,R,X,Y)
+
+
+#define        _FP_NANFRAC_S   ((_FP_QNANBIT_S<<1)-1)
+#define        _FP_NANFRAC_S   ((_FP_QNANBIT_S<<1)-1)
+#define        _FP_NANFRAC_D   ((_FP_QNANBIT_D<<1)-1), -1
+#define        _FP_NANFRAC_D   ((_FP_QNANBIT_D<<1)-1), -1
+
+
+#define        _FP_QNANNEGATEDP        0
+#define        _FP_QNANNEGATEDP        0
+#define        _FP_NANSIGN_S           0
+#define        _FP_NANSIGN_S           0
+#define        _FP_NANSIGN_D           0
+#define        _FP_NANSIGN_D           0
+#define        _FP_KEEPNANFRACP        1
+#define        _FP_KEEPNANFRACP        1
+
+
+/* Someone please check this.  --- copied from one of many other places  */
+/* Someone please check this.  --- copied from one of many other places  */
+#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP)                      \
+#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP)                      \
+  do {                                                          \
+  do {                                                          \
+    if ((_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs)          \
+    if ((_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs)          \
+        && !(_FP_FRAC_HIGH_RAW_##fs(Y) & _FP_QNANBIT_##fs))     \
+        && !(_FP_FRAC_HIGH_RAW_##fs(Y) & _FP_QNANBIT_##fs))     \
+      {                                                         \
+      {                                                         \
+        R##_s = Y##_s;                                          \
+        R##_s = Y##_s;                                          \
+        _FP_FRAC_COPY_##wc(R,Y);                                \
+        _FP_FRAC_COPY_##wc(R,Y);                                \
+      }                                                         \
+      }                                                         \
+    else                                                        \
+    else                                                        \
+      {                                                         \
+      {                                                         \
+        R##_s = X##_s;                                          \
+        R##_s = X##_s;                                          \
+        _FP_FRAC_COPY_##wc(R,X);                                \
+        _FP_FRAC_COPY_##wc(R,X);                                \
+      }                                                         \
+      }                                                         \
+    R##_c = FP_CLS_NAN;                                         \
+    R##_c = FP_CLS_NAN;                                         \
+  } while (0)
+  } while (0)
+
+
+/* Not checked.  */
+/* Not checked.  */
+#define _FP_TININESS_AFTER_ROUNDING 0
+#define _FP_TININESS_AFTER_ROUNDING 0
+
+
+#ifndef        __BYTE_ORDER
+#ifndef        __BYTE_ORDER
+#define        __BYTE_ORDER    __BIG_ENDIAN
+#define        __BYTE_ORDER    __BIG_ENDIAN
+#endif
+#endif
+
+
+#define        strong_alias(name, aliasname) _strong_alias(name, aliasname)
+#define        strong_alias(name, aliasname) _strong_alias(name, aliasname)
+#define        _strong_alias(name, aliasname)  \
+#define        _strong_alias(name, aliasname)  \
+       extern __typeof(name) aliasname __attribute__ ((alias (#name)));
+       extern __typeof(name) aliasname __attribute__ ((alias (#name)));
+#endif
+#endif
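
The _FP_CHOOSENAN macro above keeps Y's payload when X is a quiet NaN and Y is not, and X's payload otherwise, marking the result as a NaN either way; the file itself flags it as copied and unchecked. The quiet-NaN bit it tests (_FP_QNANBIT_S, the top fraction bit in single precision) can be shown stand-alone with plain bit manipulation (assumed helper names, not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    /* IEEE single precision: exponent all ones plus a nonzero fraction is a
       NaN; fraction bit 22 (the analogue of _FP_QNANBIT_S) marks it quiet. */
    static int is_quiet_nan_bits(uint32_t bits)
    {
        int is_nan = ((bits >> 23) & 0xffu) == 0xffu && (bits & 0x7fffffu) != 0;
        int quiet  = (bits >> 22) & 1u;
        return is_nan && quiet;
    }

    int main(void)
    {
        uint32_t qnan = 0x7fc00000u;   /* quiet NaN: quiet bit set        */
        uint32_t snan = 0x7fa00000u;   /* signalling NaN: quiet bit clear */
        printf("0x%08x quiet? %d\n", qnan, is_quiet_nan_bits(qnan));
        printf("0x%08x quiet? %d\n", snan, is_quiet_nan_bits(snan));
        return 0;
    }
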
diff -Naur '--exclude=*.swp' gcc-6.2.0/libgcc/config.host gcc-6.2.0-zip/libgcc/config.host
diff -Naur '--exclude=*.swp' gcc-6.2.0/libgcc/config.host gcc-6.2.0-zip/libgcc/config.host
--- gcc-6.2.0/libgcc/config.host        2016-05-17 02:15:52.000000000 -0400
--- gcc-6.2.0/libgcc/config.host        2016-05-17 02:15:52.000000000 -0400
+++ gcc-6.2.0-zip/libgcc/config.host    2017-01-27 12:28:31.470410459 -0500
+++ gcc-6.2.0-zip/libgcc/config.host    2018-03-23 07:31:08.573591955 -0400
@@ -197,6 +197,9 @@
@@ -197,6 +197,9 @@
 tic6x-*-*)
 tic6x-*-*)
        cpu_type=c6x
        cpu_type=c6x
        ;;
        ;;
+zip*)
+zip*)
+       cpu_type=zip
+       cpu_type=zip
+       ;;
+       ;;
 esac
 esac
 
 
 # Common parts for widely ported systems.
 # Common parts for widely ported systems.
@@ -1328,6 +1331,10 @@
@@ -1328,6 +1331,10 @@
        tmake_file="$tmake_file nvptx/t-nvptx"
        tmake_file="$tmake_file nvptx/t-nvptx"
        extra_parts="crt0.o"
        extra_parts="crt0.o"
        ;;
        ;;
+zip*)
+zip*)
+       tmake_file="$tmake_file t-softfp-sfdf t-softfp-excl t-softfp"
+       tmake_file="$tmake_file t-softfp-sfdf t-softfp t-fdpbit"
+       # extra_parts="crt0.o"
+       # extra_parts="crt0.o"
+       ;;
+       ;;
 *)
 *)
        echo "*** Configuration ${host} not supported" 1>&2
        echo "*** Configuration ${host} not supported" 1>&2
        exit 1
        exit 1
diff -Naur '--exclude=*.swp' gcc-6.2.0/libgcc/configure gcc-6.2.0-zip/libgcc/configure
diff -Naur '--exclude=*.swp' gcc-6.2.0/libgcc/configure gcc-6.2.0-zip/libgcc/configure
--- gcc-6.2.0/libgcc/configure  2016-04-11 15:45:35.000000000 -0400
--- gcc-6.2.0/libgcc/configure  2016-04-11 15:45:35.000000000 -0400
+++ gcc-6.2.0-zip/libgcc/configure      2017-01-27 15:53:43.141531475 -0500
+++ gcc-6.2.0-zip/libgcc/configure      2017-01-27 15:53:43.141531475 -0500
@@ -3805,13 +3805,13 @@
@@ -3805,13 +3805,13 @@
   CFLAGS=$ac_save_CFLAGS
   CFLAGS=$ac_save_CFLAGS
 elif test $ac_cv_prog_cc_g = yes; then
 elif test $ac_cv_prog_cc_g = yes; then
   if test "$GCC" = yes; then
   if test "$GCC" = yes; then
-    CFLAGS="-g -O2"
-    CFLAGS="-g -O2"
+    CFLAGS="-O3"
+    CFLAGS="-O3"
   else
   else
     CFLAGS="-g"
     CFLAGS="-g"
   fi
   fi
 else
 else
   if test "$GCC" = yes; then
   if test "$GCC" = yes; then
-    CFLAGS="-O2"
-    CFLAGS="-O2"
+    CFLAGS="-O3"
+    CFLAGS="-O3"
   else
   else
     CFLAGS=
     CFLAGS=
   fi
   fi
diff -Naur '--exclude=*.swp' gcc-6.2.0/libgcc/Makefile.in gcc-6.2.0-zip/libgcc/Makefile.in
diff -Naur '--exclude=*.swp' gcc-6.2.0/libgcc/Makefile.in gcc-6.2.0-zip/libgcc/Makefile.in
--- gcc-6.2.0/libgcc/Makefile.in        2016-02-25 07:23:52.000000000 -0500
--- gcc-6.2.0/libgcc/Makefile.in        2016-02-25 07:23:52.000000000 -0500
+++ gcc-6.2.0-zip/libgcc/Makefile.in    2017-01-27 15:54:32.241240828 -0500
+++ gcc-6.2.0-zip/libgcc/Makefile.in    2017-01-27 15:54:32.241240828 -0500
@@ -229,8 +229,8 @@
@@ -229,8 +229,8 @@
 
 
 # Options to use when compiling libgcc2.a.
 # Options to use when compiling libgcc2.a.
 #
 #
-LIBGCC2_DEBUG_CFLAGS = -g
-LIBGCC2_DEBUG_CFLAGS = -g
-LIBGCC2_CFLAGS = -O2 $(LIBGCC2_INCLUDES) $(GCC_CFLAGS) $(HOST_LIBGCC2_CFLAGS) \
-LIBGCC2_CFLAGS = -O2 $(LIBGCC2_INCLUDES) $(GCC_CFLAGS) $(HOST_LIBGCC2_CFLAGS) \
+LIBGCC2_DEBUG_CFLAGS =
+LIBGCC2_DEBUG_CFLAGS =
+LIBGCC2_CFLAGS = -O3 $(LIBGCC2_INCLUDES) $(GCC_CFLAGS) $(HOST_LIBGCC2_CFLAGS) \
+LIBGCC2_CFLAGS = -O3 $(LIBGCC2_INCLUDES) $(GCC_CFLAGS) $(HOST_LIBGCC2_CFLAGS) \
                 $(LIBGCC2_DEBUG_CFLAGS) -DIN_LIBGCC2 \
                 $(LIBGCC2_DEBUG_CFLAGS) -DIN_LIBGCC2 \
                 -fbuilding-libgcc -fno-stack-protector \
                 -fbuilding-libgcc -fno-stack-protector \
                 $(INHIBIT_LIBC_CFLAGS)
                 $(INHIBIT_LIBC_CFLAGS)
@@ -284,7 +284,7 @@
@@ -284,7 +284,7 @@
                  $(INCLUDES) @set_have_cc_tls@ @set_use_emutls@
                  $(INCLUDES) @set_have_cc_tls@ @set_use_emutls@
 
 
 # Options to use when compiling crtbegin/end.
 # Options to use when compiling crtbegin/end.
-CRTSTUFF_CFLAGS = -O2 $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -g0 \
-CRTSTUFF_CFLAGS = -O2 $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -g0 \
+CRTSTUFF_CFLAGS = -O3 $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -g0 \
+CRTSTUFF_CFLAGS = -O3 $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -g0 \
   $(NO_PIE_CFLAGS) -finhibit-size-directive -fno-inline -fno-exceptions \
   $(NO_PIE_CFLAGS) -finhibit-size-directive -fno-inline -fno-exceptions \
   -fno-zero-initialized-in-bss -fno-toplevel-reorder -fno-tree-vectorize \
   -fno-zero-initialized-in-bss -fno-toplevel-reorder -fno-tree-vectorize \
   -fbuilding-libgcc -fno-stack-protector $(FORCE_EXPLICIT_EH_REGISTRY) \
   -fbuilding-libgcc -fno-stack-protector $(FORCE_EXPLICIT_EH_REGISTRY) \
diff -Naur '--exclude=*.swp' gcc-6.2.0/libgomp/configure.tgt gcc-6.2.0-zip/libgomp/configure.tgt
diff -Naur '--exclude=*.swp' gcc-6.2.0/libgomp/configure.tgt gcc-6.2.0-zip/libgomp/configure.tgt
--- gcc-6.2.0/libgomp/configure.tgt     2015-09-03 12:20:35.000000000 -0400
--- gcc-6.2.0/libgomp/configure.tgt     2015-09-03 12:20:35.000000000 -0400
+++ gcc-6.2.0-zip/libgomp/configure.tgt 2016-12-31 17:06:26.795473062 -0500
+++ gcc-6.2.0-zip/libgomp/configure.tgt 2016-12-31 17:06:26.795473062 -0500
@@ -161,6 +161,9 @@
@@ -161,6 +161,9 @@
            config_path="rtems posix"
            config_path="rtems posix"
        fi
        fi
        ;;
        ;;
+  zip*)
+  zip*)
+       config_path="bsd posix"
+       config_path="bsd posix"
+       ;;
+       ;;
 
 
   *)
   *)
        ;;
        ;;
 
 
