[Patch] Raise the dead: i860

Jason Eckhardt jle@owlnet.rice.edu
Sat Aug 9 05:59:00 GMT 2003


This patch un-purges the i860 port.  The "new" config/i860/* files
are mostly unchanged from the final pre-purge versions, and obviously
won't build (pre-targetm port, poisoned macros, etc.).  Follow-up
patches will resolve the build problems.
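
For anyone unfamiliar with what the targetm conversion involves, it is
mostly mechanical: old-style target macros (several of which are
poisoned in system.h nowadays) get replaced by entries in the target
hook vector.  The new i860.c already carries the start of this -- the
snippet below is quoted from the file as posted; only the commented-out
FUNCTION_PROLOGUE line is an illustration of the old style rather than
code from the patch -- and the follow-ups extend the same pattern to
the remaining macros:

	/* Old style, roughly:
	   #define FUNCTION_PROLOGUE(FILE, SIZE) ...  */

	/* New style: initialize the GCC target structure.  */
	#undef TARGET_ASM_FUNCTION_PROLOGUE
	#define TARGET_ASM_FUNCTION_PROLOGUE i860_output_function_prologue
	#undef TARGET_ASM_FUNCTION_EPILOGUE
	#define TARGET_ASM_FUNCTION_EPILOGUE i860_output_function_epilogue

	struct gcc_target targetm = TARGET_INITIALIZER;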

Okay for mainline?
(note: my write permission currently seems hosed, so please commit it
 for me if it's acceptable).

ChangeLog:
	* MAINTAINERS: Resurrect the i860 maintainer.
gcc/ChangeLog:
	* config.gcc (i860-*-sysv4*): Add target.
	* config/i860/i860-protos.h: New.
	* config/i860/i860.c: New.
	* config/i860/i860.h: New.
	* config/i860/i860.md: New.
	* config/i860/sysv4.h: New.
	* config/i860/varargs.asm: New.
	* config/i860/x-sysv4: New.
	* config/i860/xm-i860.h: New.

Index: gcc/MAINTAINERS
===================================================================
RCS file: /cvsroot/gcc/gcc/MAINTAINERS,v
retrieving revision 1.297
diff -c -3 -p -r1.297 MAINTAINERS
*** gcc/MAINTAINERS	26 Jul 2003 16:53:54 -0000	1.297
--- gcc/MAINTAINERS	9 Aug 2003 05:25:35 -0000
*************** h8 port			Kazu Hirata		kazu@cs.umass.edu
*** 49,54 ****
--- 49,55 ----
  hppa port		Jeff Law		law@redhat.com
  hppa port		Dave Anglin		dave.anglin@nrc.ca
  i386 port		Richard Henderson	rth@redhat.com
+ i860 port		Jason Eckhardt		jle@rice.edu
  i960 port		Jim Wilson		wilson@tuliptree.org
  ia64 port		Jim Wilson		wilson@tuliptree.org
  ip2k port		Denis Chertykov		denisc@overta.ru
Index: gcc/gcc/config.gcc
===================================================================
RCS file: /cvsroot/gcc/gcc/gcc/config.gcc,v
retrieving revision 1.339
diff -c -3 -p -r1.339 config.gcc
*** gcc/gcc/config.gcc	6 Aug 2003 17:04:38 -0000	1.339
--- gcc/gcc/config.gcc	9 Aug 2003 05:25:47 -0000
*************** i[34567]86-*-kaos*)
*** 1232,1237 ****
--- 1232,1244 ----
  	tm_file="${tm_file} i386/unix.h i386/att.h dbxelf.h elfos.h i386/i386elf.h kaos.h i386/kaos-i386.h"
  	tmake_file="i386/t-i386elf t-svr4"
  	;;
+ i860-*-sysv4*)
+ 	tm_file="${tm_file} elfos.h svr4.h i860/sysv4.h"
+ 	xm_defines="USG SVR3"
+ 	xmake_file=i860/x-sysv4
+ 	tmake_file=t-svr4
+ 	extra_parts="crtbegin.o crtend.o"
+ 	;;
  i960-*-coff*)
  	tm_file="${tm_file} dbxcoff.h i960/i960-coff.h libgloss.h"
  	tmake_file=i960/t-960bare


New files:
diff -c3pN gcc/gcc/config/nil/i860-protos.h gcc/gcc/config/i860/i860-protos.h
*** gcc/gcc/config/nil/i860-protos.h	Wed Dec 31 19:00:00 1969
--- gcc/gcc/config/i860/i860-protos.h	Sat Aug  9 00:14:20 2003
***************
*** 0 ****
--- 1,61 ----
+ /* Definitions of target machine for GNU compiler, for Intel 860.
+    Copyright (C) 2000 Free Software Foundation, Inc.
+    Hacked substantially by Ron Guilmette (rfg@monkeys.com) to cater to
+    the whims of the System V Release 4 assembler.
+
+ This file is part of GNU CC.
+
+ GNU CC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GNU CC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GNU CC; see the file COPYING.  If not, write to
+ the Free Software Foundation, 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA.  */
+
+ /* Declare things which are defined in i860.c but called from
+    insn-output.c.  */
+
+ #ifdef RTX_CODE
+ extern unsigned long sfmode_constant_to_ulong PARAMS ((rtx));
+ extern const char *output_load PARAMS ((rtx *));
+ extern const char *output_store PARAMS ((rtx *));
+ extern const char *output_move_double PARAMS ((rtx *));
+ extern const char *output_fp_move_double PARAMS ((rtx *));
+ extern const char *output_block_move PARAMS ((rtx *));
+ extern const char *output_delay_insn PARAMS ((rtx));
+ #if 0
+ extern const char *output_delayed_branch PARAMS ((const char *, rtx *, rtx));
+ #endif
+ extern void output_load_address PARAMS ((rtx *));
+ extern int safe_insn_src_p PARAMS ((rtx, enum machine_mode));
+ extern int operand_clobbered_before_used_after PARAMS ((rtx, rtx));
+ extern int single_insn_src_p PARAMS ((rtx, enum machine_mode));
+ extern int reg_or_0_operand PARAMS ((rtx, enum machine_mode));
+ extern int arith_operand PARAMS ((rtx, enum machine_mode));
+ extern int logic_operand PARAMS ((rtx, enum machine_mode));
+ extern int shift_operand PARAMS ((rtx, enum machine_mode));
+ extern int compare_operand PARAMS ((rtx, enum machine_mode));
+ extern int bte_operand PARAMS ((rtx, enum machine_mode));
+ extern int indexed_operand PARAMS ((rtx, enum machine_mode));
+ extern int load_operand PARAMS ((rtx, enum machine_mode));
+ extern int small_int PARAMS ((rtx, enum machine_mode));
+ extern int logic_int PARAMS ((rtx, enum machine_mode));
+ extern int call_insn_operand PARAMS ((rtx, enum machine_mode));
+ extern rtx i860_saveregs PARAMS ((void));
+ #ifdef TREE_CODE
+ extern void i860_va_start PARAMS ((int, tree, rtx));
+ extern rtx i860_va_arg PARAMS ((tree, tree));
+ #endif /* TREE_CODE */
+ #endif /* RTX_CODE */
+
+ #ifdef TREE_CODE
+ extern tree i860_build_va_list PARAMS ((void));
+ #endif /* TREE_CODE */
diff -c3pN gcc/gcc/config/nil/i860.c gcc/gcc/config/i860/i860.c
*** gcc/gcc/config/nil/i860.c	Wed Dec 31 19:00:00 1969
--- gcc/gcc/config/i860/i860.c	Sat Aug  9 00:13:59 2003
***************
*** 0 ****
--- 1,2361 ----
+ /* Subroutines for insn-output.c for Intel 860
+    Copyright (C) 1989, 1991, 1997, 1998, 1999, 2000, 2001, 2002
+    Free Software Foundation, Inc.
+    Derived from sparc.c.
+
+    Written by Richard Stallman (rms@ai.mit.edu).
+
+    Hacked substantially by Ron Guilmette (rfg@netcom.com) to cater
+    to the whims of the System V Release 4 assembler.
+
+ This file is part of GNU CC.
+
+ GNU CC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GNU CC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GNU CC; see the file COPYING.  If not, write to
+ the Free Software Foundation, 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA.  */
+
+
+ #include "config.h"
+ #include "system.h"
+ #include "flags.h"
+ #include "rtl.h"
+ #include "tree.h"
+ #include "regs.h"
+ #include "hard-reg-set.h"
+ #include "real.h"
+ #include "insn-config.h"
+ #include "conditions.h"
+ #include "output.h"
+ #include "recog.h"
+ #include "insn-attr.h"
+ #include "function.h"
+ #include "expr.h"
+ #include "tm_p.h"
+ #include "target.h"
+ #include "target-def.h"
+
+ static rtx find_addr_reg PARAMS ((rtx));
+ static int reg_clobbered_p PARAMS ((rtx, rtx));
+ static const char *singlemove_string PARAMS ((rtx *));
+ static const char *load_opcode PARAMS ((enum machine_mode, const char *, rtx));
+ static const char *store_opcode PARAMS ((enum machine_mode, const char *, rtx));
+ static void output_size_for_block_move PARAMS ((rtx, rtx, rtx));
+ static void i860_output_function_prologue PARAMS ((FILE *, HOST_WIDE_INT));
+ static void i860_output_function_epilogue PARAMS ((FILE *, HOST_WIDE_INT));
+
+ #ifndef I860_REG_PREFIX
+ #define I860_REG_PREFIX ""
+ #endif
+
+ const char *i860_reg_prefix = I860_REG_PREFIX;
+
+ /* Save information from a "cmpxx" operation until the branch is emitted.  */
+
+ rtx i860_compare_op0, i860_compare_op1;
+
+ /* Initialize the GCC target structure.  */
+ #undef TARGET_ASM_FUNCTION_PROLOGUE
+ #define TARGET_ASM_FUNCTION_PROLOGUE i860_output_function_prologue
+ #undef TARGET_ASM_FUNCTION_EPILOGUE
+ #define TARGET_ASM_FUNCTION_EPILOGUE i860_output_function_epilogue
+
+ struct gcc_target targetm = TARGET_INITIALIZER;
+
+ /* Return non-zero if this pattern can be evaluated safely, even if it
+    was not asked for.  */
+ int
+ safe_insn_src_p (op, mode)
+      rtx op;
+      enum machine_mode mode;
+ {
+   /* Just experimenting.  */
+
+   /* No floating point src is safe if it contains an arithmetic
+      operation, since that operation may trap.  */
+   switch (GET_CODE (op))
+     {
+     case CONST_INT:
+     case LABEL_REF:
+     case SYMBOL_REF:
+     case CONST:
+       return 1;
+
+     case REG:
+       return 1;
+
+     case MEM:
+       return CONSTANT_ADDRESS_P (XEXP (op, 0));
+
+       /* We never need to negate or complement constants.  */
+     case NEG:
+       return (mode != SFmode && mode != DFmode);
+     case NOT:
+     case ZERO_EXTEND:
+       return 1;
+
+     case EQ:
+     case NE:
+     case LT:
+     case GT:
+     case LE:
+     case GE:
+     case LTU:
+     case GTU:
+     case LEU:
+     case GEU:
+     case MINUS:
+     case PLUS:
+       return (mode != SFmode && mode != DFmode);
+     case AND:
+     case IOR:
+     case XOR:
+     case ASHIFT:
+     case ASHIFTRT:
+     case LSHIFTRT:
+       if ((GET_CODE (XEXP (op, 0)) == CONST_INT && ! SMALL_INT (XEXP (op, 0)))
+ 	  || (GET_CODE (XEXP (op, 1)) == CONST_INT && ! SMALL_INT (XEXP (op, 1))))
+ 	return 0;
+       return 1;
+
+     default:
+       return 0;
+     }
+ }
+
+ /* Return 1 if REG is clobbered in IN.
+    Return 2 if REG is used in IN.
+    Return 3 if REG is both used and clobbered in IN.
+    Return 0 if neither.  */
+
+ static int
+ reg_clobbered_p (reg, in)
+      rtx reg;
+      rtx in;
+ {
+   register enum rtx_code code;
+
+   if (in == 0)
+     return 0;
+
+   code = GET_CODE (in);
+
+   if (code == SET || code == CLOBBER)
+     {
+       rtx dest = SET_DEST (in);
+       int set = 0;
+       int used = 0;
+
+       while (GET_CODE (dest) == STRICT_LOW_PART
+ 	     || GET_CODE (dest) == SUBREG
+ 	     || GET_CODE (dest) == SIGN_EXTRACT
+ 	     || GET_CODE (dest) == ZERO_EXTRACT)
+ 	dest = XEXP (dest, 0);
+
+       if (dest == reg)
+ 	set = 1;
+       else if (GET_CODE (dest) == REG
+ 	       && refers_to_regno_p (REGNO (reg),
+ 				     REGNO (reg) + HARD_REGNO_NREGS (reg, GET_MODE (reg)),
+ 				     SET_DEST (in), 0))
+ 	{
+ 	  set = 1;
+ 	  /* Anything that sets just part of the register
+ 	     is considered using as well as setting it.
+ 	     But note that a straight SUBREG of a single-word value
+ 	     clobbers the entire value.   */
+ 	  if (dest != SET_DEST (in)
+ 	      && ! (GET_CODE (SET_DEST (in)) == SUBREG
+ 		    || UNITS_PER_WORD >= GET_MODE_SIZE (GET_MODE (dest))))
+ 	    used = 1;
+ 	}
+
+       if (code == SET)
+ 	{
+ 	  if (set)
+ 	    used = refers_to_regno_p (REGNO (reg),
+ 				      REGNO (reg) + HARD_REGNO_NREGS (reg, GET_MODE (reg)),
+ 				      SET_SRC (in), 0);
+ 	  else
+ 	    used = refers_to_regno_p (REGNO (reg),
+ 				      REGNO (reg) + HARD_REGNO_NREGS (reg, GET_MODE (reg)),
+ 				      in, 0);
+ 	}
+
+       return set + used * 2;
+     }
+
+   if (refers_to_regno_p (REGNO (reg),
+ 			 REGNO (reg) + HARD_REGNO_NREGS (reg, GET_MODE (reg)),
+ 			 in, 0))
+     return 2;
+   return 0;
+ }
+
+ /* Return non-zero if OP can be written to without screwing up
+    GCC's model of what's going on.  It is assumed that this operand
+    appears in the dest position of a SET insn in a conditional
+    branch's delay slot.  AFTER is the label to start looking from.  */
+ int
+ operand_clobbered_before_used_after (op, after)
+      rtx op;
+      rtx after;
+ {
+   /* Just experimenting.  */
+   if (GET_CODE (op) == CC0)
+     return 1;
+   if (GET_CODE (op) == REG)
+     {
+       rtx insn;
+
+       if (op == stack_pointer_rtx)
+ 	return 0;
+
+       /* Scan forward from the label, to see if the value of OP
+ 	 is clobbered before the first use.  */
+
+       for (insn = NEXT_INSN (after); insn; insn = NEXT_INSN (insn))
+ 	{
+ 	  if (GET_CODE (insn) == NOTE)
+ 	    continue;
+ 	  if (GET_CODE (insn) == INSN
+ 	      || GET_CODE (insn) == JUMP_INSN
+ 	      || GET_CODE (insn) == CALL_INSN)
+ 	    {
+ 	      switch (reg_clobbered_p (op, PATTERN (insn)))
+ 		{
+ 		default:
+ 		  return 0;
+ 		case 1:
+ 		  return 1;
+ 		case 0:
+ 		  break;
+ 		}
+ 	    }
+ 	  /* If we reach another label without clobbering OP,
+ 	     then we cannot safely write it here.  */
+ 	  else if (GET_CODE (insn) == CODE_LABEL)
+ 	    return 0;
+ 	  if (GET_CODE (insn) == JUMP_INSN)
+ 	    {
+ 	      if (condjump_p (insn))
+ 		return 0;
+ 	      /* This is a jump insn which has already
+ 		 been mangled.  We can't tell what it does.  */
+ 	      if (GET_CODE (PATTERN (insn)) == PARALLEL)
+ 		return 0;
+ 	      if (! JUMP_LABEL (insn))
+ 		return 0;
+ 	      /* Keep following jumps.  */
+ 	      insn = JUMP_LABEL (insn);
+ 	    }
+ 	}
+       return 1;
+     }
+
+   /* In both of these cases, the first insn executed
+      for this op will be an orh whatever%h,%?r0,%?r31,
+      which is tolerable.  */
+   if (GET_CODE (op) == MEM)
+     return (CONSTANT_ADDRESS_P (XEXP (op, 0)));
+
+   return 0;
+ }
+
+ /* Return non-zero if this pattern, as a source to a "SET",
+    is known to yield an instruction of unit size.  */
+ int
+ single_insn_src_p (op, mode)
+      rtx op;
+      enum machine_mode mode;
+ {
+   switch (GET_CODE (op))
+     {
+     case CONST_INT:
+       /* This is not always a single insn src, technically,
+ 	 but output_delayed_branch knows how to deal with it.  */
+       return 1;
+
+     case SYMBOL_REF:
+     case CONST:
+       /* This is not a single insn src, technically,
+ 	 but output_delayed_branch knows how to deal with it.  */
+       return 1;
+
+     case REG:
+       return 1;
+
+     case MEM:
+       return 1;
+
+       /* We never need to negate or complement constants.  */
+     case NEG:
+       return (mode != DFmode);
+     case NOT:
+     case ZERO_EXTEND:
+       return 1;
+
+     case PLUS:
+     case MINUS:
+       /* Detect cases that require multiple instructions.  */
+       if (CONSTANT_P (XEXP (op, 1))
+ 	  && !(GET_CODE (XEXP (op, 1)) == CONST_INT
+ 	       && SMALL_INT (XEXP (op, 1))))
+ 	return 0;
+     case EQ:
+     case NE:
+     case LT:
+     case GT:
+     case LE:
+     case GE:
+     case LTU:
+     case GTU:
+     case LEU:
+     case GEU:
+       /* Not doing floating point, since they probably
+ 	 take longer than the branch slot they might fill.  */
+       return (mode != SFmode && mode != DFmode);
+
+     case AND:
+       if (GET_CODE (XEXP (op, 1)) == NOT)
+ 	{
+ 	  rtx arg = XEXP (XEXP (op, 1), 0);
+ 	  if (CONSTANT_P (arg)
+ 	      && !(GET_CODE (arg) == CONST_INT
+ 		   && (SMALL_INT (arg)
+ 		       || (INTVAL (arg) & 0xffff) == 0)))
+ 	    return 0;
+ 	}
+     case IOR:
+     case XOR:
+       /* Both small and round numbers take one instruction;
+ 	 others take two.  */
+       if (CONSTANT_P (XEXP (op, 1))
+ 	  && !(GET_CODE (XEXP (op, 1)) == CONST_INT
+ 	       && (SMALL_INT (XEXP (op, 1))
+ 		   || (INTVAL (XEXP (op, 1)) & 0xffff) == 0)))
+ 	return 0;
+
+     case ASHIFT:
+     case ASHIFTRT:
+     case LSHIFTRT:
+       return 1;
+
+     case SUBREG:
+       if (SUBREG_BYTE (op) != 0)
+ 	return 0;
+       return single_insn_src_p (SUBREG_REG (op), mode);
+
+       /* Not doing floating point, since they probably
+ 	 take longer than the branch slot they might fill.  */
+     case FLOAT_EXTEND:
+     case FLOAT_TRUNCATE:
+     case FLOAT:
+     case FIX:
+     case UNSIGNED_FLOAT:
+     case UNSIGNED_FIX:
+       return 0;
+
+     default:
+       return 0;
+     }
+ }
+
+ /* Return non-zero only if OP is a register of mode MODE,
+    or const0_rtx.  */
+ int
+ reg_or_0_operand (op, mode)
+      rtx op;
+      enum machine_mode mode;
+ {
+   return (op == const0_rtx || register_operand (op, mode)
+ 	  || op == CONST0_RTX (mode));
+ }
+
+ /* Return truth value of whether OP can be used as an operand in a three
+    address add/subtract insn (such as add %o1,7,%l2) of mode MODE.  */
+
+ int
+ arith_operand (op, mode)
+      rtx op;
+      enum machine_mode mode;
+ {
+   return (register_operand (op, mode)
+ 	  || (GET_CODE (op) == CONST_INT && SMALL_INT (op)));
+ }
+
+ /* Return 1 if OP is a valid first operand for a logical insn of mode MODE.  */
+
+ int
+ logic_operand (op, mode)
+      rtx op;
+      enum machine_mode mode;
+ {
+   return (register_operand (op, mode)
+ 	  || (GET_CODE (op) == CONST_INT && LOGIC_INT (op)));
+ }
+
+ /* Return 1 if OP is a valid first operand for a shift insn of mode MODE.  */
+
+ int
+ shift_operand (op, mode)
+      rtx op;
+      enum machine_mode mode;
+ {
+   return (register_operand (op, mode)
+           || (GET_CODE (op) == CONST_INT));
+ }
+
+ /* Return 1 if OP is a valid first operand for either a logical insn
+    or an add insn of mode MODE.  */
+
+ int
+ compare_operand (op, mode)
+      rtx op;
+      enum machine_mode mode;
+ {
+   return (register_operand (op, mode)
+ 	  || (GET_CODE (op) == CONST_INT && SMALL_INT (op) && LOGIC_INT (op)));
+ }
+
+ /* Return truth value of whether OP can be used as the 5-bit immediate
+    operand of a bte or btne insn.  */
+
+ int
+ bte_operand (op, mode)
+      rtx op;
+      enum machine_mode mode;
+ {
+   return (register_operand (op, mode)
+ 	  || (GET_CODE (op) == CONST_INT
+ 	      && (unsigned) INTVAL (op) < 0x20));
+ }
+
+ /* Return 1 if OP is an indexed memory reference of mode MODE.  */
+
+ int
+ indexed_operand (op, mode)
+      rtx op;
+      enum machine_mode mode;
+ {
+   return (GET_CODE (op) == MEM && GET_MODE (op) == mode
+ 	  && GET_CODE (XEXP (op, 0)) == PLUS
+ 	  && GET_MODE (XEXP (op, 0)) == SImode
+ 	  && register_operand (XEXP (XEXP (op, 0), 0), SImode)
+ 	  && register_operand (XEXP (XEXP (op, 0), 1), SImode));
+ }
+
+ /* Return 1 if OP is a suitable source operand for a load insn
+    with mode MODE.  */
+
+ int
+ load_operand (op, mode)
+      rtx op;
+      enum machine_mode mode;
+ {
+   return (memory_operand (op, mode) || indexed_operand (op, mode));
+ }
+
+ /* Return truth value of whether OP is an integer which fits the
+    range constraining immediate operands in add/subtract insns.  */
+
+ int
+ small_int (op, mode)
+      rtx op;
+      enum machine_mode mode ATTRIBUTE_UNUSED;
+ {
+   return (GET_CODE (op) == CONST_INT && SMALL_INT (op));
+ }
+
+ /* Return truth value of whether OP is an integer which fits the
+    range constraining immediate operands in logic insns.  */
+
+ int
+ logic_int (op, mode)
+      rtx op;
+      enum machine_mode mode ATTRIBUTE_UNUSED;
+ {
+   return (GET_CODE (op) == CONST_INT && LOGIC_INT (op));
+ }
+
+ /* Test for a valid operand for a call instruction.
+    Don't allow the arg pointer register or virtual regs
+    since they may change into reg + const, which the patterns
+    can't handle yet.  */
+
+ int
+ call_insn_operand (op, mode)
+      rtx op;
+      enum machine_mode mode ATTRIBUTE_UNUSED;
+ {
+   if (GET_CODE (op) == MEM
+       && (CONSTANT_ADDRESS_P (XEXP (op, 0))
+ 	  || (GET_CODE (XEXP (op, 0)) == REG
+ 	      && XEXP (op, 0) != arg_pointer_rtx
+ 	      && !(REGNO (XEXP (op, 0)) >= FIRST_PSEUDO_REGISTER
+ 		   && REGNO (XEXP (op, 0)) <= LAST_VIRTUAL_REGISTER))))
+     return 1;
+   return 0;
+ }
+
+ /* Return the best assembler insn template
+    for moving operands[1] into operands[0] as a fullword.  */
+
+ static const char *
+ singlemove_string (operands)
+      rtx *operands;
+ {
+   if (GET_CODE (operands[0]) == MEM)
+     {
+       if (GET_CODE (operands[1]) != MEM)
+ 	if (CONSTANT_ADDRESS_P (XEXP (operands[0], 0)))
+ 	  {
+ 	    if (! ((cc_prev_status.flags & CC_KNOW_HI_R31)
+ 		   && (cc_prev_status.flags & CC_HI_R31_ADJ)
+ 		   && cc_prev_status.mdep == XEXP (operands[0], 0)))
+ 	      {
+ 		CC_STATUS_INIT;
+ 	        output_asm_insn ("orh %h0,%?r0,%?r31", operands);
+ 	      }
+ 	    cc_status.flags |= CC_KNOW_HI_R31 | CC_HI_R31_ADJ;
+ 	    cc_status.mdep = XEXP (operands[0], 0);
+ 	    return "st.l %r1,%L0(%?r31)";
+ 	  }
+ 	else
+ 	  return "st.l %r1,%0";
+       else
+ 	abort ();
+ #if 0
+ 	{
+ 	  rtx xoperands[2];
+
+ 	  cc_status.flags &= ~CC_F0_IS_0;
+ 	  xoperands[0] = gen_rtx_REG (SFmode, 32);
+ 	  xoperands[1] = operands[1];
+ 	  output_asm_insn (singlemove_string (xoperands), xoperands);
+ 	  xoperands[1] = xoperands[0];
+ 	  xoperands[0] = operands[0];
+ 	  output_asm_insn (singlemove_string (xoperands), xoperands);
+ 	  return "";
+ 	}
+ #endif
+     }
+   if (GET_CODE (operands[1]) == MEM)
+     {
+       if (CONSTANT_ADDRESS_P (XEXP (operands[1], 0)))
+ 	{
+ 	  if (! ((cc_prev_status.flags & CC_KNOW_HI_R31)
+ 		 && (cc_prev_status.flags & CC_HI_R31_ADJ)
+ 		 && cc_prev_status.mdep == XEXP (operands[1], 0)))
+ 	    {
+ 	      CC_STATUS_INIT;
+ 	      output_asm_insn ("orh %h1,%?r0,%?r31", operands);
+ 	    }
+ 	  cc_status.flags |= CC_KNOW_HI_R31 | CC_HI_R31_ADJ;
+ 	  cc_status.mdep = XEXP (operands[1], 0);
+ 	  return "ld.l %L1(%?r31),%0";
+ 	}
+       return "ld.l %m1,%0";
+     }
+  if (GET_CODE (operands[1]) == CONST_INT)
+    {
+      if (operands[1] == const0_rtx)
+       return "mov %?r0,%0";
+      if((INTVAL (operands[1]) & 0xffff0000) == 0)
+       return "or %L1,%?r0,%0";
+      if((INTVAL (operands[1]) & 0xffff8000) == 0xffff8000)
+       return "adds %1,%?r0,%0";
+      if((INTVAL (operands[1]) & 0x0000ffff) == 0)
+       return "orh %H1,%?r0,%0";
+
+      return "orh %H1,%?r0,%0\n\tor %L1,%0,%0";
+    }
+   return "mov %1,%0";
+ }
+
+ /* Output assembler code to perform a doubleword move insn
+    with operands OPERANDS.  */
+
+ const char *
+ output_move_double (operands)
+      rtx *operands;
+ {
+   enum { REGOP, OFFSOP, MEMOP, PUSHOP, POPOP, CNSTOP, RNDOP } optype0, optype1;
+   rtx latehalf[2];
+   rtx addreg0 = 0, addreg1 = 0;
+   int highest_first = 0;
+   int no_addreg1_decrement = 0;
+
+   /* First classify both operands.  */
+
+   if (REG_P (operands[0]))
+     optype0 = REGOP;
+   else if (offsettable_memref_p (operands[0]))
+     optype0 = OFFSOP;
+   else if (GET_CODE (operands[0]) == MEM)
+     optype0 = MEMOP;
+   else
+     optype0 = RNDOP;
+
+   if (REG_P (operands[1]))
+     optype1 = REGOP;
+   else if (CONSTANT_P (operands[1]))
+     optype1 = CNSTOP;
+   else if (offsettable_memref_p (operands[1]))
+     optype1 = OFFSOP;
+   else if (GET_CODE (operands[1]) == MEM)
+     optype1 = MEMOP;
+   else
+     optype1 = RNDOP;
+
+   /* Check for the cases that the operand constraints are not
+      supposed to allow to happen.  Abort if we get one,
+      because generating code for these cases is painful.  */
+
+   if (optype0 == RNDOP || optype1 == RNDOP)
+     abort ();
+
+   /* If an operand is an unoffsettable memory ref, find a register
+      we can increment temporarily to make it refer to the second word.  */
+
+   if (optype0 == MEMOP)
+     addreg0 = find_addr_reg (XEXP (operands[0], 0));
+
+   if (optype1 == MEMOP)
+     addreg1 = find_addr_reg (XEXP (operands[1], 0));
+
+ /* ??? Perhaps in some cases move double words
+    if there is a spare pair of floating regs.  */
+
+   /* Ok, we can do one word at a time.
+      Normally we do the low-numbered word first,
+      but if either operand is autodecrementing then we
+      do the high-numbered word first.
+
+      In either case, set up in LATEHALF the operands to use
+      for the high-numbered word and in some cases alter the
+      operands in OPERANDS to be suitable for the low-numbered word.  */
+
+   if (optype0 == REGOP)
+     latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
+   else if (optype0 == OFFSOP)
+     latehalf[0] = adjust_address (operands[0], SImode, 4);
+   else
+     latehalf[0] = operands[0];
+
+   if (optype1 == REGOP)
+     latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
+   else if (optype1 == OFFSOP)
+     latehalf[1] = adjust_address (operands[1], SImode, 4);
+   else if (optype1 == CNSTOP)
+     {
+       if (GET_CODE (operands[1]) == CONST_DOUBLE)
+ 	split_double (operands[1], &operands[1], &latehalf[1]);
+       else if (CONSTANT_P (operands[1]))
+ 	latehalf[1] = const0_rtx;
+     }
+   else
+     latehalf[1] = operands[1];
+
+   /* If the first move would clobber the source of the second one,
+      do them in the other order.
+
+      RMS says "This happens only for registers;
+      such overlap can't happen in memory unless the user explicitly
+      sets it up, and that is an undefined circumstance."
+
+      but it happens on the sparc when loading parameter registers,
+      so I am going to define that circumstance, and make it work
+      as expected.  */
+
+   if (optype0 == REGOP && optype1 == REGOP
+       && REGNO (operands[0]) == REGNO (latehalf[1]))
+     {
+       CC_STATUS_PARTIAL_INIT;
+       /* Make any unoffsettable addresses point at high-numbered word.  */
+       if (addreg0)
+ 	output_asm_insn ("adds 0x4,%0,%0", &addreg0);
+       if (addreg1)
+ 	output_asm_insn ("adds 0x4,%0,%0", &addreg1);
+
+       /* Do that word.  */
+       output_asm_insn (singlemove_string (latehalf), latehalf);
+
+       /* Undo the adds we just did.  */
+       if (addreg0)
+ 	output_asm_insn ("adds -0x4,%0,%0", &addreg0);
+       if (addreg1)
+ 	output_asm_insn ("adds -0x4,%0,%0", &addreg1);
+
+       /* Do low-numbered word.  */
+       return singlemove_string (operands);
+     }
+   else if (optype0 == REGOP && optype1 != REGOP
+ 	   && reg_overlap_mentioned_p (operands[0], operands[1]))
+     {
+       /* If both halves of dest are used in the src memory address,
+ 	 add the two regs and put them in the low reg (operands[0]).
+ 	 Then it works to load latehalf first.  */
+       if (reg_mentioned_p (operands[0], XEXP (operands[1], 0))
+ 	  && reg_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
+ 	{
+ 	  rtx xops[2];
+ 	  xops[0] = latehalf[0];
+ 	  xops[1] = operands[0];
+ 	  output_asm_insn ("adds %1,%0,%1", xops);
+ 	  operands[1] = gen_rtx_MEM (DImode, operands[0]);
+ 	  latehalf[1] = adjust_address (operands[1], SImode, 4);
+ 	  addreg1 = 0;
+ 	  highest_first = 1;
+ 	}
+       /* Only one register in the dest is used in the src memory address,
+ 	 and this is the first register of the dest, so we want to do
+ 	 the late half first here also.  */
+       else if (! reg_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
+ 	highest_first = 1;
+       /* Only one register in the dest is used in the src memory address,
+ 	 and this is the second register of the dest, so we want to do
+ 	 the late half last.  If addreg1 is set, and addreg1 is the same
+ 	 register as latehalf, then we must suppress the trailing decrement,
+ 	 because it would clobber the value just loaded.  */
+       else if (addreg1 && reg_mentioned_p (addreg1, latehalf[0]))
+ 	no_addreg1_decrement = 1;
+     }
+
+   /* Normal case: do the two words, low-numbered first.
+      Overlap case (highest_first set): do high-numbered word first.  */
+
+   if (! highest_first)
+     output_asm_insn (singlemove_string (operands), operands);
+
+   CC_STATUS_PARTIAL_INIT;
+   /* Make any unoffsettable addresses point at high-numbered word.  */
+   if (addreg0)
+     output_asm_insn ("adds 0x4,%0,%0", &addreg0);
+   if (addreg1)
+     output_asm_insn ("adds 0x4,%0,%0", &addreg1);
+
+   /* Do that word.  */
+   output_asm_insn (singlemove_string (latehalf), latehalf);
+
+   /* Undo the adds we just did.  */
+   if (addreg0)
+     output_asm_insn ("adds -0x4,%0,%0", &addreg0);
+   if (addreg1 && !no_addreg1_decrement)
+     output_asm_insn ("adds -0x4,%0,%0", &addreg1);
+
+   if (highest_first)
+     output_asm_insn (singlemove_string (operands), operands);
+
+   return "";
+ }
+
+ const char *
+ output_fp_move_double (operands)
+      rtx *operands;
+ {
+   /* If the source operand is any sort of zero, use f0 instead.  */
+
+   if (operands[1] == CONST0_RTX (GET_MODE (operands[1])))
+     operands[1] = gen_rtx_REG (DFmode, F0_REGNUM);
+
+   if (FP_REG_P (operands[0]))
+     {
+       if (FP_REG_P (operands[1]))
+ 	return "fmov.dd %1,%0";
+       if (GET_CODE (operands[1]) == REG)
+ 	{
+ 	  output_asm_insn ("ixfr %1,%0", operands);
+ 	  operands[0] = gen_rtx_REG (VOIDmode, REGNO (operands[0]) + 1);
+ 	  operands[1] = gen_rtx_REG (VOIDmode, REGNO (operands[1]) + 1);
+ 	  return "ixfr %1,%0";
+ 	}
+       if (operands[1] == CONST0_RTX (DFmode))
+ 	return "fmov.dd f0,%0";
+       if (CONSTANT_ADDRESS_P (XEXP (operands[1], 0)))
+ 	{
+ 	  if (! ((cc_prev_status.flags & CC_KNOW_HI_R31)
+ 		 && (cc_prev_status.flags & CC_HI_R31_ADJ)
+ 		 && cc_prev_status.mdep == XEXP (operands[1], 0)))
+ 	    {
+ 	      CC_STATUS_INIT;
+ 	      output_asm_insn ("orh %h1,%?r0,%?r31", operands);
+ 	    }
+ 	  cc_status.flags |= CC_KNOW_HI_R31 | CC_HI_R31_ADJ;
+ 	  cc_status.mdep = XEXP (operands[1], 0);
+ 	  return "fld.d %L1(%?r31),%0";
+ 	}
+       return "fld.d %1,%0";
+     }
+   else if (FP_REG_P (operands[1]))
+     {
+       if (GET_CODE (operands[0]) == REG)
+ 	{
+ 	  output_asm_insn ("fxfr %1,%0", operands);
+ 	  operands[0] = gen_rtx_REG (VOIDmode, REGNO (operands[0]) + 1);
+ 	  operands[1] = gen_rtx_REG (VOIDmode, REGNO (operands[1]) + 1);
+ 	  return "fxfr %1,%0";
+ 	}
+       if (CONSTANT_ADDRESS_P (XEXP (operands[0], 0)))
+ 	{
+ 	  if (! ((cc_prev_status.flags & CC_KNOW_HI_R31)
+ 		 && (cc_prev_status.flags & CC_HI_R31_ADJ)
+ 		 && cc_prev_status.mdep == XEXP (operands[0], 0)))
+ 	    {
+ 	      CC_STATUS_INIT;
+ 	      output_asm_insn ("orh %h0,%?r0,%?r31", operands);
+ 	    }
+ 	  cc_status.flags |= CC_KNOW_HI_R31 | CC_HI_R31_ADJ;
+ 	  cc_status.mdep = XEXP (operands[0], 0);
+ 	  return "fst.d %1,%L0(%?r31)";
+ 	}
+       return "fst.d %1,%0";
+     }
+   else
+     abort ();
+   /* NOTREACHED */
+   return NULL;
+ }
+
+ /* Return a REG that occurs in ADDR with coefficient 1.
+    ADDR can be effectively incremented by incrementing REG.  */
+
+ static rtx
+ find_addr_reg (addr)
+      rtx addr;
+ {
+   while (GET_CODE (addr) == PLUS)
+     {
+       if (GET_CODE (XEXP (addr, 0)) == REG)
+ 	addr = XEXP (addr, 0);
+       else if (GET_CODE (XEXP (addr, 1)) == REG)
+ 	addr = XEXP (addr, 1);
+       else if (CONSTANT_P (XEXP (addr, 0)))
+ 	addr = XEXP (addr, 1);
+       else if (CONSTANT_P (XEXP (addr, 1)))
+ 	addr = XEXP (addr, 0);
+       else
+ 	abort ();
+     }
+   if (GET_CODE (addr) == REG)
+     return addr;
+   abort ();
+   /* NOTREACHED */
+   return NULL;
+ }
+
+ /* Return a template for a load instruction with mode MODE and
+    arguments from the string ARGS.
+
+    This string is in static storage.   */
+
+ static const char *
+ load_opcode (mode, args, reg)
+      enum machine_mode mode;
+      const char *args;
+      rtx reg;
+ {
+   static char buf[30];
+   const char *opcode;
+
+   switch (mode)
+     {
+     case QImode:
+       opcode = "ld.b";
+       break;
+
+     case HImode:
+       opcode = "ld.s";
+       break;
+
+     case SImode:
+     case SFmode:
+       if (FP_REG_P (reg))
+ 	opcode = "fld.l";
+       else
+ 	opcode = "ld.l";
+       break;
+
+     case DImode:
+       if (!FP_REG_P (reg))
+ 	abort ();
+     case DFmode:
+       opcode = "fld.d";
+       break;
+
+     default:
+       abort ();
+     }
+
+   sprintf (buf, "%s %s", opcode, args);
+   return buf;
+ }
+
+ /* Return a template for a store instruction with mode MODE and
+    arguments from the string ARGS.
+
+    This string is in static storage.   */
+
+ static const char *
+ store_opcode (mode, args, reg)
+      enum machine_mode mode;
+      const char *args;
+      rtx reg;
+ {
+   static char buf[30];
+   const char *opcode;
+
+   switch (mode)
+     {
+     case QImode:
+       opcode = "st.b";
+       break;
+
+     case HImode:
+       opcode = "st.s";
+       break;
+
+     case SImode:
+     case SFmode:
+       if (FP_REG_P (reg))
+ 	opcode = "fst.l";
+       else
+ 	opcode = "st.l";
+       break;
+
+     case DImode:
+       if (!FP_REG_P (reg))
+ 	abort ();
+     case DFmode:
+       opcode = "fst.d";
+       break;
+
+     default:
+       abort ();
+     }
+
+   sprintf (buf, "%s %s", opcode, args);
+   return buf;
+ }
+
+ /* Output a store-in-memory whose operands are OPERANDS[0,1].
+    OPERANDS[0] is a MEM, and OPERANDS[1] is a reg or zero.
+
+    This function returns a template for an insn.
+    This is in static storage.
+
+    It may also output some insns directly.
+    It may alter the values of operands[0] and operands[1].  */
+
+ const char *
+ output_store (operands)
+      rtx *operands;
+ {
+   enum machine_mode mode = GET_MODE (operands[0]);
+   rtx address = XEXP (operands[0], 0);
+
+   cc_status.flags |= CC_KNOW_HI_R31 | CC_HI_R31_ADJ;
+   cc_status.mdep = address;
+
+   if (! ((cc_prev_status.flags & CC_KNOW_HI_R31)
+ 	 && (cc_prev_status.flags & CC_HI_R31_ADJ)
+ 	 && address == cc_prev_status.mdep))
+     {
+       CC_STATUS_INIT;
+       output_asm_insn ("orh %h0,%?r0,%?r31", operands);
+       cc_prev_status.mdep = address;
+     }
+
+   /* Store zero in two parts when appropriate.  */
+   if (mode == DFmode && operands[1] == CONST0_RTX (DFmode))
+     return store_opcode (DFmode, "%r1,%L0(%?r31)", operands[1]);
+
+   /* Code below isn't smart enough to move a doubleword in two parts,
+      so use output_move_double to do that in the cases that require it.  */
+   if ((mode == DImode || mode == DFmode)
+       && ! FP_REG_P (operands[1]))
+     return output_move_double (operands);
+
+   return store_opcode (mode, "%r1,%L0(%?r31)", operands[1]);
+ }
+
+ /* Output a load-from-memory whose operands are OPERANDS[0,1].
+    OPERANDS[0] is a reg, and OPERANDS[1] is a mem.
+
+    This function returns a template for an insn.
+    This is in static storage.
+
+    It may also output some insns directly.
+    It may alter the values of operands[0] and operands[1].  */
+
+ const char *
+ output_load (operands)
+      rtx *operands;
+ {
+   enum machine_mode mode = GET_MODE (operands[0]);
+   rtx address = XEXP (operands[1], 0);
+
+   /* We don't bother trying to see if we know %hi(address).
+      This is because we are doing a load, and if we know the
+      %hi value, we probably also know that value in memory.  */
+   cc_status.flags |= CC_KNOW_HI_R31 | CC_HI_R31_ADJ;
+   cc_status.mdep = address;
+
+   if (! ((cc_prev_status.flags & CC_KNOW_HI_R31)
+ 	 && (cc_prev_status.flags & CC_HI_R31_ADJ)
+ 	 && address == cc_prev_status.mdep
+ 	 && cc_prev_status.mdep == cc_status.mdep))
+     {
+       CC_STATUS_INIT;
+       output_asm_insn ("orh %h1,%?r0,%?r31", operands);
+       cc_prev_status.mdep = address;
+     }
+
+   /* Code below isn't smart enough to move a doubleword in two parts,
+      so use output_move_double to do that in the cases that require it.  */
+   if ((mode == DImode || mode == DFmode)
+       && ! FP_REG_P (operands[0]))
+     return output_move_double (operands);
+
+   return load_opcode (mode, "%L1(%?r31),%0", operands[0]);
+ }
+
+ #if 0
+ /* Load the address specified by OPERANDS[3] into the register
+    specified by OPERANDS[0].
+
+    OPERANDS[3] may be the result of a sum, hence it could either be:
+
+    (1) CONST
+    (2) REG
+    (3) REG + CONST_INT
+    (4) REG + REG + CONST_INT
+    (5) REG + REG  (special case of 4).
+
+    Note that (4) is not a legitimate address.
+    All cases are handled here.  */
+
+ void
+ output_load_address (operands)
+      rtx *operands;
+ {
+   rtx base, offset;
+
+   if (CONSTANT_P (operands[3]))
+     {
+       output_asm_insn ("mov %3,%0", operands);
+       return;
+     }
+
+   if (REG_P (operands[3]))
+     {
+       if (REGNO (operands[0]) != REGNO (operands[3]))
+ 	output_asm_insn ("shl %?r0,%3,%0", operands);
+       return;
+     }
+
+   if (GET_CODE (operands[3]) != PLUS)
+     abort ();
+
+   base = XEXP (operands[3], 0);
+   offset = XEXP (operands[3], 1);
+
+   if (GET_CODE (base) == CONST_INT)
+     {
+       rtx tmp = base;
+       base = offset;
+       offset = tmp;
+     }
+
+   if (GET_CODE (offset) != CONST_INT)
+     {
+       /* Operand is (PLUS (REG) (REG)).  */
+       base = operands[3];
+       offset = const0_rtx;
+     }
+
+   if (REG_P (base))
+     {
+       operands[6] = base;
+       operands[7] = offset;
+       CC_STATUS_PARTIAL_INIT;
+       if (SMALL_INT (offset))
+ 	output_asm_insn ("adds %7,%6,%0", operands);
+       else
+ 	output_asm_insn ("mov %7,%0\n\tadds %0,%6,%0", operands);
+     }
+   else if (GET_CODE (base) == PLUS)
+     {
+       operands[6] = XEXP (base, 0);
+       operands[7] = XEXP (base, 1);
+       operands[8] = offset;
+
+       CC_STATUS_PARTIAL_INIT;
+       if (SMALL_INT (offset))
+ 	output_asm_insn ("adds %6,%7,%0\n\tadds %8,%0,%0", operands);
+       else
+ 	output_asm_insn ("mov %8,%0\n\tadds %0,%6,%0\n\tadds %0,%7,%0", operands);
+     }
+   else
+     abort ();
+ }
+ #endif
+
+ /* Output code to place a size count SIZE in register REG.
+    Because block moves are pipelined, we don't include the
+    first element in the transfer of SIZE to REG.
+    For this, we subtract ALIGN.  (Actually, I think it is not
+    right to subtract on this machine, so right now we don't.)  */
+
+ static void
+ output_size_for_block_move (size, reg, align)
+      rtx size, reg, align;
+ {
+   rtx xoperands[3];
+
+   xoperands[0] = reg;
+   xoperands[1] = size;
+   xoperands[2] = align;
+
+ #if 1
+   cc_status.flags &= ~ CC_KNOW_HI_R31;
+   output_asm_insn (singlemove_string (xoperands), xoperands);
+ #else
+   if (GET_CODE (size) == REG)
+     output_asm_insn ("sub %2,%1,%0", xoperands);
+   else
+     {
+       xoperands[1] = GEN_INT (INTVAL (size) - INTVAL (align));
+       cc_status.flags &= ~ CC_KNOW_HI_R31;
+       output_asm_insn ("mov %1,%0", xoperands);
+     }
+ #endif
+ }
+
+ /* Emit code to perform a block move.
+
+    OPERANDS[0] is the destination.
+    OPERANDS[1] is the source.
+    OPERANDS[2] is the size.
+    OPERANDS[3] is the known safe alignment.
+    OPERANDS[4..6] are pseudos we can safely clobber as temps.  */
+
+ const char *
+ output_block_move (operands)
+      rtx *operands;
+ {
+   /* A vector for our computed operands.  Note that output_load_address
+      makes use of (and can clobber) up to the 8th element of this vector.  */
+   rtx xoperands[10];
+ #if 0
+   rtx zoperands[10];
+ #endif
+   static int movstrsi_label = 0;
+   int i;
+   rtx temp1 = operands[4];
+   rtx alignrtx = operands[3];
+   int align = INTVAL (alignrtx);
+   int chunk_size;
+
+   xoperands[0] = operands[0];
+   xoperands[1] = operands[1];
+   xoperands[2] = temp1;
+
+   /* We can't move more than four bytes at a time
+      because we have only one register to move them through.  */
+   if (align > 4)
+     {
+       align = 4;
+       alignrtx = GEN_INT (4);
+     }
+
+   /* Recognize special cases of block moves.  These occur
+      when GNU C++ is forced to treat something as BLKmode
+      to keep it in memory, when its mode could be represented
+      with something smaller.
+
+      We cannot do this for global variables, since we don't know
+      what pages they don't cross.  Sigh.  */
+   if (GET_CODE (operands[2]) == CONST_INT
+       && ! CONSTANT_ADDRESS_P (operands[0])
+       && ! CONSTANT_ADDRESS_P (operands[1]))
+     {
+       int size = INTVAL (operands[2]);
+       rtx op0 = xoperands[0];
+       rtx op1 = xoperands[1];
+
+       if ((align & 3) == 0 && (size & 3) == 0 && (size >> 2) <= 16)
+ 	{
+ 	  if (memory_address_p (SImode, plus_constant (op0, size))
+ 	      && memory_address_p (SImode, plus_constant (op1, size)))
+ 	    {
+ 	      cc_status.flags &= ~CC_KNOW_HI_R31;
+ 	      for (i = (size>>2)-1; i >= 0; i--)
+ 		{
+ 		  xoperands[0] = plus_constant (op0, i * 4);
+ 		  xoperands[1] = plus_constant (op1, i * 4);
+ 		  output_asm_insn ("ld.l %a1,%?r31\n\tst.l %?r31,%a0",
+ 				   xoperands);
+ 		}
+ 	      return "";
+ 	    }
+ 	}
+       else if ((align & 1) == 0 && (size & 1) == 0 && (size >> 1) <= 16)
+ 	{
+ 	  if (memory_address_p (HImode, plus_constant (op0, size))
+ 	      && memory_address_p (HImode, plus_constant (op1, size)))
+ 	    {
+ 	      cc_status.flags &= ~CC_KNOW_HI_R31;
+ 	      for (i = (size>>1)-1; i >= 0; i--)
+ 		{
+ 		  xoperands[0] = plus_constant (op0, i * 2);
+ 		  xoperands[1] = plus_constant (op1, i * 2);
+ 		  output_asm_insn ("ld.s %a1,%?r31\n\tst.s %?r31,%a0",
+ 				   xoperands);
+ 		}
+ 	      return "";
+ 	    }
+ 	}
+       else if (size <= 16)
+ 	{
+ 	  if (memory_address_p (QImode, plus_constant (op0, size))
+ 	      && memory_address_p (QImode, plus_constant (op1, size)))
+ 	    {
+ 	      cc_status.flags &= ~CC_KNOW_HI_R31;
+ 	      for (i = size-1; i >= 0; i--)
+ 		{
+ 		  xoperands[0] = plus_constant (op0, i);
+ 		  xoperands[1] = plus_constant (op1, i);
+ 		  output_asm_insn ("ld.b %a1,%?r31\n\tst.b %?r31,%a0",
+ 				   xoperands);
+ 		}
+ 	      return "";
+ 	    }
+ 	}
+     }
+
+   /* Since we clobber untold things, nix the condition codes.  */
+   CC_STATUS_INIT;
+
+   /* This is the size of the transfer.
+      Either use the register which already contains the size,
+      or use a free register (used by no operands).  */
+   output_size_for_block_move (operands[2], operands[4], alignrtx);
+
+ #if 0
+   /* Also emit code to decrement the size value by ALIGN.  */
+   zoperands[0] = operands[0];
+   zoperands[3] = plus_constant (operands[0], align);
+   output_load_address (zoperands);
+ #endif
+
+   /* Generate number for unique label.  */
+
+   xoperands[3] = GEN_INT (movstrsi_label++);
+
+   /* Calculate the size of the chunks we will be trying to move first.  */
+
+ #if 0
+   if ((align & 3) == 0)
+     chunk_size = 4;
+   else if ((align & 1) == 0)
+     chunk_size = 2;
+   else
+ #endif
+     chunk_size = 1;
+
+   /* Copy the increment (negative) to a register for bla insn.  */
+
+   xoperands[4] = GEN_INT (- chunk_size);
+   xoperands[5] = operands[5];
+   output_asm_insn ("adds %4,%?r0,%5", xoperands);
+
+   /* Predecrement the loop counter.  This happens again in the `bla'
+      instruction which precedes the loop, but we need to have it done
+      two times before we enter the loop because of the bizarre semantics
+      of the bla instruction.  */
+
+   output_asm_insn ("adds %5,%2,%2", xoperands);
+
+   /* Check for the case where the original count was less than or equal to
+      zero.  Avoid going through the loop at all if the original count was
+      indeed less than or equal to zero.  Note that we treat the count as
+      if it were a signed 32-bit quantity here, rather than an unsigned one,
+      even though we really shouldn't.  We have to do this because of the
+      semantics of the `ble' instruction, which assume that the count is
+      a signed 32-bit value.  Anyway, in practice it won't matter because
+      nobody is going to try to do a memcpy() of more than half of the
+      entire address space (i.e. 2 gigabytes).  */
+
+   output_asm_insn ("bc .Le%3", xoperands);
+
+   /* Make available a register which is a temporary.  */
+
+   xoperands[6] = operands[6];
+
+   /* Now the actual loop.
+      In xoperands, elements 1 and 0 are the input and output vectors.
+      Element 2 is the loop index.  Element 5 is the increment.  */
+
+   output_asm_insn ("subs %1,%5,%1", xoperands);
+   output_asm_insn ("bla %5,%2,.Lm%3", xoperands);
+   output_asm_insn ("adds %0,%2,%6", xoperands);
+   output_asm_insn ("\n.Lm%3:", xoperands);	    /* Label for bla above.  */
+   output_asm_insn ("\n.Ls%3:",  xoperands);	    /* Loop start label. */
+   output_asm_insn ("adds %5,%6,%6", xoperands);
+
+   /* NOTE:  The code here which is supposed to handle the cases where the
+      sources and destinations are known to start on a 4 or 2 byte boundary
+      is currently broken.  It fails to do anything about the overflow
+      bytes which might still need to be copied even after we have copied
+      some number of words or halfwords.  Thus, for now we use the lowest
+      common denominator, i.e. the code which just copies some number of
+      totally unaligned individual bytes.  (See the calculation of
+      chunk_size above.)  */
+
+   if (chunk_size == 4)
+     {
+       output_asm_insn ("ld.l %2(%1),%?r31", xoperands);
+       output_asm_insn ("bla %5,%2,.Ls%3", xoperands);
+       output_asm_insn ("st.l %?r31,8(%6)", xoperands);
+     }
+   else if (chunk_size == 2)
+     {
+       output_asm_insn ("ld.s %2(%1),%?r31", xoperands);
+       output_asm_insn ("bla %5,%2,.Ls%3", xoperands);
+       output_asm_insn ("st.s %?r31,4(%6)", xoperands);
+     }
+   else /* chunk_size == 1 */
+     {
+       output_asm_insn ("ld.b %2(%1),%?r31", xoperands);
+       output_asm_insn ("bla %5,%2,.Ls%3", xoperands);
+       output_asm_insn ("st.b %?r31,2(%6)", xoperands);
+     }
+   output_asm_insn ("\n.Le%3:", xoperands);	    /* Here if count <= 0.  */
+
+   return "";
+ }
+
+ #if 0
+ /* Output a delayed branch insn with the delay insn in its
+    branch slot.  The delayed branch insn template is in TEMPLATE,
+    with operands OPERANDS.  The insn in its delay slot is INSN.
+
+    As a special case, since we know that all memory transfers are via
+    ld/st insns, if we see a (MEM (SYMBOL_REF ...)) we divide the memory
+    reference around the branch as
+
+ 	orh ha%x,%?r0,%?r31
+ 	b ...
+ 	ld/st l%x(%?r31),...
+
+    As another special case, we handle loading (SYMBOL_REF ...) and
+    other large constants around branches as well:
+
+ 	orh h%x,%?r0,%0
+ 	b ...
+ 	or l%x,%0,%1
+
+    */
+ /* ??? Disabled because this re-recognition is incomplete and causes
+    constrain_operands to segfault.  Anyone who cares should fix up
+    the code to use the DBR pass.  */
+
+ const char *
+ output_delayed_branch (template, operands, insn)
+      const char *template;
+      rtx *operands;
+      rtx insn;
+ {
+   rtx src = XVECEXP (PATTERN (insn), 0, 1);
+   rtx dest = XVECEXP (PATTERN (insn), 0, 0);
+
+   /* See if we are doing some branch together with setting some register
+      to some 32-bit value which does (or may) have some of the high-order
+      16 bits set.  If so, we need to set the register in two stages.  One
+      stage must be done before the branch, and the other one can be done
+      in the delay slot.  */
+
+   if ( (GET_CODE (src) == CONST_INT
+ 	&& ((unsigned) INTVAL (src) & (unsigned) 0xffff0000) != (unsigned) 0)
+       || (GET_CODE (src) == SYMBOL_REF)
+       || (GET_CODE (src) == LABEL_REF)
+       || (GET_CODE (src) == CONST))
+     {
+       rtx xoperands[2];
+       xoperands[0] = dest;
+       xoperands[1] = src;
+
+       CC_STATUS_PARTIAL_INIT;
+       /* Output the `orh' insn.  */
+       output_asm_insn ("orh %H1,%?r0,%0", xoperands);
+
+       /* Output the branch instruction next.  */
+       output_asm_insn (template, operands);
+
+       /* Now output the `or' insn.  */
+       output_asm_insn ("or %L1,%0,%0", xoperands);
+     }
+   else if ((GET_CODE (src) == MEM
+ 	    && CONSTANT_ADDRESS_P (XEXP (src, 0)))
+ 	   || (GET_CODE (dest) == MEM
+ 	       && CONSTANT_ADDRESS_P (XEXP (dest, 0))))
+     {
+       rtx xoperands[2];
+       const char *split_template;
+       xoperands[0] = dest;
+       xoperands[1] = src;
+
+       /* Output the `orh' insn.  */
+       if (GET_CODE (src) == MEM)
+ 	{
+ 	  if (! ((cc_prev_status.flags & CC_KNOW_HI_R31)
+ 		 && (cc_prev_status.flags & CC_HI_R31_ADJ)
+ 		 && cc_prev_status.mdep == XEXP (operands[1], 0)))
+ 	    {
+ 	      CC_STATUS_INIT;
+ 	      output_asm_insn ("orh %h1,%?r0,%?r31", xoperands);
+ 	    }
+ 	  split_template = load_opcode (GET_MODE (dest),
+ 					"%L1(%?r31),%0", dest);
+ 	}
+       else
+ 	{
+ 	  if (! ((cc_prev_status.flags & CC_KNOW_HI_R31)
+ 		 && (cc_prev_status.flags & CC_HI_R31_ADJ)
+ 		 && cc_prev_status.mdep == XEXP (operands[0], 0)))
+ 	    {
+ 	      CC_STATUS_INIT;
+ 	      output_asm_insn ("orh %h0,%?r0,%?r31", xoperands);
+ 	    }
+ 	  split_template = store_opcode (GET_MODE (dest),
+ 					 "%r1,%L0(%?r31)", src);
+ 	}
+
+       /* Output the branch instruction next.  */
+       output_asm_insn (template, operands);
+
+       /* Now output the load or store.
+ 	 No need to do a CC_STATUS_INIT, because we are branching anyway.  */
+       output_asm_insn (split_template, xoperands);
+     }
+   else
+     {
+       int insn_code_number;
+       rtx pat = gen_rtx_SET (VOIDmode, dest, src);
+       rtx delay_insn = gen_rtx_INSN (VOIDmode, 0, 0, 0, pat, -1, 0, 0);
+       int i;
+
+       /* Output the branch instruction first.  */
+       output_asm_insn (template, operands);
+
+       /* Now recognize the insn which we put in its delay slot.
+ 	 We must do this after outputting the branch insn,
+ 	 since operands may just be a pointer to `recog_data.operand'.  */
+       INSN_CODE (delay_insn) = insn_code_number
+ 	= recog (pat, delay_insn, NULL);
+       if (insn_code_number == -1)
+ 	abort ();
+
+       for (i = 0; i < insn_data[insn_code_number].n_operands; i++)
+ 	{
+ 	  if (GET_CODE (recog_data.operand[i]) == SUBREG)
+ 	    alter_subreg (&recog_data.operand[i]);
+ 	}
+
+       insn_extract (delay_insn);
+       if (! constrain_operands (1))
+ 	fatal_insn_not_found (delay_insn);
+
+       template = get_insn_template (insn_code_number, delay_insn);
+       output_asm_insn (template, recog_data.operand);
+     }
+   CC_STATUS_INIT;
+   return "";
+ }
+
+ /* Output a newly constructed insn DELAY_INSN.  */
+ const char *
+ output_delay_insn (delay_insn)
+      rtx delay_insn;
+ {
+   const char *template;
+   int insn_code_number;
+   int i;
+
+   /* Now recognize the insn which we put in its delay slot.
+      We must do this after outputting the branch insn,
+      since operands may just be a pointer to `recog_data.operand'.  */
+   insn_code_number = recog_memoized (delay_insn);
+   if (insn_code_number == -1)
+     abort ();
+
+   /* Extract the operands of this delay insn.  */
+   INSN_CODE (delay_insn) = insn_code_number;
+   insn_extract (delay_insn);
+
+   /* It is possible that this insn has not been properly scanned by final
+      yet.  If this insn's operands don't appear in the peephole's
+      actual operands, then they won't be fixed up by final, so we
+      make sure they get fixed up here.  -- This is a kludge.  */
+   for (i = 0; i < insn_data[insn_code_number].n_operands; i++)
+     {
+       if (GET_CODE (recog_data.operand[i]) == SUBREG)
+ 	alter_subreg (&recog_data.operand[i]);
+     }
+
+   if (! constrain_operands (1))
+     abort ();
+
+   cc_prev_status = cc_status;
+
+   /* Update `cc_status' for this instruction.
+      The instruction's output routine may change it further.
+      If the output routine for a jump insn needs to depend
+      on the cc status, it should look at cc_prev_status.  */
+
+   NOTICE_UPDATE_CC (PATTERN (delay_insn), delay_insn);
+
+   /* Now get the template for what this insn would
+      have been, without the branch.  */
+
+   template = get_insn_template (insn_code_number, delay_insn);
+   output_asm_insn (template, recog_data.operand);
+   return "";
+ }
+ #endif
+
+ /* Special routine to convert an SFmode value represented as a
+    CONST_DOUBLE into its equivalent unsigned long bit pattern.
+    We convert the value from a double precision floating-point
+    value to single precision first, and thence to a bit-wise
+    equivalent unsigned long value.  This routine is used when
+    generating an immediate move of an SFmode value directly
+    into a general register because the svr4 assembler doesn't
+    grok floating literals in instruction operand contexts.  */
+
+ unsigned long
+ sfmode_constant_to_ulong (x)
+      rtx x;
+ {
+   REAL_VALUE_TYPE d;
+   unsigned long l;
+
+   if (GET_CODE (x) != CONST_DOUBLE || GET_MODE (x) != SFmode)
+     abort ();
+
+   REAL_VALUE_FROM_CONST_DOUBLE (d, x);
+   REAL_VALUE_TO_TARGET_SINGLE (d, l);
+   return l;
+ }
+
+ /* This function generates the assembly code for function entry.
+
+    ASM_FILE is a stdio stream to output the code to.
+    SIZE is an int: how many units of temporary storage to allocate.
+
+    Refer to the array `regs_ever_live' to determine which registers
+    to save; `regs_ever_live[I]' is nonzero if register number I
+    is ever used in the function.  This macro is responsible for
+    knowing which registers should not be saved even if used.
+
+    NOTE: `frame_lower_bytes' is the count of bytes which will lie
+    between the new `fp' value and the new `sp' value after the
+    prologue is done.  `frame_upper_bytes' is the count of bytes
+    that will lie between the new `fp' and the *old* `sp' value
+    after the new `fp' is setup (in the prologue).  The upper
+    part of each frame always includes at least 2 words (8 bytes)
+    to hold the saved frame pointer and the saved return address.
+
+    The svr4 ABI for the i860 now requires that the values of the
+    stack pointer and frame pointer registers be kept aligned to
+    16-byte boundaries at all times.  We obey that restriction here.
+
+    The svr4 ABI for the i860 is entirely vague when it comes to specifying
+    exactly where the "preserved" registers should be saved.  The native
+    svr4 C compiler I now have doesn't help to clarify the requirements
+    very much because it is plainly out-of-date and non-ABI-compliant
+    (in at least one important way, i.e. how it generates function
+    epilogues).
+
+    The native svr4 C compiler saves the "preserved" registers (i.e.
+    r4-r15 and f2-f7) in the lower part of a frame (i.e. at negative
+    offsets from the frame pointer).
+
+    Previous versions of GCC also saved the "preserved" registers in the
+    "negative" part of the frame, but they saved them using positive
+    offsets from the (adjusted) stack pointer (after it had been adjusted
+    to allocate space for the new frame).  That's just plain wrong
+    because if the current function calls alloca(), the stack pointer
+    will get moved, and it will be impossible to restore the registers
+    properly again after that.
+
+    Both compilers handled parameter registers (i.e. r16-r27 and f8-f15)
+    by copying their values either into various "preserved" registers or
+    into stack slots in the lower part of the current frame (as seemed
+    appropriate, depending upon subsequent usage of these values).
+
+    Here we want to save the preserved registers at some offset from the
+    frame pointer register so as to avoid any possible problems arising
+    from calls to alloca().  We can either save them at small positive
+    offsets from the frame pointer, or at small negative offsets from
+    the frame pointer.  If we save them at small negative offsets from
+    the frame pointer (i.e. in the lower part of the frame) then we
+    must tell the rest of GCC (via STARTING_FRAME_OFFSET) exactly how
+    many bytes of space we plan to use in the lower part of the frame
+    for this purpose.  Since other parts of the compiler reference the
+    value of STARTING_FRAME_OFFSET long before final() calls this function,
+    we would have to go ahead and assume the worst-case storage requirements
+    for saving all of the "preserved" registers (and use that number, i.e.
+    `80', to define STARTING_FRAME_OFFSET) if we wanted to save them in
+    the lower part of the frame.  That could potentially be very wasteful,
+    and that wastefulness could really hamper people compiling for embedded
+    i860 targets with very tight limits on stack space.  Thus, we choose
+    here to save the preserved registers in the upper part of the
+    frame, so that we can decide at the very last minute how much (or how
+    little) space we must allocate for this purpose.
+
+    To satisfy the needs of the svr4 ABI "tdesc" scheme, preserved
+    registers must always be saved so that the saved values of registers
+    with higher numbers are at higher addresses.  We obey that restriction
+    here.
+
+    There are two somewhat different ways that you can generate prologues
+    here... i.e. pedantically ABI-compliant, and the "other" way.  The
+    "other" way is more consistent with what is currently generated by the
+    "native" svr4 C compiler for the i860.  That's important if you want
+    to use the current (as of 8/91) incarnation of svr4 SDB for the i860.
+    The SVR4 SDB for the i860 insists on having function prologues be
+    non-ABI-compliant!
+
+    To get fully ABI-compliant prologues, define I860_STRICT_ABI_PROLOGUES
+    in the i860/sysv4.h file.  (By default this is *not* defined).
+
+    The differences between the ABI-compliant and non-ABI-compliant prologues
+    are that (a) the ABI version seems to require the use of *signed*
+    (rather than unsigned) adds and subtracts, and (b) the ordering of
+    the various steps (e.g. saving preserved registers, saving the
+    return address, setting up the new frame pointer value) is different.
+
+    For strict ABI compliance, it seems to be the case that the very last
+    thing that is supposed to happen in the prologue is getting the frame
+    pointer set to its new value (but only after everything else has
+    already been properly setup).  We do that here, but only if the symbol
+    I860_STRICT_ABI_PROLOGUES is defined.
+ */
+
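+ /* Purely illustrative sketch of the resulting frame layout (the sizes
+    shown are hypothetical, not something this port always emits).  With
+    frame_upper_bytes == 16 and frame_lower_bytes == 48, the non-strict
+    prologue below produces roughly:
+
+          higher addresses
+        +---------------------------+
+        | caller's frame            |
+        +---------------------------+  <- sp on entry
+        | preserved r4-r15 / f2-f7  |     at must_preserve_bytes(fp) and up
+        | saved r1 (if needed)      |     at 4(fp)
+        | saved fp                  |     at 0(fp)   <- new fp
+        +---------------------------+
+        | locals and alloca space   |     (the "lower" frame_lower_bytes)
+        +---------------------------+  <- new sp
+          lower addresses                                              */
+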
+ #ifndef STACK_ALIGNMENT
+ #define STACK_ALIGNMENT	16
+ #endif
+
+ const char *current_function_original_name;
+
+ static int must_preserve_r1;
+ static unsigned must_preserve_bytes;
+
+ static void
+ i860_output_function_prologue (asm_file, local_bytes)
+      register FILE *asm_file;
+      register HOST_WIDE_INT local_bytes;
+ {
+   register HOST_WIDE_INT frame_lower_bytes;
+   register HOST_WIDE_INT frame_upper_bytes;
+   register HOST_WIDE_INT total_fsize;
+   register unsigned preserved_reg_bytes = 0;
+   register unsigned i;
+   register unsigned preserved_so_far = 0;
+
+   must_preserve_r1 = (optimize < 2 || ! leaf_function_p ());
+   must_preserve_bytes = 4 + (must_preserve_r1 ? 4 : 0);
+
+   /* Count registers that need preserving.  Ignore r0.  It never needs
+      preserving.  */
+
+   for (i = 1; i < FIRST_PSEUDO_REGISTER; i++)
+     {
+       if (regs_ever_live[i] && ! call_used_regs[i])
+         preserved_reg_bytes += 4;
+     }
+
+   /* Round-up the frame_lower_bytes so that it's a multiple of 16. */
+
+   frame_lower_bytes = (local_bytes + STACK_ALIGNMENT - 1) & -STACK_ALIGNMENT;
+
+   /* The upper part of each frame will contain the saved fp,
+      the saved r1, and stack slots for all of the other "preserved"
+      registers that we find we will need to save & restore. */
+
+   frame_upper_bytes = must_preserve_bytes + preserved_reg_bytes;
+
+   /* Round-up the frame_upper_bytes so that it's a multiple of 16. */
+
+   frame_upper_bytes
+     = (frame_upper_bytes + STACK_ALIGNMENT - 1) & -STACK_ALIGNMENT;
+
+   total_fsize = frame_upper_bytes + frame_lower_bytes;
+
+ #ifndef I860_STRICT_ABI_PROLOGUES
+
+   /* There are two kinds of function prologues.
+      You use the "small" version if the total frame size is
+      small enough so that it can fit into an immediate 16-bit
+      value in one instruction.  Otherwise, you use the "large"
+      version of the function prologue.  */
+
+   if (total_fsize > 0x7fff)
+     {
+       /* Adjust the stack pointer.  The ABI sez to do this using `adds',
+ 	 but the native C compiler on svr4 uses `addu'.  */
+
+       fprintf (asm_file, "\taddu -%d,%ssp,%ssp\n",
+ 	frame_upper_bytes, i860_reg_prefix, i860_reg_prefix);
+
+       /* Save the old frame pointer.  */
+
+       fprintf (asm_file, "\tst.l %sfp,0(%ssp)\n",
+ 	i860_reg_prefix, i860_reg_prefix);
+
+       /* Setup the new frame pointer.  The ABI sez to do this after
+ 	 preserving registers (using adds), but that's not what the
+ 	 native C compiler on svr4 does.  */
+
+       fprintf (asm_file, "\taddu 0,%ssp,%sfp\n",
+ 	i860_reg_prefix, i860_reg_prefix);
+
+       /* Get the value of frame_lower_bytes into r31.  */
+
+       fprintf (asm_file, "\torh %d,%sr0,%sr31\n",
+ 	frame_lower_bytes >> 16, i860_reg_prefix, i860_reg_prefix);
+       fprintf (asm_file, "\tor %d,%sr31,%sr31\n",
+ 	frame_lower_bytes & 0xffff, i860_reg_prefix, i860_reg_prefix);
+
+       /* Now re-adjust the stack pointer using the value in r31.
+ 	 The ABI sez to do this with `subs' but SDB may prefer `subu'.  */
+
+       fprintf (asm_file, "\tsubu %ssp,%sr31,%ssp\n",
+ 	i860_reg_prefix, i860_reg_prefix, i860_reg_prefix);
+
+       /* Preserve registers.  The ABI sez to do this before setting
+ 	 up the new frame pointer, but that's not what the native
+ 	 C compiler on svr4 does.  */
+
+       for (i = 1; i < 32; i++)
+         if (regs_ever_live[i] && ! call_used_regs[i])
+           fprintf (asm_file, "\tst.l %s%s,%d(%sfp)\n",
+ 	    i860_reg_prefix, reg_names[i],
+ 	    must_preserve_bytes  + (4 * preserved_so_far++),
+ 	    i860_reg_prefix);
+
+       for (i = 32; i < 64; i++)
+         if (regs_ever_live[i] && ! call_used_regs[i])
+           fprintf (asm_file, "\tfst.l %s%s,%d(%sfp)\n",
+ 	    i860_reg_prefix, reg_names[i],
+ 	    must_preserve_bytes + (4 * preserved_so_far++),
+ 	    i860_reg_prefix);
+
+       /* Save the return address.  */
+
+       if (must_preserve_r1)
+         fprintf (asm_file, "\tst.l %sr1,4(%sfp)\n",
+ 	  i860_reg_prefix, i860_reg_prefix);
+     }
+   else
+     {
+       /* Adjust the stack pointer.  The ABI sez to do this using `adds',
+ 	 but the native C compiler on svr4 uses `addu'.  */
+
+       fprintf (asm_file, "\taddu -%d,%ssp,%ssp\n",
+ 	total_fsize, i860_reg_prefix, i860_reg_prefix);
+
+       /* Save the old frame pointer.  */
+
+       fprintf (asm_file, "\tst.l %sfp,%d(%ssp)\n",
+ 	i860_reg_prefix, frame_lower_bytes, i860_reg_prefix);
+
+       /* Setup the new frame pointer.  The ABI sez to do this after
+ 	 preserving registers and after saving the return address
+ 	 (and it sez to do this using adds), but that's not what the
+ 	 native C compiler on svr4 does.  */
+
+       fprintf (asm_file, "\taddu %d,%ssp,%sfp\n",
+ 	frame_lower_bytes, i860_reg_prefix, i860_reg_prefix);
+
+       /* Preserve registers.  The ABI sez to do this before setting
+ 	 up the new frame pointer, but that's not what the native
+ 	 compiler on svr4 does.  */
+
+       for (i = 1; i < 32; i++)
+         if (regs_ever_live[i] && ! call_used_regs[i])
+           fprintf (asm_file, "\tst.l %s%s,%d(%sfp)\n",
+ 	    i860_reg_prefix, reg_names[i],
+ 	    must_preserve_bytes + (4 * preserved_so_far++),
+ 	    i860_reg_prefix);
+
+       for (i = 32; i < 64; i++)
+         if (regs_ever_live[i] && ! call_used_regs[i])
+           fprintf (asm_file, "\tfst.l %s%s,%d(%sfp)\n",
+ 	    i860_reg_prefix, reg_names[i],
+ 	    must_preserve_bytes + (4 * preserved_so_far++),
+ 	    i860_reg_prefix);
+
+       /* Save the return address.  The ABI sez to do this earlier,
+ 	 and also via an offset from %sp, but the native C compiler
+ 	 on svr4 does it later (i.e. now) and uses an offset from
+ 	 %fp.  */
+
+       if (must_preserve_r1)
+         fprintf (asm_file, "\tst.l %sr1,4(%sfp)\n",
+ 	  i860_reg_prefix, i860_reg_prefix);
+     }
+
+ #else /* defined(I860_STRICT_ABI_PROLOGUES) */
+
+   /* There are two kinds of function prologues.
+      You use the "small" version if the total frame size is
+      small enough so that it can fit into an immediate 16-bit
+      value in one instruction.  Otherwise, you use the "large"
+      version of the function prologue.  */
+
+   if (total_fsize > 0x7fff)
+     {
+       /* Adjust the stack pointer (thereby allocating a new frame).  */
+
+       fprintf (asm_file, "\tadds -%d,%ssp,%ssp\n",
+ 	frame_upper_bytes, i860_reg_prefix, i860_reg_prefix);
+
+       /* Save the caller's frame pointer.  */
+
+       fprintf (asm_file, "\tst.l %sfp,0(%ssp)\n",
+ 	i860_reg_prefix, i860_reg_prefix);
+
+       /* Save return address.  */
+
+       if (must_preserve_r1)
+         fprintf (asm_file, "\tst.l %sr1,4(%ssp)\n",
+ 	  i860_reg_prefix, i860_reg_prefix);
+
+       /* Get the value of frame_lower_bytes into r31 for later use.  */
+
+       fprintf (asm_file, "\torh %d,%sr0,%sr31\n",
+ 	frame_lower_bytes >> 16, i860_reg_prefix, i860_reg_prefix);
+       fprintf (asm_file, "\tor %d,%sr31,%sr31\n",
+ 	frame_lower_bytes & 0xffff, i860_reg_prefix, i860_reg_prefix);
+
+       /* Now re-adjust the stack pointer using the value in r31.  */
+
+       fprintf (asm_file, "\tsubs %ssp,%sr31,%ssp\n",
+ 	i860_reg_prefix, i860_reg_prefix, i860_reg_prefix);
+
+       /* Pre-compute value to be used as the new frame pointer.  */
+
+       fprintf (asm_file, "\tadds %ssp,%sr31,%sr31\n",
+ 	i860_reg_prefix, i860_reg_prefix, i860_reg_prefix);
+
+       /* Preserve registers.  */
+
+       for (i = 1; i < 32; i++)
+         if (regs_ever_live[i] && ! call_used_regs[i])
+           fprintf (asm_file, "\tst.l %s%s,%d(%sr31)\n",
+ 	    i860_reg_prefix, reg_names[i],
+ 	    must_preserve_bytes + (4 * preserved_so_far++),
+ 	    i860_reg_prefix);
+
+       for (i = 32; i < 64; i++)
+         if (regs_ever_live[i] && ! call_used_regs[i])
+           fprintf (asm_file, "\tfst.l %s%s,%d(%sr31)\n",
+ 	    i860_reg_prefix, reg_names[i],
+ 	    must_preserve_bytes + (4 * preserved_so_far++),
+ 	    i860_reg_prefix);
+
+       /* Actually set the new value of the frame pointer.  */
+
+       fprintf (asm_file, "\tmov %sr31,%sfp\n",
+ 	i860_reg_prefix, i860_reg_prefix);
+     }
+   else
+     {
+       /* Adjust the stack pointer.  */
+
+       fprintf (asm_file, "\tadds -%d,%ssp,%ssp\n",
+ 	total_fsize, i860_reg_prefix, i860_reg_prefix);
+
+       /* Save the caller's frame pointer.  */
+
+       fprintf (asm_file, "\tst.l %sfp,%d(%ssp)\n",
+ 	i860_reg_prefix, frame_lower_bytes, i860_reg_prefix);
+
+       /* Save the return address.  */
+
+       if (must_preserve_r1)
+         fprintf (asm_file, "\tst.l %sr1,%d(%ssp)\n",
+ 	  i860_reg_prefix, frame_lower_bytes + 4, i860_reg_prefix);
+
+       /* Preserve registers.  */
+
+       for (i = 1; i < 32; i++)
+         if (regs_ever_live[i] && ! call_used_regs[i])
+           fprintf (asm_file, "\tst.l %s%s,%d(%ssp)\n",
+ 	    i860_reg_prefix, reg_names[i],
+ 	    frame_lower_bytes + must_preserve_bytes + (4 * preserved_so_far++),
+ 	    i860_reg_prefix);
+
+       for (i = 32; i < 64; i++)
+         if (regs_ever_live[i] && ! call_used_regs[i])
+           fprintf (asm_file, "\tfst.l %s%s,%d(%ssp)\n",
+ 	    i860_reg_prefix, reg_names[i],
+ 	    frame_lower_bytes + must_preserve_bytes + (4 * preserved_so_far++),
+ 	    i860_reg_prefix);
+
+       /* Setup the new frame pointer.  */
+
+       fprintf (asm_file, "\tadds %d,%ssp,%sfp\n",
+ 	frame_lower_bytes, i860_reg_prefix, i860_reg_prefix);
+     }
+ #endif /* defined(I860_STRICT_ABI_PROLOGUES) */
+
+ #ifdef ASM_OUTPUT_PROLOGUE_SUFFIX
+   ASM_OUTPUT_PROLOGUE_SUFFIX (asm_file);
+ #endif /* defined(ASM_OUTPUT_PROLOGUE_SUFFIX) */
+ }
+
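+ /* A sketch of what the non-strict "small frame" path above would emit
+    for a hypothetical function with frame_lower_bytes == 48, one
+    preserved register (r4) and a live r1, so total_fsize == 64 (the
+    register prefix is omitted here for brevity):
+
+        addu -64,sp,sp        allocate the whole frame
+        st.l fp,48(sp)        save caller's fp just above the locals
+        addu 48,sp,fp         point the new fp at the saved-fp slot
+        st.l r4,8(fp)         preserved registers above the fp/r1 slots
+        st.l r1,4(fp)         save the return address
+ */
+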
+ /* This function generates the assembly code for function exit.
+
+    ASM_FILE is a stdio stream to output the code to.
+    SIZE is an int: how many units of temporary storage to allocate.
+
+    The function epilogue should not depend on the current stack pointer!
+    It should use the frame pointer only.  This is mandatory because
+    of alloca; we also take advantage of it to omit stack adjustments
+    before returning.
+
+    Note that when we go to restore the preserved register values we must
+    not try to address their slots by using offsets from the stack pointer.
+    That's because the stack pointer may have been moved during the function
+    execution due to a call to alloca().  Rather, we must restore all
+    preserved registers via offsets from the frame pointer value.
+
+    Note also that when the current frame is being "popped" (by adjusting
+    the value of the stack pointer) on function exit, we must (for the
+    sake of alloca) set the new value of the stack pointer based upon
+    the current value of the frame pointer.  We can't just add what we
+    believe to be the (static) frame size to the stack pointer because
+    if we did that, and alloca() had been called during this function,
+    we would end up returning *without* having fully deallocated all of
+    the space grabbed by alloca.  If that happened, and a function
+    containing one or more alloca() calls was called over and over again,
+    then the stack would grow without limit!
+
+    Finally note that the epilogues generated here are completely ABI
+    compliant.  They go out of their way to ensure that the value in
+    the frame pointer register is never less than the value in the stack
+    pointer register.  It's not clear why this relationship needs to be
+    maintained at all times, but maintaining it only costs one extra
+    instruction, so what the hell.
+ */
+
+ /* This corresponds to a version 4 TDESC structure. Lower numbered
+    versions successively omit the last word of the structure. We
+    don't try to handle version 5 here. */
+
+ typedef struct TDESC_flags {
+ 	int version:4;
+ 	int reg_packing:1;
+ 	int callable_block:1;
+ 	int reserved:4;
+ 	int fregs:6;	/* fp regs 2-7 */
+ 	int iregs:16;	/* regs 0-15 */
+ } TDESC_flags;
+
+ typedef struct TDESC {
+ 	TDESC_flags flags;
+ 	int integer_reg_offset;		/* same as must_preserve_bytes */
+ 	int floating_point_reg_offset;
+ 	unsigned int positive_frame_size;	/* same as frame_upper_bytes */
+ 	unsigned int negative_frame_size;	/* same as frame_lower_bytes */
+ } TDESC;
+
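+ /* An informal reading of the flags word built below (not an
+    authoritative description of the svr4 tdesc format): bit N of
+    `iregs' marks integer register rN as saved -- bit 3 (the old fp)
+    is always set, bit 1 is set when r1 is saved, and the loops below
+    set bits 4-15 for r4-r15 -- while the six `fregs' bits mark f2-f7.
+    E.g. a function that saves fp, r1 and r4 ends up with
+    iregs == 0x1a (bits 1, 3 and 4).  */
+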
+ static void
+ i860_output_function_epilogue (asm_file, local_bytes)
+      register FILE *asm_file;
+      register HOST_WIDE_INT local_bytes;
+ {
+   register HOST_WIDE_INT frame_upper_bytes;
+   register HOST_WIDE_INT frame_lower_bytes;
+   register HOST_WIDE_INT preserved_reg_bytes = 0;
+   register unsigned i;
+   register unsigned restored_so_far = 0;
+   register unsigned int_restored;
+   register unsigned mask;
+   unsigned intflags=0;
+   register TDESC_flags *flags = (TDESC_flags *) &intflags;
+ #ifdef	OUTPUT_TDESC	/* Output an ABI-compliant TDESC entry */
+   const char *long_op = integer_asm_op (4, TRUE);
+ #endif
+
+   flags->version = 4;
+   flags->reg_packing = 1;
+   flags->iregs = 8;	/* old fp always gets saved */
+
+   /* Round-up the frame_lower_bytes so that it's a multiple of 16. */
+
+   frame_lower_bytes = (local_bytes + STACK_ALIGNMENT - 1) & -STACK_ALIGNMENT;
+
+   /* Count the number of registers that were preserved in the prologue.
+      Ignore r0.  It is never preserved.  */
+
+   for (i = 1; i < FIRST_PSEUDO_REGISTER; i++)
+     {
+       if (regs_ever_live[i] && ! call_used_regs[i])
+         preserved_reg_bytes += 4;
+     }
+
+   /* The upper part of each frame will contain only the saved fp,
+      the saved r1, and stack slots for all of the other "preserved"
+      registers that we find we will need to save & restore. */
+
+   frame_upper_bytes = must_preserve_bytes + preserved_reg_bytes;
+
+   /* Round-up frame_upper_bytes so that it is a multiple of 16. */
+
+   frame_upper_bytes
+     = (frame_upper_bytes + STACK_ALIGNMENT - 1) & -STACK_ALIGNMENT;
+
+   /* Restore all of the "preserved" registers that need restoring.  */
+
+   mask = 2;
+
+   for (i = 1; i < 32; i++, mask<<=1)
+     if (regs_ever_live[i] && ! call_used_regs[i]) {
+       fprintf (asm_file, "\tld.l %d(%sfp),%s%s\n",
+ 	must_preserve_bytes + (4 * restored_so_far++),
+ 	i860_reg_prefix, i860_reg_prefix, reg_names[i]);
+       if (i > 3 && i < 16)
+ 	flags->iregs |= mask;
+     }
+
+   int_restored = restored_so_far;
+   mask = 1;
+
+   for (i = 32; i < 64; i++) {
+     if (regs_ever_live[i] && ! call_used_regs[i]) {
+       fprintf (asm_file, "\tfld.l %d(%sfp),%s%s\n",
+ 	must_preserve_bytes + (4 * restored_so_far++),
+ 	i860_reg_prefix, i860_reg_prefix, reg_names[i]);
+       if (i > 33 && i < 40)
+ 	flags->fregs |= mask;
+     }
+     if (i > 33 && i < 40)
+       mask<<=1;
+   }
+
+   /* Get the value we plan to use to restore the stack pointer into r31.  */
+
+   fprintf (asm_file, "\tadds %d,%sfp,%sr31\n",
+     frame_upper_bytes, i860_reg_prefix, i860_reg_prefix);
+
+   /* Restore the return address and the old frame pointer.  */
+
+   if (must_preserve_r1) {
+     fprintf (asm_file, "\tld.l 4(%sfp),%sr1\n",
+       i860_reg_prefix, i860_reg_prefix);
+     flags->iregs |= 2;
+   }
+
+   fprintf (asm_file, "\tld.l 0(%sfp),%sfp\n",
+     i860_reg_prefix, i860_reg_prefix);
+
+   /* Return and restore the old stack pointer value.  */
+
+   fprintf (asm_file, "\tbri %sr1\n\tmov %sr31,%ssp\n",
+     i860_reg_prefix, i860_reg_prefix, i860_reg_prefix);
+
+ #ifdef	OUTPUT_TDESC	/* Output an ABI-compliant TDESC entry */
+   if (! frame_lower_bytes) {
+     flags->version--;
+     if (! frame_upper_bytes) {
+       flags->version--;
+       if (restored_so_far == int_restored)	/* No FP saves */
+ 	flags->version--;
+     }
+   }
+   assemble_name(asm_file,current_function_original_name);
+   fputs(".TDESC:\n", asm_file);
+   fprintf(asm_file, "%s 0x%0x\n", long_op, intflags);
+   fprintf(asm_file, "%s %d\n", long_op,
+ 	int_restored ? must_preserve_bytes : 0);
+   if (flags->version > 1) {
+     fprintf(asm_file, "%s %d\n", long_op,
+ 	(restored_so_far == int_restored) ? 0 : must_preserve_bytes +
+ 	  (4 * int_restored));
+     if (flags->version > 2) {
+       fprintf(asm_file, "%s %d\n", long_op, frame_upper_bytes);
+       if (flags->version > 3)
+ 	fprintf(asm_file, "%s %d\n", long_op, frame_lower_bytes);
+     }
+   }
+   tdesc_section();
+   fprintf(asm_file, "%s ", long_op);
+   assemble_name(asm_file, current_function_original_name);
+   fprintf(asm_file, "\n%s ", long_op);
+   assemble_name(asm_file, current_function_original_name);
+   fputs(".TDESC\n", asm_file);
+   text_section();
+ #endif
+ }
+
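+ /* Continuing the hypothetical prologue example above (one preserved
+    register r4, a live r1, frame_upper_bytes == 16), the epilogue
+    emitted by the code above would look roughly like:
+
+        ld.l 8(fp),r4         restore preserved registers via fp only
+        adds 16,fp,r31        r31 = old sp (fp + frame_upper_bytes)
+        ld.l 4(fp),r1         restore the return address
+        ld.l 0(fp),fp         restore the caller's fp
+        bri r1                return ...
+        mov r31,sp            ... popping the frame in the delay slot
+ */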
+
+ /* Expand a library call to __builtin_saveregs.  */
+ rtx
+ i860_saveregs ()
+ {
+   rtx fn = gen_rtx_SYMBOL_REF (Pmode, "__builtin_saveregs");
+   rtx save = gen_reg_rtx (Pmode);
+   rtx valreg = LIBCALL_VALUE (Pmode);
+   rtx ret;
+
+   /* The return value register overlaps the first argument register.
+      Save and restore it around the call.  */
+   emit_move_insn (save, valreg);
+   ret = emit_library_call_value (fn, NULL_RTX, 1, Pmode, 0);
+   if (GET_CODE (ret) != REG || REGNO (ret) < FIRST_PSEUDO_REGISTER)
+     ret = copy_to_reg (ret);
+   emit_move_insn (valreg, save);
+
+   return ret;
+ }
+
+ tree
+ i860_build_va_list ()
+ {
+   tree field_ireg_used, field_freg_used, field_reg_base, field_mem_ptr;
+   tree record;
+
+   record = make_node (RECORD_TYPE);
+
+   field_ireg_used = build_decl (FIELD_DECL, get_identifier ("__ireg_used"),
+ 				unsigned_type_node);
+   field_freg_used = build_decl (FIELD_DECL, get_identifier ("__freg_used"),
+ 				unsigned_type_node);
+   field_reg_base = build_decl (FIELD_DECL, get_identifier ("__reg_base"),
+ 			       ptr_type_node);
+   field_mem_ptr = build_decl (FIELD_DECL, get_identifier ("__mem_ptr"),
+ 			      ptr_type_node);
+
+   DECL_FIELD_CONTEXT (field_ireg_used) = record;
+   DECL_FIELD_CONTEXT (field_freg_used) = record;
+   DECL_FIELD_CONTEXT (field_reg_base) = record;
+   DECL_FIELD_CONTEXT (field_mem_ptr) = record;
+
+ #ifdef I860_SVR4_VA_LIST
+   TYPE_FIELDS (record) = field_ireg_used;
+   TREE_CHAIN (field_ireg_used) = field_freg_used;
+   TREE_CHAIN (field_freg_used) = field_reg_base;
+   TREE_CHAIN (field_reg_base) = field_mem_ptr;
+ #else
+   TYPE_FIELDS (record) = field_reg_base;
+   TREE_CHAIN (field_reg_base) = field_mem_ptr;
+   TREE_CHAIN (field_mem_ptr) = field_ireg_used;
+   TREE_CHAIN (field_ireg_used) = field_freg_used;
+ #endif
+
+   layout_type (record);
+   return record;
+ }
+
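+ /* Informally, the record built above corresponds to a C structure
+    along the lines of (SVR4 field order shown; without
+    I860_SVR4_VA_LIST the two pointers come first):
+
+        typedef struct {
+          unsigned __ireg_used;
+          unsigned __freg_used;
+          void *__reg_base;
+          void *__mem_ptr;
+        } __builtin_va_list;
+ */
+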
+ void
+ i860_va_start (stdarg_p, valist, nextarg)
+      int stdarg_p;
+      tree valist;
+      rtx nextarg;
+ {
+   tree saveregs, t;
+
+   saveregs = make_tree (build_pointer_type (va_list_type_node),
+ 			expand_builtin_saveregs ());
+   saveregs = build1 (INDIRECT_REF, va_list_type_node, saveregs);
+
+   if (stdarg_p)
+     {
+       tree field_ireg_used, field_freg_used, field_reg_base, field_mem_ptr;
+       tree ireg_used, freg_used, reg_base, mem_ptr;
+
+ #ifdef I860_SVR4_VA_LIST
+       field_ireg_used = TYPE_FIELDS (va_list_type_node);
+       field_freg_used = TREE_CHAIN (field_ireg_used);
+       field_reg_base = TREE_CHAIN (field_freg_used);
+       field_mem_ptr = TREE_CHAIN (field_reg_base);
+ #else
+       field_reg_base = TYPE_FIELDS (va_list_type_node);
+       field_mem_ptr = TREE_CHAIN (field_reg_base);
+       field_ireg_used = TREE_CHAIN (field_mem_ptr);
+       field_freg_used = TREE_CHAIN (field_ireg_used);
+ #endif
+
+       ireg_used = build (COMPONENT_REF, TREE_TYPE (field_ireg_used),
+ 			 valist, field_ireg_used);
+       freg_used = build (COMPONENT_REF, TREE_TYPE (field_freg_used),
+ 			 valist, field_freg_used);
+       reg_base = build (COMPONENT_REF, TREE_TYPE (field_reg_base),
+ 			valist, field_reg_base);
+       mem_ptr = build (COMPONENT_REF, TREE_TYPE (field_mem_ptr),
+ 		       valist, field_mem_ptr);
+
+       t = build_int_2 (current_function_args_info.ints, 0);
+       t = build (MODIFY_EXPR, TREE_TYPE (ireg_used), ireg_used, t);
+       TREE_SIDE_EFFECTS (t) = 1;
+       expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+
+       t = build_int_2 (ROUNDUP (current_function_args_info.floats, 8), 0);
+       t = build (MODIFY_EXPR, TREE_TYPE (freg_used), freg_used, t);
+       TREE_SIDE_EFFECTS (t) = 1;
+       expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+
+       t = build (COMPONENT_REF, TREE_TYPE (field_reg_base),
+ 		 saveregs, field_reg_base);
+       t = build (MODIFY_EXPR, TREE_TYPE (reg_base), reg_base, t);
+       TREE_SIDE_EFFECTS (t) = 1;
+       expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+
+       t = make_tree (ptr_type_node, nextarg);
+       t = build (MODIFY_EXPR, TREE_TYPE (mem_ptr), mem_ptr, t);
+       TREE_SIDE_EFFECTS (t) = 1;
+       expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+     }
+   else
+     {
+       t = build (MODIFY_EXPR, va_list_type_node, valist, saveregs);
+       TREE_SIDE_EFFECTS (t) = 1;
+       expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+     }
+ }
+
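+ /* Rough pseudo-C for the stdarg expansion above (field names as in
+    i860_build_va_list; the <...> values are whatever the argument scan
+    recorded, not literal code):
+
+        valist.__ireg_used = <bytes of named integer args>;
+        valist.__freg_used = ROUNDUP (<bytes of named float args>, 8);
+        valist.__reg_base  = __builtin_saveregs ()->__reg_base;
+        valist.__mem_ptr   = <address of first stack-passed arg>;
+
+    For old-style varargs the whole saveregs record is copied into
+    VALIST instead.  */
+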
+ #define NUM_PARM_FREGS	8
+ #define NUM_PARM_IREGS	12
+ #ifdef I860_SVR4_VARARGS
+ #define FREG_OFFSET 0
+ #define IREG_OFFSET (NUM_PARM_FREGS * UNITS_PER_WORD)
+ #else
+ #define FREG_OFFSET (NUM_PARM_IREGS * UNITS_PER_WORD)
+ #define IREG_OFFSET 0
+ #endif
+
+ rtx
+ i860_va_arg (valist, type)
+      tree valist, type;
+ {
+   tree field_ireg_used, field_freg_used, field_reg_base, field_mem_ptr;
+   tree type_ptr_node, t;
+   rtx lab_over = NULL_RTX;
+   rtx ret, val;
+   HOST_WIDE_INT align;
+
+ #ifdef I860_SVR4_VA_LIST
+   field_ireg_used = TYPE_FIELDS (va_list_type_node);
+   field_freg_used = TREE_CHAIN (field_ireg_used);
+   field_reg_base = TREE_CHAIN (field_freg_used);
+   field_mem_ptr = TREE_CHAIN (field_reg_base);
+ #else
+   field_reg_base = TYPE_FIELDS (va_list_type_node);
+   field_mem_ptr = TREE_CHAIN (field_reg_base);
+   field_ireg_used = TREE_CHAIN (field_mem_ptr);
+   field_freg_used = TREE_CHAIN (field_ireg_used);
+ #endif
+
+   field_ireg_used = build (COMPONENT_REF, TREE_TYPE (field_ireg_used),
+ 			   valist, field_ireg_used);
+   field_freg_used = build (COMPONENT_REF, TREE_TYPE (field_freg_used),
+ 			   valist, field_freg_used);
+   field_reg_base = build (COMPONENT_REF, TREE_TYPE (field_reg_base),
+ 			  valist, field_reg_base);
+   field_mem_ptr = build (COMPONENT_REF, TREE_TYPE (field_mem_ptr),
+ 			 valist, field_mem_ptr);
+
+   ret = gen_reg_rtx (Pmode);
+   type_ptr_node = build_pointer_type (type);
+
+   if (! AGGREGATE_TYPE_P (type))
+     {
+       int nparm, incr, ofs;
+       tree field;
+       rtx lab_false;
+
+       if (FLOAT_TYPE_P (type))
+ 	{
+ 	  field = field_freg_used;
+ 	  nparm = NUM_PARM_FREGS;
+ 	  incr = 2;
+ 	  ofs = FREG_OFFSET;
+ 	}
+       else
+ 	{
+ 	  field = field_ireg_used;
+ 	  nparm = NUM_PARM_IREGS;
+ 	  incr = int_size_in_bytes (type) / UNITS_PER_WORD;
+ 	  ofs = IREG_OFFSET;
+ 	}
+
+       lab_false = gen_label_rtx ();
+       lab_over = gen_label_rtx ();
+
+       emit_cmp_and_jump_insns (expand_expr (field, NULL_RTX, 0, 0),
+ 			       GEN_INT (nparm - incr), GT, const0_rtx,
+ 			       TYPE_MODE (TREE_TYPE (field)),
+ 			       TREE_UNSIGNED (field), lab_false);
+
+       t = fold (build (POSTINCREMENT_EXPR, TREE_TYPE (field), field,
+ 		       build_int_2 (incr, 0)));
+       TREE_SIDE_EFFECTS (t) = 1;
+
+       t = fold (build (MULT_EXPR, TREE_TYPE (field), field,
+ 		       build_int_2 (UNITS_PER_WORD, 0)));
+       TREE_SIDE_EFFECTS (t) = 1;
+
+       t = fold (build (PLUS_EXPR, ptr_type_node, field_reg_base,
+ 		       fold (build (PLUS_EXPR, TREE_TYPE (field), t,
+ 				    build_int_2 (ofs, 0)))));
+       TREE_SIDE_EFFECTS (t) = 1;
+
+       val = expand_expr (t, ret, VOIDmode, EXPAND_NORMAL);
+       if (val != ret)
+ 	emit_move_insn (ret, val);
+
+       emit_jump_insn (gen_jump (lab_over));
+       emit_barrier ();
+       emit_label (lab_false);
+     }
+
+   align = TYPE_ALIGN (type);
+   if (align < BITS_PER_WORD)
+     align = BITS_PER_WORD;
+   align /= BITS_PER_UNIT;
+
+   t = build (PLUS_EXPR, ptr_type_node, field_mem_ptr,
+ 	     build_int_2 (align - 1, 0));
+   t = build (BIT_AND_EXPR, ptr_type_node, t, build_int_2 (-align, -1));
+
+   val = expand_expr (t, ret, VOIDmode, EXPAND_NORMAL);
+   if (val != ret)
+     emit_move_insn (ret, val);
+
+   t = fold (build (PLUS_EXPR, ptr_type_node,
+ 		   make_tree (ptr_type_node, ret),
+ 		   build_int_2 (int_size_in_bytes (type), 0)));
+   t = build (MODIFY_EXPR, ptr_type_node, field_mem_ptr, t);
+   TREE_SIDE_EFFECTS (t) = 1;
+   expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+
+   if (lab_over)
+     emit_label (lab_over);
+
+   return ret;
+ }
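+
+ /* An informal sketch (not a literal transcription) of the code the
+    expansion above generates for a scalar argument:
+
+        if (used <= nparm - incr)
+          arg = fetch from reg_base + register-save offset, used += incr;
+        else {
+          mem_ptr = round mem_ptr up to the argument's alignment
+                    (at least a word);
+          arg = fetch from mem_ptr;
+          mem_ptr += sizeof (type);
+        }
+
+    Aggregates always take the memory path.  */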
diff -c3pN gcc/gcc/config/nil/i860.h gcc/gcc/config/i860/i860.h
*** gcc/gcc/config/nil/i860.h	Wed Dec 31 19:00:00 1969
--- gcc/gcc/config/i860/i860.h	Sat Aug  9 00:14:04 2003
***************
*** 0 ****
--- 1,1319 ----
+ /* Definitions of target machine for GNU compiler, for Intel 860.
+    Copyright (C) 1989, 1991, 1993, 1995, 1996, 1997, 1998, 1999, 2000,
+    2001, 2002 Free Software Foundation, Inc.
+    Hacked substantially by Ron Guilmette (rfg@monkeys.com) to cater to
+    the whims of the System V Release 4 assembler.
+
+ This file is part of GNU CC.
+
+ GNU CC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GNU CC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GNU CC; see the file COPYING.  If not, write to
+ the Free Software Foundation, 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA.  */
+
+
+ /* Note that some other tm.h files include this one and then override
+    many of the definitions that relate to assembler syntax.  */
+
+
+ /* Names to predefine in the preprocessor for this target machine.  */
+
+ #define CPP_PREDEFINES "-Di860 -Dunix -Asystem=unix -Asystem=svr4 -Acpu=i860 -Amachine=i860"
+
+ /* Print subsidiary information on the compiler version in use.  */
+ #define TARGET_VERSION fprintf (stderr, " (i860)");
+
+ /* Run-time compilation parameters selecting different hardware subsets
+    or supersets.
+
+    On the i860, we have one: TARGET_XP.  This option allows gcc to generate
+    additional instructions available only on the newer i860 XP (but not on
+    the older i860 XR).
+ */
+
+ extern int target_flags;
+
+ /* Nonzero if we should generate code to use the fpu.  */
+ #define TARGET_XP (target_flags & 1)
+
+ /* Macro to define tables used to set the flags.
+    This is a list in braces of pairs in braces,
+    each pair being { "NAME", VALUE }
+    where VALUE is the bits to set or minus the bits to clear.
+    An empty string NAME is used to identify the default VALUE.  */
+
+ #define TARGET_SWITCHES  \
+   { {"xp", 1, N_("Generate code which uses the FPU")},			\
+     {"noxp", -1, N_("Do not generate code which uses the FPU")},	\
+     {"xr", -1, N_("Do not generate code which uses the FPU")},		\
+     { "", TARGET_DEFAULT, NULL}}
+
+ #define TARGET_DEFAULT 0
+
+ /* target machine storage layout */
+
+ /* Define this if most significant bit is lowest numbered
+    in instructions that operate on numbered bit-fields.
+    This is a moot question on the i860 due to the lack of bit-field insns.  */
+ #define BITS_BIG_ENDIAN 0
+
+ /* Define this if most significant byte of a word is the lowest numbered.  */
+ /* That is not true on i860 in the mode we will use.  */
+ #define BYTES_BIG_ENDIAN 0
+
+ /* Define this if most significant word of a multiword number is the lowest
+    numbered.  */
+ /* For the i860 this goes with BYTES_BIG_ENDIAN.  */
+ /* NOTE: GCC probably cannot support a big-endian i860
+    because GCC fundamentally assumes that the order of words
+    in memory is the same as the order in registers.
+    That's not true for the big-endian i860.
+    The big-endian i860 isn't important enough to
+    justify the trouble of changing this assumption.  */
+ #define WORDS_BIG_ENDIAN 0
+
+ /* Width of a word, in units (bytes).  */
+ #define UNITS_PER_WORD 4
+
+ /* Allocation boundary (in *bits*) for storing arguments in argument list.  */
+ #define PARM_BOUNDARY 32
+
+ /* Boundary (in *bits*) on which stack pointer should be aligned.  */
+ #define STACK_BOUNDARY 128
+
+ /* Allocation boundary (in *bits*) for the code of a function.  */
+ #define FUNCTION_BOUNDARY 64
+
+ /* Alignment of field after `int : 0' in a structure.  */
+ #define EMPTY_FIELD_BOUNDARY 32
+
+ /* Every structure's size must be a multiple of this.  */
+ #define STRUCTURE_SIZE_BOUNDARY 8
+
+ /* Minimum size in bits of the largest boundary to which any
+    and all fundamental data types supported by the hardware
+    might need to be aligned. No data type wants to be aligned
+    rounder than this.  The i860 supports 128-bit (long double)
+    floating point quantities, and the System V Release 4 i860
+    ABI requires these to be aligned to 16-byte (128-bit)
+    boundaries.  */
+ #define BIGGEST_ALIGNMENT 128
+
+ /* Set this nonzero if move instructions will actually fail to work
+    when given unaligned data.  */
+ #define STRICT_ALIGNMENT 1
+
+ /* If bit field type is int, don't let it cross an int,
+    and give entire struct the alignment of an int.  */
+ #define PCC_BITFIELD_TYPE_MATTERS 1
+
+ /* Standard register usage.  */
+
+ /* Number of actual hardware registers.
+    The hardware registers are assigned numbers for the compiler
+    from 0 to just below FIRST_PSEUDO_REGISTER.
+    All registers that the compiler knows about must be given numbers,
+    even those that are not normally considered general registers.
+
+    i860 has 32 fullword registers and 32 floating point registers.  */
+
+ #define FIRST_PSEUDO_REGISTER 64
+
+ /* 1 for registers that have pervasive standard uses
+    and are not available for the register allocator.
+    On the i860, this includes the always-0 registers
+    and fp, sp, arg pointer, and the return address.
+    Also r31, used for special purposes for constant addresses.  */
+ #define FIXED_REGISTERS  \
+  {1, 1, 1, 1, 0, 0, 0, 0,	\
+   0, 0, 0, 0, 0, 0, 0, 0,	\
+   0, 0, 0, 0, 0, 0, 0, 0,	\
+   0, 0, 0, 0, 0, 0, 0, 1,	\
+   1, 1, 0, 0, 0, 0, 0, 0,	\
+   0, 0, 0, 0, 0, 0, 0, 0,	\
+   0, 0, 0, 0, 0, 0, 0, 0,	\
+   0, 0, 0, 0, 0, 0, 0, 0}
+
+ /* 1 for registers not available across function calls.
+    These must include the FIXED_REGISTERS and also any
+    registers that can be used without being saved.
+    On the i860, these are r0-r3, r16-r31, f0, f1, and f16-f31.  */
+ #define CALL_USED_REGISTERS  \
+  {1, 1, 1, 1, 0, 0, 0, 0,	\
+   0, 0, 0, 0, 0, 0, 0, 0,	\
+   1, 1, 1, 1, 1, 1, 1, 1,	\
+   1, 1, 1, 1, 1, 1, 1, 1,	\
+   1, 1, 0, 0, 0, 0, 0, 0,	\
+   1, 1, 1, 1, 1, 1, 1, 1,	\
+   1, 1, 1, 1, 1, 1, 1, 1,	\
+   1, 1, 1, 1, 1, 1, 1, 1}
+
+ /* Try to get a non-preserved register before trying to get one we will
+    have to preserve.  Try to get an FP register only *after* trying to
+    get a general register, because it is relatively expensive to move
+    into or out of an FP register.  */
+
+ #define REG_ALLOC_ORDER			\
+  {31, 30, 29, 28, 27, 26, 25, 24,	\
+   23, 22, 21, 20, 19, 18, 17, 16,	\
+   15, 14, 13, 12, 11, 10,  9,  8,	\
+    7,  6,  5,  4,  3,  2,  1,  0,	\
+   63, 62, 61, 60, 59, 58, 57, 56,	\
+   55, 54, 53, 52, 51, 50, 49, 48,	\
+   47, 46, 45, 44, 43, 42, 41, 40,	\
+   39, 38, 37, 36, 35, 34, 33, 32}
+
+ /* Return number of consecutive hard regs needed starting at reg REGNO
+    to hold something of mode MODE.
+    This is ordinarily the length in words of a value of mode MODE
+    but can be less for certain modes in special long registers.
+
+    On the i860, all registers hold 32 bits worth.  */
+ #define HARD_REGNO_NREGS(REGNO, MODE)   \
+   (((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
+
+ #define REGNO_MODE_ALIGNED(REGNO, MODE) \
+   (((REGNO) % ((GET_MODE_UNIT_SIZE (MODE) + 3) / 4)) == 0)
+
+ /* Value is 1 if hard register REGNO can hold a value of machine-mode MODE.
+
+    On the i860, we allow anything to go into any registers, but we require
+    any sort of value going into the FP registers to be properly aligned
+    (based on its size) within the FP register set.
+ */
+ #define HARD_REGNO_MODE_OK(REGNO, MODE)					\
+   (((REGNO) < 32) 							\
+    || (MODE) == VOIDmode || (MODE) == BLKmode				\
+    || REGNO_MODE_ALIGNED (REGNO, MODE))
+
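+ /* E.g. (illustrative): a DFmode value needs two consecutive registers
+    (HARD_REGNO_NREGS == 2) and, within the FP bank, may only start on
+    an even-numbered hard register -- f2 (hard reg 34) passes the check,
+    f3 (hard reg 35) does not -- while any register below 32 passes.  */
+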
+ /* Value is 1 if it is a good idea to tie two pseudo registers
+    when one has mode MODE1 and one has mode MODE2.
+    If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
+    for any hard reg, then this must be 0 for correct output.  */
+ /* I think that is not always true; alignment restrictions for doubles
+    should not prevent tying them with singles.  So try allowing that.
+    On the other hand, don't let fixed and floating be tied;
+    this restriction is not necessary, but may make better code.  */
+ #define MODES_TIEABLE_P(MODE1, MODE2) \
+   ((GET_MODE_CLASS (MODE1) == MODE_FLOAT		\
+     || GET_MODE_CLASS (MODE1) == MODE_COMPLEX_FLOAT)	\
+    == (GET_MODE_CLASS (MODE2) == MODE_FLOAT		\
+        || GET_MODE_CLASS (MODE2) == MODE_COMPLEX_FLOAT))
+
+ /* Specify the registers used for certain standard purposes.
+    The values of these macros are register numbers.  */
+
+ /* i860 pc isn't overloaded on a register that the compiler knows about.  */
+ /* #define PC_REGNUM  */
+
+ /* Register to use for pushing function arguments.  */
+ #define STACK_POINTER_REGNUM 2
+
+ /* Base register for access to local variables of the function.  */
+ #define FRAME_POINTER_REGNUM 3
+
+ /* Value should be nonzero if functions must have frame pointers.
+    Zero means the frame pointer need not be set up (and parms
+    may be accessed via the stack pointer) in functions that seem suitable.
+    This is computed in `reload', in reload1.c.  */
+ #define FRAME_POINTER_REQUIRED 1
+
+ /* Base register for access to arguments of the function.  */
+ #define ARG_POINTER_REGNUM 28
+
+ /* Register in which static-chain is passed to a function.  */
+ #define STATIC_CHAIN_REGNUM 29
+
+ /* Register in which address to store a structure value
+    is passed to a function.  */
+ #define STRUCT_VALUE_REGNUM 16
+
+ /* Register to use when a source of a floating-point zero is needed.  */
+ #define F0_REGNUM	32
+
+ /* Define the classes of registers for register constraints in the
+    machine description.  Also define ranges of constants.
+
+    One of the classes must always be named ALL_REGS and include all hard regs.
+    If there is more than one class, another class must be named NO_REGS
+    and contain no registers.
+
+    The name GENERAL_REGS must be the name of a class (or an alias for
+    another name such as ALL_REGS).  This is the class of registers
+    that is allowed by "g" or "r" in a register constraint.
+    Also, registers outside this class are allocated only when
+    instructions express preferences for them.
+
+    The classes must be numbered in nondecreasing order; that is,
+    a larger-numbered class must never be contained completely
+    in a smaller-numbered class.
+
+    For any two classes, it is very desirable that there be another
+    class that represents their union.  */
+
+ /* The i860 has two kinds of registers, hence four classes.  */
+
+ enum reg_class { NO_REGS, GENERAL_REGS, FP_REGS, ALL_REGS, LIM_REG_CLASSES };
+
+ #define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+ /* Give names of register classes as strings for dump file.   */
+
+ #define REG_CLASS_NAMES \
+  {"NO_REGS", "GENERAL_REGS", "FP_REGS", "ALL_REGS" }
+
+ /* Define which registers fit in which classes.
+    This is an initializer for a vector of HARD_REG_SET
+    of length N_REG_CLASSES.  */
+
+ #define REG_CLASS_CONTENTS	\
+  {{0, 0}, {0xffffffff, 0},	\
+   {0, 0xffffffff}, {0xffffffff, 0xffffffff}}
+
+ /* The same information, inverted:
+    Return the class number of the smallest class containing
+    reg number REGNO.  This could be a conditional expression
+    or could index an array.  */
+
+ #define REGNO_REG_CLASS(REGNO) \
+  ((REGNO) >= 32 ? FP_REGS : GENERAL_REGS)
+
+ /* The class value for index registers, and the one for base regs.  */
+ #define INDEX_REG_CLASS GENERAL_REGS
+ #define BASE_REG_CLASS GENERAL_REGS
+
+ /* Get reg_class from a letter such as appears in the machine description.  */
+
+ #define REG_CLASS_FROM_LETTER(C) \
+   ((C) == 'f' ? FP_REGS : NO_REGS)
+
+ /* The letters I, J, K, L and M in a register constraint string
+    can be used to stand for particular ranges of immediate operands.
+    This macro defines what the ranges are.
+    C is the letter, and VALUE is a constant value.
+    Return 1 if VALUE is in the range specified by C.
+
+    For the i860, `I' is used for the range of constants
+    an add/subtract insn can actually contain.
+    But not including -0x8000, since we need
+    to negate the constant sometimes.
+    `J' is used for the range which is just zero (since that is R0).
+    `K' is used for the range allowed in bte.
+    `L' is used for the range allowed in logical insns.  */
+
+ #define SMALL_INT(X) ((unsigned) (INTVAL (X) + 0x7fff) < 0xffff)
+
+ #define LOGIC_INT(X) ((unsigned) INTVAL (X) < 0x10000)
+
+ #define SMALL_INTVAL(X) ((unsigned) ((X) + 0x7fff) < 0xffff)
+
+ #define LOGIC_INTVAL(X) ((unsigned) (X) < 0x10000)
+
+ #define CONST_OK_FOR_LETTER_P(VALUE, C)  \
+   ((C) == 'I' ? ((unsigned) (VALUE) + 0x7fff) < 0xffff	\
+    : (C) == 'J' ? (VALUE) == 0				\
+    : (C) == 'K' ? (unsigned) (VALUE) < 0x20	\
+    : (C) == 'L' ? (unsigned) (VALUE) < 0x10000	\
+    : 0)
+
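+ /* A few sample values (illustrative only): 17 satisfies `I', `K' and
+    `L'; 0 is the only value satisfying `J'; 0x1f is the largest value
+    `K' accepts; and -0x8000 satisfies none of them, since `I'
+    deliberately excludes it so the constant can be negated.  */
+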
+ /* Return non-zero if the given VALUE is acceptable for the
+    constraint letter C.  For the i860, constraint letter 'G'
+    permits only a floating-point zero value.  */
+ #define CONST_DOUBLE_OK_FOR_LETTER_P(VALUE, C)  	\
+   ((C) == 'G' && CONST_DOUBLE_LOW ((VALUE)) == 0	\
+    && CONST_DOUBLE_HIGH ((VALUE)) == 0)
+
+ /* Given an rtx X being reloaded into a reg required to be
+    in class CLASS, return the class of reg to actually use.
+    In general this is just CLASS; but on some machines
+    in some cases it is preferable to use a more restrictive class.
+
+    If we are trying to put an integer constant into some register, prefer an
+    integer register to an FP register.  If we are trying to put a
+    non-zero floating-point constant into some register, use an integer
+    register if the constant is SFmode and GENERAL_REGS is one of our options.
+    Otherwise, put the constant into memory.
+
+    When reloading something smaller than a word, use a general reg
+    rather than an FP reg.  */
+
+ #define PREFERRED_RELOAD_CLASS(X,CLASS)  \
+   ((CLASS) == ALL_REGS && GET_CODE (X) == CONST_INT ? GENERAL_REGS	\
+    : ((GET_MODE (X) == HImode || GET_MODE (X) == QImode)		\
+       && (CLASS) == ALL_REGS)						\
+    ? GENERAL_REGS							\
+    : (GET_CODE (X) == CONST_DOUBLE					\
+       && GET_MODE_CLASS (GET_MODE (X)) == MODE_FLOAT			\
+       && ! CONST_DOUBLE_OK_FOR_LETTER_P (X, 'G'))			\
+    ? ((CLASS) == ALL_REGS && GET_MODE (X) == SFmode ? GENERAL_REGS	\
+       : (CLASS) == GENERAL_REGS && GET_MODE (X) == SFmode ? (CLASS)	\
+       : NO_REGS)							\
+    : (CLASS))
+
+ /* Return the register class of a scratch register needed to copy IN into
+    a register in CLASS in MODE.  If it can be done directly, NO_REGS is
+    returned.  */
+
+ #define SECONDARY_INPUT_RELOAD_CLASS(CLASS,MODE,IN) \
+   ((CLASS) == FP_REGS && CONSTANT_P (IN) ? GENERAL_REGS : NO_REGS)
+
+ /* Return the maximum number of consecutive registers
+    needed to represent mode MODE in a register of class CLASS.  */
+ /* On the i860, this is the size of MODE in words.  */
+ #define CLASS_MAX_NREGS(CLASS, MODE)	\
+   ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+
+ /* Stack layout; function entry, exit and calling.  */
+
+ /* Define this if pushing a word on the stack
+    makes the stack pointer a smaller address.  */
+ #define STACK_GROWS_DOWNWARD
+
+ /* Define this if the nominal address of the stack frame
+    is at the high-address end of the local variables;
+    that is, each additional local variable allocated
+    goes at a more negative offset in the frame.  */
+ #define FRAME_GROWS_DOWNWARD
+
+ /* Offset within stack frame to start allocating local variables at.
+    If FRAME_GROWS_DOWNWARD, this is the offset to the END of the
+    first local allocated.  Otherwise, it is the offset to the BEGINNING
+    of the first local allocated.  */
+ #define STARTING_FRAME_OFFSET 0
+
+ /* If we generate an insn to push BYTES bytes,
+    this says how many the stack pointer really advances by.
+    On the i860, don't define this because there are no push insns.  */
+ /*  #define PUSH_ROUNDING(BYTES) */
+
+ /* Offset of first parameter from the argument pointer register value.  */
+ #define FIRST_PARM_OFFSET(FNDECL) 0
+
+ /* Value is the number of bytes of arguments automatically
+    popped when returning from a subroutine call.
+    FUNDECL is the declaration node of the function (as a tree),
+    FUNTYPE is the data type of the function (as a tree),
+    or for a library call it is an identifier node for the subroutine name.
+    SIZE is the number of bytes of arguments passed on the stack.  */
+
+ #define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) 0
+
+ /* Define how to find the value returned by a function.
+    VALTYPE is the data type of the value (as a tree).
+    If the precise function being called is known, FUNC is its FUNCTION_DECL;
+    otherwise, FUNC is 0.  */
+
+ /* On the i860, the value register depends on the mode.  */
+
+ #define FUNCTION_VALUE(VALTYPE, FUNC)  \
+   gen_rtx_REG (TYPE_MODE (VALTYPE),				\
+ 	       (GET_MODE_CLASS (TYPE_MODE (VALTYPE)) == MODE_FLOAT	\
+ 		? 40 : 16))
+
+ /* Define how to find the value returned by a library function
+    assuming the value has mode MODE.  */
+
+ #define LIBCALL_VALUE(MODE)				\
+   gen_rtx_REG (MODE,					\
+ 	       (GET_MODE_CLASS ((MODE)) == MODE_FLOAT	\
+ 		? 40 : 16))
+
+ /* 1 if N is a possible register number for a function value
+    as seen by the caller.  */
+
+ #define FUNCTION_VALUE_REGNO_P(N) ((N) == 40 || (N) == 16)
+
+ /* 1 if N is a possible register number for function argument passing.
+    On the i860, these are r16-r27 and f8-f15.  */
+
+ #define FUNCTION_ARG_REGNO_P(N)		\
+   (((N) < 28 && (N) > 15) || ((N) < 48 && (N) >= 40))
+
+ /* Define a data type for recording info about an argument list
+    during the scan of that argument list.  This data type should
+    hold all necessary information about the function itself
+    and about the args processed so far, enough to enable macros
+    such as FUNCTION_ARG to determine where the next arg should go.
+
+    On the i860, we must count separately the number of general registers used
+    and the number of float registers used.  */
+
+ struct cumulative_args { int ints, floats; };
+ #define CUMULATIVE_ARGS struct cumulative_args
+
+ /* Initialize a variable CUM of type CUMULATIVE_ARGS
+    for a call to a function whose data type is FNTYPE.
+    For a library call, FNTYPE is 0.
+
+    On the i860, the general-reg offset normally starts at 0,
+    but starts at 4 bytes
+    when the function gets a structure-value-address as an
+    invisible first argument.  */
+
+ #define INIT_CUMULATIVE_ARGS(CUM,FNTYPE,LIBNAME,INDIRECT)	\
+  ((CUM).ints = ((FNTYPE) != 0 && aggregate_value_p (TREE_TYPE ((FNTYPE))) \
+ 		? 4 : 0),			\
+   (CUM).floats = 0)
+
+ /* Machine-specific subroutines of the following macros.  */
+ #define CEILING(X,Y)  (((X) + (Y) - 1) / (Y))
+ #define ROUNDUP(X,Y)  (CEILING ((X), (Y)) * (Y))
+
+ /* Update the data in CUM to advance over an argument
+    of mode MODE and data type TYPE.
+    (TYPE is null for libcalls where that information may not be available.)
+    Floats, and doubleword ints, are returned in f regs;
+    other ints, in r regs.
+    Aggregates, even short ones, are passed in memory.  */
+
+ #define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED)		\
+  ((TYPE) != 0 && (TREE_CODE ((TYPE)) == RECORD_TYPE		\
+ 		  || TREE_CODE ((TYPE)) == UNION_TYPE)		\
+   ? 0								\
+   : GET_MODE_CLASS ((MODE)) == MODE_FLOAT || (MODE) == DImode	\
+   ? ((CUM).floats = (ROUNDUP ((CUM).floats, GET_MODE_SIZE ((MODE)))	\
+ 		     + ROUNDUP (GET_MODE_SIZE (MODE), 4)))	\
+   : GET_MODE_CLASS ((MODE)) == MODE_INT				\
+   ? ((CUM).ints = (ROUNDUP ((CUM).ints, GET_MODE_SIZE ((MODE))) \
+ 		   + ROUNDUP (GET_MODE_SIZE (MODE), 4)))	\
+   : 0)
+
+ /* Determine where to put an argument to a function.
+    Value is zero to push the argument on the stack,
+    or a hard register in which to store the argument.
+
+    MODE is the argument's machine mode.
+    TYPE is the data type of the argument (as a tree).
+     This is null for libcalls where that information may
+     not be available.
+    CUM is a variable of type CUMULATIVE_ARGS which gives info about
+     the preceding args and about the function being called.
+    NAMED is nonzero if this argument is a named parameter
+     (otherwise it is an extra parameter matching an ellipsis).  */
+
+ /* On the i860, the first 12 words of integer arguments go in r16-r27,
+    and the first 8 words of floating arguments go in f8-f15.
+    DImode values are treated as floats.  */
+
+ #define FUNCTION_ARG(CUM, MODE, TYPE, NAMED)		\
+  ((TYPE) != 0 && (TREE_CODE ((TYPE)) == RECORD_TYPE	\
+ 		  || TREE_CODE ((TYPE)) == UNION_TYPE)	\
+   ? 0							\
+   : GET_MODE_CLASS ((MODE)) == MODE_FLOAT || (MODE) == DImode	\
+   ? (ROUNDUP ((CUM).floats, GET_MODE_SIZE ((MODE))) < 32	\
+      ? gen_rtx_REG ((MODE),				\
+ 		    40 + (ROUNDUP ((CUM).floats,	\
+ 				   GET_MODE_SIZE ((MODE)))	\
+ 			  / 4))				\
+      : 0)						\
+   : GET_MODE_CLASS ((MODE)) == MODE_INT			\
+   ? (ROUNDUP ((CUM).ints, GET_MODE_SIZE ((MODE))) < 48	\
+      ? gen_rtx_REG ((MODE),				\
+ 		    16 + (ROUNDUP ((CUM).ints,		\
+ 				   GET_MODE_SIZE ((MODE)))	\
+ 			  / 4))				\
+      : 0)						\
+   : 0)
+
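+ /* A worked example (hypothetical prototype, not taken from any source
+    file): for `void f (int a, double b, int c, struct s d)' the two
+    macros above place a in r16, b in f8 and c in r17, while d, being
+    an aggregate, is passed entirely in memory.  */
+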
+ /* For an arg passed partly in registers and partly in memory,
+    this is the number of registers used.
+    For args passed entirely in registers or entirely in memory, zero.  */
+
+ #define FUNCTION_ARG_PARTIAL_NREGS(CUM, MODE, TYPE, NAMED) 0
+
+ /* If defined, a C expression that gives the alignment boundary, in
+    bits, of an argument with the specified mode and type.  If it is
+    not defined,  `PARM_BOUNDARY' is used for all arguments.  */
+
+ #define FUNCTION_ARG_BOUNDARY(MODE, TYPE)			\
+   (((TYPE) != 0)						\
+    ? ((TYPE_ALIGN(TYPE) <= PARM_BOUNDARY)			\
+       ? PARM_BOUNDARY						\
+       : TYPE_ALIGN(TYPE))					\
+    : ((GET_MODE_ALIGNMENT(MODE) <= PARM_BOUNDARY)		\
+       ? PARM_BOUNDARY						\
+       : GET_MODE_ALIGNMENT(MODE)))
+
+ /* Output a no-op just before the beginning of the function,
+    to ensure that there does not appear to be a delayed branch there.
+    Such a thing would confuse interrupt recovery.  */
+ #define ASM_OUTPUT_FUNCTION_PREFIX(FILE,NAME) \
+   fprintf (FILE, "\tnop\n")
+
+ /* Output assembler code to FILE to increment profiler label # LABELNO
+    for profiling a function entry.  */
+
+ #define FUNCTION_PROFILER(FILE, LABELNO)  \
+    abort ();
+
+ /* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
+    the stack pointer does not matter.  The value is tested only in
+    functions that have frame pointers.
+    No definition is equivalent to always zero.  */
+
+ #define EXIT_IGNORE_STACK 1
+
+ /* Generate necessary RTL for __builtin_saveregs().  */
+ #define EXPAND_BUILTIN_SAVEREGS() \
+   i860_saveregs()
+
+ /* Define the `__builtin_va_list' type for the ABI.  */
+ #define BUILD_VA_LIST_TYPE(VALIST) \
+   (VALIST) = i860_build_va_list ()
+
+ /* Implement `va_start' for varargs and stdarg.  */
+ #define EXPAND_BUILTIN_VA_START(stdarg, valist, nextarg) \
+   i860_va_start (stdarg, valist, nextarg)
+
+ /* Implement `va_arg'.  */
+ #define EXPAND_BUILTIN_VA_ARG(valist, type) \
+   i860_va_arg (valist, type)
+
+ /* Store in the variable DEPTH the initial difference between the
+    frame pointer reg contents and the stack pointer reg contents,
+    as of the start of the function body.  This depends on the layout
+    of the fixed parts of the stack frame and on how registers are saved.
+
+    On the i860, FRAME_POINTER_REQUIRED is always 1, so the definition of this
+    macro doesn't matter.  But it must be defined.  */
+
+ #define INITIAL_FRAME_POINTER_OFFSET(DEPTH) \
+   do { (DEPTH) = 0; } while (0)
+
+ /* Output assembler code for a block containing the constant parts
+    of a trampoline, leaving space for the variable parts.  */
+
+ /* On the i860, the trampoline contains five instructions:
+      orh #TOP_OF_FUNCTION,r0,r31
+      or #BOTTOM_OF_FUNCTION,r31,r31
+      orh #TOP_OF_STATIC,r0,r29
+      bri r31
+      or #BOTTOM_OF_STATIC,r29,r29  */
+ #define TRAMPOLINE_TEMPLATE(FILE)					\
+ {									\
+   assemble_aligned_integer (UNITS_PER_WORD, GEN_INT (0xec1f0000));	\
+   assemble_aligned_integer (UNITS_PER_WORD, GEN_INT (0xe7ff0000));	\
+   assemble_aligned_integer (UNITS_PER_WORD, GEN_INT (0xec1d0000));	\
+   assemble_aligned_integer (UNITS_PER_WORD, GEN_INT (0x4000f800));	\
+   assemble_aligned_integer (UNITS_PER_WORD, GEN_INT (0xe7bd0000));	\
+ }
+
+ /* Length in units of the trampoline for entering a nested function.  */
+
+ #define TRAMPOLINE_SIZE 20
+
+ /* Emit RTL insns to initialize the variable parts of a trampoline.
+    FNADDR is an RTX for the address of the function's pure code.
+    CXT is an RTX for the static chain value for the function.
+
+    Store hi function at +0, low function at +4,
+    hi static at +8, low static at +16  */
+
+ #define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT)			\
+ {									\
+   rtx cxt = force_reg (Pmode, CXT);					\
+   rtx fn = force_reg (Pmode, FNADDR);					\
+   rtx hi_cxt = expand_shift (RSHIFT_EXPR, SImode, cxt,			\
+ 			     size_int (16), 0, 0);			\
+   rtx hi_fn = expand_shift (RSHIFT_EXPR, SImode, fn,			\
+ 			    size_int (16), 0, 0);			\
+   emit_move_insn (gen_rtx_MEM (HImode, plus_constant (TRAMP, 16)),	\
+ 		  gen_lowpart (HImode, cxt));				\
+   emit_move_insn (gen_rtx_MEM (HImode, plus_constant (TRAMP, 4)),	\
+ 		  gen_lowpart (HImode, fn));				\
+   emit_move_insn (gen_rtx_MEM (HImode, plus_constant (TRAMP, 8)),	\
+ 		  gen_lowpart (HImode, hi_cxt));			\
+   emit_move_insn (gen_rtx_MEM (HImode, plus_constant (TRAMP, 0)),	\
+ 		  gen_lowpart (HImode, hi_fn));				\
+ }
+
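+ /* Filled-in example with hypothetical values: for a nested function at
+    address 0x12345678 with static chain value 0x9abcdef0, the trampoline
+    assembled from the template and the stores above would execute
+    roughly:
+
+        orh 0x1234,r0,r31
+        or  0x5678,r31,r31
+        orh 0x9abc,r0,r29
+        bri r31
+        or  0xdef0,r29,r29      (delay slot)
+ */
+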
+ /* Addressing modes, and classification of registers for them.  */
+
+ /* #define HAVE_POST_INCREMENT 0 */
+ /* #define HAVE_POST_DECREMENT 0 */
+
+ /* #define HAVE_PRE_DECREMENT 0 */
+ /* #define HAVE_PRE_INCREMENT 0 */
+
+ /* Macros to check register numbers against specific register classes.  */
+
+ /* These assume that REGNO is a hard or pseudo reg number.
+    They give nonzero only if REGNO is a hard reg of the suitable class
+    or a pseudo reg currently allocated to a suitable hard reg.
+    Since they use reg_renumber, they are safe only once reg_renumber
+    has been allocated, which happens in local-alloc.c.  */
+
+ #define REGNO_OK_FOR_INDEX_P(REGNO) \
+ ((REGNO) < 32 || (unsigned) reg_renumber[REGNO] < 32)
+ #define REGNO_OK_FOR_BASE_P(REGNO) \
+ ((REGNO) < 32 || (unsigned) reg_renumber[REGNO] < 32)
+ #define REGNO_OK_FOR_FP_P(REGNO) \
+ (((REGNO) ^ 0x20) < 32 || (unsigned) (reg_renumber[REGNO] ^ 0x20) < 32)
+
+ /* Now macros that check whether X is a register and also,
+    strictly, whether it is in a specified class.
+
+    These macros are specific to the i860, and may be used only
+    in code for printing assembler insns and in conditions for
+    define_optimization.  */
+
+ /* 1 if X is an fp register.  */
+
+ #define FP_REG_P(X) (REG_P (X) && REGNO_OK_FOR_FP_P (REGNO (X)))
+
+ /* Maximum number of registers that can appear in a valid memory address.  */
+
+ #define MAX_REGS_PER_ADDRESS 2
+
+ /* Recognize any constant value that is a valid address.  */
+
+ #define CONSTANT_ADDRESS_P(X)   \
+   (GET_CODE (X) == LABEL_REF || GET_CODE (X) == SYMBOL_REF		\
+    || GET_CODE (X) == CONST_INT || GET_CODE (X) == CONST		\
+    || GET_CODE (X) == HIGH)
+
+ /* Nonzero if the constant value X is a legitimate general operand.
+    It is given that X satisfies CONSTANT_P or is a CONST_DOUBLE.
+
+    On the Sparc, this is anything but a CONST_DOUBLE.
+    Let's try permitting CONST_DOUBLEs and see what happens.  */
+
+ #define LEGITIMATE_CONSTANT_P(X) 1
+
+ /* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
+    and check its validity for a certain class.
+    We have two alternate definitions for each of them.
+    The usual definition accepts all pseudo regs; the other rejects
+    them unless they have been allocated suitable hard regs.
+    The symbol REG_OK_STRICT causes the latter definition to be used.
+
+    Most source files want to accept pseudo regs in the hope that
+    they will get allocated to the class that the insn wants them to be in.
+    Source files for reload pass need to be strict.
+    After reload, it makes no difference, since pseudo regs have
+    been eliminated by then.  */
+
+ #ifndef REG_OK_STRICT
+
+ /* Nonzero if X is a hard reg that can be used as an index
+    or if it is a pseudo reg.  */
+ #define REG_OK_FOR_INDEX_P(X) (((unsigned) REGNO (X)) - 32 >= 14)
+ /* Nonzero if X is a hard reg that can be used as a base reg
+    or if it is a pseudo reg.  */
+ #define REG_OK_FOR_BASE_P(X) (((unsigned) REGNO (X)) - 32 >= 14)
+
+ #else
+
+ /* Nonzero if X is a hard reg that can be used as an index.  */
+ #define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X))
+ /* Nonzero if X is a hard reg that can be used as a base reg.  */
+ #define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
+
+ #endif
+
+ /* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
+    that is a valid memory address for an instruction.
+    The MODE argument is the machine mode for the MEM expression
+    that wants to use this address.
+
+    On the i860, the actual addresses must be REG+REG or REG+SMALLINT.
+    But we can treat a SYMBOL_REF as legitimate if it is part of this
+    function's constant-pool, because such addresses can actually
+    be output as REG+SMALLINT.
+
+    The displacement in an address must be a multiple of the alignment.
+
+    Try making SYMBOL_REF (and other things which are CONSTANT_ADDRESS_P)
+    a legitimate address, regardless.  Because the only insns which can use
+    memory are load or store insns, the added hair in the machine description
+    is not that bad.  It should also speed up the compiler by halving the number
+    of insns it must manage for each (MEM (SYMBOL_REF ...)) involved.  */
+
+ #define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR)		\
+ { if (GET_CODE (X) == REG)				\
+     { if (REG_OK_FOR_BASE_P (X)) goto ADDR; }		\
+   else if (GET_CODE (X) == PLUS)			\
+     {							\
+       if (GET_CODE (XEXP (X, 0)) == REG			\
+ 	  && REG_OK_FOR_BASE_P (XEXP (X, 0)))		\
+ 	{						\
+ 	  if (GET_CODE (XEXP (X, 1)) == CONST_INT	\
+ 	      && INTVAL (XEXP (X, 1)) >= -0x8000	\
+ 	      && INTVAL (XEXP (X, 1)) < 0x8000		\
+ 	      && (INTVAL (XEXP (X, 1)) & (GET_MODE_SIZE (MODE) - 1)) == 0) \
+ 	    goto ADDR;					\
+ 	}						\
+       else if (GET_CODE (XEXP (X, 1)) == REG		\
+ 	  && REG_OK_FOR_BASE_P (XEXP (X, 1)))		\
+ 	{						\
+ 	  if (GET_CODE (XEXP (X, 0)) == CONST_INT	\
+ 	      && INTVAL (XEXP (X, 0)) >= -0x8000	\
+ 	      && INTVAL (XEXP (X, 0)) < 0x8000		\
+ 	      && (INTVAL (XEXP (X, 0)) & (GET_MODE_SIZE (MODE) - 1)) == 0) \
+ 	    goto ADDR;					\
+ 	}						\
+     }							\
+   else if (CONSTANT_ADDRESS_P (X))			\
+     goto ADDR;						\
+ }
+
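+ /* Illustrative cases for the macro above (SImode): (plus r16
+    (const_int 8)) is accepted, (plus r16 (const_int 6)) is rejected
+    because 6 is not a multiple of the 4-byte access size, (plus r16
+    (const_int 0x8000)) is rejected as out of range, and a bare
+    SYMBOL_REF is accepted via CONSTANT_ADDRESS_P.  */
+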
+ /* Try machine-dependent ways of modifying an illegitimate address
+    to be legitimate.  If we find one, return the new, valid address.
+    This macro is used in only one place: `memory_address' in explow.c.
+
+    OLDX is the address as it was before break_out_memory_refs was called.
+    In some cases it is useful to look at this to decide what needs to be done.
+
+    MODE and WIN are passed so that this macro can use
+    GO_IF_LEGITIMATE_ADDRESS.
+
+    It is always safe for this macro to do nothing.  It exists to recognize
+    opportunities to optimize the output.  */
+
+ /* On the i860, change COMPLICATED + CONSTANT to REG+CONSTANT.
+    Also change a symbolic constant to a REG,
+    though that may not be necessary.  */
+
+ #define LEGITIMIZE_ADDRESS(X,OLDX,MODE,WIN)	\
+ { if (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 0)) == MULT)	\
+     (X) = gen_rtx_PLUS (SImode, XEXP (X, 1),			\
+ 			force_operand (XEXP (X, 0), 0));	\
+   if (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == MULT)	\
+     (X) = gen_rtx_PLUS (SImode, XEXP (X, 0),			\
+ 			force_operand (XEXP (X, 1), 0));	\
+   if (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 0)) == PLUS)	\
+     (X) = gen_rtx_PLUS (SImode, XEXP (X, 1),			\
+ 			force_operand (XEXP (X, 0), 0));	\
+   if (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == PLUS)	\
+     (X) = gen_rtx_PLUS (SImode, XEXP (X, 0),			\
+ 			force_operand (XEXP (X, 1), 0));	\
+   if (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 0)) != REG	\
+       && GET_CODE (XEXP (X, 0)) != CONST_INT)			\
+     (X) = gen_rtx_PLUS (SImode, XEXP (X, 1),			\
+ 			copy_to_mode_reg (SImode, XEXP (X, 0))); \
+   if (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) != REG	\
+       && GET_CODE (XEXP (X, 1)) != CONST_INT)			\
+     (X) = gen_rtx_PLUS (SImode, XEXP (X, 0),			\
+ 			copy_to_mode_reg (SImode, XEXP (X, 1))); \
+   if (GET_CODE (X) == SYMBOL_REF)				\
+     (X) = copy_to_reg (X);					\
+   if (GET_CODE (X) == CONST)					\
+     (X) = copy_to_reg (X);					\
+   if (memory_address_p (MODE, X))				\
+     goto WIN; }
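+
+ /* A sketch of the effect: given the SImode address
+    (plus (plus (reg A) (reg B)) (const_int 8)), the third clause above
+    computes the inner PLUS into a fresh register via force_operand, leaving
+    (plus (const_int 8) (reg T)), which then satisfies
+    GO_IF_LEGITIMATE_ADDRESS and reaches WIN.  (A, B and T here are just
+    placeholder registers for the example.)  */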
+
+ /* Go to LABEL if ADDR (a legitimate address expression)
+    has an effect that depends on the machine mode it is used for.
+    On the i860 this is never true.
+    There are some addresses that are invalid in wide modes
+    but valid for narrower modes, but they shouldn't affect
+    the places that use this macro.  */
+
+ #define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL)
+
+ /* Specify the machine mode that this machine uses
+    for the index in the tablejump instruction.  */
+ #define CASE_VECTOR_MODE SImode
+
+ /* Define this as a C expression that evaluates to nonzero if the tablejump
+    instruction expects the table to contain offsets from the address of the
+    table.
+    Do not define this if the table should contain absolute addresses.  */
+ /* #define CASE_VECTOR_PC_RELATIVE 1 */
+
+ /* Must pass floats to libgcc functions as doubles.  */
+ #define LIBGCC_NEEDS_DOUBLE 1
+
+ #define DIVSI3_LIBCALL "*.div"
+ #define UDIVSI3_LIBCALL "*.udiv"
+ #define REMSI3_LIBCALL "*.rem"
+ #define UREMSI3_LIBCALL "*.urem"
+
+ /* Define this as 1 if `char' should by default be signed; else as 0.  */
+ #define DEFAULT_SIGNED_CHAR 1
+
+ /* Max number of bytes we can move from memory to memory
+    in one reasonably fast instruction.  */
+ #define MOVE_MAX 16
+
+ /* Nonzero if access to memory by bytes is slow and undesirable.  */
+ #define SLOW_BYTE_ACCESS 0
+
+ /* Value is 1 if truncating an integer of INPREC bits to OUTPREC bits
+    is done just by pretending it is already truncated.  */
+ #define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
+
+ /* Value is 1 if it generates better code to perform an unsigned comparison
+    on the given literal integer value in the given mode when we are only
+    looking for an equal/non-equal result.  */
+ /* For the i860, if the immediate value has its high-order 27 bits zero,
+    then we want to engineer an unsigned comparison for EQ/NE because
+    such values can fit in the 5-bit immediate field of a bte or btne
+    instruction (which gets zero extended before comparing).  For all
+    other immediate values on the i860, we will use signed compares
+    because that avoids the need for doing explicit xor's to zero_extend
+    the non-constant operand in cases where it was (mem:QI ...) or a
+    (mem:HI ...) which always gets automatically sign-extended by the
+    hardware upon loading.  */
+
+ #define LITERAL_COMPARE_BETTER_UNSIGNED(intval, mode)                   \
+   (((unsigned) (intval) & 0x1f) == (unsigned) (intval))
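+
+ /* The test above holds exactly for the values 0 through 31.  For example,
+    17 & 0x1f == 17, so an EQ/NE compare against 17 is done unsigned (it fits
+    the 5-bit bte/btne immediate field), whereas 40 & 0x1f == 8 != 40, so a
+    compare against 40 uses a signed compare as described above.  */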
+
+ /* Specify the machine mode that pointers have.
+    After generation of rtl, the compiler makes no further distinction
+    between pointers and any other objects of this machine mode.  */
+ #define Pmode SImode
+
+ /* A function address in a call instruction
+    is a byte address (for indexing purposes)
+    so give the MEM rtx a byte's mode.  */
+ #define FUNCTION_MODE SImode
+
+ /* Define this if addresses of constant functions
+    shouldn't be put through pseudo regs where they can be cse'd.
+    Desirable on machines where ordinary constants are expensive
+    but a CALL with constant address is cheap.  */
+ #define NO_FUNCTION_CSE
+
+ /* Compute the cost of computing a constant rtl expression RTX
+    whose rtx-code is CODE.  The body of this macro is a portion
+    of a switch statement.  If the code is computed here,
+    return it with a return statement.  Otherwise, break from the switch.  */
+
+ #define CONST_COSTS(RTX,CODE, OUTER_CODE)			\
+   case CONST_INT:						\
+     if (INTVAL (RTX) == 0)					\
+       return 0;							\
+     if (INTVAL (RTX) < 0x2000 && INTVAL (RTX) >= -0x2000) return 1; \
+   case CONST:							\
+   case LABEL_REF:						\
+   case SYMBOL_REF:						\
+     return 4;							\
+   case CONST_DOUBLE:						\
+     return 6;
+
+ /* Specify the cost of a branch insn; roughly the number of extra insns that
+    should be added to avoid a branch.
+
+    Set this to 3 on the i860 since branches may often take three cycles.  */
+
+ #define BRANCH_COST 3
+
+ /* Tell final.c how to eliminate redundant test instructions.  */
+
+ /* Here we define machine-dependent flags and fields in cc_status
+    (see `conditions.h').  */
+
+ /* This holds the value sourcing h%r31.  We keep this info
+    around so that mem/mem ops, such as increment and decrement,
+    etc, can be performed reasonably.  */
+ #define CC_STATUS_MDEP rtx
+
+ #define CC_STATUS_MDEP_INIT (cc_status.mdep = 0)
+
+ #define CC_NEGATED	01000
+
+ /* We use this macro in those places in the i860.md file where we would
+    normally just do a CC_STATUS_INIT (for other machines).  This macro
+    differs from CC_STATUS_INIT in that it doesn't mess with the special
+    bits or fields which describe what is currently in the special r31
+    scratch register, but it does clear out everything that actually
+    relates to the condition code bit of the i860.  */
+
+ #define CC_STATUS_PARTIAL_INIT						\
+  (cc_status.flags &= (CC_KNOW_HI_R31 | CC_HI_R31_ADJ),			\
+   cc_status.value1 = 0,							\
+   cc_status.value2 = 0)
+
+ /* Nonzero if we know the value of h%r31.  */
+ #define CC_KNOW_HI_R31 0100000
+
+ /* Nonzero if h%r31 is actually ha%something, rather than h%something.  */
+ #define CC_HI_R31_ADJ 0200000
+
+ /* Store in cc_status the expressions
+    that the condition codes will describe
+    after execution of an instruction whose pattern is EXP.
+    Do not alter them if the instruction would not alter the cc's.  */
+
+ /* On the i860, only compare insns set a useful condition code.  */
+
+ #define NOTICE_UPDATE_CC(EXP, INSN) \
+ { cc_status.flags &= (CC_KNOW_HI_R31 | CC_HI_R31_ADJ);	\
+   cc_status.value1 = 0; cc_status.value2 = 0; }
+
+ /* Control the assembler format that we output.  */
+
+ /* Assembler pseudos to introduce constants of various size.  */
+
+ #define ASM_DOUBLE "\t.double"
+
+ /* Output at beginning of assembler file.  */
+ /* The .file command should always begin the output.  */
+
+ #define ASM_FILE_START(FILE)
+ #if 0
+ #define ASM_FILE_START(FILE)					\
+   do { output_file_directive ((FILE), main_input_filename);	\
+        if (optimize) ASM_FILE_START_1 (FILE);			\
+      } while (0)
+ #endif
+
+ #define ASM_FILE_START_1(FILE)
+
+ /* Output to assembler file text saying following lines
+    may contain character constants, extra white space, comments, etc.  */
+
+ #define ASM_APP_ON ""
+
+ /* Output to assembler file text saying following lines
+    no longer contain unusual constructs.  */
+
+ #define ASM_APP_OFF ""
+
+ /* Output before read-only data.  */
+
+ #define TEXT_SECTION_ASM_OP "\t.text"
+
+ /* Output before writable data.  */
+
+ #define DATA_SECTION_ASM_OP "\t.data"
+
+ /* How to refer to registers in assembler output.
+    This sequence is indexed by compiler's hard-register-number (see above).  */
+
+ #define REGISTER_NAMES \
+ {"r0", "r1", "sp", "fp", "r4", "r5", "r6", "r7", "r8", "r9",		\
+  "r10", "r11", "r12", "r13", "r14", "r15", "r16", "r17", "r18", "r19",	\
+  "r20", "r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29",	\
+  "r30", "r31",								\
+  "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9",		\
+  "f10", "f11", "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19",	\
+  "f20", "f21", "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29",	\
+  "f30", "f31" }
+
+ /* This is how to output the definition of a user-level label named NAME,
+    such as the label on a static function or variable NAME.  */
+
+ #define ASM_OUTPUT_LABEL(FILE,NAME)	\
+   do { assemble_name (FILE, NAME); fputs (":\n", FILE); } while (0)
+
+ /* This is how to output a command to make the user-level label named NAME
+    defined for reference from other files.  */
+
+ #define ASM_GLOBALIZE_LABEL(FILE,NAME)					\
+   do { fputs (".globl ", FILE);					\
+ 	assemble_name (FILE, NAME);					\
+ 	fputs ("\n", FILE);						\
+   } while (0)
+
+ /* The prefix to add to user-visible assembler symbols.
+
+    This definition is overridden in sysv4.h because under System V
+    Release 4, user-level symbols are *not* prefixed with underscores in
+    the generated assembly code.  */
+
+ #define USER_LABEL_PREFIX "_"
+
+ /* This is how to output an internal numbered label where
+    PREFIX is the class of label and NUM is the number within the class.  */
+
+ #define ASM_OUTPUT_INTERNAL_LABEL(FILE,PREFIX,NUM)	\
+   fprintf (FILE, ".%s%d:\n", PREFIX, NUM)
+
+ /* This is how to output an internal numbered label which
+    labels a jump table.  */
+
+ #undef ASM_OUTPUT_CASE_LABEL
+ #define ASM_OUTPUT_CASE_LABEL(FILE, PREFIX, NUM, JUMPTABLE)		\
+ do { ASM_OUTPUT_ALIGN ((FILE), 2);					\
+      ASM_OUTPUT_INTERNAL_LABEL ((FILE), PREFIX, NUM);			\
+    } while (0)
+
+ /* Output at the end of a jump table.  */
+
+ #define ASM_OUTPUT_CASE_END(FILE,NUM,INSN)	\
+   fprintf (FILE, ".text\n")
+
+ /* This is how to store into the string LABEL
+    the symbol_ref name of an internal numbered label where
+    PREFIX is the class of label and NUM is the number within the class.
+    This is suitable for output with `assemble_name'.  */
+
+ #define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM)	\
+   sprintf (LABEL, "*.%s%d", PREFIX, NUM)
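+
+ /* For example, PREFIX "LC" and NUM 9 produce the string "*.LC9"; the
+    leading `*' tells assemble_name to emit the rest verbatim (without
+    USER_LABEL_PREFIX), so references match the ".LC9:" definition emitted
+    by ASM_OUTPUT_INTERNAL_LABEL above.  */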
+
+ /* This is how to output code to push a register on the stack.
+    It need not be very fast code.  */
+
+ #define ASM_OUTPUT_REG_PUSH(FILE,REGNO)					\
+   fprintf (FILE, "\taddu -16,%ssp,%ssp\n\t%sst.l %s%s,0(%ssp)\n",	\
+ 	i860_reg_prefix, i860_reg_prefix,				\
+ 	((REGNO) < 32 ? "" : "f"),					\
+ 	i860_reg_prefix, reg_names[REGNO],				\
+ 	i860_reg_prefix)
+
+ /* This is how to output an insn to pop a register from the stack.
+    It need not be very fast code.  */
+
+ #define ASM_OUTPUT_REG_POP(FILE,REGNO)					\
+   fprintf (FILE, "\t%sld.l 0(%ssp),%s%s\n\taddu 16,%ssp,%ssp\n",	\
+ 	((REGNO) < 32 ? "" : "f"),					\
+ 	i860_reg_prefix,						\
+ 	i860_reg_prefix, reg_names[REGNO],				\
+ 	i860_reg_prefix, i860_reg_prefix)
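+
+ /* A sketch of the resulting output, assuming i860_reg_prefix is "%" as on
+    SVR4: pushing r5 produces
+
+        addu -16,%sp,%sp
+        st.l %r5,0(%sp)
+
+    and popping it produces
+
+        ld.l 0(%sp),%r5
+        addu 16,%sp,%sp
+
+    For a floating register such as f2 the memory opcodes become fst.l
+    and fld.l.  */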
+
+ /* This is how to output an element of a case-vector that is absolute.  */
+
+ #define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE)  \
+   fprintf (FILE, "\t.long .L%d\n", VALUE)
+
+ /* This is how to output an element of a case-vector that is relative.
+    (The i860 does not use such vectors,
+    but we must define this macro anyway.)  */
+
+ #define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL)  \
+   fprintf (FILE, "\t.word .L%d-.L%d\n", VALUE, REL)
+
+ /* This is how to output an assembler line
+    that says to advance the location counter
+    to a multiple of 2**LOG bytes.  */
+
+ #define ASM_OUTPUT_ALIGN(FILE,LOG)	\
+   if ((LOG) != 0)			\
+     fprintf (FILE, "\t.align %d\n", 1 << (LOG))
+
+ #define ASM_OUTPUT_SKIP(FILE,SIZE)  \
+   fprintf (FILE, "\t.blkb %u\n", (SIZE))
+
+ /* This says how to output an assembler line
+    to define a global common symbol.  */
+
+ #define ASM_OUTPUT_COMMON(FILE, NAME, SIZE, ROUNDED)  \
+ ( fputs (".comm ", (FILE)),			\
+   assemble_name ((FILE), (NAME)),		\
+   fprintf ((FILE), ",%u\n", (ROUNDED)))
+
+ /* This says how to output an assembler line
+    to define a local common symbol.  */
+
+ #define ASM_OUTPUT_LOCAL(FILE, NAME, SIZE, ROUNDED)  \
+ ( fputs (".lcomm ", (FILE)),			\
+   assemble_name ((FILE), (NAME)),		\
+   fprintf ((FILE), ",%u\n", (ROUNDED)))
+
+ /* Store in OUTPUT a string (made with alloca) containing
+    an assembler-name for a local static variable named NAME.
+    LABELNO is an integer which is different for each call.  */
+
+ #define ASM_FORMAT_PRIVATE_NAME(OUTPUT, NAME, LABELNO)	\
+ ( (OUTPUT) = (char *) alloca (strlen ((NAME)) + 10),	\
+   sprintf ((OUTPUT), "%s.%d", (NAME), (LABELNO)))
+
+ /* Print operand X (an rtx) in assembler syntax to file FILE.
+    CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
+    For `%' followed by punctuation, CODE is the punctuation and X is null.
+
+    In the following comments, the term "constant address" is used frequently.
+    For an exact definition of what constitutes a "constant address" see the
+    output_addr_const routine in final.c.
+
+    On the i860, the following target-specific special codes are recognized:
+
+ 	`r'	The operand can be anything, but if it is an immediate zero
+ 		value (either integer or floating point) then it will be
+ 		represented as `r0' or as `f0' (respectively).
+
+ 	`m'	The operand is a memory ref (to a constant address) but print
+ 		its address as a constant.
+
+ 	`L'	The operand is a numeric constant, a constant address, or
+ 		a memory ref to a constant address.  Print the correct
+ 		notation to yield the low part of the given value or
+ 		address or the low part of the address of the referred
+ 		to memory object.
+
+ 	`H'	The operand is a numeric constant, a constant address, or
+ 		a memory ref to a constant address.  Print the correct
+ 		notation to yield the high part of the given value or
+ 		address or the high part of the address of the referred
+ 		to memory object.
+
+ 	`h'	The operand is a numeric constant, a constant address, or
+ 		a memory ref to a constant address.  Either print the
+ 		correct notation to yield the plain high part of the
+ 		given value or address (or the plain high part of the
+ 		address of the memory object) or else print the correct
+ 		notation to yield the "adjusted" high part of the given
+ 		address (or of the address of the referred to memory object).
+
+ 		The choice of what to print depends upon whether the address
+ 		in question is relocatable or not.  If it is relocatable,
+ 		print the notation to get the adjusted high part.  Otherwise
+ 		just print the notation to get the plain high part.  Note
+ 		that "adjusted" high parts are generally used *only* when
+ 		the next following instruction uses the low part of the
+ 		address as an offset, as in `offset(reg)'.
+
+ 	`R'	The operand is a floating-point register.  Print the
+ 		name of the next following (32-bit) floating-point register.
+ 		(This is used when moving a value into just the most
+ 		significant part of a floating-point register pair.)
+
+ 	`?'	(takes no operand) Substitute the value of i860_reg_prefix
+ 		at this point.  The value of i860_reg_prefix is typically
+ 		a null string for most i860 targets, but for System V
+ 		Release 4 the i860 assembler syntax requires that all
+ 		names of registers be prefixed with a percent-sign, so
+ 		for SVR4, the value of i860_reg_prefix is initialized to
+ 		"%" in i860.c.
+ */
+
+ extern const char *i860_reg_prefix;
+
+ #define PRINT_OPERAND_PUNCT_VALID_P(CODE) ((CODE) == '?')
+
+ /* The following macro definition is overridden in sysv4.h
+    because the svr4 i860 assembler requires a different syntax
+    for getting parts of constant/relocatable values.  */
+
+ #define PRINT_OPERAND_PART(FILE, X, PART_CODE)				\
+   do { fprintf (FILE, "%s%%", PART_CODE);				\
+ 	output_address (X);						\
+   } while (0)
+
+ #define OPERAND_LOW_PART	"l"
+ #define OPERAND_HIGH_PART	"h"
+ /* NOTE: All documentation available for the i860 sez that you must
+    use "ha" to get the relocated high part of a relocatable, but
+    reality sez different.  */
+ #define OPERAND_HIGH_ADJ_PART	"ha"
+
+ #define PRINT_OPERAND(FILE, X, CODE)					\
+ { if ((CODE) == '?')							\
+     fprintf (FILE, "%s", i860_reg_prefix);				\
+   else if (CODE == 'R')							\
+     fprintf (FILE, "%s%s", i860_reg_prefix, reg_names[REGNO (X) + 1]);	\
+   else if (GET_CODE (X) == REG)						\
+     fprintf (FILE, "%s%s", i860_reg_prefix, reg_names[REGNO (X)]);	\
+   else if ((CODE) == 'm')						\
+     output_address (XEXP (X, 0));					\
+   else if ((CODE) == 'L')						\
+     {									\
+       if (GET_CODE (X) == MEM)						\
+ 	PRINT_OPERAND_PART (FILE, XEXP (X, 0), OPERAND_LOW_PART);	\
+       else								\
+ 	PRINT_OPERAND_PART (FILE, X, OPERAND_LOW_PART);			\
+     }									\
+   else if ((CODE) == 'H')						\
+     {									\
+       if (GET_CODE (X) == MEM)						\
+ 	PRINT_OPERAND_PART (FILE, XEXP (X, 0), OPERAND_HIGH_PART);	\
+       else								\
+ 	PRINT_OPERAND_PART (FILE, X, OPERAND_HIGH_PART);		\
+     }									\
+   else if ((CODE) == 'h')						\
+     {									\
+       if (GET_CODE (X) == MEM)						\
+ 	PRINT_OPERAND_PART (FILE, XEXP (X, 0), OPERAND_HIGH_ADJ_PART);	\
+       else								\
+ 	PRINT_OPERAND_PART (FILE, X, OPERAND_HIGH_ADJ_PART);		\
+     }									\
+   else if (GET_CODE (X) == MEM)						\
+     output_address (XEXP (X, 0));					\
+   else if ((CODE) == 'r' && (X) == const0_rtx)				\
+     fprintf (FILE, "%sr0", i860_reg_prefix);				\
+   else if ((CODE) == 'r' && (X) == CONST0_RTX (GET_MODE (X)))		\
+     fprintf (FILE, "%sf0", i860_reg_prefix);				\
+   else if (GET_CODE (X) == CONST_DOUBLE)				\
+     fprintf (FILE, "0x%lx", sfmode_constant_to_ulong (X));		\
+   else									\
+     output_addr_const (FILE, X); }
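+
+ /* A few examples of the codes above, using this default PRINT_OPERAND_PART
+    (sysv4.h overrides it with a different spelling): for a constant address
+    operand, %L0, %H0 and %h0 print "l%", "h%" and "ha%" respectively,
+    followed by the address; %? prints i860_reg_prefix; and with code `r' an
+    integer zero operand prints as r0 and a floating-point zero as f0, each
+    with the register prefix.  */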
+
+ /* Print a memory address as an operand to reference that memory location.  */
+
+ #define PRINT_OPERAND_ADDRESS(FILE, ADDR)  \
+ { register rtx addr = ADDR;					\
+   if (GET_CODE (addr) == REG)					\
+     {								\
+       fprintf (FILE, "0(%s%s)",					\
+ 	i860_reg_prefix, reg_names[REGNO (addr)]);		\
+     }								\
+   else if (GET_CODE (addr) == CONST_DOUBLE			\
+             && GET_MODE (addr) == SFmode)			\
+     fprintf (FILE, "0x%lx", sfmode_constant_to_ulong (addr));	\
+   else if (GET_CODE (addr) == PLUS)				\
+     {								\
+       if ((GET_CODE (XEXP (addr, 0)) == CONST_INT)		\
+ 	  && (GET_CODE (XEXP (addr, 1)) == REG))		\
+ 	fprintf (FILE, "%d(%s%s)", INTVAL (XEXP (addr, 0)),	\
+ 	    i860_reg_prefix, reg_names[REGNO (XEXP (addr, 1))]);\
+       else if ((GET_CODE (XEXP (addr, 1)) == CONST_INT)		\
+ 	  && (GET_CODE (XEXP (addr, 0)) == REG))		\
+ 	fprintf (FILE, "%d(%s%s)", INTVAL (XEXP (addr, 1)),	\
+ 	    i860_reg_prefix, reg_names[REGNO (XEXP (addr, 0))]);\
+       else if ((GET_CODE (XEXP (addr, 0)) == REG)		\
+ 	  && (GET_CODE (XEXP (addr, 1)) == REG))		\
+ 	fprintf (FILE, "%s%s(%s%s)",				\
+ 	    i860_reg_prefix, reg_names[REGNO (XEXP (addr, 0))],	\
+ 	    i860_reg_prefix, reg_names[REGNO (XEXP (addr, 1))]);\
+       else							\
+ 	output_addr_const (FILE, addr);				\
+     }								\
+   else								\
+     {								\
+       output_addr_const (FILE, addr);				\
+     }								\
+ }
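+
+ /* Example outputs from the above, again assuming the "%" register prefix
+    of SVR4: (reg r5) prints as "0(%r5)", (plus (reg r5) (const_int 8)) as
+    "8(%r5)", and (plus (reg r5) (reg r6)) as "%r5(%r6)".  */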
+
+ /* Optionally define this if you have added predicates to
+    `MACHINE.c'.  This macro is called within an initializer of an
+    array of structures.  The first field in the structure is the
+    name of a predicate and the second field is an array of rtl
+    codes.  For each predicate, list all rtl codes that can be in
+    expressions matched by the predicate.  The list should have a
+    trailing comma.  Here is an example of two entries in the list
+    for a typical RISC machine:
+
+    #define PREDICATE_CODES \
+      {"gen_reg_rtx_operand", {SUBREG, REG}},  \
+      {"reg_or_short_cint_operand", {SUBREG, REG, CONST_INT}},
+
+    Defining this macro does not affect the generated code (however,
+    incorrect definitions that omit an rtl code that may be matched
+    by the predicate can cause the compiler to malfunction).
+    Instead, it allows the table built by `genrecog' to be more
+    compact and efficient, thus speeding up the compiler.  The most
+    important predicates to include in the list specified by this
+    macro are those used in the most insn patterns.  */
+
+ #define PREDICATE_CODES							\
+    {"reg_or_0_operand",		{REG, SUBREG, CONST_INT}},		\
+    {"arith_operand",		{REG, SUBREG, CONST_INT}},		\
+    {"logic_operand",		{REG, SUBREG, CONST_INT}},		\
+    {"shift_operand",		{REG, SUBREG, CONST_INT}},		\
+    {"compare_operand",		{REG, SUBREG, CONST_INT}},		\
+    {"arith_const_operand",	{CONST_INT}},				\
+    {"logic_const_operand",	{CONST_INT}},				\
+    {"bte_operand",		{REG, SUBREG, CONST_INT}},		\
+    {"indexed_operand",		{MEM}},					\
+    {"load_operand",		{MEM}},					\
+    {"small_int",		{CONST_INT}},				\
+    {"logic_int",		{CONST_INT}},				\
+    {"call_insn_operand",	{MEM}},
+
+ /* Define the information needed to generate branch insns.  This is stored
+    from the compare operation.  Note that we can't use "rtx" here since it
+    hasn't been defined!  */
+
+ extern struct rtx_def *i860_compare_op0, *i860_compare_op1;
diff -c3pN gcc/gcc/config/nil/i860.md gcc/gcc/config/i860/i860.md
*** gcc/gcc/config/nil/i860.md	Wed Dec 31 19:00:00 1969
--- gcc/gcc/config/i860/i860.md	Sat Aug  9 00:14:12 2003
***************
*** 0 ****
--- 1,2327 ----
+ ;;- Machine description for Intel 860 chip for GNU C compiler
+ ;;  Copyright (C) 1989, 1990, 1997, 1998, 1999, 2000
+ ;;  Free Software Foundation, Inc.
+
+ ;; This file is part of GNU CC.
+
+ ;; GNU CC is free software; you can redistribute it and/or modify
+ ;; it under the terms of the GNU General Public License as published by
+ ;; the Free Software Foundation; either version 2, or (at your option)
+ ;; any later version.
+
+ ;; GNU CC is distributed in the hope that it will be useful,
+ ;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+ ;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ ;; GNU General Public License for more details.
+
+ ;; You should have received a copy of the GNU General Public License
+ ;; along with GNU CC; see the file COPYING.  If not, write to
+ ;; the Free Software Foundation, 59 Temple Place - Suite 330,
+ ;; Boston, MA 02111-1307, USA.
+
+
+ ;;- See file "rtl.def" for documentation on define_insn, match_*, et al.
+
+ ;;- cpp macro #define NOTICE_UPDATE_CC in file tm.h handles condition code
+ ;;- updates for most instructions.
+
+ ;;- Operand classes for the register allocator:
+
+ ;; Bit-test instructions.
+
+ (define_insn ""
+   [(set (cc0) (eq (and:SI (match_operand:SI 0 "register_operand" "r")
+ 			  (match_operand:SI 1 "logic_operand" "rL"))
+ 		  (const_int 0)))]
+   ""
+   "*
+ {
+   CC_STATUS_PARTIAL_INIT;
+   return \"and %1,%0,%?r0\";
+ }")
+
+ (define_insn ""
+   [(set (cc0) (ne (and:SI (match_operand:SI 0 "register_operand" "r")
+ 			  (match_operand:SI 1 "logic_operand" "rL"))
+ 		  (const_int 0)))]
+   ""
+   "*
+ {
+   CC_STATUS_PARTIAL_INIT;
+   cc_status.flags |= CC_NEGATED;
+   return \"and %1,%0,%?r0\";
+ }")
+
+ (define_insn ""
+   [(set (cc0) (eq (and:SI (match_operand:SI 0 "register_operand" "r")
+ 			  (match_operand:SI 1 "immediate_operand" "i"))
+ 		  (const_int 0)))]
+   "GET_CODE (operands[1]) == CONST_INT && (INTVAL (operands[1]) & 0xffff) == 0"
+   "*
+ {
+   CC_STATUS_PARTIAL_INIT;
+   return \"andh %H1,%0,%?r0\";
+ }")
+
+ (define_insn ""
+   [(set (cc0) (ne (and:SI (match_operand:SI 0 "register_operand" "r")
+ 			  (match_operand:SI 1 "immediate_operand" "i"))
+ 		  (const_int 0)))]
+   "GET_CODE (operands[1]) == CONST_INT && (INTVAL (operands[1]) & 0xffff) == 0"
+   "*
+ {
+   CC_STATUS_PARTIAL_INIT;
+   cc_status.flags |= CC_NEGATED;
+   return \"andh %H1,%0,%?r0\";
+ }")
+
+ (define_insn ""
+   [(set (cc0) (eq (ashiftrt:SI
+ 		   (sign_extend:SI
+ 		    (ashift:QI (match_operand:QI 0 "register_operand" "r")
+ 			       (match_operand:QI 1 "logic_int" "n")))
+ 		   (match_operand:SI 2 "logic_int" "n"))
+ 		  (const_int 0)))]
+   ""
+   "*
+ {
+   int width = 8 - INTVAL (operands[2]);
+   int pos = 8 - width - INTVAL (operands[1]);
+
+   CC_STATUS_PARTIAL_INIT;
+   operands[2] = GEN_INT (~((-1) << width) << pos);
+   return \"and %2,%0,%?r0\";
+ }")
+
+ ;; -------------------------------------------------------------------------
+ ;; SImode signed integer comparisons
+ ;; -------------------------------------------------------------------------
+
+ (define_insn "cmpeqsi"
+   [(set (cc0) (eq (match_operand:SI 0 "logic_operand" "r,rL")
+ 		  (match_operand:SI 1 "logic_operand" "L,r")))]
+   ""
+   "*
+ {
+   CC_STATUS_PARTIAL_INIT;
+   if (REG_P (operands[0]))
+     return \"xor %1,%0,%?r0\";
+   else
+     return \"xor %0,%1,%?r0\";
+ }")
+
+ (define_insn "cmpnesi"
+   [(set (cc0) (ne (match_operand:SI 0 "logic_operand" "r,rL")
+ 		  (match_operand:SI 1 "logic_operand" "L,r")))]
+   ""
+   "*
+ {
+   CC_STATUS_PARTIAL_INIT;
+   cc_status.flags |= CC_NEGATED;
+   if (REG_P (operands[0]))
+     return \"xor %1,%0,%?r0\";
+   else
+     return \"xor %0,%1,%?r0\";
+ }")
+
+ (define_insn "cmpltsi"
+   [(set (cc0) (lt (match_operand:SI 0 "arith_operand" "r,rI")
+ 		  (match_operand:SI 1 "arith_operand" "I,r")))]
+   ""
+   "*
+ {
+   CC_STATUS_PARTIAL_INIT;
+   if (REG_P (operands[1]))
+     return \"subs %0,%1,%?r0\";
+   else
+     {
+       cc_status.flags |= CC_REVERSED;
+       operands[1] = GEN_INT (- INTVAL (operands[1]));
+       return \"adds %1,%0,%?r0\";
+     }
+ }")
+
+ (define_insn "cmpgtsi"
+   [(set (cc0) (gt (match_operand:SI 0 "arith_operand" "r,rI")
+ 		  (match_operand:SI 1 "arith_operand" "I,r")))]
+   ""
+   "*
+ {
+   CC_STATUS_PARTIAL_INIT;
+   if (REG_P (operands[0]))
+     return \"subs %1,%0,%?r0\";
+   else
+     {
+       cc_status.flags |= CC_REVERSED;
+       operands[0] = GEN_INT (- INTVAL (operands[0]));
+       return \"adds %0,%1,%?r0\";
+     }
+ }")
+
+ (define_insn "cmplesi"
+   [(set (cc0) (le (match_operand:SI 0 "arith_operand" "r,rI")
+ 		  (match_operand:SI 1 "arith_operand" "I,r")))]
+   ""
+   "*
+ {
+   CC_STATUS_PARTIAL_INIT;
+   cc_status.flags |= CC_NEGATED;
+   if (REG_P (operands[0]))
+     return \"subs %1,%0,%?r0\";
+   else
+     {
+       cc_status.flags |= CC_REVERSED;
+       operands[0] = GEN_INT (- INTVAL (operands[0]));
+       return \"adds %0,%1,%?r0\";
+     }
+ }")
+
+ (define_insn "cmpgesi"
+   [(set (cc0) (ge (match_operand:SI 0 "arith_operand" "r,rI")
+ 		  (match_operand:SI 1 "arith_operand" "I,r")))]
+   ""
+   "*
+ {
+   CC_STATUS_PARTIAL_INIT;
+   cc_status.flags |= CC_NEGATED;
+   if (REG_P (operands[1]))
+     return \"subs %0,%1,%?r0\";
+   else
+     {
+       cc_status.flags |= CC_REVERSED;
+       operands[1] = GEN_INT (- INTVAL (operands[1]));
+       return \"adds %1,%0,%?r0\";
+     }
+ }")
+
+ ;; -------------------------------------------------------------------------
+ ;; SImode unsigned integer comparisons
+ ;; -------------------------------------------------------------------------
+
+ ;; WARNING!  There is a small i860 hardware limitation (bug?) which we
+ ;; may run up against (if we are not careful) when we are trying to do
+ ;; unsigned comparisons like (x >= 0), (x < 0), (0 <= x), and (0 > x).
+ ;; Specifically, we must avoid using an `addu' instruction to perform
+ ;; such comparisons because the result (in the CC bit register) will
+ ;; come out wrong.  (This fact is documented in a footnote on page 7-10
+ ;; of the 1991 version of the i860 Microprocessor Family Programmer's
+ ;; Reference Manual).  Note that unsigned comparisons of this sort are
+ ;; always redundant anyway, because an unsigned quantity can never be
+ ;; less than zero.  When we see cases like this, we generate an
+ ;; `or K,%r0,%r0' instruction instead (where K is a constant 0 or -1)
+ ;; so as to get the CC bit register set properly for any subsequent
+ ;; conditional jump instruction.
+
+ (define_insn "cmpgeusi"
+   [(set (cc0) (geu (match_operand:SI 0 "arith_operand" "r,rI")
+ 		   (match_operand:SI 1 "arith_operand" "I,r")))]
+   ""
+   "*
+ {
+   CC_STATUS_PARTIAL_INIT;
+   if (REG_P (operands[1]))
+     return \"subu %0,%1,%?r0\";
+   else
+     {
+       if (INTVAL (operands[1]) == 0)
+ 	return \"or 0,%?r0,%?r0\";
+       else
+ 	{
+ 	  cc_status.flags |= CC_REVERSED;
+ 	  operands[1] = GEN_INT (- INTVAL (operands[1]));
+ 	  return \"addu %1,%0,%?r0\";
+ 	}
+     }
+ }")
+
+ (define_insn "cmpleusi"
+   [(set (cc0) (leu (match_operand:SI 0 "arith_operand" "r,rI")
+ 		   (match_operand:SI 1 "arith_operand" "I,r")))]
+   ""
+   "*
+ {
+   CC_STATUS_PARTIAL_INIT;
+   if (REG_P (operands[0]))
+     return \"subu %1,%0,%?r0\";
+   else
+     {
+       if (INTVAL (operands[0]) == 0)
+ 	return \"or 0,%?r0,%?r0\";
+       else
+ 	{
+ 	  cc_status.flags |= CC_REVERSED;
+ 	  operands[0] = GEN_INT (- INTVAL (operands[0]));
+ 	  return \"addu %0,%1,%?r0\";
+ 	}
+     }
+ }")
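+
+ ;; For example, (geu x (const_int 0)) and (leu (const_int 0) x) are always
+ ;; true, so the two patterns above reach their INTVAL == 0 arms and emit
+ ;; only `or 0,%?r0,%?r0' to set CC, avoiding the addu anomaly described in
+ ;; the warning above.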
+
+ ;; -------------------------------------------------------------------------
+ ;; SFmode floating-point comparisons
+ ;; -------------------------------------------------------------------------
+
+ (define_insn "cmpeqsf"
+   [(set (cc0) (eq (match_operand:SF 0 "reg_or_0_operand" "fG")
+ 		  (match_operand:SF 1 "reg_or_0_operand" "fG")))]
+   ""
+   "*
+ {
+   CC_STATUS_PARTIAL_INIT;
+   return \"pfeq.ss %r0,%r1,%?f0\";
+ }")
+
+ (define_insn "cmpnesf"
+   [(set (cc0) (ne (match_operand:SF 0 "reg_or_0_operand" "fG")
+ 		  (match_operand:SF 1 "reg_or_0_operand" "fG")))]
+   ""
+   "*
+ {
+   CC_STATUS_PARTIAL_INIT;
+   cc_status.flags |= CC_NEGATED;
+   return \"pfeq.ss %r1,%r0,%?f0\";
+ }")
+
+ ;; NOTE:  The i860 Programmer's Reference Manual says that when we are
+ ;; doing (A < B) or (A > B) comparisons, we have to use pfgt for these
+ ;; in order to be IEEE compliant (in case a trap occurs during these
+ ;; operations).  Conversely, for (A <= B) or (A >= B) comparisons, we
+ ;; must use pfle to be IEEE compliant.
+
+ (define_insn "cmpltsf"
+   [(set (cc0) (lt (match_operand:SF 0 "reg_or_0_operand" "fG")
+ 		  (match_operand:SF 1 "reg_or_0_operand" "fG")))]
+   ""
+   "*
+ {
+   CC_STATUS_PARTIAL_INIT;
+   return \"pfgt.ss %r1,%r0,%?f0\";
+ }")
+
+ (define_insn "cmpgtsf"
+   [(set (cc0) (gt (match_operand:SF 0 "reg_or_0_operand" "fG")
+ 		  (match_operand:SF 1 "reg_or_0_operand" "fG")))]
+   ""
+   "*
+ {
+   CC_STATUS_PARTIAL_INIT;
+   return \"pfgt.ss %r0,%r1,%?f0\";
+ }")
+
+ ;; NOTE:  The pfle opcode doesn't do what you think it does.  It is
+ ;; bass-ackwards.  It *clears* the CC flag if the first operand is
+ ;; less than or equal to the second.  Thus, we have to set CC_NEGATED
+ ;; for the following two patterns.
+
+ (define_insn "cmplesf"
+   [(set (cc0) (le (match_operand:SF 0 "reg_or_0_operand" "fG")
+ 		  (match_operand:SF 1 "reg_or_0_operand" "fG")))]
+   ""
+   "*
+ {
+   CC_STATUS_PARTIAL_INIT;
+   cc_status.flags |= CC_NEGATED;
+   return \"pfle.ss %r0,%r1,%?f0\";
+ }")
+
+ (define_insn "cmpgesf"
+   [(set (cc0) (ge (match_operand:SF 0 "reg_or_0_operand" "fG")
+ 		  (match_operand:SF 1 "reg_or_0_operand" "fG")))]
+   ""
+   "*
+ {
+   CC_STATUS_PARTIAL_INIT;
+   cc_status.flags |= CC_NEGATED;
+   return \"pfle.ss %r1,%r0,%?f0\";
+ }")
+
+ ;; -------------------------------------------------------------------------
+ ;; DFmode floating-point comparisons
+ ;; -------------------------------------------------------------------------
+
+ (define_insn "cmpeqdf"
+   [(set (cc0) (eq (match_operand:DF 0 "reg_or_0_operand" "fG")
+ 		  (match_operand:DF 1 "reg_or_0_operand" "fG")))]
+   ""
+   "*
+ {
+   CC_STATUS_PARTIAL_INIT;
+   return \"pfeq.dd %r0,%r1,%?f0\";
+ }")
+
+ (define_insn "cmpnedf"
+   [(set (cc0) (ne (match_operand:DF 0 "reg_or_0_operand" "fG")
+ 		  (match_operand:DF 1 "reg_or_0_operand" "fG")))]
+   ""
+   "*
+ {
+   CC_STATUS_PARTIAL_INIT;
+   cc_status.flags |= CC_NEGATED;
+   return \"pfeq.dd %r1,%r0,%?f0\";
+ }")
+
+ ;; NOTE:  The i860 Programmer's Reference Manual says that when we are
+ ;; doing (A < B) or (A > B) comparisons, we have to use pfgt for these
+ ;; in order to be IEEE compliant (in case a trap occurs during these
+ ;; operations).  Conversely, for (A <= B) or (A >= B) comparisons, we
+ ;; must use pfle to be IEEE compliant.
+
+ (define_insn "cmpltdf"
+   [(set (cc0) (lt (match_operand:DF 0 "reg_or_0_operand" "fG")
+ 		  (match_operand:DF 1 "reg_or_0_operand" "fG")))]
+   ""
+   "*
+ {
+   CC_STATUS_PARTIAL_INIT;
+   return \"pfgt.dd %r1,%r0,%?f0\";
+ }")
+
+ (define_insn "cmpgtdf"
+   [(set (cc0) (gt (match_operand:DF 0 "reg_or_0_operand" "fG")
+ 		  (match_operand:DF 1 "reg_or_0_operand" "fG")))]
+   ""
+   "*
+ {
+   CC_STATUS_PARTIAL_INIT;
+   return \"pfgt.dd %r0,%r1,%?f0\";
+ }")
+
+ ;; NOTE:  The pfle opcode doesn't do what you think it does.  It is
+ ;; bass-ackwards.  It *clears* the CC flag if the first operand is
+ ;; less than or equal to the second.  Thus, we have to set CC_NEGATED
+ ;; for the following two patterns.
+
+ (define_insn "cmpledf"
+   [(set (cc0) (le (match_operand:DF 0 "reg_or_0_operand" "fG")
+ 		  (match_operand:DF 1 "reg_or_0_operand" "fG")))]
+   ""
+   "*
+ {
+   CC_STATUS_PARTIAL_INIT;
+   cc_status.flags |= CC_NEGATED;
+   return \"pfle.dd %r0,%r1,%?f0\";
+ }")
+
+ (define_insn "cmpgedf"
+   [(set (cc0) (ge (match_operand:DF 0 "reg_or_0_operand" "fG")
+ 		  (match_operand:DF 1 "reg_or_0_operand" "fG")))]
+   ""
+   "*
+ {
+   CC_STATUS_PARTIAL_INIT;
+   cc_status.flags |= CC_NEGATED;
+   return \"pfle.dd %r1,%r0,%?f0\";
+ }")
+
+ ;; ------------------------------------------------------------------------
+ ;; Integer EQ/NE comparisons against constant values which will fit in the
+ ;; 16-bit immediate field of an instruction.  These are made by combining.
+ ;; ------------------------------------------------------------------------
+
+ (define_insn ""
+   [(set (cc0) (eq (zero_extend:SI (match_operand:HI 0 "load_operand" "m"))
+ 	          (match_operand:SI 1 "small_int" "I")))]
+   "INTVAL (operands[1]) >= 0"
+   "*
+ {
+   CC_STATUS_PARTIAL_INIT;
+   return \"ld.s %0,%?r31\;xor %1,%?r31,%?r0\";
+ }")
+
+ (define_insn ""
+   [(set (cc0) (eq (match_operand:SI 0 "small_int" "I")
+ 	          (zero_extend:SI (match_operand:HI 1 "load_operand" "m"))))]
+   "INTVAL (operands[0]) >= 0"
+   "*
+ {
+   CC_STATUS_PARTIAL_INIT;
+   return \"ld.s %1,%?r31\;xor %0,%?r31,%?r0\";
+ }")
+
+ ;; ------------------------------------------------------------------------
+ ;; Define the real conditional branch instructions.
+ ;; ------------------------------------------------------------------------
+
+ (define_insn "cbranch"
+   [(set (pc) (if_then_else (eq (cc0) (const_int 0))
+ 			   (label_ref (match_operand 0 "" ""))
+ 			   (pc)))]
+   ""
+   "*
+ {
+   if ((cc_prev_status.flags & CC_NEGATED) == 0)
+     return \"bnc %l0\";
+   else
+     return \"bc %l0\";
+ }")
+
+ (define_insn "flipped_cbranch"
+   [(set (pc) (if_then_else (ne (cc0)
+ 			       (const_int 0))
+ 			   (pc)
+ 			   (label_ref (match_operand 0 "" ""))))]
+   ""
+   "*
+ {
+   if ((cc_prev_status.flags & CC_NEGATED) == 0)
+     return \"bnc %l0\";
+   else
+     return \"bc %l0\";
+ }")
+
+ (define_insn "inverse_cbranch"
+   [(set (pc) (if_then_else (eq (cc0)
+ 			       (const_int 0))
+ 			   (pc)
+ 			   (label_ref (match_operand 0 "" ""))))]
+   ""
+   "*
+ {
+   if ((cc_prev_status.flags & CC_NEGATED) == 0)
+     return \"bc %l0\";
+   else
+     return \"bnc %l0\";
+ }")
+
+
+ (define_insn "flipped_inverse_cbranch"
+   [(set (pc) (if_then_else (ne (cc0)
+ 			       (const_int 0))
+ 			   (label_ref (match_operand 0 "" ""))
+ 			   (pc)))]
+   ""
+   "*
+ {
+   if ((cc_prev_status.flags & CC_NEGATED) == 0)
+     return \"bc %l0\";
+   else
+     return \"bnc %l0\";
+ }")
+
+ ;; Simple BTE/BTNE compare-and-branch insns made by combining.
+ ;; Note that it is wrong to add similar patterns for QI or HImode
+ ;; because bte/btne always compare the whole register.
+
+ (define_insn ""
+   [(set (pc)
+         (if_then_else (eq (match_operand:SI 0 "register_operand" "r")
+ 			  (match_operand:SI 1 "bte_operand" "rK"))
+ 		      (label_ref (match_operand 2 "" ""))
+ 		      (pc)))]
+   ""
+   "bte %1,%0,%2")
+
+ (define_insn ""
+   [(set (pc)
+         (if_then_else (ne (match_operand:SI 0 "register_operand" "r")
+ 			  (match_operand:SI 1 "bte_operand" "rK"))
+ 		      (label_ref (match_operand 2 "" ""))
+ 		      (pc)))]
+   ""
+   "btne %1,%0,%2")
+
+ (define_insn ""
+   [(set (pc)
+         (if_then_else (eq (match_operand:SI 0 "register_operand" "r")
+ 			  (match_operand:SI 1 "bte_operand" "rK"))
+ 		      (pc)
+ 		      (label_ref (match_operand 2 "" ""))))]
+   ""
+   "btne %1,%0,%2")
+
+ (define_insn ""
+   [(set (pc)
+         (if_then_else (ne (match_operand:SI 0 "register_operand" "r")
+ 			  (match_operand:SI 1 "bte_operand" "rK"))
+ 		      (pc)
+ 		      (label_ref (match_operand 2 "" ""))))]
+   ""
+   "bte %1,%0,%2")
+
+ ;; Load byte/halfword, zero-extend, & compare-and-branch insns.
+ ;; These are made by combining.
+
+ (define_insn ""
+   [(set (pc)
+         (if_then_else (eq (zero_extend:SI (match_operand:QI 0 "memory_operand" "m"))
+                           (match_operand:SI 1 "bte_operand" "K"))
+                       (label_ref (match_operand 2 "" ""))
+                       (pc)))
+    (match_scratch:SI 3 "=r")]
+   ""
+   "ld.b %0,%3;bte %1,%3,%2")
+
+ (define_insn ""
+   [(set (pc)
+         (if_then_else (ne (zero_extend:SI (match_operand:QI 0 "memory_operand" "m"))
+                           (match_operand:SI 1 "bte_operand" "K"))
+                       (label_ref (match_operand 2 "" ""))
+                       (pc)))
+    (match_scratch:SI 3 "=r")]
+   ""
+   "ld.b %0,%3;btne %1,%3,%2")
+
+ (define_insn ""
+   [(set (pc)
+         (if_then_else (eq (zero_extend:SI (match_operand:QI 0 "memory_operand" "m"))
+                           (match_operand:SI 1 "bte_operand" "K"))
+                       (pc)
+                       (label_ref (match_operand 2 "" ""))))
+    (match_scratch:SI 3 "=r")]
+   ""
+   "ld.b %0,%3;btne %1,%3,%2")
+
+ (define_insn ""
+   [(set (pc)
+         (if_then_else (ne (zero_extend:SI (match_operand:QI 0 "memory_operand" "m"))
+                           (match_operand:SI 1 "bte_operand" "K"))
+                       (pc)
+                       (label_ref (match_operand 2 "" ""))))
+    (match_scratch:SI 3 "=r")]
+   ""
+   "ld.b %0,%3;bte %1,%3,%2")
+
+ (define_insn ""
+   [(set (pc)
+         (if_then_else (eq (zero_extend:SI (match_operand:HI 0 "memory_operand" "m"))
+                           (match_operand:SI 1 "bte_operand" "K"))
+                       (label_ref (match_operand 2 "" ""))
+                       (pc)))
+    (match_scratch:SI 3 "=r")]
+   ""
+   "ld.s %0,%3;bte %1,%3,%2")
+
+ (define_insn ""
+   [(set (pc)
+         (if_then_else (ne (zero_extend:SI (match_operand:HI 0 "memory_operand" "m"))
+                           (match_operand:SI 1 "bte_operand" "K"))
+                       (label_ref (match_operand 2 "" ""))
+                       (pc)))
+    (match_scratch:SI 3 "=r")]
+   ""
+   "ld.s %0,%3;btne %1,%3,%2")
+
+ (define_insn ""
+   [(set (pc)
+         (if_then_else (eq (zero_extend:SI (match_operand:HI 0 "memory_operand" "m"))
+                           (match_operand:SI 1 "bte_operand" "K"))
+                       (pc)
+                       (label_ref (match_operand 2 "" ""))))
+    (match_scratch:SI 3 "=r")]
+   ""
+   "ld.s %0,%3;btne %1,%3,%2")
+
+ (define_insn ""
+   [(set (pc)
+         (if_then_else (ne (zero_extend:SI (match_operand:HI 0 "memory_operand" "m"))
+                           (match_operand:SI 1 "bte_operand" "K"))
+                       (pc)
+                       (label_ref (match_operand 2 "" ""))))
+    (match_scratch:SI 3 "=r")]
+   ""
+   "ld.s %0,%3;bte %1,%3,%2")
+
+
+ ;; Generation of conditionals.
+
+ ;; We save the compare operands in the cmpxx patterns and use them when
+ ;; we generate the branch.
+
+ (define_expand "cmpsi"
+   [(set (cc0) (compare (match_operand:SI 0 "register_operand" "")
+ 		       (match_operand:SI 1 "compare_operand" "")))]
+   ""
+   "
+ { i860_compare_op0 = operands[0];
+   i860_compare_op1 = operands[1];
+   DONE;
+ }")
+
+ (define_expand "cmpsf"
+   [(set (cc0) (compare (match_operand:SF 0 "register_operand" "")
+ 		       (match_operand:SF 1 "register_operand" "")))]
+   ""
+   "
+ { i860_compare_op0 = operands[0];
+   i860_compare_op1 = operands[1];
+   DONE;
+ }")
+
+ (define_expand "cmpdf"
+   [(set (cc0) (compare (match_operand:DF 0 "register_operand" "")
+ 		       (match_operand:DF 1 "register_operand" "")))]
+   ""
+   "
+ { i860_compare_op0 = operands[0];
+   i860_compare_op1 = operands[1];
+   DONE;
+ }")
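+
+ ;; So a source comparison such as `if (a < b)' expands in two steps: the
+ ;; cmpsi/cmpsf/cmpdf expander above merely records the operands in
+ ;; i860_compare_op0/op1, and the branch expander that follows (blt in this
+ ;; case) emits the real compare insn together with the conditional branch.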
+
+ ;; These are the standard-named conditional branch patterns.
+ ;; Detailed comments are found in the first one only.
+
+ (define_expand "beq"
+   [(set (pc)
+ 	(if_then_else (eq (cc0)
+ 			  (const_int 0))
+ 		      (label_ref (match_operand 0 "" ""))
+ 		      (pc)))]
+   ""
+   "
+ {
+   /* Emit a single-condition compare insn according to
+      the type of operands and the condition to be tested.  */
+
+   if (GET_MODE_CLASS (GET_MODE (i860_compare_op0)) == MODE_INT)
+     emit_insn (gen_cmpeqsi (i860_compare_op0, i860_compare_op1));
+   else if (GET_MODE (i860_compare_op0) == SFmode)
+     emit_insn (gen_cmpeqsf (i860_compare_op0, i860_compare_op1));
+   else if (GET_MODE (i860_compare_op0) == DFmode)
+     emit_insn (gen_cmpeqdf (i860_compare_op0, i860_compare_op1));
+   else
+     abort ();
+
+   /* Emit branch-if-true.  */
+
+   emit_jump_insn (gen_flipped_inverse_cbranch (operands[0]));
+   DONE;
+ }")
+
+ (define_expand "bne"
+   [(set (pc)
+ 	(if_then_else (ne (cc0)
+ 			  (const_int 0))
+ 		      (label_ref (match_operand 0 "" ""))
+ 		      (pc)))]
+   ""
+   "
+ {
+   if (GET_MODE_CLASS (GET_MODE (i860_compare_op0)) == MODE_INT)
+     emit_insn (gen_cmpeqsi (i860_compare_op0, i860_compare_op1));
+   else if (GET_MODE (i860_compare_op0) == SFmode)
+     emit_insn (gen_cmpeqsf (i860_compare_op0, i860_compare_op1));
+   else if (GET_MODE (i860_compare_op0) == DFmode)
+     emit_insn (gen_cmpeqdf (i860_compare_op0, i860_compare_op1));
+   else
+     abort ();
+
+   emit_jump_insn (gen_flipped_cbranch (operands[0]));
+
+   DONE;
+ }")
+
+ (define_expand "bgt"
+   [(set (pc)
+ 	(if_then_else (gt (cc0)
+ 			  (const_int 0))
+ 		      (label_ref (match_operand 0 "" ""))
+ 		      (pc)))]
+   ""
+   "
+ {
+   if (GET_MODE_CLASS (GET_MODE (i860_compare_op0)) == MODE_INT)
+     emit_insn (gen_cmpgtsi (i860_compare_op0, i860_compare_op1));
+   else if (GET_MODE (i860_compare_op0) == SFmode)
+     emit_insn (gen_cmpgtsf (i860_compare_op0, i860_compare_op1));
+   else if (GET_MODE (i860_compare_op0) == DFmode)
+     emit_insn (gen_cmpgtdf (i860_compare_op0, i860_compare_op1));
+   else
+     abort ();
+
+   emit_jump_insn (gen_flipped_inverse_cbranch (operands[0]));
+   DONE;
+ }")
+
+ (define_expand "blt"
+   [(set (pc)
+ 	(if_then_else (lt (cc0)
+ 			  (const_int 0))
+ 		      (label_ref (match_operand 0 "" ""))
+ 		      (pc)))]
+   ""
+   "
+ {
+   if (GET_MODE_CLASS (GET_MODE (i860_compare_op0)) == MODE_INT)
+     emit_insn (gen_cmpltsi (i860_compare_op0, i860_compare_op1));
+   else if (GET_MODE (i860_compare_op0) == SFmode)
+     emit_insn (gen_cmpltsf (i860_compare_op0, i860_compare_op1));
+   else if (GET_MODE (i860_compare_op0) == DFmode)
+     emit_insn (gen_cmpltdf (i860_compare_op0, i860_compare_op1));
+   else
+     abort ();
+
+   emit_jump_insn (gen_flipped_inverse_cbranch (operands[0]));
+   DONE;
+ }")
+
+ (define_expand "ble"
+   [(set (pc)
+ 	(if_then_else (le (cc0)
+ 			  (const_int 0))
+ 		      (label_ref (match_operand 0 "" ""))
+ 		      (pc)))]
+   ""
+   "
+ {
+   if (GET_MODE_CLASS (GET_MODE (i860_compare_op0)) == MODE_INT)
+     {
+       emit_insn (gen_cmpgtsi (i860_compare_op0, i860_compare_op1));
+       emit_jump_insn (gen_flipped_cbranch (operands[0]));
+     }
+   else
+     {
+       if (GET_MODE (i860_compare_op0) == SFmode)
+ 	emit_insn (gen_cmplesf (i860_compare_op0, i860_compare_op1));
+       else if (GET_MODE (i860_compare_op0) == DFmode)
+ 	emit_insn (gen_cmpledf (i860_compare_op0, i860_compare_op1));
+       else
+ 	abort ();
+       emit_jump_insn (gen_flipped_inverse_cbranch (operands[0]));
+     }
+   DONE;
+ }")
+
+ (define_expand "bge"
+   [(set (pc)
+ 	(if_then_else (ge (cc0)
+ 			  (const_int 0))
+ 		      (label_ref (match_operand 0 "" ""))
+ 		      (pc)))]
+   ""
+   "
+ {
+   if (GET_MODE_CLASS (GET_MODE (i860_compare_op0)) == MODE_INT)
+     {
+       emit_insn (gen_cmpltsi (i860_compare_op0, i860_compare_op1));
+       emit_jump_insn (gen_flipped_cbranch (operands[0]));
+     }
+   else
+     {
+       if (GET_MODE (i860_compare_op0) == SFmode)
+ 	emit_insn (gen_cmpgesf (i860_compare_op0, i860_compare_op1));
+       else if (GET_MODE (i860_compare_op0) == DFmode)
+ 	emit_insn (gen_cmpgedf (i860_compare_op0, i860_compare_op1));
+       else
+ 	abort ();
+       emit_jump_insn (gen_flipped_inverse_cbranch (operands[0]));
+     }
+   DONE;
+ }")
+
+ (define_expand "bgtu"
+   [(set (pc)
+ 	(if_then_else (gtu (cc0)
+ 			   (const_int 0))
+ 		      (label_ref (match_operand 0 "" ""))
+ 		      (pc)))]
+   ""
+   "
+ {
+   if (GET_MODE_CLASS (GET_MODE (i860_compare_op0)) != MODE_INT)
+     abort ();
+
+   emit_insn (gen_cmpleusi (i860_compare_op0, i860_compare_op1));
+   emit_jump_insn (gen_flipped_cbranch (operands[0]));
+   DONE;
+ }")
+
+ (define_expand "bltu"
+   [(set (pc)
+ 	(if_then_else (ltu (cc0)
+ 			   (const_int 0))
+ 		      (label_ref (match_operand 0 "" ""))
+ 		      (pc)))]
+   ""
+   "
+ {
+   if (GET_MODE_CLASS (GET_MODE (i860_compare_op0)) != MODE_INT)
+     abort ();
+
+   emit_insn (gen_cmpgeusi (i860_compare_op0, i860_compare_op1));
+   emit_jump_insn (gen_flipped_cbranch (operands[0]));
+   DONE;
+ }")
+
+ (define_expand "bgeu"
+   [(set (pc)
+ 	(if_then_else (geu (cc0)
+ 			   (const_int 0))
+ 		      (label_ref (match_operand 0 "" ""))
+ 		      (pc)))]
+   ""
+   "
+ {
+   if (GET_MODE_CLASS (GET_MODE (i860_compare_op0)) != MODE_INT)
+     abort ();
+
+   emit_insn (gen_cmpgeusi (i860_compare_op0, i860_compare_op1));
+   emit_jump_insn (gen_flipped_inverse_cbranch (operands[0]));
+   DONE;
+ }")
+
+ (define_expand "bleu"
+   [(set (pc)
+ 	(if_then_else (leu (cc0)
+ 			   (const_int 0))
+ 		      (label_ref (match_operand 0 "" ""))
+ 		      (pc)))]
+   ""
+   "
+ {
+   if (GET_MODE_CLASS (GET_MODE (i860_compare_op0)) != MODE_INT)
+     abort ();
+
+   emit_insn (gen_cmpleusi (i860_compare_op0, i860_compare_op1));
+   emit_jump_insn (gen_flipped_inverse_cbranch (operands[0]));
+   DONE;
+ }")
+
+ ;; Move instructions
+
+ ;; Note that source operands for `mov' pseudo-instructions are no longer
+ ;; allowed (by the svr4 assembler) to be "big" things, i.e. constants that
+ ;; won't fit in 16 bits.  (This includes any sort of relocatable address
+ ;; also.)  Thus, we must use an explicit orh/or pair of instructions if
+ ;; the source operand is something "big".
+
+ (define_insn "movsi"
+   [(set (match_operand:SI 0 "general_operand" "=r,m,f")
+ 	(match_operand:SI 1 "general_operand" "rmif,rfJ,rmfJ"))]
+   ""
+   "*
+ {
+   if (GET_CODE (operands[0]) == MEM)
+     {
+       if (CONSTANT_ADDRESS_P (XEXP (operands[0], 0)))
+ 	return output_store (operands);
+       if (FP_REG_P (operands[1]))
+ 	return \"fst.l %1,%0\";
+       return \"st.l %r1,%0\";
+     }
+   if (GET_CODE (operands[1]) == MEM)
+     {
+       if (CONSTANT_ADDRESS_P (XEXP (operands[1], 0)))
+ 	return output_load (operands);
+       if (FP_REG_P (operands[0]))
+ 	return \"fld.l %1,%0\";
+       return \"ld.l %1,%0\";
+     }
+   if (FP_REG_P (operands[1]) && FP_REG_P (operands[0]))
+     return \"fmov.ss %1,%0\";
+   if (FP_REG_P (operands[1]))
+     return \"fxfr %1,%0\";
+   if (FP_REG_P (operands[0]) && operands[1] == const0_rtx)
+     return \"fmov.ss %?f0,%0\";
+   if (FP_REG_P (operands[0]))
+     return \"ixfr %1,%0\";
+
+   if (GET_CODE (operands[1]) == REG)
+     return \"shl %?r0,%1,%0\";
+
+   CC_STATUS_PARTIAL_INIT;
+
+   if (GET_CODE (operands[1]) == CONST_INT)
+     {
+       if ((INTVAL (operands[1]) & 0xffff0000) == 0)
+         return \"or %L1,%?r0,%0\";
+       if ((INTVAL (operands[1]) & 0x0000ffff) == 0)
+         return \"orh %H1,%?r0,%0\";
+     }
+   return \"orh %H1,%?r0,%0\;or %L1,%0,%0\";
+ }")
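+
+ ;; By way of example, constant loads from the movsi pattern above:
+ ;;   (const_int 0x1234)      ->  or %L1,%?r0,%0             (high half zero)
+ ;;   (const_int 0x12340000)  ->  orh %H1,%?r0,%0            (low half zero)
+ ;;   (const_int 0x12345678)  ->  orh %H1,%?r0,%0 ; or %L1,%0,%0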
+
+ (define_insn "movhi"
+   [(set (match_operand:HI 0 "general_operand" "=r,m,!*f,!r")
+ 	(match_operand:HI 1 "general_operand" "rmi,rJ,rJ*f,*f"))]
+   ""
+   "*
+ {
+   if (GET_CODE (operands[0]) == MEM)
+     {
+       if (CONSTANT_ADDRESS_P (XEXP (operands[0], 0)))
+ 	return output_store (operands);
+       return \"st.s %r1,%0\";
+     }
+   if (GET_CODE (operands[1]) == MEM)
+     {
+       if (CONSTANT_ADDRESS_P (XEXP (operands[1], 0)))
+ 	return output_load (operands);
+       return \"ld.s %1,%0\";
+     }
+   if (FP_REG_P (operands[1]) && FP_REG_P (operands[0]))
+     return \"fmov.ss %1,%0\";
+   if (FP_REG_P (operands[1]))
+     return \"fxfr %1,%0\";
+   if (FP_REG_P (operands[0]) && operands[1] == const0_rtx)
+     return \"fmov.ss %?f0,%0\";
+   if (FP_REG_P (operands[0]))
+     return \"ixfr %1,%0\";
+
+   if (GET_CODE (operands[1]) == REG)
+     return \"shl %?r0,%1,%0\";
+
+   CC_STATUS_PARTIAL_INIT;
+
+   return \"or %L1,%?r0,%0\";
+ }")
+
+ (define_insn "movqi"
+   [(set (match_operand:QI 0 "general_operand" "=r,m,!*f,!r")
+ 	(match_operand:QI 1 "general_operand" "rmi,rJ,rJ*f,*f"))]
+   ""
+   "*
+ {
+   if (GET_CODE (operands[0]) == MEM)
+     {
+       if (CONSTANT_ADDRESS_P (XEXP (operands[0], 0)))
+ 	return output_store (operands);
+       return \"st.b %r1,%0\";
+     }
+   if (GET_CODE (operands[1]) == MEM)
+     {
+       if (CONSTANT_ADDRESS_P (XEXP (operands[1], 0)))
+ 	return output_load (operands);
+       return \"ld.b %1,%0\";
+     }
+   if (FP_REG_P (operands[1]) && FP_REG_P (operands[0]))
+     return \"fmov.ss %1,%0\";
+   if (FP_REG_P (operands[1]))
+     return \"fxfr %1,%0\";
+   if (FP_REG_P (operands[0]) && operands[1] == const0_rtx)
+     return \"fmov.ss %?f0,%0\";
+   if (FP_REG_P (operands[0]))
+     return \"ixfr %1,%0\";
+
+   if (GET_CODE (operands[1]) == REG)
+     return \"shl %?r0,%1,%0\";
+
+   CC_STATUS_PARTIAL_INIT;
+
+   return \"or %L1,%?r0,%0\";
+ }")
+
+ ;; The definition of this insn does not really explain what it does,
+ ;; but it should suffice
+ ;; that anything generated as this insn will be recognized as one
+ ;; and that it won't successfully combine with anything.
+ (define_expand "movstrsi"
+   [(parallel [(set (match_operand:BLK 0 "general_operand" "")
+ 		   (match_operand:BLK 1 "general_operand" ""))
+ 	      (use (match_operand:SI 2 "nonmemory_operand" ""))
+ 	      (use (match_operand:SI 3 "immediate_operand" ""))
+ 	      (clobber (match_dup 4))
+ 	      (clobber (match_dup 5))
+ 	      (clobber (match_dup 6))
+ 	      (clobber (match_dup 7))
+ 	      (clobber (match_dup 8))])]
+   ""
+   "
+ {
+   operands[4] = gen_reg_rtx (SImode);
+   operands[5] = gen_reg_rtx (SImode);
+   operands[6] = gen_reg_rtx (SImode);
+   operands[7] = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
+   operands[8] = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
+
+   operands[0] = replace_equiv_address (operands[0], operands[7]);
+   operands[1] = replace_equiv_address (operands[1], operands[8]);
+ }")
+
+ (define_insn ""
+   [(set (mem:BLK (match_operand:SI 0 "register_operand" "r"))
+ 	(mem:BLK (match_operand:SI 1 "register_operand" "r")))
+    (use (match_operand:SI 2 "general_operand" "rn"))
+    (use (match_operand:SI 3 "immediate_operand" "i"))
+    (clobber (match_operand:SI 4 "register_operand" "=r"))
+    (clobber (match_operand:SI 5 "register_operand" "=r"))
+    (clobber (match_operand:SI 6 "register_operand" "=r"))
+    (clobber (match_dup 0))
+    (clobber (match_dup 1))]
+   ""
+   "* return output_block_move (operands);")
+
+ ;; Floating point move insns
+
+ ;; This pattern forces (set (reg:DF ...) (const_double ...))
+ ;; to be reloaded by putting the constant into memory.
+ ;; It must come before the more general movdf pattern.
+ (define_insn ""
+   [(set (match_operand:DF 0 "general_operand" "=r,f,o")
+ 	(match_operand:DF 1 "" "mG,m,G"))]
+   "GET_CODE (operands[1]) == CONST_DOUBLE"
+   "*
+ {
+   if (FP_REG_P (operands[0]) || operands[1] == CONST0_RTX (DFmode))
+     return output_fp_move_double (operands);
+   return output_move_double (operands);
+ }")
+
+ (define_insn "movdf"
+   [(set (match_operand:DF 0 "general_operand" "=*rm,*r,?f,?*rm")
+ 	(match_operand:DF 1 "general_operand" "*r,m,*rfmG,f"))]
+   ""
+   "*
+ {
+   if (GET_CODE (operands[0]) == MEM
+       && CONSTANT_ADDRESS_P (XEXP (operands[0], 0)))
+     return output_store (operands);
+   if (GET_CODE (operands[1]) == MEM
+       && CONSTANT_ADDRESS_P (XEXP (operands[1], 0)))
+     return output_load (operands);
+
+   if (FP_REG_P (operands[0]) || FP_REG_P (operands[1]))
+     return output_fp_move_double (operands);
+   return output_move_double (operands);
+ }")
+
+ (define_insn "movdi"
+   [(set (match_operand:DI 0 "general_operand" "=rm,r,?f,?rm")
+ 	(match_operand:DI 1 "general_operand" "r,miF,rfmG,f"))]
+   ""
+   "*
+ {
+   if (GET_CODE (operands[0]) == MEM
+       && CONSTANT_ADDRESS_P (XEXP (operands[0], 0)))
+     return output_store (operands);
+   if (GET_CODE (operands[1]) == MEM
+       && CONSTANT_ADDRESS_P (XEXP (operands[1], 0)))
+     return output_load (operands);
+
+   /* ??? How can we have a DFmode arg here with DImode above? */
+   if (FP_REG_P (operands[0]) && operands[1] == CONST0_RTX (DFmode))
+     return \"fmov.dd %?f0,%0\";
+
+   if (FP_REG_P (operands[0]) || FP_REG_P (operands[1]))
+     return output_fp_move_double (operands);
+   return output_move_double (operands);
+ }")
+
+ ;; The alternative m/r is separate from m/f
+ ;; The first alternative is separate from the second for the same reason.
+ (define_insn "movsf"
+   [(set (match_operand:SF 0 "general_operand" "=*rf,*rf,*r,m,m")
+ 	(match_operand:SF 1 "general_operand" "*r,fmG,F,*r,f"))]
+   ""
+   "*
+ {
+   if (GET_CODE (operands[0]) == MEM
+       && CONSTANT_ADDRESS_P (XEXP (operands[0], 0)))
+     return output_store (operands);
+   if (GET_CODE (operands[1]) == MEM
+       && CONSTANT_ADDRESS_P (XEXP (operands[1], 0)))
+     return output_load (operands);
+   if (FP_REG_P (operands[0]))
+     {
+       if (FP_REG_P (operands[1]))
+ 	return \"fmov.ss %1,%0\";
+       if (GET_CODE (operands[1]) == REG)
+ 	return \"ixfr %1,%0\";
+       if (operands[1] == CONST0_RTX (SFmode))
+         return \"fmov.ss %?f0,%0\";
+       if (CONSTANT_ADDRESS_P (XEXP (operands[1], 0)))
+ 	{
+ 	  if (! ((cc_prev_status.flags & CC_KNOW_HI_R31)
+ 		 && (cc_prev_status.flags & CC_HI_R31_ADJ)
+ 		 && cc_prev_status.mdep == XEXP(operands[1],0)))
+ 	    {
+ 	      CC_STATUS_INIT;
+ 	      cc_status.flags |= CC_KNOW_HI_R31 | CC_HI_R31_ADJ;
+ 	      cc_status.mdep = XEXP (operands[1], 0);
+ 	      return \"orh %h1,%?r0,%?r31\;fld.l %L1(%?r31),%0\";
+ 	    }
+ 	  return \"fld.l %L1(%?r31),%0\";
+ 	}
+       return \"fld.l %1,%0\";
+     }
+   if (FP_REG_P (operands[1]) || GET_CODE (operands[1]) == CONST_DOUBLE)
+     {
+       if (GET_CODE (operands[0]) == REG && FP_REG_P (operands[1]))
+ 	return \"fxfr %1,%0\";
+       if (GET_CODE (operands[0]) == REG)
+ 	{
+ 	  CC_STATUS_PARTIAL_INIT;
+ 	  if (GET_CODE (operands[1]) == CONST_DOUBLE)
+ 	    {
+ 	      register unsigned long ul;
+
+               ul = sfmode_constant_to_ulong (operands[1]);
+ 	      if ((ul & 0x0000ffff) == 0)
+ 		return \"orh %H1,%?r0,%0\";
+ 	      if ((ul & 0xffff0000) == 0)
+ 		return \"or %L1,%?r0,%0\";
+ 	    }
+           return \"orh %H1,%?r0,%0\;or %L1,%0,%0\";
+ 	}
+       /* Now operand 0 must be memory.
+          If operand 1 is CONST_DOUBLE, its value must be 0.  */
+       if (CONSTANT_ADDRESS_P (XEXP (operands[0], 0)))
+ 	{
+ 	  if (! ((cc_prev_status.flags & CC_KNOW_HI_R31)
+ 		 && (cc_prev_status.flags & CC_HI_R31_ADJ)
+ 		 && XEXP (operands[0], 0) == cc_prev_status.mdep))
+ 	    {
+ 	      CC_STATUS_INIT;
+ 	      cc_status.flags |= CC_KNOW_HI_R31 | CC_HI_R31_ADJ;
+ 	      cc_status.mdep = XEXP (operands[0], 0);
+ 	      output_asm_insn (\"orh %h0,%?r0,%?r31\", operands);
+ 	    }
+ 	  return \"fst.l %r1,%L0(%?r31)\";
+ 	}
+       return \"fst.l %r1,%0\";
+     }
+   if (GET_CODE (operands[0]) == MEM)
+     return \"st.l %r1,%0\";
+   if (GET_CODE (operands[1]) == MEM)
+     return \"ld.l %1,%0\";
+   if (operands[1] == CONST0_RTX (SFmode))
+     return \"shl %?r0,%?r0,%0\";
+   return \"mov %1,%0\";
+ }")
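+
+ ;; Note on the CC_KNOW_HI_R31/CC_HI_R31_ADJ bookkeeping above (also used
+ ;; by several patterns below): when an absolute address is accessed, its
+ ;; high half is loaded into r31 with `orh' and the address is recorded in
+ ;; cc_status.mdep, so a subsequent access to the same address can reuse
+ ;; r31 and omit the redundant `orh'.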
+
+ ;; Special load insns for REG+REG addresses.
+ ;; Such addresses are not "legitimate" because st rejects them.
+
+ (define_insn ""
+   [(set (match_operand:DF 0 "register_operand" "=rf")
+ 	(match_operand:DF 1 "indexed_operand" "m"))]
+   ""
+   "*
+ {
+   if (FP_REG_P (operands[0]))
+     return output_fp_move_double (operands);
+   return output_move_double (operands);
+ }")
+
+ (define_insn ""
+   [(set (match_operand:SF 0 "register_operand" "=rf")
+ 	(match_operand:SF 1 "indexed_operand" "m"))]
+   ""
+   "*
+ {
+   if (FP_REG_P (operands[0]))
+     return \"fld.l %1,%0\";
+   return \"ld.l %1,%0\";
+ }")
+
+ (define_insn ""
+   [(set (match_operand:SI 0 "register_operand" "=rf")
+ 	(match_operand:SI 1 "indexed_operand" "m"))]
+   ""
+   "*
+ {
+   if (FP_REG_P (operands[0]))
+     return \"fld.l %1,%0\";
+   return \"ld.l %1,%0\";
+ }")
+
+ (define_insn ""
+   [(set (match_operand:HI 0 "register_operand" "=r")
+ 	(match_operand:HI 1 "indexed_operand" "m"))]
+   ""
+   "ld.s %1,%0")
+
+ (define_insn ""
+   [(set (match_operand:QI 0 "register_operand" "=r")
+ 	(match_operand:QI 1 "indexed_operand" "m"))]
+   ""
+   "ld.b %1,%0")
+
+ ;; Likewise for floating-point store insns.
+
+ (define_insn ""
+   [(set (match_operand:DF 0 "indexed_operand" "=m")
+ 	(match_operand:DF 1 "register_operand" "f"))]
+   ""
+   "fst.d %1,%0")
+
+ (define_insn ""
+   [(set (match_operand:SF 0 "indexed_operand" "=m")
+ 	(match_operand:SF 1 "register_operand" "f"))]
+   ""
+   "fst.l %1,%0")
+
+ ;;- truncation instructions
+ (define_insn "truncsiqi2"
+   [(set (match_operand:QI 0 "general_operand" "=g")
+ 	(truncate:QI
+ 	 (match_operand:SI 1 "register_operand" "r")))]
+   ""
+   "*
+ {
+   if (GET_CODE (operands[0]) == MEM)
+   {
+     if (CONSTANT_ADDRESS_P (XEXP (operands[0], 0)))
+       {
+ 	if (! ((cc_prev_status.flags & CC_KNOW_HI_R31)
+ 	       && (cc_prev_status.flags & CC_HI_R31_ADJ)
+ 	       && XEXP (operands[0], 0) == cc_prev_status.mdep))
+ 	  {
+ 	    CC_STATUS_INIT;
+ 	    cc_status.flags |= CC_KNOW_HI_R31 | CC_HI_R31_ADJ;
+ 	    cc_status.mdep = XEXP (operands[0], 0);
+ 	    output_asm_insn (\"orh %h0,%?r0,%?r31\", operands);
+ 	  }
+ 	return \"st.b %1,%L0(%?r31)\";
+       }
+     else
+       return \"st.b %1,%0\";
+   }
+   return \"shl %?r0,%1,%0\";
+ }")
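+
+ ;; In the register-destination case, the `shl %?r0,%1,%0' above shifts
+ ;; by register r0, which always reads as zero on the i860, so it is
+ ;; simply a register-to-register copy (the same idiom loads 0.0 in the
+ ;; movsf pattern).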
+
+ (define_insn "trunchiqi2"
+   [(set (match_operand:QI 0 "general_operand" "=g")
+ 	(truncate:QI
+ 	 (match_operand:HI 1 "register_operand" "r")))]
+   ""
+   "*
+ {
+   if (GET_CODE (operands[0]) == MEM)
+   {
+     if (CONSTANT_ADDRESS_P (XEXP (operands[0], 0)))
+       {
+ 	if (! ((cc_prev_status.flags & CC_KNOW_HI_R31)
+ 	       && (cc_prev_status.flags & CC_HI_R31_ADJ)
+ 	       && XEXP (operands[0], 0) == cc_prev_status.mdep))
+ 	  {
+ 	    CC_STATUS_INIT;
+ 	    cc_status.flags |= CC_KNOW_HI_R31 | CC_HI_R31_ADJ;
+ 	    cc_status.mdep = XEXP (operands[0], 0);
+ 	    output_asm_insn (\"orh %h0,%?r0,%?r31\", operands);
+ 	  }
+ 	return \"st.b %1,%L0(%?r31)\";
+       }
+     else
+       return \"st.b %1,%0\";
+   }
+   return \"shl %?r0,%1,%0\";
+ }")
+
+ (define_insn "truncsihi2"
+   [(set (match_operand:HI 0 "general_operand" "=g")
+ 	(truncate:HI
+ 	 (match_operand:SI 1 "register_operand" "r")))]
+   ""
+   "*
+ {
+   if (GET_CODE (operands[0]) == MEM)
+   {
+     if (CONSTANT_ADDRESS_P (XEXP (operands[0], 0)))
+       {
+ 	if (! ((cc_prev_status.flags & CC_KNOW_HI_R31)
+ 	       && (cc_prev_status.flags & CC_HI_R31_ADJ)
+ 	       && XEXP (operands[0], 0) == cc_prev_status.mdep))
+ 	  {
+ 	    CC_STATUS_INIT;
+ 	    cc_status.flags |= CC_KNOW_HI_R31 | CC_HI_R31_ADJ;
+ 	    cc_status.mdep = XEXP (operands[0], 0);
+ 	    output_asm_insn (\"orh %h0,%?r0,%?r31\", operands);
+ 	  }
+ 	return \"st.s %1,%L0(%?r31)\";
+       }
+     else
+       return \"st.s %1,%0\";
+   }
+   return \"shl %?r0,%1,%0\";
+ }")
+
+ ;;- zero extension instructions
+
+ (define_insn "zero_extendhisi2"
+   [(set (match_operand:SI 0 "register_operand" "=r")
+ 	(zero_extend:SI
+ 	 (match_operand:HI 1 "register_operand" "r")))]
+   ""
+   "*
+ {
+   CC_STATUS_PARTIAL_INIT;
+   return \"and 0xffff,%1,%0\";
+ }")
+
+ (define_insn "zero_extendqihi2"
+   [(set (match_operand:HI 0 "register_operand" "=r")
+ 	(zero_extend:HI
+ 	 (match_operand:QI 1 "register_operand" "r")))]
+   ""
+   "*
+ {
+   CC_STATUS_PARTIAL_INIT;
+   return \"and 0xff,%1,%0\";
+ }")
+
+ (define_insn "zero_extendqisi2"
+   [(set (match_operand:SI 0 "register_operand" "=r")
+ 	(zero_extend:SI
+ 	 (match_operand:QI 1 "register_operand" "r")))]
+   ""
+   "*
+ {
+   CC_STATUS_PARTIAL_INIT;
+   return \"and 0xff,%1,%0\";
+ }")
+
+ ;; Sign extension instructions.
+
+ (define_insn ""
+   [(set (match_operand:SI 0 "register_operand" "=r")
+ 	(sign_extend:SI
+ 	 (match_operand:HI 1 "indexed_operand" "m")))]
+   ""
+   "ld.s %1,%0")
+
+ (define_insn ""
+   [(set (match_operand:HI 0 "register_operand" "=r")
+ 	(sign_extend:HI
+ 	 (match_operand:QI 1 "indexed_operand" "m")))]
+   ""
+   "ld.b %1,%0")
+
+ (define_insn ""
+   [(set (match_operand:SI 0 "register_operand" "=r")
+ 	(sign_extend:SI
+ 	 (match_operand:QI 1 "indexed_operand" "m")))]
+   ""
+   "ld.b %1,%0")
+
+ (define_insn "extendhisi2"
+   [(set (match_operand:SI 0 "register_operand" "=r")
+ 	(sign_extend:SI
+ 	 (match_operand:HI 1 "nonimmediate_operand" "mr")))]
+   ""
+   "*
+ {
+   if (REG_P (operands[1]))
+     return \"shl 16,%1,%0\;shra 16,%0,%0\";
+   if (GET_CODE (operands[1]) == CONST_INT)
+     abort ();
+   if (CONSTANT_ADDRESS_P (XEXP (operands[1], 0)))
+     {
+       CC_STATUS_INIT;
+       cc_status.flags |= CC_KNOW_HI_R31 | CC_HI_R31_ADJ;
+       cc_status.mdep = XEXP (operands[1], 0);
+       return \"orh %h1,%?r0,%?r31\;ld.s %L1(%?r31),%0\";
+     }
+   else
+     return \"ld.s %1,%0\";
+ }")
+
+ (define_insn "extendqihi2"
+   [(set (match_operand:HI 0 "register_operand" "=r")
+ 	(sign_extend:HI
+ 	 (match_operand:QI 1 "nonimmediate_operand" "mr")))]
+   ""
+   "*
+ {
+   if (REG_P (operands[1]))
+     return \"shl 24,%1,%0\;shra 24,%0,%0\";
+   if (GET_CODE (operands[1]) == CONST_INT)
+     abort ();
+   if (CONSTANT_ADDRESS_P (XEXP (operands[1], 0)))
+     {
+       CC_STATUS_INIT;
+       cc_status.flags |= CC_KNOW_HI_R31 | CC_HI_R31_ADJ;
+       cc_status.mdep = XEXP (operands[1], 0);
+       return \"orh %h1,%?r0,%?r31\;ld.b %L1(%?r31),%0\";
+     }
+   else
+     return \"ld.b %1,%0\";
+ }")
+
+ (define_insn "extendqisi2"
+   [(set (match_operand:SI 0 "register_operand" "=r")
+ 	(sign_extend:SI
+ 	 (match_operand:QI 1 "nonimmediate_operand" "mr")))]
+   ""
+   "*
+ {
+   if (REG_P (operands[1]))
+     return \"shl 24,%1,%0\;shra 24,%0,%0\";
+   if (GET_CODE (operands[1]) == CONST_INT)
+     abort ();
+   if (CONSTANT_ADDRESS_P (XEXP (operands[1], 0)))
+     {
+       CC_STATUS_INIT;
+       cc_status.flags |= CC_KNOW_HI_R31 | CC_HI_R31_ADJ;
+       cc_status.mdep = XEXP (operands[1], 0);
+       return \"orh %h1,%?r0,%?r31\;ld.b %L1(%?r31),%0\";
+     }
+   else
+     return \"ld.b %1,%0\";
+ }")
+
+ ;; Signed bitfield extractions come out looking like
+ ;;	(shiftrt (sign_extend (shift <Y> <C1>)) <C2>)
+ ;; which we expand poorly as four shift insns.
+ ;; These patterns yield two shifts:
+ ;;	(shiftrt (shift <Y> <C3>) <C4>)
+ (define_insn ""
+   [(set (match_operand:SI 0 "register_operand" "=r")
+ 	(ashiftrt:SI
+ 	 (sign_extend:SI
+ 	  (match_operand:QI 1 "register_operand" "r"))
+ 	 (match_operand:SI 2 "logic_int" "n")))]
+   "INTVAL (operands[2]) < 8"
+   "*
+ {
+   return \"shl 24,%1,%0\;shra 24+%2,%0,%0\";
+ }")
+
+ (define_insn ""
+   [(set (match_operand:SI 0 "register_operand" "=r")
+ 	(ashiftrt:SI
+ 	 (sign_extend:SI
+ 	  (subreg:QI (ashift:SI (match_operand:SI 1 "register_operand" "r")
+ 				(match_operand:SI 2 "logic_int" "n")) 0))
+ 	 (match_operand:SI 3 "logic_int" "n")))]
+   "INTVAL (operands[3]) < 8"
+   "*
+ {
+   return \"shl 0x18+%2,%1,%0\;shra 0x18+%3,%0,%0\";
+ }")
+
+ (define_insn ""
+   [(set (match_operand:SI 0 "register_operand" "=r")
+ 	(ashiftrt:SI
+ 	 (sign_extend:SI
+ 	  (ashift:QI (match_operand:QI 1 "register_operand" "r")
+ 		     (match_operand:QI 2 "logic_int" "n")))
+ 	 (match_operand:SI 3 "logic_int" "n")))]
+   "INTVAL (operands[3]) < 8"
+   "*
+ {
+   return \"shl 0x18+%2,%1,%0\;shra 0x18+%3,%0,%0\";
+ }")
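+
+ ;; For example, a signed 3-bit field at bit 2 of a register, matched by
+ ;; the second pattern above as
+ ;;	(ashiftrt (sign_extend (subreg:QI (ashift X 3) 0)) 5),
+ ;; comes out as `shl 27' followed by `shra 29' instead of four shifts.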
+
+ ;; Special patterns for optimizing bit-field instructions.
+
+ ;; The first two patterns are for bitfields that came from memory,
+ ;; testing only the high bit.  They work with the old combiner.
+
+ (define_insn ""
+   [(set (cc0)
+ 	(eq (zero_extend:SI (subreg:QI (lshiftrt:SI (match_operand:SI 0 "register_operand" "r")
+ 						    (const_int 7)) 0))
+ 	    (const_int 0)))]
+   ""
+   "*
+ {
+   CC_STATUS_PARTIAL_INIT;
+   return \"and 128,%0,%?r0\";
+ }")
+
+ (define_insn ""
+   [(set (cc0)
+ 	(eq (sign_extend:SI (subreg:QI (ashiftrt:SI (match_operand:SI 0 "register_operand" "r")
+ 						    (const_int 7)) 0))
+ 	    (const_int 0)))]
+   ""
+   "*
+ {
+   CC_STATUS_PARTIAL_INIT;
+   return \"and 128,%0,%?r0\";
+ }")
+
+ ;; The next two patterns are good for bitfields coming from memory
+ ;; (via a pseudo-register) or from a register, though this optimization
+ ;; is only good for values contained wholly within the bottom 13 bits.
+ (define_insn ""
+   [(set (cc0)
+ 	(eq
+ 	 (and:SI (lshiftrt:SI (match_operand:SI 0 "register_operand" "r")
+ 			      (match_operand:SI 1 "logic_int" "n"))
+ 		 (match_operand:SI 2 "logic_int" "n"))
+ 	 (const_int 0)))]
+   "LOGIC_INTVAL (INTVAL (operands[2]) << INTVAL (operands[1]))"
+   "*
+ {
+   CC_STATUS_PARTIAL_INIT;
+   operands[2] = GEN_INT (INTVAL (operands[2]) << INTVAL (operands[1]));
+   return \"and %2,%0,%?r0\";
+ }")
+
+ (define_insn ""
+   [(set (cc0)
+ 	(eq
+ 	 (and:SI (ashiftrt:SI (match_operand:SI 0 "register_operand" "r")
+ 			      (match_operand:SI 1 "logic_int" "n"))
+ 		 (match_operand:SI 2 "logic_int" "n"))
+ 	 (const_int 0)))]
+   "LOGIC_INTVAL (INTVAL (operands[2]) << INTVAL (operands[1]))"
+   "*
+ {
+   CC_STATUS_PARTIAL_INIT;
+   operands[2] = GEN_INT (INTVAL (operands[2]) << INTVAL (operands[1]));
+   return \"and %2,%0,%?r0\";
+ }")
+
+ ;; Conversions between float and double.
+
+ (define_insn "extendsfdf2"
+   [(set (match_operand:DF 0 "register_operand" "=f")
+ 	(float_extend:DF
+ 	 (match_operand:SF 1 "register_operand" "f")))]
+   ""
+   "fmov.sd %1,%0")
+
+ (define_insn "truncdfsf2"
+   [(set (match_operand:SF 0 "register_operand" "=f")
+ 	(float_truncate:SF
+ 	 (match_operand:DF 1 "register_operand" "f")))]
+   ""
+   "fmov.ds %1,%0")
+
+ ;; Conversion between fixed point and floating point.
+ ;; Note that among the fix-to-float insns
+ ;; the ones that start with SImode come first.
+ ;; That is so that an operand that is a CONST_INT
+ ;; (and therefore lacks a specific machine mode)
+ ;; will be recognized as SImode (which is always valid)
+ ;; rather than as QImode or HImode.
+
+ ;; This pattern forces (set (reg:SF ...) (float:SF (const_int ...)))
+ ;; to be reloaded by putting the constant into memory.
+ ;; It must come before the more general floatsisf2 pattern.
+ (define_expand "floatsidf2"
+   [(set (match_dup 2) (match_dup 3))
+    (set (match_dup 4) (xor:SI (match_operand:SI 1 "register_operand" "")
+ 			      (const_int -2147483648)))
+    (set (match_dup 5) (match_dup 3))
+    (set (subreg:SI (match_dup 5) 0) (match_dup 4))
+    (set (match_operand:DF 0 "register_operand" "")
+ 	(minus:DF (match_dup 5) (match_dup 2)))]
+   ""
+   "
+ {
+   REAL_VALUE_TYPE d;
+   /* 4503601774854144 is  (1 << 30) * ((1 << 22) + (1 << 1)).  */
+   d = REAL_VALUE_ATOF (\"4503601774854144\", DFmode);
+   operands[2] = gen_reg_rtx (DFmode);
+   operands[3] = CONST_DOUBLE_FROM_REAL_VALUE (d, DFmode);
+   operands[4] = gen_reg_rtx (SImode);
+   operands[5] = gen_reg_rtx (DFmode);
+ }")
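+
+ ;; The expander above relies on the standard 2^52 + 2^31 trick:
+ ;; 4503601774854144 is the double with high word 0x43300000 and low word
+ ;; 0x80000000.  XORing the SImode operand with -2147483648 biases it by
+ ;; 2^31, and storing that into the low word of a copy of the constant
+ ;; (assuming word 0 is the low word here) gives the value
+ ;; 2^52 + 2^31 + op, so subtracting the original constant leaves the
+ ;; operand's signed value as a DFmode result.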
+
+ ;; Floating to fixed conversion.
+
+ (define_expand "fix_truncdfsi2"
+   ;; This first insn produces a double-word value
+   ;; in which only the low word is valid.
+   [(set (match_dup 2)
+ 	(fix:DI (fix:DF (match_operand:DF 1 "register_operand" "f"))))
+    (set (match_operand:SI 0 "register_operand" "=f")
+ 	(subreg:SI (match_dup 2) 0))]
+   ""
+   "
+ {
+   operands[2] = gen_reg_rtx (DImode);
+ }")
+
+ ;; Recognize the first insn generated above.
+ ;; This RTL looks like a fix_truncdfdi2 insn,
+ ;; but we don't call it that, because only 32 bits
+ ;; of the result are valid.
+ ;; This pattern will work for the intended purposes
+ ;; as long as we do not have any fixdfdi2 or fix_truncdfdi2.
+ (define_insn ""
+   [(set (match_operand:DI 0 "register_operand" "=f")
+ 	(fix:DI (fix:DF (match_operand:DF 1 "register_operand" "f"))))]
+   ""
+   "ftrunc.dd %1,%0")
+
+ (define_expand "fix_truncsfsi2"
+   ;; This first insn produces a double-word value
+   ;; in which only the low word is valid.
+   [(set (match_dup 2)
+ 	(fix:DI (fix:SF (match_operand:SF 1 "register_operand" "f"))))
+    (set (match_operand:SI 0 "register_operand" "=f")
+ 	(subreg:SI (match_dup 2) 0))]
+   ""
+   "
+ {
+   operands[2] = gen_reg_rtx (DImode);
+ }")
+
+ ;; Recognize the first insn generated above.
+ ;; This RTL looks like a fix_truncsfdi2 insn,
+ ;; but we don't call it that, because only 32 bits
+ ;; of the result are valid.
+ ;; This pattern will work for the intended purposes
+ ;; as long as we do not have any fixsfdi2 or fix_truncsfdi2.
+ (define_insn ""
+   [(set (match_operand:DI 0 "register_operand" "=f")
+ 	(fix:DI (fix:SF (match_operand:SF 1 "register_operand" "f"))))]
+   ""
+   "ftrunc.sd %1,%0")
+
+ ;;- arithmetic instructions
+
+ (define_insn "addsi3"
+   [(set (match_operand:SI 0 "register_operand" "=r,*f")
+ 	(plus:SI (match_operand:SI 1 "nonmemory_operand" "%r,*f")
+ 		 (match_operand:SI 2 "arith_operand" "rI,*f")))]
+   ""
+   "*
+ {
+   if (which_alternative == 1)
+     return \"fiadd.ss %2,%1,%0\";
+   CC_STATUS_PARTIAL_INIT;
+   return \"addu %2,%1,%0\";
+ }")
+
+ (define_insn "adddi3"
+   [(set (match_operand:DI 0 "register_operand" "=f")
+ 	(plus:DI (match_operand:DI 1 "register_operand" "%f")
+ 		 (match_operand:DI 2 "register_operand" "f")))]
+   ""
+   "fiadd.dd %1,%2,%0")
+
+ (define_insn "subsi3"
+   [(set (match_operand:SI 0 "register_operand" "=r,r,*f")
+ 	(minus:SI (match_operand:SI 1 "register_operand" "r,I,*f")
+ 		  (match_operand:SI 2 "arith_operand" "rI,r,*f")))]
+   ""
+   "*
+ {
+   if (which_alternative == 2)
+     return \"fisub.ss %1,%2,%0\";
+   CC_STATUS_PARTIAL_INIT;
+   if (REG_P (operands[2]))
+     return \"subu %1,%2,%0\";
+   operands[2] = GEN_INT (- INTVAL (operands[2]));
+   return \"addu %2,%1,%0\";
+ }")
+
+ (define_insn "subdi3"
+   [(set (match_operand:DI 0 "register_operand" "=f")
+ 	(minus:DI (match_operand:DI 1 "register_operand" "f")
+ 		  (match_operand:DI 2 "register_operand" "f")))]
+   ""
+   "fisub.dd %1,%2,%0")
+
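+ ;; Integer multiplication is done in the floating-point unit: the SImode
+ ;; operands are copied into one word of DImode floating-register
+ ;; temporaries and `fmlow.dd' forms the low 32 bits of the product there.
+ ;; The _big and _little expanders differ only in which word of the DImode
+ ;; register (subreg offset 4 vs. 0) holds the SImode value.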
+ (define_expand "mulsi3"
+   [(set (subreg:SI (match_dup 4) 0) (match_operand:SI 1 "general_operand" ""))
+    (set (subreg:SI (match_dup 5) 0) (match_operand:SI 2 "general_operand" ""))
+    (clobber (match_dup 3))
+    (set (subreg:SI (match_dup 3) 0)
+ 	(mult:SI (subreg:SI (match_dup 4) 0) (subreg:SI (match_dup 5) 0)))
+    (set (match_operand:SI 0 "register_operand" "") (subreg:SI (match_dup 3) 0))]
+   ""
+   "
+ {
+   if (WORDS_BIG_ENDIAN)
+     emit_insn (gen_mulsi3_big (operands[0], operands[1], operands[2]));
+   else
+     emit_insn (gen_mulsi3_little (operands[0], operands[1], operands[2]));
+   DONE;
+ }")
+
+ (define_expand "mulsi3_little"
+   [(set (subreg:SI (match_dup 4) 0) (match_operand:SI 1 "general_operand" ""))
+    (set (subreg:SI (match_dup 5) 0) (match_operand:SI 2 "general_operand" ""))
+    (clobber (match_dup 3))
+    (set (subreg:SI (match_dup 3) 0)
+ 	(mult:SI (subreg:SI (match_dup 4) 0) (subreg:SI (match_dup 5) 0)))
+    (set (match_operand:SI 0 "register_operand" "") (subreg:SI (match_dup 3) 0))]
+   "! WORDS_BIG_ENDIAN"
+   "
+ {
+   operands[3] = gen_reg_rtx (DImode);
+   operands[4] = gen_reg_rtx (DImode);
+   operands[5] = gen_reg_rtx (DImode);
+ }")
+
+ (define_expand "mulsi3_big"
+   [(set (subreg:SI (match_dup 4) 4) (match_operand:SI 1 "general_operand" ""))
+    (set (subreg:SI (match_dup 5) 4) (match_operand:SI 2 "general_operand" ""))
+    (clobber (match_dup 3))
+    (set (subreg:SI (match_dup 3) 4)
+ 	(mult:SI (subreg:SI (match_dup 4) 4) (subreg:SI (match_dup 5) 4)))
+    (set (match_operand:SI 0 "register_operand" "") (subreg:SI (match_dup 3) 4))]
+   "WORDS_BIG_ENDIAN"
+   "
+ {
+   operands[3] = gen_reg_rtx (DImode);
+   operands[4] = gen_reg_rtx (DImode);
+   operands[5] = gen_reg_rtx (DImode);
+ }")
+
+ (define_insn ""
+   [(set (subreg:SI (match_operand:DI 0 "register_operand" "=f") 0)
+ 	(mult:SI (subreg:SI (match_operand:DI 1 "register_operand" "f") 0)
+ 		 (subreg:SI (match_operand:DI 2 "register_operand" "f") 0)))]
+   "! WORDS_BIG_ENDIAN"
+   "fmlow.dd %2,%1,%0")
+
+ (define_insn ""
+   [(set (subreg:SI (match_operand:DI 0 "register_operand" "=f") 4)
+ 	(mult:SI (subreg:SI (match_operand:DI 1 "register_operand" "f") 4)
+ 		 (subreg:SI (match_operand:DI 2 "register_operand" "f") 4)))]
+   "WORDS_BIG_ENDIAN"
+   "fmlow.dd %2,%1,%0")
+
+ ;;- and instructions (with complement also)
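+ ;; In the patterns below, a constant that is not a 16-bit logical
+ ;; immediate is applied 16 bits at a time, using the high-half forms
+ ;; (andh, andnoth, orh, xorh) together with the plain forms; AND masks
+ ;; are complemented so that andnot/andnoth can be used.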
+ (define_insn "andsi3"
+   [(set (match_operand:SI 0 "register_operand" "=r")
+ 	(and:SI (match_operand:SI 1 "nonmemory_operand" "%r")
+ 		(match_operand:SI 2 "nonmemory_operand" "rL")))]
+   ""
+   "*
+ {
+   rtx xop[3];
+
+   CC_STATUS_PARTIAL_INIT;
+   if (REG_P (operands[2]) || LOGIC_INT (operands[2]))
+     return \"and %2,%1,%0\";
+   if ((INTVAL (operands[2]) & 0xffff) == 0)
+     {
+       operands[2]
+ 	= GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (operands[2]) >> 16);
+       return \"andh %2,%1,%0\";
+     }
+   xop[0] = operands[0];
+   xop[1] = operands[1];
+   xop[2] = GEN_INT (~INTVAL (operands[2]) & 0xffff);
+   output_asm_insn (\"andnot %2,%1,%0\", xop);
+   operands[2] = GEN_INT (~(unsigned HOST_WIDE_INT) INTVAL (operands[2]) >> 16);
+   return \"andnoth %2,%0,%0\";
+ }")
+
+ (define_insn ""
+   [(set (match_operand:SI 0 "register_operand" "=r")
+ 	(and:SI (not:SI (match_operand:SI 1 "register_operand" "rn"))
+ 		(match_operand:SI 2 "register_operand" "r")))]
+   ""
+   "*
+ {
+   rtx xop[3];
+
+   CC_STATUS_PARTIAL_INIT;
+   if (REG_P (operands[1]) || LOGIC_INT (operands[1]))
+     return \"andnot %1,%2,%0\";
+   if ((INTVAL (operands[1]) & 0xffff) == 0)
+     {
+       operands[1]
+ 	= GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (operands[1]) >> 16);
+       return \"andnoth %1,%2,%0\";
+     }
+   xop[0] = operands[0];
+   xop[1] = GEN_INT (INTVAL (operands[1]) & 0xffff);
+   xop[2] = operands[2];
+   output_asm_insn (\"andnot %1,%2,%0\", xop);
+   operands[1] = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (operands[1]) >> 16);
+   return \"andnoth %1,%0,%0\";
+ }")
+
+ (define_insn "iorsi3"
+   [(set (match_operand:SI 0 "register_operand" "=r")
+ 	(ior:SI (match_operand:SI 1 "nonmemory_operand" "%r")
+ 		(match_operand:SI 2 "nonmemory_operand" "rL")))]
+   ""
+   "*
+ {
+   rtx xop[3];
+
+   CC_STATUS_PARTIAL_INIT;
+   if (REG_P (operands[2]) || LOGIC_INT (operands[2]))
+     return \"or %2,%1,%0\";
+   if ((INTVAL (operands[2]) & 0xffff) == 0)
+     {
+       operands[2]
+ 	= GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (operands[2]) >> 16);
+       return \"orh %2,%1,%0\";
+     }
+   xop[0] = operands[0];
+   xop[1] = operands[1];
+   xop[2] = GEN_INT (INTVAL (operands[2]) & 0xffff);
+   output_asm_insn (\"or %2,%1,%0\", xop);
+   operands[2] = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (operands[2]) >> 16);
+   return \"orh %2,%0,%0\";
+ }")
+
+ (define_insn "xorsi3"
+   [(set (match_operand:SI 0 "register_operand" "=r")
+ 	(xor:SI (match_operand:SI 1 "nonmemory_operand" "%r")
+ 		(match_operand:SI 2 "nonmemory_operand" "rL")))]
+   ""
+   "*
+ {
+   rtx xop[3];
+
+   CC_STATUS_PARTIAL_INIT;
+   if (REG_P (operands[2]) || LOGIC_INT (operands[2]))
+     return \"xor %2,%1,%0\";
+   if ((INTVAL (operands[2]) & 0xffff) == 0)
+     {
+       operands[2]
+ 	= GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (operands[2]) >> 16);
+       return \"xorh %2,%1,%0\";
+     }
+   xop[0] = operands[0];
+   xop[1] = operands[1];
+   xop[2] = GEN_INT (INTVAL (operands[2]) & 0xffff);
+   output_asm_insn (\"xor %2,%1,%0\", xop);
+   operands[2] = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (operands[2]) >> 16);
+   return \"xorh %2,%0,%0\";
+ }")
+
+ ;; (The i860 instruction set doesn't allow an immediate second operand
+ ;; in a subtraction.)
+ (define_insn "negsi2"
+   [(set (match_operand:SI 0 "general_operand" "=r")
+ 	(neg:SI (match_operand:SI 1 "arith_operand" "r")))]
+   ""
+   "*
+ {
+   CC_STATUS_PARTIAL_INIT;
+   return \"subu %?r0,%1,%0\";
+ }")
+
+ (define_insn "one_cmplsi2"
+   [(set (match_operand:SI 0 "general_operand" "=r")
+ 	(not:SI (match_operand:SI 1 "arith_operand" "r")))]
+   ""
+   "*
+ {
+   CC_STATUS_PARTIAL_INIT;
+   return \"subu -1,%1,%0\";
+ }")
+
+ ;; Floating point arithmetic instructions.
+
+ (define_insn "adddf3"
+   [(set (match_operand:DF 0 "register_operand" "=f")
+ 	(plus:DF (match_operand:DF 1 "register_operand" "f")
+ 		 (match_operand:DF 2 "register_operand" "f")))]
+   ""
+   "fadd.dd %1,%2,%0")
+
+ (define_insn "addsf3"
+   [(set (match_operand:SF 0 "register_operand" "=f")
+ 	(plus:SF (match_operand:SF 1 "register_operand" "f")
+ 		 (match_operand:SF 2 "register_operand" "f")))]
+   ""
+   "fadd.ss %1,%2,%0")
+
+ (define_insn "subdf3"
+   [(set (match_operand:DF 0 "register_operand" "=f")
+ 	(minus:DF (match_operand:DF 1 "register_operand" "f")
+ 		  (match_operand:DF 2 "register_operand" "f")))]
+   ""
+   "fsub.dd %1,%2,%0")
+
+ (define_insn "subsf3"
+   [(set (match_operand:SF 0 "register_operand" "=f")
+ 	(minus:SF (match_operand:SF 1 "register_operand" "f")
+ 		  (match_operand:SF 2 "register_operand" "f")))]
+   ""
+   "fsub.ss %1,%2,%0")
+
+ (define_insn "muldf3"
+   [(set (match_operand:DF 0 "register_operand" "=f")
+ 	(mult:DF (match_operand:DF 1 "register_operand" "f")
+ 		 (match_operand:DF 2 "register_operand" "f")))]
+   ""
+   "fmul.dd %1,%2,%0")
+
+ (define_insn "mulsf3"
+   [(set (match_operand:SF 0 "register_operand" "=f")
+ 	(mult:SF (match_operand:SF 1 "register_operand" "f")
+ 		 (match_operand:SF 2 "register_operand" "f")))]
+   ""
+   "fmul.ss %1,%2,%0")
+
+ (define_insn "negdf2"
+   [(set (match_operand:DF 0 "register_operand" "=f")
+ 	(neg:DF (match_operand:DF 1 "register_operand" "f")))]
+   ""
+   "fsub.dd %?f0,%1,%0")
+
+ (define_insn "negsf2"
+   [(set (match_operand:SF 0 "register_operand" "=f")
+ 	(neg:SF (match_operand:SF 1 "register_operand" "f")))]
+   ""
+   "fsub.ss %?f0,%1,%0")
+
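+ ;; The divide patterns below start from `frcp' (an approximate reciprocal
+ ;; of the divisor) and refine it with Newton-Raphson steps of the form
+ ;; r' = r * (2 - d*r) before multiplying by the dividend.  The constant
+ ;; 2.0 is built with `orh 0x4000,%?r0,%?r31' (0x40000000 is the bit
+ ;; pattern of 2.0f and the high word of 2.0 in double precision), and the
+ ;; r31 caching trick noted earlier avoids rebuilding it.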
+ (define_insn "divdf3"
+   [(set (match_operand:DF 0 "register_operand" "=&f")
+ 	(div:DF (match_operand:DF 1 "register_operand" "f")
+ 		 (match_operand:DF 2 "register_operand" "f")))
+    (clobber (match_scratch:DF 3 "=&f"))
+    (clobber (match_scratch:DF 4 "=&f"))]
+   ""
+   "*
+ {
+   CC_STATUS_PARTIAL_INIT;
+   if (((cc_prev_status.flags & CC_KNOW_HI_R31) == 0)
+       || (cc_prev_status.flags & CC_HI_R31_ADJ)
+       || (cc_prev_status.mdep != CONST2_RTX (SFmode)))
+     {
+       cc_status.flags |= CC_KNOW_HI_R31;
+       cc_status.flags &= ~CC_HI_R31_ADJ;
+       cc_status.mdep = CONST2_RTX (SFmode);
+       return \"frcp.dd %2,%3\;fmul.dd %2,%3,%0\;fmov.dd %?f0,%4\;\\
+ orh 0x4000,%?r0,%?r31\;ixfr %?r31,%R4\;fsub.dd %4,%0,%0\;\\
+ fmul.dd %3,%0,%3\;fmul.dd %2,%3,%0\;fsub.dd %4,%0,%0\;\\
+ fmul.dd %3,%0,%3\;fmul.dd %2,%3,%0\;fsub.dd %4,%0,%0\;\\
+ fmul.dd %3,%1,%3\;fmul.dd %0,%3,%0\";
+     }
+   else
+     return \"frcp.dd %2,%3\;fmul.dd %2,%3,%0\;fmov.dd %?f0,%4\;\\
+ ixfr %?r31,%R4\;fsub.dd %4,%0,%0\;\\
+ fmul.dd %3,%0,%3\;fmul.dd %2,%3,%0\;fsub.dd %4,%0,%0\;\\
+ fmul.dd %3,%0,%3\;fmul.dd %2,%3,%0\;fsub.dd %4,%0,%0\;\\
+ fmul.dd %3,%1,%3\;fmul.dd %0,%3,%0\";
+ }")
+
+ (define_insn "divsf3"
+   [(set (match_operand:SF 0 "register_operand" "=&f")
+ 	(div:SF (match_operand:SF 1 "register_operand" "f")
+ 		 (match_operand:SF 2 "register_operand" "f")))
+    (clobber (match_scratch:SF 3 "=&f"))
+    (clobber (match_scratch:SF 4 "=&f"))]
+   ""
+   "*
+ {
+   CC_STATUS_PARTIAL_INIT;
+   if (((cc_prev_status.flags & CC_KNOW_HI_R31) == 0)
+       || (cc_prev_status.flags & CC_HI_R31_ADJ)
+       || (cc_prev_status.mdep != CONST2_RTX (SFmode)))
+     {
+       cc_status.flags |= CC_KNOW_HI_R31;
+       cc_status.flags &= ~CC_HI_R31_ADJ;
+       cc_status.mdep = CONST2_RTX (SFmode);
+       output_asm_insn (\"orh 0x4000,%?r0,%?r31\", operands);
+     }
+   return \"ixfr %?r31,%4\;frcp.ss %2,%0\;\\
+ fmul.ss %2,%0,%3\;fsub.ss %4,%3,%3\;fmul.ss %0,%3,%0\;\\
+ fmul.ss %2,%0,%3\;fsub.ss %4,%3,%3\;\\
+ fmul.ss %1,%0,%4\;fmul.ss %3,%4,%0\";
+ }")
+
+ ;; Shift instructions
+
+ ;; Optimized special case of shifting.
+ ;; Must precede the general case.
+
+ (define_insn ""
+   [(set (match_operand:SI 0 "register_operand" "=r")
+ 	(ashiftrt:SI (match_operand:SI 1 "memory_operand" "m")
+ 		     (const_int 24)))]
+   ""
+   "*
+ {
+   if (CONSTANT_ADDRESS_P (XEXP (operands[1], 0)))
+     {
+       CC_STATUS_INIT;
+       cc_status.flags |= CC_KNOW_HI_R31 | CC_HI_R31_ADJ;
+       cc_status.mdep = XEXP (operands[1], 0);
+       return \"orh %h1,%?r0,%?r31\;ld.b %L1(%?r31),%0\";
+     }
+   return \"ld.b %1,%0\";
+ }")
+
+
+ ;;- arithmetic shift instructions
+ (define_insn "ashlsi3"
+   [(set (match_operand:SI 0 "register_operand" "=r")
+ 	(ashift:SI (match_operand:SI 1 "register_operand" "r")
+ 		   (match_operand:SI 2 "shift_operand" "rn")))]
+   ""
+   "*
+ {
+   return \"shl %2,%1,%0\";
+ }")
+
+ (define_insn "ashlhi3"
+   [(set (match_operand:HI 0 "register_operand" "=r")
+ 	(ashift:HI (match_operand:HI 1 "register_operand" "r")
+ 		   (match_operand:HI 2 "shift_operand" "rn")))]
+   ""
+   "*
+ {
+   return \"shl %2,%1,%0\";
+ }")
+
+ (define_insn "ashlqi3"
+   [(set (match_operand:QI 0 "register_operand" "=r")
+ 	(ashift:QI (match_operand:QI 1 "register_operand" "r")
+ 		   (match_operand:QI 2 "shift_operand" "rn")))]
+   ""
+   "*
+ {
+   return \"shl %2,%1,%0\";
+ }")
+
+ (define_insn "ashrsi3"
+   [(set (match_operand:SI 0 "register_operand" "=r")
+ 	(ashiftrt:SI (match_operand:SI 1 "register_operand" "r")
+ 		     (match_operand:SI 2 "shift_operand" "rn")))]
+   ""
+   "*
+ {
+   return \"shra %2,%1,%0\";
+ }")
+
+ (define_insn "lshrsi3"
+   [(set (match_operand:SI 0 "register_operand" "=r")
+ 	(lshiftrt:SI (match_operand:SI 1 "register_operand" "r")
+ 		     (match_operand:SI 2 "shift_operand" "rn")))]
+   ""
+   "*
+ {
+   return \"shr %2,%1,%0\";
+ }")
+
+ ;; Unconditional and other jump instructions
+
+ (define_insn "jump"
+   [(set (pc) (label_ref (match_operand 0 "" "")))]
+   ""
+   "*
+ {
+   return \"br %l0\;nop\";
+ }")
+
+ ;; Here are two simple peepholes which fill the delay slot of
+ ;; an unconditional branch.
+ ;
+ ;; ??? All disabled, because output_delayed_branch is a crock
+ ;; that will reliably segfault.  This should be using the dbr
+ ;; pass in any case.  Anyone who cares is welcome to fix it.
+ ;
+ ;(define_peephole
+ ;  [(set (match_operand:SI 0 "register_operand" "=rf")
+ ;	(match_operand:SI 1 "single_insn_src_p" "gfG"))
+ ;   (set (pc) (label_ref (match_operand 2 "" "")))]
+ ;  ""
+ ;  "* return output_delayed_branch (\"br %l2\", operands, insn);")
+ ;
+ ;(define_peephole
+ ;  [(set (match_operand:SI 0 "memory_operand" "=m")
+ ;	(match_operand:SI 1 "reg_or_0_operand" "rfJ"))
+ ;   (set (pc) (label_ref (match_operand 2 "" "")))]
+ ;  ""
+ ;  "* return output_delayed_branch (\"br %l2\", operands, insn);")
+
+ (define_insn "tablejump"
+   [(set (pc) (match_operand:SI 0 "register_operand" "r"))
+    (use (label_ref (match_operand 1 "" "")))]
+   ""
+   "bri %0\;nop")
+
+ ;(define_peephole
+ ;  [(set (match_operand:SI 0 "memory_operand" "=m")
+ ;	(match_operand:SI 1 "reg_or_0_operand" "rfJ"))
+ ;   (set (pc) (match_operand:SI 2 "register_operand" "r"))
+ ;   (use (label_ref (match_operand 3 "" "")))]
+ ;  ""
+ ;  "* return output_delayed_branch (\"bri %2\", operands, insn);")
+
+ ;;- jump to subroutine
+ (define_expand "call"
+   [(call (match_operand:SI 0 "memory_operand" "m")
+ 	 (match_operand 1 "" "i"))]
+   ;; operand[2] is next_arg_register
+   ""
+   "
+ {
+   /* Make sure the address is just one reg and will stay that way.  */
+   if (! call_insn_operand (operands[0], QImode))
+     operands[0]
+       = replace_equiv_address (operands[0],
+ 			       copy_to_mode_reg (Pmode,
+ 						 XEXP (operands[0], 0)));
+   if (INTVAL (operands[1]) > 0)
+     {
+       emit_move_insn (arg_pointer_rtx, stack_pointer_rtx);
+       emit_insn (gen_rtx_USE (VOIDmode, arg_pointer_rtx));
+     }
+ }")
+
+ ;;- jump to subroutine
+ (define_insn ""
+   [(call (match_operand:SI 0 "call_insn_operand" "m")
+ 	 (match_operand 1 "" "i"))]
+   ;; operand[2] is next_arg_register
+   ""
+   "*
+ {
+   /* strip the MEM.  */
+   operands[0] = XEXP (operands[0], 0);
+   CC_STATUS_INIT;
+   if (GET_CODE (operands[0]) == REG)
+     return \"calli %0\;nop\";
+   return \"call %0\;nop\";
+ }")
+
+ ;(define_peephole
+ ;  [(set (match_operand:SI 0 "register_operand" "=rf")
+ ;	(match_operand:SI 1 "single_insn_src_p" "gfG"))
+ ;   (call (match_operand:SI 2 "memory_operand" "m")
+ ;	 (match_operand 3 "" "i"))]
+ ;  ;;- Don't use operand 1 for most machines.
+ ;  "! reg_mentioned_p (operands[0], operands[2])"
+ ;  "*
+ ;{
+ ;  /* strip the MEM.  */
+ ;  operands[2] = XEXP (operands[2], 0);
+ ;  if (GET_CODE (operands[2]) == REG)
+ ;    return output_delayed_branch (\"calli %2\", operands, insn);
+ ;  return output_delayed_branch (\"call %2\", operands, insn);
+ ;}")
+
+ ;(define_peephole
+ ;  [(set (match_operand:SI 0 "memory_operand" "=m")
+ ;	(match_operand:SI 1 "reg_or_0_operand" "rfJ"))
+ ;   (call (match_operand:SI 2 "call_insn_operand" "m")
+ ;	 (match_operand 3 "" "i"))]
+ ;  ;;- Don't use operand 1 for most machines.
+ ;  ""
+ ;  "*
+ ;{
+ ;  /* strip the MEM.  */
+ ;  operands[2] = XEXP (operands[2], 0);
+ ;  if (GET_CODE (operands[2]) == REG)
+ ;    return output_delayed_branch (\"calli %2\", operands, insn);
+ ;  return output_delayed_branch (\"call %2\", operands, insn);
+ ;}")
+
+ (define_expand "call_value"
+   [(set (match_operand 0 "register_operand" "=rf")
+ 	(call (match_operand:SI 1 "memory_operand" "m")
+ 	      (match_operand 2 "" "i")))]
+   ;; operand 3 is next_arg_register
+   ""
+   "
+ {
+   /* Make sure the address is just one reg and will stay that way.  */
+   if (! call_insn_operand (operands[1], QImode))
+     operands[1]
+       = replace_equiv_address (operands[1],
+ 			       copy_to_mode_reg (Pmode,
+ 						 XEXP (operands[1], 0)));
+   if (INTVAL (operands[2]) > 0)
+     {
+       emit_move_insn (arg_pointer_rtx, stack_pointer_rtx);
+       emit_insn (gen_rtx_USE (VOIDmode, arg_pointer_rtx));
+     }
+ }")
+
+ (define_insn ""
+   [(set (match_operand 0 "register_operand" "=rf")
+ 	(call (match_operand:SI 1 "call_insn_operand" "m")
+ 	      (match_operand 2 "" "i")))]
+   ;; operand 3 is next_arg_register
+   ""
+   "*
+ {
+   /* strip the MEM.  */
+   operands[1] = XEXP (operands[1], 0);
+   CC_STATUS_INIT;
+   if (GET_CODE (operands[1]) == REG)
+     return \"calli %1\;nop\";
+   return \"call %1\;nop\";
+ }")
+
+ ;(define_peephole
+ ;  [(set (match_operand:SI 0 "register_operand" "=rf")
+ ;	(match_operand:SI 1 "single_insn_src_p" "gfG"))
+ ;   (set (match_operand 2 "" "=rf")
+ ;	(call (match_operand:SI 3 "call_insn_operand" "m")
+ ;	      (match_operand 4 "" "i")))]
+ ;  ;;- Don't use operand 4 for most machines.
+ ;  "! reg_mentioned_p (operands[0], operands[3])"
+ ;  "*
+ ;{
+ ;  /* strip the MEM.  */
+ ;  operands[3] = XEXP (operands[3], 0);
+ ;  if (GET_CODE (operands[3]) == REG)
+ ;    return output_delayed_branch (\"calli %3\", operands, insn);
+ ;  return output_delayed_branch (\"call %3\", operands, insn);
+ ;}")
+
+ ;(define_peephole
+ ;  [(set (match_operand:SI 0 "memory_operand" "=m")
+ ;	(match_operand:SI 1 "reg_or_0_operand" "rJf"))
+ ;   (set (match_operand 2 "" "=rf")
+ ;	(call (match_operand:SI 3 "call_insn_operand" "m")
+ ;	      (match_operand 4 "" "i")))]
+ ;  ;;- Don't use operand 4 for most machines.
+ ;  ""
+ ;  "*
+ ;{
+ ;  /* strip the MEM.  */
+ ;  operands[3] = XEXP (operands[3], 0);
+ ;  if (GET_CODE (operands[3]) == REG)
+ ;    return output_delayed_branch (\"calli %3\", operands, insn);
+ ;  return output_delayed_branch (\"call %3\", operands, insn);
+ ;}")
+
+ ;; Call subroutine returning any type.
+
+ (define_expand "untyped_call"
+   [(parallel [(call (match_operand 0 "" "")
+ 		    (const_int 0))
+ 	      (match_operand 1 "" "")
+ 	      (match_operand 2 "" "")])]
+   ""
+   "
+ {
+   int i;
+
+   emit_call_insn (GEN_CALL (operands[0], const0_rtx, NULL, const0_rtx));
+
+   for (i = 0; i < XVECLEN (operands[2], 0); i++)
+     {
+       rtx set = XVECEXP (operands[2], 0, i);
+       emit_move_insn (SET_DEST (set), SET_SRC (set));
+     }
+
+   /* The optimizer does not know that the call sets the function value
+      registers we stored in the result block.  We avoid problems by
+      claiming that all hard registers are used and clobbered at this
+      point.  */
+   emit_insn (gen_blockage ());
+
+   DONE;
+ }")
+
+ ;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
+ ;; all of memory.  This blocks insns from being moved across this point.
+
+ (define_insn "blockage"
+   [(unspec_volatile [(const_int 0)] 0)]
+   ""
+   "")
+
+ (define_insn "nop"
+   [(const_int 0)]
+   ""
+   "nop")
+
+ (define_insn "indirect_jump"
+   [(set (pc) (match_operand:SI 0 "register_operand" "r"))]
+   ""
+   "bri %0")
+
+ ;;
+ ;; A special insn that does the work to get set up just
+ ;; before a table jump.
+ ;;
+ (define_insn ""
+   [(set (match_operand:SI 0 "register_operand" "=r")
+ 	(mem:SI (plus:SI (match_operand:SI 1 "register_operand" "r")
+ 			 (label_ref (match_operand 2 "" "")))))]
+   ""
+   "*
+ {
+   CC_STATUS_INIT;
+   return \"orh %H2,%?r0,%?r31\;or %L2,%?r31,%?r31\;ld.l %?r31(%1),%0\";
+ }")
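+
+ ;; The sequence above materializes the 32-bit address of the jump table
+ ;; in r31 (orh/or with the high and low halves of the label) and then
+ ;; loads the table entry indexed by operand 1.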
+
+ ;(define_peephole
+ ;  [(set (match_operand:SI 0 "register_operand" "=rf")
+ ;	(match_operand:SI 1 "single_insn_src_p" "gfG"))
+ ;   (set (pc) (match_operand:SI 2 "register_operand" "r"))
+ ;   (use (label_ref (match_operand 3 "" "")))]
+ ;  "REGNO (operands[0]) != REGNO (operands[2])"
+ ;  "* return output_delayed_branch (\"bri %2\", operands, insn);")
diff -c3pN gcc/gcc/config/nil/sysv4.h gcc/gcc/config/i860/sysv4.h
*** gcc/gcc/config/nil/sysv4.h	Wed Dec 31 19:00:00 1969
--- gcc/gcc/config/i860/sysv4.h	Sat Aug  9 00:14:36 2003
***************
*** 0 ****
--- 1,143 ----
+ /* Target definitions for GNU compiler for Intel 80860 running System V.4
+    Copyright (C) 1991, 1996, 2000, 2002 Free Software Foundation, Inc.
+    Contributed by Ron Guilmette (rfg@monkeys.com).
+
+ This file is part of GNU CC.
+
+ GNU CC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GNU CC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GNU CC; see the file COPYING.  If not, write to
+ the Free Software Foundation, 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA.  */
+
+ #undef TARGET_VERSION
+ #define TARGET_VERSION fprintf (stderr, " (i860 System V Release 4)");
+
+ /* Provide a set of pre-definitions and pre-assertions appropriate for
+    the i860 running svr4.  Note that the symbol `__svr4__' MUST BE
+    DEFINED!  It is needed so that the va_list struct in va-i860.h
+    will get correctly defined for the svr4 (ABI compliant) case rather
+    than for the previous (svr3, svr2, ...) case.  It also needs to be
+    defined so that the correct (svr4) version of __builtin_saveregs
+    will be selected when we are building gnulib2.c.
+    __svr4__ is our extension.  */
+
+ #define CPP_PREDEFINES \
+   "-Di860 -Dunix -DSVR4 -D__svr4__ -Asystem=unix -Asystem=svr4 -Acpu=i860 -Amachine=i860"
+
+ /* For the benefit of i860_va_arg, flag it this way too.  */
+
+ #define I860_SVR4_VA_LIST 1
+
+ /* The prefix to be used in assembler output for all names of registers.
+    This string gets prepended to all i860 register names (svr4 only).  */
+
+ #define I860_REG_PREFIX	"%"
+
+ #define ASM_COMMENT_START "#"
+
+ #undef TYPE_OPERAND_FMT
+ #define TYPE_OPERAND_FMT      "\"%s\""
+
+ /* The following macro definition overrides the one in i860.h
+    because the svr4 i860 assembler requires a different syntax
+    for getting parts of constant/relocatable values.  */
+
+ #undef PRINT_OPERAND_PART
+ #define PRINT_OPERAND_PART(FILE, X, PART_CODE)				\
+   do { fprintf (FILE, "[");						\
+ 	output_address (X);						\
+ 	fprintf (FILE, "]@%s", PART_CODE);				\
+   } while (0)
+
+ #undef ASM_FILE_START
+ #define ASM_FILE_START(FILE)						\
+   do {	output_file_directive (FILE, main_input_filename);		\
+ 	fprintf (FILE, "\t.version\t\"01.01\"\n");			\
+   } while (0)
+
+ /* Output the special word the svr4 SDB wants to see just before
+    the first word of each function's prologue code.  */
+
+ extern const char *current_function_original_name;
+
+ /* This special macro is used to output a magic word just before the
+    first word of each function.  On some versions of UNIX running on
+    the i860, this word can be any word that looks like a NOP; however,
+    under svr4 it needs to be a `shr r0,r0,r0' instruction in which
+    the normally unused low-order bits contain the length of the function
+    prologue code (in bytes).  This is needed to make the svr4 SDB debugger
+    happy.  */
+
+ #undef ASM_OUTPUT_FUNCTION_PREFIX
+ #define ASM_OUTPUT_FUNCTION_PREFIX(FILE, FNNAME)			\
+   do {	ASM_OUTPUT_ALIGN (FILE, 2);					\
+   	fprintf ((FILE), "\t.long\t.ep.");				\
+ 	assemble_name (FILE, FNNAME);					\
+ 	fprintf (FILE, "-");						\
+ 	assemble_name (FILE, FNNAME);					\
+ 	fprintf (FILE, "+0xc8000000\n");				\
+ 	current_function_original_name = (FNNAME);			\
+   } while (0)
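+
+ /* In the word emitted above, `.ep.NAME - NAME' is the byte length of
+    the function prologue (the distance from the function entry point to
+    the label emitted by ASM_OUTPUT_PROLOGUE_SUFFIX below), and 0xc8000000
+    is presumably the encoding of the `shr r0,r0,r0' word mentioned above,
+    so the prologue length lands in the otherwise-unused low-order bits.  */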
+
+ /* Output the special label that must go just after each function's
+    prologue code to support svr4 SDB.  */
+
+ #define ASM_OUTPUT_PROLOGUE_SUFFIX(FILE)				\
+   do {	fprintf (FILE, ".ep.");						\
+ 	assemble_name (FILE, current_function_original_name);		\
+ 	fprintf (FILE, ":\n");						\
+   } while (0)
+
+ /* Define the pseudo-ops used to switch to the .ctors and .dtors sections.
+
+    Note that we want to give these sections the SHF_WRITE attribute
+    because these sections will actually contain data (i.e. tables of
+    addresses of functions in the current root executable or shared library
+    file) and, in the case of a shared library, the relocatable addresses
+    will have to be properly resolved/relocated (and then written into) by
+    the dynamic linker when it actually attaches the given shared library
+    to the executing process.  (Note that on SVR4, you may wish to use the
+    `-z text' option to the ELF linker, when building a shared library, as
+    an additional check that you are doing everything right.  But if you do
+    use the `-z text' option when building a shared library, you will get
+    errors unless the .ctors and .dtors sections are marked as writable
+    via the SHF_WRITE attribute.)  */
+
+ #undef CTORS_SECTION_ASM_OP
+ #define CTORS_SECTION_ASM_OP	"\t.section\t.ctors,\"aw\""
+ #undef DTORS_SECTION_ASM_OP
+ #define DTORS_SECTION_ASM_OP	"\t.section\t.dtors,\"aw\""
+
+ /* Add definitions to support the .tdesc section as specified in the svr4
+    ABI for the i860.  */
+
+ #define TDESC_SECTION_ASM_OP    "\t.section\t.tdesc"
+
+ #undef EXTRA_SECTIONS
+ #define EXTRA_SECTIONS in_tdesc
+
+ #undef EXTRA_SECTION_FUNCTIONS
+ #define EXTRA_SECTION_FUNCTIONS						\
+   TDESC_SECTION_FUNCTION
+
+ #define TDESC_SECTION_FUNCTION						\
+ void									\
+ tdesc_section ()							\
+ {									\
+   if (in_section != in_tdesc)						\
+     {									\
+       fprintf (asm_out_file, "%s\n", TDESC_SECTION_ASM_OP);		\
+       in_section = in_tdesc;						\
+     }									\
+ }
+
diff -c3pN gcc/gcc/config/nil/varargs.asm gcc/gcc/config/i860/varargs.asm
*** gcc/gcc/config/nil/varargs.asm	Wed Dec 31 19:00:00 1969
--- gcc/gcc/config/i860/varargs.asm	Thu Aug  7 20:06:46 2003
***************
*** 0 ****
--- 1,201 ----
+ /* Special varargs support for i860.
+    Copyright (C) 2001  Free Software Foundation, Inc.
+
+ This file is part of GNU CC.
+
+ GNU CC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ In addition to the permissions in the GNU General Public License, the
+ Free Software Foundation gives you unlimited permission to link the
+ compiled version of this file into combinations with other programs,
+ and to distribute those combinations without any restriction coming
+ from the use of this file.  (The General Public License restrictions
+ do apply in other respects; for example, they cover modification of
+ the file, and distribution when not linked into a combine
+ executable.)
+
+ GNU CC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GNU CC; see the file COPYING.  If not, write to
+ the Free Software Foundation, 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA.  */
+
+ #if defined(__svr4__) || defined(__alliant__)
+ 	.text
+ 	.align	4
+
+ /* The Alliant needs the added underscore.  */
+ 	.globl	__builtin_saveregs
+ __builtin_saveregs:
+ 	.globl	___builtin_saveregs
+ ___builtin_saveregs:
+
+ 	andnot	0x0f,%sp,%sp	/* round down to 16-byte boundary */
+ 	adds	-96,%sp,%sp  /* allocate stack space for reg save
+ 			   area and also for a new va_list
+ 			   structure */
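+ 	/* 96 bytes = 32 for f8-f15 + 48 for r16-r27 + 16 for the
+ 	   va_list structure itself, which therefore sits at offset 80
+ 	   (the address computed by `adds 80,%sp,%r16' below).  */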
+ 	/* Save all argument registers in the arg reg save area.  The
+ 	   arg reg save area must have the following layout (according
+ 	   to the svr4 ABI):
+
+ 	struct {
+ 	  union  {
+ 	    float freg[8];
+ 	    double dreg[4];
+ 	  } float_regs;
+ 	  long	ireg[12];
+ 	};
+ 	*/
+
+ 	fst.q	%f8,  0(%sp) /* save floating regs (f8-f15)  */
+ 	fst.q	%f12,16(%sp)
+
+ 	st.l	%r16,32(%sp) /* save integer regs (r16-r27) */
+ 	st.l	%r17,36(%sp)
+ 	st.l	%r18,40(%sp)
+ 	st.l	%r19,44(%sp)
+ 	st.l	%r20,48(%sp)
+ 	st.l	%r21,52(%sp)
+ 	st.l	%r22,56(%sp)
+ 	st.l	%r23,60(%sp)
+ 	st.l	%r24,64(%sp)
+ 	st.l	%r25,68(%sp)
+ 	st.l	%r26,72(%sp)
+ 	st.l	%r27,76(%sp)
+
+ 	adds	80,%sp,%r16  /* compute the address of the new
+ 			   va_list structure.  Put it into
+ 			   r16 so that it will be returned
+ 			   to the caller.  */
+
+ 	/* Initialize all fields of the new va_list structure.  This
+ 	   structure looks like:
+
+ 	typedef struct {
+ 	    unsigned long	ireg_used;
+ 	    unsigned long	freg_used;
+ 	    long	*reg_base;
+ 	    long	*mem_ptr;
+ 	} va_list;
+ 	*/
+
+ 	st.l	%r0, 0(%r16) /* nfixed */
+ 	st.l	%r0, 4(%r16) /* nfloating */
+ 	st.l    %sp, 8(%r16) /* __va_ctl points to __va_struct.  */
+ 	bri	%r1	/* delayed return */
+ 	st.l	%r28,12(%r16) /* pointer to overflow args */
+
+ #else /* not __svr4__ */
+ #if defined(__PARAGON__)
+ 	/*
+ 	 *	we'll use SVR4-ish varargs but need SVR3.2 assembler syntax,
+ 	 *	and we stand a better chance of hooking into libraries
+ 	 *	compiled by PGI.  [andyp@ssd.intel.com]
+ 	 */
+ 	.text
+ 	.align	4
+ 	.globl	__builtin_saveregs
+ __builtin_saveregs:
+ 	.globl	___builtin_saveregs
+ ___builtin_saveregs:
+
+ 	andnot	0x0f,sp,sp	/* round down to 16-byte boundary */
+ 	adds	-96,sp,sp	/* allocate stack space for reg save
+ 			   area and also for a new va_list
+ 			   structure */
+ 	/* Save all argument registers in the arg reg save area.  The
+ 	   arg reg save area must have the following layout (according
+ 	   to the svr4 ABI):
+
+ 	struct {
+ 	  union  {
+ 	    float freg[8];
+ 	    double dreg[4];
+ 	  } float_regs;
+ 	  long	ireg[12];
+ 	};
+ 	*/
+
+ 	fst.q	f8,  0(sp)
+ 	fst.q	f12,16(sp)
+ 	st.l	r16,32(sp)
+ 	st.l	r17,36(sp)
+ 	st.l	r18,40(sp)
+ 	st.l	r19,44(sp)
+ 	st.l	r20,48(sp)
+ 	st.l	r21,52(sp)
+ 	st.l	r22,56(sp)
+ 	st.l	r23,60(sp)
+ 	st.l	r24,64(sp)
+ 	st.l	r25,68(sp)
+ 	st.l	r26,72(sp)
+ 	st.l	r27,76(sp)
+
+ 	adds	80,sp,r16  /* compute the address of the new
+ 			   va_list structure.  Put it into
+ 			   r16 so that it will be returned
+ 			   to the caller.  */
+
+ 	/* Initialize all fields of the new va_list structure.  This
+ 	   structure looks like:
+
+ 	typedef struct {
+ 	    unsigned long	ireg_used;
+ 	    unsigned long	freg_used;
+ 	    long	*reg_base;
+ 	    long	*mem_ptr;
+ 	} va_list;
+ 	*/
+
+ 	st.l	r0, 0(r16) /* nfixed */
+ 	st.l	r0, 4(r16) /* nfloating */
+ 	st.l    sp, 8(r16) /* __va_ctl points to __va_struct.  */
+ 	bri	r1	/* delayed return */
+ 	st.l	r28,12(r16) /* pointer to overflow args */
+ #else /* not __PARAGON__ */
+ 	.text
+ 	.align	4
+
+ 	.globl	___builtin_saveregs
+ ___builtin_saveregs:
+ 	mov	sp,r30
+ 	andnot	0x0f,sp,sp
+ 	adds	-96,sp,sp  /* allocate sufficient space on the stack */
+
+ /* Fill in the __va_struct.  */
+ 	st.l	r16, 0(sp) /* save integer regs (r16-r27) */
+ 	st.l	r17, 4(sp) /* int	fixed[12] */
+ 	st.l	r18, 8(sp)
+ 	st.l	r19,12(sp)
+ 	st.l	r20,16(sp)
+ 	st.l	r21,20(sp)
+ 	st.l	r22,24(sp)
+ 	st.l	r23,28(sp)
+ 	st.l	r24,32(sp)
+ 	st.l	r25,36(sp)
+ 	st.l	r26,40(sp)
+ 	st.l	r27,44(sp)
+
+ 	fst.q	f8, 48(sp) /* save floating regs (f8-f15) */
+ 	fst.q	f12,64(sp) /* int floating[8] */
+
+ /* Fill in the __va_ctl.  */
+ 	st.l    sp, 80(sp) /* __va_ctl points to __va_struct.  */
+ 	st.l	r28,84(sp) /* pointer to more args */
+ 	st.l	r0, 88(sp) /* nfixed */
+ 	st.l	r0, 92(sp) /* nfloating */
+
+ 	adds	80,sp,r16  /* return address of the __va_ctl.  */
+ 	bri	r1
+ 	mov	r30,sp
+ 		/* recover stack and pass address to start
+ 		   of data.  */
+ #endif /* not __PARAGON__ */
+ #endif /* not __svr4__ */
diff -c3pN gcc/gcc/config/nil/x-sysv4 gcc/gcc/config/i860/x-sysv4
*** gcc/gcc/config/nil/x-sysv4	Wed Dec 31 19:00:00 1969
--- gcc/gcc/config/i860/x-sysv4	Thu Aug  7 20:07:33 2003
***************
*** 0 ****
--- 1,44 ----
+ # The svr4 reference port for the i860 contains an alloca.o routine
+ # in /usr/ucblib/libucb.a, but we can't just try to get that by
+ # setting CLIB to /usr/ucblib/libucb.a because (unfortunately)
+ # there are a lot of other routines in libucb.a which are supposed
+ # to be the Berkeley versions of library routines normally found in
+ # libc.a and many of these Berkeley versions are badly broken.  Thus,
+ # if we try to link programs with libucb.a before libc.a, those
+ # programs tend to crash.
+
+ # Also, the alloca() routine supplied in early versions of svr4 for
+ # the i860 is non-ABI compliant.  It doesn't keep the stack aligned
+ # to a 16-byte boundary as the ABI requires.
+
+ # More importantly however, even a fully ABI compliant alloca() routine
+ # would fail to work correctly with some versions of the native svr4 C
+ # compiler currently being distributed for the i860 (as of 1/29/92).
+ # The problem is that the native C compiler generates non-ABI-compliant
+ # function epilogues which cut back the stack (upon function exit) in
+ # an incorrect manner.  Specifically, they cut back the stack by adding
+ # the nominal *static* frame size (determined statically at compile-time)
+ # to the stack pointer rather than setting the stack pointer based upon
+ # the current value of the frame pointer (as called for in the i860 ABI).
+ # This can cause serious trouble in cases where you repeatedly call a
+ # routine which itself calls alloca().  In such cases, the stack will
+ # grow continuously until you finally run out of swap space or exceed
+ # the system's process size limit.  To avoid this problem (which can
+ # arise when a stage1 gcc is being used to build a stage2 gcc) you
+ # *must* link the C language version of alloca() supplied with gcc
+ # into your stage1 version of gcc.  The following definition
+ # forces that to happen.
+
+ ALLOCA=alloca.o
+
+ # We build all stages *without* shared libraries because that may make
+ # debugging the compiler easier (until there is a GDB which supports
+ # both Dwarf *and* svr4 shared libraries).
+
+ # Note that the native C compiler for the svr4 reference port on the
+ # i860 recognizes a special -gg option.  Using that option causes *full*
+ # Dwarf debugging information to be generated, whereas using only -g
+ # causes only limited Dwarf debugging information to be generated.
+ # (This is an undocumented feature of the native svr4 C compiler.)
+
+ CCLIBFLAGS=-Bstatic -dn -gg
diff -c3pN gcc/gcc/config/nil/xm-i860.h gcc/gcc/config/i860/xm-i860.h
*** gcc/gcc/config/nil/xm-i860.h	Wed Dec 31 19:00:00 1969
--- gcc/gcc/config/i860/xm-i860.h	Thu Aug  7 20:08:08 2003
***************
*** 0 ****
--- 1,39 ----
+ /* Configuration for GNU C-compiler for Intel i860.
+    Copyright (C) 1988, 1993 Free Software Foundation, Inc.
+
+ This file is part of GNU CC.
+
+ GNU CC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GNU CC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GNU CC; see the file COPYING.  If not, write to
+ the Free Software Foundation, 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA.  */
+
+ #ifndef i860
+ #define i860
+ #endif
+
+ /* This describes the machine the compiler is hosted on.  */
+ #define HOST_BITS_PER_CHAR 8
+ #define HOST_BITS_PER_SHORT 16
+ #define HOST_BITS_PER_INT 32
+ #define HOST_BITS_PER_LONG 32
+ #define HOST_BITS_PER_LONGLONG 64
+
+ /* Arguments to use with `exit'.  */
+ #define SUCCESS_EXIT_CODE 0
+ #define FATAL_EXIT_CODE 33
+
+ /* Target machine dependencies.
+    tm.h is a symbolic link to the actual target-specific file.  */
+
+ #include "tm.h"


