--- /dev/null
+;;- Machine description for AMD Am29000 for GNU C compiler
+;; Copyright (C) 1991 Free Software Foundation, Inc.
+;; Contributed by Richard Kenner (kenner@nyu.edu)
+
+;; This file is part of GNU CC.
+
+;; GNU CC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+
+;; GNU CC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GNU CC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+
+;;- See file "rtl.def" for documentation on define_insn, match_*, et al.
+
+;; The insns in this file are presented in the same order as the AMD 29000
+;; User's Manual (i.e., alphabetical by machine op-code).
+;;
+;; DEFINE_EXPAND's are located near the first occurrence of the major insn
+;; that they generate.
+\f
+;; The only attribute we have is the type. We only care about calls, branches,
+;; loads, stores, floating-point operations, and multi-word insns.
+;; Everything else is miscellaneous.
+
+(define_attr "type"
+ "call,branch,load,store,fadd,fmul,fam,fdiv,dmul,dam,ddiv,multi,misc"
+ (const_string "misc"))
+
+;; ASM insns cannot go into a delay slot, so call them "multi".
+(define_asm_attributes [(set_attr "type" "multi")])
+
+;; Anything that is not a call, branch, or multi-insn sequence is eligible
+;; for a delay slot.
+(define_attr "in_delay_slot" "yes,no"
+ (if_then_else (eq_attr "type" "call,branch,multi") (const_string "no")
+ (const_string "yes")))
+
+;; Branch and call insns require a single delay slot. Annulling is not
+;; supported.
+(define_delay (eq_attr "type" "call,branch")
+ [(eq_attr "in_delay_slot" "yes") (nil) (nil)])
+
+;; Define the function unit usages. We first define memory as a unit.
+;; Arguments after the condition are READY-DELAY and ISSUE-DELAY, per the
+;; GCC function-unit scheduling model.
+(define_function_unit "memory" 1 2 (eq_attr "type" "load") 6 11)
+(define_function_unit "memory" 1 2 (eq_attr "type" "store") 1 0)
+
+;; Now define the function units for the floating-point support. Most
+;; units are pipelined and can accept an input every cycle.
+;;
+;; Note that we have an inaccuracy here. If a fmac insn is issued, followed
+;; 2 cycles later by a fadd, there will be a conflict for the floating
+;; adder that we can't represent. Also, all insns will conflict for the
+;; floating-point rounder. It isn't clear how to represent this.
+
+;; NOTE(review): the latency numbers below presumably come from the 29050
+;; timing tables -- confirm against the processor manual.
+(define_function_unit "multiplier" 1 0 (eq_attr "type" "fmul") 3 0)
+(define_function_unit "multiplier" 1 0 (eq_attr "type" "dmul") 6 8)
+(define_function_unit "multiplier" 1 0 (eq_attr "type" "fam") 6 8)
+(define_function_unit "multiplier" 1 0 (eq_attr "type" "dam") 9 8)
+
+(define_function_unit "adder" 1 0 (eq_attr "type" "fadd,fam,dam") 3 0)
+
+;; The divider is not pipelined (simultaneity 1).
+(define_function_unit "divider" 1 1 (eq_attr "type" "fdiv") 11 20)
+(define_function_unit "divider" 1 1 (eq_attr "type" "ddiv") 18 34)
+\f
+;; ADD
+;; The second alternative handles a negative constant (constraint N) by
+;; emitting a subtract of its negation (%n2).
+(define_insn "addsi3"
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r,r")
+ (plus:SI (match_operand:SI 1 "gen_reg_operand" "%r,r")
+ (match_operand:SI 2 "add_operand" "rI,N")))]
+ ""
+ "@
+ add %0,%1,%2
+ sub %0,%1,%n2")
+
+;; Double-word add: add the low words, then add the high words with carry
+;; (addc). Two machine insns, hence type "multi" (no delay slot).
+(define_insn "adddi3"
+ [(set (match_operand:DI 0 "gen_reg_operand" "=r")
+ (plus:DI (match_operand:DI 1 "gen_reg_operand" "%r")
+ (match_operand:DI 2 "gen_reg_operand" "r")))]
+ ""
+ "add %L0,%L1,%L2\;addc %0,%1,%2"
+ [(set_attr "type" "multi")])
+
+;; AND/ANDN
+;; The second alternative uses ANDN with the one's complement of the
+;; constant (%C2) when the constant fits that form (constraint K).
+(define_insn "andsi3"
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r,r")
+ (and:SI (match_operand:SI 1 "gen_reg_operand" "%r,r")
+ (match_operand:SI 2 "and_operand" "rI,K")))]
+ ""
+ "@
+ and %0,%1,%2
+ andn %0,%1,%C2")
+
+;; (and (not X) Y) maps directly onto ANDN, with operands swapped in the
+;; output template since ANDN complements its second source.
+(define_insn ""
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r")
+ (and:SI (not:SI (match_operand:SI 1 "srcb_operand" "rI"))
+ (match_operand:SI 2 "gen_reg_operand" "r")))]
+ ""
+ "andn %0,%2,%1")
+
+\f
+;; CALLI
+;;
+;; Start with a subroutine to write out CLOBBERs starting at lr2 up to,
+;; but not including, the next parameter register. If operand[0] is null,
+;; it means that all the argument registers have been used.
+(define_expand "clobbers_to"
+ [(clobber (match_operand:SI 0 "" ""))]
+ ""
+ "
+{
+ int i;
+ int high_regno;
+
+ if (operands[0] == 0)
+ high_regno = R_LR (18);
+ else if (GET_CODE (operands[0]) != REG || REGNO (operands[0]) < R_LR (0)
+ || REGNO (operands[0]) > R_LR (18))
+ abort ();
+ else
+ high_regno = REGNO (operands[0]);
+
+ for (i = R_LR (2); i < high_regno; i++)
+ emit_insn (gen_rtx (CLOBBER, VOIDmode, gen_rtx (REG, SImode, i)));
+
+ DONE;
+}")
+
+;; Call expander: operand 0 is the MEM to call, operand 1 the argument
+;; byte count, operand 2 the next-arg-register marker consumed by
+;; clobbers_to above. Register 32 is clobbered by the call (the output
+;; template uses lr0 for the return address -- presumably reg 32 is lr0;
+;; confirm against the register layout in the tm file).
+(define_expand "call"
+ [(parallel [(call (match_operand:SI 0 "" "")
+ (match_operand 1 "" ""))
+ (clobber (reg:SI 32))])
+ (match_operand 2 "" "")]
+ ""
+ "
+{
+ if (GET_CODE (operands[0]) != MEM)
+ abort ();
+
+ /* Outside small-memory mode a symbolic address will not reach with a
+ direct CALL, so force it into a register for CALLI. */
+ if (! TARGET_SMALL_MEMORY
+ && GET_CODE (XEXP (operands[0], 0)) == SYMBOL_REF)
+ operands[0] = gen_rtx (MEM, GET_MODE (operands[0]),
+ force_reg (Pmode, XEXP (operands[0], 0)));
+
+ operands[2] = gen_clobbers_to (operands[2]);
+}")
+
+;; Indirect call through a register; %# emits the delay-slot filler.
+(define_insn ""
+ [(call (match_operand:SI 0 "memory_operand" "m")
+ (match_operand 1 "" ""))
+ (clobber (reg:SI 32))]
+ ""
+ "calli lr0,%0%#"
+ [(set_attr "type" "call")])
+
+;; call_value: same as "call" but the result lands in operand 0.
+(define_expand "call_value"
+ [(parallel [(set (match_operand:SI 0 "gen_reg_operand" "")
+ (call (match_operand:SI 1 "" "")
+ (match_operand 2 "" "")))
+ (clobber (reg:SI 32))])
+ (match_operand 3 "" "")]
+ ""
+ "
+{
+ if (GET_CODE (operands[1]) != MEM)
+ abort ();
+
+ /* See the "call" expander: symbolic targets must go through a register
+ unless we are in small-memory mode. */
+ if (! TARGET_SMALL_MEMORY
+ && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF)
+ operands[1] = gen_rtx (MEM, GET_MODE (operands[1]),
+ force_reg (Pmode, XEXP (operands[1], 0)));
+
+ operands[3] = gen_clobbers_to (operands[3]);
+}")
+
+(define_insn ""
+ [(set (match_operand 0 "gen_reg_operand" "=r")
+ (call (match_operand:SI 1 "memory_operand" "m")
+ (match_operand 2 "" "")))
+ (clobber (reg:SI 32))]
+ ""
+ "calli lr0,%1%#"
+ [(set_attr "type" "call")])
+
+;; Direct CALL to a symbol: allowed in small-memory mode, or when calling
+;; the current function itself (which is certainly in range).
+(define_insn ""
+ [(call (mem:SI (match_operand:SI 0 "immediate_operand" "i"))
+ (match_operand:SI 1 "general_operand" "g"))
+ (clobber (reg:SI 32))]
+ "GET_CODE (operands[0]) == SYMBOL_REF
+ && (TARGET_SMALL_MEMORY
+ || ! strcmp (XSTR (operands[0], 0), current_function_name))"
+ "call lr0,%F0"
+ [(set_attr "type" "call")])
+
+(define_insn ""
+ [(set (match_operand 0 "gen_reg_operand" "=r")
+ (call (mem:SI (match_operand:SI 1 "immediate_operand" "i"))
+ (match_operand:SI 2 "general_operand" "g")))
+ (clobber (reg:SI 32))]
+ "GET_CODE (operands[1]) == SYMBOL_REF
+ && (TARGET_SMALL_MEMORY
+ || ! strcmp (XSTR (operands[1], 0), current_function_name))"
+ "call lr0,%F1"
+ [(set_attr "type" "call")])
+
+;; Stack probe: call the run-time stack checker. The (const_int 1)
+;; argument marks this as an internal TPC-style call (see below).
+(define_expand "probe"
+ [(call (mem:SI (symbol_ref:SI "_msp_check"))
+ (const_int 1))]
+ "TARGET_STACK_CHECK"
+ "")
+
+;; This is used for internal routine calls via TPC. Currently used only
+;; in probe, above.
+;; NOTE(review): %* presumably prints the TPC return register -- confirm
+;; against PRINT_OPERAND in the target files.
+(define_insn ""
+ [(call (mem:SI (match_operand:SI 0 "immediate_operand" "s"))
+ (const_int 1))]
+ ""
+ "call %*,%0"
+ [(set_attr "type" "call")])
+\f
+;; CONST, CONSTH, CONSTN
+;;
+;; Many of these are generated from move insns.
+;; CONST loads a 16-bit constant (here expressed as the constant masked to
+;; its low halfword).
+(define_insn ""
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r")
+ (and:SI (match_operand:SI 1 "immediate_operand" "i")
+ (const_int 65535)))]
+ ""
+ "const %0,%1")
+
+;; CONSTH writes the high halfword of the destination; the low halfword
+;; (bit position 0, 16 bits wide) is untouched. Here the stored value is
+;; the top half of the constant.
+(define_insn ""
+ [(set (zero_extract:SI (match_operand:SI 0 "gen_reg_operand" "+r")
+ (const_int 16)
+ (match_operand:SI 1 "const_0_operand" ""))
+ (ashiftrt:SI (match_operand:SI 2 "immediate_operand" "i")
+ (const_int 16)))]
+ ""
+ "consth %0,%2")
+
+;; Same, but the 16-bit constant is given unshifted (constraint J); shift
+;; it up before printing so CONSTH sees the full 32-bit value.
+(define_insn ""
+ [(set (zero_extract:SI (match_operand:SI 0 "gen_reg_operand" "+r")
+ (const_int 16)
+ (match_operand:SI 1 "const_0_operand" ""))
+ (match_operand:SI 2 "cint_16_operand" "J"))]
+ ""
+ "*
+{ operands[2] = gen_rtx (CONST_INT, VOIDmode, INTVAL (operands[2]) << 16);
+ return \"consth %0,%2\";
+}")
+
+;; IOR a constant whose low halfword is zero into a register whose low
+;; halfword is being kept: that is exactly CONSTH. The condition checks
+;; the CONSTANT operand (operands[2]); the previous code mistakenly
+;; applied INTVAL to operands[1], which is a REG, not a CONST_INT.
+(define_insn ""
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r")
+ (ior:SI (and:SI (match_operand:SI 1 "gen_reg_operand" "0")
+ (const_int 65535))
+ (match_operand:SI 2 "const_int_operand" "n")))]
+ "(INTVAL (operands[2]) & 0xffff) == 0"
+ "consth %0,%2")
+
+;; As above, but the high halfword comes from an immediate explicitly
+;; masked to its top 16 bits, so no condition is needed.
+(define_insn ""
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r")
+ (ior:SI (and:SI (match_operand:SI 1 "gen_reg_operand" "0")
+ (const_int 65535))
+ (and:SI (match_operand:SI 2 "immediate_operand" "i")
+ (const_int -65536))))]
+ ""
+ "consth %0,%2")
+
+\f
+;; CONVERT
+;; The numeric operands of CONVERT select signedness, rounding mode, and
+;; destination/source formats. NOTE(review): from the patterns below the
+;; last two fields appear to be destination/source format with 0=integer,
+;; 1=single, 2=double, and the third field 0=signed, 1=unsigned -- confirm
+;; against the CONVERT description in the 29000 manual.
+(define_insn "fix_truncsfsi2"
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r")
+ (fix:SI (match_operand:SF 1 "register_operand" "r")))]
+ ""
+ "convert %0,%1,0,3,0,1")
+
+(define_insn "fix_truncdfsi2"
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r")
+ (fix:SI (match_operand:DF 1 "register_operand" "r")))]
+ ""
+ "convert %0,%1,0,3,0,2")
+
+(define_insn "fixuns_truncsfsi2"
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r")
+ (unsigned_fix:SI (match_operand:SF 1 "register_operand" "r")))]
+ ""
+ "convert %0,%1,1,3,0,1")
+
+(define_insn "fixuns_truncdfsi2"
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r")
+ (unsigned_fix:SI (match_operand:DF 1 "register_operand" "r")))]
+ ""
+ "convert %0,%1,1,3,0,2")
+
+(define_insn "truncdfsf2"
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (float_truncate:SF (match_operand:DF 1 "register_operand" "r")))]
+ ""
+ "convert %0,%1,0,4,1,2")
+
+(define_insn "extendsfdf2"
+ [(set (match_operand:DF 0 "register_operand" "=r")
+ (float_extend:DF (match_operand:SF 1 "register_operand" "r")))]
+ ""
+ "convert %0,%1,0,4,2,1")
+
+(define_insn "floatsisf2"
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (float:SF (match_operand:SI 1 "gen_reg_operand" "r")))]
+ ""
+ "convert %0,%1,0,4,1,0")
+
+(define_insn "floatsidf2"
+ [(set (match_operand:DF 0 "register_operand" "=r")
+ (float:DF (match_operand:SI 1 "gen_reg_operand" "r")))]
+ ""
+ "convert %0,%1,0,4,2,0")
+
+(define_insn "floatunssisf2"
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (unsigned_float:SF (match_operand:SI 1 "gen_reg_operand" "r")))]
+ ""
+ "convert %0,%1,1,4,1,0")
+
+(define_insn "floatunssidf2"
+ [(set (match_operand:DF 0 "register_operand" "=r")
+ (unsigned_float:DF (match_operand:SI 1 "gen_reg_operand" "r")))]
+ ""
+ "convert %0,%1,1,4,2,0")
+\f
+;; CPxxx, DEQ, DGT, DGE, FEQ, FGT, FGE
+;; Compare insns deliver a boolean into a general register; %J3 prints the
+;; condition-code suffix for the comparison operator.
+(define_insn ""
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r")
+ (match_operator 3 "comparison_operator"
+ [(match_operand:SI 1 "gen_reg_operand" "r")
+ (match_operand:SI 2 "srcb_operand" "rI")]))]
+ ""
+ "cp%J3 %0,%1,%2")
+
+;; Single-precision FP compares (FEQ/FGT/FGE); they run on the FP adder.
+(define_insn ""
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r")
+ (match_operator 3 "fp_comparison_operator"
+ [(match_operand:SF 1 "register_operand" "r")
+ (match_operand:SF 2 "register_operand" "r")]))]
+ ""
+ "f%J3 %0,%1,%2"
+ [(set_attr "type" "fadd")])
+
+;; Double-precision FP compares (DEQ/DGT/DGE).
+(define_insn ""
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r")
+ (match_operator 3 "fp_comparison_operator"
+ [(match_operand:DF 1 "register_operand" "r")
+ (match_operand:DF 2 "register_operand" "r")]))]
+ ""
+ "d%J3 %0,%1,%2"
+ [(set_attr "type" "fadd")])
+\f
+;; DADD
+(define_expand "adddf3"
+ [(set (match_operand:DF 0 "register_operand" "")
+ (plus:DF (match_operand:DF 1 "register_operand" "")
+ (match_operand:DF 2 "register_operand" "")))]
+ ""
+ "")
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=r")
+ (plus:DF (match_operand:DF 1 "register_operand" "%r")
+ (match_operand:DF 2 "register_operand" "r")))]
+ "! TARGET_29050 "
+ "dadd %0,%1,%2"
+ [(set_attr "type" "fadd")])
+
+;; On the 29050 an add whose second operand is already in the accumulator
+;; (constraint 0/"a") can use DMAC function code 8 instead.
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=r,a")
+ (plus:DF (match_operand:DF 1 "register_operand" "%r,r")
+ (match_operand:DF 2 "register_operand" "r,0")))]
+ "TARGET_29050"
+ "@
+ dadd %0,%1,%2
+ dmac 8,%0,%1,0"
+ [(set_attr "type" "fadd,dam")])
+
+;; DDIV
+;; Double-precision divide. Operand 1 is an input, so its constraint is
+;; plain "r"; the previous "=r" wrongly marked it write-only, which would
+;; mislead register allocation.
+(define_insn "divdf3"
+ [(set (match_operand:DF 0 "register_operand" "=r")
+ (div:DF (match_operand:DF 1 "register_operand" "r")
+ (match_operand:DF 2 "register_operand" "r")))]
+ ""
+ "ddiv %0,%1,%2"
+ [(set_attr "type" "ddiv")])
+\f
+;; DIVIDE
+;;
+;; We must set Q to the sign extension of the dividend first. For MOD, we
+;; must get the remainder from Q.
+;;
+;; For divmod: operand 1 is divided by operand 2; quotient goes to operand
+;; 0 and remainder to operand 3.
+;; Hard register 180 is the Q special register (per the comment above).
+(define_expand "divmodsi4"
+ [(set (match_dup 4)
+ (ashiftrt:SI (match_operand:SI 1 "gen_reg_operand" "")
+ (const_int 31)))
+ (set (reg:SI 180)
+ (match_dup 4))
+ (parallel [(set (match_operand:SI 0 "gen_reg_operand" "")
+ (div:SI (match_dup 1)
+ (match_operand:SI 2 "gen_reg_operand" "")))
+ (set (reg:SI 180)
+ (mod:SI (match_dup 1)
+ (match_dup 2)))
+ (use (reg:SI 180))])
+ (set (match_operand:SI 3 "gen_reg_operand" "")
+ (reg:SI 180))]
+ ""
+ "
+{
+ /* Temporary holding the sign extension of the dividend. */
+ operands[4] = gen_reg_rtx (SImode);
+}")
+
+;; DIVIDE leaves the quotient in the destination and the remainder in Q;
+;; the USE records that Q must hold the extended dividend on entry.
+(define_insn ""
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r")
+ (div:SI (match_operand:SI 1 "gen_reg_operand" "r")
+ (match_operand:SI 2 "gen_reg_operand" "r")))
+ (set (reg:SI 180)
+ (mod:SI (match_dup 1)
+ (match_dup 2)))
+ (use (reg:SI 180))]
+ ""
+ "divide %0,%1,%2")
+\f
+;; DIVIDU
+;;
+;; Similar to DIVIDE.
+;; For the unsigned case Q must be zeroed rather than sign-extended.
+(define_expand "udivmodsi4"
+ [(set (reg:SI 180)
+ (const_int 0))
+ (parallel [(set (match_operand:SI 0 "gen_reg_operand" "")
+ (udiv:SI (match_operand:SI 1 "gen_reg_operand" "")
+ (match_operand:SI 2 "gen_reg_operand" "")))
+ (set (reg:SI 180)
+ (umod:SI (match_dup 1)
+ (match_dup 2)))
+ (use (reg:SI 180))])
+ (set (match_operand:SI 3 "gen_reg_operand" "")
+ (reg:SI 180))]
+ ""
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r")
+ (udiv:SI (match_operand:SI 1 "gen_reg_operand" "r")
+ (match_operand:SI 2 "gen_reg_operand" "r")))
+ (set (reg:SI 180)
+ (umod:SI (match_dup 1)
+ (match_dup 2)))
+ (use (reg:SI 180))]
+ ""
+ "dividu %0,%1,%2")
+\f
+;; DMAC/DMSM
+;; 29050-only multiply-accumulate. The first operand of DMAC is a function
+;; code selecting the operation; NOTE(review): the codes used below (0, 1,
+;; 2, 3, 4, 5, 8, 9, 10, 11, 13) presumably follow the 29050 DMAC function
+;; code table -- confirm against the processor manual.
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=a,*r")
+ (plus:DF (mult:DF (match_operand:DF 1 "register_operand" "%r,A")
+ (match_operand:DF 2 "register_operand" "r,r"))
+ (match_operand:DF 3 "register_operand" "0,*r")))]
+ "TARGET_29050"
+ "@
+ dmac 0,%0,%1,%2
+ dmsm %0,%2,%3"
+ [(set_attr "type" "dam")])
+
+;; -a * b + acc
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=a")
+ (plus:DF (mult:DF (neg:DF (match_operand:DF 1 "register_operand" "r"))
+ (match_operand:DF 2 "register_operand" "r"))
+ (match_operand:DF 3 "register_operand" "0")))]
+ "TARGET_29050"
+ "dmac 1,%0,%2,%1"
+ [(set_attr "type" "dam")])
+
+;; a * b - acc
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=a")
+ (minus:DF (mult:DF (match_operand:DF 1 "register_operand" "%r")
+ (match_operand:DF 2 "register_operand" "r"))
+ (match_operand:DF 3 "register_operand" "0")))]
+ "TARGET_29050"
+ "dmac 2,%0,%1,%2"
+ [(set_attr "type" "dam")])
+
+;; a * -b - acc
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=a")
+ (minus:DF (mult:DF (match_operand:DF 1 "register_operand" "r")
+ (neg:DF (match_operand:DF 2 "register_operand" "r")))
+ (match_operand:DF 3 "register_operand" "0")))]
+ "TARGET_29050"
+ "dmac 3,%0,%1,%2"
+ [(set_attr "type" "dam")])
+
+;; -a * b (no accumulate)
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=a")
+ (mult:DF (neg:DF (match_operand:DF 1 "register_operand" "r"))
+ (match_operand:DF 2 "register_operand" "r")))]
+ "TARGET_29050"
+ "dmac 5,%0,%2,%1"
+ [(set_attr "type" "dam")])
+
+;; -a - acc
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=a")
+ (minus:DF (neg:DF (match_operand:DF 1 "register_operand" "r"))
+ (match_operand:DF 2 "register_operand" "0")))]
+ "TARGET_29050"
+ "dmac 11,%0,%1,0"
+ [(set_attr "type" "dam")])
+
+;; -(a + acc), the same value as -a - acc above.
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=a")
+ (neg:DF (plus:DF (match_operand:DF 1 "register_operand" "%r")
+ (match_operand:DF 2 "register_operand" "0"))))]
+ "TARGET_29050"
+ "dmac 11,%0,%1,0"
+ [(set_attr "type" "dam")])
+
+;; Double negate: flip the sign bit of the high word via XOR with a mask
+;; built by CPEQ (which produces 0x80000000 for TRUE); the low word is
+;; copied unchanged (SLL by 0) when source and destination differ.
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=r,r,a")
+ (neg:DF (match_operand:DF 1 "register_operand" "0,r,r")))
+ (clobber (match_scratch:SI 2 "=&r,&r,X"))]
+ "TARGET_29050"
+ "@
+ cpeq %2,gr1,gr1\;xor %0,%1,%2
+ cpeq %2,gr1,gr1\;xor %0,%1,%2\;sll %L0,%L1,0
+ dmac 13,%0,%1,0"
+ [(set_attr "type" "multi,multi,dam")])
+
+;; DMUL
+(define_expand "muldf3"
+ [(set (match_operand:DF 0 "register_operand" "")
+ (mult:DF (match_operand:DF 1 "register_operand" "")
+ (match_operand:DF 2 "register_operand" "")))]
+ ""
+ "")
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=r")
+ (mult:DF (match_operand:DF 1 "register_operand" "%r")
+ (match_operand:DF 2 "register_operand" "r")))]
+ "! TARGET_29050"
+ "dmul %0,%1,%2"
+ [(set_attr "type" "dmul")])
+
+;; On the 29050 a multiply targeting the accumulator can use DMAC code 4.
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=r,a")
+ (mult:DF (match_operand:DF 1 "register_operand" "%r,r")
+ (match_operand:DF 2 "register_operand" "r,r")))]
+ "TARGET_29050"
+ "@
+ dmul %0,%1,%2
+ dmac 4,%0,%1,%2"
+ [(set_attr "type" "dmul,dam")])
+
+;; DSUB
+;; Constraints are ignored in a define_expand, so use empty strings like
+;; the sibling adddf3/muldf3/addsf3 expanders do.
+(define_expand "subdf3"
+ [(set (match_operand:DF 0 "register_operand" "")
+ (minus:DF (match_operand:DF 1 "register_operand" "")
+ (match_operand:DF 2 "register_operand" "")))]
+ ""
+ "")
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=r")
+ (minus:DF (match_operand:DF 1 "register_operand" "r")
+ (match_operand:DF 2 "register_operand" "r")))]
+ "! TARGET_29050"
+ "dsub %0,%1,%2"
+ [(set_attr "type" "fadd")])
+
+;; 29050: when one operand is already the accumulator, a subtract can be a
+;; DMAC (code 9 for acc - b, code 10 for a - acc).
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=r,a,a")
+ (minus:DF (match_operand:DF 1 "register_operand" "r,0,r")
+ (match_operand:DF 2 "register_operand" "r,r,0")))]
+ "TARGET_29050"
+ "@
+ dsub %0,%1,%2
+ dmac 9,%0,%2,0
+ dmac 10,%0,%1,0"
+ [(set_attr "type" "fadd,dam,dam")])
+\f
+;; EXBYTE
+;; Byte extraction: the byte position comes from the BP special register
+;; (constraint "b"), expressed in RTL as a bit offset of (BP * 8), i.e.
+;; (ashift BP 3).
+(define_insn ""
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r")
+ (ior:SI (and:SI (match_operand:SI 1 "srcb_operand" "rI")
+ (const_int -256))
+ (zero_extract:SI (match_operand:SI 2 "gen_reg_operand" "r")
+ (const_int 8)
+ (ashift:SI (match_operand:SI 3 "register_operand" "b")
+ (const_int 3)))))]
+ ""
+ "exbyte %0,%2,%1")
+
+;; Extract a byte into the low bits of the destination, zero elsewhere.
+(define_insn ""
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r")
+ (zero_extract:SI (match_operand:SI 1 "gen_reg_operand" "r")
+ (const_int 8)
+ (ashift:SI (match_operand:SI 2 "register_operand" "b")
+ (const_int 3))))]
+ ""
+ "exbyte %0,%1,0")
+
+;; Extract a byte into the top byte (bit position 24) of the destination,
+;; keeping its other bits.
+(define_insn ""
+ [(set (zero_extract:SI (match_operand:SI 0 "gen_reg_operand" "+r")
+ (const_int 8)
+ (match_operand:SI 1 "const_24_operand" ""))
+ (zero_extract:SI (match_operand:SI 2 "gen_reg_operand" "r")
+ (const_int 8)
+ (ashift:SI (match_operand:SI 3 "register_operand" "b")
+ (const_int 3))))]
+ ""
+ "exbyte %0,%2,%0")
+
+;; Unsigned bit-field extract: only byte- or halfword-aligned fields of
+;; width 8 or 16 are supported; rewrite the position into the (ashift BP 3)
+;; form the insns above expect.
+(define_expand "extzv"
+ [(set (match_operand:SI 0 "gen_reg_operand" "")
+ (zero_extract:SI (match_operand:SI 1 "gen_reg_operand" "")
+ (match_operand:SI 2 "general_operand" "")
+ (match_operand:SI 3 "general_operand" "")))]
+ ""
+ "
+{
+ int size, pos;
+
+ if (GET_CODE (operands[2]) != CONST_INT
+ || GET_CODE (operands[3]) != CONST_INT)
+ FAIL;
+
+ size = INTVAL (operands[2]);
+ pos = INTVAL (operands[3]);
+ if ((size != 8 && size != 16) || pos % size != 0)
+ FAIL;
+
+ /* Express the position as a byte number loaded into a register,
+ shifted to a bit count, matching the insn patterns above. */
+ operands[3] = gen_rtx (ASHIFT, SImode,
+ force_reg (SImode,
+ gen_rtx (CONST_INT, VOIDmode, pos / 8)),
+ gen_rtx (CONST_INT, VOIDmode, 3));
+}")
+
+;; Signed bit-field extract. The extv standard pattern is defined to
+;; sign-extend, so it must generate SIGN_EXTRACT (matched by the EXHWS
+;; insn below); the previous zero_extract would have produced an unsigned
+;; extract. Only halfword-aligned 16-bit fields are supported.
+(define_expand "extv"
+ [(set (match_operand:SI 0 "gen_reg_operand" "")
+ (sign_extract:SI (match_operand:SI 1 "gen_reg_operand" "")
+ (match_operand:SI 2 "general_operand" "")
+ (match_operand:SI 3 "general_operand" "")))]
+ ""
+ "
+{
+ int pos;
+
+ if (GET_CODE (operands[2]) != CONST_INT
+ || GET_CODE (operands[3]) != CONST_INT)
+ FAIL;
+
+ pos = INTVAL (operands[3]);
+ if (INTVAL (operands[2]) != 16 || pos % 16 != 0)
+ FAIL;
+
+ /* Express the position as a byte number in a register, shifted to a
+ bit count, matching the EXHWS insn pattern. */
+ operands[3] = gen_rtx (ASHIFT, SImode,
+ force_reg (SImode,
+ gen_rtx (CONST_INT, VOIDmode, pos / 8)),
+ gen_rtx (CONST_INT, VOIDmode, 3));
+}")
+
+;; EXHW
+;; Halfword analogues of the EXBYTE patterns above; position again comes
+;; from BP as (ashift BP 3).
+(define_insn ""
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r")
+ (ior:SI (and:SI (match_operand:SI 1 "srcb_operand" "rI")
+ (const_int -65536))
+ (zero_extract:SI (match_operand:SI 2 "gen_reg_operand" "r")
+ (const_int 16)
+ (ashift:SI (match_operand:SI 3 "register_operand" "b")
+ (const_int 3)))))]
+ ""
+ "exhw %0,%2,%1")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r")
+ (zero_extract:SI (match_operand:SI 1 "gen_reg_operand" "r")
+ (const_int 16)
+ (ashift:SI (match_operand:SI 2 "register_operand" "b")
+ (const_int 3))))]
+ ""
+ "exhw %0,%1,0")
+
+(define_insn ""
+ [(set (zero_extract:SI (match_operand:SI 0 "gen_reg_operand" "+r")
+ (const_int 16)
+ (match_operand:SI 1 "const_16_operand" ""))
+ (zero_extract:SI (match_operand:SI 2 "gen_reg_operand" "r")
+ (const_int 16)
+ (ashift:SI (match_operand:SI 3 "register_operand" "b")
+ (const_int 3))))]
+ ""
+ "exhw %0,%2,%0")
+
+;; EXHWS
+;; Sign-extending halfword extract; this is what the extv expander maps to.
+(define_insn ""
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r")
+ (sign_extract:SI (match_operand:SI 1 "gen_reg_operand" "r")
+ (const_int 16)
+ (ashift:SI (match_operand:SI 2 "register_operand" "b")
+ (const_int 3))))]
+ ""
+ "exhws %0,%1")
+\f
+;; EXTRACT
+;; EXTRACT with both sources the same register performs a left rotate by
+;; the funnel-shift count register FC (hard register 178 here; presumably
+;; FC per the rotlsi3 expander below -- confirm in the tm file).
+(define_insn ""
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r")
+ (rotate:SI (match_operand:SI 1 "gen_reg_operand" "r")
+ (reg:QI 178)))]
+ ""
+ "extract %0,%1,%1")
+
+;; Rotate left: load the shift count into FC (reg 178), then use the
+;; EXTRACT-based rotate above. The stray space in "match_operand: SI"
+;; has been removed -- the md reader requires the mode suffix to follow
+;; the code name immediately.
+(define_expand "rotlsi3"
+ [(set (reg:QI 178)
+ (match_operand:SI 2 "gen_reg_or_immediate_operand" ""))
+ (set (match_operand:SI 0 "gen_reg_operand" "")
+ (rotate:SI (match_operand:SI 1 "gen_reg_operand" "")
+ (reg:QI 178)))]
+ ""
+ "
+{ operands[2] = gen_lowpart (QImode, operands[2]); }")
+\f
+;; FADD
+(define_expand "addsf3"
+ [(set (match_operand:SF 0 "register_operand" "")
+ (plus:SF (match_operand:SF 1 "register_operand" "")
+ (match_operand:SF 2 "register_operand" "")))]
+ ""
+ "")
+
+(define_insn ""
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (plus:SF (match_operand:SF 1 "register_operand" "%r")
+ (match_operand:SF 2 "register_operand" "r")))]
+ "! TARGET_29050"
+ "fadd %0,%1,%2"
+ [(set_attr "type" "fadd")])
+
+;; On the 29050 an add whose second operand is the accumulator can use
+;; FMAC function code 8.
+(define_insn ""
+ [(set (match_operand:SF 0 "register_operand" "=r,a")
+ (plus:SF (match_operand:SF 1 "register_operand" "%r,r")
+ (match_operand:SF 2 "register_operand" "r,0")))]
+ "TARGET_29050"
+ "@
+ fadd %0,%1,%2
+ fmac 8,%0,%1,0"
+ [(set_attr "type" "fadd,fam")])
+
+;; FDIV
+;; Single-precision divide. Two fixes from the previous version: the
+;; division must be in SF mode (it was wrongly written div:DF inside an
+;; SF set), and input operand 1 takes the plain "r" constraint, not the
+;; output constraint "=r".
+(define_insn "divsf3"
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (div:SF (match_operand:SF 1 "register_operand" "r")
+ (match_operand:SF 2 "register_operand" "r")))]
+ ""
+ "fdiv %0,%1,%2"
+ [(set_attr "type" "fdiv")])
+
+;; FDMUL
+;; Single * single producing a double, with no intermediate rounding.
+;; NOTE(review): no "type" attribute is set, so this schedules as "misc";
+;; verify whether it should occupy the multiplier unit like fmul/dmul.
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=r")
+ (mult:DF (float_extend:DF (match_operand:SF 1 "register_operand" "%r"))
+ (float_extend:DF (match_operand:SF 2 "register_operand" "r"))))]
+ ""
+ "fdmul %0,%1,%2")
+
+;; FMAC/FMSM
+;; Single-precision analogues of the DMAC/DMSM patterns; the first FMAC
+;; operand is the function code selecting the operation. NOTE(review):
+;; confirm the codes against the 29050 FMAC function code table.
+(define_insn ""
+ [(set (match_operand:SF 0 "register_operand" "=a,*r")
+ (plus:SF (mult:SF (match_operand:SF 1 "register_operand" "%r,A")
+ (match_operand:SF 2 "register_operand" "r,r"))
+ (match_operand:SF 3 "register_operand" "0,*r")))]
+ "TARGET_29050"
+ "@
+ fmac 0,%0,%1,%2
+ fmsm %0,%2,%3"
+ [(set_attr "type" "fam")])
+
+;; -a * b + acc
+(define_insn ""
+ [(set (match_operand:SF 0 "register_operand" "=a")
+ (plus:SF (mult:SF (neg:SF (match_operand:SF 1 "register_operand" "r"))
+ (match_operand:SF 2 "register_operand" "r"))
+ (match_operand:SF 3 "register_operand" "0")))]
+ "TARGET_29050"
+ "fmac 1,%0,%2,%1"
+ [(set_attr "type" "fam")])
+
+;; a * b - acc
+(define_insn ""
+ [(set (match_operand:SF 0 "register_operand" "=a")
+ (minus:SF (mult:SF (match_operand:SF 1 "register_operand" "%r")
+ (match_operand:SF 2 "register_operand" "r"))
+ (match_operand:SF 3 "register_operand" "0")))]
+ "TARGET_29050"
+ "fmac 2,%0,%1,%2"
+ [(set_attr "type" "fam")])
+
+;; -a * b - acc
+(define_insn ""
+ [(set (match_operand:SF 0 "register_operand" "=a")
+ (minus:SF (mult:SF (neg:SF (match_operand:SF 1 "register_operand" "r"))
+ (match_operand:SF 2 "register_operand" "r"))
+ (match_operand:SF 3 "register_operand" "0")))]
+ "TARGET_29050"
+ "fmac 3,%0,%2,%1"
+ [(set_attr "type" "fam")])
+
+;; -a * b (no accumulate)
+(define_insn ""
+ [(set (match_operand:SF 0 "register_operand" "=a")
+ (mult:SF (neg:SF (match_operand:SF 1 "register_operand" "r"))
+ (match_operand:SF 2 "register_operand" "r")))]
+ "TARGET_29050"
+ "fmac 5,%0,%2,%1"
+ [(set_attr "type" "fam")])
+
+;; -a - acc (mathematically symmetric in a and acc, hence the % marker;
+;; note the DF twin of this pattern omits it).
+(define_insn ""
+ [(set (match_operand:SF 0 "register_operand" "=a")
+ (minus:SF (neg:SF (match_operand:SF 1 "register_operand" "%r"))
+ (match_operand:SF 2 "register_operand" "0")))]
+ "TARGET_29050"
+ "fmac 11,%0,%1,0"
+ [(set_attr "type" "fam")])
+
+;; -(a + acc), the same value as the pattern above.
+(define_insn ""
+ [(set (match_operand:SF 0 "register_operand" "=a")
+ (neg:SF (plus:SF (match_operand:SF 1 "register_operand" "%r")
+ (match_operand:SF 2 "register_operand" "0"))))]
+ "TARGET_29050"
+ "fmac 11,%0,%1,0"
+ [(set_attr "type" "fam")])
+
+;; Single negate: flip the sign bit via XOR with a CPEQ-built 0x80000000
+;; mask, or use FMAC code 13 when targeting the accumulator.
+(define_insn ""
+ [(set (match_operand:SF 0 "register_operand" "=r,a")
+ (neg:SF (match_operand:SF 1 "register_operand" "r,r")))
+ (clobber (match_scratch:SI 2 "=&r,X"))]
+ "TARGET_29050"
+ "@
+ cpeq %2,gr1,gr1\;xor %0,%1,%2
+ fmac 13,%0,%1,0"
+ [(set_attr "type" "multi,fam")])
+
+;; FMUL
+(define_expand "mulsf3"
+ [(set (match_operand:SF 0 "register_operand" "")
+ (mult:SF (match_operand:SF 1 "register_operand" "")
+ (match_operand:SF 2 "register_operand" "")))]
+ ""
+ "")
+
+(define_insn ""
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (mult:SF (match_operand:SF 1 "register_operand" "%r")
+ (match_operand:SF 2 "register_operand" "r")))]
+ "! TARGET_29050"
+ "fmul %0,%1,%2"
+ [(set_attr "type" "fmul")])
+
+;; On the 29050 a multiply targeting the accumulator can use FMAC code 4.
+(define_insn ""
+ [(set (match_operand:SF 0 "register_operand" "=r,a")
+ (mult:SF (match_operand:SF 1 "register_operand" "%r,r")
+ (match_operand:SF 2 "register_operand" "r,r")))]
+ "TARGET_29050"
+ "@
+ fmul %0,%1,%2
+ fmac 4,%0,%1,%2"
+ [(set_attr "type" "fmul,fam")])
+
+;; FSUB
+(define_expand "subsf3"
+ [(set (match_operand:SF 0 "register_operand" "")
+ (minus:SF (match_operand:SF 1 "register_operand" "")
+ (match_operand:SF 2 "register_operand" "")))]
+ ""
+ "")
+
+(define_insn ""
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (minus:SF (match_operand:SF 1 "register_operand" "r")
+ (match_operand:SF 2 "register_operand" "r")))]
+ "! TARGET_29050"
+ "fsub %0,%1,%2"
+ [(set_attr "type" "fadd")])
+
+;; 29050: FMAC code 9 for acc - b, code 10 for a - acc.
+(define_insn ""
+ [(set (match_operand:SF 0 "register_operand" "=r,a,a")
+ (minus:SF (match_operand:SF 1 "register_operand" "r,0,r")
+ (match_operand:SF 2 "register_operand" "r,r,0")))]
+ "TARGET_29050"
+ "@
+ fsub %0,%1,%2
+ fmac 9,%0,%2,0
+ fmac 10,%0,%1,0"
+ [(set_attr "type" "fadd,fam,fam")])
+\f
+;; INBYTE
+;; Byte insertion at the position given by BP, mirrored from the EXBYTE
+;; extraction patterns above.
+(define_insn ""
+ [(set (zero_extract:SI (match_operand:SI 0 "gen_reg_operand" "+r")
+ (const_int 8)
+ (ashift:SI (match_operand:SI 2 "register_operand" "b")
+ (const_int 3)))
+ (match_operand:SI 1 "srcb_operand" "rI"))]
+ ""
+ "inbyte %0,%0,%1")
+
+;; Same operation written as mask-and-merge RTL: clear the target byte of
+;; operand 1, then OR in the (already-positioned) byte of operand 2.
+(define_insn ""
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r")
+ (ior:SI (and:SI (not:SI (ashift:SI (const_int 255)
+ (ashift:SI (match_operand:SI 3 "register_operand" "b")
+ (const_int 3))))
+ (match_operand:SI 1 "gen_reg_operand" "r"))
+ (ashift:SI (and:SI (match_operand:SI 2 "srcb_operand" "rI")
+ (const_int 255))
+ (match_operand:SI 4 "const_24_operand" ""))))]
+ ""
+ "inbyte %0,%1,%2")
+
+;; Variant without the explicit AND 255 (the value is known to fit a byte).
+(define_insn ""
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r")
+ (ior:SI (and:SI (not:SI (ashift:SI (const_int 255)
+ (ashift:SI (match_operand:SI 3 "register_operand" "b")
+ (const_int 3))))
+ (match_operand:SI 1 "gen_reg_operand" "r"))
+ (ashift:SI (match_operand:SI 2 "srcb_operand" "rI")
+ (match_operand:SI 4 "const_24_operand" ""))))]
+ ""
+ "inbyte %0,%1,%2")
+
+;; INHW
+;; Halfword analogues of the INBYTE patterns.
+(define_insn ""
+ [(set (zero_extract:SI (match_operand:SI 0 "gen_reg_operand" "+r")
+ (const_int 16)
+ (ashift:SI (match_operand:SI 2 "register_operand" "b")
+ (const_int 3)))
+ (match_operand:SI 1 "srcb_operand" "rI"))]
+ ""
+ "inhw %0,%0,%1")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r")
+ (ior:SI (and:SI (not:SI (ashift:SI (const_int 65535)
+ (ashift:SI (match_operand:SI 3 "register_operand" "b")
+ (const_int 3))))
+ (match_operand:SI 1 "gen_reg_operand" "r"))
+ (ashift:SI (and:SI (match_operand:SI 2 "srcb_operand" "rI")
+ (const_int 65535))
+ (match_operand:SI 4 "const_24_operand" ""))))]
+ ""
+ "inhw %0,%1,%2")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r")
+ (ior:SI (and:SI (not:SI (ashift:SI (const_int 65535)
+ (ashift:SI (match_operand:SI 3 "register_operand" "b")
+ (const_int 3))))
+ (match_operand:SI 1 "gen_reg_operand" "r"))
+ (ashift:SI (match_operand:SI 2 "srcb_operand" "rI")
+ (match_operand:SI 4 "const_24_operand" ""))))]
+ ""
+ "inhw %0,%1,%2")
+
+;; Bit-field store: same alignment restrictions as extzv above.
+(define_expand "insv"
+ [(set (zero_extract:SI (match_operand:SI 0 "gen_reg_operand" "")
+ (match_operand:SI 1 "general_operand" "")
+ (match_operand:SI 2 "general_operand" ""))
+ (match_operand:SI 3 "srcb_operand" ""))]
+ ""
+ "
+{
+ int size, pos;
+
+ if (GET_CODE (operands[1]) != CONST_INT
+ || GET_CODE (operands[2]) != CONST_INT)
+ FAIL;
+
+ size = INTVAL (operands[1]);
+ pos = INTVAL (operands[2]);
+ if ((size != 8 && size != 16) || pos % size != 0)
+ FAIL;
+
+ /* Express the position as a byte number in a register, shifted to a
+ bit count, matching the INBYTE/INHW patterns above. */
+ operands[2] = gen_rtx (ASHIFT, SImode,
+ force_reg (SImode,
+ gen_rtx (CONST_INT, VOIDmode, pos / 8)),
+ gen_rtx (CONST_INT, VOIDmode, 3));
+}")
+\f
+;; LOAD (also used by move insn).
+;; When byte/halfword hardware access is disabled (! TARGET_DW_ENABLE),
+;; a byte load is a word load of the aligned address plus setting hard
+;; register 177 (presumably BP, the byte-pointer special register --
+;; confirm in the tm file) from the low address bits.
+(define_insn ""
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r")
+ (mem:SI (and:SI (match_operand:SI 1 "gen_reg_operand" "r")
+ (const_int -4))))
+ (set (reg:SI 177)
+ (and:SI (match_dup 1)
+ (const_int 3)))]
+ "! TARGET_DW_ENABLE"
+ "load 0,17,%0,%1"
+ [(set_attr "type" "load")])
+
+;; Halfword version: only bit 1 of the address goes into reg 177.
+(define_insn ""
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r")
+ (mem:SI (and:SI (match_operand:SI 1 "gen_reg_operand" "r")
+ (const_int -4))))
+ (set (reg:SI 177)
+ (and:SI (match_dup 1)
+ (const_int 2)))]
+ "! TARGET_DW_ENABLE"
+ "load 0,18,%0,%1"
+ [(set_attr "type" "load")])
+
+;; With DW enabled the hardware widens for us; %X2 prints the load option
+;; bits for the extension operator.
+(define_insn ""
+ [(set (match_operand 0 "gen_reg_operand" "=r")
+ (match_operator 2 "extend_operator"
+ [(match_operand 1 "memory_operand" "m")]))]
+ "TARGET_DW_ENABLE && GET_MODE (operands[0]) == GET_MODE (operands[2])"
+ "load 0,%X2,%0,%1"
+ [(set_attr "type" "load")])
+\f
+;; LOADM
+;; Hard register 179 is CR, the count remaining register (see the CR
+;; comment in the expander body).
+(define_expand "load_multiple"
+ [(set (reg:SI 179)
+ (match_dup 2))
+ (match_parallel 3 "" [(set (match_operand:SI 0 "" "")
+ (match_operand:SI 1 "" ""))])]
+ ""
+ "
+{
+ int regno;
+ int count;
+ rtx from;
+ int i;
+
+ /* Support only loading a constant number of hard registers from memory. */
+ if (GET_CODE (operands[2]) != CONST_INT
+ || operands[2] == const1_rtx
+ || GET_CODE (operands[1]) != MEM
+ || GET_CODE (operands[0]) != REG
+ || REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER)
+ FAIL;
+
+ count = INTVAL (operands[2]);
+ regno = REGNO (operands[0]);
+
+ /* CR gets set to the number of registers minus one. */
+ operands[2] = gen_rtx (CONST_INT, VOIDmode, count - 1);
+
+ /* Build the PARALLEL: entry 0 is the first register load, entries 1-2
+ record the use and clobber of CR, and entries 3..count+1 load the
+ remaining registers from consecutive words. */
+ operands[3] = gen_rtx (PARALLEL, VOIDmode, rtvec_alloc (count + 2));
+ from = memory_address (SImode, XEXP (operands[1], 0));
+ XVECEXP (operands[3], 0, 0) = gen_rtx (SET, VOIDmode,
+ gen_rtx (REG, SImode, regno),
+ gen_rtx (MEM, SImode, from));
+ XVECEXP (operands[3], 0, 1)
+ = gen_rtx (USE, VOIDmode, gen_rtx (REG, SImode, R_CR));
+ XVECEXP (operands[3], 0, 2)
+ = gen_rtx (CLOBBER, VOIDmode, gen_rtx (REG, SImode, R_CR));
+
+ for (i = 1; i < count; i++)
+ XVECEXP (operands[3], 0, i + 2)
+ = gen_rtx (SET, VOIDmode, gen_rtx (REG, SImode, regno + i),
+ gen_rtx (MEM, SImode, plus_constant (from, i * 4)));
+}")
+
+;; Indicate that CR is used and is then clobbered.
+(define_insn ""
+ [(set (match_operand 0 "gen_reg_operand" "=r")
+ (match_operand 1 "memory_operand" "m"))
+ (use (reg:SI 179))
+ (clobber (reg:SI 179))]
+ "GET_MODE (operands[0]) == GET_MODE (operands[1])
+ && GET_MODE_SIZE (GET_MODE (operands[0])) > UNITS_PER_WORD
+ && ! TARGET_29050"
+ "loadm 0,0,%0,%1"
+ [(set_attr "type" "load")])
+
+;; The 29050 variant needs an early-clobber destination (=&r), presumably
+;; because the destination block must not overlap the address register
+;; while the load is in progress.
+(define_insn ""
+ [(set (match_operand 0 "gen_reg_operand" "=&r")
+ (match_operand 1 "memory_operand" "m"))
+ (use (reg:SI 179))
+ (clobber (reg:SI 179))]
+ "GET_MODE (operands[0]) == GET_MODE (operands[1])
+ && GET_MODE_SIZE (GET_MODE (operands[0])) > UNITS_PER_WORD
+ && TARGET_29050"
+ "loadm 0,0,%0,%1"
+ [(set_attr "type" "load")])
+
+(define_insn ""
+ [(match_parallel 0 "load_multiple_operation"
+ [(set (match_operand:SI 1 "gen_reg_operand" "=r")
+ (match_operand:SI 2 "memory_operand" "m"))
+ (use (reg:SI 179))
+ (clobber (reg:SI 179))])]
+ "! TARGET_29050"
+ "loadm 0,0,%1,%2"
+ [(set_attr "type" "load")])
+
+(define_insn ""
+ [(match_parallel 0 "load_multiple_operation"
+ [(set (match_operand:SI 1 "gen_reg_operand" "=&r")
+ (match_operand:SI 2 "memory_operand" "m"))
+ (use (reg:SI 179))
+ (clobber (reg:SI 179))])]
+ "TARGET_29050"
+ "loadm 0,0,%1,%2"
+ [(set_attr "type" "load")])
+\f
+;; MTSR (used also by move insn)
+;; Write a register or immediate into a special register ("*h").  The value
+;; is matched under an AND so this can combine with a masking operation;
+;; the condition (presumably) verifies that the mask covers every bit the
+;; special register implements, so dropping the AND is safe.
+(define_insn ""
+ [(set (match_operand:SI 0 "spec_reg_operand" "=*h,*h")
+ (and:SI (match_operand:SI 1 "gen_reg_or_immediate_operand" "r,i")
+ (match_operand:SI 2 "const_int_operand" "n,n")))]
+ "masks_bits_for_special (operands[0], operands[2])"
+ "@
+ mtsr %0,%1
+ mtsrim %0,%1")
+\f
+;; MULTIPLY, MULTM, MULTMU
+;; 32x32 -> low 32 bits.
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r")
+ (mult:SI (match_operand:SI 1 "gen_reg_operand" "%r")
+ (match_operand:SI 2 "gen_reg_operand" "r")))]
+ ""
+ "multiply %0,%1,%2")
+
+;; High word of a signed 32x32 product (MULTM).
+(define_insn ""
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r")
+ (subreg:SI
+ (mult:DI
+ (sign_extend:DI (match_operand:SI 1 "gen_reg_operand" "%r"))
+ (sign_extend:DI (match_operand:SI 2 "gen_reg_operand" "r"))) 0))]
+ ""
+ "multm %0,%1,%2")
+
+;; High word of an unsigned 32x32 product (MULTMU).
+(define_insn ""
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r")
+ (subreg:SI
+ (mult:DI
+ (zero_extend:DI (match_operand:SI 1 "gen_reg_operand" "%r"))
+ (zero_extend:DI (match_operand:SI 2 "gen_reg_operand" "r"))) 0))]
+ ""
+ "multmu %0,%1,%2")
+
+;; Full signed 32x32 -> 64-bit product: MULTIPLY for the low word,
+;; MULTM for the high word.  Split after reload by the define_split below.
+(define_insn "mulsidi3"
+ [(set (match_operand:DI 0 "gen_reg_operand" "=r")
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "gen_reg_operand" "r"))
+ (sign_extend:DI (match_operand:SI 2 "gen_reg_operand" "r"))))]
+ ""
+ "multiply %L0,%1,%2\;multm %0,%1,%2"
+ [(set_attr "type" "multi")])
+
+;; After reload, split mulsidi3 into its two component insns: MULTIPLY
+;; sets the low word of the product, MULTM the high word.  Both subwords
+;; are taken from the DESTINATION (operands[0]); word 1 is the low word,
+;; word 0 the high word (big-endian word order).
+(define_split
+ [(set (match_operand:DI 0 "gen_reg_operand" "")
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "gen_reg_operand" ""))
+ (sign_extend:DI (match_operand:SI 2 "gen_reg_operand" ""))))]
+ "reload_completed"
+ [(set (match_dup 3)
+ (mult:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 4)
+ (subreg:SI (mult:DI
+ (sign_extend:DI (match_dup 1))
+ (sign_extend:DI (match_dup 2))) 0))]
+ "
+{ operands[3] = operand_subword (operands[0], 1, 1, DImode);
+ operands[4] = operand_subword (operands[0], 0, 1, DImode); } ")
+
+;; Full unsigned 32x32 -> 64-bit product: MULTIPLU low word, MULTMU high
+;; word.  Split after reload by the define_split below.
+(define_insn "umulsidi3"
+ [(set (match_operand:DI 0 "gen_reg_operand" "=r")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "gen_reg_operand" "r"))
+ (zero_extend:DI (match_operand:SI 2 "gen_reg_operand" "r"))))]
+ ""
+ "multiplu %L0,%1,%2\;multmu %0,%1,%2"
+ [(set_attr "type" "multi")])
+
+;; After reload, split umulsidi3 the same way as the signed version:
+;; both subwords come from the DESTINATION (operands[0]) -- word 1 (low)
+;; gets the SImode product, word 0 (high) gets the MULTMU high word.
+(define_split
+ [(set (match_operand:DI 0 "gen_reg_operand" "")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "gen_reg_operand" ""))
+ (zero_extend:DI (match_operand:SI 2 "gen_reg_operand" ""))))]
+ "reload_completed"
+ [(set (match_dup 3)
+ (mult:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 4)
+ (subreg:SI (mult:DI (zero_extend:DI (match_dup 1))
+ (zero_extend:DI (match_dup 2))) 0))]
+ "
+{ operands[3] = operand_subword (operands[0], 1, 1, DImode);
+ operands[4] = operand_subword (operands[0], 0, 1, DImode); } ")
+
+;; NAND
+;; NAND, written in its De Morgan form (~a | ~b) as combine produces it.
+(define_insn ""
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r")
+ (ior:SI (not:SI (match_operand:SI 1 "gen_reg_operand" "%r"))
+ (not:SI (match_operand:SI 2 "srcb_operand" "rI"))))]
+ ""
+ "nand %0,%1,%2")
+
+;; NAND with a constant whose complement fits in 8 bits; %C2 prints the
+;; complemented constant.
+(define_insn ""
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r")
+ (ior:SI (not:SI (match_operand:SI 1 "gen_reg_operand" "r"))
+ (match_operand:SI 2 "const_int_operand" "K")))]
+ "((unsigned) ~ INTVAL (operands[2])) < 256"
+ "nand %0,%1,%C2")
+
+;; NOR, in its De Morgan form (~a & ~b).
+(define_insn ""
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r")
+ (and:SI (not:SI (match_operand:SI 1 "gen_reg_operand" "%r"))
+ (not:SI (match_operand:SI 2 "srcb_operand" "rI"))))]
+ ""
+ "nor %0,%1,%2")
+
+;; NOR with a constant whose complement fits in 8 bits.
+(define_insn ""
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r")
+ (and:SI (not:SI (match_operand:SI 1 "gen_reg_operand" "r"))
+ (match_operand:SI 2 "const_int_operand" "K")))]
+ "((unsigned) ~ INTVAL (operands[2])) < 256"
+ "nor %0,%1,%C2")
+
+;; One's complement: NOR with zero.
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r")
+ (not:SI (match_operand:SI 1 "gen_reg_operand" "r")))]
+ ""
+ "nor %0,%1,0")
+\f
+;; OR/ORN
+;; Inclusive OR.  The expander just lets one of the two insns below match;
+;; the 29050 additionally has ORN for constants printable via %C2.
+(define_expand "iorsi3"
+ [(set (match_operand:SI 0 "gen_reg_operand" "")
+ (ior:SI (match_operand:SI 1 "gen_reg_operand" "")
+ (match_operand:SI 2 "srcb_operand" "")))]
+ ""
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r")
+ (ior:SI (match_operand:SI 1 "gen_reg_operand" "%r")
+ (match_operand:SI 2 "srcb_operand" "rI")))]
+ "! TARGET_29050"
+ "or %0,%1,%2")
+
+;; 29050: second alternative uses ORN with the complemented constant.
+(define_insn ""
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r,r")
+ (ior:SI (match_operand:SI 1 "gen_reg_operand" "%r,r")
+ (match_operand:SI 2 "srcb_operand" "rI,K")))]
+ "TARGET_29050"
+ "@
+ or %0,%1,%2
+ orn %0,%1,%C2")
+
+\f
+;; SLL (also used by move insn)
+;; No-op: an ASEQ comparing gr1 with itself, which (presumably) never
+;; traps and has no other effect -- TODO confirm against the 29K manual.
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "aseq 0x40,gr1,gr1")
+
+;; Shift left logical; %Q2 prints a register or the 5-bit shift count.
+(define_insn "ashlsi3"
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r")
+ (ashift:SI (match_operand:SI 1 "gen_reg_operand" "r")
+ (match_operand:QI 2 "srcb_operand" "rn")))]
+ ""
+ "sll %0,%1,%Q2")
+
+;; Shift right arithmetic.
+(define_insn "ashrsi3"
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r")
+ (ashiftrt:SI (match_operand:SI 1 "gen_reg_operand" "r")
+ (match_operand:QI 2 "srcb_operand" "rn")))]
+ ""
+ "sra %0,%1,%Q2")
+
+;; Shift right logical.
+(define_insn "lshrsi3"
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r")
+ (lshiftrt:SI (match_operand:SI 1 "gen_reg_operand" "r")
+ (match_operand:QI 2 "srcb_operand" "rn")))]
+ ""
+ "srl %0,%1,%Q2")
+\f
+;; STORE
+;;
+;; These somewhat bogus patterns exist to set OPT = 001/010 for partial-word
+;; stores on systems with DW not set.
+;; Partial-word store with OPT = 001.  The AND of the address exists only
+;; to make this pattern distinct from a normal SImode store so the special
+;; OPT field gets emitted; the masks (-4 here, -3 below) are deliberately
+;; "bogus" markers, not meaningful address arithmetic.
+(define_insn ""
+ [(set (mem:SI (and:SI (match_operand:SI 0 "gen_reg_operand" "r")
+ (const_int -4)))
+ (match_operand:SI 1 "gen_reg_operand" "r"))]
+ "! TARGET_DW_ENABLE"
+ "store 0,1,%1,%0"
+ [(set_attr "type" "store")])
+
+;; Partial-word store with OPT = 010 (marker mask -3).
+(define_insn ""
+ [(set (mem:SI (and:SI (match_operand:SI 0 "gen_reg_operand" "r")
+ (const_int -3)))
+ (match_operand:SI 1 "gen_reg_operand" "r"))]
+ "! TARGET_DW_ENABLE"
+ "store 0,2,%1,%0"
+ [(set_attr "type" "store")])
+
+;; STOREM
+;; store_multiple entry point: dispatch to one of two sub-expanders
+;; depending on whether this chip has the STOREM erratum.  FAIL if the
+;; chosen expander could not build a pattern.
+(define_expand "store_multiple"
+ [(use (match_operand 0 "" ""))
+ (use (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))]
+ ""
+ "
+{ rtx pat;
+
+ if (TARGET_NO_STOREM_BUG)
+ pat = gen_store_multiple_no_bug (operands[0], operands[1], operands[2]);
+ else
+ pat = gen_store_multiple_bug (operands[0], operands[1], operands[2]);
+
+ if (pat)
+ emit_insn (pat);
+ else
+ FAIL;
+
+ DONE;
+}")
+
+;; store_multiple for chips without the STOREM bug: set CR to count-1,
+;; then build a PARALLEL of one (mem = reg) SET per word plus a USE and
+;; CLOBBER of CR.  Operand 0 is the memory destination, operand 1 the
+;; first source register, operand 2 the word count.
+(define_expand "store_multiple_no_bug"
+ [(set (reg:SI 179)
+ (match_dup 2))
+ (match_parallel 3 "" [(set (match_operand:SI 0 "" "")
+ (match_operand:SI 1 "" ""))])]
+ ""
+ "
+{
+ int regno;
+ int count;
+ rtx from;
+ int i;
+
+ /* Support only storing a constant number of hard registers to memory. */
+ if (GET_CODE (operands[2]) != CONST_INT
+ || operands[2] == const1_rtx
+ || GET_CODE (operands[0]) != MEM
+ || GET_CODE (operands[1]) != REG
+ || REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER)
+ FAIL;
+
+ count = INTVAL (operands[2]);
+ regno = REGNO (operands[1]);
+
+ /* CR gets set to the number of registers minus one. */
+ operands[2] = gen_rtx (CONST_INT, VOIDmode, count - 1);
+
+ operands[3] = gen_rtx (PARALLEL, VOIDmode, rtvec_alloc (count + 2));
+ from = memory_address (SImode, XEXP (operands[0], 0));
+ XVECEXP (operands[3], 0, 0) = gen_rtx (SET, VOIDmode,
+ gen_rtx (MEM, SImode, from),
+ gen_rtx (REG, SImode, regno));
+ XVECEXP (operands[3], 0, 1)
+ = gen_rtx (USE, VOIDmode, gen_rtx (REG, SImode, R_CR))
+ XVECEXP (operands[3], 0, 2)
+ = gen_rtx (CLOBBER, VOIDmode, gen_rtx (REG, SImode, R_CR));
+
+ for (i = 1; i < count; i++)
+ XVECEXP (operands[3], 0, i + 2)
+ = gen_rtx (SET, VOIDmode,
+ gen_rtx (MEM, SImode, plus_constant (from, i * 4)),
+ gen_rtx (REG, SImode, regno + i));
+}")
+
+;; store_multiple for chips WITH the STOREM erratum: no explicit CR setup
+;; here (the matching insn emits its own MTSRIM), so the PARALLEL carries
+;; only the per-word SETs plus a CLOBBER of CR.  Operand 2 (the count) is
+;; referenced only from this C fragment, never from the RTL template.
+(define_expand "store_multiple_bug"
+ [(match_parallel 3 "" [(set (match_operand:SI 0 "" "")
+ (match_operand:SI 1 "" ""))])]
+ ""
+ "
+{
+ int regno;
+ int count;
+ rtx from;
+ int i;
+
+ /* Support only storing a constant number of hard registers to memory. */
+ if (GET_CODE (operands[2]) != CONST_INT
+ || operands[2] == const1_rtx
+ || GET_CODE (operands[0]) != MEM
+ || GET_CODE (operands[1]) != REG
+ || REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER)
+ FAIL;
+
+ count = INTVAL (operands[2]);
+ regno = REGNO (operands[1]);
+
+ operands[3] = gen_rtx (PARALLEL, VOIDmode, rtvec_alloc (count + 1));
+ from = memory_address (SImode, XEXP (operands[0], 0));
+ XVECEXP (operands[3], 0, 0) = gen_rtx (SET, VOIDmode,
+ gen_rtx (MEM, SImode, from),
+ gen_rtx (REG, SImode, regno));
+ XVECEXP (operands[3], 0, 1)
+ = gen_rtx (CLOBBER, VOIDmode, gen_rtx (REG, SImode, R_CR));
+
+ for (i = 1; i < count; i++)
+ XVECEXP (operands[3], 0, i + 1)
+ = gen_rtx (SET, VOIDmode,
+ gen_rtx (MEM, SImode, plus_constant (from, i * 4)),
+ gen_rtx (REG, SImode, regno + i));
+}")
+
+;; STOREM on buggy chips: emit the MTSRIM of CR immediately before the
+;; STOREM (CR only clobbered, not used, in the RTL).
+(define_insn ""
+ [(set (match_operand 0 "memory_operand" "=m")
+ (match_operand 1 "gen_reg_operand" "r"))
+ (clobber (reg:SI 179))]
+ "!TARGET_NO_STOREM_BUG
+ && GET_MODE (operands[0]) == GET_MODE (operands[1])
+ && GET_MODE_SIZE (GET_MODE (operands[0])) > UNITS_PER_WORD"
+ "mtsrim cr,%S1\;storem 0,0,%1,%0"
+ [(set_attr "type" "multi")])
+
+;; PARALLEL form of the above; %V0 prints the word count minus one.
+(define_insn ""
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (match_operand:SI 1 "memory_operand" "=m")
+ (match_operand:SI 2 "gen_reg_operand" "r"))
+ (clobber (reg:SI 179))])]
+ "!TARGET_NO_STOREM_BUG"
+ "mtsrim cr,%V0\;storem 0,0,%2,%1"
+ [(set_attr "type" "multi")])
+
+;; STOREM on fixed chips: CR was set by a separate insn, so it appears as
+;; both used and clobbered here.
+(define_insn ""
+ [(set (match_operand 0 "memory_operand" "=m")
+ (match_operand 1 "gen_reg_operand" "r"))
+ (use (reg:SI 179))
+ (clobber (reg:SI 179))]
+ "TARGET_NO_STOREM_BUG
+ && GET_MODE (operands[0]) == GET_MODE (operands[1])
+ && GET_MODE_SIZE (GET_MODE (operands[0])) > UNITS_PER_WORD"
+ "storem 0,0,%1,%0"
+ [(set_attr "type" "store")])
+
+;; PARALLEL form for fixed chips.
+(define_insn ""
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (match_operand:SI 1 "memory_operand" "=m")
+ (match_operand:SI 2 "gen_reg_operand" "r"))
+ (use (reg:SI 179))
+ (clobber (reg:SI 179))])]
+ "TARGET_NO_STOREM_BUG"
+ "storem 0,0,%2,%1"
+ [(set_attr "type" "store")])
+\f
+;; SUB
+;;
+;; Either operand can be a register or an 8-bit constant, but both cannot be
+;; constants (can't usually occur anyway).
+;; Either SOURCE operand may be a register or an 8-bit constant, but the
+;; insn cannot take two constants, so if both sources are CONST_INT force
+;; the first into a register.  (Operand 0 is the destination and can
+;; never be a CONST_INT, so it is operands[1] and operands[2] we test.)
+(define_expand "subsi3"
+ [(set (match_operand:SI 0 "gen_reg_operand" "")
+ (minus:SI (match_operand:SI 1 "srcb_operand" "")
+ (match_operand:SI 2 "srcb_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operands[1]) == CONST_INT
+ && GET_CODE (operands[2]) == CONST_INT)
+ operands[1] = force_reg (SImode, operands[1]);
+}")
+
+;; SUB when the first source is a register, SUBR (reversed subtract) when
+;; the constant is on the left; at least one source must be a register.
+(define_insn ""
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r,r")
+ (minus:SI (match_operand:SI 1 "srcb_operand" "r,I")
+ (match_operand:SI 2 "srcb_operand" "rI,r")))]
+ "register_operand (operands[1], SImode)
+ || register_operand (operands[2], SImode)"
+ "@
+ sub %0,%1,%2
+ subr %0,%2,%1")
+
+;; 64-bit subtract: SUB on the low words, SUBC (subtract with carry) on
+;; the high words.
+(define_insn "subdi3"
+ [(set (match_operand:DI 0 "gen_reg_operand" "=r")
+ (minus:DI (match_operand:DI 1 "gen_reg_operand" "r")
+ (match_operand:DI 2 "gen_reg_operand" "r")))]
+ ""
+ "sub %L0,%L1,%L2\;subc %0,%1,%2"
+ [(set_attr "type" "multi")])
+
+;; 64-bit negate: 0 - operand via SUBR/SUBRC.
+(define_insn "negdi2"
+ [(set (match_operand:DI 0 "gen_reg_operand" "=r")
+ (neg:DI (match_operand:DI 1 "gen_reg_operand" "r")))]
+ ""
+ "subr %L0,%L1,0\;subrc %0,%1,0"
+ [(set_attr "type" "multi")])
+
+;; 32-bit negate: 0 - operand.
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r")
+ (neg:SI (match_operand:SI 1 "gen_reg_operand" "r")))]
+ ""
+ "subr %0,%1,0")
+\f
+;; XNOR
+;; Exclusive-NOR: the complement of an XOR.
+(define_insn ""
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r")
+ (not:SI (xor:SI (match_operand:SI 1 "gen_reg_operand" "%r")
+ (match_operand:SI 2 "srcb_operand" "rI"))))]
+ ""
+ "xnor %0,%1,%2")
+
+;; Exclusive OR.
+(define_insn "xorsi3"
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r")
+ (xor:SI (match_operand:SI 1 "gen_reg_operand" "%r")
+ (match_operand:SI 2 "srcb_operand" "rI")))]
+ ""
+ "xor %0,%1,%2")
+
+;; Can use XOR to negate floating-point values, but we are better off not doing
+;; it that way on the 29050 so it can combine with the fmac insns.
+;; Negate a single-float.  On non-29050 chips, do it by XORing the sign
+;; bit into the (single) word; on the 29050 fall through to the insn
+;; pattern so the negation can combine with the fmac insns.
+(define_expand "negsf2"
+ [(parallel [(set (match_operand:SF 0 "register_operand" "")
+ (neg:SF (match_operand:SF 1 "register_operand" "")))
+ (clobber (match_scratch:SI 2 ""))])]
+ ""
+ "
+{
+ rtx result;
+ rtx target;
+
+ if (! TARGET_29050)
+ {
+ target = operand_subword_force (operands[0], 0, SFmode);
+ result = expand_binop (SImode, xor_optab,
+ operand_subword_force (operands[1], 0, SFmode),
+ gen_rtx (CONST_INT, VOIDmode, 0x80000000),
+ target, 0, OPTAB_WIDEN);
+ if (result == 0)
+ abort ();
+
+ /* expand_binop may return a pseudo other than TARGET; if so, copy
+ the RESULT into TARGET (emit_move_insn takes destination first). */
+ if (result != target)
+ emit_move_insn (target, result);
+
+ /* Make a place for REG_EQUAL. */
+ emit_move_insn (operands[0], operands[0]);
+ DONE;
+ }
+}")
+
+;; Negate a double-float.  On non-29050 chips, XOR the sign bit into the
+;; high word and copy the low word unchanged, wrapped in a no-conflict
+;; block so the two word moves are treated as one logical operation.
+(define_expand "negdf2"
+ [(parallel [(set (match_operand:DF 0 "register_operand" "")
+ (neg:DF (match_operand:DF 1 "register_operand" "")))
+ (clobber (match_scratch:SI 2 ""))])]
+ ""
+ "
+{
+ rtx result;
+ rtx target;
+ rtx insns;
+
+ if (! TARGET_29050)
+ {
+ start_sequence ();
+ target = operand_subword (operands[0], 0, 1, DFmode);
+ result = expand_binop (SImode, xor_optab,
+ operand_subword_force (operands[1], 0, DFmode),
+ gen_rtx (CONST_INT, VOIDmode, 0x80000000),
+ target, 0, OPTAB_WIDEN);
+ if (result == 0)
+ abort ();
+
+ /* expand_binop may return a pseudo other than TARGET; if so, copy
+ the RESULT into TARGET (emit_move_insn takes destination first). */
+ if (result != target)
+ emit_move_insn (target, result);
+
+ emit_move_insn (operand_subword (operands[0], 1, 1, DFmode),
+ operand_subword_force (operands[1], 1, DFmode));
+
+ insns = get_insns ();
+ end_sequence ();
+
+ emit_no_conflict_block (insns, operands[0], operands[1], 0, 0);
+ DONE;
+ }
+}")
+\f
+;; Sign extend and truncation operations.
+;; Zero-extend byte -> halfword: mask with 255.
+(define_insn "zero_extendqihi2"
+ [(set (match_operand:HI 0 "gen_reg_operand" "=r")
+ (zero_extend:HI (match_operand:QI 1 "gen_reg_operand" "r")))]
+ ""
+ "and %0,%1,255")
+
+;; Zero-extend byte -> word: mask with 255.
+(define_insn "zero_extendqisi2"
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r")
+ (zero_extend:SI (match_operand:QI 1 "gen_reg_operand" "r")))]
+ ""
+ "and %0,%1,255")
+
+;; Zero-extend halfword -> word: clear the upper 16 bits in place
+;; (CONSTH with 0 writes the high half; input tied to output via "0").
+(define_insn "zero_extendhisi2"
+ [(set (match_operand:SI 0 "gen_reg_operand" "=r")
+ (zero_extend:SI (match_operand:HI 1 "gen_reg_operand" "0")))]
+ ""
+ "consth %0,0")
+
+;; Sign-extend byte -> halfword by shifting left then arithmetic right 24.
+;; Done in SImode via gen_lowpart since the hardware only shifts words.
+(define_expand "extendqihi2"
+ [(set (match_dup 2)
+ (ashift:SI (match_operand:QI 1 "gen_reg_operand" "")
+ (const_int 24)))
+ (set (match_operand:HI 0 "gen_reg_operand" "")
+ (ashiftrt:SI (match_dup 2)
+ (const_int 24)))]
+ ""
+ "
+{ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode); }")
+
+;; Sign-extend byte -> word: shift left 24, arithmetic right 24.
+(define_expand "extendqisi2"
+ [(set (match_dup 2)
+ (ashift:SI (match_operand:QI 1 "gen_reg_operand" "")
+ (const_int 24)))
+ (set (match_operand:SI 0 "gen_reg_operand" "")
+ (ashiftrt:SI (match_dup 2)
+ (const_int 24)))]
+ ""
+ "
+{ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode); }")
+
+;; Sign-extend halfword -> word: shift left 16, arithmetic right 16.
+(define_expand "extendhisi2"
+ [(set (match_dup 2)
+ (ashift:SI (match_operand:HI 1 "gen_reg_operand" "")
+ (const_int 16)))
+ (set (match_operand:SI 0 "gen_reg_operand" "")
+ (ashiftrt:SI (match_dup 2)
+ (const_int 16)))]
+ ""
+ "
+{ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode); }")
+\f
+;; Define the methods used to move data around.
+;;
+;; movsi:
+;;
+;; If storing into memory, force source into register.
+;; SImode move: force the source into a register for stores, and for
+;; moves into a special register unless the source is a register or a
+;; 16-bit constant (the only forms MTSR/MTSRIM accept).
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (GET_CODE (operands[0]) == MEM && ! gen_reg_operand (operands[1], SImode))
+ operands[1] = copy_to_mode_reg (SImode, operands[1]);
+ else if (spec_reg_operand (operands[0], SImode)
+ && ! (register_operand (operands[1], SImode)
+ || cint_16_operand (operands[1], SImode)))
+ operands[1] = force_reg (SImode, operands[1]);
+}")
+
+;; A constant needing more than 16 bits is built with two insns: CONST
+;; for the low half, then CONSTH for the high half (expressed here as
+;; AND/IOR so each half is a recognizable insn).
+(define_split
+ [(set (match_operand:SI 0 "gen_reg_operand" "")
+ (match_operand:SI 1 "long_const_operand" ""))]
+ ""
+ [(set (match_dup 0)
+ (and:SI (match_dup 1)
+ (const_int 65535)))
+ (set (match_dup 0)
+ (ior:SI (and:SI (match_dup 0)
+ (const_int 65535))
+ (and:SI (match_dup 1)
+ (const_int -65536))))]
+ "")
+\f
+;; Subroutines to load/store halfwords. Use TAV (gr121) as scratch. We have
+;; two versions of storehi, one when halfword writes are supported and one
+;; where they aren't.
+;; Load a halfword when DW is not enabled: load the enclosing word into a
+;; scratch (TAV during reload, a fresh pseudo otherwise), latch the byte
+;; offset in BP (reg 177), then extract 16 bits at offset BP*8.
+(define_expand "loadhi"
+ [(parallel [(set (match_dup 2)
+ (mem:SI (and:SI (match_operand:SI 0 "gen_reg_operand" "")
+ (const_int -4))))
+ (set (reg:SI 177)
+ (and:SI (match_dup 0)
+ (const_int 2)))])
+ (set (match_operand:HI 1 "gen_reg_operand" "")
+ (zero_extract:SI (match_dup 2)
+ (const_int 16)
+ (ashift:SI (reg:SI 177)
+ (const_int 3))))]
+ ""
+ "
+{ operands[1] = gen_lowpart (SImode, operands[1]);
+
+ if (reload_in_progress)
+ operands[2] = gen_rtx (REG, SImode, R_TAV);
+ else
+ operands[2] = gen_reg_rtx (SImode);
+}")
+
+;; Store a halfword on systems WITHOUT hardware halfword writes: load the
+;; enclosing word, insert the 16-bit value at the byte offset latched in
+;; BP (reg 177), then store the whole word back.  The zero_extract width
+;; is 16 -- a halfword -- matching loadhi above (8 would insert only a
+;; byte).
+(define_expand "storehinhww"
+ [(parallel [(set (match_dup 2)
+ (mem:SI (and:SI (match_operand:SI 0 "gen_reg_operand" "")
+ (const_int -4))))
+ (set (reg:SI 177)
+ (and:SI (match_dup 0)
+ (const_int 2)))])
+ (set (zero_extract:SI (match_dup 2)
+ (const_int 16)
+ (ashift:SI (reg:SI 177)
+ (const_int 3)))
+ (match_operand:HI 1 "gen_reg_operand" ""))
+ (set (mem:SI (match_dup 0))
+ (match_dup 2))]
+ ""
+ "
+{ operands[1] = gen_lowpart (SImode, operands[1]);
+
+ if (reload_in_progress)
+ operands[2] = gen_rtx (REG, SImode, R_TAV);
+ else
+ operands[2] = gen_reg_rtx (SImode);
+}")
+
+;; Store a halfword on systems WITH hardware halfword writes: build the
+;; merged word with mask/shift arithmetic and store through the "bogus"
+;; (and addr -3) address form so the OPT=010 store pattern matches.
+(define_expand "storehihww"
+ [(set (reg:SI 177)
+ (and:SI (match_operand:SI 0 "gen_reg_operand" "")
+ (const_int 3)))
+ (set (match_dup 2)
+ (ior:SI (and:SI (not:SI (ashift:SI (const_int 65535)
+ (ashift:SI (reg:SI 177)
+ (const_int 3))))
+ (match_operand:HI 1 "gen_reg_operand" ""))
+ (ashift:SI (and:SI (match_dup 1)
+ (const_int 65535))
+ (ashift:SI (reg:SI 177)
+ (const_int 3)))))
+ (set (mem:SI (and:SI (match_dup 0)
+ (const_int -3)))
+ (match_dup 2))]
+ ""
+ "
+{ operands[1] = gen_lowpart (SImode, operands[1]);
+
+ if (reload_in_progress)
+ operands[2] = gen_rtx (REG, SImode, R_TAV);
+ else
+ operands[2] = gen_reg_rtx (SImode);
+}")
+
+;; HImode move: when DW is not enabled, halfword memory accesses must go
+;; through the load/store subroutine expanders above; otherwise fall
+;; through to the plain HImode insn patterns.
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+ "
+{ if (GET_CODE (operands[0]) == MEM)
+ {
+ if (! gen_reg_operand (operands[1], HImode))
+ operands[1] = copy_to_mode_reg (HImode, operands[1]);
+ if (! TARGET_DW_ENABLE)
+ {
+ if (TARGET_BYTE_WRITES)
+ emit_insn (gen_storehihww (XEXP (operands[0], 0), operands[1]));
+ else
+ emit_insn (gen_storehinhww (XEXP (operands[0], 0), operands[1]));
+ DONE;
+ }
+ }
+ else if (GET_CODE (operands[1]) == MEM)
+ {
+ if (! TARGET_DW_ENABLE)
+ {
+ emit_insn (gen_loadhi (XEXP (operands[1], 0), operands[0]));
+ DONE;
+ }
+ }
+}")
+\f
+;; Subroutines to load/store bytes. Use TAV (gr121) as scratch.
+;; Load a byte when DW is not enabled: load the enclosing word into a
+;; scratch, latch the byte offset in BP (reg 177), extract 8 bits at
+;; offset BP*8.
+(define_expand "loadqi"
+ [(parallel [(set (match_dup 2)
+ (mem:SI (and:SI (match_operand:SI 0 "gen_reg_operand" "")
+ (const_int -4))))
+ (set (reg:SI 177)
+ (and:SI (match_dup 0)
+ (const_int 3)))])
+ (set (match_operand:QI 1 "gen_reg_operand" "")
+ (zero_extract:SI (match_dup 2)
+ (const_int 8)
+ (ashift:SI (reg:SI 177)
+ (const_int 3))))]
+ ""
+ "
+{ operands[1] = gen_lowpart (SImode, operands[1]);
+
+ if (reload_in_progress)
+ operands[2] = gen_rtx (REG, SImode, R_TAV);
+ else
+ operands[2] = gen_reg_rtx (SImode);
+}")
+
+;; Store a byte on systems WITHOUT hardware byte writes: read the word,
+;; insert 8 bits at the latched byte offset, write the word back.
+(define_expand "storeqinhww"
+ [(parallel [(set (match_dup 2)
+ (mem:SI (and:SI (match_operand:SI 0 "gen_reg_operand" "")
+ (const_int -4))))
+ (set (reg:SI 177)
+ (and:SI (match_dup 0)
+ (const_int 3)))])
+ (set (zero_extract:SI (match_dup 2)
+ (const_int 8)
+ (ashift:SI (reg:SI 177)
+ (const_int 3)))
+ (match_operand:QI 1 "gen_reg_operand" ""))
+ (set (mem:SI (match_dup 0))
+ (match_dup 2))]
+ ""
+ "
+{ operands[1] = gen_lowpart (SImode, operands[1]);
+
+ if (reload_in_progress)
+ operands[2] = gen_rtx (REG, SImode, R_TAV);
+ else
+ operands[2] = gen_reg_rtx (SImode);
+}")
+
+;; Store a byte on systems WITH hardware byte writes: merge with mask 255
+;; and store through the (and addr -4) form so the OPT=001 store pattern
+;; matches.  Operand 1 is QImode -- this is the byte-store expander
+;; (movqi passes a QImode value), matching storeqinhww/loadqi above.
+(define_expand "storeqihww"
+ [(set (reg:SI 177)
+ (and:SI (match_operand:SI 0 "gen_reg_operand" "")
+ (const_int 3)))
+ (set (match_dup 2)
+ (ior:SI (and:SI (not:SI (ashift:SI (const_int 255)
+ (ashift:SI (reg:SI 177)
+ (const_int 3))))
+ (match_operand:QI 1 "gen_reg_operand" ""))
+ (ashift:SI (and:SI (match_dup 1)
+ (const_int 255))
+ (ashift:SI (reg:SI 177)
+ (const_int 3)))))
+ (set (mem:SI (and:SI (match_dup 0)
+ (const_int -4)))
+ (match_dup 2))]
+ ""
+ "
+{ operands[1] = gen_lowpart (SImode, operands[1]);
+
+ if (reload_in_progress)
+ operands[2] = gen_rtx (REG, SImode, R_TAV);
+ else
+ operands[2] = gen_reg_rtx (SImode);
+}")
+\f
+;; QImode move: when DW is not enabled, byte memory accesses go through
+;; the byte load/store subroutine expanders above.
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "general_operand" "")
+ (match_operand:QI 1 "general_operand" ""))]
+ ""
+ "
+{ if (GET_CODE (operands[0]) == MEM)
+ {
+ if (! gen_reg_operand (operands[1], QImode))
+ operands[1] = copy_to_mode_reg (QImode, operands[1]);
+ if (! TARGET_DW_ENABLE)
+ {
+ if (TARGET_BYTE_WRITES)
+ emit_insn (gen_storeqihww (XEXP (operands[0], 0), operands[1]));
+ else
+ emit_insn (gen_storeqinhww (XEXP (operands[0], 0), operands[1]));
+ DONE;
+ }
+ }
+ else if (GET_CODE (operands[1]) == MEM)
+ {
+ if (! TARGET_DW_ENABLE)
+ {
+ emit_insn (gen_loadqi (XEXP (operands[1], 0), operands[0]));
+ DONE;
+ }
+ }
+}")
+\f
+;; Now the actual insns used to move data around. We include here the
+;; DEFINE_SPLITs that may be needed. In some cases these will be
+;; split again. For floating-point, if we can look inside the constant,
+;; always split it. This can eliminate unnecessary insns.
+;; SFmode moves, non-29050: register copy via SLL 0, splittable constant
+;; ("#"), two-insn constant, load, store.
+(define_insn ""
+ [(set (match_operand:SF 0 "out_operand" "=r,r,r,r,m")
+ (match_operand:SF 1 "in_operand" "r,E,F,m,r"))]
+ "(gen_reg_operand (operands[0], SFmode)
+ || gen_reg_operand (operands[1], SFmode))
+ && ! TARGET_29050"
+ "@
+ sll %0,%1,0
+ #
+ const %0,%1\;consth %0,%1
+ load 0,0,%0,%1
+ store 0,0,%1,%0"
+ [(set_attr "type" "misc,multi,multi,load,store")])
+
+;; 29050 adds moves to/from the accumulator registers ("*a") via
+;; MTACC/MFACC.
+(define_insn ""
+ [(set (match_operand:SF 0 "out_operand" "=r,r,r,r,m,*a,r")
+ (match_operand:SF 1 "in_operand" "r,E,F,m,r,r,*a"))]
+ "(gen_reg_operand (operands[0], SFmode)
+ || gen_reg_operand (operands[1], SFmode))
+ && TARGET_29050"
+ "@
+ sll %0,%1,0
+ #
+ const %0,%1\;consth %0,%1
+ load 0,0,%0,%1
+ store 0,0,%1,%0
+ mtacc %1,1,%0
+ mfacc %0,1,%1"
+ [(set_attr "type" "misc,multi,multi,load,store,fadd,fadd")])
+
+;; Turn this into SImode. It will then be split up that way.
+(define_split
+ [(set (match_operand:SF 0 "register_operand" "")
+ (match_operand:SF 1 "float_const_operand" ""))]
+ "HOST_FLOAT_FORMAT == TARGET_FLOAT_FORMAT"
+ [(set (match_dup 0)
+ (match_dup 1))]
+ "
+{ operands[0] = operand_subword (operands[0], 0, 0, SFmode);
+ operands[1] = operand_subword (operands[1], 0, 0, SFmode);
+
+ if (operands[0] == 0 || operands[1] == 0)
+ FAIL;
+}")
+
+;; DFmode moves, non-29050.  Register-register and splittable constants
+;; are "#" (handled by the splits below); memory moves set CR via MTSRIM
+;; then LOADM/STOREM two words.
+(define_insn ""
+ [(set (match_operand:DF 0 "out_operand" "=r,r,r,m")
+ (match_operand:DF 1 "in_operand" "rE,F,m,r"))
+ (clobber (reg:SI 179))]
+ "(gen_reg_operand (operands[0], DFmode)
+ || gen_reg_operand (operands[1], DFmode))
+ && ! TARGET_29050"
+ "@
+ #
+ const %0,%1\;consth %0,%1\;const %L0,%L1\;consth %L0,%L1
+ mtsrim cr,1\;loadm 0,0,%0,%1
+ mtsrim cr,1\;storem 0,0,%1,%0"
+ [(set_attr "type" "multi")])
+
+;; 29050 variant: earlyclobber for loads, plus accumulator moves.
+(define_insn ""
+ [(set (match_operand:DF 0 "out_operand" "=r,r,&r,m,*a,r")
+ (match_operand:DF 1 "in_operand" "rE,F,m,r,r,*a"))
+ (clobber (reg:SI 179))]
+ "(gen_reg_operand (operands[0], DFmode)
+ || gen_reg_operand (operands[1], DFmode))
+ && TARGET_29050"
+ "@
+ #
+ const %0,%1\;consth %0,%1\;const %L0,%L1\;consth %L0,%L1
+ mtsrim cr,1\;loadm 0,0,%0,%1
+ mtsrim cr,1\;storem 0,0,%1,%0
+ mtacc %1,2,%0
+ mfacc %0,2,%1"
+ [(set_attr "type" "multi,multi,multi,multi,fadd,fadd")])
+
+;; Split register-register copies and constant loads into two SImode loads,
+;; one for each word. In the constant case, they will get further split.
+;; Don't do this until register allocation is complete, though, since it will
+;; interfere with register allocation. Normally copy the lowest-addressed
+;; word first; the exception is if we are copying register to register and
+;; the lowest register of the first operand is the highest register of the
+;; second operand.
+;; Split a DF register copy or constant load into two word moves, copying
+;; the low-addressed word first unless the registers overlap such that the
+;; destination's first word would clobber the source's second word.
+(define_split
+ [(set (match_operand:DF 0 "gen_reg_operand" "")
+ (match_operand:DF 1 "gen_reg_or_float_constant_operand" ""))
+ (clobber (reg:SI 179))]
+ "reload_completed"
+ [(set (match_dup 2) (match_dup 3))
+ (set (match_dup 4) (match_dup 5))]
+ "
+{ if (GET_CODE (operands[1]) == REG
+ && REGNO (operands[0]) == REGNO (operands[1]) + 1)
+ {
+ operands[2] = operand_subword (operands[0], 1, 1, DFmode);
+ operands[3] = operand_subword (operands[1], 1, 1, DFmode);
+ operands[4] = operand_subword (operands[0], 0, 1, DFmode);
+ operands[5] = operand_subword (operands[1], 0, 1, DFmode);
+ }
+ else
+ {
+ operands[2] = operand_subword (operands[0], 0, 1, DFmode);
+ operands[3] = operand_subword (operands[1], 0, 1, DFmode);
+ operands[4] = operand_subword (operands[0], 1, 1, DFmode);
+ operands[5] = operand_subword (operands[1], 1, 1, DFmode);
+ }
+
+ if (operands[2] == 0 || operands[3] == 0
+ || operands[4] == 0 || operands[5] == 0)
+ FAIL;
+}")
+
+;; Split memory loads and stores into the MTSR and LOADM/STOREM.
+(define_split
+ [(set (match_operand:DF 0 "out_operand" "")
+ (match_operand:DF 1 "in_operand" ""))
+ (clobber (reg:SI 179))]
+ "TARGET_NO_STOREM_BUG
+ && (memory_operand (operands[0], DFmode)
+ || memory_operand (operands[1], DFmode))"
+ [(set (reg:SI 179) (const_int 1))
+ (parallel [(set (match_dup 0) (match_dup 1))
+ (use (reg:SI 179))
+ (clobber (reg:SI 179))])]
+ "")
+
+;; DI move is similar to DF move.
+;; DImode moves, non-29050; same shape as the DF patterns above.
+(define_insn ""
+ [(set (match_operand:DI 0 "out_operand" "=r,r,m")
+ (match_operand:DI 1 "in_operand" "rn,m,r"))
+ (clobber (reg:SI 179))]
+ "(gen_reg_operand (operands[0], DImode)
+ || gen_reg_operand (operands[1], DImode))
+ && ! TARGET_29050"
+ "@
+ #
+ mtsrim cr,1\;loadm 0,0,%0,%1
+ mtsrim cr,1\;storem 0,0,%1,%0"
+ [(set_attr "type" "multi")])
+
+;; 29050 variant: earlyclobber destination for the load alternative.
+(define_insn ""
+ [(set (match_operand:DI 0 "out_operand" "=r,&r,m")
+ (match_operand:DI 1 "in_operand" "rn,m,r"))
+ (clobber (reg:SI 179))]
+ "(gen_reg_operand (operands[0], DImode)
+ || gen_reg_operand (operands[1], DImode))
+ && TARGET_29050"
+ "@
+ #
+ mtsrim cr,1\;loadm 0,0,%0,%1
+ mtsrim cr,1\;storem 0,0,%1,%0"
+ [(set_attr "type" "multi")])
+
+;; Split a DI register copy or integer constant load into two word moves,
+;; low-addressed word first unless the registers overlap.  The operands
+;; are DImode, so the subwords must be extracted with DImode (DFmode
+;; would mishandle integer constants).
+(define_split
+ [(set (match_operand:DI 0 "gen_reg_operand" "")
+ (match_operand:DI 1 "gen_reg_or_integer_constant_operand" ""))
+ (clobber (reg:SI 179))]
+ "reload_completed"
+ [(set (match_dup 2) (match_dup 3))
+ (set (match_dup 4) (match_dup 5))]
+ "
+{ if (GET_CODE (operands[1]) == REG
+ && REGNO (operands[0]) == REGNO (operands[1]) + 1)
+ {
+ operands[2] = operand_subword (operands[0], 1, 1, DImode);
+ operands[3] = operand_subword (operands[1], 1, 1, DImode);
+ operands[4] = operand_subword (operands[0], 0, 1, DImode);
+ operands[5] = operand_subword (operands[1], 0, 1, DImode);
+ }
+ else
+ {
+ operands[2] = operand_subword (operands[0], 0, 1, DImode);
+ operands[3] = operand_subword (operands[1], 0, 1, DImode);
+ operands[4] = operand_subword (operands[0], 1, 1, DImode);
+ operands[5] = operand_subword (operands[1], 1, 1, DImode);
+ }
+}")
+
+;; On fixed chips, split a DI memory move into an explicit CR setup insn
+;; followed by the LOADM/STOREM pattern that uses and clobbers CR.
+(define_split
+ [(set (match_operand:DI 0 "out_operand" "")
+ (match_operand:DI 1 "in_operand" ""))
+ (clobber (reg:SI 179))]
+ "TARGET_NO_STOREM_BUG
+ && (memory_operand (operands[0], DImode)
+ || memory_operand (operands[1], DImode))"
+ [(set (reg:SI 179) (const_int 1))
+ (parallel [(set (match_dup 0) (match_dup 1))
+ (use (reg:SI 179))
+ (clobber (reg:SI 179))])]
+ "")
+
+;; TImode moves are very similar to DImode moves, except that we can't
+;; have constants.
+;; TImode (4-word) moves; like DI but CR is set to 3 and constants are
+;; not allowed.
+(define_insn ""
+ [(set (match_operand:TI 0 "out_operand" "=r,r,m")
+ (match_operand:TI 1 "in_operand" "r,m,r"))
+ (clobber (reg:SI 179))]
+ "(gen_reg_operand (operands[0], TImode)
+ || gen_reg_operand (operands[1], TImode))
+ && ! TARGET_29050"
+ "@
+ #
+ mtsrim cr,3\;loadm 0,0,%0,%1
+ mtsrim cr,3\;storem 0,0,%1,%0"
+ [(set_attr "type" "multi,multi,multi")])
+
+;; 29050 variant: earlyclobber destination for the load alternative.
+(define_insn ""
+ [(set (match_operand:TI 0 "out_operand" "=r,&r,m")
+ (match_operand:TI 1 "in_operand" "r,m,r"))
+ (clobber (reg:SI 179))]
+ "(gen_reg_operand (operands[0], TImode)
+ || gen_reg_operand (operands[1], TImode))
+ && TARGET_29050"
+ "@
+ #
+ mtsrim cr,3\;loadm 0,0,%0,%1
+ mtsrim cr,3\;storem 0,0,%1,%0"
+ [(set_attr "type" "multi,multi,multi")])
+
+;; Split a TI register-register copy into four word moves; copy in
+;; descending register order when the destination overlaps the upper
+;; registers of the source, ascending otherwise.
+(define_split
+ [(set (match_operand:TI 0 "gen_reg_operand" "")
+ (match_operand:TI 1 "gen_reg_operand" ""))
+ (clobber (reg:SI 179))]
+ "reload_completed"
+ [(set (match_dup 2) (match_dup 3))
+ (set (match_dup 4) (match_dup 5))
+ (set (match_dup 6) (match_dup 7))
+ (set (match_dup 8) (match_dup 9))]
+ "
+{
+ if (REGNO (operands[0]) >= REGNO (operands[1]) + 1
+ && REGNO (operands[0]) <= REGNO (operands[1]) + 3)
+ {
+ operands[2] = gen_rtx (REG, SImode, REGNO (operands[0]) + 3);
+ operands[3] = gen_rtx (REG, SImode, REGNO (operands[1]) + 3);
+ operands[4] = gen_rtx (REG, SImode, REGNO (operands[0]) + 2);
+ operands[5] = gen_rtx (REG, SImode, REGNO (operands[1]) + 2);
+ operands[6] = gen_rtx (REG, SImode, REGNO (operands[0]) + 1);
+ operands[7] = gen_rtx (REG, SImode, REGNO (operands[1]) + 1);
+ operands[8] = gen_rtx (REG, SImode, REGNO (operands[0]));
+ operands[9] = gen_rtx (REG, SImode, REGNO (operands[1]));
+ }
+ else
+ {
+ operands[2] = gen_rtx (REG, SImode, REGNO (operands[0]));
+ operands[3] = gen_rtx (REG, SImode, REGNO (operands[1]));
+ operands[4] = gen_rtx (REG, SImode, REGNO (operands[0]) + 1);
+ operands[5] = gen_rtx (REG, SImode, REGNO (operands[1]) + 1);
+ operands[6] = gen_rtx (REG, SImode, REGNO (operands[0]) + 2);
+ operands[7] = gen_rtx (REG, SImode, REGNO (operands[1]) + 2);
+ operands[8] = gen_rtx (REG, SImode, REGNO (operands[0]) + 3);
+ operands[9] = gen_rtx (REG, SImode, REGNO (operands[1]) + 3);
+ }
+}")
+
+;; On fixed chips, split a TI memory move into CR setup plus the
+;; LOADM/STOREM pattern.
+(define_split
+ [(set (match_operand:TI 0 "out_operand" "")
+ (match_operand:TI 1 "in_operand" ""))
+ (clobber (reg:SI 179))]
+ "TARGET_NO_STOREM_BUG
+ && (memory_operand (operands[0], TImode)
+ || memory_operand (operands[1], TImode))"
+ [(set (reg:SI 179) (const_int 1))
+ (parallel [(set (match_dup 0) (match_dup 1))
+ (use (reg:SI 179))
+ (clobber (reg:SI 179))])]
+ "")
+
+;; SImode move, non-29050: register copy, 16-bit constant (CONST),
+;; negated constant (CONSTN), the constant matched by "O" via CPEQ,
+;; general constant ("#", split later), load, MFSR, store, MTSR, MTSRIM.
+(define_insn ""
+ [(set (match_operand:SI 0 "out_operand" "=r,r,r,r,r,r,r,m,*h,*h")
+ (match_operand:SI 1 "in_operand" "r,J,M,O,i,m,*h,r,r,J"))]
+ "(gen_reg_operand (operands[0], SImode)
+ || gen_reg_operand (operands[1], SImode)
+ || (spec_reg_operand (operands[0], SImode)
+ && cint_16_operand (operands[1], SImode)))
+ && ! TARGET_29050"
+ "@
+ sll %0,%1,0
+ const %0,%1
+ constn %0,%M1
+ cpeq %0,gr1,gr1
+ #
+ load 0,0,%0,%1
+ mfsr %0,%1
+ store 0,0,%1,%0
+ mtsr %0,%1
+ mtsrim %0,%1"
+ [(set_attr "type" "misc,misc,misc,misc,multi,load,misc,store,misc,misc")])
+
+;; 29050 variant: the "O" constant is loaded with CONSTHZ instead of CPEQ.
+(define_insn ""
+ [(set (match_operand:SI 0 "out_operand" "=r,r,r,r,r,r,r,m,*h,*h")
+ (match_operand:SI 1 "in_operand" "r,J,M,O,i,m,*h,r,r,J"))]
+ "(gen_reg_operand (operands[0], SImode)
+ || gen_reg_operand (operands[1], SImode)
+ || (spec_reg_operand (operands[0], SImode)
+ && cint_16_operand (operands[1], SImode)))
+ && TARGET_29050"
+ "@
+ sll %0,%1,0
+ const %0,%1
+ constn %0,%M1
+ consthz %0,%1
+ #
+ load 0,0,%0,%1
+ mfsr %0,%1
+ store 0,0,%1,%0
+ mtsr %0,%1
+ mtsrim %0,%1"
+ [(set_attr "type" "misc,misc,misc,misc,multi,load,misc,store,misc,misc")])
+
+;; HImode move; memory access uses OPT = 2 (halfword).
+(define_insn ""
+ [(set (match_operand:HI 0 "out_operand" "=r,r,r,m,r,*h,*h")
+ (match_operand:HI 1 "in_operand" "r,i,m,r,*h,r,i"))]
+ "gen_reg_operand (operands[0], HImode)
+ || gen_reg_operand (operands[1], HImode)"
+ "@
+ sll %0,%1,0
+ const %0,%1
+ load 0,2,%0,%1
+ store 0,2,%1,%0
+ mfsr %0,%1
+ mtsr %0,%1
+ mtsrim %0,%1"
+ [(set_attr "type" "misc,misc,load,store,misc,misc,misc")])
+
+;; QImode move; memory access uses OPT = 1 (byte).
+(define_insn ""
+ [(set (match_operand:QI 0 "out_operand" "=r,r,r,m,r,*h,*h")
+ (match_operand:QI 1 "in_operand" "r,i,m,r,*h,r,i"))]
+ "gen_reg_operand (operands[0], QImode)
+ || gen_reg_operand (operands[1], QImode)"
+ "@
+ sll %0,%1,0
+ const %0,%1
+ load 0,1,%0,%1
+ store 0,1,%1,%0
+ mfsr %0,%1
+ mtsr %0,%1
+ mtsrim %0,%1"
+ [(set_attr "type" "misc,misc,load,store,misc,misc,misc")])
+\f
+;; Define move insns for DI, TI, SF, and DF.
+;;
+;; In no case do we support mem->mem directly.
+;;
+;; For DI move of constant to register, split apart at this time since these
+;; can require anywhere from 2 to 4 insns and determining which is complex.
+;;
+;; In other cases, handle similarly to SImode moves.
+;;
+;; However, indicate that DI, TI, and DF moves (can) clobber CR (reg 179).
+;;
+;; Each expander below forces the source into a register only when the
+;; destination is memory, which is what enforces the no-mem->mem rule.
+(define_expand "movdi"
+ [(parallel [(set (match_operand:DI 0 "general_operand" "")
+ (match_operand:DI 1 "general_operand" ""))
+ (clobber (reg:SI 179))])]
+ ""
+ "
+{
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (DImode, operands[1]);
+}")
+
+;; SFmode is single-word, so no CR clobber is needed.
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "general_operand" "")
+ (match_operand:SF 1 "general_operand" ""))]
+ ""
+ "
+{ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (SFmode, operands[1]);
+}")
+
+(define_expand "movdf"
+ [(parallel [(set (match_operand:DF 0 "general_operand" "")
+ (match_operand:DF 1 "general_operand" ""))
+ (clobber (reg:SI 179))])]
+ ""
+ "
+{ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (DFmode, operands[1]);
+}")
+
+(define_expand "movti"
+ [(parallel [(set (match_operand:TI 0 "general_operand" "")
+ (match_operand:TI 1 "general_operand" ""))
+ (clobber (reg:SI 179))])]
+ ""
+ "
+{
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (TImode, operands[1]);
+}")
+\f
+;; For compare operations, we simply store the comparison operands and
+;; do nothing else. The following branch or scc insn will output whatever
+;; is needed.
+;;
+;; DONE stops the expander before any rtl is emitted; the operands are
+;; merely stashed in the a29k_compare_* globals for the branch/scc
+;; expanders that follow.
+(define_expand "cmpsi"
+ [(set (cc0)
+ (compare (match_operand:SI 0 "gen_reg_operand" "")
+ (match_operand:SI 1 "srcb_operand" "")))]
+ ""
+ "
+{
+ a29k_compare_op0 = operands[0];
+ a29k_compare_op1 = operands[1];
+ a29k_compare_fp_p = 0;
+ DONE;
+}")
+
+(define_expand "cmpsf"
+ [(set (cc0)
+ (compare (match_operand:SF 0 "gen_reg_operand" "")
+ (match_operand:SF 1 "gen_reg_operand" "")))]
+ ""
+ "
+{
+ a29k_compare_op0 = operands[0];
+ a29k_compare_op1 = operands[1];
+ a29k_compare_fp_p = 1;
+ DONE;
+}")
+
+(define_expand "cmpdf"
+ [(set (cc0)
+ (compare (match_operand:DF 0 "gen_reg_operand" "")
+ (match_operand:DF 1 "gen_reg_operand" "")))]
+ ""
+ "
+{
+ a29k_compare_op0 = operands[0];
+ a29k_compare_op1 = operands[1];
+ a29k_compare_fp_p = 1;
+ DONE;
+}")
+
+;; We can generate bit-tests better if we use NE instead of EQ, but we
+;; don't have an NE for floating-point. So we have to have two patterns
+;; for EQ and two for NE.
+;;
+;; NOTE(review): the branches test the sign of the scc result (`ge 0' for
+;; the inverted sense, `lt 0' for the direct sense), so the scc insns
+;; apparently put the truth value in the sign bit -- confirm against the
+;; scc define_insns elsewhere in this file.
+
+;; beq: compute NE and branch when it is false (ge 0).
+(define_expand "beq"
+ [(set (match_dup 1) (ne:SI (match_dup 2) (match_dup 3)))
+ (set (pc)
+ (if_then_else (ge (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ if (GET_MODE_CLASS (GET_MODE (a29k_compare_op0)) == MODE_FLOAT)
+ {
+ emit_insn (gen_beq_fp (operands[0]));
+ DONE;
+ }
+
+ operands[1] = gen_reg_rtx (SImode);
+ operands[2] = a29k_compare_op0;
+ operands[3] = a29k_compare_op1;
+}")
+
+;; Floating point has EQ but not NE, so beq_fp computes EQ directly.
+(define_expand "beq_fp"
+ [(set (match_dup 1) (eq:SI (match_dup 2) (match_dup 3)))
+ (set (pc)
+ (if_then_else (lt (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_reg_rtx (SImode);
+ operands[2] = a29k_compare_op0;
+ operands[3] = a29k_compare_op1;
+}")
+
+(define_expand "bne"
+ [(set (match_dup 1) (ne:SI (match_dup 2) (match_dup 3)))
+ (set (pc)
+ (if_then_else (lt (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ if (GET_MODE_CLASS (GET_MODE (a29k_compare_op0)) == MODE_FLOAT)
+ {
+ emit_insn (gen_bne_fp (operands[0]));
+ DONE;
+ }
+
+ operands[1] = gen_reg_rtx (SImode);
+ operands[2] = a29k_compare_op0;
+ operands[3] = a29k_compare_op1;
+}")
+
+;; bne for floating point: compute EQ and branch when it is false.
+(define_expand "bne_fp"
+ [(set (match_dup 1) (eq:SI (match_dup 2) (match_dup 3)))
+ (set (pc)
+ (if_then_else (ge (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_reg_rtx (SImode);
+ operands[2] = a29k_compare_op0;
+ operands[3] = a29k_compare_op1;
+}")
+
+;; We don't have a floating-point "lt" insn, so we have to use "gt" in that
+;; case with the operands swapped. The operands must both be registers in
+;; the floating-point case, so we know that swapping them is OK.
+(define_expand "blt"
+ [(set (match_dup 1) (match_dup 2))
+ (set (pc)
+ (if_then_else (lt (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_reg_rtx (SImode);
+ if (a29k_compare_fp_p)
+ operands[2] = gen_rtx (GT, SImode, a29k_compare_op1, a29k_compare_op0);
+ else
+ operands[2] = gen_rtx (LT, SImode, a29k_compare_op0, a29k_compare_op1);
+}")
+
+;; Similarly for "le".
+(define_expand "ble"
+ [(set (match_dup 1) (match_dup 2))
+ (set (pc)
+ (if_then_else (lt (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_reg_rtx (SImode);
+ if (a29k_compare_fp_p)
+ operands[2] = gen_rtx (GE, SImode, a29k_compare_op1, a29k_compare_op0);
+ else
+ operands[2] = gen_rtx (LE, SImode, a29k_compare_op0, a29k_compare_op1);
+}")
+
+;; The remaining conditions all have direct scc insns (no fp variants are
+;; needed for the unsigned ones), so each branch simply computes the scc
+;; into a fresh register and branches on its sign bit.
+(define_expand "bltu"
+ [(set (match_dup 1) (ltu:SI (match_dup 2) (match_dup 3)))
+ (set (pc)
+ (if_then_else (lt (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_reg_rtx (SImode);
+ operands[2] = a29k_compare_op0;
+ operands[3] = a29k_compare_op1;
+}")
+
+(define_expand "bleu"
+ [(set (match_dup 1) (leu:SI (match_dup 2) (match_dup 3)))
+ (set (pc)
+ (if_then_else (lt (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_reg_rtx (SImode);
+ operands[2] = a29k_compare_op0;
+ operands[3] = a29k_compare_op1;
+}")
+
+(define_expand "bgt"
+ [(set (match_dup 1) (gt:SI (match_dup 2) (match_dup 3)))
+ (set (pc)
+ (if_then_else (lt (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_reg_rtx (SImode);
+ operands[2] = a29k_compare_op0;
+ operands[3] = a29k_compare_op1;
+}")
+
+(define_expand "bge"
+ [(set (match_dup 1) (ge:SI (match_dup 2) (match_dup 3)))
+ (set (pc)
+ (if_then_else (lt (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_reg_rtx (SImode);
+ operands[2] = a29k_compare_op0;
+ operands[3] = a29k_compare_op1;
+}")
+
+(define_expand "bgtu"
+ [(set (match_dup 1) (gtu:SI (match_dup 2) (match_dup 3)))
+ (set (pc)
+ (if_then_else (lt (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_reg_rtx (SImode);
+ operands[2] = a29k_compare_op0;
+ operands[3] = a29k_compare_op1;
+}")
+
+(define_expand "bgeu"
+ [(set (match_dup 1) (geu:SI (match_dup 2) (match_dup 3)))
+ (set (pc)
+ (if_then_else (lt (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_reg_rtx (SImode);
+ operands[2] = a29k_compare_op0;
+ operands[3] = a29k_compare_op1;
+}")
+\f
+;; Store-condition (scc) expanders.  Like the branches, these pick up the
+;; operands stashed by the cmp expanders.
+(define_expand "seq"
+ [(set (match_operand:SI 0 "gen_reg_operand" "")
+ (eq:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "
+{
+ operands[1] = a29k_compare_op0;
+ operands[2] = a29k_compare_op1;
+}")
+
+;; This is the most complicated case, because we don't have a floating-point
+;; "ne" insn. If integer, handle normally. If floating-point, write the
+;; compare and then write an insn to reverse the test.
+;; The expander allocates the temporary that holds the "eq" result; the
+;; second set inverts it (presumably the scc insns put the truth value in
+;; the sign bit, so `ge 0' is the inverted sense -- confirm against the
+;; scc define_insns).
+(define_expand "sne_fp"
+ [(set (match_dup 3)
+ (eq:SI (match_operand 1 "gen_reg_operand" "")
+ (match_operand 2 "gen_reg_operand" "")))
+ (set (match_operand:SI 0 "gen_reg_operand" "")
+ (ge:SI (match_dup 3) (const_int 0)))]
+ ""
+ "
+{ operands[3] = gen_reg_rtx (SImode);
+}")
+
+;; sne: integer case uses the direct NE scc insn; the floating-point case
+;; is diverted to sne_fp, which must invert an EQ result.
+(define_expand "sne"
+ [(set (match_operand:SI 0 "gen_reg_operand" "")
+ (ne:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "
+{
+ operands[1] = a29k_compare_op0;
+ operands[2] = a29k_compare_op1;
+
+ if (a29k_compare_fp_p)
+ {
+ emit_insn (gen_sne_fp (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+}")
+
+;; We don't have a floating-point "lt" insn, so use "gt" and swap the
+;; operands, the same as we do "blt".
+(define_expand "slt"
+ [(set (match_operand:SI 0 "gen_reg_operand" "")
+ (match_dup 1))]
+ ""
+ "
+{
+ if (a29k_compare_fp_p)
+ operands[1] = gen_rtx (GT, SImode, a29k_compare_op1, a29k_compare_op0);
+ else
+ operands[1] = gen_rtx (LT, SImode, a29k_compare_op0, a29k_compare_op1);
+}")
+
+;; Similarly for "le"
+(define_expand "sle"
+ [(set (match_operand:SI 0 "gen_reg_operand" "")
+ (match_dup 1))]
+ ""
+ "
+{
+ if (a29k_compare_fp_p)
+ operands[1] = gen_rtx (GE, SImode, a29k_compare_op1, a29k_compare_op0);
+ else
+ operands[1] = gen_rtx (LE, SImode, a29k_compare_op0, a29k_compare_op1);
+}")
+
+;; The remaining conditions all have direct scc insns.
+(define_expand "sltu"
+ [(set (match_operand:SI 0 "gen_reg_operand" "")
+ (ltu:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "
+{
+ operands[1] = a29k_compare_op0;
+ operands[2] = a29k_compare_op1;
+}")
+
+(define_expand "sleu"
+ [(set (match_operand:SI 0 "gen_reg_operand" "")
+ (leu:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "
+{
+ operands[1] = a29k_compare_op0;
+ operands[2] = a29k_compare_op1;
+}")
+
+(define_expand "sgt"
+ [(set (match_operand:SI 0 "gen_reg_operand" "")
+ (gt:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "
+{
+ operands[1] = a29k_compare_op0;
+ operands[2] = a29k_compare_op1;
+}")
+
+(define_expand "sge"
+ [(set (match_operand:SI 0 "gen_reg_operand" "")
+ (ge:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "
+{
+ operands[1] = a29k_compare_op0;
+ operands[2] = a29k_compare_op1;
+}")
+
+(define_expand "sgtu"
+ [(set (match_operand:SI 0 "gen_reg_operand" "")
+ (gtu:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "
+{
+ operands[1] = a29k_compare_op0;
+ operands[2] = a29k_compare_op1;
+}")
+
+(define_expand "sgeu"
+ [(set (match_operand:SI 0 "gen_reg_operand" "")
+ (geu:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "
+{
+ operands[1] = a29k_compare_op0;
+ operands[2] = a29k_compare_op1;
+}")
+\f
+;; Now define the actual jump insns.
+;;
+;; NOTE(review): %b0/%B0 presumably print the direct/inverted condition
+;; suffix and %# a delay-slot nop when one is needed -- confirm against
+;; PRINT_OPERAND in a29k.h / a29k.c.
+
+;; Conditional branch on the sign of an scc result.
+(define_insn ""
+ [(set (pc)
+ (if_then_else (match_operator 0 "branch_operator"
+ [(match_operand:SI 1 "gen_reg_operand" "r")
+ (const_int 0)])
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ ""
+ "jmp%b0 %1,%l2%#"
+ [(set_attr "type" "branch")])
+
+;; Conditional return, available only when the epilogue is empty.
+(define_insn ""
+ [(set (pc)
+ (if_then_else (match_operator 0 "branch_operator"
+ [(match_operand:SI 1 "gen_reg_operand" "r")
+ (const_int 0)])
+ (return)
+ (pc)))]
+ "null_epilogue ()"
+ "jmp%b0i %1,lr0%#"
+ [(set_attr "type" "branch")])
+
+;; Inverted-sense variants of the two insns above.
+(define_insn ""
+ [(set (pc)
+ (if_then_else (match_operator 0 "branch_operator"
+ [(match_operand:SI 1 "gen_reg_operand" "r")
+ (const_int 0)])
+ (pc)
+ (label_ref (match_operand 2 "" ""))))]
+ ""
+ "jmp%B0 %1,%l2%#"
+ [(set_attr "type" "branch")])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (match_operator 0 "branch_operator"
+ [(match_operand:SI 1 "gen_reg_operand" "r")
+ (const_int 0)])
+ (pc)
+ (return)))]
+ "null_epilogue ()"
+ "jmp%B0i %1,lr0%#"
+ [(set_attr "type" "branch")])
+
+(define_insn "jump"
+ [(set (pc)
+ (label_ref (match_operand 0 "" "")))]
+ ""
+ "jmp %e0%E0"
+ [(set_attr "type" "branch")])
+
+(define_insn "return"
+ [(return)]
+ "null_epilogue ()"
+ "jmpi lr0%#"
+ [(set_attr "type" "branch")])
+
+(define_insn "indirect_jump"
+ [(set (pc)
+ (match_operand:SI 0 "gen_reg_operand" "r"))]
+ ""
+ "jmpi %0%#"
+ [(set_attr "type" "branch")])
+
+;; Same as indirect_jump, plus a USE of the table label so the jump table
+;; is kept alive.
+(define_insn "tablejump"
+ [(set (pc)
+ (match_operand:SI 0 "gen_reg_operand" "r"))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+ "jmpi %0%#"
+ [(set_attr "type" "branch")])
+
+;; JMPFDEC
+;; Branch if the register is >= 0 and decrement it in either case;
+;; used for fast loop counting.
+(define_insn ""
+ [(set (pc)
+ (if_then_else (ge (match_operand:SI 0 "gen_reg_operand" "r")
+ (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0)
+ (const_int -1)))]
+ ""
+ "jmpfdec %0,%l1%#"
+ [(set_attr "type" "branch")])
+\f
+;;- Local variables:
+;;- mode:emacs-lisp
+;;- comment-start: ";;- "
+;;- eval: (set-syntax-table (copy-sequence (syntax-table)))
+;;- eval: (modify-syntax-entry ?[ "(]")
+;;- eval: (modify-syntax-entry ?] ")[")
+;;- eval: (modify-syntax-entry ?{ "(}")
+;;- eval: (modify-syntax-entry ?} "){")
+;;- End:
--- /dev/null
+/* Definitions of target machine for GNU compiler, for AMD Am29000 CPU, Unix.
+ Copyright (C) 1991 Free Software Foundation, Inc.
+ Contributed by Richard Kenner (kenner@nyu.edu)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* This is mostly the same as a29k.h, except that we define unix instead of
+ EPI and define unix-style machine names. */
+
+#include "a29k.h"
+
+/* Set our default target to be the 29050; that is the more interesting chip
+ for Unix systems. */
+
+#undef TARGET_DEFAULT
+/* NOTE(review): 1+2+8+64 are TARGET_* option masks defined in a29k.h --
+ confirm there which options these bits select. */
+#define TARGET_DEFAULT (1+2+8+64)
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dam29k -Da29k -Dam29000"
+
+/* Unless -m29000 is given, also predefine the 29050 macros. */
+#undef CPP_SPEC
+#define CPP_SPEC "%{!m29000:-Dam29050 -D__am29050__}"
+
+/* For some systems, it is best if double-word objects are aligned on a
+ doubleword boundary. We want to maintain compatibility with MetaWare in
+ a29k.h, but do not feel constrained to do so here. */
+
+#undef BIGGEST_ALIGNMENT
+#define BIGGEST_ALIGNMENT 64
--- /dev/null
+/* Subroutines for insn-output.c for Sun SPARC.
+ Copyright (C) 1987, 1988, 1989, 1992 Free Software Foundation, Inc.
+ Contributed by Michael Tiemann (tiemann@cygnus.com)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#include <stdio.h>
+#include "config.h"
+#include "rtl.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "real.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "insn-flags.h"
+#include "output.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "expr.h"
+#include "recog.h"
+
+/* Global variables for machine-dependent things. */
+
+/* Save the operands last given to a compare for use when we
+ generate a scc or bcc insn. */
+
+rtx sparc_compare_op0, sparc_compare_op1;
+
+/* We may need an epilogue if we spill too many registers.
+ If this is non-zero, then we branch here for the epilogue. */
+static rtx leaf_label;
+
+#ifdef LEAF_REGISTERS
+
+/* Vector to say how input registers are mapped to output
+ registers. FRAME_POINTER_REGNUM cannot be remapped by
+ this function to eliminate it. You must use -fomit-frame-pointer
+ to get that. */
+/* NOTE(review): entries of -1 mark registers with no leaf remapping; the
+ rows appear to be groups of eight hard regs followed by the fp regs --
+ confirm against the register numbering in sparc.h. */
+char leaf_reg_remap[] =
+{ 0, 1, 2, 3, 4, 5, 6, 7,
+ -1, -1, -1, -1, -1, -1, 14, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ 8, 9, 10, 11, 12, 13, -1, 15,
+
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63};
+
+/* Inverse of leaf_reg_remap: for each hard reg, the leaf reg it stands
+ for, or -1 if none. */
+char leaf_reg_backmap[] =
+{ 0, 1, 2, 3, 4, 5, 6, 7,
+ 24, 25, 26, 27, 28, 29, 14, 31,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63};
+#endif
+
+/* Global variables set by FUNCTION_PROLOGUE. */
+/* Size of frame. Need to know this to emit return insns from
+ leaf procedures. */
+int apparent_fsize;
+int actual_fsize;
+
+/* Name of where we pretend to think the frame pointer points.
+ Normally, this is "%fp", but if we are in a leaf procedure,
+ this is "%sp+something". */
+char *frame_base_name;
+
+/* Forward declaration; definition is later in this file. */
+static rtx find_addr_reg ();
+
+/* Return non-zero only if OP is a register of mode MODE,
+ or const0_rtx. */
+int
+reg_or_0_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (op == const0_rtx)
+ return 1;
+ if (register_operand (op, mode))
+ return 1;
+ /* A CONST_DOUBLE whose two words are both zero also counts as zero. */
+ return (GET_CODE (op) == CONST_DOUBLE
+ && CONST_DOUBLE_HIGH (op) == 0
+ && CONST_DOUBLE_LOW (op) == 0);
+}
+
+/* Nonzero if OP can appear as the dest of a RESTORE insn. */
+int
+restore_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) != REG || GET_MODE (op) != mode)
+ return 0;
+ /* Only regs 0-7 and 24-31 (presumably %g and %i) survive a restore. */
+ return REGNO (op) < 8 || (REGNO (op) >= 24 && REGNO (op) < 32);
+}
+
+/* PC-relative call insn on SPARC is independent of `memory_operand'. */
+
+int
+call_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ /* Call patterns always wrap the address in a MEM; anything else is a
+ malformed insn, so crash early. */
+ if (GET_CODE (op) != MEM)
+ abort ();
+ op = XEXP (op, 0);
+ /* The address itself may be a register or any constant. */
+ return (REG_P (op) || CONSTANT_P (op));
+}
+
+/* Like call_operand, but for a bare address (no MEM wrapper). */
+int
+call_operand_address (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (REG_P (op))
+ return 1;
+ return CONSTANT_P (op) != 0;
+}
+
+/* Returns 1 if OP is either a symbol reference or a sum of a symbol
+ reference and a constant. */
+
+int
+symbolic_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ switch (GET_CODE (op))
+ {
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return 1;
+
+ case CONST:
+ /* Accept only (const (plus (symbol_ref/label_ref) (const_int))). */
+ op = XEXP (op, 0);
+ return ((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
+ || GET_CODE (XEXP (op, 0)) == LABEL_REF)
+ && GET_CODE (XEXP (op, 1)) == CONST_INT);
+
+ /* This clause seems to be irrelevant. */
+ case CONST_DOUBLE:
+ return GET_MODE (op) == mode;
+
+ default:
+ return 0;
+ }
+}
+
+/* Return truth value of statement that OP is a symbolic memory
+ operand of mode MODE. */
+
+int
+symbolic_memory_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ rtx addr;
+
+ /* Look through a SUBREG to the underlying expression. */
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+ if (GET_CODE (op) != MEM)
+ return 0;
+
+ addr = XEXP (op, 0);
+ switch (GET_CODE (addr))
+ {
+ case SYMBOL_REF:
+ case CONST:
+ case HIGH:
+ case LABEL_REF:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/* Return 1 if the operand is either a register or a memory operand that is
+ not symbolic. */
+
+int
+reg_or_nonsymb_mem_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ return (register_operand (op, mode)
+ || (memory_operand (op, mode)
+ && ! symbolic_memory_operand (op, mode)));
+}
+
+/* Return 1 if OP is a register, a small immediate, or a valid memory
+ operand of mode MODE (including LO_SUM addresses). */
+int
+sparc_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (register_operand (op, mode))
+ return 1;
+ if (GET_CODE (op) == CONST_INT)
+ return SMALL_INT (op);
+ if (GET_MODE (op) != mode)
+ return 0;
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+ if (GET_CODE (op) != MEM)
+ return 0;
+
+ op = XEXP (op, 0);
+ /* A LO_SUM address must be reg + symbolic to be valid here. */
+ if (GET_CODE (op) == LO_SUM)
+ return (GET_CODE (XEXP (op, 0)) == REG
+ && symbolic_operand (XEXP (op, 1), Pmode));
+ return memory_address_p (mode, op);
+}
+
+/* Return 1 if OP is a valid source/destination for a move of mode MODE:
+ a register, a suitable constant, or a valid memory reference. */
+int
+move_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (mode == DImode && arith_double_operand (op, mode))
+ return 1;
+ if (register_operand (op, mode))
+ return 1;
+ /* Constants must either fit the small-immediate range or have their
+ low 10 bits clear (so a single sethi can build them). */
+ if (GET_CODE (op) == CONST_INT)
+ return (SMALL_INT (op) || (INTVAL (op) & 0x3ff) == 0);
+
+ if (GET_MODE (op) != mode)
+ return 0;
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+ if (GET_CODE (op) != MEM)
+ return 0;
+ op = XEXP (op, 0);
+ /* LO_SUM addresses are reg + any constant here. */
+ if (GET_CODE (op) == LO_SUM)
+ return (register_operand (XEXP (op, 0), Pmode)
+ && CONSTANT_P (XEXP (op, 1)));
+ return memory_address_p (mode, op);
+}
+
+/* Return 1 for a LABEL_REF when compiling PIC; such labels are moved
+ specially. */
+int
+move_pic_label (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ /* Special case for PIC. */
+ return flag_pic != 0 && GET_CODE (op) == LABEL_REF;
+}
+\f
+/* The rtx for the global offset table which is a special form
+ that *is* a position independent symbolic constant. */
+rtx pic_pc_rtx;
+
+/* Ensure that we are not using patterns that are not OK with PIC. */
+
+int
+check_pic (i)
+ int i;
+{
+ switch (flag_pic)
+ {
+ case 1:
+ /* With flag_pic == 1, a bare SYMBOL_REF -- or any CONST other than
+ the special pic_pc_rtx form -- must never reach an insn. */
+ if (GET_CODE (recog_operand[i]) == SYMBOL_REF
+ || (GET_CODE (recog_operand[i]) == CONST
+ && ! rtx_equal_p (pic_pc_rtx, recog_operand[i])))
+ abort ();
+ /* fall through */
+ case 2:
+ default:
+ return 1;
+ }
+}
+
+/* Return true if X is an address which needs a temporary register when
+ reloaded while generating PIC code. */
+
+int
+pic_address_needs_scratch (x)
+ rtx x;
+{
+ /* An address which is a symbolic plus a non SMALL_INT needs a temp reg. */
+ if (GET_CODE (x) != CONST)
+ return 0;
+ x = XEXP (x, 0);
+ return (GET_CODE (x) == PLUS
+ && GET_CODE (XEXP (x, 0)) == SYMBOL_REF
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && ! SMALL_INT (XEXP (x, 1)));
+}
+
+/* Return 1 if OP is a MEM of mode MODE (VOIDmode acts as a wildcard). */
+int
+memop (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) != MEM)
+ return 0;
+ return mode == VOIDmode || mode == GET_MODE (op);
+}
+
+/* Return truth value of whether OP is EQ or NE. */
+
+int
+eq_or_neq (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ enum rtx_code code = GET_CODE (op);
+
+ return code == EQ || code == NE;
+}
+
+/* Return 1 if this is a comparison operator, but not an EQ, NE, GEU,
+ or LTU for non-floating-point. We handle those specially. */
+
+int
+normal_comp_operator (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ enum rtx_code code = GET_CODE (op);
+
+ if (GET_RTX_CLASS (code) != '<')
+ return 0;
+
+ /* Floating-point comparisons are never special-cased. */
+ if (GET_MODE (XEXP (op, 0)) == CCFPmode)
+ return 1;
+
+ return (code != NE && code != EQ && code != GEU && code != LTU);
+}
+
+/* Return 1 if this is a comparison operator. This allows the use of
+ MATCH_OPERATOR to recognize all the branch insns. */
+
+int
+noov_compare_op (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ enum rtx_code code = GET_CODE (op);
+
+ if (GET_RTX_CLASS (code) != '<')
+ return 0;
+
+ if (GET_MODE (XEXP (op, 0)) == CC_NOOVmode)
+ /* These are the only branches which work with CC_NOOVmode. */
+ return (code == EQ || code == NE || code == GE || code == LT);
+ /* Any comparison works with the ordinary CC modes. */
+ return 1;
+}
+
+/* Return 1 if this is a SIGN_EXTEND or ZERO_EXTEND operation. */
+
+int
+extend_op (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ switch (GET_CODE (op))
+ {
+ case SIGN_EXTEND:
+ case ZERO_EXTEND:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/* Return nonzero if OP is an operator of mode MODE which can set
+ the condition codes explicitly. We do not include PLUS and MINUS
+ because these require CC_NOOVmode, which we handle explicitly. */
+
+int
+cc_arithop (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ enum rtx_code code = GET_CODE (op);
+
+ return code == AND || code == IOR || code == XOR;
+}
+
+/* Return nonzero if OP is an operator of mode MODE which can bitwise
+ complement its second operand and set the condition codes explicitly. */
+
+int
+cc_arithopn (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ enum rtx_code code = GET_CODE (op);
+
+ /* XOR is not here because combine canonicalizes (xor (not ...) ...)
+ and (xor ... (not ...)) to (not (xor ...)). */
+ return code == AND || code == IOR;
+}
+\f
+/* Return truth value of whether OP can be used as an operands in a three
+ address arithmetic insn (such as add %o1,7,%l2) of mode MODE. */
+
+int
+arith_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (register_operand (op, mode))
+ return 1;
+ /* Otherwise, only an immediate in the SMALL_INT range is usable. */
+ return GET_CODE (op) == CONST_INT && SMALL_INT (op);
+}
+
+/* Return truth value of whether OP can be used as an operand in a two
+ address arithmetic insn (such as set 123456,%o4) of mode MODE. */
+
+int
+arith32_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) == CONST_INT)
+ return 1;
+ return register_operand (op, mode);
+}
+
+/* Return truth value of whether OP is a register or a CONST_DOUBLE. */
+
+int
+arith_double_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ /* The unsigned trick `(x + 0x1000) < 0x2000' accepts exactly
+ -4096 <= x < 4096, i.e. a signed 13-bit immediate.  A CONST_DOUBLE
+ additionally requires its high word to be a proper sign extension
+ of the low word's sign bit. */
+ return (register_operand (op, mode)
+ || (GET_CODE (op) == CONST_DOUBLE
+ && (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
+ && (unsigned) (CONST_DOUBLE_LOW (op) + 0x1000) < 0x2000
+ && ((CONST_DOUBLE_HIGH (op) == -1
+ && (CONST_DOUBLE_LOW (op) & 0x1000) == 0x1000)
+ || (CONST_DOUBLE_HIGH (op) == 0
+ && (CONST_DOUBLE_LOW (op) & 0x1000) == 0)))
+ || (GET_CODE (op) == CONST_INT
+ && (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
+ && (unsigned) (INTVAL (op) + 0x1000) < 0x2000));
+}
+
+/* Return truth value of whether OP is a integer which fits the
+ range constraining immediate operands in three-address insns. */
+
+int
+small_int (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) != CONST_INT)
+ return 0;
+ return SMALL_INT (op);
+}
+
+/* Return truth value of statement that OP is a call-clobbered register. */
+int
+clobbered_register (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) != REG)
+ return 0;
+ return call_used_regs[REGNO (op)] != 0;
+}
+\f
+/* X and Y are two things to compare using CODE. Emit the compare insn and
+ return the rtx for register 0 in the proper mode. */
+
+rtx
+gen_compare_reg (code, x, y)
+ enum rtx_code code;
+ rtx x, y;
+{
+ /* The CC mode (e.g. CCFPmode vs plain CC) depends on the comparison
+ code and the mode of X. */
+ enum machine_mode mode = SELECT_CC_MODE (code, x);
+ rtx cc_reg = gen_rtx (REG, mode, 0);
+
+ emit_insn (gen_rtx (SET, VOIDmode, cc_reg,
+ gen_rtx (COMPARE, mode, x, y)));
+
+ return cc_reg;
+}
+\f
+/* Return nonzero if a return peephole merging return with
+ setting of output register is ok. */
+int
+leaf_return_peephole_ok ()
+{
+ /* Merging is safe only when this function allocated no stack frame. */
+ return actual_fsize == 0;
+}
+
+/* Return nonzero if TRIAL can go into the function epilogue's
+ delay slot. SLOT is the slot we are trying to fill. */
+
+int
+eligible_for_epilogue_delay (trial, slot)
+ rtx trial;
+ int slot;
+{
+ rtx pat, src;
+
+ /* The epilogue has only a single delay slot. */
+ if (slot >= 1)
+ return 0;
+ /* Only a plain single-SET insn of unit length can be considered. */
+ if (GET_CODE (trial) != INSN
+ || GET_CODE (PATTERN (trial)) != SET)
+ return 0;
+ if (get_attr_length (trial) != 1)
+ return 0;
+
+ /* In the case of a true leaf function, anything can
+ go into the delay slot. */
+ if (leaf_function)
+ {
+ if (leaf_return_peephole_ok ())
+ return (get_attr_in_branch_delay (trial) == IN_BRANCH_DELAY_TRUE);
+ return 0;
+ }
+
+ /* Otherwise, only operations which can be done in tandem with
+ a `restore' insn can go into the delay slot. The destination must
+ also be a register the restore can reach (see restore_operand). */
+ pat = PATTERN (trial);
+ if (GET_CODE (SET_DEST (pat)) != REG
+ || REGNO (SET_DEST (pat)) == 0
+ || (leaf_function
+ && REGNO (SET_DEST (pat)) < 32
+ && REGNO (SET_DEST (pat)) >= 16)
+ || (! leaf_function
+ && (REGNO (SET_DEST (pat)) >= 32
+ || REGNO (SET_DEST (pat)) < 24)))
+ return 0;
+ src = SET_SRC (pat);
+ if (arith_operand (src, GET_MODE (src)))
+ return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
+ if (arith_double_operand (src, GET_MODE (src)))
+ return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
+ /* A two-operand add or a register-minus-immediate can be folded into
+ the restore itself. */
+ if (GET_CODE (src) == PLUS)
+ {
+ if (register_operand (XEXP (src, 0), SImode)
+ && arith_operand (XEXP (src, 1), SImode))
+ return 1;
+ if (register_operand (XEXP (src, 1), SImode)
+ && arith_operand (XEXP (src, 0), SImode))
+ return 1;
+ if (register_operand (XEXP (src, 0), DImode)
+ && arith_double_operand (XEXP (src, 1), DImode))
+ return 1;
+ if (register_operand (XEXP (src, 1), DImode)
+ && arith_double_operand (XEXP (src, 0), DImode))
+ return 1;
+ }
+ if (GET_CODE (src) == MINUS
+ && register_operand (XEXP (src, 0), SImode)
+ && small_int (XEXP (src, 1), VOIDmode))
+ return 1;
+ if (GET_CODE (src) == MINUS
+ && register_operand (XEXP (src, 0), DImode)
+ && !register_operand (XEXP (src, 1), DImode)
+ && arith_double_operand (XEXP (src, 1), DImode))
+ return 1;
+ return 0;
+}
+
+/* Return 1 if the branch between insns UID1 and UID2 is short enough for
+ a PC-relative branch. */
+int
+short_branch (uid1, uid2)
+ int uid1, uid2;
+{
+ /* Unsigned wraparound makes the single compare below accept exactly
+ -1024 <= delta < 1024.  The units are whatever insn_addresses is
+ measured in (presumably instruction words -- confirm against
+ shorten_branches). */
+ unsigned int delta = insn_addresses[uid1] - insn_addresses[uid2];
+ if (delta + 1024 < 2048)
+ return 1;
+ /* warning ("long branch, distance %d", delta); */
+ return 0;
+}
+
+/* Return non-zero if REG is not used after INSN.
+ We assume REG is a reload reg, and therefore does
+ not live past labels or calls or jumps. */
+int
+reg_unused_after (reg, insn)
+ rtx reg;
+ rtx insn;
+{
+ enum rtx_code code, prev_code = UNKNOWN;
+
+ while ((insn = NEXT_INSN (insn)) != 0)
+ {
+ /* A reload reg does not survive a call if it is call-clobbered. */
+ if (prev_code == CALL_INSN && call_used_regs[REGNO (reg)])
+ return 1;
+
+ code = GET_CODE (insn);
+ /* Nor does it live past a label. */
+ if (code == CODE_LABEL)
+ return 1;
+
+ if (GET_RTX_CLASS (code) == 'i')
+ {
+ rtx set = single_set (insn);
+ int in_src = set && reg_overlap_mentioned_p (reg, SET_SRC (set));
+ /* Used as a source: still live. */
+ if (set && in_src)
+ return 0;
+ /* Fully overwritten before any use: dead. */
+ if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
+ return 1;
+ /* Mentioned in a multi-set or other pattern: assume live. */
+ if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
+ return 0;
+ }
+ prev_code = code;
+ }
+ return 1;
+}
+\f
+/* Legitimize PIC addresses. If the address is already position-independent,
+ we return ORIG. Newly generated position-independent addresses go into a
+ reg. This is REG if non zero, otherwise we allocate register(s) as
+ necessary. If this is called during reload, and we need a second temp
+ register, then we use SCRATCH, which is provided via the
+ SECONDARY_INPUT_RELOAD_CLASS mechanism. */
+
+rtx
+legitimize_pic_address (orig, mode, reg, scratch)
+ rtx orig;
+ enum machine_mode mode;
+ rtx reg, scratch;
+{
+ if (GET_CODE (orig) == SYMBOL_REF)
+ {
+ rtx pic_ref, address;
+ rtx insn;
+
+ if (reg == 0)
+ {
+ /* We cannot allocate a fresh register during reload; the caller
+ must supply one. */
+ if (reload_in_progress)
+ abort ();
+ else
+ reg = gen_reg_rtx (Pmode);
+ }
+
+ if (flag_pic == 2)
+ {
+ /* If not during reload, allocate another temp reg here for loading
+ in the address, so that these instructions can be optimized
+ properly. */
+ rtx temp_reg = (reload_in_progress ? reg : gen_reg_rtx (Pmode));
+
+ /* flag_pic == 2 uses a full 32-bit GOT offset, built with a
+ HIGH/LO_SUM pair. */
+ emit_insn (gen_rtx (SET, VOIDmode, temp_reg,
+ gen_rtx (HIGH, Pmode, orig)));
+ emit_insn (gen_rtx (SET, VOIDmode, temp_reg,
+ gen_rtx (LO_SUM, Pmode, temp_reg, orig)));
+ address = temp_reg;
+ }
+ else
+ address = orig;
+
+ /* Load the symbol's address from its GOT slot. */
+ pic_ref = gen_rtx (MEM, Pmode,
+ gen_rtx (PLUS, Pmode,
+ pic_offset_table_rtx, address));
+ current_function_uses_pic_offset_table = 1;
+ RTX_UNCHANGING_P (pic_ref) = 1;
+ insn = emit_move_insn (reg, pic_ref);
+ /* Put a REG_EQUAL note on this insn, so that it can be optimized
+ by loop. */
+ REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_EQUAL, orig,
+ REG_NOTES (insn));
+ return reg;
+ }
+ else if (GET_CODE (orig) == CONST)
+ {
+ rtx base, offset;
+
+ /* Already in the pic_offset_table + X form: nothing to do. */
+ if (GET_CODE (XEXP (orig, 0)) == PLUS
+ && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
+ return orig;
+
+ if (reg == 0)
+ {
+ if (reload_in_progress)
+ abort ();
+ else
+ reg = gen_reg_rtx (Pmode);
+ }
+
+ /* Legitimize both halves of a (const (plus X Y)) separately. */
+ if (GET_CODE (XEXP (orig, 0)) == PLUS)
+ {
+ base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode,
+ reg, 0);
+ offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
+ base == reg ? 0 : reg, 0);
+ }
+ else
+ abort ();
+
+ if (GET_CODE (offset) == CONST_INT)
+ {
+ if (SMALL_INT (offset))
+ return plus_constant_for_output (base, INTVAL (offset));
+ else if (! reload_in_progress)
+ offset = force_reg (Pmode, offset);
+ /* We can't create any new registers during reload, so use the
+ SCRATCH reg provided by the reload_insi pattern. */
+ else if (scratch)
+ {
+ emit_move_insn (scratch, offset);
+ offset = scratch;
+ }
+ else
+ /* If we reach here, then the SECONDARY_INPUT_RELOAD_CLASS
+ macro needs to be adjusted so that a scratch reg is provided
+ for this address. */
+ abort ();
+ }
+ return gen_rtx (PLUS, Pmode, base, offset);
+ }
+ else if (GET_CODE (orig) == LABEL_REF)
+ current_function_uses_pic_offset_table = 1;
+
+ return orig;
+}
+
+/* Set up PIC-specific rtl. This should not cause any insns
+ to be emitted. */
+
+void
+initialize_pic ()
+{
+ /* Nothing to do on SPARC; all PIC setup happens in finalize_pic. */
+}
+
+/* Emit special PIC prologues and epilogues. */
+
+void
+finalize_pic ()
+{
+ /* The table we use to reference PIC data. */
+ rtx global_offset_table;
+ /* Labels to get the PC in the prologue of this function. */
+ rtx l1, l2;
+ rtx seq;
+ int orig_flag_pic = flag_pic;
+
+ if (current_function_uses_pic_offset_table == 0)
+ return;
+
+ if (! flag_pic)
+ abort ();
+
+ /* Turn flag_pic off while emitting the GOT setup itself, so the insns
+ below are not themselves PIC-legitimized. */
+ flag_pic = 0;
+ l1 = gen_label_rtx ();
+ l2 = gen_label_rtx ();
+
+ start_sequence ();
+
+ emit_label (l1);
+ /* Note that we pun calls and jumps here! */
+ emit_jump_insn (gen_rtx (PARALLEL, VOIDmode,
+ gen_rtvec (2,
+ gen_rtx (SET, VOIDmode, pc_rtx, gen_rtx (LABEL_REF, VOIDmode, l2)),
+ gen_rtx (SET, VOIDmode, gen_rtx (REG, SImode, 15), gen_rtx (LABEL_REF, VOIDmode, l2)))))
+;
+ emit_label (l2);
+
+ /* Initialize every time through, since we can't easily
+ know this to be permanent. */
+ global_offset_table = gen_rtx (SYMBOL_REF, Pmode, "*__GLOBAL_OFFSET_TABLE_");
+ /* pic_pc_rtx = &GOT - (L1 - pc), the GOT's displacement from the
+ address captured at L1. */
+ pic_pc_rtx = gen_rtx (CONST, Pmode,
+ gen_rtx (MINUS, Pmode,
+ global_offset_table,
+ gen_rtx (CONST, Pmode,
+ gen_rtx (MINUS, Pmode,
+ gen_rtx (LABEL_REF, VOIDmode, l1),
+ pc_rtx))));
+
+ emit_insn (gen_rtx (SET, VOIDmode, pic_offset_table_rtx,
+ gen_rtx (HIGH, Pmode, pic_pc_rtx)));
+ emit_insn (gen_rtx (SET, VOIDmode,
+ pic_offset_table_rtx,
+ gen_rtx (LO_SUM, Pmode,
+ pic_offset_table_rtx, pic_pc_rtx)));
+ /* Add in the PC captured in reg 15 by the call/jump pun above. */
+ emit_insn (gen_rtx (SET, VOIDmode,
+ pic_offset_table_rtx,
+ gen_rtx (PLUS, Pmode,
+ pic_offset_table_rtx, gen_rtx (REG, Pmode, 15))));
+ /* emit_insn (gen_rtx (ASM_INPUT, VOIDmode, "!#PROLOGUE# 1")); */
+ LABEL_PRESERVE_P (l1) = 1;
+ LABEL_PRESERVE_P (l2) = 1;
+ flag_pic = orig_flag_pic;
+
+ seq = gen_sequence ();
+ end_sequence ();
+ /* Splice the GOT setup in right after the very first insn. */
+ emit_insn_after (seq, get_insns ());
+
+ /* Need to emit this whether or not we obey regdecls,
+ since setjmp/longjmp can cause life info to screw up. */
+ emit_insn (gen_rtx (USE, VOIDmode, pic_offset_table_rtx));
+}
+\f
+/* For the SPARC, REG+CONST and LO_SUM addresses cost 1, REG+REG and
+ HIGH cost 2, and anything involving a symbolic constant costs 4.
+
+ We make REG+REG slightly more expensive because it might keep
+ a register live for longer than we might like.
+
+ PIC addresses are very expensive.
+
+ It is no coincidence that this has the same structure
+ as GO_IF_LEGITIMATE_ADDRESS. */
+int
+sparc_address_cost (X)
+ rtx X;
+{
+#if 0
+ /* Handled before calling here. */
+ if (GET_CODE (X) == REG)
+ { return 1; }
+#endif
+ switch (GET_CODE (X))
+ {
+ case PLUS:
+ /* REG+REG is dearer than REG+immediate: it may keep a register
+ live longer than we would like. */
+ if (GET_CODE (XEXP (X, 0)) == REG
+ && GET_CODE (XEXP (X, 1)) == REG)
+ return 2;
+ return 1;
+
+ case LO_SUM:
+ return 1;
+
+ case HIGH:
+ return 2;
+
+ default:
+ /* Symbolic and all other address forms are the most expensive. */
+ return 4;
+ }
+}
+\f
+/* Emit insns to move operands[1] into operands[0].
+
+ Return 1 if we have written out everything that needs to be done to
+ do the move. Otherwise, return 0 and the caller will emit the move
+ normally.
+
+ SCRATCH_REG if non zero can be used as a scratch register for the move
+ operation. It is provided by a SECONDARY_RELOAD_* macro if needed. */
+
+int
+emit_move_sequence (operands, mode, scratch_reg)
+ rtx *operands;
+ enum machine_mode mode;
+ rtx scratch_reg;
+{
+ register rtx operand0 = operands[0];
+ register rtx operand1 = operands[1];
+
+ /* Handle most common case first: storing into a register. */
+ if (register_operand (operand0, mode))
+ {
+ if (register_operand (operand1, mode)
+ || (GET_CODE (operand1) == CONST_INT && SMALL_INT (operand1))
+ || (GET_CODE (operand1) == CONST_DOUBLE
+ && arith_double_operand (operand1, DImode))
+ || (GET_CODE (operand1) == HIGH && GET_MODE (operand1) != DImode)
+ /* Only `general_operands' can come here, so MEM is ok. */
+ || GET_CODE (operand1) == MEM)
+ {
+ /* Run this case quickly. */
+ emit_insn (gen_rtx (SET, VOIDmode, operand0, operand1));
+ return 1;
+ }
+ }
+ else if (GET_CODE (operand0) == MEM)
+ {
+ if (register_operand (operand1, mode) || operand1 == const0_rtx)
+ {
+ /* Run this case quickly. */
+ emit_insn (gen_rtx (SET, VOIDmode, operand0, operand1));
+ return 1;
+ }
+ /* Storing anything else: force the source into a register first.
+ We cannot do that during reload, which may create no new
+ pseudos, so in that case fall through and return 0. */
+ if (! reload_in_progress)
+ {
+ operands[0] = validize_mem (operand0);
+ operands[1] = operand1 = force_reg (mode, operand1);
+ }
+ }
+
+ /* Simplify the source if we need to. Must handle DImode HIGH operators
+ here because such a move needs a clobber added. */
+ if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
+ || (GET_CODE (operand1) == HIGH && GET_MODE (operand1) == DImode))
+ {
+ if (flag_pic && symbolic_operand (operand1, mode))
+ {
+ /* During reload reuse the destination as the temporary,
+ since no new pseudo may be created then. */
+ rtx temp_reg = reload_in_progress ? operand0 : 0;
+
+ operands[1] = legitimize_pic_address (operand1, mode, temp_reg,
+ scratch_reg);
+ }
+ else if (GET_CODE (operand1) == CONST_INT
+ ? (! SMALL_INT (operand1)
+ && (INTVAL (operand1) & 0x3ff) != 0)
+ : (GET_CODE (operand1) == CONST_DOUBLE
+ ? ! arith_double_operand (operand1, DImode)
+ : 1))
+ {
+ /* For DImode values, temp must be operand0 because of the way
+ HI and LO_SUM work. The LO_SUM operator only copies half of
+ the LSW from the dest of the HI operator. If the LO_SUM dest is
+ not the same as the HI dest, then the MSW of the LO_SUM dest will
+ never be set.
+
+ ??? The real problem here is that the ...(HI:DImode pattern emits
+ multiple instructions, and the ...(LO_SUM:DImode pattern emits
+ one instruction. This fails, because the compiler assumes that
+ LO_SUM copies all bits of the first operand to its dest. Better
+ would be to have the HI pattern emit one instruction and the
+ LO_SUM pattern multiple instructions. Even better would be
+ to use four rtl insns. */
+ rtx temp = ((reload_in_progress || mode == DImode)
+ ? operand0 : gen_reg_rtx (mode));
+
+ emit_insn (gen_rtx (SET, VOIDmode, temp,
+ gen_rtx (HIGH, mode, operand1)));
+ operands[1] = gen_rtx (LO_SUM, mode, temp, operand1);
+ }
+ }
+
+ if (GET_CODE (operand1) == LABEL_REF && flag_pic)
+ {
+ /* The procedure for doing this involves using a call instruction to
+ get the pc into o7. We need to indicate this explicitly because
+ the tablejump pattern assumes that it can use this value also. */
+ emit_insn (gen_rtx (PARALLEL, VOIDmode,
+ gen_rtvec (2,
+ gen_rtx (SET, VOIDmode, operand0,
+ operand1),
+ gen_rtx (SET, VOIDmode,
+ gen_rtx (REG, mode, 15),
+ pc_rtx))));
+ return 1;
+ }
+
+ /* Now have insn-emit do whatever it normally does. */
+ return 0;
+}
+\f
+/* Return the best assembler insn template
+ for moving operands[1] into operands[0] as a fullword. */
+
+char *
+singlemove_string (operands)
+ rtx *operands;
+{
+ if (GET_CODE (operands[0]) == MEM)
+ {
+ /* Memory destinations can only be stored from a register;
+ mem-to-mem moves must have been split by the caller. */
+ if (GET_CODE (operands[1]) != MEM)
+ return "st %r1,%0";
+ else
+ abort ();
+ }
+ if (GET_CODE (operands[1]) == MEM)
+ return "ld %1,%0";
+ if (GET_CODE (operands[1]) == CONST_INT
+ && ! CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'I'))
+ {
+ int i = INTVAL (operands[1]);
+
+ /* If all low order 10 bits are clear, then we only need a single
+ sethi insn to load the constant: sethi %hi sets bits 31-10 and
+ clears bits 9-0, and %lo extracts only the low 10 bits. (This
+ matches the 0x3ff test in emit_move_sequence; the previous 0xFFF
+ mask needlessly emitted an `or' with a zero %lo value when only
+ bits 10-11 were set.) */
+ if (i & 0x000003FF)
+ return "sethi %%hi(%a1),%0\n\tor %0,%%lo(%a1),%0";
+ else
+ return "sethi %%hi(%a1),%0";
+ }
+ /* ??? Wrong if target is DImode? */
+ return "mov %1,%0";
+}
+\f
+/* Output assembler code to perform a doubleword move insn
+ with operands OPERANDS. */
+
+char *
+output_move_double (operands)
+ rtx *operands;
+{
+ enum { REGOP, OFFSOP, MEMOP, PUSHOP, POPOP, CNSTOP, RNDOP } optype0, optype1;
+ rtx latehalf[2];
+ rtx addreg0 = 0, addreg1 = 0;
+
+ /* First classify both operands. */
+
+ if (REG_P (operands[0]))
+ optype0 = REGOP;
+ else if (offsettable_memref_p (operands[0]))
+ optype0 = OFFSOP;
+ else if (GET_CODE (operands[0]) == MEM)
+ optype0 = MEMOP;
+ else
+ optype0 = RNDOP;
+
+ if (REG_P (operands[1]))
+ optype1 = REGOP;
+ else if (CONSTANT_P (operands[1]))
+ optype1 = CNSTOP;
+ else if (offsettable_memref_p (operands[1]))
+ optype1 = OFFSOP;
+ else if (GET_CODE (operands[1]) == MEM)
+ optype1 = MEMOP;
+ else
+ optype1 = RNDOP;
+
+ /* Check for the cases that the operand constraints are not
+ supposed to allow to happen. Abort if we get one,
+ because generating code for these cases is painful. */
+
+ if (optype0 == RNDOP || optype1 == RNDOP)
+ abort ();
+
+ /* If an operand is an unoffsettable memory ref, find a register
+ we can increment temporarily to make it refer to the second word. */
+
+ if (optype0 == MEMOP)
+ addreg0 = find_addr_reg (XEXP (operands[0], 0));
+
+ if (optype1 == MEMOP)
+ addreg1 = find_addr_reg (XEXP (operands[1], 0));
+
+ /* Ok, we can do one word at a time.
+ Normally we do the low-numbered word first,
+ but if either operand is autodecrementing then we
+ do the high-numbered word first.
+
+ In either case, set up in LATEHALF the operands to use for the
+ high-numbered (least significant) word and in some cases alter the
+ operands in OPERANDS to be suitable for the low-numbered word. */
+
+ if (optype0 == REGOP)
+ latehalf[0] = gen_rtx (REG, SImode, REGNO (operands[0]) + 1);
+ else if (optype0 == OFFSOP)
+ latehalf[0] = adj_offsettable_operand (operands[0], 4);
+ else
+ latehalf[0] = operands[0];
+
+ if (optype1 == REGOP)
+ latehalf[1] = gen_rtx (REG, SImode, REGNO (operands[1]) + 1);
+ else if (optype1 == OFFSOP)
+ latehalf[1] = adj_offsettable_operand (operands[1], 4);
+ else if (optype1 == CNSTOP)
+ split_double (operands[1], &operands[1], &latehalf[1]);
+ else
+ latehalf[1] = operands[1];
+
+ /* If the first move would clobber the source of the second one,
+ do them in the other order.
+
+ RMS says "This happens only for registers;
+ such overlap can't happen in memory unless the user explicitly
+ sets it up, and that is an undefined circumstance."
+
+ but it happens on the sparc when loading parameter registers,
+ so I am going to define that circumstance, and make it work
+ as expected. */
+
+ /* Easy case: try moving both words at once. */
+ /* First check for moving between an even/odd register pair
+ and a memory location. ldd/std require the register number
+ to be even (an aligned pair). */
+ if ((optype0 == REGOP && optype1 != REGOP && optype1 != CNSTOP
+ && (REGNO (operands[0]) & 1) == 0)
+ || (optype0 != REGOP && optype0 != CNSTOP && optype1 == REGOP
+ && (REGNO (operands[1]) & 1) == 0))
+ {
+ rtx op1, op2;
+ rtx base = 0, offset = const0_rtx;
+
+ /* OP1 gets the register pair, and OP2 gets the memory address. */
+ if (optype0 == REGOP)
+ op1 = operands[0], op2 = operands[1];
+ else
+ op1 = operands[1], op2 = operands[0];
+
+ /* Now see if we can trust the address to be 8-byte aligned. */
+ /* Trust global variables. */
+
+ /* NOTE(review): this branch always emits ldd, even when OP2 was
+ the destination (a store) -- confirm LO_SUM addresses can only
+ reach here in the load direction. */
+ if (GET_CODE (op2) == LO_SUM)
+ {
+ operands[0] = op1;
+ operands[1] = op2;
+
+ if (final_sequence)
+ abort ();
+ return "ldd %1,%0";
+ }
+
+ if (GET_CODE (XEXP (op2, 0)) == PLUS)
+ {
+ rtx temp = XEXP (op2, 0);
+ if (GET_CODE (XEXP (temp, 0)) == REG)
+ base = XEXP (temp, 0), offset = XEXP (temp, 1);
+ else if (GET_CODE (XEXP (temp, 1)) == REG)
+ base = XEXP (temp, 1), offset = XEXP (temp, 0);
+ }
+
+ /* Trust round enough offsets from the stack or frame pointer. */
+ if (base
+ && (REGNO (base) == FRAME_POINTER_REGNUM
+ || REGNO (base) == STACK_POINTER_REGNUM))
+ {
+ if (GET_CODE (offset) == CONST_INT
+ && (INTVAL (offset) & 0x7) == 0)
+ {
+ if (op1 == operands[0])
+ return "ldd %1,%0";
+ else
+ return "std %1,%0";
+ }
+ }
+ /* We know structs not on the stack are properly aligned. Since a
+ double asks for 8-byte alignment, we know it must have got that
+ if it is in a struct. But a DImode need not be 8-byte aligned,
+ because it could be a struct containing two ints or pointers. */
+ else if (GET_CODE (operands[1]) == MEM
+ && GET_MODE (operands[1]) == DFmode
+ && (CONSTANT_P (XEXP (operands[1], 0))
+ /* Let user ask for it anyway. */
+ || TARGET_ALIGN))
+ return "ldd %1,%0";
+ else if (GET_CODE (operands[0]) == MEM
+ && GET_MODE (operands[0]) == DFmode
+ && (CONSTANT_P (XEXP (operands[0], 0))
+ || TARGET_ALIGN))
+ return "std %1,%0";
+ }
+
+ /* Destination overlaps the late half of the source: move the
+ high-numbered word first so the low move does not clobber it. */
+ if (optype0 == REGOP && optype1 == REGOP
+ && REGNO (operands[0]) == REGNO (latehalf[1]))
+ {
+ /* Make any unoffsettable addresses point at high-numbered word. */
+ if (addreg0)
+ output_asm_insn ("add %0,0x4,%0", &addreg0);
+ if (addreg1)
+ output_asm_insn ("add %0,0x4,%0", &addreg1);
+
+ /* Do that word. */
+ output_asm_insn (singlemove_string (latehalf), latehalf);
+
+ /* Undo the adds we just did. */
+ if (addreg0)
+ output_asm_insn ("add %0,-0x4,%0", &addreg0);
+ if (addreg1)
+ output_asm_insn ("add %0,-0x4,%0", &addreg1);
+
+ /* Do low-numbered word. */
+ return singlemove_string (operands);
+ }
+ else if (optype0 == REGOP && optype1 != REGOP
+ && reg_overlap_mentioned_p (operands[0], operands[1]))
+ {
+ /* Do the late half first. */
+ output_asm_insn (singlemove_string (latehalf), latehalf);
+ /* Then clobber. */
+ return singlemove_string (operands);
+ }
+
+ /* Normal case: do the two words, low-numbered first. */
+
+ output_asm_insn (singlemove_string (operands), operands);
+
+ /* Make any unoffsettable addresses point at high-numbered word. */
+ if (addreg0)
+ output_asm_insn ("add %0,0x4,%0", &addreg0);
+ if (addreg1)
+ output_asm_insn ("add %0,0x4,%0", &addreg1);
+
+ /* Do that word. */
+ output_asm_insn (singlemove_string (latehalf), latehalf);
+
+ /* Undo the adds we just did. */
+ if (addreg0)
+ output_asm_insn ("add %0,-0x4,%0", &addreg0);
+ if (addreg1)
+ output_asm_insn ("add %0,-0x4,%0", &addreg1);
+
+ return "";
+}
+\f
+/* Output the assembler template for a doubleword move in which at
+ least one operand is a floating point register. Register<->register
+ and register<->memory round trips go through a scratch stack slot.
+ NOTE(review): `%@' and `%R' are expanded by print_operand; `%R'
+ appears to name the odd (second) register of a pair and `%@' a
+ scratch area addressed off a fixed register -- confirm there. */
+char *
+output_fp_move_double (operands)
+ rtx *operands;
+{
+ rtx addr;
+
+ if (FP_REG_P (operands[0]))
+ {
+ if (FP_REG_P (operands[1]))
+ return "fmovs %1,%0\n\tfmovs %R1,%R0";
+ if (GET_CODE (operands[1]) == REG)
+ {
+ /* Integer reg -> fp reg: bounce through memory. An even
+ register number means an aligned pair, so std works. */
+ if ((REGNO (operands[1]) & 1) == 0)
+ return "std %1,[%@-8]\n\tldd [%@-8],%0";
+ else
+ return "st %R1,[%@-4]\n\tst %1,[%@-8]\n\tldd [%@-8],%0";
+ }
+ addr = XEXP (operands[1], 0);
+
+ /* Use ldd if known to be aligned. */
+ if (TARGET_ALIGN
+ || (GET_CODE (addr) == PLUS
+ && (((XEXP (addr, 0) == frame_pointer_rtx
+ || XEXP (addr, 0) == stack_pointer_rtx)
+ && GET_CODE (XEXP (addr, 1)) == CONST_INT
+ && (INTVAL (XEXP (addr, 1)) & 0x7) == 0)
+ /* Arrays are known to be aligned,
+ and reg+reg addresses are used (on this machine)
+ only for array accesses. */
+ || (REG_P (XEXP (addr, 0)) && REG_P (XEXP (addr, 1)))))
+ || (GET_MODE (operands[0]) == DFmode
+ && (GET_CODE (addr) == LO_SUM || CONSTANT_P (addr))))
+ return "ldd %1,%0";
+
+ /* Otherwise use two ld insns. */
+ operands[2]
+ = gen_rtx (MEM, GET_MODE (operands[1]),
+ plus_constant_for_output (addr, 4));
+ return "ld %1,%0\n\tld %2,%R0";
+ }
+ else if (FP_REG_P (operands[1]))
+ {
+ if (GET_CODE (operands[0]) == REG)
+ {
+ /* Fp reg -> integer reg: bounce through memory. */
+ if ((REGNO (operands[0]) & 1) == 0)
+ return "std %1,[%@-8]\n\tldd [%@-8],%0";
+ else
+ return "std %1,[%@-8]\n\tld [%@-4],%R0\n\tld [%@-8],%0";
+ }
+ addr = XEXP (operands[0], 0);
+
+ /* Use std if we can be sure it is well-aligned. */
+ if (TARGET_ALIGN
+ || (GET_CODE (addr) == PLUS
+ && (((XEXP (addr, 0) == frame_pointer_rtx
+ || XEXP (addr, 0) == stack_pointer_rtx)
+ && GET_CODE (XEXP (addr, 1)) == CONST_INT
+ && (INTVAL (XEXP (addr, 1)) & 0x7) == 0)
+ /* Arrays are known to be aligned,
+ and reg+reg addresses are used (on this machine)
+ only for array accesses. */
+ || (REG_P (XEXP (addr, 0)) && REG_P (XEXP (addr, 1)))))
+ || (GET_MODE (operands[1]) == DFmode
+ && (GET_CODE (addr) == LO_SUM || CONSTANT_P (addr))))
+ return "std %1,%0";
+
+ /* Otherwise use two st insns. */
+ operands[2]
+ = gen_rtx (MEM, GET_MODE (operands[0]),
+ plus_constant_for_output (addr, 4));
+ return "st %r1,%0\n\tst %R1,%2";
+ }
+ else abort ();
+}
+\f
+/* Return a REG that occurs in ADDR with coefficient 1.
+ ADDR can be effectively incremented by incrementing REG. */
+
+static rtx
+find_addr_reg (addr)
+ rtx addr;
+{
+ /* Walk down the PLUS tree, always following the operand that can
+ serve as the incrementable register, until a bare REG remains. */
+ while (GET_CODE (addr) == PLUS)
+ {
+ rtx op0 = XEXP (addr, 0);
+ rtx op1 = XEXP (addr, 1);
+
+ /* We absolutely can not fudge the frame pointer here, because the
+ frame pointer must always be 8 byte aligned. It also confuses
+ debuggers. */
+ if (GET_CODE (op0) == REG && REGNO (op0) != FRAME_POINTER_REGNUM)
+ addr = op0;
+ else if (GET_CODE (op1) == REG
+ && REGNO (op1) != FRAME_POINTER_REGNUM)
+ addr = op1;
+ else if (CONSTANT_P (op0))
+ addr = op1;
+ else if (CONSTANT_P (op1))
+ addr = op0;
+ else
+ abort ();
+ }
+ if (GET_CODE (addr) != REG)
+ abort ();
+ return addr;
+}
+
+/* Write to the assembler file the opcode OPNAME ("ld" or "st")
+ followed by the size suffix appropriate for MODE; SIGNEDP selects
+ between the signed and unsigned load suffix tables. */
+void
+output_sized_memop (opname, mode, signedp)
+ char *opname;
+ enum machine_mode mode;
+ int signedp;
+{
+ static char *ld_size_suffix_u[] = { "ub", "uh", "", "?", "d" };
+ static char *ld_size_suffix_s[] = { "sb", "sh", "", "?", "d" };
+ static char *st_size_suffix[] = { "b", "h", "", "?", "d" };
+ char **suffixes;
+
+ /* Stores take the same suffix regardless of signedness. */
+ if (opname[0] != 'l')
+ suffixes = st_size_suffix;
+ else if (signedp)
+ suffixes = ld_size_suffix_s;
+ else
+ suffixes = ld_size_suffix_u;
+
+ /* Tables are indexed by half the mode size: 1, 2, 4, 8 bytes map
+ to slots 0, 1, 2, 4. */
+ fprintf (asm_out_file, "\t%s%s", opname,
+ suffixes[GET_MODE_SIZE (mode) >> 1]);
+}
+\f
+/* Emit the shift that positions the HImode or QImode value in
+ OPERANDS[2] at the top of the word in OPERANDS[0] (16 or 24 bit
+ left shift respectively); other modes are not handled. */
+void
+output_move_with_extension (operands)
+ rtx *operands;
+{
+ enum machine_mode mode = GET_MODE (operands[2]);
+
+ if (mode == HImode)
+ output_asm_insn ("sll %2,0x10,%0", operands);
+ else if (mode == QImode)
+ output_asm_insn ("sll %2,0x18,%0", operands);
+ else
+ abort ();
+}
+\f
+/* Load the address specified by OPERANDS[3] into the register
+ specified by OPERANDS[0].
+
+ OPERANDS[3] may be the result of a sum, hence it could either be:
+
+ (1) CONST
+ (2) REG
+ (3) REG + CONST_INT
+ (4) REG + REG + CONST_INT
+ (5) REG + REG (special case of 4).
+
+ Note that (4) is not a legitimate address.
+ All cases are handled here. */
+
+void
+output_load_address (operands)
+ rtx *operands;
+{
+ rtx base, offset;
+
+ /* A bare constant: let the assembler `set' pseudo-op build it. */
+ if (CONSTANT_P (operands[3]))
+ {
+ output_asm_insn ("set %3,%0", operands);
+ return;
+ }
+
+ /* A bare register: a simple move, elided when src == dest. */
+ if (REG_P (operands[3]))
+ {
+ if (REGNO (operands[0]) != REGNO (operands[3]))
+ output_asm_insn ("mov %3,%0", operands);
+ return;
+ }
+
+ if (GET_CODE (operands[3]) != PLUS)
+ abort ();
+
+ base = XEXP (operands[3], 0);
+ offset = XEXP (operands[3], 1);
+
+ /* Canonicalize so any constant ends up in OFFSET. */
+ if (GET_CODE (base) == CONST_INT)
+ {
+ rtx tmp = base;
+ base = offset;
+ offset = tmp;
+ }
+
+ if (GET_CODE (offset) != CONST_INT)
+ {
+ /* Operand is (PLUS (REG) (REG)). */
+ base = operands[3];
+ offset = const0_rtx;
+ }
+
+ /* Operand slots 6-8 are used as scratch operands for the templates
+ below; callers must leave them free. */
+ if (REG_P (base))
+ {
+ operands[6] = base;
+ operands[7] = offset;
+ if (SMALL_INT (offset))
+ output_asm_insn ("add %6,%7,%0", operands);
+ else
+ output_asm_insn ("set %7,%0\n\tadd %0,%6,%0", operands);
+ }
+ else if (GET_CODE (base) == PLUS)
+ {
+ /* REG + REG (+ CONST_INT): sum the two registers, then the
+ constant if any. */
+ operands[6] = XEXP (base, 0);
+ operands[7] = XEXP (base, 1);
+ operands[8] = offset;
+
+ if (SMALL_INT (offset))
+ output_asm_insn ("add %6,%7,%0\n\tadd %0,%8,%0", operands);
+ else
+ output_asm_insn ("set %8,%0\n\tadd %0,%6,%0\n\tadd %0,%7,%0", operands);
+ }
+ else
+ abort ();
+}
+
+/* Output code to place a size count SIZE in register REG.
+ ALIGN is the size of the unit of transfer.
+
+ Because block moves are pipelined, we don't include the
+ first element in the transfer of SIZE to REG. */
+
+static void
+output_size_for_block_move (size, reg, align)
+ rtx size, reg;
+ rtx align;
+{
+ rtx xops[3];
+
+ xops[0] = reg;
+ xops[1] = size;
+ xops[2] = align;
+
+ if (GET_CODE (size) != REG)
+ {
+ /* Constant size: fold the subtraction at compile time and
+ materialize SIZE - ALIGN with a single `set'. */
+ xops[1]
+ = gen_rtx (CONST_INT, VOIDmode, INTVAL (size) - INTVAL (align));
+ output_asm_insn ("set %1,%0", xops);
+ }
+ else
+ /* Runtime size: subtract ALIGN in the output register. */
+ output_asm_insn ("sub %1,%2,%0", xops);
+}
+
+/* Emit code to perform a block move.
+
+ OPERANDS[0] is the destination.
+ OPERANDS[1] is the source.
+ OPERANDS[2] is the size.
+ OPERANDS[3] is the alignment safe to use.
+ OPERANDS[4] is a register we can safely clobber as a temp. */
+
+char *
+output_block_move (operands)
+ rtx *operands;
+{
+ /* A vector for our computed operands. Note that load_output_address
+ makes use of (and can clobber) up to the 8th element of this vector. */
+ rtx xoperands[10];
+ rtx zoperands[10];
+ /* Counter used to generate unique local labels across all block
+ moves emitted for this translation unit. */
+ static int movstrsi_label = 0;
+ int i;
+ rtx temp1 = operands[4];
+ rtx sizertx = operands[2];
+ rtx alignrtx = operands[3];
+ int align = INTVAL (alignrtx);
+
+ xoperands[0] = operands[0];
+ xoperands[1] = operands[1];
+ xoperands[2] = temp1;
+
+ /* We can't move more than this many bytes at a time
+ because we have only one register to move them through. */
+ if (align > GET_MODE_SIZE (GET_MODE (temp1)))
+ {
+ align = GET_MODE_SIZE (GET_MODE (temp1));
+ alignrtx = gen_rtx (CONST_INT, VOIDmode, GET_MODE_SIZE (GET_MODE (temp1)));
+ }
+
+ /* If the size isn't known to be a multiple of the alignment,
+ we have to do it in smaller pieces. If we could determine that
+ the size was a multiple of 2 (or whatever), we could be smarter
+ about this. */
+ if (GET_CODE (sizertx) != CONST_INT)
+ align = 1;
+ else
+ {
+ int size = INTVAL (sizertx);
+ while (size % align)
+ align >>= 1;
+ }
+
+ if (align != INTVAL (alignrtx))
+ alignrtx = gen_rtx (CONST_INT, VOIDmode, align);
+
+ /* Recognize special cases of block moves. These occur
+ when GNU C++ is forced to treat something as BLKmode
+ to keep it in memory, when its mode could be represented
+ with something smaller.
+
+ We cannot do this for global variables, since we don't know
+ what pages they don't cross. Sigh. */
+ if (GET_CODE (sizertx) == CONST_INT && INTVAL (sizertx) <= 16)
+ {
+ int size = INTVAL (sizertx);
+
+ if (align == 1)
+ {
+ if (memory_address_p (QImode,
+ plus_constant_for_output (xoperands[0], size))
+ && memory_address_p (QImode,
+ plus_constant_for_output (xoperands[1],
+ size)))
+ {
+ /* We will store different integers into this particular RTX. */
+ /* NOTE: INTVAL of this scratch CONST_INT is rewritten in
+ place on each loop iteration -- it must never be shared. */
+ xoperands[2] = rtx_alloc (CONST_INT);
+ PUT_MODE (xoperands[2], VOIDmode);
+ for (i = size-1; i >= 0; i--)
+ {
+ INTVAL (xoperands[2]) = i;
+ output_asm_insn ("ldub [%a1+%2],%%g1\n\tstb %%g1,[%a0+%2]",
+ xoperands);
+ }
+ return "";
+ }
+ }
+ else if (align == 2)
+ {
+ if (memory_address_p (HImode,
+ plus_constant_for_output (xoperands[0], size))
+ && memory_address_p (HImode,
+ plus_constant_for_output (xoperands[1],
+ size)))
+ {
+ /* We will store different integers into this particular RTX. */
+ xoperands[2] = rtx_alloc (CONST_INT);
+ PUT_MODE (xoperands[2], VOIDmode);
+ for (i = (size>>1)-1; i >= 0; i--)
+ {
+ INTVAL (xoperands[2]) = i<<1;
+ output_asm_insn ("lduh [%a1+%2],%%g1\n\tsth %%g1,[%a0+%2]",
+ xoperands);
+ }
+ return "";
+ }
+ }
+ else
+ {
+ if (memory_address_p (SImode,
+ plus_constant_for_output (xoperands[0], size))
+ && memory_address_p (SImode,
+ plus_constant_for_output (xoperands[1],
+ size)))
+ {
+ /* We will store different integers into this particular RTX. */
+ xoperands[2] = rtx_alloc (CONST_INT);
+ PUT_MODE (xoperands[2], VOIDmode);
+ for (i = (size>>2)-1; i >= 0; i--)
+ {
+ INTVAL (xoperands[2]) = i<<2;
+ output_asm_insn ("ld [%a1+%2],%%g1\n\tst %%g1,[%a0+%2]",
+ xoperands);
+ }
+ return "";
+ }
+ }
+ }
+
+ /* General case: emit a copy loop. Slot 3 is the loop-top label
+ number, slot 4 the step, slot 5 the loop-exit label number. */
+ xoperands[3] = gen_rtx (CONST_INT, VOIDmode, movstrsi_label++);
+ xoperands[4] = gen_rtx (CONST_INT, VOIDmode, align);
+ xoperands[5] = gen_rtx (CONST_INT, VOIDmode, movstrsi_label++);
+
+ /* This is the size of the transfer.
+ Either use the register which already contains the size,
+ or use a free register (used by no operands).
+ Also emit code to decrement the size value by ALIGN. */
+ output_size_for_block_move (sizertx, temp1, alignrtx);
+
+ /* Must handle the case when the size is zero or negative, so the first thing
+ we do is compare the size against zero, and only copy bytes if it is
+ zero or greater. Note that we have already subtracted off the alignment
+ once, so we must copy 1 alignment worth of bytes if the size is zero
+ here.
+
+ The SUN assembler complains about labels in branch delay slots, so we
+ do this before outputing the load address, so that there will always
+ be a harmless insn between the branch here and the next label emitted
+ below. */
+
+#ifdef NO_UNDERSCORES
+ output_asm_insn ("cmp %2,0\n\tbl .Lm%5", xoperands);
+#else
+ output_asm_insn ("cmp %2,0\n\tbl Lm%5", xoperands);
+#endif
+
+ /* Compute the biased destination address (dest + align) into %0;
+ output_load_address may clobber zoperands[6..8]. */
+ zoperands[0] = operands[0];
+ zoperands[3] = plus_constant_for_output (operands[0], align);
+ output_load_address (zoperands);
+
+ /* ??? This might be much faster if the loops below were preconditioned
+ and unrolled.
+
+ That is, at run time, copy enough bytes one at a time to ensure that the
+ target and source addresses are aligned to the the largest possible
+ alignment. Then use a preconditioned unrolled loop to copy say 16
+ bytes at a time. Then copy bytes one at a time until finish the rest. */
+
+ /* Output the first label separately, so that it is spaced properly. */
+
+#ifdef NO_UNDERSCORES
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, ".Lm", INTVAL (xoperands[3]));
+#else
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "Lm", INTVAL (xoperands[3]));
+#endif
+
+ /* The copy loop proper: load, decrement the count, loop while the
+ count is non-negative, storing in the branch delay slot. */
+#ifdef NO_UNDERSCORES
+ if (align == 1)
+ output_asm_insn ("ldub [%1+%2],%%g1\n\tsubcc %2,%4,%2\n\tbge .Lm%3\n\tstb %%g1,[%0+%2]\n.Lm%5:", xoperands);
+ else if (align == 2)
+ output_asm_insn ("lduh [%1+%2],%%g1\n\tsubcc %2,%4,%2\n\tbge .Lm%3\n\tsth %%g1,[%0+%2]\n.Lm%5:", xoperands);
+ else
+ output_asm_insn ("ld [%1+%2],%%g1\n\tsubcc %2,%4,%2\n\tbge .Lm%3\n\tst %%g1,[%0+%2]\n.Lm%5:", xoperands);
+ return "";
+#else
+ if (align == 1)
+ output_asm_insn ("ldub [%1+%2],%%g1\n\tsubcc %2,%4,%2\n\tbge Lm%3\n\tstb %%g1,[%0+%2]\nLm%5:", xoperands);
+ else if (align == 2)
+ output_asm_insn ("lduh [%1+%2],%%g1\n\tsubcc %2,%4,%2\n\tbge Lm%3\n\tsth %%g1,[%0+%2]\nLm%5:", xoperands);
+ else
+ output_asm_insn ("ld [%1+%2],%%g1\n\tsubcc %2,%4,%2\n\tbge Lm%3\n\tst %%g1,[%0+%2]\nLm%5:", xoperands);
+ return "";
+#endif
+}
+\f
+/* Output reasonable peephole for set-on-condition-code insns.
+ Note that these insns assume a particular way of defining
+ labels. Therefore, *both* sparc.h and this function must
+ be changed if a new syntax is needed. */
+
+char *
+output_scc_insn (operands, insn)
+ rtx operands[];
+ rtx insn;
+{
+ static char string[100];
+ rtx label = 0, next = insn;
+ int need_label = 0;
+
+ /* Try doing a jump optimization which jump.c can't do for us
+ because we did not expose that setcc works by using branches.
+
+ If this scc insn is followed by an unconditional branch, then have
+ the jump insn emitted here jump to that location, instead of to
+ the end of the scc sequence as usual. */
+
+ /* Scan forward over labels and notes to find the insn that follows. */
+ do
+ {
+ if (GET_CODE (next) == CODE_LABEL)
+ label = next;
+ next = NEXT_INSN (next);
+ if (next == 0)
+ break;
+ }
+ while (GET_CODE (next) == NOTE || GET_CODE (next) == CODE_LABEL);
+
+ /* If we are in a sequence, and the following insn is a sequence also,
+ then just following the current insn's next field will take us to the
+ first insn of the next sequence, which is the wrong place. We don't
+ want to optimize with a branch that has had its delay slot filled.
+ Avoid this by verifying that NEXT_INSN (PREV_INSN (next)) == next
+ which fails only if NEXT is such a branch. */
+
+ if (next && GET_CODE (next) == JUMP_INSN && simplejump_p (next)
+ && (! final_sequence || NEXT_INSN (PREV_INSN (next)) == next))
+ label = JUMP_LABEL (next);
+ /* If not optimizing, jump label fields are not set. To be safe, always
+ check here to whether label is still zero. */
+ if (label == 0)
+ {
+ label = gen_label_rtx ();
+ need_label = 1;
+ }
+
+ /* Account for the %l2 reference emitted below so the label is not
+ deleted as unused. */
+ LABEL_NUSES (label) += 1;
+
+ /* Operand 2 is consumed as %l2 by output_cbranch / the templates. */
+ operands[2] = label;
+
+ /* If we are in a delay slot, assume it is the delay slot of an fpcc
+ insn since our type isn't allowed anywhere else. */
+
+ /* ??? Fpcc instructions no longer have delay slots, so this code is
+ probably obsolete. */
+
+ /* The fastest way to emit code for this is an annulled branch followed
+ by two move insns. This will take two cycles if the branch is taken,
+ and three cycles if the branch is not taken.
+
+ However, if we are in the delay slot of another branch, this won't work,
+ because we can't put a branch in the delay slot of another branch.
+ The above sequence would effectively take 3 or 4 cycles respectively
+ since a no op would have be inserted between the two branches.
+ In this case, we want to emit a move, annulled branch, and then the
+ second move. This sequence always takes 3 cycles, and hence is faster
+ when we are in a branch delay slot. */
+
+ if (final_sequence)
+ {
+ strcpy (string, "mov 0,%0\n\t");
+ strcat (string, output_cbranch (operands[1], 2, 0, 1, 0));
+ strcat (string, "\n\tmov 1,%0");
+ }
+ else
+ {
+ strcpy (string, output_cbranch (operands[1], 2, 0, 1, 0));
+ strcat (string, "\n\tmov 1,%0\n\tmov 0,%0");
+ }
+
+ /* Emit the target label ourselves when no suitable one existed. */
+ if (need_label)
+ strcat (string, "\n%l2:");
+
+ return string;
+}
+\f
+/* Vectors to keep interesting information about registers where
+ it can easily be got. Each entry is a bitmask indexed by
+ (int) machine_mode: bit M set means mode M is allowed in that
+ hard register. */
+
+/* Modes for condition codes. */
+#define C_MODES \
+ ((1 << (int) CCmode) | (1 << (int) CC_NOOVmode) | (1 << (int) CCFPmode))
+
+/* Modes for single-word (and smaller) quantities. */
+#define S_MODES \
+ (~C_MODES \
+ & ~ ((1 << (int) DImode) | (1 << (int) TImode) \
+ | (1 << (int) DFmode) | (1 << (int) TFmode)))
+
+/* Modes for double-word (and smaller) quantities. */
+#define D_MODES \
+ (~C_MODES \
+ & ~ ((1 << (int) TImode) | (1 << (int) TFmode)))
+
+/* Modes for quad-word quantities. */
+#define T_MODES (~C_MODES)
+
+/* Modes for single-float quantities. */
+#define SF_MODES ((1 << (int) SFmode))
+
+/* Modes for double-float quantities. */
+#define DF_MODES (SF_MODES | (1 << (int) DFmode) | (1 << (int) SCmode))
+
+/* Modes for quad-float quantities. */
+#define TF_MODES (DF_MODES | (1 << (int) TFmode) | (1 << (int) DCmode))
+
+/* Value is 1 if register/mode pair is acceptable on sparc.
+ The funny mixture of D and T modes is because integer operations
+ do not specially operate on tetra quantities, so non-quad-aligned
+ registers can hold quadword quantities (except %o4 and %i4 because
+ they cross fixed registers). */
+
+int hard_regno_mode_ok[] = {
+ C_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
+ T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
+ T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
+ T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
+
+ TF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
+ TF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
+ TF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
+ TF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES};
+\f
+#ifdef __GNUC__
+inline
+#endif
+/* Write assembler code to FILE to save those registers in [LOW, HIGH)
+ that are live and not call-used, at BASE+OFFSET plus 4 bytes per
+ slot already consumed. Registers are scanned as even/odd pairs;
+ slots are always consumed in pairs (n_fregs advances by 2 even for
+ a single-word store) so every pair slot stays doubleword-aligned
+ for std. Returns the updated slot count N_FREGS. Note the comma
+ operator joining each fprintf with its count update, so the two
+ form one statement of the braceless if/else chain. */
+static int
+save_regs (file, low, high, base, offset, n_fregs)
+ FILE *file;
+ int low, high;
+ char *base;
+ int offset;
+ int n_fregs;
+{
+ int i;
+
+ for (i = low; i < high; i += 2)
+ {
+ if (regs_ever_live[i] && ! call_used_regs[i])
+ /* Both registers of the pair live: one doubleword store. */
+ if (regs_ever_live[i+1] && ! call_used_regs[i+1])
+ fprintf (file, "\tstd %s,[%s+%d]\n",
+ reg_names[i], base, offset + 4 * n_fregs),
+ n_fregs += 2;
+ else
+ /* Only the even register: single-word store. */
+ fprintf (file, "\tst %s,[%s+%d]\n",
+ reg_names[i], base, offset + 4 * n_fregs),
+ n_fregs += 2;
+ else if (regs_ever_live[i+1] && ! call_used_regs[i+1])
+ /* Only the odd register: single-word store. */
+ fprintf (file, "\tst %s,[%s+%d]\n",
+ reg_names[i+1], base, offset + 4 * n_fregs),
+ n_fregs += 2;
+ }
+ return n_fregs;
+}
+
+#ifdef __GNUC__
+inline
+#endif
+/* Inverse of save_regs: reload the registers in [LOW, HIGH) that are
+ live and not call-used from BASE+OFFSET plus 4 bytes per slot
+ already consumed, advancing N_FREGS by 2 per pair slot exactly as
+ save_regs did, and returning the updated count. (The declaration
+ of n_fregs was previously missing, leaving it to default to int
+ under K&R rules; declare it explicitly for consistency with
+ save_regs.) */
+static int
+restore_regs (file, low, high, base, offset, n_fregs)
+ FILE *file;
+ int low, high;
+ char *base;
+ int offset;
+ int n_fregs;
+{
+ int i;
+
+ for (i = low; i < high; i += 2)
+ {
+ if (regs_ever_live[i] && ! call_used_regs[i])
+ /* Both registers of the pair live: one doubleword load. */
+ if (regs_ever_live[i+1] && ! call_used_regs[i+1])
+ fprintf (file, "\tldd [%s+%d], %s\n",
+ base, offset + 4 * n_fregs, reg_names[i]),
+ n_fregs += 2;
+ else
+ /* Only the even register: single-word load. */
+ fprintf (file, "\tld [%s+%d],%s\n",
+ base, offset + 4 * n_fregs, reg_names[i]),
+ n_fregs += 2;
+ else if (regs_ever_live[i+1] && ! call_used_regs[i+1])
+ /* Only the odd register: single-word load. */
+ fprintf (file, "\tld [%s+%d],%s\n",
+ base, offset + 4 * n_fregs, reg_names[i+1]),
+ n_fregs += 2;
+ }
+ return n_fregs;
+}
+
+/* Static variables we want to share between prologue and epilogue. */
+
+/* Number of live floating point registers needed to be saved.
+ Set by compute_frame_size. */
+static int num_fregs;
+
+/* Nonzero if any floating point register was ever used. */
+/* NOTE(review): compute_frame_size declares a local of the same name
+ that shadows this variable, so it is never written there -- confirm
+ whether the epilogue actually relies on it. */
+static int fregs_ever_live;
+
+/* Compute the total stack frame size for a function with SIZE bytes
+ of locals; LEAF_FUNCTION is nonzero for a leaf. Sets the file-level
+ apparent_fsize, actual_fsize and num_fregs as side effects, and
+ returns actual_fsize. */
+int
+compute_frame_size (size, leaf_function)
+ int size;
+ int leaf_function;
+{
+ /* NOTE(review): this local shadows the file-static fregs_ever_live
+ declared above, so that static is never updated here -- confirm
+ this is intended. */
+ int fregs_ever_live = 0;
+ int n_fregs = 0, i;
+ int outgoing_args_size = (current_function_outgoing_args_size
+ + REG_PARM_STACK_SPACE (current_function_decl));
+
+ /* Round the local-variable area up to a doubleword boundary. */
+ apparent_fsize = ((size) + 7 - STARTING_FRAME_OFFSET) & -8;
+ /* Registers 32 and up are the floating point registers. */
+ for (i = 32; i < FIRST_PSEUDO_REGISTER; i += 2)
+ fregs_ever_live |= regs_ever_live[i]|regs_ever_live[i+1];
+
+ /* Count the pair slots needed to save live call-saved fp regs. */
+ if (TARGET_EPILOGUE && fregs_ever_live)
+ {
+ for (i = 32; i < FIRST_PSEUDO_REGISTER; i += 2)
+ if ((regs_ever_live[i] && ! call_used_regs[i])
+ || (regs_ever_live[i+1] && ! call_used_regs[i+1]))
+ n_fregs += 2;
+ }
+
+ /* Set up values for use in `function_epilogue'. */
+ num_fregs = n_fregs;
+
+ apparent_fsize += (outgoing_args_size+7) & -8;
+ /* A leaf with no fp saves and no real locals needs no frame. */
+ if (leaf_function && n_fregs == 0
+ && apparent_fsize == (REG_PARM_STACK_SPACE (current_function_decl)
+ - STARTING_FRAME_OFFSET))
+ apparent_fsize = 0;
+
+ actual_fsize = apparent_fsize + n_fregs*4;
+
+ /* Make sure nothing can clobber our register windows.
+ If a SAVE must be done, or there is a stack-local variable,
+ the register window area must be allocated. */
+ if (leaf_function == 0 || size > 0)
+ actual_fsize += (16 * UNITS_PER_WORD)+8;
+
+ return actual_fsize;
+}
+
+/* Output the assembly for the function prologue to FILE.  SIZE is the
+   number of bytes of local variables; LEAF_FUNCTION is nonzero when
+   the function makes no calls.  Also initializes frame_base_name,
+   actual_fsize and leaf_label for the rest of the output machinery.
+
+   Fix: the K&R parameter list previously omitted the declaration of
+   leaf_function (implicit int); it is now declared explicitly.  */
+void
+output_function_prologue (file, size, leaf_function)
+     FILE *file;
+     int size;
+     int leaf_function;
+{
+  /* Leaf functions address locals off %sp (biased by 80); others use
+     the frame pointer established by SAVE.  */
+  if (leaf_function)
+    frame_base_name = "%sp+80";
+  else
+    frame_base_name = "%fp";
+
+  actual_fsize = compute_frame_size (size, leaf_function);
+
+  fprintf (file, "\t!#PROLOGUE# 0\n");
+  if (actual_fsize == 0) /* do nothing.  */ ;
+  else if (actual_fsize < 4096)
+    {
+      /* The frame size fits in a 13-bit immediate.  */
+      if (! leaf_function)
+	fprintf (file, "\tsave %%sp,-%d,%%sp\n", actual_fsize);
+      else
+	fprintf (file, "\tadd %%sp,-%d,%%sp\n", actual_fsize);
+    }
+  else if (! leaf_function)
+    {
+      /* Need to use actual_fsize, since we are also allocating space for
+	 our callee (and our own register save area).  */
+      fprintf (file, "\tsethi %%hi(%d),%%g1\n\tor %%g1,%%lo(%d),%%g1\n",
+	       -actual_fsize, -actual_fsize);
+      fprintf (file, "\tsave %%sp,%%g1,%%sp\n");
+    }
+  else
+    {
+      /* Put pointer to parameters into %g4, and allocate
+	 frame space using result computed into %g1.  actual_fsize
+	 used instead of apparent_fsize for reasons stated above.
+	 This case is not implemented yet.  */
+      abort ();
+
+      fprintf (file, "\tsethi %%hi(%d),%%g1\n\tor %%g1,%%lo(%d),%%g1\n",
+	       -actual_fsize, -actual_fsize);
+      fprintf (file, "\tadd %%sp,64,%%g4\n\tadd %%sp,%%g1,%%sp\n");
+    }
+
+  /* If doing anything with PIC, do it now.
+     NOTE(review): the guard tests !flag_pic, which contradicts this
+     comment -- confirm whether the sense is inverted.  */
+  if (! flag_pic)
+    fprintf (file, "\t!#PROLOGUE# 1\n");
+
+  /* Figure out where to save any special (fp) registers.  */
+  if (num_fregs)
+    {
+      int offset, n_fregs = num_fregs;
+
+      if (! leaf_function)
+	offset = -apparent_fsize;
+      else
+	offset = 0;
+
+      if (TARGET_EPILOGUE && ! leaf_function)
+	n_fregs = save_regs (file, 0, 16, frame_base_name, offset, 0);
+      else if (leaf_function)
+	n_fregs = save_regs (file, 0, 32, frame_base_name, offset, 0);
+      if (TARGET_EPILOGUE)
+	save_regs (file, 32, FIRST_PSEUDO_REGISTER,
+		   frame_base_name, offset, n_fregs);
+    }
+
+  /* NOTE(review): both stores write reg_names[0] (at offsets -16 and
+     -12); storing the same register twice looks like a copy-paste
+     slip -- confirm which registers register 62 requires here.  */
+  if (regs_ever_live[62])
+    fprintf (file, "\tst %s,[%s-16]\n\tst %s,[%s-12]\n",
+	     reg_names[0], frame_base_name,
+	     reg_names[0], frame_base_name);
+
+  leaf_label = 0;
+  if (leaf_function && actual_fsize != 0)
+    {
+      /* warning ("leaf procedure with frame size %d", actual_fsize); */
+      if (! TARGET_EPILOGUE)
+	leaf_label = gen_label_rtx ();
+    }
+}
+
+/* Output the assembly for the function epilogue to FILE.  SIZE is the
+   local-variable size.  LEAF_FUNCTION is nonzero for a leaf; a NEGATIVE
+   value means we were called on behalf of a tail call (see the disabled
+   output_tail_call below, which passes -1).  TRUE_EPILOGUE is nonzero
+   when this is the real end of the function rather than tail-call
+   cleanup.
+
+   Fixes: the K&R parameter list previously omitted the declarations of
+   leaf_function and true_epilogue (implicit int); they are now declared
+   explicitly.  The unused locals `n_fregs' and `i' (the register-restore
+   code below declares its own n_fregs) have been removed.  */
+void
+output_function_epilogue (file, size, leaf_function, true_epilogue)
+     FILE *file;
+     int size;
+     int leaf_function;
+     int true_epilogue;
+{
+  char *ret;
+
+  /* If the prologue deferred the leaf epilogue behind a label, emit
+     that label now.  A tail call must never arrive here with one
+     pending.  */
+  if (leaf_label)
+    {
+      if (leaf_function < 0)
+	abort ();
+      emit_label_after (leaf_label, get_last_insn ());
+      final_scan_insn (get_last_insn (), file, 0, 0, 1);
+    }
+
+  /* Restore whatever save_regs stored, using the same ranges and
+     offsets as the prologue.  */
+  if (num_fregs)
+    {
+      int offset, n_fregs = num_fregs;
+
+      if (! leaf_function)
+	offset = -apparent_fsize;
+      else
+	offset = 0;
+
+      if (TARGET_EPILOGUE && ! leaf_function)
+	n_fregs = restore_regs (file, 0, 16, frame_base_name, offset, 0);
+      else if (leaf_function)
+	n_fregs = restore_regs (file, 0, 32, frame_base_name, offset, 0);
+      if (TARGET_EPILOGUE)
+	restore_regs (file, 32, FIRST_PSEUDO_REGISTER,
+		      frame_base_name, offset, n_fregs);
+    }
+
+  /* Work out how to skip the caller's unimp instruction if required. */
+  if (leaf_function)
+    ret = (current_function_returns_struct ? "jmp %o7+12" : "retl");
+  else
+    ret = (current_function_returns_struct ? "jmp %i7+12" : "ret");
+
+  /* Tail calls have to do this work themselves. */
+  if (leaf_function >= 0)
+    {
+      if (TARGET_EPILOGUE || leaf_label)
+	{
+	  /* Temporarily clear the epilogue flag so recursive output
+	     machinery does not try to emit another epilogue.  */
+	  int old_target_epilogue = TARGET_EPILOGUE;
+	  target_flags &= ~old_target_epilogue;
+
+	  if (! leaf_function)
+	    {
+	      /* If we wound up with things in our delay slot,
+		 flush them here.  */
+	      if (current_function_epilogue_delay_list)
+		{
+		  rtx insn = emit_jump_insn_after (gen_rtx (RETURN, VOIDmode),
+						   get_last_insn ());
+		  PATTERN (insn) = gen_rtx (PARALLEL, VOIDmode,
+					gen_rtvec (2,
+					 PATTERN (XEXP (current_function_epilogue_delay_list, 0)),
+					 PATTERN (insn)));
+		  final_scan_insn (insn, file, 1, 0, 1);
+		}
+	      else
+		fprintf (file, "\t%s\n\trestore\n", ret);
+	    }
+	  else if (actual_fsize < 4096)
+	    {
+	      /* Deallocate the leaf frame in the delay slot.  */
+	      if (current_function_epilogue_delay_list)
+		{
+		  fprintf (file, "\t%s\n", ret);
+		  final_scan_insn (XEXP (current_function_epilogue_delay_list, 0),
+				   file, 1, 0, 1);
+		}
+	      else
+		fprintf (file, "\t%s\n\tadd %%sp,%d,%%sp\n",
+			 ret, actual_fsize);
+	    }
+	  else
+	    {
+	      /* Big frame: need %g1 to build the size, so no room for a
+		 delay-slot insn.  */
+	      if (current_function_epilogue_delay_list)
+		abort ();
+	      fprintf (file, "\tsethi %%hi(%d),%%g1\n\tor %%g1,%%lo(%d),%%g1\n\t%s\n\tadd %%sp,%%g1,%%sp\n",
+		       actual_fsize, actual_fsize, ret);
+	    }
+	  target_flags |= old_target_epilogue;
+	}
+    }
+  else if (true_epilogue)
+    {
+      /* We may still need a return insn!  Somebody could jump around
+	 the tail-calls that this function makes.  */
+      if (TARGET_EPILOGUE)
+	{
+	  rtx last = get_last_insn ();
+
+	  last = prev_nonnote_insn (last);
+	  if (last == 0
+	      || (GET_CODE (last) != JUMP_INSN && GET_CODE (last) != BARRIER))
+	    fprintf (file, "\t%s\n\tnop\n", ret);
+	}
+    }
+}
+\f
+/* Return the string to output a conditional branch to LABEL, which is
+ the operand number of the label. OP is the conditional expression. The
+ mode of register 0 says what kind of comparison we made.
+
+ REVERSED is non-zero if we should reverse the sense of the comparison.
+
+ ANNUL is non-zero if we should generate an annulling branch.
+
+ NOOP is non-zero if we have to follow this branch by a noop. */
+
+char *
+output_cbranch (op, label, reversed, annul, noop)
+     rtx op;
+     int label;
+     int reversed, annul, noop;
+{
+  /* Fix: worst case is "nop\n\t" (5) + "fbuge" (5) + ",a" (2)
+     + " %lN" (4) + "\n\tnop" (5) + NUL = 22 bytes, which overflowed
+     the previous 20-byte buffer.  */
+  static char string[32];
+  enum rtx_code code = GET_CODE (op);
+  enum machine_mode mode = GET_MODE (XEXP (op, 0));
+  static char labelno[] = " %lX";
+
+  /* ??? FP branches can not be preceded by another floating point insn.
+     Because there is currently no concept of pre-delay slots, we can fix
+     this only by always emitting a nop before a floating point branch.  */
+
+  if (mode == CCFPmode)
+    strcpy (string, "nop\n\t");
+
+  /* If not floating-point or if EQ or NE, we can just reverse the code.  */
+  if (reversed && (mode != CCFPmode || code == EQ || code == NE))
+    code = reverse_condition (code), reversed = 0;
+
+  /* Start by writing the branch condition.  (The FP cases strcat onto
+     the "nop\n\t" prefix; the integer cases strcpy from the start.)  */
+  switch (code)
+    {
+    case NE:
+      if (mode == CCFPmode)
+	strcat (string, "fbne");
+      else
+	strcpy (string, "bne");
+      break;
+
+    case EQ:
+      if (mode == CCFPmode)
+	strcat (string, "fbe");
+      else
+	strcpy (string, "be");
+      break;
+
+    case GE:
+      if (mode == CCFPmode)
+	{
+	  /* Still-reversed FP comparison: use the unordered variant.  */
+	  if (reversed)
+	    strcat (string, "fbul");
+	  else
+	    strcat (string, "fbge");
+	}
+      else if (mode == CC_NOOVmode)
+	strcpy (string, "bpos");
+      else
+	strcpy (string, "bge");
+      break;
+
+    case GT:
+      if (mode == CCFPmode)
+	{
+	  if (reversed)
+	    strcat (string, "fbule");
+	  else
+	    strcat (string, "fbg");
+	}
+      else
+	strcpy (string, "bg");
+      break;
+
+    case LE:
+      if (mode == CCFPmode)
+	{
+	  if (reversed)
+	    strcat (string, "fbug");
+	  else
+	    strcat (string, "fble");
+	}
+      else
+	strcpy (string, "ble");
+      break;
+
+    case LT:
+      if (mode == CCFPmode)
+	{
+	  if (reversed)
+	    strcat (string, "fbuge");
+	  else
+	    strcat (string, "fbl");
+	}
+      else if (mode == CC_NOOVmode)
+	strcpy (string, "bneg");
+      else
+	strcpy (string, "bl");
+      break;
+
+    case GEU:
+      strcpy (string, "bgeu");
+      break;
+
+    case GTU:
+      strcpy (string, "bgu");
+      break;
+
+    case LEU:
+      strcpy (string, "bleu");
+      break;
+
+    case LTU:
+      strcpy (string, "blu");
+      break;
+
+    default:
+      /* Fix: an unhandled comparison code previously fell through and
+	 reused stale buffer contents; fail loudly instead.  */
+      abort ();
+    }
+
+  /* Now add the annulling, the label, and a possible noop.  */
+  if (annul)
+    strcat (string, ",a");
+
+  /* Patch the operand number into the " %lX" template.  */
+  labelno[3] = label + '0';
+  strcat (string, labelno);
+
+  if (noop)
+    strcat (string, "\n\tnop");
+
+  return string;
+}
+
+/* Return the assembler template for a function-return insn, based on
+   the leaf_label / leaf_function / actual_fsize globals set by the
+   prologue code.  May set operands[0] for the template to consume.  */
+char *
+output_return (operands)
+     rtx *operands;
+{
+  /* A deferred leaf epilogue exists: branch (annulled) to its label.  */
+  if (leaf_label)
+    {
+      operands[0] = leaf_label;
+      return "b,a %l0";
+    }
+
+  /* Non-leaf: RESTORE unwinds the register window for us.  */
+  if (! leaf_function)
+    return current_function_returns_struct
+	   ? "jmp %%i7+12\n\trestore"
+	   : "ret\n\trestore";
+
+  /* Leaf: deallocate the frame by hand in the delay slot.  */
+  operands[0] = gen_rtx (CONST_INT, VOIDmode, actual_fsize);
+
+  if (actual_fsize < 4096)
+    return current_function_returns_struct
+	   ? "jmp %%o7+12\n\tadd %%sp,%0,%%sp"
+	   : "retl\n\tadd %%sp,%0,%%sp";
+
+  /* Frame too big for an immediate: build the size in %g1 first.  */
+  return current_function_returns_struct
+	 ? "sethi %%hi(%a0),%%g1\n\tor %%g1,%%lo(%a0),%%g1\n\tjmp %%o7+12\n\tadd %%sp,%%g1,%%sp"
+	 : "sethi %%hi(%a0),%%g1\n\tor %%g1,%%lo(%a0),%%g1\n\tretl\n\tadd %%sp,%%g1,%%sp";
+}
+
+/* Return the assembler template converting the SImode operand 1 into
+   the SFmode register operand 0.  */
+char *
+output_floatsisf2 (operands)
+     rtx *operands;
+{
+  /* Source already in memory: load it into the FP reg, then convert.  */
+  if (GET_CODE (operands[1]) == MEM)
+    return "ld %1,%0\n\tfitos %0,%0";
+
+  /* Source already in an FP register: convert directly.  */
+  if (FP_REG_P (operands[1]))
+    return "fitos %1,%0";
+
+  /* Source in a general register: bounce it through the stack.  */
+  return "st %r1,[%%fp-4]\n\tld [%%fp-4],%0\n\tfitos %0,%0";
+}
+
+/* Return the assembler template converting the SImode operand 1 into
+   the DFmode register operand 0.  Mirrors output_floatsisf2.  */
+char *
+output_floatsidf2 (operands)
+     rtx *operands;
+{
+  /* Source already in memory: load it into the FP reg, then convert.  */
+  if (GET_CODE (operands[1]) == MEM)
+    return "ld %1,%0\n\tfitod %0,%0";
+
+  /* Source already in an FP register: convert directly.  */
+  if (FP_REG_P (operands[1]))
+    return "fitod %1,%0";
+
+  /* Source in a general register: bounce it through the stack.  */
+  return "st %r1,[%%fp-4]\n\tld [%%fp-4],%0\n\tfitod %0,%0";
+}
+
+/* Return nonzero if tail calls are usable: true exactly when every
+   register numbered 32 or above is either fixed or call-clobbered.
+   The answer cannot change during a run, so it is computed once and
+   cached.  */
+int
+tail_call_valid_p ()
+{
+  static int checked = 0;
+  static int valid_p = 0;
+
+  if (! checked)
+    {
+      register int regno;
+
+      checked = 1;
+      valid_p = 1;
+      for (regno = 32; regno < FIRST_PSEUDO_REGISTER; regno++)
+	if (! fixed_regs[regno] && ! call_used_regs[regno])
+	  {
+	    /* Found a call-saved register we would have to preserve.  */
+	    valid_p = 0;
+	    break;
+	  }
+    }
+  return valid_p;
+}
+\f
+/* Leaf functions and non-leaf functions have different needs. */
+
+/* Allocation order used when the current function is a leaf.  */
+static int
+reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
+
+/* Allocation order used for ordinary (non-leaf) functions.  */
+static int
+reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
+
+/* Index 0 = leaf order, index 1 = non-leaf order; selected by
+   order_regs_for_local_alloc below.  */
+static int *reg_alloc_orders[] = {
+ reg_leaf_alloc_order,
+ reg_nonleaf_alloc_order};
+
+/* Install the register allocation order appropriate for the current
+   function, copying from reg_alloc_orders only when leaf-ness changed
+   since the last call.  NOTE(review): leaf-ness is inferred from
+   regs_ever_live[15]; presumably register 15 holds the call return
+   address, so its liveness implies "makes calls" -- confirm against
+   the register numbering.  */
+void
+order_regs_for_local_alloc ()
+{
+ static int last_order_nonleaf = 1;
+
+ if (regs_ever_live[15] != last_order_nonleaf)
+ {
+ last_order_nonleaf = !last_order_nonleaf;
+ bcopy (reg_alloc_orders[last_order_nonleaf], reg_alloc_order,
+ FIRST_PSEUDO_REGISTER * sizeof (int));
+ }
+}
+\f
+/* Machine dependent routines for the branch probability, arc profiling
+ code. */
+
+/* The label used by the arc profiling code. */
+
+static rtx profiler_label;
+
+/* Create the SYMBOL_REF for the arc-profile counter table ("*LPBX2")
+   once, so every output_arc_profiler call can share the same rtx.  */
+void
+init_arc_profiler ()
+{
+ /* Generate and save a copy of this so it can be shared. */
+ profiler_label = gen_rtx (SYMBOL_REF, Pmode, "*LPBX2");
+}
+
+/* Emit RTL after INSERT_AFTER that increments the 32-bit arc counter
+   stored at profiler_label + 4*ARCNO: set temp to the HIGH part of the
+   address, load the counter through a LO_SUM address, add 1, store it
+   back.  Because each insn is inserted immediately after INSERT_AFTER,
+   the emit calls below appear in REVERSE of execution order.  */
+void
+output_arc_profiler (arcno, insert_after)
+ int arcno;
+ rtx insert_after;
+{
+ /* Address of this arc's counter word in the table.  */
+ rtx profiler_target_addr
+ = gen_rtx (CONST, Pmode,
+ gen_rtx (PLUS, Pmode, profiler_label,
+ gen_rtx (CONST_INT, VOIDmode, 4 * arcno)));
+ register rtx profiler_reg = gen_reg_rtx (SImode);
+ register rtx temp = gen_reg_rtx (Pmode);
+ /* Memory reference through temp + %lo(addr).  */
+ register rtx profiler_target = gen_rtx (MEM, SImode,
+ gen_rtx (LO_SUM, Pmode, temp,
+ profiler_target_addr));
+ /* The insns are emitted from last to first after the insn insert_after.
+ Emit_insn_after is used because sometimes we want to put the
+ instrumentation code after the last insn of the function. */
+ emit_insn_after (gen_rtx (SET, VOIDmode, profiler_target, profiler_reg),
+ insert_after);
+ emit_insn_after (gen_rtx (SET, VOIDmode, profiler_reg,
+ gen_rtx (PLUS, SImode, profiler_reg, const1_rtx)),
+ insert_after);
+ emit_insn_after (gen_rtx (SET, VOIDmode, profiler_reg, profiler_target),
+ insert_after);
+ emit_insn_after (gen_rtx (SET, VOIDmode, temp,
+ gen_rtx (HIGH, Pmode, profiler_target_addr)),
+ insert_after);
+}
+\f
+/* All the remaining routines in this file have been turned off. */
+#if 0
+/* DISABLED (see the comment above this #if 0 block).  Emit a tail call
+   to the address in operands[0], flushing any delay-slot insn and the
+   epilogue work first (output_function_epilogue is called with
+   leaf_function == -1 to mark the tail-call case).  */
+char *
+output_tail_call (operands, insn)
+ rtx *operands;
+ rtx insn;
+{
+ int this_fsize = actual_fsize;
+ rtx next;
+ int need_nop_at_end = 0;
+
+ /* Skip any code labels following INSN.
+ NOTE(review): the loop body re-evaluates next_real_insn (insn)
+ rather than (next), so `next' never advances -- this would loop
+ forever if the first real insn is a label.  Harmless while the
+ code is #if 0'd, but fix before re-enabling.  */
+ next = next_real_insn (insn);
+ while (next && GET_CODE (next) == CODE_LABEL)
+ next = next_real_insn (insn);
+
+ if (final_sequence && this_fsize > 0)
+ {
+ rtx xoperands[1];
+
+ /* If we have to restore any registers, don't take any chances
+ restoring a register before we discharge it into
+ its home. If the frame size is only 88, we are guaranteed
+ that the epilogue will fit in the delay slot. */
+ rtx delay_insn = XVECEXP (final_sequence, 0, 1);
+ if (GET_CODE (PATTERN (delay_insn)) == SET)
+ {
+ rtx dest = SET_DEST (PATTERN (delay_insn));
+ if (GET_CODE (dest) == REG
+ && reg_mentioned_p (dest, insn))
+ abort ();
+ }
+ else if (GET_CODE (PATTERN (delay_insn)) == PARALLEL)
+ abort ();
+ xoperands[0] = operands[0];
+ final_scan_insn (delay_insn, asm_out_file, 0, 0, 1);
+ operands[0] = xoperands[0];
+ final_sequence = 0;
+ }
+
+ /* Make sure we are clear to return. */
+ output_function_epilogue (asm_out_file, get_frame_size (), -1, 0);
+
+ /* Strip the MEM. */
+ operands[0] = XEXP (operands[0], 0);
+
+ if (final_sequence == 0
+ && (next == 0
+ || GET_CODE (next) == CALL_INSN
+ || GET_CODE (next) == JUMP_INSN))
+ need_nop_at_end = 1;
+
+ if (flag_pic)
+ return output_pic_sequence_2 (2, 3, 0, "jmpl %%g1+%3", operands, need_nop_at_end);
+
+ if (GET_CODE (operands[0]) == REG)
+ output_asm_insn ("jmpl %a0,%%g0", operands);
+ else if (TARGET_TAIL_CALL)
+ {
+ /* We assume all labels will be within 16 MB of our call. */
+ if (need_nop_at_end || final_sequence)
+ output_asm_insn ("b %a0", operands);
+ else
+ output_asm_insn ("b,a %a0", operands);
+ }
+ else if (! final_sequence)
+ {
+ output_asm_insn ("sethi %%hi(%a0),%%g1\n\tjmpl %%g1+%%lo(%a0),%%g1",
+ operands);
+ }
+ else
+ {
+ /* Find a call-clobbered scratch register not mentioned in the
+ delay-slot insn to hold the target address. */
+ int i;
+ rtx x = PATTERN (XVECEXP (final_sequence, 0, 1));
+ for (i = 1; i < 32; i++)
+ if ((i == 1 || ! fixed_regs[i])
+ && call_used_regs[i]
+ && ! refers_to_regno_p (i, i+1, x, 0))
+ break;
+ if (i == 32)
+ abort ();
+ operands[1] = gen_rtx (REG, SImode, i);
+ output_asm_insn ("sethi %%hi(%a0),%1\n\tjmpl %1+%%lo(%a0),%1", operands);
+ }
+ return (need_nop_at_end ? "nop" : "");
+}
+#endif
+\f
+/* Print operand X (an rtx) in assembler syntax to file FILE.
+ CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
+ For `%' followed by punctuation, CODE is the punctuation and X is null. */
+
+void
+print_operand (file, x, code)
+ FILE *file;
+ rtx x;
+ int code;
+{
+ /* First handle the modifier letters; most of them print something
+ and return, `r' with a nonzero operand and `0' fall through to
+ the generic operand printing after the switch.  */
+ switch (code)
+ {
+ case '#':
+ /* Output a 'nop' if there's nothing for the delay slot. */
+ if (dbr_sequence_length () == 0)
+ fputs ("\n\tnop", file);
+ return;
+ case '*':
+ /* Output an annul flag if there's nothing for the delay slot. */
+ if (dbr_sequence_length () == 0)
+ fputs (",a", file);
+ return;
+ case 'Y':
+ /* Adjust the operand to take into account a RESTORE operation.
+ Registers 24..31 are renamed down by 16; presumably this maps
+ the %i registers to the corresponding %o registers of the
+ caller's window -- confirm against the register table. */
+ if (GET_CODE (x) != REG)
+ abort ();
+ if (REGNO (x) < 8)
+ fputs (reg_names[REGNO (x)], file);
+ else if (REGNO (x) >= 24 && REGNO (x) < 32)
+ fputs (reg_names[REGNO (x)-16], file);
+ else
+ abort ();
+ return;
+ case '@':
+ /* Print out what we are using as the frame pointer. This might
+ be %fp, or might be %sp+offset. */
+ fputs (frame_base_name, file);
+ return;
+ case 'R':
+ /* Print out the second register name of a register pair.
+ I.e., R (%o0) => %o1. */
+ fputs (reg_names[REGNO (x)+1], file);
+ return;
+ case 'm':
+ /* Print the operand's address only. */
+ output_address (XEXP (x, 0));
+ return;
+ case 'r':
+ /* In this case we need a register. Use %g0 if the
+ operand is const0_rtx. */
+ if (x == const0_rtx)
+ {
+ fputs ("%g0", file);
+ return;
+ }
+ else
+ break;
+
+ case 'A':
+ /* Print the mnemonic for a two-operand logical insn. */
+ switch (GET_CODE (x))
+ {
+ case IOR: fputs ("or", file); break;
+ case AND: fputs ("and", file); break;
+ case XOR: fputs ("xor", file); break;
+ default: abort ();
+ }
+ return;
+
+ case 'B':
+ /* Print the complemented form of the logical mnemonic. */
+ switch (GET_CODE (x))
+ {
+ case IOR: fputs ("orn", file); break;
+ case AND: fputs ("andn", file); break;
+ case XOR: fputs ("xnor", file); break;
+ default: abort ();
+ }
+ return;
+
+ case 'b':
+ {
+ /* Print a sign-extended character.  (The mask assumes int is
+ 32 bits wide.) */
+ int i = INTVAL (x) & 0xff;
+ if (i & 0x80)
+ i |= 0xffffff00;
+ fprintf (file, "%d", i);
+ return;
+ }
+
+ case 0:
+ /* Do nothing special. */
+ break;
+
+ default:
+ /* Undocumented flag. */
+ abort ();
+ }
+
+ /* Generic operand printing: register name, bracketed memory
+ reference, %hi/%lo expressions, wide constants, or a plain
+ constant expression. */
+ if (GET_CODE (x) == REG)
+ fputs (reg_names[REGNO (x)], file);
+ else if (GET_CODE (x) == MEM)
+ {
+ fputc ('[', file);
+ if (CONSTANT_P (XEXP (x, 0)))
+ /* Poor Sun assembler doesn't understand absolute addressing. */
+ fputs ("%g0+", file);
+ output_address (XEXP (x, 0));
+ fputc (']', file);
+ }
+ else if (GET_CODE (x) == HIGH)
+ {
+ fputs ("%hi(", file);
+ output_addr_const (file, XEXP (x, 0));
+ fputc (')', file);
+ }
+ else if (GET_CODE (x) == LO_SUM)
+ {
+ print_operand (file, XEXP (x, 0), 0);
+ fputs ("+%lo(", file);
+ output_addr_const (file, XEXP (x, 1));
+ fputc (')', file);
+ }
+ else if (GET_CODE (x) == CONST_DOUBLE)
+ {
+ /* Only constants that fit in 32 bits can be printed here. */
+ if (CONST_DOUBLE_HIGH (x) == 0)
+ fprintf (file, "%u", CONST_DOUBLE_LOW (x));
+ else if (CONST_DOUBLE_HIGH (x) == -1
+ && CONST_DOUBLE_LOW (x) < 0)
+ fprintf (file, "%d", CONST_DOUBLE_LOW (x));
+ else
+ abort ();
+ }
+ else { output_addr_const (file, x); }
+}
+\f
+/* This function outputs assembler code for VALUE to FILE, where VALUE is
+ a 64 bit (DImode) value. */
+
+/* ??? If there is a 64 bit counterpart to .word that the assembler
+ understands, then using that would simplify this code greatly. */
+
+void
+output_double_int (file, value)
+ FILE *file;
+ rtx value;
+{
+ /* Each branch emits the most-significant word first.
+ NOTE(review): this assumes a big-endian word order in memory --
+ confirm for the target. */
+ if (GET_CODE (value) == CONST_INT)
+ {
+ /* Sign-extend: emit an all-ones or all-zeros high word. */
+ if (INTVAL (value) < 0)
+ ASM_OUTPUT_INT (file, constm1_rtx);
+ else
+ ASM_OUTPUT_INT (file, const0_rtx);
+ ASM_OUTPUT_INT (file, value);
+ }
+ else if (GET_CODE (value) == CONST_DOUBLE)
+ {
+ ASM_OUTPUT_INT (file, gen_rtx (CONST_INT, VOIDmode,
+ CONST_DOUBLE_HIGH (value)));
+ ASM_OUTPUT_INT (file, gen_rtx (CONST_INT, VOIDmode,
+ CONST_DOUBLE_LOW (value)));
+ }
+ else if (GET_CODE (value) == SYMBOL_REF
+ || GET_CODE (value) == CONST
+ || GET_CODE (value) == PLUS)
+ {
+ /* Addresses are only 32 bits. */
+ ASM_OUTPUT_INT (file, const0_rtx);
+ ASM_OUTPUT_INT (file, value);
+ }
+ else
+ abort ();
+}
+
--- /dev/null
+/* Definitions of target machine for GNU compiler. Vax version.
+ Copyright (C) 1987, 1988, 1991 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+
+/* Names to predefine in the preprocessor for this target machine. */
+
+#define CPP_PREDEFINES "-Dvax -Dunix"
+
+/* If using g-format floating point, alter math.h. */
+
+#define CPP_SPEC "%{mg:-DGFLOAT}"
+
+/* Choose proper libraries depending on float format.
+ Note that there are no profiling libraries for g-format.
+ Also use -lg for the sake of dbx. */
+
+#define LIB_SPEC "%{g:-lg}\
+ %{mg:%{lm:-lmg} -lcg \
+ %{p:%eprofiling not supported with -mg\n}\
+ %{pg:%eprofiling not supported with -mg\n}}\
+ %{!mg:%{!p:%{!pg:-lc}}%{p:-lc_p}%{pg:-lc_p}}"
+
+/* Print subsidiary information on the compiler version in use. */
+
+#define TARGET_VERSION fprintf (stderr, " (vax)");
+
+/* Run-time compilation parameters selecting different hardware subsets. */
+
+extern int target_flags;
+
+/* Macros used in the machine description to test the flags. */
+
+/* Nonzero if compiling code that Unix assembler can assemble. */
+#define TARGET_UNIX_ASM (target_flags & 1)
+
+/* Nonzero if compiling with VAX-11 "C" style structure alignment */
+#define TARGET_VAXC_ALIGNMENT (target_flags & 2)
+
+/* Nonzero if compiling with `G'-format floating point */
+#define TARGET_G_FLOAT (target_flags & 4)
+
+/* Macro to define tables used to set the flags.
+ This is a list in braces of pairs in braces,
+ each pair being { "NAME", VALUE }
+ where VALUE is the bits to set or minus the bits to clear.
+ An empty string NAME is used to identify the default VALUE. */
+
+#define TARGET_SWITCHES \
+ { {"unix", 1}, \
+ {"gnu", -1}, \
+ {"vaxc-alignment", 2}, \
+ {"g", 4}, \
+ {"g-float", 4}, \
+ {"d", -4}, \
+ {"d-float", -4}, \
+ { "", TARGET_DEFAULT}}
+
+/* Default target_flags if no switches specified. */
+
+#ifndef TARGET_DEFAULT
+#define TARGET_DEFAULT 1
+#endif
+\f
+/* Target machine storage layout */
+
+/* Define this if most significant bit is lowest numbered
+ in instructions that operate on numbered bit-fields.
+ This is not true on the vax. */
+#define BITS_BIG_ENDIAN 0
+
+/* Define this if most significant byte of a word is the lowest numbered. */
+/* That is not true on the vax. */
+#define BYTES_BIG_ENDIAN 0
+
+/* Define this if most significant word of a multiword number is the lowest
+ numbered. */
+/* This is not true on the vax. */
+#define WORDS_BIG_ENDIAN 0
+
+/* Number of bits in an addressable storage unit */
+#define BITS_PER_UNIT 8
+
+/* Width in bits of a "word", which is the contents of a machine register.
+ Note that this is not necessarily the width of data type `int';
+ if using 16-bit ints on a 68000, this would still be 32.
+ But on a machine with 16-bit registers, this would be 16. */
+#define BITS_PER_WORD 32
+
+/* Width of a word, in units (bytes). */
+#define UNITS_PER_WORD 4
+
+/* Width in bits of a pointer.
+ See also the macro `Pmode' defined below. */
+#define POINTER_SIZE 32
+
+/* Allocation boundary (in *bits*) for storing arguments in argument list. */
+#define PARM_BOUNDARY 32
+
+/* Allocation boundary (in *bits*) for the code of a function. */
+#define FUNCTION_BOUNDARY 16
+
+/* Alignment of field after `int : 0' in a structure. */
+#define EMPTY_FIELD_BOUNDARY (TARGET_VAXC_ALIGNMENT ? 8 : 32)
+
+/* Every structure's size must be a multiple of this. */
+#define STRUCTURE_SIZE_BOUNDARY 8
+
+/* A bitfield declared as `int' forces `int' alignment for the struct. */
+#define PCC_BITFIELD_TYPE_MATTERS (! TARGET_VAXC_ALIGNMENT)
+
+/* No data type wants to be aligned rounder than this. */
+#define BIGGEST_ALIGNMENT 32
+
+/* No structure field wants to be aligned rounder than this. */
+#define BIGGEST_FIELD_ALIGNMENT (TARGET_VAXC_ALIGNMENT ? 8 : 32)
+
+/* Define this if move instructions will actually fail to work
+ when given unaligned data. */
+/* #define STRICT_ALIGNMENT */
+\f
+/* Standard register usage. */
+
+/* Number of actual hardware registers.
+ The hardware registers are assigned numbers for the compiler
+ from 0 to just below FIRST_PSEUDO_REGISTER.
+ All registers that the compiler knows about must be given numbers,
+ even those that are not normally considered general registers. */
+#define FIRST_PSEUDO_REGISTER 16
+
+/* 1 for registers that have pervasive standard uses
+ and are not available for the register allocator.
+ On the vax, these are the AP, FP, SP and PC. */
+#define FIXED_REGISTERS {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1}
+
+/* 1 for registers not available across function calls.
+ These must include the FIXED_REGISTERS and also any
+ registers that can be used without being saved.
+ The latter must include the registers where values are returned
+ and the register where structure-value addresses are passed.
+ Aside from that, you can include as many other registers as you like. */
+#define CALL_USED_REGISTERS {1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1}
+
+/* Return number of consecutive hard regs needed starting at reg REGNO
+ to hold something of mode MODE.
+ This is ordinarily the length in words of a value of mode MODE
+ but can be less for certain modes in special long registers.
+ On the vax, all registers are one word long. */
+#define HARD_REGNO_NREGS(REGNO, MODE) \
+ ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+
+/* Value is 1 if hard register REGNO can hold a value of machine-mode MODE.
+ On the vax, all registers can hold all modes. */
+#define HARD_REGNO_MODE_OK(REGNO, MODE) 1
+
+/* Value is 1 if it is a good idea to tie two pseudo registers
+ when one has mode MODE1 and one has mode MODE2.
+ If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
+ for any hard reg, then this must be 0 for correct output. */
+#define MODES_TIEABLE_P(MODE1, MODE2) 1
+
+/* Specify the registers used for certain standard purposes.
+ The values of these macros are register numbers. */
+
+/* Vax pc is overloaded on a register. */
+#define PC_REGNUM 15
+
+/* Register to use for pushing function arguments. */
+#define STACK_POINTER_REGNUM 14
+
+/* Base register for access to local variables of the function. */
+#define FRAME_POINTER_REGNUM 13
+
+/* Value should be nonzero if functions must have frame pointers.
+ Zero means the frame pointer need not be set up (and parms
+ may be accessed via the stack pointer) in functions that seem suitable.
+ This is computed in `reload', in reload1.c. */
+#define FRAME_POINTER_REQUIRED 1
+
+/* Base register for access to arguments of the function. */
+#define ARG_POINTER_REGNUM 12
+
+/* Register in which static-chain is passed to a function. */
+#define STATIC_CHAIN_REGNUM 0
+
+/* Register in which address to store a structure value
+ is passed to a function. */
+#define STRUCT_VALUE_REGNUM 1
+\f
+/* Define the classes of registers for register constraints in the
+ machine description. Also define ranges of constants.
+
+ One of the classes must always be named ALL_REGS and include all hard regs.
+ If there is more than one class, another class must be named NO_REGS
+ and contain no registers.
+
+ The name GENERAL_REGS must be the name of a class (or an alias for
+ another name such as ALL_REGS). This is the class of registers
+ that is allowed by "g" or "r" in a register constraint.
+ Also, registers outside this class are allocated only when
+ instructions express preferences for them.
+
+ The classes must be numbered in nondecreasing order; that is,
+ a larger-numbered class must never be contained completely
+ in a smaller-numbered class.
+
+ For any two classes, it is very desirable that there be another
+ class that represents their union. */
+
+/* The vax has only one kind of registers, so NO_REGS and ALL_REGS
+ are the only classes. */
+
+enum reg_class { NO_REGS, ALL_REGS, LIM_REG_CLASSES };
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+/* Since GENERAL_REGS is the same class as ALL_REGS,
+ don't give it a different class number; just make it an alias. */
+
+#define GENERAL_REGS ALL_REGS
+
+/* Give names of register classes as strings for dump file. */
+
+#define REG_CLASS_NAMES \
+ {"NO_REGS", "ALL_REGS" }
+
+/* Define which registers fit in which classes.
+ This is an initializer for a vector of HARD_REG_SET
+ of length N_REG_CLASSES. */
+
+#define REG_CLASS_CONTENTS {0, 0xffff}
+
+/* The same information, inverted:
+ Return the class number of the smallest class containing
+ reg number REGNO. This could be a conditional expression
+ or could index an array. */
+
+#define REGNO_REG_CLASS(REGNO) ALL_REGS
+
+/* The class value for index registers, and the one for base regs. */
+
+#define INDEX_REG_CLASS ALL_REGS
+#define BASE_REG_CLASS ALL_REGS
+
+/* Get reg_class from a letter such as appears in the machine description. */
+
+#define REG_CLASS_FROM_LETTER(C) NO_REGS
+
+/* The letters I, J, K, L and M in a register constraint string
+ can be used to stand for particular ranges of immediate operands.
+ This macro defines what the ranges are.
+ C is the letter, and VALUE is a constant value.
+ Return 1 if VALUE is in the range specified by C.
+
+ `I' is the constant zero. */
+
+#define CONST_OK_FOR_LETTER_P(VALUE, C) \
+ ((C) == 'I' ? (VALUE) == 0 \
+ : 0)
+
+/* Similar, but for floating constants, and defining letters G and H.
+ Here VALUE is the CONST_DOUBLE rtx itself.
+
+ `G' is a floating-point zero. */
+
+#define CONST_DOUBLE_OK_FOR_LETTER_P(VALUE, C) \
+ ((C) == 'G' ? ((VALUE) == CONST0_RTX (DFmode) \
+ || (VALUE) == CONST0_RTX (SFmode)) \
+ : 0)
+
+/* Given an rtx X being reloaded into a reg required to be
+ in class CLASS, return the class of reg to actually use.
+ In general this is just CLASS; but on some machines
+ in some cases it is preferable to use a more restrictive class. */
+
+#define PREFERRED_RELOAD_CLASS(X,CLASS) (CLASS)
+
+/* Return the maximum number of consecutive registers
+ needed to represent mode MODE in a register of class CLASS. */
+/* On the vax, this is always the size of MODE in words,
+ since all registers are the same size. */
+#define CLASS_MAX_NREGS(CLASS, MODE) \
+ ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+\f
+/* Stack layout; function entry, exit and calling. */
+
+/* Define this if pushing a word on the stack
+ makes the stack pointer a smaller address. */
+#define STACK_GROWS_DOWNWARD
+
+/* Define this if longjmp restores from saved registers
+ rather than from what setjmp saved. */
+#define LONGJMP_RESTORE_FROM_STACK
+
+/* Define this if the nominal address of the stack frame
+ is at the high-address end of the local variables;
+ that is, each additional local variable allocated
+ goes at a more negative offset in the frame. */
+#define FRAME_GROWS_DOWNWARD
+
+/* Offset within stack frame to start allocating local variables at.
+ If FRAME_GROWS_DOWNWARD, this is the offset to the END of the
+ first local allocated. Otherwise, it is the offset to the BEGINNING
+ of the first local allocated. */
+#define STARTING_FRAME_OFFSET 0
+
+/* If we generate an insn to push BYTES bytes,
+ this says how many the stack pointer really advances by.
+ On the vax, -(sp) pushes only the bytes of the operands. */
+#define PUSH_ROUNDING(BYTES) (BYTES)
+
+/* Offset of first parameter from the argument pointer register value. */
+#define FIRST_PARM_OFFSET(FNDECL) 4
+
+/* Value is the number of bytes of arguments automatically
+ popped when returning from a subroutine call.
+ FUNTYPE is the data type of the function (as a tree),
+ or for a library call it is an identifier node for the subroutine name.
+ SIZE is the number of bytes of arguments passed on the stack.
+
+ On the Vax, the RET insn always pops all the args for any function. */
+
+#define RETURN_POPS_ARGS(FUNTYPE,SIZE) (SIZE)
+
+/* Define how to find the value returned by a function.
+ VALTYPE is the data type of the value (as a tree).
+ If the precise function being called is known, FUNC is its FUNCTION_DECL;
+ otherwise, FUNC is 0. */
+
+/* On the Vax the return value is in R0 regardless. */
+
+#define FUNCTION_VALUE(VALTYPE, FUNC) \
+ gen_rtx (REG, TYPE_MODE (VALTYPE), 0)
+
+/* Define how to find the value returned by a library function
+ assuming the value has mode MODE. */
+
+/* On the Vax the return value is in R0 regardless. */
+
+#define LIBCALL_VALUE(MODE) gen_rtx (REG, MODE, 0)
+
+/* Define this if PCC uses the nonreentrant convention for returning
+ structure and union values. */
+
+#define PCC_STATIC_STRUCT_RETURN
+
+/* 1 if N is a possible register number for a function value.
+ On the Vax, R0 is the only register thus used. */
+
+#define FUNCTION_VALUE_REGNO_P(N) ((N) == 0)
+
+/* 1 if N is a possible register number for function argument passing.
+ On the Vax, no registers are used in this way. */
+
+#define FUNCTION_ARG_REGNO_P(N) 0
+\f
+/* Define a data type for recording info about an argument list
+ during the scan of that argument list. This data type should
+ hold all necessary information about the function itself
+ and about the args processed so far, enough to enable macros
+ such as FUNCTION_ARG to determine where the next arg should go.
+
+ On the vax, this is a single integer, which is a number of bytes
+ of arguments scanned so far. */
+
+#define CUMULATIVE_ARGS int
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0.
+
+ On the vax, the offset starts at 0. */
+
+#define INIT_CUMULATIVE_ARGS(CUM,FNTYPE,LIBNAME) \
+ ((CUM) = 0)
+
+/* Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ (TYPE is null for libcalls where that information may not be available.) */
+
+/* (size + 3) & ~3 rounds the argument's size in bytes up to a whole
+ number of 4-byte stack words. */
+
+#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
+ ((CUM) += ((MODE) != BLKmode \
+ ? (GET_MODE_SIZE (MODE) + 3) & ~3 \
+ : (int_size_in_bytes (TYPE) + 3) & ~3))
+
+/* Define where to put the arguments to a function.
+ Value is zero to push the argument on the stack,
+ or a hard register in which to store the argument.
+
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis). */
+
+/* On the vax all args are pushed. */
+
+#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) 0
+
+/* This macro generates the assembly code for function entry.
+ FILE is a stdio stream to output the code to.
+ SIZE is an int: how many units of temporary storage to allocate.
+ Refer to the array `regs_ever_live' to determine which registers
+ to save; `regs_ever_live[I]' is nonzero if register number I
+ is ever used in the function. This macro is responsible for
+ knowing which registers should not be saved even if used.
+
+ We emit the entry register-save mask as a .word, then any VMS-specific
+ prologue, then adjust sp downward by SIZE bytes (movab for large
+ frames, subl2 otherwise). */
+
+#define FUNCTION_PROLOGUE(FILE, SIZE) \
+{ register int regno; \
+ register int mask = 0; \
+ extern char call_used_regs[]; \
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) \
+ if (regs_ever_live[regno] && !call_used_regs[regno]) \
+ mask |= 1 << regno; \
+ fprintf (FILE, "\t.word 0x%x\n", mask); \
+ MAYBE_VMS_FUNCTION_PROLOGUE(FILE) \
+ /* (SIZE) must be parenthesized: -SIZE negates only the first term \
+ if the argument is a compound expression. */ \
+ if ((SIZE) >= 64) fprintf (FILE, "\tmovab %d(sp),sp\n", -(SIZE));\
+ else if (SIZE) fprintf (FILE, "\tsubl2 $%d,sp\n", (SIZE)); }
+
+/* vms.h redefines this. */
+#define MAYBE_VMS_FUNCTION_PROLOGUE(FILE)
+
+/* Output assembler code to FILE to increment profiler label # LABELNO
+ for profiling a function entry. */
+
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+ fprintf (FILE, "\tmovab LP%d,r0\n\tjsb mcount\n", (LABELNO));
+
+/* Output assembler code to FILE to initialize this source file's
+ basic block profiling info, if that has not already been done. */
+
+/* Tests LPBX0; if still zero, calls __bb_init_func once with its
+ address, then falls through to label LPI<n>. */
+
+#define FUNCTION_BLOCK_PROFILER(FILE, LABELNO) \
+ fprintf (FILE, "\ttstl LPBX0\n\tjneq LPI%d\n\tpushal LPBX0\n\tcalls $1,__bb_init_func\nLPI%d:\n", \
+ LABELNO, LABELNO);
+
+/* Output assembler code to FILE to increment the entry-count for
+ the BLOCKNO'th basic block in this source file. This is a real pain in the
+ sphincter on a VAX, since we do not want to change any of the bits in the
+ processor status word. The way it is done here, it is pushed onto the stack
+ before any flags have changed, and then the stack is fixed up to account for
+ the fact that the instruction to restore the flags only reads a word.
+ It may seem a bit clumsy, but at least it works.
+*/
+
+#define BLOCK_PROFILER(FILE, BLOCKNO) \
+ fprintf (FILE, "\tmovpsl -(sp)\n\tmovw (sp),2(sp)\n\taddl2 $2,sp\n\taddl2 $1,LPBX2+%d\n\tbicpsw $255\n\tbispsw (sp)+\n", \
+ 4 * BLOCKNO)
+
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
+ the stack pointer does not matter. The value is tested only in
+ functions that have frame pointers.
+ No definition is equivalent to always zero. */
+
+#define EXIT_IGNORE_STACK 1
+
+/* This macro generates the assembly code for function exit,
+ on machines that need it. If FUNCTION_EPILOGUE is not defined
+ then individual return instructions are generated for each
+ return statement. Args are same as for FUNCTION_PROLOGUE. */
+
+/* #define FUNCTION_EPILOGUE(FILE, SIZE) */
+
+/* Store in the variable DEPTH the initial difference between the
+ frame pointer reg contents and the stack pointer reg contents,
+ as of the start of the function body. This depends on the layout
+ of the fixed parts of the stack frame and on how registers are saved.
+
+ On the Vax, FRAME_POINTER_REQUIRED is always 1, so the definition of this
+ macro doesn't matter. But it must be defined. */
+
+#define INITIAL_FRAME_POINTER_OFFSET(DEPTH) (DEPTH) = 0;
+
+/* Output assembler code for a block containing the constant parts
+ of a trampoline, leaving space for the variable parts. */
+
+/* On the vax, the trampoline contains an entry mask and two instructions:
+ .word NN
+ movl $STATIC,r0 (store the functions static chain)
+ jmp *$FUNCTION (jump to function code at address FUNCTION) */
+
+/* 0x50+STATIC_CHAIN_REGNUM presumably encodes a register-mode operand
+ specifier byte -- TODO confirm against the VAX operand encoding. */
+
+#define TRAMPOLINE_TEMPLATE(FILE) \
+{ \
+ ASM_OUTPUT_SHORT (FILE, const0_rtx); \
+ ASM_OUTPUT_SHORT (FILE, gen_rtx (CONST_INT, VOIDmode, 0x8fd0)); \
+ ASM_OUTPUT_INT (FILE, const0_rtx); \
+ ASM_OUTPUT_BYTE (FILE, 0x50+STATIC_CHAIN_REGNUM); \
+ ASM_OUTPUT_SHORT (FILE, gen_rtx (CONST_INT, VOIDmode, 0x9f17)); \
+ ASM_OUTPUT_INT (FILE, const0_rtx); \
+}
+
+/* Length in units of the trampoline for entering a nested function. */
+
+/* 15 = 2 + 2 + 4 + 1 + 2 + 4, the bytes emitted by TRAMPOLINE_TEMPLATE
+ (two shorts, an int, a byte, a short, and an int). */
+
+#define TRAMPOLINE_SIZE 15
+
+/* Emit RTL insns to initialize the variable parts of a trampoline.
+ FNADDR is an RTX for the address of the function's pure code.
+ CXT is an RTX for the static chain value for the function. */
+
+/* We copy the register-mask from the function's pure code
+ to the start of the trampoline. */
+#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \
+{ \
+ emit_move_insn (gen_rtx (MEM, HImode, TRAMP), \
+ gen_rtx (MEM, HImode, FNADDR)); \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant (TRAMP, 4)), CXT);\
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant (TRAMP, 11)), \
+ plus_constant (FNADDR, 2)); \
+}
+\f
+/* Addressing modes, and classification of registers for them. */
+
+#define HAVE_POST_INCREMENT
+/* #define HAVE_POST_DECREMENT */
+
+#define HAVE_PRE_DECREMENT
+/* #define HAVE_PRE_INCREMENT */
+
+/* Macros to check register numbers against specific register classes. */
+
+/* These assume that REGNO is a hard or pseudo reg number.
+ They give nonzero only if REGNO is a hard reg of the suitable class
+ or a pseudo reg currently allocated to a suitable hard reg.
+ Since they use reg_renumber, they are safe only once reg_renumber
+ has been allocated, which happens in local-alloc.c. */
+
+/* Any hard register qualifies as both an index and a base register. */
+
+#define REGNO_OK_FOR_INDEX_P(regno) \
+((regno) < FIRST_PSEUDO_REGISTER || reg_renumber[regno] >= 0)
+#define REGNO_OK_FOR_BASE_P(regno) \
+((regno) < FIRST_PSEUDO_REGISTER || reg_renumber[regno] >= 0)
+\f
+/* Maximum number of registers that can appear in a valid memory address. */
+
+#define MAX_REGS_PER_ADDRESS 2
+
+/* 1 if X is an rtx for a constant that is a valid address. */
+
+#define CONSTANT_ADDRESS_P(X) CONSTANT_P (X)
+
+/* Nonzero if the constant value X is a legitimate general operand.
+ It is given that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
+
+#define LEGITIMATE_CONSTANT_P(X) 1
+
+/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
+ and check its validity for a certain class.
+ We have two alternate definitions for each of them.
+ The usual definition accepts all pseudo regs; the other rejects
+ them unless they have been allocated suitable hard regs.
+ The symbol REG_OK_STRICT causes the latter definition to be used.
+
+ Most source files want to accept pseudo regs in the hope that
+ they will get allocated to the class that the insn wants them to be in.
+ Source files for reload pass need to be strict.
+ After reload, it makes no difference, since pseudo regs have
+ been eliminated by then. */
+
+#ifndef REG_OK_STRICT
+
+/* Nonzero if X is a hard reg that can be used as an index
+ or if it is a pseudo reg. */
+#define REG_OK_FOR_INDEX_P(X) 1
+/* Nonzero if X is a hard reg that can be used as a base reg
+ or if it is a pseudo reg. */
+#define REG_OK_FOR_BASE_P(X) 1
+
+#else
+
+/* Nonzero if X is a hard reg that can be used as an index. */
+#define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X))
+/* Nonzero if X is a hard reg that can be used as a base reg. */
+#define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
+
+#endif
+\f
+/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
+ that is a valid memory address for an instruction.
+ The MODE argument is the machine mode for the MEM expression
+ that wants to use this address.
+
+ The other macros defined here are used only in GO_IF_LEGITIMATE_ADDRESS,
+ except for CONSTANT_ADDRESS_P which is actually machine-independent. */
+
+#ifdef NO_EXTERNAL_INDIRECT_ADDRESS
+
+/* Zero if this contains a (CONST (PLUS (SYMBOL_REF) (...))) and the
+ symbol in the SYMBOL_REF is an external symbol. */
+
+#define INDIRECTABLE_CONSTANT_P(X) \
+ (! (GET_CODE ((X)) == CONST \
+ && GET_CODE (XEXP ((X), 0)) == PLUS \
+ && GET_CODE (XEXP (XEXP ((X), 0), 0)) == SYMBOL_REF \
+ && SYMBOL_REF_FLAG (XEXP (XEXP ((X), 0), 0))))
+
+/* Re-definition of CONSTANT_ADDRESS_P, which is true only when there
+ are no SYMBOL_REFs for external symbols present. */
+
+/* NOTE(review): externality is read from SYMBOL_REF_FLAG here --
+ presumably set elsewhere for external symbols; confirm. */
+
+#define INDIRECTABLE_CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == LABEL_REF \
+ || (GET_CODE (X) == SYMBOL_REF && !SYMBOL_REF_FLAG (X)) \
+ || (GET_CODE (X) == CONST && INDIRECTABLE_CONSTANT_P(X)) \
+ || GET_CODE (X) == CONST_INT)
+
+
+/* Non-zero if X is an address which can be indirected. External symbols
+ could be in a sharable image library, so we disallow those. */
+
+#define INDIRECTABLE_ADDRESS_P(X) \
+ (INDIRECTABLE_CONSTANT_ADDRESS_P (X) \
+ || (GET_CODE (X) == REG && REG_OK_FOR_BASE_P (X)) \
+ || (GET_CODE (X) == PLUS \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REG_OK_FOR_BASE_P (XEXP (X, 0)) \
+ && INDIRECTABLE_CONSTANT_ADDRESS_P (XEXP (X, 1))))
+
+#else /* not NO_EXTERNAL_INDIRECT_ADDRESS */
+
+#define INDIRECTABLE_CONSTANT_ADDRESS_P(X) CONSTANT_ADDRESS_P(X)
+
+/* Non-zero if X is an address which can be indirected. */
+#define INDIRECTABLE_ADDRESS_P(X) \
+ (CONSTANT_ADDRESS_P (X) \
+ || (GET_CODE (X) == REG && REG_OK_FOR_BASE_P (X)) \
+ || (GET_CODE (X) == PLUS \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REG_OK_FOR_BASE_P (XEXP (X, 0)) \
+ && CONSTANT_ADDRESS_P (XEXP (X, 1))))
+
+#endif /* not NO_EXTERNAL_INDIRECT_ADDRESS */
+
+/* Go to ADDR if X is a valid address not using indexing.
+ (This much is the easy part.) */
+
+/* NOTE(review): XEXP (X, 0) is fetched before X is known to be a MEM
+ or an autoincrement -- this relies on every rtx code reached here
+ having an operand-0 slot; confirm it cannot read out of bounds. */
+
+#define GO_IF_NONINDEXED_ADDRESS(X, ADDR) \
+{ register rtx xfoob = (X); \
+ if (GET_CODE (xfoob) == REG) goto ADDR; \
+ if (CONSTANT_ADDRESS_P (xfoob)) goto ADDR; \
+ if (INDIRECTABLE_ADDRESS_P (xfoob)) goto ADDR; \
+ xfoob = XEXP (X, 0); \
+ if (GET_CODE (X) == MEM && INDIRECTABLE_ADDRESS_P (xfoob)) \
+ goto ADDR; \
+ if ((GET_CODE (X) == PRE_DEC || GET_CODE (X) == POST_INC) \
+ && GET_CODE (xfoob) == REG && REG_OK_FOR_BASE_P (xfoob)) \
+ goto ADDR; }
+
+/* 1 if PROD is either a reg times size of mode MODE
+ or just a reg, if MODE is just one byte.
+ This macro's expansion uses the temporary variables xfoo0 and xfoo1
+ that must be declared in the surrounding context. */
+
+/* The multiplier may appear as either operand of the MULT, so both
+ orders are checked. */
+
+#define INDEX_TERM_P(PROD, MODE) \
+(GET_MODE_SIZE (MODE) == 1 \
+ ? (GET_CODE (PROD) == REG && REG_OK_FOR_BASE_P (PROD)) \
+ : (GET_CODE (PROD) == MULT \
+ && \
+ (xfoo0 = XEXP (PROD, 0), xfoo1 = XEXP (PROD, 1), \
+ ((GET_CODE (xfoo0) == CONST_INT \
+ && INTVAL (xfoo0) == GET_MODE_SIZE (MODE) \
+ && GET_CODE (xfoo1) == REG \
+ && REG_OK_FOR_INDEX_P (xfoo1)) \
+ || \
+ (GET_CODE (xfoo1) == CONST_INT \
+ && INTVAL (xfoo1) == GET_MODE_SIZE (MODE) \
+ && GET_CODE (xfoo0) == REG \
+ && REG_OK_FOR_INDEX_P (xfoo0))))))
+
+/* Go to ADDR if X is the sum of a register
+ and a valid index term for mode MODE. */
+#define GO_IF_REG_PLUS_INDEX(X, MODE, ADDR) \
+{ register rtx xfooa; \
+ if (GET_CODE (X) == PLUS) \
+ { if (GET_CODE (XEXP (X, 0)) == REG \
+ && REG_OK_FOR_BASE_P (XEXP (X, 0)) \
+ && (xfooa = XEXP (X, 1), \
+ INDEX_TERM_P (xfooa, MODE))) \
+ goto ADDR; \
+ if (GET_CODE (XEXP (X, 1)) == REG \
+ && REG_OK_FOR_BASE_P (XEXP (X, 1)) \
+ && (xfooa = XEXP (X, 0), \
+ INDEX_TERM_P (xfooa, MODE))) \
+ goto ADDR; } }
+
+/* Declares xfoo0/xfoo1 here because INDEX_TERM_P expands to code that
+ assigns them. */
+
+#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \
+{ register rtx xfoo, xfoo0, xfoo1; \
+ GO_IF_NONINDEXED_ADDRESS (X, ADDR); \
+ if (GET_CODE (X) == PLUS) \
+ { /* Handle <address>[index] represented with index-sum outermost */\
+ xfoo = XEXP (X, 0); \
+ if (INDEX_TERM_P (xfoo, MODE)) \
+ { GO_IF_NONINDEXED_ADDRESS (XEXP (X, 1), ADDR); } \
+ xfoo = XEXP (X, 1); \
+ if (INDEX_TERM_P (xfoo, MODE)) \
+ { GO_IF_NONINDEXED_ADDRESS (XEXP (X, 0), ADDR); } \
+ /* Handle offset(reg)[index] with offset added outermost */ \
+ if (INDIRECTABLE_CONSTANT_ADDRESS_P (XEXP (X, 0))) \
+ { if (GET_CODE (XEXP (X, 1)) == REG \
+ && REG_OK_FOR_BASE_P (XEXP (X, 1))) \
+ goto ADDR; \
+ GO_IF_REG_PLUS_INDEX (XEXP (X, 1), MODE, ADDR); } \
+ if (INDIRECTABLE_CONSTANT_ADDRESS_P (XEXP (X, 1))) \
+ { if (GET_CODE (XEXP (X, 0)) == REG \
+ && REG_OK_FOR_BASE_P (XEXP (X, 0))) \
+ goto ADDR; \
+ GO_IF_REG_PLUS_INDEX (XEXP (X, 0), MODE, ADDR); } } }
+\f
+/* Try machine-dependent ways of modifying an illegitimate address
+ to be legitimate. If we find one, return the new, valid address.
+ This macro is used in only one place: `memory_address' in explow.c.
+
+ OLDX is the address as it was before break_out_memory_refs was called.
+ In some cases it is useful to look at this to decide what needs to be done.
+
+ MODE and WIN are passed so that this macro can use
+ GO_IF_LEGITIMATE_ADDRESS.
+
+ It is always safe for this macro to do nothing. It exists to recognize
+ opportunities to optimize the output.
+
+ For the vax, nothing needs to be done. */
+
+#define LEGITIMIZE_ADDRESS(X,OLDX,MODE,WIN) {}
+
+/* Go to LABEL if ADDR (a legitimate address expression)
+ has an effect that depends on the machine mode it is used for.
+ On the VAX, the predecrement and postincrement address depend thus
+ (the amount of decrement or increment being the length of the operand)
+ and all indexed address depend thus (because the index scale factor
+ is the length of the operand). */
+
+/* The lone `;' after each PLUS test is a deliberate empty statement:
+ reg+constant is NOT mode-dependent, so those cases fall out without
+ reaching the `goto LABEL'. */
+
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL) \
+ { if (GET_CODE (ADDR) == POST_INC || GET_CODE (ADDR) == PRE_DEC) \
+ goto LABEL; \
+ if (GET_CODE (ADDR) == PLUS) \
+ { if (CONSTANT_ADDRESS_P (XEXP (ADDR, 0)) \
+ && GET_CODE (XEXP (ADDR, 1)) == REG); \
+ else if (CONSTANT_ADDRESS_P (XEXP (ADDR, 1)) \
+ && GET_CODE (XEXP (ADDR, 0)) == REG); \
+ else goto LABEL; }}
+\f
+/* Specify the machine mode that this machine uses
+ for the index in the tablejump instruction. */
+/* HImode: case vectors hold 16-bit offsets (see ASM_OUTPUT_ADDR_DIFF_ELT,
+ which emits .word). */
+#define CASE_VECTOR_MODE HImode
+
+/* Define this if the case instruction expects the table
+ to contain offsets from the address of the table.
+ Do not define this if the table should contain absolute addresses. */
+#define CASE_VECTOR_PC_RELATIVE
+
+/* Define this if the case instruction drops through after the table
+ when the index is out of range. Don't define it if the case insn
+ jumps to the default label instead. */
+#define CASE_DROPS_THROUGH
+
+/* Specify the tree operation to be used to convert reals to integers. */
+#define IMPLICIT_FIX_EXPR FIX_ROUND_EXPR
+
+/* This is the kind of divide that is easiest to do in the general case. */
+#define EASY_DIV_EXPR TRUNC_DIV_EXPR
+
+/* Define this as 1 if `char' should by default be signed; else as 0. */
+#define DEFAULT_SIGNED_CHAR 1
+
+/* This flag, if defined, says the same insns that convert to a signed fixnum
+ also convert validly to an unsigned one. */
+#define FIXUNS_TRUNC_LIKE_FIX_TRUNC
+
+/* Max number of bytes we can move from memory to memory
+ in one reasonably fast instruction. */
+#define MOVE_MAX 8
+
+/* Define this if zero-extension is slow (more than one real instruction). */
+/* #define SLOW_ZERO_EXTEND */
+
+/* Nonzero if access to memory by bytes is slow and undesirable. */
+#define SLOW_BYTE_ACCESS 0
+
+/* Define if shifts truncate the shift count
+ which implies one can omit a sign-extension or zero-extension
+ of a shift count. */
+/* #define SHIFT_COUNT_TRUNCATED */
+
+/* Value is 1 if truncating an integer of INPREC bits to OUTPREC bits
+ is done just by pretending it is already truncated. */
+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
+
+/* Specify the machine mode that pointers have.
+ After generation of rtl, the compiler makes no further distinction
+ between pointers and any other objects of this machine mode. */
+#define Pmode SImode
+
+/* A function address in a call instruction
+ is a byte address (for indexing purposes)
+ so give the MEM rtx a byte's mode. */
+#define FUNCTION_MODE QImode
+
+/* This machine doesn't use IEEE floats. */
+
+#define TARGET_FLOAT_FORMAT VAX_FLOAT_FORMAT
+
+/* Compute the cost of computing a constant rtl expression RTX
+ whose rtx-code is CODE. The body of this macro is a portion
+ of a switch statement. If the code is computed here,
+ return it with a return statement. Otherwise, break from the switch. */
+
+#define CONST_COSTS(RTX,CODE) \
+ case CONST_INT: \
+ /* Constant zero is super cheap due to clr instruction. */ \
+ if ((RTX) == const0_rtx) return 0; \
+ /* Constants of +/- 1 should also be super cheap since \
+ may be used in decl/incl/aob/sob insns. */ \
+ if ((RTX) == const1_rtx || (RTX) == constm1_rtx) return 0; \
+ if ((unsigned) INTVAL (RTX) < 077) return 1; \
+ /* Fall through: larger constants cost the same as symbolic refs. */ \
+ case CONST: \
+ case LABEL_REF: \
+ case SYMBOL_REF: \
+ return 3; \
+ case CONST_DOUBLE: \
+ return 5;
+
+/* On most VAX models, shift are almost as expensive as multiplies, so
+ we'd rather use multiply unless it can be done in an extremely small
+ sequence. */
+#define RTX_COSTS(RTX,CODE) \
+ case LSHIFT: \
+ case ASHIFT: \
+ case ASHIFTRT: \
+ case LSHIFTRT: \
+ case ROTATE: \
+ case ROTATERT: \
+ return COSTS_N_INSNS (4);
+
+/* Specify the cost of a branch insn; roughly the number of extra insns that
+ should be added to avoid a branch.
+
+ Branches are extremely cheap on the VAX while the shift insns often
+ used to replace branches can be expensive. */
+
+#define BRANCH_COST 0
+
+/*
+ * We can use the BSD C library routines for the libgcc calls that are
+ * still generated, since that's what they boil down to anyways.
+ */
+
+#define UDIVSI3_LIBCALL "*udiv"
+#define UMODSI3_LIBCALL "*urem"
+
+/* Check a `double' value for validity for a particular machine mode. */
+
+/* note that it is very hard to accidentally create a number that fits in a
+ double but not in a float, since their ranges are almost the same */
+
+/* Out-of-range SFmode magnitudes are an error and are clamped to the
+ largest representable value; denormal-small magnitudes are truncated
+ to zero with only a warning. */
+
+#define CHECK_FLOAT_VALUE(mode, d) \
+ if ((mode) == SFmode) \
+ { \
+ if ((d) > 1.7014117331926444e+38) \
+ { error ("magnitude of constant too large for `float'"); \
+ (d) = 1.7014117331926444e+38; } \
+ else if ((d) < -1.7014117331926444e+38) \
+ { error ("magnitude of constant too large for `float'"); \
+ (d) = -1.7014117331926444e+38; } \
+ else if (((d) > 0) && ((d) < 2.9387358770557188e-39)) \
+ { warning ("`float' constant truncated to zero"); \
+ (d) = 0.0; } \
+ else if (((d) < 0) && ((d) > -2.9387358770557188e-39)) \
+ { warning ("`float' constant truncated to zero"); \
+ (d) = 0.0; } \
+ }
+
+/* For future reference:
+ D Float: 9 bit, sign magnitude, excess 128 binary exponent
+ normalized 56 bit fraction, redundant bit not represented
+ approximately 16 decimal digits of precision
+
+ The values to use if we trust decimal to binary conversions:
+#define MAX_D_FLOAT 1.7014118346046923e+38
+#define MIN_D_FLOAT .29387358770557188e-38
+
+ G float: 12 bit, sign magnitude, excess 1024 binary exponent
+ normalized 53 bit fraction, redundant bit not represented
+ approximately 15 decimal digits precision
+
+ The values to use if we trust decimal to binary conversions:
+#define MAX_G_FLOAT .898846567431157e+308
+#define MIN_G_FLOAT .556268464626800e-308
+*/
+\f
+/* Tell final.c how to eliminate redundant test instructions. */
+
+/* Here we define machine-dependent flags and fields in cc_status
+ (see `conditions.h'). No extra ones are needed for the vax. */
+
+/* Store in cc_status the expressions
+ that the condition codes will describe
+ after execution of an instruction whose pattern is EXP.
+ Do not alter them if the instruction would not alter the cc's. */
+
+/* Records SET_DEST/SET_SRC of a SET (or of the first SET in a PARALLEL)
+ as the values described by the cc's; calls reset cc_status entirely.
+ The two trailing tests invalidate value2 when value1 overlaps it. */
+
+#define NOTICE_UPDATE_CC(EXP, INSN) \
+{ if (GET_CODE (EXP) == SET) \
+ { if (GET_CODE (SET_SRC (EXP)) == CALL) \
+ CC_STATUS_INIT; \
+ else if (GET_CODE (SET_DEST (EXP)) != PC) \
+ { cc_status.flags = 0; \
+ cc_status.value1 = SET_DEST (EXP); \
+ cc_status.value2 = SET_SRC (EXP); } } \
+ else if (GET_CODE (EXP) == PARALLEL \
+ && GET_CODE (XVECEXP (EXP, 0, 0)) == SET) \
+ { \
+ if (GET_CODE (SET_SRC (XVECEXP (EXP, 0, 0))) == CALL) \
+ CC_STATUS_INIT; \
+ else if (GET_CODE (SET_DEST (XVECEXP (EXP, 0, 0))) != PC) \
+ { cc_status.flags = 0; \
+ cc_status.value1 = SET_DEST (XVECEXP (EXP, 0, 0)); \
+ cc_status.value2 = SET_SRC (XVECEXP (EXP, 0, 0)); } } \
+ /* PARALLELs whose first element sets the PC are aob, sob insns. \
+ They do change the cc's. So drop through and forget the cc's. */ \
+ else CC_STATUS_INIT; \
+ if (cc_status.value1 && GET_CODE (cc_status.value1) == REG \
+ && cc_status.value2 \
+ && reg_overlap_mentioned_p (cc_status.value1, cc_status.value2)) \
+ cc_status.value2 = 0; \
+ if (cc_status.value1 && GET_CODE (cc_status.value1) == MEM \
+ && cc_status.value2 \
+ && GET_CODE (cc_status.value2) == MEM) \
+ cc_status.value2 = 0; }
+/* Actual condition, one line up, should be that value2's address
+ depends on value1, but that is too much of a pain. */
+
+#define OUTPUT_JUMP(NORMAL, FLOAT, NO_OV) \
+{ if (cc_status.flags & CC_NO_OVERFLOW) \
+ return NO_OV; \
+ return NORMAL; }
+\f
+/* Control the assembler format that we output. */
+
+/* Output at beginning of assembler file. */
+
+#define ASM_FILE_START(FILE) fprintf (FILE, "#NO_APP\n");
+
+/* Output to assembler file text saying following lines
+ may contain character constants, extra white space, comments, etc. */
+
+#define ASM_APP_ON "#APP\n"
+
+/* Output to assembler file text saying following lines
+ no longer contain unusual constructs. */
+
+#define ASM_APP_OFF "#NO_APP\n"
+
+/* Output before read-only data. */
+
+#define TEXT_SECTION_ASM_OP ".text"
+
+/* Output before writable data. */
+
+#define DATA_SECTION_ASM_OP ".data"
+
+/* How to refer to registers in assembler output.
+ This sequence is indexed by compiler's hard-register-number (see above). */
+
+/* 16 entries: r0-r11 followed by ap, fp, sp, pc. */
+
+#define REGISTER_NAMES \
+{"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", \
+ "r9", "r10", "r11", "ap", "fp", "sp", "pc"}
+
+/* This is BSD, so it wants DBX format. */
+
+#define DBX_DEBUGGING_INFO
+
+/* How to renumber registers for dbx and gdb.
+ Vax needs no change in the numeration. */
+
+#define DBX_REGISTER_NUMBER(REGNO) (REGNO)
+
+/* Do not break .stabs pseudos into continuations. */
+
+#define DBX_CONTIN_LENGTH 0
+
+/* This is the char to use for continuation (in case we need to turn
+ continuation back on). */
+
+#define DBX_CONTIN_CHAR '?'
+
+/* Don't use the `xsfoo;' construct in DBX output; this system
+ doesn't support it. */
+
+#define DBX_NO_XREFS
+
+/* Output the .stabs for a C `static' variable in the data section. */
+#define DBX_STATIC_STAB_DATA_SECTION
+
+/* Vax specific: which type character is used for type double? */
+
+#define ASM_DOUBLE_CHAR (TARGET_G_FLOAT ? 'g' : 'd')
+
+/* This is how to output the definition of a user-level label named NAME,
+ such as the label on a static function or variable NAME. */
+
+#define ASM_OUTPUT_LABEL(FILE,NAME) \
+ do { assemble_name (FILE, NAME); fputs (":\n", FILE); } while (0)
+
+/* This is how to output a command to make the user-level label named NAME
+ defined for reference from other files. */
+
+#define ASM_GLOBALIZE_LABEL(FILE,NAME) \
+ do { fputs (".globl ", FILE); assemble_name (FILE, NAME); fputs ("\n", FILE);} while (0)
+
+/* This is how to output a reference to a user-level label named NAME. */
+
+/* User-level names get a `_' prefix in the assembler output. */
+
+#define ASM_OUTPUT_LABELREF(FILE,NAME) \
+ fprintf (FILE, "_%s", NAME)
+
+/* This is how to output an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class. */
+
+#define ASM_OUTPUT_INTERNAL_LABEL(FILE,PREFIX,NUM) \
+ fprintf (FILE, "%s%d:\n", PREFIX, NUM)
+
+/* This is how to store into the string LABEL
+ the symbol_ref name of an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class.
+ This is suitable for output with `assemble_name'.
+ The leading `*' presumably marks the name as already assembler-ready
+ (so no `_' prefix is added) -- confirm against assemble_name. */
+
+#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM) \
+ sprintf (LABEL, "*%s%d", PREFIX, NUM)
+
+/* This is how to output an assembler line defining a `double' constant.
+ It is .dfloat or .gfloat, depending. */
+
+#define ASM_OUTPUT_DOUBLE(FILE,VALUE) \
+ fprintf (FILE, "\t.%cfloat 0%c%.20e\n", ASM_DOUBLE_CHAR, \
+ ASM_DOUBLE_CHAR, (VALUE))
+
+/* This is how to output an assembler line defining a `float' constant. */
+
+#define ASM_OUTPUT_FLOAT(FILE,VALUE) \
+ fprintf (FILE, "\t.float 0f%.20e\n", (VALUE))
+
+/* This is how to output an assembler line defining an `int' constant. */
+
+#define ASM_OUTPUT_INT(FILE,VALUE) \
+( fprintf (FILE, "\t.long "), \
+ output_addr_const (FILE, (VALUE)), \
+ fprintf (FILE, "\n"))
+
+/* Likewise for `char' and `short' constants. */
+
+#define ASM_OUTPUT_SHORT(FILE,VALUE) \
+( fprintf (FILE, "\t.word "), \
+ output_addr_const (FILE, (VALUE)), \
+ fprintf (FILE, "\n"))
+
+#define ASM_OUTPUT_CHAR(FILE,VALUE) \
+( fprintf (FILE, "\t.byte "), \
+ output_addr_const (FILE, (VALUE)), \
+ fprintf (FILE, "\n"))
+
+/* This is how to output an assembler line for a numeric constant byte. */
+
+#define ASM_OUTPUT_BYTE(FILE,VALUE) \
+ fprintf (FILE, "\t.byte 0x%x\n", (VALUE))
+
+/* This is how to output an insn to push a register on the stack.
+ It need not be very fast code. */
+
+#define ASM_OUTPUT_REG_PUSH(FILE,REGNO) \
+ fprintf (FILE, "\tpushl %s\n", reg_names[REGNO])
+
+/* This is how to output an insn to pop a register from the stack.
+ It need not be very fast code. */
+
+#define ASM_OUTPUT_REG_POP(FILE,REGNO) \
+ fprintf (FILE, "\tmovl (sp)+,%s\n", reg_names[REGNO])
+
+/* This is how to output an element of a case-vector that is absolute.
+ (The Vax does not use such vectors,
+ but we must define this macro anyway.) */
+
+#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \
+ fprintf (FILE, "\t.long L%d\n", VALUE)
+
+/* This is how to output an element of a case-vector that is relative. */
+
+/* .word (2 bytes) matches CASE_VECTOR_MODE == HImode. */
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, VALUE, REL) \
+ fprintf (FILE, "\t.word L%d-L%d\n", VALUE, REL)
+
+/* This is how to output an assembler line
+ that says to advance the location counter
+ to a multiple of 2**LOG bytes. */
+
+#define ASM_OUTPUT_ALIGN(FILE,LOG) \
+ fprintf (FILE, "\t.align %d\n", (LOG))
+
+/* This is how to output an assembler line
+ that says to advance the location counter by SIZE bytes. */
+
+#define ASM_OUTPUT_SKIP(FILE,SIZE) \
+ fprintf (FILE, "\t.space %u\n", (SIZE))
+
+/* This says how to output an assembler line
+ to define a global common symbol. */
+
+#define ASM_OUTPUT_COMMON(FILE, NAME, SIZE, ROUNDED) \
+( fputs (".comm ", (FILE)), \
+ assemble_name ((FILE), (NAME)), \
+ fprintf ((FILE), ",%u\n", (ROUNDED)))
+
+/* This says how to output an assembler line
+ to define a local common symbol. */
+
+#define ASM_OUTPUT_LOCAL(FILE, NAME, SIZE, ROUNDED) \
+( fputs (".lcomm ", (FILE)), \
+ assemble_name ((FILE), (NAME)), \
+ fprintf ((FILE), ",%u\n", (ROUNDED)))
+
+/* Store in OUTPUT a string (made with alloca) containing
+ an assembler-name for a local static variable named NAME.
+ LABELNO is an integer which is different for each call. */
+
+/* 10 extra bytes cover the `.', up to 8 digits, and the terminator. */
+
+#define ASM_FORMAT_PRIVATE_NAME(OUTPUT, NAME, LABELNO) \
+( (OUTPUT) = (char *) alloca (strlen ((NAME)) + 10), \
+ sprintf ((OUTPUT), "%s.%d", (NAME), (LABELNO)))
+
+/* Define the parentheses used to group arithmetic operations
+ in assembler code. */
+
+#define ASM_OPEN_PAREN "("
+#define ASM_CLOSE_PAREN ")"
+
+/* Define results of standard character escape sequences. */
+#define TARGET_BELL 007
+#define TARGET_BS 010
+#define TARGET_TAB 011
+#define TARGET_NEWLINE 012
+#define TARGET_VT 013
+#define TARGET_FF 014
+#define TARGET_CR 015
+
+/* Print an instruction operand X on file FILE.
+ CODE is the code from the %-spec that requested printing this operand;
+ if `%z3' was used to print operand 3, then CODE is 'z'.
+ On the Vax, the codes used are:
+ `#', indicating that either `d' or `g' should be printed,
+ depending on whether we're using dfloat or gfloat.
+ `C', indicating the reverse of the condition name specified by the
+ operand.
+ `P', indicating one plus a constant operand
+ `N', indicating the one's complement of a constant operand
+ `R', indicating 32 minus a constant operand (for rotl counts)
+ `H', indicating the low-order 16 bits of the one's complement of a constant
+ `B', similarly for the low-order 8 bits. */
+
+#define PRINT_OPERAND_PUNCT_VALID_P(CODE) \
+ ((CODE) == '#')
+
+#define PRINT_OPERAND(FILE, X, CODE) \
+{ extern char *rev_cond_name (); \
+ if (CODE == '#') fputc (ASM_DOUBLE_CHAR, FILE); \
+ else if (CODE == 'C') \
+ fputs (rev_cond_name (X), FILE); \
+ else if (CODE == 'P' && GET_CODE (X) == CONST_INT) \
+ fprintf (FILE, "$%d", INTVAL (X) + 1); \
+ else if (CODE == 'N' && GET_CODE (X) == CONST_INT) \
+ fprintf (FILE, "$%d", ~ INTVAL (X)); \
+ /* rotl instruction cannot deal with negative arguments. */ \
+ else if (CODE == 'R' && GET_CODE (X) == CONST_INT) \
+ fprintf (FILE, "$%d", 32 - INTVAL (X)); \
+ else if (CODE == 'H' && GET_CODE (X) == CONST_INT) \
+ fprintf (FILE, "$%d", 0xffff & ~ INTVAL (X)); \
+ else if (CODE == 'B' && GET_CODE (X) == CONST_INT) \
+ fprintf (FILE, "$%d", 0xff & ~ INTVAL (X)); \
+ else if (GET_CODE (X) == REG) \
+ fprintf (FILE, "%s", reg_names[REGNO (X)]); \
+ else if (GET_CODE (X) == MEM) \
+ output_address (XEXP (X, 0)); \
+ else if (GET_CODE (X) == CONST_DOUBLE && GET_MODE (X) != DImode) \
+ { union { double d; int i[2]; } u; \
+ u.i[0] = CONST_DOUBLE_LOW (X); u.i[1] = CONST_DOUBLE_HIGH (X); \
+ fprintf (FILE, "$0%c%.20e", ASM_DOUBLE_CHAR, u.d); } \
+ else { putc ('$', FILE); output_addr_const (FILE, X); }}
+
+/* Print a memory operand whose address is X, on file FILE.
+ This uses a function in output-vax.c. */
+
+#define PRINT_OPERAND_ADDRESS(FILE, ADDR) \
+ print_operand_address (FILE, ADDR)
--- /dev/null
+;;- Machine description for GNU compiler, Vax Version
+;; Copyright (C) 1987, 1988, 1991 Free Software Foundation, Inc.
+
+;; This file is part of GNU CC.
+
+;; GNU CC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+
+;; GNU CC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GNU CC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+
+
+;;- Instruction patterns. When multiple patterns apply,
+;;- the first one in the file is chosen.
+;;-
+;;- See file "rtl.def" for documentation on define_insn, match_*, et. al.
+;;-
+;;- cpp macro #define NOTICE_UPDATE_CC in file tm.h handles condition code
+;;- updates for most instructions.
+
+;; We don't want to allow a constant operand for test insns because
+;; (set (cc0) (const_int foo)) has no mode information. Such insns will
+;; be folded while optimizing anyway.
+
+;; Compare operand 0 against zero, setting the condition codes.
+(define_insn "tstsi"
+ [(set (cc0)
+ (match_operand:SI 0 "nonimmediate_operand" "g"))]
+ ""
+ "tstl %0")
+
+(define_insn "tsthi"
+ [(set (cc0)
+ (match_operand:HI 0 "nonimmediate_operand" "g"))]
+ ""
+ "tstw %0")
+
+(define_insn "tstqi"
+ [(set (cc0)
+ (match_operand:QI 0 "nonimmediate_operand" "g"))]
+ ""
+ "tstb %0")
+
+;; `%#' is expanded by PRINT_OPERAND to ASM_DOUBLE_CHAR ('d' or 'g'
+;; depending on TARGET_G_FLOAT), so this emits tstd or tstg.
+(define_insn "tstdf"
+ [(set (cc0)
+ (match_operand:DF 0 "general_operand" "gF"))]
+ ""
+ "tst%# %0")
+
+(define_insn "tstsf"
+ [(set (cc0)
+ (match_operand:SF 0 "general_operand" "gF"))]
+ ""
+ "tstf %0")
+
+(define_insn "cmpsi"
+ [(set (cc0)
+ (compare (match_operand:SI 0 "nonimmediate_operand" "g")
+ (match_operand:SI 1 "general_operand" "g")))]
+ ""
+ "cmpl %0,%1")
+
+(define_insn "cmphi"
+ [(set (cc0)
+ (compare (match_operand:HI 0 "nonimmediate_operand" "g")
+ (match_operand:HI 1 "general_operand" "g")))]
+ ""
+ "cmpw %0,%1")
+
+(define_insn "cmpqi"
+ [(set (cc0)
+ (compare (match_operand:QI 0 "nonimmediate_operand" "g")
+ (match_operand:QI 1 "general_operand" "g")))]
+ ""
+ "cmpb %0,%1")
+
+(define_insn "cmpdf"
+ [(set (cc0)
+ (compare (match_operand:DF 0 "general_operand" "gF")
+ (match_operand:DF 1 "general_operand" "gF")))]
+ ""
+ "cmp%# %0,%1")
+
+(define_insn "cmpsf"
+ [(set (cc0)
+ (compare (match_operand:SF 0 "general_operand" "gF")
+ (match_operand:SF 1 "general_operand" "gF")))]
+ ""
+ "cmpf %0,%1")
+
+(define_insn ""
+ [(set (cc0)
+ (and:SI (match_operand:SI 0 "general_operand" "g")
+ (match_operand:SI 1 "general_operand" "g")))]
+ ""
+ "bitl %0,%1")
+
+(define_insn ""
+ [(set (cc0)
+ (and:HI (match_operand:HI 0 "general_operand" "g")
+ (match_operand:HI 1 "general_operand" "g")))]
+ ""
+ "bitw %0,%1")
+
+(define_insn ""
+ [(set (cc0)
+ (and:QI (match_operand:QI 0 "general_operand" "g")
+ (match_operand:QI 1 "general_operand" "g")))]
+ ""
+ "bitb %0,%1")
+
+;; The vax has no sltu or sgeu patterns, but does have two-operand
+;; add/subtract with carry. This is still better than the alternative.
+;; Since the cc0-using insn cannot be separated from the cc0-setting insn,
+;; and the two are created independently, we can't just use a define_expand
+;; to try to optimize this. (The "movl" and "clrl" insns alter the cc0
+;; flags, but leave the carry flag alone, but that can't easily be expressed.)
+;;
+;; Several two-operator combinations could be added to make slightly more
+;; optimal code, but they'd have to cover all combinations of plus and minus
+;; using match_dup. If you want to do this, I'd suggest changing the "sgeu"
+;; pattern to something like (minus (const_int 1) (ltu ...)), so fewer
+;; patterns need to be recognized.
+;; -- Ken Raeburn (Raeburn@Watch.COM) 24 August 1991.
+
+(define_insn "sltu"
+ [(set (match_operand:SI 0 "general_operand" "=ro")
+ (ltu (cc0) (const_int 0)))]
+ ""
+ "clrl %0\;adwc $0,%0")
+
+(define_insn "sgeu"
+ [(set (match_operand:SI 0 "general_operand" "=ro")
+ (geu (cc0) (const_int 0)))]
+ ""
+ "movl $1,%0\;sbwc $0,%0")
+\f
+;; Floating and quadword moves. Each has a special first alternative
+;; that clears the destination when the source is the constant zero
+;; ('G' constraint), avoiding a constant operand in the instruction.
+(define_insn "movdf"
+ [(set (match_operand:DF 0 "general_operand" "=g,g")
+ (match_operand:DF 1 "general_operand" "G,gF"))]
+ ""
+ "@
+ clr%# %0
+ mov%# %1,%0")
+
+(define_insn "movsf"
+ [(set (match_operand:SF 0 "general_operand" "=g,g")
+ (match_operand:SF 1 "general_operand" "G,gF"))]
+ ""
+ "@
+ clrf %0
+ movf %1,%0")
+
+;; Some vaxes don't support this instruction.
+;;(define_insn "movti"
+;; [(set (match_operand:TI 0 "general_operand" "=g")
+;; (match_operand:TI 1 "general_operand" "g"))]
+;; ""
+;; "movh %1,%0")
+
+;; DImode move: clrq for zero ('I' presumably matches const 0 --
+;; confirm in CONST_OK_FOR_LETTER_P), movq otherwise.
+(define_insn "movdi"
+ [(set (match_operand:DI 0 "general_operand" "=g,g")
+ (match_operand:DI 1 "general_operand" "I,g"))]
+ ""
+ "@
+ clrq %0
+ movq %1,%0")
+
+;; SImode move. The C template picks a shorter or faster VAX
+;; instruction than a plain movl for several special-case sources:
+;; known-zero register + 1, symbolic addresses, zero, and constants
+;; outside the 0..63 short-literal range that can be synthesized.
+(define_insn "movsi"
+ [(set (match_operand:SI 0 "general_operand" "=g")
+ (match_operand:SI 1 "general_operand" "g"))]
+ ""
+ "*
+{
+ rtx link;
+ /* Loading 1 into a register known (via REG_WAS_0) to hold 0:
+ increment is the cheapest encoding. */
+ if (operands[1] == const1_rtx
+ && (link = find_reg_note (insn, REG_WAS_0, 0))
+ /* Make sure the insn that stored the 0 is still present. */
+ && ! INSN_DELETED_P (XEXP (link, 0))
+ && GET_CODE (XEXP (link, 0)) != NOTE
+ /* Make sure cross jumping didn't happen here. */
+ && no_labels_between_p (XEXP (link, 0), insn))
+ /* Fastest way to change a 0 to a 1. */
+ return \"incl %0\";
+ /* Symbolic constants: use move-address (movab/pushab). */
+ if (GET_CODE (operands[1]) == SYMBOL_REF || GET_CODE (operands[1]) == CONST)
+ {
+ if (push_operand (operands[0], SImode))
+ return \"pushab %a1\";
+ return \"movab %a1,%0\";
+ }
+ /* this is slower than a movl, except when pushing an operand */
+ if (operands[1] == const0_rtx)
+ return \"clrl %0\";
+ /* Constants >= 64 don't fit the 6-bit short-literal form (0..63 --
+ presumably; confirm against the VAX literal addressing mode), so
+ try to synthesize them from short literals. */
+ if (GET_CODE (operands[1]) == CONST_INT
+ && (unsigned) INTVAL (operands[1]) >= 64)
+ {
+ int i = INTVAL (operands[1]);
+ /* Complement of a short literal: one mcoml. */
+ if ((unsigned)(~i) < 64)
+ {
+ operands[1] = gen_rtx (CONST_INT, VOIDmode, ~i);
+ return \"mcoml %1,%0\";
+ }
+ /* 64..126: sum of two short literals, 63 + (i-63). */
+ if ((unsigned)i < 127)
+ {
+ operands[1] = gen_rtx (CONST_INT, VOIDmode, 63);
+ operands[2] = gen_rtx (CONST_INT, VOIDmode, i-63);
+ return \"addl3 %2,%1,%0\";
+ }
+ /* trading speed for space */
+ if ((unsigned)i < 0x100)
+ return \"movzbl %1,%0\";
+ if (i >= -0x80 && i < 0)
+ return \"cvtbl %1,%0\";
+ if ((unsigned)i < 0x10000)
+ return \"movzwl %1,%0\";
+ if (i >= -0x8000 && i < 0)
+ return \"cvtwl %1,%0\";
+ }
+ if (push_operand (operands[0], SImode))
+ return \"pushl %1\";
+ return \"movl %1,%0\";
+}")
+
+;; HImode move: same constant-synthesis tricks as movsi, with values
+;; masked to 16 bits.
+(define_insn "movhi"
+ [(set (match_operand:HI 0 "general_operand" "=g")
+ (match_operand:HI 1 "general_operand" "g"))]
+ ""
+ "*
+{
+ rtx link;
+ /* 0 -> 1 in a register known to hold 0 (REG_WAS_0): increment. */
+ if (operands[1] == const1_rtx
+ && (link = find_reg_note (insn, REG_WAS_0, 0))
+ /* Make sure the insn that stored the 0 is still present. */
+ && ! INSN_DELETED_P (XEXP (link, 0))
+ && GET_CODE (XEXP (link, 0)) != NOTE
+ /* Make sure cross jumping didn't happen here. */
+ && no_labels_between_p (XEXP (link, 0), insn))
+ /* Fastest way to change a 0 to a 1. */
+ return \"incw %0\";
+ if (operands[1] == const0_rtx)
+ return \"clrw %0\";
+ /* Constants outside the short-literal range 0..63: synthesize. */
+ if (GET_CODE (operands[1]) == CONST_INT
+ && (unsigned) INTVAL (operands[1]) >= 64)
+ {
+ int i = INTVAL (operands[1]);
+ if ((unsigned)((~i) & 0xffff) < 64)
+ {
+ operands[1] = gen_rtx (CONST_INT, VOIDmode, (~i) & 0xffff);
+ return \"mcomw %1,%0\";
+ }
+ if ((unsigned)(i & 0xffff) < 127)
+ {
+ operands[1] = gen_rtx (CONST_INT, VOIDmode, 63);
+ operands[2] = gen_rtx (CONST_INT, VOIDmode, (i-63) & 0xffff);
+ return \"addw3 %2,%1,%0\";
+ }
+ /* this is a lot slower, and only saves 1 measly byte! */
+ /* if ((unsigned)i < 0x100)
+ return \"movzbw %1,%0\"; */
+ /* if (i >= -0x80 && i < 0)
+ return \"cvtbw %1,%0\"; */
+ }
+ return \"movw %1,%0\";
+}")
+
+;; QImode move: only the clear and complement-of-short-literal
+;; special cases apply at byte width.
+(define_insn "movqi"
+ [(set (match_operand:QI 0 "general_operand" "=g")
+ (match_operand:QI 1 "general_operand" "g"))]
+ ""
+ "*
+{
+ if (operands[1] == const0_rtx)
+ return \"clrb %0\";
+ if (GET_CODE (operands[1]) == CONST_INT
+ && (unsigned) INTVAL (operands[1]) >= 64)
+ {
+ int i = INTVAL (operands[1]);
+ if ((unsigned)((~i) & 0xff) < 64)
+ {
+ operands[1] = gen_rtx (CONST_INT, VOIDmode, (~i) & 0xff);
+ return \"mcomb %1,%0\";
+ }
+ }
+ return \"movb %1,%0\";
+}")
+
+;; The definition of this insn does not really explain what it does,
+;; but it should suffice
+;; that anything generated as this insn will be recognized as one
+;; and that it won't successfully combine with anything.
+;; Block copy of %2 bytes from %1 to %0 via the movc3 string
+;; instruction; movc3 clobbers r0-r5, hence the six clobbers.
+(define_insn "movstrhi"
+ [(set (match_operand:BLK 0 "general_operand" "=g")
+ (match_operand:BLK 1 "general_operand" "g"))
+ (use (match_operand:HI 2 "general_operand" "g"))
+ (clobber (reg:SI 0))
+ (clobber (reg:SI 1))
+ (clobber (reg:SI 2))
+ (clobber (reg:SI 3))
+ (clobber (reg:SI 4))
+ (clobber (reg:SI 5))]
+ ""
+ "movc3 %2,%1,%0")
+\f
+;; Extension and truncation insns.
+
+;; Integer truncations: cvt narrows with sign significance ignored.
+(define_insn "truncsiqi2"
+ [(set (match_operand:QI 0 "general_operand" "=g")
+ (truncate:QI (match_operand:SI 1 "nonimmediate_operand" "g")))]
+ ""
+ "cvtlb %1,%0")
+
+(define_insn "truncsihi2"
+ [(set (match_operand:HI 0 "general_operand" "=g")
+ (truncate:HI (match_operand:SI 1 "nonimmediate_operand" "g")))]
+ ""
+ "cvtlw %1,%0")
+
+(define_insn "trunchiqi2"
+ [(set (match_operand:QI 0 "general_operand" "=g")
+ (truncate:QI (match_operand:HI 1 "nonimmediate_operand" "g")))]
+ ""
+ "cvtwb %1,%0")
+
+;; Sign extensions via the cvt instructions.
+(define_insn "extendhisi2"
+ [(set (match_operand:SI 0 "general_operand" "=g")
+ (sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "g")))]
+ ""
+ "cvtwl %1,%0")
+
+(define_insn "extendqihi2"
+ [(set (match_operand:HI 0 "general_operand" "=g")
+ (sign_extend:HI (match_operand:QI 1 "nonimmediate_operand" "g")))]
+ ""
+ "cvtbw %1,%0")
+
+(define_insn "extendqisi2"
+ [(set (match_operand:SI 0 "general_operand" "=g")
+ (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "g")))]
+ ""
+ "cvtbl %1,%0")
+
+;; Float widening/narrowing ("%#" selects the double-format suffix).
+(define_insn "extendsfdf2"
+ [(set (match_operand:DF 0 "general_operand" "=g")
+ (float_extend:DF (match_operand:SF 1 "general_operand" "gF")))]
+ ""
+ "cvtf%# %1,%0")
+
+(define_insn "truncdfsf2"
+ [(set (match_operand:SF 0 "general_operand" "=g")
+ (float_truncate:SF (match_operand:DF 1 "general_operand" "gF")))]
+ ""
+ "cvt%#f %1,%0")
+
+;; Zero extensions via the movz instructions.
+(define_insn "zero_extendhisi2"
+ [(set (match_operand:SI 0 "general_operand" "=g")
+ (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "g")))]
+ ""
+ "movzwl %1,%0")
+
+(define_insn "zero_extendqihi2"
+ [(set (match_operand:HI 0 "general_operand" "=g")
+ (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "g")))]
+ ""
+ "movzbw %1,%0")
+
+(define_insn "zero_extendqisi2"
+ [(set (match_operand:SI 0 "general_operand" "=g")
+ (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "g")))]
+ ""
+ "movzbl %1,%0")
+\f
+;; Fix-to-float conversion insns.
+
+;; Integer-to-float conversions: one cvt pattern per (integer width,
+;; float format) pair.
+(define_insn "floatsisf2"
+ [(set (match_operand:SF 0 "general_operand" "=g")
+ (float:SF (match_operand:SI 1 "nonimmediate_operand" "g")))]
+ ""
+ "cvtlf %1,%0")
+
+(define_insn "floatsidf2"
+ [(set (match_operand:DF 0 "general_operand" "=g")
+ (float:DF (match_operand:SI 1 "nonimmediate_operand" "g")))]
+ ""
+ "cvtl%# %1,%0")
+
+(define_insn "floathisf2"
+ [(set (match_operand:SF 0 "general_operand" "=g")
+ (float:SF (match_operand:HI 1 "nonimmediate_operand" "g")))]
+ ""
+ "cvtwf %1,%0")
+
+(define_insn "floathidf2"
+ [(set (match_operand:DF 0 "general_operand" "=g")
+ (float:DF (match_operand:HI 1 "nonimmediate_operand" "g")))]
+ ""
+ "cvtw%# %1,%0")
+
+(define_insn "floatqisf2"
+ [(set (match_operand:SF 0 "general_operand" "=g")
+ (float:SF (match_operand:QI 1 "nonimmediate_operand" "g")))]
+ ""
+ "cvtbf %1,%0")
+
+(define_insn "floatqidf2"
+ [(set (match_operand:DF 0 "general_operand" "=g")
+ (float:DF (match_operand:QI 1 "nonimmediate_operand" "g")))]
+ ""
+ "cvtb%# %1,%0")
+\f
+;; Float-to-fix conversion insns.
+
+;; Truncating float-to-integer conversions; the nested (fix (fix ...))
+;; is how truncation toward zero is expressed in RTL here.
+(define_insn "fix_truncsfqi2"
+ [(set (match_operand:QI 0 "general_operand" "=g")
+ (fix:QI (fix:SF (match_operand:SF 1 "general_operand" "gF"))))]
+ ""
+ "cvtfb %1,%0")
+
+(define_insn "fix_truncsfhi2"
+ [(set (match_operand:HI 0 "general_operand" "=g")
+ (fix:HI (fix:SF (match_operand:SF 1 "general_operand" "gF"))))]
+ ""
+ "cvtfw %1,%0")
+
+(define_insn "fix_truncsfsi2"
+ [(set (match_operand:SI 0 "general_operand" "=g")
+ (fix:SI (fix:SF (match_operand:SF 1 "general_operand" "gF"))))]
+ ""
+ "cvtfl %1,%0")
+
+(define_insn "fix_truncdfqi2"
+ [(set (match_operand:QI 0 "general_operand" "=g")
+ (fix:QI (fix:DF (match_operand:DF 1 "general_operand" "gF"))))]
+ ""
+ "cvt%#b %1,%0")
+
+(define_insn "fix_truncdfhi2"
+ [(set (match_operand:HI 0 "general_operand" "=g")
+ (fix:HI (fix:DF (match_operand:DF 1 "general_operand" "gF"))))]
+ ""
+ "cvt%#w %1,%0")
+
+(define_insn "fix_truncdfsi2"
+ [(set (match_operand:SI 0 "general_operand" "=g")
+ (fix:SI (fix:DF (match_operand:DF 1 "general_operand" "gF"))))]
+ ""
+ "cvt%#l %1,%0")
+\f
+;;- All kinds of add instructions.
+
+;; Floating add: three alternatives -- two-operand form when either
+;; input is also the destination (commutativity exploited), else the
+;; three-operand form.
+(define_insn "adddf3"
+ [(set (match_operand:DF 0 "general_operand" "=g,g,g")
+ (plus:DF (match_operand:DF 1 "general_operand" "0,gF,gF")
+ (match_operand:DF 2 "general_operand" "gF,0,gF")))]
+ ""
+ "@
+ add%#2 %2,%0
+ add%#2 %1,%0
+ add%#3 %1,%2,%0")
+
+(define_insn "addsf3"
+ [(set (match_operand:SF 0 "general_operand" "=g,g,g")
+ (plus:SF (match_operand:SF 1 "general_operand" "0,gF,gF")
+ (match_operand:SF 2 "general_operand" "gF,0,gF")))]
+ ""
+ "@
+ addf2 %2,%0
+ addf2 %1,%0
+ addf3 %1,%2,%0")
+
+;; SImode add: the C template folds constant addends into inc/dec,
+;; subtract-of-short-literal, or a move-address (movab) when the base
+;; is a register and the constant won't fit a short literal.
+(define_insn "addsi3"
+ [(set (match_operand:SI 0 "general_operand" "=g")
+ (plus:SI (match_operand:SI 1 "general_operand" "g")
+ (match_operand:SI 2 "general_operand" "g")))]
+ ""
+ "*
+{
+ if (rtx_equal_p (operands[0], operands[1]))
+ {
+ if (operands[2] == const1_rtx)
+ return \"incl %0\";
+ if (operands[2] == constm1_rtx)
+ return \"decl %0\";
+ /* Adding -63..-1: subtract the (short-literal) negation. */
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (unsigned) (- INTVAL (operands[2])) < 64)
+ return \"subl2 $%n2,%0\";
+ /* Large constant + register: movab with a displacement. */
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (unsigned) INTVAL (operands[2]) >= 64
+ && GET_CODE (operands[1]) == REG)
+ return \"movab %c2(%1),%0\";
+ return \"addl2 %2,%0\";
+ }
+ if (rtx_equal_p (operands[0], operands[2]))
+ return \"addl2 %1,%0\";
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (unsigned) (- INTVAL (operands[2])) < 64)
+ return \"subl3 $%n2,%1,%0\";
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (unsigned) INTVAL (operands[2]) >= 64
+ && GET_CODE (operands[1]) == REG)
+ {
+ if (push_operand (operands[0], SImode))
+ return \"pushab %c2(%1)\";
+ return \"movab %c2(%1),%0\";
+ }
+ return \"addl3 %1,%2,%0\";
+}")
+
+;; HImode add: like addsi3 but without the movab displacement trick
+;; (address arithmetic is longword-only).
+(define_insn "addhi3"
+ [(set (match_operand:HI 0 "general_operand" "=g")
+ (plus:HI (match_operand:HI 1 "general_operand" "g")
+ (match_operand:HI 2 "general_operand" "g")))]
+ ""
+ "*
+{
+ if (rtx_equal_p (operands[0], operands[1]))
+ {
+ if (operands[2] == const1_rtx)
+ return \"incw %0\";
+ if (operands[2] == constm1_rtx)
+ return \"decw %0\";
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (unsigned) (- INTVAL (operands[2])) < 64)
+ return \"subw2 $%n2,%0\";
+ return \"addw2 %2,%0\";
+ }
+ if (rtx_equal_p (operands[0], operands[2]))
+ return \"addw2 %1,%0\";
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (unsigned) (- INTVAL (operands[2])) < 64)
+ return \"subw3 $%n2,%1,%0\";
+ return \"addw3 %1,%2,%0\";
+}")
+
+;; QImode add: byte analogue of addhi3.
+(define_insn "addqi3"
+ [(set (match_operand:QI 0 "general_operand" "=g")
+ (plus:QI (match_operand:QI 1 "general_operand" "g")
+ (match_operand:QI 2 "general_operand" "g")))]
+ ""
+ "*
+{
+ if (rtx_equal_p (operands[0], operands[1]))
+ {
+ if (operands[2] == const1_rtx)
+ return \"incb %0\";
+ if (operands[2] == constm1_rtx)
+ return \"decb %0\";
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (unsigned) (- INTVAL (operands[2])) < 64)
+ return \"subb2 $%n2,%0\";
+ return \"addb2 %2,%0\";
+ }
+ if (rtx_equal_p (operands[0], operands[2]))
+ return \"addb2 %1,%0\";
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (unsigned) (- INTVAL (operands[2])) < 64)
+ return \"subb3 $%n2,%1,%0\";
+ return \"addb3 %1,%2,%0\";
+}")
+
+;; The add-with-carry (adwc) instruction only accepts two operands.
+;; DImode add: split into low/high longword halves; add the low parts
+;; first (setting the carry flag), then combine the high parts with adwc.
+(define_insn "adddi3"
+ [(set (match_operand:DI 0 "general_operand" "=ro>,ro>")
+ (plus:DI (match_operand:DI 1 "general_operand" "%0,ro>")
+ (match_operand:DI 2 "general_operand" "Fro,F")))]
+ ""
+ "*
+{
+ rtx low[3];
+ char *pattern;
+ int carry = 1;  /* nonzero if the low-part insn can set carry */
+
+ split_quadword_operands (operands, low, 3);
+ /* Add low parts. */
+ if (rtx_equal_p (operands[0], operands[1]))
+ {
+ if (low[2] == const0_rtx)
+ /* Should examine operand, punt if not POST_INC. */
+ pattern = \"tstl %0\", carry = 0;
+ else if (low[2] == const1_rtx)
+ pattern = \"incl %0\";
+ else
+ pattern = \"addl2 %2,%0\";
+ }
+ else
+ {
+ if (low[2] == const0_rtx)
+ pattern = \"movl %1,%0\", carry = 0;
+ else
+ pattern = \"addl3 %2,%1,%0\";
+ }
+ if (pattern)
+ output_asm_insn (pattern, low);
+ if (!carry)
+ /* If CARRY is 0, we don't have any carry value to worry about. */
+ return OUT_FCN (CODE_FOR_addsi3) (operands, insn);
+ /* %0 = C + %1 + %2 */
+ if (!rtx_equal_p (operands[0], operands[1]))
+ output_asm_insn ((operands[1] == const0_rtx
+ ? \"clrl %0\"
+ : \"movl %1,%0\"), operands);
+ return \"adwc %2,%0\";
+}")
+\f
+;;- All kinds of subtract instructions.
+
+;; Subtraction: two alternatives each -- two-operand form when the
+;; minuend is also the destination, else the three-operand form.
+;; (Only two alternatives: minus is not commutative.)
+(define_insn "subdf3"
+ [(set (match_operand:DF 0 "general_operand" "=g,g")
+ (minus:DF (match_operand:DF 1 "general_operand" "0,gF")
+ (match_operand:DF 2 "general_operand" "gF,gF")))]
+ ""
+ "@
+ sub%#2 %2,%0
+ sub%#3 %2,%1,%0")
+
+(define_insn "subsf3"
+ [(set (match_operand:SF 0 "general_operand" "=g,g")
+ (minus:SF (match_operand:SF 1 "general_operand" "0,gF")
+ (match_operand:SF 2 "general_operand" "gF,gF")))]
+ ""
+ "@
+ subf2 %2,%0
+ subf3 %2,%1,%0")
+
+(define_insn "subsi3"
+ [(set (match_operand:SI 0 "general_operand" "=g,g")
+ (minus:SI (match_operand:SI 1 "general_operand" "0,g")
+ (match_operand:SI 2 "general_operand" "g,g")))]
+ ""
+ "@
+ subl2 %2,%0
+ subl3 %2,%1,%0")
+
+(define_insn "subhi3"
+ [(set (match_operand:HI 0 "general_operand" "=g,g")
+ (minus:HI (match_operand:HI 1 "general_operand" "0,g")
+ (match_operand:HI 2 "general_operand" "g,g")))]
+ ""
+ "@
+ subw2 %2,%0
+ subw3 %2,%1,%0")
+
+(define_insn "subqi3"
+ [(set (match_operand:QI 0 "general_operand" "=g,g")
+ (minus:QI (match_operand:QI 1 "general_operand" "0,g")
+ (match_operand:QI 2 "general_operand" "g,g")))]
+ ""
+ "@
+ subb2 %2,%0
+ subb3 %2,%1,%0")
+
+;; The subtract-with-carry (sbwc) instruction only takes two operands.
+;; DImode subtract, mirror of adddi3: subtract low halves first
+;; (setting borrow/carry), then combine high halves with sbwc.
+(define_insn "subdi3"
+ [(set (match_operand:DI 0 "general_operand" "=or>,or>")
+ (minus:DI (match_operand:DI 1 "general_operand" "0,or>")
+ (match_operand:DI 2 "general_operand" "For,F")))]
+ ""
+ "*
+{
+ rtx low[3];
+ char *pattern;
+ int carry = 1;  /* nonzero if the low-part insn can set carry */
+
+ split_quadword_operands (operands, low, 3);
+ /* Subtract low parts. */
+ if (rtx_equal_p (operands[0], operands[1]))
+ {
+ if (low[2] == const0_rtx)
+ pattern = 0, carry = 0;
+ else if (low[2] == constm1_rtx)
+ pattern = \"decl %0\";
+ else
+ pattern = \"subl2 %2,%0\";
+ }
+ else
+ {
+ if (low[2] == constm1_rtx)
+ pattern = \"decl %0\";
+ else if (low[2] == const0_rtx)
+ pattern = OUT_FCN (CODE_FOR_movsi) (low, insn), carry = 0;
+ else
+ pattern = \"subl3 %2,%1,%0\";
+ }
+ if (pattern)
+ output_asm_insn (pattern, low);
+ if (carry)
+ {
+ if (!rtx_equal_p (operands[0], operands[1]))
+ return \"movl %1,%0\;sbwc %2,%0\";
+ return \"sbwc %2,%0\";
+ /* %0 = %2 - %1 - C */
+ }
+ return OUT_FCN (CODE_FOR_subsi3) (operands, insn);
+}")
+\f
+;;- Multiply instructions.
+
+;; Multiplication: three alternatives each, exploiting commutativity --
+;; two-operand form when either input is the destination, else the
+;; three-operand form.
+(define_insn "muldf3"
+ [(set (match_operand:DF 0 "general_operand" "=g,g,g")
+ (mult:DF (match_operand:DF 1 "general_operand" "0,gF,gF")
+ (match_operand:DF 2 "general_operand" "gF,0,gF")))]
+ ""
+ "@
+ mul%#2 %2,%0
+ mul%#2 %1,%0
+ mul%#3 %1,%2,%0")
+
+(define_insn "mulsf3"
+ [(set (match_operand:SF 0 "general_operand" "=g,g,g")
+ (mult:SF (match_operand:SF 1 "general_operand" "0,gF,gF")
+ (match_operand:SF 2 "general_operand" "gF,0,gF")))]
+ ""
+ "@
+ mulf2 %2,%0
+ mulf2 %1,%0
+ mulf3 %1,%2,%0")
+
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "general_operand" "=g,g,g")
+ (mult:SI (match_operand:SI 1 "general_operand" "0,g,g")
+ (match_operand:SI 2 "general_operand" "g,0,g")))]
+ ""
+ "@
+ mull2 %2,%0
+ mull2 %1,%0
+ mull3 %1,%2,%0")
+
+;; HImode multiply, same three-alternative shape as mulsi3/mulqi3.
+;; Fixed: operand 0's constraint was "=g,g," -- an empty third
+;; alternative, although operands 1 and 2 and the output template all
+;; have three.  It must be "=g,g,g" to match.
+(define_insn "mulhi3"
+ [(set (match_operand:HI 0 "general_operand" "=g,g,g")
+ (mult:HI (match_operand:HI 1 "general_operand" "0,g,g")
+ (match_operand:HI 2 "general_operand" "g,0,g")))]
+ ""
+ "@
+ mulw2 %2,%0
+ mulw2 %1,%0
+ mulw3 %1,%2,%0")
+
+;; QImode multiply, byte analogue of mulsi3.
+(define_insn "mulqi3"
+ [(set (match_operand:QI 0 "general_operand" "=g,g,g")
+ (mult:QI (match_operand:QI 1 "general_operand" "0,g,g")
+ (match_operand:QI 2 "general_operand" "g,0,g")))]
+ ""
+ "@
+ mulb2 %2,%0
+ mulb2 %1,%0
+ mulb3 %1,%2,%0")
+
+;; 32x32 -> 64 widening multiply via emul with a zero addend.
+(define_insn "mulsidi3"
+ [(set (match_operand:DI 0 "general_operand" "=g")
+ (mult:DI (sign_extend:DI
+ (match_operand:SI 1 "nonimmediate_operand" "g"))
+ (sign_extend:DI
+ (match_operand:SI 2 "nonimmediate_operand" "g"))))]
+ ""
+ "emul %1,%2,$0,%0")
+
+;; Widening multiply-accumulate: emul's addend operand is sign-extended
+;; into the 64-bit product. Unnamed -- produced by combine.
+(define_insn ""
+ [(set (match_operand:DI 0 "general_operand" "=g")
+ (plus:DI
+ (mult:DI (sign_extend:DI
+ (match_operand:SI 1 "nonimmediate_operand" "g"))
+ (sign_extend:DI
+ (match_operand:SI 2 "nonimmediate_operand" "g")))
+ (sign_extend:DI (match_operand:SI 3 "nonimmediate_operand" "g"))))]
+ ""
+ "emul %1,%2,%3,%0")
+
+;; 'F' constraint means type CONST_DOUBLE
+;; Same multiply-accumulate with a DImode constant addend; the insn
+;; condition checks the constant really is a sign-extended 32-bit value
+;; so it can be handed to emul as a longword.
+(define_insn ""
+ [(set (match_operand:DI 0 "general_operand" "=g")
+ (plus:DI
+ (mult:DI (sign_extend:DI
+ (match_operand:SI 1 "nonimmediate_operand" "g"))
+ (sign_extend:DI
+ (match_operand:SI 2 "nonimmediate_operand" "g")))
+ (match_operand:DI 3 "immediate_operand" "F")))]
+ "GET_CODE (operands[3]) == CONST_DOUBLE
+ && CONST_DOUBLE_HIGH (operands[3]) == (CONST_DOUBLE_LOW (operands[3]) >> 31)"
+ "*
+{
+ if (CONST_DOUBLE_HIGH (operands[3]))
+ operands[3] = gen_rtx (CONST_INT, VOIDmode, CONST_DOUBLE_LOW (operands[3]));
+ return \"emul %1,%2,%3,%0\";
+}")
+\f
+;;- Divide instructions.
+
+;; Division: two alternatives each (non-commutative) -- two-operand
+;; form when the dividend is also the destination, else three-operand.
+(define_insn "divdf3"
+ [(set (match_operand:DF 0 "general_operand" "=g,g")
+ (div:DF (match_operand:DF 1 "general_operand" "0,gF")
+ (match_operand:DF 2 "general_operand" "gF,gF")))]
+ ""
+ "@
+ div%#2 %2,%0
+ div%#3 %2,%1,%0")
+
+(define_insn "divsf3"
+ [(set (match_operand:SF 0 "general_operand" "=g,g")
+ (div:SF (match_operand:SF 1 "general_operand" "0,gF")
+ (match_operand:SF 2 "general_operand" "gF,gF")))]
+ ""
+ "@
+ divf2 %2,%0
+ divf3 %2,%1,%0")
+
+(define_insn "divsi3"
+ [(set (match_operand:SI 0 "general_operand" "=g,g")
+ (div:SI (match_operand:SI 1 "general_operand" "0,g")
+ (match_operand:SI 2 "general_operand" "g,g")))]
+ ""
+ "@
+ divl2 %2,%0
+ divl3 %2,%1,%0")
+
+(define_insn "divhi3"
+ [(set (match_operand:HI 0 "general_operand" "=g,g")
+ (div:HI (match_operand:HI 1 "general_operand" "0,g")
+ (match_operand:HI 2 "general_operand" "g,g")))]
+ ""
+ "@
+ divw2 %2,%0
+ divw3 %2,%1,%0")
+
+(define_insn "divqi3"
+ [(set (match_operand:QI 0 "general_operand" "=g,g")
+ (div:QI (match_operand:QI 1 "general_operand" "0,g")
+ (match_operand:QI 2 "general_operand" "g,g")))]
+ ""
+ "@
+ divb2 %2,%0
+ divb3 %2,%1,%0")
+
+;This is left out because it is very slow;
+;we are better off programming around the "lack" of this insn.
+;(define_insn "divmoddisi4"
+; [(set (match_operand:SI 0 "general_operand" "=g")
+; (div:SI (match_operand:DI 1 "general_operand" "g")
+; (match_operand:SI 2 "general_operand" "g")))
+; (set (match_operand:SI 3 "general_operand" "=g")
+; (mod:SI (match_operand:DI 1 "general_operand" "g")
+; (match_operand:SI 2 "general_operand" "g")))]
+; ""
+; "ediv %2,%1,%0,%3")
+\f
+;; Bit-and on the vax is done with a clear-bits insn.
+;; The expanders rewrite (and X Y) as (and (not X') Y) so the bic
+;; patterns below can match: a constant mask is complemented at compile
+;; time, a non-constant one gets an explicit one's-complement insn.
+(define_expand "andsi3"
+ [(set (match_operand:SI 0 "general_operand" "=g")
+ (and:SI (not:SI (match_operand:SI 1 "general_operand" "g"))
+ (match_operand:SI 2 "general_operand" "g")))]
+ ""
+ "
+{
+ if (GET_CODE (operands[1]) == CONST_INT)
+ operands[1] = gen_rtx (CONST_INT, VOIDmode, ~INTVAL (operands[1]));
+ else
+ operands[1] = expand_unop (SImode, one_cmpl_optab, operands[1], 0, 1);
+}")
+
+(define_expand "andhi3"
+ [(set (match_operand:HI 0 "general_operand" "=g")
+ (and:HI (not:HI (match_operand:HI 1 "general_operand" "g"))
+ (match_operand:HI 2 "general_operand" "g")))]
+ ""
+ "
+{
+ rtx op = operands[1];
+ /* Complement within 16 bits only. */
+ if (GET_CODE (op) == CONST_INT)
+ operands[1] = gen_rtx (CONST_INT, VOIDmode,
+ ((1 << 16) - 1) & ~INTVAL (op));
+ else
+ operands[1] = expand_unop (HImode, one_cmpl_optab, op, 0, 1);
+}")
+
+(define_expand "andqi3"
+ [(set (match_operand:QI 0 "general_operand" "=g")
+ (and:QI (not:QI (match_operand:QI 1 "general_operand" "g"))
+ (match_operand:QI 2 "general_operand" "g")))]
+ ""
+ "
+{
+ rtx op = operands[1];
+ /* Complement within 8 bits only. */
+ if (GET_CODE (op) == CONST_INT)
+ operands[1] = gen_rtx (CONST_INT, VOIDmode,
+ ((1 << 8) - 1) & ~INTVAL (op));
+ else
+ operands[1] = expand_unop (QImode, one_cmpl_optab, op, 0, 1);
+}")
+
+;; The bic (bit clear) patterns matched by the expanders above:
+;; %0 = %2 & ~%1.
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=g,g")
+ (and:SI (not:SI (match_operand:SI 1 "general_operand" "g,g"))
+ (match_operand:SI 2 "general_operand" "0,g")))]
+ ""
+ "@
+ bicl2 %1,%0
+ bicl3 %1,%2,%0")
+
+(define_insn ""
+ [(set (match_operand:HI 0 "general_operand" "=g,g")
+ (and:HI (not:HI (match_operand:HI 1 "general_operand" "g,g"))
+ (match_operand:HI 2 "general_operand" "0,g")))]
+ ""
+ "@
+ bicw2 %1,%0
+ bicw3 %1,%2,%0")
+
+(define_insn ""
+ [(set (match_operand:QI 0 "general_operand" "=g,g")
+ (and:QI (not:QI (match_operand:QI 1 "general_operand" "g,g"))
+ (match_operand:QI 2 "general_operand" "0,g")))]
+ ""
+ "@
+ bicb2 %1,%0
+ bicb3 %1,%2,%0")
+
+;; The following used to be needed because constant propagation can
+;; create them starting from the bic insn patterns above. This is no
+;; longer a problem. However, having these patterns allows optimization
+;; opportunities in combine.c.
+
+;; Plain AND with a constant mask: emitted as bic of the complemented
+;; mask (%N/%H/%B presumably print the bitwise complement at the
+;; respective width -- confirm in PRINT_OPERAND).
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=g,g")
+ (and:SI (match_operand:SI 1 "general_operand" "0,g")
+ (match_operand:SI 2 "const_int_operand" "n,n")))]
+ ""
+ "@
+ bicl2 %N2,%0
+ bicl3 %N2,%1,%0")
+
+(define_insn ""
+ [(set (match_operand:HI 0 "general_operand" "=g,g")
+ (and:HI (match_operand:HI 1 "general_operand" "0,g")
+ (match_operand:HI 2 "const_int_operand" "n,n")))]
+ ""
+ "@
+ bicw2 %H2,%0
+ bicw3 %H2,%1,%0")
+
+(define_insn ""
+ [(set (match_operand:QI 0 "general_operand" "=g,g")
+ (and:QI (match_operand:QI 1 "general_operand" "0,g")
+ (match_operand:QI 2 "const_int_operand" "n,n")))]
+ ""
+ "@
+ bicb2 %B2,%0
+ bicb3 %B2,%1,%0")
+\f
+;;- Bit set instructions.
+
+;; Inclusive-or via the bis (bit set) instructions: three alternatives
+;; exploiting commutativity, as in the add/mul patterns.
+(define_insn "iorsi3"
+ [(set (match_operand:SI 0 "general_operand" "=g,g,g")
+ (ior:SI (match_operand:SI 1 "general_operand" "0,g,g")
+ (match_operand:SI 2 "general_operand" "g,0,g")))]
+ ""
+ "@
+ bisl2 %2,%0
+ bisl2 %1,%0
+ bisl3 %2,%1,%0")
+
+(define_insn "iorhi3"
+ [(set (match_operand:HI 0 "general_operand" "=g,g,g")
+ (ior:HI (match_operand:HI 1 "general_operand" "0,g,g")
+ (match_operand:HI 2 "general_operand" "g,0,g")))]
+ ""
+ "@
+ bisw2 %2,%0
+ bisw2 %1,%0
+ bisw3 %2,%1,%0")
+
+(define_insn "iorqi3"
+ [(set (match_operand:QI 0 "general_operand" "=g,g,g")
+ (ior:QI (match_operand:QI 1 "general_operand" "0,g,g")
+ (match_operand:QI 2 "general_operand" "g,0,g")))]
+ ""
+ "@
+ bisb2 %2,%0
+ bisb2 %1,%0
+ bisb3 %2,%1,%0")
+
+;;- xor instructions.
+
+;; Exclusive-or, same three-alternative commutative shape.
+(define_insn "xorsi3"
+ [(set (match_operand:SI 0 "general_operand" "=g,g,g")
+ (xor:SI (match_operand:SI 1 "general_operand" "0,g,g")
+ (match_operand:SI 2 "general_operand" "g,0,g")))]
+ ""
+ "@
+ xorl2 %2,%0
+ xorl2 %1,%0
+ xorl3 %2,%1,%0")
+
+(define_insn "xorhi3"
+ [(set (match_operand:HI 0 "general_operand" "=g,g,g")
+ (xor:HI (match_operand:HI 1 "general_operand" "0,g,g")
+ (match_operand:HI 2 "general_operand" "g,0,g")))]
+ ""
+ "@
+ xorw2 %2,%0
+ xorw2 %1,%0
+ xorw3 %2,%1,%0")
+
+(define_insn "xorqi3"
+ [(set (match_operand:QI 0 "general_operand" "=g,g,g")
+ (xor:QI (match_operand:QI 1 "general_operand" "0,g,g")
+ (match_operand:QI 2 "general_operand" "g,0,g")))]
+ ""
+ "@
+ xorb2 %2,%0
+ xorb2 %1,%0
+ xorb3 %2,%1,%0")
+\f
+;; Negation via the mneg (move negated) instructions.
+(define_insn "negdf2"
+ [(set (match_operand:DF 0 "general_operand" "=g")
+ (neg:DF (match_operand:DF 1 "general_operand" "gF")))]
+ ""
+ "mneg%# %1,%0")
+
+(define_insn "negsf2"
+ [(set (match_operand:SF 0 "general_operand" "=g")
+ (neg:SF (match_operand:SF 1 "general_operand" "gF")))]
+ ""
+ "mnegf %1,%0")
+
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "general_operand" "=g")
+ (neg:SI (match_operand:SI 1 "general_operand" "g")))]
+ ""
+ "mnegl %1,%0")
+
+(define_insn "neghi2"
+ [(set (match_operand:HI 0 "general_operand" "=g")
+ (neg:HI (match_operand:HI 1 "general_operand" "g")))]
+ ""
+ "mnegw %1,%0")
+
+(define_insn "negqi2"
+ [(set (match_operand:QI 0 "general_operand" "=g")
+ (neg:QI (match_operand:QI 1 "general_operand" "g")))]
+ ""
+ "mnegb %1,%0")
+\f
+;; One's complement via the mcom (move complemented) instructions.
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "general_operand" "=g")
+ (not:SI (match_operand:SI 1 "general_operand" "g")))]
+ ""
+ "mcoml %1,%0")
+
+(define_insn "one_cmplhi2"
+ [(set (match_operand:HI 0 "general_operand" "=g")
+ (not:HI (match_operand:HI 1 "general_operand" "g")))]
+ ""
+ "mcomw %1,%0")
+
+(define_insn "one_cmplqi2"
+ [(set (match_operand:QI 0 "general_operand" "=g")
+ (not:QI (match_operand:QI 1 "general_operand" "g")))]
+ ""
+ "mcomb %1,%0")
+\f
+;; Arithmetic right shift on the vax works by negating the shift count,
+;; then emitting a right shift with the shift count negated. This means
+;; that all actual shift counts in the RTL will be positive. This
+;; prevents converting shifts to ZERO_EXTRACTs with negative positions,
+;; which isn't valid.
+(define_expand "ashrsi3"
+ [(set (match_operand:SI 0 "general_operand" "=g")
+ (ashiftrt:SI (match_operand:SI 1 "general_operand" "g")
+ (match_operand:QI 2 "general_operand" "g")))]
+ ""
+ "
+{
+ if (GET_CODE (operands[2]) != CONST_INT)
+ operands[2] = gen_rtx (NEG, QImode, negate_rtx (QImode, operands[2]));
+}")
+
+;; Constant right-shift count: ashl with the negated literal (%n).
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=g")
+ (ashiftrt:SI (match_operand:SI 1 "general_operand" "g")
+ (match_operand:QI 2 "const_int_operand" "n")))]
+ ""
+ "ashl $%n2,%1,%0")
+
+;; Variable right-shift count, already wrapped in NEG by the expander.
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=g")
+ (ashiftrt:SI (match_operand:SI 1 "general_operand" "g")
+ (neg:QI (match_operand:QI 2 "general_operand" "g"))))]
+ ""
+ "ashl %2,%1,%0")
+
+;; Left shift; small register shifts are strength-reduced: by 1 -> add,
+;; by 2/3 -> indexed move-address (moval/movad scale by 4 and 8).
+(define_insn "ashlsi3"
+ [(set (match_operand:SI 0 "general_operand" "=g")
+ (ashift:SI (match_operand:SI 1 "general_operand" "g")
+ (match_operand:QI 2 "general_operand" "g")))]
+ ""
+ "*
+{
+ if (operands[2] == const1_rtx && rtx_equal_p (operands[0], operands[1]))
+ return \"addl2 %0,%0\";
+ if (GET_CODE (operands[1]) == REG
+ && GET_CODE (operands[2]) == CONST_INT)
+ {
+ int i = INTVAL (operands[2]);
+ if (i == 1)
+ return \"addl3 %1,%1,%0\";
+ if (i == 2)
+ return \"moval 0[%1],%0\";
+ if (i == 3)
+ return \"movad 0[%1],%0\";
+ }
+ return \"ashl %2,%1,%0\";
+}")
+
+;; Arithmetic right shift on the vax works by negating the shift count.
+;; (Note: unlike ashrsi3 above, this negates even constant counts.)
+(define_expand "ashrdi3"
+ [(set (match_operand:DI 0 "general_operand" "=g")
+ (ashiftrt:DI (match_operand:DI 1 "general_operand" "g")
+ (match_operand:QI 2 "general_operand" "g")))]
+ ""
+ "
+{
+ operands[2] = gen_rtx (NEG, QImode, negate_rtx (QImode, operands[2]));
+}")
+
+;; Quadword left shift.
+(define_insn "ashldi3"
+ [(set (match_operand:DI 0 "general_operand" "=g")
+ (ashift:DI (match_operand:DI 1 "general_operand" "g")
+ (match_operand:QI 2 "general_operand" "g")))]
+ ""
+ "ashq %2,%1,%0")
+
+;; Quadword right shift: count already wrapped in NEG by ashrdi3.
+(define_insn ""
+ [(set (match_operand:DI 0 "general_operand" "=g")
+ (ashiftrt:DI (match_operand:DI 1 "general_operand" "g")
+ (neg:QI (match_operand:QI 2 "general_operand" "g"))))]
+ ""
+ "ashq %2,%1,%0")
+
+;; Rotate right on the vax works by negating the shift count.
+;; (rotl is the only rotate instruction; rotate-right is a rotate-left
+;; by the negated count.)
+(define_expand "rotrsi3"
+ [(set (match_operand:SI 0 "general_operand" "=g")
+ (rotatert:SI (match_operand:SI 1 "general_operand" "g")
+ (match_operand:QI 2 "general_operand" "g")))]
+ ""
+ "
+{
+ if (GET_CODE (operands[2]) != CONST_INT)
+ operands[2] = gen_rtx (NEG, QImode, negate_rtx (QImode, operands[2]));
+}")
+
+(define_insn "rotlsi3"
+ [(set (match_operand:SI 0 "general_operand" "=g")
+ (rotate:SI (match_operand:SI 1 "general_operand" "g")
+ (match_operand:QI 2 "general_operand" "g")))]
+ ""
+ "rotl %2,%1,%0")
+
+;; Constant rotate-right count (%R presumably prints 32 minus the
+;; count, i.e. the equivalent left-rotate -- confirm in PRINT_OPERAND).
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=g")
+ (rotatert:SI (match_operand:SI 1 "general_operand" "g")
+ (match_operand:QI 2 "const_int_operand" "n")))]
+ ""
+ "rotl $%R2,%1,%0")
+
+;; Variable rotate-right count, already wrapped in NEG by the expander.
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=g")
+ (rotatert:SI (match_operand:SI 1 "general_operand" "g")
+ (neg:QI (match_operand:QI 2 "general_operand" "g"))))]
+ ""
+ "rotl %2,%1,%0")
+
+;This insn is probably slower than a multiply and an add.
+;(define_insn ""
+; [(set (match_operand:SI 0 "general_operand" "=g")
+; (mult:SI (plus:SI (match_operand:SI 1 "general_operand" "g")
+; (match_operand:SI 2 "general_operand" "g"))
+; (match_operand:SI 3 "general_operand" "g")))]
+; ""
+; "index %1,$0x80000000,$0x7fffffff,%3,%2,%0")
+\f
+;; Special cases of bit-field insns which we should
+;; recognize in preference to the general case.
+;; These handle aligned 8-bit and 16-bit fields,
+;; which can usually be done with move instructions.
+
+;; Store into an aligned 8- or 16-bit field.  The condition requires the
+;; width to be exactly 8 or 16 and the bit position to be a multiple of
+;; the width.  For a register destination with a nonzero position we must
+;; still use insv; otherwise the field is byte-addressable, so we offset
+;; the memory operand to the field's byte and use a plain movb/movw.
+(define_insn ""
+ [(set (zero_extract:SI (match_operand:SI 0 "general_operand" "+ro")
+ (match_operand:QI 1 "const_int_operand" "n")
+ (match_operand:SI 2 "const_int_operand" "n"))
+ (match_operand:SI 3 "general_operand" "g"))]
+ "(INTVAL (operands[1]) == 8 || INTVAL (operands[1]) == 16)
+ && INTVAL (operands[2]) % INTVAL (operands[1]) == 0
+ && (GET_CODE (operands[0]) == REG
+ || ! mode_dependent_address_p (XEXP (operands[0], 0)))"
+ "*
+{
+ if (REG_P (operands[0]))
+ {
+ if (INTVAL (operands[2]) != 0)
+ return \"insv %3,%2,%1,%0\";
+ }
+ else
+ operands[0]
+ = adj_offsettable_operand (operands[0], INTVAL (operands[2]) / 8);
+
+ if (INTVAL (operands[1]) == 8)
+ return \"movb %3,%0\";
+ return \"movw %3,%0\";
+}")
+
+;; Zero-extend an aligned 8- or 16-bit field.  Same alignment conditions
+;; as the store pattern above.  A register source with a nonzero bit
+;; position needs extzv; otherwise offset the memory operand to the
+;; field's byte and use movzbl/movzwl.  The earlyclobber `&' on the
+;; output keeps it from overlapping the (possibly multi-register
+;; addressed) input.
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=&g")
+ (zero_extract:SI (match_operand:SI 1 "general_operand" "ro")
+ (match_operand:QI 2 "const_int_operand" "n")
+ (match_operand:SI 3 "const_int_operand" "n")))]
+ "(INTVAL (operands[2]) == 8 || INTVAL (operands[2]) == 16)
+ && INTVAL (operands[3]) % INTVAL (operands[2]) == 0
+ && (GET_CODE (operands[1]) == REG
+ || ! mode_dependent_address_p (XEXP (operands[1], 0)))"
+ "*
+{
+ if (REG_P (operands[1]))
+ {
+ if (INTVAL (operands[3]) != 0)
+ return \"extzv %3,%2,%1,%0\";
+ }
+ else
+ operands[1]
+ = adj_offsettable_operand (operands[1], INTVAL (operands[3]) / 8);
+
+ if (INTVAL (operands[2]) == 8)
+ return \"movzbl %1,%0\";
+ return \"movzwl %1,%0\";
+}")
+
+;; Sign-extend an aligned 8- or 16-bit field: the signed counterpart of
+;; the pattern above.  Register source at nonzero position uses extv;
+;; the byte-aligned memory case uses cvtbl/cvtwl on the adjusted address.
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=g")
+ (sign_extract:SI (match_operand:SI 1 "general_operand" "ro")
+ (match_operand:QI 2 "const_int_operand" "n")
+ (match_operand:SI 3 "const_int_operand" "n")))]
+ "(INTVAL (operands[2]) == 8 || INTVAL (operands[2]) == 16)
+ && INTVAL (operands[3]) % INTVAL (operands[2]) == 0
+ && (GET_CODE (operands[1]) == REG
+ || ! mode_dependent_address_p (XEXP (operands[1], 0)))"
+ "*
+{
+ if (REG_P (operands[1]))
+ {
+ if (INTVAL (operands[3]) != 0)
+ return \"extv %3,%2,%1,%0\";
+ }
+ else
+ operands[1]
+ = adj_offsettable_operand (operands[1], INTVAL (operands[3]) / 8);
+
+ if (INTVAL (operands[2]) == 8)
+ return \"cvtbl %1,%0\";
+ return \"cvtwl %1,%0\";
+}")
+\f
+;; Register-only SImode cases of bit-field insns.
+
+;; Compare a signed bit-field taken from a register against operand 3,
+;; setting the condition codes (cc0).
+(define_insn ""
+ [(set (cc0)
+ (compare
+ (sign_extract:SI (match_operand:SI 0 "nonmemory_operand" "r")
+ (match_operand:QI 1 "general_operand" "g")
+ (match_operand:SI 2 "general_operand" "g"))
+ (match_operand:SI 3 "general_operand" "g")))]
+ ""
+ "cmpv %2,%1,%0,%3")
+
+;; Compare an unsigned bit-field taken from a register against
+;; operand 3, setting the condition codes (cc0).
+(define_insn ""
+ [(set (cc0)
+ (compare
+ (zero_extract:SI (match_operand:SI 0 "nonmemory_operand" "r")
+ (match_operand:QI 1 "general_operand" "g")
+ (match_operand:SI 2 "general_operand" "g"))
+ (match_operand:SI 3 "general_operand" "g")))]
+ ""
+ "cmpzv %2,%1,%0,%3")
+
+;; Extract a signed bit-field from a register with variable width and
+;; position (extv position,size,base,dst).
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=g")
+ (sign_extract:SI (match_operand:SI 1 "nonmemory_operand" "r")
+ (match_operand:QI 2 "general_operand" "g")
+ (match_operand:SI 3 "general_operand" "g")))]
+ ""
+ "extv %3,%2,%1,%0")
+
+;; Extract an unsigned bit-field from a register with variable width and
+;; position.
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=g")
+ (zero_extract:SI (match_operand:SI 1 "nonmemory_operand" "r")
+ (match_operand:QI 2 "general_operand" "g")
+ (match_operand:SI 3 "general_operand" "g")))]
+ ""
+ "extzv %3,%2,%1,%0")
+
+;; Non-register cases.
+;; nonimmediate_operand is used to make sure that mode-ambiguous cases
+;; don't match these (and therefore match the cases above instead).
+
+;; Compare a signed bit-field whose base is in memory or a register;
+;; the QImode base mode plus nonimmediate_operand keeps mode-ambiguous
+;; cases matching the register-only SImode patterns above instead.
+(define_insn ""
+ [(set (cc0)
+ (compare
+ (sign_extract:SI (match_operand:QI 0 "nonimmediate_operand" "rm")
+ (match_operand:QI 1 "general_operand" "g")
+ (match_operand:SI 2 "general_operand" "g"))
+ (match_operand:SI 3 "general_operand" "g")))]
+ ""
+ "cmpv %2,%1,%0,%3")
+
+;; Unsigned counterpart of the preceding memory bit-field compare.
+(define_insn ""
+ [(set (cc0)
+ (compare
+ (zero_extract:SI (match_operand:QI 0 "nonimmediate_operand" "rm")
+ (match_operand:QI 1 "general_operand" "g")
+ (match_operand:SI 2 "general_operand" "g"))
+ (match_operand:SI 3 "general_operand" "g")))]
+ ""
+ "cmpzv %2,%1,%0,%3")
+
+;; Named extv pattern: extract a signed bit-field from a memory (or
+;; register) base.  This is the general case the compiler expands to.
+(define_insn "extv"
+ [(set (match_operand:SI 0 "general_operand" "=g")
+ (sign_extract:SI (match_operand:QI 1 "nonimmediate_operand" "rm")
+ (match_operand:QI 2 "general_operand" "g")
+ (match_operand:SI 3 "general_operand" "g")))]
+ ""
+ "extv %3,%2,%1,%0")
+
+;; Named extzv pattern: extract an unsigned bit-field from a memory (or
+;; register) base.
+(define_insn "extzv"
+ [(set (match_operand:SI 0 "general_operand" "=g")
+ (zero_extract:SI (match_operand:QI 1 "nonimmediate_operand" "rm")
+ (match_operand:QI 2 "general_operand" "g")
+ (match_operand:SI 3 "general_operand" "g")))]
+ ""
+ "extzv %3,%2,%1,%0")
+
+;; Named insv pattern: store operand 3 into a bit-field of width
+;; operand 1 at bit position operand 2 within operand 0.
+(define_insn "insv"
+ [(set (zero_extract:SI (match_operand:QI 0 "general_operand" "+g")
+ (match_operand:QI 1 "general_operand" "g")
+ (match_operand:SI 2 "general_operand" "g"))
+ (match_operand:SI 3 "general_operand" "g"))]
+ ""
+ "insv %3,%2,%1,%0")
+
+;; Bit-field store where the destination base is an SImode register.
+(define_insn ""
+ [(set (zero_extract:SI (match_operand:SI 0 "register_operand" "+r")
+ (match_operand:QI 1 "general_operand" "g")
+ (match_operand:SI 2 "general_operand" "g"))
+ (match_operand:SI 3 "general_operand" "g"))]
+ ""
+ "insv %3,%2,%1,%0")
+\f
+;; Unconditional jump to a label.
+(define_insn "jump"
+ [(set (pc)
+ (label_ref (match_operand 0 "" "")))]
+ ""
+ "jbr %l0")
+
+;; Conditional branches.  Each tests the condition codes (cc0) set by a
+;; preceding compare or test and jumps to the label when the condition
+;; holds; otherwise execution falls through (pc).  The signed conditions
+;; map to jeql/jneq/jgtr/jlss/jgeq/jleq and the unsigned variants append
+;; a `u' (jgtru, jlssu, jgequ, jlequ).
+(define_insn "beq"
+ [(set (pc)
+ (if_then_else (eq (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "jeql %l0")
+
+(define_insn "bne"
+ [(set (pc)
+ (if_then_else (ne (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "jneq %l0")
+
+(define_insn "bgt"
+ [(set (pc)
+ (if_then_else (gt (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "jgtr %l0")
+
+(define_insn "bgtu"
+ [(set (pc)
+ (if_then_else (gtu (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "jgtru %l0")
+
+(define_insn "blt"
+ [(set (pc)
+ (if_then_else (lt (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "jlss %l0")
+
+(define_insn "bltu"
+ [(set (pc)
+ (if_then_else (ltu (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "jlssu %l0")
+
+(define_insn "bge"
+ [(set (pc)
+ (if_then_else (ge (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "jgeq %l0")
+
+(define_insn "bgeu"
+ [(set (pc)
+ (if_then_else (geu (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "jgequ %l0")
+
+(define_insn "ble"
+ [(set (pc)
+ (if_then_else (le (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "jleq %l0")
+
+(define_insn "bleu"
+ [(set (pc)
+ (if_then_else (leu (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "jlequ %l0")
+
+;; Recognize reversed jumps.
+;; Reversed conditional jump: branch target and fall-through are
+;; swapped, so emit the branch on the negated condition (%C0).
+(define_insn ""
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(cc0)
+ (const_int 0)])
+ (pc)
+ (label_ref (match_operand 1 "" ""))))]
+ ""
+ "j%C0 %l1") ; %C0 negates condition
+\f
+;; Recognize jbs, jlbs, jbc and jlbc instructions.
+
+;; Branch on a single bit of the operand.  Each pattern has two
+;; constraint alternatives: the first (position matches constraint `I',
+;; presumably a small constant -- confirm against CONST_OK_FOR_LETTER_P)
+;; uses the implicit low-bit forms jlbs/jlbc; the second supplies an
+;; explicit bit position to jbs/jbc.  The first pair handles
+;; memory-or-register QImode bases; the second pair handles SImode
+;; register bases.
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (ne (zero_extract:SI (match_operand:QI 0 "nonimmediate_operand" "g,g")
+ (const_int 1)
+ (match_operand:SI 1 "general_operand" "I,g"))
+ (const_int 0))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ ""
+ "@
+ jlbs %0,%l2
+ jbs %1,%0,%l2")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (eq (zero_extract:SI (match_operand:QI 0 "nonimmediate_operand" "g,g")
+ (const_int 1)
+ (match_operand:SI 1 "general_operand" "I,g"))
+ (const_int 0))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ ""
+ "@
+ jlbc %0,%l2
+ jbc %1,%0,%l2")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (ne (zero_extract:SI (match_operand:SI 0 "register_operand" "r,r")
+ (const_int 1)
+ (match_operand:SI 1 "general_operand" "I,g"))
+ (const_int 0))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ ""
+ "@
+ jlbs %0,%l2
+ jbs %1,%0,%l2")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (eq (zero_extract:SI (match_operand:SI 0 "register_operand" "r,r")
+ (const_int 1)
+ (match_operand:SI 1 "general_operand" "I,g"))
+ (const_int 0))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ ""
+ "@
+ jlbc %0,%l2
+ jbc %1,%0,%l2")
+\f
+;; Subtract-and-jump and Add-and-jump insns.
+;; These are not used when output is for the Unix assembler
+;; because it does not know how to modify them to reach far.
+
+;; Normal sob insns.
+
+;; Subtract-one-and-branch: decrement operand 0 and jump if (before the
+;; decrement) it compared greater than 1 (jsobgtr) or >= 1 (jsobgeq),
+;; i.e. the decremented value is > 0 / >= 0.  Disabled under the Unix
+;; assembler, which cannot relax these to reach distant labels.
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (gt (match_operand:SI 0 "general_operand" "+g")
+ (const_int 1))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0)
+ (const_int -1)))]
+ "!TARGET_UNIX_ASM"
+ "jsobgtr %0,%l1")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (ge (match_operand:SI 0 "general_operand" "+g")
+ (const_int 1))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0)
+ (const_int -1)))]
+ "!TARGET_UNIX_ASM"
+ "jsobgeq %0,%l1")
+
+;; Normal aob insns. Define a version for when operands[1] is a constant.
+;; Add-one-and-branch: increment operand 0 and jump while it is below
+;; (jaoblss) or not above (jaobleq) the limit operand 1.  The first
+;; pattern of each pair matches the RTL form with an explicit (plus x 1)
+;; in the comparison; the second matches the form the combiner produces
+;; when the limit is a constant and the +1 has been folded into it --
+;; there %P1 re-adjusts the printed limit (presumably INTVAL+1; confirm
+;; against the `P' case in PRINT_OPERAND).  Also disabled under the
+;; Unix assembler.
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (lt (plus:SI (match_operand:SI 0 "general_operand" "+g")
+ (const_int 1))
+ (match_operand:SI 1 "general_operand" "g"))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0)
+ (const_int 1)))]
+ "!TARGET_UNIX_ASM"
+ "jaoblss %1,%0,%l2")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (lt (match_operand:SI 0 "general_operand" "+g")
+ (match_operand:SI 1 "general_operand" "g"))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0)
+ (const_int 1)))]
+ "!TARGET_UNIX_ASM && GET_CODE (operands[1]) == CONST_INT"
+ "jaoblss %P1,%0,%l2")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (le (plus:SI (match_operand:SI 0 "general_operand" "+g")
+ (const_int 1))
+ (match_operand:SI 1 "general_operand" "g"))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0)
+ (const_int 1)))]
+ "!TARGET_UNIX_ASM"
+ "jaobleq %1,%0,%l2")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (le (match_operand:SI 0 "general_operand" "+g")
+ (match_operand:SI 1 "general_operand" "g"))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0)
+ (const_int 1)))]
+ "!TARGET_UNIX_ASM && GET_CODE (operands[1]) == CONST_INT"
+ "jaobleq %P1,%0,%l2")
+
+;; Something like a sob insn, but compares against -1.
+;; This finds `while (foo--)' which was changed to `while (--foo != -1)'.
+
+;; Decrement and branch while the pre-decrement value was nonzero,
+;; i.e. while the decremented value != -1.  Emitted as decl followed by
+;; an unsigned jgequ: the unsigned no-borrow condition after the
+;; decrement is exactly "old value was not 0".
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (ne (match_operand:SI 0 "general_operand" "g")
+ (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0)
+ (const_int -1)))]
+ ""
+ "decl %0\;jgequ %l1")
+\f
+;; Note that operand 1 is total size of args, in bytes,
+;; and what the call insn wants is the number of words.
+;; Call and pop arguments.  Operand 1 is the argument size in BYTES;
+;; `calls' wants the count in longwords and only has a one-byte count
+;; field, so for non-constant or > 255*4 byte sizes we emit calls $0
+;; and pop the stack explicitly with addl2.
+(define_insn "call_pop"
+ [(call (match_operand:QI 0 "memory_operand" "m")
+ (match_operand:QI 1 "general_operand" "g"))
+ (set (reg:SI 14) (plus:SI (reg:SI 14)
+ (match_operand:SI 3 "immediate_operand" "i")))]
+ ""
+ "*
+ if (GET_CODE (operands[1]) != CONST_INT || INTVAL (operands[1]) > 255 * 4)
+ /* Vax `calls' really uses only one byte of #args, so pop explicitly. */
+ return \"calls $0,%0\;addl2 %1,sp\";
+ operands[1] = gen_rtx (CONST_INT, VOIDmode, (INTVAL (operands[1]) + 3)/ 4);
+ return \"calls %1,%0\";
+")
+
+;; Same as call_pop, but the call's return value is stored in operand 0.
+;; Operand 2 is the argument size in bytes.
+(define_insn "call_value_pop"
+ [(set (match_operand 0 "" "=g")
+ (call (match_operand:QI 1 "memory_operand" "m")
+ (match_operand:QI 2 "general_operand" "g")))
+ (set (reg:SI 14) (plus:SI (reg:SI 14)
+ (match_operand:SI 4 "immediate_operand" "i")))]
+ ""
+ "*
+ if (GET_CODE (operands[2]) != CONST_INT || INTVAL (operands[2]) > 255 * 4)
+ /* Vax `calls' really uses only one byte of #args, so pop explicitly. */
+ return \"calls $0,%1\;addl2 %2,sp\";
+ operands[2] = gen_rtx (CONST_INT, VOIDmode, (INTVAL (operands[2]) + 3)/ 4);
+ return \"calls %2,%1\";
+")
+
+;; Define another set of these for the case of functions with no
+;; operands. In that case, combine may simplify the adjustment of sp.
+;; Variant of call_pop matched when combine has simplified the stack
+;; adjustment to (set sp sp), i.e. a call with no arguments to pop.
+(define_insn ""
+ [(call (match_operand:QI 0 "memory_operand" "m")
+ (match_operand:QI 1 "general_operand" "g"))
+ (set (reg:SI 14) (reg:SI 14))]
+ ""
+ "*
+ if (GET_CODE (operands[1]) != CONST_INT || INTVAL (operands[1]) > 255 * 4)
+ /* Vax `calls' really uses only one byte of #args, so pop explicitly. */
+ return \"calls $0,%0\;addl2 %1,sp\";
+ operands[1] = gen_rtx (CONST_INT, VOIDmode, (INTVAL (operands[1]) + 3)/ 4);
+ return \"calls %1,%0\";
+")
+
+;; Value-returning variant of the preceding no-pop call pattern.
+(define_insn ""
+ [(set (match_operand 0 "" "=g")
+ (call (match_operand:QI 1 "memory_operand" "m")
+ (match_operand:QI 2 "general_operand" "g")))
+ (set (reg:SI 14) (reg:SI 14))]
+ ""
+ "*
+ if (GET_CODE (operands[2]) != CONST_INT || INTVAL (operands[2]) > 255 * 4)
+ /* Vax `calls' really uses only one byte of #args, so pop explicitly. */
+ return \"calls $0,%1\;addl2 %2,sp\";
+ operands[2] = gen_rtx (CONST_INT, VOIDmode, (INTVAL (operands[2]) + 3)/ 4);
+ return \"calls %2,%1\";
+")
+
+;; Return from the current function.
+(define_insn "return"
+ [(return)]
+ ""
+ "ret")
+
+;; No-operation instruction.
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "nop")
+
+;; Jump to an address held in a register.  The condition also admits
+;; offsettable memory references (reloads can then satisfy the `r'
+;; constraint) but rejects mode-dependent addresses.
+(define_insn "indirect_jump"
+ [(set (pc) (match_operand:SI 0 "general_operand" "r"))]
+ "(GET_CODE (operands[0]) != MEM || offsettable_memref_p (operands[0]))"
+ "jmp (%0)")
+
+;; Table jump for switch statements.  Operand 0 is the index, operand 1
+;; the lower bound, operand 2 the range (upper - lower).  `casel' does
+;; the bounds check itself and dispatches through the table of HImode
+;; pc-relative offsets that the compiler places after the instruction
+;; (operand 3 labels the table).
+(define_insn "casesi"
+ [(set (pc)
+ (if_then_else (leu (minus:SI (match_operand:SI 0 "general_operand" "g")
+ (match_operand:SI 1 "general_operand" "g"))
+ (match_operand:SI 2 "general_operand" "g"))
+ (plus:SI (sign_extend:SI
+ (mem:HI
+ (plus:SI (pc)
+ (mult:SI (minus:SI (match_dup 0)
+ (match_dup 1))
+ (const_int 2)))))
+ (label_ref:SI (match_operand 3 "" "")))
+ (pc)))]
+ ""
+ "casel %0,%1,%2")
+
+;; This used to arise from the preceding by simplification
+;; if operand 1 is zero. Perhaps it is no longer necessary.
+;; casesi with the lower bound simplified away to zero; emit casel with
+;; an explicit $0 base.  NOTE(review): operand numbering intentionally
+;; skips 2 so the label stays operand 3, mirroring the pattern above.
+(define_insn ""
+ [(set (pc)
+ (if_then_else (leu (match_operand:SI 0 "general_operand" "g")
+ (match_operand:SI 1 "general_operand" "g"))
+ (plus:SI (sign_extend:SI
+ (mem:HI
+ (plus:SI (pc)
+ (mult:SI (minus:SI (match_dup 0)
+ (const_int 0))
+ (const_int 2)))))
+ (label_ref:SI (match_operand 3 "" "")))
+ (pc)))]
+ ""
+ "casel %0,$0,%1")
+\f
+;;- load or push effective address
+;; These come after the move and add/sub patterns
+;; because we don't want pushl $1 turned into pushad 1.
+;; or addl3 r1,r2,r3 turned into movab 0(r1)[r2],r3.
+
+;; Load (or push, via the `<' pre-decrement constraint alternative) the
+;; effective address of operand 1.  One pattern per addressed mode so
+;; the right access size suffix is used: b/w/l/q/f/d for QI/HI/SI/DI/
+;; SF/DF respectively.  %a1 prints the operand as an address rather
+;; than as its contents.
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=<,g")
+ (match_operand:QI 1 "address_operand" "p,p"))]
+ ""
+ "@
+ pushab %a1
+ movab %a1,%0")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=<,g")
+ (match_operand:HI 1 "address_operand" "p,p"))]
+ ""
+ "@
+ pushaw %a1
+ movaw %a1,%0")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=<,g")
+ (match_operand:SI 1 "address_operand" "p,p"))]
+ ""
+ "@
+ pushal %a1
+ moval %a1,%0")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=<,g")
+ (match_operand:DI 1 "address_operand" "p,p"))]
+ ""
+ "@
+ pushaq %a1
+ movaq %a1,%0")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=<,g")
+ (match_operand:SF 1 "address_operand" "p,p"))]
+ ""
+ "@
+ pushaf %a1
+ movaf %a1,%0")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=<,g")
+ (match_operand:DF 1 "address_operand" "p,p"))]
+ ""
+ "@
+ pushad %a1
+ movad %a1,%0")
+\f
+;; These used to be peepholes, but it is more straightforward to do them
+;; as single insns. However, we must force the output to be a register
+;; if it is not an offsettable address so that we know that we can assign
+;; to it twice.
+
+;; If we had a good way of evaluating the relative costs, these could be
+;; machine-independent.
+
+;; Optimize extzv ...,z; andl2 ...,z
+;; or ashl ...,z; andl2 ...,z
+;; with other operands constant. This is what the combiner converts the
+;; above sequences to before attempting to recognize the new insn.
+
+;; Right-shift then mask, recognized as rotate + bit-clear: rotl by the
+;; complemented count (%R2) brings the wanted bits to the bottom, then
+;; bicl2 with the complemented mask (%N3) clears the rest.  The insn
+;; condition guarantees the mask keeps only bits that survive the
+;; shift; the body further trims the mask if needed.
+;; NOTE(review): `1 << (32 - INTVAL (operands[2]))' is undefined in
+;; host C when the shift count is 0 (a 32-bit shift) -- presumably the
+;; combiner never produces a zero shift here; confirm.
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=ro")
+ (and:SI (ashiftrt:SI (match_operand:SI 1 "general_operand" "g")
+ (match_operand:QI 2 "const_int_operand" "n"))
+ (match_operand:SI 3 "const_int_operand" "n")))]
+ "(INTVAL (operands[3]) & ~((1 << (32 - INTVAL (operands[2]))) - 1)) == 0"
+ "*
+{
+ unsigned long mask1 = INTVAL (operands[3]);
+ unsigned long mask2 = (1 << (32 - INTVAL (operands[2]))) - 1;
+
+ if ((mask1 & mask2) != mask1)
+ operands[3] = gen_rtx (CONST_INT, VOIDmode, mask1 & mask2);
+
+ return \"rotl %R2,%1,%0\;bicl2 %N3,%0\";
+}")
+
+;; left-shift and mask
+;; The only case where `ashl' is better is if the mask only turns off
+;; bits that the ashl would anyways, in which case it should have been
+;; optimized away.
+
+;; Left-shift then mask, as rotate + bit-clear.  The mask is first
+;; narrowed to exclude the low bits that the left shift would have
+;; zeroed anyway (a rotate leaves them set), then rotl/bicl2 as above.
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=ro")
+ (and:SI (ashift:SI (match_operand:SI 1 "general_operand" "g")
+ (match_operand:QI 2 "const_int_operand" "n"))
+ (match_operand:SI 3 "const_int_operand" "n")))]
+ ""
+ "*
+{
+ operands[3] = gen_rtx (CONST_INT, VOIDmode,
+ INTVAL (operands[3]) & ~((1 << INTVAL (operands[2])) - 1));
+ return \"rotl %2,%1,%0\;bicl2 %N3,%0\";
+}")
+\f
+;;- Local variables:
+;;- mode:emacs-lisp
+;;- comment-start: ";;- "
+;;- eval: (set-syntax-table (copy-sequence (syntax-table)))
+;;- eval: (modify-syntax-entry ?[ "(]")
+;;- eval: (modify-syntax-entry ?] ")[")
+;;- eval: (modify-syntax-entry ?{ "(}")
+;;- eval: (modify-syntax-entry ?} "){")
+;;- End: