1 ;;- Machine description for SPARC chip for GNU C compiler
2 ;; Copyright (C) 1987, 88, 89, 92-96, 1997 Free Software Foundation, Inc.
3 ;; Contributed by Michael Tiemann (tiemann@cygnus.com)
4 ;; 64 bit SPARC V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
5 ;; at Cygnus Support.
6
7 ;; This file is part of GNU CC.
8
9 ;; GNU CC is free software; you can redistribute it and/or modify
10 ;; it under the terms of the GNU General Public License as published by
11 ;; the Free Software Foundation; either version 2, or (at your option)
12 ;; any later version.
13
14 ;; GNU CC is distributed in the hope that it will be useful,
15 ;; but WITHOUT ANY WARRANTY; without even the implied warranty of
16 ;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 ;; GNU General Public License for more details.
18
19 ;; You should have received a copy of the GNU General Public License
20 ;; along with GNU CC; see the file COPYING. If not, write to
21 ;; the Free Software Foundation, 59 Temple Place - Suite 330,
22 ;; Boston, MA 02111-1307, USA.
23
24 ;;- See file "rtl.def" for documentation on define_insn, match_*, et al.
25
26 ;; The upper 32 fp regs on the v9 can't hold SFmode values. To deal with this
27 ;; a second register class, EXTRA_FP_REGS, exists for the v9 chip. The name
28 ;; is a bit of a misnomer as it covers all 64 fp regs. The corresponding
29 ;; constraint letter is 'e'. To avoid any confusion, 'e' is used instead of
30 ;; 'f' for all DF/TFmode values, including those that are specific to the v8.
31 ;;
32 ;; -mlive-g0 is *not* supported for TARGET_ARCH64, so we don't bother to
33 ;; test TARGET_LIVE_G0 if we have TARGET_ARCH64.
34
35 ;; Attribute for cpu type.
36 ;; These must match the values for enum processor_type in sparc.h.
37 (define_attr "cpu" "v7,cypress,v8,supersparc,sparclite,f930,f934,sparclet,tsc701,v8plus,v9,ultrasparc"
38 (const (symbol_ref "sparc_cpu_attr")))
39
40 ;; Attribute for the instruction set.
41 ;; At present we only need to distinguish v9/!v9, but for clarity we
42 ;; test TARGET_V8 too.
43 (define_attr "isa" "v6,v8,v9,sparclet"
44 (const
45 (cond [(symbol_ref "TARGET_V9") (const_string "v9")
46 (symbol_ref "TARGET_V8") (const_string "v8")
47 (symbol_ref "TARGET_SPARCLET") (const_string "sparclet")]
48 (const_string "v6"))))
49
50 ;; Architecture size.
51 (define_attr "arch" "arch32bit,arch64bit"
52 (const
53 (cond [(symbol_ref "TARGET_ARCH64") (const_string "arch64bit")]
54 (const_string "arch32bit"))))
55
56 ;; Whether -mlive-g0 is in effect.
57 (define_attr "live_g0" "no,yes"
58 (const
59 (cond [(symbol_ref "TARGET_LIVE_G0") (const_string "yes")]
60 (const_string "no"))))
61
62 ;; Insn type. Used to default other attribute values.
63
64 ;; type "unary" insns have one input operand (1) and one output operand (0)
65 ;; type "binary" insns have two input operands (1,2) and one output (0)
66 ;; type "compare" insns have one or two input operands (0,1) and no output
67 ;; type "call_no_delay_slot" is a call followed by an unimp instruction.
68
69 (define_attr "type"
70 "move,unary,binary,compare,load,store,ialu,shift,uncond_branch,branch,call,call_no_delay_slot,address,imul,fpload,fpstore,fp,fpcmp,fpmul,fpdivs,fpdivd,fpsqrt,cmove,multi,misc"
71 (const_string "binary"))
72
73 ;; Set true if the insn uses a call-clobbered intermediate register.
74 (define_attr "use_clobbered" "false,true"
75 (if_then_else (and (eq_attr "type" "address")
76 (match_operand 0 "clobbered_register" ""))
77 (const_string "true")
78 (const_string "false")))
79
80 ;; Length (in # of insns).
81 (define_attr "length" ""
82 (cond [(eq_attr "type" "load,fpload")
83 (if_then_else (match_operand 1 "symbolic_memory_operand" "")
84 (const_int 2) (const_int 1))
85
86 (eq_attr "type" "store,fpstore")
87 (if_then_else (match_operand 0 "symbolic_memory_operand" "")
88 (const_int 2) (const_int 1))
89
90 (eq_attr "type" "address") (const_int 2)
91
92 (eq_attr "type" "binary")
93 (if_then_else (ior (match_operand 2 "arith_operand" "")
94 (match_operand 2 "arith_double_operand" ""))
95 (const_int 1) (const_int 3))
96
97 (eq_attr "type" "multi") (const_int 2)
98
99 (eq_attr "type" "move,unary")
100 (if_then_else (ior (match_operand 1 "arith_operand" "")
101 (match_operand 1 "arith_double_operand" ""))
102 (const_int 1) (const_int 2))]
103
104 (const_int 1)))
105
106 (define_asm_attributes
107 [(set_attr "length" "1")
108 (set_attr "type" "multi")])
109
110 ;; Attributes for instruction and branch scheduling
111
112 (define_attr "in_call_delay" "false,true"
113 (cond [(eq_attr "type" "uncond_branch,branch,call,call_no_delay_slot,multi")
114 (const_string "false")
115 (eq_attr "type" "load,fpload,store,fpstore")
116 (if_then_else (eq_attr "length" "1")
117 (const_string "true")
118 (const_string "false"))
119 (eq_attr "type" "address")
120 (if_then_else (eq_attr "use_clobbered" "false")
121 (const_string "true")
122 (const_string "false"))]
123 (if_then_else (eq_attr "length" "1")
124 (const_string "true")
125 (const_string "false"))))
126
127 (define_delay (eq_attr "type" "call")
128 [(eq_attr "in_call_delay" "true") (nil) (nil)])
129
130 ;; ??? Should implement the notion of predelay slots for floating point
131 ;; branches. This would allow us to remove the nop always inserted before
132 ;; a floating point branch.
133
134 ;; ??? It is OK for fill_simple_delay_slots to put load/store instructions
135 ;; in a delay slot, but it is not OK for fill_eager_delay_slots to do so.
136 ;; This is because doing so will add several pipeline stalls to the path
137 ;; that the load/store did not come from. Unfortunately, there is no way
138 ;; to prevent fill_eager_delay_slots from using load/store without completely
139 ;; disabling them.  For the SPEC benchmark set, this is a serious loss,
140 ;; because it prevents us from moving back the final store of inner loops.
141
142 (define_attr "in_branch_delay" "false,true"
143 (if_then_else (and (eq_attr "type" "!uncond_branch,branch,call,call_no_delay_slot,multi")
144 (eq_attr "length" "1"))
145 (const_string "true")
146 (const_string "false")))
147
148 (define_attr "in_uncond_branch_delay" "false,true"
149 (if_then_else (and (eq_attr "type" "!uncond_branch,branch,call,call_no_delay_slot,multi")
150 (eq_attr "length" "1"))
151 (const_string "true")
152 (const_string "false")))
153
154 (define_attr "in_annul_branch_delay" "false,true"
155 (if_then_else (and (eq_attr "type" "!uncond_branch,branch,call,call_no_delay_slot,multi")
156 (eq_attr "length" "1"))
157 (const_string "true")
158 (const_string "false")))
159
160 (define_delay (eq_attr "type" "branch")
161 [(eq_attr "in_branch_delay" "true")
162 (nil) (eq_attr "in_annul_branch_delay" "true")])
163
164 (define_delay (eq_attr "type" "uncond_branch")
165 [(eq_attr "in_uncond_branch_delay" "true")
166 (nil) (nil)])
167
168 ;; Function units of the SPARC
169
170 ;; (define_function_unit {name} {num-units} {n-users} {test}
171 ;; {ready-delay} {issue-delay} [{conflict-list}])
172
173 ;; The integer ALU.
174 ;; (Noted only for documentation; units that take one cycle do not need to
175 ;; be specified.)
176
177 ;; On the sparclite, integer multiply takes 1, 3, or 5 cycles depending on
178 ;; the inputs.
179
180 ;; (define_function_unit "alu" 1 0
181 ;; (eq_attr "type" "unary,binary,move,address") 1 0)
182
183 ;; ---- cypress CY7C602 scheduling:
184 ;; Memory with load-delay of 1 (i.e., 2 cycle load).
185 (define_function_unit "memory" 1 0
186 (and (eq_attr "type" "load,fpload") (eq_attr "cpu" "cypress")) 2 2)
187
188 ;; SPARC has two floating-point units: the FP ALU,
189 ;; and the FP MUL/DIV/SQRT unit.
190 ;; Instruction timings on the CY7C602 are as follows
191 ;; FABSs 4
192 ;; FADDs/d 5/5
193 ;; FCMPs/d 4/4
194 ;; FDIVs/d 23/37
195 ;; FMOVs 4
196 ;; FMULs/d 5/7
197 ;; FNEGs 4
198 ;; FSQRTs/d 34/63
199 ;; FSUBs/d 5/5
200 ;; FdTOi/s 5/5
201 ;; FsTOi/d 5/5
202 ;; FiTOs/d 9/5
203
204 ;; The CY7C602 can only support 2 fp insns simultaneously.
205 ;; More insns cause the chip to stall.
206
207 (define_function_unit "fp_alu" 1 0
208 (and (eq_attr "type" "fp") (eq_attr "cpu" "cypress")) 5 5)
209 (define_function_unit "fp_mds" 1 0
210 (and (eq_attr "type" "fpmul") (eq_attr "cpu" "cypress")) 7 7)
211 (define_function_unit "fp_mds" 1 0
212 (and (eq_attr "type" "fpdivs,fpdivd") (eq_attr "cpu" "cypress")) 37 37)
213 (define_function_unit "fp_mds" 1 0
214 (and (eq_attr "type" "fpsqrt") (eq_attr "cpu" "cypress")) 63 63)
215
216 ;; ----- The TMS390Z55 scheduling
217 ;; The Supersparc can issue 1 - 3 insns per cycle; here we assume
218 ;; three insns/cycle, and hence multiply all costs by three.
219 ;; Combinations up to two integer, one ld/st, one fp.
220 ;; Memory delivers its result in one cycle to IU, zero cycles to FP
221 (define_function_unit "memory" 1 0
222 (and (eq_attr "type" "load") (eq_attr "cpu" "supersparc")) 3 3)
223 (define_function_unit "memory" 1 0
224 (and (eq_attr "type" "fpload") (eq_attr "cpu" "supersparc")) 1 3)
225 ;; at least one in three instructions can be a mem op.
226 (define_function_unit "memory" 1 0
227 (and (eq_attr "type" "store,fpstore") (eq_attr "cpu" "supersparc")) 1 3)
228 ;; at least one in three instructions can be a shift op.
229 (define_function_unit "shift" 1 0
230 (and (eq_attr "type" "shift") (eq_attr "cpu" "supersparc")) 1 3)
231
232 ;; There are only two write ports to the integer register file
233 ;; A store also uses a write port
234 (define_function_unit "iwport" 2 0
235 (and (eq_attr "type" "load,store,shift,ialu") (eq_attr "cpu" "supersparc")) 1 3)
236
237 ;; Timings; throughput/latency
238 ;; FADD 1/3 add/sub, format conv, compare, abs, neg
239 ;; FMUL 1/3
240 ;; FDIVs 4/6
241 ;; FDIVd 7/9
242 ;; FSQRTs 6/8
243 ;; FSQRTd 10/12
244 ;; IMUL 4/4
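;; As a worked example of the three-insns/cycle scaling: the FDIVs figures
;; above (throughput 4, latency 6) become an issue-delay of 12 (4 * 3) and a
;; ready-delay of 18 (6 * 3) in the fp_mds unit below; the other entries
;; follow the same rule.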
245
246 (define_function_unit "fp_alu" 1 0
247 (and (eq_attr "type" "fp,fpcmp") (eq_attr "cpu" "supersparc")) 9 3)
248 (define_function_unit "fp_mds" 1 0
249 (and (eq_attr "type" "fpmul") (eq_attr "cpu" "supersparc")) 9 3)
250 (define_function_unit "fp_mds" 1 0
251 (and (eq_attr "type" "fpdivs") (eq_attr "cpu" "supersparc")) 18 12)
252 (define_function_unit "fp_mds" 1 0
253 (and (eq_attr "type" "fpdivd") (eq_attr "cpu" "supersparc")) 27 21)
254 (define_function_unit "fp_mds" 1 0
255 (and (eq_attr "type" "fpsqrt") (eq_attr "cpu" "supersparc")) 36 30)
256 (define_function_unit "fp_mds" 1 0
257 (and (eq_attr "type" "imul") (eq_attr "cpu" "supersparc")) 12 12)
258
259 ;; ----- sparclet tsc701 scheduling
260 ;; The tsc701 issues 1 insn per cycle.
261 ;; Results may be written back out of order.
262
263 ;; Loads take 2 extra cycles to complete and 4 can be buffered at a time.
264 (define_function_unit "tsc701_load" 4 1
265 (and (eq_attr "type" "load") (eq_attr "cpu" "tsc701")) 3 1)
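;; (Presumably the four units model the four-entry load buffer mentioned
;; above, and the ready-delay of 3 is the base cycle plus the 2 extra cycles.)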
266 ;; Stores take 2(?) extra cycles to complete.
267 ;; It is desirable to not have any memory operation in the following 2 cycles.
268 ;; (??? or 2 memory ops in the case of std).
269 (define_function_unit "tsc701_store" 1 0
270 (and (eq_attr "type" "store") (eq_attr "cpu" "tsc701")) 3 3
271 [(eq_attr "type" "load,store")])
272 ;; The multiply unit has a latency of 5.
273 (define_function_unit "tsc701_mul" 1 0
274 (and (eq_attr "type" "imul") (eq_attr "cpu" "tsc701")) 5 5)
275
276 ;; ----- The UltraSPARC-1 scheduling
277 ;; The Ultrasparc can issue 1 - 4 insns per cycle; here we assume
278 ;; four insns/cycle, and hence multiply all costs by four.
279
280 ;; Memory delivers its result in three cycles to IU, three cycles to FP
281 (define_function_unit "memory" 1 0
282 (and (eq_attr "type" "load,fpload") (eq_attr "cpu" "ultrasparc")) 12 4)
283 (define_function_unit "memory" 1 0
284 (and (eq_attr "type" "store,fpstore") (eq_attr "cpu" "ultrasparc")) 4 4)
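;; As a worked example of the four-insns/cycle scaling: the 3 cycle load
;; latency above becomes the ready-delay of 12 (3 * 4) on the load unit,
;; with an issue-delay of 4, i.e. one memory operation per cycle.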
285 (define_function_unit "ieu" 1 0
286 (and (eq_attr "type" "ialu") (eq_attr "cpu" "ultrasparc")) 1 2)
287 (define_function_unit "ieu" 1 0
288 (and (eq_attr "type" "shift") (eq_attr "cpu" "ultrasparc")) 1 4)
289 (define_function_unit "ieu" 1 0
290 (and (eq_attr "type" "cmove") (eq_attr "cpu" "ultrasparc")) 8 4)
291
292 ;; Timings; throughput/latency
293 ;; ?? FADD 1/3 add/sub, format conv, compare, abs, neg
294 ;; ?? FMUL 1/3
295 ;; ?? FDIVs 1/12
296 ;; ?? FDIVd 1/22
297 ;; ?? FSQRTs 1/12
298 ;; ?? FSQRTd 1/22
299
300 (define_function_unit "fp" 1 0
301 (and (eq_attr "type" "fp") (eq_attr "cpu" "ultrasparc")) 12 2)
302 (define_function_unit "fp" 1 0
303 (and (eq_attr "type" "fpcmp") (eq_attr "cpu" "ultrasparc")) 8 2)
304 (define_function_unit "fp" 1 0
305 (and (eq_attr "type" "fpmul") (eq_attr "cpu" "ultrasparc")) 12 2)
306 (define_function_unit "fp" 1 0
307 (and (eq_attr "type" "fpdivs") (eq_attr "cpu" "ultrasparc")) 48 2)
308 (define_function_unit "fp" 1 0
309 (and (eq_attr "type" "fpdivd") (eq_attr "cpu" "ultrasparc")) 88 2)
310 (define_function_unit "fp" 1 0
311 (and (eq_attr "type" "fpsqrt") (eq_attr "cpu" "ultrasparc")) 48 2)
312 \f
313 ;; Compare instructions.
314 ;; This controls RTL generation and register allocation.
315
316 ;; We generate RTL for comparisons and branches by having the cmpxx
317 ;; patterns store away the operands. Then, the scc and bcc patterns
318 ;; emit RTL for both the compare and the branch.
319 ;;
320 ;; We do this because we want to generate different code for an sne and
321 ;; seq insn. In those cases, if the second operand of the compare is not
322 ;; const0_rtx, we want to compute the xor of the two operands and test
323 ;; it against zero.
324 ;;
325 ;; We start with the DEFINE_EXPANDs, then the DEFINE_INSNs to match
326 ;; the patterns. Finally, we have the DEFINE_SPLITs for some of the scc
327 ;; insns that actually require more than one machine instruction.
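;; As an illustrative sketch of the xor approach mentioned above: an SImode
;; "x == y" (y a register rather than const0_rtx) goes through seqsi_special
;; below and ends up as roughly
;;   xor   %x,%y,%tmp
;;   subcc %g0,%tmp,%g0
;;   subx  %g0,-1,%dst
;; (%x, %y, %tmp and %dst are placeholders), i.e. the xor of the two
;; operands is what actually gets tested against zero.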
328
329 ;; Put cmpsi first among compare insns so it matches two CONST_INT operands.
330
331 (define_expand "cmpsi"
332 [(set (reg:CC 100)
333 (compare:CC (match_operand:SI 0 "register_operand" "")
334 (match_operand:SI 1 "arith_operand" "")))]
335 ""
336 "
337 {
338 sparc_compare_op0 = operands[0];
339 sparc_compare_op1 = operands[1];
340 DONE;
341 }")
342
343 (define_expand "cmpdi"
344 [(set (reg:CCX 100)
345 (compare:CCX (match_operand:DI 0 "register_operand" "")
346 (match_operand:DI 1 "arith_double_operand" "")))]
347 "TARGET_ARCH64"
348 "
349 {
350 sparc_compare_op0 = operands[0];
351 sparc_compare_op1 = operands[1];
352 DONE;
353 }")
354
355 (define_expand "cmpsf"
356 ;; The 96 here isn't ever used by anyone.
357 [(set (reg:CCFP 96)
358 (compare:CCFP (match_operand:SF 0 "register_operand" "")
359 (match_operand:SF 1 "register_operand" "")))]
360 "TARGET_FPU"
361 "
362 {
363 sparc_compare_op0 = operands[0];
364 sparc_compare_op1 = operands[1];
365 DONE;
366 }")
367
368 (define_expand "cmpdf"
369 ;; The 96 here isn't ever used by anyone.
370 [(set (reg:CCFP 96)
371 (compare:CCFP (match_operand:DF 0 "register_operand" "")
372 (match_operand:DF 1 "register_operand" "")))]
373 "TARGET_FPU"
374 "
375 {
376 sparc_compare_op0 = operands[0];
377 sparc_compare_op1 = operands[1];
378 DONE;
379 }")
380
381 (define_expand "cmptf"
382 ;; The 96 here isn't ever used by anyone.
383 [(set (reg:CCFP 96)
384 (compare:CCFP (match_operand:TF 0 "register_operand" "")
385 (match_operand:TF 1 "register_operand" "")))]
386 "TARGET_FPU"
387 "
388 {
389 sparc_compare_op0 = operands[0];
390 sparc_compare_op1 = operands[1];
391 DONE;
392 }")
393
394 ;; Now the compare DEFINE_INSNs.
395
396 (define_insn "*cmpsi_insn"
397 [(set (reg:CC 100)
398 (compare:CC (match_operand:SI 0 "register_operand" "r")
399 (match_operand:SI 1 "arith_operand" "rI")))]
400 ""
401 "cmp %0,%1"
402 [(set_attr "type" "compare")])
403
404 (define_insn "*cmpdi_sp64"
405 [(set (reg:CCX 100)
406 (compare:CCX (match_operand:DI 0 "register_operand" "r")
407 (match_operand:DI 1 "arith_double_operand" "rHI")))]
408 "TARGET_ARCH64"
409 "cmp %0,%1"
410 [(set_attr "type" "compare")])
411
412 (define_insn "*cmpsf_fpe"
413 [(set (match_operand:CCFPE 0 "fcc_reg_operand" "=c")
414 (compare:CCFPE (match_operand:SF 1 "register_operand" "f")
415 (match_operand:SF 2 "register_operand" "f")))]
416 "TARGET_FPU"
417 "*
418 {
419 if (TARGET_V9)
420 return \"fcmpes %0,%1,%2\";
421 return \"fcmpes %1,%2\";
422 }"
423 [(set_attr "type" "fpcmp")])
424
425 (define_insn "*cmpdf_fpe"
426 [(set (match_operand:CCFPE 0 "fcc_reg_operand" "=c")
427 (compare:CCFPE (match_operand:DF 1 "register_operand" "e")
428 (match_operand:DF 2 "register_operand" "e")))]
429 "TARGET_FPU"
430 "*
431 {
432 if (TARGET_V9)
433 return \"fcmped %0,%1,%2\";
434 return \"fcmped %1,%2\";
435 }"
436 [(set_attr "type" "fpcmp")])
437
438 (define_insn "*cmptf_fpe"
439 [(set (match_operand:CCFPE 0 "fcc_reg_operand" "=c")
440 (compare:CCFPE (match_operand:TF 1 "register_operand" "e")
441 (match_operand:TF 2 "register_operand" "e")))]
442 "TARGET_FPU && TARGET_HARD_QUAD"
443 "*
444 {
445 if (TARGET_V9)
446 return \"fcmpeq %0,%1,%2\";
447 return \"fcmpeq %1,%2\";
448 }"
449 [(set_attr "type" "fpcmp")])
450
451 (define_insn "*cmpsf_fp"
452 [(set (match_operand:CCFP 0 "fcc_reg_operand" "=c")
453 (compare:CCFP (match_operand:SF 1 "register_operand" "f")
454 (match_operand:SF 2 "register_operand" "f")))]
455 "TARGET_FPU"
456 "*
457 {
458 if (TARGET_V9)
459 return \"fcmps %0,%1,%2\";
460 return \"fcmps %1,%2\";
461 }"
462 [(set_attr "type" "fpcmp")])
463
464 (define_insn "*cmpdf_fp"
465 [(set (match_operand:CCFP 0 "fcc_reg_operand" "=c")
466 (compare:CCFP (match_operand:DF 1 "register_operand" "e")
467 (match_operand:DF 2 "register_operand" "e")))]
468 "TARGET_FPU"
469 "*
470 {
471 if (TARGET_V9)
472 return \"fcmpd %0,%1,%2\";
473 return \"fcmpd %1,%2\";
474 }"
475 [(set_attr "type" "fpcmp")])
476
477 (define_insn "*cmptf_fp"
478 [(set (match_operand:CCFP 0 "fcc_reg_operand" "=c")
479 (compare:CCFP (match_operand:TF 1 "register_operand" "e")
480 (match_operand:TF 2 "register_operand" "e")))]
481 "TARGET_FPU && TARGET_HARD_QUAD"
482 "*
483 {
484 if (TARGET_V9)
485 return \"fcmpq %0,%1,%2\";
486 return \"fcmpq %1,%2\";
487 }"
488 [(set_attr "type" "fpcmp")])
489 \f
490 ;; Next come the scc insns. For seq, sne, sgeu, and sltu, we can do this
491 ;; without jumps using the addx/subx instructions. For seq/sne on v9 we use
492 ;; the same code as v8 (the addx/subx method has more applications). The
493 ;; exception to this is "reg != 0" which can be done in one instruction on v9
494 ;; (so we do it). For the rest, on v9 we use conditional moves; on v8, we do
495 ;; branches.
496
497 ;; Seq_special[_xxx] and sne_special[_xxx] clobber the CC reg, because they
498 ;; generate addcc/subcc instructions.
499
500 (define_expand "seqsi_special"
501 [(set (match_dup 3)
502 (xor:SI (match_operand:SI 1 "register_operand" "")
503 (match_operand:SI 2 "register_operand" "")))
504 (parallel [(set (match_operand:SI 0 "register_operand" "")
505 (eq:SI (match_dup 3) (const_int 0)))
506 (clobber (reg:CC 100))])]
507 "! TARGET_LIVE_G0"
508 "{ operands[3] = gen_reg_rtx (SImode); }")
509
510 (define_expand "seqdi_special"
511 [(set (match_dup 3)
512 (xor:DI (match_operand:DI 1 "register_operand" "")
513 (match_operand:DI 2 "register_operand" "")))
514 (set (match_operand:DI 0 "register_operand" "")
515 (eq:DI (match_dup 3) (const_int 0)))]
516 "TARGET_ARCH64"
517 "{ operands[3] = gen_reg_rtx (DImode); }")
518
519 (define_expand "snesi_special"
520 [(set (match_dup 3)
521 (xor:SI (match_operand:SI 1 "register_operand" "")
522 (match_operand:SI 2 "register_operand" "")))
523 (parallel [(set (match_operand:SI 0 "register_operand" "")
524 (ne:SI (match_dup 3) (const_int 0)))
525 (clobber (reg:CC 100))])]
526 "! TARGET_LIVE_G0"
527 "{ operands[3] = gen_reg_rtx (SImode); }")
528
529 (define_expand "snedi_special"
530 [(set (match_dup 3)
531 (xor:DI (match_operand:DI 1 "register_operand" "")
532 (match_operand:DI 2 "register_operand" "")))
533 (set (match_operand:DI 0 "register_operand" "")
534 (ne:DI (match_dup 3) (const_int 0)))]
535 "TARGET_ARCH64"
536 "{ operands[3] = gen_reg_rtx (DImode); }")
537
538 (define_expand "seqdi_special_trunc"
539 [(set (match_dup 3)
540 (xor:DI (match_operand:DI 1 "register_operand" "")
541 (match_operand:DI 2 "register_operand" "")))
542 (set (match_operand:SI 0 "register_operand" "")
543 (eq:DI (match_dup 3) (const_int 0)))]
544 "TARGET_ARCH64"
545 "{ operands[3] = gen_reg_rtx (DImode); }")
546
547 (define_expand "snedi_special_trunc"
548 [(set (match_dup 3)
549 (xor:DI (match_operand:DI 1 "register_operand" "")
550 (match_operand:DI 2 "register_operand" "")))
551 (set (match_operand:SI 0 "register_operand" "")
552 (ne:DI (match_dup 3) (const_int 0)))]
553 "TARGET_ARCH64"
554 "{ operands[3] = gen_reg_rtx (DImode); }")
555
556 (define_expand "seqsi_special_extend"
557 [(set (match_dup 3)
558 (xor:SI (match_operand:SI 1 "register_operand" "")
559 (match_operand:SI 2 "register_operand" "")))
560 (parallel [(set (match_operand:DI 0 "register_operand" "")
561 (eq:SI (match_dup 3) (const_int 0)))
562 (clobber (reg:CC 100))])]
563 "TARGET_ARCH64"
564 "{ operands[3] = gen_reg_rtx (SImode); }")
565
566 (define_expand "snesi_special_extend"
567 [(set (match_dup 3)
568 (xor:SI (match_operand:SI 1 "register_operand" "")
569 (match_operand:SI 2 "register_operand" "")))
570 (parallel [(set (match_operand:DI 0 "register_operand" "")
571 (ne:SI (match_dup 3) (const_int 0)))
572 (clobber (reg:CC 100))])]
573 "TARGET_ARCH64"
574 "{ operands[3] = gen_reg_rtx (SImode); }")
575
576 ;; ??? v9: Operand 0 needs a mode, so SImode was chosen.
577 ;; However, the code handles both SImode and DImode.
578 (define_expand "seq"
579 [(set (match_operand:SI 0 "intreg_operand" "")
580 (eq:SI (match_dup 1) (const_int 0)))]
581 "! TARGET_LIVE_G0"
582 "
583 {
584 if (GET_MODE (sparc_compare_op0) == SImode)
585 {
586 rtx pat;
587
588 if (GET_MODE (operands[0]) == SImode)
589 pat = gen_seqsi_special (operands[0], sparc_compare_op0,
590 sparc_compare_op1);
591 else if (! TARGET_ARCH64)
592 FAIL;
593 else
594 pat = gen_seqsi_special_extend (operands[0], sparc_compare_op0,
595 sparc_compare_op1);
596 emit_insn (pat);
597 DONE;
598 }
599 else if (GET_MODE (sparc_compare_op0) == DImode)
600 {
601 rtx pat;
602
603 if (! TARGET_ARCH64)
604 FAIL;
605 else if (GET_MODE (operands[0]) == SImode)
606 pat = gen_seqdi_special_trunc (operands[0], sparc_compare_op0,
607 sparc_compare_op1);
608 else
609 pat = gen_seqdi_special (operands[0], sparc_compare_op0,
610 sparc_compare_op1);
611 emit_insn (pat);
612 DONE;
613 }
614 else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
615 {
616 emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, EQ);
617 emit_insn (gen_sne (operands[0]));
618 DONE;
619 }
620 else if (TARGET_V9)
621 {
622 if (gen_v9_scc (EQ, operands))
623 DONE;
624 /* fall through */
625 }
626 operands[1] = gen_compare_reg (EQ, sparc_compare_op0, sparc_compare_op1);
627 }")
628
629 ;; ??? v9: Operand 0 needs a mode, so SImode was chosen.
630 ;; However, the code handles both SImode and DImode.
631 (define_expand "sne"
632 [(set (match_operand:SI 0 "intreg_operand" "")
633 (ne:SI (match_dup 1) (const_int 0)))]
634 "! TARGET_LIVE_G0"
635 "
636 {
637 if (GET_MODE (sparc_compare_op0) == SImode)
638 {
639 rtx pat;
640
641 if (GET_MODE (operands[0]) == SImode)
642 pat = gen_snesi_special (operands[0], sparc_compare_op0,
643 sparc_compare_op1);
644 else if (! TARGET_ARCH64)
645 FAIL;
646 else
647 pat = gen_snesi_special_extend (operands[0], sparc_compare_op0,
648 sparc_compare_op1);
649 emit_insn (pat);
650 DONE;
651 }
652 else if (GET_MODE (sparc_compare_op0) == DImode)
653 {
654 rtx pat;
655
656 if (! TARGET_ARCH64)
657 FAIL;
658 else if (GET_MODE (operands[0]) == SImode)
659 pat = gen_snedi_special_trunc (operands[0], sparc_compare_op0,
660 sparc_compare_op1);
661 else
662 pat = gen_snedi_special (operands[0], sparc_compare_op0,
663 sparc_compare_op1);
664 emit_insn (pat);
665 DONE;
666 }
667 else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
668 {
669 emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, NE);
670 emit_insn (gen_sne (operands[0]));
671 DONE;
672 }
673 else if (TARGET_V9)
674 {
675 if (gen_v9_scc (NE, operands))
676 DONE;
677 /* fall through */
678 }
679 operands[1] = gen_compare_reg (NE, sparc_compare_op0, sparc_compare_op1);
680 }")
681
682 (define_expand "sgt"
683 [(set (match_operand:SI 0 "intreg_operand" "")
684 (gt:SI (match_dup 1) (const_int 0)))]
685 "! TARGET_LIVE_G0"
686 "
687 {
688 if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
689 {
690 emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GT);
691 emit_insn (gen_sne (operands[0]));
692 DONE;
693 }
694 else if (TARGET_V9)
695 {
696 if (gen_v9_scc (GT, operands))
697 DONE;
698 /* fall through */
699 }
700 operands[1] = gen_compare_reg (GT, sparc_compare_op0, sparc_compare_op1);
701 }")
702
703 (define_expand "slt"
704 [(set (match_operand:SI 0 "intreg_operand" "")
705 (lt:SI (match_dup 1) (const_int 0)))]
706 "! TARGET_LIVE_G0"
707 "
708 {
709 if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
710 {
711 emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LT);
712 emit_insn (gen_sne (operands[0]));
713 DONE;
714 }
715 else if (TARGET_V9)
716 {
717 if (gen_v9_scc (LT, operands))
718 DONE;
719 /* fall through */
720 }
721 operands[1] = gen_compare_reg (LT, sparc_compare_op0, sparc_compare_op1);
722 }")
723
724 (define_expand "sge"
725 [(set (match_operand:SI 0 "intreg_operand" "")
726 (ge:SI (match_dup 1) (const_int 0)))]
727 "! TARGET_LIVE_G0"
728 "
729 {
730 if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
731 {
732 emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GE);
733 emit_insn (gen_sne (operands[0]));
734 DONE;
735 }
736 else if (TARGET_V9)
737 {
738 if (gen_v9_scc (GE, operands))
739 DONE;
740 /* fall through */
741 }
742 operands[1] = gen_compare_reg (GE, sparc_compare_op0, sparc_compare_op1);
743 }")
744
745 (define_expand "sle"
746 [(set (match_operand:SI 0 "intreg_operand" "")
747 (le:SI (match_dup 1) (const_int 0)))]
748 "! TARGET_LIVE_G0"
749 "
750 {
751 if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
752 {
753 emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LE);
754 emit_insn (gen_sne (operands[0]));
755 DONE;
756 }
757 else if (TARGET_V9)
758 {
759 if (gen_v9_scc (LE, operands))
760 DONE;
761 /* fall through */
762 }
763 operands[1] = gen_compare_reg (LE, sparc_compare_op0, sparc_compare_op1);
764 }")
765
766 (define_expand "sgtu"
767 [(set (match_operand:SI 0 "intreg_operand" "")
768 (gtu:SI (match_dup 1) (const_int 0)))]
769 "! TARGET_LIVE_G0"
770 "
771 {
772 if (! TARGET_V9)
773 {
774 rtx tem;
775
776 /* We can do ltu easily, so if both operands are registers, swap them and
777 do a LTU. */
778 if ((GET_CODE (sparc_compare_op0) == REG
779 || GET_CODE (sparc_compare_op0) == SUBREG)
780 && (GET_CODE (sparc_compare_op1) == REG
781 || GET_CODE (sparc_compare_op1) == SUBREG))
782 {
783 tem = sparc_compare_op0;
784 sparc_compare_op0 = sparc_compare_op1;
785 sparc_compare_op1 = tem;
786 emit_insn (gen_sltu (operands[0]));
787 DONE;
788 }
789 }
790 else
791 {
792 if (gen_v9_scc (GTU, operands))
793 DONE;
794 }
795 operands[1] = gen_compare_reg (GTU, sparc_compare_op0, sparc_compare_op1);
796 }")
797
798 (define_expand "sltu"
799 [(set (match_operand:SI 0 "intreg_operand" "")
800 (ltu:SI (match_dup 1) (const_int 0)))]
801 "! TARGET_LIVE_G0"
802 "
803 {
804 if (TARGET_V9)
805 {
806 if (gen_v9_scc (LTU, operands))
807 DONE;
808 }
809 operands[1] = gen_compare_reg (LTU, sparc_compare_op0, sparc_compare_op1);
810 }")
811
812 (define_expand "sgeu"
813 [(set (match_operand:SI 0 "intreg_operand" "")
814 (geu:SI (match_dup 1) (const_int 0)))]
815 "! TARGET_LIVE_G0"
816 "
817 {
818 if (TARGET_V9)
819 {
820 if (gen_v9_scc (GEU, operands))
821 DONE;
822 }
823 operands[1] = gen_compare_reg (GEU, sparc_compare_op0, sparc_compare_op1);
824 }")
825
826 (define_expand "sleu"
827 [(set (match_operand:SI 0 "intreg_operand" "")
828 (leu:SI (match_dup 1) (const_int 0)))]
829 "! TARGET_LIVE_G0"
830 "
831 {
832 if (! TARGET_V9)
833 {
834 rtx tem;
835
836 /* We can do geu easily, so if both operands are registers, swap them and
837 do a GEU. */
838 if ((GET_CODE (sparc_compare_op0) == REG
839 || GET_CODE (sparc_compare_op0) == SUBREG)
840 && (GET_CODE (sparc_compare_op1) == REG
841 || GET_CODE (sparc_compare_op1) == SUBREG))
842 {
843 tem = sparc_compare_op0;
844 sparc_compare_op0 = sparc_compare_op1;
845 sparc_compare_op1 = tem;
846 emit_insn (gen_sgeu (operands[0]));
847 DONE;
848 }
849 }
850 else
851 {
852 if (gen_v9_scc (LEU, operands))
853 DONE;
854 }
855 operands[1] = gen_compare_reg (LEU, sparc_compare_op0, sparc_compare_op1);
856 }")
857
858 ;; Now the DEFINE_INSNs for the scc cases.
859
860 ;; The SEQ and SNE patterns are special because they can be done
861 ;; without any branching and do not involve a COMPARE.
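;; For instance, *snesi_zero just below computes (x != 0) from the carry bit:
;; "subcc %g0,%x,%g0" sets the carry exactly when x is nonzero, and
;; "addx %g0,0,%dst" then copies that carry into the result (a sketch; %x and
;; %dst stand for the actual operands).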
862
863 (define_insn "*snesi_zero"
864 [(set (match_operand:SI 0 "register_operand" "=r")
865 (ne:SI (match_operand:SI 1 "register_operand" "r")
866 (const_int 0)))
867 (clobber (reg:CC 100))]
868 "! TARGET_LIVE_G0"
869 "subcc %%g0,%1,%%g0\;addx %%g0,0,%0"
870 [(set_attr "type" "unary")
871 (set_attr "length" "2")])
872
873 (define_insn "*neg_snesi_zero"
874 [(set (match_operand:SI 0 "register_operand" "=r")
875 (neg:SI (ne:SI (match_operand:SI 1 "register_operand" "r")
876 (const_int 0))))
877 (clobber (reg:CC 100))]
878 "! TARGET_LIVE_G0"
879 "subcc %%g0,%1,%%g0\;subx %%g0,0,%0"
880 [(set_attr "type" "unary")
881 (set_attr "length" "2")])
882
883 (define_insn "*snesi_zero_extend"
884 [(set (match_operand:DI 0 "register_operand" "=r")
885 (ne:SI (match_operand:SI 1 "register_operand" "r")
886 (const_int 0)))
887 (clobber (reg:CC 100))]
888 "TARGET_ARCH64"
889 "subcc %%g0,%1,%%g0\;addx %%g0,0,%0"
890 [(set_attr "type" "unary")
891 (set_attr "length" "2")])
892
893 (define_insn "*snedi_zero"
894 [(set (match_operand:DI 0 "register_operand" "=&r")
895 (ne:DI (match_operand:DI 1 "register_operand" "r")
896 (const_int 0)))]
897 "TARGET_ARCH64"
898 "mov 0,%0\;movrnz %1,1,%0"
899 [(set_attr "type" "unary")
900 (set_attr "length" "2")])
901
902 (define_insn "*neg_snedi_zero"
903 [(set (match_operand:DI 0 "register_operand" "=&r")
904 (neg:DI (ne:DI (match_operand:DI 1 "register_operand" "r")
905 (const_int 0))))]
906 "TARGET_ARCH64"
907 "mov 0,%0\;movrnz %1,-1,%0"
908 [(set_attr "type" "unary")
909 (set_attr "length" "2")])
910
911 (define_insn "*snedi_zero_trunc"
912 [(set (match_operand:SI 0 "register_operand" "=&r")
913 (ne:DI (match_operand:DI 1 "register_operand" "r")
914 (const_int 0)))]
915 "TARGET_ARCH64"
916 "mov 0,%0\;movrnz %1,1,%0"
917 [(set_attr "type" "unary")
918 (set_attr "length" "2")])
919
920 (define_insn "*seqsi_zero"
921 [(set (match_operand:SI 0 "register_operand" "=r")
922 (eq:SI (match_operand:SI 1 "register_operand" "r")
923 (const_int 0)))
924 (clobber (reg:CC 100))]
925 "! TARGET_LIVE_G0"
926 "subcc %%g0,%1,%%g0\;subx %%g0,-1,%0"
927 [(set_attr "type" "unary")
928 (set_attr "length" "2")])
929
930 (define_insn "*neg_seqsi_zero"
931 [(set (match_operand:SI 0 "register_operand" "=r")
932 (neg:SI (eq:SI (match_operand:SI 1 "register_operand" "r")
933 (const_int 0))))
934 (clobber (reg:CC 100))]
935 "! TARGET_LIVE_G0"
936 "subcc %%g0,%1,%%g0\;addx %%g0,-1,%0"
937 [(set_attr "type" "unary")
938 (set_attr "length" "2")])
939
940 (define_insn "*seqsi_zero_extend"
941 [(set (match_operand:DI 0 "register_operand" "=r")
942 (eq:SI (match_operand:SI 1 "register_operand" "r")
943 (const_int 0)))
944 (clobber (reg:CC 100))]
945 "TARGET_ARCH64"
946 "subcc %%g0,%1,%%g0\;subx %%g0,-1,%0"
947 [(set_attr "type" "unary")
948 (set_attr "length" "2")])
949
950 (define_insn "*seqdi_zero"
951 [(set (match_operand:DI 0 "register_operand" "=&r")
952 (eq:DI (match_operand:DI 1 "register_operand" "r")
953 (const_int 0)))]
954 "TARGET_ARCH64"
955 "mov 0,%0\;movrz %1,1,%0"
956 [(set_attr "type" "unary")
957 (set_attr "length" "2")])
958
959 (define_insn "*neg_seqdi_zero"
960 [(set (match_operand:DI 0 "register_operand" "=&r")
961 (neg:DI (eq:DI (match_operand:DI 1 "register_operand" "r")
962 (const_int 0))))]
963 "TARGET_ARCH64"
964 "mov 0,%0\;movrz %1,-1,%0"
965 [(set_attr "type" "unary")
966 (set_attr "length" "2")])
967
968 (define_insn "*seqdi_zero_trunc"
969 [(set (match_operand:SI 0 "register_operand" "=&r")
970 (eq:DI (match_operand:DI 1 "register_operand" "r")
971 (const_int 0)))]
972 "TARGET_ARCH64"
973 "mov 0,%0\;movrz %1,1,%0"
974 [(set_attr "type" "unary")
975 (set_attr "length" "2")])
976
977 ;; We can also do (x + (i == 0)) and related, so put them in.
978 ;; ??? The addx/subx insns use the 32 bit carry flag so there are no DImode
979 ;; versions for v9.
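;; E.g. *x_plus_i_ne_0 below folds the comparison into the addx itself:
;; "subcc %g0,%i,%g0" followed by "addx %x,0,%dst" yields x + (i != 0) in
;; two insns (again with placeholder register names).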
980
981 (define_insn "*x_plus_i_ne_0"
982 [(set (match_operand:SI 0 "register_operand" "=r")
983 (plus:SI (ne:SI (match_operand:SI 1 "register_operand" "r")
984 (const_int 0))
985 (match_operand:SI 2 "register_operand" "r")))
986 (clobber (reg:CC 100))]
987 "! TARGET_LIVE_G0"
988 "subcc %%g0,%1,%%g0\;addx %2,0,%0"
989 [(set_attr "length" "2")])
990
991 (define_insn "*x_minus_i_ne_0"
992 [(set (match_operand:SI 0 "register_operand" "=r")
993 (minus:SI (match_operand:SI 2 "register_operand" "r")
994 (ne:SI (match_operand:SI 1 "register_operand" "r")
995 (const_int 0))))
996 (clobber (reg:CC 100))]
997 "! TARGET_LIVE_G0"
998 "subcc %%g0,%1,%%g0\;subx %2,0,%0"
999 [(set_attr "length" "2")])
1000
1001 (define_insn "*x_plus_i_eq_0"
1002 [(set (match_operand:SI 0 "register_operand" "=r")
1003 (plus:SI (eq:SI (match_operand:SI 1 "register_operand" "r")
1004 (const_int 0))
1005 (match_operand:SI 2 "register_operand" "r")))
1006 (clobber (reg:CC 100))]
1007 "! TARGET_LIVE_G0"
1008 "subcc %%g0,%1,%%g0\;subx %2,-1,%0"
1009 [(set_attr "length" "2")])
1010
1011 (define_insn "*x_minus_i_eq_0"
1012 [(set (match_operand:SI 0 "register_operand" "=r")
1013 (minus:SI (match_operand:SI 2 "register_operand" "r")
1014 (eq:SI (match_operand:SI 1 "register_operand" "r")
1015 (const_int 0))))
1016 (clobber (reg:CC 100))]
1017 "! TARGET_LIVE_G0"
1018 "subcc %%g0,%1,%%g0\;addx %2,-1,%0"
1019 [(set_attr "length" "2")])
1020
1021 ;; We can also do GEU and LTU directly, but these operate after a compare.
1022 ;; ??? The addx/subx insns use the 32 bit carry flag so there are no DImode
1023 ;; versions for v9.
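;; E.g. after a "cmp %a,%b", *sltu_insn below reads the carry directly:
;; "addx %g0,0,%dst" gives (a < b) unsigned, and "subx %g0,-1,%dst"
;; (*sgeu_insn) gives (a >= b) unsigned.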
1024
1025 (define_insn "*sltu_insn"
1026 [(set (match_operand:SI 0 "register_operand" "=r")
1027 (ltu:SI (reg:CC 100) (const_int 0)))]
1028 "! TARGET_LIVE_G0"
1029 "addx %%g0,0,%0"
1030 [(set_attr "type" "misc")])
1031
1032 (define_insn "*neg_sltu_insn"
1033 [(set (match_operand:SI 0 "register_operand" "=r")
1034 (neg:SI (ltu:SI (reg:CC 100) (const_int 0))))]
1035 "! TARGET_LIVE_G0"
1036 "subx %%g0,0,%0"
1037 [(set_attr "type" "misc")])
1038
1039 ;; ??? Combine should canonicalize these next two to the same pattern.
1040 (define_insn "*neg_sltu_minus_x"
1041 [(set (match_operand:SI 0 "register_operand" "=r")
1042 (minus:SI (neg:SI (ltu:SI (reg:CC 100) (const_int 0)))
1043 (match_operand:SI 1 "arith_operand" "rI")))]
1044 "! TARGET_LIVE_G0"
1045 "subx %%g0,%1,%0"
1046 [(set_attr "type" "unary")])
1047
1048 (define_insn "*neg_sltu_plus_x"
1049 [(set (match_operand:SI 0 "register_operand" "=r")
1050 (neg:SI (plus:SI (ltu:SI (reg:CC 100) (const_int 0))
1051 (match_operand:SI 1 "arith_operand" "rI"))))]
1052 "! TARGET_LIVE_G0"
1053 "subx %%g0,%1,%0"
1054 [(set_attr "type" "unary")])
1055
1056 (define_insn "*sgeu_insn"
1057 [(set (match_operand:SI 0 "register_operand" "=r")
1058 (geu:SI (reg:CC 100) (const_int 0)))]
1059 "! TARGET_LIVE_G0"
1060 "subx %%g0,-1,%0"
1061 [(set_attr "type" "misc")])
1062
1063 (define_insn "*neg_sgeu_insn"
1064 [(set (match_operand:SI 0 "register_operand" "=r")
1065 (neg:SI (geu:SI (reg:CC 100) (const_int 0))))]
1066 "! TARGET_LIVE_G0"
1067 "addx %%g0,-1,%0"
1068 [(set_attr "type" "misc")])
1069
1070 ;; We can also do (x + ((unsigned) i >= 0)) and related, so put them in.
1071 ;; ??? The addx/subx insns use the 32 bit carry flag so there are no DImode
1072 ;; versions for v9.
1073
1074 (define_insn "*sltu_plus_x"
1075 [(set (match_operand:SI 0 "register_operand" "=r")
1076 (plus:SI (ltu:SI (reg:CC 100) (const_int 0))
1077 (match_operand:SI 1 "arith_operand" "rI")))]
1078 "! TARGET_LIVE_G0"
1079 "addx %%g0,%1,%0"
1080 [(set_attr "type" "unary")])
1081
1082 (define_insn "*sltu_plus_x_plus_y"
1083 [(set (match_operand:SI 0 "register_operand" "=r")
1084 (plus:SI (ltu:SI (reg:CC 100) (const_int 0))
1085 (plus:SI (match_operand:SI 1 "arith_operand" "%r")
1086 (match_operand:SI 2 "arith_operand" "rI"))))]
1087 ""
1088 "addx %1,%2,%0")
1089
1090 (define_insn "*x_minus_sltu"
1091 [(set (match_operand:SI 0 "register_operand" "=r")
1092 (minus:SI (match_operand:SI 1 "register_operand" "r")
1093 (ltu:SI (reg:CC 100) (const_int 0))))]
1094 ""
1095 "subx %1,0,%0"
1096 [(set_attr "type" "unary")])
1097
1098 ;; ??? Combine should canonicalize these next two to the same pattern.
1099 (define_insn "*x_minus_y_minus_sltu"
1100 [(set (match_operand:SI 0 "register_operand" "=r")
1101 (minus:SI (minus:SI (match_operand:SI 1 "register_operand" "r")
1102 (match_operand:SI 2 "arith_operand" "rI"))
1103 (ltu:SI (reg:CC 100) (const_int 0))))]
1104 ""
1105 "subx %1,%2,%0")
1106
1107 (define_insn "*x_minus_sltu_plus_y"
1108 [(set (match_operand:SI 0 "register_operand" "=r")
1109 (minus:SI (match_operand:SI 1 "register_operand" "r")
1110 (plus:SI (ltu:SI (reg:CC 100) (const_int 0))
1111 (match_operand:SI 2 "arith_operand" "rI"))))]
1112 ""
1113 "subx %1,%2,%0")
1114
1115 (define_insn "*sgeu_plus_x"
1116 [(set (match_operand:SI 0 "register_operand" "=r")
1117 (plus:SI (geu:SI (reg:CC 100) (const_int 0))
1118 (match_operand:SI 1 "register_operand" "r")))]
1119 ""
1120 "subx %1,-1,%0"
1121 [(set_attr "type" "unary")])
1122
1123 (define_insn "*x_minus_sgeu"
1124 [(set (match_operand:SI 0 "register_operand" "=r")
1125 (minus:SI (match_operand:SI 1 "register_operand" "r")
1126 (geu:SI (reg:CC 100) (const_int 0))))]
1127 ""
1128 "addx %1,-1,%0"
1129 [(set_attr "type" "unary")])
1130
1131 ;; Now we have the generic scc insns.
1132 ;; !v9: These will be done using a jump.
1133 ;; v9: Use conditional moves which are defined elsewhere.
1135 ;; We have to exclude the cases above, since we do not want combine to
1135 ;; turn something that does not require a jump into something that does.
1136
1137 (define_insn "*scc_si"
1138 [(set (match_operand:SI 0 "register_operand" "=r")
1139 (match_operator:SI 2 "noov_compare_op"
1140 [(match_operand 1 "icc_or_fcc_reg_operand" "")
1141 (const_int 0)]))]
1142 ""
1143 "* return output_scc_insn (operands, insn); "
1144 [(set_attr "type" "multi")
1145 (set_attr "length" "3")])
1146
1147 (define_insn "*scc_di"
1148 [(set (match_operand:DI 0 "register_operand" "=r")
1149 (match_operator:DI 2 "noov_compare_op"
1150 [(match_operand 1 "icc_or_fcc_reg_operand" "")
1151 (const_int 0)]))]
1152 "TARGET_ARCH64"
1153 "* return output_scc_insn (operands, insn); "
1154 [(set_attr "type" "multi")
1155 (set_attr "length" "3")])
1156 \f
1157 ;; These control RTL generation for conditional jump insns
1158
1159 ;; The quad-word fp compare library routines all return nonzero to indicate
1160 ;; true, which is different from the equivalent libgcc routines, so we must
1161 ;; handle them specially here.
1162
1163 (define_expand "beq"
1164 [(set (pc)
1165 (if_then_else (eq (match_dup 1) (const_int 0))
1166 (label_ref (match_operand 0 "" ""))
1167 (pc)))]
1168 ""
1169 "
1170 {
1171 if (TARGET_ARCH64 && sparc_compare_op1 == const0_rtx
1172 && GET_CODE (sparc_compare_op0) == REG
1173 && GET_MODE (sparc_compare_op0) == DImode)
1174 {
1175 emit_v9_brxx_insn (EQ, sparc_compare_op0, operands[0]);
1176 DONE;
1177 }
1178 else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
1179 {
1180 emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, EQ);
1181 emit_jump_insn (gen_bne (operands[0]));
1182 DONE;
1183 }
1184 operands[1] = gen_compare_reg (EQ, sparc_compare_op0, sparc_compare_op1);
1185 }")
1186
1187 (define_expand "bne"
1188 [(set (pc)
1189 (if_then_else (ne (match_dup 1) (const_int 0))
1190 (label_ref (match_operand 0 "" ""))
1191 (pc)))]
1192 ""
1193 "
1194 {
1195 if (TARGET_ARCH64 && sparc_compare_op1 == const0_rtx
1196 && GET_CODE (sparc_compare_op0) == REG
1197 && GET_MODE (sparc_compare_op0) == DImode)
1198 {
1199 emit_v9_brxx_insn (NE, sparc_compare_op0, operands[0]);
1200 DONE;
1201 }
1202 else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
1203 {
1204 emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, NE);
1205 emit_jump_insn (gen_bne (operands[0]));
1206 DONE;
1207 }
1208 operands[1] = gen_compare_reg (NE, sparc_compare_op0, sparc_compare_op1);
1209 }")
1210
1211 (define_expand "bgt"
1212 [(set (pc)
1213 (if_then_else (gt (match_dup 1) (const_int 0))
1214 (label_ref (match_operand 0 "" ""))
1215 (pc)))]
1216 ""
1217 "
1218 {
1219 if (TARGET_ARCH64 && sparc_compare_op1 == const0_rtx
1220 && GET_CODE (sparc_compare_op0) == REG
1221 && GET_MODE (sparc_compare_op0) == DImode)
1222 {
1223 emit_v9_brxx_insn (GT, sparc_compare_op0, operands[0]);
1224 DONE;
1225 }
1226 else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
1227 {
1228 emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GT);
1229 emit_jump_insn (gen_bne (operands[0]));
1230 DONE;
1231 }
1232 operands[1] = gen_compare_reg (GT, sparc_compare_op0, sparc_compare_op1);
1233 }")
1234
1235 (define_expand "bgtu"
1236 [(set (pc)
1237 (if_then_else (gtu (match_dup 1) (const_int 0))
1238 (label_ref (match_operand 0 "" ""))
1239 (pc)))]
1240 ""
1241 "
1242 { operands[1] = gen_compare_reg (GTU, sparc_compare_op0, sparc_compare_op1);
1243 }")
1244
1245 (define_expand "blt"
1246 [(set (pc)
1247 (if_then_else (lt (match_dup 1) (const_int 0))
1248 (label_ref (match_operand 0 "" ""))
1249 (pc)))]
1250 ""
1251 "
1252 {
1253 if (TARGET_ARCH64 && sparc_compare_op1 == const0_rtx
1254 && GET_CODE (sparc_compare_op0) == REG
1255 && GET_MODE (sparc_compare_op0) == DImode)
1256 {
1257 emit_v9_brxx_insn (LT, sparc_compare_op0, operands[0]);
1258 DONE;
1259 }
1260 else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
1261 {
1262 emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LT);
1263 emit_jump_insn (gen_bne (operands[0]));
1264 DONE;
1265 }
1266 operands[1] = gen_compare_reg (LT, sparc_compare_op0, sparc_compare_op1);
1267 }")
1268
1269 (define_expand "bltu"
1270 [(set (pc)
1271 (if_then_else (ltu (match_dup 1) (const_int 0))
1272 (label_ref (match_operand 0 "" ""))
1273 (pc)))]
1274 ""
1275 "
1276 { operands[1] = gen_compare_reg (LTU, sparc_compare_op0, sparc_compare_op1);
1277 }")
1278
1279 (define_expand "bge"
1280 [(set (pc)
1281 (if_then_else (ge (match_dup 1) (const_int 0))
1282 (label_ref (match_operand 0 "" ""))
1283 (pc)))]
1284 ""
1285 "
1286 {
1287 if (TARGET_ARCH64 && sparc_compare_op1 == const0_rtx
1288 && GET_CODE (sparc_compare_op0) == REG
1289 && GET_MODE (sparc_compare_op0) == DImode)
1290 {
1291 emit_v9_brxx_insn (GE, sparc_compare_op0, operands[0]);
1292 DONE;
1293 }
1294 else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
1295 {
1296 emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GE);
1297 emit_jump_insn (gen_bne (operands[0]));
1298 DONE;
1299 }
1300 operands[1] = gen_compare_reg (GE, sparc_compare_op0, sparc_compare_op1);
1301 }")
1302
1303 (define_expand "bgeu"
1304 [(set (pc)
1305 (if_then_else (geu (match_dup 1) (const_int 0))
1306 (label_ref (match_operand 0 "" ""))
1307 (pc)))]
1308 ""
1309 "
1310 { operands[1] = gen_compare_reg (GEU, sparc_compare_op0, sparc_compare_op1);
1311 }")
1312
1313 (define_expand "ble"
1314 [(set (pc)
1315 (if_then_else (le (match_dup 1) (const_int 0))
1316 (label_ref (match_operand 0 "" ""))
1317 (pc)))]
1318 ""
1319 "
1320 {
1321 if (TARGET_ARCH64 && sparc_compare_op1 == const0_rtx
1322 && GET_CODE (sparc_compare_op0) == REG
1323 && GET_MODE (sparc_compare_op0) == DImode)
1324 {
1325 emit_v9_brxx_insn (LE, sparc_compare_op0, operands[0]);
1326 DONE;
1327 }
1328 else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
1329 {
1330 emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LE);
1331 emit_jump_insn (gen_bne (operands[0]));
1332 DONE;
1333 }
1334 operands[1] = gen_compare_reg (LE, sparc_compare_op0, sparc_compare_op1);
1335 }")
1336
1337 (define_expand "bleu"
1338 [(set (pc)
1339 (if_then_else (leu (match_dup 1) (const_int 0))
1340 (label_ref (match_operand 0 "" ""))
1341 (pc)))]
1342 ""
1343 "
1344 { operands[1] = gen_compare_reg (LEU, sparc_compare_op0, sparc_compare_op1);
1345 }")
1346 \f
1347 ;; Now match both normal and inverted jump.
1348
1349 (define_insn "*normal_branch"
1350 [(set (pc)
1351 (if_then_else (match_operator 0 "noov_compare_op"
1352 [(reg 100) (const_int 0)])
1353 (label_ref (match_operand 1 "" ""))
1354 (pc)))]
1355 ""
1356 "*
1357 {
1358 return output_cbranch (operands[0], 1, 0,
1359 final_sequence && INSN_ANNULLED_BRANCH_P (insn),
1360 ! final_sequence);
1361 }"
1362 [(set_attr "type" "branch")])
1363
1364 (define_insn "*inverted_branch"
1365 [(set (pc)
1366 (if_then_else (match_operator 0 "noov_compare_op"
1367 [(reg 100) (const_int 0)])
1368 (pc)
1369 (label_ref (match_operand 1 "" ""))))]
1370 ""
1371 "*
1372 {
1373 return output_cbranch (operands[0], 1, 1,
1374 final_sequence && INSN_ANNULLED_BRANCH_P (insn),
1375 ! final_sequence);
1376 }"
1377 [(set_attr "type" "branch")])
1378
1379 (define_insn "*normal_fp_branch"
1380 [(set (pc)
1381 (if_then_else (match_operator 1 "comparison_operator"
1382 [(match_operand:CCFP 0 "fcc_reg_operand" "c")
1383 (const_int 0)])
1384 (label_ref (match_operand 2 "" ""))
1385 (pc)))]
1386 ""
1387 "*
1388 {
1389 return output_cbranch (operands[1], 2, 0,
1390 final_sequence && INSN_ANNULLED_BRANCH_P (insn),
1391 ! final_sequence);
1392 }"
1393 [(set_attr "type" "branch")])
1394
1395 (define_insn "*inverted_fp_branch"
1396 [(set (pc)
1397 (if_then_else (match_operator 1 "comparison_operator"
1398 [(match_operand:CCFP 0 "fcc_reg_operand" "c")
1399 (const_int 0)])
1400 (pc)
1401 (label_ref (match_operand 2 "" ""))))]
1402 ""
1403 "*
1404 {
1405 return output_cbranch (operands[1], 2, 1,
1406 final_sequence && INSN_ANNULLED_BRANCH_P (insn),
1407 ! final_sequence);
1408 }"
1409 [(set_attr "type" "branch")])
1410
1411 (define_insn "*normal_fpe_branch"
1412 [(set (pc)
1413 (if_then_else (match_operator 1 "comparison_operator"
1414 [(match_operand:CCFPE 0 "fcc_reg_operand" "c")
1415 (const_int 0)])
1416 (label_ref (match_operand 2 "" ""))
1417 (pc)))]
1418 ""
1419 "*
1420 {
1421 return output_cbranch (operands[1], 2, 0,
1422 final_sequence && INSN_ANNULLED_BRANCH_P (insn),
1423 ! final_sequence);
1424 }"
1425 [(set_attr "type" "branch")])
1426
1427 (define_insn "*inverted_fpe_branch"
1428 [(set (pc)
1429 (if_then_else (match_operator 1 "comparison_operator"
1430 [(match_operand:CCFPE 0 "fcc_reg_operand" "c")
1431 (const_int 0)])
1432 (pc)
1433 (label_ref (match_operand 2 "" ""))))]
1434 ""
1435 "*
1436 {
1437 return output_cbranch (operands[1], 2, 1,
1438 final_sequence && INSN_ANNULLED_BRANCH_P (insn),
1439 ! final_sequence);
1440 }"
1441 [(set_attr "type" "branch")])
1442
1443 ;; Sparc V9-specific jump insns. None of these are guaranteed to be
1444 ;; in the architecture.
1445
1446 ;; There are no 32 bit brreg insns.
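;; (For reference, output_v9branch emits one of the v9 branch-on-register
;; instructions, e.g. "brnz %reg,label" for the != 0 case; these test an
;; integer register directly, with no condition-code compare.)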
1447
1448 (define_insn "*normal_int_branch_sp64"
1449 [(set (pc)
1450 (if_then_else (match_operator 0 "v9_regcmp_op"
1451 [(match_operand:DI 1 "register_operand" "r")
1452 (const_int 0)])
1453 (label_ref (match_operand 2 "" ""))
1454 (pc)))]
1455 "TARGET_ARCH64"
1456 "*
1457 {
1458 return output_v9branch (operands[0], 1, 2, 0,
1459 final_sequence && INSN_ANNULLED_BRANCH_P (insn),
1460 ! final_sequence);
1461 }"
1462 [(set_attr "type" "branch")])
1463
1464 (define_insn "*inverted_int_branch_sp64"
1465 [(set (pc)
1466 (if_then_else (match_operator 0 "v9_regcmp_op"
1467 [(match_operand:DI 1 "register_operand" "r")
1468 (const_int 0)])
1469 (pc)
1470 (label_ref (match_operand 2 "" ""))))]
1471 "TARGET_ARCH64"
1472 "*
1473 {
1474 return output_v9branch (operands[0], 1, 2, 1,
1475 final_sequence && INSN_ANNULLED_BRANCH_P (insn),
1476 ! final_sequence);
1477 }"
1478 [(set_attr "type" "branch")])
1479 \f
1480 ;; Esoteric move insns (lo_sum, high, pic).
1481
1482 (define_insn "*lo_sum_si"
1483 [(set (match_operand:SI 0 "register_operand" "=r")
1484 (lo_sum:SI (match_operand:SI 1 "register_operand" "r")
1485 (match_operand:SI 2 "immediate_operand" "in")))]
1486 ""
1487 ;; V9 needs "add" because of the code models. We still use "or" for v8
1488 ;; so we can compare the old compiler with the new.
1489 "* return TARGET_ARCH64 ? \"add %1,%%lo(%a2),%0\" : \"or %1,%%lo(%a2),%0\";"
1490 ;; Need to set length for this arith insn because operand2
1491 ;; is not an "arith_operand".
1492 [(set_attr "length" "1")])
1493
1494 ;; For PIC, symbol_refs are put inside unspec so that the optimizer will not
1495 ;; confuse them with real addresses.
1496 (define_insn "pic_lo_sum_si"
1497 [(set (match_operand:SI 0 "register_operand" "=r")
1498 (lo_sum:SI (match_operand:SI 1 "register_operand" "r")
1499 (unspec:SI [(match_operand:SI 2 "immediate_operand" "in")] 0)))]
1500 "flag_pic"
1501 ;; V9 needs "add" because of the code models. We still use "or" for v8
1502 ;; so we can compare the old compiler with the new.
1503 "* return TARGET_ARCH64 ? \"add %1,%%lo(%a2),%0\" : \"or %1,%%lo(%a2),%0\";"
1504 ;; Need to set length for this arith insn because operand2
1505 ;; is not an "arith_operand".
1506 [(set_attr "length" "1")])
1507
1508 ;; The PIC version of sethi must appear before the non-pic case so that
1509 ;; the unspec will not be matched as part of the operand.
1510 ;; For PIC, symbol_refs are put inside unspec so that the optimizer will not
1511 ;; confuse them with real addresses.
1512 (define_insn "pic_sethi_si"
1513 [(set (match_operand:SI 0 "register_operand" "=r")
1514 (high:SI (unspec:SI [(match_operand 1 "" "")] 0)))]
1515 "flag_pic && check_pic (1)"
1516 "sethi %%hi(%a1),%0"
1517 [(set_attr "type" "move")
1518 (set_attr "length" "1")])
1519
1520 (define_insn "pic_lo_sum_di"
1521 [(set (match_operand:DI 0 "register_operand" "=r")
1522 (lo_sum:SI (match_operand:DI 1 "register_operand" "r")
1523 (unspec:SI [(match_operand:DI 2 "immediate_operand" "in")] 0)))]
1524 "TARGET_ARCH64 && flag_pic"
1525 "add %1,%%lo(%a2),%0"
1526 [(set_attr "length" "1")])
1527
1528 (define_insn "pic_sethi_di"
1529 [(set (match_operand:DI 0 "register_operand" "=r")
1530 (high:SI (unspec:SI [(match_operand 1 "" "")] 0)))]
1531 "TARGET_ARCH64 && flag_pic && check_pic (1)"
1532 "sethi %%hi(%a1),%0"
1533 [(set_attr "type" "move")
1534 (set_attr "length" "1")])
1535
1536 (define_insn "get_pc_via_call"
1537 [(set (pc) (label_ref (match_operand 0 "" "")))
1538 (set (reg:SI 15) (label_ref (match_operand 1 "" "")))]
1539 ""
1540 "call %l0%#"
1541 [(set_attr "type" "uncond_branch")])
1542
1543 (define_insn "get_pc_via_rdpc"
1544 [(set (match_operand:DI 0 "register_operand" "=r") (pc))]
1545 "TARGET_PTR64"
1546 "rd %%pc,%0"
1547 [(set_attr "type" "move")])
1548
1549 ;; Special pic pattern, for loading the address of a label into a register.
1550 ;; It clobbers o7 because the call puts the return address (i.e. pc value)
1551 ;; there. The pic tablejump pattern also uses this.
1552
1553 (define_insn "move_pic_label_si"
1554 [(set (match_operand:SI 0 "register_operand" "=r")
1555 ; This was previously (label_ref:SI (match_operand 1 "" "")) but that
1556 ; loses the volatil and other flags of the original label_ref.
1557 (match_operand:SI 1 "label_ref_operand" ""))
1558 (set (reg:SI 15) (pc))]
1559 "flag_pic"
1560 "*
1561 {
1562 if (get_attr_length (insn) == 2)
1563 return \"\\n1:\;call 2f\;add %%o7,%%lo(%l1-1b),%0\\n2:\";
1564 else
1565 return \"\\n1:\;call 2f\;sethi %%hi(%l1-1b),%0\\n2:\\tor %0,%%lo(%l1-1b),%0\;add %0,%%o7,%0\";
1566 }"
1567 [(set_attr "type" "multi")
1568    ; 960 = 4096 bytes / 4 bytes per insn (= 1024) - 64, as slack for imperfect length calcs
1569 (set (attr "length") (if_then_else (ltu (minus (match_dup 1) (pc))
1570 (const_int 960))
1571 (const_int 2)
1572 (const_int 4)))])
1573
1574 ;; Special sparc64 pattern for loading the address of a label into a register.
1575 ;; The pic and non-pic cases are the same since it's the most efficient way.
1576 ;;
1577 ;; ??? The non-pic case doesn't need to use %o7, we could use a scratch
1578 ;; instead. But the pic case doesn't need to use %o7 either. We handle them
1579 ;; both here so that when this is fixed, they can both be fixed together.
1580 ;; Don't forget that the pic jump table stuff uses %o7 (that will need to be
1581 ;; changed too).
1582
1583 (define_insn "move_label_di"
1584 [(set (match_operand:DI 0 "register_operand" "=r")
1585 ; This was previously (label_ref:DI (match_operand 1 "" "")) but that
1586 ; loses the volatil and other flags of the original label_ref.
1587 (match_operand:DI 1 "label_ref_operand" ""))
1588 (set (reg:DI 15) (pc))]
1589 "TARGET_ARCH64"
1590 "*
1591 {
1592 if (get_attr_length (insn) == 2)
1593 return \"\\n1:\;rd %%pc,%%o7\;add %%o7,%l1-1b,%0\";
1594 else
1595 return \"\\n1:\;rd %%pc,%%o7\;sethi %%hi(%l1-1b),%0\;add %0,%%lo(%l1-1b),%0\;sra %0,0,%0\;add %0,%%o7,%0\";
1596 }"
1597 [(set_attr "type" "multi")
1598    ; 960 = 4096 bytes / 4 bytes per insn (= 1024) - 64, as slack for imperfect length calcs
1599 (set (attr "length") (if_then_else (ltu (minus (match_dup 1) (pc))
1600 (const_int 960))
1601 (const_int 2)
1602 (const_int 5)))])
1603
1604 (define_insn "*sethi_hi"
1605 [(set (match_operand:HI 0 "register_operand" "=r")
1606 (high:HI (match_operand 1 "" "")))]
1607 "check_pic (1)"
1608 "sethi %%hi(%a1),%0"
1609 [(set_attr "type" "move")
1610 (set_attr "length" "1")])
1611
1612 ;; This must appear after the PIC sethi so that the PIC unspec will not
1613 ;; be matched as part of the operand.
1614 (define_insn "*sethi_si"
1615 [(set (match_operand:SI 0 "register_operand" "=r")
1616 (high:SI (match_operand 1 "" "")))]
1617 "check_pic (1)"
1618 "sethi %%hi(%a1),%0"
1619 [(set_attr "type" "move")
1620 (set_attr "length" "1")])
1621
1622 (define_insn "*lo_sum_di_sp32"
1623 [(set (match_operand:DI 0 "register_operand" "=r")
1624 (lo_sum:DI (match_operand:DI 1 "register_operand" "0")
1625 (match_operand:DI 2 "immediate_operand" "in")))]
1626 "! TARGET_ARCH64"
1627 "*
1628 {
1629 /* Don't output a 64 bit constant, since we can't trust the assembler to
1630 handle it correctly. */
1631 if (GET_CODE (operands[2]) == CONST_DOUBLE)
1632 operands[2] = GEN_INT (CONST_DOUBLE_LOW (operands[2]));
1633 else if (GET_CODE (operands[2]) == CONST_INT
1634 && HOST_BITS_PER_WIDE_INT > 32
1635 && INTVAL (operands[2]) > 0xffffffff)
1636 operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffffffff);
1637
1638 return \"or %L1,%%lo(%a2),%L0\";
1639 }"
1640 ;; Need to set length for this arith insn because operand2
1641 ;; is not an "arith_operand".
1642 [(set_attr "length" "1")])
1643
1644 ;; ??? Optimizer does not handle "or %o1,%lo(0),%o1". How about add?
1645
1646 (define_insn "*lo_sum_di_sp64"
1647 [(set (match_operand:DI 0 "register_operand" "=r")
1648 (lo_sum:DI (match_operand:DI 1 "register_operand" "0")
1649 (match_operand:DI 2 "immediate_operand" "in")))]
1650 "TARGET_ARCH64"
1651 "*
1652 {
1653 /* Don't output a 64 bit constant, since we can't trust the assembler to
1654 handle it correctly. */
1655 if (GET_CODE (operands[2]) == CONST_DOUBLE)
1656 operands[2] = GEN_INT (CONST_DOUBLE_LOW (operands[2]));
1657 else if (GET_CODE (operands[2]) == CONST_INT
1658 && HOST_BITS_PER_WIDE_INT > 32
1659 && INTVAL (operands[2]) > 0xffffffff)
1660 operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffffffff);
1661
1662 /* Note that we use add here. This is important because Medium/Anywhere
1663 code model support depends on it. */
1664 return \"add %1,%%lo(%a2),%0\";
1665 }"
1666 ;; Need to set length for this arith insn because operand2
1667 ;; is not an "arith_operand".
1668 [(set_attr "length" "1")])
1669
1670 (define_insn "*sethi_di_sp32"
1671 [(set (match_operand:DI 0 "register_operand" "=r")
1672 (high:DI (match_operand 1 "" "")))]
1673 "! TARGET_ARCH64 && check_pic (1)"
1674 "*
1675 {
1676 rtx op0 = operands[0];
1677 rtx op1 = operands[1];
1678
1679 if (GET_CODE (op1) == CONST_INT)
1680 {
1681 operands[0] = operand_subword (op0, 1, 0, DImode);
1682 output_asm_insn (\"sethi %%hi(%a1),%0\", operands);
1683
1684 operands[0] = operand_subword (op0, 0, 0, DImode);
1685 if (INTVAL (op1) < 0)
1686 return \"mov -1,%0\";
1687 else
1688 return \"mov 0,%0\";
1689 }
1690 else if (GET_CODE (op1) == CONST_DOUBLE)
1691 {
1692 operands[0] = operand_subword (op0, 1, 0, DImode);
1693 operands[1] = GEN_INT (CONST_DOUBLE_LOW (op1));
1694 output_asm_insn (\"sethi %%hi(%a1),%0\", operands);
1695
1696 operands[0] = operand_subword (op0, 0, 0, DImode);
1697 operands[1] = GEN_INT (CONST_DOUBLE_HIGH (op1));
1698 return singlemove_string (operands);
1699 }
1700 else
1701 abort ();
1702 return \"\";
1703 }"
1704 [(set_attr "type" "move")
1705 (set_attr "length" "2")])
1706
1707 ;;; ??? This pattern originally clobbered a scratch register. However, this
1708 ;;; is invalid: the movdi pattern may not use a temp register because it
1709 ;;; may be called from reload to reload a DImode value. In that case, we
1710 ;;; end up with a scratch register that never gets allocated. To avoid this,
1711 ;;; we use global register 1, which gcc never otherwise uses, as a temp.
1712 ;;; The correct solution here might be to force DImode constants to memory,
1713 ;;; e.g. by using a TOC as the romp and rs6000 ports do for addresses; reg
1714 ;;; 1 would then no longer need to be considered a fixed reg.
1715
1716 (define_expand "sethi_di_sp64"
1717 [(parallel
1718 [(set (match_operand:DI 0 "register_operand" "")
1719 (high:DI (match_operand 1 "general_operand" "")))
1720 (clobber (reg:DI 1))])]
1721 "TARGET_ARCH64"
1722 "")
1723
1724 (define_insn "*sethi_di_sp64_const"
1725 [(set (match_operand:DI 0 "register_operand" "=r")
1726 (high:DI (match_operand 1 "const_double_operand" "")))
1727 (clobber (reg:DI 1))]
1728 "TARGET_ARCH64 && check_pic (1)"
1729 "*
1730 {
1731 #if HOST_BITS_PER_WIDE_INT == 32
1732 rtx high, low;
1733
1734 split_double (operands[1], &high, &low);
1735
1736 if (high == const0_rtx)
1737 {
1738 operands[1] = low;
1739 output_asm_insn (\"sethi %%hi(%a1),%0\", operands);
1740 }
1741 else
1742 {
1743 operands[1] = high;
1744 output_asm_insn (singlemove_string (operands), operands);
1745
1746 operands[1] = low;
1747 output_asm_insn (\"sllx %0,32,%0\", operands);
1748 if (low != const0_rtx)
1749 output_asm_insn (\"sethi %%hi(%a1),%%g1; or %0,%%g1,%0\", operands);
1750 }
1751 #else
1752 rtx op = operands[1];
1753
1754   if (! SPARC_SETHI_P (INTVAL (op)))
1755 {
1756 operands[1] = GEN_INT (INTVAL (op) >> 32);
1757 output_asm_insn (singlemove_string (operands), operands);
1758
1759 output_asm_insn (\"sllx %0,32,%0\", operands);
1760 if (INTVAL (op) & 0xffffffff)
1761 {
1762 operands[1] = GEN_INT (INTVAL (op) & 0xffffffff);
1763 output_asm_insn (\"sethi %%hi(%a1),%%g1; or %0,%%g1,%0\", operands);
1764 }
1765 }
1766 else
1767 {
1768 output_asm_insn (\"sethi %%hi(%a1),%0\", operands);
1769 }
1770 #endif
1771
1772 return \"\";
1773 }"
1774 [(set_attr "type" "move")
1775 (set_attr "length" "5")])
1776
1777 ;; Most of the required support for the various code models is here.
1778 ;; We can do this because SPARC needs the high insn to load the address. We
1779 ;; just need to get high to do the right thing for each code model. Each code
1780 ;; model then uses the same "%X+%lo(...)" form in the load/store insn, though
1781 ;; in the case of the medium/middle code model "%lo" is written "%l44".
1782
1783 ;; When TARGET_CM_MEDLOW, assume that the upper 32 bits of symbol addresses are
1784 ;; always 0.
1785 ;; When TARGET_CM_MEDMID, the executable must be in the low 16 TB of memory.
1786 ;; This corresponds to the low 44 bits, and the %[hml]44 relocs are used.
1787 ;; ??? Not implemented yet.
1788 ;; When TARGET_CM_EMBMEDANY, the text and data segments have a maximum size of
1789 ;; 31 bits and may be located anywhere. EMBMEDANY_BASE_REG contains the start
1790 ;; address of the data segment, currently %g4.
1791 ;; When TARGET_CM_MEDANY, the text and data segments have a maximum size of 31
1792 ;; bits and may be located anywhere. The maximum offset from any instruction
1793 ;; to the label _GLOBAL_OFFSET_TABLE_ is 31 bits.
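;; As an illustrative sketch only (the register choice is an assumption, not
;; part of the patterns), loading the value at symbol "sym" into %o0 looks
;; roughly like this under the two simplest models:
;;   MEDLOW:                sethi %hi(sym),%o0
;;                          ld    [%o0+%lo(sym)],%o0
;;   EMBMEDANY (data seg):  sethi %hi(sym),%o0
;;                          add   %o0,%g4,%o0      ! add the data segment base
;;                          ld    [%o0+%lo(sym)],%o0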
1794
1795 (define_insn "*sethi_di_medlow"
1796 [(set (match_operand:DI 0 "register_operand" "=r")
1797 (high:DI (match_operand 1 "" "")))
1798 ;; The clobber is here because emit_move_sequence assumes the worst case.
1799 (clobber (reg:DI 1))]
1800 "TARGET_CM_MEDLOW && check_pic (1)"
1801 "sethi %%hi(%a1),%0"
1802 [(set_attr "type" "move")
1803 (set_attr "length" "1")])
1804
1805 (define_insn "*sethi_di_medium_pic"
1806 [(set (match_operand:DI 0 "register_operand" "=r")
1807 (high:DI (match_operand 1 "sp64_medium_pic_operand" "")))]
1808 "(TARGET_CM_MEDLOW || TARGET_CM_EMBMEDANY) && check_pic (1)"
1809 "sethi %%hi(%a1),%0"
1810 [(set_attr "type" "move")
1811 (set_attr "length" "1")])
1812
1813 ;; WARNING: %0 gets %hi(%1)+%g4.
1814 ;; You cannot OR in %lo(%1); it must be added in.
1815
1816 (define_insn "*sethi_di_embmedany_data"
1817 [(set (match_operand:DI 0 "register_operand" "=r")
1818 (high:DI (match_operand 1 "data_segment_operand" "")))
1819 ;; The clobber is here because emit_move_sequence assumes the worst case.
1820 (clobber (reg:DI 1))]
1821 "TARGET_CM_EMBMEDANY && check_pic (1)"
1822 "sethi %%hi(%a1),%0; add %0,%_,%0"
1823 [(set_attr "type" "move")
1824 (set_attr "length" "2")])
1825
1826 (define_insn "*sethi_di_embmedany_text"
1827 [(set (match_operand:DI 0 "register_operand" "=r")
1828 (high:DI (match_operand 1 "text_segment_operand" "")))
1829 ;; The clobber is here because emit_move_sequence assumes the worst case.
1830 (clobber (reg:DI 1))]
1831 "TARGET_CM_EMBMEDANY && check_pic (1)"
1832 "sethi %%uhi(%a1),%%g1; or %%g1,%%ulo(%a1),%%g1; sllx %%g1,32,%%g1; sethi %%hi(%a1),%0; or %0,%%g1,%0"
1833 [(set_attr "type" "move")
1834 (set_attr "length" "5")])
1835 \f
1836 ;; Move instructions
1837
1838 (define_expand "movqi"
1839 [(set (match_operand:QI 0 "general_operand" "")
1840 (match_operand:QI 1 "general_operand" ""))]
1841 ""
1842 "
1843 {
1844 if (emit_move_sequence (operands, QImode))
1845 DONE;
1846 }")
1847
1848 (define_insn "*movqi_insn"
1849 [(set (match_operand:QI 0 "reg_or_nonsymb_mem_operand" "=r,r,r,Q")
1850 (match_operand:QI 1 "move_operand" "rI,K,Q,rJ"))]
1851 "! TARGET_LIVE_G0
1852 && (register_operand (operands[0], QImode)
1853 || register_operand (operands[1], QImode)
1854 || operands[1] == const0_rtx)"
1855 "@
1856 mov %1,%0
1857 sethi %%hi(%a1),%0
1858 ldub %1,%0
1859 stb %r1,%0"
1860 [(set_attr "type" "move,move,load,store")
1861 (set_attr "length" "1")])
1862
1863 (define_insn "*movqi_insn_liveg0"
1864 [(set (match_operand:QI 0 "reg_or_nonsymb_mem_operand" "=r,r,r,r,r,Q")
1865 (match_operand:QI 1 "move_operand" "r,J,I,K,Q,r"))]
1866 "TARGET_LIVE_G0
1867 && (register_operand (operands[0], QImode)
1868 || register_operand (operands[1], QImode))"
1869 "@
1870 mov %1,%0
1871 and %0,0,%0
1872 and %0,0,%0\;or %0,%1,%0
1873 sethi %%hi(%a1),%0
1874 ldub %1,%0
1875 stb %1,%0"
1876 [(set_attr "type" "move,move,move,move,load,store")
1877 (set_attr "length" "1,1,2,1,1,1")])
1878
1879 (define_insn "*lo_sum_qi"
1880 [(set (match_operand:QI 0 "register_operand" "=r")
1881 (subreg:QI (lo_sum:SI (match_operand:QI 1 "register_operand" "r")
1882 (match_operand 2 "immediate_operand" "in")) 0))]
1883 ""
1884 "or %1,%%lo(%a2),%0"
1885 [(set_attr "length" "1")])
1886
1887 (define_insn "*store_qi"
1888 [(set (mem:QI (match_operand:SI 0 "symbolic_operand" ""))
1889 (match_operand:QI 1 "reg_or_0_operand" "rJ"))
1890 (clobber (match_scratch:SI 2 "=&r"))]
1891 "(reload_completed || reload_in_progress)
1892 && ! TARGET_PTR64"
1893 "sethi %%hi(%a0),%2\;stb %r1,[%2+%%lo(%a0)]"
1894 [(set_attr "type" "store")
1895 (set_attr "length" "2")])
1896
1897 (define_expand "movhi"
1898 [(set (match_operand:HI 0 "general_operand" "")
1899 (match_operand:HI 1 "general_operand" ""))]
1900 ""
1901 "
1902 {
1903 if (emit_move_sequence (operands, HImode))
1904 DONE;
1905 }")
1906
1907 (define_insn "*movhi_insn"
1908 [(set (match_operand:HI 0 "reg_or_nonsymb_mem_operand" "=r,r,r,Q")
1909 (match_operand:HI 1 "move_operand" "rI,K,Q,rJ"))]
1910 "! TARGET_LIVE_G0
1911 && (register_operand (operands[0], HImode)
1912 || register_operand (operands[1], HImode)
1913 || operands[1] == const0_rtx)"
1914 "@
1915 mov %1,%0
1916 sethi %%hi(%a1),%0
1917 lduh %1,%0
1918 sth %r1,%0"
1919 [(set_attr "type" "move,move,load,store")
1920 (set_attr "length" "1")])
1921
1922 (define_insn "*movhi_insn_liveg0"
1923 [(set (match_operand:HI 0 "reg_or_nonsymb_mem_operand" "=r,r,r,r,r,Q")
1924 (match_operand:HI 1 "move_operand" "r,J,I,K,Q,r"))]
1925 "TARGET_LIVE_G0
1926 && (register_operand (operands[0], HImode)
1927 || register_operand (operands[1], HImode))"
1928 "@
1929 mov %1,%0
1930 and %0,0,%0
1931 and %0,0,%0\;or %0,%1,%0
1932 sethi %%hi(%a1),%0
1933 lduh %1,%0
1934 sth %1,%0"
1935 [(set_attr "type" "move,move,move,move,load,store")
1936 (set_attr "length" "1,1,2,1,1,1")])
1937
1938 (define_insn "*lo_sum_hi"
1939 [(set (match_operand:HI 0 "register_operand" "=r")
1940 (lo_sum:HI (match_operand:HI 1 "register_operand" "r")
1941 (match_operand 2 "immediate_operand" "in")))]
1942 ""
1943 "or %1,%%lo(%a2),%0"
1944 [(set_attr "length" "1")])
1945
1946 (define_insn "*store_hi"
1947 [(set (mem:HI (match_operand:SI 0 "symbolic_operand" ""))
1948 (match_operand:HI 1 "reg_or_0_operand" "rJ"))
1949 (clobber (match_scratch:SI 2 "=&r"))]
1950 "(reload_completed || reload_in_progress)
1951 && ! TARGET_PTR64"
1952 "sethi %%hi(%a0),%2\;sth %r1,[%2+%%lo(%a0)]"
1953 [(set_attr "type" "store")
1954 (set_attr "length" "2")])
1955
1956 (define_expand "movsi"
1957 [(set (match_operand:SI 0 "general_operand" "")
1958 (match_operand:SI 1 "general_operand" ""))]
1959 ""
1960 "
1961 {
1962 if (emit_move_sequence (operands, SImode))
1963 DONE;
1964 }")
1965
1966 ;; We must support both 'r' and 'f' registers here, because combine may
1967 ;; convert SFmode hard registers to SImode hard registers when simplifying
1968 ;; subreg sets.
1969
1970 ;; We cannot combine the similar 'r' and 'f' constraints, because it causes
1971 ;; problems with register allocation. Reload might try to put an integer
1972 ;; in an fp register, or an fp number in an integer register.
1973
1974 (define_insn "*movsi_insn"
1975 [(set (match_operand:SI 0 "reg_or_nonsymb_mem_operand" "=r,f,r,r,f,Q,Q")
1976 (match_operand:SI 1 "move_operand" "rI,!f,K,Q,!Q,rJ,!f"))]
1977 "! TARGET_LIVE_G0
1978 && (register_operand (operands[0], SImode)
1979 || register_operand (operands[1], SImode)
1980 || operands[1] == const0_rtx)"
1981 "@
1982 mov %1,%0
1983 fmovs %1,%0
1984 sethi %%hi(%a1),%0
1985 ld %1,%0
1986 ld %1,%0
1987 st %r1,%0
1988 st %1,%0"
1989 [(set_attr "type" "move,fp,move,load,fpload,store,fpstore")
1990 (set_attr "length" "1")])
1991
1992 (define_insn "*movsi_insn_liveg0"
1993 [(set (match_operand:SI 0 "reg_or_nonsymb_mem_operand" "=r,r,r,f,r,r,f,Q,Q")
1994 (match_operand:SI 1 "move_operand" "r,J,I,!f,K,Q,!Q,r,!f"))]
1995 "TARGET_LIVE_G0
1996 && (register_operand (operands[0], SImode)
1997 || register_operand (operands[1], SImode))"
1998 "@
1999 mov %1,%0
2000 and %0,0,%0
2001 and %0,0,%0\;or %0,%1,%0
2002 fmovs %1,%0
2003 sethi %%hi(%a1),%0
2004 ld %1,%0
2005 ld %1,%0
2006 st %1,%0
2007 st %1,%0"
2008 [(set_attr "type" "move,move,move,fp,move,load,fpload,store,fpstore")
2009 (set_attr "length" "1,1,2,1,1,1,1,1,1")])
2010
2011 (define_insn "*store_si"
2012 [(set (mem:SI (match_operand:SI 0 "symbolic_operand" ""))
2013 (match_operand:SI 1 "reg_or_0_operand" "rJ"))
2014 (clobber (match_scratch:SI 2 "=&r"))]
2015 "(reload_completed || reload_in_progress)
2016 && ! TARGET_PTR64"
2017 "sethi %%hi(%a0),%2\;st %r1,[%2+%%lo(%a0)]"
2018 [(set_attr "type" "store")
2019 (set_attr "length" "2")])
2020
2021 (define_expand "movdi"
2022 [(set (match_operand:DI 0 "reg_or_nonsymb_mem_operand" "")
2023 (match_operand:DI 1 "general_operand" ""))]
2024 ""
2025 "
2026 {
2027 if (emit_move_sequence (operands, DImode))
2028 DONE;
2029 }")
2030
2031 (define_insn "*movdi_sp32_insn"
2032 [(set (match_operand:DI 0 "reg_or_nonsymb_mem_operand" "=r,T,U,Q,r,r,?f,?f,?Q")
2033 (match_operand:DI 1 "general_operand" "r,U,T,r,Q,i,f,Q,f"))]
2034 "! TARGET_ARCH64
2035 && (register_operand (operands[0], DImode)
2036 || register_operand (operands[1], DImode)
2037 || operands[1] == const0_rtx)"
2038 "*
2039 {
2040 if (FP_REG_P (operands[0]) || FP_REG_P (operands[1]))
2041 return output_fp_move_double (operands);
2042 return output_move_double (operands);
2043 }"
2044 [(set_attr "type" "move,store,load,store,load,multi,fp,fpload,fpstore")
2045 (set_attr "length" "2,1,1,3,3,3,2,3,3")])
2046
2047 ;;; ??? The trick used below can be extended to load any negative 32 bit
2048 ;;; constant in two instructions. Currently the compiler will use HIGH/LO_SUM
2049 ;;; for anything not matching the HIK constraints, which results in 5
2050 ;;; instructions. Positive 32 bit constants can be loaded in the obvious way
2051 ;;; with sethi/or. To extend the trick, in the xor instruction, use
2052 ;;; xor %o0, ((op1 & 0x3ff) | -0x400), %o0
2053 ;;; This needs the original value of operands[1], not the inverted value.
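;;; As an illustrative worked example (the value is arbitrary): to load -1234
;;; (0xfffffffffffffb2e), the extended trick would emit
;;;   sethi %hi(0x4d1),%o0      ! 0x4d1 = ~(-1234); reg becomes 0x400
;;;   xor   %o0,-210,%o0        ! -210 = (0xb2e & 0x3ff) | -0x400
;;; since 0x400 ^ 0xffffffffffffff2e = 0xfffffffffffffb2e.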
2054
2055 (define_insn "*movdi_sp64_insn"
2056 [(set (match_operand:DI 0 "reg_or_nonsymb_mem_operand" "=r,r,r,Q,?f,?f,?Q")
2057 (match_operand:DI 1 "move_operand" "rI,K,Q,rJ,f,Q,f"))]
2058 "TARGET_ARCH64
2059 && (register_operand (operands[0], DImode)
2060 || register_operand (operands[1], DImode)
2061 || operands[1] == const0_rtx)"
2062 "*
2063 {
2064 switch (which_alternative)
2065 {
2066 case 0:
2067 return \"mov %1,%0\";
2068 case 1:
2069 /* Sethi does not sign extend, so we must use a little trickery
2070 to use it for negative numbers. Invert the constant before
2071 	 loading it in, then use an xor immediate to invert the loaded bits
2072 (along with the upper 32 bits) to the desired constant. This
2073 works because the sethi and immediate fields overlap. */
2074
2075 if ((INTVAL (operands[1]) & 0x80000000) == 0)
2076 return \"sethi %%hi(%a1),%0\";
2077 else
2078 {
2079 operands[1] = GEN_INT (~INTVAL (operands[1]));
2080 output_asm_insn (\"sethi %%hi(%a1),%0\", operands);
2081 /* The low 10 bits are already zero, but invert the rest.
2082 Assemblers don't accept 0x1c00, so use -0x400 instead. */
2083 return \"xor %0,-0x400,%0\";
2084 }
2085 case 2:
2086 return \"ldx %1,%0\";
2087 case 3:
2088 return \"stx %r1,%0\";
2089 case 4:
2090 return \"mov %1,%0\";
2091 case 5:
2092 return \"ldd %1,%0\";
2093 case 6:
2094 return \"std %1,%0\";
2095 }
2096 }"
2097 [(set_attr "type" "move,move,load,store,fp,fpload,fpstore")
2098 (set_attr "length" "1,2,1,1,1,1,1")])
2099
2100 ;; ??? There's no symbolic (set (mem:DI ...) ...).
2101 ;; Experimentation with v9 suggested one isn't needed.
2102 \f
2103 ;; Block move insns.
2104
2105 ;; ??? We get better code without it. See output_block_move in sparc.c.
2106
2107 ;; The definition of this insn does not really explain what it does,
2108 ;; but it should suffice
2109 ;; that anything generated as this insn will be recognized as one
2110 ;; and that it will not successfully combine with anything.
2111 ;(define_expand "movstrsi"
2112 ; [(parallel [(set (mem:BLK (match_operand:BLK 0 "general_operand" ""))
2113 ; (mem:BLK (match_operand:BLK 1 "general_operand" "")))
2114 ; (use (match_operand:SI 2 "nonmemory_operand" ""))
2115 ; (use (match_operand:SI 3 "immediate_operand" ""))
2116 ; (clobber (match_dup 0))
2117 ; (clobber (match_dup 1))
2118 ; (clobber (match_scratch:SI 4 ""))
2119 ; (clobber (reg:SI 100))
2120 ; (clobber (reg:SI 1))])]
2121 ; ""
2122 ; "
2123 ;{
2124 ; /* If the size isn't known, don't emit inline code. output_block_move
2125 ; would output code that's much slower than the library function.
2126 ; Also don't output code for large blocks. */
2127 ; if (GET_CODE (operands[2]) != CONST_INT
2128 ; || GET_CODE (operands[3]) != CONST_INT
2129 ; || INTVAL (operands[2]) / INTVAL (operands[3]) > 16)
2130 ; FAIL;
2131 ;
2132 ; operands[0] = copy_to_mode_reg (Pmode, XEXP (operands[0], 0));
2133 ; operands[1] = copy_to_mode_reg (Pmode, XEXP (operands[1], 0));
2134 ; operands[2] = force_not_mem (operands[2]);
2135 ;}")
2136
2137 ;(define_insn "*block_move_insn"
2138 ; [(set (mem:BLK (match_operand:SI 0 "register_operand" "+r"))
2139 ; (mem:BLK (match_operand:SI 1 "register_operand" "+r")))
2140 ; (use (match_operand:SI 2 "nonmemory_operand" "rn"))
2141 ; (use (match_operand:SI 3 "immediate_operand" "i"))
2142 ; (clobber (match_dup 0))
2143 ; (clobber (match_dup 1))
2144 ; (clobber (match_scratch:SI 4 "=&r"))
2145 ; (clobber (reg:SI 100))
2146 ; (clobber (reg:SI 1))]
2147 ; ""
2148 ; "* return output_block_move (operands);"
2149 ; [(set_attr "type" "multi")
2150 ; (set_attr "length" "6")])
2151 \f
2152 ;; Floating point move insns
2153
2154 ;; This pattern forces (set (reg:SF ...) (const_double ...))
2155 ;; to be reloaded by putting the constant into memory.
2156 ;; It must come before the more general movsf pattern.
2157 (define_insn "*movsf_const_insn"
2158 [(set (match_operand:SF 0 "general_operand" "=?r,f,m")
2159 (match_operand:SF 1 "" "?F,m,G"))]
2160 "TARGET_FPU
2161 && GET_CODE (operands[1]) == CONST_DOUBLE
2162 && (GET_CODE (operands[0]) == REG
2163 || fp_zero_operand (operands[1]))"
2164 "*
2165 {
2166 switch (which_alternative)
2167 {
2168 case 0:
2169 return singlemove_string (operands);
2170 case 1:
2171 return \"ld %1,%0\";
2172 case 2:
2173 return \"st %%g0,%0\";
2174 }
2175 }"
2176 [(set_attr "type" "load,fpload,store")
2177 (set_attr "length" "2,1,1")])
2178
2179 (define_expand "movsf"
2180 [(set (match_operand:SF 0 "general_operand" "")
2181 (match_operand:SF 1 "general_operand" ""))]
2182 ""
2183 "
2184 {
2185 if (emit_move_sequence (operands, SFmode))
2186 DONE;
2187 }")
2188
2189 (define_insn "*movsf_insn"
2190 [(set (match_operand:SF 0 "reg_or_nonsymb_mem_operand" "=f,r,f,r,Q,Q")
2191 (match_operand:SF 1 "reg_or_nonsymb_mem_operand" "f,r,Q,Q,f,r"))]
2192 "TARGET_FPU
2193 && (register_operand (operands[0], SFmode)
2194 || register_operand (operands[1], SFmode))"
2195 "@
2196 fmovs %1,%0
2197 mov %1,%0
2198 ld %1,%0
2199 ld %1,%0
2200 st %1,%0
2201 st %1,%0"
2202 [(set_attr "type" "fp,move,fpload,load,fpstore,store")])
2203
2204 ;; Exactly the same as above, except that all `f' cases are deleted.
2205 ;; This is necessary to prevent reload from ever trying to use a `f' reg
2206 ;; when -mno-fpu.
2207
2208 (define_insn "*movsf_no_f_insn"
2209 [(set (match_operand:SF 0 "reg_or_nonsymb_mem_operand" "=r,r,Q")
2210 (match_operand:SF 1 "reg_or_nonsymb_mem_operand" "r,Q,r"))]
2211 "! TARGET_FPU
2212 && (register_operand (operands[0], SFmode)
2213 || register_operand (operands[1], SFmode))"
2214 "@
2215 mov %1,%0
2216 ld %1,%0
2217 st %1,%0"
2218 [(set_attr "type" "move,load,store")])
2219
2220 (define_insn "*store_sf"
2221 [(set (mem:SF (match_operand:SI 0 "symbolic_operand" "i"))
2222 (match_operand:SF 1 "reg_or_0_operand" "rfG"))
2223 (clobber (match_scratch:SI 2 "=&r"))]
2224 "(reload_completed || reload_in_progress)
2225 && ! TARGET_PTR64"
2226 "sethi %%hi(%a0),%2\;st %r1,[%2+%%lo(%a0)]"
2227 [(set_attr "type" "store")
2228 (set_attr "length" "2")])
2229
2230 ;; This pattern forces (set (reg:DF ...) (const_double ...))
2231 ;; to be reloaded by putting the constant into memory.
2232 ;; It must come before the more general movdf pattern.
2233
2234 (define_insn "*movdf_const_insn"
2235 [(set (match_operand:DF 0 "general_operand" "=?r,e,o")
2236 (match_operand:DF 1 "" "?F,m,G"))]
2237 "TARGET_FPU
2238 && GET_CODE (operands[1]) == CONST_DOUBLE
2239 && (GET_CODE (operands[0]) == REG
2240 || fp_zero_operand (operands[1]))"
2241 "*
2242 {
2243 switch (which_alternative)
2244 {
2245 case 0:
2246 return output_move_double (operands);
2247 case 1:
2248 return output_fp_move_double (operands);
2249 case 2:
2250 if (TARGET_ARCH64)
2251 {
2252 return \"stx %%g0,%0\";
2253 }
2254 else
2255 {
2256 operands[1] = adj_offsettable_operand (operands[0], 4);
2257 return \"st %%g0,%0\;st %%g0,%1\";
2258 }
2259 }
2260 }"
2261 [(set_attr "type" "load,fpload,store")
2262 (set_attr "length" "3,3,3")])
2263
2264 (define_expand "movdf"
2265 [(set (match_operand:DF 0 "general_operand" "")
2266 (match_operand:DF 1 "general_operand" ""))]
2267 ""
2268 "
2269 {
2270 if (emit_move_sequence (operands, DFmode))
2271 DONE;
2272 }")
2273
2274 (define_insn "*movdf_insn"
2275 [(set (match_operand:DF 0 "reg_or_nonsymb_mem_operand" "=T,U,e,r,Q,Q,e,r")
2276 (match_operand:DF 1 "reg_or_nonsymb_mem_operand" "U,T,e,r,e,r,Q,Q"))]
2277 "TARGET_FPU
2278 && (register_operand (operands[0], DFmode)
2279 || register_operand (operands[1], DFmode))"
2280 "*
2281 {
2282 if (FP_REG_P (operands[0]) || FP_REG_P (operands[1]))
2283 return output_fp_move_double (operands);
2284 return output_move_double (operands);
2285 }"
2286 [(set_attr "type" "fpstore,fpload,fp,move,fpstore,store,fpload,load")
2287 (set_attr "length" "1,1,2,2,3,3,3,3")])
2288
2289 ;; Exactly the same as above, except that all `e' cases are deleted.
2290 ;; This is necessary to prevent reload from ever trying to use a `e' reg
2291 ;; when -mno-fpu.
2292
2293 (define_insn "*movdf_no_e_insn"
2294 [(set (match_operand:DF 0 "reg_or_nonsymb_mem_operand" "=T,U,r,Q,&r")
2295 (match_operand:DF 1 "reg_or_nonsymb_mem_operand" "U,T,r,r,Q"))]
2296 "! TARGET_FPU
2297 && (register_operand (operands[0], DFmode)
2298 || register_operand (operands[1], DFmode))"
2299 "* return output_move_double (operands);"
2300 [(set_attr "type" "store,load,move,store,load")
2301 (set_attr "length" "1,1,2,3,3")])
2302
2303 ;; Must handle overlapping registers here, since parameters can be unaligned
2304 ;; in registers.
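;; For example (purely illustrative register choice): copying a DFmode value
;; held in %o1/%o2 into %o2/%o3. Moving subword 0 first would overwrite %o2
;; before it had been read as subword 1 of the source, so in that case the
;; split below emits the second subword move first.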
2305
2306 (define_split
2307 [(set (match_operand:DF 0 "register_operand" "")
2308 (match_operand:DF 1 "register_operand" ""))]
2309 "! TARGET_ARCH64 && reload_completed
2310 && REGNO (operands[0]) < SPARC_FIRST_V9_FP_REG
2311 && REGNO (operands[1]) < SPARC_FIRST_V9_FP_REG"
2312 [(set (match_dup 2) (match_dup 3))
2313 (set (match_dup 4) (match_dup 5))]
2314 "
2315 {
2316 rtx first_set = operand_subword (operands[0], 0, 0, DFmode);
2317 rtx second_use = operand_subword (operands[1], 1, 0, DFmode);
2318
2319 if (REGNO (first_set) == REGNO (second_use))
2320 {
2321 operands[2] = operand_subword (operands[0], 1, 0, DFmode);
2322 operands[3] = second_use;
2323 operands[4] = first_set;
2324 operands[5] = operand_subword (operands[1], 0, 0, DFmode);
2325 }
2326 else
2327 {
2328 operands[2] = first_set;
2329 operands[3] = operand_subword (operands[1], 0, 0, DFmode);
2330 operands[4] = operand_subword (operands[0], 1, 0, DFmode);
2331 operands[5] = second_use;
2332 }
2333 }")
2334
2335 (define_insn "*store_df"
2336 [(set (mem:DF (match_operand:SI 0 "symbolic_operand" "i,i"))
2337 (match_operand:DF 1 "reg_or_0_operand" "re,G"))
2338 (clobber (match_scratch:SI 2 "=&r,&r"))]
2339 "(reload_completed || reload_in_progress)
2340 && ! TARGET_PTR64"
2341 "*
2342 {
2343 output_asm_insn (\"sethi %%hi(%a0),%2\", operands);
2344 if (which_alternative == 0)
2345 return \"std %1,[%2+%%lo(%a0)]\";
2346 else
2347 return \"st %%g0,[%2+%%lo(%a0)]\;st %%g0,[%2+%%lo(%a0+4)]\";
2348 }"
2349 [(set_attr "type" "store")
2350 (set_attr "length" "3")])
2351
2352 ;; This pattern forces (set (reg:TF ...) (const_double ...))
2353 ;; to be reloaded by putting the constant into memory.
2354 ;; It must come before the more general movtf pattern.
2355 (define_insn "*movtf_const_insn"
2356 [(set (match_operand:TF 0 "general_operand" "=?r,e,o")
2357 (match_operand:TF 1 "" "?F,m,G"))]
2358 "TARGET_FPU
2359 && GET_CODE (operands[1]) == CONST_DOUBLE
2360 && (GET_CODE (operands[0]) == REG
2361 || fp_zero_operand (operands[1]))"
2362 "*
2363 {
2364 switch (which_alternative)
2365 {
2366 case 0:
2367 return output_move_quad (operands);
2368 case 1:
2369 return output_fp_move_quad (operands);
2370 case 2:
2371 if (TARGET_ARCH64)
2372 {
2373 operands[1] = adj_offsettable_operand (operands[0], 8);
2374 return \"stx %%g0,%0\;stx %%g0,%1\";
2375 }
2376 else
2377 {
2378 /* ??? Do we run off the end of the array here? */
2379 operands[1] = adj_offsettable_operand (operands[0], 4);
2380 operands[2] = adj_offsettable_operand (operands[0], 8);
2381 operands[3] = adj_offsettable_operand (operands[0], 12);
2382 return \"st %%g0,%0\;st %%g0,%1\;st %%g0,%2\;st %%g0,%3\";
2383 }
2384 }
2385 }"
2386 [(set_attr "type" "load,fpload,store")
2387 (set_attr "length" "5,5,5")])
2388
2389 (define_expand "movtf"
2390 [(set (match_operand:TF 0 "general_operand" "")
2391 (match_operand:TF 1 "general_operand" ""))]
2392 ""
2393 "
2394 {
2395 if (emit_move_sequence (operands, TFmode))
2396 DONE;
2397 }")
2398
2399 (define_insn "*movtf_insn"
2400 [(set (match_operand:TF 0 "reg_or_nonsymb_mem_operand" "=e,r,Q,Q,e,&r")
2401 (match_operand:TF 1 "reg_or_nonsymb_mem_operand" "e,r,e,r,Q,Q"))]
2402 "TARGET_FPU
2403 && (register_operand (operands[0], TFmode)
2404 || register_operand (operands[1], TFmode))"
2405 "*
2406 {
2407 if (FP_REG_P (operands[0]) || FP_REG_P (operands[1]))
2408 return output_fp_move_quad (operands);
2409 return output_move_quad (operands);
2410 }"
2411 [(set_attr "type" "fp,move,fpstore,store,fpload,load")
2412 (set_attr "length" "4,4,5,5,5,5")])
2413
2414 ;; Exactly the same as above, except that all `e' cases are deleted.
2415 ;; This is necessary to prevent reload from ever trying to use a `e' reg
2416 ;; when -mno-fpu.
2417
2418 (define_insn "*movtf_no_e_insn"
2419 [(set (match_operand:TF 0 "reg_or_nonsymb_mem_operand" "=r,Q,&r")
2420 (match_operand:TF 1 "reg_or_nonsymb_mem_operand" "r,r,Q"))]
2421 "! TARGET_FPU
2422 && (register_operand (operands[0], TFmode)
2423 || register_operand (operands[1], TFmode))"
2424 "*
2425 {
2426 if (FP_REG_P (operands[0]) || FP_REG_P (operands[1]))
2427 return output_fp_move_quad (operands);
2428 return output_move_quad (operands);
2429 }"
2430 [(set_attr "type" "move,store,load")
2431 (set_attr "length" "4,5,5")])
2432
2433 ;; This is disabled because it does not work. Long doubles have only 8
2434 ;; byte alignment. Adding an offset of 8 or 12 to the %lo of an address
2435 ;; that is only 8-byte aligned may overflow the %lo field. See also GO_IF_LEGITIMATE_ADDRESS.
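;; For instance (illustrative numbers): if %lo(sym) = 0x3f8, then sym+8
;; carries into the %hi part, so [%reg+%lo(sym+8)] with %reg loaded from
;; %hi(sym) would address the wrong word.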
2436 (define_insn "*store_tf"
2437 [(set (mem:TF (match_operand:SI 0 "symbolic_operand" "i,i"))
2438 (match_operand:TF 1 "reg_or_0_operand" "re,G"))
2439 (clobber (match_scratch:SI 2 "=&r,&r"))]
2440 "0 && (reload_completed || reload_in_progress)
2441 && ! TARGET_PTR64"
2442 "*
2443 {
2444 output_asm_insn (\"sethi %%hi(%a0),%2\", operands);
2445 if (which_alternative == 0)
2446 return \"std %1,[%2+%%lo(%a0)]\;std %S1,[%2+%%lo(%a0+8)]\";
2447 else
2448 return \"st %%g0,[%2+%%lo(%a0)]\;st %%g0,[%2+%%lo(%a0+4)]\; st %%g0,[%2+%%lo(%a0+8)]\;st %%g0,[%2+%%lo(%a0+12)]\";
2449 }"
2450 [(set_attr "type" "store")
2451 (set_attr "length" "5")])
2452 \f
2453 ;; Sparc V9 conditional move instructions.
2454
2455 ;; We can handle larger constants here for some flavors, but for now we keep
2456 ;; it simple and only allow those constants supported by all flavors.
2457 ;; Note that emit_conditional_move canonicalizes operands 2,3 so that operand
2458 ;; 3 contains the constant if one is present, but we handle either for
2459 ;; generality (sparc.c puts a constant in operand 2).
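;; As a purely illustrative sketch (variable and register names are assumed):
;; for C source like "x = (a != 0) ? b : c;" with a in a 64-bit register, the
;; register-compare patterns below can yield a single "movrnz %a,%b,%x", while
;; a compare that goes through the condition codes yields e.g.
;; "movne %icc,%b,%x".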
2460
2461 (define_expand "movqicc"
2462 [(set (match_operand:QI 0 "register_operand" "")
2463 (if_then_else:QI (match_operand 1 "comparison_operator" "")
2464 (match_operand:QI 2 "arith10_operand" "")
2465 (match_operand:QI 3 "arith10_operand" "")))]
2466 "TARGET_V9"
2467 "
2468 {
2469 enum rtx_code code = GET_CODE (operands[1]);
2470
2471 if (GET_MODE (sparc_compare_op0) == DImode
2472 && ! TARGET_ARCH64)
2473 FAIL;
2474
2475 if (sparc_compare_op1 == const0_rtx
2476 && GET_CODE (sparc_compare_op0) == REG
2477 && GET_MODE (sparc_compare_op0) == DImode
2478 && v9_regcmp_p (code))
2479 {
2480 operands[1] = gen_rtx (code, DImode,
2481 sparc_compare_op0, sparc_compare_op1);
2482 }
2483 else
2484 {
2485 rtx cc_reg = gen_compare_reg (code,
2486 sparc_compare_op0, sparc_compare_op1);
2487 operands[1] = gen_rtx (code, GET_MODE (cc_reg), cc_reg, const0_rtx);
2488 }
2489 }")
2490
2491 (define_expand "movhicc"
2492 [(set (match_operand:HI 0 "register_operand" "")
2493 (if_then_else:HI (match_operand 1 "comparison_operator" "")
2494 (match_operand:HI 2 "arith10_operand" "")
2495 (match_operand:HI 3 "arith10_operand" "")))]
2496 "TARGET_V9"
2497 "
2498 {
2499 enum rtx_code code = GET_CODE (operands[1]);
2500
2501 if (GET_MODE (sparc_compare_op0) == DImode
2502 && ! TARGET_ARCH64)
2503 FAIL;
2504
2505 if (sparc_compare_op1 == const0_rtx
2506 && GET_CODE (sparc_compare_op0) == REG
2507 && GET_MODE (sparc_compare_op0) == DImode
2508 && v9_regcmp_p (code))
2509 {
2510 operands[1] = gen_rtx (code, DImode,
2511 sparc_compare_op0, sparc_compare_op1);
2512 }
2513 else
2514 {
2515 rtx cc_reg = gen_compare_reg (code,
2516 sparc_compare_op0, sparc_compare_op1);
2517 operands[1] = gen_rtx (code, GET_MODE (cc_reg), cc_reg, const0_rtx);
2518 }
2519 }")
2520
2521 (define_expand "movsicc"
2522 [(set (match_operand:SI 0 "register_operand" "")
2523 (if_then_else:SI (match_operand 1 "comparison_operator" "")
2524 (match_operand:SI 2 "arith10_operand" "")
2525 (match_operand:SI 3 "arith10_operand" "")))]
2526 "TARGET_V9"
2527 "
2528 {
2529 enum rtx_code code = GET_CODE (operands[1]);
2530
2531 if (GET_MODE (sparc_compare_op0) == DImode
2532 && ! TARGET_ARCH64)
2533 FAIL;
2534
2535 if (sparc_compare_op1 == const0_rtx
2536 && GET_CODE (sparc_compare_op0) == REG
2537 && GET_MODE (sparc_compare_op0) == DImode
2538 && v9_regcmp_p (code))
2539 {
2540 operands[1] = gen_rtx (code, DImode,
2541 sparc_compare_op0, sparc_compare_op1);
2542 }
2543 else
2544 {
2545 rtx cc_reg = gen_compare_reg (code,
2546 sparc_compare_op0, sparc_compare_op1);
2547 operands[1] = gen_rtx (code, GET_MODE (cc_reg), cc_reg, const0_rtx);
2548 }
2549 }")
2550
2551 (define_expand "movdicc"
2552 [(set (match_operand:DI 0 "register_operand" "")
2553 (if_then_else:DI (match_operand 1 "comparison_operator" "")
2554 (match_operand:DI 2 "arith10_double_operand" "")
2555 (match_operand:DI 3 "arith10_double_operand" "")))]
2556 "TARGET_ARCH64"
2557 "
2558 {
2559 enum rtx_code code = GET_CODE (operands[1]);
2560
2561 if (sparc_compare_op1 == const0_rtx
2562 && GET_CODE (sparc_compare_op0) == REG
2563 && GET_MODE (sparc_compare_op0) == DImode
2564 && v9_regcmp_p (code))
2565 {
2566 operands[1] = gen_rtx (code, DImode,
2567 sparc_compare_op0, sparc_compare_op1);
2568 }
2569 else
2570 {
2571 rtx cc_reg = gen_compare_reg (code,
2572 sparc_compare_op0, sparc_compare_op1);
2573 operands[1] = gen_rtx (code, GET_MODE (cc_reg), cc_reg, const0_rtx);
2574 }
2575 }")
2576
2577 (define_expand "movsfcc"
2578 [(set (match_operand:SF 0 "register_operand" "")
2579 (if_then_else:SF (match_operand 1 "comparison_operator" "")
2580 (match_operand:SF 2 "register_operand" "")
2581 (match_operand:SF 3 "register_operand" "")))]
2582 "TARGET_V9 && TARGET_FPU"
2583 "
2584 {
2585 enum rtx_code code = GET_CODE (operands[1]);
2586
2587 if (GET_MODE (sparc_compare_op0) == DImode
2588 && ! TARGET_ARCH64)
2589 FAIL;
2590
2591 if (sparc_compare_op1 == const0_rtx
2592 && GET_CODE (sparc_compare_op0) == REG
2593 && GET_MODE (sparc_compare_op0) == DImode
2594 && v9_regcmp_p (code))
2595 {
2596 operands[1] = gen_rtx (code, DImode,
2597 sparc_compare_op0, sparc_compare_op1);
2598 }
2599 else
2600 {
2601 rtx cc_reg = gen_compare_reg (code,
2602 sparc_compare_op0, sparc_compare_op1);
2603 operands[1] = gen_rtx (code, GET_MODE (cc_reg), cc_reg, const0_rtx);
2604 }
2605 }")
2606
2607 (define_expand "movdfcc"
2608 [(set (match_operand:DF 0 "register_operand" "")
2609 (if_then_else:DF (match_operand 1 "comparison_operator" "")
2610 (match_operand:DF 2 "register_operand" "")
2611 (match_operand:DF 3 "register_operand" "")))]
2612 "TARGET_V9 && TARGET_FPU"
2613 "
2614 {
2615 enum rtx_code code = GET_CODE (operands[1]);
2616
2617 if (GET_MODE (sparc_compare_op0) == DImode
2618 && ! TARGET_ARCH64)
2619 FAIL;
2620
2621 if (sparc_compare_op1 == const0_rtx
2622 && GET_CODE (sparc_compare_op0) == REG
2623 && GET_MODE (sparc_compare_op0) == DImode
2624 && v9_regcmp_p (code))
2625 {
2626 operands[1] = gen_rtx (code, DImode,
2627 sparc_compare_op0, sparc_compare_op1);
2628 }
2629 else
2630 {
2631 rtx cc_reg = gen_compare_reg (code,
2632 sparc_compare_op0, sparc_compare_op1);
2633 operands[1] = gen_rtx (code, GET_MODE (cc_reg), cc_reg, const0_rtx);
2634 }
2635 }")
2636
2637 (define_expand "movtfcc"
2638 [(set (match_operand:TF 0 "register_operand" "")
2639 (if_then_else:TF (match_operand 1 "comparison_operator" "")
2640 (match_operand:TF 2 "register_operand" "")
2641 (match_operand:TF 3 "register_operand" "")))]
2642 "TARGET_V9 && TARGET_FPU"
2643 "
2644 {
2645 enum rtx_code code = GET_CODE (operands[1]);
2646
2647 if (GET_MODE (sparc_compare_op0) == DImode
2648 && ! TARGET_ARCH64)
2649 FAIL;
2650
2651 if (sparc_compare_op1 == const0_rtx
2652 && GET_CODE (sparc_compare_op0) == REG
2653 && GET_MODE (sparc_compare_op0) == DImode
2654 && v9_regcmp_p (code))
2655 {
2656 operands[1] = gen_rtx (code, DImode,
2657 sparc_compare_op0, sparc_compare_op1);
2658 }
2659 else
2660 {
2661 rtx cc_reg = gen_compare_reg (code,
2662 sparc_compare_op0, sparc_compare_op1);
2663 operands[1] = gen_rtx (code, GET_MODE (cc_reg), cc_reg, const0_rtx);
2664 }
2665 }")
2666
2667 ;; Conditional move define_insns.
2668
2669 (define_insn "*movqi_cc_sp64"
2670 [(set (match_operand:QI 0 "register_operand" "=r,r")
2671 (if_then_else:QI (match_operator 1 "comparison_operator"
2672 [(match_operand 2 "icc_or_fcc_reg_operand" "X,X")
2673 (const_int 0)])
2674 (match_operand:QI 3 "arith11_operand" "rL,0")
2675 (match_operand:QI 4 "arith11_operand" "0,rL")))]
2676 "TARGET_V9"
2677 "@
2678 mov%C1 %x2,%3,%0
2679 mov%c1 %x2,%4,%0"
2680 [(set_attr "type" "cmove")])
2681
2682 (define_insn "*movhi_cc_sp64"
2683 [(set (match_operand:HI 0 "register_operand" "=r,r")
2684 (if_then_else:HI (match_operator 1 "comparison_operator"
2685 [(match_operand 2 "icc_or_fcc_reg_operand" "X,X")
2686 (const_int 0)])
2687 (match_operand:HI 3 "arith11_operand" "rL,0")
2688 (match_operand:HI 4 "arith11_operand" "0,rL")))]
2689 "TARGET_V9"
2690 "@
2691 mov%C1 %x2,%3,%0
2692 mov%c1 %x2,%4,%0"
2693 [(set_attr "type" "cmove")])
2694
2695 (define_insn "*movsi_cc_sp64"
2696 [(set (match_operand:SI 0 "register_operand" "=r,r")
2697 (if_then_else:SI (match_operator 1 "comparison_operator"
2698 [(match_operand 2 "icc_or_fcc_reg_operand" "X,X")
2699 (const_int 0)])
2700 (match_operand:SI 3 "arith11_operand" "rL,0")
2701 (match_operand:SI 4 "arith11_operand" "0,rL")))]
2702 "TARGET_V9"
2703 "@
2704 mov%C1 %x2,%3,%0
2705 mov%c1 %x2,%4,%0"
2706 [(set_attr "type" "cmove")])
2707
2708 ;; ??? The constraints of operands 3,4 need work.
2709 (define_insn "*movdi_cc_sp64"
2710 [(set (match_operand:DI 0 "register_operand" "=r,r")
2711 (if_then_else:DI (match_operator 1 "comparison_operator"
2712 [(match_operand 2 "icc_or_fcc_reg_operand" "X,X")
2713 (const_int 0)])
2714 (match_operand:DI 3 "arith11_double_operand" "rLH,0")
2715 (match_operand:DI 4 "arith11_double_operand" "0,rLH")))]
2716 "TARGET_ARCH64"
2717 "@
2718 mov%C1 %x2,%3,%0
2719 mov%c1 %x2,%4,%0"
2720 [(set_attr "type" "cmove")])
2721
2722 (define_insn "*movsf_cc_sp64"
2723 [(set (match_operand:SF 0 "register_operand" "=f,f")
2724 (if_then_else:SF (match_operator 1 "comparison_operator"
2725 [(match_operand 2 "icc_or_fcc_reg_operand" "X,X")
2726 (const_int 0)])
2727 (match_operand:SF 3 "register_operand" "f,0")
2728 (match_operand:SF 4 "register_operand" "0,f")))]
2729 "TARGET_V9 && TARGET_FPU"
2730 "@
2731 fmovs%C1 %x2,%3,%0
2732 fmovs%c1 %x2,%4,%0"
2733 [(set_attr "type" "cmove")])
2734
2735 (define_insn "*movdf_cc_sp64"
2736 [(set (match_operand:DF 0 "register_operand" "=e,e")
2737 (if_then_else:DF (match_operator 1 "comparison_operator"
2738 [(match_operand 2 "icc_or_fcc_reg_operand" "X,X")
2739 (const_int 0)])
2740 (match_operand:DF 3 "register_operand" "e,0")
2741 (match_operand:DF 4 "register_operand" "0,e")))]
2742 "TARGET_V9 && TARGET_FPU"
2743 "@
2744 fmovd%C1 %x2,%3,%0
2745 fmovd%c1 %x2,%4,%0"
2746 [(set_attr "type" "cmove")])
2747
2748 (define_insn "*movtf_cc_sp64"
2749 [(set (match_operand:TF 0 "register_operand" "=e,e")
2750 (if_then_else:TF (match_operator 1 "comparison_operator"
2751 [(match_operand 2 "icc_or_fcc_reg_operand" "X,X")
2752 (const_int 0)])
2753 (match_operand:TF 3 "register_operand" "e,0")
2754 (match_operand:TF 4 "register_operand" "0,e")))]
2755 "TARGET_V9 && TARGET_FPU && TARGET_HARD_QUAD"
2756 "@
2757 fmovq%C1 %x2,%3,%0
2758 fmovq%c1 %x2,%4,%0"
2759 [(set_attr "type" "cmove")])
2760
2761 (define_insn "*movqi_cc_reg_sp64"
2762 [(set (match_operand:QI 0 "register_operand" "=r,r")
2763 (if_then_else:QI (match_operator 1 "v9_regcmp_op"
2764 [(match_operand:DI 2 "register_operand" "r,r")
2765 (const_int 0)])
2766 (match_operand:QI 3 "arith10_operand" "rM,0")
2767 (match_operand:QI 4 "arith10_operand" "0,rM")))]
2768 "TARGET_ARCH64"
2769 "@
2770 movr%D1 %2,%r3,%0
2771 movr%d1 %2,%r4,%0"
2772 [(set_attr "type" "cmove")])
2773
2774 (define_insn "*movhi_cc_reg_sp64"
2775 [(set (match_operand:HI 0 "register_operand" "=r,r")
2776 (if_then_else:HI (match_operator 1 "v9_regcmp_op"
2777 [(match_operand:DI 2 "register_operand" "r,r")
2778 (const_int 0)])
2779 (match_operand:HI 3 "arith10_operand" "rM,0")
2780 (match_operand:HI 4 "arith10_operand" "0,rM")))]
2781 "TARGET_ARCH64"
2782 "@
2783 movr%D1 %2,%r3,%0
2784 movr%d1 %2,%r4,%0"
2785 [(set_attr "type" "cmove")])
2786
2787 (define_insn "*movsi_cc_reg_sp64"
2788 [(set (match_operand:SI 0 "register_operand" "=r,r")
2789 (if_then_else:SI (match_operator 1 "v9_regcmp_op"
2790 [(match_operand:DI 2 "register_operand" "r,r")
2791 (const_int 0)])
2792 (match_operand:SI 3 "arith10_operand" "rM,0")
2793 (match_operand:SI 4 "arith10_operand" "0,rM")))]
2794 "TARGET_ARCH64"
2795 "@
2796 movr%D1 %2,%r3,%0
2797 movr%d1 %2,%r4,%0"
2798 [(set_attr "type" "cmove")])
2799
2800 ;; ??? The constraints of operands 3,4 need work.
2801 (define_insn "*movdi_cc_reg_sp64"
2802 [(set (match_operand:DI 0 "register_operand" "=r,r")
2803 (if_then_else:DI (match_operator 1 "v9_regcmp_op"
2804 [(match_operand:DI 2 "register_operand" "r,r")
2805 (const_int 0)])
2806 (match_operand:DI 3 "arith10_double_operand" "rMH,0")
2807 (match_operand:DI 4 "arith10_double_operand" "0,rMH")))]
2808 "TARGET_ARCH64"
2809 "@
2810 movr%D1 %2,%r3,%0
2811 movr%d1 %2,%r4,%0"
2812 [(set_attr "type" "cmove")])
2813
2814 (define_insn "*movsf_cc_reg_sp64"
2815 [(set (match_operand:SF 0 "register_operand" "=f,f")
2816 (if_then_else:SF (match_operator 1 "v9_regcmp_op"
2817 [(match_operand:DI 2 "register_operand" "r,r")
2818 (const_int 0)])
2819 (match_operand:SF 3 "register_operand" "f,0")
2820 (match_operand:SF 4 "register_operand" "0,f")))]
2821 "TARGET_ARCH64 && TARGET_FPU"
2822 "@
2823 fmovrs%D1 %2,%3,%0
2824 fmovrs%d1 %2,%4,%0"
2825 [(set_attr "type" "cmove")])
2826
2827 (define_insn "*movdf_cc_reg_sp64"
2828 [(set (match_operand:DF 0 "register_operand" "=e,e")
2829 (if_then_else:DF (match_operator 1 "v9_regcmp_op"
2830 [(match_operand:DI 2 "register_operand" "r,r")
2831 (const_int 0)])
2832 (match_operand:DF 3 "register_operand" "e,0")
2833 (match_operand:DF 4 "register_operand" "0,e")))]
2834 "TARGET_ARCH64 && TARGET_FPU"
2835 "@
2836 fmovrd%D1 %2,%3,%0
2837 fmovrd%d1 %2,%4,%0"
2838 [(set_attr "type" "cmove")])
2839
2840 (define_insn "*movtf_cc_reg_sp64"
2841 [(set (match_operand:TF 0 "register_operand" "=e,e")
2842 (if_then_else:TF (match_operator 1 "v9_regcmp_op"
2843 [(match_operand:DI 2 "register_operand" "r,r")
2844 (const_int 0)])
2845 (match_operand:TF 3 "register_operand" "e,0")
2846 (match_operand:TF 4 "register_operand" "0,e")))]
2847 "TARGET_ARCH64 && TARGET_FPU"
2848 "@
2849 fmovrq%D1 %2,%3,%0
2850 fmovrq%d1 %2,%4,%0"
2851 [(set_attr "type" "cmove")])
2852 \f
2853 ;;- zero extension instructions
2854
2855 ;; These patterns originally accepted general_operands; however, slightly
2856 ;; better code is generated by only accepting register_operands, and then
2857 ;; letting combine generate the ldu[hb] insns.
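;; Illustrative example (names assumed): for "x = (unsigned short) y" with y
;; in a register, the expander below emits a sll/srl pair (shift left then
;; right by 16), while a load such as "x = *p" for an unsigned short *p is
;; left for combine to turn into a single lduh.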
2858
2859 (define_expand "zero_extendhisi2"
2860 [(set (match_operand:SI 0 "register_operand" "")
2861 (zero_extend:SI (match_operand:HI 1 "register_operand" "")))]
2862 ""
2863 "
2864 {
2865 rtx temp = gen_reg_rtx (SImode);
2866 rtx shift_16 = GEN_INT (16);
2867 int op1_subword = 0;
2868
2869 if (GET_CODE (operand1) == SUBREG)
2870 {
2871 op1_subword = SUBREG_WORD (operand1);
2872 operand1 = XEXP (operand1, 0);
2873 }
2874
2875 emit_insn (gen_ashlsi3 (temp, gen_rtx (SUBREG, SImode, operand1,
2876 op1_subword),
2877 shift_16));
2878 emit_insn (gen_lshrsi3 (operand0, temp, shift_16));
2879 DONE;
2880 }")
2881
2882 (define_insn "*zero_extendhisi2_insn"
2883 [(set (match_operand:SI 0 "register_operand" "=r")
2884 (zero_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
2885 ""
2886 "lduh %1,%0"
2887 [(set_attr "type" "load")])
2888
2889 (define_expand "zero_extendqihi2"
2890 [(set (match_operand:HI 0 "register_operand" "")
2891 (zero_extend:HI (match_operand:QI 1 "register_operand" "")))]
2892 ""
2893 "")
2894
2895 (define_insn "*zero_extendqihi2_insn"
2896 [(set (match_operand:HI 0 "register_operand" "=r,r")
2897 (zero_extend:HI (match_operand:QI 1 "sparc_operand" "r,Q")))]
2898 "GET_CODE (operands[1]) != CONST_INT"
2899 "@
2900 and %1,0xff,%0
2901 ldub %1,%0"
2902 [(set_attr "type" "unary,load")
2903 (set_attr "length" "1")])
2904
2905 (define_expand "zero_extendqisi2"
2906 [(set (match_operand:SI 0 "register_operand" "")
2907 (zero_extend:SI (match_operand:QI 1 "register_operand" "")))]
2908 ""
2909 "")
2910
2911 (define_insn "*zero_extendqisi2_insn"
2912 [(set (match_operand:SI 0 "register_operand" "=r,r")
2913 (zero_extend:SI (match_operand:QI 1 "sparc_operand" "r,Q")))]
2914 "GET_CODE (operands[1]) != CONST_INT"
2915 "@
2916 and %1,0xff,%0
2917 ldub %1,%0"
2918 [(set_attr "type" "unary,load")
2919 (set_attr "length" "1")])
2920
2921 (define_expand "zero_extendqidi2"
2922 [(set (match_operand:DI 0 "register_operand" "")
2923 (zero_extend:DI (match_operand:QI 1 "register_operand" "")))]
2924 "TARGET_ARCH64"
2925 "")
2926
2927 (define_insn "*zero_extendqidi2_insn"
2928 [(set (match_operand:DI 0 "register_operand" "=r,r")
2929 (zero_extend:DI (match_operand:QI 1 "sparc_operand" "r,Q")))]
2930 "TARGET_ARCH64 && GET_CODE (operands[1]) != CONST_INT"
2931 "@
2932 and %1,0xff,%0
2933 ldub %1,%0"
2934 [(set_attr "type" "unary,load")
2935 (set_attr "length" "1")])
2936
2937 (define_expand "zero_extendhidi2"
2938 [(set (match_operand:DI 0 "register_operand" "")
2939 (zero_extend:DI (match_operand:HI 1 "register_operand" "")))]
2940 "TARGET_ARCH64"
2941 "
2942 {
2943 rtx temp = gen_reg_rtx (DImode);
2944 rtx shift_48 = GEN_INT (48);
2945 int op1_subword = 0;
2946
2947 if (GET_CODE (operand1) == SUBREG)
2948 {
2949 op1_subword = SUBREG_WORD (operand1);
2950 operand1 = XEXP (operand1, 0);
2951 }
2952
2953 emit_insn (gen_ashldi3 (temp, gen_rtx (SUBREG, DImode, operand1,
2954 op1_subword),
2955 shift_48));
2956 emit_insn (gen_lshrdi3 (operand0, temp, shift_48));
2957 DONE;
2958 }")
2959
2960 (define_insn "*zero_extendhidi2_insn"
2961 [(set (match_operand:DI 0 "register_operand" "=r")
2962 (zero_extend:DI (match_operand:HI 1 "memory_operand" "m")))]
2963 "TARGET_ARCH64"
2964 "lduh %1,%0"
2965 [(set_attr "type" "load")])
2966
2967 ;; ??? Write truncdisi pattern using sra?
2968
2969 (define_expand "zero_extendsidi2"
2970 [(set (match_operand:DI 0 "register_operand" "")
2971 (zero_extend:DI (match_operand:SI 1 "register_operand" "")))]
2972 "TARGET_ARCH64"
2973 "")
2974
2975 (define_insn "*zero_extendsidi2_insn"
2976 [(set (match_operand:DI 0 "register_operand" "=r,r")
2977 (zero_extend:DI (match_operand:SI 1 "sparc_operand" "r,Q")))]
2978 "TARGET_ARCH64 && GET_CODE (operands[1]) != CONST_INT"
2979 "@
2980 srl %1,0,%0
2981 lduw %1,%0"
2982 [(set_attr "type" "unary,load")
2983 (set_attr "length" "1")])
2984
2985 ;; Simplify comparisons of extended values.
2986
2987 (define_insn "*cmp_zero_extendqisi2"
2988 [(set (reg:CC 100)
2989 (compare:CC (zero_extend:SI (match_operand:QI 0 "register_operand" "r"))
2990 (const_int 0)))]
2991 ""
2992 "andcc %0,0xff,%%g0"
2993 [(set_attr "type" "compare")])
2994
2995 (define_insn "*cmp_zero_extendqisi2_set"
2996 [(set (reg:CC 100)
2997 (compare:CC (zero_extend:SI (match_operand:QI 1 "register_operand" "r"))
2998 (const_int 0)))
2999 (set (match_operand:SI 0 "register_operand" "=r")
3000 (zero_extend:SI (match_dup 1)))]
3001 ""
3002 "andcc %1,0xff,%0"
3003 [(set_attr "type" "unary")])
3004
3005 ;; Similarly, handle SI->QI mode truncation followed by a compare.
3006
3007 (define_insn "*cmp_siqi_trunc"
3008 [(set (reg:CC 100)
3009 (compare:CC (subreg:QI (match_operand:SI 0 "register_operand" "r") 0)
3010 (const_int 0)))]
3011 ""
3012 "andcc %0,0xff,%%g0"
3013 [(set_attr "type" "compare")])
3014
3015 (define_insn "*cmp_siqi_trunc_set"
3016 [(set (reg:CC 100)
3017 (compare:CC (subreg:QI (match_operand:SI 1 "register_operand" "r") 0)
3018 (const_int 0)))
3019 (set (match_operand:QI 0 "register_operand" "=r")
3020 (match_dup 1))]
3021 ""
3022 "andcc %1,0xff,%0"
3023 [(set_attr "type" "unary")])
3024 \f
3025 ;;- sign extension instructions
3026
3027 ;; These patterns originally accepted general_operands; however, slightly
3028 ;; better code is generated by only accepting register_operands, and then
3029 ;; letting combine generate the lds[hb] insns.
3030
3031 (define_expand "extendhisi2"
3032 [(set (match_operand:SI 0 "register_operand" "")
3033 (sign_extend:SI (match_operand:HI 1 "register_operand" "")))]
3034 ""
3035 "
3036 {
3037 rtx temp = gen_reg_rtx (SImode);
3038 rtx shift_16 = GEN_INT (16);
3039 int op1_subword = 0;
3040
3041 if (GET_CODE (operand1) == SUBREG)
3042 {
3043 op1_subword = SUBREG_WORD (operand1);
3044 operand1 = XEXP (operand1, 0);
3045 }
3046
3047 emit_insn (gen_ashlsi3 (temp, gen_rtx (SUBREG, SImode, operand1,
3048 op1_subword),
3049 shift_16));
3050 emit_insn (gen_ashrsi3 (operand0, temp, shift_16));
3051 DONE;
3052 }")
3053
3054 (define_insn "*sign_extendhisi2_insn"
3055 [(set (match_operand:SI 0 "register_operand" "=r")
3056 (sign_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
3057 ""
3058 "ldsh %1,%0"
3059 [(set_attr "type" "load")])
3060
3061 (define_expand "extendqihi2"
3062 [(set (match_operand:HI 0 "register_operand" "")
3063 (sign_extend:HI (match_operand:QI 1 "register_operand" "")))]
3064 ""
3065 "
3066 {
3067 rtx temp = gen_reg_rtx (SImode);
3068 rtx shift_24 = GEN_INT (24);
3069 int op1_subword = 0;
3070 int op0_subword = 0;
3071
3072 if (GET_CODE (operand1) == SUBREG)
3073 {
3074 op1_subword = SUBREG_WORD (operand1);
3075 operand1 = XEXP (operand1, 0);
3076 }
3077 if (GET_CODE (operand0) == SUBREG)
3078 {
3079 op0_subword = SUBREG_WORD (operand0);
3080 operand0 = XEXP (operand0, 0);
3081 }
3082 emit_insn (gen_ashlsi3 (temp, gen_rtx (SUBREG, SImode, operand1,
3083 op1_subword),
3084 shift_24));
3085 if (GET_MODE (operand0) != SImode)
3086 operand0 = gen_rtx (SUBREG, SImode, operand0, op0_subword);
3087 emit_insn (gen_ashrsi3 (operand0, temp, shift_24));
3088 DONE;
3089 }")
3090
3091 (define_insn "*sign_extendqihi2_insn"
3092 [(set (match_operand:HI 0 "register_operand" "=r")
3093 (sign_extend:HI (match_operand:QI 1 "memory_operand" "m")))]
3094 ""
3095 "ldsb %1,%0"
3096 [(set_attr "type" "load")])
3097
3098 (define_expand "extendqisi2"
3099 [(set (match_operand:SI 0 "register_operand" "")
3100 (sign_extend:SI (match_operand:QI 1 "register_operand" "")))]
3101 ""
3102 "
3103 {
3104 rtx temp = gen_reg_rtx (SImode);
3105 rtx shift_24 = GEN_INT (24);
3106 int op1_subword = 0;
3107
3108 if (GET_CODE (operand1) == SUBREG)
3109 {
3110 op1_subword = SUBREG_WORD (operand1);
3111 operand1 = XEXP (operand1, 0);
3112 }
3113
3114 emit_insn (gen_ashlsi3 (temp, gen_rtx (SUBREG, SImode, operand1,
3115 op1_subword),
3116 shift_24));
3117 emit_insn (gen_ashrsi3 (operand0, temp, shift_24));
3118 DONE;
3119 }")
3120
3121 (define_insn "*sign_extendqisi2_insn"
3122 [(set (match_operand:SI 0 "register_operand" "=r")
3123 (sign_extend:SI (match_operand:QI 1 "memory_operand" "m")))]
3124 ""
3125 "ldsb %1,%0"
3126 [(set_attr "type" "load")])
3127
3128 (define_expand "extendqidi2"
3129 [(set (match_operand:DI 0 "register_operand" "")
3130 (sign_extend:DI (match_operand:QI 1 "register_operand" "")))]
3131 "TARGET_ARCH64"
3132 "
3133 {
3134 rtx temp = gen_reg_rtx (DImode);
3135 rtx shift_56 = GEN_INT (56);
3136 int op1_subword = 0;
3137
3138 if (GET_CODE (operand1) == SUBREG)
3139 {
3140 op1_subword = SUBREG_WORD (operand1);
3141 operand1 = XEXP (operand1, 0);
3142 }
3143
3144 emit_insn (gen_ashldi3 (temp, gen_rtx (SUBREG, DImode, operand1,
3145 op1_subword),
3146 shift_56));
3147 emit_insn (gen_ashrdi3 (operand0, temp, shift_56));
3148 DONE;
3149 }")
3150
3151 (define_insn "*sign_extendqidi2_insn"
3152 [(set (match_operand:DI 0 "register_operand" "=r")
3153 (sign_extend:DI (match_operand:QI 1 "memory_operand" "m")))]
3154 "TARGET_ARCH64"
3155 "ldsb %1,%0"
3156 [(set_attr "type" "load")])
3157
3158 (define_expand "extendhidi2"
3159 [(set (match_operand:DI 0 "register_operand" "")
3160 (sign_extend:DI (match_operand:HI 1 "register_operand" "")))]
3161 "TARGET_ARCH64"
3162 "
3163 {
3164 rtx temp = gen_reg_rtx (DImode);
3165 rtx shift_48 = GEN_INT (48);
3166 int op1_subword = 0;
3167
3168 if (GET_CODE (operand1) == SUBREG)
3169 {
3170 op1_subword = SUBREG_WORD (operand1);
3171 operand1 = XEXP (operand1, 0);
3172 }
3173
3174 emit_insn (gen_ashldi3 (temp, gen_rtx (SUBREG, DImode, operand1,
3175 op1_subword),
3176 shift_48));
3177 emit_insn (gen_ashrdi3 (operand0, temp, shift_48));
3178 DONE;
3179 }")
3180
3181 (define_insn "*sign_extendhidi2_insn"
3182 [(set (match_operand:DI 0 "register_operand" "=r")
3183 (sign_extend:DI (match_operand:HI 1 "memory_operand" "m")))]
3184 "TARGET_ARCH64"
3185 "ldsh %1,%0"
3186 [(set_attr "type" "load")])
3187
3188 (define_expand "extendsidi2"
3189 [(set (match_operand:DI 0 "register_operand" "")
3190 (sign_extend:DI (match_operand:SI 1 "register_operand" "")))]
3191 "TARGET_ARCH64"
3192 "")
3193
3194 (define_insn "*sign_extendsidi2_insn"
3195 [(set (match_operand:DI 0 "register_operand" "=r,r")
3196 (sign_extend:DI (match_operand:SI 1 "sparc_operand" "r,Q")))]
3197 "TARGET_ARCH64"
3198 "@
3199 sra %1,0,%0
3200 ldsw %1,%0"
3201 [(set_attr "type" "unary,load")
3202 (set_attr "length" "1")])
3203 \f
3204 ;; Special pattern for optimizing bit-field compares. This is needed
3205 ;; because combine uses this as a canonical form.
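;; For example (illustrative values): with len = INTVAL (operands[1]) = 8 and
;; bit position INTVAL (operands[2]) = 24, the output code below computes
;; pos = 32 - 24 - 8 = 0 and mask = 0xff, so the emitted andcc tests just the
;; low byte of operand 0.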
3206
3207 (define_insn "*cmp_zero_extract"
3208 [(set (reg:CC 100)
3209 (compare:CC
3210 (zero_extract:SI (match_operand:SI 0 "register_operand" "r")
3211 (match_operand:SI 1 "small_int" "n")
3212 (match_operand:SI 2 "small_int" "n"))
3213 (const_int 0)))]
3214 "INTVAL (operands[2]) > 19"
3215 "*
3216 {
3217 int len = INTVAL (operands[1]);
3218 int pos = 32 - INTVAL (operands[2]) - len;
3219 unsigned mask = ((1 << len) - 1) << pos;
3220
3221 operands[1] = GEN_INT (mask);
3222 return \"andcc %0,%1,%%g0\";
3223 }")
3224
3225 (define_insn "*cmp_zero_extract_sp64"
3226 [(set (reg:CCX 100)
3227 (compare:CCX
3228 (zero_extract:DI (match_operand:DI 0 "register_operand" "r")
3229 (match_operand:SI 1 "small_int" "n")
3230 (match_operand:SI 2 "small_int" "n"))
3231 (const_int 0)))]
3232 "TARGET_ARCH64 && INTVAL (operands[2]) > 51"
3233 "*
3234 {
3235 int len = INTVAL (operands[1]);
3236 int pos = 64 - INTVAL (operands[2]) - len;
3237 unsigned HOST_WIDE_INT mask = (((unsigned HOST_WIDE_INT) 1 << len) - 1) << pos;
3238
3239 operands[1] = GEN_INT (mask);
3240 return \"andcc %0,%1,%%g0\";
3241 }")
3242 \f
3243 ;; Conversions between float, double and long double.
3244
3245 (define_insn "extendsfdf2"
3246 [(set (match_operand:DF 0 "register_operand" "=e")
3247 (float_extend:DF
3248 (match_operand:SF 1 "register_operand" "f")))]
3249 "TARGET_FPU"
3250 "fstod %1,%0"
3251 [(set_attr "type" "fp")])
3252
3253 (define_insn "extendsftf2"
3254 [(set (match_operand:TF 0 "register_operand" "=e")
3255 (float_extend:TF
3256 (match_operand:SF 1 "register_operand" "f")))]
3257 "TARGET_FPU && TARGET_HARD_QUAD"
3258 "fstoq %1,%0"
3259 [(set_attr "type" "fp")])
3260
3261 (define_insn "extenddftf2"
3262 [(set (match_operand:TF 0 "register_operand" "=e")
3263 (float_extend:TF
3264 (match_operand:DF 1 "register_operand" "e")))]
3265 "TARGET_FPU && TARGET_HARD_QUAD"
3266 "fdtoq %1,%0"
3267 [(set_attr "type" "fp")])
3268
3269 (define_insn "truncdfsf2"
3270 [(set (match_operand:SF 0 "register_operand" "=f")
3271 (float_truncate:SF
3272 (match_operand:DF 1 "register_operand" "e")))]
3273 "TARGET_FPU"
3274 "fdtos %1,%0"
3275 [(set_attr "type" "fp")])
3276
3277 (define_insn "trunctfsf2"
3278 [(set (match_operand:SF 0 "register_operand" "=f")
3279 (float_truncate:SF
3280 (match_operand:TF 1 "register_operand" "e")))]
3281 "TARGET_FPU && TARGET_HARD_QUAD"
3282 "fqtos %1,%0"
3283 [(set_attr "type" "fp")])
3284
3285 (define_insn "trunctfdf2"
3286 [(set (match_operand:DF 0 "register_operand" "=e")
3287 (float_truncate:DF
3288 (match_operand:TF 1 "register_operand" "e")))]
3289 "TARGET_FPU && TARGET_HARD_QUAD"
3290 "fqtod %1,%0"
3291 [(set_attr "type" "fp")])
3292 \f
3293 ;; Conversion between fixed point and floating point.
3294
3295 (define_insn "floatsisf2"
3296 [(set (match_operand:SF 0 "register_operand" "=f")
3297 (float:SF (match_operand:SI 1 "register_operand" "f")))]
3298 "TARGET_FPU"
3299 "fitos %1,%0"
3300 [(set_attr "type" "fp")])
3301
3302 (define_insn "floatsidf2"
3303 [(set (match_operand:DF 0 "register_operand" "=e")
3304 (float:DF (match_operand:SI 1 "register_operand" "f")))]
3305 "TARGET_FPU"
3306 "fitod %1,%0"
3307 [(set_attr "type" "fp")])
3308
3309 (define_insn "floatsitf2"
3310 [(set (match_operand:TF 0 "register_operand" "=e")
3311 (float:TF (match_operand:SI 1 "register_operand" "f")))]
3312 "TARGET_FPU && TARGET_HARD_QUAD"
3313 "fitoq %1,%0"
3314 [(set_attr "type" "fp")])
3315
3316 ;; Now the same for 64 bit sources.
3317 ;; ??? We cannot put DImode values in fp regs (see below near fix_truncdfsi2).
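;; A rough sketch of what the workaround below produces when the DImode value
;; is in an integer register (the register numbers and stack slot are
;; assumptions, not fixed choices):
;;   stx   %o0,[stack temp]     ! spill the 64-bit integer
;;   ldd   [stack temp],%f0     ! reload it into an fp register pair
;;   fxtos %f0,%f1              ! convert
;; When the source is already in memory, the stx is not needed.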
3318
3319 (define_expand "floatdisf2"
3320 [(parallel [(set (match_operand:SF 0 "register_operand" "")
3321 (float:SF (match_operand:DI 1 "general_operand" "")))
3322 (clobber (match_dup 2))
3323 (clobber (match_dup 3))])]
3324 "TARGET_ARCH64 && TARGET_FPU"
3325 "
3326 {
3327 operands[2] = gen_reg_rtx (DFmode);
3328 operands[3] = sparc64_fpconv_stack_temp ();
3329 }")
3330
3331 (define_expand "floatdidf2"
3332 [(parallel [(set (match_operand:DF 0 "register_operand" "")
3333 (float:DF (match_operand:DI 1 "general_operand" "")))
3334 (clobber (match_dup 2))
3335 (clobber (match_dup 3))])]
3336 "TARGET_ARCH64 && TARGET_FPU"
3337 "
3338 {
3339 operands[2] = gen_reg_rtx (DFmode);
3340 operands[3] = sparc64_fpconv_stack_temp ();
3341 }")
3342
3343 (define_expand "floatditf2"
3344 [(parallel [(set (match_operand:TF 0 "register_operand" "")
3345 (float:TF (match_operand:DI 1 "general_operand" "")))
3346 (clobber (match_dup 2))
3347 (clobber (match_dup 3))])]
3348 "TARGET_ARCH64 && TARGET_FPU && TARGET_HARD_QUAD"
3349 "
3350 {
3351 operands[2] = gen_reg_rtx (DFmode);
3352 operands[3] = sparc64_fpconv_stack_temp ();
3353 }")
3354
3355 (define_insn "*floatdisf2_insn"
3356 [(parallel [(set (match_operand:SF 0 "register_operand" "=f")
3357 (float:SF (match_operand:DI 1 "general_operand" "rm")))
3358 (clobber (match_operand:DF 2 "register_operand" "=&e"))
3359 (clobber (match_operand:DI 3 "memory_operand" "m"))])]
3360 "TARGET_ARCH64 && TARGET_FPU"
3361 "*
3362 {
3363 if (GET_CODE (operands[1]) == MEM)
3364 output_asm_insn (\"ldd %1,%2\", operands);
3365 else
3366 output_asm_insn (\"stx %1,%3\;ldd %3,%2\", operands);
3367 return \"fxtos %2,%0\";
3368 }"
3369 [(set_attr "type" "fp")
3370 (set_attr "length" "3")])
3371
3372 (define_insn "*floatdidf2_insn"
3373 [(parallel [(set (match_operand:DF 0 "register_operand" "=e")
3374 (float:DF (match_operand:DI 1 "general_operand" "rm")))
3375 (clobber (match_operand:DF 2 "register_operand" "=&e"))
3376 (clobber (match_operand:DI 3 "memory_operand" "m"))])]
3377 "TARGET_ARCH64 && TARGET_FPU"
3378 "*
3379 {
3380 if (GET_CODE (operands[1]) == MEM)
3381 output_asm_insn (\"ldd %1,%2\", operands);
3382 else
3383 output_asm_insn (\"stx %1,%3\;ldd %3,%2\", operands);
3384 return \"fxtod %2,%0\";
3385 }"
3386 [(set_attr "type" "fp")
3387 (set_attr "length" "3")])
3388
3389 (define_insn "*floatditf2_insn"
3390 [(parallel [(set (match_operand:TF 0 "register_operand" "=e")
3391 (float:TF (match_operand:DI 1 "general_operand" "rm")))
3392 (clobber (match_operand:DF 2 "register_operand" "=&e"))
3393 (clobber (match_operand:DI 3 "memory_operand" "m"))])]
3394 "TARGET_ARCH64 && TARGET_FPU && TARGET_HARD_QUAD"
3395 "*
3396 {
3397 if (GET_CODE (operands[1]) == MEM)
3398 output_asm_insn (\"ldd %1,%2\", operands);
3399 else
3400 output_asm_insn (\"stx %1,%3\;ldd %3,%2\", operands);
3401 return \"fxtoq %2,%0\";
3402 }"
3403 [(set_attr "type" "fp")
3404 (set_attr "length" "3")])
3405
3406 ;; ??? Ideally, these are what we would like to use.
3407
3408 (define_insn "floatdisf2_sp64"
3409 [(set (match_operand:SF 0 "register_operand" "=f")
3410 (float:SF (match_operand:DI 1 "register_operand" "e")))]
3411 "0 && TARGET_ARCH64 && TARGET_FPU"
3412 "fxtos %1,%0"
3413 [(set_attr "type" "fp")])
3414
3415 (define_insn "floatdidf2_sp64"
3416 [(set (match_operand:DF 0 "register_operand" "=e")
3417 (float:DF (match_operand:DI 1 "register_operand" "e")))]
3418 "0 && TARGET_ARCH64 && TARGET_FPU"
3419 "fxtod %1,%0"
3420 [(set_attr "type" "fp")])
3421
3422 (define_insn "floatditf2_sp64"
3423 [(set (match_operand:TF 0 "register_operand" "=e")
3424 (float:TF (match_operand:DI 1 "register_operand" "e")))]
3425 "0 && TARGET_ARCH64 && TARGET_FPU && TARGET_HARD_QUAD"
3426 "fxtoq %1,%0"
3427 [(set_attr "type" "fp")])
3428
3429 ;; Convert a float to an actual integer.
3430 ;; Truncation is performed as part of the conversion.
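;; For example (a sketch, register choice illustrative), "i = (int) f" is a
;; single insn that both converts and truncates toward zero, leaving the
;; SImode result in a float register:
;;	fstoi %f1,%f0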
3431
3432 (define_insn "fix_truncsfsi2"
3433 [(set (match_operand:SI 0 "register_operand" "=f")
3434 (fix:SI (fix:SF (match_operand:SF 1 "register_operand" "f"))))]
3435 "TARGET_FPU"
3436 "fstoi %1,%0"
3437 [(set_attr "type" "fp")])
3438
3439 (define_insn "fix_truncdfsi2"
3440 [(set (match_operand:SI 0 "register_operand" "=f")
3441 (fix:SI (fix:DF (match_operand:DF 1 "register_operand" "e"))))]
3442 "TARGET_FPU"
3443 "fdtoi %1,%0"
3444 [(set_attr "type" "fp")])
3445
3446 (define_insn "fix_trunctfsi2"
3447 [(set (match_operand:SI 0 "register_operand" "=f")
3448 (fix:SI (fix:TF (match_operand:TF 1 "register_operand" "e"))))]
3449 "TARGET_FPU && TARGET_HARD_QUAD"
3450 "fqtoi %1,%0"
3451 [(set_attr "type" "fp")])
3452
3453 ;; Now the same, for 64-bit targets
3454 ;; ??? We try to work around an interesting problem.
3455 ;; If gcc tries to do a subreg on the result it will get the wrong answer:
3456 ;; "(subreg:SI (reg:DI M int-reg) 0)" is the same as
3457 ;; "(subreg:SI (reg:DI N float-reg) 1)", but gcc does not know how to change
3458 ;; the "0" to a "1". One could enhance alter_subreg but it is not clear how to
3459 ;; do this cleanly.
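;; So the insns below instead go through a DFmode temporary and a stack slot.
;; A sketch of the resulting sequence (registers and offset illustrative):
;;	fstox %f1,%f2		! convert within the float registers
;;	std %f2,[%fp-16]	! store the 64 bit result
;;	ldx [%fp-16],%o0	! reload it into a GPR
;; When the destination is itself a MEM, the std stores straight to it and
;; the ldx is not needed.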
3460
3461 (define_expand "fix_truncsfdi2"
3462 [(parallel [(set (match_operand:DI 0 "general_operand" "")
3463 (fix:DI (fix:SF (match_operand:SF 1 "register_operand" ""))))
3464 (clobber (match_dup 2))
3465 (clobber (match_dup 3))])]
3466 "TARGET_ARCH64 && TARGET_FPU"
3467 "
3468 {
3469 operands[2] = gen_reg_rtx (DFmode);
3470 operands[3] = sparc64_fpconv_stack_temp ();
3471 }")
3472
3473 (define_expand "fix_truncdfdi2"
3474 [(parallel [(set (match_operand:DI 0 "general_operand" "")
3475 (fix:DI (fix:DF (match_operand:DF 1 "register_operand" ""))))
3476 (clobber (match_dup 2))
3477 (clobber (match_dup 3))])]
3478 "TARGET_ARCH64 && TARGET_FPU"
3479 "
3480 {
3481 operands[2] = gen_reg_rtx (DFmode);
3482 operands[3] = sparc64_fpconv_stack_temp ();
3483 }")
3484
3485 (define_expand "fix_trunctfdi2"
3486 [(parallel [(set (match_operand:DI 0 "general_operand" "")
3487 (fix:DI (fix:TF (match_operand:TF 1 "register_operand" ""))))
3488 (clobber (match_dup 2))
3489 (clobber (match_dup 3))])]
3490 "TARGET_ARCH64 && TARGET_FPU && TARGET_HARD_QUAD"
3491 "
3492 {
3493 operands[2] = gen_reg_rtx (DFmode);
3494 operands[3] = sparc64_fpconv_stack_temp ();
3495 }")
3496
3497 (define_insn "*fix_truncsfdi2_insn"
3498 [(parallel [(set (match_operand:DI 0 "general_operand" "=rm")
3499 (fix:DI (fix:SF (match_operand:SF 1 "register_operand" "f"))))
3500 (clobber (match_operand:DF 2 "register_operand" "=&e"))
3501 (clobber (match_operand:DI 3 "memory_operand" "m"))])]
3502 "TARGET_ARCH64 && TARGET_FPU"
3503 "*
3504 {
3505 output_asm_insn (\"fstox %1,%2\", operands);
3506 if (GET_CODE (operands[0]) == MEM)
3507 return \"std %2,%0\";
3508 else
3509 return \"std %2,%3\;ldx %3,%0\";
3510 }"
3511 [(set_attr "type" "fp")
3512 (set_attr "length" "3")])
3513
3514 (define_insn "*fix_truncdfdi2_insn"
3515 [(parallel [(set (match_operand:DI 0 "general_operand" "=rm")
3516 (fix:DI (fix:DF (match_operand:DF 1 "register_operand" "e"))))
3517 (clobber (match_operand:DF 2 "register_operand" "=&e"))
3518 (clobber (match_operand:DI 3 "memory_operand" "m"))])]
3519 "TARGET_ARCH64 && TARGET_FPU"
3520 "*
3521 {
3522 output_asm_insn (\"fdtox %1,%2\", operands);
3523 if (GET_CODE (operands[0]) == MEM)
3524 return \"std %2,%0\";
3525 else
3526 return \"std %2,%3\;ldx %3,%0\";
3527 }"
3528 [(set_attr "type" "fp")
3529 (set_attr "length" "3")])
3530
3531 (define_insn "*fix_trunctfdi2_insn"
3532 [(parallel [(set (match_operand:DI 0 "general_operand" "=rm")
3533 (fix:DI (fix:TF (match_operand:TF 1 "register_operand" "e"))))
3534 (clobber (match_operand:DF 2 "register_operand" "=&e"))
3535 (clobber (match_operand:DI 3 "memory_operand" "m"))])]
3536 "TARGET_ARCH64 && TARGET_FPU && TARGET_HARD_QUAD"
3537 "*
3538 {
3539 output_asm_insn (\"fqtox %1,%2\", operands);
3540 if (GET_CODE (operands[0]) == MEM)
3541 return \"std %2,%0\";
3542 else
3543 return \"std %2,%3\;ldx %3,%0\";
3544 }"
3545 [(set_attr "type" "fp")
3546 (set_attr "length" "3")])
3547
3548 ;; ??? Ideally, these are what we would like to use.
3549
3550 (define_insn "fix_truncsfdi2_sp64"
3551 [(set (match_operand:DI 0 "register_operand" "=e")
3552 (fix:DI (fix:SF (match_operand:SF 1 "register_operand" "f"))))]
3553 "0 && TARGET_ARCH64 && TARGET_FPU"
3554 "fstox %1,%0"
3555 [(set_attr "type" "fp")])
3556
3557 (define_insn "fix_truncdfdi2_sp64"
3558 [(set (match_operand:DI 0 "register_operand" "=e")
3559 (fix:DI (fix:DF (match_operand:DF 1 "register_operand" "e"))))]
3560 "0 && TARGET_ARCH64 && TARGET_FPU"
3561 "fdtox %1,%0"
3562 [(set_attr "type" "fp")])
3563
3564 (define_insn "fix_trunctfdi2_sp64"
3565 [(set (match_operand:DI 0 "register_operand" "=e")
3566 (fix:DI (fix:TF (match_operand:TF 1 "register_operand" "e"))))]
3567 "0 && TARGET_ARCH64 && TARGET_FPU && TARGET_HARD_QUAD"
3568 "fqtox %1,%0"
3569 [(set_attr "type" "fp")])
3570 \f
3571 ;;- arithmetic instructions
3572
3573 (define_expand "adddi3"
3574 [(set (match_operand:DI 0 "register_operand" "=r")
3575 (plus:DI (match_operand:DI 1 "arith_double_operand" "%r")
3576 (match_operand:DI 2 "arith_double_operand" "rHI")))]
3577 ""
3578 "
3579 {
3580 if (! TARGET_ARCH64)
3581 {
3582 emit_insn (gen_rtx (PARALLEL, VOIDmode, gen_rtvec (2,
3583 gen_rtx (SET, VOIDmode, operands[0],
3584 gen_rtx (PLUS, DImode, operands[1],
3585 operands[2])),
3586 gen_rtx (CLOBBER, VOIDmode,
3587 gen_rtx (REG, SImode, SPARC_ICC_REG)))));
3588 DONE;
3589 }
3590 }")
3591
3592 (define_insn "*adddi3_sp32"
3593 [(set (match_operand:DI 0 "register_operand" "=r")
3594 (plus:DI (match_operand:DI 1 "arith_double_operand" "%r")
3595 (match_operand:DI 2 "arith_double_operand" "rHI")))
3596 (clobber (reg:SI 100))]
3597 "! TARGET_ARCH64"
3598 "*
3599 {
3600 rtx op2 = operands[2];
3601
3602 if (GET_CODE (op2) == CONST_INT
3603 || GET_CODE (op2) == CONST_DOUBLE)
3604 {
3605 rtx xoperands[4];
3606 xoperands[0] = operands[0];
3607 xoperands[1] = operands[1];
3608 if (WORDS_BIG_ENDIAN)
3609 split_double (op2, &xoperands[2], &xoperands[3]);
3610 else
3611 split_double (op2, &xoperands[3], &xoperands[2]);
3612 if (xoperands[3] == const0_rtx && xoperands[0] == xoperands[1])
3613 output_asm_insn (\"add %H1,%2,%H0\", xoperands);
3614 else
3615 output_asm_insn (\"addcc %L1,%3,%L0\;addx %H1,%2,%H0\", xoperands);
3616 return \"\";
3617 }
3618 return \"addcc %L1,%L2,%L0\;addx %H1,%H2,%H0\";
3619 }"
3620 [(set_attr "length" "2")])
3621
3622 (define_insn "*adddi3_sp64"
3623 [(set (match_operand:DI 0 "register_operand" "=r")
3624 (plus:DI (match_operand:DI 1 "arith_double_operand" "%r")
3625 (match_operand:DI 2 "arith_double_operand" "rHI")))]
3626 "TARGET_ARCH64"
3627 "add %1,%2,%0")
3628
3629 (define_insn "addsi3"
3630 [(set (match_operand:SI 0 "register_operand" "=r")
3631 (plus:SI (match_operand:SI 1 "arith_operand" "%r")
3632 (match_operand:SI 2 "arith_operand" "rI")))]
3633 ""
3634 "add %1,%2,%0"
3635 [(set_attr "type" "ialu")])
3636
3637 (define_insn "*cmp_cc_plus"
3638 [(set (reg:CC_NOOV 100)
3639 (compare:CC_NOOV (plus:SI (match_operand:SI 0 "arith_operand" "%r")
3640 (match_operand:SI 1 "arith_operand" "rI"))
3641 (const_int 0)))]
3642 ""
3643 "addcc %0,%1,%%g0"
3644 [(set_attr "type" "compare")])
3645
3646 (define_insn "*cmp_ccx_plus"
3647 [(set (reg:CCX_NOOV 100)
3648 (compare:CCX_NOOV (plus:DI (match_operand:DI 0 "arith_double_operand" "%r")
3649 (match_operand:DI 1 "arith_double_operand" "rHI"))
3650 (const_int 0)))]
3651 "TARGET_ARCH64"
3652 "addcc %0,%1,%%g0"
3653 [(set_attr "type" "compare")])
3654
3655 (define_insn "*cmp_cc_plus_set"
3656 [(set (reg:CC_NOOV 100)
3657 (compare:CC_NOOV (plus:SI (match_operand:SI 1 "arith_operand" "%r")
3658 (match_operand:SI 2 "arith_operand" "rI"))
3659 (const_int 0)))
3660 (set (match_operand:SI 0 "register_operand" "=r")
3661 (plus:SI (match_dup 1) (match_dup 2)))]
3662 ""
3663 "addcc %1,%2,%0")
3664
3665 (define_insn "*cmp_ccx_plus_set"
3666 [(set (reg:CCX_NOOV 100)
3667 (compare:CCX_NOOV (plus:DI (match_operand:DI 1 "arith_double_operand" "%r")
3668 (match_operand:DI 2 "arith_double_operand" "rHI"))
3669 (const_int 0)))
3670 (set (match_operand:DI 0 "register_operand" "=r")
3671 (plus:DI (match_dup 1) (match_dup 2)))]
3672 "TARGET_ARCH64"
3673 "addcc %1,%2,%0")
3674
3675 (define_expand "subdi3"
3676 [(set (match_operand:DI 0 "register_operand" "=r")
3677 (minus:DI (match_operand:DI 1 "register_operand" "r")
3678 (match_operand:DI 2 "arith_double_operand" "rHI")))]
3679 ""
3680 "
3681 {
3682 if (! TARGET_ARCH64)
3683 {
3684 emit_insn (gen_rtx (PARALLEL, VOIDmode, gen_rtvec (2,
3685 gen_rtx (SET, VOIDmode, operands[0],
3686 gen_rtx (MINUS, DImode, operands[1],
3687 operands[2])),
3688 gen_rtx (CLOBBER, VOIDmode,
3689 gen_rtx (REG, SImode, SPARC_ICC_REG)))));
3690 DONE;
3691 }
3692 }")
3693
3694 (define_insn "*subdi3_sp32"
3695 [(set (match_operand:DI 0 "register_operand" "=r")
3696 (minus:DI (match_operand:DI 1 "register_operand" "r")
3697 (match_operand:DI 2 "arith_double_operand" "rHI")))
3698 (clobber (reg:SI 100))]
3699 "! TARGET_ARCH64"
3700 "*
3701 {
3702 rtx op2 = operands[2];
3703
3704 if (GET_CODE (op2) == CONST_INT
3705 || GET_CODE (op2) == CONST_DOUBLE)
3706 {
3707 rtx xoperands[4];
3708 xoperands[0] = operands[0];
3709 xoperands[1] = operands[1];
3710 if (WORDS_BIG_ENDIAN)
3711 split_double (op2, &xoperands[2], &xoperands[3]);
3712 else
3713 split_double (op2, &xoperands[3], &xoperands[2]);
3714 if (xoperands[3] == const0_rtx && xoperands[0] == xoperands[1])
3715 output_asm_insn (\"sub %H1,%2,%H0\", xoperands);
3716 else
3717 output_asm_insn (\"subcc %L1,%3,%L0\;subx %H1,%2,%H0\", xoperands);
3718 return \"\";
3719 }
3720 return \"subcc %L1,%L2,%L0\;subx %H1,%H2,%H0\";
3721 }"
3722 [(set_attr "length" "2")])
3723
3724 (define_insn "*subdi3_sp64"
3725 [(set (match_operand:DI 0 "register_operand" "=r")
3726 (minus:DI (match_operand:DI 1 "register_operand" "r")
3727 (match_operand:DI 2 "arith_double_operand" "rHI")))]
3728 "TARGET_ARCH64"
3729 "sub %1,%2,%0")
3730
3731 (define_insn "subsi3"
3732 [(set (match_operand:SI 0 "register_operand" "=r")
3733 (minus:SI (match_operand:SI 1 "register_operand" "r")
3734 (match_operand:SI 2 "arith_operand" "rI")))]
3735 ""
3736 "sub %1,%2,%0"
3737 [(set_attr "type" "ialu")])
3738
3739 (define_insn "*cmp_minus_cc"
3740 [(set (reg:CC_NOOV 100)
3741 (compare:CC_NOOV (minus:SI (match_operand:SI 0 "register_operand" "r")
3742 (match_operand:SI 1 "arith_operand" "rI"))
3743 (const_int 0)))]
3744 ""
3745 "subcc %0,%1,%%g0"
3746 [(set_attr "type" "compare")])
3747
3748 (define_insn "*cmp_minus_ccx"
3749 [(set (reg:CCX_NOOV 100)
3750 (compare:CCX_NOOV (minus:DI (match_operand:DI 0 "register_operand" "r")
3751 (match_operand:DI 1 "arith_double_operand" "rHI"))
3752 (const_int 0)))]
3753 "TARGET_ARCH64"
3754 "subcc %0,%1,%%g0"
3755 [(set_attr "type" "compare")])
3756
3757 (define_insn "*cmp_minus_cc_set"
3758 [(set (reg:CC_NOOV 100)
3759 (compare:CC_NOOV (minus:SI (match_operand:SI 1 "register_operand" "r")
3760 (match_operand:SI 2 "arith_operand" "rI"))
3761 (const_int 0)))
3762 (set (match_operand:SI 0 "register_operand" "=r")
3763 (minus:SI (match_dup 1) (match_dup 2)))]
3764 ""
3765 "subcc %1,%2,%0")
3766
3767 (define_insn "*cmp_minus_ccx_set"
3768 [(set (reg:CCX_NOOV 100)
3769 (compare:CCX_NOOV (minus:DI (match_operand:DI 1 "register_operand" "r")
3770 (match_operand:DI 2 "arith_double_operand" "rHI"))
3771 (const_int 0)))
3772 (set (match_operand:DI 0 "register_operand" "=r")
3773 (minus:DI (match_dup 1) (match_dup 2)))]
3774 "TARGET_ARCH64"
3775 "subcc %1,%2,%0")
3776 \f
3777 ;; Integer Multiply/Divide.
3778
3779 ;; The 32 bit multiply/divide instructions are deprecated on v9 and shouldn't
3780 ;; be used. We still use them in 32 bit v9 compilers.
3781 ;; The 64 bit v9 compiler will (/should) widen the args and use muldi3.
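;; For example (a sketch, registers illustrative), a 32 bit "a * b" uses the
;; v8 smul below, while a 64 bit multiply on TARGET_ARCH64 uses muldi3:
;;	smul %o0,%o1,%o0	! 32 bit, v8/sparclite/sparclet
;;	mulx %o0,%o1,%o0	! 64 bit, v9 only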
3782
3783 (define_insn "mulsi3"
3784 [(set (match_operand:SI 0 "register_operand" "=r")
3785 (mult:SI (match_operand:SI 1 "arith_operand" "%r")
3786 (match_operand:SI 2 "arith_operand" "rI")))]
3787 "TARGET_V8 || TARGET_SPARCLITE || TARGET_SPARCLET || TARGET_DEPRECATED_V8_INSNS"
3788 "smul %1,%2,%0"
3789 [(set_attr "type" "imul")])
3790
3791 (define_insn "muldi3"
3792 [(set (match_operand:DI 0 "register_operand" "=r")
3793 (mult:DI (match_operand:DI 1 "arith_double_operand" "%r")
3794 (match_operand:DI 2 "arith_double_operand" "rHI")))]
3795 "TARGET_ARCH64"
3796 "mulx %1,%2,%0")
3797
3798 ;; It is not known whether this will match.
3799
3800 (define_insn "*cmp_mul_set"
3801 [(set (match_operand:SI 0 "register_operand" "=r")
3802 (mult:SI (match_operand:SI 1 "arith_operand" "%r")
3803 (match_operand:SI 2 "arith_operand" "rI")))
3804 (set (reg:CC_NOOV 100)
3805 (compare:CC_NOOV (mult:SI (match_dup 1) (match_dup 2))
3806 (const_int 0)))]
3807 "TARGET_V8 || TARGET_SPARCLITE || TARGET_DEPRECATED_V8_INSNS"
3808 "smulcc %1,%2,%0"
3809 [(set_attr "type" "imul")])
3810
3811 (define_expand "mulsidi3"
3812 [(set (match_operand:DI 0 "register_operand" "")
3813 (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" ""))
3814 (sign_extend:DI (match_operand:SI 2 "arith_operand" ""))))]
3815 "TARGET_V8 || TARGET_SPARCLITE || TARGET_SPARCLET || TARGET_DEPRECATED_V8_INSNS"
3816 "
3817 {
3818 if (CONSTANT_P (operands[2]))
3819 {
3820 emit_insn (gen_const_mulsidi3 (operands[0], operands[1], operands[2]));
3821 DONE;
3822 }
3823 }")
3824
3825 (define_insn "*mulsidi3_sp32"
3826 [(set (match_operand:DI 0 "register_operand" "=r")
3827 (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
3828 (sign_extend:DI (match_operand:SI 2 "register_operand" "r"))))]
3829 "TARGET_V8 || TARGET_SPARCLITE || TARGET_SPARCLET || TARGET_DEPRECATED_V8_INSNS"
3830 "*
3831 {
3832 return TARGET_SPARCLET ? \"smuld %1,%2,%L0\" : \"smul %1,%2,%L0\;rd %%y,%H0\";
3833 }"
3834 [(set (attr "length")
3835 (if_then_else (eq_attr "isa" "sparclet")
3836 (const_int 1) (const_int 2)))])
3837
3838 ;; Extra pattern, because sign_extend of a constant isn't valid.
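;; That is, "(sign_extend:DI (const_int 5))" is not valid RTL because a
;; const_int carries no mode, so the expander above instead generates this
;; pattern with the bare constant, roughly (a sketch):
;;	(mult:DI (sign_extend:DI (reg:SI ...)) (const_int 5))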
3839
3840 (define_insn "const_mulsidi3"
3841 [(set (match_operand:DI 0 "register_operand" "=r")
3842 (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
3843 (match_operand:SI 2 "small_int" "I")))]
3844 "TARGET_V8 || TARGET_SPARCLITE || TARGET_SPARCLET || TARGET_DEPRECATED_V8_INSNS"
3845 "*
3846 {
3847 return TARGET_SPARCLET ? \"smuld %1,%2,%L0\" : \"smul %1,%2,%L0\;rd %%y,%H0\";
3848 }"
3849 [(set (attr "length")
3850 (if_then_else (eq_attr "isa" "sparclet")
3851 (const_int 1) (const_int 2)))])
3852
3853 (define_expand "smulsi3_highpart"
3854 [(set (match_operand:SI 0 "register_operand" "")
3855 (truncate:SI
3856 (lshiftrt:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" ""))
3857 (sign_extend:DI (match_operand:SI 2 "arith_operand" "")))
3858 (const_int 32))))]
3859 "TARGET_V8 || TARGET_SPARCLITE || TARGET_SPARCLET || TARGET_DEPRECATED_V8_INSNS"
3860 "
3861 {
3862 if (CONSTANT_P (operands[2]))
3863 {
3864 emit_insn (gen_const_smulsi3_highpart (operands[0], operands[1], operands[2]));
3865 DONE;
3866 }
3867 }")
3868
3869 (define_insn "*smulsidi3_highpart_sp32"
3870 [(set (match_operand:SI 0 "register_operand" "=r")
3871 (truncate:SI
3872 (lshiftrt:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
3873 (sign_extend:DI (match_operand:SI 2 "register_operand" "r")))
3874 (const_int 32))))]
3875 "TARGET_V8 || TARGET_SPARCLITE || TARGET_SPARCLET || TARGET_DEPRECATED_V8_INSNS"
3876 "smul %1,%2,%%g0\;rd %%y,%0"
3877 [(set_attr "length" "2")])
3878
3879 (define_insn "const_smulsi3_highpart"
3880 [(set (match_operand:SI 0 "register_operand" "=r")
3881 (truncate:SI
3882 (lshiftrt:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
3883 (match_operand:SI 2 "small_int" "I"))
3884 (const_int 32))))]
3885 "TARGET_V8 || TARGET_SPARCLITE || TARGET_SPARCLET || TARGET_DEPRECATED_V8_INSNS"
3886 "smul %1,%2,%%g0\;rd %%y,%0"
3887 [(set_attr "length" "2")])
3888
3889 (define_expand "umulsidi3"
3890 [(set (match_operand:DI 0 "register_operand" "")
3891 (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" ""))
3892 (zero_extend:DI (match_operand:SI 2 "uns_arith_operand" ""))))]
3893 "TARGET_V8 || TARGET_SPARCLITE || TARGET_SPARCLET || TARGET_DEPRECATED_V8_INSNS"
3894 "
3895 {
3896 if (CONSTANT_P (operands[2]))
3897 {
3898 emit_insn (gen_const_umulsidi3 (operands[0], operands[1], operands[2]));
3899 DONE;
3900 }
3901 }")
3902
3903 (define_insn "*umulsidi3_sp32"
3904 [(set (match_operand:DI 0 "register_operand" "=r")
3905 (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
3906 (zero_extend:DI (match_operand:SI 2 "register_operand" "r"))))]
3907 "TARGET_V8 || TARGET_SPARCLITE || TARGET_SPARCLET || TARGET_DEPRECATED_V8_INSNS"
3908 "*
3909 {
3910 return TARGET_SPARCLET ? \"umuld %1,%2,%L0\" : \"umul %1,%2,%L0\;rd %%y,%H0\";
3911 }"
3912 [(set (attr "length")
3913 (if_then_else (eq_attr "isa" "sparclet")
3914 (const_int 1) (const_int 2)))])
3915
3916 ;; Extra pattern, because zero_extend of a constant isn't valid.
3917
3918 (define_insn "const_umulsidi3"
3919 [(set (match_operand:DI 0 "register_operand" "=r")
3920 (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
3921 (match_operand:SI 2 "uns_small_int" "")))]
3922 "TARGET_V8 || TARGET_SPARCLITE || TARGET_SPARCLET || TARGET_DEPRECATED_V8_INSNS"
3923 "*
3924 {
3925 return TARGET_SPARCLET ? \"umuld %1,%2,%L0\" : \"umul %1,%2,%L0\;rd %%y,%H0\";
3926 }"
3927 [(set (attr "length")
3928 (if_then_else (eq_attr "isa" "sparclet")
3929 (const_int 1) (const_int 2)))])
3930
3931 (define_expand "umulsi3_highpart"
3932 [(set (match_operand:SI 0 "register_operand" "")
3933 (truncate:SI
3934 (lshiftrt:DI (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" ""))
3935 (zero_extend:DI (match_operand:SI 2 "uns_arith_operand" "")))
3936 (const_int 32))))]
3937 "TARGET_V8 || TARGET_SPARCLITE || TARGET_SPARCLET || TARGET_DEPRECATED_V8_INSNS"
3938 "
3939 {
3940 if (CONSTANT_P (operands[2]))
3941 {
3942 emit_insn (gen_const_umulsi3_highpart (operands[0], operands[1], operands[2]));
3943 DONE;
3944 }
3945 }")
3946
3947 (define_insn "*umulsidi3_highpart_sp32"
3948 [(set (match_operand:SI 0 "register_operand" "=r")
3949 (truncate:SI
3950 (lshiftrt:DI (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
3951 (zero_extend:DI (match_operand:SI 2 "register_operand" "r")))
3952 (const_int 32))))]
3953 "TARGET_V8 || TARGET_SPARCLITE || TARGET_SPARCLET || TARGET_DEPRECATED_V8_INSNS"
3954 "umul %1,%2,%%g0\;rd %%y,%0"
3955 [(set_attr "length" "2")])
3956
3957 (define_insn "const_umulsi3_highpart"
3958 [(set (match_operand:SI 0 "register_operand" "=r")
3959 (truncate:SI
3960 (lshiftrt:DI (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
3961 (match_operand:SI 2 "uns_small_int" ""))
3962 (const_int 32))))]
3963 "TARGET_V8 || TARGET_SPARCLITE || TARGET_SPARCLET || TARGET_DEPRECATED_V8_INSNS"
3964 "umul %1,%2,%%g0\;rd %%y,%0"
3965 [(set_attr "length" "2")])
3966
3967 ;; The v8 architecture specifies that there must be 3 instructions between
3968 ;; a y register write and a use of it for correct results.
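;; For example, on a plain v8 target the divsi3 pattern below pads with nops
;; to honor that rule (a sketch; register allocation is illustrative):
;;	sra %o0,31,%o2		! compute the sign word of the dividend
;;	wr %g0,%o2,%y		! write it to the y register
;;	nop; nop; nop		! 3 insns before the y register is used
;;	sdiv %o0,%o1,%o0
;; On v9 the nops are not needed and are omitted.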
3969
3970 (define_insn "divsi3"
3971 [(set (match_operand:SI 0 "register_operand" "=r")
3972 (div:SI (match_operand:SI 1 "register_operand" "r")
3973 (match_operand:SI 2 "arith_operand" "rI")))
3974 (clobber (match_scratch:SI 3 "=&r"))]
3975 "TARGET_V8 || TARGET_DEPRECATED_V8_INSNS"
3976 "*
3977 {
3978 if (TARGET_V9)
3979 return \"sra %1,31,%3\;wr %%g0,%3,%%y\;sdiv %1,%2,%0\";
3980 else
3981 return \"sra %1,31,%3\;wr %%g0,%3,%%y\;nop\;nop\;nop\;sdiv %1,%2,%0\";
3982 }"
3983 [(set (attr "length")
3984 (if_then_else (eq_attr "isa" "v9")
3985 (const_int 3) (const_int 6)))])
3986
3987 (define_insn "divdi3"
3988 [(set (match_operand:DI 0 "register_operand" "=r")
3989 (div:DI (match_operand:DI 1 "register_operand" "r")
3990 (match_operand:DI 2 "arith_double_operand" "rHI")))]
3991 "TARGET_ARCH64"
3992 "sdivx %1,%2,%0")
3993
3994 ;; It is not known whether this will match.
3995
3996 (define_insn "*cmp_sdiv_cc_set"
3997 [(set (match_operand:SI 0 "register_operand" "=r")
3998 (div:SI (match_operand:SI 1 "register_operand" "r")
3999 (match_operand:SI 2 "arith_operand" "rI")))
4000 (set (reg:CC 100)
4001 (compare:CC (div:SI (match_dup 1) (match_dup 2))
4002 (const_int 0)))
4003 (clobber (match_scratch:SI 3 "=&r"))]
4004 "TARGET_V8 || TARGET_DEPRECATED_V8_INSNS"
4005 "*
4006 {
4007 if (TARGET_V9)
4008 return \"sra %1,31,%3\;wr %%g0,%3,%%y\;sdivcc %1,%2,%0\";
4009 else
4010 return \"sra %1,31,%3\;wr %%g0,%3,%%y\;nop\;nop\;nop\;sdivcc %1,%2,%0\";
4011 }"
4012 [(set (attr "length")
4013 (if_then_else (eq_attr "isa" "v9")
4014 (const_int 3) (const_int 6)))])
4015
4016 (define_insn "udivsi3"
4017 [(set (match_operand:SI 0 "register_operand" "=r")
4018 (udiv:SI (match_operand:SI 1 "register_operand" "r")
4019 (match_operand:SI 2 "arith_operand" "rI")))]
4020 "TARGET_V8 || TARGET_DEPRECATED_V8_INSNS"
4021 "*
4022 {
4023 if (TARGET_V9)
4024 return \"wr %%g0,%%g0,%%y\;udiv %1,%2,%0\";
4025 else
4026 return \"wr %%g0,%%g0,%%y\;nop\;nop\;nop\;udiv %1,%2,%0\";
4027 }"
4028 [(set (attr "length")
4029 (if_then_else (eq_attr "isa" "v9")
4030 (const_int 2) (const_int 5)))])
4031
4032 (define_insn "udivdi3"
4033 [(set (match_operand:DI 0 "register_operand" "=r")
4034 (udiv:DI (match_operand:DI 1 "register_operand" "r")
4035 (match_operand:DI 2 "arith_double_operand" "rHI")))]
4036 "TARGET_ARCH64"
4037 "udivx %1,%2,%0")
4038
4039 ;; It is not known whether this will match.
4040
4041 (define_insn "*cmp_udiv_cc_set"
4042 [(set (match_operand:SI 0 "register_operand" "=r")
4043 (udiv:SI (match_operand:SI 1 "register_operand" "r")
4044 (match_operand:SI 2 "arith_operand" "rI")))
4045 (set (reg:CC 100)
4046 (compare:CC (udiv:SI (match_dup 1) (match_dup 2))
4047 (const_int 0)))]
4048 "TARGET_V8 || TARGET_DEPRECATED_V8_INSNS"
4049 "*
4050 {
4051 if (TARGET_V9)
4052 return \"wr %%g0,%%g0,%%y\;udivcc %1,%2,%0\";
4053 else
4054 return \"wr %%g0,%%g0,%%y\;nop\;nop\;nop\;udivcc %1,%2,%0\";
4055 }"
4056 [(set (attr "length")
4057 (if_then_else (eq_attr "isa" "v9")
4058 (const_int 2) (const_int 5)))])
4059
4060 ; sparclet multiply/accumulate insns
4061
4062 (define_insn "*smacsi"
4063 [(set (match_operand:SI 0 "register_operand" "=r")
4064 (plus:SI (mult:SI (match_operand:SI 1 "register_operand" "%r")
4065 (match_operand:SI 2 "arith_operand" "rI"))
4066 (match_operand:SI 3 "register_operand" "0")))]
4067 "TARGET_SPARCLET"
4068 "smac %1,%2,%0"
4069 [(set_attr "type" "imul")])
4070
4071 (define_insn "*smacdi"
4072 [(set (match_operand:DI 0 "register_operand" "=r")
4073 (plus:DI (mult:DI (sign_extend:DI
4074 (match_operand:SI 1 "register_operand" "%r"))
4075 (sign_extend:DI
4076 (match_operand:SI 2 "register_operand" "r")))
4077 (match_operand:DI 3 "register_operand" "0")))]
4078 "TARGET_SPARCLET"
4079 "smacd %1,%2,%L0"
4080 [(set_attr "type" "imul")])
4081
4082 (define_insn "*umacdi"
4083 [(set (match_operand:DI 0 "register_operand" "=r")
4084 (plus:DI (mult:DI (zero_extend:DI
4085 (match_operand:SI 1 "register_operand" "%r"))
4086 (zero_extend:DI
4087 (match_operand:SI 2 "register_operand" "r")))
4088 (match_operand:DI 3 "register_operand" "0")))]
4089 "TARGET_SPARCLET"
4090 "umacd %1,%2,%L0"
4091 [(set_attr "type" "imul")])
4092 \f
4093 ;;- Boolean instructions
4094 ;; We define DImode `and' so with DImode `not' we can get
4095 ;; DImode `andn'. Other combinations are possible.
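;; For example (a sketch, registers illustrative), "x & ~y" becomes a single
;;	andn %o1,%o2,%o0
;; on 64 bit targets, and a pair of them (one per word half) on 32 bit ones.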
4096
4097 (define_expand "anddi3"
4098 [(set (match_operand:DI 0 "register_operand" "")
4099 (and:DI (match_operand:DI 1 "arith_double_operand" "")
4100 (match_operand:DI 2 "arith_double_operand" "")))]
4101 ""
4102 "")
4103
4104 (define_insn "*anddi3_sp32"
4105 [(set (match_operand:DI 0 "register_operand" "=r")
4106 (and:DI (match_operand:DI 1 "arith_double_operand" "%r")
4107 (match_operand:DI 2 "arith_double_operand" "rHI")))]
4108 "! TARGET_ARCH64"
4109 "*
4110 {
4111 rtx op2 = operands[2];
4112
4113 if (GET_CODE (op2) == CONST_INT
4114 || GET_CODE (op2) == CONST_DOUBLE)
4115 {
4116 rtx xoperands[4];
4117 xoperands[0] = operands[0];
4118 xoperands[1] = operands[1];
4119 if (WORDS_BIG_ENDIAN)
4120 split_double (op2, &xoperands[2], &xoperands[3]);
4121 else
4122 split_double (op2, &xoperands[3], &xoperands[2]);
4123 output_asm_insn (\"and %L1,%3,%L0\;and %H1,%2,%H0\", xoperands);
4124 return \"\";
4125 }
4126 return \"and %1,%2,%0\;and %R1,%R2,%R0\";
4127 }"
4128 [(set_attr "length" "2")])
4129
4130 (define_insn "*anddi3_sp64"
4131 [(set (match_operand:DI 0 "register_operand" "=r")
4132 (and:DI (match_operand:DI 1 "arith_double_operand" "%r")
4133 (match_operand:DI 2 "arith_double_operand" "rHI")))]
4134 "TARGET_ARCH64"
4135 "and %1,%2,%0")
4136
4137 (define_insn "andsi3"
4138 [(set (match_operand:SI 0 "register_operand" "=r")
4139 (and:SI (match_operand:SI 1 "arith_operand" "%r")
4140 (match_operand:SI 2 "arith_operand" "rI")))]
4141 ""
4142 "and %1,%2,%0"
4143 [(set_attr "type" "ialu")])
4144
4145 (define_split
4146 [(set (match_operand:SI 0 "register_operand" "")
4147 (and:SI (match_operand:SI 1 "register_operand" "")
4148 (match_operand:SI 2 "" "")))
4149 (clobber (match_operand:SI 3 "register_operand" ""))]
4150 "GET_CODE (operands[2]) == CONST_INT
4151 && !SMALL_INT (operands[2])
4152 && (INTVAL (operands[2]) & 0x3ff) == 0x3ff"
4153 [(set (match_dup 3) (match_dup 4))
4154 (set (match_dup 0) (and:SI (not:SI (match_dup 3)) (match_dup 1)))]
4155 "
4156 {
4157 operands[4] = GEN_INT (~INTVAL (operands[2]));
4158 }")
4159
4160 (define_insn "*and_not_di_sp32"
4161 [(set (match_operand:DI 0 "register_operand" "=r")
4162 (and:DI (not:DI (match_operand:DI 1 "register_operand" "r"))
4163 (match_operand:DI 2 "register_operand" "r")))]
4164 "! TARGET_ARCH64"
4165 "andn %2,%1,%0\;andn %R2,%R1,%R0"
4166 [(set_attr "length" "2")])
4167
4168 (define_insn "*and_not_di_sp64"
4169 [(set (match_operand:DI 0 "register_operand" "=r")
4170 (and:DI (not:DI (match_operand:DI 1 "register_operand" "r"))
4171 (match_operand:DI 2 "register_operand" "r")))]
4172 "TARGET_ARCH64"
4173 "andn %2,%1,%0")
4174
4175 (define_insn "*and_not_si"
4176 [(set (match_operand:SI 0 "register_operand" "=r")
4177 (and:SI (not:SI (match_operand:SI 1 "register_operand" "r"))
4178 (match_operand:SI 2 "register_operand" "r")))]
4179 ""
4180 "andn %2,%1,%0"
4181 [(set_attr "type" "ialu")])
4182
4183 (define_expand "iordi3"
4184 [(set (match_operand:DI 0 "register_operand" "")
4185 (ior:DI (match_operand:DI 1 "arith_double_operand" "")
4186 (match_operand:DI 2 "arith_double_operand" "")))]
4187 ""
4188 "")
4189
4190 (define_insn "*iordi3_sp32"
4191 [(set (match_operand:DI 0 "register_operand" "=r")
4192 (ior:DI (match_operand:DI 1 "arith_double_operand" "%r")
4193 (match_operand:DI 2 "arith_double_operand" "rHI")))]
4194 "! TARGET_ARCH64"
4195 "*
4196 {
4197 rtx op2 = operands[2];
4198
4199 if (GET_CODE (op2) == CONST_INT
4200 || GET_CODE (op2) == CONST_DOUBLE)
4201 {
4202 rtx xoperands[4];
4203 xoperands[0] = operands[0];
4204 xoperands[1] = operands[1];
4205 if (WORDS_BIG_ENDIAN)
4206 split_double (op2, &xoperands[2], &xoperands[3]);
4207 else
4208 split_double (op2, &xoperands[3], &xoperands[2]);
4209 output_asm_insn (\"or %L1,%3,%L0\;or %H1,%2,%H0\", xoperands);
4210 return \"\";
4211 }
4212 return \"or %1,%2,%0\;or %R1,%R2,%R0\";
4213 }"
4214 [(set_attr "length" "2")])
4215
4216 (define_insn "*iordi3_sp64"
4217 [(set (match_operand:DI 0 "register_operand" "=r")
4218 (ior:DI (match_operand:DI 1 "arith_double_operand" "%r")
4219 (match_operand:DI 2 "arith_double_operand" "rHI")))]
4220 "TARGET_ARCH64"
4221 "or %1,%2,%0")
4222
4223 (define_insn "iorsi3"
4224 [(set (match_operand:SI 0 "register_operand" "=r")
4225 (ior:SI (match_operand:SI 1 "arith_operand" "%r")
4226 (match_operand:SI 2 "arith_operand" "rI")))]
4227 ""
4228 "or %1,%2,%0"
4229 [(set_attr "type" "ialu")])
4230
4231 (define_split
4232 [(set (match_operand:SI 0 "register_operand" "")
4233 (ior:SI (match_operand:SI 1 "register_operand" "")
4234 (match_operand:SI 2 "" "")))
4235 (clobber (match_operand:SI 3 "register_operand" ""))]
4236 "GET_CODE (operands[2]) == CONST_INT
4237 && !SMALL_INT (operands[2])
4238 && (INTVAL (operands[2]) & 0x3ff) == 0x3ff"
4239 [(set (match_dup 3) (match_dup 4))
4240 (set (match_dup 0) (ior:SI (not:SI (match_dup 3)) (match_dup 1)))]
4241 "
4242 {
4243 operands[4] = GEN_INT (~INTVAL (operands[2]));
4244 }")
4245
4246 (define_insn "*or_not_di_sp32"
4247 [(set (match_operand:DI 0 "register_operand" "=r")
4248 (ior:DI (not:DI (match_operand:DI 1 "register_operand" "r"))
4249 (match_operand:DI 2 "register_operand" "r")))]
4250 "! TARGET_ARCH64"
4251 "orn %2,%1,%0\;orn %R2,%R1,%R0"
4252 [(set_attr "length" "2")])
4253
4254 (define_insn "*or_not_di_sp64"
4255 [(set (match_operand:DI 0 "register_operand" "=r")
4256 (ior:DI (not:DI (match_operand:DI 1 "register_operand" "r"))
4257 (match_operand:DI 2 "register_operand" "r")))]
4258 "TARGET_ARCH64"
4259 "orn %2,%1,%0")
4260
4261 (define_insn "*or_not_si"
4262 [(set (match_operand:SI 0 "register_operand" "=r")
4263 (ior:SI (not:SI (match_operand:SI 1 "register_operand" "r"))
4264 (match_operand:SI 2 "register_operand" "r")))]
4265 ""
4266 "orn %2,%1,%0"
4267 [(set_attr "type" "ialu")])
4268
4269 (define_expand "xordi3"
4270 [(set (match_operand:DI 0 "register_operand" "")
4271 (xor:DI (match_operand:DI 1 "arith_double_operand" "")
4272 (match_operand:DI 2 "arith_double_operand" "")))]
4273 ""
4274 "")
4275
4276 (define_insn "*xorsi3_sp32"
4277 [(set (match_operand:DI 0 "register_operand" "=r")
4278 (xor:DI (match_operand:DI 1 "arith_double_operand" "%r")
4279 (match_operand:DI 2 "arith_double_operand" "rHI")))]
4280 "! TARGET_ARCH64"
4281 "*
4282 {
4283 rtx op2 = operands[2];
4284
4285 if (GET_CODE (op2) == CONST_INT
4286 || GET_CODE (op2) == CONST_DOUBLE)
4287 {
4288 rtx xoperands[4];
4289 xoperands[0] = operands[0];
4290 xoperands[1] = operands[1];
4291 if (WORDS_BIG_ENDIAN)
4292 split_double (op2, &xoperands[2], &xoperands[3]);
4293 else
4294 split_double (op2, &xoperands[3], &xoperands[2]);
4295 output_asm_insn (\"xor %L1,%3,%L0\;xor %H1,%2,%H0\", xoperands);
4296 return \"\";
4297 }
4298 return \"xor %1,%2,%0\;xor %R1,%R2,%R0\";
4299 }"
4300 [(set_attr "length" "2")])
4301
4302 (define_insn "*xordi3_sp64"
4303 [(set (match_operand:DI 0 "register_operand" "=r")
4304 (xor:DI (match_operand:DI 1 "arith_double_operand" "%rJ")
4305 (match_operand:DI 2 "arith_double_operand" "rHI")))]
4306 "TARGET_ARCH64"
4307 "xor %r1,%2,%0")
4308
4309 (define_insn "xorsi3"
4310 [(set (match_operand:SI 0 "register_operand" "=r")
4311 (xor:SI (match_operand:SI 1 "arith_operand" "%rJ")
4312 (match_operand:SI 2 "arith_operand" "rI")))]
4313 ""
4314 "xor %r1,%2,%0"
4315 [(set_attr "type" "ialu")])
4316
4317 (define_split
4318 [(set (match_operand:SI 0 "register_operand" "")
4319 (xor:SI (match_operand:SI 1 "register_operand" "")
4320 (match_operand:SI 2 "" "")))
4321 (clobber (match_operand:SI 3 "register_operand" ""))]
4322 "GET_CODE (operands[2]) == CONST_INT
4323 && !SMALL_INT (operands[2])
4324 && (INTVAL (operands[2]) & 0x3ff) == 0x3ff"
4325 [(set (match_dup 3) (match_dup 4))
4326 (set (match_dup 0) (not:SI (xor:SI (match_dup 3) (match_dup 1))))]
4327 "
4328 {
4329 operands[4] = GEN_INT (~INTVAL (operands[2]));
4330 }")
4331
4332 (define_split
4333 [(set (match_operand:SI 0 "register_operand" "")
4334 (not:SI (xor:SI (match_operand:SI 1 "register_operand" "")
4335 (match_operand:SI 2 "" ""))))
4336 (clobber (match_operand:SI 3 "register_operand" ""))]
4337 "GET_CODE (operands[2]) == CONST_INT
4338 && !SMALL_INT (operands[2])
4339 && (INTVAL (operands[2]) & 0x3ff) == 0x3ff"
4340 [(set (match_dup 3) (match_dup 4))
4341 (set (match_dup 0) (xor:SI (match_dup 3) (match_dup 1)))]
4342 "
4343 {
4344 operands[4] = GEN_INT (~INTVAL (operands[2]));
4345 }")
4346
4347 ;; xnor patterns. Note that (a ^ ~b) == (~a ^ b) == ~(a ^ b).
4348 ;; Combine now canonicalizes to the rightmost expression.
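;; So, for example (a sketch, registers illustrative), "a ^ ~b", "~a ^ b" and
;; "~(a ^ b)" all reach these patterns in the ~(a ^ b) form and emit a single
;;	xnor %o0,%o1,%o0
;; here; the sp32 variant emits one xnor per word half.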
4349 (define_insn "*xor_not_di_sp32"
4350 [(set (match_operand:DI 0 "register_operand" "=r")
4351 (not:DI (xor:DI (match_operand:DI 1 "register_operand" "r")
4352 (match_operand:DI 2 "register_operand" "r"))))]
4353 "! TARGET_ARCH64"
4354 "xnor %1,%2,%0\;xnor %R1,%R2,%R0"
4355 [(set_attr "length" "2")])
4356
4357 (define_insn "*xor_not_di_sp64"
4358 [(set (match_operand:DI 0 "register_operand" "=r")
4359 (not:DI (xor:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
4360 (match_operand:DI 2 "arith_double_operand" "rHI"))))]
4361 "TARGET_ARCH64"
4362 "xnor %r1,%2,%0")
4363
4364 (define_insn "*xor_not_si"
4365 [(set (match_operand:SI 0 "register_operand" "=r")
4366 (not:SI (xor:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
4367 (match_operand:SI 2 "arith_operand" "rI"))))]
4368 ""
4369 "xnor %r1,%2,%0"
4370 [(set_attr "type" "ialu")])
4371
4372 ;; These correspond to the above in the case where we also (or only)
4373 ;; want to set the condition code.
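;; For example (a sketch, registers illustrative), testing "(a & b) != 0" can
;; use the compare-only form, which throws the result away:
;;	andcc %o0,%o1,%g0
;; while the *_set variants keep the result in a register as well.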
4374
4375 (define_insn "*cmp_cc_arith_op"
4376 [(set (reg:CC 100)
4377 (compare:CC
4378 (match_operator:SI 2 "cc_arithop"
4379 [(match_operand:SI 0 "arith_operand" "%r")
4380 (match_operand:SI 1 "arith_operand" "rI")])
4381 (const_int 0)))]
4382 ""
4383 "%A2cc %0,%1,%%g0"
4384 [(set_attr "type" "compare")])
4385
4386 (define_insn "*cmp_ccx_arith_op"
4387 [(set (reg:CCX 100)
4388 (compare:CCX
4389 (match_operator:DI 2 "cc_arithop"
4390 [(match_operand:DI 0 "arith_double_operand" "%r")
4391 (match_operand:DI 1 "arith_double_operand" "rHI")])
4392 (const_int 0)))]
4393 "TARGET_ARCH64"
4394 "%A2cc %0,%1,%%g0"
4395 [(set_attr "type" "compare")])
4396
4397 (define_insn "*cmp_cc_arith_op_set"
4398 [(set (reg:CC 100)
4399 (compare:CC
4400 (match_operator:SI 3 "cc_arithop"
4401 [(match_operand:SI 1 "arith_operand" "%r")
4402 (match_operand:SI 2 "arith_operand" "rI")])
4403 (const_int 0)))
4404 (set (match_operand:SI 0 "register_operand" "=r")
4405 (match_dup 3))]
4406 ""
4407 "%A3cc %1,%2,%0")
4408
4409 (define_insn "*cmp_ccx_arith_op_set"
4410 [(set (reg:CCX 100)
4411 (compare:CCX
4412 (match_operator:DI 3 "cc_arithop"
4413 [(match_operand:DI 1 "arith_double_operand" "%r")
4414 (match_operand:DI 2 "arith_double_operand" "rHI")])
4415 (const_int 0)))
4416 (set (match_operand:DI 0 "register_operand" "=r")
4417 (match_dup 3))]
4418 "TARGET_ARCH64"
4419 "%A3cc %1,%2,%0")
4420
4421 (define_insn "*cmp_cc_xor_not"
4422 [(set (reg:CC 100)
4423 (compare:CC
4424 (not:SI (xor:SI (match_operand:SI 0 "reg_or_0_operand" "%rJ")
4425 (match_operand:SI 1 "arith_operand" "rI")))
4426 (const_int 0)))]
4427 ""
4428 "xnorcc %r0,%1,%%g0"
4429 [(set_attr "type" "compare")])
4430
4431 (define_insn "*cmp_ccx_xor_not"
4432 [(set (reg:CCX 100)
4433 (compare:CCX
4434 (not:DI (xor:DI (match_operand:DI 0 "reg_or_0_operand" "%rJ")
4435 (match_operand:DI 1 "arith_double_operand" "rHI")))
4436 (const_int 0)))]
4437 "TARGET_ARCH64"
4438 "xnorcc %r0,%1,%%g0"
4439 [(set_attr "type" "compare")])
4440
4441 (define_insn "*cmp_cc_xor_not_set"
4442 [(set (reg:CC 100)
4443 (compare:CC
4444 (not:SI (xor:SI (match_operand:SI 1 "reg_or_0_operand" "%rJ")
4445 (match_operand:SI 2 "arith_operand" "rI")))
4446 (const_int 0)))
4447 (set (match_operand:SI 0 "register_operand" "=r")
4448 (not:SI (xor:SI (match_dup 1) (match_dup 2))))]
4449 ""
4450 "xnorcc %r1,%2,%0")
4451
4452 (define_insn "*cmp_ccx_xor_not_set"
4453 [(set (reg:CCX 100)
4454 (compare:CCX
4455 (not:DI (xor:DI (match_operand:DI 1 "reg_or_0_operand" "%rJ")
4456 (match_operand:DI 2 "arith_double_operand" "rHI")))
4457 (const_int 0)))
4458 (set (match_operand:DI 0 "register_operand" "=r")
4459 (not:DI (xor:DI (match_dup 1) (match_dup 2))))]
4460 "TARGET_ARCH64"
4461 "xnorcc %r1,%2,%0")
4462
4463 (define_insn "*cmp_cc_arith_op_not"
4464 [(set (reg:CC 100)
4465 (compare:CC
4466 (match_operator:SI 2 "cc_arithopn"
4467 [(not:SI (match_operand:SI 0 "arith_operand" "rI"))
4468 (match_operand:SI 1 "reg_or_0_operand" "rJ")])
4469 (const_int 0)))]
4470 ""
4471 "%B2cc %r1,%0,%%g0"
4472 [(set_attr "type" "compare")])
4473
4474 (define_insn "*cmp_ccx_arith_op_not"
4475 [(set (reg:CCX 100)
4476 (compare:CCX
4477 (match_operator:DI 2 "cc_arithopn"
4478 [(not:DI (match_operand:DI 0 "arith_double_operand" "rHI"))
4479 (match_operand:DI 1 "reg_or_0_operand" "rJ")])
4480 (const_int 0)))]
4481 "TARGET_ARCH64"
4482 "%B2cc %r1,%0,%%g0"
4483 [(set_attr "type" "compare")])
4484
4485 (define_insn "*cmp_cc_arith_op_not_set"
4486 [(set (reg:CC 100)
4487 (compare:CC
4488 (match_operator:SI 3 "cc_arithopn"
4489 [(not:SI (match_operand:SI 1 "arith_operand" "rI"))
4490 (match_operand:SI 2 "reg_or_0_operand" "rJ")])
4491 (const_int 0)))
4492 (set (match_operand:SI 0 "register_operand" "=r")
4493 (match_dup 3))]
4494 ""
4495 "%B3cc %r2,%1,%0")
4496
4497 (define_insn "*cmp_ccx_arith_op_not_set"
4498 [(set (reg:CCX 100)
4499 (compare:CCX
4500 (match_operator:DI 3 "cc_arithopn"
4501 [(not:DI (match_operand:DI 1 "arith_double_operand" "rHI"))
4502 (match_operand:DI 2 "reg_or_0_operand" "rJ")])
4503 (const_int 0)))
4504 (set (match_operand:DI 0 "register_operand" "=r")
4505 (match_dup 3))]
4506 "TARGET_ARCH64"
4507 "%B3cc %r2,%1,%0")
4508
4509 ;; We cannot use the "neg" pseudo insn because the Sun assembler
4510 ;; does not know how to make it work for constants.
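;; Instead negsi2 below spells the operation out, which also works when the
;; operand is an immediate, e.g. (a sketch):
;;	sub %g0,%o1,%o0		! negate a register
;;	sub %g0,5,%o0		! negate a constant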
4511
4512 (define_expand "negdi2"
4513 [(set (match_operand:DI 0 "register_operand" "=r")
4514 (neg:DI (match_operand:DI 1 "register_operand" "r")))]
4515 ""
4516 "
4517 {
4518 if (! TARGET_ARCH64)
4519 {
4520 emit_insn (gen_rtx (PARALLEL, VOIDmode, gen_rtvec (2,
4521 gen_rtx (SET, VOIDmode, operand0,
4522 gen_rtx (NEG, DImode, operand1)),
4523 gen_rtx (CLOBBER, VOIDmode,
4524 gen_rtx (REG, SImode, SPARC_ICC_REG)))));
4525 DONE;
4526 }
4527 }")
4528
4529 (define_insn "*negdi2_sp32"
4530 [(set (match_operand:DI 0 "register_operand" "=r")
4531 (neg:DI (match_operand:DI 1 "register_operand" "r")))
4532 (clobber (reg:SI 100))]
4533 "! TARGET_ARCH64"
4534 "*
4535 {
4536 if (TARGET_LIVE_G0)
4537 output_asm_insn (\"and %%g0,0,%%g0\", operands);
4538 return \"subcc %%g0,%L1,%L0\;subx %%g0,%H1,%H0\";
4539 }"
4540 [(set_attr "type" "unary")
4541 ;; ??? This is wrong for TARGET_LIVE_G0 but it's not critical.
4542 (set_attr "length" "2")])
4543
4544 (define_insn "*negdi2_sp64"
4545 [(set (match_operand:DI 0 "register_operand" "=r")
4546 (neg:DI (match_operand:DI 1 "register_operand" "r")))]
4547 "TARGET_ARCH64"
4548 "sub %%g0,%1,%0"
4549 [(set_attr "type" "unary")
4550 (set_attr "length" "1")])
4551
4552 (define_insn "negsi2"
4553 [(set (match_operand:SI 0 "register_operand" "=r")
4554 (neg:SI (match_operand:SI 1 "arith_operand" "rI")))]
4555 ""
4556 "*
4557 {
4558 if (TARGET_LIVE_G0)
4559 return \"and %%g0,0,%%g0\;sub %%g0,%1,%0\";
4560 return \"sub %%g0,%1,%0\";
4561 }"
4562 [(set_attr "type" "unary")
4563 (set (attr "length")
4564 (if_then_else (eq_attr "live_g0" "yes") (const_int 2) (const_int 1)))])
4565
4566 (define_insn "*cmp_cc_neg"
4567 [(set (reg:CC_NOOV 100)
4568 (compare:CC_NOOV (neg:SI (match_operand:SI 0 "arith_operand" "rI"))
4569 (const_int 0)))]
4570 "! TARGET_LIVE_G0"
4571 "subcc %%g0,%0,%%g0"
4572 [(set_attr "type" "compare")])
4573
4574 (define_insn "*cmp_ccx_neg"
4575 [(set (reg:CCX_NOOV 100)
4576 (compare:CCX_NOOV (neg:DI (match_operand:DI 0 "arith_double_operand" "rHI"))
4577 (const_int 0)))]
4578 "TARGET_ARCH64"
4579 "subcc %%g0,%0,%%g0"
4580 [(set_attr "type" "compare")])
4581
4582 (define_insn "*cmp_cc_set_neg"
4583 [(set (reg:CC_NOOV 100)
4584 (compare:CC_NOOV (neg:SI (match_operand:SI 1 "arith_operand" "rI"))
4585 (const_int 0)))
4586 (set (match_operand:SI 0 "register_operand" "=r")
4587 (neg:SI (match_dup 1)))]
4588 "! TARGET_LIVE_G0"
4589 "subcc %%g0,%1,%0"
4590 [(set_attr "type" "unary")])
4591
4592 (define_insn "*cmp_ccx_set_neg"
4593 [(set (reg:CCX_NOOV 100)
4594 (compare:CCX_NOOV (neg:DI (match_operand:DI 1 "arith_double_operand" "rHI"))
4595 (const_int 0)))
4596 (set (match_operand:DI 0 "register_operand" "=r")
4597 (neg:DI (match_dup 1)))]
4598 "TARGET_ARCH64"
4599 "subcc %%g0,%1,%0"
4600 [(set_attr "type" "unary")])
4601
4602 ;; We cannot use the "not" pseudo insn because the Sun assembler
4603 ;; does not know how to make it work for constants.
4604 (define_expand "one_cmpldi2"
4605 [(set (match_operand:DI 0 "register_operand" "")
4606 (not:DI (match_operand:DI 1 "register_operand" "")))]
4607 ""
4608 "")
4609
4610 (define_insn "*one_cmpldi2_sp32"
4611 [(set (match_operand:DI 0 "register_operand" "=r")
4612 (not:DI (match_operand:DI 1 "register_operand" "r")))]
4613 "! TARGET_ARCH64"
4614 "xnor %1,0,%0\;xnor %R1,0,%R0"
4615 [(set_attr "type" "unary")
4616 (set_attr "length" "2")])
4617
4618 (define_insn "*one_cmpldi2_sp64"
4619 [(set (match_operand:DI 0 "register_operand" "=r")
4620 (not:DI (match_operand:DI 1 "arith_double_operand" "rHI")))]
4621 "TARGET_ARCH64"
4622 "xnor %1,0,%0"
4623 [(set_attr "type" "unary")])
4624
4625 (define_insn "one_cmplsi2"
4626 [(set (match_operand:SI 0 "register_operand" "=r,r")
4627 (not:SI (match_operand:SI 1 "arith_operand" "r,I")))]
4628 ""
4629 "*
4630 {
4631 if (which_alternative == 0)
4632 return \"xnor %1,0,%0\";
4633 if (TARGET_LIVE_G0)
4634 output_asm_insn (\"and %%g0,0,%%g0\", operands);
4635 return \"xnor %%g0,%1,%0\";
4636 }"
4637 [(set_attr "type" "unary")
4638 (set_attr_alternative "length"
4639 [(const_int 1)
4640 (if_then_else (eq_attr "live_g0" "yes") (const_int 2) (const_int 1))])])
4641
4642 (define_insn "*cmp_cc_not"
4643 [(set (reg:CC 100)
4644 (compare:CC (not:SI (match_operand:SI 0 "arith_operand" "rI"))
4645 (const_int 0)))]
4646 "! TARGET_LIVE_G0"
4647 "xnorcc %%g0,%0,%%g0"
4648 [(set_attr "type" "compare")])
4649
4650 (define_insn "*cmp_ccx_not"
4651 [(set (reg:CCX 100)
4652 (compare:CCX (not:DI (match_operand:DI 0 "arith_double_operand" "rHI"))
4653 (const_int 0)))]
4654 "TARGET_ARCH64"
4655 "xnorcc %%g0,%0,%%g0"
4656 [(set_attr "type" "compare")])
4657
4658 (define_insn "*cmp_cc_set_not"
4659 [(set (reg:CC 100)
4660 (compare:CC (not:SI (match_operand:SI 1 "arith_operand" "rI"))
4661 (const_int 0)))
4662 (set (match_operand:SI 0 "register_operand" "=r")
4663 (not:SI (match_dup 1)))]
4664 "! TARGET_LIVE_G0"
4665 "xnorcc %%g0,%1,%0"
4666 [(set_attr "type" "unary")])
4667
4668 (define_insn "*cmp_ccx_set_not"
4669 [(set (reg:CCX 100)
4670 (compare:CCX (not:DI (match_operand:DI 1 "arith_double_operand" "rHI"))
4671 (const_int 0)))
4672 (set (match_operand:DI 0 "register_operand" "=r")
4673 (not:DI (match_dup 1)))]
4674 "TARGET_ARCH64"
4675 "xnorcc %%g0,%1,%0"
4676 [(set_attr "type" "unary")])
4677 \f
4678 ;; Floating point arithmetic instructions.
4679
4680 (define_insn "addtf3"
4681 [(set (match_operand:TF 0 "register_operand" "=e")
4682 (plus:TF (match_operand:TF 1 "register_operand" "e")
4683 (match_operand:TF 2 "register_operand" "e")))]
4684 "TARGET_FPU && TARGET_HARD_QUAD"
4685 "faddq %1,%2,%0"
4686 [(set_attr "type" "fp")])
4687
4688 (define_insn "adddf3"
4689 [(set (match_operand:DF 0 "register_operand" "=e")
4690 (plus:DF (match_operand:DF 1 "register_operand" "e")
4691 (match_operand:DF 2 "register_operand" "e")))]
4692 "TARGET_FPU"
4693 "faddd %1,%2,%0"
4694 [(set_attr "type" "fp")])
4695
4696 (define_insn "addsf3"
4697 [(set (match_operand:SF 0 "register_operand" "=f")
4698 (plus:SF (match_operand:SF 1 "register_operand" "f")
4699 (match_operand:SF 2 "register_operand" "f")))]
4700 "TARGET_FPU"
4701 "fadds %1,%2,%0"
4702 [(set_attr "type" "fp")])
4703
4704 (define_insn "subtf3"
4705 [(set (match_operand:TF 0 "register_operand" "=e")
4706 (minus:TF (match_operand:TF 1 "register_operand" "e")
4707 (match_operand:TF 2 "register_operand" "e")))]
4708 "TARGET_FPU && TARGET_HARD_QUAD"
4709 "fsubq %1,%2,%0"
4710 [(set_attr "type" "fp")])
4711
4712 (define_insn "subdf3"
4713 [(set (match_operand:DF 0 "register_operand" "=e")
4714 (minus:DF (match_operand:DF 1 "register_operand" "e")
4715 (match_operand:DF 2 "register_operand" "e")))]
4716 "TARGET_FPU"
4717 "fsubd %1,%2,%0"
4718 [(set_attr "type" "fp")])
4719
4720 (define_insn "subsf3"
4721 [(set (match_operand:SF 0 "register_operand" "=f")
4722 (minus:SF (match_operand:SF 1 "register_operand" "f")
4723 (match_operand:SF 2 "register_operand" "f")))]
4724 "TARGET_FPU"
4725 "fsubs %1,%2,%0"
4726 [(set_attr "type" "fp")])
4727
4728 (define_insn "multf3"
4729 [(set (match_operand:TF 0 "register_operand" "=e")
4730 (mult:TF (match_operand:TF 1 "register_operand" "e")
4731 (match_operand:TF 2 "register_operand" "e")))]
4732 "TARGET_FPU && TARGET_HARD_QUAD"
4733 "fmulq %1,%2,%0"
4734 [(set_attr "type" "fpmul")])
4735
4736 (define_insn "muldf3"
4737 [(set (match_operand:DF 0 "register_operand" "=e")
4738 (mult:DF (match_operand:DF 1 "register_operand" "e")
4739 (match_operand:DF 2 "register_operand" "e")))]
4740 "TARGET_FPU"
4741 "fmuld %1,%2,%0"
4742 [(set_attr "type" "fpmul")])
4743
4744 (define_insn "mulsf3"
4745 [(set (match_operand:SF 0 "register_operand" "=f")
4746 (mult:SF (match_operand:SF 1 "register_operand" "f")
4747 (match_operand:SF 2 "register_operand" "f")))]
4748 "TARGET_FPU"
4749 "fmuls %1,%2,%0"
4750 [(set_attr "type" "fpmul")])
4751
4752 (define_insn "*muldf3_extend"
4753 [(set (match_operand:DF 0 "register_operand" "=e")
4754 (mult:DF (float_extend:DF (match_operand:SF 1 "register_operand" "f"))
4755 (float_extend:DF (match_operand:SF 2 "register_operand" "f"))))]
4756 "(TARGET_V8 || TARGET_V9) && TARGET_FPU"
4757 "fsmuld %1,%2,%0"
4758 [(set_attr "type" "fpmul")])
4759
4760 (define_insn "*multf3_extend"
4761 [(set (match_operand:TF 0 "register_operand" "=e")
4762 (mult:TF (float_extend:TF (match_operand:DF 1 "register_operand" "e"))
4763 (float_extend:TF (match_operand:DF 2 "register_operand" "e"))))]
4764 "(TARGET_V8 || TARGET_V9) && TARGET_FPU && TARGET_HARD_QUAD"
4765 "fdmulq %1,%2,%0"
4766 [(set_attr "type" "fpmul")])
4767
4768 ;; don't have timing for quad-prec. divide.
4769 (define_insn "divtf3"
4770 [(set (match_operand:TF 0 "register_operand" "=e")
4771 (div:TF (match_operand:TF 1 "register_operand" "e")
4772 (match_operand:TF 2 "register_operand" "e")))]
4773 "TARGET_FPU && TARGET_HARD_QUAD"
4774 "fdivq %1,%2,%0"
4775 [(set_attr "type" "fpdivd")])
4776
4777 (define_insn "divdf3"
4778 [(set (match_operand:DF 0 "register_operand" "=e")
4779 (div:DF (match_operand:DF 1 "register_operand" "e")
4780 (match_operand:DF 2 "register_operand" "e")))]
4781 "TARGET_FPU"
4782 "fdivd %1,%2,%0"
4783 [(set_attr "type" "fpdivd")])
4784
4785 (define_insn "divsf3"
4786 [(set (match_operand:SF 0 "register_operand" "=f")
4787 (div:SF (match_operand:SF 1 "register_operand" "f")
4788 (match_operand:SF 2 "register_operand" "f")))]
4789 "TARGET_FPU"
4790 "fdivs %1,%2,%0"
4791 [(set_attr "type" "fpdivs")])
4792
4793 (define_insn "negtf2"
4794 [(set (match_operand:TF 0 "register_operand" "=e,e")
4795 (neg:TF (match_operand:TF 1 "register_operand" "0,e")))]
4796 ; We don't use quad float insns here so we don't need TARGET_HARD_QUAD.
4797 "TARGET_FPU"
4798 "*
4799 {
4800 /* v9: can't use fnegs, won't work with upper regs. */
4801 if (which_alternative == 0)
4802 return TARGET_V9 ? \"fnegd %0,%0\" : \"fnegs %0,%0\";
4803 else
4804 return TARGET_V9 ? \"fnegd %1,%0\;fmovd %S1,%S0\"
4805 : \"fnegs %1,%0\;fmovs %R1,%R0\;fmovs %S1,%S0\;fmovs %T1,%T0\";
4806 }"
4807 [(set_attr "type" "fp")
4808 (set_attr_alternative "length"
4809 [(const_int 1)
4810 (if_then_else (eq_attr "isa" "v9") (const_int 2) (const_int 4))])])
4811
4812 (define_insn "negdf2"
4813 [(set (match_operand:DF 0 "register_operand" "=e,e")
4814 (neg:DF (match_operand:DF 1 "register_operand" "0,e")))]
4815 "TARGET_FPU"
4816 "*
4817 {
4818 if (TARGET_V9)
4819 return \"fnegd %1,%0\";
4820 else if (which_alternative == 0)
4821 return \"fnegs %0,%0\";
4822 else
4823 return \"fnegs %1,%0\;fmovs %R1,%R0\";
4824 }"
4825 [(set_attr "type" "fp")
4826 (set_attr_alternative "length"
4827 [(const_int 1)
4828 (if_then_else (eq_attr "isa" "v9") (const_int 1) (const_int 2))])])
4829
4830 (define_insn "negsf2"
4831 [(set (match_operand:SF 0 "register_operand" "=f")
4832 (neg:SF (match_operand:SF 1 "register_operand" "f")))]
4833 "TARGET_FPU"
4834 "fnegs %1,%0"
4835 [(set_attr "type" "fp")])
4836
4837 (define_insn "abstf2"
4838 [(set (match_operand:TF 0 "register_operand" "=e,e")
4839 (abs:TF (match_operand:TF 1 "register_operand" "0,e")))]
4840 ; We don't use quad float insns here so we don't need TARGET_HARD_QUAD.
4841 "TARGET_FPU"
4842 "*
4843 {
4844 /* v9: can't use fabss, won't work with upper regs. */
4845 if (which_alternative == 0)
4846 return TARGET_V9 ? \"fabsd %0,%0\" : \"fabss %0,%0\";
4847 else
4848 return TARGET_V9 ? \"fabsd %1,%0\;fmovd %S1,%S0\"
4849 : \"fabss %1,%0\;fmovs %R1,%R0\;fmovs %S1,%S0\;fmovs %T1,%T0\";
4850 }"
4851 [(set_attr "type" "fp")
4852 (set_attr_alternative "length"
4853 [(const_int 1)
4854 (if_then_else (eq_attr "isa" "v9") (const_int 2) (const_int 4))])])
4855
4856 (define_insn "absdf2"
4857 [(set (match_operand:DF 0 "register_operand" "=e,e")
4858 (abs:DF (match_operand:DF 1 "register_operand" "0,e")))]
4859 "TARGET_FPU"
4860 "*
4861 {
4862 if (TARGET_V9)
4863 return \"fabsd %1,%0\";
4864 else if (which_alternative == 0)
4865 return \"fabss %0,%0\";
4866 else
4867 return \"fabss %1,%0\;fmovs %R1,%R0\";
4868 }"
4869 [(set_attr "type" "fp")
4870 (set_attr_alternative "length"
4871 [(const_int 1)
4872 (if_then_else (eq_attr "isa" "v9") (const_int 1) (const_int 2))])])
4873
4874 (define_insn "abssf2"
4875 [(set (match_operand:SF 0 "register_operand" "=f")
4876 (abs:SF (match_operand:SF 1 "register_operand" "f")))]
4877 "TARGET_FPU"
4878 "fabss %1,%0"
4879 [(set_attr "type" "fp")])
4880
4881 (define_insn "sqrttf2"
4882 [(set (match_operand:TF 0 "register_operand" "=e")
4883 (sqrt:TF (match_operand:TF 1 "register_operand" "e")))]
4884 "TARGET_FPU && TARGET_HARD_QUAD"
4885 "fsqrtq %1,%0"
4886 [(set_attr "type" "fpsqrt")])
4887
4888 (define_insn "sqrtdf2"
4889 [(set (match_operand:DF 0 "register_operand" "=e")
4890 (sqrt:DF (match_operand:DF 1 "register_operand" "e")))]
4891 "TARGET_FPU"
4892 "fsqrtd %1,%0"
4893 [(set_attr "type" "fpsqrt")])
4894
4895 (define_insn "sqrtsf2"
4896 [(set (match_operand:SF 0 "register_operand" "=f")
4897 (sqrt:SF (match_operand:SF 1 "register_operand" "f")))]
4898 "TARGET_FPU"
4899 "fsqrts %1,%0"
4900 [(set_attr "type" "fpsqrt")])
4901 \f
4902 ;;- arithmetic shift instructions
4903
4904 (define_insn "ashlsi3"
4905 [(set (match_operand:SI 0 "register_operand" "=r")
4906 (ashift:SI (match_operand:SI 1 "register_operand" "r")
4907 (match_operand:SI 2 "arith_operand" "rI")))]
4908 ""
4909 "*
4910 {
4911 if (GET_CODE (operands[2]) == CONST_INT
4912 && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) > 31)
4913 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
4914
4915 return \"sll %1,%2,%0\";
4916 }"
4917 [(set_attr "type" "shift")])
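;; Note that the shift patterns here mask constant shift counts down to the
;; width of the shift, so e.g. (a sketch) an SImode shift by 33 is emitted as
;;	sll %o0,1,%o0
;; rather than with an out-of-range count.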
4918
4919 (define_insn "ashldi3"
4920 [(set (match_operand:DI 0 "register_operand" "=r")
4921 (ashift:DI (match_operand:DI 1 "register_operand" "r")
4922 (match_operand:SI 2 "arith_operand" "rI")))]
4923 "TARGET_ARCH64"
4924 "*
4925 {
4926 if (GET_CODE (operands[2]) == CONST_INT
4927 && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) > 31)
4928 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
4929
4930 return \"sllx %1,%2,%0\";
4931 }")
4932
4933 (define_insn "*cmp_cc_ashift_1"
4934 [(set (reg:CC_NOOV 100)
4935 (compare:CC_NOOV (ashift:SI (match_operand:SI 0 "register_operand" "r")
4936 (const_int 1))
4937 (const_int 0)))]
4938 ""
4939 "addcc %0,%0,%%g0"
4940 [(set_attr "type" "compare")])
4941
4942 (define_insn "*cmp_cc_set_ashift_1"
4943 [(set (reg:CC_NOOV 100)
4944 (compare:CC_NOOV (ashift:SI (match_operand:SI 1 "register_operand" "r")
4945 (const_int 1))
4946 (const_int 0)))
4947 (set (match_operand:SI 0 "register_operand" "=r")
4948 (ashift:SI (match_dup 1) (const_int 1)))]
4949 ""
4950 "addcc %1,%1,%0")
4951
4952 (define_insn "ashrsi3"
4953 [(set (match_operand:SI 0 "register_operand" "=r")
4954 (ashiftrt:SI (match_operand:SI 1 "register_operand" "r")
4955 (match_operand:SI 2 "arith_operand" "rI")))]
4956 ""
4957 "*
4958 {
4959 if (GET_CODE (operands[2]) == CONST_INT
4960 && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) > 31)
4961 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
4962
4963 return \"sra %1,%2,%0\";
4964 }"
4965 [(set_attr "type" "shift")])
4966
4967 (define_insn "ashrdi3"
4968 [(set (match_operand:DI 0 "register_operand" "=r")
4969 (ashiftrt:DI (match_operand:DI 1 "register_operand" "r")
4970 (match_operand:SI 2 "arith_operand" "rI")))]
4971 "TARGET_ARCH64"
4972 "*
4973 {
4974 if (GET_CODE (operands[2]) == CONST_INT
4975 && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) > 63)
4976 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
4977
4978 return \"srax %1,%2,%0\";
4979 }")
4980
4981 (define_insn "lshrsi3"
4982 [(set (match_operand:SI 0 "register_operand" "=r")
4983 (lshiftrt:SI (match_operand:SI 1 "register_operand" "r")
4984 (match_operand:SI 2 "arith_operand" "rI")))]
4985 ""
4986 "*
4987 {
4988 if (GET_CODE (operands[2]) == CONST_INT
4989 && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) > 31)
4990 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
4991
4992 return \"srl %1,%2,%0\";
4993 }"
4994 [(set_attr "type" "shift")])
4995
4996 (define_insn "lshrdi3"
4997 [(set (match_operand:DI 0 "register_operand" "=r")
4998 (lshiftrt:DI (match_operand:DI 1 "register_operand" "r")
4999 (match_operand:SI 2 "arith_operand" "rI")))]
5000 "TARGET_ARCH64"
5001 "*
5002 {
5003 if (GET_CODE (operands[2]) == CONST_INT
5004 && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) > 63)
5005 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
5006
5007 return \"srlx %1,%2,%0\";
5008 }")
5009 \f
5010 ;; Unconditional and other jump instructions
5011 ;; On the Sparc, by setting the annul bit on an unconditional branch, the
5012 ;; following insn is never executed. This saves us a nop. Dbx does not
5013 ;; handle such branches though, so we only use them when optimizing.
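;; For example (a sketch), when the delay slot cannot otherwise be filled,
;;	b,a .LL5		! annulled: the slot insn is never executed
;; replaces
;;	b .LL5
;;	 nop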
5014 (define_insn "jump"
5015 [(set (pc) (label_ref (match_operand 0 "" "")))]
5016 ""
5017 "*
5018 {
5019 /* Some implementations are reported to have problems with
5020 foo: b,a foo
5021 i.e. an empty loop with the annul bit set. The workaround is to use
5022 foo: b foo; nop
5023 instead. */
5024
5025 if (flag_delayed_branch
5026 && (insn_addresses[INSN_UID (operands[0])]
5027 == insn_addresses[INSN_UID (insn)]))
5028 return \"b %l0%#\";
5029 else
5030 return \"b%* %l0%(\";
5031 }"
5032 [(set_attr "type" "uncond_branch")])
5033
5034 (define_expand "tablejump"
5035 [(parallel [(set (pc) (match_operand 0 "register_operand" "r"))
5036 (use (label_ref (match_operand 1 "" "")))])]
5037 ""
5038 "
5039 {
5040 if (GET_MODE (operands[0]) != Pmode)
5041 abort ();
5042
5043 /* We need to use the PC value in %o7 that was set up when the address
5044 of the label was loaded into a register, so we need different RTL. */
5045 if (flag_pic)
5046 {
5047 if (!TARGET_PTR64)
5048 emit_jump_insn (gen_pic_tablejump_32 (operands[0], operands[1]));
5049 else
5050 emit_jump_insn (gen_pic_tablejump_64 (operands[0], operands[1]));
5051 DONE;
5052 }
5053 }")
5054
5055 (define_insn "pic_tablejump_32"
5056 [(set (pc) (match_operand:SI 0 "register_operand" "r"))
5057 (use (label_ref (match_operand 1 "" "")))
5058 (use (reg:SI 15))]
5059 "! TARGET_PTR64"
5060 "jmp %%o7+%0%#"
5061 [(set_attr "type" "uncond_branch")])
5062
5063 (define_insn "pic_tablejump_64"
5064 [(set (pc) (match_operand:DI 0 "register_operand" "r"))
5065 (use (label_ref (match_operand 1 "" "")))
5066 (use (reg:DI 15))]
5067 "TARGET_PTR64"
5068 "jmp %%o7+%0%#"
5069 [(set_attr "type" "uncond_branch")])
5070
5071 (define_insn "*tablejump_sp32"
5072 [(set (pc) (match_operand:SI 0 "address_operand" "p"))
5073 (use (label_ref (match_operand 1 "" "")))]
5074 "! TARGET_PTR64"
5075 "jmp %a0%#"
5076 [(set_attr "type" "uncond_branch")])
5077
5078 (define_insn "*tablejump_sp64"
5079 [(set (pc) (match_operand:DI 0 "address_operand" "p"))
5080 (use (label_ref (match_operand 1 "" "")))]
5081 "TARGET_PTR64"
5082 "jmp %a0%#"
5083 [(set_attr "type" "uncond_branch")])
5084
5085 ;; This pattern recognizes the "instruction" that appears in
5086 ;; a function call that wants a structure value,
5087 ;; to inform the called function if compiled with Sun CC.
5088 ;(define_insn "*unimp_insn"
5089 ; [(match_operand:SI 0 "immediate_operand" "")]
5090 ; "GET_CODE (operands[0]) == CONST_INT && INTVAL (operands[0]) > 0"
5091 ; "unimp %0"
5092 ; [(set_attr "type" "marker")])
5093
5094 ;;- jump to subroutine
5095 (define_expand "call"
5096 ;; Note that this expression is not used for generating RTL.
5097 ;; All the RTL is generated explicitly below.
5098 [(call (match_operand 0 "call_operand" "")
5099 (match_operand 3 "" "i"))]
5100 ;; operands[2] is next_arg_register
5101 ;; operands[3] is struct_value_size_rtx.
5102 ""
5103 "
5104 {
5105 rtx fn_rtx, nregs_rtx;
5106
5107 if (GET_MODE (operands[0]) != FUNCTION_MODE)
5108 abort ();
5109
5110 if (GET_CODE (XEXP (operands[0], 0)) == LABEL_REF)
5111 {
5112 /* This is really a PIC sequence. We want to represent
5113 it as a funny jump so its delay slots can be filled.
5114
5115 ??? But if this really *is* a CALL, won't it clobber the
5116 call-clobbered registers? We lose that if it is a JUMP_INSN.
5117 Why can't we have delay slots filled if this were a CALL? */
5118
5119 if (! TARGET_ARCH64 && INTVAL (operands[3]) != 0)
5120 emit_jump_insn
5121 (gen_rtx (PARALLEL, VOIDmode,
5122 gen_rtvec (3,
5123 gen_rtx (SET, VOIDmode, pc_rtx,
5124 XEXP (operands[0], 0)),
5125 GEN_INT (INTVAL (operands[3]) & 0xfff),
5126 gen_rtx (CLOBBER, VOIDmode,
5127 gen_rtx (REG, Pmode, 15)))));
5128 else
5129 emit_jump_insn
5130 (gen_rtx (PARALLEL, VOIDmode,
5131 gen_rtvec (2,
5132 gen_rtx (SET, VOIDmode, pc_rtx,
5133 XEXP (operands[0], 0)),
5134 gen_rtx (CLOBBER, VOIDmode,
5135 gen_rtx (REG, Pmode, 15)))));
5136 goto finish_call;
5137 }
5138
5139 fn_rtx = operands[0];
5140
5141 /* Count the number of parameter registers being used by this call.
5142 If operands[2] is NULL, it means we are using them all, which
5143 means 6 on the SPARC. */
5144 #if 0
5145 if (operands[2])
5146 nregs_rtx = GEN_INT (REGNO (operands[2]) - 8);
5147 else
5148 nregs_rtx = GEN_INT (6);
5149 #else
5150 nregs_rtx = const0_rtx;
5151 #endif
5152
5153 if (! TARGET_ARCH64 && INTVAL (operands[3]) != 0)
5154 emit_call_insn
5155 (gen_rtx (PARALLEL, VOIDmode,
5156 gen_rtvec (3, gen_rtx (CALL, VOIDmode, fn_rtx, nregs_rtx),
5157 GEN_INT (INTVAL (operands[3]) & 0xfff),
5158 gen_rtx (CLOBBER, VOIDmode,
5159 gen_rtx (REG, Pmode, 15)))));
5160 else
5161 emit_call_insn
5162 (gen_rtx (PARALLEL, VOIDmode,
5163 gen_rtvec (2, gen_rtx (CALL, VOIDmode, fn_rtx, nregs_rtx),
5164 gen_rtx (CLOBBER, VOIDmode,
5165 gen_rtx (REG, Pmode, 15)))));
5166
5167 finish_call:
5168 #if 0
5169 /* If this call wants a structure value,
5170 emit an unimp insn to let the called function know about this. */
5171 if (! TARGET_ARCH64 && INTVAL (operands[3]) > 0)
5172 {
5173 rtx insn = emit_insn (operands[3]);
5174 SCHED_GROUP_P (insn) = 1;
5175 }
5176 #endif
5177
5178 DONE;
5179 }")
5180
5181 ;; We can't use the same pattern for these two insns, because then registers
5182 ;; in the address may not be properly reloaded.
5183
5184 (define_insn "*call_address_sp32"
5185 [(call (mem:SI (match_operand:SI 0 "address_operand" "p"))
5186 (match_operand 1 "" ""))
5187 (clobber (reg:SI 15))]
5188 ;;- Do not use operand 1 for most machines.
5189 "! TARGET_PTR64"
5190 "call %a0,%1%#"
5191 [(set_attr "type" "call")])
5192
5193 (define_insn "*call_symbolic_sp32"
5194 [(call (mem:SI (match_operand:SI 0 "symbolic_operand" "s"))
5195 (match_operand 1 "" ""))
5196 (clobber (reg:SI 15))]
5197 ;;- Do not use operand 1 for most machines.
5198 "! TARGET_PTR64"
5199 "call %a0,%1%#"
5200 [(set_attr "type" "call")])
5201
5202 (define_insn "*call_address_sp64"
5203 [(call (mem:SI (match_operand:DI 0 "address_operand" "p"))
5204 (match_operand 1 "" ""))
5205 (clobber (reg:DI 15))]
5206 ;;- Do not use operand 1 for most machines.
5207 "TARGET_PTR64"
5208 "call %a0,%1%#"
5209 [(set_attr "type" "call")])
5210
5211 (define_insn "*call_symbolic_sp64"
5212 [(call (mem:SI (match_operand:DI 0 "symbolic_operand" "s"))
5213 (match_operand 1 "" ""))
5214 (clobber (reg:DI 15))]
5215 ;;- Do not use operand 1 for most machines.
5216 "TARGET_PTR64"
5217 "call %a0,%1%#"
5218 [(set_attr "type" "call")])
5219
5220 ;; This is a call that wants a structure value.
5221 ;; There is no such critter for v9 (??? we may need one anyway).
5222 (define_insn "*call_address_struct_value_sp32"
5223 [(call (mem:SI (match_operand:SI 0 "address_operand" "p"))
5224 (match_operand 1 "" ""))
5225 (match_operand 2 "immediate_operand" "")
5226 (clobber (reg:SI 15))]
5227 ;;- Do not use operand 1 for most machines.
5228 "! TARGET_ARCH64 && GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) >= 0"
5229 "call %a0,%1\;nop\;unimp %2"
5230 [(set_attr "type" "call_no_delay_slot")])
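;; For illustration (hypothetical callee and size): the template above lays
;; the call site out as
;;
;;         call  foo,0
;;          nop              ! delay slot
;;         unimp 12          ! low bits carry the structure-value size
;;
;; and a callee returning a struct comes back to %i7+12 (see the *return_*
;; patterns near the end of this file), skipping the unimp word.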
5231
5232 ;; This is a call that wants a structure value.
5233 ;; There is no such critter for v9 (??? we may need one anyway).
5234 (define_insn "*call_symbolic_struct_value_sp32"
5235 [(call (mem:SI (match_operand:SI 0 "symbolic_operand" "s"))
5236 (match_operand 1 "" ""))
5237 (match_operand 2 "immediate_operand" "")
5238 (clobber (reg:SI 15))]
5239 ;;- Do not use operand 1 for most machines.
5240 "! TARGET_ARCH64 && GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) >= 0"
5241 "call %a0,%1\;nop\;unimp %2"
5242 [(set_attr "type" "call_no_delay_slot")])
5243
5244 ;; This is a call that may want a structure value. This is used for
5245 ;; untyped_calls.
5246 (define_insn "*call_address_untyped_struct_value_sp32"
5247 [(call (mem:SI (match_operand:SI 0 "address_operand" "p"))
5248 (match_operand 1 "" ""))
5249 (match_operand 2 "immediate_operand" "")
5250 (clobber (reg:SI 15))]
5251 ;;- Do not use operand 1 for most machines.
5252 "! TARGET_ARCH64 && GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) < 0"
5253 "call %a0,%1\;nop\;nop"
5254 [(set_attr "type" "call_no_delay_slot")])
5255
5256 ;; This is a call that wants a structure value.
5257 (define_insn "*call_symbolic_untyped_struct_value_sp32"
5258 [(call (mem:SI (match_operand:SI 0 "symbolic_operand" "s"))
5259 (match_operand 1 "" ""))
5260 (match_operand 2 "immediate_operand" "")
5261 (clobber (reg:SI 15))]
5262 ;;- Do not use operand 1 for most machines.
5263 "! TARGET_ARCH64 && GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) < 0"
5264 "call %a0,%1\;nop\;nop"
5265 [(set_attr "type" "call_no_delay_slot")])
5266
5267 (define_expand "call_value"
5268 ;; Note that this expression is not used for generating RTL.
5269 ;; All the RTL is generated explicitly below.
5270 [(set (match_operand 0 "register_operand" "=rf")
5271 (call (match_operand:SI 1 "" "")
5272 (match_operand 4 "" "")))]
5273 ;; operand 2 is stack_size_rtx
5274 ;; operand 3 is next_arg_register
5275 ""
5276 "
5277 {
5278 rtx fn_rtx, nregs_rtx;
5279 rtvec vec;
5280
5281 if (GET_MODE (operands[1]) != FUNCTION_MODE)
5282 abort ();
5283
5284 fn_rtx = operands[1];
5285
5286 #if 0
5287 if (operands[3])
5288 nregs_rtx = gen_rtx (CONST_INT, VOIDmode, REGNO (operands[3]) - 8);
5289 else
5290 nregs_rtx = gen_rtx (CONST_INT, VOIDmode, 6);
5291 #else
5292 nregs_rtx = const0_rtx;
5293 #endif
5294
5295 vec = gen_rtvec (2,
5296 gen_rtx (SET, VOIDmode, operands[0],
5297 gen_rtx (CALL, VOIDmode, fn_rtx, nregs_rtx)),
5298 gen_rtx (CLOBBER, VOIDmode, gen_rtx (REG, Pmode, 15)));
5299
5300 emit_call_insn (gen_rtx (PARALLEL, VOIDmode, vec));
5301
5302 DONE;
5303 }")
5304
5305 (define_insn "*call_value_address_sp32"
5306 [(set (match_operand 0 "" "=rf")
5307 (call (mem:SI (match_operand:SI 1 "address_operand" "p"))
5308 (match_operand 2 "" "")))
5309 (clobber (reg:SI 15))]
5310 ;;- Do not use operand 2 for most machines.
5311 "! TARGET_PTR64"
5312 "call %a1,%2%#"
5313 [(set_attr "type" "call")])
5314
5315 (define_insn "*call_value_symbolic_sp32"
5316 [(set (match_operand 0 "" "=rf")
5317 (call (mem:SI (match_operand:SI 1 "symbolic_operand" "s"))
5318 (match_operand 2 "" "")))
5319 (clobber (reg:SI 15))]
5320 ;;- Do not use operand 2 for most machines.
5321 "! TARGET_PTR64"
5322 "call %a1,%2%#"
5323 [(set_attr "type" "call")])
5324
5325 (define_insn "*call_value_address_sp64"
5326 [(set (match_operand 0 "" "=rf")
5327 (call (mem:SI (match_operand:DI 1 "address_operand" "p"))
5328 (match_operand 2 "" "")))
5329 (clobber (reg:DI 15))]
5330 ;;- Do not use operand 2 for most machines.
5331 "TARGET_PTR64"
5332 "call %a1,%2%#"
5333 [(set_attr "type" "call")])
5334
5335 (define_insn "*call_value_symbolic_sp64"
5336 [(set (match_operand 0 "" "=rf")
5337 (call (mem:SI (match_operand:DI 1 "symbolic_operand" "s"))
5338 (match_operand 2 "" "")))
5339 (clobber (reg:DI 15))]
5340 ;;- Do not use operand 2 for most machines.
5341 "TARGET_PTR64"
5342 "call %a1,%2%#"
5343 [(set_attr "type" "call")])
5344
5345 (define_expand "untyped_call"
5346 [(parallel [(call (match_operand 0 "" "")
5347 (const_int 0))
5348 (match_operand 1 "" "")
5349 (match_operand 2 "" "")])]
5350 ""
5351 "
5352 {
5353 int i;
5354
5355 /* Pass constm1 to indicate that the call may want a structure value, but
5356 we don't know what size it is. */
5357 emit_call_insn (gen_call (operands[0], const0_rtx, NULL, constm1_rtx));
5358
5359 for (i = 0; i < XVECLEN (operands[2], 0); i++)
5360 {
5361 rtx set = XVECEXP (operands[2], 0, i);
5362 emit_move_insn (SET_DEST (set), SET_SRC (set));
5363 }
5364
5365 /* The optimizer does not know that the call sets the function value
5366 registers we stored in the result block. We avoid problems by
5367 claiming that all hard registers are used and clobbered at this
5368 point. */
5369 emit_insn (gen_blockage ());
5370
5371 DONE;
5372 }")
5373
5374 ;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
5375 ;; all of memory. This blocks insns from being moved across this point.
5376
5377 (define_insn "blockage"
5378 [(unspec_volatile [(const_int 0)] 0)]
5379 ""
5380 "")
5381
5382 ;; Prepare to return any type including a structure value.
5383
5384 (define_expand "untyped_return"
5385 [(match_operand:BLK 0 "memory_operand" "")
5386 (match_operand 1 "" "")]
5387 ""
5388 "
5389 {
5390 rtx valreg1 = gen_rtx (REG, DImode, 24);
5391 rtx valreg2 = gen_rtx (REG, TARGET_ARCH64 ? TFmode : DFmode, 32);
5392 rtx result = operands[0];
5393
5394 if (! TARGET_ARCH64)
5395 {
5396 rtx rtnreg = gen_rtx (REG, SImode, (leaf_function ? 15 : 31));
5397 rtx value = gen_reg_rtx (SImode);
5398
5399 /* Fetch the instruction we will return to and see if it's an unimp
5400 instruction (its most significant 10 bits will be zero). If so,
5401 update the return address to skip the unimp instruction. */
5402 emit_move_insn (value,
5403 gen_rtx (MEM, SImode, plus_constant (rtnreg, 8)));
5404 emit_insn (gen_lshrsi3 (value, value, GEN_INT (22)));
5405 emit_insn (gen_update_return (rtnreg, value));
5406 }
5407
5408 /* Reload the function value registers. */
5409 emit_move_insn (valreg1, change_address (result, DImode, XEXP (result, 0)));
5410 emit_move_insn (valreg2,
5411 change_address (result, TARGET_ARCH64 ? TFmode : DFmode,
5412 plus_constant (XEXP (result, 0), 8)));
5413
5414 /* Put USE insns before the return. */
5415 emit_insn (gen_rtx (USE, VOIDmode, valreg1));
5416 emit_insn (gen_rtx (USE, VOIDmode, valreg2));
5417
5418 /* Construct the return. */
5419 expand_null_return ();
5420
5421 DONE;
5422 }")
5423
5424 ;; This is a bit of a hack. We're incrementing a fixed register (%i7),
5425 ;; and parts of the compiler don't want to believe that the add is needed.
5426
5427 (define_insn "update_return"
5428 [(unspec:SI [(match_operand:SI 0 "register_operand" "r")
5429 (match_operand:SI 1 "register_operand" "r")] 0)]
5430 "! TARGET_ARCH64"
5431 "cmp %1,0\;be,a .+8\;add %0,4,%0"
5432 [(set_attr "type" "multi")])
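;; For illustration (a reading of the template above): after "cmp %1,0", the
;; annulled conditional branch "be,a .+8" executes its delay slot only when
;; it is taken, so "add %0,4,%0" bumps the return address past the unimp word
;; exactly when the fetched opcode bits were zero.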
5433 \f
5434 (define_insn "return"
5435 [(return)
5436 (use (reg:SI 31))]
5437 "! TARGET_EPILOGUE"
5438 "* return output_return (operands);"
5439 [(set_attr "type" "multi")])
5440
5441 (define_insn "nop"
5442 [(const_int 0)]
5443 ""
5444 "nop")
5445
5446 (define_expand "indirect_jump"
5447 [(set (pc) (match_operand 0 "address_operand" "p"))]
5448 ""
5449 "")
5450
5451 (define_insn "*branch_sp32"
5452 [(set (pc) (match_operand:SI 0 "address_operand" "p"))]
5453 "! TARGET_PTR64"
5454 "jmp %a0%#"
5455 [(set_attr "type" "uncond_branch")])
5456
5457 (define_insn "*branch_sp64"
5458 [(set (pc) (match_operand:DI 0 "address_operand" "p"))]
5459 "TARGET_PTR64"
5460 "jmp %a0%#"
5461 [(set_attr "type" "uncond_branch")])
5462
5463 ;; ??? Doesn't work with -mflat.
5464 (define_expand "nonlocal_goto"
5465 [(match_operand:SI 0 "general_operand" "")
5466 (match_operand:SI 1 "general_operand" "")
5467 (match_operand:SI 2 "general_operand" "")
5468 (match_operand:SI 3 "" "")]
5469 ""
5470 "
5471 {
5472 /* Trap instruction to flush all the register windows. */
5473 emit_insn (gen_flush_register_windows ());
5474 /* Load the fp value for the containing fn into %fp.
5475 This is needed because operands[2] refers to %fp.
5476 Virtual register instantiation fails if the virtual %fp isn't set from a
5477 register. Thus we must copy operands[0] into a register if it isn't
5478 already one. */
5479 if (GET_CODE (operands[0]) != REG)
5480 operands[0] = force_reg (Pmode, operands[0]);
5481 emit_move_insn (virtual_stack_vars_rtx, operands[0]);
5482 /* Find the containing function's current nonlocal goto handler,
5483 which will do any cleanups and then jump to the label. */
5484 emit_move_insn (gen_rtx (REG, Pmode, 8), operands[1]);
5485 /* Restore %fp from stack pointer value for containing function.
5486 The restore insn that follows will move this to %sp,
5487 and reload the appropriate value into %fp. */
5488 emit_move_insn (frame_pointer_rtx, operands[2]);
5489 /* Put in the static chain register the nonlocal label address. */
5490 emit_move_insn (static_chain_rtx, operands[3]);
5491 /* USE of frame_pointer_rtx added for consistency; not clear if
5492 really needed. */
5493 emit_insn (gen_rtx (USE, VOIDmode, frame_pointer_rtx));
5494 emit_insn (gen_rtx (USE, VOIDmode, stack_pointer_rtx));
5495 emit_insn (gen_rtx (USE, VOIDmode, static_chain_rtx));
5496 /* Return, restoring reg window and jumping to goto handler. */
5497 emit_insn (gen_goto_handler_and_restore ());
5498 emit_barrier ();
5499 DONE;
5500 }")
5501
5502 ;; Special trap insn to flush register windows.
5503 (define_insn "flush_register_windows"
5504 [(unspec_volatile [(const_int 0)] 1)]
5505 ""
5506 ;; ??? Use TARGET_V9 instead?
5507 "* return TARGET_ARCH64 ? \"flushw\" : \"ta 3\";"
5508 [(set_attr "type" "misc")])
5509
5510 (define_insn "goto_handler_and_restore"
5511 [(unspec_volatile [(const_int 0)] 2)
5512 (use (reg:SI 8))]
5513 ""
5514 "jmp %%o0+0\;restore"
5515 [(set_attr "type" "misc")
5516 (set_attr "length" "2")])
5517
5518 ;; Pattern for use after a setjmp to store FP and the return register
5519 ;; into the stack area.
5520
5521 (define_expand "setjmp"
5522 [(const_int 0)]
5523 ""
5524 "
5525 {
5526 if (TARGET_ARCH64)
5527 emit_insn (gen_setjmp_64 ());
5528 else
5529 emit_insn (gen_setjmp_32 ());
5530
5531 DONE;
5532 }")
5533
5534 (define_expand "setjmp_32"
5535 [(set (mem:SI (plus:SI (reg:SI 14) (const_int 56))) (match_dup 0))
5536 (set (mem:SI (plus:SI (reg:SI 14) (const_int 60))) (reg:SI 31))]
5537 ""
5538 "
5539 { operands[0] = frame_pointer_rtx; }")
5540
5541 (define_expand "setjmp_64"
5542 [(set (mem:DI (plus:DI (reg:DI 14) (const_int 112))) (match_dup 0))
5543 (set (mem:DI (plus:DI (reg:DI 14) (const_int 120))) (reg:DI 31))]
5544 ""
5545 "
5546 { operands[0] = frame_pointer_rtx; }")
5547
5548 ;; Special pattern for the FLUSH instruction.
5549
5550 (define_insn "flush"
5551 [(unspec_volatile [(match_operand 0 "memory_operand" "m")] 3)]
5552 ""
5553 "* return TARGET_V9 ? \"flush %f0\" : \"iflush %f0\";"
5554 [(set_attr "type" "misc")])
5555 \f
5556 ;; find first set.
5557
5558 ;; The scan instruction searches from the most significant bit while ffs
5559 ;; searches from the least significant bit. The bit index and treatment of
5560 ;; zero also differ. It takes at least 7 instructions to get the proper
5561 ;; result. Here is an obvious 8-instruction sequence.
5562
5563 (define_insn "ffssi2"
5564 [(set (match_operand:SI 0 "register_operand" "=&r")
5565 (ffs:SI (match_operand:SI 1 "register_operand" "r")))
5566 (clobber (match_scratch:SI 2 "=&r"))]
5567 "TARGET_SPARCLITE || TARGET_SPARCLET"
5568 "*
5569 {
5570 if (TARGET_LIVE_G0)
5571 output_asm_insn (\"and %%g0,0,%%g0\", operands);
5572 return \"sub %%g0,%1,%0\;and %0,%1,%0\;scan %0,0,%0\;mov 32,%2\;sub %2,%0,%0\;sra %0,31,%2\;and %2,31,%2\;add %2,%0,%0\";
5573 }"
5574 [(set_attr "type" "multi")
5575 (set_attr "length" "8")])
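;; For illustration, a reading of the sequence above (the scan semantics are
;; taken from the comment, not re-checked against the manuals):
;;
;;         sub  %g0,%1,%0        ! %0 = -x
;;         and  %0,%1,%0         ! %0 = x & -x, the lowest set bit of x
;;         scan %0,0,%0          ! locate that bit, counting from the msb
;;         mov  32,%2
;;         sub  %2,%0,%0         ! convert to an lsb-relative index
;;         sra  %0,31,%2
;;         and  %2,31,%2
;;         add  %2,%0,%0         ! fold the x == 0 case back to the ffs result 0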
5576
5577 ;; ??? This should be a define_expand, so that the extra instructions have
5578 ;; a chance of being optimized away.
5579
5580 (define_insn "ffsdi2"
5581 [(set (match_operand:DI 0 "register_operand" "=&r")
5582 (ffs:DI (match_operand:DI 1 "register_operand" "r")))
5583 (clobber (match_scratch:DI 2 "=&r"))]
5584 "TARGET_ARCH64"
5585 "neg %1,%2\;not %2,%2\;xor %1,%2,%2\;popc %2,%0\;movrz %1,0,%0"
5586 [(set_attr "type" "multi")
5587 (set_attr "length" "5")])
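;; For illustration, a reading of the sequence above (an interpretation, not
;; part of the original comments):
;;
;;         neg   %1,%2           ! %2 = -x
;;         not   %2,%2           ! %2 = ~(-x) = x - 1
;;         xor   %1,%2,%2        ! mask of bits up to and including the lowest set bit
;;         popc  %2,%0           ! population count of the mask = ffs(x) for x != 0
;;         movrz %1,0,%0         ! force the result to 0 when x == 0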
5588 \f
5589 ;; Split up troublesome insns for better scheduling.
5590
5591 ;; The following patterns are straightforward. They can be applied
5592 ;; either before or after register allocation.
5593
5594 (define_split
5595 [(set (match_operand 0 "splittable_symbolic_memory_operand" "")
5596 (match_operand 1 "reg_or_0_operand" ""))
5597 (clobber (match_operand:SI 2 "register_operand" ""))]
5598 "! flag_pic"
5599 [(set (match_dup 2) (high:SI (match_dup 3)))
5600 (set (match_dup 4) (match_dup 1))]
5601 "
5602 {
5603 operands[3] = XEXP (operands[0], 0);
5604 operands[4] = gen_rtx (MEM, GET_MODE (operands[0]),
5605 gen_rtx (LO_SUM, SImode, operands[2], operands[3]));
5606 MEM_IN_STRUCT_P (operands[4]) = MEM_IN_STRUCT_P (operands[0]);
5607 MEM_VOLATILE_P (operands[4]) = MEM_VOLATILE_P (operands[0]);
5608 RTX_UNCHANGING_P (operands[4]) = RTX_UNCHANGING_P (operands[0]);
5609 }")
5610
5611 (define_split
5612 [(set (match_operand 0 "splittable_immediate_memory_operand" "")
5613 (match_operand 1 "general_operand" ""))
5614 (clobber (match_operand:SI 2 "register_operand" ""))]
5615 "flag_pic"
5616 [(set (match_dup 3) (match_dup 1))]
5617 "
5618 {
5619 rtx addr = legitimize_pic_address (XEXP (operands[0], 0),
5620 GET_MODE (operands[0]),
5621 operands[2]);
5622 operands[3] = gen_rtx (MEM, GET_MODE (operands[0]), addr);
5623 MEM_IN_STRUCT_P (operands[3]) = MEM_IN_STRUCT_P (operands[0]);
5624 MEM_VOLATILE_P (operands[3]) = MEM_VOLATILE_P (operands[0]);
5625 RTX_UNCHANGING_P (operands[3]) = RTX_UNCHANGING_P (operands[0]);
5626 }")
5627
5628 (define_split
5629 [(set (match_operand 0 "register_operand" "")
5630 (match_operand 1 "splittable_immediate_memory_operand" ""))]
5631 "flag_pic"
5632 [(set (match_dup 0) (match_dup 2))]
5633 "
5634 {
5635 rtx addr = legitimize_pic_address (XEXP (operands[1], 0),
5636 GET_MODE (operands[1]),
5637 operands[0]);
5638 operands[2] = gen_rtx (MEM, GET_MODE (operands[1]), addr);
5639 MEM_IN_STRUCT_P (operands[2]) = MEM_IN_STRUCT_P (operands[1]);
5640 MEM_VOLATILE_P (operands[2]) = MEM_VOLATILE_P (operands[1]);
5641 RTX_UNCHANGING_P (operands[2]) = RTX_UNCHANGING_P (operands[1]);
5642 }")
5643
5644 ;; Sign- and Zero-extend operations can have symbolic memory operands.
5645
5646 (define_split
5647 [(set (match_operand 0 "register_operand" "")
5648 (match_operator 1 "extend_op" [(match_operand 2 "splittable_immediate_memory_operand" "")]))]
5649 "flag_pic"
5650 [(set (match_dup 0) (match_op_dup 1 [(match_dup 3)]))]
5651 "
5652 {
5653 rtx addr = legitimize_pic_address (XEXP (operands[2], 0),
5654 GET_MODE (operands[2]),
5655 operands[0]);
5656 operands[3] = gen_rtx (MEM, GET_MODE (operands[2]), addr);
5657 MEM_IN_STRUCT_P (operands[3]) = MEM_IN_STRUCT_P (operands[2]);
5658 MEM_VOLATILE_P (operands[3]) = MEM_VOLATILE_P (operands[2]);
5659 RTX_UNCHANGING_P (operands[3]) = RTX_UNCHANGING_P (operands[2]);
5660 }")
5661
5662 (define_split
5663 [(set (match_operand:SI 0 "register_operand" "")
5664 (match_operand:SI 1 "immediate_operand" ""))]
5665 "! flag_pic && (GET_CODE (operands[1]) == SYMBOL_REF
5666 || GET_CODE (operands[1]) == CONST
5667 || GET_CODE (operands[1]) == LABEL_REF)"
5668 [(set (match_dup 0) (high:SI (match_dup 1)))
5669 (set (match_dup 0)
5670 (lo_sum:SI (match_dup 0) (match_dup 1)))]
5671 "")
5672
5673 ;; LABEL_REFs are not modified by `legitimize_pic_address'
5674 ;; so we do not recurse infinitely in the PIC case.
5675 (define_split
5676 [(set (match_operand:SI 0 "register_operand" "")
5677 (match_operand:SI 1 "immediate_operand" ""))]
5678 "flag_pic && (GET_CODE (operands[1]) == SYMBOL_REF
5679 || GET_CODE (operands[1]) == CONST)"
5680 [(set (match_dup 0) (match_dup 1))]
5681 "
5682 {
5683 operands[1] = legitimize_pic_address (operands[1], Pmode, operands[0]);
5684 }")
5685 \f
5686 ;; These split sne/seq insns. The forms of the resulting insns are
5687 ;; somewhat bogus, but they avoid extra patterns and show data dependency.
5688 ;; Nothing will look at these in detail after splitting has occurred.
5689
5690 ;; ??? v9 DImode versions are missing because addc and subc use %icc.
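;; For illustration (a sketch of why the splits work, not taken from the
;; original comment): computing (neg:SI x) while setting the condition codes
;; leaves the carry bit set exactly when x is nonzero, so "x != 0" becomes an
;; add-with-carry of zero (the ltu forms below) and "x == 0" its geu
;; complement.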
5691
5692 (define_split
5693 [(set (match_operand:SI 0 "register_operand" "")
5694 (ne:SI (match_operand:SI 1 "register_operand" "")
5695 (const_int 0)))
5696 (clobber (reg:CC 100))]
5697 ""
5698 [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
5699 (const_int 0)))
5700 (set (match_dup 0) (ltu:SI (reg:CC 100) (const_int 0)))]
5701 "")
5702
5703 (define_split
5704 [(set (match_operand:SI 0 "register_operand" "")
5705 (neg:SI (ne:SI (match_operand:SI 1 "register_operand" "")
5706 (const_int 0))))
5707 (clobber (reg:CC 100))]
5708 ""
5709 [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
5710 (const_int 0)))
5711 (set (match_dup 0) (neg:SI (ltu:SI (reg:CC 100) (const_int 0))))]
5712 "")
5713
5714 (define_split
5715 [(set (match_operand:SI 0 "register_operand" "")
5716 (eq:SI (match_operand:SI 1 "register_operand" "")
5717 (const_int 0)))
5718 (clobber (reg:CC 100))]
5719 ""
5720 [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
5721 (const_int 0)))
5722 (set (match_dup 0) (geu:SI (reg:CC 100) (const_int 0)))]
5723 "")
5724
5725 (define_split
5726 [(set (match_operand:SI 0 "register_operand" "")
5727 (neg:SI (eq:SI (match_operand:SI 1 "register_operand" "")
5728 (const_int 0))))
5729 (clobber (reg:CC 100))]
5730 ""
5731 [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
5732 (const_int 0)))
5733 (set (match_dup 0) (neg:SI (geu:SI (reg:CC 100) (const_int 0))))]
5734 "")
5735
5736 (define_split
5737 [(set (match_operand:SI 0 "register_operand" "")
5738 (plus:SI (ne:SI (match_operand:SI 1 "register_operand" "")
5739 (const_int 0))
5740 (match_operand:SI 2 "register_operand" "")))
5741 (clobber (reg:CC 100))]
5742 ""
5743 [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
5744 (const_int 0)))
5745 (set (match_dup 0) (plus:SI (ltu:SI (reg:CC 100) (const_int 0))
5746 (match_dup 2)))]
5747 "")
5748
5749 (define_split
5750 [(set (match_operand:SI 0 "register_operand" "")
5751 (minus:SI (match_operand:SI 2 "register_operand" "")
5752 (ne:SI (match_operand:SI 1 "register_operand" "")
5753 (const_int 0))))
5754 (clobber (reg:CC 100))]
5755 ""
5756 [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
5757 (const_int 0)))
5758 (set (match_dup 0) (minus:SI (match_dup 2)
5759 (ltu:SI (reg:CC 100) (const_int 0))))]
5760 "")
5761
5762 (define_split
5763 [(set (match_operand:SI 0 "register_operand" "")
5764 (plus:SI (eq:SI (match_operand:SI 1 "register_operand" "")
5765 (const_int 0))
5766 (match_operand:SI 2 "register_operand" "")))
5767 (clobber (reg:CC 100))]
5768 ""
5769 [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
5770 (const_int 0)))
5771 (set (match_dup 0) (plus:SI (geu:SI (reg:CC 100) (const_int 0))
5772 (match_dup 2)))]
5773 "")
5774
5775 (define_split
5776 [(set (match_operand:SI 0 "register_operand" "")
5777 (minus:SI (match_operand:SI 2 "register_operand" "")
5778 (eq:SI (match_operand:SI 1 "register_operand" "")
5779 (const_int 0))))
5780 (clobber (reg:CC 100))]
5781 ""
5782 [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
5783 (const_int 0)))
5784 (set (match_dup 0) (minus:SI (match_dup 2)
5785 (geu:SI (reg:CC 100) (const_int 0))))]
5786 "")
5787 \f
5788 ;; Peepholes go at the end.
5789
5790 ;; Optimize consecutive loads or stores into ldd and std when possible.
5791 ;; The conditions in which we do this are very restricted and are
5792 ;; explained in the code for the registers_ok_for_ldd_peep and addrs_ok_for_ldd_peep functions.
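;; For illustration (the exact conditions live in those helper functions; the
;; constraints named here are an assumption): a pair such as
;;
;;         ld  [%fp-8],%o0
;;         ld  [%fp-4],%o1
;;
;; can only become "ldd [%fp-8],%o0" when the destinations form an even/odd
;; register pair and the two addresses are adjacent and suitably aligned.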
5793
5794 (define_peephole
5795 [(set (match_operand:SI 0 "register_operand" "=rf")
5796 (match_operand:SI 1 "memory_operand" ""))
5797 (set (match_operand:SI 2 "register_operand" "=rf")
5798 (match_operand:SI 3 "memory_operand" ""))]
5799 "! TARGET_ARCH64
5800 && registers_ok_for_ldd_peep (operands[0], operands[2])
5801 && ! MEM_VOLATILE_P (operands[1]) && ! MEM_VOLATILE_P (operands[3])
5802 && addrs_ok_for_ldd_peep (XEXP (operands[1], 0), XEXP (operands[3], 0))"
5803 "ldd %1,%0")
5804
5805 (define_peephole
5806 [(set (match_operand:SI 0 "memory_operand" "")
5807 (match_operand:SI 1 "register_operand" "rf"))
5808 (set (match_operand:SI 2 "memory_operand" "")
5809 (match_operand:SI 3 "register_operand" "rf"))]
5810 "! TARGET_ARCH64
5811 && registers_ok_for_ldd_peep (operands[1], operands[3])
5812 && ! MEM_VOLATILE_P (operands[0]) && ! MEM_VOLATILE_P (operands[2])
5813 && addrs_ok_for_ldd_peep (XEXP (operands[0], 0), XEXP (operands[2], 0))"
5814 "std %1,%0")
5815
5816 (define_peephole
5817 [(set (match_operand:SF 0 "register_operand" "=fr")
5818 (match_operand:SF 1 "memory_operand" ""))
5819 (set (match_operand:SF 2 "register_operand" "=fr")
5820 (match_operand:SF 3 "memory_operand" ""))]
5821 "! TARGET_ARCH64
5822 && registers_ok_for_ldd_peep (operands[0], operands[2])
5823 && ! MEM_VOLATILE_P (operands[1]) && ! MEM_VOLATILE_P (operands[3])
5824 && addrs_ok_for_ldd_peep (XEXP (operands[1], 0), XEXP (operands[3], 0))"
5825 "ldd %1,%0")
5826
5827 (define_peephole
5828 [(set (match_operand:SF 0 "memory_operand" "")
5829 (match_operand:SF 1 "register_operand" "fr"))
5830 (set (match_operand:SF 2 "memory_operand" "")
5831 (match_operand:SF 3 "register_operand" "fr"))]
5832 "! TARGET_ARCH64
5833 && registers_ok_for_ldd_peep (operands[1], operands[3])
5834 && ! MEM_VOLATILE_P (operands[0]) && ! MEM_VOLATILE_P (operands[2])
5835 && addrs_ok_for_ldd_peep (XEXP (operands[0], 0), XEXP (operands[2], 0))"
5836 "std %1,%0")
5837
5838 (define_peephole
5839 [(set (match_operand:SI 0 "register_operand" "=rf")
5840 (match_operand:SI 1 "memory_operand" ""))
5841 (set (match_operand:SI 2 "register_operand" "=rf")
5842 (match_operand:SI 3 "memory_operand" ""))]
5843 "! TARGET_ARCH64
5844 && registers_ok_for_ldd_peep (operands[2], operands[0])
5845 && ! MEM_VOLATILE_P (operands[3]) && ! MEM_VOLATILE_P (operands[1])
5846 && addrs_ok_for_ldd_peep (XEXP (operands[3], 0), XEXP (operands[1], 0))"
5847 "ldd %3,%2")
5848
5849 (define_peephole
5850 [(set (match_operand:SI 0 "memory_operand" "")
5851 (match_operand:SI 1 "register_operand" "rf"))
5852 (set (match_operand:SI 2 "memory_operand" "")
5853 (match_operand:SI 3 "register_operand" "rf"))]
5854 "! TARGET_ARCH64
5855 && registers_ok_for_ldd_peep (operands[3], operands[1])
5856 && ! MEM_VOLATILE_P (operands[2]) && ! MEM_VOLATILE_P (operands[0])
5857 && addrs_ok_for_ldd_peep (XEXP (operands[2], 0), XEXP (operands[0], 0))"
5858 "std %3,%2")
5859
5860 (define_peephole
5861 [(set (match_operand:SF 0 "register_operand" "=fr")
5862 (match_operand:SF 1 "memory_operand" ""))
5863 (set (match_operand:SF 2 "register_operand" "=fr")
5864 (match_operand:SF 3 "memory_operand" ""))]
5865 "! TARGET_ARCH64
5866 && registers_ok_for_ldd_peep (operands[2], operands[0])
5867 && ! MEM_VOLATILE_P (operands[3]) && ! MEM_VOLATILE_P (operands[1])
5868 && addrs_ok_for_ldd_peep (XEXP (operands[3], 0), XEXP (operands[1], 0))"
5869 "ldd %3,%2")
5870
5871 (define_peephole
5872 [(set (match_operand:SF 0 "memory_operand" "")
5873 (match_operand:SF 1 "register_operand" "fr"))
5874 (set (match_operand:SF 2 "memory_operand" "")
5875 (match_operand:SF 3 "register_operand" "fr"))]
5876 "! TARGET_ARCH64
5877 && registers_ok_for_ldd_peep (operands[3], operands[1])
5878 && ! MEM_VOLATILE_P (operands[2]) && ! MEM_VOLATILE_P (operands[0])
5879 && addrs_ok_for_ldd_peep (XEXP (operands[2], 0), XEXP (operands[0], 0))"
5880 "std %3,%2")
5881
5882 ;; Optimize the case of following a reg-reg move with a test
5883 ;; of the register just moved. Don't allow floating point regs for operand 0 or 1.
5884 ;; This can result from a float-to-fix conversion.
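;; For illustration (hypothetical registers; the two-insn form is only a
;; sketch): a sequence along the lines of
;;
;;         mov  %g2,%g3
;;         tst  %g3
;;
;; collapses into the single "orcc %g2,0,%g3", which performs the copy and
;; sets the integer condition codes in one instruction.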
5885
5886 (define_peephole
5887 [(set (match_operand:SI 0 "register_operand" "=r")
5888 (match_operand:SI 1 "register_operand" "r"))
5889 (set (reg:CC 100)
5890 (compare:CC (match_operand:SI 2 "register_operand" "r")
5891 (const_int 0)))]
5892 "(rtx_equal_p (operands[2], operands[0])
5893 || rtx_equal_p (operands[2], operands[1]))
5894 && ! FP_REG_P (operands[0]) && ! FP_REG_P (operands[1])"
5895 "orcc %1,0,%0")
5896
5897 (define_peephole
5898 [(set (match_operand:DI 0 "register_operand" "=r")
5899 (match_operand:DI 1 "register_operand" "r"))
5900 (set (reg:CCX 100)
5901 (compare:CCX (match_operand:DI 2 "register_operand" "r")
5902 (const_int 0)))]
5903 "TARGET_ARCH64
5904 && (rtx_equal_p (operands[2], operands[0])
5905 || rtx_equal_p (operands[2], operands[1]))
5906 && ! FP_REG_P (operands[0]) && ! FP_REG_P (operands[1])"
5907 "orcc %1,0,%0")
5908
5909 ;; Do {sign,zero}-extended compares somewhat more efficiently.
5910 ;; ??? Is this now the Right Way to do this? Or will SCRATCH
5911 ;; eventually have some impact here?
5912
5913 (define_peephole
5914 [(set (match_operand:HI 0 "register_operand" "")
5915 (match_operand:HI 1 "memory_operand" ""))
5916 (set (match_operand:SI 2 "register_operand" "")
5917 (sign_extend:SI (match_dup 0)))
5918 (set (reg:CC 100)
5919 (compare:CC (match_dup 2)
5920 (const_int 0)))]
5921 ""
5922 "ldsh %1,%0\;orcc %0,0,%2")
5923
5924 (define_peephole
5925 [(set (match_operand:HI 0 "register_operand" "")
5926 (match_operand:HI 1 "memory_operand" ""))
5927 (set (match_operand:DI 2 "register_operand" "")
5928 (sign_extend:DI (match_dup 0)))
5929 (set (reg:CCX 100)
5930 (compare:CCX (match_dup 2)
5931 (const_int 0)))]
5932 "TARGET_ARCH64"
5933 "ldsh %1,%0\;orcc %0,0,%2")
5934
5935 (define_peephole
5936 [(set (match_operand:QI 0 "register_operand" "")
5937 (match_operand:QI 1 "memory_operand" ""))
5938 (set (match_operand:SI 2 "register_operand" "")
5939 (sign_extend:SI (match_dup 0)))
5940 (set (reg:CC 100)
5941 (compare:CC (match_dup 2)
5942 (const_int 0)))]
5943 ""
5944 "ldsb %1,%0\;orcc %0,0,%2")
5945
5946 (define_peephole
5947 [(set (match_operand:QI 0 "register_operand" "")
5948 (match_operand:QI 1 "memory_operand" ""))
5949 (set (match_operand:DI 2 "register_operand" "")
5950 (sign_extend:DI (match_dup 0)))
5951 (set (reg:CCX 100)
5952 (compare:CCX (match_dup 2)
5953 (const_int 0)))]
5954 "TARGET_ARCH64"
5955 "ldsb %1,%0\;orcc %0,0,%2")
5956
5957 ;; Floating-point move peepholes
5958 ;; ??? v9: Do we want similar ones?
5959
5960 (define_peephole
5961 [(set (match_operand:SI 0 "register_operand" "=r")
5962 (lo_sum:SI (match_dup 0)
5963 (match_operand:SI 1 "immediate_operand" "i")))
5964 (set (match_operand:DF 2 "register_operand" "=er")
5965 (mem:DF (match_dup 0)))]
5966 "RTX_UNCHANGING_P (operands[1]) && reg_unused_after (operands[0], insn)"
5967 "*
5968 {
5969 /* Go by way of output_move_double in case the register in operand 2
5970 is not properly aligned for ldd. */
5971 operands[1] = gen_rtx (MEM, DFmode,
5972 gen_rtx (LO_SUM, SImode, operands[0], operands[1]));
5973 operands[0] = operands[2];
5974 return output_move_double (operands);
5975 }")
5976
5977 (define_peephole
5978 [(set (match_operand:SI 0 "register_operand" "=r")
5979 (lo_sum:SI (match_dup 0)
5980 (match_operand:SI 1 "immediate_operand" "i")))
5981 (set (match_operand:SF 2 "register_operand" "=fr")
5982 (mem:SF (match_dup 0)))]
5983 "RTX_UNCHANGING_P (operands[1]) && reg_unused_after (operands[0], insn)"
5984 "ld [%0+%%lo(%a1)],%2")
5985
5986 ;; Return peepholes. First the "normal" ones.
5987 ;; These are necessary to catch insns ending up in the epilogue delay list.
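;; For illustration (a reading of the templates below): when the epilogue's
;; delay list ends up holding the move of the return value, that move is
;; folded into the restore itself, so the function ends with "ret" plus a
;; single "restore src1,src2,dst" instead of a move followed by a plain
;; return.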
5988
5989 (define_insn "*return_qi"
5990 [(set (match_operand:QI 0 "restore_operand" "")
5991 (match_operand:QI 1 "arith_operand" "rI"))
5992 (return)]
5993 "! TARGET_EPILOGUE && ! TARGET_LIVE_G0"
5994 "*
5995 {
5996 if (! TARGET_ARCH64 && current_function_returns_struct)
5997 return \"jmp %%i7+12\;restore %%g0,%1,%Y0\";
5998 else
5999 return \"ret\;restore %%g0,%1,%Y0\";
6000 }"
6001 [(set_attr "type" "multi")])
6002
6003 (define_insn "*return_hi"
6004 [(set (match_operand:HI 0 "restore_operand" "")
6005 (match_operand:HI 1 "arith_operand" "rI"))
6006 (return)]
6007 "! TARGET_EPILOGUE && ! TARGET_LIVE_G0"
6008 "*
6009 {
6010 if (! TARGET_ARCH64 && current_function_returns_struct)
6011 return \"jmp %%i7+12\;restore %%g0,%1,%Y0\";
6012 else
6013 return \"ret\;restore %%g0,%1,%Y0\";
6014 }"
6015 [(set_attr "type" "multi")])
6016
6017 (define_insn "*return_si"
6018 [(set (match_operand:SI 0 "restore_operand" "")
6019 (match_operand:SI 1 "arith_operand" "rI"))
6020 (return)]
6021 "! TARGET_EPILOGUE && ! TARGET_LIVE_G0"
6022 "*
6023 {
6024 if (! TARGET_ARCH64 && current_function_returns_struct)
6025 return \"jmp %%i7+12\;restore %%g0,%1,%Y0\";
6026 else
6027 return \"ret\;restore %%g0,%1,%Y0\";
6028 }"
6029 [(set_attr "type" "multi")])
6030
6031 ;; The following pattern is only generated by delayed-branch scheduling,
6032 ;; when the insn winds up in the epilogue. This can only happen when
6033 ;; ! TARGET_FPU because otherwise fp return values are in %f0.
6034 (define_insn "*return_sf_no_fpu"
6035 [(set (match_operand:SF 0 "restore_operand" "r")
6036 (match_operand:SF 1 "register_operand" "r"))
6037 (return)]
6038 "! TARGET_FPU && ! TARGET_EPILOGUE && ! TARGET_LIVE_G0"
6039 "*
6040 {
6041 if (! TARGET_ARCH64 && current_function_returns_struct)
6042 return \"jmp %%i7+12\;restore %%g0,%1,%Y0\";
6043 else
6044 return \"ret\;restore %%g0,%1,%Y0\";
6045 }"
6046 [(set_attr "type" "multi")])
6047
6048 (define_insn "*return_addsi"
6049 [(set (match_operand:SI 0 "restore_operand" "")
6050 (plus:SI (match_operand:SI 1 "arith_operand" "%r")
6051 (match_operand:SI 2 "arith_operand" "rI")))
6052 (return)]
6053 "! TARGET_EPILOGUE && ! TARGET_LIVE_G0
6054 && (register_operand (operands[1], SImode)
6055 || register_operand (operands[2], SImode))"
6056 "*
6057 {
6058 if (! TARGET_ARCH64 && current_function_returns_struct)
6059 return \"jmp %%i7+12\;restore %r1,%2,%Y0\";
6060 else
6061 return \"ret\;restore %r1,%2,%Y0\";
6062 }"
6063 [(set_attr "type" "multi")])
6064
6065 (define_insn "*return_di"
6066 [(set (match_operand:DI 0 "restore_operand" "")
6067 (match_operand:DI 1 "arith_double_operand" "rHI"))
6068 (return)]
6069 "TARGET_ARCH64 && ! TARGET_EPILOGUE"
6070 "ret\;restore %%g0,%1,%Y0"
6071 [(set_attr "type" "multi")])
6072
6073 (define_insn "*return_adddi"
6074 [(set (match_operand:DI 0 "restore_operand" "")
6075 (plus:DI (match_operand:DI 1 "arith_double_operand" "%r")
6076 (match_operand:DI 2 "arith_double_operand" "rHI")))
6077 (return)]
6078 "TARGET_ARCH64 && ! TARGET_EPILOGUE
6079 && (register_operand (operands[1], DImode)
6080 || register_operand (operands[2], DImode))"
6081 "ret\;restore %r1,%2,%Y0"
6082 [(set_attr "type" "multi")])
6083
6084 (define_insn "*return_subsi"
6085 [(set (match_operand:SI 0 "restore_operand" "")
6086 (minus:SI (match_operand:SI 1 "register_operand" "r")
6087 (match_operand:SI 2 "small_int" "I")))
6088 (return)]
6089 "! TARGET_EPILOGUE && INTVAL (operands[2]) != -4096"
6090 "ret\;restore %1,%n2,%Y0"
6091 [(set_attr "type" "multi")])
6092
6093 ;; The following pattern is only generated by delayed-branch scheduling,
6094 ;; when the insn winds up in the epilogue.
6095 (define_insn "*return_sf"
6096 [(set (reg:SF 32)
6097 (match_operand:SF 0 "register_operand" "f"))
6098 (return)]
6099 "! TARGET_EPILOGUE"
6100 "ret\;fmovs %0,%%f0"
6101 [(set_attr "type" "multi")])
6102
6103 ;; Now peepholes to do a call followed by a jump.
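;; For illustration (a sketch of the trick, not from the original comments):
;; the "add %o7,(%l3-.-4),%o7" placed in the call's delay slot rewrites the
;; saved return address so that the callee's "ret" (which jumps to %o7+8)
;; lands directly on label %l3, making the separate jump unnecessary.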
6104
6105 (define_peephole
6106 [(parallel [(set (match_operand 0 "" "")
6107 (call (mem:SI (match_operand:SI 1 "call_operand_address" "ps"))
6108 (match_operand 2 "" "")))
6109 (clobber (reg:SI 15))])
6110 (set (pc) (label_ref (match_operand 3 "" "")))]
6111 "short_branch (INSN_UID (insn), INSN_UID (operands[3]))"
6112 "call %a1,%2\;add %%o7,(%l3-.-4),%%o7")
6113
6114 (define_peephole
6115 [(parallel [(call (mem:SI (match_operand:SI 0 "call_operand_address" "ps"))
6116 (match_operand 1 "" ""))
6117 (clobber (reg:SI 15))])
6118 (set (pc) (label_ref (match_operand 2 "" "")))]
6119 "short_branch (INSN_UID (insn), INSN_UID (operands[2]))"
6120 "*
6121 {
6122 return \"call %a0,%1\;add %%o7,(%l2-.-4),%%o7\";
6123 }")
6124
6125 (define_peephole
6126 [(parallel [(set (match_operand 0 "" "")
6127 (call (mem:SI (match_operand:DI 1 "call_operand_address" "ps"))
6128 (match_operand 2 "" "")))
6129 (clobber (reg:DI 15))])
6130 (set (pc) (label_ref (match_operand 3 "" "")))]
6131 "TARGET_ARCH64 && short_branch (INSN_UID (insn), INSN_UID (operands[3]))"
6132 "call %a1,%2\;add %%o7,(%l3-.-4),%%o7")
6133
6134 (define_peephole
6135 [(parallel [(call (mem:SI (match_operand:DI 0 "call_operand_address" "ps"))
6136 (match_operand 1 "" ""))
6137 (clobber (reg:DI 15))])
6138 (set (pc) (label_ref (match_operand 2 "" "")))]
6139 "TARGET_ARCH64 && short_branch (INSN_UID (insn), INSN_UID (operands[2]))"
6140 "call %a0,%1\;add %%o7,(%l2-.-4),%%o7")
6141
6142 ;; Other miscellaneous peepholes.
6143
6144 ;; (reg:SI 100) is created by the {add,neg,sub}di patterns.
6145 (define_peephole
6146 [(parallel [(set (match_operand:SI 0 "register_operand" "=r")
6147 (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
6148 (reg:SI 100)))
6149 (clobber (reg:CC 100))])
6150 (set (reg:CC 100) (compare (match_dup 0) (const_int 0)))]
6151 ""
6152 "subxcc %r1,0,%0")