1 ;;- Machine description for SPARC chip for GNU C compiler
2 ;; Copyright (C) 1987, 1988, 1989, 1992 Free Software Foundation, Inc.
3 ;; Contributed by Michael Tiemann (tiemann@cygnus.com)
4
5 ;; This file is part of GNU CC.
6
7 ;; GNU CC is free software; you can redistribute it and/or modify
8 ;; it under the terms of the GNU General Public License as published by
9 ;; the Free Software Foundation; either version 2, or (at your option)
10 ;; any later version.
11
12 ;; GNU CC is distributed in the hope that it will be useful,
13 ;; but WITHOUT ANY WARRANTY; without even the implied warranty of
14 ;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 ;; GNU General Public License for more details.
16
17 ;; You should have received a copy of the GNU General Public License
18 ;; along with GNU CC; see the file COPYING. If not, write to
19 ;; the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
20
21
22 ;;- See file "rtl.def" for documentation on define_insn, match_*, et al.
23
24 ;; Insn type. Used to default other attribute values.
25
26 ;; type "unary" insns have one input operand (1) and one output operand (0)
27 ;; type "binary" insns have two input operands (1,2) and one output (0)
28 ;; type "compare" insns have one or two input operands (0,1) and no output
29 ;; type "call_no_delay_slot" is a call followed by an unimp instruction.
30
31 (define_attr "type"
32 "move,unary,binary,compare,load,store,uncond_branch,branch,call,call_no_delay_slot,address,fpload,fpstore,fp,fpcmp,fpmul,fpdiv,fpsqrt,multi,misc"
33 (const_string "binary"))
34
35 ;; Set true if insn uses call-clobbered intermediate register.
36 (define_attr "use_clobbered" "false,true"
37 (if_then_else (and (eq_attr "type" "address")
38 (match_operand 0 "clobbered_register" ""))
39 (const_string "true")
40 (const_string "false")))
41
42 ;; Length (in # of insns).
43 (define_attr "length" ""
44 (cond [(eq_attr "type" "load,fpload")
45 (if_then_else (match_operand 1 "symbolic_memory_operand" "")
46 (const_int 2) (const_int 1))
47
48 (eq_attr "type" "store,fpstore")
49 (if_then_else (match_operand 0 "symbolic_memory_operand" "")
50 (const_int 2) (const_int 1))
51
52 (eq_attr "type" "address") (const_int 2)
53
54 (eq_attr "type" "binary")
55 (if_then_else (ior (match_operand 2 "arith_operand" "")
56 (match_operand 2 "arith_double_operand" ""))
57 (const_int 1) (const_int 3))
58
59 (eq_attr "type" "multi") (const_int 2)
60
61 (eq_attr "type" "move,unary")
62 (if_then_else (ior (match_operand 1 "arith_operand" "")
63 (match_operand 1 "arith_double_operand" ""))
64 (const_int 1) (const_int 2))]
65
66 (const_int 1)))
67
68 (define_asm_attributes
69 [(set_attr "length" "1")
70 (set_attr "type" "multi")])
71
72 ;; Attributes for instruction and branch scheduling
73
74 (define_attr "in_call_delay" "false,true"
75 (cond [(eq_attr "type" "uncond_branch,branch,call,call_no_delay_slot,multi")
76 (const_string "false")
77 (eq_attr "type" "load,fpload,store,fpstore")
78 (if_then_else (eq_attr "length" "1")
79 (const_string "true")
80 (const_string "false"))
81 (eq_attr "type" "address")
82 (if_then_else (eq_attr "use_clobbered" "false")
83 (const_string "true")
84 (const_string "false"))]
85 (if_then_else (eq_attr "length" "1")
86 (const_string "true")
87 (const_string "false"))))
88
89 (define_delay (eq_attr "type" "call")
90 [(eq_attr "in_call_delay" "true") (nil) (nil)])
91
92 ;; ??? Should implement the notion of predelay slots for floating point
93 ;; branches. This would allow us to remove the nop always inserted before
94 ;; a floating point branch.
95
96 ;; ??? It is OK for fill_simple_delay_slots to put load/store instructions
97 ;; in a delay slot, but it is not OK for fill_eager_delay_slots to do so.
98 ;; This is because doing so will add several pipeline stalls to the path
99 ;; that the load/store did not come from. Unfortunately, there is no way
100 ;; to prevent fill_eager_delay_slots from using load/store without completely
101 ;; disabling them.  For the SPEC benchmark set, this is a serious loss,
102 ;; because it prevents us from moving back the final store of inner loops.
103
104 (define_attr "in_branch_delay" "false,true"
105 (if_then_else (and (eq_attr "type" "!uncond_branch,branch,call,call_no_delay_slot,multi")
106 (eq_attr "length" "1"))
107 (const_string "true")
108 (const_string "false")))
109
110 (define_attr "in_uncond_branch_delay" "false,true"
111 (if_then_else (and (eq_attr "type" "!uncond_branch,branch,call,call_no_delay_slot,multi")
112 (eq_attr "length" "1"))
113 (const_string "true")
114 (const_string "false")))
115
116 (define_attr "in_annul_branch_delay" "false,true"
117 (if_then_else (and (eq_attr "type" "!uncond_branch,branch,call,call_no_delay_slot,multi")
118 (eq_attr "length" "1"))
119 (const_string "true")
120 (const_string "false")))
121
122 (define_delay (eq_attr "type" "branch")
123 [(eq_attr "in_branch_delay" "true")
124 (nil) (eq_attr "in_annul_branch_delay" "true")])
125
126 (define_delay (eq_attr "type" "uncond_branch")
127 [(eq_attr "in_uncond_branch_delay" "true")
128 (nil) (nil)])
129
130 ;; Function units of the SPARC
131
132 ;; (define_function_unit {name} {num-units} {n-users} {test}
133 ;; {ready-delay} {issue-delay} [{conflict-list}])
134
135 ;; The integer ALU.
136 ;; (Noted only for documentation; units that take one cycle do not need to
137 ;; be specified.)
138
139 ;; On the sparclite, integer multiply takes 1, 3, or 5 cycles depending on
140 ;; the inputs.
141
142 ;; (define_function_unit "alu" 1 0
143 ;; (eq_attr "type" "unary,binary,move,address") 1 0)
144
145 ;; Memory with load-delay of 1 (i.e., 2 cycle load).
146 (define_function_unit "memory" 1 1 (eq_attr "type" "load,fpload") 2 0)
147
148 ;; SPARC has two floating-point units: the FP ALU,
149 ;; and the FP MUL/DIV/SQRT unit.
150 ;; Instruction timings on the CY7C602 are as follows
151 ;; FABSs 4
152 ;; FADDs/d 5/5
153 ;; FCMPs/d 4/4
154 ;; FDIVs/d 23/37
155 ;; FMOVs 4
156 ;; FMULs/d 5/7
157 ;; FNEGs 4
158 ;; FSQRTs/d 34/63
159 ;; FSUBs/d 5/5
160 ;; FdTOi/s 5/5
161 ;; FsTOi/d 5/5
162 ;; FiTOs/d 9/5
163
164 ;; The CY7C602 can only support 2 fp insns simultaneously.
165 ;; More insns cause the chip to stall.
166
167 (define_function_unit "fp_alu" 1 1 (eq_attr "type" "fp") 5 0)
168 (define_function_unit "fp_mds" 1 1 (eq_attr "type" "fpmul") 7 0)
169 (define_function_unit "fp_mds" 1 1 (eq_attr "type" "fpdiv") 37 0)
170 (define_function_unit "fp_mds" 1 1 (eq_attr "type" "fpsqrt") 63 0)
171 \f
172 ;; Compare instructions.
173 ;; This controls RTL generation and register allocation.
174
175 ;; We generate RTL for comparisons and branches by having the cmpxx
176 ;; patterns store away the operands. Then, the scc and bcc patterns
177 ;; emit RTL for both the compare and the branch.
178 ;;
179 ;; We do this because we want to generate different code for an sne and
180 ;; seq insn. In those cases, if the second operand of the compare is not
181 ;; const0_rtx, we want to compute the xor of the two operands and test
182 ;; it against zero.
183 ;;
184 ;; We start with the DEFINE_EXPANDs, then the DEFINE_INSNs to match
185 ;; the patterns. Finally, we have the DEFINE_SPLITs for some of the scc
186 ;; insns that actually require more than one machine instruction.
187
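;; For illustration only (a rough sketch; the function name below is just an
;; example), C source such as
;;
;;   int is_eq (int a, int b) { return a == b; }
;;
;; first hits the "cmpsi" expander, which emits nothing by itself and only
;; records the operands in sparc_compare_op0/sparc_compare_op1.  The later
;; "seq" expander then either uses the branch-free xor/subcc/subx route
;; (seq_special) or calls gen_compare_reg and leaves the rest to the
;; generic scc pattern, which does use a branch.
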
188 ;; Put cmpsi first among compare insns so it matches two CONST_INT operands.
189
190 (define_expand "cmpsi"
191 [(set (reg:CC 0)
192 (compare:CC (match_operand:SI 0 "register_operand" "")
193 (match_operand:SI 1 "arith_operand" "")))]
194 ""
195 "
196 {
197 sparc_compare_op0 = operands[0];
198 sparc_compare_op1 = operands[1];
199 DONE;
200 }")
201
202 (define_expand "cmpsf"
203 [(set (reg:CCFP 0)
204 (compare:CCFP (match_operand:SF 0 "register_operand" "")
205 (match_operand:SF 1 "register_operand" "")))]
206 "TARGET_FPU"
207 "
208 {
209 sparc_compare_op0 = operands[0];
210 sparc_compare_op1 = operands[1];
211 DONE;
212 }")
213
214 (define_expand "cmpdf"
215 [(set (reg:CCFP 0)
216 (compare:CCFP (match_operand:DF 0 "register_operand" "")
217 (match_operand:DF 1 "register_operand" "")))]
218 "TARGET_FPU"
219 "
220 {
221 sparc_compare_op0 = operands[0];
222 sparc_compare_op1 = operands[1];
223 DONE;
224 }")
225
226 (define_expand "cmptf"
227 [(set (reg:CCFP 0)
228 (compare:CCFP (match_operand:TF 0 "register_operand" "")
229 (match_operand:TF 1 "register_operand" "")))]
230 "TARGET_FPU"
231 "
232 {
233 sparc_compare_op0 = operands[0];
234 sparc_compare_op1 = operands[1];
235 DONE;
236 }")
237
238 ;; Next come the scc insns. For seq, sne, sgeu, and sltu, we can do this
239 ;; without jumps using the addx/subx instructions. For the rest, we do
240 ;; branches. Seq_special and sne_special clobber the CC reg, because they
241 ;; generate addcc/subcc instructions.
242
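;; For example (illustrative only; register choices are arbitrary), the
;; value of "i != 0" can be computed as
;;
;;   subcc %g0,%i0,%g0    ! carry is set iff i != 0
;;   addx  %g0,0,%o0      ! %o0 = carry = (i != 0)
;;
;; and "i == 0" as
;;
;;   subcc %g0,%i0,%g0
;;   subx  %g0,-1,%o0     ! %o0 = 1 - carry = (i == 0)
;;
;; matching the single-insn scc patterns that follow the expanders below.
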
243 (define_expand "seq_special"
244 [(set (match_dup 3) (xor:SI (match_operand:SI 1 "register_operand" "")
245 (match_operand:SI 2 "register_operand" "")))
246 (parallel [(set (match_operand:SI 0 "register_operand" "")
247 (eq:SI (match_dup 3) (const_int 0)))
248 (clobber (reg:CC 0))])]
249
250 ""
251 "{ operands[3] = gen_reg_rtx (SImode); }")
252
253 (define_expand "sne_special"
254 [(set (match_dup 3) (xor:SI (match_operand:SI 1 "register_operand" "")
255 (match_operand:SI 2 "register_operand" "")))
256 (parallel [(set (match_operand:SI 0 "register_operand" "")
257 (ne:SI (match_dup 3) (const_int 0)))
258 (clobber (reg:CC 0))])]
259 ""
260 "{ operands[3] = gen_reg_rtx (SImode); }")
261
262 (define_expand "seq"
263 [(set (match_operand:SI 0 "register_operand" "")
264 (eq:SI (match_dup 1) (const_int 0)))]
265 ""
266 "
267 { if (GET_MODE (sparc_compare_op0) == SImode)
268 {
269 emit_insn (gen_seq_special (operands[0], sparc_compare_op0,
270 sparc_compare_op1));
271 DONE;
272 }
273 else
274 operands[1] = gen_compare_reg (EQ, sparc_compare_op0, sparc_compare_op1);
275 }")
276
277 (define_expand "sne"
278 [(set (match_operand:SI 0 "register_operand" "")
279 (ne:SI (match_dup 1) (const_int 0)))]
280 ""
281 "
282 { if (GET_MODE (sparc_compare_op0) == SImode)
283 {
284 emit_insn (gen_sne_special (operands[0], sparc_compare_op0,
285 sparc_compare_op1));
286 DONE;
287 }
288 else
289 operands[1] = gen_compare_reg (NE, sparc_compare_op0, sparc_compare_op1);
290 }")
291
292 (define_expand "sgt"
293 [(set (match_operand:SI 0 "register_operand" "")
294 (gt:SI (match_dup 1) (const_int 0)))]
295 ""
296 "
297 { operands[1] = gen_compare_reg (GT, sparc_compare_op0, sparc_compare_op1); }")
298
299 (define_expand "slt"
300 [(set (match_operand:SI 0 "register_operand" "")
301 (lt:SI (match_dup 1) (const_int 0)))]
302 ""
303 "
304 { operands[1] = gen_compare_reg (LT, sparc_compare_op0, sparc_compare_op1); }")
305
306 (define_expand "sge"
307 [(set (match_operand:SI 0 "register_operand" "")
308 (ge:SI (match_dup 1) (const_int 0)))]
309 ""
310 "
311 { operands[1] = gen_compare_reg (GE, sparc_compare_op0, sparc_compare_op1); }")
312
313 (define_expand "sle"
314 [(set (match_operand:SI 0 "register_operand" "")
315 (le:SI (match_dup 1) (const_int 0)))]
316 ""
317 "
318 { operands[1] = gen_compare_reg (LE, sparc_compare_op0, sparc_compare_op1); }")
319
320 (define_expand "sgtu"
321 [(set (match_operand:SI 0 "register_operand" "")
322 (gtu:SI (match_dup 1) (const_int 0)))]
323 ""
324 "
325 {
326 rtx tem;
327
328 /* We can do ltu easily, so if both operands are registers, swap them and
329 do a LTU. */
330 if ((GET_CODE (sparc_compare_op0) == REG
331 || GET_CODE (sparc_compare_op0) == SUBREG)
332 && (GET_CODE (sparc_compare_op1) == REG
333 || GET_CODE (sparc_compare_op1) == SUBREG))
334 {
335 tem = sparc_compare_op0;
336 sparc_compare_op0 = sparc_compare_op1;
337 sparc_compare_op1 = tem;
338 emit_insn (gen_sltu (operands[0]));
339 DONE;
340 }
341
342 operands[1] = gen_compare_reg (LEU, sparc_compare_op0, sparc_compare_op1);
343 }")
344
345 (define_expand "sltu"
346 [(set (match_operand:SI 0 "register_operand" "")
347 (ltu:SI (match_dup 1) (const_int 0)))]
348 ""
349 "
350 { operands[1] = gen_compare_reg (LTU, sparc_compare_op0, sparc_compare_op1);
351 }")
352
353 (define_expand "sgeu"
354 [(set (match_operand:SI 0 "register_operand" "")
355 (geu:SI (match_dup 1) (const_int 0)))]
356 ""
357 "
358 { operands[1] = gen_compare_reg (GEU, sparc_compare_op0, sparc_compare_op1);
359 }")
360
361 (define_expand "sleu"
362 [(set (match_operand:SI 0 "register_operand" "")
363 (leu:SI (match_dup 1) (const_int 0)))]
364 ""
365 "
366 {
367 rtx tem;
368
369 /* We can do geu easily, so if both operands are registers, swap them and
370 do a GEU. */
371 if ((GET_CODE (sparc_compare_op0) == REG
372 || GET_CODE (sparc_compare_op0) == SUBREG)
373 && (GET_CODE (sparc_compare_op1) == REG
374 || GET_CODE (sparc_compare_op1) == SUBREG))
375 {
376 tem = sparc_compare_op0;
377 sparc_compare_op0 = sparc_compare_op1;
378 sparc_compare_op1 = tem;
379 emit_insn (gen_sgeu (operands[0]));
380 DONE;
381 }
382
383 operands[1] = gen_compare_reg (LEU, sparc_compare_op0, sparc_compare_op1);
384 }")
385
386 ;; Now the DEFINE_INSNs for the compare and scc cases. First the compares.
387
388 (define_insn ""
389 [(set (reg:CC 0)
390 (compare:CC (match_operand:SI 0 "register_operand" "r")
391 (match_operand:SI 1 "arith_operand" "rI")))]
392 ""
393 "cmp %r0,%1"
394 [(set_attr "type" "compare")])
395
396 (define_insn ""
397 [(set (reg:CCFPE 0)
398 (compare:CCFPE (match_operand:DF 0 "register_operand" "f")
399 (match_operand:DF 1 "register_operand" "f")))]
400 "TARGET_FPU"
401 "fcmped %0,%1"
402 [(set_attr "type" "fpcmp")])
403
404 (define_insn ""
405 [(set (reg:CCFPE 0)
406 (compare:CCFPE (match_operand:SF 0 "register_operand" "f")
407 (match_operand:SF 1 "register_operand" "f")))]
408 "TARGET_FPU"
409 "fcmpes %0,%1"
410 [(set_attr "type" "fpcmp")])
411
412 (define_insn ""
413 [(set (reg:CCFPE 0)
414 (compare:CCFPE (match_operand:TF 0 "register_operand" "f")
415 (match_operand:TF 1 "register_operand" "f")))]
416 "TARGET_FPU"
417 "fcmpeq %0,%1"
418 [(set_attr "type" "fpcmp")])
419
420 (define_insn ""
421 [(set (reg:CCFP 0)
422 (compare:CCFP (match_operand:DF 0 "register_operand" "f")
423 (match_operand:DF 1 "register_operand" "f")))]
424 "TARGET_FPU"
425 "fcmpd %0,%1"
426 [(set_attr "type" "fpcmp")])
427
428 (define_insn ""
429 [(set (reg:CCFP 0)
430 (compare:CCFP (match_operand:SF 0 "register_operand" "f")
431 (match_operand:SF 1 "register_operand" "f")))]
432 "TARGET_FPU"
433 "fcmps %0,%1"
434 [(set_attr "type" "fpcmp")])
435
436 (define_insn ""
437 [(set (reg:CCFP 0)
438 (compare:CCFP (match_operand:TF 0 "register_operand" "f")
439 (match_operand:TF 1 "register_operand" "f")))]
440 "TARGET_FPU"
441 "fcmpq %0,%1"
442 [(set_attr "type" "fpcmp")])
443
444 ;; The SEQ and SNE patterns are special because they can be done
445 ;; without any branching and do not involve a COMPARE.
446
447 (define_insn ""
448 [(set (match_operand:SI 0 "register_operand" "=r")
449 (ne:SI (match_operand:SI 1 "register_operand" "r") (const_int 0)))
450 (clobber (reg:CC 0))]
451 ""
452 "subcc %%g0,%1,%%g0\;addx %%g0,0,%0"
453 [(set_attr "type" "unary")
454 (set_attr "length" "2")])
455
456 (define_insn ""
457 [(set (match_operand:SI 0 "register_operand" "=r")
458 (neg:SI (ne:SI (match_operand:SI 1 "register_operand" "r")
459 (const_int 0))))
460 (clobber (reg:CC 0))]
461 ""
462 "subcc %%g0,%1,%%g0\;subx %%g0,0,%0"
463 [(set_attr "type" "unary")
464 (set_attr "length" "2")])
465
466 (define_insn ""
467 [(set (match_operand:SI 0 "register_operand" "=r")
468 (eq:SI (match_operand:SI 1 "register_operand" "r") (const_int 0)))
469 (clobber (reg:CC 0))]
470 ""
471 "subcc %%g0,%1,%%g0\;subx %%g0,-1,%0"
472 [(set_attr "type" "unary")
473 (set_attr "length" "2")])
474
475 (define_insn ""
476 [(set (match_operand:SI 0 "register_operand" "=r")
477 (neg:SI (eq:SI (match_operand:SI 1 "register_operand" "r")
478 (const_int 0))))
479 (clobber (reg:CC 0))]
480 ""
481 "subcc %%g0,%1,%%g0\;addx %%g0,-1,%0"
482 [(set_attr "type" "unary")
483 (set_attr "length" "2")])
484
485 ;; We can also do (x + (i == 0)) and related, so put them in.
486
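;; E.g. (illustrative only) "x + (i != 0)" can come out as
;;
;;   subcc %g0,<i>,%g0
;;   addx  <x>,0,<result>
;;
;; with the carry from the subcc folded directly into the addx.
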
487 (define_insn ""
488 [(set (match_operand:SI 0 "register_operand" "=r")
489 (plus:SI (ne:SI (match_operand:SI 1 "register_operand" "r")
490 (const_int 0))
491 (match_operand:SI 2 "register_operand" "r")))
492 (clobber (reg:CC 0))]
493 ""
494 "subcc %%g0,%1,%%g0\;addx %2,0,%0"
495 [(set_attr "length" "2")])
496
497 (define_insn ""
498 [(set (match_operand:SI 0 "register_operand" "=r")
499 (minus:SI (match_operand:SI 2 "register_operand" "r")
500 (ne:SI (match_operand:SI 1 "register_operand" "r")
501 (const_int 0))))
502 (clobber (reg:CC 0))]
503 ""
504 "subcc %%g0,%1,%%g0\;subx %2,0,%0"
505 [(set_attr "length" "2")])
506
507 (define_insn ""
508 [(set (match_operand:SI 0 "register_operand" "=r")
509 (plus:SI (eq:SI (match_operand:SI 1 "register_operand" "r")
510 (const_int 0))
511 (match_operand:SI 2 "register_operand" "r")))
512 (clobber (reg:CC 0))]
513 ""
514 "subcc %%g0,%1,%%g0\;subx %2,-1,%0"
515 [(set_attr "length" "2")])
516
517 (define_insn ""
518 [(set (match_operand:SI 0 "register_operand" "=r")
519 (minus:SI (match_operand:SI 2 "register_operand" "r")
520 (eq:SI (match_operand:SI 1 "register_operand" "r")
521 (const_int 0))))
522 (clobber (reg:CC 0))]
523 ""
524 "subcc %%g0,%1,%%g0\;addx %2,-1,%0"
525 [(set_attr "length" "2")])
526
527 ;; We can also do GEU and LTU directly, but these operate after a
528 ;; compare.
529
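;; Rough illustration: after "cmp %o0,%o1" (a subcc), the unsigned test
;; "o0 < o1" is just the carry bit, so
;;
;;   addx %g0,0,%o2       ! %o2 = (o0 <u o1)
;;   subx %g0,-1,%o2      ! %o2 = (o0 >=u o1), i.e. 1 - carry
;;
;; are the LTU and GEU forms matched below.
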
530 (define_insn ""
531 [(set (match_operand:SI 0 "register_operand" "=r")
532 (ltu:SI (reg:CC 0) (const_int 0)))]
533 ""
534 "addx %%g0,0,%0"
535 [(set_attr "type" "misc")])
536
537 (define_insn ""
538 [(set (match_operand:SI 0 "register_operand" "=r")
539 (neg:SI (ltu:SI (reg:CC 0) (const_int 0))))]
540 ""
541 "subx %%g0,0,%0"
542 [(set_attr "type" "misc")])
543
544 ;; ??? Combine should canonicalize these next two to the same pattern.
545 (define_insn ""
546 [(set (match_operand:SI 0 "register_operand" "=r")
547 (minus:SI (neg:SI (ltu:SI (reg:CC 0) (const_int 0)))
548 (match_operand:SI 1 "arith_operand" "rI")))]
549 ""
550 "subx %%g0,%1,%0"
551 [(set_attr "type" "unary")])
552
553 (define_insn ""
554 [(set (match_operand:SI 0 "register_operand" "=r")
555 (neg:SI (plus:SI (ltu:SI (reg:CC 0) (const_int 0))
556 (match_operand:SI 1 "arith_operand" "rI"))))]
557 ""
558 "subx %%g0,%1,%0"
559 [(set_attr "type" "unary")])
560
561 (define_insn ""
562 [(set (match_operand:SI 0 "register_operand" "=r")
563 (geu:SI (reg:CC 0) (const_int 0)))]
564 ""
565 "subx %%g0,-1,%0"
566 [(set_attr "type" "misc")])
567
568 (define_insn ""
569 [(set (match_operand:SI 0 "register_operand" "=r")
570 (neg:SI (geu:SI (reg:CC 0) (const_int 0))))]
571 ""
572 "addx %%g0,-1,%0"
573 [(set_attr "type" "misc")])
574
575 ;; We can also do (x + ((unsigned) i >= 0)) and related, so put them in.
576
577 (define_insn ""
578 [(set (match_operand:SI 0 "register_operand" "=r")
579 (plus:SI (ltu:SI (reg:CC 0) (const_int 0))
580 (match_operand:SI 1 "arith_operand" "rI")))]
581 ""
582 "addx %%g0,%1,%0"
583 [(set_attr "type" "unary")])
584
585 (define_insn ""
586 [(set (match_operand:SI 0 "register_operand" "=r")
587 (plus:SI (ltu:SI (reg:CC 0) (const_int 0))
588 (plus:SI (match_operand:SI 1 "arith_operand" "%r")
589 (match_operand:SI 2 "arith_operand" "rI"))))]
590 ""
591 "addx %1,%2,%0")
592
593 (define_insn ""
594 [(set (match_operand:SI 0 "register_operand" "=r")
595 (minus:SI (match_operand:SI 1 "register_operand" "r")
596 (ltu:SI (reg:CC 0) (const_int 0))))]
597 ""
598 "subx %1,0,%0"
599 [(set_attr "type" "unary")])
600
601 ;; ??? Combine should canonicalize these next two to the same pattern.
602 (define_insn ""
603 [(set (match_operand:SI 0 "register_operand" "=r")
604 (minus:SI (minus:SI (match_operand:SI 1 "register_operand" "r")
605 (match_operand:SI 2 "arith_operand" "rI"))
606 (ltu:SI (reg:CC 0) (const_int 0))))]
607 ""
608 "subx %1,%2,%0")
609
610 (define_insn ""
611 [(set (match_operand:SI 0 "register_operand" "=r")
612 (minus:SI (match_operand:SI 1 "register_operand" "r")
613 (plus:SI (ltu:SI (reg:CC 0) (const_int 0))
614 (match_operand:SI 2 "arith_operand" "rI"))))]
615 ""
616 "subx %1,%2,%0")
617
618 (define_insn ""
619 [(set (match_operand:SI 0 "register_operand" "=r")
620 (plus:SI (geu:SI (reg:CC 0) (const_int 0))
621 (match_operand:SI 1 "register_operand" "r")))]
622 ""
623 "subx %1,-1,%0"
624 [(set_attr "type" "unary")])
625
626 (define_insn ""
627 [(set (match_operand:SI 0 "register_operand" "=r")
628 (minus:SI (match_operand:SI 1 "register_operand" "r")
629 (geu:SI (reg:CC 0) (const_int 0))))]
630 ""
631 "addx %1,-1,%0"
632 [(set_attr "type" "unary")])
633
634 ;; Now we have the generic scc insns. These will be done using a jump.
635 ;; We have to exclude the cases above, since we do not want combine to
636 ;; turn something that does not require a jump into something that does.
637 (define_insn ""
638 [(set (match_operand:SI 0 "register_operand" "=r")
639 (match_operator:SI 1 "noov_compare_op" [(reg 0) (const_int 0)]))]
640 ""
641 "* return output_scc_insn (operands, insn); "
642 [(set_attr "type" "multi")
643 (set_attr "length" "3")])
644 \f
645 ;; These control RTL generation for conditional jump insns
646
647 (define_expand "beq"
648 [(set (pc)
649 (if_then_else (eq (match_dup 1) (const_int 0))
650 (label_ref (match_operand 0 "" ""))
651 (pc)))]
652 ""
653 "
654 { operands[1] = gen_compare_reg (EQ, sparc_compare_op0, sparc_compare_op1); }")
655
656 (define_expand "bne"
657 [(set (pc)
658 (if_then_else (ne (match_dup 1) (const_int 0))
659 (label_ref (match_operand 0 "" ""))
660 (pc)))]
661 ""
662 "
663 { operands[1] = gen_compare_reg (NE, sparc_compare_op0, sparc_compare_op1); }")
664
665 (define_expand "bgt"
666 [(set (pc)
667 (if_then_else (gt (match_dup 1) (const_int 0))
668 (label_ref (match_operand 0 "" ""))
669 (pc)))]
670 ""
671 "
672 { operands[1] = gen_compare_reg (GT, sparc_compare_op0, sparc_compare_op1); }")
673
674 (define_expand "bgtu"
675 [(set (pc)
676 (if_then_else (gtu (match_dup 1) (const_int 0))
677 (label_ref (match_operand 0 "" ""))
678 (pc)))]
679 ""
680 "
681 { operands[1] = gen_compare_reg (GTU, sparc_compare_op0, sparc_compare_op1);
682 }")
683
684 (define_expand "blt"
685 [(set (pc)
686 (if_then_else (lt (match_dup 1) (const_int 0))
687 (label_ref (match_operand 0 "" ""))
688 (pc)))]
689 ""
690 "
691 { operands[1] = gen_compare_reg (LT, sparc_compare_op0, sparc_compare_op1); }")
692
693 (define_expand "bltu"
694 [(set (pc)
695 (if_then_else (ltu (match_dup 1) (const_int 0))
696 (label_ref (match_operand 0 "" ""))
697 (pc)))]
698 ""
699 "
700 { operands[1] = gen_compare_reg (LTU, sparc_compare_op0, sparc_compare_op1);
701 }")
702
703 (define_expand "bge"
704 [(set (pc)
705 (if_then_else (ge (match_dup 1) (const_int 0))
706 (label_ref (match_operand 0 "" ""))
707 (pc)))]
708 ""
709 "
710 { operands[1] = gen_compare_reg (GE, sparc_compare_op0, sparc_compare_op1); }")
711
712 (define_expand "bgeu"
713 [(set (pc)
714 (if_then_else (geu (match_dup 1) (const_int 0))
715 (label_ref (match_operand 0 "" ""))
716 (pc)))]
717 ""
718 "
719 { operands[1] = gen_compare_reg (GEU, sparc_compare_op0, sparc_compare_op1);
720 }")
721
722 (define_expand "ble"
723 [(set (pc)
724 (if_then_else (le (match_dup 1) (const_int 0))
725 (label_ref (match_operand 0 "" ""))
726 (pc)))]
727 ""
728 "
729 { operands[1] = gen_compare_reg (LE, sparc_compare_op0, sparc_compare_op1); }")
730
731 (define_expand "bleu"
732 [(set (pc)
733 (if_then_else (leu (match_dup 1) (const_int 0))
734 (label_ref (match_operand 0 "" ""))
735 (pc)))]
736 ""
737 "
738 { operands[1] = gen_compare_reg (LEU, sparc_compare_op0, sparc_compare_op1);
739 }")
740 \f
741 ;; Now match both normal and inverted jump.
742
743 (define_insn ""
744 [(set (pc)
745 (if_then_else (match_operator 0 "noov_compare_op"
746 [(reg 0) (const_int 0)])
747 (label_ref (match_operand 1 "" ""))
748 (pc)))]
749 ""
750 "*
751 {
752 return output_cbranch (operands[0], 1, 0,
753 final_sequence && INSN_ANNULLED_BRANCH_P (insn),
754 ! final_sequence);
755 }"
756 [(set_attr "type" "branch")])
757
758 (define_insn ""
759 [(set (pc)
760 (if_then_else (match_operator 0 "noov_compare_op"
761 [(reg 0) (const_int 0)])
762 (pc)
763 (label_ref (match_operand 1 "" ""))))]
764 ""
765 "*
766 {
767 return output_cbranch (operands[0], 1, 1,
768 final_sequence && INSN_ANNULLED_BRANCH_P (insn),
769 ! final_sequence);
770 }"
771 [(set_attr "type" "branch")])
772 \f
773 ;; Move instructions
774
775 (define_expand "movsi"
776 [(set (match_operand:SI 0 "general_operand" "")
777 (match_operand:SI 1 "general_operand" ""))]
778 ""
779 "
780 {
781 if (emit_move_sequence (operands, SImode, NULL_RTX))
782 DONE;
783 }")
784
785 (define_expand "reload_insi"
786 [(set (match_operand:SI 0 "register_operand" "=r")
787 (match_operand:SI 1 "general_operand" ""))
788 (clobber (match_operand:SI 2 "register_operand" "=&r"))]
789 ""
790 "
791 {
792 if (emit_move_sequence (operands, SImode, operands[2]))
793 DONE;
794
795 /* We don't want the clobber emitted, so handle this ourselves. */
796 emit_insn (gen_rtx (SET, VOIDmode, operands[0], operands[1]));
797 DONE;
798 }")
799
800 ;; We must support both 'r' and 'f' registers here, because combine may
801 ;; convert SFmode hard registers to SImode hard registers when simplifying
802 ;; subreg sets.
803
804 ;; We cannot combine the similar 'r' and 'f' constraints, because it causes
805 ;; problems with register allocation. Reload might try to put an integer
806 ;; in an fp register, or an fp number in an integer register.
807
808 (define_insn ""
809 [(set (match_operand:SI 0 "reg_or_nonsymb_mem_operand" "=r,f,r,r,f,Q,Q")
810 (match_operand:SI 1 "move_operand" "rI,!f,K,Q,!Q,rJ,!f"))]
811 "register_operand (operands[0], SImode)
812 || register_operand (operands[1], SImode)
813 || operands[1] == const0_rtx"
814 "@
815 mov %1,%0
816 fmovs %1,%0
817 sethi %%hi(%a1),%0
818 ld %1,%0
819 ld %1,%0
820 st %r1,%0
821 st %r1,%0"
822 [(set_attr "type" "move,fp,move,load,load,store,store")
823 (set_attr "length" "*,*,1,*,*,*,*")])
824
825 ;; Special pic pattern, for loading the address of a label into a register.
826 ;; It clobbers o7 because the call puts the return address (i.e. pc value)
827 ;; there.
828
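;; Roughly, the template below expands to (illustrative layout):
;;
;;   1:  call 2f                     ! %o7 <- address of the call, i.e. of 1:
;;       sethi %hi(LABEL-1b),%0      ! executed in the call's delay slot
;;   2:  or  %0,%lo(LABEL-1b),%0     ! %0 = LABEL - 1b
;;       add %0,%o7,%0               ! %0 = &LABEL, position-independently
;;
;; The call to 2f is used only to capture the pc; execution simply
;; continues at 2:.
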
829 (define_insn ""
830 [(set (match_operand:SI 0 "register_operand" "=r")
831 (match_operand:SI 1 "move_pic_label" "i"))
832 (set (reg:SI 15) (pc))]
833 ""
834 "\\n1:\;call 2f\;sethi %%hi(%l1-1b),%0\\n2:\\tor %0,%%lo(%l1-1b),%0\;add %0,%%o7,%0"
835 [(set_attr "type" "multi")
836 (set_attr "length" "4")])
837
838 (define_insn ""
839 [(set (match_operand:DI 0 "register_operand" "=r")
840 (high:DI (match_operand 1 "" "")))]
841 "check_pic (1)"
842 "*
843 {
844 rtx op0 = operands[0];
845 rtx op1 = operands[1];
846
847 if (GET_CODE (op1) == CONST_INT)
848 {
849 operands[0] = operand_subword (op0, 1, 0, DImode);
850 output_asm_insn (\"sethi %%hi(%a1),%0\", operands);
851
852 operands[0] = operand_subword (op0, 0, 0, DImode);
853 if (INTVAL (op1) < 0)
854 return \"mov -1,%0\";
855 else
856 return \"mov 0,%0\";
857 }
858 else if (GET_CODE (op1) == CONST_DOUBLE)
859 {
860 operands[0] = operand_subword (op0, 1, 0, DImode);
861 operands[1] = gen_rtx (CONST_INT, VOIDmode, CONST_DOUBLE_LOW (op1));
862 output_asm_insn (\"sethi %%hi(%a1),%0\", operands);
863
864 operands[0] = operand_subword (op0, 0, 0, DImode);
865 operands[1] = gen_rtx (CONST_INT, VOIDmode, CONST_DOUBLE_HIGH (op1));
866 return singlemove_string (operands);
867 }
868 else
869 abort ();
870 return \"\";
871 }"
872 [(set_attr "type" "move")
873 (set_attr "length" "2")])
874
875 ;; For PIC, symbol_refs are put inside unspec so that the optimizer won't
876 ;; confuse them with real addresses.
877 (define_insn ""
878 [(set (match_operand:SI 0 "register_operand" "=r")
879 (high:SI (unspec:SI [(match_operand 1 "" "")] 0)))]
880 "check_pic (1)"
881 "sethi %%hi(%a1),%0"
882 [(set_attr "type" "move")
883 (set_attr "length" "1")])
884
885 (define_insn ""
886 [(set (match_operand:SI 0 "register_operand" "=r")
887 (high:SI (match_operand 1 "" "")))]
888 "check_pic (1)"
889 "sethi %%hi(%a1),%0"
890 [(set_attr "type" "move")
891 (set_attr "length" "1")])
892
893 (define_insn ""
894 [(set (match_operand:HI 0 "register_operand" "=r")
895 (high:HI (match_operand 1 "" "")))]
896 "check_pic (1)"
897 "sethi %%hi(%a1),%0"
898 [(set_attr "type" "move")
899 (set_attr "length" "1")])
900
901 (define_insn ""
902 [(set (match_operand:DI 0 "register_operand" "=r")
903 (lo_sum:DI (match_operand:DI 1 "register_operand" "0")
904 (match_operand:DI 2 "immediate_operand" "in")))]
905 ""
906 "*
907 {
908 /* Don't output a 64 bit constant, since we can't trust the assembler to
909 handle it correctly. */
910 if (GET_CODE (operands[2]) == CONST_DOUBLE)
911 operands[2] = gen_rtx (CONST_INT, VOIDmode, CONST_DOUBLE_LOW (operands[2]));
912 return \"or %R1,%%lo(%a2),%R0\";
913 }"
914 ;; Need to set length for this arith insn because operand2
915 ;; is not an "arith_operand".
916 [(set_attr "length" "1")])
917
918 ;; For PIC, symbol_refs are put inside unspec so that the optimizer won't
919 ;; confuse them with real addresses.
920 (define_insn ""
921 [(set (match_operand:SI 0 "register_operand" "=r")
922 (lo_sum:SI (match_operand:SI 1 "register_operand" "r")
923 (unspec:SI [(match_operand:SI 2 "immediate_operand" "in")] 0)))]
924 ""
925 "or %1,%%lo(%a2),%0"
926 ;; Need to set length for this arith insn because operand2
927 ;; is not an "arith_operand".
928 [(set_attr "length" "1")])
929
930 (define_insn ""
931 [(set (match_operand:SI 0 "register_operand" "=r")
932 (lo_sum:SI (match_operand:SI 1 "register_operand" "r")
933 (match_operand:SI 2 "immediate_operand" "in")))]
934 ""
935 "or %1,%%lo(%a2),%0"
936 ;; Need to set length for this arith insn because operand2
937 ;; is not an "arith_operand".
938 [(set_attr "length" "1")])
939
940 (define_insn ""
941 [(set (mem:SI (match_operand:SI 0 "symbolic_operand" ""))
942 (match_operand:SI 1 "reg_or_0_operand" "rJ"))
943 (clobber (match_scratch:SI 2 "=&r"))]
944 ""
945 "sethi %%hi(%a0),%2\;st %r1,[%2+%%lo(%a0)]"
946 [(set_attr "type" "store")
947 (set_attr "length" "2")])
948
949 (define_expand "movhi"
950 [(set (match_operand:HI 0 "general_operand" "")
951 (match_operand:HI 1 "general_operand" ""))]
952 ""
953 "
954 {
955 if (emit_move_sequence (operands, HImode, NULL_RTX))
956 DONE;
957 }")
958
959 (define_insn ""
960 [(set (match_operand:HI 0 "reg_or_nonsymb_mem_operand" "=r,r,r,Q")
961 (match_operand:HI 1 "move_operand" "rI,K,Q,rJ"))]
962 "register_operand (operands[0], HImode)
963 || register_operand (operands[1], HImode)
964 || operands[1] == const0_rtx"
965 "@
966 mov %1,%0
967 sethi %%hi(%a1),%0
968 lduh %1,%0
969 sth %r1,%0"
970 [(set_attr "type" "move,move,load,store")
971 (set_attr "length" "*,1,*,1")])
972
973 (define_insn ""
974 [(set (match_operand:HI 0 "register_operand" "=r")
975 (lo_sum:HI (match_operand:HI 1 "register_operand" "r")
976 (match_operand 2 "immediate_operand" "in")))]
977 ""
978 "or %1,%%lo(%a2),%0"
979 [(set_attr "length" "1")])
980
981 (define_insn ""
982 [(set (mem:HI (match_operand:SI 0 "symbolic_operand" ""))
983 (match_operand:HI 1 "reg_or_0_operand" "rJ"))
984 (clobber (match_scratch:SI 2 "=&r"))]
985 ""
986 "sethi %%hi(%a0),%2\;sth %r1,[%2+%%lo(%a0)]"
987 [(set_attr "type" "store")
988 (set_attr "length" "2")])
989
990 (define_expand "movqi"
991 [(set (match_operand:QI 0 "general_operand" "")
992 (match_operand:QI 1 "general_operand" ""))]
993 ""
994 "
995 {
996 if (emit_move_sequence (operands, QImode, NULL_RTX))
997 DONE;
998 }")
999
1000 (define_insn ""
1001 [(set (match_operand:QI 0 "reg_or_nonsymb_mem_operand" "=r,r,r,Q")
1002 (match_operand:QI 1 "move_operand" "rI,K,Q,rJ"))]
1003 "register_operand (operands[0], QImode)
1004 || register_operand (operands[1], QImode)
1005 || operands[1] == const0_rtx"
1006 "@
1007 mov %1,%0
1008 sethi %%hi(%a1),%0
1009 ldub %1,%0
1010 stb %r1,%0"
1011 [(set_attr "type" "move,move,load,store")
1012 (set_attr "length" "*,1,*,1")])
1013
1014 (define_insn ""
1015 [(set (match_operand:QI 0 "register_operand" "=r")
1016 (subreg:QI (lo_sum:SI (match_operand:QI 1 "register_operand" "r")
1017 (match_operand 2 "immediate_operand" "in")) 0))]
1018 ""
1019 "or %1,%%lo(%a2),%0"
1020 [(set_attr "length" "1")])
1021
1022 (define_insn ""
1023 [(set (mem:QI (match_operand:SI 0 "symbolic_operand" ""))
1024 (match_operand:QI 1 "reg_or_0_operand" "rJ"))
1025 (clobber (match_scratch:SI 2 "=&r"))]
1026 ""
1027 "sethi %%hi(%a0),%2\;stb %r1,[%2+%%lo(%a0)]"
1028 [(set_attr "type" "store")
1029 (set_attr "length" "2")])
1030
1031 ;; ??? We get better code without it. See output_block_move in sparc.c.
1032
1033 ;; The definition of this insn does not really explain what it does,
1034 ;; but it should suffice
1035 ;; that anything generated as this insn will be recognized as one
1036 ;; and that it will not successfully combine with anything.
1037 ;(define_expand "movstrsi"
1038 ; [(parallel [(set (mem:BLK (match_operand:BLK 0 "general_operand" ""))
1039 ; (mem:BLK (match_operand:BLK 1 "general_operand" "")))
1040 ; (use (match_operand:SI 2 "nonmemory_operand" ""))
1041 ; (use (match_operand:SI 3 "immediate_operand" ""))
1042 ; (clobber (match_dup 0))
1043 ; (clobber (match_dup 1))
1044 ; (clobber (match_scratch:SI 4 ""))
1045 ; (clobber (reg:SI 0))
1046 ; (clobber (reg:SI 1))])]
1047 ; ""
1048 ; "
1049 ;{
1050 ; /* If the size isn't known, don't emit inline code. output_block_move
1051 ; would output code that's much slower than the library function.
1052 ; Also don't output code for large blocks. */
1053 ; if (GET_CODE (operands[2]) != CONST_INT
1054 ; || GET_CODE (operands[3]) != CONST_INT
1055 ; || INTVAL (operands[2]) / INTVAL (operands[3]) > 16)
1056 ; FAIL;
1057 ;
1058 ; operands[0] = copy_to_mode_reg (Pmode, XEXP (operands[0], 0));
1059 ; operands[1] = copy_to_mode_reg (Pmode, XEXP (operands[1], 0));
1060 ; operands[2] = force_not_mem (operands[2]);
1061 ;}")
1062
1063 ;(define_insn ""
1064 ; [(set (mem:BLK (match_operand:SI 0 "register_operand" "+r"))
1065 ; (mem:BLK (match_operand:SI 1 "register_operand" "+r")))
1066 ; (use (match_operand:SI 2 "nonmemory_operand" "rn"))
1067 ; (use (match_operand:SI 3 "immediate_operand" "i"))
1068 ; (clobber (match_dup 0))
1069 ; (clobber (match_dup 1))
1070 ; (clobber (match_scratch:SI 4 "=&r"))
1071 ; (clobber (reg:SI 0))
1072 ; (clobber (reg:SI 1))]
1073 ; ""
1074 ; "* return output_block_move (operands);"
1075 ; [(set_attr "type" "multi")
1076 ; (set_attr "length" "6")])
1077 \f
1078 ;; Floating point move insns
1079
1080 ;; This pattern forces (set (reg:TF ...) (const_double ...))
1081 ;; to be reloaded by putting the constant into memory.
1082 ;; It must come before the more general movtf pattern.
1083 (define_insn ""
1084 [(set (match_operand:TF 0 "general_operand" "=?r,f,o")
1085 (match_operand:TF 1 "" "?E,m,G"))]
1086 "TARGET_FPU && GET_CODE (operands[1]) == CONST_DOUBLE"
1087 "*
1088 {
1089 switch (which_alternative)
1090 {
1091 case 0:
1092 return output_move_quad (operands);
1093 case 1:
1094 return output_fp_move_quad (operands);
1095 case 2:
1096 operands[1] = adj_offsettable_operand (operands[0], 4);
1097 operands[2] = adj_offsettable_operand (operands[0], 8);
1098 operands[3] = adj_offsettable_operand (operands[0], 12);
1099 return \"st %%g0,%0\;st %%g0,%1\;st %%g0,%2\;st %%g0,%3\";
1100 }
1101 }"
1102 [(set_attr "type" "load,fpload,store")
1103 (set_attr "length" "5,5,5")])
1104
1105 (define_expand "movtf"
1106 [(set (match_operand:TF 0 "general_operand" "")
1107 (match_operand:TF 1 "general_operand" ""))]
1108 ""
1109 "
1110 {
1111 if (emit_move_sequence (operands, TFmode, NULL_RTX))
1112 DONE;
1113 }")
1114
1115 (define_insn ""
1116 [(set (match_operand:TF 0 "reg_or_nonsymb_mem_operand" "=f,r,Q,Q,f,&r")
1117 (match_operand:TF 1 "reg_or_nonsymb_mem_operand" "f,r,f,r,Q,Q"))]
1118 "TARGET_FPU
1119 && (register_operand (operands[0], TFmode)
1120 || register_operand (operands[1], TFmode))"
1121 "*
1122 {
1123 if (FP_REG_P (operands[0]) || FP_REG_P (operands[1]))
1124 return output_fp_move_quad (operands);
1125 return output_move_quad (operands);
1126 }"
1127 [(set_attr "type" "fp,move,fpstore,store,fpload,load")
1128 (set_attr "length" "4,4,5,5,5,5")])
1129
1130 ;; Exactly the same as above, except that all `f' cases are deleted.
1131 ;; This is necessary to prevent reload from ever trying to use a `f' reg
1132 ;; when -mno-fpu.
1133
1134 (define_insn ""
1135 [(set (match_operand:TF 0 "reg_or_nonsymb_mem_operand" "=r,Q,&r")
1136 (match_operand:TF 1 "reg_or_nonsymb_mem_operand" "r,r,Q"))]
1137 "! TARGET_FPU
1138 && (register_operand (operands[0], TFmode)
1139 || register_operand (operands[1], TFmode))"
1140 "*
1141 {
1142 if (FP_REG_P (operands[0]) || FP_REG_P (operands[1]))
1143 return output_fp_move_quad (operands);
1144 return output_move_quad (operands);
1145 }"
1146 [(set_attr "type" "move,store,load")
1147 (set_attr "length" "4,5,5")])
1148
1149 (define_insn ""
1150 [(set (mem:TF (match_operand:SI 0 "symbolic_operand" "i,i"))
1151 (match_operand:TF 1 "reg_or_0_operand" "rf,G"))
1152 (clobber (match_scratch:SI 2 "=&r,&r"))]
1153 ""
1154 "*
1155 {
1156 output_asm_insn (\"sethi %%hi(%a0),%2\", operands);
1157 if (which_alternative == 0)
1158 return \"std %1,[%2+%%lo(%a0)]\;std %S1,[%2+%%lo(%a0+8)]\";
1159 else
1160     return \"st %%g0,[%2+%%lo(%a0)]\;st %%g0,[%2+%%lo(%a0+4)]\;st %%g0,[%2+%%lo(%a0+8)]\;st %%g0,[%2+%%lo(%a0+12)]\";
1161 }"
1162 [(set_attr "type" "store")
1163 (set_attr "length" "5")])
1164 \f
1165 ;; This pattern forces (set (reg:DF ...) (const_double ...))
1166 ;; to be reloaded by putting the constant into memory.
1167 ;; It must come before the more general movdf pattern.
1168
1169 (define_insn ""
1170 [(set (match_operand:DF 0 "general_operand" "=?r,f,o")
1171 (match_operand:DF 1 "" "?E,m,G"))]
1172 "TARGET_FPU && GET_CODE (operands[1]) == CONST_DOUBLE"
1173 "*
1174 {
1175 switch (which_alternative)
1176 {
1177 case 0:
1178 return output_move_double (operands);
1179 case 1:
1180 return output_fp_move_double (operands);
1181 case 2:
1182 operands[1] = adj_offsettable_operand (operands[0], 4);
1183 return \"st %%g0,%0\;st %%g0,%1\";
1184 }
1185 }"
1186 [(set_attr "type" "load,fpload,store")
1187 (set_attr "length" "3,3,3")])
1188
1189 (define_expand "movdf"
1190 [(set (match_operand:DF 0 "general_operand" "")
1191 (match_operand:DF 1 "general_operand" ""))]
1192 ""
1193 "
1194 {
1195 if (emit_move_sequence (operands, DFmode, NULL_RTX))
1196 DONE;
1197 }")
1198
1199 (define_insn ""
1200 [(set (match_operand:DF 0 "reg_or_nonsymb_mem_operand" "=T,U,f,r,Q,Q,f,&r")
1201 (match_operand:DF 1 "reg_or_nonsymb_mem_operand" "U,T,f,r,f,r,Q,Q"))]
1202 "TARGET_FPU
1203 && (register_operand (operands[0], DFmode)
1204 || register_operand (operands[1], DFmode))"
1205 "*
1206 {
1207 if (FP_REG_P (operands[0]) || FP_REG_P (operands[1]))
1208 return output_fp_move_double (operands);
1209 return output_move_double (operands);
1210 }"
1211 [(set_attr "type" "fpstore,fpload,fp,move,fpstore,store,fpload,load")
1212 (set_attr "length" "1,1,2,2,3,3,3,3")])
1213
1214 ;; Exactly the same as above, except that all `f' cases are deleted.
1215 ;; This is necessary to prevent reload from ever trying to use a `f' reg
1216 ;; when -mno-fpu.
1217
1218 (define_insn ""
1219 [(set (match_operand:DF 0 "reg_or_nonsymb_mem_operand" "=T,U,r,Q,&r")
1220 (match_operand:DF 1 "reg_or_nonsymb_mem_operand" "U,T,r,r,Q"))]
1221 "! TARGET_FPU
1222 && (register_operand (operands[0], DFmode)
1223 || register_operand (operands[1], DFmode))"
1224 "* return output_move_double (operands);"
1225 [(set_attr "type" "store,load,move,store,load")
1226 (set_attr "length" "1,1,2,3,3")])
1227
1228 (define_split
1229 [(set (match_operand:DF 0 "register_operand" "")
1230 (match_operand:DF 1 "register_operand" ""))]
1231 "reload_completed"
1232 [(set (match_dup 2) (match_dup 3))
1233 (set (match_dup 4) (match_dup 5))]
1234 "
1235 { operands[2] = operand_subword (operands[0], 0, 0, DFmode);
1236 operands[3] = operand_subword (operands[1], 0, 0, DFmode);
1237 operands[4] = operand_subword (operands[0], 1, 0, DFmode);
1238 operands[5] = operand_subword (operands[1], 1, 0, DFmode); }")
1239
1240 (define_insn ""
1241 [(set (mem:DF (match_operand:SI 0 "symbolic_operand" "i,i"))
1242 (match_operand:DF 1 "reg_or_0_operand" "rf,G"))
1243 (clobber (match_scratch:SI 2 "=&r,&r"))]
1244 ""
1245 "*
1246 {
1247 output_asm_insn (\"sethi %%hi(%a0),%2\", operands);
1248 if (which_alternative == 0)
1249 return \"std %1,[%2+%%lo(%a0)]\";
1250 else
1251 return \"st %%g0,[%2+%%lo(%a0)]\;st %%g0,[%2+%%lo(%a0+4)]\";
1252 }"
1253 [(set_attr "type" "store")
1254 (set_attr "length" "3")])
1255 \f
1256 ;; Double-word move insns.
1257
1258 (define_expand "movdi"
1259 [(set (match_operand:DI 0 "reg_or_nonsymb_mem_operand" "")
1260 (match_operand:DI 1 "general_operand" ""))]
1261 ""
1262 "
1263 {
1264 if (emit_move_sequence (operands, DImode, NULL_RTX))
1265 DONE;
1266 }")
1267
1268 (define_insn ""
1269 [(set (match_operand:DI 0 "reg_or_nonsymb_mem_operand" "=r,Q,r,&r,?f,?f,?Q")
1270 (match_operand:DI 1 "general_operand" "r,r,Q,i,f,Q,f"))]
1271 "register_operand (operands[0], DImode)
1272 || register_operand (operands[1], DImode)
1273 || operands[1] == const0_rtx"
1274 "*
1275 {
1276 if (FP_REG_P (operands[0]) || FP_REG_P (operands[1]))
1277 return output_fp_move_double (operands);
1278 return output_move_double (operands);
1279 }"
1280 [(set_attr "type" "move,store,load,multi,fp,fpload,fpstore")
1281 (set_attr "length" "2,3,3,3,2,3,3")])
1282
1283 ;; Floating-point move insns.
1284
1285 ;; This pattern forces (set (reg:SF ...) (const_double ...))
1286 ;; to be reloaded by putting the constant into memory.
1287 ;; It must come before the more general movsf pattern.
1288 (define_insn ""
1289 [(set (match_operand:SF 0 "general_operand" "=?r,f,m")
1290 (match_operand:SF 1 "" "?E,m,G"))]
1291 "TARGET_FPU && GET_CODE (operands[1]) == CONST_DOUBLE"
1292 "*
1293 {
1294 switch (which_alternative)
1295 {
1296 case 0:
1297 return singlemove_string (operands);
1298 case 1:
1299 return \"ld %1,%0\";
1300 case 2:
1301 return \"st %%g0,%0\";
1302 }
1303 }"
1304 [(set_attr "type" "load,fpload,store")
1305 (set_attr "length" "2,1,1")])
1306
1307 (define_expand "movsf"
1308 [(set (match_operand:SF 0 "general_operand" "")
1309 (match_operand:SF 1 "general_operand" ""))]
1310 ""
1311 "
1312 {
1313 if (emit_move_sequence (operands, SFmode, NULL_RTX))
1314 DONE;
1315 }")
1316
1317 (define_insn ""
1318 [(set (match_operand:SF 0 "reg_or_nonsymb_mem_operand" "=f,r,f,r,Q,Q")
1319 (match_operand:SF 1 "reg_or_nonsymb_mem_operand" "f,r,Q,Q,f,r"))]
1320 "TARGET_FPU
1321 && (register_operand (operands[0], SFmode)
1322 || register_operand (operands[1], SFmode))"
1323 "@
1324 fmovs %1,%0
1325 mov %1,%0
1326 ld %1,%0
1327 ld %1,%0
1328 st %r1,%0
1329 st %r1,%0"
1330 [(set_attr "type" "fp,move,fpload,load,fpstore,store")])
1331
1332 ;; Exactly the same as above, except that all `f' cases are deleted.
1333 ;; This is necessary to prevent reload from ever trying to use a `f' reg
1334 ;; when -mno-fpu.
1335
1336 (define_insn ""
1337 [(set (match_operand:SF 0 "reg_or_nonsymb_mem_operand" "=r,r,Q")
1338 (match_operand:SF 1 "reg_or_nonsymb_mem_operand" "r,Q,r"))]
1339 "! TARGET_FPU
1340 && (register_operand (operands[0], SFmode)
1341 || register_operand (operands[1], SFmode))"
1342 "@
1343 mov %1,%0
1344 ld %1,%0
1345 st %r1,%0"
1346 [(set_attr "type" "move,load,store")])
1347
1348 (define_insn ""
1349 [(set (mem:SF (match_operand:SI 0 "symbolic_operand" "i"))
1350 (match_operand:SF 1 "reg_or_0_operand" "rfG"))
1351 (clobber (match_scratch:SI 2 "=&r"))]
1352 ""
1353 "sethi %%hi(%a0),%2\;st %r1,[%2+%%lo(%a0)]"
1354 [(set_attr "type" "store")
1355 (set_attr "length" "2")])
1356 \f
1357 ;;- zero extension instructions
1358
1359 ;; These patterns originally accepted general_operands; however, slightly
1360 ;; better code is generated by only accepting register_operands, and then
1361 ;; letting combine generate the ldu[hb] insns.
1362
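;; Illustrative sketch: with a register source, the expander below turns
;; the HImode zero extension into a shift pair, roughly
;; "sll %reg,16,%tmp; srl %tmp,16,%dst", while a memory source is left
;; for combine to turn into the single "lduh" insn that follows.
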
1363 (define_expand "zero_extendhisi2"
1364 [(set (match_operand:SI 0 "register_operand" "")
1365 (zero_extend:SI (match_operand:HI 1 "register_operand" "")))]
1366 ""
1367 "
1368 {
1369 rtx temp = gen_reg_rtx (SImode);
1370 rtx shift_16 = gen_rtx (CONST_INT, VOIDmode, 16);
1371
1372 if (GET_CODE (operand1) == SUBREG)
1373 operand1 = XEXP (operand1, 0);
1374
1375 emit_insn (gen_ashlsi3 (temp, gen_rtx (SUBREG, SImode, operand1, 0),
1376 shift_16));
1377 emit_insn (gen_lshrsi3 (operand0, temp, shift_16));
1378 DONE;
1379 }")
1380
1381 (define_insn ""
1382 [(set (match_operand:SI 0 "register_operand" "=r")
1383 (zero_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
1384 ""
1385 "lduh %1,%0"
1386 [(set_attr "type" "load")])
1387
1388 (define_expand "zero_extendqihi2"
1389 [(set (match_operand:HI 0 "register_operand" "")
1390 (zero_extend:HI (match_operand:QI 1 "register_operand" "")))]
1391 ""
1392 "")
1393
1394 (define_insn ""
1395 [(set (match_operand:HI 0 "register_operand" "=r,r")
1396 (zero_extend:HI (match_operand:QI 1 "sparc_operand" "r,Q")))]
1397 "GET_CODE (operands[1]) != CONST_INT"
1398 "@
1399 and %1,0xff,%0
1400 ldub %1,%0"
1401 [(set_attr "type" "unary,load")
1402 (set_attr "length" "1")])
1403
1404 (define_expand "zero_extendqisi2"
1405 [(set (match_operand:SI 0 "register_operand" "")
1406 (zero_extend:SI (match_operand:QI 1 "register_operand" "")))]
1407 ""
1408 "")
1409
1410 (define_insn ""
1411 [(set (match_operand:SI 0 "register_operand" "=r,r")
1412 (zero_extend:SI (match_operand:QI 1 "sparc_operand" "r,Q")))]
1413 "GET_CODE (operands[1]) != CONST_INT"
1414 "@
1415 and %1,0xff,%0
1416 ldub %1,%0"
1417 [(set_attr "type" "unary,load")
1418 (set_attr "length" "1")])
1419
1420 (define_insn ""
1421 [(set (reg:CC 0)
1422 (compare:CC (zero_extend:SI (match_operand:QI 0 "register_operand" "r"))
1423 (const_int 0)))]
1424 ""
1425 "andcc %0,0xff,%%g0"
1426 [(set_attr "type" "compare")])
1427
1428 (define_insn ""
1429 [(set (reg:CC 0)
1430 (compare:CC (zero_extend:SI (match_operand:QI 1 "register_operand" "r"))
1431 (const_int 0)))
1432 (set (match_operand:SI 0 "register_operand" "=r")
1433 (zero_extend:SI (match_dup 1)))]
1434 ""
1435 "andcc %1,0xff,%0"
1436 [(set_attr "type" "unary")])
1437
1438 ;; Similarly, handle SI->QI mode truncation followed by a compare.
1439
1440 (define_insn ""
1441 [(set (reg:CC 0)
1442 (compare:CC (subreg:QI (match_operand:SI 0 "register_operand" "r") 0)
1443 (const_int 0)))]
1444 ""
1445 "andcc %0,0xff,%%g0"
1446 [(set_attr "type" "compare")])
1447
1448 (define_insn ""
1449 [(set (reg:CC 0)
1450 (compare:CC (subreg:QI (match_operand:SI 1 "register_operand" "r") 0)
1451 (const_int 0)))
1452 (set (match_operand:QI 0 "register_operand" "=r")
1453 (match_dup 1))]
1454 ""
1455 "andcc %1,0xff,%0"
1456 [(set_attr "type" "unary")])
1457 \f
1458 ;;- sign extension instructions
1459
1460 ;; These patterns originally accepted general_operands; however, slightly
1461 ;; better code is generated by only accepting register_operands, and then
1462 ;; letting combine generate the lds[hb] insns.
1463
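;; Illustrative sketch: as with zero extension above, a register source is
;; sign extended with a shift pair (roughly "sll %reg,16,%tmp;
;; sra %tmp,16,%dst" for HImode), while a memory source becomes a single
;; ldsh/ldsb via combine.
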
1464 (define_expand "extendhisi2"
1465 [(set (match_operand:SI 0 "register_operand" "")
1466 (sign_extend:SI (match_operand:HI 1 "register_operand" "")))]
1467 ""
1468 "
1469 {
1470 rtx temp = gen_reg_rtx (SImode);
1471 rtx shift_16 = gen_rtx (CONST_INT, VOIDmode, 16);
1472
1473 if (GET_CODE (operand1) == SUBREG)
1474 operand1 = XEXP (operand1, 0);
1475
1476 emit_insn (gen_ashlsi3 (temp, gen_rtx (SUBREG, SImode, operand1, 0),
1477 shift_16));
1478 emit_insn (gen_ashrsi3 (operand0, temp, shift_16));
1479 DONE;
1480 }")
1481
1482 (define_insn ""
1483 [(set (match_operand:SI 0 "register_operand" "=r")
1484 (sign_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
1485 ""
1486 "ldsh %1,%0"
1487 [(set_attr "type" "load")])
1488
1489 (define_expand "extendqihi2"
1490 [(set (match_operand:HI 0 "register_operand" "")
1491 (sign_extend:HI (match_operand:QI 1 "register_operand" "")))]
1492 ""
1493 "
1494 {
1495 rtx temp = gen_reg_rtx (SImode);
1496 rtx shift_24 = gen_rtx (CONST_INT, VOIDmode, 24);
1497
1498 if (GET_CODE (operand1) == SUBREG)
1499 operand1 = XEXP (operand1, 0);
1500 if (GET_CODE (operand0) == SUBREG)
1501 operand0 = XEXP (operand0, 0);
1502 emit_insn (gen_ashlsi3 (temp, gen_rtx (SUBREG, SImode, operand1, 0),
1503 shift_24));
1504 if (GET_MODE (operand0) != SImode)
1505 operand0 = gen_rtx (SUBREG, SImode, operand0, 0);
1506 emit_insn (gen_ashrsi3 (operand0, temp, shift_24));
1507 DONE;
1508 }")
1509
1510 (define_insn ""
1511 [(set (match_operand:HI 0 "register_operand" "=r")
1512 (sign_extend:HI (match_operand:QI 1 "memory_operand" "m")))]
1513 ""
1514 "ldsb %1,%0"
1515 [(set_attr "type" "load")])
1516
1517 (define_expand "extendqisi2"
1518 [(set (match_operand:SI 0 "register_operand" "")
1519 (sign_extend:SI (match_operand:QI 1 "register_operand" "")))]
1520 ""
1521 "
1522 {
1523 rtx temp = gen_reg_rtx (SImode);
1524 rtx shift_24 = gen_rtx (CONST_INT, VOIDmode, 24);
1525
1526 if (GET_CODE (operand1) == SUBREG)
1527 operand1 = XEXP (operand1, 0);
1528 emit_insn (gen_ashlsi3 (temp, gen_rtx (SUBREG, SImode, operand1, 0),
1529 shift_24));
1530 emit_insn (gen_ashrsi3 (operand0, temp, shift_24));
1531 DONE;
1532 }")
1533
1534 (define_insn ""
1535 [(set (match_operand:SI 0 "register_operand" "=r")
1536 (sign_extend:SI (match_operand:QI 1 "memory_operand" "m")))]
1537 ""
1538 "ldsb %1,%0"
1539 [(set_attr "type" "load")])
1540 \f
1541 ;; Special pattern for optimizing bit-field compares. This is needed
1542 ;; because combine uses this as a canonical form.
1543
1544 (define_insn ""
1545 [(set (reg:CC 0)
1546 (compare:CC
1547 (zero_extract:SI (match_operand:SI 0 "register_operand" "r")
1548 (match_operand:SI 1 "small_int" "n")
1549 (match_operand:SI 2 "small_int" "n"))
1550 (const_int 0)))]
1551 "INTVAL (operands[2]) > 19"
1552 "*
1553 {
1554 int len = INTVAL (operands[1]);
1555 int pos = 32 - INTVAL (operands[2]) - len;
1556 unsigned mask = ((1 << len) - 1) << pos;
1557
1558 operands[1] = gen_rtx (CONST_INT, VOIDmode, mask);
1559 return \"andcc %0,%1,%%g0\";
1560 }")
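;; Worked example (illustrative): for a 4-bit field with operands[1] = 4
;; and operands[2] = 24, the code above computes pos = 32 - 24 - 4 = 4 and
;; mask = ((1 << 4) - 1) << 4 = 0xf0, so the whole bit-field test becomes a
;; single andcc of %0 against that mask.
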
1561 \f
1562 ;; Conversions between float, double and long double.
1563
1564 (define_insn "extendsfdf2"
1565 [(set (match_operand:DF 0 "register_operand" "=f")
1566 (float_extend:DF
1567 (match_operand:SF 1 "register_operand" "f")))]
1568 "TARGET_FPU"
1569 "fstod %1,%0"
1570 [(set_attr "type" "fp")])
1571
1572 (define_insn "extendsftf2"
1573 [(set (match_operand:TF 0 "register_operand" "=f")
1574 (float_extend:TF
1575 (match_operand:SF 1 "register_operand" "f")))]
1576 "TARGET_FPU"
1577 "fstoq %1,%0"
1578 [(set_attr "type" "fp")])
1579
1580 (define_insn "extenddftf2"
1581 [(set (match_operand:TF 0 "register_operand" "=f")
1582 (float_extend:TF
1583 (match_operand:DF 1 "register_operand" "f")))]
1584 "TARGET_FPU"
1585 "fdtoq %1,%0"
1586 [(set_attr "type" "fp")])
1587
1588 (define_insn "truncdfsf2"
1589 [(set (match_operand:SF 0 "register_operand" "=f")
1590 (float_truncate:SF
1591 (match_operand:DF 1 "register_operand" "f")))]
1592 "TARGET_FPU"
1593 "fdtos %1,%0"
1594 [(set_attr "type" "fp")])
1595
1596 (define_insn "trunctfsf2"
1597 [(set (match_operand:SF 0 "register_operand" "=f")
1598 (float_truncate:SF
1599 (match_operand:TF 1 "register_operand" "f")))]
1600 "TARGET_FPU"
1601 "fqtos %1,%0"
1602 [(set_attr "type" "fp")])
1603
1604 (define_insn "trunctfdf2"
1605 [(set (match_operand:DF 0 "register_operand" "=f")
1606 (float_truncate:DF
1607 (match_operand:TF 1 "register_operand" "f")))]
1608 "TARGET_FPU"
1609 "fqtod %1,%0"
1610 [(set_attr "type" "fp")])
1611 \f
1612 ;; Conversion between fixed point and floating point.
1613
1614 (define_insn "floatsisf2"
1615 [(set (match_operand:SF 0 "register_operand" "=f")
1616 (float:SF (match_operand:SI 1 "register_operand" "f")))]
1617 "TARGET_FPU"
1618 "fitos %1,%0"
1619 [(set_attr "type" "fp")])
1620
1621 (define_insn "floatsidf2"
1622 [(set (match_operand:DF 0 "register_operand" "=f")
1623 (float:DF (match_operand:SI 1 "register_operand" "f")))]
1624 "TARGET_FPU"
1625 "fitod %1,%0"
1626 [(set_attr "type" "fp")])
1627
1628 (define_insn "floatsitf2"
1629 [(set (match_operand:TF 0 "register_operand" "=f")
1630 (float:TF (match_operand:SI 1 "register_operand" "f")))]
1631 "TARGET_FPU"
1632 "fitoq %1,%0"
1633 [(set_attr "type" "fp")])
1634
1635 ;; Convert a float to an actual integer.
1636 ;; Truncation is performed as part of the conversion.
1637
1638 (define_insn "fix_truncsfsi2"
1639 [(set (match_operand:SI 0 "register_operand" "=f")
1640 (fix:SI (fix:SF (match_operand:SF 1 "register_operand" "f"))))]
1641 "TARGET_FPU"
1642 "fstoi %1,%0"
1643 [(set_attr "type" "fp")])
1644
1645 (define_insn "fix_truncdfsi2"
1646 [(set (match_operand:SI 0 "register_operand" "=f")
1647 (fix:SI (fix:DF (match_operand:DF 1 "register_operand" "f"))))]
1648 "TARGET_FPU"
1649 "fdtoi %1,%0"
1650 [(set_attr "type" "fp")])
1651
1652 (define_insn "fix_trunctfsi2"
1653 [(set (match_operand:SI 0 "register_operand" "=f")
1654 (fix:SI (fix:TF (match_operand:TF 1 "register_operand" "f"))))]
1655 "TARGET_FPU"
1656 "fqtoi %1,%0"
1657 [(set_attr "type" "fp")])
1658 \f
1659 ;;- arithmetic instructions
1660
1661 (define_insn "adddi3"
1662 [(set (match_operand:DI 0 "register_operand" "=r")
1663 (plus:DI (match_operand:DI 1 "arith_double_operand" "%r")
1664 (match_operand:DI 2 "arith_double_operand" "rHI")))
1665 (clobber (reg:SI 0))]
1666 ""
1667 "*
1668 {
1669 rtx op2 = operands[2];
1670
1671 /* If constant is positive, upper bits zeroed, otherwise unchanged.
1672 Give the assembler a chance to pick the move instruction. */
1673 if (GET_CODE (op2) == CONST_INT)
1674 {
1675 int sign = INTVAL (op2);
1676 if (sign < 0)
1677 return \"addcc %R1,%2,%R0\;addx %1,-1,%0\";
1678 return \"addcc %R1,%2,%R0\;addx %1,0,%0\";
1679 }
1680 else if (GET_CODE (op2) == CONST_DOUBLE)
1681 {
1682 int sign = CONST_DOUBLE_HIGH (op2);
1683 operands[2] = gen_rtx (CONST_INT, VOIDmode,
1684                              CONST_DOUBLE_LOW (op2));
1685 if (sign < 0)
1686 return \"addcc %R1,%2,%R0\;addx %1,-1,%0\";
1687 return \"addcc %R1,%2,%R0\;addx %1,0,%0\";
1688 }
1689 return \"addcc %R1,%R2,%R0\;addx %1,%2,%0\";
1690 }"
1691 [(set_attr "length" "2")])
1692
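;; Rough illustration of adddi3 above: adding a small negative constant,
;; say "x + (-5)" with x in a register pair, comes out as
;;
;;   addcc %R1,-5,%R0     ! low word; sets the carry
;;   addx  %1,-1,%0       ! high word plus carry; -1 is the sign extension
;;
;; while a non-constant addend uses the final addcc/addx form.
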
1693 (define_insn "addsi3"
1694 [(set (match_operand:SI 0 "register_operand" "=r")
1695 (plus:SI (match_operand:SI 1 "arith_operand" "%r")
1696 (match_operand:SI 2 "arith_operand" "rI")))]
1697 ""
1698 "add %1,%2,%0")
1699
1700 (define_insn ""
1701 [(set (reg:CC_NOOV 0)
1702 (compare:CC_NOOV (plus:SI (match_operand:SI 0 "arith_operand" "%r")
1703 (match_operand:SI 1 "arith_operand" "rI"))
1704 (const_int 0)))]
1705 ""
1706 "addcc %0,%1,%%g0"
1707 [(set_attr "type" "compare")])
1708
1709 (define_insn ""
1710 [(set (reg:CC_NOOV 0)
1711 (compare:CC_NOOV (plus:SI (match_operand:SI 1 "arith_operand" "%r")
1712 (match_operand:SI 2 "arith_operand" "rI"))
1713 (const_int 0)))
1714 (set (match_operand:SI 0 "register_operand" "=r")
1715 (plus:SI (match_dup 1) (match_dup 2)))]
1716 ""
1717 "addcc %1,%2,%0")
1718
1719 (define_insn "subdi3"
1720 [(set (match_operand:DI 0 "register_operand" "=r")
1721 (minus:DI (match_operand:DI 1 "register_operand" "r")
1722 (match_operand:DI 2 "arith_double_operand" "rHI")))
1723 (clobber (reg:SI 0))]
1724 ""
1725 "*
1726 {
1727 rtx op2 = operands[2];
1728
1729 /* If constant is positive, upper bits zeroed, otherwise unchanged.
1730 Give the assembler a chance to pick the move instruction. */
1731 if (GET_CODE (op2) == CONST_INT)
1732 {
1733 int sign = INTVAL (op2);
1734 if (sign < 0)
1735 return \"subcc %R1,%2,%R0\;subx %1,-1,%0\";
1736 return \"subcc %R1,%2,%R0\;subx %1,0,%0\";
1737 }
1738 else if (GET_CODE (op2) == CONST_DOUBLE)
1739 {
1740 int sign = CONST_DOUBLE_HIGH (op2);
1741 operands[2] = gen_rtx (CONST_INT, VOIDmode,
1742                              CONST_DOUBLE_LOW (op2));
1743 if (sign < 0)
1744 return \"subcc %R1,%2,%R0\;subx %1,-1,%0\";
1745 return \"subcc %R1,%2,%R0\;subx %1,0,%0\";
1746 }
1747 return \"subcc %R1,%R2,%R0\;subx %1,%2,%0\";
1748 }"
1749 [(set_attr "length" "2")])
1750
1751 (define_insn "subsi3"
1752 [(set (match_operand:SI 0 "register_operand" "=r")
1753 (minus:SI (match_operand:SI 1 "register_operand" "r")
1754 (match_operand:SI 2 "arith_operand" "rI")))]
1755 ""
1756 "sub %1,%2,%0")
1757
1758 (define_insn ""
1759 [(set (reg:CC_NOOV 0)
1760 (compare:CC_NOOV (minus:SI (match_operand:SI 0 "register_operand" "r")
1761 (match_operand:SI 1 "arith_operand" "rI"))
1762 (const_int 0)))]
1763 ""
1764 "subcc %0,%1,%%g0"
1765 [(set_attr "type" "compare")])
1766
1767 (define_insn ""
1768 [(set (reg:CC_NOOV 0)
1769 (compare:CC_NOOV (minus:SI (match_operand:SI 1 "register_operand" "r")
1770 (match_operand:SI 2 "arith_operand" "rI"))
1771 (const_int 0)))
1772 (set (match_operand:SI 0 "register_operand" "=r")
1773 (minus:SI (match_dup 1) (match_dup 2)))]
1774 ""
1775 "subcc %1,%2,%0")
1776
1777 (define_insn "mulsi3"
1778 [(set (match_operand:SI 0 "register_operand" "=r")
1779 (mult:SI (match_operand:SI 1 "arith_operand" "%r")
1780 (match_operand:SI 2 "arith_operand" "rI")))]
1781 "TARGET_V8 || TARGET_SPARCLITE"
1782 "smul %1,%2,%0")
1783
1784 ;; It is not known whether this will match.
1785
1786 (define_insn ""
1787 [(set (match_operand:SI 0 "register_operand" "=r")
1788 (mult:SI (match_operand:SI 1 "arith_operand" "%r")
1789 (match_operand:SI 2 "arith_operand" "rI")))
1790 (set (reg:CC_NOOV 0)
1791 (compare:CC_NOOV (mult:SI (match_dup 1) (match_dup 2))
1792 (const_int 0)))]
1793 "TARGET_V8 || TARGET_SPARCLITE"
1794 "smulcc %1,%2,%0")
1795
1796 (define_expand "mulsidi3"
1797 [(set (match_operand:DI 0 "register_operand" "")
1798 (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" ""))
1799 (sign_extend:DI (match_operand:SI 2 "arith_operand" ""))))]
1800 "TARGET_V8 || TARGET_SPARCLITE"
1801 "
1802 {
1803 if (CONSTANT_P (operands[2]))
1804 {
1805 emit_insn (gen_const_mulsidi3 (operands[0], operands[1], operands[2]));
1806 DONE;
1807 }
1808 }")
1809
1810 (define_insn ""
1811 [(set (match_operand:DI 0 "register_operand" "=r")
1812 (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
1813 (sign_extend:DI (match_operand:SI 2 "register_operand" "r"))))]
1814 "TARGET_V8 || TARGET_SPARCLITE"
1815 "smul %1,%2,%R0\;rd %%y,%0"
1816 [(set_attr "length" "2")])
1817
1818 ;; Extra pattern, because sign_extend of a constant isn't legal.
1819
1820 (define_insn "const_mulsidi3"
1821 [(set (match_operand:DI 0 "register_operand" "=r")
1822 (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
1823 (match_operand:SI 2 "small_int" "I")))]
1824 "TARGET_V8 || TARGET_SPARCLITE"
1825 "smul %1,%2,%R0\;rd %%y,%0"
1826 [(set_attr "length" "2")])
1827
1828 (define_expand "umulsidi3"
1829 [(set (match_operand:DI 0 "register_operand" "")
1830 (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" ""))
1831 (zero_extend:DI (match_operand:SI 2 "arith_operand" ""))))]
1832 "TARGET_V8 || TARGET_SPARCLITE"
1833 "
1834 {
1835 if (CONSTANT_P (operands[2]))
1836 {
1837 emit_insn (gen_const_umulsidi3 (operands[0], operands[1], operands[2]));
1838 DONE;
1839 }
1840 }")
1841
1842 (define_insn ""
1843 [(set (match_operand:DI 0 "register_operand" "=r")
1844 (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
1845 (zero_extend:DI (match_operand:SI 2 "register_operand" "r"))))]
1846 "TARGET_V8 || TARGET_SPARCLITE"
1847 "umul %1,%2,%R0\;rd %%y,%0"
1848 [(set_attr "length" "2")])
1849
1850 ;; Extra pattern, because zero_extend of a constant isn't legal.
1851
1852 (define_insn "const_umulsidi3"
1853 [(set (match_operand:DI 0 "register_operand" "=r")
1854 (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
1855 (match_operand:SI 2 "small_int" "I")))]
1856 "TARGET_V8 || TARGET_SPARCLITE"
1857 "umul %1,%2,%R0\;rd %%y,%0"
1858 [(set_attr "length" "2")])
1859
1860 ;; The architecture specifies that there must be 3 instructions between
1861 ;; a y register write and a use of it for correct results.
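;; For example, the signed divide pattern below emits a sequence of this
;; shape (a sketch with hypothetical register assignments):
;;     sra  %o0,31,%o2   ! sign-extend the dividend into the scratch
;;     wr   %g0,%o2,%y   ! put the upper 32 bits of the dividend in %y
;;     nop; nop; nop     ! satisfy the 3-instruction separation
;;     sdiv %o0,%o1,%o0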
1862
1863 (define_insn "divsi3"
1864 [(set (match_operand:SI 0 "register_operand" "=r")
1865 (div:SI (match_operand:SI 1 "register_operand" "r")
1866 (match_operand:SI 2 "arith_operand" "rI")))
1867 (clobber (match_scratch:SI 3 "=&r"))]
1868 "TARGET_V8"
1869 "sra %1,31,%3\;wr %%g0,%3,%%y\;nop\;nop\;nop\;sdiv %1,%2,%0"
1870 [(set_attr "length" "6")])
1871
1872 ;; It is not known whether this will match.
1873
1874 (define_insn ""
1875 [(set (match_operand:SI 0 "register_operand" "=r")
1876 (div:SI (match_operand:SI 1 "register_operand" "r")
1877 (match_operand:SI 2 "arith_operand" "rI")))
1878 (set (reg:CC 0)
1879 (compare:CC (div:SI (match_dup 1) (match_dup 2))
1880 (const_int 0)))
1881 (clobber (match_scratch:SI 3 "=&r"))]
1882 "TARGET_V8"
1883 "sra %1,31,%3\;wr %%g0,%3,%%y\;nop\;nop\;nop\;sdivcc %1,%2,%0"
1884 [(set_attr "length" "6")])
1885
1886 (define_insn "udivsi3"
1887 [(set (match_operand:SI 0 "register_operand" "=r")
1888 (udiv:SI (match_operand:SI 1 "register_operand" "r")
1889 (match_operand:SI 2 "arith_operand" "rI")))]
1890 "TARGET_V8"
1891 "wr %%g0,%%g0,%%y\;nop\;nop\;nop\;udiv %1,%2,%0"
1892 [(set_attr "length" "5")])
1893
1894 ;; It is not known whether this will match.
1895
1896 (define_insn ""
1897 [(set (match_operand:SI 0 "register_operand" "=r")
1898 (udiv:SI (match_operand:SI 1 "register_operand" "r")
1899 (match_operand:SI 2 "arith_operand" "rI")))
1900 (set (reg:CC 0)
1901 (compare:CC (udiv:SI (match_dup 1) (match_dup 2))
1902 (const_int 0)))]
1903 "TARGET_V8"
1904 "wr %%g0,%%g0,%%y\;nop\;nop\;nop\;udivcc %1,%2,%0"
1905 [(set_attr "length" "5")])
1906
1907 ;;- and instructions
1908 ;; We define DImode `and` so that with DImode `not` we can get
1909 ;; DImode `andn`. Other combinations are possible.
1910
1911 (define_expand "anddi3"
1912 [(set (match_operand:DI 0 "register_operand" "")
1913 (and:DI (match_operand:DI 1 "arith_double_operand" "")
1914 (match_operand:DI 2 "arith_double_operand" "")))]
1915 ""
1916 "")
1917
1918 (define_insn ""
1919 [(set (match_operand:DI 0 "register_operand" "=r")
1920 (and:DI (match_operand:DI 1 "arith_double_operand" "%r")
1921 (match_operand:DI 2 "arith_double_operand" "rHI")))]
1922 ""
1923 "*
1924 {
1925 rtx op2 = operands[2];
1926
1927 /* If constant is positive, upper bits zeroed, otherwise unchanged.
1928 Give the assembler a chance to pick the move instruction. */
1929 if (GET_CODE (op2) == CONST_INT)
1930 {
1931 int sign = INTVAL (op2);
1932 if (sign < 0)
1933 return \"mov %1,%0\;and %R1,%2,%R0\";
1934 return \"mov 0,%0\;and %R1,%2,%R0\";
1935 }
1936 else if (GET_CODE (op2) == CONST_DOUBLE)
1937 {
1938 int sign = CONST_DOUBLE_HIGH (op2);
1939 operands[2] = gen_rtx (CONST_INT, VOIDmode,
1940 CONST_DOUBLE_LOW (op2));
1941 if (sign < 0)
1942 return \"mov %1,%0\;and %R1,%2,%R0\";
1943 return \"mov 0,%0\;and %R1,%2,%R0\";
1944 }
1945 return \"and %1,%2,%0\;and %R1,%R2,%R0\";
1946 }"
1947 [(set_attr "length" "2")])
1948
1949 (define_insn "andsi3"
1950 [(set (match_operand:SI 0 "register_operand" "=r")
1951 (and:SI (match_operand:SI 1 "arith_operand" "%r")
1952 (match_operand:SI 2 "arith_operand" "rI")))]
1953 ""
1954 "and %1,%2,%0")
1955
1956 (define_split
1957 [(set (match_operand:SI 0 "register_operand" "")
1958 (and:SI (match_operand:SI 1 "register_operand" "")
1959 (match_operand:SI 2 "" "")))
1960 (clobber (match_operand:SI 3 "register_operand" ""))]
1961 "GET_CODE (operands[2]) == CONST_INT
1962 && !SMALL_INT (operands[2])
1963 && (INTVAL (operands[2]) & 0x3ff) == 0x3ff"
1964 [(set (match_dup 3) (match_dup 4))
1965 (set (match_dup 0) (and:SI (not:SI (match_dup 3)) (match_dup 1)))]
1966 "
1967 {
1968 operands[4] = gen_rtx (CONST_INT, VOIDmode, ~INTVAL (operands[2]));
1969 }")
1970
1971 (define_insn ""
1972 [(set (match_operand:DI 0 "register_operand" "=r")
1973 (and:DI (not:DI (match_operand:DI 1 "register_operand" "r"))
1974 (match_operand:DI 2 "register_operand" "r")))]
1975 ""
1976 "andn %2,%1,%0\;andn %R2,%R1,%R0"
1977 [(set_attr "length" "2")])
1978
1979 (define_insn ""
1980 [(set (match_operand:SI 0 "register_operand" "=r")
1981 (and:SI (not:SI (match_operand:SI 1 "register_operand" "r"))
1982 (match_operand:SI 2 "register_operand" "r")))]
1983 ""
1984 "andn %2,%1,%0")
1985
1986 (define_expand "iordi3"
1987 [(set (match_operand:DI 0 "register_operand" "")
1988 (ior:DI (match_operand:DI 1 "arith_double_operand" "")
1989 (match_operand:DI 2 "arith_double_operand" "")))]
1990 ""
1991 "")
1992
1993 (define_insn ""
1994 [(set (match_operand:DI 0 "register_operand" "=r")
1995 (ior:DI (match_operand:DI 1 "arith_double_operand" "%r")
1996 (match_operand:DI 2 "arith_double_operand" "rHI")))]
1997 ""
1998 "*
1999 {
2000 rtx op2 = operands[2];
2001
2002 /* If constant is positive, upper bits zeroed, otherwise unchanged.
2003 Give the assembler a chance to pick the move instruction. */
2004 if (GET_CODE (op2) == CONST_INT)
2005 {
2006 int sign = INTVAL (op2);
2007 if (sign < 0)
2008 return \"mov -1,%0\;or %R1,%2,%R0\";
2009 return \"mov %1,%0\;or %R1,%2,%R0\";
2010 }
2011 else if (GET_CODE (op2) == CONST_DOUBLE)
2012 {
2013 int sign = CONST_DOUBLE_HIGH (op2);
2014 operands[2] = gen_rtx (CONST_INT, VOIDmode,
2015 CONST_DOUBLE_LOW (op2));
2016 if (sign < 0)
2017 return \"mov -1,%0\;or %R1,%2,%R0\";
2018 return \"mov %1,%0\;or %R1,%2,%R0\";
2019 }
2020 return \"or %1,%2,%0\;or %R1,%R2,%R0\";
2021 }"
2022 [(set_attr "length" "2")])
2023
2024 (define_insn "iorsi3"
2025 [(set (match_operand:SI 0 "register_operand" "=r")
2026 (ior:SI (match_operand:SI 1 "arith_operand" "%r")
2027 (match_operand:SI 2 "arith_operand" "rI")))]
2028 ""
2029 "or %1,%2,%0")
2030
2031 (define_split
2032 [(set (match_operand:SI 0 "register_operand" "")
2033 (ior:SI (match_operand:SI 1 "register_operand" "")
2034 (match_operand:SI 2 "" "")))
2035 (clobber (match_operand:SI 3 "register_operand" ""))]
2036 "GET_CODE (operands[2]) == CONST_INT
2037 && !SMALL_INT (operands[2])
2038 && (INTVAL (operands[2]) & 0x3ff) == 0x3ff"
2039 [(set (match_dup 3) (match_dup 4))
2040 (set (match_dup 0) (ior:SI (not:SI (match_dup 3)) (match_dup 1)))]
2041 "
2042 {
2043 operands[4] = gen_rtx (CONST_INT, VOIDmode, ~INTVAL (operands[2]));
2044 }")
2045
2046 (define_insn ""
2047 [(set (match_operand:DI 0 "register_operand" "=r")
2048 (ior:DI (not:DI (match_operand:DI 1 "register_operand" "r"))
2049 (match_operand:DI 2 "register_operand" "r")))]
2050 ""
2051 "orn %2,%1,%0\;orn %R2,%R1,%R0"
2052 [(set_attr "length" "2")])
2053
2054 (define_insn ""
2055 [(set (match_operand:SI 0 "register_operand" "=r")
2056 (ior:SI (not:SI (match_operand:SI 1 "register_operand" "r"))
2057 (match_operand:SI 2 "register_operand" "r")))]
2058 ""
2059 "orn %2,%1,%0")
2060
2061 (define_expand "xordi3"
2062 [(set (match_operand:DI 0 "register_operand" "")
2063 (xor:DI (match_operand:DI 1 "arith_double_operand" "")
2064 (match_operand:DI 2 "arith_double_operand" "")))]
2065 ""
2066 "")
2067
2068 (define_insn ""
2069 [(set (match_operand:DI 0 "register_operand" "=r")
2070 (xor:DI (match_operand:DI 1 "arith_double_operand" "%r")
2071 (match_operand:DI 2 "arith_double_operand" "rHI")))]
2072 ""
2073 "*
2074 {
2075 rtx op2 = operands[2];
2076
2077 /* If constant is positive, upper bits zeroed, otherwise unchanged.
2078 Give the assembler a chance to pick the move instruction. */
2079 if (GET_CODE (op2) == CONST_INT)
2080 {
2081 int sign = INTVAL (op2);
2082 if (sign < 0)
2083 return \"xor %1,-1,%0\;xor %R1,%2,%R0\";
2084 return \"mov %1,%0\;xor %R1,%2,%R0\";
2085 }
2086 else if (GET_CODE (op2) == CONST_DOUBLE)
2087 {
2088 int sign = CONST_DOUBLE_HIGH (op2);
2089 operands[2] = gen_rtx (CONST_INT, VOIDmode,
2090 CONST_DOUBLE_LOW (op2));
2091 if (sign < 0)
2092 return \"xor %1,-1,%0\;xor %R1,%2,%R0\";
2093 return \"mov %1,%0\;xor %R1,%2,%R0\";
2094 }
2095 return \"xor %1,%2,%0\;xor %R1,%R2,%R0\";
2096 }"
2097 [(set_attr "length" "2")])
2098
2099 (define_insn "xorsi3"
2100 [(set (match_operand:SI 0 "register_operand" "=r")
2101 (xor:SI (match_operand:SI 1 "arith_operand" "%rJ")
2102 (match_operand:SI 2 "arith_operand" "rI")))]
2103 ""
2104 "xor %r1,%2,%0")
2105
2106 (define_split
2107 [(set (match_operand:SI 0 "register_operand" "")
2108 (xor:SI (match_operand:SI 1 "register_operand" "")
2109 (match_operand:SI 2 "" "")))
2110 (clobber (match_operand:SI 3 "register_operand" ""))]
2111 "GET_CODE (operands[2]) == CONST_INT
2112 && !SMALL_INT (operands[2])
2113 && (INTVAL (operands[2]) & 0x3ff) == 0x3ff"
2114 [(set (match_dup 3) (match_dup 4))
2115 (set (match_dup 0) (not:SI (xor:SI (match_dup 3) (match_dup 1))))]
2116 "
2117 {
2118 operands[4] = gen_rtx (CONST_INT, VOIDmode, ~INTVAL (operands[2]));
2119 }")
2120
2121 (define_split
2122 [(set (match_operand:SI 0 "register_operand" "")
2123 (not:SI (xor:SI (match_operand:SI 1 "register_operand" "")
2124 (match_operand:SI 2 "" ""))))
2125 (clobber (match_operand:SI 3 "register_operand" ""))]
2126 "GET_CODE (operands[2]) == CONST_INT
2127 && !SMALL_INT (operands[2])
2128 && (INTVAL (operands[2]) & 0x3ff) == 0x3ff"
2129 [(set (match_dup 3) (match_dup 4))
2130 (set (match_dup 0) (xor:SI (match_dup 3) (match_dup 1)))]
2131 "
2132 {
2133 operands[4] = gen_rtx (CONST_INT, VOIDmode, ~INTVAL (operands[2]));
2134 }")
2135
2136 ;; xnor patterns. Note that (a ^ ~b) == (~a ^ b) == ~(a ^ b).
2137 ;; Combine now canonicalizes to the rightmost expression.
2138 (define_insn ""
2139 [(set (match_operand:DI 0 "register_operand" "=r")
2140 (not:DI (xor:DI (match_operand:DI 1 "register_operand" "r")
2141 (match_operand:DI 2 "register_operand" "r"))))]
2142 ""
2143 "xnor %1,%2,%0\;xnor %R1,%R2,%R0"
2144 [(set_attr "length" "2")])
2145
2146 (define_insn ""
2147 [(set (match_operand:SI 0 "register_operand" "=r")
2148 (not:SI (xor:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
2149 (match_operand:SI 2 "arith_operand" "rI"))))]
2150 ""
2151 "xnor %r1,%2,%0")
2152
2153 ;; These correspond to the above in the case where we also (or only)
2154 ;; want to set the condition code.
2155
2156 (define_insn ""
2157 [(set (reg:CC 0)
2158 (compare:CC
2159 (match_operator:SI 2 "cc_arithop"
2160 [(match_operand:SI 0 "arith_operand" "%r")
2161 (match_operand:SI 1 "arith_operand" "rI")])
2162 (const_int 0)))]
2163 ""
2164 "%A2cc %0,%1,%%g0"
2165 [(set_attr "type" "compare")])
2166
2167 (define_insn ""
2168 [(set (reg:CC 0)
2169 (compare:CC
2170 (match_operator:SI 3 "cc_arithop"
2171 [(match_operand:SI 1 "arith_operand" "%r")
2172 (match_operand:SI 2 "arith_operand" "rI")])
2173 (const_int 0)))
2174 (set (match_operand:SI 0 "register_operand" "=r")
2175 (match_dup 3))]
2176 ""
2177 "%A3cc %1,%2,%0")
2178
2179 (define_insn ""
2180 [(set (reg:CC 0)
2181 (compare:CC
2182 (not:SI (xor:SI (match_operand:SI 0 "reg_or_0_operand" "%rJ")
2183 (match_operand:SI 1 "arith_operand" "rI")))
2184 (const_int 0)))]
2185 ""
2186 "xnorcc %r0,%1,%%g0"
2187 [(set_attr "type" "compare")])
2188
2189 (define_insn ""
2190 [(set (reg:CC 0)
2191 (compare:CC
2192 (not:SI (xor:SI (match_operand:SI 1 "reg_or_0_operand" "%rJ")
2193 (match_operand:SI 2 "arith_operand" "rI")))
2194 (const_int 0)))
2195 (set (match_operand:SI 0 "register_operand" "=r")
2196 (not:SI (xor:SI (match_dup 1) (match_dup 2))))]
2197 ""
2198 "xnorcc %r1,%2,%0")
2199
2200 (define_insn ""
2201 [(set (reg:CC 0)
2202 (compare:CC
2203 (match_operator:SI 2 "cc_arithopn"
2204 [(not:SI (match_operand:SI 0 "arith_operand" "rI"))
2205 (match_operand:SI 1 "reg_or_0_operand" "rJ")])
2206 (const_int 0)))]
2207 ""
2208 "%B2cc %r1,%0,%%g0"
2209 [(set_attr "type" "compare")])
2210
2211 (define_insn ""
2212 [(set (reg:CC 0)
2213 (compare:CC
2214 (match_operator:SI 3 "cc_arithopn"
2215 [(not:SI (match_operand:SI 1 "arith_operand" "rI"))
2216 (match_operand:SI 2 "reg_or_0_operand" "rJ")])
2217 (const_int 0)))
2218 (set (match_operand:SI 0 "register_operand" "=r")
2219 (match_dup 3))]
2220 ""
2221 "%B3cc %r2,%1,%0")
2222
2223 ;; We cannot use the "neg" pseudo insn because the Sun assembler
2224 ;; does not know how to make it work for constants.
2225
2226 (define_insn "negdi2"
2227 [(set (match_operand:DI 0 "register_operand" "=r")
2228 (neg:DI (match_operand:DI 1 "register_operand" "r")))
2229 (clobber (reg:SI 0))]
2230 ""
2231 "subcc %%g0,%R1,%R0\;subx %%g0,%1,%0"
2232 [(set_attr "type" "unary")
2233 (set_attr "length" "2")])
2234
2235 (define_insn "negsi2"
2236 [(set (match_operand:SI 0 "general_operand" "=r")
2237 (neg:SI (match_operand:SI 1 "arith_operand" "rI")))]
2238 ""
2239 "sub %%g0,%1,%0"
2240 [(set_attr "type" "unary")])
2241
2242 (define_insn ""
2243 [(set (reg:CC_NOOV 0)
2244 (compare:CC_NOOV (neg:SI (match_operand:SI 0 "arith_operand" "rI"))
2245 (const_int 0)))]
2246 ""
2247 "subcc %%g0,%0,%%g0"
2248 [(set_attr "type" "compare")])
2249
2250 (define_insn ""
2251 [(set (reg:CC_NOOV 0)
2252 (compare:CC_NOOV (neg:SI (match_operand:SI 1 "arith_operand" "rI"))
2253 (const_int 0)))
2254 (set (match_operand:SI 0 "register_operand" "=r")
2255 (neg:SI (match_dup 1)))]
2256 ""
2257 "subcc %%g0,%1,%0"
2258 [(set_attr "type" "unary")])
2259
2260 ;; We cannot use the "not" pseudo insn because the Sun assembler
2261 ;; does not know how to make it work for constants.
2262 (define_expand "one_cmpldi2"
2263 [(set (match_operand:DI 0 "register_operand" "=r")
2264 (not:DI (match_operand:DI 1 "arith_double_operand" "rHI")))]
2265 ""
2266 "")
2267
2268 (define_insn ""
2269 [(set (match_operand:DI 0 "register_operand" "=r")
2270 (not:DI (match_operand:DI 1 "arith_double_operand" "rHI")))]
2271 ""
2272 "*
2273 {
2274 rtx op1 = operands[1];
2275
2276 if (GET_CODE (op1) == CONST_INT)
2277 {
2278 int sign = INTVAL (op1);
2279 if (sign < 0)
2280 return \"xnor %%g0,%1,%R0\;xnor %%g0,-1,%0\";
2281 return \"xnor %%g0,%1,%R0\;xnor %%g0,0,%0\";
2282 }
2283 else if (GET_CODE (op1) == CONST_DOUBLE)
2284 {
2285 int sign = CONST_DOUBLE_HIGH (op1);
2286 operands[1] = gen_rtx (CONST_INT, VOIDmode,
2287 CONST_DOUBLE_LOW (operands[1]));
2288 if (sign < 0)
2289 return \"xnor %%g0,%1,%R0\;xnor %%g0,-1,%0\";
2290 return \"xnor %%g0,%1,%R0\;xnor %%g0,0,%0\";
2291 }
2292 return \"xnor %%g0,%1,%0\;xnor %%g0,%R1,%R0\";
2293 }"
2294 [(set_attr "type" "unary")
2295 (set_attr "length" "2")])
2296
2297 (define_insn "one_cmplsi2"
2298 [(set (match_operand:SI 0 "register_operand" "=r")
2299 (not:SI (match_operand:SI 1 "arith_operand" "rI")))]
2300 ""
2301 "xnor %%g0,%1,%0"
2302 [(set_attr "type" "unary")])
2303
2304 (define_insn ""
2305 [(set (reg:CC 0)
2306 (compare:CC (not:SI (match_operand:SI 0 "arith_operand" "rI"))
2307 (const_int 0)))]
2308 ""
2309 "xnorcc %%g0,%0,%%g0"
2310 [(set_attr "type" "compare")])
2311
2312 (define_insn ""
2313 [(set (reg:CC 0)
2314 (compare:CC (not:SI (match_operand:SI 1 "arith_operand" "rI"))
2315 (const_int 0)))
2316 (set (match_operand:SI 0 "register_operand" "=r")
2317 (not:SI (match_dup 1)))]
2318 ""
2319 "xnorcc %%g0,%1,%0"
2320 [(set_attr "type" "unary")])
2321 \f
2322 ;; Floating point arithmetic instructions.
2323
2324 (define_insn "addtf3"
2325 [(set (match_operand:TF 0 "register_operand" "=f")
2326 (plus:TF (match_operand:TF 1 "register_operand" "f")
2327 (match_operand:TF 2 "register_operand" "f")))]
2328 "TARGET_FPU"
2329 "faddq %1,%2,%0"
2330 [(set_attr "type" "fp")])
2331
2332 (define_insn "adddf3"
2333 [(set (match_operand:DF 0 "register_operand" "=f")
2334 (plus:DF (match_operand:DF 1 "register_operand" "f")
2335 (match_operand:DF 2 "register_operand" "f")))]
2336 "TARGET_FPU"
2337 "faddd %1,%2,%0"
2338 [(set_attr "type" "fp")])
2339
2340 (define_insn "addsf3"
2341 [(set (match_operand:SF 0 "register_operand" "=f")
2342 (plus:SF (match_operand:SF 1 "register_operand" "f")
2343 (match_operand:SF 2 "register_operand" "f")))]
2344 "TARGET_FPU"
2345 "fadds %1,%2,%0"
2346 [(set_attr "type" "fp")])
2347
2348 (define_insn "subtf3"
2349 [(set (match_operand:TF 0 "register_operand" "=f")
2350 (minus:TF (match_operand:TF 1 "register_operand" "f")
2351 (match_operand:TF 2 "register_operand" "f")))]
2352 "TARGET_FPU"
2353 "fsubq %1,%2,%0"
2354 [(set_attr "type" "fp")])
2355
2356 (define_insn "subdf3"
2357 [(set (match_operand:DF 0 "register_operand" "=f")
2358 (minus:DF (match_operand:DF 1 "register_operand" "f")
2359 (match_operand:DF 2 "register_operand" "f")))]
2360 "TARGET_FPU"
2361 "fsubd %1,%2,%0"
2362 [(set_attr "type" "fp")])
2363
2364 (define_insn "subsf3"
2365 [(set (match_operand:SF 0 "register_operand" "=f")
2366 (minus:SF (match_operand:SF 1 "register_operand" "f")
2367 (match_operand:SF 2 "register_operand" "f")))]
2368 "TARGET_FPU"
2369 "fsubs %1,%2,%0"
2370 [(set_attr "type" "fp")])
2371
2372 (define_insn "multf3"
2373 [(set (match_operand:TF 0 "register_operand" "=f")
2374 (mult:TF (match_operand:TF 1 "register_operand" "f")
2375 (match_operand:TF 2 "register_operand" "f")))]
2376 "TARGET_FPU"
2377 "fmulq %1,%2,%0"
2378 [(set_attr "type" "fpmul")])
2379
2380 (define_insn "muldf3"
2381 [(set (match_operand:DF 0 "register_operand" "=f")
2382 (mult:DF (match_operand:DF 1 "register_operand" "f")
2383 (match_operand:DF 2 "register_operand" "f")))]
2384 "TARGET_FPU"
2385 "fmuld %1,%2,%0"
2386 [(set_attr "type" "fpmul")])
2387
2388 (define_insn "mulsf3"
2389 [(set (match_operand:SF 0 "register_operand" "=f")
2390 (mult:SF (match_operand:SF 1 "register_operand" "f")
2391 (match_operand:SF 2 "register_operand" "f")))]
2392 "TARGET_FPU"
2393 "fmuls %1,%2,%0"
2394 [(set_attr "type" "fpmul")])
2395
2396 (define_insn ""
2397 [(set (match_operand:DF 0 "register_operand" "=f")
2398 (mult:DF (float_extend:DF (match_operand:SF 1 "register_operand" "f"))
2399 (float_extend:DF (match_operand:SF 2 "register_operand" "f"))))]
2400 "TARGET_V8 && TARGET_FPU"
2401 "fsmuld %1,%2,%0"
2402 [(set_attr "type" "fpmul")])
2403
2404 (define_insn ""
2405 [(set (match_operand:TF 0 "register_operand" "=f")
2406 (mult:TF (float_extend:TF (match_operand:DF 1 "register_operand" "f"))
2407 (float_extend:TF (match_operand:DF 2 "register_operand" "f"))))]
2408 "TARGET_V8 && TARGET_FPU"
2409 "fdmulq %1,%2,%0"
2410 [(set_attr "type" "fpmul")])
2411
2412 (define_insn "divtf3"
2413 [(set (match_operand:TF 0 "register_operand" "=f")
2414 (div:TF (match_operand:TF 1 "register_operand" "f")
2415 (match_operand:TF 2 "register_operand" "f")))]
2416 "TARGET_FPU"
2417 "fdivq %1,%2,%0"
2418 [(set_attr "type" "fpdiv")])
2419
2420 (define_insn "divdf3"
2421 [(set (match_operand:DF 0 "register_operand" "=f")
2422 (div:DF (match_operand:DF 1 "register_operand" "f")
2423 (match_operand:DF 2 "register_operand" "f")))]
2424 "TARGET_FPU"
2425 "fdivd %1,%2,%0"
2426 [(set_attr "type" "fpdiv")])
2427
2428 (define_insn "divsf3"
2429 [(set (match_operand:SF 0 "register_operand" "=f")
2430 (div:SF (match_operand:SF 1 "register_operand" "f")
2431 (match_operand:SF 2 "register_operand" "f")))]
2432 "TARGET_FPU"
2433 "fdivs %1,%2,%0"
2434 [(set_attr "type" "fpdiv")])
2435
2436 (define_insn "negtf2"
2437 [(set (match_operand:TF 0 "register_operand" "=f,f")
2438 (neg:TF (match_operand:TF 1 "register_operand" "0,f")))]
2439 "TARGET_FPU"
2440 "@
2441 fnegs %0,%0
2442 fnegs %1,%0\;fmovs %R1,%R0\;fmovs %S1,%S0\;fmovs %T1,%T0"
2443 [(set_attr "type" "fp")
2444 (set_attr "length" "1,4")])
2445
2446 (define_insn "negdf2"
2447 [(set (match_operand:DF 0 "register_operand" "=f,f")
2448 (neg:DF (match_operand:DF 1 "register_operand" "0,f")))]
2449 "TARGET_FPU"
2450 "@
2451 fnegs %0,%0
2452 fnegs %1,%0\;fmovs %R1,%R0"
2453 [(set_attr "type" "fp")
2454 (set_attr "length" "1,2")])
2455
2456 (define_insn "negsf2"
2457 [(set (match_operand:SF 0 "register_operand" "=f")
2458 (neg:SF (match_operand:SF 1 "register_operand" "f")))]
2459 "TARGET_FPU"
2460 "fnegs %1,%0"
2461 [(set_attr "type" "fp")])
2462
2463 (define_insn "abstf2"
2464 [(set (match_operand:TF 0 "register_operand" "=f,f")
2465 (abs:TF (match_operand:TF 1 "register_operand" "0,f")))]
2466 "TARGET_FPU"
2467 "@
2468 fabss %0,%0
2469 fabss %1,%0\;fmovs %R1,%R0\;fmovs %S1,%S0\;fmovs %T1,%T0"
2470 [(set_attr "type" "fp")
2471 (set_attr "length" "1,4")])
2472
2473 (define_insn "absdf2"
2474 [(set (match_operand:DF 0 "register_operand" "=f,f")
2475 (abs:DF (match_operand:DF 1 "register_operand" "0,f")))]
2476 "TARGET_FPU"
2477 "@
2478 fabss %0,%0
2479 fabss %1,%0\;fmovs %R1,%R0"
2480 [(set_attr "type" "fp")
2481 (set_attr "length" "1,2")])
2482
2483 (define_insn "abssf2"
2484 [(set (match_operand:SF 0 "register_operand" "=f")
2485 (abs:SF (match_operand:SF 1 "register_operand" "f")))]
2486 "TARGET_FPU"
2487 "fabss %1,%0"
2488 [(set_attr "type" "fp")])
2489
2490 (define_insn "sqrttf2"
2491 [(set (match_operand:TF 0 "register_operand" "=f")
2492 (sqrt:TF (match_operand:TF 1 "register_operand" "f")))]
2493 "TARGET_FPU"
2494 "fsqrtq %1,%0"
2495 [(set_attr "type" "fpsqrt")])
2496
2497 (define_insn "sqrtdf2"
2498 [(set (match_operand:DF 0 "register_operand" "=f")
2499 (sqrt:DF (match_operand:DF 1 "register_operand" "f")))]
2500 "TARGET_FPU"
2501 "fsqrtd %1,%0"
2502 [(set_attr "type" "fpsqrt")])
2503
2504 (define_insn "sqrtsf2"
2505 [(set (match_operand:SF 0 "register_operand" "=f")
2506 (sqrt:SF (match_operand:SF 1 "register_operand" "f")))]
2507 "TARGET_FPU"
2508 "fsqrts %1,%0"
2509 [(set_attr "type" "fpsqrt")])
2510 \f
2511 ;;- arithmetic shift instructions
2512
2513 ;; We can trivially handle shifting the constant 1 (i.e. 1 << n) in DImode.
2514 ;; For other shifts we use the library routine.
2515 ;; ??? Questionable, we can do better than this can't we?
2516 (define_expand "ashldi3"
2517 [(parallel [(set (match_operand:DI 0 "register_operand" "")
2518 (ashift:DI (match_operand:DI 1 "const_double_operand" "")
2519 (match_operand:SI 2 "register_operand" "")))
2520 (clobber (reg:SI 0))])]
2521 ""
2522 "
2523 {
2524 if (GET_CODE (operands[1]) == CONST_DOUBLE
2525 && CONST_DOUBLE_HIGH (operands[1]) == 0
2526 && CONST_DOUBLE_LOW (operands[1]) == 1)
2527 operands[1] = const1_rtx;
2528 else if (operands[1] != const1_rtx)
2529 FAIL;
2530 }")
2531
2532 ;; ??? Questionable, we can do better than this can't we?
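;; A sketch of how the five-instruction sequence below computes 1 << n in
;; DImode, assuming the usual V8 behavior that sll only uses the low 5 bits
;; of the shift count:
;;     subcc %1,32,%g0   ! sets the carry flag when n < 32 (unsigned)
;;     addx  %g0,0,%R0   ! low word  = 1 if n < 32, else 0
;;     xor   %R0,1,%0    ! high word = 1 if n >= 32, else 0
;;     sll   %R0,%1,%R0  ! shift the low-word bit left by n mod 32
;;     sll   %0,%1,%0    ! shift the high-word bit left by n mod 32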
2533 (define_insn ""
2534 [(set (match_operand:DI 0 "register_operand" "=&r")
2535 (ashift:DI (const_int 1)
2536 (match_operand:SI 1 "register_operand" "r")))
2537 (clobber (reg:SI 0))]
2538 ""
2539 "subcc %1,32,%%g0\;addx %%g0,0,%R0\;xor %R0,1,%0\;sll %R0,%1,%R0\;sll %0,%1,%0"
2540 [(set_attr "type" "multi")
2541 (set_attr "length" "5")])
2542
2543 (define_insn "ashlsi3"
2544 [(set (match_operand:SI 0 "register_operand" "=r")
2545 (ashift:SI (match_operand:SI 1 "register_operand" "r")
2546 (match_operand:SI 2 "arith_operand" "rI")))]
2547 ""
2548 "sll %1,%2,%0")
2549
2550 (define_expand "lshldi3"
2551 [(parallel [(set (match_operand:DI 0 "register_operand" "")
2552 (lshift:DI (match_operand:DI 1 "register_operand" "")
2553 (match_operand:DI 2 "const_int_operand" "")))
2554 (clobber (match_scratch:SI 3 ""))])]
2555 ""
2556 "
2557 {
2558 if (GET_CODE (operands[2]) != CONST_INT)
2559 FAIL;
2560 }")
2561
2562 (define_insn ""
2563 [(set (match_operand:DI 0 "register_operand" "=r")
2564 (lshift:DI (match_operand:DI 1 "register_operand" "r")
2565 (match_operand:DI 2 "const_int_operand" "I")))
2566 (clobber (match_scratch:SI 3 "=r"))]
2567 "INTVAL (operands[2]) < 32"
2568 "*
2569 {
2570 operands[4] = GEN_INT (32 - INTVAL (operands[2]));
2571 return \"srl %R1,%4,%3\;sll %R1,%2,%R0\;sll %1,%2,%0\;or %3,%0,%0\";
2572 }"
2573 [(set_attr "type" "multi")
2574 (set_attr "length" "4")])
2575
2576 (define_insn ""
2577 [(set (match_operand:DI 0 "register_operand" "=r")
2578 (lshift:DI (match_operand:DI 1 "register_operand" "r")
2579 (match_operand:DI 2 "const_int_operand" "I")))
2580 (clobber (match_scratch:SI 3 "=X"))]
2581 "INTVAL (operands[2]) >= 32"
2582 "*
2583 {
2584 operands[4] = GEN_INT (INTVAL (operands[2]) - 32);
2585 return \"sll %R1,%4,%0\;mov %%g0,%R0\";
2586 }"
2587 [(set_attr "type" "multi")
2588 (set_attr "length" "2")])
2589
2590 (define_insn "ashrsi3"
2591 [(set (match_operand:SI 0 "register_operand" "=r")
2592 (ashiftrt:SI (match_operand:SI 1 "register_operand" "r")
2593 (match_operand:SI 2 "arith_operand" "rI")))]
2594 ""
2595 "sra %1,%2,%0")
2596
2597 (define_insn "lshrsi3"
2598 [(set (match_operand:SI 0 "register_operand" "=r")
2599 (lshiftrt:SI (match_operand:SI 1 "register_operand" "r")
2600 (match_operand:SI 2 "arith_operand" "rI")))]
2601 ""
2602 "srl %1,%2,%0")
2603
2604 (define_expand "lshrdi3"
2605 [(parallel [(set (match_operand:DI 0 "register_operand" "")
2606 (lshiftrt:DI (match_operand:DI 1 "register_operand" "")
2607 (match_operand:DI 2 "const_int_operand" "")))
2608 (clobber (match_scratch:SI 3 ""))])]
2609 ""
2610 "
2611 {
2612 if (GET_CODE (operands[2]) != CONST_INT)
2613 FAIL;
2614 }")
2615
2616 (define_insn ""
2617 [(set (match_operand:DI 0 "register_operand" "=r")
2618 (lshiftrt:DI (match_operand:DI 1 "register_operand" "r")
2619 (match_operand:DI 2 "const_int_operand" "I")))
2620 (clobber (match_scratch:SI 3 "=r"))]
2621 "INTVAL (operands[2]) < 32"
2622 "*
2623 {
2624 operands[4] = GEN_INT (32 - INTVAL (operands[2]));
2625 return \"sll %1,%4,%3\;srl %1,%2,%0\;srl %R1,%2,%R0\;or %3,%R0,%R0\";
2626 }"
2627 [(set_attr "type" "multi")
2628 (set_attr "length" "4")])
2629
2630 (define_insn ""
2631 [(set (match_operand:DI 0 "register_operand" "=r")
2632 (lshiftrt:DI (match_operand:DI 1 "register_operand" "r")
2633 (match_operand:DI 2 "const_int_operand" "I")))
2634 (clobber (match_scratch:SI 3 "=X"))]
2635 "INTVAL (operands[2]) >= 32"
2636 "*
2637 {
2638 operands[4] = GEN_INT (INTVAL (operands[2]) - 32);
2639 return \"srl %1,%4,%R0\;mov %%g0,%0\";
2640 }"
2641 [(set_attr "type" "multi")
2642 (set_attr "length" "2")])
2643 \f
2644 ;; Unconditional and other jump instructions
2645 ;; On the Sparc, by setting the annul bit on an unconditional branch, the
2646 ;; following insn is never executed. This saves us a nop. Dbx does not
2647 ;; handle such branches though, so we only use them when optimizing.
2648 (define_insn "jump"
2649 [(set (pc) (label_ref (match_operand 0 "" "")))]
2650 ""
2651 "b%* %l0%("
2652 [(set_attr "type" "uncond_branch")])
2653
2654 (define_expand "tablejump"
2655 [(parallel [(set (pc) (match_operand:SI 0 "register_operand" "r"))
2656 (use (label_ref (match_operand 1 "" "")))])]
2657 ""
2658 "
2659 {
2660 /* We need to use the PC value in %o7 that was set up when the address
2661 of the label was loaded into a register, so we need different RTL. */
2662 if (flag_pic)
2663 {
2664 emit_insn (gen_pic_tablejump (operands[0], operands[1]));
2665 DONE;
2666 }
2667 }")
2668
2669 (define_insn "pic_tablejump"
2670 [(set (pc) (match_operand:SI 0 "register_operand" "r"))
2671 (use (label_ref (match_operand 1 "" "")))
2672 (use (reg:SI 15))]
2673 ""
2674 "jmp %%o7+%0%#"
2675 [(set_attr "type" "uncond_branch")])
2676
2677 (define_insn ""
2678 [(set (pc) (match_operand:SI 0 "address_operand" "p"))
2679 (use (label_ref (match_operand 1 "" "")))]
2680 ""
2681 "jmp %a0%#"
2682 [(set_attr "type" "uncond_branch")])
2683
2684 (define_insn ""
2685 [(set (pc) (label_ref (match_operand 0 "" "")))
2686 (set (reg:SI 15) (label_ref (match_dup 0)))]
2687 ""
2688 "call %l0%#"
2689 [(set_attr "type" "uncond_branch")])
2690
2691 ;; This pattern recognizes the "instruction" that appears in
2692 ;; a function call that wants a structure value,
2693 ;; to inform the called function if compiled with Sun CC.
2694 ;(define_insn ""
2695 ; [(match_operand:SI 0 "immediate_operand" "")]
2696 ; "GET_CODE (operands[0]) == CONST_INT && INTVAL (operands[0]) > 0"
2697 ; "unimp %0"
2698 ; [(set_attr "type" "marker")])
2699
2700 ;;- jump to subroutine
2701 (define_expand "call"
2702 ;; Note that this expression is not used for generating RTL.
2703 ;; All the RTL is generated explicitly below.
2704 [(call (match_operand:SI 0 "call_operand" "")
2705 (match_operand 3 "" "i"))]
2706 ;; operands[2] is next_arg_register
2707 ;; operands[3] is struct_value_size_rtx.
2708 ""
2709 "
2710 {
2711 rtx fn_rtx, nregs_rtx;
2712
2713 if (GET_CODE (XEXP (operands[0], 0)) == LABEL_REF)
2714 {
2715 /* This is really a PIC sequence. We want to represent
2716 it as a funny jump so its delay slots can be filled.
2717
2718 ??? But if this really *is* a CALL, won't it clobber the
2719 call-clobbered registers? We lose this if it is a JUMP_INSN.
2720 Why can't we have delay slots filled if it were a CALL? */
2721
2722 if (INTVAL (operands[3]) > 0)
2723 emit_jump_insn (gen_rtx (PARALLEL, VOIDmode, gen_rtvec (3,
2724 gen_rtx (SET, VOIDmode, pc_rtx,
2725 XEXP (operands[0], 0)),
2726 operands[3],
2727 gen_rtx (CLOBBER, VOIDmode,
2728 gen_rtx (REG, SImode, 15)))));
2729 else
2730 emit_jump_insn (gen_rtx (PARALLEL, VOIDmode, gen_rtvec (2,
2731 gen_rtx (SET, VOIDmode, pc_rtx,
2732 XEXP (operands[0], 0)),
2733 gen_rtx (CLOBBER, VOIDmode,
2734 gen_rtx (REG, SImode, 15)))));
2735 goto finish_call;
2736 }
2737
2738 fn_rtx = operands[0];
2739
2740 /* Count the number of parameter registers being used by this call.
2741 If that argument is NULL, it means we are using them all, which
2742 means 6 on the Sparc. */
2743 #if 0
2744 if (operands[2])
2745 nregs_rtx = gen_rtx (CONST_INT, VOIDmode, REGNO (operands[2]) - 8);
2746 else
2747 nregs_rtx = gen_rtx (CONST_INT, VOIDmode, 6);
2748 #else
2749 nregs_rtx = const0_rtx;
2750 #endif
2751
2752 if (INTVAL (operands[3]) > 0)
2753 emit_call_insn (gen_rtx (PARALLEL, VOIDmode, gen_rtvec (3,
2754 gen_rtx (CALL, VOIDmode, fn_rtx, nregs_rtx),
2755 operands[3],
2756 gen_rtx (CLOBBER, VOIDmode,
2757 gen_rtx (REG, SImode, 15)))));
2758 else
2759 emit_call_insn (gen_rtx (PARALLEL, VOIDmode, gen_rtvec (2,
2760 gen_rtx (CALL, VOIDmode, fn_rtx, nregs_rtx),
2761 gen_rtx (CLOBBER, VOIDmode,
2762 gen_rtx (REG, SImode, 15)))));
2763
2764 finish_call:
2765 #if 0
2766 /* If this call wants a structure value,
2767 emit an unimp insn to let the called function know about this. */
2768 if (INTVAL (operands[3]) > 0)
2769 {
2770 rtx insn = emit_insn (operands[3]);
2771 SCHED_GROUP_P (insn) = 1;
2772 }
2773 #endif
2774
2775 DONE;
2776 }")
2777
2778 (define_insn ""
2779 [(call (mem:SI (match_operand:SI 0 "call_operand_address" "S,r"))
2780 (match_operand 1 "" ""))
2781 (clobber (reg:SI 15))]
2782 ;;- Do not use operand 1 for most machines.
2783 ""
2784 "*
2785 {
2786 return \"call %a0,%1%#\";
2787 }"
2788 [(set_attr "type" "call")])
2789
2790 ;; This is a call that wants a structure value.
2791 (define_insn ""
2792 [(call (mem:SI (match_operand:SI 0 "call_operand_address" "S,r"))
2793 (match_operand 1 "" ""))
2794 (match_operand 2 "immediate_operand" "")
2795 (clobber (reg:SI 15))]
2796 ;;- Do not use operand 1 for most machines.
2797 "GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) > 0"
2798 "*
2799 {
2800 return \"call %a0,%1\;nop\;unimp %2\";
2801 }"
2802 [(set_attr "type" "call_no_delay_slot")])
2803
2804 (define_expand "call_value"
2805 [(set (match_operand 0 "register_operand" "=rf")
2806 (call (match_operand:SI 1 "" "")
2807 (match_operand 4 "" "")))]
2808 ;; operand 3 is next_arg_register
2809 ""
2810 "
2811 {
2812 rtx fn_rtx, nregs_rtx;
2813 rtvec vec;
2814
2815 fn_rtx = operands[1];
2816
2817 #if 0
2818 if (operands[3])
2819 nregs_rtx = gen_rtx (CONST_INT, VOIDmode, REGNO (operands[3]) - 8);
2820 else
2821 nregs_rtx = gen_rtx (CONST_INT, VOIDmode, 6);
2822 #else
2823 nregs_rtx = const0_rtx;
2824 #endif
2825
2826 vec = gen_rtvec (2,
2827 gen_rtx (SET, VOIDmode, operands[0],
2828 gen_rtx (CALL, VOIDmode, fn_rtx, nregs_rtx)),
2829 gen_rtx (CLOBBER, VOIDmode, gen_rtx (REG, SImode, 15)));
2830
2831 emit_call_insn (gen_rtx (PARALLEL, VOIDmode, vec));
2832
2833 DONE;
2834 }")
2835
2836 (define_insn ""
2837 [(set (match_operand 0 "" "=rf")
2838 (call (mem:SI (match_operand:SI 1 "call_operand_address" "rS"))
2839 (match_operand 2 "" "")))
2840 (clobber (reg:SI 15))]
2841 ;;- Do not use operand 2 for most machines.
2842 ""
2843 "*
2844 {
2845 return \"call %a1,%2%#\";
2846 }"
2847 [(set_attr "type" "call")])
2848
2849 (define_expand "untyped_call"
2850 [(parallel [(call (match_operand:SI 0 "call_operand" "")
2851 (const_int 0))
2852 (match_operand:BLK 1 "memory_operand" "")
2853 (match_operand 2 "" "")
2854 (clobber (reg:SI 15))])]
2855 ""
2856 "
2857 {
2858 operands[1] = change_address (operands[1], DImode, XEXP (operands[1], 0));
2859 }")
2860
2861 ;; Make a call followed by two nops in case the function being called
2862 ;; returns a structure value and expects to skip an unimp instruction.
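;; A sketch of the sequence the template below produces (hypothetical callee
;; and memory operands):
;;     call foo,0
;;     nop                 ! delay slot
;;     nop                 ! stands in for the unimp a struct-returning
;;                         ! callee expects to skip over
;;     std  %o0,[mem]      ! store the %o0/%o1 return-value pair
;;     st   %f0,[mem+8]    ! store %f0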
2863
2864 (define_insn ""
2865 [(call (mem:SI (match_operand:SI 0 "call_operand_address" "rS"))
2866 (const_int 0))
2867 (match_operand:DI 1 "memory_operand" "o")
2868 (match_operand 2 "" "")
2869 (clobber (reg:SI 15))]
2870 ""
2871 "*
2872 {
2873 operands[2] = adj_offsettable_operand (operands[1], 8);
2874 return \"call %a0,0\;nop\;nop\;std %%o0,%1\;st %%f0,%2\";
2875 }"
2876 [(set_attr "type" "multi")])
2877
2878 ;; Prepare to return any type including a structure value.
2879
2880 (define_expand "untyped_return"
2881 [(match_operand:BLK 0 "memory_operand" "")
2882 (match_operand 1 "" "")]
2883 ""
2884 "
2885 {
2886 rtx valreg1 = gen_rtx (REG, DImode, 24);
2887 rtx valreg2 = gen_rtx (REG, DFmode, 32);
2888 rtx result = operands[0];
2889 rtx rtnreg = gen_rtx (REG, SImode, (leaf_function ? 15 : 31));
2890 rtx value = gen_reg_rtx (SImode);
2891
2892 /* Fetch the instruction where we will return to and see if it's an unimp
2893 instruction (the most significant 10 bits will be zero). If so,
2894 update the return address to skip the unimp instruction. */
2895 emit_move_insn (value,
2896 gen_rtx (MEM, SImode, plus_constant (rtnreg, 8)));
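  /* Shifting right by 22 keeps only the 10 most significant bits
     (32 - 10 = 22), so VALUE becomes zero exactly when the fetched
     word has the all-zero top bits of an unimp.  */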
2897 emit_insn (gen_lshrsi3 (value, value, GEN_INT (22)));
2898 emit_insn (gen_update_return (rtnreg, value));
2899
2900 /* Reload the function value registers. */
2901 emit_move_insn (valreg1, change_address (result, DImode, XEXP (result, 0)));
2902 emit_move_insn (valreg2,
2903 change_address (result, DFmode,
2904 plus_constant (XEXP (result, 0), 8)));
2905
2906 /* Put USE insns before the return. */
2907 emit_insn (gen_rtx (USE, VOIDmode, valreg1));
2908 emit_insn (gen_rtx (USE, VOIDmode, valreg2));
2909
2910 /* Construct the return. */
2911 expand_null_return ();
2912
2913 DONE;
2914 }")
2915
2916 ;; This is a bit of a hack. We're incrementing a fixed register (%i7),
2917 ;; and parts of the compiler don't want to believe that the add is needed.
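;; The template below compares the shifted instruction word with zero and
;; uses an annulled branch, so the "add %0,4,%0" in the delay slot executes
;; only when the branch is taken, i.e. only when an unimp follows the call.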
2918
2919 (define_insn "update_return"
2920 [(unspec:SI [(match_operand:SI 0 "register_operand" "r")
2921 (match_operand:SI 1 "register_operand" "r")] 0)]
2922 ""
2923 "cmp %1,0\;be,a .+8\;add %0,4,%0"
2924 [(set_attr "type" "multi")])
2925 \f
2926 (define_insn "return"
2927 [(return)]
2928 "! TARGET_EPILOGUE"
2929 "* return output_return (operands);"
2930 [(set_attr "type" "multi")])
2931
2932 (define_insn "nop"
2933 [(const_int 0)]
2934 ""
2935 "nop")
2936
2937 (define_insn "indirect_jump"
2938 [(set (pc) (match_operand:SI 0 "address_operand" "p"))]
2939 ""
2940 "jmp %a0%#"
2941 [(set_attr "type" "uncond_branch")])
2942
2943 (define_expand "nonlocal_goto"
2944 [(match_operand:SI 0 "general_operand" "")
2945 (match_operand:SI 1 "general_operand" "")
2946 (match_operand:SI 2 "general_operand" "")
2947 (match_operand:SI 3 "" "")]
2948 ""
2949 "
2950 {
2951 /* Trap instruction to flush all the register windows. */
2952 emit_insn (gen_flush_register_windows ());
2953 /* Load the fp value for the containing fn into %fp.
2954 This is needed because operands[2] refers to %fp.
2955 Virtual register instantiation fails if the virtual %fp isn't set from a
2956 register. Thus we must copy operands[0] into a register if it isn't
2957 already one. */
2958 if (GET_CODE (operands[0]) != REG)
2959 operands[0] = force_reg (SImode, operands[0]);
2960 emit_move_insn (virtual_stack_vars_rtx, operands[0]);
2961 /* Find the containing function's current nonlocal goto handler,
2962 which will do any cleanups and then jump to the label. */
2963 emit_move_insn (gen_rtx (REG, SImode, 8), operands[1]);
2964 /* Restore %fp from stack pointer value for containing function.
2965 The restore insn that follows will move this to %sp,
2966 and reload the appropriate value into %fp. */
2967 emit_move_insn (frame_pointer_rtx, operands[2]);
2968 /* Put the nonlocal label address in the static chain register. */
2969 emit_move_insn (static_chain_rtx, operands[3]);
2970 /* USE of frame_pointer_rtx added for consistency; not clear if
2971 really needed. */
2972 emit_insn (gen_rtx (USE, VOIDmode, frame_pointer_rtx));
2973 emit_insn (gen_rtx (USE, VOIDmode, stack_pointer_rtx));
2974 emit_insn (gen_rtx (USE, VOIDmode, static_chain_rtx));
2975 emit_insn (gen_rtx (USE, VOIDmode, gen_rtx (REG, SImode, 8)));
2976 /* Return, restoring reg window and jumping to goto handler. */
2977 emit_insn (gen_goto_handler_and_restore ());
2978 DONE;
2979 }")
2980
2981 ;; Special trap insn to flush register windows.
2982 (define_insn "flush_register_windows"
2983 [(unspec_volatile [(const_int 0)] 0)]
2984 ""
2985 "ta 3"
2986 [(set_attr "type" "misc")])
2987
2988 (define_insn "goto_handler_and_restore"
2989 [(unspec_volatile [(const_int 0)] 1)]
2990 ""
2991 "jmp %%o0+0\;restore"
2992 [(set_attr "type" "misc")
2993 (set_attr "length" "2")])
2994 \f
2995 ;; find first set.
2996
2997 ;; The scan instruction searches from the most significant bit while ffs
2998 ;; searches from the least significant bit. The bit index and treatment of
2999 ;; zero also differ. It takes at least 7 instructions to get the proper
3000 ;; result. Here is an obvious 8-instruction sequence.
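;; A rough walk-through of that sequence, assuming the SPARClite scan
;; instruction returns the bit number of the first set bit counted from the
;; most significant end, and 63 when its source is zero:
;;     sub  %g0,%1,%0   ! %0 = -x
;;     and  %0,%1,%0    ! %0 = x & -x, isolating the lowest set bit
;;     scan %0,0,%0     ! position of that bit, counted from the MSB
;;     mov  32,%2
;;     sub  %2,%0,%0    ! 32 - position = 1-based index from the LSB
;;     sra  %0,31,%2    ! %2 = -1 only in the x == 0 case
;;     and  %2,31,%2    ! ... giving a correction of 31
;;     add  %2,%0,%0    ! so that ffs (0) comes out as 0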
3001
3002 (define_insn "ffssi2"
3003 [(set (match_operand:SI 0 "register_operand" "=&r")
3004 (ffs:SI (match_operand:SI 1 "register_operand" "r")))
3005 (clobber (match_scratch:SI 2 "=&r"))]
3006 "TARGET_SPARCLITE"
3007 "sub %%g0,%1,%0\;and %0,%1,%0\;scan %0,0,%0\;mov 32,%2\;sub %2,%0,%0\;sra %0,31,%2\;and %2,31,%2\;add %2,%0,%0"
3008 [(set_attr "type" "multi")
3009 (set_attr "length" "8")])
3010 \f
3011 ;; Split up troublesome insns for better scheduling.
3012
3013 ;; The following patterns are straightforward. They can be applied
3014 ;; either before or after register allocation.
3015
3016 (define_split
3017 [(set (match_operator 0 "memop" [(match_operand:SI 1 "symbolic_operand" "")])
3018 (match_operand 2 "reg_or_0_operand" ""))
3019 (clobber (match_operand:SI 3 "register_operand" ""))]
3020 "! flag_pic"
3021 [(set (match_dup 3) (high:SI (match_dup 1)))
3022 (set (match_op_dup 0 [(lo_sum:SI (match_dup 3) (match_dup 1))])
3023 (match_dup 2))]
3024 "")
3025
3026 (define_split
3027 [(set (match_operator 0 "memop"
3028 [(match_operand:SI 1 "immediate_operand" "")])
3029 (match_operand 2 "general_operand" ""))
3030 (clobber (match_operand:SI 3 "register_operand" ""))]
3031 "flag_pic"
3032 [(set (match_op_dup 0 [(match_dup 1)])
3033 (match_dup 2))]
3034 "
3035 {
3036 operands[1] = legitimize_pic_address (operands[1], GET_MODE (operands[0]),
3037 operands[3], 0);
3038 }")
3039
3040 (define_split
3041 [(set (match_operand 0 "register_operand" "")
3042 (match_operator 1 "memop"
3043 [(match_operand:SI 2 "immediate_operand" "")]))]
3044 "flag_pic"
3045 [(set (match_dup 0)
3046 (match_op_dup 1 [(match_dup 2)]))]
3047 "
3048 {
3049 operands[2] = legitimize_pic_address (operands[2], GET_MODE (operands[1]),
3050 operands[0], 0);
3051 }")
3052
3053 ;; Sign- and Zero-extend operations can have symbolic memory operands.
3054
3055 (define_split
3056 [(set (match_operand 0 "register_operand" "")
3057 (match_operator 1 "extend_op"
3058 [(match_operator 2 "memop"
3059 [(match_operand:SI 3 "immediate_operand" "")])]))]
3060 "flag_pic"
3061 [(set (match_dup 0)
3062 (match_op_dup 1 [(match_op_dup 2 [(match_dup 3)])]))]
3063 "
3064 {
3065 operands[3] = legitimize_pic_address (operands[3], GET_MODE (operands[2]),
3066 operands[0], 0);
3067 }")
3068
3069 (define_split
3070 [(set (match_operand:SI 0 "register_operand" "")
3071 (match_operand:SI 1 "immediate_operand" ""))]
3072 "! flag_pic && (GET_CODE (operands[1]) == SYMBOL_REF
3073 || GET_CODE (operands[1]) == CONST
3074 || GET_CODE (operands[1]) == LABEL_REF)"
3075 [(set (match_dup 0) (high:SI (match_dup 1)))
3076 (set (match_dup 0)
3077 (lo_sum:SI (match_dup 0) (match_dup 1)))]
3078 "")
3079
3080 ;; LABEL_REFs are not modified by `legitimize_pic_address`, so they are
3081 ;; excluded here to avoid recursing infinitely in the PIC case.
3082 (define_split
3083 [(set (match_operand:SI 0 "register_operand" "")
3084 (match_operand:SI 1 "immediate_operand" ""))]
3085 "flag_pic && (GET_CODE (operands[1]) == SYMBOL_REF
3086 || GET_CODE (operands[1]) == CONST)"
3087 [(set (match_dup 0) (match_dup 1))]
3088 "
3089 {
3090 operands[1] = legitimize_pic_address (operands[1], Pmode, operands[0], 0);
3091 }")
3092 \f
3093 ;; These split sne/seq insns. The forms of the resulting insns are
3094 ;; somewhat bogus, but they avoid extra patterns and show data dependency.
3095 ;; Nothing will look at these in detail after splitting has occurred.
3096
3097 (define_split
3098 [(set (match_operand:SI 0 "register_operand" "")
3099 (ne:SI (match_operand:SI 1 "register_operand" "") (const_int 0)))
3100 (clobber (reg:CC 0))]
3101 ""
3102 [(set (reg:CC_NOOV 0) (compare:CC_NOOV (neg:SI (match_dup 1))
3103 (const_int 0)))
3104 (set (match_dup 0) (ltu:SI (reg:CC 0) (const_int 0)))]
3105 "")
3106
3107 (define_split
3108 [(set (match_operand:SI 0 "register_operand" "")
3109 (neg:SI (ne:SI (match_operand:SI 1 "register_operand" "")
3110 (const_int 0))))
3111 (clobber (reg:CC 0))]
3112 ""
3113 [(set (reg:CC_NOOV 0) (compare:CC_NOOV (neg:SI (match_dup 1))
3114 (const_int 0)))
3115 (set (match_dup 0) (neg:SI (ltu:SI (reg:CC 0) (const_int 0))))]
3116 "")
3117
3118 (define_split
3119 [(set (match_operand:SI 0 "register_operand" "")
3120 (eq:SI (match_operand:SI 1 "register_operand" "") (const_int 0)))
3121 (clobber (reg:CC 0))]
3122 ""
3123 [(set (reg:CC_NOOV 0) (compare:CC_NOOV (neg:SI (match_dup 1))
3124 (const_int 0)))
3125 (set (match_dup 0) (geu:SI (reg:CC 0) (const_int 0)))]
3126 "")
3127
3128 (define_split
3129 [(set (match_operand:SI 0 "register_operand" "")
3130 (neg:SI (eq:SI (match_operand:SI 1 "register_operand" "")
3131 (const_int 0))))
3132 (clobber (reg:CC 0))]
3133 ""
3134 [(set (reg:CC_NOOV 0) (compare:CC_NOOV (neg:SI (match_dup 1))
3135 (const_int 0)))
3136 (set (match_dup 0) (neg:SI (geu:SI (reg:CC 0) (const_int 0))))]
3137 "")
3138
3139 (define_split
3140 [(set (match_operand:SI 0 "register_operand" "")
3141 (plus:SI (ne:SI (match_operand:SI 1 "register_operand" "")
3142 (const_int 0))
3143 (match_operand:SI 2 "register_operand" "")))
3144 (clobber (reg:CC 0))]
3145 ""
3146 [(set (reg:CC_NOOV 0) (compare:CC_NOOV (neg:SI (match_dup 1))
3147 (const_int 0)))
3148 (set (match_dup 0) (plus:SI (ltu:SI (reg:CC 0) (const_int 0))
3149 (match_dup 2)))]
3150 "")
3151
3152 (define_split
3153 [(set (match_operand:SI 0 "register_operand" "")
3154 (minus:SI (match_operand:SI 2 "register_operand" "")
3155 (ne:SI (match_operand:SI 1 "register_operand" "")
3156 (const_int 0))))
3157 (clobber (reg:CC 0))]
3158 ""
3159 [(set (reg:CC_NOOV 0) (compare:CC_NOOV (neg:SI (match_dup 1))
3160 (const_int 0)))
3161 (set (match_dup 0) (minus:SI (match_dup 2)
3162 (ltu:SI (reg:CC 0) (const_int 0))))]
3163 "")
3164
3165 (define_split
3166 [(set (match_operand:SI 0 "register_operand" "")
3167 (plus:SI (eq:SI (match_operand:SI 1 "register_operand" "")
3168 (const_int 0))
3169 (match_operand:SI 2 "register_operand" "")))
3170 (clobber (reg:CC 0))]
3171 ""
3172 [(set (reg:CC_NOOV 0) (compare:CC_NOOV (neg:SI (match_dup 1))
3173 (const_int 0)))
3174 (set (match_dup 0) (plus:SI (geu:SI (reg:CC 0) (const_int 0))
3175 (match_dup 2)))]
3176 "")
3177
3178 (define_split
3179 [(set (match_operand:SI 0 "register_operand" "")
3180 (minus:SI (match_operand:SI 2 "register_operand" "")
3181 (eq:SI (match_operand:SI 1 "register_operand" "")
3182 (const_int 0))))
3183 (clobber (reg:CC 0))]
3184 ""
3185 [(set (reg:CC_NOOV 0) (compare:CC_NOOV (neg:SI (match_dup 1))
3186 (const_int 0)))
3187 (set (match_dup 0) (minus:SI (match_dup 2)
3188 (geu:SI (reg:CC 0) (const_int 0))))]
3189 "")
3190 \f
3191 ;; Peepholes go at the end.
3192
3193 ;; Optimize consecutive loads or stores into ldd and std when possible.
3194 ;; The conditions in which we do this are very restricted and are
3195 ;; explained in the code for {registers,memory}_ok_for_ldd functions.
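;; For instance, a pair of adjacent word loads such as (hypothetical operands)
;;     ld  [%fp-8],%o0
;;     ld  [%fp-4],%o1
;; can become a single "ldd [%fp-8],%o0", provided %o0/%o1 are an even/odd
;; register pair and the lower address is doubleword aligned.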
3196
3197 (define_peephole
3198 [(set (match_operand:SI 0 "register_operand" "=rf")
3199 (match_operand:SI 1 "memory_operand" ""))
3200 (set (match_operand:SI 2 "register_operand" "=rf")
3201 (match_operand:SI 3 "memory_operand" ""))]
3202 "registers_ok_for_ldd_peep (operands[0], operands[2])
3203 && ! MEM_VOLATILE_P (operands[1]) && ! MEM_VOLATILE_P (operands[3])
3204 && addrs_ok_for_ldd_peep (XEXP (operands[1], 0), XEXP (operands[3], 0))"
3205 "ldd %1,%0")
3206
3207 (define_peephole
3208 [(set (match_operand:SI 0 "memory_operand" "")
3209 (match_operand:SI 1 "register_operand" "rf"))
3210 (set (match_operand:SI 2 "memory_operand" "")
3211 (match_operand:SI 3 "register_operand" "rf"))]
3212 "registers_ok_for_ldd_peep (operands[1], operands[3])
3213 && ! MEM_VOLATILE_P (operands[0]) && ! MEM_VOLATILE_P (operands[2])
3214 && addrs_ok_for_ldd_peep (XEXP (operands[0], 0), XEXP (operands[2], 0))"
3215 "std %1,%0")
3216
3217 (define_peephole
3218 [(set (match_operand:SF 0 "register_operand" "=fr")
3219 (match_operand:SF 1 "memory_operand" ""))
3220 (set (match_operand:SF 2 "register_operand" "=fr")
3221 (match_operand:SF 3 "memory_operand" ""))]
3222 "registers_ok_for_ldd_peep (operands[0], operands[2])
3223 && ! MEM_VOLATILE_P (operands[1]) && ! MEM_VOLATILE_P (operands[3])
3224 && addrs_ok_for_ldd_peep (XEXP (operands[1], 0), XEXP (operands[3], 0))"
3225 "ldd %1,%0")
3226
3227 (define_peephole
3228 [(set (match_operand:SF 0 "memory_operand" "")
3229 (match_operand:SF 1 "register_operand" "fr"))
3230 (set (match_operand:SF 2 "memory_operand" "")
3231 (match_operand:SF 3 "register_operand" "fr"))]
3232 "registers_ok_for_ldd_peep (operands[1], operands[3])
3233 && ! MEM_VOLATILE_P (operands[0]) && ! MEM_VOLATILE_P (operands[2])
3234 && addrs_ok_for_ldd_peep (XEXP (operands[0], 0), XEXP (operands[2], 0))"
3235 "std %1,%0")
3236
3237 (define_peephole
3238 [(set (match_operand:SI 0 "register_operand" "=rf")
3239 (match_operand:SI 1 "memory_operand" ""))
3240 (set (match_operand:SI 2 "register_operand" "=rf")
3241 (match_operand:SI 3 "memory_operand" ""))]
3242 "registers_ok_for_ldd_peep (operands[2], operands[0])
3243 && ! MEM_VOLATILE_P (operands[3]) && ! MEM_VOLATILE_P (operands[1])
3244 && addrs_ok_for_ldd_peep (XEXP (operands[3], 0), XEXP (operands[1], 0))"
3245 "ldd %3,%2")
3246
3247 (define_peephole
3248 [(set (match_operand:SI 0 "memory_operand" "")
3249 (match_operand:SI 1 "register_operand" "rf"))
3250 (set (match_operand:SI 2 "memory_operand" "")
3251 (match_operand:SI 3 "register_operand" "rf"))]
3252 "registers_ok_for_ldd_peep (operands[3], operands[1])
3253 && ! MEM_VOLATILE_P (operands[2]) && ! MEM_VOLATILE_P (operands[0])
3254 && addrs_ok_for_ldd_peep (XEXP (operands[2], 0), XEXP (operands[0], 0))"
3255 "std %3,%2")
3256
3257 (define_peephole
3258 [(set (match_operand:SF 0 "register_operand" "=fr")
3259 (match_operand:SF 1 "memory_operand" ""))
3260 (set (match_operand:SF 2 "register_operand" "=fr")
3261 (match_operand:SF 3 "memory_operand" ""))]
3262 "registers_ok_for_ldd_peep (operands[2], operands[0])
3263 && ! MEM_VOLATILE_P (operands[3]) && ! MEM_VOLATILE_P (operands[1])
3264 && addrs_ok_for_ldd_peep (XEXP (operands[3], 0), XEXP (operands[1], 0))"
3265 "ldd %3,%2")
3266
3267 (define_peephole
3268 [(set (match_operand:SF 0 "memory_operand" "")
3269 (match_operand:SF 1 "register_operand" "fr"))
3270 (set (match_operand:SF 2 "memory_operand" "")
3271 (match_operand:SF 3 "register_operand" "fr"))]
3272 "registers_ok_for_ldd_peep (operands[3], operands[1])
3273 && ! MEM_VOLATILE_P (operands[2]) && ! MEM_VOLATILE_P (operands[0])
3274 && addrs_ok_for_ldd_peep (XEXP (operands[2], 0), XEXP (operands[0], 0))"
3275 "std %3,%2")
3276
3277 ;; Optimize the case of following a reg-reg move with a test
3278 ;; of the register just moved. Don't allow floating point regs for operand 0 or 1.
3279 ;; This can result from a float to fix conversion.
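;; For example (hypothetical registers), the pair
;;     mov %o1,%i0
;;     tst %i0
;; collapses into the single "orcc %o1,%g0,%i0" emitted below, which copies
;; the register and sets the condition codes in one instruction.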
3280
3281 (define_peephole
3282 [(set (match_operand:SI 0 "register_operand" "=r")
3283 (match_operand:SI 1 "register_operand" "r"))
3284 (set (reg:CC 0)
3285 (compare:CC (match_operand:SI 2 "register_operand" "r")
3286 (const_int 0)))]
3287 "(rtx_equal_p (operands[2], operands[0])
3288 || rtx_equal_p (operands[2], operands[1]))
3289 && ! FP_REG_P (operands[0]) && ! FP_REG_P (operands[1])"
3290 "orcc %1,%%g0,%0")
3291
3292 ;; Do {sign,zero}-extended compares somewhat more efficiently.
3293 ;; ??? Is this now the Right Way to do this? Or will SCRATCH
3294 ;; eventually have some impact here?
3295
3296 (define_peephole
3297 [(set (match_operand:HI 0 "register_operand" "")
3298 (match_operand:HI 1 "memory_operand" ""))
3299 (set (match_operand:SI 2 "register_operand" "")
3300 (sign_extend:SI (match_dup 0)))
3301 (set (reg:CC 0)
3302 (compare:CC (match_dup 2)
3303 (const_int 0)))]
3304 ""
3305 "ldsh %1,%0\;orcc %0,%%g0,%2")
3306
3307 (define_peephole
3308 [(set (match_operand:QI 0 "register_operand" "")
3309 (match_operand:QI 1 "memory_operand" ""))
3310 (set (match_operand:SI 2 "register_operand" "")
3311 (sign_extend:SI (match_dup 0)))
3312 (set (reg:CC 0)
3313 (compare:CC (match_dup 2)
3314 (const_int 0)))]
3315 ""
3316 "ldsb %1,%0\;orcc %0,%%g0,%2")
3317
3318 (define_peephole
3319 [(set (match_operand:HI 0 "register_operand" "")
3320 (match_operand:HI 1 "memory_operand" ""))
3321 (set (match_operand:SI 2 "register_operand" "")
3322 (sign_extend:SI (match_dup 0)))]
3323 "dead_or_set_p (insn, operands[0])"
3324 "*
3325 {
3326 warning (\"bad peephole\");
3327 if (! MEM_VOLATILE_P (operands[1]))
3328 abort ();
3329 return \"ldsh %1,%2\";
3330 }")
3331
3332 (define_peephole
3333 [(set (match_operand:QI 0 "register_operand" "")
3334 (match_operand:QI 1 "memory_operand" ""))
3335 (set (match_operand:SI 2 "register_operand" "")
3336 (sign_extend:SI (match_dup 0)))]
3337 "dead_or_set_p (insn, operands[0])"
3338 "*
3339 {
3340 warning (\"bad peephole\");
3341 if (! MEM_VOLATILE_P (operands[1]))
3342 abort ();
3343 return \"ldsb %1,%2\";
3344 }")
3345
3346 ;; Floating-point move peepholes
3347
3348 (define_peephole
3349 [(set (match_operand:SI 0 "register_operand" "=r")
3350 (lo_sum:SI (match_dup 0)
3351 (match_operand:SI 1 "immediate_operand" "i")))
3352 (set (match_operand:DF 2 "register_operand" "=fr")
3353 (mem:DF (match_dup 0)))]
3354 "RTX_UNCHANGING_P (operands[1]) && reg_unused_after (operands[0], insn)"
3355 "*
3356 {
3357 /* Go by way of output_move_double in case the register in operand 2
3358 is not properly aligned for ldd. */
3359 operands[1] = gen_rtx (MEM, DFmode,
3360 gen_rtx (LO_SUM, SImode, operands[0], operands[1]));
3361 operands[0] = operands[2];
3362 return output_move_double (operands);
3363 }")
3364
3365 (define_peephole
3366 [(set (match_operand:SI 0 "register_operand" "=r")
3367 (lo_sum:SI (match_dup 0)
3368 (match_operand:SI 1 "immediate_operand" "i")))
3369 (set (match_operand:SF 2 "register_operand" "=fr")
3370 (mem:SF (match_dup 0)))]
3371 "RTX_UNCHANGING_P (operands[1]) && reg_unused_after (operands[0], insn)"
3372 "ld [%0+%%lo(%a1)],%2")
3373
3374 ;; Return peepholes. First the "normal" ones
3375
3376 ;; ??? There are QImode, HImode, and SImode versions of this pattern.
3377 ;; It might be possible to write one more general pattern instead of three.
3378
3379 (define_insn ""
3380 [(set (match_operand:QI 0 "restore_operand" "")
3381 (match_operand:QI 1 "arith_operand" "rI"))
3382 (return)]
3383 "! TARGET_EPILOGUE"
3384 "*
3385 {
3386 if (current_function_returns_struct)
3387 return \"jmp %%i7+12\;restore %%g0,%1,%Y0\";
3388 else
3389 return \"ret\;restore %%g0,%1,%Y0\";
3390 }"
3391 [(set_attr "type" "multi")])
3392
3393 (define_insn ""
3394 [(set (match_operand:HI 0 "restore_operand" "")
3395 (match_operand:HI 1 "arith_operand" "rI"))
3396 (return)]
3397 "! TARGET_EPILOGUE"
3398 "*
3399 {
3400 if (current_function_returns_struct)
3401 return \"jmp %%i7+12\;restore %%g0,%1,%Y0\";
3402 else
3403 return \"ret\;restore %%g0,%1,%Y0\";
3404 }"
3405 [(set_attr "type" "multi")])
3406
3407 (define_insn ""
3408 [(set (match_operand:SI 0 "restore_operand" "")
3409 (match_operand:SI 1 "arith_operand" "rI"))
3410 (return)]
3411 "! TARGET_EPILOGUE"
3412 "*
3413 {
3414 if (current_function_returns_struct)
3415 return \"jmp %%i7+12\;restore %%g0,%1,%Y0\";
3416 else
3417 return \"ret\;restore %%g0,%1,%Y0\";
3418 }"
3419 [(set_attr "type" "multi")])
3420
3421 ;; The following pattern is only generated by delayed-branch scheduling,
3422 ;; when the insn winds up in the epilogue. This can only happen when
3423 ;; ! TARGET_FPU because otherwise fp return values are in %f0.
3424 (define_insn ""
3425 [(set (match_operand:SF 0 "restore_operand" "r")
3426 (match_operand:SF 1 "register_operand" "r"))
3427 (return)]
3428 "! TARGET_FPU && ! TARGET_EPILOGUE"
3429 "*
3430 {
3431 if (current_function_returns_struct)
3432 return \"jmp %%i7+12\;restore %%g0,%1,%Y0\";
3433 else
3434 return \"ret\;restore %%g0,%1,%Y0\";
3435 }"
3436 [(set_attr "type" "multi")])
3437
3438 (define_insn ""
3439 [(set (match_operand:SI 0 "restore_operand" "")
3440 (plus:SI (match_operand:SI 1 "arith_operand" "%r")
3441 (match_operand:SI 2 "arith_operand" "rI")))
3442 (return)]
3443 "! TARGET_EPILOGUE"
3444 "*
3445 {
3446 if (current_function_returns_struct)
3447 return \"jmp %%i7+12\;restore %r1,%2,%Y0\";
3448 else
3449 return \"ret\;restore %r1,%2,%Y0\";
3450 }"
3451 [(set_attr "type" "multi")])
3452
3453 ;; Turned off because it should never match (subtracting a constant
3454 ;; is turned into addition) and because it would do the wrong thing
3455 ;; when operand 2 is -4096 (--4096 == 4096 is not a valid immediate).
3456 ;;(define_insn ""
3457 ;; [(set (match_operand:SI 0 "restore_operand" "")
3458 ;; (minus:SI (match_operand:SI 1 "register_operand" "r")
3459 ;; (match_operand:SI 2 "small_int" "I")))
3460 ;; (return)]
3461 ;; "! TARGET_EPILOGUE"
3462 ;; "ret\;restore %1,-(%2),%Y0"
3463 ;; [(set_attr "type" "multi")])
3464
3465 ;; The following pattern is only generated by delayed-branch scheduling,
3466 ;; when the insn winds up in the epilogue.
3467 (define_insn ""
3468 [(set (reg:SF 32)
3469 (match_operand:SF 0 "register_operand" "f"))
3470 (return)]
3471 "! TARGET_EPILOGUE"
3472 "ret\;fmovs %0,%%f0"
3473 [(set_attr "type" "multi")])
3474
3475 ;; Now peepholes to optimize a call followed by a jump.
3476
3477 (define_peephole
3478 [(parallel [(set (match_operand 0 "" "")
3479 (call (mem:SI (match_operand:SI 1 "call_operand_address" "S,r"))
3480 (match_operand 2 "" "")))
3481 (clobber (reg:SI 15))])
3482 (set (pc) (label_ref (match_operand 3 "" "")))]
3483 "short_branch (INSN_UID (insn), INSN_UID (operands[3]))"
3484 "*
3485 {
3486 return \"call %a1,%2\;add %%o7,(%l3-.-4),%%o7\";
3487 }")
3488
3489 (define_peephole
3490 [(parallel [(call (mem:SI (match_operand:SI 0 "call_operand_address" "S,r"))
3491 (match_operand 1 "" ""))
3492 (clobber (reg:SI 15))])
3493 (set (pc) (label_ref (match_operand 2 "" "")))]
3494 "short_branch (INSN_UID (insn), INSN_UID (operands[2]))"
3495 "*
3496 {
3497 return \"call %a0,%1\;add %%o7,(%l2-.-4),%%o7\";
3498 }")
3499
3500 (define_peephole
3501 [(parallel [(set (match_operand:SI 0 "register_operand" "=r")
3502 (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
3503 (reg:SI 0)))
3504 (clobber (reg:CC 0))])
3505 (set (reg:CC 0) (compare (match_dup 0) (const_int 0)))]
3506 ""
3507 "subxcc %r1,0,%0")