;; GCC machine description for i386 synchronization instructions.
;; Copyright (C) 2005-2022 Free Software Foundation, Inc.
;;
;; This file is part of GCC.
;;
;; GCC is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
;;
;; GCC is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
;; GNU General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3.  If not see
;; <http://www.gnu.org/licenses/>.

(define_c_enum "unspec" [
  UNSPEC_LFENCE
  UNSPEC_SFENCE
  UNSPEC_MFENCE

  UNSPEC_FILD_ATOMIC
  UNSPEC_FIST_ATOMIC

  UNSPEC_LDX_ATOMIC
  UNSPEC_STX_ATOMIC

  ;; __atomic support
  UNSPEC_LDA
  UNSPEC_STA
])

(define_c_enum "unspecv" [
  UNSPECV_CMPXCHG
  UNSPECV_XCHG
  UNSPECV_LOCK

  ;; For CMPccXADD support
  UNSPECV_CMPCCXADD

  ;; For RAOINT support
  UNSPECV_RAOINT
])

(define_expand "sse2_lfence"
  [(set (match_dup 0)
        (unspec:BLK [(match_dup 0)] UNSPEC_LFENCE))]
  "TARGET_SSE2"
{
  operands[0] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
  MEM_VOLATILE_P (operands[0]) = 1;
})

(define_insn "*sse2_lfence"
  [(set (match_operand:BLK 0)
        (unspec:BLK [(match_dup 0)] UNSPEC_LFENCE))]
  "TARGET_SSE2"
  "lfence"
  [(set_attr "type" "sse")
   (set_attr "length_address" "0")
   (set_attr "atom_sse_attr" "lfence")
   (set_attr "memory" "unknown")])

(define_expand "sse_sfence"
  [(set (match_dup 0)
        (unspec:BLK [(match_dup 0)] UNSPEC_SFENCE))]
  "TARGET_SSE || TARGET_3DNOW_A"
{
  operands[0] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
  MEM_VOLATILE_P (operands[0]) = 1;
})

(define_insn "*sse_sfence"
  [(set (match_operand:BLK 0)
        (unspec:BLK [(match_dup 0)] UNSPEC_SFENCE))]
  "TARGET_SSE || TARGET_3DNOW_A"
  "sfence"
  [(set_attr "type" "sse")
   (set_attr "length_address" "0")
   (set_attr "atom_sse_attr" "fence")
   (set_attr "memory" "unknown")])

(define_expand "sse2_mfence"
  [(set (match_dup 0)
        (unspec:BLK [(match_dup 0)] UNSPEC_MFENCE))]
  "TARGET_SSE2"
{
  operands[0] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
  MEM_VOLATILE_P (operands[0]) = 1;
})

(define_insn "mfence_sse2"
  [(set (match_operand:BLK 0)
        (unspec:BLK [(match_dup 0)] UNSPEC_MFENCE))]
  "TARGET_64BIT || TARGET_SSE2"
  "mfence"
  [(set_attr "type" "sse")
   (set_attr "length_address" "0")
   (set_attr "atom_sse_attr" "fence")
   (set_attr "memory" "unknown")])

(define_insn "mfence_nosse"
  [(set (match_operand:BLK 0)
        (unspec:BLK [(match_dup 0)] UNSPEC_MFENCE))
   (clobber (reg:CC FLAGS_REG))]
  ""
{
  rtx mem = gen_rtx_MEM (word_mode, stack_pointer_rtx);

  output_asm_insn ("lock{%;} or%z0\t{$0, %0|%0, 0}", &mem);
  return "";
}
  [(set_attr "memory" "unknown")])

(define_expand "mem_thread_fence"
  [(match_operand:SI 0 "const_int_operand")]	;; model
  ""
{
  enum memmodel model = memmodel_from_int (INTVAL (operands[0]));

  /* Unless this is a SEQ_CST fence, the i386 memory model is strong
     enough not to require barriers of any kind.  */
  if (is_mm_seq_cst (model))
    {
      rtx (*mfence_insn)(rtx);
      rtx mem;

      if ((TARGET_64BIT || TARGET_SSE2)
          && (optimize_function_for_size_p (cfun)
              || !TARGET_AVOID_MFENCE))
        mfence_insn = gen_mfence_sse2;
      else
        mfence_insn = gen_mfence_nosse;

      mem = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
      MEM_VOLATILE_P (mem) = 1;

      emit_insn (mfence_insn (mem));
    }
  DONE;
})

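;; A hedged illustration (not part of this file): through the expander
;; above, a C-level fence such as
;;
;;   void fence (void) { __atomic_thread_fence (__ATOMIC_SEQ_CST); }
;;
;; becomes a single mfence on SSE2 targets (or the locked "or" fallback),
;; while any weaker model, e.g. __ATOMIC_ACQUIRE, expands to no
;; instruction at all.
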
;; ??? From volume 3 section 8.1.1 Guaranteed Atomic Operations:
;; only beginning at Pentium family processors do we get any guarantee of
;; atomicity in aligned 64-bit quantities.  Beginning at P6, we get a
;; guarantee for 64-bit accesses that do not cross a cacheline boundary.
;;
;; Note that the TARGET_CMPXCHG8B test below is a stand-in for "Pentium".
;;
;; Importantly, *no* processor makes atomicity guarantees for larger
;; accesses.  In particular, there's no way to perform an atomic TImode
;; move, despite the apparent applicability of MOVDQA et al.

(define_mode_iterator ATOMIC
  [QI HI SI
   (DI "TARGET_64BIT || (TARGET_CMPXCHG8B && (TARGET_80387 || TARGET_SSE))")
  ])

(define_expand "atomic_load<mode>"
  [(set (match_operand:ATOMIC 0 "nonimmediate_operand")
        (unspec:ATOMIC [(match_operand:ATOMIC 1 "memory_operand")
                        (match_operand:SI 2 "const_int_operand")]
                       UNSPEC_LDA))]
  ""
{
  /* For DImode on 32-bit, we can use the FPU to perform the load.  */
  if (<MODE>mode == DImode && !TARGET_64BIT)
    emit_insn (gen_atomic_loaddi_fpu
               (operands[0], operands[1],
                assign_386_stack_local (DImode, SLOT_TEMP)));
  else
    {
      rtx dst = operands[0];

      if (MEM_P (dst))
        dst = gen_reg_rtx (<MODE>mode);

      emit_move_insn (dst, operands[1]);

      /* Fix up the destination if needed.  */
      if (dst != operands[0])
        emit_move_insn (operands[0], dst);
    }
  DONE;
})

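;; As a rough example of the DImode path above: on -m32 with the x87 or
;; SSE available, a load such as
;;
;;   unsigned long long
;;   load64 (unsigned long long *p)
;;   {
;;     return __atomic_load_n (p, __ATOMIC_RELAXED);
;;   }
;;
;; goes through atomic_loaddi_fpu, typically bouncing the value through a
;; stack temporary with fild/fistp or movq, since a pair of 32-bit integer
;; moves could not read the 64-bit value atomically.
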
(define_insn_and_split "atomic_loaddi_fpu"
  [(set (match_operand:DI 0 "nonimmediate_operand" "=x,m,?r")
        (unspec:DI [(match_operand:DI 1 "memory_operand" "m,m,m")]
                   UNSPEC_LDA))
   (clobber (match_operand:DI 2 "memory_operand" "=X,X,m"))
   (clobber (match_scratch:DF 3 "=X,xf,xf"))]
  "!TARGET_64BIT && (TARGET_80387 || TARGET_SSE)"
  "#"
  "&& reload_completed"
  [(const_int 0)]
{
  rtx dst = operands[0], src = operands[1];
  rtx mem = operands[2], tmp = operands[3];

  if (SSE_REG_P (dst))
    emit_move_insn (dst, src);
  else
    {
      if (MEM_P (dst))
        mem = dst;

      if (STACK_REG_P (tmp))
        {
          emit_insn (gen_loaddi_via_fpu (tmp, src));
          emit_insn (gen_storedi_via_fpu (mem, tmp));
        }
      else
        {
          emit_insn (gen_loaddi_via_sse (tmp, src));
          emit_insn (gen_storedi_via_sse (mem, tmp));
        }

      if (mem != dst)
        emit_move_insn (dst, mem);
    }
  DONE;
})

(define_expand "atomic_store<mode>"
  [(set (match_operand:ATOMIC 0 "memory_operand")
        (unspec:ATOMIC [(match_operand:ATOMIC 1 "nonimmediate_operand")
                        (match_operand:SI 2 "const_int_operand")]
                       UNSPEC_STA))]
  ""
{
  enum memmodel model = memmodel_from_int (INTVAL (operands[2]));

  if (<MODE>mode == DImode && !TARGET_64BIT)
    {
      /* For DImode on 32-bit, we can use the FPU to perform the store.  */
      /* Note that while we could perform a cmpxchg8b loop, that turns
         out to be significantly larger than this plus a barrier.  */
      emit_insn (gen_atomic_storedi_fpu
                 (operands[0], operands[1],
                  assign_386_stack_local (DImode, SLOT_TEMP)));
    }
  else
    {
      operands[1] = force_reg (<MODE>mode, operands[1]);

      /* For seq-cst stores, use XCHG when we lack MFENCE.  */
      if (is_mm_seq_cst (model)
          && (!(TARGET_64BIT || TARGET_SSE2)
              || TARGET_AVOID_MFENCE))
        {
          emit_insn (gen_atomic_exchange<mode> (gen_reg_rtx (<MODE>mode),
                                                operands[0], operands[1],
                                                operands[2]));
          DONE;
        }

      /* Otherwise use a store.  */
      emit_insn (gen_atomic_store<mode>_1 (operands[0], operands[1],
                                           operands[2]));
    }
  /* ... followed by an MFENCE, if required.  */
  if (is_mm_seq_cst (model))
    emit_insn (gen_mem_thread_fence (operands[2]));
  DONE;
})

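;; An illustrative sketch (not literal compiler output): for
;;
;;   void store (int *p, int v) { __atomic_store_n (p, v, __ATOMIC_SEQ_CST); }
;;
;; this expander emits either a plain mov followed by mfence, or a single
;; (implicitly locked) xchg when mfence is unavailable or deemed slower
;; via TARGET_AVOID_MFENCE; weaker models need only the mov.
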
(define_insn "atomic_store<mode>_1"
  [(set (match_operand:SWI 0 "memory_operand" "=m")
        (unspec:SWI [(match_operand:SWI 1 "<nonmemory_operand>" "<r><i>")
                     (match_operand:SI 2 "const_int_operand")]
                    UNSPEC_STA))]
  ""
  "%K2mov{<imodesuffix>}\t{%1, %0|%0, %1}")

(define_insn_and_split "atomic_storedi_fpu"
  [(set (match_operand:DI 0 "memory_operand" "=m,m,m")
        (unspec:DI [(match_operand:DI 1 "nonimmediate_operand" "x,m,?r")]
                   UNSPEC_STA))
   (clobber (match_operand:DI 2 "memory_operand" "=X,X,m"))
   (clobber (match_scratch:DF 3 "=X,xf,xf"))]
  "!TARGET_64BIT && (TARGET_80387 || TARGET_SSE)"
  "#"
  "&& reload_completed"
  [(const_int 0)]
{
  rtx dst = operands[0], src = operands[1];
  rtx mem = operands[2], tmp = operands[3];

  if (SSE_REG_P (src))
    emit_move_insn (dst, src);
  else
    {
      if (REG_P (src))
        {
          emit_move_insn (mem, src);
          src = mem;
        }

      if (STACK_REG_P (tmp))
        {
          emit_insn (gen_loaddi_via_fpu (tmp, src));
          emit_insn (gen_storedi_via_fpu (dst, tmp));
        }
      else
        {
          emit_insn (gen_loaddi_via_sse (tmp, src));
          emit_insn (gen_storedi_via_sse (dst, tmp));
        }
    }
  DONE;
})

;; ??? You'd think that we'd be able to perform this via FLOAT + FIX_TRUNC
;; operations.  But the fix_trunc patterns want way more setup than we want
;; to provide.  Note that the scratch is DFmode instead of XFmode in order
;; to make it easy to allocate a scratch in either SSE or FP_REGs above.

(define_insn "loaddi_via_fpu"
  [(set (match_operand:DF 0 "register_operand" "=f")
        (unspec:DF [(match_operand:DI 1 "memory_operand" "m")]
                   UNSPEC_FILD_ATOMIC))]
  "TARGET_80387"
  "fild%Z1\t%1"
  [(set_attr "type" "fmov")
   (set_attr "mode" "DF")
   (set_attr "fp_int_src" "true")])

(define_insn "storedi_via_fpu"
  [(set (match_operand:DI 0 "memory_operand" "=m")
        (unspec:DI [(match_operand:DF 1 "register_operand" "f")]
                   UNSPEC_FIST_ATOMIC))]
  "TARGET_80387"
{
  gcc_assert (find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != NULL_RTX);

  return "fistp%Z0\t%0";
}
  [(set_attr "type" "fmov")
   (set_attr "mode" "DI")])

(define_insn "loaddi_via_sse"
  [(set (match_operand:DF 0 "register_operand" "=x")
        (unspec:DF [(match_operand:DI 1 "memory_operand" "m")]
                   UNSPEC_LDX_ATOMIC))]
  "TARGET_SSE"
{
  if (TARGET_SSE2)
    return "%vmovq\t{%1, %0|%0, %1}";
  return "movlps\t{%1, %0|%0, %1}";
}
  [(set_attr "type" "ssemov")
   (set_attr "mode" "DI")])

(define_insn "storedi_via_sse"
  [(set (match_operand:DI 0 "memory_operand" "=m")
        (unspec:DI [(match_operand:DF 1 "register_operand" "x")]
                   UNSPEC_STX_ATOMIC))]
  "TARGET_SSE"
{
  if (TARGET_SSE2)
    return "%vmovq\t{%1, %0|%0, %1}";
  return "movlps\t{%1, %0|%0, %1}";
}
  [(set_attr "type" "ssemov")
   (set_attr "mode" "DI")])

(define_expand "atomic_compare_and_swap<mode>"
  [(match_operand:QI 0 "register_operand")	;; bool success output
   (match_operand:SWI124 1 "register_operand")	;; oldval output
   (match_operand:SWI124 2 "memory_operand")	;; memory
   (match_operand:SWI124 3 "register_operand")	;; expected input
   (match_operand:SWI124 4 "register_operand")	;; newval input
   (match_operand:SI 5 "const_int_operand")	;; is_weak
   (match_operand:SI 6 "const_int_operand")	;; success model
   (match_operand:SI 7 "const_int_operand")]	;; failure model
  "TARGET_CMPXCHG"
{
  if (TARGET_RELAX_CMPXCHG_LOOP)
    {
      ix86_expand_cmpxchg_loop (&operands[0], operands[1], operands[2],
                                operands[3], operands[4], operands[6],
                                false, NULL);
    }
  else
    {
      emit_insn
        (gen_atomic_compare_and_swap<mode>_1
         (operands[1], operands[2], operands[3], operands[4], operands[6]));
      ix86_expand_setcc (operands[0], EQ, gen_rtx_REG (CCZmode, FLAGS_REG),
                         const0_rtx);
    }
  DONE;
})

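;; Hedged example of the non-loop path: a compare-exchange such as
;;
;;   _Bool
;;   cas (int *p, int *expected, int desired)
;;   {
;;     return __atomic_compare_exchange_n (p, expected, desired, 0,
;;                                         __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
;;   }
;;
;; is emitted as "lock cmpxchg" with the expected value in %eax and a
;; following sete for the success flag, via the _1 insns below.
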
(define_mode_iterator CASMODE
  [(DI "TARGET_64BIT || TARGET_CMPXCHG8B")
   (TI "TARGET_64BIT && TARGET_CMPXCHG16B")])
(define_mode_attr CASHMODE [(DI "SI") (TI "DI")])

(define_expand "atomic_compare_and_swap<mode>"
  [(match_operand:QI 0 "register_operand")	;; bool success output
   (match_operand:CASMODE 1 "register_operand")	;; oldval output
   (match_operand:CASMODE 2 "memory_operand")	;; memory
   (match_operand:CASMODE 3 "register_operand")	;; expected input
   (match_operand:CASMODE 4 "register_operand")	;; newval input
   (match_operand:SI 5 "const_int_operand")	;; is_weak
   (match_operand:SI 6 "const_int_operand")	;; success model
   (match_operand:SI 7 "const_int_operand")]	;; failure model
  "TARGET_CMPXCHG"
{
  int doubleword = !(<MODE>mode == DImode && TARGET_64BIT);
  if (TARGET_RELAX_CMPXCHG_LOOP)
    {
      ix86_expand_cmpxchg_loop (&operands[0], operands[1], operands[2],
                                operands[3], operands[4], operands[6],
                                doubleword, NULL);
    }
  else
    {
      if (!doubleword)
        {
          emit_insn
            (gen_atomic_compare_and_swapdi_1
             (operands[1], operands[2], operands[3], operands[4], operands[6]));
        }
      else
        {
          machine_mode hmode = <CASHMODE>mode;

          emit_insn
            (gen_atomic_compare_and_swap<mode>_doubleword
             (operands[1], operands[2], operands[3],
              gen_lowpart (hmode, operands[4]), gen_highpart (hmode, operands[4]),
              operands[6]));
        }

      ix86_expand_setcc (operands[0], EQ, gen_rtx_REG (CCZmode, FLAGS_REG),
                         const0_rtx);
    }
  DONE;
})

;; For double-word compare and swap, we are obliged to play tricks with
;; the input newval (op3:op4) because the Intel register numbering does
;; not match the gcc register numbering, so the pair must be CX:BX.

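;; Hedged sketch of the resulting code for a 64-bit CAS on -m32 (register
;; choices other than the fixed pairs are illustrative only):
;;
;;	; expected value in edx:eax, new value in ecx:ebx
;;	lock cmpxchg8b (%esi)
;;	sete %al
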
(define_mode_attr doublemodesuffix [(SI "8") (DI "16")])

(define_insn "atomic_compare_and_swap<dwi>_doubleword"
  [(set (match_operand:<DWI> 0 "register_operand" "=A")
        (unspec_volatile:<DWI>
          [(match_operand:<DWI> 1 "memory_operand" "+m")
           (match_operand:<DWI> 2 "register_operand" "0")
           (match_operand:DWIH 3 "register_operand" "b")
           (match_operand:DWIH 4 "register_operand" "c")
           (match_operand:SI 5 "const_int_operand")]
          UNSPECV_CMPXCHG))
   (set (match_dup 1)
        (unspec_volatile:<DWI> [(const_int 0)] UNSPECV_CMPXCHG))
   (set (reg:CCZ FLAGS_REG)
        (unspec_volatile:CCZ [(const_int 0)] UNSPECV_CMPXCHG))]
  "TARGET_CMPXCHG<doublemodesuffix>B"
  "lock{%;} %K5cmpxchg<doublemodesuffix>b\t%1")

(define_insn "atomic_compare_and_swap<mode>_1"
  [(set (match_operand:SWI 0 "register_operand" "=a")
        (unspec_volatile:SWI
          [(match_operand:SWI 1 "memory_operand" "+m")
           (match_operand:SWI 2 "register_operand" "0")
           (match_operand:SWI 3 "register_operand" "<r>")
           (match_operand:SI 4 "const_int_operand")]
          UNSPECV_CMPXCHG))
   (set (match_dup 1)
        (unspec_volatile:SWI [(const_int 0)] UNSPECV_CMPXCHG))
   (set (reg:CCZ FLAGS_REG)
        (unspec_volatile:CCZ [(const_int 0)] UNSPECV_CMPXCHG))]
  "TARGET_CMPXCHG"
  "lock{%;} %K4cmpxchg{<imodesuffix>}\t{%3, %1|%1, %3}")

(define_peephole2
  [(set (match_operand:SWI 0 "register_operand")
        (match_operand:SWI 1 "general_operand"))
   (parallel [(set (match_dup 0)
                   (unspec_volatile:SWI
                     [(match_operand:SWI 2 "memory_operand")
                      (match_dup 0)
                      (match_operand:SWI 3 "register_operand")
                      (match_operand:SI 4 "const_int_operand")]
                     UNSPECV_CMPXCHG))
              (set (match_dup 2)
                   (unspec_volatile:SWI [(const_int 0)] UNSPECV_CMPXCHG))
              (set (reg:CCZ FLAGS_REG)
                   (unspec_volatile:CCZ [(const_int 0)] UNSPECV_CMPXCHG))])
   (set (reg:CCZ FLAGS_REG)
        (compare:CCZ (match_operand:SWI 5 "register_operand")
                     (match_operand:SWI 6 "general_operand")))]
  "(rtx_equal_p (operands[0], operands[5])
    && rtx_equal_p (operands[1], operands[6]))
   || (rtx_equal_p (operands[0], operands[6])
       && rtx_equal_p (operands[1], operands[5]))"
  [(set (match_dup 0)
        (match_dup 1))
   (parallel [(set (match_dup 0)
                   (unspec_volatile:SWI
                     [(match_dup 2)
                      (match_dup 0)
                      (match_dup 3)
                      (match_dup 4)]
                     UNSPECV_CMPXCHG))
              (set (match_dup 2)
                   (unspec_volatile:SWI [(const_int 0)] UNSPECV_CMPXCHG))
              (set (reg:CCZ FLAGS_REG)
                   (unspec_volatile:CCZ [(const_int 0)] UNSPECV_CMPXCHG))])])

(define_peephole2
  [(parallel [(set (match_operand:SWI48 0 "register_operand")
                   (match_operand:SWI48 1 "const_int_operand"))
              (clobber (reg:CC FLAGS_REG))])
   (parallel [(set (match_operand:SWI 2 "register_operand")
                   (unspec_volatile:SWI
                     [(match_operand:SWI 3 "memory_operand")
                      (match_dup 2)
                      (match_operand:SWI 4 "register_operand")
                      (match_operand:SI 5 "const_int_operand")]
                     UNSPECV_CMPXCHG))
              (set (match_dup 3)
                   (unspec_volatile:SWI [(const_int 0)] UNSPECV_CMPXCHG))
              (set (reg:CCZ FLAGS_REG)
                   (unspec_volatile:CCZ [(const_int 0)] UNSPECV_CMPXCHG))])
   (set (reg:CCZ FLAGS_REG)
        (compare:CCZ (match_dup 2)
                     (match_dup 1)))]
  "REGNO (operands[0]) == REGNO (operands[2])"
  [(parallel [(set (match_dup 0)
                   (match_dup 1))
              (clobber (reg:CC FLAGS_REG))])
   (parallel [(set (match_dup 2)
                   (unspec_volatile:SWI
                     [(match_dup 3)
                      (match_dup 2)
                      (match_dup 4)
                      (match_dup 5)]
                     UNSPECV_CMPXCHG))
              (set (match_dup 3)
                   (unspec_volatile:SWI [(const_int 0)] UNSPECV_CMPXCHG))
              (set (reg:CCZ FLAGS_REG)
                   (unspec_volatile:CCZ [(const_int 0)] UNSPECV_CMPXCHG))])])

(define_expand "atomic_fetch_<logic><mode>"
  [(match_operand:SWI124 0 "register_operand")
   (any_logic:SWI124
     (match_operand:SWI124 1 "memory_operand")
     (match_operand:SWI124 2 "register_operand"))
   (match_operand:SI 3 "const_int_operand")]
  "TARGET_CMPXCHG && TARGET_RELAX_CMPXCHG_LOOP"
{
  ix86_expand_atomic_fetch_op_loop (operands[0], operands[1],
                                    operands[2], <CODE>, false,
                                    false);
  DONE;
})

(define_expand "atomic_<logic>_fetch<mode>"
  [(match_operand:SWI124 0 "register_operand")
   (any_logic:SWI124
     (match_operand:SWI124 1 "memory_operand")
     (match_operand:SWI124 2 "register_operand"))
   (match_operand:SI 3 "const_int_operand")]
  "TARGET_CMPXCHG && TARGET_RELAX_CMPXCHG_LOOP"
{
  ix86_expand_atomic_fetch_op_loop (operands[0], operands[1],
                                    operands[2], <CODE>, true,
                                    false);
  DONE;
})

(define_expand "atomic_fetch_nand<mode>"
  [(match_operand:SWI124 0 "register_operand")
   (match_operand:SWI124 1 "memory_operand")
   (match_operand:SWI124 2 "register_operand")
   (match_operand:SI 3 "const_int_operand")]
  "TARGET_CMPXCHG && TARGET_RELAX_CMPXCHG_LOOP"
{
  ix86_expand_atomic_fetch_op_loop (operands[0], operands[1],
                                    operands[2], NOT, false,
                                    false);
  DONE;
})

(define_expand "atomic_nand_fetch<mode>"
  [(match_operand:SWI124 0 "register_operand")
   (match_operand:SWI124 1 "memory_operand")
   (match_operand:SWI124 2 "register_operand")
   (match_operand:SI 3 "const_int_operand")]
  "TARGET_CMPXCHG && TARGET_RELAX_CMPXCHG_LOOP"
{
  ix86_expand_atomic_fetch_op_loop (operands[0], operands[1],
                                    operands[2], NOT, true,
                                    false);
  DONE;
})

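;; The loop emitted by ix86_expand_atomic_fetch_op_loop is, in outline
;; (a sketch of the intent, not the exact expansion):
;;
;;   old = *mem;
;;   do
;;     new = old OP val;   /* NOT means new = ~(old & val), i.e. nand.  */
;;   while (!__atomic_compare_exchange_n (mem, &old, new, ...));
;;   return fetch_after ? new : old;
;;
;; hence the TARGET_CMPXCHG requirement on all of these expanders.
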
(define_expand "atomic_fetch_<logic><mode>"
  [(match_operand:CASMODE 0 "register_operand")
   (any_logic:CASMODE
     (match_operand:CASMODE 1 "memory_operand")
     (match_operand:CASMODE 2 "register_operand"))
   (match_operand:SI 3 "const_int_operand")]
  "TARGET_CMPXCHG && TARGET_RELAX_CMPXCHG_LOOP"
{
  bool doubleword = (<MODE>mode == DImode && !TARGET_64BIT)
                    || (<MODE>mode == TImode);
  ix86_expand_atomic_fetch_op_loop (operands[0], operands[1],
                                    operands[2], <CODE>, false,
                                    doubleword);
  DONE;
})

(define_expand "atomic_<logic>_fetch<mode>"
  [(match_operand:CASMODE 0 "register_operand")
   (any_logic:CASMODE
     (match_operand:CASMODE 1 "memory_operand")
     (match_operand:CASMODE 2 "register_operand"))
   (match_operand:SI 3 "const_int_operand")]
  "TARGET_CMPXCHG && TARGET_RELAX_CMPXCHG_LOOP"
{
  bool doubleword = (<MODE>mode == DImode && !TARGET_64BIT)
                    || (<MODE>mode == TImode);
  ix86_expand_atomic_fetch_op_loop (operands[0], operands[1],
                                    operands[2], <CODE>, true,
                                    doubleword);
  DONE;
})

(define_expand "atomic_fetch_nand<mode>"
  [(match_operand:CASMODE 0 "register_operand")
   (match_operand:CASMODE 1 "memory_operand")
   (match_operand:CASMODE 2 "register_operand")
   (match_operand:SI 3 "const_int_operand")]
  "TARGET_CMPXCHG && TARGET_RELAX_CMPXCHG_LOOP"
{
  bool doubleword = (<MODE>mode == DImode && !TARGET_64BIT)
                    || (<MODE>mode == TImode);
  ix86_expand_atomic_fetch_op_loop (operands[0], operands[1],
                                    operands[2], NOT, false,
                                    doubleword);
  DONE;
})

(define_expand "atomic_nand_fetch<mode>"
  [(match_operand:CASMODE 0 "register_operand")
   (match_operand:CASMODE 1 "memory_operand")
   (match_operand:CASMODE 2 "register_operand")
   (match_operand:SI 3 "const_int_operand")]
  "TARGET_CMPXCHG && TARGET_RELAX_CMPXCHG_LOOP"
{
  bool doubleword = (<MODE>mode == DImode && !TARGET_64BIT)
                    || (<MODE>mode == TImode);
  ix86_expand_atomic_fetch_op_loop (operands[0], operands[1],
                                    operands[2], NOT, true,
                                    doubleword);
  DONE;
})

;; For operand 2, the nonmemory_operand predicate is used instead of
;; register_operand to allow the combiner to better optimize atomic
;; additions of constants.
(define_insn "atomic_fetch_add<mode>"
  [(set (match_operand:SWI 0 "register_operand" "=<r>")
        (unspec_volatile:SWI
          [(match_operand:SWI 1 "memory_operand" "+m")
           (match_operand:SI 3 "const_int_operand")]	;; model
          UNSPECV_XCHG))
   (set (match_dup 1)
        (plus:SWI (match_dup 1)
                  (match_operand:SWI 2 "nonmemory_operand" "0")))
   (clobber (reg:CC FLAGS_REG))]
  "TARGET_XADD"
  "lock{%;} %K3xadd{<imodesuffix>}\t{%0, %1|%1, %0}")

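;; E.g. (illustrative, not literal output):
;;
;;   int fetch_add (int *p, int v)
;;   { return __atomic_fetch_add (p, v, __ATOMIC_SEQ_CST); }
;;
;; assembles to a single "lock xadd", with the old value returned in the
;; register that supplied the addend.
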
;; This peephole2 and the following insn optimize
;; __sync_fetch_and_add (x, -N) == N into just lock {add,sub,inc,dec}
;; followed by a test of the flags instead of lock xadd and comparisons.
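;; For instance (a hedged illustration of the intent), code like
;;   if (__sync_fetch_and_add (&count, -1) == 1) release ();
;; can use "lock dec ...; je" instead of materializing -1, doing
;; "lock xadd" and then comparing the result against 1.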
(define_peephole2
  [(set (match_operand:SWI 0 "register_operand")
        (match_operand:SWI 2 "const_int_operand"))
   (parallel [(set (match_dup 0)
                   (unspec_volatile:SWI
                     [(match_operand:SWI 1 "memory_operand")
                      (match_operand:SI 4 "const_int_operand")]
                     UNSPECV_XCHG))
              (set (match_dup 1)
                   (plus:SWI (match_dup 1)
                             (match_dup 0)))
              (clobber (reg:CC FLAGS_REG))])
   (set (reg:CCZ FLAGS_REG)
        (compare:CCZ (match_dup 0)
                     (match_operand:SWI 3 "const_int_operand")))]
  "peep2_reg_dead_p (3, operands[0])
   && (unsigned HOST_WIDE_INT) INTVAL (operands[2])
      == -(unsigned HOST_WIDE_INT) INTVAL (operands[3])
   && !reg_overlap_mentioned_p (operands[0], operands[1])"
  [(parallel [(set (reg:CCZ FLAGS_REG)
                   (compare:CCZ
                     (unspec_volatile:SWI [(match_dup 1) (match_dup 4)]
                                          UNSPECV_XCHG)
                     (match_dup 3)))
              (set (match_dup 1)
                   (plus:SWI (match_dup 1)
                             (match_dup 2)))])])

;; Likewise, but for the -Os special case of *mov<mode>_or.
(define_peephole2
  [(parallel [(set (match_operand:SWI 0 "register_operand")
                   (match_operand:SWI 2 "constm1_operand"))
              (clobber (reg:CC FLAGS_REG))])
   (parallel [(set (match_dup 0)
                   (unspec_volatile:SWI
                     [(match_operand:SWI 1 "memory_operand")
                      (match_operand:SI 4 "const_int_operand")]
                     UNSPECV_XCHG))
              (set (match_dup 1)
                   (plus:SWI (match_dup 1)
                             (match_dup 0)))
              (clobber (reg:CC FLAGS_REG))])
   (set (reg:CCZ FLAGS_REG)
        (compare:CCZ (match_dup 0)
                     (match_operand:SWI 3 "const_int_operand")))]
  "peep2_reg_dead_p (3, operands[0])
   && (unsigned HOST_WIDE_INT) INTVAL (operands[2])
      == -(unsigned HOST_WIDE_INT) INTVAL (operands[3])
   && !reg_overlap_mentioned_p (operands[0], operands[1])"
  [(parallel [(set (reg:CCZ FLAGS_REG)
                   (compare:CCZ
                     (unspec_volatile:SWI [(match_dup 1) (match_dup 4)]
                                          UNSPECV_XCHG)
                     (match_dup 3)))
              (set (match_dup 1)
                   (plus:SWI (match_dup 1)
                             (match_dup 2)))])])

(define_insn "*atomic_fetch_add_cmp<mode>"
  [(set (reg:CCZ FLAGS_REG)
        (compare:CCZ
          (unspec_volatile:SWI
            [(match_operand:SWI 0 "memory_operand" "+m")
             (match_operand:SI 3 "const_int_operand")]	;; model
            UNSPECV_XCHG)
          (match_operand:SWI 2 "const_int_operand")))
   (set (match_dup 0)
        (plus:SWI (match_dup 0)
                  (match_operand:SWI 1 "const_int_operand")))]
  "(unsigned HOST_WIDE_INT) INTVAL (operands[1])
   == -(unsigned HOST_WIDE_INT) INTVAL (operands[2])"
{
  if (incdec_operand (operands[1], <MODE>mode))
    {
      if (operands[1] == const1_rtx)
        return "lock{%;} %K3inc{<imodesuffix>}\t%0";
      else
        {
          gcc_assert (operands[1] == constm1_rtx);
          return "lock{%;} %K3dec{<imodesuffix>}\t%0";
        }
    }

  if (x86_maybe_negate_const_int (&operands[1], <MODE>mode))
    return "lock{%;} %K3sub{<imodesuffix>}\t{%1, %0|%0, %1}";

  return "lock{%;} %K3add{<imodesuffix>}\t{%1, %0|%0, %1}";
})

;; Recall that xchg implicitly asserts LOCK#, so adding an explicit lock
;; prefix would only waste space.  In addition, it is always a full
;; barrier, so we can ignore the memory model.
(define_insn "atomic_exchange<mode>"
  [(set (match_operand:SWI 0 "register_operand" "=<r>")	;; output
        (unspec_volatile:SWI
          [(match_operand:SWI 1 "memory_operand" "+m")	;; memory
           (match_operand:SI 3 "const_int_operand")]	;; model
          UNSPECV_XCHG))
   (set (match_dup 1)
        (match_operand:SWI 2 "register_operand" "0"))]	;; input
  ""
  "%K3xchg{<imodesuffix>}\t{%1, %0|%0, %1}")

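;; Sketch:
;;
;;   int swap (int *p, int v)
;;   { return __atomic_exchange_n (p, v, __ATOMIC_SEQ_CST); }
;;
;; emits a bare xchg; no lock prefix is printed because of the implicit
;; bus lock noted above, and no fence is needed for any memory model.
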
(define_code_iterator any_plus_logic [and ior xor plus])
(define_code_attr plus_logic [(and "and") (ior "or") (xor "xor") (plus "add")])

(define_insn "rao_a<plus_logic><mode>"
  [(set (match_operand:SWI48 0 "memory_operand" "+m")
        (unspec_volatile:SWI48
          [(any_plus_logic:SWI48 (match_dup 0)
                                 (match_operand:SWI48 1 "register_operand" "r"))
           (const_int 0)]	;; MEMMODEL_RELAXED
          UNSPECV_RAOINT))]
  "TARGET_RAOINT"
  "a<plus_logic>\t{%1, %0|%0, %1}")

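;; These produce the RAO-INT instructions AADD/AAND/AOR/AXOR.  User code
;; is expected to reach them through the raointintrin.h intrinsics (for
;; example _aadd_i32 (ptr, val)); the intrinsic names are mentioned here
;; only as a hedged pointer, they are not defined by this file.
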
(define_insn "atomic_add<mode>"
  [(set (match_operand:SWI 0 "memory_operand" "+m")
        (unspec_volatile:SWI
          [(plus:SWI (match_dup 0)
                     (match_operand:SWI 1 "nonmemory_operand" "<r><i>"))
           (match_operand:SI 2 "const_int_operand")]	;; model
          UNSPECV_LOCK))
   (clobber (reg:CC FLAGS_REG))]
  ""
{
  if (incdec_operand (operands[1], <MODE>mode))
    {
      if (operands[1] == const1_rtx)
        return "lock{%;} %K2inc{<imodesuffix>}\t%0";
      else
        {
          gcc_assert (operands[1] == constm1_rtx);
          return "lock{%;} %K2dec{<imodesuffix>}\t%0";
        }
    }

  if (x86_maybe_negate_const_int (&operands[1], <MODE>mode))
    return "lock{%;} %K2sub{<imodesuffix>}\t{%1, %0|%0, %1}";

  return "lock{%;} %K2add{<imodesuffix>}\t{%1, %0|%0, %1}";
})

(define_insn "atomic_sub<mode>"
  [(set (match_operand:SWI 0 "memory_operand" "+m")
        (unspec_volatile:SWI
          [(minus:SWI (match_dup 0)
                      (match_operand:SWI 1 "nonmemory_operand" "<r><i>"))
           (match_operand:SI 2 "const_int_operand")]	;; model
          UNSPECV_LOCK))
   (clobber (reg:CC FLAGS_REG))]
  ""
{
  if (incdec_operand (operands[1], <MODE>mode))
    {
      if (operands[1] == const1_rtx)
        return "lock{%;} %K2dec{<imodesuffix>}\t%0";
      else
        {
          gcc_assert (operands[1] == constm1_rtx);
          return "lock{%;} %K2inc{<imodesuffix>}\t%0";
        }
    }

  if (x86_maybe_negate_const_int (&operands[1], <MODE>mode))
    return "lock{%;} %K2add{<imodesuffix>}\t{%1, %0|%0, %1}";

  return "lock{%;} %K2sub{<imodesuffix>}\t{%1, %0|%0, %1}";
})

(define_insn "atomic_<logic><mode>"
  [(set (match_operand:SWI 0 "memory_operand" "+m")
        (unspec_volatile:SWI
          [(any_logic:SWI (match_dup 0)
                          (match_operand:SWI 1 "nonmemory_operand" "<r><i>"))
           (match_operand:SI 2 "const_int_operand")]	;; model
          UNSPECV_LOCK))
   (clobber (reg:CC FLAGS_REG))]
  ""
  "lock{%;} %K2<logic>{<imodesuffix>}\t{%1, %0|%0, %1}")

(define_expand "atomic_bit_test_and_set<mode>"
  [(match_operand:SWI248 0 "register_operand")
   (match_operand:SWI248 1 "memory_operand")
   (match_operand:SWI248 2 "nonmemory_operand")
   (match_operand:SI 3 "const_int_operand")	;; model
   (match_operand:SI 4 "const_int_operand")]
  ""
{
  emit_insn (gen_atomic_bit_test_and_set<mode>_1 (operands[1], operands[2],
                                                  operands[3]));
  rtx tem = gen_reg_rtx (QImode);
  ix86_expand_setcc (tem, EQ, gen_rtx_REG (CCCmode, FLAGS_REG), const0_rtx);
  rtx result = convert_modes (<MODE>mode, QImode, tem, 1);
  if (operands[4] == const0_rtx)
    result = expand_simple_binop (<MODE>mode, ASHIFT, result,
                                  operands[2], operands[0], 0, OPTAB_WIDEN);
  if (result != operands[0])
    emit_move_insn (operands[0], result);
  DONE;
})

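;; Hedged example: the middle end forms this pattern from code such as
;;
;;   _Bool
;;   set_bit (unsigned *p, unsigned bit)
;;   {
;;     unsigned mask = 1u << bit;
;;     return __atomic_fetch_or (p, mask, __ATOMIC_SEQ_CST) & mask;
;;   }
;;
;; which then assembles to "lock bts; setc" instead of a cmpxchg loop.
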
(define_insn "atomic_bit_test_and_set<mode>_1"
  [(set (reg:CCC FLAGS_REG)
        (compare:CCC
          (unspec_volatile:SWI248
            [(match_operand:SWI248 0 "memory_operand" "+m")
             (match_operand:SI 2 "const_int_operand")]	;; model
            UNSPECV_XCHG)
          (const_int 0)))
   (set (zero_extract:SWI248 (match_dup 0)
                             (const_int 1)
                             (match_operand:SWI248 1 "nonmemory_operand" "rN"))
        (const_int 1))]
  ""
  "lock{%;} %K2bts{<imodesuffix>}\t{%1, %0|%0, %1}")

(define_expand "atomic_bit_test_and_complement<mode>"
  [(match_operand:SWI248 0 "register_operand")
   (match_operand:SWI248 1 "memory_operand")
   (match_operand:SWI248 2 "nonmemory_operand")
   (match_operand:SI 3 "const_int_operand")	;; model
   (match_operand:SI 4 "const_int_operand")]
  ""
{
  emit_insn (gen_atomic_bit_test_and_complement<mode>_1 (operands[1],
                                                         operands[2],
                                                         operands[3]));
  rtx tem = gen_reg_rtx (QImode);
  ix86_expand_setcc (tem, EQ, gen_rtx_REG (CCCmode, FLAGS_REG), const0_rtx);
  rtx result = convert_modes (<MODE>mode, QImode, tem, 1);
  if (operands[4] == const0_rtx)
    result = expand_simple_binop (<MODE>mode, ASHIFT, result,
                                  operands[2], operands[0], 0, OPTAB_WIDEN);
  if (result != operands[0])
    emit_move_insn (operands[0], result);
  DONE;
})

(define_insn "atomic_bit_test_and_complement<mode>_1"
  [(set (reg:CCC FLAGS_REG)
        (compare:CCC
          (unspec_volatile:SWI248
            [(match_operand:SWI248 0 "memory_operand" "+m")
             (match_operand:SI 2 "const_int_operand")]	;; model
            UNSPECV_XCHG)
          (const_int 0)))
   (set (zero_extract:SWI248 (match_dup 0)
                             (const_int 1)
                             (match_operand:SWI248 1 "nonmemory_operand" "rN"))
        (not:SWI248 (zero_extract:SWI248 (match_dup 0)
                                         (const_int 1)
                                         (match_dup 1))))]
  ""
  "lock{%;} %K2btc{<imodesuffix>}\t{%1, %0|%0, %1}")

(define_expand "atomic_bit_test_and_reset<mode>"
  [(match_operand:SWI248 0 "register_operand")
   (match_operand:SWI248 1 "memory_operand")
   (match_operand:SWI248 2 "nonmemory_operand")
   (match_operand:SI 3 "const_int_operand")	;; model
   (match_operand:SI 4 "const_int_operand")]
  ""
{
  emit_insn (gen_atomic_bit_test_and_reset<mode>_1 (operands[1], operands[2],
                                                    operands[3]));
  rtx tem = gen_reg_rtx (QImode);
  ix86_expand_setcc (tem, EQ, gen_rtx_REG (CCCmode, FLAGS_REG), const0_rtx);
  rtx result = convert_modes (<MODE>mode, QImode, tem, 1);
  if (operands[4] == const0_rtx)
    result = expand_simple_binop (<MODE>mode, ASHIFT, result,
                                  operands[2], operands[0], 0, OPTAB_WIDEN);
  if (result != operands[0])
    emit_move_insn (operands[0], result);
  DONE;
})

(define_insn "atomic_bit_test_and_reset<mode>_1"
  [(set (reg:CCC FLAGS_REG)
        (compare:CCC
          (unspec_volatile:SWI248
            [(match_operand:SWI248 0 "memory_operand" "+m")
             (match_operand:SI 2 "const_int_operand")]	;; model
            UNSPECV_XCHG)
          (const_int 0)))
   (set (zero_extract:SWI248 (match_dup 0)
                             (const_int 1)
                             (match_operand:SWI248 1 "nonmemory_operand" "rN"))
        (const_int 0))]
  ""
  "lock{%;} %K2btr{<imodesuffix>}\t{%1, %0|%0, %1}")

(define_expand "atomic_<plusminus_mnemonic>_fetch_cmp_0<mode>"
  [(match_operand:QI 0 "register_operand")
   (plusminus:SWI (match_operand:SWI 1 "memory_operand")
                  (match_operand:SWI 2 "nonmemory_operand"))
   (match_operand:SI 3 "const_int_operand")	;; model
   (match_operand:SI 4 "const_int_operand")]
  ""
{
  if (INTVAL (operands[4]) == GT || INTVAL (operands[4]) == LE)
    FAIL;
  emit_insn (gen_atomic_<plusminus_mnemonic>_fetch_cmp_0<mode>_1 (operands[1],
                                                                  operands[2],
                                                                  operands[3]));
  ix86_expand_setcc (operands[0], (enum rtx_code) INTVAL (operands[4]),
                     gen_rtx_REG (CCGOCmode, FLAGS_REG), const0_rtx);
  DONE;
})

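;; For example (sketch, not literal output):
;;
;;   _Bool
;;   dec_and_test (int *p)
;;   { return __atomic_sub_fetch (p, 1, __ATOMIC_SEQ_CST) == 0; }
;;
;; can come out as "lock dec ...; sete" through the _1 insns below,
;; avoiding a lock xadd plus a separate comparison.
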
(define_insn "atomic_add_fetch_cmp_0<mode>_1"
  [(set (reg:CCGOC FLAGS_REG)
        (compare:CCGOC
          (plus:SWI
            (unspec_volatile:SWI
              [(match_operand:SWI 0 "memory_operand" "+m")
               (match_operand:SI 2 "const_int_operand")]	;; model
              UNSPECV_XCHG)
            (match_operand:SWI 1 "nonmemory_operand" "<r><i>"))
          (const_int 0)))
   (set (match_dup 0)
        (plus:SWI (match_dup 0) (match_dup 1)))]
  ""
{
  if (incdec_operand (operands[1], <MODE>mode))
    {
      if (operands[1] == const1_rtx)
        return "lock{%;} %K2inc{<imodesuffix>}\t%0";
      else
        return "lock{%;} %K2dec{<imodesuffix>}\t%0";
    }

  if (x86_maybe_negate_const_int (&operands[1], <MODE>mode))
    return "lock{%;} %K2sub{<imodesuffix>}\t{%1, %0|%0, %1}";

  return "lock{%;} %K2add{<imodesuffix>}\t{%1, %0|%0, %1}";
})

(define_insn "atomic_sub_fetch_cmp_0<mode>_1"
  [(set (reg:CCGOC FLAGS_REG)
        (compare:CCGOC
          (minus:SWI
            (unspec_volatile:SWI
              [(match_operand:SWI 0 "memory_operand" "+m")
               (match_operand:SI 2 "const_int_operand")]	;; model
              UNSPECV_XCHG)
            (match_operand:SWI 1 "nonmemory_operand" "<r><i>"))
          (const_int 0)))
   (set (match_dup 0)
        (minus:SWI (match_dup 0) (match_dup 1)))]
  ""
{
  if (incdec_operand (operands[1], <MODE>mode))
    {
      if (operands[1] != const1_rtx)
        return "lock{%;} %K2inc{<imodesuffix>}\t%0";
      else
        return "lock{%;} %K2dec{<imodesuffix>}\t%0";
    }

  if (x86_maybe_negate_const_int (&operands[1], <MODE>mode))
    return "lock{%;} %K2add{<imodesuffix>}\t{%1, %0|%0, %1}";

  return "lock{%;} %K2sub{<imodesuffix>}\t{%1, %0|%0, %1}";
})

(define_expand "atomic_<logic>_fetch_cmp_0<mode>"
  [(match_operand:QI 0 "register_operand")
   (any_logic:SWI (match_operand:SWI 1 "memory_operand")
                  (match_operand:SWI 2 "nonmemory_operand"))
   (match_operand:SI 3 "const_int_operand")	;; model
   (match_operand:SI 4 "const_int_operand")]
  ""
{
  emit_insn (gen_atomic_<logic>_fetch_cmp_0<mode>_1 (operands[1], operands[2],
                                                     operands[3]));
  ix86_expand_setcc (operands[0], (enum rtx_code) INTVAL (operands[4]),
                     gen_rtx_REG (CCNOmode, FLAGS_REG), const0_rtx);
  DONE;
})

(define_insn "atomic_<logic>_fetch_cmp_0<mode>_1"
  [(set (reg:CCNO FLAGS_REG)
        (compare:CCNO
          (any_logic:SWI
            (unspec_volatile:SWI
              [(match_operand:SWI 0 "memory_operand" "+m")
               (match_operand:SI 2 "const_int_operand")]	;; model
              UNSPECV_XCHG)
            (match_operand:SWI 1 "nonmemory_operand" "<r><i>"))
          (const_int 0)))
   (set (match_dup 0)
        (any_logic:SWI (match_dup 0) (match_dup 1)))]
  ""
  "lock{%;} %K2<logic>{<imodesuffix>}\t{%1, %0|%0, %1}")

;; CMPCCXADD

(define_insn "cmpccxadd_<mode>"
  [(set (match_operand:SWI48x 0 "register_operand" "=r")
        (unspec_volatile:SWI48x
          [(match_operand:SWI48x 1 "memory_operand" "+m")
           (match_operand:SWI48x 2 "register_operand" "0")
           (match_operand:SWI48x 3 "register_operand" "r")
           (match_operand:SI 4 "const_0_to_15_operand" "n")]
          UNSPECV_CMPCCXADD))
   (set (match_dup 1)
        (unspec_volatile:SWI48x [(const_int 0)] UNSPECV_CMPCCXADD))
   (clobber (reg:CC FLAGS_REG))]
  "TARGET_CMPCCXADD && TARGET_64BIT"
{
  char buf[128];
  const char *ops = "cmp%sxadd\t{%%3, %%0, %%1|%%1, %%0, %%3}";
  char const *cc[16] = {"o", "no", "b", "nb", "z", "nz", "be", "nbe",
                        "s", "ns", "p", "np", "l", "nl", "le", "nle"};

  snprintf (buf, sizeof (buf), ops, cc[INTVAL (operands[4])]);
  output_asm_insn (buf, operands);
  return "";
})
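
;; A hedged usage sketch: the CMPccXADD intrinsics (cmpccxaddintrin.h,
;; e.g. _cmpccxadd_epi32 (ptr, old, addend, _CMPCCX_Z)) compare *ptr
;; against old, add addend to *ptr only if the chosen condition holds,
;; and always return the original *ptr; the cc[] table above supplies
;; the condition-code suffix for the mnemonic.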