]> gcc.gnu.org Git - gcc.git/blob - gcc/config/riscv/sync.md
Fix wrong code out of NRV + RSO + inlining
[gcc.git] / gcc / config / riscv / sync.md
1 ;; Machine description for RISC-V atomic operations.
2 ;; Copyright (C) 2011-2024 Free Software Foundation, Inc.
3 ;; Contributed by Andrew Waterman (andrew@sifive.com).
4 ;; Based on MIPS target for GNU compiler.
5
6 ;; This file is part of GCC.
7
8 ;; GCC is free software; you can redistribute it and/or modify
9 ;; it under the terms of the GNU General Public License as published by
10 ;; the Free Software Foundation; either version 3, or (at your option)
11 ;; any later version.
12
13 ;; GCC is distributed in the hope that it will be useful,
14 ;; but WITHOUT ANY WARRANTY; without even the implied warranty of
15 ;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 ;; GNU General Public License for more details.
17
18 ;; You should have received a copy of the GNU General Public License
19 ;; along with GCC; see the file COPYING3. If not see
20 ;; <http://www.gnu.org/licenses/>.
21
;; Unspec codes used to wrap the atomic operations below in unspec_volatile
;; RTL so the optimizers treat them as opaque, ordered memory operations.
22 (define_c_enum "unspec" [
23 UNSPEC_COMPARE_AND_SWAP
24 UNSPEC_COMPARE_AND_SWAP_SUBWORD
25 UNSPEC_SYNC_OLD_OP
26 UNSPEC_SYNC_OLD_OP_SUBWORD
27 UNSPEC_SYNC_OLD_OP_ZABHA
28 UNSPEC_SYNC_EXCHANGE
29 UNSPEC_SYNC_EXCHANGE_SUBWORD
30 UNSPEC_SYNC_EXCHANGE_ZABHA
31 UNSPEC_ATOMIC_LOAD
32 UNSPEC_ATOMIC_STORE
33 UNSPEC_MEMORY_BARRIER
34 ])
35
36 ;; Memory barriers.
37
;; Expand a thread fence for the memory model in operand 0.  Under Ztso only
;; seq_cst requires a real fence; under RVWMO every model except relaxed
;; does.  In all other cases no instruction is emitted.
38 (define_expand "mem_thread_fence"
39 [(match_operand:SI 0 "const_int_operand" "")] ;; model
40 ""
41 {
42 enum memmodel model = memmodel_base (INTVAL (operands[0]));
43
44 if (TARGET_ZTSO && model == MEMMODEL_SEQ_CST)
45 {
;; Dummy volatile BLKmode MEM: the fence patterns need a memory operand
;; to act as a scheduling barrier for all of memory.
46 rtx mem = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
47 MEM_VOLATILE_P (mem) = 1;
48 emit_insn (gen_mem_thread_fence_ztso (mem, operands[0]));
49 }
50 else if (!TARGET_ZTSO && model != MEMMODEL_RELAXED)
51 {
52 rtx mem = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
53 MEM_VOLATILE_P (mem) = 1;
54 emit_insn (gen_mem_thread_fence_rvwmo (mem, operands[0]));
55 }
56 DONE;
57 })
58
59 ;; Atomic memory operations.
60
;; Expand an atomic load by dispatching to the Ztso or RVWMO variant
;; (defined elsewhere), which differ only in the fences they emit.
61 (define_expand "atomic_load<mode>"
62 [(match_operand:ANYI 0 "register_operand")
63 (match_operand:ANYI 1 "memory_operand")
64 (match_operand:SI 2 "const_int_operand")] ;; model
65 ""
66 {
67 if (TARGET_ZTSO)
68 emit_insn (gen_atomic_load_ztso<mode> (operands[0], operands[1],
69 operands[2]))
70 else
71 emit_insn (gen_atomic_load_rvwmo<mode> (operands[0], operands[1],
72 operands[2]));
73 DONE;
74 })
75
;; Expand an atomic store by dispatching to the Ztso or RVWMO variant
;; (defined elsewhere), which differ only in the fences they emit.
76 (define_expand "atomic_store<mode>"
77 [(match_operand:ANYI 0 "memory_operand")
78 (match_operand:ANYI 1 "reg_or_0_operand")
79 (match_operand:SI 2 "const_int_operand")] ;; model
80 ""
81 {
82 if (TARGET_ZTSO)
83 emit_insn (gen_atomic_store_ztso<mode> (operands[0], operands[1],
84 operands[2]));
85 else
86 emit_insn (gen_atomic_store_rvwmo<mode> (operands[0], operands[1],
87 operands[2]));
88 DONE;
89 })
90
91 ;; AMO ops
92
;; QI/HI atomic read-modify-write with the old value discarded: a single
;; Zabha byte/halfword AMO whose destination register is x0 (zero).
93 (define_insn "atomic_<atomic_optab><mode>"
94 [(set (match_operand:SHORT 0 "memory_operand" "+A")
95 (unspec_volatile:SHORT
96 [(any_atomic:SHORT (match_dup 0)
97 (match_operand:SHORT 1 "reg_or_0_operand" "rJ"))
98 (match_operand:SI 2 "const_int_operand")] ;; model
99 UNSPEC_SYNC_OLD_OP_ZABHA))]
100 "TARGET_ZABHA"
101 "amo<insn>.<amobh>%A2\tzero,%z1,%0"
102 [(set_attr "type" "atomic")
103 (set (attr "length") (const_int 4))])
104
;; Word/doubleword atomic op with result discarded: prefer a single AMO
;; when Zaamo is available, otherwise fall back to an LR/SC loop.
105 (define_expand "atomic_<atomic_optab><mode>"
106 [(any_atomic:GPR (match_operand:GPR 0 "memory_operand") ;; mem location
107 (match_operand:GPR 1 "reg_or_0_operand")) ;; value for op
108 (match_operand:SI 2 "const_int_operand")] ;; model
109 "TARGET_ZAAMO || TARGET_ZALRSC"
110 {
111 if (TARGET_ZAAMO)
112 emit_insn (gen_amo_atomic_<atomic_optab><mode> (operands[0], operands[1],
113 operands[2]));
114 else
115 emit_insn (gen_lrsc_atomic_<atomic_optab><mode> (operands[0], operands[1],
116 operands[2]));
117 DONE;
118 })
119
;; Zaamo form: one AMO instruction, old value discarded (rd = zero).
120 (define_insn "amo_atomic_<atomic_optab><mode>"
121 [(set (match_operand:GPR 0 "memory_operand" "+A")
122 (unspec_volatile:GPR
123 [(any_atomic:GPR (match_dup 0)
124 (match_operand:GPR 1 "reg_or_0_operand" "rJ"))
125 (match_operand:SI 2 "const_int_operand")] ;; model
126 UNSPEC_SYNC_OLD_OP))]
127 "TARGET_ZAAMO"
128 "amo<insn>.<amo>%A2\tzero,%z1,%0"
129 [(set_attr "type" "atomic")
130 (set (attr "length") (const_int 4))])
131
;; Zalrsc fallback: LR/SC retry loop.  The scratch %3 is reloaded on every
;; iteration, so a failed SC simply retries with fresh data.
132 (define_insn "lrsc_atomic_<atomic_optab><mode>"
133 [(set (match_operand:GPR 0 "memory_operand" "+A")
134 (unspec_volatile:GPR
135 [(any_atomic:GPR (match_dup 0)
136 (match_operand:GPR 1 "reg_or_0_operand" "rJ"))
137 (match_operand:SI 2 "const_int_operand")] ;; model
138 UNSPEC_SYNC_OLD_OP))
139 (clobber (match_scratch:GPR 3 "=&r"))] ;; tmp_1
140 "!TARGET_ZAAMO && TARGET_ZALRSC"
141 {
142 return "1:\;"
143 "lr.<amo>%I2\t%3, %0\;"
144 "<insn>\t%3, %3, %1\;"
145 "sc.<amo>%J2\t%3, %3, %0\;"
146 "bnez\t%3, 1b";
147 }
148 [(set_attr "type" "atomic")
149 (set (attr "length") (const_int 16))])
150
151 ;; AMO fetch ops
152
;; Word/doubleword fetch-and-op (old value returned): single AMO with
;; Zaamo, else an LR/SC loop.
153 (define_expand "atomic_fetch_<atomic_optab><mode>"
154 [(match_operand:GPR 0 "register_operand") ;; old value at mem
155 (any_atomic:GPR (match_operand:GPR 1 "memory_operand") ;; mem location
156 (match_operand:GPR 2 "reg_or_0_operand")) ;; value for op
157 (match_operand:SI 3 "const_int_operand")] ;; model
158 "TARGET_ZAAMO || TARGET_ZALRSC"
159 {
160 if (TARGET_ZAAMO)
161 emit_insn (gen_amo_atomic_fetch_<atomic_optab><mode> (operands[0], operands[1],
162 operands[2], operands[3]));
163 else
164 emit_insn (gen_lrsc_atomic_fetch_<atomic_optab><mode> (operands[0], operands[1],
165 operands[2], operands[3]));
166 DONE;
167 })
168
;; Zaamo fetch-and-op: the AMO instruction returns the old memory value
;; in %0 directly.
169 (define_insn "amo_atomic_fetch_<atomic_optab><mode>"
170 [(set (match_operand:GPR 0 "register_operand" "=&r")
171 (match_operand:GPR 1 "memory_operand" "+A"))
172 (set (match_dup 1)
173 (unspec_volatile:GPR
174 [(any_atomic:GPR (match_dup 1)
175 (match_operand:GPR 2 "reg_or_0_operand" "rJ"))
176 (match_operand:SI 3 "const_int_operand")] ;; model
177 UNSPEC_SYNC_OLD_OP))]
178 "TARGET_ZAAMO"
179 "amo<insn>.<amo>%A3\t%0,%z2,%1"
180 [(set_attr "type" "atomic")
181 (set (attr "length") (const_int 4))])
182
;; Zalrsc fetch-and-op: LR loads the old value into %0, the updated value
;; is computed into scratch %4 and stored with SC.  Both %0 and %4 are
;; recomputed on every retry, so a failed SC is safe.
183 (define_insn "lrsc_atomic_fetch_<atomic_optab><mode>"
184 [(set (match_operand:GPR 0 "register_operand" "=&r")
185 (match_operand:GPR 1 "memory_operand" "+A"))
186 (set (match_dup 1)
187 (unspec_volatile:GPR
188 [(any_atomic:GPR (match_dup 1)
189 (match_operand:GPR 2 "reg_or_0_operand" "rJ"))
190 (match_operand:SI 3 "const_int_operand")] ;; model
191 UNSPEC_SYNC_OLD_OP))
192 (clobber (match_scratch:GPR 4 "=&r"))] ;; tmp_1
193 "!TARGET_ZAAMO && TARGET_ZALRSC"
194 {
195 return "1:\;"
196 "lr.<amo>%I3\t%0, %1\;"
197 "<insn>\t%4, %0, %2\;"
198 "sc.<amo>%J3\t%4, %4, %1\;"
199 "bnez\t%4, 1b";
200 }
201 [(set_attr "type" "atomic")
202 (set (attr "length") (const_int 16))])
203
;; Subword fetch-and-op on a word-aligned container: LR the whole word,
;; apply the op, then splice the (masked) new subword into the untouched
;; bytes (kept via not_mask) before the SC.  %0 returns the old full word;
;; the caller shifts/truncates it back down to the subword.
204 (define_insn "subword_atomic_fetch_strong_<atomic_optab>"
205 [(set (match_operand:SI 0 "register_operand" "=&r") ;; old value at mem
206 (match_operand:SI 1 "memory_operand" "+A")) ;; mem location
207 (set (match_dup 1)
208 (unspec_volatile:SI
209 [(any_atomic:SI (match_dup 1)
210 (match_operand:SI 2 "register_operand" "rI")) ;; value for op
211 (match_operand:SI 3 "const_int_operand")] ;; model
212 UNSPEC_SYNC_OLD_OP_SUBWORD))
213 (match_operand:SI 4 "register_operand" "rI") ;; mask
214 (match_operand:SI 5 "register_operand" "rI") ;; not_mask
215 (clobber (match_scratch:SI 6 "=&r")) ;; tmp_1
216 (clobber (match_scratch:SI 7 "=&r"))] ;; tmp_2
217 "TARGET_ZALRSC && TARGET_INLINE_SUBWORD_ATOMIC"
218 {
219 return "1:\;"
220 "lr.w%I3\t%0, %1\;"
221 "<insn>\t%6, %0, %2\;"
222 "and\t%6, %6, %4\;"
223 "and\t%7, %0, %5\;"
224 "or\t%7, %7, %6\;"
225 "sc.w%J3\t%6, %7, %1\;"
226 "bnez\t%6, 1b";
227 }
228 [(set_attr "type" "multi")
229 (set (attr "length") (const_int 28))])
230
;; Expand a QI/HI fetch-and-nand: compute the word-aligned address, shift
;; and mask for the subword, run the SImode subword NAND loop, then shift
;; the old word back down and truncate it to the subword mode.
231 (define_expand "atomic_fetch_nand<mode>"
232 [(match_operand:SHORT 0 "register_operand") ;; old value at mem
233 (not:SHORT (and:SHORT (match_operand:SHORT 1 "memory_operand") ;; mem location
234 (match_operand:SHORT 2 "reg_or_0_operand"))) ;; value for op
235 (match_operand:SI 3 "const_int_operand")] ;; model
236 "TARGET_ZALRSC && TARGET_INLINE_SUBWORD_ATOMIC"
237 {
238 /* We have no QImode/HImode atomics, so form a mask, then use
239 subword_atomic_fetch_strong_nand to implement a LR/SC version of the
240 operation. */
241
242 /* Logic duplicated in gcc/libgcc/config/riscv/atomic.c for use when inlining
243 is disabled. */
244
245 rtx old = gen_reg_rtx (SImode);
246 rtx mem = operands[1];
247 rtx value = operands[2];
248 rtx model = operands[3];
249 rtx aligned_mem = gen_reg_rtx (SImode);
250 rtx shift = gen_reg_rtx (SImode);
251 rtx mask = gen_reg_rtx (SImode);
252 rtx not_mask = gen_reg_rtx (SImode);
253
254 riscv_subword_address (mem, &aligned_mem, &shift, &mask, &not_mask);
255
256 rtx shifted_value = gen_reg_rtx (SImode);
257 riscv_lshift_subword (<MODE>mode, value, shift, &shifted_value);
258
259 emit_insn (gen_subword_atomic_fetch_strong_nand (old, aligned_mem,
260 shifted_value, model,
261 mask, not_mask));
262
/* Move the loaded subword into the low bits, then truncate.  */
263 emit_move_insn (old, gen_rtx_ASHIFTRT (SImode, old,
264 gen_lowpart (QImode, shift)));
265
266 emit_move_insn (operands[0], gen_lowpart (<MODE>mode, old));
267
268 DONE;
269 })
270
;; Subword fetch-and-nand via an SImode LR/SC loop: and + not computes the
;; NAND, then the masked result is merged with the untouched bytes.
271 (define_insn "subword_atomic_fetch_strong_nand"
272 [(set (match_operand:SI 0 "register_operand" "=&r") ;; old value at mem
273 (match_operand:SI 1 "memory_operand" "+A")) ;; mem location
274 (set (match_dup 1)
275 (unspec_volatile:SI
276 [(not:SI (and:SI (match_dup 1)
277 (match_operand:SI 2 "register_operand" "rI"))) ;; value for op
278 (match_operand:SI 3 "const_int_operand")] ;; model
279 UNSPEC_SYNC_OLD_OP_SUBWORD))
280 (match_operand:SI 4 "register_operand" "rI") ;; mask
281 (match_operand:SI 5 "register_operand" "rI") ;; not_mask
282 (clobber (match_scratch:SI 6 "=&r")) ;; tmp_1
283 (clobber (match_scratch:SI 7 "=&r"))] ;; tmp_2
284 "TARGET_ZALRSC && TARGET_INLINE_SUBWORD_ATOMIC"
285 {
286 return "1:\;"
287 "lr.w%I3\t%0, %1\;"
288 "and\t%6, %0, %2\;"
289 "not\t%6, %6\;"
290 "and\t%6, %6, %4\;"
291 "and\t%7, %0, %5\;"
292 "or\t%7, %7, %6\;"
293 "sc.w%J3\t%6, %7, %1\;"
294 "bnez\t%6, 1b";
295 }
296 [(set_attr "type" "multi")
297 (set (attr "length") (const_int 32))])
298
;; QI/HI fetch-and-op: single Zabha AMO when available, otherwise the
;; inline subword LR/SC expansion.
299 (define_expand "atomic_fetch_<atomic_optab><mode>"
300 [(match_operand:SHORT 0 "register_operand") ;; old value at mem
301 (any_atomic:SHORT (match_operand:SHORT 1 "memory_operand") ;; mem location
302 (match_operand:SHORT 2 "reg_or_0_operand")) ;; value for op
303 (match_operand:SI 3 "const_int_operand")] ;; model
304 "(TARGET_ZALRSC && TARGET_INLINE_SUBWORD_ATOMIC) || TARGET_ZABHA"
305 {
306 if (TARGET_ZABHA)
307 emit_insn(gen_zabha_atomic_fetch_<atomic_optab><mode> (operands[0], operands[1],
308 operands[2], operands[3]));
309 else
310 emit_insn(gen_lrsc_atomic_fetch_<atomic_optab><mode> (operands[0], operands[1],
311 operands[2], operands[3]));
312 DONE;
313 })
314
;; Zabha fetch-and-op: one byte/halfword AMO returning the old value in %0.
315 (define_insn "zabha_atomic_fetch_<atomic_optab><mode>"
316 [(set (match_operand:SHORT 0 "register_operand" "=&r")
317 (match_operand:SHORT 1 "memory_operand" "+A"))
318 (set (match_dup 1)
319 (unspec_volatile:SHORT
320 [(any_atomic:SHORT (match_dup 1)
321 (match_operand:SHORT 2 "reg_or_0_operand" "rJ"))
322 (match_operand:SI 3 "const_int_operand")] ;; model
323 UNSPEC_SYNC_OLD_OP_ZABHA))]
324 "TARGET_ZABHA"
325 "amo<insn>.<amobh>%A3\t%0,%z2,%1"
326 [(set_attr "type" "atomic")
327 (set (attr "length") (const_int 4))])
328
;; LR/SC fallback for QI/HI fetch-and-op: align the address to a word,
;; shift the operand into position, run the subword loop, then shift the
;; returned old word down and truncate to the subword mode.
329 (define_expand "lrsc_atomic_fetch_<atomic_optab><mode>"
330 [(match_operand:SHORT 0 "register_operand") ;; old value at mem
331 (any_atomic:SHORT (match_operand:SHORT 1 "memory_operand") ;; mem location
332 (match_operand:SHORT 2 "reg_or_0_operand")) ;; value for op
333 (match_operand:SI 3 "const_int_operand")] ;; model
334 "!TARGET_ZABHA && TARGET_ZALRSC && TARGET_INLINE_SUBWORD_ATOMIC"
335 {
336 /* We have no QImode/HImode atomics, so form a mask, then use
337 subword_atomic_fetch_strong_<mode> to implement a LR/SC version of the
338 operation. */
339
340 /* Logic duplicated in gcc/libgcc/config/riscv/atomic.c for use when inlining
341 is disabled. */
342
343 rtx old = gen_reg_rtx (SImode);
344 rtx mem = operands[1];
345 rtx value = operands[2];
346 rtx model = operands[3];
347 rtx aligned_mem = gen_reg_rtx (SImode);
348 rtx shift = gen_reg_rtx (SImode);
349 rtx mask = gen_reg_rtx (SImode);
350 rtx not_mask = gen_reg_rtx (SImode);
351
352 riscv_subword_address (mem, &aligned_mem, &shift, &mask, &not_mask);
353
354 rtx shifted_value = gen_reg_rtx (SImode);
355 riscv_lshift_subword (<MODE>mode, value, shift, &shifted_value);
356
357 emit_insn (gen_subword_atomic_fetch_strong_<atomic_optab> (old, aligned_mem,
358 shifted_value,
359 model, mask,
360 not_mask));
361
/* Move the loaded subword into the low bits, then truncate.  */
362 emit_move_insn (old, gen_rtx_ASHIFTRT (SImode, old,
363 gen_lowpart (QImode, shift)));
364
365 emit_move_insn (operands[0], gen_lowpart (<MODE>mode, old));
366
367 DONE;
368 })
369
370 ; Atomic exchange ops
371
;; Word/doubleword atomic exchange: amoswap with Zaamo, else LR/SC loop.
372 (define_expand "atomic_exchange<mode>"
373 [(match_operand:GPR 0 "register_operand") ;; old value at mem
374 (match_operand:GPR 1 "memory_operand") ;; mem location
375 (match_operand:GPR 2 "register_operand") ;; value for op
376 (match_operand:SI 3 "const_int_operand")] ;; model
377 "TARGET_ZAAMO || TARGET_ZALRSC"
378 {
379 if (TARGET_ZAAMO)
380 emit_insn (gen_amo_atomic_exchange<mode> (operands[0], operands[1],
381 operands[2], operands[3]));
382 else
383 emit_insn (gen_lrsc_atomic_exchange<mode> (operands[0], operands[1],
384 operands[2], operands[3]));
385 DONE;
386 })
387
;; Zaamo exchange: a single amoswap; the new value is tied to %0 ("0"),
;; and the instruction replaces it with the old memory value.
388 (define_insn "amo_atomic_exchange<mode>"
389 [(set (match_operand:GPR 0 "register_operand" "=&r")
390 (unspec_volatile:GPR
391 [(match_operand:GPR 1 "memory_operand" "+A")
392 (match_operand:SI 3 "const_int_operand")] ;; model
393 UNSPEC_SYNC_EXCHANGE))
394 (set (match_dup 1)
395 (match_operand:GPR 2 "register_operand" "0"))]
396 "TARGET_ZAAMO"
397 "amoswap.<amo>%A3\t%0,%z2,%1"
398 [(set_attr "type" "atomic")
399 (set (attr "length") (const_int 4))])
400
;; Zalrsc exchange: LR/SC retry loop.
;;
;; Bug fix: the previous version tied the new value (operand 2) to %0 with
;; constraint "0" and emitted "sc.<amo> %0, %0, %1".  SC writes its status
;; code into rd, so after a *failed* SC the register holding the value to
;; store had been overwritten with the status; the retry iteration then
;; stored that status code instead of the intended new value (wrong code
;; whenever the SC fails).  Fix: keep the new value in its own register
;; (%2, no tie), load the old value straight into %0, and direct the SC
;; status into the scratch %4.  The earlyclobbers on %0 and %4 guarantee
;; neither overlaps %2.
(define_insn "lrsc_atomic_exchange<mode>"
  [(set (match_operand:GPR 0 "register_operand" "=&r")        ;; old value at mem
        (unspec_volatile:GPR
          [(match_operand:GPR 1 "memory_operand" "+A")        ;; mem location
           (match_operand:SI 3 "const_int_operand")]          ;; model
         UNSPEC_SYNC_EXCHANGE))
   (set (match_dup 1)
        (match_operand:GPR 2 "register_operand" "r"))         ;; new value
   (clobber (match_scratch:GPR 4 "=&r"))]                     ;; sc status
  "!TARGET_ZAAMO && TARGET_ZALRSC"
  {
    return "1:\;"
           "lr.<amo>%I3\t%0, %1\;"
           "sc.<amo>%J3\t%4, %2, %1\;"
           "bnez\t%4, 1b";
  }
  [(set_attr "type" "atomic")
   (set (attr "length") (const_int 12))])
420
;; QI/HI atomic exchange: single Zabha amoswap when available, otherwise
;; the inline subword LR/SC expansion.
421 (define_expand "atomic_exchange<mode>"
422 [(match_operand:SHORT 0 "register_operand") ;; old value at mem
423 (match_operand:SHORT 1 "memory_operand") ;; mem location
424 (match_operand:SHORT 2 "register_operand") ;; value
425 (match_operand:SI 3 "const_int_operand")] ;; model
426 "(TARGET_ZALRSC && TARGET_INLINE_SUBWORD_ATOMIC) || TARGET_ZABHA"
427 {
428 if (TARGET_ZABHA)
429 emit_insn(gen_zabha_atomic_exchange<mode>(operands[0], operands[1],
430 operands[2], operands[3]));
431 else
432 emit_insn(gen_lrsc_atomic_exchange<mode>(operands[0], operands[1],
433 operands[2], operands[3]));
434 DONE;
435 })
436
;; Zabha exchange: byte/halfword amoswap; the new value is tied to %0 ("0")
;; and replaced by the old memory value.
437 (define_insn "zabha_atomic_exchange<mode>"
438 [(set (match_operand:SHORT 0 "register_operand" "=&r")
439 (unspec_volatile:SHORT
440 [(match_operand:SHORT 1 "memory_operand" "+A")
441 (match_operand:SI 3 "const_int_operand")] ;; model
442 UNSPEC_SYNC_EXCHANGE_ZABHA))
443 (set (match_dup 1)
444 (match_operand:SHORT 2 "register_operand" "0"))]
445 "TARGET_ZABHA"
446 "amoswap.<amobh>%A3\t%0,%z2,%1"
447 [(set_attr "type" "atomic")
448 (set (attr "length") (const_int 4))])
449
;; LR/SC fallback for QI/HI exchange: align to a word, shift the new value
;; into place, swap it into the word with the subword loop, then shift the
;; old word down and truncate to the subword mode.
450 (define_expand "lrsc_atomic_exchange<mode>"
451 [(match_operand:SHORT 0 "register_operand") ;; old value at mem
452 (match_operand:SHORT 1 "memory_operand") ;; mem location
453 (match_operand:SHORT 2 "register_operand") ;; value
454 (match_operand:SI 3 "const_int_operand")] ;; model
455 "!TARGET_ZABHA && TARGET_ZALRSC && TARGET_INLINE_SUBWORD_ATOMIC"
456 {
457 rtx old = gen_reg_rtx (SImode);
458 rtx mem = operands[1];
459 rtx value = operands[2];
460 rtx model = operands[3];
461 rtx aligned_mem = gen_reg_rtx (SImode);
462 rtx shift = gen_reg_rtx (SImode);
463 rtx mask = gen_reg_rtx (SImode);
464 rtx not_mask = gen_reg_rtx (SImode);
465
466 riscv_subword_address (mem, &aligned_mem, &shift, &mask, &not_mask);
467
468 rtx shifted_value = gen_reg_rtx (SImode);
469 riscv_lshift_subword (<MODE>mode, value, shift, &shifted_value);
470
471 emit_insn (gen_subword_atomic_exchange_strong (old, aligned_mem,
472 shifted_value, model,
473 not_mask));
474
/* Move the loaded subword into the low bits, then truncate.  */
475 emit_move_insn (old, gen_rtx_ASHIFTRT (SImode, old,
476 gen_lowpart (QImode, shift)));
477
478 emit_move_insn (operands[0], gen_lowpart (<MODE>mode, old));
479 DONE;
480 })
481
;; Subword exchange via an SImode LR/SC loop: keep the other bytes of the
;; word (not_mask) and OR in the pre-shifted new value before the SC.
482 (define_insn "subword_atomic_exchange_strong"
483 [(set (match_operand:SI 0 "register_operand" "=&r") ;; old value at mem
484 (match_operand:SI 1 "memory_operand" "+A")) ;; mem location
485 (set (match_dup 1)
486 (unspec_volatile:SI
487 [(match_operand:SI 2 "reg_or_0_operand" "rI") ;; value
488 (match_operand:SI 3 "const_int_operand")] ;; model
489 UNSPEC_SYNC_EXCHANGE_SUBWORD))
490 (match_operand:SI 4 "reg_or_0_operand" "rI") ;; not_mask
491 (clobber (match_scratch:SI 5 "=&r"))] ;; tmp_1
492 "TARGET_ZALRSC && TARGET_INLINE_SUBWORD_ATOMIC"
493 {
494 return "1:\;"
495 "lr.w%I3\t%0, %1\;"
496 "and\t%5, %0, %4\;"
497 "or\t%5, %5, %2\;"
498 "sc.w%J3\t%5, %5, %1\;"
499 "bnez\t%5, 1b";
500 }
501 [(set_attr "type" "multi")
502 (set (attr "length") (const_int 20))])
503
504 ; Atomic CAS ops
505
;; Word/doubleword strong compare-and-swap via LR/SC.  The success/failure
;; models are merged into one model (reusing operands[5]) that orders both
;; outcomes.  Note the assembler numeric labels: "1f" in the bne skips to
;; the trailing "1:" on the exit path when the comparison fails.
506 (define_insn "zalrsc_atomic_cas_value_strong<mode>"
507 [(set (match_operand:GPR 0 "register_operand" "=&r")
508 (match_operand:GPR 1 "memory_operand" "+A"))
509 (set (match_dup 1)
510 (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ")
511 (match_operand:GPR 3 "reg_or_0_operand" "rJ")
512 (match_operand:SI 4 "const_int_operand") ;; mod_s
513 (match_operand:SI 5 "const_int_operand")] ;; mod_f
514 UNSPEC_COMPARE_AND_SWAP))
515 (clobber (match_scratch:GPR 6 "=&r"))]
516 "TARGET_ZALRSC"
517 {
518 enum memmodel model_success = (enum memmodel) INTVAL (operands[4]);
519 enum memmodel model_failure = (enum memmodel) INTVAL (operands[5]);
520 /* Find the union of the two memory models so we can satisfy both success
521 and failure memory models. */
522 operands[5] = GEN_INT (riscv_union_memmodels (model_success, model_failure));
523 return "1:\;"
524 "lr.<amo>%I5\t%0,%1\;"
525 "bne\t%0,%z2,1f\;"
526 "sc.<amo>%J5\t%6,%z3,%1\;"
527 "bnez\t%6,1b\;"
528 "1:";
529 }
530 [(set_attr "type" "multi")
531 (set (attr "length") (const_int 16))])
532
533 ;; Implement compare_exchange with a conservative leading fence when
534 ;; model_failure is seq_cst.
535 ;; This allows us to be compatible with the ISA manual Table A.6 and Table A.7
536 ;; (A6C and A7).
537 ;; More details: https://github.com/riscv-non-isa/riscv-elf-psabi-doc/issues/444
;; Zacas word/doubleword CAS: a single amocas, with a conservative leading
;; "fence rw,rw" when the failure model is seq_cst (see the comment block
;; above).  Operand 2 (expected value) is tied to %0; amocas replaces it
;; with the old memory value.
538 (define_insn "zacas_atomic_cas_value_strong<mode>"
539 [(set (match_operand:GPR 0 "register_operand" "=&r") ;; val output
540 (match_operand:GPR 1 "memory_operand" "+A")) ;; memory
541 (set (match_dup 1)
542 (unspec_volatile:GPR [(match_operand:GPR 2 "register_operand" "0") ;; expected val
543 (match_operand:GPR 3 "reg_or_0_operand" "rJ") ;; desired val
544 (match_operand:SI 4 "const_int_operand") ;; mod_s
545 (match_operand:SI 5 "const_int_operand")] ;; mod_f
546 UNSPEC_COMPARE_AND_SWAP))]
547 "TARGET_ZACAS"
548 {
549 enum memmodel model_success = (enum memmodel) INTVAL (operands[4]);
550 enum memmodel model_failure = (enum memmodel) INTVAL (operands[5]);
551 /* Find the union of the two memory models so we can satisfy both success
552 and failure memory models. */
553 operands[4] = GEN_INT (riscv_union_memmodels (model_success, model_failure));
554
555 if (model_failure == MEMMODEL_SEQ_CST)
556 return "fence\trw,rw\;"
557 "amocas.<amo>%A4\t%0,%z3,%1";
558 else
559 return "amocas.<amo>%A4\t%0,%z3,%1";
560 }
561 [(set_attr "type" "atomic")
562 (set (attr "length")
563 (symbol_ref "(is_mm_seq_cst (memmodel_from_int (INTVAL (operands[5]))) ? 8
564 : 4)"))])
565
;; Expand the full __atomic_compare_exchange for word/doubleword: run the
;; CAS (Zacas if available, else LR/SC), then derive the bool result by
;; comparing the returned old value against the expected value.
566 (define_expand "atomic_compare_and_swap<mode>"
567 [(match_operand:SI 0 "register_operand" "") ;; bool output
568 (match_operand:GPR 1 "register_operand" "") ;; val output
569 (match_operand:GPR 2 "memory_operand" "") ;; memory
570 (match_operand:GPR 3 "register_operand" "") ;; expected value
571 (match_operand:GPR 4 "reg_or_0_operand" "") ;; desired value
572 (match_operand:SI 5 "const_int_operand" "") ;; is_weak
573 (match_operand:SI 6 "const_int_operand" "") ;; mod_s
574 (match_operand:SI 7 "const_int_operand" "")] ;; mod_f
575 "TARGET_ZALRSC || TARGET_ZACAS"
576 {
577 if (word_mode != <MODE>mode && operands[3] != const0_rtx)
578 {
579 /* We don't have SI mode compare on RV64, so we need to make sure expected
580 value is sign-extended. */
581 rtx tmp0 = gen_reg_rtx (word_mode);
582 emit_insn (gen_extend_insn (tmp0, operands[3], word_mode, <MODE>mode, 0));
583 operands[3] = simplify_gen_subreg (<MODE>mode, tmp0, word_mode, 0);
584 }
585
586 if (TARGET_ZACAS)
587 emit_insn (gen_zacas_atomic_cas_value_strong<mode> (operands[1],
588 operands[2],
589 operands[3],
590 operands[4],
591 operands[6],
592 operands[7]));
593 else
594 emit_insn (gen_zalrsc_atomic_cas_value_strong<mode> (operands[1],
595 operands[2],
596 operands[3],
597 operands[4],
598 operands[6],
599 operands[7]));
600
/* compare == 0 iff the CAS matched.  When the expected value is zero the
   returned old value itself is the comparison.  */
601 rtx compare = operands[1];
602 if (operands[3] != const0_rtx)
603 {
604 rtx difference = gen_rtx_MINUS (<MODE>mode, operands[1], operands[3]);
605 compare = gen_reg_rtx (<MODE>mode);
606 emit_insn (gen_rtx_SET (compare, difference));
607 }
608
609 if (word_mode != <MODE>mode)
610 {
611 rtx reg = gen_reg_rtx (word_mode);
612 emit_insn (gen_rtx_SET (reg, gen_rtx_SIGN_EXTEND (word_mode, compare)));
613 compare = reg;
614 }
615
616 emit_insn (gen_rtx_SET (operands[0], gen_rtx_EQ (SImode, compare, const0_rtx)));
617 DONE;
618 })
619
620 ;; Implement compare_exchange with a conservative leading fence when
621 ;; model_failure is seq_cst.
622 ;; This allows us to be compatible with the ISA manual Table A.6 and Table A.7
623 ;; (A6C and A7).
624 ;; More details: https://github.com/riscv-non-isa/riscv-elf-psabi-doc/issues/444
;; Zacas+Zabha subword CAS: single amocas.b/amocas.h, with a conservative
;; leading fence when the failure model is seq_cst (see comment above).
;; Operand 2 (expected value) is tied to %0 and replaced by the old value.
625 (define_insn "zacas_atomic_cas_value_strong<mode>"
626 [(set (match_operand:SHORT 0 "register_operand" "=&r") ;; val output
627 (match_operand:SHORT 1 "memory_operand" "+A")) ;; memory
628 (set (match_dup 1)
629 (unspec_volatile:SHORT [(match_operand:SHORT 2 "register_operand" "0") ;; expected_val
630 (match_operand:SHORT 3 "register_operand" "rJ") ;; desired_val
631 (match_operand:SI 4 "const_int_operand") ;; mod_s
632 (match_operand:SI 5 "const_int_operand")] ;; mod_f
633 UNSPEC_COMPARE_AND_SWAP))]
634 "TARGET_ZACAS && TARGET_ZABHA"
635 {
636 enum memmodel model_success = (enum memmodel) INTVAL (operands[4]);
637 enum memmodel model_failure = (enum memmodel) INTVAL (operands[5]);
638 /* Find the union of the two memory models so we can satisfy both success
639 and failure memory models. */
640 operands[4] = GEN_INT (riscv_union_memmodels (model_success, model_failure));
641
642 if (model_failure == MEMMODEL_SEQ_CST)
643 return "fence\trw,rw\;"
644 "amocas.<amobh>%A4\t%0,%z3,%1";
645 else
646 return "amocas.<amobh>%A4\t%0,%z3,%1";
647 }
648 [(set_attr "type" "atomic")
649 (set (attr "length")
650 (symbol_ref "(is_mm_seq_cst (memmodel_from_int (INTVAL (operands[5]))) ? 8
651 : 4)"))])
652
;; Expand the full __atomic_compare_exchange for QI/HI: run the subword
;; CAS, sign-extend old and expected values to SImode, and compare them to
;; produce the bool result.
653 (define_expand "atomic_compare_and_swap<mode>"
654 [(match_operand:SI 0 "register_operand") ;; bool output
655 (match_operand:SHORT 1 "register_operand") ;; val output
656 (match_operand:SHORT 2 "memory_operand") ;; memory
657 (match_operand:SHORT 3 "register_operand") ;; expected value
658 (match_operand:SHORT 4 "reg_or_0_operand") ;; desired value
659 (match_operand:SI 5 "const_int_operand") ;; is_weak
660 (match_operand:SI 6 "const_int_operand") ;; mod_s
661 (match_operand:SI 7 "const_int_operand")] ;; mod_f
662 "(TARGET_ZALRSC && TARGET_INLINE_SUBWORD_ATOMIC) || (TARGET_ZACAS && TARGET_ZABHA)"
663 {
664
665 if (TARGET_ZACAS && TARGET_ZABHA)
666 emit_insn (gen_zacas_atomic_cas_value_strong<mode> (operands[1],
667 operands[2],
668 operands[3],
669 operands[4],
670 operands[6],
671 operands[7]));
672 else
673 emit_insn (gen_zalrsc_atomic_cas_value_strong<mode> (operands[1],
674 operands[2],
675 operands[3],
676 operands[4],
677 operands[6],
678 operands[7]));
679
/* NOTE(review): operands[1] and operands[3] are register_operands, so the
   != const0_rtx tests below appear to always be true and the const0_rtx
   arms look unreachable — confirm before relying on them.  */
680 rtx val = gen_reg_rtx (SImode);
681 if (operands[1] != const0_rtx)
682 emit_move_insn (val, gen_rtx_SIGN_EXTEND (SImode, operands[1]));
683 else
684 emit_move_insn (val, const0_rtx);
685
686 rtx exp = gen_reg_rtx (SImode);
687 if (operands[3] != const0_rtx)
688 emit_move_insn (exp, gen_rtx_SIGN_EXTEND (SImode, operands[3]));
689 else
690 emit_move_insn (exp, const0_rtx);
691
/* NOTE(review): exp is a fresh pseudo, so this test is always true.  */
692 rtx compare = val;
693 if (exp != const0_rtx)
694 {
695 rtx difference = gen_rtx_MINUS (SImode, val, exp);
696 compare = gen_reg_rtx (SImode);
697 emit_move_insn (compare, difference);
698 }
699
700 if (word_mode != SImode)
701 {
702 rtx reg = gen_reg_rtx (word_mode);
703 emit_move_insn (reg, gen_rtx_SIGN_EXTEND (word_mode, compare));
704 compare = reg;
705 }
706
707 emit_move_insn (operands[0], gen_rtx_EQ (SImode, compare, const0_rtx));
708 DONE;
709 })
710
;; LR/SC subword strong CAS: align to a word, shift and mask the expected
;; and desired values into position, merge the success/failure models, run
;; the subword CAS loop, then shift/truncate the old value back down.
711 (define_expand "zalrsc_atomic_cas_value_strong<mode>"
712 [(match_operand:SHORT 0 "register_operand") ;; val output
713 (match_operand:SHORT 1 "memory_operand") ;; memory
714 (match_operand:SHORT 2 "reg_or_0_operand") ;; expected value
715 (match_operand:SHORT 3 "reg_or_0_operand") ;; desired value
716 (match_operand:SI 4 "const_int_operand") ;; mod_s
717 (match_operand:SI 5 "const_int_operand") ;; mod_f
718 (match_scratch:SHORT 6)]
719 "TARGET_ZALRSC && TARGET_INLINE_SUBWORD_ATOMIC"
720 {
721 /* We have no QImode/HImode atomics, so form a mask, then use
722 subword_atomic_cas_strong<mode> to implement a LR/SC version of the
723 operation. */
724
725 /* Logic duplicated in gcc/libgcc/config/riscv/atomic.c for use when inlining
726 is disabled. */
727
728 rtx old = gen_reg_rtx (SImode);
729 rtx mem = operands[1];
730 rtx aligned_mem = gen_reg_rtx (SImode);
731 rtx shift = gen_reg_rtx (SImode);
732 rtx mask = gen_reg_rtx (SImode);
733 rtx not_mask = gen_reg_rtx (SImode);
734
735 riscv_subword_address (mem, &aligned_mem, &shift, &mask, &not_mask);
736
737 rtx o = operands[2];
738 rtx n = operands[3];
739 rtx shifted_o = gen_reg_rtx (SImode);
740 rtx shifted_n = gen_reg_rtx (SImode);
741
742 riscv_lshift_subword (<MODE>mode, o, shift, &shifted_o);
743 riscv_lshift_subword (<MODE>mode, n, shift, &shifted_n);
744
745 emit_move_insn (shifted_o, gen_rtx_AND (SImode, shifted_o, mask));
746 emit_move_insn (shifted_n, gen_rtx_AND (SImode, shifted_n, mask));
747
748 enum memmodel model_success = (enum memmodel) INTVAL (operands[4]);
749 enum memmodel model_failure = (enum memmodel) INTVAL (operands[5]);
750 /* Find the union of the two memory models so we can satisfy both success
751 and failure memory models. */
752 rtx model = GEN_INT (riscv_union_memmodels (model_success, model_failure));
753
754 emit_insn (gen_subword_atomic_cas_strong (old, aligned_mem,
755 shifted_o, shifted_n,
756 model, mask, not_mask));
757
/* Move the loaded subword into the low bits, then truncate.  */
758 emit_move_insn (old, gen_rtx_ASHIFTRT (SImode, old,
759 gen_lowpart (QImode, shift)));
760
761 emit_move_insn (operands[0], gen_lowpart (<MODE>mode, old));
762
763 DONE;
764 })
765
;; Subword CAS via an SImode LR/SC loop: mask out the subword, compare it
;; against the (pre-shifted, pre-masked) expected value, and if equal
;; splice in the desired value.  "1f" exits the loop on mismatch.
766 (define_insn "subword_atomic_cas_strong"
767 [(set (match_operand:SI 0 "register_operand" "=&r") ;; old value at mem
768 (match_operand:SI 1 "memory_operand" "+A")) ;; mem location
769 (set (match_dup 1)
770 (unspec_volatile:SI [(match_operand:SI 2 "reg_or_0_operand" "rJ") ;; expected value
771 (match_operand:SI 3 "reg_or_0_operand" "rJ")] ;; desired value
772 UNSPEC_COMPARE_AND_SWAP_SUBWORD))
773 (match_operand:SI 4 "const_int_operand") ;; model
774 (match_operand:SI 5 "register_operand" "rI") ;; mask
775 (match_operand:SI 6 "register_operand" "rI") ;; not_mask
776 (clobber (match_scratch:SI 7 "=&r"))] ;; tmp_1
777 "TARGET_ZALRSC && TARGET_INLINE_SUBWORD_ATOMIC"
778 {
779 return "1:\;"
780 "lr.w%I4\t%0, %1\;"
781 "and\t%7, %0, %5\;"
782 "bne\t%7, %z2, 1f\;"
783 "and\t%7, %0, %6\;"
784 "or\t%7, %7, %3\;"
785 "sc.w%J4\t%7, %7, %1\;"
786 "bnez\t%7, 1b\;"
787 "1:";
788 }
789 [(set_attr "type" "multi")
790 (set (attr "length") (const_int 28))])
791
;; Expand atomic_test_and_set for a byte: set bit pattern 1 in the byte
;; within its containing word using an SImode fetch-or, then shift the old
;; word down and truncate; nonzero result means the byte was already set.
792 (define_expand "atomic_test_and_set"
793 [(match_operand:QI 0 "register_operand" "") ;; bool output
794 (match_operand:QI 1 "memory_operand" "+A") ;; memory
795 (match_operand:SI 2 "const_int_operand" "")] ;; model
796 "TARGET_ZAAMO || TARGET_ZALRSC"
797 {
798 /* We have no QImode atomics, so use the address LSBs to form a mask,
799 then use an aligned SImode atomic. */
800 rtx old = gen_reg_rtx (SImode);
801 rtx mem = operands[1];
802 rtx model = operands[2];
803 rtx set = gen_reg_rtx (QImode);
804 rtx aligned_mem = gen_reg_rtx (SImode);
805 rtx shift = gen_reg_rtx (SImode);
806
807 /* Unused. */
808 rtx _mask = gen_reg_rtx (SImode);
809 rtx _not_mask = gen_reg_rtx (SImode);
810
811 riscv_subword_address (mem, &aligned_mem, &shift, &_mask, &_not_mask);
812
813 emit_move_insn (set, GEN_INT (1));
814 rtx shifted_set = gen_reg_rtx (SImode);
815 riscv_lshift_subword (QImode, set, shift, &shifted_set);
816
;; Fetch-or of the shifted 1 bit: AMO when Zaamo is available, else LR/SC.
817 if (TARGET_ZAAMO)
818 emit_insn (gen_amo_atomic_fetch_orsi (old, aligned_mem, shifted_set, model));
819 else if (TARGET_ZALRSC)
820 emit_insn (gen_lrsc_atomic_fetch_orsi (old, aligned_mem, shifted_set, model));
821
822 emit_move_insn (old, gen_rtx_ASHIFTRT (SImode, old,
823 gen_lowpart (QImode, shift)));
824
825 emit_move_insn (operands[0], gen_lowpart (QImode, old));
826
827 DONE;
828 })
This page took 0.072936 seconds and 6 git commands to generate.