;; Machine description for RISC-V atomic operations.
;; Copyright (C) 2011-2023 Free Software Foundation, Inc.
;; Contributed by Andrew Waterman (andrew@sifive.com).
;; Based on MIPS target for GNU compiler.

;; This file is part of GCC.

;; GCC is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.

;; GCC is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
;; GNU General Public License for more details.

;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3.  If not see
;; <http://www.gnu.org/licenses/>.

(define_c_enum "unspec" [
  UNSPEC_COMPARE_AND_SWAP
  UNSPEC_COMPARE_AND_SWAP_SUBWORD
  UNSPEC_SYNC_OLD_OP
  UNSPEC_SYNC_OLD_OP_SUBWORD
  UNSPEC_SYNC_EXCHANGE
  UNSPEC_SYNC_EXCHANGE_SUBWORD
  UNSPEC_ATOMIC_STORE
  UNSPEC_MEMORY_BARRIER
])

;; Memory barriers.

(define_expand "mem_thread_fence"
  [(match_operand:SI 0 "const_int_operand" "")] ;; model
  ""
{
  if (INTVAL (operands[0]) != MEMMODEL_RELAXED)
    {
      rtx mem = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
      MEM_VOLATILE_P (mem) = 1;
      emit_insn (gen_mem_thread_fence_1 (mem, operands[0]));
    }
  DONE;
})

;; Until the RISC-V memory model (hence its mapping from C++) is finalized,
;; conservatively emit a full FENCE.
(define_insn "mem_thread_fence_1"
  [(set (match_operand:BLK 0 "" "")
        (unspec:BLK [(match_dup 0)] UNSPEC_MEMORY_BARRIER))
   (match_operand:SI 1 "const_int_operand" "")] ;; model
  ""
  "fence\tiorw,iorw")

;; Atomic memory operations.

;; Implement atomic stores with conservative fences.  Fall back to fences for
;; atomic loads.
;; This allows us to be compatible with the ISA manual Table A.6 and Table A.7.
(define_insn "atomic_store<mode>"
  [(set (match_operand:GPR 0 "memory_operand" "=A")
        (unspec_volatile:GPR
          [(match_operand:GPR 1 "reg_or_0_operand" "rJ")
           (match_operand:SI 2 "const_int_operand")] ;; model
          UNSPEC_ATOMIC_STORE))]
  "TARGET_ATOMIC"
{
  enum memmodel model = (enum memmodel) INTVAL (operands[2]);
  model = memmodel_base (model);

  if (model == MEMMODEL_SEQ_CST)
    return "fence\trw,w\;"
           "s<amo>\t%z1,%0\;"
           "fence\trw,rw";
  else if (model == MEMMODEL_RELEASE)
    return "fence\trw,w\;"
           "s<amo>\t%z1,%0";
  else
    return "s<amo>\t%z1,%0";
}
  [(set_attr "type" "atomic")
   (set (attr "length") (const_int 12))])
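
;; As a sketch of the resulting code (value in a1, object at 0(a0)),
;; a seq_cst SImode store becomes
;;   fence rw,w
;;   sw    a1,0(a0)
;;   fence rw,rw
;; a release store drops the trailing fence, and any weaker model is a
;; plain sw, matching the conservative fence-based mapping of the ISA
;; manual tables cited above.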

(define_insn "atomic_<atomic_optab><mode>"
  [(set (match_operand:GPR 0 "memory_operand" "+A")
        (unspec_volatile:GPR
          [(any_atomic:GPR (match_dup 0)
                           (match_operand:GPR 1 "reg_or_0_operand" "rJ"))
           (match_operand:SI 2 "const_int_operand")] ;; model
          UNSPEC_SYNC_OLD_OP))]
  "TARGET_ATOMIC"
  "%F2amo<insn>.<amo>%A2 zero,%z1,%0"
  [(set_attr "type" "atomic")
   (set (attr "length") (const_int 8))])
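
;; This pattern serves the case where the old value is unused, so the
;; AMO destination is the zero register.  The %F2 and %A2 modifiers emit
;; whatever fence and acquire/release annotations the memory model
;; requires; e.g. a relaxed __atomic_fetch_add with an unused result
;; (addend in a1, int object at 0(a0)) should assemble to just
;;   amoadd.w zero,a1,0(a0)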

(define_insn "atomic_fetch_<atomic_optab><mode>"
  [(set (match_operand:GPR 0 "register_operand" "=&r")
        (match_operand:GPR 1 "memory_operand" "+A"))
   (set (match_dup 1)
        (unspec_volatile:GPR
          [(any_atomic:GPR (match_dup 1)
                           (match_operand:GPR 2 "reg_or_0_operand" "rJ"))
           (match_operand:SI 3 "const_int_operand")] ;; model
          UNSPEC_SYNC_OLD_OP))]
  "TARGET_ATOMIC"
  "%F3amo<insn>.<amo>%A3 %0,%z2,%1"
  [(set_attr "type" "atomic")
   (set (attr "length") (const_int 8))])
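
;; Same as above, but the AMO also writes the old memory value to
;; operand 0, which is what __atomic_fetch_<op> returns; e.g. a relaxed
;; 64-bit fetch-and-or on RV64 (old value to a0, operand in a1, object
;; at 0(a2)) should assemble to
;;   amoor.d a0,a1,0(a2)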

(define_insn "subword_atomic_fetch_strong_<atomic_optab>"
  [(set (match_operand:SI 0 "register_operand" "=&r")           ;; old value at mem
        (match_operand:SI 1 "memory_operand" "+A"))             ;; mem location
   (set (match_dup 1)
        (unspec_volatile:SI
          [(any_atomic:SI (match_dup 1)
                          (match_operand:SI 2 "register_operand" "rI")) ;; value for op
           (match_operand:SI 3 "register_operand" "rI")]        ;; mask
          UNSPEC_SYNC_OLD_OP_SUBWORD))
   (match_operand:SI 4 "register_operand" "rI")                 ;; not_mask
   (clobber (match_scratch:SI 5 "=&r"))                         ;; tmp_1
   (clobber (match_scratch:SI 6 "=&r"))]                        ;; tmp_2
  "TARGET_ATOMIC && TARGET_INLINE_SUBWORD_ATOMIC"
{
  return "1:\;"
         "lr.w.aqrl\t%0, %1\;"
         "<insn>\t%5, %0, %2\;"
         "and\t%5, %5, %3\;"
         "and\t%6, %0, %4\;"
         "or\t%6, %6, %5\;"
         "sc.w.rl\t%5, %6, %1\;"
         "bnez\t%5, 1b";
}
  [(set (attr "length") (const_int 28))])
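
;; The LR/SC loop above works on the aligned word containing the
;; subword: it applies <insn> to the whole word, keeps only the
;; subword's bits via the mask, merges them into the unchanged bits
;; selected by not_mask, and retries until the store-conditional
;; succeeds.  As an illustration, on a little-endian target a byte at
;; offset 1 within its word gets shift = 8, mask = 0x0000ff00 and
;; not_mask = 0xffff00ff from the expanders below.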

(define_expand "atomic_fetch_nand<mode>"
  [(match_operand:SHORT 0 "register_operand")                     ;; old value at mem
   (not:SHORT (and:SHORT (match_operand:SHORT 1 "memory_operand") ;; mem location
                         (match_operand:SHORT 2 "reg_or_0_operand"))) ;; value for op
   (match_operand:SI 3 "const_int_operand")]                      ;; model
  "TARGET_ATOMIC && TARGET_INLINE_SUBWORD_ATOMIC"
{
  /* We have no QImode/HImode atomics, so form a mask, then use
     subword_atomic_fetch_strong_nand to implement an LR/SC version of the
     operation.  */

  /* Logic duplicated in libgcc/config/riscv/atomic.c for use when inlining
     is disabled.  */

  rtx old = gen_reg_rtx (SImode);
  rtx mem = operands[1];
  rtx value = operands[2];
  rtx aligned_mem = gen_reg_rtx (SImode);
  rtx shift = gen_reg_rtx (SImode);
  rtx mask = gen_reg_rtx (SImode);
  rtx not_mask = gen_reg_rtx (SImode);

  riscv_subword_address (mem, &aligned_mem, &shift, &mask, &not_mask);

  rtx shifted_value = gen_reg_rtx (SImode);
  riscv_lshift_subword (<MODE>mode, value, shift, &shifted_value);

  emit_insn (gen_subword_atomic_fetch_strong_nand (old, aligned_mem,
                                                   shifted_value,
                                                   mask, not_mask));

  emit_move_insn (old, gen_rtx_ASHIFTRT (SImode, old,
                                         gen_lowpart (QImode, shift)));

  emit_move_insn (operands[0], gen_lowpart (<MODE>mode, old));

  DONE;
})
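
;; In outline the expansion is: compute the aligned word address plus
;; shift/mask/not_mask, shift the operand into position within the word,
;; run the word-sized LR/SC NAND loop (defined below), then shift the
;; returned old word back down and truncate it to QImode/HImode.  The
;; other subword expanders below follow the same shape.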

(define_insn "subword_atomic_fetch_strong_nand"
  [(set (match_operand:SI 0 "register_operand" "=&r")            ;; old value at mem
        (match_operand:SI 1 "memory_operand" "+A"))              ;; mem location
   (set (match_dup 1)
        (unspec_volatile:SI
          [(not:SI (and:SI (match_dup 1)
                           (match_operand:SI 2 "register_operand" "rI"))) ;; value for op
           (match_operand:SI 3 "register_operand" "rI")]         ;; mask
          UNSPEC_SYNC_OLD_OP_SUBWORD))
   (match_operand:SI 4 "register_operand" "rI")                  ;; not_mask
   (clobber (match_scratch:SI 5 "=&r"))                          ;; tmp_1
   (clobber (match_scratch:SI 6 "=&r"))]                         ;; tmp_2
  "TARGET_ATOMIC && TARGET_INLINE_SUBWORD_ATOMIC"
{
  return "1:\;"
         "lr.w.aqrl\t%0, %1\;"
         "and\t%5, %0, %2\;"
         "not\t%5, %5\;"
         "and\t%5, %5, %3\;"
         "and\t%6, %0, %4\;"
         "or\t%6, %6, %5\;"
         "sc.w.rl\t%5, %6, %1\;"
         "bnez\t%5, 1b";
}
  [(set (attr "length") (const_int 32))])

(define_expand "atomic_fetch_<atomic_optab><mode>"
  [(match_operand:SHORT 0 "register_operand")                    ;; old value at mem
   (any_atomic:SHORT (match_operand:SHORT 1 "memory_operand")    ;; mem location
                     (match_operand:SHORT 2 "reg_or_0_operand")) ;; value for op
   (match_operand:SI 3 "const_int_operand")]                     ;; model
  "TARGET_ATOMIC && TARGET_INLINE_SUBWORD_ATOMIC"
{
  /* We have no QImode/HImode atomics, so form a mask, then use
     subword_atomic_fetch_strong_<atomic_optab> to implement an LR/SC version
     of the operation.  */

  /* Logic duplicated in libgcc/config/riscv/atomic.c for use when inlining
     is disabled.  */

  rtx old = gen_reg_rtx (SImode);
  rtx mem = operands[1];
  rtx value = operands[2];
  rtx aligned_mem = gen_reg_rtx (SImode);
  rtx shift = gen_reg_rtx (SImode);
  rtx mask = gen_reg_rtx (SImode);
  rtx not_mask = gen_reg_rtx (SImode);

  riscv_subword_address (mem, &aligned_mem, &shift, &mask, &not_mask);

  rtx shifted_value = gen_reg_rtx (SImode);
  riscv_lshift_subword (<MODE>mode, value, shift, &shifted_value);

  emit_insn (gen_subword_atomic_fetch_strong_<atomic_optab> (old, aligned_mem,
                                                             shifted_value,
                                                             mask, not_mask));

  emit_move_insn (old, gen_rtx_ASHIFTRT (SImode, old,
                                         gen_lowpart (QImode, shift)));

  emit_move_insn (operands[0], gen_lowpart (<MODE>mode, old));

  DONE;
})

(define_insn "atomic_exchange<mode>"
  [(set (match_operand:GPR 0 "register_operand" "=&r")
        (unspec_volatile:GPR
          [(match_operand:GPR 1 "memory_operand" "+A")
           (match_operand:SI 3 "const_int_operand")] ;; model
          UNSPEC_SYNC_EXCHANGE))
   (set (match_dup 1)
        (match_operand:GPR 2 "register_operand" "0"))]
  "TARGET_ATOMIC"
  "%F3amoswap.<amo>%A3 %0,%z2,%1"
  [(set_attr "type" "atomic")
   (set (attr "length") (const_int 8))])
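
;; A full-width exchange maps directly to amoswap; since operand 2 is
;; tied to operand 0, a relaxed __atomic_exchange_n on an int (new value
;; in a0, which also receives the old value, object at 0(a1)) should
;; assemble to
;;   amoswap.w a0,a0,0(a1)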

(define_expand "atomic_exchange<mode>"
  [(match_operand:SHORT 0 "register_operand") ;; old value at mem
   (match_operand:SHORT 1 "memory_operand")   ;; mem location
   (match_operand:SHORT 2 "register_operand") ;; value
   (match_operand:SI 3 "const_int_operand")]  ;; model
  "TARGET_ATOMIC && TARGET_INLINE_SUBWORD_ATOMIC"
{
  rtx old = gen_reg_rtx (SImode);
  rtx mem = operands[1];
  rtx value = operands[2];
  rtx aligned_mem = gen_reg_rtx (SImode);
  rtx shift = gen_reg_rtx (SImode);
  rtx mask = gen_reg_rtx (SImode);
  rtx not_mask = gen_reg_rtx (SImode);

  riscv_subword_address (mem, &aligned_mem, &shift, &mask, &not_mask);

  rtx shifted_value = gen_reg_rtx (SImode);
  riscv_lshift_subword (<MODE>mode, value, shift, &shifted_value);

  emit_insn (gen_subword_atomic_exchange_strong (old, aligned_mem,
                                                 shifted_value, not_mask));

  emit_move_insn (old, gen_rtx_ASHIFTRT (SImode, old,
                                         gen_lowpart (QImode, shift)));

  emit_move_insn (operands[0], gen_lowpart (<MODE>mode, old));
  DONE;
})

(define_insn "subword_atomic_exchange_strong"
  [(set (match_operand:SI 0 "register_operand" "=&r")    ;; old value at mem
        (match_operand:SI 1 "memory_operand" "+A"))      ;; mem location
   (set (match_dup 1)
        (unspec_volatile:SI
          [(match_operand:SI 2 "reg_or_0_operand" "rI")  ;; value
           (match_operand:SI 3 "reg_or_0_operand" "rI")] ;; not_mask
          UNSPEC_SYNC_EXCHANGE_SUBWORD))
   (clobber (match_scratch:SI 4 "=&r"))]                 ;; tmp_1
  "TARGET_ATOMIC && TARGET_INLINE_SUBWORD_ATOMIC"
{
  return "1:\;"
         "lr.w.aqrl\t%0, %1\;"
         "and\t%4, %0, %3\;"
         "or\t%4, %4, %2\;"
         "sc.w.rl\t%4, %4, %1\;"
         "bnez\t%4, 1b";
}
  [(set (attr "length") (const_int 20))])

(define_insn "atomic_cas_value_strong<mode>"
  [(set (match_operand:GPR 0 "register_operand" "=&r")
        (match_operand:GPR 1 "memory_operand" "+A"))
   (set (match_dup 1)
        (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ")
                              (match_operand:GPR 3 "reg_or_0_operand" "rJ")
                              (match_operand:SI 4 "const_int_operand")  ;; mod_s
                              (match_operand:SI 5 "const_int_operand")] ;; mod_f
          UNSPEC_COMPARE_AND_SWAP))
   (clobber (match_scratch:GPR 6 "=&r"))]
  "TARGET_ATOMIC"
{
  return "1:\;"
         "lr.<amo>.aqrl\t%0,%1\;"
         "bne\t%0,%z2,1f\;"
         "sc.<amo>.rl\t%6,%z3,%1\;"
         "bnez\t%6,1b\;"
         "1:";
}
  [(set_attr "type" "atomic")
   (set (attr "length") (const_int 16))])
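
;; The CAS loop relies on GNU assembler local labels: the trailing "1:"
;; is the forward target of the bne ("1f"), the leading "1:" the
;; backward target of the bnez ("1b").  For an SImode CAS (old value to
;; a0, expected in a1, desired in a2, object at 0(a3), scratch shown
;; here as t0) the emitted sequence is, in outline:
;;   1: lr.w.aqrl a0,0(a3)
;;      bne       a0,a1,1f
;;      sc.w.rl   t0,a2,0(a3)
;;      bnez      t0,1b
;;   1: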

(define_expand "atomic_compare_and_swap<mode>"
  [(match_operand:SI 0 "register_operand" "")   ;; bool output
   (match_operand:GPR 1 "register_operand" "")  ;; val output
   (match_operand:GPR 2 "memory_operand" "")    ;; memory
   (match_operand:GPR 3 "reg_or_0_operand" "")  ;; expected value
   (match_operand:GPR 4 "reg_or_0_operand" "")  ;; desired value
   (match_operand:SI 5 "const_int_operand" "")  ;; is_weak
   (match_operand:SI 6 "const_int_operand" "")  ;; mod_s
   (match_operand:SI 7 "const_int_operand" "")] ;; mod_f
  "TARGET_ATOMIC"
{
  emit_insn (gen_atomic_cas_value_strong<mode> (operands[1], operands[2],
                                                operands[3], operands[4],
                                                operands[6], operands[7]));

  rtx compare = operands[1];
  if (operands[3] != const0_rtx)
    {
      rtx difference = gen_rtx_MINUS (<MODE>mode, operands[1], operands[3]);
      compare = gen_reg_rtx (<MODE>mode);
      emit_insn (gen_rtx_SET (compare, difference));
    }

  if (word_mode != <MODE>mode)
    {
      rtx reg = gen_reg_rtx (word_mode);
      emit_insn (gen_rtx_SET (reg, gen_rtx_SIGN_EXTEND (word_mode, compare)));
      compare = reg;
    }

  emit_insn (gen_rtx_SET (operands[0], gen_rtx_EQ (SImode, compare, const0_rtx)));
  DONE;
})
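
;; The boolean output is derived from the loaded value rather than from
;; the sc success flag: after a strong CAS, old == expected exactly when
;; the swap happened, so the expander subtracts the expected value from
;; the returned one and tests the difference against zero (roughly
;; "sub t0,a0,a1; seqz a0,t0" once expanded), sign-extending first when
;; the value is narrower than a word.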

(define_expand "atomic_compare_and_swap<mode>"
  [(match_operand:SI 0 "register_operand")    ;; bool output
   (match_operand:SHORT 1 "register_operand") ;; val output
   (match_operand:SHORT 2 "memory_operand")   ;; memory
   (match_operand:SHORT 3 "reg_or_0_operand") ;; expected value
   (match_operand:SHORT 4 "reg_or_0_operand") ;; desired value
   (match_operand:SI 5 "const_int_operand")   ;; is_weak
   (match_operand:SI 6 "const_int_operand")   ;; mod_s
   (match_operand:SI 7 "const_int_operand")]  ;; mod_f
  "TARGET_ATOMIC && TARGET_INLINE_SUBWORD_ATOMIC"
{
  emit_insn (gen_atomic_cas_value_strong<mode> (operands[1], operands[2],
                                                operands[3], operands[4],
                                                operands[6], operands[7]));

  rtx val = gen_reg_rtx (SImode);
  if (operands[1] != const0_rtx)
    emit_move_insn (val, gen_rtx_SIGN_EXTEND (SImode, operands[1]));
  else
    emit_move_insn (val, const0_rtx);

  rtx exp = gen_reg_rtx (SImode);
  if (operands[3] != const0_rtx)
    emit_move_insn (exp, gen_rtx_SIGN_EXTEND (SImode, operands[3]));
  else
    emit_move_insn (exp, const0_rtx);

  rtx compare = val;
  if (exp != const0_rtx)
    {
      rtx difference = gen_rtx_MINUS (SImode, val, exp);
      compare = gen_reg_rtx (SImode);
      emit_move_insn (compare, difference);
    }

  if (word_mode != SImode)
    {
      rtx reg = gen_reg_rtx (word_mode);
      emit_move_insn (reg, gen_rtx_SIGN_EXTEND (word_mode, compare));
      compare = reg;
    }

  emit_move_insn (operands[0], gen_rtx_EQ (SImode, compare, const0_rtx));
  DONE;
})

(define_expand "atomic_cas_value_strong<mode>"
  [(match_operand:SHORT 0 "register_operand") ;; val output
   (match_operand:SHORT 1 "memory_operand")   ;; memory
   (match_operand:SHORT 2 "reg_or_0_operand") ;; expected value
   (match_operand:SHORT 3 "reg_or_0_operand") ;; desired value
   (match_operand:SI 4 "const_int_operand")   ;; mod_s
   (match_operand:SI 5 "const_int_operand")   ;; mod_f
   (match_scratch:SHORT 6)]
  "TARGET_ATOMIC && TARGET_INLINE_SUBWORD_ATOMIC"
{
  /* We have no QImode/HImode atomics, so form a mask, then use
     subword_atomic_cas_strong to implement an LR/SC version of the
     operation.  */

  /* Logic duplicated in libgcc/config/riscv/atomic.c for use when inlining
     is disabled.  */

  rtx old = gen_reg_rtx (SImode);
  rtx mem = operands[1];
  rtx aligned_mem = gen_reg_rtx (SImode);
  rtx shift = gen_reg_rtx (SImode);
  rtx mask = gen_reg_rtx (SImode);
  rtx not_mask = gen_reg_rtx (SImode);

  riscv_subword_address (mem, &aligned_mem, &shift, &mask, &not_mask);

  rtx o = operands[2];
  rtx n = operands[3];
  rtx shifted_o = gen_reg_rtx (SImode);
  rtx shifted_n = gen_reg_rtx (SImode);

  riscv_lshift_subword (<MODE>mode, o, shift, &shifted_o);
  riscv_lshift_subword (<MODE>mode, n, shift, &shifted_n);

  emit_move_insn (shifted_o, gen_rtx_AND (SImode, shifted_o, mask));
  emit_move_insn (shifted_n, gen_rtx_AND (SImode, shifted_n, mask));

  emit_insn (gen_subword_atomic_cas_strong (old, aligned_mem,
                                            shifted_o, shifted_n,
                                            mask, not_mask));

  emit_move_insn (old, gen_rtx_ASHIFTRT (SImode, old,
                                         gen_lowpart (QImode, shift)));

  emit_move_insn (operands[0], gen_lowpart (<MODE>mode, old));

  DONE;
})

(define_insn "subword_atomic_cas_strong"
  [(set (match_operand:SI 0 "register_operand" "=&r")                  ;; old value at mem
        (match_operand:SI 1 "memory_operand" "+A"))                    ;; mem location
   (set (match_dup 1)
        (unspec_volatile:SI [(match_operand:SI 2 "reg_or_0_operand" "rJ")  ;; expected value
                             (match_operand:SI 3 "reg_or_0_operand" "rJ")] ;; desired value
          UNSPEC_COMPARE_AND_SWAP_SUBWORD))
   (match_operand:SI 4 "register_operand" "rI")                        ;; mask
   (match_operand:SI 5 "register_operand" "rI")                        ;; not_mask
   (clobber (match_scratch:SI 6 "=&r"))]                               ;; tmp_1
  "TARGET_ATOMIC && TARGET_INLINE_SUBWORD_ATOMIC"
{
  return "1:\;"
         "lr.w.aqrl\t%0, %1\;"
         "and\t%6, %0, %4\;"
         "bne\t%6, %z2, 1f\;"
         "and\t%6, %0, %5\;"
         "or\t%6, %6, %3\;"
         "sc.w.rl\t%6, %6, %1\;"
         "bnez\t%6, 1b\;"
         "1:";
}
  [(set (attr "length") (const_int 28))])
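
;; Only the masked bits participate in the comparison: the expander
;; above pre-masks the shifted expected value, and the loop masks the
;; loaded word with %4 before the bne, so bits outside the subword can
;; never cause a spurious mismatch.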

(define_expand "atomic_test_and_set"
  [(match_operand:QI 0 "register_operand" "")    ;; bool output
   (match_operand:QI 1 "memory_operand" "+A")    ;; memory
   (match_operand:SI 2 "const_int_operand" "")]  ;; model
  "TARGET_ATOMIC"
{
  /* We have no QImode atomics, so use the address LSBs to form a mask,
     then use an aligned SImode atomic.  */
  rtx result = operands[0];
  rtx mem = operands[1];
  rtx model = operands[2];
  rtx addr = force_reg (Pmode, XEXP (mem, 0));

  rtx aligned_addr = gen_reg_rtx (Pmode);
  emit_move_insn (aligned_addr, gen_rtx_AND (Pmode, addr, GEN_INT (-4)));

  rtx aligned_mem = change_address (mem, SImode, aligned_addr);
  set_mem_alias_set (aligned_mem, 0);

  rtx offset = gen_reg_rtx (SImode);
  emit_move_insn (offset, gen_rtx_AND (SImode, gen_lowpart (SImode, addr),
                                       GEN_INT (3)));

  rtx tmp = gen_reg_rtx (SImode);
  emit_move_insn (tmp, GEN_INT (1));

  rtx shmt = gen_reg_rtx (SImode);
  emit_move_insn (shmt, gen_rtx_ASHIFT (SImode, offset, GEN_INT (3)));

  rtx word = gen_reg_rtx (SImode);
  emit_move_insn (word, gen_rtx_ASHIFT (SImode, tmp,
                                        gen_lowpart (QImode, shmt)));

  tmp = gen_reg_rtx (SImode);
  emit_insn (gen_atomic_fetch_orsi (tmp, aligned_mem, word, model));

  emit_move_insn (gen_lowpart (SImode, result),
                  gen_rtx_LSHIFTRT (SImode, tmp,
                                    gen_lowpart (QImode, shmt)));
  DONE;
})
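
;; In outline, for a flag byte at the address in a1 this sets bit
;; 8 * (a1 & 3) of the word at a1 & ~3 with a single amoor.w and shifts
;; the old word right by the same amount, so the low byte of the result
;; holds the byte's previous value; e.g. for the byte at offset 2,
;; word = 1 << 16 and bits 16..23 of the amoor.w result are the old flag.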