gcc.gnu.org Git: gcc.git blob of gcc/config/s390/s390.c
(at commit "S/390: Add emit_barrier() after trap")
1 /* Subroutines used for code generation on IBM S/390 and zSeries
2 Copyright (C) 1999-2015 Free Software Foundation, Inc.
3 Contributed by Hartmut Penner (hpenner@de.ibm.com) and
4 Ulrich Weigand (uweigand@de.ibm.com) and
5 Andreas Krebbel (Andreas.Krebbel@de.ibm.com).
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "backend.h"
27 #include "cfghooks.h"
28 #include "tree.h"
29 #include "gimple.h"
30 #include "rtl.h"
31 #include "df.h"
32 #include "alias.h"
33 #include "fold-const.h"
34 #include "print-tree.h"
35 #include "stringpool.h"
36 #include "stor-layout.h"
37 #include "varasm.h"
38 #include "calls.h"
39 #include "tm_p.h"
40 #include "regs.h"
41 #include "insn-config.h"
42 #include "conditions.h"
43 #include "output.h"
44 #include "insn-attr.h"
45 #include "flags.h"
46 #include "except.h"
47 #include "recog.h"
48 #include "expmed.h"
49 #include "dojump.h"
50 #include "explow.h"
51 #include "emit-rtl.h"
52 #include "stmt.h"
53 #include "expr.h"
54 #include "reload.h"
55 #include "diagnostic-core.h"
56 #include "cfgrtl.h"
57 #include "cfganal.h"
58 #include "lcm.h"
59 #include "cfgbuild.h"
60 #include "cfgcleanup.h"
61 #include "target.h"
62 #include "debug.h"
63 #include "langhooks.h"
64 #include "insn-codes.h"
65 #include "optabs.h"
66 #include "internal-fn.h"
67 #include "gimple-fold.h"
68 #include "tree-eh.h"
69 #include "gimplify.h"
70 #include "params.h"
71 #include "cfgloop.h"
72 #include "opts.h"
73 #include "tree-pass.h"
74 #include "context.h"
75 #include "builtins.h"
76 #include "rtl-iter.h"
77 #include "intl.h"
78 #include "cgraph.h"
79
80 /* This file should be included last. */
81 #include "target-def.h"
82
83 /* Define the specific costs for a given cpu. */
84
85 struct processor_costs
86 {
87 /* multiplication */
88 const int m; /* cost of an M instruction. */
89 const int mghi; /* cost of an MGHI instruction. */
90 const int mh; /* cost of an MH instruction. */
91 const int mhi; /* cost of an MHI instruction. */
92 const int ml; /* cost of an ML instruction. */
93 const int mr; /* cost of an MR instruction. */
94 const int ms; /* cost of an MS instruction. */
95 const int msg; /* cost of an MSG instruction. */
96 const int msgf; /* cost of an MSGF instruction. */
97 const int msgfr; /* cost of an MSGFR instruction. */
98 const int msgr; /* cost of an MSGR instruction. */
99 const int msr; /* cost of an MSR instruction. */
100 const int mult_df; /* cost of multiplication in DFmode. */
 101 const int mxbr; /* cost of an MXBR instruction. */
102 /* square root */
103 const int sqxbr; /* cost of square root in TFmode. */
104 const int sqdbr; /* cost of square root in DFmode. */
105 const int sqebr; /* cost of square root in SFmode. */
106 /* multiply and add */
107 const int madbr; /* cost of multiply and add in DFmode. */
108 const int maebr; /* cost of multiply and add in SFmode. */
109 /* division */
 110 const int dxbr; /* cost of a DXBR instruction. */
 111 const int ddbr; /* cost of a DDBR instruction. */
 112 const int debr; /* cost of a DEBR instruction. */
 113 const int dlgr; /* cost of a DLGR instruction. */
 114 const int dlr; /* cost of a DLR instruction. */
 115 const int dr; /* cost of a DR instruction. */
 116 const int dsgfr; /* cost of a DSGFR instruction. */
 117 const int dsgr; /* cost of a DSGR instruction. */
118 };
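/* Note on the tables below: all cost values are expressed via
   COSTS_N_INSNS, i.e. as multiples of the cost of a single simple
   instruction (COSTS_N_INSNS (1)).  For example, COSTS_N_INSNS (30)
   for DDBR on z900 models a DFmode divide as roughly 30 times as
   expensive as a simple instruction.  */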
119
120 const struct processor_costs *s390_cost;
121
122 static const
123 struct processor_costs z900_cost =
124 {
125 COSTS_N_INSNS (5), /* M */
126 COSTS_N_INSNS (10), /* MGHI */
127 COSTS_N_INSNS (5), /* MH */
128 COSTS_N_INSNS (4), /* MHI */
129 COSTS_N_INSNS (5), /* ML */
130 COSTS_N_INSNS (5), /* MR */
131 COSTS_N_INSNS (4), /* MS */
132 COSTS_N_INSNS (15), /* MSG */
133 COSTS_N_INSNS (7), /* MSGF */
134 COSTS_N_INSNS (7), /* MSGFR */
135 COSTS_N_INSNS (10), /* MSGR */
136 COSTS_N_INSNS (4), /* MSR */
137 COSTS_N_INSNS (7), /* multiplication in DFmode */
138 COSTS_N_INSNS (13), /* MXBR */
139 COSTS_N_INSNS (136), /* SQXBR */
140 COSTS_N_INSNS (44), /* SQDBR */
141 COSTS_N_INSNS (35), /* SQEBR */
142 COSTS_N_INSNS (18), /* MADBR */
143 COSTS_N_INSNS (13), /* MAEBR */
144 COSTS_N_INSNS (134), /* DXBR */
145 COSTS_N_INSNS (30), /* DDBR */
146 COSTS_N_INSNS (27), /* DEBR */
147 COSTS_N_INSNS (220), /* DLGR */
148 COSTS_N_INSNS (34), /* DLR */
149 COSTS_N_INSNS (34), /* DR */
150 COSTS_N_INSNS (32), /* DSGFR */
151 COSTS_N_INSNS (32), /* DSGR */
152 };
153
154 static const
155 struct processor_costs z990_cost =
156 {
157 COSTS_N_INSNS (4), /* M */
158 COSTS_N_INSNS (2), /* MGHI */
159 COSTS_N_INSNS (2), /* MH */
160 COSTS_N_INSNS (2), /* MHI */
161 COSTS_N_INSNS (4), /* ML */
162 COSTS_N_INSNS (4), /* MR */
163 COSTS_N_INSNS (5), /* MS */
164 COSTS_N_INSNS (6), /* MSG */
165 COSTS_N_INSNS (4), /* MSGF */
166 COSTS_N_INSNS (4), /* MSGFR */
167 COSTS_N_INSNS (4), /* MSGR */
168 COSTS_N_INSNS (4), /* MSR */
169 COSTS_N_INSNS (1), /* multiplication in DFmode */
170 COSTS_N_INSNS (28), /* MXBR */
171 COSTS_N_INSNS (130), /* SQXBR */
172 COSTS_N_INSNS (66), /* SQDBR */
173 COSTS_N_INSNS (38), /* SQEBR */
174 COSTS_N_INSNS (1), /* MADBR */
175 COSTS_N_INSNS (1), /* MAEBR */
176 COSTS_N_INSNS (60), /* DXBR */
177 COSTS_N_INSNS (40), /* DDBR */
178 COSTS_N_INSNS (26), /* DEBR */
179 COSTS_N_INSNS (176), /* DLGR */
180 COSTS_N_INSNS (31), /* DLR */
181 COSTS_N_INSNS (31), /* DR */
182 COSTS_N_INSNS (31), /* DSGFR */
183 COSTS_N_INSNS (31), /* DSGR */
184 };
185
186 static const
187 struct processor_costs z9_109_cost =
188 {
189 COSTS_N_INSNS (4), /* M */
190 COSTS_N_INSNS (2), /* MGHI */
191 COSTS_N_INSNS (2), /* MH */
192 COSTS_N_INSNS (2), /* MHI */
193 COSTS_N_INSNS (4), /* ML */
194 COSTS_N_INSNS (4), /* MR */
195 COSTS_N_INSNS (5), /* MS */
196 COSTS_N_INSNS (6), /* MSG */
197 COSTS_N_INSNS (4), /* MSGF */
198 COSTS_N_INSNS (4), /* MSGFR */
199 COSTS_N_INSNS (4), /* MSGR */
200 COSTS_N_INSNS (4), /* MSR */
201 COSTS_N_INSNS (1), /* multiplication in DFmode */
202 COSTS_N_INSNS (28), /* MXBR */
203 COSTS_N_INSNS (130), /* SQXBR */
204 COSTS_N_INSNS (66), /* SQDBR */
205 COSTS_N_INSNS (38), /* SQEBR */
206 COSTS_N_INSNS (1), /* MADBR */
207 COSTS_N_INSNS (1), /* MAEBR */
208 COSTS_N_INSNS (60), /* DXBR */
209 COSTS_N_INSNS (40), /* DDBR */
210 COSTS_N_INSNS (26), /* DEBR */
211 COSTS_N_INSNS (30), /* DLGR */
212 COSTS_N_INSNS (23), /* DLR */
213 COSTS_N_INSNS (23), /* DR */
214 COSTS_N_INSNS (24), /* DSGFR */
215 COSTS_N_INSNS (24), /* DSGR */
216 };
217
218 static const
219 struct processor_costs z10_cost =
220 {
221 COSTS_N_INSNS (10), /* M */
222 COSTS_N_INSNS (10), /* MGHI */
223 COSTS_N_INSNS (10), /* MH */
224 COSTS_N_INSNS (10), /* MHI */
225 COSTS_N_INSNS (10), /* ML */
226 COSTS_N_INSNS (10), /* MR */
227 COSTS_N_INSNS (10), /* MS */
228 COSTS_N_INSNS (10), /* MSG */
229 COSTS_N_INSNS (10), /* MSGF */
230 COSTS_N_INSNS (10), /* MSGFR */
231 COSTS_N_INSNS (10), /* MSGR */
232 COSTS_N_INSNS (10), /* MSR */
233 COSTS_N_INSNS (1) , /* multiplication in DFmode */
234 COSTS_N_INSNS (50), /* MXBR */
235 COSTS_N_INSNS (120), /* SQXBR */
236 COSTS_N_INSNS (52), /* SQDBR */
237 COSTS_N_INSNS (38), /* SQEBR */
238 COSTS_N_INSNS (1), /* MADBR */
239 COSTS_N_INSNS (1), /* MAEBR */
240 COSTS_N_INSNS (111), /* DXBR */
241 COSTS_N_INSNS (39), /* DDBR */
242 COSTS_N_INSNS (32), /* DEBR */
243 COSTS_N_INSNS (160), /* DLGR */
244 COSTS_N_INSNS (71), /* DLR */
245 COSTS_N_INSNS (71), /* DR */
246 COSTS_N_INSNS (71), /* DSGFR */
247 COSTS_N_INSNS (71), /* DSGR */
248 };
249
250 static const
251 struct processor_costs z196_cost =
252 {
253 COSTS_N_INSNS (7), /* M */
254 COSTS_N_INSNS (5), /* MGHI */
255 COSTS_N_INSNS (5), /* MH */
256 COSTS_N_INSNS (5), /* MHI */
257 COSTS_N_INSNS (7), /* ML */
258 COSTS_N_INSNS (7), /* MR */
259 COSTS_N_INSNS (6), /* MS */
260 COSTS_N_INSNS (8), /* MSG */
261 COSTS_N_INSNS (6), /* MSGF */
262 COSTS_N_INSNS (6), /* MSGFR */
263 COSTS_N_INSNS (8), /* MSGR */
264 COSTS_N_INSNS (6), /* MSR */
265 COSTS_N_INSNS (1) , /* multiplication in DFmode */
266 COSTS_N_INSNS (40), /* MXBR B+40 */
267 COSTS_N_INSNS (100), /* SQXBR B+100 */
268 COSTS_N_INSNS (42), /* SQDBR B+42 */
269 COSTS_N_INSNS (28), /* SQEBR B+28 */
270 COSTS_N_INSNS (1), /* MADBR B */
271 COSTS_N_INSNS (1), /* MAEBR B */
272 COSTS_N_INSNS (101), /* DXBR B+101 */
273 COSTS_N_INSNS (29), /* DDBR */
274 COSTS_N_INSNS (22), /* DEBR */
275 COSTS_N_INSNS (160), /* DLGR cracked */
276 COSTS_N_INSNS (160), /* DLR cracked */
277 COSTS_N_INSNS (160), /* DR expanded */
278 COSTS_N_INSNS (160), /* DSGFR cracked */
279 COSTS_N_INSNS (160), /* DSGR cracked */
280 };
281
282 static const
283 struct processor_costs zEC12_cost =
284 {
285 COSTS_N_INSNS (7), /* M */
286 COSTS_N_INSNS (5), /* MGHI */
287 COSTS_N_INSNS (5), /* MH */
288 COSTS_N_INSNS (5), /* MHI */
289 COSTS_N_INSNS (7), /* ML */
290 COSTS_N_INSNS (7), /* MR */
291 COSTS_N_INSNS (6), /* MS */
292 COSTS_N_INSNS (8), /* MSG */
293 COSTS_N_INSNS (6), /* MSGF */
294 COSTS_N_INSNS (6), /* MSGFR */
295 COSTS_N_INSNS (8), /* MSGR */
296 COSTS_N_INSNS (6), /* MSR */
297 COSTS_N_INSNS (1) , /* multiplication in DFmode */
298 COSTS_N_INSNS (40), /* MXBR B+40 */
299 COSTS_N_INSNS (100), /* SQXBR B+100 */
300 COSTS_N_INSNS (42), /* SQDBR B+42 */
301 COSTS_N_INSNS (28), /* SQEBR B+28 */
302 COSTS_N_INSNS (1), /* MADBR B */
303 COSTS_N_INSNS (1), /* MAEBR B */
304 COSTS_N_INSNS (131), /* DXBR B+131 */
305 COSTS_N_INSNS (29), /* DDBR */
306 COSTS_N_INSNS (22), /* DEBR */
307 COSTS_N_INSNS (160), /* DLGR cracked */
308 COSTS_N_INSNS (160), /* DLR cracked */
309 COSTS_N_INSNS (160), /* DR expanded */
310 COSTS_N_INSNS (160), /* DSGFR cracked */
311 COSTS_N_INSNS (160), /* DSGR cracked */
312 };
313
314 extern int reload_completed;
315
316 /* Kept up to date using the SCHED_VARIABLE_ISSUE hook. */
317 static rtx_insn *last_scheduled_insn;
318
319 /* Structure used to hold the components of a S/390 memory
320 address. A legitimate address on S/390 is of the general
321 form
322 base + index + displacement
323 where any of the components is optional.
324
325 base and index are registers of the class ADDR_REGS,
326 displacement is an unsigned 12-bit immediate constant. */
327
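/* For example, a memory operand written D(X,B) in assembler syntax,
   say 100(%r3,%r4), is decomposed here into base = %r4, index = %r3
   and disp = 100.  */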
328 struct s390_address
329 {
330 rtx base;
331 rtx indx;
332 rtx disp;
333 bool pointer;
334 bool literal_pool;
335 };
336
337 /* The following structure is embedded in the machine
338 specific part of struct function. */
339
340 struct GTY (()) s390_frame_layout
341 {
342 /* Offset within stack frame. */
343 HOST_WIDE_INT gprs_offset;
344 HOST_WIDE_INT f0_offset;
345 HOST_WIDE_INT f4_offset;
346 HOST_WIDE_INT f8_offset;
347 HOST_WIDE_INT backchain_offset;
348
 349 /* Number of the first and the last gpr for which slots in the
 350 register save area are reserved. */
351 int first_save_gpr_slot;
352 int last_save_gpr_slot;
353
354 /* Location (FP register number) where GPRs (r0-r15) should
355 be saved to.
356 0 - does not need to be saved at all
357 -1 - stack slot */
358 signed char gpr_save_slots[16];
359
360 /* Number of first and last gpr to be saved, restored. */
361 int first_save_gpr;
362 int first_restore_gpr;
363 int last_save_gpr;
364 int last_restore_gpr;
365
366 /* Bits standing for floating point registers. Set, if the
367 respective register has to be saved. Starting with reg 16 (f0)
368 at the rightmost bit.
369 Bit 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
370 fpr 15 13 11 9 14 12 10 8 7 5 3 1 6 4 2 0
371 reg 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 */
372 unsigned int fpr_bitmap;
373
374 /* Number of floating point registers f8-f15 which must be saved. */
375 int high_fprs;
376
377 /* Set if return address needs to be saved.
378 This flag is set by s390_return_addr_rtx if it could not use
379 the initial value of r14 and therefore depends on r14 saved
380 to the stack. */
381 bool save_return_addr_p;
382
383 /* Size of stack frame. */
384 HOST_WIDE_INT frame_size;
385 };
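/* For example, if (only) f8 has to be saved, bit 8 of fpr_bitmap is
   set (fpr_bitmap == 0x100, see the table above) and high_fprs == 1.  */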
386
387 /* Define the structure for the machine field in struct function. */
388
389 struct GTY(()) machine_function
390 {
391 struct s390_frame_layout frame_layout;
392
393 /* Literal pool base register. */
394 rtx base_reg;
395
396 /* True if we may need to perform branch splitting. */
397 bool split_branches_pending_p;
398
399 bool has_landing_pad_p;
400
401 /* True if the current function may contain a tbegin clobbering
402 FPRs. */
403 bool tbegin_p;
404 };
405
 406 /* A few accessor macros for struct cfun->machine->s390_frame_layout. */
407
408 #define cfun_frame_layout (cfun->machine->frame_layout)
409 #define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
410 #define cfun_save_arg_fprs_p (!!(TARGET_64BIT \
411 ? cfun_frame_layout.fpr_bitmap & 0x0f \
412 : cfun_frame_layout.fpr_bitmap & 0x03))
413 #define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
414 cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
415 #define cfun_set_fpr_save(REGNO) (cfun->machine->frame_layout.fpr_bitmap |= \
416 (1 << (REGNO - FPR0_REGNUM)))
417 #define cfun_fpr_save_p(REGNO) (!!(cfun->machine->frame_layout.fpr_bitmap & \
418 (1 << (REGNO - FPR0_REGNUM))))
419 #define cfun_gpr_save_slot(REGNO) \
420 cfun->machine->frame_layout.gpr_save_slots[REGNO]
421
422 /* Number of GPRs and FPRs used for argument passing. */
423 #define GP_ARG_NUM_REG 5
424 #define FP_ARG_NUM_REG (TARGET_64BIT? 4 : 2)
425 #define VEC_ARG_NUM_REG 8
426
427 /* A couple of shortcuts. */
428 #define CONST_OK_FOR_J(x) \
429 CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
430 #define CONST_OK_FOR_K(x) \
431 CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
432 #define CONST_OK_FOR_Os(x) \
433 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
434 #define CONST_OK_FOR_Op(x) \
435 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
436 #define CONST_OK_FOR_On(x) \
437 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
438
439 #define REGNO_PAIR_OK(REGNO, MODE) \
440 (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
441
 442 /* That's the read-ahead, in bytes, of the dynamic branch prediction
 443 unit on a z10 (or higher) CPU. */
444 #define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
445
446
447 /* Indicate which ABI has been used for passing vector args.
448 0 - no vector type arguments have been passed where the ABI is relevant
449 1 - the old ABI has been used
450 2 - a vector type argument has been passed either in a vector register
451 or on the stack by value */
452 static int s390_vector_abi = 0;
453
454 /* Set the vector ABI marker if TYPE is subject to the vector ABI
455 switch. The vector ABI affects only vector data types. There are
456 two aspects of the vector ABI relevant here:
457
458 1. vectors >= 16 bytes have an alignment of 8 bytes with the new
459 ABI and natural alignment with the old.
460
 461 2. vectors <= 16 bytes are passed in VRs or by value on the stack
462 with the new ABI but by reference on the stack with the old.
463
464 If ARG_P is true TYPE is used for a function argument or return
465 value. The ABI marker then is set for all vector data types. If
466 ARG_P is false only type 1 vectors are being checked. */
467
468 static void
469 s390_check_type_for_vector_abi (const_tree type, bool arg_p, bool in_struct_p)
470 {
471 static hash_set<const_tree> visited_types_hash;
472
473 if (s390_vector_abi)
474 return;
475
476 if (type == NULL_TREE || TREE_CODE (type) == ERROR_MARK)
477 return;
478
479 if (visited_types_hash.contains (type))
480 return;
481
482 visited_types_hash.add (type);
483
484 if (VECTOR_TYPE_P (type))
485 {
486 int type_size = int_size_in_bytes (type);
487
 488 /* Outside of arguments only the alignment changes, and this
 489 only happens for vector types >= 16 bytes. */
490 if (!arg_p && type_size < 16)
491 return;
492
 493 /* In arguments vector types > 16 bytes are passed as before (GCC
494 never enforced the bigger alignment for arguments which was
495 required by the old vector ABI). However, it might still be
496 ABI relevant due to the changed alignment if it is a struct
497 member. */
498 if (arg_p && type_size > 16 && !in_struct_p)
499 return;
500
501 s390_vector_abi = TARGET_VX_ABI ? 2 : 1;
502 }
503 else if (POINTER_TYPE_P (type) || TREE_CODE (type) == ARRAY_TYPE)
504 {
505 /* ARRAY_TYPE: Since with neither of the ABIs we have more than
506 natural alignment there will never be ABI dependent padding
507 in an array type. That's why we do not set in_struct_p to
508 true here. */
509 s390_check_type_for_vector_abi (TREE_TYPE (type), arg_p, in_struct_p);
510 }
511 else if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
512 {
513 tree arg_chain;
514
515 /* Check the return type. */
516 s390_check_type_for_vector_abi (TREE_TYPE (type), true, false);
517
518 for (arg_chain = TYPE_ARG_TYPES (type);
519 arg_chain;
520 arg_chain = TREE_CHAIN (arg_chain))
521 s390_check_type_for_vector_abi (TREE_VALUE (arg_chain), true, false);
522 }
523 else if (RECORD_OR_UNION_TYPE_P (type))
524 {
525 tree field;
526
527 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
528 {
529 if (TREE_CODE (field) != FIELD_DECL)
530 continue;
531
532 s390_check_type_for_vector_abi (TREE_TYPE (field), arg_p, true);
533 }
534 }
535 }
536
537
538 /* System z builtins. */
539
540 #include "s390-builtins.h"
541
542 const unsigned int bflags_builtin[S390_BUILTIN_MAX + 1] =
543 {
544 #undef B_DEF
545 #undef OB_DEF
546 #undef OB_DEF_VAR
547 #define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, ...) BFLAGS,
548 #define OB_DEF(...)
549 #define OB_DEF_VAR(...)
550 #include "s390-builtins.def"
551 0
552 };
553
554 const unsigned int opflags_builtin[S390_BUILTIN_MAX + 1] =
555 {
556 #undef B_DEF
557 #undef OB_DEF
558 #undef OB_DEF_VAR
559 #define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, OPFLAGS, ...) OPFLAGS,
560 #define OB_DEF(...)
561 #define OB_DEF_VAR(...)
562 #include "s390-builtins.def"
563 0
564 };
565
566 const unsigned int bflags_overloaded_builtin[S390_OVERLOADED_BUILTIN_MAX + 1] =
567 {
568 #undef B_DEF
569 #undef OB_DEF
570 #undef OB_DEF_VAR
571 #define B_DEF(...)
572 #define OB_DEF(NAME, FIRST_VAR_NAME, LAST_VAR_NAME, BFLAGS, ...) BFLAGS,
573 #define OB_DEF_VAR(...)
574 #include "s390-builtins.def"
575 0
576 };
577
578 const unsigned int
579 opflags_overloaded_builtin_var[S390_OVERLOADED_BUILTIN_VAR_MAX + 1] =
580 {
581 #undef B_DEF
582 #undef OB_DEF
583 #undef OB_DEF_VAR
584 #define B_DEF(...)
585 #define OB_DEF(...)
586 #define OB_DEF_VAR(NAME, PATTERN, FLAGS, FNTYPE) FLAGS,
587 #include "s390-builtins.def"
588 0
589 };
590
591 tree s390_builtin_types[BT_MAX];
592 tree s390_builtin_fn_types[BT_FN_MAX];
593 tree s390_builtin_decls[S390_BUILTIN_MAX +
594 S390_OVERLOADED_BUILTIN_MAX +
595 S390_OVERLOADED_BUILTIN_VAR_MAX];
596
597 static enum insn_code const code_for_builtin[S390_BUILTIN_MAX + 1] = {
598 #undef B_DEF
599 #undef OB_DEF
600 #undef OB_DEF_VAR
601 #define B_DEF(NAME, PATTERN, ...) CODE_FOR_##PATTERN,
602 #define OB_DEF(...)
603 #define OB_DEF_VAR(...)
604
605 #include "s390-builtins.def"
606 CODE_FOR_nothing
607 };
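/* As an illustration of how the X-macro tables above are filled in
   (using a purely hypothetical entry, not one from s390-builtins.def):

     B_DEF (s390_foo, foo_pattern, 0, B_VX, O_U4, BT_FN_INT_INT)

   would contribute B_VX to bflags_builtin[], O_U4 to opflags_builtin[]
   and CODE_FOR_foo_pattern to code_for_builtin[], all at index
   S390_BUILTIN_s390_foo.  */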
608
609 static void
610 s390_init_builtins (void)
611 {
612 /* These definitions are being used in s390-builtins.def. */
613 tree returns_twice_attr = tree_cons (get_identifier ("returns_twice"),
614 NULL, NULL);
615 tree noreturn_attr = tree_cons (get_identifier ("noreturn"), NULL, NULL);
616 tree c_uint64_type_node;
617 unsigned int bflags_mask = (BFLAGS_MASK_INIT);
618
619 bflags_mask |= (TARGET_VX) ? B_VX : 0;
620 bflags_mask |= (TARGET_HTM) ? B_HTM : 0;
621
 622 /* The uint64_type_node from tree.c is not compatible with the C99
623 uint64_t data type. What we want is c_uint64_type_node from
624 c-common.c. But since backend code is not supposed to interface
625 with the frontend we recreate it here. */
626 if (TARGET_64BIT)
627 c_uint64_type_node = long_unsigned_type_node;
628 else
629 c_uint64_type_node = long_long_unsigned_type_node;
630
631 #undef DEF_TYPE
632 #define DEF_TYPE(INDEX, BFLAGS, NODE, CONST_P) \
633 if ((BFLAGS) == 0 || ((BFLAGS) & bflags_mask)) \
634 s390_builtin_types[INDEX] = (!CONST_P) ? \
635 (NODE) : build_type_variant ((NODE), 1, 0);
636
637 #undef DEF_POINTER_TYPE
638 #define DEF_POINTER_TYPE(INDEX, BFLAGS, INDEX_BASE) \
639 if ((BFLAGS) == 0 || ((BFLAGS) & bflags_mask)) \
640 s390_builtin_types[INDEX] = \
641 build_pointer_type (s390_builtin_types[INDEX_BASE]);
642
643 #undef DEF_DISTINCT_TYPE
644 #define DEF_DISTINCT_TYPE(INDEX, BFLAGS, INDEX_BASE) \
645 if ((BFLAGS) == 0 || ((BFLAGS) & bflags_mask)) \
646 s390_builtin_types[INDEX] = \
647 build_distinct_type_copy (s390_builtin_types[INDEX_BASE]);
648
649 #undef DEF_VECTOR_TYPE
650 #define DEF_VECTOR_TYPE(INDEX, BFLAGS, INDEX_BASE, ELEMENTS) \
651 if ((BFLAGS) == 0 || ((BFLAGS) & bflags_mask)) \
652 s390_builtin_types[INDEX] = \
653 build_vector_type (s390_builtin_types[INDEX_BASE], ELEMENTS);
654
655 #undef DEF_OPAQUE_VECTOR_TYPE
656 #define DEF_OPAQUE_VECTOR_TYPE(INDEX, BFLAGS, INDEX_BASE, ELEMENTS) \
657 if ((BFLAGS) == 0 || ((BFLAGS) & bflags_mask)) \
658 s390_builtin_types[INDEX] = \
659 build_opaque_vector_type (s390_builtin_types[INDEX_BASE], ELEMENTS);
660
661 #undef DEF_FN_TYPE
662 #define DEF_FN_TYPE(INDEX, BFLAGS, args...) \
663 if ((BFLAGS) == 0 || ((BFLAGS) & bflags_mask)) \
664 s390_builtin_fn_types[INDEX] = \
665 build_function_type_list (args, NULL_TREE);
666 #undef DEF_OV_TYPE
667 #define DEF_OV_TYPE(...)
668 #include "s390-builtin-types.def"
669
670 #undef B_DEF
671 #define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, OPFLAGS, FNTYPE) \
672 if (((BFLAGS) & ~bflags_mask) == 0) \
673 s390_builtin_decls[S390_BUILTIN_##NAME] = \
674 add_builtin_function ("__builtin_" #NAME, \
675 s390_builtin_fn_types[FNTYPE], \
676 S390_BUILTIN_##NAME, \
677 BUILT_IN_MD, \
678 NULL, \
679 ATTRS);
680 #undef OB_DEF
681 #define OB_DEF(NAME, FIRST_VAR_NAME, LAST_VAR_NAME, BFLAGS, FNTYPE) \
682 if (((BFLAGS) & ~bflags_mask) == 0) \
683 s390_builtin_decls[S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX] = \
684 add_builtin_function ("__builtin_" #NAME, \
685 s390_builtin_fn_types[FNTYPE], \
686 S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX, \
687 BUILT_IN_MD, \
688 NULL, \
689 0);
690 #undef OB_DEF_VAR
691 #define OB_DEF_VAR(...)
692 #include "s390-builtins.def"
693
694 }
695
696 /* Return true if ARG is appropriate as argument number ARGNUM of
697 builtin DECL. The operand flags from s390-builtins.def have to
 698 be passed as OP_FLAGS. */
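/* For example (derived from the bitwidth tables below), an operand
   flagged O_U4 must be an unsigned constant in the range 0..15, while
   one flagged O_S8 must be a signed constant in the range -128..127.  */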
699 bool
700 s390_const_operand_ok (tree arg, int argnum, int op_flags, tree decl)
701 {
702 if (O_UIMM_P (op_flags))
703 {
704 int bitwidths[] = { 1, 2, 3, 4, 5, 8, 12, 16, 32 };
705 int bitwidth = bitwidths[op_flags - O_U1];
706
707 if (!tree_fits_uhwi_p (arg)
708 || tree_to_uhwi (arg) > ((unsigned HOST_WIDE_INT)1 << bitwidth) - 1)
709 {
710 error("constant argument %d for builtin %qF is out of range (0.."
711 HOST_WIDE_INT_PRINT_UNSIGNED ")",
712 argnum, decl,
713 ((unsigned HOST_WIDE_INT)1 << bitwidth) - 1);
714 return false;
715 }
716 }
717
718 if (O_SIMM_P (op_flags))
719 {
720 int bitwidths[] = { 2, 3, 4, 5, 8, 12, 16, 32 };
721 int bitwidth = bitwidths[op_flags - O_S2];
722
723 if (!tree_fits_shwi_p (arg)
724 || tree_to_shwi (arg) < -((HOST_WIDE_INT)1 << (bitwidth - 1))
725 || tree_to_shwi (arg) > (((HOST_WIDE_INT)1 << (bitwidth - 1)) - 1))
726 {
727 error("constant argument %d for builtin %qF is out of range ("
728 HOST_WIDE_INT_PRINT_DEC ".."
729 HOST_WIDE_INT_PRINT_DEC ")",
730 argnum, decl,
731 -(HOST_WIDE_INT)1 << (bitwidth - 1),
732 ((HOST_WIDE_INT)1 << (bitwidth - 1)) - 1);
733 return false;
734 }
735 }
736 return true;
737 }
738
739 /* Expand an expression EXP that calls a built-in function,
740 with result going to TARGET if that's convenient
741 (and in mode MODE if that's convenient).
742 SUBTARGET may be used as the target for computing one of EXP's operands.
743 IGNORE is nonzero if the value is to be ignored. */
744
745 static rtx
746 s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
747 machine_mode mode ATTRIBUTE_UNUSED,
748 int ignore ATTRIBUTE_UNUSED)
749 {
750 #define MAX_ARGS 5
751
752 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
753 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
754 enum insn_code icode;
755 rtx op[MAX_ARGS], pat;
756 int arity;
757 bool nonvoid;
758 tree arg;
759 call_expr_arg_iterator iter;
760 unsigned int all_op_flags = opflags_for_builtin (fcode);
761 machine_mode last_vec_mode = VOIDmode;
762
763 if (TARGET_DEBUG_ARG)
764 {
765 fprintf (stderr,
766 "s390_expand_builtin, code = %4d, %s\n",
767 (int)fcode, IDENTIFIER_POINTER (DECL_NAME (fndecl)));
768 }
769
770 if (fcode >= S390_OVERLOADED_BUILTIN_VAR_OFFSET
771 && fcode < S390_ALL_BUILTIN_MAX)
772 {
773 gcc_unreachable ();
774 }
775 else if (fcode < S390_OVERLOADED_BUILTIN_OFFSET)
776 {
777 icode = code_for_builtin[fcode];
778 /* Set a flag in the machine specific cfun part in order to support
779 saving/restoring of FPRs. */
780 if (fcode == S390_BUILTIN_tbegin || fcode == S390_BUILTIN_tbegin_retry)
781 cfun->machine->tbegin_p = true;
782 }
783 else if (fcode < S390_OVERLOADED_BUILTIN_VAR_OFFSET)
784 {
785 error ("Unresolved overloaded builtin");
786 return const0_rtx;
787 }
788 else
789 internal_error ("bad builtin fcode");
790
791 if (icode == 0)
792 internal_error ("bad builtin icode");
793
794 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
795
796 if (nonvoid)
797 {
798 machine_mode tmode = insn_data[icode].operand[0].mode;
799 if (!target
800 || GET_MODE (target) != tmode
801 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
802 target = gen_reg_rtx (tmode);
803
804 /* There are builtins (e.g. vec_promote) with no vector
805 arguments but an element selector. So we have to also look
806 at the vector return type when emitting the modulo
807 operation. */
808 if (VECTOR_MODE_P (insn_data[icode].operand[0].mode))
809 last_vec_mode = insn_data[icode].operand[0].mode;
810 }
811
812 arity = 0;
813 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
814 {
815 const struct insn_operand_data *insn_op;
816 unsigned int op_flags = all_op_flags & ((1 << O_SHIFT) - 1);
817
818 all_op_flags = all_op_flags >> O_SHIFT;
819
820 if (arg == error_mark_node)
821 return NULL_RTX;
822 if (arity >= MAX_ARGS)
823 return NULL_RTX;
824
825 if (O_IMM_P (op_flags)
826 && TREE_CODE (arg) != INTEGER_CST)
827 {
828 error ("constant value required for builtin %qF argument %d",
829 fndecl, arity + 1);
830 return const0_rtx;
831 }
832
833 if (!s390_const_operand_ok (arg, arity + 1, op_flags, fndecl))
834 return const0_rtx;
835
836 insn_op = &insn_data[icode].operand[arity + nonvoid];
837 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
838
839 /* Wrap the expanded RTX for pointer types into a MEM expr with
840 the proper mode. This allows us to use e.g. (match_operand
841 "memory_operand"..) in the insn patterns instead of (mem
 842 (match_operand "address_operand")). This is helpful for
843 patterns not just accepting MEMs. */
844 if (POINTER_TYPE_P (TREE_TYPE (arg))
845 && insn_op->predicate != address_operand)
846 op[arity] = gen_rtx_MEM (insn_op->mode, op[arity]);
847
 848 /* Expand the modulo operation required on element selectors. */
849 if (op_flags == O_ELEM)
850 {
851 gcc_assert (last_vec_mode != VOIDmode);
852 op[arity] = simplify_expand_binop (SImode, code_to_optab (AND),
853 op[arity],
854 GEN_INT (GET_MODE_NUNITS (last_vec_mode) - 1),
855 NULL_RTX, 1, OPTAB_DIRECT);
856 }
857
858 /* Record the vector mode used for an element selector. This assumes:
859 1. There is no builtin with two different vector modes and an element selector
860 2. The element selector comes after the vector type it is referring to.
 861 This is currently true for all the builtins but FIXME we
 862 should better check for that. */
863 if (VECTOR_MODE_P (insn_op->mode))
864 last_vec_mode = insn_op->mode;
865
866 if (insn_op->predicate (op[arity], insn_op->mode))
867 {
868 arity++;
869 continue;
870 }
871
872 if (MEM_P (op[arity])
873 && insn_op->predicate == memory_operand
874 && (GET_MODE (XEXP (op[arity], 0)) == Pmode
875 || GET_MODE (XEXP (op[arity], 0)) == VOIDmode))
876 {
877 op[arity] = replace_equiv_address (op[arity],
878 copy_to_mode_reg (Pmode,
879 XEXP (op[arity], 0)));
880 }
881 else if (GET_MODE (op[arity]) == insn_op->mode
882 || GET_MODE (op[arity]) == VOIDmode
883 || (insn_op->predicate == address_operand
884 && GET_MODE (op[arity]) == Pmode))
885 {
886 /* An address_operand usually has VOIDmode in the expander
887 so we cannot use this. */
888 machine_mode target_mode =
889 (insn_op->predicate == address_operand
890 ? Pmode : insn_op->mode);
891 op[arity] = copy_to_mode_reg (target_mode, op[arity]);
892 }
893
894 if (!insn_op->predicate (op[arity], insn_op->mode))
895 {
896 error ("Invalid argument %d for builtin %qF", arity + 1, fndecl);
897 return const0_rtx;
898 }
899 arity++;
900 }
901
902 if (last_vec_mode != VOIDmode && !TARGET_VX)
903 {
904 error ("Vector type builtin %qF is not supported without -mvx "
905 "(default with -march=z13).",
906 fndecl);
907 return const0_rtx;
908 }
909
910 switch (arity)
911 {
912 case 0:
913 pat = GEN_FCN (icode) (target);
914 break;
915 case 1:
916 if (nonvoid)
917 pat = GEN_FCN (icode) (target, op[0]);
918 else
919 pat = GEN_FCN (icode) (op[0]);
920 break;
921 case 2:
922 if (nonvoid)
923 pat = GEN_FCN (icode) (target, op[0], op[1]);
924 else
925 pat = GEN_FCN (icode) (op[0], op[1]);
926 break;
927 case 3:
928 if (nonvoid)
929 pat = GEN_FCN (icode) (target, op[0], op[1], op[2]);
930 else
931 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
932 break;
933 case 4:
934 if (nonvoid)
935 pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3]);
936 else
937 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
938 break;
939 case 5:
940 if (nonvoid)
941 pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4]);
942 else
943 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4]);
944 break;
945 case 6:
946 if (nonvoid)
947 pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4], op[5]);
948 else
949 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4], op[5]);
950 break;
951 default:
952 gcc_unreachable ();
953 }
954 if (!pat)
955 return NULL_RTX;
956 emit_insn (pat);
957
958 if (nonvoid)
959 return target;
960 else
961 return const0_rtx;
962 }
963
964
965 static const int s390_hotpatch_hw_max = 1000000;
966 static int s390_hotpatch_hw_before_label = 0;
967 static int s390_hotpatch_hw_after_label = 0;
968
 969 /* Check whether the hotpatch attribute is applied to a function and
 970 whether its arguments are valid. */
971
972 static tree
973 s390_handle_hotpatch_attribute (tree *node, tree name, tree args,
974 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
975 {
976 tree expr;
977 tree expr2;
978 int err;
979
980 if (TREE_CODE (*node) != FUNCTION_DECL)
981 {
982 warning (OPT_Wattributes, "%qE attribute only applies to functions",
983 name);
984 *no_add_attrs = true;
985 }
986 if (args != NULL && TREE_CHAIN (args) != NULL)
987 {
988 expr = TREE_VALUE (args);
989 expr2 = TREE_VALUE (TREE_CHAIN (args));
990 }
991 if (args == NULL || TREE_CHAIN (args) == NULL)
992 err = 1;
993 else if (TREE_CODE (expr) != INTEGER_CST
994 || !INTEGRAL_TYPE_P (TREE_TYPE (expr))
995 || wi::gtu_p (expr, s390_hotpatch_hw_max))
996 err = 1;
997 else if (TREE_CODE (expr2) != INTEGER_CST
998 || !INTEGRAL_TYPE_P (TREE_TYPE (expr2))
999 || wi::gtu_p (expr2, s390_hotpatch_hw_max))
1000 err = 1;
1001 else
1002 err = 0;
1003 if (err)
1004 {
1005 error ("requested %qE attribute is not a comma separated pair of"
1006 " non-negative integer constants or too large (max. %d)", name,
1007 s390_hotpatch_hw_max);
1008 *no_add_attrs = true;
1009 }
1010
1011 return NULL_TREE;
1012 }
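/* For example, a function declared as

     void foo (void) __attribute__ ((hotpatch (1, 2)));

   passes the check above: it requests 1 halfword of padding before and
   2 halfwords after the function label (both arguments must be
   non-negative and no larger than s390_hotpatch_hw_max).  */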
1013
1014 /* Expand the s390_vector_bool type attribute. */
1015
1016 static tree
1017 s390_handle_vectorbool_attribute (tree *node, tree name ATTRIBUTE_UNUSED,
1018 tree args ATTRIBUTE_UNUSED,
1019 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1020 {
1021 tree type = *node, result = NULL_TREE;
1022 machine_mode mode;
1023
1024 while (POINTER_TYPE_P (type)
1025 || TREE_CODE (type) == FUNCTION_TYPE
1026 || TREE_CODE (type) == METHOD_TYPE
1027 || TREE_CODE (type) == ARRAY_TYPE)
1028 type = TREE_TYPE (type);
1029
1030 mode = TYPE_MODE (type);
1031 switch (mode)
1032 {
1033 case DImode: case V2DImode: result = s390_builtin_types[BT_BV2DI]; break;
1034 case SImode: case V4SImode: result = s390_builtin_types[BT_BV4SI]; break;
1035 case HImode: case V8HImode: result = s390_builtin_types[BT_BV8HI]; break;
1036 case QImode: case V16QImode: result = s390_builtin_types[BT_BV16QI];
1037 default: break;
1038 }
1039
1040 *no_add_attrs = true; /* No need to hang on to the attribute. */
1041
1042 if (result)
1043 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
1044
1045 return NULL_TREE;
1046 }
1047
1048 static const struct attribute_spec s390_attribute_table[] = {
1049 { "hotpatch", 2, 2, true, false, false, s390_handle_hotpatch_attribute, false },
1050 { "s390_vector_bool", 0, 0, false, true, false, s390_handle_vectorbool_attribute, true },
1051 /* End element. */
1052 { NULL, 0, 0, false, false, false, NULL, false }
1053 };
1054
1055 /* Return the alignment for LABEL. We default to the -falign-labels
1056 value except for the literal pool base label. */
1057 int
1058 s390_label_align (rtx label)
1059 {
1060 rtx_insn *prev_insn = prev_active_insn (label);
1061 rtx set, src;
1062
1063 if (prev_insn == NULL_RTX)
1064 goto old;
1065
1066 set = single_set (prev_insn);
1067
1068 if (set == NULL_RTX)
1069 goto old;
1070
1071 src = SET_SRC (set);
1072
1073 /* Don't align literal pool base labels. */
1074 if (GET_CODE (src) == UNSPEC
1075 && XINT (src, 1) == UNSPEC_MAIN_BASE)
1076 return 0;
1077
1078 old:
1079 return align_labels_log;
1080 }
1081
1082 static machine_mode
1083 s390_libgcc_cmp_return_mode (void)
1084 {
1085 return TARGET_64BIT ? DImode : SImode;
1086 }
1087
1088 static machine_mode
1089 s390_libgcc_shift_count_mode (void)
1090 {
1091 return TARGET_64BIT ? DImode : SImode;
1092 }
1093
1094 static machine_mode
1095 s390_unwind_word_mode (void)
1096 {
1097 return TARGET_64BIT ? DImode : SImode;
1098 }
1099
1100 /* Return true if the back end supports mode MODE. */
1101 static bool
1102 s390_scalar_mode_supported_p (machine_mode mode)
1103 {
1104 /* In contrast to the default implementation reject TImode constants on 31bit
1105 TARGET_ZARCH for ABI compliance. */
1106 if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
1107 return false;
1108
1109 if (DECIMAL_FLOAT_MODE_P (mode))
1110 return default_decimal_float_supported_p ();
1111
1112 return default_scalar_mode_supported_p (mode);
1113 }
1114
1115 /* Return true if the back end supports vector mode MODE. */
1116 static bool
1117 s390_vector_mode_supported_p (machine_mode mode)
1118 {
1119 machine_mode inner;
1120
1121 if (!VECTOR_MODE_P (mode)
1122 || !TARGET_VX
1123 || GET_MODE_SIZE (mode) > 16)
1124 return false;
1125
1126 inner = GET_MODE_INNER (mode);
1127
1128 switch (inner)
1129 {
1130 case QImode:
1131 case HImode:
1132 case SImode:
1133 case DImode:
1134 case TImode:
1135 case SFmode:
1136 case DFmode:
1137 case TFmode:
1138 return true;
1139 default:
1140 return false;
1141 }
1142 }
1143
1144 /* Set the has_landing_pad_p flag in struct machine_function to VALUE. */
1145
1146 void
1147 s390_set_has_landing_pad_p (bool value)
1148 {
1149 cfun->machine->has_landing_pad_p = value;
1150 }
1151
1152 /* If two condition code modes are compatible, return a condition code
1153 mode which is compatible with both. Otherwise, return
1154 VOIDmode. */
1155
1156 static machine_mode
1157 s390_cc_modes_compatible (machine_mode m1, machine_mode m2)
1158 {
1159 if (m1 == m2)
1160 return m1;
1161
1162 switch (m1)
1163 {
1164 case CCZmode:
1165 if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
1166 || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
1167 return m2;
1168 return VOIDmode;
1169
1170 case CCSmode:
1171 case CCUmode:
1172 case CCTmode:
1173 case CCSRmode:
1174 case CCURmode:
1175 case CCZ1mode:
1176 if (m2 == CCZmode)
1177 return m1;
1178
1179 return VOIDmode;
1180
1181 default:
1182 return VOIDmode;
1183 }
1184 return VOIDmode;
1185 }
1186
1187 /* Return true if SET either doesn't set the CC register, or else
1188 the source and destination have matching CC modes and that
1189 CC mode is at least as constrained as REQ_MODE. */
1190
1191 static bool
1192 s390_match_ccmode_set (rtx set, machine_mode req_mode)
1193 {
1194 machine_mode set_mode;
1195
1196 gcc_assert (GET_CODE (set) == SET);
1197
1198 if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
1199 return 1;
1200
1201 set_mode = GET_MODE (SET_DEST (set));
1202 switch (set_mode)
1203 {
1204 case CCSmode:
1205 case CCSRmode:
1206 case CCUmode:
1207 case CCURmode:
1208 case CCLmode:
1209 case CCL1mode:
1210 case CCL2mode:
1211 case CCL3mode:
1212 case CCT1mode:
1213 case CCT2mode:
1214 case CCT3mode:
1215 case CCVEQmode:
1216 case CCVHmode:
1217 case CCVHUmode:
1218 case CCVFHmode:
1219 case CCVFHEmode:
1220 if (req_mode != set_mode)
1221 return 0;
1222 break;
1223
1224 case CCZmode:
1225 if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
1226 && req_mode != CCSRmode && req_mode != CCURmode)
1227 return 0;
1228 break;
1229
1230 case CCAPmode:
1231 case CCANmode:
1232 if (req_mode != CCAmode)
1233 return 0;
1234 break;
1235
1236 default:
1237 gcc_unreachable ();
1238 }
1239
1240 return (GET_MODE (SET_SRC (set)) == set_mode);
1241 }
1242
1243 /* Return true if every SET in INSN that sets the CC register
1244 has source and destination with matching CC modes and that
1245 CC mode is at least as constrained as REQ_MODE.
1246 If REQ_MODE is VOIDmode, always return false. */
1247
1248 bool
1249 s390_match_ccmode (rtx_insn *insn, machine_mode req_mode)
1250 {
1251 int i;
1252
1253 /* s390_tm_ccmode returns VOIDmode to indicate failure. */
1254 if (req_mode == VOIDmode)
1255 return false;
1256
1257 if (GET_CODE (PATTERN (insn)) == SET)
1258 return s390_match_ccmode_set (PATTERN (insn), req_mode);
1259
1260 if (GET_CODE (PATTERN (insn)) == PARALLEL)
1261 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1262 {
1263 rtx set = XVECEXP (PATTERN (insn), 0, i);
1264 if (GET_CODE (set) == SET)
1265 if (!s390_match_ccmode_set (set, req_mode))
1266 return false;
1267 }
1268
1269 return true;
1270 }
1271
1272 /* If a test-under-mask instruction can be used to implement
1273 (compare (and ... OP1) OP2), return the CC mode required
1274 to do that. Otherwise, return VOIDmode.
1275 MIXED is true if the instruction can distinguish between
1276 CC1 and CC2 for mixed selected bits (TMxx), it is false
1277 if the instruction cannot (TM). */
1278
1279 machine_mode
1280 s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
1281 {
1282 int bit0, bit1;
1283
1284 /* ??? Fixme: should work on CONST_DOUBLE as well. */
1285 if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
1286 return VOIDmode;
1287
1288 /* Selected bits all zero: CC0.
1289 e.g.: int a; if ((a & (16 + 128)) == 0) */
1290 if (INTVAL (op2) == 0)
1291 return CCTmode;
1292
1293 /* Selected bits all one: CC3.
1294 e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
1295 if (INTVAL (op2) == INTVAL (op1))
1296 return CCT3mode;
1297
1298 /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
1299 int a;
1300 if ((a & (16 + 128)) == 16) -> CCT1
1301 if ((a & (16 + 128)) == 128) -> CCT2 */
1302 if (mixed)
1303 {
1304 bit1 = exact_log2 (INTVAL (op2));
1305 bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
1306 if (bit0 != -1 && bit1 != -1)
1307 return bit0 > bit1 ? CCT1mode : CCT2mode;
1308 }
1309
1310 return VOIDmode;
1311 }
1312
1313 /* Given a comparison code OP (EQ, NE, etc.) and the operands
1314 OP0 and OP1 of a COMPARE, return the mode to be used for the
1315 comparison. */
1316
1317 machine_mode
1318 s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
1319 {
1320 if (TARGET_VX
1321 && register_operand (op0, DFmode)
1322 && register_operand (op1, DFmode))
1323 {
1324 /* LT, LE, UNGT, UNGE require swapping OP0 and OP1. Either
1325 s390_emit_compare or s390_canonicalize_comparison will take
1326 care of it. */
1327 switch (code)
1328 {
1329 case EQ:
1330 case NE:
1331 return CCVEQmode;
1332 case GT:
1333 case UNLE:
1334 return CCVFHmode;
1335 case GE:
1336 case UNLT:
1337 return CCVFHEmode;
1338 default:
1339 ;
1340 }
1341 }
1342
1343 switch (code)
1344 {
1345 case EQ:
1346 case NE:
1347 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
1348 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1349 return CCAPmode;
1350 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
1351 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
1352 return CCAPmode;
1353 if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1354 || GET_CODE (op1) == NEG)
1355 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1356 return CCLmode;
1357
1358 if (GET_CODE (op0) == AND)
1359 {
1360 /* Check whether we can potentially do it via TM. */
1361 machine_mode ccmode;
1362 ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
1363 if (ccmode != VOIDmode)
1364 {
1365 /* Relax CCTmode to CCZmode to allow fall-back to AND
1366 if that turns out to be beneficial. */
1367 return ccmode == CCTmode ? CCZmode : ccmode;
1368 }
1369 }
1370
1371 if (register_operand (op0, HImode)
1372 && GET_CODE (op1) == CONST_INT
1373 && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
1374 return CCT3mode;
1375 if (register_operand (op0, QImode)
1376 && GET_CODE (op1) == CONST_INT
1377 && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
1378 return CCT3mode;
1379
1380 return CCZmode;
1381
1382 case LE:
1383 case LT:
1384 case GE:
1385 case GT:
 1386 /* The only overflow condition of NEG and ABS happens when
 1387 INT_MIN is used as parameter, in which case the result stays
 1388 negative. So we have an overflow from a positive value to a negative.
1389 Using CCAP mode the resulting cc can be used for comparisons. */
1390 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
1391 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1392 return CCAPmode;
1393
1394 /* If constants are involved in an add instruction it is possible to use
1395 the resulting cc for comparisons with zero. Knowing the sign of the
1396 constant the overflow behavior gets predictable. e.g.:
1397 int a, b; if ((b = a + c) > 0)
1398 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
1399 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
1400 && (CONST_OK_FOR_K (INTVAL (XEXP (op0, 1)))
1401 || (CONST_OK_FOR_CONSTRAINT_P (INTVAL (XEXP (op0, 1)), 'O', "Os")
1402 /* Avoid INT32_MIN on 32 bit. */
1403 && (!TARGET_ZARCH || INTVAL (XEXP (op0, 1)) != -0x7fffffff - 1))))
1404 {
1405 if (INTVAL (XEXP((op0), 1)) < 0)
1406 return CCANmode;
1407 else
1408 return CCAPmode;
1409 }
1410 /* Fall through. */
1411 case UNORDERED:
1412 case ORDERED:
1413 case UNEQ:
1414 case UNLE:
1415 case UNLT:
1416 case UNGE:
1417 case UNGT:
1418 case LTGT:
1419 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
1420 && GET_CODE (op1) != CONST_INT)
1421 return CCSRmode;
1422 return CCSmode;
1423
1424 case LTU:
1425 case GEU:
1426 if (GET_CODE (op0) == PLUS
1427 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1428 return CCL1mode;
1429
1430 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
1431 && GET_CODE (op1) != CONST_INT)
1432 return CCURmode;
1433 return CCUmode;
1434
1435 case LEU:
1436 case GTU:
1437 if (GET_CODE (op0) == MINUS
1438 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1439 return CCL2mode;
1440
1441 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
1442 && GET_CODE (op1) != CONST_INT)
1443 return CCURmode;
1444 return CCUmode;
1445
1446 default:
1447 gcc_unreachable ();
1448 }
1449 }
1450
1451 /* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
1452 that we can implement more efficiently. */
1453
1454 static void
1455 s390_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
1456 bool op0_preserve_value)
1457 {
1458 if (op0_preserve_value)
1459 return;
1460
1461 /* Convert ZERO_EXTRACT back to AND to enable TM patterns. */
1462 if ((*code == EQ || *code == NE)
1463 && *op1 == const0_rtx
1464 && GET_CODE (*op0) == ZERO_EXTRACT
1465 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
1466 && GET_CODE (XEXP (*op0, 2)) == CONST_INT
1467 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
1468 {
1469 rtx inner = XEXP (*op0, 0);
1470 HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
1471 HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
1472 HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));
1473
1474 if (len > 0 && len < modesize
1475 && pos >= 0 && pos + len <= modesize
1476 && modesize <= HOST_BITS_PER_WIDE_INT)
1477 {
1478 unsigned HOST_WIDE_INT block;
1479 block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
1480 block <<= modesize - pos - len;
1481
1482 *op0 = gen_rtx_AND (GET_MODE (inner), inner,
1483 gen_int_mode (block, GET_MODE (inner)));
1484 }
1485 }
1486
1487 /* Narrow AND of memory against immediate to enable TM. */
1488 if ((*code == EQ || *code == NE)
1489 && *op1 == const0_rtx
1490 && GET_CODE (*op0) == AND
1491 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
1492 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
1493 {
1494 rtx inner = XEXP (*op0, 0);
1495 rtx mask = XEXP (*op0, 1);
1496
1497 /* Ignore paradoxical SUBREGs if all extra bits are masked out. */
1498 if (GET_CODE (inner) == SUBREG
1499 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
1500 && (GET_MODE_SIZE (GET_MODE (inner))
1501 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
1502 && ((INTVAL (mask)
1503 & GET_MODE_MASK (GET_MODE (inner))
1504 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
1505 == 0))
1506 inner = SUBREG_REG (inner);
1507
1508 /* Do not change volatile MEMs. */
1509 if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
1510 {
1511 int part = s390_single_part (XEXP (*op0, 1),
1512 GET_MODE (inner), QImode, 0);
1513 if (part >= 0)
1514 {
1515 mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
1516 inner = adjust_address_nv (inner, QImode, part);
1517 *op0 = gen_rtx_AND (QImode, inner, mask);
1518 }
1519 }
1520 }
1521
1522 /* Narrow comparisons against 0xffff to HImode if possible. */
1523 if ((*code == EQ || *code == NE)
1524 && GET_CODE (*op1) == CONST_INT
1525 && INTVAL (*op1) == 0xffff
1526 && SCALAR_INT_MODE_P (GET_MODE (*op0))
1527 && (nonzero_bits (*op0, GET_MODE (*op0))
1528 & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
1529 {
1530 *op0 = gen_lowpart (HImode, *op0);
1531 *op1 = constm1_rtx;
1532 }
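  /* For example, (x == 0xffff) with x known to fit in 16 bits becomes
     ((HImode) x == -1), i.e. a halfword comparison against all-ones.  */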
1533
1534 /* Remove redundant UNSPEC_STRCMPCC_TO_INT conversions if possible. */
1535 if (GET_CODE (*op0) == UNSPEC
1536 && XINT (*op0, 1) == UNSPEC_STRCMPCC_TO_INT
1537 && XVECLEN (*op0, 0) == 1
1538 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
1539 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
1540 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
1541 && *op1 == const0_rtx)
1542 {
1543 enum rtx_code new_code = UNKNOWN;
1544 switch (*code)
1545 {
1546 case EQ: new_code = EQ; break;
1547 case NE: new_code = NE; break;
1548 case LT: new_code = GTU; break;
1549 case GT: new_code = LTU; break;
1550 case LE: new_code = GEU; break;
1551 case GE: new_code = LEU; break;
1552 default: break;
1553 }
1554
1555 if (new_code != UNKNOWN)
1556 {
1557 *op0 = XVECEXP (*op0, 0, 0);
1558 *code = new_code;
1559 }
1560 }
1561
1562 /* Remove redundant UNSPEC_CC_TO_INT conversions if possible. */
1563 if (GET_CODE (*op0) == UNSPEC
1564 && XINT (*op0, 1) == UNSPEC_CC_TO_INT
1565 && XVECLEN (*op0, 0) == 1
1566 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
1567 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
1568 && CONST_INT_P (*op1))
1569 {
1570 enum rtx_code new_code = UNKNOWN;
1571 switch (GET_MODE (XVECEXP (*op0, 0, 0)))
1572 {
1573 case CCZmode:
1574 case CCRAWmode:
1575 switch (*code)
1576 {
1577 case EQ: new_code = EQ; break;
1578 case NE: new_code = NE; break;
1579 default: break;
1580 }
1581 break;
1582 default: break;
1583 }
1584
1585 if (new_code != UNKNOWN)
1586 {
1587 /* For CCRAWmode put the required cc mask into the second
1588 operand. */
1589 if (GET_MODE (XVECEXP (*op0, 0, 0)) == CCRAWmode
1590 && INTVAL (*op1) >= 0 && INTVAL (*op1) <= 3)
1591 *op1 = gen_rtx_CONST_INT (VOIDmode, 1 << (3 - INTVAL (*op1)));
1592 *op0 = XVECEXP (*op0, 0, 0);
1593 *code = new_code;
1594 }
1595 }
1596
1597 /* Simplify cascaded EQ, NE with const0_rtx. */
1598 if ((*code == NE || *code == EQ)
1599 && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
1600 && GET_MODE (*op0) == SImode
1601 && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
1602 && REG_P (XEXP (*op0, 0))
1603 && XEXP (*op0, 1) == const0_rtx
1604 && *op1 == const0_rtx)
1605 {
1606 if ((*code == EQ && GET_CODE (*op0) == NE)
1607 || (*code == NE && GET_CODE (*op0) == EQ))
1608 *code = EQ;
1609 else
1610 *code = NE;
1611 *op0 = XEXP (*op0, 0);
1612 }
1613
1614 /* Prefer register over memory as first operand. */
1615 if (MEM_P (*op0) && REG_P (*op1))
1616 {
1617 rtx tem = *op0; *op0 = *op1; *op1 = tem;
1618 *code = (int)swap_condition ((enum rtx_code)*code);
1619 }
1620
1621 /* Using the scalar variants of vector instructions for 64 bit FP
1622 comparisons might require swapping the operands. */
1623 if (TARGET_VX
1624 && register_operand (*op0, DFmode)
1625 && register_operand (*op1, DFmode)
1626 && (*code == LT || *code == LE || *code == UNGT || *code == UNGE))
1627 {
1628 rtx tmp;
1629
1630 switch (*code)
1631 {
1632 case LT: *code = GT; break;
1633 case LE: *code = GE; break;
1634 case UNGT: *code = UNLE; break;
1635 case UNGE: *code = UNLT; break;
1636 default: ;
1637 }
1638 tmp = *op0; *op0 = *op1; *op1 = tmp;
1639 }
1640 }
1641
1642 /* Helper function for s390_emit_compare. If possible emit a 64 bit
1643 FP compare using the single element variant of vector instructions.
1644 Replace CODE with the comparison code to be used in the CC reg
1645 compare and return the condition code register RTX in CC. */
1646
1647 static bool
1648 s390_expand_vec_compare_scalar (enum rtx_code *code, rtx cmp1, rtx cmp2,
1649 rtx *cc)
1650 {
1651 machine_mode cmp_mode;
1652 bool swap_p = false;
1653
1654 switch (*code)
1655 {
1656 case EQ: cmp_mode = CCVEQmode; break;
1657 case NE: cmp_mode = CCVEQmode; break;
1658 case GT: cmp_mode = CCVFHmode; break;
1659 case GE: cmp_mode = CCVFHEmode; break;
1660 case UNLE: cmp_mode = CCVFHmode; break;
1661 case UNLT: cmp_mode = CCVFHEmode; break;
1662 case LT: cmp_mode = CCVFHmode; *code = GT; swap_p = true; break;
1663 case LE: cmp_mode = CCVFHEmode; *code = GE; swap_p = true; break;
1664 case UNGE: cmp_mode = CCVFHmode; *code = UNLE; swap_p = true; break;
1665 case UNGT: cmp_mode = CCVFHEmode; *code = UNLT; swap_p = true; break;
1666 default: return false;
1667 }
1668
1669 if (swap_p)
1670 {
1671 rtx tmp = cmp2;
1672 cmp2 = cmp1;
1673 cmp1 = tmp;
1674 }
1675 *cc = gen_rtx_REG (cmp_mode, CC_REGNUM);
1676 emit_insn (gen_rtx_PARALLEL (VOIDmode,
1677 gen_rtvec (2,
1678 gen_rtx_SET (*cc,
1679 gen_rtx_COMPARE (cmp_mode, cmp1,
1680 cmp2)),
1681 gen_rtx_CLOBBER (VOIDmode,
1682 gen_rtx_SCRATCH (V2DImode)))));
1683 return true;
1684 }
1685
1686
1687 /* Emit a compare instruction suitable to implement the comparison
1688 OP0 CODE OP1. Return the correct condition RTL to be placed in
1689 the IF_THEN_ELSE of the conditional branch testing the result. */
1690
1691 rtx
1692 s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
1693 {
1694 machine_mode mode = s390_select_ccmode (code, op0, op1);
1695 rtx cc;
1696
1697 if (TARGET_VX
1698 && register_operand (op0, DFmode)
1699 && register_operand (op1, DFmode)
1700 && s390_expand_vec_compare_scalar (&code, op0, op1, &cc))
1701 {
1702 /* Work has been done by s390_expand_vec_compare_scalar already. */
1703 }
1704 else if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
1705 {
1706 /* Do not output a redundant compare instruction if a
1707 compare_and_swap pattern already computed the result and the
1708 machine modes are compatible. */
1709 gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
1710 == GET_MODE (op0));
1711 cc = op0;
1712 }
1713 else
1714 {
1715 cc = gen_rtx_REG (mode, CC_REGNUM);
1716 emit_insn (gen_rtx_SET (cc, gen_rtx_COMPARE (mode, op0, op1)));
1717 }
1718
1719 return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
1720 }
1721
1722 /* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
1723 matches CMP.
1724 Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
1725 conditional branch testing the result. */
1726
1727 static rtx
1728 s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem,
1729 rtx cmp, rtx new_rtx)
1730 {
1731 emit_insn (gen_atomic_compare_and_swapsi_internal (old, mem, cmp, new_rtx));
1732 return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM),
1733 const0_rtx);
1734 }
1735
1736 /* Emit a jump instruction to TARGET and return it. If COND is
1737 NULL_RTX, emit an unconditional jump, else a conditional jump under
1738 condition COND. */
1739
1740 rtx_insn *
1741 s390_emit_jump (rtx target, rtx cond)
1742 {
1743 rtx insn;
1744
1745 target = gen_rtx_LABEL_REF (VOIDmode, target);
1746 if (cond)
1747 target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);
1748
1749 insn = gen_rtx_SET (pc_rtx, target);
1750 return emit_jump_insn (insn);
1751 }
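/* Typical usage of the two emitters above (a sketch): the condition RTX
   returned by s390_emit_compare is passed as the COND argument of
   s390_emit_jump, e.g.

     cond = s390_emit_compare (EQ, op0, op1);
     s390_emit_jump (target_label, cond);

   which emits the compare followed by a conditional branch to
   TARGET_LABEL.  */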
1752
1753 /* Return branch condition mask to implement a branch
1754 specified by CODE. Return -1 for invalid comparisons. */
1755
1756 int
1757 s390_branch_condition_mask (rtx code)
1758 {
1759 const int CC0 = 1 << 3;
1760 const int CC1 = 1 << 2;
1761 const int CC2 = 1 << 1;
1762 const int CC3 = 1 << 0;
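  /* For example, an EQ test in CCZmode maps to CC0 == 8 (branch only if
     the condition code is 0), while NE maps to CC1 | CC2 | CC3 == 7,
     the complementary mask used in the branch instruction's mask field.  */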
1763
1764 gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
1765 gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
1766 gcc_assert (XEXP (code, 1) == const0_rtx
1767 || (GET_MODE (XEXP (code, 0)) == CCRAWmode
1768 && CONST_INT_P (XEXP (code, 1))));
1769
1770
1771 switch (GET_MODE (XEXP (code, 0)))
1772 {
1773 case CCZmode:
1774 case CCZ1mode:
1775 switch (GET_CODE (code))
1776 {
1777 case EQ: return CC0;
1778 case NE: return CC1 | CC2 | CC3;
1779 default: return -1;
1780 }
1781 break;
1782
1783 case CCT1mode:
1784 switch (GET_CODE (code))
1785 {
1786 case EQ: return CC1;
1787 case NE: return CC0 | CC2 | CC3;
1788 default: return -1;
1789 }
1790 break;
1791
1792 case CCT2mode:
1793 switch (GET_CODE (code))
1794 {
1795 case EQ: return CC2;
1796 case NE: return CC0 | CC1 | CC3;
1797 default: return -1;
1798 }
1799 break;
1800
1801 case CCT3mode:
1802 switch (GET_CODE (code))
1803 {
1804 case EQ: return CC3;
1805 case NE: return CC0 | CC1 | CC2;
1806 default: return -1;
1807 }
1808 break;
1809
1810 case CCLmode:
1811 switch (GET_CODE (code))
1812 {
1813 case EQ: return CC0 | CC2;
1814 case NE: return CC1 | CC3;
1815 default: return -1;
1816 }
1817 break;
1818
1819 case CCL1mode:
1820 switch (GET_CODE (code))
1821 {
1822 case LTU: return CC2 | CC3; /* carry */
1823 case GEU: return CC0 | CC1; /* no carry */
1824 default: return -1;
1825 }
1826 break;
1827
1828 case CCL2mode:
1829 switch (GET_CODE (code))
1830 {
1831 case GTU: return CC0 | CC1; /* borrow */
1832 case LEU: return CC2 | CC3; /* no borrow */
1833 default: return -1;
1834 }
1835 break;
1836
1837 case CCL3mode:
1838 switch (GET_CODE (code))
1839 {
1840 case EQ: return CC0 | CC2;
1841 case NE: return CC1 | CC3;
1842 case LTU: return CC1;
1843 case GTU: return CC3;
1844 case LEU: return CC1 | CC2;
1845 case GEU: return CC2 | CC3;
1846 default: return -1;
1847 }
1848
1849 case CCUmode:
1850 switch (GET_CODE (code))
1851 {
1852 case EQ: return CC0;
1853 case NE: return CC1 | CC2 | CC3;
1854 case LTU: return CC1;
1855 case GTU: return CC2;
1856 case LEU: return CC0 | CC1;
1857 case GEU: return CC0 | CC2;
1858 default: return -1;
1859 }
1860 break;
1861
1862 case CCURmode:
1863 switch (GET_CODE (code))
1864 {
1865 case EQ: return CC0;
1866 case NE: return CC2 | CC1 | CC3;
1867 case LTU: return CC2;
1868 case GTU: return CC1;
1869 case LEU: return CC0 | CC2;
1870 case GEU: return CC0 | CC1;
1871 default: return -1;
1872 }
1873 break;
1874
1875 case CCAPmode:
1876 switch (GET_CODE (code))
1877 {
1878 case EQ: return CC0;
1879 case NE: return CC1 | CC2 | CC3;
1880 case LT: return CC1 | CC3;
1881 case GT: return CC2;
1882 case LE: return CC0 | CC1 | CC3;
1883 case GE: return CC0 | CC2;
1884 default: return -1;
1885 }
1886 break;
1887
1888 case CCANmode:
1889 switch (GET_CODE (code))
1890 {
1891 case EQ: return CC0;
1892 case NE: return CC1 | CC2 | CC3;
1893 case LT: return CC1;
1894 case GT: return CC2 | CC3;
1895 case LE: return CC0 | CC1;
1896 case GE: return CC0 | CC2 | CC3;
1897 default: return -1;
1898 }
1899 break;
1900
1901 case CCSmode:
1902 switch (GET_CODE (code))
1903 {
1904 case EQ: return CC0;
1905 case NE: return CC1 | CC2 | CC3;
1906 case LT: return CC1;
1907 case GT: return CC2;
1908 case LE: return CC0 | CC1;
1909 case GE: return CC0 | CC2;
1910 case UNORDERED: return CC3;
1911 case ORDERED: return CC0 | CC1 | CC2;
1912 case UNEQ: return CC0 | CC3;
1913 case UNLT: return CC1 | CC3;
1914 case UNGT: return CC2 | CC3;
1915 case UNLE: return CC0 | CC1 | CC3;
1916 case UNGE: return CC0 | CC2 | CC3;
1917 case LTGT: return CC1 | CC2;
1918 default: return -1;
1919 }
1920 break;
1921
1922 case CCSRmode:
1923 switch (GET_CODE (code))
1924 {
1925 case EQ: return CC0;
1926 case NE: return CC2 | CC1 | CC3;
1927 case LT: return CC2;
1928 case GT: return CC1;
1929 case LE: return CC0 | CC2;
1930 case GE: return CC0 | CC1;
1931 case UNORDERED: return CC3;
1932 case ORDERED: return CC0 | CC2 | CC1;
1933 case UNEQ: return CC0 | CC3;
1934 case UNLT: return CC2 | CC3;
1935 case UNGT: return CC1 | CC3;
1936 case UNLE: return CC0 | CC2 | CC3;
1937 case UNGE: return CC0 | CC1 | CC3;
1938 case LTGT: return CC2 | CC1;
1939 default: return -1;
1940 }
1941 break;
1942
1943 /* Vector comparison modes. */
1944
1945 case CCVEQmode:
1946 switch (GET_CODE (code))
1947 {
1948 case EQ: return CC0;
1949 case NE: return CC3;
1950 default: return -1;
1951 }
1952
1953 case CCVEQANYmode:
1954 switch (GET_CODE (code))
1955 {
1956 case EQ: return CC0 | CC1;
1957 case NE: return CC3 | CC1;
1958 default: return -1;
1959 }
1960
1961 /* Integer vector compare modes. */
1962
1963 case CCVHmode:
1964 switch (GET_CODE (code))
1965 {
1966 case GT: return CC0;
1967 case LE: return CC3;
1968 default: return -1;
1969 }
1970
1971 case CCVHANYmode:
1972 switch (GET_CODE (code))
1973 {
1974 case GT: return CC0 | CC1;
1975 case LE: return CC3 | CC1;
1976 default: return -1;
1977 }
1978
1979 case CCVHUmode:
1980 switch (GET_CODE (code))
1981 {
1982 case GTU: return CC0;
1983 case LEU: return CC3;
1984 default: return -1;
1985 }
1986
1987 case CCVHUANYmode:
1988 switch (GET_CODE (code))
1989 {
1990 case GTU: return CC0 | CC1;
1991 case LEU: return CC3 | CC1;
1992 default: return -1;
1993 }
1994
1995 /* FP vector compare modes. */
1996
1997 case CCVFHmode:
1998 switch (GET_CODE (code))
1999 {
2000 case GT: return CC0;
2001 case UNLE: return CC3;
2002 default: return -1;
2003 }
2004
2005 case CCVFHANYmode:
2006 switch (GET_CODE (code))
2007 {
2008 case GT: return CC0 | CC1;
2009 case UNLE: return CC3 | CC1;
2010 default: return -1;
2011 }
2012
2013 case CCVFHEmode:
2014 switch (GET_CODE (code))
2015 {
2016 case GE: return CC0;
2017 case UNLT: return CC3;
2018 default: return -1;
2019 }
2020
2021 case CCVFHEANYmode:
2022 switch (GET_CODE (code))
2023 {
2024 case GE: return CC0 | CC1;
2025 case UNLT: return CC3 | CC1;
2026 default: return -1;
2027 }
2028
2029
2030 case CCRAWmode:
2031 switch (GET_CODE (code))
2032 {
2033 case EQ:
2034 return INTVAL (XEXP (code, 1));
2035 case NE:
2036 return (INTVAL (XEXP (code, 1))) ^ 0xf;
2037 default:
2038 gcc_unreachable ();
2039 }
2040
2041 default:
2042 return -1;
2043 }
2044 }
2045
2046
2047 /* Return branch condition mask to implement a compare and branch
2048 specified by CODE. Return -1 for invalid comparisons. */
2049
2050 int
2051 s390_compare_and_branch_condition_mask (rtx code)
2052 {
2053 const int CC0 = 1 << 3;
2054 const int CC1 = 1 << 2;
2055 const int CC2 = 1 << 1;
2056
2057 switch (GET_CODE (code))
2058 {
2059 case EQ:
2060 return CC0;
2061 case NE:
2062 return CC1 | CC2;
2063 case LT:
2064 case LTU:
2065 return CC1;
2066 case GT:
2067 case GTU:
2068 return CC2;
2069 case LE:
2070 case LEU:
2071 return CC0 | CC1;
2072 case GE:
2073 case GEU:
2074 return CC0 | CC2;
2075 default:
2076 gcc_unreachable ();
2077 }
2078 return -1;
2079 }
2080
2081 /* If INV is false, return assembler mnemonic string to implement
2082 a branch specified by CODE. If INV is true, return mnemonic
2083 for the corresponding inverted branch. */
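/* For example, an EQ comparison on the CC register yields mask 8
(CC0) and hence the mnemonic "e"; with INV set the mask becomes
8 ^ 15 == 7 and "ne" is returned instead. */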
2084
2085 static const char *
2086 s390_branch_condition_mnemonic (rtx code, int inv)
2087 {
2088 int mask;
2089
2090 static const char *const mnemonic[16] =
2091 {
2092 NULL, "o", "h", "nle",
2093 "l", "nhe", "lh", "ne",
2094 "e", "nlh", "he", "nl",
2095 "le", "nh", "no", NULL
2096 };
2097
2098 if (GET_CODE (XEXP (code, 0)) == REG
2099 && REGNO (XEXP (code, 0)) == CC_REGNUM
2100 && (XEXP (code, 1) == const0_rtx
2101 || (GET_MODE (XEXP (code, 0)) == CCRAWmode
2102 && CONST_INT_P (XEXP (code, 1)))))
2103 mask = s390_branch_condition_mask (code);
2104 else
2105 mask = s390_compare_and_branch_condition_mask (code);
2106
2107 gcc_assert (mask >= 0);
2108
2109 if (inv)
2110 mask ^= 15;
2111
2112 gcc_assert (mask >= 1 && mask <= 14);
2113
2114 return mnemonic[mask];
2115 }
2116
2117 /* Return the part of OP which has a value different from DEF.
2118 The size of the part is determined by MODE.
2119 Use this function only if you already know that OP really
2120 contains such a part. */
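/* For example, s390_extract_part (GEN_INT (0x1200), QImode, 0)
scans the QImode parts starting from the least significant one
and returns the first part differing from 0, i.e. 0x12. */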
2121
2122 unsigned HOST_WIDE_INT
2123 s390_extract_part (rtx op, machine_mode mode, int def)
2124 {
2125 unsigned HOST_WIDE_INT value = 0;
2126 int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
2127 int part_bits = GET_MODE_BITSIZE (mode);
2128 unsigned HOST_WIDE_INT part_mask
2129 = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
2130 int i;
2131
2132 for (i = 0; i < max_parts; i++)
2133 {
2134 if (i == 0)
2135 value = (unsigned HOST_WIDE_INT) INTVAL (op);
2136 else
2137 value >>= part_bits;
2138
2139 if ((value & part_mask) != (def & part_mask))
2140 return value & part_mask;
2141 }
2142
2143 gcc_unreachable ();
2144 }
2145
2146 /* If OP is an integer constant of mode MODE with exactly one
2147 part of mode PART_MODE unequal to DEF, return the number of that
2148 part. Otherwise, return -1. */
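/* For example, for OP == 0x0000123400000000, MODE == DImode,
PART_MODE == HImode and DEF == 0 exactly one HImode part (bits
32..47, value 0x1234) differs from zero. Parts are numbered from
the most significant one, so the result is 1. */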
2149
2150 int
2151 s390_single_part (rtx op,
2152 machine_mode mode,
2153 machine_mode part_mode,
2154 int def)
2155 {
2156 unsigned HOST_WIDE_INT value = 0;
2157 int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
2158 unsigned HOST_WIDE_INT part_mask
2159 = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
2160 int i, part = -1;
2161
2162 if (GET_CODE (op) != CONST_INT)
2163 return -1;
2164
2165 for (i = 0; i < n_parts; i++)
2166 {
2167 if (i == 0)
2168 value = (unsigned HOST_WIDE_INT) INTVAL (op);
2169 else
2170 value >>= GET_MODE_BITSIZE (part_mode);
2171
2172 if ((value & part_mask) != (def & part_mask))
2173 {
2174 if (part != -1)
2175 return -1;
2176 else
2177 part = i;
2178 }
2179 }
2180 return part == -1 ? -1 : n_parts - 1 - part;
2181 }
2182
2183 /* Return true if IN contains a contiguous bitfield in the lower SIZE
2184 bits and no other bits are set in IN. POS and LENGTH can be used
2185 to obtain the start position and the length of the bitfield.
2186
2187 POS gives the position of the first bit of the bitfield counting
2188 from the lowest order bit starting with zero. In order to use this
2189 value for S/390 instructions this has to be converted to "bits big
2190 endian" style. */
2191
2192 bool
2193 s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, int size,
2194 int *pos, int *length)
2195 {
2196 int tmp_pos = 0;
2197 int tmp_length = 0;
2198 int i;
2199 unsigned HOST_WIDE_INT mask = 1ULL;
2200 bool contiguous = false;
2201
2202 for (i = 0; i < size; mask <<= 1, i++)
2203 {
2204 if (contiguous)
2205 {
2206 if (mask & in)
2207 tmp_length++;
2208 else
2209 break;
2210 }
2211 else
2212 {
2213 if (mask & in)
2214 {
2215 contiguous = true;
2216 tmp_length++;
2217 }
2218 else
2219 tmp_pos++;
2220 }
2221 }
2222
2223 if (!tmp_length)
2224 return false;
2225
2226 /* Calculate a mask for all bits beyond the contiguous bits. */
2227 mask = (-1LL & ~(((1ULL << (tmp_length + tmp_pos - 1)) << 1) - 1));
2228
2229 if ((unsigned)size < sizeof (HOST_WIDE_INT) * BITS_PER_UNIT)
2230 mask &= (HOST_WIDE_INT_1U << size) - 1;
2231
2232 if (mask & in)
2233 return false;
2234
2235 if (tmp_length + tmp_pos - 1 > size)
2236 return false;
2237
2238 if (length)
2239 *length = tmp_length;
2240
2241 if (pos)
2242 *pos = tmp_pos;
2243
2244 return true;
2245 }
2246
2247 /* Return true if OP contains the same contiguous bitfield in *all*
2248 its elements. START and END can be used to obtain the start and
2249 end position of the bitfield.
2250
2251 START/STOP give the position of the first/last bit of the bitfield
2252 counting from the lowest order bit starting with zero. In order to
2253 use these values for S/390 instructions this has to be converted to
2254 "bits big endian" style. */
2255
2256 bool
2257 s390_contiguous_bitmask_vector_p (rtx op, int *start, int *end)
2258 {
2259 unsigned HOST_WIDE_INT mask;
2260 int length, size;
2261 rtx elt;
2262
2263 if (!const_vec_duplicate_p (op, &elt)
2264 || !CONST_INT_P (elt))
2265 return false;
2266
2267 size = GET_MODE_UNIT_BITSIZE (GET_MODE (op));
2268 mask = UINTVAL (elt);
2269 if (s390_contiguous_bitmask_p (mask, size, start,
2270 end != NULL ? &length : NULL))
2271 {
2272 if (end != NULL)
2273 *end = *start + length - 1;
2274 return true;
2275 }
2276 /* 0xff00000f style immediates can be covered by swapping start and
2277 end indices in vgm. */
2278 if (s390_contiguous_bitmask_p (~mask, size, start,
2279 end != NULL ? &length : NULL))
2280 {
2281 if (end != NULL)
2282 *end = *start - 1;
2283 if (start != NULL)
2284 *start = *start + length;
2285 return true;
2286 }
2287 return false;
2288 }
2289
2290 /* Return true if OP consists only of byte chunks that are either 0
2291 or 0xff. If MASK is non-NULL, a byte mask is generated which is
2292 appropriate for the vector generate byte mask instruction. */
2293
2294 bool
2295 s390_bytemask_vector_p (rtx op, unsigned *mask)
2296 {
2297 int i;
2298 unsigned tmp_mask = 0;
2299 int nunit, unit_size;
2300
2301 if (!VECTOR_MODE_P (GET_MODE (op))
2302 || GET_CODE (op) != CONST_VECTOR
2303 || !CONST_INT_P (XVECEXP (op, 0, 0)))
2304 return false;
2305
2306 nunit = GET_MODE_NUNITS (GET_MODE (op));
2307 unit_size = GET_MODE_UNIT_SIZE (GET_MODE (op));
2308
2309 for (i = 0; i < nunit; i++)
2310 {
2311 unsigned HOST_WIDE_INT c;
2312 int j;
2313
2314 if (!CONST_INT_P (XVECEXP (op, 0, i)))
2315 return false;
2316
2317 c = UINTVAL (XVECEXP (op, 0, i));
2318 for (j = 0; j < unit_size; j++)
2319 {
2320 if ((c & 0xff) != 0 && (c & 0xff) != 0xff)
2321 return false;
2322 tmp_mask |= (c & 1) << ((nunit - 1 - i) * unit_size + j);
2323 c = c >> BITS_PER_UNIT;
2324 }
2325 }
2326
2327 if (mask != NULL)
2328 *mask = tmp_mask;
2329
2330 return true;
2331 }
2332
2333 /* Check whether a rotate of ROTL followed by an AND of CONTIG is
2334 equivalent to a shift followed by the AND. In particular, CONTIG
2335 should not overlap the (rotated) bit 0/bit 63 gap. Negative values
2336 for ROTL indicate a rotate to the right. */
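/* For example, with BITSIZE == 64 and CONTIG == 0xff0 (pos 4,
length 8) left rotates of up to 4 bits and right rotates of up to
52 bits satisfy this condition. */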
2337
2338 bool
2339 s390_extzv_shift_ok (int bitsize, int rotl, unsigned HOST_WIDE_INT contig)
2340 {
2341 int pos, len;
2342 bool ok;
2343
2344 ok = s390_contiguous_bitmask_p (contig, bitsize, &pos, &len);
2345 gcc_assert (ok);
2346
2347 return ((rotl >= 0 && rotl <= pos)
2348 || (rotl < 0 && -rotl <= bitsize - len - pos));
2349 }
2350
2351 /* Check whether we can (and want to) split a double-word
2352 move in mode MODE from SRC to DST into two single-word
2353 moves, moving the subword FIRST_SUBWORD first. */
2354
2355 bool
2356 s390_split_ok_p (rtx dst, rtx src, machine_mode mode, int first_subword)
2357 {
2358 /* Floating point and vector registers cannot be split. */
2359 if (FP_REG_P (src) || FP_REG_P (dst) || VECTOR_REG_P (src) || VECTOR_REG_P (dst))
2360 return false;
2361
2362 /* We don't need to split if operands are directly accessible. */
2363 if (s_operand (src, mode) || s_operand (dst, mode))
2364 return false;
2365
2366 /* Non-offsettable memory references cannot be split. */
2367 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
2368 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
2369 return false;
2370
2371 /* Moving the first subword must not clobber a register
2372 needed to move the second subword. */
2373 if (register_operand (dst, mode))
2374 {
2375 rtx subreg = operand_subword (dst, first_subword, 0, mode);
2376 if (reg_overlap_mentioned_p (subreg, src))
2377 return false;
2378 }
2379
2380 return true;
2381 }
2382
2383 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
2384 and [MEM2, MEM2 + SIZE] overlap, and false
2385 otherwise. */
2386
2387 bool
2388 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
2389 {
2390 rtx addr1, addr2, addr_delta;
2391 HOST_WIDE_INT delta;
2392
2393 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
2394 return true;
2395
2396 if (size == 0)
2397 return false;
2398
2399 addr1 = XEXP (mem1, 0);
2400 addr2 = XEXP (mem2, 0);
2401
2402 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
2403
2404 /* This overlapping check is used by peepholes merging memory block operations.
2405 Overlapping operations would otherwise be recognized by the S/390 hardware
2406 and would fall back to a slower implementation. Allowing overlapping
2407 operations would lead to slow code but not to wrong code. Therefore we are
2408 somewhat optimistic if we cannot prove that the memory blocks are
2409 overlapping.
2410 That's why we return false here although this may accept operations on
2411 overlapping memory areas. */
2412 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
2413 return false;
2414
2415 delta = INTVAL (addr_delta);
2416
2417 if (delta == 0
2418 || (delta > 0 && delta < size)
2419 || (delta < 0 && -delta < size))
2420 return true;
2421
2422 return false;
2423 }
2424
2425 /* Check whether the address of memory reference MEM2 equals exactly
2426 the address of memory reference MEM1 plus DELTA. Return true if
2427 we can prove this to be the case, false otherwise. */
2428
2429 bool
2430 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
2431 {
2432 rtx addr1, addr2, addr_delta;
2433
2434 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
2435 return false;
2436
2437 addr1 = XEXP (mem1, 0);
2438 addr2 = XEXP (mem2, 0);
2439
2440 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
2441 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
2442 return false;
2443
2444 return true;
2445 }
2446
2447 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
2448
2449 void
2450 s390_expand_logical_operator (enum rtx_code code, machine_mode mode,
2451 rtx *operands)
2452 {
2453 machine_mode wmode = mode;
2454 rtx dst = operands[0];
2455 rtx src1 = operands[1];
2456 rtx src2 = operands[2];
2457 rtx op, clob, tem;
2458
2459 /* If we cannot handle the operation directly, use a temp register. */
2460 if (!s390_logical_operator_ok_p (operands))
2461 dst = gen_reg_rtx (mode);
2462
2463 /* QImode and HImode patterns make sense only if we have a destination
2464 in memory. Otherwise perform the operation in SImode. */
2465 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
2466 wmode = SImode;
2467
2468 /* Widen operands if required. */
2469 if (mode != wmode)
2470 {
2471 if (GET_CODE (dst) == SUBREG
2472 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
2473 dst = tem;
2474 else if (REG_P (dst))
2475 dst = gen_rtx_SUBREG (wmode, dst, 0);
2476 else
2477 dst = gen_reg_rtx (wmode);
2478
2479 if (GET_CODE (src1) == SUBREG
2480 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
2481 src1 = tem;
2482 else if (GET_MODE (src1) != VOIDmode)
2483 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
2484
2485 if (GET_CODE (src2) == SUBREG
2486 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
2487 src2 = tem;
2488 else if (GET_MODE (src2) != VOIDmode)
2489 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
2490 }
2491
2492 /* Emit the instruction. */
2493 op = gen_rtx_SET (dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
2494 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
2495 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
2496
2497 /* Fix up the destination if needed. */
2498 if (dst != operands[0])
2499 emit_move_insn (operands[0], gen_lowpart (mode, dst));
2500 }
2501
2502 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
2503
2504 bool
2505 s390_logical_operator_ok_p (rtx *operands)
2506 {
2507 /* If the destination operand is in memory, it needs to coincide
2508 with one of the source operands. After reload, it has to be
2509 the first source operand. */
2510 if (GET_CODE (operands[0]) == MEM)
2511 return rtx_equal_p (operands[0], operands[1])
2512 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
2513
2514 return true;
2515 }
2516
2517 /* Narrow logical operation CODE of memory operand MEMOP with immediate
2518 operand IMMOP to switch from SS to SI type instructions. */
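/* For example, an SImode AND of a memory operand with the constant
0xffff00ff differs from the AND default (-1) in exactly one byte;
the operation is narrowed to a QImode AND of the byte at offset 2
with the immediate 0x00. */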
2519
2520 void
2521 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
2522 {
2523 int def = code == AND ? -1 : 0;
2524 HOST_WIDE_INT mask;
2525 int part;
2526
2527 gcc_assert (GET_CODE (*memop) == MEM);
2528 gcc_assert (!MEM_VOLATILE_P (*memop));
2529
2530 mask = s390_extract_part (*immop, QImode, def);
2531 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
2532 gcc_assert (part >= 0);
2533
2534 *memop = adjust_address (*memop, QImode, part);
2535 *immop = gen_int_mode (mask, QImode);
2536 }
2537
2538
2539 /* How to allocate a 'struct machine_function'. */
2540
2541 static struct machine_function *
2542 s390_init_machine_status (void)
2543 {
2544 return ggc_cleared_alloc<machine_function> ();
2545 }
2546
2547 /* Map for smallest class containing reg regno. */
2548
2549 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
2550 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 0 */
2551 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 4 */
2552 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 8 */
2553 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 12 */
2554 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 16 */
2555 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 20 */
2556 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 24 */
2557 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 28 */
2558 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS, /* 32 */
2559 ACCESS_REGS, ACCESS_REGS, VEC_REGS, VEC_REGS, /* 36 */
2560 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 40 */
2561 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 44 */
2562 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 48 */
2563 VEC_REGS, VEC_REGS /* 52 */
2564 };
2565
2566 /* Return attribute type of insn. */
2567
2568 static enum attr_type
2569 s390_safe_attr_type (rtx_insn *insn)
2570 {
2571 if (recog_memoized (insn) >= 0)
2572 return get_attr_type (insn);
2573 else
2574 return TYPE_NONE;
2575 }
2576
2577 /* Return true if DISP is a valid short displacement. */
2578
2579 static bool
2580 s390_short_displacement (rtx disp)
2581 {
2582 /* No displacement is OK. */
2583 if (!disp)
2584 return true;
2585
2586 /* Without the long displacement facility we don't need to
2587 distinguish between long and short displacements. */
2588 if (!TARGET_LONG_DISPLACEMENT)
2589 return true;
2590
2591 /* Integer displacement in range. */
2592 if (GET_CODE (disp) == CONST_INT)
2593 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
2594
2595 /* GOT offset is not OK, the GOT can be large. */
2596 if (GET_CODE (disp) == CONST
2597 && GET_CODE (XEXP (disp, 0)) == UNSPEC
2598 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
2599 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
2600 return false;
2601
2602 /* All other symbolic constants are literal pool references,
2603 which are OK as the literal pool must be small. */
2604 if (GET_CODE (disp) == CONST)
2605 return true;
2606
2607 return false;
2608 }
2609
2610 /* Decompose a RTL expression ADDR for a memory address into
2611 its components, returned in OUT.
2612
2613 Returns false if ADDR is not a valid memory address, true
2614 otherwise. If OUT is NULL, don't return the components,
2615 but check for validity only.
2616
2617 Note: Only addresses in canonical form are recognized.
2618 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
2619 canonical form so that they will be recognized. */
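/* For example, (plus (plus (reg r2) (reg r3)) (const_int 100))
decomposes into index r2, base r3 and displacement 100. */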
2620
2621 static int
2622 s390_decompose_address (rtx addr, struct s390_address *out)
2623 {
2624 HOST_WIDE_INT offset = 0;
2625 rtx base = NULL_RTX;
2626 rtx indx = NULL_RTX;
2627 rtx disp = NULL_RTX;
2628 rtx orig_disp;
2629 bool pointer = false;
2630 bool base_ptr = false;
2631 bool indx_ptr = false;
2632 bool literal_pool = false;
2633
2634 /* We may need to substitute the literal pool base register into the address
2635 below. However, at this point we do not know which register is going to
2636 be used as base, so we substitute the arg pointer register. This is going
2637 to be treated as holding a pointer below -- it shouldn't be used for any
2638 other purpose. */
2639 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
2640
2641 /* Decompose address into base + index + displacement. */
2642
2643 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
2644 base = addr;
2645
2646 else if (GET_CODE (addr) == PLUS)
2647 {
2648 rtx op0 = XEXP (addr, 0);
2649 rtx op1 = XEXP (addr, 1);
2650 enum rtx_code code0 = GET_CODE (op0);
2651 enum rtx_code code1 = GET_CODE (op1);
2652
2653 if (code0 == REG || code0 == UNSPEC)
2654 {
2655 if (code1 == REG || code1 == UNSPEC)
2656 {
2657 indx = op0; /* index + base */
2658 base = op1;
2659 }
2660
2661 else
2662 {
2663 base = op0; /* base + displacement */
2664 disp = op1;
2665 }
2666 }
2667
2668 else if (code0 == PLUS)
2669 {
2670 indx = XEXP (op0, 0); /* index + base + disp */
2671 base = XEXP (op0, 1);
2672 disp = op1;
2673 }
2674
2675 else
2676 {
2677 return false;
2678 }
2679 }
2680
2681 else
2682 disp = addr; /* displacement */
2683
2684 /* Extract integer part of displacement. */
2685 orig_disp = disp;
2686 if (disp)
2687 {
2688 if (GET_CODE (disp) == CONST_INT)
2689 {
2690 offset = INTVAL (disp);
2691 disp = NULL_RTX;
2692 }
2693 else if (GET_CODE (disp) == CONST
2694 && GET_CODE (XEXP (disp, 0)) == PLUS
2695 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
2696 {
2697 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
2698 disp = XEXP (XEXP (disp, 0), 0);
2699 }
2700 }
2701
2702 /* Strip off CONST here to avoid special case tests later. */
2703 if (disp && GET_CODE (disp) == CONST)
2704 disp = XEXP (disp, 0);
2705
2706 /* We can convert literal pool addresses to
2707 displacements by basing them off the base register. */
2708 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
2709 {
2710 /* Either base or index must be free to hold the base register. */
2711 if (!base)
2712 base = fake_pool_base, literal_pool = true;
2713 else if (!indx)
2714 indx = fake_pool_base, literal_pool = true;
2715 else
2716 return false;
2717
2718 /* Mark up the displacement. */
2719 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
2720 UNSPEC_LTREL_OFFSET);
2721 }
2722
2723 /* Validate base register. */
2724 if (base)
2725 {
2726 if (GET_CODE (base) == UNSPEC)
2727 switch (XINT (base, 1))
2728 {
2729 case UNSPEC_LTREF:
2730 if (!disp)
2731 disp = gen_rtx_UNSPEC (Pmode,
2732 gen_rtvec (1, XVECEXP (base, 0, 0)),
2733 UNSPEC_LTREL_OFFSET);
2734 else
2735 return false;
2736
2737 base = XVECEXP (base, 0, 1);
2738 break;
2739
2740 case UNSPEC_LTREL_BASE:
2741 if (XVECLEN (base, 0) == 1)
2742 base = fake_pool_base, literal_pool = true;
2743 else
2744 base = XVECEXP (base, 0, 1);
2745 break;
2746
2747 default:
2748 return false;
2749 }
2750
2751 if (!REG_P (base)
2752 || (GET_MODE (base) != SImode
2753 && GET_MODE (base) != Pmode))
2754 return false;
2755
2756 if (REGNO (base) == STACK_POINTER_REGNUM
2757 || REGNO (base) == FRAME_POINTER_REGNUM
2758 || ((reload_completed || reload_in_progress)
2759 && frame_pointer_needed
2760 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
2761 || REGNO (base) == ARG_POINTER_REGNUM
2762 || (flag_pic
2763 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
2764 pointer = base_ptr = true;
2765
2766 if ((reload_completed || reload_in_progress)
2767 && base == cfun->machine->base_reg)
2768 pointer = base_ptr = literal_pool = true;
2769 }
2770
2771 /* Validate index register. */
2772 if (indx)
2773 {
2774 if (GET_CODE (indx) == UNSPEC)
2775 switch (XINT (indx, 1))
2776 {
2777 case UNSPEC_LTREF:
2778 if (!disp)
2779 disp = gen_rtx_UNSPEC (Pmode,
2780 gen_rtvec (1, XVECEXP (indx, 0, 0)),
2781 UNSPEC_LTREL_OFFSET);
2782 else
2783 return false;
2784
2785 indx = XVECEXP (indx, 0, 1);
2786 break;
2787
2788 case UNSPEC_LTREL_BASE:
2789 if (XVECLEN (indx, 0) == 1)
2790 indx = fake_pool_base, literal_pool = true;
2791 else
2792 indx = XVECEXP (indx, 0, 1);
2793 break;
2794
2795 default:
2796 return false;
2797 }
2798
2799 if (!REG_P (indx)
2800 || (GET_MODE (indx) != SImode
2801 && GET_MODE (indx) != Pmode))
2802 return false;
2803
2804 if (REGNO (indx) == STACK_POINTER_REGNUM
2805 || REGNO (indx) == FRAME_POINTER_REGNUM
2806 || ((reload_completed || reload_in_progress)
2807 && frame_pointer_needed
2808 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
2809 || REGNO (indx) == ARG_POINTER_REGNUM
2810 || (flag_pic
2811 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
2812 pointer = indx_ptr = true;
2813
2814 if ((reload_completed || reload_in_progress)
2815 && indx == cfun->machine->base_reg)
2816 pointer = indx_ptr = literal_pool = true;
2817 }
2818
2819 /* Prefer to use pointer as base, not index. */
2820 if (base && indx && !base_ptr
2821 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
2822 {
2823 rtx tmp = base;
2824 base = indx;
2825 indx = tmp;
2826 }
2827
2828 /* Validate displacement. */
2829 if (!disp)
2830 {
2831 /* If virtual registers are involved, the displacement will change later
2832 anyway as the virtual registers get eliminated. This could make a
2833 valid displacement invalid, but it is more likely to make an invalid
2834 displacement valid, because we sometimes access the register save area
2835 via negative offsets to one of those registers.
2836 Thus we don't check the displacement for validity here. If after
2837 elimination the displacement turns out to be invalid after all,
2838 this is fixed up by reload in any case. */
2839 /* LRA always keeps displacements up to date, and we need to
2840 know that the displacement is correct throughout LRA, not only
2841 at the final elimination. */
2842 if (lra_in_progress
2843 || (base != arg_pointer_rtx
2844 && indx != arg_pointer_rtx
2845 && base != return_address_pointer_rtx
2846 && indx != return_address_pointer_rtx
2847 && base != frame_pointer_rtx
2848 && indx != frame_pointer_rtx
2849 && base != virtual_stack_vars_rtx
2850 && indx != virtual_stack_vars_rtx))
2851 if (!DISP_IN_RANGE (offset))
2852 return false;
2853 }
2854 else
2855 {
2856 /* All the special cases are pointers. */
2857 pointer = true;
2858
2859 /* In the small-PIC case, the linker converts @GOT
2860 and @GOTNTPOFF offsets to possible displacements. */
2861 if (GET_CODE (disp) == UNSPEC
2862 && (XINT (disp, 1) == UNSPEC_GOT
2863 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
2864 && flag_pic == 1)
2865 {
2866 ;
2867 }
2868
2869 /* Accept pool label offsets. */
2870 else if (GET_CODE (disp) == UNSPEC
2871 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
2872 ;
2873
2874 /* Accept literal pool references. */
2875 else if (GET_CODE (disp) == UNSPEC
2876 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
2877 {
2878 /* In case CSE pulled a non-literal-pool reference out of
2879 the pool, we have to reject the address. This is
2880 especially important when loading the GOT pointer on
2881 non-zarch CPUs. In this case the literal pool contains an
2882 lt-relative offset to the _GLOBAL_OFFSET_TABLE_ label which
2883 will most likely exceed the displacement. */
2884 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
2885 || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
2886 return false;
2887
2888 orig_disp = gen_rtx_CONST (Pmode, disp);
2889 if (offset)
2890 {
2891 /* If we have an offset, make sure it does not
2892 exceed the size of the constant pool entry. */
2893 rtx sym = XVECEXP (disp, 0, 0);
2894 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
2895 return false;
2896
2897 orig_disp = plus_constant (Pmode, orig_disp, offset);
2898 }
2899 }
2900
2901 else
2902 return false;
2903 }
2904
2905 if (!base && !indx)
2906 pointer = true;
2907
2908 if (out)
2909 {
2910 out->base = base;
2911 out->indx = indx;
2912 out->disp = orig_disp;
2913 out->pointer = pointer;
2914 out->literal_pool = literal_pool;
2915 }
2916
2917 return true;
2918 }
2919
2920 /* Decompose a RTL expression OP for a shift count into its components,
2921 and return the base register in BASE and the offset in OFFSET.
2922
2923 Return true if OP is a valid shift count, false if not. */
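/* For example, (plus (reg r1) (const_int 7)) yields *BASE == r1 and
*OFFSET == 7, while a plain (const_int 3) yields *BASE == NULL_RTX
and *OFFSET == 3. */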
2924
2925 bool
2926 s390_decompose_shift_count (rtx op, rtx *base, HOST_WIDE_INT *offset)
2927 {
2928 HOST_WIDE_INT off = 0;
2929
2930 /* We can have an integer constant, an address register,
2931 or a sum of the two. */
2932 if (GET_CODE (op) == CONST_INT)
2933 {
2934 off = INTVAL (op);
2935 op = NULL_RTX;
2936 }
2937 if (op && GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT)
2938 {
2939 off = INTVAL (XEXP (op, 1));
2940 op = XEXP (op, 0);
2941 }
2942 while (op && GET_CODE (op) == SUBREG)
2943 op = SUBREG_REG (op);
2944
2945 if (op && GET_CODE (op) != REG)
2946 return false;
2947
2948 if (offset)
2949 *offset = off;
2950 if (base)
2951 *base = op;
2952
2953 return true;
2954 }
2955
2956
2957 /* Return true if OP is a memory operand whose address does not use an index register. */
2958
2959 bool
2960 s390_legitimate_address_without_index_p (rtx op)
2961 {
2962 struct s390_address addr;
2963
2964 if (!s390_decompose_address (XEXP (op, 0), &addr))
2965 return false;
2966 if (addr.indx)
2967 return false;
2968
2969 return true;
2970 }
2971
2972
2973 /* Return TRUE if ADDR is an operand valid for a load/store relative
2974 instruction. Be aware that the alignment of the operand needs to
2975 be checked separately.
2976 Valid addresses are single references or a sum of a reference and a
2977 constant integer. Return these parts in SYMREF and ADDEND. You can
2978 pass NULL in SYMREF and/or ADDEND if you are not interested in these
2979 values. Literal pool references are *not* considered symbol
2980 references. */
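/* For example, (const (plus (symbol_ref "x") (const_int 8))) yields
*SYMREF == (symbol_ref "x") and *ADDEND == 8, provided the symbol
is not a literal pool entry. */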
2981
2982 static bool
2983 s390_loadrelative_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
2984 {
2985 HOST_WIDE_INT tmpaddend = 0;
2986
2987 if (GET_CODE (addr) == CONST)
2988 addr = XEXP (addr, 0);
2989
2990 if (GET_CODE (addr) == PLUS)
2991 {
2992 if (!CONST_INT_P (XEXP (addr, 1)))
2993 return false;
2994
2995 tmpaddend = INTVAL (XEXP (addr, 1));
2996 addr = XEXP (addr, 0);
2997 }
2998
2999 if ((GET_CODE (addr) == SYMBOL_REF && !CONSTANT_POOL_ADDRESS_P (addr))
3000 || (GET_CODE (addr) == UNSPEC
3001 && (XINT (addr, 1) == UNSPEC_GOTENT
3002 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
3003 {
3004 if (symref)
3005 *symref = addr;
3006 if (addend)
3007 *addend = tmpaddend;
3008
3009 return true;
3010 }
3011 return false;
3012 }
3013
3014 /* Return true if the address in OP is valid for constraint letter C
3015 if wrapped in a MEM rtx. Set LIT_POOL_OK to true if literal
3016 pool MEMs should be accepted. Only the Q, R, S, T constraint
3017 letters are allowed for C. */
3018
3019 static int
3020 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
3021 {
3022 struct s390_address addr;
3023 bool decomposed = false;
3024
3025 /* This check makes sure that no symbolic address (except literal
3026 pool references) are accepted by the R or T constraints. */
3027 if (s390_loadrelative_operand_p (op, NULL, NULL))
3028 return 0;
3029
3030 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
3031 if (!lit_pool_ok)
3032 {
3033 if (!s390_decompose_address (op, &addr))
3034 return 0;
3035 if (addr.literal_pool)
3036 return 0;
3037 decomposed = true;
3038 }
3039
3040 switch (c)
3041 {
3042 case 'Q': /* no index short displacement */
3043 if (!decomposed && !s390_decompose_address (op, &addr))
3044 return 0;
3045 if (addr.indx)
3046 return 0;
3047 if (!s390_short_displacement (addr.disp))
3048 return 0;
3049 break;
3050
3051 case 'R': /* with index short displacement */
3052 if (TARGET_LONG_DISPLACEMENT)
3053 {
3054 if (!decomposed && !s390_decompose_address (op, &addr))
3055 return 0;
3056 if (!s390_short_displacement (addr.disp))
3057 return 0;
3058 }
3059 /* Any invalid address here will be fixed up by reload,
3060 so accept it for the most generic constraint. */
3061 break;
3062
3063 case 'S': /* no index long displacement */
3064 if (!TARGET_LONG_DISPLACEMENT)
3065 return 0;
3066 if (!decomposed && !s390_decompose_address (op, &addr))
3067 return 0;
3068 if (addr.indx)
3069 return 0;
3070 if (s390_short_displacement (addr.disp))
3071 return 0;
3072 break;
3073
3074 case 'T': /* with index long displacement */
3075 if (!TARGET_LONG_DISPLACEMENT)
3076 return 0;
3077 /* Any invalid address here will be fixed up by reload,
3078 so accept it for the most generic constraint. */
3079 if ((decomposed || s390_decompose_address (op, &addr))
3080 && s390_short_displacement (addr.disp))
3081 return 0;
3082 break;
3083 default:
3084 return 0;
3085 }
3086 return 1;
3087 }
3088
3089
3090 /* Evaluates constraint strings described by the regular expression
3091 ([A|B|Z](Q|R|S|T))|U|W|Y and returns 1 if OP is a valid operand for
3092 the constraint given in STR, and 0 otherwise. */
3093
3094 int
3095 s390_mem_constraint (const char *str, rtx op)
3096 {
3097 char c = str[0];
3098
3099 switch (c)
3100 {
3101 case 'A':
3102 /* Check for offsettable variants of memory constraints. */
3103 if (!MEM_P (op) || MEM_VOLATILE_P (op))
3104 return 0;
3105 if ((reload_completed || reload_in_progress)
3106 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
3107 return 0;
3108 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
3109 case 'B':
3110 /* Check for non-literal-pool variants of memory constraints. */
3111 if (!MEM_P (op))
3112 return 0;
3113 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
3114 case 'Q':
3115 case 'R':
3116 case 'S':
3117 case 'T':
3118 if (GET_CODE (op) != MEM)
3119 return 0;
3120 return s390_check_qrst_address (c, XEXP (op, 0), true);
3121 case 'U':
3122 return (s390_check_qrst_address ('Q', op, true)
3123 || s390_check_qrst_address ('R', op, true));
3124 case 'W':
3125 return (s390_check_qrst_address ('S', op, true)
3126 || s390_check_qrst_address ('T', op, true));
3127 case 'Y':
3128 /* Simply check for the basic form of a shift count. Reload will
3129 take care of making sure we have a proper base register. */
3130 if (!s390_decompose_shift_count (op, NULL, NULL))
3131 return 0;
3132 break;
3133 case 'Z':
3134 return s390_check_qrst_address (str[1], op, true);
3135 default:
3136 return 0;
3137 }
3138 return 1;
3139 }
3140
3141
3142 /* Evaluates constraint strings starting with letter O. Input
3143 parameter C is the letter following the "O" in the constraint
3144 string. Returns 1 if VALUE meets the respective constraint and 0
3145 otherwise. */
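/* For example, "Os" accepts any value that survives sign extension
from 32 bits, such as -1 or 0x7fffffff, but rejects 0x80000000
interpreted as a positive 64-bit number. */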
3146
3147 int
3148 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
3149 {
3150 if (!TARGET_EXTIMM)
3151 return 0;
3152
3153 switch (c)
3154 {
3155 case 's':
3156 return trunc_int_for_mode (value, SImode) == value;
3157
3158 case 'p':
3159 return value == 0
3160 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
3161
3162 case 'n':
3163 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
3164
3165 default:
3166 gcc_unreachable ();
3167 }
3168 }
3169
3170
3171 /* Evaluates constraint strings starting with letter N. Parameter STR
3172 contains the letters following letter "N" in the constraint string.
3173 Returns true if VALUE matches the constraint. */
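/* For example, STR == "xQD0" accepts any DImode constant with
exactly one QImode part different from zero, regardless of which
part it is. */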
3174
3175 int
3176 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
3177 {
3178 machine_mode mode, part_mode;
3179 int def;
3180 int part, part_goal;
3181
3182
3183 if (str[0] == 'x')
3184 part_goal = -1;
3185 else
3186 part_goal = str[0] - '0';
3187
3188 switch (str[1])
3189 {
3190 case 'Q':
3191 part_mode = QImode;
3192 break;
3193 case 'H':
3194 part_mode = HImode;
3195 break;
3196 case 'S':
3197 part_mode = SImode;
3198 break;
3199 default:
3200 return 0;
3201 }
3202
3203 switch (str[2])
3204 {
3205 case 'H':
3206 mode = HImode;
3207 break;
3208 case 'S':
3209 mode = SImode;
3210 break;
3211 case 'D':
3212 mode = DImode;
3213 break;
3214 default:
3215 return 0;
3216 }
3217
3218 switch (str[3])
3219 {
3220 case '0':
3221 def = 0;
3222 break;
3223 case 'F':
3224 def = -1;
3225 break;
3226 default:
3227 return 0;
3228 }
3229
3230 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
3231 return 0;
3232
3233 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
3234 if (part < 0)
3235 return 0;
3236 if (part_goal != -1 && part_goal != part)
3237 return 0;
3238
3239 return 1;
3240 }
3241
3242
3243 /* Returns true if the input parameter VALUE is a float zero. */
3244
3245 int
3246 s390_float_const_zero_p (rtx value)
3247 {
3248 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
3249 && value == CONST0_RTX (GET_MODE (value)));
3250 }
3251
3252 /* Implement TARGET_REGISTER_MOVE_COST. */
3253
3254 static int
3255 s390_register_move_cost (machine_mode mode,
3256 reg_class_t from, reg_class_t to)
3257 {
3258 /* On s390, copy between fprs and gprs is expensive. */
3259
3260 /* It becomes somewhat faster having ldgr/lgdr. */
3261 if (TARGET_Z10 && GET_MODE_SIZE (mode) == 8)
3262 {
3263 /* ldgr is single cycle. */
3264 if (reg_classes_intersect_p (from, GENERAL_REGS)
3265 && reg_classes_intersect_p (to, FP_REGS))
3266 return 1;
3267 /* lgdr needs 3 cycles. */
3268 if (reg_classes_intersect_p (to, GENERAL_REGS)
3269 && reg_classes_intersect_p (from, FP_REGS))
3270 return 3;
3271 }
3272
3273 /* Otherwise copying is done via memory. */
3274 if ((reg_classes_intersect_p (from, GENERAL_REGS)
3275 && reg_classes_intersect_p (to, FP_REGS))
3276 || (reg_classes_intersect_p (from, FP_REGS)
3277 && reg_classes_intersect_p (to, GENERAL_REGS)))
3278 return 10;
3279
3280 return 1;
3281 }
3282
3283 /* Implement TARGET_MEMORY_MOVE_COST. */
3284
3285 static int
3286 s390_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
3287 reg_class_t rclass ATTRIBUTE_UNUSED,
3288 bool in ATTRIBUTE_UNUSED)
3289 {
3290 return 2;
3291 }
3292
3293 /* Compute a (partial) cost for rtx X. Return true if the complete
3294 cost has been computed, and false if subexpressions should be
3295 scanned. In either case, *TOTAL contains the cost result.
3296 OUTER_CODE contains the code of the superexpression of x. */
3297
3298 static bool
3299 s390_rtx_costs (rtx x, machine_mode mode, int outer_code,
3300 int opno ATTRIBUTE_UNUSED,
3301 int *total, bool speed ATTRIBUTE_UNUSED)
3302 {
3303 int code = GET_CODE (x);
3304 switch (code)
3305 {
3306 case CONST:
3307 case CONST_INT:
3308 case LABEL_REF:
3309 case SYMBOL_REF:
3310 case CONST_DOUBLE:
3311 case MEM:
3312 *total = 0;
3313 return true;
3314
3315 case IOR:
3316 /* risbg */
3317 if (GET_CODE (XEXP (x, 0)) == AND
3318 && GET_CODE (XEXP (x, 1)) == ASHIFT
3319 && REG_P (XEXP (XEXP (x, 0), 0))
3320 && REG_P (XEXP (XEXP (x, 1), 0))
3321 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
3322 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
3323 && (UINTVAL (XEXP (XEXP (x, 0), 1)) ==
3324 (1UL << UINTVAL (XEXP (XEXP (x, 1), 1))) - 1))
3325 {
3326 *total = COSTS_N_INSNS (2);
3327 return true;
3328 }
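/* fallthrough */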
3329 case ASHIFT:
3330 case ASHIFTRT:
3331 case LSHIFTRT:
3332 case ROTATE:
3333 case ROTATERT:
3334 case AND:
3335 case XOR:
3336 case NEG:
3337 case NOT:
3338 *total = COSTS_N_INSNS (1);
3339 return false;
3340
3341 case PLUS:
3342 case MINUS:
3343 *total = COSTS_N_INSNS (1);
3344 return false;
3345
3346 case MULT:
3347 switch (mode)
3348 {
3349 case SImode:
3350 {
3351 rtx left = XEXP (x, 0);
3352 rtx right = XEXP (x, 1);
3353 if (GET_CODE (right) == CONST_INT
3354 && CONST_OK_FOR_K (INTVAL (right)))
3355 *total = s390_cost->mhi;
3356 else if (GET_CODE (left) == SIGN_EXTEND)
3357 *total = s390_cost->mh;
3358 else
3359 *total = s390_cost->ms; /* msr, ms, msy */
3360 break;
3361 }
3362 case DImode:
3363 {
3364 rtx left = XEXP (x, 0);
3365 rtx right = XEXP (x, 1);
3366 if (TARGET_ZARCH)
3367 {
3368 if (GET_CODE (right) == CONST_INT
3369 && CONST_OK_FOR_K (INTVAL (right)))
3370 *total = s390_cost->mghi;
3371 else if (GET_CODE (left) == SIGN_EXTEND)
3372 *total = s390_cost->msgf;
3373 else
3374 *total = s390_cost->msg; /* msgr, msg */
3375 }
3376 else /* TARGET_31BIT */
3377 {
3378 if (GET_CODE (left) == SIGN_EXTEND
3379 && GET_CODE (right) == SIGN_EXTEND)
3380 /* mulsidi case: mr, m */
3381 *total = s390_cost->m;
3382 else if (GET_CODE (left) == ZERO_EXTEND
3383 && GET_CODE (right) == ZERO_EXTEND
3384 && TARGET_CPU_ZARCH)
3385 /* umulsidi case: ml, mlr */
3386 *total = s390_cost->ml;
3387 else
3388 /* Complex calculation is required. */
3389 *total = COSTS_N_INSNS (40);
3390 }
3391 break;
3392 }
3393 case SFmode:
3394 case DFmode:
3395 *total = s390_cost->mult_df;
3396 break;
3397 case TFmode:
3398 *total = s390_cost->mxbr;
3399 break;
3400 default:
3401 return false;
3402 }
3403 return false;
3404
3405 case FMA:
3406 switch (mode)
3407 {
3408 case DFmode:
3409 *total = s390_cost->madbr;
3410 break;
3411 case SFmode:
3412 *total = s390_cost->maebr;
3413 break;
3414 default:
3415 return false;
3416 }
3417 /* Negate in the third argument is free: FMSUB. */
3418 if (GET_CODE (XEXP (x, 2)) == NEG)
3419 {
3420 *total += (rtx_cost (XEXP (x, 0), mode, FMA, 0, speed)
3421 + rtx_cost (XEXP (x, 1), mode, FMA, 1, speed)
3422 + rtx_cost (XEXP (XEXP (x, 2), 0), mode, FMA, 2, speed));
3423 return true;
3424 }
3425 return false;
3426
3427 case UDIV:
3428 case UMOD:
3429 if (mode == TImode) /* 128 bit division */
3430 *total = s390_cost->dlgr;
3431 else if (mode == DImode)
3432 {
3433 rtx right = XEXP (x, 1);
3434 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
3435 *total = s390_cost->dlr;
3436 else /* 64 by 64 bit division */
3437 *total = s390_cost->dlgr;
3438 }
3439 else if (mode == SImode) /* 32 bit division */
3440 *total = s390_cost->dlr;
3441 return false;
3442
3443 case DIV:
3444 case MOD:
3445 if (mode == DImode)
3446 {
3447 rtx right = XEXP (x, 1);
3448 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
3449 if (TARGET_ZARCH)
3450 *total = s390_cost->dsgfr;
3451 else
3452 *total = s390_cost->dr;
3453 else /* 64 by 64 bit division */
3454 *total = s390_cost->dsgr;
3455 }
3456 else if (mode == SImode) /* 32 bit division */
3457 *total = s390_cost->dlr;
3458 else if (mode == SFmode)
3459 {
3460 *total = s390_cost->debr;
3461 }
3462 else if (mode == DFmode)
3463 {
3464 *total = s390_cost->ddbr;
3465 }
3466 else if (mode == TFmode)
3467 {
3468 *total = s390_cost->dxbr;
3469 }
3470 return false;
3471
3472 case SQRT:
3473 if (mode == SFmode)
3474 *total = s390_cost->sqebr;
3475 else if (mode == DFmode)
3476 *total = s390_cost->sqdbr;
3477 else /* TFmode */
3478 *total = s390_cost->sqxbr;
3479 return false;
3480
3481 case SIGN_EXTEND:
3482 case ZERO_EXTEND:
3483 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
3484 || outer_code == PLUS || outer_code == MINUS
3485 || outer_code == COMPARE)
3486 *total = 0;
3487 return false;
3488
3489 case COMPARE:
3490 *total = COSTS_N_INSNS (1);
3491 if (GET_CODE (XEXP (x, 0)) == AND
3492 && GET_CODE (XEXP (x, 1)) == CONST_INT
3493 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
3494 {
3495 rtx op0 = XEXP (XEXP (x, 0), 0);
3496 rtx op1 = XEXP (XEXP (x, 0), 1);
3497 rtx op2 = XEXP (x, 1);
3498
3499 if (memory_operand (op0, GET_MODE (op0))
3500 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
3501 return true;
3502 if (register_operand (op0, GET_MODE (op0))
3503 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
3504 return true;
3505 }
3506 return false;
3507
3508 default:
3509 return false;
3510 }
3511 }
3512
3513 /* Return the cost of an address rtx ADDR. */
3514
3515 static int
3516 s390_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
3517 addr_space_t as ATTRIBUTE_UNUSED,
3518 bool speed ATTRIBUTE_UNUSED)
3519 {
3520 struct s390_address ad;
3521 if (!s390_decompose_address (addr, &ad))
3522 return 1000;
3523
3524 return ad.indx ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
3525 }
3526
3527 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
3528 otherwise return 0. */
3529
3530 int
3531 tls_symbolic_operand (rtx op)
3532 {
3533 if (GET_CODE (op) != SYMBOL_REF)
3534 return 0;
3535 return SYMBOL_REF_TLS_MODEL (op);
3536 }
3537 \f
3538 /* Split DImode access register reference REG (on 64-bit) into its constituent
3539 low and high parts, and store them into LO and HI. Note that gen_lowpart/
3540 gen_highpart cannot be used as they assume all registers are word-sized,
3541 while our access registers have only half that size. */
3542
3543 void
3544 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
3545 {
3546 gcc_assert (TARGET_64BIT);
3547 gcc_assert (ACCESS_REG_P (reg));
3548 gcc_assert (GET_MODE (reg) == DImode);
3549 gcc_assert (!(REGNO (reg) & 1));
3550
3551 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
3552 *hi = gen_rtx_REG (SImode, REGNO (reg));
3553 }
3554
3555 /* Return true if OP contains a symbol reference */
3556
3557 bool
3558 symbolic_reference_mentioned_p (rtx op)
3559 {
3560 const char *fmt;
3561 int i;
3562
3563 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
3564 return 1;
3565
3566 fmt = GET_RTX_FORMAT (GET_CODE (op));
3567 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3568 {
3569 if (fmt[i] == 'E')
3570 {
3571 int j;
3572
3573 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3574 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3575 return 1;
3576 }
3577
3578 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
3579 return 1;
3580 }
3581
3582 return 0;
3583 }
3584
3585 /* Return true if OP contains a reference to a thread-local symbol. */
3586
3587 bool
3588 tls_symbolic_reference_mentioned_p (rtx op)
3589 {
3590 const char *fmt;
3591 int i;
3592
3593 if (GET_CODE (op) == SYMBOL_REF)
3594 return tls_symbolic_operand (op);
3595
3596 fmt = GET_RTX_FORMAT (GET_CODE (op));
3597 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3598 {
3599 if (fmt[i] == 'E')
3600 {
3601 int j;
3602
3603 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3604 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3605 return true;
3606 }
3607
3608 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
3609 return true;
3610 }
3611
3612 return false;
3613 }
3614
3615
3616 /* Return true if OP is a legitimate general operand when
3617 generating PIC code. It is given that flag_pic is on
3618 and that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
3619
3620 int
3621 legitimate_pic_operand_p (rtx op)
3622 {
3623 /* Accept all non-symbolic constants. */
3624 if (!SYMBOLIC_CONST (op))
3625 return 1;
3626
3627 /* Reject everything else; must be handled
3628 via emit_symbolic_move. */
3629 return 0;
3630 }
3631
3632 /* Returns true if the constant value OP is a legitimate general operand.
3633 It is given that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
3634
3635 static bool
3636 s390_legitimate_constant_p (machine_mode mode, rtx op)
3637 {
3638 if (TARGET_VX && VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
3639 {
3640 if (GET_MODE_SIZE (mode) != 16)
3641 return 0;
3642
3643 if (!const0_operand (op, mode)
3644 && !s390_contiguous_bitmask_vector_p (op, NULL, NULL)
3645 && !s390_bytemask_vector_p (op, NULL))
3646 return 0;
3647 }
3648
3649 /* Accept all non-symbolic constants. */
3650 if (!SYMBOLIC_CONST (op))
3651 return 1;
3652
3653 /* Accept immediate LARL operands. */
3654 if (TARGET_CPU_ZARCH && larl_operand (op, mode))
3655 return 1;
3656
3657 /* Thread-local symbols are never legal constants. This is
3658 so that emit_call knows that computing such addresses
3659 might require a function call. */
3660 if (TLS_SYMBOLIC_CONST (op))
3661 return 0;
3662
3663 /* In the PIC case, symbolic constants must *not* be
3664 forced into the literal pool. We accept them here,
3665 so that they will be handled by emit_symbolic_move. */
3666 if (flag_pic)
3667 return 1;
3668
3669 /* All remaining non-PIC symbolic constants are
3670 forced into the literal pool. */
3671 return 0;
3672 }
3673
3674 /* Determine if it's legal to put X into the constant pool. This
3675 is not possible if X contains the address of a symbol that is
3676 not constant (TLS) or not known at final link time (PIC). */
3677
3678 static bool
3679 s390_cannot_force_const_mem (machine_mode mode, rtx x)
3680 {
3681 switch (GET_CODE (x))
3682 {
3683 case CONST_INT:
3684 case CONST_DOUBLE:
3685 case CONST_VECTOR:
3686 /* Accept all non-symbolic constants. */
3687 return false;
3688
3689 case LABEL_REF:
3690 /* Labels are OK iff we are non-PIC. */
3691 return flag_pic != 0;
3692
3693 case SYMBOL_REF:
3694 /* 'Naked' TLS symbol references are never OK,
3695 non-TLS symbols are OK iff we are non-PIC. */
3696 if (tls_symbolic_operand (x))
3697 return true;
3698 else
3699 return flag_pic != 0;
3700
3701 case CONST:
3702 return s390_cannot_force_const_mem (mode, XEXP (x, 0));
3703 case PLUS:
3704 case MINUS:
3705 return s390_cannot_force_const_mem (mode, XEXP (x, 0))
3706 || s390_cannot_force_const_mem (mode, XEXP (x, 1));
3707
3708 case UNSPEC:
3709 switch (XINT (x, 1))
3710 {
3711 /* Only lt-relative or GOT-relative UNSPECs are OK. */
3712 case UNSPEC_LTREL_OFFSET:
3713 case UNSPEC_GOT:
3714 case UNSPEC_GOTOFF:
3715 case UNSPEC_PLTOFF:
3716 case UNSPEC_TLSGD:
3717 case UNSPEC_TLSLDM:
3718 case UNSPEC_NTPOFF:
3719 case UNSPEC_DTPOFF:
3720 case UNSPEC_GOTNTPOFF:
3721 case UNSPEC_INDNTPOFF:
3722 return false;
3723
3724 /* If the literal pool shares the code section, we put
3725 execute template placeholders into the pool as well. */
3726 case UNSPEC_INSN:
3727 return TARGET_CPU_ZARCH;
3728
3729 default:
3730 return true;
3731 }
3732 break;
3733
3734 default:
3735 gcc_unreachable ();
3736 }
3737 }
3738
3739 /* Returns true if the constant value OP is a legitimate general
3740 operand during and after reload. The difference to
3741 legitimate_constant_p is that this function will not accept
3742 a constant that would need to be forced to the literal pool
3743 before it can be used as operand.
3744 This function accepts all constants which can be loaded directly
3745 into a GPR. */
3746
3747 bool
3748 legitimate_reload_constant_p (rtx op)
3749 {
3750 /* Accept la(y) operands. */
3751 if (GET_CODE (op) == CONST_INT
3752 && DISP_IN_RANGE (INTVAL (op)))
3753 return true;
3754
3755 /* Accept l(g)hi/l(g)fi operands. */
3756 if (GET_CODE (op) == CONST_INT
3757 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
3758 return true;
3759
3760 /* Accept lliXX operands. */
3761 if (TARGET_ZARCH
3762 && GET_CODE (op) == CONST_INT
3763 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
3764 && s390_single_part (op, word_mode, HImode, 0) >= 0)
3765 return true;
3766
3767 if (TARGET_EXTIMM
3768 && GET_CODE (op) == CONST_INT
3769 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
3770 && s390_single_part (op, word_mode, SImode, 0) >= 0)
3771 return true;
3772
3773 /* Accept larl operands. */
3774 if (TARGET_CPU_ZARCH
3775 && larl_operand (op, VOIDmode))
3776 return true;
3777
3778 /* Accept floating-point zero operands that fit into a single GPR. */
3779 if (GET_CODE (op) == CONST_DOUBLE
3780 && s390_float_const_zero_p (op)
3781 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
3782 return true;
3783
3784 /* Accept double-word operands that can be split. */
3785 if (GET_CODE (op) == CONST_INT
3786 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op))
3787 {
3788 machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
3789 rtx hi = operand_subword (op, 0, 0, dword_mode);
3790 rtx lo = operand_subword (op, 1, 0, dword_mode);
3791 return legitimate_reload_constant_p (hi)
3792 && legitimate_reload_constant_p (lo);
3793 }
3794
3795 /* Everything else cannot be handled without reload. */
3796 return false;
3797 }
3798
3799 /* Returns true if the constant value OP is a legitimate fp operand
3800 during and after reload.
3801 This function accepts all constants which can be loaded directly
3802 into an FPR. */
3803
3804 static bool
3805 legitimate_reload_fp_constant_p (rtx op)
3806 {
3807 /* Accept floating-point zero operands if the load zero instruction
3808 can be used. Prior to z196 the load fp zero instruction caused a
3809 performance penalty if the result is used as BFP number. */
3810 if (TARGET_Z196
3811 && GET_CODE (op) == CONST_DOUBLE
3812 && s390_float_const_zero_p (op))
3813 return true;
3814
3815 return false;
3816 }
3817
3818 /* Returns true if the constant value OP is a legitimate vector operand
3819 during and after reload.
3820 This function accepts all constants which can be loaded directly
3821 into a VR. */
3822
3823 static bool
3824 legitimate_reload_vector_constant_p (rtx op)
3825 {
3826 /* FIXME: Support constant vectors with all the same 16 bit unsigned
3827 operands. These can be loaded with vrepi. */
3828
3829 if (TARGET_VX && GET_MODE_SIZE (GET_MODE (op)) == 16
3830 && (const0_operand (op, GET_MODE (op))
3831 || constm1_operand (op, GET_MODE (op))
3832 || s390_contiguous_bitmask_vector_p (op, NULL, NULL)
3833 || s390_bytemask_vector_p (op, NULL)))
3834 return true;
3835
3836 return false;
3837 }
3838
3839 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
3840 return the class of reg to actually use. */
3841
3842 static reg_class_t
3843 s390_preferred_reload_class (rtx op, reg_class_t rclass)
3844 {
3845 switch (GET_CODE (op))
3846 {
3847 /* Constants we cannot reload into general registers
3848 must be forced into the literal pool. */
3849 case CONST_VECTOR:
3850 case CONST_DOUBLE:
3851 case CONST_INT:
3852 if (reg_class_subset_p (GENERAL_REGS, rclass)
3853 && legitimate_reload_constant_p (op))
3854 return GENERAL_REGS;
3855 else if (reg_class_subset_p (ADDR_REGS, rclass)
3856 && legitimate_reload_constant_p (op))
3857 return ADDR_REGS;
3858 else if (reg_class_subset_p (FP_REGS, rclass)
3859 && legitimate_reload_fp_constant_p (op))
3860 return FP_REGS;
3861 else if (reg_class_subset_p (VEC_REGS, rclass)
3862 && legitimate_reload_vector_constant_p (op))
3863 return VEC_REGS;
3864
3865 return NO_REGS;
3866
3867 /* If a symbolic constant or a PLUS is reloaded,
3868 it is most likely being used as an address, so
3869 prefer ADDR_REGS. If 'class' is not a superset
3870 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
3871 case CONST:
3872 /* Symrefs cannot be pushed into the literal pool with -fPIC
3873 so we *MUST NOT* return NO_REGS for these cases
3874 (s390_cannot_force_const_mem will return true).
3875
3876 On the other hand we MUST return NO_REGS for symrefs with
3877 invalid addend which might have been pushed to the literal
3878 pool (no -fPIC). Usually we would expect them to be
3879 handled via secondary reload but this does not happen if
3880 they are used as literal pool slot replacement in reload
3881 inheritance (see emit_input_reload_insns). */
3882 if (TARGET_CPU_ZARCH
3883 && GET_CODE (XEXP (op, 0)) == PLUS
3884 && GET_CODE (XEXP (XEXP(op, 0), 0)) == SYMBOL_REF
3885 && GET_CODE (XEXP (XEXP(op, 0), 1)) == CONST_INT)
3886 {
3887 if (flag_pic && reg_class_subset_p (ADDR_REGS, rclass))
3888 return ADDR_REGS;
3889 else
3890 return NO_REGS;
3891 }
3892 /* fallthrough */
3893 case LABEL_REF:
3894 case SYMBOL_REF:
3895 if (!legitimate_reload_constant_p (op))
3896 return NO_REGS;
3897 /* fallthrough */
3898 case PLUS:
3899 /* load address will be used. */
3900 if (reg_class_subset_p (ADDR_REGS, rclass))
3901 return ADDR_REGS;
3902 else
3903 return NO_REGS;
3904
3905 default:
3906 break;
3907 }
3908
3909 return rclass;
3910 }
3911
3912 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
3913 multiple of ALIGNMENT and the SYMBOL_REF being naturally
3914 aligned. */
3915
3916 bool
3917 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
3918 {
3919 HOST_WIDE_INT addend;
3920 rtx symref;
3921
3922 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
3923 return false;
3924
3925 if (addend & (alignment - 1))
3926 return false;
3927
3928 if (GET_CODE (symref) == SYMBOL_REF
3929 && !SYMBOL_REF_NOT_NATURALLY_ALIGNED_P (symref))
3930 return true;
3931
3932 if (GET_CODE (symref) == UNSPEC
3933 && alignment <= UNITS_PER_LONG)
3934 return true;
3935
3936 return false;
3937 }
3938
3939 /* Move ADDR into REG using larl. If ADDR isn't a valid larl
3940 operand, SCRATCH is used to load the even part of the address;
3941 one is then added to form the final value. */
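/* For example, for ADDR == sym + 5 the even constant sym + 4 is
loaded into SCRATCH (a valid larl operand) and the final value is
then formed with la, which does not clobber the condition code. */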
3942
3943 void
3944 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
3945 {
3946 HOST_WIDE_INT addend;
3947 rtx symref;
3948
3949 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
3950 gcc_unreachable ();
3951
3952 if (!(addend & 1))
3953 /* Easy case. The addend is even so larl will do fine. */
3954 emit_move_insn (reg, addr);
3955 else
3956 {
3957 /* We can leave the scratch register untouched if the target
3958 register is a valid base register. */
3959 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
3960 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
3961 scratch = reg;
3962
3963 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
3964 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
3965
3966 if (addend != 1)
3967 emit_move_insn (scratch,
3968 gen_rtx_CONST (Pmode,
3969 gen_rtx_PLUS (Pmode, symref,
3970 GEN_INT (addend - 1))));
3971 else
3972 emit_move_insn (scratch, symref);
3973
3974 /* Increment the address using la in order to avoid clobbering cc. */
3975 s390_load_address (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
3976 }
3977 }
3978
3979 /* Generate what is necessary to move between REG and MEM using
3980 SCRATCH. The direction is given by TOMEM. */
3981
3982 void
3983 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
3984 {
3985 /* Reload might have pulled a constant out of the literal pool.
3986 Force it back in. */
3987 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
3988 || GET_CODE (mem) == CONST_VECTOR
3989 || GET_CODE (mem) == CONST)
3990 mem = force_const_mem (GET_MODE (reg), mem);
3991
3992 gcc_assert (MEM_P (mem));
3993
3994 /* For a load from memory we can leave the scratch register
3995 untouched if the target register is a valid base register. */
3996 if (!tomem
3997 && REGNO (reg) < FIRST_PSEUDO_REGISTER
3998 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
3999 && GET_MODE (reg) == GET_MODE (scratch))
4000 scratch = reg;
4001
4002 /* Load address into scratch register. Since we can't have a
4003 secondary reload for a secondary reload we have to cover the case
4004 where larl would need a secondary reload here as well. */
4005 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
4006
4007 /* Now we can use a standard load/store to do the move. */
4008 if (tomem)
4009 emit_move_insn (replace_equiv_address (mem, scratch), reg);
4010 else
4011 emit_move_insn (reg, replace_equiv_address (mem, scratch));
4012 }
4013
4014 /* Inform reload about cases where moving X with a mode MODE to a register in
4015 RCLASS requires an extra scratch or immediate register. Return the class
4016 needed for the immediate register. */
4017
4018 static reg_class_t
4019 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
4020 machine_mode mode, secondary_reload_info *sri)
4021 {
4022 enum reg_class rclass = (enum reg_class) rclass_i;
4023
4024 /* Intermediate register needed. */
4025 if (reg_classes_intersect_p (CC_REGS, rclass))
4026 return GENERAL_REGS;
4027
4028 if (TARGET_VX)
4029 {
4030 /* The vst/vl vector move instructions only allow short
4031 displacements. */
4032 if (MEM_P (x)
4033 && GET_CODE (XEXP (x, 0)) == PLUS
4034 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4035 && !SHORT_DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1)))
4036 && reg_class_subset_p (rclass, VEC_REGS)
4037 && (!reg_class_subset_p (rclass, FP_REGS)
4038 || (GET_MODE_SIZE (mode) > 8
4039 && s390_class_max_nregs (FP_REGS, mode) == 1)))
4040 {
4041 if (in_p)
4042 sri->icode = (TARGET_64BIT ?
4043 CODE_FOR_reloaddi_la_in :
4044 CODE_FOR_reloadsi_la_in);
4045 else
4046 sri->icode = (TARGET_64BIT ?
4047 CODE_FOR_reloaddi_la_out :
4048 CODE_FOR_reloadsi_la_out);
4049 }
4050 }
4051
4052 if (TARGET_Z10)
4053 {
4054 HOST_WIDE_INT offset;
4055 rtx symref;
4056
4057 /* On z10 several optimizer steps may generate larl operands with
4058 an odd addend. */
4059 if (in_p
4060 && s390_loadrelative_operand_p (x, &symref, &offset)
4061 && mode == Pmode
4062 && !SYMBOL_REF_ALIGN1_P (symref)
4063 && (offset & 1) == 1)
4064 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
4065 : CODE_FOR_reloadsi_larl_odd_addend_z10);
4066
4067 /* Handle all the (mem (symref)) accesses we cannot use the z10
4068 instructions for. */
4069 if (MEM_P (x)
4070 && s390_loadrelative_operand_p (XEXP (x, 0), NULL, NULL)
4071 && (mode == QImode
4072 || !reg_class_subset_p (rclass, GENERAL_REGS)
4073 || GET_MODE_SIZE (mode) > UNITS_PER_WORD
4074 || !s390_check_symref_alignment (XEXP (x, 0),
4075 GET_MODE_SIZE (mode))))
4076 {
4077 #define __SECONDARY_RELOAD_CASE(M,m) \
4078 case M##mode: \
4079 if (TARGET_64BIT) \
4080 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
4081 CODE_FOR_reload##m##di_tomem_z10; \
4082 else \
4083 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
4084 CODE_FOR_reload##m##si_tomem_z10; \
4085 break;
4086
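/* For example, the DImode case on a 64-bit target selects
   CODE_FOR_reloaddidi_toreg_z10 for loads and
   CODE_FOR_reloaddidi_tomem_z10 for stores. */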
4087 switch (GET_MODE (x))
4088 {
4089 __SECONDARY_RELOAD_CASE (QI, qi);
4090 __SECONDARY_RELOAD_CASE (HI, hi);
4091 __SECONDARY_RELOAD_CASE (SI, si);
4092 __SECONDARY_RELOAD_CASE (DI, di);
4093 __SECONDARY_RELOAD_CASE (TI, ti);
4094 __SECONDARY_RELOAD_CASE (SF, sf);
4095 __SECONDARY_RELOAD_CASE (DF, df);
4096 __SECONDARY_RELOAD_CASE (TF, tf);
4097 __SECONDARY_RELOAD_CASE (SD, sd);
4098 __SECONDARY_RELOAD_CASE (DD, dd);
4099 __SECONDARY_RELOAD_CASE (TD, td);
4100 __SECONDARY_RELOAD_CASE (V1QI, v1qi);
4101 __SECONDARY_RELOAD_CASE (V2QI, v2qi);
4102 __SECONDARY_RELOAD_CASE (V4QI, v4qi);
4103 __SECONDARY_RELOAD_CASE (V8QI, v8qi);
4104 __SECONDARY_RELOAD_CASE (V16QI, v16qi);
4105 __SECONDARY_RELOAD_CASE (V1HI, v1hi);
4106 __SECONDARY_RELOAD_CASE (V2HI, v2hi);
4107 __SECONDARY_RELOAD_CASE (V4HI, v4hi);
4108 __SECONDARY_RELOAD_CASE (V8HI, v8hi);
4109 __SECONDARY_RELOAD_CASE (V1SI, v1si);
4110 __SECONDARY_RELOAD_CASE (V2SI, v2si);
4111 __SECONDARY_RELOAD_CASE (V4SI, v4si);
4112 __SECONDARY_RELOAD_CASE (V1DI, v1di);
4113 __SECONDARY_RELOAD_CASE (V2DI, v2di);
4114 __SECONDARY_RELOAD_CASE (V1TI, v1ti);
4115 __SECONDARY_RELOAD_CASE (V1SF, v1sf);
4116 __SECONDARY_RELOAD_CASE (V2SF, v2sf);
4117 __SECONDARY_RELOAD_CASE (V4SF, v4sf);
4118 __SECONDARY_RELOAD_CASE (V1DF, v1df);
4119 __SECONDARY_RELOAD_CASE (V2DF, v2df);
4120 __SECONDARY_RELOAD_CASE (V1TF, v1tf);
4121 default:
4122 gcc_unreachable ();
4123 }
4124 #undef __SECONDARY_RELOAD_CASE
4125 }
4126 }
4127
4128 /* We need a scratch register when loading a PLUS expression which
4129 is not a legitimate operand of the LOAD ADDRESS instruction. */
4130 /* LRA can deal with transformation of plus op very well -- so we
4131 don't need to prompt LRA in this case. */
4132 if (! lra_in_progress && in_p && s390_plus_operand (x, mode))
4133 sri->icode = (TARGET_64BIT ?
4134 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
4135
4136 /* Performing a multiword move from or to memory we have to make sure the
4137 second chunk in memory is addressable without causing a displacement
4138 overflow. If that would be the case we calculate the address in
4139 a scratch register. */
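/* E.g. on a target without long displacements a 16-byte (TImode) access
   at displacement 4090 reaches up to byte 4105, beyond the 4095 byte
   limit, so the address has to be computed in a scratch register. */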
4140 if (MEM_P (x)
4141 && GET_CODE (XEXP (x, 0)) == PLUS
4142 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4143 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
4144 + GET_MODE_SIZE (mode) - 1))
4145 {
4146 /* For GENERAL_REGS a displacement overflow is no problem if it occurs
4147 in an s_operand address since we may fall back to lm/stm. So we only
4148 have to care about overflows in the b+i+d case. */
4149 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
4150 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
4151 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
4152 /* For FP_REGS no lm/stm is available so this check is triggered
4153 for displacement overflows in b+i+d and b+d like addresses. */
4154 || (reg_classes_intersect_p (FP_REGS, rclass)
4155 && s390_class_max_nregs (FP_REGS, mode) > 1))
4156 {
4157 if (in_p)
4158 sri->icode = (TARGET_64BIT ?
4159 CODE_FOR_reloaddi_la_in :
4160 CODE_FOR_reloadsi_la_in);
4161 else
4162 sri->icode = (TARGET_64BIT ?
4163 CODE_FOR_reloaddi_la_out :
4164 CODE_FOR_reloadsi_la_out);
4165 }
4166 }
4167
4168 /* A scratch address register is needed when a symbolic constant is
4169 copied to r0 while compiling with -fPIC. In other cases the target
4170 register might be used as a temporary (see legitimize_pic_address). */
4171 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
4172 sri->icode = (TARGET_64BIT ?
4173 CODE_FOR_reloaddi_PIC_addr :
4174 CODE_FOR_reloadsi_PIC_addr);
4175
4176 /* Either scratch or no register needed. */
4177 return NO_REGS;
4178 }
4179
4180 /* Generate code to load SRC, which is a PLUS that is not a
4181 legitimate operand for the LA instruction, into TARGET.
4182 SCRATCH may be used as a scratch register. */
4183
4184 void
4185 s390_expand_plus_operand (rtx target, rtx src,
4186 rtx scratch)
4187 {
4188 rtx sum1, sum2;
4189 struct s390_address ad;
4190
4191 /* src must be a PLUS; get its two operands. */
4192 gcc_assert (GET_CODE (src) == PLUS);
4193 gcc_assert (GET_MODE (src) == Pmode);
4194
4195 /* Check if either of the two operands is already scheduled
4196 for replacement by reload. This can happen e.g. when
4197 float registers occur in an address. */
4198 sum1 = find_replacement (&XEXP (src, 0));
4199 sum2 = find_replacement (&XEXP (src, 1));
4200 src = gen_rtx_PLUS (Pmode, sum1, sum2);
4201
4202 /* If the address is already strictly valid, there's nothing to do. */
4203 if (!s390_decompose_address (src, &ad)
4204 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
4205 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
4206 {
4207 /* Otherwise, one of the operands cannot be an address register;
4208 we reload its value into the scratch register. */
4209 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
4210 {
4211 emit_move_insn (scratch, sum1);
4212 sum1 = scratch;
4213 }
4214 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
4215 {
4216 emit_move_insn (scratch, sum2);
4217 sum2 = scratch;
4218 }
4219
4220 /* According to the way these invalid addresses are generated
4221 in reload.c, it should never happen (at least on s390) that
4222 *neither* of the PLUS components, after find_replacement
4223 was applied, is an address register. */
4224 if (sum1 == scratch && sum2 == scratch)
4225 {
4226 debug_rtx (src);
4227 gcc_unreachable ();
4228 }
4229
4230 src = gen_rtx_PLUS (Pmode, sum1, sum2);
4231 }
4232
4233 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
4234 is only ever performed on addresses, so we can mark the
4235 sum as legitimate for LA in any case. */
4236 s390_load_address (target, src);
4237 }
4238
4239
4240 /* Return true if ADDR is a valid memory address.
4241 STRICT specifies whether strict register checking applies. */
4242
4243 static bool
4244 s390_legitimate_address_p (machine_mode mode, rtx addr, bool strict)
4245 {
4246 struct s390_address ad;
4247
4248 if (TARGET_Z10
4249 && larl_operand (addr, VOIDmode)
4250 && (mode == VOIDmode
4251 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
4252 return true;
4253
4254 if (!s390_decompose_address (addr, &ad))
4255 return false;
4256
4257 if (strict)
4258 {
4259 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
4260 return false;
4261
4262 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
4263 return false;
4264 }
4265 else
4266 {
4267 if (ad.base
4268 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
4269 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
4270 return false;
4271
4272 if (ad.indx
4273 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
4274 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
4275 return false;
4276 }
4277 return true;
4278 }
4279
4280 /* Return true if OP is a valid operand for the LA instruction.
4281 In 31-bit, we need to prove that the result is used as an
4282 address, as LA performs only a 31-bit addition. */
4283
4284 bool
4285 legitimate_la_operand_p (rtx op)
4286 {
4287 struct s390_address addr;
4288 if (!s390_decompose_address (op, &addr))
4289 return false;
4290
4291 return (TARGET_64BIT || addr.pointer);
4292 }
4293
4294 /* Return true if it is valid *and* preferable to use LA to
4295 compute the sum of OP1 and OP2. */
4296
4297 bool
4298 preferred_la_operand_p (rtx op1, rtx op2)
4299 {
4300 struct s390_address addr;
4301
4302 if (op2 != const0_rtx)
4303 op1 = gen_rtx_PLUS (Pmode, op1, op2);
4304
4305 if (!s390_decompose_address (op1, &addr))
4306 return false;
4307 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
4308 return false;
4309 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
4310 return false;
4311
4312 /* Avoid LA instructions with index register on z196; it is
4313 preferable to use regular add instructions when possible.
4314 Starting with zEC12 the la with index register is "uncracked"
4315 again. */
4316 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
4317 return false;
4318
4319 if (!TARGET_64BIT && !addr.pointer)
4320 return false;
4321
4322 if (addr.pointer)
4323 return true;
4324
4325 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
4326 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
4327 return true;
4328
4329 return false;
4330 }
4331
4332 /* Emit a forced load-address operation to load SRC into DST.
4333 This will use the LOAD ADDRESS instruction even in situations
4334 where legitimate_la_operand_p (SRC) returns false. */
4335
4336 void
4337 s390_load_address (rtx dst, rtx src)
4338 {
4339 if (TARGET_64BIT)
4340 emit_move_insn (dst, src);
4341 else
4342 emit_insn (gen_force_la_31 (dst, src));
4343 }
4344
4345 /* Return a legitimate reference for ORIG (an address) using the
4346 register REG. If REG is 0, a new pseudo is generated.
4347
4348 There are two types of references that must be handled:
4349
4350 1. Global data references must load the address from the GOT, via
4351 the PIC reg. An insn is emitted to do this load, and the reg is
4352 returned.
4353
4354 2. Static data references, constant pool addresses, and code labels
4355 compute the address as an offset from the GOT, whose base is in
4356 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
4357 differentiate them from global data objects. The returned
4358 address is the PIC reg + an unspec constant.
4359
4360 TARGET_LEGITIMIZE_ADDRESS_P rejects symbolic references unless the PIC
4361 reg also appears in the address. */
4362
4363 rtx
4364 legitimize_pic_address (rtx orig, rtx reg)
4365 {
4366 rtx addr = orig;
4367 rtx addend = const0_rtx;
4368 rtx new_rtx = orig;
4369
4370 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
4371
4372 if (GET_CODE (addr) == CONST)
4373 addr = XEXP (addr, 0);
4374
4375 if (GET_CODE (addr) == PLUS)
4376 {
4377 addend = XEXP (addr, 1);
4378 addr = XEXP (addr, 0);
4379 }
4380
4381 if ((GET_CODE (addr) == LABEL_REF
4382 || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr))
4383 || (GET_CODE (addr) == UNSPEC &&
4384 (XINT (addr, 1) == UNSPEC_GOTENT
4385 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
4386 && GET_CODE (addend) == CONST_INT)
4387 {
4388 /* This can be locally addressed. */
4389
4390 /* larl_operand requires UNSPECs to be wrapped in a const rtx. */
4391 rtx const_addr = (GET_CODE (addr) == UNSPEC ?
4392 gen_rtx_CONST (Pmode, addr) : addr);
4393
4394 if (TARGET_CPU_ZARCH
4395 && larl_operand (const_addr, VOIDmode)
4396 && INTVAL (addend) < (HOST_WIDE_INT)1 << 31
4397 && INTVAL (addend) >= -((HOST_WIDE_INT)1 << 31))
4398 {
4399 if (INTVAL (addend) & 1)
4400 {
4401 /* LARL can't handle odd offsets, so emit a pair of LARL
4402 and LA. */
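/* E.g. sym+5 becomes larl temp,sym; la reg,5(temp). An odd addend
   outside the displacement range, say sym+0x100001, becomes
   larl temp,sym+0x100000; la reg,1(temp). */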
4403 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4404
4405 if (!DISP_IN_RANGE (INTVAL (addend)))
4406 {
4407 HOST_WIDE_INT even = INTVAL (addend) - 1;
4408 addr = gen_rtx_PLUS (Pmode, addr, GEN_INT (even));
4409 addr = gen_rtx_CONST (Pmode, addr);
4410 addend = const1_rtx;
4411 }
4412
4413 emit_move_insn (temp, addr);
4414 new_rtx = gen_rtx_PLUS (Pmode, temp, addend);
4415
4416 if (reg != 0)
4417 {
4418 s390_load_address (reg, new_rtx);
4419 new_rtx = reg;
4420 }
4421 }
4422 else
4423 {
4424 /* If the offset is even, we can just use LARL. This
4425 will happen automatically. */
4426 }
4427 }
4428 else
4429 {
4430 /* No larl - Access local symbols relative to the GOT. */
4431
4432 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4433
4434 if (reload_in_progress || reload_completed)
4435 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4436
4437 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
4438 if (addend != const0_rtx)
4439 addr = gen_rtx_PLUS (Pmode, addr, addend);
4440 addr = gen_rtx_CONST (Pmode, addr);
4441 addr = force_const_mem (Pmode, addr);
4442 emit_move_insn (temp, addr);
4443
4444 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4445 if (reg != 0)
4446 {
4447 s390_load_address (reg, new_rtx);
4448 new_rtx = reg;
4449 }
4450 }
4451 }
4452 else if (GET_CODE (addr) == SYMBOL_REF && addend == const0_rtx)
4453 {
4454 /* A non-local symbol reference without addend.
4455
4456 The symbol ref is wrapped into an UNSPEC to make sure the
4457 proper operand modifier (@GOT or @GOTENT) will be emitted.
4458 This will tell the linker to put the symbol into the GOT.
4459
4460 Additionally the code dereferencing the GOT slot is emitted here.
4461
4462 An addend to the symref needs to be added afterwards.
4463 legitimize_pic_address calls itself recursively to handle
4464 that case. So no need to do it here. */
4465
4466 if (reg == 0)
4467 reg = gen_reg_rtx (Pmode);
4468
4469 if (TARGET_Z10)
4470 {
4471 /* Use load relative if possible.
4472 lgrl <target>, sym@GOTENT */
4473 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
4474 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4475 new_rtx = gen_const_mem (GET_MODE (reg), new_rtx);
4476
4477 emit_move_insn (reg, new_rtx);
4478 new_rtx = reg;
4479 }
4480 else if (flag_pic == 1)
4481 {
4482 /* Assume GOT offset is a valid displacement operand (< 4k
4483 or < 512k with z990). This is handled the same way in
4484 both 31- and 64-bit code (@GOT).
4485 lg <target>, sym@GOT(r12) */
4486
4487 if (reload_in_progress || reload_completed)
4488 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4489
4490 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
4491 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4492 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
4493 new_rtx = gen_const_mem (Pmode, new_rtx);
4494 emit_move_insn (reg, new_rtx);
4495 new_rtx = reg;
4496 }
4497 else if (TARGET_CPU_ZARCH)
4498 {
4499 /* If the GOT offset might be >= 4k, we determine the position
4500 of the GOT entry via a PC-relative LARL (@GOTENT).
4501 larl temp, sym@GOTENT
4502 lg <target>, 0(temp) */
4503
4504 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4505
4506 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
4507 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
4508
4509 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
4510 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4511 emit_move_insn (temp, new_rtx);
4512
4513 new_rtx = gen_const_mem (Pmode, temp);
4514 emit_move_insn (reg, new_rtx);
4515
4516 new_rtx = reg;
4517 }
4518 else
4519 {
4520 /* If the GOT offset might be >= 4k, we have to load it
4521 from the literal pool (@GOT).
4522
4523 lg temp, lit-litbase(r13)
4524 lg <target>, 0(temp)
4525 lit: .long sym@GOT */
4526
4527 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4528
4529 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
4530 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
4531
4532 if (reload_in_progress || reload_completed)
4533 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4534
4535 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
4536 addr = gen_rtx_CONST (Pmode, addr);
4537 addr = force_const_mem (Pmode, addr);
4538 emit_move_insn (temp, addr);
4539
4540 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4541 new_rtx = gen_const_mem (Pmode, new_rtx);
4542 emit_move_insn (reg, new_rtx);
4543 new_rtx = reg;
4544 }
4545 }
4546 else if (GET_CODE (addr) == UNSPEC && GET_CODE (addend) == CONST_INT)
4547 {
4548 gcc_assert (XVECLEN (addr, 0) == 1);
4549 switch (XINT (addr, 1))
4550 {
4551 /* These are addresses of symbols (or PLT slots) relative to the
4552 GOT (not GOT slots!). In general this will exceed the
4553 displacement range, so these values belong in the literal
4554 pool. */
4555 case UNSPEC_GOTOFF:
4556 case UNSPEC_PLTOFF:
4557 new_rtx = force_const_mem (Pmode, orig);
4558 break;
4559
4560 /* For -fPIC the GOT size might exceed the displacement
4561 range so make sure the value is in the literal pool. */
4562 case UNSPEC_GOT:
4563 if (flag_pic == 2)
4564 new_rtx = force_const_mem (Pmode, orig);
4565 break;
4566
4567 /* For @GOTENT larl is used. This is handled like local
4568 symbol refs. */
4569 case UNSPEC_GOTENT:
4570 gcc_unreachable ();
4571 break;
4572
4573 /* @PLT is OK as is on 64-bit, but must be converted to
4574 GOT-relative @PLTOFF on 31-bit. */
4575 case UNSPEC_PLT:
4576 if (!TARGET_CPU_ZARCH)
4577 {
4578 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4579
4580 if (reload_in_progress || reload_completed)
4581 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4582
4583 addr = XVECEXP (addr, 0, 0);
4584 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
4585 UNSPEC_PLTOFF);
4586 if (addend != const0_rtx)
4587 addr = gen_rtx_PLUS (Pmode, addr, addend);
4588 addr = gen_rtx_CONST (Pmode, addr);
4589 addr = force_const_mem (Pmode, addr);
4590 emit_move_insn (temp, addr);
4591
4592 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4593 if (reg != 0)
4594 {
4595 s390_load_address (reg, new_rtx);
4596 new_rtx = reg;
4597 }
4598 }
4599 else
4600 /* On 64 bit larl can be used. This case is handled like
4601 local symbol refs. */
4602 gcc_unreachable ();
4603 break;
4604
4605 /* Everything else cannot happen. */
4606 default:
4607 gcc_unreachable ();
4608 }
4609 }
4610 else if (addend != const0_rtx)
4611 {
4612 /* Otherwise, compute the sum. */
4613
4614 rtx base = legitimize_pic_address (addr, reg);
4615 new_rtx = legitimize_pic_address (addend,
4616 base == reg ? NULL_RTX : reg);
4617 if (GET_CODE (new_rtx) == CONST_INT)
4618 new_rtx = plus_constant (Pmode, base, INTVAL (new_rtx));
4619 else
4620 {
4621 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
4622 {
4623 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
4624 new_rtx = XEXP (new_rtx, 1);
4625 }
4626 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
4627 }
4628
4629 if (GET_CODE (new_rtx) == CONST)
4630 new_rtx = XEXP (new_rtx, 0);
4631 new_rtx = force_operand (new_rtx, 0);
4632 }
4633
4634 return new_rtx;
4635 }
4636
4637 /* Load the thread pointer into a register. */
4638
4639 rtx
4640 s390_get_thread_pointer (void)
4641 {
4642 rtx tp = gen_reg_rtx (Pmode);
4643
4644 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
4645 mark_reg_pointer (tp, BITS_PER_WORD);
4646
4647 return tp;
4648 }
4649
4650 /* Emit a tls call insn. The call target is the SYMBOL_REF stored
4651 in s390_tls_symbol which always refers to __tls_get_offset.
4652 The returned offset is written to RESULT_REG and a USE rtx is
4653 generated for TLS_CALL. */
4654
4655 static GTY(()) rtx s390_tls_symbol;
4656
4657 static void
4658 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
4659 {
4660 rtx insn;
4661
4662 if (!flag_pic)
4663 emit_insn (s390_load_got ());
4664
4665 if (!s390_tls_symbol)
4666 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
4667
4668 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
4669 gen_rtx_REG (Pmode, RETURN_REGNUM));
4670
4671 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
4672 RTL_CONST_CALL_P (insn) = 1;
4673 }
4674
4675 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
4676 this (thread-local) address. REG may be used as a temporary. */
4677
4678 static rtx
4679 legitimize_tls_address (rtx addr, rtx reg)
4680 {
4681 rtx new_rtx, tls_call, temp, base, r2, insn;
4682
4683 if (GET_CODE (addr) == SYMBOL_REF)
4684 switch (tls_symbolic_operand (addr))
4685 {
4686 case TLS_MODEL_GLOBAL_DYNAMIC:
4687 start_sequence ();
4688 r2 = gen_rtx_REG (Pmode, 2);
4689 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
4690 new_rtx = gen_rtx_CONST (Pmode, tls_call);
4691 new_rtx = force_const_mem (Pmode, new_rtx);
4692 emit_move_insn (r2, new_rtx);
4693 s390_emit_tls_call_insn (r2, tls_call);
4694 insn = get_insns ();
4695 end_sequence ();
4696
4697 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
4698 temp = gen_reg_rtx (Pmode);
4699 emit_libcall_block (insn, temp, r2, new_rtx);
4700
4701 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4702 if (reg != 0)
4703 {
4704 s390_load_address (reg, new_rtx);
4705 new_rtx = reg;
4706 }
4707 break;
4708
4709 case TLS_MODEL_LOCAL_DYNAMIC:
4710 start_sequence ();
4711 r2 = gen_rtx_REG (Pmode, 2);
4712 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
4713 new_rtx = gen_rtx_CONST (Pmode, tls_call);
4714 new_rtx = force_const_mem (Pmode, new_rtx);
4715 emit_move_insn (r2, new_rtx);
4716 s390_emit_tls_call_insn (r2, tls_call);
4717 insn = get_insns ();
4718 end_sequence ();
4719
4720 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
4721 temp = gen_reg_rtx (Pmode);
4722 emit_libcall_block (insn, temp, r2, new_rtx);
4723
4724 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4725 base = gen_reg_rtx (Pmode);
4726 s390_load_address (base, new_rtx);
4727
4728 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
4729 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4730 new_rtx = force_const_mem (Pmode, new_rtx);
4731 temp = gen_reg_rtx (Pmode);
4732 emit_move_insn (temp, new_rtx);
4733
4734 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
4735 if (reg != 0)
4736 {
4737 s390_load_address (reg, new_rtx);
4738 new_rtx = reg;
4739 }
4740 break;
4741
4742 case TLS_MODEL_INITIAL_EXEC:
4743 if (flag_pic == 1)
4744 {
4745 /* Assume GOT offset < 4k. This is handled the same way
4746 in both 31- and 64-bit code. */
4747
4748 if (reload_in_progress || reload_completed)
4749 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4750
4751 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
4752 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4753 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
4754 new_rtx = gen_const_mem (Pmode, new_rtx);
4755 temp = gen_reg_rtx (Pmode);
4756 emit_move_insn (temp, new_rtx);
4757 }
4758 else if (TARGET_CPU_ZARCH)
4759 {
4760 /* If the GOT offset might be >= 4k, we determine the position
4761 of the GOT entry via a PC-relative LARL. */
4762
4763 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
4764 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4765 temp = gen_reg_rtx (Pmode);
4766 emit_move_insn (temp, new_rtx);
4767
4768 new_rtx = gen_const_mem (Pmode, temp);
4769 temp = gen_reg_rtx (Pmode);
4770 emit_move_insn (temp, new_rtx);
4771 }
4772 else if (flag_pic)
4773 {
4774 /* If the GOT offset might be >= 4k, we have to load it
4775 from the literal pool. */
4776
4777 if (reload_in_progress || reload_completed)
4778 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4779
4780 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
4781 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4782 new_rtx = force_const_mem (Pmode, new_rtx);
4783 temp = gen_reg_rtx (Pmode);
4784 emit_move_insn (temp, new_rtx);
4785
4786 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4787 new_rtx = gen_const_mem (Pmode, new_rtx);
4788
4789 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
4790 temp = gen_reg_rtx (Pmode);
4791 emit_insn (gen_rtx_SET (temp, new_rtx));
4792 }
4793 else
4794 {
4795 /* In position-dependent code, load the absolute address of
4796 the GOT entry from the literal pool. */
4797
4798 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
4799 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4800 new_rtx = force_const_mem (Pmode, new_rtx);
4801 temp = gen_reg_rtx (Pmode);
4802 emit_move_insn (temp, new_rtx);
4803
4804 new_rtx = temp;
4805 new_rtx = gen_const_mem (Pmode, new_rtx);
4806 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
4807 temp = gen_reg_rtx (Pmode);
4808 emit_insn (gen_rtx_SET (temp, new_rtx));
4809 }
4810
4811 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4812 if (reg != 0)
4813 {
4814 s390_load_address (reg, new_rtx);
4815 new_rtx = reg;
4816 }
4817 break;
4818
4819 case TLS_MODEL_LOCAL_EXEC:
4820 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
4821 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4822 new_rtx = force_const_mem (Pmode, new_rtx);
4823 temp = gen_reg_rtx (Pmode);
4824 emit_move_insn (temp, new_rtx);
4825
4826 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4827 if (reg != 0)
4828 {
4829 s390_load_address (reg, new_rtx);
4830 new_rtx = reg;
4831 }
4832 break;
4833
4834 default:
4835 gcc_unreachable ();
4836 }
4837
4838 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
4839 {
4840 switch (XINT (XEXP (addr, 0), 1))
4841 {
4842 case UNSPEC_INDNTPOFF:
4843 gcc_assert (TARGET_CPU_ZARCH);
4844 new_rtx = addr;
4845 break;
4846
4847 default:
4848 gcc_unreachable ();
4849 }
4850 }
4851
4852 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
4853 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
4854 {
4855 new_rtx = XEXP (XEXP (addr, 0), 0);
4856 if (GET_CODE (new_rtx) != SYMBOL_REF)
4857 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4858
4859 new_rtx = legitimize_tls_address (new_rtx, reg);
4860 new_rtx = plus_constant (Pmode, new_rtx,
4861 INTVAL (XEXP (XEXP (addr, 0), 1)));
4862 new_rtx = force_operand (new_rtx, 0);
4863 }
4864
4865 else
4866 gcc_unreachable (); /* for now ... */
4867
4868 return new_rtx;
4869 }
4870
4871 /* Emit insns making the address in operands[1] valid for a standard
4872 move to operands[0]. operands[1] is replaced by an address which
4873 should be used instead of the former RTX to emit the move
4874 pattern. */
4875
4876 void
4877 emit_symbolic_move (rtx *operands)
4878 {
4879 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
4880
4881 if (GET_CODE (operands[0]) == MEM)
4882 operands[1] = force_reg (Pmode, operands[1]);
4883 else if (TLS_SYMBOLIC_CONST (operands[1]))
4884 operands[1] = legitimize_tls_address (operands[1], temp);
4885 else if (flag_pic)
4886 operands[1] = legitimize_pic_address (operands[1], temp);
4887 }
4888
4889 /* Try machine-dependent ways of modifying an illegitimate address X
4890 to be legitimate. If we find one, return the new, valid address.
4891
4892 OLDX is the address as it was before break_out_memory_refs was called.
4893 In some cases it is useful to look at this to decide what needs to be done.
4894
4895 MODE is the mode of the operand pointed to by X.
4896
4897 When -fpic is used, special handling is needed for symbolic references.
4898 See comments by legitimize_pic_address for details. */
4899
4900 static rtx
4901 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
4902 machine_mode mode ATTRIBUTE_UNUSED)
4903 {
4904 rtx constant_term = const0_rtx;
4905
4906 if (TLS_SYMBOLIC_CONST (x))
4907 {
4908 x = legitimize_tls_address (x, 0);
4909
4910 if (s390_legitimate_address_p (mode, x, FALSE))
4911 return x;
4912 }
4913 else if (GET_CODE (x) == PLUS
4914 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
4915 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
4916 {
4917 return x;
4918 }
4919 else if (flag_pic)
4920 {
4921 if (SYMBOLIC_CONST (x)
4922 || (GET_CODE (x) == PLUS
4923 && (SYMBOLIC_CONST (XEXP (x, 0))
4924 || SYMBOLIC_CONST (XEXP (x, 1)))))
4925 x = legitimize_pic_address (x, 0);
4926
4927 if (s390_legitimate_address_p (mode, x, FALSE))
4928 return x;
4929 }
4930
4931 x = eliminate_constant_term (x, &constant_term);
4932
4933 /* Optimize loading of large displacements by splitting them
4934 into the multiple of 4K and the rest; this allows the
4935 former to be CSE'd if possible.
4936
4937 Don't do this if the displacement is added to a register
4938 pointing into the stack frame, as the offsets will
4939 change later anyway. */
4940
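/* For example, a displacement of 0x12345 is split into upper = 0x12000,
   which is loaded into a register and can be reused via CSE, and
   lower = 0x345, which fits the 12-bit displacement field. */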
4941 if (GET_CODE (constant_term) == CONST_INT
4942 && !TARGET_LONG_DISPLACEMENT
4943 && !DISP_IN_RANGE (INTVAL (constant_term))
4944 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
4945 {
4946 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
4947 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
4948
4949 rtx temp = gen_reg_rtx (Pmode);
4950 rtx val = force_operand (GEN_INT (upper), temp);
4951 if (val != temp)
4952 emit_move_insn (temp, val);
4953
4954 x = gen_rtx_PLUS (Pmode, x, temp);
4955 constant_term = GEN_INT (lower);
4956 }
4957
4958 if (GET_CODE (x) == PLUS)
4959 {
4960 if (GET_CODE (XEXP (x, 0)) == REG)
4961 {
4962 rtx temp = gen_reg_rtx (Pmode);
4963 rtx val = force_operand (XEXP (x, 1), temp);
4964 if (val != temp)
4965 emit_move_insn (temp, val);
4966
4967 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
4968 }
4969
4970 else if (GET_CODE (XEXP (x, 1)) == REG)
4971 {
4972 rtx temp = gen_reg_rtx (Pmode);
4973 rtx val = force_operand (XEXP (x, 0), temp);
4974 if (val != temp)
4975 emit_move_insn (temp, val);
4976
4977 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
4978 }
4979 }
4980
4981 if (constant_term != const0_rtx)
4982 x = gen_rtx_PLUS (Pmode, x, constant_term);
4983
4984 return x;
4985 }
4986
4987 /* Try a machine-dependent way of reloading an illegitimate address AD
4988 operand. If we find one, push the reload and return the new address.
4989
4990 MODE is the mode of the enclosing MEM. OPNUM is the operand number
4991 and TYPE is the reload type of the current reload. */
4992
4993 rtx
4994 legitimize_reload_address (rtx ad, machine_mode mode ATTRIBUTE_UNUSED,
4995 int opnum, int type)
4996 {
4997 if (!optimize || TARGET_LONG_DISPLACEMENT)
4998 return NULL_RTX;
4999
5000 if (GET_CODE (ad) == PLUS)
5001 {
5002 rtx tem = simplify_binary_operation (PLUS, Pmode,
5003 XEXP (ad, 0), XEXP (ad, 1));
5004 if (tem)
5005 ad = tem;
5006 }
5007
5008 if (GET_CODE (ad) == PLUS
5009 && GET_CODE (XEXP (ad, 0)) == REG
5010 && GET_CODE (XEXP (ad, 1)) == CONST_INT
5011 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
5012 {
5013 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
5014 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
5015 rtx cst, tem, new_rtx;
5016
5017 cst = GEN_INT (upper);
5018 if (!legitimate_reload_constant_p (cst))
5019 cst = force_const_mem (Pmode, cst);
5020
5021 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
5022 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
5023
5024 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
5025 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
5026 opnum, (enum reload_type) type);
5027 return new_rtx;
5028 }
5029
5030 return NULL_RTX;
5031 }
5032
5033 /* Emit code to move LEN bytes from SRC to DST. */
5034
5035 bool
5036 s390_expand_movmem (rtx dst, rtx src, rtx len)
5037 {
5038 /* When tuning for z10 or higher we rely on the Glibc functions to
5039 do the right thing. Only for constant lengths of at most 64k do we
5040 generate inline code. */
5041 if (s390_tune >= PROCESSOR_2097_Z10
5042 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
5043 return false;
5044
5045 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
5046 {
5047 if (INTVAL (len) > 0)
5048 emit_insn (gen_movmem_short (dst, src, GEN_INT (INTVAL (len) - 1)));
5049 }
5050
5051 else if (TARGET_MVCLE)
5052 {
5053 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
5054 }
5055
5056 else
5057 {
5058 rtx dst_addr, src_addr, count, blocks, temp;
5059 rtx_code_label *loop_start_label = gen_label_rtx ();
5060 rtx_code_label *loop_end_label = gen_label_rtx ();
5061 rtx_code_label *end_label = gen_label_rtx ();
5062 machine_mode mode;
5063
5064 mode = GET_MODE (len);
5065 if (mode == VOIDmode)
5066 mode = Pmode;
5067
5068 dst_addr = gen_reg_rtx (Pmode);
5069 src_addr = gen_reg_rtx (Pmode);
5070 count = gen_reg_rtx (mode);
5071 blocks = gen_reg_rtx (mode);
5072
5073 convert_move (count, len, 1);
5074 emit_cmp_and_jump_insns (count, const0_rtx,
5075 EQ, NULL_RTX, mode, 1, end_label);
5076
5077 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
5078 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
5079 dst = change_address (dst, VOIDmode, dst_addr);
5080 src = change_address (src, VOIDmode, src_addr);
5081
5082 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5083 OPTAB_DIRECT);
5084 if (temp != count)
5085 emit_move_insn (count, temp);
5086
5087 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5088 OPTAB_DIRECT);
5089 if (temp != blocks)
5090 emit_move_insn (blocks, temp);
5091
5092 emit_cmp_and_jump_insns (blocks, const0_rtx,
5093 EQ, NULL_RTX, mode, 1, loop_end_label);
5094
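/* Each iteration of the loop below moves one 256 byte block; the
   remaining 1..256 bytes are handled by the movmem_short emitted after
   the loop. E.g. for len == 700 we get blocks == 2 (two 256 byte moves)
   followed by a final 188 byte move. */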
5095 emit_label (loop_start_label);
5096
5097 if (TARGET_Z10
5098 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
5099 {
5100 rtx prefetch;
5101
5102 /* Issue a read prefetch for the +3 cache line. */
5103 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
5104 const0_rtx, const0_rtx);
5105 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5106 emit_insn (prefetch);
5107
5108 /* Issue a write prefetch for the +3 cache line. */
5109 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
5110 const1_rtx, const0_rtx);
5111 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5112 emit_insn (prefetch);
5113 }
5114
5115 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
5116 s390_load_address (dst_addr,
5117 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
5118 s390_load_address (src_addr,
5119 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
5120
5121 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5122 OPTAB_DIRECT);
5123 if (temp != blocks)
5124 emit_move_insn (blocks, temp);
5125
5126 emit_cmp_and_jump_insns (blocks, const0_rtx,
5127 EQ, NULL_RTX, mode, 1, loop_end_label);
5128
5129 emit_jump (loop_start_label);
5130 emit_label (loop_end_label);
5131
5132 emit_insn (gen_movmem_short (dst, src,
5133 convert_to_mode (Pmode, count, 1)));
5134 emit_label (end_label);
5135 }
5136 return true;
5137 }
5138
5139 /* Emit code to set LEN bytes at DST to VAL.
5140 Make use of clrmem if VAL is zero. */
5141
5142 void
5143 s390_expand_setmem (rtx dst, rtx len, rtx val)
5144 {
5145 if (GET_CODE (len) == CONST_INT && INTVAL (len) == 0)
5146 return;
5147
5148 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
5149
5150 if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
5151 {
5152 if (val == const0_rtx && INTVAL (len) <= 256)
5153 emit_insn (gen_clrmem_short (dst, GEN_INT (INTVAL (len) - 1)));
5154 else
5155 {
5156 /* Initialize memory by storing the first byte. */
5157 emit_move_insn (adjust_address (dst, QImode, 0), val);
5158
5159 if (INTVAL (len) > 1)
5160 {
5161 /* Initiate a 1-byte overlap move.
5162 The first byte of DST is propagated through DSTP1.
5163 Prepare a movmem for: DST+1 = DST (length = LEN - 1).
5164 DST is set to size 1 so the rest of the memory location
5165 does not count as a source operand. */
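/* E.g. for LEN == 100 the initial store sets DST[0] = VAL and the
   overlapping move below copies 99 bytes from DST to DST+1,
   propagating VAL through DST[1..99]. */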
5166 rtx dstp1 = adjust_address (dst, VOIDmode, 1);
5167 set_mem_size (dst, 1);
5168
5169 emit_insn (gen_movmem_short (dstp1, dst,
5170 GEN_INT (INTVAL (len) - 2)));
5171 }
5172 }
5173 }
5174
5175 else if (TARGET_MVCLE)
5176 {
5177 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
5178 emit_insn (gen_setmem_long (dst, convert_to_mode (Pmode, len, 1), val));
5179 }
5180
5181 else
5182 {
5183 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
5184 rtx_code_label *loop_start_label = gen_label_rtx ();
5185 rtx_code_label *loop_end_label = gen_label_rtx ();
5186 rtx_code_label *end_label = gen_label_rtx ();
5187 machine_mode mode;
5188
5189 mode = GET_MODE (len);
5190 if (mode == VOIDmode)
5191 mode = Pmode;
5192
5193 dst_addr = gen_reg_rtx (Pmode);
5194 count = gen_reg_rtx (mode);
5195 blocks = gen_reg_rtx (mode);
5196
5197 convert_move (count, len, 1);
5198 emit_cmp_and_jump_insns (count, const0_rtx,
5199 EQ, NULL_RTX, mode, 1, end_label);
5200
5201 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
5202 dst = change_address (dst, VOIDmode, dst_addr);
5203
5204 if (val == const0_rtx)
5205 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5206 OPTAB_DIRECT);
5207 else
5208 {
5209 dstp1 = adjust_address (dst, VOIDmode, 1);
5210 set_mem_size (dst, 1);
5211
5212 /* Initialize memory by storing the first byte. */
5213 emit_move_insn (adjust_address (dst, QImode, 0), val);
5214
5215 /* If count is 1 we are done. */
5216 emit_cmp_and_jump_insns (count, const1_rtx,
5217 EQ, NULL_RTX, mode, 1, end_label);
5218
5219 temp = expand_binop (mode, add_optab, count, GEN_INT (-2), count, 1,
5220 OPTAB_DIRECT);
5221 }
5222 if (temp != count)
5223 emit_move_insn (count, temp);
5224
5225 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5226 OPTAB_DIRECT);
5227 if (temp != blocks)
5228 emit_move_insn (blocks, temp);
5229
5230 emit_cmp_and_jump_insns (blocks, const0_rtx,
5231 EQ, NULL_RTX, mode, 1, loop_end_label);
5232
5233 emit_label (loop_start_label);
5234
5235 if (TARGET_Z10
5236 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
5237 {
5238 /* Issue a write prefetch for the +4 cache line. */
5239 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
5240 GEN_INT (1024)),
5241 const1_rtx, const0_rtx);
5242 emit_insn (prefetch);
5243 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5244 }
5245
5246 if (val == const0_rtx)
5247 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
5248 else
5249 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (255)));
5250 s390_load_address (dst_addr,
5251 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
5252
5253 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5254 OPTAB_DIRECT);
5255 if (temp != blocks)
5256 emit_move_insn (blocks, temp);
5257
5258 emit_cmp_and_jump_insns (blocks, const0_rtx,
5259 EQ, NULL_RTX, mode, 1, loop_end_label);
5260
5261 emit_jump (loop_start_label);
5262 emit_label (loop_end_label);
5263
5264 if (val == const0_rtx)
5265 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
5266 else
5267 emit_insn (gen_movmem_short (dstp1, dst, convert_to_mode (Pmode, count, 1)));
5268 emit_label (end_label);
5269 }
5270 }
5271
5272 /* Emit code to compare LEN bytes at OP0 with those at OP1,
5273 and return the result in TARGET. */
5274
5275 bool
5276 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
5277 {
5278 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
5279 rtx tmp;
5280
5281 /* When tuning for z10 or higher we rely on the Glibc functions to
5282 do the right thing. Only for constant lengths of at most 64k do we
5283 generate inline code. */
5284 if (s390_tune >= PROCESSOR_2097_Z10
5285 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
5286 return false;
5287
5288 /* As the result of CMPINT is inverted compared to what we need,
5289 we have to swap the operands. */
5290 tmp = op0; op0 = op1; op1 = tmp;
5291
5292 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
5293 {
5294 if (INTVAL (len) > 0)
5295 {
5296 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
5297 emit_insn (gen_cmpint (target, ccreg));
5298 }
5299 else
5300 emit_move_insn (target, const0_rtx);
5301 }
5302 else if (TARGET_MVCLE)
5303 {
5304 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
5305 emit_insn (gen_cmpint (target, ccreg));
5306 }
5307 else
5308 {
5309 rtx addr0, addr1, count, blocks, temp;
5310 rtx_code_label *loop_start_label = gen_label_rtx ();
5311 rtx_code_label *loop_end_label = gen_label_rtx ();
5312 rtx_code_label *end_label = gen_label_rtx ();
5313 machine_mode mode;
5314
5315 mode = GET_MODE (len);
5316 if (mode == VOIDmode)
5317 mode = Pmode;
5318
5319 addr0 = gen_reg_rtx (Pmode);
5320 addr1 = gen_reg_rtx (Pmode);
5321 count = gen_reg_rtx (mode);
5322 blocks = gen_reg_rtx (mode);
5323
5324 convert_move (count, len, 1);
5325 emit_cmp_and_jump_insns (count, const0_rtx,
5326 EQ, NULL_RTX, mode, 1, end_label);
5327
5328 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
5329 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
5330 op0 = change_address (op0, VOIDmode, addr0);
5331 op1 = change_address (op1, VOIDmode, addr1);
5332
5333 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5334 OPTAB_DIRECT);
5335 if (temp != count)
5336 emit_move_insn (count, temp);
5337
5338 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5339 OPTAB_DIRECT);
5340 if (temp != blocks)
5341 emit_move_insn (blocks, temp);
5342
5343 emit_cmp_and_jump_insns (blocks, const0_rtx,
5344 EQ, NULL_RTX, mode, 1, loop_end_label);
5345
5346 emit_label (loop_start_label);
5347
5348 if (TARGET_Z10
5349 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
5350 {
5351 rtx prefetch;
5352
5353 /* Issue a read prefetch for the +2 cache line of operand 1. */
5354 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
5355 const0_rtx, const0_rtx);
5356 emit_insn (prefetch);
5357 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5358
5359 /* Issue a read prefetch for the +2 cache line of operand 2. */
5360 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
5361 const0_rtx, const0_rtx);
5362 emit_insn (prefetch);
5363 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5364 }
5365
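/* Compare the next 256 byte block and bail out to end_label as soon as
   a difference is found; gen_cmpint converts the resulting CC into the
   return value there. */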
5366 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
5367 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
5368 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
5369 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
5370 temp = gen_rtx_SET (pc_rtx, temp);
5371 emit_jump_insn (temp);
5372
5373 s390_load_address (addr0,
5374 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
5375 s390_load_address (addr1,
5376 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
5377
5378 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5379 OPTAB_DIRECT);
5380 if (temp != blocks)
5381 emit_move_insn (blocks, temp);
5382
5383 emit_cmp_and_jump_insns (blocks, const0_rtx,
5384 EQ, NULL_RTX, mode, 1, loop_end_label);
5385
5386 emit_jump (loop_start_label);
5387 emit_label (loop_end_label);
5388
5389 emit_insn (gen_cmpmem_short (op0, op1,
5390 convert_to_mode (Pmode, count, 1)));
5391 emit_label (end_label);
5392
5393 emit_insn (gen_cmpint (target, ccreg));
5394 }
5395 return true;
5396 }
5397
5398 /* Emit a conditional jump to LABEL for condition code mask MASK using
5399 comparison operator COMPARISON. Return the emitted jump insn. */
5400
5401 static rtx
5402 s390_emit_ccraw_jump (HOST_WIDE_INT mask, enum rtx_code comparison, rtx label)
5403 {
5404 rtx temp;
5405
5406 gcc_assert (comparison == EQ || comparison == NE);
5407 gcc_assert (mask > 0 && mask < 15);
5408
5409 temp = gen_rtx_fmt_ee (comparison, VOIDmode,
5410 gen_rtx_REG (CCRAWmode, CC_REGNUM), GEN_INT (mask));
5411 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
5412 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
5413 temp = gen_rtx_SET (pc_rtx, temp);
5414 return emit_jump_insn (temp);
5415 }
5416
5417 /* Emit the instructions to implement strlen of STRING and store the
5418 result in TARGET. The string has the known ALIGNMENT. This
5419 version uses vector instructions and is therefore not appropriate
5420 for targets prior to z13. */
5421
5422 void
5423 s390_expand_vec_strlen (rtx target, rtx string, rtx alignment)
5424 {
5425 int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
5426 int very_likely = REG_BR_PROB_BASE - 1;
5427 rtx highest_index_to_load_reg = gen_reg_rtx (Pmode);
5428 rtx str_reg = gen_reg_rtx (V16QImode);
5429 rtx str_addr_base_reg = gen_reg_rtx (Pmode);
5430 rtx str_idx_reg = gen_reg_rtx (Pmode);
5431 rtx result_reg = gen_reg_rtx (V16QImode);
5432 rtx is_aligned_label = gen_label_rtx ();
5433 rtx into_loop_label = NULL_RTX;
5434 rtx loop_start_label = gen_label_rtx ();
5435 rtx temp;
5436 rtx len = gen_reg_rtx (QImode);
5437 rtx cond;
5438
5439 s390_load_address (str_addr_base_reg, XEXP (string, 0));
5440 emit_move_insn (str_idx_reg, const0_rtx);
5441
5442 if (INTVAL (alignment) < 16)
5443 {
5444 /* Check whether the address happens to be properly aligned; if so,
5445 jump directly to the aligned loop. */
5446 emit_cmp_and_jump_insns (gen_rtx_AND (Pmode,
5447 str_addr_base_reg, GEN_INT (15)),
5448 const0_rtx, EQ, NULL_RTX,
5449 Pmode, 1, is_aligned_label);
5450
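/* E.g. for a string starting at an address with low bits 0xb only
   16 - 11 = 5 bytes remain in the first 16-byte block, so vll loads
   bytes 0..4 (highest index 4) and zero-fills the rest of the vector. */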
5451 temp = gen_reg_rtx (Pmode);
5452 temp = expand_binop (Pmode, and_optab, str_addr_base_reg,
5453 GEN_INT (15), temp, 1, OPTAB_DIRECT);
5454 gcc_assert (REG_P (temp));
5455 highest_index_to_load_reg =
5456 expand_binop (Pmode, sub_optab, GEN_INT (15), temp,
5457 highest_index_to_load_reg, 1, OPTAB_DIRECT);
5458 gcc_assert (REG_P (highest_index_to_load_reg));
5459 emit_insn (gen_vllv16qi (str_reg,
5460 convert_to_mode (SImode, highest_index_to_load_reg, 1),
5461 gen_rtx_MEM (BLKmode, str_addr_base_reg)));
5462
5463 into_loop_label = gen_label_rtx ();
5464 s390_emit_jump (into_loop_label, NULL_RTX);
5465 emit_barrier ();
5466 }
5467
5468 emit_label (is_aligned_label);
5469 LABEL_NUSES (is_aligned_label) = INTVAL (alignment) < 16 ? 2 : 1;
5470
5471 /* Reaching this point we are only performing 16-byte aligned
5472 loads. */
5473 emit_move_insn (highest_index_to_load_reg, GEN_INT (15));
5474
5475 emit_label (loop_start_label);
5476 LABEL_NUSES (loop_start_label) = 1;
5477
5478 /* Load 16 bytes of the string into VR. */
5479 emit_move_insn (str_reg,
5480 gen_rtx_MEM (V16QImode,
5481 gen_rtx_PLUS (Pmode, str_idx_reg,
5482 str_addr_base_reg)));
5483 if (into_loop_label != NULL_RTX)
5484 {
5485 emit_label (into_loop_label);
5486 LABEL_NUSES (into_loop_label) = 1;
5487 }
5488
5489 /* Increment string index by 16 bytes. */
5490 expand_binop (Pmode, add_optab, str_idx_reg, GEN_INT (16),
5491 str_idx_reg, 1, OPTAB_DIRECT);
5492
5493 emit_insn (gen_vec_vfenesv16qi (result_reg, str_reg, str_reg,
5494 GEN_INT (VSTRING_FLAG_ZS | VSTRING_FLAG_CS)));
5495
5496 add_int_reg_note (s390_emit_ccraw_jump (8, NE, loop_start_label),
5497 REG_BR_PROB, very_likely);
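/* Byte element 7 of the vfenez result holds the index of the first
   zero byte in the chunk, or 16 if the chunk contains no zero byte. */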
5498 emit_insn (gen_vec_extractv16qi (len, result_reg, GEN_INT (7)));
5499
5500 /* If the string pointer wasn't aligned we have loaded fewer than 16
5501 bytes and the remaining bytes got filled with zeros (by vll).
5502 Now we have to check whether the resulting index lies within the
5503 bytes that are actually part of the string. */
5504
5505 cond = s390_emit_compare (GT, convert_to_mode (Pmode, len, 1),
5506 highest_index_to_load_reg);
5507 s390_load_address (highest_index_to_load_reg,
5508 gen_rtx_PLUS (Pmode, highest_index_to_load_reg,
5509 const1_rtx));
5510 if (TARGET_64BIT)
5511 emit_insn (gen_movdicc (str_idx_reg, cond,
5512 highest_index_to_load_reg, str_idx_reg));
5513 else
5514 emit_insn (gen_movsicc (str_idx_reg, cond,
5515 highest_index_to_load_reg, str_idx_reg));
5516
5517 add_int_reg_note (s390_emit_jump (is_aligned_label, cond), REG_BR_PROB,
5518 very_unlikely);
5519
5520 expand_binop (Pmode, add_optab, str_idx_reg,
5521 GEN_INT (-16), str_idx_reg, 1, OPTAB_DIRECT);
5522 /* FIXME: len is already zero extended - so avoid the llgcr emitted
5523 here. */
5524 temp = expand_binop (Pmode, add_optab, str_idx_reg,
5525 convert_to_mode (Pmode, len, 1),
5526 target, 1, OPTAB_DIRECT);
5527 if (temp != target)
5528 emit_move_insn (target, temp);
5529 }
5530
5531 /* Expand conditional increment or decrement using alc/slb instructions.
5532 Should generate code setting DST to either SRC or SRC + INCREMENT,
5533 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
5534 Returns true if successful, false otherwise.
5535
5536 That makes it possible to implement some if-constructs without jumps e.g.:
5537 (borrow = CC0 | CC1 and carry = CC2 | CC3)
5538 unsigned int a, b, c;
5539 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
5540 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
5541 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
5542 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
5543
5544 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
5545 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
5546 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
5547 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
5548 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
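/* As a concrete example of the RTL emitted below: for 'if (a < b) c++;'
   with unsigned SImode operands we emit a CCUmode compare of b against a
   and then (set c (plus (plus (gtu cc 0) c) 0)) with a CC clobber, which
   is implemented by the alc instruction. */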
5549
5550 bool
5551 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
5552 rtx dst, rtx src, rtx increment)
5553 {
5554 machine_mode cmp_mode;
5555 machine_mode cc_mode;
5556 rtx op_res;
5557 rtx insn;
5558 rtvec p;
5559 int ret;
5560
5561 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
5562 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
5563 cmp_mode = SImode;
5564 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
5565 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
5566 cmp_mode = DImode;
5567 else
5568 return false;
5569
5570 /* Try ADD LOGICAL WITH CARRY. */
5571 if (increment == const1_rtx)
5572 {
5573 /* Determine CC mode to use. */
5574 if (cmp_code == EQ || cmp_code == NE)
5575 {
5576 if (cmp_op1 != const0_rtx)
5577 {
5578 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
5579 NULL_RTX, 0, OPTAB_WIDEN);
5580 cmp_op1 = const0_rtx;
5581 }
5582
5583 cmp_code = cmp_code == EQ ? LEU : GTU;
5584 }
5585
5586 if (cmp_code == LTU || cmp_code == LEU)
5587 {
5588 rtx tem = cmp_op0;
5589 cmp_op0 = cmp_op1;
5590 cmp_op1 = tem;
5591 cmp_code = swap_condition (cmp_code);
5592 }
5593
5594 switch (cmp_code)
5595 {
5596 case GTU:
5597 cc_mode = CCUmode;
5598 break;
5599
5600 case GEU:
5601 cc_mode = CCL3mode;
5602 break;
5603
5604 default:
5605 return false;
5606 }
5607
5608 /* Emit comparison instruction pattern. */
5609 if (!register_operand (cmp_op0, cmp_mode))
5610 cmp_op0 = force_reg (cmp_mode, cmp_op0);
5611
5612 insn = gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
5613 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
5614 /* We use insn_invalid_p here to add clobbers if required. */
5615 ret = insn_invalid_p (emit_insn (insn), false);
5616 gcc_assert (!ret);
5617
5618 /* Emit ALC instruction pattern. */
5619 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
5620 gen_rtx_REG (cc_mode, CC_REGNUM),
5621 const0_rtx);
5622
5623 if (src != const0_rtx)
5624 {
5625 if (!register_operand (src, GET_MODE (dst)))
5626 src = force_reg (GET_MODE (dst), src);
5627
5628 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
5629 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
5630 }
5631
5632 p = rtvec_alloc (2);
5633 RTVEC_ELT (p, 0) =
5634 gen_rtx_SET (dst, op_res);
5635 RTVEC_ELT (p, 1) =
5636 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
5637 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
5638
5639 return true;
5640 }
5641
5642 /* Try SUBTRACT LOGICAL WITH BORROW. */
5643 if (increment == constm1_rtx)
5644 {
5645 /* Determine CC mode to use. */
5646 if (cmp_code == EQ || cmp_code == NE)
5647 {
5648 if (cmp_op1 != const0_rtx)
5649 {
5650 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
5651 NULL_RTX, 0, OPTAB_WIDEN);
5652 cmp_op1 = const0_rtx;
5653 }
5654
5655 cmp_code = cmp_code == EQ ? LEU : GTU;
5656 }
5657
5658 if (cmp_code == GTU || cmp_code == GEU)
5659 {
5660 rtx tem = cmp_op0;
5661 cmp_op0 = cmp_op1;
5662 cmp_op1 = tem;
5663 cmp_code = swap_condition (cmp_code);
5664 }
5665
5666 switch (cmp_code)
5667 {
5668 case LEU:
5669 cc_mode = CCUmode;
5670 break;
5671
5672 case LTU:
5673 cc_mode = CCL3mode;
5674 break;
5675
5676 default:
5677 return false;
5678 }
5679
5680 /* Emit comparison instruction pattern. */
5681 if (!register_operand (cmp_op0, cmp_mode))
5682 cmp_op0 = force_reg (cmp_mode, cmp_op0);
5683
5684 insn = gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
5685 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
5686 /* We use insn_invalid_p here to add clobbers if required. */
5687 ret = insn_invalid_p (emit_insn (insn), false);
5688 gcc_assert (!ret);
5689
5690 /* Emit SLB instruction pattern. */
5691 if (!register_operand (src, GET_MODE (dst)))
5692 src = force_reg (GET_MODE (dst), src);
5693
5694 op_res = gen_rtx_MINUS (GET_MODE (dst),
5695 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
5696 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
5697 gen_rtx_REG (cc_mode, CC_REGNUM),
5698 const0_rtx));
5699 p = rtvec_alloc (2);
5700 RTVEC_ELT (p, 0) =
5701 gen_rtx_SET (dst, op_res);
5702 RTVEC_ELT (p, 1) =
5703 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
5704 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
5705
5706 return true;
5707 }
5708
5709 return false;
5710 }
5711
5712 /* Expand code for the insv template. Return true if successful. */
5713
5714 bool
5715 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
5716 {
5717 int bitsize = INTVAL (op1);
5718 int bitpos = INTVAL (op2);
5719 machine_mode mode = GET_MODE (dest);
5720 machine_mode smode;
5721 int smode_bsize, mode_bsize;
5722 rtx op, clobber;
5723
5724 if (bitsize + bitpos > GET_MODE_BITSIZE (mode))
5725 return false;
5726
5727 /* Generate INSERT IMMEDIATE (IILL et al). */
5728 /* (set (ze (reg)) (const_int)). */
5729 if (TARGET_ZARCH
5730 && register_operand (dest, word_mode)
5731 && (bitpos % 16) == 0
5732 && (bitsize % 16) == 0
5733 && const_int_operand (src, VOIDmode))
5734 {
5735 HOST_WIDE_INT val = INTVAL (src);
5736 int regpos = bitpos + bitsize;
5737
5738 while (regpos > bitpos)
5739 {
5740 machine_mode putmode;
5741 int putsize;
5742
5743 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
5744 putmode = SImode;
5745 else
5746 putmode = HImode;
5747
5748 putsize = GET_MODE_BITSIZE (putmode);
5749 regpos -= putsize;
5750 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
5751 GEN_INT (putsize),
5752 GEN_INT (regpos)),
5753 gen_int_mode (val, putmode));
5754 val >>= putsize;
5755 }
5756 gcc_assert (regpos == bitpos);
5757 return true;
5758 }
5759
5760 smode = smallest_mode_for_size (bitsize, MODE_INT);
5761 smode_bsize = GET_MODE_BITSIZE (smode);
5762 mode_bsize = GET_MODE_BITSIZE (mode);
5763
5764 /* Generate STORE CHARACTERS UNDER MASK (STCM et al). */
5765 if (bitpos == 0
5766 && (bitsize % BITS_PER_UNIT) == 0
5767 && MEM_P (dest)
5768 && (register_operand (src, word_mode)
5769 || const_int_operand (src, VOIDmode)))
5770 {
5771 /* Emit standard pattern if possible. */
5772 if (smode_bsize == bitsize)
5773 {
5774 emit_move_insn (adjust_address (dest, smode, 0),
5775 gen_lowpart (smode, src));
5776 return true;
5777 }
5778
5779 /* (set (ze (mem)) (const_int)). */
5780 else if (const_int_operand (src, VOIDmode))
5781 {
5782 int size = bitsize / BITS_PER_UNIT;
5783 rtx src_mem = adjust_address (force_const_mem (word_mode, src),
5784 BLKmode,
5785 UNITS_PER_WORD - size);
5786
5787 dest = adjust_address (dest, BLKmode, 0);
5788 set_mem_size (dest, size);
5789 s390_expand_movmem (dest, src_mem, GEN_INT (size));
5790 return true;
5791 }
5792
5793 /* (set (ze (mem)) (reg)). */
5794 else if (register_operand (src, word_mode))
5795 {
5796 if (bitsize <= 32)
5797 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
5798 const0_rtx), src);
5799 else
5800 {
5801 /* Emit st,stcmh sequence. */
5802 int stcmh_width = bitsize - 32;
5803 int size = stcmh_width / BITS_PER_UNIT;
5804
5805 emit_move_insn (adjust_address (dest, SImode, size),
5806 gen_lowpart (SImode, src));
5807 set_mem_size (dest, size);
5808 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
5809 GEN_INT (stcmh_width),
5810 const0_rtx),
5811 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT (32)));
5812 }
5813 return true;
5814 }
5815 }
5816
5817 /* Generate INSERT CHARACTERS UNDER MASK (IC, ICM et al). */
5818 if ((bitpos % BITS_PER_UNIT) == 0
5819 && (bitsize % BITS_PER_UNIT) == 0
5820 && (bitpos & 32) == ((bitpos + bitsize - 1) & 32)
5821 && MEM_P (src)
5822 && (mode == DImode || mode == SImode)
5823 && register_operand (dest, mode))
5824 {
5825 /* Emit a strict_low_part pattern if possible. */
5826 if (smode_bsize == bitsize && bitpos == mode_bsize - smode_bsize)
5827 {
5828 op = gen_rtx_STRICT_LOW_PART (VOIDmode, gen_lowpart (smode, dest));
5829 op = gen_rtx_SET (op, gen_lowpart (smode, src));
5830 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
5831 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
5832 return true;
5833 }
5834
5835 /* ??? There are more powerful versions of ICM that are not
5836 completely represented in the md file. */
5837 }
5838
5839 /* For z10, generate ROTATE THEN INSERT SELECTED BITS (RISBG et al). */
5840 if (TARGET_Z10 && (mode == DImode || mode == SImode))
5841 {
5842 machine_mode mode_s = GET_MODE (src);
5843
5844 if (mode_s == VOIDmode)
5845 {
5846 /* For constant zero values the representation with AND
5847 appears to be folded in more situations than the (set
5848 (zero_extract) ...).
5849 We only do this when the start and end of the bitfield
5850 remain in the same SImode chunk. That way nihf or nilf
5851 can be used.
5852 The AND patterns might still generate a risbg for this. */
5853 if (src == const0_rtx && bitpos / 32 == (bitpos + bitsize - 1) / 32)
5854 return false;
5855 else
5856 src = force_reg (mode, src);
5857 }
5858 else if (mode_s != mode)
5859 {
5860 gcc_assert (GET_MODE_BITSIZE (mode_s) >= bitsize);
5861 src = force_reg (mode_s, src);
5862 src = gen_lowpart (mode, src);
5863 }
5864
5865 op = gen_rtx_ZERO_EXTRACT (mode, dest, op1, op2);
5866 op = gen_rtx_SET (op, src);
5867
5868 if (!TARGET_ZEC12)
5869 {
5870 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
5871 op = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber));
5872 }
5873 emit_insn (op);
5874
5875 return true;
5876 }
5877
5878 return false;
5879 }
5880
5881 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
5882 register that holds VAL of mode MODE shifted by COUNT bits. */
5883
5884 static inline rtx
5885 s390_expand_mask_and_shift (rtx val, machine_mode mode, rtx count)
5886 {
5887 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
5888 NULL_RTX, 1, OPTAB_DIRECT);
5889 return expand_simple_binop (SImode, ASHIFT, val, count,
5890 NULL_RTX, 1, OPTAB_DIRECT);
5891 }
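
/* Illustration of the helper above: for MODE == QImode the mask is 0xff,
   so with VAL == 0x1234ab and COUNT == 16 it first computes
   0x1234ab & 0xff == 0xab and then 0xab << 16 == 0xab0000, i.e. the
   QImode value ends up in the second-most-significant byte of the
   SImode word.  */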
5892
5893 /* Generate a vector comparison COND of CMP_OP1 and CMP_OP2 and store
5894 the result in TARGET. */
5895
5896 void
5897 s390_expand_vec_compare (rtx target, enum rtx_code cond,
5898 rtx cmp_op1, rtx cmp_op2)
5899 {
5900 machine_mode mode = GET_MODE (target);
5901 bool neg_p = false, swap_p = false;
5902 rtx tmp;
5903
5904 if (GET_MODE (cmp_op1) == V2DFmode)
5905 {
5906 switch (cond)
5907 {
5908 /* NE a != b -> !(a == b) */
5909 case NE: cond = EQ; neg_p = true; break;
5910 /* UNGT a u> b -> !(b >= a) */
5911 case UNGT: cond = GE; neg_p = true; swap_p = true; break;
5912 /* UNGE a u>= b -> !(b > a) */
5913 case UNGE: cond = GT; neg_p = true; swap_p = true; break;
5914 /* LE: a <= b -> b >= a */
5915 case LE: cond = GE; swap_p = true; break;
5916 /* UNLE: a u<= b -> !(a > b) */
5917 case UNLE: cond = GT; neg_p = true; break;
5918 /* LT: a < b -> b > a */
5919 case LT: cond = GT; swap_p = true; break;
5920 /* UNLT: a u< b -> !(a >= b) */
5921 case UNLT: cond = GE; neg_p = true; break;
5922 case UNEQ:
5923 emit_insn (gen_vec_cmpuneqv2df (target, cmp_op1, cmp_op2));
5924 return;
5925 case LTGT:
5926 emit_insn (gen_vec_cmpltgtv2df (target, cmp_op1, cmp_op2));
5927 return;
5928 case ORDERED:
5929 emit_insn (gen_vec_orderedv2df (target, cmp_op1, cmp_op2));
5930 return;
5931 case UNORDERED:
5932 emit_insn (gen_vec_unorderedv2df (target, cmp_op1, cmp_op2));
5933 return;
5934 default: break;
5935 }
5936 }
5937 else
5938 {
5939 switch (cond)
5940 {
5941 /* NE: a != b -> !(a == b) */
5942 case NE: cond = EQ; neg_p = true; break;
5943 /* GE: a >= b -> !(b > a) */
5944 case GE: cond = GT; neg_p = true; swap_p = true; break;
5945 /* GEU: a >= b -> !(b > a) */
5946 case GEU: cond = GTU; neg_p = true; swap_p = true; break;
5947 /* LE: a <= b -> !(a > b) */
5948 case LE: cond = GT; neg_p = true; break;
5949 /* LEU: a <= b -> !(a > b) */
5950 case LEU: cond = GTU; neg_p = true; break;
5951 /* LT: a < b -> b > a */
5952 case LT: cond = GT; swap_p = true; break;
5953 /* LTU: a < b -> b > a */
5954 case LTU: cond = GTU; swap_p = true; break;
5955 default: break;
5956 }
5957 }
5958
5959 if (swap_p)
5960 {
5961 tmp = cmp_op1; cmp_op1 = cmp_op2; cmp_op2 = tmp;
5962 }
5963
5964 emit_insn (gen_rtx_SET (target, gen_rtx_fmt_ee (cond,
5965 mode,
5966 cmp_op1, cmp_op2)));
5967 if (neg_p)
5968 emit_insn (gen_rtx_SET (target, gen_rtx_NOT (mode, target)));
5969 }
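
/* Illustration: an integer V4SImode comparison a >= b hits the GE case
   above (cond = GT, neg_p = swap_p = true), so the expander emits
   (set target (gt:V4SI b a)) followed by (set target (not:V4SI target)),
   i.e. a >= b is computed as !(b > a).  */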
5970
5971 /* Expand the comparison CODE of CMP1 and CMP2 and copy 1 or 0 into
5972 TARGET if either all (ALL_P is true) or any (ALL_P is false) of the
5973 elements in CMP1 and CMP2 fulfill the comparison. */
5974 void
5975 s390_expand_vec_compare_cc (rtx target, enum rtx_code code,
5976 rtx cmp1, rtx cmp2, bool all_p)
5977 {
5978 enum rtx_code new_code = code;
5979 machine_mode cmp_mode, full_cmp_mode, scratch_mode;
5980 rtx tmp_reg = gen_reg_rtx (SImode);
5981 bool swap_p = false;
5982
5983 if (GET_MODE_CLASS (GET_MODE (cmp1)) == MODE_VECTOR_INT)
5984 {
5985 switch (code)
5986 {
5987 case EQ: cmp_mode = CCVEQmode; break;
5988 case NE: cmp_mode = CCVEQmode; break;
5989 case GT: cmp_mode = CCVHmode; break;
5990 case GE: cmp_mode = CCVHmode; new_code = LE; swap_p = true; break;
5991 case LT: cmp_mode = CCVHmode; new_code = GT; swap_p = true; break;
5992 case LE: cmp_mode = CCVHmode; new_code = LE; break;
5993 case GTU: cmp_mode = CCVHUmode; break;
5994 case GEU: cmp_mode = CCVHUmode; new_code = LEU; swap_p = true; break;
5995 case LTU: cmp_mode = CCVHUmode; new_code = GTU; swap_p = true; break;
5996 case LEU: cmp_mode = CCVHUmode; new_code = LEU; break;
5997 default: gcc_unreachable ();
5998 }
5999 scratch_mode = GET_MODE (cmp1);
6000 }
6001 else if (GET_MODE (cmp1) == V2DFmode)
6002 {
6003 switch (code)
6004 {
6005 case EQ: cmp_mode = CCVEQmode; break;
6006 case NE: cmp_mode = CCVEQmode; break;
6007 case GT: cmp_mode = CCVFHmode; break;
6008 case GE: cmp_mode = CCVFHEmode; break;
6009 case UNLE: cmp_mode = CCVFHmode; break;
6010 case UNLT: cmp_mode = CCVFHEmode; break;
6011 case LT: cmp_mode = CCVFHmode; new_code = GT; swap_p = true; break;
6012 case LE: cmp_mode = CCVFHEmode; new_code = GE; swap_p = true; break;
6013 default: gcc_unreachable ();
6014 }
6015 scratch_mode = V2DImode;
6016 }
6017 else
6018 gcc_unreachable ();
6019
6020 if (!all_p)
6021 switch (cmp_mode)
6022 {
6023 case CCVEQmode: full_cmp_mode = CCVEQANYmode; break;
6024 case CCVHmode: full_cmp_mode = CCVHANYmode; break;
6025 case CCVHUmode: full_cmp_mode = CCVHUANYmode; break;
6026 case CCVFHmode: full_cmp_mode = CCVFHANYmode; break;
6027 case CCVFHEmode: full_cmp_mode = CCVFHEANYmode; break;
6028 default: gcc_unreachable ();
6029 }
6030 else
6031 /* The modes without ANY match the ALL modes. */
6032 full_cmp_mode = cmp_mode;
6033
6034 if (swap_p)
6035 {
6036 rtx tmp = cmp2;
6037 cmp2 = cmp1;
6038 cmp1 = tmp;
6039 }
6040
6041 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6042 gen_rtvec (2, gen_rtx_SET (
6043 gen_rtx_REG (cmp_mode, CC_REGNUM),
6044 gen_rtx_COMPARE (cmp_mode, cmp1, cmp2)),
6045 gen_rtx_CLOBBER (VOIDmode,
6046 gen_rtx_SCRATCH (scratch_mode)))));
6047 emit_move_insn (target, const0_rtx);
6048 emit_move_insn (tmp_reg, const1_rtx);
6049
6050 emit_move_insn (target,
6051 gen_rtx_IF_THEN_ELSE (SImode,
6052 gen_rtx_fmt_ee (new_code, VOIDmode,
6053 gen_rtx_REG (full_cmp_mode, CC_REGNUM),
6054 const0_rtx),
6055 target, tmp_reg));
6056 }
6057
6058 /* Generate a vector comparison expression loading either elements of
6059 THEN or ELS into TARGET depending on the comparison COND of CMP_OP1
6060 and CMP_OP2. */
6061
6062 void
6063 s390_expand_vcond (rtx target, rtx then, rtx els,
6064 enum rtx_code cond, rtx cmp_op1, rtx cmp_op2)
6065 {
6066 rtx tmp;
6067 machine_mode result_mode;
6068 rtx result_target;
6069
6070 /* We always use an integral type vector to hold the comparison
6071 result. */
6072 result_mode = GET_MODE (cmp_op1) == V2DFmode ? V2DImode : GET_MODE (cmp_op1);
6073 result_target = gen_reg_rtx (result_mode);
6074
6075 /* Alternatively this could be done by reload by lowering the cmp*
6076 predicates. But it appears to be better for scheduling etc. to
6077 do this early. */
6078 if (!REG_P (cmp_op1))
6079 cmp_op1 = force_reg (GET_MODE (target), cmp_op1);
6080
6081 if (!REG_P (cmp_op2))
6082 cmp_op2 = force_reg (GET_MODE (target), cmp_op2);
6083
6084 s390_expand_vec_compare (result_target, cond,
6085 cmp_op1, cmp_op2);
6086
6087 /* If the results are supposed to be either -1 or 0 we are done
6088 since this is what our compare instructions generate anyway. */
6089 if (constm1_operand (then, GET_MODE (then))
6090 && const0_operand (els, GET_MODE (els)))
6091 {
6092 emit_move_insn (target, gen_rtx_SUBREG (GET_MODE (target),
6093 result_target, 0));
6094 return;
6095 }
6096
6097 /* Otherwise we will do a vsel afterwards. */
6098 /* This gets triggered e.g.
6099 with gcc.c-torture/compile/pr53410-1.c */
6100 if (!REG_P (then))
6101 then = force_reg (GET_MODE (target), then);
6102
6103 if (!REG_P (els))
6104 els = force_reg (GET_MODE (target), els);
6105
6106 tmp = gen_rtx_fmt_ee (EQ, VOIDmode,
6107 result_target,
6108 CONST0_RTX (result_mode));
6109
6110 /* We compared the result against zero above so we have to swap then
6111 and els here. */
6112 tmp = gen_rtx_IF_THEN_ELSE (GET_MODE (target), tmp, els, then);
6113
6114 gcc_assert (GET_MODE (target) == GET_MODE (then));
6115 emit_insn (gen_rtx_SET (target, tmp));
6116 }
6117
6118 /* Emit the RTX necessary to initialize the vector TARGET with values
6119 in VALS. */
6120 void
6121 s390_expand_vec_init (rtx target, rtx vals)
6122 {
6123 machine_mode mode = GET_MODE (target);
6124 machine_mode inner_mode = GET_MODE_INNER (mode);
6125 int n_elts = GET_MODE_NUNITS (mode);
6126 bool all_same = true, all_regs = true, all_const_int = true;
6127 rtx x;
6128 int i;
6129
6130 for (i = 0; i < n_elts; ++i)
6131 {
6132 x = XVECEXP (vals, 0, i);
6133
6134 if (!CONST_INT_P (x))
6135 all_const_int = false;
6136
6137 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6138 all_same = false;
6139
6140 if (!REG_P (x))
6141 all_regs = false;
6142 }
6143
6144 /* Use vector gen mask or vector gen byte mask if possible. */
6145 if (all_same && all_const_int
6146 && (XVECEXP (vals, 0, 0) == const0_rtx
6147 || s390_contiguous_bitmask_vector_p (XVECEXP (vals, 0, 0),
6148 NULL, NULL)
6149 || s390_bytemask_vector_p (XVECEXP (vals, 0, 0), NULL)))
6150 {
6151 emit_insn (gen_rtx_SET (target,
6152 gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0))));
6153 return;
6154 }
6155
6156 if (all_same)
6157 {
6158 emit_insn (gen_rtx_SET (target,
6159 gen_rtx_VEC_DUPLICATE (mode,
6160 XVECEXP (vals, 0, 0))));
6161 return;
6162 }
6163
6164 if (all_regs && REG_P (target) && n_elts == 2 && inner_mode == DImode)
6165 {
6166 /* Use vector load pair. */
6167 emit_insn (gen_rtx_SET (target,
6168 gen_rtx_VEC_CONCAT (mode,
6169 XVECEXP (vals, 0, 0),
6170 XVECEXP (vals, 0, 1))));
6171 return;
6172 }
6173
6174 /* We are about to set the vector elements one by one. Zero out the
6175 full register first in order to help the data flow framework to
6176 detect it as a full VR set. */
6177 emit_insn (gen_rtx_SET (target, CONST0_RTX (mode)));
6178
6179 /* Unfortunately the vec_init expander is not allowed to fail. So
6180 we have to implement the fallback ourselves. */
6181 for (i = 0; i < n_elts; i++)
6182 emit_insn (gen_rtx_SET (target,
6183 gen_rtx_UNSPEC (mode,
6184 gen_rtvec (3, XVECEXP (vals, 0, i),
6185 GEN_INT (i), target),
6186 UNSPEC_VEC_SET)));
6187 }
6188
6189 /* Structure to hold the initial parameters for a compare_and_swap operation
6190 in HImode and QImode. */
6191
6192 struct alignment_context
6193 {
6194 rtx memsi; /* SI aligned memory location. */
6195 rtx shift; /* Bit offset with regard to lsb. */
6196 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
6197 rtx modemaski; /* ~modemask */
6198 bool aligned; /* True if memory is aligned, false otherwise. */
6199 };
6200
6201 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
6202 structure AC for transparent simplification, if the memory alignment is known
6203 to be at least 32 bits. MEM is the memory location for the actual operation
6204 and MODE its mode. */
6205
6206 static void
6207 init_alignment_context (struct alignment_context *ac, rtx mem,
6208 machine_mode mode)
6209 {
6210 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
6211 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
6212
6213 if (ac->aligned)
6214 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
6215 else
6216 {
6217 /* Alignment is unknown. */
6218 rtx byteoffset, addr, align;
6219
6220 /* Force the address into a register. */
6221 addr = force_reg (Pmode, XEXP (mem, 0));
6222
6223 /* Align it to SImode. */
6224 align = expand_simple_binop (Pmode, AND, addr,
6225 GEN_INT (-GET_MODE_SIZE (SImode)),
6226 NULL_RTX, 1, OPTAB_DIRECT);
6227 /* Generate MEM. */
6228 ac->memsi = gen_rtx_MEM (SImode, align);
6229 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
6230 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
6231 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
6232
6233 /* Calculate shiftcount. */
6234 byteoffset = expand_simple_binop (Pmode, AND, addr,
6235 GEN_INT (GET_MODE_SIZE (SImode) - 1),
6236 NULL_RTX, 1, OPTAB_DIRECT);
6237 /* As we already have some offset, evaluate the remaining distance. */
6238 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
6239 NULL_RTX, 1, OPTAB_DIRECT);
6240 }
6241
6242 /* Shift is the byte count, but we need the bitcount. */
6243 ac->shift = expand_simple_binop (SImode, ASHIFT, ac->shift, GEN_INT (3),
6244 NULL_RTX, 1, OPTAB_DIRECT);
6245
6246 /* Calculate masks. */
6247 ac->modemask = expand_simple_binop (SImode, ASHIFT,
6248 GEN_INT (GET_MODE_MASK (mode)),
6249 ac->shift, NULL_RTX, 1, OPTAB_DIRECT);
6250 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask,
6251 NULL_RTX, 1);
6252 }
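
/* Illustration of the unaligned path above: for an HImode access whose
   address ends in binary ...01, byteoffset is 1, so the shift becomes
   (4 - 2) - 1 == 1 byte == 8 bits, modemask == 0xffff << 8 == 0x00ffff00
   and modemaski == 0xff0000ff.  On this big-endian target that selects
   exactly bytes 1 and 2 of the aligned SImode word, which is where the
   HImode value lives.  */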
6253
6254 /* A subroutine of s390_expand_cs_hqi. Insert INS into VAL. If possible,
6255 use a single insv insn into SEQ2. Otherwise, put prep insns in SEQ1 and
6256 perform the merge in SEQ2. */
6257
6258 static rtx
6259 s390_two_part_insv (struct alignment_context *ac, rtx *seq1, rtx *seq2,
6260 machine_mode mode, rtx val, rtx ins)
6261 {
6262 rtx tmp;
6263
6264 if (ac->aligned)
6265 {
6266 start_sequence ();
6267 tmp = copy_to_mode_reg (SImode, val);
6268 if (s390_expand_insv (tmp, GEN_INT (GET_MODE_BITSIZE (mode)),
6269 const0_rtx, ins))
6270 {
6271 *seq1 = NULL;
6272 *seq2 = get_insns ();
6273 end_sequence ();
6274 return tmp;
6275 }
6276 end_sequence ();
6277 }
6278
6279 /* Failed to use insv. Generate a two part shift and mask. */
6280 start_sequence ();
6281 tmp = s390_expand_mask_and_shift (ins, mode, ac->shift);
6282 *seq1 = get_insns ();
6283 end_sequence ();
6284
6285 start_sequence ();
6286 tmp = expand_simple_binop (SImode, IOR, tmp, val, NULL_RTX, 1, OPTAB_DIRECT);
6287 *seq2 = get_insns ();
6288 end_sequence ();
6289
6290 return tmp;
6291 }
6292
6293 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
6294 the memory location, CMP the old value to compare MEM with and NEW_RTX the
6295 value to set if CMP == MEM. */
6296
6297 void
6298 s390_expand_cs_hqi (machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
6299 rtx cmp, rtx new_rtx, bool is_weak)
6300 {
6301 struct alignment_context ac;
6302 rtx cmpv, newv, val, cc, seq0, seq1, seq2, seq3;
6303 rtx res = gen_reg_rtx (SImode);
6304 rtx_code_label *csloop = NULL, *csend = NULL;
6305
6306 gcc_assert (MEM_P (mem));
6307
6308 init_alignment_context (&ac, mem, mode);
6309
6310 /* Load full word. Subsequent loads are performed by CS. */
6311 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
6312 NULL_RTX, 1, OPTAB_DIRECT);
6313
6314 /* Prepare insertions of cmp and new_rtx into the loaded value. When
6315 possible, we try to use insv to make this happen efficiently. If
6316 that fails we'll generate code both inside and outside the loop. */
6317 cmpv = s390_two_part_insv (&ac, &seq0, &seq2, mode, val, cmp);
6318 newv = s390_two_part_insv (&ac, &seq1, &seq3, mode, val, new_rtx);
6319
6320 if (seq0)
6321 emit_insn (seq0);
6322 if (seq1)
6323 emit_insn (seq1);
6324
6325 /* Start CS loop. */
6326 if (!is_weak)
6327 {
6328 /* Begin assuming success. */
6329 emit_move_insn (btarget, const1_rtx);
6330
6331 csloop = gen_label_rtx ();
6332 csend = gen_label_rtx ();
6333 emit_label (csloop);
6334 }
6335
6336 /* val = "<mem>00..0<mem>"
6337 * cmp = "00..0<cmp>00..0"
6338 * new = "00..0<new>00..0"
6339 */
6340
6341 emit_insn (seq2);
6342 emit_insn (seq3);
6343
6344 cc = s390_emit_compare_and_swap (EQ, res, ac.memsi, cmpv, newv);
6345 if (is_weak)
6346 emit_insn (gen_cstorecc4 (btarget, cc, XEXP (cc, 0), XEXP (cc, 1)));
6347 else
6348 {
6349 rtx tmp;
6350
6351 /* Jump to end if we're done (likely?). */
6352 s390_emit_jump (csend, cc);
6353
6354 /* Check for changes outside mode, and loop internal if so.
6355 Arrange the moves so that the compare is adjacent to the
6356 branch so that we can generate CRJ. */
6357 tmp = copy_to_reg (val);
6358 force_expand_binop (SImode, and_optab, res, ac.modemaski, val,
6359 1, OPTAB_DIRECT);
6360 cc = s390_emit_compare (NE, val, tmp);
6361 s390_emit_jump (csloop, cc);
6362
6363 /* Failed. */
6364 emit_move_insn (btarget, const0_rtx);
6365 emit_label (csend);
6366 }
6367
6368 /* Return the correct part of the bitfield. */
6369 convert_move (vtarget, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
6370 NULL_RTX, 1, OPTAB_DIRECT), 1);
6371 }
6372
6373 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
6374 and VAL the value to play with. If AFTER is true then store the value
6375 MEM holds after the operation, if AFTER is false then store the value MEM
6376 holds before the operation. If TARGET is zero then discard that value, else
6377 store it to TARGET. */
6378
6379 void
6380 s390_expand_atomic (machine_mode mode, enum rtx_code code,
6381 rtx target, rtx mem, rtx val, bool after)
6382 {
6383 struct alignment_context ac;
6384 rtx cmp;
6385 rtx new_rtx = gen_reg_rtx (SImode);
6386 rtx orig = gen_reg_rtx (SImode);
6387 rtx_code_label *csloop = gen_label_rtx ();
6388
6389 gcc_assert (!target || register_operand (target, VOIDmode));
6390 gcc_assert (MEM_P (mem));
6391
6392 init_alignment_context (&ac, mem, mode);
6393
6394 /* Shift val to the correct bit positions.
6395 Preserve "icm", but prevent "ex icm". */
6396 if (!(ac.aligned && code == SET && MEM_P (val)))
6397 val = s390_expand_mask_and_shift (val, mode, ac.shift);
6398
6399 /* Further preparation insns. */
6400 if (code == PLUS || code == MINUS)
6401 emit_move_insn (orig, val);
6402 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
6403 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
6404 NULL_RTX, 1, OPTAB_DIRECT);
6405
6406 /* Load full word. Subsequent loads are performed by CS. */
6407 cmp = force_reg (SImode, ac.memsi);
6408
6409 /* Start CS loop. */
6410 emit_label (csloop);
6411 emit_move_insn (new_rtx, cmp);
6412
6413 /* Patch new with val at correct position. */
6414 switch (code)
6415 {
6416 case PLUS:
6417 case MINUS:
6418 val = expand_simple_binop (SImode, code, new_rtx, orig,
6419 NULL_RTX, 1, OPTAB_DIRECT);
6420 val = expand_simple_binop (SImode, AND, val, ac.modemask,
6421 NULL_RTX, 1, OPTAB_DIRECT);
6422 /* FALLTHRU */
6423 case SET:
6424 if (ac.aligned && MEM_P (val))
6425 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0,
6426 0, 0, SImode, val);
6427 else
6428 {
6429 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
6430 NULL_RTX, 1, OPTAB_DIRECT);
6431 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
6432 NULL_RTX, 1, OPTAB_DIRECT);
6433 }
6434 break;
6435 case AND:
6436 case IOR:
6437 case XOR:
6438 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
6439 NULL_RTX, 1, OPTAB_DIRECT);
6440 break;
6441 case MULT: /* NAND */
6442 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
6443 NULL_RTX, 1, OPTAB_DIRECT);
6444 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
6445 NULL_RTX, 1, OPTAB_DIRECT);
6446 break;
6447 default:
6448 gcc_unreachable ();
6449 }
6450
6451 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
6452 ac.memsi, cmp, new_rtx));
6453
6454 /* Return the correct part of the bitfield. */
6455 if (target)
6456 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
6457 after ? new_rtx : cmp, ac.shift,
6458 NULL_RTX, 1, OPTAB_DIRECT), 1);
6459 }
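
/* Illustration of the AND/NAND preparation above: for a QImode operand
   0x0f at a shift count of 8 bits, s390_expand_mask_and_shift yields
   val == 0x00000f00 while modemaski == 0xffff00ff.  The XOR turns val
   into 0xffff0fff, so the full-word AND inside the CS loop can only
   clear bits of the addressed byte and leaves the neighbouring bytes
   of the word unchanged.  */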
6460
6461 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
6462 We need to emit DTP-relative relocations. */
6463
6464 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
6465
6466 static void
6467 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
6468 {
6469 switch (size)
6470 {
6471 case 4:
6472 fputs ("\t.long\t", file);
6473 break;
6474 case 8:
6475 fputs ("\t.quad\t", file);
6476 break;
6477 default:
6478 gcc_unreachable ();
6479 }
6480 output_addr_const (file, x);
6481 fputs ("@DTPOFF", file);
6482 }
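
/* Illustration: for SIZE == 8 and a symbol_ref "foo" the routine above
   prints the line "\t.quad\tfoo@DTPOFF", i.e. a 64-bit DTP-relative
   relocation request for foo.  */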
6483
6484 /* Return the proper mode for REGNO being represented in the dwarf
6485 unwind table. */
6486 machine_mode
6487 s390_dwarf_frame_reg_mode (int regno)
6488 {
6489 machine_mode save_mode = default_dwarf_frame_reg_mode (regno);
6490
6491 /* Make sure not to return DImode for any GPR with -m31 -mzarch. */
6492 if (GENERAL_REGNO_P (regno))
6493 save_mode = Pmode;
6494
6495 /* The rightmost 64 bits of vector registers are call-clobbered. */
6496 if (GET_MODE_SIZE (save_mode) > 8)
6497 save_mode = DImode;
6498
6499 return save_mode;
6500 }
6501
6502 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
6503 /* Implement TARGET_MANGLE_TYPE. */
6504
6505 static const char *
6506 s390_mangle_type (const_tree type)
6507 {
6508 type = TYPE_MAIN_VARIANT (type);
6509
6510 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
6511 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
6512 return NULL;
6513
6514 if (type == s390_builtin_types[BT_BV16QI]) return "U6__boolc";
6515 if (type == s390_builtin_types[BT_BV8HI]) return "U6__bools";
6516 if (type == s390_builtin_types[BT_BV4SI]) return "U6__booli";
6517 if (type == s390_builtin_types[BT_BV2DI]) return "U6__booll";
6518
6519 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
6520 && TARGET_LONG_DOUBLE_128)
6521 return "g";
6522
6523 /* For all other types, use normal C++ mangling. */
6524 return NULL;
6525 }
6526 #endif
6527
6528 /* In the name of slightly smaller debug output, and to cater to
6529 general assembler lossage, recognize various UNSPEC sequences
6530 and turn them back into a direct symbol reference. */
6531
6532 static rtx
6533 s390_delegitimize_address (rtx orig_x)
6534 {
6535 rtx x, y;
6536
6537 orig_x = delegitimize_mem_from_attrs (orig_x);
6538 x = orig_x;
6539
6540 /* Extract the symbol ref from:
6541 (plus:SI (reg:SI 12 %r12)
6542 (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
6543 UNSPEC_GOTOFF/PLTOFF)))
6544 and
6545 (plus:SI (reg:SI 12 %r12)
6546 (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
6547 UNSPEC_GOTOFF/PLTOFF)
6548 (const_int 4 [0x4])))) */
6549 if (GET_CODE (x) == PLUS
6550 && REG_P (XEXP (x, 0))
6551 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
6552 && GET_CODE (XEXP (x, 1)) == CONST)
6553 {
6554 HOST_WIDE_INT offset = 0;
6555
6556 /* The const operand. */
6557 y = XEXP (XEXP (x, 1), 0);
6558
6559 if (GET_CODE (y) == PLUS
6560 && GET_CODE (XEXP (y, 1)) == CONST_INT)
6561 {
6562 offset = INTVAL (XEXP (y, 1));
6563 y = XEXP (y, 0);
6564 }
6565
6566 if (GET_CODE (y) == UNSPEC
6567 && (XINT (y, 1) == UNSPEC_GOTOFF
6568 || XINT (y, 1) == UNSPEC_PLTOFF))
6569 return plus_constant (Pmode, XVECEXP (y, 0, 0), offset);
6570 }
6571
6572 if (GET_CODE (x) != MEM)
6573 return orig_x;
6574
6575 x = XEXP (x, 0);
6576 if (GET_CODE (x) == PLUS
6577 && GET_CODE (XEXP (x, 1)) == CONST
6578 && GET_CODE (XEXP (x, 0)) == REG
6579 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
6580 {
6581 y = XEXP (XEXP (x, 1), 0);
6582 if (GET_CODE (y) == UNSPEC
6583 && XINT (y, 1) == UNSPEC_GOT)
6584 y = XVECEXP (y, 0, 0);
6585 else
6586 return orig_x;
6587 }
6588 else if (GET_CODE (x) == CONST)
6589 {
6590 /* Extract the symbol ref from:
6591 (mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
6592 UNSPEC_PLT/GOTENT))) */
6593
6594 y = XEXP (x, 0);
6595 if (GET_CODE (y) == UNSPEC
6596 && (XINT (y, 1) == UNSPEC_GOTENT
6597 || XINT (y, 1) == UNSPEC_PLT))
6598 y = XVECEXP (y, 0, 0);
6599 else
6600 return orig_x;
6601 }
6602 else
6603 return orig_x;
6604
6605 if (GET_MODE (orig_x) != Pmode)
6606 {
6607 if (GET_MODE (orig_x) == BLKmode)
6608 return orig_x;
6609 y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
6610 if (y == NULL_RTX)
6611 return orig_x;
6612 }
6613 return y;
6614 }
6615
6616 /* Output operand OP to stdio stream FILE.
6617 OP is an address (register + offset) which is not used to address data;
6618 instead the rightmost bits are interpreted as the value. */
6619
6620 static void
6621 print_shift_count_operand (FILE *file, rtx op)
6622 {
6623 HOST_WIDE_INT offset;
6624 rtx base;
6625
6626 /* Extract base register and offset. */
6627 if (!s390_decompose_shift_count (op, &base, &offset))
6628 gcc_unreachable ();
6629
6630 /* Sanity check. */
6631 if (base)
6632 {
6633 gcc_assert (GET_CODE (base) == REG);
6634 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
6635 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
6636 }
6637
6638 /* Offsets are restricted to twelve bits. */
6639 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
6640 if (base)
6641 fprintf (file, "(%s)", reg_names[REGNO (base)]);
6642 }
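
/* Illustration (assuming s390_decompose_shift_count splits a reg+const
   address in the obvious way): for OP == (plus (reg %r3) (const_int 4103))
   the offset is truncated to 4103 & 4095 == 7 and "7(%r3)" is printed;
   without a base register only the 12-bit offset value appears.  */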
6643
6644 /* Assigns the number of NOP halfwords to be emitted before and after the
6645 function label to *HW_BEFORE and *HW_AFTER. Neither pointer may be NULL.
6646 If hotpatching is disabled for the function, the values are set to zero.
6647 */
6648
6649 static void
6650 s390_function_num_hotpatch_hw (tree decl,
6651 int *hw_before,
6652 int *hw_after)
6653 {
6654 tree attr;
6655
6656 attr = lookup_attribute ("hotpatch", DECL_ATTRIBUTES (decl));
6657
6658 /* Handle the arguments of the hotpatch attribute. The values
6659 specified via attribute might override the cmdline argument
6660 values. */
6661 if (attr)
6662 {
6663 tree args = TREE_VALUE (attr);
6664
6665 *hw_before = TREE_INT_CST_LOW (TREE_VALUE (args));
6666 *hw_after = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (args)));
6667 }
6668 else
6669 {
6670 /* Use the values specified by the cmdline arguments. */
6671 *hw_before = s390_hotpatch_hw_before_label;
6672 *hw_after = s390_hotpatch_hw_after_label;
6673 }
6674 }
6675
6676 /* Write the extra assembler code needed to declare a function properly. */
6677
6678 void
6679 s390_asm_output_function_label (FILE *asm_out_file, const char *fname,
6680 tree decl)
6681 {
6682 int hw_before, hw_after;
6683
6684 s390_function_num_hotpatch_hw (decl, &hw_before, &hw_after);
6685 if (hw_before > 0)
6686 {
6687 unsigned int function_alignment;
6688 int i;
6689
6690 /* Add a trampoline code area before the function label and initialize it
6691 with two-byte nop instructions. This area can be overwritten with code
6692 that jumps to a patched version of the function. */
6693 asm_fprintf (asm_out_file, "\tnopr\t%%r7"
6694 "\t# pre-label NOPs for hotpatch (%d halfwords)\n",
6695 hw_before);
6696 for (i = 1; i < hw_before; i++)
6697 fputs ("\tnopr\t%r7\n", asm_out_file);
6698
6699 /* Note: The function label must be aligned so that (a) the bytes of the
6700 following nop do not cross a cacheline boundary, and (b) a jump address
6701 (eight bytes for 64-bit targets, four bytes for 32-bit targets) can be
6702 stored directly before the label without crossing a cacheline
6703 boundary. All this is necessary to make sure the trampoline code can
6704 be changed atomically.
6705 This alignment is done automatically using the FUNCTION_BOUNDARY, but
6706 if there are NOPs before the function label, the alignment is placed
6707 before them. So it is necessary to duplicate the alignment after the
6708 NOPs. */
6709 function_alignment = MAX (8, DECL_ALIGN (decl) / BITS_PER_UNIT);
6710 if (! DECL_USER_ALIGN (decl))
6711 function_alignment = MAX (function_alignment,
6712 (unsigned int) align_functions);
6713 fputs ("\t# alignment for hotpatch\n", asm_out_file);
6714 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (function_alignment));
6715 }
6716
6717 ASM_OUTPUT_LABEL (asm_out_file, fname);
6718 if (hw_after > 0)
6719 asm_fprintf (asm_out_file,
6720 "\t# post-label NOPs for hotpatch (%d halfwords)\n",
6721 hw_after);
6722 }
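
/* Illustrative sketch of the output for hw_before == 2 and hw_after == 1
   on a function "foo" (the alignment directive depends on
   ASM_OUTPUT_ALIGN):

	nopr	%r7	# pre-label NOPs for hotpatch (2 halfwords)
	nopr	%r7
	# alignment for hotpatch
	<alignment directive>
   foo:
	# post-label NOPs for hotpatch (1 halfwords)

   Note that only the marker comment for the post-label NOPs is printed
   by this routine.  */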
6723
6724 /* Output machine-dependent UNSPECs occurring in address constant X
6725 in assembler syntax to stdio stream FILE. Returns true if the
6726 constant X could be recognized, false otherwise. */
6727
6728 static bool
6729 s390_output_addr_const_extra (FILE *file, rtx x)
6730 {
6731 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
6732 switch (XINT (x, 1))
6733 {
6734 case UNSPEC_GOTENT:
6735 output_addr_const (file, XVECEXP (x, 0, 0));
6736 fprintf (file, "@GOTENT");
6737 return true;
6738 case UNSPEC_GOT:
6739 output_addr_const (file, XVECEXP (x, 0, 0));
6740 fprintf (file, "@GOT");
6741 return true;
6742 case UNSPEC_GOTOFF:
6743 output_addr_const (file, XVECEXP (x, 0, 0));
6744 fprintf (file, "@GOTOFF");
6745 return true;
6746 case UNSPEC_PLT:
6747 output_addr_const (file, XVECEXP (x, 0, 0));
6748 fprintf (file, "@PLT");
6749 return true;
6750 case UNSPEC_PLTOFF:
6751 output_addr_const (file, XVECEXP (x, 0, 0));
6752 fprintf (file, "@PLTOFF");
6753 return true;
6754 case UNSPEC_TLSGD:
6755 output_addr_const (file, XVECEXP (x, 0, 0));
6756 fprintf (file, "@TLSGD");
6757 return true;
6758 case UNSPEC_TLSLDM:
6759 assemble_name (file, get_some_local_dynamic_name ());
6760 fprintf (file, "@TLSLDM");
6761 return true;
6762 case UNSPEC_DTPOFF:
6763 output_addr_const (file, XVECEXP (x, 0, 0));
6764 fprintf (file, "@DTPOFF");
6765 return true;
6766 case UNSPEC_NTPOFF:
6767 output_addr_const (file, XVECEXP (x, 0, 0));
6768 fprintf (file, "@NTPOFF");
6769 return true;
6770 case UNSPEC_GOTNTPOFF:
6771 output_addr_const (file, XVECEXP (x, 0, 0));
6772 fprintf (file, "@GOTNTPOFF");
6773 return true;
6774 case UNSPEC_INDNTPOFF:
6775 output_addr_const (file, XVECEXP (x, 0, 0));
6776 fprintf (file, "@INDNTPOFF");
6777 return true;
6778 }
6779
6780 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
6781 switch (XINT (x, 1))
6782 {
6783 case UNSPEC_POOL_OFFSET:
6784 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
6785 output_addr_const (file, x);
6786 return true;
6787 }
6788 return false;
6789 }
6790
6791 /* Output address operand ADDR in assembler syntax to
6792 stdio stream FILE. */
6793
6794 void
6795 print_operand_address (FILE *file, rtx addr)
6796 {
6797 struct s390_address ad;
6798
6799 if (s390_loadrelative_operand_p (addr, NULL, NULL))
6800 {
6801 if (!TARGET_Z10)
6802 {
6803 output_operand_lossage ("symbolic memory references are "
6804 "only supported on z10 or later");
6805 return;
6806 }
6807 output_addr_const (file, addr);
6808 return;
6809 }
6810
6811 if (!s390_decompose_address (addr, &ad)
6812 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
6813 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
6814 output_operand_lossage ("cannot decompose address");
6815
6816 if (ad.disp)
6817 output_addr_const (file, ad.disp);
6818 else
6819 fprintf (file, "0");
6820
6821 if (ad.base && ad.indx)
6822 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
6823 reg_names[REGNO (ad.base)]);
6824 else if (ad.base)
6825 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
6826 }
6827
6828 /* Output operand X in assembler syntax to stdio stream FILE.
6829 CODE specified the format flag. The following format flags
6830 are recognized:
6831
6832 'C': print opcode suffix for branch condition.
6833 'D': print opcode suffix for inverse branch condition.
6834 'E': print opcode suffix for branch on index instruction.
6835 'G': print the size of the operand in bytes.
6836 'J': print tls_load/tls_gdcall/tls_ldcall suffix
6837 'M': print the second word of a TImode operand.
6838 'N': print the second word of a DImode operand.
6839 'O': print only the displacement of a memory reference or address.
6840 'R': print only the base register of a memory reference or address.
6841 'S': print S-type memory reference (base+displacement).
6842 'Y': print shift count operand.
6843
6844 'b': print integer X as if it's an unsigned byte.
6845 'c': print integer X as if it's a signed byte.
6846 'e': "end" contiguous bitmask X in either DImode or vector inner mode.
6847 'f': "end" contiguous bitmask X in SImode.
6848 'h': print integer X as if it's a signed halfword.
6849 'i': print the first nonzero HImode part of X.
6850 'j': print the first HImode part unequal to -1 of X.
6851 'k': print the first nonzero SImode part of X.
6852 'm': print the first SImode part unequal to -1 of X.
6853 'o': print integer X as if it's an unsigned 32-bit word.
6854 's': "start" of contiguous bitmask X in either DImode or vector inner mode.
6855 't': CONST_INT: "start" of contiguous bitmask X in SImode.
6856 CONST_VECTOR: Generate a bitmask for vgbm instruction.
6857 'x': print integer X as if it's an unsigned halfword.
6858 'v': print register number as vector register (v1 instead of f1).
6859 */
6860
6861 void
6862 print_operand (FILE *file, rtx x, int code)
6863 {
6864 HOST_WIDE_INT ival;
6865
6866 switch (code)
6867 {
6868 case 'C':
6869 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
6870 return;
6871
6872 case 'D':
6873 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
6874 return;
6875
6876 case 'E':
6877 if (GET_CODE (x) == LE)
6878 fprintf (file, "l");
6879 else if (GET_CODE (x) == GT)
6880 fprintf (file, "h");
6881 else
6882 output_operand_lossage ("invalid comparison operator "
6883 "for 'E' output modifier");
6884 return;
6885
6886 case 'J':
6887 if (GET_CODE (x) == SYMBOL_REF)
6888 {
6889 fprintf (file, "%s", ":tls_load:");
6890 output_addr_const (file, x);
6891 }
6892 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
6893 {
6894 fprintf (file, "%s", ":tls_gdcall:");
6895 output_addr_const (file, XVECEXP (x, 0, 0));
6896 }
6897 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
6898 {
6899 fprintf (file, "%s", ":tls_ldcall:");
6900 const char *name = get_some_local_dynamic_name ();
6901 gcc_assert (name);
6902 assemble_name (file, name);
6903 }
6904 else
6905 output_operand_lossage ("invalid reference for 'J' output modifier");
6906 return;
6907
6908 case 'G':
6909 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
6910 return;
6911
6912 case 'O':
6913 {
6914 struct s390_address ad;
6915 int ret;
6916
6917 ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
6918
6919 if (!ret
6920 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
6921 || ad.indx)
6922 {
6923 output_operand_lossage ("invalid address for 'O' output modifier");
6924 return;
6925 }
6926
6927 if (ad.disp)
6928 output_addr_const (file, ad.disp);
6929 else
6930 fprintf (file, "0");
6931 }
6932 return;
6933
6934 case 'R':
6935 {
6936 struct s390_address ad;
6937 int ret;
6938
6939 ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
6940
6941 if (!ret
6942 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
6943 || ad.indx)
6944 {
6945 output_operand_lossage ("invalid address for 'R' output modifier");
6946 return;
6947 }
6948
6949 if (ad.base)
6950 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
6951 else
6952 fprintf (file, "0");
6953 }
6954 return;
6955
6956 case 'S':
6957 {
6958 struct s390_address ad;
6959 int ret;
6960
6961 if (!MEM_P (x))
6962 {
6963 output_operand_lossage ("memory reference expected for "
6964 "'S' output modifier");
6965 return;
6966 }
6967 ret = s390_decompose_address (XEXP (x, 0), &ad);
6968
6969 if (!ret
6970 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
6971 || ad.indx)
6972 {
6973 output_operand_lossage ("invalid address for 'S' output modifier");
6974 return;
6975 }
6976
6977 if (ad.disp)
6978 output_addr_const (file, ad.disp);
6979 else
6980 fprintf (file, "0");
6981
6982 if (ad.base)
6983 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
6984 }
6985 return;
6986
6987 case 'N':
6988 if (GET_CODE (x) == REG)
6989 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
6990 else if (GET_CODE (x) == MEM)
6991 x = change_address (x, VOIDmode,
6992 plus_constant (Pmode, XEXP (x, 0), 4));
6993 else
6994 output_operand_lossage ("register or memory expression expected "
6995 "for 'N' output modifier");
6996 break;
6997
6998 case 'M':
6999 if (GET_CODE (x) == REG)
7000 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
7001 else if (GET_CODE (x) == MEM)
7002 x = change_address (x, VOIDmode,
7003 plus_constant (Pmode, XEXP (x, 0), 8));
7004 else
7005 output_operand_lossage ("register or memory expression expected "
7006 "for 'M' output modifier");
7007 break;
7008
7009 case 'Y':
7010 print_shift_count_operand (file, x);
7011 return;
7012 }
7013
7014 switch (GET_CODE (x))
7015 {
7016 case REG:
7017 /* Print FP regs as fx instead of vx when they are accessed
7018 through non-vector mode. */
7019 if (code == 'v'
7020 || VECTOR_NOFP_REG_P (x)
7021 || (FP_REG_P (x) && VECTOR_MODE_P (GET_MODE (x)))
7022 || (VECTOR_REG_P (x)
7023 && (GET_MODE_SIZE (GET_MODE (x)) /
7024 s390_class_max_nregs (FP_REGS, GET_MODE (x))) > 8))
7025 fprintf (file, "%%v%s", reg_names[REGNO (x)] + 2);
7026 else
7027 fprintf (file, "%s", reg_names[REGNO (x)]);
7028 break;
7029
7030 case MEM:
7031 output_address (XEXP (x, 0));
7032 break;
7033
7034 case CONST:
7035 case CODE_LABEL:
7036 case LABEL_REF:
7037 case SYMBOL_REF:
7038 output_addr_const (file, x);
7039 break;
7040
7041 case CONST_INT:
7042 ival = INTVAL (x);
7043 switch (code)
7044 {
7045 case 0:
7046 break;
7047 case 'b':
7048 ival &= 0xff;
7049 break;
7050 case 'c':
7051 ival = ((ival & 0xff) ^ 0x80) - 0x80;
7052 break;
7053 case 'x':
7054 ival &= 0xffff;
7055 break;
7056 case 'h':
7057 ival = ((ival & 0xffff) ^ 0x8000) - 0x8000;
7058 break;
7059 case 'i':
7060 ival = s390_extract_part (x, HImode, 0);
7061 break;
7062 case 'j':
7063 ival = s390_extract_part (x, HImode, -1);
7064 break;
7065 case 'k':
7066 ival = s390_extract_part (x, SImode, 0);
7067 break;
7068 case 'm':
7069 ival = s390_extract_part (x, SImode, -1);
7070 break;
7071 case 'o':
7072 ival &= 0xffffffff;
7073 break;
7074 case 'e': case 'f':
7075 case 's': case 't':
7076 {
7077 int pos, len;
7078 bool ok;
7079
7080 len = (code == 's' || code == 'e' ? 64 : 32);
7081 ok = s390_contiguous_bitmask_p (ival, len, &pos, &len);
7082 gcc_assert (ok);
7083 if (code == 's' || code == 't')
7084 ival = 64 - pos - len;
7085 else
7086 ival = 64 - 1 - pos;
7087 }
7088 break;
7089 default:
7090 output_operand_lossage ("invalid constant for output modifier '%c'", code);
7091 }
7092 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
7093 break;
7094
7095 case CONST_DOUBLE:
7096 gcc_assert (GET_MODE (x) == VOIDmode);
7097 if (code == 'b')
7098 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xff);
7099 else if (code == 'x')
7100 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xffff);
7101 else if (code == 'h')
7102 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7103 ((CONST_DOUBLE_LOW (x) & 0xffff) ^ 0x8000) - 0x8000);
7104 else
7105 {
7106 if (code == 0)
7107 output_operand_lossage ("invalid constant - try using "
7108 "an output modifier");
7109 else
7110 output_operand_lossage ("invalid constant for output modifier '%c'",
7111 code);
7112 }
7113 break;
7114 case CONST_VECTOR:
7115 switch (code)
7116 {
7117 case 'e':
7118 case 's':
7119 {
7120 int start, stop, inner_len;
7121 bool ok;
7122
7123 inner_len = GET_MODE_UNIT_BITSIZE (GET_MODE (x));
7124 ok = s390_contiguous_bitmask_vector_p (x, &start, &stop);
7125 gcc_assert (ok);
7126 if (code == 's' || code == 't')
7127 ival = inner_len - stop - 1;
7128 else
7129 ival = inner_len - start - 1;
7130 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
7131 }
7132 break;
7133 case 't':
7134 {
7135 unsigned mask;
7136 bool ok = s390_bytemask_vector_p (x, &mask);
7137 gcc_assert (ok);
7138 fprintf (file, "%u", mask);
7139 }
7140 break;
7141
7142 default:
7143 output_operand_lossage ("invalid constant vector for output "
7144 "modifier '%c'", code);
7145 }
7146 break;
7147
7148 default:
7149 if (code == 0)
7150 output_operand_lossage ("invalid expression - try using "
7151 "an output modifier");
7152 else
7153 output_operand_lossage ("invalid expression for output "
7154 "modifier '%c'", code);
7155 break;
7156 }
7157 }
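
/* Illustration of the CONST_INT modifiers handled above: for
   X == (const_int 0xff9c), 'b' prints 156 (0x9c), 'c' prints -100
   (0x9c sign-extended from 8 bits), 'x' prints 65436 (0xff9c) and
   'h' prints -100 (0xff9c sign-extended from 16 bits).  */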
7158
7159 /* Target hook for assembling integer objects. We need to define it
7160 here to work around a bug in some versions of GAS, which couldn't
7161 handle values smaller than INT_MIN when printed in decimal. */
7162
7163 static bool
7164 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
7165 {
7166 if (size == 8 && aligned_p
7167 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
7168 {
7169 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
7170 INTVAL (x));
7171 return true;
7172 }
7173 return default_assemble_integer (x, size, aligned_p);
7174 }
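
/* Illustration of the workaround above: an aligned 8-byte CONST_INT
   with value -2147483649 (smaller than INT_MIN) is printed as
   "\t.quad\t0xffffffff7fffffff" instead of being handed to
   default_assemble_integer, which would emit the decimal value that
   some old GAS versions mishandle.  */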
7175
7176 /* Returns true if register REGNO is used for forming
7177 a memory address in expression X. */
7178
7179 static bool
7180 reg_used_in_mem_p (int regno, rtx x)
7181 {
7182 enum rtx_code code = GET_CODE (x);
7183 int i, j;
7184 const char *fmt;
7185
7186 if (code == MEM)
7187 {
7188 if (refers_to_regno_p (regno, XEXP (x, 0)))
7189 return true;
7190 }
7191 else if (code == SET
7192 && GET_CODE (SET_DEST (x)) == PC)
7193 {
7194 if (refers_to_regno_p (regno, SET_SRC (x)))
7195 return true;
7196 }
7197
7198 fmt = GET_RTX_FORMAT (code);
7199 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7200 {
7201 if (fmt[i] == 'e'
7202 && reg_used_in_mem_p (regno, XEXP (x, i)))
7203 return true;
7204
7205 else if (fmt[i] == 'E')
7206 for (j = 0; j < XVECLEN (x, i); j++)
7207 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
7208 return true;
7209 }
7210 return false;
7211 }
7212
7213 /* Returns true if expression DEP_RTX sets an address register
7214 used by instruction INSN to address memory. */
7215
7216 static bool
7217 addr_generation_dependency_p (rtx dep_rtx, rtx_insn *insn)
7218 {
7219 rtx target, pat;
7220
7221 if (NONJUMP_INSN_P (dep_rtx))
7222 dep_rtx = PATTERN (dep_rtx);
7223
7224 if (GET_CODE (dep_rtx) == SET)
7225 {
7226 target = SET_DEST (dep_rtx);
7227 if (GET_CODE (target) == STRICT_LOW_PART)
7228 target = XEXP (target, 0);
7229 while (GET_CODE (target) == SUBREG)
7230 target = SUBREG_REG (target);
7231
7232 if (GET_CODE (target) == REG)
7233 {
7234 int regno = REGNO (target);
7235
7236 if (s390_safe_attr_type (insn) == TYPE_LA)
7237 {
7238 pat = PATTERN (insn);
7239 if (GET_CODE (pat) == PARALLEL)
7240 {
7241 gcc_assert (XVECLEN (pat, 0) == 2);
7242 pat = XVECEXP (pat, 0, 0);
7243 }
7244 gcc_assert (GET_CODE (pat) == SET);
7245 return refers_to_regno_p (regno, SET_SRC (pat));
7246 }
7247 else if (get_attr_atype (insn) == ATYPE_AGEN)
7248 return reg_used_in_mem_p (regno, PATTERN (insn));
7249 }
7250 }
7251 return false;
7252 }
7253
7254 /* Return 1 if DEP_INSN sets a register used by INSN in the agen unit. */
7255
7256 int
7257 s390_agen_dep_p (rtx_insn *dep_insn, rtx_insn *insn)
7258 {
7259 rtx dep_rtx = PATTERN (dep_insn);
7260 int i;
7261
7262 if (GET_CODE (dep_rtx) == SET
7263 && addr_generation_dependency_p (dep_rtx, insn))
7264 return 1;
7265 else if (GET_CODE (dep_rtx) == PARALLEL)
7266 {
7267 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
7268 {
7269 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
7270 return 1;
7271 }
7272 }
7273 return 0;
7274 }
7275
7276
7277 /* A C statement (sans semicolon) to update the integer scheduling priority
7278 INSN_PRIORITY (INSN). Increase the priority to execute the INSN earlier,
7279 reduce the priority to execute INSN later. Do not define this macro if
7280 you do not need to adjust the scheduling priorities of insns.
7281
7282 A STD instruction should be scheduled earlier,
7283 in order to use the bypass. */
7284 static int
7285 s390_adjust_priority (rtx_insn *insn, int priority)
7286 {
7287 if (! INSN_P (insn))
7288 return priority;
7289
7290 if (s390_tune <= PROCESSOR_2064_Z900)
7291 return priority;
7292
7293 switch (s390_safe_attr_type (insn))
7294 {
7295 case TYPE_FSTOREDF:
7296 case TYPE_FSTORESF:
7297 priority = priority << 3;
7298 break;
7299 case TYPE_STORE:
7300 case TYPE_STM:
7301 priority = priority << 1;
7302 break;
7303 default:
7304 break;
7305 }
7306 return priority;
7307 }
7308
7309
7310 /* The number of instructions that can be issued per cycle. */
7311
7312 static int
7313 s390_issue_rate (void)
7314 {
7315 switch (s390_tune)
7316 {
7317 case PROCESSOR_2084_Z990:
7318 case PROCESSOR_2094_Z9_109:
7319 case PROCESSOR_2094_Z9_EC:
7320 case PROCESSOR_2817_Z196:
7321 return 3;
7322 case PROCESSOR_2097_Z10:
7323 return 2;
7324 case PROCESSOR_9672_G5:
7325 case PROCESSOR_9672_G6:
7326 case PROCESSOR_2064_Z900:
7327 /* Starting with EC12 we use the sched_reorder hook to take care
7328 of instruction dispatch constraints. The algorithm only
7329 picks the best instruction and assumes only a single
7330 instruction gets issued per cycle. */
7331 case PROCESSOR_2827_ZEC12:
7332 case PROCESSOR_2964_Z13:
7333 default:
7334 return 1;
7335 }
7336 }
7337
7338 static int
7339 s390_first_cycle_multipass_dfa_lookahead (void)
7340 {
7341 return 4;
7342 }
7343
7344 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
7345 Fix up MEMs as required. */
7346
7347 static void
7348 annotate_constant_pool_refs (rtx *x)
7349 {
7350 int i, j;
7351 const char *fmt;
7352
7353 gcc_assert (GET_CODE (*x) != SYMBOL_REF
7354 || !CONSTANT_POOL_ADDRESS_P (*x));
7355
7356 /* Literal pool references can only occur inside a MEM ... */
7357 if (GET_CODE (*x) == MEM)
7358 {
7359 rtx memref = XEXP (*x, 0);
7360
7361 if (GET_CODE (memref) == SYMBOL_REF
7362 && CONSTANT_POOL_ADDRESS_P (memref))
7363 {
7364 rtx base = cfun->machine->base_reg;
7365 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
7366 UNSPEC_LTREF);
7367
7368 *x = replace_equiv_address (*x, addr);
7369 return;
7370 }
7371
7372 if (GET_CODE (memref) == CONST
7373 && GET_CODE (XEXP (memref, 0)) == PLUS
7374 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
7375 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
7376 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
7377 {
7378 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
7379 rtx sym = XEXP (XEXP (memref, 0), 0);
7380 rtx base = cfun->machine->base_reg;
7381 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
7382 UNSPEC_LTREF);
7383
7384 *x = replace_equiv_address (*x, plus_constant (Pmode, addr, off));
7385 return;
7386 }
7387 }
7388
7389 /* ... or a load-address type pattern. */
7390 if (GET_CODE (*x) == SET)
7391 {
7392 rtx addrref = SET_SRC (*x);
7393
7394 if (GET_CODE (addrref) == SYMBOL_REF
7395 && CONSTANT_POOL_ADDRESS_P (addrref))
7396 {
7397 rtx base = cfun->machine->base_reg;
7398 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
7399 UNSPEC_LTREF);
7400
7401 SET_SRC (*x) = addr;
7402 return;
7403 }
7404
7405 if (GET_CODE (addrref) == CONST
7406 && GET_CODE (XEXP (addrref, 0)) == PLUS
7407 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
7408 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
7409 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
7410 {
7411 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
7412 rtx sym = XEXP (XEXP (addrref, 0), 0);
7413 rtx base = cfun->machine->base_reg;
7414 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
7415 UNSPEC_LTREF);
7416
7417 SET_SRC (*x) = plus_constant (Pmode, addr, off);
7418 return;
7419 }
7420 }
7421
7422 /* Annotate LTREL_BASE as well. */
7423 if (GET_CODE (*x) == UNSPEC
7424 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
7425 {
7426 rtx base = cfun->machine->base_reg;
7427 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
7428 UNSPEC_LTREL_BASE);
7429 return;
7430 }
7431
7432 fmt = GET_RTX_FORMAT (GET_CODE (*x));
7433 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
7434 {
7435 if (fmt[i] == 'e')
7436 {
7437 annotate_constant_pool_refs (&XEXP (*x, i));
7438 }
7439 else if (fmt[i] == 'E')
7440 {
7441 for (j = 0; j < XVECLEN (*x, i); j++)
7442 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
7443 }
7444 }
7445 }
7446
7447 /* Split all branches that exceed the maximum distance.
7448 Returns true if this created a new literal pool entry. */
7449
7450 static int
7451 s390_split_branches (void)
7452 {
7453 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
7454 int new_literal = 0, ret;
7455 rtx_insn *insn;
7456 rtx pat, target;
7457 rtx *label;
7458
7459 /* We need correct insn addresses. */
7460
7461 shorten_branches (get_insns ());
7462
7463 /* Find all branches that exceed 64KB, and split them. */
7464
7465 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7466 {
7467 if (! JUMP_P (insn) || tablejump_p (insn, NULL, NULL))
7468 continue;
7469
7470 pat = PATTERN (insn);
7471 if (GET_CODE (pat) == PARALLEL)
7472 pat = XVECEXP (pat, 0, 0);
7473 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
7474 continue;
7475
7476 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
7477 {
7478 label = &SET_SRC (pat);
7479 }
7480 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
7481 {
7482 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
7483 label = &XEXP (SET_SRC (pat), 1);
7484 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
7485 label = &XEXP (SET_SRC (pat), 2);
7486 else
7487 continue;
7488 }
7489 else
7490 continue;
7491
7492 if (get_attr_length (insn) <= 4)
7493 continue;
7494
7495 /* We are going to use the return register as a scratch register;
7496 make sure it will be saved/restored by the prologue/epilogue. */
7497 cfun_frame_layout.save_return_addr_p = 1;
7498
7499 if (!flag_pic)
7500 {
7501 new_literal = 1;
7502 rtx mem = force_const_mem (Pmode, *label);
7503 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (temp_reg, mem),
7504 insn);
7505 INSN_ADDRESSES_NEW (set_insn, -1);
7506 annotate_constant_pool_refs (&PATTERN (set_insn));
7507
7508 target = temp_reg;
7509 }
7510 else
7511 {
7512 new_literal = 1;
7513 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
7514 UNSPEC_LTREL_OFFSET);
7515 target = gen_rtx_CONST (Pmode, target);
7516 target = force_const_mem (Pmode, target);
7517 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (temp_reg, target),
7518 insn);
7519 INSN_ADDRESSES_NEW (set_insn, -1);
7520 annotate_constant_pool_refs (&PATTERN (set_insn));
7521
7522 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
7523 cfun->machine->base_reg),
7524 UNSPEC_LTREL_BASE);
7525 target = gen_rtx_PLUS (Pmode, temp_reg, target);
7526 }
7527
7528 ret = validate_change (insn, label, target, 0);
7529 gcc_assert (ret);
7530 }
7531
7532 return new_literal;
7533 }
7534
7535
7536 /* Find an annotated literal pool symbol referenced in RTX X,
7537 and store it at REF. Will abort if X contains references to
7538 more than one such pool symbol; multiple references to the same
7539 symbol are allowed, however.
7540
7541 The rtx pointed to by REF must be initialized to NULL_RTX
7542 by the caller before calling this routine. */
7543
7544 static void
7545 find_constant_pool_ref (rtx x, rtx *ref)
7546 {
7547 int i, j;
7548 const char *fmt;
7549
7550 /* Ignore LTREL_BASE references. */
7551 if (GET_CODE (x) == UNSPEC
7552 && XINT (x, 1) == UNSPEC_LTREL_BASE)
7553 return;
7554 /* Likewise POOL_ENTRY insns. */
7555 if (GET_CODE (x) == UNSPEC_VOLATILE
7556 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
7557 return;
7558
7559 gcc_assert (GET_CODE (x) != SYMBOL_REF
7560 || !CONSTANT_POOL_ADDRESS_P (x));
7561
7562 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
7563 {
7564 rtx sym = XVECEXP (x, 0, 0);
7565 gcc_assert (GET_CODE (sym) == SYMBOL_REF
7566 && CONSTANT_POOL_ADDRESS_P (sym));
7567
7568 if (*ref == NULL_RTX)
7569 *ref = sym;
7570 else
7571 gcc_assert (*ref == sym);
7572
7573 return;
7574 }
7575
7576 fmt = GET_RTX_FORMAT (GET_CODE (x));
7577 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
7578 {
7579 if (fmt[i] == 'e')
7580 {
7581 find_constant_pool_ref (XEXP (x, i), ref);
7582 }
7583 else if (fmt[i] == 'E')
7584 {
7585 for (j = 0; j < XVECLEN (x, i); j++)
7586 find_constant_pool_ref (XVECEXP (x, i, j), ref);
7587 }
7588 }
7589 }
7590
7591 /* Replace every reference to the annotated literal pool
7592 symbol REF in X by its base plus OFFSET. */
7593
7594 static void
7595 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
7596 {
7597 int i, j;
7598 const char *fmt;
7599
7600 gcc_assert (*x != ref);
7601
7602 if (GET_CODE (*x) == UNSPEC
7603 && XINT (*x, 1) == UNSPEC_LTREF
7604 && XVECEXP (*x, 0, 0) == ref)
7605 {
7606 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
7607 return;
7608 }
7609
7610 if (GET_CODE (*x) == PLUS
7611 && GET_CODE (XEXP (*x, 1)) == CONST_INT
7612 && GET_CODE (XEXP (*x, 0)) == UNSPEC
7613 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
7614 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
7615 {
7616 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
7617 *x = plus_constant (Pmode, addr, INTVAL (XEXP (*x, 1)));
7618 return;
7619 }
7620
7621 fmt = GET_RTX_FORMAT (GET_CODE (*x));
7622 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
7623 {
7624 if (fmt[i] == 'e')
7625 {
7626 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
7627 }
7628 else if (fmt[i] == 'E')
7629 {
7630 for (j = 0; j < XVECLEN (*x, i); j++)
7631 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
7632 }
7633 }
7634 }
7635
7636 /* Check whether X contains an UNSPEC_LTREL_BASE.
7637 Return its constant pool symbol if found, NULL_RTX otherwise. */
7638
7639 static rtx
7640 find_ltrel_base (rtx x)
7641 {
7642 int i, j;
7643 const char *fmt;
7644
7645 if (GET_CODE (x) == UNSPEC
7646 && XINT (x, 1) == UNSPEC_LTREL_BASE)
7647 return XVECEXP (x, 0, 0);
7648
7649 fmt = GET_RTX_FORMAT (GET_CODE (x));
7650 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
7651 {
7652 if (fmt[i] == 'e')
7653 {
7654 rtx fnd = find_ltrel_base (XEXP (x, i));
7655 if (fnd)
7656 return fnd;
7657 }
7658 else if (fmt[i] == 'E')
7659 {
7660 for (j = 0; j < XVECLEN (x, i); j++)
7661 {
7662 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
7663 if (fnd)
7664 return fnd;
7665 }
7666 }
7667 }
7668
7669 return NULL_RTX;
7670 }
7671
7672 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
7673
7674 static void
7675 replace_ltrel_base (rtx *x)
7676 {
7677 int i, j;
7678 const char *fmt;
7679
7680 if (GET_CODE (*x) == UNSPEC
7681 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
7682 {
7683 *x = XVECEXP (*x, 0, 1);
7684 return;
7685 }
7686
7687 fmt = GET_RTX_FORMAT (GET_CODE (*x));
7688 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
7689 {
7690 if (fmt[i] == 'e')
7691 {
7692 replace_ltrel_base (&XEXP (*x, i));
7693 }
7694 else if (fmt[i] == 'E')
7695 {
7696 for (j = 0; j < XVECLEN (*x, i); j++)
7697 replace_ltrel_base (&XVECEXP (*x, i, j));
7698 }
7699 }
7700 }
7701
7702
7703 /* We keep a list of constants which we have to add to internal
7704 constant tables in the middle of large functions. */
7705
7706 #define NR_C_MODES 31
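/* The entries below are listed in order of decreasing mode size so that
s390_dump_pool, which walks this array front to back, emits the constants
with the largest alignment requirement first. */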
7707 machine_mode constant_modes[NR_C_MODES] =
7708 {
7709 TFmode, TImode, TDmode,
7710 V16QImode, V8HImode, V4SImode, V2DImode, V4SFmode, V2DFmode, V1TFmode,
7711 DFmode, DImode, DDmode,
7712 V8QImode, V4HImode, V2SImode, V1DImode, V2SFmode, V1DFmode,
7713 SFmode, SImode, SDmode,
7714 V4QImode, V2HImode, V1SImode, V1SFmode,
7715 HImode,
7716 V2QImode, V1HImode,
7717 QImode,
7718 V1QImode
7719 };
7720
7721 struct constant
7722 {
7723 struct constant *next;
7724 rtx value;
7725 rtx_code_label *label;
7726 };
7727
7728 struct constant_pool
7729 {
7730 struct constant_pool *next;
7731 rtx_insn *first_insn;
7732 rtx_insn *pool_insn;
7733 bitmap insns;
7734 rtx_insn *emit_pool_after;
7735
7736 struct constant *constants[NR_C_MODES];
7737 struct constant *execute;
7738 rtx_code_label *label;
7739 int size;
7740 };
7741
7742 /* Allocate new constant_pool structure. */
7743
7744 static struct constant_pool *
7745 s390_alloc_pool (void)
7746 {
7747 struct constant_pool *pool;
7748 int i;
7749
7750 pool = (struct constant_pool *) xmalloc (sizeof *pool);
7751 pool->next = NULL;
7752 for (i = 0; i < NR_C_MODES; i++)
7753 pool->constants[i] = NULL;
7754
7755 pool->execute = NULL;
7756 pool->label = gen_label_rtx ();
7757 pool->first_insn = NULL;
7758 pool->pool_insn = NULL;
7759 pool->insns = BITMAP_ALLOC (NULL);
7760 pool->size = 0;
7761 pool->emit_pool_after = NULL;
7762
7763 return pool;
7764 }
7765
7766 /* Create new constant pool covering instructions starting at INSN
7767 and chain it to the end of POOL_LIST. */
7768
7769 static struct constant_pool *
7770 s390_start_pool (struct constant_pool **pool_list, rtx_insn *insn)
7771 {
7772 struct constant_pool *pool, **prev;
7773
7774 pool = s390_alloc_pool ();
7775 pool->first_insn = insn;
7776
7777 for (prev = pool_list; *prev; prev = &(*prev)->next)
7778 ;
7779 *prev = pool;
7780
7781 return pool;
7782 }
7783
7784 /* End range of instructions covered by POOL at INSN and emit
7785 placeholder insn representing the pool. */
7786
7787 static void
7788 s390_end_pool (struct constant_pool *pool, rtx_insn *insn)
7789 {
7790 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
7791
7792 if (!insn)
7793 insn = get_last_insn ();
7794
7795 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
7796 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
7797 }
7798
7799 /* Add INSN to the list of insns covered by POOL. */
7800
7801 static void
7802 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
7803 {
7804 bitmap_set_bit (pool->insns, INSN_UID (insn));
7805 }
7806
7807 /* Return pool out of POOL_LIST that covers INSN. */
7808
7809 static struct constant_pool *
7810 s390_find_pool (struct constant_pool *pool_list, rtx insn)
7811 {
7812 struct constant_pool *pool;
7813
7814 for (pool = pool_list; pool; pool = pool->next)
7815 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
7816 break;
7817
7818 return pool;
7819 }
7820
7821 /* Add constant VAL of mode MODE to the constant pool POOL. */
7822
7823 static void
7824 s390_add_constant (struct constant_pool *pool, rtx val, machine_mode mode)
7825 {
7826 struct constant *c;
7827 int i;
7828
7829 for (i = 0; i < NR_C_MODES; i++)
7830 if (constant_modes[i] == mode)
7831 break;
7832 gcc_assert (i != NR_C_MODES);
7833
7834 for (c = pool->constants[i]; c != NULL; c = c->next)
7835 if (rtx_equal_p (val, c->value))
7836 break;
7837
7838 if (c == NULL)
7839 {
7840 c = (struct constant *) xmalloc (sizeof *c);
7841 c->value = val;
7842 c->label = gen_label_rtx ();
7843 c->next = pool->constants[i];
7844 pool->constants[i] = c;
7845 pool->size += GET_MODE_SIZE (mode);
7846 }
7847 }
7848
7849 /* Return an rtx that represents the offset of X from the start of
7850 pool POOL. */
7851
7852 static rtx
7853 s390_pool_offset (struct constant_pool *pool, rtx x)
7854 {
7855 rtx label;
7856
7857 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
7858 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
7859 UNSPEC_POOL_OFFSET);
7860 return gen_rtx_CONST (GET_MODE (x), x);
7861 }
7862
7863 /* Find constant VAL of mode MODE in the constant pool POOL.
7864 Return an RTX describing the distance from the start of
7865 the pool to the location of the new constant. */
7866
7867 static rtx
7868 s390_find_constant (struct constant_pool *pool, rtx val,
7869 machine_mode mode)
7870 {
7871 struct constant *c;
7872 int i;
7873
7874 for (i = 0; i < NR_C_MODES; i++)
7875 if (constant_modes[i] == mode)
7876 break;
7877 gcc_assert (i != NR_C_MODES);
7878
7879 for (c = pool->constants[i]; c != NULL; c = c->next)
7880 if (rtx_equal_p (val, c->value))
7881 break;
7882
7883 gcc_assert (c);
7884
7885 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
7886 }
7887
7888 /* Check whether INSN is an execute. Return the label_ref to its
7889 execute target template if so, NULL_RTX otherwise. */
7890
7891 static rtx
7892 s390_execute_label (rtx insn)
7893 {
7894 if (NONJUMP_INSN_P (insn)
7895 && GET_CODE (PATTERN (insn)) == PARALLEL
7896 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
7897 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
7898 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
7899
7900 return NULL_RTX;
7901 }
7902
7903 /* Add execute target for INSN to the constant pool POOL. */
7904
7905 static void
7906 s390_add_execute (struct constant_pool *pool, rtx insn)
7907 {
7908 struct constant *c;
7909
7910 for (c = pool->execute; c != NULL; c = c->next)
7911 if (INSN_UID (insn) == INSN_UID (c->value))
7912 break;
7913
7914 if (c == NULL)
7915 {
7916 c = (struct constant *) xmalloc (sizeof *c);
7917 c->value = insn;
7918 c->label = gen_label_rtx ();
7919 c->next = pool->execute;
7920 pool->execute = c;
7921 pool->size += 6;
7922 }
7923 }
7924
7925 /* Find execute target for INSN in the constant pool POOL.
7926 Return an RTX describing the distance from the start of
7927 the pool to the location of the execute target. */
7928
7929 static rtx
7930 s390_find_execute (struct constant_pool *pool, rtx insn)
7931 {
7932 struct constant *c;
7933
7934 for (c = pool->execute; c != NULL; c = c->next)
7935 if (INSN_UID (insn) == INSN_UID (c->value))
7936 break;
7937
7938 gcc_assert (c);
7939
7940 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
7941 }
7942
7943 /* For an execute INSN, extract the execute target template. */
7944
7945 static rtx
7946 s390_execute_target (rtx insn)
7947 {
7948 rtx pattern = PATTERN (insn);
7949 gcc_assert (s390_execute_label (insn));
7950
7951 if (XVECLEN (pattern, 0) == 2)
7952 {
7953 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
7954 }
7955 else
7956 {
7957 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
7958 int i;
7959
7960 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
7961 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
7962
7963 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
7964 }
7965
7966 return pattern;
7967 }
7968
7969 /* Indicate that INSN cannot be duplicated. This is the case for
7970 execute insns that carry a unique label. */
7971
7972 static bool
7973 s390_cannot_copy_insn_p (rtx_insn *insn)
7974 {
7975 rtx label = s390_execute_label (insn);
7976 return label && label != const0_rtx;
7977 }
7978
7979 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
7980 do not emit the pool base label. */
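/* Sketch of the sequence emitted below:
switch to the read-only data section        (TARGET_CPU_ZARCH only)
align to 8 (zarch) resp. 4 bytes
.LPOOL:                                     (omitted if REMOTE_LABEL)
one label plus pool entry per constant, largest alignment first
align to 2 bytes
one label plus execute target template per execute
switch back to the previous section         (TARGET_CPU_ZARCH only)
barrier  */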
7981
7982 static void
7983 s390_dump_pool (struct constant_pool *pool, bool remote_label)
7984 {
7985 struct constant *c;
7986 rtx_insn *insn = pool->pool_insn;
7987 int i;
7988
7989 /* Switch to rodata section. */
7990 if (TARGET_CPU_ZARCH)
7991 {
7992 insn = emit_insn_after (gen_pool_section_start (), insn);
7993 INSN_ADDRESSES_NEW (insn, -1);
7994 }
7995
7996 /* Ensure minimum pool alignment. */
7997 if (TARGET_CPU_ZARCH)
7998 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
7999 else
8000 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
8001 INSN_ADDRESSES_NEW (insn, -1);
8002
8003 /* Emit pool base label. */
8004 if (!remote_label)
8005 {
8006 insn = emit_label_after (pool->label, insn);
8007 INSN_ADDRESSES_NEW (insn, -1);
8008 }
8009
8010 /* Dump constants in descending alignment requirement order,
8011 ensuring proper alignment for every constant. */
8012 for (i = 0; i < NR_C_MODES; i++)
8013 for (c = pool->constants[i]; c; c = c->next)
8014 {
8015 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
8016 rtx value = copy_rtx (c->value);
8017 if (GET_CODE (value) == CONST
8018 && GET_CODE (XEXP (value, 0)) == UNSPEC
8019 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
8020 && XVECLEN (XEXP (value, 0), 0) == 1)
8021 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
8022
8023 insn = emit_label_after (c->label, insn);
8024 INSN_ADDRESSES_NEW (insn, -1);
8025
8026 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
8027 gen_rtvec (1, value),
8028 UNSPECV_POOL_ENTRY);
8029 insn = emit_insn_after (value, insn);
8030 INSN_ADDRESSES_NEW (insn, -1);
8031 }
8032
8033 /* Ensure minimum alignment for instructions. */
8034 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
8035 INSN_ADDRESSES_NEW (insn, -1);
8036
8037 /* Output in-pool execute template insns. */
8038 for (c = pool->execute; c; c = c->next)
8039 {
8040 insn = emit_label_after (c->label, insn);
8041 INSN_ADDRESSES_NEW (insn, -1);
8042
8043 insn = emit_insn_after (s390_execute_target (c->value), insn);
8044 INSN_ADDRESSES_NEW (insn, -1);
8045 }
8046
8047 /* Switch back to previous section. */
8048 if (TARGET_CPU_ZARCH)
8049 {
8050 insn = emit_insn_after (gen_pool_section_end (), insn);
8051 INSN_ADDRESSES_NEW (insn, -1);
8052 }
8053
8054 insn = emit_barrier_after (insn);
8055 INSN_ADDRESSES_NEW (insn, -1);
8056
8057 /* Remove placeholder insn. */
8058 remove_insn (pool->pool_insn);
8059 }
8060
8061 /* Free all memory used by POOL. */
8062
8063 static void
8064 s390_free_pool (struct constant_pool *pool)
8065 {
8066 struct constant *c, *next;
8067 int i;
8068
8069 for (i = 0; i < NR_C_MODES; i++)
8070 for (c = pool->constants[i]; c; c = next)
8071 {
8072 next = c->next;
8073 free (c);
8074 }
8075
8076 for (c = pool->execute; c; c = next)
8077 {
8078 next = c->next;
8079 free (c);
8080 }
8081
8082 BITMAP_FREE (pool->insns);
8083 free (pool);
8084 }
8085
8086
8087 /* Collect main literal pool. Return NULL on overflow. */
8088
8089 static struct constant_pool *
8090 s390_mainpool_start (void)
8091 {
8092 struct constant_pool *pool;
8093 rtx_insn *insn;
8094
8095 pool = s390_alloc_pool ();
8096
8097 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8098 {
8099 if (NONJUMP_INSN_P (insn)
8100 && GET_CODE (PATTERN (insn)) == SET
8101 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
8102 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
8103 {
8104 /* There might be two main_pool instructions if base_reg
8105 is call-clobbered; one for shrink-wrapped code and one
8106 for the rest. We want to keep the first. */
8107 if (pool->pool_insn)
8108 {
8109 insn = PREV_INSN (insn);
8110 delete_insn (NEXT_INSN (insn));
8111 continue;
8112 }
8113 pool->pool_insn = insn;
8114 }
8115
8116 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
8117 {
8118 s390_add_execute (pool, insn);
8119 }
8120 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8121 {
8122 rtx pool_ref = NULL_RTX;
8123 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8124 if (pool_ref)
8125 {
8126 rtx constant = get_pool_constant (pool_ref);
8127 machine_mode mode = get_pool_mode (pool_ref);
8128 s390_add_constant (pool, constant, mode);
8129 }
8130 }
8131
8132 /* If hot/cold partitioning is enabled we have to make sure that
8133 the literal pool is emitted in the same section where the
8134 initialization of the literal pool base pointer takes place.
8135 emit_pool_after is only used in the non-overflow case on
8136 non-Z CPUs where we can emit the literal pool at the end of the
8137 function body within the text section. */
8138 if (NOTE_P (insn)
8139 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
8140 && !pool->emit_pool_after)
8141 pool->emit_pool_after = PREV_INSN (insn);
8142 }
8143
8144 gcc_assert (pool->pool_insn || pool->size == 0);
8145
8146 if (pool->size >= 4096)
8147 {
8148 /* We're going to chunkify the pool, so remove the main
8149 pool placeholder insn. */
8150 remove_insn (pool->pool_insn);
8151
8152 s390_free_pool (pool);
8153 pool = NULL;
8154 }
8155
8156 /* If the function ends with the section where the literal pool
8157 should be emitted, set the marker to its end. */
8158 if (pool && !pool->emit_pool_after)
8159 pool->emit_pool_after = get_last_insn ();
8160
8161 return pool;
8162 }
8163
8164 /* POOL holds the main literal pool as collected by s390_mainpool_start.
8165 Modify the current function to output the pool constants as well as
8166 the pool register setup instruction. */
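/* The three strategies below generate roughly the following code
(sketch; %rB and the label names are illustrative, not the exact output):

zarch:           larl  %rB,.LPOOL         # pool emitted in .rodata

31 bit, small:   basr  %rB,0
.LPOOL:                                   # base label; the constants are
#                                           emitted at the end of the
#                                           function / section

31 bit, large:   bras  %rB,.LPOOL_END     # branch over the inline pool
.LPOOL:          <constants>
.LPOOL_END:  */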
8167
8168 static void
8169 s390_mainpool_finish (struct constant_pool *pool)
8170 {
8171 rtx base_reg = cfun->machine->base_reg;
8172
8173 /* If the pool is empty, we're done. */
8174 if (pool->size == 0)
8175 {
8176 /* We don't actually need a base register after all. */
8177 cfun->machine->base_reg = NULL_RTX;
8178
8179 if (pool->pool_insn)
8180 remove_insn (pool->pool_insn);
8181 s390_free_pool (pool);
8182 return;
8183 }
8184
8185 /* We need correct insn addresses. */
8186 shorten_branches (get_insns ());
8187
8188 /* On zSeries, we use a LARL to load the pool register. The pool is
8189 located in the .rodata section, so we emit it after the function. */
8190 if (TARGET_CPU_ZARCH)
8191 {
8192 rtx set = gen_main_base_64 (base_reg, pool->label);
8193 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
8194 INSN_ADDRESSES_NEW (insn, -1);
8195 remove_insn (pool->pool_insn);
8196
8197 insn = get_last_insn ();
8198 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8199 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8200
8201 s390_dump_pool (pool, 0);
8202 }
8203
8204 /* On S/390, if the total size of the function's code plus literal pool
8205 does not exceed 4096 bytes, we use BASR to set up a function base
8206 pointer, and emit the literal pool at the end of the function. */
8207 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
8208 + pool->size + 8 /* alignment slop */ < 4096)
8209 {
8210 rtx set = gen_main_base_31_small (base_reg, pool->label);
8211 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
8212 INSN_ADDRESSES_NEW (insn, -1);
8213 remove_insn (pool->pool_insn);
8214
8215 insn = emit_label_after (pool->label, insn);
8216 INSN_ADDRESSES_NEW (insn, -1);
8217
8218 /* emit_pool_after will be set by s390_mainpool_start to the
8219 last insn of the section where the literal pool should be
8220 emitted. */
8221 insn = pool->emit_pool_after;
8222
8223 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8224 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8225
8226 s390_dump_pool (pool, 1);
8227 }
8228
8229 /* Otherwise, we emit an inline literal pool and use BASR to branch
8230 over it, setting up the pool register at the same time. */
8231 else
8232 {
8233 rtx_code_label *pool_end = gen_label_rtx ();
8234
8235 rtx pat = gen_main_base_31_large (base_reg, pool->label, pool_end);
8236 rtx_insn *insn = emit_jump_insn_after (pat, pool->pool_insn);
8237 JUMP_LABEL (insn) = pool_end;
8238 INSN_ADDRESSES_NEW (insn, -1);
8239 remove_insn (pool->pool_insn);
8240
8241 insn = emit_label_after (pool->label, insn);
8242 INSN_ADDRESSES_NEW (insn, -1);
8243
8244 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8245 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8246
8247 insn = emit_label_after (pool_end, pool->pool_insn);
8248 INSN_ADDRESSES_NEW (insn, -1);
8249
8250 s390_dump_pool (pool, 1);
8251 }
8252
8253
8254 /* Replace all literal pool references. */
8255
8256 for (rtx_insn *insn = get_insns (); insn; insn = NEXT_INSN (insn))
8257 {
8258 if (INSN_P (insn))
8259 replace_ltrel_base (&PATTERN (insn));
8260
8261 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8262 {
8263 rtx addr, pool_ref = NULL_RTX;
8264 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8265 if (pool_ref)
8266 {
8267 if (s390_execute_label (insn))
8268 addr = s390_find_execute (pool, insn);
8269 else
8270 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
8271 get_pool_mode (pool_ref));
8272
8273 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
8274 INSN_CODE (insn) = -1;
8275 }
8276 }
8277 }
8278
8279
8280 /* Free the pool. */
8281 s390_free_pool (pool);
8282 }
8283
8284 /* POOL holds the main literal pool as collected by s390_mainpool_start.
8285 We have decided we cannot use this pool, so revert all changes
8286 to the current function that were done by s390_mainpool_start. */
8287 static void
8288 s390_mainpool_cancel (struct constant_pool *pool)
8289 {
8290 /* We didn't actually change the instruction stream, so simply
8291 free the pool memory. */
8292 s390_free_pool (pool);
8293 }
8294
8295
8296 /* Chunkify the literal pool. */
8297
8298 #define S390_POOL_CHUNK_MIN 0xc00
8299 #define S390_POOL_CHUNK_MAX 0xe00
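/* These thresholds are presumably chosen so that a pool chunk, plus slack
for alignment padding and the base reload insns inserted later, stays
within the 4096-byte reach of the 12-bit displacements used to address
its entries: a chunk counts as full at S390_POOL_CHUNK_MIN bytes and is
forced to end once S390_POOL_CHUNK_MAX is exceeded. */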
8300
8301 static struct constant_pool *
8302 s390_chunkify_start (void)
8303 {
8304 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
8305 int extra_size = 0;
8306 bitmap far_labels;
8307 rtx pending_ltrel = NULL_RTX;
8308 rtx_insn *insn;
8309
8310 rtx (*gen_reload_base) (rtx, rtx) =
8311 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
8312
8313
8314 /* We need correct insn addresses. */
8315
8316 shorten_branches (get_insns ());
8317
8318 /* Scan all insns and move literals to pool chunks. */
8319
8320 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8321 {
8322 bool section_switch_p = false;
8323
8324 /* Check for pending LTREL_BASE. */
8325 if (INSN_P (insn))
8326 {
8327 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
8328 if (ltrel_base)
8329 {
8330 gcc_assert (ltrel_base == pending_ltrel);
8331 pending_ltrel = NULL_RTX;
8332 }
8333 }
8334
8335 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
8336 {
8337 if (!curr_pool)
8338 curr_pool = s390_start_pool (&pool_list, insn);
8339
8340 s390_add_execute (curr_pool, insn);
8341 s390_add_pool_insn (curr_pool, insn);
8342 }
8343 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8344 {
8345 rtx pool_ref = NULL_RTX;
8346 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8347 if (pool_ref)
8348 {
8349 rtx constant = get_pool_constant (pool_ref);
8350 machine_mode mode = get_pool_mode (pool_ref);
8351
8352 if (!curr_pool)
8353 curr_pool = s390_start_pool (&pool_list, insn);
8354
8355 s390_add_constant (curr_pool, constant, mode);
8356 s390_add_pool_insn (curr_pool, insn);
8357
8358 /* Don't split the pool chunk between a LTREL_OFFSET load
8359 and the corresponding LTREL_BASE. */
8360 if (GET_CODE (constant) == CONST
8361 && GET_CODE (XEXP (constant, 0)) == UNSPEC
8362 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
8363 {
8364 gcc_assert (!pending_ltrel);
8365 pending_ltrel = pool_ref;
8366 }
8367 }
8368 }
8369
8370 if (JUMP_P (insn) || JUMP_TABLE_DATA_P (insn) || LABEL_P (insn))
8371 {
8372 if (curr_pool)
8373 s390_add_pool_insn (curr_pool, insn);
8374 /* An LTREL_BASE must follow within the same basic block. */
8375 gcc_assert (!pending_ltrel);
8376 }
8377
8378 if (NOTE_P (insn))
8379 switch (NOTE_KIND (insn))
8380 {
8381 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
8382 section_switch_p = true;
8383 break;
8384 case NOTE_INSN_VAR_LOCATION:
8385 case NOTE_INSN_CALL_ARG_LOCATION:
8386 continue;
8387 default:
8388 break;
8389 }
8390
8391 if (!curr_pool
8392 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
8393 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
8394 continue;
8395
8396 if (TARGET_CPU_ZARCH)
8397 {
8398 if (curr_pool->size < S390_POOL_CHUNK_MAX)
8399 continue;
8400
8401 s390_end_pool (curr_pool, NULL);
8402 curr_pool = NULL;
8403 }
8404 else
8405 {
8406 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
8407 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
8408 + extra_size;
8409
8410 /* We will later have to insert base register reload insns.
8411 Those will have an effect on code size, which we need to
8412 consider here. This calculation makes rather pessimistic
8413 worst-case assumptions. */
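/* The 6 below presumably accounts for one base register reload insn
(reload_base_31/reload_base_64, 6 bytes) that may have to be inserted
at this label. */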
8414 if (LABEL_P (insn))
8415 extra_size += 6;
8416
8417 if (chunk_size < S390_POOL_CHUNK_MIN
8418 && curr_pool->size < S390_POOL_CHUNK_MIN
8419 && !section_switch_p)
8420 continue;
8421
8422 /* Pool chunks can only be inserted after BARRIERs ... */
8423 if (BARRIER_P (insn))
8424 {
8425 s390_end_pool (curr_pool, insn);
8426 curr_pool = NULL;
8427 extra_size = 0;
8428 }
8429
8430 /* ... so if we don't find one in time, create one. */
8431 else if (chunk_size > S390_POOL_CHUNK_MAX
8432 || curr_pool->size > S390_POOL_CHUNK_MAX
8433 || section_switch_p)
8434 {
8435 rtx_insn *label, *jump, *barrier, *next, *prev;
8436
8437 if (!section_switch_p)
8438 {
8439 /* We can insert the barrier only after a 'real' insn. */
8440 if (! NONJUMP_INSN_P (insn) && ! CALL_P (insn))
8441 continue;
8442 if (get_attr_length (insn) == 0)
8443 continue;
8444 /* Don't separate LTREL_BASE from the corresponding
8445 LTREL_OFFSET load. */
8446 if (pending_ltrel)
8447 continue;
8448 next = insn;
8449 do
8450 {
8451 insn = next;
8452 next = NEXT_INSN (insn);
8453 }
8454 while (next
8455 && NOTE_P (next)
8456 && (NOTE_KIND (next) == NOTE_INSN_VAR_LOCATION
8457 || NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION));
8458 }
8459 else
8460 {
8461 gcc_assert (!pending_ltrel);
8462
8463 /* The old pool has to end before the section switch
8464 note in order to make it part of the current
8465 section. */
8466 insn = PREV_INSN (insn);
8467 }
8468
8469 label = gen_label_rtx ();
8470 prev = insn;
8471 if (prev && NOTE_P (prev))
8472 prev = prev_nonnote_insn (prev);
8473 if (prev)
8474 jump = emit_jump_insn_after_setloc (gen_jump (label), insn,
8475 INSN_LOCATION (prev));
8476 else
8477 jump = emit_jump_insn_after_noloc (gen_jump (label), insn);
8478 barrier = emit_barrier_after (jump);
8479 insn = emit_label_after (label, barrier);
8480 JUMP_LABEL (jump) = label;
8481 LABEL_NUSES (label) = 1;
8482
8483 INSN_ADDRESSES_NEW (jump, -1);
8484 INSN_ADDRESSES_NEW (barrier, -1);
8485 INSN_ADDRESSES_NEW (insn, -1);
8486
8487 s390_end_pool (curr_pool, barrier);
8488 curr_pool = NULL;
8489 extra_size = 0;
8490 }
8491 }
8492 }
8493
8494 if (curr_pool)
8495 s390_end_pool (curr_pool, NULL);
8496 gcc_assert (!pending_ltrel);
8497
8498 /* Find all labels that are branched into
8499 from an insn belonging to a different chunk. */
8500
8501 far_labels = BITMAP_ALLOC (NULL);
8502
8503 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8504 {
8505 rtx_jump_table_data *table;
8506
8507 /* Labels marked with LABEL_PRESERVE_P can be target
8508 of non-local jumps, so we have to mark them.
8509 The same holds for named labels.
8510
8511 Don't do that, however, if it is the label before
8512 a jump table. */
8513
8514 if (LABEL_P (insn)
8515 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
8516 {
8517 rtx_insn *vec_insn = NEXT_INSN (insn);
8518 if (! vec_insn || ! JUMP_TABLE_DATA_P (vec_insn))
8519 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
8520 }
8521 /* Check potential targets in a table jump (casesi_jump). */
8522 else if (tablejump_p (insn, NULL, &table))
8523 {
8524 rtx vec_pat = PATTERN (table);
8525 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
8526
8527 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
8528 {
8529 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
8530
8531 if (s390_find_pool (pool_list, label)
8532 != s390_find_pool (pool_list, insn))
8533 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
8534 }
8535 }
8536 /* If we have a direct jump (conditional or unconditional),
8537 check all potential targets. */
8538 else if (JUMP_P (insn))
8539 {
8540 rtx pat = PATTERN (insn);
8541
8542 if (GET_CODE (pat) == PARALLEL)
8543 pat = XVECEXP (pat, 0, 0);
8544
8545 if (GET_CODE (pat) == SET)
8546 {
8547 rtx label = JUMP_LABEL (insn);
8548 if (label && !ANY_RETURN_P (label))
8549 {
8550 if (s390_find_pool (pool_list, label)
8551 != s390_find_pool (pool_list, insn))
8552 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
8553 }
8554 }
8555 }
8556 }
8557
8558 /* Insert base register reload insns before every pool. */
8559
8560 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
8561 {
8562 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
8563 curr_pool->label);
8564 rtx_insn *insn = curr_pool->first_insn;
8565 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
8566 }
8567
8568 /* Insert base register reload insns at every far label. */
8569
8570 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8571 if (LABEL_P (insn)
8572 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
8573 {
8574 struct constant_pool *pool = s390_find_pool (pool_list, insn);
8575 if (pool)
8576 {
8577 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
8578 pool->label);
8579 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
8580 }
8581 }
8582
8583
8584 BITMAP_FREE (far_labels);
8585
8586
8587 /* Recompute insn addresses. */
8588
8589 init_insn_lengths ();
8590 shorten_branches (get_insns ());
8591
8592 return pool_list;
8593 }
8594
8595 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
8596 After we have decided to use this list, finish implementing
8597 all changes to the current function as required. */
8598
8599 static void
8600 s390_chunkify_finish (struct constant_pool *pool_list)
8601 {
8602 struct constant_pool *curr_pool = NULL;
8603 rtx_insn *insn;
8604
8605
8606 /* Replace all literal pool references. */
8607
8608 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8609 {
8610 if (INSN_P (insn))
8611 replace_ltrel_base (&PATTERN (insn));
8612
8613 curr_pool = s390_find_pool (pool_list, insn);
8614 if (!curr_pool)
8615 continue;
8616
8617 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8618 {
8619 rtx addr, pool_ref = NULL_RTX;
8620 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8621 if (pool_ref)
8622 {
8623 if (s390_execute_label (insn))
8624 addr = s390_find_execute (curr_pool, insn);
8625 else
8626 addr = s390_find_constant (curr_pool,
8627 get_pool_constant (pool_ref),
8628 get_pool_mode (pool_ref));
8629
8630 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
8631 INSN_CODE (insn) = -1;
8632 }
8633 }
8634 }
8635
8636 /* Dump out all literal pools. */
8637
8638 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
8639 s390_dump_pool (curr_pool, 0);
8640
8641 /* Free pool list. */
8642
8643 while (pool_list)
8644 {
8645 struct constant_pool *next = pool_list->next;
8646 s390_free_pool (pool_list);
8647 pool_list = next;
8648 }
8649 }
8650
8651 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
8652 We have decided we cannot use this list, so revert all changes
8653 to the current function that were done by s390_chunkify_start. */
8654
8655 static void
8656 s390_chunkify_cancel (struct constant_pool *pool_list)
8657 {
8658 struct constant_pool *curr_pool = NULL;
8659 rtx_insn *insn;
8660
8661 /* Remove all pool placeholder insns. */
8662
8663 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
8664 {
8665 /* Did we insert an extra barrier? Remove it. */
8666 rtx_insn *barrier = PREV_INSN (curr_pool->pool_insn);
8667 rtx_insn *jump = barrier? PREV_INSN (barrier) : NULL;
8668 rtx_insn *label = NEXT_INSN (curr_pool->pool_insn);
8669
8670 if (jump && JUMP_P (jump)
8671 && barrier && BARRIER_P (barrier)
8672 && label && LABEL_P (label)
8673 && GET_CODE (PATTERN (jump)) == SET
8674 && SET_DEST (PATTERN (jump)) == pc_rtx
8675 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
8676 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
8677 {
8678 remove_insn (jump);
8679 remove_insn (barrier);
8680 remove_insn (label);
8681 }
8682
8683 remove_insn (curr_pool->pool_insn);
8684 }
8685
8686 /* Remove all base register reload insns. */
8687
8688 for (insn = get_insns (); insn; )
8689 {
8690 rtx_insn *next_insn = NEXT_INSN (insn);
8691
8692 if (NONJUMP_INSN_P (insn)
8693 && GET_CODE (PATTERN (insn)) == SET
8694 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
8695 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
8696 remove_insn (insn);
8697
8698 insn = next_insn;
8699 }
8700
8701 /* Free pool list. */
8702
8703 while (pool_list)
8704 {
8705 struct constant_pool *next = pool_list->next;
8706 s390_free_pool (pool_list);
8707 pool_list = next;
8708 }
8709 }
8710
8711 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
8712
8713 void
8714 s390_output_pool_entry (rtx exp, machine_mode mode, unsigned int align)
8715 {
8716 REAL_VALUE_TYPE r;
8717
8718 switch (GET_MODE_CLASS (mode))
8719 {
8720 case MODE_FLOAT:
8721 case MODE_DECIMAL_FLOAT:
8722 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
8723
8724 REAL_VALUE_FROM_CONST_DOUBLE (r, exp);
8725 assemble_real (r, mode, align);
8726 break;
8727
8728 case MODE_INT:
8729 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
8730 mark_symbol_refs_as_used (exp);
8731 break;
8732
8733 case MODE_VECTOR_INT:
8734 case MODE_VECTOR_FLOAT:
8735 {
8736 int i;
8737 machine_mode inner_mode;
8738 gcc_assert (GET_CODE (exp) == CONST_VECTOR);
8739
8740 inner_mode = GET_MODE_INNER (GET_MODE (exp));
8741 for (i = 0; i < XVECLEN (exp, 0); i++)
8742 s390_output_pool_entry (XVECEXP (exp, 0, i),
8743 inner_mode,
8744 i == 0
8745 ? align
8746 : GET_MODE_BITSIZE (inner_mode));
8747 }
8748 break;
8749
8750 default:
8751 gcc_unreachable ();
8752 }
8753 }
8754
8755
8756 /* Return an RTL expression representing the value of the return address
8757 for the frame COUNT steps up from the current frame. FRAME is the
8758 frame pointer of that frame. */
8759
8760 rtx
8761 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
8762 {
8763 int offset;
8764 rtx addr;
8765
8766 /* Without backchain, we fail for all but the current frame. */
8767
8768 if (!TARGET_BACKCHAIN && count > 0)
8769 return NULL_RTX;
8770
8771 /* For the current frame, we need to make sure the initial
8772 value of RETURN_REGNUM is actually saved. */
8773
8774 if (count == 0)
8775 {
8776 /* On non-z architectures branch splitting could overwrite r14. */
8777 if (TARGET_CPU_ZARCH)
8778 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
8779 else
8780 {
8781 cfun_frame_layout.save_return_addr_p = true;
8782 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
8783 }
8784 }
8785
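/* The offsets below describe where the prologue keeps r14 under the
respective stack layouts: with -mpacked-stack the return address slot
sits two words below the frame address, otherwise it is the regular
save-area slot of register RETURN_REGNUM. */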
8786 if (TARGET_PACKED_STACK)
8787 offset = -2 * UNITS_PER_LONG;
8788 else
8789 offset = RETURN_REGNUM * UNITS_PER_LONG;
8790
8791 addr = plus_constant (Pmode, frame, offset);
8792 addr = memory_address (Pmode, addr);
8793 return gen_rtx_MEM (Pmode, addr);
8794 }
8795
8796 /* Return an RTL expression representing the back chain stored in
8797 the current stack frame. */
8798
8799 rtx
8800 s390_back_chain_rtx (void)
8801 {
8802 rtx chain;
8803
8804 gcc_assert (TARGET_BACKCHAIN);
8805
8806 if (TARGET_PACKED_STACK)
8807 chain = plus_constant (Pmode, stack_pointer_rtx,
8808 STACK_POINTER_OFFSET - UNITS_PER_LONG);
8809 else
8810 chain = stack_pointer_rtx;
8811
8812 chain = gen_rtx_MEM (Pmode, chain);
8813 return chain;
8814 }
8815
8816 /* Find first call clobbered register unused in a function.
8817 This could be used as base register in a leaf function
8818 or for holding the return address before epilogue. */
8819
8820 static int
8821 find_unused_clobbered_reg (void)
8822 {
8823 int i;
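/* Only GPRs 0-5 are call-clobbered in the s390 ABI; fall back to r0 if
every one of them is live. */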
8824 for (i = 0; i < 6; i++)
8825 if (!df_regs_ever_live_p (i))
8826 return i;
8827 return 0;
8828 }
8829
8830
8831 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
8832 clobbered hard regs in SETREG. */
8833
8834 static void
8835 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
8836 {
8837 char *regs_ever_clobbered = (char *)data;
8838 unsigned int i, regno;
8839 machine_mode mode = GET_MODE (setreg);
8840
8841 if (GET_CODE (setreg) == SUBREG)
8842 {
8843 rtx inner = SUBREG_REG (setreg);
8844 if (!GENERAL_REG_P (inner) && !FP_REG_P (inner))
8845 return;
8846 regno = subreg_regno (setreg);
8847 }
8848 else if (GENERAL_REG_P (setreg) || FP_REG_P (setreg))
8849 regno = REGNO (setreg);
8850 else
8851 return;
8852
8853 for (i = regno;
8854 i < regno + HARD_REGNO_NREGS (regno, mode);
8855 i++)
8856 regs_ever_clobbered[i] = 1;
8857 }
8858
8859 /* Walks through all basic blocks of the current function looking
8860 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
8861 of the passed char array REGS_EVER_CLOBBERED are set to one for
8862 each of those regs. */
8863
8864 static void
8865 s390_regs_ever_clobbered (char regs_ever_clobbered[])
8866 {
8867 basic_block cur_bb;
8868 rtx_insn *cur_insn;
8869 unsigned int i;
8870
8871 memset (regs_ever_clobbered, 0, 32);
8872
8873 /* For non-leaf functions we have to consider all call clobbered regs to be
8874 clobbered. */
8875 if (!crtl->is_leaf)
8876 {
8877 for (i = 0; i < 32; i++)
8878 regs_ever_clobbered[i] = call_really_used_regs[i];
8879 }
8880
8881 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
8882 this work is done by liveness analysis (mark_regs_live_at_end).
8883 Special care is needed for functions containing landing pads. Landing pads
8884 may use the eh registers, but the code which sets these registers is not
8885 contained in that function. Hence s390_regs_ever_clobbered is not able to
8886 deal with this automatically. */
8887 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
8888 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
8889 if (crtl->calls_eh_return
8890 || (cfun->machine->has_landing_pad_p
8891 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
8892 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
8893
8894 /* For nonlocal gotos all call-saved registers have to be saved.
8895 This flag is also set for the unwinding code in libgcc.
8896 See expand_builtin_unwind_init. For regs_ever_live this is done by
8897 reload. */
8898 if (crtl->saves_all_registers)
8899 for (i = 0; i < 32; i++)
8900 if (!call_really_used_regs[i])
8901 regs_ever_clobbered[i] = 1;
8902
8903 FOR_EACH_BB_FN (cur_bb, cfun)
8904 {
8905 FOR_BB_INSNS (cur_bb, cur_insn)
8906 {
8907 rtx pat;
8908
8909 if (!INSN_P (cur_insn))
8910 continue;
8911
8912 pat = PATTERN (cur_insn);
8913
8914 /* Ignore GPR restore insns. */
8915 if (epilogue_completed && RTX_FRAME_RELATED_P (cur_insn))
8916 {
8917 if (GET_CODE (pat) == SET
8918 && GENERAL_REG_P (SET_DEST (pat)))
8919 {
8920 /* lgdr */
8921 if (GET_MODE (SET_SRC (pat)) == DImode
8922 && FP_REG_P (SET_SRC (pat)))
8923 continue;
8924
8925 /* l / lg */
8926 if (GET_CODE (SET_SRC (pat)) == MEM)
8927 continue;
8928 }
8929
8930 /* lm / lmg */
8931 if (GET_CODE (pat) == PARALLEL
8932 && load_multiple_operation (pat, VOIDmode))
8933 continue;
8934 }
8935
8936 note_stores (pat,
8937 s390_reg_clobbered_rtx,
8938 regs_ever_clobbered);
8939 }
8940 }
8941 }
8942
8943 /* Determine the frame area which actually has to be accessed
8944 in the function epilogue. The values are stored at the
8945 given pointers AREA_BOTTOM (address of the lowest used stack
8946 address) and AREA_TOP (address of the first item which does
8947 not belong to the stack frame). */
8948
8949 static void
8950 s390_frame_area (int *area_bottom, int *area_top)
8951 {
8952 int b, t;
8953
8954 b = INT_MAX;
8955 t = INT_MIN;
8956
8957 if (cfun_frame_layout.first_restore_gpr != -1)
8958 {
8959 b = (cfun_frame_layout.gprs_offset
8960 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
8961 t = b + (cfun_frame_layout.last_restore_gpr
8962 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
8963 }
8964
8965 if (TARGET_64BIT && cfun_save_high_fprs_p)
8966 {
8967 b = MIN (b, cfun_frame_layout.f8_offset);
8968 t = MAX (t, (cfun_frame_layout.f8_offset
8969 + cfun_frame_layout.high_fprs * 8));
8970 }
8971
8972 if (!TARGET_64BIT)
8973 {
8974 if (cfun_fpr_save_p (FPR4_REGNUM))
8975 {
8976 b = MIN (b, cfun_frame_layout.f4_offset);
8977 t = MAX (t, cfun_frame_layout.f4_offset + 8);
8978 }
8979 if (cfun_fpr_save_p (FPR6_REGNUM))
8980 {
8981 b = MIN (b, cfun_frame_layout.f4_offset + 8);
8982 t = MAX (t, cfun_frame_layout.f4_offset + 16);
8983 }
8984 }
8985 *area_bottom = b;
8986 *area_top = t;
8987 }
8988 /* Update gpr_save_slots in the frame layout trying to make use of
8989 FPRs as GPR save slots.
8990 This is a helper routine of s390_register_info. */
8991
8992 static void
8993 s390_register_info_gprtofpr ()
8994 {
8995 int save_reg_slot = FPR0_REGNUM;
8996 int i, j;
8997
8998 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
8999 return;
9000
9001 for (i = 15; i >= 6; i--)
9002 {
9003 if (cfun_gpr_save_slot (i) == 0)
9004 continue;
9005
9006 /* Advance to the next FP register which can be used as a
9007 GPR save slot. */
9008 while ((!call_really_used_regs[save_reg_slot]
9009 || df_regs_ever_live_p (save_reg_slot)
9010 || cfun_fpr_save_p (save_reg_slot))
9011 && FP_REGNO_P (save_reg_slot))
9012 save_reg_slot++;
9013 if (!FP_REGNO_P (save_reg_slot))
9014 {
9015 /* We only want to use ldgr/lgdr if we can get rid of
9016 stm/lm entirely. So undo the gpr slot allocation in
9017 case we ran out of FPR save slots. */
9018 for (j = 6; j <= 15; j++)
9019 if (FP_REGNO_P (cfun_gpr_save_slot (j)))
9020 cfun_gpr_save_slot (j) = -1;
9021 break;
9022 }
9023 cfun_gpr_save_slot (i) = save_reg_slot++;
9024 }
9025 }
9026
9027 /* Set the bits in fpr_bitmap for FPRs which need to be saved due to
9028 stdarg.
9029 This is a helper routine for s390_register_info. */
9030
9031 static void
9032 s390_register_info_stdarg_fpr ()
9033 {
9034 int i;
9035 int min_fpr;
9036 int max_fpr;
9037
9038 /* Save the FP argument regs for stdarg. f0, f2 for 31 bit and
9039 f0, f2, f4 and f6 for 64 bit. */
9040 if (!cfun->stdarg
9041 || !TARGET_HARD_FLOAT
9042 || !cfun->va_list_fpr_size
9043 || crtl->args.info.fprs >= FP_ARG_NUM_REG)
9044 return;
9045
9046 min_fpr = crtl->args.info.fprs;
9047 max_fpr = min_fpr + cfun->va_list_fpr_size;
9048 if (max_fpr > FP_ARG_NUM_REG)
9049 max_fpr = FP_ARG_NUM_REG;
9050
9051 for (i = min_fpr; i < max_fpr; i++)
9052 cfun_set_fpr_save (i + FPR0_REGNUM);
9053 }
9054
9055 /* Reserve the GPR save slots for GPRs which need to be saved due to
9056 stdarg.
9057 This is a helper routine for s390_register_info. */
9058
9059 static void
9060 s390_register_info_stdarg_gpr ()
9061 {
9062 int i;
9063 int min_gpr;
9064 int max_gpr;
9065
9066 if (!cfun->stdarg
9067 || !cfun->va_list_gpr_size
9068 || crtl->args.info.gprs >= GP_ARG_NUM_REG)
9069 return;
9070
9071 min_gpr = crtl->args.info.gprs;
9072 max_gpr = min_gpr + cfun->va_list_gpr_size;
9073 if (max_gpr > GP_ARG_NUM_REG)
9074 max_gpr = GP_ARG_NUM_REG;
9075
9076 for (i = min_gpr; i < max_gpr; i++)
9077 cfun_gpr_save_slot (2 + i) = -1;
9078 }
9079
9080 /* The GPR and FPR save slots in cfun->machine->frame_layout are set
9081 for registers which need to be saved in function prologue.
9082 This function can be used until the insns emitted for save/restore
9083 of the regs are visible in the RTL stream. */
9084
9085 static void
9086 s390_register_info ()
9087 {
9088 int i, j;
9089 char clobbered_regs[32];
9090
9091 gcc_assert (!epilogue_completed);
9092
9093 if (reload_completed)
9094 /* After reload we rely on our own routine to determine which
9095 registers need saving. */
9096 s390_regs_ever_clobbered (clobbered_regs);
9097 else
9098 /* During reload we use regs_ever_live as a base since reload
9099 makes changes there which we otherwise would not be aware
9100 of. */
9101 for (i = 0; i < 32; i++)
9102 clobbered_regs[i] = df_regs_ever_live_p (i);
9103
9104 for (i = 0; i < 32; i++)
9105 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
9106
9107 /* Mark the call-saved FPRs which need to be saved.
9108 This needs to be done before checking the special GPRs since the
9109 stack pointer usage depends on whether high FPRs have to be saved
9110 or not. */
9111 cfun_frame_layout.fpr_bitmap = 0;
9112 cfun_frame_layout.high_fprs = 0;
9113 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
9114 if (clobbered_regs[i] && !call_really_used_regs[i])
9115 {
9116 cfun_set_fpr_save (i);
9117 if (i >= FPR8_REGNUM)
9118 cfun_frame_layout.high_fprs++;
9119 }
9120
9121 if (flag_pic)
9122 clobbered_regs[PIC_OFFSET_TABLE_REGNUM]
9123 |= !!df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
9124
9125 clobbered_regs[BASE_REGNUM]
9126 |= (cfun->machine->base_reg
9127 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
9128
9129 clobbered_regs[HARD_FRAME_POINTER_REGNUM]
9130 |= !!frame_pointer_needed;
9131
9132 /* On pre-z900 machines, whether r14 really needs to be saved might
9133 not be known until the machine dependent reorg pass (branch splitting).
9134 save_return_addr_p will only be set on non-zarch machines so
9135 there is no risk that r14 goes into an FPR instead of a stack
9136 slot. */
9137 clobbered_regs[RETURN_REGNUM]
9138 |= (!crtl->is_leaf
9139 || TARGET_TPF_PROFILING
9140 || cfun->machine->split_branches_pending_p
9141 || cfun_frame_layout.save_return_addr_p
9142 || crtl->calls_eh_return);
9143
9144 clobbered_regs[STACK_POINTER_REGNUM]
9145 |= (!crtl->is_leaf
9146 || TARGET_TPF_PROFILING
9147 || cfun_save_high_fprs_p
9148 || get_frame_size () > 0
9149 || (reload_completed && cfun_frame_layout.frame_size > 0)
9150 || cfun->calls_alloca);
9151
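/* gpr_save_slots encoding as used throughout this file: 0 means no save
slot is needed, -1 requests a save slot on the stack, and an FPR register
number (assigned by s390_register_info_gprtofpr) means the GPR is saved
in that FPR. */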
9152 memset (cfun_frame_layout.gpr_save_slots, 0, 16);
9153
9154 for (i = 6; i < 16; i++)
9155 if (clobbered_regs[i])
9156 cfun_gpr_save_slot (i) = -1;
9157
9158 s390_register_info_stdarg_fpr ();
9159 s390_register_info_gprtofpr ();
9160
9161 /* First find the range of GPRs to be restored. Vararg regs don't
9162 need to be restored so we do it before assigning slots to the
9163 vararg GPRs. */
9164 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != -1; i++);
9165 for (j = 15; j > i && cfun_gpr_save_slot (j) != -1; j--);
9166 cfun_frame_layout.first_restore_gpr = (i == 16) ? -1 : i;
9167 cfun_frame_layout.last_restore_gpr = (i == 16) ? -1 : j;
9168
9169 /* stdarg functions might need to save GPRs 2 to 6. This might
9170 override the GPR->FPR save decision made above for r6 since
9171 vararg regs must go to the stack. */
9172 s390_register_info_stdarg_gpr ();
9173
9174 /* Now the range of GPRs which need saving. */
9175 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != -1; i++);
9176 for (j = 15; j > i && cfun_gpr_save_slot (j) != -1; j--);
9177 cfun_frame_layout.first_save_gpr = (i == 16) ? -1 : i;
9178 cfun_frame_layout.last_save_gpr = (i == 16) ? -1 : j;
9179 }
9180
9181 /* This function is called by s390_optimize_prologue in order to get
9182 rid of unnecessary GPR save/restore instructions. The register info
9183 for the GPRs is re-computed and the ranges are re-calculated. */
9184
9185 static void
9186 s390_optimize_register_info ()
9187 {
9188 char clobbered_regs[32];
9189 int i, j;
9190
9191 gcc_assert (epilogue_completed);
9192 gcc_assert (!cfun->machine->split_branches_pending_p);
9193
9194 s390_regs_ever_clobbered (clobbered_regs);
9195
9196 for (i = 0; i < 32; i++)
9197 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
9198
9199 /* There is still special treatment needed for cases invisible to
9200 s390_regs_ever_clobbered. */
9201 clobbered_regs[RETURN_REGNUM]
9202 |= (TARGET_TPF_PROFILING
9203 /* When expanding builtin_return_addr in ESA mode we do not
9204 know whether r14 will later be needed as scratch reg when
9205 doing branch splitting. So the builtin always accesses the
9206 r14 save slot and we need to stick to the save/restore
9207 decision for r14 even if it turns out that it didn't get
9208 clobbered. */
9209 || cfun_frame_layout.save_return_addr_p
9210 || crtl->calls_eh_return);
9211
9212 memset (cfun_frame_layout.gpr_save_slots, 0, 6);
9213
9214 for (i = 6; i < 16; i++)
9215 if (!clobbered_regs[i])
9216 cfun_gpr_save_slot (i) = 0;
9217
9218 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != -1; i++);
9219 for (j = 15; j > i && cfun_gpr_save_slot (j) != -1; j--);
9220 cfun_frame_layout.first_restore_gpr = (i == 16) ? -1 : i;
9221 cfun_frame_layout.last_restore_gpr = (i == 16) ? -1 : j;
9222
9223 s390_register_info_stdarg_gpr ();
9224
9225 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != -1; i++);
9226 for (j = 15; j > i && cfun_gpr_save_slot (j) != -1; j--);
9227 cfun_frame_layout.first_save_gpr = (i == 16) ? -1 : i;
9228 cfun_frame_layout.last_save_gpr = (i == 16) ? -1 : j;
9229 }
9230
9231 /* Fill cfun->machine with info about frame of current function. */
9232
9233 static void
9234 s390_frame_info (void)
9235 {
9236 HOST_WIDE_INT lowest_offset;
9237
9238 cfun_frame_layout.first_save_gpr_slot = cfun_frame_layout.first_save_gpr;
9239 cfun_frame_layout.last_save_gpr_slot = cfun_frame_layout.last_save_gpr;
9240
9241 /* The va_arg builtin uses a constant distance of 16 *
9242 UNITS_PER_LONG (r0-r15) to reach the FPRs from the reg_save_area
9243 pointer. So even if we are going to save the stack pointer in an
9244 FPR we need the stack space in order to keep the offsets
9245 correct. */
9246 if (cfun->stdarg && cfun_save_arg_fprs_p)
9247 {
9248 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
9249
9250 if (cfun_frame_layout.first_save_gpr_slot == -1)
9251 cfun_frame_layout.first_save_gpr_slot = STACK_POINTER_REGNUM;
9252 }
9253
9254 cfun_frame_layout.frame_size = get_frame_size ();
9255 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
9256 fatal_error (input_location,
9257 "total size of local variables exceeds architecture limit");
9258
9259 if (!TARGET_PACKED_STACK)
9260 {
9261 /* Fixed stack layout. */
9262 cfun_frame_layout.backchain_offset = 0;
9263 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
9264 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
9265 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
9266 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
9267 * UNITS_PER_LONG);
9268 }
9269 else if (TARGET_BACKCHAIN)
9270 {
9271 /* Kernel stack layout - packed stack, backchain, no float */
9272 gcc_assert (TARGET_SOFT_FLOAT);
9273 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
9274 - UNITS_PER_LONG);
9275
9276 /* The distance between the backchain and the return address
9277 save slot must not change. So we always need a slot for the
9278 stack pointer which resides in between. */
9279 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
9280
9281 cfun_frame_layout.gprs_offset
9282 = cfun_frame_layout.backchain_offset - cfun_gprs_save_area_size;
9283
9284 /* FPRs will not be saved. Nevertheless pick sane values to
9285 keep area calculations valid. */
9286 cfun_frame_layout.f0_offset =
9287 cfun_frame_layout.f4_offset =
9288 cfun_frame_layout.f8_offset = cfun_frame_layout.gprs_offset;
9289 }
9290 else
9291 {
9292 int num_fprs;
9293
9294 /* Packed stack layout without backchain. */
9295
9296 /* With stdarg FPRs need their dedicated slots. */
9297 num_fprs = (TARGET_64BIT && cfun->stdarg ? 2
9298 : (cfun_fpr_save_p (FPR4_REGNUM) +
9299 cfun_fpr_save_p (FPR6_REGNUM)));
9300 cfun_frame_layout.f4_offset = STACK_POINTER_OFFSET - 8 * num_fprs;
9301
9302 num_fprs = (cfun->stdarg ? 2
9303 : (cfun_fpr_save_p (FPR0_REGNUM)
9304 + cfun_fpr_save_p (FPR2_REGNUM)));
9305 cfun_frame_layout.f0_offset = cfun_frame_layout.f4_offset - 8 * num_fprs;
9306
9307 cfun_frame_layout.gprs_offset
9308 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
9309
9310 cfun_frame_layout.f8_offset = (cfun_frame_layout.gprs_offset
9311 - cfun_frame_layout.high_fprs * 8);
9312 }
9313
9314 if (cfun_save_high_fprs_p)
9315 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
9316
9317 if (!crtl->is_leaf)
9318 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
9319
9320 /* In the following cases we have to allocate a STACK_POINTER_OFFSET
9321 sized area at the bottom of the stack. This is required also for
9322 leaf functions. When GCC generates a local stack reference it
9323 will always add STACK_POINTER_OFFSET to all these references. */
9324 if (crtl->is_leaf
9325 && !TARGET_TPF_PROFILING
9326 && cfun_frame_layout.frame_size == 0
9327 && !cfun->calls_alloca)
9328 return;
9329
9330 /* Calculate the number of bytes we have used in our own register
9331 save area. With the packed stack layout we can re-use the
9332 remaining bytes for normal stack elements. */
9333
9334 if (TARGET_PACKED_STACK)
9335 lowest_offset = MIN (MIN (cfun_frame_layout.f0_offset,
9336 cfun_frame_layout.f4_offset),
9337 cfun_frame_layout.gprs_offset);
9338 else
9339 lowest_offset = 0;
9340
9341 if (TARGET_BACKCHAIN)
9342 lowest_offset = MIN (lowest_offset, cfun_frame_layout.backchain_offset);
9343
9344 cfun_frame_layout.frame_size += STACK_POINTER_OFFSET - lowest_offset;
9345
9346 /* Under 31 bit, if an odd number of GPRs has to be saved, we have to
9347 adjust the frame size to sustain 8-byte alignment of stack
9348 frames. */
9349 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
9350 STACK_BOUNDARY / BITS_PER_UNIT - 1)
9351 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
9352 }
9353
9354 /* Generate frame layout. Fills in register and frame data for the current
9355 function in cfun->machine. This routine can be called multiple times;
9356 it will re-do the complete frame layout every time. */
9357
9358 static void
9359 s390_init_frame_layout (void)
9360 {
9361 HOST_WIDE_INT frame_size;
9362 int base_used;
9363
9364 /* After LRA the frame layout is supposed to be read-only and should
9365 not be re-computed. */
9366 if (reload_completed)
9367 return;
9368
9369 /* On S/390 machines, we may need to perform branch splitting, which
9370 will require both base and return address register. We have no
9371 choice but to assume we're going to need them until right at the
9372 end of the machine dependent reorg phase. */
9373 if (!TARGET_CPU_ZARCH)
9374 cfun->machine->split_branches_pending_p = true;
9375
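/* The choice of base register depends on the frame size, while the frame
size in turn depends on which registers have to be saved, so iterate
until the frame size reaches a fixed point. */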
9376 do
9377 {
9378 frame_size = cfun_frame_layout.frame_size;
9379
9380 /* Try to predict whether we'll need the base register. */
9381 base_used = cfun->machine->split_branches_pending_p
9382 || crtl->uses_const_pool
9383 || (!DISP_IN_RANGE (frame_size)
9384 && !CONST_OK_FOR_K (frame_size));
9385
9386 /* Decide which register to use as literal pool base. In small
9387 leaf functions, try to use an unused call-clobbered register
9388 as base register to avoid save/restore overhead. */
9389 if (!base_used)
9390 cfun->machine->base_reg = NULL_RTX;
9391 else if (crtl->is_leaf && !df_regs_ever_live_p (5))
9392 cfun->machine->base_reg = gen_rtx_REG (Pmode, 5);
9393 else
9394 cfun->machine->base_reg = gen_rtx_REG (Pmode, BASE_REGNUM);
9395
9396 s390_register_info ();
9397 s390_frame_info ();
9398 }
9399 while (frame_size != cfun_frame_layout.frame_size);
9400 }
9401
9402 /* Remove the FPR clobbers from a tbegin insn if it can be proven that
9403 the TX is nonescaping. A transaction is considered escaping if
9404 there is at least one path from tbegin returning CC0 to the
9405 function exit block without a tend.
9406
9407 The check so far has some limitations:
9408 - only single tbegin/tend BBs are supported
9409 - the first cond jump after tbegin must separate the CC0 path from ~CC0
9410 - when CC is copied to a GPR and the CC0 check is done with the GPR
9411 this is not supported
9412 */
9413
9414 static void
9415 s390_optimize_nonescaping_tx (void)
9416 {
9417 const unsigned int CC0 = 1 << 3;
9418 basic_block tbegin_bb = NULL;
9419 basic_block tend_bb = NULL;
9420 basic_block bb;
9421 rtx_insn *insn;
9422 bool result = true;
9423 int bb_index;
9424 rtx_insn *tbegin_insn = NULL;
9425
9426 if (!cfun->machine->tbegin_p)
9427 return;
9428
9429 for (bb_index = 0; bb_index < n_basic_blocks_for_fn (cfun); bb_index++)
9430 {
9431 bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
9432
9433 if (!bb)
9434 continue;
9435
9436 FOR_BB_INSNS (bb, insn)
9437 {
9438 rtx ite, cc, pat, target;
9439 unsigned HOST_WIDE_INT mask;
9440
9441 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
9442 continue;
9443
9444 pat = PATTERN (insn);
9445
9446 if (GET_CODE (pat) == PARALLEL)
9447 pat = XVECEXP (pat, 0, 0);
9448
9449 if (GET_CODE (pat) != SET
9450 || GET_CODE (SET_SRC (pat)) != UNSPEC_VOLATILE)
9451 continue;
9452
9453 if (XINT (SET_SRC (pat), 1) == UNSPECV_TBEGIN)
9454 {
9455 rtx_insn *tmp;
9456
9457 tbegin_insn = insn;
9458
9459 /* Just return if the tbegin doesn't have clobbers. */
9460 if (GET_CODE (PATTERN (insn)) != PARALLEL)
9461 return;
9462
9463 if (tbegin_bb != NULL)
9464 return;
9465
9466 /* Find the next conditional jump. */
9467 for (tmp = NEXT_INSN (insn);
9468 tmp != NULL_RTX;
9469 tmp = NEXT_INSN (tmp))
9470 {
9471 if (reg_set_p (gen_rtx_REG (CCmode, CC_REGNUM), tmp))
9472 return;
9473 if (!JUMP_P (tmp))
9474 continue;
9475
9476 ite = SET_SRC (PATTERN (tmp));
9477 if (GET_CODE (ite) != IF_THEN_ELSE)
9478 continue;
9479
9480 cc = XEXP (XEXP (ite, 0), 0);
9481 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc))
9482 || GET_MODE (cc) != CCRAWmode
9483 || GET_CODE (XEXP (XEXP (ite, 0), 1)) != CONST_INT)
9484 return;
9485
9486 if (bb->succs->length () != 2)
9487 return;
9488
9489 mask = INTVAL (XEXP (XEXP (ite, 0), 1));
9490 if (GET_CODE (XEXP (ite, 0)) == NE)
9491 mask ^= 0xf;
9492
9493 if (mask == CC0)
9494 target = XEXP (ite, 1);
9495 else if (mask == (CC0 ^ 0xf))
9496 target = XEXP (ite, 2);
9497 else
9498 return;
9499
9500 {
9501 edge_iterator ei;
9502 edge e1, e2;
9503
9504 ei = ei_start (bb->succs);
9505 e1 = ei_safe_edge (ei);
9506 ei_next (&ei);
9507 e2 = ei_safe_edge (ei);
9508
9509 if (e2->flags & EDGE_FALLTHRU)
9510 {
9511 e2 = e1;
9512 e1 = ei_safe_edge (ei);
9513 }
9514
9515 if (!(e1->flags & EDGE_FALLTHRU))
9516 return;
9517
9518 tbegin_bb = (target == pc_rtx) ? e1->dest : e2->dest;
9519 }
9520 if (tmp == BB_END (bb))
9521 break;
9522 }
9523 }
9524
9525 if (XINT (SET_SRC (pat), 1) == UNSPECV_TEND)
9526 {
9527 if (tend_bb != NULL)
9528 return;
9529 tend_bb = bb;
9530 }
9531 }
9532 }
9533
9534 /* Either we successfully remove the FPR clobbers here or we are not
9535 able to do anything for this TX. In either case there is no need
9536 to look at it again. */
9537 cfun->machine->tbegin_p = false;
9538
9539 if (tbegin_bb == NULL || tend_bb == NULL)
9540 return;
9541
9542 calculate_dominance_info (CDI_POST_DOMINATORS);
9543 result = dominated_by_p (CDI_POST_DOMINATORS, tbegin_bb, tend_bb);
9544 free_dominance_info (CDI_POST_DOMINATORS);
9545
9546 if (!result)
9547 return;
9548
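/* Rebuild the tbegin PARALLEL from its first two elements only, which
presumably are the CC set and the TDB store, thereby dropping the FPR
clobbers. */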
9549 PATTERN (tbegin_insn) = gen_rtx_PARALLEL (VOIDmode,
9550 gen_rtvec (2,
9551 XVECEXP (PATTERN (tbegin_insn), 0, 0),
9552 XVECEXP (PATTERN (tbegin_insn), 0, 1)));
9553 INSN_CODE (tbegin_insn) = -1;
9554 df_insn_rescan (tbegin_insn);
9555
9556 return;
9557 }
9558
9559 /* Return true if it is legal to put a value with MODE into REGNO. */
9560
9561 bool
9562 s390_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
9563 {
9564 if (!TARGET_VX && VECTOR_NOFP_REGNO_P (regno))
9565 return false;
9566
9567 switch (REGNO_REG_CLASS (regno))
9568 {
9569 case VEC_REGS:
9570 return ((GET_MODE_CLASS (mode) == MODE_INT
9571 && s390_class_max_nregs (VEC_REGS, mode) == 1)
9572 || mode == DFmode
9573 || s390_vector_mode_supported_p (mode));
9574 break;
9575 case FP_REGS:
9576 if (TARGET_VX
9577 && ((GET_MODE_CLASS (mode) == MODE_INT
9578 && s390_class_max_nregs (FP_REGS, mode) == 1)
9579 || mode == DFmode
9580 || s390_vector_mode_supported_p (mode)))
9581 return true;
9582
9583 if (REGNO_PAIR_OK (regno, mode))
9584 {
9585 if (mode == SImode || mode == DImode)
9586 return true;
9587
9588 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
9589 return true;
9590 }
9591 break;
9592 case ADDR_REGS:
9593 if (FRAME_REGNO_P (regno) && mode == Pmode)
9594 return true;
9595
9596 /* fallthrough */
9597 case GENERAL_REGS:
9598 if (REGNO_PAIR_OK (regno, mode))
9599 {
9600 if (TARGET_ZARCH
9601 || (mode != TFmode && mode != TCmode && mode != TDmode))
9602 return true;
9603 }
9604 break;
9605 case CC_REGS:
9606 if (GET_MODE_CLASS (mode) == MODE_CC)
9607 return true;
9608 break;
9609 case ACCESS_REGS:
9610 if (REGNO_PAIR_OK (regno, mode))
9611 {
9612 if (mode == SImode || mode == Pmode)
9613 return true;
9614 }
9615 break;
9616 default:
9617 return false;
9618 }
9619
9620 return false;
9621 }
9622
9623 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
9624
9625 bool
9626 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
9627 {
9628 /* Once we've decided upon a register to use as base register, it must
9629 no longer be used for any other purpose. */
9630 if (cfun->machine->base_reg)
9631 if (REGNO (cfun->machine->base_reg) == old_reg
9632 || REGNO (cfun->machine->base_reg) == new_reg)
9633 return false;
9634
9635 /* Prevent regrename from using call-saved regs which haven't
9636 actually been saved. This is necessary since regrename assumes
9637 the backend save/restore decisions are based on
9638 df_regs_ever_live. Since we have our own routine we have to tell
9639 regrename manually about it. */
9640 if (GENERAL_REGNO_P (new_reg)
9641 && !call_really_used_regs[new_reg]
9642 && cfun_gpr_save_slot (new_reg) == 0)
9643 return false;
9644
9645 return true;
9646 }
9647
9648 /* Return nonzero if register REGNO can be used as a scratch register
9649 in peephole2. */
9650
9651 static bool
9652 s390_hard_regno_scratch_ok (unsigned int regno)
9653 {
9654 /* See s390_hard_regno_rename_ok. */
9655 if (GENERAL_REGNO_P (regno)
9656 && !call_really_used_regs[regno]
9657 && cfun_gpr_save_slot (regno) == 0)
9658 return false;
9659
9660 return true;
9661 }
9662
9663 /* Maximum number of registers to represent a value of mode MODE
9664 in a register of class RCLASS. */
9665
9666 int
9667 s390_class_max_nregs (enum reg_class rclass, machine_mode mode)
9668 {
9669 int reg_size;
9670 bool reg_pair_required_p = false;
9671
9672 switch (rclass)
9673 {
9674 case FP_REGS:
9675 case VEC_REGS:
9676 reg_size = TARGET_VX ? 16 : 8;
9677
9678 /* TF and TD modes would fit into a VR but we put them into a
9679 register pair since we do not have 128bit FP instructions on
9680 full VRs. */
9681 if (TARGET_VX
9682 && SCALAR_FLOAT_MODE_P (mode)
9683 && GET_MODE_SIZE (mode) >= 16)
9684 reg_pair_required_p = true;
9685
9686 /* Even if complex types would fit into a single FPR/VR we force
9687 them into a register pair to deal with the parts more easily.
9688 (FIXME: What about complex ints?) */
9689 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
9690 reg_pair_required_p = true;
9691 break;
9692 case ACCESS_REGS:
9693 reg_size = 4;
9694 break;
9695 default:
9696 reg_size = UNITS_PER_WORD;
9697 break;
9698 }
9699
9700 if (reg_pair_required_p)
9701 return 2 * ((GET_MODE_SIZE (mode) / 2 + reg_size - 1) / reg_size);
9702
9703 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
9704 }
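/* Worked example for the formulas above (added for illustration, not in
   the original source): with TARGET_VX, reg_size is 16, and a TFmode
   value (16 bytes) sets reg_pair_required_p, giving
   2 * ((16 / 2 + 16 - 1) / 16) == 2 registers; without the vector
   facility, reg_size is 8 and the plain formula (16 + 8 - 1) / 8 == 2
   yields the same register pair. A DFmode value (8 bytes) needs
   (8 + 16 - 1) / 16 == 1 VR or (8 + 8 - 1) / 8 == 1 FPR. */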
9705
9706 /* Return TRUE if changing mode from FROM to TO should not be allowed
9707 for register class CLASS. */
9708
9709 int
9710 s390_cannot_change_mode_class (machine_mode from_mode,
9711 machine_mode to_mode,
9712 enum reg_class rclass)
9713 {
9714 machine_mode small_mode;
9715 machine_mode big_mode;
9716
9717 if (GET_MODE_SIZE (from_mode) == GET_MODE_SIZE (to_mode))
9718 return 0;
9719
9720 if (GET_MODE_SIZE (from_mode) < GET_MODE_SIZE (to_mode))
9721 {
9722 small_mode = from_mode;
9723 big_mode = to_mode;
9724 }
9725 else
9726 {
9727 small_mode = to_mode;
9728 big_mode = from_mode;
9729 }
9730
9731 /* Values residing in VRs are little-endian style. All modes are
9732 placed left-aligned in a VR. This means that we cannot allow
9733 switching between modes with differing sizes. Also if the vector
9734 facility is available we still place TFmode values in VR register
9735 pairs, since the only instructions we have operating on TFmodes
9736 only deal with register pairs. Therefore we have to allow DFmode
9737 subregs of TFmodes to enable the TFmode splitters. */
9738 if (reg_classes_intersect_p (VEC_REGS, rclass)
9739 && (GET_MODE_SIZE (small_mode) < 8
9740 || s390_class_max_nregs (VEC_REGS, big_mode) == 1))
9741 return 1;
9742
9743 /* Likewise for access registers, since they have only half the
9744 word size on 64-bit. */
9745 if (reg_classes_intersect_p (ACCESS_REGS, rclass))
9746 return 1;
9747
9748 return 0;
9749 }
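/* Illustrative example (added, not part of the original source): with
   the vector facility, a DFmode subreg of a TFmode value in VEC_REGS is
   allowed because DFmode is not smaller than 8 bytes and
   s390_class_max_nregs (VEC_REGS, TFmode) == 2, which is exactly what
   the TFmode splitters rely on; an SImode subreg of a DFmode value in a
   vector register is rejected since SImode is smaller than 8 bytes. */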
9750
9751 /* Return true if we use LRA instead of reload pass. */
9752 static bool
9753 s390_lra_p (void)
9754 {
9755 return s390_lra_flag;
9756 }
9757
9758 /* Return true if register FROM can be eliminated via register TO. */
9759
9760 static bool
9761 s390_can_eliminate (const int from, const int to)
9762 {
9763 /* On zSeries machines, we have not marked the base register as fixed.
9764 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
9765 If a function requires the base register, we say here that this
9766 elimination cannot be performed. This will cause reload to free
9767 up the base register (as if it were fixed). On the other hand,
9768 if the current function does *not* require the base register, we
9769 say here the elimination succeeds, which in turn allows reload
9770 to allocate the base register for any other purpose. */
9771 if (from == BASE_REGNUM && to == BASE_REGNUM)
9772 {
9773 if (TARGET_CPU_ZARCH)
9774 {
9775 s390_init_frame_layout ();
9776 return cfun->machine->base_reg == NULL_RTX;
9777 }
9778
9779 return false;
9780 }
9781
9782 /* Everything else must point into the stack frame. */
9783 gcc_assert (to == STACK_POINTER_REGNUM
9784 || to == HARD_FRAME_POINTER_REGNUM);
9785
9786 gcc_assert (from == FRAME_POINTER_REGNUM
9787 || from == ARG_POINTER_REGNUM
9788 || from == RETURN_ADDRESS_POINTER_REGNUM);
9789
9790 /* Make sure we actually saved the return address. */
9791 if (from == RETURN_ADDRESS_POINTER_REGNUM)
9792 if (!crtl->calls_eh_return
9793 && !cfun->stdarg
9794 && !cfun_frame_layout.save_return_addr_p)
9795 return false;
9796
9797 return true;
9798 }
9799
9800 /* Return offset between register FROM and TO initially after prolog. */
9801
9802 HOST_WIDE_INT
9803 s390_initial_elimination_offset (int from, int to)
9804 {
9805 HOST_WIDE_INT offset;
9806
9807 /* ??? Why are we called for non-eliminable pairs? */
9808 if (!s390_can_eliminate (from, to))
9809 return 0;
9810
9811 switch (from)
9812 {
9813 case FRAME_POINTER_REGNUM:
9814 offset = (get_frame_size()
9815 + STACK_POINTER_OFFSET
9816 + crtl->outgoing_args_size);
9817 break;
9818
9819 case ARG_POINTER_REGNUM:
9820 s390_init_frame_layout ();
9821 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
9822 break;
9823
9824 case RETURN_ADDRESS_POINTER_REGNUM:
9825 s390_init_frame_layout ();
9826
9827 if (cfun_frame_layout.first_save_gpr_slot == -1)
9828 {
9829 /* If it turns out that for stdarg nothing went into the reg
9830 save area we also do not need the return address
9831 pointer. */
9832 if (cfun->stdarg && !cfun_save_arg_fprs_p)
9833 return 0;
9834
9835 gcc_unreachable ();
9836 }
9837
9838 /* In order to make the following work it is not necessary for
9839 r14 to have a save slot. It is sufficient if one other GPR
9840 got one. Since the GPRs are always stored without gaps we
9841 are able to calculate where the r14 save slot would
9842 reside. */
9843 offset = (cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset +
9844 (RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot) *
9845 UNITS_PER_LONG);
9846 break;
9847
9848 case BASE_REGNUM:
9849 offset = 0;
9850 break;
9851
9852 default:
9853 gcc_unreachable ();
9854 }
9855
9856 return offset;
9857 }
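/* Worked example for the RETURN_ADDRESS_POINTER_REGNUM case above
   (added for illustration, not in the original source): assuming a
   64-bit target with UNITS_PER_LONG == 8, RETURN_REGNUM == 14 and a
   first_save_gpr_slot of 6, the r14 slot sits (14 - 6) * 8 == 64 bytes
   beyond gprs_offset, on top of frame_size, even if r14 itself was not
   saved; only some GPR slot is needed as an anchor because the slots
   are laid out without gaps. */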
9858
9859 /* Emit insn to save fpr REGNUM at offset OFFSET relative
9860 to register BASE. Return generated insn. */
9861
9862 static rtx
9863 save_fpr (rtx base, int offset, int regnum)
9864 {
9865 rtx addr;
9866 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
9867
9868 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
9869 set_mem_alias_set (addr, get_varargs_alias_set ());
9870 else
9871 set_mem_alias_set (addr, get_frame_alias_set ());
9872
9873 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
9874 }
9875
9876 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
9877 to register BASE. Return generated insn. */
9878
9879 static rtx
9880 restore_fpr (rtx base, int offset, int regnum)
9881 {
9882 rtx addr;
9883 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
9884 set_mem_alias_set (addr, get_frame_alias_set ());
9885
9886 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
9887 }
9888
9889 /* Return true if REGNO is a global register, but not one
9890 of the special ones that need to be saved/restored anyway. */
9891
9892 static inline bool
9893 global_not_special_regno_p (int regno)
9894 {
9895 return (global_regs[regno]
9896 /* These registers are special and need to be
9897 restored in any case. */
9898 && !(regno == STACK_POINTER_REGNUM
9899 || regno == RETURN_REGNUM
9900 || regno == BASE_REGNUM
9901 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
9902 }
9903
9904 /* Generate insn to save registers FIRST to LAST into
9905 the register save area located at offset OFFSET
9906 relative to register BASE. */
9907
9908 static rtx
9909 save_gprs (rtx base, int offset, int first, int last)
9910 {
9911 rtx addr, insn, note;
9912 int i;
9913
9914 addr = plus_constant (Pmode, base, offset);
9915 addr = gen_rtx_MEM (Pmode, addr);
9916
9917 set_mem_alias_set (addr, get_frame_alias_set ());
9918
9919 /* Special-case single register. */
9920 if (first == last)
9921 {
9922 if (TARGET_64BIT)
9923 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
9924 else
9925 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
9926
9927 if (!global_not_special_regno_p (first))
9928 RTX_FRAME_RELATED_P (insn) = 1;
9929 return insn;
9930 }
9931
9932
9933 insn = gen_store_multiple (addr,
9934 gen_rtx_REG (Pmode, first),
9935 GEN_INT (last - first + 1));
9936
9937 if (first <= 6 && cfun->stdarg)
9938 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
9939 {
9940 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
9941
9942 if (first + i <= 6)
9943 set_mem_alias_set (mem, get_varargs_alias_set ());
9944 }
9945
9946 /* We need to set the FRAME_RELATED flag on all SETs
9947 inside the store-multiple pattern.
9948
9949 However, we must not emit DWARF records for registers 2..5
9950 if they are stored for use by variable arguments ...
9951
9952 ??? Unfortunately, it is not enough to simply not set the
9953 FRAME_RELATED flags for those SETs, because the first SET
9954 of the PARALLEL is always treated as if it had the flag
9955 set, even if it does not. Therefore we emit a new pattern
9956 without those registers as REG_FRAME_RELATED_EXPR note. */
9957
9958 if (first >= 6 && !global_not_special_regno_p (first))
9959 {
9960 rtx pat = PATTERN (insn);
9961
9962 for (i = 0; i < XVECLEN (pat, 0); i++)
9963 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
9964 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
9965 0, i)))))
9966 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
9967
9968 RTX_FRAME_RELATED_P (insn) = 1;
9969 }
9970 else if (last >= 6)
9971 {
9972 int start;
9973
9974 for (start = first >= 6 ? first : 6; start <= last; start++)
9975 if (!global_not_special_regno_p (start))
9976 break;
9977
9978 if (start > last)
9979 return insn;
9980
9981 addr = plus_constant (Pmode, base,
9982 offset + (start - first) * UNITS_PER_LONG);
9983
9984 if (start == last)
9985 {
9986 if (TARGET_64BIT)
9987 note = gen_movdi (gen_rtx_MEM (Pmode, addr),
9988 gen_rtx_REG (Pmode, start));
9989 else
9990 note = gen_movsi (gen_rtx_MEM (Pmode, addr),
9991 gen_rtx_REG (Pmode, start));
9992 note = PATTERN (note);
9993
9994 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
9995 RTX_FRAME_RELATED_P (insn) = 1;
9996
9997 return insn;
9998 }
9999
10000 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
10001 gen_rtx_REG (Pmode, start),
10002 GEN_INT (last - start + 1));
10003 note = PATTERN (note);
10004
10005 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
10006
10007 for (i = 0; i < XVECLEN (note, 0); i++)
10008 if (GET_CODE (XVECEXP (note, 0, i)) == SET
10009 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
10010 0, i)))))
10011 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
10012
10013 RTX_FRAME_RELATED_P (insn) = 1;
10014 }
10015
10016 return insn;
10017 }
10018
10019 /* Generate insn to restore registers FIRST to LAST from
10020 the register save area located at offset OFFSET
10021 relative to register BASE. */
10022
10023 static rtx
10024 restore_gprs (rtx base, int offset, int first, int last)
10025 {
10026 rtx addr, insn;
10027
10028 addr = plus_constant (Pmode, base, offset);
10029 addr = gen_rtx_MEM (Pmode, addr);
10030 set_mem_alias_set (addr, get_frame_alias_set ());
10031
10032 /* Special-case single register. */
10033 if (first == last)
10034 {
10035 if (TARGET_64BIT)
10036 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
10037 else
10038 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
10039
10040 RTX_FRAME_RELATED_P (insn) = 1;
10041 return insn;
10042 }
10043
10044 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
10045 addr,
10046 GEN_INT (last - first + 1));
10047 RTX_FRAME_RELATED_P (insn) = 1;
10048 return insn;
10049 }
10050
10051 /* Return insn sequence to load the GOT register. */
10052
10053 static GTY(()) rtx got_symbol;
10054 rtx_insn *
10055 s390_load_got (void)
10056 {
10057 rtx_insn *insns;
10058
10059 /* We cannot use pic_offset_table_rtx here since we use this
10060 function also for non-PIC code if __tls_get_offset is called; in
10061 that case neither PIC_OFFSET_TABLE_REGNUM nor pic_offset_table_rtx
10062 is usable. */
10063 rtx got_rtx = gen_rtx_REG (Pmode, 12);
10064
10065 if (!got_symbol)
10066 {
10067 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
10068 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
10069 }
10070
10071 start_sequence ();
10072
10073 if (TARGET_CPU_ZARCH)
10074 {
10075 emit_move_insn (got_rtx, got_symbol);
10076 }
10077 else
10078 {
10079 rtx offset;
10080
10081 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
10082 UNSPEC_LTREL_OFFSET);
10083 offset = gen_rtx_CONST (Pmode, offset);
10084 offset = force_const_mem (Pmode, offset);
10085
10086 emit_move_insn (got_rtx, offset);
10087
10088 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
10089 UNSPEC_LTREL_BASE);
10090 offset = gen_rtx_PLUS (Pmode, got_rtx, offset);
10091
10092 emit_move_insn (got_rtx, offset);
10093 }
10094
10095 insns = get_insns ();
10096 end_sequence ();
10097 return insns;
10098 }
10099
10100 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
10101 and the change to the stack pointer. */
10102
10103 static void
10104 s390_emit_stack_tie (void)
10105 {
10106 rtx mem = gen_frame_mem (BLKmode,
10107 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
10108
10109 emit_insn (gen_stack_tie (mem));
10110 }
10111
10112 /* Copy GPRS into FPR save slots. */
10113
10114 static void
10115 s390_save_gprs_to_fprs (void)
10116 {
10117 int i;
10118
10119 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
10120 return;
10121
10122 for (i = 6; i < 16; i++)
10123 {
10124 if (FP_REGNO_P (cfun_gpr_save_slot (i)))
10125 {
10126 rtx_insn *insn =
10127 emit_move_insn (gen_rtx_REG (DImode, cfun_gpr_save_slot (i)),
10128 gen_rtx_REG (DImode, i));
10129 RTX_FRAME_RELATED_P (insn) = 1;
10130 /* This prevents dwarf2cfi from interpreting the set.
10131 Otherwise it might emit def_cfa_register notes making an
10132 FPR the new CFA. */
10133 add_reg_note (insn, REG_CFA_REGISTER, PATTERN (insn));
10134 }
10135 }
10136 }
10137
10138 /* Restore GPRs from FPR save slots. */
10139
10140 static void
10141 s390_restore_gprs_from_fprs (void)
10142 {
10143 int i;
10144
10145 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
10146 return;
10147
10148 for (i = 6; i < 16; i++)
10149 {
10150 if (FP_REGNO_P (cfun_gpr_save_slot (i)))
10151 {
10152 rtx_insn *insn =
10153 emit_move_insn (gen_rtx_REG (DImode, i),
10154 gen_rtx_REG (DImode, cfun_gpr_save_slot (i)));
10155 df_set_regs_ever_live (i, true);
10156 add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, i));
10157 if (i == STACK_POINTER_REGNUM)
10158 add_reg_note (insn, REG_CFA_DEF_CFA,
10159 plus_constant (Pmode, stack_pointer_rtx,
10160 STACK_POINTER_OFFSET));
10161 RTX_FRAME_RELATED_P (insn) = 1;
10162 }
10163 }
10164 }
10165
10166
10167 /* A pass run immediately before shrink-wrapping and prologue and epilogue
10168 generation. */
10169
10170 namespace {
10171
10172 const pass_data pass_data_s390_early_mach =
10173 {
10174 RTL_PASS, /* type */
10175 "early_mach", /* name */
10176 OPTGROUP_NONE, /* optinfo_flags */
10177 TV_MACH_DEP, /* tv_id */
10178 0, /* properties_required */
10179 0, /* properties_provided */
10180 0, /* properties_destroyed */
10181 0, /* todo_flags_start */
10182 ( TODO_df_verify | TODO_df_finish ), /* todo_flags_finish */
10183 };
10184
10185 class pass_s390_early_mach : public rtl_opt_pass
10186 {
10187 public:
10188 pass_s390_early_mach (gcc::context *ctxt)
10189 : rtl_opt_pass (pass_data_s390_early_mach, ctxt)
10190 {}
10191
10192 /* opt_pass methods: */
10193 virtual unsigned int execute (function *);
10194
10195 }; // class pass_s390_early_mach
10196
10197 unsigned int
10198 pass_s390_early_mach::execute (function *fun)
10199 {
10200 rtx_insn *insn;
10201
10202 /* Try to get rid of the FPR clobbers. */
10203 s390_optimize_nonescaping_tx ();
10204
10205 /* Re-compute register info. */
10206 s390_register_info ();
10207
10208 /* If we're using a base register, ensure that it is always valid for
10209 the first non-prologue instruction. */
10210 if (fun->machine->base_reg)
10211 emit_insn_at_entry (gen_main_pool (fun->machine->base_reg));
10212
10213 /* Annotate all constant pool references to let the scheduler know
10214 they implicitly use the base register. */
10215 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10216 if (INSN_P (insn))
10217 {
10218 annotate_constant_pool_refs (&PATTERN (insn));
10219 df_insn_rescan (insn);
10220 }
10221 return 0;
10222 }
10223
10224 } // anon namespace
10225
10226 /* Expand the prologue into a bunch of separate insns. */
10227
10228 void
10229 s390_emit_prologue (void)
10230 {
10231 rtx insn, addr;
10232 rtx temp_reg;
10233 int i;
10234 int offset;
10235 int next_fpr = 0;
10236
10237 /* Choose the best register for temporary use within the prologue.
10238 See below for why TPF must use register 1. */
10239
10240 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
10241 && !crtl->is_leaf
10242 && !TARGET_TPF_PROFILING)
10243 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
10244 else
10245 temp_reg = gen_rtx_REG (Pmode, 1);
10246
10247 s390_save_gprs_to_fprs ();
10248
10249 /* Save call saved gprs. */
10250 if (cfun_frame_layout.first_save_gpr != -1)
10251 {
10252 insn = save_gprs (stack_pointer_rtx,
10253 cfun_frame_layout.gprs_offset +
10254 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
10255 - cfun_frame_layout.first_save_gpr_slot),
10256 cfun_frame_layout.first_save_gpr,
10257 cfun_frame_layout.last_save_gpr);
10258 emit_insn (insn);
10259 }
10260
10261 /* Dummy insn to mark literal pool slot. */
10262
10263 if (cfun->machine->base_reg)
10264 emit_insn (gen_main_pool (cfun->machine->base_reg));
10265
10266 offset = cfun_frame_layout.f0_offset;
10267
10268 /* Save f0 and f2. */
10269 for (i = FPR0_REGNUM; i <= FPR0_REGNUM + 1; i++)
10270 {
10271 if (cfun_fpr_save_p (i))
10272 {
10273 save_fpr (stack_pointer_rtx, offset, i);
10274 offset += 8;
10275 }
10276 else if (!TARGET_PACKED_STACK || cfun->stdarg)
10277 offset += 8;
10278 }
10279
10280 /* Save f4 and f6. */
10281 offset = cfun_frame_layout.f4_offset;
10282 for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
10283 {
10284 if (cfun_fpr_save_p (i))
10285 {
10286 insn = save_fpr (stack_pointer_rtx, offset, i);
10287 offset += 8;
10288
10289 /* If f4 and f6 are call clobbered they are saved due to
10290 stdargs and therefore are not frame related. */
10291 if (!call_really_used_regs[i])
10292 RTX_FRAME_RELATED_P (insn) = 1;
10293 }
10294 else if (!TARGET_PACKED_STACK || call_really_used_regs[i])
10295 offset += 8;
10296 }
10297
10298 if (TARGET_PACKED_STACK
10299 && cfun_save_high_fprs_p
10300 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
10301 {
10302 offset = (cfun_frame_layout.f8_offset
10303 + (cfun_frame_layout.high_fprs - 1) * 8);
10304
10305 for (i = FPR15_REGNUM; i >= FPR8_REGNUM && offset >= 0; i--)
10306 if (cfun_fpr_save_p (i))
10307 {
10308 insn = save_fpr (stack_pointer_rtx, offset, i);
10309
10310 RTX_FRAME_RELATED_P (insn) = 1;
10311 offset -= 8;
10312 }
10313 if (offset >= cfun_frame_layout.f8_offset)
10314 next_fpr = i;
10315 }
10316
10317 if (!TARGET_PACKED_STACK)
10318 next_fpr = cfun_save_high_fprs_p ? FPR15_REGNUM : 0;
10319
10320 if (flag_stack_usage_info)
10321 current_function_static_stack_size = cfun_frame_layout.frame_size;
10322
10323 /* Decrement stack pointer. */
10324
10325 if (cfun_frame_layout.frame_size > 0)
10326 {
10327 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
10328 rtx real_frame_off;
10329
10330 if (s390_stack_size)
10331 {
10332 HOST_WIDE_INT stack_guard;
10333
10334 if (s390_stack_guard)
10335 stack_guard = s390_stack_guard;
10336 else
10337 {
10338 /* If no value for stack guard is provided the smallest power of 2
10339 larger than the current frame size is chosen. */
10340 stack_guard = 1;
10341 while (stack_guard < cfun_frame_layout.frame_size)
10342 stack_guard <<= 1;
10343 }
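/* Example of the computation above (added for illustration, not in the
   original source): for a frame size of 5000 bytes and no stack guard
   value provided, the loop yields stack_guard == 8192, the smallest
   power of two larger than the frame size. */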
10344
10345 if (cfun_frame_layout.frame_size >= s390_stack_size)
10346 {
10347 warning (0, "frame size of function %qs is %wd"
10348 " bytes exceeding user provided stack limit of "
10349 "%d bytes. "
10350 "An unconditional trap is added.",
10351 current_function_name(), cfun_frame_layout.frame_size,
10352 s390_stack_size);
10353 emit_insn (gen_trap ());
10354 emit_barrier ();
10355 }
10356 else
10357 {
10358 /* stack_guard has to be smaller than s390_stack_size.
10359 Otherwise we would emit an AND with zero which would
10360 not match the test under mask pattern. */
10361 if (stack_guard >= s390_stack_size)
10362 {
10363 warning (0, "frame size of function %qs is %wd"
10364 " bytes which is more than half the stack size. "
10365 "The dynamic check would not be reliable. "
10366 "No check emitted for this function.",
10367 current_function_name(),
10368 cfun_frame_layout.frame_size);
10369 }
10370 else
10371 {
10372 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
10373 & ~(stack_guard - 1));
10374
10375 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
10376 GEN_INT (stack_check_mask));
10377 if (TARGET_64BIT)
10378 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
10379 t, const0_rtx),
10380 t, const0_rtx, const0_rtx));
10381 else
10382 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
10383 t, const0_rtx),
10384 t, const0_rtx, const0_rtx));
10385 }
10386 }
10387 }
10388
10389 if (s390_warn_framesize > 0
10390 && cfun_frame_layout.frame_size >= s390_warn_framesize)
10391 warning (0, "frame size of %qs is %wd bytes",
10392 current_function_name (), cfun_frame_layout.frame_size);
10393
10394 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
10395 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
10396
10397 /* Save incoming stack pointer into temp reg. */
10398 if (TARGET_BACKCHAIN || next_fpr)
10399 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
10400
10401 /* Subtract frame size from stack pointer. */
10402
10403 if (DISP_IN_RANGE (INTVAL (frame_off)))
10404 {
10405 insn = gen_rtx_SET (stack_pointer_rtx,
10406 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
10407 frame_off));
10408 insn = emit_insn (insn);
10409 }
10410 else
10411 {
10412 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
10413 frame_off = force_const_mem (Pmode, frame_off);
10414
10415 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
10416 annotate_constant_pool_refs (&PATTERN (insn));
10417 }
10418
10419 RTX_FRAME_RELATED_P (insn) = 1;
10420 real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
10421 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
10422 gen_rtx_SET (stack_pointer_rtx,
10423 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
10424 real_frame_off)));
10425
10426 /* Set backchain. */
10427
10428 if (TARGET_BACKCHAIN)
10429 {
10430 if (cfun_frame_layout.backchain_offset)
10431 addr = gen_rtx_MEM (Pmode,
10432 plus_constant (Pmode, stack_pointer_rtx,
10433 cfun_frame_layout.backchain_offset));
10434 else
10435 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
10436 set_mem_alias_set (addr, get_frame_alias_set ());
10437 insn = emit_insn (gen_move_insn (addr, temp_reg));
10438 }
10439
10440 /* If we support non-call exceptions (e.g. for Java),
10441 we need to make sure the backchain pointer is set up
10442 before any possibly trapping memory access. */
10443 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
10444 {
10445 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
10446 emit_clobber (addr);
10447 }
10448 }
10449
10450 /* Save fprs 8 - 15 (64 bit ABI). */
10451
10452 if (cfun_save_high_fprs_p && next_fpr)
10453 {
10454 /* If the stack might be accessed through a different register
10455 we have to make sure that the stack pointer decrement is not
10456 moved below the use of the stack slots. */
10457 s390_emit_stack_tie ();
10458
10459 insn = emit_insn (gen_add2_insn (temp_reg,
10460 GEN_INT (cfun_frame_layout.f8_offset)));
10461
10462 offset = 0;
10463
10464 for (i = FPR8_REGNUM; i <= next_fpr; i++)
10465 if (cfun_fpr_save_p (i))
10466 {
10467 rtx addr = plus_constant (Pmode, stack_pointer_rtx,
10468 cfun_frame_layout.frame_size
10469 + cfun_frame_layout.f8_offset
10470 + offset);
10471
10472 insn = save_fpr (temp_reg, offset, i);
10473 offset += 8;
10474 RTX_FRAME_RELATED_P (insn) = 1;
10475 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
10476 gen_rtx_SET (gen_rtx_MEM (DFmode, addr),
10477 gen_rtx_REG (DFmode, i)));
10478 }
10479 }
10480
10481 /* Set frame pointer, if needed. */
10482
10483 if (frame_pointer_needed)
10484 {
10485 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
10486 RTX_FRAME_RELATED_P (insn) = 1;
10487 }
10488
10489 /* Set up got pointer, if needed. */
10490
10491 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
10492 {
10493 rtx_insn *insns = s390_load_got ();
10494
10495 for (rtx_insn *insn = insns; insn; insn = NEXT_INSN (insn))
10496 annotate_constant_pool_refs (&PATTERN (insn));
10497
10498 emit_insn (insns);
10499 }
10500
10501 if (TARGET_TPF_PROFILING)
10502 {
10503 /* Generate a BAS instruction to serve as a function
10504 entry intercept to facilitate the use of tracing
10505 algorithms located at the branch target. */
10506 emit_insn (gen_prologue_tpf ());
10507
10508 /* Emit a blockage here so that all code
10509 lies between the profiling mechanisms. */
10510 emit_insn (gen_blockage ());
10511 }
10512 }
10513
10514 /* Expand the epilogue into a bunch of separate insns. */
10515
10516 void
10517 s390_emit_epilogue (bool sibcall)
10518 {
10519 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
10520 int area_bottom, area_top, offset = 0;
10521 int next_offset;
10522 rtvec p;
10523 int i;
10524
10525 if (TARGET_TPF_PROFILING)
10526 {
10527
10528 /* Generate a BAS instruction to serve as a function
10529 entry intercept to facilitate the use of tracing
10530 algorithms located at the branch target. */
10531
10532 /* Emit a blockage here so that all code
10533 lies between the profiling mechanisms. */
10534 emit_insn (gen_blockage ());
10535
10536 emit_insn (gen_epilogue_tpf ());
10537 }
10538
10539 /* Check whether to use frame or stack pointer for restore. */
10540
10541 frame_pointer = (frame_pointer_needed
10542 ? hard_frame_pointer_rtx : stack_pointer_rtx);
10543
10544 s390_frame_area (&area_bottom, &area_top);
10545
10546 /* Check whether we can access the register save area.
10547 If not, increment the frame pointer as required. */
10548
10549 if (area_top <= area_bottom)
10550 {
10551 /* Nothing to restore. */
10552 }
10553 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
10554 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
10555 {
10556 /* Area is in range. */
10557 offset = cfun_frame_layout.frame_size;
10558 }
10559 else
10560 {
10561 rtx insn, frame_off, cfa;
10562
10563 offset = area_bottom < 0 ? -area_bottom : 0;
10564 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
10565
10566 cfa = gen_rtx_SET (frame_pointer,
10567 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
10568 if (DISP_IN_RANGE (INTVAL (frame_off)))
10569 {
10570 insn = gen_rtx_SET (frame_pointer,
10571 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
10572 insn = emit_insn (insn);
10573 }
10574 else
10575 {
10576 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
10577 frame_off = force_const_mem (Pmode, frame_off);
10578
10579 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
10580 annotate_constant_pool_refs (&PATTERN (insn));
10581 }
10582 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
10583 RTX_FRAME_RELATED_P (insn) = 1;
10584 }
10585
10586 /* Restore call saved fprs. */
10587
10588 if (TARGET_64BIT)
10589 {
10590 if (cfun_save_high_fprs_p)
10591 {
10592 next_offset = cfun_frame_layout.f8_offset;
10593 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
10594 {
10595 if (cfun_fpr_save_p (i))
10596 {
10597 restore_fpr (frame_pointer,
10598 offset + next_offset, i);
10599 cfa_restores
10600 = alloc_reg_note (REG_CFA_RESTORE,
10601 gen_rtx_REG (DFmode, i), cfa_restores);
10602 next_offset += 8;
10603 }
10604 }
10605 }
10606
10607 }
10608 else
10609 {
10610 next_offset = cfun_frame_layout.f4_offset;
10611 /* f4, f6 */
10612 for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
10613 {
10614 if (cfun_fpr_save_p (i))
10615 {
10616 restore_fpr (frame_pointer,
10617 offset + next_offset, i);
10618 cfa_restores
10619 = alloc_reg_note (REG_CFA_RESTORE,
10620 gen_rtx_REG (DFmode, i), cfa_restores);
10621 next_offset += 8;
10622 }
10623 else if (!TARGET_PACKED_STACK)
10624 next_offset += 8;
10625 }
10626
10627 }
10628
10629 /* Return register. */
10630
10631 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
10632
10633 /* Restore call saved gprs. */
10634
10635 if (cfun_frame_layout.first_restore_gpr != -1)
10636 {
10637 rtx insn, addr;
10638 int i;
10639
10640 /* Check for global registers and save them
10641 to the stack location from where they get restored. */
10642
10643 for (i = cfun_frame_layout.first_restore_gpr;
10644 i <= cfun_frame_layout.last_restore_gpr;
10645 i++)
10646 {
10647 if (global_not_special_regno_p (i))
10648 {
10649 addr = plus_constant (Pmode, frame_pointer,
10650 offset + cfun_frame_layout.gprs_offset
10651 + (i - cfun_frame_layout.first_save_gpr_slot)
10652 * UNITS_PER_LONG);
10653 addr = gen_rtx_MEM (Pmode, addr);
10654 set_mem_alias_set (addr, get_frame_alias_set ());
10655 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
10656 }
10657 else
10658 cfa_restores
10659 = alloc_reg_note (REG_CFA_RESTORE,
10660 gen_rtx_REG (Pmode, i), cfa_restores);
10661 }
10662
10663 if (! sibcall)
10664 {
10665 /* Fetch the return address from the stack before the load multiple;
10666 this helps scheduling.
10667
10668 Only do this if we already decided that r14 needs to be
10669 saved to a stack slot. (And not just because r14 happens to
10670 be in between two GPRs which need saving.) Otherwise it
10671 would be difficult to take that decision back in
10672 s390_optimize_prologue. */
10673 if (cfun_gpr_save_slot (RETURN_REGNUM) == -1)
10674 {
10675 int return_regnum = find_unused_clobbered_reg();
10676 if (!return_regnum)
10677 return_regnum = 4;
10678 return_reg = gen_rtx_REG (Pmode, return_regnum);
10679
10680 addr = plus_constant (Pmode, frame_pointer,
10681 offset + cfun_frame_layout.gprs_offset
10682 + (RETURN_REGNUM
10683 - cfun_frame_layout.first_save_gpr_slot)
10684 * UNITS_PER_LONG);
10685 addr = gen_rtx_MEM (Pmode, addr);
10686 set_mem_alias_set (addr, get_frame_alias_set ());
10687 emit_move_insn (return_reg, addr);
10688
10689 /* Once we did that optimization we have to make sure
10690 s390_optimize_prologue does not try to remove the
10691 store of r14 since we will not be able to find the
10692 load issued here. */
10693 cfun_frame_layout.save_return_addr_p = true;
10694 }
10695 }
10696
10697 insn = restore_gprs (frame_pointer,
10698 offset + cfun_frame_layout.gprs_offset
10699 + (cfun_frame_layout.first_restore_gpr
10700 - cfun_frame_layout.first_save_gpr_slot)
10701 * UNITS_PER_LONG,
10702 cfun_frame_layout.first_restore_gpr,
10703 cfun_frame_layout.last_restore_gpr);
10704 insn = emit_insn (insn);
10705 REG_NOTES (insn) = cfa_restores;
10706 add_reg_note (insn, REG_CFA_DEF_CFA,
10707 plus_constant (Pmode, stack_pointer_rtx,
10708 STACK_POINTER_OFFSET));
10709 RTX_FRAME_RELATED_P (insn) = 1;
10710 }
10711
10712 s390_restore_gprs_from_fprs ();
10713
10714 if (! sibcall)
10715 {
10716
10717 /* Return to caller. */
10718
10719 p = rtvec_alloc (2);
10720
10721 RTVEC_ELT (p, 0) = ret_rtx;
10722 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
10723 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
10724 }
10725 }
10726
10727 /* Implement TARGET_SET_UP_BY_PROLOGUE. */
10728
10729 static void
10730 s300_set_up_by_prologue (hard_reg_set_container *regs)
10731 {
10732 if (cfun->machine->base_reg
10733 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
10734 SET_HARD_REG_BIT (regs->set, REGNO (cfun->machine->base_reg));
10735 }
10736
10737 /* Return true if the function can use simple_return to return outside
10738 of a shrink-wrapped region. At present shrink-wrapping is supported
10739 in all cases. */
10740
10741 bool
10742 s390_can_use_simple_return_insn (void)
10743 {
10744 return true;
10745 }
10746
10747 /* Return true if the epilogue is guaranteed to contain only a return
10748 instruction and if a direct return can therefore be used instead.
10749 One of the main advantages of using direct return instructions
10750 is that we can then use conditional returns. */
10751
10752 bool
10753 s390_can_use_return_insn (void)
10754 {
10755 int i;
10756
10757 if (!reload_completed)
10758 return false;
10759
10760 if (crtl->profile)
10761 return false;
10762
10763 if (TARGET_TPF_PROFILING)
10764 return false;
10765
10766 for (i = 0; i < 16; i++)
10767 if (cfun_gpr_save_slot (i))
10768 return false;
10769
10770 /* For 31 bit this is not covered by the frame_size check below
10771 since f4, f6 are saved in the register save area without needing
10772 additional stack space. */
10773 if (!TARGET_64BIT
10774 && (cfun_fpr_save_p (FPR4_REGNUM) || cfun_fpr_save_p (FPR6_REGNUM)))
10775 return false;
10776
10777 if (cfun->machine->base_reg
10778 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
10779 return false;
10780
10781 return cfun_frame_layout.frame_size == 0;
10782 }
10783
10784 /* The VX ABI differs for vararg functions. Therefore we need the
10785 prototype of the callee to be available when passing vector type
10786 values. */
10787 static const char *
10788 s390_invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
10789 {
10790 return ((TARGET_VX_ABI
10791 && typelist == 0
10792 && VECTOR_TYPE_P (TREE_TYPE (val))
10793 && (funcdecl == NULL_TREE
10794 || (TREE_CODE (funcdecl) == FUNCTION_DECL
10795 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
10796 ? N_("Vector argument passed to unprototyped function")
10797 : NULL);
10798 }
10799
10800
10801 /* Return the size in bytes of a function argument of
10802 type TYPE and/or mode MODE. At least one of TYPE or
10803 MODE must be specified. */
10804
10805 static int
10806 s390_function_arg_size (machine_mode mode, const_tree type)
10807 {
10808 if (type)
10809 return int_size_in_bytes (type);
10810
10811 /* No type info available for some library calls ... */
10812 if (mode != BLKmode)
10813 return GET_MODE_SIZE (mode);
10814
10815 /* If we have neither type nor mode, abort */
10816 gcc_unreachable ();
10817 }
10818
10819 /* Return true if a function argument of type TYPE and mode MODE
10820 is to be passed in a vector register, if available. */
10821
10822 bool
10823 s390_function_arg_vector (machine_mode mode, const_tree type)
10824 {
10825 if (!TARGET_VX_ABI)
10826 return false;
10827
10828 if (s390_function_arg_size (mode, type) > 16)
10829 return false;
10830
10831 /* No type info available for some library calls ... */
10832 if (!type)
10833 return VECTOR_MODE_P (mode);
10834
10835 /* The ABI says that record types with a single member are treated
10836 just like that member would be. */
10837 while (TREE_CODE (type) == RECORD_TYPE)
10838 {
10839 tree field, single = NULL_TREE;
10840
10841 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
10842 {
10843 if (TREE_CODE (field) != FIELD_DECL)
10844 continue;
10845
10846 if (single == NULL_TREE)
10847 single = TREE_TYPE (field);
10848 else
10849 return false;
10850 }
10851
10852 if (single == NULL_TREE)
10853 return false;
10854 else
10855 {
10856 /* If the field declaration adds extra bytes due to
10857 e.g. padding, this is not accepted as a vector type. */
10858 if (int_size_in_bytes (single) <= 0
10859 || int_size_in_bytes (single) != int_size_in_bytes (type))
10860 return false;
10861 type = single;
10862 }
10863 }
10864
10865 return VECTOR_TYPE_P (type);
10866 }
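/* Illustrative example of the single-member record rule above (added,
   not part of the original source): a type such as
   struct { __vector int v; } is unwrapped to its vector member and
   treated as a vector argument, while struct { __vector int v; int x; }
   stays a plain aggregate because it has more than one field. */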
10867
10868 /* Return true if a function argument of type TYPE and mode MODE
10869 is to be passed in a floating-point register, if available. */
10870
10871 static bool
10872 s390_function_arg_float (machine_mode mode, const_tree type)
10873 {
10874 if (s390_function_arg_size (mode, type) > 8)
10875 return false;
10876
10877 /* Soft-float changes the ABI: no floating-point registers are used. */
10878 if (TARGET_SOFT_FLOAT)
10879 return false;
10880
10881 /* No type info available for some library calls ... */
10882 if (!type)
10883 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
10884
10885 /* The ABI says that record types with a single member are treated
10886 just like that member would be. */
10887 while (TREE_CODE (type) == RECORD_TYPE)
10888 {
10889 tree field, single = NULL_TREE;
10890
10891 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
10892 {
10893 if (TREE_CODE (field) != FIELD_DECL)
10894 continue;
10895
10896 if (single == NULL_TREE)
10897 single = TREE_TYPE (field);
10898 else
10899 return false;
10900 }
10901
10902 if (single == NULL_TREE)
10903 return false;
10904 else
10905 type = single;
10906 }
10907
10908 return TREE_CODE (type) == REAL_TYPE;
10909 }
10910
10911 /* Return true if a function argument of type TYPE and mode MODE
10912 is to be passed in an integer register, or a pair of integer
10913 registers, if available. */
10914
10915 static bool
10916 s390_function_arg_integer (machine_mode mode, const_tree type)
10917 {
10918 int size = s390_function_arg_size (mode, type);
10919 if (size > 8)
10920 return false;
10921
10922 /* No type info available for some library calls ... */
10923 if (!type)
10924 return GET_MODE_CLASS (mode) == MODE_INT
10925 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
10926
10927 /* We accept small integral (and similar) types. */
10928 if (INTEGRAL_TYPE_P (type)
10929 || POINTER_TYPE_P (type)
10930 || TREE_CODE (type) == NULLPTR_TYPE
10931 || TREE_CODE (type) == OFFSET_TYPE
10932 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
10933 return true;
10934
10935 /* We also accept structs of size 1, 2, 4, 8 that are not
10936 passed in floating-point registers. */
10937 if (AGGREGATE_TYPE_P (type)
10938 && exact_log2 (size) >= 0
10939 && !s390_function_arg_float (mode, type))
10940 return true;
10941
10942 return false;
10943 }
10944
10945 /* Return 1 if a function argument of type TYPE and mode MODE
10946 is to be passed by reference. The ABI specifies that only
10947 structures of size 1, 2, 4, or 8 bytes are passed by value,
10948 all other structures (and complex numbers) are passed by
10949 reference. */
10950
10951 static bool
10952 s390_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
10953 machine_mode mode, const_tree type,
10954 bool named ATTRIBUTE_UNUSED)
10955 {
10956 int size = s390_function_arg_size (mode, type);
10957
10958 if (s390_function_arg_vector (mode, type))
10959 return false;
10960
10961 if (size > 8)
10962 return true;
10963
10964 if (type)
10965 {
10966 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
10967 return true;
10968
10969 if (TREE_CODE (type) == COMPLEX_TYPE
10970 || TREE_CODE (type) == VECTOR_TYPE)
10971 return true;
10972 }
10973
10974 return false;
10975 }
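/* Illustrative example (added, not part of the original source): a
   6-byte struct is passed by reference because exact_log2 (6) < 0,
   while an 8-byte struct of two ints is passed by value in a GPR;
   complex types and (outside the vector ABI) vector types are always
   passed by reference, and anything larger than 8 bytes is as well,
   unless the vector ABI accepts it as a vector argument first. */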
10976
10977 /* Update the data in CUM to advance over an argument of mode MODE and
10978 data type TYPE. (TYPE is null for libcalls where that information
10979 may not be available.). The boolean NAMED specifies whether the
10980 argument is a named argument (as opposed to an unnamed argument
10981 matching an ellipsis). */
10982
10983 static void
10984 s390_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
10985 const_tree type, bool named)
10986 {
10987 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
10988
10989 if (s390_function_arg_vector (mode, type))
10990 {
10991 /* We are called for unnamed vector stdarg arguments which are
10992 passed on the stack. In this case this hook does not have to
10993 do anything since stack arguments are tracked by common
10994 code. */
10995 if (!named)
10996 return;
10997 cum->vrs += 1;
10998 }
10999 else if (s390_function_arg_float (mode, type))
11000 {
11001 cum->fprs += 1;
11002 }
11003 else if (s390_function_arg_integer (mode, type))
11004 {
11005 int size = s390_function_arg_size (mode, type);
11006 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
11007 }
11008 else
11009 gcc_unreachable ();
11010 }
11011
11012 /* Define where to put the arguments to a function.
11013 Value is zero to push the argument on the stack,
11014 or a hard register in which to store the argument.
11015
11016 MODE is the argument's machine mode.
11017 TYPE is the data type of the argument (as a tree).
11018 This is null for libcalls where that information may
11019 not be available.
11020 CUM is a variable of type CUMULATIVE_ARGS which gives info about
11021 the preceding args and about the function being called.
11022 NAMED is nonzero if this argument is a named parameter
11023 (otherwise it is an extra parameter matching an ellipsis).
11024
11025 On S/390, we use general purpose registers 2 through 6 to
11026 pass integer, pointer, and certain structure arguments, and
11027 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
11028 to pass floating point arguments. All remaining arguments
11029 are pushed to the stack. */
11030
11031 static rtx
11032 s390_function_arg (cumulative_args_t cum_v, machine_mode mode,
11033 const_tree type, bool named)
11034 {
11035 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11036
11037 if (!named)
11038 s390_check_type_for_vector_abi (type, true, false);
11039
11040 if (s390_function_arg_vector (mode, type))
11041 {
11042 /* Vector arguments being part of the ellipsis are passed on the
11043 stack. */
11044 if (!named || (cum->vrs + 1 > VEC_ARG_NUM_REG))
11045 return NULL_RTX;
11046
11047 return gen_rtx_REG (mode, cum->vrs + FIRST_VEC_ARG_REGNO);
11048 }
11049 else if (s390_function_arg_float (mode, type))
11050 {
11051 if (cum->fprs + 1 > FP_ARG_NUM_REG)
11052 return NULL_RTX;
11053 else
11054 return gen_rtx_REG (mode, cum->fprs + 16);
11055 }
11056 else if (s390_function_arg_integer (mode, type))
11057 {
11058 int size = s390_function_arg_size (mode, type);
11059 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
11060
11061 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
11062 return NULL_RTX;
11063 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
11064 return gen_rtx_REG (mode, cum->gprs + 2);
11065 else if (n_gprs == 2)
11066 {
11067 rtvec p = rtvec_alloc (2);
11068
11069 RTVEC_ELT (p, 0)
11070 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
11071 const0_rtx);
11072 RTVEC_ELT (p, 1)
11073 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
11074 GEN_INT (4));
11075
11076 return gen_rtx_PARALLEL (mode, p);
11077 }
11078 }
11079
11080 /* After the real arguments, expand_call calls us once again
11081 with a void_type_node type. Whatever we return here is
11082 passed as operand 2 to the call expanders.
11083
11084 We don't need this feature ... */
11085 else if (type == void_type_node)
11086 return const0_rtx;
11087
11088 gcc_unreachable ();
11089 }
11090
11091 /* Return true if return values of type TYPE should be returned
11092 in a memory buffer whose address is passed by the caller as
11093 hidden first argument. */
11094
11095 static bool
11096 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
11097 {
11098 /* We accept small integral (and similar) types. */
11099 if (INTEGRAL_TYPE_P (type)
11100 || POINTER_TYPE_P (type)
11101 || TREE_CODE (type) == OFFSET_TYPE
11102 || TREE_CODE (type) == REAL_TYPE)
11103 return int_size_in_bytes (type) > 8;
11104
11105 /* vector types which fit into a VR. */
11106 if (TARGET_VX_ABI
11107 && VECTOR_TYPE_P (type)
11108 && int_size_in_bytes (type) <= 16)
11109 return false;
11110
11111 /* Aggregates and similar constructs are always returned
11112 in memory. */
11113 if (AGGREGATE_TYPE_P (type)
11114 || TREE_CODE (type) == COMPLEX_TYPE
11115 || VECTOR_TYPE_P (type))
11116 return true;
11117
11118 /* ??? We get called on all sorts of random stuff from
11119 aggregate_value_p. We can't abort, but it's not clear
11120 what's safe to return. Pretend it's a struct I guess. */
11121 return true;
11122 }
11123
11124 /* Function arguments and return values are promoted to word size. */
11125
11126 static machine_mode
11127 s390_promote_function_mode (const_tree type, machine_mode mode,
11128 int *punsignedp,
11129 const_tree fntype ATTRIBUTE_UNUSED,
11130 int for_return ATTRIBUTE_UNUSED)
11131 {
11132 if (INTEGRAL_MODE_P (mode)
11133 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
11134 {
11135 if (type != NULL_TREE && POINTER_TYPE_P (type))
11136 *punsignedp = POINTERS_EXTEND_UNSIGNED;
11137 return Pmode;
11138 }
11139
11140 return mode;
11141 }
11142
11143 /* Define where to return a (scalar) value of type RET_TYPE.
11144 If RET_TYPE is null, define where to return a (scalar)
11145 value of mode MODE from a libcall. */
11146
11147 static rtx
11148 s390_function_and_libcall_value (machine_mode mode,
11149 const_tree ret_type,
11150 const_tree fntype_or_decl,
11151 bool outgoing ATTRIBUTE_UNUSED)
11152 {
11153 /* For vector return types it is important to use the RET_TYPE
11154 argument whenever available since the middle-end might have
11155 changed the mode to a scalar mode. */
11156 bool vector_ret_type_p = ((ret_type && VECTOR_TYPE_P (ret_type))
11157 || (!ret_type && VECTOR_MODE_P (mode)));
11158
11159 /* For normal functions perform the promotion as
11160 promote_function_mode would do. */
11161 if (ret_type)
11162 {
11163 int unsignedp = TYPE_UNSIGNED (ret_type);
11164 mode = promote_function_mode (ret_type, mode, &unsignedp,
11165 fntype_or_decl, 1);
11166 }
11167
11168 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT
11169 || SCALAR_FLOAT_MODE_P (mode)
11170 || (TARGET_VX_ABI && vector_ret_type_p));
11171 gcc_assert (GET_MODE_SIZE (mode) <= (TARGET_VX_ABI ? 16 : 8));
11172
11173 if (TARGET_VX_ABI && vector_ret_type_p)
11174 return gen_rtx_REG (mode, FIRST_VEC_ARG_REGNO);
11175 else if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
11176 return gen_rtx_REG (mode, 16);
11177 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
11178 || UNITS_PER_LONG == UNITS_PER_WORD)
11179 return gen_rtx_REG (mode, 2);
11180 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
11181 {
11182 /* This case is triggered when returning a 64 bit value with
11183 -m31 -mzarch. Although the value would fit into a single
11184 register it has to be forced into a 32 bit register pair in
11185 order to match the ABI. */
11186 rtvec p = rtvec_alloc (2);
11187
11188 RTVEC_ELT (p, 0)
11189 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
11190 RTVEC_ELT (p, 1)
11191 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
11192
11193 return gen_rtx_PARALLEL (mode, p);
11194 }
11195
11196 gcc_unreachable ();
11197 }
11198
11199 /* Define where to return a scalar return value of type RET_TYPE. */
11200
11201 static rtx
11202 s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
11203 bool outgoing)
11204 {
11205 return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
11206 fn_decl_or_type, outgoing);
11207 }
11208
11209 /* Define where to return a scalar libcall return value of mode
11210 MODE. */
11211
11212 static rtx
11213 s390_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
11214 {
11215 return s390_function_and_libcall_value (mode, NULL_TREE,
11216 NULL_TREE, true);
11217 }
11218
11219
11220 /* Create and return the va_list datatype.
11221
11222 On S/390, va_list is an array type equivalent to
11223
11224 typedef struct __va_list_tag
11225 {
11226 long __gpr;
11227 long __fpr;
11228 void *__overflow_arg_area;
11229 void *__reg_save_area;
11230 } va_list[1];
11231
11232 where __gpr and __fpr hold the number of general purpose
11233 or floating point arguments used up to now, respectively,
11234 __overflow_arg_area points to the stack location of the
11235 next argument passed on the stack, and __reg_save_area
11236 always points to the start of the register area in the
11237 call frame of the current function. The function prologue
11238 saves all registers used for argument passing into this
11239 area if the function uses variable arguments. */
11240
11241 static tree
11242 s390_build_builtin_va_list (void)
11243 {
11244 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
11245
11246 record = lang_hooks.types.make_type (RECORD_TYPE);
11247
11248 type_decl =
11249 build_decl (BUILTINS_LOCATION,
11250 TYPE_DECL, get_identifier ("__va_list_tag"), record);
11251
11252 f_gpr = build_decl (BUILTINS_LOCATION,
11253 FIELD_DECL, get_identifier ("__gpr"),
11254 long_integer_type_node);
11255 f_fpr = build_decl (BUILTINS_LOCATION,
11256 FIELD_DECL, get_identifier ("__fpr"),
11257 long_integer_type_node);
11258 f_ovf = build_decl (BUILTINS_LOCATION,
11259 FIELD_DECL, get_identifier ("__overflow_arg_area"),
11260 ptr_type_node);
11261 f_sav = build_decl (BUILTINS_LOCATION,
11262 FIELD_DECL, get_identifier ("__reg_save_area"),
11263 ptr_type_node);
11264
11265 va_list_gpr_counter_field = f_gpr;
11266 va_list_fpr_counter_field = f_fpr;
11267
11268 DECL_FIELD_CONTEXT (f_gpr) = record;
11269 DECL_FIELD_CONTEXT (f_fpr) = record;
11270 DECL_FIELD_CONTEXT (f_ovf) = record;
11271 DECL_FIELD_CONTEXT (f_sav) = record;
11272
11273 TYPE_STUB_DECL (record) = type_decl;
11274 TYPE_NAME (record) = type_decl;
11275 TYPE_FIELDS (record) = f_gpr;
11276 DECL_CHAIN (f_gpr) = f_fpr;
11277 DECL_CHAIN (f_fpr) = f_ovf;
11278 DECL_CHAIN (f_ovf) = f_sav;
11279
11280 layout_type (record);
11281
11282 /* The correct type is an array type of one element. */
11283 return build_array_type (record, build_index_type (size_zero_node));
11284 }
11285
11286 /* Implement va_start by filling the va_list structure VALIST.
11287 STDARG_P is always true, and ignored.
11288 NEXTARG points to the first anonymous stack argument.
11289
11290 The following global variables are used to initialize
11291 the va_list structure:
11292
11293 crtl->args.info:
11294 holds number of gprs and fprs used for named arguments.
11295 crtl->args.arg_offset_rtx:
11296 holds the offset of the first anonymous stack argument
11297 (relative to the virtual arg pointer). */
11298
11299 static void
11300 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
11301 {
11302 HOST_WIDE_INT n_gpr, n_fpr;
11303 int off;
11304 tree f_gpr, f_fpr, f_ovf, f_sav;
11305 tree gpr, fpr, ovf, sav, t;
11306
11307 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
11308 f_fpr = DECL_CHAIN (f_gpr);
11309 f_ovf = DECL_CHAIN (f_fpr);
11310 f_sav = DECL_CHAIN (f_ovf);
11311
11312 valist = build_simple_mem_ref (valist);
11313 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
11314 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
11315 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
11316 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
11317
11318 /* Count number of gp and fp argument registers used. */
11319
11320 n_gpr = crtl->args.info.gprs;
11321 n_fpr = crtl->args.info.fprs;
11322
11323 if (cfun->va_list_gpr_size)
11324 {
11325 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
11326 build_int_cst (NULL_TREE, n_gpr));
11327 TREE_SIDE_EFFECTS (t) = 1;
11328 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11329 }
11330
11331 if (cfun->va_list_fpr_size)
11332 {
11333 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
11334 build_int_cst (NULL_TREE, n_fpr));
11335 TREE_SIDE_EFFECTS (t) = 1;
11336 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11337 }
11338
11339 /* Find the overflow area.
11340 FIXME: This currently is too pessimistic when the vector ABI is
11341 enabled. In that case we *always* set up the overflow area
11342 pointer. */
11343 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
11344 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG
11345 || TARGET_VX_ABI)
11346 {
11347 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
11348
11349 off = INTVAL (crtl->args.arg_offset_rtx);
11350 off = off < 0 ? 0 : off;
11351 if (TARGET_DEBUG_ARG)
11352 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
11353 (int)n_gpr, (int)n_fpr, off);
11354
11355 t = fold_build_pointer_plus_hwi (t, off);
11356
11357 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
11358 TREE_SIDE_EFFECTS (t) = 1;
11359 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11360 }
11361
11362 /* Find the register save area. */
11363 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
11364 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
11365 {
11366 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
11367 t = fold_build_pointer_plus_hwi (t, -RETURN_REGNUM * UNITS_PER_LONG);
11368
11369 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
11370 TREE_SIDE_EFFECTS (t) = 1;
11371 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11372 }
11373 }
11374
11375 /* Implement va_arg by updating the va_list structure
11376 VALIST as required to retrieve an argument of type
11377 TYPE, and returning that argument.
11378
11379 Generates code equivalent to:
11380
11381 if (integral value) {
11382 if (size <= 4 && args.gpr < 5 ||
11383 size > 4 && args.gpr < 4 )
11384 ret = args.reg_save_area[args.gpr+8]
11385 else
11386 ret = *args.overflow_arg_area++;
11387 } else if (vector value) {
11388 ret = *args.overflow_arg_area;
11389 args.overflow_arg_area += size / 8;
11390 } else if (float value) {
11391 if (args.fpr < 2)
11392 ret = args.reg_save_area[args.fpr+64]
11393 else
11394 ret = *args.overflow_arg_area++;
11395 } else if (aggregate value) {
11396 if (args.gpr < 5)
11397 ret = *args.reg_save_area[args.gpr]
11398 else
11399 ret = **args.overflow_arg_area++;
11400 } */
11401
11402 static tree
11403 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
11404 gimple_seq *post_p ATTRIBUTE_UNUSED)
11405 {
11406 tree f_gpr, f_fpr, f_ovf, f_sav;
11407 tree gpr, fpr, ovf, sav, reg, t, u;
11408 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
11409 tree lab_false, lab_over;
11410 tree addr = create_tmp_var (ptr_type_node, "addr");
11411 bool left_align_p; /* How a value < UNITS_PER_LONG is aligned within
11412 a stack slot. */
11413
11414 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
11415 f_fpr = DECL_CHAIN (f_gpr);
11416 f_ovf = DECL_CHAIN (f_fpr);
11417 f_sav = DECL_CHAIN (f_ovf);
11418
11419 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
11420 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
11421 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
11422
11423 /* The tree for args* cannot be shared between gpr/fpr and ovf since
11424 both appear on a lhs. */
11425 valist = unshare_expr (valist);
11426 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
11427
11428 size = int_size_in_bytes (type);
11429
11430 s390_check_type_for_vector_abi (type, true, false);
11431
11432 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
11433 {
11434 if (TARGET_DEBUG_ARG)
11435 {
11436 fprintf (stderr, "va_arg: aggregate type");
11437 debug_tree (type);
11438 }
11439
11440 /* Aggregates are passed by reference. */
11441 indirect_p = 1;
11442 reg = gpr;
11443 n_reg = 1;
11444
11445 /* Kernel stack layout on 31 bit: it is assumed here that no padding
11446 will be added by s390_frame_info because for va_args an even number
11447 of GPRs always has to be saved (r15-r2 = 14 regs). */
11448 sav_ofs = 2 * UNITS_PER_LONG;
11449 sav_scale = UNITS_PER_LONG;
11450 size = UNITS_PER_LONG;
11451 max_reg = GP_ARG_NUM_REG - n_reg;
11452 left_align_p = false;
11453 }
11454 else if (s390_function_arg_vector (TYPE_MODE (type), type))
11455 {
11456 if (TARGET_DEBUG_ARG)
11457 {
11458 fprintf (stderr, "va_arg: vector type");
11459 debug_tree (type);
11460 }
11461
11462 indirect_p = 0;
11463 reg = NULL_TREE;
11464 n_reg = 0;
11465 sav_ofs = 0;
11466 sav_scale = 8;
11467 max_reg = 0;
11468 left_align_p = true;
11469 }
11470 else if (s390_function_arg_float (TYPE_MODE (type), type))
11471 {
11472 if (TARGET_DEBUG_ARG)
11473 {
11474 fprintf (stderr, "va_arg: float type");
11475 debug_tree (type);
11476 }
11477
11478 /* FP args go in FP registers, if present. */
11479 indirect_p = 0;
11480 reg = fpr;
11481 n_reg = 1;
11482 sav_ofs = 16 * UNITS_PER_LONG;
11483 sav_scale = 8;
11484 max_reg = FP_ARG_NUM_REG - n_reg;
11485 left_align_p = false;
11486 }
11487 else
11488 {
11489 if (TARGET_DEBUG_ARG)
11490 {
11491 fprintf (stderr, "va_arg: other type");
11492 debug_tree (type);
11493 }
11494
11495 /* Otherwise into GP registers. */
11496 indirect_p = 0;
11497 reg = gpr;
11498 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
11499
11500 /* Kernel stack layout on 31 bit: it is assumed here that no padding
11501 will be added by s390_frame_info because for va_args an even number
11502 of GPRs always has to be saved (r15-r2 = 14 regs). */
11503 sav_ofs = 2 * UNITS_PER_LONG;
11504
11505 if (size < UNITS_PER_LONG)
11506 sav_ofs += UNITS_PER_LONG - size;
11507
11508 sav_scale = UNITS_PER_LONG;
11509 max_reg = GP_ARG_NUM_REG - n_reg;
11510 left_align_p = false;
11511 }
11512
11513 /* Pull the value out of the saved registers ... */
11514
11515 if (reg != NULL_TREE)
11516 {
11517 /*
11518 if (reg > ((typeof (reg))max_reg))
11519 goto lab_false;
11520
11521 addr = sav + sav_ofs + reg * sav_scale;
11522
11523 goto lab_over;
11524
11525 lab_false:
11526 */
11527
11528 lab_false = create_artificial_label (UNKNOWN_LOCATION);
11529 lab_over = create_artificial_label (UNKNOWN_LOCATION);
11530
11531 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
11532 t = build2 (GT_EXPR, boolean_type_node, reg, t);
11533 u = build1 (GOTO_EXPR, void_type_node, lab_false);
11534 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
11535 gimplify_and_add (t, pre_p);
11536
11537 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
11538 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
11539 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
11540 t = fold_build_pointer_plus (t, u);
11541
11542 gimplify_assign (addr, t, pre_p);
11543
11544 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
11545
11546 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
11547 }
11548
11549 /* ... Otherwise out of the overflow area. */
11550
11551 t = ovf;
11552 if (size < UNITS_PER_LONG && !left_align_p)
11553 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG - size);
11554
11555 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
11556
11557 gimplify_assign (addr, t, pre_p);
11558
11559 if (size < UNITS_PER_LONG && left_align_p)
11560 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG);
11561 else
11562 t = fold_build_pointer_plus_hwi (t, size);
11563
11564 gimplify_assign (ovf, t, pre_p);
11565
11566 if (reg != NULL_TREE)
11567 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
11568
11569
11570 /* Increment register save count. */
11571
11572 if (n_reg > 0)
11573 {
11574 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
11575 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
11576 gimplify_and_add (u, pre_p);
11577 }
11578
11579 if (indirect_p)
11580 {
11581 t = build_pointer_type_for_mode (build_pointer_type (type),
11582 ptr_mode, true);
11583 addr = fold_convert (t, addr);
11584 addr = build_va_arg_indirect_ref (addr);
11585 }
11586 else
11587 {
11588 t = build_pointer_type_for_mode (type, ptr_mode, true);
11589 addr = fold_convert (t, addr);
11590 }
11591
11592 return build_va_arg_indirect_ref (addr);
11593 }
11594
11595 /* Emit rtl for the tbegin or tbegin_retry (RETRY != NULL_RTX)
11596 expanders.
11597 DEST - Register location where CC will be stored.
11598 TDB - Pointer to a 256 byte area where to store the transaction
11599 diagnostic block. NULL if TDB is not needed.
11600 RETRY - Retry count value. If non-NULL a retry loop for CC2
11601 is emitted.
11602 CLOBBER_FPRS_P - If true clobbers for all FPRs are emitted as part
11603 of the tbegin instruction pattern. */
11604
11605 void
11606 s390_expand_tbegin (rtx dest, rtx tdb, rtx retry, bool clobber_fprs_p)
11607 {
11608 rtx retry_plus_two = gen_reg_rtx (SImode);
11609 rtx retry_reg = gen_reg_rtx (SImode);
11610 rtx_code_label *retry_label = NULL;
11611
11612 if (retry != NULL_RTX)
11613 {
11614 emit_move_insn (retry_reg, retry);
11615 emit_insn (gen_addsi3 (retry_plus_two, retry_reg, const2_rtx));
11616 emit_insn (gen_addsi3 (retry_reg, retry_reg, const1_rtx));
11617 retry_label = gen_label_rtx ();
11618 emit_label (retry_label);
11619 }
11620
11621 if (clobber_fprs_p)
11622 {
11623 if (TARGET_VX)
11624 emit_insn (gen_tbegin_1_z13 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
11625 tdb));
11626 else
11627 emit_insn (gen_tbegin_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
11628 tdb));
11629 }
11630 else
11631 emit_insn (gen_tbegin_nofloat_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
11632 tdb));
11633
11634 emit_move_insn (dest, gen_rtx_UNSPEC (SImode,
11635 gen_rtvec (1, gen_rtx_REG (CCRAWmode,
11636 CC_REGNUM)),
11637 UNSPEC_CC_TO_INT));
11638 if (retry != NULL_RTX)
11639 {
11640 const int CC0 = 1 << 3;
11641 const int CC1 = 1 << 2;
11642 const int CC3 = 1 << 0;
11643 rtx jump;
11644 rtx count = gen_reg_rtx (SImode);
11645 rtx_code_label *leave_label = gen_label_rtx ();
11646
11647 /* Exit for success and permanent failures. */
11648 jump = s390_emit_jump (leave_label,
11649 gen_rtx_EQ (VOIDmode,
11650 gen_rtx_REG (CCRAWmode, CC_REGNUM),
11651 gen_rtx_CONST_INT (VOIDmode, CC0 | CC1 | CC3)));
11652 LABEL_NUSES (leave_label) = 1;
11653
11654 /* CC2 - transient failure. Perform retry with ppa. */
11655 emit_move_insn (count, retry_plus_two);
11656 emit_insn (gen_subsi3 (count, count, retry_reg));
11657 emit_insn (gen_tx_assist (count));
11658 jump = emit_jump_insn (gen_doloop_si64 (retry_label,
11659 retry_reg,
11660 retry_reg));
11661 JUMP_LABEL (jump) = retry_label;
11662 LABEL_NUSES (retry_label) = 1;
11663 emit_label (leave_label);
11664 }
11665 }
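
/* Illustration only (not part of the original file): the expander above is
   normally reached through the transactional-execution builtins.  Assuming
   the documented __builtin_tbegin / __builtin_tend interface and the
   _HTM_TBEGIN_STARTED constant from htmintrin.h, user code like the
   following ends up here:

       #include <htmintrin.h>

       long counter;

       void
       increment (void)
       {
         if (__builtin_tbegin ((void *) 0) == _HTM_TBEGIN_STARTED)
           {
             counter++;
             __builtin_tend ();
           }
         else
           counter++;
       }

   When a retry count is given (__builtin_tbegin_retry), the CC2 handling
   above re-executes the tbegin after issuing the processor-assist (ppa)
   instruction.  */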
11666
11667
11668 /* Return the decl for the target specific builtin with the function
11669 code FCODE. */
11670
11671 static tree
11672 s390_builtin_decl (unsigned fcode, bool initialized_p ATTRIBUTE_UNUSED)
11673 {
11674 if (fcode >= S390_BUILTIN_MAX)
11675 return error_mark_node;
11676
11677 return s390_builtin_decls[fcode];
11678 }
11679
11680 /* We call mcount before the function prologue. So a profiled leaf
11681 function should stay a leaf function. */
11682
11683 static bool
11684 s390_keep_leaf_when_profiled ()
11685 {
11686 return true;
11687 }
11688
11689 /* Output assembly code for the trampoline template to
11690 stdio stream FILE.
11691
11692 On S/390, we use gpr 1 internally in the trampoline code;
11693 gpr 0 is used to hold the static chain. */
11694
11695 static void
11696 s390_asm_trampoline_template (FILE *file)
11697 {
11698 rtx op[2];
11699 op[0] = gen_rtx_REG (Pmode, 0);
11700 op[1] = gen_rtx_REG (Pmode, 1);
11701
11702 if (TARGET_64BIT)
11703 {
11704 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
11705 output_asm_insn ("lmg\t%0,%1,14(%1)", op); /* 6 byte */
11706 output_asm_insn ("br\t%1", op); /* 2 byte */
11707 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
11708 }
11709 else
11710 {
11711 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
11712 output_asm_insn ("lm\t%0,%1,6(%1)", op); /* 4 byte */
11713 output_asm_insn ("br\t%1", op); /* 2 byte */
11714 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
11715 }
11716 }
11717
11718 /* Emit RTL insns to initialize the variable parts of a trampoline.
11719 FNADDR is an RTX for the address of the function's pure code.
11720 CXT is an RTX for the static chain value for the function. */
11721
11722 static void
11723 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
11724 {
11725 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
11726 rtx mem;
11727
11728 emit_block_move (m_tramp, assemble_trampoline_template (),
11729 GEN_INT (2 * UNITS_PER_LONG), BLOCK_OP_NORMAL);
11730
11731 mem = adjust_address (m_tramp, Pmode, 2 * UNITS_PER_LONG);
11732 emit_move_insn (mem, cxt);
11733 mem = adjust_address (m_tramp, Pmode, 3 * UNITS_PER_LONG);
11734 emit_move_insn (mem, fnaddr);
11735 }
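
/* Resulting 64-bit trampoline, assuming UNITS_PER_LONG == 8 (a sketch
   combining the template above with the two stores in
   s390_trampoline_init):

       offset  0:  basr %r1,0             %r1 <- address of next insn
       offset  2:  lmg  %r0,%r1,14(%r1)   %r0 <- chain, %r1 <- target
       offset  8:  br   %r1               jump to the target function
       offset 16:  <static chain value>   stored by s390_trampoline_init
       offset 24:  <function address>     stored by s390_trampoline_init  */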
11736
11737 /* Output assembler code to FILE to increment profiler label # LABELNO
11738 for profiling a function entry. */
11739
11740 void
11741 s390_function_profiler (FILE *file, int labelno)
11742 {
11743 rtx op[7];
11744
11745 char label[128];
11746 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
11747
11748 fprintf (file, "# function profiler \n");
11749
11750 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
11751 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
11752 op[1] = gen_rtx_MEM (Pmode, plus_constant (Pmode, op[1], UNITS_PER_LONG));
11753
11754 op[2] = gen_rtx_REG (Pmode, 1);
11755 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
11756 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
11757
11758 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
11759 if (flag_pic)
11760 {
11761 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
11762 op[4] = gen_rtx_CONST (Pmode, op[4]);
11763 }
11764
11765 if (TARGET_64BIT)
11766 {
11767 output_asm_insn ("stg\t%0,%1", op);
11768 output_asm_insn ("larl\t%2,%3", op);
11769 output_asm_insn ("brasl\t%0,%4", op);
11770 output_asm_insn ("lg\t%0,%1", op);
11771 }
11772 else if (!flag_pic)
11773 {
11774 op[6] = gen_label_rtx ();
11775
11776 output_asm_insn ("st\t%0,%1", op);
11777 output_asm_insn ("bras\t%2,%l6", op);
11778 output_asm_insn (".long\t%4", op);
11779 output_asm_insn (".long\t%3", op);
11780 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
11781 output_asm_insn ("l\t%0,0(%2)", op);
11782 output_asm_insn ("l\t%2,4(%2)", op);
11783 output_asm_insn ("basr\t%0,%0", op);
11784 output_asm_insn ("l\t%0,%1", op);
11785 }
11786 else
11787 {
11788 op[5] = gen_label_rtx ();
11789 op[6] = gen_label_rtx ();
11790
11791 output_asm_insn ("st\t%0,%1", op);
11792 output_asm_insn ("bras\t%2,%l6", op);
11793 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
11794 output_asm_insn (".long\t%4-%l5", op);
11795 output_asm_insn (".long\t%3-%l5", op);
11796 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
11797 output_asm_insn ("lr\t%0,%2", op);
11798 output_asm_insn ("a\t%0,0(%2)", op);
11799 output_asm_insn ("a\t%2,4(%2)", op);
11800 output_asm_insn ("basr\t%0,%0", op);
11801 output_asm_insn ("l\t%0,%1", op);
11802 }
11803 }
11804
11805 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
11806 into its SYMBOL_REF_FLAGS. */
11807
11808 static void
11809 s390_encode_section_info (tree decl, rtx rtl, int first)
11810 {
11811 default_encode_section_info (decl, rtl, first);
11812
11813 if (TREE_CODE (decl) == VAR_DECL)
11814 {
11815 /* If a variable has a forced alignment to < 2 bytes, mark it
11816 with SYMBOL_FLAG_ALIGN1 to prevent it from being used as a LARL
11817 operand. */
11818 if (DECL_USER_ALIGN (decl) && DECL_ALIGN (decl) < 16)
11819 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_ALIGN1;
11820 if (!DECL_SIZE (decl)
11821 || !DECL_ALIGN (decl)
11822 || !tree_fits_shwi_p (DECL_SIZE (decl))
11823 || (DECL_ALIGN (decl) <= 64
11824 && DECL_ALIGN (decl) != tree_to_shwi (DECL_SIZE (decl))))
11825 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
11826 }
11827
11828 /* Literal pool references don't have a decl so they are handled
11829 differently here. We rely on the information in the MEM_ALIGN
11830 entry to decide upon natural alignment. */
11831 if (MEM_P (rtl)
11832 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
11833 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0))
11834 && (MEM_ALIGN (rtl) == 0
11835 || GET_MODE_BITSIZE (GET_MODE (rtl)) == 0
11836 || MEM_ALIGN (rtl) < GET_MODE_BITSIZE (GET_MODE (rtl))))
11837 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
11838 }
11839
11840 /* Output thunk to FILE that implements a C++ virtual function call (with
11841 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
11842 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
11843 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
11844 relative to the resulting this pointer. */
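
/* Conceptually the thunk performs the following adjustment (a C-level
   sketch only; the real code is emitted as assembly below):

       this += delta;
       if (vcall_offset)
         this += *(long *)(*(char **)this + vcall_offset);
       goto *function;
*/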
11845
11846 static void
11847 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
11848 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
11849 tree function)
11850 {
11851 rtx op[10];
11852 int nonlocal = 0;
11853
11854 /* Make sure unwind info is emitted for the thunk if needed. */
11855 final_start_function (emit_barrier (), file, 1);
11856
11857 /* Operand 0 is the target function. */
11858 op[0] = XEXP (DECL_RTL (function), 0);
11859 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
11860 {
11861 nonlocal = 1;
11862 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
11863 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
11864 op[0] = gen_rtx_CONST (Pmode, op[0]);
11865 }
11866
11867 /* Operand 1 is the 'this' pointer. */
11868 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
11869 op[1] = gen_rtx_REG (Pmode, 3);
11870 else
11871 op[1] = gen_rtx_REG (Pmode, 2);
11872
11873 /* Operand 2 is the delta. */
11874 op[2] = GEN_INT (delta);
11875
11876 /* Operand 3 is the vcall_offset. */
11877 op[3] = GEN_INT (vcall_offset);
11878
11879 /* Operand 4 is the temporary register. */
11880 op[4] = gen_rtx_REG (Pmode, 1);
11881
11882 /* Operands 5 to 8 can be used as labels. */
11883 op[5] = NULL_RTX;
11884 op[6] = NULL_RTX;
11885 op[7] = NULL_RTX;
11886 op[8] = NULL_RTX;
11887
11888 /* Operand 9 can be used for temporary register. */
11889 op[9] = NULL_RTX;
11890
11891 /* Generate code. */
11892 if (TARGET_64BIT)
11893 {
11894 /* Setup literal pool pointer if required. */
11895 if ((!DISP_IN_RANGE (delta)
11896 && !CONST_OK_FOR_K (delta)
11897 && !CONST_OK_FOR_Os (delta))
11898 || (!DISP_IN_RANGE (vcall_offset)
11899 && !CONST_OK_FOR_K (vcall_offset)
11900 && !CONST_OK_FOR_Os (vcall_offset)))
11901 {
11902 op[5] = gen_label_rtx ();
11903 output_asm_insn ("larl\t%4,%5", op);
11904 }
11905
11906 /* Add DELTA to this pointer. */
11907 if (delta)
11908 {
11909 if (CONST_OK_FOR_J (delta))
11910 output_asm_insn ("la\t%1,%2(%1)", op);
11911 else if (DISP_IN_RANGE (delta))
11912 output_asm_insn ("lay\t%1,%2(%1)", op);
11913 else if (CONST_OK_FOR_K (delta))
11914 output_asm_insn ("aghi\t%1,%2", op);
11915 else if (CONST_OK_FOR_Os (delta))
11916 output_asm_insn ("agfi\t%1,%2", op);
11917 else
11918 {
11919 op[6] = gen_label_rtx ();
11920 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
11921 }
11922 }
11923
11924 /* Perform vcall adjustment. */
11925 if (vcall_offset)
11926 {
11927 if (DISP_IN_RANGE (vcall_offset))
11928 {
11929 output_asm_insn ("lg\t%4,0(%1)", op);
11930 output_asm_insn ("ag\t%1,%3(%4)", op);
11931 }
11932 else if (CONST_OK_FOR_K (vcall_offset))
11933 {
11934 output_asm_insn ("lghi\t%4,%3", op);
11935 output_asm_insn ("ag\t%4,0(%1)", op);
11936 output_asm_insn ("ag\t%1,0(%4)", op);
11937 }
11938 else if (CONST_OK_FOR_Os (vcall_offset))
11939 {
11940 output_asm_insn ("lgfi\t%4,%3", op);
11941 output_asm_insn ("ag\t%4,0(%1)", op);
11942 output_asm_insn ("ag\t%1,0(%4)", op);
11943 }
11944 else
11945 {
11946 op[7] = gen_label_rtx ();
11947 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
11948 output_asm_insn ("ag\t%4,0(%1)", op);
11949 output_asm_insn ("ag\t%1,0(%4)", op);
11950 }
11951 }
11952
11953 /* Jump to target. */
11954 output_asm_insn ("jg\t%0", op);
11955
11956 /* Output literal pool if required. */
11957 if (op[5])
11958 {
11959 output_asm_insn (".align\t4", op);
11960 targetm.asm_out.internal_label (file, "L",
11961 CODE_LABEL_NUMBER (op[5]));
11962 }
11963 if (op[6])
11964 {
11965 targetm.asm_out.internal_label (file, "L",
11966 CODE_LABEL_NUMBER (op[6]));
11967 output_asm_insn (".long\t%2", op);
11968 }
11969 if (op[7])
11970 {
11971 targetm.asm_out.internal_label (file, "L",
11972 CODE_LABEL_NUMBER (op[7]));
11973 output_asm_insn (".long\t%3", op);
11974 }
11975 }
11976 else
11977 {
11978 /* Setup base pointer if required. */
11979 if (!vcall_offset
11980 || (!DISP_IN_RANGE (delta)
11981 && !CONST_OK_FOR_K (delta)
11982 && !CONST_OK_FOR_Os (delta))
11983 || (!DISP_IN_RANGE (delta)
11984 && !CONST_OK_FOR_K (vcall_offset)
11985 && !CONST_OK_FOR_Os (vcall_offset)))
11986 {
11987 op[5] = gen_label_rtx ();
11988 output_asm_insn ("basr\t%4,0", op);
11989 targetm.asm_out.internal_label (file, "L",
11990 CODE_LABEL_NUMBER (op[5]));
11991 }
11992
11993 /* Add DELTA to this pointer. */
11994 if (delta)
11995 {
11996 if (CONST_OK_FOR_J (delta))
11997 output_asm_insn ("la\t%1,%2(%1)", op);
11998 else if (DISP_IN_RANGE (delta))
11999 output_asm_insn ("lay\t%1,%2(%1)", op);
12000 else if (CONST_OK_FOR_K (delta))
12001 output_asm_insn ("ahi\t%1,%2", op);
12002 else if (CONST_OK_FOR_Os (delta))
12003 output_asm_insn ("afi\t%1,%2", op);
12004 else
12005 {
12006 op[6] = gen_label_rtx ();
12007 output_asm_insn ("a\t%1,%6-%5(%4)", op);
12008 }
12009 }
12010
12011 /* Perform vcall adjustment. */
12012 if (vcall_offset)
12013 {
12014 if (CONST_OK_FOR_J (vcall_offset))
12015 {
12016 output_asm_insn ("l\t%4,0(%1)", op);
12017 output_asm_insn ("a\t%1,%3(%4)", op);
12018 }
12019 else if (DISP_IN_RANGE (vcall_offset))
12020 {
12021 output_asm_insn ("l\t%4,0(%1)", op);
12022 output_asm_insn ("ay\t%1,%3(%4)", op);
12023 }
12024 else if (CONST_OK_FOR_K (vcall_offset))
12025 {
12026 output_asm_insn ("lhi\t%4,%3", op);
12027 output_asm_insn ("a\t%4,0(%1)", op);
12028 output_asm_insn ("a\t%1,0(%4)", op);
12029 }
12030 else if (CONST_OK_FOR_Os (vcall_offset))
12031 {
12032 output_asm_insn ("iilf\t%4,%3", op);
12033 output_asm_insn ("a\t%4,0(%1)", op);
12034 output_asm_insn ("a\t%1,0(%4)", op);
12035 }
12036 else
12037 {
12038 op[7] = gen_label_rtx ();
12039 output_asm_insn ("l\t%4,%7-%5(%4)", op);
12040 output_asm_insn ("a\t%4,0(%1)", op);
12041 output_asm_insn ("a\t%1,0(%4)", op);
12042 }
12043
12044 /* We had to clobber the base pointer register.
12045 Re-setup the base pointer (with a different base). */
12046 op[5] = gen_label_rtx ();
12047 output_asm_insn ("basr\t%4,0", op);
12048 targetm.asm_out.internal_label (file, "L",
12049 CODE_LABEL_NUMBER (op[5]));
12050 }
12051
12052 /* Jump to target. */
12053 op[8] = gen_label_rtx ();
12054
12055 if (!flag_pic)
12056 output_asm_insn ("l\t%4,%8-%5(%4)", op);
12057 else if (!nonlocal)
12058 output_asm_insn ("a\t%4,%8-%5(%4)", op);
12059 /* We cannot call through .plt, since .plt requires %r12 loaded. */
12060 else if (flag_pic == 1)
12061 {
12062 output_asm_insn ("a\t%4,%8-%5(%4)", op);
12063 output_asm_insn ("l\t%4,%0(%4)", op);
12064 }
12065 else if (flag_pic == 2)
12066 {
12067 op[9] = gen_rtx_REG (Pmode, 0);
12068 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
12069 output_asm_insn ("a\t%4,%8-%5(%4)", op);
12070 output_asm_insn ("ar\t%4,%9", op);
12071 output_asm_insn ("l\t%4,0(%4)", op);
12072 }
12073
12074 output_asm_insn ("br\t%4", op);
12075
12076 /* Output literal pool. */
12077 output_asm_insn (".align\t4", op);
12078
12079 if (nonlocal && flag_pic == 2)
12080 output_asm_insn (".long\t%0", op);
12081 if (nonlocal)
12082 {
12083 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
12084 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
12085 }
12086
12087 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
12088 if (!flag_pic)
12089 output_asm_insn (".long\t%0", op);
12090 else
12091 output_asm_insn (".long\t%0-%5", op);
12092
12093 if (op[6])
12094 {
12095 targetm.asm_out.internal_label (file, "L",
12096 CODE_LABEL_NUMBER (op[6]));
12097 output_asm_insn (".long\t%2", op);
12098 }
12099 if (op[7])
12100 {
12101 targetm.asm_out.internal_label (file, "L",
12102 CODE_LABEL_NUMBER (op[7]));
12103 output_asm_insn (".long\t%3", op);
12104 }
12105 }
12106 final_end_function ();
12107 }
12108
12109 static bool
12110 s390_valid_pointer_mode (machine_mode mode)
12111 {
12112 return (mode == SImode || (TARGET_64BIT && mode == DImode));
12113 }
12114
12115 /* Checks whether the given CALL_EXPR would use a call-saved
12116 register. This is used to decide whether sibling call
12117 optimization could be performed on the respective function
12118 call. */
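
/* For example, a call passing five integer arguments uses %r2-%r6 as
   argument registers; since %r6 is call-saved, such a call is rejected
   here and s390_function_ok_for_sibcall will not allow a sibling call.  */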
12119
12120 static bool
12121 s390_call_saved_register_used (tree call_expr)
12122 {
12123 CUMULATIVE_ARGS cum_v;
12124 cumulative_args_t cum;
12125 tree parameter;
12126 machine_mode mode;
12127 tree type;
12128 rtx parm_rtx;
12129 int reg, i;
12130
12131 INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
12132 cum = pack_cumulative_args (&cum_v);
12133
12134 for (i = 0; i < call_expr_nargs (call_expr); i++)
12135 {
12136 parameter = CALL_EXPR_ARG (call_expr, i);
12137 gcc_assert (parameter);
12138
12139 /* For an undeclared variable passed as a parameter we will get
12140 an ERROR_MARK node here. */
12141 if (TREE_CODE (parameter) == ERROR_MARK)
12142 return true;
12143
12144 type = TREE_TYPE (parameter);
12145 gcc_assert (type);
12146
12147 mode = TYPE_MODE (type);
12148 gcc_assert (mode);
12149
12150 /* We assume that in the target function all parameters are
12151 named. This only has an impact on vector argument register
12152 usage, none of which is call-saved.
12153 if (pass_by_reference (&cum_v, mode, type, true))
12154 {
12155 mode = Pmode;
12156 type = build_pointer_type (type);
12157 }
12158
12159 parm_rtx = s390_function_arg (cum, mode, type, true);
12160
12161 s390_function_arg_advance (cum, mode, type, true);
12162
12163 if (!parm_rtx)
12164 continue;
12165
12166 if (REG_P (parm_rtx))
12167 {
12168 for (reg = 0;
12169 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
12170 reg++)
12171 if (!call_used_regs[reg + REGNO (parm_rtx)])
12172 return true;
12173 }
12174
12175 if (GET_CODE (parm_rtx) == PARALLEL)
12176 {
12177 int i;
12178
12179 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
12180 {
12181 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
12182
12183 gcc_assert (REG_P (r));
12184
12185 for (reg = 0;
12186 reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
12187 reg++)
12188 if (!call_used_regs[reg + REGNO (r)])
12189 return true;
12190 }
12191 }
12192
12193 }
12194 return false;
12195 }
12196
12197 /* Return true if the given call expression can be
12198 turned into a sibling call.
12199 DECL holds the declaration of the function to be called whereas
12200 EXP is the call expression itself. */
12201
12202 static bool
12203 s390_function_ok_for_sibcall (tree decl, tree exp)
12204 {
12205 /* The TPF epilogue uses register 1. */
12206 if (TARGET_TPF_PROFILING)
12207 return false;
12208
12209 /* The 31 bit PLT code uses register 12 (GOT pointer - caller saved)
12210 which would have to be restored before the sibcall. */
12211 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
12212 return false;
12213
12214 /* Register 6 on s390 is available as an argument register but is unfortunately
12215 call-saved. This makes functions needing this register for arguments
12216 not suitable for sibcalls. */
12217 return !s390_call_saved_register_used (exp);
12218 }
12219
12220 /* Return the fixed registers used for condition codes. */
12221
12222 static bool
12223 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
12224 {
12225 *p1 = CC_REGNUM;
12226 *p2 = INVALID_REGNUM;
12227
12228 return true;
12229 }
12230
12231 /* This function is used by the call expanders of the machine description.
12232 It emits the call insn itself together with the necessary operations
12233 to adjust the target address and returns the emitted insn.
12234 ADDR_LOCATION is the target address rtx
12235 TLS_CALL the location of the thread-local symbol
12236 RESULT_REG the register where the result of the call should be stored
12237 RETADDR_REG the register where the return address should be stored
12238 If this parameter is NULL_RTX the call is considered
12239 to be a sibling call. */
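
/* The emitted call insn takes one of the following shapes (a sketch
   derived from the code below; plain sibling calls omit the return
   address clobber and the result SET when not needed):

       (call (mem:QI addr) (const_int 0))
       (parallel [(set result (call ...)) (clobber retaddr)])
       (parallel [(set result (call ...)) (clobber retaddr) (use tls_call)])
*/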
12240
12241 rtx_insn *
12242 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
12243 rtx retaddr_reg)
12244 {
12245 bool plt_call = false;
12246 rtx_insn *insn;
12247 rtx call;
12248 rtx clobber;
12249 rtvec vec;
12250
12251 /* Direct function calls need special treatment. */
12252 if (GET_CODE (addr_location) == SYMBOL_REF)
12253 {
12254 /* When calling a global routine in PIC mode, we must
12255 replace the symbol itself with the PLT stub. */
12256 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
12257 {
12258 if (retaddr_reg != NULL_RTX)
12259 {
12260 addr_location = gen_rtx_UNSPEC (Pmode,
12261 gen_rtvec (1, addr_location),
12262 UNSPEC_PLT);
12263 addr_location = gen_rtx_CONST (Pmode, addr_location);
12264 plt_call = true;
12265 }
12266 else
12267 /* For -fpic code the PLT entries might use r12 which is
12268 call-saved. Therefore we cannot do a sibcall when
12269 calling directly using a symbol ref. When reaching
12270 this point we decided (in s390_function_ok_for_sibcall)
12271 to do a sibcall for a function pointer but one of the
12272 optimizers was able to get rid of the function pointer
12273 by propagating the symbol ref into the call. This
12274 optimization is illegal for S/390 so we turn the direct
12275 call into an indirect call again. */
12276 addr_location = force_reg (Pmode, addr_location);
12277 }
12278
12279 /* Unless we can use the bras(l) insn, force the
12280 routine address into a register. */
12281 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
12282 {
12283 if (flag_pic)
12284 addr_location = legitimize_pic_address (addr_location, 0);
12285 else
12286 addr_location = force_reg (Pmode, addr_location);
12287 }
12288 }
12289
12290 /* If it is already an indirect call or the code above moved the
12291 SYMBOL_REF to somewhere else make sure the address can be found in
12292 register 1. */
12293 if (retaddr_reg == NULL_RTX
12294 && GET_CODE (addr_location) != SYMBOL_REF
12295 && !plt_call)
12296 {
12297 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
12298 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
12299 }
12300
12301 addr_location = gen_rtx_MEM (QImode, addr_location);
12302 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
12303
12304 if (result_reg != NULL_RTX)
12305 call = gen_rtx_SET (result_reg, call);
12306
12307 if (retaddr_reg != NULL_RTX)
12308 {
12309 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
12310
12311 if (tls_call != NULL_RTX)
12312 vec = gen_rtvec (3, call, clobber,
12313 gen_rtx_USE (VOIDmode, tls_call));
12314 else
12315 vec = gen_rtvec (2, call, clobber);
12316
12317 call = gen_rtx_PARALLEL (VOIDmode, vec);
12318 }
12319
12320 insn = emit_call_insn (call);
12321
12322 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
12323 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
12324 {
12325 /* s390_function_ok_for_sibcall should
12326 have denied sibcalls in this case. */
12327 gcc_assert (retaddr_reg != NULL_RTX);
12328 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, 12));
12329 }
12330 return insn;
12331 }
12332
12333 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
12334
12335 static void
12336 s390_conditional_register_usage (void)
12337 {
12338 int i;
12339
12340 if (flag_pic)
12341 {
12342 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
12343 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
12344 }
12345 if (TARGET_CPU_ZARCH)
12346 {
12347 fixed_regs[BASE_REGNUM] = 0;
12348 call_used_regs[BASE_REGNUM] = 0;
12349 fixed_regs[RETURN_REGNUM] = 0;
12350 call_used_regs[RETURN_REGNUM] = 0;
12351 }
12352 if (TARGET_64BIT)
12353 {
12354 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
12355 call_used_regs[i] = call_really_used_regs[i] = 0;
12356 }
12357 else
12358 {
12359 call_used_regs[FPR4_REGNUM] = call_really_used_regs[FPR4_REGNUM] = 0;
12360 call_used_regs[FPR6_REGNUM] = call_really_used_regs[FPR6_REGNUM] = 0;
12361 }
12362
12363 if (TARGET_SOFT_FLOAT)
12364 {
12365 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
12366 call_used_regs[i] = fixed_regs[i] = 1;
12367 }
12368
12369 /* Disable v16 - v31 for non-vector target. */
12370 if (!TARGET_VX)
12371 {
12372 for (i = VR16_REGNUM; i <= VR31_REGNUM; i++)
12373 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
12374 }
12375 }
12376
12377 /* Corresponding function to eh_return expander. */
12378
12379 static GTY(()) rtx s390_tpf_eh_return_symbol;
12380 void
12381 s390_emit_tpf_eh_return (rtx target)
12382 {
12383 rtx_insn *insn;
12384 rtx reg, orig_ra;
12385
12386 if (!s390_tpf_eh_return_symbol)
12387 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
12388
12389 reg = gen_rtx_REG (Pmode, 2);
12390 orig_ra = gen_rtx_REG (Pmode, 3);
12391
12392 emit_move_insn (reg, target);
12393 emit_move_insn (orig_ra, get_hard_reg_initial_val (Pmode, RETURN_REGNUM));
12394 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
12395 gen_rtx_REG (Pmode, RETURN_REGNUM));
12396 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
12397 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), orig_ra);
12398
12399 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
12400 }
12401
12402 /* Rework the prologue/epilogue to avoid saving/restoring
12403 registers unnecessarily. */
12404
12405 static void
12406 s390_optimize_prologue (void)
12407 {
12408 rtx_insn *insn, *new_insn, *next_insn;
12409
12410 /* Do a final recompute of the frame-related data. */
12411 s390_optimize_register_info ();
12412
12413 /* If all special registers are in fact used, there's nothing we
12414 can do, so no point in walking the insn list. */
12415
12416 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
12417 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
12418 && (TARGET_CPU_ZARCH
12419 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
12420 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
12421 return;
12422
12423 /* Search for prologue/epilogue insns and replace them. */
12424
12425 for (insn = get_insns (); insn; insn = next_insn)
12426 {
12427 int first, last, off;
12428 rtx set, base, offset;
12429 rtx pat;
12430
12431 next_insn = NEXT_INSN (insn);
12432
12433 if (! NONJUMP_INSN_P (insn) || ! RTX_FRAME_RELATED_P (insn))
12434 continue;
12435
12436 pat = PATTERN (insn);
12437
12438 /* Remove ldgr/lgdr instructions used for saving and restoring
12439 GPRs if possible. */
12440 if (TARGET_Z10
12441 && GET_CODE (pat) == SET
12442 && GET_MODE (SET_SRC (pat)) == DImode
12443 && REG_P (SET_SRC (pat))
12444 && REG_P (SET_DEST (pat)))
12445 {
12446 int src_regno = REGNO (SET_SRC (pat));
12447 int dest_regno = REGNO (SET_DEST (pat));
12448 int gpr_regno;
12449 int fpr_regno;
12450
12451 if (!((GENERAL_REGNO_P (src_regno) && FP_REGNO_P (dest_regno))
12452 || (FP_REGNO_P (src_regno) && GENERAL_REGNO_P (dest_regno))))
12453 continue;
12454
12455 gpr_regno = GENERAL_REGNO_P (src_regno) ? src_regno : dest_regno;
12456 fpr_regno = FP_REGNO_P (src_regno) ? src_regno : dest_regno;
12457
12458 /* GPR must be call-saved, FPR must be call-clobbered. */
12459 if (!call_really_used_regs[fpr_regno]
12460 || call_really_used_regs[gpr_regno])
12461 continue;
12462
12463 /* It must not happen that what we once saved in an FPR now
12464 needs a stack slot. */
12465 gcc_assert (cfun_gpr_save_slot (gpr_regno) != -1);
12466
12467 if (cfun_gpr_save_slot (gpr_regno) == 0)
12468 {
12469 remove_insn (insn);
12470 continue;
12471 }
12472 }
12473
12474 if (GET_CODE (pat) == PARALLEL
12475 && store_multiple_operation (pat, VOIDmode))
12476 {
12477 set = XVECEXP (pat, 0, 0);
12478 first = REGNO (SET_SRC (set));
12479 last = first + XVECLEN (pat, 0) - 1;
12480 offset = const0_rtx;
12481 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
12482 off = INTVAL (offset);
12483
12484 if (GET_CODE (base) != REG || off < 0)
12485 continue;
12486 if (cfun_frame_layout.first_save_gpr != -1
12487 && (cfun_frame_layout.first_save_gpr < first
12488 || cfun_frame_layout.last_save_gpr > last))
12489 continue;
12490 if (REGNO (base) != STACK_POINTER_REGNUM
12491 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
12492 continue;
12493 if (first > BASE_REGNUM || last < BASE_REGNUM)
12494 continue;
12495
12496 if (cfun_frame_layout.first_save_gpr != -1)
12497 {
12498 rtx s_pat = save_gprs (base,
12499 off + (cfun_frame_layout.first_save_gpr
12500 - first) * UNITS_PER_LONG,
12501 cfun_frame_layout.first_save_gpr,
12502 cfun_frame_layout.last_save_gpr);
12503 new_insn = emit_insn_before (s_pat, insn);
12504 INSN_ADDRESSES_NEW (new_insn, -1);
12505 }
12506
12507 remove_insn (insn);
12508 continue;
12509 }
12510
12511 if (cfun_frame_layout.first_save_gpr == -1
12512 && GET_CODE (pat) == SET
12513 && GENERAL_REG_P (SET_SRC (pat))
12514 && GET_CODE (SET_DEST (pat)) == MEM)
12515 {
12516 set = pat;
12517 first = REGNO (SET_SRC (set));
12518 offset = const0_rtx;
12519 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
12520 off = INTVAL (offset);
12521
12522 if (GET_CODE (base) != REG || off < 0)
12523 continue;
12524 if (REGNO (base) != STACK_POINTER_REGNUM
12525 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
12526 continue;
12527
12528 remove_insn (insn);
12529 continue;
12530 }
12531
12532 if (GET_CODE (pat) == PARALLEL
12533 && load_multiple_operation (pat, VOIDmode))
12534 {
12535 set = XVECEXP (pat, 0, 0);
12536 first = REGNO (SET_DEST (set));
12537 last = first + XVECLEN (pat, 0) - 1;
12538 offset = const0_rtx;
12539 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
12540 off = INTVAL (offset);
12541
12542 if (GET_CODE (base) != REG || off < 0)
12543 continue;
12544
12545 if (cfun_frame_layout.first_restore_gpr != -1
12546 && (cfun_frame_layout.first_restore_gpr < first
12547 || cfun_frame_layout.last_restore_gpr > last))
12548 continue;
12549 if (REGNO (base) != STACK_POINTER_REGNUM
12550 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
12551 continue;
12552 if (first > BASE_REGNUM || last < BASE_REGNUM)
12553 continue;
12554
12555 if (cfun_frame_layout.first_restore_gpr != -1)
12556 {
12557 rtx rpat = restore_gprs (base,
12558 off + (cfun_frame_layout.first_restore_gpr
12559 - first) * UNITS_PER_LONG,
12560 cfun_frame_layout.first_restore_gpr,
12561 cfun_frame_layout.last_restore_gpr);
12562
12563 /* Remove REG_CFA_RESTOREs for registers that we no
12564 longer need to save. */
12565 REG_NOTES (rpat) = REG_NOTES (insn);
12566 for (rtx *ptr = &REG_NOTES (rpat); *ptr; )
12567 if (REG_NOTE_KIND (*ptr) == REG_CFA_RESTORE
12568 && ((int) REGNO (XEXP (*ptr, 0))
12569 < cfun_frame_layout.first_restore_gpr))
12570 *ptr = XEXP (*ptr, 1);
12571 else
12572 ptr = &XEXP (*ptr, 1);
12573 new_insn = emit_insn_before (rpat, insn);
12574 RTX_FRAME_RELATED_P (new_insn) = 1;
12575 INSN_ADDRESSES_NEW (new_insn, -1);
12576 }
12577
12578 remove_insn (insn);
12579 continue;
12580 }
12581
12582 if (cfun_frame_layout.first_restore_gpr == -1
12583 && GET_CODE (pat) == SET
12584 && GENERAL_REG_P (SET_DEST (pat))
12585 && GET_CODE (SET_SRC (pat)) == MEM)
12586 {
12587 set = pat;
12588 first = REGNO (SET_DEST (set));
12589 offset = const0_rtx;
12590 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
12591 off = INTVAL (offset);
12592
12593 if (GET_CODE (base) != REG || off < 0)
12594 continue;
12595
12596 if (REGNO (base) != STACK_POINTER_REGNUM
12597 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
12598 continue;
12599
12600 remove_insn (insn);
12601 continue;
12602 }
12603 }
12604 }
12605
12606 /* On z10 and later the dynamic branch prediction must see the
12607 backward jump within a certain window. If not, it falls back to
12608 the static prediction. This function rearranges the loop backward
12609 branch in a way which makes the static prediction always correct.
12610 The function returns true if it added an instruction. */
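
/* The rewrite performed by this function, as a pseudo-assembly sketch:

       before:                        after:
         loop:  ...                     loop:  ...
                ...                            ...
                jCC   loop                     jNCC  skip
                                               j     loop
                                        skip:

   i.e. the conditional backward branch is inverted into a short forward
   branch around an unconditional backward jump, which is always predicted
   correctly by the static predictor.  */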
12611 static bool
12612 s390_fix_long_loop_prediction (rtx_insn *insn)
12613 {
12614 rtx set = single_set (insn);
12615 rtx code_label, label_ref, new_label;
12616 rtx_insn *uncond_jump;
12617 rtx_insn *cur_insn;
12618 rtx tmp;
12619 int distance;
12620
12621 /* This will exclude branch on count and branch on index patterns
12622 since these are correctly statically predicted. */
12623 if (!set
12624 || SET_DEST (set) != pc_rtx
12625 || GET_CODE (SET_SRC(set)) != IF_THEN_ELSE)
12626 return false;
12627
12628 /* Skip conditional returns. */
12629 if (ANY_RETURN_P (XEXP (SET_SRC (set), 1))
12630 && XEXP (SET_SRC (set), 2) == pc_rtx)
12631 return false;
12632
12633 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
12634 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
12635
12636 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
12637
12638 code_label = XEXP (label_ref, 0);
12639
12640 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
12641 || INSN_ADDRESSES (INSN_UID (insn)) == -1
12642 || (INSN_ADDRESSES (INSN_UID (insn))
12643 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
12644 return false;
12645
12646 for (distance = 0, cur_insn = PREV_INSN (insn);
12647 distance < PREDICT_DISTANCE - 6;
12648 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
12649 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
12650 return false;
12651
12652 new_label = gen_label_rtx ();
12653 uncond_jump = emit_jump_insn_after (
12654 gen_rtx_SET (pc_rtx,
12655 gen_rtx_LABEL_REF (VOIDmode, code_label)),
12656 insn);
12657 emit_label_after (new_label, uncond_jump);
12658
12659 tmp = XEXP (SET_SRC (set), 1);
12660 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
12661 XEXP (SET_SRC (set), 2) = tmp;
12662 INSN_CODE (insn) = -1;
12663
12664 XEXP (label_ref, 0) = new_label;
12665 JUMP_LABEL (insn) = new_label;
12666 JUMP_LABEL (uncond_jump) = code_label;
12667
12668 return true;
12669 }
12670
12671 /* Returns 1 if INSN reads the value of REG for purposes not related
12672 to addressing of memory, and 0 otherwise. */
12673 static int
12674 s390_non_addr_reg_read_p (rtx reg, rtx_insn *insn)
12675 {
12676 return reg_referenced_p (reg, PATTERN (insn))
12677 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
12678 }
12679
12680 /* Starting from INSN find_cond_jump looks downwards in the insn
12681 stream for a single jump insn which is the last user of the
12682 condition code set in INSN. */
12683 static rtx_insn *
12684 find_cond_jump (rtx_insn *insn)
12685 {
12686 for (; insn; insn = NEXT_INSN (insn))
12687 {
12688 rtx ite, cc;
12689
12690 if (LABEL_P (insn))
12691 break;
12692
12693 if (!JUMP_P (insn))
12694 {
12695 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
12696 break;
12697 continue;
12698 }
12699
12700 /* This will be triggered by a return. */
12701 if (GET_CODE (PATTERN (insn)) != SET)
12702 break;
12703
12704 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
12705 ite = SET_SRC (PATTERN (insn));
12706
12707 if (GET_CODE (ite) != IF_THEN_ELSE)
12708 break;
12709
12710 cc = XEXP (XEXP (ite, 0), 0);
12711 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
12712 break;
12713
12714 if (find_reg_note (insn, REG_DEAD, cc))
12715 return insn;
12716 break;
12717 }
12718
12719 return NULL;
12720 }
12721
12722 /* Swap the condition in COND and the operands in OP0 and OP1 so that
12723 the semantics does not change. If NULL_RTX is passed as COND the
12724 function tries to find the conditional jump starting with INSN. */
12725 static void
12726 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx_insn *insn)
12727 {
12728 rtx tmp = *op0;
12729
12730 if (cond == NULL_RTX)
12731 {
12732 rtx_insn *jump = find_cond_jump (NEXT_INSN (insn));
12733 rtx set = jump ? single_set (jump) : NULL_RTX;
12734
12735 if (set == NULL_RTX)
12736 return;
12737
12738 cond = XEXP (SET_SRC (set), 0);
12739 }
12740
12741 *op0 = *op1;
12742 *op1 = tmp;
12743 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
12744 }
12745
12746 /* On z10, instructions of the compare-and-branch family have the
12747 property of accessing the register occurring as the second operand with
12748 its bits complemented. If such a compare is grouped with a second
12749 instruction that accesses the same register non-complemented, and
12750 if that register's value is delivered via a bypass, then the
12751 pipeline recycles, thereby causing significant performance decline.
12752 This function locates such situations and exchanges the two
12753 operands of the compare. The function returns true whenever it
12754 added an insn. */
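
/* Example (sketch): for a compare-and-branch such as

       crj  %r2,%r3,label

   grouped with a neighbouring insn that reads %r3, the operands are
   exchanged to  crj %r3,%r2,label  with the condition swapped accordingly;
   if exchanging is not possible, a NOP is emitted after the compare to
   separate the two instructions.  */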
12755 static bool
12756 s390_z10_optimize_cmp (rtx_insn *insn)
12757 {
12758 rtx_insn *prev_insn, *next_insn;
12759 bool insn_added_p = false;
12760 rtx cond, *op0, *op1;
12761
12762 if (GET_CODE (PATTERN (insn)) == PARALLEL)
12763 {
12764 /* Handle compare and branch and branch on count
12765 instructions. */
12766 rtx pattern = single_set (insn);
12767
12768 if (!pattern
12769 || SET_DEST (pattern) != pc_rtx
12770 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
12771 return false;
12772
12773 cond = XEXP (SET_SRC (pattern), 0);
12774 op0 = &XEXP (cond, 0);
12775 op1 = &XEXP (cond, 1);
12776 }
12777 else if (GET_CODE (PATTERN (insn)) == SET)
12778 {
12779 rtx src, dest;
12780
12781 /* Handle normal compare instructions. */
12782 src = SET_SRC (PATTERN (insn));
12783 dest = SET_DEST (PATTERN (insn));
12784
12785 if (!REG_P (dest)
12786 || !CC_REGNO_P (REGNO (dest))
12787 || GET_CODE (src) != COMPARE)
12788 return false;
12789
12790 /* s390_swap_cmp will try to find the conditional
12791 jump when passing NULL_RTX as condition. */
12792 cond = NULL_RTX;
12793 op0 = &XEXP (src, 0);
12794 op1 = &XEXP (src, 1);
12795 }
12796 else
12797 return false;
12798
12799 if (!REG_P (*op0) || !REG_P (*op1))
12800 return false;
12801
12802 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
12803 return false;
12804
12805 /* Swap the COMPARE arguments and its mask if there is a
12806 conflicting access in the previous insn. */
12807 prev_insn = prev_active_insn (insn);
12808 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
12809 && reg_referenced_p (*op1, PATTERN (prev_insn)))
12810 s390_swap_cmp (cond, op0, op1, insn);
12811
12812 /* Check if there is a conflict with the next insn. If there
12813 was no conflict with the previous insn, then swap the
12814 COMPARE arguments and its mask. If we already swapped
12815 the operands, or if swapping them would cause a conflict
12816 with the previous insn, issue a NOP after the COMPARE in
12817 order to separate the two instructions. */
12818 next_insn = next_active_insn (insn);
12819 if (next_insn != NULL_RTX && INSN_P (next_insn)
12820 && s390_non_addr_reg_read_p (*op1, next_insn))
12821 {
12822 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
12823 && s390_non_addr_reg_read_p (*op0, prev_insn))
12824 {
12825 if (REGNO (*op1) == 0)
12826 emit_insn_after (gen_nop1 (), insn);
12827 else
12828 emit_insn_after (gen_nop (), insn);
12829 insn_added_p = true;
12830 }
12831 else
12832 s390_swap_cmp (cond, op0, op1, insn);
12833 }
12834 return insn_added_p;
12835 }
12836
12837 /* Perform machine-dependent processing. */
12838
12839 static void
12840 s390_reorg (void)
12841 {
12842 bool pool_overflow = false;
12843 int hw_before, hw_after;
12844
12845 /* Make sure all splits have been performed; splits after
12846 machine_dependent_reorg might confuse insn length counts. */
12847 split_all_insns_noflow ();
12848
12849 /* Install the main literal pool and the associated base
12850 register load insns.
12851
12852 In addition, there are two problematic situations we need
12853 to correct:
12854
12855 - the literal pool might be > 4096 bytes in size, so that
12856 some of its elements cannot be directly accessed
12857
12858 - a branch target might be > 64K away from the branch, so that
12859 it is not possible to use a PC-relative instruction.
12860
12861 To fix those, we split the single literal pool into multiple
12862 pool chunks, reloading the pool base register at various
12863 points throughout the function to ensure it always points to
12864 the pool chunk the following code expects, and / or replace
12865 PC-relative branches by absolute branches.
12866
12867 However, the two problems are interdependent: splitting the
12868 literal pool can move a branch further away from its target,
12869 causing the 64K limit to overflow, and on the other hand,
12870 replacing a PC-relative branch by an absolute branch means
12871 we need to put the branch target address into the literal
12872 pool, possibly causing it to overflow.
12873
12874 So, we loop trying to fix up both problems until we manage
12875 to satisfy both conditions at the same time. Note that the
12876 loop is guaranteed to terminate as every pass of the loop
12877 strictly decreases the total number of PC-relative branches
12878 in the function. (This is not completely true as there
12879 might be branch-over-pool insns introduced by chunkify_start.
12880 Those never need to be split however.) */
12881
12882 for (;;)
12883 {
12884 struct constant_pool *pool = NULL;
12885
12886 /* Collect the literal pool. */
12887 if (!pool_overflow)
12888 {
12889 pool = s390_mainpool_start ();
12890 if (!pool)
12891 pool_overflow = true;
12892 }
12893
12894 /* If literal pool overflowed, start to chunkify it. */
12895 if (pool_overflow)
12896 pool = s390_chunkify_start ();
12897
12898 /* Split out-of-range branches. If this has created new
12899 literal pool entries, cancel current chunk list and
12900 recompute it. zSeries machines have large branch
12901 instructions, so we never need to split a branch. */
12902 if (!TARGET_CPU_ZARCH && s390_split_branches ())
12903 {
12904 if (pool_overflow)
12905 s390_chunkify_cancel (pool);
12906 else
12907 s390_mainpool_cancel (pool);
12908
12909 continue;
12910 }
12911
12912 /* If we made it up to here, both conditions are satisfied.
12913 Finish up literal pool related changes. */
12914 if (pool_overflow)
12915 s390_chunkify_finish (pool);
12916 else
12917 s390_mainpool_finish (pool);
12918
12919 /* We're done splitting branches. */
12920 cfun->machine->split_branches_pending_p = false;
12921 break;
12922 }
12923
12924 /* Generate out-of-pool execute target insns. */
12925 if (TARGET_CPU_ZARCH)
12926 {
12927 rtx_insn *insn, *target;
12928 rtx label;
12929
12930 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
12931 {
12932 label = s390_execute_label (insn);
12933 if (!label)
12934 continue;
12935
12936 gcc_assert (label != const0_rtx);
12937
12938 target = emit_label (XEXP (label, 0));
12939 INSN_ADDRESSES_NEW (target, -1);
12940
12941 target = emit_insn (s390_execute_target (insn));
12942 INSN_ADDRESSES_NEW (target, -1);
12943 }
12944 }
12945
12946 /* Try to optimize prologue and epilogue further. */
12947 s390_optimize_prologue ();
12948
12949 /* Walk over the insns and do some >=z10 specific changes. */
12950 if (s390_tune >= PROCESSOR_2097_Z10)
12951 {
12952 rtx_insn *insn;
12953 bool insn_added_p = false;
12954
12955 /* The insn lengths and addresses have to be up to date for the
12956 following manipulations. */
12957 shorten_branches (get_insns ());
12958
12959 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
12960 {
12961 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
12962 continue;
12963
12964 if (JUMP_P (insn))
12965 insn_added_p |= s390_fix_long_loop_prediction (insn);
12966
12967 if ((GET_CODE (PATTERN (insn)) == PARALLEL
12968 || GET_CODE (PATTERN (insn)) == SET)
12969 && s390_tune == PROCESSOR_2097_Z10)
12970 insn_added_p |= s390_z10_optimize_cmp (insn);
12971 }
12972
12973 /* Adjust branches if we added new instructions. */
12974 if (insn_added_p)
12975 shorten_branches (get_insns ());
12976 }
12977
12978 s390_function_num_hotpatch_hw (current_function_decl, &hw_before, &hw_after);
12979 if (hw_after > 0)
12980 {
12981 rtx_insn *insn;
12982
12983 /* Insert NOPs for hotpatching. */
12984 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
12985 /* Emit NOPs
12986 1. inside the area covered by debug information to allow setting
12987 breakpoints at the NOPs,
12988 2. before any insn which results in an asm instruction,
12989 3. before in-function labels to avoid jumping to the NOPs, for
12990 example as part of a loop,
12991 4. before any barrier in case the function is completely empty
12992 (__builtin_unreachable ()) and has neither internal labels nor
12993 active insns.
12994 */
12995 if (active_insn_p (insn) || BARRIER_P (insn) || LABEL_P (insn))
12996 break;
12997 /* Output a series of NOPs before the first active insn. */
12998 while (insn && hw_after > 0)
12999 {
13000 if (hw_after >= 3 && TARGET_CPU_ZARCH)
13001 {
13002 emit_insn_before (gen_nop_6_byte (), insn);
13003 hw_after -= 3;
13004 }
13005 else if (hw_after >= 2)
13006 {
13007 emit_insn_before (gen_nop_4_byte (), insn);
13008 hw_after -= 2;
13009 }
13010 else
13011 {
13012 emit_insn_before (gen_nop_2_byte (), insn);
13013 hw_after -= 1;
13014 }
13015 }
13016 }
13017 }
13018
13019 /* Return true if INSN is a fp load insn writing register REGNO. */
13020 static inline bool
13021 s390_fpload_toreg (rtx_insn *insn, unsigned int regno)
13022 {
13023 rtx set;
13024 enum attr_type flag = s390_safe_attr_type (insn);
13025
13026 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
13027 return false;
13028
13029 set = single_set (insn);
13030
13031 if (set == NULL_RTX)
13032 return false;
13033
13034 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
13035 return false;
13036
13037 if (REGNO (SET_DEST (set)) != regno)
13038 return false;
13039
13040 return true;
13041 }
13042
13043 /* This value describes the distance to be avoided between an
13044 arithmetic fp instruction and an fp load writing the same register.
13045 Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 is
13046 fine but the exact value has to be avoided. Otherwise the FP
13047 pipeline will throw an exception causing a major penalty. */
13048 #define Z10_EARLYLOAD_DISTANCE 7
13049
13050 /* Rearrange the ready list in order to avoid the situation described
13051 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
13052 moved to the very end of the ready list. */
13053 static void
13054 s390_z10_prevent_earlyload_conflicts (rtx_insn **ready, int *nready_p)
13055 {
13056 unsigned int regno;
13057 int nready = *nready_p;
13058 rtx_insn *tmp;
13059 int i;
13060 rtx_insn *insn;
13061 rtx set;
13062 enum attr_type flag;
13063 int distance;
13064
13065 /* Skip DISTANCE - 1 active insns. */
13066 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
13067 distance > 0 && insn != NULL_RTX;
13068 distance--, insn = prev_active_insn (insn))
13069 if (CALL_P (insn) || JUMP_P (insn))
13070 return;
13071
13072 if (insn == NULL_RTX)
13073 return;
13074
13075 set = single_set (insn);
13076
13077 if (set == NULL_RTX || !REG_P (SET_DEST (set))
13078 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
13079 return;
13080
13081 flag = s390_safe_attr_type (insn);
13082
13083 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
13084 return;
13085
13086 regno = REGNO (SET_DEST (set));
13087 i = nready - 1;
13088
13089 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
13090 i--;
13091
13092 if (!i)
13093 return;
13094
13095 tmp = ready[i];
13096 memmove (&ready[1], &ready[0], sizeof (rtx_insn *) * i);
13097 ready[0] = tmp;
13098 }
13099
13100
13101 /* The s390_sched_state variable tracks the state of the current or
13102 the last instruction group.
13103
13104 0,1,2 number of instructions scheduled in the current group
13105 3 the last group is complete - normal insns
13106 4 the last group was a cracked/expanded insn */
13107
13108 static int s390_sched_state;
13109
13110 #define S390_OOO_SCHED_STATE_NORMAL 3
13111 #define S390_OOO_SCHED_STATE_CRACKED 4
13112
13113 #define S390_OOO_SCHED_ATTR_MASK_CRACKED 0x1
13114 #define S390_OOO_SCHED_ATTR_MASK_EXPANDED 0x2
13115 #define S390_OOO_SCHED_ATTR_MASK_ENDGROUP 0x4
13116 #define S390_OOO_SCHED_ATTR_MASK_GROUPALONE 0x8
13117
13118 static unsigned int
13119 s390_get_sched_attrmask (rtx_insn *insn)
13120 {
13121 unsigned int mask = 0;
13122
13123 if (get_attr_ooo_cracked (insn))
13124 mask |= S390_OOO_SCHED_ATTR_MASK_CRACKED;
13125 if (get_attr_ooo_expanded (insn))
13126 mask |= S390_OOO_SCHED_ATTR_MASK_EXPANDED;
13127 if (get_attr_ooo_endgroup (insn))
13128 mask |= S390_OOO_SCHED_ATTR_MASK_ENDGROUP;
13129 if (get_attr_ooo_groupalone (insn))
13130 mask |= S390_OOO_SCHED_ATTR_MASK_GROUPALONE;
13131 return mask;
13132 }
13133
13134 /* Return the scheduling score for INSN. The higher the score the
13135 better. The score is calculated from the OOO scheduling attributes
13136 of INSN and the scheduling state s390_sched_state. */
13137 static int
13138 s390_sched_score (rtx_insn *insn)
13139 {
13140 unsigned int mask = s390_get_sched_attrmask (insn);
13141 int score = 0;
13142
13143 switch (s390_sched_state)
13144 {
13145 case 0:
13146 /* Try to put insns into the first slot which would otherwise
13147 break a group. */
13148 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
13149 || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
13150 score += 5;
13151 if ((mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
13152 score += 10;
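/* Fall through (apparently intentional): a first-slot candidate is
   also rated by the criteria for the following slots.  */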
13153 case 1:
13154 /* Prefer not cracked insns while trying to put together a
13155 group. */
13156 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
13157 && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0
13158 && (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) == 0)
13159 score += 10;
13160 if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) == 0)
13161 score += 5;
13162 break;
13163 case 2:
13164 /* Prefer not cracked insns while trying to put together a
13165 group. */
13166 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
13167 && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0
13168 && (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) == 0)
13169 score += 10;
13170 /* Prefer endgroup insns in the last slot. */
13171 if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) != 0)
13172 score += 10;
13173 break;
13174 case S390_OOO_SCHED_STATE_NORMAL:
13175 /* Prefer not cracked insns if the last was not cracked. */
13176 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
13177 && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0)
13178 score += 5;
13179 if ((mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
13180 score += 10;
13181 break;
13182 case S390_OOO_SCHED_STATE_CRACKED:
13183 /* Try to keep cracked insns together to prevent them from
13184 interrupting groups. */
13185 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
13186 || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
13187 score += 5;
13188 break;
13189 }
13190 return score;
13191 }
13192
13193 /* This function is called via hook TARGET_SCHED_REORDER before
13194 issuing one insn from list READY which contains *NREADYP entries.
13195 For target z10 it reorders load instructions to avoid early load
13196 conflicts in the floating point pipeline. */
13197 static int
13198 s390_sched_reorder (FILE *file, int verbose,
13199 rtx_insn **ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
13200 {
13201 if (s390_tune == PROCESSOR_2097_Z10
13202 && reload_completed
13203 && *nreadyp > 1)
13204 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
13205
13206 if (s390_tune >= PROCESSOR_2827_ZEC12
13207 && reload_completed
13208 && *nreadyp > 1)
13209 {
13210 int i;
13211 int last_index = *nreadyp - 1;
13212 int max_index = -1;
13213 int max_score = -1;
13214 rtx_insn *tmp;
13215
13216 /* Just move the insn with the highest score to the top (the
13217 end) of the list. A full sort is not needed since a conflict
13218 in the hazard recognition cannot happen. So the top insn in
13219 the ready list will always be taken. */
13220 for (i = last_index; i >= 0; i--)
13221 {
13222 int score;
13223
13224 if (recog_memoized (ready[i]) < 0)
13225 continue;
13226
13227 score = s390_sched_score (ready[i]);
13228 if (score > max_score)
13229 {
13230 max_score = score;
13231 max_index = i;
13232 }
13233 }
13234
13235 if (max_index != -1)
13236 {
13237 if (max_index != last_index)
13238 {
13239 tmp = ready[max_index];
13240 ready[max_index] = ready[last_index];
13241 ready[last_index] = tmp;
13242
13243 if (verbose > 5)
13244 fprintf (file,
13245 "move insn %d to the top of list\n",
13246 INSN_UID (ready[last_index]));
13247 }
13248 else if (verbose > 5)
13249 fprintf (file,
13250 "best insn %d already on top\n",
13251 INSN_UID (ready[last_index]));
13252 }
13253
13254 if (verbose > 5)
13255 {
13256 fprintf (file, "ready list ooo attributes - sched state: %d\n",
13257 s390_sched_state);
13258
13259 for (i = last_index; i >= 0; i--)
13260 {
13261 if (recog_memoized (ready[i]) < 0)
13262 continue;
13263 fprintf (file, "insn %d score: %d: ", INSN_UID (ready[i]),
13264 s390_sched_score (ready[i]));
13265 #define PRINT_OOO_ATTR(ATTR) fprintf (file, "%s ", get_attr_##ATTR (ready[i]) ? #ATTR : "!" #ATTR);
13266 PRINT_OOO_ATTR (ooo_cracked);
13267 PRINT_OOO_ATTR (ooo_expanded);
13268 PRINT_OOO_ATTR (ooo_endgroup);
13269 PRINT_OOO_ATTR (ooo_groupalone);
13270 #undef PRINT_OOO_ATTR
13271 fprintf (file, "\n");
13272 }
13273 }
13274 }
13275
13276 return s390_issue_rate ();
13277 }
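/* For illustration (hypothetical insn UIDs and scores): with a ready list of
   { uid 12: score 5, uid 17: score 20, uid 23: score 15 } and uid 23 at
   last_index, the loop above selects uid 17 as the maximum and swaps it
   with ready[last_index], so uid 17 is issued next.  */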
13278
13279
13283 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
13284 the scheduler has issued INSN.  It stores the last issued insn into
13285 last_scheduled_insn to make it available for s390_sched_reorder and,
13286 for zEC12 and later, advances the s390_sched_state machine.  */
13284 static int
13285 s390_sched_variable_issue (FILE *file, int verbose, rtx_insn *insn, int more)
13286 {
13287 last_scheduled_insn = insn;
13288
13289 if (s390_tune >= PROCESSOR_2827_ZEC12
13290 && reload_completed
13291 && recog_memoized (insn) >= 0)
13292 {
13293 unsigned int mask = s390_get_sched_attrmask (insn);
13294
13295 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
13296 || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
13297 s390_sched_state = S390_OOO_SCHED_STATE_CRACKED;
13298 else if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) != 0
13299 || (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
13300 s390_sched_state = S390_OOO_SCHED_STATE_NORMAL;
13301 else
13302 {
13303 /* Only normal insns are left (mask == 0). */
13304 switch (s390_sched_state)
13305 {
13306 case 0:
13307 case 1:
13308 case 2:
13309 case S390_OOO_SCHED_STATE_NORMAL:
13310 if (s390_sched_state == S390_OOO_SCHED_STATE_NORMAL)
13311 s390_sched_state = 1;
13312 else
13313 s390_sched_state++;
13314
13315 break;
13316 case S390_OOO_SCHED_STATE_CRACKED:
13317 s390_sched_state = S390_OOO_SCHED_STATE_NORMAL;
13318 break;
13319 }
13320 }
13321 if (verbose > 5)
13322 {
13323 fprintf (file, "insn %d: ", INSN_UID (insn));
13324 #define PRINT_OOO_ATTR(ATTR) \
13325 fprintf (file, "%s ", get_attr_##ATTR (insn) ? #ATTR : "");
13326 PRINT_OOO_ATTR (ooo_cracked);
13327 PRINT_OOO_ATTR (ooo_expanded);
13328 PRINT_OOO_ATTR (ooo_endgroup);
13329 PRINT_OOO_ATTR (ooo_groupalone);
13330 #undef PRINT_OOO_ATTR
13331 fprintf (file, "\n");
13332 fprintf (file, "sched state: %d\n", s390_sched_state);
13333 }
13334 }
13335
13336 if (GET_CODE (PATTERN (insn)) != USE
13337 && GET_CODE (PATTERN (insn)) != CLOBBER)
13338 return more - 1;
13339 else
13340 return more;
13341 }
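/* For illustration of the state machine above (hypothetical issue sequence):
   three consecutive plain insns advance s390_sched_state 0 -> 1 -> 2 ->
   S390_OOO_SCHED_STATE_NORMAL, and a fourth plain insn restarts at 1.  A
   cracked or expanded insn switches to S390_OOO_SCHED_STATE_CRACKED, and the
   next plain insn moves back to S390_OOO_SCHED_STATE_NORMAL.  */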
13342
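/* This function is called via hook TARGET_SCHED_INIT before scheduling a
   block of insns; it resets last_scheduled_insn and the group tracking
   state s390_sched_state.  */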
13343 static void
13344 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
13345 int verbose ATTRIBUTE_UNUSED,
13346 int max_ready ATTRIBUTE_UNUSED)
13347 {
13348 last_scheduled_insn = NULL;
13349 s390_sched_state = 0;
13350 }
13351
13355 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
13356 a new number of times the loop LOOP should be unrolled when tuning for
13357 cpus with a built-in stride prefetcher.
13358 The loop body is scanned for memory accesses by walking every rtx of
13359 every insn in the loop.  Depending on the loop_depth and the number of
13360 memory accesses a new number <= nunroll is returned to improve the
13361 behaviour of the hardware prefetch unit.  */
13359 static unsigned
13360 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
13361 {
13362 basic_block *bbs;
13363 rtx_insn *insn;
13364 unsigned i;
13365 unsigned mem_count = 0;
13366
13367 if (s390_tune < PROCESSOR_2097_Z10)
13368 return nunroll;
13369
13370 /* Count the number of memory references within the loop body. */
13371 bbs = get_loop_body (loop);
13372 subrtx_iterator::array_type array;
13373 for (i = 0; i < loop->num_nodes; i++)
13374 FOR_BB_INSNS (bbs[i], insn)
13375 if (INSN_P (insn) && INSN_CODE (insn) != -1)
13376 FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
13377 if (MEM_P (*iter))
13378 mem_count += 1;
13379 free (bbs);
13380
13381 /* Prevent division by zero, and we do not need to adjust nunroll in this case. */
13382 if (mem_count == 0)
13383 return nunroll;
13384
13385 switch (loop_depth(loop))
13386 {
13387 case 1:
13388 return MIN (nunroll, 28 / mem_count);
13389 case 2:
13390 return MIN (nunroll, 22 / mem_count);
13391 default:
13392 return MIN (nunroll, 16 / mem_count);
13393 }
13394 }
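/* Worked example (hypothetical loop): for a depth-1 loop containing four
   memory references and nunroll == 16, the result is MIN (16, 28 / 4) == 7;
   with a single memory reference the cap of 28 leaves nunroll unchanged.  */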
13395
13396 static void
13397 s390_option_override (void)
13398 {
13399 unsigned int i;
13400 cl_deferred_option *opt;
13401 vec<cl_deferred_option> *v =
13402 (vec<cl_deferred_option> *) s390_deferred_options;
13403
13404 if (v)
13405 FOR_EACH_VEC_ELT (*v, i, opt)
13406 {
13407 switch (opt->opt_index)
13408 {
13409 case OPT_mhotpatch_:
13410 {
13411 int val1;
13412 int val2;
13413 char s[256];
13414 char *t;
13415
13416 strncpy (s, opt->arg, 256);
13417 s[255] = 0;
13418 t = strchr (s, ',');
13419 if (t != NULL)
13420 {
13421 *t = 0;
13422 t++;
13423 val1 = integral_argument (s);
13424 val2 = integral_argument (t);
13425 }
13426 else
13427 {
13428 val1 = -1;
13429 val2 = -1;
13430 }
13431 if (val1 == -1 || val2 == -1)
13432 {
13436 /* Argument is not a plain number.  */
13434 error ("arguments to %qs should be non-negative integers",
13435 "-mhotpatch=n,m");
13436 break;
13437 }
13438 else if (val1 > s390_hotpatch_hw_max
13439 || val2 > s390_hotpatch_hw_max)
13440 {
13441 error ("argument to %qs is too large (max. %d)",
13442 "-mhotpatch=n,m", s390_hotpatch_hw_max);
13443 break;
13444 }
13445 s390_hotpatch_hw_before_label = val1;
13446 s390_hotpatch_hw_after_label = val2;
13447 break;
13448 }
13449 default:
13450 gcc_unreachable ();
13451 }
13452 }
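/* For illustration (hypothetical option value): -mhotpatch=12,2 is split at
   the comma above into val1 == 12 and val2 == 2 and, assuming both fit into
   s390_hotpatch_hw_max, stored in s390_hotpatch_hw_before_label and
   s390_hotpatch_hw_after_label (the number of hotpatch halfwords emitted
   before and after a function label).  */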
13453
13454 /* Set up function hooks. */
13455 init_machine_status = s390_init_machine_status;
13456
13457 /* Architecture mode defaults according to ABI. */
13458 if (!(target_flags_explicit & MASK_ZARCH))
13459 {
13460 if (TARGET_64BIT)
13461 target_flags |= MASK_ZARCH;
13462 else
13463 target_flags &= ~MASK_ZARCH;
13464 }
13465
13466 /* Set the march default in case it hasn't been specified on
13467 cmdline. */
13468 if (s390_arch == PROCESSOR_max)
13469 {
13473 s390_arch_string = TARGET_ZARCH ? "z900" : "g5";
13471 s390_arch = TARGET_ZARCH ? PROCESSOR_2064_Z900 : PROCESSOR_9672_G5;
13472 s390_arch_flags = processor_flags_table[(int)s390_arch];
13473 }
13474
13475 /* Determine processor to tune for. */
13476 if (s390_tune == PROCESSOR_max)
13477 {
13478 s390_tune = s390_arch;
13479 s390_tune_flags = s390_arch_flags;
13480 }
13481
13482 /* Sanity checks. */
13483 if (s390_arch == PROCESSOR_NATIVE || s390_tune == PROCESSOR_NATIVE)
13484 gcc_unreachable ();
13485 if (TARGET_ZARCH && !TARGET_CPU_ZARCH)
13486 error ("z/Architecture mode not supported on %s", s390_arch_string);
13487 if (TARGET_64BIT && !TARGET_ZARCH)
13488 error ("64-bit ABI not supported in ESA/390 mode");
13489
13490 /* Use hardware DFP if available and not explicitly disabled by
13494 user.  E.g. with -m31 -march=z10 -mzarch.  */
13492 if (!(target_flags_explicit & MASK_HARD_DFP) && TARGET_DFP)
13493 target_flags |= MASK_HARD_DFP;
13494
13495 /* Enable hardware transactions if available and not explicitly
13499 disabled by user.  E.g. with -m31 -march=zEC12 -mzarch.  */
13497 if (!(target_flags_explicit & MASK_OPT_HTM) && TARGET_CPU_HTM && TARGET_ZARCH)
13498 target_flags |= MASK_OPT_HTM;
13499
13500 if (target_flags_explicit & MASK_OPT_VX)
13501 {
13502 if (TARGET_OPT_VX)
13503 {
13504 if (!TARGET_CPU_VX)
13505 error ("hardware vector support not available on %s",
13506 s390_arch_string);
13507 if (TARGET_SOFT_FLOAT)
13508 error ("hardware vector support not available with -msoft-float");
13509 }
13510 }
13511 else if (TARGET_CPU_VX)
13512 /* Enable vector support if available and not explicitly disabled
13516 by user.  E.g. with -m31 -march=z13 -mzarch.  */
13514 target_flags |= MASK_OPT_VX;
13515
13516 if (TARGET_HARD_DFP && !TARGET_DFP)
13517 {
13518 if (target_flags_explicit & MASK_HARD_DFP)
13519 {
13520 if (!TARGET_CPU_DFP)
13521 error ("hardware decimal floating point instructions"
13522 " not available on %s", s390_arch_string);
13523 if (!TARGET_ZARCH)
13524 error ("hardware decimal floating point instructions"
13525 " not available in ESA/390 mode");
13526 }
13527 else
13528 target_flags &= ~MASK_HARD_DFP;
13529 }
13530
13531 if ((target_flags_explicit & MASK_SOFT_FLOAT) && TARGET_SOFT_FLOAT)
13532 {
13533 if ((target_flags_explicit & MASK_HARD_DFP) && TARGET_HARD_DFP)
13534 error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");
13535
13536 target_flags &= ~MASK_HARD_DFP;
13537 }
13538
13539 /* Set processor cost function. */
13540 switch (s390_tune)
13541 {
13542 case PROCESSOR_2084_Z990:
13543 s390_cost = &z990_cost;
13544 break;
13545 case PROCESSOR_2094_Z9_109:
13546 case PROCESSOR_2094_Z9_EC:
13547 s390_cost = &z9_109_cost;
13548 break;
13549 case PROCESSOR_2097_Z10:
13550 s390_cost = &z10_cost;
13551 break;
13552 case PROCESSOR_2817_Z196:
13553 s390_cost = &z196_cost;
13554 break;
13555 case PROCESSOR_2827_ZEC12:
13556 case PROCESSOR_2964_Z13:
13557 s390_cost = &zEC12_cost;
13558 break;
13559 default:
13560 s390_cost = &z900_cost;
13561 }
13562
13563 if (TARGET_BACKCHAIN && TARGET_PACKED_STACK && TARGET_HARD_FLOAT)
13564 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
13565 "in combination");
13566
13567 if (s390_stack_size)
13568 {
13569 if (s390_stack_guard >= s390_stack_size)
13570 error ("stack size must be greater than the stack guard value");
13571 else if (s390_stack_size > 1 << 16)
13572 error ("stack size must not be greater than 64k");
13573 }
13574 else if (s390_stack_guard)
13575 error ("-mstack-guard implies use of -mstack-size");
13576
13577 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
13578 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
13579 target_flags |= MASK_LONG_DOUBLE_128;
13580 #endif
13581
13582 if (s390_tune >= PROCESSOR_2097_Z10)
13583 {
13584 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
13585 global_options.x_param_values,
13586 global_options_set.x_param_values);
13587 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
13588 global_options.x_param_values,
13589 global_options_set.x_param_values);
13590 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
13591 global_options.x_param_values,
13592 global_options_set.x_param_values);
13593 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
13594 global_options.x_param_values,
13595 global_options_set.x_param_values);
13596 }
13597
13598 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
13599 global_options.x_param_values,
13600 global_options_set.x_param_values);
13604 /* Values for loop prefetching.  */
13602 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
13603 global_options.x_param_values,
13604 global_options_set.x_param_values);
13605 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
13606 global_options.x_param_values,
13607 global_options_set.x_param_values);
13611 /* s390 has more than 2 cache levels and their size is much larger.
13612 Since we are always running virtualized, assume that we only get a
13613 small part of the caches above L1.  */
13611 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
13612 global_options.x_param_values,
13613 global_options_set.x_param_values);
13614 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
13615 global_options.x_param_values,
13616 global_options_set.x_param_values);
13617 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
13618 global_options.x_param_values,
13619 global_options_set.x_param_values);
13620
13621 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
13622 requires the arch flags to be evaluated already. Since prefetching
13623 is beneficial on s390, we enable it if available. */
13624 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
13625 flag_prefetch_loop_arrays = 1;
13626
13627 /* Use the alternative scheduling-pressure algorithm by default. */
13628 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
13629 global_options.x_param_values,
13630 global_options_set.x_param_values);
13631
13632 if (TARGET_TPF)
13633 {
13634 /* Don't emit DWARF3/4 unless specifically selected. The TPF
13635 debuggers do not yet support DWARF 3/4. */
13636 if (!global_options_set.x_dwarf_strict)
13637 dwarf_strict = 1;
13638 if (!global_options_set.x_dwarf_version)
13639 dwarf_version = 2;
13640 }
13641
13642 /* Register a target-specific optimization-and-lowering pass
13643 to run immediately before prologue and epilogue generation.
13644
13645 Registering the pass must be done at start up. It's
13646 convenient to do it here. */
13647 opt_pass *new_pass = new pass_s390_early_mach (g);
13648 struct register_pass_info insert_pass_s390_early_mach =
13649 {
13650 new_pass, /* pass */
13651 "pro_and_epilogue", /* reference_pass_name */
13652 1, /* ref_pass_instance_number */
13653 PASS_POS_INSERT_BEFORE /* po_op */
13654 };
13655 register_pass (&insert_pass_s390_early_mach);
13656 }
13657
13658 /* Implement TARGET_USE_BY_PIECES_INFRASTRUCTURE_P. */
13659
13660 static bool
13661 s390_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
13662 unsigned int align ATTRIBUTE_UNUSED,
13663 enum by_pieces_operation op ATTRIBUTE_UNUSED,
13664 bool speed_p ATTRIBUTE_UNUSED)
13665 {
13666 return (size == 1 || size == 2
13667 || size == 4 || (TARGET_ZARCH && size == 8));
13668 }
13669
13670 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
13671
13672 static void
13673 s390_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
13674 {
13675 tree sfpc = s390_builtin_decls[S390_BUILTIN_s390_sfpc];
13676 tree efpc = s390_builtin_decls[S390_BUILTIN_s390_efpc];
13677 tree call_efpc = build_call_expr (efpc, 0);
13678 tree fenv_var = create_tmp_var (unsigned_type_node);
13679
13680 #define FPC_EXCEPTION_MASK HOST_WIDE_INT_UC (0xf8000000)
13681 #define FPC_FLAGS_MASK HOST_WIDE_INT_UC (0x00f80000)
13682 #define FPC_DXC_MASK HOST_WIDE_INT_UC (0x0000ff00)
13683 #define FPC_EXCEPTION_MASK_SHIFT HOST_WIDE_INT_UC (24)
13684 #define FPC_FLAGS_SHIFT HOST_WIDE_INT_UC (16)
13685 #define FPC_DXC_SHIFT HOST_WIDE_INT_UC (8)
13686
13687 /* Generates the equivalent of feholdexcept (&fenv_var)
13688
13689 fenv_var = __builtin_s390_efpc ();
13690 __builtin_s390_sfpc (fenv_var & mask) */
13691 tree old_fpc = build2 (MODIFY_EXPR, unsigned_type_node, fenv_var, call_efpc);
13692 tree new_fpc =
13693 build2 (BIT_AND_EXPR, unsigned_type_node, fenv_var,
13694 build_int_cst (unsigned_type_node,
13695 ~(FPC_DXC_MASK | FPC_FLAGS_MASK |
13696 FPC_EXCEPTION_MASK)));
13697 tree set_new_fpc = build_call_expr (sfpc, 1, new_fpc);
13698 *hold = build2 (COMPOUND_EXPR, void_type_node, old_fpc, set_new_fpc);
13699
13700 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT)
13701
13702 __builtin_s390_sfpc (__builtin_s390_efpc () & mask) */
13703 new_fpc = build2 (BIT_AND_EXPR, unsigned_type_node, call_efpc,
13704 build_int_cst (unsigned_type_node,
13705 ~(FPC_DXC_MASK | FPC_FLAGS_MASK)));
13706 *clear = build_call_expr (sfpc, 1, new_fpc);
13707
13708 /* Generates the equivalent of feupdateenv (fenv_var)
13709
13710 old_fpc = __builtin_s390_efpc ();
13711 __builtin_s390_sfpc (fenv_var);
13712 __atomic_feraiseexcept ((old_fpc & FPC_FLAGS_MASK) >> FPC_FLAGS_SHIFT); */
13713
13714 old_fpc = create_tmp_var (unsigned_type_node);
13715 tree store_old_fpc = build2 (MODIFY_EXPR, void_type_node,
13716 old_fpc, call_efpc);
13717
13718 set_new_fpc = build_call_expr (sfpc, 1, fenv_var);
13719
13720 tree raise_old_except = build2 (BIT_AND_EXPR, unsigned_type_node, old_fpc,
13721 build_int_cst (unsigned_type_node,
13722 FPC_FLAGS_MASK));
13723 raise_old_except = build2 (RSHIFT_EXPR, unsigned_type_node, raise_old_except,
13724 build_int_cst (unsigned_type_node,
13725 FPC_FLAGS_SHIFT));
13726 tree atomic_feraiseexcept
13727 = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
13728 raise_old_except = build_call_expr (atomic_feraiseexcept,
13729 1, raise_old_except);
13730
13731 *update = build2 (COMPOUND_EXPR, void_type_node,
13732 build2 (COMPOUND_EXPR, void_type_node,
13733 store_old_fpc, set_new_fpc),
13734 raise_old_except);
13735
13736 #undef FPC_EXCEPTION_MASK
13737 #undef FPC_FLAGS_MASK
13738 #undef FPC_DXC_MASK
13739 #undef FPC_EXCEPTION_MASK_SHIFT
13740 #undef FPC_FLAGS_SHIFT
13741 #undef FPC_DXC_SHIFT
13742 }
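/* For illustration: the bits cleared by the HOLD sequence above are
   FPC_EXCEPTION_MASK | FPC_FLAGS_MASK | FPC_DXC_MASK == 0xf8f8ff00, so HOLD
   is equivalent to

     fenv_var = __builtin_s390_efpc ();
     __builtin_s390_sfpc (fenv_var & 0x070700ff);  */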
13743
13744 /* Return the vector mode to be used for inner mode MODE when doing
13745 vectorization. */
13746 static machine_mode
13747 s390_preferred_simd_mode (machine_mode mode)
13748 {
13749 if (TARGET_VX)
13750 switch (mode)
13751 {
13752 case DFmode:
13753 return V2DFmode;
13754 case DImode:
13755 return V2DImode;
13756 case SImode:
13757 return V4SImode;
13758 case HImode:
13759 return V8HImode;
13760 case QImode:
13761 return V16QImode;
13762 default:;
13763 }
13764 return word_mode;
13765 }
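/* For illustration: with TARGET_VX, vectorizing SImode arithmetic uses
   V4SImode (four 32-bit lanes in a 128-bit vector register), while DFmode
   maps to V2DFmode; without vector support the hook simply returns
   word_mode.  */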
13766
13767 /* Our hardware does not require vectors to be strictly aligned. */
13768 static bool
13769 s390_support_vector_misalignment (machine_mode mode ATTRIBUTE_UNUSED,
13770 const_tree type ATTRIBUTE_UNUSED,
13771 int misalignment ATTRIBUTE_UNUSED,
13772 bool is_packed ATTRIBUTE_UNUSED)
13773 {
13774 if (TARGET_VX)
13775 return true;
13776
13777 return default_builtin_support_vector_misalignment (mode, type, misalignment,
13778 is_packed);
13779 }
13780
13784 /* The vector ABI requires vector types to be aligned on an 8 byte
13785 boundary (our stack alignment).  However, we allow this to be
13786 overridden by the user, although doing so breaks the ABI.  */
13784 static HOST_WIDE_INT
13785 s390_vector_alignment (const_tree type)
13786 {
13787 if (!TARGET_VX_ABI)
13788 return default_vector_alignment (type);
13789
13790 if (TYPE_USER_ALIGN (type))
13791 return TYPE_ALIGN (type);
13792
13793 return MIN (64, tree_to_shwi (TYPE_SIZE (type)));
13794 }
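/* For illustration: a 16 byte (128 bit) vector type is given
   MIN (64, 128) == 64 bit alignment under the vector ABI, while a type
   declared with a user alignment attribute keeps its requested alignment
   even though, as noted above, that can break the ABI.  */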
13795
13796 /* Implement TARGET_ASM_FILE_END. */
13797 static void
13798 s390_asm_file_end (void)
13799 {
13800 #ifdef HAVE_AS_GNU_ATTRIBUTE
13801 varpool_node *vnode;
13802 cgraph_node *cnode;
13803
13804 FOR_EACH_VARIABLE (vnode)
13805 if (TREE_PUBLIC (vnode->decl))
13806 s390_check_type_for_vector_abi (TREE_TYPE (vnode->decl), false, false);
13807
13808 FOR_EACH_FUNCTION (cnode)
13809 if (TREE_PUBLIC (cnode->decl))
13810 s390_check_type_for_vector_abi (TREE_TYPE (cnode->decl), false, false);
13811
13812
13813 if (s390_vector_abi != 0)
13814 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
13815 s390_vector_abi);
13816 #endif
13817 file_end_indicate_exec_stack ();
13818 }
13819
13820 /* Return true if TYPE is a vector bool type. */
13821 static inline bool
13822 s390_vector_bool_type_p (const_tree type)
13823 {
13824 return TYPE_VECTOR_OPAQUE (type);
13825 }
13826
13827 /* Return the diagnostic message string if the binary operation OP is
13828 not permitted on TYPE1 and TYPE2, NULL otherwise. */
13829 static const char*
13830 s390_invalid_binary_op (int op ATTRIBUTE_UNUSED, const_tree type1, const_tree type2)
13831 {
13832 bool bool1_p, bool2_p;
13833 bool plusminus_p;
13834 bool muldiv_p;
13835 bool compare_p;
13836 machine_mode mode1, mode2;
13837
13838 if (!TARGET_ZVECTOR)
13839 return NULL;
13840
13841 if (!VECTOR_TYPE_P (type1) || !VECTOR_TYPE_P (type2))
13842 return NULL;
13843
13844 bool1_p = s390_vector_bool_type_p (type1);
13845 bool2_p = s390_vector_bool_type_p (type2);
13846
13847 /* Mixing signed and unsigned types is forbidden for all
13848 operators. */
13849 if (!bool1_p && !bool2_p
13850 && TYPE_UNSIGNED (type1) != TYPE_UNSIGNED (type2))
13854 return N_("types differ in signedness");
13852
13853 plusminus_p = (op == PLUS_EXPR || op == MINUS_EXPR);
13854 muldiv_p = (op == MULT_EXPR || op == RDIV_EXPR || op == TRUNC_DIV_EXPR
13855 || op == CEIL_DIV_EXPR || op == FLOOR_DIV_EXPR
13856 || op == ROUND_DIV_EXPR);
13857 compare_p = (op == LT_EXPR || op == LE_EXPR || op == GT_EXPR || op == GE_EXPR
13858 || op == EQ_EXPR || op == NE_EXPR);
13859
13860 if (bool1_p && bool2_p && (plusminus_p || muldiv_p))
13861 return N_("binary operator does not support two vector bool operands");
13862
13863 if (bool1_p != bool2_p && (muldiv_p || compare_p))
13864 return N_("binary operator does not support vector bool operand");
13865
13866 mode1 = TYPE_MODE (type1);
13867 mode2 = TYPE_MODE (type2);
13868
13869 if (bool1_p != bool2_p && plusminus_p
13870 && (GET_MODE_CLASS (mode1) == MODE_VECTOR_FLOAT
13871 || GET_MODE_CLASS (mode2) == MODE_VECTOR_FLOAT))
13872 return N_("binary operator does not support mixing vector "
13873 "bool with floating point vector operands");
13874
13875 return NULL;
13876 }
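/* For illustration (hypothetical zvector operands): PLUS_EXPR on two vector
   bool operands is rejected with the "two vector bool operands" message,
   EQ_EXPR between a vector bool and a signed vector is rejected with the
   "vector bool operand" message, and adding a signed vector to an unsigned
   vector is rejected as differing in signedness.  */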
13877
13878 /* Initialize GCC target structure. */
13879
13880 #undef TARGET_ASM_ALIGNED_HI_OP
13881 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
13882 #undef TARGET_ASM_ALIGNED_DI_OP
13883 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
13884 #undef TARGET_ASM_INTEGER
13885 #define TARGET_ASM_INTEGER s390_assemble_integer
13886
13887 #undef TARGET_ASM_OPEN_PAREN
13888 #define TARGET_ASM_OPEN_PAREN ""
13889
13890 #undef TARGET_ASM_CLOSE_PAREN
13891 #define TARGET_ASM_CLOSE_PAREN ""
13892
13893 #undef TARGET_OPTION_OVERRIDE
13894 #define TARGET_OPTION_OVERRIDE s390_option_override
13895
13896 #undef TARGET_ENCODE_SECTION_INFO
13897 #define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
13898
13899 #undef TARGET_SCALAR_MODE_SUPPORTED_P
13900 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
13901
13902 #ifdef HAVE_AS_TLS
13903 #undef TARGET_HAVE_TLS
13904 #define TARGET_HAVE_TLS true
13905 #endif
13906 #undef TARGET_CANNOT_FORCE_CONST_MEM
13907 #define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem
13908
13909 #undef TARGET_DELEGITIMIZE_ADDRESS
13910 #define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address
13911
13912 #undef TARGET_LEGITIMIZE_ADDRESS
13913 #define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address
13914
13915 #undef TARGET_RETURN_IN_MEMORY
13916 #define TARGET_RETURN_IN_MEMORY s390_return_in_memory
13917
13918 #undef TARGET_INIT_BUILTINS
13919 #define TARGET_INIT_BUILTINS s390_init_builtins
13920 #undef TARGET_EXPAND_BUILTIN
13921 #define TARGET_EXPAND_BUILTIN s390_expand_builtin
13922 #undef TARGET_BUILTIN_DECL
13923 #define TARGET_BUILTIN_DECL s390_builtin_decl
13924
13925 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
13926 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra
13927
13928 #undef TARGET_ASM_OUTPUT_MI_THUNK
13929 #define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
13930 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
13931 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
13932
13933 #undef TARGET_SCHED_ADJUST_PRIORITY
13934 #define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
13935 #undef TARGET_SCHED_ISSUE_RATE
13936 #define TARGET_SCHED_ISSUE_RATE s390_issue_rate
13937 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
13938 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead
13939
13940 #undef TARGET_SCHED_VARIABLE_ISSUE
13941 #define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
13942 #undef TARGET_SCHED_REORDER
13943 #define TARGET_SCHED_REORDER s390_sched_reorder
13944 #undef TARGET_SCHED_INIT
13945 #define TARGET_SCHED_INIT s390_sched_init
13946
13947 #undef TARGET_CANNOT_COPY_INSN_P
13948 #define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
13949 #undef TARGET_RTX_COSTS
13950 #define TARGET_RTX_COSTS s390_rtx_costs
13951 #undef TARGET_ADDRESS_COST
13952 #define TARGET_ADDRESS_COST s390_address_cost
13953 #undef TARGET_REGISTER_MOVE_COST
13954 #define TARGET_REGISTER_MOVE_COST s390_register_move_cost
13955 #undef TARGET_MEMORY_MOVE_COST
13956 #define TARGET_MEMORY_MOVE_COST s390_memory_move_cost
13957
13958 #undef TARGET_MACHINE_DEPENDENT_REORG
13959 #define TARGET_MACHINE_DEPENDENT_REORG s390_reorg
13960
13961 #undef TARGET_VALID_POINTER_MODE
13962 #define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode
13963
13964 #undef TARGET_BUILD_BUILTIN_VA_LIST
13965 #define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
13966 #undef TARGET_EXPAND_BUILTIN_VA_START
13967 #define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
13968 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
13969 #define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg
13970
13971 #undef TARGET_PROMOTE_FUNCTION_MODE
13972 #define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
13973 #undef TARGET_PASS_BY_REFERENCE
13974 #define TARGET_PASS_BY_REFERENCE s390_pass_by_reference
13975
13976 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
13977 #define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
13978 #undef TARGET_FUNCTION_ARG
13979 #define TARGET_FUNCTION_ARG s390_function_arg
13980 #undef TARGET_FUNCTION_ARG_ADVANCE
13981 #define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
13982 #undef TARGET_FUNCTION_VALUE
13983 #define TARGET_FUNCTION_VALUE s390_function_value
13984 #undef TARGET_LIBCALL_VALUE
13985 #define TARGET_LIBCALL_VALUE s390_libcall_value
13986 #undef TARGET_STRICT_ARGUMENT_NAMING
13987 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
13988
13989 #undef TARGET_KEEP_LEAF_WHEN_PROFILED
13990 #define TARGET_KEEP_LEAF_WHEN_PROFILED s390_keep_leaf_when_profiled
13991
13992 #undef TARGET_FIXED_CONDITION_CODE_REGS
13993 #define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs
13994
13995 #undef TARGET_CC_MODES_COMPATIBLE
13996 #define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible
13997
13998 #undef TARGET_INVALID_WITHIN_DOLOOP
13999 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_insn_null
14000
14001 #ifdef HAVE_AS_TLS
14002 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
14003 #define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
14004 #endif
14005
14006 #undef TARGET_DWARF_FRAME_REG_MODE
14007 #define TARGET_DWARF_FRAME_REG_MODE s390_dwarf_frame_reg_mode
14008
14009 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
14010 #undef TARGET_MANGLE_TYPE
14011 #define TARGET_MANGLE_TYPE s390_mangle_type
14012 #endif
14013
14014 #undef TARGET_SCALAR_MODE_SUPPORTED_P
14015 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
14016
14017 #undef TARGET_VECTOR_MODE_SUPPORTED_P
14018 #define TARGET_VECTOR_MODE_SUPPORTED_P s390_vector_mode_supported_p
14019
14020 #undef TARGET_PREFERRED_RELOAD_CLASS
14021 #define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class
14022
14023 #undef TARGET_SECONDARY_RELOAD
14024 #define TARGET_SECONDARY_RELOAD s390_secondary_reload
14025
14026 #undef TARGET_LIBGCC_CMP_RETURN_MODE
14027 #define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode
14028
14029 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
14030 #define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode
14031
14032 #undef TARGET_LEGITIMATE_ADDRESS_P
14033 #define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p
14034
14035 #undef TARGET_LEGITIMATE_CONSTANT_P
14036 #define TARGET_LEGITIMATE_CONSTANT_P s390_legitimate_constant_p
14037
14038 #undef TARGET_LRA_P
14039 #define TARGET_LRA_P s390_lra_p
14040
14041 #undef TARGET_CAN_ELIMINATE
14042 #define TARGET_CAN_ELIMINATE s390_can_eliminate
14043
14044 #undef TARGET_CONDITIONAL_REGISTER_USAGE
14045 #define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage
14046
14047 #undef TARGET_LOOP_UNROLL_ADJUST
14048 #define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust
14049
14050 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
14051 #define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
14052 #undef TARGET_TRAMPOLINE_INIT
14053 #define TARGET_TRAMPOLINE_INIT s390_trampoline_init
14054
14055 #undef TARGET_UNWIND_WORD_MODE
14056 #define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode
14057
14058 #undef TARGET_CANONICALIZE_COMPARISON
14059 #define TARGET_CANONICALIZE_COMPARISON s390_canonicalize_comparison
14060
14061 #undef TARGET_HARD_REGNO_SCRATCH_OK
14062 #define TARGET_HARD_REGNO_SCRATCH_OK s390_hard_regno_scratch_ok
14063
14064 #undef TARGET_ATTRIBUTE_TABLE
14065 #define TARGET_ATTRIBUTE_TABLE s390_attribute_table
14066
14067 #undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
14068 #define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true
14069
14070 #undef TARGET_SET_UP_BY_PROLOGUE
14071 #define TARGET_SET_UP_BY_PROLOGUE s300_set_up_by_prologue
14072
14073 #undef TARGET_USE_BY_PIECES_INFRASTRUCTURE_P
14074 #define TARGET_USE_BY_PIECES_INFRASTRUCTURE_P \
14075 s390_use_by_pieces_infrastructure_p
14076
14077 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
14078 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV s390_atomic_assign_expand_fenv
14079
14080 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
14081 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN s390_invalid_arg_for_unprototyped_fn
14082
14083 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
14084 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE s390_preferred_simd_mode
14085
14086 #undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
14087 #define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT s390_support_vector_misalignment
14088
14089 #undef TARGET_VECTOR_ALIGNMENT
14090 #define TARGET_VECTOR_ALIGNMENT s390_vector_alignment
14091
14092 #undef TARGET_INVALID_BINARY_OP
14093 #define TARGET_INVALID_BINARY_OP s390_invalid_binary_op
14094
14095 #undef TARGET_ASM_FILE_END
14096 #define TARGET_ASM_FILE_END s390_asm_file_end
14097
14098 struct gcc_target targetm = TARGET_INITIALIZER;
14099
14100 #include "gt-s390.h"