1 /* Subroutines used for code generation on IBM S/390 and zSeries
2 Copyright (C) 1999-2015 Free Software Foundation, Inc.
3 Contributed by Hartmut Penner (hpenner@de.ibm.com) and
4 Ulrich Weigand (uweigand@de.ibm.com) and
5 Andreas Krebbel (Andreas.Krebbel@de.ibm.com).
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "alias.h"
29 #include "symtab.h"
30 #include "tree.h"
31 #include "fold-const.h"
32 #include "print-tree.h"
33 #include "stringpool.h"
34 #include "stor-layout.h"
35 #include "varasm.h"
36 #include "calls.h"
37 #include "tm_p.h"
38 #include "regs.h"
39 #include "hard-reg-set.h"
40 #include "insn-config.h"
41 #include "conditions.h"
42 #include "output.h"
43 #include "insn-attr.h"
44 #include "flags.h"
45 #include "except.h"
46 #include "function.h"
47 #include "recog.h"
48 #include "expmed.h"
49 #include "dojump.h"
50 #include "explow.h"
51 #include "emit-rtl.h"
52 #include "stmt.h"
53 #include "expr.h"
54 #include "reload.h"
55 #include "diagnostic-core.h"
56 #include "predict.h"
57 #include "dominance.h"
58 #include "cfg.h"
59 #include "cfgrtl.h"
60 #include "cfganal.h"
61 #include "lcm.h"
62 #include "cfgbuild.h"
63 #include "cfgcleanup.h"
64 #include "basic-block.h"
65 #include "target.h"
66 #include "target-def.h"
67 #include "debug.h"
68 #include "langhooks.h"
69 #include "insn-codes.h"
70 #include "optabs.h"
71 #include "tree-ssa-alias.h"
72 #include "internal-fn.h"
73 #include "gimple-fold.h"
74 #include "tree-eh.h"
75 #include "gimple-expr.h"
76 #include "gimple.h"
77 #include "gimplify.h"
78 #include "df.h"
79 #include "params.h"
80 #include "cfgloop.h"
81 #include "opts.h"
82 #include "tree-pass.h"
83 #include "context.h"
84 #include "builtins.h"
85 #include "rtl-iter.h"
86 #include "intl.h"
87 #include "plugin-api.h"
88 #include "ipa-ref.h"
89 #include "cgraph.h"
90
91 /* Define the specific costs for a given cpu. */
92
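/* The values below are expressed with COSTS_N_INSNS, which scales an
   instruction count into GCC's internal cost units (see rtl.h); e.g.
   COSTS_N_INSNS (5) stands for roughly the cost of five simple
   instructions. */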
93 struct processor_costs
94 {
95 /* multiplication */
96 const int m; /* cost of an M instruction. */
97 const int mghi; /* cost of an MGHI instruction. */
98 const int mh; /* cost of an MH instruction. */
99 const int mhi; /* cost of an MHI instruction. */
100 const int ml; /* cost of an ML instruction. */
101 const int mr; /* cost of an MR instruction. */
102 const int ms; /* cost of an MS instruction. */
103 const int msg; /* cost of an MSG instruction. */
104 const int msgf; /* cost of an MSGF instruction. */
105 const int msgfr; /* cost of an MSGFR instruction. */
106 const int msgr; /* cost of an MSGR instruction. */
107 const int msr; /* cost of an MSR instruction. */
108 const int mult_df; /* cost of multiplication in DFmode. */
109 const int mxbr; /* cost of an MXBR instruction. */
110 /* square root */
111 const int sqxbr; /* cost of square root in TFmode. */
112 const int sqdbr; /* cost of square root in DFmode. */
113 const int sqebr; /* cost of square root in SFmode. */
114 /* multiply and add */
115 const int madbr; /* cost of multiply and add in DFmode. */
116 const int maebr; /* cost of multiply and add in SFmode. */
117 /* division */
118 const int dxbr; /* cost of a DXBR instruction. */
119 const int ddbr; /* cost of a DDBR instruction. */
120 const int debr; /* cost of a DEBR instruction. */
121 const int dlgr; /* cost of a DLGR instruction. */
122 const int dlr; /* cost of a DLR instruction. */
123 const int dr; /* cost of a DR instruction. */
124 const int dsgfr; /* cost of a DSGFR instruction. */
125 const int dsgr; /* cost of a DSGR instruction. */
126 };
127
128 const struct processor_costs *s390_cost;
129
130 static const
131 struct processor_costs z900_cost =
132 {
133 COSTS_N_INSNS (5), /* M */
134 COSTS_N_INSNS (10), /* MGHI */
135 COSTS_N_INSNS (5), /* MH */
136 COSTS_N_INSNS (4), /* MHI */
137 COSTS_N_INSNS (5), /* ML */
138 COSTS_N_INSNS (5), /* MR */
139 COSTS_N_INSNS (4), /* MS */
140 COSTS_N_INSNS (15), /* MSG */
141 COSTS_N_INSNS (7), /* MSGF */
142 COSTS_N_INSNS (7), /* MSGFR */
143 COSTS_N_INSNS (10), /* MSGR */
144 COSTS_N_INSNS (4), /* MSR */
145 COSTS_N_INSNS (7), /* multiplication in DFmode */
146 COSTS_N_INSNS (13), /* MXBR */
147 COSTS_N_INSNS (136), /* SQXBR */
148 COSTS_N_INSNS (44), /* SQDBR */
149 COSTS_N_INSNS (35), /* SQEBR */
150 COSTS_N_INSNS (18), /* MADBR */
151 COSTS_N_INSNS (13), /* MAEBR */
152 COSTS_N_INSNS (134), /* DXBR */
153 COSTS_N_INSNS (30), /* DDBR */
154 COSTS_N_INSNS (27), /* DEBR */
155 COSTS_N_INSNS (220), /* DLGR */
156 COSTS_N_INSNS (34), /* DLR */
157 COSTS_N_INSNS (34), /* DR */
158 COSTS_N_INSNS (32), /* DSGFR */
159 COSTS_N_INSNS (32), /* DSGR */
160 };
161
162 static const
163 struct processor_costs z990_cost =
164 {
165 COSTS_N_INSNS (4), /* M */
166 COSTS_N_INSNS (2), /* MGHI */
167 COSTS_N_INSNS (2), /* MH */
168 COSTS_N_INSNS (2), /* MHI */
169 COSTS_N_INSNS (4), /* ML */
170 COSTS_N_INSNS (4), /* MR */
171 COSTS_N_INSNS (5), /* MS */
172 COSTS_N_INSNS (6), /* MSG */
173 COSTS_N_INSNS (4), /* MSGF */
174 COSTS_N_INSNS (4), /* MSGFR */
175 COSTS_N_INSNS (4), /* MSGR */
176 COSTS_N_INSNS (4), /* MSR */
177 COSTS_N_INSNS (1), /* multiplication in DFmode */
178 COSTS_N_INSNS (28), /* MXBR */
179 COSTS_N_INSNS (130), /* SQXBR */
180 COSTS_N_INSNS (66), /* SQDBR */
181 COSTS_N_INSNS (38), /* SQEBR */
182 COSTS_N_INSNS (1), /* MADBR */
183 COSTS_N_INSNS (1), /* MAEBR */
184 COSTS_N_INSNS (60), /* DXBR */
185 COSTS_N_INSNS (40), /* DDBR */
186 COSTS_N_INSNS (26), /* DEBR */
187 COSTS_N_INSNS (176), /* DLGR */
188 COSTS_N_INSNS (31), /* DLR */
189 COSTS_N_INSNS (31), /* DR */
190 COSTS_N_INSNS (31), /* DSGFR */
191 COSTS_N_INSNS (31), /* DSGR */
192 };
193
194 static const
195 struct processor_costs z9_109_cost =
196 {
197 COSTS_N_INSNS (4), /* M */
198 COSTS_N_INSNS (2), /* MGHI */
199 COSTS_N_INSNS (2), /* MH */
200 COSTS_N_INSNS (2), /* MHI */
201 COSTS_N_INSNS (4), /* ML */
202 COSTS_N_INSNS (4), /* MR */
203 COSTS_N_INSNS (5), /* MS */
204 COSTS_N_INSNS (6), /* MSG */
205 COSTS_N_INSNS (4), /* MSGF */
206 COSTS_N_INSNS (4), /* MSGFR */
207 COSTS_N_INSNS (4), /* MSGR */
208 COSTS_N_INSNS (4), /* MSR */
209 COSTS_N_INSNS (1), /* multiplication in DFmode */
210 COSTS_N_INSNS (28), /* MXBR */
211 COSTS_N_INSNS (130), /* SQXBR */
212 COSTS_N_INSNS (66), /* SQDBR */
213 COSTS_N_INSNS (38), /* SQEBR */
214 COSTS_N_INSNS (1), /* MADBR */
215 COSTS_N_INSNS (1), /* MAEBR */
216 COSTS_N_INSNS (60), /* DXBR */
217 COSTS_N_INSNS (40), /* DDBR */
218 COSTS_N_INSNS (26), /* DEBR */
219 COSTS_N_INSNS (30), /* DLGR */
220 COSTS_N_INSNS (23), /* DLR */
221 COSTS_N_INSNS (23), /* DR */
222 COSTS_N_INSNS (24), /* DSGFR */
223 COSTS_N_INSNS (24), /* DSGR */
224 };
225
226 static const
227 struct processor_costs z10_cost =
228 {
229 COSTS_N_INSNS (10), /* M */
230 COSTS_N_INSNS (10), /* MGHI */
231 COSTS_N_INSNS (10), /* MH */
232 COSTS_N_INSNS (10), /* MHI */
233 COSTS_N_INSNS (10), /* ML */
234 COSTS_N_INSNS (10), /* MR */
235 COSTS_N_INSNS (10), /* MS */
236 COSTS_N_INSNS (10), /* MSG */
237 COSTS_N_INSNS (10), /* MSGF */
238 COSTS_N_INSNS (10), /* MSGFR */
239 COSTS_N_INSNS (10), /* MSGR */
240 COSTS_N_INSNS (10), /* MSR */
241 COSTS_N_INSNS (1), /* multiplication in DFmode */
242 COSTS_N_INSNS (50), /* MXBR */
243 COSTS_N_INSNS (120), /* SQXBR */
244 COSTS_N_INSNS (52), /* SQDBR */
245 COSTS_N_INSNS (38), /* SQEBR */
246 COSTS_N_INSNS (1), /* MADBR */
247 COSTS_N_INSNS (1), /* MAEBR */
248 COSTS_N_INSNS (111), /* DXBR */
249 COSTS_N_INSNS (39), /* DDBR */
250 COSTS_N_INSNS (32), /* DEBR */
251 COSTS_N_INSNS (160), /* DLGR */
252 COSTS_N_INSNS (71), /* DLR */
253 COSTS_N_INSNS (71), /* DR */
254 COSTS_N_INSNS (71), /* DSGFR */
255 COSTS_N_INSNS (71), /* DSGR */
256 };
257
258 static const
259 struct processor_costs z196_cost =
260 {
261 COSTS_N_INSNS (7), /* M */
262 COSTS_N_INSNS (5), /* MGHI */
263 COSTS_N_INSNS (5), /* MH */
264 COSTS_N_INSNS (5), /* MHI */
265 COSTS_N_INSNS (7), /* ML */
266 COSTS_N_INSNS (7), /* MR */
267 COSTS_N_INSNS (6), /* MS */
268 COSTS_N_INSNS (8), /* MSG */
269 COSTS_N_INSNS (6), /* MSGF */
270 COSTS_N_INSNS (6), /* MSGFR */
271 COSTS_N_INSNS (8), /* MSGR */
272 COSTS_N_INSNS (6), /* MSR */
273 COSTS_N_INSNS (1), /* multiplication in DFmode */
274 COSTS_N_INSNS (40), /* MXBR B+40 */
275 COSTS_N_INSNS (100), /* SQXBR B+100 */
276 COSTS_N_INSNS (42), /* SQDBR B+42 */
277 COSTS_N_INSNS (28), /* SQEBR B+28 */
278 COSTS_N_INSNS (1), /* MADBR B */
279 COSTS_N_INSNS (1), /* MAEBR B */
280 COSTS_N_INSNS (101), /* DXBR B+101 */
281 COSTS_N_INSNS (29), /* DDBR */
282 COSTS_N_INSNS (22), /* DEBR */
283 COSTS_N_INSNS (160), /* DLGR cracked */
284 COSTS_N_INSNS (160), /* DLR cracked */
285 COSTS_N_INSNS (160), /* DR expanded */
286 COSTS_N_INSNS (160), /* DSGFR cracked */
287 COSTS_N_INSNS (160), /* DSGR cracked */
288 };
289
290 static const
291 struct processor_costs zEC12_cost =
292 {
293 COSTS_N_INSNS (7), /* M */
294 COSTS_N_INSNS (5), /* MGHI */
295 COSTS_N_INSNS (5), /* MH */
296 COSTS_N_INSNS (5), /* MHI */
297 COSTS_N_INSNS (7), /* ML */
298 COSTS_N_INSNS (7), /* MR */
299 COSTS_N_INSNS (6), /* MS */
300 COSTS_N_INSNS (8), /* MSG */
301 COSTS_N_INSNS (6), /* MSGF */
302 COSTS_N_INSNS (6), /* MSGFR */
303 COSTS_N_INSNS (8), /* MSGR */
304 COSTS_N_INSNS (6), /* MSR */
305 COSTS_N_INSNS (1), /* multiplication in DFmode */
306 COSTS_N_INSNS (40), /* MXBR B+40 */
307 COSTS_N_INSNS (100), /* SQXBR B+100 */
308 COSTS_N_INSNS (42), /* SQDBR B+42 */
309 COSTS_N_INSNS (28), /* SQEBR B+28 */
310 COSTS_N_INSNS (1), /* MADBR B */
311 COSTS_N_INSNS (1), /* MAEBR B */
312 COSTS_N_INSNS (131), /* DXBR B+131 */
313 COSTS_N_INSNS (29), /* DDBR */
314 COSTS_N_INSNS (22), /* DEBR */
315 COSTS_N_INSNS (160), /* DLGR cracked */
316 COSTS_N_INSNS (160), /* DLR cracked */
317 COSTS_N_INSNS (160), /* DR expanded */
318 COSTS_N_INSNS (160), /* DSGFR cracked */
319 COSTS_N_INSNS (160), /* DSGR cracked */
320 };
321
322 extern int reload_completed;
323
324 /* Kept up to date using the SCHED_VARIABLE_ISSUE hook. */
325 static rtx_insn *last_scheduled_insn;
326
327 /* Structure used to hold the components of an S/390 memory
328 address. A legitimate address on S/390 is of the general
329 form
330 base + index + displacement
331 where any of the components is optional.
332
333 base and index are registers of the class ADDR_REGS,
334 displacement is an unsigned 12-bit immediate constant. */
335
336 struct s390_address
337 {
338 rtx base;
339 rtx indx;
340 rtx disp;
341 bool pointer;
342 bool literal_pool;
343 };
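/* Illustrative example (not taken from actual compiler output): the memory
   operand of an RX-format instruction such as "l %r1,8(%r2,%r3)" would be
   decomposed into base = (reg %r3), indx = (reg %r2) and disp = (const_int 8),
   with the pointer and literal_pool flags typically left false. */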
344
345 /* The following structure is embedded in the machine
346 specific part of struct function. */
347
348 struct GTY (()) s390_frame_layout
349 {
350 /* Offset within stack frame. */
351 HOST_WIDE_INT gprs_offset;
352 HOST_WIDE_INT f0_offset;
353 HOST_WIDE_INT f4_offset;
354 HOST_WIDE_INT f8_offset;
355 HOST_WIDE_INT backchain_offset;
356
357 /* Numbers of the first and last GPRs for which slots in the
358 register save area are reserved. */
359 int first_save_gpr_slot;
360 int last_save_gpr_slot;
361
362 /* Locations (FP register numbers) where the GPRs (r0-r15) should
363 be saved to:
364 0 - the register does not need to be saved at all
365 -1 - save to a stack slot */
366 signed char gpr_save_slots[16];
367
368 /* Number of first and last gpr to be saved, restored. */
369 int first_save_gpr;
370 int first_restore_gpr;
371 int last_save_gpr;
372 int last_restore_gpr;
373
374 /* Bits standing for floating point registers. Set if the
375 respective register has to be saved. Starting with reg 16 (f0)
376 at the rightmost bit.
377 Bit 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
378 fpr 15 13 11 9 14 12 10 8 7 5 3 1 6 4 2 0
379 reg 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 */
380 unsigned int fpr_bitmap;
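/* Worked example of the layout above: if only f0 (hard reg 16) and
   f8 (hard reg 24) have to be saved, fpr_bitmap contains
   (1 << 0) | (1 << 8) == 0x101. */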
381
382 /* Number of floating point registers f8-f15 which must be saved. */
383 int high_fprs;
384
385 /* Set if return address needs to be saved.
386 This flag is set by s390_return_addr_rtx if it could not use
387 the initial value of r14 and therefore depends on r14 saved
388 to the stack. */
389 bool save_return_addr_p;
390
391 /* Size of stack frame. */
392 HOST_WIDE_INT frame_size;
393 };
394
395 /* Define the structure for the machine field in struct function. */
396
397 struct GTY(()) machine_function
398 {
399 struct s390_frame_layout frame_layout;
400
401 /* Literal pool base register. */
402 rtx base_reg;
403
404 /* True if we may need to perform branch splitting. */
405 bool split_branches_pending_p;
406
407 bool has_landing_pad_p;
408
409 /* True if the current function may contain a tbegin clobbering
410 FPRs. */
411 bool tbegin_p;
412 };
413
414 /* A few accessor macros for struct cfun->machine->s390_frame_layout. */
415
416 #define cfun_frame_layout (cfun->machine->frame_layout)
417 #define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
418 #define cfun_save_arg_fprs_p (!!(TARGET_64BIT \
419 ? cfun_frame_layout.fpr_bitmap & 0x0f \
420 : cfun_frame_layout.fpr_bitmap & 0x03))
421 #define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
422 cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
423 #define cfun_set_fpr_save(REGNO) (cfun->machine->frame_layout.fpr_bitmap |= \
424 (1 << (REGNO - FPR0_REGNUM)))
425 #define cfun_fpr_save_p(REGNO) (!!(cfun->machine->frame_layout.fpr_bitmap & \
426 (1 << (REGNO - FPR0_REGNUM))))
427 #define cfun_gpr_save_slot(REGNO) \
428 cfun->machine->frame_layout.gpr_save_slots[REGNO]
429
430 /* Number of GPRs and FPRs used for argument passing. */
431 #define GP_ARG_NUM_REG 5
432 #define FP_ARG_NUM_REG (TARGET_64BIT ? 4 : 2)
433 #define VEC_ARG_NUM_REG 8
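/* For background (not derived from this file): these counts correspond to
   the s390 ELF ABI argument registers r2-r6 for GPRs, f0/f2/f4/f6 on
   64 bit (f0/f2 on 31 bit) for FPRs, and v24-v31 for vector arguments. */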
434
435 /* A couple of shortcuts. */
436 #define CONST_OK_FOR_J(x) \
437 CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
438 #define CONST_OK_FOR_K(x) \
439 CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
440 #define CONST_OK_FOR_Os(x) \
441 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
442 #define CONST_OK_FOR_Op(x) \
443 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
444 #define CONST_OK_FOR_On(x) \
445 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
446
447 #define REGNO_PAIR_OK(REGNO, MODE) \
448 (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
449
450 /* The read-ahead distance, in bytes, of the dynamic branch
451 prediction unit on a z10 (or higher) CPU. */
452 #define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
453
454
455 /* Indicate which ABI has been used for passing vector args.
456 0 - no vector type arguments have been passed where the ABI is relevant
457 1 - the old ABI has been used
458 2 - a vector type argument has been passed either in a vector register
459 or on the stack by value */
460 static int s390_vector_abi = 0;
461
462 /* Set the vector ABI marker if TYPE is subject to the vector ABI
463 switch. The vector ABI affects only vector data types. There are
464 two aspects of the vector ABI relevant here:
465
466 1. vectors >= 16 bytes have an alignment of 8 bytes with the new
467 ABI and natural alignment with the old.
468
469 2. vectors <= 16 bytes are passed in VRs or by value on the stack
470 with the new ABI but by reference on the stack with the old.
471
472 If ARG_P is true, TYPE is used for a function argument or return
473 value. The ABI marker is then set for all vector data types. If
474 ARG_P is false, only vectors covered by case 1 above are checked. */
475
476 static void
477 s390_check_type_for_vector_abi (const_tree type, bool arg_p, bool in_struct_p)
478 {
479 static hash_set<const_tree> visited_types_hash;
480
481 if (s390_vector_abi)
482 return;
483
484 if (type == NULL_TREE || TREE_CODE (type) == ERROR_MARK)
485 return;
486
487 if (visited_types_hash.contains (type))
488 return;
489
490 visited_types_hash.add (type);
491
492 if (VECTOR_TYPE_P (type))
493 {
494 int type_size = int_size_in_bytes (type);
495
496 /* Outside of arguments only the alignment changes, and this
497 only happens for vector types >= 16 bytes. */
498 if (!arg_p && type_size < 16)
499 return;
500
501 /* In arguments, vector types > 16 bytes are passed as before
502 (GCC never enforced the bigger alignment for arguments that
503 was required by the old vector ABI). However, this might still
504 be ABI relevant due to the changed alignment if the type is a
505 struct member. */
506 if (arg_p && type_size > 16 && !in_struct_p)
507 return;
508
509 s390_vector_abi = TARGET_VX_ABI ? 2 : 1;
510 }
511 else if (POINTER_TYPE_P (type) || TREE_CODE (type) == ARRAY_TYPE)
512 {
513 /* ARRAY_TYPE: Since neither of the ABIs uses more than
514 natural alignment, there will never be ABI-dependent padding
515 in an array type. That's why we do not set in_struct_p to
516 true here. */
517 s390_check_type_for_vector_abi (TREE_TYPE (type), arg_p, in_struct_p);
518 }
519 else if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
520 {
521 tree arg_chain;
522
523 /* Check the return type. */
524 s390_check_type_for_vector_abi (TREE_TYPE (type), true, false);
525
526 for (arg_chain = TYPE_ARG_TYPES (type);
527 arg_chain;
528 arg_chain = TREE_CHAIN (arg_chain))
529 s390_check_type_for_vector_abi (TREE_VALUE (arg_chain), true, false);
530 }
531 else if (RECORD_OR_UNION_TYPE_P (type))
532 {
533 tree field;
534
535 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
536 {
537 if (TREE_CODE (field) != FIELD_DECL)
538 continue;
539
540 s390_check_type_for_vector_abi (TREE_TYPE (field), arg_p, true);
541 }
542 }
543 }
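/* Illustration of the check above with hypothetical user code (not part of
   this file):

     typedef int v4si __attribute__ ((vector_size (16)));
     void foo (v4si x);

   The parameter type is a 16-byte vector used as a function argument, so
   the marker is set to 2 under the new vector ABI (TARGET_VX_ABI) and to 1
   otherwise. */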
544
545
546 /* System z builtins. */
547
548 #include "s390-builtins.h"
549
550 const unsigned int flags_builtin[S390_BUILTIN_MAX + 1] =
551 {
552 #undef B_DEF
553 #undef OB_DEF
554 #undef OB_DEF_VAR
555 #define B_DEF(NAME, PATTERN, ATTRS, FLAGS, FNTYPE) FLAGS,
556 #define OB_DEF(...)
557 #define OB_DEF_VAR(...)
558 #include "s390-builtins.def"
559 0
560 };
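/* The tables in this block rely on the X-macro technique: the macros are
   redefined before each inclusion of s390-builtins.def so that every
   B_DEF/OB_DEF_VAR line expands to one array element. A minimal sketch of
   the idea (illustrative, not the real definitions):

     #define B_DEF(NAME, PATTERN, ATTRS, FLAGS, FNTYPE) FLAGS,
     #include "s390-builtins.def"

   which yields one FLAGS initializer per builtin, indexed by its enum
   value. */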
561
562 const unsigned int flags_overloaded_builtin_var[S390_OVERLOADED_BUILTIN_VAR_MAX + 1] =
563 {
564 #undef B_DEF
565 #undef OB_DEF
566 #undef OB_DEF_VAR
567 #define B_DEF(...)
568 #define OB_DEF(...)
569 #define OB_DEF_VAR(NAME, PATTERN, FLAGS, FNTYPE) FLAGS,
570 #include "s390-builtins.def"
571 0
572 };
573
574 tree s390_builtin_types[BT_MAX];
575 tree s390_builtin_fn_types[BT_FN_MAX];
576 tree s390_builtin_decls[S390_BUILTIN_MAX +
577 S390_OVERLOADED_BUILTIN_MAX +
578 S390_OVERLOADED_BUILTIN_VAR_MAX];
579
580 static enum insn_code const code_for_builtin[S390_BUILTIN_MAX + 1] = {
581 #undef B_DEF
582 #undef OB_DEF
583 #undef OB_DEF_VAR
584 #define B_DEF(NAME, PATTERN, ...) CODE_FOR_##PATTERN,
585 #define OB_DEF(...)
586 #define OB_DEF_VAR(...)
587
588 #include "s390-builtins.def"
589 CODE_FOR_nothing
590 };
591
592 static void
593 s390_init_builtins (void)
594 {
595 /* These definitions are being used in s390-builtins.def. */
596 tree returns_twice_attr = tree_cons (get_identifier ("returns_twice"),
597 NULL, NULL);
598 tree noreturn_attr = tree_cons (get_identifier ("noreturn"), NULL, NULL);
599 tree c_uint64_type_node;
600
601 /* The uint64_type_node from tree.c is not compatible with the C99
602 uint64_t data type. What we want is c_uint64_type_node from
603 c-common.c. But since backend code is not supposed to interface
604 with the frontend, we recreate it here. */
605 if (TARGET_64BIT)
606 c_uint64_type_node = long_unsigned_type_node;
607 else
608 c_uint64_type_node = long_long_unsigned_type_node;
609
610 #undef DEF_TYPE
611 #define DEF_TYPE(INDEX, NODE, CONST_P) \
612 s390_builtin_types[INDEX] = (!CONST_P) ? \
613 (NODE) : build_type_variant ((NODE), 1, 0);
614
615 #undef DEF_POINTER_TYPE
616 #define DEF_POINTER_TYPE(INDEX, INDEX_BASE) \
617 s390_builtin_types[INDEX] = \
618 build_pointer_type (s390_builtin_types[INDEX_BASE]);
619
620 #undef DEF_DISTINCT_TYPE
621 #define DEF_DISTINCT_TYPE(INDEX, INDEX_BASE) \
622 s390_builtin_types[INDEX] = \
623 build_distinct_type_copy (s390_builtin_types[INDEX_BASE]);
624
625 #undef DEF_VECTOR_TYPE
626 #define DEF_VECTOR_TYPE(INDEX, INDEX_BASE, ELEMENTS) \
627 s390_builtin_types[INDEX] = \
628 build_vector_type (s390_builtin_types[INDEX_BASE], ELEMENTS);
629
630 #undef DEF_OPAQUE_VECTOR_TYPE
631 #define DEF_OPAQUE_VECTOR_TYPE(INDEX, INDEX_BASE, ELEMENTS) \
632 s390_builtin_types[INDEX] = \
633 build_opaque_vector_type (s390_builtin_types[INDEX_BASE], ELEMENTS);
634
635 #undef DEF_FN_TYPE
636 #define DEF_FN_TYPE(INDEX, args...) \
637 s390_builtin_fn_types[INDEX] = \
638 build_function_type_list (args, NULL_TREE);
639 #undef DEF_OV_TYPE
640 #define DEF_OV_TYPE(...)
641 #include "s390-builtin-types.def"
642
643 #undef B_DEF
644 #define B_DEF(NAME, PATTERN, ATTRS, FLAGS, FNTYPE) \
645 s390_builtin_decls[S390_BUILTIN_##NAME] = \
646 add_builtin_function ("__builtin_" #NAME, \
647 s390_builtin_fn_types[FNTYPE], \
648 S390_BUILTIN_##NAME, \
649 BUILT_IN_MD, \
650 NULL, \
651 ATTRS);
652 #undef OB_DEF
653 #define OB_DEF(NAME, FIRST_VAR_NAME, LAST_VAR_NAME, FNTYPE) \
654 s390_builtin_decls[S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX] = \
655 add_builtin_function ("__builtin_" #NAME, \
656 s390_builtin_fn_types[FNTYPE], \
657 S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX, \
658 BUILT_IN_MD, \
659 NULL, \
660 0);
661 #undef OB_DEF_VAR
662 #define OB_DEF_VAR(...)
663 #include "s390-builtins.def"
664
665 }
666
667 /* Return true if ARG is appropriate as argument number ARGNUM of
668 builtin DECL. The operand flags from s390-builtins.def have to
669 be passed as OP_FLAGS. */
670 bool
671 s390_const_operand_ok (tree arg, int argnum, int op_flags, tree decl)
672 {
673 if (O_UIMM_P (op_flags))
674 {
675 int bitwidths[] = { 1, 2, 3, 4, 5, 8, 12, 16, 32 };
676 int bitwidth = bitwidths[op_flags - O_U1];
677
678 if (!tree_fits_uhwi_p (arg)
679 || tree_to_uhwi (arg) > ((unsigned HOST_WIDE_INT)1 << bitwidth) - 1)
680 {
681 error("constant argument %d for builtin %qF is out of range (0.."
682 HOST_WIDE_INT_PRINT_UNSIGNED ")",
683 argnum, decl,
684 ((unsigned HOST_WIDE_INT)1 << bitwidth) - 1);
685 return false;
686 }
687 }
688
689 if (O_SIMM_P (op_flags))
690 {
691 int bitwidths[] = { 2, 3, 4, 5, 8, 12, 16, 32 };
692 int bitwidth = bitwidths[op_flags - O_S2];
693
694 if (!tree_fits_shwi_p (arg)
695 || tree_to_shwi (arg) < -((HOST_WIDE_INT)1 << (bitwidth - 1))
696 || tree_to_shwi (arg) > (((HOST_WIDE_INT)1 << (bitwidth - 1)) - 1))
697 {
698 error("constant argument %d for builtin %qF is out of range ("
699 HOST_WIDE_INT_PRINT_DEC ".."
700 HOST_WIDE_INT_PRINT_DEC ")",
701 argnum, decl,
702 -(HOST_WIDE_INT)1 << (bitwidth - 1),
703 ((HOST_WIDE_INT)1 << (bitwidth - 1)) - 1);
704 return false;
705 }
706 }
707 return true;
708 }
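/* Worked example for the range checks above: an unsigned immediate operand
   with bitwidth 4 must lie in 0 .. (1 << 4) - 1 = 0..15, and a signed
   immediate with bitwidth 4 in -(1 << 3) .. (1 << 3) - 1 = -8..7
   (illustrative arithmetic only). */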
709
710 /* Expand an expression EXP that calls a built-in function,
711 with result going to TARGET if that's convenient
712 (and in mode MODE if that's convenient).
713 SUBTARGET may be used as the target for computing one of EXP's operands.
714 IGNORE is nonzero if the value is to be ignored. */
715
716 static rtx
717 s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
718 machine_mode mode ATTRIBUTE_UNUSED,
719 int ignore ATTRIBUTE_UNUSED)
720 {
721 #define MAX_ARGS 5
722
723 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
724 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
725 enum insn_code icode;
726 rtx op[MAX_ARGS], pat;
727 int arity;
728 bool nonvoid;
729 tree arg;
730 call_expr_arg_iterator iter;
731 unsigned int all_op_flags = flags_for_builtin (fcode);
732 machine_mode last_vec_mode = VOIDmode;
733
734 if (TARGET_DEBUG_ARG)
735 {
736 fprintf (stderr,
737 "s390_expand_builtin, code = %4d, %s\n",
738 (int)fcode, IDENTIFIER_POINTER (DECL_NAME (fndecl)));
739 }
740
741
742 if (fcode >= S390_OVERLOADED_BUILTIN_VAR_OFFSET
743 && fcode < S390_ALL_BUILTIN_MAX)
744 {
745 gcc_unreachable ();
746 }
747 else if (fcode < S390_OVERLOADED_BUILTIN_OFFSET)
748 {
749 icode = code_for_builtin[fcode];
750 /* Set a flag in the machine specific cfun part in order to support
751 saving/restoring of FPRs. */
752 if (fcode == S390_BUILTIN_tbegin || fcode == S390_BUILTIN_tbegin_retry)
753 cfun->machine->tbegin_p = true;
754 }
755 else if (fcode < S390_OVERLOADED_BUILTIN_VAR_OFFSET)
756 {
757 error ("Unresolved overloaded builtin");
758 return const0_rtx;
759 }
760 else
761 internal_error ("bad builtin fcode");
762
763 if (icode == 0)
764 internal_error ("bad builtin icode");
765
766 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
767
768 if (nonvoid)
769 {
770 machine_mode tmode = insn_data[icode].operand[0].mode;
771 if (!target
772 || GET_MODE (target) != tmode
773 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
774 target = gen_reg_rtx (tmode);
775
776 /* There are builtins (e.g. vec_promote) with no vector
777 arguments but an element selector. So we have to also look
778 at the vector return type when emitting the modulo
779 operation. */
780 if (VECTOR_MODE_P (insn_data[icode].operand[0].mode))
781 last_vec_mode = insn_data[icode].operand[0].mode;
782 }
783
784 arity = 0;
785 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
786 {
787 const struct insn_operand_data *insn_op;
788 unsigned int op_flags = all_op_flags & ((1 << O_SHIFT) - 1);
789
790 all_op_flags = all_op_flags >> O_SHIFT;
791
792 if (arg == error_mark_node)
793 return NULL_RTX;
794 if (arity >= MAX_ARGS)
795 return NULL_RTX;
796
797 if (O_IMM_P (op_flags)
798 && TREE_CODE (arg) != INTEGER_CST)
799 {
800 error ("constant value required for builtin %qF argument %d",
801 fndecl, arity + 1);
802 return const0_rtx;
803 }
804
805 if (!s390_const_operand_ok (arg, arity + 1, op_flags, fndecl))
806 return const0_rtx;
807
808 insn_op = &insn_data[icode].operand[arity + nonvoid];
809 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
810
811 /* Wrap the expanded RTX for pointer types into a MEM expr with
812 the proper mode. This allows us to use e.g. (match_operand
813 "memory_operand" ...) in the insn patterns instead of (mem
814 (match_operand "address_operand" ...)). This is helpful for
815 patterns that do not just accept MEMs. */
816 if (POINTER_TYPE_P (TREE_TYPE (arg))
817 && insn_op->predicate != address_operand)
818 op[arity] = gen_rtx_MEM (insn_op->mode, op[arity]);
819
820 /* Expand the modulo operation required on element selectors. */
821 if (op_flags == O_ELEM)
822 {
823 gcc_assert (last_vec_mode != VOIDmode);
824 op[arity] = simplify_expand_binop (SImode, code_to_optab (AND),
825 op[arity],
826 GEN_INT (GET_MODE_NUNITS (last_vec_mode) - 1),
827 NULL_RTX, 1, OPTAB_DIRECT);
828 }
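/* E.g. for a V4SImode last_vec_mode the selector is ANDed with
   GET_MODE_NUNITS - 1 == 3, so an out-of-range selector of 5 wraps
   around to 1 (illustrative). */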
829
830 /* Record the vector mode used for an element selector. This assumes:
831 1. There is no builtin with two different vector modes and an element selector
832 2. The element selector comes after the vector type it is referring to.
833 This is currently true for all the builtins, but FIXME: we
834 should check for that. */
835 if (VECTOR_MODE_P (insn_op->mode))
836 last_vec_mode = insn_op->mode;
837
838 if (insn_op->predicate (op[arity], insn_op->mode))
839 {
840 arity++;
841 continue;
842 }
843
844 if (MEM_P (op[arity])
845 && insn_op->predicate == memory_operand
846 && (GET_MODE (XEXP (op[arity], 0)) == Pmode
847 || GET_MODE (XEXP (op[arity], 0)) == VOIDmode))
848 {
849 op[arity] = replace_equiv_address (op[arity],
850 copy_to_mode_reg (Pmode,
851 XEXP (op[arity], 0)));
852 }
853 else if (GET_MODE (op[arity]) == insn_op->mode
854 || GET_MODE (op[arity]) == VOIDmode
855 || (insn_op->predicate == address_operand
856 && GET_MODE (op[arity]) == Pmode))
857 {
858 /* An address_operand usually has VOIDmode in the expander
859 so we cannot use this. */
860 machine_mode target_mode =
861 (insn_op->predicate == address_operand
862 ? Pmode : insn_op->mode);
863 op[arity] = copy_to_mode_reg (target_mode, op[arity]);
864 }
865
866 if (!insn_op->predicate (op[arity], insn_op->mode))
867 {
868 error ("Invalid argument %d for builtin %qF", arity + 1, fndecl);
869 return const0_rtx;
870 }
871 arity++;
872 }
873
874 if (last_vec_mode != VOIDmode && !TARGET_VX)
875 {
876 error ("Vector type builtin %qF is not supported without -mvx "
877 "(default with -march=z13).",
878 fndecl);
879 return const0_rtx;
880 }
881
882 switch (arity)
883 {
884 case 0:
885 pat = GEN_FCN (icode) (target);
886 break;
887 case 1:
888 if (nonvoid)
889 pat = GEN_FCN (icode) (target, op[0]);
890 else
891 pat = GEN_FCN (icode) (op[0]);
892 break;
893 case 2:
894 if (nonvoid)
895 pat = GEN_FCN (icode) (target, op[0], op[1]);
896 else
897 pat = GEN_FCN (icode) (op[0], op[1]);
898 break;
899 case 3:
900 if (nonvoid)
901 pat = GEN_FCN (icode) (target, op[0], op[1], op[2]);
902 else
903 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
904 break;
905 case 4:
906 if (nonvoid)
907 pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3]);
908 else
909 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
910 break;
911 case 5:
912 if (nonvoid)
913 pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4]);
914 else
915 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4]);
916 break;
917 case 6:
918 if (nonvoid)
919 pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4], op[5]);
920 else
921 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4], op[5]);
922 break;
923 default:
924 gcc_unreachable ();
925 }
926 if (!pat)
927 return NULL_RTX;
928 emit_insn (pat);
929
930 if (nonvoid)
931 return target;
932 else
933 return const0_rtx;
934 }
935
936
937 static const int s390_hotpatch_hw_max = 1000000;
938 static int s390_hotpatch_hw_before_label = 0;
939 static int s390_hotpatch_hw_after_label = 0;
940
941 /* Check whether the hotpatch attribute is applied to a function and, if it
942 has arguments, whether the arguments are valid. */
943
944 static tree
945 s390_handle_hotpatch_attribute (tree *node, tree name, tree args,
946 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
947 {
948 tree expr;
949 tree expr2;
950 int err;
951
952 if (TREE_CODE (*node) != FUNCTION_DECL)
953 {
954 warning (OPT_Wattributes, "%qE attribute only applies to functions",
955 name);
956 *no_add_attrs = true;
957 }
958 if (args != NULL && TREE_CHAIN (args) != NULL)
959 {
960 expr = TREE_VALUE (args);
961 expr2 = TREE_VALUE (TREE_CHAIN (args));
962 }
963 if (args == NULL || TREE_CHAIN (args) == NULL)
964 err = 1;
965 else if (TREE_CODE (expr) != INTEGER_CST
966 || !INTEGRAL_TYPE_P (TREE_TYPE (expr))
967 || wi::gtu_p (expr, s390_hotpatch_hw_max))
968 err = 1;
969 else if (TREE_CODE (expr2) != INTEGER_CST
970 || !INTEGRAL_TYPE_P (TREE_TYPE (expr2))
971 || wi::gtu_p (expr2, s390_hotpatch_hw_max))
972 err = 1;
973 else
974 err = 0;
975 if (err)
976 {
977 error ("requested %qE attribute is not a comma separated pair of"
978 " non-negative integer constants or too large (max. %d)", name,
979 s390_hotpatch_hw_max);
980 *no_add_attrs = true;
981 }
982
983 return NULL_TREE;
984 }
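/* Usage sketch for the attribute checked above (hypothetical user code):

     void foo (void) __attribute__ ((hotpatch (1, 2)));

   requests one halfword of hotpatch space before and two halfwords after
   the function label; both values must be non-negative integer constants
   no larger than s390_hotpatch_hw_max. */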
985
986 /* Expand the s390_vector_bool type attribute. */
987
988 static tree
989 s390_handle_vectorbool_attribute (tree *node, tree name ATTRIBUTE_UNUSED,
990 tree args ATTRIBUTE_UNUSED,
991 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
992 {
993 tree type = *node, result = NULL_TREE;
994 machine_mode mode;
995
996 while (POINTER_TYPE_P (type)
997 || TREE_CODE (type) == FUNCTION_TYPE
998 || TREE_CODE (type) == METHOD_TYPE
999 || TREE_CODE (type) == ARRAY_TYPE)
1000 type = TREE_TYPE (type);
1001
1002 mode = TYPE_MODE (type);
1003 switch (mode)
1004 {
1005 case DImode: case V2DImode: result = s390_builtin_types[BT_BV2DI]; break;
1006 case SImode: case V4SImode: result = s390_builtin_types[BT_BV4SI]; break;
1007 case HImode: case V8HImode: result = s390_builtin_types[BT_BV8HI]; break;
1008 case QImode: case V16QImode: result = s390_builtin_types[BT_BV16QI]; break;
1009 default: break;
1010 }
1011
1012 *no_add_attrs = true; /* No need to hang on to the attribute. */
1013
1014 if (result)
1015 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
1016
1017 return NULL_TREE;
1018 }
1019
1020 static const struct attribute_spec s390_attribute_table[] = {
1021 { "hotpatch", 2, 2, true, false, false, s390_handle_hotpatch_attribute, false },
1022 { "s390_vector_bool", 0, 0, false, true, false, s390_handle_vectorbool_attribute, true },
1023 /* End element. */
1024 { NULL, 0, 0, false, false, false, NULL, false }
1025 };
1026
1027 /* Return the alignment for LABEL. We default to the -falign-labels
1028 value except for the literal pool base label. */
1029 int
1030 s390_label_align (rtx label)
1031 {
1032 rtx_insn *prev_insn = prev_active_insn (label);
1033 rtx set, src;
1034
1035 if (prev_insn == NULL_RTX)
1036 goto old;
1037
1038 set = single_set (prev_insn);
1039
1040 if (set == NULL_RTX)
1041 goto old;
1042
1043 src = SET_SRC (set);
1044
1045 /* Don't align literal pool base labels. */
1046 if (GET_CODE (src) == UNSPEC
1047 && XINT (src, 1) == UNSPEC_MAIN_BASE)
1048 return 0;
1049
1050 old:
1051 return align_labels_log;
1052 }
1053
1054 static machine_mode
1055 s390_libgcc_cmp_return_mode (void)
1056 {
1057 return TARGET_64BIT ? DImode : SImode;
1058 }
1059
1060 static machine_mode
1061 s390_libgcc_shift_count_mode (void)
1062 {
1063 return TARGET_64BIT ? DImode : SImode;
1064 }
1065
1066 static machine_mode
1067 s390_unwind_word_mode (void)
1068 {
1069 return TARGET_64BIT ? DImode : SImode;
1070 }
1071
1072 /* Return true if the back end supports mode MODE. */
1073 static bool
1074 s390_scalar_mode_supported_p (machine_mode mode)
1075 {
1076 /* In contrast to the default implementation, reject TImode constants on
1077 31-bit TARGET_ZARCH for ABI compliance. */
1078 if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
1079 return false;
1080
1081 if (DECIMAL_FLOAT_MODE_P (mode))
1082 return default_decimal_float_supported_p ();
1083
1084 return default_scalar_mode_supported_p (mode);
1085 }
1086
1087 /* Return true if the back end supports vector mode MODE. */
1088 static bool
1089 s390_vector_mode_supported_p (machine_mode mode)
1090 {
1091 machine_mode inner;
1092
1093 if (!VECTOR_MODE_P (mode)
1094 || !TARGET_VX
1095 || GET_MODE_SIZE (mode) > 16)
1096 return false;
1097
1098 inner = GET_MODE_INNER (mode);
1099
1100 switch (inner)
1101 {
1102 case QImode:
1103 case HImode:
1104 case SImode:
1105 case DImode:
1106 case TImode:
1107 case SFmode:
1108 case DFmode:
1109 case TFmode:
1110 return true;
1111 default:
1112 return false;
1113 }
1114 }
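/* Consequently, e.g. V16QImode, V4SImode and V2DFmode (16 bytes each) are
   supported once TARGET_VX is enabled, while a wider mode such as V4DImode
   (32 bytes) is rejected by the size check above (illustrative). */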
1115
1116 /* Set the has_landing_pad_p flag in struct machine_function to VALUE. */
1117
1118 void
1119 s390_set_has_landing_pad_p (bool value)
1120 {
1121 cfun->machine->has_landing_pad_p = value;
1122 }
1123
1124 /* If two condition code modes are compatible, return a condition code
1125 mode which is compatible with both. Otherwise, return
1126 VOIDmode. */
1127
1128 static machine_mode
1129 s390_cc_modes_compatible (machine_mode m1, machine_mode m2)
1130 {
1131 if (m1 == m2)
1132 return m1;
1133
1134 switch (m1)
1135 {
1136 case CCZmode:
1137 if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
1138 || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
1139 return m2;
1140 return VOIDmode;
1141
1142 case CCSmode:
1143 case CCUmode:
1144 case CCTmode:
1145 case CCSRmode:
1146 case CCURmode:
1147 case CCZ1mode:
1148 if (m2 == CCZmode)
1149 return m1;
1150
1151 return VOIDmode;
1152
1153 default:
1154 return VOIDmode;
1155 }
1156 return VOIDmode;
1157 }
1158
1159 /* Return true if SET either doesn't set the CC register, or else
1160 the source and destination have matching CC modes and that
1161 CC mode is at least as constrained as REQ_MODE. */
1162
1163 static bool
1164 s390_match_ccmode_set (rtx set, machine_mode req_mode)
1165 {
1166 machine_mode set_mode;
1167
1168 gcc_assert (GET_CODE (set) == SET);
1169
1170 if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
1171 return 1;
1172
1173 set_mode = GET_MODE (SET_DEST (set));
1174 switch (set_mode)
1175 {
1176 case CCSmode:
1177 case CCSRmode:
1178 case CCUmode:
1179 case CCURmode:
1180 case CCLmode:
1181 case CCL1mode:
1182 case CCL2mode:
1183 case CCL3mode:
1184 case CCT1mode:
1185 case CCT2mode:
1186 case CCT3mode:
1187 case CCVEQmode:
1188 case CCVHmode:
1189 case CCVHUmode:
1190 case CCVFHmode:
1191 case CCVFHEmode:
1192 if (req_mode != set_mode)
1193 return 0;
1194 break;
1195
1196 case CCZmode:
1197 if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
1198 && req_mode != CCSRmode && req_mode != CCURmode)
1199 return 0;
1200 break;
1201
1202 case CCAPmode:
1203 case CCANmode:
1204 if (req_mode != CCAmode)
1205 return 0;
1206 break;
1207
1208 default:
1209 gcc_unreachable ();
1210 }
1211
1212 return (GET_MODE (SET_SRC (set)) == set_mode);
1213 }
1214
1215 /* Return true if every SET in INSN that sets the CC register
1216 has source and destination with matching CC modes and that
1217 CC mode is at least as constrained as REQ_MODE.
1218 If REQ_MODE is VOIDmode, always return false. */
1219
1220 bool
1221 s390_match_ccmode (rtx_insn *insn, machine_mode req_mode)
1222 {
1223 int i;
1224
1225 /* s390_tm_ccmode returns VOIDmode to indicate failure. */
1226 if (req_mode == VOIDmode)
1227 return false;
1228
1229 if (GET_CODE (PATTERN (insn)) == SET)
1230 return s390_match_ccmode_set (PATTERN (insn), req_mode);
1231
1232 if (GET_CODE (PATTERN (insn)) == PARALLEL)
1233 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1234 {
1235 rtx set = XVECEXP (PATTERN (insn), 0, i);
1236 if (GET_CODE (set) == SET)
1237 if (!s390_match_ccmode_set (set, req_mode))
1238 return false;
1239 }
1240
1241 return true;
1242 }
1243
1244 /* If a test-under-mask instruction can be used to implement
1245 (compare (and ... OP1) OP2), return the CC mode required
1246 to do that. Otherwise, return VOIDmode.
1247 MIXED is true if the instruction can distinguish between
1248 CC1 and CC2 for mixed selected bits (TMxx), it is false
1249 if the instruction cannot (TM). */
1250
1251 machine_mode
1252 s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
1253 {
1254 int bit0, bit1;
1255
1256 /* ??? Fixme: should work on CONST_DOUBLE as well. */
1257 if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
1258 return VOIDmode;
1259
1260 /* Selected bits all zero: CC0.
1261 e.g.: int a; if ((a & (16 + 128)) == 0) */
1262 if (INTVAL (op2) == 0)
1263 return CCTmode;
1264
1265 /* Selected bits all one: CC3.
1266 e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
1267 if (INTVAL (op2) == INTVAL (op1))
1268 return CCT3mode;
1269
1270 /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
1271 int a;
1272 if ((a & (16 + 128)) == 16) -> CCT1
1273 if ((a & (16 + 128)) == 128) -> CCT2 */
1274 if (mixed)
1275 {
1276 bit1 = exact_log2 (INTVAL (op2));
1277 bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
1278 if (bit0 != -1 && bit1 != -1)
1279 return bit0 > bit1 ? CCT1mode : CCT2mode;
1280 }
1281
1282 return VOIDmode;
1283 }
1284
1285 /* Given a comparison code OP (EQ, NE, etc.) and the operands
1286 OP0 and OP1 of a COMPARE, return the mode to be used for the
1287 comparison. */
1288
1289 machine_mode
1290 s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
1291 {
1292 if (TARGET_VX
1293 && register_operand (op0, DFmode)
1294 && register_operand (op1, DFmode))
1295 {
1296 /* LT, LE, UNGT, UNGE require swapping OP0 and OP1. Either
1297 s390_emit_compare or s390_canonicalize_comparison will take
1298 care of it. */
1299 switch (code)
1300 {
1301 case EQ:
1302 case NE:
1303 return CCVEQmode;
1304 case GT:
1305 case UNLE:
1306 return CCVFHmode;
1307 case GE:
1308 case UNLT:
1309 return CCVFHEmode;
1310 default:
1311 ;
1312 }
1313 }
1314
1315 switch (code)
1316 {
1317 case EQ:
1318 case NE:
1319 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
1320 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1321 return CCAPmode;
1322 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
1323 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
1324 return CCAPmode;
1325 if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1326 || GET_CODE (op1) == NEG)
1327 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1328 return CCLmode;
1329
1330 if (GET_CODE (op0) == AND)
1331 {
1332 /* Check whether we can potentially do it via TM. */
1333 machine_mode ccmode;
1334 ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
1335 if (ccmode != VOIDmode)
1336 {
1337 /* Relax CCTmode to CCZmode to allow fall-back to AND
1338 if that turns out to be beneficial. */
1339 return ccmode == CCTmode ? CCZmode : ccmode;
1340 }
1341 }
1342
1343 if (register_operand (op0, HImode)
1344 && GET_CODE (op1) == CONST_INT
1345 && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
1346 return CCT3mode;
1347 if (register_operand (op0, QImode)
1348 && GET_CODE (op1) == CONST_INT
1349 && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
1350 return CCT3mode;
1351
1352 return CCZmode;
1353
1354 case LE:
1355 case LT:
1356 case GE:
1357 case GT:
1358 /* The only overflow condition of NEG and ABS happens when
1359 INT_MIN is used as the operand; the result then stays negative.
1360 So we have an overflow from a positive value to a negative.
1361 Using CCAP mode the resulting cc can be used for comparisons. */
1362 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
1363 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1364 return CCAPmode;
1365
1366 /* If constants are involved in an add instruction it is possible to use
1367 the resulting cc for comparisons with zero. Knowing the sign of the
1368 constant, the overflow behavior becomes predictable. e.g.:
1369 int a, b; if ((b = a + c) > 0)
1370 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
1371 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
1372 && (CONST_OK_FOR_K (INTVAL (XEXP (op0, 1)))
1373 || (CONST_OK_FOR_CONSTRAINT_P (INTVAL (XEXP (op0, 1)), 'O', "Os")
1374 /* Avoid INT32_MIN on 32 bit. */
1375 && (!TARGET_ZARCH || INTVAL (XEXP (op0, 1)) != -0x7fffffff - 1))))
1376 {
1377 if (INTVAL (XEXP((op0), 1)) < 0)
1378 return CCANmode;
1379 else
1380 return CCAPmode;
1381 }
1382 /* Fall through. */
1383 case UNORDERED:
1384 case ORDERED:
1385 case UNEQ:
1386 case UNLE:
1387 case UNLT:
1388 case UNGE:
1389 case UNGT:
1390 case LTGT:
1391 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
1392 && GET_CODE (op1) != CONST_INT)
1393 return CCSRmode;
1394 return CCSmode;
1395
1396 case LTU:
1397 case GEU:
1398 if (GET_CODE (op0) == PLUS
1399 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1400 return CCL1mode;
1401
1402 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
1403 && GET_CODE (op1) != CONST_INT)
1404 return CCURmode;
1405 return CCUmode;
1406
1407 case LEU:
1408 case GTU:
1409 if (GET_CODE (op0) == MINUS
1410 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1411 return CCL2mode;
1412
1413 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
1414 && GET_CODE (op1) != CONST_INT)
1415 return CCURmode;
1416 return CCUmode;
1417
1418 default:
1419 gcc_unreachable ();
1420 }
1421 }
1422
1423 /* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
1424 that we can implement more efficiently. */
1425
1426 static void
1427 s390_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
1428 bool op0_preserve_value)
1429 {
1430 if (op0_preserve_value)
1431 return;
1432
1433 /* Convert ZERO_EXTRACT back to AND to enable TM patterns. */
1434 if ((*code == EQ || *code == NE)
1435 && *op1 == const0_rtx
1436 && GET_CODE (*op0) == ZERO_EXTRACT
1437 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
1438 && GET_CODE (XEXP (*op0, 2)) == CONST_INT
1439 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
1440 {
1441 rtx inner = XEXP (*op0, 0);
1442 HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
1443 HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
1444 HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));
1445
1446 if (len > 0 && len < modesize
1447 && pos >= 0 && pos + len <= modesize
1448 && modesize <= HOST_BITS_PER_WIDE_INT)
1449 {
1450 unsigned HOST_WIDE_INT block;
1451 block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
1452 block <<= modesize - pos - len;
1453
1454 *op0 = gen_rtx_AND (GET_MODE (inner), inner,
1455 gen_int_mode (block, GET_MODE (inner)));
1456 }
1457 }
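/* Illustrative arithmetic for the transformation above: with len = 2 and
   pos = 4 in SImode (modesize = 32) the mask becomes
   ((1 << 2) - 1) << 26 == 0x0c000000, so the comparison is rewritten as
   (and:SI inner 0x0c000000) == 0. */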
1458
1459 /* Narrow AND of memory against immediate to enable TM. */
1460 if ((*code == EQ || *code == NE)
1461 && *op1 == const0_rtx
1462 && GET_CODE (*op0) == AND
1463 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
1464 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
1465 {
1466 rtx inner = XEXP (*op0, 0);
1467 rtx mask = XEXP (*op0, 1);
1468
1469 /* Ignore paradoxical SUBREGs if all extra bits are masked out. */
1470 if (GET_CODE (inner) == SUBREG
1471 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
1472 && (GET_MODE_SIZE (GET_MODE (inner))
1473 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
1474 && ((INTVAL (mask)
1475 & GET_MODE_MASK (GET_MODE (inner))
1476 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
1477 == 0))
1478 inner = SUBREG_REG (inner);
1479
1480 /* Do not change volatile MEMs. */
1481 if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
1482 {
1483 int part = s390_single_part (XEXP (*op0, 1),
1484 GET_MODE (inner), QImode, 0);
1485 if (part >= 0)
1486 {
1487 mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
1488 inner = adjust_address_nv (inner, QImode, part);
1489 *op0 = gen_rtx_AND (QImode, inner, mask);
1490 }
1491 }
1492 }
1493
1494 /* Narrow comparisons against 0xffff to HImode if possible. */
1495 if ((*code == EQ || *code == NE)
1496 && GET_CODE (*op1) == CONST_INT
1497 && INTVAL (*op1) == 0xffff
1498 && SCALAR_INT_MODE_P (GET_MODE (*op0))
1499 && (nonzero_bits (*op0, GET_MODE (*op0))
1500 & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
1501 {
1502 *op0 = gen_lowpart (HImode, *op0);
1503 *op1 = constm1_rtx;
1504 }
1505
1506 /* Remove redundant UNSPEC_STRCMPCC_TO_INT conversions if possible. */
1507 if (GET_CODE (*op0) == UNSPEC
1508 && XINT (*op0, 1) == UNSPEC_STRCMPCC_TO_INT
1509 && XVECLEN (*op0, 0) == 1
1510 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
1511 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
1512 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
1513 && *op1 == const0_rtx)
1514 {
1515 enum rtx_code new_code = UNKNOWN;
1516 switch (*code)
1517 {
1518 case EQ: new_code = EQ; break;
1519 case NE: new_code = NE; break;
1520 case LT: new_code = GTU; break;
1521 case GT: new_code = LTU; break;
1522 case LE: new_code = GEU; break;
1523 case GE: new_code = LEU; break;
1524 default: break;
1525 }
1526
1527 if (new_code != UNKNOWN)
1528 {
1529 *op0 = XVECEXP (*op0, 0, 0);
1530 *code = new_code;
1531 }
1532 }
1533
1534 /* Remove redundant UNSPEC_CC_TO_INT conversions if possible. */
1535 if (GET_CODE (*op0) == UNSPEC
1536 && XINT (*op0, 1) == UNSPEC_CC_TO_INT
1537 && XVECLEN (*op0, 0) == 1
1538 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
1539 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
1540 && CONST_INT_P (*op1))
1541 {
1542 enum rtx_code new_code = UNKNOWN;
1543 switch (GET_MODE (XVECEXP (*op0, 0, 0)))
1544 {
1545 case CCZmode:
1546 case CCRAWmode:
1547 switch (*code)
1548 {
1549 case EQ: new_code = EQ; break;
1550 case NE: new_code = NE; break;
1551 default: break;
1552 }
1553 break;
1554 default: break;
1555 }
1556
1557 if (new_code != UNKNOWN)
1558 {
1559 /* For CCRAWmode put the required cc mask into the second
1560 operand. */
1561 if (GET_MODE (XVECEXP (*op0, 0, 0)) == CCRAWmode
1562 && INTVAL (*op1) >= 0 && INTVAL (*op1) <= 3)
1563 *op1 = gen_rtx_CONST_INT (VOIDmode, 1 << (3 - INTVAL (*op1)));
1564 *op0 = XVECEXP (*op0, 0, 0);
1565 *code = new_code;
1566 }
1567 }
1568
1569 /* Simplify cascaded EQ, NE with const0_rtx. */
1570 if ((*code == NE || *code == EQ)
1571 && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
1572 && GET_MODE (*op0) == SImode
1573 && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
1574 && REG_P (XEXP (*op0, 0))
1575 && XEXP (*op0, 1) == const0_rtx
1576 && *op1 == const0_rtx)
1577 {
1578 if ((*code == EQ && GET_CODE (*op0) == NE)
1579 || (*code == NE && GET_CODE (*op0) == EQ))
1580 *code = EQ;
1581 else
1582 *code = NE;
1583 *op0 = XEXP (*op0, 0);
1584 }
1585
1586 /* Prefer register over memory as first operand. */
1587 if (MEM_P (*op0) && REG_P (*op1))
1588 {
1589 rtx tem = *op0; *op0 = *op1; *op1 = tem;
1590 *code = (int)swap_condition ((enum rtx_code)*code);
1591 }
1592
1593 /* Using the scalar variants of vector instructions for 64 bit FP
1594 comparisons might require swapping the operands. */
1595 if (TARGET_VX
1596 && register_operand (*op0, DFmode)
1597 && register_operand (*op1, DFmode)
1598 && (*code == LT || *code == LE || *code == UNGT || *code == UNGE))
1599 {
1600 rtx tmp;
1601
1602 switch (*code)
1603 {
1604 case LT: *code = GT; break;
1605 case LE: *code = GE; break;
1606 case UNGT: *code = UNLE; break;
1607 case UNGE: *code = UNLT; break;
1608 default: ;
1609 }
1610 tmp = *op0; *op0 = *op1; *op1 = tmp;
1611 }
1612 }
1613
1614 /* Helper function for s390_emit_compare. If possible emit a 64 bit
1615 FP compare using the single element variant of vector instructions.
1616 Replace CODE with the comparison code to be used in the CC reg
1617 compare and return the condition code register RTX in CC. */
1618
1619 static bool
1620 s390_expand_vec_compare_scalar (enum rtx_code *code, rtx cmp1, rtx cmp2,
1621 rtx *cc)
1622 {
1623 machine_mode cmp_mode;
1624 bool swap_p = false;
1625
1626 switch (*code)
1627 {
1628 case EQ: cmp_mode = CCVEQmode; break;
1629 case NE: cmp_mode = CCVEQmode; break;
1630 case GT: cmp_mode = CCVFHmode; break;
1631 case GE: cmp_mode = CCVFHEmode; break;
1632 case UNLE: cmp_mode = CCVFHmode; break;
1633 case UNLT: cmp_mode = CCVFHEmode; break;
1634 case LT: cmp_mode = CCVFHmode; *code = GT; swap_p = true; break;
1635 case LE: cmp_mode = CCVFHEmode; *code = GE; swap_p = true; break;
1636 case UNGE: cmp_mode = CCVFHmode; *code = UNLE; swap_p = true; break;
1637 case UNGT: cmp_mode = CCVFHEmode; *code = UNLT; swap_p = true; break;
1638 default: return false;
1639 }
1640
1641 if (swap_p)
1642 {
1643 rtx tmp = cmp2;
1644 cmp2 = cmp1;
1645 cmp1 = tmp;
1646 }
1647 *cc = gen_rtx_REG (cmp_mode, CC_REGNUM);
1648 emit_insn (gen_rtx_PARALLEL (VOIDmode,
1649 gen_rtvec (2,
1650 gen_rtx_SET (*cc,
1651 gen_rtx_COMPARE (cmp_mode, cmp1,
1652 cmp2)),
1653 gen_rtx_CLOBBER (VOIDmode,
1654 gen_rtx_SCRATCH (V2DImode)))));
1655 return true;
1656 }
1657
1658
1659 /* Emit a compare instruction suitable to implement the comparison
1660 OP0 CODE OP1. Return the correct condition RTL to be placed in
1661 the IF_THEN_ELSE of the conditional branch testing the result. */
1662
1663 rtx
1664 s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
1665 {
1666 machine_mode mode = s390_select_ccmode (code, op0, op1);
1667 rtx cc;
1668
1669 if (TARGET_VX
1670 && register_operand (op0, DFmode)
1671 && register_operand (op1, DFmode)
1672 && s390_expand_vec_compare_scalar (&code, op0, op1, &cc))
1673 {
1674 /* Work has been done by s390_expand_vec_compare_scalar already. */
1675 }
1676 else if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
1677 {
1678 /* Do not output a redundant compare instruction if a
1679 compare_and_swap pattern already computed the result and the
1680 machine modes are compatible. */
1681 gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
1682 == GET_MODE (op0));
1683 cc = op0;
1684 }
1685 else
1686 {
1687 cc = gen_rtx_REG (mode, CC_REGNUM);
1688 emit_insn (gen_rtx_SET (cc, gen_rtx_COMPARE (mode, op0, op1)));
1689 }
1690
1691 return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
1692 }
1693
1694 /* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
1695 matches CMP.
1696 Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
1697 conditional branch testing the result. */
1698
1699 static rtx
1700 s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem,
1701 rtx cmp, rtx new_rtx)
1702 {
1703 emit_insn (gen_atomic_compare_and_swapsi_internal (old, mem, cmp, new_rtx));
1704 return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM),
1705 const0_rtx);
1706 }
1707
1708 /* Emit a jump instruction to TARGET and return it. If COND is
1709 NULL_RTX, emit an unconditional jump, else a conditional jump under
1710 condition COND. */
1711
1712 rtx_insn *
1713 s390_emit_jump (rtx target, rtx cond)
1714 {
1715 rtx insn;
1716
1717 target = gen_rtx_LABEL_REF (VOIDmode, target);
1718 if (cond)
1719 target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);
1720
1721 insn = gen_rtx_SET (pc_rtx, target);
1722 return emit_jump_insn (insn);
1723 }
1724
1725 /* Return branch condition mask to implement a branch
1726 specified by CODE. Return -1 for invalid comparisons. */
1727
1728 int
1729 s390_branch_condition_mask (rtx code)
1730 {
1731 const int CC0 = 1 << 3;
1732 const int CC1 = 1 << 2;
1733 const int CC2 = 1 << 1;
1734 const int CC3 = 1 << 0;
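/* The mask built from these bits is the 4-bit condition mask of a BRANCH
   ON CONDITION instruction, with CC0 as the most significant bit; e.g.
   CC0 | CC2 yields the assembler mask value 10 (0b1010), i.e. branch if
   the condition code is 0 or 2 (background information). */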
1735
1736 gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
1737 gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
1738 gcc_assert (XEXP (code, 1) == const0_rtx
1739 || (GET_MODE (XEXP (code, 0)) == CCRAWmode
1740 && CONST_INT_P (XEXP (code, 1))));
1741
1742
1743 switch (GET_MODE (XEXP (code, 0)))
1744 {
1745 case CCZmode:
1746 case CCZ1mode:
1747 switch (GET_CODE (code))
1748 {
1749 case EQ: return CC0;
1750 case NE: return CC1 | CC2 | CC3;
1751 default: return -1;
1752 }
1753 break;
1754
1755 case CCT1mode:
1756 switch (GET_CODE (code))
1757 {
1758 case EQ: return CC1;
1759 case NE: return CC0 | CC2 | CC3;
1760 default: return -1;
1761 }
1762 break;
1763
1764 case CCT2mode:
1765 switch (GET_CODE (code))
1766 {
1767 case EQ: return CC2;
1768 case NE: return CC0 | CC1 | CC3;
1769 default: return -1;
1770 }
1771 break;
1772
1773 case CCT3mode:
1774 switch (GET_CODE (code))
1775 {
1776 case EQ: return CC3;
1777 case NE: return CC0 | CC1 | CC2;
1778 default: return -1;
1779 }
1780 break;
1781
1782 case CCLmode:
1783 switch (GET_CODE (code))
1784 {
1785 case EQ: return CC0 | CC2;
1786 case NE: return CC1 | CC3;
1787 default: return -1;
1788 }
1789 break;
1790
1791 case CCL1mode:
1792 switch (GET_CODE (code))
1793 {
1794 case LTU: return CC2 | CC3; /* carry */
1795 case GEU: return CC0 | CC1; /* no carry */
1796 default: return -1;
1797 }
1798 break;
1799
1800 case CCL2mode:
1801 switch (GET_CODE (code))
1802 {
1803 case GTU: return CC0 | CC1; /* borrow */
1804 case LEU: return CC2 | CC3; /* no borrow */
1805 default: return -1;
1806 }
1807 break;
1808
1809 case CCL3mode:
1810 switch (GET_CODE (code))
1811 {
1812 case EQ: return CC0 | CC2;
1813 case NE: return CC1 | CC3;
1814 case LTU: return CC1;
1815 case GTU: return CC3;
1816 case LEU: return CC1 | CC2;
1817 case GEU: return CC2 | CC3;
1818 default: return -1;
1819 }
1820
1821 case CCUmode:
1822 switch (GET_CODE (code))
1823 {
1824 case EQ: return CC0;
1825 case NE: return CC1 | CC2 | CC3;
1826 case LTU: return CC1;
1827 case GTU: return CC2;
1828 case LEU: return CC0 | CC1;
1829 case GEU: return CC0 | CC2;
1830 default: return -1;
1831 }
1832 break;
1833
1834 case CCURmode:
1835 switch (GET_CODE (code))
1836 {
1837 case EQ: return CC0;
1838 case NE: return CC2 | CC1 | CC3;
1839 case LTU: return CC2;
1840 case GTU: return CC1;
1841 case LEU: return CC0 | CC2;
1842 case GEU: return CC0 | CC1;
1843 default: return -1;
1844 }
1845 break;
1846
1847 case CCAPmode:
1848 switch (GET_CODE (code))
1849 {
1850 case EQ: return CC0;
1851 case NE: return CC1 | CC2 | CC3;
1852 case LT: return CC1 | CC3;
1853 case GT: return CC2;
1854 case LE: return CC0 | CC1 | CC3;
1855 case GE: return CC0 | CC2;
1856 default: return -1;
1857 }
1858 break;
1859
1860 case CCANmode:
1861 switch (GET_CODE (code))
1862 {
1863 case EQ: return CC0;
1864 case NE: return CC1 | CC2 | CC3;
1865 case LT: return CC1;
1866 case GT: return CC2 | CC3;
1867 case LE: return CC0 | CC1;
1868 case GE: return CC0 | CC2 | CC3;
1869 default: return -1;
1870 }
1871 break;
1872
1873 case CCSmode:
1874 switch (GET_CODE (code))
1875 {
1876 case EQ: return CC0;
1877 case NE: return CC1 | CC2 | CC3;
1878 case LT: return CC1;
1879 case GT: return CC2;
1880 case LE: return CC0 | CC1;
1881 case GE: return CC0 | CC2;
1882 case UNORDERED: return CC3;
1883 case ORDERED: return CC0 | CC1 | CC2;
1884 case UNEQ: return CC0 | CC3;
1885 case UNLT: return CC1 | CC3;
1886 case UNGT: return CC2 | CC3;
1887 case UNLE: return CC0 | CC1 | CC3;
1888 case UNGE: return CC0 | CC2 | CC3;
1889 case LTGT: return CC1 | CC2;
1890 default: return -1;
1891 }
1892 break;
1893
1894 case CCSRmode:
1895 switch (GET_CODE (code))
1896 {
1897 case EQ: return CC0;
1898 case NE: return CC2 | CC1 | CC3;
1899 case LT: return CC2;
1900 case GT: return CC1;
1901 case LE: return CC0 | CC2;
1902 case GE: return CC0 | CC1;
1903 case UNORDERED: return CC3;
1904 case ORDERED: return CC0 | CC2 | CC1;
1905 case UNEQ: return CC0 | CC3;
1906 case UNLT: return CC2 | CC3;
1907 case UNGT: return CC1 | CC3;
1908 case UNLE: return CC0 | CC2 | CC3;
1909 case UNGE: return CC0 | CC1 | CC3;
1910 case LTGT: return CC2 | CC1;
1911 default: return -1;
1912 }
1913 break;
1914
1915 /* Vector comparison modes. */
1916
1917 case CCVEQmode:
1918 switch (GET_CODE (code))
1919 {
1920 case EQ: return CC0;
1921 case NE: return CC3;
1922 default: return -1;
1923 }
1924
1925 case CCVEQANYmode:
1926 switch (GET_CODE (code))
1927 {
1928 case EQ: return CC0 | CC1;
1929 case NE: return CC3 | CC1;
1930 default: return -1;
1931 }
1932
1933 /* Integer vector compare modes. */
1934
1935 case CCVHmode:
1936 switch (GET_CODE (code))
1937 {
1938 case GT: return CC0;
1939 case LE: return CC3;
1940 default: return -1;
1941 }
1942
1943 case CCVHANYmode:
1944 switch (GET_CODE (code))
1945 {
1946 case GT: return CC0 | CC1;
1947 case LE: return CC3 | CC1;
1948 default: return -1;
1949 }
1950
1951 case CCVHUmode:
1952 switch (GET_CODE (code))
1953 {
1954 case GTU: return CC0;
1955 case LEU: return CC3;
1956 default: return -1;
1957 }
1958
1959 case CCVHUANYmode:
1960 switch (GET_CODE (code))
1961 {
1962 case GTU: return CC0 | CC1;
1963 case LEU: return CC3 | CC1;
1964 default: return -1;
1965 }
1966
1967 /* FP vector compare modes. */
1968
1969 case CCVFHmode:
1970 switch (GET_CODE (code))
1971 {
1972 case GT: return CC0;
1973 case UNLE: return CC3;
1974 default: return -1;
1975 }
1976
1977 case CCVFHANYmode:
1978 switch (GET_CODE (code))
1979 {
1980 case GT: return CC0 | CC1;
1981 case UNLE: return CC3 | CC1;
1982 default: return -1;
1983 }
1984
1985 case CCVFHEmode:
1986 switch (GET_CODE (code))
1987 {
1988 case GE: return CC0;
1989 case UNLT: return CC3;
1990 default: return -1;
1991 }
1992
1993 case CCVFHEANYmode:
1994 switch (GET_CODE (code))
1995 {
1996 case GE: return CC0 | CC1;
1997 case UNLT: return CC3 | CC1;
1998 default: return -1;
1999 }
2000
2001
2002 case CCRAWmode:
2003 switch (GET_CODE (code))
2004 {
2005 case EQ:
2006 return INTVAL (XEXP (code, 1));
2007 case NE:
2008 return (INTVAL (XEXP (code, 1))) ^ 0xf;
2009 default:
2010 gcc_unreachable ();
2011 }
2012
2013 default:
2014 return -1;
2015 }
2016 }
2017
2018
2019 /* Return branch condition mask to implement a compare and branch
2020 specified by CODE. Return -1 for invalid comparisons. */
2021
2022 int
2023 s390_compare_and_branch_condition_mask (rtx code)
2024 {
2025 const int CC0 = 1 << 3;
2026 const int CC1 = 1 << 2;
2027 const int CC2 = 1 << 1;
2028
2029 switch (GET_CODE (code))
2030 {
2031 case EQ:
2032 return CC0;
2033 case NE:
2034 return CC1 | CC2;
2035 case LT:
2036 case LTU:
2037 return CC1;
2038 case GT:
2039 case GTU:
2040 return CC2;
2041 case LE:
2042 case LEU:
2043 return CC0 | CC1;
2044 case GE:
2045 case GEU:
2046 return CC0 | CC2;
2047 default:
2048 gcc_unreachable ();
2049 }
2050 return -1;
2051 }
2052
2053 /* If INV is false, return assembler mnemonic string to implement
2054 a branch specified by CODE. If INV is true, return mnemonic
2055 for the corresponding inverted branch. */
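/* Illustrative example (not part of the original source): for an EQ
   comparison against the CC register in CCSmode the condition mask is
   CC0, i.e. the value 8, and mnemonic[8] below is "e"; with INV set the
   mask becomes 7 and the inverted mnemonic is "ne".  */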
2056
2057 static const char *
2058 s390_branch_condition_mnemonic (rtx code, int inv)
2059 {
2060 int mask;
2061
2062 static const char *const mnemonic[16] =
2063 {
2064 NULL, "o", "h", "nle",
2065 "l", "nhe", "lh", "ne",
2066 "e", "nlh", "he", "nl",
2067 "le", "nh", "no", NULL
2068 };
2069
2070 if (GET_CODE (XEXP (code, 0)) == REG
2071 && REGNO (XEXP (code, 0)) == CC_REGNUM
2072 && (XEXP (code, 1) == const0_rtx
2073 || (GET_MODE (XEXP (code, 0)) == CCRAWmode
2074 && CONST_INT_P (XEXP (code, 1)))))
2075 mask = s390_branch_condition_mask (code);
2076 else
2077 mask = s390_compare_and_branch_condition_mask (code);
2078
2079 gcc_assert (mask >= 0);
2080
2081 if (inv)
2082 mask ^= 15;
2083
2084 gcc_assert (mask >= 1 && mask <= 14);
2085
2086 return mnemonic[mask];
2087 }
2088
2089 /* Return the part of OP which has a value different from DEF.
2090 The size of the part is determined by MODE.
2091 Use this function only if you already know that OP really
2092 contains such a part. */
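/* Illustrative example (not part of the original source): with a 64-bit
   HOST_WIDE_INT, s390_extract_part (GEN_INT (0x0000ffff00000000), HImode, 0)
   scans the halfword parts starting from the least significant one and
   returns 0xffff, the single part that differs from DEF.  */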
2093
2094 unsigned HOST_WIDE_INT
2095 s390_extract_part (rtx op, machine_mode mode, int def)
2096 {
2097 unsigned HOST_WIDE_INT value = 0;
2098 int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
2099 int part_bits = GET_MODE_BITSIZE (mode);
2100 unsigned HOST_WIDE_INT part_mask
2101 = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
2102 int i;
2103
2104 for (i = 0; i < max_parts; i++)
2105 {
2106 if (i == 0)
2107 value = (unsigned HOST_WIDE_INT) INTVAL (op);
2108 else
2109 value >>= part_bits;
2110
2111 if ((value & part_mask) != (def & part_mask))
2112 return value & part_mask;
2113 }
2114
2115 gcc_unreachable ();
2116 }
2117
2118 /* If OP is an integer constant of mode MODE with exactly one
2119 part of mode PART_MODE unequal to DEF, return the number of that
2120 part, counting from the most significant part. Otherwise, return -1. */
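/* Illustrative example (not part of the original source):
   s390_single_part (GEN_INT (0x0000ffff00000000), DImode, HImode, 0)
   returns 1 because exactly one halfword differs from 0 and it is the
   second-most-significant one; a value with two such halfwords would
   yield -1.  */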
2121
2122 int
2123 s390_single_part (rtx op,
2124 machine_mode mode,
2125 machine_mode part_mode,
2126 int def)
2127 {
2128 unsigned HOST_WIDE_INT value = 0;
2129 int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
2130 unsigned HOST_WIDE_INT part_mask
2131 = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
2132 int i, part = -1;
2133
2134 if (GET_CODE (op) != CONST_INT)
2135 return -1;
2136
2137 for (i = 0; i < n_parts; i++)
2138 {
2139 if (i == 0)
2140 value = (unsigned HOST_WIDE_INT) INTVAL (op);
2141 else
2142 value >>= GET_MODE_BITSIZE (part_mode);
2143
2144 if ((value & part_mask) != (def & part_mask))
2145 {
2146 if (part != -1)
2147 return -1;
2148 else
2149 part = i;
2150 }
2151 }
2152 return part == -1 ? -1 : n_parts - 1 - part;
2153 }
2154
2155 /* Return true if IN contains a contiguous bitfield in the lower SIZE
2156 bits and no other bits are set in IN. POS and LENGTH can be used
2157 to obtain the start position and the length of the bitfield.
2158
2159 POS gives the position of the first bit of the bitfield counting
2160 from the lowest order bit starting with zero. In order to use this
2161 value for S/390 instructions this has to be converted to "bits big
2162 endian" style. */
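/* Illustrative example (not part of the original source):
   s390_contiguous_bitmask_p (0x00ff0000, 32, &pos, &len) returns true
   with pos == 16 and len == 8, while 0x00ff00ff is rejected because its
   set bits are not contiguous.  */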
2163
2164 bool
2165 s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, int size,
2166 int *pos, int *length)
2167 {
2168 int tmp_pos = 0;
2169 int tmp_length = 0;
2170 int i;
2171 unsigned HOST_WIDE_INT mask = 1ULL;
2172 bool contiguous = false;
2173
2174 for (i = 0; i < size; mask <<= 1, i++)
2175 {
2176 if (contiguous)
2177 {
2178 if (mask & in)
2179 tmp_length++;
2180 else
2181 break;
2182 }
2183 else
2184 {
2185 if (mask & in)
2186 {
2187 contiguous = true;
2188 tmp_length++;
2189 }
2190 else
2191 tmp_pos++;
2192 }
2193 }
2194
2195 if (!tmp_length)
2196 return false;
2197
2198 /* Calculate a mask for all bits beyond the contiguous bits. */
2199 mask = (-1LL & ~(((1ULL << (tmp_length + tmp_pos - 1)) << 1) - 1));
2200
2201 if ((unsigned)size < sizeof (HOST_WIDE_INT) * BITS_PER_UNIT)
2202 mask &= (HOST_WIDE_INT_1U << size) - 1;
2203
2204 if (mask & in)
2205 return false;
2206
2207 if (tmp_length + tmp_pos - 1 > size)
2208 return false;
2209
2210 if (length)
2211 *length = tmp_length;
2212
2213 if (pos)
2214 *pos = tmp_pos;
2215
2216 return true;
2217 }
2218
2219 /* Return true if OP contains the same contiguous bitfield in *all*
2220 its elements. START and END can be used to obtain the start and
2221 end position of the bitfield.
2222
2223 START/END give the position of the first/last bit of the bitfield
2224 counting from the lowest order bit starting with zero. In order to
2225 use these values for S/390 instructions this has to be converted to
2226 "bits big endian" style. */
2227
2228 bool
2229 s390_contiguous_bitmask_vector_p (rtx op, int *start, int *end)
2230 {
2231 unsigned HOST_WIDE_INT mask;
2232 int length, size;
2233
2234 if (!VECTOR_MODE_P (GET_MODE (op))
2235 || GET_CODE (op) != CONST_VECTOR
2236 || !CONST_INT_P (XVECEXP (op, 0, 0)))
2237 return false;
2238
2239 if (GET_MODE_NUNITS (GET_MODE (op)) > 1)
2240 {
2241 int i;
2242
2243 for (i = 1; i < GET_MODE_NUNITS (GET_MODE (op)); ++i)
2244 if (!rtx_equal_p (XVECEXP (op, 0, i), XVECEXP (op, 0, 0)))
2245 return false;
2246 }
2247
2248 size = GET_MODE_UNIT_BITSIZE (GET_MODE (op));
2249 mask = UINTVAL (XVECEXP (op, 0, 0));
2250 if (s390_contiguous_bitmask_p (mask, size, start,
2251 end != NULL ? &length : NULL))
2252 {
2253 if (end != NULL)
2254 *end = *start + length - 1;
2255 return true;
2256 }
2257 /* 0xff00000f style immediates can be covered by swapping start and
2258 end indices in vgm. */
2259 if (s390_contiguous_bitmask_p (~mask, size, start,
2260 end != NULL ? &length : NULL))
2261 {
2262 if (end != NULL)
2263 *end = *start - 1;
2264 if (start != NULL)
2265 *start = *start + length;
2266 return true;
2267 }
2268 return false;
2269 }
2270
2271 /* Return true if OP consists only of byte chunks being either 0 or
2272 0xff. If MASK is non-NULL, a byte mask is generated which is
2273 appropriate for the vector generate byte mask instruction. */
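/* Illustrative example (not part of the original source): a V2DImode
   CONST_VECTOR with elements { -1, 0 } consists only of 0xff and 0x00
   bytes and produces the byte mask 0xff00 (one mask bit per byte, with
   the first element mapped to the most significant bits).  */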
2274
2275 bool
2276 s390_bytemask_vector_p (rtx op, unsigned *mask)
2277 {
2278 int i;
2279 unsigned tmp_mask = 0;
2280 int nunit, unit_size;
2281
2282 if (!VECTOR_MODE_P (GET_MODE (op))
2283 || GET_CODE (op) != CONST_VECTOR
2284 || !CONST_INT_P (XVECEXP (op, 0, 0)))
2285 return false;
2286
2287 nunit = GET_MODE_NUNITS (GET_MODE (op));
2288 unit_size = GET_MODE_UNIT_SIZE (GET_MODE (op));
2289
2290 for (i = 0; i < nunit; i++)
2291 {
2292 unsigned HOST_WIDE_INT c;
2293 int j;
2294
2295 if (!CONST_INT_P (XVECEXP (op, 0, i)))
2296 return false;
2297
2298 c = UINTVAL (XVECEXP (op, 0, i));
2299 for (j = 0; j < unit_size; j++)
2300 {
2301 if ((c & 0xff) != 0 && (c & 0xff) != 0xff)
2302 return false;
2303 tmp_mask |= (c & 1) << ((nunit - 1 - i) * unit_size + j);
2304 c = c >> BITS_PER_UNIT;
2305 }
2306 }
2307
2308 if (mask != NULL)
2309 *mask = tmp_mask;
2310
2311 return true;
2312 }
2313
2314 /* Check whether a rotate of ROTL followed by an AND of CONTIG is
2315 equivalent to a shift followed by the AND. In particular, CONTIG
2316 should not overlap the (rotated) bit 0/bit 63 gap. Negative values
2317 for ROTL indicate a rotate to the right. */
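/* Illustrative sketch (not part of the original source): with
   BITSIZE == 64 and CONTIG == 0xff0 (pos 4, len 8), a left rotate by 3
   is accepted (3 <= pos), a left rotate by 5 is not, and a right rotate
   by 3 (ROTL == -3) is accepted because 3 <= 64 - 8 - 4.  */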
2318
2319 bool
2320 s390_extzv_shift_ok (int bitsize, int rotl, unsigned HOST_WIDE_INT contig)
2321 {
2322 int pos, len;
2323 bool ok;
2324
2325 ok = s390_contiguous_bitmask_p (contig, bitsize, &pos, &len);
2326 gcc_assert (ok);
2327
2328 return ((rotl >= 0 && rotl <= pos)
2329 || (rotl < 0 && -rotl <= bitsize - len - pos));
2330 }
2331
2332 /* Check whether we can (and want to) split a double-word
2333 move in mode MODE from SRC to DST into two single-word
2334 moves, moving the subword FIRST_SUBWORD first. */
2335
2336 bool
2337 s390_split_ok_p (rtx dst, rtx src, machine_mode mode, int first_subword)
2338 {
2339 /* Floating point and vector registers cannot be split. */
2340 if (FP_REG_P (src) || FP_REG_P (dst) || VECTOR_REG_P (src) || VECTOR_REG_P (dst))
2341 return false;
2342
2343 /* We don't need to split if operands are directly accessible. */
2344 if (s_operand (src, mode) || s_operand (dst, mode))
2345 return false;
2346
2347 /* Non-offsettable memory references cannot be split. */
2348 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
2349 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
2350 return false;
2351
2352 /* Moving the first subword must not clobber a register
2353 needed to move the second subword. */
2354 if (register_operand (dst, mode))
2355 {
2356 rtx subreg = operand_subword (dst, first_subword, 0, mode);
2357 if (reg_overlap_mentioned_p (subreg, src))
2358 return false;
2359 }
2360
2361 return true;
2362 }
2363
2364 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
2365 and [MEM2, MEM2 + SIZE] do overlap and false
2366 otherwise. */
2367
2368 bool
2369 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
2370 {
2371 rtx addr1, addr2, addr_delta;
2372 HOST_WIDE_INT delta;
2373
2374 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
2375 return true;
2376
2377 if (size == 0)
2378 return false;
2379
2380 addr1 = XEXP (mem1, 0);
2381 addr2 = XEXP (mem2, 0);
2382
2383 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
2384
2385 /* This overlapping check is used by peepholes merging memory block operations.
2386 Overlapping operations would otherwise be recognized by the S/390 hardware
2387 and would fall back to a slower implementation. Allowing overlapping
2388 operations would lead to slow code but not to wrong code. Therefore we are
2389 somewhat optimistic if we cannot prove that the memory blocks are
2390 overlapping.
2391 That's why we return false here although this may accept operations on
2392 overlapping memory areas. */
2393 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
2394 return false;
2395
2396 delta = INTVAL (addr_delta);
2397
2398 if (delta == 0
2399 || (delta > 0 && delta < size)
2400 || (delta < 0 && -delta < size))
2401 return true;
2402
2403 return false;
2404 }
2405
2406 /* Check whether the address of memory reference MEM2 equals exactly
2407 the address of memory reference MEM1 plus DELTA. Return true if
2408 we can prove this to be the case, false otherwise. */
2409
2410 bool
2411 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
2412 {
2413 rtx addr1, addr2, addr_delta;
2414
2415 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
2416 return false;
2417
2418 addr1 = XEXP (mem1, 0);
2419 addr2 = XEXP (mem2, 0);
2420
2421 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
2422 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
2423 return false;
2424
2425 return true;
2426 }
2427
2428 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
2429
2430 void
2431 s390_expand_logical_operator (enum rtx_code code, machine_mode mode,
2432 rtx *operands)
2433 {
2434 machine_mode wmode = mode;
2435 rtx dst = operands[0];
2436 rtx src1 = operands[1];
2437 rtx src2 = operands[2];
2438 rtx op, clob, tem;
2439
2440 /* If we cannot handle the operation directly, use a temp register. */
2441 if (!s390_logical_operator_ok_p (operands))
2442 dst = gen_reg_rtx (mode);
2443
2444 /* QImode and HImode patterns make sense only if we have a destination
2445 in memory. Otherwise perform the operation in SImode. */
2446 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
2447 wmode = SImode;
2448
2449 /* Widen operands if required. */
2450 if (mode != wmode)
2451 {
2452 if (GET_CODE (dst) == SUBREG
2453 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
2454 dst = tem;
2455 else if (REG_P (dst))
2456 dst = gen_rtx_SUBREG (wmode, dst, 0);
2457 else
2458 dst = gen_reg_rtx (wmode);
2459
2460 if (GET_CODE (src1) == SUBREG
2461 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
2462 src1 = tem;
2463 else if (GET_MODE (src1) != VOIDmode)
2464 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
2465
2466 if (GET_CODE (src2) == SUBREG
2467 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
2468 src2 = tem;
2469 else if (GET_MODE (src2) != VOIDmode)
2470 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
2471 }
2472
2473 /* Emit the instruction. */
2474 op = gen_rtx_SET (dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
2475 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
2476 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
2477
2478 /* Fix up the destination if needed. */
2479 if (dst != operands[0])
2480 emit_move_insn (operands[0], gen_lowpart (mode, dst));
2481 }
2482
2483 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
2484
2485 bool
2486 s390_logical_operator_ok_p (rtx *operands)
2487 {
2488 /* If the destination operand is in memory, it needs to coincide
2489 with one of the source operands. After reload, it has to be
2490 the first source operand. */
2491 if (GET_CODE (operands[0]) == MEM)
2492 return rtx_equal_p (operands[0], operands[1])
2493 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
2494
2495 return true;
2496 }
2497
2498 /* Narrow logical operation CODE of memory operand MEMOP with immediate
2499 operand IMMOP to switch from SS to SI type instructions. */
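/* Illustrative example (not part of the original source): an IOR of an
   HImode memory operand with the immediate 1 only affects the least
   significant byte, so MEMOP is narrowed to a QImode reference at byte
   offset 1 and IMMOP becomes (const_int 1).  */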
2500
2501 void
2502 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
2503 {
2504 int def = code == AND ? -1 : 0;
2505 HOST_WIDE_INT mask;
2506 int part;
2507
2508 gcc_assert (GET_CODE (*memop) == MEM);
2509 gcc_assert (!MEM_VOLATILE_P (*memop));
2510
2511 mask = s390_extract_part (*immop, QImode, def);
2512 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
2513 gcc_assert (part >= 0);
2514
2515 *memop = adjust_address (*memop, QImode, part);
2516 *immop = gen_int_mode (mask, QImode);
2517 }
2518
2519
2520 /* How to allocate a 'struct machine_function'. */
2521
2522 static struct machine_function *
2523 s390_init_machine_status (void)
2524 {
2525 return ggc_cleared_alloc<machine_function> ();
2526 }
2527
2528 /* Map for smallest class containing reg regno. */
2529
2530 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
2531 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 0 */
2532 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 4 */
2533 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 8 */
2534 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 12 */
2535 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 16 */
2536 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 20 */
2537 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 24 */
2538 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 28 */
2539 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS, /* 32 */
2540 ACCESS_REGS, ACCESS_REGS, VEC_REGS, VEC_REGS, /* 36 */
2541 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 40 */
2542 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 44 */
2543 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 48 */
2544 VEC_REGS, VEC_REGS /* 52 */
2545 };
2546
2547 /* Return attribute type of insn. */
2548
2549 static enum attr_type
2550 s390_safe_attr_type (rtx_insn *insn)
2551 {
2552 if (recog_memoized (insn) >= 0)
2553 return get_attr_type (insn);
2554 else
2555 return TYPE_NONE;
2556 }
2557
2558 /* Return true if DISP is a valid short displacement. */
2559
2560 static bool
2561 s390_short_displacement (rtx disp)
2562 {
2563 /* No displacement is OK. */
2564 if (!disp)
2565 return true;
2566
2567 /* Without the long displacement facility we don't need to
2568 distinguish between long and short displacements. */
2569 if (!TARGET_LONG_DISPLACEMENT)
2570 return true;
2571
2572 /* Integer displacement in range. */
2573 if (GET_CODE (disp) == CONST_INT)
2574 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
2575
2576 /* GOT offset is not OK, the GOT can be large. */
2577 if (GET_CODE (disp) == CONST
2578 && GET_CODE (XEXP (disp, 0)) == UNSPEC
2579 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
2580 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
2581 return false;
2582
2583 /* All other symbolic constants are literal pool references,
2584 which are OK as the literal pool must be small. */
2585 if (GET_CODE (disp) == CONST)
2586 return true;
2587
2588 return false;
2589 }
2590
2591 /* Decompose a RTL expression ADDR for a memory address into
2592 its components, returned in OUT.
2593
2594 Returns false if ADDR is not a valid memory address, true
2595 otherwise. If OUT is NULL, don't return the components,
2596 but check for validity only.
2597
2598 Note: Only addresses in canonical form are recognized.
2599 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
2600 canonical form so that they will be recognized. */
2601
2602 static int
2603 s390_decompose_address (rtx addr, struct s390_address *out)
2604 {
2605 HOST_WIDE_INT offset = 0;
2606 rtx base = NULL_RTX;
2607 rtx indx = NULL_RTX;
2608 rtx disp = NULL_RTX;
2609 rtx orig_disp;
2610 bool pointer = false;
2611 bool base_ptr = false;
2612 bool indx_ptr = false;
2613 bool literal_pool = false;
2614
2615 /* We may need to substitute the literal pool base register into the address
2616 below. However, at this point we do not know which register is going to
2617 be used as base, so we substitute the arg pointer register. This is going
2618 to be treated as holding a pointer below -- it shouldn't be used for any
2619 other purpose. */
2620 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
2621
2622 /* Decompose address into base + index + displacement. */
2623
2624 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
2625 base = addr;
2626
2627 else if (GET_CODE (addr) == PLUS)
2628 {
2629 rtx op0 = XEXP (addr, 0);
2630 rtx op1 = XEXP (addr, 1);
2631 enum rtx_code code0 = GET_CODE (op0);
2632 enum rtx_code code1 = GET_CODE (op1);
2633
2634 if (code0 == REG || code0 == UNSPEC)
2635 {
2636 if (code1 == REG || code1 == UNSPEC)
2637 {
2638 indx = op0; /* index + base */
2639 base = op1;
2640 }
2641
2642 else
2643 {
2644 base = op0; /* base + displacement */
2645 disp = op1;
2646 }
2647 }
2648
2649 else if (code0 == PLUS)
2650 {
2651 indx = XEXP (op0, 0); /* index + base + disp */
2652 base = XEXP (op0, 1);
2653 disp = op1;
2654 }
2655
2656 else
2657 {
2658 return false;
2659 }
2660 }
2661
2662 else
2663 disp = addr; /* displacement */
2664
2665 /* Extract integer part of displacement. */
2666 orig_disp = disp;
2667 if (disp)
2668 {
2669 if (GET_CODE (disp) == CONST_INT)
2670 {
2671 offset = INTVAL (disp);
2672 disp = NULL_RTX;
2673 }
2674 else if (GET_CODE (disp) == CONST
2675 && GET_CODE (XEXP (disp, 0)) == PLUS
2676 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
2677 {
2678 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
2679 disp = XEXP (XEXP (disp, 0), 0);
2680 }
2681 }
2682
2683 /* Strip off CONST here to avoid special case tests later. */
2684 if (disp && GET_CODE (disp) == CONST)
2685 disp = XEXP (disp, 0);
2686
2687 /* We can convert literal pool addresses to
2688 displacements by basing them off the base register. */
2689 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
2690 {
2691 /* Either base or index must be free to hold the base register. */
2692 if (!base)
2693 base = fake_pool_base, literal_pool = true;
2694 else if (!indx)
2695 indx = fake_pool_base, literal_pool = true;
2696 else
2697 return false;
2698
2699 /* Mark up the displacement. */
2700 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
2701 UNSPEC_LTREL_OFFSET);
2702 }
2703
2704 /* Validate base register. */
2705 if (base)
2706 {
2707 if (GET_CODE (base) == UNSPEC)
2708 switch (XINT (base, 1))
2709 {
2710 case UNSPEC_LTREF:
2711 if (!disp)
2712 disp = gen_rtx_UNSPEC (Pmode,
2713 gen_rtvec (1, XVECEXP (base, 0, 0)),
2714 UNSPEC_LTREL_OFFSET);
2715 else
2716 return false;
2717
2718 base = XVECEXP (base, 0, 1);
2719 break;
2720
2721 case UNSPEC_LTREL_BASE:
2722 if (XVECLEN (base, 0) == 1)
2723 base = fake_pool_base, literal_pool = true;
2724 else
2725 base = XVECEXP (base, 0, 1);
2726 break;
2727
2728 default:
2729 return false;
2730 }
2731
2732 if (!REG_P (base)
2733 || (GET_MODE (base) != SImode
2734 && GET_MODE (base) != Pmode))
2735 return false;
2736
2737 if (REGNO (base) == STACK_POINTER_REGNUM
2738 || REGNO (base) == FRAME_POINTER_REGNUM
2739 || ((reload_completed || reload_in_progress)
2740 && frame_pointer_needed
2741 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
2742 || REGNO (base) == ARG_POINTER_REGNUM
2743 || (flag_pic
2744 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
2745 pointer = base_ptr = true;
2746
2747 if ((reload_completed || reload_in_progress)
2748 && base == cfun->machine->base_reg)
2749 pointer = base_ptr = literal_pool = true;
2750 }
2751
2752 /* Validate index register. */
2753 if (indx)
2754 {
2755 if (GET_CODE (indx) == UNSPEC)
2756 switch (XINT (indx, 1))
2757 {
2758 case UNSPEC_LTREF:
2759 if (!disp)
2760 disp = gen_rtx_UNSPEC (Pmode,
2761 gen_rtvec (1, XVECEXP (indx, 0, 0)),
2762 UNSPEC_LTREL_OFFSET);
2763 else
2764 return false;
2765
2766 indx = XVECEXP (indx, 0, 1);
2767 break;
2768
2769 case UNSPEC_LTREL_BASE:
2770 if (XVECLEN (indx, 0) == 1)
2771 indx = fake_pool_base, literal_pool = true;
2772 else
2773 indx = XVECEXP (indx, 0, 1);
2774 break;
2775
2776 default:
2777 return false;
2778 }
2779
2780 if (!REG_P (indx)
2781 || (GET_MODE (indx) != SImode
2782 && GET_MODE (indx) != Pmode))
2783 return false;
2784
2785 if (REGNO (indx) == STACK_POINTER_REGNUM
2786 || REGNO (indx) == FRAME_POINTER_REGNUM
2787 || ((reload_completed || reload_in_progress)
2788 && frame_pointer_needed
2789 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
2790 || REGNO (indx) == ARG_POINTER_REGNUM
2791 || (flag_pic
2792 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
2793 pointer = indx_ptr = true;
2794
2795 if ((reload_completed || reload_in_progress)
2796 && indx == cfun->machine->base_reg)
2797 pointer = indx_ptr = literal_pool = true;
2798 }
2799
2800 /* Prefer to use pointer as base, not index. */
2801 if (base && indx && !base_ptr
2802 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
2803 {
2804 rtx tmp = base;
2805 base = indx;
2806 indx = tmp;
2807 }
2808
2809 /* Validate displacement. */
2810 if (!disp)
2811 {
2812 /* If virtual registers are involved, the displacement will change later
2813 anyway as the virtual registers get eliminated. This could make a
2814 valid displacement invalid, but it is more likely to make an invalid
2815 displacement valid, because we sometimes access the register save area
2816 via negative offsets to one of those registers.
2817 Thus we don't check the displacement for validity here. If after
2818 elimination the displacement turns out to be invalid after all,
2819 this is fixed up by reload in any case. */
2820 /* LRA always keeps the displacements up to date, and we need the
2821 displacement to be valid throughout LRA, not only at the
2822 final elimination. */
2823 if (lra_in_progress
2824 || (base != arg_pointer_rtx
2825 && indx != arg_pointer_rtx
2826 && base != return_address_pointer_rtx
2827 && indx != return_address_pointer_rtx
2828 && base != frame_pointer_rtx
2829 && indx != frame_pointer_rtx
2830 && base != virtual_stack_vars_rtx
2831 && indx != virtual_stack_vars_rtx))
2832 if (!DISP_IN_RANGE (offset))
2833 return false;
2834 }
2835 else
2836 {
2837 /* All the special cases are pointers. */
2838 pointer = true;
2839
2840 /* In the small-PIC case, the linker converts @GOT
2841 and @GOTNTPOFF offsets to possible displacements. */
2842 if (GET_CODE (disp) == UNSPEC
2843 && (XINT (disp, 1) == UNSPEC_GOT
2844 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
2845 && flag_pic == 1)
2846 {
2847 ;
2848 }
2849
2850 /* Accept pool label offsets. */
2851 else if (GET_CODE (disp) == UNSPEC
2852 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
2853 ;
2854
2855 /* Accept literal pool references. */
2856 else if (GET_CODE (disp) == UNSPEC
2857 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
2858 {
2859 /* In case CSE pulled a non-literal-pool reference out of
2860 the pool we have to reject the address. This is
2861 especially important when loading the GOT pointer on non
2862 zarch CPUs. In this case the literal pool contains an lt
2863 relative offset to the _GLOBAL_OFFSET_TABLE_ label which
2864 will most likely exceed the displacement range. */
2865 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
2866 || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
2867 return false;
2868
2869 orig_disp = gen_rtx_CONST (Pmode, disp);
2870 if (offset)
2871 {
2872 /* If we have an offset, make sure it does not
2873 exceed the size of the constant pool entry. */
2874 rtx sym = XVECEXP (disp, 0, 0);
2875 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
2876 return false;
2877
2878 orig_disp = plus_constant (Pmode, orig_disp, offset);
2879 }
2880 }
2881
2882 else
2883 return false;
2884 }
2885
2886 if (!base && !indx)
2887 pointer = true;
2888
2889 if (out)
2890 {
2891 out->base = base;
2892 out->indx = indx;
2893 out->disp = orig_disp;
2894 out->pointer = pointer;
2895 out->literal_pool = literal_pool;
2896 }
2897
2898 return true;
2899 }
2900
2901 /* Decompose a RTL expression OP for a shift count into its components,
2902 and return the base register in BASE and the offset in OFFSET.
2903
2904 Return true if OP is a valid shift count, false if not. */
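/* Illustrative examples (not part of the original source):
   (plus (reg %r3) (const_int 7)) decomposes into base = %r3 and
   offset 7; a plain (const_int 5) gives a NULL base and offset 5;
   anything that is not a register, a constant or their sum is
   rejected.  */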
2905
2906 bool
2907 s390_decompose_shift_count (rtx op, rtx *base, HOST_WIDE_INT *offset)
2908 {
2909 HOST_WIDE_INT off = 0;
2910
2911 /* We can have an integer constant, an address register,
2912 or a sum of the two. */
2913 if (GET_CODE (op) == CONST_INT)
2914 {
2915 off = INTVAL (op);
2916 op = NULL_RTX;
2917 }
2918 if (op && GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT)
2919 {
2920 off = INTVAL (XEXP (op, 1));
2921 op = XEXP (op, 0);
2922 }
2923 while (op && GET_CODE (op) == SUBREG)
2924 op = SUBREG_REG (op);
2925
2926 if (op && GET_CODE (op) != REG)
2927 return false;
2928
2929 if (offset)
2930 *offset = off;
2931 if (base)
2932 *base = op;
2933
2934 return true;
2935 }
2936
2937
2938 /* Return true if OP is a valid address without index. */
2939
2940 bool
2941 s390_legitimate_address_without_index_p (rtx op)
2942 {
2943 struct s390_address addr;
2944
2945 if (!s390_decompose_address (XEXP (op, 0), &addr))
2946 return false;
2947 if (addr.indx)
2948 return false;
2949
2950 return true;
2951 }
2952
2953
2954 /* Return TRUE if ADDR is an operand valid for a load/store relative
2955 instruction. Be aware that the alignment of the operand needs to
2956 be checked separately.
2957 Valid addresses are single references or a sum of a reference and a
2958 constant integer. Return these parts in SYMREF and ADDEND. You can
2959 pass NULL in SYMREF and/or ADDEND if you are not interested in these
2960 values. Literal pool references are *not* considered symbol
2961 references. */
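/* Illustrative example (not part of the original source): for
   (const (plus (symbol_ref "foo") (const_int 16))) the function returns
   true with *SYMREF set to the SYMBOL_REF and *ADDEND set to 16, while
   a literal pool SYMBOL_REF is rejected.  */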
2962
2963 static bool
2964 s390_loadrelative_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
2965 {
2966 HOST_WIDE_INT tmpaddend = 0;
2967
2968 if (GET_CODE (addr) == CONST)
2969 addr = XEXP (addr, 0);
2970
2971 if (GET_CODE (addr) == PLUS)
2972 {
2973 if (!CONST_INT_P (XEXP (addr, 1)))
2974 return false;
2975
2976 tmpaddend = INTVAL (XEXP (addr, 1));
2977 addr = XEXP (addr, 0);
2978 }
2979
2980 if ((GET_CODE (addr) == SYMBOL_REF && !CONSTANT_POOL_ADDRESS_P (addr))
2981 || (GET_CODE (addr) == UNSPEC
2982 && (XINT (addr, 1) == UNSPEC_GOTENT
2983 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
2984 {
2985 if (symref)
2986 *symref = addr;
2987 if (addend)
2988 *addend = tmpaddend;
2989
2990 return true;
2991 }
2992 return false;
2993 }
2994
2995 /* Return true if the address in OP is valid for constraint letter C
2996 if wrapped in a MEM rtx. Set LIT_POOL_OK to true if literal
2997 pool MEMs should be accepted. Only the Q, R, S, T constraint
2998 letters are allowed for C. */
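/* Illustrative example (not part of the original source): with
   TARGET_LONG_DISPLACEMENT, the address (plus (reg) (const_int 5000))
   has no index and a displacement outside the short range, so it
   satisfies 'S' and 'T' but not 'Q' or 'R'.  */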
2999
3000 static int
3001 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
3002 {
3003 struct s390_address addr;
3004 bool decomposed = false;
3005
3006 /* This check makes sure that no symbolic address (except literal
3007 pool references) is accepted by the R or T constraints. */
3008 if (s390_loadrelative_operand_p (op, NULL, NULL))
3009 return 0;
3010
3011 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
3012 if (!lit_pool_ok)
3013 {
3014 if (!s390_decompose_address (op, &addr))
3015 return 0;
3016 if (addr.literal_pool)
3017 return 0;
3018 decomposed = true;
3019 }
3020
3021 switch (c)
3022 {
3023 case 'Q': /* no index short displacement */
3024 if (!decomposed && !s390_decompose_address (op, &addr))
3025 return 0;
3026 if (addr.indx)
3027 return 0;
3028 if (!s390_short_displacement (addr.disp))
3029 return 0;
3030 break;
3031
3032 case 'R': /* with index short displacement */
3033 if (TARGET_LONG_DISPLACEMENT)
3034 {
3035 if (!decomposed && !s390_decompose_address (op, &addr))
3036 return 0;
3037 if (!s390_short_displacement (addr.disp))
3038 return 0;
3039 }
3040 /* Any invalid address here will be fixed up by reload,
3041 so accept it for the most generic constraint. */
3042 break;
3043
3044 case 'S': /* no index long displacement */
3045 if (!TARGET_LONG_DISPLACEMENT)
3046 return 0;
3047 if (!decomposed && !s390_decompose_address (op, &addr))
3048 return 0;
3049 if (addr.indx)
3050 return 0;
3051 if (s390_short_displacement (addr.disp))
3052 return 0;
3053 break;
3054
3055 case 'T': /* with index long displacement */
3056 if (!TARGET_LONG_DISPLACEMENT)
3057 return 0;
3058 /* Any invalid address here will be fixed up by reload,
3059 so accept it for the most generic constraint. */
3060 if ((decomposed || s390_decompose_address (op, &addr))
3061 && s390_short_displacement (addr.disp))
3062 return 0;
3063 break;
3064 default:
3065 return 0;
3066 }
3067 return 1;
3068 }
3069
3070
3071 /* Evaluates constraint strings described by the regular expression
3072 ([A|B|Z](Q|R|S|T))|U|W|Y and returns 1 if OP is a valid operand for
3073 the constraint given in STR, or 0 else. */
3074
3075 int
3076 s390_mem_constraint (const char *str, rtx op)
3077 {
3078 char c = str[0];
3079
3080 switch (c)
3081 {
3082 case 'A':
3083 /* Check for offsettable variants of memory constraints. */
3084 if (!MEM_P (op) || MEM_VOLATILE_P (op))
3085 return 0;
3086 if ((reload_completed || reload_in_progress)
3087 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
3088 return 0;
3089 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
3090 case 'B':
3091 /* Check for non-literal-pool variants of memory constraints. */
3092 if (!MEM_P (op))
3093 return 0;
3094 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
3095 case 'Q':
3096 case 'R':
3097 case 'S':
3098 case 'T':
3099 if (GET_CODE (op) != MEM)
3100 return 0;
3101 return s390_check_qrst_address (c, XEXP (op, 0), true);
3102 case 'U':
3103 return (s390_check_qrst_address ('Q', op, true)
3104 || s390_check_qrst_address ('R', op, true));
3105 case 'W':
3106 return (s390_check_qrst_address ('S', op, true)
3107 || s390_check_qrst_address ('T', op, true));
3108 case 'Y':
3109 /* Simply check for the basic form of a shift count. Reload will
3110 take care of making sure we have a proper base register. */
3111 if (!s390_decompose_shift_count (op, NULL, NULL))
3112 return 0;
3113 break;
3114 case 'Z':
3115 return s390_check_qrst_address (str[1], op, true);
3116 default:
3117 return 0;
3118 }
3119 return 1;
3120 }
3121
3122
3123 /* Evaluates constraint strings starting with letter O. Input
3124 parameter C is the letter following the "O" in the constraint
3125 string. Returns 1 if VALUE meets the respective constraint and 0
3126 otherwise. */
3127
3128 int
3129 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
3130 {
3131 if (!TARGET_EXTIMM)
3132 return 0;
3133
3134 switch (c)
3135 {
3136 case 's':
3137 return trunc_int_for_mode (value, SImode) == value;
3138
3139 case 'p':
3140 return value == 0
3141 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
3142
3143 case 'n':
3144 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
3145
3146 default:
3147 gcc_unreachable ();
3148 }
3149 }
3150
3151
3152 /* Evaluates constraint strings starting with letter N. Parameter STR
3153 contains the letters following letter "N" in the constraint string.
3154 Returns true if VALUE matches the constraint. */
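/* Illustrative example (not part of the original source): a constraint
   string of the form "xHD0" means: some ("x") HImode part of the DImode
   value VALUE differs from 0 while all other parts are 0; for instance
   VALUE == 0x000000ff00000000 matches.  The actual constraint strings
   are defined in the target's constraints.md.  */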
3155
3156 int
3157 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
3158 {
3159 machine_mode mode, part_mode;
3160 int def;
3161 int part, part_goal;
3162
3163
3164 if (str[0] == 'x')
3165 part_goal = -1;
3166 else
3167 part_goal = str[0] - '0';
3168
3169 switch (str[1])
3170 {
3171 case 'Q':
3172 part_mode = QImode;
3173 break;
3174 case 'H':
3175 part_mode = HImode;
3176 break;
3177 case 'S':
3178 part_mode = SImode;
3179 break;
3180 default:
3181 return 0;
3182 }
3183
3184 switch (str[2])
3185 {
3186 case 'H':
3187 mode = HImode;
3188 break;
3189 case 'S':
3190 mode = SImode;
3191 break;
3192 case 'D':
3193 mode = DImode;
3194 break;
3195 default:
3196 return 0;
3197 }
3198
3199 switch (str[3])
3200 {
3201 case '0':
3202 def = 0;
3203 break;
3204 case 'F':
3205 def = -1;
3206 break;
3207 default:
3208 return 0;
3209 }
3210
3211 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
3212 return 0;
3213
3214 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
3215 if (part < 0)
3216 return 0;
3217 if (part_goal != -1 && part_goal != part)
3218 return 0;
3219
3220 return 1;
3221 }
3222
3223
3224 /* Returns true if the input parameter VALUE is a float zero. */
3225
3226 int
3227 s390_float_const_zero_p (rtx value)
3228 {
3229 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
3230 && value == CONST0_RTX (GET_MODE (value)));
3231 }
3232
3233 /* Implement TARGET_REGISTER_MOVE_COST. */
3234
3235 static int
3236 s390_register_move_cost (machine_mode mode,
3237 reg_class_t from, reg_class_t to)
3238 {
3239 /* On s390, copy between fprs and gprs is expensive. */
3240
3241 /* It becomes somewhat faster having ldgr/lgdr. */
3242 if (TARGET_Z10 && GET_MODE_SIZE (mode) == 8)
3243 {
3244 /* ldgr is single cycle. */
3245 if (reg_classes_intersect_p (from, GENERAL_REGS)
3246 && reg_classes_intersect_p (to, FP_REGS))
3247 return 1;
3248 /* lgdr needs 3 cycles. */
3249 if (reg_classes_intersect_p (to, GENERAL_REGS)
3250 && reg_classes_intersect_p (from, FP_REGS))
3251 return 3;
3252 }
3253
3254 /* Otherwise copying is done via memory. */
3255 if ((reg_classes_intersect_p (from, GENERAL_REGS)
3256 && reg_classes_intersect_p (to, FP_REGS))
3257 || (reg_classes_intersect_p (from, FP_REGS)
3258 && reg_classes_intersect_p (to, GENERAL_REGS)))
3259 return 10;
3260
3261 return 1;
3262 }
3263
3264 /* Implement TARGET_MEMORY_MOVE_COST. */
3265
3266 static int
3267 s390_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
3268 reg_class_t rclass ATTRIBUTE_UNUSED,
3269 bool in ATTRIBUTE_UNUSED)
3270 {
3271 return 2;
3272 }
3273
3274 /* Compute a (partial) cost for rtx X. Return true if the complete
3275 cost has been computed, and false if subexpressions should be
3276 scanned. In either case, *TOTAL contains the cost result.
3277 CODE contains GET_CODE (x), OUTER_CODE contains the code
3278 of the superexpression of x. */
3279
3280 static bool
3281 s390_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
3282 int *total, bool speed ATTRIBUTE_UNUSED)
3283 {
3284 switch (code)
3285 {
3286 case CONST:
3287 case CONST_INT:
3288 case LABEL_REF:
3289 case SYMBOL_REF:
3290 case CONST_DOUBLE:
3291 case MEM:
3292 *total = 0;
3293 return true;
3294
3295 case ASHIFT:
3296 case ASHIFTRT:
3297 case LSHIFTRT:
3298 case ROTATE:
3299 case ROTATERT:
3300 case AND:
3301 case IOR:
3302 case XOR:
3303 case NEG:
3304 case NOT:
3305 *total = COSTS_N_INSNS (1);
3306 return false;
3307
3308 case PLUS:
3309 case MINUS:
3310 *total = COSTS_N_INSNS (1);
3311 return false;
3312
3313 case MULT:
3314 switch (GET_MODE (x))
3315 {
3316 case SImode:
3317 {
3318 rtx left = XEXP (x, 0);
3319 rtx right = XEXP (x, 1);
3320 if (GET_CODE (right) == CONST_INT
3321 && CONST_OK_FOR_K (INTVAL (right)))
3322 *total = s390_cost->mhi;
3323 else if (GET_CODE (left) == SIGN_EXTEND)
3324 *total = s390_cost->mh;
3325 else
3326 *total = s390_cost->ms; /* msr, ms, msy */
3327 break;
3328 }
3329 case DImode:
3330 {
3331 rtx left = XEXP (x, 0);
3332 rtx right = XEXP (x, 1);
3333 if (TARGET_ZARCH)
3334 {
3335 if (GET_CODE (right) == CONST_INT
3336 && CONST_OK_FOR_K (INTVAL (right)))
3337 *total = s390_cost->mghi;
3338 else if (GET_CODE (left) == SIGN_EXTEND)
3339 *total = s390_cost->msgf;
3340 else
3341 *total = s390_cost->msg; /* msgr, msg */
3342 }
3343 else /* TARGET_31BIT */
3344 {
3345 if (GET_CODE (left) == SIGN_EXTEND
3346 && GET_CODE (right) == SIGN_EXTEND)
3347 /* mulsidi case: mr, m */
3348 *total = s390_cost->m;
3349 else if (GET_CODE (left) == ZERO_EXTEND
3350 && GET_CODE (right) == ZERO_EXTEND
3351 && TARGET_CPU_ZARCH)
3352 /* umulsidi case: ml, mlr */
3353 *total = s390_cost->ml;
3354 else
3355 /* Complex calculation is required. */
3356 *total = COSTS_N_INSNS (40);
3357 }
3358 break;
3359 }
3360 case SFmode:
3361 case DFmode:
3362 *total = s390_cost->mult_df;
3363 break;
3364 case TFmode:
3365 *total = s390_cost->mxbr;
3366 break;
3367 default:
3368 return false;
3369 }
3370 return false;
3371
3372 case FMA:
3373 switch (GET_MODE (x))
3374 {
3375 case DFmode:
3376 *total = s390_cost->madbr;
3377 break;
3378 case SFmode:
3379 *total = s390_cost->maebr;
3380 break;
3381 default:
3382 return false;
3383 }
3384 /* A negation of the third argument is free: FMSUB. */
3385 if (GET_CODE (XEXP (x, 2)) == NEG)
3386 {
3387 *total += (rtx_cost (XEXP (x, 0), FMA, 0, speed)
3388 + rtx_cost (XEXP (x, 1), FMA, 1, speed)
3389 + rtx_cost (XEXP (XEXP (x, 2), 0), FMA, 2, speed));
3390 return true;
3391 }
3392 return false;
3393
3394 case UDIV:
3395 case UMOD:
3396 if (GET_MODE (x) == TImode) /* 128 bit division */
3397 *total = s390_cost->dlgr;
3398 else if (GET_MODE (x) == DImode)
3399 {
3400 rtx right = XEXP (x, 1);
3401 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
3402 *total = s390_cost->dlr;
3403 else /* 64 by 64 bit division */
3404 *total = s390_cost->dlgr;
3405 }
3406 else if (GET_MODE (x) == SImode) /* 32 bit division */
3407 *total = s390_cost->dlr;
3408 return false;
3409
3410 case DIV:
3411 case MOD:
3412 if (GET_MODE (x) == DImode)
3413 {
3414 rtx right = XEXP (x, 1);
3415 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
3416 if (TARGET_ZARCH)
3417 *total = s390_cost->dsgfr;
3418 else
3419 *total = s390_cost->dr;
3420 else /* 64 by 64 bit division */
3421 *total = s390_cost->dsgr;
3422 }
3423 else if (GET_MODE (x) == SImode) /* 32 bit division */
3424 *total = s390_cost->dlr;
3425 else if (GET_MODE (x) == SFmode)
3426 {
3427 *total = s390_cost->debr;
3428 }
3429 else if (GET_MODE (x) == DFmode)
3430 {
3431 *total = s390_cost->ddbr;
3432 }
3433 else if (GET_MODE (x) == TFmode)
3434 {
3435 *total = s390_cost->dxbr;
3436 }
3437 return false;
3438
3439 case SQRT:
3440 if (GET_MODE (x) == SFmode)
3441 *total = s390_cost->sqebr;
3442 else if (GET_MODE (x) == DFmode)
3443 *total = s390_cost->sqdbr;
3444 else /* TFmode */
3445 *total = s390_cost->sqxbr;
3446 return false;
3447
3448 case SIGN_EXTEND:
3449 case ZERO_EXTEND:
3450 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
3451 || outer_code == PLUS || outer_code == MINUS
3452 || outer_code == COMPARE)
3453 *total = 0;
3454 return false;
3455
3456 case COMPARE:
3457 *total = COSTS_N_INSNS (1);
3458 if (GET_CODE (XEXP (x, 0)) == AND
3459 && GET_CODE (XEXP (x, 1)) == CONST_INT
3460 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
3461 {
3462 rtx op0 = XEXP (XEXP (x, 0), 0);
3463 rtx op1 = XEXP (XEXP (x, 0), 1);
3464 rtx op2 = XEXP (x, 1);
3465
3466 if (memory_operand (op0, GET_MODE (op0))
3467 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
3468 return true;
3469 if (register_operand (op0, GET_MODE (op0))
3470 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
3471 return true;
3472 }
3473 return false;
3474
3475 default:
3476 return false;
3477 }
3478 }
3479
3480 /* Return the cost of an address rtx ADDR. */
3481
3482 static int
3483 s390_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
3484 addr_space_t as ATTRIBUTE_UNUSED,
3485 bool speed ATTRIBUTE_UNUSED)
3486 {
3487 struct s390_address ad;
3488 if (!s390_decompose_address (addr, &ad))
3489 return 1000;
3490
3491 return ad.indx? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
3492 }
3493
3494 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
3495 otherwise return 0. */
3496
3497 int
3498 tls_symbolic_operand (rtx op)
3499 {
3500 if (GET_CODE (op) != SYMBOL_REF)
3501 return 0;
3502 return SYMBOL_REF_TLS_MODEL (op);
3503 }
3504 \f
3505 /* Split DImode access register reference REG (on 64-bit) into its constituent
3506 low and high parts, and store them into LO and HI. Note that gen_lowpart/
3507 gen_highpart cannot be used as they assume all registers are word-sized,
3508 while our access registers have only half that size. */
3509
3510 void
3511 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
3512 {
3513 gcc_assert (TARGET_64BIT);
3514 gcc_assert (ACCESS_REG_P (reg));
3515 gcc_assert (GET_MODE (reg) == DImode);
3516 gcc_assert (!(REGNO (reg) & 1));
3517
3518 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
3519 *hi = gen_rtx_REG (SImode, REGNO (reg));
3520 }
3521
3522 /* Return true if OP contains a symbol reference. */
3523
3524 bool
3525 symbolic_reference_mentioned_p (rtx op)
3526 {
3527 const char *fmt;
3528 int i;
3529
3530 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
3531 return 1;
3532
3533 fmt = GET_RTX_FORMAT (GET_CODE (op));
3534 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3535 {
3536 if (fmt[i] == 'E')
3537 {
3538 int j;
3539
3540 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3541 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3542 return 1;
3543 }
3544
3545 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
3546 return 1;
3547 }
3548
3549 return 0;
3550 }
3551
3552 /* Return true if OP contains a reference to a thread-local symbol. */
3553
3554 bool
3555 tls_symbolic_reference_mentioned_p (rtx op)
3556 {
3557 const char *fmt;
3558 int i;
3559
3560 if (GET_CODE (op) == SYMBOL_REF)
3561 return tls_symbolic_operand (op);
3562
3563 fmt = GET_RTX_FORMAT (GET_CODE (op));
3564 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3565 {
3566 if (fmt[i] == 'E')
3567 {
3568 int j;
3569
3570 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3571 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3572 return true;
3573 }
3574
3575 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
3576 return true;
3577 }
3578
3579 return false;
3580 }
3581
3582
3583 /* Return true if OP is a legitimate general operand when
3584 generating PIC code. It is given that flag_pic is on
3585 and that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
3586
3587 int
3588 legitimate_pic_operand_p (rtx op)
3589 {
3590 /* Accept all non-symbolic constants. */
3591 if (!SYMBOLIC_CONST (op))
3592 return 1;
3593
3594 /* Reject everything else; must be handled
3595 via emit_symbolic_move. */
3596 return 0;
3597 }
3598
3599 /* Returns true if the constant value OP is a legitimate general operand.
3600 It is given that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
3601
3602 static bool
3603 s390_legitimate_constant_p (machine_mode mode, rtx op)
3604 {
3605 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
3606 {
3607 if (GET_MODE_SIZE (mode) != 16)
3608 return 0;
3609
3610 if (!const0_operand (op, mode)
3611 && !s390_contiguous_bitmask_vector_p (op, NULL, NULL)
3612 && !s390_bytemask_vector_p (op, NULL))
3613 return 0;
3614 }
3615
3616 /* Accept all non-symbolic constants. */
3617 if (!SYMBOLIC_CONST (op))
3618 return 1;
3619
3620 /* Accept immediate LARL operands. */
3621 if (TARGET_CPU_ZARCH && larl_operand (op, mode))
3622 return 1;
3623
3624 /* Thread-local symbols are never legal constants. This is
3625 so that emit_call knows that computing such addresses
3626 might require a function call. */
3627 if (TLS_SYMBOLIC_CONST (op))
3628 return 0;
3629
3630 /* In the PIC case, symbolic constants must *not* be
3631 forced into the literal pool. We accept them here,
3632 so that they will be handled by emit_symbolic_move. */
3633 if (flag_pic)
3634 return 1;
3635
3636 /* All remaining non-PIC symbolic constants are
3637 forced into the literal pool. */
3638 return 0;
3639 }
3640
3641 /* Determine if it's legal to put X into the constant pool. This
3642 is not possible if X contains the address of a symbol that is
3643 not constant (TLS) or not known at final link time (PIC). */
3644
3645 static bool
3646 s390_cannot_force_const_mem (machine_mode mode, rtx x)
3647 {
3648 switch (GET_CODE (x))
3649 {
3650 case CONST_INT:
3651 case CONST_DOUBLE:
3652 case CONST_VECTOR:
3653 /* Accept all non-symbolic constants. */
3654 return false;
3655
3656 case LABEL_REF:
3657 /* Labels are OK iff we are non-PIC. */
3658 return flag_pic != 0;
3659
3660 case SYMBOL_REF:
3661 /* 'Naked' TLS symbol references are never OK,
3662 non-TLS symbols are OK iff we are non-PIC. */
3663 if (tls_symbolic_operand (x))
3664 return true;
3665 else
3666 return flag_pic != 0;
3667
3668 case CONST:
3669 return s390_cannot_force_const_mem (mode, XEXP (x, 0));
3670 case PLUS:
3671 case MINUS:
3672 return s390_cannot_force_const_mem (mode, XEXP (x, 0))
3673 || s390_cannot_force_const_mem (mode, XEXP (x, 1));
3674
3675 case UNSPEC:
3676 switch (XINT (x, 1))
3677 {
3678 /* Only lt-relative or GOT-relative UNSPECs are OK. */
3679 case UNSPEC_LTREL_OFFSET:
3680 case UNSPEC_GOT:
3681 case UNSPEC_GOTOFF:
3682 case UNSPEC_PLTOFF:
3683 case UNSPEC_TLSGD:
3684 case UNSPEC_TLSLDM:
3685 case UNSPEC_NTPOFF:
3686 case UNSPEC_DTPOFF:
3687 case UNSPEC_GOTNTPOFF:
3688 case UNSPEC_INDNTPOFF:
3689 return false;
3690
3691 /* If the literal pool shares the code section, execute
3692 template placeholders may be put into the pool as well. */
3693 case UNSPEC_INSN:
3694 return TARGET_CPU_ZARCH;
3695
3696 default:
3697 return true;
3698 }
3699 break;
3700
3701 default:
3702 gcc_unreachable ();
3703 }
3704 }
3705
3706 /* Returns true if the constant value OP is a legitimate general
3707 operand during and after reload. The difference to
3708 legitimate_constant_p is that this function will not accept
3709 a constant that would need to be forced to the literal pool
3710 before it can be used as operand.
3711 This function accepts all constants which can be loaded directly
3712 into a GPR. */
3713
3714 bool
3715 legitimate_reload_constant_p (rtx op)
3716 {
3717 /* Accept la(y) operands. */
3718 if (GET_CODE (op) == CONST_INT
3719 && DISP_IN_RANGE (INTVAL (op)))
3720 return true;
3721
3722 /* Accept l(g)hi/l(g)fi operands. */
3723 if (GET_CODE (op) == CONST_INT
3724 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
3725 return true;
3726
3727 /* Accept lliXX operands. */
3728 if (TARGET_ZARCH
3729 && GET_CODE (op) == CONST_INT
3730 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
3731 && s390_single_part (op, word_mode, HImode, 0) >= 0)
3732 return true;
3733
3734 if (TARGET_EXTIMM
3735 && GET_CODE (op) == CONST_INT
3736 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
3737 && s390_single_part (op, word_mode, SImode, 0) >= 0)
3738 return true;
3739
3740 /* Accept larl operands. */
3741 if (TARGET_CPU_ZARCH
3742 && larl_operand (op, VOIDmode))
3743 return true;
3744
3745 /* Accept floating-point zero operands that fit into a single GPR. */
3746 if (GET_CODE (op) == CONST_DOUBLE
3747 && s390_float_const_zero_p (op)
3748 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
3749 return true;
3750
3751 /* Accept double-word operands that can be split. */
3752 if (GET_CODE (op) == CONST_INT
3753 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op))
3754 {
3755 machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
3756 rtx hi = operand_subword (op, 0, 0, dword_mode);
3757 rtx lo = operand_subword (op, 1, 0, dword_mode);
3758 return legitimate_reload_constant_p (hi)
3759 && legitimate_reload_constant_p (lo);
3760 }
3761
3762 /* Everything else cannot be handled without reload. */
3763 return false;
3764 }
3765
3766 /* Returns true if the constant value OP is a legitimate fp operand
3767 during and after reload.
3768 This function accepts all constants which can be loaded directly
3769 into an FPR. */
3770
3771 static bool
3772 legitimate_reload_fp_constant_p (rtx op)
3773 {
3774 /* Accept floating-point zero operands if the load zero instruction
3775 can be used. Prior to z196 the load fp zero instruction caused a
3776 performance penalty if the result is used as BFP number. */
3777 if (TARGET_Z196
3778 && GET_CODE (op) == CONST_DOUBLE
3779 && s390_float_const_zero_p (op))
3780 return true;
3781
3782 return false;
3783 }
3784
3785 /* Returns true if the constant value OP is a legitimate vector operand
3786 during and after reload.
3787 This function accepts all constants which can be loaded directly
3788 into a VR. */
3789
3790 static bool
3791 legitimate_reload_vector_constant_p (rtx op)
3792 {
3793 /* FIXME: Support constant vectors with all the same 16 bit unsigned
3794 operands. These can be loaded with vrepi. */
3795
3796 if (TARGET_VX && GET_MODE_SIZE (GET_MODE (op)) == 16
3797 && (const0_operand (op, GET_MODE (op))
3798 || constm1_operand (op, GET_MODE (op))
3799 || s390_contiguous_bitmask_vector_p (op, NULL, NULL)
3800 || s390_bytemask_vector_p (op, NULL)))
3801 return true;
3802
3803 return false;
3804 }
3805
3806 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
3807 return the class of reg to actually use. */
3808
3809 static reg_class_t
3810 s390_preferred_reload_class (rtx op, reg_class_t rclass)
3811 {
3812 switch (GET_CODE (op))
3813 {
3814 /* Constants we cannot reload into general registers
3815 must be forced into the literal pool. */
3816 case CONST_VECTOR:
3817 case CONST_DOUBLE:
3818 case CONST_INT:
3819 if (reg_class_subset_p (GENERAL_REGS, rclass)
3820 && legitimate_reload_constant_p (op))
3821 return GENERAL_REGS;
3822 else if (reg_class_subset_p (ADDR_REGS, rclass)
3823 && legitimate_reload_constant_p (op))
3824 return ADDR_REGS;
3825 else if (reg_class_subset_p (FP_REGS, rclass)
3826 && legitimate_reload_fp_constant_p (op))
3827 return FP_REGS;
3828 else if (reg_class_subset_p (VEC_REGS, rclass)
3829 && legitimate_reload_vector_constant_p (op))
3830 return VEC_REGS;
3831
3832 return NO_REGS;
3833
3834 /* If a symbolic constant or a PLUS is reloaded,
3835 it is most likely being used as an address, so
3836 prefer ADDR_REGS. If 'class' is not a superset
3837 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
3838 case CONST:
3839 /* Symrefs cannot be pushed into the literal pool with -fPIC
3840 so we *MUST NOT* return NO_REGS for these cases
3841 (s390_cannot_force_const_mem will return true).
3842
3843 On the other hand we MUST return NO_REGS for symrefs with
3844 invalid addend which might have been pushed to the literal
3845 pool (no -fPIC). Usually we would expect them to be
3846 handled via secondary reload but this does not happen if
3847 they are used as literal pool slot replacement in reload
3848 inheritance (see emit_input_reload_insns). */
3849 if (TARGET_CPU_ZARCH
3850 && GET_CODE (XEXP (op, 0)) == PLUS
3851 && GET_CODE (XEXP (XEXP(op, 0), 0)) == SYMBOL_REF
3852 && GET_CODE (XEXP (XEXP(op, 0), 1)) == CONST_INT)
3853 {
3854 if (flag_pic && reg_class_subset_p (ADDR_REGS, rclass))
3855 return ADDR_REGS;
3856 else
3857 return NO_REGS;
3858 }
3859 /* fallthrough */
3860 case LABEL_REF:
3861 case SYMBOL_REF:
3862 if (!legitimate_reload_constant_p (op))
3863 return NO_REGS;
3864 /* fallthrough */
3865 case PLUS:
3866 /* load address will be used. */
3867 if (reg_class_subset_p (ADDR_REGS, rclass))
3868 return ADDR_REGS;
3869 else
3870 return NO_REGS;
3871
3872 default:
3873 break;
3874 }
3875
3876 return rclass;
3877 }
3878
3879 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
3880 multiple of ALIGNMENT and the SYMBOL_REF being naturally
3881 aligned. */
3882
3883 bool
3884 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
3885 {
3886 HOST_WIDE_INT addend;
3887 rtx symref;
3888
3889 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
3890 return false;
3891
3892 if (addend & (alignment - 1))
3893 return false;
3894
3895 if (GET_CODE (symref) == SYMBOL_REF
3896 && !SYMBOL_REF_NOT_NATURALLY_ALIGNED_P (symref))
3897 return true;
3898
3899 if (GET_CODE (symref) == UNSPEC
3900 && alignment <= UNITS_PER_LONG)
3901 return true;
3902
3903 return false;
3904 }
3905
3906 /* ADDR is moved into REG using larl. If ADDR isn't a valid larl
3907 operand, SCRATCH is used to load the even part of the address;
3908 one is then added using la. */
3909
3910 void
3911 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
3912 {
3913 HOST_WIDE_INT addend;
3914 rtx symref;
3915
3916 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
3917 gcc_unreachable ();
3918
3919 if (!(addend & 1))
3920 /* Easy case. The addend is even so larl will do fine. */
3921 emit_move_insn (reg, addr);
3922 else
3923 {
3924 /* We can leave the scratch register untouched if the target
3925 register is a valid base register. */
3926 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
3927 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
3928 scratch = reg;
3929
3930 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
3931 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
3932
3933 if (addend != 1)
3934 emit_move_insn (scratch,
3935 gen_rtx_CONST (Pmode,
3936 gen_rtx_PLUS (Pmode, symref,
3937 GEN_INT (addend - 1))));
3938 else
3939 emit_move_insn (scratch, symref);
3940
3941 /* Increment the address using la in order to avoid clobbering cc. */
3942 s390_load_address (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
3943 }
3944 }
3945
3946 /* Generate what is necessary to move between REG and MEM using
3947 SCRATCH. The direction is given by TOMEM. */
3948
3949 void
3950 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
3951 {
3952 /* Reload might have pulled a constant out of the literal pool.
3953 Force it back in. */
3954 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
3955 || GET_CODE (mem) == CONST_VECTOR
3956 || GET_CODE (mem) == CONST)
3957 mem = force_const_mem (GET_MODE (reg), mem);
3958
3959 gcc_assert (MEM_P (mem));
3960
3961 /* For a load from memory we can leave the scratch register
3962 untouched if the target register is a valid base register. */
3963 if (!tomem
3964 && REGNO (reg) < FIRST_PSEUDO_REGISTER
3965 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
3966 && GET_MODE (reg) == GET_MODE (scratch))
3967 scratch = reg;
3968
3969 /* Load address into scratch register. Since we can't have a
3970 secondary reload for a secondary reload we have to cover the case
3971 where larl would need a secondary reload here as well. */
3972 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
3973
3974 /* Now we can use a standard load/store to do the move. */
3975 if (tomem)
3976 emit_move_insn (replace_equiv_address (mem, scratch), reg);
3977 else
3978 emit_move_insn (reg, replace_equiv_address (mem, scratch));
3979 }
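/* For illustration (a sketch under the same assumptions as above): a
   load from a mem whose symref address cannot be used directly might
   end up as a two-step sequence along the lines of

       larl    %r1, SYM
       lg      %r2, 0(%r1)

   i.e. the address is materialized in the scratch register first and
   the access then uses a plain base-register addressing mode.  */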
3980
3981 /* Inform reload about cases where moving X with a mode MODE to a register in
3982 RCLASS requires an extra scratch or immediate register. Return the class
3983 needed for the immediate register. */
3984
3985 static reg_class_t
3986 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
3987 machine_mode mode, secondary_reload_info *sri)
3988 {
3989 enum reg_class rclass = (enum reg_class) rclass_i;
3990
3991 /* Intermediate register needed. */
3992 if (reg_classes_intersect_p (CC_REGS, rclass))
3993 return GENERAL_REGS;
3994
3995 if (TARGET_VX)
3996 {
3997 /* The vst/vl vector move instructions allow only for short
3998 displacements. */
3999 if (MEM_P (x)
4000 && GET_CODE (XEXP (x, 0)) == PLUS
4001 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4002 && !SHORT_DISP_IN_RANGE(INTVAL (XEXP (XEXP (x, 0), 1)))
4003 && reg_class_subset_p (rclass, VEC_REGS)
4004 && (!reg_class_subset_p (rclass, FP_REGS)
4005 || (GET_MODE_SIZE (mode) > 8
4006 && s390_class_max_nregs (FP_REGS, mode) == 1)))
4007 {
4008 if (in_p)
4009 sri->icode = (TARGET_64BIT ?
4010 CODE_FOR_reloaddi_la_in :
4011 CODE_FOR_reloadsi_la_in);
4012 else
4013 sri->icode = (TARGET_64BIT ?
4014 CODE_FOR_reloaddi_la_out :
4015 CODE_FOR_reloadsi_la_out);
4016 }
4017 }
4018
4019 if (TARGET_Z10)
4020 {
4021 HOST_WIDE_INT offset;
4022 rtx symref;
4023
4024 /* On z10 several optimizer steps may generate larl operands with
4025 an odd addend. */
4026 if (in_p
4027 && s390_loadrelative_operand_p (x, &symref, &offset)
4028 && mode == Pmode
4029 && !SYMBOL_REF_ALIGN1_P (symref)
4030 && (offset & 1) == 1)
4031 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
4032 : CODE_FOR_reloadsi_larl_odd_addend_z10);
4033
4034 /* Handle all the (mem (symref)) accesses we cannot use the z10
4035 instructions for. */
4036 if (MEM_P (x)
4037 && s390_loadrelative_operand_p (XEXP (x, 0), NULL, NULL)
4038 && (mode == QImode
4039 || !reg_classes_intersect_p (GENERAL_REGS, rclass)
4040 || GET_MODE_SIZE (mode) > UNITS_PER_WORD
4041 || !s390_check_symref_alignment (XEXP (x, 0),
4042 GET_MODE_SIZE (mode))))
4043 {
4044 #define __SECONDARY_RELOAD_CASE(M,m) \
4045 case M##mode: \
4046 if (TARGET_64BIT) \
4047 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
4048 CODE_FOR_reload##m##di_tomem_z10; \
4049 else \
4050 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
4051 CODE_FOR_reload##m##si_tomem_z10; \
4052 break;
4053
4054 switch (GET_MODE (x))
4055 {
4056 __SECONDARY_RELOAD_CASE (QI, qi);
4057 __SECONDARY_RELOAD_CASE (HI, hi);
4058 __SECONDARY_RELOAD_CASE (SI, si);
4059 __SECONDARY_RELOAD_CASE (DI, di);
4060 __SECONDARY_RELOAD_CASE (TI, ti);
4061 __SECONDARY_RELOAD_CASE (SF, sf);
4062 __SECONDARY_RELOAD_CASE (DF, df);
4063 __SECONDARY_RELOAD_CASE (TF, tf);
4064 __SECONDARY_RELOAD_CASE (SD, sd);
4065 __SECONDARY_RELOAD_CASE (DD, dd);
4066 __SECONDARY_RELOAD_CASE (TD, td);
4067 __SECONDARY_RELOAD_CASE (V1QI, v1qi);
4068 __SECONDARY_RELOAD_CASE (V2QI, v2qi);
4069 __SECONDARY_RELOAD_CASE (V4QI, v4qi);
4070 __SECONDARY_RELOAD_CASE (V8QI, v8qi);
4071 __SECONDARY_RELOAD_CASE (V16QI, v16qi);
4072 __SECONDARY_RELOAD_CASE (V1HI, v1hi);
4073 __SECONDARY_RELOAD_CASE (V2HI, v2hi);
4074 __SECONDARY_RELOAD_CASE (V4HI, v4hi);
4075 __SECONDARY_RELOAD_CASE (V8HI, v8hi);
4076 __SECONDARY_RELOAD_CASE (V1SI, v1si);
4077 __SECONDARY_RELOAD_CASE (V2SI, v2si);
4078 __SECONDARY_RELOAD_CASE (V4SI, v4si);
4079 __SECONDARY_RELOAD_CASE (V1DI, v1di);
4080 __SECONDARY_RELOAD_CASE (V2DI, v2di);
4081 __SECONDARY_RELOAD_CASE (V1TI, v1ti);
4082 __SECONDARY_RELOAD_CASE (V1SF, v1sf);
4083 __SECONDARY_RELOAD_CASE (V2SF, v2sf);
4084 __SECONDARY_RELOAD_CASE (V4SF, v4sf);
4085 __SECONDARY_RELOAD_CASE (V1DF, v1df);
4086 __SECONDARY_RELOAD_CASE (V2DF, v2df);
4087 __SECONDARY_RELOAD_CASE (V1TF, v1tf);
4088 default:
4089 gcc_unreachable ();
4090 }
4091 #undef __SECONDARY_RELOAD_CASE
4092 }
4093 }
4094
4095 /* We need a scratch register when loading a PLUS expression which
4096 is not a legitimate operand of the LOAD ADDRESS instruction. */
4097 /* LRA can deal with transformation of plus op very well -- so we
4098 don't need to prompt LRA in this case. */
4099 if (! lra_in_progress && in_p && s390_plus_operand (x, mode))
4100 sri->icode = (TARGET_64BIT ?
4101 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
4102
4103 /* When performing a multiword move from or to memory we have to make
4104 sure the second chunk in memory is addressable without causing a
4105 displacement overflow. If it is not, we calculate the address in
4106 a scratch register. */
4107 if (MEM_P (x)
4108 && GET_CODE (XEXP (x, 0)) == PLUS
4109 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4110 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
4111 + GET_MODE_SIZE (mode) - 1))
4112 {
4113 /* For GENERAL_REGS a displacement overflow is no problem if it occurs
4114 in an s_operand address since we may fall back to lm/stm. So we only
4115 have to care about overflows in the b+i+d case. */
4116 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
4117 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
4118 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
4119 /* For FP_REGS no lm/stm is available so this check is triggered
4120 for displacement overflows in b+i+d and b+d like addresses. */
4121 || (reg_classes_intersect_p (FP_REGS, rclass)
4122 && s390_class_max_nregs (FP_REGS, mode) > 1))
4123 {
4124 if (in_p)
4125 sri->icode = (TARGET_64BIT ?
4126 CODE_FOR_reloaddi_la_in :
4127 CODE_FOR_reloadsi_la_in);
4128 else
4129 sri->icode = (TARGET_64BIT ?
4130 CODE_FOR_reloaddi_la_out :
4131 CODE_FOR_reloadsi_la_out);
4132 }
4133 }
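/* A worked example for the check above, assuming the short 12-bit
   displacement range 0..4095: a 16-byte access based at offset 4088
   needs its second 8-byte chunk at offset 4096, DISP_IN_RANGE (4088
   + 16 - 1) fails, and the address is therefore computed up front via
   one of the reload..._la_in/_out patterns.  */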
4134
4135 /* A scratch address register is needed when a symbolic constant is
4136 copied to r0 compiling with -fPIC. In other cases the target
4137 register might be used as temporary (see legitimize_pic_address). */
4138 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
4139 sri->icode = (TARGET_64BIT ?
4140 CODE_FOR_reloaddi_PIC_addr :
4141 CODE_FOR_reloadsi_PIC_addr);
4142
4143 /* Either scratch or no register needed. */
4144 return NO_REGS;
4145 }
4146
4147 /* Generate code to load SRC, which is PLUS that is not a
4148 legitimate operand for the LA instruction, into TARGET.
4149 SCRATCH may be used as scratch register. */
4150
4151 void
4152 s390_expand_plus_operand (rtx target, rtx src,
4153 rtx scratch)
4154 {
4155 rtx sum1, sum2;
4156 struct s390_address ad;
4157
4158 /* src must be a PLUS; get its two operands. */
4159 gcc_assert (GET_CODE (src) == PLUS);
4160 gcc_assert (GET_MODE (src) == Pmode);
4161
4162 /* Check if any of the two operands is already scheduled
4163 for replacement by reload. This can happen e.g. when
4164 float registers occur in an address. */
4165 sum1 = find_replacement (&XEXP (src, 0));
4166 sum2 = find_replacement (&XEXP (src, 1));
4167 src = gen_rtx_PLUS (Pmode, sum1, sum2);
4168
4169 /* If the address is already strictly valid, there's nothing to do. */
4170 if (!s390_decompose_address (src, &ad)
4171 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
4172 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
4173 {
4174 /* Otherwise, one of the operands cannot be an address register;
4175 we reload its value into the scratch register. */
4176 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
4177 {
4178 emit_move_insn (scratch, sum1);
4179 sum1 = scratch;
4180 }
4181 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
4182 {
4183 emit_move_insn (scratch, sum2);
4184 sum2 = scratch;
4185 }
4186
4187 /* According to the way these invalid addresses are generated
4188 in reload.c, it should never happen (at least on s390) that
4189 *neither* of the PLUS components, after find_replacements
4190 was applied, is an address register. */
4191 if (sum1 == scratch && sum2 == scratch)
4192 {
4193 debug_rtx (src);
4194 gcc_unreachable ();
4195 }
4196
4197 src = gen_rtx_PLUS (Pmode, sum1, sum2);
4198 }
4199
4200 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
4201 is only ever performed on addresses, so we can mark the
4202 sum as legitimate for LA in any case. */
4203 s390_load_address (target, src);
4204 }
4205
4206
4207 /* Return true if ADDR is a valid memory address.
4208 STRICT specifies whether strict register checking applies. */
4209
4210 static bool
4211 s390_legitimate_address_p (machine_mode mode, rtx addr, bool strict)
4212 {
4213 struct s390_address ad;
4214
4215 if (TARGET_Z10
4216 && larl_operand (addr, VOIDmode)
4217 && (mode == VOIDmode
4218 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
4219 return true;
4220
4221 if (!s390_decompose_address (addr, &ad))
4222 return false;
4223
4224 if (strict)
4225 {
4226 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
4227 return false;
4228
4229 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
4230 return false;
4231 }
4232 else
4233 {
4234 if (ad.base
4235 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
4236 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
4237 return false;
4238
4239 if (ad.indx
4240 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
4241 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
4242 return false;
4243 }
4244 return true;
4245 }
4246
4247 /* Return true if OP is a valid operand for the LA instruction.
4248 In 31-bit, we need to prove that the result is used as an
4249 address, as LA performs only a 31-bit addition. */
4250
4251 bool
4252 legitimate_la_operand_p (rtx op)
4253 {
4254 struct s390_address addr;
4255 if (!s390_decompose_address (op, &addr))
4256 return false;
4257
4258 return (TARGET_64BIT || addr.pointer);
4259 }
4260
4261 /* Return true if it is valid *and* preferable to use LA to
4262 compute the sum of OP1 and OP2. */
4263
4264 bool
4265 preferred_la_operand_p (rtx op1, rtx op2)
4266 {
4267 struct s390_address addr;
4268
4269 if (op2 != const0_rtx)
4270 op1 = gen_rtx_PLUS (Pmode, op1, op2);
4271
4272 if (!s390_decompose_address (op1, &addr))
4273 return false;
4274 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
4275 return false;
4276 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
4277 return false;
4278
4279 /* Avoid LA instructions with index register on z196; it is
4280 preferable to use regular add instructions when possible.
4281 Starting with zEC12 the la with index register is "uncracked"
4282 again. */
4283 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
4284 return false;
4285
4286 if (!TARGET_64BIT && !addr.pointer)
4287 return false;
4288
4289 if (addr.pointer)
4290 return true;
4291
4292 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
4293 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
4294 return true;
4295
4296 return false;
4297 }
4298
4299 /* Emit a forced load-address operation to load SRC into DST.
4300 This will use the LOAD ADDRESS instruction even in situations
4301 where legitimate_la_operand_p (SRC) returns false. */
4302
4303 void
4304 s390_load_address (rtx dst, rtx src)
4305 {
4306 if (TARGET_64BIT)
4307 emit_move_insn (dst, src);
4308 else
4309 emit_insn (gen_force_la_31 (dst, src));
4310 }
4311
4312 /* Return a legitimate reference for ORIG (an address) using the
4313 register REG. If REG is 0, a new pseudo is generated.
4314
4315 There are two types of references that must be handled:
4316
4317 1. Global data references must load the address from the GOT, via
4318 the PIC reg. An insn is emitted to do this load, and the reg is
4319 returned.
4320
4321 2. Static data references, constant pool addresses, and code labels
4322 compute the address as an offset from the GOT, whose base is in
4323 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
4324 differentiate them from global data objects. The returned
4325 address is the PIC reg + an unspec constant.
4326
4327 TARGET_LEGITIMATE_ADDRESS_P rejects symbolic references unless the PIC
4328 reg also appears in the address. */
4329
4330 rtx
4331 legitimize_pic_address (rtx orig, rtx reg)
4332 {
4333 rtx addr = orig;
4334 rtx addend = const0_rtx;
4335 rtx new_rtx = orig;
4336
4337 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
4338
4339 if (GET_CODE (addr) == CONST)
4340 addr = XEXP (addr, 0);
4341
4342 if (GET_CODE (addr) == PLUS)
4343 {
4344 addend = XEXP (addr, 1);
4345 addr = XEXP (addr, 0);
4346 }
4347
4348 if ((GET_CODE (addr) == LABEL_REF
4349 || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr))
4350 || (GET_CODE (addr) == UNSPEC &&
4351 (XINT (addr, 1) == UNSPEC_GOTENT
4352 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
4353 && GET_CODE (addend) == CONST_INT)
4354 {
4355 /* This can be locally addressed. */
4356
4357 /* larl_operand requires UNSPECs to be wrapped in a const rtx. */
4358 rtx const_addr = (GET_CODE (addr) == UNSPEC ?
4359 gen_rtx_CONST (Pmode, addr) : addr);
4360
4361 if (TARGET_CPU_ZARCH
4362 && larl_operand (const_addr, VOIDmode)
4363 && INTVAL (addend) < (HOST_WIDE_INT)1 << 31
4364 && INTVAL (addend) >= -((HOST_WIDE_INT)1 << 31))
4365 {
4366 if (INTVAL (addend) & 1)
4367 {
4368 /* LARL can't handle odd offsets, so emit a pair of LARL
4369 and LA. */
4370 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4371
4372 if (!DISP_IN_RANGE (INTVAL (addend)))
4373 {
4374 HOST_WIDE_INT even = INTVAL (addend) - 1;
4375 addr = gen_rtx_PLUS (Pmode, addr, GEN_INT (even));
4376 addr = gen_rtx_CONST (Pmode, addr);
4377 addend = const1_rtx;
4378 }
4379
4380 emit_move_insn (temp, addr);
4381 new_rtx = gen_rtx_PLUS (Pmode, temp, addend);
4382
4383 if (reg != 0)
4384 {
4385 s390_load_address (reg, new_rtx);
4386 new_rtx = reg;
4387 }
4388 }
4389 else
4390 {
4391 /* If the offset is even, we can just use LARL. This
4392 will happen automatically. */
4393 }
4394 }
4395 else
4396 {
4397 /* No larl - Access local symbols relative to the GOT. */
4398
4399 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4400
4401 if (reload_in_progress || reload_completed)
4402 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4403
4404 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
4405 if (addend != const0_rtx)
4406 addr = gen_rtx_PLUS (Pmode, addr, addend);
4407 addr = gen_rtx_CONST (Pmode, addr);
4408 addr = force_const_mem (Pmode, addr);
4409 emit_move_insn (temp, addr);
4410
4411 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4412 if (reg != 0)
4413 {
4414 s390_load_address (reg, new_rtx);
4415 new_rtx = reg;
4416 }
4417 }
4418 }
4419 else if (GET_CODE (addr) == SYMBOL_REF && addend == const0_rtx)
4420 {
4421 /* A non-local symbol reference without addend.
4422
4423 The symbol ref is wrapped into an UNSPEC to make sure the
4424 proper operand modifier (@GOT or @GOTENT) will be emitted.
4425 This will tell the linker to put the symbol into the GOT.
4426
4427 Additionally the code dereferencing the GOT slot is emitted here.
4428
4429 An addend to the symref needs to be added afterwards.
4430 legitimize_pic_address calls itself recursively to handle
4431 that case. So no need to do it here. */
4432
4433 if (reg == 0)
4434 reg = gen_reg_rtx (Pmode);
4435
4436 if (TARGET_Z10)
4437 {
4438 /* Use load relative if possible.
4439 lgrl <target>, sym@GOTENT */
4440 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
4441 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4442 new_rtx = gen_const_mem (GET_MODE (reg), new_rtx);
4443
4444 emit_move_insn (reg, new_rtx);
4445 new_rtx = reg;
4446 }
4447 else if (flag_pic == 1)
4448 {
4449 /* Assume GOT offset is a valid displacement operand (< 4k
4450 or < 512k with z990). This is handled the same way in
4451 both 31- and 64-bit code (@GOT).
4452 lg <target>, sym@GOT(r12) */
4453
4454 if (reload_in_progress || reload_completed)
4455 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4456
4457 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
4458 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4459 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
4460 new_rtx = gen_const_mem (Pmode, new_rtx);
4461 emit_move_insn (reg, new_rtx);
4462 new_rtx = reg;
4463 }
4464 else if (TARGET_CPU_ZARCH)
4465 {
4466 /* If the GOT offset might be >= 4k, we determine the position
4467 of the GOT entry via a PC-relative LARL (@GOTENT).
4468 larl temp, sym@GOTENT
4469 lg <target>, 0(temp) */
4470
4471 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4472
4473 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
4474 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
4475
4476 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
4477 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4478 emit_move_insn (temp, new_rtx);
4479
4480 new_rtx = gen_const_mem (Pmode, temp);
4481 emit_move_insn (reg, new_rtx);
4482
4483 new_rtx = reg;
4484 }
4485 else
4486 {
4487 /* If the GOT offset might be >= 4k, we have to load it
4488 from the literal pool (@GOT).
4489
4490 lg temp, lit-litbase(r13)
4491 lg <target>, 0(temp)
4492 lit: .long sym@GOT */
4493
4494 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4495
4496 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
4497 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
4498
4499 if (reload_in_progress || reload_completed)
4500 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4501
4502 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
4503 addr = gen_rtx_CONST (Pmode, addr);
4504 addr = force_const_mem (Pmode, addr);
4505 emit_move_insn (temp, addr);
4506
4507 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4508 new_rtx = gen_const_mem (Pmode, new_rtx);
4509 emit_move_insn (reg, new_rtx);
4510 new_rtx = reg;
4511 }
4512 }
4513 else if (GET_CODE (addr) == UNSPEC && GET_CODE (addend) == CONST_INT)
4514 {
4515 gcc_assert (XVECLEN (addr, 0) == 1);
4516 switch (XINT (addr, 1))
4517 {
4518 /* These UNSPECs address symbols (or PLT slots) relative to the GOT
4519 (not GOT slots!). In general this will exceed the
4520 displacement range, so these values belong in the literal
4521 pool. */
4522 case UNSPEC_GOTOFF:
4523 case UNSPEC_PLTOFF:
4524 new_rtx = force_const_mem (Pmode, orig);
4525 break;
4526
4527 /* For -fPIC the GOT size might exceed the displacement
4528 range so make sure the value is in the literal pool. */
4529 case UNSPEC_GOT:
4530 if (flag_pic == 2)
4531 new_rtx = force_const_mem (Pmode, orig);
4532 break;
4533
4534 /* For @GOTENT larl is used. This is handled like local
4535 symbol refs. */
4536 case UNSPEC_GOTENT:
4537 gcc_unreachable ();
4538 break;
4539
4540 /* @PLT is OK as is on 64-bit, but must be converted to
4541 GOT-relative @PLTOFF on 31-bit. */
4542 case UNSPEC_PLT:
4543 if (!TARGET_CPU_ZARCH)
4544 {
4545 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4546
4547 if (reload_in_progress || reload_completed)
4548 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4549
4550 addr = XVECEXP (addr, 0, 0);
4551 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
4552 UNSPEC_PLTOFF);
4553 if (addend != const0_rtx)
4554 addr = gen_rtx_PLUS (Pmode, addr, addend);
4555 addr = gen_rtx_CONST (Pmode, addr);
4556 addr = force_const_mem (Pmode, addr);
4557 emit_move_insn (temp, addr);
4558
4559 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4560 if (reg != 0)
4561 {
4562 s390_load_address (reg, new_rtx);
4563 new_rtx = reg;
4564 }
4565 }
4566 else
4567 /* On 64 bit larl can be used. This case is handled like
4568 local symbol refs. */
4569 gcc_unreachable ();
4570 break;
4571
4572 /* Everything else cannot happen. */
4573 default:
4574 gcc_unreachable ();
4575 }
4576 }
4577 else if (addend != const0_rtx)
4578 {
4579 /* Otherwise, compute the sum. */
4580
4581 rtx base = legitimize_pic_address (addr, reg);
4582 new_rtx = legitimize_pic_address (addend,
4583 base == reg ? NULL_RTX : reg);
4584 if (GET_CODE (new_rtx) == CONST_INT)
4585 new_rtx = plus_constant (Pmode, base, INTVAL (new_rtx));
4586 else
4587 {
4588 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
4589 {
4590 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
4591 new_rtx = XEXP (new_rtx, 1);
4592 }
4593 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
4594 }
4595
4596 if (GET_CODE (new_rtx) == CONST)
4597 new_rtx = XEXP (new_rtx, 0);
4598 new_rtx = force_operand (new_rtx, 0);
4599 }
4600
4601 return new_rtx;
4602 }
4603
4604 /* Load the thread pointer into a register. */
4605
4606 rtx
4607 s390_get_thread_pointer (void)
4608 {
4609 rtx tp = gen_reg_rtx (Pmode);
4610
4611 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
4612 mark_reg_pointer (tp, BITS_PER_WORD);
4613
4614 return tp;
4615 }
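/* For illustration (a sketch, not a literal quote of generated code):
   the thread pointer is kept in the access registers on this target,
   so the move from TP_REGNUM above is typically materialized with ear
   instructions, e.g. on 64-bit roughly

       ear     %r1, %a0
       sllg    %r1, %r1, 32
       ear     %r1, %a1  */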
4616
4617 /* Emit a tls call insn. The call target is the SYMBOL_REF stored
4618 in s390_tls_symbol which always refers to __tls_get_offset.
4619 The returned offset is written to RESULT_REG and a USE rtx is
4620 generated for TLS_CALL. */
4621
4622 static GTY(()) rtx s390_tls_symbol;
4623
4624 static void
4625 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
4626 {
4627 rtx insn;
4628
4629 if (!flag_pic)
4630 emit_insn (s390_load_got ());
4631
4632 if (!s390_tls_symbol)
4633 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
4634
4635 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
4636 gen_rtx_REG (Pmode, RETURN_REGNUM));
4637
4638 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
4639 RTL_CONST_CALL_P (insn) = 1;
4640 }
4641
4642 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
4643 this (thread-local) address. REG may be used as temporary. */
4644
4645 static rtx
4646 legitimize_tls_address (rtx addr, rtx reg)
4647 {
4648 rtx new_rtx, tls_call, temp, base, r2, insn;
4649
4650 if (GET_CODE (addr) == SYMBOL_REF)
4651 switch (tls_symbolic_operand (addr))
4652 {
4653 case TLS_MODEL_GLOBAL_DYNAMIC:
4654 start_sequence ();
4655 r2 = gen_rtx_REG (Pmode, 2);
4656 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
4657 new_rtx = gen_rtx_CONST (Pmode, tls_call);
4658 new_rtx = force_const_mem (Pmode, new_rtx);
4659 emit_move_insn (r2, new_rtx);
4660 s390_emit_tls_call_insn (r2, tls_call);
4661 insn = get_insns ();
4662 end_sequence ();
4663
4664 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
4665 temp = gen_reg_rtx (Pmode);
4666 emit_libcall_block (insn, temp, r2, new_rtx);
4667
4668 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4669 if (reg != 0)
4670 {
4671 s390_load_address (reg, new_rtx);
4672 new_rtx = reg;
4673 }
4674 break;
4675
4676 case TLS_MODEL_LOCAL_DYNAMIC:
4677 start_sequence ();
4678 r2 = gen_rtx_REG (Pmode, 2);
4679 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
4680 new_rtx = gen_rtx_CONST (Pmode, tls_call);
4681 new_rtx = force_const_mem (Pmode, new_rtx);
4682 emit_move_insn (r2, new_rtx);
4683 s390_emit_tls_call_insn (r2, tls_call);
4684 insn = get_insns ();
4685 end_sequence ();
4686
4687 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
4688 temp = gen_reg_rtx (Pmode);
4689 emit_libcall_block (insn, temp, r2, new_rtx);
4690
4691 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4692 base = gen_reg_rtx (Pmode);
4693 s390_load_address (base, new_rtx);
4694
4695 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
4696 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4697 new_rtx = force_const_mem (Pmode, new_rtx);
4698 temp = gen_reg_rtx (Pmode);
4699 emit_move_insn (temp, new_rtx);
4700
4701 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
4702 if (reg != 0)
4703 {
4704 s390_load_address (reg, new_rtx);
4705 new_rtx = reg;
4706 }
4707 break;
4708
4709 case TLS_MODEL_INITIAL_EXEC:
4710 if (flag_pic == 1)
4711 {
4712 /* Assume GOT offset < 4k. This is handled the same way
4713 in both 31- and 64-bit code. */
4714
4715 if (reload_in_progress || reload_completed)
4716 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4717
4718 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
4719 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4720 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
4721 new_rtx = gen_const_mem (Pmode, new_rtx);
4722 temp = gen_reg_rtx (Pmode);
4723 emit_move_insn (temp, new_rtx);
4724 }
4725 else if (TARGET_CPU_ZARCH)
4726 {
4727 /* If the GOT offset might be >= 4k, we determine the position
4728 of the GOT entry via a PC-relative LARL. */
4729
4730 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
4731 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4732 temp = gen_reg_rtx (Pmode);
4733 emit_move_insn (temp, new_rtx);
4734
4735 new_rtx = gen_const_mem (Pmode, temp);
4736 temp = gen_reg_rtx (Pmode);
4737 emit_move_insn (temp, new_rtx);
4738 }
4739 else if (flag_pic)
4740 {
4741 /* If the GOT offset might be >= 4k, we have to load it
4742 from the literal pool. */
4743
4744 if (reload_in_progress || reload_completed)
4745 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4746
4747 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
4748 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4749 new_rtx = force_const_mem (Pmode, new_rtx);
4750 temp = gen_reg_rtx (Pmode);
4751 emit_move_insn (temp, new_rtx);
4752
4753 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4754 new_rtx = gen_const_mem (Pmode, new_rtx);
4755
4756 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
4757 temp = gen_reg_rtx (Pmode);
4758 emit_insn (gen_rtx_SET (temp, new_rtx));
4759 }
4760 else
4761 {
4762 /* In position-dependent code, load the absolute address of
4763 the GOT entry from the literal pool. */
4764
4765 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
4766 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4767 new_rtx = force_const_mem (Pmode, new_rtx);
4768 temp = gen_reg_rtx (Pmode);
4769 emit_move_insn (temp, new_rtx);
4770
4771 new_rtx = temp;
4772 new_rtx = gen_const_mem (Pmode, new_rtx);
4773 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
4774 temp = gen_reg_rtx (Pmode);
4775 emit_insn (gen_rtx_SET (temp, new_rtx));
4776 }
4777
4778 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4779 if (reg != 0)
4780 {
4781 s390_load_address (reg, new_rtx);
4782 new_rtx = reg;
4783 }
4784 break;
4785
4786 case TLS_MODEL_LOCAL_EXEC:
4787 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
4788 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4789 new_rtx = force_const_mem (Pmode, new_rtx);
4790 temp = gen_reg_rtx (Pmode);
4791 emit_move_insn (temp, new_rtx);
4792
4793 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4794 if (reg != 0)
4795 {
4796 s390_load_address (reg, new_rtx);
4797 new_rtx = reg;
4798 }
4799 break;
4800
4801 default:
4802 gcc_unreachable ();
4803 }
4804
4805 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
4806 {
4807 switch (XINT (XEXP (addr, 0), 1))
4808 {
4809 case UNSPEC_INDNTPOFF:
4810 gcc_assert (TARGET_CPU_ZARCH);
4811 new_rtx = addr;
4812 break;
4813
4814 default:
4815 gcc_unreachable ();
4816 }
4817 }
4818
4819 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
4820 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
4821 {
4822 new_rtx = XEXP (XEXP (addr, 0), 0);
4823 if (GET_CODE (new_rtx) != SYMBOL_REF)
4824 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4825
4826 new_rtx = legitimize_tls_address (new_rtx, reg);
4827 new_rtx = plus_constant (Pmode, new_rtx,
4828 INTVAL (XEXP (XEXP (addr, 0), 1)));
4829 new_rtx = force_operand (new_rtx, 0);
4830 }
4831
4832 else
4833 gcc_unreachable (); /* for now ... */
4834
4835 return new_rtx;
4836 }
4837
4838 /* Emit insns making the address in operands[1] valid for a standard
4839 move to operands[0]. operands[1] is replaced by an address which
4840 should be used instead of the former RTX to emit the move
4841 pattern. */
4842
4843 void
4844 emit_symbolic_move (rtx *operands)
4845 {
4846 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
4847
4848 if (GET_CODE (operands[0]) == MEM)
4849 operands[1] = force_reg (Pmode, operands[1]);
4850 else if (TLS_SYMBOLIC_CONST (operands[1]))
4851 operands[1] = legitimize_tls_address (operands[1], temp);
4852 else if (flag_pic)
4853 operands[1] = legitimize_pic_address (operands[1], temp);
4854 }
4855
4856 /* Try machine-dependent ways of modifying an illegitimate address X
4857 to be legitimate. If we find one, return the new, valid address.
4858
4859 OLDX is the address as it was before break_out_memory_refs was called.
4860 In some cases it is useful to look at this to decide what needs to be done.
4861
4862 MODE is the mode of the operand pointed to by X.
4863
4864 When -fpic is used, special handling is needed for symbolic references.
4865 See comments by legitimize_pic_address for details. */
4866
4867 static rtx
4868 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
4869 machine_mode mode ATTRIBUTE_UNUSED)
4870 {
4871 rtx constant_term = const0_rtx;
4872
4873 if (TLS_SYMBOLIC_CONST (x))
4874 {
4875 x = legitimize_tls_address (x, 0);
4876
4877 if (s390_legitimate_address_p (mode, x, FALSE))
4878 return x;
4879 }
4880 else if (GET_CODE (x) == PLUS
4881 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
4882 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
4883 {
4884 return x;
4885 }
4886 else if (flag_pic)
4887 {
4888 if (SYMBOLIC_CONST (x)
4889 || (GET_CODE (x) == PLUS
4890 && (SYMBOLIC_CONST (XEXP (x, 0))
4891 || SYMBOLIC_CONST (XEXP (x, 1)))))
4892 x = legitimize_pic_address (x, 0);
4893
4894 if (s390_legitimate_address_p (mode, x, FALSE))
4895 return x;
4896 }
4897
4898 x = eliminate_constant_term (x, &constant_term);
4899
4900 /* Optimize loading of large displacements by splitting them
4901 into the multiple of 4K and the rest; this allows the
4902 former to be CSE'd if possible.
4903
4904 Don't do this if the displacement is added to a register
4905 pointing into the stack frame, as the offsets will
4906 change later anyway. */
4907
4908 if (GET_CODE (constant_term) == CONST_INT
4909 && !TARGET_LONG_DISPLACEMENT
4910 && !DISP_IN_RANGE (INTVAL (constant_term))
4911 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
4912 {
4913 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
4914 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
4915
4916 rtx temp = gen_reg_rtx (Pmode);
4917 rtx val = force_operand (GEN_INT (upper), temp);
4918 if (val != temp)
4919 emit_move_insn (temp, val);
4920
4921 x = gen_rtx_PLUS (Pmode, x, temp);
4922 constant_term = GEN_INT (lower);
4923 }
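/* For illustration: with a constant term of 0x12345 the split above
   yields lower = 0x12345 & 0xfff = 0x345 and upper = 0x12345 ^ 0x345
   = 0x12000, so the large part can be loaded (and possibly CSEd)
   separately while 0x345 remains a valid short displacement.  */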
4924
4925 if (GET_CODE (x) == PLUS)
4926 {
4927 if (GET_CODE (XEXP (x, 0)) == REG)
4928 {
4929 rtx temp = gen_reg_rtx (Pmode);
4930 rtx val = force_operand (XEXP (x, 1), temp);
4931 if (val != temp)
4932 emit_move_insn (temp, val);
4933
4934 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
4935 }
4936
4937 else if (GET_CODE (XEXP (x, 1)) == REG)
4938 {
4939 rtx temp = gen_reg_rtx (Pmode);
4940 rtx val = force_operand (XEXP (x, 0), temp);
4941 if (val != temp)
4942 emit_move_insn (temp, val);
4943
4944 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
4945 }
4946 }
4947
4948 if (constant_term != const0_rtx)
4949 x = gen_rtx_PLUS (Pmode, x, constant_term);
4950
4951 return x;
4952 }
4953
4954 /* Try a machine-dependent way of reloading an illegitimate address AD
4955 operand. If we find one, push the reload and return the new address.
4956
4957 MODE is the mode of the enclosing MEM. OPNUM is the operand number
4958 and TYPE is the reload type of the current reload. */
4959
4960 rtx
4961 legitimize_reload_address (rtx ad, machine_mode mode ATTRIBUTE_UNUSED,
4962 int opnum, int type)
4963 {
4964 if (!optimize || TARGET_LONG_DISPLACEMENT)
4965 return NULL_RTX;
4966
4967 if (GET_CODE (ad) == PLUS)
4968 {
4969 rtx tem = simplify_binary_operation (PLUS, Pmode,
4970 XEXP (ad, 0), XEXP (ad, 1));
4971 if (tem)
4972 ad = tem;
4973 }
4974
4975 if (GET_CODE (ad) == PLUS
4976 && GET_CODE (XEXP (ad, 0)) == REG
4977 && GET_CODE (XEXP (ad, 1)) == CONST_INT
4978 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
4979 {
4980 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
4981 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
4982 rtx cst, tem, new_rtx;
4983
4984 cst = GEN_INT (upper);
4985 if (!legitimate_reload_constant_p (cst))
4986 cst = force_const_mem (Pmode, cst);
4987
4988 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
4989 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
4990
4991 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
4992 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
4993 opnum, (enum reload_type) type);
4994 return new_rtx;
4995 }
4996
4997 return NULL_RTX;
4998 }
4999
5000 /* Emit code to move LEN bytes from SRC to DST. */
5001
5002 bool
5003 s390_expand_movmem (rtx dst, rtx src, rtx len)
5004 {
5005 /* When tuning for z10 or higher we rely on the Glibc functions to
5006 do the right thing. Only for constant lengths below 64k do we
5007 generate inline code. */
5008 if (s390_tune >= PROCESSOR_2097_Z10
5009 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
5010 return false;
5011
5012 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
5013 {
5014 if (INTVAL (len) > 0)
5015 emit_insn (gen_movmem_short (dst, src, GEN_INT (INTVAL (len) - 1)));
5016 }
5017
5018 else if (TARGET_MVCLE)
5019 {
5020 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
5021 }
5022
5023 else
5024 {
5025 rtx dst_addr, src_addr, count, blocks, temp;
5026 rtx_code_label *loop_start_label = gen_label_rtx ();
5027 rtx_code_label *loop_end_label = gen_label_rtx ();
5028 rtx_code_label *end_label = gen_label_rtx ();
5029 machine_mode mode;
5030
5031 mode = GET_MODE (len);
5032 if (mode == VOIDmode)
5033 mode = Pmode;
5034
5035 dst_addr = gen_reg_rtx (Pmode);
5036 src_addr = gen_reg_rtx (Pmode);
5037 count = gen_reg_rtx (mode);
5038 blocks = gen_reg_rtx (mode);
5039
5040 convert_move (count, len, 1);
5041 emit_cmp_and_jump_insns (count, const0_rtx,
5042 EQ, NULL_RTX, mode, 1, end_label);
5043
5044 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
5045 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
5046 dst = change_address (dst, VOIDmode, dst_addr);
5047 src = change_address (src, VOIDmode, src_addr);
5048
5049 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5050 OPTAB_DIRECT);
5051 if (temp != count)
5052 emit_move_insn (count, temp);
5053
5054 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5055 OPTAB_DIRECT);
5056 if (temp != blocks)
5057 emit_move_insn (blocks, temp);
5058
5059 emit_cmp_and_jump_insns (blocks, const0_rtx,
5060 EQ, NULL_RTX, mode, 1, loop_end_label);
5061
5062 emit_label (loop_start_label);
5063
5064 if (TARGET_Z10
5065 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
5066 {
5067 rtx prefetch;
5068
5069 /* Issue a read prefetch for the +3 cache line. */
5070 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
5071 const0_rtx, const0_rtx);
5072 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5073 emit_insn (prefetch);
5074
5075 /* Issue a write prefetch for the +3 cache line. */
5076 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
5077 const1_rtx, const0_rtx);
5078 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5079 emit_insn (prefetch);
5080 }
5081
5082 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
5083 s390_load_address (dst_addr,
5084 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
5085 s390_load_address (src_addr,
5086 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
5087
5088 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5089 OPTAB_DIRECT);
5090 if (temp != blocks)
5091 emit_move_insn (blocks, temp);
5092
5093 emit_cmp_and_jump_insns (blocks, const0_rtx,
5094 EQ, NULL_RTX, mode, 1, loop_end_label);
5095
5096 emit_jump (loop_start_label);
5097 emit_label (loop_end_label);
5098
5099 emit_insn (gen_movmem_short (dst, src,
5100 convert_to_mode (Pmode, count, 1)));
5101 emit_label (end_label);
5102 }
5103 return true;
5104 }
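/* Note on the constants used above: the movmem_short pattern encodes
   the length minus one (an mvc moves length+1 bytes), which is why a
   constant length is emitted as INTVAL (len) - 1 and why each loop
   iteration with GEN_INT (255) copies a full 256-byte block before
   both addresses are advanced by 256.  */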
5105
5106 /* Emit code to set LEN bytes at DST to VAL.
5107 Make use of clrmem if VAL is zero. */
5108
5109 void
5110 s390_expand_setmem (rtx dst, rtx len, rtx val)
5111 {
5112 if (GET_CODE (len) == CONST_INT && INTVAL (len) == 0)
5113 return;
5114
5115 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
5116
5117 if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
5118 {
5119 if (val == const0_rtx && INTVAL (len) <= 256)
5120 emit_insn (gen_clrmem_short (dst, GEN_INT (INTVAL (len) - 1)));
5121 else
5122 {
5123 /* Initialize memory by storing the first byte. */
5124 emit_move_insn (adjust_address (dst, QImode, 0), val);
5125
5126 if (INTVAL (len) > 1)
5127 {
5128 /* Initiate a 1-byte overlap move.
5129 The first byte of DST is propagated through DSTP1.
5130 Prepare a movmem for: DST+1 = DST (length = LEN - 1).
5131 DST is set to size 1 so the rest of the memory location
5132 does not count as a source operand. */
5133 rtx dstp1 = adjust_address (dst, VOIDmode, 1);
5134 set_mem_size (dst, 1);
5135
5136 emit_insn (gen_movmem_short (dstp1, dst,
5137 GEN_INT (INTVAL (len) - 2)));
5138 }
5139 }
5140 }
5141
5142 else if (TARGET_MVCLE)
5143 {
5144 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
5145 emit_insn (gen_setmem_long (dst, convert_to_mode (Pmode, len, 1), val));
5146 }
5147
5148 else
5149 {
5150 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
5151 rtx_code_label *loop_start_label = gen_label_rtx ();
5152 rtx_code_label *loop_end_label = gen_label_rtx ();
5153 rtx_code_label *end_label = gen_label_rtx ();
5154 machine_mode mode;
5155
5156 mode = GET_MODE (len);
5157 if (mode == VOIDmode)
5158 mode = Pmode;
5159
5160 dst_addr = gen_reg_rtx (Pmode);
5161 count = gen_reg_rtx (mode);
5162 blocks = gen_reg_rtx (mode);
5163
5164 convert_move (count, len, 1);
5165 emit_cmp_and_jump_insns (count, const0_rtx,
5166 EQ, NULL_RTX, mode, 1, end_label);
5167
5168 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
5169 dst = change_address (dst, VOIDmode, dst_addr);
5170
5171 if (val == const0_rtx)
5172 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5173 OPTAB_DIRECT);
5174 else
5175 {
5176 dstp1 = adjust_address (dst, VOIDmode, 1);
5177 set_mem_size (dst, 1);
5178
5179 /* Initialize memory by storing the first byte. */
5180 emit_move_insn (adjust_address (dst, QImode, 0), val);
5181
5182 /* If count is 1 we are done. */
5183 emit_cmp_and_jump_insns (count, const1_rtx,
5184 EQ, NULL_RTX, mode, 1, end_label);
5185
5186 temp = expand_binop (mode, add_optab, count, GEN_INT (-2), count, 1,
5187 OPTAB_DIRECT);
5188 }
5189 if (temp != count)
5190 emit_move_insn (count, temp);
5191
5192 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5193 OPTAB_DIRECT);
5194 if (temp != blocks)
5195 emit_move_insn (blocks, temp);
5196
5197 emit_cmp_and_jump_insns (blocks, const0_rtx,
5198 EQ, NULL_RTX, mode, 1, loop_end_label);
5199
5200 emit_label (loop_start_label);
5201
5202 if (TARGET_Z10
5203 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
5204 {
5205 /* Issue a write prefetch for the +4 cache line. */
5206 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
5207 GEN_INT (1024)),
5208 const1_rtx, const0_rtx);
5209 emit_insn (prefetch);
5210 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5211 }
5212
5213 if (val == const0_rtx)
5214 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
5215 else
5216 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (255)));
5217 s390_load_address (dst_addr,
5218 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
5219
5220 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5221 OPTAB_DIRECT);
5222 if (temp != blocks)
5223 emit_move_insn (blocks, temp);
5224
5225 emit_cmp_and_jump_insns (blocks, const0_rtx,
5226 EQ, NULL_RTX, mode, 1, loop_end_label);
5227
5228 emit_jump (loop_start_label);
5229 emit_label (loop_end_label);
5230
5231 if (val == const0_rtx)
5232 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
5233 else
5234 emit_insn (gen_movmem_short (dstp1, dst, convert_to_mode (Pmode, count, 1)));
5235 emit_label (end_label);
5236 }
5237 }
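/* For illustration, the 1-byte overlap trick used above is roughly
   equivalent to

       dst[0] = val;
       for (i = 1; i < len; i++)
         dst[i] = dst[i - 1];

   mvc copies left to right one byte at a time, so a destination that
   overlaps the source by one byte propagates the first byte across
   the whole area.  */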
5238
5239 /* Emit code to compare LEN bytes at OP0 with those at OP1,
5240 and return the result in TARGET. */
5241
5242 bool
5243 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
5244 {
5245 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
5246 rtx tmp;
5247
5248 /* When tuning for z10 or higher we rely on the Glibc functions to
5249 do the right thing. Only for constant lengths below 64k do we
5250 generate inline code. */
5251 if (s390_tune >= PROCESSOR_2097_Z10
5252 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
5253 return false;
5254
5255 /* As the result of CMPINT is inverted compared to what we need,
5256 we have to swap the operands. */
5257 tmp = op0; op0 = op1; op1 = tmp;
5258
5259 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
5260 {
5261 if (INTVAL (len) > 0)
5262 {
5263 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
5264 emit_insn (gen_cmpint (target, ccreg));
5265 }
5266 else
5267 emit_move_insn (target, const0_rtx);
5268 }
5269 else if (TARGET_MVCLE)
5270 {
5271 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
5272 emit_insn (gen_cmpint (target, ccreg));
5273 }
5274 else
5275 {
5276 rtx addr0, addr1, count, blocks, temp;
5277 rtx_code_label *loop_start_label = gen_label_rtx ();
5278 rtx_code_label *loop_end_label = gen_label_rtx ();
5279 rtx_code_label *end_label = gen_label_rtx ();
5280 machine_mode mode;
5281
5282 mode = GET_MODE (len);
5283 if (mode == VOIDmode)
5284 mode = Pmode;
5285
5286 addr0 = gen_reg_rtx (Pmode);
5287 addr1 = gen_reg_rtx (Pmode);
5288 count = gen_reg_rtx (mode);
5289 blocks = gen_reg_rtx (mode);
5290
5291 convert_move (count, len, 1);
5292 emit_cmp_and_jump_insns (count, const0_rtx,
5293 EQ, NULL_RTX, mode, 1, end_label);
5294
5295 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
5296 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
5297 op0 = change_address (op0, VOIDmode, addr0);
5298 op1 = change_address (op1, VOIDmode, addr1);
5299
5300 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5301 OPTAB_DIRECT);
5302 if (temp != count)
5303 emit_move_insn (count, temp);
5304
5305 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5306 OPTAB_DIRECT);
5307 if (temp != blocks)
5308 emit_move_insn (blocks, temp);
5309
5310 emit_cmp_and_jump_insns (blocks, const0_rtx,
5311 EQ, NULL_RTX, mode, 1, loop_end_label);
5312
5313 emit_label (loop_start_label);
5314
5315 if (TARGET_Z10
5316 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
5317 {
5318 rtx prefetch;
5319
5320 /* Issue a read prefetch for the +2 cache line of operand 1. */
5321 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
5322 const0_rtx, const0_rtx);
5323 emit_insn (prefetch);
5324 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5325
5326 /* Issue a read prefetch for the +2 cache line of operand 2. */
5327 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
5328 const0_rtx, const0_rtx);
5329 emit_insn (prefetch);
5330 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5331 }
5332
5333 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
5334 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
5335 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
5336 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
5337 temp = gen_rtx_SET (pc_rtx, temp);
5338 emit_jump_insn (temp);
5339
5340 s390_load_address (addr0,
5341 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
5342 s390_load_address (addr1,
5343 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
5344
5345 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5346 OPTAB_DIRECT);
5347 if (temp != blocks)
5348 emit_move_insn (blocks, temp);
5349
5350 emit_cmp_and_jump_insns (blocks, const0_rtx,
5351 EQ, NULL_RTX, mode, 1, loop_end_label);
5352
5353 emit_jump (loop_start_label);
5354 emit_label (loop_end_label);
5355
5356 emit_insn (gen_cmpmem_short (op0, op1,
5357 convert_to_mode (Pmode, count, 1)));
5358 emit_label (end_label);
5359
5360 emit_insn (gen_cmpint (target, ccreg));
5361 }
5362 return true;
5363 }
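/* For illustration: each iteration of the loop above compares one
   256-byte block (a clc via cmpmem_short); the conditional jump on a
   nonzero condition code leaves the loop as soon as a difference is
   found, and cmpint finally converts the condition code into the
   integer result expected by the caller (the operands were swapped up
   front to compensate for the inverted sense of that conversion).  */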
5364
5365 /* Emit a conditional jump to LABEL for condition code mask MASK using
5366 comparison operator COMPARISON. Return the emitted jump insn. */
5367
5368 static rtx
5369 s390_emit_ccraw_jump (HOST_WIDE_INT mask, enum rtx_code comparison, rtx label)
5370 {
5371 rtx temp;
5372
5373 gcc_assert (comparison == EQ || comparison == NE);
5374 gcc_assert (mask > 0 && mask < 15);
5375
5376 temp = gen_rtx_fmt_ee (comparison, VOIDmode,
5377 gen_rtx_REG (CCRAWmode, CC_REGNUM), GEN_INT (mask));
5378 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
5379 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
5380 temp = gen_rtx_SET (pc_rtx, temp);
5381 return emit_jump_insn (temp);
5382 }
5383
5384 /* Emit the instructions to implement strlen of STRING and store the
5385 result in TARGET. The string has the known ALIGNMENT. This
5386 version uses vector instructions and is therefore not appropriate
5387 for targets prior to z13. */
5388
5389 void
5390 s390_expand_vec_strlen (rtx target, rtx string, rtx alignment)
5391 {
5392 int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
5393 int very_likely = REG_BR_PROB_BASE - 1;
5394 rtx highest_index_to_load_reg = gen_reg_rtx (Pmode);
5395 rtx str_reg = gen_reg_rtx (V16QImode);
5396 rtx str_addr_base_reg = gen_reg_rtx (Pmode);
5397 rtx str_idx_reg = gen_reg_rtx (Pmode);
5398 rtx result_reg = gen_reg_rtx (V16QImode);
5399 rtx is_aligned_label = gen_label_rtx ();
5400 rtx into_loop_label = NULL_RTX;
5401 rtx loop_start_label = gen_label_rtx ();
5402 rtx temp;
5403 rtx len = gen_reg_rtx (QImode);
5404 rtx cond;
5405
5406 s390_load_address (str_addr_base_reg, XEXP (string, 0));
5407 emit_move_insn (str_idx_reg, const0_rtx);
5408
5409 if (INTVAL (alignment) < 16)
5410 {
5411 /* Check whether the address happens to be aligned properly, so
5412 we can jump directly to the aligned loop. */
5413 emit_cmp_and_jump_insns (gen_rtx_AND (Pmode,
5414 str_addr_base_reg, GEN_INT (15)),
5415 const0_rtx, EQ, NULL_RTX,
5416 Pmode, 1, is_aligned_label);
5417
5418 temp = gen_reg_rtx (Pmode);
5419 temp = expand_binop (Pmode, and_optab, str_addr_base_reg,
5420 GEN_INT (15), temp, 1, OPTAB_DIRECT);
5421 gcc_assert (REG_P (temp));
5422 highest_index_to_load_reg =
5423 expand_binop (Pmode, sub_optab, GEN_INT (15), temp,
5424 highest_index_to_load_reg, 1, OPTAB_DIRECT);
5425 gcc_assert (REG_P (highest_index_to_load_reg));
5426 emit_insn (gen_vllv16qi (str_reg,
5427 convert_to_mode (SImode, highest_index_to_load_reg, 1),
5428 gen_rtx_MEM (BLKmode, str_addr_base_reg)));
5429
5430 into_loop_label = gen_label_rtx ();
5431 s390_emit_jump (into_loop_label, NULL_RTX);
5432 emit_barrier ();
5433 }
5434
5435 emit_label (is_aligned_label);
5436 LABEL_NUSES (is_aligned_label) = INTVAL (alignment) < 16 ? 2 : 1;
5437
5438 /* From this point on we perform only 16-byte aligned
5439 loads. */
5440 emit_move_insn (highest_index_to_load_reg, GEN_INT (15));
5441
5442 emit_label (loop_start_label);
5443 LABEL_NUSES (loop_start_label) = 1;
5444
5445 /* Load 16 bytes of the string into VR. */
5446 emit_move_insn (str_reg,
5447 gen_rtx_MEM (V16QImode,
5448 gen_rtx_PLUS (Pmode, str_idx_reg,
5449 str_addr_base_reg)));
5450 if (into_loop_label != NULL_RTX)
5451 {
5452 emit_label (into_loop_label);
5453 LABEL_NUSES (into_loop_label) = 1;
5454 }
5455
5456 /* Increment string index by 16 bytes. */
5457 expand_binop (Pmode, add_optab, str_idx_reg, GEN_INT (16),
5458 str_idx_reg, 1, OPTAB_DIRECT);
5459
5460 emit_insn (gen_vec_vfenesv16qi (result_reg, str_reg, str_reg,
5461 GEN_INT (VSTRING_FLAG_ZS | VSTRING_FLAG_CS)));
5462
5463 add_int_reg_note (s390_emit_ccraw_jump (8, NE, loop_start_label),
5464 REG_BR_PROB, very_likely);
5465 emit_insn (gen_vec_extractv16qi (len, result_reg, GEN_INT (7)));
5466
5467 /* If the string pointer wasn't aligned, we have loaded less than 16
5468 bytes and the remaining bytes got filled with zeros (by vll).
5469 Now we have to check whether the resulting index lies within the
5470 bytes that are actually part of the string. */
5471
5472 cond = s390_emit_compare (GT, convert_to_mode (Pmode, len, 1),
5473 highest_index_to_load_reg);
5474 s390_load_address (highest_index_to_load_reg,
5475 gen_rtx_PLUS (Pmode, highest_index_to_load_reg,
5476 const1_rtx));
5477 if (TARGET_64BIT)
5478 emit_insn (gen_movdicc (str_idx_reg, cond,
5479 highest_index_to_load_reg, str_idx_reg));
5480 else
5481 emit_insn (gen_movsicc (str_idx_reg, cond,
5482 highest_index_to_load_reg, str_idx_reg));
5483
5484 add_int_reg_note (s390_emit_jump (is_aligned_label, cond), REG_BR_PROB,
5485 very_unlikely);
5486
5487 expand_binop (Pmode, add_optab, str_idx_reg,
5488 GEN_INT (-16), str_idx_reg, 1, OPTAB_DIRECT);
5489 /* FIXME: len is already zero extended - so avoid the llgcr emitted
5490 here. */
5491 temp = expand_binop (Pmode, add_optab, str_idx_reg,
5492 convert_to_mode (Pmode, len, 1),
5493 target, 1, OPTAB_DIRECT);
5494 if (temp != target)
5495 emit_move_insn (target, temp);
5496 }
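/* For illustration: the loop above scans 16 bytes per iteration; the
   vector find-element-equal insn with zero search (vfenez) reports in
   byte element 7 of its result the index of the first zero byte, or
   16 if the current chunk contains none, which is what the CC-raw
   branch and the final vec_extract of element 7 rely on.  */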
5497
5498 /* Expand conditional increment or decrement using alc/slb instructions.
5499 Should generate code setting DST to either SRC or SRC + INCREMENT,
5500 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
5501 Returns true if successful, false otherwise.
5502
5503 That makes it possible to implement some if-constructs without jumps e.g.:
5504 (borrow = CC0 | CC1 and carry = CC2 | CC3)
5505 unsigned int a, b, c;
5506 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
5507 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
5508 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
5509 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
5510
5511 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
5512 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
5513 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
5514 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
5515 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
5516
5517 bool
5518 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
5519 rtx dst, rtx src, rtx increment)
5520 {
5521 machine_mode cmp_mode;
5522 machine_mode cc_mode;
5523 rtx op_res;
5524 rtx insn;
5525 rtvec p;
5526 int ret;
5527
5528 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
5529 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
5530 cmp_mode = SImode;
5531 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
5532 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
5533 cmp_mode = DImode;
5534 else
5535 return false;
5536
5537 /* Try ADD LOGICAL WITH CARRY. */
5538 if (increment == const1_rtx)
5539 {
5540 /* Determine CC mode to use. */
5541 if (cmp_code == EQ || cmp_code == NE)
5542 {
5543 if (cmp_op1 != const0_rtx)
5544 {
5545 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
5546 NULL_RTX, 0, OPTAB_WIDEN);
5547 cmp_op1 = const0_rtx;
5548 }
5549
5550 cmp_code = cmp_code == EQ ? LEU : GTU;
5551 }
5552
5553 if (cmp_code == LTU || cmp_code == LEU)
5554 {
5555 rtx tem = cmp_op0;
5556 cmp_op0 = cmp_op1;
5557 cmp_op1 = tem;
5558 cmp_code = swap_condition (cmp_code);
5559 }
5560
5561 switch (cmp_code)
5562 {
5563 case GTU:
5564 cc_mode = CCUmode;
5565 break;
5566
5567 case GEU:
5568 cc_mode = CCL3mode;
5569 break;
5570
5571 default:
5572 return false;
5573 }
5574
5575 /* Emit comparison instruction pattern. */
5576 if (!register_operand (cmp_op0, cmp_mode))
5577 cmp_op0 = force_reg (cmp_mode, cmp_op0);
5578
5579 insn = gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
5580 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
5581 /* We use insn_invalid_p here to add clobbers if required. */
5582 ret = insn_invalid_p (emit_insn (insn), false);
5583 gcc_assert (!ret);
5584
5585 /* Emit ALC instruction pattern. */
5586 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
5587 gen_rtx_REG (cc_mode, CC_REGNUM),
5588 const0_rtx);
5589
5590 if (src != const0_rtx)
5591 {
5592 if (!register_operand (src, GET_MODE (dst)))
5593 src = force_reg (GET_MODE (dst), src);
5594
5595 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
5596 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
5597 }
5598
5599 p = rtvec_alloc (2);
5600 RTVEC_ELT (p, 0) =
5601 gen_rtx_SET (dst, op_res);
5602 RTVEC_ELT (p, 1) =
5603 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
5604 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
5605
5606 return true;
5607 }
5608
5609 /* Try SUBTRACT LOGICAL WITH BORROW. */
5610 if (increment == constm1_rtx)
5611 {
5612 /* Determine CC mode to use. */
5613 if (cmp_code == EQ || cmp_code == NE)
5614 {
5615 if (cmp_op1 != const0_rtx)
5616 {
5617 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
5618 NULL_RTX, 0, OPTAB_WIDEN);
5619 cmp_op1 = const0_rtx;
5620 }
5621
5622 cmp_code = cmp_code == EQ ? LEU : GTU;
5623 }
5624
5625 if (cmp_code == GTU || cmp_code == GEU)
5626 {
5627 rtx tem = cmp_op0;
5628 cmp_op0 = cmp_op1;
5629 cmp_op1 = tem;
5630 cmp_code = swap_condition (cmp_code);
5631 }
5632
5633 switch (cmp_code)
5634 {
5635 case LEU:
5636 cc_mode = CCUmode;
5637 break;
5638
5639 case LTU:
5640 cc_mode = CCL3mode;
5641 break;
5642
5643 default:
5644 return false;
5645 }
5646
5647 /* Emit comparison instruction pattern. */
5648 if (!register_operand (cmp_op0, cmp_mode))
5649 cmp_op0 = force_reg (cmp_mode, cmp_op0);
5650
5651 insn = gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
5652 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
5653 /* We use insn_invalid_p here to add clobbers if required. */
5654 ret = insn_invalid_p (emit_insn (insn), false);
5655 gcc_assert (!ret);
5656
5657 /* Emit SLB instruction pattern. */
5658 if (!register_operand (src, GET_MODE (dst)))
5659 src = force_reg (GET_MODE (dst), src);
5660
5661 op_res = gen_rtx_MINUS (GET_MODE (dst),
5662 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
5663 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
5664 gen_rtx_REG (cc_mode, CC_REGNUM),
5665 const0_rtx));
5666 p = rtvec_alloc (2);
5667 RTVEC_ELT (p, 0) =
5668 gen_rtx_SET (dst, op_res);
5669 RTVEC_ELT (p, 1) =
5670 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
5671 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
5672
5673 return true;
5674 }
5675
5676 return false;
5677 }
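/* For illustration, the ALC case above emits (when SRC is nonzero) a
   parallel of the form

     (parallel [(set dst (plus (plus (gtu (reg CC) (const_int 0)) src)
                               (const_int 0)))
                (clobber (reg CC))])

   i.e. the carry extracted from the condition code is added to SRC;
   the SLB case mirrors this with a pair of MINUS operations.  */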
5678
5679 /* Expand code for the insv template. Return true if successful. */
5680
5681 bool
5682 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
5683 {
5684 int bitsize = INTVAL (op1);
5685 int bitpos = INTVAL (op2);
5686 machine_mode mode = GET_MODE (dest);
5687 machine_mode smode;
5688 int smode_bsize, mode_bsize;
5689 rtx op, clobber;
5690
5691 if (bitsize + bitpos > GET_MODE_BITSIZE (mode))
5692 return false;
5693
5694 /* Generate INSERT IMMEDIATE (IILL et al). */
5695 /* (set (ze (reg)) (const_int)). */
5696 if (TARGET_ZARCH
5697 && register_operand (dest, word_mode)
5698 && (bitpos % 16) == 0
5699 && (bitsize % 16) == 0
5700 && const_int_operand (src, VOIDmode))
5701 {
5702 HOST_WIDE_INT val = INTVAL (src);
5703 int regpos = bitpos + bitsize;
5704
5705 while (regpos > bitpos)
5706 {
5707 machine_mode putmode;
5708 int putsize;
5709
5710 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
5711 putmode = SImode;
5712 else
5713 putmode = HImode;
5714
5715 putsize = GET_MODE_BITSIZE (putmode);
5716 regpos -= putsize;
5717 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
5718 GEN_INT (putsize),
5719 GEN_INT (regpos)),
5720 gen_int_mode (val, putmode));
5721 val >>= putsize;
5722 }
5723 gcc_assert (regpos == bitpos);
5724 return true;
5725 }
5726
5727 smode = smallest_mode_for_size (bitsize, MODE_INT);
5728 smode_bsize = GET_MODE_BITSIZE (smode);
5729 mode_bsize = GET_MODE_BITSIZE (mode);
5730
5731 /* Generate STORE CHARACTERS UNDER MASK (STCM et al). */
5732 if (bitpos == 0
5733 && (bitsize % BITS_PER_UNIT) == 0
5734 && MEM_P (dest)
5735 && (register_operand (src, word_mode)
5736 || const_int_operand (src, VOIDmode)))
5737 {
5738 /* Emit standard pattern if possible. */
5739 if (smode_bsize == bitsize)
5740 {
5741 emit_move_insn (adjust_address (dest, smode, 0),
5742 gen_lowpart (smode, src));
5743 return true;
5744 }
5745
5746 /* (set (ze (mem)) (const_int)). */
5747 else if (const_int_operand (src, VOIDmode))
5748 {
5749 int size = bitsize / BITS_PER_UNIT;
5750 rtx src_mem = adjust_address (force_const_mem (word_mode, src),
5751 BLKmode,
5752 UNITS_PER_WORD - size);
5753
5754 dest = adjust_address (dest, BLKmode, 0);
5755 set_mem_size (dest, size);
5756 s390_expand_movmem (dest, src_mem, GEN_INT (size));
5757 return true;
5758 }
5759
5760 /* (set (ze (mem)) (reg)). */
5761 else if (register_operand (src, word_mode))
5762 {
5763 if (bitsize <= 32)
5764 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
5765 const0_rtx), src);
5766 else
5767 {
5768 /* Emit st,stcmh sequence. */
5769 int stcmh_width = bitsize - 32;
5770 int size = stcmh_width / BITS_PER_UNIT;
5771
5772 emit_move_insn (adjust_address (dest, SImode, size),
5773 gen_lowpart (SImode, src));
5774 set_mem_size (dest, size);
5775 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
5776 GEN_INT (stcmh_width),
5777 const0_rtx),
5778 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT (32)));
5779 }
5780 return true;
5781 }
5782 }
5783
5784 /* Generate INSERT CHARACTERS UNDER MASK (IC, ICM et al). */
5785 if ((bitpos % BITS_PER_UNIT) == 0
5786 && (bitsize % BITS_PER_UNIT) == 0
5787 && (bitpos & 32) == ((bitpos + bitsize - 1) & 32)
5788 && MEM_P (src)
5789 && (mode == DImode || mode == SImode)
5790 && register_operand (dest, mode))
5791 {
5792 /* Emit a strict_low_part pattern if possible. */
5793 if (smode_bsize == bitsize && bitpos == mode_bsize - smode_bsize)
5794 {
5795 op = gen_rtx_STRICT_LOW_PART (VOIDmode, gen_lowpart (smode, dest));
5796 op = gen_rtx_SET (op, gen_lowpart (smode, src));
5797 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
5798 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
5799 return true;
5800 }
5801
5802 /* ??? There are more powerful versions of ICM that are not
5803 completely represented in the md file. */
5804 }
5805
5806 /* For z10, generate ROTATE THEN INSERT SELECTED BITS (RISBG et al). */
5807 if (TARGET_Z10 && (mode == DImode || mode == SImode))
5808 {
5809 machine_mode mode_s = GET_MODE (src);
5810
5811 if (mode_s == VOIDmode)
5812 {
5813 /* Assume const_int etc already in the proper mode. */
5814 src = force_reg (mode, src);
5815 }
5816 else if (mode_s != mode)
5817 {
5818 gcc_assert (GET_MODE_BITSIZE (mode_s) >= bitsize);
5819 src = force_reg (mode_s, src);
5820 src = gen_lowpart (mode, src);
5821 }
5822
5823 op = gen_rtx_ZERO_EXTRACT (mode, dest, op1, op2),
5824 op = gen_rtx_SET (op, src);
5825
5826 if (!TARGET_ZEC12)
5827 {
5828 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
5829 op = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber));
5830 }
5831 emit_insn (op);
5832
5833 return true;
5834 }
5835
5836 return false;
5837 }
5838
5839 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
5840 register that holds VAL of mode MODE shifted by COUNT bits. */
5841
5842 static inline rtx
5843 s390_expand_mask_and_shift (rtx val, machine_mode mode, rtx count)
5844 {
5845 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
5846 NULL_RTX, 1, OPTAB_DIRECT);
5847 return expand_simple_binop (SImode, ASHIFT, val, count,
5848 NULL_RTX, 1, OPTAB_DIRECT);
5849 }
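/* Illustrative note: for example, calling s390_expand_mask_and_shift with
   MODE == HImode and COUNT == 16 yields (VAL & 0xffff) << 16, i.e. the
   halfword moved to bits 16..31 of the SImode word.  */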
5850
5851 /* Generate a vector comparison COND of CMP_OP1 and CMP_OP2 and store
5852 the result in TARGET. */
5853
5854 void
5855 s390_expand_vec_compare (rtx target, enum rtx_code cond,
5856 rtx cmp_op1, rtx cmp_op2)
5857 {
5858 machine_mode mode = GET_MODE (target);
5859 bool neg_p = false, swap_p = false;
5860 rtx tmp;
5861
5862 if (GET_MODE (cmp_op1) == V2DFmode)
5863 {
5864 switch (cond)
5865 {
5866 /* NE: a != b -> !(a == b) */
5867 case NE: cond = EQ; neg_p = true; break;
5868 /* UNGT: a u> b -> !(b >= a) */
5869 case UNGT: cond = GE; neg_p = true; swap_p = true; break;
5870 /* UNGE: a u>= b -> !(b > a) */
5871 case UNGE: cond = GT; neg_p = true; swap_p = true; break;
5872 /* LE: a <= b -> b >= a */
5873 case LE: cond = GE; swap_p = true; break;
5874 /* UNLE: a u<= b -> !(a > b) */
5875 case UNLE: cond = GT; neg_p = true; break;
5876 /* LT: a < b -> b > a */
5877 case LT: cond = GT; swap_p = true; break;
5878 /* UNLT: a u< b -> !(a >= b) */
5879 case UNLT: cond = GE; neg_p = true; break;
5880 case UNEQ:
5881 emit_insn (gen_vec_cmpuneqv2df (target, cmp_op1, cmp_op2));
5882 return;
5883 case LTGT:
5884 emit_insn (gen_vec_cmpltgtv2df (target, cmp_op1, cmp_op2));
5885 return;
5886 case ORDERED:
5887 emit_insn (gen_vec_orderedv2df (target, cmp_op1, cmp_op2));
5888 return;
5889 case UNORDERED:
5890 emit_insn (gen_vec_unorderedv2df (target, cmp_op1, cmp_op2));
5891 return;
5892 default: break;
5893 }
5894 }
5895 else
5896 {
5897 switch (cond)
5898 {
5899 /* NE: a != b -> !(a == b) */
5900 case NE: cond = EQ; neg_p = true; break;
5901 /* GE: a >= b -> !(b > a) */
5902 case GE: cond = GT; neg_p = true; swap_p = true; break;
5903 /* GEU: a >= b -> !(b > a) */
5904 case GEU: cond = GTU; neg_p = true; swap_p = true; break;
5905 /* LE: a <= b -> !(a > b) */
5906 case LE: cond = GT; neg_p = true; break;
5907 /* LEU: a <= b -> !(a > b) */
5908 case LEU: cond = GTU; neg_p = true; break;
5909 /* LT: a < b -> b > a */
5910 case LT: cond = GT; swap_p = true; break;
5911 /* LTU: a < b -> b > a */
5912 case LTU: cond = GTU; swap_p = true; break;
5913 default: break;
5914 }
5915 }
5916
5917 if (swap_p)
5918 {
5919 tmp = cmp_op1; cmp_op1 = cmp_op2; cmp_op2 = tmp;
5920 }
5921
5922 emit_insn (gen_rtx_SET (target, gen_rtx_fmt_ee (cond,
5923 mode,
5924 cmp_op1, cmp_op2)));
5925 if (neg_p)
5926 emit_insn (gen_rtx_SET (target, gen_rtx_NOT (mode, target)));
5927 }
5928
5929 /* Expand the comparison CODE of CMP1 and CMP2 and copy 1 or 0 into
5930 TARGET if either all (ALL_P is true) or any (ALL_P is false) of the
5931 elements in CMP1 and CMP2 fulfill the comparison. */
5932 void
5933 s390_expand_vec_compare_cc (rtx target, enum rtx_code code,
5934 rtx cmp1, rtx cmp2, bool all_p)
5935 {
5936 enum rtx_code new_code = code;
5937 machine_mode cmp_mode, full_cmp_mode, scratch_mode;
5938 rtx tmp_reg = gen_reg_rtx (SImode);
5939 bool swap_p = false;
5940
5941 if (GET_MODE_CLASS (GET_MODE (cmp1)) == MODE_VECTOR_INT)
5942 {
5943 switch (code)
5944 {
5945 case EQ: cmp_mode = CCVEQmode; break;
5946 case NE: cmp_mode = CCVEQmode; break;
5947 case GT: cmp_mode = CCVHmode; break;
5948 case GE: cmp_mode = CCVHmode; new_code = LE; swap_p = true; break;
5949 case LT: cmp_mode = CCVHmode; new_code = GT; swap_p = true; break;
5950 case LE: cmp_mode = CCVHmode; new_code = LE; break;
5951 case GTU: cmp_mode = CCVHUmode; break;
5952 case GEU: cmp_mode = CCVHUmode; new_code = LEU; swap_p = true; break;
5953 case LTU: cmp_mode = CCVHUmode; new_code = GTU; swap_p = true; break;
5954 case LEU: cmp_mode = CCVHUmode; new_code = LEU; break;
5955 default: gcc_unreachable ();
5956 }
5957 scratch_mode = GET_MODE (cmp1);
5958 }
5959 else if (GET_MODE (cmp1) == V2DFmode)
5960 {
5961 switch (code)
5962 {
5963 case EQ: cmp_mode = CCVEQmode; break;
5964 case NE: cmp_mode = CCVEQmode; break;
5965 case GT: cmp_mode = CCVFHmode; break;
5966 case GE: cmp_mode = CCVFHEmode; break;
5967 case UNLE: cmp_mode = CCVFHmode; break;
5968 case UNLT: cmp_mode = CCVFHEmode; break;
5969 case LT: cmp_mode = CCVFHmode; new_code = GT; swap_p = true; break;
5970 case LE: cmp_mode = CCVFHEmode; new_code = GE; swap_p = true; break;
5971 default: gcc_unreachable ();
5972 }
5973 scratch_mode = V2DImode;
5974 }
5975 else
5976 gcc_unreachable ();
5977
5978 if (!all_p)
5979 switch (cmp_mode)
5980 {
5981 case CCVEQmode: full_cmp_mode = CCVEQANYmode; break;
5982 case CCVHmode: full_cmp_mode = CCVHANYmode; break;
5983 case CCVHUmode: full_cmp_mode = CCVHUANYmode; break;
5984 case CCVFHmode: full_cmp_mode = CCVFHANYmode; break;
5985 case CCVFHEmode: full_cmp_mode = CCVFHEANYmode; break;
5986 default: gcc_unreachable ();
5987 }
5988 else
5989 /* The modes without ANY match the ALL modes. */
5990 full_cmp_mode = cmp_mode;
5991
5992 if (swap_p)
5993 {
5994 rtx tmp = cmp2;
5995 cmp2 = cmp1;
5996 cmp1 = tmp;
5997 }
5998
5999 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6000 gen_rtvec (2, gen_rtx_SET (
6001 gen_rtx_REG (cmp_mode, CC_REGNUM),
6002 gen_rtx_COMPARE (cmp_mode, cmp1, cmp2)),
6003 gen_rtx_CLOBBER (VOIDmode,
6004 gen_rtx_SCRATCH (scratch_mode)))));
6005 emit_move_insn (target, const0_rtx);
6006 emit_move_insn (tmp_reg, const1_rtx);
6007
6008 emit_move_insn (target,
6009 gen_rtx_IF_THEN_ELSE (SImode,
6010 gen_rtx_fmt_ee (new_code, VOIDmode,
6011 gen_rtx_REG (full_cmp_mode, CC_REGNUM),
6012 const0_rtx),
6013 target, tmp_reg));
6014 }
6015
6016 /* Generate a vector comparison expression loading either elements of
6017 THEN or ELS into TARGET depending on the comparison COND of CMP_OP1
6018 and CMP_OP2. */
6019
6020 void
6021 s390_expand_vcond (rtx target, rtx then, rtx els,
6022 enum rtx_code cond, rtx cmp_op1, rtx cmp_op2)
6023 {
6024 rtx tmp;
6025 machine_mode result_mode;
6026 rtx result_target;
6027
6028 /* We always use an integral type vector to hold the comparison
6029 result. */
6030 result_mode = GET_MODE (cmp_op1) == V2DFmode ? V2DImode : GET_MODE (cmp_op1);
6031 result_target = gen_reg_rtx (result_mode);
6032
6033 /* Alternatively this could be done during reload by lowering the cmp*
6034 predicates. But it appears to be better for scheduling etc. to
6035 have this in place early. */
6036 if (!REG_P (cmp_op1))
6037 cmp_op1 = force_reg (GET_MODE (target), cmp_op1);
6038
6039 if (!REG_P (cmp_op2))
6040 cmp_op2 = force_reg (GET_MODE (target), cmp_op2);
6041
6042 s390_expand_vec_compare (result_target, cond,
6043 cmp_op1, cmp_op2);
6044
6045 /* If the results are supposed to be either -1 or 0 we are done
6046 since this is what our compare instructions generate anyway. */
6047 if (constm1_operand (then, GET_MODE (then))
6048 && const0_operand (els, GET_MODE (els)))
6049 {
6050 emit_move_insn (target, gen_rtx_SUBREG (GET_MODE (target),
6051 result_target, 0));
6052 return;
6053 }
6054
6055 /* Otherwise we will do a vsel afterwards. */
6056 /* This gets triggered e.g.
6057 with gcc.c-torture/compile/pr53410-1.c */
6058 if (!REG_P (then))
6059 then = force_reg (GET_MODE (target), then);
6060
6061 if (!REG_P (els))
6062 els = force_reg (GET_MODE (target), els);
6063
6064 tmp = gen_rtx_fmt_ee (EQ, VOIDmode,
6065 result_target,
6066 CONST0_RTX (result_mode));
6067
6068 /* We compared the result against zero above so we have to swap then
6069 and els here. */
6070 tmp = gen_rtx_IF_THEN_ELSE (GET_MODE (target), tmp, els, then);
6071
6072 gcc_assert (GET_MODE (target) == GET_MODE (then));
6073 emit_insn (gen_rtx_SET (target, tmp));
6074 }
6075
6076 /* Emit the RTX necessary to initialize the vector TARGET with values
6077 in VALS. */
6078 void
6079 s390_expand_vec_init (rtx target, rtx vals)
6080 {
6081 machine_mode mode = GET_MODE (target);
6082 machine_mode inner_mode = GET_MODE_INNER (mode);
6083 int n_elts = GET_MODE_NUNITS (mode);
6084 bool all_same = true, all_regs = true, all_const_int = true;
6085 rtx x;
6086 int i;
6087
6088 for (i = 0; i < n_elts; ++i)
6089 {
6090 x = XVECEXP (vals, 0, i);
6091
6092 if (!CONST_INT_P (x))
6093 all_const_int = false;
6094
6095 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6096 all_same = false;
6097
6098 if (!REG_P (x))
6099 all_regs = false;
6100 }
6101
6102 /* Use vector gen mask or vector gen byte mask if possible. */
6103 if (all_same && all_const_int
6104 && (XVECEXP (vals, 0, 0) == const0_rtx
6105 || s390_contiguous_bitmask_vector_p (XVECEXP (vals, 0, 0),
6106 NULL, NULL)
6107 || s390_bytemask_vector_p (XVECEXP (vals, 0, 0), NULL)))
6108 {
6109 emit_insn (gen_rtx_SET (target,
6110 gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0))));
6111 return;
6112 }
6113
6114 if (all_same)
6115 {
6116 emit_insn (gen_rtx_SET (target,
6117 gen_rtx_VEC_DUPLICATE (mode,
6118 XVECEXP (vals, 0, 0))));
6119 return;
6120 }
6121
6122 if (all_regs && REG_P (target) && n_elts == 2 && inner_mode == DImode)
6123 {
6124 /* Use vector load pair. */
6125 emit_insn (gen_rtx_SET (target,
6126 gen_rtx_VEC_CONCAT (mode,
6127 XVECEXP (vals, 0, 0),
6128 XVECEXP (vals, 0, 1))));
6129 return;
6130 }
6131
6132 /* We are about to set the vector elements one by one. Zero out the
6133 full register first in order to help the data flow framework to
6134 detect it as a full VR set. */
6135 emit_insn (gen_rtx_SET (target, CONST0_RTX (mode)));
6136
6137 /* Unfortunately the vec_init expander is not allowed to fail. So
6138 we have to implement the fallback ourselves. */
6139 for (i = 0; i < n_elts; i++)
6140 emit_insn (gen_rtx_SET (target,
6141 gen_rtx_UNSPEC (mode,
6142 gen_rtvec (3, XVECEXP (vals, 0, i),
6143 GEN_INT (i), target),
6144 UNSPEC_VEC_SET)));
6145 }
6146
6147 /* Structure to hold the initial parameters for a compare_and_swap operation
6148 in HImode and QImode. */
6149
6150 struct alignment_context
6151 {
6152 rtx memsi; /* SI aligned memory location. */
6153 rtx shift; /* Bit offset with regard to lsb. */
6154 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
6155 rtx modemaski; /* ~modemask */
6156 bool aligned; /* True if memory is aligned, false otherwise. */
6157 };
6158
6159 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
6160 structure AC for transparent simplification, if the memory alignment is known
6161 to be at least 32 bits. MEM is the memory location for the actual operation
6162 and MODE its mode. */
6163
6164 static void
6165 init_alignment_context (struct alignment_context *ac, rtx mem,
6166 machine_mode mode)
6167 {
6168 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
6169 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
6170
6171 if (ac->aligned)
6172 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
6173 else
6174 {
6175 /* Alignment is unknown. */
6176 rtx byteoffset, addr, align;
6177
6178 /* Force the address into a register. */
6179 addr = force_reg (Pmode, XEXP (mem, 0));
6180
6181 /* Align it to SImode. */
6182 align = expand_simple_binop (Pmode, AND, addr,
6183 GEN_INT (-GET_MODE_SIZE (SImode)),
6184 NULL_RTX, 1, OPTAB_DIRECT);
6185 /* Generate MEM. */
6186 ac->memsi = gen_rtx_MEM (SImode, align);
6187 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
6188 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
6189 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
6190
6191 /* Calculate shiftcount. */
6192 byteoffset = expand_simple_binop (Pmode, AND, addr,
6193 GEN_INT (GET_MODE_SIZE (SImode) - 1),
6194 NULL_RTX, 1, OPTAB_DIRECT);
6195 /* As we already have some offset, evaluate the remaining distance. */
6196 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
6197 NULL_RTX, 1, OPTAB_DIRECT);
6198 }
6199
6200 /* Shift is the byte count, but we need the bitcount. */
6201 ac->shift = expand_simple_binop (SImode, ASHIFT, ac->shift, GEN_INT (3),
6202 NULL_RTX, 1, OPTAB_DIRECT);
6203
6204 /* Calculate masks. */
6205 ac->modemask = expand_simple_binop (SImode, ASHIFT,
6206 GEN_INT (GET_MODE_MASK (mode)),
6207 ac->shift, NULL_RTX, 1, OPTAB_DIRECT);
6208 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask,
6209 NULL_RTX, 1);
6210 }
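/* Worked example for init_alignment_context above: for a QImode access whose
   address has byte offset 1 within its SImode word (S/390 is big-endian),
   the unaligned path produces
     memsi     = MEM at (addr & -4),
     shift     = ((4 - 1) - 1) * 8 = 16,
     modemask  = 0xff << 16 = 0x00ff0000,
     modemaski = 0xff00ffff,
   i.e. the byte occupies bits 16..23 of the loaded word.  */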
6211
6212 /* A subroutine of s390_expand_cs_hqi. Insert INS into VAL. If possible,
6213 use a single insv insn into SEQ2. Otherwise, put prep insns in SEQ1 and
6214 perform the merge in SEQ2. */
6215
6216 static rtx
6217 s390_two_part_insv (struct alignment_context *ac, rtx *seq1, rtx *seq2,
6218 machine_mode mode, rtx val, rtx ins)
6219 {
6220 rtx tmp;
6221
6222 if (ac->aligned)
6223 {
6224 start_sequence ();
6225 tmp = copy_to_mode_reg (SImode, val);
6226 if (s390_expand_insv (tmp, GEN_INT (GET_MODE_BITSIZE (mode)),
6227 const0_rtx, ins))
6228 {
6229 *seq1 = NULL;
6230 *seq2 = get_insns ();
6231 end_sequence ();
6232 return tmp;
6233 }
6234 end_sequence ();
6235 }
6236
6237 /* Failed to use insv. Generate a two part shift and mask. */
6238 start_sequence ();
6239 tmp = s390_expand_mask_and_shift (ins, mode, ac->shift);
6240 *seq1 = get_insns ();
6241 end_sequence ();
6242
6243 start_sequence ();
6244 tmp = expand_simple_binop (SImode, IOR, tmp, val, NULL_RTX, 1, OPTAB_DIRECT);
6245 *seq2 = get_insns ();
6246 end_sequence ();
6247
6248 return tmp;
6249 }
6250
6251 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
6252 the memory location, CMP the old value to compare MEM with and NEW_RTX the
6253 value to set if CMP == MEM. */
6254
6255 void
6256 s390_expand_cs_hqi (machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
6257 rtx cmp, rtx new_rtx, bool is_weak)
6258 {
6259 struct alignment_context ac;
6260 rtx cmpv, newv, val, cc, seq0, seq1, seq2, seq3;
6261 rtx res = gen_reg_rtx (SImode);
6262 rtx_code_label *csloop = NULL, *csend = NULL;
6263
6264 gcc_assert (MEM_P (mem));
6265
6266 init_alignment_context (&ac, mem, mode);
6267
6268 /* Load full word. Subsequent loads are performed by CS. */
6269 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
6270 NULL_RTX, 1, OPTAB_DIRECT);
6271
6272 /* Prepare insertions of cmp and new_rtx into the loaded value. When
6273 possible, we try to use insv to make this happen efficiently. If
6274 that fails we'll generate code both inside and outside the loop. */
6275 cmpv = s390_two_part_insv (&ac, &seq0, &seq2, mode, val, cmp);
6276 newv = s390_two_part_insv (&ac, &seq1, &seq3, mode, val, new_rtx);
6277
6278 if (seq0)
6279 emit_insn (seq0);
6280 if (seq1)
6281 emit_insn (seq1);
6282
6283 /* Start CS loop. */
6284 if (!is_weak)
6285 {
6286 /* Begin assuming success. */
6287 emit_move_insn (btarget, const1_rtx);
6288
6289 csloop = gen_label_rtx ();
6290 csend = gen_label_rtx ();
6291 emit_label (csloop);
6292 }
6293
6294 /* val = "<mem>00..0<mem>"
6295 * cmp = "00..0<cmp>00..0"
6296 * new = "00..0<new>00..0"
6297 */
6298
6299 emit_insn (seq2);
6300 emit_insn (seq3);
6301
6302 cc = s390_emit_compare_and_swap (EQ, res, ac.memsi, cmpv, newv);
6303 if (is_weak)
6304 emit_insn (gen_cstorecc4 (btarget, cc, XEXP (cc, 0), XEXP (cc, 1)));
6305 else
6306 {
6307 rtx tmp;
6308
6309 /* Jump to end if we're done (likely?). */
6310 s390_emit_jump (csend, cc);
6311
6312 /* Check for changes outside the mode, and loop internally if so.
6313 Arrange the moves so that the compare is adjacent to the
6314 branch so that we can generate CRJ. */
6315 tmp = copy_to_reg (val);
6316 force_expand_binop (SImode, and_optab, res, ac.modemaski, val,
6317 1, OPTAB_DIRECT);
6318 cc = s390_emit_compare (NE, val, tmp);
6319 s390_emit_jump (csloop, cc);
6320
6321 /* Failed. */
6322 emit_move_insn (btarget, const0_rtx);
6323 emit_label (csend);
6324 }
6325
6326 /* Return the correct part of the bitfield. */
6327 convert_move (vtarget, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
6328 NULL_RTX, 1, OPTAB_DIRECT), 1);
6329 }
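/* Note on the non-weak case above: the loop retries the CS as long as the
   compare failed only because bits outside the HQImode field changed (VAL is
   then refreshed and CMPV/NEWV are rebuilt via seq2/seq3).  If the field
   itself differs from CMP, the operation fails and BTARGET is set to 0.  */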
6330
6331 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
6332 and VAL the value to play with. If AFTER is true then store the value
6333 MEM holds after the operation, if AFTER is false then store the value MEM
6334 holds before the operation. If TARGET is zero then discard that value, else
6335 store it to TARGET. */
6336
6337 void
6338 s390_expand_atomic (machine_mode mode, enum rtx_code code,
6339 rtx target, rtx mem, rtx val, bool after)
6340 {
6341 struct alignment_context ac;
6342 rtx cmp;
6343 rtx new_rtx = gen_reg_rtx (SImode);
6344 rtx orig = gen_reg_rtx (SImode);
6345 rtx_code_label *csloop = gen_label_rtx ();
6346
6347 gcc_assert (!target || register_operand (target, VOIDmode));
6348 gcc_assert (MEM_P (mem));
6349
6350 init_alignment_context (&ac, mem, mode);
6351
6352 /* Shift val to the correct bit positions.
6353 Preserve "icm", but prevent "ex icm". */
6354 if (!(ac.aligned && code == SET && MEM_P (val)))
6355 val = s390_expand_mask_and_shift (val, mode, ac.shift);
6356
6357 /* Further preparation insns. */
6358 if (code == PLUS || code == MINUS)
6359 emit_move_insn (orig, val);
6360 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
6361 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
6362 NULL_RTX, 1, OPTAB_DIRECT);
6363
6364 /* Load full word. Subsequent loads are performed by CS. */
6365 cmp = force_reg (SImode, ac.memsi);
6366
6367 /* Start CS loop. */
6368 emit_label (csloop);
6369 emit_move_insn (new_rtx, cmp);
6370
6371 /* Patch new with val at correct position. */
6372 switch (code)
6373 {
6374 case PLUS:
6375 case MINUS:
6376 val = expand_simple_binop (SImode, code, new_rtx, orig,
6377 NULL_RTX, 1, OPTAB_DIRECT);
6378 val = expand_simple_binop (SImode, AND, val, ac.modemask,
6379 NULL_RTX, 1, OPTAB_DIRECT);
6380 /* FALLTHRU */
6381 case SET:
6382 if (ac.aligned && MEM_P (val))
6383 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0,
6384 0, 0, SImode, val);
6385 else
6386 {
6387 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
6388 NULL_RTX, 1, OPTAB_DIRECT);
6389 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
6390 NULL_RTX, 1, OPTAB_DIRECT);
6391 }
6392 break;
6393 case AND:
6394 case IOR:
6395 case XOR:
6396 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
6397 NULL_RTX, 1, OPTAB_DIRECT);
6398 break;
6399 case MULT: /* NAND */
6400 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
6401 NULL_RTX, 1, OPTAB_DIRECT);
6402 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
6403 NULL_RTX, 1, OPTAB_DIRECT);
6404 break;
6405 default:
6406 gcc_unreachable ();
6407 }
6408
6409 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
6410 ac.memsi, cmp, new_rtx));
6411
6412 /* Return the correct part of the bitfield. */
6413 if (target)
6414 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
6415 after ? new_rtx : cmp, ac.shift,
6416 NULL_RTX, 1, OPTAB_DIRECT), 1);
6417 }
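/* Note on the MULT (NAND) case above: VAL already has all bits outside the
   field set (via the earlier XOR with modemaski), so the AND leaves the bits
   outside the field unchanged, and the final XOR with modemask inverts only
   the bits inside the field, producing the NAND result for the CS loop.  */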
6418
6419 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
6420 We need to emit DTP-relative relocations. */
6421
6422 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
6423
6424 static void
6425 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
6426 {
6427 switch (size)
6428 {
6429 case 4:
6430 fputs ("\t.long\t", file);
6431 break;
6432 case 8:
6433 fputs ("\t.quad\t", file);
6434 break;
6435 default:
6436 gcc_unreachable ();
6437 }
6438 output_addr_const (file, x);
6439 fputs ("@DTPOFF", file);
6440 }
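/* For instance, with SIZE == 8 and X a SYMBOL_REF for "foo", the routine
   above emits "\t.quad\tfoo@DTPOFF".  */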
6441
6442 /* Return the proper mode for REGNO being represented in the dwarf
6443 unwind table. */
6444 machine_mode
6445 s390_dwarf_frame_reg_mode (int regno)
6446 {
6447 machine_mode save_mode = default_dwarf_frame_reg_mode (regno);
6448
6449 /* The rightmost 64 bits of vector registers are call-clobbered. */
6450 if (GET_MODE_SIZE (save_mode) > 8)
6451 save_mode = DImode;
6452
6453 return save_mode;
6454 }
6455
6456 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
6457 /* Implement TARGET_MANGLE_TYPE. */
6458
6459 static const char *
6460 s390_mangle_type (const_tree type)
6461 {
6462 type = TYPE_MAIN_VARIANT (type);
6463
6464 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
6465 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
6466 return NULL;
6467
6468 if (type == s390_builtin_types[BT_BV16QI]) return "U6__boolc";
6469 if (type == s390_builtin_types[BT_BV8HI]) return "U6__bools";
6470 if (type == s390_builtin_types[BT_BV4SI]) return "U6__booli";
6471 if (type == s390_builtin_types[BT_BV2DI]) return "U6__booll";
6472
6473 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
6474 && TARGET_LONG_DOUBLE_128)
6475 return "g";
6476
6477 /* For all other types, use normal C++ mangling. */
6478 return NULL;
6479 }
6480 #endif
6481
6482 /* In the name of slightly smaller debug output, and to cater to
6483 general assembler lossage, recognize various UNSPEC sequences
6484 and turn them back into a direct symbol reference. */
6485
6486 static rtx
6487 s390_delegitimize_address (rtx orig_x)
6488 {
6489 rtx x, y;
6490
6491 orig_x = delegitimize_mem_from_attrs (orig_x);
6492 x = orig_x;
6493
6494 /* Extract the symbol ref from:
6495 (plus:SI (reg:SI 12 %r12)
6496 (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
6497 UNSPEC_GOTOFF/PLTOFF)))
6498 and
6499 (plus:SI (reg:SI 12 %r12)
6500 (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
6501 UNSPEC_GOTOFF/PLTOFF)
6502 (const_int 4 [0x4])))) */
6503 if (GET_CODE (x) == PLUS
6504 && REG_P (XEXP (x, 0))
6505 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
6506 && GET_CODE (XEXP (x, 1)) == CONST)
6507 {
6508 HOST_WIDE_INT offset = 0;
6509
6510 /* The const operand. */
6511 y = XEXP (XEXP (x, 1), 0);
6512
6513 if (GET_CODE (y) == PLUS
6514 && GET_CODE (XEXP (y, 1)) == CONST_INT)
6515 {
6516 offset = INTVAL (XEXP (y, 1));
6517 y = XEXP (y, 0);
6518 }
6519
6520 if (GET_CODE (y) == UNSPEC
6521 && (XINT (y, 1) == UNSPEC_GOTOFF
6522 || XINT (y, 1) == UNSPEC_PLTOFF))
6523 return plus_constant (Pmode, XVECEXP (y, 0, 0), offset);
6524 }
6525
6526 if (GET_CODE (x) != MEM)
6527 return orig_x;
6528
6529 x = XEXP (x, 0);
6530 if (GET_CODE (x) == PLUS
6531 && GET_CODE (XEXP (x, 1)) == CONST
6532 && GET_CODE (XEXP (x, 0)) == REG
6533 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
6534 {
6535 y = XEXP (XEXP (x, 1), 0);
6536 if (GET_CODE (y) == UNSPEC
6537 && XINT (y, 1) == UNSPEC_GOT)
6538 y = XVECEXP (y, 0, 0);
6539 else
6540 return orig_x;
6541 }
6542 else if (GET_CODE (x) == CONST)
6543 {
6544 /* Extract the symbol ref from:
6545 (mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
6546 UNSPEC_PLT/GOTENT))) */
6547
6548 y = XEXP (x, 0);
6549 if (GET_CODE (y) == UNSPEC
6550 && (XINT (y, 1) == UNSPEC_GOTENT
6551 || XINT (y, 1) == UNSPEC_PLT))
6552 y = XVECEXP (y, 0, 0);
6553 else
6554 return orig_x;
6555 }
6556 else
6557 return orig_x;
6558
6559 if (GET_MODE (orig_x) != Pmode)
6560 {
6561 if (GET_MODE (orig_x) == BLKmode)
6562 return orig_x;
6563 y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
6564 if (y == NULL_RTX)
6565 return orig_x;
6566 }
6567 return y;
6568 }
6569
6570 /* Output operand OP to stdio stream FILE.
6571 OP is an address (register + offset) which is not used to address data;
6572 instead the rightmost bits are interpreted as the value. */
6573
6574 static void
6575 print_shift_count_operand (FILE *file, rtx op)
6576 {
6577 HOST_WIDE_INT offset;
6578 rtx base;
6579
6580 /* Extract base register and offset. */
6581 if (!s390_decompose_shift_count (op, &base, &offset))
6582 gcc_unreachable ();
6583
6584 /* Sanity check. */
6585 if (base)
6586 {
6587 gcc_assert (GET_CODE (base) == REG);
6588 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
6589 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
6590 }
6591
6592 /* Offsets are restricted to twelve bits. */
6593 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
6594 if (base)
6595 fprintf (file, "(%s)", reg_names[REGNO (base)]);
6596 }
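/* For example, a shift count operand of the form
   (plus:SI (reg:SI %r3) (const_int 6)) is printed as "6(%r3)"; the offset is
   first truncated to its low twelve bits.  */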
6597
6598 /* Assigns the number of NOP halfwords to be emitted before and after the
6599 function label to *HW_BEFORE and *HW_AFTER. Both pointers must not be NULL.
6600 If hotpatching is disabled for the function, the values are set to zero.
6601 */
6602
6603 static void
6604 s390_function_num_hotpatch_hw (tree decl,
6605 int *hw_before,
6606 int *hw_after)
6607 {
6608 tree attr;
6609
6610 attr = lookup_attribute ("hotpatch", DECL_ATTRIBUTES (decl));
6611
6612 /* Handle the arguments of the hotpatch attribute. The values
6613 specified via attribute might override the cmdline argument
6614 values. */
6615 if (attr)
6616 {
6617 tree args = TREE_VALUE (attr);
6618
6619 *hw_before = TREE_INT_CST_LOW (TREE_VALUE (args));
6620 *hw_after = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (args)));
6621 }
6622 else
6623 {
6624 /* Use the values specified by the cmdline arguments. */
6625 *hw_before = s390_hotpatch_hw_before_label;
6626 *hw_after = s390_hotpatch_hw_after_label;
6627 }
6628 }
6629
6630 /* Write the extra assembler code needed to declare a function properly. */
6631
6632 void
6633 s390_asm_output_function_label (FILE *asm_out_file, const char *fname,
6634 tree decl)
6635 {
6636 int hw_before, hw_after;
6637
6638 s390_function_num_hotpatch_hw (decl, &hw_before, &hw_after);
6639 if (hw_before > 0)
6640 {
6641 unsigned int function_alignment;
6642 int i;
6643
6644 /* Add a trampoline code area before the function label and initialize it
6645 with two-byte nop instructions. This area can be overwritten with code
6646 that jumps to a patched version of the function. */
6647 asm_fprintf (asm_out_file, "\tnopr\t%%r7"
6648 "\t# pre-label NOPs for hotpatch (%d halfwords)\n",
6649 hw_before);
6650 for (i = 1; i < hw_before; i++)
6651 fputs ("\tnopr\t%r7\n", asm_out_file);
6652
6653 /* Note: The function label must be aligned so that (a) the bytes of the
6654 following nop do not cross a cacheline boundary, and (b) a jump address
6655 (eight bytes for 64-bit targets, four bytes for 32-bit targets) can be
6656 stored directly before the label without crossing a cacheline
6657 boundary. All this is necessary to make sure the trampoline code can
6658 be changed atomically.
6659 This alignment is done automatically using the FUNCTION_BOUNDARY, but
6660 if there are NOPs before the function label, the alignment is placed
6661 before them. So it is necessary to duplicate the alignment after the
6662 NOPs. */
6663 function_alignment = MAX (8, DECL_ALIGN (decl) / BITS_PER_UNIT);
6664 if (! DECL_USER_ALIGN (decl))
6665 function_alignment = MAX (function_alignment,
6666 (unsigned int) align_functions);
6667 fputs ("\t# alignment for hotpatch\n", asm_out_file);
6668 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (function_alignment));
6669 }
6670
6671 ASM_OUTPUT_LABEL (asm_out_file, fname);
6672 if (hw_after > 0)
6673 asm_fprintf (asm_out_file,
6674 "\t# post-label NOPs for hotpatch (%d halfwords)\n",
6675 hw_after);
6676 }
6677
6678 /* Output machine-dependent UNSPECs occurring in address constant X
6679 in assembler syntax to stdio stream FILE. Returns true if the
6680 constant X could be recognized, false otherwise. */
6681
6682 static bool
6683 s390_output_addr_const_extra (FILE *file, rtx x)
6684 {
6685 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
6686 switch (XINT (x, 1))
6687 {
6688 case UNSPEC_GOTENT:
6689 output_addr_const (file, XVECEXP (x, 0, 0));
6690 fprintf (file, "@GOTENT");
6691 return true;
6692 case UNSPEC_GOT:
6693 output_addr_const (file, XVECEXP (x, 0, 0));
6694 fprintf (file, "@GOT");
6695 return true;
6696 case UNSPEC_GOTOFF:
6697 output_addr_const (file, XVECEXP (x, 0, 0));
6698 fprintf (file, "@GOTOFF");
6699 return true;
6700 case UNSPEC_PLT:
6701 output_addr_const (file, XVECEXP (x, 0, 0));
6702 fprintf (file, "@PLT");
6703 return true;
6704 case UNSPEC_PLTOFF:
6705 output_addr_const (file, XVECEXP (x, 0, 0));
6706 fprintf (file, "@PLTOFF");
6707 return true;
6708 case UNSPEC_TLSGD:
6709 output_addr_const (file, XVECEXP (x, 0, 0));
6710 fprintf (file, "@TLSGD");
6711 return true;
6712 case UNSPEC_TLSLDM:
6713 assemble_name (file, get_some_local_dynamic_name ());
6714 fprintf (file, "@TLSLDM");
6715 return true;
6716 case UNSPEC_DTPOFF:
6717 output_addr_const (file, XVECEXP (x, 0, 0));
6718 fprintf (file, "@DTPOFF");
6719 return true;
6720 case UNSPEC_NTPOFF:
6721 output_addr_const (file, XVECEXP (x, 0, 0));
6722 fprintf (file, "@NTPOFF");
6723 return true;
6724 case UNSPEC_GOTNTPOFF:
6725 output_addr_const (file, XVECEXP (x, 0, 0));
6726 fprintf (file, "@GOTNTPOFF");
6727 return true;
6728 case UNSPEC_INDNTPOFF:
6729 output_addr_const (file, XVECEXP (x, 0, 0));
6730 fprintf (file, "@INDNTPOFF");
6731 return true;
6732 }
6733
6734 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
6735 switch (XINT (x, 1))
6736 {
6737 case UNSPEC_POOL_OFFSET:
6738 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
6739 output_addr_const (file, x);
6740 return true;
6741 }
6742 return false;
6743 }
6744
6745 /* Output address operand ADDR in assembler syntax to
6746 stdio stream FILE. */
6747
6748 void
6749 print_operand_address (FILE *file, rtx addr)
6750 {
6751 struct s390_address ad;
6752
6753 if (s390_loadrelative_operand_p (addr, NULL, NULL))
6754 {
6755 if (!TARGET_Z10)
6756 {
6757 output_operand_lossage ("symbolic memory references are "
6758 "only supported on z10 or later");
6759 return;
6760 }
6761 output_addr_const (file, addr);
6762 return;
6763 }
6764
6765 if (!s390_decompose_address (addr, &ad)
6766 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
6767 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
6768 output_operand_lossage ("cannot decompose address");
6769
6770 if (ad.disp)
6771 output_addr_const (file, ad.disp);
6772 else
6773 fprintf (file, "0");
6774
6775 if (ad.base && ad.indx)
6776 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
6777 reg_names[REGNO (ad.base)]);
6778 else if (ad.base)
6779 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
6780 }
6781
6782 /* Output operand X in assembler syntax to stdio stream FILE.
6783 CODE specified the format flag. The following format flags
6784 are recognized:
6785
6786 'C': print opcode suffix for branch condition.
6787 'D': print opcode suffix for inverse branch condition.
6788 'E': print opcode suffix for branch on index instruction.
6789 'G': print the size of the operand in bytes.
6790 'J': print tls_load/tls_gdcall/tls_ldcall suffix
6791 'M': print the second word of a TImode operand.
6792 'N': print the second word of a DImode operand.
6793 'O': print only the displacement of a memory reference or address.
6794 'R': print only the base register of a memory reference or address.
6795 'S': print S-type memory reference (base+displacement).
6796 'Y': print shift count operand.
6797
6798 'b': print integer X as if it's an unsigned byte.
6799 'c': print integer X as if it's a signed byte.
6800 'e': "end" contiguous bitmask X in either DImode or vector inner mode.
6801 'f': "end" contiguous bitmask X in SImode.
6802 'h': print integer X as if it's a signed halfword.
6803 'i': print the first nonzero HImode part of X.
6804 'j': print the first HImode part unequal to -1 of X.
6805 'k': print the first nonzero SImode part of X.
6806 'm': print the first SImode part unequal to -1 of X.
6807 'o': print integer X as if it's an unsigned 32bit word.
6808 's': "start" of contiguous bitmask X in either DImode or vector inner mode.
6809 't': CONST_INT: "start" of contiguous bitmask X in SImode.
6810 CONST_VECTOR: Generate a bitmask for vgbm instruction.
6811 'x': print integer X as if it's an unsigned halfword.
6812 'v': print register number as vector register (v1 instead of f1).
6813 */
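/* As an illustration (a hypothetical output template, not taken from
   s390.md): a template like "sll\t%0,%Y1" uses the 'Y' modifier to print
   operand 1 as a shift count of the form "disp(base)", while "%N0" refers to
   the second word of a DImode operand 0, i.e. the following register or the
   memory word four bytes further on.  */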
6814
6815 void
6816 print_operand (FILE *file, rtx x, int code)
6817 {
6818 HOST_WIDE_INT ival;
6819
6820 switch (code)
6821 {
6822 case 'C':
6823 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
6824 return;
6825
6826 case 'D':
6827 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
6828 return;
6829
6830 case 'E':
6831 if (GET_CODE (x) == LE)
6832 fprintf (file, "l");
6833 else if (GET_CODE (x) == GT)
6834 fprintf (file, "h");
6835 else
6836 output_operand_lossage ("invalid comparison operator "
6837 "for 'E' output modifier");
6838 return;
6839
6840 case 'J':
6841 if (GET_CODE (x) == SYMBOL_REF)
6842 {
6843 fprintf (file, "%s", ":tls_load:");
6844 output_addr_const (file, x);
6845 }
6846 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
6847 {
6848 fprintf (file, "%s", ":tls_gdcall:");
6849 output_addr_const (file, XVECEXP (x, 0, 0));
6850 }
6851 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
6852 {
6853 fprintf (file, "%s", ":tls_ldcall:");
6854 const char *name = get_some_local_dynamic_name ();
6855 gcc_assert (name);
6856 assemble_name (file, name);
6857 }
6858 else
6859 output_operand_lossage ("invalid reference for 'J' output modifier");
6860 return;
6861
6862 case 'G':
6863 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
6864 return;
6865
6866 case 'O':
6867 {
6868 struct s390_address ad;
6869 int ret;
6870
6871 ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
6872
6873 if (!ret
6874 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
6875 || ad.indx)
6876 {
6877 output_operand_lossage ("invalid address for 'O' output modifier");
6878 return;
6879 }
6880
6881 if (ad.disp)
6882 output_addr_const (file, ad.disp);
6883 else
6884 fprintf (file, "0");
6885 }
6886 return;
6887
6888 case 'R':
6889 {
6890 struct s390_address ad;
6891 int ret;
6892
6893 ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
6894
6895 if (!ret
6896 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
6897 || ad.indx)
6898 {
6899 output_operand_lossage ("invalid address for 'R' output modifier");
6900 return;
6901 }
6902
6903 if (ad.base)
6904 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
6905 else
6906 fprintf (file, "0");
6907 }
6908 return;
6909
6910 case 'S':
6911 {
6912 struct s390_address ad;
6913 int ret;
6914
6915 if (!MEM_P (x))
6916 {
6917 output_operand_lossage ("memory reference expected for "
6918 "'S' output modifier");
6919 return;
6920 }
6921 ret = s390_decompose_address (XEXP (x, 0), &ad);
6922
6923 if (!ret
6924 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
6925 || ad.indx)
6926 {
6927 output_operand_lossage ("invalid address for 'S' output modifier");
6928 return;
6929 }
6930
6931 if (ad.disp)
6932 output_addr_const (file, ad.disp);
6933 else
6934 fprintf (file, "0");
6935
6936 if (ad.base)
6937 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
6938 }
6939 return;
6940
6941 case 'N':
6942 if (GET_CODE (x) == REG)
6943 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
6944 else if (GET_CODE (x) == MEM)
6945 x = change_address (x, VOIDmode,
6946 plus_constant (Pmode, XEXP (x, 0), 4));
6947 else
6948 output_operand_lossage ("register or memory expression expected "
6949 "for 'N' output modifier");
6950 break;
6951
6952 case 'M':
6953 if (GET_CODE (x) == REG)
6954 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
6955 else if (GET_CODE (x) == MEM)
6956 x = change_address (x, VOIDmode,
6957 plus_constant (Pmode, XEXP (x, 0), 8));
6958 else
6959 output_operand_lossage ("register or memory expression expected "
6960 "for 'M' output modifier");
6961 break;
6962
6963 case 'Y':
6964 print_shift_count_operand (file, x);
6965 return;
6966 }
6967
6968 switch (GET_CODE (x))
6969 {
6970 case REG:
6971 /* Print FP regs as fx instead of vx when they are accessed
6972 through non-vector mode. */
6973 if (code == 'v'
6974 || VECTOR_NOFP_REG_P (x)
6975 || (FP_REG_P (x) && VECTOR_MODE_P (GET_MODE (x)))
6976 || (VECTOR_REG_P (x)
6977 && (GET_MODE_SIZE (GET_MODE (x)) /
6978 s390_class_max_nregs (FP_REGS, GET_MODE (x))) > 8))
6979 fprintf (file, "%%v%s", reg_names[REGNO (x)] + 2);
6980 else
6981 fprintf (file, "%s", reg_names[REGNO (x)]);
6982 break;
6983
6984 case MEM:
6985 output_address (XEXP (x, 0));
6986 break;
6987
6988 case CONST:
6989 case CODE_LABEL:
6990 case LABEL_REF:
6991 case SYMBOL_REF:
6992 output_addr_const (file, x);
6993 break;
6994
6995 case CONST_INT:
6996 ival = INTVAL (x);
6997 switch (code)
6998 {
6999 case 0:
7000 break;
7001 case 'b':
7002 ival &= 0xff;
7003 break;
7004 case 'c':
7005 ival = ((ival & 0xff) ^ 0x80) - 0x80;
7006 break;
7007 case 'x':
7008 ival &= 0xffff;
7009 break;
7010 case 'h':
7011 ival = ((ival & 0xffff) ^ 0x8000) - 0x8000;
7012 break;
7013 case 'i':
7014 ival = s390_extract_part (x, HImode, 0);
7015 break;
7016 case 'j':
7017 ival = s390_extract_part (x, HImode, -1);
7018 break;
7019 case 'k':
7020 ival = s390_extract_part (x, SImode, 0);
7021 break;
7022 case 'm':
7023 ival = s390_extract_part (x, SImode, -1);
7024 break;
7025 case 'o':
7026 ival &= 0xffffffff;
7027 break;
7028 case 'e': case 'f':
7029 case 's': case 't':
7030 {
7031 int pos, len;
7032 bool ok;
7033
7034 len = (code == 's' || code == 'e' ? 64 : 32);
7035 ok = s390_contiguous_bitmask_p (ival, len, &pos, &len);
7036 gcc_assert (ok);
7037 if (code == 's' || code == 't')
7038 ival = 64 - pos - len;
7039 else
7040 ival = 64 - 1 - pos;
7041 }
7042 break;
7043 default:
7044 output_operand_lossage ("invalid constant for output modifier '%c'", code);
7045 }
7046 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
7047 break;
7048
7049 case CONST_DOUBLE:
7050 gcc_assert (GET_MODE (x) == VOIDmode);
7051 if (code == 'b')
7052 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xff);
7053 else if (code == 'x')
7054 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xffff);
7055 else if (code == 'h')
7056 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7057 ((CONST_DOUBLE_LOW (x) & 0xffff) ^ 0x8000) - 0x8000);
7058 else
7059 {
7060 if (code == 0)
7061 output_operand_lossage ("invalid constant - try using "
7062 "an output modifier");
7063 else
7064 output_operand_lossage ("invalid constant for output modifier '%c'",
7065 code);
7066 }
7067 break;
7068 case CONST_VECTOR:
7069 switch (code)
7070 {
7071 case 'e':
7072 case 's':
7073 {
7074 int start, stop, inner_len;
7075 bool ok;
7076
7077 inner_len = GET_MODE_UNIT_BITSIZE (GET_MODE (x));
7078 ok = s390_contiguous_bitmask_vector_p (x, &start, &stop);
7079 gcc_assert (ok);
7080 if (code == 's' || code == 't')
7081 ival = inner_len - stop - 1;
7082 else
7083 ival = inner_len - start - 1;
7084 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
7085 }
7086 break;
7087 case 't':
7088 {
7089 unsigned mask;
7090 bool ok = s390_bytemask_vector_p (x, &mask);
7091 gcc_assert (ok);
7092 fprintf (file, "%u", mask);
7093 }
7094 break;
7095
7096 default:
7097 output_operand_lossage ("invalid constant vector for output "
7098 "modifier '%c'", code);
7099 }
7100 break;
7101
7102 default:
7103 if (code == 0)
7104 output_operand_lossage ("invalid expression - try using "
7105 "an output modifier");
7106 else
7107 output_operand_lossage ("invalid expression for output "
7108 "modifier '%c'", code);
7109 break;
7110 }
7111 }
7112
7113 /* Target hook for assembling integer objects. We need to define it
7114 here to work around a bug in some versions of GAS, which couldn't
7115 handle values smaller than INT_MIN when printed in decimal. */
7116
7117 static bool
7118 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
7119 {
7120 if (size == 8 && aligned_p
7121 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
7122 {
7123 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
7124 INTVAL (x));
7125 return true;
7126 }
7127 return default_assemble_integer (x, size, aligned_p);
7128 }
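/* Example: for the aligned 64-bit constant -2147483649 (one below INT_MIN)
   the hook above prints "\t.quad\t0xffffffff7fffffff" instead of the decimal
   form that the affected GAS versions mishandle.  */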
7129
7130 /* Returns true if register REGNO is used for forming
7131 a memory address in expression X. */
7132
7133 static bool
7134 reg_used_in_mem_p (int regno, rtx x)
7135 {
7136 enum rtx_code code = GET_CODE (x);
7137 int i, j;
7138 const char *fmt;
7139
7140 if (code == MEM)
7141 {
7142 if (refers_to_regno_p (regno, XEXP (x, 0)))
7143 return true;
7144 }
7145 else if (code == SET
7146 && GET_CODE (SET_DEST (x)) == PC)
7147 {
7148 if (refers_to_regno_p (regno, SET_SRC (x)))
7149 return true;
7150 }
7151
7152 fmt = GET_RTX_FORMAT (code);
7153 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7154 {
7155 if (fmt[i] == 'e'
7156 && reg_used_in_mem_p (regno, XEXP (x, i)))
7157 return true;
7158
7159 else if (fmt[i] == 'E')
7160 for (j = 0; j < XVECLEN (x, i); j++)
7161 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
7162 return true;
7163 }
7164 return false;
7165 }
7166
7167 /* Returns true if expression DEP_RTX sets an address register
7168 used by instruction INSN to address memory. */
7169
7170 static bool
7171 addr_generation_dependency_p (rtx dep_rtx, rtx_insn *insn)
7172 {
7173 rtx target, pat;
7174
7175 if (NONJUMP_INSN_P (dep_rtx))
7176 dep_rtx = PATTERN (dep_rtx);
7177
7178 if (GET_CODE (dep_rtx) == SET)
7179 {
7180 target = SET_DEST (dep_rtx);
7181 if (GET_CODE (target) == STRICT_LOW_PART)
7182 target = XEXP (target, 0);
7183 while (GET_CODE (target) == SUBREG)
7184 target = SUBREG_REG (target);
7185
7186 if (GET_CODE (target) == REG)
7187 {
7188 int regno = REGNO (target);
7189
7190 if (s390_safe_attr_type (insn) == TYPE_LA)
7191 {
7192 pat = PATTERN (insn);
7193 if (GET_CODE (pat) == PARALLEL)
7194 {
7195 gcc_assert (XVECLEN (pat, 0) == 2);
7196 pat = XVECEXP (pat, 0, 0);
7197 }
7198 gcc_assert (GET_CODE (pat) == SET);
7199 return refers_to_regno_p (regno, SET_SRC (pat));
7200 }
7201 else if (get_attr_atype (insn) == ATYPE_AGEN)
7202 return reg_used_in_mem_p (regno, PATTERN (insn));
7203 }
7204 }
7205 return false;
7206 }
7207
7208 /* Return 1 if dep_insn sets a register used by insn in the agen unit. */
7209
7210 int
7211 s390_agen_dep_p (rtx_insn *dep_insn, rtx_insn *insn)
7212 {
7213 rtx dep_rtx = PATTERN (dep_insn);
7214 int i;
7215
7216 if (GET_CODE (dep_rtx) == SET
7217 && addr_generation_dependency_p (dep_rtx, insn))
7218 return 1;
7219 else if (GET_CODE (dep_rtx) == PARALLEL)
7220 {
7221 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
7222 {
7223 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
7224 return 1;
7225 }
7226 }
7227 return 0;
7228 }
7229
7230
7231 /* A C statement (sans semicolon) to update the integer scheduling priority
7232 INSN_PRIORITY (INSN). Increase the priority to execute INSN earlier;
7233 reduce the priority to execute INSN later. Do not define this macro if
7234 you do not need to adjust the scheduling priorities of insns.
7235
7236 A STD instruction should be scheduled earlier,
7237 in order to use the bypass. */
7238 static int
7239 s390_adjust_priority (rtx_insn *insn, int priority)
7240 {
7241 if (! INSN_P (insn))
7242 return priority;
7243
7244 if (s390_tune != PROCESSOR_2084_Z990
7245 && s390_tune != PROCESSOR_2094_Z9_109
7246 && s390_tune != PROCESSOR_2097_Z10
7247 && s390_tune != PROCESSOR_2817_Z196
7248 && s390_tune != PROCESSOR_2827_ZEC12
7249 && s390_tune != PROCESSOR_2964_Z13)
7250 return priority;
7251
7252 switch (s390_safe_attr_type (insn))
7253 {
7254 case TYPE_FSTOREDF:
7255 case TYPE_FSTORESF:
7256 priority = priority << 3;
7257 break;
7258 case TYPE_STORE:
7259 case TYPE_STM:
7260 priority = priority << 1;
7261 break;
7262 default:
7263 break;
7264 }
7265 return priority;
7266 }
7267
7268
7269 /* The number of instructions that can be issued per cycle. */
7270
7271 static int
7272 s390_issue_rate (void)
7273 {
7274 switch (s390_tune)
7275 {
7276 case PROCESSOR_2084_Z990:
7277 case PROCESSOR_2094_Z9_109:
7278 case PROCESSOR_2817_Z196:
7279 return 3;
7280 case PROCESSOR_2097_Z10:
7281 return 2;
7282 /* Starting with EC12 we use the sched_reorder hook to take care
7283 of instruction dispatch constraints. The algorithm only
7284 picks the best instruction and assumes only a single
7285 instruction gets issued per cycle. */
7286 case PROCESSOR_2827_ZEC12:
7287 default:
7288 return 1;
7289 }
7290 }
7291
7292 static int
7293 s390_first_cycle_multipass_dfa_lookahead (void)
7294 {
7295 return 4;
7296 }
7297
7298 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
7299 Fix up MEMs as required. */
7300
7301 static void
7302 annotate_constant_pool_refs (rtx *x)
7303 {
7304 int i, j;
7305 const char *fmt;
7306
7307 gcc_assert (GET_CODE (*x) != SYMBOL_REF
7308 || !CONSTANT_POOL_ADDRESS_P (*x));
7309
7310 /* Literal pool references can only occur inside a MEM ... */
7311 if (GET_CODE (*x) == MEM)
7312 {
7313 rtx memref = XEXP (*x, 0);
7314
7315 if (GET_CODE (memref) == SYMBOL_REF
7316 && CONSTANT_POOL_ADDRESS_P (memref))
7317 {
7318 rtx base = cfun->machine->base_reg;
7319 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
7320 UNSPEC_LTREF);
7321
7322 *x = replace_equiv_address (*x, addr);
7323 return;
7324 }
7325
7326 if (GET_CODE (memref) == CONST
7327 && GET_CODE (XEXP (memref, 0)) == PLUS
7328 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
7329 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
7330 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
7331 {
7332 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
7333 rtx sym = XEXP (XEXP (memref, 0), 0);
7334 rtx base = cfun->machine->base_reg;
7335 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
7336 UNSPEC_LTREF);
7337
7338 *x = replace_equiv_address (*x, plus_constant (Pmode, addr, off));
7339 return;
7340 }
7341 }
7342
7343 /* ... or a load-address type pattern. */
7344 if (GET_CODE (*x) == SET)
7345 {
7346 rtx addrref = SET_SRC (*x);
7347
7348 if (GET_CODE (addrref) == SYMBOL_REF
7349 && CONSTANT_POOL_ADDRESS_P (addrref))
7350 {
7351 rtx base = cfun->machine->base_reg;
7352 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
7353 UNSPEC_LTREF);
7354
7355 SET_SRC (*x) = addr;
7356 return;
7357 }
7358
7359 if (GET_CODE (addrref) == CONST
7360 && GET_CODE (XEXP (addrref, 0)) == PLUS
7361 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
7362 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
7363 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
7364 {
7365 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
7366 rtx sym = XEXP (XEXP (addrref, 0), 0);
7367 rtx base = cfun->machine->base_reg;
7368 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
7369 UNSPEC_LTREF);
7370
7371 SET_SRC (*x) = plus_constant (Pmode, addr, off);
7372 return;
7373 }
7374 }
7375
7376 /* Annotate LTREL_BASE as well. */
7377 if (GET_CODE (*x) == UNSPEC
7378 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
7379 {
7380 rtx base = cfun->machine->base_reg;
7381 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
7382 UNSPEC_LTREL_BASE);
7383 return;
7384 }
7385
7386 fmt = GET_RTX_FORMAT (GET_CODE (*x));
7387 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
7388 {
7389 if (fmt[i] == 'e')
7390 {
7391 annotate_constant_pool_refs (&XEXP (*x, i));
7392 }
7393 else if (fmt[i] == 'E')
7394 {
7395 for (j = 0; j < XVECLEN (*x, i); j++)
7396 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
7397 }
7398 }
7399 }
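/* In effect the walk above rewrites a literal pool reference such as
   (mem (symbol_ref [in the constant pool])) into
   (mem (unspec [(symbol_ref) (base_reg)] UNSPEC_LTREF)),
   making the dependency on the literal pool base register explicit for
   later passes.  */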
7400
7401 /* Split all branches that exceed the maximum distance.
7402 Returns true if this created a new literal pool entry. */
7403
7404 static int
7405 s390_split_branches (void)
7406 {
7407 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
7408 int new_literal = 0, ret;
7409 rtx_insn *insn;
7410 rtx pat, target;
7411 rtx *label;
7412
7413 /* We need correct insn addresses. */
7414
7415 shorten_branches (get_insns ());
7416
7417 /* Find all branches that exceed 64KB, and split them. */
7418
7419 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7420 {
7421 if (! JUMP_P (insn) || tablejump_p (insn, NULL, NULL))
7422 continue;
7423
7424 pat = PATTERN (insn);
7425 if (GET_CODE (pat) == PARALLEL)
7426 pat = XVECEXP (pat, 0, 0);
7427 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
7428 continue;
7429
7430 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
7431 {
7432 label = &SET_SRC (pat);
7433 }
7434 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
7435 {
7436 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
7437 label = &XEXP (SET_SRC (pat), 1);
7438 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
7439 label = &XEXP (SET_SRC (pat), 2);
7440 else
7441 continue;
7442 }
7443 else
7444 continue;
7445
7446 if (get_attr_length (insn) <= 4)
7447 continue;
7448
7449 /* We are going to use the return register as a scratch register;
7450 make sure it will be saved/restored by the prologue/epilogue. */
7451 cfun_frame_layout.save_return_addr_p = 1;
7452
7453 if (!flag_pic)
7454 {
7455 new_literal = 1;
7456 rtx mem = force_const_mem (Pmode, *label);
7457 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (temp_reg, mem),
7458 insn);
7459 INSN_ADDRESSES_NEW (set_insn, -1);
7460 annotate_constant_pool_refs (&PATTERN (set_insn));
7461
7462 target = temp_reg;
7463 }
7464 else
7465 {
7466 new_literal = 1;
7467 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
7468 UNSPEC_LTREL_OFFSET);
7469 target = gen_rtx_CONST (Pmode, target);
7470 target = force_const_mem (Pmode, target);
7471 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (temp_reg, target),
7472 insn);
7473 INSN_ADDRESSES_NEW (set_insn, -1);
7474 annotate_constant_pool_refs (&PATTERN (set_insn));
7475
7476 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
7477 cfun->machine->base_reg),
7478 UNSPEC_LTREL_BASE);
7479 target = gen_rtx_PLUS (Pmode, temp_reg, target);
7480 }
7481
7482 ret = validate_change (insn, label, target, 0);
7483 gcc_assert (ret);
7484 }
7485
7486 return new_literal;
7487 }
7488
7489
7490 /* Find an annotated literal pool symbol referenced in RTX X,
7491 and store it at REF. Will abort if X contains references to
7492 more than one such pool symbol; multiple references to the same
7493 symbol are allowed, however.
7494
7495 The rtx pointed to by REF must be initialized to NULL_RTX
7496 by the caller before calling this routine. */
7497
7498 static void
7499 find_constant_pool_ref (rtx x, rtx *ref)
7500 {
7501 int i, j;
7502 const char *fmt;
7503
7504 /* Ignore LTREL_BASE references. */
7505 if (GET_CODE (x) == UNSPEC
7506 && XINT (x, 1) == UNSPEC_LTREL_BASE)
7507 return;
7508 /* Likewise POOL_ENTRY insns. */
7509 if (GET_CODE (x) == UNSPEC_VOLATILE
7510 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
7511 return;
7512
7513 gcc_assert (GET_CODE (x) != SYMBOL_REF
7514 || !CONSTANT_POOL_ADDRESS_P (x));
7515
7516 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
7517 {
7518 rtx sym = XVECEXP (x, 0, 0);
7519 gcc_assert (GET_CODE (sym) == SYMBOL_REF
7520 && CONSTANT_POOL_ADDRESS_P (sym));
7521
7522 if (*ref == NULL_RTX)
7523 *ref = sym;
7524 else
7525 gcc_assert (*ref == sym);
7526
7527 return;
7528 }
7529
7530 fmt = GET_RTX_FORMAT (GET_CODE (x));
7531 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
7532 {
7533 if (fmt[i] == 'e')
7534 {
7535 find_constant_pool_ref (XEXP (x, i), ref);
7536 }
7537 else if (fmt[i] == 'E')
7538 {
7539 for (j = 0; j < XVECLEN (x, i); j++)
7540 find_constant_pool_ref (XVECEXP (x, i, j), ref);
7541 }
7542 }
7543 }
7544
7545 /* Replace every reference to the annotated literal pool
7546 symbol REF in X by its base plus OFFSET. */
7547
7548 static void
7549 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
7550 {
7551 int i, j;
7552 const char *fmt;
7553
7554 gcc_assert (*x != ref);
7555
7556 if (GET_CODE (*x) == UNSPEC
7557 && XINT (*x, 1) == UNSPEC_LTREF
7558 && XVECEXP (*x, 0, 0) == ref)
7559 {
7560 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
7561 return;
7562 }
7563
7564 if (GET_CODE (*x) == PLUS
7565 && GET_CODE (XEXP (*x, 1)) == CONST_INT
7566 && GET_CODE (XEXP (*x, 0)) == UNSPEC
7567 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
7568 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
7569 {
7570 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
7571 *x = plus_constant (Pmode, addr, INTVAL (XEXP (*x, 1)));
7572 return;
7573 }
7574
7575 fmt = GET_RTX_FORMAT (GET_CODE (*x));
7576 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
7577 {
7578 if (fmt[i] == 'e')
7579 {
7580 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
7581 }
7582 else if (fmt[i] == 'E')
7583 {
7584 for (j = 0; j < XVECLEN (*x, i); j++)
7585 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
7586 }
7587 }
7588 }
7589
7590 /* Check whether X contains an UNSPEC_LTREL_BASE.
7591 Return its constant pool symbol if found, NULL_RTX otherwise. */
7592
7593 static rtx
7594 find_ltrel_base (rtx x)
7595 {
7596 int i, j;
7597 const char *fmt;
7598
7599 if (GET_CODE (x) == UNSPEC
7600 && XINT (x, 1) == UNSPEC_LTREL_BASE)
7601 return XVECEXP (x, 0, 0);
7602
7603 fmt = GET_RTX_FORMAT (GET_CODE (x));
7604 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
7605 {
7606 if (fmt[i] == 'e')
7607 {
7608 rtx fnd = find_ltrel_base (XEXP (x, i));
7609 if (fnd)
7610 return fnd;
7611 }
7612 else if (fmt[i] == 'E')
7613 {
7614 for (j = 0; j < XVECLEN (x, i); j++)
7615 {
7616 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
7617 if (fnd)
7618 return fnd;
7619 }
7620 }
7621 }
7622
7623 return NULL_RTX;
7624 }
7625
7626 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
7627
7628 static void
7629 replace_ltrel_base (rtx *x)
7630 {
7631 int i, j;
7632 const char *fmt;
7633
7634 if (GET_CODE (*x) == UNSPEC
7635 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
7636 {
7637 *x = XVECEXP (*x, 0, 1);
7638 return;
7639 }
7640
7641 fmt = GET_RTX_FORMAT (GET_CODE (*x));
7642 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
7643 {
7644 if (fmt[i] == 'e')
7645 {
7646 replace_ltrel_base (&XEXP (*x, i));
7647 }
7648 else if (fmt[i] == 'E')
7649 {
7650 for (j = 0; j < XVECLEN (*x, i); j++)
7651 replace_ltrel_base (&XVECEXP (*x, i, j));
7652 }
7653 }
7654 }
7655
7656
7657 /* We keep a list of constants which we have to add to internal
7658 constant tables in the middle of large functions. */
7659
7660 #define NR_C_MODES 31
7661 machine_mode constant_modes[NR_C_MODES] =
7662 {
7663 TFmode, TImode, TDmode,
7664 V16QImode, V8HImode, V4SImode, V2DImode, V4SFmode, V2DFmode, V1TFmode,
7665 DFmode, DImode, DDmode,
7666 V8QImode, V4HImode, V2SImode, V1DImode, V2SFmode, V1DFmode,
7667 SFmode, SImode, SDmode,
7668 V4QImode, V2HImode, V1SImode, V1SFmode,
7669 HImode,
7670 V2QImode, V1HImode,
7671 QImode,
7672 V1QImode
7673 };
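/* Note: the table is ordered by decreasing size/alignment -- 16-byte modes
   first, single-byte modes last -- so that s390_dump_pool, which walks it
   front to back, emits the most strictly aligned constants first.  */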
7674
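/* A single pool entry: the RTX value (a constant, or the insn itself for
   execute templates) and the label marking its position in the emitted
   pool; entries are chained through NEXT.  */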
7675 struct constant
7676 {
7677 struct constant *next;
7678 rtx value;
7679 rtx_code_label *label;
7680 };
7681
7682 struct constant_pool
7683 {
7684 struct constant_pool *next;
7685 rtx_insn *first_insn;
7686 rtx_insn *pool_insn;
7687 bitmap insns;
7688 rtx_insn *emit_pool_after;
7689
7690 struct constant *constants[NR_C_MODES];
7691 struct constant *execute;
7692 rtx_code_label *label;
7693 int size;
7694 };
7695
7696 /* Allocate new constant_pool structure. */
7697
7698 static struct constant_pool *
7699 s390_alloc_pool (void)
7700 {
7701 struct constant_pool *pool;
7702 int i;
7703
7704 pool = (struct constant_pool *) xmalloc (sizeof *pool);
7705 pool->next = NULL;
7706 for (i = 0; i < NR_C_MODES; i++)
7707 pool->constants[i] = NULL;
7708
7709 pool->execute = NULL;
7710 pool->label = gen_label_rtx ();
7711 pool->first_insn = NULL;
7712 pool->pool_insn = NULL;
7713 pool->insns = BITMAP_ALLOC (NULL);
7714 pool->size = 0;
7715 pool->emit_pool_after = NULL;
7716
7717 return pool;
7718 }
7719
7720 /* Create new constant pool covering instructions starting at INSN
7721 and chain it to the end of POOL_LIST. */
7722
7723 static struct constant_pool *
7724 s390_start_pool (struct constant_pool **pool_list, rtx_insn *insn)
7725 {
7726 struct constant_pool *pool, **prev;
7727
7728 pool = s390_alloc_pool ();
7729 pool->first_insn = insn;
7730
7731 for (prev = pool_list; *prev; prev = &(*prev)->next)
7732 ;
7733 *prev = pool;
7734
7735 return pool;
7736 }
7737
7738 /* End range of instructions covered by POOL at INSN and emit
7739 placeholder insn representing the pool. */
7740
7741 static void
7742 s390_end_pool (struct constant_pool *pool, rtx_insn *insn)
7743 {
7744 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
7745
7746 if (!insn)
7747 insn = get_last_insn ();
7748
7749 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
7750 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
7751 }
7752
7753 /* Add INSN to the list of insns covered by POOL. */
7754
7755 static void
7756 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
7757 {
7758 bitmap_set_bit (pool->insns, INSN_UID (insn));
7759 }
7760
7761 /* Return pool out of POOL_LIST that covers INSN. */
7762
7763 static struct constant_pool *
7764 s390_find_pool (struct constant_pool *pool_list, rtx insn)
7765 {
7766 struct constant_pool *pool;
7767
7768 for (pool = pool_list; pool; pool = pool->next)
7769 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
7770 break;
7771
7772 return pool;
7773 }
7774
7775 /* Add constant VAL of mode MODE to the constant pool POOL. */
7776
7777 static void
7778 s390_add_constant (struct constant_pool *pool, rtx val, machine_mode mode)
7779 {
7780 struct constant *c;
7781 int i;
7782
7783 for (i = 0; i < NR_C_MODES; i++)
7784 if (constant_modes[i] == mode)
7785 break;
7786 gcc_assert (i != NR_C_MODES);
7787
7788 for (c = pool->constants[i]; c != NULL; c = c->next)
7789 if (rtx_equal_p (val, c->value))
7790 break;
7791
7792 if (c == NULL)
7793 {
7794 c = (struct constant *) xmalloc (sizeof *c);
7795 c->value = val;
7796 c->label = gen_label_rtx ();
7797 c->next = pool->constants[i];
7798 pool->constants[i] = c;
7799 pool->size += GET_MODE_SIZE (mode);
7800 }
7801 }
7802
7803 /* Return an rtx that represents the offset of X from the start of
7804 pool POOL. */
7805
7806 static rtx
7807 s390_pool_offset (struct constant_pool *pool, rtx x)
7808 {
7809 rtx label;
7810
7811 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
7812 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
7813 UNSPEC_POOL_OFFSET);
7814 return gen_rtx_CONST (GET_MODE (x), x);
7815 }
7816
7817 /* Find constant VAL of mode MODE in the constant pool POOL.
7818 Return an RTX describing the distance from the start of
7819 the pool to the location of the new constant. */
7820
7821 static rtx
7822 s390_find_constant (struct constant_pool *pool, rtx val,
7823 machine_mode mode)
7824 {
7825 struct constant *c;
7826 int i;
7827
7828 for (i = 0; i < NR_C_MODES; i++)
7829 if (constant_modes[i] == mode)
7830 break;
7831 gcc_assert (i != NR_C_MODES);
7832
7833 for (c = pool->constants[i]; c != NULL; c = c->next)
7834 if (rtx_equal_p (val, c->value))
7835 break;
7836
7837 gcc_assert (c);
7838
7839 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
7840 }
7841
7842 /* Check whether INSN is an execute. Return the label_ref to its
7843 execute target template if so, NULL_RTX otherwise. */
7844
7845 static rtx
7846 s390_execute_label (rtx insn)
7847 {
7848 if (NONJUMP_INSN_P (insn)
7849 && GET_CODE (PATTERN (insn)) == PARALLEL
7850 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
7851 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
7852 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
7853
7854 return NULL_RTX;
7855 }
7856
7857 /* Add execute target for INSN to the constant pool POOL. */
7858
7859 static void
7860 s390_add_execute (struct constant_pool *pool, rtx insn)
7861 {
7862 struct constant *c;
7863
7864 for (c = pool->execute; c != NULL; c = c->next)
7865 if (INSN_UID (insn) == INSN_UID (c->value))
7866 break;
7867
7868 if (c == NULL)
7869 {
7870 c = (struct constant *) xmalloc (sizeof *c);
7871 c->value = insn;
7872 c->label = gen_label_rtx ();
7873 c->next = pool->execute;
7874 pool->execute = c;
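/* Reserve room for one execute target template; an S/390 instruction is
   at most 6 bytes long.  */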
7875 pool->size += 6;
7876 }
7877 }
7878
7879 /* Find execute target for INSN in the constant pool POOL.
7880 Return an RTX describing the distance from the start of
7881 the pool to the location of the execute target. */
7882
7883 static rtx
7884 s390_find_execute (struct constant_pool *pool, rtx insn)
7885 {
7886 struct constant *c;
7887
7888 for (c = pool->execute; c != NULL; c = c->next)
7889 if (INSN_UID (insn) == INSN_UID (c->value))
7890 break;
7891
7892 gcc_assert (c);
7893
7894 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
7895 }
7896
7897 /* For an execute INSN, extract the execute target template. */
7898
7899 static rtx
7900 s390_execute_target (rtx insn)
7901 {
7902 rtx pattern = PATTERN (insn);
7903 gcc_assert (s390_execute_label (insn));
7904
7905 if (XVECLEN (pattern, 0) == 2)
7906 {
7907 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
7908 }
7909 else
7910 {
7911 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
7912 int i;
7913
7914 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
7915 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
7916
7917 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
7918 }
7919
7920 return pattern;
7921 }
7922
7923 /* Indicate that INSN cannot be duplicated. This is the case for
7924 execute insns that carry a unique label. */
7925
7926 static bool
7927 s390_cannot_copy_insn_p (rtx_insn *insn)
7928 {
7929 rtx label = s390_execute_label (insn);
7930 return label && label != const0_rtx;
7931 }
7932
7933 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
7934 do not emit the pool base label. */
7935
7936 static void
7937 s390_dump_pool (struct constant_pool *pool, bool remote_label)
7938 {
7939 struct constant *c;
7940 rtx_insn *insn = pool->pool_insn;
7941 int i;
7942
7943 /* Switch to rodata section. */
7944 if (TARGET_CPU_ZARCH)
7945 {
7946 insn = emit_insn_after (gen_pool_section_start (), insn);
7947 INSN_ADDRESSES_NEW (insn, -1);
7948 }
7949
7950 /* Ensure minimum pool alignment. */
7951 if (TARGET_CPU_ZARCH)
7952 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
7953 else
7954 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
7955 INSN_ADDRESSES_NEW (insn, -1);
7956
7957 /* Emit pool base label. */
7958 if (!remote_label)
7959 {
7960 insn = emit_label_after (pool->label, insn);
7961 INSN_ADDRESSES_NEW (insn, -1);
7962 }
7963
7964 /* Dump constants in descending alignment requirement order,
7965 ensuring proper alignment for every constant. */
7966 for (i = 0; i < NR_C_MODES; i++)
7967 for (c = pool->constants[i]; c; c = c->next)
7968 {
7969 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
7970 rtx value = copy_rtx (c->value);
7971 if (GET_CODE (value) == CONST
7972 && GET_CODE (XEXP (value, 0)) == UNSPEC
7973 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
7974 && XVECLEN (XEXP (value, 0), 0) == 1)
7975 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
7976
7977 insn = emit_label_after (c->label, insn);
7978 INSN_ADDRESSES_NEW (insn, -1);
7979
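/* Wrap the value in an UNSPECV_POOL_ENTRY marker; find_constant_pool_ref
   recognizes this marker and leaves the pool data itself untouched.  */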
7980 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
7981 gen_rtvec (1, value),
7982 UNSPECV_POOL_ENTRY);
7983 insn = emit_insn_after (value, insn);
7984 INSN_ADDRESSES_NEW (insn, -1);
7985 }
7986
7987 /* Ensure minimum alignment for instructions. */
7988 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
7989 INSN_ADDRESSES_NEW (insn, -1);
7990
7991 /* Output in-pool execute template insns. */
7992 for (c = pool->execute; c; c = c->next)
7993 {
7994 insn = emit_label_after (c->label, insn);
7995 INSN_ADDRESSES_NEW (insn, -1);
7996
7997 insn = emit_insn_after (s390_execute_target (c->value), insn);
7998 INSN_ADDRESSES_NEW (insn, -1);
7999 }
8000
8001 /* Switch back to previous section. */
8002 if (TARGET_CPU_ZARCH)
8003 {
8004 insn = emit_insn_after (gen_pool_section_end (), insn);
8005 INSN_ADDRESSES_NEW (insn, -1);
8006 }
8007
8008 insn = emit_barrier_after (insn);
8009 INSN_ADDRESSES_NEW (insn, -1);
8010
8011 /* Remove placeholder insn. */
8012 remove_insn (pool->pool_insn);
8013 }
8014
8015 /* Free all memory used by POOL. */
8016
8017 static void
8018 s390_free_pool (struct constant_pool *pool)
8019 {
8020 struct constant *c, *next;
8021 int i;
8022
8023 for (i = 0; i < NR_C_MODES; i++)
8024 for (c = pool->constants[i]; c; c = next)
8025 {
8026 next = c->next;
8027 free (c);
8028 }
8029
8030 for (c = pool->execute; c; c = next)
8031 {
8032 next = c->next;
8033 free (c);
8034 }
8035
8036 BITMAP_FREE (pool->insns);
8037 free (pool);
8038 }
8039
8040
8041 /* Collect main literal pool. Return NULL on overflow. */
8042
8043 static struct constant_pool *
8044 s390_mainpool_start (void)
8045 {
8046 struct constant_pool *pool;
8047 rtx_insn *insn;
8048
8049 pool = s390_alloc_pool ();
8050
8051 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8052 {
8053 if (NONJUMP_INSN_P (insn)
8054 && GET_CODE (PATTERN (insn)) == SET
8055 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
8056 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
8057 {
8058 /* There might be two main_pool instructions if base_reg
8059 is call-clobbered; one for shrink-wrapped code and one
8060 for the rest. We want to keep the first. */
8061 if (pool->pool_insn)
8062 {
8063 insn = PREV_INSN (insn);
8064 delete_insn (NEXT_INSN (insn));
8065 continue;
8066 }
8067 pool->pool_insn = insn;
8068 }
8069
8070 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
8071 {
8072 s390_add_execute (pool, insn);
8073 }
8074 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8075 {
8076 rtx pool_ref = NULL_RTX;
8077 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8078 if (pool_ref)
8079 {
8080 rtx constant = get_pool_constant (pool_ref);
8081 machine_mode mode = get_pool_mode (pool_ref);
8082 s390_add_constant (pool, constant, mode);
8083 }
8084 }
8085
8086 /* If hot/cold partitioning is enabled we have to make sure that
8087 the literal pool is emitted in the same section where the
8088 initialization of the literal pool base pointer takes place.
8089 emit_pool_after is only used in the non-overflow case on
8090 non-Z CPUs where we can emit the literal pool at the end of the
8091 function body within the text section. */
8092 if (NOTE_P (insn)
8093 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
8094 && !pool->emit_pool_after)
8095 pool->emit_pool_after = PREV_INSN (insn);
8096 }
8097
8098 gcc_assert (pool->pool_insn || pool->size == 0);
8099
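/* Constants in the pool are reached via a 12-bit unsigned displacement
   from the pool base register (0..4095 bytes).  Once the pool reaches
   4096 bytes it can no longer be fully addressed from a single base,
   so it is split into chunks instead.  */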
8100 if (pool->size >= 4096)
8101 {
8102 /* We're going to chunkify the pool, so remove the main
8103 pool placeholder insn. */
8104 remove_insn (pool->pool_insn);
8105
8106 s390_free_pool (pool);
8107 pool = NULL;
8108 }
8109
8110 /* If the function ends with the section where the literal pool
8111 should be emitted, set the marker to its end. */
8112 if (pool && !pool->emit_pool_after)
8113 pool->emit_pool_after = get_last_insn ();
8114
8115 return pool;
8116 }
8117
8118 /* POOL holds the main literal pool as collected by s390_mainpool_start.
8119 Modify the current function to output the pool constants as well as
8120 the pool register setup instruction. */
8121
8122 static void
8123 s390_mainpool_finish (struct constant_pool *pool)
8124 {
8125 rtx base_reg = cfun->machine->base_reg;
8126
8127 /* If the pool is empty, we're done. */
8128 if (pool->size == 0)
8129 {
8130 /* We don't actually need a base register after all. */
8131 cfun->machine->base_reg = NULL_RTX;
8132
8133 if (pool->pool_insn)
8134 remove_insn (pool->pool_insn);
8135 s390_free_pool (pool);
8136 return;
8137 }
8138
8139 /* We need correct insn addresses. */
8140 shorten_branches (get_insns ());
8141
8142 /* On zSeries, we use a LARL to load the pool register. The pool is
8143 located in the .rodata section, so we emit it after the function. */
8144 if (TARGET_CPU_ZARCH)
8145 {
8146 rtx set = gen_main_base_64 (base_reg, pool->label);
8147 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
8148 INSN_ADDRESSES_NEW (insn, -1);
8149 remove_insn (pool->pool_insn);
8150
8151 insn = get_last_insn ();
8152 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8153 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8154
8155 s390_dump_pool (pool, 0);
8156 }
8157
8158 /* On S/390, if the total size of the function's code plus literal pool
8159 does not exceed 4096 bytes, we use BASR to set up a function base
8160 pointer, and emit the literal pool at the end of the function. */
8161 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
8162 + pool->size + 8 /* alignment slop */ < 4096)
8163 {
8164 rtx set = gen_main_base_31_small (base_reg, pool->label);
8165 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
8166 INSN_ADDRESSES_NEW (insn, -1);
8167 remove_insn (pool->pool_insn);
8168
8169 insn = emit_label_after (pool->label, insn);
8170 INSN_ADDRESSES_NEW (insn, -1);
8171
8172 /* emit_pool_after will be set by s390_mainpool_start to the
8173 last insn of the section where the literal pool should be
8174 emitted. */
8175 insn = pool->emit_pool_after;
8176
8177 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8178 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8179
8180 s390_dump_pool (pool, 1);
8181 }
8182
8183 /* Otherwise, we emit an inline literal pool and use BASR to branch
8184 over it, setting up the pool register at the same time. */
8185 else
8186 {
8187 rtx_code_label *pool_end = gen_label_rtx ();
8188
8189 rtx pat = gen_main_base_31_large (base_reg, pool->label, pool_end);
8190 rtx_insn *insn = emit_jump_insn_after (pat, pool->pool_insn);
8191 JUMP_LABEL (insn) = pool_end;
8192 INSN_ADDRESSES_NEW (insn, -1);
8193 remove_insn (pool->pool_insn);
8194
8195 insn = emit_label_after (pool->label, insn);
8196 INSN_ADDRESSES_NEW (insn, -1);
8197
8198 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8199 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8200
8201 insn = emit_label_after (pool_end, pool->pool_insn);
8202 INSN_ADDRESSES_NEW (insn, -1);
8203
8204 s390_dump_pool (pool, 1);
8205 }
8206
8207
8208 /* Replace all literal pool references. */
8209
8210 for (rtx_insn *insn = get_insns (); insn; insn = NEXT_INSN (insn))
8211 {
8212 if (INSN_P (insn))
8213 replace_ltrel_base (&PATTERN (insn));
8214
8215 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8216 {
8217 rtx addr, pool_ref = NULL_RTX;
8218 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8219 if (pool_ref)
8220 {
8221 if (s390_execute_label (insn))
8222 addr = s390_find_execute (pool, insn);
8223 else
8224 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
8225 get_pool_mode (pool_ref));
8226
8227 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
8228 INSN_CODE (insn) = -1;
8229 }
8230 }
8231 }
8232
8233
8234 /* Free the pool. */
8235 s390_free_pool (pool);
8236 }
8237
8238 /* POOL holds the main literal pool as collected by s390_mainpool_start.
8239 We have decided we cannot use this pool, so revert all changes
8240 to the current function that were done by s390_mainpool_start. */
8241 static void
8242 s390_mainpool_cancel (struct constant_pool *pool)
8243 {
8244 /* We didn't actually change the instruction stream, so simply
8245 free the pool memory. */
8246 s390_free_pool (pool);
8247 }
8248
8249
8250 /* Chunkify the literal pool. */
8251
8252 #define S390_POOL_CHUNK_MIN 0xc00
8253 #define S390_POOL_CHUNK_MAX 0xe00
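/* 0xc00 is 3072 and 0xe00 is 3584 bytes.  Keeping chunks well below the
   4096-byte reach of a 12-bit displacement presumably leaves headroom for
   alignment padding and the base register reload insns added later.  */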
8254
8255 static struct constant_pool *
8256 s390_chunkify_start (void)
8257 {
8258 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
8259 int extra_size = 0;
8260 bitmap far_labels;
8261 rtx pending_ltrel = NULL_RTX;
8262 rtx_insn *insn;
8263
8264 rtx (*gen_reload_base) (rtx, rtx) =
8265 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
8266
8267
8268 /* We need correct insn addresses. */
8269
8270 shorten_branches (get_insns ());
8271
8272 /* Scan all insns and move literals to pool chunks. */
8273
8274 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8275 {
8276 bool section_switch_p = false;
8277
8278 /* Check for pending LTREL_BASE. */
8279 if (INSN_P (insn))
8280 {
8281 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
8282 if (ltrel_base)
8283 {
8284 gcc_assert (ltrel_base == pending_ltrel);
8285 pending_ltrel = NULL_RTX;
8286 }
8287 }
8288
8289 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
8290 {
8291 if (!curr_pool)
8292 curr_pool = s390_start_pool (&pool_list, insn);
8293
8294 s390_add_execute (curr_pool, insn);
8295 s390_add_pool_insn (curr_pool, insn);
8296 }
8297 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8298 {
8299 rtx pool_ref = NULL_RTX;
8300 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8301 if (pool_ref)
8302 {
8303 rtx constant = get_pool_constant (pool_ref);
8304 machine_mode mode = get_pool_mode (pool_ref);
8305
8306 if (!curr_pool)
8307 curr_pool = s390_start_pool (&pool_list, insn);
8308
8309 s390_add_constant (curr_pool, constant, mode);
8310 s390_add_pool_insn (curr_pool, insn);
8311
8312 /* Don't split the pool chunk between a LTREL_OFFSET load
8313 and the corresponding LTREL_BASE. */
8314 if (GET_CODE (constant) == CONST
8315 && GET_CODE (XEXP (constant, 0)) == UNSPEC
8316 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
8317 {
8318 gcc_assert (!pending_ltrel);
8319 pending_ltrel = pool_ref;
8320 }
8321 }
8322 }
8323
8324 if (JUMP_P (insn) || JUMP_TABLE_DATA_P (insn) || LABEL_P (insn))
8325 {
8326 if (curr_pool)
8327 s390_add_pool_insn (curr_pool, insn);
8328 /* An LTREL_BASE must follow within the same basic block. */
8329 gcc_assert (!pending_ltrel);
8330 }
8331
8332 if (NOTE_P (insn))
8333 switch (NOTE_KIND (insn))
8334 {
8335 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
8336 section_switch_p = true;
8337 break;
8338 case NOTE_INSN_VAR_LOCATION:
8339 case NOTE_INSN_CALL_ARG_LOCATION:
8340 continue;
8341 default:
8342 break;
8343 }
8344
8345 if (!curr_pool
8346 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
8347 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
8348 continue;
8349
8350 if (TARGET_CPU_ZARCH)
8351 {
8352 if (curr_pool->size < S390_POOL_CHUNK_MAX)
8353 continue;
8354
8355 s390_end_pool (curr_pool, NULL);
8356 curr_pool = NULL;
8357 }
8358 else
8359 {
8360 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
8361 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
8362 + extra_size;
8363
8364 /* We will later have to insert base register reload insns.
8365 Those will have an effect on code size, which we need to
8366 consider here. This calculation makes rather pessimistic
8367 worst-case assumptions. */
8368 if (LABEL_P (insn))
8369 extra_size += 6;
8370
8371 if (chunk_size < S390_POOL_CHUNK_MIN
8372 && curr_pool->size < S390_POOL_CHUNK_MIN
8373 && !section_switch_p)
8374 continue;
8375
8376 /* Pool chunks can only be inserted after BARRIERs ... */
8377 if (BARRIER_P (insn))
8378 {
8379 s390_end_pool (curr_pool, insn);
8380 curr_pool = NULL;
8381 extra_size = 0;
8382 }
8383
8384 /* ... so if we don't find one in time, create one. */
8385 else if (chunk_size > S390_POOL_CHUNK_MAX
8386 || curr_pool->size > S390_POOL_CHUNK_MAX
8387 || section_switch_p)
8388 {
8389 rtx_insn *label, *jump, *barrier, *next, *prev;
8390
8391 if (!section_switch_p)
8392 {
8393 /* We can insert the barrier only after a 'real' insn. */
8394 if (! NONJUMP_INSN_P (insn) && ! CALL_P (insn))
8395 continue;
8396 if (get_attr_length (insn) == 0)
8397 continue;
8398 /* Don't separate LTREL_BASE from the corresponding
8399 LTREL_OFFSET load. */
8400 if (pending_ltrel)
8401 continue;
8402 next = insn;
8403 do
8404 {
8405 insn = next;
8406 next = NEXT_INSN (insn);
8407 }
8408 while (next
8409 && NOTE_P (next)
8410 && (NOTE_KIND (next) == NOTE_INSN_VAR_LOCATION
8411 || NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION));
8412 }
8413 else
8414 {
8415 gcc_assert (!pending_ltrel);
8416
8417 /* The old pool has to end before the section switch
8418 note in order to make it part of the current
8419 section. */
8420 insn = PREV_INSN (insn);
8421 }
8422
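/* Emit an unconditional jump around the spot where the new pool chunk
   will be placed, followed by a barrier; execution resumes at LABEL
   right behind the pool.  */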
8423 label = gen_label_rtx ();
8424 prev = insn;
8425 if (prev && NOTE_P (prev))
8426 prev = prev_nonnote_insn (prev);
8427 if (prev)
8428 jump = emit_jump_insn_after_setloc (gen_jump (label), insn,
8429 INSN_LOCATION (prev));
8430 else
8431 jump = emit_jump_insn_after_noloc (gen_jump (label), insn);
8432 barrier = emit_barrier_after (jump);
8433 insn = emit_label_after (label, barrier);
8434 JUMP_LABEL (jump) = label;
8435 LABEL_NUSES (label) = 1;
8436
8437 INSN_ADDRESSES_NEW (jump, -1);
8438 INSN_ADDRESSES_NEW (barrier, -1);
8439 INSN_ADDRESSES_NEW (insn, -1);
8440
8441 s390_end_pool (curr_pool, barrier);
8442 curr_pool = NULL;
8443 extra_size = 0;
8444 }
8445 }
8446 }
8447
8448 if (curr_pool)
8449 s390_end_pool (curr_pool, NULL);
8450 gcc_assert (!pending_ltrel);
8451
8452 /* Find all labels that are branched into
8453 from an insn belonging to a different chunk. */
8454
8455 far_labels = BITMAP_ALLOC (NULL);
8456
8457 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8458 {
8459 rtx_jump_table_data *table;
8460
8461 /* Labels marked with LABEL_PRESERVE_P can be the target
8462 of non-local jumps, so we have to mark them.
8463 The same holds for named labels.
8464
8465 Don't do that, however, if it is the label before
8466 a jump table. */
8467
8468 if (LABEL_P (insn)
8469 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
8470 {
8471 rtx_insn *vec_insn = NEXT_INSN (insn);
8472 if (! vec_insn || ! JUMP_TABLE_DATA_P (vec_insn))
8473 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
8474 }
8475 /* Check potential targets in a table jump (casesi_jump). */
8476 else if (tablejump_p (insn, NULL, &table))
8477 {
8478 rtx vec_pat = PATTERN (table);
8479 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
8480
8481 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
8482 {
8483 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
8484
8485 if (s390_find_pool (pool_list, label)
8486 != s390_find_pool (pool_list, insn))
8487 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
8488 }
8489 }
8490 /* If we have a direct jump (conditional or unconditional),
8491 check all potential targets. */
8492 else if (JUMP_P (insn))
8493 {
8494 rtx pat = PATTERN (insn);
8495
8496 if (GET_CODE (pat) == PARALLEL)
8497 pat = XVECEXP (pat, 0, 0);
8498
8499 if (GET_CODE (pat) == SET)
8500 {
8501 rtx label = JUMP_LABEL (insn);
8502 if (label && !ANY_RETURN_P (label))
8503 {
8504 if (s390_find_pool (pool_list, label)
8505 != s390_find_pool (pool_list, insn))
8506 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
8507 }
8508 }
8509 }
8510 }
8511
8512 /* Insert base register reload insns before every pool. */
8513
8514 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
8515 {
8516 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
8517 curr_pool->label);
8518 rtx_insn *insn = curr_pool->first_insn;
8519 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
8520 }
8521
8522 /* Insert base register reload insns at every far label. */
8523
8524 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8525 if (LABEL_P (insn)
8526 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
8527 {
8528 struct constant_pool *pool = s390_find_pool (pool_list, insn);
8529 if (pool)
8530 {
8531 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
8532 pool->label);
8533 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
8534 }
8535 }
8536
8537
8538 BITMAP_FREE (far_labels);
8539
8540
8541 /* Recompute insn addresses. */
8542
8543 init_insn_lengths ();
8544 shorten_branches (get_insns ());
8545
8546 return pool_list;
8547 }
8548
8549 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
8550 After we have decided to use this list, finish implementing
8551 all changes to the current function as required. */
8552
8553 static void
8554 s390_chunkify_finish (struct constant_pool *pool_list)
8555 {
8556 struct constant_pool *curr_pool = NULL;
8557 rtx_insn *insn;
8558
8559
8560 /* Replace all literal pool references. */
8561
8562 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8563 {
8564 if (INSN_P (insn))
8565 replace_ltrel_base (&PATTERN (insn));
8566
8567 curr_pool = s390_find_pool (pool_list, insn);
8568 if (!curr_pool)
8569 continue;
8570
8571 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8572 {
8573 rtx addr, pool_ref = NULL_RTX;
8574 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8575 if (pool_ref)
8576 {
8577 if (s390_execute_label (insn))
8578 addr = s390_find_execute (curr_pool, insn);
8579 else
8580 addr = s390_find_constant (curr_pool,
8581 get_pool_constant (pool_ref),
8582 get_pool_mode (pool_ref));
8583
8584 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
8585 INSN_CODE (insn) = -1;
8586 }
8587 }
8588 }
8589
8590 /* Dump out all literal pools. */
8591
8592 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
8593 s390_dump_pool (curr_pool, 0);
8594
8595 /* Free pool list. */
8596
8597 while (pool_list)
8598 {
8599 struct constant_pool *next = pool_list->next;
8600 s390_free_pool (pool_list);
8601 pool_list = next;
8602 }
8603 }
8604
8605 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
8606 We have decided we cannot use this list, so revert all changes
8607 to the current function that were done by s390_chunkify_start. */
8608
8609 static void
8610 s390_chunkify_cancel (struct constant_pool *pool_list)
8611 {
8612 struct constant_pool *curr_pool = NULL;
8613 rtx_insn *insn;
8614
8615 /* Remove all pool placeholder insns. */
8616
8617 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
8618 {
8619 /* Did we insert an extra barrier? Remove it. */
8620 rtx_insn *barrier = PREV_INSN (curr_pool->pool_insn);
8621 rtx_insn *jump = barrier? PREV_INSN (barrier) : NULL;
8622 rtx_insn *label = NEXT_INSN (curr_pool->pool_insn);
8623
8624 if (jump && JUMP_P (jump)
8625 && barrier && BARRIER_P (barrier)
8626 && label && LABEL_P (label)
8627 && GET_CODE (PATTERN (jump)) == SET
8628 && SET_DEST (PATTERN (jump)) == pc_rtx
8629 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
8630 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
8631 {
8632 remove_insn (jump);
8633 remove_insn (barrier);
8634 remove_insn (label);
8635 }
8636
8637 remove_insn (curr_pool->pool_insn);
8638 }
8639
8640 /* Remove all base register reload insns. */
8641
8642 for (insn = get_insns (); insn; )
8643 {
8644 rtx_insn *next_insn = NEXT_INSN (insn);
8645
8646 if (NONJUMP_INSN_P (insn)
8647 && GET_CODE (PATTERN (insn)) == SET
8648 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
8649 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
8650 remove_insn (insn);
8651
8652 insn = next_insn;
8653 }
8654
8655 /* Free pool list. */
8656
8657 while (pool_list)
8658 {
8659 struct constant_pool *next = pool_list->next;
8660 s390_free_pool (pool_list);
8661 pool_list = next;
8662 }
8663 }
8664
8665 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
8666
8667 void
8668 s390_output_pool_entry (rtx exp, machine_mode mode, unsigned int align)
8669 {
8670 REAL_VALUE_TYPE r;
8671
8672 switch (GET_MODE_CLASS (mode))
8673 {
8674 case MODE_FLOAT:
8675 case MODE_DECIMAL_FLOAT:
8676 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
8677
8678 REAL_VALUE_FROM_CONST_DOUBLE (r, exp);
8679 assemble_real (r, mode, align);
8680 break;
8681
8682 case MODE_INT:
8683 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
8684 mark_symbol_refs_as_used (exp);
8685 break;
8686
8687 case MODE_VECTOR_INT:
8688 case MODE_VECTOR_FLOAT:
8689 {
8690 int i;
8691 machine_mode inner_mode;
8692 gcc_assert (GET_CODE (exp) == CONST_VECTOR);
8693
8694 inner_mode = GET_MODE_INNER (GET_MODE (exp));
8695 for (i = 0; i < XVECLEN (exp, 0); i++)
8696 s390_output_pool_entry (XVECEXP (exp, 0, i),
8697 inner_mode,
8698 i == 0
8699 ? align
8700 : GET_MODE_BITSIZE (inner_mode));
8701 }
8702 break;
8703
8704 default:
8705 gcc_unreachable ();
8706 }
8707 }
8708
8709
8710 /* Return an RTL expression representing the value of the return address
8711 for the frame COUNT steps up from the current frame. FRAME is the
8712 frame pointer of that frame. */
8713
8714 rtx
8715 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
8716 {
8717 int offset;
8718 rtx addr;
8719
8720 /* Without backchain, we fail for all but the current frame. */
8721
8722 if (!TARGET_BACKCHAIN && count > 0)
8723 return NULL_RTX;
8724
8725 /* For the current frame, we need to make sure the initial
8726 value of RETURN_REGNUM is actually saved. */
8727
8728 if (count == 0)
8729 {
8730 /* On non-z architectures branch splitting could overwrite r14. */
8731 if (TARGET_CPU_ZARCH)
8732 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
8733 else
8734 {
8735 cfun_frame_layout.save_return_addr_p = true;
8736 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
8737 }
8738 }
8739
8740 if (TARGET_PACKED_STACK)
8741 offset = -2 * UNITS_PER_LONG;
8742 else
8743 offset = RETURN_REGNUM * UNITS_PER_LONG;
8744
8745 addr = plus_constant (Pmode, frame, offset);
8746 addr = memory_address (Pmode, addr);
8747 return gen_rtx_MEM (Pmode, addr);
8748 }
8749
8750 /* Return an RTL expression representing the back chain stored in
8751 the current stack frame. */
8752
8753 rtx
8754 s390_back_chain_rtx (void)
8755 {
8756 rtx chain;
8757
8758 gcc_assert (TARGET_BACKCHAIN);
8759
8760 if (TARGET_PACKED_STACK)
8761 chain = plus_constant (Pmode, stack_pointer_rtx,
8762 STACK_POINTER_OFFSET - UNITS_PER_LONG);
8763 else
8764 chain = stack_pointer_rtx;
8765
8766 chain = gen_rtx_MEM (Pmode, chain);
8767 return chain;
8768 }
8769
8770 /* Find the first call-clobbered register unused in a function.
8771 This could be used as a base register in a leaf function
8772 or for holding the return address before the epilogue. */
8773
8774 static int
8775 find_unused_clobbered_reg (void)
8776 {
8777 int i;
8778 for (i = 0; i < 6; i++)
8779 if (!df_regs_ever_live_p (i))
8780 return i;
8781 return 0;
8782 }
8783
8784
8785 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
8786 clobbered hard regs in SETREG. */
8787
8788 static void
8789 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
8790 {
8791 char *regs_ever_clobbered = (char *)data;
8792 unsigned int i, regno;
8793 machine_mode mode = GET_MODE (setreg);
8794
8795 if (GET_CODE (setreg) == SUBREG)
8796 {
8797 rtx inner = SUBREG_REG (setreg);
8798 if (!GENERAL_REG_P (inner) && !FP_REG_P (inner))
8799 return;
8800 regno = subreg_regno (setreg);
8801 }
8802 else if (GENERAL_REG_P (setreg) || FP_REG_P (setreg))
8803 regno = REGNO (setreg);
8804 else
8805 return;
8806
8807 for (i = regno;
8808 i < regno + HARD_REGNO_NREGS (regno, mode);
8809 i++)
8810 regs_ever_clobbered[i] = 1;
8811 }
8812
8813 /* Walks through all basic blocks of the current function looking
8814 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
8815 of the passed char array REGS_EVER_CLOBBERED are set to one for
8816 each of those regs. */
8817
8818 static void
8819 s390_regs_ever_clobbered (char regs_ever_clobbered[])
8820 {
8821 basic_block cur_bb;
8822 rtx_insn *cur_insn;
8823 unsigned int i;
8824
8825 memset (regs_ever_clobbered, 0, 32);
8826
8827 /* For non-leaf functions we have to consider all call clobbered regs to be
8828 clobbered. */
8829 if (!crtl->is_leaf)
8830 {
8831 for (i = 0; i < 32; i++)
8832 regs_ever_clobbered[i] = call_really_used_regs[i];
8833 }
8834
8835 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
8836 this work is done by liveness analysis (mark_regs_live_at_end).
8837 Special care is needed for functions containing landing pads. Landing pads
8838 may use the eh registers, but the code which sets these registers is not
8839 contained in that function. Hence s390_regs_ever_clobbered is not able to
8840 deal with this automatically. */
8841 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
8842 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
8843 if (crtl->calls_eh_return
8844 || (cfun->machine->has_landing_pad_p
8845 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
8846 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
8847
8848 /* For nonlocal gotos all call-saved registers have to be saved.
8849 This flag is also set for the unwinding code in libgcc.
8850 See expand_builtin_unwind_init. For regs_ever_live this is done by
8851 reload. */
8852 if (crtl->saves_all_registers)
8853 for (i = 0; i < 32; i++)
8854 if (!call_really_used_regs[i])
8855 regs_ever_clobbered[i] = 1;
8856
8857 FOR_EACH_BB_FN (cur_bb, cfun)
8858 {
8859 FOR_BB_INSNS (cur_bb, cur_insn)
8860 {
8861 rtx pat;
8862
8863 if (!INSN_P (cur_insn))
8864 continue;
8865
8866 pat = PATTERN (cur_insn);
8867
8868 /* Ignore GPR restore insns. */
8869 if (epilogue_completed && RTX_FRAME_RELATED_P (cur_insn))
8870 {
8871 if (GET_CODE (pat) == SET
8872 && GENERAL_REG_P (SET_DEST (pat)))
8873 {
8874 /* lgdr */
8875 if (GET_MODE (SET_SRC (pat)) == DImode
8876 && FP_REG_P (SET_SRC (pat)))
8877 continue;
8878
8879 /* l / lg */
8880 if (GET_CODE (SET_SRC (pat)) == MEM)
8881 continue;
8882 }
8883
8884 /* lm / lmg */
8885 if (GET_CODE (pat) == PARALLEL
8886 && load_multiple_operation (pat, VOIDmode))
8887 continue;
8888 }
8889
8890 note_stores (pat,
8891 s390_reg_clobbered_rtx,
8892 regs_ever_clobbered);
8893 }
8894 }
8895 }
8896
8897 /* Determine the frame area which actually has to be accessed
8898 in the function epilogue. The values are stored at the
8899 given pointers AREA_BOTTOM (address of the lowest used stack
8900 address) and AREA_TOP (address of the first item which does
8901 not belong to the stack frame). */
8902
8903 static void
8904 s390_frame_area (int *area_bottom, int *area_top)
8905 {
8906 int b, t;
8907
8908 b = INT_MAX;
8909 t = INT_MIN;
8910
8911 if (cfun_frame_layout.first_restore_gpr != -1)
8912 {
8913 b = (cfun_frame_layout.gprs_offset
8914 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
8915 t = b + (cfun_frame_layout.last_restore_gpr
8916 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
8917 }
8918
8919 if (TARGET_64BIT && cfun_save_high_fprs_p)
8920 {
8921 b = MIN (b, cfun_frame_layout.f8_offset);
8922 t = MAX (t, (cfun_frame_layout.f8_offset
8923 + cfun_frame_layout.high_fprs * 8));
8924 }
8925
8926 if (!TARGET_64BIT)
8927 {
8928 if (cfun_fpr_save_p (FPR4_REGNUM))
8929 {
8930 b = MIN (b, cfun_frame_layout.f4_offset);
8931 t = MAX (t, cfun_frame_layout.f4_offset + 8);
8932 }
8933 if (cfun_fpr_save_p (FPR6_REGNUM))
8934 {
8935 b = MIN (b, cfun_frame_layout.f4_offset + 8);
8936 t = MAX (t, cfun_frame_layout.f4_offset + 16);
8937 }
8938 }
8939 *area_bottom = b;
8940 *area_top = t;
8941 }
8942 /* Update gpr_save_slots in the frame layout trying to make use of
8943 FPRs as GPR save slots.
8944 This is a helper routine of s390_register_info. */
8945
8946 static void
8947 s390_register_info_gprtofpr ()
8948 {
8949 int save_reg_slot = FPR0_REGNUM;
8950 int i, j;
8951
8952 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
8953 return;
8954
8955 for (i = 15; i >= 6; i--)
8956 {
8957 if (cfun_gpr_save_slot (i) == 0)
8958 continue;
8959
8960 /* Advance to the next FP register which can be used as a
8961 GPR save slot. */
8962 while ((!call_really_used_regs[save_reg_slot]
8963 || df_regs_ever_live_p (save_reg_slot)
8964 || cfun_fpr_save_p (save_reg_slot))
8965 && FP_REGNO_P (save_reg_slot))
8966 save_reg_slot++;
8967 if (!FP_REGNO_P (save_reg_slot))
8968 {
8969 /* We only want to use ldgr/lgdr if we can get rid of
8970 stm/lm entirely. So undo the gpr slot allocation in
8971 case we ran out of FPR save slots. */
8972 for (j = 6; j <= 15; j++)
8973 if (FP_REGNO_P (cfun_gpr_save_slot (j)))
8974 cfun_gpr_save_slot (j) = -1;
8975 break;
8976 }
8977 cfun_gpr_save_slot (i) = save_reg_slot++;
8978 }
8979 }
8980
8981 /* Set the bits in fpr_bitmap for FPRs which need to be saved due to
8982 stdarg.
8983 This is a helper routine for s390_register_info. */
8984
8985 static void
8986 s390_register_info_stdarg_fpr ()
8987 {
8988 int i;
8989 int min_fpr;
8990 int max_fpr;
8991
8992 /* Save the FP argument regs for stdarg. f0, f2 for 31 bit and
8993 f0, f2, f4 and f6 for 64 bit. */
8994 if (!cfun->stdarg
8995 || !TARGET_HARD_FLOAT
8996 || !cfun->va_list_fpr_size
8997 || crtl->args.info.fprs >= FP_ARG_NUM_REG)
8998 return;
8999
9000 min_fpr = crtl->args.info.fprs;
9001 max_fpr = min_fpr + cfun->va_list_fpr_size;
9002 if (max_fpr > FP_ARG_NUM_REG)
9003 max_fpr = FP_ARG_NUM_REG;
9004
9005 for (i = min_fpr; i < max_fpr; i++)
9006 cfun_set_fpr_save (i + FPR0_REGNUM);
9007 }
9008
9009 /* Reserve the GPR save slots for GPRs which need to be saved due to
9010 stdarg.
9011 This is a helper routine for s390_register_info. */
9012
9013 static void
9014 s390_register_info_stdarg_gpr ()
9015 {
9016 int i;
9017 int min_gpr;
9018 int max_gpr;
9019
9020 if (!cfun->stdarg
9021 || !cfun->va_list_gpr_size
9022 || crtl->args.info.gprs >= GP_ARG_NUM_REG)
9023 return;
9024
9025 min_gpr = crtl->args.info.gprs;
9026 max_gpr = min_gpr + cfun->va_list_gpr_size;
9027 if (max_gpr > GP_ARG_NUM_REG)
9028 max_gpr = GP_ARG_NUM_REG;
9029
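/* Argument GPRs start at r2 (r2..r6 carry the first GP_ARG_NUM_REG
   arguments), hence the bias of 2; -1 requests a stack save slot.  */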
9030 for (i = min_gpr; i < max_gpr; i++)
9031 cfun_gpr_save_slot (2 + i) = -1;
9032 }
9033
9034 /* The GPR and FPR save slots in cfun->machine->frame_layout are set
9035 for registers which need to be saved in function prologue.
9036 This function can be used until the insns emitted for save/restore
9037 of the regs are visible in the RTL stream. */
9038
9039 static void
9040 s390_register_info ()
9041 {
9042 int i, j;
9043 char clobbered_regs[32];
9044
9045 gcc_assert (!epilogue_completed);
9046
9047 if (reload_completed)
9048 /* After reload we rely on our own routine to determine which
9049 registers need saving. */
9050 s390_regs_ever_clobbered (clobbered_regs);
9051 else
9052 /* During reload we use regs_ever_live as a base since reload
9053 does changes in there which we otherwise would not be aware
9054 of. */
9055 for (i = 0; i < 32; i++)
9056 clobbered_regs[i] = df_regs_ever_live_p (i);
9057
9058 for (i = 0; i < 32; i++)
9059 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
9060
9061 /* Mark the call-saved FPRs which need to be saved.
9062 This needs to be done before checking the special GPRs since the
9063 stack pointer usage depends on whether high FPRs have to be saved
9064 or not. */
9065 cfun_frame_layout.fpr_bitmap = 0;
9066 cfun_frame_layout.high_fprs = 0;
9067 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
9068 if (clobbered_regs[i] && !call_really_used_regs[i])
9069 {
9070 cfun_set_fpr_save (i);
9071 if (i >= FPR8_REGNUM)
9072 cfun_frame_layout.high_fprs++;
9073 }
9074
9075 if (flag_pic)
9076 clobbered_regs[PIC_OFFSET_TABLE_REGNUM]
9077 |= !!df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
9078
9079 clobbered_regs[BASE_REGNUM]
9080 |= (cfun->machine->base_reg
9081 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
9082
9083 clobbered_regs[HARD_FRAME_POINTER_REGNUM]
9084 |= !!frame_pointer_needed;
9085
9086 /* On pre-z900 machines this decision might not be made until the
9087 machine-dependent reorg pass.
9088 save_return_addr_p will only be set on non-zarch machines, so
9089 there is no risk that r14 ends up in an FPR instead of a stack
9090 slot. */
9091 clobbered_regs[RETURN_REGNUM]
9092 |= (!crtl->is_leaf
9093 || TARGET_TPF_PROFILING
9094 || cfun->machine->split_branches_pending_p
9095 || cfun_frame_layout.save_return_addr_p
9096 || crtl->calls_eh_return);
9097
9098 clobbered_regs[STACK_POINTER_REGNUM]
9099 |= (!crtl->is_leaf
9100 || TARGET_TPF_PROFILING
9101 || cfun_save_high_fprs_p
9102 || get_frame_size () > 0
9103 || (reload_completed && cfun_frame_layout.frame_size > 0)
9104 || cfun->calls_alloca);
9105
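/* GPR save slot encoding: 0 means the register needs no save, -1 requests
   a slot in the stack frame, and an FPR register number means the GPR is
   saved in (and restored from) that FPR.  */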
9106 memset (cfun_frame_layout.gpr_save_slots, 0, 16);
9107
9108 for (i = 6; i < 16; i++)
9109 if (clobbered_regs[i])
9110 cfun_gpr_save_slot (i) = -1;
9111
9112 s390_register_info_stdarg_fpr ();
9113 s390_register_info_gprtofpr ();
9114
9115 /* First find the range of GPRs to be restored. Vararg regs don't
9116 need to be restored so we do it before assigning slots to the
9117 vararg GPRs. */
9118 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != -1; i++);
9119 for (j = 15; j > i && cfun_gpr_save_slot (j) != -1; j--);
9120 cfun_frame_layout.first_restore_gpr = (i == 16) ? -1 : i;
9121 cfun_frame_layout.last_restore_gpr = (i == 16) ? -1 : j;
9122
9123 /* stdarg functions might need to save GPRs 2 to 6. This might
9124 override the GPR->FPR save decision made above for r6 since
9125 vararg regs must go to the stack. */
9126 s390_register_info_stdarg_gpr ();
9127
9128 /* Now the range of GPRs which need saving. */
9129 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != -1; i++);
9130 for (j = 15; j > i && cfun_gpr_save_slot (j) != -1; j--);
9131 cfun_frame_layout.first_save_gpr = (i == 16) ? -1 : i;
9132 cfun_frame_layout.last_save_gpr = (i == 16) ? -1 : j;
9133 }
9134
9135 /* This function is called by s390_optimize_prologue in order to get
9136 rid of unnecessary GPR save/restore instructions. The register info
9137 for the GPRs is re-computed and the ranges are re-calculated. */
9138
9139 static void
9140 s390_optimize_register_info ()
9141 {
9142 char clobbered_regs[32];
9143 int i, j;
9144
9145 gcc_assert (epilogue_completed);
9146 gcc_assert (!cfun->machine->split_branches_pending_p);
9147
9148 s390_regs_ever_clobbered (clobbered_regs);
9149
9150 for (i = 0; i < 32; i++)
9151 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
9152
9153 /* There is still special treatment needed for cases invisible to
9154 s390_regs_ever_clobbered. */
9155 clobbered_regs[RETURN_REGNUM]
9156 |= (TARGET_TPF_PROFILING
9157 /* When expanding builtin_return_addr in ESA mode we do not
9158 know whether r14 will later be needed as scratch reg when
9159 doing branch splitting. So the builtin always accesses the
9160 r14 save slot and we need to stick to the save/restore
9161 decision for r14 even if it turns out that it didn't get
9162 clobbered. */
9163 || cfun_frame_layout.save_return_addr_p
9164 || crtl->calls_eh_return);
9165
9166 memset (cfun_frame_layout.gpr_save_slots, 0, 6);
9167
9168 for (i = 6; i < 16; i++)
9169 if (!clobbered_regs[i])
9170 cfun_gpr_save_slot (i) = 0;
9171
9172 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != -1; i++);
9173 for (j = 15; j > i && cfun_gpr_save_slot (j) != -1; j--);
9174 cfun_frame_layout.first_restore_gpr = (i == 16) ? -1 : i;
9175 cfun_frame_layout.last_restore_gpr = (i == 16) ? -1 : j;
9176
9177 s390_register_info_stdarg_gpr ();
9178
9179 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != -1; i++);
9180 for (j = 15; j > i && cfun_gpr_save_slot (j) != -1; j--);
9181 cfun_frame_layout.first_save_gpr = (i == 16) ? -1 : i;
9182 cfun_frame_layout.last_save_gpr = (i == 16) ? -1 : j;
9183 }
9184
9185 /* Fill cfun->machine with info about frame of current function. */
9186
9187 static void
9188 s390_frame_info (void)
9189 {
9190 HOST_WIDE_INT lowest_offset;
9191
9192 cfun_frame_layout.first_save_gpr_slot = cfun_frame_layout.first_save_gpr;
9193 cfun_frame_layout.last_save_gpr_slot = cfun_frame_layout.last_save_gpr;
9194
9195 /* The va_arg builtin uses a constant distance of 16 *
9196 UNITS_PER_LONG (r0-r15) to reach the FPRs from the reg_save_area
9197 pointer. So even if we are going to save the stack pointer in an
9198 FPR we need the stack space in order to keep the offsets
9199 correct. */
9200 if (cfun->stdarg && cfun_save_arg_fprs_p)
9201 {
9202 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
9203
9204 if (cfun_frame_layout.first_save_gpr_slot == -1)
9205 cfun_frame_layout.first_save_gpr_slot = STACK_POINTER_REGNUM;
9206 }
9207
9208 cfun_frame_layout.frame_size = get_frame_size ();
9209 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
9210 fatal_error (input_location,
9211 "total size of local variables exceeds architecture limit");
9212
9213 if (!TARGET_PACKED_STACK)
9214 {
9215 /* Fixed stack layout. */
9216 cfun_frame_layout.backchain_offset = 0;
9217 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
9218 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
9219 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
9220 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
9221 * UNITS_PER_LONG);
9222 }
9223 else if (TARGET_BACKCHAIN)
9224 {
9225 /* Kernel stack layout - packed stack, backchain, no float */
9226 gcc_assert (TARGET_SOFT_FLOAT);
9227 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
9228 - UNITS_PER_LONG);
9229
9230 /* The distance between the backchain and the return address
9231 save slot must not change. So we always need a slot for the
9232 stack pointer which resides in between. */
9233 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
9234
9235 cfun_frame_layout.gprs_offset
9236 = cfun_frame_layout.backchain_offset - cfun_gprs_save_area_size;
9237
9238 /* FPRs will not be saved. Nevertheless pick sane values to
9239 keep area calculations valid. */
9240 cfun_frame_layout.f0_offset =
9241 cfun_frame_layout.f4_offset =
9242 cfun_frame_layout.f8_offset = cfun_frame_layout.gprs_offset;
9243 }
9244 else
9245 {
9246 int num_fprs;
9247
9248 /* Packed stack layout without backchain. */
9249
9250 /* With stdarg FPRs need their dedicated slots. */
9251 num_fprs = (TARGET_64BIT && cfun->stdarg ? 2
9252 : (cfun_fpr_save_p (FPR4_REGNUM) +
9253 cfun_fpr_save_p (FPR6_REGNUM)));
9254 cfun_frame_layout.f4_offset = STACK_POINTER_OFFSET - 8 * num_fprs;
9255
9256 num_fprs = (cfun->stdarg ? 2
9257 : (cfun_fpr_save_p (FPR0_REGNUM)
9258 + cfun_fpr_save_p (FPR2_REGNUM)));
9259 cfun_frame_layout.f0_offset = cfun_frame_layout.f4_offset - 8 * num_fprs;
9260
9261 cfun_frame_layout.gprs_offset
9262 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
9263
9264 cfun_frame_layout.f8_offset = (cfun_frame_layout.gprs_offset
9265 - cfun_frame_layout.high_fprs * 8);
9266 }
9267
9268 if (cfun_save_high_fprs_p)
9269 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
9270
9271 if (!crtl->is_leaf)
9272 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
9273
9274 /* In the following cases we have to allocate a STACK_POINTER_OFFSET
9275 sized area at the bottom of the stack. This is required also for
9276 leaf functions. When GCC generates a local stack reference it
9277 will always add STACK_POINTER_OFFSET to all these references. */
9278 if (crtl->is_leaf
9279 && !TARGET_TPF_PROFILING
9280 && cfun_frame_layout.frame_size == 0
9281 && !cfun->calls_alloca)
9282 return;
9283
9284 /* Calculate the number of bytes we have used in our own register
9285 save area. With the packed stack layout we can re-use the
9286 remaining bytes for normal stack elements. */
9287
9288 if (TARGET_PACKED_STACK)
9289 lowest_offset = MIN (MIN (cfun_frame_layout.f0_offset,
9290 cfun_frame_layout.f4_offset),
9291 cfun_frame_layout.gprs_offset);
9292 else
9293 lowest_offset = 0;
9294
9295 if (TARGET_BACKCHAIN)
9296 lowest_offset = MIN (lowest_offset, cfun_frame_layout.backchain_offset);
9297
9298 cfun_frame_layout.frame_size += STACK_POINTER_OFFSET - lowest_offset;
9299
9300 /* If, under 31 bit, an odd number of GPRs has to be saved, we have
9301 to adjust the frame size to maintain the 8 byte alignment of
9302 stack frames. */
9303 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
9304 STACK_BOUNDARY / BITS_PER_UNIT - 1)
9305 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
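/* Example: with STACK_BOUNDARY == 64 this rounds frame_size up to the next
   multiple of 8, e.g. 92 -> (92 + 7) & ~7 = 96.  */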
9306 }
9307
9308 /* Generate frame layout. Fills in register and frame data for the current
9309 function in cfun->machine. This routine can be called multiple times;
9310 it will re-do the complete frame layout every time. */
9311
9312 static void
9313 s390_init_frame_layout (void)
9314 {
9315 HOST_WIDE_INT frame_size;
9316 int base_used;
9317
9318 gcc_assert (!reload_completed);
9319
9320 /* On S/390 machines, we may need to perform branch splitting, which
9321 will require both base and return address register. We have no
9322 choice but to assume we're going to need them until right at the
9323 end of the machine dependent reorg phase. */
9324 if (!TARGET_CPU_ZARCH)
9325 cfun->machine->split_branches_pending_p = true;
9326
9327 do
9328 {
9329 frame_size = cfun_frame_layout.frame_size;
9330
9331 /* Try to predict whether we'll need the base register. */
9332 base_used = cfun->machine->split_branches_pending_p
9333 || crtl->uses_const_pool
9334 || (!DISP_IN_RANGE (frame_size)
9335 && !CONST_OK_FOR_K (frame_size));
9336
9337 /* Decide which register to use as literal pool base. In small
9338 leaf functions, try to use an unused call-clobbered register
9339 as base register to avoid save/restore overhead. */
9340 if (!base_used)
9341 cfun->machine->base_reg = NULL_RTX;
9342 else if (crtl->is_leaf && !df_regs_ever_live_p (5))
9343 cfun->machine->base_reg = gen_rtx_REG (Pmode, 5);
9344 else
9345 cfun->machine->base_reg = gen_rtx_REG (Pmode, BASE_REGNUM);
9346
9347 s390_register_info ();
9348 s390_frame_info ();
9349 }
9350 while (frame_size != cfun_frame_layout.frame_size);
9351 }
9352
9353 /* Remove the FPR clobbers from a tbegin insn if it can be proven that
9354 the TX is nonescaping. A transaction is considered escaping if
9355 there is at least one path from tbegin returning CC0 to the
9356 function exit block without a tend.
9357
9358 The check so far has some limitations:
9359 - only single tbegin/tend BBs are supported
9360 - the first cond jump after tbegin must separate the CC0 path from ~CC0
9361 - when CC is copied to a GPR and the CC0 check is done with the GPR
9362 this is not supported
9363 */
9364
9365 static void
9366 s390_optimize_nonescaping_tx (void)
9367 {
9368 const unsigned int CC0 = 1 << 3;
9369 basic_block tbegin_bb = NULL;
9370 basic_block tend_bb = NULL;
9371 basic_block bb;
9372 rtx_insn *insn;
9373 bool result = true;
9374 int bb_index;
9375 rtx_insn *tbegin_insn = NULL;
9376
9377 if (!cfun->machine->tbegin_p)
9378 return;
9379
9380 for (bb_index = 0; bb_index < n_basic_blocks_for_fn (cfun); bb_index++)
9381 {
9382 bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
9383
9384 if (!bb)
9385 continue;
9386
9387 FOR_BB_INSNS (bb, insn)
9388 {
9389 rtx ite, cc, pat, target;
9390 unsigned HOST_WIDE_INT mask;
9391
9392 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
9393 continue;
9394
9395 pat = PATTERN (insn);
9396
9397 if (GET_CODE (pat) == PARALLEL)
9398 pat = XVECEXP (pat, 0, 0);
9399
9400 if (GET_CODE (pat) != SET
9401 || GET_CODE (SET_SRC (pat)) != UNSPEC_VOLATILE)
9402 continue;
9403
9404 if (XINT (SET_SRC (pat), 1) == UNSPECV_TBEGIN)
9405 {
9406 rtx_insn *tmp;
9407
9408 tbegin_insn = insn;
9409
9410 /* Just return if the tbegin doesn't have clobbers. */
9411 if (GET_CODE (PATTERN (insn)) != PARALLEL)
9412 return;
9413
9414 if (tbegin_bb != NULL)
9415 return;
9416
9417 /* Find the next conditional jump. */
9418 for (tmp = NEXT_INSN (insn);
9419 tmp != NULL_RTX;
9420 tmp = NEXT_INSN (tmp))
9421 {
9422 if (reg_set_p (gen_rtx_REG (CCmode, CC_REGNUM), tmp))
9423 return;
9424 if (!JUMP_P (tmp))
9425 continue;
9426
9427 ite = SET_SRC (PATTERN (tmp));
9428 if (GET_CODE (ite) != IF_THEN_ELSE)
9429 continue;
9430
9431 cc = XEXP (XEXP (ite, 0), 0);
9432 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc))
9433 || GET_MODE (cc) != CCRAWmode
9434 || GET_CODE (XEXP (XEXP (ite, 0), 1)) != CONST_INT)
9435 return;
9436
9437 if (bb->succs->length () != 2)
9438 return;
9439
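/* The CCRAW comparison operand is a 4-bit condition code mask; for an NE
   comparison the branch is taken on the complementary mask, so invert it
   before matching against CC0.  */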
9440 mask = INTVAL (XEXP (XEXP (ite, 0), 1));
9441 if (GET_CODE (XEXP (ite, 0)) == NE)
9442 mask ^= 0xf;
9443
9444 if (mask == CC0)
9445 target = XEXP (ite, 1);
9446 else if (mask == (CC0 ^ 0xf))
9447 target = XEXP (ite, 2);
9448 else
9449 return;
9450
9451 {
9452 edge_iterator ei;
9453 edge e1, e2;
9454
9455 ei = ei_start (bb->succs);
9456 e1 = ei_safe_edge (ei);
9457 ei_next (&ei);
9458 e2 = ei_safe_edge (ei);
9459
9460 if (e2->flags & EDGE_FALLTHRU)
9461 {
9462 e2 = e1;
9463 e1 = ei_safe_edge (ei);
9464 }
9465
9466 if (!(e1->flags & EDGE_FALLTHRU))
9467 return;
9468
9469 tbegin_bb = (target == pc_rtx) ? e1->dest : e2->dest;
9470 }
9471 if (tmp == BB_END (bb))
9472 break;
9473 }
9474 }
9475
9476 if (XINT (SET_SRC (pat), 1) == UNSPECV_TEND)
9477 {
9478 if (tend_bb != NULL)
9479 return;
9480 tend_bb = bb;
9481 }
9482 }
9483 }
9484
9485 /* Either we successfully remove the FPR clobbers here or we are not
9486 able to do anything for this TX. Neither case qualifies for
9487 another look. */
9488 cfun->machine->tbegin_p = false;
9489
9490 if (tbegin_bb == NULL || tend_bb == NULL)
9491 return;
9492
9493 calculate_dominance_info (CDI_POST_DOMINATORS);
9494 result = dominated_by_p (CDI_POST_DOMINATORS, tbegin_bb, tend_bb);
9495 free_dominance_info (CDI_POST_DOMINATORS);
9496
9497 if (!result)
9498 return;
9499
9500 PATTERN (tbegin_insn) = gen_rtx_PARALLEL (VOIDmode,
9501 gen_rtvec (2,
9502 XVECEXP (PATTERN (tbegin_insn), 0, 0),
9503 XVECEXP (PATTERN (tbegin_insn), 0, 1)));
9504 INSN_CODE (tbegin_insn) = -1;
9505 df_insn_rescan (tbegin_insn);
9506
9507 return;
9508 }
9509
9510 /* Return true if it is legal to put a value with MODE into REGNO. */
9511
9512 bool
9513 s390_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
9514 {
9515 if (!TARGET_VX && VECTOR_NOFP_REGNO_P (regno))
9516 return false;
9517
9518 switch (REGNO_REG_CLASS (regno))
9519 {
9520 case VEC_REGS:
9521 return ((GET_MODE_CLASS (mode) == MODE_INT
9522 && s390_class_max_nregs (VEC_REGS, mode) == 1)
9523 || mode == DFmode
9524 || s390_vector_mode_supported_p (mode));
9525 break;
9526 case FP_REGS:
9527 if (TARGET_VX
9528 && ((GET_MODE_CLASS (mode) == MODE_INT
9529 && s390_class_max_nregs (FP_REGS, mode) == 1)
9530 || mode == DFmode
9531 || s390_vector_mode_supported_p (mode)))
9532 return true;
9533
9534 if (REGNO_PAIR_OK (regno, mode))
9535 {
9536 if (mode == SImode || mode == DImode)
9537 return true;
9538
9539 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
9540 return true;
9541 }
9542 break;
9543 case ADDR_REGS:
9544 if (FRAME_REGNO_P (regno) && mode == Pmode)
9545 return true;
9546
9547 /* fallthrough */
9548 case GENERAL_REGS:
9549 if (REGNO_PAIR_OK (regno, mode))
9550 {
9551 if (TARGET_ZARCH
9552 || (mode != TFmode && mode != TCmode && mode != TDmode))
9553 return true;
9554 }
9555 break;
9556 case CC_REGS:
9557 if (GET_MODE_CLASS (mode) == MODE_CC)
9558 return true;
9559 break;
9560 case ACCESS_REGS:
9561 if (REGNO_PAIR_OK (regno, mode))
9562 {
9563 if (mode == SImode || mode == Pmode)
9564 return true;
9565 }
9566 break;
9567 default:
9568 return false;
9569 }
9570
9571 return false;
9572 }
9573
9574 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
9575
9576 bool
9577 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
9578 {
9579 /* Once we've decided upon a register to use as base register, it must
9580 no longer be used for any other purpose. */
9581 if (cfun->machine->base_reg)
9582 if (REGNO (cfun->machine->base_reg) == old_reg
9583 || REGNO (cfun->machine->base_reg) == new_reg)
9584 return false;
9585
9586 /* Prevent regrename from using call-saved regs which haven't
9587 actually been saved. This is necessary since regrename assumes
9588 the backend save/restore decisions are based on
9589 df_regs_ever_live. Since we have our own routine we have to tell
9590 regrename manually about it. */
9591 if (GENERAL_REGNO_P (new_reg)
9592 && !call_really_used_regs[new_reg]
9593 && cfun_gpr_save_slot (new_reg) == 0)
9594 return false;
9595
9596 return true;
9597 }
9598
9599 /* Return nonzero if register REGNO can be used as a scratch register
9600 in peephole2. */
9601
9602 static bool
9603 s390_hard_regno_scratch_ok (unsigned int regno)
9604 {
9605 /* See s390_hard_regno_rename_ok. */
9606 if (GENERAL_REGNO_P (regno)
9607 && !call_really_used_regs[regno]
9608 && cfun_gpr_save_slot (regno) == 0)
9609 return false;
9610
9611 return true;
9612 }
9613
9614 /* Maximum number of registers to represent a value of mode MODE
9615 in a register of class RCLASS. */
9616
9617 int
9618 s390_class_max_nregs (enum reg_class rclass, machine_mode mode)
9619 {
9620 int reg_size;
9621 bool reg_pair_required_p = false;
9622
9623 switch (rclass)
9624 {
9625 case FP_REGS:
9626 case VEC_REGS:
9627 reg_size = TARGET_VX ? 16 : 8;
9628
9629 /* TF and TD modes would fit into a VR but we put them into a
9630 register pair since we do not have 128bit FP instructions on
9631 full VRs. */
9632 if (TARGET_VX
9633 && SCALAR_FLOAT_MODE_P (mode)
9634 && GET_MODE_SIZE (mode) >= 16)
9635 reg_pair_required_p = true;
9636
9637 /* Even if complex types would fit into a single FPR/VR we force
9638 them into a register pair to deal with the parts more easily.
9639 (FIXME: What about complex ints?) */
9640 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
9641 reg_pair_required_p = true;
9642 break;
9643 case ACCESS_REGS:
9644 reg_size = 4;
9645 break;
9646 default:
9647 reg_size = UNITS_PER_WORD;
9648 break;
9649 }
9650
9651 if (reg_pair_required_p)
9652 return 2 * ((GET_MODE_SIZE (mode) / 2 + reg_size - 1) / reg_size);
9653
9654 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
9655 }
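/* Worked examples of the computation above (illustrative values):
   with TARGET_VX, TFmode (16 bytes) in FP_REGS/VEC_REGS sets
   reg_pair_required_p, giving 2 * ((16/2 + 16 - 1) / 16) = 2 registers,
   i.e. a register pair.  A V4SImode value (16 bytes) needs
   (16 + 16 - 1) / 16 = 1 vector register, and a TImode value in
   GENERAL_REGS on 64 bit needs (16 + 8 - 1) / 8 = 2 GPRs.  */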
9656
9657 /* Return TRUE if changing mode from FROM to TO should not be allowed
9658 for register class CLASS. */
9659
9660 int
9661 s390_cannot_change_mode_class (machine_mode from_mode,
9662 machine_mode to_mode,
9663 enum reg_class rclass)
9664 {
9665 machine_mode small_mode;
9666 machine_mode big_mode;
9667
9668 if (GET_MODE_SIZE (from_mode) == GET_MODE_SIZE (to_mode))
9669 return 0;
9670
9671 if (GET_MODE_SIZE (from_mode) < GET_MODE_SIZE (to_mode))
9672 {
9673 small_mode = from_mode;
9674 big_mode = to_mode;
9675 }
9676 else
9677 {
9678 small_mode = to_mode;
9679 big_mode = from_mode;
9680 }
9681
9682 /* Values residing in VRs are little-endian style. All modes are
9683 placed left-aligned in a VR. This means that we cannot allow
9684 switching between modes with differing sizes. Also if the vector
9685 facility is available we still place TFmode values in VR register
9686 pairs, since the only instructions we have operating on TFmodes
9687 only deal with register pairs. Therefore we have to allow DFmode
9688 subregs of TFmodes to enable the TFmode splitters. */
9689 if (reg_classes_intersect_p (VEC_REGS, rclass)
9690 && (GET_MODE_SIZE (small_mode) < 8
9691 || s390_class_max_nregs (VEC_REGS, big_mode) == 1))
9692 return 1;
9693
9694 /* Likewise for access registers, since they have only half the
9695 word size on 64-bit. */
9696 if (reg_classes_intersect_p (ACCESS_REGS, rclass))
9697 return 1;
9698
9699 return 0;
9700 }
9701
9702 /* Return true if we use LRA instead of reload pass. */
9703 static bool
9704 s390_lra_p (void)
9705 {
9706 return s390_lra_flag;
9707 }
9708
9709 /* Return true if register FROM can be eliminated via register TO. */
9710
9711 static bool
9712 s390_can_eliminate (const int from, const int to)
9713 {
9714 /* On zSeries machines, we have not marked the base register as fixed.
9715 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
9716 If a function requires the base register, we say here that this
9717 elimination cannot be performed. This will cause reload to free
9718 up the base register (as if it were fixed). On the other hand,
9719 if the current function does *not* require the base register, we
9720 say here the elimination succeeds, which in turn allows reload
9721 to allocate the base register for any other purpose. */
9722 if (from == BASE_REGNUM && to == BASE_REGNUM)
9723 {
9724 if (TARGET_CPU_ZARCH)
9725 {
9726 s390_init_frame_layout ();
9727 return cfun->machine->base_reg == NULL_RTX;
9728 }
9729
9730 return false;
9731 }
9732
9733 /* Everything else must point into the stack frame. */
9734 gcc_assert (to == STACK_POINTER_REGNUM
9735 || to == HARD_FRAME_POINTER_REGNUM);
9736
9737 gcc_assert (from == FRAME_POINTER_REGNUM
9738 || from == ARG_POINTER_REGNUM
9739 || from == RETURN_ADDRESS_POINTER_REGNUM);
9740
9741 /* Make sure we actually saved the return address. */
9742 if (from == RETURN_ADDRESS_POINTER_REGNUM)
9743 if (!crtl->calls_eh_return
9744 && !cfun->stdarg
9745 && !cfun_frame_layout.save_return_addr_p)
9746 return false;
9747
9748 return true;
9749 }
9750
9751 /* Return offset between register FROM and TO initially after prolog. */
9752
9753 HOST_WIDE_INT
9754 s390_initial_elimination_offset (int from, int to)
9755 {
9756 HOST_WIDE_INT offset;
9757
9758 /* ??? Why are we called for non-eliminable pairs? */
9759 if (!s390_can_eliminate (from, to))
9760 return 0;
9761
9762 switch (from)
9763 {
9764 case FRAME_POINTER_REGNUM:
9765 offset = (get_frame_size()
9766 + STACK_POINTER_OFFSET
9767 + crtl->outgoing_args_size);
9768 break;
9769
9770 case ARG_POINTER_REGNUM:
9771 s390_init_frame_layout ();
9772 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
9773 break;
9774
9775 case RETURN_ADDRESS_POINTER_REGNUM:
9776 s390_init_frame_layout ();
9777
9778 if (cfun_frame_layout.first_save_gpr_slot == -1)
9779 {
9780 /* If it turns out that for stdarg nothing went into the reg
9781 save area we also do not need the return address
9782 pointer. */
9783 if (cfun->stdarg && !cfun_save_arg_fprs_p)
9784 return 0;
9785
9786 gcc_unreachable ();
9787 }
9788
9789 /* In order to make the following work it is not necessary for
9790 r14 to have a save slot. It is sufficient if one other GPR
9791 got one. Since the GPRs are always stored without gaps we
9792 are able to calculate where the r14 save slot would
9793 reside. */
9794 offset = (cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset +
9795 (RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot) *
9796 UNITS_PER_LONG);
9797 break;
9798
9799 case BASE_REGNUM:
9800 offset = 0;
9801 break;
9802
9803 default:
9804 gcc_unreachable ();
9805 }
9806
9807 return offset;
9808 }
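/* Example for the RETURN_ADDRESS_POINTER_REGNUM case above (numbers
   purely illustrative): on 64 bit (UNITS_PER_LONG == 8) with
   first_save_gpr_slot == 6, the r14 slot lies
   (RETURN_REGNUM - 6) * 8 = 64 bytes into the GPR save area, so the
   offset returned is frame_size + gprs_offset + 64.  */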
9809
9810 /* Emit insn to save fpr REGNUM at offset OFFSET relative
9811 to register BASE. Return generated insn. */
9812
9813 static rtx
9814 save_fpr (rtx base, int offset, int regnum)
9815 {
9816 rtx addr;
9817 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
9818
9819 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
9820 set_mem_alias_set (addr, get_varargs_alias_set ());
9821 else
9822 set_mem_alias_set (addr, get_frame_alias_set ());
9823
9824 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
9825 }
9826
9827 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
9828 to register BASE. Return generated insn. */
9829
9830 static rtx
9831 restore_fpr (rtx base, int offset, int regnum)
9832 {
9833 rtx addr;
9834 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
9835 set_mem_alias_set (addr, get_frame_alias_set ());
9836
9837 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
9838 }
9839
9840 /* Return true if REGNO is a global register, but not one
9841 of the special ones that need to be saved/restored anyway. */
9842
9843 static inline bool
9844 global_not_special_regno_p (int regno)
9845 {
9846 return (global_regs[regno]
9847 /* These registers are special and need to be
9848 restored in any case. */
9849 && !(regno == STACK_POINTER_REGNUM
9850 || regno == RETURN_REGNUM
9851 || regno == BASE_REGNUM
9852 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
9853 }
9854
9855 /* Generate insn to save registers FIRST to LAST into
9856 the register save area located at offset OFFSET
9857 relative to register BASE. */
9858
9859 static rtx
9860 save_gprs (rtx base, int offset, int first, int last)
9861 {
9862 rtx addr, insn, note;
9863 int i;
9864
9865 addr = plus_constant (Pmode, base, offset);
9866 addr = gen_rtx_MEM (Pmode, addr);
9867
9868 set_mem_alias_set (addr, get_frame_alias_set ());
9869
9870 /* Special-case single register. */
9871 if (first == last)
9872 {
9873 if (TARGET_64BIT)
9874 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
9875 else
9876 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
9877
9878 if (!global_not_special_regno_p (first))
9879 RTX_FRAME_RELATED_P (insn) = 1;
9880 return insn;
9881 }
9882
9883
9884 insn = gen_store_multiple (addr,
9885 gen_rtx_REG (Pmode, first),
9886 GEN_INT (last - first + 1));
9887
9888 if (first <= 6 && cfun->stdarg)
9889 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
9890 {
9891 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
9892
9893 if (first + i <= 6)
9894 set_mem_alias_set (mem, get_varargs_alias_set ());
9895 }
9896
9897 /* We need to set the FRAME_RELATED flag on all SETs
9898 inside the store-multiple pattern.
9899
9900 However, we must not emit DWARF records for registers 2..5
9901 if they are stored for use by variable arguments ...
9902
9903 ??? Unfortunately, it is not enough to simply not set the
9904 FRAME_RELATED flags for those SETs, because the first SET
9905 of the PARALLEL is always treated as if it had the flag
9906 set, even if it does not. Therefore we emit a new pattern
9907 without those registers as REG_FRAME_RELATED_EXPR note. */
9908
9909 if (first >= 6 && !global_not_special_regno_p (first))
9910 {
9911 rtx pat = PATTERN (insn);
9912
9913 for (i = 0; i < XVECLEN (pat, 0); i++)
9914 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
9915 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
9916 0, i)))))
9917 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
9918
9919 RTX_FRAME_RELATED_P (insn) = 1;
9920 }
9921 else if (last >= 6)
9922 {
9923 int start;
9924
9925 for (start = first >= 6 ? first : 6; start <= last; start++)
9926 if (!global_not_special_regno_p (start))
9927 break;
9928
9929 if (start > last)
9930 return insn;
9931
9932 addr = plus_constant (Pmode, base,
9933 offset + (start - first) * UNITS_PER_LONG);
9934
9935 if (start == last)
9936 {
9937 if (TARGET_64BIT)
9938 note = gen_movdi (gen_rtx_MEM (Pmode, addr),
9939 gen_rtx_REG (Pmode, start));
9940 else
9941 note = gen_movsi (gen_rtx_MEM (Pmode, addr),
9942 gen_rtx_REG (Pmode, start));
9943 note = PATTERN (note);
9944
9945 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
9946 RTX_FRAME_RELATED_P (insn) = 1;
9947
9948 return insn;
9949 }
9950
9951 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
9952 gen_rtx_REG (Pmode, start),
9953 GEN_INT (last - start + 1));
9954 note = PATTERN (note);
9955
9956 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
9957
9958 for (i = 0; i < XVECLEN (note, 0); i++)
9959 if (GET_CODE (XVECEXP (note, 0, i)) == SET
9960 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
9961 0, i)))))
9962 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
9963
9964 RTX_FRAME_RELATED_P (insn) = 1;
9965 }
9966
9967 return insn;
9968 }
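/* Usage sketch (illustrative, assuming the usual 64-bit frame layout):
   a typical prologue call such as save_gprs (stack_pointer_rtx, 48, 6, 15)
   emits a single store-multiple insn covering r6..r15 (an stmg on
   64 bit), and the REG_FRAME_RELATED_EXPR handling above makes sure
   the DWARF CFI only describes the call-saved registers, not GPRs
   stored merely for stdarg.  */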
9969
9970 /* Generate insn to restore registers FIRST to LAST from
9971 the register save area located at offset OFFSET
9972 relative to register BASE. */
9973
9974 static rtx
9975 restore_gprs (rtx base, int offset, int first, int last)
9976 {
9977 rtx addr, insn;
9978
9979 addr = plus_constant (Pmode, base, offset);
9980 addr = gen_rtx_MEM (Pmode, addr);
9981 set_mem_alias_set (addr, get_frame_alias_set ());
9982
9983 /* Special-case single register. */
9984 if (first == last)
9985 {
9986 if (TARGET_64BIT)
9987 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
9988 else
9989 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
9990
9991 RTX_FRAME_RELATED_P (insn) = 1;
9992 return insn;
9993 }
9994
9995 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
9996 addr,
9997 GEN_INT (last - first + 1));
9998 RTX_FRAME_RELATED_P (insn) = 1;
9999 return insn;
10000 }
10001
10002 /* Return insn sequence to load the GOT register. */
10003
10004 static GTY(()) rtx got_symbol;
10005 rtx_insn *
10006 s390_load_got (void)
10007 {
10008 rtx_insn *insns;
10009
10010 /* We cannot use pic_offset_table_rtx here since we use this
10011 function also for non-pic if __tls_get_offset is called and in
10012 that case PIC_OFFSET_TABLE_REGNUM as well as pic_offset_table_rtx
10013 aren't usable. */
10014 rtx got_rtx = gen_rtx_REG (Pmode, 12);
10015
10016 if (!got_symbol)
10017 {
10018 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
10019 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
10020 }
10021
10022 start_sequence ();
10023
10024 if (TARGET_CPU_ZARCH)
10025 {
10026 emit_move_insn (got_rtx, got_symbol);
10027 }
10028 else
10029 {
10030 rtx offset;
10031
10032 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
10033 UNSPEC_LTREL_OFFSET);
10034 offset = gen_rtx_CONST (Pmode, offset);
10035 offset = force_const_mem (Pmode, offset);
10036
10037 emit_move_insn (got_rtx, offset);
10038
10039 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
10040 UNSPEC_LTREL_BASE);
10041 offset = gen_rtx_PLUS (Pmode, got_rtx, offset);
10042
10043 emit_move_insn (got_rtx, offset);
10044 }
10045
10046 insns = get_insns ();
10047 end_sequence ();
10048 return insns;
10049 }
10050
10051 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
10052 and the change to the stack pointer. */
10053
10054 static void
10055 s390_emit_stack_tie (void)
10056 {
10057 rtx mem = gen_frame_mem (BLKmode,
10058 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
10059
10060 emit_insn (gen_stack_tie (mem));
10061 }
10062
10063 /* Copy GPRS into FPR save slots. */
10064
10065 static void
10066 s390_save_gprs_to_fprs (void)
10067 {
10068 int i;
10069
10070 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
10071 return;
10072
10073 for (i = 6; i < 16; i++)
10074 {
10075 if (FP_REGNO_P (cfun_gpr_save_slot (i)))
10076 {
10077 rtx_insn *insn =
10078 emit_move_insn (gen_rtx_REG (DImode, cfun_gpr_save_slot (i)),
10079 gen_rtx_REG (DImode, i));
10080 RTX_FRAME_RELATED_P (insn) = 1;
10081 }
10082 }
10083 }
10084
10085 /* Restore GPRs from FPR save slots. */
10086
10087 static void
10088 s390_restore_gprs_from_fprs (void)
10089 {
10090 int i;
10091
10092 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
10093 return;
10094
10095 for (i = 6; i < 16; i++)
10096 {
10097 if (FP_REGNO_P (cfun_gpr_save_slot (i)))
10098 {
10099 rtx_insn *insn =
10100 emit_move_insn (gen_rtx_REG (DImode, i),
10101 gen_rtx_REG (DImode, cfun_gpr_save_slot (i)));
10102 df_set_regs_ever_live (i, true);
10103 add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, i));
10104 if (i == STACK_POINTER_REGNUM)
10105 add_reg_note (insn, REG_CFA_DEF_CFA,
10106 plus_constant (Pmode, stack_pointer_rtx,
10107 STACK_POINTER_OFFSET));
10108 RTX_FRAME_RELATED_P (insn) = 1;
10109 }
10110 }
10111 }
10112
10113
10114 /* A pass run immediately before shrink-wrapping and prologue and epilogue
10115 generation. */
10116
10117 namespace {
10118
10119 const pass_data pass_data_s390_early_mach =
10120 {
10121 RTL_PASS, /* type */
10122 "early_mach", /* name */
10123 OPTGROUP_NONE, /* optinfo_flags */
10124 TV_MACH_DEP, /* tv_id */
10125 0, /* properties_required */
10126 0, /* properties_provided */
10127 0, /* properties_destroyed */
10128 0, /* todo_flags_start */
10129 ( TODO_df_verify | TODO_df_finish ), /* todo_flags_finish */
10130 };
10131
10132 class pass_s390_early_mach : public rtl_opt_pass
10133 {
10134 public:
10135 pass_s390_early_mach (gcc::context *ctxt)
10136 : rtl_opt_pass (pass_data_s390_early_mach, ctxt)
10137 {}
10138
10139 /* opt_pass methods: */
10140 virtual unsigned int execute (function *);
10141
10142 }; // class pass_s390_early_mach
10143
10144 unsigned int
10145 pass_s390_early_mach::execute (function *fun)
10146 {
10147 rtx_insn *insn;
10148
10149 /* Try to get rid of the FPR clobbers. */
10150 s390_optimize_nonescaping_tx ();
10151
10152 /* Re-compute register info. */
10153 s390_register_info ();
10154
10155 /* If we're using a base register, ensure that it is always valid for
10156 the first non-prologue instruction. */
10157 if (fun->machine->base_reg)
10158 emit_insn_at_entry (gen_main_pool (fun->machine->base_reg));
10159
10160 /* Annotate all constant pool references to let the scheduler know
10161 they implicitly use the base register. */
10162 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10163 if (INSN_P (insn))
10164 {
10165 annotate_constant_pool_refs (&PATTERN (insn));
10166 df_insn_rescan (insn);
10167 }
10168 return 0;
10169 }
10170
10171 } // anon namespace
10172
10173 /* Expand the prologue into a bunch of separate insns. */
10174
10175 void
10176 s390_emit_prologue (void)
10177 {
10178 rtx insn, addr;
10179 rtx temp_reg;
10180 int i;
10181 int offset;
10182 int next_fpr = 0;
10183
10184 /* Choose the best register to use as a temporary within the prologue.
10185 See below for why TPF must use register 1. */
10186
10187 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
10188 && !crtl->is_leaf
10189 && !TARGET_TPF_PROFILING)
10190 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
10191 else
10192 temp_reg = gen_rtx_REG (Pmode, 1);
10193
10194 s390_save_gprs_to_fprs ();
10195
10196 /* Save call saved gprs. */
10197 if (cfun_frame_layout.first_save_gpr != -1)
10198 {
10199 insn = save_gprs (stack_pointer_rtx,
10200 cfun_frame_layout.gprs_offset +
10201 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
10202 - cfun_frame_layout.first_save_gpr_slot),
10203 cfun_frame_layout.first_save_gpr,
10204 cfun_frame_layout.last_save_gpr);
10205 emit_insn (insn);
10206 }
10207
10208 /* Dummy insn to mark literal pool slot. */
10209
10210 if (cfun->machine->base_reg)
10211 emit_insn (gen_main_pool (cfun->machine->base_reg));
10212
10213 offset = cfun_frame_layout.f0_offset;
10214
10215 /* Save f0 and f2. */
10216 for (i = FPR0_REGNUM; i <= FPR0_REGNUM + 1; i++)
10217 {
10218 if (cfun_fpr_save_p (i))
10219 {
10220 save_fpr (stack_pointer_rtx, offset, i);
10221 offset += 8;
10222 }
10223 else if (!TARGET_PACKED_STACK || cfun->stdarg)
10224 offset += 8;
10225 }
10226
10227 /* Save f4 and f6. */
10228 offset = cfun_frame_layout.f4_offset;
10229 for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
10230 {
10231 if (cfun_fpr_save_p (i))
10232 {
10233 insn = save_fpr (stack_pointer_rtx, offset, i);
10234 offset += 8;
10235
10236 /* If f4 and f6 are call clobbered they are saved due to
10237 stdargs and therefore are not frame related. */
10238 if (!call_really_used_regs[i])
10239 RTX_FRAME_RELATED_P (insn) = 1;
10240 }
10241 else if (!TARGET_PACKED_STACK || call_really_used_regs[i])
10242 offset += 8;
10243 }
10244
10245 if (TARGET_PACKED_STACK
10246 && cfun_save_high_fprs_p
10247 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
10248 {
10249 offset = (cfun_frame_layout.f8_offset
10250 + (cfun_frame_layout.high_fprs - 1) * 8);
10251
10252 for (i = FPR15_REGNUM; i >= FPR8_REGNUM && offset >= 0; i--)
10253 if (cfun_fpr_save_p (i))
10254 {
10255 insn = save_fpr (stack_pointer_rtx, offset, i);
10256
10257 RTX_FRAME_RELATED_P (insn) = 1;
10258 offset -= 8;
10259 }
10260 if (offset >= cfun_frame_layout.f8_offset)
10261 next_fpr = i;
10262 }
10263
10264 if (!TARGET_PACKED_STACK)
10265 next_fpr = cfun_save_high_fprs_p ? FPR15_REGNUM : 0;
10266
10267 if (flag_stack_usage_info)
10268 current_function_static_stack_size = cfun_frame_layout.frame_size;
10269
10270 /* Decrement stack pointer. */
10271
10272 if (cfun_frame_layout.frame_size > 0)
10273 {
10274 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
10275 rtx real_frame_off;
10276
10277 if (s390_stack_size)
10278 {
10279 HOST_WIDE_INT stack_guard;
10280
10281 if (s390_stack_guard)
10282 stack_guard = s390_stack_guard;
10283 else
10284 {
10285 /* If no value for stack guard is provided the smallest power of 2
10286 larger than the current frame size is chosen. */
10287 stack_guard = 1;
10288 while (stack_guard < cfun_frame_layout.frame_size)
10289 stack_guard <<= 1;
10290 }
10291
10292 if (cfun_frame_layout.frame_size >= s390_stack_size)
10293 {
10294 warning (0, "frame size of function %qs is %wd"
10295 " bytes exceeding user provided stack limit of "
10296 "%d bytes. "
10297 "An unconditional trap is added.",
10298 current_function_name(), cfun_frame_layout.frame_size,
10299 s390_stack_size);
10300 emit_insn (gen_trap ());
10301 }
10302 else
10303 {
10304 /* stack_guard has to be smaller than s390_stack_size.
10305 Otherwise we would emit an AND with zero which would
10306 not match the test under mask pattern. */
10307 if (stack_guard >= s390_stack_size)
10308 {
10309 warning (0, "frame size of function %qs is %wd"
10310 " bytes which is more than half the stack size. "
10311 "The dynamic check would not be reliable. "
10312 "No check emitted for this function.",
10313 current_function_name(),
10314 cfun_frame_layout.frame_size);
10315 }
10316 else
10317 {
10318 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
10319 & ~(stack_guard - 1));
10320
10321 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
10322 GEN_INT (stack_check_mask));
10323 if (TARGET_64BIT)
10324 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
10325 t, const0_rtx),
10326 t, const0_rtx, const0_rtx));
10327 else
10328 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
10329 t, const0_rtx),
10330 t, const0_rtx, const0_rtx));
10331 }
10332 }
10333 }
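/* Numeric sketch of the -mstack-size check above (values are
   illustrative only): with s390_stack_size == 65536 and a frame of
   5000 bytes, stack_guard defaults to the next power of two, 8192, so
   stack_check_mask = (65536 - 1) & ~(8192 - 1) = 0xe000.  The
   conditional trap then triggers once the stack pointer has all of
   these mask bits clear, i.e. roughly once it has descended into the
   lowest stack_guard sized part of the stack area.  */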
10334
10335 if (s390_warn_framesize > 0
10336 && cfun_frame_layout.frame_size >= s390_warn_framesize)
10337 warning (0, "frame size of %qs is %wd bytes",
10338 current_function_name (), cfun_frame_layout.frame_size);
10339
10340 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
10341 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
10342
10343 /* Save incoming stack pointer into temp reg. */
10344 if (TARGET_BACKCHAIN || next_fpr)
10345 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
10346
10347 /* Subtract frame size from stack pointer. */
10348
10349 if (DISP_IN_RANGE (INTVAL (frame_off)))
10350 {
10351 insn = gen_rtx_SET (stack_pointer_rtx,
10352 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
10353 frame_off));
10354 insn = emit_insn (insn);
10355 }
10356 else
10357 {
10358 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
10359 frame_off = force_const_mem (Pmode, frame_off);
10360
10361 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
10362 annotate_constant_pool_refs (&PATTERN (insn));
10363 }
10364
10365 RTX_FRAME_RELATED_P (insn) = 1;
10366 real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
10367 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
10368 gen_rtx_SET (stack_pointer_rtx,
10369 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
10370 real_frame_off)));
10371
10372 /* Set backchain. */
10373
10374 if (TARGET_BACKCHAIN)
10375 {
10376 if (cfun_frame_layout.backchain_offset)
10377 addr = gen_rtx_MEM (Pmode,
10378 plus_constant (Pmode, stack_pointer_rtx,
10379 cfun_frame_layout.backchain_offset));
10380 else
10381 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
10382 set_mem_alias_set (addr, get_frame_alias_set ());
10383 insn = emit_insn (gen_move_insn (addr, temp_reg));
10384 }
10385
10386 /* If we support non-call exceptions (e.g. for Java),
10387 we need to make sure the backchain pointer is set up
10388 before any possibly trapping memory access. */
10389 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
10390 {
10391 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
10392 emit_clobber (addr);
10393 }
10394 }
10395
10396 /* Save fprs 8 - 15 (64 bit ABI). */
10397
10398 if (cfun_save_high_fprs_p && next_fpr)
10399 {
10400 /* If the stack might be accessed through a different register
10401 we have to make sure that the stack pointer decrement is not
10402 moved below the use of the stack slots. */
10403 s390_emit_stack_tie ();
10404
10405 insn = emit_insn (gen_add2_insn (temp_reg,
10406 GEN_INT (cfun_frame_layout.f8_offset)));
10407
10408 offset = 0;
10409
10410 for (i = FPR8_REGNUM; i <= next_fpr; i++)
10411 if (cfun_fpr_save_p (i))
10412 {
10413 rtx addr = plus_constant (Pmode, stack_pointer_rtx,
10414 cfun_frame_layout.frame_size
10415 + cfun_frame_layout.f8_offset
10416 + offset);
10417
10418 insn = save_fpr (temp_reg, offset, i);
10419 offset += 8;
10420 RTX_FRAME_RELATED_P (insn) = 1;
10421 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
10422 gen_rtx_SET (gen_rtx_MEM (DFmode, addr),
10423 gen_rtx_REG (DFmode, i)));
10424 }
10425 }
10426
10427 /* Set frame pointer, if needed. */
10428
10429 if (frame_pointer_needed)
10430 {
10431 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
10432 RTX_FRAME_RELATED_P (insn) = 1;
10433 }
10434
10435 /* Set up got pointer, if needed. */
10436
10437 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
10438 {
10439 rtx_insn *insns = s390_load_got ();
10440
10441 for (rtx_insn *insn = insns; insn; insn = NEXT_INSN (insn))
10442 annotate_constant_pool_refs (&PATTERN (insn));
10443
10444 emit_insn (insns);
10445 }
10446
10447 if (TARGET_TPF_PROFILING)
10448 {
10449 /* Generate a BAS instruction to serve as a function
10450 entry intercept to facilitate the use of tracing
10451 algorithms located at the branch target. */
10452 emit_insn (gen_prologue_tpf ());
10453
10454 /* Emit a blockage here so that all code
10455 lies between the profiling mechanisms. */
10456 emit_insn (gen_blockage ());
10457 }
10458 }
10459
10460 /* Expand the epilogue into a bunch of separate insns. */
10461
10462 void
10463 s390_emit_epilogue (bool sibcall)
10464 {
10465 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
10466 int area_bottom, area_top, offset = 0;
10467 int next_offset;
10468 rtvec p;
10469 int i;
10470
10471 if (TARGET_TPF_PROFILING)
10472 {
10473
10474 /* Generate a BAS instruction to serve as a function
10475 entry intercept to facilitate the use of tracing
10476 algorithms located at the branch target. */
10477
10478 /* Emit a blockage here so that all code
10479 lies between the profiling mechanisms. */
10480 emit_insn (gen_blockage ());
10481
10482 emit_insn (gen_epilogue_tpf ());
10483 }
10484
10485 /* Check whether to use frame or stack pointer for restore. */
10486
10487 frame_pointer = (frame_pointer_needed
10488 ? hard_frame_pointer_rtx : stack_pointer_rtx);
10489
10490 s390_frame_area (&area_bottom, &area_top);
10491
10492 /* Check whether we can access the register save area.
10493 If not, increment the frame pointer as required. */
10494
10495 if (area_top <= area_bottom)
10496 {
10497 /* Nothing to restore. */
10498 }
10499 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
10500 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
10501 {
10502 /* Area is in range. */
10503 offset = cfun_frame_layout.frame_size;
10504 }
10505 else
10506 {
10507 rtx insn, frame_off, cfa;
10508
10509 offset = area_bottom < 0 ? -area_bottom : 0;
10510 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
10511
10512 cfa = gen_rtx_SET (frame_pointer,
10513 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
10514 if (DISP_IN_RANGE (INTVAL (frame_off)))
10515 {
10516 insn = gen_rtx_SET (frame_pointer,
10517 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
10518 insn = emit_insn (insn);
10519 }
10520 else
10521 {
10522 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
10523 frame_off = force_const_mem (Pmode, frame_off);
10524
10525 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
10526 annotate_constant_pool_refs (&PATTERN (insn));
10527 }
10528 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
10529 RTX_FRAME_RELATED_P (insn) = 1;
10530 }
10531
10532 /* Restore call saved fprs. */
10533
10534 if (TARGET_64BIT)
10535 {
10536 if (cfun_save_high_fprs_p)
10537 {
10538 next_offset = cfun_frame_layout.f8_offset;
10539 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
10540 {
10541 if (cfun_fpr_save_p (i))
10542 {
10543 restore_fpr (frame_pointer,
10544 offset + next_offset, i);
10545 cfa_restores
10546 = alloc_reg_note (REG_CFA_RESTORE,
10547 gen_rtx_REG (DFmode, i), cfa_restores);
10548 next_offset += 8;
10549 }
10550 }
10551 }
10552
10553 }
10554 else
10555 {
10556 next_offset = cfun_frame_layout.f4_offset;
10557 /* f4, f6 */
10558 for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
10559 {
10560 if (cfun_fpr_save_p (i))
10561 {
10562 restore_fpr (frame_pointer,
10563 offset + next_offset, i);
10564 cfa_restores
10565 = alloc_reg_note (REG_CFA_RESTORE,
10566 gen_rtx_REG (DFmode, i), cfa_restores);
10567 next_offset += 8;
10568 }
10569 else if (!TARGET_PACKED_STACK)
10570 next_offset += 8;
10571 }
10572
10573 }
10574
10575 /* Return register. */
10576
10577 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
10578
10579 /* Restore call saved gprs. */
10580
10581 if (cfun_frame_layout.first_restore_gpr != -1)
10582 {
10583 rtx insn, addr;
10584 int i;
10585
10586 /* Check for global registers and save them
10587 to the stack location from which they get restored. */
10588
10589 for (i = cfun_frame_layout.first_restore_gpr;
10590 i <= cfun_frame_layout.last_restore_gpr;
10591 i++)
10592 {
10593 if (global_not_special_regno_p (i))
10594 {
10595 addr = plus_constant (Pmode, frame_pointer,
10596 offset + cfun_frame_layout.gprs_offset
10597 + (i - cfun_frame_layout.first_save_gpr_slot)
10598 * UNITS_PER_LONG);
10599 addr = gen_rtx_MEM (Pmode, addr);
10600 set_mem_alias_set (addr, get_frame_alias_set ());
10601 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
10602 }
10603 else
10604 cfa_restores
10605 = alloc_reg_note (REG_CFA_RESTORE,
10606 gen_rtx_REG (Pmode, i), cfa_restores);
10607 }
10608
10609 if (! sibcall)
10610 {
10611 /* Fetch the return address from the stack before the load multiple;
10612 this helps scheduling.
10613
10614 Only do this if we already decided that r14 needs to be
10615 saved to a stack slot. (And not just because r14 happens to
10616 be in between two GPRs which need saving.) Otherwise it
10617 would be difficult to take that decision back in
10618 s390_optimize_prologue. */
10619 if (cfun_gpr_save_slot (RETURN_REGNUM) == -1)
10620 {
10621 int return_regnum = find_unused_clobbered_reg();
10622 if (!return_regnum)
10623 return_regnum = 4;
10624 return_reg = gen_rtx_REG (Pmode, return_regnum);
10625
10626 addr = plus_constant (Pmode, frame_pointer,
10627 offset + cfun_frame_layout.gprs_offset
10628 + (RETURN_REGNUM
10629 - cfun_frame_layout.first_save_gpr_slot)
10630 * UNITS_PER_LONG);
10631 addr = gen_rtx_MEM (Pmode, addr);
10632 set_mem_alias_set (addr, get_frame_alias_set ());
10633 emit_move_insn (return_reg, addr);
10634
10635 /* Once we did that optimization we have to make sure
10636 s390_optimize_prologue does not try to remove the
10637 store of r14 since we will not be able to find the
10638 load issued here. */
10639 cfun_frame_layout.save_return_addr_p = true;
10640 }
10641 }
10642
10643 insn = restore_gprs (frame_pointer,
10644 offset + cfun_frame_layout.gprs_offset
10645 + (cfun_frame_layout.first_restore_gpr
10646 - cfun_frame_layout.first_save_gpr_slot)
10647 * UNITS_PER_LONG,
10648 cfun_frame_layout.first_restore_gpr,
10649 cfun_frame_layout.last_restore_gpr);
10650 insn = emit_insn (insn);
10651 REG_NOTES (insn) = cfa_restores;
10652 add_reg_note (insn, REG_CFA_DEF_CFA,
10653 plus_constant (Pmode, stack_pointer_rtx,
10654 STACK_POINTER_OFFSET));
10655 RTX_FRAME_RELATED_P (insn) = 1;
10656 }
10657
10658 s390_restore_gprs_from_fprs ();
10659
10660 if (! sibcall)
10661 {
10662
10663 /* Return to caller. */
10664
10665 p = rtvec_alloc (2);
10666
10667 RTVEC_ELT (p, 0) = ret_rtx;
10668 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
10669 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
10670 }
10671 }
10672
10673 /* Implement TARGET_SET_UP_BY_PROLOGUE. */
10674
10675 static void
10676 s300_set_up_by_prologue (hard_reg_set_container *regs)
10677 {
10678 if (cfun->machine->base_reg
10679 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
10680 SET_HARD_REG_BIT (regs->set, REGNO (cfun->machine->base_reg));
10681 }
10682
10683 /* Return true if the function can use simple_return to return outside
10684 of a shrink-wrapped region. At present shrink-wrapping is supported
10685 in all cases. */
10686
10687 bool
10688 s390_can_use_simple_return_insn (void)
10689 {
10690 return true;
10691 }
10692
10693 /* Return true if the epilogue is guaranteed to contain only a return
10694 instruction and if a direct return can therefore be used instead.
10695 One of the main advantages of using direct return instructions
10696 is that we can then use conditional returns. */
10697
10698 bool
10699 s390_can_use_return_insn (void)
10700 {
10701 int i;
10702
10703 if (!reload_completed)
10704 return false;
10705
10706 if (crtl->profile)
10707 return false;
10708
10709 if (TARGET_TPF_PROFILING)
10710 return false;
10711
10712 for (i = 0; i < 16; i++)
10713 if (cfun_gpr_save_slot (i))
10714 return false;
10715
10716 /* For 31 bit this is not covered by the frame_size check below
10717 since f4, f6 are saved in the register save area without needing
10718 additional stack space. */
10719 if (!TARGET_64BIT
10720 && (cfun_fpr_save_p (FPR4_REGNUM) || cfun_fpr_save_p (FPR6_REGNUM)))
10721 return false;
10722
10723 if (cfun->machine->base_reg
10724 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
10725 return false;
10726
10727 return cfun_frame_layout.frame_size == 0;
10728 }
10729
10730 /* The VX ABI differs for vararg functions. Therefore we need the
10731 prototype of the callee to be available when passing vector type
10732 values. */
10733 static const char *
10734 s390_invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
10735 {
10736 return ((TARGET_VX_ABI
10737 && typelist == 0
10738 && VECTOR_TYPE_P (TREE_TYPE (val))
10739 && (funcdecl == NULL_TREE
10740 || (TREE_CODE (funcdecl) == FUNCTION_DECL
10741 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
10742 ? N_("Vector argument passed to unprototyped function")
10743 : NULL);
10744 }
10745
10746
10747 /* Return the size in bytes of a function argument of
10748 type TYPE and/or mode MODE. At least one of TYPE or
10749 MODE must be specified. */
10750
10751 static int
10752 s390_function_arg_size (machine_mode mode, const_tree type)
10753 {
10754 if (type)
10755 return int_size_in_bytes (type);
10756
10757 /* No type info available for some library calls ... */
10758 if (mode != BLKmode)
10759 return GET_MODE_SIZE (mode);
10760
10761 /* If we have neither type nor mode, abort. */
10762 gcc_unreachable ();
10763 }
10764
10765 /* Return true if a function argument of type TYPE and mode MODE
10766 is to be passed in a vector register, if available. */
10767
10768 bool
10769 s390_function_arg_vector (machine_mode mode, const_tree type)
10770 {
10771 if (!TARGET_VX_ABI)
10772 return false;
10773
10774 if (s390_function_arg_size (mode, type) > 16)
10775 return false;
10776
10777 /* No type info available for some library calls ... */
10778 if (!type)
10779 return VECTOR_MODE_P (mode);
10780
10781 /* The ABI says that record types with a single member are treated
10782 just like that member would be. */
10783 while (TREE_CODE (type) == RECORD_TYPE)
10784 {
10785 tree field, single = NULL_TREE;
10786
10787 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
10788 {
10789 if (TREE_CODE (field) != FIELD_DECL)
10790 continue;
10791
10792 if (single == NULL_TREE)
10793 single = TREE_TYPE (field);
10794 else
10795 return false;
10796 }
10797
10798 if (single == NULL_TREE)
10799 return false;
10800 else
10801 {
10802 /* If the field declaration adds extra bytes due to
10803 e.g. padding, this is not accepted as a vector type. */
10804 if (int_size_in_bytes (single) <= 0
10805 || int_size_in_bytes (single) != int_size_in_bytes (type))
10806 return false;
10807 type = single;
10808 }
10809 }
10810
10811 return VECTOR_TYPE_P (type);
10812 }
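/* Illustration of the single-member-record rule above (hypothetical
   types, assuming the vector language extension is enabled):

     struct s1 { __vector signed int v; };          /- passed like the member
     struct s2 { __vector signed int v; int pad; }; /- not a vector argument
     struct s3 { struct s1 inner; };                /- unwrapped recursively

   s2 fails because it has more than one field; a struct whose single
   field is smaller than the struct itself (trailing padding) fails the
   size comparison as well.  */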
10813
10814 /* Return true if a function argument of type TYPE and mode MODE
10815 is to be passed in a floating-point register, if available. */
10816
10817 static bool
10818 s390_function_arg_float (machine_mode mode, const_tree type)
10819 {
10820 if (s390_function_arg_size (mode, type) > 8)
10821 return false;
10822
10823 /* Soft-float changes the ABI: no floating-point registers are used. */
10824 if (TARGET_SOFT_FLOAT)
10825 return false;
10826
10827 /* No type info available for some library calls ... */
10828 if (!type)
10829 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
10830
10831 /* The ABI says that record types with a single member are treated
10832 just like that member would be. */
10833 while (TREE_CODE (type) == RECORD_TYPE)
10834 {
10835 tree field, single = NULL_TREE;
10836
10837 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
10838 {
10839 if (TREE_CODE (field) != FIELD_DECL)
10840 continue;
10841
10842 if (single == NULL_TREE)
10843 single = TREE_TYPE (field);
10844 else
10845 return false;
10846 }
10847
10848 if (single == NULL_TREE)
10849 return false;
10850 else
10851 type = single;
10852 }
10853
10854 return TREE_CODE (type) == REAL_TYPE;
10855 }
10856
10857 /* Return true if a function argument of type TYPE and mode MODE
10858 is to be passed in an integer register, or a pair of integer
10859 registers, if available. */
10860
10861 static bool
10862 s390_function_arg_integer (machine_mode mode, const_tree type)
10863 {
10864 int size = s390_function_arg_size (mode, type);
10865 if (size > 8)
10866 return false;
10867
10868 /* No type info available for some library calls ... */
10869 if (!type)
10870 return GET_MODE_CLASS (mode) == MODE_INT
10871 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
10872
10873 /* We accept small integral (and similar) types. */
10874 if (INTEGRAL_TYPE_P (type)
10875 || POINTER_TYPE_P (type)
10876 || TREE_CODE (type) == NULLPTR_TYPE
10877 || TREE_CODE (type) == OFFSET_TYPE
10878 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
10879 return true;
10880
10881 /* We also accept structs of size 1, 2, 4, 8 that are not
10882 passed in floating-point registers. */
10883 if (AGGREGATE_TYPE_P (type)
10884 && exact_log2 (size) >= 0
10885 && !s390_function_arg_float (mode, type))
10886 return true;
10887
10888 return false;
10889 }
10890
10891 /* Return 1 if a function argument of type TYPE and mode MODE
10892 is to be passed by reference. The ABI specifies that only
10893 structures of size 1, 2, 4, or 8 bytes are passed by value,
10894 all other structures (and complex numbers) are passed by
10895 reference. */
10896
10897 static bool
10898 s390_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
10899 machine_mode mode, const_tree type,
10900 bool named ATTRIBUTE_UNUSED)
10901 {
10902 int size = s390_function_arg_size (mode, type);
10903
10904 if (s390_function_arg_vector (mode, type))
10905 return false;
10906
10907 if (size > 8)
10908 return true;
10909
10910 if (type)
10911 {
10912 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
10913 return true;
10914
10915 if (TREE_CODE (type) == COMPLEX_TYPE
10916 || TREE_CODE (type) == VECTOR_TYPE)
10917 return true;
10918 }
10919
10920 return false;
10921 }
10922
10923 /* Update the data in CUM to advance over an argument of mode MODE and
10924 data type TYPE. (TYPE is null for libcalls where that information
10925 may not be available.) The boolean NAMED specifies whether the
10926 argument is a named argument (as opposed to an unnamed argument
10927 matching an ellipsis). */
10928
10929 static void
10930 s390_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
10931 const_tree type, bool named)
10932 {
10933 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
10934
10935 if (s390_function_arg_vector (mode, type))
10936 {
10937 /* We are called for unnamed vector stdarg arguments which are
10938 passed on the stack. In this case this hook does not have to
10939 do anything since stack arguments are tracked by common
10940 code. */
10941 if (!named)
10942 return;
10943 cum->vrs += 1;
10944 }
10945 else if (s390_function_arg_float (mode, type))
10946 {
10947 cum->fprs += 1;
10948 }
10949 else if (s390_function_arg_integer (mode, type))
10950 {
10951 int size = s390_function_arg_size (mode, type);
10952 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
10953 }
10954 else
10955 gcc_unreachable ();
10956 }
10957
10958 /* Define where to put the arguments to a function.
10959 Value is zero to push the argument on the stack,
10960 or a hard register in which to store the argument.
10961
10962 MODE is the argument's machine mode.
10963 TYPE is the data type of the argument (as a tree).
10964 This is null for libcalls where that information may
10965 not be available.
10966 CUM is a variable of type CUMULATIVE_ARGS which gives info about
10967 the preceding args and about the function being called.
10968 NAMED is nonzero if this argument is a named parameter
10969 (otherwise it is an extra parameter matching an ellipsis).
10970
10971 On S/390, we use general purpose registers 2 through 6 to
10972 pass integer, pointer, and certain structure arguments, and
10973 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
10974 to pass floating point arguments. All remaining arguments
10975 are pushed to the stack. */
10976
10977 static rtx
10978 s390_function_arg (cumulative_args_t cum_v, machine_mode mode,
10979 const_tree type, bool named)
10980 {
10981 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
10982
10983 if (!named)
10984 s390_check_type_for_vector_abi (type, true, false);
10985
10986 if (s390_function_arg_vector (mode, type))
10987 {
10988 /* Vector arguments being part of the ellipsis are passed on the
10989 stack. */
10990 if (!named || (cum->vrs + 1 > VEC_ARG_NUM_REG))
10991 return NULL_RTX;
10992
10993 return gen_rtx_REG (mode, cum->vrs + FIRST_VEC_ARG_REGNO);
10994 }
10995 else if (s390_function_arg_float (mode, type))
10996 {
10997 if (cum->fprs + 1 > FP_ARG_NUM_REG)
10998 return NULL_RTX;
10999 else
11000 return gen_rtx_REG (mode, cum->fprs + 16);
11001 }
11002 else if (s390_function_arg_integer (mode, type))
11003 {
11004 int size = s390_function_arg_size (mode, type);
11005 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
11006
11007 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
11008 return NULL_RTX;
11009 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
11010 return gen_rtx_REG (mode, cum->gprs + 2);
11011 else if (n_gprs == 2)
11012 {
11013 rtvec p = rtvec_alloc (2);
11014
11015 RTVEC_ELT (p, 0)
11016 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
11017 const0_rtx);
11018 RTVEC_ELT (p, 1)
11019 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
11020 GEN_INT (4));
11021
11022 return gen_rtx_PARALLEL (mode, p);
11023 }
11024 }
11025
11026 /* After the real arguments, expand_call calls us once again
11027 with a void_type_node type. Whatever we return here is
11028 passed as operand 2 to the call expanders.
11029
11030 We don't need this feature ... */
11031 else if (type == void_type_node)
11032 return const0_rtx;
11033
11034 gcc_unreachable ();
11035 }
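/* Illustration of the n_gprs == 2 case above (hypothetical call): an
   8-byte integer argument compiled with -m31 does not fit a single
   4-byte GPR, so it is described as a PARALLEL of two SImode pieces,
   the lower-numbered register holding bytes 0..3 and the next register
   bytes 4..7, e.g. r2/r3 when cum->gprs is still 0.  */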
11036
11037 /* Return true if return values of type TYPE should be returned
11038 in a memory buffer whose address is passed by the caller as
11039 hidden first argument. */
11040
11041 static bool
11042 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
11043 {
11044 /* We accept small integral (and similar) types. */
11045 if (INTEGRAL_TYPE_P (type)
11046 || POINTER_TYPE_P (type)
11047 || TREE_CODE (type) == OFFSET_TYPE
11048 || TREE_CODE (type) == REAL_TYPE)
11049 return int_size_in_bytes (type) > 8;
11050
11051 /* vector types which fit into a VR. */
11052 if (TARGET_VX_ABI
11053 && VECTOR_TYPE_P (type)
11054 && int_size_in_bytes (type) <= 16)
11055 return false;
11056
11057 /* Aggregates and similar constructs are always returned
11058 in memory. */
11059 if (AGGREGATE_TYPE_P (type)
11060 || TREE_CODE (type) == COMPLEX_TYPE
11061 || VECTOR_TYPE_P (type))
11062 return true;
11063
11064 /* ??? We get called on all sorts of random stuff from
11065 aggregate_value_p. We can't abort, but it's not clear
11066 what's safe to return. Pretend it's a struct I guess. */
11067 return true;
11068 }
11069
11070 /* Function arguments and return values are promoted to word size. */
11071
11072 static machine_mode
11073 s390_promote_function_mode (const_tree type, machine_mode mode,
11074 int *punsignedp,
11075 const_tree fntype ATTRIBUTE_UNUSED,
11076 int for_return ATTRIBUTE_UNUSED)
11077 {
11078 if (INTEGRAL_MODE_P (mode)
11079 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
11080 {
11081 if (type != NULL_TREE && POINTER_TYPE_P (type))
11082 *punsignedp = POINTERS_EXTEND_UNSIGNED;
11083 return Pmode;
11084 }
11085
11086 return mode;
11087 }
11088
11089 /* Define where to return a (scalar) value of type RET_TYPE.
11090 If RET_TYPE is null, define where to return a (scalar)
11091 value of mode MODE from a libcall. */
11092
11093 static rtx
11094 s390_function_and_libcall_value (machine_mode mode,
11095 const_tree ret_type,
11096 const_tree fntype_or_decl,
11097 bool outgoing ATTRIBUTE_UNUSED)
11098 {
11099 /* For vector return types it is important to use the RET_TYPE
11100 argument whenever available since the middle-end might have
11101 changed the mode to a scalar mode. */
11102 bool vector_ret_type_p = ((ret_type && VECTOR_TYPE_P (ret_type))
11103 || (!ret_type && VECTOR_MODE_P (mode)));
11104
11105 /* For normal functions perform the promotion as
11106 promote_function_mode would do. */
11107 if (ret_type)
11108 {
11109 int unsignedp = TYPE_UNSIGNED (ret_type);
11110 mode = promote_function_mode (ret_type, mode, &unsignedp,
11111 fntype_or_decl, 1);
11112 }
11113
11114 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT
11115 || SCALAR_FLOAT_MODE_P (mode)
11116 || (TARGET_VX_ABI && vector_ret_type_p));
11117 gcc_assert (GET_MODE_SIZE (mode) <= (TARGET_VX_ABI ? 16 : 8));
11118
11119 if (TARGET_VX_ABI && vector_ret_type_p)
11120 return gen_rtx_REG (mode, FIRST_VEC_ARG_REGNO);
11121 else if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
11122 return gen_rtx_REG (mode, 16);
11123 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
11124 || UNITS_PER_LONG == UNITS_PER_WORD)
11125 return gen_rtx_REG (mode, 2);
11126 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
11127 {
11128 /* This case is triggered when returning a 64 bit value with
11129 -m31 -mzarch. Although the value would fit into a single
11130 register it has to be forced into a 32 bit register pair in
11131 order to match the ABI. */
11132 rtvec p = rtvec_alloc (2);
11133
11134 RTVEC_ELT (p, 0)
11135 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
11136 RTVEC_ELT (p, 1)
11137 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
11138
11139 return gen_rtx_PARALLEL (mode, p);
11140 }
11141
11142 gcc_unreachable ();
11143 }
11144
11145 /* Define where to return a scalar return value of type RET_TYPE. */
11146
11147 static rtx
11148 s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
11149 bool outgoing)
11150 {
11151 return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
11152 fn_decl_or_type, outgoing);
11153 }
11154
11155 /* Define where to return a scalar libcall return value of mode
11156 MODE. */
11157
11158 static rtx
11159 s390_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
11160 {
11161 return s390_function_and_libcall_value (mode, NULL_TREE,
11162 NULL_TREE, true);
11163 }
11164
11165
11166 /* Create and return the va_list datatype.
11167
11168 On S/390, va_list is an array type equivalent to
11169
11170 typedef struct __va_list_tag
11171 {
11172 long __gpr;
11173 long __fpr;
11174 void *__overflow_arg_area;
11175 void *__reg_save_area;
11176 } va_list[1];
11177
11178 where __gpr and __fpr hold the number of general purpose
11179 or floating point arguments used up to now, respectively,
11180 __overflow_arg_area points to the stack location of the
11181 next argument passed on the stack, and __reg_save_area
11182 always points to the start of the register area in the
11183 call frame of the current function. The function prologue
11184 saves all registers used for argument passing into this
11185 area if the function uses variable arguments. */
11186
11187 static tree
11188 s390_build_builtin_va_list (void)
11189 {
11190 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
11191
11192 record = lang_hooks.types.make_type (RECORD_TYPE);
11193
11194 type_decl =
11195 build_decl (BUILTINS_LOCATION,
11196 TYPE_DECL, get_identifier ("__va_list_tag"), record);
11197
11198 f_gpr = build_decl (BUILTINS_LOCATION,
11199 FIELD_DECL, get_identifier ("__gpr"),
11200 long_integer_type_node);
11201 f_fpr = build_decl (BUILTINS_LOCATION,
11202 FIELD_DECL, get_identifier ("__fpr"),
11203 long_integer_type_node);
11204 f_ovf = build_decl (BUILTINS_LOCATION,
11205 FIELD_DECL, get_identifier ("__overflow_arg_area"),
11206 ptr_type_node);
11207 f_sav = build_decl (BUILTINS_LOCATION,
11208 FIELD_DECL, get_identifier ("__reg_save_area"),
11209 ptr_type_node);
11210
11211 va_list_gpr_counter_field = f_gpr;
11212 va_list_fpr_counter_field = f_fpr;
11213
11214 DECL_FIELD_CONTEXT (f_gpr) = record;
11215 DECL_FIELD_CONTEXT (f_fpr) = record;
11216 DECL_FIELD_CONTEXT (f_ovf) = record;
11217 DECL_FIELD_CONTEXT (f_sav) = record;
11218
11219 TYPE_STUB_DECL (record) = type_decl;
11220 TYPE_NAME (record) = type_decl;
11221 TYPE_FIELDS (record) = f_gpr;
11222 DECL_CHAIN (f_gpr) = f_fpr;
11223 DECL_CHAIN (f_fpr) = f_ovf;
11224 DECL_CHAIN (f_ovf) = f_sav;
11225
11226 layout_type (record);
11227
11228 /* The correct type is an array type of one element. */
11229 return build_array_type (record, build_index_type (size_zero_node));
11230 }
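/* Size sketch for the record built above (illustrative): on 64 bit the
   four fields occupy 8 + 8 + 8 + 8 = 32 bytes, on 31 bit
   4 + 4 + 4 + 4 = 16 bytes.  Since va_list itself is a one-element
   array of that record, "va_list ap;" declares one such block on the
   stack and decays to a pointer when passed around.  */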
11231
11232 /* Implement va_start by filling the va_list structure VALIST.
11233 STDARG_P is always true, and ignored.
11234 NEXTARG points to the first anonymous stack argument.
11235
11236 The following global variables are used to initialize
11237 the va_list structure:
11238
11239 crtl->args.info:
11240 holds number of gprs and fprs used for named arguments.
11241 crtl->args.arg_offset_rtx:
11242 holds the offset of the first anonymous stack argument
11243 (relative to the virtual arg pointer). */
11244
11245 static void
11246 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
11247 {
11248 HOST_WIDE_INT n_gpr, n_fpr;
11249 int off;
11250 tree f_gpr, f_fpr, f_ovf, f_sav;
11251 tree gpr, fpr, ovf, sav, t;
11252
11253 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
11254 f_fpr = DECL_CHAIN (f_gpr);
11255 f_ovf = DECL_CHAIN (f_fpr);
11256 f_sav = DECL_CHAIN (f_ovf);
11257
11258 valist = build_simple_mem_ref (valist);
11259 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
11260 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
11261 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
11262 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
11263
11264 /* Count number of gp and fp argument registers used. */
11265
11266 n_gpr = crtl->args.info.gprs;
11267 n_fpr = crtl->args.info.fprs;
11268
11269 if (cfun->va_list_gpr_size)
11270 {
11271 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
11272 build_int_cst (NULL_TREE, n_gpr));
11273 TREE_SIDE_EFFECTS (t) = 1;
11274 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11275 }
11276
11277 if (cfun->va_list_fpr_size)
11278 {
11279 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
11280 build_int_cst (NULL_TREE, n_fpr));
11281 TREE_SIDE_EFFECTS (t) = 1;
11282 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11283 }
11284
11285 /* Find the overflow area.
11286 FIXME: This currently is too pessimistic when the vector ABI is
11287 enabled. In that case we *always* set up the overflow area
11288 pointer. */
11289 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
11290 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG
11291 || TARGET_VX_ABI)
11292 {
11293 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
11294
11295 off = INTVAL (crtl->args.arg_offset_rtx);
11296 off = off < 0 ? 0 : off;
11297 if (TARGET_DEBUG_ARG)
11298 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
11299 (int)n_gpr, (int)n_fpr, off);
11300
11301 t = fold_build_pointer_plus_hwi (t, off);
11302
11303 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
11304 TREE_SIDE_EFFECTS (t) = 1;
11305 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11306 }
11307
11308 /* Find the register save area. */
11309 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
11310 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
11311 {
11312 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
11313 t = fold_build_pointer_plus_hwi (t, -RETURN_REGNUM * UNITS_PER_LONG);
11314
11315 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
11316 TREE_SIDE_EFFECTS (t) = 1;
11317 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11318 }
11319 }
11320
11321 /* Implement va_arg by updating the va_list structure
11322 VALIST as required to retrieve an argument of type
11323 TYPE, and returning that argument.
11324
11325 Generates code equivalent to:
11326
11327 if (integral value) {
11328 if (size <= 4 && args.gpr < 5 ||
11329 size > 4 && args.gpr < 4 )
11330 ret = args.reg_save_area[args.gpr+8]
11331 else
11332 ret = *args.overflow_arg_area++;
11333 } else if (vector value) {
11334 ret = *args.overflow_arg_area;
11335 args.overflow_arg_area += size / 8;
11336 } else if (float value) {
11337 if (args.fgpr < 2)
11338 ret = args.reg_save_area[args.fpr+64]
11339 else
11340 ret = *args.overflow_arg_area++;
11341 } else if (aggregate value) {
11342 if (args.gpr < 5)
11343 ret = *args.reg_save_area[args.gpr]
11344 else
11345 ret = **args.overflow_arg_area++;
11346 } */
11347
11348 static tree
11349 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
11350 gimple_seq *post_p ATTRIBUTE_UNUSED)
11351 {
11352 tree f_gpr, f_fpr, f_ovf, f_sav;
11353 tree gpr, fpr, ovf, sav, reg, t, u;
11354 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
11355 tree lab_false, lab_over;
11356 tree addr = create_tmp_var (ptr_type_node, "addr");
11357 bool left_align_p; /* How a value < UNITS_PER_LONG is aligned within
11358 a stack slot. */
11359
11360 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
11361 f_fpr = DECL_CHAIN (f_gpr);
11362 f_ovf = DECL_CHAIN (f_fpr);
11363 f_sav = DECL_CHAIN (f_ovf);
11364
11365 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
11366 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
11367 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
11368
11369 /* The tree for args* cannot be shared between gpr/fpr and ovf since
11370 both appear on a lhs. */
11371 valist = unshare_expr (valist);
11372 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
11373
11374 size = int_size_in_bytes (type);
11375
11376 s390_check_type_for_vector_abi (type, true, false);
11377
11378 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
11379 {
11380 if (TARGET_DEBUG_ARG)
11381 {
11382 fprintf (stderr, "va_arg: aggregate type");
11383 debug_tree (type);
11384 }
11385
11386 /* Aggregates are passed by reference. */
11387 indirect_p = 1;
11388 reg = gpr;
11389 n_reg = 1;
11390
11391 /* Kernel stack layout on 31 bit: it is assumed here that no padding
11392 will be added by s390_frame_info because for va_args an even
11393 number of GPRs always has to be saved (r15-r2 = 14 regs). */
11394 sav_ofs = 2 * UNITS_PER_LONG;
11395 sav_scale = UNITS_PER_LONG;
11396 size = UNITS_PER_LONG;
11397 max_reg = GP_ARG_NUM_REG - n_reg;
11398 left_align_p = false;
11399 }
11400 else if (s390_function_arg_vector (TYPE_MODE (type), type))
11401 {
11402 if (TARGET_DEBUG_ARG)
11403 {
11404 fprintf (stderr, "va_arg: vector type");
11405 debug_tree (type);
11406 }
11407
11408 indirect_p = 0;
11409 reg = NULL_TREE;
11410 n_reg = 0;
11411 sav_ofs = 0;
11412 sav_scale = 8;
11413 max_reg = 0;
11414 left_align_p = true;
11415 }
11416 else if (s390_function_arg_float (TYPE_MODE (type), type))
11417 {
11418 if (TARGET_DEBUG_ARG)
11419 {
11420 fprintf (stderr, "va_arg: float type");
11421 debug_tree (type);
11422 }
11423
11424 /* FP args go in FP registers, if present. */
11425 indirect_p = 0;
11426 reg = fpr;
11427 n_reg = 1;
11428 sav_ofs = 16 * UNITS_PER_LONG;
11429 sav_scale = 8;
11430 max_reg = FP_ARG_NUM_REG - n_reg;
11431 left_align_p = false;
11432 }
11433 else
11434 {
11435 if (TARGET_DEBUG_ARG)
11436 {
11437 fprintf (stderr, "va_arg: other type");
11438 debug_tree (type);
11439 }
11440
11441 /* Otherwise into GP registers. */
11442 indirect_p = 0;
11443 reg = gpr;
11444 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
11445
11446 /* Kernel stack layout on 31 bit: it is assumed here that no padding
11447 will be added by s390_frame_info because for va_args an even
11448 number of gprs always has to be saved (r15-r2 = 14 regs). */
11449 sav_ofs = 2 * UNITS_PER_LONG;
11450
11451 if (size < UNITS_PER_LONG)
11452 sav_ofs += UNITS_PER_LONG - size;
11453
11454 sav_scale = UNITS_PER_LONG;
11455 max_reg = GP_ARG_NUM_REG - n_reg;
11456 left_align_p = false;
11457 }
11458
11459 /* Pull the value out of the saved registers ... */
11460
11461 if (reg != NULL_TREE)
11462 {
11463 /*
11464 if (reg > ((typeof (reg))max_reg))
11465 goto lab_false;
11466
11467 addr = sav + sav_ofs + reg * sav_scale;
11468
11469 goto lab_over;
11470
11471 lab_false:
11472 */
11473
11474 lab_false = create_artificial_label (UNKNOWN_LOCATION);
11475 lab_over = create_artificial_label (UNKNOWN_LOCATION);
11476
11477 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
11478 t = build2 (GT_EXPR, boolean_type_node, reg, t);
11479 u = build1 (GOTO_EXPR, void_type_node, lab_false);
11480 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
11481 gimplify_and_add (t, pre_p);
11482
11483 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
11484 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
11485 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
11486 t = fold_build_pointer_plus (t, u);
11487
11488 gimplify_assign (addr, t, pre_p);
11489
11490 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
11491
11492 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
11493 }
11494
11495 /* ... Otherwise out of the overflow area. */
11496
11497 t = ovf;
11498 if (size < UNITS_PER_LONG && !left_align_p)
11499 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG - size);
11500
11501 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
11502
11503 gimplify_assign (addr, t, pre_p);
11504
11505 if (size < UNITS_PER_LONG && left_align_p)
11506 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG);
11507 else
11508 t = fold_build_pointer_plus_hwi (t, size);
11509
11510 gimplify_assign (ovf, t, pre_p);
11511
11512 if (reg != NULL_TREE)
11513 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
11514
11515
11516 /* Increment register save count. */
11517
11518 if (n_reg > 0)
11519 {
11520 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
11521 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
11522 gimplify_and_add (u, pre_p);
11523 }
11524
11525 if (indirect_p)
11526 {
11527 t = build_pointer_type_for_mode (build_pointer_type (type),
11528 ptr_mode, true);
11529 addr = fold_convert (t, addr);
11530 addr = build_va_arg_indirect_ref (addr);
11531 }
11532 else
11533 {
11534 t = build_pointer_type_for_mode (type, ptr_mode, true);
11535 addr = fold_convert (t, addr);
11536 }
11537
11538 return build_va_arg_indirect_ref (addr);
11539 }
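
/* Illustrative sketch only (not part of the original sources): inside a
   variadic callee such as

     int sum (int n, ...)
     {
       va_list ap;
       int i; double d;
       va_start (ap, n);
       i = va_arg (ap, int);
       d = va_arg (ap, double);
       va_end (ap);
       ...
     }

   the sequences gimplified above fetch the int from the GPR save area
   and the double from the FPR save area, and fall back to the overflow
   area once GP_ARG_NUM_REG resp. FP_ARG_NUM_REG register arguments are
   used up; aggregates are fetched indirectly through a pointer that is
   passed like an integer argument.  */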
11540
11541 /* Emit rtl for the tbegin or tbegin_retry (RETRY != NULL_RTX)
11542 expanders.
11543 DEST - Register location where CC will be stored.
11544 TDB - Pointer to a 256 byte area where to store the transaction
11545 diagnostic block. NULL if TDB is not needed.
11546 RETRY - Retry count value. If non-NULL a retry loop for CC2
11547 is emitted.
11548 CLOBBER_FPRS_P - If true clobbers for all FPRs are emitted as part
11549 of the tbegin instruction pattern. */
11550
11551 void
11552 s390_expand_tbegin (rtx dest, rtx tdb, rtx retry, bool clobber_fprs_p)
11553 {
11554 rtx retry_plus_two = gen_reg_rtx (SImode);
11555 rtx retry_reg = gen_reg_rtx (SImode);
11556 rtx_code_label *retry_label = NULL;
11557
11558 if (retry != NULL_RTX)
11559 {
11560 emit_move_insn (retry_reg, retry);
11561 emit_insn (gen_addsi3 (retry_plus_two, retry_reg, const2_rtx));
11562 emit_insn (gen_addsi3 (retry_reg, retry_reg, const1_rtx));
11563 retry_label = gen_label_rtx ();
11564 emit_label (retry_label);
11565 }
11566
11567 if (clobber_fprs_p)
11568 emit_insn (gen_tbegin_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK), tdb));
11569 else
11570 emit_insn (gen_tbegin_nofloat_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
11571 tdb));
11572
11573 emit_move_insn (dest, gen_rtx_UNSPEC (SImode,
11574 gen_rtvec (1, gen_rtx_REG (CCRAWmode,
11575 CC_REGNUM)),
11576 UNSPEC_CC_TO_INT));
11577 if (retry != NULL_RTX)
11578 {
11579 const int CC0 = 1 << 3;
11580 const int CC1 = 1 << 2;
11581 const int CC3 = 1 << 0;
11582 rtx jump;
11583 rtx count = gen_reg_rtx (SImode);
11584 rtx_code_label *leave_label = gen_label_rtx ();
11585
11586 /* Exit for success and permanent failures. */
11587 jump = s390_emit_jump (leave_label,
11588 gen_rtx_EQ (VOIDmode,
11589 gen_rtx_REG (CCRAWmode, CC_REGNUM),
11590 gen_rtx_CONST_INT (VOIDmode, CC0 | CC1 | CC3)));
11591 LABEL_NUSES (leave_label) = 1;
11592
11593 /* CC2 - transient failure. Perform retry with ppa. */
11594 emit_move_insn (count, retry_plus_two);
11595 emit_insn (gen_subsi3 (count, count, retry_reg));
11596 emit_insn (gen_tx_assist (count));
11597 jump = emit_jump_insn (gen_doloop_si64 (retry_label,
11598 retry_reg,
11599 retry_reg));
11600 JUMP_LABEL (jump) = retry_label;
11601 LABEL_NUSES (retry_label) = 1;
11602 emit_label (leave_label);
11603 }
11604 }
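
/* Rough C-like sketch of the code emitted above when RETRY is given
   (illustrative only; the local names are not part of the sources):

     retry_plus_two = retry + 2;
     retry_reg      = retry + 1;
   retry_label:
     cc = tbegin (tdb);
     if (cc is CC0, CC1 or CC3)            // success or permanent failure
       goto leave_label;
     ppa (retry_plus_two - retry_reg);     // transaction-abort assist
     if (--retry_reg != 0)
       goto retry_label;
   leave_label:
     ;                                     // cc is available in DEST  */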
11605
11606
11607 /* Return the decl for the target specific builtin with the function
11608 code FCODE. */
11609
11610 static tree
11611 s390_builtin_decl (unsigned fcode, bool initialized_p ATTRIBUTE_UNUSED)
11612 {
11613 if (fcode >= S390_BUILTIN_MAX)
11614 return error_mark_node;
11615
11616 return s390_builtin_decls[fcode];
11617 }
11618
11619 /* We call mcount before the function prologue. So a profiled leaf
11620 function should stay a leaf function. */
11621
11622 static bool
11623 s390_keep_leaf_when_profiled ()
11624 {
11625 return true;
11626 }
11627
11628 /* Output assembly code for the trampoline template to
11629 stdio stream FILE.
11630
11631 On S/390, we use gpr 1 internally in the trampoline code;
11632 gpr 0 is used to hold the static chain. */
11633
11634 static void
11635 s390_asm_trampoline_template (FILE *file)
11636 {
11637 rtx op[2];
11638 op[0] = gen_rtx_REG (Pmode, 0);
11639 op[1] = gen_rtx_REG (Pmode, 1);
11640
11641 if (TARGET_64BIT)
11642 {
11643 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
11644 output_asm_insn ("lmg\t%0,%1,14(%1)", op); /* 6 byte */
11645 output_asm_insn ("br\t%1", op); /* 2 byte */
11646 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
11647 }
11648 else
11649 {
11650 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
11651 output_asm_insn ("lm\t%0,%1,6(%1)", op); /* 4 byte */
11652 output_asm_insn ("br\t%1", op); /* 2 byte */
11653 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
11654 }
11655 }
11656
11657 /* Emit RTL insns to initialize the variable parts of a trampoline.
11658 FNADDR is an RTX for the address of the function's pure code.
11659 CXT is an RTX for the static chain value for the function. */
11660
11661 static void
11662 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
11663 {
11664 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
11665 rtx mem;
11666
11667 emit_block_move (m_tramp, assemble_trampoline_template (),
11668 GEN_INT (2 * UNITS_PER_LONG), BLOCK_OP_NORMAL);
11669
11670 mem = adjust_address (m_tramp, Pmode, 2 * UNITS_PER_LONG);
11671 emit_move_insn (mem, cxt);
11672 mem = adjust_address (m_tramp, Pmode, 3 * UNITS_PER_LONG);
11673 emit_move_insn (mem, fnaddr);
11674 }
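
/* Resulting trampoline layout in the 64-bit case (sketch only, offsets
   assume UNITS_PER_LONG == 8):

     offset  0:  basr  %r1,0             ; point %r1 at offset 2
     offset  2:  lmg   %r0,%r1,14(%r1)   ; load the two words below
     offset  8:  br    %r1
     offset 10:  padding from the template
     offset 16:  static chain value      ; stored by s390_trampoline_init
     offset 24:  function address        ; stored by s390_trampoline_init  */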
11675
11676 /* Output assembler code to FILE to increment profiler label # LABELNO
11677 for profiling a function entry. */
11678
11679 void
11680 s390_function_profiler (FILE *file, int labelno)
11681 {
11682 rtx op[7];
11683
11684 char label[128];
11685 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
11686
11687 fprintf (file, "# function profiler \n");
11688
11689 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
11690 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
11691 op[1] = gen_rtx_MEM (Pmode, plus_constant (Pmode, op[1], UNITS_PER_LONG));
11692
11693 op[2] = gen_rtx_REG (Pmode, 1);
11694 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
11695 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
11696
11697 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
11698 if (flag_pic)
11699 {
11700 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
11701 op[4] = gen_rtx_CONST (Pmode, op[4]);
11702 }
11703
11704 if (TARGET_64BIT)
11705 {
11706 output_asm_insn ("stg\t%0,%1", op);
11707 output_asm_insn ("larl\t%2,%3", op);
11708 output_asm_insn ("brasl\t%0,%4", op);
11709 output_asm_insn ("lg\t%0,%1", op);
11710 }
11711 else if (!flag_pic)
11712 {
11713 op[6] = gen_label_rtx ();
11714
11715 output_asm_insn ("st\t%0,%1", op);
11716 output_asm_insn ("bras\t%2,%l6", op);
11717 output_asm_insn (".long\t%4", op);
11718 output_asm_insn (".long\t%3", op);
11719 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
11720 output_asm_insn ("l\t%0,0(%2)", op);
11721 output_asm_insn ("l\t%2,4(%2)", op);
11722 output_asm_insn ("basr\t%0,%0", op);
11723 output_asm_insn ("l\t%0,%1", op);
11724 }
11725 else
11726 {
11727 op[5] = gen_label_rtx ();
11728 op[6] = gen_label_rtx ();
11729
11730 output_asm_insn ("st\t%0,%1", op);
11731 output_asm_insn ("bras\t%2,%l6", op);
11732 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
11733 output_asm_insn (".long\t%4-%l5", op);
11734 output_asm_insn (".long\t%3-%l5", op);
11735 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
11736 output_asm_insn ("lr\t%0,%2", op);
11737 output_asm_insn ("a\t%0,0(%2)", op);
11738 output_asm_insn ("a\t%2,4(%2)", op);
11739 output_asm_insn ("basr\t%0,%0", op);
11740 output_asm_insn ("l\t%0,%1", op);
11741 }
11742 }
11743
11744 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
11745 into its SYMBOL_REF_FLAGS. */
11746
11747 static void
11748 s390_encode_section_info (tree decl, rtx rtl, int first)
11749 {
11750 default_encode_section_info (decl, rtl, first);
11751
11752 if (TREE_CODE (decl) == VAR_DECL)
11753 {
11754 /* If a variable has a forced alignment to < 2 bytes, mark it
11755 with SYMBOL_FLAG_ALIGN1 to prevent it from being used as LARL
11756 operand. */
11757 if (DECL_USER_ALIGN (decl) && DECL_ALIGN (decl) < 16)
11758 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_ALIGN1;
11759 if (!DECL_SIZE (decl)
11760 || !DECL_ALIGN (decl)
11761 || !tree_fits_shwi_p (DECL_SIZE (decl))
11762 || (DECL_ALIGN (decl) <= 64
11763 && DECL_ALIGN (decl) != tree_to_shwi (DECL_SIZE (decl))))
11764 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
11765 }
11766
11767 /* Literal pool references don't have a decl so they are handled
11768 differently here. We rely on the information in the MEM_ALIGN
11769 entry to decide upon natural alignment. */
11770 if (MEM_P (rtl)
11771 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
11772 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0))
11773 && (MEM_ALIGN (rtl) == 0
11774 || GET_MODE_BITSIZE (GET_MODE (rtl)) == 0
11775 || MEM_ALIGN (rtl) < GET_MODE_BITSIZE (GET_MODE (rtl))))
11776 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
11777 }
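
/* Illustrative example only: a declaration such as

     char buf[10] __attribute__ ((aligned (1)));

   gets SYMBOL_FLAG_ALIGN1 set above, since LARL can only form even
   addresses and therefore must not be used for such a symbol.  */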
11778
11779 /* Output thunk to FILE that implements a C++ virtual function call (with
11780 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
11781 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
11782 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
11783 relative to the resulting this pointer. */
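
/* Conceptually (sketch only), the generated thunk performs:

     this += delta;
     if (vcall_offset != 0)
       this += *(long *) (*(char **) this + vcall_offset);
     goto *function;  */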
11784
11785 static void
11786 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
11787 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
11788 tree function)
11789 {
11790 rtx op[10];
11791 int nonlocal = 0;
11792
11793 /* Make sure unwind info is emitted for the thunk if needed. */
11794 final_start_function (emit_barrier (), file, 1);
11795
11796 /* Operand 0 is the target function. */
11797 op[0] = XEXP (DECL_RTL (function), 0);
11798 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
11799 {
11800 nonlocal = 1;
11801 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
11802 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
11803 op[0] = gen_rtx_CONST (Pmode, op[0]);
11804 }
11805
11806 /* Operand 1 is the 'this' pointer. */
11807 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
11808 op[1] = gen_rtx_REG (Pmode, 3);
11809 else
11810 op[1] = gen_rtx_REG (Pmode, 2);
11811
11812 /* Operand 2 is the delta. */
11813 op[2] = GEN_INT (delta);
11814
11815 /* Operand 3 is the vcall_offset. */
11816 op[3] = GEN_INT (vcall_offset);
11817
11818 /* Operand 4 is the temporary register. */
11819 op[4] = gen_rtx_REG (Pmode, 1);
11820
11821 /* Operands 5 to 8 can be used as labels. */
11822 op[5] = NULL_RTX;
11823 op[6] = NULL_RTX;
11824 op[7] = NULL_RTX;
11825 op[8] = NULL_RTX;
11826
11827 /* Operand 9 can be used for temporary register. */
11828 op[9] = NULL_RTX;
11829
11830 /* Generate code. */
11831 if (TARGET_64BIT)
11832 {
11833 /* Setup literal pool pointer if required. */
11834 if ((!DISP_IN_RANGE (delta)
11835 && !CONST_OK_FOR_K (delta)
11836 && !CONST_OK_FOR_Os (delta))
11837 || (!DISP_IN_RANGE (vcall_offset)
11838 && !CONST_OK_FOR_K (vcall_offset)
11839 && !CONST_OK_FOR_Os (vcall_offset)))
11840 {
11841 op[5] = gen_label_rtx ();
11842 output_asm_insn ("larl\t%4,%5", op);
11843 }
11844
11845 /* Add DELTA to this pointer. */
11846 if (delta)
11847 {
11848 if (CONST_OK_FOR_J (delta))
11849 output_asm_insn ("la\t%1,%2(%1)", op);
11850 else if (DISP_IN_RANGE (delta))
11851 output_asm_insn ("lay\t%1,%2(%1)", op);
11852 else if (CONST_OK_FOR_K (delta))
11853 output_asm_insn ("aghi\t%1,%2", op);
11854 else if (CONST_OK_FOR_Os (delta))
11855 output_asm_insn ("agfi\t%1,%2", op);
11856 else
11857 {
11858 op[6] = gen_label_rtx ();
11859 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
11860 }
11861 }
11862
11863 /* Perform vcall adjustment. */
11864 if (vcall_offset)
11865 {
11866 if (DISP_IN_RANGE (vcall_offset))
11867 {
11868 output_asm_insn ("lg\t%4,0(%1)", op);
11869 output_asm_insn ("ag\t%1,%3(%4)", op);
11870 }
11871 else if (CONST_OK_FOR_K (vcall_offset))
11872 {
11873 output_asm_insn ("lghi\t%4,%3", op);
11874 output_asm_insn ("ag\t%4,0(%1)", op);
11875 output_asm_insn ("ag\t%1,0(%4)", op);
11876 }
11877 else if (CONST_OK_FOR_Os (vcall_offset))
11878 {
11879 output_asm_insn ("lgfi\t%4,%3", op);
11880 output_asm_insn ("ag\t%4,0(%1)", op);
11881 output_asm_insn ("ag\t%1,0(%4)", op);
11882 }
11883 else
11884 {
11885 op[7] = gen_label_rtx ();
11886 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
11887 output_asm_insn ("ag\t%4,0(%1)", op);
11888 output_asm_insn ("ag\t%1,0(%4)", op);
11889 }
11890 }
11891
11892 /* Jump to target. */
11893 output_asm_insn ("jg\t%0", op);
11894
11895 /* Output literal pool if required. */
11896 if (op[5])
11897 {
11898 output_asm_insn (".align\t4", op);
11899 targetm.asm_out.internal_label (file, "L",
11900 CODE_LABEL_NUMBER (op[5]));
11901 }
11902 if (op[6])
11903 {
11904 targetm.asm_out.internal_label (file, "L",
11905 CODE_LABEL_NUMBER (op[6]));
11906 output_asm_insn (".long\t%2", op);
11907 }
11908 if (op[7])
11909 {
11910 targetm.asm_out.internal_label (file, "L",
11911 CODE_LABEL_NUMBER (op[7]));
11912 output_asm_insn (".long\t%3", op);
11913 }
11914 }
11915 else
11916 {
11917 /* Setup base pointer if required. */
11918 if (!vcall_offset
11919 || (!DISP_IN_RANGE (delta)
11920 && !CONST_OK_FOR_K (delta)
11921 && !CONST_OK_FOR_Os (delta))
11922 || (!DISP_IN_RANGE (delta)
11923 && !CONST_OK_FOR_K (vcall_offset)
11924 && !CONST_OK_FOR_Os (vcall_offset)))
11925 {
11926 op[5] = gen_label_rtx ();
11927 output_asm_insn ("basr\t%4,0", op);
11928 targetm.asm_out.internal_label (file, "L",
11929 CODE_LABEL_NUMBER (op[5]));
11930 }
11931
11932 /* Add DELTA to this pointer. */
11933 if (delta)
11934 {
11935 if (CONST_OK_FOR_J (delta))
11936 output_asm_insn ("la\t%1,%2(%1)", op);
11937 else if (DISP_IN_RANGE (delta))
11938 output_asm_insn ("lay\t%1,%2(%1)", op);
11939 else if (CONST_OK_FOR_K (delta))
11940 output_asm_insn ("ahi\t%1,%2", op);
11941 else if (CONST_OK_FOR_Os (delta))
11942 output_asm_insn ("afi\t%1,%2", op);
11943 else
11944 {
11945 op[6] = gen_label_rtx ();
11946 output_asm_insn ("a\t%1,%6-%5(%4)", op);
11947 }
11948 }
11949
11950 /* Perform vcall adjustment. */
11951 if (vcall_offset)
11952 {
11953 if (CONST_OK_FOR_J (vcall_offset))
11954 {
11955 output_asm_insn ("l\t%4,0(%1)", op);
11956 output_asm_insn ("a\t%1,%3(%4)", op);
11957 }
11958 else if (DISP_IN_RANGE (vcall_offset))
11959 {
11960 output_asm_insn ("l\t%4,0(%1)", op);
11961 output_asm_insn ("ay\t%1,%3(%4)", op);
11962 }
11963 else if (CONST_OK_FOR_K (vcall_offset))
11964 {
11965 output_asm_insn ("lhi\t%4,%3", op);
11966 output_asm_insn ("a\t%4,0(%1)", op);
11967 output_asm_insn ("a\t%1,0(%4)", op);
11968 }
11969 else if (CONST_OK_FOR_Os (vcall_offset))
11970 {
11971 output_asm_insn ("iilf\t%4,%3", op);
11972 output_asm_insn ("a\t%4,0(%1)", op);
11973 output_asm_insn ("a\t%1,0(%4)", op);
11974 }
11975 else
11976 {
11977 op[7] = gen_label_rtx ();
11978 output_asm_insn ("l\t%4,%7-%5(%4)", op);
11979 output_asm_insn ("a\t%4,0(%1)", op);
11980 output_asm_insn ("a\t%1,0(%4)", op);
11981 }
11982
11983 /* We had to clobber the base pointer register.
11984 Re-setup the base pointer (with a different base). */
11985 op[5] = gen_label_rtx ();
11986 output_asm_insn ("basr\t%4,0", op);
11987 targetm.asm_out.internal_label (file, "L",
11988 CODE_LABEL_NUMBER (op[5]));
11989 }
11990
11991 /* Jump to target. */
11992 op[8] = gen_label_rtx ();
11993
11994 if (!flag_pic)
11995 output_asm_insn ("l\t%4,%8-%5(%4)", op);
11996 else if (!nonlocal)
11997 output_asm_insn ("a\t%4,%8-%5(%4)", op);
11998 /* We cannot call through .plt, since .plt requires %r12 loaded. */
11999 else if (flag_pic == 1)
12000 {
12001 output_asm_insn ("a\t%4,%8-%5(%4)", op);
12002 output_asm_insn ("l\t%4,%0(%4)", op);
12003 }
12004 else if (flag_pic == 2)
12005 {
12006 op[9] = gen_rtx_REG (Pmode, 0);
12007 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
12008 output_asm_insn ("a\t%4,%8-%5(%4)", op);
12009 output_asm_insn ("ar\t%4,%9", op);
12010 output_asm_insn ("l\t%4,0(%4)", op);
12011 }
12012
12013 output_asm_insn ("br\t%4", op);
12014
12015 /* Output literal pool. */
12016 output_asm_insn (".align\t4", op);
12017
12018 if (nonlocal && flag_pic == 2)
12019 output_asm_insn (".long\t%0", op);
12020 if (nonlocal)
12021 {
12022 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
12023 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
12024 }
12025
12026 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
12027 if (!flag_pic)
12028 output_asm_insn (".long\t%0", op);
12029 else
12030 output_asm_insn (".long\t%0-%5", op);
12031
12032 if (op[6])
12033 {
12034 targetm.asm_out.internal_label (file, "L",
12035 CODE_LABEL_NUMBER (op[6]));
12036 output_asm_insn (".long\t%2", op);
12037 }
12038 if (op[7])
12039 {
12040 targetm.asm_out.internal_label (file, "L",
12041 CODE_LABEL_NUMBER (op[7]));
12042 output_asm_insn (".long\t%3", op);
12043 }
12044 }
12045 final_end_function ();
12046 }
12047
12048 static bool
12049 s390_valid_pointer_mode (machine_mode mode)
12050 {
12051 return (mode == SImode || (TARGET_64BIT && mode == DImode));
12052 }
12053
12054 /* Checks whether the given CALL_EXPR would use a caller
12055 saved register. This is used to decide whether sibling call
12056 optimization could be performed on the respective function
12057 call. */
12058
12059 static bool
12060 s390_call_saved_register_used (tree call_expr)
12061 {
12062 CUMULATIVE_ARGS cum_v;
12063 cumulative_args_t cum;
12064 tree parameter;
12065 machine_mode mode;
12066 tree type;
12067 rtx parm_rtx;
12068 int reg, i;
12069
12070 INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
12071 cum = pack_cumulative_args (&cum_v);
12072
12073 for (i = 0; i < call_expr_nargs (call_expr); i++)
12074 {
12075 parameter = CALL_EXPR_ARG (call_expr, i);
12076 gcc_assert (parameter);
12077
12078 /* For an undeclared variable passed as parameter we will get
12079 an ERROR_MARK node here. */
12080 if (TREE_CODE (parameter) == ERROR_MARK)
12081 return true;
12082
12083 type = TREE_TYPE (parameter);
12084 gcc_assert (type);
12085
12086 mode = TYPE_MODE (type);
12087 gcc_assert (mode);
12088
12089 /* We assume that in the target function all parameters are
12090 named. This only has an impact on vector argument register
12091 usage, none of which is call-saved. */
12092 if (pass_by_reference (&cum_v, mode, type, true))
12093 {
12094 mode = Pmode;
12095 type = build_pointer_type (type);
12096 }
12097
12098 parm_rtx = s390_function_arg (cum, mode, type, true);
12099
12100 s390_function_arg_advance (cum, mode, type, true);
12101
12102 if (!parm_rtx)
12103 continue;
12104
12105 if (REG_P (parm_rtx))
12106 {
12107 for (reg = 0;
12108 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
12109 reg++)
12110 if (!call_used_regs[reg + REGNO (parm_rtx)])
12111 return true;
12112 }
12113
12114 if (GET_CODE (parm_rtx) == PARALLEL)
12115 {
12116 int i;
12117
12118 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
12119 {
12120 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
12121
12122 gcc_assert (REG_P (r));
12123
12124 for (reg = 0;
12125 reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
12126 reg++)
12127 if (!call_used_regs[reg + REGNO (r)])
12128 return true;
12129 }
12130 }
12131
12132 }
12133 return false;
12134 }
12135
12136 /* Return true if the given call expression can be
12137 turned into a sibling call.
12138 DECL holds the declaration of the function to be called whereas
12139 EXP is the call expression itself. */
12140
12141 static bool
12142 s390_function_ok_for_sibcall (tree decl, tree exp)
12143 {
12144 /* The TPF epilogue uses register 1. */
12145 if (TARGET_TPF_PROFILING)
12146 return false;
12147
12148 /* The 31 bit PLT code uses register 12 (GOT pointer - caller saved)
12149 which would have to be restored before the sibcall. */
12150 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
12151 return false;
12152
12153 /* Register 6 on s390 is available as an argument register but unfortunately
12154 "caller saved". This makes functions needing this register for arguments
12155 not suitable for sibcalls. */
12156 return !s390_call_saved_register_used (exp);
12157 }
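
/* Illustrative example only: for a tail call such as

     extern int callee (int, int, int, int, int);
     int caller (int a, int b, int c, int d, int e)
     {
       return callee (a, b, c, d, e);
     }

   the fifth argument is passed in %r6, which is call-saved, so
   s390_call_saved_register_used returns true and no sibcall is made.  */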
12158
12159 /* Return the fixed registers used for condition codes. */
12160
12161 static bool
12162 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
12163 {
12164 *p1 = CC_REGNUM;
12165 *p2 = INVALID_REGNUM;
12166
12167 return true;
12168 }
12169
12170 /* This function is used by the call expanders of the machine description.
12171 It emits the call insn itself together with the necessary operations
12172 to adjust the target address and returns the emitted insn.
12173 ADDR_LOCATION is the target address rtx
12174 TLS_CALL the location of the thread-local symbol
12175 RESULT_REG the register where the result of the call should be stored
12176 RETADDR_REG the register where the return address should be stored
12177 If this parameter is NULL_RTX the call is considered
12178 to be a sibling call. */
12179
12180 rtx_insn *
12181 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
12182 rtx retaddr_reg)
12183 {
12184 bool plt_call = false;
12185 rtx_insn *insn;
12186 rtx call;
12187 rtx clobber;
12188 rtvec vec;
12189
12190 /* Direct function calls need special treatment. */
12191 if (GET_CODE (addr_location) == SYMBOL_REF)
12192 {
12193 /* When calling a global routine in PIC mode, we must
12194 replace the symbol itself with the PLT stub. */
12195 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
12196 {
12197 if (retaddr_reg != NULL_RTX)
12198 {
12199 addr_location = gen_rtx_UNSPEC (Pmode,
12200 gen_rtvec (1, addr_location),
12201 UNSPEC_PLT);
12202 addr_location = gen_rtx_CONST (Pmode, addr_location);
12203 plt_call = true;
12204 }
12205 else
12206 /* For -fpic code the PLT entries might use r12 which is
12207 call-saved. Therefore we cannot do a sibcall when
12208 calling directly using a symbol ref. When reaching
12209 this point we decided (in s390_function_ok_for_sibcall)
12210 to do a sibcall for a function pointer but one of the
12211 optimizers was able to get rid of the function pointer
12212 by propagating the symbol ref into the call. This
12213 optimization is illegal for S/390 so we turn the direct
12214 call into an indirect call again. */
12215 addr_location = force_reg (Pmode, addr_location);
12216 }
12217
12218 /* Unless we can use the bras(l) insn, force the
12219 routine address into a register. */
12220 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
12221 {
12222 if (flag_pic)
12223 addr_location = legitimize_pic_address (addr_location, 0);
12224 else
12225 addr_location = force_reg (Pmode, addr_location);
12226 }
12227 }
12228
12229 /* If it is already an indirect call or the code above moved the
12230 SYMBOL_REF to somewhere else make sure the address can be found in
12231 register 1. */
12232 if (retaddr_reg == NULL_RTX
12233 && GET_CODE (addr_location) != SYMBOL_REF
12234 && !plt_call)
12235 {
12236 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
12237 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
12238 }
12239
12240 addr_location = gen_rtx_MEM (QImode, addr_location);
12241 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
12242
12243 if (result_reg != NULL_RTX)
12244 call = gen_rtx_SET (result_reg, call);
12245
12246 if (retaddr_reg != NULL_RTX)
12247 {
12248 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
12249
12250 if (tls_call != NULL_RTX)
12251 vec = gen_rtvec (3, call, clobber,
12252 gen_rtx_USE (VOIDmode, tls_call));
12253 else
12254 vec = gen_rtvec (2, call, clobber);
12255
12256 call = gen_rtx_PARALLEL (VOIDmode, vec);
12257 }
12258
12259 insn = emit_call_insn (call);
12260
12261 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
12262 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
12263 {
12264 /* s390_function_ok_for_sibcall should
12265 have denied sibcalls in this case. */
12266 gcc_assert (retaddr_reg != NULL_RTX);
12267 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, 12));
12268 }
12269 return insn;
12270 }
12271
12272 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
12273
12274 static void
12275 s390_conditional_register_usage (void)
12276 {
12277 int i;
12278
12279 if (flag_pic)
12280 {
12281 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
12282 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
12283 }
12284 if (TARGET_CPU_ZARCH)
12285 {
12286 fixed_regs[BASE_REGNUM] = 0;
12287 call_used_regs[BASE_REGNUM] = 0;
12288 fixed_regs[RETURN_REGNUM] = 0;
12289 call_used_regs[RETURN_REGNUM] = 0;
12290 }
12291 if (TARGET_64BIT)
12292 {
12293 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
12294 call_used_regs[i] = call_really_used_regs[i] = 0;
12295 }
12296 else
12297 {
12298 call_used_regs[FPR4_REGNUM] = call_really_used_regs[FPR4_REGNUM] = 0;
12299 call_used_regs[FPR6_REGNUM] = call_really_used_regs[FPR6_REGNUM] = 0;
12300 }
12301
12302 if (TARGET_SOFT_FLOAT)
12303 {
12304 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
12305 call_used_regs[i] = fixed_regs[i] = 1;
12306 }
12307
12308 /* Disable v16 - v31 for non-vector target. */
12309 if (!TARGET_VX)
12310 {
12311 for (i = VR16_REGNUM; i <= VR31_REGNUM; i++)
12312 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
12313 }
12314 }
12315
12316 /* Corresponding function to eh_return expander. */
12317
12318 static GTY(()) rtx s390_tpf_eh_return_symbol;
12319 void
12320 s390_emit_tpf_eh_return (rtx target)
12321 {
12322 rtx_insn *insn;
12323 rtx reg, orig_ra;
12324
12325 if (!s390_tpf_eh_return_symbol)
12326 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
12327
12328 reg = gen_rtx_REG (Pmode, 2);
12329 orig_ra = gen_rtx_REG (Pmode, 3);
12330
12331 emit_move_insn (reg, target);
12332 emit_move_insn (orig_ra, get_hard_reg_initial_val (Pmode, RETURN_REGNUM));
12333 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
12334 gen_rtx_REG (Pmode, RETURN_REGNUM));
12335 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
12336 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), orig_ra);
12337
12338 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
12339 }
12340
12341 /* Rework the prologue/epilogue to avoid saving/restoring
12342 registers unnecessarily. */
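
/* Illustrative example only (offsets assume the 64-bit ABI): if the
   prologue initially contains

     stmg %r12,%r15,96(%r15)

   but after register allocation only %r14 and %r15 still need saving,
   the store-multiple below is narrowed to

     stmg %r14,%r15,112(%r15)

   and the matching lmg in the epilogue is narrowed accordingly.  */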
12343
12344 static void
12345 s390_optimize_prologue (void)
12346 {
12347 rtx_insn *insn, *new_insn, *next_insn;
12348
12349 /* Do a final recompute of the frame-related data. */
12350 s390_optimize_register_info ();
12351
12352 /* If all special registers are in fact used, there's nothing we
12353 can do, so no point in walking the insn list. */
12354
12355 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
12356 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
12357 && (TARGET_CPU_ZARCH
12358 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
12359 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
12360 return;
12361
12362 /* Search for prologue/epilogue insns and replace them. */
12363
12364 for (insn = get_insns (); insn; insn = next_insn)
12365 {
12366 int first, last, off;
12367 rtx set, base, offset;
12368 rtx pat;
12369
12370 next_insn = NEXT_INSN (insn);
12371
12372 if (! NONJUMP_INSN_P (insn) || ! RTX_FRAME_RELATED_P (insn))
12373 continue;
12374
12375 pat = PATTERN (insn);
12376
12377 /* Remove ldgr/lgdr instructions used for saving and restoring
12378 GPRs if possible. */
12379 if (TARGET_Z10
12380 && GET_CODE (pat) == SET
12381 && GET_MODE (SET_SRC (pat)) == DImode
12382 && REG_P (SET_SRC (pat))
12383 && REG_P (SET_DEST (pat)))
12384 {
12385 int src_regno = REGNO (SET_SRC (pat));
12386 int dest_regno = REGNO (SET_DEST (pat));
12387 int gpr_regno;
12388 int fpr_regno;
12389
12390 if (!((GENERAL_REGNO_P (src_regno) && FP_REGNO_P (dest_regno))
12391 || (FP_REGNO_P (src_regno) && GENERAL_REGNO_P (dest_regno))))
12392 continue;
12393
12394 gpr_regno = GENERAL_REGNO_P (src_regno) ? src_regno : dest_regno;
12395 fpr_regno = FP_REGNO_P (src_regno) ? src_regno : dest_regno;
12396
12397 /* GPR must be call-saved, FPR must be call-clobbered. */
12398 if (!call_really_used_regs[fpr_regno]
12399 || call_really_used_regs[gpr_regno])
12400 continue;
12401
12402 /* It must not happen that what we once saved in an FPR now
12403 needs a stack slot. */
12404 gcc_assert (cfun_gpr_save_slot (gpr_regno) != -1);
12405
12406 if (cfun_gpr_save_slot (gpr_regno) == 0)
12407 {
12408 remove_insn (insn);
12409 continue;
12410 }
12411 }
12412
12413 if (GET_CODE (pat) == PARALLEL
12414 && store_multiple_operation (pat, VOIDmode))
12415 {
12416 set = XVECEXP (pat, 0, 0);
12417 first = REGNO (SET_SRC (set));
12418 last = first + XVECLEN (pat, 0) - 1;
12419 offset = const0_rtx;
12420 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
12421 off = INTVAL (offset);
12422
12423 if (GET_CODE (base) != REG || off < 0)
12424 continue;
12425 if (cfun_frame_layout.first_save_gpr != -1
12426 && (cfun_frame_layout.first_save_gpr < first
12427 || cfun_frame_layout.last_save_gpr > last))
12428 continue;
12429 if (REGNO (base) != STACK_POINTER_REGNUM
12430 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
12431 continue;
12432 if (first > BASE_REGNUM || last < BASE_REGNUM)
12433 continue;
12434
12435 if (cfun_frame_layout.first_save_gpr != -1)
12436 {
12437 rtx s_pat = save_gprs (base,
12438 off + (cfun_frame_layout.first_save_gpr
12439 - first) * UNITS_PER_LONG,
12440 cfun_frame_layout.first_save_gpr,
12441 cfun_frame_layout.last_save_gpr);
12442 new_insn = emit_insn_before (s_pat, insn);
12443 INSN_ADDRESSES_NEW (new_insn, -1);
12444 }
12445
12446 remove_insn (insn);
12447 continue;
12448 }
12449
12450 if (cfun_frame_layout.first_save_gpr == -1
12451 && GET_CODE (pat) == SET
12452 && GENERAL_REG_P (SET_SRC (pat))
12453 && GET_CODE (SET_DEST (pat)) == MEM)
12454 {
12455 set = pat;
12456 first = REGNO (SET_SRC (set));
12457 offset = const0_rtx;
12458 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
12459 off = INTVAL (offset);
12460
12461 if (GET_CODE (base) != REG || off < 0)
12462 continue;
12463 if (REGNO (base) != STACK_POINTER_REGNUM
12464 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
12465 continue;
12466
12467 remove_insn (insn);
12468 continue;
12469 }
12470
12471 if (GET_CODE (pat) == PARALLEL
12472 && load_multiple_operation (pat, VOIDmode))
12473 {
12474 set = XVECEXP (pat, 0, 0);
12475 first = REGNO (SET_DEST (set));
12476 last = first + XVECLEN (pat, 0) - 1;
12477 offset = const0_rtx;
12478 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
12479 off = INTVAL (offset);
12480
12481 if (GET_CODE (base) != REG || off < 0)
12482 continue;
12483
12484 if (cfun_frame_layout.first_restore_gpr != -1
12485 && (cfun_frame_layout.first_restore_gpr < first
12486 || cfun_frame_layout.last_restore_gpr > last))
12487 continue;
12488 if (REGNO (base) != STACK_POINTER_REGNUM
12489 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
12490 continue;
12491 if (first > BASE_REGNUM || last < BASE_REGNUM)
12492 continue;
12493
12494 if (cfun_frame_layout.first_restore_gpr != -1)
12495 {
12496 rtx rpat = restore_gprs (base,
12497 off + (cfun_frame_layout.first_restore_gpr
12498 - first) * UNITS_PER_LONG,
12499 cfun_frame_layout.first_restore_gpr,
12500 cfun_frame_layout.last_restore_gpr);
12501
12502 /* Remove REG_CFA_RESTOREs for registers that we no
12503 longer need to save. */
12504 REG_NOTES (rpat) = REG_NOTES (insn);
12505 for (rtx *ptr = &REG_NOTES (rpat); *ptr; )
12506 if (REG_NOTE_KIND (*ptr) == REG_CFA_RESTORE
12507 && ((int) REGNO (XEXP (*ptr, 0))
12508 < cfun_frame_layout.first_restore_gpr))
12509 *ptr = XEXP (*ptr, 1);
12510 else
12511 ptr = &XEXP (*ptr, 1);
12512 new_insn = emit_insn_before (rpat, insn);
12513 RTX_FRAME_RELATED_P (new_insn) = 1;
12514 INSN_ADDRESSES_NEW (new_insn, -1);
12515 }
12516
12517 remove_insn (insn);
12518 continue;
12519 }
12520
12521 if (cfun_frame_layout.first_restore_gpr == -1
12522 && GET_CODE (pat) == SET
12523 && GENERAL_REG_P (SET_DEST (pat))
12524 && GET_CODE (SET_SRC (pat)) == MEM)
12525 {
12526 set = pat;
12527 first = REGNO (SET_DEST (set));
12528 offset = const0_rtx;
12529 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
12530 off = INTVAL (offset);
12531
12532 if (GET_CODE (base) != REG || off < 0)
12533 continue;
12534
12535 if (REGNO (base) != STACK_POINTER_REGNUM
12536 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
12537 continue;
12538
12539 remove_insn (insn);
12540 continue;
12541 }
12542 }
12543 }
12544
12545 /* On z10 and later the dynamic branch prediction must see the
12546 backward jump within a certain window. If not, it falls back to
12547 the static prediction. This function rearranges the loop backward
12548 branch in a way which makes the static prediction always correct.
12549 The function returns true if it added an instruction. */
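
/* Sketch of the rearrangement (illustrative only):

     before:                         after:
       code_label:                     code_label:
         ...                             ...
         jCC   code_label                j!CC  new_label
                                         j     code_label
                                       new_label:

   The backward branch becomes unconditional and is therefore always
   predicted correctly; the conditional branch turns into a short
   forward branch.  */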
12550 static bool
12551 s390_fix_long_loop_prediction (rtx_insn *insn)
12552 {
12553 rtx set = single_set (insn);
12554 rtx code_label, label_ref, new_label;
12555 rtx_insn *uncond_jump;
12556 rtx_insn *cur_insn;
12557 rtx tmp;
12558 int distance;
12559
12560 /* This will exclude branch on count and branch on index patterns
12561 since these are correctly statically predicted. */
12562 if (!set
12563 || SET_DEST (set) != pc_rtx
12564 || GET_CODE (SET_SRC(set)) != IF_THEN_ELSE)
12565 return false;
12566
12567 /* Skip conditional returns. */
12568 if (ANY_RETURN_P (XEXP (SET_SRC (set), 1))
12569 && XEXP (SET_SRC (set), 2) == pc_rtx)
12570 return false;
12571
12572 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
12573 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
12574
12575 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
12576
12577 code_label = XEXP (label_ref, 0);
12578
12579 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
12580 || INSN_ADDRESSES (INSN_UID (insn)) == -1
12581 || (INSN_ADDRESSES (INSN_UID (insn))
12582 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
12583 return false;
12584
12585 for (distance = 0, cur_insn = PREV_INSN (insn);
12586 distance < PREDICT_DISTANCE - 6;
12587 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
12588 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
12589 return false;
12590
12591 new_label = gen_label_rtx ();
12592 uncond_jump = emit_jump_insn_after (
12593 gen_rtx_SET (pc_rtx,
12594 gen_rtx_LABEL_REF (VOIDmode, code_label)),
12595 insn);
12596 emit_label_after (new_label, uncond_jump);
12597
12598 tmp = XEXP (SET_SRC (set), 1);
12599 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
12600 XEXP (SET_SRC (set), 2) = tmp;
12601 INSN_CODE (insn) = -1;
12602
12603 XEXP (label_ref, 0) = new_label;
12604 JUMP_LABEL (insn) = new_label;
12605 JUMP_LABEL (uncond_jump) = code_label;
12606
12607 return true;
12608 }
12609
12610 /* Returns 1 if INSN reads the value of REG for purposes not related
12611 to addressing of memory, and 0 otherwise. */
12612 static int
12613 s390_non_addr_reg_read_p (rtx reg, rtx_insn *insn)
12614 {
12615 return reg_referenced_p (reg, PATTERN (insn))
12616 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
12617 }
12618
12619 /* Starting from INSN find_cond_jump looks downwards in the insn
12620 stream for a single jump insn which is the last user of the
12621 condition code set in INSN. */
12622 static rtx_insn *
12623 find_cond_jump (rtx_insn *insn)
12624 {
12625 for (; insn; insn = NEXT_INSN (insn))
12626 {
12627 rtx ite, cc;
12628
12629 if (LABEL_P (insn))
12630 break;
12631
12632 if (!JUMP_P (insn))
12633 {
12634 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
12635 break;
12636 continue;
12637 }
12638
12639 /* This will be triggered by a return. */
12640 if (GET_CODE (PATTERN (insn)) != SET)
12641 break;
12642
12643 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
12644 ite = SET_SRC (PATTERN (insn));
12645
12646 if (GET_CODE (ite) != IF_THEN_ELSE)
12647 break;
12648
12649 cc = XEXP (XEXP (ite, 0), 0);
12650 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
12651 break;
12652
12653 if (find_reg_note (insn, REG_DEAD, cc))
12654 return insn;
12655 break;
12656 }
12657
12658 return NULL;
12659 }
12660
12661 /* Swap the condition in COND and the operands in OP0 and OP1 so that
12662 the semantics does not change. If NULL_RTX is passed as COND the
12663 function tries to find the conditional jump starting with INSN. */
12664 static void
12665 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx_insn *insn)
12666 {
12667 rtx tmp = *op0;
12668
12669 if (cond == NULL_RTX)
12670 {
12671 rtx_insn *jump = find_cond_jump (NEXT_INSN (insn));
12672 rtx set = jump ? single_set (jump) : NULL_RTX;
12673
12674 if (set == NULL_RTX)
12675 return;
12676
12677 cond = XEXP (SET_SRC (set), 0);
12678 }
12679
12680 *op0 = *op1;
12681 *op1 = tmp;
12682 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
12683 }
12684
12685 /* On z10, instructions of the compare-and-branch family have the
12686 property to access the register occurring as second operand with
12687 its bits complemented. If such a compare is grouped with a second
12688 instruction that accesses the same register non-complemented, and
12689 if that register's value is delivered via a bypass, then the
12690 pipeline recycles, thereby causing significant performance decline.
12691 This function locates such situations and exchanges the two
12692 operands of the compare. The function returns true whenever it
12693 added an insn. */
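
/* Illustrative example of the hazard (sketch only):

     crj  %r1,%r2,...    ; compare and branch, reads %r2 complemented
     ar   %r3,%r2        ; same group, reads %r2 non-complemented

   Swapping the compare operands (together with its condition mask) or
   emitting a NOP after the compare avoids the pipeline recycle.  */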
12694 static bool
12695 s390_z10_optimize_cmp (rtx_insn *insn)
12696 {
12697 rtx_insn *prev_insn, *next_insn;
12698 bool insn_added_p = false;
12699 rtx cond, *op0, *op1;
12700
12701 if (GET_CODE (PATTERN (insn)) == PARALLEL)
12702 {
12703 /* Handle compare and branch and branch on count
12704 instructions. */
12705 rtx pattern = single_set (insn);
12706
12707 if (!pattern
12708 || SET_DEST (pattern) != pc_rtx
12709 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
12710 return false;
12711
12712 cond = XEXP (SET_SRC (pattern), 0);
12713 op0 = &XEXP (cond, 0);
12714 op1 = &XEXP (cond, 1);
12715 }
12716 else if (GET_CODE (PATTERN (insn)) == SET)
12717 {
12718 rtx src, dest;
12719
12720 /* Handle normal compare instructions. */
12721 src = SET_SRC (PATTERN (insn));
12722 dest = SET_DEST (PATTERN (insn));
12723
12724 if (!REG_P (dest)
12725 || !CC_REGNO_P (REGNO (dest))
12726 || GET_CODE (src) != COMPARE)
12727 return false;
12728
12729 /* s390_swap_cmp will try to find the conditional
12730 jump when passing NULL_RTX as condition. */
12731 cond = NULL_RTX;
12732 op0 = &XEXP (src, 0);
12733 op1 = &XEXP (src, 1);
12734 }
12735 else
12736 return false;
12737
12738 if (!REG_P (*op0) || !REG_P (*op1))
12739 return false;
12740
12741 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
12742 return false;
12743
12744 /* Swap the COMPARE arguments and its mask if there is a
12745 conflicting access in the previous insn. */
12746 prev_insn = prev_active_insn (insn);
12747 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
12748 && reg_referenced_p (*op1, PATTERN (prev_insn)))
12749 s390_swap_cmp (cond, op0, op1, insn);
12750
12751 /* Check if there is a conflict with the next insn. If there
12752 was no conflict with the previous insn, then swap the
12753 COMPARE arguments and its mask. If we already swapped
12754 the operands, or if swapping them would cause a conflict
12755 with the previous insn, issue a NOP after the COMPARE in
12756 order to separate the two instructions. */
12757 next_insn = next_active_insn (insn);
12758 if (next_insn != NULL_RTX && INSN_P (next_insn)
12759 && s390_non_addr_reg_read_p (*op1, next_insn))
12760 {
12761 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
12762 && s390_non_addr_reg_read_p (*op0, prev_insn))
12763 {
12764 if (REGNO (*op1) == 0)
12765 emit_insn_after (gen_nop1 (), insn);
12766 else
12767 emit_insn_after (gen_nop (), insn);
12768 insn_added_p = true;
12769 }
12770 else
12771 s390_swap_cmp (cond, op0, op1, insn);
12772 }
12773 return insn_added_p;
12774 }
12775
12776 /* Perform machine-dependent processing. */
12777
12778 static void
12779 s390_reorg (void)
12780 {
12781 bool pool_overflow = false;
12782 int hw_before, hw_after;
12783
12784 /* Make sure all splits have been performed; splits after
12785 machine_dependent_reorg might confuse insn length counts. */
12786 split_all_insns_noflow ();
12787
12788 /* Install the main literal pool and the associated base
12789 register load insns.
12790
12791 In addition, there are two problematic situations we need
12792 to correct:
12793
12794 - the literal pool might be > 4096 bytes in size, so that
12795 some of its elements cannot be directly accessed
12796
12797 - a branch target might be > 64K away from the branch, so that
12798 it is not possible to use a PC-relative instruction.
12799
12800 To fix those, we split the single literal pool into multiple
12801 pool chunks, reloading the pool base register at various
12802 points throughout the function to ensure it always points to
12803 the pool chunk the following code expects, and / or replace
12804 PC-relative branches by absolute branches.
12805
12806 However, the two problems are interdependent: splitting the
12807 literal pool can move a branch further away from its target,
12808 causing the 64K limit to overflow, and on the other hand,
12809 replacing a PC-relative branch by an absolute branch means
12810 we need to put the branch target address into the literal
12811 pool, possibly causing it to overflow.
12812
12813 So, we loop trying to fix up both problems until we manage
12814 to satisfy both conditions at the same time. Note that the
12815 loop is guaranteed to terminate as every pass of the loop
12816 strictly decreases the total number of PC-relative branches
12817 in the function. (This is not completely true as there
12818 might be branch-over-pool insns introduced by chunkify_start.
12819 Those never need to be split however.) */
12820
12821 for (;;)
12822 {
12823 struct constant_pool *pool = NULL;
12824
12825 /* Collect the literal pool. */
12826 if (!pool_overflow)
12827 {
12828 pool = s390_mainpool_start ();
12829 if (!pool)
12830 pool_overflow = true;
12831 }
12832
12833 /* If literal pool overflowed, start to chunkify it. */
12834 if (pool_overflow)
12835 pool = s390_chunkify_start ();
12836
12837 /* Split out-of-range branches. If this has created new
12838 literal pool entries, cancel current chunk list and
12839 recompute it. zSeries machines have large branch
12840 instructions, so we never need to split a branch. */
12841 if (!TARGET_CPU_ZARCH && s390_split_branches ())
12842 {
12843 if (pool_overflow)
12844 s390_chunkify_cancel (pool);
12845 else
12846 s390_mainpool_cancel (pool);
12847
12848 continue;
12849 }
12850
12851 /* If we made it up to here, both conditions are satisfied.
12852 Finish up literal pool related changes. */
12853 if (pool_overflow)
12854 s390_chunkify_finish (pool);
12855 else
12856 s390_mainpool_finish (pool);
12857
12858 /* We're done splitting branches. */
12859 cfun->machine->split_branches_pending_p = false;
12860 break;
12861 }
12862
12863 /* Generate out-of-pool execute target insns. */
12864 if (TARGET_CPU_ZARCH)
12865 {
12866 rtx_insn *insn, *target;
12867 rtx label;
12868
12869 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
12870 {
12871 label = s390_execute_label (insn);
12872 if (!label)
12873 continue;
12874
12875 gcc_assert (label != const0_rtx);
12876
12877 target = emit_label (XEXP (label, 0));
12878 INSN_ADDRESSES_NEW (target, -1);
12879
12880 target = emit_insn (s390_execute_target (insn));
12881 INSN_ADDRESSES_NEW (target, -1);
12882 }
12883 }
12884
12885 /* Try to optimize prologue and epilogue further. */
12886 s390_optimize_prologue ();
12887
12888 /* Walk over the insns and do some >=z10 specific changes. */
12889 if (s390_tune == PROCESSOR_2097_Z10
12890 || s390_tune == PROCESSOR_2817_Z196
12891 || s390_tune == PROCESSOR_2827_ZEC12
12892 || s390_tune == PROCESSOR_2964_Z13)
12893 {
12894 rtx_insn *insn;
12895 bool insn_added_p = false;
12896
12897 /* The insn lengths and addresses have to be up to date for the
12898 following manipulations. */
12899 shorten_branches (get_insns ());
12900
12901 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
12902 {
12903 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
12904 continue;
12905
12906 if (JUMP_P (insn))
12907 insn_added_p |= s390_fix_long_loop_prediction (insn);
12908
12909 if ((GET_CODE (PATTERN (insn)) == PARALLEL
12910 || GET_CODE (PATTERN (insn)) == SET)
12911 && s390_tune == PROCESSOR_2097_Z10)
12912 insn_added_p |= s390_z10_optimize_cmp (insn);
12913 }
12914
12915 /* Adjust branches if we added new instructions. */
12916 if (insn_added_p)
12917 shorten_branches (get_insns ());
12918 }
12919
12920 s390_function_num_hotpatch_hw (current_function_decl, &hw_before, &hw_after);
12921 if (hw_after > 0)
12922 {
12923 rtx_insn *insn;
12924
12925 /* Insert NOPs for hotpatching. */
12926 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
12927 /* Emit NOPs
12928 1. inside the area covered by debug information to allow setting
12929 breakpoints at the NOPs,
12930 2. before any insn which results in an asm instruction,
12931 3. before in-function labels to avoid jumping to the NOPs, for
12932 example as part of a loop,
12933 4. before any barrier in case the function is completely empty
12934 (__builtin_unreachable ()) and has neither internal labels nor
12935 active insns.
12936 */
12937 if (active_insn_p (insn) || BARRIER_P (insn) || LABEL_P (insn))
12938 break;
12939 /* Output a series of NOPs before the first active insn. */
12940 while (insn && hw_after > 0)
12941 {
12942 if (hw_after >= 3 && TARGET_CPU_ZARCH)
12943 {
12944 emit_insn_before (gen_nop_6_byte (), insn);
12945 hw_after -= 3;
12946 }
12947 else if (hw_after >= 2)
12948 {
12949 emit_insn_before (gen_nop_4_byte (), insn);
12950 hw_after -= 2;
12951 }
12952 else
12953 {
12954 emit_insn_before (gen_nop_2_byte (), insn);
12955 hw_after -= 1;
12956 }
12957 }
12958 }
12959 }
12960
12961 /* Return true if INSN is a fp load insn writing register REGNO. */
12962 static inline bool
12963 s390_fpload_toreg (rtx_insn *insn, unsigned int regno)
12964 {
12965 rtx set;
12966 enum attr_type flag = s390_safe_attr_type (insn);
12967
12968 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
12969 return false;
12970
12971 set = single_set (insn);
12972
12973 if (set == NULL_RTX)
12974 return false;
12975
12976 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
12977 return false;
12978
12979 if (REGNO (SET_DEST (set)) != regno)
12980 return false;
12981
12982 return true;
12983 }
12984
12985 /* This value describes the distance to be avoided between an
12986 arithmetic fp instruction and an fp load writing the same register.
12987 Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 are
12988 fine but the exact value has to be avoided. Otherwise the FP
12989 pipeline will throw an exception causing a major penalty. */
12990 #define Z10_EARLYLOAD_DISTANCE 7
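
/* Illustrative example only: with Z10_EARLYLOAD_DISTANCE == 7 a
   schedule such as

     adbr %f0,%f2            ; arithmetic fp insn writing %f0
     <six unrelated insns>
     ld   %f0,0(%r1)         ; fp load writing %f0 again

   would hit exactly the problematic distance, so such a load is moved
   to the end of the ready list by the function below.  */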
12991
12992 /* Rearrange the ready list in order to avoid the situation described
12993 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
12994 moved to the very end of the ready list. */
12995 static void
12996 s390_z10_prevent_earlyload_conflicts (rtx_insn **ready, int *nready_p)
12997 {
12998 unsigned int regno;
12999 int nready = *nready_p;
13000 rtx_insn *tmp;
13001 int i;
13002 rtx_insn *insn;
13003 rtx set;
13004 enum attr_type flag;
13005 int distance;
13006
13007 /* Skip DISTANCE - 1 active insns. */
13008 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
13009 distance > 0 && insn != NULL_RTX;
13010 distance--, insn = prev_active_insn (insn))
13011 if (CALL_P (insn) || JUMP_P (insn))
13012 return;
13013
13014 if (insn == NULL_RTX)
13015 return;
13016
13017 set = single_set (insn);
13018
13019 if (set == NULL_RTX || !REG_P (SET_DEST (set))
13020 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
13021 return;
13022
13023 flag = s390_safe_attr_type (insn);
13024
13025 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
13026 return;
13027
13028 regno = REGNO (SET_DEST (set));
13029 i = nready - 1;
13030
13031 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
13032 i--;
13033
13034 if (!i)
13035 return;
13036
13037 tmp = ready[i];
13038 memmove (&ready[1], &ready[0], sizeof (rtx_insn *) * i);
13039 ready[0] = tmp;
13040 }
13041
13042
13043 /* The s390_sched_state variable tracks the state of the current or
13044 the last instruction group.
13045
13046 0,1,2 number of instructions scheduled in the current group
13047 3 the last group is complete - normal insns
13048 4 the last group was a cracked/expanded insn */
13049
13050 static int s390_sched_state;
13051
13052 #define S390_OOO_SCHED_STATE_NORMAL 3
13053 #define S390_OOO_SCHED_STATE_CRACKED 4
13054
13055 #define S390_OOO_SCHED_ATTR_MASK_CRACKED 0x1
13056 #define S390_OOO_SCHED_ATTR_MASK_EXPANDED 0x2
13057 #define S390_OOO_SCHED_ATTR_MASK_ENDGROUP 0x4
13058 #define S390_OOO_SCHED_ATTR_MASK_GROUPALONE 0x8
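
/* Sketch of the state transitions applied in s390_sched_variable_issue
   below (illustrative summary only):

     cracked or expanded insn           -> S390_OOO_SCHED_STATE_CRACKED
     endgroup or groupalone insn        -> S390_OOO_SCHED_STATE_NORMAL
     normal insn in state 0, 1 or 2     -> state + 1
     normal insn in state NORMAL        -> state 1
     normal insn in state CRACKED       -> S390_OOO_SCHED_STATE_NORMAL  */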
13059
13060 static unsigned int
13061 s390_get_sched_attrmask (rtx_insn *insn)
13062 {
13063 unsigned int mask = 0;
13064
13065 if (get_attr_ooo_cracked (insn))
13066 mask |= S390_OOO_SCHED_ATTR_MASK_CRACKED;
13067 if (get_attr_ooo_expanded (insn))
13068 mask |= S390_OOO_SCHED_ATTR_MASK_EXPANDED;
13069 if (get_attr_ooo_endgroup (insn))
13070 mask |= S390_OOO_SCHED_ATTR_MASK_ENDGROUP;
13071 if (get_attr_ooo_groupalone (insn))
13072 mask |= S390_OOO_SCHED_ATTR_MASK_GROUPALONE;
13073 return mask;
13074 }
13075
13076 /* Return the scheduling score for INSN. The higher the score the
13077 better. The score is calculated from the OOO scheduling attributes
13078 of INSN and the scheduling state s390_sched_state. */
13079 static int
13080 s390_sched_score (rtx_insn *insn)
13081 {
13082 unsigned int mask = s390_get_sched_attrmask (insn);
13083 int score = 0;
13084
13085 switch (s390_sched_state)
13086 {
13087 case 0:
13088 /* Try to put insns into the first slot which would otherwise
13089 break a group. */
13090 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
13091 || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
13092 score += 5;
13093 if ((mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
13094 score += 10;
13095 case 1: /* fall through from case 0 */
13096 /* Prefer not cracked insns while trying to put together a
13097 group. */
13098 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
13099 && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0
13100 && (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) == 0)
13101 score += 10;
13102 if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) == 0)
13103 score += 5;
13104 break;
13105 case 2:
13106 /* Prefer not cracked insns while trying to put together a
13107 group. */
13108 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
13109 && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0
13110 && (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) == 0)
13111 score += 10;
13112 /* Prefer endgroup insns in the last slot. */
13113 if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) != 0)
13114 score += 10;
13115 break;
13116 case S390_OOO_SCHED_STATE_NORMAL:
13117 /* Prefer not cracked insns if the last was not cracked. */
13118 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
13119 && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0)
13120 score += 5;
13121 if ((mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
13122 score += 10;
13123 break;
13124 case S390_OOO_SCHED_STATE_CRACKED:
13125 /* Try to keep cracked insns together to prevent them from
13126 interrupting groups. */
13127 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
13128 || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
13129 score += 5;
13130 break;
13131 }
13132 return score;
13133 }
13134
13135 /* This function is called via hook TARGET_SCHED_REORDER before
13136 issuing one insn from list READY which contains *NREADYP entries.
13137 For target z10 it reorders load instructions to avoid early load
13138 conflicts in the floating point pipeline. */
13139 static int
13140 s390_sched_reorder (FILE *file, int verbose,
13141 rtx_insn **ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
13142 {
13143 if (s390_tune == PROCESSOR_2097_Z10)
13144 if (reload_completed && *nreadyp > 1)
13145 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
13146
13147 if ((s390_tune == PROCESSOR_2827_ZEC12
13148 || s390_tune == PROCESSOR_2964_Z13)
13149 && reload_completed
13150 && *nreadyp > 1)
13151 {
13152 int i;
13153 int last_index = *nreadyp - 1;
13154 int max_index = -1;
13155 int max_score = -1;
13156 rtx_insn *tmp;
13157
13158 /* Just move the insn with the highest score to the top (the
13159 end) of the list. A full sort is not needed since a conflict
13160 in the hazard recognition cannot happen. So the top insn in
13161 the ready list will always be taken. */
13162 for (i = last_index; i >= 0; i--)
13163 {
13164 int score;
13165
13166 if (recog_memoized (ready[i]) < 0)
13167 continue;
13168
13169 score = s390_sched_score (ready[i]);
13170 if (score > max_score)
13171 {
13172 max_score = score;
13173 max_index = i;
13174 }
13175 }
13176
13177 if (max_index != -1)
13178 {
13179 if (max_index != last_index)
13180 {
13181 tmp = ready[max_index];
13182 ready[max_index] = ready[last_index];
13183 ready[last_index] = tmp;
13184
13185 if (verbose > 5)
13186 fprintf (file,
13187 "move insn %d to the top of list\n",
13188 INSN_UID (ready[last_index]));
13189 }
13190 else if (verbose > 5)
13191 fprintf (file,
13192 "best insn %d already on top\n",
13193 INSN_UID (ready[last_index]));
13194 }
13195
13196 if (verbose > 5)
13197 {
13198 fprintf (file, "ready list ooo attributes - sched state: %d\n",
13199 s390_sched_state);
13200
13201 for (i = last_index; i >= 0; i--)
13202 {
13203 if (recog_memoized (ready[i]) < 0)
13204 continue;
13205 fprintf (file, "insn %d score: %d: ", INSN_UID (ready[i]),
13206 s390_sched_score (ready[i]));
13207 #define PRINT_OOO_ATTR(ATTR) fprintf (file, "%s ", get_attr_##ATTR (ready[i]) ? #ATTR : "!" #ATTR);
13208 PRINT_OOO_ATTR (ooo_cracked);
13209 PRINT_OOO_ATTR (ooo_expanded);
13210 PRINT_OOO_ATTR (ooo_endgroup);
13211 PRINT_OOO_ATTR (ooo_groupalone);
13212 #undef PRINT_OOO_ATTR
13213 fprintf (file, "\n");
13214 }
13215 }
13216 }
13217
13218 return s390_issue_rate ();
13219 }
13220
13221
13222 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
13223 the scheduler has issued INSN.  It stores the last issued insn into
13224 last_scheduled_insn in order to make it available for
13225 s390_sched_reorder and updates the s390_sched_state machine used by
the zEC12/z13 grouping heuristics. */
13226 static int
13227 s390_sched_variable_issue (FILE *file, int verbose, rtx_insn *insn, int more)
13228 {
13229 last_scheduled_insn = insn;
13230
13231 if ((s390_tune == PROCESSOR_2827_ZEC12
13232 || s390_tune == PROCESSOR_2964_Z13)
13233 && reload_completed
13234 && recog_memoized (insn) >= 0)
13235 {
13236 unsigned int mask = s390_get_sched_attrmask (insn);
13237
13238 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
13239 || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
13240 s390_sched_state = S390_OOO_SCHED_STATE_CRACKED;
13241 else if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) != 0
13242 || (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
13243 s390_sched_state = S390_OOO_SCHED_STATE_NORMAL;
13244 else
13245 {
13246 /* Only normal insns are left (mask == 0). */
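/* s390_sched_state tracks how the current dispatch group is being
filled: the small numeric values count normal insns issued in a row,
while the NORMAL and CRACKED states mark the group boundaries caused
by endgroup/groupalone and by cracked/expanded insns respectively
(see the assignments above). */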
13247 switch (s390_sched_state)
13248 {
13249 case 0:
13250 case 1:
13251 case 2:
13252 case S390_OOO_SCHED_STATE_NORMAL:
13253 if (s390_sched_state == S390_OOO_SCHED_STATE_NORMAL)
13254 s390_sched_state = 1;
13255 else
13256 s390_sched_state++;
13257
13258 break;
13259 case S390_OOO_SCHED_STATE_CRACKED:
13260 s390_sched_state = S390_OOO_SCHED_STATE_NORMAL;
13261 break;
13262 }
13263 }
13264 if (verbose > 5)
13265 {
13266 fprintf (file, "insn %d: ", INSN_UID (insn));
13267 #define PRINT_OOO_ATTR(ATTR) \
13268 fprintf (file, "%s ", get_attr_##ATTR (insn) ? #ATTR : "");
13269 PRINT_OOO_ATTR (ooo_cracked);
13270 PRINT_OOO_ATTR (ooo_expanded);
13271 PRINT_OOO_ATTR (ooo_endgroup);
13272 PRINT_OOO_ATTR (ooo_groupalone);
13273 #undef PRINT_OOO_ATTR
13274 fprintf (file, "\n");
13275 fprintf (file, "sched state: %d\n", s390_sched_state);
13276 }
13277 }
13278
13279 if (GET_CODE (PATTERN (insn)) != USE
13280 && GET_CODE (PATTERN (insn)) != CLOBBER)
13281 return more - 1;
13282 else
13283 return more;
13284 }
13285
13286 static void
13287 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
13288 int verbose ATTRIBUTE_UNUSED,
13289 int max_ready ATTRIBUTE_UNUSED)
13290 {
13291 last_scheduled_insn = NULL;
13292 s390_sched_state = 0;
13293 }
13294
13295 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
13296 the number of times struct loop *loop should be unrolled when tuning
13297 for CPUs with a built-in stride prefetcher.
13298 The loop is analyzed for memory accesses by walking all subrtxes of
13299 the insns in the loop body.  Depending on the loop_depth and the number
13300 of memory accesses a new unroll factor <= nunroll is returned to improve
13301 the behaviour of the hardware prefetch unit. */
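/* For example, with the table below a depth-1 loop body containing
four memory references is unrolled at most MIN (nunroll, 28 / 4)
= MIN (nunroll, 7) times. */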
13302 static unsigned
13303 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
13304 {
13305 basic_block *bbs;
13306 rtx_insn *insn;
13307 unsigned i;
13308 unsigned mem_count = 0;
13309
13310 if (s390_tune != PROCESSOR_2097_Z10
13311 && s390_tune != PROCESSOR_2817_Z196
13312 && s390_tune != PROCESSOR_2827_ZEC12
13313 && s390_tune != PROCESSOR_2964_Z13)
13314 return nunroll;
13315
13316 /* Count the number of memory references within the loop body. */
13317 bbs = get_loop_body (loop);
13318 subrtx_iterator::array_type array;
13319 for (i = 0; i < loop->num_nodes; i++)
13320 FOR_BB_INSNS (bbs[i], insn)
13321 if (INSN_P (insn) && INSN_CODE (insn) != -1)
13322 FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
13323 if (MEM_P (*iter))
13324 mem_count += 1;
13325 free (bbs);
13326
13327 /* Prevent division by zero; a loop without memory references does not
need nunroll adjusted. */
13328 if (mem_count == 0)
13329 return nunroll;
13330
13331 switch (loop_depth(loop))
13332 {
13333 case 1:
13334 return MIN (nunroll, 28 / mem_count);
13335 case 2:
13336 return MIN (nunroll, 22 / mem_count);
13337 default:
13338 return MIN (nunroll, 16 / mem_count);
13339 }
13340 }
13341
13342 static void
13343 s390_option_override (void)
13344 {
13345 unsigned int i;
13346 cl_deferred_option *opt;
13347 vec<cl_deferred_option> *v =
13348 (vec<cl_deferred_option> *) s390_deferred_options;
13349
13350 if (v)
13351 FOR_EACH_VEC_ELT (*v, i, opt)
13352 {
13353 switch (opt->opt_index)
13354 {
13355 case OPT_mhotpatch_:
13356 {
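/* The argument of -mhotpatch=n,m is two comma separated non-negative
integers; e.g. -mhotpatch=12,2 sets s390_hotpatch_hw_before_label to
12 and s390_hotpatch_hw_after_label to 2 after the parsing below. */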
13357 int val1;
13358 int val2;
13359 char s[256];
13360 char *t;
13361
13362 strncpy (s, opt->arg, 256);
13363 s[255] = 0;
13364 t = strchr (s, ',');
13365 if (t != NULL)
13366 {
13367 *t = 0;
13368 t++;
13369 val1 = integral_argument (s);
13370 val2 = integral_argument (t);
13371 }
13372 else
13373 {
13374 val1 = -1;
13375 val2 = -1;
13376 }
13377 if (val1 == -1 || val2 == -1)
13378 {
13379 /* argument is not a plain number */
13380 error ("arguments to %qs should be non-negative integers",
13381 "-mhotpatch=n,m");
13382 break;
13383 }
13384 else if (val1 > s390_hotpatch_hw_max
13385 || val2 > s390_hotpatch_hw_max)
13386 {
13387 error ("argument to %qs is too large (max. %d)",
13388 "-mhotpatch=n,m", s390_hotpatch_hw_max);
13389 break;
13390 }
13391 s390_hotpatch_hw_before_label = val1;
13392 s390_hotpatch_hw_after_label = val2;
13393 break;
13394 }
13395 default:
13396 gcc_unreachable ();
13397 }
13398 }
13399
13400 /* Set up function hooks. */
13401 init_machine_status = s390_init_machine_status;
13402
13403 /* Architecture mode defaults according to ABI. */
13404 if (!(target_flags_explicit & MASK_ZARCH))
13405 {
13406 if (TARGET_64BIT)
13407 target_flags |= MASK_ZARCH;
13408 else
13409 target_flags &= ~MASK_ZARCH;
13410 }
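/* In other words, a 64-bit compilation defaults to z/Architecture mode
while a 31-bit compilation defaults to ESA/390 mode, unless -mzarch or
-mesa was given explicitly. */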
13411
13412 /* Set the -march default in case it hasn't been specified on the
13413 command line. */
13414 if (s390_arch == PROCESSOR_max)
13415 {
13416 s390_arch_string = TARGET_ZARCH ? "z900" : "g5";
13417 s390_arch = TARGET_ZARCH ? PROCESSOR_2064_Z900 : PROCESSOR_9672_G5;
13418 s390_arch_flags = processor_flags_table[(int)s390_arch];
13419 }
13420
13421 /* Determine processor to tune for. */
13422 if (s390_tune == PROCESSOR_max)
13423 {
13424 s390_tune = s390_arch;
13425 s390_tune_flags = s390_arch_flags;
13426 }
13427
13428 /* Sanity checks. */
13429 if (s390_arch == PROCESSOR_NATIVE || s390_tune == PROCESSOR_NATIVE)
13430 gcc_unreachable ();
13431 if (TARGET_ZARCH && !TARGET_CPU_ZARCH)
13432 error ("z/Architecture mode not supported on %s", s390_arch_string);
13433 if (TARGET_64BIT && !TARGET_ZARCH)
13434 error ("64-bit ABI not supported in ESA/390 mode");
13435
13436 /* Use hardware DFP if available and not explicitly disabled by
13437 user. E.g. with -m31 -march=z10 -mzarch */
13438 if (!(target_flags_explicit & MASK_HARD_DFP) && TARGET_DFP)
13439 target_flags |= MASK_HARD_DFP;
13440
13441 /* Enable hardware transactions if available and not explicitly
13442 disabled by user. E.g. with -m31 -march=zEC12 -mzarch */
13443 if (!(target_flags_explicit & MASK_OPT_HTM) && TARGET_CPU_HTM && TARGET_ZARCH)
13444 target_flags |= MASK_OPT_HTM;
13445
13446 if (target_flags_explicit & MASK_OPT_VX)
13447 {
13448 if (TARGET_OPT_VX)
13449 {
13450 if (!TARGET_CPU_VX)
13451 error ("hardware vector support not available on %s",
13452 s390_arch_string);
13453 if (TARGET_SOFT_FLOAT)
13454 error ("hardware vector support not available with -msoft-float");
13455 }
13456 }
13457 else if (TARGET_CPU_VX)
13458 /* Enable vector support if available and not explicitly disabled
13459 by user. E.g. with -m31 -march=z13 -mzarch */
13460 target_flags |= MASK_OPT_VX;
13461
13462 if (TARGET_HARD_DFP && !TARGET_DFP)
13463 {
13464 if (target_flags_explicit & MASK_HARD_DFP)
13465 {
13466 if (!TARGET_CPU_DFP)
13467 error ("hardware decimal floating point instructions"
13468 " not available on %s", s390_arch_string);
13469 if (!TARGET_ZARCH)
13470 error ("hardware decimal floating point instructions"
13471 " not available in ESA/390 mode");
13472 }
13473 else
13474 target_flags &= ~MASK_HARD_DFP;
13475 }
13476
13477 if ((target_flags_explicit & MASK_SOFT_FLOAT) && TARGET_SOFT_FLOAT)
13478 {
13479 if ((target_flags_explicit & MASK_HARD_DFP) && TARGET_HARD_DFP)
13480 error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");
13481
13482 target_flags &= ~MASK_HARD_DFP;
13483 }
13484
13485 /* Set processor cost function. */
13486 switch (s390_tune)
13487 {
13488 case PROCESSOR_2084_Z990:
13489 s390_cost = &z990_cost;
13490 break;
13491 case PROCESSOR_2094_Z9_109:
13492 s390_cost = &z9_109_cost;
13493 break;
13494 case PROCESSOR_2097_Z10:
13495 s390_cost = &z10_cost;
13496 break;
13497 case PROCESSOR_2817_Z196:
13498 s390_cost = &z196_cost;
13499 break;
13500 case PROCESSOR_2827_ZEC12:
13501 case PROCESSOR_2964_Z13:
13502 s390_cost = &zEC12_cost;
13503 break;
13504 default:
13505 s390_cost = &z900_cost;
13506 }
13507
13508 if (TARGET_BACKCHAIN && TARGET_PACKED_STACK && TARGET_HARD_FLOAT)
13509 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
13510 "in combination");
13511
13512 if (s390_stack_size)
13513 {
13514 if (s390_stack_guard >= s390_stack_size)
13515 error ("stack size must be greater than the stack guard value");
13516 else if (s390_stack_size > 1 << 16)
13517 error ("stack size must not be greater than 64k");
13518 }
13519 else if (s390_stack_guard)
13520 error ("-mstack-guard implies use of -mstack-size");
13521
13522 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
13523 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
13524 target_flags |= MASK_LONG_DOUBLE_128;
13525 #endif
13526
13527 if (s390_tune == PROCESSOR_2097_Z10
13528 || s390_tune == PROCESSOR_2817_Z196
13529 || s390_tune == PROCESSOR_2827_ZEC12
13530 || s390_tune == PROCESSOR_2964_Z13)
13531 {
13532 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
13533 global_options.x_param_values,
13534 global_options_set.x_param_values);
13535 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
13536 global_options.x_param_values,
13537 global_options_set.x_param_values);
13538 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
13539 global_options.x_param_values,
13540 global_options_set.x_param_values);
13541 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
13542 global_options.x_param_values,
13543 global_options_set.x_param_values);
13544 }
13545
13546 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
13547 global_options.x_param_values,
13548 global_options_set.x_param_values);
13549 /* Values for loop prefetching. */
13550 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
13551 global_options.x_param_values,
13552 global_options_set.x_param_values);
13553 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
13554 global_options.x_param_values,
13555 global_options_set.x_param_values);
13556 /* The s390 cache hierarchy has more than two levels and the caches
13557 are much larger.  Since we are always running virtualized, assume
13558 that we only get a small part of the caches above L1. */
13559 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
13560 global_options.x_param_values,
13561 global_options_set.x_param_values);
13562 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
13563 global_options.x_param_values,
13564 global_options_set.x_param_values);
13565 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
13566 global_options.x_param_values,
13567 global_options_set.x_param_values);
13568
13569 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
13570 requires the arch flags to be evaluated already. Since prefetching
13571 is beneficial on s390, we enable it if available. */
13572 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
13573 flag_prefetch_loop_arrays = 1;
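/* Effectively this turns on -fprefetch-loop-arrays at -O3 and above on
prefetch-capable s390 targets unless the user chose otherwise. */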
13574
13575 /* Use the alternative scheduling-pressure algorithm by default. */
13576 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
13577 global_options.x_param_values,
13578 global_options_set.x_param_values);
13579
13580 if (TARGET_TPF)
13581 {
13582 /* Don't emit DWARF3/4 unless specifically selected. The TPF
13583 debuggers do not yet support DWARF 3/4. */
13584 if (!global_options_set.x_dwarf_strict)
13585 dwarf_strict = 1;
13586 if (!global_options_set.x_dwarf_version)
13587 dwarf_version = 2;
13588 }
13589
13590 /* Register a target-specific optimization-and-lowering pass
13591 to run immediately before prologue and epilogue generation.
13592
13593 Registering the pass must be done at start up. It's
13594 convenient to do it here. */
13595 opt_pass *new_pass = new pass_s390_early_mach (g);
13596 struct register_pass_info insert_pass_s390_early_mach =
13597 {
13598 new_pass, /* pass */
13599 "pro_and_epilogue", /* reference_pass_name */
13600 1, /* ref_pass_instance_number */
13601 PASS_POS_INSERT_BEFORE /* pos_op */
13602 };
13603 register_pass (&insert_pass_s390_early_mach);
13604 }
13605
13606 /* Implement TARGET_USE_BY_PIECES_INFRASTRUCTURE_P. */
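/* Only sizes that can be handled with a single load/store pair are
moved by pieces: 1, 2 and 4 bytes always, and 8 bytes when the
64-bit z/Architecture instructions are available. */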
13607
13608 static bool
13609 s390_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
13610 unsigned int align ATTRIBUTE_UNUSED,
13611 enum by_pieces_operation op ATTRIBUTE_UNUSED,
13612 bool speed_p ATTRIBUTE_UNUSED)
13613 {
13614 return (size == 1 || size == 2
13615 || size == 4 || (TARGET_ZARCH && size == 8));
13616 }
13617
13618 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
13619
13620 static void
13621 s390_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
13622 {
13623 tree sfpc = s390_builtin_decls[S390_BUILTIN_s390_sfpc];
13624 tree efpc = s390_builtin_decls[S390_BUILTIN_s390_efpc];
13625 tree call_efpc = build_call_expr (efpc, 0);
13626 tree fenv_var = create_tmp_var (unsigned_type_node);
13627
13628 #define FPC_EXCEPTION_MASK HOST_WIDE_INT_UC (0xf8000000)
13629 #define FPC_FLAGS_MASK HOST_WIDE_INT_UC (0x00f80000)
13630 #define FPC_DXC_MASK HOST_WIDE_INT_UC (0x0000ff00)
13631 #define FPC_EXCEPTION_MASK_SHIFT HOST_WIDE_INT_UC (24)
13632 #define FPC_FLAGS_SHIFT HOST_WIDE_INT_UC (16)
13633 #define FPC_DXC_SHIFT HOST_WIDE_INT_UC (8)
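/* These masks assume the usual S/390 FPC register layout: the IEEE
exception mask bits in the leftmost byte, the IEEE flag bits in the
second byte and the data exception code (DXC) in the third byte. */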
13634
13635 /* Generates the equivalent of feholdexcept (&fenv_var)
13636
13637 fenv_var = __builtin_s390_efpc ();
13638 __builtin_s390_sfpc (fenv_var & mask) */
13639 tree old_fpc = build2 (MODIFY_EXPR, unsigned_type_node, fenv_var, call_efpc);
13640 tree new_fpc =
13641 build2 (BIT_AND_EXPR, unsigned_type_node, fenv_var,
13642 build_int_cst (unsigned_type_node,
13643 ~(FPC_DXC_MASK | FPC_FLAGS_MASK |
13644 FPC_EXCEPTION_MASK)));
13645 tree set_new_fpc = build_call_expr (sfpc, 1, new_fpc);
13646 *hold = build2 (COMPOUND_EXPR, void_type_node, old_fpc, set_new_fpc);
13647
13648 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT)
13649
13650 __builtin_s390_sfpc (__builtin_s390_efpc () & mask) */
13651 new_fpc = build2 (BIT_AND_EXPR, unsigned_type_node, call_efpc,
13652 build_int_cst (unsigned_type_node,
13653 ~(FPC_DXC_MASK | FPC_FLAGS_MASK)));
13654 *clear = build_call_expr (sfpc, 1, new_fpc);
13655
13656 /* Generates the equivalent of feupdateenv (fenv_var)
13657
13658 old_fpc = __builtin_s390_efpc ();
13659 __builtin_s390_sfpc (fenv_var);
13660 __atomic_feraiseexcept ((old_fpc & FPC_FLAGS_MASK) >> FPC_FLAGS_SHIFT); */
13661
13662 old_fpc = create_tmp_var (unsigned_type_node);
13663 tree store_old_fpc = build2 (MODIFY_EXPR, void_type_node,
13664 old_fpc, call_efpc);
13665
13666 set_new_fpc = build_call_expr (sfpc, 1, fenv_var);
13667
13668 tree raise_old_except = build2 (BIT_AND_EXPR, unsigned_type_node, old_fpc,
13669 build_int_cst (unsigned_type_node,
13670 FPC_FLAGS_MASK));
13671 raise_old_except = build2 (RSHIFT_EXPR, unsigned_type_node, raise_old_except,
13672 build_int_cst (unsigned_type_node,
13673 FPC_FLAGS_SHIFT));
13674 tree atomic_feraiseexcept
13675 = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
13676 raise_old_except = build_call_expr (atomic_feraiseexcept,
13677 1, raise_old_except);
13678
13679 *update = build2 (COMPOUND_EXPR, void_type_node,
13680 build2 (COMPOUND_EXPR, void_type_node,
13681 store_old_fpc, set_new_fpc),
13682 raise_old_except);
13683
13684 #undef FPC_EXCEPTION_MASK
13685 #undef FPC_FLAGS_MASK
13686 #undef FPC_DXC_MASK
13687 #undef FPC_EXCEPTION_MASK_SHIFT
13688 #undef FPC_FLAGS_SHIFT
13689 #undef FPC_DXC_SHIFT
13690 }
13691
13692 /* Return the vector mode to be used for inner mode MODE when doing
13693 vectorization. */
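/* With vector support enabled DFmode maps to V2DFmode, SImode to
V4SImode and so on, i.e. one 128-bit vector register per vector;
otherwise word_mode is returned and the loop stays scalar. */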
13694 static machine_mode
13695 s390_preferred_simd_mode (machine_mode mode)
13696 {
13697 if (TARGET_VX)
13698 switch (mode)
13699 {
13700 case DFmode:
13701 return V2DFmode;
13702 case DImode:
13703 return V2DImode;
13704 case SImode:
13705 return V4SImode;
13706 case HImode:
13707 return V8HImode;
13708 case QImode:
13709 return V16QImode;
13710 default:;
13711 }
13712 return word_mode;
13713 }
13714
13715 /* Our hardware does not require vectors to be strictly aligned. */
13716 static bool
13717 s390_support_vector_misalignment (machine_mode mode ATTRIBUTE_UNUSED,
13718 const_tree type ATTRIBUTE_UNUSED,
13719 int misalignment ATTRIBUTE_UNUSED,
13720 bool is_packed ATTRIBUTE_UNUSED)
13721 {
13722 return true;
13723 }
13724
13725 /* The vector ABI requires vector types to be aligned on an 8 byte
13726 boundary (our stack alignment).  However, we allow this to be
13727 overridden by the user, even though doing so definitely breaks the ABI. */
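/* For example a 16 byte vector type gets MIN (64, 128) = 64 bits,
i.e. 8 byte alignment, while an 8 byte vector keeps its natural
64 bit alignment. */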
13728 static HOST_WIDE_INT
13729 s390_vector_alignment (const_tree type)
13730 {
13731 if (!TARGET_VX_ABI)
13732 return default_vector_alignment (type);
13733
13734 if (TYPE_USER_ALIGN (type))
13735 return TYPE_ALIGN (type);
13736
13737 return MIN (64, tree_to_shwi (TYPE_SIZE (type)));
13738 }
13739
13740 /* Implement TARGET_ASM_FILE_END. */
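/* Besides the usual executable-stack marker this emits the vector ABI
GNU object attribute (tag 8) whenever a public variable or function
makes the vector ABI observable, so tools can detect ABI mismatches. */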
13741 static void
13742 s390_asm_file_end (void)
13743 {
13744 #ifdef HAVE_AS_GNU_ATTRIBUTE
13745 varpool_node *vnode;
13746 cgraph_node *cnode;
13747
13748 FOR_EACH_VARIABLE (vnode)
13749 if (TREE_PUBLIC (vnode->decl))
13750 s390_check_type_for_vector_abi (TREE_TYPE (vnode->decl), false, false);
13751
13752 FOR_EACH_FUNCTION (cnode)
13753 if (TREE_PUBLIC (cnode->decl))
13754 s390_check_type_for_vector_abi (TREE_TYPE (cnode->decl), false, false);
13755
13756
13757 if (s390_vector_abi != 0)
13758 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
13759 s390_vector_abi);
13760 #endif
13761 file_end_indicate_exec_stack ();
13762 }
13763
13764 /* Return true if TYPE is a vector bool type. */
13765 static inline bool
13766 s390_vector_bool_type_p (const_tree type)
13767 {
13768 return TYPE_VECTOR_OPAQUE (type);
13769 }
13770
13771 /* Return the diagnostic message string if the binary operation OP is
13772 not permitted on TYPE1 and TYPE2, NULL otherwise. */
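/* For example adding a 'vector signed int' to a 'vector unsigned int'
is diagnosed with the signedness message below, and multiplying two
vector bool operands is rejected as well. */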
13773 static const char*
13774 s390_invalid_binary_op (int op ATTRIBUTE_UNUSED, const_tree type1, const_tree type2)
13775 {
13776 bool bool1_p, bool2_p;
13777 bool plusminus_p;
13778 bool muldiv_p;
13779 bool compare_p;
13780 machine_mode mode1, mode2;
13781
13782 if (!TARGET_ZVECTOR)
13783 return NULL;
13784
13785 if (!VECTOR_TYPE_P (type1) || !VECTOR_TYPE_P (type2))
13786 return NULL;
13787
13788 bool1_p = s390_vector_bool_type_p (type1);
13789 bool2_p = s390_vector_bool_type_p (type2);
13790
13791 /* Mixing signed and unsigned types is forbidden for all
13792 operators. */
13793 if (!bool1_p && !bool2_p
13794 && TYPE_UNSIGNED (type1) != TYPE_UNSIGNED (type2))
13795 return N_("types differ in signedness");
13796
13797 plusminus_p = (op == PLUS_EXPR || op == MINUS_EXPR);
13798 muldiv_p = (op == MULT_EXPR || op == RDIV_EXPR || op == TRUNC_DIV_EXPR
13799 || op == CEIL_DIV_EXPR || op == FLOOR_DIV_EXPR
13800 || op == ROUND_DIV_EXPR);
13801 compare_p = (op == LT_EXPR || op == LE_EXPR || op == GT_EXPR || op == GE_EXPR
13802 || op == EQ_EXPR || op == NE_EXPR);
13803
13804 if (bool1_p && bool2_p && (plusminus_p || muldiv_p))
13805 return N_("binary operator does not support two vector bool operands");
13806
13807 if (bool1_p != bool2_p && (muldiv_p || compare_p))
13808 return N_("binary operator does not support vector bool operand");
13809
13810 mode1 = TYPE_MODE (type1);
13811 mode2 = TYPE_MODE (type2);
13812
13813 if (bool1_p != bool2_p && plusminus_p
13814 && (GET_MODE_CLASS (mode1) == MODE_VECTOR_FLOAT
13815 || GET_MODE_CLASS (mode2) == MODE_VECTOR_FLOAT))
13816 return N_("binary operator does not support mixing vector "
13817 "bool with floating point vector operands");
13818
13819 return NULL;
13820 }
13821
13822 /* Initialize GCC target structure. */
13823
13824 #undef TARGET_ASM_ALIGNED_HI_OP
13825 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
13826 #undef TARGET_ASM_ALIGNED_DI_OP
13827 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
13828 #undef TARGET_ASM_INTEGER
13829 #define TARGET_ASM_INTEGER s390_assemble_integer
13830
13831 #undef TARGET_ASM_OPEN_PAREN
13832 #define TARGET_ASM_OPEN_PAREN ""
13833
13834 #undef TARGET_ASM_CLOSE_PAREN
13835 #define TARGET_ASM_CLOSE_PAREN ""
13836
13837 #undef TARGET_OPTION_OVERRIDE
13838 #define TARGET_OPTION_OVERRIDE s390_option_override
13839
13840 #undef TARGET_ENCODE_SECTION_INFO
13841 #define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
13842
13843 #undef TARGET_SCALAR_MODE_SUPPORTED_P
13844 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
13845
13846 #ifdef HAVE_AS_TLS
13847 #undef TARGET_HAVE_TLS
13848 #define TARGET_HAVE_TLS true
13849 #endif
13850 #undef TARGET_CANNOT_FORCE_CONST_MEM
13851 #define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem
13852
13853 #undef TARGET_DELEGITIMIZE_ADDRESS
13854 #define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address
13855
13856 #undef TARGET_LEGITIMIZE_ADDRESS
13857 #define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address
13858
13859 #undef TARGET_RETURN_IN_MEMORY
13860 #define TARGET_RETURN_IN_MEMORY s390_return_in_memory
13861
13862 #undef TARGET_INIT_BUILTINS
13863 #define TARGET_INIT_BUILTINS s390_init_builtins
13864 #undef TARGET_EXPAND_BUILTIN
13865 #define TARGET_EXPAND_BUILTIN s390_expand_builtin
13866 #undef TARGET_BUILTIN_DECL
13867 #define TARGET_BUILTIN_DECL s390_builtin_decl
13868
13869 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
13870 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra
13871
13872 #undef TARGET_ASM_OUTPUT_MI_THUNK
13873 #define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
13874 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
13875 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
13876
13877 #undef TARGET_SCHED_ADJUST_PRIORITY
13878 #define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
13879 #undef TARGET_SCHED_ISSUE_RATE
13880 #define TARGET_SCHED_ISSUE_RATE s390_issue_rate
13881 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
13882 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead
13883
13884 #undef TARGET_SCHED_VARIABLE_ISSUE
13885 #define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
13886 #undef TARGET_SCHED_REORDER
13887 #define TARGET_SCHED_REORDER s390_sched_reorder
13888 #undef TARGET_SCHED_INIT
13889 #define TARGET_SCHED_INIT s390_sched_init
13890
13891 #undef TARGET_CANNOT_COPY_INSN_P
13892 #define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
13893 #undef TARGET_RTX_COSTS
13894 #define TARGET_RTX_COSTS s390_rtx_costs
13895 #undef TARGET_ADDRESS_COST
13896 #define TARGET_ADDRESS_COST s390_address_cost
13897 #undef TARGET_REGISTER_MOVE_COST
13898 #define TARGET_REGISTER_MOVE_COST s390_register_move_cost
13899 #undef TARGET_MEMORY_MOVE_COST
13900 #define TARGET_MEMORY_MOVE_COST s390_memory_move_cost
13901
13902 #undef TARGET_MACHINE_DEPENDENT_REORG
13903 #define TARGET_MACHINE_DEPENDENT_REORG s390_reorg
13904
13905 #undef TARGET_VALID_POINTER_MODE
13906 #define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode
13907
13908 #undef TARGET_BUILD_BUILTIN_VA_LIST
13909 #define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
13910 #undef TARGET_EXPAND_BUILTIN_VA_START
13911 #define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
13912 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
13913 #define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg
13914
13915 #undef TARGET_PROMOTE_FUNCTION_MODE
13916 #define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
13917 #undef TARGET_PASS_BY_REFERENCE
13918 #define TARGET_PASS_BY_REFERENCE s390_pass_by_reference
13919
13920 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
13921 #define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
13922 #undef TARGET_FUNCTION_ARG
13923 #define TARGET_FUNCTION_ARG s390_function_arg
13924 #undef TARGET_FUNCTION_ARG_ADVANCE
13925 #define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
13926 #undef TARGET_FUNCTION_VALUE
13927 #define TARGET_FUNCTION_VALUE s390_function_value
13928 #undef TARGET_LIBCALL_VALUE
13929 #define TARGET_LIBCALL_VALUE s390_libcall_value
13930 #undef TARGET_STRICT_ARGUMENT_NAMING
13931 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
13932
13933 #undef TARGET_KEEP_LEAF_WHEN_PROFILED
13934 #define TARGET_KEEP_LEAF_WHEN_PROFILED s390_keep_leaf_when_profiled
13935
13936 #undef TARGET_FIXED_CONDITION_CODE_REGS
13937 #define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs
13938
13939 #undef TARGET_CC_MODES_COMPATIBLE
13940 #define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible
13941
13942 #undef TARGET_INVALID_WITHIN_DOLOOP
13943 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_insn_null
13944
13945 #ifdef HAVE_AS_TLS
13946 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
13947 #define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
13948 #endif
13949
13950 #undef TARGET_DWARF_FRAME_REG_MODE
13951 #define TARGET_DWARF_FRAME_REG_MODE s390_dwarf_frame_reg_mode
13952
13953 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
13954 #undef TARGET_MANGLE_TYPE
13955 #define TARGET_MANGLE_TYPE s390_mangle_type
13956 #endif
13957
13958 #undef TARGET_SCALAR_MODE_SUPPORTED_P
13959 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
13960
13961 #undef TARGET_VECTOR_MODE_SUPPORTED_P
13962 #define TARGET_VECTOR_MODE_SUPPORTED_P s390_vector_mode_supported_p
13963
13964 #undef TARGET_PREFERRED_RELOAD_CLASS
13965 #define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class
13966
13967 #undef TARGET_SECONDARY_RELOAD
13968 #define TARGET_SECONDARY_RELOAD s390_secondary_reload
13969
13970 #undef TARGET_LIBGCC_CMP_RETURN_MODE
13971 #define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode
13972
13973 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
13974 #define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode
13975
13976 #undef TARGET_LEGITIMATE_ADDRESS_P
13977 #define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p
13978
13979 #undef TARGET_LEGITIMATE_CONSTANT_P
13980 #define TARGET_LEGITIMATE_CONSTANT_P s390_legitimate_constant_p
13981
13982 #undef TARGET_LRA_P
13983 #define TARGET_LRA_P s390_lra_p
13984
13985 #undef TARGET_CAN_ELIMINATE
13986 #define TARGET_CAN_ELIMINATE s390_can_eliminate
13987
13988 #undef TARGET_CONDITIONAL_REGISTER_USAGE
13989 #define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage
13990
13991 #undef TARGET_LOOP_UNROLL_ADJUST
13992 #define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust
13993
13994 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
13995 #define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
13996 #undef TARGET_TRAMPOLINE_INIT
13997 #define TARGET_TRAMPOLINE_INIT s390_trampoline_init
13998
13999 #undef TARGET_UNWIND_WORD_MODE
14000 #define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode
14001
14002 #undef TARGET_CANONICALIZE_COMPARISON
14003 #define TARGET_CANONICALIZE_COMPARISON s390_canonicalize_comparison
14004
14005 #undef TARGET_HARD_REGNO_SCRATCH_OK
14006 #define TARGET_HARD_REGNO_SCRATCH_OK s390_hard_regno_scratch_ok
14007
14008 #undef TARGET_ATTRIBUTE_TABLE
14009 #define TARGET_ATTRIBUTE_TABLE s390_attribute_table
14010
14011 #undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
14012 #define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true
14013
14014 #undef TARGET_SET_UP_BY_PROLOGUE
14015 #define TARGET_SET_UP_BY_PROLOGUE s300_set_up_by_prologue
14016
14017 #undef TARGET_USE_BY_PIECES_INFRASTRUCTURE_P
14018 #define TARGET_USE_BY_PIECES_INFRASTRUCTURE_P \
14019 s390_use_by_pieces_infrastructure_p
14020
14021 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
14022 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV s390_atomic_assign_expand_fenv
14023
14024 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
14025 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN s390_invalid_arg_for_unprototyped_fn
14026
14027 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
14028 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE s390_preferred_simd_mode
14029
14030 #undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
14031 #define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT s390_support_vector_misalignment
14032
14033 #undef TARGET_VECTOR_ALIGNMENT
14034 #define TARGET_VECTOR_ALIGNMENT s390_vector_alignment
14035
14036 #undef TARGET_INVALID_BINARY_OP
14037 #define TARGET_INVALID_BINARY_OP s390_invalid_binary_op
14038
14039 #undef TARGET_ASM_FILE_END
14040 #define TARGET_ASM_FILE_END s390_asm_file_end
14041
14042 struct gcc_target targetm = TARGET_INITIALIZER;
14043
14044 #include "gt-s390.h"