1 /* Subroutines used for code generation on IBM S/390 and zSeries
2 Copyright (C) 1999-2016 Free Software Foundation, Inc.
3 Contributed by Hartmut Penner (hpenner@de.ibm.com) and
4 Ulrich Weigand (uweigand@de.ibm.com) and
5 Andreas Krebbel (Andreas.Krebbel@de.ibm.com).
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "backend.h"
27 #include "target.h"
28 #include "target-globals.h"
29 #include "rtl.h"
30 #include "tree.h"
31 #include "gimple.h"
32 #include "cfghooks.h"
33 #include "cfgloop.h"
34 #include "df.h"
35 #include "tm_p.h"
36 #include "stringpool.h"
37 #include "expmed.h"
38 #include "optabs.h"
39 #include "regs.h"
40 #include "emit-rtl.h"
41 #include "recog.h"
42 #include "cgraph.h"
43 #include "diagnostic-core.h"
44 #include "diagnostic.h"
45 #include "alias.h"
46 #include "fold-const.h"
47 #include "print-tree.h"
48 #include "stor-layout.h"
49 #include "varasm.h"
50 #include "calls.h"
51 #include "conditions.h"
52 #include "output.h"
53 #include "insn-attr.h"
54 #include "flags.h"
55 #include "except.h"
56 #include "dojump.h"
57 #include "explow.h"
58 #include "stmt.h"
59 #include "expr.h"
60 #include "reload.h"
61 #include "cfgrtl.h"
62 #include "cfganal.h"
63 #include "lcm.h"
64 #include "cfgbuild.h"
65 #include "cfgcleanup.h"
66 #include "debug.h"
67 #include "langhooks.h"
68 #include "internal-fn.h"
69 #include "gimple-fold.h"
70 #include "tree-eh.h"
71 #include "gimplify.h"
72 #include "params.h"
73 #include "opts.h"
74 #include "tree-pass.h"
75 #include "context.h"
76 #include "builtins.h"
77 #include "rtl-iter.h"
78 #include "intl.h"
79 #include "tm-constrs.h"
80
81 /* This file should be included last. */
82 #include "target-def.h"
83
84 /* Remember the last target of s390_set_current_function. */
85 static GTY(()) tree s390_previous_fndecl;
86
87 /* Define the specific costs for a given cpu. */
88
89 struct processor_costs
90 {
91 /* multiplication */
92 const int m; /* cost of an M instruction. */
93 const int mghi; /* cost of an MGHI instruction. */
94 const int mh; /* cost of an MH instruction. */
95 const int mhi; /* cost of an MHI instruction. */
96 const int ml; /* cost of an ML instruction. */
97 const int mr; /* cost of an MR instruction. */
98 const int ms; /* cost of an MS instruction. */
99 const int msg; /* cost of an MSG instruction. */
100 const int msgf; /* cost of an MSGF instruction. */
101 const int msgfr; /* cost of an MSGFR instruction. */
102 const int msgr; /* cost of an MSGR instruction. */
103 const int msr; /* cost of an MSR instruction. */
104 const int mult_df; /* cost of multiplication in DFmode. */
105 	    const int mxbr;     /* cost of multiplication in TFmode. */
106 /* square root */
107 const int sqxbr; /* cost of square root in TFmode. */
108 const int sqdbr; /* cost of square root in DFmode. */
109 const int sqebr; /* cost of square root in SFmode. */
110 /* multiply and add */
111 const int madbr; /* cost of multiply and add in DFmode. */
112 const int maebr; /* cost of multiply and add in SFmode. */
113 /* division */
114 	    const int dxbr;     /* cost of division in TFmode. */
115 	    const int ddbr;     /* cost of division in DFmode. */
116 	    const int debr;     /* cost of division in SFmode. */
117 	    const int dlgr;     /* cost of a DLGR instruction. */
118 	    const int dlr;      /* cost of a DLR instruction. */
119 	    const int dr;       /* cost of a DR instruction. */
120 	    const int dsgfr;    /* cost of a DSGFR instruction. */
121 	    const int dsgr;     /* cost of a DSGR instruction. */
122 };
123
124 #define s390_cost ((const struct processor_costs *)(s390_cost_pointer))
125
126 static const
127 struct processor_costs z900_cost =
128 {
129 COSTS_N_INSNS (5), /* M */
130 COSTS_N_INSNS (10), /* MGHI */
131 COSTS_N_INSNS (5), /* MH */
132 COSTS_N_INSNS (4), /* MHI */
133 COSTS_N_INSNS (5), /* ML */
134 COSTS_N_INSNS (5), /* MR */
135 COSTS_N_INSNS (4), /* MS */
136 COSTS_N_INSNS (15), /* MSG */
137 COSTS_N_INSNS (7), /* MSGF */
138 COSTS_N_INSNS (7), /* MSGFR */
139 COSTS_N_INSNS (10), /* MSGR */
140 COSTS_N_INSNS (4), /* MSR */
141 COSTS_N_INSNS (7), /* multiplication in DFmode */
142 COSTS_N_INSNS (13), /* MXBR */
143 COSTS_N_INSNS (136), /* SQXBR */
144 COSTS_N_INSNS (44), /* SQDBR */
145 COSTS_N_INSNS (35), /* SQEBR */
146 COSTS_N_INSNS (18), /* MADBR */
147 COSTS_N_INSNS (13), /* MAEBR */
148 COSTS_N_INSNS (134), /* DXBR */
149 COSTS_N_INSNS (30), /* DDBR */
150 COSTS_N_INSNS (27), /* DEBR */
151 COSTS_N_INSNS (220), /* DLGR */
152 COSTS_N_INSNS (34), /* DLR */
153 COSTS_N_INSNS (34), /* DR */
154 COSTS_N_INSNS (32), /* DSGFR */
155 COSTS_N_INSNS (32), /* DSGR */
156 };
157
158 static const
159 struct processor_costs z990_cost =
160 {
161 COSTS_N_INSNS (4), /* M */
162 COSTS_N_INSNS (2), /* MGHI */
163 COSTS_N_INSNS (2), /* MH */
164 COSTS_N_INSNS (2), /* MHI */
165 COSTS_N_INSNS (4), /* ML */
166 COSTS_N_INSNS (4), /* MR */
167 COSTS_N_INSNS (5), /* MS */
168 COSTS_N_INSNS (6), /* MSG */
169 COSTS_N_INSNS (4), /* MSGF */
170 COSTS_N_INSNS (4), /* MSGFR */
171 COSTS_N_INSNS (4), /* MSGR */
172 COSTS_N_INSNS (4), /* MSR */
173 COSTS_N_INSNS (1), /* multiplication in DFmode */
174 COSTS_N_INSNS (28), /* MXBR */
175 COSTS_N_INSNS (130), /* SQXBR */
176 COSTS_N_INSNS (66), /* SQDBR */
177 COSTS_N_INSNS (38), /* SQEBR */
178 COSTS_N_INSNS (1), /* MADBR */
179 COSTS_N_INSNS (1), /* MAEBR */
180 COSTS_N_INSNS (60), /* DXBR */
181 COSTS_N_INSNS (40), /* DDBR */
182 COSTS_N_INSNS (26), /* DEBR */
183 COSTS_N_INSNS (176), /* DLGR */
184 COSTS_N_INSNS (31), /* DLR */
185 COSTS_N_INSNS (31), /* DR */
186 COSTS_N_INSNS (31), /* DSGFR */
187 COSTS_N_INSNS (31), /* DSGR */
188 };
189
190 static const
191 struct processor_costs z9_109_cost =
192 {
193 COSTS_N_INSNS (4), /* M */
194 COSTS_N_INSNS (2), /* MGHI */
195 COSTS_N_INSNS (2), /* MH */
196 COSTS_N_INSNS (2), /* MHI */
197 COSTS_N_INSNS (4), /* ML */
198 COSTS_N_INSNS (4), /* MR */
199 COSTS_N_INSNS (5), /* MS */
200 COSTS_N_INSNS (6), /* MSG */
201 COSTS_N_INSNS (4), /* MSGF */
202 COSTS_N_INSNS (4), /* MSGFR */
203 COSTS_N_INSNS (4), /* MSGR */
204 COSTS_N_INSNS (4), /* MSR */
205 COSTS_N_INSNS (1), /* multiplication in DFmode */
206 COSTS_N_INSNS (28), /* MXBR */
207 COSTS_N_INSNS (130), /* SQXBR */
208 COSTS_N_INSNS (66), /* SQDBR */
209 COSTS_N_INSNS (38), /* SQEBR */
210 COSTS_N_INSNS (1), /* MADBR */
211 COSTS_N_INSNS (1), /* MAEBR */
212 COSTS_N_INSNS (60), /* DXBR */
213 COSTS_N_INSNS (40), /* DDBR */
214 COSTS_N_INSNS (26), /* DEBR */
215 COSTS_N_INSNS (30), /* DLGR */
216 COSTS_N_INSNS (23), /* DLR */
217 COSTS_N_INSNS (23), /* DR */
218 COSTS_N_INSNS (24), /* DSGFR */
219 COSTS_N_INSNS (24), /* DSGR */
220 };
221
222 static const
223 struct processor_costs z10_cost =
224 {
225 COSTS_N_INSNS (10), /* M */
226 COSTS_N_INSNS (10), /* MGHI */
227 COSTS_N_INSNS (10), /* MH */
228 COSTS_N_INSNS (10), /* MHI */
229 COSTS_N_INSNS (10), /* ML */
230 COSTS_N_INSNS (10), /* MR */
231 COSTS_N_INSNS (10), /* MS */
232 COSTS_N_INSNS (10), /* MSG */
233 COSTS_N_INSNS (10), /* MSGF */
234 COSTS_N_INSNS (10), /* MSGFR */
235 COSTS_N_INSNS (10), /* MSGR */
236 COSTS_N_INSNS (10), /* MSR */
237 COSTS_N_INSNS (1) , /* multiplication in DFmode */
238 COSTS_N_INSNS (50), /* MXBR */
239 COSTS_N_INSNS (120), /* SQXBR */
240 COSTS_N_INSNS (52), /* SQDBR */
241 COSTS_N_INSNS (38), /* SQEBR */
242 COSTS_N_INSNS (1), /* MADBR */
243 COSTS_N_INSNS (1), /* MAEBR */
244 COSTS_N_INSNS (111), /* DXBR */
245 COSTS_N_INSNS (39), /* DDBR */
246 COSTS_N_INSNS (32), /* DEBR */
247 COSTS_N_INSNS (160), /* DLGR */
248 COSTS_N_INSNS (71), /* DLR */
249 COSTS_N_INSNS (71), /* DR */
250 COSTS_N_INSNS (71), /* DSGFR */
251 COSTS_N_INSNS (71), /* DSGR */
252 };
253
254 static const
255 struct processor_costs z196_cost =
256 {
257 COSTS_N_INSNS (7), /* M */
258 COSTS_N_INSNS (5), /* MGHI */
259 COSTS_N_INSNS (5), /* MH */
260 COSTS_N_INSNS (5), /* MHI */
261 COSTS_N_INSNS (7), /* ML */
262 COSTS_N_INSNS (7), /* MR */
263 COSTS_N_INSNS (6), /* MS */
264 COSTS_N_INSNS (8), /* MSG */
265 COSTS_N_INSNS (6), /* MSGF */
266 COSTS_N_INSNS (6), /* MSGFR */
267 COSTS_N_INSNS (8), /* MSGR */
268 COSTS_N_INSNS (6), /* MSR */
269 COSTS_N_INSNS (1) , /* multiplication in DFmode */
270 COSTS_N_INSNS (40), /* MXBR B+40 */
271 COSTS_N_INSNS (100), /* SQXBR B+100 */
272 COSTS_N_INSNS (42), /* SQDBR B+42 */
273 COSTS_N_INSNS (28), /* SQEBR B+28 */
274 COSTS_N_INSNS (1), /* MADBR B */
275 COSTS_N_INSNS (1), /* MAEBR B */
276 COSTS_N_INSNS (101), /* DXBR B+101 */
277 COSTS_N_INSNS (29), /* DDBR */
278 COSTS_N_INSNS (22), /* DEBR */
279 COSTS_N_INSNS (160), /* DLGR cracked */
280 COSTS_N_INSNS (160), /* DLR cracked */
281 COSTS_N_INSNS (160), /* DR expanded */
282 COSTS_N_INSNS (160), /* DSGFR cracked */
283 COSTS_N_INSNS (160), /* DSGR cracked */
284 };
285
286 static const
287 struct processor_costs zEC12_cost =
288 {
289 COSTS_N_INSNS (7), /* M */
290 COSTS_N_INSNS (5), /* MGHI */
291 COSTS_N_INSNS (5), /* MH */
292 COSTS_N_INSNS (5), /* MHI */
293 COSTS_N_INSNS (7), /* ML */
294 COSTS_N_INSNS (7), /* MR */
295 COSTS_N_INSNS (6), /* MS */
296 COSTS_N_INSNS (8), /* MSG */
297 COSTS_N_INSNS (6), /* MSGF */
298 COSTS_N_INSNS (6), /* MSGFR */
299 COSTS_N_INSNS (8), /* MSGR */
300 COSTS_N_INSNS (6), /* MSR */
301 COSTS_N_INSNS (1) , /* multiplication in DFmode */
302 COSTS_N_INSNS (40), /* MXBR B+40 */
303 COSTS_N_INSNS (100), /* SQXBR B+100 */
304 COSTS_N_INSNS (42), /* SQDBR B+42 */
305 COSTS_N_INSNS (28), /* SQEBR B+28 */
306 COSTS_N_INSNS (1), /* MADBR B */
307 COSTS_N_INSNS (1), /* MAEBR B */
308 COSTS_N_INSNS (131), /* DXBR B+131 */
309 COSTS_N_INSNS (29), /* DDBR */
310 COSTS_N_INSNS (22), /* DEBR */
311 COSTS_N_INSNS (160), /* DLGR cracked */
312 COSTS_N_INSNS (160), /* DLR cracked */
313 COSTS_N_INSNS (160), /* DR expanded */
314 COSTS_N_INSNS (160), /* DSGFR cracked */
315 COSTS_N_INSNS (160), /* DSGR cracked */
316 };
317
318 static struct
319 {
320 const char *const name;
321 const enum processor_type processor;
322 const struct processor_costs *cost;
323 }
324 const processor_table[] =
325 {
326 { "g5", PROCESSOR_9672_G5, &z900_cost },
327 { "g6", PROCESSOR_9672_G6, &z900_cost },
328 { "z900", PROCESSOR_2064_Z900, &z900_cost },
329 { "z990", PROCESSOR_2084_Z990, &z990_cost },
330 { "z9-109", PROCESSOR_2094_Z9_109, &z9_109_cost },
331 { "z9-ec", PROCESSOR_2094_Z9_EC, &z9_109_cost },
332 { "z10", PROCESSOR_2097_Z10, &z10_cost },
333 { "z196", PROCESSOR_2817_Z196, &z196_cost },
334 { "zEC12", PROCESSOR_2827_ZEC12, &zEC12_cost },
335 { "z13", PROCESSOR_2964_Z13, &zEC12_cost },
336 { "native", PROCESSOR_NATIVE, NULL }
337 };
338
339 extern int reload_completed;
340
341 /* Kept up to date using the SCHED_VARIABLE_ISSUE hook. */
342 static rtx_insn *last_scheduled_insn;
343 #define MAX_SCHED_UNITS 3
344 static int last_scheduled_unit_distance[MAX_SCHED_UNITS];
345
346 /* The maximum score added for an instruction whose unit hasn't been
347 in use for MAX_SCHED_MIX_DISTANCE steps. Increase this value to
348 give instruction mix scheduling more priority over instruction
349 grouping. */
350 #define MAX_SCHED_MIX_SCORE 8
351
352 /* The maximum distance up to which individual scores will be
353 calculated. Everything beyond this gives MAX_SCHED_MIX_SCORE.
354 	   Increase this with the OOO window size of the machine. */
355 #define MAX_SCHED_MIX_DISTANCE 100
356
357 /* Structure used to hold the components of a S/390 memory
358 address. A legitimate address on S/390 is of the general
359 form
360 base + index + displacement
361 where any of the components is optional.
362
363 base and index are registers of the class ADDR_REGS,
364 displacement is an unsigned 12-bit immediate constant. */
365
366 struct s390_address
367 {
368 rtx base;
369 rtx indx;
370 rtx disp;
371 bool pointer;
372 bool literal_pool;
373 };
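/* Illustrative example, not part of the original sources: for a C
   access such as

     int get (int *p, long i) { return p[i]; }

   the memory operand address typically decomposes into base = the
   register holding p, indx = the register holding i * 4, and
   disp = 0, i.e. the classic D(X,B) operand format.  The structure
   above simply records these components once an address RTX has been
   analyzed (see s390_decompose_address further down in this file).  */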
374
375 /* The following structure is embedded in the machine
376 specific part of struct function. */
377
378 struct GTY (()) s390_frame_layout
379 {
380 /* Offset within stack frame. */
381 HOST_WIDE_INT gprs_offset;
382 HOST_WIDE_INT f0_offset;
383 HOST_WIDE_INT f4_offset;
384 HOST_WIDE_INT f8_offset;
385 HOST_WIDE_INT backchain_offset;
386
387 	  /* Numbers of the first and last GPRs for which slots in the
388 	     register save area are reserved. */
389 int first_save_gpr_slot;
390 int last_save_gpr_slot;
391
392 /* Location (FP register number) where GPRs (r0-r15) should
393 be saved to.
394 0 - does not need to be saved at all
395 -1 - stack slot */
396 #define SAVE_SLOT_NONE 0
397 #define SAVE_SLOT_STACK -1
398 signed char gpr_save_slots[16];
399
400 	  /* Numbers of the first and last GPRs to be saved and restored. */
401 int first_save_gpr;
402 int first_restore_gpr;
403 int last_save_gpr;
404 int last_restore_gpr;
405
406 /* Bits standing for floating point registers. Set, if the
407 respective register has to be saved. Starting with reg 16 (f0)
408 at the rightmost bit.
409 Bit 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
410 fpr 15 13 11 9 14 12 10 8 7 5 3 1 6 4 2 0
411 reg 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 */
412 unsigned int fpr_bitmap;
413
414 /* Number of floating point registers f8-f15 which must be saved. */
415 int high_fprs;
416
417 /* Set if return address needs to be saved.
418 This flag is set by s390_return_addr_rtx if it could not use
419 the initial value of r14 and therefore depends on r14 saved
420 to the stack. */
421 bool save_return_addr_p;
422
423 /* Size of stack frame. */
424 HOST_WIDE_INT frame_size;
425 };
426
427 /* Define the structure for the machine field in struct function. */
428
429 struct GTY(()) machine_function
430 {
431 struct s390_frame_layout frame_layout;
432
433 /* Literal pool base register. */
434 rtx base_reg;
435
436 /* True if we may need to perform branch splitting. */
437 bool split_branches_pending_p;
438
439 bool has_landing_pad_p;
440
441 /* True if the current function may contain a tbegin clobbering
442 FPRs. */
443 bool tbegin_p;
444
445 /* For -fsplit-stack support: A stack local which holds a pointer to
446 the stack arguments for a function with a variable number of
447 arguments. This is set at the start of the function and is used
448 to initialize the overflow_arg_area field of the va_list
449 structure. */
450 rtx split_stack_varargs_pointer;
451 };
452
453 	/* A few accessor macros for struct cfun->machine->s390_frame_layout. */
454
455 #define cfun_frame_layout (cfun->machine->frame_layout)
456 #define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
457 #define cfun_save_arg_fprs_p (!!(TARGET_64BIT \
458 ? cfun_frame_layout.fpr_bitmap & 0x0f \
459 : cfun_frame_layout.fpr_bitmap & 0x03))
460 #define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
461 cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
462 #define cfun_set_fpr_save(REGNO) (cfun->machine->frame_layout.fpr_bitmap |= \
463 (1 << (REGNO - FPR0_REGNUM)))
464 #define cfun_fpr_save_p(REGNO) (!!(cfun->machine->frame_layout.fpr_bitmap & \
465 (1 << (REGNO - FPR0_REGNUM))))
466 #define cfun_gpr_save_slot(REGNO) \
467 cfun->machine->frame_layout.gpr_save_slots[REGNO]
468
469 /* Number of GPRs and FPRs used for argument passing. */
470 #define GP_ARG_NUM_REG 5
471 #define FP_ARG_NUM_REG (TARGET_64BIT? 4 : 2)
472 #define VEC_ARG_NUM_REG 8
473
474 /* A couple of shortcuts. */
475 #define CONST_OK_FOR_J(x) \
476 CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
477 #define CONST_OK_FOR_K(x) \
478 CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
479 #define CONST_OK_FOR_Os(x) \
480 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
481 #define CONST_OK_FOR_Op(x) \
482 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
483 #define CONST_OK_FOR_On(x) \
484 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
485
486 #define REGNO_PAIR_OK(REGNO, MODE) \
487 (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
488
489 /* That's the read ahead of the dynamic branch prediction unit in
490 bytes on a z10 (or higher) CPU. */
491 #define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
492
493
494 /* Indicate which ABI has been used for passing vector args.
495 0 - no vector type arguments have been passed where the ABI is relevant
496 1 - the old ABI has been used
497 2 - a vector type argument has been passed either in a vector register
498 or on the stack by value */
499 static int s390_vector_abi = 0;
500
501 /* Set the vector ABI marker if TYPE is subject to the vector ABI
502 switch. The vector ABI affects only vector data types. There are
503 two aspects of the vector ABI relevant here:
504
505 1. vectors >= 16 bytes have an alignment of 8 bytes with the new
506 ABI and natural alignment with the old.
507
508 	   2. vectors <= 16 bytes are passed in VRs or by value on the stack
509 with the new ABI but by reference on the stack with the old.
510
511 If ARG_P is true TYPE is used for a function argument or return
512 value. The ABI marker then is set for all vector data types. If
513 ARG_P is false only type 1 vectors are being checked. */
514
515 static void
516 s390_check_type_for_vector_abi (const_tree type, bool arg_p, bool in_struct_p)
517 {
518 static hash_set<const_tree> visited_types_hash;
519
520 if (s390_vector_abi)
521 return;
522
523 if (type == NULL_TREE || TREE_CODE (type) == ERROR_MARK)
524 return;
525
526 if (visited_types_hash.contains (type))
527 return;
528
529 visited_types_hash.add (type);
530
531 if (VECTOR_TYPE_P (type))
532 {
533 int type_size = int_size_in_bytes (type);
534
535 /* Outside arguments only the alignment is changing and this
536 only happens for vector types >= 16 bytes. */
537 if (!arg_p && type_size < 16)
538 return;
539
540 	      /* In arguments vector types > 16 bytes are passed as before (GCC
541 never enforced the bigger alignment for arguments which was
542 required by the old vector ABI). However, it might still be
543 ABI relevant due to the changed alignment if it is a struct
544 member. */
545 if (arg_p && type_size > 16 && !in_struct_p)
546 return;
547
548 s390_vector_abi = TARGET_VX_ABI ? 2 : 1;
549 }
550 else if (POINTER_TYPE_P (type) || TREE_CODE (type) == ARRAY_TYPE)
551 {
552 /* ARRAY_TYPE: Since with neither of the ABIs we have more than
553 natural alignment there will never be ABI dependent padding
554 in an array type. That's why we do not set in_struct_p to
555 true here. */
556 s390_check_type_for_vector_abi (TREE_TYPE (type), arg_p, in_struct_p);
557 }
558 else if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
559 {
560 tree arg_chain;
561
562 /* Check the return type. */
563 s390_check_type_for_vector_abi (TREE_TYPE (type), true, false);
564
565 for (arg_chain = TYPE_ARG_TYPES (type);
566 arg_chain;
567 arg_chain = TREE_CHAIN (arg_chain))
568 s390_check_type_for_vector_abi (TREE_VALUE (arg_chain), true, false);
569 }
570 else if (RECORD_OR_UNION_TYPE_P (type))
571 {
572 tree field;
573
574 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
575 {
576 if (TREE_CODE (field) != FIELD_DECL)
577 continue;
578
579 s390_check_type_for_vector_abi (TREE_TYPE (field), arg_p, true);
580 }
581 }
582 }
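/* Illustrative sketch of types that are subject to the marker above
   (the typedef names are made up for the example):

     typedef int v4si __attribute__ ((vector_size (16)));
     typedef double v4df __attribute__ ((vector_size (32)));

   Passing a v4si argument is aspect 2 (in a VR with the new ABI, by
   reference with the old one); a v4df struct member is aspect 1 (8
   byte vs. natural alignment).  Either case sets s390_vector_abi to
   2 under TARGET_VX_ABI and to 1 otherwise.  */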
583
584
585 /* System z builtins. */
586
587 #include "s390-builtins.h"
588
589 const unsigned int bflags_builtin[S390_BUILTIN_MAX + 1] =
590 {
591 #undef B_DEF
592 #undef OB_DEF
593 #undef OB_DEF_VAR
594 #define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, ...) BFLAGS,
595 #define OB_DEF(...)
596 #define OB_DEF_VAR(...)
597 #include "s390-builtins.def"
598 0
599 };
600
601 const unsigned int opflags_builtin[S390_BUILTIN_MAX + 1] =
602 {
603 #undef B_DEF
604 #undef OB_DEF
605 #undef OB_DEF_VAR
606 #define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, OPFLAGS, ...) OPFLAGS,
607 #define OB_DEF(...)
608 #define OB_DEF_VAR(...)
609 #include "s390-builtins.def"
610 0
611 };
612
613 const unsigned int bflags_overloaded_builtin[S390_OVERLOADED_BUILTIN_MAX + 1] =
614 {
615 #undef B_DEF
616 #undef OB_DEF
617 #undef OB_DEF_VAR
618 #define B_DEF(...)
619 #define OB_DEF(NAME, FIRST_VAR_NAME, LAST_VAR_NAME, BFLAGS, ...) BFLAGS,
620 #define OB_DEF_VAR(...)
621 #include "s390-builtins.def"
622 0
623 };
624
625 const unsigned int
626 opflags_overloaded_builtin_var[S390_OVERLOADED_BUILTIN_VAR_MAX + 1] =
627 {
628 #undef B_DEF
629 #undef OB_DEF
630 #undef OB_DEF_VAR
631 #define B_DEF(...)
632 #define OB_DEF(...)
633 #define OB_DEF_VAR(NAME, PATTERN, FLAGS, FNTYPE) FLAGS,
634 #include "s390-builtins.def"
635 0
636 };
637
638 tree s390_builtin_types[BT_MAX];
639 tree s390_builtin_fn_types[BT_FN_MAX];
640 tree s390_builtin_decls[S390_BUILTIN_MAX +
641 S390_OVERLOADED_BUILTIN_MAX +
642 S390_OVERLOADED_BUILTIN_VAR_MAX];
643
644 static enum insn_code const code_for_builtin[S390_BUILTIN_MAX + 1] = {
645 #undef B_DEF
646 #undef OB_DEF
647 #undef OB_DEF_VAR
648 #define B_DEF(NAME, PATTERN, ...) CODE_FOR_##PATTERN,
649 #define OB_DEF(...)
650 #define OB_DEF_VAR(...)
651
652 #include "s390-builtins.def"
653 CODE_FOR_nothing
654 };
655
656 static void
657 s390_init_builtins (void)
658 {
659 /* These definitions are being used in s390-builtins.def. */
660 tree returns_twice_attr = tree_cons (get_identifier ("returns_twice"),
661 NULL, NULL);
662 tree noreturn_attr = tree_cons (get_identifier ("noreturn"), NULL, NULL);
663 tree c_uint64_type_node;
664
665 	  /* The uint64_type_node from tree.c is not compatible with the C99
666 uint64_t data type. What we want is c_uint64_type_node from
667 c-common.c. But since backend code is not supposed to interface
668 with the frontend we recreate it here. */
669 if (TARGET_64BIT)
670 c_uint64_type_node = long_unsigned_type_node;
671 else
672 c_uint64_type_node = long_long_unsigned_type_node;
673
674 #undef DEF_TYPE
675 #define DEF_TYPE(INDEX, BFLAGS, NODE, CONST_P) \
676 if (s390_builtin_types[INDEX] == NULL) \
677 s390_builtin_types[INDEX] = (!CONST_P) ? \
678 (NODE) : build_type_variant ((NODE), 1, 0);
679
680 #undef DEF_POINTER_TYPE
681 #define DEF_POINTER_TYPE(INDEX, BFLAGS, INDEX_BASE) \
682 if (s390_builtin_types[INDEX] == NULL) \
683 s390_builtin_types[INDEX] = \
684 build_pointer_type (s390_builtin_types[INDEX_BASE]);
685
686 #undef DEF_DISTINCT_TYPE
687 #define DEF_DISTINCT_TYPE(INDEX, BFLAGS, INDEX_BASE) \
688 if (s390_builtin_types[INDEX] == NULL) \
689 s390_builtin_types[INDEX] = \
690 build_distinct_type_copy (s390_builtin_types[INDEX_BASE]);
691
692 #undef DEF_VECTOR_TYPE
693 #define DEF_VECTOR_TYPE(INDEX, BFLAGS, INDEX_BASE, ELEMENTS) \
694 if (s390_builtin_types[INDEX] == NULL) \
695 s390_builtin_types[INDEX] = \
696 build_vector_type (s390_builtin_types[INDEX_BASE], ELEMENTS);
697
698 #undef DEF_OPAQUE_VECTOR_TYPE
699 #define DEF_OPAQUE_VECTOR_TYPE(INDEX, BFLAGS, INDEX_BASE, ELEMENTS) \
700 if (s390_builtin_types[INDEX] == NULL) \
701 s390_builtin_types[INDEX] = \
702 build_opaque_vector_type (s390_builtin_types[INDEX_BASE], ELEMENTS);
703
704 #undef DEF_FN_TYPE
705 #define DEF_FN_TYPE(INDEX, BFLAGS, args...) \
706 if (s390_builtin_fn_types[INDEX] == NULL) \
707 s390_builtin_fn_types[INDEX] = \
708 build_function_type_list (args, NULL_TREE);
709 #undef DEF_OV_TYPE
710 #define DEF_OV_TYPE(...)
711 #include "s390-builtin-types.def"
712
713 #undef B_DEF
714 #define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, OPFLAGS, FNTYPE) \
715 if (s390_builtin_decls[S390_BUILTIN_##NAME] == NULL) \
716 s390_builtin_decls[S390_BUILTIN_##NAME] = \
717 add_builtin_function ("__builtin_" #NAME, \
718 s390_builtin_fn_types[FNTYPE], \
719 S390_BUILTIN_##NAME, \
720 BUILT_IN_MD, \
721 NULL, \
722 ATTRS);
723 #undef OB_DEF
724 #define OB_DEF(NAME, FIRST_VAR_NAME, LAST_VAR_NAME, BFLAGS, FNTYPE) \
725 if (s390_builtin_decls[S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX] \
726 == NULL) \
727 s390_builtin_decls[S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX] = \
728 add_builtin_function ("__builtin_" #NAME, \
729 s390_builtin_fn_types[FNTYPE], \
730 S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX, \
731 BUILT_IN_MD, \
732 NULL, \
733 0);
734 #undef OB_DEF_VAR
735 #define OB_DEF_VAR(...)
736 #include "s390-builtins.def"
737
738 }
739
740 /* Return true if ARG is appropriate as argument number ARGNUM of
741 	   builtin DECL.  The operand flags from s390-builtins.def have to be
742 passed as OP_FLAGS. */
743 bool
744 s390_const_operand_ok (tree arg, int argnum, int op_flags, tree decl)
745 {
746 if (O_UIMM_P (op_flags))
747 {
748 int bitwidths[] = { 1, 2, 3, 4, 5, 8, 12, 16, 32 };
749 int bitwidth = bitwidths[op_flags - O_U1];
750
751 if (!tree_fits_uhwi_p (arg)
752 || tree_to_uhwi (arg) > ((unsigned HOST_WIDE_INT)1 << bitwidth) - 1)
753 {
754 error("constant argument %d for builtin %qF is out of range (0.."
755 HOST_WIDE_INT_PRINT_UNSIGNED ")",
756 argnum, decl,
757 ((unsigned HOST_WIDE_INT)1 << bitwidth) - 1);
758 return false;
759 }
760 }
761
762 if (O_SIMM_P (op_flags))
763 {
764 int bitwidths[] = { 2, 3, 4, 5, 8, 12, 16, 32 };
765 int bitwidth = bitwidths[op_flags - O_S2];
766
767 if (!tree_fits_shwi_p (arg)
768 || tree_to_shwi (arg) < -((HOST_WIDE_INT)1 << (bitwidth - 1))
769 || tree_to_shwi (arg) > (((HOST_WIDE_INT)1 << (bitwidth - 1)) - 1))
770 {
771 error("constant argument %d for builtin %qF is out of range ("
772 HOST_WIDE_INT_PRINT_DEC ".."
773 HOST_WIDE_INT_PRINT_DEC ")",
774 argnum, decl,
775 -((HOST_WIDE_INT)1 << (bitwidth - 1)),
776 ((HOST_WIDE_INT)1 << (bitwidth - 1)) - 1);
777 return false;
778 }
779 }
780 return true;
781 }
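/* Worked example for the range checks above, assuming the O_Ux /
   O_Sx enumerators follow the order of the bitwidths[] tables: an
   O_U4 operand uses bitwidth 4, so the accepted range is
   0 .. (1 << 4) - 1 = 0..15; an O_S8 operand uses bitwidth 8, so the
   accepted range is -(1 << 7) .. (1 << 7) - 1 = -128..127.  Anything
   outside these bounds makes the function report the error and
   return false, and the builtin expander then emits const0_rtx.  */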
782
783 /* Expand an expression EXP that calls a built-in function,
784 with result going to TARGET if that's convenient
785 (and in mode MODE if that's convenient).
786 SUBTARGET may be used as the target for computing one of EXP's operands.
787 IGNORE is nonzero if the value is to be ignored. */
788
789 static rtx
790 s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
791 machine_mode mode ATTRIBUTE_UNUSED,
792 int ignore ATTRIBUTE_UNUSED)
793 {
794 #define MAX_ARGS 5
795
796 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
797 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
798 enum insn_code icode;
799 rtx op[MAX_ARGS], pat;
800 int arity;
801 bool nonvoid;
802 tree arg;
803 call_expr_arg_iterator iter;
804 unsigned int all_op_flags = opflags_for_builtin (fcode);
805 machine_mode last_vec_mode = VOIDmode;
806
807 if (TARGET_DEBUG_ARG)
808 {
809 fprintf (stderr,
810 "s390_expand_builtin, code = %4d, %s, bflags = 0x%x\n",
811 (int)fcode, IDENTIFIER_POINTER (DECL_NAME (fndecl)),
812 bflags_for_builtin (fcode));
813 }
814
815 if (S390_USE_TARGET_ATTRIBUTE)
816 {
817 unsigned int bflags;
818
819 bflags = bflags_for_builtin (fcode);
820 if ((bflags & B_HTM) && !TARGET_HTM)
821 {
822 error ("Builtin %qF is not supported without -mhtm "
823 "(default with -march=zEC12 and higher).", fndecl);
824 return const0_rtx;
825 }
826 if ((bflags & B_VX) && !TARGET_VX)
827 {
828 error ("Builtin %qF is not supported without -mvx "
829 "(default with -march=z13 and higher).", fndecl);
830 return const0_rtx;
831 }
832 }
833 if (fcode >= S390_OVERLOADED_BUILTIN_VAR_OFFSET
834 && fcode < S390_ALL_BUILTIN_MAX)
835 {
836 gcc_unreachable ();
837 }
838 else if (fcode < S390_OVERLOADED_BUILTIN_OFFSET)
839 {
840 icode = code_for_builtin[fcode];
841 /* Set a flag in the machine specific cfun part in order to support
842 saving/restoring of FPRs. */
843 if (fcode == S390_BUILTIN_tbegin || fcode == S390_BUILTIN_tbegin_retry)
844 cfun->machine->tbegin_p = true;
845 }
846 else if (fcode < S390_OVERLOADED_BUILTIN_VAR_OFFSET)
847 {
848 error ("Unresolved overloaded builtin");
849 return const0_rtx;
850 }
851 else
852 internal_error ("bad builtin fcode");
853
854 if (icode == 0)
855 internal_error ("bad builtin icode");
856
857 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
858
859 if (nonvoid)
860 {
861 machine_mode tmode = insn_data[icode].operand[0].mode;
862 if (!target
863 || GET_MODE (target) != tmode
864 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
865 target = gen_reg_rtx (tmode);
866
867 /* There are builtins (e.g. vec_promote) with no vector
868 arguments but an element selector. So we have to also look
869 at the vector return type when emitting the modulo
870 operation. */
871 if (VECTOR_MODE_P (insn_data[icode].operand[0].mode))
872 last_vec_mode = insn_data[icode].operand[0].mode;
873 }
874
875 arity = 0;
876 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
877 {
878 const struct insn_operand_data *insn_op;
879 unsigned int op_flags = all_op_flags & ((1 << O_SHIFT) - 1);
880
881 all_op_flags = all_op_flags >> O_SHIFT;
882
883 if (arg == error_mark_node)
884 return NULL_RTX;
885 if (arity >= MAX_ARGS)
886 return NULL_RTX;
887
888 if (O_IMM_P (op_flags)
889 && TREE_CODE (arg) != INTEGER_CST)
890 {
891 error ("constant value required for builtin %qF argument %d",
892 fndecl, arity + 1);
893 return const0_rtx;
894 }
895
896 if (!s390_const_operand_ok (arg, arity + 1, op_flags, fndecl))
897 return const0_rtx;
898
899 insn_op = &insn_data[icode].operand[arity + nonvoid];
900 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
901
902 /* expand_expr truncates constants to the target mode only if it
903 is "convenient". However, our checks below rely on this
904 being done. */
905 if (CONST_INT_P (op[arity])
906 && SCALAR_INT_MODE_P (insn_op->mode)
907 && GET_MODE (op[arity]) != insn_op->mode)
908 op[arity] = GEN_INT (trunc_int_for_mode (INTVAL (op[arity]),
909 insn_op->mode));
910
911 /* Wrap the expanded RTX for pointer types into a MEM expr with
912 the proper mode. This allows us to use e.g. (match_operand
913 "memory_operand"..) in the insn patterns instead of (mem
914 (match_operand "address_operand)). This is helpful for
915 patterns not just accepting MEMs. */
916 if (POINTER_TYPE_P (TREE_TYPE (arg))
917 && insn_op->predicate != address_operand)
918 op[arity] = gen_rtx_MEM (insn_op->mode, op[arity]);
919
920 	      /* Expand the modulo operation required on element selectors. */
921 if (op_flags == O_ELEM)
922 {
923 gcc_assert (last_vec_mode != VOIDmode);
924 op[arity] = simplify_expand_binop (SImode, code_to_optab (AND),
925 op[arity],
926 GEN_INT (GET_MODE_NUNITS (last_vec_mode) - 1),
927 NULL_RTX, 1, OPTAB_DIRECT);
928 }
929
930 /* Record the vector mode used for an element selector. This assumes:
931 1. There is no builtin with two different vector modes and an element selector
932 2. The element selector comes after the vector type it is referring to.
933 	 This is currently true for all the builtins, but FIXME: we
934 	 should check for that. */
935 if (VECTOR_MODE_P (insn_op->mode))
936 last_vec_mode = insn_op->mode;
937
938 if (insn_op->predicate (op[arity], insn_op->mode))
939 {
940 arity++;
941 continue;
942 }
943
944 if (MEM_P (op[arity])
945 && insn_op->predicate == memory_operand
946 && (GET_MODE (XEXP (op[arity], 0)) == Pmode
947 || GET_MODE (XEXP (op[arity], 0)) == VOIDmode))
948 {
949 op[arity] = replace_equiv_address (op[arity],
950 copy_to_mode_reg (Pmode,
951 XEXP (op[arity], 0)));
952 }
953 else if (GET_MODE (op[arity]) == insn_op->mode
954 || GET_MODE (op[arity]) == VOIDmode
955 || (insn_op->predicate == address_operand
956 && GET_MODE (op[arity]) == Pmode))
957 {
958 /* An address_operand usually has VOIDmode in the expander
959 so we cannot use this. */
960 machine_mode target_mode =
961 (insn_op->predicate == address_operand
962 ? Pmode : insn_op->mode);
963 op[arity] = copy_to_mode_reg (target_mode, op[arity]);
964 }
965
966 if (!insn_op->predicate (op[arity], insn_op->mode))
967 {
968 error ("Invalid argument %d for builtin %qF", arity + 1, fndecl);
969 return const0_rtx;
970 }
971 arity++;
972 }
973
974 switch (arity)
975 {
976 case 0:
977 pat = GEN_FCN (icode) (target);
978 break;
979 case 1:
980 if (nonvoid)
981 pat = GEN_FCN (icode) (target, op[0]);
982 else
983 pat = GEN_FCN (icode) (op[0]);
984 break;
985 case 2:
986 if (nonvoid)
987 pat = GEN_FCN (icode) (target, op[0], op[1]);
988 else
989 pat = GEN_FCN (icode) (op[0], op[1]);
990 break;
991 case 3:
992 if (nonvoid)
993 pat = GEN_FCN (icode) (target, op[0], op[1], op[2]);
994 else
995 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
996 break;
997 case 4:
998 if (nonvoid)
999 pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3]);
1000 else
1001 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
1002 break;
1003 case 5:
1004 if (nonvoid)
1005 pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4]);
1006 else
1007 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4]);
1008 break;
1009 case 6:
1010 if (nonvoid)
1011 pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4], op[5]);
1012 else
1013 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4], op[5]);
1014 break;
1015 default:
1016 gcc_unreachable ();
1017 }
1018 if (!pat)
1019 return NULL_RTX;
1020 emit_insn (pat);
1021
1022 if (nonvoid)
1023 return target;
1024 else
1025 return const0_rtx;
1026 }
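/* Note on the element-selector handling above (illustrative): for a
   builtin whose vector operands are V4SImode, GET_MODE_NUNITS is 4,
   so an O_ELEM operand is ANDed with 4 - 1 = 3, i.e. reduced modulo
   the number of vector elements, before the insn pattern sees it.  */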
1027
1028
1029 static const int s390_hotpatch_hw_max = 1000000;
1030 static int s390_hotpatch_hw_before_label = 0;
1031 static int s390_hotpatch_hw_after_label = 0;
1032
1033 	/* Check whether the hotpatch attribute is applied to a function and, if it
1034 	   has arguments, whether the arguments are valid. */
1035
1036 static tree
1037 s390_handle_hotpatch_attribute (tree *node, tree name, tree args,
1038 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1039 {
1040 tree expr;
1041 tree expr2;
1042 int err;
1043
1044 if (TREE_CODE (*node) != FUNCTION_DECL)
1045 {
1046 warning (OPT_Wattributes, "%qE attribute only applies to functions",
1047 name);
1048 *no_add_attrs = true;
1049 }
1050 if (args != NULL && TREE_CHAIN (args) != NULL)
1051 {
1052 expr = TREE_VALUE (args);
1053 expr2 = TREE_VALUE (TREE_CHAIN (args));
1054 }
1055 if (args == NULL || TREE_CHAIN (args) == NULL)
1056 err = 1;
1057 else if (TREE_CODE (expr) != INTEGER_CST
1058 || !INTEGRAL_TYPE_P (TREE_TYPE (expr))
1059 || wi::gtu_p (expr, s390_hotpatch_hw_max))
1060 err = 1;
1061 else if (TREE_CODE (expr2) != INTEGER_CST
1062 || !INTEGRAL_TYPE_P (TREE_TYPE (expr2))
1063 || wi::gtu_p (expr2, s390_hotpatch_hw_max))
1064 err = 1;
1065 else
1066 err = 0;
1067 if (err)
1068 {
1069 error ("requested %qE attribute is not a comma separated pair of"
1070 " non-negative integer constants or too large (max. %d)", name,
1071 s390_hotpatch_hw_max);
1072 *no_add_attrs = true;
1073 }
1074
1075 return NULL_TREE;
1076 }
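/* Usage sketch (illustrative, not from this file): the attribute
   takes the number of halfwords of patchable space to be emitted
   before and after the function label, e.g.

     void callback (void) __attribute__ ((hotpatch (1, 2)));

   Both values must be non-negative integer constants not larger than
   s390_hotpatch_hw_max, otherwise the error above is issued.  */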
1077
1078 /* Expand the s390_vector_bool type attribute. */
1079
1080 static tree
1081 s390_handle_vectorbool_attribute (tree *node, tree name ATTRIBUTE_UNUSED,
1082 tree args ATTRIBUTE_UNUSED,
1083 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1084 {
1085 tree type = *node, result = NULL_TREE;
1086 machine_mode mode;
1087
1088 while (POINTER_TYPE_P (type)
1089 || TREE_CODE (type) == FUNCTION_TYPE
1090 || TREE_CODE (type) == METHOD_TYPE
1091 || TREE_CODE (type) == ARRAY_TYPE)
1092 type = TREE_TYPE (type);
1093
1094 mode = TYPE_MODE (type);
1095 switch (mode)
1096 {
1097 case DImode: case V2DImode: result = s390_builtin_types[BT_BV2DI]; break;
1098 case SImode: case V4SImode: result = s390_builtin_types[BT_BV4SI]; break;
1099 case HImode: case V8HImode: result = s390_builtin_types[BT_BV8HI]; break;
1100 case QImode: case V16QImode: result = s390_builtin_types[BT_BV16QI];
1101 default: break;
1102 }
1103
1104 *no_add_attrs = true; /* No need to hang on to the attribute. */
1105
1106 if (result)
1107 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
1108
1109 return NULL_TREE;
1110 }
1111
1112 static const struct attribute_spec s390_attribute_table[] = {
1113 { "hotpatch", 2, 2, true, false, false, s390_handle_hotpatch_attribute, false },
1114 { "s390_vector_bool", 0, 0, false, true, false, s390_handle_vectorbool_attribute, true },
1115 /* End element. */
1116 { NULL, 0, 0, false, false, false, NULL, false }
1117 };
1118
1119 /* Return the alignment for LABEL. We default to the -falign-labels
1120 value except for the literal pool base label. */
1121 int
1122 s390_label_align (rtx label)
1123 {
1124 rtx_insn *prev_insn = prev_active_insn (label);
1125 rtx set, src;
1126
1127 if (prev_insn == NULL_RTX)
1128 goto old;
1129
1130 set = single_set (prev_insn);
1131
1132 if (set == NULL_RTX)
1133 goto old;
1134
1135 src = SET_SRC (set);
1136
1137 /* Don't align literal pool base labels. */
1138 if (GET_CODE (src) == UNSPEC
1139 && XINT (src, 1) == UNSPEC_MAIN_BASE)
1140 return 0;
1141
1142 old:
1143 return align_labels_log;
1144 }
1145
1146 static machine_mode
1147 s390_libgcc_cmp_return_mode (void)
1148 {
1149 return TARGET_64BIT ? DImode : SImode;
1150 }
1151
1152 static machine_mode
1153 s390_libgcc_shift_count_mode (void)
1154 {
1155 return TARGET_64BIT ? DImode : SImode;
1156 }
1157
1158 static machine_mode
1159 s390_unwind_word_mode (void)
1160 {
1161 return TARGET_64BIT ? DImode : SImode;
1162 }
1163
1164 /* Return true if the back end supports mode MODE. */
1165 static bool
1166 s390_scalar_mode_supported_p (machine_mode mode)
1167 {
1168 /* In contrast to the default implementation reject TImode constants on 31bit
1169 TARGET_ZARCH for ABI compliance. */
1170 if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
1171 return false;
1172
1173 if (DECIMAL_FLOAT_MODE_P (mode))
1174 return default_decimal_float_supported_p ();
1175
1176 return default_scalar_mode_supported_p (mode);
1177 }
1178
1179 /* Return true if the back end supports vector mode MODE. */
1180 static bool
1181 s390_vector_mode_supported_p (machine_mode mode)
1182 {
1183 machine_mode inner;
1184
1185 if (!VECTOR_MODE_P (mode)
1186 || !TARGET_VX
1187 || GET_MODE_SIZE (mode) > 16)
1188 return false;
1189
1190 inner = GET_MODE_INNER (mode);
1191
1192 switch (inner)
1193 {
1194 case QImode:
1195 case HImode:
1196 case SImode:
1197 case DImode:
1198 case TImode:
1199 case SFmode:
1200 case DFmode:
1201 case TFmode:
1202 return true;
1203 default:
1204 return false;
1205 }
1206 }
1207
1208 /* Set the has_landing_pad_p flag in struct machine_function to VALUE. */
1209
1210 void
1211 s390_set_has_landing_pad_p (bool value)
1212 {
1213 cfun->machine->has_landing_pad_p = value;
1214 }
1215
1216 /* If two condition code modes are compatible, return a condition code
1217 mode which is compatible with both. Otherwise, return
1218 VOIDmode. */
1219
1220 static machine_mode
1221 s390_cc_modes_compatible (machine_mode m1, machine_mode m2)
1222 {
1223 if (m1 == m2)
1224 return m1;
1225
1226 switch (m1)
1227 {
1228 case CCZmode:
1229 if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
1230 || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
1231 return m2;
1232 return VOIDmode;
1233
1234 case CCSmode:
1235 case CCUmode:
1236 case CCTmode:
1237 case CCSRmode:
1238 case CCURmode:
1239 case CCZ1mode:
1240 if (m2 == CCZmode)
1241 return m1;
1242
1243 return VOIDmode;
1244
1245 default:
1246 return VOIDmode;
1247 }
1248 return VOIDmode;
1249 }
1250
1251 /* Return true if SET either doesn't set the CC register, or else
1252 the source and destination have matching CC modes and that
1253 CC mode is at least as constrained as REQ_MODE. */
1254
1255 static bool
1256 s390_match_ccmode_set (rtx set, machine_mode req_mode)
1257 {
1258 machine_mode set_mode;
1259
1260 gcc_assert (GET_CODE (set) == SET);
1261
1262 if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
1263 return 1;
1264
1265 set_mode = GET_MODE (SET_DEST (set));
1266 switch (set_mode)
1267 {
1268 case CCSmode:
1269 case CCSRmode:
1270 case CCUmode:
1271 case CCURmode:
1272 case CCLmode:
1273 case CCL1mode:
1274 case CCL2mode:
1275 case CCL3mode:
1276 case CCT1mode:
1277 case CCT2mode:
1278 case CCT3mode:
1279 case CCVEQmode:
1280 case CCVHmode:
1281 case CCVHUmode:
1282 case CCVFHmode:
1283 case CCVFHEmode:
1284 if (req_mode != set_mode)
1285 return 0;
1286 break;
1287
1288 case CCZmode:
1289 if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
1290 && req_mode != CCSRmode && req_mode != CCURmode)
1291 return 0;
1292 break;
1293
1294 case CCAPmode:
1295 case CCANmode:
1296 if (req_mode != CCAmode)
1297 return 0;
1298 break;
1299
1300 default:
1301 gcc_unreachable ();
1302 }
1303
1304 return (GET_MODE (SET_SRC (set)) == set_mode);
1305 }
1306
1307 /* Return true if every SET in INSN that sets the CC register
1308 has source and destination with matching CC modes and that
1309 CC mode is at least as constrained as REQ_MODE.
1310 If REQ_MODE is VOIDmode, always return false. */
1311
1312 bool
1313 s390_match_ccmode (rtx_insn *insn, machine_mode req_mode)
1314 {
1315 int i;
1316
1317 /* s390_tm_ccmode returns VOIDmode to indicate failure. */
1318 if (req_mode == VOIDmode)
1319 return false;
1320
1321 if (GET_CODE (PATTERN (insn)) == SET)
1322 return s390_match_ccmode_set (PATTERN (insn), req_mode);
1323
1324 if (GET_CODE (PATTERN (insn)) == PARALLEL)
1325 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1326 {
1327 rtx set = XVECEXP (PATTERN (insn), 0, i);
1328 if (GET_CODE (set) == SET)
1329 if (!s390_match_ccmode_set (set, req_mode))
1330 return false;
1331 }
1332
1333 return true;
1334 }
1335
1336 /* If a test-under-mask instruction can be used to implement
1337 (compare (and ... OP1) OP2), return the CC mode required
1338 to do that. Otherwise, return VOIDmode.
1339 MIXED is true if the instruction can distinguish between
1340 CC1 and CC2 for mixed selected bits (TMxx), it is false
1341 if the instruction cannot (TM). */
1342
1343 machine_mode
1344 s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
1345 {
1346 int bit0, bit1;
1347
1348 /* ??? Fixme: should work on CONST_WIDE_INT as well. */
1349 if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
1350 return VOIDmode;
1351
1352 /* Selected bits all zero: CC0.
1353 e.g.: int a; if ((a & (16 + 128)) == 0) */
1354 if (INTVAL (op2) == 0)
1355 return CCTmode;
1356
1357 /* Selected bits all one: CC3.
1358 e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
1359 if (INTVAL (op2) == INTVAL (op1))
1360 return CCT3mode;
1361
1362 /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
1363 int a;
1364 if ((a & (16 + 128)) == 16) -> CCT1
1365 if ((a & (16 + 128)) == 128) -> CCT2 */
1366 if (mixed)
1367 {
1368 bit1 = exact_log2 (INTVAL (op2));
1369 bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
1370 if (bit0 != -1 && bit1 != -1)
1371 return bit0 > bit1 ? CCT1mode : CCT2mode;
1372 }
1373
1374 return VOIDmode;
1375 }
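/* Worked example for the mixed case above: with op1 = 16 + 128 = 144
   and op2 = 16 we get bit1 = exact_log2 (16) = 4 and
   bit0 = exact_log2 (144 ^ 16) = exact_log2 (128) = 7.  Since
   bit0 > bit1, CCT1mode is returned, matching the "== 16" line in
   the comment above.  */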
1376
1377 /* Given a comparison code OP (EQ, NE, etc.) and the operands
1378 OP0 and OP1 of a COMPARE, return the mode to be used for the
1379 comparison. */
1380
1381 machine_mode
1382 s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
1383 {
1384 if (TARGET_VX
1385 && register_operand (op0, DFmode)
1386 && register_operand (op1, DFmode))
1387 {
1388 /* LT, LE, UNGT, UNGE require swapping OP0 and OP1. Either
1389 s390_emit_compare or s390_canonicalize_comparison will take
1390 care of it. */
1391 switch (code)
1392 {
1393 case EQ:
1394 case NE:
1395 return CCVEQmode;
1396 case GT:
1397 case UNLE:
1398 return CCVFHmode;
1399 case GE:
1400 case UNLT:
1401 return CCVFHEmode;
1402 default:
1403 ;
1404 }
1405 }
1406
1407 switch (code)
1408 {
1409 case EQ:
1410 case NE:
1411 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
1412 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1413 return CCAPmode;
1414 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
1415 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
1416 return CCAPmode;
1417 if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1418 || GET_CODE (op1) == NEG)
1419 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1420 return CCLmode;
1421
1422 if (GET_CODE (op0) == AND)
1423 {
1424 /* Check whether we can potentially do it via TM. */
1425 machine_mode ccmode;
1426 ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
1427 if (ccmode != VOIDmode)
1428 {
1429 /* Relax CCTmode to CCZmode to allow fall-back to AND
1430 if that turns out to be beneficial. */
1431 return ccmode == CCTmode ? CCZmode : ccmode;
1432 }
1433 }
1434
1435 if (register_operand (op0, HImode)
1436 && GET_CODE (op1) == CONST_INT
1437 && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
1438 return CCT3mode;
1439 if (register_operand (op0, QImode)
1440 && GET_CODE (op1) == CONST_INT
1441 && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
1442 return CCT3mode;
1443
1444 return CCZmode;
1445
1446 case LE:
1447 case LT:
1448 case GE:
1449 case GT:
1450 	      /* The only overflow condition of NEG and ABS happens when
1451 	         INT_MIN is used as operand: the result, which should be
1452 	         positive, overflows and stays negative.
1453 	         Using CCAP mode the resulting cc can be used for comparisons. */
1454 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
1455 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1456 return CCAPmode;
1457
1458 /* If constants are involved in an add instruction it is possible to use
1459 the resulting cc for comparisons with zero. Knowing the sign of the
1460 constant the overflow behavior gets predictable. e.g.:
1461 int a, b; if ((b = a + c) > 0)
1462 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
1463 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
1464 && (CONST_OK_FOR_K (INTVAL (XEXP (op0, 1)))
1465 || (CONST_OK_FOR_CONSTRAINT_P (INTVAL (XEXP (op0, 1)), 'O', "Os")
1466 /* Avoid INT32_MIN on 32 bit. */
1467 && (!TARGET_ZARCH || INTVAL (XEXP (op0, 1)) != -0x7fffffff - 1))))
1468 {
1469 if (INTVAL (XEXP((op0), 1)) < 0)
1470 return CCANmode;
1471 else
1472 return CCAPmode;
1473 }
1474 /* Fall through. */
1475 case UNORDERED:
1476 case ORDERED:
1477 case UNEQ:
1478 case UNLE:
1479 case UNLT:
1480 case UNGE:
1481 case UNGT:
1482 case LTGT:
1483 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
1484 && GET_CODE (op1) != CONST_INT)
1485 return CCSRmode;
1486 return CCSmode;
1487
1488 case LTU:
1489 case GEU:
1490 if (GET_CODE (op0) == PLUS
1491 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1492 return CCL1mode;
1493
1494 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
1495 && GET_CODE (op1) != CONST_INT)
1496 return CCURmode;
1497 return CCUmode;
1498
1499 case LEU:
1500 case GTU:
1501 if (GET_CODE (op0) == MINUS
1502 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1503 return CCL2mode;
1504
1505 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
1506 && GET_CODE (op1) != CONST_INT)
1507 return CCURmode;
1508 return CCUmode;
1509
1510 default:
1511 gcc_unreachable ();
1512 }
1513 }
1514
1515 /* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
1516 that we can implement more efficiently. */
1517
1518 static void
1519 s390_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
1520 bool op0_preserve_value)
1521 {
1522 if (op0_preserve_value)
1523 return;
1524
1525 /* Convert ZERO_EXTRACT back to AND to enable TM patterns. */
1526 if ((*code == EQ || *code == NE)
1527 && *op1 == const0_rtx
1528 && GET_CODE (*op0) == ZERO_EXTRACT
1529 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
1530 && GET_CODE (XEXP (*op0, 2)) == CONST_INT
1531 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
1532 {
1533 rtx inner = XEXP (*op0, 0);
1534 HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
1535 HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
1536 HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));
1537
1538 if (len > 0 && len < modesize
1539 && pos >= 0 && pos + len <= modesize
1540 && modesize <= HOST_BITS_PER_WIDE_INT)
1541 {
1542 unsigned HOST_WIDE_INT block;
1543 block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
1544 block <<= modesize - pos - len;
1545
1546 *op0 = gen_rtx_AND (GET_MODE (inner), inner,
1547 gen_int_mode (block, GET_MODE (inner)));
1548 }
1549 }
1550
1551 /* Narrow AND of memory against immediate to enable TM. */
1552 if ((*code == EQ || *code == NE)
1553 && *op1 == const0_rtx
1554 && GET_CODE (*op0) == AND
1555 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
1556 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
1557 {
1558 rtx inner = XEXP (*op0, 0);
1559 rtx mask = XEXP (*op0, 1);
1560
1561 /* Ignore paradoxical SUBREGs if all extra bits are masked out. */
1562 if (GET_CODE (inner) == SUBREG
1563 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
1564 && (GET_MODE_SIZE (GET_MODE (inner))
1565 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
1566 && ((INTVAL (mask)
1567 & GET_MODE_MASK (GET_MODE (inner))
1568 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
1569 == 0))
1570 inner = SUBREG_REG (inner);
1571
1572 /* Do not change volatile MEMs. */
1573 if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
1574 {
1575 int part = s390_single_part (XEXP (*op0, 1),
1576 GET_MODE (inner), QImode, 0);
1577 if (part >= 0)
1578 {
1579 mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
1580 inner = adjust_address_nv (inner, QImode, part);
1581 *op0 = gen_rtx_AND (QImode, inner, mask);
1582 }
1583 }
1584 }
1585
1586 /* Narrow comparisons against 0xffff to HImode if possible. */
1587 if ((*code == EQ || *code == NE)
1588 && GET_CODE (*op1) == CONST_INT
1589 && INTVAL (*op1) == 0xffff
1590 && SCALAR_INT_MODE_P (GET_MODE (*op0))
1591 && (nonzero_bits (*op0, GET_MODE (*op0))
1592 & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
1593 {
1594 *op0 = gen_lowpart (HImode, *op0);
1595 *op1 = constm1_rtx;
1596 }
1597
1598 /* Remove redundant UNSPEC_STRCMPCC_TO_INT conversions if possible. */
1599 if (GET_CODE (*op0) == UNSPEC
1600 && XINT (*op0, 1) == UNSPEC_STRCMPCC_TO_INT
1601 && XVECLEN (*op0, 0) == 1
1602 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
1603 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
1604 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
1605 && *op1 == const0_rtx)
1606 {
1607 enum rtx_code new_code = UNKNOWN;
1608 switch (*code)
1609 {
1610 case EQ: new_code = EQ; break;
1611 case NE: new_code = NE; break;
1612 case LT: new_code = GTU; break;
1613 case GT: new_code = LTU; break;
1614 case LE: new_code = GEU; break;
1615 case GE: new_code = LEU; break;
1616 default: break;
1617 }
1618
1619 if (new_code != UNKNOWN)
1620 {
1621 *op0 = XVECEXP (*op0, 0, 0);
1622 *code = new_code;
1623 }
1624 }
1625
1626 /* Remove redundant UNSPEC_CC_TO_INT conversions if possible. */
1627 if (GET_CODE (*op0) == UNSPEC
1628 && XINT (*op0, 1) == UNSPEC_CC_TO_INT
1629 && XVECLEN (*op0, 0) == 1
1630 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
1631 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
1632 && CONST_INT_P (*op1))
1633 {
1634 enum rtx_code new_code = UNKNOWN;
1635 switch (GET_MODE (XVECEXP (*op0, 0, 0)))
1636 {
1637 case CCZmode:
1638 case CCRAWmode:
1639 switch (*code)
1640 {
1641 case EQ: new_code = EQ; break;
1642 case NE: new_code = NE; break;
1643 default: break;
1644 }
1645 break;
1646 default: break;
1647 }
1648
1649 if (new_code != UNKNOWN)
1650 {
1651 /* For CCRAWmode put the required cc mask into the second
1652 operand. */
1653 if (GET_MODE (XVECEXP (*op0, 0, 0)) == CCRAWmode
1654 && INTVAL (*op1) >= 0 && INTVAL (*op1) <= 3)
1655 *op1 = gen_rtx_CONST_INT (VOIDmode, 1 << (3 - INTVAL (*op1)));
1656 *op0 = XVECEXP (*op0, 0, 0);
1657 *code = new_code;
1658 }
1659 }
1660
1661 /* Simplify cascaded EQ, NE with const0_rtx. */
1662 if ((*code == NE || *code == EQ)
1663 && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
1664 && GET_MODE (*op0) == SImode
1665 && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
1666 && REG_P (XEXP (*op0, 0))
1667 && XEXP (*op0, 1) == const0_rtx
1668 && *op1 == const0_rtx)
1669 {
1670 if ((*code == EQ && GET_CODE (*op0) == NE)
1671 || (*code == NE && GET_CODE (*op0) == EQ))
1672 *code = EQ;
1673 else
1674 *code = NE;
1675 *op0 = XEXP (*op0, 0);
1676 }
1677
1678 /* Prefer register over memory as first operand. */
1679 if (MEM_P (*op0) && REG_P (*op1))
1680 {
1681 rtx tem = *op0; *op0 = *op1; *op1 = tem;
1682 *code = (int)swap_condition ((enum rtx_code)*code);
1683 }
1684
1685 /* Using the scalar variants of vector instructions for 64 bit FP
1686 comparisons might require swapping the operands. */
1687 if (TARGET_VX
1688 && register_operand (*op0, DFmode)
1689 && register_operand (*op1, DFmode)
1690 && (*code == LT || *code == LE || *code == UNGT || *code == UNGE))
1691 {
1692 rtx tmp;
1693
1694 switch (*code)
1695 {
1696 case LT: *code = GT; break;
1697 case LE: *code = GE; break;
1698 case UNGT: *code = UNLE; break;
1699 case UNGE: *code = UNLT; break;
1700 default: ;
1701 }
1702 tmp = *op0; *op0 = *op1; *op1 = tmp;
1703 }
1704 }
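/* Worked example for the ZERO_EXTRACT -> AND conversion at the top
   of s390_canonicalize_comparison: extracting len = 2 bits at
   pos = 24 from an SImode value (modesize = 32) gives
   block = ((1 << 2) - 1) << (32 - 24 - 2) = 3 << 6 = 0xc0, so the
   comparison is rewritten as (x & 0xc0) == 0, which the TM patterns
   can then pick up.  */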
1705
1706 /* Helper function for s390_emit_compare. If possible emit a 64 bit
1707 FP compare using the single element variant of vector instructions.
1708 Replace CODE with the comparison code to be used in the CC reg
1709 compare and return the condition code register RTX in CC. */
1710
1711 static bool
1712 s390_expand_vec_compare_scalar (enum rtx_code *code, rtx cmp1, rtx cmp2,
1713 rtx *cc)
1714 {
1715 machine_mode cmp_mode;
1716 bool swap_p = false;
1717
1718 switch (*code)
1719 {
1720 case EQ: cmp_mode = CCVEQmode; break;
1721 case NE: cmp_mode = CCVEQmode; break;
1722 case GT: cmp_mode = CCVFHmode; break;
1723 case GE: cmp_mode = CCVFHEmode; break;
1724 case UNLE: cmp_mode = CCVFHmode; break;
1725 case UNLT: cmp_mode = CCVFHEmode; break;
1726 case LT: cmp_mode = CCVFHmode; *code = GT; swap_p = true; break;
1727 case LE: cmp_mode = CCVFHEmode; *code = GE; swap_p = true; break;
1728 case UNGE: cmp_mode = CCVFHmode; *code = UNLE; swap_p = true; break;
1729 case UNGT: cmp_mode = CCVFHEmode; *code = UNLT; swap_p = true; break;
1730 default: return false;
1731 }
1732
1733 if (swap_p)
1734 {
1735 rtx tmp = cmp2;
1736 cmp2 = cmp1;
1737 cmp1 = tmp;
1738 }
1739 *cc = gen_rtx_REG (cmp_mode, CC_REGNUM);
1740 emit_insn (gen_rtx_PARALLEL (VOIDmode,
1741 gen_rtvec (2,
1742 gen_rtx_SET (*cc,
1743 gen_rtx_COMPARE (cmp_mode, cmp1,
1744 cmp2)),
1745 gen_rtx_CLOBBER (VOIDmode,
1746 gen_rtx_SCRATCH (V2DImode)))));
1747 return true;
1748 }
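/* Example (illustrative): for "a < b" on two DFmode registers the
   table above picks CCVFHmode, rewrites the comparison code to GT
   and swaps the operands, so the emitted pattern effectively
   computes "b > a" with the single-element vector compare and the
   caller branches on GT of the CC register.  */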
1749
1750
1751 /* Emit a compare instruction suitable to implement the comparison
1752 OP0 CODE OP1. Return the correct condition RTL to be placed in
1753 the IF_THEN_ELSE of the conditional branch testing the result. */
1754
1755 rtx
1756 s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
1757 {
1758 machine_mode mode = s390_select_ccmode (code, op0, op1);
1759 rtx cc;
1760
1761 if (TARGET_VX
1762 && register_operand (op0, DFmode)
1763 && register_operand (op1, DFmode)
1764 && s390_expand_vec_compare_scalar (&code, op0, op1, &cc))
1765 {
1766 /* Work has been done by s390_expand_vec_compare_scalar already. */
1767 }
1768 else if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
1769 {
1770 /* Do not output a redundant compare instruction if a
1771 compare_and_swap pattern already computed the result and the
1772 machine modes are compatible. */
1773 gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
1774 == GET_MODE (op0));
1775 cc = op0;
1776 }
1777 else
1778 {
1779 cc = gen_rtx_REG (mode, CC_REGNUM);
1780 emit_insn (gen_rtx_SET (cc, gen_rtx_COMPARE (mode, op0, op1)));
1781 }
1782
1783 return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
1784 }
1785
1786 /* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
1787 matches CMP.
1788 Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
1789 conditional branch testing the result. */
1790
1791 static rtx
1792 s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem,
1793 rtx cmp, rtx new_rtx)
1794 {
1795 emit_insn (gen_atomic_compare_and_swapsi_internal (old, mem, cmp, new_rtx));
1796 return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM),
1797 const0_rtx);
1798 }
1799
1800 /* Emit a jump instruction to TARGET and return it. If COND is
1801 NULL_RTX, emit an unconditional jump, else a conditional jump under
1802 condition COND. */
1803
1804 rtx_insn *
1805 s390_emit_jump (rtx target, rtx cond)
1806 {
1807 rtx insn;
1808
1809 target = gen_rtx_LABEL_REF (VOIDmode, target);
1810 if (cond)
1811 target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);
1812
1813 insn = gen_rtx_SET (pc_rtx, target);
1814 return emit_jump_insn (insn);
1815 }
1816
1817 /* Return branch condition mask to implement a branch
1818 specified by CODE. Return -1 for invalid comparisons. */
1819
1820 int
1821 s390_branch_condition_mask (rtx code)
1822 {
1823 const int CC0 = 1 << 3;
1824 const int CC1 = 1 << 2;
1825 const int CC2 = 1 << 1;
1826 const int CC3 = 1 << 0;
1827
1828 gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
1829 gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
1830 gcc_assert (XEXP (code, 1) == const0_rtx
1831 || (GET_MODE (XEXP (code, 0)) == CCRAWmode
1832 && CONST_INT_P (XEXP (code, 1))));
1833
1834
1835 switch (GET_MODE (XEXP (code, 0)))
1836 {
1837 case CCZmode:
1838 case CCZ1mode:
1839 switch (GET_CODE (code))
1840 {
1841 case EQ: return CC0;
1842 case NE: return CC1 | CC2 | CC3;
1843 default: return -1;
1844 }
1845 break;
1846
1847 case CCT1mode:
1848 switch (GET_CODE (code))
1849 {
1850 case EQ: return CC1;
1851 case NE: return CC0 | CC2 | CC3;
1852 default: return -1;
1853 }
1854 break;
1855
1856 case CCT2mode:
1857 switch (GET_CODE (code))
1858 {
1859 case EQ: return CC2;
1860 case NE: return CC0 | CC1 | CC3;
1861 default: return -1;
1862 }
1863 break;
1864
1865 case CCT3mode:
1866 switch (GET_CODE (code))
1867 {
1868 case EQ: return CC3;
1869 case NE: return CC0 | CC1 | CC2;
1870 default: return -1;
1871 }
1872 break;
1873
1874 case CCLmode:
1875 switch (GET_CODE (code))
1876 {
1877 case EQ: return CC0 | CC2;
1878 case NE: return CC1 | CC3;
1879 default: return -1;
1880 }
1881 break;
1882
1883 case CCL1mode:
1884 switch (GET_CODE (code))
1885 {
1886 case LTU: return CC2 | CC3; /* carry */
1887 case GEU: return CC0 | CC1; /* no carry */
1888 default: return -1;
1889 }
1890 break;
1891
1892 case CCL2mode:
1893 switch (GET_CODE (code))
1894 {
1895 case GTU: return CC0 | CC1; /* borrow */
1896 case LEU: return CC2 | CC3; /* no borrow */
1897 default: return -1;
1898 }
1899 break;
1900
1901 case CCL3mode:
1902 switch (GET_CODE (code))
1903 {
1904 case EQ: return CC0 | CC2;
1905 case NE: return CC1 | CC3;
1906 case LTU: return CC1;
1907 case GTU: return CC3;
1908 case LEU: return CC1 | CC2;
1909 case GEU: return CC2 | CC3;
1910 default: return -1;
1911 }
1912
1913 case CCUmode:
1914 switch (GET_CODE (code))
1915 {
1916 case EQ: return CC0;
1917 case NE: return CC1 | CC2 | CC3;
1918 case LTU: return CC1;
1919 case GTU: return CC2;
1920 case LEU: return CC0 | CC1;
1921 case GEU: return CC0 | CC2;
1922 default: return -1;
1923 }
1924 break;
1925
1926 case CCURmode:
1927 switch (GET_CODE (code))
1928 {
1929 case EQ: return CC0;
1930 case NE: return CC2 | CC1 | CC3;
1931 case LTU: return CC2;
1932 case GTU: return CC1;
1933 case LEU: return CC0 | CC2;
1934 case GEU: return CC0 | CC1;
1935 default: return -1;
1936 }
1937 break;
1938
1939 case CCAPmode:
1940 switch (GET_CODE (code))
1941 {
1942 case EQ: return CC0;
1943 case NE: return CC1 | CC2 | CC3;
1944 case LT: return CC1 | CC3;
1945 case GT: return CC2;
1946 case LE: return CC0 | CC1 | CC3;
1947 case GE: return CC0 | CC2;
1948 default: return -1;
1949 }
1950 break;
1951
1952 case CCANmode:
1953 switch (GET_CODE (code))
1954 {
1955 case EQ: return CC0;
1956 case NE: return CC1 | CC2 | CC3;
1957 case LT: return CC1;
1958 case GT: return CC2 | CC3;
1959 case LE: return CC0 | CC1;
1960 case GE: return CC0 | CC2 | CC3;
1961 default: return -1;
1962 }
1963 break;
1964
1965 case CCSmode:
1966 switch (GET_CODE (code))
1967 {
1968 case EQ: return CC0;
1969 case NE: return CC1 | CC2 | CC3;
1970 case LT: return CC1;
1971 case GT: return CC2;
1972 case LE: return CC0 | CC1;
1973 case GE: return CC0 | CC2;
1974 case UNORDERED: return CC3;
1975 case ORDERED: return CC0 | CC1 | CC2;
1976 case UNEQ: return CC0 | CC3;
1977 case UNLT: return CC1 | CC3;
1978 case UNGT: return CC2 | CC3;
1979 case UNLE: return CC0 | CC1 | CC3;
1980 case UNGE: return CC0 | CC2 | CC3;
1981 case LTGT: return CC1 | CC2;
1982 default: return -1;
1983 }
1984 break;
1985
1986 case CCSRmode:
1987 switch (GET_CODE (code))
1988 {
1989 case EQ: return CC0;
1990 case NE: return CC2 | CC1 | CC3;
1991 case LT: return CC2;
1992 case GT: return CC1;
1993 case LE: return CC0 | CC2;
1994 case GE: return CC0 | CC1;
1995 case UNORDERED: return CC3;
1996 case ORDERED: return CC0 | CC2 | CC1;
1997 case UNEQ: return CC0 | CC3;
1998 case UNLT: return CC2 | CC3;
1999 case UNGT: return CC1 | CC3;
2000 case UNLE: return CC0 | CC2 | CC3;
2001 case UNGE: return CC0 | CC1 | CC3;
2002 case LTGT: return CC2 | CC1;
2003 default: return -1;
2004 }
2005 break;
2006
2007 /* Vector comparison modes. */
2008
2009 case CCVEQmode:
2010 switch (GET_CODE (code))
2011 {
2012 case EQ: return CC0;
2013 case NE: return CC3;
2014 default: return -1;
2015 }
2016
2017 case CCVEQANYmode:
2018 switch (GET_CODE (code))
2019 {
2020 case EQ: return CC0 | CC1;
2021 case NE: return CC3 | CC1;
2022 default: return -1;
2023 }
2024
2025 /* Integer vector compare modes. */
2026
2027 case CCVHmode:
2028 switch (GET_CODE (code))
2029 {
2030 case GT: return CC0;
2031 case LE: return CC3;
2032 default: return -1;
2033 }
2034
2035 case CCVHANYmode:
2036 switch (GET_CODE (code))
2037 {
2038 case GT: return CC0 | CC1;
2039 case LE: return CC3 | CC1;
2040 default: return -1;
2041 }
2042
2043 case CCVHUmode:
2044 switch (GET_CODE (code))
2045 {
2046 case GTU: return CC0;
2047 case LEU: return CC3;
2048 default: return -1;
2049 }
2050
2051 case CCVHUANYmode:
2052 switch (GET_CODE (code))
2053 {
2054 case GTU: return CC0 | CC1;
2055 case LEU: return CC3 | CC1;
2056 default: return -1;
2057 }
2058
2059 /* FP vector compare modes. */
2060
2061 case CCVFHmode:
2062 switch (GET_CODE (code))
2063 {
2064 case GT: return CC0;
2065 case UNLE: return CC3;
2066 default: return -1;
2067 }
2068
2069 case CCVFHANYmode:
2070 switch (GET_CODE (code))
2071 {
2072 case GT: return CC0 | CC1;
2073 case UNLE: return CC3 | CC1;
2074 default: return -1;
2075 }
2076
2077 case CCVFHEmode:
2078 switch (GET_CODE (code))
2079 {
2080 case GE: return CC0;
2081 case UNLT: return CC3;
2082 default: return -1;
2083 }
2084
2085 case CCVFHEANYmode:
2086 switch (GET_CODE (code))
2087 {
2088 case GE: return CC0 | CC1;
2089 case UNLT: return CC3 | CC1;
2090 default: return -1;
2091 }
2092
2093
2094 case CCRAWmode:
2095 switch (GET_CODE (code))
2096 {
2097 case EQ:
2098 return INTVAL (XEXP (code, 1));
2099 case NE:
2100 return (INTVAL (XEXP (code, 1))) ^ 0xf;
2101 default:
2102 gcc_unreachable ();
2103 }
2104
2105 default:
2106 return -1;
2107 }
2108 }
2109
2110
2111 /* Return branch condition mask to implement a compare and branch
2112 specified by CODE. Return -1 for invalid comparisons. */
2113
2114 int
2115 s390_compare_and_branch_condition_mask (rtx code)
2116 {
2117 const int CC0 = 1 << 3;
2118 const int CC1 = 1 << 2;
2119 const int CC2 = 1 << 1;
2120
2121 switch (GET_CODE (code))
2122 {
2123 case EQ:
2124 return CC0;
2125 case NE:
2126 return CC1 | CC2;
2127 case LT:
2128 case LTU:
2129 return CC1;
2130 case GT:
2131 case GTU:
2132 return CC2;
2133 case LE:
2134 case LEU:
2135 return CC0 | CC1;
2136 case GE:
2137 case GEU:
2138 return CC0 | CC2;
2139 default:
2140 gcc_unreachable ();
2141 }
2142 return -1;
2143 }
2144
2145 /* If INV is false, return assembler mnemonic string to implement
2146 a branch specified by CODE. If INV is true, return mnemonic
2147 for the corresponding inverted branch. */
2148
2149 static const char *
2150 s390_branch_condition_mnemonic (rtx code, int inv)
2151 {
2152 int mask;
2153
2154 static const char *const mnemonic[16] =
2155 {
2156 NULL, "o", "h", "nle",
2157 "l", "nhe", "lh", "ne",
2158 "e", "nlh", "he", "nl",
2159 "le", "nh", "no", NULL
2160 };
2161
2162 if (GET_CODE (XEXP (code, 0)) == REG
2163 && REGNO (XEXP (code, 0)) == CC_REGNUM
2164 && (XEXP (code, 1) == const0_rtx
2165 || (GET_MODE (XEXP (code, 0)) == CCRAWmode
2166 && CONST_INT_P (XEXP (code, 1)))))
2167 mask = s390_branch_condition_mask (code);
2168 else
2169 mask = s390_compare_and_branch_condition_mask (code);
2170
2171 gcc_assert (mask >= 0);
2172
2173 if (inv)
2174 mask ^= 15;
2175
2176 gcc_assert (mask >= 1 && mask <= 14);
2177
2178 return mnemonic[mask];
2179 }
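
/* Illustrative sketch (not part of the original sources): how the mask
   computed by s390_branch_condition_mask selects an entry of the table
   above.  For a CCSmode GE the mask is CC0 | CC2 == 10, which indexes
   "he"; XOR-ing with 15 gives 5 and hence the complementary "nhe".
   The helper name below is hypothetical.  */

static const char *
example_mnemonic_lookup (int mask, int inv)
{
  static const char *const mnemonic[16] =
    {
      NULL, "o", "h", "nle",
      "l", "nhe", "lh", "ne",
      "e", "nlh", "he", "nl",
      "le", "nh", "no", NULL
    };

  if (inv)
    mask ^= 15;		/* Branch on the complementary condition codes.  */

  return mnemonic[mask];	/* Masks 0 and 15 have no mnemonic.  */
}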
2180
2181 /* Return the part of op which has a value different from def.
2182 The size of the part is determined by mode.
2183 Use this function only if you already know that OP really
2184 contains such a part. */
2185
2186 unsigned HOST_WIDE_INT
2187 s390_extract_part (rtx op, machine_mode mode, int def)
2188 {
2189 unsigned HOST_WIDE_INT value = 0;
2190 int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
2191 int part_bits = GET_MODE_BITSIZE (mode);
2192 unsigned HOST_WIDE_INT part_mask
2193 = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
2194 int i;
2195
2196 for (i = 0; i < max_parts; i++)
2197 {
2198 if (i == 0)
2199 value = (unsigned HOST_WIDE_INT) INTVAL (op);
2200 else
2201 value >>= part_bits;
2202
2203 if ((value & part_mask) != (def & part_mask))
2204 return value & part_mask;
2205 }
2206
2207 gcc_unreachable ();
2208 }
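
/* Illustrative sketch (not part of the original sources): the same
   part-scanning idea as s390_extract_part, restated with plain 64-bit
   integers and a part width of 8, 16 or 32 bits instead of an rtx and
   a machine mode.  The name and signature are hypothetical.  For
   instance, example_extract_part (0x00120000, 16, 0) yields 0x12.  */

static unsigned long long
example_extract_part (unsigned long long value, int part_bits, int def)
{
  unsigned long long part_mask = (1ULL << part_bits) - 1;
  int max_parts = 64 / part_bits;
  int i;

  /* Scan the parts from least to most significant and return the first
     one that differs from the default pattern DEF.  */
  for (i = 0; i < max_parts; i++, value >>= part_bits)
    if ((value & part_mask) != ((unsigned long long) def & part_mask))
      return value & part_mask;

  /* As in s390_extract_part, the caller guarantees such a part exists.  */
  return 0;
}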
2209
2210 /* If OP is an integer constant of mode MODE with exactly one
2211 part of mode PART_MODE unequal to DEF, return the number of that
2212 part. Otherwise, return -1. */
2213
2214 int
2215 s390_single_part (rtx op,
2216 machine_mode mode,
2217 machine_mode part_mode,
2218 int def)
2219 {
2220 unsigned HOST_WIDE_INT value = 0;
2221 int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
2222 unsigned HOST_WIDE_INT part_mask
2223 = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
2224 int i, part = -1;
2225
2226 if (GET_CODE (op) != CONST_INT)
2227 return -1;
2228
2229 for (i = 0; i < n_parts; i++)
2230 {
2231 if (i == 0)
2232 value = (unsigned HOST_WIDE_INT) INTVAL (op);
2233 else
2234 value >>= GET_MODE_BITSIZE (part_mode);
2235
2236 if ((value & part_mask) != (def & part_mask))
2237 {
2238 if (part != -1)
2239 return -1;
2240 else
2241 part = i;
2242 }
2243 }
2244 return part == -1 ? -1 : n_parts - 1 - part;
2245 }
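
/* Illustrative sketch (not part of the original sources):
   s390_single_part scans the parts from the least significant end but
   reports the index counted from the most significant part.  A plain-C
   restatement with a hypothetical name; for the value
   0x0000ffff00000000 with four 16-bit parts and DEF == 0 the result is
   1, i.e. the second-highest halfword.  */

static int
example_single_part (unsigned long long value, int n_parts, int part_bits,
		     int def)
{
  unsigned long long part_mask = (1ULL << part_bits) - 1;
  int i, part = -1;

  for (i = 0; i < n_parts; i++, value >>= part_bits)
    if ((value & part_mask) != ((unsigned long long) def & part_mask))
      {
	if (part != -1)
	  return -1;	/* More than one part differs.  */
	part = i;
      }

  /* Convert from "counted from the low end" to "counted from the high
     end", which is the numbering the N constraints below rely on.  */
  return part == -1 ? -1 : n_parts - 1 - part;
}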
2246
2247 /* Return true if IN contains a contiguous bitfield in the lower SIZE
2248 bits and no other bits are set in IN. POS and LENGTH can be used
2249 to obtain the start position and the length of the bitfield.
2250
2251 POS gives the position of the first bit of the bitfield counting
2252 from the lowest order bit starting with zero. In order to use this
2253 value for S/390 instructions this has to be converted to "bits big
2254 endian" style. */
2255
2256 bool
2257 s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, int size,
2258 int *pos, int *length)
2259 {
2260 int tmp_pos = 0;
2261 int tmp_length = 0;
2262 int i;
2263 unsigned HOST_WIDE_INT mask = 1ULL;
2264 bool contiguous = false;
2265
2266 for (i = 0; i < size; mask <<= 1, i++)
2267 {
2268 if (contiguous)
2269 {
2270 if (mask & in)
2271 tmp_length++;
2272 else
2273 break;
2274 }
2275 else
2276 {
2277 if (mask & in)
2278 {
2279 contiguous = true;
2280 tmp_length++;
2281 }
2282 else
2283 tmp_pos++;
2284 }
2285 }
2286
2287 if (!tmp_length)
2288 return false;
2289
2290 /* Calculate a mask for all bits beyond the contiguous bits. */
2291 mask = (-1LL & ~(((1ULL << (tmp_length + tmp_pos - 1)) << 1) - 1));
2292
2293 if ((unsigned)size < sizeof (HOST_WIDE_INT) * BITS_PER_UNIT)
2294 mask &= (HOST_WIDE_INT_1U << size) - 1;
2295
2296 if (mask & in)
2297 return false;
2298
2299 if (tmp_length + tmp_pos - 1 > size)
2300 return false;
2301
2302 if (length)
2303 *length = tmp_length;
2304
2305 if (pos)
2306 *pos = tmp_pos;
2307
2308 return true;
2309 }
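
/* Illustrative sketch (not part of the original sources): sample
   results of the check above.  0x0ff0 within 16 bits is a contiguous
   field of length 8 starting at bit 4, while 0x0f0f is rejected
   because its set bits form two separate runs.  The helper name is
   hypothetical.  */

static void
example_contiguous_bitmask (void)
{
  int pos, len;

  if (s390_contiguous_bitmask_p (0x0ff0, 16, &pos, &len))
    {
      /* Here pos == 4 and len == 8, counting from the lowest-order bit.  */
    }

  if (!s390_contiguous_bitmask_p (0x0f0f, 16, &pos, &len))
    {
      /* Two separate bit runs, hence not accepted.  */
    }
}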
2310
2311 /* Return true if OP contains the same contiguous bitfield in *all*
2312 its elements. START and END can be used to obtain the start and
2313 end position of the bitfield.
2314
2315 START/END give the position of the first/last bit of the bitfield
2316 counting from the lowest order bit starting with zero. In order to
2317 use these values for S/390 instructions this has to be converted to
2318 "bits big endian" style. */
2319
2320 bool
2321 s390_contiguous_bitmask_vector_p (rtx op, int *start, int *end)
2322 {
2323 unsigned HOST_WIDE_INT mask;
2324 int length, size;
2325 rtx elt;
2326
2327 if (!const_vec_duplicate_p (op, &elt)
2328 || !CONST_INT_P (elt))
2329 return false;
2330
2331 size = GET_MODE_UNIT_BITSIZE (GET_MODE (op));
2332
2333 /* We cannot deal with V1TI/V1TF. This would require a vgmq. */
2334 if (size > 64)
2335 return false;
2336
2337 mask = UINTVAL (elt);
2338 if (s390_contiguous_bitmask_p (mask, size, start,
2339 end != NULL ? &length : NULL))
2340 {
2341 if (end != NULL)
2342 *end = *start + length - 1;
2343 return true;
2344 }
2345 /* 0xff00000f style immediates can be covered by swapping start and
2346 end indices in vgm. */
2347 if (s390_contiguous_bitmask_p (~mask, size, start,
2348 end != NULL ? &length : NULL))
2349 {
2350 if (end != NULL)
2351 *end = *start - 1;
2352 if (start != NULL)
2353 *start = *start + length;
2354 return true;
2355 }
2356 return false;
2357 }
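
/* Illustrative sketch (not part of the original sources): the
   wrap-around case handled above, shown on the raw 32-bit mask value
   instead of a CONST_VECTOR.  For the element value 0xff00000f the
   complemented mask 0x00fffff0 is contiguous (start 4, length 20), so
   the vgm operands become start 24 and end 3, i.e. a field wrapping
   around the bit 31/bit 0 boundary.  */

static void
example_wrapping_bitmask (void)
{
  int start, length;

  if (s390_contiguous_bitmask_p (0x00fffff0, 32, &start, &length))
    {
      int vgm_start = start + length;	/* 24 */
      int vgm_end = start - 1;		/* 3 */
      (void) vgm_start;
      (void) vgm_end;
    }
}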
2358
2359 /* Return true if OP consists only of byte chunks, each being either 0 or
2360 0xff. If MASK is != NULL, a byte mask is generated which is
2361 appropriate for the vector generate byte mask instruction. */
2362
2363 bool
2364 s390_bytemask_vector_p (rtx op, unsigned *mask)
2365 {
2366 int i;
2367 unsigned tmp_mask = 0;
2368 int nunit, unit_size;
2369
2370 if (!VECTOR_MODE_P (GET_MODE (op))
2371 || GET_CODE (op) != CONST_VECTOR
2372 || !CONST_INT_P (XVECEXP (op, 0, 0)))
2373 return false;
2374
2375 nunit = GET_MODE_NUNITS (GET_MODE (op));
2376 unit_size = GET_MODE_UNIT_SIZE (GET_MODE (op));
2377
2378 for (i = 0; i < nunit; i++)
2379 {
2380 unsigned HOST_WIDE_INT c;
2381 int j;
2382
2383 if (!CONST_INT_P (XVECEXP (op, 0, i)))
2384 return false;
2385
2386 c = UINTVAL (XVECEXP (op, 0, i));
2387 for (j = 0; j < unit_size; j++)
2388 {
2389 if ((c & 0xff) != 0 && (c & 0xff) != 0xff)
2390 return false;
2391 tmp_mask |= (c & 1) << ((nunit - 1 - i) * unit_size + j);
2392 c = c >> BITS_PER_UNIT;
2393 }
2394 }
2395
2396 if (mask != NULL)
2397 *mask = tmp_mask;
2398
2399 return true;
2400 }
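
/* Illustrative sketch (not part of the original sources): the layout
   of the generated byte mask, restated for plain element values.
   Element 0 occupies the most significant group of mask bits; within a
   group, the element's least significant byte maps to the group's
   lowest bit.  For a V4SI constant { 0xffffffff, 0, 0, 0 } the result
   is 0xf000.  The helper name is hypothetical.  */

static unsigned
example_bytemask (const unsigned long long *elts, int nunit, int unit_size)
{
  unsigned tmp_mask = 0;
  int i, j;

  for (i = 0; i < nunit; i++)
    {
      unsigned long long c = elts[i];

      for (j = 0; j < unit_size; j++, c >>= 8)
	/* Each byte is either 0 or 0xff, so its lowest bit is enough.  */
	tmp_mask |= (unsigned) (c & 1) << ((nunit - 1 - i) * unit_size + j);
    }

  return tmp_mask;
}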
2401
2402 /* Check whether a rotate of ROTL followed by an AND of CONTIG is
2403 equivalent to a shift followed by the AND. In particular, CONTIG
2404 should not overlap the (rotated) bit 0/bit 63 gap. Negative values
2405 for ROTL indicate a rotate to the right. */
2406
2407 bool
2408 s390_extzv_shift_ok (int bitsize, int rotl, unsigned HOST_WIDE_INT contig)
2409 {
2410 int pos, len;
2411 bool ok;
2412
2413 ok = s390_contiguous_bitmask_p (contig, bitsize, &pos, &len);
2414 gcc_assert (ok);
2415
2416 return ((rotl >= 0 && rotl <= pos)
2417 || (rotl < 0 && -rotl <= bitsize - len - pos));
2418 }
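
/* Illustrative sketch (not part of the original sources): with the
   mask 0xff00 (pos 8, length 8) in a 64-bit word, a rotate left by 8
   is accepted because no bit wrapping around the bit 63/bit 0 boundary
   can land inside the masked field, while a rotate left by 16 is not.  */

static void
example_extzv_shift (void)
{
  bool ok_case = s390_extzv_shift_ok (64, 8, 0xff00);	/* rotl <= pos */
  bool bad_case = s390_extzv_shift_ok (64, 16, 0xff00);	/* rotl > pos */
  (void) ok_case;
  (void) bad_case;
}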
2419
2420 /* Check whether we can (and want to) split a double-word
2421 move in mode MODE from SRC to DST into two single-word
2422 moves, moving the subword FIRST_SUBWORD first. */
2423
2424 bool
2425 s390_split_ok_p (rtx dst, rtx src, machine_mode mode, int first_subword)
2426 {
2427 /* Floating point and vector registers cannot be split. */
2428 if (FP_REG_P (src) || FP_REG_P (dst) || VECTOR_REG_P (src) || VECTOR_REG_P (dst))
2429 return false;
2430
2431 /* We don't need to split if operands are directly accessible. */
2432 if (s_operand (src, mode) || s_operand (dst, mode))
2433 return false;
2434
2435 /* Non-offsettable memory references cannot be split. */
2436 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
2437 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
2438 return false;
2439
2440 /* Moving the first subword must not clobber a register
2441 needed to move the second subword. */
2442 if (register_operand (dst, mode))
2443 {
2444 rtx subreg = operand_subword (dst, first_subword, 0, mode);
2445 if (reg_overlap_mentioned_p (subreg, src))
2446 return false;
2447 }
2448
2449 return true;
2450 }
2451
2452 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
2453 and [MEM2, MEM2 + SIZE] do overlap and false
2454 otherwise. */
2455
2456 bool
2457 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
2458 {
2459 rtx addr1, addr2, addr_delta;
2460 HOST_WIDE_INT delta;
2461
2462 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
2463 return true;
2464
2465 if (size == 0)
2466 return false;
2467
2468 addr1 = XEXP (mem1, 0);
2469 addr2 = XEXP (mem2, 0);
2470
2471 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
2472
2473 /* This overlapping check is used by peepholes merging memory block operations.
2474 Overlapping operations would otherwise be recognized by the S/390 hardware
2475 and would fall back to a slower implementation. Allowing overlapping
2476 operations would lead to slow code but not to wrong code. Therefore we are
2477 somewhat optimistic if we cannot prove that the memory blocks are
2478 overlapping.
2479 That's why we return false here although this may accept operations on
2480 overlapping memory areas. */
2481 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
2482 return false;
2483
2484 delta = INTVAL (addr_delta);
2485
2486 if (delta == 0
2487 || (delta > 0 && delta < size)
2488 || (delta < 0 && -delta < size))
2489 return true;
2490
2491 return false;
2492 }
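
/* Illustrative sketch (not part of the original sources): the core of
   the overlap test above, restated for plain integer addresses.  Two
   blocks of SIZE bytes overlap iff the distance between their start
   addresses is smaller than SIZE in either direction.  The name is
   hypothetical.  */

static bool
example_blocks_overlap (long addr1, long addr2, long size)
{
  long delta = addr2 - addr1;

  if (size == 0)
    return false;

  return (delta == 0
	  || (delta > 0 && delta < size)
	  || (delta < 0 && -delta < size));
}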
2493
2494 /* Check whether the address of memory reference MEM2 equals exactly
2495 the address of memory reference MEM1 plus DELTA. Return true if
2496 we can prove this to be the case, false otherwise. */
2497
2498 bool
2499 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
2500 {
2501 rtx addr1, addr2, addr_delta;
2502
2503 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
2504 return false;
2505
2506 addr1 = XEXP (mem1, 0);
2507 addr2 = XEXP (mem2, 0);
2508
2509 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
2510 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
2511 return false;
2512
2513 return true;
2514 }
2515
2516 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
2517
2518 void
2519 s390_expand_logical_operator (enum rtx_code code, machine_mode mode,
2520 rtx *operands)
2521 {
2522 machine_mode wmode = mode;
2523 rtx dst = operands[0];
2524 rtx src1 = operands[1];
2525 rtx src2 = operands[2];
2526 rtx op, clob, tem;
2527
2528 /* If we cannot handle the operation directly, use a temp register. */
2529 if (!s390_logical_operator_ok_p (operands))
2530 dst = gen_reg_rtx (mode);
2531
2532 /* QImode and HImode patterns make sense only if we have a destination
2533 in memory. Otherwise perform the operation in SImode. */
2534 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
2535 wmode = SImode;
2536
2537 /* Widen operands if required. */
2538 if (mode != wmode)
2539 {
2540 if (GET_CODE (dst) == SUBREG
2541 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
2542 dst = tem;
2543 else if (REG_P (dst))
2544 dst = gen_rtx_SUBREG (wmode, dst, 0);
2545 else
2546 dst = gen_reg_rtx (wmode);
2547
2548 if (GET_CODE (src1) == SUBREG
2549 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
2550 src1 = tem;
2551 else if (GET_MODE (src1) != VOIDmode)
2552 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
2553
2554 if (GET_CODE (src2) == SUBREG
2555 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
2556 src2 = tem;
2557 else if (GET_MODE (src2) != VOIDmode)
2558 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
2559 }
2560
2561 /* Emit the instruction. */
2562 op = gen_rtx_SET (dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
2563 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
2564 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
2565
2566 /* Fix up the destination if needed. */
2567 if (dst != operands[0])
2568 emit_move_insn (operands[0], gen_lowpart (mode, dst));
2569 }
2570
2571 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
2572
2573 bool
2574 s390_logical_operator_ok_p (rtx *operands)
2575 {
2576 /* If the destination operand is in memory, it needs to coincide
2577 with one of the source operands. After reload, it has to be
2578 the first source operand. */
2579 if (GET_CODE (operands[0]) == MEM)
2580 return rtx_equal_p (operands[0], operands[1])
2581 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
2582
2583 return true;
2584 }
2585
2586 /* Narrow logical operation CODE of memory operand MEMOP with immediate
2587 operand IMMOP to switch from SS to SI type instructions. */
2588
2589 void
2590 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
2591 {
2592 int def = code == AND ? -1 : 0;
2593 HOST_WIDE_INT mask;
2594 int part;
2595
2596 gcc_assert (GET_CODE (*memop) == MEM);
2597 gcc_assert (!MEM_VOLATILE_P (*memop));
2598
2599 mask = s390_extract_part (*immop, QImode, def);
2600 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
2601 gcc_assert (part >= 0);
2602
2603 *memop = adjust_address (*memop, QImode, part);
2604 *immop = gen_int_mode (mask, QImode);
2605 }
2606
2607
2608 /* How to allocate a 'struct machine_function'. */
2609
2610 static struct machine_function *
2611 s390_init_machine_status (void)
2612 {
2613 return ggc_cleared_alloc<machine_function> ();
2614 }
2615
2616 /* Map for smallest class containing reg regno. */
2617
2618 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
2619 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 0 */
2620 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 4 */
2621 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 8 */
2622 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 12 */
2623 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 16 */
2624 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 20 */
2625 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 24 */
2626 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 28 */
2627 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS, /* 32 */
2628 ACCESS_REGS, ACCESS_REGS, VEC_REGS, VEC_REGS, /* 36 */
2629 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 40 */
2630 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 44 */
2631 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 48 */
2632 VEC_REGS, VEC_REGS /* 52 */
2633 };
2634
2635 /* Return attribute type of insn. */
2636
2637 static enum attr_type
2638 s390_safe_attr_type (rtx_insn *insn)
2639 {
2640 if (recog_memoized (insn) >= 0)
2641 return get_attr_type (insn);
2642 else
2643 return TYPE_NONE;
2644 }
2645
2646 /* Return true if DISP is a valid short displacement. */
2647
2648 static bool
2649 s390_short_displacement (rtx disp)
2650 {
2651 /* No displacement is OK. */
2652 if (!disp)
2653 return true;
2654
2655 /* Without the long displacement facility we don't need to
2656 distinguish between long and short displacements. */
2657 if (!TARGET_LONG_DISPLACEMENT)
2658 return true;
2659
2660 /* Integer displacement in range. */
2661 if (GET_CODE (disp) == CONST_INT)
2662 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
2663
2664 /* GOT offset is not OK, the GOT can be large. */
2665 if (GET_CODE (disp) == CONST
2666 && GET_CODE (XEXP (disp, 0)) == UNSPEC
2667 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
2668 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
2669 return false;
2670
2671 /* All other symbolic constants are literal pool references,
2672 which are OK as the literal pool must be small. */
2673 if (GET_CODE (disp) == CONST)
2674 return true;
2675
2676 return false;
2677 }
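
/* Illustrative sketch (not part of the original sources): the numeric
   part of the test above.  Short displacements are the unsigned 12-bit
   range 0..4095; on long-displacement targets, values outside that
   range require the instruction forms with 20-bit signed
   displacements.  */

static bool
example_short_disp (long long disp)
{
  return disp >= 0 && disp < 4096;
}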
2678
2679 /* Decompose a RTL expression ADDR for a memory address into
2680 its components, returned in OUT.
2681
2682 Returns false if ADDR is not a valid memory address, true
2683 otherwise. If OUT is NULL, don't return the components,
2684 but check for validity only.
2685
2686 Note: Only addresses in canonical form are recognized.
2687 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
2688 canonical form so that they will be recognized. */
2689
2690 static int
2691 s390_decompose_address (rtx addr, struct s390_address *out)
2692 {
2693 HOST_WIDE_INT offset = 0;
2694 rtx base = NULL_RTX;
2695 rtx indx = NULL_RTX;
2696 rtx disp = NULL_RTX;
2697 rtx orig_disp;
2698 bool pointer = false;
2699 bool base_ptr = false;
2700 bool indx_ptr = false;
2701 bool literal_pool = false;
2702
2703 /* We may need to substitute the literal pool base register into the address
2704 below. However, at this point we do not know which register is going to
2705 be used as base, so we substitute the arg pointer register. This is going
2706 to be treated as holding a pointer below -- it shouldn't be used for any
2707 other purpose. */
2708 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
2709
2710 /* Decompose address into base + index + displacement. */
2711
2712 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
2713 base = addr;
2714
2715 else if (GET_CODE (addr) == PLUS)
2716 {
2717 rtx op0 = XEXP (addr, 0);
2718 rtx op1 = XEXP (addr, 1);
2719 enum rtx_code code0 = GET_CODE (op0);
2720 enum rtx_code code1 = GET_CODE (op1);
2721
2722 if (code0 == REG || code0 == UNSPEC)
2723 {
2724 if (code1 == REG || code1 == UNSPEC)
2725 {
2726 indx = op0; /* index + base */
2727 base = op1;
2728 }
2729
2730 else
2731 {
2732 base = op0; /* base + displacement */
2733 disp = op1;
2734 }
2735 }
2736
2737 else if (code0 == PLUS)
2738 {
2739 indx = XEXP (op0, 0); /* index + base + disp */
2740 base = XEXP (op0, 1);
2741 disp = op1;
2742 }
2743
2744 else
2745 {
2746 return false;
2747 }
2748 }
2749
2750 else
2751 disp = addr; /* displacement */
2752
2753 /* Extract integer part of displacement. */
2754 orig_disp = disp;
2755 if (disp)
2756 {
2757 if (GET_CODE (disp) == CONST_INT)
2758 {
2759 offset = INTVAL (disp);
2760 disp = NULL_RTX;
2761 }
2762 else if (GET_CODE (disp) == CONST
2763 && GET_CODE (XEXP (disp, 0)) == PLUS
2764 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
2765 {
2766 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
2767 disp = XEXP (XEXP (disp, 0), 0);
2768 }
2769 }
2770
2771 /* Strip off CONST here to avoid special case tests later. */
2772 if (disp && GET_CODE (disp) == CONST)
2773 disp = XEXP (disp, 0);
2774
2775 /* We can convert literal pool addresses to
2776 displacements by basing them off the base register. */
2777 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
2778 {
2779 /* Either base or index must be free to hold the base register. */
2780 if (!base)
2781 base = fake_pool_base, literal_pool = true;
2782 else if (!indx)
2783 indx = fake_pool_base, literal_pool = true;
2784 else
2785 return false;
2786
2787 /* Mark up the displacement. */
2788 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
2789 UNSPEC_LTREL_OFFSET);
2790 }
2791
2792 /* Validate base register. */
2793 if (base)
2794 {
2795 if (GET_CODE (base) == UNSPEC)
2796 switch (XINT (base, 1))
2797 {
2798 case UNSPEC_LTREF:
2799 if (!disp)
2800 disp = gen_rtx_UNSPEC (Pmode,
2801 gen_rtvec (1, XVECEXP (base, 0, 0)),
2802 UNSPEC_LTREL_OFFSET);
2803 else
2804 return false;
2805
2806 base = XVECEXP (base, 0, 1);
2807 break;
2808
2809 case UNSPEC_LTREL_BASE:
2810 if (XVECLEN (base, 0) == 1)
2811 base = fake_pool_base, literal_pool = true;
2812 else
2813 base = XVECEXP (base, 0, 1);
2814 break;
2815
2816 default:
2817 return false;
2818 }
2819
2820 if (!REG_P (base)
2821 || (GET_MODE (base) != SImode
2822 && GET_MODE (base) != Pmode))
2823 return false;
2824
2825 if (REGNO (base) == STACK_POINTER_REGNUM
2826 || REGNO (base) == FRAME_POINTER_REGNUM
2827 || ((reload_completed || reload_in_progress)
2828 && frame_pointer_needed
2829 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
2830 || REGNO (base) == ARG_POINTER_REGNUM
2831 || (flag_pic
2832 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
2833 pointer = base_ptr = true;
2834
2835 if ((reload_completed || reload_in_progress)
2836 && base == cfun->machine->base_reg)
2837 pointer = base_ptr = literal_pool = true;
2838 }
2839
2840 /* Validate index register. */
2841 if (indx)
2842 {
2843 if (GET_CODE (indx) == UNSPEC)
2844 switch (XINT (indx, 1))
2845 {
2846 case UNSPEC_LTREF:
2847 if (!disp)
2848 disp = gen_rtx_UNSPEC (Pmode,
2849 gen_rtvec (1, XVECEXP (indx, 0, 0)),
2850 UNSPEC_LTREL_OFFSET);
2851 else
2852 return false;
2853
2854 indx = XVECEXP (indx, 0, 1);
2855 break;
2856
2857 case UNSPEC_LTREL_BASE:
2858 if (XVECLEN (indx, 0) == 1)
2859 indx = fake_pool_base, literal_pool = true;
2860 else
2861 indx = XVECEXP (indx, 0, 1);
2862 break;
2863
2864 default:
2865 return false;
2866 }
2867
2868 if (!REG_P (indx)
2869 || (GET_MODE (indx) != SImode
2870 && GET_MODE (indx) != Pmode))
2871 return false;
2872
2873 if (REGNO (indx) == STACK_POINTER_REGNUM
2874 || REGNO (indx) == FRAME_POINTER_REGNUM
2875 || ((reload_completed || reload_in_progress)
2876 && frame_pointer_needed
2877 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
2878 || REGNO (indx) == ARG_POINTER_REGNUM
2879 || (flag_pic
2880 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
2881 pointer = indx_ptr = true;
2882
2883 if ((reload_completed || reload_in_progress)
2884 && indx == cfun->machine->base_reg)
2885 pointer = indx_ptr = literal_pool = true;
2886 }
2887
2888 /* Prefer to use pointer as base, not index. */
2889 if (base && indx && !base_ptr
2890 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
2891 {
2892 rtx tmp = base;
2893 base = indx;
2894 indx = tmp;
2895 }
2896
2897 /* Validate displacement. */
2898 if (!disp)
2899 {
2900 /* If virtual registers are involved, the displacement will change later
2901 anyway as the virtual registers get eliminated. This could make a
2902 valid displacement invalid, but it is more likely to make an invalid
2903 displacement valid, because we sometimes access the register save area
2904 via negative offsets to one of those registers.
2905 Thus we don't check the displacement for validity here. If after
2906 elimination the displacement turns out to be invalid after all,
2907 this is fixed up by reload in any case. */
2908 /* LRA always keeps displacements up to date, so we need to know
2909 that the displacement is valid throughout LRA, not only at the
2910 final elimination. */
2911 if (lra_in_progress
2912 || (base != arg_pointer_rtx
2913 && indx != arg_pointer_rtx
2914 && base != return_address_pointer_rtx
2915 && indx != return_address_pointer_rtx
2916 && base != frame_pointer_rtx
2917 && indx != frame_pointer_rtx
2918 && base != virtual_stack_vars_rtx
2919 && indx != virtual_stack_vars_rtx))
2920 if (!DISP_IN_RANGE (offset))
2921 return false;
2922 }
2923 else
2924 {
2925 /* All the special cases are pointers. */
2926 pointer = true;
2927
2928 /* In the small-PIC case, the linker converts @GOT
2929 and @GOTNTPOFF offsets to possible displacements. */
2930 if (GET_CODE (disp) == UNSPEC
2931 && (XINT (disp, 1) == UNSPEC_GOT
2932 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
2933 && flag_pic == 1)
2934 {
2935 ;
2936 }
2937
2938 /* Accept pool label offsets. */
2939 else if (GET_CODE (disp) == UNSPEC
2940 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
2941 ;
2942
2943 /* Accept literal pool references. */
2944 else if (GET_CODE (disp) == UNSPEC
2945 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
2946 {
2947 /* In case CSE pulled a non literal pool reference out of
2948 the pool we have to reject the address. This is
2949 especially important when loading the GOT pointer on non
2950 zarch CPUs. In this case the literal pool contains an lt
2951 relative offset to the _GLOBAL_OFFSET_TABLE_ label which
2952 will most likely exceed the displacement. */
2953 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
2954 || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
2955 return false;
2956
2957 orig_disp = gen_rtx_CONST (Pmode, disp);
2958 if (offset)
2959 {
2960 /* If we have an offset, make sure it does not
2961 exceed the size of the constant pool entry. */
2962 rtx sym = XVECEXP (disp, 0, 0);
2963 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
2964 return false;
2965
2966 orig_disp = plus_constant (Pmode, orig_disp, offset);
2967 }
2968 }
2969
2970 else
2971 return false;
2972 }
2973
2974 if (!base && !indx)
2975 pointer = true;
2976
2977 if (out)
2978 {
2979 out->base = base;
2980 out->indx = indx;
2981 out->disp = orig_disp;
2982 out->pointer = pointer;
2983 out->literal_pool = literal_pool;
2984 }
2985
2986 return true;
2987 }
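
/* Illustrative sketch (not part of the original sources): typical
   results of the decomposition above.  A bare (reg) yields only a
   base; (plus (reg) (const_int 40)) yields a base and displacement 40;
   (plus (plus (reg A) (reg B)) (const_int 40)) yields index A, base B
   and displacement 40, subject to the pointer-as-base preference
   implemented above.  The helper name is hypothetical.  */

static void
example_decompose (rtx addr)
{
  struct s390_address parts;

  if (s390_decompose_address (addr, &parts))
    {
      /* parts.base, parts.indx and parts.disp now describe the address;
	 parts.literal_pool is set for literal pool accesses.  */
    }
}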
2988
2989 /* Decompose a RTL expression OP for a shift count into its components,
2990 and return the base register in BASE and the offset in OFFSET.
2991
2992 Return true if OP is a valid shift count, false if not. */
2993
2994 bool
2995 s390_decompose_shift_count (rtx op, rtx *base, HOST_WIDE_INT *offset)
2996 {
2997 rtx off = NULL_RTX;
2998
2999 /* We can have an integer constant, an address register,
3000 or a sum of the two. */
3001 if (CONST_SCALAR_INT_P (op))
3002 {
3003 off = op;
3004 op = NULL_RTX;
3005 }
3006 if (op && GET_CODE (op) == PLUS && CONST_SCALAR_INT_P (XEXP (op, 1)))
3007 {
3008 off = XEXP (op, 1);
3009 op = XEXP (op, 0);
3010 }
3011 while (op && GET_CODE (op) == SUBREG)
3012 op = SUBREG_REG (op);
3013
3014 if (op && GET_CODE (op) != REG)
3015 return false;
3016
3017 if (offset)
3018 {
3019 if (off == NULL_RTX)
3020 *offset = 0;
3021 else if (CONST_INT_P (off))
3022 *offset = INTVAL (off);
3023 else if (CONST_WIDE_INT_P (off))
3024 /* The offset will be cut down to 12 bits anyway, so just take
3025 the lowest-order chunk of the wide int. */
3026 *offset = CONST_WIDE_INT_ELT (off, 0);
3027 else
3028 gcc_unreachable ();
3029 }
3030 if (base)
3031 *base = op;
3032
3033 return true;
3034 }
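
/* Illustrative sketch (not part of the original sources): typical
   shift count decompositions.  A plain register yields that register
   as base and offset 0; (plus (reg) (const_int 12)) yields the
   register and offset 12; a bare constant yields base == NULL_RTX.
   The helper name is hypothetical.  */

static void
example_shift_count (rtx op)
{
  rtx base;
  HOST_WIDE_INT offset;

  if (s390_decompose_shift_count (op, &base, &offset))
    {
      /* BASE and OFFSET now describe the shift count; only the low 12
	 bits of the offset end up in the instruction.  */
    }
}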
3035
3036
3037 /* Return true if OP is a memory operand whose address is valid and contains no index register. */
3038
3039 bool
3040 s390_legitimate_address_without_index_p (rtx op)
3041 {
3042 struct s390_address addr;
3043
3044 if (!s390_decompose_address (XEXP (op, 0), &addr))
3045 return false;
3046 if (addr.indx)
3047 return false;
3048
3049 return true;
3050 }
3051
3052
3053 /* Return TRUE if ADDR is an operand valid for a load/store relative
3054 instruction. Be aware that the alignment of the operand needs to
3055 be checked separately.
3056 Valid addresses are single references or a sum of a reference and a
3057 constant integer. Return these parts in SYMREF and ADDEND. You can
3058 pass NULL in SYMREF and/or ADDEND if you are not interested in these
3059 values. Literal pool references are *not* considered symbol
3060 references. */
3061
3062 static bool
3063 s390_loadrelative_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
3064 {
3065 HOST_WIDE_INT tmpaddend = 0;
3066
3067 if (GET_CODE (addr) == CONST)
3068 addr = XEXP (addr, 0);
3069
3070 if (GET_CODE (addr) == PLUS)
3071 {
3072 if (!CONST_INT_P (XEXP (addr, 1)))
3073 return false;
3074
3075 tmpaddend = INTVAL (XEXP (addr, 1));
3076 addr = XEXP (addr, 0);
3077 }
3078
3079 if ((GET_CODE (addr) == SYMBOL_REF && !CONSTANT_POOL_ADDRESS_P (addr))
3080 || (GET_CODE (addr) == UNSPEC
3081 && (XINT (addr, 1) == UNSPEC_GOTENT
3082 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
3083 {
3084 if (symref)
3085 *symref = addr;
3086 if (addend)
3087 *addend = tmpaddend;
3088
3089 return true;
3090 }
3091 return false;
3092 }
3093
3094 /* Return true if the address in OP is valid for constraint letter C
3095 if wrapped in a MEM rtx. Set LIT_POOL_OK to true if literal
3096 pool MEMs should be accepted. Only the Q, R, S, T constraint
3097 letters are allowed for C. */
3098
3099 static int
3100 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
3101 {
3102 struct s390_address addr;
3103 bool decomposed = false;
3104
3105 /* This check makes sure that no symbolic address (except literal
3106 pool references) are accepted by the R or T constraints. */
3107 if (s390_loadrelative_operand_p (op, NULL, NULL))
3108 return 0;
3109
3110 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
3111 if (!lit_pool_ok)
3112 {
3113 if (!s390_decompose_address (op, &addr))
3114 return 0;
3115 if (addr.literal_pool)
3116 return 0;
3117 decomposed = true;
3118 }
3119
3120 switch (c)
3121 {
3122 case 'Q': /* no index short displacement */
3123 if (!decomposed && !s390_decompose_address (op, &addr))
3124 return 0;
3125 if (addr.indx)
3126 return 0;
3127 if (!s390_short_displacement (addr.disp))
3128 return 0;
3129 break;
3130
3131 case 'R': /* with index short displacement */
3132 if (TARGET_LONG_DISPLACEMENT)
3133 {
3134 if (!decomposed && !s390_decompose_address (op, &addr))
3135 return 0;
3136 if (!s390_short_displacement (addr.disp))
3137 return 0;
3138 }
3139 /* Any invalid address here will be fixed up by reload,
3140 so accept it for the most generic constraint. */
3141 break;
3142
3143 case 'S': /* no index long displacement */
3144 if (!TARGET_LONG_DISPLACEMENT)
3145 return 0;
3146 if (!decomposed && !s390_decompose_address (op, &addr))
3147 return 0;
3148 if (addr.indx)
3149 return 0;
3150 if (s390_short_displacement (addr.disp))
3151 return 0;
3152 break;
3153
3154 case 'T': /* with index long displacement */
3155 if (!TARGET_LONG_DISPLACEMENT)
3156 return 0;
3157 /* Any invalid address here will be fixed up by reload,
3158 so accept it for the most generic constraint. */
3159 if ((decomposed || s390_decompose_address (op, &addr))
3160 && s390_short_displacement (addr.disp))
3161 return 0;
3162 break;
3163 default:
3164 return 0;
3165 }
3166 return 1;
3167 }
3168
3169
3170 /* Evaluates constraint strings described by the regular expression
3171 ([A|B|Z](Q|R|S|T))|U|W|Y and returns 1 if OP is a valid operand for
3172 the constraint given in STR, and 0 otherwise. */
3173
3174 int
3175 s390_mem_constraint (const char *str, rtx op)
3176 {
3177 char c = str[0];
3178
3179 switch (c)
3180 {
3181 case 'A':
3182 /* Check for offsettable variants of memory constraints. */
3183 if (!MEM_P (op) || MEM_VOLATILE_P (op))
3184 return 0;
3185 if ((reload_completed || reload_in_progress)
3186 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
3187 return 0;
3188 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
3189 case 'B':
3190 /* Check for non-literal-pool variants of memory constraints. */
3191 if (!MEM_P (op))
3192 return 0;
3193 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
3194 case 'Q':
3195 case 'R':
3196 case 'S':
3197 case 'T':
3198 if (GET_CODE (op) != MEM)
3199 return 0;
3200 return s390_check_qrst_address (c, XEXP (op, 0), true);
3201 case 'U':
3202 return (s390_check_qrst_address ('Q', op, true)
3203 || s390_check_qrst_address ('R', op, true));
3204 case 'W':
3205 return (s390_check_qrst_address ('S', op, true)
3206 || s390_check_qrst_address ('T', op, true));
3207 case 'Y':
3208 /* Simply check for the basic form of a shift count. Reload will
3209 take care of making sure we have a proper base register. */
3210 if (!s390_decompose_shift_count (op, NULL, NULL))
3211 return 0;
3212 break;
3213 case 'Z':
3214 return s390_check_qrst_address (str[1], op, true);
3215 default:
3216 return 0;
3217 }
3218 return 1;
3219 }
3220
3221
3222 /* Evaluates constraint strings starting with letter O. Input
3223 parameter C is the letter following the "O" in the constraint
3224 string. Returns 1 if VALUE meets the respective constraint and 0
3225 otherwise. */
3226
3227 int
3228 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
3229 {
3230 if (!TARGET_EXTIMM)
3231 return 0;
3232
3233 switch (c)
3234 {
3235 case 's':
3236 return trunc_int_for_mode (value, SImode) == value;
3237
3238 case 'p':
3239 return value == 0
3240 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
3241
3242 case 'n':
3243 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
3244
3245 default:
3246 gcc_unreachable ();
3247 }
3248 }
3249
3250
3251 /* Evaluates constraint strings starting with letter N. Parameter STR
3252 contains the letters following letter "N" in the constraint string.
3253 Returns true if VALUE matches the constraint. */
3254
3255 int
3256 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
3257 {
3258 machine_mode mode, part_mode;
3259 int def;
3260 int part, part_goal;
3261
3262
3263 if (str[0] == 'x')
3264 part_goal = -1;
3265 else
3266 part_goal = str[0] - '0';
3267
3268 switch (str[1])
3269 {
3270 case 'Q':
3271 part_mode = QImode;
3272 break;
3273 case 'H':
3274 part_mode = HImode;
3275 break;
3276 case 'S':
3277 part_mode = SImode;
3278 break;
3279 default:
3280 return 0;
3281 }
3282
3283 switch (str[2])
3284 {
3285 case 'H':
3286 mode = HImode;
3287 break;
3288 case 'S':
3289 mode = SImode;
3290 break;
3291 case 'D':
3292 mode = DImode;
3293 break;
3294 default:
3295 return 0;
3296 }
3297
3298 switch (str[3])
3299 {
3300 case '0':
3301 def = 0;
3302 break;
3303 case 'F':
3304 def = -1;
3305 break;
3306 default:
3307 return 0;
3308 }
3309
3310 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
3311 return 0;
3312
3313 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
3314 if (part < 0)
3315 return 0;
3316 if (part_goal != -1 && part_goal != part)
3317 return 0;
3318
3319 return 1;
3320 }
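
/* Illustrative sketch (not part of the original sources): decoding of
   one such constraint string.  "1SD0" asks for a DImode value whose
   only SImode part different from 0 is part 1, i.e. the low word
   (parts are numbered from the most significant end, see
   s390_single_part).  */

static void
example_N_constraint (void)
{
  /* Matches: only the low 32 bits are nonzero.  */
  int lo_word = s390_N_constraint_str ("1SD0", 0x12345678);
  /* Does not match: the nonzero part is the high word (part 0).  */
  int hi_word = s390_N_constraint_str ("1SD0", 0x1234567800000000LL);
  (void) lo_word;
  (void) hi_word;
}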
3321
3322
3323 /* Returns true if the input parameter VALUE is a float zero. */
3324
3325 int
3326 s390_float_const_zero_p (rtx value)
3327 {
3328 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
3329 && value == CONST0_RTX (GET_MODE (value)));
3330 }
3331
3332 /* Implement TARGET_REGISTER_MOVE_COST. */
3333
3334 static int
3335 s390_register_move_cost (machine_mode mode,
3336 reg_class_t from, reg_class_t to)
3337 {
3338 /* On s390, copy between fprs and gprs is expensive. */
3339
3340 /* It becomes somewhat faster having ldgr/lgdr. */
3341 if (TARGET_Z10 && GET_MODE_SIZE (mode) == 8)
3342 {
3343 /* ldgr is single cycle. */
3344 if (reg_classes_intersect_p (from, GENERAL_REGS)
3345 && reg_classes_intersect_p (to, FP_REGS))
3346 return 1;
3347 /* lgdr needs 3 cycles. */
3348 if (reg_classes_intersect_p (to, GENERAL_REGS)
3349 && reg_classes_intersect_p (from, FP_REGS))
3350 return 3;
3351 }
3352
3353 /* Otherwise copying is done via memory. */
3354 if ((reg_classes_intersect_p (from, GENERAL_REGS)
3355 && reg_classes_intersect_p (to, FP_REGS))
3356 || (reg_classes_intersect_p (from, FP_REGS)
3357 && reg_classes_intersect_p (to, GENERAL_REGS)))
3358 return 10;
3359
3360 return 1;
3361 }
3362
3363 /* Implement TARGET_MEMORY_MOVE_COST. */
3364
3365 static int
3366 s390_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
3367 reg_class_t rclass ATTRIBUTE_UNUSED,
3368 bool in ATTRIBUTE_UNUSED)
3369 {
3370 return 2;
3371 }
3372
3373 /* Compute a (partial) cost for rtx X. Return true if the complete
3374 cost has been computed, and false if subexpressions should be
3375 scanned. In either case, *TOTAL contains the cost result.
3376 OUTER_CODE contains the code of the superexpression of x. */
3377
3378 static bool
3379 s390_rtx_costs (rtx x, machine_mode mode, int outer_code,
3380 int opno ATTRIBUTE_UNUSED,
3381 int *total, bool speed ATTRIBUTE_UNUSED)
3382 {
3383 int code = GET_CODE (x);
3384 switch (code)
3385 {
3386 case CONST:
3387 case CONST_INT:
3388 case LABEL_REF:
3389 case SYMBOL_REF:
3390 case CONST_DOUBLE:
3391 case CONST_WIDE_INT:
3392 case MEM:
3393 *total = 0;
3394 return true;
3395
3396 case IOR:
3397 /* risbg */
3398 if (GET_CODE (XEXP (x, 0)) == AND
3399 && GET_CODE (XEXP (x, 1)) == ASHIFT
3400 && REG_P (XEXP (XEXP (x, 0), 0))
3401 && REG_P (XEXP (XEXP (x, 1), 0))
3402 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
3403 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
3404 && (UINTVAL (XEXP (XEXP (x, 0), 1)) ==
3405 (1UL << UINTVAL (XEXP (XEXP (x, 1), 1))) - 1))
3406 {
3407 *total = COSTS_N_INSNS (2);
3408 return true;
3409 }
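      /* Fall through: a plain IOR is costed like the other
	 single-instruction logical and shift operations below.  */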
3410 case ASHIFT:
3411 case ASHIFTRT:
3412 case LSHIFTRT:
3413 case ROTATE:
3414 case ROTATERT:
3415 case AND:
3416 case XOR:
3417 case NEG:
3418 case NOT:
3419 *total = COSTS_N_INSNS (1);
3420 return false;
3421
3422 case PLUS:
3423 case MINUS:
3424 *total = COSTS_N_INSNS (1);
3425 return false;
3426
3427 case MULT:
3428 switch (mode)
3429 {
3430 case SImode:
3431 {
3432 rtx left = XEXP (x, 0);
3433 rtx right = XEXP (x, 1);
3434 if (GET_CODE (right) == CONST_INT
3435 && CONST_OK_FOR_K (INTVAL (right)))
3436 *total = s390_cost->mhi;
3437 else if (GET_CODE (left) == SIGN_EXTEND)
3438 *total = s390_cost->mh;
3439 else
3440 *total = s390_cost->ms; /* msr, ms, msy */
3441 break;
3442 }
3443 case DImode:
3444 {
3445 rtx left = XEXP (x, 0);
3446 rtx right = XEXP (x, 1);
3447 if (TARGET_ZARCH)
3448 {
3449 if (GET_CODE (right) == CONST_INT
3450 && CONST_OK_FOR_K (INTVAL (right)))
3451 *total = s390_cost->mghi;
3452 else if (GET_CODE (left) == SIGN_EXTEND)
3453 *total = s390_cost->msgf;
3454 else
3455 *total = s390_cost->msg; /* msgr, msg */
3456 }
3457 else /* TARGET_31BIT */
3458 {
3459 if (GET_CODE (left) == SIGN_EXTEND
3460 && GET_CODE (right) == SIGN_EXTEND)
3461 /* mulsidi case: mr, m */
3462 *total = s390_cost->m;
3463 else if (GET_CODE (left) == ZERO_EXTEND
3464 && GET_CODE (right) == ZERO_EXTEND
3465 && TARGET_CPU_ZARCH)
3466 /* umulsidi case: ml, mlr */
3467 *total = s390_cost->ml;
3468 else
3469 /* Complex calculation is required. */
3470 *total = COSTS_N_INSNS (40);
3471 }
3472 break;
3473 }
3474 case SFmode:
3475 case DFmode:
3476 *total = s390_cost->mult_df;
3477 break;
3478 case TFmode:
3479 *total = s390_cost->mxbr;
3480 break;
3481 default:
3482 return false;
3483 }
3484 return false;
3485
3486 case FMA:
3487 switch (mode)
3488 {
3489 case DFmode:
3490 *total = s390_cost->madbr;
3491 break;
3492 case SFmode:
3493 *total = s390_cost->maebr;
3494 break;
3495 default:
3496 return false;
3497 }
3498 /* Negate in the third argument is free: FMSUB. */
3499 if (GET_CODE (XEXP (x, 2)) == NEG)
3500 {
3501 *total += (rtx_cost (XEXP (x, 0), mode, FMA, 0, speed)
3502 + rtx_cost (XEXP (x, 1), mode, FMA, 1, speed)
3503 + rtx_cost (XEXP (XEXP (x, 2), 0), mode, FMA, 2, speed));
3504 return true;
3505 }
3506 return false;
3507
3508 case UDIV:
3509 case UMOD:
3510 if (mode == TImode) /* 128 bit division */
3511 *total = s390_cost->dlgr;
3512 else if (mode == DImode)
3513 {
3514 rtx right = XEXP (x, 1);
3515 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
3516 *total = s390_cost->dlr;
3517 else /* 64 by 64 bit division */
3518 *total = s390_cost->dlgr;
3519 }
3520 else if (mode == SImode) /* 32 bit division */
3521 *total = s390_cost->dlr;
3522 return false;
3523
3524 case DIV:
3525 case MOD:
3526 if (mode == DImode)
3527 {
3528 rtx right = XEXP (x, 1);
3529 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
3530 if (TARGET_ZARCH)
3531 *total = s390_cost->dsgfr;
3532 else
3533 *total = s390_cost->dr;
3534 else /* 64 by 64 bit division */
3535 *total = s390_cost->dsgr;
3536 }
3537 else if (mode == SImode) /* 32 bit division */
3538 *total = s390_cost->dlr;
3539 else if (mode == SFmode)
3540 {
3541 *total = s390_cost->debr;
3542 }
3543 else if (mode == DFmode)
3544 {
3545 *total = s390_cost->ddbr;
3546 }
3547 else if (mode == TFmode)
3548 {
3549 *total = s390_cost->dxbr;
3550 }
3551 return false;
3552
3553 case SQRT:
3554 if (mode == SFmode)
3555 *total = s390_cost->sqebr;
3556 else if (mode == DFmode)
3557 *total = s390_cost->sqdbr;
3558 else /* TFmode */
3559 *total = s390_cost->sqxbr;
3560 return false;
3561
3562 case SIGN_EXTEND:
3563 case ZERO_EXTEND:
3564 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
3565 || outer_code == PLUS || outer_code == MINUS
3566 || outer_code == COMPARE)
3567 *total = 0;
3568 return false;
3569
3570 case COMPARE:
3571 *total = COSTS_N_INSNS (1);
3572 if (GET_CODE (XEXP (x, 0)) == AND
3573 && GET_CODE (XEXP (x, 1)) == CONST_INT
3574 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
3575 {
3576 rtx op0 = XEXP (XEXP (x, 0), 0);
3577 rtx op1 = XEXP (XEXP (x, 0), 1);
3578 rtx op2 = XEXP (x, 1);
3579
3580 if (memory_operand (op0, GET_MODE (op0))
3581 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
3582 return true;
3583 if (register_operand (op0, GET_MODE (op0))
3584 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
3585 return true;
3586 }
3587 return false;
3588
3589 default:
3590 return false;
3591 }
3592 }
3593
3594 /* Return the cost of an address rtx ADDR. */
3595
3596 static int
3597 s390_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
3598 addr_space_t as ATTRIBUTE_UNUSED,
3599 bool speed ATTRIBUTE_UNUSED)
3600 {
3601 struct s390_address ad;
3602 if (!s390_decompose_address (addr, &ad))
3603 return 1000;
3604
3605 return ad.indx? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
3606 }
3607
3608 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
3609 otherwise return 0. */
3610
3611 int
3612 tls_symbolic_operand (rtx op)
3613 {
3614 if (GET_CODE (op) != SYMBOL_REF)
3615 return 0;
3616 return SYMBOL_REF_TLS_MODEL (op);
3617 }
3618 \f
3619 /* Split DImode access register reference REG (on 64-bit) into its constituent
3620 low and high parts, and store them into LO and HI. Note that gen_lowpart/
3621 gen_highpart cannot be used as they assume all registers are word-sized,
3622 while our access registers have only half that size. */
3623
3624 void
3625 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
3626 {
3627 gcc_assert (TARGET_64BIT);
3628 gcc_assert (ACCESS_REG_P (reg));
3629 gcc_assert (GET_MODE (reg) == DImode);
3630 gcc_assert (!(REGNO (reg) & 1));
3631
3632 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
3633 *hi = gen_rtx_REG (SImode, REGNO (reg));
3634 }
3635
3636 /* Return true if OP contains a symbol reference. */
3637
3638 bool
3639 symbolic_reference_mentioned_p (rtx op)
3640 {
3641 const char *fmt;
3642 int i;
3643
3644 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
3645 return 1;
3646
3647 fmt = GET_RTX_FORMAT (GET_CODE (op));
3648 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3649 {
3650 if (fmt[i] == 'E')
3651 {
3652 int j;
3653
3654 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3655 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3656 return 1;
3657 }
3658
3659 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
3660 return 1;
3661 }
3662
3663 return 0;
3664 }
3665
3666 /* Return true if OP contains a reference to a thread-local symbol. */
3667
3668 bool
3669 tls_symbolic_reference_mentioned_p (rtx op)
3670 {
3671 const char *fmt;
3672 int i;
3673
3674 if (GET_CODE (op) == SYMBOL_REF)
3675 return tls_symbolic_operand (op);
3676
3677 fmt = GET_RTX_FORMAT (GET_CODE (op));
3678 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3679 {
3680 if (fmt[i] == 'E')
3681 {
3682 int j;
3683
3684 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3685 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3686 return true;
3687 }
3688
3689 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
3690 return true;
3691 }
3692
3693 return false;
3694 }
3695
3696
3697 /* Return true if OP is a legitimate general operand when
3698 generating PIC code. It is given that flag_pic is on
3699 and that OP satisfies CONSTANT_P. */
3700
3701 int
3702 legitimate_pic_operand_p (rtx op)
3703 {
3704 /* Accept all non-symbolic constants. */
3705 if (!SYMBOLIC_CONST (op))
3706 return 1;
3707
3708 /* Reject everything else; must be handled
3709 via emit_symbolic_move. */
3710 return 0;
3711 }
3712
3713 /* Returns true if the constant value OP is a legitimate general operand.
3714 It is given that OP satisfies CONSTANT_P. */
3715
3716 static bool
3717 s390_legitimate_constant_p (machine_mode mode, rtx op)
3718 {
3719 if (TARGET_VX && VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
3720 {
3721 if (GET_MODE_SIZE (mode) != 16)
3722 return 0;
3723
3724 if (!satisfies_constraint_j00 (op)
3725 && !satisfies_constraint_jm1 (op)
3726 && !satisfies_constraint_jKK (op)
3727 && !satisfies_constraint_jxx (op)
3728 && !satisfies_constraint_jyy (op))
3729 return 0;
3730 }
3731
3732 /* Accept all non-symbolic constants. */
3733 if (!SYMBOLIC_CONST (op))
3734 return 1;
3735
3736 /* Accept immediate LARL operands. */
3737 if (TARGET_CPU_ZARCH && larl_operand (op, mode))
3738 return 1;
3739
3740 /* Thread-local symbols are never legal constants. This is
3741 so that emit_call knows that computing such addresses
3742 might require a function call. */
3743 if (TLS_SYMBOLIC_CONST (op))
3744 return 0;
3745
3746 /* In the PIC case, symbolic constants must *not* be
3747 forced into the literal pool. We accept them here,
3748 so that they will be handled by emit_symbolic_move. */
3749 if (flag_pic)
3750 return 1;
3751
3752 /* All remaining non-PIC symbolic constants are
3753 forced into the literal pool. */
3754 return 0;
3755 }
3756
3757 /* Determine if it's legal to put X into the constant pool. This
3758 is not possible if X contains the address of a symbol that is
3759 not constant (TLS) or not known at final link time (PIC). */
3760
3761 static bool
3762 s390_cannot_force_const_mem (machine_mode mode, rtx x)
3763 {
3764 switch (GET_CODE (x))
3765 {
3766 case CONST_INT:
3767 case CONST_DOUBLE:
3768 case CONST_WIDE_INT:
3769 case CONST_VECTOR:
3770 /* Accept all non-symbolic constants. */
3771 return false;
3772
3773 case LABEL_REF:
3774 /* Labels are OK iff we are non-PIC. */
3775 return flag_pic != 0;
3776
3777 case SYMBOL_REF:
3778 /* 'Naked' TLS symbol references are never OK,
3779 non-TLS symbols are OK iff we are non-PIC. */
3780 if (tls_symbolic_operand (x))
3781 return true;
3782 else
3783 return flag_pic != 0;
3784
3785 case CONST:
3786 return s390_cannot_force_const_mem (mode, XEXP (x, 0));
3787 case PLUS:
3788 case MINUS:
3789 return s390_cannot_force_const_mem (mode, XEXP (x, 0))
3790 || s390_cannot_force_const_mem (mode, XEXP (x, 1));
3791
3792 case UNSPEC:
3793 switch (XINT (x, 1))
3794 {
3795 /* Only lt-relative or GOT-relative UNSPECs are OK. */
3796 case UNSPEC_LTREL_OFFSET:
3797 case UNSPEC_GOT:
3798 case UNSPEC_GOTOFF:
3799 case UNSPEC_PLTOFF:
3800 case UNSPEC_TLSGD:
3801 case UNSPEC_TLSLDM:
3802 case UNSPEC_NTPOFF:
3803 case UNSPEC_DTPOFF:
3804 case UNSPEC_GOTNTPOFF:
3805 case UNSPEC_INDNTPOFF:
3806 return false;
3807
3808 /* If the literal pool shares the code section, execute template
3809 placeholders may be put into the pool as well. */
3810 case UNSPEC_INSN:
3811 return TARGET_CPU_ZARCH;
3812
3813 default:
3814 return true;
3815 }
3816 break;
3817
3818 default:
3819 gcc_unreachable ();
3820 }
3821 }
3822
3823 /* Returns true if the constant value OP is a legitimate general
3824 operand during and after reload. The difference to
3825 legitimate_constant_p is that this function will not accept
3826 a constant that would need to be forced to the literal pool
3827 before it can be used as operand.
3828 This function accepts all constants which can be loaded directly
3829 into a GPR. */
3830
3831 bool
3832 legitimate_reload_constant_p (rtx op)
3833 {
3834 /* Accept la(y) operands. */
3835 if (GET_CODE (op) == CONST_INT
3836 && DISP_IN_RANGE (INTVAL (op)))
3837 return true;
3838
3839 /* Accept l(g)hi/l(g)fi operands. */
3840 if (GET_CODE (op) == CONST_INT
3841 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
3842 return true;
3843
3844 /* Accept lliXX operands. */
3845 if (TARGET_ZARCH
3846 && GET_CODE (op) == CONST_INT
3847 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
3848 && s390_single_part (op, word_mode, HImode, 0) >= 0)
3849 return true;
3850
3851 if (TARGET_EXTIMM
3852 && GET_CODE (op) == CONST_INT
3853 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
3854 && s390_single_part (op, word_mode, SImode, 0) >= 0)
3855 return true;
3856
3857 /* Accept larl operands. */
3858 if (TARGET_CPU_ZARCH
3859 && larl_operand (op, VOIDmode))
3860 return true;
3861
3862 /* Accept floating-point zero operands that fit into a single GPR. */
3863 if (GET_CODE (op) == CONST_DOUBLE
3864 && s390_float_const_zero_p (op)
3865 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
3866 return true;
3867
3868 /* Accept double-word operands that can be split. */
3869 if (GET_CODE (op) == CONST_WIDE_INT
3870 || (GET_CODE (op) == CONST_INT
3871 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op)))
3872 {
3873 machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
3874 rtx hi = operand_subword (op, 0, 0, dword_mode);
3875 rtx lo = operand_subword (op, 1, 0, dword_mode);
3876 return legitimate_reload_constant_p (hi)
3877 && legitimate_reload_constant_p (lo);
3878 }
3879
3880 /* Everything else cannot be handled without reload. */
3881 return false;
3882 }
3883
3884 /* Returns true if the constant value OP is a legitimate fp operand
3885 during and after reload.
3886 This function accepts all constants which can be loaded directly
3887 into an FPR. */
3888
3889 static bool
3890 legitimate_reload_fp_constant_p (rtx op)
3891 {
3892 /* Accept floating-point zero operands if the load zero instruction
3893 can be used. Prior to z196 the load fp zero instruction caused a
3894 performance penalty if the result is used as BFP number. */
3895 if (TARGET_Z196
3896 && GET_CODE (op) == CONST_DOUBLE
3897 && s390_float_const_zero_p (op))
3898 return true;
3899
3900 return false;
3901 }
3902
3903 /* Returns true if the constant value OP is a legitimate vector operand
3904 during and after reload.
3905 This function accepts all constants which can be loaded directly
3906 into an VR. */
3907
3908 static bool
3909 legitimate_reload_vector_constant_p (rtx op)
3910 {
3911 if (TARGET_VX && GET_MODE_SIZE (GET_MODE (op)) == 16
3912 && (satisfies_constraint_j00 (op)
3913 || satisfies_constraint_jm1 (op)
3914 || satisfies_constraint_jKK (op)
3915 || satisfies_constraint_jxx (op)
3916 || satisfies_constraint_jyy (op)))
3917 return true;
3918
3919 return false;
3920 }
3921
3922 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
3923 return the class of reg to actually use. */
3924
3925 static reg_class_t
3926 s390_preferred_reload_class (rtx op, reg_class_t rclass)
3927 {
3928 switch (GET_CODE (op))
3929 {
3930 /* Constants we cannot reload into general registers
3931 must be forced into the literal pool. */
3932 case CONST_VECTOR:
3933 case CONST_DOUBLE:
3934 case CONST_INT:
3935 case CONST_WIDE_INT:
3936 if (reg_class_subset_p (GENERAL_REGS, rclass)
3937 && legitimate_reload_constant_p (op))
3938 return GENERAL_REGS;
3939 else if (reg_class_subset_p (ADDR_REGS, rclass)
3940 && legitimate_reload_constant_p (op))
3941 return ADDR_REGS;
3942 else if (reg_class_subset_p (FP_REGS, rclass)
3943 && legitimate_reload_fp_constant_p (op))
3944 return FP_REGS;
3945 else if (reg_class_subset_p (VEC_REGS, rclass)
3946 && legitimate_reload_vector_constant_p (op))
3947 return VEC_REGS;
3948
3949 return NO_REGS;
3950
3951 /* If a symbolic constant or a PLUS is reloaded,
3952 it is most likely being used as an address, so
3953 prefer ADDR_REGS. If 'class' is not a superset
3954 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
3955 case CONST:
3956 /* Symrefs cannot be pushed into the literal pool with -fPIC
3957 so we *MUST NOT* return NO_REGS for these cases
3958 (s390_cannot_force_const_mem will return true).
3959
3960 On the other hand we MUST return NO_REGS for symrefs with
3961 invalid addend which might have been pushed to the literal
3962 pool (no -fPIC). Usually we would expect them to be
3963 handled via secondary reload but this does not happen if
3964 they are used as literal pool slot replacement in reload
3965 inheritance (see emit_input_reload_insns). */
3966 if (TARGET_CPU_ZARCH
3967 && GET_CODE (XEXP (op, 0)) == PLUS
3968 && GET_CODE (XEXP (XEXP(op, 0), 0)) == SYMBOL_REF
3969 && GET_CODE (XEXP (XEXP(op, 0), 1)) == CONST_INT)
3970 {
3971 if (flag_pic && reg_class_subset_p (ADDR_REGS, rclass))
3972 return ADDR_REGS;
3973 else
3974 return NO_REGS;
3975 }
3976 /* fallthrough */
3977 case LABEL_REF:
3978 case SYMBOL_REF:
3979 if (!legitimate_reload_constant_p (op))
3980 return NO_REGS;
3981 /* fallthrough */
3982 case PLUS:
3983 /* load address will be used. */
3984 if (reg_class_subset_p (ADDR_REGS, rclass))
3985 return ADDR_REGS;
3986 else
3987 return NO_REGS;
3988
3989 default:
3990 break;
3991 }
3992
3993 return rclass;
3994 }
3995
3996 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
3997 multiple of ALIGNMENT and the SYMBOL_REF being naturally
3998 aligned. */
3999
4000 bool
4001 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
4002 {
4003 HOST_WIDE_INT addend;
4004 rtx symref;
4005
4006 /* The "required alignment" might be 0 (e.g. for certain structs
4007 accessed via BLKmode). Early abort in this case, as well as when
4008 an alignment > 8 is required. */
4009 if (alignment < 2 || alignment > 8)
4010 return false;
4011
4012 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
4013 return false;
4014
4015 if (addend & (alignment - 1))
4016 return false;
4017
4018 if (GET_CODE (symref) == SYMBOL_REF)
4019 {
4020 /* We have load-relative instructions for 2-byte, 4-byte, and
4021 8-byte alignment so allow only these. */
4022 switch (alignment)
4023 {
4024 case 8: return !SYMBOL_FLAG_NOTALIGN8_P (symref);
4025 case 4: return !SYMBOL_FLAG_NOTALIGN4_P (symref);
4026 case 2: return !SYMBOL_FLAG_NOTALIGN2_P (symref);
4027 default: return false;
4028 }
4029 }
4030
4031 if (GET_CODE (symref) == UNSPEC
4032 && alignment <= UNITS_PER_LONG)
4033 return true;
4034
4035 return false;
4036 }
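/* Rough example: with ALIGNMENT == 4, an address of the form sym + 6 is
   rejected because the addend is not a multiple of 4, while sym + 8 is
   accepted as long as the symbol is not marked as potentially misaligned
   (SYMBOL_FLAG_NOTALIGN4).  */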
4037
4038 /* ADDR is moved into REG using larl.  If ADDR isn't a valid larl
4039    operand, SCRATCH is used to load the even part of the address,
4040    and one is then added.  */
4041
4042 void
4043 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
4044 {
4045 HOST_WIDE_INT addend;
4046 rtx symref;
4047
4048 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
4049 gcc_unreachable ();
4050
4051 if (!(addend & 1))
4052 /* Easy case. The addend is even so larl will do fine. */
4053 emit_move_insn (reg, addr);
4054 else
4055 {
4056 /* We can leave the scratch register untouched if the target
4057 register is a valid base register. */
4058 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
4059 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
4060 scratch = reg;
4061
4062 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
4063 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
4064
4065 if (addend != 1)
4066 emit_move_insn (scratch,
4067 gen_rtx_CONST (Pmode,
4068 gen_rtx_PLUS (Pmode, symref,
4069 GEN_INT (addend - 1))));
4070 else
4071 emit_move_insn (scratch, symref);
4072
4073 /* Increment the address using la in order to avoid clobbering cc. */
4074 s390_load_address (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
4075 }
4076 }
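/* Rough example of the odd-addend path: for ADDR == sym + 5 the code above
   loads sym + 4 (a valid larl operand) into SCRATCH and then forms
   REG = SCRATCH + 1 with la, leaving the condition code untouched.  */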
4077
4078 /* Generate what is necessary to move between REG and MEM using
4079 SCRATCH. The direction is given by TOMEM. */
4080
4081 void
4082 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
4083 {
4084 /* Reload might have pulled a constant out of the literal pool.
4085 Force it back in. */
4086 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
4087 || GET_CODE (mem) == CONST_WIDE_INT
4088 || GET_CODE (mem) == CONST_VECTOR
4089 || GET_CODE (mem) == CONST)
4090 mem = force_const_mem (GET_MODE (reg), mem);
4091
4092 gcc_assert (MEM_P (mem));
4093
4094 /* For a load from memory we can leave the scratch register
4095 untouched if the target register is a valid base register. */
4096 if (!tomem
4097 && REGNO (reg) < FIRST_PSEUDO_REGISTER
4098 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
4099 && GET_MODE (reg) == GET_MODE (scratch))
4100 scratch = reg;
4101
4102 /* Load address into scratch register. Since we can't have a
4103 secondary reload for a secondary reload we have to cover the case
4104 where larl would need a secondary reload here as well. */
4105 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
4106
4107 /* Now we can use a standard load/store to do the move. */
4108 if (tomem)
4109 emit_move_insn (replace_equiv_address (mem, scratch), reg);
4110 else
4111 emit_move_insn (reg, replace_equiv_address (mem, scratch));
4112 }
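/* Sketch of the typical sequence emitted here for a load (TOMEM false):
   the symbolic address is first materialized into SCRATCH (or into REG
   itself, if REG is a valid base register) via s390_reload_larl_operand,
   and the value is then moved with an ordinary base+displacement load.  */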
4113
4114 /* Inform reload about cases where moving X with a mode MODE to a register in
4115 RCLASS requires an extra scratch or immediate register. Return the class
4116 needed for the immediate register. */
4117
4118 static reg_class_t
4119 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
4120 machine_mode mode, secondary_reload_info *sri)
4121 {
4122 enum reg_class rclass = (enum reg_class) rclass_i;
4123
4124 /* Intermediate register needed. */
4125 if (reg_classes_intersect_p (CC_REGS, rclass))
4126 return GENERAL_REGS;
4127
4128 if (TARGET_VX)
4129 {
4130 /* The vst/vl vector move instructions allow only for short
4131 displacements. */
4132 if (MEM_P (x)
4133 && GET_CODE (XEXP (x, 0)) == PLUS
4134 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4135 && !SHORT_DISP_IN_RANGE(INTVAL (XEXP (XEXP (x, 0), 1)))
4136 && reg_class_subset_p (rclass, VEC_REGS)
4137 && (!reg_class_subset_p (rclass, FP_REGS)
4138 || (GET_MODE_SIZE (mode) > 8
4139 && s390_class_max_nregs (FP_REGS, mode) == 1)))
4140 {
4141 if (in_p)
4142 sri->icode = (TARGET_64BIT ?
4143 CODE_FOR_reloaddi_la_in :
4144 CODE_FOR_reloadsi_la_in);
4145 else
4146 sri->icode = (TARGET_64BIT ?
4147 CODE_FOR_reloaddi_la_out :
4148 CODE_FOR_reloadsi_la_out);
4149 }
4150 }
4151
4152 if (TARGET_Z10)
4153 {
4154 HOST_WIDE_INT offset;
4155 rtx symref;
4156
4157 /* On z10 several optimizer steps may generate larl operands with
4158 an odd addend. */
4159 if (in_p
4160 && s390_loadrelative_operand_p (x, &symref, &offset)
4161 && mode == Pmode
4162 && !SYMBOL_FLAG_NOTALIGN2_P (symref)
4163 && (offset & 1) == 1)
4164 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
4165 : CODE_FOR_reloadsi_larl_odd_addend_z10);
4166
4167 /* Handle all the (mem (symref)) accesses we cannot use the z10
4168 instructions for. */
4169 if (MEM_P (x)
4170 && s390_loadrelative_operand_p (XEXP (x, 0), NULL, NULL)
4171 && (mode == QImode
4172 || !reg_class_subset_p (rclass, GENERAL_REGS)
4173 || GET_MODE_SIZE (mode) > UNITS_PER_WORD
4174 || !s390_check_symref_alignment (XEXP (x, 0),
4175 GET_MODE_SIZE (mode))))
4176 {
4177 #define __SECONDARY_RELOAD_CASE(M,m) \
4178 case M##mode: \
4179 if (TARGET_64BIT) \
4180 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
4181 CODE_FOR_reload##m##di_tomem_z10; \
4182 else \
4183 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
4184 CODE_FOR_reload##m##si_tomem_z10; \
4185 break;
4186
4187 switch (GET_MODE (x))
4188 {
4189 __SECONDARY_RELOAD_CASE (QI, qi);
4190 __SECONDARY_RELOAD_CASE (HI, hi);
4191 __SECONDARY_RELOAD_CASE (SI, si);
4192 __SECONDARY_RELOAD_CASE (DI, di);
4193 __SECONDARY_RELOAD_CASE (TI, ti);
4194 __SECONDARY_RELOAD_CASE (SF, sf);
4195 __SECONDARY_RELOAD_CASE (DF, df);
4196 __SECONDARY_RELOAD_CASE (TF, tf);
4197 __SECONDARY_RELOAD_CASE (SD, sd);
4198 __SECONDARY_RELOAD_CASE (DD, dd);
4199 __SECONDARY_RELOAD_CASE (TD, td);
4200 __SECONDARY_RELOAD_CASE (V1QI, v1qi);
4201 __SECONDARY_RELOAD_CASE (V2QI, v2qi);
4202 __SECONDARY_RELOAD_CASE (V4QI, v4qi);
4203 __SECONDARY_RELOAD_CASE (V8QI, v8qi);
4204 __SECONDARY_RELOAD_CASE (V16QI, v16qi);
4205 __SECONDARY_RELOAD_CASE (V1HI, v1hi);
4206 __SECONDARY_RELOAD_CASE (V2HI, v2hi);
4207 __SECONDARY_RELOAD_CASE (V4HI, v4hi);
4208 __SECONDARY_RELOAD_CASE (V8HI, v8hi);
4209 __SECONDARY_RELOAD_CASE (V1SI, v1si);
4210 __SECONDARY_RELOAD_CASE (V2SI, v2si);
4211 __SECONDARY_RELOAD_CASE (V4SI, v4si);
4212 __SECONDARY_RELOAD_CASE (V1DI, v1di);
4213 __SECONDARY_RELOAD_CASE (V2DI, v2di);
4214 __SECONDARY_RELOAD_CASE (V1TI, v1ti);
4215 __SECONDARY_RELOAD_CASE (V1SF, v1sf);
4216 __SECONDARY_RELOAD_CASE (V2SF, v2sf);
4217 __SECONDARY_RELOAD_CASE (V4SF, v4sf);
4218 __SECONDARY_RELOAD_CASE (V1DF, v1df);
4219 __SECONDARY_RELOAD_CASE (V2DF, v2df);
4220 __SECONDARY_RELOAD_CASE (V1TF, v1tf);
4221 default:
4222 gcc_unreachable ();
4223 }
4224 #undef __SECONDARY_RELOAD_CASE
4225 }
4226 }
4227
4228 /* We need a scratch register when loading a PLUS expression which
4229 is not a legitimate operand of the LOAD ADDRESS instruction. */
4230 /* LRA can deal with transformation of plus op very well -- so we
4231 don't need to prompt LRA in this case. */
4232 if (! lra_in_progress && in_p && s390_plus_operand (x, mode))
4233 sri->icode = (TARGET_64BIT ?
4234 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
4235
4236 /* Performing a multiword move from or to memory we have to make sure the
4237 second chunk in memory is addressable without causing a displacement
4238 overflow. If that would be the case we calculate the address in
4239 a scratch register. */
4240 if (MEM_P (x)
4241 && GET_CODE (XEXP (x, 0)) == PLUS
4242 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4243 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
4244 + GET_MODE_SIZE (mode) - 1))
4245 {
4246 /* For GENERAL_REGS a displacement overflow is no problem if occurring
4247 	 in an s_operand address since we may fall back to lm/stm. So we only
4248 have to care about overflows in the b+i+d case. */
4249 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
4250 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
4251 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
4252 /* For FP_REGS no lm/stm is available so this check is triggered
4253 for displacement overflows in b+i+d and b+d like addresses. */
4254 || (reg_classes_intersect_p (FP_REGS, rclass)
4255 && s390_class_max_nregs (FP_REGS, mode) > 1))
4256 {
4257 if (in_p)
4258 sri->icode = (TARGET_64BIT ?
4259 CODE_FOR_reloaddi_la_in :
4260 CODE_FOR_reloadsi_la_in);
4261 else
4262 sri->icode = (TARGET_64BIT ?
4263 CODE_FOR_reloaddi_la_out :
4264 CODE_FOR_reloadsi_la_out);
4265 }
4266 }
4267
4268 /* A scratch address register is needed when a symbolic constant is
4269      copied to r0 when compiling with -fPIC. In other cases the target
4270 register might be used as temporary (see legitimize_pic_address). */
4271 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
4272 sri->icode = (TARGET_64BIT ?
4273 CODE_FOR_reloaddi_PIC_addr :
4274 CODE_FOR_reloadsi_PIC_addr);
4275
4276 /* Either scratch or no register needed. */
4277 return NO_REGS;
4278 }
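/* Rough example for the vector case above: vl/vst only encode a short
   (12-bit unsigned) displacement, so accessing a V16QImode value at
   base + 5000 cannot be done directly; the selected reload pattern first
   computes the address into an address register with la/lay and uses that
   register as the new base.  */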
4279
4280 /* Generate code to load SRC, which is PLUS that is not a
4281 legitimate operand for the LA instruction, into TARGET.
4282 SCRATCH may be used as scratch register. */
4283
4284 void
4285 s390_expand_plus_operand (rtx target, rtx src,
4286 rtx scratch)
4287 {
4288 rtx sum1, sum2;
4289 struct s390_address ad;
4290
4291 /* src must be a PLUS; get its two operands. */
4292 gcc_assert (GET_CODE (src) == PLUS);
4293 gcc_assert (GET_MODE (src) == Pmode);
4294
4295 /* Check if any of the two operands is already scheduled
4296 for replacement by reload. This can happen e.g. when
4297 float registers occur in an address. */
4298 sum1 = find_replacement (&XEXP (src, 0));
4299 sum2 = find_replacement (&XEXP (src, 1));
4300 src = gen_rtx_PLUS (Pmode, sum1, sum2);
4301
4302 /* If the address is already strictly valid, there's nothing to do. */
4303 if (!s390_decompose_address (src, &ad)
4304 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
4305 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
4306 {
4307 /* Otherwise, one of the operands cannot be an address register;
4308 we reload its value into the scratch register. */
4309 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
4310 {
4311 emit_move_insn (scratch, sum1);
4312 sum1 = scratch;
4313 }
4314 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
4315 {
4316 emit_move_insn (scratch, sum2);
4317 sum2 = scratch;
4318 }
4319
4320 /* According to the way these invalid addresses are generated
4321 in reload.c, it should never happen (at least on s390) that
4322 *neither* of the PLUS components, after find_replacements
4323 was applied, is an address register. */
4324 if (sum1 == scratch && sum2 == scratch)
4325 {
4326 debug_rtx (src);
4327 gcc_unreachable ();
4328 }
4329
4330 src = gen_rtx_PLUS (Pmode, sum1, sum2);
4331 }
4332
4333 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
4334 is only ever performed on addresses, so we can mark the
4335 sum as legitimate for LA in any case. */
4336 s390_load_address (target, src);
4337 }
4338
4339
4340 /* Return true if ADDR is a valid memory address.
4341 STRICT specifies whether strict register checking applies. */
4342
4343 static bool
4344 s390_legitimate_address_p (machine_mode mode, rtx addr, bool strict)
4345 {
4346 struct s390_address ad;
4347
4348 if (TARGET_Z10
4349 && larl_operand (addr, VOIDmode)
4350 && (mode == VOIDmode
4351 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
4352 return true;
4353
4354 if (!s390_decompose_address (addr, &ad))
4355 return false;
4356
4357 if (strict)
4358 {
4359 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
4360 return false;
4361
4362 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
4363 return false;
4364 }
4365 else
4366 {
4367 if (ad.base
4368 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
4369 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
4370 return false;
4371
4372 if (ad.indx
4373 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
4374 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
4375 return false;
4376 }
4377 return true;
4378 }
4379
4380 /* Return true if OP is a valid operand for the LA instruction.
4381 In 31-bit, we need to prove that the result is used as an
4382 address, as LA performs only a 31-bit addition. */
4383
4384 bool
4385 legitimate_la_operand_p (rtx op)
4386 {
4387 struct s390_address addr;
4388 if (!s390_decompose_address (op, &addr))
4389 return false;
4390
4391 return (TARGET_64BIT || addr.pointer);
4392 }
4393
4394 /* Return true if it is valid *and* preferable to use LA to
4395 compute the sum of OP1 and OP2. */
4396
4397 bool
4398 preferred_la_operand_p (rtx op1, rtx op2)
4399 {
4400 struct s390_address addr;
4401
4402 if (op2 != const0_rtx)
4403 op1 = gen_rtx_PLUS (Pmode, op1, op2);
4404
4405 if (!s390_decompose_address (op1, &addr))
4406 return false;
4407 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
4408 return false;
4409 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
4410 return false;
4411
4412 /* Avoid LA instructions with index register on z196; it is
4413 preferable to use regular add instructions when possible.
4414 Starting with zEC12 the la with index register is "uncracked"
4415 again. */
4416 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
4417 return false;
4418
4419 if (!TARGET_64BIT && !addr.pointer)
4420 return false;
4421
4422 if (addr.pointer)
4423 return true;
4424
4425 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
4426 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
4427 return true;
4428
4429 return false;
4430 }
4431
4432 /* Emit a forced load-address operation to load SRC into DST.
4433 This will use the LOAD ADDRESS instruction even in situations
4434 where legitimate_la_operand_p (SRC) returns false. */
4435
4436 void
4437 s390_load_address (rtx dst, rtx src)
4438 {
4439 if (TARGET_64BIT)
4440 emit_move_insn (dst, src);
4441 else
4442 emit_insn (gen_force_la_31 (dst, src));
4443 }
4444
4445 /* Return a legitimate reference for ORIG (an address) using the
4446 register REG. If REG is 0, a new pseudo is generated.
4447
4448 There are two types of references that must be handled:
4449
4450 1. Global data references must load the address from the GOT, via
4451 the PIC reg. An insn is emitted to do this load, and the reg is
4452 returned.
4453
4454 2. Static data references, constant pool addresses, and code labels
4455 compute the address as an offset from the GOT, whose base is in
4456 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
4457 differentiate them from global data objects. The returned
4458 address is the PIC reg + an unspec constant.
4459
4460 TARGET_LEGITIMIZE_ADDRESS_P rejects symbolic references unless the PIC
4461 reg also appears in the address. */
4462
4463 rtx
4464 legitimize_pic_address (rtx orig, rtx reg)
4465 {
4466 rtx addr = orig;
4467 rtx addend = const0_rtx;
4468 rtx new_rtx = orig;
4469
4470 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
4471
4472 if (GET_CODE (addr) == CONST)
4473 addr = XEXP (addr, 0);
4474
4475 if (GET_CODE (addr) == PLUS)
4476 {
4477 addend = XEXP (addr, 1);
4478 addr = XEXP (addr, 0);
4479 }
4480
4481 if ((GET_CODE (addr) == LABEL_REF
4482 || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr))
4483 || (GET_CODE (addr) == UNSPEC &&
4484 (XINT (addr, 1) == UNSPEC_GOTENT
4485 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
4486 && GET_CODE (addend) == CONST_INT)
4487 {
4488 /* This can be locally addressed. */
4489
4490 /* larl_operand requires UNSPECs to be wrapped in a const rtx. */
4491 rtx const_addr = (GET_CODE (addr) == UNSPEC ?
4492 gen_rtx_CONST (Pmode, addr) : addr);
4493
4494 if (TARGET_CPU_ZARCH
4495 && larl_operand (const_addr, VOIDmode)
4496 && INTVAL (addend) < (HOST_WIDE_INT)1 << 31
4497 && INTVAL (addend) >= -((HOST_WIDE_INT)1 << 31))
4498 {
4499 if (INTVAL (addend) & 1)
4500 {
4501 /* LARL can't handle odd offsets, so emit a pair of LARL
4502 and LA. */
4503 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4504
4505 if (!DISP_IN_RANGE (INTVAL (addend)))
4506 {
4507 HOST_WIDE_INT even = INTVAL (addend) - 1;
4508 addr = gen_rtx_PLUS (Pmode, addr, GEN_INT (even));
4509 addr = gen_rtx_CONST (Pmode, addr);
4510 addend = const1_rtx;
4511 }
4512
4513 emit_move_insn (temp, addr);
4514 new_rtx = gen_rtx_PLUS (Pmode, temp, addend);
4515
4516 if (reg != 0)
4517 {
4518 s390_load_address (reg, new_rtx);
4519 new_rtx = reg;
4520 }
4521 }
4522 else
4523 {
4524 /* If the offset is even, we can just use LARL. This
4525 will happen automatically. */
4526 }
4527 }
4528 else
4529 {
4530 /* No larl - Access local symbols relative to the GOT. */
4531
4532 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4533
4534 if (reload_in_progress || reload_completed)
4535 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4536
4537 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
4538 if (addend != const0_rtx)
4539 addr = gen_rtx_PLUS (Pmode, addr, addend);
4540 addr = gen_rtx_CONST (Pmode, addr);
4541 addr = force_const_mem (Pmode, addr);
4542 emit_move_insn (temp, addr);
4543
4544 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4545 if (reg != 0)
4546 {
4547 s390_load_address (reg, new_rtx);
4548 new_rtx = reg;
4549 }
4550 }
4551 }
4552 else if (GET_CODE (addr) == SYMBOL_REF && addend == const0_rtx)
4553 {
4554 /* A non-local symbol reference without addend.
4555
4556 The symbol ref is wrapped into an UNSPEC to make sure the
4557 proper operand modifier (@GOT or @GOTENT) will be emitted.
4558 This will tell the linker to put the symbol into the GOT.
4559
4560 Additionally the code dereferencing the GOT slot is emitted here.
4561
4562 An addend to the symref needs to be added afterwards.
4563 legitimize_pic_address calls itself recursively to handle
4564 that case. So no need to do it here. */
4565
4566 if (reg == 0)
4567 reg = gen_reg_rtx (Pmode);
4568
4569 if (TARGET_Z10)
4570 {
4571 /* Use load relative if possible.
4572 lgrl <target>, sym@GOTENT */
4573 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
4574 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4575 new_rtx = gen_const_mem (GET_MODE (reg), new_rtx);
4576
4577 emit_move_insn (reg, new_rtx);
4578 new_rtx = reg;
4579 }
4580 else if (flag_pic == 1)
4581 {
4582 /* Assume GOT offset is a valid displacement operand (< 4k
4583 or < 512k with z990). This is handled the same way in
4584 both 31- and 64-bit code (@GOT).
4585 lg <target>, sym@GOT(r12) */
4586
4587 if (reload_in_progress || reload_completed)
4588 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4589
4590 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
4591 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4592 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
4593 new_rtx = gen_const_mem (Pmode, new_rtx);
4594 emit_move_insn (reg, new_rtx);
4595 new_rtx = reg;
4596 }
4597 else if (TARGET_CPU_ZARCH)
4598 {
4599 /* If the GOT offset might be >= 4k, we determine the position
4600 of the GOT entry via a PC-relative LARL (@GOTENT).
4601 larl temp, sym@GOTENT
4602 lg <target>, 0(temp) */
4603
4604 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4605
4606 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
4607 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
4608
4609 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
4610 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4611 emit_move_insn (temp, new_rtx);
4612
4613 new_rtx = gen_const_mem (Pmode, temp);
4614 emit_move_insn (reg, new_rtx);
4615
4616 new_rtx = reg;
4617 }
4618 else
4619 {
4620 /* If the GOT offset might be >= 4k, we have to load it
4621 from the literal pool (@GOT).
4622
4623 lg temp, lit-litbase(r13)
4624 lg <target>, 0(temp)
4625 lit: .long sym@GOT */
4626
4627 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4628
4629 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
4630 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
4631
4632 if (reload_in_progress || reload_completed)
4633 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4634
4635 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
4636 addr = gen_rtx_CONST (Pmode, addr);
4637 addr = force_const_mem (Pmode, addr);
4638 emit_move_insn (temp, addr);
4639
4640 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4641 new_rtx = gen_const_mem (Pmode, new_rtx);
4642 emit_move_insn (reg, new_rtx);
4643 new_rtx = reg;
4644 }
4645 }
4646 else if (GET_CODE (addr) == UNSPEC && GET_CODE (addend) == CONST_INT)
4647 {
4648 gcc_assert (XVECLEN (addr, 0) == 1);
4649 switch (XINT (addr, 1))
4650 {
4651 /* These address symbols (or PLT slots) relative to the GOT
4652 (not GOT slots!). In general this will exceed the
4653 	   displacement range so these values belong in the literal
4654 pool. */
4655 case UNSPEC_GOTOFF:
4656 case UNSPEC_PLTOFF:
4657 new_rtx = force_const_mem (Pmode, orig);
4658 break;
4659
4660 /* For -fPIC the GOT size might exceed the displacement
4661 range so make sure the value is in the literal pool. */
4662 case UNSPEC_GOT:
4663 if (flag_pic == 2)
4664 new_rtx = force_const_mem (Pmode, orig);
4665 break;
4666
4667 /* For @GOTENT larl is used. This is handled like local
4668 symbol refs. */
4669 case UNSPEC_GOTENT:
4670 gcc_unreachable ();
4671 break;
4672
4673 /* @PLT is OK as is on 64-bit, must be converted to
4674 GOT-relative @PLTOFF on 31-bit. */
4675 case UNSPEC_PLT:
4676 if (!TARGET_CPU_ZARCH)
4677 {
4678 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4679
4680 if (reload_in_progress || reload_completed)
4681 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4682
4683 addr = XVECEXP (addr, 0, 0);
4684 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
4685 UNSPEC_PLTOFF);
4686 if (addend != const0_rtx)
4687 addr = gen_rtx_PLUS (Pmode, addr, addend);
4688 addr = gen_rtx_CONST (Pmode, addr);
4689 addr = force_const_mem (Pmode, addr);
4690 emit_move_insn (temp, addr);
4691
4692 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4693 if (reg != 0)
4694 {
4695 s390_load_address (reg, new_rtx);
4696 new_rtx = reg;
4697 }
4698 }
4699 else
4700 /* On 64 bit larl can be used. This case is handled like
4701 local symbol refs. */
4702 gcc_unreachable ();
4703 break;
4704
4705 /* Everything else cannot happen. */
4706 default:
4707 gcc_unreachable ();
4708 }
4709 }
4710 else if (addend != const0_rtx)
4711 {
4712 /* Otherwise, compute the sum. */
4713
4714 rtx base = legitimize_pic_address (addr, reg);
4715 new_rtx = legitimize_pic_address (addend,
4716 base == reg ? NULL_RTX : reg);
4717 if (GET_CODE (new_rtx) == CONST_INT)
4718 new_rtx = plus_constant (Pmode, base, INTVAL (new_rtx));
4719 else
4720 {
4721 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
4722 {
4723 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
4724 new_rtx = XEXP (new_rtx, 1);
4725 }
4726 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
4727 }
4728
4729 if (GET_CODE (new_rtx) == CONST)
4730 new_rtx = XEXP (new_rtx, 0);
4731 new_rtx = force_operand (new_rtx, 0);
4732 }
4733
4734 return new_rtx;
4735 }
4736
4737 /* Load the thread pointer into a register. */
4738
4739 rtx
4740 s390_get_thread_pointer (void)
4741 {
4742 rtx tp = gen_reg_rtx (Pmode);
4743
4744 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
4745 mark_reg_pointer (tp, BITS_PER_WORD);
4746
4747 return tp;
4748 }
4749
4750 /* Emit a tls call insn. The call target is the SYMBOL_REF stored
4751 in s390_tls_symbol which always refers to __tls_get_offset.
4752    The returned offset is written to RESULT_REG and a USE rtx is
4753 generated for TLS_CALL. */
4754
4755 static GTY(()) rtx s390_tls_symbol;
4756
4757 static void
4758 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
4759 {
4760 rtx insn;
4761
4762 if (!flag_pic)
4763 emit_insn (s390_load_got ());
4764
4765 if (!s390_tls_symbol)
4766 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
4767
4768 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
4769 gen_rtx_REG (Pmode, RETURN_REGNUM));
4770
4771 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
4772 RTL_CONST_CALL_P (insn) = 1;
4773 }
4774
4775 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
4776 this (thread-local) address. REG may be used as temporary. */
4777
4778 static rtx
4779 legitimize_tls_address (rtx addr, rtx reg)
4780 {
4781 rtx new_rtx, tls_call, temp, base, r2, insn;
4782
4783 if (GET_CODE (addr) == SYMBOL_REF)
4784 switch (tls_symbolic_operand (addr))
4785 {
4786 case TLS_MODEL_GLOBAL_DYNAMIC:
4787 start_sequence ();
4788 r2 = gen_rtx_REG (Pmode, 2);
4789 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
4790 new_rtx = gen_rtx_CONST (Pmode, tls_call);
4791 new_rtx = force_const_mem (Pmode, new_rtx);
4792 emit_move_insn (r2, new_rtx);
4793 s390_emit_tls_call_insn (r2, tls_call);
4794 insn = get_insns ();
4795 end_sequence ();
4796
4797 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
4798 temp = gen_reg_rtx (Pmode);
4799 emit_libcall_block (insn, temp, r2, new_rtx);
4800
4801 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4802 if (reg != 0)
4803 {
4804 s390_load_address (reg, new_rtx);
4805 new_rtx = reg;
4806 }
4807 break;
4808
4809 case TLS_MODEL_LOCAL_DYNAMIC:
4810 start_sequence ();
4811 r2 = gen_rtx_REG (Pmode, 2);
4812 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
4813 new_rtx = gen_rtx_CONST (Pmode, tls_call);
4814 new_rtx = force_const_mem (Pmode, new_rtx);
4815 emit_move_insn (r2, new_rtx);
4816 s390_emit_tls_call_insn (r2, tls_call);
4817 insn = get_insns ();
4818 end_sequence ();
4819
4820 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
4821 temp = gen_reg_rtx (Pmode);
4822 emit_libcall_block (insn, temp, r2, new_rtx);
4823
4824 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4825 base = gen_reg_rtx (Pmode);
4826 s390_load_address (base, new_rtx);
4827
4828 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
4829 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4830 new_rtx = force_const_mem (Pmode, new_rtx);
4831 temp = gen_reg_rtx (Pmode);
4832 emit_move_insn (temp, new_rtx);
4833
4834 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
4835 if (reg != 0)
4836 {
4837 s390_load_address (reg, new_rtx);
4838 new_rtx = reg;
4839 }
4840 break;
4841
4842 case TLS_MODEL_INITIAL_EXEC:
4843 if (flag_pic == 1)
4844 {
4845 /* Assume GOT offset < 4k. This is handled the same way
4846 in both 31- and 64-bit code. */
4847
4848 if (reload_in_progress || reload_completed)
4849 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4850
4851 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
4852 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4853 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
4854 new_rtx = gen_const_mem (Pmode, new_rtx);
4855 temp = gen_reg_rtx (Pmode);
4856 emit_move_insn (temp, new_rtx);
4857 }
4858 else if (TARGET_CPU_ZARCH)
4859 {
4860 /* If the GOT offset might be >= 4k, we determine the position
4861 of the GOT entry via a PC-relative LARL. */
4862
4863 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
4864 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4865 temp = gen_reg_rtx (Pmode);
4866 emit_move_insn (temp, new_rtx);
4867
4868 new_rtx = gen_const_mem (Pmode, temp);
4869 temp = gen_reg_rtx (Pmode);
4870 emit_move_insn (temp, new_rtx);
4871 }
4872 else if (flag_pic)
4873 {
4874 /* If the GOT offset might be >= 4k, we have to load it
4875 from the literal pool. */
4876
4877 if (reload_in_progress || reload_completed)
4878 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4879
4880 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
4881 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4882 new_rtx = force_const_mem (Pmode, new_rtx);
4883 temp = gen_reg_rtx (Pmode);
4884 emit_move_insn (temp, new_rtx);
4885
4886 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4887 new_rtx = gen_const_mem (Pmode, new_rtx);
4888
4889 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
4890 temp = gen_reg_rtx (Pmode);
4891 emit_insn (gen_rtx_SET (temp, new_rtx));
4892 }
4893 else
4894 {
4895 /* In position-dependent code, load the absolute address of
4896 the GOT entry from the literal pool. */
4897
4898 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
4899 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4900 new_rtx = force_const_mem (Pmode, new_rtx);
4901 temp = gen_reg_rtx (Pmode);
4902 emit_move_insn (temp, new_rtx);
4903
4904 new_rtx = temp;
4905 new_rtx = gen_const_mem (Pmode, new_rtx);
4906 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
4907 temp = gen_reg_rtx (Pmode);
4908 emit_insn (gen_rtx_SET (temp, new_rtx));
4909 }
4910
4911 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4912 if (reg != 0)
4913 {
4914 s390_load_address (reg, new_rtx);
4915 new_rtx = reg;
4916 }
4917 break;
4918
4919 case TLS_MODEL_LOCAL_EXEC:
4920 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
4921 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4922 new_rtx = force_const_mem (Pmode, new_rtx);
4923 temp = gen_reg_rtx (Pmode);
4924 emit_move_insn (temp, new_rtx);
4925
4926 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4927 if (reg != 0)
4928 {
4929 s390_load_address (reg, new_rtx);
4930 new_rtx = reg;
4931 }
4932 break;
4933
4934 default:
4935 gcc_unreachable ();
4936 }
4937
4938 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
4939 {
4940 switch (XINT (XEXP (addr, 0), 1))
4941 {
4942 case UNSPEC_INDNTPOFF:
4943 gcc_assert (TARGET_CPU_ZARCH);
4944 new_rtx = addr;
4945 break;
4946
4947 default:
4948 gcc_unreachable ();
4949 }
4950 }
4951
4952 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
4953 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
4954 {
4955 new_rtx = XEXP (XEXP (addr, 0), 0);
4956 if (GET_CODE (new_rtx) != SYMBOL_REF)
4957 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4958
4959 new_rtx = legitimize_tls_address (new_rtx, reg);
4960 new_rtx = plus_constant (Pmode, new_rtx,
4961 INTVAL (XEXP (XEXP (addr, 0), 1)));
4962 new_rtx = force_operand (new_rtx, 0);
4963 }
4964
4965 else
4966 gcc_unreachable (); /* for now ... */
4967
4968 return new_rtx;
4969 }
4970
4971 /* Emit insns making the address in operands[1] valid for a standard
4972 move to operands[0]. operands[1] is replaced by an address which
4973 should be used instead of the former RTX to emit the move
4974 pattern. */
4975
4976 void
4977 emit_symbolic_move (rtx *operands)
4978 {
4979 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
4980
4981 if (GET_CODE (operands[0]) == MEM)
4982 operands[1] = force_reg (Pmode, operands[1]);
4983 else if (TLS_SYMBOLIC_CONST (operands[1]))
4984 operands[1] = legitimize_tls_address (operands[1], temp);
4985 else if (flag_pic)
4986 operands[1] = legitimize_pic_address (operands[1], temp);
4987 }
4988
4989 /* Try machine-dependent ways of modifying an illegitimate address X
4990 to be legitimate. If we find one, return the new, valid address.
4991
4992 OLDX is the address as it was before break_out_memory_refs was called.
4993 In some cases it is useful to look at this to decide what needs to be done.
4994
4995 MODE is the mode of the operand pointed to by X.
4996
4997 When -fpic is used, special handling is needed for symbolic references.
4998 See comments by legitimize_pic_address for details. */
4999
5000 static rtx
5001 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
5002 machine_mode mode ATTRIBUTE_UNUSED)
5003 {
5004 rtx constant_term = const0_rtx;
5005
5006 if (TLS_SYMBOLIC_CONST (x))
5007 {
5008 x = legitimize_tls_address (x, 0);
5009
5010 if (s390_legitimate_address_p (mode, x, FALSE))
5011 return x;
5012 }
5013 else if (GET_CODE (x) == PLUS
5014 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
5015 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
5016 {
5017 return x;
5018 }
5019 else if (flag_pic)
5020 {
5021 if (SYMBOLIC_CONST (x)
5022 || (GET_CODE (x) == PLUS
5023 && (SYMBOLIC_CONST (XEXP (x, 0))
5024 || SYMBOLIC_CONST (XEXP (x, 1)))))
5025 x = legitimize_pic_address (x, 0);
5026
5027 if (s390_legitimate_address_p (mode, x, FALSE))
5028 return x;
5029 }
5030
5031 x = eliminate_constant_term (x, &constant_term);
5032
5033 /* Optimize loading of large displacements by splitting them
5034 into the multiple of 4K and the rest; this allows the
5035 former to be CSE'd if possible.
5036
5037 Don't do this if the displacement is added to a register
5038 pointing into the stack frame, as the offsets will
5039 change later anyway. */
5040
5041 if (GET_CODE (constant_term) == CONST_INT
5042 && !TARGET_LONG_DISPLACEMENT
5043 && !DISP_IN_RANGE (INTVAL (constant_term))
5044 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
5045 {
5046 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
5047 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
5048
5049 rtx temp = gen_reg_rtx (Pmode);
5050 rtx val = force_operand (GEN_INT (upper), temp);
5051 if (val != temp)
5052 emit_move_insn (temp, val);
5053
5054 x = gen_rtx_PLUS (Pmode, x, temp);
5055 constant_term = GEN_INT (lower);
5056 }
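  /* Rough example: without long displacements a displacement of 0x1234 is
     out of range.  The code above splits it into upper == 0x1000, which
     goes into a register and can be CSE'd across accesses, and
     lower == 0x234, which remains as an in-range displacement.  */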
5057
5058 if (GET_CODE (x) == PLUS)
5059 {
5060 if (GET_CODE (XEXP (x, 0)) == REG)
5061 {
5062 rtx temp = gen_reg_rtx (Pmode);
5063 rtx val = force_operand (XEXP (x, 1), temp);
5064 if (val != temp)
5065 emit_move_insn (temp, val);
5066
5067 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
5068 }
5069
5070 else if (GET_CODE (XEXP (x, 1)) == REG)
5071 {
5072 rtx temp = gen_reg_rtx (Pmode);
5073 rtx val = force_operand (XEXP (x, 0), temp);
5074 if (val != temp)
5075 emit_move_insn (temp, val);
5076
5077 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
5078 }
5079 }
5080
5081 if (constant_term != const0_rtx)
5082 x = gen_rtx_PLUS (Pmode, x, constant_term);
5083
5084 return x;
5085 }
5086
5087 /* Try a machine-dependent way of reloading an illegitimate address AD
5088 operand. If we find one, push the reload and return the new address.
5089
5090 MODE is the mode of the enclosing MEM. OPNUM is the operand number
5091 and TYPE is the reload type of the current reload. */
5092
5093 rtx
5094 legitimize_reload_address (rtx ad, machine_mode mode ATTRIBUTE_UNUSED,
5095 int opnum, int type)
5096 {
5097 if (!optimize || TARGET_LONG_DISPLACEMENT)
5098 return NULL_RTX;
5099
5100 if (GET_CODE (ad) == PLUS)
5101 {
5102 rtx tem = simplify_binary_operation (PLUS, Pmode,
5103 XEXP (ad, 0), XEXP (ad, 1));
5104 if (tem)
5105 ad = tem;
5106 }
5107
5108 if (GET_CODE (ad) == PLUS
5109 && GET_CODE (XEXP (ad, 0)) == REG
5110 && GET_CODE (XEXP (ad, 1)) == CONST_INT
5111 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
5112 {
5113 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
5114 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
5115 rtx cst, tem, new_rtx;
5116
5117 cst = GEN_INT (upper);
5118 if (!legitimate_reload_constant_p (cst))
5119 cst = force_const_mem (Pmode, cst);
5120
5121 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
5122 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
5123
5124 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
5125 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
5126 opnum, (enum reload_type) type);
5127 return new_rtx;
5128 }
5129
5130 return NULL_RTX;
5131 }
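/* Rough example: for base + 0x12345 on a target without long
   displacements, the code above pushes a reload of the upper part 0x12000
   into a base-class register, so the address ends up as
   (base + <upper in a register>) + 0x345 with an in-range displacement.  */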
5132
5133 /* Emit code to move LEN bytes from SRC to DST.  */
5134
5135 bool
5136 s390_expand_movmem (rtx dst, rtx src, rtx len)
5137 {
5138 /* When tuning for z10 or higher we rely on the Glibc functions to
5139    do the right thing. Only for constant lengths below 64k do we
5140 generate inline code. */
5141 if (s390_tune >= PROCESSOR_2097_Z10
5142 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
5143 return false;
5144
5145 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
5146 {
5147 if (INTVAL (len) > 0)
5148 emit_insn (gen_movmem_short (dst, src, GEN_INT (INTVAL (len) - 1)));
5149 }
5150
5151 else if (TARGET_MVCLE)
5152 {
5153 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
5154 }
5155
5156 else
5157 {
5158 rtx dst_addr, src_addr, count, blocks, temp;
5159 rtx_code_label *loop_start_label = gen_label_rtx ();
5160 rtx_code_label *loop_end_label = gen_label_rtx ();
5161 rtx_code_label *end_label = gen_label_rtx ();
5162 machine_mode mode;
5163
5164 mode = GET_MODE (len);
5165 if (mode == VOIDmode)
5166 mode = Pmode;
5167
5168 dst_addr = gen_reg_rtx (Pmode);
5169 src_addr = gen_reg_rtx (Pmode);
5170 count = gen_reg_rtx (mode);
5171 blocks = gen_reg_rtx (mode);
5172
5173 convert_move (count, len, 1);
5174 emit_cmp_and_jump_insns (count, const0_rtx,
5175 EQ, NULL_RTX, mode, 1, end_label);
5176
5177 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
5178 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
5179 dst = change_address (dst, VOIDmode, dst_addr);
5180 src = change_address (src, VOIDmode, src_addr);
5181
5182 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5183 OPTAB_DIRECT);
5184 if (temp != count)
5185 emit_move_insn (count, temp);
5186
5187 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5188 OPTAB_DIRECT);
5189 if (temp != blocks)
5190 emit_move_insn (blocks, temp);
5191
5192 emit_cmp_and_jump_insns (blocks, const0_rtx,
5193 EQ, NULL_RTX, mode, 1, loop_end_label);
5194
5195 emit_label (loop_start_label);
5196
5197 if (TARGET_Z10
5198 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
5199 {
5200 rtx prefetch;
5201
5202 /* Issue a read prefetch for the +3 cache line. */
5203 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
5204 const0_rtx, const0_rtx);
5205 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5206 emit_insn (prefetch);
5207
5208 /* Issue a write prefetch for the +3 cache line. */
5209 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
5210 const1_rtx, const0_rtx);
5211 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5212 emit_insn (prefetch);
5213 }
5214
5215 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
5216 s390_load_address (dst_addr,
5217 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
5218 s390_load_address (src_addr,
5219 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
5220
5221 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5222 OPTAB_DIRECT);
5223 if (temp != blocks)
5224 emit_move_insn (blocks, temp);
5225
5226 emit_cmp_and_jump_insns (blocks, const0_rtx,
5227 EQ, NULL_RTX, mode, 1, loop_end_label);
5228
5229 emit_jump (loop_start_label);
5230 emit_label (loop_end_label);
5231
5232 emit_insn (gen_movmem_short (dst, src,
5233 convert_to_mode (Pmode, count, 1)));
5234 emit_label (end_label);
5235 }
5236 return true;
5237 }
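/* Rough sketch of the fallback loop above for LEN == 600: COUNT becomes
   599 and BLOCKS becomes 599 >> 8 == 2, so the loop copies two full
   256-byte chunks with mvc; the trailing movmem_short then copies the
   remaining 88 bytes (derived from the low byte of COUNT), giving
   512 + 88 == 600 bytes in total.  */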
5238
5239 /* Emit code to set LEN bytes at DST to VAL.
5240 Make use of clrmem if VAL is zero. */
5241
5242 void
5243 s390_expand_setmem (rtx dst, rtx len, rtx val)
5244 {
5245 if (GET_CODE (len) == CONST_INT && INTVAL (len) == 0)
5246 return;
5247
5248 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
5249
5250 if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
5251 {
5252 if (val == const0_rtx && INTVAL (len) <= 256)
5253 emit_insn (gen_clrmem_short (dst, GEN_INT (INTVAL (len) - 1)));
5254 else
5255 {
5256 /* Initialize memory by storing the first byte. */
5257 emit_move_insn (adjust_address (dst, QImode, 0), val);
5258
5259 if (INTVAL (len) > 1)
5260 {
5261 /* Initiate 1 byte overlap move.
5262 The first byte of DST is propagated through DSTP1.
5263 Prepare a movmem for: DST+1 = DST (length = LEN - 1).
5264 DST is set to size 1 so the rest of the memory location
5265 does not count as source operand. */
5266 rtx dstp1 = adjust_address (dst, VOIDmode, 1);
5267 set_mem_size (dst, 1);
5268
5269 emit_insn (gen_movmem_short (dstp1, dst,
5270 GEN_INT (INTVAL (len) - 2)));
5271 }
5272 }
5273 }
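/* Rough example of the overlapping-move trick above: to set 5 bytes to
   0xAA, the first byte is stored explicitly and the following
   mvc 1(4,base),0(base) copies byte by byte from left to right, so the
   0xAA written to DST[0] propagates into DST[1..4].  */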
5274
5275 else if (TARGET_MVCLE)
5276 {
5277 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
5278 if (TARGET_64BIT)
5279 emit_insn (gen_setmem_long_di (dst, convert_to_mode (Pmode, len, 1),
5280 val));
5281 else
5282 emit_insn (gen_setmem_long_si (dst, convert_to_mode (Pmode, len, 1),
5283 val));
5284 }
5285
5286 else
5287 {
5288 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
5289 rtx_code_label *loop_start_label = gen_label_rtx ();
5290 rtx_code_label *loop_end_label = gen_label_rtx ();
5291 rtx_code_label *end_label = gen_label_rtx ();
5292 machine_mode mode;
5293
5294 mode = GET_MODE (len);
5295 if (mode == VOIDmode)
5296 mode = Pmode;
5297
5298 dst_addr = gen_reg_rtx (Pmode);
5299 count = gen_reg_rtx (mode);
5300 blocks = gen_reg_rtx (mode);
5301
5302 convert_move (count, len, 1);
5303 emit_cmp_and_jump_insns (count, const0_rtx,
5304 EQ, NULL_RTX, mode, 1, end_label);
5305
5306 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
5307 dst = change_address (dst, VOIDmode, dst_addr);
5308
5309 if (val == const0_rtx)
5310 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5311 OPTAB_DIRECT);
5312 else
5313 {
5314 dstp1 = adjust_address (dst, VOIDmode, 1);
5315 set_mem_size (dst, 1);
5316
5317 /* Initialize memory by storing the first byte. */
5318 emit_move_insn (adjust_address (dst, QImode, 0), val);
5319
5320 /* If count is 1 we are done. */
5321 emit_cmp_and_jump_insns (count, const1_rtx,
5322 EQ, NULL_RTX, mode, 1, end_label);
5323
5324 temp = expand_binop (mode, add_optab, count, GEN_INT (-2), count, 1,
5325 OPTAB_DIRECT);
5326 }
5327 if (temp != count)
5328 emit_move_insn (count, temp);
5329
5330 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5331 OPTAB_DIRECT);
5332 if (temp != blocks)
5333 emit_move_insn (blocks, temp);
5334
5335 emit_cmp_and_jump_insns (blocks, const0_rtx,
5336 EQ, NULL_RTX, mode, 1, loop_end_label);
5337
5338 emit_label (loop_start_label);
5339
5340 if (TARGET_Z10
5341 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
5342 {
5343 /* Issue a write prefetch for the +4 cache line. */
5344 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
5345 GEN_INT (1024)),
5346 const1_rtx, const0_rtx);
5347 emit_insn (prefetch);
5348 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5349 }
5350
5351 if (val == const0_rtx)
5352 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
5353 else
5354 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (255)));
5355 s390_load_address (dst_addr,
5356 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
5357
5358 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5359 OPTAB_DIRECT);
5360 if (temp != blocks)
5361 emit_move_insn (blocks, temp);
5362
5363 emit_cmp_and_jump_insns (blocks, const0_rtx,
5364 EQ, NULL_RTX, mode, 1, loop_end_label);
5365
5366 emit_jump (loop_start_label);
5367 emit_label (loop_end_label);
5368
5369 if (val == const0_rtx)
5370 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
5371 else
5372 emit_insn (gen_movmem_short (dstp1, dst, convert_to_mode (Pmode, count, 1)));
5373 emit_label (end_label);
5374 }
5375 }
5376
5377 /* Emit code to compare LEN bytes at OP0 with those at OP1,
5378 and return the result in TARGET. */
5379
5380 bool
5381 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
5382 {
5383 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
5384 rtx tmp;
5385
5386 /* When tuning for z10 or higher we rely on the Glibc functions to
5387      do the right thing. Only for constant lengths below 64k do we
5388 generate inline code. */
5389 if (s390_tune >= PROCESSOR_2097_Z10
5390 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
5391 return false;
5392
5393 /* As the result of CMPINT is inverted compared to what we need,
5394 we have to swap the operands. */
5395 tmp = op0; op0 = op1; op1 = tmp;
5396
5397 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
5398 {
5399 if (INTVAL (len) > 0)
5400 {
5401 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
5402 emit_insn (gen_cmpint (target, ccreg));
5403 }
5404 else
5405 emit_move_insn (target, const0_rtx);
5406 }
5407 else if (TARGET_MVCLE)
5408 {
5409 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
5410 emit_insn (gen_cmpint (target, ccreg));
5411 }
5412 else
5413 {
5414 rtx addr0, addr1, count, blocks, temp;
5415 rtx_code_label *loop_start_label = gen_label_rtx ();
5416 rtx_code_label *loop_end_label = gen_label_rtx ();
5417 rtx_code_label *end_label = gen_label_rtx ();
5418 machine_mode mode;
5419
5420 mode = GET_MODE (len);
5421 if (mode == VOIDmode)
5422 mode = Pmode;
5423
5424 addr0 = gen_reg_rtx (Pmode);
5425 addr1 = gen_reg_rtx (Pmode);
5426 count = gen_reg_rtx (mode);
5427 blocks = gen_reg_rtx (mode);
5428
5429 convert_move (count, len, 1);
5430 emit_cmp_and_jump_insns (count, const0_rtx,
5431 EQ, NULL_RTX, mode, 1, end_label);
5432
5433 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
5434 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
5435 op0 = change_address (op0, VOIDmode, addr0);
5436 op1 = change_address (op1, VOIDmode, addr1);
5437
5438 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5439 OPTAB_DIRECT);
5440 if (temp != count)
5441 emit_move_insn (count, temp);
5442
5443 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5444 OPTAB_DIRECT);
5445 if (temp != blocks)
5446 emit_move_insn (blocks, temp);
5447
5448 emit_cmp_and_jump_insns (blocks, const0_rtx,
5449 EQ, NULL_RTX, mode, 1, loop_end_label);
5450
5451 emit_label (loop_start_label);
5452
5453 if (TARGET_Z10
5454 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
5455 {
5456 rtx prefetch;
5457
5458 /* Issue a read prefetch for the +2 cache line of operand 1. */
5459 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
5460 const0_rtx, const0_rtx);
5461 emit_insn (prefetch);
5462 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5463
5464 /* Issue a read prefetch for the +2 cache line of operand 2. */
5465 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
5466 const0_rtx, const0_rtx);
5467 emit_insn (prefetch);
5468 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5469 }
5470
5471 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
5472 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
5473 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
5474 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
5475 temp = gen_rtx_SET (pc_rtx, temp);
5476 emit_jump_insn (temp);
5477
5478 s390_load_address (addr0,
5479 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
5480 s390_load_address (addr1,
5481 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
5482
5483 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5484 OPTAB_DIRECT);
5485 if (temp != blocks)
5486 emit_move_insn (blocks, temp);
5487
5488 emit_cmp_and_jump_insns (blocks, const0_rtx,
5489 EQ, NULL_RTX, mode, 1, loop_end_label);
5490
5491 emit_jump (loop_start_label);
5492 emit_label (loop_end_label);
5493
5494 emit_insn (gen_cmpmem_short (op0, op1,
5495 convert_to_mode (Pmode, count, 1)));
5496 emit_label (end_label);
5497
5498 emit_insn (gen_cmpint (target, ccreg));
5499 }
5500 return true;
5501 }
5502
5503 /* Emit a conditional jump to LABEL for condition code mask MASK using
5504    comparison operator COMPARISON. Return the emitted jump insn.  */
5505
5506 static rtx
5507 s390_emit_ccraw_jump (HOST_WIDE_INT mask, enum rtx_code comparison, rtx label)
5508 {
5509 rtx temp;
5510
5511 gcc_assert (comparison == EQ || comparison == NE);
5512 gcc_assert (mask > 0 && mask < 15);
5513
5514 temp = gen_rtx_fmt_ee (comparison, VOIDmode,
5515 gen_rtx_REG (CCRAWmode, CC_REGNUM), GEN_INT (mask));
5516 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
5517 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
5518 temp = gen_rtx_SET (pc_rtx, temp);
5519 return emit_jump_insn (temp);
5520 }
5521
5522 /* Emit the instructions to implement strlen of STRING and store the
5523 result in TARGET. The string has the known ALIGNMENT. This
5524 version uses vector instructions and is therefore not appropriate
5525 for targets prior to z13. */
5526
5527 void
5528 s390_expand_vec_strlen (rtx target, rtx string, rtx alignment)
5529 {
5530 int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
5531 int very_likely = REG_BR_PROB_BASE - 1;
5532 rtx highest_index_to_load_reg = gen_reg_rtx (Pmode);
5533 rtx str_reg = gen_reg_rtx (V16QImode);
5534 rtx str_addr_base_reg = gen_reg_rtx (Pmode);
5535 rtx str_idx_reg = gen_reg_rtx (Pmode);
5536 rtx result_reg = gen_reg_rtx (V16QImode);
5537 rtx is_aligned_label = gen_label_rtx ();
5538 rtx into_loop_label = NULL_RTX;
5539 rtx loop_start_label = gen_label_rtx ();
5540 rtx temp;
5541 rtx len = gen_reg_rtx (QImode);
5542 rtx cond;
5543
5544 s390_load_address (str_addr_base_reg, XEXP (string, 0));
5545 emit_move_insn (str_idx_reg, const0_rtx);
5546
5547 if (INTVAL (alignment) < 16)
5548 {
5549 /* Check whether the address happens to be aligned properly so
5550 jump directly to the aligned loop. */
5551 emit_cmp_and_jump_insns (gen_rtx_AND (Pmode,
5552 str_addr_base_reg, GEN_INT (15)),
5553 const0_rtx, EQ, NULL_RTX,
5554 Pmode, 1, is_aligned_label);
5555
5556 temp = gen_reg_rtx (Pmode);
5557 temp = expand_binop (Pmode, and_optab, str_addr_base_reg,
5558 GEN_INT (15), temp, 1, OPTAB_DIRECT);
5559 gcc_assert (REG_P (temp));
5560 highest_index_to_load_reg =
5561 expand_binop (Pmode, sub_optab, GEN_INT (15), temp,
5562 highest_index_to_load_reg, 1, OPTAB_DIRECT);
5563 gcc_assert (REG_P (highest_index_to_load_reg));
5564 emit_insn (gen_vllv16qi (str_reg,
5565 convert_to_mode (SImode, highest_index_to_load_reg, 1),
5566 gen_rtx_MEM (BLKmode, str_addr_base_reg)));
5567
5568 into_loop_label = gen_label_rtx ();
5569 s390_emit_jump (into_loop_label, NULL_RTX);
5570 emit_barrier ();
5571 }
5572
5573 emit_label (is_aligned_label);
5574 LABEL_NUSES (is_aligned_label) = INTVAL (alignment) < 16 ? 2 : 1;
5575
5576   /* Reaching this point we are only performing 16-byte aligned
5577 loads. */
5578 emit_move_insn (highest_index_to_load_reg, GEN_INT (15));
5579
5580 emit_label (loop_start_label);
5581 LABEL_NUSES (loop_start_label) = 1;
5582
5583 /* Load 16 bytes of the string into VR. */
5584 emit_move_insn (str_reg,
5585 gen_rtx_MEM (V16QImode,
5586 gen_rtx_PLUS (Pmode, str_idx_reg,
5587 str_addr_base_reg)));
5588 if (into_loop_label != NULL_RTX)
5589 {
5590 emit_label (into_loop_label);
5591 LABEL_NUSES (into_loop_label) = 1;
5592 }
5593
5594 /* Increment string index by 16 bytes. */
5595 expand_binop (Pmode, add_optab, str_idx_reg, GEN_INT (16),
5596 str_idx_reg, 1, OPTAB_DIRECT);
5597
5598 emit_insn (gen_vec_vfenesv16qi (result_reg, str_reg, str_reg,
5599 GEN_INT (VSTRING_FLAG_ZS | VSTRING_FLAG_CS)));
5600
5601 add_int_reg_note (s390_emit_ccraw_jump (8, NE, loop_start_label),
5602 REG_BR_PROB, very_likely);
5603 emit_insn (gen_vec_extractv16qi (len, result_reg, GEN_INT (7)));
5604
5605   /* If the string pointer wasn't aligned we have loaded less than 16
5606 bytes and the remaining bytes got filled with zeros (by vll).
5607 Now we have to check whether the resulting index lies within the
5608 bytes actually part of the string. */
5609
5610 cond = s390_emit_compare (GT, convert_to_mode (Pmode, len, 1),
5611 highest_index_to_load_reg);
5612 s390_load_address (highest_index_to_load_reg,
5613 gen_rtx_PLUS (Pmode, highest_index_to_load_reg,
5614 const1_rtx));
5615 if (TARGET_64BIT)
5616 emit_insn (gen_movdicc (str_idx_reg, cond,
5617 highest_index_to_load_reg, str_idx_reg));
5618 else
5619 emit_insn (gen_movsicc (str_idx_reg, cond,
5620 highest_index_to_load_reg, str_idx_reg));
5621
5622 add_int_reg_note (s390_emit_jump (is_aligned_label, cond), REG_BR_PROB,
5623 very_unlikely);
5624
5625 expand_binop (Pmode, add_optab, str_idx_reg,
5626 GEN_INT (-16), str_idx_reg, 1, OPTAB_DIRECT);
5627 /* FIXME: len is already zero extended - so avoid the llgcr emitted
5628 here. */
5629 temp = expand_binop (Pmode, add_optab, str_idx_reg,
5630 convert_to_mode (Pmode, len, 1),
5631 target, 1, OPTAB_DIRECT);
5632 if (temp != target)
5633 emit_move_insn (target, temp);
5634 }
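/* Rough walk-through of the unaligned prologue above: if STRING starts at
   an address with addr & 15 == 12, HIGHEST_INDEX_TO_LOAD_REG becomes
   15 - 12 == 3 and vll loads only bytes 0..3, the remainder of the vector
   being zero.  vfene then reports the index of the first zero byte; if
   that index is greater than 3 the zero came from the fill bytes rather
   than from the string, and execution continues with the aligned 16-byte
   loop.  */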
5635
5636 void
5637 s390_expand_vec_movstr (rtx result, rtx dst, rtx src)
5638 {
5639 int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
5640 rtx temp = gen_reg_rtx (Pmode);
5641 rtx src_addr = XEXP (src, 0);
5642 rtx dst_addr = XEXP (dst, 0);
5643 rtx src_addr_reg = gen_reg_rtx (Pmode);
5644 rtx dst_addr_reg = gen_reg_rtx (Pmode);
5645 rtx offset = gen_reg_rtx (Pmode);
5646 rtx vsrc = gen_reg_rtx (V16QImode);
5647 rtx vpos = gen_reg_rtx (V16QImode);
5648 rtx loadlen = gen_reg_rtx (SImode);
5649 rtx gpos_qi = gen_reg_rtx(QImode);
5650 rtx gpos = gen_reg_rtx (SImode);
5651 rtx done_label = gen_label_rtx ();
5652 rtx loop_label = gen_label_rtx ();
5653 rtx exit_label = gen_label_rtx ();
5654 rtx full_label = gen_label_rtx ();
5655
5656   /* Perform a quick check for a string end within the first (up to) 16
5657      bytes and exit early if successful.  */
5658
5659 emit_insn (gen_vlbb (vsrc, src, GEN_INT (6)));
5660 emit_insn (gen_lcbb (loadlen, src_addr, GEN_INT (6)));
5661 emit_insn (gen_vfenezv16qi (vpos, vsrc, vsrc));
5662 emit_insn (gen_vec_extractv16qi (gpos_qi, vpos, GEN_INT (7)));
5663 emit_move_insn (gpos, gen_rtx_SUBREG (SImode, gpos_qi, 0));
5664 /* gpos is the byte index if a zero was found and 16 otherwise.
5665      So if it is lower than the number of loaded bytes we have a hit.  */
5666 emit_cmp_and_jump_insns (gpos, loadlen, GE, NULL_RTX, SImode, 1,
5667 full_label);
5668 emit_insn (gen_vstlv16qi (vsrc, gpos, dst));
5669
5670 force_expand_binop (Pmode, add_optab, dst_addr, gpos, result,
5671 1, OPTAB_DIRECT);
5672 emit_jump (exit_label);
5673 emit_barrier ();
5674
5675 emit_label (full_label);
5676 LABEL_NUSES (full_label) = 1;
5677
5678 /* Calculate `offset' so that src + offset points to the last byte
5679 before 16 byte alignment. */
5680
5681 /* temp = src_addr & 0xf */
5682 force_expand_binop (Pmode, and_optab, src_addr, GEN_INT (15), temp,
5683 1, OPTAB_DIRECT);
5684
5685 /* offset = 0xf - temp */
5686 emit_move_insn (offset, GEN_INT (15));
5687 force_expand_binop (Pmode, sub_optab, offset, temp, offset,
5688 1, OPTAB_DIRECT);
5689
5690 /* Store `offset' bytes in the destination string. The quick check
5691 has loaded at least `offset' bytes into vsrc. */
5692
5693 emit_insn (gen_vstlv16qi (vsrc, gen_lowpart (SImode, offset), dst));
5694
5695 /* Advance to the next byte to be loaded. */
5696 force_expand_binop (Pmode, add_optab, offset, const1_rtx, offset,
5697 1, OPTAB_DIRECT);
5698
5699 /* Make sure the addresses are single regs which can be used as a
5700 base. */
5701 emit_move_insn (src_addr_reg, src_addr);
5702 emit_move_insn (dst_addr_reg, dst_addr);
5703
5704 /* MAIN LOOP */
5705
5706 emit_label (loop_label);
5707 LABEL_NUSES (loop_label) = 1;
5708
5709 emit_move_insn (vsrc,
5710 gen_rtx_MEM (V16QImode,
5711 gen_rtx_PLUS (Pmode, src_addr_reg, offset)));
5712
5713 emit_insn (gen_vec_vfenesv16qi (vpos, vsrc, vsrc,
5714 GEN_INT (VSTRING_FLAG_ZS | VSTRING_FLAG_CS)));
5715 add_int_reg_note (s390_emit_ccraw_jump (8, EQ, done_label),
5716 REG_BR_PROB, very_unlikely);
5717
5718 emit_move_insn (gen_rtx_MEM (V16QImode,
5719 gen_rtx_PLUS (Pmode, dst_addr_reg, offset)),
5720 vsrc);
5721 /* offset += 16 */
5722 force_expand_binop (Pmode, add_optab, offset, GEN_INT (16),
5723 offset, 1, OPTAB_DIRECT);
5724
5725 emit_jump (loop_label);
5726 emit_barrier ();
5727
5728 /* REGULAR EXIT */
5729
5730 /* We are done. Add the offset of the zero character to the dst_addr
5731 pointer to get the result. */
5732
5733 emit_label (done_label);
5734 LABEL_NUSES (done_label) = 1;
5735
5736 force_expand_binop (Pmode, add_optab, dst_addr_reg, offset, dst_addr_reg,
5737 1, OPTAB_DIRECT);
5738
5739 emit_insn (gen_vec_extractv16qi (gpos_qi, vpos, GEN_INT (7)));
5740 emit_move_insn (gpos, gen_rtx_SUBREG (SImode, gpos_qi, 0));
5741
5742 emit_insn (gen_vstlv16qi (vsrc, gpos, gen_rtx_MEM (BLKmode, dst_addr_reg)));
5743
5744 force_expand_binop (Pmode, add_optab, dst_addr_reg, gpos, result,
5745 1, OPTAB_DIRECT);
5746
5747 /* EARLY EXIT */
5748
5749 emit_label (exit_label);
5750 LABEL_NUSES (exit_label) = 1;
5751 }
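/* A rough C-level model of what the expansion above computes (an
   illustrative sketch only, with a hypothetical helper name; not the
   implementation): the emitted code behaves like stpcpy, copying SRC to
   DST in 16-byte vector chunks and setting RESULT to the address of the
   terminating zero byte in DST:

     char *model_vec_movstr (char *dst, const char *src)
     {
       while ((*dst++ = *src++) != 0)
         ;
       return dst - 1;        -- address of the copied zero byte
     }
*/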
5752
5753
5754 /* Expand conditional increment or decrement using alc/slb instructions.
5755 Should generate code setting DST to either SRC or SRC + INCREMENT,
5756 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
5757 Returns true if successful, false otherwise.
5758
5759 That makes it possible to implement some if-constructs without jumps e.g.:
5760 (borrow = CC0 | CC1 and carry = CC2 | CC3)
5761 unsigned int a, b, c;
5762 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
5763 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
5764 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
5765 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
5766
5767 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
5768 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
5769 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
5770 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
5771 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
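/* Illustrative sketch (not part of the implementation): for unsigned ints
   the two basic cases above correspond to the branch-free sequences

     unsigned int carry  = (b > a);     -- compare logical, b > a sets CC2
     c = c + 0 + carry;                 -- ALC: ADD LOGICAL WITH CARRY

   for "if (a < b) c++;", and

     unsigned int borrow = (a < b);     -- logical subtract a - b sets borrow
     c = c - 0 - borrow;                -- SLB: SUBTRACT LOGICAL WITH BORROW

   for "if (a < b) c--;".  */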
5772
5773 bool
5774 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
5775 rtx dst, rtx src, rtx increment)
5776 {
5777 machine_mode cmp_mode;
5778 machine_mode cc_mode;
5779 rtx op_res;
5780 rtx insn;
5781 rtvec p;
5782 int ret;
5783
5784 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
5785 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
5786 cmp_mode = SImode;
5787 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
5788 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
5789 cmp_mode = DImode;
5790 else
5791 return false;
5792
5793 /* Try ADD LOGICAL WITH CARRY. */
5794 if (increment == const1_rtx)
5795 {
5796 /* Determine CC mode to use. */
5797 if (cmp_code == EQ || cmp_code == NE)
5798 {
5799 if (cmp_op1 != const0_rtx)
5800 {
5801 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
5802 NULL_RTX, 0, OPTAB_WIDEN);
5803 cmp_op1 = const0_rtx;
5804 }
5805
5806 cmp_code = cmp_code == EQ ? LEU : GTU;
5807 }
5808
5809 if (cmp_code == LTU || cmp_code == LEU)
5810 {
5811 rtx tem = cmp_op0;
5812 cmp_op0 = cmp_op1;
5813 cmp_op1 = tem;
5814 cmp_code = swap_condition (cmp_code);
5815 }
5816
5817 switch (cmp_code)
5818 {
5819 case GTU:
5820 cc_mode = CCUmode;
5821 break;
5822
5823 case GEU:
5824 cc_mode = CCL3mode;
5825 break;
5826
5827 default:
5828 return false;
5829 }
5830
5831 /* Emit comparison instruction pattern. */
5832 if (!register_operand (cmp_op0, cmp_mode))
5833 cmp_op0 = force_reg (cmp_mode, cmp_op0);
5834
5835 insn = gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
5836 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
5837 /* We use insn_invalid_p here to add clobbers if required. */
5838 ret = insn_invalid_p (emit_insn (insn), false);
5839 gcc_assert (!ret);
5840
5841 /* Emit ALC instruction pattern. */
5842 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
5843 gen_rtx_REG (cc_mode, CC_REGNUM),
5844 const0_rtx);
5845
5846 if (src != const0_rtx)
5847 {
5848 if (!register_operand (src, GET_MODE (dst)))
5849 src = force_reg (GET_MODE (dst), src);
5850
5851 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
5852 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
5853 }
5854
5855 p = rtvec_alloc (2);
5856 RTVEC_ELT (p, 0) =
5857 gen_rtx_SET (dst, op_res);
5858 RTVEC_ELT (p, 1) =
5859 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
5860 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
5861
5862 return true;
5863 }
5864
5865 /* Try SUBTRACT LOGICAL WITH BORROW. */
5866 if (increment == constm1_rtx)
5867 {
5868 /* Determine CC mode to use. */
5869 if (cmp_code == EQ || cmp_code == NE)
5870 {
5871 if (cmp_op1 != const0_rtx)
5872 {
5873 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
5874 NULL_RTX, 0, OPTAB_WIDEN);
5875 cmp_op1 = const0_rtx;
5876 }
5877
5878 cmp_code = cmp_code == EQ ? LEU : GTU;
5879 }
5880
5881 if (cmp_code == GTU || cmp_code == GEU)
5882 {
5883 rtx tem = cmp_op0;
5884 cmp_op0 = cmp_op1;
5885 cmp_op1 = tem;
5886 cmp_code = swap_condition (cmp_code);
5887 }
5888
5889 switch (cmp_code)
5890 {
5891 case LEU:
5892 cc_mode = CCUmode;
5893 break;
5894
5895 case LTU:
5896 cc_mode = CCL3mode;
5897 break;
5898
5899 default:
5900 return false;
5901 }
5902
5903 /* Emit comparison instruction pattern. */
5904 if (!register_operand (cmp_op0, cmp_mode))
5905 cmp_op0 = force_reg (cmp_mode, cmp_op0);
5906
5907 insn = gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
5908 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
5909 /* We use insn_invalid_p here to add clobbers if required. */
5910 ret = insn_invalid_p (emit_insn (insn), false);
5911 gcc_assert (!ret);
5912
5913 /* Emit SLB instruction pattern. */
5914 if (!register_operand (src, GET_MODE (dst)))
5915 src = force_reg (GET_MODE (dst), src);
5916
5917 op_res = gen_rtx_MINUS (GET_MODE (dst),
5918 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
5919 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
5920 gen_rtx_REG (cc_mode, CC_REGNUM),
5921 const0_rtx));
5922 p = rtvec_alloc (2);
5923 RTVEC_ELT (p, 0) =
5924 gen_rtx_SET (dst, op_res);
5925 RTVEC_ELT (p, 1) =
5926 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
5927 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
5928
5929 return true;
5930 }
5931
5932 return false;
5933 }
5934
5935 /* Expand code for the insv template. Return true if successful. */
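/* An illustrative sketch of the operation being expanded (plain C model,
   glossing over the RTL bit numbering): OP1 = bitsize bits of DEST starting
   at bit position OP2 = bitpos are replaced by the low-order bits of SRC,
   roughly

     mask = ((1 << bitsize) - 1) << lsb_pos;             -- lsb_pos derived
     dest = (dest & ~mask) | ((src << lsb_pos) & mask);  -- from bitpos

   where lsb_pos is the position counted from the least significant bit;
   the code below merely picks the cheapest instruction sequence (insert
   immediate, stcm/icm, or risbg) for this.  */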
5936
5937 bool
5938 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
5939 {
5940 int bitsize = INTVAL (op1);
5941 int bitpos = INTVAL (op2);
5942 machine_mode mode = GET_MODE (dest);
5943 machine_mode smode;
5944 int smode_bsize, mode_bsize;
5945 rtx op, clobber;
5946
5947 if (bitsize + bitpos > GET_MODE_BITSIZE (mode))
5948 return false;
5949
5950 /* Generate INSERT IMMEDIATE (IILL et al). */
5951 /* (set (ze (reg)) (const_int)). */
5952 if (TARGET_ZARCH
5953 && register_operand (dest, word_mode)
5954 && (bitpos % 16) == 0
5955 && (bitsize % 16) == 0
5956 && const_int_operand (src, VOIDmode))
5957 {
5958 HOST_WIDE_INT val = INTVAL (src);
5959 int regpos = bitpos + bitsize;
5960
5961 while (regpos > bitpos)
5962 {
5963 machine_mode putmode;
5964 int putsize;
5965
5966 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
5967 putmode = SImode;
5968 else
5969 putmode = HImode;
5970
5971 putsize = GET_MODE_BITSIZE (putmode);
5972 regpos -= putsize;
5973 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
5974 GEN_INT (putsize),
5975 GEN_INT (regpos)),
5976 gen_int_mode (val, putmode));
5977 val >>= putsize;
5978 }
5979 gcc_assert (regpos == bitpos);
5980 return true;
5981 }
5982
5983 smode = smallest_mode_for_size (bitsize, MODE_INT);
5984 smode_bsize = GET_MODE_BITSIZE (smode);
5985 mode_bsize = GET_MODE_BITSIZE (mode);
5986
5987 /* Generate STORE CHARACTERS UNDER MASK (STCM et al). */
5988 if (bitpos == 0
5989 && (bitsize % BITS_PER_UNIT) == 0
5990 && MEM_P (dest)
5991 && (register_operand (src, word_mode)
5992 || const_int_operand (src, VOIDmode)))
5993 {
5994 /* Emit standard pattern if possible. */
5995 if (smode_bsize == bitsize)
5996 {
5997 emit_move_insn (adjust_address (dest, smode, 0),
5998 gen_lowpart (smode, src));
5999 return true;
6000 }
6001
6002 /* (set (ze (mem)) (const_int)). */
6003 else if (const_int_operand (src, VOIDmode))
6004 {
6005 int size = bitsize / BITS_PER_UNIT;
6006 rtx src_mem = adjust_address (force_const_mem (word_mode, src),
6007 BLKmode,
6008 UNITS_PER_WORD - size);
6009
6010 dest = adjust_address (dest, BLKmode, 0);
6011 set_mem_size (dest, size);
6012 s390_expand_movmem (dest, src_mem, GEN_INT (size));
6013 return true;
6014 }
6015
6016 /* (set (ze (mem)) (reg)). */
6017 else if (register_operand (src, word_mode))
6018 {
6019 if (bitsize <= 32)
6020 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
6021 const0_rtx), src);
6022 else
6023 {
6024 /* Emit st,stcmh sequence. */
6025 int stcmh_width = bitsize - 32;
6026 int size = stcmh_width / BITS_PER_UNIT;
6027
6028 emit_move_insn (adjust_address (dest, SImode, size),
6029 gen_lowpart (SImode, src));
6030 set_mem_size (dest, size);
6031 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
6032 GEN_INT (stcmh_width),
6033 const0_rtx),
6034 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT (32)));
6035 }
6036 return true;
6037 }
6038 }
6039
6040 /* Generate INSERT CHARACTERS UNDER MASK (IC, ICM et al). */
6041 if ((bitpos % BITS_PER_UNIT) == 0
6042 && (bitsize % BITS_PER_UNIT) == 0
6043 && (bitpos & 32) == ((bitpos + bitsize - 1) & 32)
6044 && MEM_P (src)
6045 && (mode == DImode || mode == SImode)
6046 && register_operand (dest, mode))
6047 {
6048 /* Emit a strict_low_part pattern if possible. */
6049 if (smode_bsize == bitsize && bitpos == mode_bsize - smode_bsize)
6050 {
6051 op = gen_rtx_STRICT_LOW_PART (VOIDmode, gen_lowpart (smode, dest));
6052 op = gen_rtx_SET (op, gen_lowpart (smode, src));
6053 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6054 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
6055 return true;
6056 }
6057
6058 /* ??? There are more powerful versions of ICM that are not
6059 completely represented in the md file. */
6060 }
6061
6062 /* For z10, generate ROTATE THEN INSERT SELECTED BITS (RISBG et al). */
6063 if (TARGET_Z10 && (mode == DImode || mode == SImode))
6064 {
6065 machine_mode mode_s = GET_MODE (src);
6066
6067 if (mode_s == VOIDmode)
6068 {
6069 /* For constant zero values the representation with AND
6070 appears to be folded in more situations than the (set
6071 (zero_extract) ...).
6072 We only do this when the start and end of the bitfield
6073 remain in the same SImode chunk. That way nihf or nilf
6074 can be used.
6075 The AND patterns might still generate a risbg for this. */
6076 if (src == const0_rtx && bitpos / 32 == (bitpos + bitsize - 1) / 32)
6077 return false;
6078 else
6079 src = force_reg (mode, src);
6080 }
6081 else if (mode_s != mode)
6082 {
6083 gcc_assert (GET_MODE_BITSIZE (mode_s) >= bitsize);
6084 src = force_reg (mode_s, src);
6085 src = gen_lowpart (mode, src);
6086 }
6087
6088 op = gen_rtx_ZERO_EXTRACT (mode, dest, op1, op2);
6089 op = gen_rtx_SET (op, src);
6090
6091 if (!TARGET_ZEC12)
6092 {
6093 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6094 op = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber));
6095 }
6096 emit_insn (op);
6097
6098 return true;
6099 }
6100
6101 return false;
6102 }
6103
6104 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
6105 register that holds VAL of mode MODE shifted by COUNT bits. */
6106
6107 static inline rtx
6108 s390_expand_mask_and_shift (rtx val, machine_mode mode, rtx count)
6109 {
6110 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
6111 NULL_RTX, 1, OPTAB_DIRECT);
6112 return expand_simple_binop (SImode, ASHIFT, val, count,
6113 NULL_RTX, 1, OPTAB_DIRECT);
6114 }
6115
6116 /* Generate a vector comparison COND of CMP_OP1 and CMP_OP2 and store
6117 the result in TARGET. */
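/* For example (sketch, assuming a V4SI comparison): GE is not available
   directly, so "a >= b" is emitted as the swapped and inverted compare

     (set target (gt:V4SI b a))
     (set target (not:V4SI target))

   which is what the GE handling in the integer branch below produces.  */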
6118
6119 void
6120 s390_expand_vec_compare (rtx target, enum rtx_code cond,
6121 rtx cmp_op1, rtx cmp_op2)
6122 {
6123 machine_mode mode = GET_MODE (target);
6124 bool neg_p = false, swap_p = false;
6125 rtx tmp;
6126
6127 if (GET_MODE (cmp_op1) == V2DFmode)
6128 {
6129 switch (cond)
6130 {
6131 /* NE: a != b -> !(a == b) */
6132 case NE: cond = EQ; neg_p = true; break;
6133 /* UNGT: a u> b -> !(b >= a) */
6134 case UNGT: cond = GE; neg_p = true; swap_p = true; break;
6135 /* UNGE: a u>= b -> !(b > a) */
6136 case UNGE: cond = GT; neg_p = true; swap_p = true; break;
6137 /* LE: a <= b -> b >= a */
6138 case LE: cond = GE; swap_p = true; break;
6139 /* UNLE: a u<= b -> !(a > b) */
6140 case UNLE: cond = GT; neg_p = true; break;
6141 /* LT: a < b -> b > a */
6142 case LT: cond = GT; swap_p = true; break;
6143 /* UNLT: a u< b -> !(a >= b) */
6144 case UNLT: cond = GE; neg_p = true; break;
6145 case UNEQ:
6146 emit_insn (gen_vec_cmpuneqv2df (target, cmp_op1, cmp_op2));
6147 return;
6148 case LTGT:
6149 emit_insn (gen_vec_cmpltgtv2df (target, cmp_op1, cmp_op2));
6150 return;
6151 case ORDERED:
6152 emit_insn (gen_vec_orderedv2df (target, cmp_op1, cmp_op2));
6153 return;
6154 case UNORDERED:
6155 emit_insn (gen_vec_unorderedv2df (target, cmp_op1, cmp_op2));
6156 return;
6157 default: break;
6158 }
6159 }
6160 else
6161 {
6162 switch (cond)
6163 {
6164 /* NE: a != b -> !(a == b) */
6165 case NE: cond = EQ; neg_p = true; break;
6166 /* GE: a >= b -> !(b > a) */
6167 case GE: cond = GT; neg_p = true; swap_p = true; break;
6168 /* GEU: a >= b -> !(b > a) */
6169 case GEU: cond = GTU; neg_p = true; swap_p = true; break;
6170 /* LE: a <= b -> !(a > b) */
6171 case LE: cond = GT; neg_p = true; break;
6172 /* LEU: a <= b -> !(a > b) */
6173 case LEU: cond = GTU; neg_p = true; break;
6174 /* LT: a < b -> b > a */
6175 case LT: cond = GT; swap_p = true; break;
6176 /* LTU: a < b -> b > a */
6177 case LTU: cond = GTU; swap_p = true; break;
6178 default: break;
6179 }
6180 }
6181
6182 if (swap_p)
6183 {
6184 tmp = cmp_op1; cmp_op1 = cmp_op2; cmp_op2 = tmp;
6185 }
6186
6187 emit_insn (gen_rtx_SET (target, gen_rtx_fmt_ee (cond,
6188 mode,
6189 cmp_op1, cmp_op2)));
6190 if (neg_p)
6191 emit_insn (gen_rtx_SET (target, gen_rtx_NOT (mode, target)));
6192 }
6193
6194 /* Expand the comparison CODE of CMP1 and CMP2 and copy 1 or 0 into
6195 TARGET if either all (ALL_P is true) or any (ALL_P is false) of the
6196 elements in CMP1 and CMP2 fulfill the comparison. */
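/* Illustrative sketch of the intended result (not literal code):

     all_p == true  : TARGET = 1 iff CODE holds for every element pair;
     all_p == false : TARGET = 1 iff CODE holds for at least one pair;

   e.g. an EQ compare of two V4SI vectors with ALL_P set yields
   (cmp1[0] == cmp2[0] && ... && cmp1[3] == cmp2[3]) ? 1 : 0.  */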
6197 void
6198 s390_expand_vec_compare_cc (rtx target, enum rtx_code code,
6199 rtx cmp1, rtx cmp2, bool all_p)
6200 {
6201 enum rtx_code new_code = code;
6202 machine_mode cmp_mode, full_cmp_mode, scratch_mode;
6203 rtx tmp_reg = gen_reg_rtx (SImode);
6204 bool swap_p = false;
6205
6206 if (GET_MODE_CLASS (GET_MODE (cmp1)) == MODE_VECTOR_INT)
6207 {
6208 switch (code)
6209 {
6210 case EQ: cmp_mode = CCVEQmode; break;
6211 case NE: cmp_mode = CCVEQmode; break;
6212 case GT: cmp_mode = CCVHmode; break;
6213 case GE: cmp_mode = CCVHmode; new_code = LE; swap_p = true; break;
6214 case LT: cmp_mode = CCVHmode; new_code = GT; swap_p = true; break;
6215 case LE: cmp_mode = CCVHmode; new_code = LE; break;
6216 case GTU: cmp_mode = CCVHUmode; break;
6217 case GEU: cmp_mode = CCVHUmode; new_code = LEU; swap_p = true; break;
6218 case LTU: cmp_mode = CCVHUmode; new_code = GTU; swap_p = true; break;
6219 case LEU: cmp_mode = CCVHUmode; new_code = LEU; break;
6220 default: gcc_unreachable ();
6221 }
6222 scratch_mode = GET_MODE (cmp1);
6223 }
6224 else if (GET_MODE (cmp1) == V2DFmode)
6225 {
6226 switch (code)
6227 {
6228 case EQ: cmp_mode = CCVEQmode; break;
6229 case NE: cmp_mode = CCVEQmode; break;
6230 case GT: cmp_mode = CCVFHmode; break;
6231 case GE: cmp_mode = CCVFHEmode; break;
6232 case UNLE: cmp_mode = CCVFHmode; break;
6233 case UNLT: cmp_mode = CCVFHEmode; break;
6234 case LT: cmp_mode = CCVFHmode; new_code = GT; swap_p = true; break;
6235 case LE: cmp_mode = CCVFHEmode; new_code = GE; swap_p = true; break;
6236 default: gcc_unreachable ();
6237 }
6238 scratch_mode = V2DImode;
6239 }
6240 else
6241 gcc_unreachable ();
6242
6243 if (!all_p)
6244 switch (cmp_mode)
6245 {
6246 case CCVEQmode: full_cmp_mode = CCVEQANYmode; break;
6247 case CCVHmode: full_cmp_mode = CCVHANYmode; break;
6248 case CCVHUmode: full_cmp_mode = CCVHUANYmode; break;
6249 case CCVFHmode: full_cmp_mode = CCVFHANYmode; break;
6250 case CCVFHEmode: full_cmp_mode = CCVFHEANYmode; break;
6251 default: gcc_unreachable ();
6252 }
6253 else
6254 /* The modes without ANY match the ALL modes. */
6255 full_cmp_mode = cmp_mode;
6256
6257 if (swap_p)
6258 {
6259 rtx tmp = cmp2;
6260 cmp2 = cmp1;
6261 cmp1 = tmp;
6262 }
6263
6264 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6265 gen_rtvec (2, gen_rtx_SET (
6266 gen_rtx_REG (cmp_mode, CC_REGNUM),
6267 gen_rtx_COMPARE (cmp_mode, cmp1, cmp2)),
6268 gen_rtx_CLOBBER (VOIDmode,
6269 gen_rtx_SCRATCH (scratch_mode)))));
6270 emit_move_insn (target, const0_rtx);
6271 emit_move_insn (tmp_reg, const1_rtx);
6272
6273 emit_move_insn (target,
6274 gen_rtx_IF_THEN_ELSE (SImode,
6275 gen_rtx_fmt_ee (new_code, VOIDmode,
6276 gen_rtx_REG (full_cmp_mode, CC_REGNUM),
6277 const0_rtx),
6278 target, tmp_reg));
6279 }
6280
6281 /* Generate a vector comparison expression loading either elements of
6282 THEN or ELS into TARGET depending on the comparison COND of CMP_OP1
6283 and CMP_OP2. */
6284
6285 void
6286 s390_expand_vcond (rtx target, rtx then, rtx els,
6287 enum rtx_code cond, rtx cmp_op1, rtx cmp_op2)
6288 {
6289 rtx tmp;
6290 machine_mode result_mode;
6291 rtx result_target;
6292
6293 machine_mode target_mode = GET_MODE (target);
6294 machine_mode cmp_mode = GET_MODE (cmp_op1);
6295 rtx op = (cond == LT) ? els : then;
6296
6297 /* Try to optimize x < 0 ? -1 : 0 into (signed) x >> 31
6298 and x < 0 ? 1 : 0 into (unsigned) x >> 31. Likewise
6299 for short and byte (x >> 15 and x >> 7 respectively). */
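/* E.g. for V4SI elements (sketch): "x < 0 ? -1 : 0" becomes an arithmetic
   element shift right by 31, since (int32_t) x >> 31 is either 0 or -1,
   while "x < 0 ? 1 : 0" uses the logical shift (uint32_t) x >> 31.  */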
6300 if ((cond == LT || cond == GE)
6301 && target_mode == cmp_mode
6302 && cmp_op2 == CONST0_RTX (cmp_mode)
6303 && op == CONST0_RTX (target_mode)
6304 && s390_vector_mode_supported_p (target_mode)
6305 && GET_MODE_CLASS (target_mode) == MODE_VECTOR_INT)
6306 {
6307 rtx negop = (cond == LT) ? then : els;
6308
6309 int shift = GET_MODE_BITSIZE (GET_MODE_INNER (target_mode)) - 1;
6310
6311 /* if x < 0 ? 1 : 0 or if x >= 0 ? 0 : 1 */
6312 if (negop == CONST1_RTX (target_mode))
6313 {
6314 rtx res = expand_simple_binop (cmp_mode, LSHIFTRT, cmp_op1,
6315 GEN_INT (shift), target,
6316 1, OPTAB_DIRECT);
6317 if (res != target)
6318 emit_move_insn (target, res);
6319 return;
6320 }
6321
6322 /* if x < 0 ? -1 : 0 or if x >= 0 ? 0 : -1 */
6323 else if (all_ones_operand (negop, target_mode))
6324 {
6325 rtx res = expand_simple_binop (cmp_mode, ASHIFTRT, cmp_op1,
6326 GEN_INT (shift), target,
6327 0, OPTAB_DIRECT);
6328 if (res != target)
6329 emit_move_insn (target, res);
6330 return;
6331 }
6332 }
6333
6334 /* We always use an integral type vector to hold the comparison
6335 result. */
6336 result_mode = cmp_mode == V2DFmode ? V2DImode : cmp_mode;
6337 result_target = gen_reg_rtx (result_mode);
6338
6339 /* We allow vector immediates as comparison operands that
6340 can be handled by the optimization above but not by the
6341 following code. Hence, force them into registers here. */
6342 if (!REG_P (cmp_op1))
6343 cmp_op1 = force_reg (GET_MODE (cmp_op1), cmp_op1);
6344
6345 if (!REG_P (cmp_op2))
6346 cmp_op2 = force_reg (GET_MODE (cmp_op2), cmp_op2);
6347
6348 s390_expand_vec_compare (result_target, cond,
6349 cmp_op1, cmp_op2);
6350
6351 /* If the results are supposed to be either -1 or 0 we are done
6352 since this is what our compare instructions generate anyway. */
6353 if (all_ones_operand (then, GET_MODE (then))
6354 && const0_operand (els, GET_MODE (els)))
6355 {
6356 emit_move_insn (target, gen_rtx_SUBREG (target_mode,
6357 result_target, 0));
6358 return;
6359 }
6360
6361 /* Otherwise we will do a vsel afterwards. */
6362 /* This gets triggered e.g.
6363 with gcc.c-torture/compile/pr53410-1.c */
6364 if (!REG_P (then))
6365 then = force_reg (target_mode, then);
6366
6367 if (!REG_P (els))
6368 els = force_reg (target_mode, els);
6369
6370 tmp = gen_rtx_fmt_ee (EQ, VOIDmode,
6371 result_target,
6372 CONST0_RTX (result_mode));
6373
6374 /* We compared the result against zero above so we have to swap then
6375 and els here. */
6376 tmp = gen_rtx_IF_THEN_ELSE (target_mode, tmp, els, then);
6377
6378 gcc_assert (target_mode == GET_MODE (then));
6379 emit_insn (gen_rtx_SET (target, tmp));
6380 }
6381
6382 /* Emit the RTX necessary to initialize the vector TARGET with values
6383 in VALS. */
6384 void
6385 s390_expand_vec_init (rtx target, rtx vals)
6386 {
6387 machine_mode mode = GET_MODE (target);
6388 machine_mode inner_mode = GET_MODE_INNER (mode);
6389 int n_elts = GET_MODE_NUNITS (mode);
6390 bool all_same = true, all_regs = true, all_const_int = true;
6391 rtx x;
6392 int i;
6393
6394 for (i = 0; i < n_elts; ++i)
6395 {
6396 x = XVECEXP (vals, 0, i);
6397
6398 if (!CONST_INT_P (x))
6399 all_const_int = false;
6400
6401 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6402 all_same = false;
6403
6404 if (!REG_P (x))
6405 all_regs = false;
6406 }
6407
6408 /* Use vector gen mask or vector gen byte mask if possible. */
6409 if (all_same && all_const_int
6410 && (XVECEXP (vals, 0, 0) == const0_rtx
6411 || s390_contiguous_bitmask_vector_p (XVECEXP (vals, 0, 0),
6412 NULL, NULL)
6413 || s390_bytemask_vector_p (XVECEXP (vals, 0, 0), NULL)))
6414 {
6415 emit_insn (gen_rtx_SET (target,
6416 gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0))));
6417 return;
6418 }
6419
6420 if (all_same)
6421 {
6422 emit_insn (gen_rtx_SET (target,
6423 gen_rtx_VEC_DUPLICATE (mode,
6424 XVECEXP (vals, 0, 0))));
6425 return;
6426 }
6427
6428 if (all_regs && REG_P (target) && n_elts == 2 && inner_mode == DImode)
6429 {
6430 /* Use vector load pair. */
6431 emit_insn (gen_rtx_SET (target,
6432 gen_rtx_VEC_CONCAT (mode,
6433 XVECEXP (vals, 0, 0),
6434 XVECEXP (vals, 0, 1))));
6435 return;
6436 }
6437
6438 /* We are about to set the vector elements one by one. Zero out the
6439 full register first in order to help the data flow framework to
6440 detect it as a full VR set. */
6441 emit_insn (gen_rtx_SET (target, CONST0_RTX (mode)));
6442
6443 /* Unfortunately the vec_init expander is not allowed to fail. So
6444 we have to implement the fallback ourselves. */
6445 for (i = 0; i < n_elts; i++)
6446 emit_insn (gen_rtx_SET (target,
6447 gen_rtx_UNSPEC (mode,
6448 gen_rtvec (3, XVECEXP (vals, 0, i),
6449 GEN_INT (i), target),
6450 UNSPEC_VEC_SET)));
6451 }
6452
6453 /* Structure to hold the initial parameters for a compare_and_swap operation
6454 in HImode and QImode. */
6455
6456 struct alignment_context
6457 {
6458 rtx memsi; /* SI aligned memory location. */
6459 rtx shift; /* Bit offset with regard to lsb. */
6460 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
6461 rtx modemaski; /* ~modemask */
6462 bool aligned; /* True if memory is aligned, false otherwise. */
6463 };
6464
6465 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
6466 the structure AC for transparent simplification if the memory alignment
6467 is known to be at least 32 bits. MEM is the memory location for the
6468 actual operation and MODE its mode. */
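/* Worked example (sketch, big-endian byte numbering): for a QImode access
   whose address satisfies ADDR % 4 == 1, the code below computes

     memsi     = the SImode word at (ADDR & -4)
     shift     = (4 - 1 - 1) * 8 = 16        -- bit offset w.r.t. the lsb
     modemask  = 0xff << 16
     modemaski = ~(0xff << 16)

   i.e. the byte occupies bits 23..16 of the containing word.  */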
6469
6470 static void
6471 init_alignment_context (struct alignment_context *ac, rtx mem,
6472 machine_mode mode)
6473 {
6474 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
6475 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
6476
6477 if (ac->aligned)
6478 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
6479 else
6480 {
6481 /* Alignment is unknown. */
6482 rtx byteoffset, addr, align;
6483
6484 /* Force the address into a register. */
6485 addr = force_reg (Pmode, XEXP (mem, 0));
6486
6487 /* Align it to SImode. */
6488 align = expand_simple_binop (Pmode, AND, addr,
6489 GEN_INT (-GET_MODE_SIZE (SImode)),
6490 NULL_RTX, 1, OPTAB_DIRECT);
6491 /* Generate MEM. */
6492 ac->memsi = gen_rtx_MEM (SImode, align);
6493 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
6494 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
6495 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
6496
6497 /* Calculate shiftcount. */
6498 byteoffset = expand_simple_binop (Pmode, AND, addr,
6499 GEN_INT (GET_MODE_SIZE (SImode) - 1),
6500 NULL_RTX, 1, OPTAB_DIRECT);
6501 /* As we already have some offset, evaluate the remaining distance. */
6502 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
6503 NULL_RTX, 1, OPTAB_DIRECT);
6504 }
6505
6506 /* Shift is the byte count, but we need the bitcount. */
6507 ac->shift = expand_simple_binop (SImode, ASHIFT, ac->shift, GEN_INT (3),
6508 NULL_RTX, 1, OPTAB_DIRECT);
6509
6510 /* Calculate masks. */
6511 ac->modemask = expand_simple_binop (SImode, ASHIFT,
6512 GEN_INT (GET_MODE_MASK (mode)),
6513 ac->shift, NULL_RTX, 1, OPTAB_DIRECT);
6514 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask,
6515 NULL_RTX, 1);
6516 }
6517
6518 /* A subroutine of s390_expand_cs_hqi. Insert INS into VAL. If possible,
6519 use a single insv insn into SEQ2. Otherwise, put prep insns in SEQ1 and
6520 perform the merge in SEQ2. */
6521
6522 static rtx
6523 s390_two_part_insv (struct alignment_context *ac, rtx *seq1, rtx *seq2,
6524 machine_mode mode, rtx val, rtx ins)
6525 {
6526 rtx tmp;
6527
6528 if (ac->aligned)
6529 {
6530 start_sequence ();
6531 tmp = copy_to_mode_reg (SImode, val);
6532 if (s390_expand_insv (tmp, GEN_INT (GET_MODE_BITSIZE (mode)),
6533 const0_rtx, ins))
6534 {
6535 *seq1 = NULL;
6536 *seq2 = get_insns ();
6537 end_sequence ();
6538 return tmp;
6539 }
6540 end_sequence ();
6541 }
6542
6543 /* Failed to use insv. Generate a two part shift and mask. */
6544 start_sequence ();
6545 tmp = s390_expand_mask_and_shift (ins, mode, ac->shift);
6546 *seq1 = get_insns ();
6547 end_sequence ();
6548
6549 start_sequence ();
6550 tmp = expand_simple_binop (SImode, IOR, tmp, val, NULL_RTX, 1, OPTAB_DIRECT);
6551 *seq2 = get_insns ();
6552 end_sequence ();
6553
6554 return tmp;
6555 }
6556
6557 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
6558 the memory location, CMP the old value to compare MEM with and NEW_RTX the
6559 value to set if CMP == MEM. */
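/* Illustrative sketch of the strategy (not literal code): the narrow field
   is widened to its containing aligned SImode word and the compare-and-swap
   is performed on that word; failures caused only by the neighbouring bytes
   changing are filtered out by the retry loop:

     word     = *memsi & modemaski;
     expected = word | (cmp << shift);
     desired  = word | (new_rtx << shift);
     retry CS (memsi, expected, desired) while only bytes outside
       modemask changed;
     vtarget  = (loaded word >> shift) truncated to MODE;
*/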
6560
6561 void
6562 s390_expand_cs_hqi (machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
6563 rtx cmp, rtx new_rtx, bool is_weak)
6564 {
6565 struct alignment_context ac;
6566 rtx cmpv, newv, val, cc, seq0, seq1, seq2, seq3;
6567 rtx res = gen_reg_rtx (SImode);
6568 rtx_code_label *csloop = NULL, *csend = NULL;
6569
6570 gcc_assert (MEM_P (mem));
6571
6572 init_alignment_context (&ac, mem, mode);
6573
6574 /* Load full word. Subsequent loads are performed by CS. */
6575 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
6576 NULL_RTX, 1, OPTAB_DIRECT);
6577
6578 /* Prepare insertions of cmp and new_rtx into the loaded value. When
6579 possible, we try to use insv to make this happen efficiently. If
6580 that fails we'll generate code both inside and outside the loop. */
6581 cmpv = s390_two_part_insv (&ac, &seq0, &seq2, mode, val, cmp);
6582 newv = s390_two_part_insv (&ac, &seq1, &seq3, mode, val, new_rtx);
6583
6584 if (seq0)
6585 emit_insn (seq0);
6586 if (seq1)
6587 emit_insn (seq1);
6588
6589 /* Start CS loop. */
6590 if (!is_weak)
6591 {
6592 /* Begin assuming success. */
6593 emit_move_insn (btarget, const1_rtx);
6594
6595 csloop = gen_label_rtx ();
6596 csend = gen_label_rtx ();
6597 emit_label (csloop);
6598 }
6599
6600 /* val = "<mem>00..0<mem>"
6601 * cmp = "00..0<cmp>00..0"
6602 * new = "00..0<new>00..0"
6603 */
6604
6605 emit_insn (seq2);
6606 emit_insn (seq3);
6607
6608 cc = s390_emit_compare_and_swap (EQ, res, ac.memsi, cmpv, newv);
6609 if (is_weak)
6610 emit_insn (gen_cstorecc4 (btarget, cc, XEXP (cc, 0), XEXP (cc, 1)));
6611 else
6612 {
6613 rtx tmp;
6614
6615 /* Jump to end if we're done (likely?). */
6616 s390_emit_jump (csend, cc);
6617
6618 /* Check for changes outside the mode, and loop back internally if so.
6619 Arrange the moves so that the compare is adjacent to the
6620 branch so that we can generate a CRJ. */
6621 tmp = copy_to_reg (val);
6622 force_expand_binop (SImode, and_optab, res, ac.modemaski, val,
6623 1, OPTAB_DIRECT);
6624 cc = s390_emit_compare (NE, val, tmp);
6625 s390_emit_jump (csloop, cc);
6626
6627 /* Failed. */
6628 emit_move_insn (btarget, const0_rtx);
6629 emit_label (csend);
6630 }
6631
6632 /* Return the correct part of the bitfield. */
6633 convert_move (vtarget, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
6634 NULL_RTX, 1, OPTAB_DIRECT), 1);
6635 }
6636
6637 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
6638 and VAL the value to play with. If AFTER is true then store the value
6639 MEM holds after the operation, if AFTER is false then store the value MEM
6640 holds before the operation. If TARGET is zero then discard that value, else
6641 store it to TARGET. */
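/* Sketch of the emitted CS loop, e.g. for a PLUS (fetch-and-add) on a
   QImode value (illustrative pseudo C using the alignment_context fields):

     cmp = *memsi;
     do
       new = ((cmp + (val << shift)) & modemask) | (cmp & modemaski);
     while (!CS (memsi, &cmp, new));
     if (target)
       target = ((after ? new : cmp) >> shift) truncated to MODE;
*/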
6642
6643 void
6644 s390_expand_atomic (machine_mode mode, enum rtx_code code,
6645 rtx target, rtx mem, rtx val, bool after)
6646 {
6647 struct alignment_context ac;
6648 rtx cmp;
6649 rtx new_rtx = gen_reg_rtx (SImode);
6650 rtx orig = gen_reg_rtx (SImode);
6651 rtx_code_label *csloop = gen_label_rtx ();
6652
6653 gcc_assert (!target || register_operand (target, VOIDmode));
6654 gcc_assert (MEM_P (mem));
6655
6656 init_alignment_context (&ac, mem, mode);
6657
6658 /* Shift val to the correct bit positions.
6659 Preserve "icm", but prevent "ex icm". */
6660 if (!(ac.aligned && code == SET && MEM_P (val)))
6661 val = s390_expand_mask_and_shift (val, mode, ac.shift);
6662
6663 /* Further preparation insns. */
6664 if (code == PLUS || code == MINUS)
6665 emit_move_insn (orig, val);
6666 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
6667 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
6668 NULL_RTX, 1, OPTAB_DIRECT);
6669
6670 /* Load full word. Subsequent loads are performed by CS. */
6671 cmp = force_reg (SImode, ac.memsi);
6672
6673 /* Start CS loop. */
6674 emit_label (csloop);
6675 emit_move_insn (new_rtx, cmp);
6676
6677 /* Patch new with val at correct position. */
6678 switch (code)
6679 {
6680 case PLUS:
6681 case MINUS:
6682 val = expand_simple_binop (SImode, code, new_rtx, orig,
6683 NULL_RTX, 1, OPTAB_DIRECT);
6684 val = expand_simple_binop (SImode, AND, val, ac.modemask,
6685 NULL_RTX, 1, OPTAB_DIRECT);
6686 /* FALLTHRU */
6687 case SET:
6688 if (ac.aligned && MEM_P (val))
6689 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0,
6690 0, 0, SImode, val, false);
6691 else
6692 {
6693 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
6694 NULL_RTX, 1, OPTAB_DIRECT);
6695 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
6696 NULL_RTX, 1, OPTAB_DIRECT);
6697 }
6698 break;
6699 case AND:
6700 case IOR:
6701 case XOR:
6702 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
6703 NULL_RTX, 1, OPTAB_DIRECT);
6704 break;
6705 case MULT: /* NAND */
6706 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
6707 NULL_RTX, 1, OPTAB_DIRECT);
6708 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
6709 NULL_RTX, 1, OPTAB_DIRECT);
6710 break;
6711 default:
6712 gcc_unreachable ();
6713 }
6714
6715 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
6716 ac.memsi, cmp, new_rtx));
6717
6718 /* Return the correct part of the bitfield. */
6719 if (target)
6720 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
6721 after ? new_rtx : cmp, ac.shift,
6722 NULL_RTX, 1, OPTAB_DIRECT), 1);
6723 }
6724
6725 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
6726 We need to emit DTP-relative relocations. */
6727
6728 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
6729
6730 static void
6731 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
6732 {
6733 switch (size)
6734 {
6735 case 4:
6736 fputs ("\t.long\t", file);
6737 break;
6738 case 8:
6739 fputs ("\t.quad\t", file);
6740 break;
6741 default:
6742 gcc_unreachable ();
6743 }
6744 output_addr_const (file, x);
6745 fputs ("@DTPOFF", file);
6746 }
6747
6748 /* Return the proper mode for REGNO being represented in the dwarf
6749 unwind table. */
6750 machine_mode
6751 s390_dwarf_frame_reg_mode (int regno)
6752 {
6753 machine_mode save_mode = default_dwarf_frame_reg_mode (regno);
6754
6755 /* Make sure not to return DImode for any GPR with -m31 -mzarch. */
6756 if (GENERAL_REGNO_P (regno))
6757 save_mode = Pmode;
6758
6759 /* The rightmost 64 bits of vector registers are call-clobbered. */
6760 if (GET_MODE_SIZE (save_mode) > 8)
6761 save_mode = DImode;
6762
6763 return save_mode;
6764 }
6765
6766 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
6767 /* Implement TARGET_MANGLE_TYPE. */
6768
6769 static const char *
6770 s390_mangle_type (const_tree type)
6771 {
6772 type = TYPE_MAIN_VARIANT (type);
6773
6774 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
6775 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
6776 return NULL;
6777
6778 if (type == s390_builtin_types[BT_BV16QI]) return "U6__boolc";
6779 if (type == s390_builtin_types[BT_BV8HI]) return "U6__bools";
6780 if (type == s390_builtin_types[BT_BV4SI]) return "U6__booli";
6781 if (type == s390_builtin_types[BT_BV2DI]) return "U6__booll";
6782
6783 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
6784 && TARGET_LONG_DOUBLE_128)
6785 return "g";
6786
6787 /* For all other types, use normal C++ mangling. */
6788 return NULL;
6789 }
6790 #endif
6791
6792 /* In the name of slightly smaller debug output, and to cater to
6793 general assembler lossage, recognize various UNSPEC sequences
6794 and turn them back into a direct symbol reference. */
6795
6796 static rtx
6797 s390_delegitimize_address (rtx orig_x)
6798 {
6799 rtx x, y;
6800
6801 orig_x = delegitimize_mem_from_attrs (orig_x);
6802 x = orig_x;
6803
6804 /* Extract the symbol ref from:
6805 (plus:SI (reg:SI 12 %r12)
6806 (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
6807 UNSPEC_GOTOFF/PLTOFF)))
6808 and
6809 (plus:SI (reg:SI 12 %r12)
6810 (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
6811 UNSPEC_GOTOFF/PLTOFF)
6812 (const_int 4 [0x4])))) */
6813 if (GET_CODE (x) == PLUS
6814 && REG_P (XEXP (x, 0))
6815 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
6816 && GET_CODE (XEXP (x, 1)) == CONST)
6817 {
6818 HOST_WIDE_INT offset = 0;
6819
6820 /* The const operand. */
6821 y = XEXP (XEXP (x, 1), 0);
6822
6823 if (GET_CODE (y) == PLUS
6824 && GET_CODE (XEXP (y, 1)) == CONST_INT)
6825 {
6826 offset = INTVAL (XEXP (y, 1));
6827 y = XEXP (y, 0);
6828 }
6829
6830 if (GET_CODE (y) == UNSPEC
6831 && (XINT (y, 1) == UNSPEC_GOTOFF
6832 || XINT (y, 1) == UNSPEC_PLTOFF))
6833 return plus_constant (Pmode, XVECEXP (y, 0, 0), offset);
6834 }
6835
6836 if (GET_CODE (x) != MEM)
6837 return orig_x;
6838
6839 x = XEXP (x, 0);
6840 if (GET_CODE (x) == PLUS
6841 && GET_CODE (XEXP (x, 1)) == CONST
6842 && GET_CODE (XEXP (x, 0)) == REG
6843 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
6844 {
6845 y = XEXP (XEXP (x, 1), 0);
6846 if (GET_CODE (y) == UNSPEC
6847 && XINT (y, 1) == UNSPEC_GOT)
6848 y = XVECEXP (y, 0, 0);
6849 else
6850 return orig_x;
6851 }
6852 else if (GET_CODE (x) == CONST)
6853 {
6854 /* Extract the symbol ref from:
6855 (mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
6856 UNSPEC_PLT/GOTENT))) */
6857
6858 y = XEXP (x, 0);
6859 if (GET_CODE (y) == UNSPEC
6860 && (XINT (y, 1) == UNSPEC_GOTENT
6861 || XINT (y, 1) == UNSPEC_PLT))
6862 y = XVECEXP (y, 0, 0);
6863 else
6864 return orig_x;
6865 }
6866 else
6867 return orig_x;
6868
6869 if (GET_MODE (orig_x) != Pmode)
6870 {
6871 if (GET_MODE (orig_x) == BLKmode)
6872 return orig_x;
6873 y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
6874 if (y == NULL_RTX)
6875 return orig_x;
6876 }
6877 return y;
6878 }
6879
6880 /* Output operand OP to stdio stream FILE.
6881 OP is an address (register + offset) which is not used to address data;
6882 instead the rightmost bits are interpreted as the value. */
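/* For example (sketch): an operand of the form (plus (reg %r2) (const_int 3))
   is printed as "3(%r2)" and a plain (const_int 13) as "13"; only the low
   12 bits of the offset are emitted, matching the shift count encoding of
   instructions like sll and srl.  */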
6883
6884 static void
6885 print_shift_count_operand (FILE *file, rtx op)
6886 {
6887 HOST_WIDE_INT offset;
6888 rtx base;
6889
6890 /* Extract base register and offset. */
6891 if (!s390_decompose_shift_count (op, &base, &offset))
6892 gcc_unreachable ();
6893
6894 /* Sanity check. */
6895 if (base)
6896 {
6897 gcc_assert (GET_CODE (base) == REG);
6898 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
6899 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
6900 }
6901
6902 /* Offsets are restricted to twelve bits. */
6903 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
6904 if (base)
6905 fprintf (file, "(%s)", reg_names[REGNO (base)]);
6906 }
6907
6908 /* Assigns the number of NOP halfwords to be emitted before and after the
6909 function label to *HW_BEFORE and *HW_AFTER. Both pointers must not be NULL.
6910 If hotpatching is disabled for the function, the values are set to zero.
6911 */
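/* For example (sketch): a function declared as

     void f (void) __attribute__ ((hotpatch (1, 2)));

   gets *HW_BEFORE = 1 and *HW_AFTER = 2, overriding any -mhotpatch=
   command-line setting for this function.  */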
6912
6913 static void
6914 s390_function_num_hotpatch_hw (tree decl,
6915 int *hw_before,
6916 int *hw_after)
6917 {
6918 tree attr;
6919
6920 attr = lookup_attribute ("hotpatch", DECL_ATTRIBUTES (decl));
6921
6922 /* Handle the arguments of the hotpatch attribute. Values
6923 specified via the attribute override the command-line argument
6924 values. */
6925 if (attr)
6926 {
6927 tree args = TREE_VALUE (attr);
6928
6929 *hw_before = TREE_INT_CST_LOW (TREE_VALUE (args));
6930 *hw_after = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (args)));
6931 }
6932 else
6933 {
6934 /* Use the values specified by the cmdline arguments. */
6935 *hw_before = s390_hotpatch_hw_before_label;
6936 *hw_after = s390_hotpatch_hw_after_label;
6937 }
6938 }
6939
6940 /* Write the current .machine and .machinemode specification to the assembler
6941 file. */
6942
6943 #ifdef HAVE_AS_MACHINE_MACHINEMODE
6944 static void
6945 s390_asm_output_machine_for_arch (FILE *asm_out_file)
6946 {
6947 fprintf (asm_out_file, "\t.machinemode %s\n",
6948 (TARGET_ZARCH) ? "zarch" : "esa");
6949 fprintf (asm_out_file, "\t.machine \"%s", processor_table[s390_arch].name);
6950 if (S390_USE_ARCHITECTURE_MODIFIERS)
6951 {
6952 int cpu_flags;
6953
6954 cpu_flags = processor_flags_table[(int) s390_arch];
6955 if (TARGET_HTM && !(cpu_flags & PF_TX))
6956 fprintf (asm_out_file, "+htm");
6957 else if (!TARGET_HTM && (cpu_flags & PF_TX))
6958 fprintf (asm_out_file, "+nohtm");
6959 if (TARGET_VX && !(cpu_flags & PF_VX))
6960 fprintf (asm_out_file, "+vx");
6961 else if (!TARGET_VX && (cpu_flags & PF_VX))
6962 fprintf (asm_out_file, "+novx");
6963 }
6964 fprintf (asm_out_file, "\"\n");
6965 }
6966
6967 /* Write an extra function header before the very start of the function. */
6968
6969 void
6970 s390_asm_output_function_prefix (FILE *asm_out_file,
6971 const char *fnname ATTRIBUTE_UNUSED)
6972 {
6973 if (DECL_FUNCTION_SPECIFIC_TARGET (current_function_decl) == NULL)
6974 return;
6975 /* Since only the function specific options are saved but not the indications
6976 which options are set, it's too much work here to figure out which options
6977 have actually changed. Thus, generate .machine and .machinemode whenever a
6978 function has the target attribute or pragma. */
6979 fprintf (asm_out_file, "\t.machinemode push\n");
6980 fprintf (asm_out_file, "\t.machine push\n");
6981 s390_asm_output_machine_for_arch (asm_out_file);
6982 }
6983
6984 /* Write an extra function footer after the very end of the function. */
6985
6986 void
6987 s390_asm_declare_function_size (FILE *asm_out_file,
6988 const char *fnname, tree decl)
6989 {
6990 if (!flag_inhibit_size_directive)
6991 ASM_OUTPUT_MEASURED_SIZE (asm_out_file, fnname);
6992 if (DECL_FUNCTION_SPECIFIC_TARGET (decl) == NULL)
6993 return;
6994 fprintf (asm_out_file, "\t.machine pop\n");
6995 fprintf (asm_out_file, "\t.machinemode pop\n");
6996 }
6997 #endif
6998
6999 /* Write the extra assembler code needed to declare a function properly. */
7000
7001 void
7002 s390_asm_output_function_label (FILE *asm_out_file, const char *fname,
7003 tree decl)
7004 {
7005 int hw_before, hw_after;
7006
7007 s390_function_num_hotpatch_hw (decl, &hw_before, &hw_after);
7008 if (hw_before > 0)
7009 {
7010 unsigned int function_alignment;
7011 int i;
7012
7013 /* Add a trampoline code area before the function label and initialize it
7014 with two-byte nop instructions. This area can be overwritten with code
7015 that jumps to a patched version of the function. */
7016 asm_fprintf (asm_out_file, "\tnopr\t%%r7"
7017 "\t# pre-label NOPs for hotpatch (%d halfwords)\n",
7018 hw_before);
7019 for (i = 1; i < hw_before; i++)
7020 fputs ("\tnopr\t%r7\n", asm_out_file);
7021
7022 /* Note: The function label must be aligned so that (a) the bytes of the
7023 following nop do not cross a cacheline boundary, and (b) a jump address
7024 (eight bytes for 64-bit targets, four bytes for 32-bit targets) can be
7025 stored directly before the label without crossing a cacheline
7026 boundary. All this is necessary to make sure the trampoline code can
7027 be changed atomically.
7028 This alignment is done automatically using FUNCTION_BOUNDARY, but
7029 if there are NOPs before the function label, the alignment is placed
7030 before them. So it is necessary to duplicate the alignment after the
7031 NOPs. */
7032 function_alignment = MAX (8, DECL_ALIGN (decl) / BITS_PER_UNIT);
7033 if (! DECL_USER_ALIGN (decl))
7034 function_alignment = MAX (function_alignment,
7035 (unsigned int) align_functions);
7036 fputs ("\t# alignment for hotpatch\n", asm_out_file);
7037 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (function_alignment));
7038 }
7039
7040 if (S390_USE_TARGET_ATTRIBUTE && TARGET_DEBUG_ARG)
7041 {
7042 asm_fprintf (asm_out_file, "\t# fn:%s ar%d\n", fname, s390_arch);
7043 asm_fprintf (asm_out_file, "\t# fn:%s tu%d\n", fname, s390_tune);
7044 asm_fprintf (asm_out_file, "\t# fn:%s sg%d\n", fname, s390_stack_guard);
7045 asm_fprintf (asm_out_file, "\t# fn:%s ss%d\n", fname, s390_stack_size);
7046 asm_fprintf (asm_out_file, "\t# fn:%s bc%d\n", fname, s390_branch_cost);
7047 asm_fprintf (asm_out_file, "\t# fn:%s wf%d\n", fname,
7048 s390_warn_framesize);
7049 asm_fprintf (asm_out_file, "\t# fn:%s ba%d\n", fname, TARGET_BACKCHAIN);
7050 asm_fprintf (asm_out_file, "\t# fn:%s hd%d\n", fname, TARGET_HARD_DFP);
7051 asm_fprintf (asm_out_file, "\t# fn:%s hf%d\n", fname, !TARGET_SOFT_FLOAT);
7052 asm_fprintf (asm_out_file, "\t# fn:%s ht%d\n", fname, TARGET_OPT_HTM);
7053 asm_fprintf (asm_out_file, "\t# fn:%s vx%d\n", fname, TARGET_OPT_VX);
7054 asm_fprintf (asm_out_file, "\t# fn:%s ps%d\n", fname,
7055 TARGET_PACKED_STACK);
7056 asm_fprintf (asm_out_file, "\t# fn:%s se%d\n", fname, TARGET_SMALL_EXEC);
7057 asm_fprintf (asm_out_file, "\t# fn:%s mv%d\n", fname, TARGET_MVCLE);
7058 asm_fprintf (asm_out_file, "\t# fn:%s zv%d\n", fname, TARGET_ZVECTOR);
7059 asm_fprintf (asm_out_file, "\t# fn:%s wd%d\n", fname,
7060 s390_warn_dynamicstack_p);
7061 }
7062 ASM_OUTPUT_LABEL (asm_out_file, fname);
7063 if (hw_after > 0)
7064 asm_fprintf (asm_out_file,
7065 "\t# post-label NOPs for hotpatch (%d halfwords)\n",
7066 hw_after);
7067 }
7068
7069 /* Output machine-dependent UNSPECs occurring in address constant X
7070 in assembler syntax to stdio stream FILE. Returns true if the
7071 constant X could be recognized, false otherwise. */
7072
7073 static bool
7074 s390_output_addr_const_extra (FILE *file, rtx x)
7075 {
7076 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
7077 switch (XINT (x, 1))
7078 {
7079 case UNSPEC_GOTENT:
7080 output_addr_const (file, XVECEXP (x, 0, 0));
7081 fprintf (file, "@GOTENT");
7082 return true;
7083 case UNSPEC_GOT:
7084 output_addr_const (file, XVECEXP (x, 0, 0));
7085 fprintf (file, "@GOT");
7086 return true;
7087 case UNSPEC_GOTOFF:
7088 output_addr_const (file, XVECEXP (x, 0, 0));
7089 fprintf (file, "@GOTOFF");
7090 return true;
7091 case UNSPEC_PLT:
7092 output_addr_const (file, XVECEXP (x, 0, 0));
7093 fprintf (file, "@PLT");
7094 return true;
7095 case UNSPEC_PLTOFF:
7096 output_addr_const (file, XVECEXP (x, 0, 0));
7097 fprintf (file, "@PLTOFF");
7098 return true;
7099 case UNSPEC_TLSGD:
7100 output_addr_const (file, XVECEXP (x, 0, 0));
7101 fprintf (file, "@TLSGD");
7102 return true;
7103 case UNSPEC_TLSLDM:
7104 assemble_name (file, get_some_local_dynamic_name ());
7105 fprintf (file, "@TLSLDM");
7106 return true;
7107 case UNSPEC_DTPOFF:
7108 output_addr_const (file, XVECEXP (x, 0, 0));
7109 fprintf (file, "@DTPOFF");
7110 return true;
7111 case UNSPEC_NTPOFF:
7112 output_addr_const (file, XVECEXP (x, 0, 0));
7113 fprintf (file, "@NTPOFF");
7114 return true;
7115 case UNSPEC_GOTNTPOFF:
7116 output_addr_const (file, XVECEXP (x, 0, 0));
7117 fprintf (file, "@GOTNTPOFF");
7118 return true;
7119 case UNSPEC_INDNTPOFF:
7120 output_addr_const (file, XVECEXP (x, 0, 0));
7121 fprintf (file, "@INDNTPOFF");
7122 return true;
7123 }
7124
7125 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
7126 switch (XINT (x, 1))
7127 {
7128 case UNSPEC_POOL_OFFSET:
7129 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
7130 output_addr_const (file, x);
7131 return true;
7132 }
7133 return false;
7134 }
7135
7136 /* Output address operand ADDR in assembler syntax to
7137 stdio stream FILE. */
7138
7139 void
7140 print_operand_address (FILE *file, rtx addr)
7141 {
7142 struct s390_address ad;
7143
7144 if (s390_loadrelative_operand_p (addr, NULL, NULL))
7145 {
7146 if (!TARGET_Z10)
7147 {
7148 output_operand_lossage ("symbolic memory references are "
7149 "only supported on z10 or later");
7150 return;
7151 }
7152 output_addr_const (file, addr);
7153 return;
7154 }
7155
7156 if (!s390_decompose_address (addr, &ad)
7157 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7158 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
7159 output_operand_lossage ("cannot decompose address");
7160
7161 if (ad.disp)
7162 output_addr_const (file, ad.disp);
7163 else
7164 fprintf (file, "0");
7165
7166 if (ad.base && ad.indx)
7167 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
7168 reg_names[REGNO (ad.base)]);
7169 else if (ad.base)
7170 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
7171 }
7172
7173 /* Output operand X in assembler syntax to stdio stream FILE.
7174 CODE specifies the format flag. The following format flags
7175 are recognized:
7176
7177 'C': print opcode suffix for branch condition.
7178 'D': print opcode suffix for inverse branch condition.
7179 'E': print opcode suffix for branch on index instruction.
7180 'G': print the size of the operand in bytes.
7181 'J': print tls_load/tls_gdcall/tls_ldcall suffix
7182 'M': print the second word of a TImode operand.
7183 'N': print the second word of a DImode operand.
7184 'O': print only the displacement of a memory reference or address.
7185 'R': print only the base register of a memory reference or address.
7186 'S': print S-type memory reference (base+displacement).
7187 'Y': print shift count operand.
7188
7189 'b': print integer X as if it's an unsigned byte.
7190 'c': print integer X as if it's a signed byte.
7191 'e': "end" contiguous bitmask X in either DImode or vector inner mode.
7192 'f': "end" contiguous bitmask X in SImode.
7193 'h': print integer X as if it's a signed halfword.
7194 'i': print the first nonzero HImode part of X.
7195 'j': print the first HImode part unequal to -1 of X.
7196 'k': print the first nonzero SImode part of X.
7197 'm': print the first SImode part unequal to -1 of X.
7198 'o': print integer X as if it's an unsigned 32-bit word.
7199 's': "start" of contiguous bitmask X in either DImode or vector inner mode.
7200 't': CONST_INT: "start" of contiguous bitmask X in SImode.
7201 CONST_VECTOR: Generate a bitmask for vgbm instruction.
7202 'x': print integer X as if it's an unsigned halfword.
7203 'v': print register number as vector register (v1 instead of f1).
7204 */
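/* For instance (illustrative, hypothetical template): an output template

     "sll\t%0,%Y2"

   uses the 'Y' modifier to print operand 2 as a shift count, and a pattern
   using "%O0(%R0)" splits a memory operand into its displacement and base
   register parts.  */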
7205
7206 void
7207 print_operand (FILE *file, rtx x, int code)
7208 {
7209 HOST_WIDE_INT ival;
7210
7211 switch (code)
7212 {
7213 case 'C':
7214 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
7215 return;
7216
7217 case 'D':
7218 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
7219 return;
7220
7221 case 'E':
7222 if (GET_CODE (x) == LE)
7223 fprintf (file, "l");
7224 else if (GET_CODE (x) == GT)
7225 fprintf (file, "h");
7226 else
7227 output_operand_lossage ("invalid comparison operator "
7228 "for 'E' output modifier");
7229 return;
7230
7231 case 'J':
7232 if (GET_CODE (x) == SYMBOL_REF)
7233 {
7234 fprintf (file, "%s", ":tls_load:");
7235 output_addr_const (file, x);
7236 }
7237 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
7238 {
7239 fprintf (file, "%s", ":tls_gdcall:");
7240 output_addr_const (file, XVECEXP (x, 0, 0));
7241 }
7242 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
7243 {
7244 fprintf (file, "%s", ":tls_ldcall:");
7245 const char *name = get_some_local_dynamic_name ();
7246 gcc_assert (name);
7247 assemble_name (file, name);
7248 }
7249 else
7250 output_operand_lossage ("invalid reference for 'J' output modifier");
7251 return;
7252
7253 case 'G':
7254 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
7255 return;
7256
7257 case 'O':
7258 {
7259 struct s390_address ad;
7260 int ret;
7261
7262 ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
7263
7264 if (!ret
7265 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7266 || ad.indx)
7267 {
7268 output_operand_lossage ("invalid address for 'O' output modifier");
7269 return;
7270 }
7271
7272 if (ad.disp)
7273 output_addr_const (file, ad.disp);
7274 else
7275 fprintf (file, "0");
7276 }
7277 return;
7278
7279 case 'R':
7280 {
7281 struct s390_address ad;
7282 int ret;
7283
7284 ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
7285
7286 if (!ret
7287 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7288 || ad.indx)
7289 {
7290 output_operand_lossage ("invalid address for 'R' output modifier");
7291 return;
7292 }
7293
7294 if (ad.base)
7295 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
7296 else
7297 fprintf (file, "0");
7298 }
7299 return;
7300
7301 case 'S':
7302 {
7303 struct s390_address ad;
7304 int ret;
7305
7306 if (!MEM_P (x))
7307 {
7308 output_operand_lossage ("memory reference expected for "
7309 "'S' output modifier");
7310 return;
7311 }
7312 ret = s390_decompose_address (XEXP (x, 0), &ad);
7313
7314 if (!ret
7315 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7316 || ad.indx)
7317 {
7318 output_operand_lossage ("invalid address for 'S' output modifier");
7319 return;
7320 }
7321
7322 if (ad.disp)
7323 output_addr_const (file, ad.disp);
7324 else
7325 fprintf (file, "0");
7326
7327 if (ad.base)
7328 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
7329 }
7330 return;
7331
7332 case 'N':
7333 if (GET_CODE (x) == REG)
7334 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
7335 else if (GET_CODE (x) == MEM)
7336 x = change_address (x, VOIDmode,
7337 plus_constant (Pmode, XEXP (x, 0), 4));
7338 else
7339 output_operand_lossage ("register or memory expression expected "
7340 "for 'N' output modifier");
7341 break;
7342
7343 case 'M':
7344 if (GET_CODE (x) == REG)
7345 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
7346 else if (GET_CODE (x) == MEM)
7347 x = change_address (x, VOIDmode,
7348 plus_constant (Pmode, XEXP (x, 0), 8));
7349 else
7350 output_operand_lossage ("register or memory expression expected "
7351 "for 'M' output modifier");
7352 break;
7353
7354 case 'Y':
7355 print_shift_count_operand (file, x);
7356 return;
7357 }
7358
7359 switch (GET_CODE (x))
7360 {
7361 case REG:
7362 /* Print FP regs as fx instead of vx when they are accessed
7363 through non-vector mode. */
7364 if (code == 'v'
7365 || VECTOR_NOFP_REG_P (x)
7366 || (FP_REG_P (x) && VECTOR_MODE_P (GET_MODE (x)))
7367 || (VECTOR_REG_P (x)
7368 && (GET_MODE_SIZE (GET_MODE (x)) /
7369 s390_class_max_nregs (FP_REGS, GET_MODE (x))) > 8))
7370 fprintf (file, "%%v%s", reg_names[REGNO (x)] + 2);
7371 else
7372 fprintf (file, "%s", reg_names[REGNO (x)]);
7373 break;
7374
7375 case MEM:
7376 output_address (GET_MODE (x), XEXP (x, 0));
7377 break;
7378
7379 case CONST:
7380 case CODE_LABEL:
7381 case LABEL_REF:
7382 case SYMBOL_REF:
7383 output_addr_const (file, x);
7384 break;
7385
7386 case CONST_INT:
7387 ival = INTVAL (x);
7388 switch (code)
7389 {
7390 case 0:
7391 break;
7392 case 'b':
7393 ival &= 0xff;
7394 break;
7395 case 'c':
7396 ival = ((ival & 0xff) ^ 0x80) - 0x80;
7397 break;
7398 case 'x':
7399 ival &= 0xffff;
7400 break;
7401 case 'h':
7402 ival = ((ival & 0xffff) ^ 0x8000) - 0x8000;
7403 break;
7404 case 'i':
7405 ival = s390_extract_part (x, HImode, 0);
7406 break;
7407 case 'j':
7408 ival = s390_extract_part (x, HImode, -1);
7409 break;
7410 case 'k':
7411 ival = s390_extract_part (x, SImode, 0);
7412 break;
7413 case 'm':
7414 ival = s390_extract_part (x, SImode, -1);
7415 break;
7416 case 'o':
7417 ival &= 0xffffffff;
7418 break;
7419 case 'e': case 'f':
7420 case 's': case 't':
7421 {
7422 int pos, len;
7423 bool ok;
7424
7425 len = (code == 's' || code == 'e' ? 64 : 32);
7426 ok = s390_contiguous_bitmask_p (ival, len, &pos, &len);
7427 gcc_assert (ok);
7428 if (code == 's' || code == 't')
7429 ival = 64 - pos - len;
7430 else
7431 ival = 64 - 1 - pos;
7432 }
7433 break;
7434 default:
7435 output_operand_lossage ("invalid constant for output modifier '%c'", code);
7436 }
7437 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
7438 break;
7439
7440 case CONST_WIDE_INT:
7441 if (code == 'b')
7442 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7443 CONST_WIDE_INT_ELT (x, 0) & 0xff);
7444 else if (code == 'x')
7445 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7446 CONST_WIDE_INT_ELT (x, 0) & 0xffff);
7447 else if (code == 'h')
7448 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7449 ((CONST_WIDE_INT_ELT (x, 0) & 0xffff) ^ 0x8000) - 0x8000);
7450 else
7451 {
7452 if (code == 0)
7453 output_operand_lossage ("invalid constant - try using "
7454 "an output modifier");
7455 else
7456 output_operand_lossage ("invalid constant for output modifier '%c'",
7457 code);
7458 }
7459 break;
7460 case CONST_VECTOR:
7461 switch (code)
7462 {
7463 case 'h':
7464 gcc_assert (const_vec_duplicate_p (x));
7465 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7466 ((INTVAL (XVECEXP (x, 0, 0)) & 0xffff) ^ 0x8000) - 0x8000);
7467 break;
7468 case 'e':
7469 case 's':
7470 {
7471 int start, stop, inner_len;
7472 bool ok;
7473
7474 inner_len = GET_MODE_UNIT_BITSIZE (GET_MODE (x));
7475 ok = s390_contiguous_bitmask_vector_p (x, &start, &stop);
7476 gcc_assert (ok);
7477 if (code == 's' || code == 't')
7478 ival = inner_len - stop - 1;
7479 else
7480 ival = inner_len - start - 1;
7481 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
7482 }
7483 break;
7484 case 't':
7485 {
7486 unsigned mask;
7487 bool ok = s390_bytemask_vector_p (x, &mask);
7488 gcc_assert (ok);
7489 fprintf (file, "%u", mask);
7490 }
7491 break;
7492
7493 default:
7494 output_operand_lossage ("invalid constant vector for output "
7495 "modifier '%c'", code);
7496 }
7497 break;
7498
7499 default:
7500 if (code == 0)
7501 output_operand_lossage ("invalid expression - try using "
7502 "an output modifier");
7503 else
7504 output_operand_lossage ("invalid expression for output "
7505 "modifier '%c'", code);
7506 break;
7507 }
7508 }
7509
7510 /* Target hook for assembling integer objects. We need to define it
7511 here to work around a bug in some versions of GAS, which couldn't
7512 handle values smaller than INT_MIN when printed in decimal. */
7513
7514 static bool
7515 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
7516 {
7517 if (size == 8 && aligned_p
7518 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
7519 {
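/* E.g. (const_int -0x123456789) is emitted as
   ".quad 0xfffffffedcba9877" instead of a decimal value that old
   versions of GAS would reject.  */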
7520 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
7521 INTVAL (x));
7522 return true;
7523 }
7524 return default_assemble_integer (x, size, aligned_p);
7525 }
7526
7527 /* Returns true if register REGNO is used for forming
7528 a memory address in expression X. */
7529
7530 static bool
7531 reg_used_in_mem_p (int regno, rtx x)
7532 {
7533 enum rtx_code code = GET_CODE (x);
7534 int i, j;
7535 const char *fmt;
7536
7537 if (code == MEM)
7538 {
7539 if (refers_to_regno_p (regno, XEXP (x, 0)))
7540 return true;
7541 }
7542 else if (code == SET
7543 && GET_CODE (SET_DEST (x)) == PC)
7544 {
7545 if (refers_to_regno_p (regno, SET_SRC (x)))
7546 return true;
7547 }
7548
7549 fmt = GET_RTX_FORMAT (code);
7550 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7551 {
7552 if (fmt[i] == 'e'
7553 && reg_used_in_mem_p (regno, XEXP (x, i)))
7554 return true;
7555
7556 else if (fmt[i] == 'E')
7557 for (j = 0; j < XVECLEN (x, i); j++)
7558 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
7559 return true;
7560 }
7561 return false;
7562 }
7563
7564 /* Returns true if expression DEP_RTX sets an address register
7565 used by instruction INSN to address memory. */
7566
7567 static bool
7568 addr_generation_dependency_p (rtx dep_rtx, rtx_insn *insn)
7569 {
7570 rtx target, pat;
7571
7572 if (NONJUMP_INSN_P (dep_rtx))
7573 dep_rtx = PATTERN (dep_rtx);
7574
7575 if (GET_CODE (dep_rtx) == SET)
7576 {
7577 target = SET_DEST (dep_rtx);
7578 if (GET_CODE (target) == STRICT_LOW_PART)
7579 target = XEXP (target, 0);
7580 while (GET_CODE (target) == SUBREG)
7581 target = SUBREG_REG (target);
7582
7583 if (GET_CODE (target) == REG)
7584 {
7585 int regno = REGNO (target);
7586
7587 if (s390_safe_attr_type (insn) == TYPE_LA)
7588 {
7589 pat = PATTERN (insn);
7590 if (GET_CODE (pat) == PARALLEL)
7591 {
7592 gcc_assert (XVECLEN (pat, 0) == 2);
7593 pat = XVECEXP (pat, 0, 0);
7594 }
7595 gcc_assert (GET_CODE (pat) == SET);
7596 return refers_to_regno_p (regno, SET_SRC (pat));
7597 }
7598 else if (get_attr_atype (insn) == ATYPE_AGEN)
7599 return reg_used_in_mem_p (regno, PATTERN (insn));
7600 }
7601 }
7602 return false;
7603 }
7604
7605 /* Return 1 if DEP_INSN sets a register used by INSN in the agen unit. */
7606
7607 int
7608 s390_agen_dep_p (rtx_insn *dep_insn, rtx_insn *insn)
7609 {
7610 rtx dep_rtx = PATTERN (dep_insn);
7611 int i;
7612
7613 if (GET_CODE (dep_rtx) == SET
7614 && addr_generation_dependency_p (dep_rtx, insn))
7615 return 1;
7616 else if (GET_CODE (dep_rtx) == PARALLEL)
7617 {
7618 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
7619 {
7620 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
7621 return 1;
7622 }
7623 }
7624 return 0;
7625 }
7626
7627
7628 /* A C statement (sans semicolon) to update the integer scheduling priority
7629 INSN_PRIORITY (INSN). Increase the priority to execute the INSN earlier,
7630 reduce the priority to execute INSN later. Do not define this macro if
7631 you do not need to adjust the scheduling priorities of insns.
7632
7633 A STD instruction should be scheduled earlier,
7634 in order to use the bypass. */
7635 static int
7636 s390_adjust_priority (rtx_insn *insn, int priority)
7637 {
7638 if (! INSN_P (insn))
7639 return priority;
7640
7641 if (s390_tune <= PROCESSOR_2064_Z900)
7642 return priority;
7643
7644 switch (s390_safe_attr_type (insn))
7645 {
7646 case TYPE_FSTOREDF:
7647 case TYPE_FSTORESF:
7648 priority = priority << 3;
7649 break;
7650 case TYPE_STORE:
7651 case TYPE_STM:
7652 priority = priority << 1;
7653 break;
7654 default:
7655 break;
7656 }
7657 return priority;
7658 }
7659
7660
7661 /* The number of instructions that can be issued per cycle. */
7662
7663 static int
7664 s390_issue_rate (void)
7665 {
7666 switch (s390_tune)
7667 {
7668 case PROCESSOR_2084_Z990:
7669 case PROCESSOR_2094_Z9_109:
7670 case PROCESSOR_2094_Z9_EC:
7671 case PROCESSOR_2817_Z196:
7672 return 3;
7673 case PROCESSOR_2097_Z10:
7674 return 2;
7675 case PROCESSOR_9672_G5:
7676 case PROCESSOR_9672_G6:
7677 case PROCESSOR_2064_Z900:
7678 /* Starting with EC12 we use the sched_reorder hook to take care
7679 of instruction dispatch constraints. The algorithm only
7680 picks the best instruction and assumes only a single
7681 instruction gets issued per cycle. */
7682 case PROCESSOR_2827_ZEC12:
7683 case PROCESSOR_2964_Z13:
7684 default:
7685 return 1;
7686 }
7687 }
7688
7689 static int
7690 s390_first_cycle_multipass_dfa_lookahead (void)
7691 {
7692 return 4;
7693 }
7694
7695 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
7696 Fix up MEMs as required. */
7697
7698 static void
7699 annotate_constant_pool_refs (rtx *x)
7700 {
7701 int i, j;
7702 const char *fmt;
7703
7704 gcc_assert (GET_CODE (*x) != SYMBOL_REF
7705 || !CONSTANT_POOL_ADDRESS_P (*x));
7706
7707 /* Literal pool references can only occur inside a MEM ... */
7708 if (GET_CODE (*x) == MEM)
7709 {
7710 rtx memref = XEXP (*x, 0);
7711
7712 if (GET_CODE (memref) == SYMBOL_REF
7713 && CONSTANT_POOL_ADDRESS_P (memref))
7714 {
7715 rtx base = cfun->machine->base_reg;
7716 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
7717 UNSPEC_LTREF);
7718
7719 *x = replace_equiv_address (*x, addr);
7720 return;
7721 }
7722
7723 if (GET_CODE (memref) == CONST
7724 && GET_CODE (XEXP (memref, 0)) == PLUS
7725 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
7726 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
7727 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
7728 {
7729 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
7730 rtx sym = XEXP (XEXP (memref, 0), 0);
7731 rtx base = cfun->machine->base_reg;
7732 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
7733 UNSPEC_LTREF);
7734
7735 *x = replace_equiv_address (*x, plus_constant (Pmode, addr, off));
7736 return;
7737 }
7738 }
7739
7740 /* ... or a load-address type pattern. */
7741 if (GET_CODE (*x) == SET)
7742 {
7743 rtx addrref = SET_SRC (*x);
7744
7745 if (GET_CODE (addrref) == SYMBOL_REF
7746 && CONSTANT_POOL_ADDRESS_P (addrref))
7747 {
7748 rtx base = cfun->machine->base_reg;
7749 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
7750 UNSPEC_LTREF);
7751
7752 SET_SRC (*x) = addr;
7753 return;
7754 }
7755
7756 if (GET_CODE (addrref) == CONST
7757 && GET_CODE (XEXP (addrref, 0)) == PLUS
7758 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
7759 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
7760 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
7761 {
7762 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
7763 rtx sym = XEXP (XEXP (addrref, 0), 0);
7764 rtx base = cfun->machine->base_reg;
7765 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
7766 UNSPEC_LTREF);
7767
7768 SET_SRC (*x) = plus_constant (Pmode, addr, off);
7769 return;
7770 }
7771 }
7772
7773 /* Annotate LTREL_BASE as well. */
7774 if (GET_CODE (*x) == UNSPEC
7775 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
7776 {
7777 rtx base = cfun->machine->base_reg;
7778 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
7779 UNSPEC_LTREL_BASE);
7780 return;
7781 }
7782
7783 fmt = GET_RTX_FORMAT (GET_CODE (*x));
7784 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
7785 {
7786 if (fmt[i] == 'e')
7787 {
7788 annotate_constant_pool_refs (&XEXP (*x, i));
7789 }
7790 else if (fmt[i] == 'E')
7791 {
7792 for (j = 0; j < XVECLEN (*x, i); j++)
7793 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
7794 }
7795 }
7796 }
7797
7798 /* Split all branches that exceed the maximum distance.
7799 Returns true if this created a new literal pool entry. */
7800
7801 static int
7802 s390_split_branches (void)
7803 {
7804 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
7805 int new_literal = 0, ret;
7806 rtx_insn *insn;
7807 rtx pat, target;
7808 rtx *label;
7809
7810 /* We need correct insn addresses. */
7811
7812 shorten_branches (get_insns ());
7813
7814 /* Find all branches that exceed 64KB, and split them. */
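/* Without z/Architecture's brcl a relative branch can only reach about
   +-64KB.  An out-of-range branch is rewritten to load the target
   address from the literal pool into the return register and to branch
   via that register instead.  */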
7815
7816 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7817 {
7818 if (! JUMP_P (insn) || tablejump_p (insn, NULL, NULL))
7819 continue;
7820
7821 pat = PATTERN (insn);
7822 if (GET_CODE (pat) == PARALLEL)
7823 pat = XVECEXP (pat, 0, 0);
7824 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
7825 continue;
7826
7827 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
7828 {
7829 label = &SET_SRC (pat);
7830 }
7831 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
7832 {
7833 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
7834 label = &XEXP (SET_SRC (pat), 1);
7835 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
7836 label = &XEXP (SET_SRC (pat), 2);
7837 else
7838 continue;
7839 }
7840 else
7841 continue;
7842
7843 if (get_attr_length (insn) <= 4)
7844 continue;
7845
7846 /* We are going to use the return register as a scratch register;
7847 make sure it will be saved/restored by the prologue/epilogue. */
7848 cfun_frame_layout.save_return_addr_p = 1;
7849
7850 if (!flag_pic)
7851 {
7852 new_literal = 1;
7853 rtx mem = force_const_mem (Pmode, *label);
7854 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (temp_reg, mem),
7855 insn);
7856 INSN_ADDRESSES_NEW (set_insn, -1);
7857 annotate_constant_pool_refs (&PATTERN (set_insn));
7858
7859 target = temp_reg;
7860 }
7861 else
7862 {
7863 new_literal = 1;
7864 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
7865 UNSPEC_LTREL_OFFSET);
7866 target = gen_rtx_CONST (Pmode, target);
7867 target = force_const_mem (Pmode, target);
7868 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (temp_reg, target),
7869 insn);
7870 INSN_ADDRESSES_NEW (set_insn, -1);
7871 annotate_constant_pool_refs (&PATTERN (set_insn));
7872
7873 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
7874 cfun->machine->base_reg),
7875 UNSPEC_LTREL_BASE);
7876 target = gen_rtx_PLUS (Pmode, temp_reg, target);
7877 }
7878
7879 ret = validate_change (insn, label, target, 0);
7880 gcc_assert (ret);
7881 }
7882
7883 return new_literal;
7884 }
7885
7886
7887 /* Find an annotated literal pool symbol referenced in RTX X,
7888 and store it at REF. Will abort if X contains references to
7889 more than one such pool symbol; multiple references to the same
7890 symbol are allowed, however.
7891
7892 The rtx pointed to by REF must be initialized to NULL_RTX
7893 by the caller before calling this routine. */
7894
7895 static void
7896 find_constant_pool_ref (rtx x, rtx *ref)
7897 {
7898 int i, j;
7899 const char *fmt;
7900
7901 /* Ignore LTREL_BASE references. */
7902 if (GET_CODE (x) == UNSPEC
7903 && XINT (x, 1) == UNSPEC_LTREL_BASE)
7904 return;
7905 /* Likewise POOL_ENTRY insns. */
7906 if (GET_CODE (x) == UNSPEC_VOLATILE
7907 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
7908 return;
7909
7910 gcc_assert (GET_CODE (x) != SYMBOL_REF
7911 || !CONSTANT_POOL_ADDRESS_P (x));
7912
7913 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
7914 {
7915 rtx sym = XVECEXP (x, 0, 0);
7916 gcc_assert (GET_CODE (sym) == SYMBOL_REF
7917 && CONSTANT_POOL_ADDRESS_P (sym));
7918
7919 if (*ref == NULL_RTX)
7920 *ref = sym;
7921 else
7922 gcc_assert (*ref == sym);
7923
7924 return;
7925 }
7926
7927 fmt = GET_RTX_FORMAT (GET_CODE (x));
7928 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
7929 {
7930 if (fmt[i] == 'e')
7931 {
7932 find_constant_pool_ref (XEXP (x, i), ref);
7933 }
7934 else if (fmt[i] == 'E')
7935 {
7936 for (j = 0; j < XVECLEN (x, i); j++)
7937 find_constant_pool_ref (XVECEXP (x, i, j), ref);
7938 }
7939 }
7940 }
7941
7942 /* Replace every reference to the annotated literal pool
7943 symbol REF in X by its base plus OFFSET. */
7944
7945 static void
7946 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
7947 {
7948 int i, j;
7949 const char *fmt;
7950
7951 gcc_assert (*x != ref);
7952
7953 if (GET_CODE (*x) == UNSPEC
7954 && XINT (*x, 1) == UNSPEC_LTREF
7955 && XVECEXP (*x, 0, 0) == ref)
7956 {
7957 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
7958 return;
7959 }
7960
7961 if (GET_CODE (*x) == PLUS
7962 && GET_CODE (XEXP (*x, 1)) == CONST_INT
7963 && GET_CODE (XEXP (*x, 0)) == UNSPEC
7964 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
7965 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
7966 {
7967 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
7968 *x = plus_constant (Pmode, addr, INTVAL (XEXP (*x, 1)));
7969 return;
7970 }
7971
7972 fmt = GET_RTX_FORMAT (GET_CODE (*x));
7973 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
7974 {
7975 if (fmt[i] == 'e')
7976 {
7977 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
7978 }
7979 else if (fmt[i] == 'E')
7980 {
7981 for (j = 0; j < XVECLEN (*x, i); j++)
7982 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
7983 }
7984 }
7985 }
7986
7987 /* Check whether X contains an UNSPEC_LTREL_BASE.
7988 Return its constant pool symbol if found, NULL_RTX otherwise. */
7989
7990 static rtx
7991 find_ltrel_base (rtx x)
7992 {
7993 int i, j;
7994 const char *fmt;
7995
7996 if (GET_CODE (x) == UNSPEC
7997 && XINT (x, 1) == UNSPEC_LTREL_BASE)
7998 return XVECEXP (x, 0, 0);
7999
8000 fmt = GET_RTX_FORMAT (GET_CODE (x));
8001 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8002 {
8003 if (fmt[i] == 'e')
8004 {
8005 rtx fnd = find_ltrel_base (XEXP (x, i));
8006 if (fnd)
8007 return fnd;
8008 }
8009 else if (fmt[i] == 'E')
8010 {
8011 for (j = 0; j < XVECLEN (x, i); j++)
8012 {
8013 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
8014 if (fnd)
8015 return fnd;
8016 }
8017 }
8018 }
8019
8020 return NULL_RTX;
8021 }
8022
8023 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
8024
8025 static void
8026 replace_ltrel_base (rtx *x)
8027 {
8028 int i, j;
8029 const char *fmt;
8030
8031 if (GET_CODE (*x) == UNSPEC
8032 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
8033 {
8034 *x = XVECEXP (*x, 0, 1);
8035 return;
8036 }
8037
8038 fmt = GET_RTX_FORMAT (GET_CODE (*x));
8039 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
8040 {
8041 if (fmt[i] == 'e')
8042 {
8043 replace_ltrel_base (&XEXP (*x, i));
8044 }
8045 else if (fmt[i] == 'E')
8046 {
8047 for (j = 0; j < XVECLEN (*x, i); j++)
8048 replace_ltrel_base (&XVECEXP (*x, i, j));
8049 }
8050 }
8051 }
8052
8053
8054 /* We keep a list of constants which we have to add to internal
8055 constant tables in the middle of large functions. */
8056
8057 #define NR_C_MODES 32
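/* Note that constant_modes is ordered by decreasing mode size.  The pool
   entries are emitted in this order, which keeps every entry naturally
   aligned (see s390_dump_pool).  */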
8058 machine_mode constant_modes[NR_C_MODES] =
8059 {
8060 TFmode, TImode, TDmode,
8061 V16QImode, V8HImode, V4SImode, V2DImode, V1TImode,
8062 V4SFmode, V2DFmode, V1TFmode,
8063 DFmode, DImode, DDmode,
8064 V8QImode, V4HImode, V2SImode, V1DImode, V2SFmode, V1DFmode,
8065 SFmode, SImode, SDmode,
8066 V4QImode, V2HImode, V1SImode, V1SFmode,
8067 HImode,
8068 V2QImode, V1HImode,
8069 QImode,
8070 V1QImode
8071 };
8072
8073 struct constant
8074 {
8075 struct constant *next;
8076 rtx value;
8077 rtx_code_label *label;
8078 };
8079
8080 struct constant_pool
8081 {
8082 struct constant_pool *next;
8083 rtx_insn *first_insn;
8084 rtx_insn *pool_insn;
8085 bitmap insns;
8086 rtx_insn *emit_pool_after;
8087
8088 struct constant *constants[NR_C_MODES];
8089 struct constant *execute;
8090 rtx_code_label *label;
8091 int size;
8092 };
8093
8094 /* Allocate new constant_pool structure. */
8095
8096 static struct constant_pool *
8097 s390_alloc_pool (void)
8098 {
8099 struct constant_pool *pool;
8100 int i;
8101
8102 pool = (struct constant_pool *) xmalloc (sizeof *pool);
8103 pool->next = NULL;
8104 for (i = 0; i < NR_C_MODES; i++)
8105 pool->constants[i] = NULL;
8106
8107 pool->execute = NULL;
8108 pool->label = gen_label_rtx ();
8109 pool->first_insn = NULL;
8110 pool->pool_insn = NULL;
8111 pool->insns = BITMAP_ALLOC (NULL);
8112 pool->size = 0;
8113 pool->emit_pool_after = NULL;
8114
8115 return pool;
8116 }
8117
8118 /* Create new constant pool covering instructions starting at INSN
8119 and chain it to the end of POOL_LIST. */
8120
8121 static struct constant_pool *
8122 s390_start_pool (struct constant_pool **pool_list, rtx_insn *insn)
8123 {
8124 struct constant_pool *pool, **prev;
8125
8126 pool = s390_alloc_pool ();
8127 pool->first_insn = insn;
8128
8129 for (prev = pool_list; *prev; prev = &(*prev)->next)
8130 ;
8131 *prev = pool;
8132
8133 return pool;
8134 }
8135
8136 /* End range of instructions covered by POOL at INSN and emit
8137 placeholder insn representing the pool. */
8138
8139 static void
8140 s390_end_pool (struct constant_pool *pool, rtx_insn *insn)
8141 {
8142 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
8143
8144 if (!insn)
8145 insn = get_last_insn ();
8146
8147 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
8148 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8149 }
8150
8151 /* Add INSN to the list of insns covered by POOL. */
8152
8153 static void
8154 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
8155 {
8156 bitmap_set_bit (pool->insns, INSN_UID (insn));
8157 }
8158
8159 /* Return pool out of POOL_LIST that covers INSN. */
8160
8161 static struct constant_pool *
8162 s390_find_pool (struct constant_pool *pool_list, rtx insn)
8163 {
8164 struct constant_pool *pool;
8165
8166 for (pool = pool_list; pool; pool = pool->next)
8167 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
8168 break;
8169
8170 return pool;
8171 }
8172
8173 /* Add constant VAL of mode MODE to the constant pool POOL. */
8174
8175 static void
8176 s390_add_constant (struct constant_pool *pool, rtx val, machine_mode mode)
8177 {
8178 struct constant *c;
8179 int i;
8180
8181 for (i = 0; i < NR_C_MODES; i++)
8182 if (constant_modes[i] == mode)
8183 break;
8184 gcc_assert (i != NR_C_MODES);
8185
8186 for (c = pool->constants[i]; c != NULL; c = c->next)
8187 if (rtx_equal_p (val, c->value))
8188 break;
8189
8190 if (c == NULL)
8191 {
8192 c = (struct constant *) xmalloc (sizeof *c);
8193 c->value = val;
8194 c->label = gen_label_rtx ();
8195 c->next = pool->constants[i];
8196 pool->constants[i] = c;
8197 pool->size += GET_MODE_SIZE (mode);
8198 }
8199 }
8200
8201 /* Return an rtx that represents the offset of X from the start of
8202 pool POOL. */
8203
8204 static rtx
8205 s390_pool_offset (struct constant_pool *pool, rtx x)
8206 {
8207 rtx label;
8208
8209 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
8210 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
8211 UNSPEC_POOL_OFFSET);
8212 return gen_rtx_CONST (GET_MODE (x), x);
8213 }
8214
8215 /* Find constant VAL of mode MODE in the constant pool POOL.
8216 Return an RTX describing the distance from the start of
8217 the pool to the location of the constant. */
8218
8219 static rtx
8220 s390_find_constant (struct constant_pool *pool, rtx val,
8221 machine_mode mode)
8222 {
8223 struct constant *c;
8224 int i;
8225
8226 for (i = 0; i < NR_C_MODES; i++)
8227 if (constant_modes[i] == mode)
8228 break;
8229 gcc_assert (i != NR_C_MODES);
8230
8231 for (c = pool->constants[i]; c != NULL; c = c->next)
8232 if (rtx_equal_p (val, c->value))
8233 break;
8234
8235 gcc_assert (c);
8236
8237 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
8238 }
8239
8240 /* Check whether INSN is an execute. Return the label_ref to its
8241 execute target template if so, NULL_RTX otherwise. */
8242
8243 static rtx
8244 s390_execute_label (rtx insn)
8245 {
8246 if (NONJUMP_INSN_P (insn)
8247 && GET_CODE (PATTERN (insn)) == PARALLEL
8248 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
8249 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
8250 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
8251
8252 return NULL_RTX;
8253 }
8254
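/* On pre-z/Architecture targets the 6-byte execute target templates
   (e.g. the MVC executed by EX for a variable-length block move) are
   placed into the literal pool so that they can be addressed through
   the pool base register.  */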
8255 /* Add execute target for INSN to the constant pool POOL. */
8256
8257 static void
8258 s390_add_execute (struct constant_pool *pool, rtx insn)
8259 {
8260 struct constant *c;
8261
8262 for (c = pool->execute; c != NULL; c = c->next)
8263 if (INSN_UID (insn) == INSN_UID (c->value))
8264 break;
8265
8266 if (c == NULL)
8267 {
8268 c = (struct constant *) xmalloc (sizeof *c);
8269 c->value = insn;
8270 c->label = gen_label_rtx ();
8271 c->next = pool->execute;
8272 pool->execute = c;
8273 pool->size += 6;
8274 }
8275 }
8276
8277 /* Find execute target for INSN in the constant pool POOL.
8278 Return an RTX describing the distance from the start of
8279 the pool to the location of the execute target. */
8280
8281 static rtx
8282 s390_find_execute (struct constant_pool *pool, rtx insn)
8283 {
8284 struct constant *c;
8285
8286 for (c = pool->execute; c != NULL; c = c->next)
8287 if (INSN_UID (insn) == INSN_UID (c->value))
8288 break;
8289
8290 gcc_assert (c);
8291
8292 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
8293 }
8294
8295 /* For an execute INSN, extract the execute target template. */
8296
8297 static rtx
8298 s390_execute_target (rtx insn)
8299 {
8300 rtx pattern = PATTERN (insn);
8301 gcc_assert (s390_execute_label (insn));
8302
8303 if (XVECLEN (pattern, 0) == 2)
8304 {
8305 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
8306 }
8307 else
8308 {
8309 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
8310 int i;
8311
8312 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
8313 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
8314
8315 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
8316 }
8317
8318 return pattern;
8319 }
8320
8321 /* Indicate that INSN cannot be duplicated. This is the case for
8322 execute insns that carry a unique label. */
8323
8324 static bool
8325 s390_cannot_copy_insn_p (rtx_insn *insn)
8326 {
8327 rtx label = s390_execute_label (insn);
8328 return label && label != const0_rtx;
8329 }
8330
8331 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
8332 do not emit the pool base label. */
8333
8334 static void
8335 s390_dump_pool (struct constant_pool *pool, bool remote_label)
8336 {
8337 struct constant *c;
8338 rtx_insn *insn = pool->pool_insn;
8339 int i;
8340
8341 /* Switch to rodata section. */
8342 if (TARGET_CPU_ZARCH)
8343 {
8344 insn = emit_insn_after (gen_pool_section_start (), insn);
8345 INSN_ADDRESSES_NEW (insn, -1);
8346 }
8347
8348 /* Ensure minimum pool alignment. */
8349 if (TARGET_CPU_ZARCH)
8350 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
8351 else
8352 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
8353 INSN_ADDRESSES_NEW (insn, -1);
8354
8355 /* Emit pool base label. */
8356 if (!remote_label)
8357 {
8358 insn = emit_label_after (pool->label, insn);
8359 INSN_ADDRESSES_NEW (insn, -1);
8360 }
8361
8362 /* Dump constants in descending alignment requirement order,
8363 ensuring proper alignment for every constant. */
8364 for (i = 0; i < NR_C_MODES; i++)
8365 for (c = pool->constants[i]; c; c = c->next)
8366 {
8367 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
8368 rtx value = copy_rtx (c->value);
8369 if (GET_CODE (value) == CONST
8370 && GET_CODE (XEXP (value, 0)) == UNSPEC
8371 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
8372 && XVECLEN (XEXP (value, 0), 0) == 1)
8373 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
8374
8375 insn = emit_label_after (c->label, insn);
8376 INSN_ADDRESSES_NEW (insn, -1);
8377
8378 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
8379 gen_rtvec (1, value),
8380 UNSPECV_POOL_ENTRY);
8381 insn = emit_insn_after (value, insn);
8382 INSN_ADDRESSES_NEW (insn, -1);
8383 }
8384
8385 /* Ensure minimum alignment for instructions. */
8386 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
8387 INSN_ADDRESSES_NEW (insn, -1);
8388
8389 /* Output in-pool execute template insns. */
8390 for (c = pool->execute; c; c = c->next)
8391 {
8392 insn = emit_label_after (c->label, insn);
8393 INSN_ADDRESSES_NEW (insn, -1);
8394
8395 insn = emit_insn_after (s390_execute_target (c->value), insn);
8396 INSN_ADDRESSES_NEW (insn, -1);
8397 }
8398
8399 /* Switch back to previous section. */
8400 if (TARGET_CPU_ZARCH)
8401 {
8402 insn = emit_insn_after (gen_pool_section_end (), insn);
8403 INSN_ADDRESSES_NEW (insn, -1);
8404 }
8405
8406 insn = emit_barrier_after (insn);
8407 INSN_ADDRESSES_NEW (insn, -1);
8408
8409 /* Remove placeholder insn. */
8410 remove_insn (pool->pool_insn);
8411 }
8412
8413 /* Free all memory used by POOL. */
8414
8415 static void
8416 s390_free_pool (struct constant_pool *pool)
8417 {
8418 struct constant *c, *next;
8419 int i;
8420
8421 for (i = 0; i < NR_C_MODES; i++)
8422 for (c = pool->constants[i]; c; c = next)
8423 {
8424 next = c->next;
8425 free (c);
8426 }
8427
8428 for (c = pool->execute; c; c = next)
8429 {
8430 next = c->next;
8431 free (c);
8432 }
8433
8434 BITMAP_FREE (pool->insns);
8435 free (pool);
8436 }
8437
8438
8439 /* Collect main literal pool. Return NULL on overflow. */
8440
8441 static struct constant_pool *
8442 s390_mainpool_start (void)
8443 {
8444 struct constant_pool *pool;
8445 rtx_insn *insn;
8446
8447 pool = s390_alloc_pool ();
8448
8449 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8450 {
8451 if (NONJUMP_INSN_P (insn)
8452 && GET_CODE (PATTERN (insn)) == SET
8453 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
8454 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
8455 {
8456 /* There might be two main_pool instructions if base_reg
8457 is call-clobbered; one for shrink-wrapped code and one
8458 for the rest. We want to keep the first. */
8459 if (pool->pool_insn)
8460 {
8461 insn = PREV_INSN (insn);
8462 delete_insn (NEXT_INSN (insn));
8463 continue;
8464 }
8465 pool->pool_insn = insn;
8466 }
8467
8468 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
8469 {
8470 s390_add_execute (pool, insn);
8471 }
8472 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8473 {
8474 rtx pool_ref = NULL_RTX;
8475 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8476 if (pool_ref)
8477 {
8478 rtx constant = get_pool_constant (pool_ref);
8479 machine_mode mode = get_pool_mode (pool_ref);
8480 s390_add_constant (pool, constant, mode);
8481 }
8482 }
8483
8484 /* If hot/cold partitioning is enabled we have to make sure that
8485 the literal pool is emitted in the same section where the
8486 initialization of the literal pool base pointer takes place.
8487 emit_pool_after is only used in the non-overflow case on non-z/Arch
8488 CPUs where we can emit the literal pool at the end of the
8489 function body within the text section. */
8490 if (NOTE_P (insn)
8491 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
8492 && !pool->emit_pool_after)
8493 pool->emit_pool_after = PREV_INSN (insn);
8494 }
8495
8496 gcc_assert (pool->pool_insn || pool->size == 0);
8497
8498 if (pool->size >= 4096)
8499 {
8500 /* We're going to chunkify the pool, so remove the main
8501 pool placeholder insn. */
8502 remove_insn (pool->pool_insn);
8503
8504 s390_free_pool (pool);
8505 pool = NULL;
8506 }
8507
8508 /* If the function ends with the section where the literal pool
8509 should be emitted, set the marker to its end. */
8510 if (pool && !pool->emit_pool_after)
8511 pool->emit_pool_after = get_last_insn ();
8512
8513 return pool;
8514 }
8515
8516 /* POOL holds the main literal pool as collected by s390_mainpool_start.
8517 Modify the current function to output the pool constants as well as
8518 the pool register setup instruction. */
8519
8520 static void
8521 s390_mainpool_finish (struct constant_pool *pool)
8522 {
8523 rtx base_reg = cfun->machine->base_reg;
8524
8525 /* If the pool is empty, we're done. */
8526 if (pool->size == 0)
8527 {
8528 /* We don't actually need a base register after all. */
8529 cfun->machine->base_reg = NULL_RTX;
8530
8531 if (pool->pool_insn)
8532 remove_insn (pool->pool_insn);
8533 s390_free_pool (pool);
8534 return;
8535 }
8536
8537 /* We need correct insn addresses. */
8538 shorten_branches (get_insns ());
8539
8540 /* On zSeries, we use a LARL to load the pool register. The pool is
8541 located in the .rodata section, so we emit it after the function. */
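/* This results in code like "larl %r13,.L<pool>"; the base register
   is normally %r13.  */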
8542 if (TARGET_CPU_ZARCH)
8543 {
8544 rtx set = gen_main_base_64 (base_reg, pool->label);
8545 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
8546 INSN_ADDRESSES_NEW (insn, -1);
8547 remove_insn (pool->pool_insn);
8548
8549 insn = get_last_insn ();
8550 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8551 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8552
8553 s390_dump_pool (pool, 0);
8554 }
8555
8556 /* On S/390, if the total size of the function's code plus literal pool
8557 does not exceed 4096 bytes, we use BASR to set up a function base
8558 pointer, and emit the literal pool at the end of the function. */
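/* The 4096-byte limit corresponds to the 12-bit unsigned displacement
   field of base + displacement addressing on S/390.  */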
8559 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
8560 + pool->size + 8 /* alignment slop */ < 4096)
8561 {
8562 rtx set = gen_main_base_31_small (base_reg, pool->label);
8563 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
8564 INSN_ADDRESSES_NEW (insn, -1);
8565 remove_insn (pool->pool_insn);
8566
8567 insn = emit_label_after (pool->label, insn);
8568 INSN_ADDRESSES_NEW (insn, -1);
8569
8570 /* emit_pool_after will be set by s390_mainpool_start to the
8571 last insn of the section where the literal pool should be
8572 emitted. */
8573 insn = pool->emit_pool_after;
8574
8575 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8576 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8577
8578 s390_dump_pool (pool, 1);
8579 }
8580
8581 /* Otherwise, we emit an inline literal pool and use BASR to branch
8582 over it, setting up the pool register at the same time. */
8583 else
8584 {
8585 rtx_code_label *pool_end = gen_label_rtx ();
8586
8587 rtx pat = gen_main_base_31_large (base_reg, pool->label, pool_end);
8588 rtx_insn *insn = emit_jump_insn_after (pat, pool->pool_insn);
8589 JUMP_LABEL (insn) = pool_end;
8590 INSN_ADDRESSES_NEW (insn, -1);
8591 remove_insn (pool->pool_insn);
8592
8593 insn = emit_label_after (pool->label, insn);
8594 INSN_ADDRESSES_NEW (insn, -1);
8595
8596 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8597 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8598
8599 insn = emit_label_after (pool_end, pool->pool_insn);
8600 INSN_ADDRESSES_NEW (insn, -1);
8601
8602 s390_dump_pool (pool, 1);
8603 }
8604
8605
8606 /* Replace all literal pool references. */
8607
8608 for (rtx_insn *insn = get_insns (); insn; insn = NEXT_INSN (insn))
8609 {
8610 if (INSN_P (insn))
8611 replace_ltrel_base (&PATTERN (insn));
8612
8613 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8614 {
8615 rtx addr, pool_ref = NULL_RTX;
8616 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8617 if (pool_ref)
8618 {
8619 if (s390_execute_label (insn))
8620 addr = s390_find_execute (pool, insn);
8621 else
8622 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
8623 get_pool_mode (pool_ref));
8624
8625 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
8626 INSN_CODE (insn) = -1;
8627 }
8628 }
8629 }
8630
8631
8632 /* Free the pool. */
8633 s390_free_pool (pool);
8634 }
8635
8636 /* POOL holds the main literal pool as collected by s390_mainpool_start.
8637 We have decided we cannot use this pool, so revert all changes
8638 to the current function that were done by s390_mainpool_start. */
8639 static void
8640 s390_mainpool_cancel (struct constant_pool *pool)
8641 {
8642 /* We didn't actually change the instruction stream, so simply
8643 free the pool memory. */
8644 s390_free_pool (pool);
8645 }
8646
8647
8648 /* Chunkify the literal pool. */
8649
8650 #define S390_POOL_CHUNK_MIN 0xc00
8651 #define S390_POOL_CHUNK_MAX 0xe00
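/* Keep each pool chunk comfortably below the 4096-byte range of a
   12-bit displacement, leaving headroom for alignment padding and the
   base register reload insns inserted between chunks.  */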
8652
8653 static struct constant_pool *
8654 s390_chunkify_start (void)
8655 {
8656 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
8657 int extra_size = 0;
8658 bitmap far_labels;
8659 rtx pending_ltrel = NULL_RTX;
8660 rtx_insn *insn;
8661
8662 rtx (*gen_reload_base) (rtx, rtx) =
8663 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
8664
8665
8666 /* We need correct insn addresses. */
8667
8668 shorten_branches (get_insns ());
8669
8670 /* Scan all insns and move literals to pool chunks. */
8671
8672 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8673 {
8674 bool section_switch_p = false;
8675
8676 /* Check for pending LTREL_BASE. */
8677 if (INSN_P (insn))
8678 {
8679 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
8680 if (ltrel_base)
8681 {
8682 gcc_assert (ltrel_base == pending_ltrel);
8683 pending_ltrel = NULL_RTX;
8684 }
8685 }
8686
8687 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
8688 {
8689 if (!curr_pool)
8690 curr_pool = s390_start_pool (&pool_list, insn);
8691
8692 s390_add_execute (curr_pool, insn);
8693 s390_add_pool_insn (curr_pool, insn);
8694 }
8695 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8696 {
8697 rtx pool_ref = NULL_RTX;
8698 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8699 if (pool_ref)
8700 {
8701 rtx constant = get_pool_constant (pool_ref);
8702 machine_mode mode = get_pool_mode (pool_ref);
8703
8704 if (!curr_pool)
8705 curr_pool = s390_start_pool (&pool_list, insn);
8706
8707 s390_add_constant (curr_pool, constant, mode);
8708 s390_add_pool_insn (curr_pool, insn);
8709
8710 /* Don't split the pool chunk between a LTREL_OFFSET load
8711 and the corresponding LTREL_BASE. */
8712 if (GET_CODE (constant) == CONST
8713 && GET_CODE (XEXP (constant, 0)) == UNSPEC
8714 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
8715 {
8716 gcc_assert (!pending_ltrel);
8717 pending_ltrel = pool_ref;
8718 }
8719 }
8720 }
8721
8722 if (JUMP_P (insn) || JUMP_TABLE_DATA_P (insn) || LABEL_P (insn))
8723 {
8724 if (curr_pool)
8725 s390_add_pool_insn (curr_pool, insn);
8726 /* An LTREL_BASE must follow within the same basic block. */
8727 gcc_assert (!pending_ltrel);
8728 }
8729
8730 if (NOTE_P (insn))
8731 switch (NOTE_KIND (insn))
8732 {
8733 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
8734 section_switch_p = true;
8735 break;
8736 case NOTE_INSN_VAR_LOCATION:
8737 case NOTE_INSN_CALL_ARG_LOCATION:
8738 continue;
8739 default:
8740 break;
8741 }
8742
8743 if (!curr_pool
8744 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
8745 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
8746 continue;
8747
8748 if (TARGET_CPU_ZARCH)
8749 {
8750 if (curr_pool->size < S390_POOL_CHUNK_MAX)
8751 continue;
8752
8753 s390_end_pool (curr_pool, NULL);
8754 curr_pool = NULL;
8755 }
8756 else
8757 {
8758 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
8759 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
8760 + extra_size;
8761
8762 /* We will later have to insert base register reload insns.
8763 Those will have an effect on code size, which we need to
8764 consider here. This calculation makes rather pessimistic
8765 worst-case assumptions. */
8766 if (LABEL_P (insn))
8767 extra_size += 6;
8768
8769 if (chunk_size < S390_POOL_CHUNK_MIN
8770 && curr_pool->size < S390_POOL_CHUNK_MIN
8771 && !section_switch_p)
8772 continue;
8773
8774 /* Pool chunks can only be inserted after BARRIERs ... */
8775 if (BARRIER_P (insn))
8776 {
8777 s390_end_pool (curr_pool, insn);
8778 curr_pool = NULL;
8779 extra_size = 0;
8780 }
8781
8782 /* ... so if we don't find one in time, create one. */
8783 else if (chunk_size > S390_POOL_CHUNK_MAX
8784 || curr_pool->size > S390_POOL_CHUNK_MAX
8785 || section_switch_p)
8786 {
8787 rtx_insn *label, *jump, *barrier, *next, *prev;
8788
8789 if (!section_switch_p)
8790 {
8791 /* We can insert the barrier only after a 'real' insn. */
8792 if (! NONJUMP_INSN_P (insn) && ! CALL_P (insn))
8793 continue;
8794 if (get_attr_length (insn) == 0)
8795 continue;
8796 /* Don't separate LTREL_BASE from the corresponding
8797 LTREL_OFFSET load. */
8798 if (pending_ltrel)
8799 continue;
8800 next = insn;
8801 do
8802 {
8803 insn = next;
8804 next = NEXT_INSN (insn);
8805 }
8806 while (next
8807 && NOTE_P (next)
8808 && (NOTE_KIND (next) == NOTE_INSN_VAR_LOCATION
8809 || NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION));
8810 }
8811 else
8812 {
8813 gcc_assert (!pending_ltrel);
8814
8815 /* The old pool has to end before the section switch
8816 note in order to make it part of the current
8817 section. */
8818 insn = PREV_INSN (insn);
8819 }
8820
8821 label = gen_label_rtx ();
8822 prev = insn;
8823 if (prev && NOTE_P (prev))
8824 prev = prev_nonnote_insn (prev);
8825 if (prev)
8826 jump = emit_jump_insn_after_setloc (gen_jump (label), insn,
8827 INSN_LOCATION (prev));
8828 else
8829 jump = emit_jump_insn_after_noloc (gen_jump (label), insn);
8830 barrier = emit_barrier_after (jump);
8831 insn = emit_label_after (label, barrier);
8832 JUMP_LABEL (jump) = label;
8833 LABEL_NUSES (label) = 1;
8834
8835 INSN_ADDRESSES_NEW (jump, -1);
8836 INSN_ADDRESSES_NEW (barrier, -1);
8837 INSN_ADDRESSES_NEW (insn, -1);
8838
8839 s390_end_pool (curr_pool, barrier);
8840 curr_pool = NULL;
8841 extra_size = 0;
8842 }
8843 }
8844 }
8845
8846 if (curr_pool)
8847 s390_end_pool (curr_pool, NULL);
8848 gcc_assert (!pending_ltrel);
8849
8850 /* Find all labels that are branched into
8851 from an insn belonging to a different chunk. */
8852
8853 far_labels = BITMAP_ALLOC (NULL);
8854
8855 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8856 {
8857 rtx_jump_table_data *table;
8858
8859 /* Labels marked with LABEL_PRESERVE_P can be the target
8860 of non-local jumps, so we have to mark them.
8861 The same holds for named labels.
8862
8863 Don't do that, however, if it is the label before
8864 a jump table. */
8865
8866 if (LABEL_P (insn)
8867 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
8868 {
8869 rtx_insn *vec_insn = NEXT_INSN (insn);
8870 if (! vec_insn || ! JUMP_TABLE_DATA_P (vec_insn))
8871 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
8872 }
8873 /* Check potential targets in a table jump (casesi_jump). */
8874 else if (tablejump_p (insn, NULL, &table))
8875 {
8876 rtx vec_pat = PATTERN (table);
8877 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
8878
8879 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
8880 {
8881 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
8882
8883 if (s390_find_pool (pool_list, label)
8884 != s390_find_pool (pool_list, insn))
8885 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
8886 }
8887 }
8888 /* If we have a direct jump (conditional or unconditional),
8889 check all potential targets. */
8890 else if (JUMP_P (insn))
8891 {
8892 rtx pat = PATTERN (insn);
8893
8894 if (GET_CODE (pat) == PARALLEL)
8895 pat = XVECEXP (pat, 0, 0);
8896
8897 if (GET_CODE (pat) == SET)
8898 {
8899 rtx label = JUMP_LABEL (insn);
8900 if (label && !ANY_RETURN_P (label))
8901 {
8902 if (s390_find_pool (pool_list, label)
8903 != s390_find_pool (pool_list, insn))
8904 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
8905 }
8906 }
8907 }
8908 }
8909
8910 /* Insert base register reload insns before every pool. */
8911
8912 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
8913 {
8914 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
8915 curr_pool->label);
8916 rtx_insn *insn = curr_pool->first_insn;
8917 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
8918 }
8919
8920 /* Insert base register reload insns at every far label. */
8921
8922 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8923 if (LABEL_P (insn)
8924 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
8925 {
8926 struct constant_pool *pool = s390_find_pool (pool_list, insn);
8927 if (pool)
8928 {
8929 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
8930 pool->label);
8931 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
8932 }
8933 }
8934
8935
8936 BITMAP_FREE (far_labels);
8937
8938
8939 /* Recompute insn addresses. */
8940
8941 init_insn_lengths ();
8942 shorten_branches (get_insns ());
8943
8944 return pool_list;
8945 }
8946
8947 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
8948 After we have decided to use this list, finish implementing
8949 all changes to the current function as required. */
8950
8951 static void
8952 s390_chunkify_finish (struct constant_pool *pool_list)
8953 {
8954 struct constant_pool *curr_pool = NULL;
8955 rtx_insn *insn;
8956
8957
8958 /* Replace all literal pool references. */
8959
8960 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8961 {
8962 if (INSN_P (insn))
8963 replace_ltrel_base (&PATTERN (insn));
8964
8965 curr_pool = s390_find_pool (pool_list, insn);
8966 if (!curr_pool)
8967 continue;
8968
8969 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8970 {
8971 rtx addr, pool_ref = NULL_RTX;
8972 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8973 if (pool_ref)
8974 {
8975 if (s390_execute_label (insn))
8976 addr = s390_find_execute (curr_pool, insn);
8977 else
8978 addr = s390_find_constant (curr_pool,
8979 get_pool_constant (pool_ref),
8980 get_pool_mode (pool_ref));
8981
8982 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
8983 INSN_CODE (insn) = -1;
8984 }
8985 }
8986 }
8987
8988 /* Dump out all literal pools. */
8989
8990 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
8991 s390_dump_pool (curr_pool, 0);
8992
8993 /* Free pool list. */
8994
8995 while (pool_list)
8996 {
8997 struct constant_pool *next = pool_list->next;
8998 s390_free_pool (pool_list);
8999 pool_list = next;
9000 }
9001 }
9002
9003 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
9004 We have decided we cannot use this list, so revert all changes
9005 to the current function that were done by s390_chunkify_start. */
9006
9007 static void
9008 s390_chunkify_cancel (struct constant_pool *pool_list)
9009 {
9010 struct constant_pool *curr_pool = NULL;
9011 rtx_insn *insn;
9012
9013 /* Remove all pool placeholder insns. */
9014
9015 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
9016 {
9017 /* Did we insert an extra barrier? Remove it. */
9018 rtx_insn *barrier = PREV_INSN (curr_pool->pool_insn);
9019 rtx_insn *jump = barrier? PREV_INSN (barrier) : NULL;
9020 rtx_insn *label = NEXT_INSN (curr_pool->pool_insn);
9021
9022 if (jump && JUMP_P (jump)
9023 && barrier && BARRIER_P (barrier)
9024 && label && LABEL_P (label)
9025 && GET_CODE (PATTERN (jump)) == SET
9026 && SET_DEST (PATTERN (jump)) == pc_rtx
9027 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
9028 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
9029 {
9030 remove_insn (jump);
9031 remove_insn (barrier);
9032 remove_insn (label);
9033 }
9034
9035 remove_insn (curr_pool->pool_insn);
9036 }
9037
9038 /* Remove all base register reload insns. */
9039
9040 for (insn = get_insns (); insn; )
9041 {
9042 rtx_insn *next_insn = NEXT_INSN (insn);
9043
9044 if (NONJUMP_INSN_P (insn)
9045 && GET_CODE (PATTERN (insn)) == SET
9046 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
9047 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
9048 remove_insn (insn);
9049
9050 insn = next_insn;
9051 }
9052
9053 /* Free pool list. */
9054
9055 while (pool_list)
9056 {
9057 struct constant_pool *next = pool_list->next;
9058 s390_free_pool (pool_list);
9059 pool_list = next;
9060 }
9061 }
9062
9063 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
9064
9065 void
9066 s390_output_pool_entry (rtx exp, machine_mode mode, unsigned int align)
9067 {
9068 switch (GET_MODE_CLASS (mode))
9069 {
9070 case MODE_FLOAT:
9071 case MODE_DECIMAL_FLOAT:
9072 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
9073
9074 assemble_real (*CONST_DOUBLE_REAL_VALUE (exp), mode, align);
9075 break;
9076
9077 case MODE_INT:
9078 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
9079 mark_symbol_refs_as_used (exp);
9080 break;
9081
9082 case MODE_VECTOR_INT:
9083 case MODE_VECTOR_FLOAT:
9084 {
9085 int i;
9086 machine_mode inner_mode;
9087 gcc_assert (GET_CODE (exp) == CONST_VECTOR);
9088
9089 inner_mode = GET_MODE_INNER (GET_MODE (exp));
9090 for (i = 0; i < XVECLEN (exp, 0); i++)
9091 s390_output_pool_entry (XVECEXP (exp, 0, i),
9092 inner_mode,
9093 i == 0
9094 ? align
9095 : GET_MODE_BITSIZE (inner_mode));
9096 }
9097 break;
9098
9099 default:
9100 gcc_unreachable ();
9101 }
9102 }
9103
9104
9105 /* Return an RTL expression representing the value of the return address
9106 for the frame COUNT steps up from the current frame. FRAME is the
9107 frame pointer of that frame. */
9108
9109 rtx
9110 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
9111 {
9112 int offset;
9113 rtx addr;
9114
9115 /* Without backchain, we fail for all but the current frame. */
9116
9117 if (!TARGET_BACKCHAIN && count > 0)
9118 return NULL_RTX;
9119
9120 /* For the current frame, we need to make sure the initial
9121 value of RETURN_REGNUM is actually saved. */
9122
9123 if (count == 0)
9124 {
9125 /* On non-z architectures branch splitting could overwrite r14. */
9126 if (TARGET_CPU_ZARCH)
9127 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
9128 else
9129 {
9130 cfun_frame_layout.save_return_addr_p = true;
9131 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
9132 }
9133 }
9134
9135 if (TARGET_PACKED_STACK)
9136 offset = -2 * UNITS_PER_LONG;
9137 else
9138 offset = RETURN_REGNUM * UNITS_PER_LONG;
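/* With the standard frame layout GPR N is saved at offset
   N * UNITS_PER_LONG, so the return address slot is at offset 112
   from FRAME on 64-bit targets and at offset 56 on 31-bit targets.  */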
9139
9140 addr = plus_constant (Pmode, frame, offset);
9141 addr = memory_address (Pmode, addr);
9142 return gen_rtx_MEM (Pmode, addr);
9143 }
9144
9145 /* Return an RTL expression representing the back chain stored in
9146 the current stack frame. */
9147
9148 rtx
9149 s390_back_chain_rtx (void)
9150 {
9151 rtx chain;
9152
9153 gcc_assert (TARGET_BACKCHAIN);
9154
9155 if (TARGET_PACKED_STACK)
9156 chain = plus_constant (Pmode, stack_pointer_rtx,
9157 STACK_POINTER_OFFSET - UNITS_PER_LONG);
9158 else
9159 chain = stack_pointer_rtx;
9160
9161 chain = gen_rtx_MEM (Pmode, chain);
9162 return chain;
9163 }
9164
9165 /* Find the first call-clobbered register not used in the current function.
9166 It could be used as a base register in a leaf function
9167 or for holding the return address before the epilogue. */
9168
9169 static int
9170 find_unused_clobbered_reg (void)
9171 {
9172 int i;
9173 for (i = 0; i < 6; i++)
9174 if (!df_regs_ever_live_p (i))
9175 return i;
9176 return 0;
9177 }
9178
9179
9180 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
9181 clobbered hard regs in SETREG. */
9182
9183 static void
9184 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
9185 {
9186 char *regs_ever_clobbered = (char *)data;
9187 unsigned int i, regno;
9188 machine_mode mode = GET_MODE (setreg);
9189
9190 if (GET_CODE (setreg) == SUBREG)
9191 {
9192 rtx inner = SUBREG_REG (setreg);
9193 if (!GENERAL_REG_P (inner) && !FP_REG_P (inner))
9194 return;
9195 regno = subreg_regno (setreg);
9196 }
9197 else if (GENERAL_REG_P (setreg) || FP_REG_P (setreg))
9198 regno = REGNO (setreg);
9199 else
9200 return;
9201
9202 for (i = regno;
9203 i < regno + HARD_REGNO_NREGS (regno, mode);
9204 i++)
9205 regs_ever_clobbered[i] = 1;
9206 }
9207
9208 /* Walks through all basic blocks of the current function looking
9209 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
9210 of the passed char array REGS_EVER_CLOBBERED are set to one for
9211 each of those regs. */
9212
9213 static void
9214 s390_regs_ever_clobbered (char regs_ever_clobbered[])
9215 {
9216 basic_block cur_bb;
9217 rtx_insn *cur_insn;
9218 unsigned int i;
9219
9220 memset (regs_ever_clobbered, 0, 32);
9221
9222 /* For non-leaf functions we have to consider all call clobbered regs to be
9223 clobbered. */
9224 if (!crtl->is_leaf)
9225 {
9226 for (i = 0; i < 32; i++)
9227 regs_ever_clobbered[i] = call_really_used_regs[i];
9228 }
9229
9230 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
9231 this work is done by liveness analysis (mark_regs_live_at_end).
9232 Special care is needed for functions containing landing pads. Landing pads
9233 may use the eh registers, but the code which sets these registers is not
9234 contained in that function. Hence s390_regs_ever_clobbered is not able to
9235 deal with this automatically. */
9236 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
9237 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
9238 if (crtl->calls_eh_return
9239 || (cfun->machine->has_landing_pad_p
9240 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
9241 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
9242
9243 /* For nonlocal gotos all call-saved registers have to be saved.
9244 This flag is also set for the unwinding code in libgcc.
9245 See expand_builtin_unwind_init. For regs_ever_live this is done by
9246 reload. */
9247 if (crtl->saves_all_registers)
9248 for (i = 0; i < 32; i++)
9249 if (!call_really_used_regs[i])
9250 regs_ever_clobbered[i] = 1;
9251
9252 FOR_EACH_BB_FN (cur_bb, cfun)
9253 {
9254 FOR_BB_INSNS (cur_bb, cur_insn)
9255 {
9256 rtx pat;
9257
9258 if (!INSN_P (cur_insn))
9259 continue;
9260
9261 pat = PATTERN (cur_insn);
9262
9263 /* Ignore GPR restore insns. */
9264 if (epilogue_completed && RTX_FRAME_RELATED_P (cur_insn))
9265 {
9266 if (GET_CODE (pat) == SET
9267 && GENERAL_REG_P (SET_DEST (pat)))
9268 {
9269 /* lgdr */
9270 if (GET_MODE (SET_SRC (pat)) == DImode
9271 && FP_REG_P (SET_SRC (pat)))
9272 continue;
9273
9274 /* l / lg */
9275 if (GET_CODE (SET_SRC (pat)) == MEM)
9276 continue;
9277 }
9278
9279 /* lm / lmg */
9280 if (GET_CODE (pat) == PARALLEL
9281 && load_multiple_operation (pat, VOIDmode))
9282 continue;
9283 }
9284
9285 note_stores (pat,
9286 s390_reg_clobbered_rtx,
9287 regs_ever_clobbered);
9288 }
9289 }
9290 }
9291
9292 /* Determine the frame area which actually has to be accessed
9293 in the function epilogue. The values are stored at the
9294 given pointers AREA_BOTTOM (address of the lowest used stack
9295 address) and AREA_TOP (address of the first item which does
9296 not belong to the stack frame). */
9297
9298 static void
9299 s390_frame_area (int *area_bottom, int *area_top)
9300 {
9301 int b, t;
9302
9303 b = INT_MAX;
9304 t = INT_MIN;
9305
9306 if (cfun_frame_layout.first_restore_gpr != -1)
9307 {
9308 b = (cfun_frame_layout.gprs_offset
9309 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
9310 t = b + (cfun_frame_layout.last_restore_gpr
9311 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
9312 }
9313
9314 if (TARGET_64BIT && cfun_save_high_fprs_p)
9315 {
9316 b = MIN (b, cfun_frame_layout.f8_offset);
9317 t = MAX (t, (cfun_frame_layout.f8_offset
9318 + cfun_frame_layout.high_fprs * 8));
9319 }
9320
9321 if (!TARGET_64BIT)
9322 {
9323 if (cfun_fpr_save_p (FPR4_REGNUM))
9324 {
9325 b = MIN (b, cfun_frame_layout.f4_offset);
9326 t = MAX (t, cfun_frame_layout.f4_offset + 8);
9327 }
9328 if (cfun_fpr_save_p (FPR6_REGNUM))
9329 {
9330 b = MIN (b, cfun_frame_layout.f4_offset + 8);
9331 t = MAX (t, cfun_frame_layout.f4_offset + 16);
9332 }
9333 }
9334 *area_bottom = b;
9335 *area_top = t;
9336 }
9337 /* Update gpr_save_slots in the frame layout trying to make use of
9338 FPRs as GPR save slots.
9339 This is a helper routine of s390_register_info. */
9340
9341 static void
9342 s390_register_info_gprtofpr ()
9343 {
9344 int save_reg_slot = FPR0_REGNUM;
9345 int i, j;
9346
9347 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
9348 return;
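/* Saving GPRs in unused call-clobbered FPRs via ldgr/lgdr avoids
   touching the stack; this is only done in leaf functions on z10 or
   later with hardware floating point enabled.  */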
9349
9350 for (i = 15; i >= 6; i--)
9351 {
9352 if (cfun_gpr_save_slot (i) == SAVE_SLOT_NONE)
9353 continue;
9354
9355 /* Advance to the next FP register which can be used as a
9356 GPR save slot. */
9357 while ((!call_really_used_regs[save_reg_slot]
9358 || df_regs_ever_live_p (save_reg_slot)
9359 || cfun_fpr_save_p (save_reg_slot))
9360 && FP_REGNO_P (save_reg_slot))
9361 save_reg_slot++;
9362 if (!FP_REGNO_P (save_reg_slot))
9363 {
9364 /* We only want to use ldgr/lgdr if we can get rid of
9365 stm/lm entirely. So undo the gpr slot allocation in
9366 case we ran out of FPR save slots. */
9367 for (j = 6; j <= 15; j++)
9368 if (FP_REGNO_P (cfun_gpr_save_slot (j)))
9369 cfun_gpr_save_slot (j) = SAVE_SLOT_STACK;
9370 break;
9371 }
9372 cfun_gpr_save_slot (i) = save_reg_slot++;
9373 }
9374 }
9375
9376 /* Set the bits in fpr_bitmap for FPRs which need to be saved due to
9377 stdarg.
9378 This is a helper routine for s390_register_info. */
9379
9380 static void
9381 s390_register_info_stdarg_fpr ()
9382 {
9383 int i;
9384 int min_fpr;
9385 int max_fpr;
9386
9387 /* Save the FP argument regs for stdarg: f0, f2 for 31 bit and
9388 f0, f2, f4, f6 for 64 bit. */
9389 if (!cfun->stdarg
9390 || !TARGET_HARD_FLOAT
9391 || !cfun->va_list_fpr_size
9392 || crtl->args.info.fprs >= FP_ARG_NUM_REG)
9393 return;
9394
9395 min_fpr = crtl->args.info.fprs;
9396 max_fpr = min_fpr + cfun->va_list_fpr_size - 1;
9397 if (max_fpr >= FP_ARG_NUM_REG)
9398 max_fpr = FP_ARG_NUM_REG - 1;
9399
9400 /* FPR argument regs start at f0. */
9401 min_fpr += FPR0_REGNUM;
9402 max_fpr += FPR0_REGNUM;
9403
9404 for (i = min_fpr; i <= max_fpr; i++)
9405 cfun_set_fpr_save (i);
9406 }
9407
9408 /* Reserve the GPR save slots for GPRs which need to be saved due to
9409 stdarg.
9410 This is a helper routine for s390_register_info. */
9411
9412 static void
9413 s390_register_info_stdarg_gpr ()
9414 {
9415 int i;
9416 int min_gpr;
9417 int max_gpr;
9418
9419 if (!cfun->stdarg
9420 || !cfun->va_list_gpr_size
9421 || crtl->args.info.gprs >= GP_ARG_NUM_REG)
9422 return;
9423
9424 min_gpr = crtl->args.info.gprs;
9425 max_gpr = min_gpr + cfun->va_list_gpr_size - 1;
9426 if (max_gpr >= GP_ARG_NUM_REG)
9427 max_gpr = GP_ARG_NUM_REG - 1;
9428
9429 /* GPR argument regs start at r2. */
9430 min_gpr += GPR2_REGNUM;
9431 max_gpr += GPR2_REGNUM;
9432
9433 /* If r6 was supposed to be saved into an FPR and now needs to go to
9434 the stack for vararg we have to adjust the restore range to make
9435 sure that the restore is done from stack as well. */
9436 if (FP_REGNO_P (cfun_gpr_save_slot (GPR6_REGNUM))
9437 && min_gpr <= GPR6_REGNUM
9438 && max_gpr >= GPR6_REGNUM)
9439 {
9440 if (cfun_frame_layout.first_restore_gpr == -1
9441 || cfun_frame_layout.first_restore_gpr > GPR6_REGNUM)
9442 cfun_frame_layout.first_restore_gpr = GPR6_REGNUM;
9443 if (cfun_frame_layout.last_restore_gpr == -1
9444 || cfun_frame_layout.last_restore_gpr < GPR6_REGNUM)
9445 cfun_frame_layout.last_restore_gpr = GPR6_REGNUM;
9446 }
9447
9448 if (cfun_frame_layout.first_save_gpr == -1
9449 || cfun_frame_layout.first_save_gpr > min_gpr)
9450 cfun_frame_layout.first_save_gpr = min_gpr;
9451
9452 if (cfun_frame_layout.last_save_gpr == -1
9453 || cfun_frame_layout.last_save_gpr < max_gpr)
9454 cfun_frame_layout.last_save_gpr = max_gpr;
9455
9456 for (i = min_gpr; i <= max_gpr; i++)
9457 cfun_gpr_save_slot (i) = SAVE_SLOT_STACK;
9458 }
9459
9460 /* Calculate the save and restore ranges for stm(g) and lm(g) in the
9461 prologue and epilogue. */
9462
9463 static void
9464 s390_register_info_set_ranges ()
9465 {
9466 int i, j;
9467
9468 /* Find the first and the last save slot supposed to use the stack
9469 to set the restore range.
9470 Vararg regs might be marked to be saved on the stack but only the
9471 call-saved regs really need restoring (i.e. r6). This code
9472 assumes that the vararg regs have not yet been recorded in
9473 cfun_gpr_save_slot. */
9474 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != SAVE_SLOT_STACK; i++);
9475 for (j = 15; j > i && cfun_gpr_save_slot (j) != SAVE_SLOT_STACK; j--);
9476 cfun_frame_layout.first_restore_gpr = (i == 16) ? -1 : i;
9477 cfun_frame_layout.last_restore_gpr = (i == 16) ? -1 : j;
9478 cfun_frame_layout.first_save_gpr = (i == 16) ? -1 : i;
9479 cfun_frame_layout.last_save_gpr = (i == 16) ? -1 : j;
9480 }
9481
9482 /* The GPR and FPR save slots in cfun->machine->frame_layout are set
9483 for registers which need to be saved in function prologue.
9484 This function can be used until the insns emitted for save/restore
9485 of the regs are visible in the RTL stream. */
9486
9487 static void
9488 s390_register_info ()
9489 {
9490 int i;
9491 char clobbered_regs[32];
9492
9493 gcc_assert (!epilogue_completed);
9494
9495 if (reload_completed)
9496 /* After reload we rely on our own routine to determine which
9497 registers need saving. */
9498 s390_regs_ever_clobbered (clobbered_regs);
9499 else
9500 /* During reload we use regs_ever_live as a base since reload
9501 makes changes there which we would otherwise not be aware
9502 of. */
9503 for (i = 0; i < 32; i++)
9504 clobbered_regs[i] = df_regs_ever_live_p (i);
9505
9506 for (i = 0; i < 32; i++)
9507 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
9508
9509 /* Mark the call-saved FPRs which need to be saved.
9510 This needs to be done before checking the special GPRs since the
9511 stack pointer usage depends on whether high FPRs have to be saved
9512 or not. */
9513 cfun_frame_layout.fpr_bitmap = 0;
9514 cfun_frame_layout.high_fprs = 0;
9515 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
9516 if (clobbered_regs[i] && !call_really_used_regs[i])
9517 {
9518 cfun_set_fpr_save (i);
9519 if (i >= FPR8_REGNUM)
9520 cfun_frame_layout.high_fprs++;
9521 }
9522
9523 /* Register 12 is used for the GOT address, but also as a temp in the
9524 prologue for split-stack stdarg functions (unless r14 is available). */
9525 clobbered_regs[12]
9526 |= ((flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
9527 || (flag_split_stack && cfun->stdarg
9528 && (crtl->is_leaf || TARGET_TPF_PROFILING
9529 || has_hard_reg_initial_val (Pmode, RETURN_REGNUM))));
9530
9531 clobbered_regs[BASE_REGNUM]
9532 |= (cfun->machine->base_reg
9533 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
9534
9535 clobbered_regs[HARD_FRAME_POINTER_REGNUM]
9536 |= !!frame_pointer_needed;
9537
9538 /* On pre-z900 machines this decision might not be made until the
9539 machine dependent reorg pass.
9540 save_return_addr_p will only be set on non-zarch machines, so
9541 there is no risk that r14 goes into an FPR instead of a stack
9542 slot. */
9543 clobbered_regs[RETURN_REGNUM]
9544 |= (!crtl->is_leaf
9545 || TARGET_TPF_PROFILING
9546 || cfun->machine->split_branches_pending_p
9547 || cfun_frame_layout.save_return_addr_p
9548 || crtl->calls_eh_return);
9549
9550 clobbered_regs[STACK_POINTER_REGNUM]
9551 |= (!crtl->is_leaf
9552 || TARGET_TPF_PROFILING
9553 || cfun_save_high_fprs_p
9554 || get_frame_size () > 0
9555 || (reload_completed && cfun_frame_layout.frame_size > 0)
9556 || cfun->calls_alloca);
9557
9558 memset (cfun_frame_layout.gpr_save_slots, SAVE_SLOT_NONE, 16);
9559
9560 for (i = 6; i < 16; i++)
9561 if (clobbered_regs[i])
9562 cfun_gpr_save_slot (i) = SAVE_SLOT_STACK;
9563
9564 s390_register_info_stdarg_fpr ();
9565 s390_register_info_gprtofpr ();
9566 s390_register_info_set_ranges ();
9567 /* stdarg functions might need to save GPRs 2 to 6. This might
9568 override the GPR->FPR save decision made by
9569 s390_register_info_gprtofpr for r6 since vararg regs must go to
9570 the stack. */
9571 s390_register_info_stdarg_gpr ();
9572 }
9573
9574 /* This function is called by s390_optimize_prologue in order to get
9575 rid of unnecessary GPR save/restore instructions. The register info
9576 for the GPRs is re-computed and the ranges are re-calculated. */
9577
9578 static void
9579 s390_optimize_register_info ()
9580 {
9581 char clobbered_regs[32];
9582 int i;
9583
9584 gcc_assert (epilogue_completed);
9585 gcc_assert (!cfun->machine->split_branches_pending_p);
9586
9587 s390_regs_ever_clobbered (clobbered_regs);
9588
9589 for (i = 0; i < 32; i++)
9590 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
9591
9592 /* There is still special treatment needed for cases invisible to
9593 s390_regs_ever_clobbered. */
9594 clobbered_regs[RETURN_REGNUM]
9595 |= (TARGET_TPF_PROFILING
9596 /* When expanding builtin_return_addr in ESA mode we do not
9597 know whether r14 will later be needed as scratch reg when
9598 doing branch splitting. So the builtin always accesses the
9599 r14 save slot and we need to stick to the save/restore
9600 decision for r14 even if it turns out that it didn't get
9601 clobbered. */
9602 || cfun_frame_layout.save_return_addr_p
9603 || crtl->calls_eh_return);
9604
9605 memset (cfun_frame_layout.gpr_save_slots, SAVE_SLOT_NONE, 6);
9606
9607 for (i = 6; i < 16; i++)
9608 if (!clobbered_regs[i])
9609 cfun_gpr_save_slot (i) = SAVE_SLOT_NONE;
9610
9611 s390_register_info_set_ranges ();
9612 s390_register_info_stdarg_gpr ();
9613 }
9614
9615 /* Fill cfun->machine with info about frame of current function. */
9616
9617 static void
9618 s390_frame_info (void)
9619 {
9620 HOST_WIDE_INT lowest_offset;
9621
9622 cfun_frame_layout.first_save_gpr_slot = cfun_frame_layout.first_save_gpr;
9623 cfun_frame_layout.last_save_gpr_slot = cfun_frame_layout.last_save_gpr;
9624
9625 /* The va_arg builtin uses a constant distance of 16 *
9626 UNITS_PER_LONG (r0-r15) to reach the FPRs from the reg_save_area
9627 pointer. So even if we are going to save the stack pointer in an
9628 FPR we need the stack space in order to keep the offsets
9629 correct. */
9630 if (cfun->stdarg && cfun_save_arg_fprs_p)
9631 {
9632 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
9633
9634 if (cfun_frame_layout.first_save_gpr_slot == -1)
9635 cfun_frame_layout.first_save_gpr_slot = STACK_POINTER_REGNUM;
9636 }
9637
9638 cfun_frame_layout.frame_size = get_frame_size ();
9639 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
9640 fatal_error (input_location,
9641 "total size of local variables exceeds architecture limit");
9642
9643 if (!TARGET_PACKED_STACK)
9644 {
9645 /* Fixed stack layout. */
9646 cfun_frame_layout.backchain_offset = 0;
9647 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
9648 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
9649 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
9650 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
9651 * UNITS_PER_LONG);
9652 }
9653 else if (TARGET_BACKCHAIN)
9654 {
9655 /* Kernel stack layout - packed stack, backchain, no float */
9656 gcc_assert (TARGET_SOFT_FLOAT);
9657 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
9658 - UNITS_PER_LONG);
9659
9660 /* The distance between the backchain and the return address
9661 save slot must not change. So we always need a slot for the
9662 stack pointer which resides in between. */
9663 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
9664
9665 cfun_frame_layout.gprs_offset
9666 = cfun_frame_layout.backchain_offset - cfun_gprs_save_area_size;
9667
9668 /* FPRs will not be saved. Nevertheless pick sane values to
9669 keep area calculations valid. */
9670 cfun_frame_layout.f0_offset =
9671 cfun_frame_layout.f4_offset =
9672 cfun_frame_layout.f8_offset = cfun_frame_layout.gprs_offset;
9673 }
9674 else
9675 {
9676 int num_fprs;
9677
9678 /* Packed stack layout without backchain. */
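/* The offsets computed below lay out the save slots downwards from
   STACK_POINTER_OFFSET in this order: f4/f6, f0/f2, the GPR save
   area, and finally the high FPRs f8-f15.  */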
9679
9680 /* With stdarg FPRs need their dedicated slots. */
9681 num_fprs = (TARGET_64BIT && cfun->stdarg ? 2
9682 : (cfun_fpr_save_p (FPR4_REGNUM) +
9683 cfun_fpr_save_p (FPR6_REGNUM)));
9684 cfun_frame_layout.f4_offset = STACK_POINTER_OFFSET - 8 * num_fprs;
9685
9686 num_fprs = (cfun->stdarg ? 2
9687 : (cfun_fpr_save_p (FPR0_REGNUM)
9688 + cfun_fpr_save_p (FPR2_REGNUM)));
9689 cfun_frame_layout.f0_offset = cfun_frame_layout.f4_offset - 8 * num_fprs;
9690
9691 cfun_frame_layout.gprs_offset
9692 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
9693
9694 cfun_frame_layout.f8_offset = (cfun_frame_layout.gprs_offset
9695 - cfun_frame_layout.high_fprs * 8);
9696 }
9697
9698 if (cfun_save_high_fprs_p)
9699 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
9700
9701 if (!crtl->is_leaf)
9702 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
9703
9704 /* In the following cases we have to allocate a STACK_POINTER_OFFSET
9705 sized area at the bottom of the stack. This is required also for
9706 leaf functions. When GCC generates a local stack reference it
9707 will always add STACK_POINTER_OFFSET to all these references. */
9708 if (crtl->is_leaf
9709 && !TARGET_TPF_PROFILING
9710 && cfun_frame_layout.frame_size == 0
9711 && !cfun->calls_alloca)
9712 return;
9713
9714 /* Calculate the number of bytes we have used in our own register
9715 save area. With the packed stack layout we can re-use the
9716 remaining bytes for normal stack elements. */
9717
9718 if (TARGET_PACKED_STACK)
9719 lowest_offset = MIN (MIN (cfun_frame_layout.f0_offset,
9720 cfun_frame_layout.f4_offset),
9721 cfun_frame_layout.gprs_offset);
9722 else
9723 lowest_offset = 0;
9724
9725 if (TARGET_BACKCHAIN)
9726 lowest_offset = MIN (lowest_offset, cfun_frame_layout.backchain_offset);
9727
9728 cfun_frame_layout.frame_size += STACK_POINTER_OFFSET - lowest_offset;
9729
9730 /* Under 31 bit, if an odd number of GPRs has to be saved, we have to
9731 adjust the frame size to maintain the 8 byte alignment of stack
9732 frames. */
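/* E.g. with a STACK_BOUNDARY of 64 bits (8 bytes) a raw frame size of
   52 bytes is rounded up to 56.  */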
9733 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
9734 STACK_BOUNDARY / BITS_PER_UNIT - 1)
9735 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
9736 }
9737
9738 /* Generate frame layout. Fills in register and frame data for the current
9739 function in cfun->machine. This routine can be called multiple times;
9740 it will re-do the complete frame layout every time. */
9741
9742 static void
9743 s390_init_frame_layout (void)
9744 {
9745 HOST_WIDE_INT frame_size;
9746 int base_used;
9747
9748 /* After LRA the frame layout is supposed to be read-only and should
9749 not be re-computed. */
9750 if (reload_completed)
9751 return;
9752
9753 /* On S/390 machines, we may need to perform branch splitting, which
9754 will require both base and return address register. We have no
9755 choice but to assume we're going to need them until right at the
9756 end of the machine dependent reorg phase. */
9757 if (!TARGET_CPU_ZARCH)
9758 cfun->machine->split_branches_pending_p = true;
9759
9760 do
9761 {
9762 frame_size = cfun_frame_layout.frame_size;
9763
9764 /* Try to predict whether we'll need the base register. */
9765 base_used = cfun->machine->split_branches_pending_p
9766 || crtl->uses_const_pool
9767 || (!DISP_IN_RANGE (frame_size)
9768 && !CONST_OK_FOR_K (frame_size));
9769
9770 /* Decide which register to use as literal pool base. In small
9771 leaf functions, try to use an unused call-clobbered register
9772 as base register to avoid save/restore overhead. */
9773 if (!base_used)
9774 cfun->machine->base_reg = NULL_RTX;
9775 else
9776 {
9777 int br = 0;
9778
9779 if (crtl->is_leaf)
9780 /* Prefer r5 (most likely to be free). */
9781 for (br = 5; br >= 2 && df_regs_ever_live_p (br); br--)
9782 ;
9783 cfun->machine->base_reg =
9784 gen_rtx_REG (Pmode, (br >= 2) ? br : BASE_REGNUM);
9785 }
9786
9787 s390_register_info ();
9788 s390_frame_info ();
9789 }
9790 while (frame_size != cfun_frame_layout.frame_size);
9791 }
9792
9793 /* Remove the FPR clobbers from a tbegin insn if it can be proven that
9794 the TX is nonescaping. A transaction is considered escaping if
9795 there is at least one path from tbegin returning CC0 to the
9796 function exit block without a tend.
9797
9798 The check so far has some limitations:
9799 - only single tbegin/tend BBs are supported
9800 - the first cond jump after tbegin must separate the CC0 path from ~CC0
9801 - when CC is copied to a GPR and the CC0 check is done with the GPR
9802 this is not supported
9803 */
9804
9805 static void
9806 s390_optimize_nonescaping_tx (void)
9807 {
9808 const unsigned int CC0 = 1 << 3;
9809 basic_block tbegin_bb = NULL;
9810 basic_block tend_bb = NULL;
9811 basic_block bb;
9812 rtx_insn *insn;
9813 bool result = true;
9814 int bb_index;
9815 rtx_insn *tbegin_insn = NULL;
9816
9817 if (!cfun->machine->tbegin_p)
9818 return;
9819
9820 for (bb_index = 0; bb_index < n_basic_blocks_for_fn (cfun); bb_index++)
9821 {
9822 bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
9823
9824 if (!bb)
9825 continue;
9826
9827 FOR_BB_INSNS (bb, insn)
9828 {
9829 rtx ite, cc, pat, target;
9830 unsigned HOST_WIDE_INT mask;
9831
9832 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
9833 continue;
9834
9835 pat = PATTERN (insn);
9836
9837 if (GET_CODE (pat) == PARALLEL)
9838 pat = XVECEXP (pat, 0, 0);
9839
9840 if (GET_CODE (pat) != SET
9841 || GET_CODE (SET_SRC (pat)) != UNSPEC_VOLATILE)
9842 continue;
9843
9844 if (XINT (SET_SRC (pat), 1) == UNSPECV_TBEGIN)
9845 {
9846 rtx_insn *tmp;
9847
9848 tbegin_insn = insn;
9849
9850 /* Just return if the tbegin doesn't have clobbers. */
9851 if (GET_CODE (PATTERN (insn)) != PARALLEL)
9852 return;
9853
9854 if (tbegin_bb != NULL)
9855 return;
9856
9857 /* Find the next conditional jump. */
9858 for (tmp = NEXT_INSN (insn);
9859 tmp != NULL_RTX;
9860 tmp = NEXT_INSN (tmp))
9861 {
9862 if (reg_set_p (gen_rtx_REG (CCmode, CC_REGNUM), tmp))
9863 return;
9864 if (!JUMP_P (tmp))
9865 continue;
9866
9867 ite = SET_SRC (PATTERN (tmp));
9868 if (GET_CODE (ite) != IF_THEN_ELSE)
9869 continue;
9870
9871 cc = XEXP (XEXP (ite, 0), 0);
9872 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc))
9873 || GET_MODE (cc) != CCRAWmode
9874 || GET_CODE (XEXP (XEXP (ite, 0), 1)) != CONST_INT)
9875 return;
9876
9877 if (bb->succs->length () != 2)
9878 return;
9879
9880 mask = INTVAL (XEXP (XEXP (ite, 0), 1));
9881 if (GET_CODE (XEXP (ite, 0)) == NE)
9882 mask ^= 0xf;
9883
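/* With CCRAWmode the comparison constant is a 4-bit mask with one bit
   per condition code value (CC0 is the 1 << 3 bit, see above).  An NE
   test is converted into the equivalent EQ mask by inverting all four
   bits.  */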
9884 if (mask == CC0)
9885 target = XEXP (ite, 1);
9886 else if (mask == (CC0 ^ 0xf))
9887 target = XEXP (ite, 2);
9888 else
9889 return;
9890
9891 {
9892 edge_iterator ei;
9893 edge e1, e2;
9894
9895 ei = ei_start (bb->succs);
9896 e1 = ei_safe_edge (ei);
9897 ei_next (&ei);
9898 e2 = ei_safe_edge (ei);
9899
9900 if (e2->flags & EDGE_FALLTHRU)
9901 {
9902 e2 = e1;
9903 e1 = ei_safe_edge (ei);
9904 }
9905
9906 if (!(e1->flags & EDGE_FALLTHRU))
9907 return;
9908
9909 tbegin_bb = (target == pc_rtx) ? e1->dest : e2->dest;
9910 }
9911 if (tmp == BB_END (bb))
9912 break;
9913 }
9914 }
9915
9916 if (XINT (SET_SRC (pat), 1) == UNSPECV_TEND)
9917 {
9918 if (tend_bb != NULL)
9919 return;
9920 tend_bb = bb;
9921 }
9922 }
9923 }
9924
9925 /* Either we successfully remove the FPR clobbers here or we are not
9926 able to do anything for this TX. Both cases don't qualify for
9927 another look. */
9928 cfun->machine->tbegin_p = false;
9929
9930 if (tbegin_bb == NULL || tend_bb == NULL)
9931 return;
9932
9933 calculate_dominance_info (CDI_POST_DOMINATORS);
9934 result = dominated_by_p (CDI_POST_DOMINATORS, tbegin_bb, tend_bb);
9935 free_dominance_info (CDI_POST_DOMINATORS);
9936
9937 if (!result)
9938 return;
9939
9940 PATTERN (tbegin_insn) = gen_rtx_PARALLEL (VOIDmode,
9941 gen_rtvec (2,
9942 XVECEXP (PATTERN (tbegin_insn), 0, 0),
9943 XVECEXP (PATTERN (tbegin_insn), 0, 1)));
9944 INSN_CODE (tbegin_insn) = -1;
9945 df_insn_rescan (tbegin_insn);
9946
9947 return;
9948 }
9949
9950 /* Return true if it is legal to put a value with MODE into REGNO. */
9951
9952 bool
9953 s390_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
9954 {
9955 if (!TARGET_VX && VECTOR_NOFP_REGNO_P (regno))
9956 return false;
9957
9958 switch (REGNO_REG_CLASS (regno))
9959 {
9960 case VEC_REGS:
9961 return ((GET_MODE_CLASS (mode) == MODE_INT
9962 && s390_class_max_nregs (VEC_REGS, mode) == 1)
9963 || mode == DFmode
9964 || s390_vector_mode_supported_p (mode));
9965 break;
9966 case FP_REGS:
9967 if (TARGET_VX
9968 && ((GET_MODE_CLASS (mode) == MODE_INT
9969 && s390_class_max_nregs (FP_REGS, mode) == 1)
9970 || mode == DFmode
9971 || s390_vector_mode_supported_p (mode)))
9972 return true;
9973
9974 if (REGNO_PAIR_OK (regno, mode))
9975 {
9976 if (mode == SImode || mode == DImode)
9977 return true;
9978
9979 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
9980 return true;
9981 }
9982 break;
9983 case ADDR_REGS:
9984 if (FRAME_REGNO_P (regno) && mode == Pmode)
9985 return true;
9986
9987 /* fallthrough */
9988 case GENERAL_REGS:
9989 if (REGNO_PAIR_OK (regno, mode))
9990 {
9991 if (TARGET_ZARCH
9992 || (mode != TFmode && mode != TCmode && mode != TDmode))
9993 return true;
9994 }
9995 break;
9996 case CC_REGS:
9997 if (GET_MODE_CLASS (mode) == MODE_CC)
9998 return true;
9999 break;
10000 case ACCESS_REGS:
10001 if (REGNO_PAIR_OK (regno, mode))
10002 {
10003 if (mode == SImode || mode == Pmode)
10004 return true;
10005 }
10006 break;
10007 default:
10008 return false;
10009 }
10010
10011 return false;
10012 }
10013
10014 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
10015
10016 bool
10017 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
10018 {
10019 /* Once we've decided upon a register to use as base register, it must
10020 no longer be used for any other purpose. */
10021 if (cfun->machine->base_reg)
10022 if (REGNO (cfun->machine->base_reg) == old_reg
10023 || REGNO (cfun->machine->base_reg) == new_reg)
10024 return false;
10025
10026 /* Prevent regrename from using call-saved regs which haven't
10027 actually been saved. This is necessary since regrename assumes
10028 the backend save/restore decisions are based on
10029 df_regs_ever_live. Since we have our own routine we have to tell
10030 regrename manually about it. */
10031 if (GENERAL_REGNO_P (new_reg)
10032 && !call_really_used_regs[new_reg]
10033 && cfun_gpr_save_slot (new_reg) == SAVE_SLOT_NONE)
10034 return false;
10035
10036 return true;
10037 }
10038
10039 /* Return nonzero if register REGNO can be used as a scratch register
10040 in peephole2. */
10041
10042 static bool
10043 s390_hard_regno_scratch_ok (unsigned int regno)
10044 {
10045 /* See s390_hard_regno_rename_ok. */
10046 if (GENERAL_REGNO_P (regno)
10047 && !call_really_used_regs[regno]
10048 && cfun_gpr_save_slot (regno) == SAVE_SLOT_NONE)
10049 return false;
10050
10051 return true;
10052 }
10053
10054 /* Maximum number of registers to represent a value of mode MODE
10055 in a register of class RCLASS. */
10056
10057 int
10058 s390_class_max_nregs (enum reg_class rclass, machine_mode mode)
10059 {
10060 int reg_size;
10061 bool reg_pair_required_p = false;
10062
10063 switch (rclass)
10064 {
10065 case FP_REGS:
10066 case VEC_REGS:
10067 reg_size = TARGET_VX ? 16 : 8;
10068
10069 /* TF and TD modes would fit into a VR but we put them into a
10070 register pair since we do not have 128bit FP instructions on
10071 full VRs. */
10072 if (TARGET_VX
10073 && SCALAR_FLOAT_MODE_P (mode)
10074 && GET_MODE_SIZE (mode) >= 16)
10075 reg_pair_required_p = true;
10076
10077 /* Even if complex types would fit into a single FPR/VR we force
10078 them into a register pair to deal with the parts more easily.
10079 (FIXME: What about complex ints?) */
10080 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
10081 reg_pair_required_p = true;
10082 break;
10083 case ACCESS_REGS:
10084 reg_size = 4;
10085 break;
10086 default:
10087 reg_size = UNITS_PER_WORD;
10088 break;
10089 }
10090
10091 if (reg_pair_required_p)
10092 return 2 * ((GET_MODE_SIZE (mode) / 2 + reg_size - 1) / reg_size);
10093
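/* E.g. a 16 byte TImode value in GENERAL_REGS with 8 byte registers
   needs (16 + 8 - 1) / 8 = 2 registers.  */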
10094 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
10095 }
10096
10097 /* Return TRUE if changing mode from FROM to TO should not be allowed
10098 for register class CLASS. */
10099
10100 int
10101 s390_cannot_change_mode_class (machine_mode from_mode,
10102 machine_mode to_mode,
10103 enum reg_class rclass)
10104 {
10105 machine_mode small_mode;
10106 machine_mode big_mode;
10107
10108 if (GET_MODE_SIZE (from_mode) == GET_MODE_SIZE (to_mode))
10109 return 0;
10110
10111 if (GET_MODE_SIZE (from_mode) < GET_MODE_SIZE (to_mode))
10112 {
10113 small_mode = from_mode;
10114 big_mode = to_mode;
10115 }
10116 else
10117 {
10118 small_mode = to_mode;
10119 big_mode = from_mode;
10120 }
10121
10122 /* Values residing in VRs are little-endian style. All modes are
10123 placed left-aligned in a VR. This means that we cannot allow
10124 switching between modes with differing sizes. Also if the vector
10125 facility is available we still place TFmode values in VR register
10126 pairs, since the only instructions we have operating on TFmodes
10127 only deal with register pairs. Therefore we have to allow DFmode
10128 subregs of TFmodes to enable the TFmode splitters. */
10129 if (reg_classes_intersect_p (VEC_REGS, rclass)
10130 && (GET_MODE_SIZE (small_mode) < 8
10131 || s390_class_max_nregs (VEC_REGS, big_mode) == 1))
10132 return 1;
10133
10134 /* Likewise for access registers, since they have only half the
10135 word size on 64-bit. */
10136 if (reg_classes_intersect_p (ACCESS_REGS, rclass))
10137 return 1;
10138
10139 return 0;
10140 }
10141
10142 /* Return true if we use LRA instead of reload pass. */
10143 static bool
10144 s390_lra_p (void)
10145 {
10146 return s390_lra_flag;
10147 }
10148
10149 /* Return true if register FROM can be eliminated via register TO. */
10150
10151 static bool
10152 s390_can_eliminate (const int from, const int to)
10153 {
10154 /* On zSeries machines, we have not marked the base register as fixed.
10155 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
10156 If a function requires the base register, we say here that this
10157 elimination cannot be performed. This will cause reload to free
10158 up the base register (as if it were fixed). On the other hand,
10159 if the current function does *not* require the base register, we
10160 say here the elimination succeeds, which in turn allows reload
10161 to allocate the base register for any other purpose. */
10162 if (from == BASE_REGNUM && to == BASE_REGNUM)
10163 {
10164 if (TARGET_CPU_ZARCH)
10165 {
10166 s390_init_frame_layout ();
10167 return cfun->machine->base_reg == NULL_RTX;
10168 }
10169
10170 return false;
10171 }
10172
10173 /* Everything else must point into the stack frame. */
10174 gcc_assert (to == STACK_POINTER_REGNUM
10175 || to == HARD_FRAME_POINTER_REGNUM);
10176
10177 gcc_assert (from == FRAME_POINTER_REGNUM
10178 || from == ARG_POINTER_REGNUM
10179 || from == RETURN_ADDRESS_POINTER_REGNUM);
10180
10181 /* Make sure we actually saved the return address. */
10182 if (from == RETURN_ADDRESS_POINTER_REGNUM)
10183 if (!crtl->calls_eh_return
10184 && !cfun->stdarg
10185 && !cfun_frame_layout.save_return_addr_p)
10186 return false;
10187
10188 return true;
10189 }
10190
10191 /* Return the offset between registers FROM and TO initially after the prologue. */
10192
10193 HOST_WIDE_INT
10194 s390_initial_elimination_offset (int from, int to)
10195 {
10196 HOST_WIDE_INT offset;
10197
10198 /* ??? Why are we called for non-eliminable pairs? */
10199 if (!s390_can_eliminate (from, to))
10200 return 0;
10201
10202 switch (from)
10203 {
10204 case FRAME_POINTER_REGNUM:
10205 offset = (get_frame_size()
10206 + STACK_POINTER_OFFSET
10207 + crtl->outgoing_args_size);
10208 break;
10209
10210 case ARG_POINTER_REGNUM:
10211 s390_init_frame_layout ();
10212 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
10213 break;
10214
10215 case RETURN_ADDRESS_POINTER_REGNUM:
10216 s390_init_frame_layout ();
10217
10218 if (cfun_frame_layout.first_save_gpr_slot == -1)
10219 {
10220 /* If it turns out that for stdarg nothing went into the reg
10221 save area we also do not need the return address
10222 pointer. */
10223 if (cfun->stdarg && !cfun_save_arg_fprs_p)
10224 return 0;
10225
10226 gcc_unreachable ();
10227 }
10228
10229 /* For the following to work it is not necessary for r14 itself to
10230 have a save slot. It is sufficient if any other GPR got one.
10231 Since the GPRs are always stored without gaps we are able to
10232 calculate where the r14 save slot would
10233 reside. */
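/* E.g. with first_save_gpr_slot == 6 the r14 slot lies
   (14 - 6) * UNITS_PER_LONG bytes above gprs_offset.  */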
10234 offset = (cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset +
10235 (RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot) *
10236 UNITS_PER_LONG);
10237 break;
10238
10239 case BASE_REGNUM:
10240 offset = 0;
10241 break;
10242
10243 default:
10244 gcc_unreachable ();
10245 }
10246
10247 return offset;
10248 }
10249
10250 /* Emit insn to save fpr REGNUM at offset OFFSET relative
10251 to register BASE. Return generated insn. */
10252
10253 static rtx
10254 save_fpr (rtx base, int offset, int regnum)
10255 {
10256 rtx addr;
10257 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
10258
10259 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
10260 set_mem_alias_set (addr, get_varargs_alias_set ());
10261 else
10262 set_mem_alias_set (addr, get_frame_alias_set ());
10263
10264 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
10265 }
10266
10267 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
10268 to register BASE. Return generated insn. */
10269
10270 static rtx
10271 restore_fpr (rtx base, int offset, int regnum)
10272 {
10273 rtx addr;
10274 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
10275 set_mem_alias_set (addr, get_frame_alias_set ());
10276
10277 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
10278 }
10279
10280 /* Return true if REGNO is a global register, but not one
10281 of the special ones that need to be saved/restored anyway. */
10282
10283 static inline bool
10284 global_not_special_regno_p (int regno)
10285 {
10286 return (global_regs[regno]
10287 /* These registers are special and need to be
10288 restored in any case. */
10289 && !(regno == STACK_POINTER_REGNUM
10290 || regno == RETURN_REGNUM
10291 || regno == BASE_REGNUM
10292 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
10293 }
10294
10295 /* Generate insn to save registers FIRST to LAST into
10296 the register save area located at offset OFFSET
10297 relative to register BASE. */
10298
10299 static rtx
10300 save_gprs (rtx base, int offset, int first, int last)
10301 {
10302 rtx addr, insn, note;
10303 int i;
10304
10305 addr = plus_constant (Pmode, base, offset);
10306 addr = gen_rtx_MEM (Pmode, addr);
10307
10308 set_mem_alias_set (addr, get_frame_alias_set ());
10309
10310 /* Special-case single register. */
10311 if (first == last)
10312 {
10313 if (TARGET_64BIT)
10314 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
10315 else
10316 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
10317
10318 if (!global_not_special_regno_p (first))
10319 RTX_FRAME_RELATED_P (insn) = 1;
10320 return insn;
10321 }
10322
10323
10324 insn = gen_store_multiple (addr,
10325 gen_rtx_REG (Pmode, first),
10326 GEN_INT (last - first + 1));
10327
10328 if (first <= 6 && cfun->stdarg)
10329 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
10330 {
10331 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
10332
10333 if (first + i <= 6)
10334 set_mem_alias_set (mem, get_varargs_alias_set ());
10335 }
10336
10337 /* We need to set the FRAME_RELATED flag on all SETs
10338 inside the store-multiple pattern.
10339
10340 However, we must not emit DWARF records for registers 2..5
10341 if they are stored for use by variable arguments ...
10342
10343 ??? Unfortunately, it is not enough to simply not set the
10344 FRAME_RELATED flags for those SETs, because the first SET
10345 of the PARALLEL is always treated as if it had the flag
10346 set, even if it does not. Therefore we emit a new pattern
10347 without those registers as a REG_FRAME_RELATED_EXPR note. */
10348
10349 if (first >= 6 && !global_not_special_regno_p (first))
10350 {
10351 rtx pat = PATTERN (insn);
10352
10353 for (i = 0; i < XVECLEN (pat, 0); i++)
10354 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
10355 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
10356 0, i)))))
10357 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
10358
10359 RTX_FRAME_RELATED_P (insn) = 1;
10360 }
10361 else if (last >= 6)
10362 {
10363 int start;
10364
10365 for (start = first >= 6 ? first : 6; start <= last; start++)
10366 if (!global_not_special_regno_p (start))
10367 break;
10368
10369 if (start > last)
10370 return insn;
10371
10372 addr = plus_constant (Pmode, base,
10373 offset + (start - first) * UNITS_PER_LONG);
10374
10375 if (start == last)
10376 {
10377 if (TARGET_64BIT)
10378 note = gen_movdi (gen_rtx_MEM (Pmode, addr),
10379 gen_rtx_REG (Pmode, start));
10380 else
10381 note = gen_movsi (gen_rtx_MEM (Pmode, addr),
10382 gen_rtx_REG (Pmode, start));
10383 note = PATTERN (note);
10384
10385 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
10386 RTX_FRAME_RELATED_P (insn) = 1;
10387
10388 return insn;
10389 }
10390
10391 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
10392 gen_rtx_REG (Pmode, start),
10393 GEN_INT (last - start + 1));
10394 note = PATTERN (note);
10395
10396 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
10397
10398 for (i = 0; i < XVECLEN (note, 0); i++)
10399 if (GET_CODE (XVECEXP (note, 0, i)) == SET
10400 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
10401 0, i)))))
10402 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
10403
10404 RTX_FRAME_RELATED_P (insn) = 1;
10405 }
10406
10407 return insn;
10408 }
10409
10410 /* Generate insn to restore registers FIRST to LAST from
10411 the register save area located at offset OFFSET
10412 relative to register BASE. */
10413
10414 static rtx
10415 restore_gprs (rtx base, int offset, int first, int last)
10416 {
10417 rtx addr, insn;
10418
10419 addr = plus_constant (Pmode, base, offset);
10420 addr = gen_rtx_MEM (Pmode, addr);
10421 set_mem_alias_set (addr, get_frame_alias_set ());
10422
10423 /* Special-case single register. */
10424 if (first == last)
10425 {
10426 if (TARGET_64BIT)
10427 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
10428 else
10429 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
10430
10431 RTX_FRAME_RELATED_P (insn) = 1;
10432 return insn;
10433 }
10434
10435 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
10436 addr,
10437 GEN_INT (last - first + 1));
10438 RTX_FRAME_RELATED_P (insn) = 1;
10439 return insn;
10440 }
10441
10442 /* Return insn sequence to load the GOT register. */
10443
10444 static GTY(()) rtx got_symbol;
10445 rtx_insn *
10446 s390_load_got (void)
10447 {
10448 rtx_insn *insns;
10449
10450 /* We cannot use pic_offset_table_rtx here since we use this
10451 function also for non-pic if __tls_get_offset is called and in
10452 that case PIC_OFFSET_TABLE_REGNUM as well as pic_offset_table_rtx
10453 aren't usable. */
10454 rtx got_rtx = gen_rtx_REG (Pmode, 12);
10455
10456 if (!got_symbol)
10457 {
10458 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
10459 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
10460 }
10461
10462 start_sequence ();
10463
10464 if (TARGET_CPU_ZARCH)
10465 {
10466 emit_move_insn (got_rtx, got_symbol);
10467 }
10468 else
10469 {
10470 rtx offset;
10471
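/* On pre-z900 (!TARGET_CPU_ZARCH) targets the GOT address is built in
   two steps: the literal-pool-relative offset of
   _GLOBAL_OFFSET_TABLE_ is loaded from the constant pool and the
   literal pool base is then added to it.  */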
10472 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
10473 UNSPEC_LTREL_OFFSET);
10474 offset = gen_rtx_CONST (Pmode, offset);
10475 offset = force_const_mem (Pmode, offset);
10476
10477 emit_move_insn (got_rtx, offset);
10478
10479 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
10480 UNSPEC_LTREL_BASE);
10481 offset = gen_rtx_PLUS (Pmode, got_rtx, offset);
10482
10483 emit_move_insn (got_rtx, offset);
10484 }
10485
10486 insns = get_insns ();
10487 end_sequence ();
10488 return insns;
10489 }
10490
10491 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
10492 and the change to the stack pointer. */
10493
10494 static void
10495 s390_emit_stack_tie (void)
10496 {
10497 rtx mem = gen_frame_mem (BLKmode,
10498 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
10499
10500 emit_insn (gen_stack_tie (mem));
10501 }
10502
10503 /* Copy GPRS into FPR save slots. */
10504
10505 static void
10506 s390_save_gprs_to_fprs (void)
10507 {
10508 int i;
10509
10510 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
10511 return;
10512
10513 for (i = 6; i < 16; i++)
10514 {
10515 if (FP_REGNO_P (cfun_gpr_save_slot (i)))
10516 {
10517 rtx_insn *insn =
10518 emit_move_insn (gen_rtx_REG (DImode, cfun_gpr_save_slot (i)),
10519 gen_rtx_REG (DImode, i));
10520 RTX_FRAME_RELATED_P (insn) = 1;
10521 /* This prevents dwarf2cfi from interpreting the set. Otherwise
10522 it might emit def_cfa_register infos setting an FPR as the
10523 new CFA. */
10524 add_reg_note (insn, REG_CFA_REGISTER, PATTERN (insn));
10525 }
10526 }
10527 }
10528
10529 /* Restore GPRs from FPR save slots. */
10530
10531 static void
10532 s390_restore_gprs_from_fprs (void)
10533 {
10534 int i;
10535
10536 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
10537 return;
10538
10539 for (i = 6; i < 16; i++)
10540 {
10541 if (FP_REGNO_P (cfun_gpr_save_slot (i)))
10542 {
10543 rtx_insn *insn =
10544 emit_move_insn (gen_rtx_REG (DImode, i),
10545 gen_rtx_REG (DImode, cfun_gpr_save_slot (i)));
10546 df_set_regs_ever_live (i, true);
10547 add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, i));
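/* When the stack pointer itself is restored from an FPR, re-define
   the CFA in terms of the restored stack pointer.  */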
10548 if (i == STACK_POINTER_REGNUM)
10549 add_reg_note (insn, REG_CFA_DEF_CFA,
10550 plus_constant (Pmode, stack_pointer_rtx,
10551 STACK_POINTER_OFFSET));
10552 RTX_FRAME_RELATED_P (insn) = 1;
10553 }
10554 }
10555 }
10556
10557
10558 /* A pass run immediately before shrink-wrapping and prologue and epilogue
10559 generation. */
10560
10561 namespace {
10562
10563 const pass_data pass_data_s390_early_mach =
10564 {
10565 RTL_PASS, /* type */
10566 "early_mach", /* name */
10567 OPTGROUP_NONE, /* optinfo_flags */
10568 TV_MACH_DEP, /* tv_id */
10569 0, /* properties_required */
10570 0, /* properties_provided */
10571 0, /* properties_destroyed */
10572 0, /* todo_flags_start */
10573 ( TODO_df_verify | TODO_df_finish ), /* todo_flags_finish */
10574 };
10575
10576 class pass_s390_early_mach : public rtl_opt_pass
10577 {
10578 public:
10579 pass_s390_early_mach (gcc::context *ctxt)
10580 : rtl_opt_pass (pass_data_s390_early_mach, ctxt)
10581 {}
10582
10583 /* opt_pass methods: */
10584 virtual unsigned int execute (function *);
10585
10586 }; // class pass_s390_early_mach
10587
10588 unsigned int
10589 pass_s390_early_mach::execute (function *fun)
10590 {
10591 rtx_insn *insn;
10592
10593 /* Try to get rid of the FPR clobbers. */
10594 s390_optimize_nonescaping_tx ();
10595
10596 /* Re-compute register info. */
10597 s390_register_info ();
10598
10599 /* If we're using a base register, ensure that it is always valid for
10600 the first non-prologue instruction. */
10601 if (fun->machine->base_reg)
10602 emit_insn_at_entry (gen_main_pool (fun->machine->base_reg));
10603
10604 /* Annotate all constant pool references to let the scheduler know
10605 they implicitly use the base register. */
10606 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10607 if (INSN_P (insn))
10608 {
10609 annotate_constant_pool_refs (&PATTERN (insn));
10610 df_insn_rescan (insn);
10611 }
10612 return 0;
10613 }
10614
10615 } // anon namespace
10616
10617 /* Expand the prologue into a bunch of separate insns. */
10618
10619 void
10620 s390_emit_prologue (void)
10621 {
10622 rtx insn, addr;
10623 rtx temp_reg;
10624 int i;
10625 int offset;
10626 int next_fpr = 0;
10627
10628 /* Choose best register to use for temp use within prologue.
10629 TPF with profiling must avoid the register 14 - the tracing function
10630 needs the original contents of r14 to be preserved. */
10631
10632 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
10633 && !crtl->is_leaf
10634 && !TARGET_TPF_PROFILING)
10635 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
10636 else if (flag_split_stack && cfun->stdarg)
10637 temp_reg = gen_rtx_REG (Pmode, 12);
10638 else
10639 temp_reg = gen_rtx_REG (Pmode, 1);
10640
10641 s390_save_gprs_to_fprs ();
10642
10643 /* Save call saved gprs. */
10644 if (cfun_frame_layout.first_save_gpr != -1)
10645 {
10646 insn = save_gprs (stack_pointer_rtx,
10647 cfun_frame_layout.gprs_offset +
10648 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
10649 - cfun_frame_layout.first_save_gpr_slot),
10650 cfun_frame_layout.first_save_gpr,
10651 cfun_frame_layout.last_save_gpr);
10652 emit_insn (insn);
10653 }
10654
10655 /* Dummy insn to mark literal pool slot. */
10656
10657 if (cfun->machine->base_reg)
10658 emit_insn (gen_main_pool (cfun->machine->base_reg));
10659
10660 offset = cfun_frame_layout.f0_offset;
10661
10662 /* Save f0 and f2. */
10663 for (i = FPR0_REGNUM; i <= FPR0_REGNUM + 1; i++)
10664 {
10665 if (cfun_fpr_save_p (i))
10666 {
10667 save_fpr (stack_pointer_rtx, offset, i);
10668 offset += 8;
10669 }
10670 else if (!TARGET_PACKED_STACK || cfun->stdarg)
10671 offset += 8;
10672 }
10673
10674 /* Save f4 and f6. */
10675 offset = cfun_frame_layout.f4_offset;
10676 for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
10677 {
10678 if (cfun_fpr_save_p (i))
10679 {
10680 insn = save_fpr (stack_pointer_rtx, offset, i);
10681 offset += 8;
10682
10683 /* If f4 and f6 are call clobbered, they are saved due to
10684 stdarg and therefore are not frame related. */
10685 if (!call_really_used_regs[i])
10686 RTX_FRAME_RELATED_P (insn) = 1;
10687 }
10688 else if (!TARGET_PACKED_STACK || call_really_used_regs[i])
10689 offset += 8;
10690 }
10691
10692 if (TARGET_PACKED_STACK
10693 && cfun_save_high_fprs_p
10694 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
10695 {
10696 offset = (cfun_frame_layout.f8_offset
10697 + (cfun_frame_layout.high_fprs - 1) * 8);
10698
10699 for (i = FPR15_REGNUM; i >= FPR8_REGNUM && offset >= 0; i--)
10700 if (cfun_fpr_save_p (i))
10701 {
10702 insn = save_fpr (stack_pointer_rtx, offset, i);
10703
10704 RTX_FRAME_RELATED_P (insn) = 1;
10705 offset -= 8;
10706 }
10707 if (offset >= cfun_frame_layout.f8_offset)
10708 next_fpr = i;
10709 }
10710
10711 if (!TARGET_PACKED_STACK)
10712 next_fpr = cfun_save_high_fprs_p ? FPR15_REGNUM : 0;
10713
10714 if (flag_stack_usage_info)
10715 current_function_static_stack_size = cfun_frame_layout.frame_size;
10716
10717 /* Decrement stack pointer. */
10718
10719 if (cfun_frame_layout.frame_size > 0)
10720 {
10721 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
10722 rtx real_frame_off;
10723
10724 if (s390_stack_size)
10725 {
10726 HOST_WIDE_INT stack_guard;
10727
10728 if (s390_stack_guard)
10729 stack_guard = s390_stack_guard;
10730 else
10731 {
10732 /* If no value for the stack guard is provided, the smallest power of 2
10733 that is at least as large as the current frame size is chosen. */
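/* E.g. a frame size of 0x1900 bytes results in a stack guard of
   0x2000.  */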
10734 stack_guard = 1;
10735 while (stack_guard < cfun_frame_layout.frame_size)
10736 stack_guard <<= 1;
10737 }
10738
10739 if (cfun_frame_layout.frame_size >= s390_stack_size)
10740 {
10741 warning (0, "frame size of function %qs is %wd"
10742 " bytes exceeding user provided stack limit of "
10743 "%d bytes. "
10744 "An unconditional trap is added.",
10745 current_function_name(), cfun_frame_layout.frame_size,
10746 s390_stack_size);
10747 emit_insn (gen_trap ());
10748 emit_barrier ();
10749 }
10750 else
10751 {
10752 /* stack_guard has to be smaller than s390_stack_size.
10753 Otherwise we would emit an AND with zero which would
10754 not match the test under mask pattern. */
10755 if (stack_guard >= s390_stack_size)
10756 {
10757 warning (0, "frame size of function %qs is %wd"
10758 " bytes which is more than half the stack size. "
10759 "The dynamic check would not be reliable. "
10760 "No check emitted for this function.",
10761 current_function_name(),
10762 cfun_frame_layout.frame_size);
10763 }
10764 else
10765 {
10766 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
10767 & ~(stack_guard - 1));
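/* E.g. with -mstack-size=65536 and -mstack-guard=4096 the mask is
   0xf000; the conditional trap below fires once the stack pointer
   modulo the stack size drops below the guard value, i.e. the stack
   is nearly exhausted.  */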
10768
10769 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
10770 GEN_INT (stack_check_mask));
10771 if (TARGET_64BIT)
10772 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
10773 t, const0_rtx),
10774 t, const0_rtx, const0_rtx));
10775 else
10776 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
10777 t, const0_rtx),
10778 t, const0_rtx, const0_rtx));
10779 }
10780 }
10781 }
10782
10783 if (s390_warn_framesize > 0
10784 && cfun_frame_layout.frame_size >= s390_warn_framesize)
10785 warning (0, "frame size of %qs is %wd bytes",
10786 current_function_name (), cfun_frame_layout.frame_size);
10787
10788 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
10789 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
10790
10791 /* Save incoming stack pointer into temp reg. */
10792 if (TARGET_BACKCHAIN || next_fpr)
10793 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
10794
10795 /* Subtract frame size from stack pointer. */
10796
10797 if (DISP_IN_RANGE (INTVAL (frame_off)))
10798 {
10799 insn = gen_rtx_SET (stack_pointer_rtx,
10800 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
10801 frame_off));
10802 insn = emit_insn (insn);
10803 }
10804 else
10805 {
10806 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
10807 frame_off = force_const_mem (Pmode, frame_off);
10808
10809 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
10810 annotate_constant_pool_refs (&PATTERN (insn));
10811 }
10812
10813 RTX_FRAME_RELATED_P (insn) = 1;
10814 real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
10815 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
10816 gen_rtx_SET (stack_pointer_rtx,
10817 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
10818 real_frame_off)));
10819
10820 /* Set backchain. */
10821
10822 if (TARGET_BACKCHAIN)
10823 {
10824 if (cfun_frame_layout.backchain_offset)
10825 addr = gen_rtx_MEM (Pmode,
10826 plus_constant (Pmode, stack_pointer_rtx,
10827 cfun_frame_layout.backchain_offset));
10828 else
10829 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
10830 set_mem_alias_set (addr, get_frame_alias_set ());
10831 insn = emit_insn (gen_move_insn (addr, temp_reg));
10832 }
10833
10834 /* If we support non-call exceptions (e.g. for Java),
10835 we need to make sure the backchain pointer is set up
10836 before any possibly trapping memory access. */
10837 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
10838 {
10839 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
10840 emit_clobber (addr);
10841 }
10842 }
10843
10844 /* Save fprs 8 - 15 (64 bit ABI). */
10845
10846 if (cfun_save_high_fprs_p && next_fpr)
10847 {
10848 /* If the stack might be accessed through a different register
10849 we have to make sure that the stack pointer decrement is not
10850 moved below the use of the stack slots. */
10851 s390_emit_stack_tie ();
10852
10853 insn = emit_insn (gen_add2_insn (temp_reg,
10854 GEN_INT (cfun_frame_layout.f8_offset)));
10855
10856 offset = 0;
10857
10858 for (i = FPR8_REGNUM; i <= next_fpr; i++)
10859 if (cfun_fpr_save_p (i))
10860 {
10861 rtx addr = plus_constant (Pmode, stack_pointer_rtx,
10862 cfun_frame_layout.frame_size
10863 + cfun_frame_layout.f8_offset
10864 + offset);
10865
10866 insn = save_fpr (temp_reg, offset, i);
10867 offset += 8;
10868 RTX_FRAME_RELATED_P (insn) = 1;
10869 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
10870 gen_rtx_SET (gen_rtx_MEM (DFmode, addr),
10871 gen_rtx_REG (DFmode, i)));
10872 }
10873 }
10874
10875 /* Set frame pointer, if needed. */
10876
10877 if (frame_pointer_needed)
10878 {
10879 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
10880 RTX_FRAME_RELATED_P (insn) = 1;
10881 }
10882
10883 /* Set up got pointer, if needed. */
10884
10885 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
10886 {
10887 rtx_insn *insns = s390_load_got ();
10888
10889 for (rtx_insn *insn = insns; insn; insn = NEXT_INSN (insn))
10890 annotate_constant_pool_refs (&PATTERN (insn));
10891
10892 emit_insn (insns);
10893 }
10894
10895 if (TARGET_TPF_PROFILING)
10896 {
10897 /* Generate a BAS instruction to serve as a function
10898 entry intercept to facilitate the use of tracing
10899 algorithms located at the branch target. */
10900 emit_insn (gen_prologue_tpf ());
10901
10902 /* Emit a blockage here so that all code
10903 lies between the profiling mechanisms. */
10904 emit_insn (gen_blockage ());
10905 }
10906 }
10907
10908 /* Expand the epilogue into a bunch of separate insns. */
10909
10910 void
10911 s390_emit_epilogue (bool sibcall)
10912 {
10913 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
10914 int area_bottom, area_top, offset = 0;
10915 int next_offset;
10916 rtvec p;
10917 int i;
10918
10919 if (TARGET_TPF_PROFILING)
10920 {
10921
10922 /* Generate a BAS instruction to serve as a function
10923 entry intercept to facilitate the use of tracing
10924 algorithms located at the branch target. */
10925
10926 /* Emit a blockage here so that all code
10927 lies between the profiling mechanisms. */
10928 emit_insn (gen_blockage ());
10929
10930 emit_insn (gen_epilogue_tpf ());
10931 }
10932
10933 /* Check whether to use frame or stack pointer for restore. */
10934
10935 frame_pointer = (frame_pointer_needed
10936 ? hard_frame_pointer_rtx : stack_pointer_rtx);
10937
10938 s390_frame_area (&area_bottom, &area_top);
10939
10940 /* Check whether we can access the register save area.
10941 If not, increment the frame pointer as required. */
10942
10943 if (area_top <= area_bottom)
10944 {
10945 /* Nothing to restore. */
10946 }
10947 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
10948 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
10949 {
10950 /* Area is in range. */
10951 offset = cfun_frame_layout.frame_size;
10952 }
10953 else
10954 {
10955 rtx insn, frame_off, cfa;
10956
10957 offset = area_bottom < 0 ? -area_bottom : 0;
10958 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
10959
10960 cfa = gen_rtx_SET (frame_pointer,
10961 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
10962 if (DISP_IN_RANGE (INTVAL (frame_off)))
10963 {
10964 insn = gen_rtx_SET (frame_pointer,
10965 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
10966 insn = emit_insn (insn);
10967 }
10968 else
10969 {
10970 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
10971 frame_off = force_const_mem (Pmode, frame_off);
10972
10973 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
10974 annotate_constant_pool_refs (&PATTERN (insn));
10975 }
10976 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
10977 RTX_FRAME_RELATED_P (insn) = 1;
10978 }
10979
10980 /* Restore call saved fprs. */
10981
10982 if (TARGET_64BIT)
10983 {
10984 if (cfun_save_high_fprs_p)
10985 {
10986 next_offset = cfun_frame_layout.f8_offset;
10987 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
10988 {
10989 if (cfun_fpr_save_p (i))
10990 {
10991 restore_fpr (frame_pointer,
10992 offset + next_offset, i);
10993 cfa_restores
10994 = alloc_reg_note (REG_CFA_RESTORE,
10995 gen_rtx_REG (DFmode, i), cfa_restores);
10996 next_offset += 8;
10997 }
10998 }
10999 }
11000
11001 }
11002 else
11003 {
11004 next_offset = cfun_frame_layout.f4_offset;
11005 /* f4, f6 */
11006 for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
11007 {
11008 if (cfun_fpr_save_p (i))
11009 {
11010 restore_fpr (frame_pointer,
11011 offset + next_offset, i);
11012 cfa_restores
11013 = alloc_reg_note (REG_CFA_RESTORE,
11014 gen_rtx_REG (DFmode, i), cfa_restores);
11015 next_offset += 8;
11016 }
11017 else if (!TARGET_PACKED_STACK)
11018 next_offset += 8;
11019 }
11020
11021 }
11022
11023 /* Return register. */
11024
11025 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
11026
11027 /* Restore call saved gprs. */
11028
11029 if (cfun_frame_layout.first_restore_gpr != -1)
11030 {
11031 rtx insn, addr;
11032 int i;
11033
11034 /* Check for global registers and store their current values into
11035 the stack locations from which they will be restored below. */
11036
11037 for (i = cfun_frame_layout.first_restore_gpr;
11038 i <= cfun_frame_layout.last_restore_gpr;
11039 i++)
11040 {
11041 if (global_not_special_regno_p (i))
11042 {
11043 addr = plus_constant (Pmode, frame_pointer,
11044 offset + cfun_frame_layout.gprs_offset
11045 + (i - cfun_frame_layout.first_save_gpr_slot)
11046 * UNITS_PER_LONG);
11047 addr = gen_rtx_MEM (Pmode, addr);
11048 set_mem_alias_set (addr, get_frame_alias_set ());
11049 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
11050 }
11051 else
11052 cfa_restores
11053 = alloc_reg_note (REG_CFA_RESTORE,
11054 gen_rtx_REG (Pmode, i), cfa_restores);
11055 }
11056
11057 if (! sibcall)
11058 {
11059 /* Fetch the return address from the stack before the load multiple;
11060 this is beneficial for scheduling.
11061
11062 Only do this if we already decided that r14 needs to be
11063 saved to a stack slot. (And not just because r14 happens to
11064 be in between two GPRs which need saving.) Otherwise it
11065 would be difficult to take that decision back in
11066 s390_optimize_prologue. */
11067 if (cfun_gpr_save_slot (RETURN_REGNUM) == SAVE_SLOT_STACK)
11068 {
11069 int return_regnum = find_unused_clobbered_reg();
11070 if (!return_regnum)
11071 return_regnum = 4;
11072 return_reg = gen_rtx_REG (Pmode, return_regnum);
11073
11074 addr = plus_constant (Pmode, frame_pointer,
11075 offset + cfun_frame_layout.gprs_offset
11076 + (RETURN_REGNUM
11077 - cfun_frame_layout.first_save_gpr_slot)
11078 * UNITS_PER_LONG);
11079 addr = gen_rtx_MEM (Pmode, addr);
11080 set_mem_alias_set (addr, get_frame_alias_set ());
11081 emit_move_insn (return_reg, addr);
11082
11083 /* Once we did that optimization we have to make sure
11084 s390_optimize_prologue does not try to remove the
11085 store of r14 since we will not be able to find the
11086 load issued here. */
11087 cfun_frame_layout.save_return_addr_p = true;
11088 }
11089 }
11090
11091 insn = restore_gprs (frame_pointer,
11092 offset + cfun_frame_layout.gprs_offset
11093 + (cfun_frame_layout.first_restore_gpr
11094 - cfun_frame_layout.first_save_gpr_slot)
11095 * UNITS_PER_LONG,
11096 cfun_frame_layout.first_restore_gpr,
11097 cfun_frame_layout.last_restore_gpr);
11098 insn = emit_insn (insn);
11099 REG_NOTES (insn) = cfa_restores;
11100 add_reg_note (insn, REG_CFA_DEF_CFA,
11101 plus_constant (Pmode, stack_pointer_rtx,
11102 STACK_POINTER_OFFSET));
11103 RTX_FRAME_RELATED_P (insn) = 1;
11104 }
11105
11106 s390_restore_gprs_from_fprs ();
11107
11108 if (! sibcall)
11109 {
11110
11111 /* Return to caller. */
11112
11113 p = rtvec_alloc (2);
11114
11115 RTVEC_ELT (p, 0) = ret_rtx;
11116 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
11117 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
11118 }
11119 }
11120
11121 /* Implement TARGET_SET_UP_BY_PROLOGUE. */
11122
11123 static void
11124 s300_set_up_by_prologue (hard_reg_set_container *regs)
11125 {
11126 if (cfun->machine->base_reg
11127 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
11128 SET_HARD_REG_BIT (regs->set, REGNO (cfun->machine->base_reg));
11129 }
11130
11131 /* -fsplit-stack support. */
11132
11133 /* A SYMBOL_REF for __morestack. */
11134 static GTY(()) rtx morestack_ref;
11135
11136 /* When using -fsplit-stack, the allocation routines set a field in
11137 the TCB to the bottom of the stack plus this much space, measured
11138 in bytes. */
11139
11140 #define SPLIT_STACK_AVAILABLE 1024
11141
11142 /* Emit -fsplit-stack prologue, which goes before the regular function
11143 prologue. */
11144
11145 void
11146 s390_expand_split_stack_prologue (void)
11147 {
11148 rtx r1, guard, cc = NULL;
11149 rtx_insn *insn;
11150 /* Offset from thread pointer to __private_ss. */
11151 int psso = TARGET_64BIT ? 0x38 : 0x20;
11152 /* Pointer size in bytes. */
11153 /* Frame size and argument size - the two parameters to __morestack. */
11154 HOST_WIDE_INT frame_size = cfun_frame_layout.frame_size;
11155 /* Align argument size to 8 bytes - simplifies __morestack code. */
11156 HOST_WIDE_INT args_size = crtl->args.size >= 0
11157 ? ((crtl->args.size + 7) & ~7)
11158 : 0;
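/* E.g. an argument area of 13 bytes is reported to __morestack as 16.  */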
11159 /* Label to be called by __morestack. */
11160 rtx_code_label *call_done = NULL;
11161 rtx_code_label *parm_base = NULL;
11162 rtx tmp;
11163
11164 gcc_assert (flag_split_stack && reload_completed);
11165 if (!TARGET_CPU_ZARCH)
11166 {
11167 sorry ("CPUs older than z900 are not supported for -fsplit-stack");
11168 return;
11169 }
11170
11171 r1 = gen_rtx_REG (Pmode, 1);
11172
11173 /* If no stack frame will be allocated, don't do anything. */
11174 if (!frame_size)
11175 {
11176 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11177 {
11178 /* If va_start is used, just use r15. */
11179 emit_move_insn (r1,
11180 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11181 GEN_INT (STACK_POINTER_OFFSET)));
11182
11183 }
11184 return;
11185 }
11186
11187 if (morestack_ref == NULL_RTX)
11188 {
11189 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
11190 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
11191 | SYMBOL_FLAG_FUNCTION);
11192 }
11193
11194 if (CONST_OK_FOR_K (frame_size) || CONST_OK_FOR_Op (frame_size))
11195 {
11196 /* If frame_size will fit in an add instruction, do a stack space
11197 check, and only call __morestack if there's not enough space. */
11198
11199 /* Get thread pointer. r1 is the only register we can always destroy - r0
11200 could contain a static chain (and cannot be used to address memory
11201 anyway), r2-r6 can contain parameters, and r6-r15 are callee-saved. */
11202 emit_move_insn (r1, gen_rtx_REG (Pmode, TP_REGNUM));
11203 /* Aim at __private_ss. */
11204 guard = gen_rtx_MEM (Pmode, plus_constant (Pmode, r1, psso));
11205
11206 /* If less than 1 KiB is used, skip the addition and compare directly
11207 with __private_ss. */
11208 if (frame_size > SPLIT_STACK_AVAILABLE)
11209 {
11210 emit_move_insn (r1, guard);
11211 if (TARGET_64BIT)
11212 emit_insn (gen_adddi3 (r1, r1, GEN_INT (frame_size)));
11213 else
11214 emit_insn (gen_addsi3 (r1, r1, GEN_INT (frame_size)));
11215 guard = r1;
11216 }
11217
11218 /* Compare the (maybe adjusted) guard with the stack pointer. */
11219 cc = s390_emit_compare (LT, stack_pointer_rtx, guard);
11220 }
11221
11222 call_done = gen_label_rtx ();
11223 parm_base = gen_label_rtx ();
11224
11225 /* Emit the parameter block. */
11226 tmp = gen_split_stack_data (parm_base, call_done,
11227 GEN_INT (frame_size),
11228 GEN_INT (args_size));
11229 insn = emit_insn (tmp);
11230 add_reg_note (insn, REG_LABEL_OPERAND, call_done);
11231 LABEL_NUSES (call_done)++;
11232 add_reg_note (insn, REG_LABEL_OPERAND, parm_base);
11233 LABEL_NUSES (parm_base)++;
11234
11235 /* %r1 = litbase. */
11236 insn = emit_move_insn (r1, gen_rtx_LABEL_REF (VOIDmode, parm_base));
11237 add_reg_note (insn, REG_LABEL_OPERAND, parm_base);
11238 LABEL_NUSES (parm_base)++;
11239
11240 /* Now, we need to call __morestack. It has very special calling
11241 conventions: it preserves param/return/static chain registers for
11242 calling the main function body, and looks for its own parameters at %r1. */
11243
11244 if (cc != NULL)
11245 {
11246 tmp = gen_split_stack_cond_call (morestack_ref, cc, call_done);
11247
11248 insn = emit_jump_insn (tmp);
11249 JUMP_LABEL (insn) = call_done;
11250 LABEL_NUSES (call_done)++;
11251
11252 /* Mark the jump as very unlikely to be taken. */
11253 add_int_reg_note (insn, REG_BR_PROB, REG_BR_PROB_BASE / 100);
11254
11255 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11256 {
11257 /* If va_start is used, and __morestack was not called, just use
11258 r15. */
11259 emit_move_insn (r1,
11260 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11261 GEN_INT (STACK_POINTER_OFFSET)));
11262 }
11263 }
11264 else
11265 {
11266 tmp = gen_split_stack_call (morestack_ref, call_done);
11267 insn = emit_jump_insn (tmp);
11268 JUMP_LABEL (insn) = call_done;
11269 LABEL_NUSES (call_done)++;
11270 emit_barrier ();
11271 }
11272
11273 /* __morestack will call us here. */
11274
11275 emit_label (call_done);
11276 }
11277
11278 /* We may have to tell the dataflow pass that the split stack prologue
11279 is initializing a register. */
11280
11281 static void
11282 s390_live_on_entry (bitmap regs)
11283 {
11284 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11285 {
11286 gcc_assert (flag_split_stack);
11287 bitmap_set_bit (regs, 1);
11288 }
11289 }
11290
11291 /* Return true if the function can use simple_return to return outside
11292 of a shrink-wrapped region. At present shrink-wrapping is supported
11293 in all cases. */
11294
11295 bool
11296 s390_can_use_simple_return_insn (void)
11297 {
11298 return true;
11299 }
11300
11301 /* Return true if the epilogue is guaranteed to contain only a return
11302 instruction and if a direct return can therefore be used instead.
11303 One of the main advantages of using direct return instructions
11304 is that we can then use conditional returns. */
11305
11306 bool
11307 s390_can_use_return_insn (void)
11308 {
11309 int i;
11310
11311 if (!reload_completed)
11312 return false;
11313
11314 if (crtl->profile)
11315 return false;
11316
11317 if (TARGET_TPF_PROFILING)
11318 return false;
11319
11320 for (i = 0; i < 16; i++)
11321 if (cfun_gpr_save_slot (i) != SAVE_SLOT_NONE)
11322 return false;
11323
11324 /* For 31 bit this is not covered by the frame_size check below
11325 since f4, f6 are saved in the register save area without needing
11326 additional stack space. */
11327 if (!TARGET_64BIT
11328 && (cfun_fpr_save_p (FPR4_REGNUM) || cfun_fpr_save_p (FPR6_REGNUM)))
11329 return false;
11330
11331 if (cfun->machine->base_reg
11332 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
11333 return false;
11334
11335 return cfun_frame_layout.frame_size == 0;
11336 }
11337
11338 /* The VX ABI differs for vararg functions. Therefore we need the
11339 prototype of the callee to be available when passing vector type
11340 values. */
11341 static const char *
11342 s390_invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
11343 {
11344 return ((TARGET_VX_ABI
11345 && typelist == 0
11346 && VECTOR_TYPE_P (TREE_TYPE (val))
11347 && (funcdecl == NULL_TREE
11348 || (TREE_CODE (funcdecl) == FUNCTION_DECL
11349 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
11350 ? N_("Vector argument passed to unprototyped function")
11351 : NULL);
11352 }
11353
11354
11355 /* Return the size in bytes of a function argument of
11356 type TYPE and/or mode MODE. At least one of TYPE or
11357 MODE must be specified. */
11358
11359 static int
11360 s390_function_arg_size (machine_mode mode, const_tree type)
11361 {
11362 if (type)
11363 return int_size_in_bytes (type);
11364
11365 /* No type info available for some library calls ... */
11366 if (mode != BLKmode)
11367 return GET_MODE_SIZE (mode);
11368
11369 /* If we have neither type nor mode, abort. */
11370 gcc_unreachable ();
11371 }
11372
11373 /* Return true if a function argument of type TYPE and mode MODE
11374 is to be passed in a vector register, if available. */
11375
11376 bool
11377 s390_function_arg_vector (machine_mode mode, const_tree type)
11378 {
11379 if (!TARGET_VX_ABI)
11380 return false;
11381
11382 if (s390_function_arg_size (mode, type) > 16)
11383 return false;
11384
11385 /* No type info available for some library calls ... */
11386 if (!type)
11387 return VECTOR_MODE_P (mode);
11388
11389 /* The ABI says that record types with a single member are treated
11390 just like that member would be. */
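/* Illustrative example (not from the source; v4si is a hypothetical
16-byte vector type): a wrapper such as
struct wrap { v4si x; };
is passed in a vector register just like a plain v4si argument. */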
11391 while (TREE_CODE (type) == RECORD_TYPE)
11392 {
11393 tree field, single = NULL_TREE;
11394
11395 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
11396 {
11397 if (TREE_CODE (field) != FIELD_DECL)
11398 continue;
11399
11400 if (single == NULL_TREE)
11401 single = TREE_TYPE (field);
11402 else
11403 return false;
11404 }
11405
11406 if (single == NULL_TREE)
11407 return false;
11408 else
11409 {
11410 /* If the field declaration adds extra bytes due to
11411 e.g. padding, this is not accepted as a vector type. */
11412 if (int_size_in_bytes (single) <= 0
11413 || int_size_in_bytes (single) != int_size_in_bytes (type))
11414 return false;
11415 type = single;
11416 }
11417 }
11418
11419 return VECTOR_TYPE_P (type);
11420 }
11421
11422 /* Return true if a function argument of type TYPE and mode MODE
11423 is to be passed in a floating-point register, if available. */
11424
11425 static bool
11426 s390_function_arg_float (machine_mode mode, const_tree type)
11427 {
11428 if (s390_function_arg_size (mode, type) > 8)
11429 return false;
11430
11431 /* Soft-float changes the ABI: no floating-point registers are used. */
11432 if (TARGET_SOFT_FLOAT)
11433 return false;
11434
11435 /* No type info available for some library calls ... */
11436 if (!type)
11437 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
11438
11439 /* The ABI says that record types with a single member are treated
11440 just like that member would be. */
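/* Illustrative example (not from the source; assumes hardware
floating point): a wrapper such as
struct wrap { double d; };
is passed in a floating-point register just like a plain double. */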
11441 while (TREE_CODE (type) == RECORD_TYPE)
11442 {
11443 tree field, single = NULL_TREE;
11444
11445 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
11446 {
11447 if (TREE_CODE (field) != FIELD_DECL)
11448 continue;
11449
11450 if (single == NULL_TREE)
11451 single = TREE_TYPE (field);
11452 else
11453 return false;
11454 }
11455
11456 if (single == NULL_TREE)
11457 return false;
11458 else
11459 type = single;
11460 }
11461
11462 return TREE_CODE (type) == REAL_TYPE;
11463 }
11464
11465 /* Return true if a function argument of type TYPE and mode MODE
11466 is to be passed in an integer register, or a pair of integer
11467 registers, if available. */
11468
11469 static bool
11470 s390_function_arg_integer (machine_mode mode, const_tree type)
11471 {
11472 int size = s390_function_arg_size (mode, type);
11473 if (size > 8)
11474 return false;
11475
11476 /* No type info available for some library calls ... */
11477 if (!type)
11478 return GET_MODE_CLASS (mode) == MODE_INT
11479 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
11480
11481 /* We accept small integral (and similar) types. */
11482 if (INTEGRAL_TYPE_P (type)
11483 || POINTER_TYPE_P (type)
11484 || TREE_CODE (type) == NULLPTR_TYPE
11485 || TREE_CODE (type) == OFFSET_TYPE
11486 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
11487 return true;
11488
11489 /* We also accept structs of size 1, 2, 4, 8 that are not
11490 passed in floating-point registers. */
11491 if (AGGREGATE_TYPE_P (type)
11492 && exact_log2 (size) >= 0
11493 && !s390_function_arg_float (mode, type))
11494 return true;
11495
11496 return false;
11497 }
11498
11499 /* Return 1 if a function argument of type TYPE and mode MODE
11500 is to be passed by reference. The ABI specifies that only
11501 structures of size 1, 2, 4, or 8 bytes are passed by value,
11502 all other structures (and complex numbers) are passed by
11503 reference. */
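/* Illustration (hypothetical types, not from the source):
struct { long l; }     -- size 8, a power of two, passed by value
struct { char c[3]; }  -- size 3, not a power of two, passed by reference
_Complex double        -- complex type, always passed by reference */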
11504
11505 static bool
11506 s390_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
11507 machine_mode mode, const_tree type,
11508 bool named ATTRIBUTE_UNUSED)
11509 {
11510 int size = s390_function_arg_size (mode, type);
11511
11512 if (s390_function_arg_vector (mode, type))
11513 return false;
11514
11515 if (size > 8)
11516 return true;
11517
11518 if (type)
11519 {
11520 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
11521 return true;
11522
11523 if (TREE_CODE (type) == COMPLEX_TYPE
11524 || TREE_CODE (type) == VECTOR_TYPE)
11525 return true;
11526 }
11527
11528 return false;
11529 }
11530
11531 /* Update the data in CUM to advance over an argument of mode MODE and
11532 data type TYPE. (TYPE is null for libcalls where that information
11533 may not be available.) The boolean NAMED specifies whether the
11534 argument is a named argument (as opposed to an unnamed argument
11535 matching an ellipsis). */
11536
11537 static void
11538 s390_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
11539 const_tree type, bool named)
11540 {
11541 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11542
11543 if (s390_function_arg_vector (mode, type))
11544 {
11545 /* We are called for unnamed vector stdarg arguments which are
11546 passed on the stack. In this case this hook does not have to
11547 do anything since stack arguments are tracked by common
11548 code. */
11549 if (!named)
11550 return;
11551 cum->vrs += 1;
11552 }
11553 else if (s390_function_arg_float (mode, type))
11554 {
11555 cum->fprs += 1;
11556 }
11557 else if (s390_function_arg_integer (mode, type))
11558 {
11559 int size = s390_function_arg_size (mode, type);
11560 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
11561 }
11562 else
11563 gcc_unreachable ();
11564 }
11565
11566 /* Define where to put the arguments to a function.
11567 Value is zero to push the argument on the stack,
11568 or a hard register in which to store the argument.
11569
11570 MODE is the argument's machine mode.
11571 TYPE is the data type of the argument (as a tree).
11572 This is null for libcalls where that information may
11573 not be available.
11574 CUM is a variable of type CUMULATIVE_ARGS which gives info about
11575 the preceding args and about the function being called.
11576 NAMED is nonzero if this argument is a named parameter
11577 (otherwise it is an extra parameter matching an ellipsis).
11578
11579 On S/390, we use general purpose registers 2 through 6 to
11580 pass integer, pointer, and certain structure arguments, and
11581 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
11582 to pass floating point arguments. All remaining arguments
11583 are pushed to the stack. */
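/* Worked example (illustration only; 64-bit, hardware float):
for  extern void f (int a, double b, long c);
A is passed in %r2, B in %f0 and C in %r3. A fourth integer
argument would use %r4, a second floating-point argument %f2,
and so on, following the helper predicates above. */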
11584
11585 static rtx
11586 s390_function_arg (cumulative_args_t cum_v, machine_mode mode,
11587 const_tree type, bool named)
11588 {
11589 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11590
11591 if (!named)
11592 s390_check_type_for_vector_abi (type, true, false);
11593
11594 if (s390_function_arg_vector (mode, type))
11595 {
11596 /* Vector arguments that are part of the ellipsis are passed on the
11597 stack. */
11598 if (!named || (cum->vrs + 1 > VEC_ARG_NUM_REG))
11599 return NULL_RTX;
11600
11601 return gen_rtx_REG (mode, cum->vrs + FIRST_VEC_ARG_REGNO);
11602 }
11603 else if (s390_function_arg_float (mode, type))
11604 {
11605 if (cum->fprs + 1 > FP_ARG_NUM_REG)
11606 return NULL_RTX;
11607 else
11608 return gen_rtx_REG (mode, cum->fprs + 16);
11609 }
11610 else if (s390_function_arg_integer (mode, type))
11611 {
11612 int size = s390_function_arg_size (mode, type);
11613 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
11614
11615 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
11616 return NULL_RTX;
11617 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
11618 return gen_rtx_REG (mode, cum->gprs + 2);
11619 else if (n_gprs == 2)
11620 {
11621 rtvec p = rtvec_alloc (2);
11622
11623 RTVEC_ELT (p, 0)
11624 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
11625 const0_rtx);
11626 RTVEC_ELT (p, 1)
11627 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
11628 GEN_INT (4));
11629
11630 return gen_rtx_PARALLEL (mode, p);
11631 }
11632 }
11633
11634 /* After the real arguments, expand_call calls us once again
11635 with a void_type_node type. Whatever we return here is
11636 passed as operand 2 to the call expanders.
11637
11638 We don't need this feature ... */
11639 else if (type == void_type_node)
11640 return const0_rtx;
11641
11642 gcc_unreachable ();
11643 }
11644
11645 /* Return true if return values of type TYPE should be returned
11646 in a memory buffer whose address is passed by the caller as
11647 hidden first argument. */
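/* Illustration (hypothetical types, not from the source):
long long            -- 8 bytes, returned in a register
struct { int a; }    -- aggregate, always returned in memory
16-byte vector type  -- returned in a vector register with the VX ABI */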
11648
11649 static bool
11650 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
11651 {
11652 /* We accept small integral (and similar) types. */
11653 if (INTEGRAL_TYPE_P (type)
11654 || POINTER_TYPE_P (type)
11655 || TREE_CODE (type) == OFFSET_TYPE
11656 || TREE_CODE (type) == REAL_TYPE)
11657 return int_size_in_bytes (type) > 8;
11658
11659 /* Vector types which fit into a VR are returned in a vector register. */
11660 if (TARGET_VX_ABI
11661 && VECTOR_TYPE_P (type)
11662 && int_size_in_bytes (type) <= 16)
11663 return false;
11664
11665 /* Aggregates and similar constructs are always returned
11666 in memory. */
11667 if (AGGREGATE_TYPE_P (type)
11668 || TREE_CODE (type) == COMPLEX_TYPE
11669 || VECTOR_TYPE_P (type))
11670 return true;
11671
11672 /* ??? We get called on all sorts of random stuff from
11673 aggregate_value_p. We can't abort, but it's not clear
11674 what's safe to return. Pretend it's a struct I guess. */
11675 return true;
11676 }
11677
11678 /* Function arguments and return values are promoted to word size. */
11679
11680 static machine_mode
11681 s390_promote_function_mode (const_tree type, machine_mode mode,
11682 int *punsignedp,
11683 const_tree fntype ATTRIBUTE_UNUSED,
11684 int for_return ATTRIBUTE_UNUSED)
11685 {
11686 if (INTEGRAL_MODE_P (mode)
11687 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
11688 {
11689 if (type != NULL_TREE && POINTER_TYPE_P (type))
11690 *punsignedp = POINTERS_EXTEND_UNSIGNED;
11691 return Pmode;
11692 }
11693
11694 return mode;
11695 }
11696
11697 /* Define where to return a (scalar) value of type RET_TYPE.
11698 If RET_TYPE is null, define where to return a (scalar)
11699 value of mode MODE from a libcall. */
11700
11701 static rtx
11702 s390_function_and_libcall_value (machine_mode mode,
11703 const_tree ret_type,
11704 const_tree fntype_or_decl,
11705 bool outgoing ATTRIBUTE_UNUSED)
11706 {
11707 /* For vector return types it is important to use the RET_TYPE
11708 argument whenever available since the middle-end might have
11709 changed the mode to a scalar mode. */
11710 bool vector_ret_type_p = ((ret_type && VECTOR_TYPE_P (ret_type))
11711 || (!ret_type && VECTOR_MODE_P (mode)));
11712
11713 /* For normal functions perform the promotion as
11714 promote_function_mode would do. */
11715 if (ret_type)
11716 {
11717 int unsignedp = TYPE_UNSIGNED (ret_type);
11718 mode = promote_function_mode (ret_type, mode, &unsignedp,
11719 fntype_or_decl, 1);
11720 }
11721
11722 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT
11723 || SCALAR_FLOAT_MODE_P (mode)
11724 || (TARGET_VX_ABI && vector_ret_type_p));
11725 gcc_assert (GET_MODE_SIZE (mode) <= (TARGET_VX_ABI ? 16 : 8));
11726
11727 if (TARGET_VX_ABI && vector_ret_type_p)
11728 return gen_rtx_REG (mode, FIRST_VEC_ARG_REGNO);
11729 else if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
11730 return gen_rtx_REG (mode, 16);
11731 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
11732 || UNITS_PER_LONG == UNITS_PER_WORD)
11733 return gen_rtx_REG (mode, 2);
11734 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
11735 {
11736 /* This case is triggered when returning a 64 bit value with
11737 -m31 -mzarch. Although the value would fit into a single
11738 register it has to be forced into a 32 bit register pair in
11739 order to match the ABI. */
11740 rtvec p = rtvec_alloc (2);
11741
11742 RTVEC_ELT (p, 0)
11743 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
11744 RTVEC_ELT (p, 1)
11745 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
11746
11747 return gen_rtx_PARALLEL (mode, p);
11748 }
11749
11750 gcc_unreachable ();
11751 }
11752
11753 /* Define where to return a scalar return value of type RET_TYPE. */
11754
11755 static rtx
11756 s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
11757 bool outgoing)
11758 {
11759 return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
11760 fn_decl_or_type, outgoing);
11761 }
11762
11763 /* Define where to return a scalar libcall return value of mode
11764 MODE. */
11765
11766 static rtx
11767 s390_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
11768 {
11769 return s390_function_and_libcall_value (mode, NULL_TREE,
11770 NULL_TREE, true);
11771 }
11772
11773
11774 /* Create and return the va_list datatype.
11775
11776 On S/390, va_list is an array type equivalent to
11777
11778 typedef struct __va_list_tag
11779 {
11780 long __gpr;
11781 long __fpr;
11782 void *__overflow_arg_area;
11783 void *__reg_save_area;
11784 } va_list[1];
11785
11786 where __gpr and __fpr hold the number of general purpose
11787 or floating point arguments used up to now, respectively,
11788 __overflow_arg_area points to the stack location of the
11789 next argument passed on the stack, and __reg_save_area
11790 always points to the start of the register area in the
11791 call frame of the current function. The function prologue
11792 saves all registers used for argument passing into this
11793 area if the function uses variable arguments. */
11794
11795 static tree
11796 s390_build_builtin_va_list (void)
11797 {
11798 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
11799
11800 record = lang_hooks.types.make_type (RECORD_TYPE);
11801
11802 type_decl =
11803 build_decl (BUILTINS_LOCATION,
11804 TYPE_DECL, get_identifier ("__va_list_tag"), record);
11805
11806 f_gpr = build_decl (BUILTINS_LOCATION,
11807 FIELD_DECL, get_identifier ("__gpr"),
11808 long_integer_type_node);
11809 f_fpr = build_decl (BUILTINS_LOCATION,
11810 FIELD_DECL, get_identifier ("__fpr"),
11811 long_integer_type_node);
11812 f_ovf = build_decl (BUILTINS_LOCATION,
11813 FIELD_DECL, get_identifier ("__overflow_arg_area"),
11814 ptr_type_node);
11815 f_sav = build_decl (BUILTINS_LOCATION,
11816 FIELD_DECL, get_identifier ("__reg_save_area"),
11817 ptr_type_node);
11818
11819 va_list_gpr_counter_field = f_gpr;
11820 va_list_fpr_counter_field = f_fpr;
11821
11822 DECL_FIELD_CONTEXT (f_gpr) = record;
11823 DECL_FIELD_CONTEXT (f_fpr) = record;
11824 DECL_FIELD_CONTEXT (f_ovf) = record;
11825 DECL_FIELD_CONTEXT (f_sav) = record;
11826
11827 TYPE_STUB_DECL (record) = type_decl;
11828 TYPE_NAME (record) = type_decl;
11829 TYPE_FIELDS (record) = f_gpr;
11830 DECL_CHAIN (f_gpr) = f_fpr;
11831 DECL_CHAIN (f_fpr) = f_ovf;
11832 DECL_CHAIN (f_ovf) = f_sav;
11833
11834 layout_type (record);
11835
11836 /* The correct type is an array type of one element. */
11837 return build_array_type (record, build_index_type (size_zero_node));
11838 }
11839
11840 /* Implement va_start by filling the va_list structure VALIST.
11841 STDARG_P is always true, and ignored.
11842 NEXTARG points to the first anonymous stack argument.
11843
11844 The following global variables are used to initialize
11845 the va_list structure:
11846
11847 crtl->args.info:
11848 holds number of gprs and fprs used for named arguments.
11849 crtl->args.arg_offset_rtx:
11850 holds the offset of the first anonymous stack argument
11851 (relative to the virtual arg pointer). */
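/* Conceptually (illustration only) the expansion below amounts to:
ap->__gpr = <number of named GPR arguments>;
ap->__fpr = <number of named FPR arguments>;
ap->__overflow_arg_area = <incoming arg pointer> + <arg offset>;
ap->__reg_save_area = <start of the register save area>;
where the overflow and save-area fields are only set up when they
can actually be needed. */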
11852
11853 static void
11854 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
11855 {
11856 HOST_WIDE_INT n_gpr, n_fpr;
11857 int off;
11858 tree f_gpr, f_fpr, f_ovf, f_sav;
11859 tree gpr, fpr, ovf, sav, t;
11860
11861 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
11862 f_fpr = DECL_CHAIN (f_gpr);
11863 f_ovf = DECL_CHAIN (f_fpr);
11864 f_sav = DECL_CHAIN (f_ovf);
11865
11866 valist = build_simple_mem_ref (valist);
11867 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
11868 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
11869 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
11870 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
11871
11872 /* Count number of gp and fp argument registers used. */
11873
11874 n_gpr = crtl->args.info.gprs;
11875 n_fpr = crtl->args.info.fprs;
11876
11877 if (cfun->va_list_gpr_size)
11878 {
11879 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
11880 build_int_cst (NULL_TREE, n_gpr));
11881 TREE_SIDE_EFFECTS (t) = 1;
11882 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11883 }
11884
11885 if (cfun->va_list_fpr_size)
11886 {
11887 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
11888 build_int_cst (NULL_TREE, n_fpr));
11889 TREE_SIDE_EFFECTS (t) = 1;
11890 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11891 }
11892
11893 if (flag_split_stack
11894 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
11895 == NULL)
11896 && cfun->machine->split_stack_varargs_pointer == NULL_RTX)
11897 {
11898 rtx reg;
11899 rtx_insn *seq;
11900
11901 reg = gen_reg_rtx (Pmode);
11902 cfun->machine->split_stack_varargs_pointer = reg;
11903
11904 start_sequence ();
11905 emit_move_insn (reg, gen_rtx_REG (Pmode, 1));
11906 seq = get_insns ();
11907 end_sequence ();
11908
11909 push_topmost_sequence ();
11910 emit_insn_after (seq, entry_of_function ());
11911 pop_topmost_sequence ();
11912 }
11913
11914 /* Find the overflow area.
11915 FIXME: This currently is too pessimistic when the vector ABI is
11916 enabled. In that case we *always* set up the overflow area
11917 pointer. */
11918 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
11919 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG
11920 || TARGET_VX_ABI)
11921 {
11922 if (cfun->machine->split_stack_varargs_pointer == NULL_RTX)
11923 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
11924 else
11925 t = make_tree (TREE_TYPE (ovf), cfun->machine->split_stack_varargs_pointer);
11926
11927 off = INTVAL (crtl->args.arg_offset_rtx);
11928 off = off < 0 ? 0 : off;
11929 if (TARGET_DEBUG_ARG)
11930 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
11931 (int)n_gpr, (int)n_fpr, off);
11932
11933 t = fold_build_pointer_plus_hwi (t, off);
11934
11935 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
11936 TREE_SIDE_EFFECTS (t) = 1;
11937 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11938 }
11939
11940 /* Find the register save area. */
11941 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
11942 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
11943 {
11944 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
11945 t = fold_build_pointer_plus_hwi (t, -RETURN_REGNUM * UNITS_PER_LONG);
11946
11947 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
11948 TREE_SIDE_EFFECTS (t) = 1;
11949 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11950 }
11951 }
11952
11953 /* Implement va_arg by updating the va_list structure
11954 VALIST as required to retrieve an argument of type
11955 TYPE, and returning that argument.
11956
11957 Generates code equivalent to:
11958
11959 if (integral value) {
11960 if (size <= 4 && args.gpr < 5 ||
11961 size > 4 && args.gpr < 4 )
11962 ret = args.reg_save_area[args.gpr+8]
11963 else
11964 ret = *args.overflow_arg_area++;
11965 } else if (vector value) {
11966 ret = *args.overflow_arg_area;
11967 args.overflow_arg_area += size / 8;
11968 } else if (float value) {
11969 if (args.fpr < 2)
11970 ret = args.reg_save_area[args.fpr+64]
11971 else
11972 ret = *args.overflow_arg_area++;
11973 } else if (aggregate value) {
11974 if (args.gpr < 5)
11975 ret = *args.reg_save_area[args.gpr]
11976 else
11977 ret = **args.overflow_arg_area++;
11978 } */
11979
11980 static tree
11981 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
11982 gimple_seq *post_p ATTRIBUTE_UNUSED)
11983 {
11984 tree f_gpr, f_fpr, f_ovf, f_sav;
11985 tree gpr, fpr, ovf, sav, reg, t, u;
11986 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
11987 tree lab_false, lab_over;
11988 tree addr = create_tmp_var (ptr_type_node, "addr");
11989 bool left_align_p; /* How a value < UNITS_PER_LONG is aligned within
11990 a stack slot. */
11991
11992 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
11993 f_fpr = DECL_CHAIN (f_gpr);
11994 f_ovf = DECL_CHAIN (f_fpr);
11995 f_sav = DECL_CHAIN (f_ovf);
11996
11997 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
11998 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
11999 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
12000
12001 /* The tree for args* cannot be shared between gpr/fpr and ovf since
12002 both appear on a lhs. */
12003 valist = unshare_expr (valist);
12004 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
12005
12006 size = int_size_in_bytes (type);
12007
12008 s390_check_type_for_vector_abi (type, true, false);
12009
12010 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
12011 {
12012 if (TARGET_DEBUG_ARG)
12013 {
12014 fprintf (stderr, "va_arg: aggregate type");
12015 debug_tree (type);
12016 }
12017
12018 /* Aggregates are passed by reference. */
12019 indirect_p = 1;
12020 reg = gpr;
12021 n_reg = 1;
12022
12023 /* Kernel stack layout on 31 bit: It is assumed here that no padding
12024 will be added by s390_frame_info because for va_args an even number
12025 of GPRs always has to be saved (r15-r2 = 14 regs). */
12026 sav_ofs = 2 * UNITS_PER_LONG;
12027 sav_scale = UNITS_PER_LONG;
12028 size = UNITS_PER_LONG;
12029 max_reg = GP_ARG_NUM_REG - n_reg;
12030 left_align_p = false;
12031 }
12032 else if (s390_function_arg_vector (TYPE_MODE (type), type))
12033 {
12034 if (TARGET_DEBUG_ARG)
12035 {
12036 fprintf (stderr, "va_arg: vector type");
12037 debug_tree (type);
12038 }
12039
12040 indirect_p = 0;
12041 reg = NULL_TREE;
12042 n_reg = 0;
12043 sav_ofs = 0;
12044 sav_scale = 8;
12045 max_reg = 0;
12046 left_align_p = true;
12047 }
12048 else if (s390_function_arg_float (TYPE_MODE (type), type))
12049 {
12050 if (TARGET_DEBUG_ARG)
12051 {
12052 fprintf (stderr, "va_arg: float type");
12053 debug_tree (type);
12054 }
12055
12056 /* FP args go in FP registers, if present. */
12057 indirect_p = 0;
12058 reg = fpr;
12059 n_reg = 1;
12060 sav_ofs = 16 * UNITS_PER_LONG;
12061 sav_scale = 8;
12062 max_reg = FP_ARG_NUM_REG - n_reg;
12063 left_align_p = false;
12064 }
12065 else
12066 {
12067 if (TARGET_DEBUG_ARG)
12068 {
12069 fprintf (stderr, "va_arg: other type");
12070 debug_tree (type);
12071 }
12072
12073 /* Otherwise into GP registers. */
12074 indirect_p = 0;
12075 reg = gpr;
12076 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
12077
12078 /* Kernel stack layout on 31 bit: It is assumed here that no padding
12079 will be added by s390_frame_info because for va_args an even number
12080 of GPRs always has to be saved (r15-r2 = 14 regs). */
12081 sav_ofs = 2 * UNITS_PER_LONG;
12082
12083 if (size < UNITS_PER_LONG)
12084 sav_ofs += UNITS_PER_LONG - size;
12085
12086 sav_scale = UNITS_PER_LONG;
12087 max_reg = GP_ARG_NUM_REG - n_reg;
12088 left_align_p = false;
12089 }
12090
12091 /* Pull the value out of the saved registers ... */
12092
12093 if (reg != NULL_TREE)
12094 {
12095 /*
12096 if (reg > ((typeof (reg))max_reg))
12097 goto lab_false;
12098
12099 addr = sav + sav_ofs + reg * sav_scale;
12100
12101 goto lab_over;
12102
12103 lab_false:
12104 */
12105
12106 lab_false = create_artificial_label (UNKNOWN_LOCATION);
12107 lab_over = create_artificial_label (UNKNOWN_LOCATION);
12108
12109 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
12110 t = build2 (GT_EXPR, boolean_type_node, reg, t);
12111 u = build1 (GOTO_EXPR, void_type_node, lab_false);
12112 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
12113 gimplify_and_add (t, pre_p);
12114
12115 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
12116 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
12117 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
12118 t = fold_build_pointer_plus (t, u);
12119
12120 gimplify_assign (addr, t, pre_p);
12121
12122 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
12123
12124 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
12125 }
12126
12127 /* ... Otherwise out of the overflow area. */
12128
12129 t = ovf;
12130 if (size < UNITS_PER_LONG && !left_align_p)
12131 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG - size);
12132
12133 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
12134
12135 gimplify_assign (addr, t, pre_p);
12136
12137 if (size < UNITS_PER_LONG && left_align_p)
12138 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG);
12139 else
12140 t = fold_build_pointer_plus_hwi (t, size);
12141
12142 gimplify_assign (ovf, t, pre_p);
12143
12144 if (reg != NULL_TREE)
12145 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
12146
12147
12148 /* Increment register save count. */
12149
12150 if (n_reg > 0)
12151 {
12152 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
12153 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
12154 gimplify_and_add (u, pre_p);
12155 }
12156
12157 if (indirect_p)
12158 {
12159 t = build_pointer_type_for_mode (build_pointer_type (type),
12160 ptr_mode, true);
12161 addr = fold_convert (t, addr);
12162 addr = build_va_arg_indirect_ref (addr);
12163 }
12164 else
12165 {
12166 t = build_pointer_type_for_mode (type, ptr_mode, true);
12167 addr = fold_convert (t, addr);
12168 }
12169
12170 return build_va_arg_indirect_ref (addr);
12171 }
12172
12173 /* Emit rtl for the tbegin or tbegin_retry (RETRY != NULL_RTX)
12174 expanders.
12175 DEST - Register location where CC will be stored.
12176 TDB - Pointer to a 256 byte area where to store the transaction.
12177 diagnostic block. NULL if TDB is not needed.
12178 RETRY - Retry count value. If non-NULL a retry loop for CC2
12179 is emitted
12180 CLOBBER_FPRS_P - If true clobbers for all FPRs are emitted as part
12181 of the tbegin instruction pattern. */
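/* Sketch of the emitted retry loop (illustration only, RETRY != NULL_RTX):
remaining = RETRY + 1;  attempt = 1;
retry_label:
CC = tbegin;
if (CC is 0, 1 or 3)          (success or persistent failure)
goto leave_label;
ppa (attempt++);              (transaction-abort assist hint)
if (--remaining != 0)
goto retry_label;
leave_label: ... */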
12182
12183 void
12184 s390_expand_tbegin (rtx dest, rtx tdb, rtx retry, bool clobber_fprs_p)
12185 {
12186 rtx retry_plus_two = gen_reg_rtx (SImode);
12187 rtx retry_reg = gen_reg_rtx (SImode);
12188 rtx_code_label *retry_label = NULL;
12189
12190 if (retry != NULL_RTX)
12191 {
12192 emit_move_insn (retry_reg, retry);
12193 emit_insn (gen_addsi3 (retry_plus_two, retry_reg, const2_rtx));
12194 emit_insn (gen_addsi3 (retry_reg, retry_reg, const1_rtx));
12195 retry_label = gen_label_rtx ();
12196 emit_label (retry_label);
12197 }
12198
12199 if (clobber_fprs_p)
12200 {
12201 if (TARGET_VX)
12202 emit_insn (gen_tbegin_1_z13 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
12203 tdb));
12204 else
12205 emit_insn (gen_tbegin_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
12206 tdb));
12207 }
12208 else
12209 emit_insn (gen_tbegin_nofloat_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
12210 tdb));
12211
12212 emit_move_insn (dest, gen_rtx_UNSPEC (SImode,
12213 gen_rtvec (1, gen_rtx_REG (CCRAWmode,
12214 CC_REGNUM)),
12215 UNSPEC_CC_TO_INT));
12216 if (retry != NULL_RTX)
12217 {
12218 const int CC0 = 1 << 3;
12219 const int CC1 = 1 << 2;
12220 const int CC3 = 1 << 0;
12221 rtx jump;
12222 rtx count = gen_reg_rtx (SImode);
12223 rtx_code_label *leave_label = gen_label_rtx ();
12224
12225 /* Exit for success and permanent failures. */
12226 jump = s390_emit_jump (leave_label,
12227 gen_rtx_EQ (VOIDmode,
12228 gen_rtx_REG (CCRAWmode, CC_REGNUM),
12229 gen_rtx_CONST_INT (VOIDmode, CC0 | CC1 | CC3)));
12230 LABEL_NUSES (leave_label) = 1;
12231
12232 /* CC2 - transient failure. Perform retry with ppa. */
12233 emit_move_insn (count, retry_plus_two);
12234 emit_insn (gen_subsi3 (count, count, retry_reg));
12235 emit_insn (gen_tx_assist (count));
12236 jump = emit_jump_insn (gen_doloop_si64 (retry_label,
12237 retry_reg,
12238 retry_reg));
12239 JUMP_LABEL (jump) = retry_label;
12240 LABEL_NUSES (retry_label) = 1;
12241 emit_label (leave_label);
12242 }
12243 }
12244
12245
12246 /* Return the decl for the target specific builtin with the function
12247 code FCODE. */
12248
12249 static tree
12250 s390_builtin_decl (unsigned fcode, bool initialized_p ATTRIBUTE_UNUSED)
12251 {
12252 if (fcode >= S390_BUILTIN_MAX)
12253 return error_mark_node;
12254
12255 return s390_builtin_decls[fcode];
12256 }
12257
12258 /* We call mcount before the function prologue. So a profiled leaf
12259 function should stay a leaf function. */
12260
12261 static bool
12262 s390_keep_leaf_when_profiled ()
12263 {
12264 return true;
12265 }
12266
12267 /* Output assembly code for the trampoline template to
12268 stdio stream FILE.
12269
12270 On S/390, we use gpr 1 internally in the trampoline code;
12271 gpr 0 is used to hold the static chain. */
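/* Resulting 64-bit trampoline layout (illustration only; byte offsets):
0:  basr %r1,0            (load trampoline address into %r1)
2:  lmg  %r0,%r1,14(%r1)  (load static chain into %r0, target into %r1)
8:  br   %r1              (jump to target)
16: <static chain value>    filled in by s390_trampoline_init
24: <target function address> */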
12272
12273 static void
12274 s390_asm_trampoline_template (FILE *file)
12275 {
12276 rtx op[2];
12277 op[0] = gen_rtx_REG (Pmode, 0);
12278 op[1] = gen_rtx_REG (Pmode, 1);
12279
12280 if (TARGET_64BIT)
12281 {
12282 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
12283 output_asm_insn ("lmg\t%0,%1,14(%1)", op); /* 6 byte */
12284 output_asm_insn ("br\t%1", op); /* 2 byte */
12285 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
12286 }
12287 else
12288 {
12289 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
12290 output_asm_insn ("lm\t%0,%1,6(%1)", op); /* 4 byte */
12291 output_asm_insn ("br\t%1", op); /* 2 byte */
12292 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
12293 }
12294 }
12295
12296 /* Emit RTL insns to initialize the variable parts of a trampoline.
12297 FNADDR is an RTX for the address of the function's pure code.
12298 CXT is an RTX for the static chain value for the function. */
12299
12300 static void
12301 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
12302 {
12303 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
12304 rtx mem;
12305
12306 emit_block_move (m_tramp, assemble_trampoline_template (),
12307 GEN_INT (2 * UNITS_PER_LONG), BLOCK_OP_NORMAL);
12308
12309 mem = adjust_address (m_tramp, Pmode, 2 * UNITS_PER_LONG);
12310 emit_move_insn (mem, cxt);
12311 mem = adjust_address (m_tramp, Pmode, 3 * UNITS_PER_LONG);
12312 emit_move_insn (mem, fnaddr);
12313 }
12314
12315 /* Output assembler code to FILE to increment profiler label # LABELNO
12316 for profiling a function entry. */
12317
12318 void
12319 s390_function_profiler (FILE *file, int labelno)
12320 {
12321 rtx op[7];
12322
12323 char label[128];
12324 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
12325
12326 fprintf (file, "# function profiler \n");
12327
12328 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
12329 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
12330 op[1] = gen_rtx_MEM (Pmode, plus_constant (Pmode, op[1], UNITS_PER_LONG));
12331
12332 op[2] = gen_rtx_REG (Pmode, 1);
12333 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
12334 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
12335
12336 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
12337 if (flag_pic)
12338 {
12339 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
12340 op[4] = gen_rtx_CONST (Pmode, op[4]);
12341 }
12342
12343 if (TARGET_64BIT)
12344 {
12345 output_asm_insn ("stg\t%0,%1", op);
12346 output_asm_insn ("larl\t%2,%3", op);
12347 output_asm_insn ("brasl\t%0,%4", op);
12348 output_asm_insn ("lg\t%0,%1", op);
12349 }
12350 else if (TARGET_CPU_ZARCH)
12351 {
12352 output_asm_insn ("st\t%0,%1", op);
12353 output_asm_insn ("larl\t%2,%3", op);
12354 output_asm_insn ("brasl\t%0,%4", op);
12355 output_asm_insn ("l\t%0,%1", op);
12356 }
12357 else if (!flag_pic)
12358 {
12359 op[6] = gen_label_rtx ();
12360
12361 output_asm_insn ("st\t%0,%1", op);
12362 output_asm_insn ("bras\t%2,%l6", op);
12363 output_asm_insn (".long\t%4", op);
12364 output_asm_insn (".long\t%3", op);
12365 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
12366 output_asm_insn ("l\t%0,0(%2)", op);
12367 output_asm_insn ("l\t%2,4(%2)", op);
12368 output_asm_insn ("basr\t%0,%0", op);
12369 output_asm_insn ("l\t%0,%1", op);
12370 }
12371 else
12372 {
12373 op[5] = gen_label_rtx ();
12374 op[6] = gen_label_rtx ();
12375
12376 output_asm_insn ("st\t%0,%1", op);
12377 output_asm_insn ("bras\t%2,%l6", op);
12378 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
12379 output_asm_insn (".long\t%4-%l5", op);
12380 output_asm_insn (".long\t%3-%l5", op);
12381 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
12382 output_asm_insn ("lr\t%0,%2", op);
12383 output_asm_insn ("a\t%0,0(%2)", op);
12384 output_asm_insn ("a\t%2,4(%2)", op);
12385 output_asm_insn ("basr\t%0,%0", op);
12386 output_asm_insn ("l\t%0,%1", op);
12387 }
12388 }
12389
12390 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
12391 into its SYMBOL_REF_FLAGS. */
12392
12393 static void
12394 s390_encode_section_info (tree decl, rtx rtl, int first)
12395 {
12396 default_encode_section_info (decl, rtl, first);
12397
12398 if (TREE_CODE (decl) == VAR_DECL)
12399 {
12400 /* Store the alignment to be able to check if we can use
12401 a larl/load-relative instruction. We only handle the cases
12402 that can go wrong (i.e. no FUNC_DECLs). If a symref does
12403 not have any flag we assume it to be correctly aligned. */
12404
12405 if (DECL_ALIGN (decl) % 64)
12406 SYMBOL_FLAG_SET_NOTALIGN8 (XEXP (rtl, 0));
12407
12408 if (DECL_ALIGN (decl) % 32)
12409 SYMBOL_FLAG_SET_NOTALIGN4 (XEXP (rtl, 0));
12410
12411 if (DECL_ALIGN (decl) == 0 || DECL_ALIGN (decl) % 16)
12412 SYMBOL_FLAG_SET_NOTALIGN2 (XEXP (rtl, 0));
12413 }
12414
12415 /* Literal pool references don't have a decl so they are handled
12416 differently here. We rely on the information in the MEM_ALIGN
12417 entry to decide upon the alignment. */
12418 if (MEM_P (rtl)
12419 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
12420 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0))
12421 && MEM_ALIGN (rtl) != 0
12422 && GET_MODE_BITSIZE (GET_MODE (rtl)) != 0)
12423 {
12424 if (MEM_ALIGN (rtl) % 64)
12425 SYMBOL_FLAG_SET_NOTALIGN8 (XEXP (rtl, 0));
12426
12427 if (MEM_ALIGN (rtl) % 32)
12428 SYMBOL_FLAG_SET_NOTALIGN4 (XEXP (rtl, 0));
12429
12430 if (MEM_ALIGN (rtl) == 0 || MEM_ALIGN (rtl) % 16)
12431 SYMBOL_FLAG_SET_NOTALIGN2 (XEXP (rtl, 0));
12432 }
12433 }
12434
12435 /* Output thunk to FILE that implements a C++ virtual function call (with
12436 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
12437 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
12438 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
12439 relative to the resulting this pointer. */
12440
12441 static void
12442 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
12443 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
12444 tree function)
12445 {
12446 rtx op[10];
12447 int nonlocal = 0;
12448
12449 /* Make sure unwind info is emitted for the thunk if needed. */
12450 final_start_function (emit_barrier (), file, 1);
12451
12452 /* Operand 0 is the target function. */
12453 op[0] = XEXP (DECL_RTL (function), 0);
12454 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
12455 {
12456 nonlocal = 1;
12457 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
12458 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
12459 op[0] = gen_rtx_CONST (Pmode, op[0]);
12460 }
12461
12462 /* Operand 1 is the 'this' pointer. */
12463 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
12464 op[1] = gen_rtx_REG (Pmode, 3);
12465 else
12466 op[1] = gen_rtx_REG (Pmode, 2);
12467
12468 /* Operand 2 is the delta. */
12469 op[2] = GEN_INT (delta);
12470
12471 /* Operand 3 is the vcall_offset. */
12472 op[3] = GEN_INT (vcall_offset);
12473
12474 /* Operand 4 is the temporary register. */
12475 op[4] = gen_rtx_REG (Pmode, 1);
12476
12477 /* Operands 5 to 8 can be used as labels. */
12478 op[5] = NULL_RTX;
12479 op[6] = NULL_RTX;
12480 op[7] = NULL_RTX;
12481 op[8] = NULL_RTX;
12482
12483 /* Operand 9 can be used for temporary register. */
12484 op[9] = NULL_RTX;
12485
12486 /* Generate code. */
12487 if (TARGET_64BIT)
12488 {
12489 /* Setup literal pool pointer if required. */
12490 if ((!DISP_IN_RANGE (delta)
12491 && !CONST_OK_FOR_K (delta)
12492 && !CONST_OK_FOR_Os (delta))
12493 || (!DISP_IN_RANGE (vcall_offset)
12494 && !CONST_OK_FOR_K (vcall_offset)
12495 && !CONST_OK_FOR_Os (vcall_offset)))
12496 {
12497 op[5] = gen_label_rtx ();
12498 output_asm_insn ("larl\t%4,%5", op);
12499 }
12500
12501 /* Add DELTA to this pointer. */
12502 if (delta)
12503 {
12504 if (CONST_OK_FOR_J (delta))
12505 output_asm_insn ("la\t%1,%2(%1)", op);
12506 else if (DISP_IN_RANGE (delta))
12507 output_asm_insn ("lay\t%1,%2(%1)", op);
12508 else if (CONST_OK_FOR_K (delta))
12509 output_asm_insn ("aghi\t%1,%2", op);
12510 else if (CONST_OK_FOR_Os (delta))
12511 output_asm_insn ("agfi\t%1,%2", op);
12512 else
12513 {
12514 op[6] = gen_label_rtx ();
12515 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
12516 }
12517 }
12518
12519 /* Perform vcall adjustment. */
12520 if (vcall_offset)
12521 {
12522 if (DISP_IN_RANGE (vcall_offset))
12523 {
12524 output_asm_insn ("lg\t%4,0(%1)", op);
12525 output_asm_insn ("ag\t%1,%3(%4)", op);
12526 }
12527 else if (CONST_OK_FOR_K (vcall_offset))
12528 {
12529 output_asm_insn ("lghi\t%4,%3", op);
12530 output_asm_insn ("ag\t%4,0(%1)", op);
12531 output_asm_insn ("ag\t%1,0(%4)", op);
12532 }
12533 else if (CONST_OK_FOR_Os (vcall_offset))
12534 {
12535 output_asm_insn ("lgfi\t%4,%3", op);
12536 output_asm_insn ("ag\t%4,0(%1)", op);
12537 output_asm_insn ("ag\t%1,0(%4)", op);
12538 }
12539 else
12540 {
12541 op[7] = gen_label_rtx ();
12542 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
12543 output_asm_insn ("ag\t%4,0(%1)", op);
12544 output_asm_insn ("ag\t%1,0(%4)", op);
12545 }
12546 }
12547
12548 /* Jump to target. */
12549 output_asm_insn ("jg\t%0", op);
12550
12551 /* Output literal pool if required. */
12552 if (op[5])
12553 {
12554 output_asm_insn (".align\t4", op);
12555 targetm.asm_out.internal_label (file, "L",
12556 CODE_LABEL_NUMBER (op[5]));
12557 }
12558 if (op[6])
12559 {
12560 targetm.asm_out.internal_label (file, "L",
12561 CODE_LABEL_NUMBER (op[6]));
12562 output_asm_insn (".long\t%2", op);
12563 }
12564 if (op[7])
12565 {
12566 targetm.asm_out.internal_label (file, "L",
12567 CODE_LABEL_NUMBER (op[7]));
12568 output_asm_insn (".long\t%3", op);
12569 }
12570 }
12571 else
12572 {
12573 /* Setup base pointer if required. */
12574 if (!vcall_offset
12575 || (!DISP_IN_RANGE (delta)
12576 && !CONST_OK_FOR_K (delta)
12577 && !CONST_OK_FOR_Os (delta))
12578 || (!DISP_IN_RANGE (delta)
12579 && !CONST_OK_FOR_K (vcall_offset)
12580 && !CONST_OK_FOR_Os (vcall_offset)))
12581 {
12582 op[5] = gen_label_rtx ();
12583 output_asm_insn ("basr\t%4,0", op);
12584 targetm.asm_out.internal_label (file, "L",
12585 CODE_LABEL_NUMBER (op[5]));
12586 }
12587
12588 /* Add DELTA to this pointer. */
12589 if (delta)
12590 {
12591 if (CONST_OK_FOR_J (delta))
12592 output_asm_insn ("la\t%1,%2(%1)", op);
12593 else if (DISP_IN_RANGE (delta))
12594 output_asm_insn ("lay\t%1,%2(%1)", op);
12595 else if (CONST_OK_FOR_K (delta))
12596 output_asm_insn ("ahi\t%1,%2", op);
12597 else if (CONST_OK_FOR_Os (delta))
12598 output_asm_insn ("afi\t%1,%2", op);
12599 else
12600 {
12601 op[6] = gen_label_rtx ();
12602 output_asm_insn ("a\t%1,%6-%5(%4)", op);
12603 }
12604 }
12605
12606 /* Perform vcall adjustment. */
12607 if (vcall_offset)
12608 {
12609 if (CONST_OK_FOR_J (vcall_offset))
12610 {
12611 output_asm_insn ("l\t%4,0(%1)", op);
12612 output_asm_insn ("a\t%1,%3(%4)", op);
12613 }
12614 else if (DISP_IN_RANGE (vcall_offset))
12615 {
12616 output_asm_insn ("l\t%4,0(%1)", op);
12617 output_asm_insn ("ay\t%1,%3(%4)", op);
12618 }
12619 else if (CONST_OK_FOR_K (vcall_offset))
12620 {
12621 output_asm_insn ("lhi\t%4,%3", op);
12622 output_asm_insn ("a\t%4,0(%1)", op);
12623 output_asm_insn ("a\t%1,0(%4)", op);
12624 }
12625 else if (CONST_OK_FOR_Os (vcall_offset))
12626 {
12627 output_asm_insn ("iilf\t%4,%3", op);
12628 output_asm_insn ("a\t%4,0(%1)", op);
12629 output_asm_insn ("a\t%1,0(%4)", op);
12630 }
12631 else
12632 {
12633 op[7] = gen_label_rtx ();
12634 output_asm_insn ("l\t%4,%7-%5(%4)", op);
12635 output_asm_insn ("a\t%4,0(%1)", op);
12636 output_asm_insn ("a\t%1,0(%4)", op);
12637 }
12638
12639 /* We had to clobber the base pointer register.
12640 Re-setup the base pointer (with a different base). */
12641 op[5] = gen_label_rtx ();
12642 output_asm_insn ("basr\t%4,0", op);
12643 targetm.asm_out.internal_label (file, "L",
12644 CODE_LABEL_NUMBER (op[5]));
12645 }
12646
12647 /* Jump to target. */
12648 op[8] = gen_label_rtx ();
12649
12650 if (!flag_pic)
12651 output_asm_insn ("l\t%4,%8-%5(%4)", op);
12652 else if (!nonlocal)
12653 output_asm_insn ("a\t%4,%8-%5(%4)", op);
12654 /* We cannot call through .plt, since .plt requires %r12 loaded. */
12655 else if (flag_pic == 1)
12656 {
12657 output_asm_insn ("a\t%4,%8-%5(%4)", op);
12658 output_asm_insn ("l\t%4,%0(%4)", op);
12659 }
12660 else if (flag_pic == 2)
12661 {
12662 op[9] = gen_rtx_REG (Pmode, 0);
12663 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
12664 output_asm_insn ("a\t%4,%8-%5(%4)", op);
12665 output_asm_insn ("ar\t%4,%9", op);
12666 output_asm_insn ("l\t%4,0(%4)", op);
12667 }
12668
12669 output_asm_insn ("br\t%4", op);
12670
12671 /* Output literal pool. */
12672 output_asm_insn (".align\t4", op);
12673
12674 if (nonlocal && flag_pic == 2)
12675 output_asm_insn (".long\t%0", op);
12676 if (nonlocal)
12677 {
12678 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
12679 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
12680 }
12681
12682 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
12683 if (!flag_pic)
12684 output_asm_insn (".long\t%0", op);
12685 else
12686 output_asm_insn (".long\t%0-%5", op);
12687
12688 if (op[6])
12689 {
12690 targetm.asm_out.internal_label (file, "L",
12691 CODE_LABEL_NUMBER (op[6]));
12692 output_asm_insn (".long\t%2", op);
12693 }
12694 if (op[7])
12695 {
12696 targetm.asm_out.internal_label (file, "L",
12697 CODE_LABEL_NUMBER (op[7]));
12698 output_asm_insn (".long\t%3", op);
12699 }
12700 }
12701 final_end_function ();
12702 }
12703
12704 static bool
12705 s390_valid_pointer_mode (machine_mode mode)
12706 {
12707 return (mode == SImode || (TARGET_64BIT && mode == DImode));
12708 }
12709
12710 /* Checks whether the given CALL_EXPR would use a call-saved
12711 register. This is used to decide whether sibling call
12712 optimization could be performed on the respective function
12713 call. */
12714
12715 static bool
12716 s390_call_saved_register_used (tree call_expr)
12717 {
12718 CUMULATIVE_ARGS cum_v;
12719 cumulative_args_t cum;
12720 tree parameter;
12721 machine_mode mode;
12722 tree type;
12723 rtx parm_rtx;
12724 int reg, i;
12725
12726 INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
12727 cum = pack_cumulative_args (&cum_v);
12728
12729 for (i = 0; i < call_expr_nargs (call_expr); i++)
12730 {
12731 parameter = CALL_EXPR_ARG (call_expr, i);
12732 gcc_assert (parameter);
12733
12734 /* For an undeclared variable passed as parameter we will get
12735 an ERROR_MARK node here. */
12736 if (TREE_CODE (parameter) == ERROR_MARK)
12737 return true;
12738
12739 type = TREE_TYPE (parameter);
12740 gcc_assert (type);
12741
12742 mode = TYPE_MODE (type);
12743 gcc_assert (mode);
12744
12745 /* We assume that in the target function all parameters are
12746 named. This only has an impact on vector argument register
12747 usage, none of which is call-saved. */
12748 if (pass_by_reference (&cum_v, mode, type, true))
12749 {
12750 mode = Pmode;
12751 type = build_pointer_type (type);
12752 }
12753
12754 parm_rtx = s390_function_arg (cum, mode, type, true);
12755
12756 s390_function_arg_advance (cum, mode, type, true);
12757
12758 if (!parm_rtx)
12759 continue;
12760
12761 if (REG_P (parm_rtx))
12762 {
12763 for (reg = 0;
12764 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
12765 reg++)
12766 if (!call_used_regs[reg + REGNO (parm_rtx)])
12767 return true;
12768 }
12769
12770 if (GET_CODE (parm_rtx) == PARALLEL)
12771 {
12772 int i;
12773
12774 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
12775 {
12776 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
12777
12778 gcc_assert (REG_P (r));
12779
12780 for (reg = 0;
12781 reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
12782 reg++)
12783 if (!call_used_regs[reg + REGNO (r)])
12784 return true;
12785 }
12786 }
12787
12788 }
12789 return false;
12790 }
12791
12792 /* Return true if the given call expression can be
12793 turned into a sibling call.
12794 DECL holds the declaration of the function to be called whereas
12795 EXP is the call expression itself. */
12796
12797 static bool
12798 s390_function_ok_for_sibcall (tree decl, tree exp)
12799 {
12800 /* The TPF epilogue uses register 1. */
12801 if (TARGET_TPF_PROFILING)
12802 return false;
12803
12804 /* The 31 bit PLT code uses register 12 (GOT pointer - caller saved)
12805 which would have to be restored before the sibcall. */
12806 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
12807 return false;
12808
12809 /* Register 6 on s390 is available as an argument register but is
12810 unfortunately call-saved. This makes functions needing this register
12811 for arguments not suitable for sibcalls. */
12812 return !s390_call_saved_register_used (exp);
12813 }
12814
12815 /* Return the fixed registers used for condition codes. */
12816
12817 static bool
12818 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
12819 {
12820 *p1 = CC_REGNUM;
12821 *p2 = INVALID_REGNUM;
12822
12823 return true;
12824 }
12825
12826 /* This function is used by the call expanders of the machine description.
12827 It emits the call insn itself together with the necessary operations
12828 to adjust the target address and returns the emitted insn.
12829 ADDR_LOCATION is the target address rtx
12830 TLS_CALL the location of the thread-local symbol
12831 RESULT_REG the register where the result of the call should be stored
12832 RETADDR_REG the register where the return address should be stored
12833 If this parameter is NULL_RTX the call is considered
12834 to be a sibling call. */
12835
12836 rtx_insn *
12837 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
12838 rtx retaddr_reg)
12839 {
12840 bool plt_call = false;
12841 rtx_insn *insn;
12842 rtx call;
12843 rtx clobber;
12844 rtvec vec;
12845
12846 /* Direct function calls need special treatment. */
12847 if (GET_CODE (addr_location) == SYMBOL_REF)
12848 {
12849 /* When calling a global routine in PIC mode, we must
12850 replace the symbol itself with the PLT stub. */
12851 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
12852 {
12853 if (TARGET_64BIT || retaddr_reg != NULL_RTX)
12854 {
12855 addr_location = gen_rtx_UNSPEC (Pmode,
12856 gen_rtvec (1, addr_location),
12857 UNSPEC_PLT);
12858 addr_location = gen_rtx_CONST (Pmode, addr_location);
12859 plt_call = true;
12860 }
12861 else
12862 /* For -fpic code the PLT entries might use r12 which is
12863 call-saved. Therefore we cannot do a sibcall when
12864 calling directly using a symbol ref. When reaching
12865 this point we decided (in s390_function_ok_for_sibcall)
12866 to do a sibcall for a function pointer but one of the
12867 optimizers was able to get rid of the function pointer
12868 by propagating the symbol ref into the call. This
12869 optimization is illegal for S/390 so we turn the direct
12870 call into an indirect call again. */
12871 addr_location = force_reg (Pmode, addr_location);
12872 }
12873
12874 /* Unless we can use the bras(l) insn, force the
12875 routine address into a register. */
12876 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
12877 {
12878 if (flag_pic)
12879 addr_location = legitimize_pic_address (addr_location, 0);
12880 else
12881 addr_location = force_reg (Pmode, addr_location);
12882 }
12883 }
12884
12885 /* If it is already an indirect call or the code above moved the
12886 SYMBOL_REF to somewhere else make sure the address can be found in
12887 register 1. */
12888 if (retaddr_reg == NULL_RTX
12889 && GET_CODE (addr_location) != SYMBOL_REF
12890 && !plt_call)
12891 {
12892 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
12893 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
12894 }
12895
12896 addr_location = gen_rtx_MEM (QImode, addr_location);
12897 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
12898
12899 if (result_reg != NULL_RTX)
12900 call = gen_rtx_SET (result_reg, call);
12901
12902 if (retaddr_reg != NULL_RTX)
12903 {
12904 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
12905
12906 if (tls_call != NULL_RTX)
12907 vec = gen_rtvec (3, call, clobber,
12908 gen_rtx_USE (VOIDmode, tls_call));
12909 else
12910 vec = gen_rtvec (2, call, clobber);
12911
12912 call = gen_rtx_PARALLEL (VOIDmode, vec);
12913 }
12914
12915 insn = emit_call_insn (call);
12916
12917 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
12918 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
12919 {
12920 /* s390_function_ok_for_sibcall should
12921 have denied sibcalls in this case. */
12922 gcc_assert (retaddr_reg != NULL_RTX);
12923 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, 12));
12924 }
12925 return insn;
12926 }
12927
12928 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
12929
12930 static void
12931 s390_conditional_register_usage (void)
12932 {
12933 int i;
12934
12935 if (flag_pic)
12936 {
12937 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
12938 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
12939 }
12940 if (TARGET_CPU_ZARCH)
12941 {
12942 fixed_regs[BASE_REGNUM] = 0;
12943 call_used_regs[BASE_REGNUM] = 0;
12944 fixed_regs[RETURN_REGNUM] = 0;
12945 call_used_regs[RETURN_REGNUM] = 0;
12946 }
12947 if (TARGET_64BIT)
12948 {
12949 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
12950 call_used_regs[i] = call_really_used_regs[i] = 0;
12951 }
12952 else
12953 {
12954 call_used_regs[FPR4_REGNUM] = call_really_used_regs[FPR4_REGNUM] = 0;
12955 call_used_regs[FPR6_REGNUM] = call_really_used_regs[FPR6_REGNUM] = 0;
12956 }
12957
12958 if (TARGET_SOFT_FLOAT)
12959 {
12960 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
12961 call_used_regs[i] = fixed_regs[i] = 1;
12962 }
12963
12964 /* Disable v16 - v31 for non-vector target. */
12965 if (!TARGET_VX)
12966 {
12967 for (i = VR16_REGNUM; i <= VR31_REGNUM; i++)
12968 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
12969 }
12970 }
12971
12972 /* Corresponding function to eh_return expander. */
12973
12974 static GTY(()) rtx s390_tpf_eh_return_symbol;
12975 void
12976 s390_emit_tpf_eh_return (rtx target)
12977 {
12978 rtx_insn *insn;
12979 rtx reg, orig_ra;
12980
12981 if (!s390_tpf_eh_return_symbol)
12982 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
12983
12984 reg = gen_rtx_REG (Pmode, 2);
12985 orig_ra = gen_rtx_REG (Pmode, 3);
12986
12987 emit_move_insn (reg, target);
12988 emit_move_insn (orig_ra, get_hard_reg_initial_val (Pmode, RETURN_REGNUM));
12989 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
12990 gen_rtx_REG (Pmode, RETURN_REGNUM));
12991 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
12992 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), orig_ra);
12993
12994 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
12995 }
12996
12997 /* Rework the prologue/epilogue to avoid saving/restoring
12998 registers unnecessarily. */
12999
13000 static void
13001 s390_optimize_prologue (void)
13002 {
13003 rtx_insn *insn, *new_insn, *next_insn;
13004
13005 /* Do a final recompute of the frame-related data. */
13006 s390_optimize_register_info ();
13007
13008 /* If all special registers are in fact used, there's nothing we
13009 can do, so no point in walking the insn list. */
13010
13011 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
13012 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
13013 && (TARGET_CPU_ZARCH
13014 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
13015 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
13016 return;
13017
13018 /* Search for prologue/epilogue insns and replace them. */
13019
13020 for (insn = get_insns (); insn; insn = next_insn)
13021 {
13022 int first, last, off;
13023 rtx set, base, offset;
13024 rtx pat;
13025
13026 next_insn = NEXT_INSN (insn);
13027
13028 if (! NONJUMP_INSN_P (insn) || ! RTX_FRAME_RELATED_P (insn))
13029 continue;
13030
13031 pat = PATTERN (insn);
13032
13033 /* Remove ldgr/lgdr instructions used for saving and restoring
13034 GPRs if possible. */
13035 if (TARGET_Z10
13036 && GET_CODE (pat) == SET
13037 && GET_MODE (SET_SRC (pat)) == DImode
13038 && REG_P (SET_SRC (pat))
13039 && REG_P (SET_DEST (pat)))
13040 {
13041 int src_regno = REGNO (SET_SRC (pat));
13042 int dest_regno = REGNO (SET_DEST (pat));
13043 int gpr_regno;
13044 int fpr_regno;
13045
13046 if (!((GENERAL_REGNO_P (src_regno) && FP_REGNO_P (dest_regno))
13047 || (FP_REGNO_P (src_regno) && GENERAL_REGNO_P (dest_regno))))
13048 continue;
13049
13050 gpr_regno = GENERAL_REGNO_P (src_regno) ? src_regno : dest_regno;
13051 fpr_regno = FP_REGNO_P (src_regno) ? src_regno : dest_regno;
13052
13053 /* GPR must be call-saved, FPR must be call-clobbered. */
13054 if (!call_really_used_regs[fpr_regno]
13055 || call_really_used_regs[gpr_regno])
13056 continue;
13057
13058 /* It must not happen that what we once saved in an FPR now
13059 needs a stack slot. */
13060 gcc_assert (cfun_gpr_save_slot (gpr_regno) != SAVE_SLOT_STACK);
13061
13062 if (cfun_gpr_save_slot (gpr_regno) == SAVE_SLOT_NONE)
13063 {
13064 remove_insn (insn);
13065 continue;
13066 }
13067 }
13068
13069 if (GET_CODE (pat) == PARALLEL
13070 && store_multiple_operation (pat, VOIDmode))
13071 {
13072 set = XVECEXP (pat, 0, 0);
13073 first = REGNO (SET_SRC (set));
13074 last = first + XVECLEN (pat, 0) - 1;
13075 offset = const0_rtx;
13076 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
13077 off = INTVAL (offset);
13078
13079 if (GET_CODE (base) != REG || off < 0)
13080 continue;
13081 if (cfun_frame_layout.first_save_gpr != -1
13082 && (cfun_frame_layout.first_save_gpr < first
13083 || cfun_frame_layout.last_save_gpr > last))
13084 continue;
13085 if (REGNO (base) != STACK_POINTER_REGNUM
13086 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13087 continue;
13088 if (first > BASE_REGNUM || last < BASE_REGNUM)
13089 continue;
13090
13091 if (cfun_frame_layout.first_save_gpr != -1)
13092 {
13093 rtx s_pat = save_gprs (base,
13094 off + (cfun_frame_layout.first_save_gpr
13095 - first) * UNITS_PER_LONG,
13096 cfun_frame_layout.first_save_gpr,
13097 cfun_frame_layout.last_save_gpr);
13098 new_insn = emit_insn_before (s_pat, insn);
13099 INSN_ADDRESSES_NEW (new_insn, -1);
13100 }
13101
13102 remove_insn (insn);
13103 continue;
13104 }
13105
13106 if (cfun_frame_layout.first_save_gpr == -1
13107 && GET_CODE (pat) == SET
13108 && GENERAL_REG_P (SET_SRC (pat))
13109 && GET_CODE (SET_DEST (pat)) == MEM)
13110 {
13111 set = pat;
13112 first = REGNO (SET_SRC (set));
13113 offset = const0_rtx;
13114 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
13115 off = INTVAL (offset);
13116
13117 if (GET_CODE (base) != REG || off < 0)
13118 continue;
13119 if (REGNO (base) != STACK_POINTER_REGNUM
13120 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13121 continue;
13122
13123 remove_insn (insn);
13124 continue;
13125 }
13126
13127 if (GET_CODE (pat) == PARALLEL
13128 && load_multiple_operation (pat, VOIDmode))
13129 {
13130 set = XVECEXP (pat, 0, 0);
13131 first = REGNO (SET_DEST (set));
13132 last = first + XVECLEN (pat, 0) - 1;
13133 offset = const0_rtx;
13134 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
13135 off = INTVAL (offset);
13136
13137 if (GET_CODE (base) != REG || off < 0)
13138 continue;
13139
13140 if (cfun_frame_layout.first_restore_gpr != -1
13141 && (cfun_frame_layout.first_restore_gpr < first
13142 || cfun_frame_layout.last_restore_gpr > last))
13143 continue;
13144 if (REGNO (base) != STACK_POINTER_REGNUM
13145 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13146 continue;
13147 if (first > BASE_REGNUM || last < BASE_REGNUM)
13148 continue;
13149
13150 if (cfun_frame_layout.first_restore_gpr != -1)
13151 {
13152 rtx rpat = restore_gprs (base,
13153 off + (cfun_frame_layout.first_restore_gpr
13154 - first) * UNITS_PER_LONG,
13155 cfun_frame_layout.first_restore_gpr,
13156 cfun_frame_layout.last_restore_gpr);
13157
13158 /* Remove REG_CFA_RESTOREs for registers that we no
13159 longer need to save. */
13160 REG_NOTES (rpat) = REG_NOTES (insn);
13161 for (rtx *ptr = &REG_NOTES (rpat); *ptr; )
13162 if (REG_NOTE_KIND (*ptr) == REG_CFA_RESTORE
13163 && ((int) REGNO (XEXP (*ptr, 0))
13164 < cfun_frame_layout.first_restore_gpr))
13165 *ptr = XEXP (*ptr, 1);
13166 else
13167 ptr = &XEXP (*ptr, 1);
13168 new_insn = emit_insn_before (rpat, insn);
13169 RTX_FRAME_RELATED_P (new_insn) = 1;
13170 INSN_ADDRESSES_NEW (new_insn, -1);
13171 }
13172
13173 remove_insn (insn);
13174 continue;
13175 }
13176
13177 if (cfun_frame_layout.first_restore_gpr == -1
13178 && GET_CODE (pat) == SET
13179 && GENERAL_REG_P (SET_DEST (pat))
13180 && GET_CODE (SET_SRC (pat)) == MEM)
13181 {
13182 set = pat;
13183 first = REGNO (SET_DEST (set));
13184 offset = const0_rtx;
13185 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
13186 off = INTVAL (offset);
13187
13188 if (GET_CODE (base) != REG || off < 0)
13189 continue;
13190
13191 if (REGNO (base) != STACK_POINTER_REGNUM
13192 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13193 continue;
13194
13195 remove_insn (insn);
13196 continue;
13197 }
13198 }
13199 }
13200
13201 /* On z10 and later the dynamic branch prediction must see the
13202 backward jump within a certain window. If not, it falls back to
13203 the static prediction. This function rearranges the loop backward
13204 branch in a way which makes the static prediction always correct.
13205 The function returns true if it added an instruction. */
13206 static bool
13207 s390_fix_long_loop_prediction (rtx_insn *insn)
13208 {
13209 rtx set = single_set (insn);
13210 rtx code_label, label_ref, new_label;
13211 rtx_insn *uncond_jump;
13212 rtx_insn *cur_insn;
13213 rtx tmp;
13214 int distance;
13215
13216 /* This will exclude branch on count and branch on index patterns
13217 since these are correctly statically predicted. */
13218 if (!set
13219 || SET_DEST (set) != pc_rtx
13220 || GET_CODE (SET_SRC(set)) != IF_THEN_ELSE)
13221 return false;
13222
13223 /* Skip conditional returns. */
13224 if (ANY_RETURN_P (XEXP (SET_SRC (set), 1))
13225 && XEXP (SET_SRC (set), 2) == pc_rtx)
13226 return false;
13227
13228 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
13229 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
13230
13231 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
13232
13233 code_label = XEXP (label_ref, 0);
13234
13235 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
13236 || INSN_ADDRESSES (INSN_UID (insn)) == -1
13237 || (INSN_ADDRESSES (INSN_UID (insn))
13238 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
13239 return false;
13240
13241 for (distance = 0, cur_insn = PREV_INSN (insn);
13242 distance < PREDICT_DISTANCE - 6;
13243 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
13244 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
13245 return false;
13246
13247 new_label = gen_label_rtx ();
13248 uncond_jump = emit_jump_insn_after (
13249 gen_rtx_SET (pc_rtx,
13250 gen_rtx_LABEL_REF (VOIDmode, code_label)),
13251 insn);
13252 emit_label_after (new_label, uncond_jump);
13253
13254 tmp = XEXP (SET_SRC (set), 1);
13255 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
13256 XEXP (SET_SRC (set), 2) = tmp;
13257 INSN_CODE (insn) = -1;
13258
13259 XEXP (label_ref, 0) = new_label;
13260 JUMP_LABEL (insn) = new_label;
13261 JUMP_LABEL (uncond_jump) = code_label;
13262
13263 return true;
13264 }
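/* Schematically (labels and mnemonics are illustrative only), the
transformation above turns

loop:
...
brc <cond>,loop ; far backward conditional branch

into

loop:
...
brc <inverted cond>,skip ; short forward conditional branch
j loop ; unconditional backward branch
skip:

so that the loop-closing backward branch becomes unconditional and the
remaining conditional branch is a short forward one, which the static
prediction handles as described above. */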
13265
13266 /* Returns 1 if INSN reads the value of REG for purposes not related
13267 to addressing of memory, and 0 otherwise. */
13268 static int
13269 s390_non_addr_reg_read_p (rtx reg, rtx_insn *insn)
13270 {
13271 return reg_referenced_p (reg, PATTERN (insn))
13272 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
13273 }
13274
13275 /* Starting from INSN find_cond_jump looks downwards in the insn
13276 stream for a single jump insn which is the last user of the
13277 condition code set in INSN. */
13278 static rtx_insn *
13279 find_cond_jump (rtx_insn *insn)
13280 {
13281 for (; insn; insn = NEXT_INSN (insn))
13282 {
13283 rtx ite, cc;
13284
13285 if (LABEL_P (insn))
13286 break;
13287
13288 if (!JUMP_P (insn))
13289 {
13290 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
13291 break;
13292 continue;
13293 }
13294
13295 /* This will be triggered by a return. */
13296 if (GET_CODE (PATTERN (insn)) != SET)
13297 break;
13298
13299 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
13300 ite = SET_SRC (PATTERN (insn));
13301
13302 if (GET_CODE (ite) != IF_THEN_ELSE)
13303 break;
13304
13305 cc = XEXP (XEXP (ite, 0), 0);
13306 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
13307 break;
13308
13309 if (find_reg_note (insn, REG_DEAD, cc))
13310 return insn;
13311 break;
13312 }
13313
13314 return NULL;
13315 }
13316
13317 /* Swap the condition in COND and the operands in OP0 and OP1 so that
13318 the semantics does not change. If NULL_RTX is passed as COND the
13319 function tries to find the conditional jump starting with INSN. */
13320 static void
13321 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx_insn *insn)
13322 {
13323 rtx tmp = *op0;
13324
13325 if (cond == NULL_RTX)
13326 {
13327 rtx_insn *jump = find_cond_jump (NEXT_INSN (insn));
13328 rtx set = jump ? single_set (jump) : NULL_RTX;
13329
13330 if (set == NULL_RTX)
13331 return;
13332
13333 cond = XEXP (SET_SRC (set), 0);
13334 }
13335
13336 *op0 = *op1;
13337 *op1 = tmp;
13338 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
13339 }
13340
13341 /* On z10, instructions of the compare-and-branch family have the
13342 property of accessing the register occurring as the second operand with
13343 its bits complemented. If such a compare is grouped with a second
13344 instruction that accesses the same register non-complemented, and
13345 if that register's value is delivered via a bypass, then the
13346 pipeline recycles, thereby causing significant performance decline.
13347 This function locates such situations and exchanges the two
13348 operands of the compare. The function returns true whenever it
13349 added an insn. */
13350 static bool
13351 s390_z10_optimize_cmp (rtx_insn *insn)
13352 {
13353 rtx_insn *prev_insn, *next_insn;
13354 bool insn_added_p = false;
13355 rtx cond, *op0, *op1;
13356
13357 if (GET_CODE (PATTERN (insn)) == PARALLEL)
13358 {
13359 /* Handle compare and branch and branch on count
13360 instructions. */
13361 rtx pattern = single_set (insn);
13362
13363 if (!pattern
13364 || SET_DEST (pattern) != pc_rtx
13365 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
13366 return false;
13367
13368 cond = XEXP (SET_SRC (pattern), 0);
13369 op0 = &XEXP (cond, 0);
13370 op1 = &XEXP (cond, 1);
13371 }
13372 else if (GET_CODE (PATTERN (insn)) == SET)
13373 {
13374 rtx src, dest;
13375
13376 /* Handle normal compare instructions. */
13377 src = SET_SRC (PATTERN (insn));
13378 dest = SET_DEST (PATTERN (insn));
13379
13380 if (!REG_P (dest)
13381 || !CC_REGNO_P (REGNO (dest))
13382 || GET_CODE (src) != COMPARE)
13383 return false;
13384
13385 /* s390_swap_cmp will try to find the conditional
13386 jump when passing NULL_RTX as condition. */
13387 cond = NULL_RTX;
13388 op0 = &XEXP (src, 0);
13389 op1 = &XEXP (src, 1);
13390 }
13391 else
13392 return false;
13393
13394 if (!REG_P (*op0) || !REG_P (*op1))
13395 return false;
13396
13397 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
13398 return false;
13399
13400 /* Swap the COMPARE arguments and its mask if there is a
13401 conflicting access in the previous insn. */
13402 prev_insn = prev_active_insn (insn);
13403 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
13404 && reg_referenced_p (*op1, PATTERN (prev_insn)))
13405 s390_swap_cmp (cond, op0, op1, insn);
13406
13407 /* Check if there is a conflict with the next insn. If there
13408 was no conflict with the previous insn, then swap the
13409 COMPARE arguments and its mask. If we already swapped
13410 the operands, or if swapping them would cause a conflict
13411 with the previous insn, issue a NOP after the COMPARE in
13412 order to separate the two instructions. */
13413 next_insn = next_active_insn (insn);
13414 if (next_insn != NULL_RTX && INSN_P (next_insn)
13415 && s390_non_addr_reg_read_p (*op1, next_insn))
13416 {
13417 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
13418 && s390_non_addr_reg_read_p (*op0, prev_insn))
13419 {
13420 if (REGNO (*op1) == 0)
13421 emit_insn_after (gen_nop1 (), insn);
13422 else
13423 emit_insn_after (gen_nop (), insn);
13424 insn_added_p = true;
13425 }
13426 else
13427 s390_swap_cmp (cond, op0, op1, insn);
13428 }
13429 return insn_added_p;
13430 }
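/* A schematic example of the conflict handled above (registers, mask and
mnemonics are illustrative only):

agr %r5,%r3 ; reads %r3 normally
crj %r2,%r3,...,label ; reads %r3 bit-complemented as second operand

Here the compare is rewritten as "crj %r3,%r2,...,label" with the branch
condition swapped accordingly; if swapping would only move the conflict
to the other neighboring insn, a NOP is emitted after the compare
instead to separate the two instructions. */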
13431
13432 /* Perform machine-dependent processing. */
13433
13434 static void
13435 s390_reorg (void)
13436 {
13437 bool pool_overflow = false;
13438 int hw_before, hw_after;
13439
13440 /* Make sure all splits have been performed; splits after
13441 machine_dependent_reorg might confuse insn length counts. */
13442 split_all_insns_noflow ();
13443
13444 /* Install the main literal pool and the associated base
13445 register load insns.
13446
13447 In addition, there are two problematic situations we need
13448 to correct:
13449
13450 - the literal pool might be > 4096 bytes in size, so that
13451 some of its elements cannot be directly accessed
13452
13453 - a branch target might be > 64K away from the branch, so that
13454 it is not possible to use a PC-relative instruction.
13455
13456 To fix those, we split the single literal pool into multiple
13457 pool chunks, reloading the pool base register at various
13458 points throughout the function to ensure it always points to
13459 the pool chunk the following code expects, and / or replace
13460 PC-relative branches by absolute branches.
13461
13462 However, the two problems are interdependent: splitting the
13463 literal pool can move a branch further away from its target,
13464 causing the 64K limit to overflow, and on the other hand,
13465 replacing a PC-relative branch by an absolute branch means
13466 we need to put the branch target address into the literal
13467 pool, possibly causing it to overflow.
13468
13469 So, we loop trying to fix up both problems until we manage
13470 to satisfy both conditions at the same time. Note that the
13471 loop is guaranteed to terminate as every pass of the loop
13472 strictly decreases the total number of PC-relative branches
13473 in the function. (This is not completely true as there
13474 might be branch-over-pool insns introduced by chunkify_start.
13475 Those never need to be split however.) */
13476
13477 for (;;)
13478 {
13479 struct constant_pool *pool = NULL;
13480
13481 /* Collect the literal pool. */
13482 if (!pool_overflow)
13483 {
13484 pool = s390_mainpool_start ();
13485 if (!pool)
13486 pool_overflow = true;
13487 }
13488
13489 /* If literal pool overflowed, start to chunkify it. */
13490 if (pool_overflow)
13491 pool = s390_chunkify_start ();
13492
13493 /* Split out-of-range branches. If this has created new
13494 literal pool entries, cancel current chunk list and
13495 recompute it. zSeries machines have large branch
13496 instructions, so we never need to split a branch. */
13497 if (!TARGET_CPU_ZARCH && s390_split_branches ())
13498 {
13499 if (pool_overflow)
13500 s390_chunkify_cancel (pool);
13501 else
13502 s390_mainpool_cancel (pool);
13503
13504 continue;
13505 }
13506
13507 /* If we made it up to here, both conditions are satisfied.
13508 Finish up literal pool related changes. */
13509 if (pool_overflow)
13510 s390_chunkify_finish (pool);
13511 else
13512 s390_mainpool_finish (pool);
13513
13514 /* We're done splitting branches. */
13515 cfun->machine->split_branches_pending_p = false;
13516 break;
13517 }
13518
13519 /* Generate out-of-pool execute target insns. */
13520 if (TARGET_CPU_ZARCH)
13521 {
13522 rtx_insn *insn, *target;
13523 rtx label;
13524
13525 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
13526 {
13527 label = s390_execute_label (insn);
13528 if (!label)
13529 continue;
13530
13531 gcc_assert (label != const0_rtx);
13532
13533 target = emit_label (XEXP (label, 0));
13534 INSN_ADDRESSES_NEW (target, -1);
13535
13536 target = emit_insn (s390_execute_target (insn));
13537 INSN_ADDRESSES_NEW (target, -1);
13538 }
13539 }
13540
13541 /* Try to optimize prologue and epilogue further. */
13542 s390_optimize_prologue ();
13543
13544 /* Walk over the insns and do some >=z10 specific changes. */
13545 if (s390_tune >= PROCESSOR_2097_Z10)
13546 {
13547 rtx_insn *insn;
13548 bool insn_added_p = false;
13549
13550 /* The insn lengths and addresses have to be up to date for the
13551 following manipulations. */
13552 shorten_branches (get_insns ());
13553
13554 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
13555 {
13556 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
13557 continue;
13558
13559 if (JUMP_P (insn))
13560 insn_added_p |= s390_fix_long_loop_prediction (insn);
13561
13562 if ((GET_CODE (PATTERN (insn)) == PARALLEL
13563 || GET_CODE (PATTERN (insn)) == SET)
13564 && s390_tune == PROCESSOR_2097_Z10)
13565 insn_added_p |= s390_z10_optimize_cmp (insn);
13566 }
13567
13568 /* Adjust branches if we added new instructions. */
13569 if (insn_added_p)
13570 shorten_branches (get_insns ());
13571 }
13572
13573 s390_function_num_hotpatch_hw (current_function_decl, &hw_before, &hw_after);
13574 if (hw_after > 0)
13575 {
13576 rtx_insn *insn;
13577
13578 /* Insert NOPs for hotpatching. */
13579 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
13580 /* Emit NOPs
13581 1. inside the area covered by debug information to allow setting
13582 breakpoints at the NOPs,
13583 2. before any insn which results in an asm instruction,
13584 3. before in-function labels to avoid jumping to the NOPs, for
13585 example as part of a loop,
13586 4. before any barrier in case the function is completely empty
13587 (__builtin_unreachable ()) and has neither internal labels nor
13588 active insns.
13589 */
13590 if (active_insn_p (insn) || BARRIER_P (insn) || LABEL_P (insn))
13591 break;
13592 /* Output a series of NOPs before the first active insn. */
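/* For example (illustrative), hw_after == 5 halfwords on a zarch
target is covered by one 6-byte NOP (3 halfwords) followed by one
4-byte NOP (2 halfwords). */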
13593 while (insn && hw_after > 0)
13594 {
13595 if (hw_after >= 3 && TARGET_CPU_ZARCH)
13596 {
13597 emit_insn_before (gen_nop_6_byte (), insn);
13598 hw_after -= 3;
13599 }
13600 else if (hw_after >= 2)
13601 {
13602 emit_insn_before (gen_nop_4_byte (), insn);
13603 hw_after -= 2;
13604 }
13605 else
13606 {
13607 emit_insn_before (gen_nop_2_byte (), insn);
13608 hw_after -= 1;
13609 }
13610 }
13611 }
13612 }
13613
13614 /* Return true if INSN is a fp load insn writing register REGNO. */
13615 static inline bool
13616 s390_fpload_toreg (rtx_insn *insn, unsigned int regno)
13617 {
13618 rtx set;
13619 enum attr_type flag = s390_safe_attr_type (insn);
13620
13621 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
13622 return false;
13623
13624 set = single_set (insn);
13625
13626 if (set == NULL_RTX)
13627 return false;
13628
13629 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
13630 return false;
13631
13632 if (REGNO (SET_DEST (set)) != regno)
13633 return false;
13634
13635 return true;
13636 }
13637
13638 /* This value describes the distance to be avoided between an
13639 arithmetic fp instruction and an fp load writing the same register.
13640 Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 are
13641 fine, but the exact value has to be avoided. Otherwise the FP
13642 pipeline will throw an exception causing a major penalty. */
13643 #define Z10_EARLYLOAD_DISTANCE 7
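/* For example (mnemonics are illustrative only), with the value 7 the
problematic situation is roughly

adbr %f0,%f2 ; arithmetic fp insn writing %f0
... ; six further insns already issued
ld %f0,0(%r1) ; fp load of %f0 issued next hits the exact distance

and s390_z10_prevent_earlyload_conflicts below moves such a load to the
very end of the ready list so that it is issued at a different
distance. */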
13644
13645 /* Rearrange the ready list in order to avoid the situation described
13646 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
13647 moved to the very end of the ready list. */
13648 static void
13649 s390_z10_prevent_earlyload_conflicts (rtx_insn **ready, int *nready_p)
13650 {
13651 unsigned int regno;
13652 int nready = *nready_p;
13653 rtx_insn *tmp;
13654 int i;
13655 rtx_insn *insn;
13656 rtx set;
13657 enum attr_type flag;
13658 int distance;
13659
13660 /* Skip DISTANCE - 1 active insns. */
13661 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
13662 distance > 0 && insn != NULL_RTX;
13663 distance--, insn = prev_active_insn (insn))
13664 if (CALL_P (insn) || JUMP_P (insn))
13665 return;
13666
13667 if (insn == NULL_RTX)
13668 return;
13669
13670 set = single_set (insn);
13671
13672 if (set == NULL_RTX || !REG_P (SET_DEST (set))
13673 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
13674 return;
13675
13676 flag = s390_safe_attr_type (insn);
13677
13678 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
13679 return;
13680
13681 regno = REGNO (SET_DEST (set));
13682 i = nready - 1;
13683
13684 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
13685 i--;
13686
13687 if (!i)
13688 return;
13689
13690 tmp = ready[i];
13691 memmove (&ready[1], &ready[0], sizeof (rtx_insn *) * i);
13692 ready[0] = tmp;
13693 }
13694
13695
13696 /* The s390_sched_state variable tracks the state of the current or
13697 the last instruction group.
13698
13699 0,1,2 number of instructions scheduled in the current group
13700 3 the last group is complete - normal insns
13701 4 the last group was a cracked/expanded insn */
13702
13703 static int s390_sched_state;
13704
13705 #define S390_SCHED_STATE_NORMAL 3
13706 #define S390_SCHED_STATE_CRACKED 4
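/* For example (illustrative), after a completed group (state 3) a normal
insn starts a new group and moves the state to 1, two further normal
insns advance it to 3 again, and a cracked or expanded insn at any point
sets it to 4; see s390_sched_variable_issue below. */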
13707
13708 #define S390_SCHED_ATTR_MASK_CRACKED 0x1
13709 #define S390_SCHED_ATTR_MASK_EXPANDED 0x2
13710 #define S390_SCHED_ATTR_MASK_ENDGROUP 0x4
13711 #define S390_SCHED_ATTR_MASK_GROUPALONE 0x8
13712
13713 static unsigned int
13714 s390_get_sched_attrmask (rtx_insn *insn)
13715 {
13716 unsigned int mask = 0;
13717
13718 switch (s390_tune)
13719 {
13720 case PROCESSOR_2827_ZEC12:
13721 if (get_attr_zEC12_cracked (insn))
13722 mask |= S390_SCHED_ATTR_MASK_CRACKED;
13723 if (get_attr_zEC12_expanded (insn))
13724 mask |= S390_SCHED_ATTR_MASK_EXPANDED;
13725 if (get_attr_zEC12_endgroup (insn))
13726 mask |= S390_SCHED_ATTR_MASK_ENDGROUP;
13727 if (get_attr_zEC12_groupalone (insn))
13728 mask |= S390_SCHED_ATTR_MASK_GROUPALONE;
13729 break;
13730 case PROCESSOR_2964_Z13:
13731 if (get_attr_z13_cracked (insn))
13732 mask |= S390_SCHED_ATTR_MASK_CRACKED;
13733 if (get_attr_z13_expanded (insn))
13734 mask |= S390_SCHED_ATTR_MASK_EXPANDED;
13735 if (get_attr_z13_endgroup (insn))
13736 mask |= S390_SCHED_ATTR_MASK_ENDGROUP;
13737 if (get_attr_z13_groupalone (insn))
13738 mask |= S390_SCHED_ATTR_MASK_GROUPALONE;
13739 break;
13740 default:
13741 gcc_unreachable ();
13742 }
13743 return mask;
13744 }
13745
13746 static unsigned int
13747 s390_get_unit_mask (rtx_insn *insn, int *units)
13748 {
13749 unsigned int mask = 0;
13750
13751 switch (s390_tune)
13752 {
13753 case PROCESSOR_2964_Z13:
13754 *units = 3;
13755 if (get_attr_z13_unit_lsu (insn))
13756 mask |= 1 << 0;
13757 if (get_attr_z13_unit_fxu (insn))
13758 mask |= 1 << 1;
13759 if (get_attr_z13_unit_vfu (insn))
13760 mask |= 1 << 2;
13761 break;
13762 default:
13763 gcc_unreachable ();
13764 }
13765 return mask;
13766 }
13767
13768 /* Return the scheduling score for INSN. The higher the score the
13769 better. The score is calculated from the OOO scheduling attributes
13770 of INSN and the scheduling state s390_sched_state. */
13771 static int
13772 s390_sched_score (rtx_insn *insn)
13773 {
13774 unsigned int mask = s390_get_sched_attrmask (insn);
13775 int score = 0;
13776
13777 switch (s390_sched_state)
13778 {
13779 case 0:
13780 /* Try to put insns into the first slot which would otherwise
13781 break a group. */
13782 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
13783 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0)
13784 score += 5;
13785 if ((mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
13786 score += 10;
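/* fallthrough */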
13787 case 1:
13788 /* Prefer not cracked insns while trying to put together a
13789 group. */
13790 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) == 0
13791 && (mask & S390_SCHED_ATTR_MASK_EXPANDED) == 0
13792 && (mask & S390_SCHED_ATTR_MASK_GROUPALONE) == 0)
13793 score += 10;
13794 if ((mask & S390_SCHED_ATTR_MASK_ENDGROUP) == 0)
13795 score += 5;
13796 break;
13797 case 2:
13798 /* Prefer not cracked insns while trying to put together a
13799 group. */
13800 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) == 0
13801 && (mask & S390_SCHED_ATTR_MASK_EXPANDED) == 0
13802 && (mask & S390_SCHED_ATTR_MASK_GROUPALONE) == 0)
13803 score += 10;
13804 /* Prefer endgroup insns in the last slot. */
13805 if ((mask & S390_SCHED_ATTR_MASK_ENDGROUP) != 0)
13806 score += 10;
13807 break;
13808 case S390_SCHED_STATE_NORMAL:
13809 /* Prefer not cracked insns if the last was not cracked. */
13810 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) == 0
13811 && (mask & S390_SCHED_ATTR_MASK_EXPANDED) == 0)
13812 score += 5;
13813 if ((mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
13814 score += 10;
13815 break;
13816 case S390_SCHED_STATE_CRACKED:
13817 /* Try to keep cracked insns together to prevent them from
13818 interrupting groups. */
13819 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
13820 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0)
13821 score += 5;
13822 break;
13823 }
13824
13825 if (s390_tune == PROCESSOR_2964_Z13)
13826 {
13827 int units, i;
13828 unsigned unit_mask, m = 1;
13829
13830 unit_mask = s390_get_unit_mask (insn, &units);
13831 gcc_assert (units <= MAX_SCHED_UNITS);
13832
13833 /* Add a score in range 0..MAX_SCHED_MIX_SCORE depending on how long
13834 ago the last insn of this unit type got scheduled. This is
13835 supposed to help provide a proper instruction mix to the
13836 CPU. */
13837 for (i = 0; i < units; i++, m <<= 1)
13838 if (m & unit_mask)
13839 score += (last_scheduled_unit_distance[i] * MAX_SCHED_MIX_SCORE /
13840 MAX_SCHED_MIX_DISTANCE);
13841 }
13842 return score;
13843 }
13844
13845 /* This function is called via hook TARGET_SCHED_REORDER before
13846 issuing one insn from list READY which contains *NREADYP entries.
13847 For target z10 it reorders load instructions to avoid early load
13848 conflicts in the floating point pipeline. */
13849 static int
13850 s390_sched_reorder (FILE *file, int verbose,
13851 rtx_insn **ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
13852 {
13853 if (s390_tune == PROCESSOR_2097_Z10
13854 && reload_completed
13855 && *nreadyp > 1)
13856 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
13857
13858 if (s390_tune >= PROCESSOR_2827_ZEC12
13859 && reload_completed
13860 && *nreadyp > 1)
13861 {
13862 int i;
13863 int last_index = *nreadyp - 1;
13864 int max_index = -1;
13865 int max_score = -1;
13866 rtx_insn *tmp;
13867
13868 /* Just move the insn with the highest score to the top (the
13869 end) of the list. A full sort is not needed since a conflict
13870 in the hazard recognition cannot happen. So the top insn in
13871 the ready list will always be taken. */
13872 for (i = last_index; i >= 0; i--)
13873 {
13874 int score;
13875
13876 if (recog_memoized (ready[i]) < 0)
13877 continue;
13878
13879 score = s390_sched_score (ready[i]);
13880 if (score > max_score)
13881 {
13882 max_score = score;
13883 max_index = i;
13884 }
13885 }
13886
13887 if (max_index != -1)
13888 {
13889 if (max_index != last_index)
13890 {
13891 tmp = ready[max_index];
13892 ready[max_index] = ready[last_index];
13893 ready[last_index] = tmp;
13894
13895 if (verbose > 5)
13896 fprintf (file,
13897 ";;\t\tBACKEND: move insn %d to the top of list\n",
13898 INSN_UID (ready[last_index]));
13899 }
13900 else if (verbose > 5)
13901 fprintf (file,
13902 ";;\t\tBACKEND: best insn %d already on top\n",
13903 INSN_UID (ready[last_index]));
13904 }
13905
13906 if (verbose > 5)
13907 {
13908 fprintf (file, "ready list ooo attributes - sched state: %d\n",
13909 s390_sched_state);
13910
13911 for (i = last_index; i >= 0; i--)
13912 {
13913 unsigned int sched_mask;
13914 rtx_insn *insn = ready[i];
13915
13916 if (recog_memoized (insn) < 0)
13917 continue;
13918
13919 sched_mask = s390_get_sched_attrmask (insn);
13920 fprintf (file, ";;\t\tBACKEND: insn %d score: %d: ",
13921 INSN_UID (insn),
13922 s390_sched_score (insn));
13923 #define PRINT_SCHED_ATTR(M, ATTR) fprintf (file, "%s ",\
13924 ((M) & sched_mask) ? #ATTR : "");
13925 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_CRACKED, cracked);
13926 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_EXPANDED, expanded);
13927 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_ENDGROUP, endgroup);
13928 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_GROUPALONE, groupalone);
13929 #undef PRINT_SCHED_ATTR
13930 if (s390_tune == PROCESSOR_2964_Z13)
13931 {
13932 unsigned int unit_mask, m = 1;
13933 int units, j;
13934
13935 unit_mask = s390_get_unit_mask (insn, &units);
13936 fprintf (file, "(units:");
13937 for (j = 0; j < units; j++, m <<= 1)
13938 if (m & unit_mask)
13939 fprintf (file, " u%d", j);
13940 fprintf (file, ")");
13941 }
13942 fprintf (file, "\n");
13943 }
13944 }
13945 }
13946
13947 return s390_issue_rate ();
13948 }
13949
13950
13951 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
13952 the scheduler has issued INSN. It stores the last issued insn into
13953 last_scheduled_insn in order to make it available for
13954 s390_sched_reorder. */
13955 static int
13956 s390_sched_variable_issue (FILE *file, int verbose, rtx_insn *insn, int more)
13957 {
13958 last_scheduled_insn = insn;
13959
13960 if (s390_tune >= PROCESSOR_2827_ZEC12
13961 && reload_completed
13962 && recog_memoized (insn) >= 0)
13963 {
13964 unsigned int mask = s390_get_sched_attrmask (insn);
13965
13966 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
13967 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0)
13968 s390_sched_state = S390_SCHED_STATE_CRACKED;
13969 else if ((mask & S390_SCHED_ATTR_MASK_ENDGROUP) != 0
13970 || (mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
13971 s390_sched_state = S390_SCHED_STATE_NORMAL;
13972 else
13973 {
13974 /* Only normal insns are left (mask == 0). */
13975 switch (s390_sched_state)
13976 {
13977 case 0:
13978 case 1:
13979 case 2:
13980 case S390_SCHED_STATE_NORMAL:
13981 if (s390_sched_state == S390_SCHED_STATE_NORMAL)
13982 s390_sched_state = 1;
13983 else
13984 s390_sched_state++;
13985
13986 break;
13987 case S390_SCHED_STATE_CRACKED:
13988 s390_sched_state = S390_SCHED_STATE_NORMAL;
13989 break;
13990 }
13991 }
13992
13993 if (s390_tune == PROCESSOR_2964_Z13)
13994 {
13995 int units, i;
13996 unsigned unit_mask, m = 1;
13997
13998 unit_mask = s390_get_unit_mask (insn, &units);
13999 gcc_assert (units <= MAX_SCHED_UNITS);
14000
14001 for (i = 0; i < units; i++, m <<= 1)
14002 if (m & unit_mask)
14003 last_scheduled_unit_distance[i] = 0;
14004 else if (last_scheduled_unit_distance[i] < MAX_SCHED_MIX_DISTANCE)
14005 last_scheduled_unit_distance[i]++;
14006 }
14007
14008 if (verbose > 5)
14009 {
14010 unsigned int sched_mask;
14011
14012 sched_mask = s390_get_sched_attrmask (insn);
14013
14014 fprintf (file, ";;\t\tBACKEND: insn %d: ", INSN_UID (insn));
14015 #define PRINT_SCHED_ATTR(M, ATTR) fprintf (file, "%s ", ((M) & sched_mask) ? #ATTR : "");
14016 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_CRACKED, cracked);
14017 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_EXPANDED, expanded);
14018 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_ENDGROUP, endgroup);
14019 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_GROUPALONE, groupalone);
14020 #undef PRINT_SCHED_ATTR
14021
14022 if (s390_tune == PROCESSOR_2964_Z13)
14023 {
14024 unsigned int unit_mask, m = 1;
14025 int units, j;
14026
14027 unit_mask = s390_get_unit_mask (insn, &units);
14028 fprintf (file, "(units:");
14029 for (j = 0; j < units; j++, m <<= 1)
14030 if (m & unit_mask)
14031 fprintf (file, " %d", j);
14032 fprintf (file, ")");
14033 }
14034 fprintf (file, " sched state: %d\n", s390_sched_state);
14035
14036 if (s390_tune == PROCESSOR_2964_Z13)
14037 {
14038 int units, j;
14039
14040 s390_get_unit_mask (insn, &units);
14041
14042 fprintf (file, ";;\t\tBACKEND: units unused for: ");
14043 for (j = 0; j < units; j++)
14044 fprintf (file, "%d:%d ", j, last_scheduled_unit_distance[j]);
14045 fprintf (file, "\n");
14046 }
14047 }
14048 }
14049
14050 if (GET_CODE (PATTERN (insn)) != USE
14051 && GET_CODE (PATTERN (insn)) != CLOBBER)
14052 return more - 1;
14053 else
14054 return more;
14055 }
14056
14057 static void
14058 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
14059 int verbose ATTRIBUTE_UNUSED,
14060 int max_ready ATTRIBUTE_UNUSED)
14061 {
14062 last_scheduled_insn = NULL;
14063 memset (last_scheduled_unit_distance, 0, MAX_SCHED_UNITS * sizeof (int));
14064 s390_sched_state = 0;
14065 }
14066
14067 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
14068 the number of times struct loop *loop should be unrolled when tuning for cpus
14069 with a built-in stride prefetcher.
14070 The loop body is analyzed for memory accesses by walking over each rtx
14071 of the loop. Depending on the loop_depth and the number of memory
14072 accesses a new number <= nunroll is returned to improve the
14073 behavior of the hardware prefetch unit. */
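/* For example (illustrative), a depth-1 loop containing 4 memory accesses
with nunroll == 8 is limited to MIN (8, 28 / 4) == 7 unroll copies. */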
14074 static unsigned
14075 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
14076 {
14077 basic_block *bbs;
14078 rtx_insn *insn;
14079 unsigned i;
14080 unsigned mem_count = 0;
14081
14082 if (s390_tune < PROCESSOR_2097_Z10)
14083 return nunroll;
14084
14085 /* Count the number of memory references within the loop body. */
14086 bbs = get_loop_body (loop);
14087 subrtx_iterator::array_type array;
14088 for (i = 0; i < loop->num_nodes; i++)
14089 FOR_BB_INSNS (bbs[i], insn)
14090 if (INSN_P (insn) && INSN_CODE (insn) != -1)
14091 FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
14092 if (MEM_P (*iter))
14093 mem_count += 1;
14094 free (bbs);
14095
14096 /* Prevent division by zero; nunroll does not need adjusting in this case. */
14097 if (mem_count == 0)
14098 return nunroll;
14099
14100 switch (loop_depth(loop))
14101 {
14102 case 1:
14103 return MIN (nunroll, 28 / mem_count);
14104 case 2:
14105 return MIN (nunroll, 22 / mem_count);
14106 default:
14107 return MIN (nunroll, 16 / mem_count);
14108 }
14109 }
14110
14111 /* Restore the current options. This is a hook function and also called
14112 internally. */
14113
14114 static void
14115 s390_function_specific_restore (struct gcc_options *opts,
14116 struct cl_target_option *ptr ATTRIBUTE_UNUSED)
14117 {
14118 opts->x_s390_cost_pointer = (long)processor_table[opts->x_s390_tune].cost;
14119 }
14120
14121 static void
14122 s390_option_override_internal (bool main_args_p,
14123 struct gcc_options *opts,
14124 const struct gcc_options *opts_set)
14125 {
14126 const char *prefix;
14127 const char *suffix;
14128
14129 /* Set up prefix/suffix so the error messages refer to either the command
14130 line argument, or the attribute(target). */
14131 if (main_args_p)
14132 {
14133 prefix = "-m";
14134 suffix = "";
14135 }
14136 else
14137 {
14138 prefix = "option(\"";
14139 suffix = "\")";
14140 }
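/* For example (illustrative), the deprecation warnings below then read
"-march=g5 is deprecated ... use at least -march=z900" for a command
line option, and "option("arch=g5") is deprecated ..." for an option
coming from a target attribute or pragma. */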
14141
14142
14143 /* Architecture mode defaults according to ABI. */
14144 if (!(opts_set->x_target_flags & MASK_ZARCH))
14145 {
14146 if (TARGET_64BIT)
14147 opts->x_target_flags |= MASK_ZARCH;
14148 else
14149 opts->x_target_flags &= ~MASK_ZARCH;
14150 }
14151
14152 /* Set the march default in case it hasn't been specified on cmdline. */
14153 if (!opts_set->x_s390_arch)
14154 opts->x_s390_arch = PROCESSOR_2064_Z900;
14155 else if (opts->x_s390_arch == PROCESSOR_9672_G5
14156 || opts->x_s390_arch == PROCESSOR_9672_G6)
14157 warning (OPT_Wdeprecated, "%sarch=%s%s is deprecated and will be removed "
14158 "in future releases; use at least %sarch=z900%s",
14159 prefix, opts->x_s390_arch == PROCESSOR_9672_G5 ? "g5" : "g6",
14160 suffix, prefix, suffix);
14161
14162 opts->x_s390_arch_flags = processor_flags_table[(int) opts->x_s390_arch];
14163
14164 /* Determine processor to tune for. */
14165 if (!opts_set->x_s390_tune)
14166 opts->x_s390_tune = opts->x_s390_arch;
14167 else if (opts->x_s390_tune == PROCESSOR_9672_G5
14168 || opts->x_s390_tune == PROCESSOR_9672_G6)
14169 warning (OPT_Wdeprecated, "%stune=%s%s is deprecated and will be removed "
14170 "in future releases; use at least %stune=z900%s",
14171 prefix, opts->x_s390_tune == PROCESSOR_9672_G5 ? "g5" : "g6",
14172 suffix, prefix, suffix);
14173
14174 opts->x_s390_tune_flags = processor_flags_table[opts->x_s390_tune];
14175
14176 /* Sanity checks. */
14177 if (opts->x_s390_arch == PROCESSOR_NATIVE
14178 || opts->x_s390_tune == PROCESSOR_NATIVE)
14179 gcc_unreachable ();
14180 if (TARGET_ZARCH_P (opts->x_target_flags) && !TARGET_CPU_ZARCH_P (opts))
14181 error ("z/Architecture mode not supported on %s",
14182 processor_table[(int)opts->x_s390_arch].name);
14183 if (TARGET_64BIT && !TARGET_ZARCH_P (opts->x_target_flags))
14184 error ("64-bit ABI not supported in ESA/390 mode");
14185
14186 /* Enable hardware transactions if available and not explicitly
14187 disabled by user. E.g. with -m31 -march=zEC12 -mzarch */
14188 if (!TARGET_OPT_HTM_P (opts_set->x_target_flags))
14189 {
14190 if (TARGET_CPU_HTM_P (opts) && TARGET_ZARCH_P (opts->x_target_flags))
14191 opts->x_target_flags |= MASK_OPT_HTM;
14192 else
14193 opts->x_target_flags &= ~MASK_OPT_HTM;
14194 }
14195
14196 if (TARGET_OPT_VX_P (opts_set->x_target_flags))
14197 {
14198 if (TARGET_OPT_VX_P (opts->x_target_flags))
14199 {
14200 if (!TARGET_CPU_VX_P (opts))
14201 error ("hardware vector support not available on %s",
14202 processor_table[(int)opts->x_s390_arch].name);
14203 if (TARGET_SOFT_FLOAT_P (opts->x_target_flags))
14204 error ("hardware vector support not available with -msoft-float");
14205 }
14206 }
14207 else
14208 {
14209 if (TARGET_CPU_VX_P (opts))
14210 /* Enable vector support if available and not explicitly disabled
14211 by user. E.g. with -m31 -march=z13 -mzarch */
14212 opts->x_target_flags |= MASK_OPT_VX;
14213 else
14214 opts->x_target_flags &= ~MASK_OPT_VX;
14215 }
14216
14217 /* Use hardware DFP if available and not explicitly disabled by
14218 user. E.g. with -m31 -march=z10 -mzarch */
14219 if (!TARGET_HARD_DFP_P (opts_set->x_target_flags))
14220 {
14221 if (TARGET_DFP_P (opts))
14222 opts->x_target_flags |= MASK_HARD_DFP;
14223 else
14224 opts->x_target_flags &= ~MASK_HARD_DFP;
14225 }
14226
14227 if (TARGET_HARD_DFP_P (opts->x_target_flags) && !TARGET_DFP_P (opts))
14228 {
14229 if (TARGET_HARD_DFP_P (opts_set->x_target_flags))
14230 {
14231 if (!TARGET_CPU_DFP_P (opts))
14232 error ("hardware decimal floating point instructions"
14233 " not available on %s",
14234 processor_table[(int)opts->x_s390_arch].name);
14235 if (!TARGET_ZARCH_P (opts->x_target_flags))
14236 error ("hardware decimal floating point instructions"
14237 " not available in ESA/390 mode");
14238 }
14239 else
14240 opts->x_target_flags &= ~MASK_HARD_DFP;
14241 }
14242
14243 if (TARGET_SOFT_FLOAT_P (opts_set->x_target_flags)
14244 && TARGET_SOFT_FLOAT_P (opts->x_target_flags))
14245 {
14246 if (TARGET_HARD_DFP_P (opts_set->x_target_flags)
14247 && TARGET_HARD_DFP_P (opts->x_target_flags))
14248 error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");
14249
14250 opts->x_target_flags &= ~MASK_HARD_DFP;
14251 }
14252
14253 if (TARGET_BACKCHAIN_P (opts->x_target_flags)
14254 && TARGET_PACKED_STACK_P (opts->x_target_flags)
14255 && TARGET_HARD_FLOAT_P (opts->x_target_flags))
14256 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
14257 "in combination");
14258
14259 if (opts->x_s390_stack_size)
14260 {
14261 if (opts->x_s390_stack_guard >= opts->x_s390_stack_size)
14262 error ("stack size must be greater than the stack guard value");
14263 else if (opts->x_s390_stack_size > 1 << 16)
14264 error ("stack size must not be greater than 64k");
14265 }
14266 else if (opts->x_s390_stack_guard)
14267 error ("-mstack-guard implies use of -mstack-size");
14268
14269 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
14270 if (!TARGET_LONG_DOUBLE_128_P (opts_set->x_target_flags))
14271 opts->x_target_flags |= MASK_LONG_DOUBLE_128;
14272 #endif
14273
14274 if (opts->x_s390_tune >= PROCESSOR_2097_Z10)
14275 {
14276 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
14277 opts->x_param_values,
14278 opts_set->x_param_values);
14279 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
14280 opts->x_param_values,
14281 opts_set->x_param_values);
14282 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
14283 opts->x_param_values,
14284 opts_set->x_param_values);
14285 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
14286 opts->x_param_values,
14287 opts_set->x_param_values);
14288 }
14289
14290 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
14291 opts->x_param_values,
14292 opts_set->x_param_values);
14293 /* values for loop prefetching */
14294 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
14295 opts->x_param_values,
14296 opts_set->x_param_values);
14297 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
14298 opts->x_param_values,
14299 opts_set->x_param_values);
14300 /* s390 has more than 2 cache levels and the sizes are much larger. Since
14301 we are always running virtualized, assume that we only get a small
14302 part of the caches above L1. */
14303 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
14304 opts->x_param_values,
14305 opts_set->x_param_values);
14306 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
14307 opts->x_param_values,
14308 opts_set->x_param_values);
14309 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
14310 opts->x_param_values,
14311 opts_set->x_param_values);
14312
14313 /* Use the alternative scheduling-pressure algorithm by default. */
14314 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
14315 opts->x_param_values,
14316 opts_set->x_param_values);
14317
14318 /* Call target specific restore function to do post-init work. At the moment,
14319 this just sets opts->x_s390_cost_pointer. */
14320 s390_function_specific_restore (opts, NULL);
14321 }
14322
14323 static void
14324 s390_option_override (void)
14325 {
14326 unsigned int i;
14327 cl_deferred_option *opt;
14328 vec<cl_deferred_option> *v =
14329 (vec<cl_deferred_option> *) s390_deferred_options;
14330
14331 if (v)
14332 FOR_EACH_VEC_ELT (*v, i, opt)
14333 {
14334 switch (opt->opt_index)
14335 {
14336 case OPT_mhotpatch_:
14337 {
14338 int val1;
14339 int val2;
14340 char s[256];
14341 char *t;
14342
14343 strncpy (s, opt->arg, 256);
14344 s[255] = 0;
14345 t = strchr (s, ',');
14346 if (t != NULL)
14347 {
14348 *t = 0;
14349 t++;
14350 val1 = integral_argument (s);
14351 val2 = integral_argument (t);
14352 }
14353 else
14354 {
14355 val1 = -1;
14356 val2 = -1;
14357 }
14358 if (val1 == -1 || val2 == -1)
14359 {
14360 /* argument is not a plain number */
14361 error ("arguments to %qs should be non-negative integers",
14362 "-mhotpatch=n,m");
14363 break;
14364 }
14365 else if (val1 > s390_hotpatch_hw_max
14366 || val2 > s390_hotpatch_hw_max)
14367 {
14368 error ("argument to %qs is too large (max. %d)",
14369 "-mhotpatch=n,m", s390_hotpatch_hw_max);
14370 break;
14371 }
14372 s390_hotpatch_hw_before_label = val1;
14373 s390_hotpatch_hw_after_label = val2;
14374 break;
14375 }
14376 default:
14377 gcc_unreachable ();
14378 }
14379 }
14380
14381 /* Set up function hooks. */
14382 init_machine_status = s390_init_machine_status;
14383
14384 s390_option_override_internal (true, &global_options, &global_options_set);
14385
14386 /* Save the initial options in case the user does function specific
14387 options. */
14388 target_option_default_node = build_target_option_node (&global_options);
14389 target_option_current_node = target_option_default_node;
14390
14391 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
14392 requires the arch flags to be evaluated already. Since prefetching
14393 is beneficial on s390, we enable it if available. */
14394 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
14395 flag_prefetch_loop_arrays = 1;
14396
14397 if (TARGET_TPF)
14398 {
14399 /* Don't emit DWARF3/4 unless specifically selected. The TPF
14400 debuggers do not yet support DWARF 3/4. */
14401 if (!global_options_set.x_dwarf_strict)
14402 dwarf_strict = 1;
14403 if (!global_options_set.x_dwarf_version)
14404 dwarf_version = 2;
14405 }
14406
14407 /* Register a target-specific optimization-and-lowering pass
14408 to run immediately before prologue and epilogue generation.
14409
14410 Registering the pass must be done at start up. It's
14411 convenient to do it here. */
14412 opt_pass *new_pass = new pass_s390_early_mach (g);
14413 struct register_pass_info insert_pass_s390_early_mach =
14414 {
14415 new_pass, /* pass */
14416 "pro_and_epilogue", /* reference_pass_name */
14417 1, /* ref_pass_instance_number */
14418 PASS_POS_INSERT_BEFORE /* po_op */
14419 };
14420 register_pass (&insert_pass_s390_early_mach);
14421 }
14422
14423 #if S390_USE_TARGET_ATTRIBUTE
14424 /* Inner function to process the attribute((target(...))); it takes an argument
14425 and sets the current options from the argument. If we have a list, recursively go
14426 over the list. */
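/* For instance (illustrative), a declaration such as

void foo (void) __attribute__ ((target ("arch=z13,no-vx")));

arrives here as the string "arch=z13,no-vx"; it is split at the commas
below, "arch=" is handled as an enum option and the "no-" prefix negates
the vx flag option. */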
14427
14428 static bool
14429 s390_valid_target_attribute_inner_p (tree args,
14430 struct gcc_options *opts,
14431 struct gcc_options *new_opts_set,
14432 bool force_pragma)
14433 {
14434 char *next_optstr;
14435 bool ret = true;
14436
14437 #define S390_ATTRIB(S,O,A) { S, sizeof (S)-1, O, A, 0 }
14438 #define S390_PRAGMA(S,O,A) { S, sizeof (S)-1, O, A, 1 }
14439 static const struct
14440 {
14441 const char *string;
14442 size_t len;
14443 int opt;
14444 int has_arg;
14445 int only_as_pragma;
14446 } attrs[] = {
14447 /* enum options */
14448 S390_ATTRIB ("arch=", OPT_march_, 1),
14449 S390_ATTRIB ("tune=", OPT_mtune_, 1),
14450 /* uinteger options */
14451 S390_ATTRIB ("stack-guard=", OPT_mstack_guard_, 1),
14452 S390_ATTRIB ("stack-size=", OPT_mstack_size_, 1),
14453 S390_ATTRIB ("branch-cost=", OPT_mbranch_cost_, 1),
14454 S390_ATTRIB ("warn-framesize=", OPT_mwarn_framesize_, 1),
14455 /* flag options */
14456 S390_ATTRIB ("backchain", OPT_mbackchain, 0),
14457 S390_ATTRIB ("hard-dfp", OPT_mhard_dfp, 0),
14458 S390_ATTRIB ("hard-float", OPT_mhard_float, 0),
14459 S390_ATTRIB ("htm", OPT_mhtm, 0),
14460 S390_ATTRIB ("vx", OPT_mvx, 0),
14461 S390_ATTRIB ("packed-stack", OPT_mpacked_stack, 0),
14462 S390_ATTRIB ("small-exec", OPT_msmall_exec, 0),
14463 S390_ATTRIB ("soft-float", OPT_msoft_float, 0),
14464 S390_ATTRIB ("mvcle", OPT_mmvcle, 0),
14465 S390_PRAGMA ("zvector", OPT_mzvector, 0),
14466 /* boolean options */
14467 S390_ATTRIB ("warn-dynamicstack", OPT_mwarn_dynamicstack, 0),
14468 };
14469 #undef S390_ATTRIB
14470 #undef S390_PRAGMA
14471
14472 /* If this is a list, recurse to get the options. */
14473 if (TREE_CODE (args) == TREE_LIST)
14474 {
14475 bool ret = true;
14476 int num_pragma_values;
14477 int i;
14478
14479 /* Note: attribs.c:decl_attributes prepends the values from
14480 current_target_pragma to the list of target attributes. To determine
14481 whether we're looking at a value of the attribute or the pragma we
14482 assume that the first [list_length (current_target_pragma)] values in
14483 the list are the values from the pragma. */
14484 num_pragma_values = (!force_pragma && current_target_pragma != NULL)
14485 ? list_length (current_target_pragma) : 0;
14486 for (i = 0; args; args = TREE_CHAIN (args), i++)
14487 {
14488 bool is_pragma;
14489
14490 is_pragma = (force_pragma || i < num_pragma_values);
14491 if (TREE_VALUE (args)
14492 && !s390_valid_target_attribute_inner_p (TREE_VALUE (args),
14493 opts, new_opts_set,
14494 is_pragma))
14495 {
14496 ret = false;
14497 }
14498 }
14499 return ret;
14500 }
14501
14502 else if (TREE_CODE (args) != STRING_CST)
14503 {
14504 error ("attribute %<target%> argument not a string");
14505 return false;
14506 }
14507
14508 /* Handle multiple arguments separated by commas. */
14509 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
14510
14511 while (next_optstr && *next_optstr != '\0')
14512 {
14513 char *p = next_optstr;
14514 char *orig_p = p;
14515 char *comma = strchr (next_optstr, ',');
14516 size_t len, opt_len;
14517 int opt;
14518 bool opt_set_p;
14519 char ch;
14520 unsigned i;
14521 int mask = 0;
14522 enum cl_var_type var_type;
14523 bool found;
14524
14525 if (comma)
14526 {
14527 *comma = '\0';
14528 len = comma - next_optstr;
14529 next_optstr = comma + 1;
14530 }
14531 else
14532 {
14533 len = strlen (p);
14534 next_optstr = NULL;
14535 }
14536
14537 /* Recognize no-xxx. */
14538 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
14539 {
14540 opt_set_p = false;
14541 p += 3;
14542 len -= 3;
14543 }
14544 else
14545 opt_set_p = true;
14546
14547 /* Find the option. */
14548 ch = *p;
14549 found = false;
14550 for (i = 0; i < ARRAY_SIZE (attrs); i++)
14551 {
14552 opt_len = attrs[i].len;
14553 if (ch == attrs[i].string[0]
14554 && ((attrs[i].has_arg) ? len > opt_len : len == opt_len)
14555 && memcmp (p, attrs[i].string, opt_len) == 0)
14556 {
14557 opt = attrs[i].opt;
14558 if (!opt_set_p && cl_options[opt].cl_reject_negative)
14559 continue;
14560 mask = cl_options[opt].var_value;
14561 var_type = cl_options[opt].var_type;
14562 found = true;
14563 break;
14564 }
14565 }
14566
14567 /* Process the option. */
14568 if (!found)
14569 {
14570 error ("attribute(target(\"%s\")) is unknown", orig_p);
14571 return false;
14572 }
14573 else if (attrs[i].only_as_pragma && !force_pragma)
14574 {
14575 /* Value is not allowed for the target attribute. */
14576 error ("Value %qs is not supported by attribute %<target%>",
14577 attrs[i].string);
14578 return false;
14579 }
14580
14581 else if (var_type == CLVC_BIT_SET || var_type == CLVC_BIT_CLEAR)
14582 {
14583 if (var_type == CLVC_BIT_CLEAR)
14584 opt_set_p = !opt_set_p;
14585
14586 if (opt_set_p)
14587 opts->x_target_flags |= mask;
14588 else
14589 opts->x_target_flags &= ~mask;
14590 new_opts_set->x_target_flags |= mask;
14591 }
14592
14593 else if (cl_options[opt].var_type == CLVC_BOOLEAN)
14594 {
14595 int value;
14596
14597 if (cl_options[opt].cl_uinteger)
14598 {
14599 /* Unsigned integer argument. Code based on the function
14600 decode_cmdline_option () in opts-common.c. */
14601 value = integral_argument (p + opt_len);
14602 }
14603 else
14604 value = (opt_set_p) ? 1 : 0;
14605
14606 if (value != -1)
14607 {
14608 struct cl_decoded_option decoded;
14609
14610 /* Value range check; only implemented for numeric and boolean
14611 options at the moment. */
14612 generate_option (opt, NULL, value, CL_TARGET, &decoded);
14613 s390_handle_option (opts, new_opts_set, &decoded, input_location);
14614 set_option (opts, new_opts_set, opt, value,
14615 p + opt_len, DK_UNSPECIFIED, input_location,
14616 global_dc);
14617 }
14618 else
14619 {
14620 error ("attribute(target(\"%s\")) is unknown", orig_p);
14621 ret = false;
14622 }
14623 }
14624
14625 else if (cl_options[opt].var_type == CLVC_ENUM)
14626 {
14627 bool arg_ok;
14628 int value;
14629
14630 arg_ok = opt_enum_arg_to_value (opt, p + opt_len, &value, CL_TARGET);
14631 if (arg_ok)
14632 set_option (opts, new_opts_set, opt, value,
14633 p + opt_len, DK_UNSPECIFIED, input_location,
14634 global_dc);
14635 else
14636 {
14637 error ("attribute(target(\"%s\")) is unknown", orig_p);
14638 ret = false;
14639 }
14640 }
14641
14642 else
14643 gcc_unreachable ();
14644 }
14645 return ret;
14646 }
14647
14648 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
14649
14650 tree
14651 s390_valid_target_attribute_tree (tree args,
14652 struct gcc_options *opts,
14653 const struct gcc_options *opts_set,
14654 bool force_pragma)
14655 {
14656 tree t = NULL_TREE;
14657 struct gcc_options new_opts_set;
14658
14659 memset (&new_opts_set, 0, sizeof (new_opts_set));
14660
14661 /* Process each of the options on the chain. */
14662 if (! s390_valid_target_attribute_inner_p (args, opts, &new_opts_set,
14663 force_pragma))
14664 return error_mark_node;
14665
14666 /* If some option was set (even if it has not changed), rerun
14667 s390_option_override_internal, and then save the options away. */
14668 if (new_opts_set.x_target_flags
14669 || new_opts_set.x_s390_arch
14670 || new_opts_set.x_s390_tune
14671 || new_opts_set.x_s390_stack_guard
14672 || new_opts_set.x_s390_stack_size
14673 || new_opts_set.x_s390_branch_cost
14674 || new_opts_set.x_s390_warn_framesize
14675 || new_opts_set.x_s390_warn_dynamicstack_p)
14676 {
14677 const unsigned char *src = (const unsigned char *)opts_set;
14678 unsigned char *dest = (unsigned char *)&new_opts_set;
14679 unsigned int i;
14680
14681 /* Merge the original option flags into the new ones. */
14682 for (i = 0; i < sizeof(*opts_set); i++)
14683 dest[i] |= src[i];
14684
14685 /* Do any overrides, such as arch=xxx, or tune=xxx support. */
14686 s390_option_override_internal (false, opts, &new_opts_set);
14687 /* Save the current options unless we are validating options for
14688 #pragma. */
14689 t = build_target_option_node (opts);
14690 }
14691 return t;
14692 }
14693
14694 /* Hook to validate attribute((target("string"))). */
14695
14696 static bool
14697 s390_valid_target_attribute_p (tree fndecl,
14698 tree ARG_UNUSED (name),
14699 tree args,
14700 int ARG_UNUSED (flags))
14701 {
14702 struct gcc_options func_options;
14703 tree new_target, new_optimize;
14704 bool ret = true;
14705
14706 /* attribute((target("default"))) does nothing, beyond
14707 affecting multi-versioning. */
14708 if (TREE_VALUE (args)
14709 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
14710 && TREE_CHAIN (args) == NULL_TREE
14711 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
14712 return true;
14713
14714 tree old_optimize = build_optimization_node (&global_options);
14715
14716 /* Get the optimization options of the current function. */
14717 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
14718
14719 if (!func_optimize)
14720 func_optimize = old_optimize;
14721
14722 /* Init func_options. */
14723 memset (&func_options, 0, sizeof (func_options));
14724 init_options_struct (&func_options, NULL);
14725 lang_hooks.init_options_struct (&func_options);
14726
14727 cl_optimization_restore (&func_options, TREE_OPTIMIZATION (func_optimize));
14728
14729 /* Initialize func_options to the default before its target options can
14730 be set. */
14731 cl_target_option_restore (&func_options,
14732 TREE_TARGET_OPTION (target_option_default_node));
14733
14734 new_target = s390_valid_target_attribute_tree (args, &func_options,
14735 &global_options_set,
14736 (args ==
14737 current_target_pragma));
14738 new_optimize = build_optimization_node (&func_options);
14739 if (new_target == error_mark_node)
14740 ret = false;
14741 else if (fndecl && new_target)
14742 {
14743 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
14744 if (old_optimize != new_optimize)
14745 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
14746 }
14747 return ret;
14748 }
14749
14750 /* Restore targets globals from NEW_TREE and invalidate s390_previous_fndecl
14751 cache. */
14752
14753 void
14754 s390_activate_target_options (tree new_tree)
14755 {
14756 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
14757 if (TREE_TARGET_GLOBALS (new_tree))
14758 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
14759 else if (new_tree == target_option_default_node)
14760 restore_target_globals (&default_target_globals);
14761 else
14762 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
14763 s390_previous_fndecl = NULL_TREE;
14764 }
14765
14766 /* Establish appropriate back-end context for processing the function
14767 FNDECL. The argument might be NULL to indicate processing at top
14768 level, outside of any function scope. */
14769 static void
14770 s390_set_current_function (tree fndecl)
14771 {
14772 /* Only change the context if the function changes. This hook is called
14773 several times in the course of compiling a function, and we don't want to
14774 slow things down too much or call target_reinit when it isn't safe. */
14775 if (fndecl == s390_previous_fndecl)
14776 return;
14777
14778 tree old_tree;
14779 if (s390_previous_fndecl == NULL_TREE)
14780 old_tree = target_option_current_node;
14781 else if (DECL_FUNCTION_SPECIFIC_TARGET (s390_previous_fndecl))
14782 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (s390_previous_fndecl);
14783 else
14784 old_tree = target_option_default_node;
14785
14786 if (fndecl == NULL_TREE)
14787 {
14788 if (old_tree != target_option_current_node)
14789 s390_activate_target_options (target_option_current_node);
14790 return;
14791 }
14792
14793 tree new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
14794 if (new_tree == NULL_TREE)
14795 new_tree = target_option_default_node;
14796
14797 if (old_tree != new_tree)
14798 s390_activate_target_options (new_tree);
14799 s390_previous_fndecl = fndecl;
14800 }
14801 #endif
14802
14803 /* Implement TARGET_USE_BY_PIECES_INFRASTRUCTURE_P. */
14804
14805 static bool
14806 s390_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
14807 unsigned int align ATTRIBUTE_UNUSED,
14808 enum by_pieces_operation op ATTRIBUTE_UNUSED,
14809 bool speed_p ATTRIBUTE_UNUSED)
14810 {
14811 return (size == 1 || size == 2
14812 || size == 4 || (TARGET_ZARCH && size == 8));
14813 }
14814
14815 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
14816
14817 static void
14818 s390_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
14819 {
14820 tree sfpc = s390_builtin_decls[S390_BUILTIN_s390_sfpc];
14821 tree efpc = s390_builtin_decls[S390_BUILTIN_s390_efpc];
14822 tree call_efpc = build_call_expr (efpc, 0);
14823 tree fenv_var = create_tmp_var_raw (unsigned_type_node);
14824
14825 #define FPC_EXCEPTION_MASK HOST_WIDE_INT_UC (0xf8000000)
14826 #define FPC_FLAGS_MASK HOST_WIDE_INT_UC (0x00f80000)
14827 #define FPC_DXC_MASK HOST_WIDE_INT_UC (0x0000ff00)
14828 #define FPC_EXCEPTION_MASK_SHIFT HOST_WIDE_INT_UC (24)
14829 #define FPC_FLAGS_SHIFT HOST_WIDE_INT_UC (16)
14830 #define FPC_DXC_SHIFT HOST_WIDE_INT_UC (8)
14831
14832 /* Generates the equivalent of feholdexcept (&fenv_var)
14833
14834 fenv_var = __builtin_s390_efpc ();
14835 __builtin_s390_sfpc (fenv_var & mask) */
14836 tree old_fpc = build2 (MODIFY_EXPR, unsigned_type_node, fenv_var, call_efpc);
14837 tree new_fpc =
14838 build2 (BIT_AND_EXPR, unsigned_type_node, fenv_var,
14839 build_int_cst (unsigned_type_node,
14840 ~(FPC_DXC_MASK | FPC_FLAGS_MASK |
14841 FPC_EXCEPTION_MASK)));
14842 tree set_new_fpc = build_call_expr (sfpc, 1, new_fpc);
14843 *hold = build2 (COMPOUND_EXPR, void_type_node, old_fpc, set_new_fpc);
14844
14845 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT)
14846
14847 __builtin_s390_sfpc (__builtin_s390_efpc () & mask) */
14848 new_fpc = build2 (BIT_AND_EXPR, unsigned_type_node, call_efpc,
14849 build_int_cst (unsigned_type_node,
14850 ~(FPC_DXC_MASK | FPC_FLAGS_MASK)));
14851 *clear = build_call_expr (sfpc, 1, new_fpc);
14852
14853 /* Generates the equivalent of feupdateenv (fenv_var)
14854
14855 old_fpc = __builtin_s390_efpc ();
14856 __builtin_s390_sfpc (fenv_var);
14857 __atomic_feraiseexcept ((old_fpc & FPC_FLAGS_MASK) >> FPC_FLAGS_SHIFT); */
14858
14859 old_fpc = create_tmp_var_raw (unsigned_type_node);
14860 tree store_old_fpc = build2 (MODIFY_EXPR, void_type_node,
14861 old_fpc, call_efpc);
14862
14863 set_new_fpc = build_call_expr (sfpc, 1, fenv_var);
14864
14865 tree raise_old_except = build2 (BIT_AND_EXPR, unsigned_type_node, old_fpc,
14866 build_int_cst (unsigned_type_node,
14867 FPC_FLAGS_MASK));
14868 raise_old_except = build2 (RSHIFT_EXPR, unsigned_type_node, raise_old_except,
14869 build_int_cst (unsigned_type_node,
14870 FPC_FLAGS_SHIFT));
14871 tree atomic_feraiseexcept
14872 = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
14873 raise_old_except = build_call_expr (atomic_feraiseexcept,
14874 1, raise_old_except);
14875
14876 *update = build2 (COMPOUND_EXPR, void_type_node,
14877 build2 (COMPOUND_EXPR, void_type_node,
14878 store_old_fpc, set_new_fpc),
14879 raise_old_except);
14880
14881 #undef FPC_EXCEPTION_MASK
14882 #undef FPC_FLAGS_MASK
14883 #undef FPC_DXC_MASK
14884 #undef FPC_EXCEPTION_MASK_SHIFT
14885 #undef FPC_FLAGS_SHIFT
14886 #undef FPC_DXC_SHIFT
14887 }
14888
14889 /* Return the vector mode to be used for inner mode MODE when doing
14890 vectorization. */
14891 static machine_mode
14892 s390_preferred_simd_mode (machine_mode mode)
14893 {
14894 if (TARGET_VX)
14895 switch (mode)
14896 {
14897 case DFmode:
14898 return V2DFmode;
14899 case DImode:
14900 return V2DImode;
14901 case SImode:
14902 return V4SImode;
14903 case HImode:
14904 return V8HImode;
14905 case QImode:
14906 return V16QImode;
14907 default:;
14908 }
14909 return word_mode;
14910 }
14911
14912 /* Our hardware does not require vectors to be strictly aligned. */
14913 static bool
14914 s390_support_vector_misalignment (machine_mode mode ATTRIBUTE_UNUSED,
14915 const_tree type ATTRIBUTE_UNUSED,
14916 int misalignment ATTRIBUTE_UNUSED,
14917 bool is_packed ATTRIBUTE_UNUSED)
14918 {
14919 if (TARGET_VX)
14920 return true;
14921
14922 return default_builtin_support_vector_misalignment (mode, type, misalignment,
14923 is_packed);
14924 }
14925
14926 /* The vector ABI requires vector types to be aligned on an 8 byte
14927 boundary (our stack alignment). However, we allow this to be
14928 overridden by the user, even though doing so breaks the ABI. */
14929 static HOST_WIDE_INT
14930 s390_vector_alignment (const_tree type)
14931 {
14932 if (!TARGET_VX_ABI)
14933 return default_vector_alignment (type);
14934
14935 if (TYPE_USER_ALIGN (type))
14936 return TYPE_ALIGN (type);
14937
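  /* TYPE_SIZE is measured in bits; cap the alignment at 64 bits, i.e. the
     8 byte stack alignment mentioned above.  */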
14938 return MIN (64, tree_to_shwi (TYPE_SIZE (type)));
14939 }
14940
14941 #ifdef HAVE_AS_MACHINE_MACHINEMODE
14942 /* Implement TARGET_ASM_FILE_START. */
14943 static void
14944 s390_asm_file_start (void)
14945 {
14946 s390_asm_output_machine_for_arch (asm_out_file);
14947 }
14948 #endif
14949
14950 /* Implement TARGET_ASM_FILE_END. */
14951 static void
14952 s390_asm_file_end (void)
14953 {
14954 #ifdef HAVE_AS_GNU_ATTRIBUTE
14955 varpool_node *vnode;
14956 cgraph_node *cnode;
14957
14958 FOR_EACH_VARIABLE (vnode)
14959 if (TREE_PUBLIC (vnode->decl))
14960 s390_check_type_for_vector_abi (TREE_TYPE (vnode->decl), false, false);
14961
14962 FOR_EACH_FUNCTION (cnode)
14963 if (TREE_PUBLIC (cnode->decl))
14964 s390_check_type_for_vector_abi (TREE_TYPE (cnode->decl), false, false);
14965
14966
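  /* Record the vector ABI in use as a GNU object attribute (tag 8 is the
     s390 vector ABI tag) so that tools can flag objects built with
     incompatible vector ABIs being linked together.  */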
14967 if (s390_vector_abi != 0)
14968 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
14969 s390_vector_abi);
14970 #endif
14971 file_end_indicate_exec_stack ();
14972
14973 if (flag_split_stack)
14974 file_end_indicate_split_stack ();
14975 }
14976
14977 /* Return true if TYPE is a vector bool type. */
14978 static inline bool
14979 s390_vector_bool_type_p (const_tree type)
14980 {
14981 return TYPE_VECTOR_OPAQUE (type);
14982 }
14983
14984 /* Return the diagnostic message string if the binary operation OP is
14985 not permitted on TYPE1 and TYPE2, NULL otherwise. */
14986 static const char*
14987 s390_invalid_binary_op (int op ATTRIBUTE_UNUSED, const_tree type1, const_tree type2)
14988 {
14989 bool bool1_p, bool2_p;
14990 bool plusminus_p;
14991 bool muldiv_p;
14992 bool compare_p;
14993 machine_mode mode1, mode2;
14994
14995 if (!TARGET_ZVECTOR)
14996 return NULL;
14997
14998 if (!VECTOR_TYPE_P (type1) || !VECTOR_TYPE_P (type2))
14999 return NULL;
15000
15001 bool1_p = s390_vector_bool_type_p (type1);
15002 bool2_p = s390_vector_bool_type_p (type2);
15003
15004 /* Mixing signed and unsigned types is forbidden for all
15005 operators. */
15006 if (!bool1_p && !bool2_p
15007 && TYPE_UNSIGNED (type1) != TYPE_UNSIGNED (type2))
15008 return N_("types differ in signedness");
15009
15010 plusminus_p = (op == PLUS_EXPR || op == MINUS_EXPR);
15011 muldiv_p = (op == MULT_EXPR || op == RDIV_EXPR || op == TRUNC_DIV_EXPR
15012 || op == CEIL_DIV_EXPR || op == FLOOR_DIV_EXPR
15013 || op == ROUND_DIV_EXPR);
15014 compare_p = (op == LT_EXPR || op == LE_EXPR || op == GT_EXPR || op == GE_EXPR
15015 || op == EQ_EXPR || op == NE_EXPR);
15016
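  /* For example, adding two vector bool values, or multiplying a vector
     bool by a signed vector operand, is rejected by the checks below.  */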
15017 if (bool1_p && bool2_p && (plusminus_p || muldiv_p))
15018 return N_("binary operator does not support two vector bool operands");
15019
15020 if (bool1_p != bool2_p && (muldiv_p || compare_p))
15021 return N_("binary operator does not support vector bool operand");
15022
15023 mode1 = TYPE_MODE (type1);
15024 mode2 = TYPE_MODE (type2);
15025
15026 if (bool1_p != bool2_p && plusminus_p
15027 && (GET_MODE_CLASS (mode1) == MODE_VECTOR_FLOAT
15028 || GET_MODE_CLASS (mode2) == MODE_VECTOR_FLOAT))
15029 return N_("binary operator does not support mixing vector "
15030 "bool with floating point vector operands");
15031
15032 return NULL;
15033 }
15034
15035 /* Initialize GCC target structure. */
15036
15037 #undef TARGET_ASM_ALIGNED_HI_OP
15038 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
15039 #undef TARGET_ASM_ALIGNED_DI_OP
15040 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
15041 #undef TARGET_ASM_INTEGER
15042 #define TARGET_ASM_INTEGER s390_assemble_integer
15043
15044 #undef TARGET_ASM_OPEN_PAREN
15045 #define TARGET_ASM_OPEN_PAREN ""
15046
15047 #undef TARGET_ASM_CLOSE_PAREN
15048 #define TARGET_ASM_CLOSE_PAREN ""
15049
15050 #undef TARGET_OPTION_OVERRIDE
15051 #define TARGET_OPTION_OVERRIDE s390_option_override
15052
15053 #undef TARGET_ENCODE_SECTION_INFO
15054 #define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
15055
15056 #undef TARGET_SCALAR_MODE_SUPPORTED_P
15057 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
15058
15059 #ifdef HAVE_AS_TLS
15060 #undef TARGET_HAVE_TLS
15061 #define TARGET_HAVE_TLS true
15062 #endif
15063 #undef TARGET_CANNOT_FORCE_CONST_MEM
15064 #define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem
15065
15066 #undef TARGET_DELEGITIMIZE_ADDRESS
15067 #define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address
15068
15069 #undef TARGET_LEGITIMIZE_ADDRESS
15070 #define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address
15071
15072 #undef TARGET_RETURN_IN_MEMORY
15073 #define TARGET_RETURN_IN_MEMORY s390_return_in_memory
15074
15075 #undef TARGET_INIT_BUILTINS
15076 #define TARGET_INIT_BUILTINS s390_init_builtins
15077 #undef TARGET_EXPAND_BUILTIN
15078 #define TARGET_EXPAND_BUILTIN s390_expand_builtin
15079 #undef TARGET_BUILTIN_DECL
15080 #define TARGET_BUILTIN_DECL s390_builtin_decl
15081
15082 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
15083 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra
15084
15085 #undef TARGET_ASM_OUTPUT_MI_THUNK
15086 #define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
15087 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
15088 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
15089
15090 #undef TARGET_SCHED_ADJUST_PRIORITY
15091 #define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
15092 #undef TARGET_SCHED_ISSUE_RATE
15093 #define TARGET_SCHED_ISSUE_RATE s390_issue_rate
15094 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
15095 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead
15096
15097 #undef TARGET_SCHED_VARIABLE_ISSUE
15098 #define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
15099 #undef TARGET_SCHED_REORDER
15100 #define TARGET_SCHED_REORDER s390_sched_reorder
15101 #undef TARGET_SCHED_INIT
15102 #define TARGET_SCHED_INIT s390_sched_init
15103
15104 #undef TARGET_CANNOT_COPY_INSN_P
15105 #define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
15106 #undef TARGET_RTX_COSTS
15107 #define TARGET_RTX_COSTS s390_rtx_costs
15108 #undef TARGET_ADDRESS_COST
15109 #define TARGET_ADDRESS_COST s390_address_cost
15110 #undef TARGET_REGISTER_MOVE_COST
15111 #define TARGET_REGISTER_MOVE_COST s390_register_move_cost
15112 #undef TARGET_MEMORY_MOVE_COST
15113 #define TARGET_MEMORY_MOVE_COST s390_memory_move_cost
15114
15115 #undef TARGET_MACHINE_DEPENDENT_REORG
15116 #define TARGET_MACHINE_DEPENDENT_REORG s390_reorg
15117
15118 #undef TARGET_VALID_POINTER_MODE
15119 #define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode
15120
15121 #undef TARGET_BUILD_BUILTIN_VA_LIST
15122 #define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
15123 #undef TARGET_EXPAND_BUILTIN_VA_START
15124 #define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
15125 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
15126 #define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg
15127
15128 #undef TARGET_PROMOTE_FUNCTION_MODE
15129 #define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
15130 #undef TARGET_PASS_BY_REFERENCE
15131 #define TARGET_PASS_BY_REFERENCE s390_pass_by_reference
15132
15133 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
15134 #define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
15135 #undef TARGET_FUNCTION_ARG
15136 #define TARGET_FUNCTION_ARG s390_function_arg
15137 #undef TARGET_FUNCTION_ARG_ADVANCE
15138 #define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
15139 #undef TARGET_FUNCTION_VALUE
15140 #define TARGET_FUNCTION_VALUE s390_function_value
15141 #undef TARGET_LIBCALL_VALUE
15142 #define TARGET_LIBCALL_VALUE s390_libcall_value
15143 #undef TARGET_STRICT_ARGUMENT_NAMING
15144 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
15145
15146 #undef TARGET_KEEP_LEAF_WHEN_PROFILED
15147 #define TARGET_KEEP_LEAF_WHEN_PROFILED s390_keep_leaf_when_profiled
15148
15149 #undef TARGET_FIXED_CONDITION_CODE_REGS
15150 #define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs
15151
15152 #undef TARGET_CC_MODES_COMPATIBLE
15153 #define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible
15154
15155 #undef TARGET_INVALID_WITHIN_DOLOOP
15156 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_insn_null
15157
15158 #ifdef HAVE_AS_TLS
15159 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
15160 #define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
15161 #endif
15162
15163 #undef TARGET_DWARF_FRAME_REG_MODE
15164 #define TARGET_DWARF_FRAME_REG_MODE s390_dwarf_frame_reg_mode
15165
15166 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
15167 #undef TARGET_MANGLE_TYPE
15168 #define TARGET_MANGLE_TYPE s390_mangle_type
15169 #endif
15170
15171 #undef TARGET_SCALAR_MODE_SUPPORTED_P
15172 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
15173
15174 #undef TARGET_VECTOR_MODE_SUPPORTED_P
15175 #define TARGET_VECTOR_MODE_SUPPORTED_P s390_vector_mode_supported_p
15176
15177 #undef TARGET_PREFERRED_RELOAD_CLASS
15178 #define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class
15179
15180 #undef TARGET_SECONDARY_RELOAD
15181 #define TARGET_SECONDARY_RELOAD s390_secondary_reload
15182
15183 #undef TARGET_LIBGCC_CMP_RETURN_MODE
15184 #define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode
15185
15186 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
15187 #define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode
15188
15189 #undef TARGET_LEGITIMATE_ADDRESS_P
15190 #define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p
15191
15192 #undef TARGET_LEGITIMATE_CONSTANT_P
15193 #define TARGET_LEGITIMATE_CONSTANT_P s390_legitimate_constant_p
15194
15195 #undef TARGET_LRA_P
15196 #define TARGET_LRA_P s390_lra_p
15197
15198 #undef TARGET_CAN_ELIMINATE
15199 #define TARGET_CAN_ELIMINATE s390_can_eliminate
15200
15201 #undef TARGET_CONDITIONAL_REGISTER_USAGE
15202 #define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage
15203
15204 #undef TARGET_LOOP_UNROLL_ADJUST
15205 #define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust
15206
15207 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
15208 #define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
15209 #undef TARGET_TRAMPOLINE_INIT
15210 #define TARGET_TRAMPOLINE_INIT s390_trampoline_init
15211
15212 #undef TARGET_UNWIND_WORD_MODE
15213 #define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode
15214
15215 #undef TARGET_CANONICALIZE_COMPARISON
15216 #define TARGET_CANONICALIZE_COMPARISON s390_canonicalize_comparison
15217
15218 #undef TARGET_HARD_REGNO_SCRATCH_OK
15219 #define TARGET_HARD_REGNO_SCRATCH_OK s390_hard_regno_scratch_ok
15220
15221 #undef TARGET_ATTRIBUTE_TABLE
15222 #define TARGET_ATTRIBUTE_TABLE s390_attribute_table
15223
15224 #undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
15225 #define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true
15226
15227 #undef TARGET_SET_UP_BY_PROLOGUE
15228 #define TARGET_SET_UP_BY_PROLOGUE s300_set_up_by_prologue
15229
15230 #undef TARGET_EXTRA_LIVE_ON_ENTRY
15231 #define TARGET_EXTRA_LIVE_ON_ENTRY s390_live_on_entry
15232
15233 #undef TARGET_USE_BY_PIECES_INFRASTRUCTURE_P
15234 #define TARGET_USE_BY_PIECES_INFRASTRUCTURE_P \
15235 s390_use_by_pieces_infrastructure_p
15236
15237 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
15238 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV s390_atomic_assign_expand_fenv
15239
15240 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
15241 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN s390_invalid_arg_for_unprototyped_fn
15242
15243 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
15244 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE s390_preferred_simd_mode
15245
15246 #undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
15247 #define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT s390_support_vector_misalignment
15248
15249 #undef TARGET_VECTOR_ALIGNMENT
15250 #define TARGET_VECTOR_ALIGNMENT s390_vector_alignment
15251
15252 #undef TARGET_INVALID_BINARY_OP
15253 #define TARGET_INVALID_BINARY_OP s390_invalid_binary_op
15254
15255 #ifdef HAVE_AS_MACHINE_MACHINEMODE
15256 #undef TARGET_ASM_FILE_START
15257 #define TARGET_ASM_FILE_START s390_asm_file_start
15258 #endif
15259
15260 #undef TARGET_ASM_FILE_END
15261 #define TARGET_ASM_FILE_END s390_asm_file_end
15262
15263 #if S390_USE_TARGET_ATTRIBUTE
15264 #undef TARGET_SET_CURRENT_FUNCTION
15265 #define TARGET_SET_CURRENT_FUNCTION s390_set_current_function
15266
15267 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
15268 #define TARGET_OPTION_VALID_ATTRIBUTE_P s390_valid_target_attribute_p
15269 #endif
15270
15271 #undef TARGET_OPTION_RESTORE
15272 #define TARGET_OPTION_RESTORE s390_function_specific_restore
15273
15274 struct gcc_target targetm = TARGET_INITIALIZER;
15275
15276 #include "gt-s390.h"