]> gcc.gnu.org Git - gcc.git/blob - gcc/config/s390/s390.c
PR70674: S/390: Add memory barrier to stack pointer restore from fpr.
[gcc.git] / gcc / config / s390 / s390.c
1 /* Subroutines used for code generation on IBM S/390 and zSeries
2 Copyright (C) 1999-2016 Free Software Foundation, Inc.
3 Contributed by Hartmut Penner (hpenner@de.ibm.com) and
4 Ulrich Weigand (uweigand@de.ibm.com) and
5 Andreas Krebbel (Andreas.Krebbel@de.ibm.com).
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "backend.h"
27 #include "target.h"
28 #include "target-globals.h"
29 #include "rtl.h"
30 #include "tree.h"
31 #include "gimple.h"
32 #include "cfghooks.h"
33 #include "cfgloop.h"
34 #include "df.h"
35 #include "tm_p.h"
36 #include "stringpool.h"
37 #include "expmed.h"
38 #include "optabs.h"
39 #include "regs.h"
40 #include "emit-rtl.h"
41 #include "recog.h"
42 #include "cgraph.h"
43 #include "diagnostic-core.h"
44 #include "diagnostic.h"
45 #include "alias.h"
46 #include "fold-const.h"
47 #include "print-tree.h"
48 #include "stor-layout.h"
49 #include "varasm.h"
50 #include "calls.h"
51 #include "conditions.h"
52 #include "output.h"
53 #include "insn-attr.h"
54 #include "flags.h"
55 #include "except.h"
56 #include "dojump.h"
57 #include "explow.h"
58 #include "stmt.h"
59 #include "expr.h"
60 #include "reload.h"
61 #include "cfgrtl.h"
62 #include "cfganal.h"
63 #include "lcm.h"
64 #include "cfgbuild.h"
65 #include "cfgcleanup.h"
66 #include "debug.h"
67 #include "langhooks.h"
68 #include "internal-fn.h"
69 #include "gimple-fold.h"
70 #include "tree-eh.h"
71 #include "gimplify.h"
72 #include "params.h"
73 #include "opts.h"
74 #include "tree-pass.h"
75 #include "context.h"
76 #include "builtins.h"
77 #include "rtl-iter.h"
78 #include "intl.h"
79 #include "tm-constrs.h"
80
81 /* This file should be included last. */
82 #include "target-def.h"
83
84 /* Remember the last target of s390_set_current_function. */
85 static GTY(()) tree s390_previous_fndecl;
86
87 /* Define the specific costs for a given cpu. */
88
89 struct processor_costs
90 {
91 /* multiplication */
92 const int m; /* cost of an M instruction. */
93 const int mghi; /* cost of an MGHI instruction. */
94 const int mh; /* cost of an MH instruction. */
95 const int mhi; /* cost of an MHI instruction. */
96 const int ml; /* cost of an ML instruction. */
97 const int mr; /* cost of an MR instruction. */
98 const int ms; /* cost of an MS instruction. */
99 const int msg; /* cost of an MSG instruction. */
100 const int msgf; /* cost of an MSGF instruction. */
101 const int msgfr; /* cost of an MSGFR instruction. */
102 const int msgr; /* cost of an MSGR instruction. */
103 const int msr; /* cost of an MSR instruction. */
104 const int mult_df; /* cost of multiplication in DFmode. */
105 const int mxbr;
106 /* square root */
107 const int sqxbr; /* cost of square root in TFmode. */
108 const int sqdbr; /* cost of square root in DFmode. */
109 const int sqebr; /* cost of square root in SFmode. */
110 /* multiply and add */
111 const int madbr; /* cost of multiply and add in DFmode. */
112 const int maebr; /* cost of multiply and add in SFmode. */
113 /* division */
114 const int dxbr;
115 const int ddbr;
116 const int debr;
117 const int dlgr;
118 const int dlr;
119 const int dr;
120 const int dsgfr;
121 const int dsgr;
122 };
123
124 #define s390_cost ((const struct processor_costs *)(s390_cost_pointer))
125
126 static const
127 struct processor_costs z900_cost =
128 {
129 COSTS_N_INSNS (5), /* M */
130 COSTS_N_INSNS (10), /* MGHI */
131 COSTS_N_INSNS (5), /* MH */
132 COSTS_N_INSNS (4), /* MHI */
133 COSTS_N_INSNS (5), /* ML */
134 COSTS_N_INSNS (5), /* MR */
135 COSTS_N_INSNS (4), /* MS */
136 COSTS_N_INSNS (15), /* MSG */
137 COSTS_N_INSNS (7), /* MSGF */
138 COSTS_N_INSNS (7), /* MSGFR */
139 COSTS_N_INSNS (10), /* MSGR */
140 COSTS_N_INSNS (4), /* MSR */
141 COSTS_N_INSNS (7), /* multiplication in DFmode */
142 COSTS_N_INSNS (13), /* MXBR */
143 COSTS_N_INSNS (136), /* SQXBR */
144 COSTS_N_INSNS (44), /* SQDBR */
145 COSTS_N_INSNS (35), /* SQEBR */
146 COSTS_N_INSNS (18), /* MADBR */
147 COSTS_N_INSNS (13), /* MAEBR */
148 COSTS_N_INSNS (134), /* DXBR */
149 COSTS_N_INSNS (30), /* DDBR */
150 COSTS_N_INSNS (27), /* DEBR */
151 COSTS_N_INSNS (220), /* DLGR */
152 COSTS_N_INSNS (34), /* DLR */
153 COSTS_N_INSNS (34), /* DR */
154 COSTS_N_INSNS (32), /* DSGFR */
155 COSTS_N_INSNS (32), /* DSGR */
156 };
157
158 static const
159 struct processor_costs z990_cost =
160 {
161 COSTS_N_INSNS (4), /* M */
162 COSTS_N_INSNS (2), /* MGHI */
163 COSTS_N_INSNS (2), /* MH */
164 COSTS_N_INSNS (2), /* MHI */
165 COSTS_N_INSNS (4), /* ML */
166 COSTS_N_INSNS (4), /* MR */
167 COSTS_N_INSNS (5), /* MS */
168 COSTS_N_INSNS (6), /* MSG */
169 COSTS_N_INSNS (4), /* MSGF */
170 COSTS_N_INSNS (4), /* MSGFR */
171 COSTS_N_INSNS (4), /* MSGR */
172 COSTS_N_INSNS (4), /* MSR */
173 COSTS_N_INSNS (1), /* multiplication in DFmode */
174 COSTS_N_INSNS (28), /* MXBR */
175 COSTS_N_INSNS (130), /* SQXBR */
176 COSTS_N_INSNS (66), /* SQDBR */
177 COSTS_N_INSNS (38), /* SQEBR */
178 COSTS_N_INSNS (1), /* MADBR */
179 COSTS_N_INSNS (1), /* MAEBR */
180 COSTS_N_INSNS (60), /* DXBR */
181 COSTS_N_INSNS (40), /* DDBR */
182 COSTS_N_INSNS (26), /* DEBR */
183 COSTS_N_INSNS (176), /* DLGR */
184 COSTS_N_INSNS (31), /* DLR */
185 COSTS_N_INSNS (31), /* DR */
186 COSTS_N_INSNS (31), /* DSGFR */
187 COSTS_N_INSNS (31), /* DSGR */
188 };
189
190 static const
191 struct processor_costs z9_109_cost =
192 {
193 COSTS_N_INSNS (4), /* M */
194 COSTS_N_INSNS (2), /* MGHI */
195 COSTS_N_INSNS (2), /* MH */
196 COSTS_N_INSNS (2), /* MHI */
197 COSTS_N_INSNS (4), /* ML */
198 COSTS_N_INSNS (4), /* MR */
199 COSTS_N_INSNS (5), /* MS */
200 COSTS_N_INSNS (6), /* MSG */
201 COSTS_N_INSNS (4), /* MSGF */
202 COSTS_N_INSNS (4), /* MSGFR */
203 COSTS_N_INSNS (4), /* MSGR */
204 COSTS_N_INSNS (4), /* MSR */
205 COSTS_N_INSNS (1), /* multiplication in DFmode */
206 COSTS_N_INSNS (28), /* MXBR */
207 COSTS_N_INSNS (130), /* SQXBR */
208 COSTS_N_INSNS (66), /* SQDBR */
209 COSTS_N_INSNS (38), /* SQEBR */
210 COSTS_N_INSNS (1), /* MADBR */
211 COSTS_N_INSNS (1), /* MAEBR */
212 COSTS_N_INSNS (60), /* DXBR */
213 COSTS_N_INSNS (40), /* DDBR */
214 COSTS_N_INSNS (26), /* DEBR */
215 COSTS_N_INSNS (30), /* DLGR */
216 COSTS_N_INSNS (23), /* DLR */
217 COSTS_N_INSNS (23), /* DR */
218 COSTS_N_INSNS (24), /* DSGFR */
219 COSTS_N_INSNS (24), /* DSGR */
220 };
221
222 static const
223 struct processor_costs z10_cost =
224 {
225 COSTS_N_INSNS (10), /* M */
226 COSTS_N_INSNS (10), /* MGHI */
227 COSTS_N_INSNS (10), /* MH */
228 COSTS_N_INSNS (10), /* MHI */
229 COSTS_N_INSNS (10), /* ML */
230 COSTS_N_INSNS (10), /* MR */
231 COSTS_N_INSNS (10), /* MS */
232 COSTS_N_INSNS (10), /* MSG */
233 COSTS_N_INSNS (10), /* MSGF */
234 COSTS_N_INSNS (10), /* MSGFR */
235 COSTS_N_INSNS (10), /* MSGR */
236 COSTS_N_INSNS (10), /* MSR */
237 COSTS_N_INSNS (1) , /* multiplication in DFmode */
238 COSTS_N_INSNS (50), /* MXBR */
239 COSTS_N_INSNS (120), /* SQXBR */
240 COSTS_N_INSNS (52), /* SQDBR */
241 COSTS_N_INSNS (38), /* SQEBR */
242 COSTS_N_INSNS (1), /* MADBR */
243 COSTS_N_INSNS (1), /* MAEBR */
244 COSTS_N_INSNS (111), /* DXBR */
245 COSTS_N_INSNS (39), /* DDBR */
246 COSTS_N_INSNS (32), /* DEBR */
247 COSTS_N_INSNS (160), /* DLGR */
248 COSTS_N_INSNS (71), /* DLR */
249 COSTS_N_INSNS (71), /* DR */
250 COSTS_N_INSNS (71), /* DSGFR */
251 COSTS_N_INSNS (71), /* DSGR */
252 };
253
254 static const
255 struct processor_costs z196_cost =
256 {
257 COSTS_N_INSNS (7), /* M */
258 COSTS_N_INSNS (5), /* MGHI */
259 COSTS_N_INSNS (5), /* MH */
260 COSTS_N_INSNS (5), /* MHI */
261 COSTS_N_INSNS (7), /* ML */
262 COSTS_N_INSNS (7), /* MR */
263 COSTS_N_INSNS (6), /* MS */
264 COSTS_N_INSNS (8), /* MSG */
265 COSTS_N_INSNS (6), /* MSGF */
266 COSTS_N_INSNS (6), /* MSGFR */
267 COSTS_N_INSNS (8), /* MSGR */
268 COSTS_N_INSNS (6), /* MSR */
269 COSTS_N_INSNS (1) , /* multiplication in DFmode */
270 COSTS_N_INSNS (40), /* MXBR B+40 */
271 COSTS_N_INSNS (100), /* SQXBR B+100 */
272 COSTS_N_INSNS (42), /* SQDBR B+42 */
273 COSTS_N_INSNS (28), /* SQEBR B+28 */
274 COSTS_N_INSNS (1), /* MADBR B */
275 COSTS_N_INSNS (1), /* MAEBR B */
276 COSTS_N_INSNS (101), /* DXBR B+101 */
277 COSTS_N_INSNS (29), /* DDBR */
278 COSTS_N_INSNS (22), /* DEBR */
279 COSTS_N_INSNS (160), /* DLGR cracked */
280 COSTS_N_INSNS (160), /* DLR cracked */
281 COSTS_N_INSNS (160), /* DR expanded */
282 COSTS_N_INSNS (160), /* DSGFR cracked */
283 COSTS_N_INSNS (160), /* DSGR cracked */
284 };
285
286 static const
287 struct processor_costs zEC12_cost =
288 {
289 COSTS_N_INSNS (7), /* M */
290 COSTS_N_INSNS (5), /* MGHI */
291 COSTS_N_INSNS (5), /* MH */
292 COSTS_N_INSNS (5), /* MHI */
293 COSTS_N_INSNS (7), /* ML */
294 COSTS_N_INSNS (7), /* MR */
295 COSTS_N_INSNS (6), /* MS */
296 COSTS_N_INSNS (8), /* MSG */
297 COSTS_N_INSNS (6), /* MSGF */
298 COSTS_N_INSNS (6), /* MSGFR */
299 COSTS_N_INSNS (8), /* MSGR */
300 COSTS_N_INSNS (6), /* MSR */
301 COSTS_N_INSNS (1) , /* multiplication in DFmode */
302 COSTS_N_INSNS (40), /* MXBR B+40 */
303 COSTS_N_INSNS (100), /* SQXBR B+100 */
304 COSTS_N_INSNS (42), /* SQDBR B+42 */
305 COSTS_N_INSNS (28), /* SQEBR B+28 */
306 COSTS_N_INSNS (1), /* MADBR B */
307 COSTS_N_INSNS (1), /* MAEBR B */
308 COSTS_N_INSNS (131), /* DXBR B+131 */
309 COSTS_N_INSNS (29), /* DDBR */
310 COSTS_N_INSNS (22), /* DEBR */
311 COSTS_N_INSNS (160), /* DLGR cracked */
312 COSTS_N_INSNS (160), /* DLR cracked */
313 COSTS_N_INSNS (160), /* DR expanded */
314 COSTS_N_INSNS (160), /* DSGFR cracked */
315 COSTS_N_INSNS (160), /* DSGR cracked */
316 };
317
318 static struct
319 {
320 const char *const name;
321 const enum processor_type processor;
322 const struct processor_costs *cost;
323 }
324 const processor_table[] =
325 {
326 { "g5", PROCESSOR_9672_G5, &z900_cost },
327 { "g6", PROCESSOR_9672_G6, &z900_cost },
328 { "z900", PROCESSOR_2064_Z900, &z900_cost },
329 { "z990", PROCESSOR_2084_Z990, &z990_cost },
330 { "z9-109", PROCESSOR_2094_Z9_109, &z9_109_cost },
331 { "z9-ec", PROCESSOR_2094_Z9_EC, &z9_109_cost },
332 { "z10", PROCESSOR_2097_Z10, &z10_cost },
333 { "z196", PROCESSOR_2817_Z196, &z196_cost },
334 { "zEC12", PROCESSOR_2827_ZEC12, &zEC12_cost },
335 { "z13", PROCESSOR_2964_Z13, &zEC12_cost },
336 { "native", PROCESSOR_NATIVE, NULL }
337 };
338
339 extern int reload_completed;
340
341 /* Kept up to date using the SCHED_VARIABLE_ISSUE hook. */
342 static rtx_insn *last_scheduled_insn;
343 #define MAX_SCHED_UNITS 3
344 static int last_scheduled_unit_distance[MAX_SCHED_UNITS];
345
346 /* The maximum score added for an instruction whose unit hasn't been
347 in use for MAX_SCHED_MIX_DISTANCE steps. Increase this value to
348 give instruction mix scheduling more priority over instruction
349 grouping. */
350 #define MAX_SCHED_MIX_SCORE 8
351
352 /* The maximum distance up to which individual scores will be
353 calculated. Everything beyond this gives MAX_SCHED_MIX_SCORE.
354 Increase this with the OOO windows size of the machine. */
355 #define MAX_SCHED_MIX_DISTANCE 100
356
357 /* Structure used to hold the components of a S/390 memory
358 address. A legitimate address on S/390 is of the general
359 form
360 base + index + displacement
361 where any of the components is optional.
362
363 base and index are registers of the class ADDR_REGS,
364 displacement is an unsigned 12-bit immediate constant. */
365
366 struct s390_address
367 {
368 rtx base;
369 rtx indx;
370 rtx disp;
371 bool pointer;
372 bool literal_pool;
373 };
374
375 /* The following structure is embedded in the machine
376 specific part of struct function. */
377
378 struct GTY (()) s390_frame_layout
379 {
380 /* Offset within stack frame. */
381 HOST_WIDE_INT gprs_offset;
382 HOST_WIDE_INT f0_offset;
383 HOST_WIDE_INT f4_offset;
384 HOST_WIDE_INT f8_offset;
385 HOST_WIDE_INT backchain_offset;
386
387 /* Number of first and last gpr where slots in the register
388 save area are reserved for. */
389 int first_save_gpr_slot;
390 int last_save_gpr_slot;
391
392 /* Location (FP register number) where GPRs (r0-r15) should
393 be saved to.
394 0 - does not need to be saved at all
395 -1 - stack slot */
396 #define SAVE_SLOT_NONE 0
397 #define SAVE_SLOT_STACK -1
398 signed char gpr_save_slots[16];
399
400 /* Number of first and last gpr to be saved, restored. */
401 int first_save_gpr;
402 int first_restore_gpr;
403 int last_save_gpr;
404 int last_restore_gpr;
405
406 /* Bits standing for floating point registers. Set, if the
407 respective register has to be saved. Starting with reg 16 (f0)
408 at the rightmost bit.
409 Bit 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
410 fpr 15 13 11 9 14 12 10 8 7 5 3 1 6 4 2 0
411 reg 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 */
412 unsigned int fpr_bitmap;
413
414 /* Number of floating point registers f8-f15 which must be saved. */
415 int high_fprs;
416
417 /* Set if return address needs to be saved.
418 This flag is set by s390_return_addr_rtx if it could not use
419 the initial value of r14 and therefore depends on r14 saved
420 to the stack. */
421 bool save_return_addr_p;
422
423 /* Size of stack frame. */
424 HOST_WIDE_INT frame_size;
425 };
426
427 /* Define the structure for the machine field in struct function. */
428
429 struct GTY(()) machine_function
430 {
431 struct s390_frame_layout frame_layout;
432
433 /* Literal pool base register. */
434 rtx base_reg;
435
436 /* True if we may need to perform branch splitting. */
437 bool split_branches_pending_p;
438
439 bool has_landing_pad_p;
440
441 /* True if the current function may contain a tbegin clobbering
442 FPRs. */
443 bool tbegin_p;
444
445 /* For -fsplit-stack support: A stack local which holds a pointer to
446 the stack arguments for a function with a variable number of
447 arguments. This is set at the start of the function and is used
448 to initialize the overflow_arg_area field of the va_list
449 structure. */
450 rtx split_stack_varargs_pointer;
451 };
452
453 /* Few accessor macros for struct cfun->machine->s390_frame_layout. */
454
455 #define cfun_frame_layout (cfun->machine->frame_layout)
456 #define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
457 #define cfun_save_arg_fprs_p (!!(TARGET_64BIT \
458 ? cfun_frame_layout.fpr_bitmap & 0x0f \
459 : cfun_frame_layout.fpr_bitmap & 0x03))
460 #define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
461 cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
462 #define cfun_set_fpr_save(REGNO) (cfun->machine->frame_layout.fpr_bitmap |= \
463 (1 << (REGNO - FPR0_REGNUM)))
464 #define cfun_fpr_save_p(REGNO) (!!(cfun->machine->frame_layout.fpr_bitmap & \
465 (1 << (REGNO - FPR0_REGNUM))))
466 #define cfun_gpr_save_slot(REGNO) \
467 cfun->machine->frame_layout.gpr_save_slots[REGNO]
468
469 /* Number of GPRs and FPRs used for argument passing. */
470 #define GP_ARG_NUM_REG 5
471 #define FP_ARG_NUM_REG (TARGET_64BIT? 4 : 2)
472 #define VEC_ARG_NUM_REG 8
473
474 /* A couple of shortcuts. */
475 #define CONST_OK_FOR_J(x) \
476 CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
477 #define CONST_OK_FOR_K(x) \
478 CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
479 #define CONST_OK_FOR_Os(x) \
480 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
481 #define CONST_OK_FOR_Op(x) \
482 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
483 #define CONST_OK_FOR_On(x) \
484 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
485
486 #define REGNO_PAIR_OK(REGNO, MODE) \
487 (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
488
489 /* That's the read ahead of the dynamic branch prediction unit in
490 bytes on a z10 (or higher) CPU. */
491 #define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
492
493
494 /* Indicate which ABI has been used for passing vector args.
495 0 - no vector type arguments have been passed where the ABI is relevant
496 1 - the old ABI has been used
497 2 - a vector type argument has been passed either in a vector register
498 or on the stack by value */
499 static int s390_vector_abi = 0;
500
501 /* Set the vector ABI marker if TYPE is subject to the vector ABI
502 switch. The vector ABI affects only vector data types. There are
503 two aspects of the vector ABI relevant here:
504
505 1. vectors >= 16 bytes have an alignment of 8 bytes with the new
506 ABI and natural alignment with the old.
507
508 2. vector <= 16 bytes are passed in VRs or by value on the stack
509 with the new ABI but by reference on the stack with the old.
510
511 If ARG_P is true TYPE is used for a function argument or return
512 value. The ABI marker then is set for all vector data types. If
513 ARG_P is false only type 1 vectors are being checked. */
514
515 static void
516 s390_check_type_for_vector_abi (const_tree type, bool arg_p, bool in_struct_p)
517 {
518 static hash_set<const_tree> visited_types_hash;
519
520 if (s390_vector_abi)
521 return;
522
523 if (type == NULL_TREE || TREE_CODE (type) == ERROR_MARK)
524 return;
525
526 if (visited_types_hash.contains (type))
527 return;
528
529 visited_types_hash.add (type);
530
531 if (VECTOR_TYPE_P (type))
532 {
533 int type_size = int_size_in_bytes (type);
534
535 /* Outside arguments only the alignment is changing and this
536 only happens for vector types >= 16 bytes. */
537 if (!arg_p && type_size < 16)
538 return;
539
540 /* In arguments vector types > 16 are passed as before (GCC
541 never enforced the bigger alignment for arguments which was
542 required by the old vector ABI). However, it might still be
543 ABI relevant due to the changed alignment if it is a struct
544 member. */
545 if (arg_p && type_size > 16 && !in_struct_p)
546 return;
547
548 s390_vector_abi = TARGET_VX_ABI ? 2 : 1;
549 }
550 else if (POINTER_TYPE_P (type) || TREE_CODE (type) == ARRAY_TYPE)
551 {
552 /* ARRAY_TYPE: Since with neither of the ABIs we have more than
553 natural alignment there will never be ABI dependent padding
554 in an array type. That's why we do not set in_struct_p to
555 true here. */
556 s390_check_type_for_vector_abi (TREE_TYPE (type), arg_p, in_struct_p);
557 }
558 else if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
559 {
560 tree arg_chain;
561
562 /* Check the return type. */
563 s390_check_type_for_vector_abi (TREE_TYPE (type), true, false);
564
565 for (arg_chain = TYPE_ARG_TYPES (type);
566 arg_chain;
567 arg_chain = TREE_CHAIN (arg_chain))
568 s390_check_type_for_vector_abi (TREE_VALUE (arg_chain), true, false);
569 }
570 else if (RECORD_OR_UNION_TYPE_P (type))
571 {
572 tree field;
573
574 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
575 {
576 if (TREE_CODE (field) != FIELD_DECL)
577 continue;
578
579 s390_check_type_for_vector_abi (TREE_TYPE (field), arg_p, true);
580 }
581 }
582 }
583
584
585 /* System z builtins. */
586
587 #include "s390-builtins.h"
588
589 const unsigned int bflags_builtin[S390_BUILTIN_MAX + 1] =
590 {
591 #undef B_DEF
592 #undef OB_DEF
593 #undef OB_DEF_VAR
594 #define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, ...) BFLAGS,
595 #define OB_DEF(...)
596 #define OB_DEF_VAR(...)
597 #include "s390-builtins.def"
598 0
599 };
600
601 const unsigned int opflags_builtin[S390_BUILTIN_MAX + 1] =
602 {
603 #undef B_DEF
604 #undef OB_DEF
605 #undef OB_DEF_VAR
606 #define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, OPFLAGS, ...) OPFLAGS,
607 #define OB_DEF(...)
608 #define OB_DEF_VAR(...)
609 #include "s390-builtins.def"
610 0
611 };
612
613 const unsigned int bflags_overloaded_builtin[S390_OVERLOADED_BUILTIN_MAX + 1] =
614 {
615 #undef B_DEF
616 #undef OB_DEF
617 #undef OB_DEF_VAR
618 #define B_DEF(...)
619 #define OB_DEF(NAME, FIRST_VAR_NAME, LAST_VAR_NAME, BFLAGS, ...) BFLAGS,
620 #define OB_DEF_VAR(...)
621 #include "s390-builtins.def"
622 0
623 };
624
625 const unsigned int
626 opflags_overloaded_builtin_var[S390_OVERLOADED_BUILTIN_VAR_MAX + 1] =
627 {
628 #undef B_DEF
629 #undef OB_DEF
630 #undef OB_DEF_VAR
631 #define B_DEF(...)
632 #define OB_DEF(...)
633 #define OB_DEF_VAR(NAME, PATTERN, FLAGS, FNTYPE) FLAGS,
634 #include "s390-builtins.def"
635 0
636 };
637
638 tree s390_builtin_types[BT_MAX];
639 tree s390_builtin_fn_types[BT_FN_MAX];
640 tree s390_builtin_decls[S390_BUILTIN_MAX +
641 S390_OVERLOADED_BUILTIN_MAX +
642 S390_OVERLOADED_BUILTIN_VAR_MAX];
643
644 static enum insn_code const code_for_builtin[S390_BUILTIN_MAX + 1] = {
645 #undef B_DEF
646 #undef OB_DEF
647 #undef OB_DEF_VAR
648 #define B_DEF(NAME, PATTERN, ...) CODE_FOR_##PATTERN,
649 #define OB_DEF(...)
650 #define OB_DEF_VAR(...)
651
652 #include "s390-builtins.def"
653 CODE_FOR_nothing
654 };
655
656 static void
657 s390_init_builtins (void)
658 {
659 /* These definitions are being used in s390-builtins.def. */
660 tree returns_twice_attr = tree_cons (get_identifier ("returns_twice"),
661 NULL, NULL);
662 tree noreturn_attr = tree_cons (get_identifier ("noreturn"), NULL, NULL);
663 tree c_uint64_type_node;
664
665 /* The uint64_type_node from tree.c is not compatible to the C99
666 uint64_t data type. What we want is c_uint64_type_node from
667 c-common.c. But since backend code is not supposed to interface
668 with the frontend we recreate it here. */
669 if (TARGET_64BIT)
670 c_uint64_type_node = long_unsigned_type_node;
671 else
672 c_uint64_type_node = long_long_unsigned_type_node;
673
674 #undef DEF_TYPE
675 #define DEF_TYPE(INDEX, BFLAGS, NODE, CONST_P) \
676 if (s390_builtin_types[INDEX] == NULL) \
677 s390_builtin_types[INDEX] = (!CONST_P) ? \
678 (NODE) : build_type_variant ((NODE), 1, 0);
679
680 #undef DEF_POINTER_TYPE
681 #define DEF_POINTER_TYPE(INDEX, BFLAGS, INDEX_BASE) \
682 if (s390_builtin_types[INDEX] == NULL) \
683 s390_builtin_types[INDEX] = \
684 build_pointer_type (s390_builtin_types[INDEX_BASE]);
685
686 #undef DEF_DISTINCT_TYPE
687 #define DEF_DISTINCT_TYPE(INDEX, BFLAGS, INDEX_BASE) \
688 if (s390_builtin_types[INDEX] == NULL) \
689 s390_builtin_types[INDEX] = \
690 build_distinct_type_copy (s390_builtin_types[INDEX_BASE]);
691
692 #undef DEF_VECTOR_TYPE
693 #define DEF_VECTOR_TYPE(INDEX, BFLAGS, INDEX_BASE, ELEMENTS) \
694 if (s390_builtin_types[INDEX] == NULL) \
695 s390_builtin_types[INDEX] = \
696 build_vector_type (s390_builtin_types[INDEX_BASE], ELEMENTS);
697
698 #undef DEF_OPAQUE_VECTOR_TYPE
699 #define DEF_OPAQUE_VECTOR_TYPE(INDEX, BFLAGS, INDEX_BASE, ELEMENTS) \
700 if (s390_builtin_types[INDEX] == NULL) \
701 s390_builtin_types[INDEX] = \
702 build_opaque_vector_type (s390_builtin_types[INDEX_BASE], ELEMENTS);
703
704 #undef DEF_FN_TYPE
705 #define DEF_FN_TYPE(INDEX, BFLAGS, args...) \
706 if (s390_builtin_fn_types[INDEX] == NULL) \
707 s390_builtin_fn_types[INDEX] = \
708 build_function_type_list (args, NULL_TREE);
709 #undef DEF_OV_TYPE
710 #define DEF_OV_TYPE(...)
711 #include "s390-builtin-types.def"
712
713 #undef B_DEF
714 #define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, OPFLAGS, FNTYPE) \
715 if (s390_builtin_decls[S390_BUILTIN_##NAME] == NULL) \
716 s390_builtin_decls[S390_BUILTIN_##NAME] = \
717 add_builtin_function ("__builtin_" #NAME, \
718 s390_builtin_fn_types[FNTYPE], \
719 S390_BUILTIN_##NAME, \
720 BUILT_IN_MD, \
721 NULL, \
722 ATTRS);
723 #undef OB_DEF
724 #define OB_DEF(NAME, FIRST_VAR_NAME, LAST_VAR_NAME, BFLAGS, FNTYPE) \
725 if (s390_builtin_decls[S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX] \
726 == NULL) \
727 s390_builtin_decls[S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX] = \
728 add_builtin_function ("__builtin_" #NAME, \
729 s390_builtin_fn_types[FNTYPE], \
730 S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX, \
731 BUILT_IN_MD, \
732 NULL, \
733 0);
734 #undef OB_DEF_VAR
735 #define OB_DEF_VAR(...)
736 #include "s390-builtins.def"
737
738 }
739
740 /* Return true if ARG is appropriate as argument number ARGNUM of
741 builtin DECL. The operand flags from s390-builtins.def have to
742 passed as OP_FLAGS. */
743 bool
744 s390_const_operand_ok (tree arg, int argnum, int op_flags, tree decl)
745 {
746 if (O_UIMM_P (op_flags))
747 {
748 int bitwidths[] = { 1, 2, 3, 4, 5, 8, 12, 16, 32 };
749 int bitwidth = bitwidths[op_flags - O_U1];
750
751 if (!tree_fits_uhwi_p (arg)
752 || tree_to_uhwi (arg) > ((unsigned HOST_WIDE_INT)1 << bitwidth) - 1)
753 {
754 error("constant argument %d for builtin %qF is out of range (0.."
755 HOST_WIDE_INT_PRINT_UNSIGNED ")",
756 argnum, decl,
757 ((unsigned HOST_WIDE_INT)1 << bitwidth) - 1);
758 return false;
759 }
760 }
761
762 if (O_SIMM_P (op_flags))
763 {
764 int bitwidths[] = { 2, 3, 4, 5, 8, 12, 16, 32 };
765 int bitwidth = bitwidths[op_flags - O_S2];
766
767 if (!tree_fits_shwi_p (arg)
768 || tree_to_shwi (arg) < -((HOST_WIDE_INT)1 << (bitwidth - 1))
769 || tree_to_shwi (arg) > (((HOST_WIDE_INT)1 << (bitwidth - 1)) - 1))
770 {
771 error("constant argument %d for builtin %qF is out of range ("
772 HOST_WIDE_INT_PRINT_DEC ".."
773 HOST_WIDE_INT_PRINT_DEC ")",
774 argnum, decl,
775 -((HOST_WIDE_INT)1 << (bitwidth - 1)),
776 ((HOST_WIDE_INT)1 << (bitwidth - 1)) - 1);
777 return false;
778 }
779 }
780 return true;
781 }
782
783 /* Expand an expression EXP that calls a built-in function,
784 with result going to TARGET if that's convenient
785 (and in mode MODE if that's convenient).
786 SUBTARGET may be used as the target for computing one of EXP's operands.
787 IGNORE is nonzero if the value is to be ignored. */
788
789 static rtx
790 s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
791 machine_mode mode ATTRIBUTE_UNUSED,
792 int ignore ATTRIBUTE_UNUSED)
793 {
794 #define MAX_ARGS 5
795
796 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
797 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
798 enum insn_code icode;
799 rtx op[MAX_ARGS], pat;
800 int arity;
801 bool nonvoid;
802 tree arg;
803 call_expr_arg_iterator iter;
804 unsigned int all_op_flags = opflags_for_builtin (fcode);
805 machine_mode last_vec_mode = VOIDmode;
806
807 if (TARGET_DEBUG_ARG)
808 {
809 fprintf (stderr,
810 "s390_expand_builtin, code = %4d, %s, bflags = 0x%x\n",
811 (int)fcode, IDENTIFIER_POINTER (DECL_NAME (fndecl)),
812 bflags_for_builtin (fcode));
813 }
814
815 if (S390_USE_TARGET_ATTRIBUTE)
816 {
817 unsigned int bflags;
818
819 bflags = bflags_for_builtin (fcode);
820 if ((bflags & B_HTM) && !TARGET_HTM)
821 {
822 error ("Builtin %qF is not supported without -mhtm "
823 "(default with -march=zEC12 and higher).", fndecl);
824 return const0_rtx;
825 }
826 if ((bflags & B_VX) && !TARGET_VX)
827 {
828 error ("Builtin %qF is not supported without -mvx "
829 "(default with -march=z13 and higher).", fndecl);
830 return const0_rtx;
831 }
832 }
833 if (fcode >= S390_OVERLOADED_BUILTIN_VAR_OFFSET
834 && fcode < S390_ALL_BUILTIN_MAX)
835 {
836 gcc_unreachable ();
837 }
838 else if (fcode < S390_OVERLOADED_BUILTIN_OFFSET)
839 {
840 icode = code_for_builtin[fcode];
841 /* Set a flag in the machine specific cfun part in order to support
842 saving/restoring of FPRs. */
843 if (fcode == S390_BUILTIN_tbegin || fcode == S390_BUILTIN_tbegin_retry)
844 cfun->machine->tbegin_p = true;
845 }
846 else if (fcode < S390_OVERLOADED_BUILTIN_VAR_OFFSET)
847 {
848 error ("Unresolved overloaded builtin");
849 return const0_rtx;
850 }
851 else
852 internal_error ("bad builtin fcode");
853
854 if (icode == 0)
855 internal_error ("bad builtin icode");
856
857 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
858
859 if (nonvoid)
860 {
861 machine_mode tmode = insn_data[icode].operand[0].mode;
862 if (!target
863 || GET_MODE (target) != tmode
864 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
865 target = gen_reg_rtx (tmode);
866
867 /* There are builtins (e.g. vec_promote) with no vector
868 arguments but an element selector. So we have to also look
869 at the vector return type when emitting the modulo
870 operation. */
871 if (VECTOR_MODE_P (insn_data[icode].operand[0].mode))
872 last_vec_mode = insn_data[icode].operand[0].mode;
873 }
874
875 arity = 0;
876 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
877 {
878 const struct insn_operand_data *insn_op;
879 unsigned int op_flags = all_op_flags & ((1 << O_SHIFT) - 1);
880
881 all_op_flags = all_op_flags >> O_SHIFT;
882
883 if (arg == error_mark_node)
884 return NULL_RTX;
885 if (arity >= MAX_ARGS)
886 return NULL_RTX;
887
888 if (O_IMM_P (op_flags)
889 && TREE_CODE (arg) != INTEGER_CST)
890 {
891 error ("constant value required for builtin %qF argument %d",
892 fndecl, arity + 1);
893 return const0_rtx;
894 }
895
896 if (!s390_const_operand_ok (arg, arity + 1, op_flags, fndecl))
897 return const0_rtx;
898
899 insn_op = &insn_data[icode].operand[arity + nonvoid];
900 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
901
902 /* expand_expr truncates constants to the target mode only if it
903 is "convenient". However, our checks below rely on this
904 being done. */
905 if (CONST_INT_P (op[arity])
906 && SCALAR_INT_MODE_P (insn_op->mode)
907 && GET_MODE (op[arity]) != insn_op->mode)
908 op[arity] = GEN_INT (trunc_int_for_mode (INTVAL (op[arity]),
909 insn_op->mode));
910
911 /* Wrap the expanded RTX for pointer types into a MEM expr with
912 the proper mode. This allows us to use e.g. (match_operand
913 "memory_operand"..) in the insn patterns instead of (mem
914 (match_operand "address_operand)). This is helpful for
915 patterns not just accepting MEMs. */
916 if (POINTER_TYPE_P (TREE_TYPE (arg))
917 && insn_op->predicate != address_operand)
918 op[arity] = gen_rtx_MEM (insn_op->mode, op[arity]);
919
920 /* Expand the module operation required on element selectors. */
921 if (op_flags == O_ELEM)
922 {
923 gcc_assert (last_vec_mode != VOIDmode);
924 op[arity] = simplify_expand_binop (SImode, code_to_optab (AND),
925 op[arity],
926 GEN_INT (GET_MODE_NUNITS (last_vec_mode) - 1),
927 NULL_RTX, 1, OPTAB_DIRECT);
928 }
929
930 /* Record the vector mode used for an element selector. This assumes:
931 1. There is no builtin with two different vector modes and an element selector
932 2. The element selector comes after the vector type it is referring to.
933 This currently the true for all the builtins but FIXME we
934 should better check for that. */
935 if (VECTOR_MODE_P (insn_op->mode))
936 last_vec_mode = insn_op->mode;
937
938 if (insn_op->predicate (op[arity], insn_op->mode))
939 {
940 arity++;
941 continue;
942 }
943
944 if (MEM_P (op[arity])
945 && insn_op->predicate == memory_operand
946 && (GET_MODE (XEXP (op[arity], 0)) == Pmode
947 || GET_MODE (XEXP (op[arity], 0)) == VOIDmode))
948 {
949 op[arity] = replace_equiv_address (op[arity],
950 copy_to_mode_reg (Pmode,
951 XEXP (op[arity], 0)));
952 }
953 else if (GET_MODE (op[arity]) == insn_op->mode
954 || GET_MODE (op[arity]) == VOIDmode
955 || (insn_op->predicate == address_operand
956 && GET_MODE (op[arity]) == Pmode))
957 {
958 /* An address_operand usually has VOIDmode in the expander
959 so we cannot use this. */
960 machine_mode target_mode =
961 (insn_op->predicate == address_operand
962 ? Pmode : insn_op->mode);
963 op[arity] = copy_to_mode_reg (target_mode, op[arity]);
964 }
965
966 if (!insn_op->predicate (op[arity], insn_op->mode))
967 {
968 error ("Invalid argument %d for builtin %qF", arity + 1, fndecl);
969 return const0_rtx;
970 }
971 arity++;
972 }
973
974 switch (arity)
975 {
976 case 0:
977 pat = GEN_FCN (icode) (target);
978 break;
979 case 1:
980 if (nonvoid)
981 pat = GEN_FCN (icode) (target, op[0]);
982 else
983 pat = GEN_FCN (icode) (op[0]);
984 break;
985 case 2:
986 if (nonvoid)
987 pat = GEN_FCN (icode) (target, op[0], op[1]);
988 else
989 pat = GEN_FCN (icode) (op[0], op[1]);
990 break;
991 case 3:
992 if (nonvoid)
993 pat = GEN_FCN (icode) (target, op[0], op[1], op[2]);
994 else
995 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
996 break;
997 case 4:
998 if (nonvoid)
999 pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3]);
1000 else
1001 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
1002 break;
1003 case 5:
1004 if (nonvoid)
1005 pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4]);
1006 else
1007 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4]);
1008 break;
1009 case 6:
1010 if (nonvoid)
1011 pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4], op[5]);
1012 else
1013 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4], op[5]);
1014 break;
1015 default:
1016 gcc_unreachable ();
1017 }
1018 if (!pat)
1019 return NULL_RTX;
1020 emit_insn (pat);
1021
1022 if (nonvoid)
1023 return target;
1024 else
1025 return const0_rtx;
1026 }
1027
1028
1029 static const int s390_hotpatch_hw_max = 1000000;
1030 static int s390_hotpatch_hw_before_label = 0;
1031 static int s390_hotpatch_hw_after_label = 0;
1032
1033 /* Check whether the hotpatch attribute is applied to a function and, if it has
1034 an argument, the argument is valid. */
1035
1036 static tree
1037 s390_handle_hotpatch_attribute (tree *node, tree name, tree args,
1038 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1039 {
1040 tree expr;
1041 tree expr2;
1042 int err;
1043
1044 if (TREE_CODE (*node) != FUNCTION_DECL)
1045 {
1046 warning (OPT_Wattributes, "%qE attribute only applies to functions",
1047 name);
1048 *no_add_attrs = true;
1049 }
1050 if (args != NULL && TREE_CHAIN (args) != NULL)
1051 {
1052 expr = TREE_VALUE (args);
1053 expr2 = TREE_VALUE (TREE_CHAIN (args));
1054 }
1055 if (args == NULL || TREE_CHAIN (args) == NULL)
1056 err = 1;
1057 else if (TREE_CODE (expr) != INTEGER_CST
1058 || !INTEGRAL_TYPE_P (TREE_TYPE (expr))
1059 || wi::gtu_p (expr, s390_hotpatch_hw_max))
1060 err = 1;
1061 else if (TREE_CODE (expr2) != INTEGER_CST
1062 || !INTEGRAL_TYPE_P (TREE_TYPE (expr2))
1063 || wi::gtu_p (expr2, s390_hotpatch_hw_max))
1064 err = 1;
1065 else
1066 err = 0;
1067 if (err)
1068 {
1069 error ("requested %qE attribute is not a comma separated pair of"
1070 " non-negative integer constants or too large (max. %d)", name,
1071 s390_hotpatch_hw_max);
1072 *no_add_attrs = true;
1073 }
1074
1075 return NULL_TREE;
1076 }
1077
1078 /* Expand the s390_vector_bool type attribute. */
1079
1080 static tree
1081 s390_handle_vectorbool_attribute (tree *node, tree name ATTRIBUTE_UNUSED,
1082 tree args ATTRIBUTE_UNUSED,
1083 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1084 {
1085 tree type = *node, result = NULL_TREE;
1086 machine_mode mode;
1087
1088 while (POINTER_TYPE_P (type)
1089 || TREE_CODE (type) == FUNCTION_TYPE
1090 || TREE_CODE (type) == METHOD_TYPE
1091 || TREE_CODE (type) == ARRAY_TYPE)
1092 type = TREE_TYPE (type);
1093
1094 mode = TYPE_MODE (type);
1095 switch (mode)
1096 {
1097 case DImode: case V2DImode: result = s390_builtin_types[BT_BV2DI]; break;
1098 case SImode: case V4SImode: result = s390_builtin_types[BT_BV4SI]; break;
1099 case HImode: case V8HImode: result = s390_builtin_types[BT_BV8HI]; break;
1100 case QImode: case V16QImode: result = s390_builtin_types[BT_BV16QI];
1101 default: break;
1102 }
1103
1104 *no_add_attrs = true; /* No need to hang on to the attribute. */
1105
1106 if (result)
1107 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
1108
1109 return NULL_TREE;
1110 }
1111
1112 static const struct attribute_spec s390_attribute_table[] = {
1113 { "hotpatch", 2, 2, true, false, false, s390_handle_hotpatch_attribute, false },
1114 { "s390_vector_bool", 0, 0, false, true, false, s390_handle_vectorbool_attribute, true },
1115 /* End element. */
1116 { NULL, 0, 0, false, false, false, NULL, false }
1117 };
1118
1119 /* Return the alignment for LABEL. We default to the -falign-labels
1120 value except for the literal pool base label. */
1121 int
1122 s390_label_align (rtx label)
1123 {
1124 rtx_insn *prev_insn = prev_active_insn (label);
1125 rtx set, src;
1126
1127 if (prev_insn == NULL_RTX)
1128 goto old;
1129
1130 set = single_set (prev_insn);
1131
1132 if (set == NULL_RTX)
1133 goto old;
1134
1135 src = SET_SRC (set);
1136
1137 /* Don't align literal pool base labels. */
1138 if (GET_CODE (src) == UNSPEC
1139 && XINT (src, 1) == UNSPEC_MAIN_BASE)
1140 return 0;
1141
1142 old:
1143 return align_labels_log;
1144 }
1145
1146 static machine_mode
1147 s390_libgcc_cmp_return_mode (void)
1148 {
1149 return TARGET_64BIT ? DImode : SImode;
1150 }
1151
1152 static machine_mode
1153 s390_libgcc_shift_count_mode (void)
1154 {
1155 return TARGET_64BIT ? DImode : SImode;
1156 }
1157
1158 static machine_mode
1159 s390_unwind_word_mode (void)
1160 {
1161 return TARGET_64BIT ? DImode : SImode;
1162 }
1163
1164 /* Return true if the back end supports mode MODE. */
1165 static bool
1166 s390_scalar_mode_supported_p (machine_mode mode)
1167 {
1168 /* In contrast to the default implementation reject TImode constants on 31bit
1169 TARGET_ZARCH for ABI compliance. */
1170 if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
1171 return false;
1172
1173 if (DECIMAL_FLOAT_MODE_P (mode))
1174 return default_decimal_float_supported_p ();
1175
1176 return default_scalar_mode_supported_p (mode);
1177 }
1178
1179 /* Return true if the back end supports vector mode MODE. */
1180 static bool
1181 s390_vector_mode_supported_p (machine_mode mode)
1182 {
1183 machine_mode inner;
1184
1185 if (!VECTOR_MODE_P (mode)
1186 || !TARGET_VX
1187 || GET_MODE_SIZE (mode) > 16)
1188 return false;
1189
1190 inner = GET_MODE_INNER (mode);
1191
1192 switch (inner)
1193 {
1194 case QImode:
1195 case HImode:
1196 case SImode:
1197 case DImode:
1198 case TImode:
1199 case SFmode:
1200 case DFmode:
1201 case TFmode:
1202 return true;
1203 default:
1204 return false;
1205 }
1206 }
1207
1208 /* Set the has_landing_pad_p flag in struct machine_function to VALUE. */
1209
1210 void
1211 s390_set_has_landing_pad_p (bool value)
1212 {
1213 cfun->machine->has_landing_pad_p = value;
1214 }
1215
1216 /* If two condition code modes are compatible, return a condition code
1217 mode which is compatible with both. Otherwise, return
1218 VOIDmode. */
1219
1220 static machine_mode
1221 s390_cc_modes_compatible (machine_mode m1, machine_mode m2)
1222 {
1223 if (m1 == m2)
1224 return m1;
1225
1226 switch (m1)
1227 {
1228 case CCZmode:
1229 if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
1230 || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
1231 return m2;
1232 return VOIDmode;
1233
1234 case CCSmode:
1235 case CCUmode:
1236 case CCTmode:
1237 case CCSRmode:
1238 case CCURmode:
1239 case CCZ1mode:
1240 if (m2 == CCZmode)
1241 return m1;
1242
1243 return VOIDmode;
1244
1245 default:
1246 return VOIDmode;
1247 }
1248 return VOIDmode;
1249 }
1250
1251 /* Return true if SET either doesn't set the CC register, or else
1252 the source and destination have matching CC modes and that
1253 CC mode is at least as constrained as REQ_MODE. */
1254
1255 static bool
1256 s390_match_ccmode_set (rtx set, machine_mode req_mode)
1257 {
1258 machine_mode set_mode;
1259
1260 gcc_assert (GET_CODE (set) == SET);
1261
1262 if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
1263 return 1;
1264
1265 set_mode = GET_MODE (SET_DEST (set));
1266 switch (set_mode)
1267 {
1268 case CCSmode:
1269 case CCSRmode:
1270 case CCUmode:
1271 case CCURmode:
1272 case CCLmode:
1273 case CCL1mode:
1274 case CCL2mode:
1275 case CCL3mode:
1276 case CCT1mode:
1277 case CCT2mode:
1278 case CCT3mode:
1279 case CCVEQmode:
1280 case CCVHmode:
1281 case CCVHUmode:
1282 case CCVFHmode:
1283 case CCVFHEmode:
1284 if (req_mode != set_mode)
1285 return 0;
1286 break;
1287
1288 case CCZmode:
1289 if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
1290 && req_mode != CCSRmode && req_mode != CCURmode)
1291 return 0;
1292 break;
1293
1294 case CCAPmode:
1295 case CCANmode:
1296 if (req_mode != CCAmode)
1297 return 0;
1298 break;
1299
1300 default:
1301 gcc_unreachable ();
1302 }
1303
1304 return (GET_MODE (SET_SRC (set)) == set_mode);
1305 }
1306
1307 /* Return true if every SET in INSN that sets the CC register
1308 has source and destination with matching CC modes and that
1309 CC mode is at least as constrained as REQ_MODE.
1310 If REQ_MODE is VOIDmode, always return false. */
1311
1312 bool
1313 s390_match_ccmode (rtx_insn *insn, machine_mode req_mode)
1314 {
1315 int i;
1316
1317 /* s390_tm_ccmode returns VOIDmode to indicate failure. */
1318 if (req_mode == VOIDmode)
1319 return false;
1320
1321 if (GET_CODE (PATTERN (insn)) == SET)
1322 return s390_match_ccmode_set (PATTERN (insn), req_mode);
1323
1324 if (GET_CODE (PATTERN (insn)) == PARALLEL)
1325 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1326 {
1327 rtx set = XVECEXP (PATTERN (insn), 0, i);
1328 if (GET_CODE (set) == SET)
1329 if (!s390_match_ccmode_set (set, req_mode))
1330 return false;
1331 }
1332
1333 return true;
1334 }
1335
1336 /* If a test-under-mask instruction can be used to implement
1337 (compare (and ... OP1) OP2), return the CC mode required
1338 to do that. Otherwise, return VOIDmode.
1339 MIXED is true if the instruction can distinguish between
1340 CC1 and CC2 for mixed selected bits (TMxx), it is false
1341 if the instruction cannot (TM). */
1342
1343 machine_mode
1344 s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
1345 {
1346 int bit0, bit1;
1347
1348 /* ??? Fixme: should work on CONST_WIDE_INT as well. */
1349 if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
1350 return VOIDmode;
1351
1352 /* Selected bits all zero: CC0.
1353 e.g.: int a; if ((a & (16 + 128)) == 0) */
1354 if (INTVAL (op2) == 0)
1355 return CCTmode;
1356
1357 /* Selected bits all one: CC3.
1358 e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
1359 if (INTVAL (op2) == INTVAL (op1))
1360 return CCT3mode;
1361
1362 /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
1363 int a;
1364 if ((a & (16 + 128)) == 16) -> CCT1
1365 if ((a & (16 + 128)) == 128) -> CCT2 */
1366 if (mixed)
1367 {
1368 bit1 = exact_log2 (INTVAL (op2));
1369 bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
1370 if (bit0 != -1 && bit1 != -1)
1371 return bit0 > bit1 ? CCT1mode : CCT2mode;
1372 }
1373
1374 return VOIDmode;
1375 }
1376
1377 /* Given a comparison code OP (EQ, NE, etc.) and the operands
1378 OP0 and OP1 of a COMPARE, return the mode to be used for the
1379 comparison. */
1380
1381 machine_mode
1382 s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
1383 {
1384 if (TARGET_VX
1385 && register_operand (op0, DFmode)
1386 && register_operand (op1, DFmode))
1387 {
1388 /* LT, LE, UNGT, UNGE require swapping OP0 and OP1. Either
1389 s390_emit_compare or s390_canonicalize_comparison will take
1390 care of it. */
1391 switch (code)
1392 {
1393 case EQ:
1394 case NE:
1395 return CCVEQmode;
1396 case GT:
1397 case UNLE:
1398 return CCVFHmode;
1399 case GE:
1400 case UNLT:
1401 return CCVFHEmode;
1402 default:
1403 ;
1404 }
1405 }
1406
1407 switch (code)
1408 {
1409 case EQ:
1410 case NE:
1411 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
1412 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1413 return CCAPmode;
1414 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
1415 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
1416 return CCAPmode;
1417 if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1418 || GET_CODE (op1) == NEG)
1419 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1420 return CCLmode;
1421
1422 if (GET_CODE (op0) == AND)
1423 {
1424 /* Check whether we can potentially do it via TM. */
1425 machine_mode ccmode;
1426 ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
1427 if (ccmode != VOIDmode)
1428 {
1429 /* Relax CCTmode to CCZmode to allow fall-back to AND
1430 if that turns out to be beneficial. */
1431 return ccmode == CCTmode ? CCZmode : ccmode;
1432 }
1433 }
1434
1435 if (register_operand (op0, HImode)
1436 && GET_CODE (op1) == CONST_INT
1437 && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
1438 return CCT3mode;
1439 if (register_operand (op0, QImode)
1440 && GET_CODE (op1) == CONST_INT
1441 && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
1442 return CCT3mode;
1443
1444 return CCZmode;
1445
1446 case LE:
1447 case LT:
1448 case GE:
1449 case GT:
1450 /* The only overflow condition of NEG and ABS happens when
1451 -INT_MAX is used as parameter, which stays negative. So
1452 we have an overflow from a positive value to a negative.
1453 Using CCAP mode the resulting cc can be used for comparisons. */
1454 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
1455 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1456 return CCAPmode;
1457
1458 /* If constants are involved in an add instruction it is possible to use
1459 the resulting cc for comparisons with zero. Knowing the sign of the
1460 constant the overflow behavior gets predictable. e.g.:
1461 int a, b; if ((b = a + c) > 0)
1462 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
1463 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
1464 && (CONST_OK_FOR_K (INTVAL (XEXP (op0, 1)))
1465 || (CONST_OK_FOR_CONSTRAINT_P (INTVAL (XEXP (op0, 1)), 'O', "Os")
1466 /* Avoid INT32_MIN on 32 bit. */
1467 && (!TARGET_ZARCH || INTVAL (XEXP (op0, 1)) != -0x7fffffff - 1))))
1468 {
1469 if (INTVAL (XEXP((op0), 1)) < 0)
1470 return CCANmode;
1471 else
1472 return CCAPmode;
1473 }
1474 /* Fall through. */
1475 case UNORDERED:
1476 case ORDERED:
1477 case UNEQ:
1478 case UNLE:
1479 case UNLT:
1480 case UNGE:
1481 case UNGT:
1482 case LTGT:
1483 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
1484 && GET_CODE (op1) != CONST_INT)
1485 return CCSRmode;
1486 return CCSmode;
1487
1488 case LTU:
1489 case GEU:
1490 if (GET_CODE (op0) == PLUS
1491 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1492 return CCL1mode;
1493
1494 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
1495 && GET_CODE (op1) != CONST_INT)
1496 return CCURmode;
1497 return CCUmode;
1498
1499 case LEU:
1500 case GTU:
1501 if (GET_CODE (op0) == MINUS
1502 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1503 return CCL2mode;
1504
1505 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
1506 && GET_CODE (op1) != CONST_INT)
1507 return CCURmode;
1508 return CCUmode;
1509
1510 default:
1511 gcc_unreachable ();
1512 }
1513 }
1514
1515 /* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
1516 that we can implement more efficiently. */
1517
1518 static void
1519 s390_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
1520 bool op0_preserve_value)
1521 {
1522 if (op0_preserve_value)
1523 return;
1524
1525 /* Convert ZERO_EXTRACT back to AND to enable TM patterns. */
1526 if ((*code == EQ || *code == NE)
1527 && *op1 == const0_rtx
1528 && GET_CODE (*op0) == ZERO_EXTRACT
1529 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
1530 && GET_CODE (XEXP (*op0, 2)) == CONST_INT
1531 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
1532 {
1533 rtx inner = XEXP (*op0, 0);
1534 HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
1535 HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
1536 HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));
1537
1538 if (len > 0 && len < modesize
1539 && pos >= 0 && pos + len <= modesize
1540 && modesize <= HOST_BITS_PER_WIDE_INT)
1541 {
1542 unsigned HOST_WIDE_INT block;
1543 block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
1544 block <<= modesize - pos - len;
1545
1546 *op0 = gen_rtx_AND (GET_MODE (inner), inner,
1547 gen_int_mode (block, GET_MODE (inner)));
1548 }
1549 }
1550
1551 /* Narrow AND of memory against immediate to enable TM. */
1552 if ((*code == EQ || *code == NE)
1553 && *op1 == const0_rtx
1554 && GET_CODE (*op0) == AND
1555 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
1556 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
1557 {
1558 rtx inner = XEXP (*op0, 0);
1559 rtx mask = XEXP (*op0, 1);
1560
1561 /* Ignore paradoxical SUBREGs if all extra bits are masked out. */
1562 if (GET_CODE (inner) == SUBREG
1563 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
1564 && (GET_MODE_SIZE (GET_MODE (inner))
1565 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
1566 && ((INTVAL (mask)
1567 & GET_MODE_MASK (GET_MODE (inner))
1568 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
1569 == 0))
1570 inner = SUBREG_REG (inner);
1571
1572 /* Do not change volatile MEMs. */
1573 if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
1574 {
1575 int part = s390_single_part (XEXP (*op0, 1),
1576 GET_MODE (inner), QImode, 0);
1577 if (part >= 0)
1578 {
1579 mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
1580 inner = adjust_address_nv (inner, QImode, part);
1581 *op0 = gen_rtx_AND (QImode, inner, mask);
1582 }
1583 }
1584 }
1585
1586 /* Narrow comparisons against 0xffff to HImode if possible. */
1587 if ((*code == EQ || *code == NE)
1588 && GET_CODE (*op1) == CONST_INT
1589 && INTVAL (*op1) == 0xffff
1590 && SCALAR_INT_MODE_P (GET_MODE (*op0))
1591 && (nonzero_bits (*op0, GET_MODE (*op0))
1592 & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
1593 {
1594 *op0 = gen_lowpart (HImode, *op0);
1595 *op1 = constm1_rtx;
1596 }
1597
1598 /* Remove redundant UNSPEC_STRCMPCC_TO_INT conversions if possible. */
1599 if (GET_CODE (*op0) == UNSPEC
1600 && XINT (*op0, 1) == UNSPEC_STRCMPCC_TO_INT
1601 && XVECLEN (*op0, 0) == 1
1602 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
1603 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
1604 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
1605 && *op1 == const0_rtx)
1606 {
1607 enum rtx_code new_code = UNKNOWN;
1608 switch (*code)
1609 {
1610 case EQ: new_code = EQ; break;
1611 case NE: new_code = NE; break;
1612 case LT: new_code = GTU; break;
1613 case GT: new_code = LTU; break;
1614 case LE: new_code = GEU; break;
1615 case GE: new_code = LEU; break;
1616 default: break;
1617 }
1618
1619 if (new_code != UNKNOWN)
1620 {
1621 *op0 = XVECEXP (*op0, 0, 0);
1622 *code = new_code;
1623 }
1624 }
1625
1626 /* Remove redundant UNSPEC_CC_TO_INT conversions if possible. */
1627 if (GET_CODE (*op0) == UNSPEC
1628 && XINT (*op0, 1) == UNSPEC_CC_TO_INT
1629 && XVECLEN (*op0, 0) == 1
1630 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
1631 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
1632 && CONST_INT_P (*op1))
1633 {
1634 enum rtx_code new_code = UNKNOWN;
1635 switch (GET_MODE (XVECEXP (*op0, 0, 0)))
1636 {
1637 case CCZmode:
1638 case CCRAWmode:
1639 switch (*code)
1640 {
1641 case EQ: new_code = EQ; break;
1642 case NE: new_code = NE; break;
1643 default: break;
1644 }
1645 break;
1646 default: break;
1647 }
1648
1649 if (new_code != UNKNOWN)
1650 {
1651 /* For CCRAWmode put the required cc mask into the second
1652 operand. */
1653 if (GET_MODE (XVECEXP (*op0, 0, 0)) == CCRAWmode
1654 && INTVAL (*op1) >= 0 && INTVAL (*op1) <= 3)
1655 *op1 = gen_rtx_CONST_INT (VOIDmode, 1 << (3 - INTVAL (*op1)));
1656 *op0 = XVECEXP (*op0, 0, 0);
1657 *code = new_code;
1658 }
1659 }
1660
1661 /* Simplify cascaded EQ, NE with const0_rtx. */
1662 if ((*code == NE || *code == EQ)
1663 && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
1664 && GET_MODE (*op0) == SImode
1665 && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
1666 && REG_P (XEXP (*op0, 0))
1667 && XEXP (*op0, 1) == const0_rtx
1668 && *op1 == const0_rtx)
1669 {
1670 if ((*code == EQ && GET_CODE (*op0) == NE)
1671 || (*code == NE && GET_CODE (*op0) == EQ))
1672 *code = EQ;
1673 else
1674 *code = NE;
1675 *op0 = XEXP (*op0, 0);
1676 }
1677
1678 /* Prefer register over memory as first operand. */
1679 if (MEM_P (*op0) && REG_P (*op1))
1680 {
1681 rtx tem = *op0; *op0 = *op1; *op1 = tem;
1682 *code = (int)swap_condition ((enum rtx_code)*code);
1683 }
1684
1685 /* Using the scalar variants of vector instructions for 64 bit FP
1686 comparisons might require swapping the operands. */
1687 if (TARGET_VX
1688 && register_operand (*op0, DFmode)
1689 && register_operand (*op1, DFmode)
1690 && (*code == LT || *code == LE || *code == UNGT || *code == UNGE))
1691 {
1692 rtx tmp;
1693
1694 switch (*code)
1695 {
1696 case LT: *code = GT; break;
1697 case LE: *code = GE; break;
1698 case UNGT: *code = UNLE; break;
1699 case UNGE: *code = UNLT; break;
1700 default: ;
1701 }
1702 tmp = *op0; *op0 = *op1; *op1 = tmp;
1703 }
1704 }
1705
1706 /* Helper function for s390_emit_compare. If possible emit a 64 bit
1707 FP compare using the single element variant of vector instructions.
1708 Replace CODE with the comparison code to be used in the CC reg
1709 compare and return the condition code register RTX in CC. */
1710
1711 static bool
1712 s390_expand_vec_compare_scalar (enum rtx_code *code, rtx cmp1, rtx cmp2,
1713 rtx *cc)
1714 {
1715 machine_mode cmp_mode;
1716 bool swap_p = false;
1717
1718 switch (*code)
1719 {
1720 case EQ: cmp_mode = CCVEQmode; break;
1721 case NE: cmp_mode = CCVEQmode; break;
1722 case GT: cmp_mode = CCVFHmode; break;
1723 case GE: cmp_mode = CCVFHEmode; break;
1724 case UNLE: cmp_mode = CCVFHmode; break;
1725 case UNLT: cmp_mode = CCVFHEmode; break;
1726 case LT: cmp_mode = CCVFHmode; *code = GT; swap_p = true; break;
1727 case LE: cmp_mode = CCVFHEmode; *code = GE; swap_p = true; break;
1728 case UNGE: cmp_mode = CCVFHmode; *code = UNLE; swap_p = true; break;
1729 case UNGT: cmp_mode = CCVFHEmode; *code = UNLT; swap_p = true; break;
1730 default: return false;
1731 }
1732
1733 if (swap_p)
1734 {
1735 rtx tmp = cmp2;
1736 cmp2 = cmp1;
1737 cmp1 = tmp;
1738 }
1739 *cc = gen_rtx_REG (cmp_mode, CC_REGNUM);
1740 emit_insn (gen_rtx_PARALLEL (VOIDmode,
1741 gen_rtvec (2,
1742 gen_rtx_SET (*cc,
1743 gen_rtx_COMPARE (cmp_mode, cmp1,
1744 cmp2)),
1745 gen_rtx_CLOBBER (VOIDmode,
1746 gen_rtx_SCRATCH (V2DImode)))));
1747 return true;
1748 }
1749
1750
1751 /* Emit a compare instruction suitable to implement the comparison
1752 OP0 CODE OP1. Return the correct condition RTL to be placed in
1753 the IF_THEN_ELSE of the conditional branch testing the result. */
1754
1755 rtx
1756 s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
1757 {
1758 machine_mode mode = s390_select_ccmode (code, op0, op1);
1759 rtx cc;
1760
1761 if (TARGET_VX
1762 && register_operand (op0, DFmode)
1763 && register_operand (op1, DFmode)
1764 && s390_expand_vec_compare_scalar (&code, op0, op1, &cc))
1765 {
1766 /* Work has been done by s390_expand_vec_compare_scalar already. */
1767 }
1768 else if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
1769 {
1770 /* Do not output a redundant compare instruction if a
1771 compare_and_swap pattern already computed the result and the
1772 machine modes are compatible. */
1773 gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
1774 == GET_MODE (op0));
1775 cc = op0;
1776 }
1777 else
1778 {
1779 cc = gen_rtx_REG (mode, CC_REGNUM);
1780 emit_insn (gen_rtx_SET (cc, gen_rtx_COMPARE (mode, op0, op1)));
1781 }
1782
1783 return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
1784 }
1785
1786 /* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
1787 matches CMP.
1788 Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
1789 conditional branch testing the result. */
1790
1791 static rtx
1792 s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem,
1793 rtx cmp, rtx new_rtx)
1794 {
1795 emit_insn (gen_atomic_compare_and_swapsi_internal (old, mem, cmp, new_rtx));
1796 return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM),
1797 const0_rtx);
1798 }
1799
1800 /* Emit a jump instruction to TARGET and return it. If COND is
1801 NULL_RTX, emit an unconditional jump, else a conditional jump under
1802 condition COND. */
1803
1804 rtx_insn *
1805 s390_emit_jump (rtx target, rtx cond)
1806 {
1807 rtx insn;
1808
1809 target = gen_rtx_LABEL_REF (VOIDmode, target);
1810 if (cond)
1811 target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);
1812
1813 insn = gen_rtx_SET (pc_rtx, target);
1814 return emit_jump_insn (insn);
1815 }
1816
1817 /* Return branch condition mask to implement a branch
1818 specified by CODE. Return -1 for invalid comparisons. */
1819
1820 int
1821 s390_branch_condition_mask (rtx code)
1822 {
1823 const int CC0 = 1 << 3;
1824 const int CC1 = 1 << 2;
1825 const int CC2 = 1 << 1;
1826 const int CC3 = 1 << 0;
1827
1828 gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
1829 gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
1830 gcc_assert (XEXP (code, 1) == const0_rtx
1831 || (GET_MODE (XEXP (code, 0)) == CCRAWmode
1832 && CONST_INT_P (XEXP (code, 1))));
1833
1834
1835 switch (GET_MODE (XEXP (code, 0)))
1836 {
1837 case CCZmode:
1838 case CCZ1mode:
1839 switch (GET_CODE (code))
1840 {
1841 case EQ: return CC0;
1842 case NE: return CC1 | CC2 | CC3;
1843 default: return -1;
1844 }
1845 break;
1846
1847 case CCT1mode:
1848 switch (GET_CODE (code))
1849 {
1850 case EQ: return CC1;
1851 case NE: return CC0 | CC2 | CC3;
1852 default: return -1;
1853 }
1854 break;
1855
1856 case CCT2mode:
1857 switch (GET_CODE (code))
1858 {
1859 case EQ: return CC2;
1860 case NE: return CC0 | CC1 | CC3;
1861 default: return -1;
1862 }
1863 break;
1864
1865 case CCT3mode:
1866 switch (GET_CODE (code))
1867 {
1868 case EQ: return CC3;
1869 case NE: return CC0 | CC1 | CC2;
1870 default: return -1;
1871 }
1872 break;
1873
1874 case CCLmode:
1875 switch (GET_CODE (code))
1876 {
1877 case EQ: return CC0 | CC2;
1878 case NE: return CC1 | CC3;
1879 default: return -1;
1880 }
1881 break;
1882
1883 case CCL1mode:
1884 switch (GET_CODE (code))
1885 {
1886 case LTU: return CC2 | CC3; /* carry */
1887 case GEU: return CC0 | CC1; /* no carry */
1888 default: return -1;
1889 }
1890 break;
1891
1892 case CCL2mode:
1893 switch (GET_CODE (code))
1894 {
1895 case GTU: return CC0 | CC1; /* borrow */
1896 case LEU: return CC2 | CC3; /* no borrow */
1897 default: return -1;
1898 }
1899 break;
1900
1901 case CCL3mode:
1902 switch (GET_CODE (code))
1903 {
1904 case EQ: return CC0 | CC2;
1905 case NE: return CC1 | CC3;
1906 case LTU: return CC1;
1907 case GTU: return CC3;
1908 case LEU: return CC1 | CC2;
1909 case GEU: return CC2 | CC3;
1910 default: return -1;
1911 }
1912
1913 case CCUmode:
1914 switch (GET_CODE (code))
1915 {
1916 case EQ: return CC0;
1917 case NE: return CC1 | CC2 | CC3;
1918 case LTU: return CC1;
1919 case GTU: return CC2;
1920 case LEU: return CC0 | CC1;
1921 case GEU: return CC0 | CC2;
1922 default: return -1;
1923 }
1924 break;
1925
1926 case CCURmode:
1927 switch (GET_CODE (code))
1928 {
1929 case EQ: return CC0;
1930 case NE: return CC2 | CC1 | CC3;
1931 case LTU: return CC2;
1932 case GTU: return CC1;
1933 case LEU: return CC0 | CC2;
1934 case GEU: return CC0 | CC1;
1935 default: return -1;
1936 }
1937 break;
1938
1939 case CCAPmode:
1940 switch (GET_CODE (code))
1941 {
1942 case EQ: return CC0;
1943 case NE: return CC1 | CC2 | CC3;
1944 case LT: return CC1 | CC3;
1945 case GT: return CC2;
1946 case LE: return CC0 | CC1 | CC3;
1947 case GE: return CC0 | CC2;
1948 default: return -1;
1949 }
1950 break;
1951
1952 case CCANmode:
1953 switch (GET_CODE (code))
1954 {
1955 case EQ: return CC0;
1956 case NE: return CC1 | CC2 | CC3;
1957 case LT: return CC1;
1958 case GT: return CC2 | CC3;
1959 case LE: return CC0 | CC1;
1960 case GE: return CC0 | CC2 | CC3;
1961 default: return -1;
1962 }
1963 break;
1964
1965 case CCSmode:
1966 switch (GET_CODE (code))
1967 {
1968 case EQ: return CC0;
1969 case NE: return CC1 | CC2 | CC3;
1970 case LT: return CC1;
1971 case GT: return CC2;
1972 case LE: return CC0 | CC1;
1973 case GE: return CC0 | CC2;
1974 case UNORDERED: return CC3;
1975 case ORDERED: return CC0 | CC1 | CC2;
1976 case UNEQ: return CC0 | CC3;
1977 case UNLT: return CC1 | CC3;
1978 case UNGT: return CC2 | CC3;
1979 case UNLE: return CC0 | CC1 | CC3;
1980 case UNGE: return CC0 | CC2 | CC3;
1981 case LTGT: return CC1 | CC2;
1982 default: return -1;
1983 }
1984 break;
1985
1986 case CCSRmode:
1987 switch (GET_CODE (code))
1988 {
1989 case EQ: return CC0;
1990 case NE: return CC2 | CC1 | CC3;
1991 case LT: return CC2;
1992 case GT: return CC1;
1993 case LE: return CC0 | CC2;
1994 case GE: return CC0 | CC1;
1995 case UNORDERED: return CC3;
1996 case ORDERED: return CC0 | CC2 | CC1;
1997 case UNEQ: return CC0 | CC3;
1998 case UNLT: return CC2 | CC3;
1999 case UNGT: return CC1 | CC3;
2000 case UNLE: return CC0 | CC2 | CC3;
2001 case UNGE: return CC0 | CC1 | CC3;
2002 case LTGT: return CC2 | CC1;
2003 default: return -1;
2004 }
2005 break;
2006
2007 /* Vector comparison modes. */
2008
2009 case CCVEQmode:
2010 switch (GET_CODE (code))
2011 {
2012 case EQ: return CC0;
2013 case NE: return CC3;
2014 default: return -1;
2015 }
2016
2017 case CCVEQANYmode:
2018 switch (GET_CODE (code))
2019 {
2020 case EQ: return CC0 | CC1;
2021 case NE: return CC3 | CC1;
2022 default: return -1;
2023 }
2024
2025 /* Integer vector compare modes. */
2026
2027 case CCVHmode:
2028 switch (GET_CODE (code))
2029 {
2030 case GT: return CC0;
2031 case LE: return CC3;
2032 default: return -1;
2033 }
2034
2035 case CCVHANYmode:
2036 switch (GET_CODE (code))
2037 {
2038 case GT: return CC0 | CC1;
2039 case LE: return CC3 | CC1;
2040 default: return -1;
2041 }
2042
2043 case CCVHUmode:
2044 switch (GET_CODE (code))
2045 {
2046 case GTU: return CC0;
2047 case LEU: return CC3;
2048 default: return -1;
2049 }
2050
2051 case CCVHUANYmode:
2052 switch (GET_CODE (code))
2053 {
2054 case GTU: return CC0 | CC1;
2055 case LEU: return CC3 | CC1;
2056 default: return -1;
2057 }
2058
2059 /* FP vector compare modes. */
2060
2061 case CCVFHmode:
2062 switch (GET_CODE (code))
2063 {
2064 case GT: return CC0;
2065 case UNLE: return CC3;
2066 default: return -1;
2067 }
2068
2069 case CCVFHANYmode:
2070 switch (GET_CODE (code))
2071 {
2072 case GT: return CC0 | CC1;
2073 case UNLE: return CC3 | CC1;
2074 default: return -1;
2075 }
2076
2077 case CCVFHEmode:
2078 switch (GET_CODE (code))
2079 {
2080 case GE: return CC0;
2081 case UNLT: return CC3;
2082 default: return -1;
2083 }
2084
2085 case CCVFHEANYmode:
2086 switch (GET_CODE (code))
2087 {
2088 case GE: return CC0 | CC1;
2089 case UNLT: return CC3 | CC1;
2090 default: return -1;
2091 }
2092
2093
2094 case CCRAWmode:
2095 switch (GET_CODE (code))
2096 {
2097 case EQ:
2098 return INTVAL (XEXP (code, 1));
2099 case NE:
2100 return (INTVAL (XEXP (code, 1))) ^ 0xf;
2101 default:
2102 gcc_unreachable ();
2103 }
2104
2105 default:
2106 return -1;
2107 }
2108 }
2109
2110
2111 /* Return branch condition mask to implement a compare and branch
2112 specified by CODE. Return -1 for invalid comparisons. */
2113
2114 int
2115 s390_compare_and_branch_condition_mask (rtx code)
2116 {
2117 const int CC0 = 1 << 3;
2118 const int CC1 = 1 << 2;
2119 const int CC2 = 1 << 1;
2120
2121 switch (GET_CODE (code))
2122 {
2123 case EQ:
2124 return CC0;
2125 case NE:
2126 return CC1 | CC2;
2127 case LT:
2128 case LTU:
2129 return CC1;
2130 case GT:
2131 case GTU:
2132 return CC2;
2133 case LE:
2134 case LEU:
2135 return CC0 | CC1;
2136 case GE:
2137 case GEU:
2138 return CC0 | CC2;
2139 default:
2140 gcc_unreachable ();
2141 }
2142 return -1;
2143 }
2144
2145 /* If INV is false, return assembler mnemonic string to implement
2146 a branch specified by CODE. If INV is true, return mnemonic
2147 for the corresponding inverted branch. */
2148
2149 static const char *
2150 s390_branch_condition_mnemonic (rtx code, int inv)
2151 {
2152 int mask;
2153
2154 static const char *const mnemonic[16] =
2155 {
2156 NULL, "o", "h", "nle",
2157 "l", "nhe", "lh", "ne",
2158 "e", "nlh", "he", "nl",
2159 "le", "nh", "no", NULL
2160 };
2161
2162 if (GET_CODE (XEXP (code, 0)) == REG
2163 && REGNO (XEXP (code, 0)) == CC_REGNUM
2164 && (XEXP (code, 1) == const0_rtx
2165 || (GET_MODE (XEXP (code, 0)) == CCRAWmode
2166 && CONST_INT_P (XEXP (code, 1)))))
2167 mask = s390_branch_condition_mask (code);
2168 else
2169 mask = s390_compare_and_branch_condition_mask (code);
2170
2171 gcc_assert (mask >= 0);
2172
2173 if (inv)
2174 mask ^= 15;
2175
2176 gcc_assert (mask >= 1 && mask <= 14);
2177
2178 return mnemonic[mask];
2179 }
2180
2181 /* Return the part of op which has a value different from def.
2182 The size of the part is determined by mode.
2183 Use this function only if you already know that op really
2184 contains such a part. */
2185
2186 unsigned HOST_WIDE_INT
2187 s390_extract_part (rtx op, machine_mode mode, int def)
2188 {
2189 unsigned HOST_WIDE_INT value = 0;
2190 int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
2191 int part_bits = GET_MODE_BITSIZE (mode);
2192 unsigned HOST_WIDE_INT part_mask
2193 = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
2194 int i;
2195
2196 for (i = 0; i < max_parts; i++)
2197 {
2198 if (i == 0)
2199 value = (unsigned HOST_WIDE_INT) INTVAL (op);
2200 else
2201 value >>= part_bits;
2202
2203 if ((value & part_mask) != (def & part_mask))
2204 return value & part_mask;
2205 }
2206
2207 gcc_unreachable ();
2208 }
2209
2210 /* If OP is an integer constant of mode MODE with exactly one
2211 part of mode PART_MODE unequal to DEF, return the number of that
2212 part. Otherwise, return -1. */
2213
2214 int
2215 s390_single_part (rtx op,
2216 machine_mode mode,
2217 machine_mode part_mode,
2218 int def)
2219 {
2220 unsigned HOST_WIDE_INT value = 0;
2221 int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
2222 unsigned HOST_WIDE_INT part_mask
2223 = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
2224 int i, part = -1;
2225
2226 if (GET_CODE (op) != CONST_INT)
2227 return -1;
2228
2229 for (i = 0; i < n_parts; i++)
2230 {
2231 if (i == 0)
2232 value = (unsigned HOST_WIDE_INT) INTVAL (op);
2233 else
2234 value >>= GET_MODE_BITSIZE (part_mode);
2235
2236 if ((value & part_mask) != (def & part_mask))
2237 {
2238 if (part != -1)
2239 return -1;
2240 else
2241 part = i;
2242 }
2243 }
2244 return part == -1 ? -1 : n_parts - 1 - part;
2245 }
2246
2247 /* Return true if IN contains a contiguous bitfield in the lower SIZE
2248 bits and no other bits are set in IN. POS and LENGTH can be used
2249 to obtain the start position and the length of the bitfield.
2250
2251 POS gives the position of the first bit of the bitfield counting
2252 from the lowest order bit starting with zero. In order to use this
2253 value for S/390 instructions this has to be converted to "bits big
2254 endian" style. */
2255
2256 bool
2257 s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, int size,
2258 int *pos, int *length)
2259 {
2260 int tmp_pos = 0;
2261 int tmp_length = 0;
2262 int i;
2263 unsigned HOST_WIDE_INT mask = 1ULL;
2264 bool contiguous = false;
2265
2266 for (i = 0; i < size; mask <<= 1, i++)
2267 {
2268 if (contiguous)
2269 {
2270 if (mask & in)
2271 tmp_length++;
2272 else
2273 break;
2274 }
2275 else
2276 {
2277 if (mask & in)
2278 {
2279 contiguous = true;
2280 tmp_length++;
2281 }
2282 else
2283 tmp_pos++;
2284 }
2285 }
2286
2287 if (!tmp_length)
2288 return false;
2289
2290 /* Calculate a mask for all bits beyond the contiguous bits. */
2291 mask = (-1LL & ~(((1ULL << (tmp_length + tmp_pos - 1)) << 1) - 1));
2292
2293 if ((unsigned)size < sizeof (HOST_WIDE_INT) * BITS_PER_UNIT)
2294 mask &= (HOST_WIDE_INT_1U << size) - 1;
2295
2296 if (mask & in)
2297 return false;
2298
2299 if (tmp_length + tmp_pos - 1 > size)
2300 return false;
2301
2302 if (length)
2303 *length = tmp_length;
2304
2305 if (pos)
2306 *pos = tmp_pos;
2307
2308 return true;
2309 }
2310
2311 /* Return true if OP contains the same contiguous bitfield in *all*
2312 its elements. START and END can be used to obtain the start and
2313 end position of the bitfield.
2314
2315 START/STOP give the position of the first/last bit of the bitfield
2316 counting from the lowest order bit starting with zero. In order to
2317 use these values for S/390 instructions this has to be converted to
2318 "bits big endian" style. */
2319
2320 bool
2321 s390_contiguous_bitmask_vector_p (rtx op, int *start, int *end)
2322 {
2323 unsigned HOST_WIDE_INT mask;
2324 int length, size;
2325 rtx elt;
2326
2327 if (!const_vec_duplicate_p (op, &elt)
2328 || !CONST_INT_P (elt))
2329 return false;
2330
2331 size = GET_MODE_UNIT_BITSIZE (GET_MODE (op));
2332
2333 /* We cannot deal with V1TI/V1TF. This would require a vgmq. */
2334 if (size > 64)
2335 return false;
2336
2337 mask = UINTVAL (elt);
2338 if (s390_contiguous_bitmask_p (mask, size, start,
2339 end != NULL ? &length : NULL))
2340 {
2341 if (end != NULL)
2342 *end = *start + length - 1;
2343 return true;
2344 }
2345 /* 0xff00000f style immediates can be covered by swapping start and
2346 end indices in vgm. */
2347 if (s390_contiguous_bitmask_p (~mask, size, start,
2348 end != NULL ? &length : NULL))
2349 {
2350 if (end != NULL)
2351 *end = *start - 1;
2352 if (start != NULL)
2353 *start = *start + length;
2354 return true;
2355 }
2356 return false;
2357 }
2358
2359 /* Return true if C consists only of byte chunks being either 0 or
2360 0xff. If MASK is !=NULL a byte mask is generated which is
2361 appropriate for the vector generate byte mask instruction. */
2362
2363 bool
2364 s390_bytemask_vector_p (rtx op, unsigned *mask)
2365 {
2366 int i;
2367 unsigned tmp_mask = 0;
2368 int nunit, unit_size;
2369
2370 if (!VECTOR_MODE_P (GET_MODE (op))
2371 || GET_CODE (op) != CONST_VECTOR
2372 || !CONST_INT_P (XVECEXP (op, 0, 0)))
2373 return false;
2374
2375 nunit = GET_MODE_NUNITS (GET_MODE (op));
2376 unit_size = GET_MODE_UNIT_SIZE (GET_MODE (op));
2377
2378 for (i = 0; i < nunit; i++)
2379 {
2380 unsigned HOST_WIDE_INT c;
2381 int j;
2382
2383 if (!CONST_INT_P (XVECEXP (op, 0, i)))
2384 return false;
2385
2386 c = UINTVAL (XVECEXP (op, 0, i));
2387 for (j = 0; j < unit_size; j++)
2388 {
2389 if ((c & 0xff) != 0 && (c & 0xff) != 0xff)
2390 return false;
2391 tmp_mask |= (c & 1) << ((nunit - 1 - i) * unit_size + j);
2392 c = c >> BITS_PER_UNIT;
2393 }
2394 }
2395
2396 if (mask != NULL)
2397 *mask = tmp_mask;
2398
2399 return true;
2400 }
2401
2402 /* Check whether a rotate of ROTL followed by an AND of CONTIG is
2403 equivalent to a shift followed by the AND. In particular, CONTIG
2404 should not overlap the (rotated) bit 0/bit 63 gap. Negative values
2405 for ROTL indicate a rotate to the right. */
2406
2407 bool
2408 s390_extzv_shift_ok (int bitsize, int rotl, unsigned HOST_WIDE_INT contig)
2409 {
2410 int pos, len;
2411 bool ok;
2412
2413 ok = s390_contiguous_bitmask_p (contig, bitsize, &pos, &len);
2414 gcc_assert (ok);
2415
2416 return ((rotl >= 0 && rotl <= pos)
2417 || (rotl < 0 && -rotl <= bitsize - len - pos));
2418 }
2419
2420 /* Check whether we can (and want to) split a double-word
2421 move in mode MODE from SRC to DST into two single-word
2422 moves, moving the subword FIRST_SUBWORD first. */
2423
2424 bool
2425 s390_split_ok_p (rtx dst, rtx src, machine_mode mode, int first_subword)
2426 {
2427 /* Floating point and vector registers cannot be split. */
2428 if (FP_REG_P (src) || FP_REG_P (dst) || VECTOR_REG_P (src) || VECTOR_REG_P (dst))
2429 return false;
2430
2431 /* We don't need to split if operands are directly accessible. */
2432 if (s_operand (src, mode) || s_operand (dst, mode))
2433 return false;
2434
2435 /* Non-offsettable memory references cannot be split. */
2436 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
2437 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
2438 return false;
2439
2440 /* Moving the first subword must not clobber a register
2441 needed to move the second subword. */
2442 if (register_operand (dst, mode))
2443 {
2444 rtx subreg = operand_subword (dst, first_subword, 0, mode);
2445 if (reg_overlap_mentioned_p (subreg, src))
2446 return false;
2447 }
2448
2449 return true;
2450 }
2451
2452 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
2453 and [MEM2, MEM2 + SIZE] do overlap and false
2454 otherwise. */
2455
2456 bool
2457 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
2458 {
2459 rtx addr1, addr2, addr_delta;
2460 HOST_WIDE_INT delta;
2461
2462 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
2463 return true;
2464
2465 if (size == 0)
2466 return false;
2467
2468 addr1 = XEXP (mem1, 0);
2469 addr2 = XEXP (mem2, 0);
2470
2471 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
2472
2473 /* This overlapping check is used by peepholes merging memory block operations.
2474 Overlapping operations would otherwise be recognized by the S/390 hardware
2475 and would fall back to a slower implementation. Allowing overlapping
2476 operations would lead to slow code but not to wrong code. Therefore we are
2477 somewhat optimistic if we cannot prove that the memory blocks are
2478 overlapping.
2479 That's why we return false here although this may accept operations on
2480 overlapping memory areas. */
2481 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
2482 return false;
2483
2484 delta = INTVAL (addr_delta);
2485
2486 if (delta == 0
2487 || (delta > 0 && delta < size)
2488 || (delta < 0 && -delta < size))
2489 return true;
2490
2491 return false;
2492 }
2493
2494 /* Check whether the address of memory reference MEM2 equals exactly
2495 the address of memory reference MEM1 plus DELTA. Return true if
2496 we can prove this to be the case, false otherwise. */
2497
2498 bool
2499 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
2500 {
2501 rtx addr1, addr2, addr_delta;
2502
2503 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
2504 return false;
2505
2506 addr1 = XEXP (mem1, 0);
2507 addr2 = XEXP (mem2, 0);
2508
2509 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
2510 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
2511 return false;
2512
2513 return true;
2514 }
2515
2516 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
2517
2518 void
2519 s390_expand_logical_operator (enum rtx_code code, machine_mode mode,
2520 rtx *operands)
2521 {
2522 machine_mode wmode = mode;
2523 rtx dst = operands[0];
2524 rtx src1 = operands[1];
2525 rtx src2 = operands[2];
2526 rtx op, clob, tem;
2527
2528 /* If we cannot handle the operation directly, use a temp register. */
2529 if (!s390_logical_operator_ok_p (operands))
2530 dst = gen_reg_rtx (mode);
2531
2532 /* QImode and HImode patterns make sense only if we have a destination
2533 in memory. Otherwise perform the operation in SImode. */
2534 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
2535 wmode = SImode;
2536
2537 /* Widen operands if required. */
2538 if (mode != wmode)
2539 {
2540 if (GET_CODE (dst) == SUBREG
2541 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
2542 dst = tem;
2543 else if (REG_P (dst))
2544 dst = gen_rtx_SUBREG (wmode, dst, 0);
2545 else
2546 dst = gen_reg_rtx (wmode);
2547
2548 if (GET_CODE (src1) == SUBREG
2549 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
2550 src1 = tem;
2551 else if (GET_MODE (src1) != VOIDmode)
2552 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
2553
2554 if (GET_CODE (src2) == SUBREG
2555 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
2556 src2 = tem;
2557 else if (GET_MODE (src2) != VOIDmode)
2558 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
2559 }
2560
2561 /* Emit the instruction. */
2562 op = gen_rtx_SET (dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
2563 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
2564 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
2565
2566 /* Fix up the destination if needed. */
2567 if (dst != operands[0])
2568 emit_move_insn (operands[0], gen_lowpart (mode, dst));
2569 }
2570
2571 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
2572
2573 bool
2574 s390_logical_operator_ok_p (rtx *operands)
2575 {
2576 /* If the destination operand is in memory, it needs to coincide
2577 with one of the source operands. After reload, it has to be
2578 the first source operand. */
2579 if (GET_CODE (operands[0]) == MEM)
2580 return rtx_equal_p (operands[0], operands[1])
2581 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
2582
2583 return true;
2584 }
2585
2586 /* Narrow logical operation CODE of memory operand MEMOP with immediate
2587 operand IMMOP to switch from SS to SI type instructions. */
2588
2589 void
2590 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
2591 {
2592 int def = code == AND ? -1 : 0;
2593 HOST_WIDE_INT mask;
2594 int part;
2595
2596 gcc_assert (GET_CODE (*memop) == MEM);
2597 gcc_assert (!MEM_VOLATILE_P (*memop));
2598
2599 mask = s390_extract_part (*immop, QImode, def);
2600 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
2601 gcc_assert (part >= 0);
2602
2603 *memop = adjust_address (*memop, QImode, part);
2604 *immop = gen_int_mode (mask, QImode);
2605 }
2606
2607
2608 /* How to allocate a 'struct machine_function'. */
2609
2610 static struct machine_function *
2611 s390_init_machine_status (void)
2612 {
2613 return ggc_cleared_alloc<machine_function> ();
2614 }
2615
2616 /* Map for smallest class containing reg regno. */
2617
2618 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
2619 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 0 */
2620 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 4 */
2621 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 8 */
2622 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 12 */
2623 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 16 */
2624 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 20 */
2625 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 24 */
2626 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 28 */
2627 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS, /* 32 */
2628 ACCESS_REGS, ACCESS_REGS, VEC_REGS, VEC_REGS, /* 36 */
2629 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 40 */
2630 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 44 */
2631 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 48 */
2632 VEC_REGS, VEC_REGS /* 52 */
2633 };
2634
2635 /* Return attribute type of insn. */
2636
2637 static enum attr_type
2638 s390_safe_attr_type (rtx_insn *insn)
2639 {
2640 if (recog_memoized (insn) >= 0)
2641 return get_attr_type (insn);
2642 else
2643 return TYPE_NONE;
2644 }
2645
2646 /* Return true if DISP is a valid short displacement. */
2647
2648 static bool
2649 s390_short_displacement (rtx disp)
2650 {
2651 /* No displacement is OK. */
2652 if (!disp)
2653 return true;
2654
2655 /* Without the long displacement facility we don't need to
2656 distingiush between long and short displacement. */
2657 if (!TARGET_LONG_DISPLACEMENT)
2658 return true;
2659
2660 /* Integer displacement in range. */
2661 if (GET_CODE (disp) == CONST_INT)
2662 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
2663
2664 /* GOT offset is not OK, the GOT can be large. */
2665 if (GET_CODE (disp) == CONST
2666 && GET_CODE (XEXP (disp, 0)) == UNSPEC
2667 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
2668 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
2669 return false;
2670
2671 /* All other symbolic constants are literal pool references,
2672 which are OK as the literal pool must be small. */
2673 if (GET_CODE (disp) == CONST)
2674 return true;
2675
2676 return false;
2677 }
2678
2679 /* Decompose a RTL expression ADDR for a memory address into
2680 its components, returned in OUT.
2681
2682 Returns false if ADDR is not a valid memory address, true
2683 otherwise. If OUT is NULL, don't return the components,
2684 but check for validity only.
2685
2686 Note: Only addresses in canonical form are recognized.
2687 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
2688 canonical form so that they will be recognized. */
2689
2690 static int
2691 s390_decompose_address (rtx addr, struct s390_address *out)
2692 {
2693 HOST_WIDE_INT offset = 0;
2694 rtx base = NULL_RTX;
2695 rtx indx = NULL_RTX;
2696 rtx disp = NULL_RTX;
2697 rtx orig_disp;
2698 bool pointer = false;
2699 bool base_ptr = false;
2700 bool indx_ptr = false;
2701 bool literal_pool = false;
2702
2703 /* We may need to substitute the literal pool base register into the address
2704 below. However, at this point we do not know which register is going to
2705 be used as base, so we substitute the arg pointer register. This is going
2706 to be treated as holding a pointer below -- it shouldn't be used for any
2707 other purpose. */
2708 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
2709
2710 /* Decompose address into base + index + displacement. */
2711
2712 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
2713 base = addr;
2714
2715 else if (GET_CODE (addr) == PLUS)
2716 {
2717 rtx op0 = XEXP (addr, 0);
2718 rtx op1 = XEXP (addr, 1);
2719 enum rtx_code code0 = GET_CODE (op0);
2720 enum rtx_code code1 = GET_CODE (op1);
2721
2722 if (code0 == REG || code0 == UNSPEC)
2723 {
2724 if (code1 == REG || code1 == UNSPEC)
2725 {
2726 indx = op0; /* index + base */
2727 base = op1;
2728 }
2729
2730 else
2731 {
2732 base = op0; /* base + displacement */
2733 disp = op1;
2734 }
2735 }
2736
2737 else if (code0 == PLUS)
2738 {
2739 indx = XEXP (op0, 0); /* index + base + disp */
2740 base = XEXP (op0, 1);
2741 disp = op1;
2742 }
2743
2744 else
2745 {
2746 return false;
2747 }
2748 }
2749
2750 else
2751 disp = addr; /* displacement */
2752
2753 /* Extract integer part of displacement. */
2754 orig_disp = disp;
2755 if (disp)
2756 {
2757 if (GET_CODE (disp) == CONST_INT)
2758 {
2759 offset = INTVAL (disp);
2760 disp = NULL_RTX;
2761 }
2762 else if (GET_CODE (disp) == CONST
2763 && GET_CODE (XEXP (disp, 0)) == PLUS
2764 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
2765 {
2766 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
2767 disp = XEXP (XEXP (disp, 0), 0);
2768 }
2769 }
2770
2771 /* Strip off CONST here to avoid special case tests later. */
2772 if (disp && GET_CODE (disp) == CONST)
2773 disp = XEXP (disp, 0);
2774
2775 /* We can convert literal pool addresses to
2776 displacements by basing them off the base register. */
2777 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
2778 {
2779 /* Either base or index must be free to hold the base register. */
2780 if (!base)
2781 base = fake_pool_base, literal_pool = true;
2782 else if (!indx)
2783 indx = fake_pool_base, literal_pool = true;
2784 else
2785 return false;
2786
2787 /* Mark up the displacement. */
2788 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
2789 UNSPEC_LTREL_OFFSET);
2790 }
2791
2792 /* Validate base register. */
2793 if (base)
2794 {
2795 if (GET_CODE (base) == UNSPEC)
2796 switch (XINT (base, 1))
2797 {
2798 case UNSPEC_LTREF:
2799 if (!disp)
2800 disp = gen_rtx_UNSPEC (Pmode,
2801 gen_rtvec (1, XVECEXP (base, 0, 0)),
2802 UNSPEC_LTREL_OFFSET);
2803 else
2804 return false;
2805
2806 base = XVECEXP (base, 0, 1);
2807 break;
2808
2809 case UNSPEC_LTREL_BASE:
2810 if (XVECLEN (base, 0) == 1)
2811 base = fake_pool_base, literal_pool = true;
2812 else
2813 base = XVECEXP (base, 0, 1);
2814 break;
2815
2816 default:
2817 return false;
2818 }
2819
2820 if (!REG_P (base) || GET_MODE (base) != Pmode)
2821 return false;
2822
2823 if (REGNO (base) == STACK_POINTER_REGNUM
2824 || REGNO (base) == FRAME_POINTER_REGNUM
2825 || ((reload_completed || reload_in_progress)
2826 && frame_pointer_needed
2827 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
2828 || REGNO (base) == ARG_POINTER_REGNUM
2829 || (flag_pic
2830 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
2831 pointer = base_ptr = true;
2832
2833 if ((reload_completed || reload_in_progress)
2834 && base == cfun->machine->base_reg)
2835 pointer = base_ptr = literal_pool = true;
2836 }
2837
2838 /* Validate index register. */
2839 if (indx)
2840 {
2841 if (GET_CODE (indx) == UNSPEC)
2842 switch (XINT (indx, 1))
2843 {
2844 case UNSPEC_LTREF:
2845 if (!disp)
2846 disp = gen_rtx_UNSPEC (Pmode,
2847 gen_rtvec (1, XVECEXP (indx, 0, 0)),
2848 UNSPEC_LTREL_OFFSET);
2849 else
2850 return false;
2851
2852 indx = XVECEXP (indx, 0, 1);
2853 break;
2854
2855 case UNSPEC_LTREL_BASE:
2856 if (XVECLEN (indx, 0) == 1)
2857 indx = fake_pool_base, literal_pool = true;
2858 else
2859 indx = XVECEXP (indx, 0, 1);
2860 break;
2861
2862 default:
2863 return false;
2864 }
2865
2866 if (!REG_P (indx) || GET_MODE (indx) != Pmode)
2867 return false;
2868
2869 if (REGNO (indx) == STACK_POINTER_REGNUM
2870 || REGNO (indx) == FRAME_POINTER_REGNUM
2871 || ((reload_completed || reload_in_progress)
2872 && frame_pointer_needed
2873 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
2874 || REGNO (indx) == ARG_POINTER_REGNUM
2875 || (flag_pic
2876 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
2877 pointer = indx_ptr = true;
2878
2879 if ((reload_completed || reload_in_progress)
2880 && indx == cfun->machine->base_reg)
2881 pointer = indx_ptr = literal_pool = true;
2882 }
2883
2884 /* Prefer to use pointer as base, not index. */
2885 if (base && indx && !base_ptr
2886 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
2887 {
2888 rtx tmp = base;
2889 base = indx;
2890 indx = tmp;
2891 }
2892
2893 /* Validate displacement. */
2894 if (!disp)
2895 {
2896 /* If virtual registers are involved, the displacement will change later
2897 anyway as the virtual registers get eliminated. This could make a
2898 valid displacement invalid, but it is more likely to make an invalid
2899 displacement valid, because we sometimes access the register save area
2900 via negative offsets to one of those registers.
2901 Thus we don't check the displacement for validity here. If after
2902 elimination the displacement turns out to be invalid after all,
2903 this is fixed up by reload in any case. */
2904 /* LRA maintains always displacements up to date and we need to
2905 know the displacement is right during all LRA not only at the
2906 final elimination. */
2907 if (lra_in_progress
2908 || (base != arg_pointer_rtx
2909 && indx != arg_pointer_rtx
2910 && base != return_address_pointer_rtx
2911 && indx != return_address_pointer_rtx
2912 && base != frame_pointer_rtx
2913 && indx != frame_pointer_rtx
2914 && base != virtual_stack_vars_rtx
2915 && indx != virtual_stack_vars_rtx))
2916 if (!DISP_IN_RANGE (offset))
2917 return false;
2918 }
2919 else
2920 {
2921 /* All the special cases are pointers. */
2922 pointer = true;
2923
2924 /* In the small-PIC case, the linker converts @GOT
2925 and @GOTNTPOFF offsets to possible displacements. */
2926 if (GET_CODE (disp) == UNSPEC
2927 && (XINT (disp, 1) == UNSPEC_GOT
2928 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
2929 && flag_pic == 1)
2930 {
2931 ;
2932 }
2933
2934 /* Accept pool label offsets. */
2935 else if (GET_CODE (disp) == UNSPEC
2936 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
2937 ;
2938
2939 /* Accept literal pool references. */
2940 else if (GET_CODE (disp) == UNSPEC
2941 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
2942 {
2943 /* In case CSE pulled a non literal pool reference out of
2944 the pool we have to reject the address. This is
2945 especially important when loading the GOT pointer on non
2946 zarch CPUs. In this case the literal pool contains an lt
2947 relative offset to the _GLOBAL_OFFSET_TABLE_ label which
2948 will most likely exceed the displacement. */
2949 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
2950 || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
2951 return false;
2952
2953 orig_disp = gen_rtx_CONST (Pmode, disp);
2954 if (offset)
2955 {
2956 /* If we have an offset, make sure it does not
2957 exceed the size of the constant pool entry. */
2958 rtx sym = XVECEXP (disp, 0, 0);
2959 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
2960 return false;
2961
2962 orig_disp = plus_constant (Pmode, orig_disp, offset);
2963 }
2964 }
2965
2966 else
2967 return false;
2968 }
2969
2970 if (!base && !indx)
2971 pointer = true;
2972
2973 if (out)
2974 {
2975 out->base = base;
2976 out->indx = indx;
2977 out->disp = orig_disp;
2978 out->pointer = pointer;
2979 out->literal_pool = literal_pool;
2980 }
2981
2982 return true;
2983 }
2984
2985 /* Decompose a RTL expression OP for an address style operand into its
2986 components, and return the base register in BASE and the offset in
2987 OFFSET. While OP looks like an address it is never supposed to be
2988 used as such.
2989
2990 Return true if OP is a valid address operand, false if not. */
2991
2992 bool
2993 s390_decompose_addrstyle_without_index (rtx op, rtx *base,
2994 HOST_WIDE_INT *offset)
2995 {
2996 rtx off = NULL_RTX;
2997
2998 /* We can have an integer constant, an address register,
2999 or a sum of the two. */
3000 if (CONST_SCALAR_INT_P (op))
3001 {
3002 off = op;
3003 op = NULL_RTX;
3004 }
3005 if (op && GET_CODE (op) == PLUS && CONST_SCALAR_INT_P (XEXP (op, 1)))
3006 {
3007 off = XEXP (op, 1);
3008 op = XEXP (op, 0);
3009 }
3010 while (op && GET_CODE (op) == SUBREG)
3011 op = SUBREG_REG (op);
3012
3013 if (op && GET_CODE (op) != REG)
3014 return false;
3015
3016 if (offset)
3017 {
3018 if (off == NULL_RTX)
3019 *offset = 0;
3020 else if (CONST_INT_P (off))
3021 *offset = INTVAL (off);
3022 else if (CONST_WIDE_INT_P (off))
3023 /* The offset will anyway be cut down to 12 bits so take just
3024 the lowest order chunk of the wide int. */
3025 *offset = CONST_WIDE_INT_ELT (off, 0);
3026 else
3027 gcc_unreachable ();
3028 }
3029 if (base)
3030 *base = op;
3031
3032 return true;
3033 }
3034
3035
3036 /* Return true if CODE is a valid address without index. */
3037
3038 bool
3039 s390_legitimate_address_without_index_p (rtx op)
3040 {
3041 struct s390_address addr;
3042
3043 if (!s390_decompose_address (XEXP (op, 0), &addr))
3044 return false;
3045 if (addr.indx)
3046 return false;
3047
3048 return true;
3049 }
3050
3051
3052 /* Return TRUE if ADDR is an operand valid for a load/store relative
3053 instruction. Be aware that the alignment of the operand needs to
3054 be checked separately.
3055 Valid addresses are single references or a sum of a reference and a
3056 constant integer. Return these parts in SYMREF and ADDEND. You can
3057 pass NULL in REF and/or ADDEND if you are not interested in these
3058 values. Literal pool references are *not* considered symbol
3059 references. */
3060
3061 static bool
3062 s390_loadrelative_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
3063 {
3064 HOST_WIDE_INT tmpaddend = 0;
3065
3066 if (GET_CODE (addr) == CONST)
3067 addr = XEXP (addr, 0);
3068
3069 if (GET_CODE (addr) == PLUS)
3070 {
3071 if (!CONST_INT_P (XEXP (addr, 1)))
3072 return false;
3073
3074 tmpaddend = INTVAL (XEXP (addr, 1));
3075 addr = XEXP (addr, 0);
3076 }
3077
3078 if ((GET_CODE (addr) == SYMBOL_REF && !CONSTANT_POOL_ADDRESS_P (addr))
3079 || (GET_CODE (addr) == UNSPEC
3080 && (XINT (addr, 1) == UNSPEC_GOTENT
3081 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
3082 {
3083 if (symref)
3084 *symref = addr;
3085 if (addend)
3086 *addend = tmpaddend;
3087
3088 return true;
3089 }
3090 return false;
3091 }
3092
3093 /* Return true if the address in OP is valid for constraint letter C
3094 if wrapped in a MEM rtx. Set LIT_POOL_OK to true if it literal
3095 pool MEMs should be accepted. Only the Q, R, S, T constraint
3096 letters are allowed for C. */
3097
3098 static int
3099 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
3100 {
3101 struct s390_address addr;
3102 bool decomposed = false;
3103
3104 /* This check makes sure that no symbolic address (except literal
3105 pool references) are accepted by the R or T constraints. */
3106 if (s390_loadrelative_operand_p (op, NULL, NULL))
3107 return 0;
3108
3109 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
3110 if (!lit_pool_ok)
3111 {
3112 if (!s390_decompose_address (op, &addr))
3113 return 0;
3114 if (addr.literal_pool)
3115 return 0;
3116 decomposed = true;
3117 }
3118
3119 switch (c)
3120 {
3121 case 'Q': /* no index short displacement */
3122 if (!decomposed && !s390_decompose_address (op, &addr))
3123 return 0;
3124 if (addr.indx)
3125 return 0;
3126 if (!s390_short_displacement (addr.disp))
3127 return 0;
3128 break;
3129
3130 case 'R': /* with index short displacement */
3131 if (TARGET_LONG_DISPLACEMENT)
3132 {
3133 if (!decomposed && !s390_decompose_address (op, &addr))
3134 return 0;
3135 if (!s390_short_displacement (addr.disp))
3136 return 0;
3137 }
3138 /* Any invalid address here will be fixed up by reload,
3139 so accept it for the most generic constraint. */
3140 break;
3141
3142 case 'S': /* no index long displacement */
3143 if (!TARGET_LONG_DISPLACEMENT)
3144 return 0;
3145 if (!decomposed && !s390_decompose_address (op, &addr))
3146 return 0;
3147 if (addr.indx)
3148 return 0;
3149 if (s390_short_displacement (addr.disp))
3150 return 0;
3151 break;
3152
3153 case 'T': /* with index long displacement */
3154 if (!TARGET_LONG_DISPLACEMENT)
3155 return 0;
3156 /* Any invalid address here will be fixed up by reload,
3157 so accept it for the most generic constraint. */
3158 if ((decomposed || s390_decompose_address (op, &addr))
3159 && s390_short_displacement (addr.disp))
3160 return 0;
3161 break;
3162 default:
3163 return 0;
3164 }
3165 return 1;
3166 }
3167
3168
3169 /* Evaluates constraint strings described by the regular expression
3170 ([A|B|Z](Q|R|S|T))|U|W|Y and returns 1 if OP is a valid operand for
3171 the constraint given in STR, or 0 else. */
3172
3173 int
3174 s390_mem_constraint (const char *str, rtx op)
3175 {
3176 char c = str[0];
3177
3178 switch (c)
3179 {
3180 case 'A':
3181 /* Check for offsettable variants of memory constraints. */
3182 if (!MEM_P (op) || MEM_VOLATILE_P (op))
3183 return 0;
3184 if ((reload_completed || reload_in_progress)
3185 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
3186 return 0;
3187 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
3188 case 'B':
3189 /* Check for non-literal-pool variants of memory constraints. */
3190 if (!MEM_P (op))
3191 return 0;
3192 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
3193 case 'Q':
3194 case 'R':
3195 case 'S':
3196 case 'T':
3197 if (GET_CODE (op) != MEM)
3198 return 0;
3199 return s390_check_qrst_address (c, XEXP (op, 0), true);
3200 case 'U':
3201 return (s390_check_qrst_address ('Q', op, true)
3202 || s390_check_qrst_address ('R', op, true));
3203 case 'W':
3204 return (s390_check_qrst_address ('S', op, true)
3205 || s390_check_qrst_address ('T', op, true));
3206 case 'Y':
3207 /* Simply check for the basic form of a shift count. Reload will
3208 take care of making sure we have a proper base register. */
3209 if (!s390_decompose_addrstyle_without_index (op, NULL, NULL))
3210 return 0;
3211 break;
3212 case 'Z':
3213 return s390_check_qrst_address (str[1], op, true);
3214 default:
3215 return 0;
3216 }
3217 return 1;
3218 }
3219
3220
3221 /* Evaluates constraint strings starting with letter O. Input
3222 parameter C is the second letter following the "O" in the constraint
3223 string. Returns 1 if VALUE meets the respective constraint and 0
3224 otherwise. */
3225
3226 int
3227 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
3228 {
3229 if (!TARGET_EXTIMM)
3230 return 0;
3231
3232 switch (c)
3233 {
3234 case 's':
3235 return trunc_int_for_mode (value, SImode) == value;
3236
3237 case 'p':
3238 return value == 0
3239 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
3240
3241 case 'n':
3242 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
3243
3244 default:
3245 gcc_unreachable ();
3246 }
3247 }
3248
3249
3250 /* Evaluates constraint strings starting with letter N. Parameter STR
3251 contains the letters following letter "N" in the constraint string.
3252 Returns true if VALUE matches the constraint. */
3253
3254 int
3255 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
3256 {
3257 machine_mode mode, part_mode;
3258 int def;
3259 int part, part_goal;
3260
3261
3262 if (str[0] == 'x')
3263 part_goal = -1;
3264 else
3265 part_goal = str[0] - '0';
3266
3267 switch (str[1])
3268 {
3269 case 'Q':
3270 part_mode = QImode;
3271 break;
3272 case 'H':
3273 part_mode = HImode;
3274 break;
3275 case 'S':
3276 part_mode = SImode;
3277 break;
3278 default:
3279 return 0;
3280 }
3281
3282 switch (str[2])
3283 {
3284 case 'H':
3285 mode = HImode;
3286 break;
3287 case 'S':
3288 mode = SImode;
3289 break;
3290 case 'D':
3291 mode = DImode;
3292 break;
3293 default:
3294 return 0;
3295 }
3296
3297 switch (str[3])
3298 {
3299 case '0':
3300 def = 0;
3301 break;
3302 case 'F':
3303 def = -1;
3304 break;
3305 default:
3306 return 0;
3307 }
3308
3309 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
3310 return 0;
3311
3312 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
3313 if (part < 0)
3314 return 0;
3315 if (part_goal != -1 && part_goal != part)
3316 return 0;
3317
3318 return 1;
3319 }
3320
3321
3322 /* Returns true if the input parameter VALUE is a float zero. */
3323
3324 int
3325 s390_float_const_zero_p (rtx value)
3326 {
3327 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
3328 && value == CONST0_RTX (GET_MODE (value)));
3329 }
3330
3331 /* Implement TARGET_REGISTER_MOVE_COST. */
3332
3333 static int
3334 s390_register_move_cost (machine_mode mode,
3335 reg_class_t from, reg_class_t to)
3336 {
3337 /* On s390, copy between fprs and gprs is expensive. */
3338
3339 /* It becomes somewhat faster having ldgr/lgdr. */
3340 if (TARGET_Z10 && GET_MODE_SIZE (mode) == 8)
3341 {
3342 /* ldgr is single cycle. */
3343 if (reg_classes_intersect_p (from, GENERAL_REGS)
3344 && reg_classes_intersect_p (to, FP_REGS))
3345 return 1;
3346 /* lgdr needs 3 cycles. */
3347 if (reg_classes_intersect_p (to, GENERAL_REGS)
3348 && reg_classes_intersect_p (from, FP_REGS))
3349 return 3;
3350 }
3351
3352 /* Otherwise copying is done via memory. */
3353 if ((reg_classes_intersect_p (from, GENERAL_REGS)
3354 && reg_classes_intersect_p (to, FP_REGS))
3355 || (reg_classes_intersect_p (from, FP_REGS)
3356 && reg_classes_intersect_p (to, GENERAL_REGS)))
3357 return 10;
3358
3359 return 1;
3360 }
3361
3362 /* Implement TARGET_MEMORY_MOVE_COST. */
3363
3364 static int
3365 s390_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
3366 reg_class_t rclass ATTRIBUTE_UNUSED,
3367 bool in ATTRIBUTE_UNUSED)
3368 {
3369 return 2;
3370 }
3371
3372 /* Compute a (partial) cost for rtx X. Return true if the complete
3373 cost has been computed, and false if subexpressions should be
3374 scanned. In either case, *TOTAL contains the cost result.
3375 OUTER_CODE contains the code of the superexpression of x. */
3376
3377 static bool
3378 s390_rtx_costs (rtx x, machine_mode mode, int outer_code,
3379 int opno ATTRIBUTE_UNUSED,
3380 int *total, bool speed ATTRIBUTE_UNUSED)
3381 {
3382 int code = GET_CODE (x);
3383 switch (code)
3384 {
3385 case CONST:
3386 case CONST_INT:
3387 case LABEL_REF:
3388 case SYMBOL_REF:
3389 case CONST_DOUBLE:
3390 case CONST_WIDE_INT:
3391 case MEM:
3392 *total = 0;
3393 return true;
3394
3395 case IOR:
3396 /* risbg */
3397 if (GET_CODE (XEXP (x, 0)) == AND
3398 && GET_CODE (XEXP (x, 1)) == ASHIFT
3399 && REG_P (XEXP (XEXP (x, 0), 0))
3400 && REG_P (XEXP (XEXP (x, 1), 0))
3401 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
3402 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
3403 && (UINTVAL (XEXP (XEXP (x, 0), 1)) ==
3404 (1UL << UINTVAL (XEXP (XEXP (x, 1), 1))) - 1))
3405 {
3406 *total = COSTS_N_INSNS (2);
3407 return true;
3408 }
3409 case ASHIFT:
3410 case ASHIFTRT:
3411 case LSHIFTRT:
3412 case ROTATE:
3413 case ROTATERT:
3414 case AND:
3415 case XOR:
3416 case NEG:
3417 case NOT:
3418 *total = COSTS_N_INSNS (1);
3419 return false;
3420
3421 case PLUS:
3422 case MINUS:
3423 *total = COSTS_N_INSNS (1);
3424 return false;
3425
3426 case MULT:
3427 switch (mode)
3428 {
3429 case SImode:
3430 {
3431 rtx left = XEXP (x, 0);
3432 rtx right = XEXP (x, 1);
3433 if (GET_CODE (right) == CONST_INT
3434 && CONST_OK_FOR_K (INTVAL (right)))
3435 *total = s390_cost->mhi;
3436 else if (GET_CODE (left) == SIGN_EXTEND)
3437 *total = s390_cost->mh;
3438 else
3439 *total = s390_cost->ms; /* msr, ms, msy */
3440 break;
3441 }
3442 case DImode:
3443 {
3444 rtx left = XEXP (x, 0);
3445 rtx right = XEXP (x, 1);
3446 if (TARGET_ZARCH)
3447 {
3448 if (GET_CODE (right) == CONST_INT
3449 && CONST_OK_FOR_K (INTVAL (right)))
3450 *total = s390_cost->mghi;
3451 else if (GET_CODE (left) == SIGN_EXTEND)
3452 *total = s390_cost->msgf;
3453 else
3454 *total = s390_cost->msg; /* msgr, msg */
3455 }
3456 else /* TARGET_31BIT */
3457 {
3458 if (GET_CODE (left) == SIGN_EXTEND
3459 && GET_CODE (right) == SIGN_EXTEND)
3460 /* mulsidi case: mr, m */
3461 *total = s390_cost->m;
3462 else if (GET_CODE (left) == ZERO_EXTEND
3463 && GET_CODE (right) == ZERO_EXTEND
3464 && TARGET_CPU_ZARCH)
3465 /* umulsidi case: ml, mlr */
3466 *total = s390_cost->ml;
3467 else
3468 /* Complex calculation is required. */
3469 *total = COSTS_N_INSNS (40);
3470 }
3471 break;
3472 }
3473 case SFmode:
3474 case DFmode:
3475 *total = s390_cost->mult_df;
3476 break;
3477 case TFmode:
3478 *total = s390_cost->mxbr;
3479 break;
3480 default:
3481 return false;
3482 }
3483 return false;
3484
3485 case FMA:
3486 switch (mode)
3487 {
3488 case DFmode:
3489 *total = s390_cost->madbr;
3490 break;
3491 case SFmode:
3492 *total = s390_cost->maebr;
3493 break;
3494 default:
3495 return false;
3496 }
3497 /* Negate in the third argument is free: FMSUB. */
3498 if (GET_CODE (XEXP (x, 2)) == NEG)
3499 {
3500 *total += (rtx_cost (XEXP (x, 0), mode, FMA, 0, speed)
3501 + rtx_cost (XEXP (x, 1), mode, FMA, 1, speed)
3502 + rtx_cost (XEXP (XEXP (x, 2), 0), mode, FMA, 2, speed));
3503 return true;
3504 }
3505 return false;
3506
3507 case UDIV:
3508 case UMOD:
3509 if (mode == TImode) /* 128 bit division */
3510 *total = s390_cost->dlgr;
3511 else if (mode == DImode)
3512 {
3513 rtx right = XEXP (x, 1);
3514 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
3515 *total = s390_cost->dlr;
3516 else /* 64 by 64 bit division */
3517 *total = s390_cost->dlgr;
3518 }
3519 else if (mode == SImode) /* 32 bit division */
3520 *total = s390_cost->dlr;
3521 return false;
3522
3523 case DIV:
3524 case MOD:
3525 if (mode == DImode)
3526 {
3527 rtx right = XEXP (x, 1);
3528 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
3529 if (TARGET_ZARCH)
3530 *total = s390_cost->dsgfr;
3531 else
3532 *total = s390_cost->dr;
3533 else /* 64 by 64 bit division */
3534 *total = s390_cost->dsgr;
3535 }
3536 else if (mode == SImode) /* 32 bit division */
3537 *total = s390_cost->dlr;
3538 else if (mode == SFmode)
3539 {
3540 *total = s390_cost->debr;
3541 }
3542 else if (mode == DFmode)
3543 {
3544 *total = s390_cost->ddbr;
3545 }
3546 else if (mode == TFmode)
3547 {
3548 *total = s390_cost->dxbr;
3549 }
3550 return false;
3551
3552 case SQRT:
3553 if (mode == SFmode)
3554 *total = s390_cost->sqebr;
3555 else if (mode == DFmode)
3556 *total = s390_cost->sqdbr;
3557 else /* TFmode */
3558 *total = s390_cost->sqxbr;
3559 return false;
3560
3561 case SIGN_EXTEND:
3562 case ZERO_EXTEND:
3563 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
3564 || outer_code == PLUS || outer_code == MINUS
3565 || outer_code == COMPARE)
3566 *total = 0;
3567 return false;
3568
3569 case COMPARE:
3570 *total = COSTS_N_INSNS (1);
3571 if (GET_CODE (XEXP (x, 0)) == AND
3572 && GET_CODE (XEXP (x, 1)) == CONST_INT
3573 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
3574 {
3575 rtx op0 = XEXP (XEXP (x, 0), 0);
3576 rtx op1 = XEXP (XEXP (x, 0), 1);
3577 rtx op2 = XEXP (x, 1);
3578
3579 if (memory_operand (op0, GET_MODE (op0))
3580 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
3581 return true;
3582 if (register_operand (op0, GET_MODE (op0))
3583 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
3584 return true;
3585 }
3586 return false;
3587
3588 default:
3589 return false;
3590 }
3591 }
3592
3593 /* Return the cost of an address rtx ADDR. */
3594
3595 static int
3596 s390_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
3597 addr_space_t as ATTRIBUTE_UNUSED,
3598 bool speed ATTRIBUTE_UNUSED)
3599 {
3600 struct s390_address ad;
3601 if (!s390_decompose_address (addr, &ad))
3602 return 1000;
3603
3604 return ad.indx? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
3605 }
3606
3607 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
3608 otherwise return 0. */
3609
3610 int
3611 tls_symbolic_operand (rtx op)
3612 {
3613 if (GET_CODE (op) != SYMBOL_REF)
3614 return 0;
3615 return SYMBOL_REF_TLS_MODEL (op);
3616 }
3617 \f
3618 /* Split DImode access register reference REG (on 64-bit) into its constituent
3619 low and high parts, and store them into LO and HI. Note that gen_lowpart/
3620 gen_highpart cannot be used as they assume all registers are word-sized,
3621 while our access registers have only half that size. */
3622
3623 void
3624 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
3625 {
3626 gcc_assert (TARGET_64BIT);
3627 gcc_assert (ACCESS_REG_P (reg));
3628 gcc_assert (GET_MODE (reg) == DImode);
3629 gcc_assert (!(REGNO (reg) & 1));
3630
3631 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
3632 *hi = gen_rtx_REG (SImode, REGNO (reg));
3633 }
3634
3635 /* Return true if OP contains a symbol reference */
3636
3637 bool
3638 symbolic_reference_mentioned_p (rtx op)
3639 {
3640 const char *fmt;
3641 int i;
3642
3643 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
3644 return 1;
3645
3646 fmt = GET_RTX_FORMAT (GET_CODE (op));
3647 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3648 {
3649 if (fmt[i] == 'E')
3650 {
3651 int j;
3652
3653 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3654 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3655 return 1;
3656 }
3657
3658 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
3659 return 1;
3660 }
3661
3662 return 0;
3663 }
3664
3665 /* Return true if OP contains a reference to a thread-local symbol. */
3666
3667 bool
3668 tls_symbolic_reference_mentioned_p (rtx op)
3669 {
3670 const char *fmt;
3671 int i;
3672
3673 if (GET_CODE (op) == SYMBOL_REF)
3674 return tls_symbolic_operand (op);
3675
3676 fmt = GET_RTX_FORMAT (GET_CODE (op));
3677 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3678 {
3679 if (fmt[i] == 'E')
3680 {
3681 int j;
3682
3683 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3684 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3685 return true;
3686 }
3687
3688 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
3689 return true;
3690 }
3691
3692 return false;
3693 }
3694
3695
3696 /* Return true if OP is a legitimate general operand when
3697 generating PIC code. It is given that flag_pic is on
3698 and that OP satisfies CONSTANT_P. */
3699
3700 int
3701 legitimate_pic_operand_p (rtx op)
3702 {
3703 /* Accept all non-symbolic constants. */
3704 if (!SYMBOLIC_CONST (op))
3705 return 1;
3706
3707 /* Reject everything else; must be handled
3708 via emit_symbolic_move. */
3709 return 0;
3710 }
3711
3712 /* Returns true if the constant value OP is a legitimate general operand.
3713 It is given that OP satisfies CONSTANT_P. */
3714
3715 static bool
3716 s390_legitimate_constant_p (machine_mode mode, rtx op)
3717 {
3718 if (TARGET_VX && VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
3719 {
3720 if (GET_MODE_SIZE (mode) != 16)
3721 return 0;
3722
3723 if (!satisfies_constraint_j00 (op)
3724 && !satisfies_constraint_jm1 (op)
3725 && !satisfies_constraint_jKK (op)
3726 && !satisfies_constraint_jxx (op)
3727 && !satisfies_constraint_jyy (op))
3728 return 0;
3729 }
3730
3731 /* Accept all non-symbolic constants. */
3732 if (!SYMBOLIC_CONST (op))
3733 return 1;
3734
3735 /* Accept immediate LARL operands. */
3736 if (TARGET_CPU_ZARCH && larl_operand (op, mode))
3737 return 1;
3738
3739 /* Thread-local symbols are never legal constants. This is
3740 so that emit_call knows that computing such addresses
3741 might require a function call. */
3742 if (TLS_SYMBOLIC_CONST (op))
3743 return 0;
3744
3745 /* In the PIC case, symbolic constants must *not* be
3746 forced into the literal pool. We accept them here,
3747 so that they will be handled by emit_symbolic_move. */
3748 if (flag_pic)
3749 return 1;
3750
3751 /* All remaining non-PIC symbolic constants are
3752 forced into the literal pool. */
3753 return 0;
3754 }
3755
3756 /* Determine if it's legal to put X into the constant pool. This
3757 is not possible if X contains the address of a symbol that is
3758 not constant (TLS) or not known at final link time (PIC). */
3759
3760 static bool
3761 s390_cannot_force_const_mem (machine_mode mode, rtx x)
3762 {
3763 switch (GET_CODE (x))
3764 {
3765 case CONST_INT:
3766 case CONST_DOUBLE:
3767 case CONST_WIDE_INT:
3768 case CONST_VECTOR:
3769 /* Accept all non-symbolic constants. */
3770 return false;
3771
3772 case LABEL_REF:
3773 /* Labels are OK iff we are non-PIC. */
3774 return flag_pic != 0;
3775
3776 case SYMBOL_REF:
3777 /* 'Naked' TLS symbol references are never OK,
3778 non-TLS symbols are OK iff we are non-PIC. */
3779 if (tls_symbolic_operand (x))
3780 return true;
3781 else
3782 return flag_pic != 0;
3783
3784 case CONST:
3785 return s390_cannot_force_const_mem (mode, XEXP (x, 0));
3786 case PLUS:
3787 case MINUS:
3788 return s390_cannot_force_const_mem (mode, XEXP (x, 0))
3789 || s390_cannot_force_const_mem (mode, XEXP (x, 1));
3790
3791 case UNSPEC:
3792 switch (XINT (x, 1))
3793 {
3794 /* Only lt-relative or GOT-relative UNSPECs are OK. */
3795 case UNSPEC_LTREL_OFFSET:
3796 case UNSPEC_GOT:
3797 case UNSPEC_GOTOFF:
3798 case UNSPEC_PLTOFF:
3799 case UNSPEC_TLSGD:
3800 case UNSPEC_TLSLDM:
3801 case UNSPEC_NTPOFF:
3802 case UNSPEC_DTPOFF:
3803 case UNSPEC_GOTNTPOFF:
3804 case UNSPEC_INDNTPOFF:
3805 return false;
3806
3807 /* If the literal pool shares the code section, be put
3808 execute template placeholders into the pool as well. */
3809 case UNSPEC_INSN:
3810 return TARGET_CPU_ZARCH;
3811
3812 default:
3813 return true;
3814 }
3815 break;
3816
3817 default:
3818 gcc_unreachable ();
3819 }
3820 }
3821
3822 /* Returns true if the constant value OP is a legitimate general
3823 operand during and after reload. The difference to
3824 legitimate_constant_p is that this function will not accept
3825 a constant that would need to be forced to the literal pool
3826 before it can be used as operand.
3827 This function accepts all constants which can be loaded directly
3828 into a GPR. */
3829
3830 bool
3831 legitimate_reload_constant_p (rtx op)
3832 {
3833 /* Accept la(y) operands. */
3834 if (GET_CODE (op) == CONST_INT
3835 && DISP_IN_RANGE (INTVAL (op)))
3836 return true;
3837
3838 /* Accept l(g)hi/l(g)fi operands. */
3839 if (GET_CODE (op) == CONST_INT
3840 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
3841 return true;
3842
3843 /* Accept lliXX operands. */
3844 if (TARGET_ZARCH
3845 && GET_CODE (op) == CONST_INT
3846 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
3847 && s390_single_part (op, word_mode, HImode, 0) >= 0)
3848 return true;
3849
3850 if (TARGET_EXTIMM
3851 && GET_CODE (op) == CONST_INT
3852 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
3853 && s390_single_part (op, word_mode, SImode, 0) >= 0)
3854 return true;
3855
3856 /* Accept larl operands. */
3857 if (TARGET_CPU_ZARCH
3858 && larl_operand (op, VOIDmode))
3859 return true;
3860
3861 /* Accept floating-point zero operands that fit into a single GPR. */
3862 if (GET_CODE (op) == CONST_DOUBLE
3863 && s390_float_const_zero_p (op)
3864 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
3865 return true;
3866
3867 /* Accept double-word operands that can be split. */
3868 if (GET_CODE (op) == CONST_WIDE_INT
3869 || (GET_CODE (op) == CONST_INT
3870 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op)))
3871 {
3872 machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
3873 rtx hi = operand_subword (op, 0, 0, dword_mode);
3874 rtx lo = operand_subword (op, 1, 0, dword_mode);
3875 return legitimate_reload_constant_p (hi)
3876 && legitimate_reload_constant_p (lo);
3877 }
3878
3879 /* Everything else cannot be handled without reload. */
3880 return false;
3881 }
3882
3883 /* Returns true if the constant value OP is a legitimate fp operand
3884 during and after reload.
3885 This function accepts all constants which can be loaded directly
3886 into an FPR. */
3887
3888 static bool
3889 legitimate_reload_fp_constant_p (rtx op)
3890 {
3891 /* Accept floating-point zero operands if the load zero instruction
3892 can be used. Prior to z196 the load fp zero instruction caused a
3893 performance penalty if the result is used as BFP number. */
3894 if (TARGET_Z196
3895 && GET_CODE (op) == CONST_DOUBLE
3896 && s390_float_const_zero_p (op))
3897 return true;
3898
3899 return false;
3900 }
3901
3902 /* Returns true if the constant value OP is a legitimate vector operand
3903 during and after reload.
3904 This function accepts all constants which can be loaded directly
3905 into an VR. */
3906
3907 static bool
3908 legitimate_reload_vector_constant_p (rtx op)
3909 {
3910 if (TARGET_VX && GET_MODE_SIZE (GET_MODE (op)) == 16
3911 && (satisfies_constraint_j00 (op)
3912 || satisfies_constraint_jm1 (op)
3913 || satisfies_constraint_jKK (op)
3914 || satisfies_constraint_jxx (op)
3915 || satisfies_constraint_jyy (op)))
3916 return true;
3917
3918 return false;
3919 }
3920
3921 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
3922 return the class of reg to actually use. */
3923
3924 static reg_class_t
3925 s390_preferred_reload_class (rtx op, reg_class_t rclass)
3926 {
3927 switch (GET_CODE (op))
3928 {
3929 /* Constants we cannot reload into general registers
3930 must be forced into the literal pool. */
3931 case CONST_VECTOR:
3932 case CONST_DOUBLE:
3933 case CONST_INT:
3934 case CONST_WIDE_INT:
3935 if (reg_class_subset_p (GENERAL_REGS, rclass)
3936 && legitimate_reload_constant_p (op))
3937 return GENERAL_REGS;
3938 else if (reg_class_subset_p (ADDR_REGS, rclass)
3939 && legitimate_reload_constant_p (op))
3940 return ADDR_REGS;
3941 else if (reg_class_subset_p (FP_REGS, rclass)
3942 && legitimate_reload_fp_constant_p (op))
3943 return FP_REGS;
3944 else if (reg_class_subset_p (VEC_REGS, rclass)
3945 && legitimate_reload_vector_constant_p (op))
3946 return VEC_REGS;
3947
3948 return NO_REGS;
3949
3950 /* If a symbolic constant or a PLUS is reloaded,
3951 it is most likely being used as an address, so
3952 prefer ADDR_REGS. If 'class' is not a superset
3953 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
3954 case CONST:
3955 /* Symrefs cannot be pushed into the literal pool with -fPIC
3956 so we *MUST NOT* return NO_REGS for these cases
3957 (s390_cannot_force_const_mem will return true).
3958
3959 On the other hand we MUST return NO_REGS for symrefs with
3960 invalid addend which might have been pushed to the literal
3961 pool (no -fPIC). Usually we would expect them to be
3962 handled via secondary reload but this does not happen if
3963 they are used as literal pool slot replacement in reload
3964 inheritance (see emit_input_reload_insns). */
3965 if (TARGET_CPU_ZARCH
3966 && GET_CODE (XEXP (op, 0)) == PLUS
3967 && GET_CODE (XEXP (XEXP(op, 0), 0)) == SYMBOL_REF
3968 && GET_CODE (XEXP (XEXP(op, 0), 1)) == CONST_INT)
3969 {
3970 if (flag_pic && reg_class_subset_p (ADDR_REGS, rclass))
3971 return ADDR_REGS;
3972 else
3973 return NO_REGS;
3974 }
3975 /* fallthrough */
3976 case LABEL_REF:
3977 case SYMBOL_REF:
3978 if (!legitimate_reload_constant_p (op))
3979 return NO_REGS;
3980 /* fallthrough */
3981 case PLUS:
3982 /* load address will be used. */
3983 if (reg_class_subset_p (ADDR_REGS, rclass))
3984 return ADDR_REGS;
3985 else
3986 return NO_REGS;
3987
3988 default:
3989 break;
3990 }
3991
3992 return rclass;
3993 }
3994
3995 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
3996 multiple of ALIGNMENT and the SYMBOL_REF being naturally
3997 aligned. */
3998
3999 bool
4000 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
4001 {
4002 HOST_WIDE_INT addend;
4003 rtx symref;
4004
4005 /* The "required alignment" might be 0 (e.g. for certain structs
4006 accessed via BLKmode). Early abort in this case, as well as when
4007 an alignment > 8 is required. */
4008 if (alignment < 2 || alignment > 8)
4009 return false;
4010
4011 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
4012 return false;
4013
4014 if (addend & (alignment - 1))
4015 return false;
4016
4017 if (GET_CODE (symref) == SYMBOL_REF)
4018 {
4019 /* We have load-relative instructions for 2-byte, 4-byte, and
4020 8-byte alignment so allow only these. */
4021 switch (alignment)
4022 {
4023 case 8: return !SYMBOL_FLAG_NOTALIGN8_P (symref);
4024 case 4: return !SYMBOL_FLAG_NOTALIGN4_P (symref);
4025 case 2: return !SYMBOL_FLAG_NOTALIGN2_P (symref);
4026 default: return false;
4027 }
4028 }
4029
4030 if (GET_CODE (symref) == UNSPEC
4031 && alignment <= UNITS_PER_LONG)
4032 return true;
4033
4034 return false;
4035 }
4036
4037 /* ADDR is moved into REG using larl. If ADDR isn't a valid larl
4038 operand SCRATCH is used to reload the even part of the address and
4039 adding one. */
4040
4041 void
4042 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
4043 {
4044 HOST_WIDE_INT addend;
4045 rtx symref;
4046
4047 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
4048 gcc_unreachable ();
4049
4050 if (!(addend & 1))
4051 /* Easy case. The addend is even so larl will do fine. */
4052 emit_move_insn (reg, addr);
4053 else
4054 {
4055 /* We can leave the scratch register untouched if the target
4056 register is a valid base register. */
4057 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
4058 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
4059 scratch = reg;
4060
4061 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
4062 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
4063
4064 if (addend != 1)
4065 emit_move_insn (scratch,
4066 gen_rtx_CONST (Pmode,
4067 gen_rtx_PLUS (Pmode, symref,
4068 GEN_INT (addend - 1))));
4069 else
4070 emit_move_insn (scratch, symref);
4071
4072 /* Increment the address using la in order to avoid clobbering cc. */
4073 s390_load_address (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
4074 }
4075 }
4076
4077 /* Generate what is necessary to move between REG and MEM using
4078 SCRATCH. The direction is given by TOMEM. */
4079
4080 void
4081 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
4082 {
4083 /* Reload might have pulled a constant out of the literal pool.
4084 Force it back in. */
4085 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
4086 || GET_CODE (mem) == CONST_WIDE_INT
4087 || GET_CODE (mem) == CONST_VECTOR
4088 || GET_CODE (mem) == CONST)
4089 mem = force_const_mem (GET_MODE (reg), mem);
4090
4091 gcc_assert (MEM_P (mem));
4092
4093 /* For a load from memory we can leave the scratch register
4094 untouched if the target register is a valid base register. */
4095 if (!tomem
4096 && REGNO (reg) < FIRST_PSEUDO_REGISTER
4097 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
4098 && GET_MODE (reg) == GET_MODE (scratch))
4099 scratch = reg;
4100
4101 /* Load address into scratch register. Since we can't have a
4102 secondary reload for a secondary reload we have to cover the case
4103 where larl would need a secondary reload here as well. */
4104 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
4105
4106 /* Now we can use a standard load/store to do the move. */
4107 if (tomem)
4108 emit_move_insn (replace_equiv_address (mem, scratch), reg);
4109 else
4110 emit_move_insn (reg, replace_equiv_address (mem, scratch));
4111 }
4112
4113 /* Inform reload about cases where moving X with a mode MODE to a register in
4114 RCLASS requires an extra scratch or immediate register. Return the class
4115 needed for the immediate register. */
4116
4117 static reg_class_t
4118 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
4119 machine_mode mode, secondary_reload_info *sri)
4120 {
4121 enum reg_class rclass = (enum reg_class) rclass_i;
4122
4123 /* Intermediate register needed. */
4124 if (reg_classes_intersect_p (CC_REGS, rclass))
4125 return GENERAL_REGS;
4126
4127 if (TARGET_VX)
4128 {
4129 /* The vst/vl vector move instructions allow only for short
4130 displacements. */
4131 if (MEM_P (x)
4132 && GET_CODE (XEXP (x, 0)) == PLUS
4133 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4134 && !SHORT_DISP_IN_RANGE(INTVAL (XEXP (XEXP (x, 0), 1)))
4135 && reg_class_subset_p (rclass, VEC_REGS)
4136 && (!reg_class_subset_p (rclass, FP_REGS)
4137 || (GET_MODE_SIZE (mode) > 8
4138 && s390_class_max_nregs (FP_REGS, mode) == 1)))
4139 {
4140 if (in_p)
4141 sri->icode = (TARGET_64BIT ?
4142 CODE_FOR_reloaddi_la_in :
4143 CODE_FOR_reloadsi_la_in);
4144 else
4145 sri->icode = (TARGET_64BIT ?
4146 CODE_FOR_reloaddi_la_out :
4147 CODE_FOR_reloadsi_la_out);
4148 }
4149 }
4150
4151 if (TARGET_Z10)
4152 {
4153 HOST_WIDE_INT offset;
4154 rtx symref;
4155
4156 /* On z10 several optimizer steps may generate larl operands with
4157 an odd addend. */
4158 if (in_p
4159 && s390_loadrelative_operand_p (x, &symref, &offset)
4160 && mode == Pmode
4161 && !SYMBOL_FLAG_NOTALIGN2_P (symref)
4162 && (offset & 1) == 1)
4163 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
4164 : CODE_FOR_reloadsi_larl_odd_addend_z10);
4165
4166 /* Handle all the (mem (symref)) accesses we cannot use the z10
4167 instructions for. */
4168 if (MEM_P (x)
4169 && s390_loadrelative_operand_p (XEXP (x, 0), NULL, NULL)
4170 && (mode == QImode
4171 || !reg_class_subset_p (rclass, GENERAL_REGS)
4172 || GET_MODE_SIZE (mode) > UNITS_PER_WORD
4173 || !s390_check_symref_alignment (XEXP (x, 0),
4174 GET_MODE_SIZE (mode))))
4175 {
4176 #define __SECONDARY_RELOAD_CASE(M,m) \
4177 case M##mode: \
4178 if (TARGET_64BIT) \
4179 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
4180 CODE_FOR_reload##m##di_tomem_z10; \
4181 else \
4182 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
4183 CODE_FOR_reload##m##si_tomem_z10; \
4184 break;
4185
4186 switch (GET_MODE (x))
4187 {
4188 __SECONDARY_RELOAD_CASE (QI, qi);
4189 __SECONDARY_RELOAD_CASE (HI, hi);
4190 __SECONDARY_RELOAD_CASE (SI, si);
4191 __SECONDARY_RELOAD_CASE (DI, di);
4192 __SECONDARY_RELOAD_CASE (TI, ti);
4193 __SECONDARY_RELOAD_CASE (SF, sf);
4194 __SECONDARY_RELOAD_CASE (DF, df);
4195 __SECONDARY_RELOAD_CASE (TF, tf);
4196 __SECONDARY_RELOAD_CASE (SD, sd);
4197 __SECONDARY_RELOAD_CASE (DD, dd);
4198 __SECONDARY_RELOAD_CASE (TD, td);
4199 __SECONDARY_RELOAD_CASE (V1QI, v1qi);
4200 __SECONDARY_RELOAD_CASE (V2QI, v2qi);
4201 __SECONDARY_RELOAD_CASE (V4QI, v4qi);
4202 __SECONDARY_RELOAD_CASE (V8QI, v8qi);
4203 __SECONDARY_RELOAD_CASE (V16QI, v16qi);
4204 __SECONDARY_RELOAD_CASE (V1HI, v1hi);
4205 __SECONDARY_RELOAD_CASE (V2HI, v2hi);
4206 __SECONDARY_RELOAD_CASE (V4HI, v4hi);
4207 __SECONDARY_RELOAD_CASE (V8HI, v8hi);
4208 __SECONDARY_RELOAD_CASE (V1SI, v1si);
4209 __SECONDARY_RELOAD_CASE (V2SI, v2si);
4210 __SECONDARY_RELOAD_CASE (V4SI, v4si);
4211 __SECONDARY_RELOAD_CASE (V1DI, v1di);
4212 __SECONDARY_RELOAD_CASE (V2DI, v2di);
4213 __SECONDARY_RELOAD_CASE (V1TI, v1ti);
4214 __SECONDARY_RELOAD_CASE (V1SF, v1sf);
4215 __SECONDARY_RELOAD_CASE (V2SF, v2sf);
4216 __SECONDARY_RELOAD_CASE (V4SF, v4sf);
4217 __SECONDARY_RELOAD_CASE (V1DF, v1df);
4218 __SECONDARY_RELOAD_CASE (V2DF, v2df);
4219 __SECONDARY_RELOAD_CASE (V1TF, v1tf);
4220 default:
4221 gcc_unreachable ();
4222 }
4223 #undef __SECONDARY_RELOAD_CASE
4224 }
4225 }
4226
4227 /* We need a scratch register when loading a PLUS expression which
4228 is not a legitimate operand of the LOAD ADDRESS instruction. */
4229 /* LRA can deal with transformation of plus op very well -- so we
4230 don't need to prompt LRA in this case. */
4231 if (! lra_in_progress && in_p && s390_plus_operand (x, mode))
4232 sri->icode = (TARGET_64BIT ?
4233 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
4234
4235 /* Performing a multiword move from or to memory we have to make sure the
4236 second chunk in memory is addressable without causing a displacement
4237 overflow. If that would be the case we calculate the address in
4238 a scratch register. */
4239 if (MEM_P (x)
4240 && GET_CODE (XEXP (x, 0)) == PLUS
4241 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4242 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
4243 + GET_MODE_SIZE (mode) - 1))
4244 {
4245 /* For GENERAL_REGS a displacement overflow is no problem if occurring
4246 in a s_operand address since we may fallback to lm/stm. So we only
4247 have to care about overflows in the b+i+d case. */
4248 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
4249 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
4250 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
4251 /* For FP_REGS no lm/stm is available so this check is triggered
4252 for displacement overflows in b+i+d and b+d like addresses. */
4253 || (reg_classes_intersect_p (FP_REGS, rclass)
4254 && s390_class_max_nregs (FP_REGS, mode) > 1))
4255 {
4256 if (in_p)
4257 sri->icode = (TARGET_64BIT ?
4258 CODE_FOR_reloaddi_la_in :
4259 CODE_FOR_reloadsi_la_in);
4260 else
4261 sri->icode = (TARGET_64BIT ?
4262 CODE_FOR_reloaddi_la_out :
4263 CODE_FOR_reloadsi_la_out);
4264 }
4265 }
4266
4267 /* A scratch address register is needed when a symbolic constant is
4268 copied to r0 compiling with -fPIC. In other cases the target
4269 register might be used as temporary (see legitimize_pic_address). */
4270 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
4271 sri->icode = (TARGET_64BIT ?
4272 CODE_FOR_reloaddi_PIC_addr :
4273 CODE_FOR_reloadsi_PIC_addr);
4274
4275 /* Either scratch or no register needed. */
4276 return NO_REGS;
4277 }
4278
4279 /* Generate code to load SRC, which is PLUS that is not a
4280 legitimate operand for the LA instruction, into TARGET.
4281 SCRATCH may be used as scratch register. */
4282
4283 void
4284 s390_expand_plus_operand (rtx target, rtx src,
4285 rtx scratch)
4286 {
4287 rtx sum1, sum2;
4288 struct s390_address ad;
4289
4290 /* src must be a PLUS; get its two operands. */
4291 gcc_assert (GET_CODE (src) == PLUS);
4292 gcc_assert (GET_MODE (src) == Pmode);
4293
4294 /* Check if any of the two operands is already scheduled
4295 for replacement by reload. This can happen e.g. when
4296 float registers occur in an address. */
4297 sum1 = find_replacement (&XEXP (src, 0));
4298 sum2 = find_replacement (&XEXP (src, 1));
4299 src = gen_rtx_PLUS (Pmode, sum1, sum2);
4300
4301 /* If the address is already strictly valid, there's nothing to do. */
4302 if (!s390_decompose_address (src, &ad)
4303 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
4304 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
4305 {
4306 /* Otherwise, one of the operands cannot be an address register;
4307 we reload its value into the scratch register. */
4308 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
4309 {
4310 emit_move_insn (scratch, sum1);
4311 sum1 = scratch;
4312 }
4313 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
4314 {
4315 emit_move_insn (scratch, sum2);
4316 sum2 = scratch;
4317 }
4318
4319 /* According to the way these invalid addresses are generated
4320 in reload.c, it should never happen (at least on s390) that
4321 *neither* of the PLUS components, after find_replacements
4322 was applied, is an address register. */
4323 if (sum1 == scratch && sum2 == scratch)
4324 {
4325 debug_rtx (src);
4326 gcc_unreachable ();
4327 }
4328
4329 src = gen_rtx_PLUS (Pmode, sum1, sum2);
4330 }
4331
4332 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
4333 is only ever performed on addresses, so we can mark the
4334 sum as legitimate for LA in any case. */
4335 s390_load_address (target, src);
4336 }
4337
4338
4339 /* Return true if ADDR is a valid memory address.
4340 STRICT specifies whether strict register checking applies. */
4341
4342 static bool
4343 s390_legitimate_address_p (machine_mode mode, rtx addr, bool strict)
4344 {
4345 struct s390_address ad;
4346
4347 if (TARGET_Z10
4348 && larl_operand (addr, VOIDmode)
4349 && (mode == VOIDmode
4350 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
4351 return true;
4352
4353 if (!s390_decompose_address (addr, &ad))
4354 return false;
4355
4356 if (strict)
4357 {
4358 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
4359 return false;
4360
4361 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
4362 return false;
4363 }
4364 else
4365 {
4366 if (ad.base
4367 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
4368 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
4369 return false;
4370
4371 if (ad.indx
4372 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
4373 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
4374 return false;
4375 }
4376 return true;
4377 }
4378
4379 /* Return true if OP is a valid operand for the LA instruction.
4380 In 31-bit, we need to prove that the result is used as an
4381 address, as LA performs only a 31-bit addition. */
4382
4383 bool
4384 legitimate_la_operand_p (rtx op)
4385 {
4386 struct s390_address addr;
4387 if (!s390_decompose_address (op, &addr))
4388 return false;
4389
4390 return (TARGET_64BIT || addr.pointer);
4391 }
4392
4393 /* Return true if it is valid *and* preferable to use LA to
4394 compute the sum of OP1 and OP2. */
4395
4396 bool
4397 preferred_la_operand_p (rtx op1, rtx op2)
4398 {
4399 struct s390_address addr;
4400
4401 if (op2 != const0_rtx)
4402 op1 = gen_rtx_PLUS (Pmode, op1, op2);
4403
4404 if (!s390_decompose_address (op1, &addr))
4405 return false;
4406 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
4407 return false;
4408 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
4409 return false;
4410
4411 /* Avoid LA instructions with index register on z196; it is
4412 preferable to use regular add instructions when possible.
4413 Starting with zEC12 the la with index register is "uncracked"
4414 again. */
4415 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
4416 return false;
4417
4418 if (!TARGET_64BIT && !addr.pointer)
4419 return false;
4420
4421 if (addr.pointer)
4422 return true;
4423
4424 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
4425 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
4426 return true;
4427
4428 return false;
4429 }
4430
4431 /* Emit a forced load-address operation to load SRC into DST.
4432 This will use the LOAD ADDRESS instruction even in situations
4433 where legitimate_la_operand_p (SRC) returns false. */
4434
4435 void
4436 s390_load_address (rtx dst, rtx src)
4437 {
4438 if (TARGET_64BIT)
4439 emit_move_insn (dst, src);
4440 else
4441 emit_insn (gen_force_la_31 (dst, src));
4442 }
4443
4444 /* Return a legitimate reference for ORIG (an address) using the
4445 register REG. If REG is 0, a new pseudo is generated.
4446
4447 There are two types of references that must be handled:
4448
4449 1. Global data references must load the address from the GOT, via
4450 the PIC reg. An insn is emitted to do this load, and the reg is
4451 returned.
4452
4453 2. Static data references, constant pool addresses, and code labels
4454 compute the address as an offset from the GOT, whose base is in
4455 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
4456 differentiate them from global data objects. The returned
4457 address is the PIC reg + an unspec constant.
4458
4459 TARGET_LEGITIMIZE_ADDRESS_P rejects symbolic references unless the PIC
4460 reg also appears in the address. */
4461
4462 rtx
4463 legitimize_pic_address (rtx orig, rtx reg)
4464 {
4465 rtx addr = orig;
4466 rtx addend = const0_rtx;
4467 rtx new_rtx = orig;
4468
4469 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
4470
4471 if (GET_CODE (addr) == CONST)
4472 addr = XEXP (addr, 0);
4473
4474 if (GET_CODE (addr) == PLUS)
4475 {
4476 addend = XEXP (addr, 1);
4477 addr = XEXP (addr, 0);
4478 }
4479
4480 if ((GET_CODE (addr) == LABEL_REF
4481 || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr))
4482 || (GET_CODE (addr) == UNSPEC &&
4483 (XINT (addr, 1) == UNSPEC_GOTENT
4484 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
4485 && GET_CODE (addend) == CONST_INT)
4486 {
4487 /* This can be locally addressed. */
4488
4489 /* larl_operand requires UNSPECs to be wrapped in a const rtx. */
4490 rtx const_addr = (GET_CODE (addr) == UNSPEC ?
4491 gen_rtx_CONST (Pmode, addr) : addr);
4492
4493 if (TARGET_CPU_ZARCH
4494 && larl_operand (const_addr, VOIDmode)
4495 && INTVAL (addend) < (HOST_WIDE_INT)1 << 31
4496 && INTVAL (addend) >= -((HOST_WIDE_INT)1 << 31))
4497 {
4498 if (INTVAL (addend) & 1)
4499 {
4500 /* LARL can't handle odd offsets, so emit a pair of LARL
4501 and LA. */
4502 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4503
4504 if (!DISP_IN_RANGE (INTVAL (addend)))
4505 {
4506 HOST_WIDE_INT even = INTVAL (addend) - 1;
4507 addr = gen_rtx_PLUS (Pmode, addr, GEN_INT (even));
4508 addr = gen_rtx_CONST (Pmode, addr);
4509 addend = const1_rtx;
4510 }
4511
4512 emit_move_insn (temp, addr);
4513 new_rtx = gen_rtx_PLUS (Pmode, temp, addend);
4514
4515 if (reg != 0)
4516 {
4517 s390_load_address (reg, new_rtx);
4518 new_rtx = reg;
4519 }
4520 }
4521 else
4522 {
4523 /* If the offset is even, we can just use LARL. This
4524 will happen automatically. */
4525 }
4526 }
4527 else
4528 {
4529 /* No larl - Access local symbols relative to the GOT. */
4530
4531 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4532
4533 if (reload_in_progress || reload_completed)
4534 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4535
4536 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
4537 if (addend != const0_rtx)
4538 addr = gen_rtx_PLUS (Pmode, addr, addend);
4539 addr = gen_rtx_CONST (Pmode, addr);
4540 addr = force_const_mem (Pmode, addr);
4541 emit_move_insn (temp, addr);
4542
4543 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4544 if (reg != 0)
4545 {
4546 s390_load_address (reg, new_rtx);
4547 new_rtx = reg;
4548 }
4549 }
4550 }
4551 else if (GET_CODE (addr) == SYMBOL_REF && addend == const0_rtx)
4552 {
4553 /* A non-local symbol reference without addend.
4554
4555 The symbol ref is wrapped into an UNSPEC to make sure the
4556 proper operand modifier (@GOT or @GOTENT) will be emitted.
4557 This will tell the linker to put the symbol into the GOT.
4558
4559 Additionally the code dereferencing the GOT slot is emitted here.
4560
4561 An addend to the symref needs to be added afterwards.
4562 legitimize_pic_address calls itself recursively to handle
4563 that case. So no need to do it here. */
4564
4565 if (reg == 0)
4566 reg = gen_reg_rtx (Pmode);
4567
4568 if (TARGET_Z10)
4569 {
4570 /* Use load relative if possible.
4571 lgrl <target>, sym@GOTENT */
4572 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
4573 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4574 new_rtx = gen_const_mem (GET_MODE (reg), new_rtx);
4575
4576 emit_move_insn (reg, new_rtx);
4577 new_rtx = reg;
4578 }
4579 else if (flag_pic == 1)
4580 {
4581 /* Assume GOT offset is a valid displacement operand (< 4k
4582 or < 512k with z990). This is handled the same way in
4583 both 31- and 64-bit code (@GOT).
4584 lg <target>, sym@GOT(r12) */
4585
4586 if (reload_in_progress || reload_completed)
4587 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4588
4589 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
4590 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4591 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
4592 new_rtx = gen_const_mem (Pmode, new_rtx);
4593 emit_move_insn (reg, new_rtx);
4594 new_rtx = reg;
4595 }
4596 else if (TARGET_CPU_ZARCH)
4597 {
4598 /* If the GOT offset might be >= 4k, we determine the position
4599 of the GOT entry via a PC-relative LARL (@GOTENT).
4600 larl temp, sym@GOTENT
4601 lg <target>, 0(temp) */
4602
4603 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4604
4605 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
4606 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
4607
4608 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
4609 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4610 emit_move_insn (temp, new_rtx);
4611
4612 new_rtx = gen_const_mem (Pmode, temp);
4613 emit_move_insn (reg, new_rtx);
4614
4615 new_rtx = reg;
4616 }
4617 else
4618 {
4619 /* If the GOT offset might be >= 4k, we have to load it
4620 from the literal pool (@GOT).
4621
4622 lg temp, lit-litbase(r13)
4623 lg <target>, 0(temp)
4624 lit: .long sym@GOT */
4625
4626 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4627
4628 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
4629 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
4630
4631 if (reload_in_progress || reload_completed)
4632 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4633
4634 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
4635 addr = gen_rtx_CONST (Pmode, addr);
4636 addr = force_const_mem (Pmode, addr);
4637 emit_move_insn (temp, addr);
4638
4639 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4640 new_rtx = gen_const_mem (Pmode, new_rtx);
4641 emit_move_insn (reg, new_rtx);
4642 new_rtx = reg;
4643 }
4644 }
4645 else if (GET_CODE (addr) == UNSPEC && GET_CODE (addend) == CONST_INT)
4646 {
4647 gcc_assert (XVECLEN (addr, 0) == 1);
4648 switch (XINT (addr, 1))
4649 {
4650 /* These address symbols (or PLT slots) relative to the GOT
4651 (not GOT slots!). In general this will exceed the
4652 displacement range so these value belong into the literal
4653 pool. */
4654 case UNSPEC_GOTOFF:
4655 case UNSPEC_PLTOFF:
4656 new_rtx = force_const_mem (Pmode, orig);
4657 break;
4658
4659 /* For -fPIC the GOT size might exceed the displacement
4660 range so make sure the value is in the literal pool. */
4661 case UNSPEC_GOT:
4662 if (flag_pic == 2)
4663 new_rtx = force_const_mem (Pmode, orig);
4664 break;
4665
4666 /* For @GOTENT larl is used. This is handled like local
4667 symbol refs. */
4668 case UNSPEC_GOTENT:
4669 gcc_unreachable ();
4670 break;
4671
4672 /* @PLT is OK as is on 64-bit, must be converted to
4673 GOT-relative @PLTOFF on 31-bit. */
4674 case UNSPEC_PLT:
4675 if (!TARGET_CPU_ZARCH)
4676 {
4677 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4678
4679 if (reload_in_progress || reload_completed)
4680 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4681
4682 addr = XVECEXP (addr, 0, 0);
4683 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
4684 UNSPEC_PLTOFF);
4685 if (addend != const0_rtx)
4686 addr = gen_rtx_PLUS (Pmode, addr, addend);
4687 addr = gen_rtx_CONST (Pmode, addr);
4688 addr = force_const_mem (Pmode, addr);
4689 emit_move_insn (temp, addr);
4690
4691 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4692 if (reg != 0)
4693 {
4694 s390_load_address (reg, new_rtx);
4695 new_rtx = reg;
4696 }
4697 }
4698 else
4699 /* On 64 bit larl can be used. This case is handled like
4700 local symbol refs. */
4701 gcc_unreachable ();
4702 break;
4703
4704 /* Everything else cannot happen. */
4705 default:
4706 gcc_unreachable ();
4707 }
4708 }
4709 else if (addend != const0_rtx)
4710 {
4711 /* Otherwise, compute the sum. */
4712
4713 rtx base = legitimize_pic_address (addr, reg);
4714 new_rtx = legitimize_pic_address (addend,
4715 base == reg ? NULL_RTX : reg);
4716 if (GET_CODE (new_rtx) == CONST_INT)
4717 new_rtx = plus_constant (Pmode, base, INTVAL (new_rtx));
4718 else
4719 {
4720 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
4721 {
4722 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
4723 new_rtx = XEXP (new_rtx, 1);
4724 }
4725 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
4726 }
4727
4728 if (GET_CODE (new_rtx) == CONST)
4729 new_rtx = XEXP (new_rtx, 0);
4730 new_rtx = force_operand (new_rtx, 0);
4731 }
4732
4733 return new_rtx;
4734 }
4735
4736 /* Load the thread pointer into a register. */
4737
4738 rtx
4739 s390_get_thread_pointer (void)
4740 {
4741 rtx tp = gen_reg_rtx (Pmode);
4742
4743 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
4744 mark_reg_pointer (tp, BITS_PER_WORD);
4745
4746 return tp;
4747 }
4748
4749 /* Emit a tls call insn. The call target is the SYMBOL_REF stored
4750 in s390_tls_symbol which always refers to __tls_get_offset.
4751 The returned offset is written to RESULT_REG and an USE rtx is
4752 generated for TLS_CALL. */
4753
4754 static GTY(()) rtx s390_tls_symbol;
4755
4756 static void
4757 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
4758 {
4759 rtx insn;
4760
4761 if (!flag_pic)
4762 emit_insn (s390_load_got ());
4763
4764 if (!s390_tls_symbol)
4765 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
4766
4767 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
4768 gen_rtx_REG (Pmode, RETURN_REGNUM));
4769
4770 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
4771 RTL_CONST_CALL_P (insn) = 1;
4772 }
4773
4774 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
4775 this (thread-local) address. REG may be used as temporary. */
4776
4777 static rtx
4778 legitimize_tls_address (rtx addr, rtx reg)
4779 {
4780 rtx new_rtx, tls_call, temp, base, r2, insn;
4781
4782 if (GET_CODE (addr) == SYMBOL_REF)
4783 switch (tls_symbolic_operand (addr))
4784 {
4785 case TLS_MODEL_GLOBAL_DYNAMIC:
4786 start_sequence ();
4787 r2 = gen_rtx_REG (Pmode, 2);
4788 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
4789 new_rtx = gen_rtx_CONST (Pmode, tls_call);
4790 new_rtx = force_const_mem (Pmode, new_rtx);
4791 emit_move_insn (r2, new_rtx);
4792 s390_emit_tls_call_insn (r2, tls_call);
4793 insn = get_insns ();
4794 end_sequence ();
4795
4796 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
4797 temp = gen_reg_rtx (Pmode);
4798 emit_libcall_block (insn, temp, r2, new_rtx);
4799
4800 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4801 if (reg != 0)
4802 {
4803 s390_load_address (reg, new_rtx);
4804 new_rtx = reg;
4805 }
4806 break;
4807
4808 case TLS_MODEL_LOCAL_DYNAMIC:
4809 start_sequence ();
4810 r2 = gen_rtx_REG (Pmode, 2);
4811 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
4812 new_rtx = gen_rtx_CONST (Pmode, tls_call);
4813 new_rtx = force_const_mem (Pmode, new_rtx);
4814 emit_move_insn (r2, new_rtx);
4815 s390_emit_tls_call_insn (r2, tls_call);
4816 insn = get_insns ();
4817 end_sequence ();
4818
4819 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
4820 temp = gen_reg_rtx (Pmode);
4821 emit_libcall_block (insn, temp, r2, new_rtx);
4822
4823 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4824 base = gen_reg_rtx (Pmode);
4825 s390_load_address (base, new_rtx);
4826
4827 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
4828 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4829 new_rtx = force_const_mem (Pmode, new_rtx);
4830 temp = gen_reg_rtx (Pmode);
4831 emit_move_insn (temp, new_rtx);
4832
4833 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
4834 if (reg != 0)
4835 {
4836 s390_load_address (reg, new_rtx);
4837 new_rtx = reg;
4838 }
4839 break;
4840
4841 case TLS_MODEL_INITIAL_EXEC:
4842 if (flag_pic == 1)
4843 {
4844 /* Assume GOT offset < 4k. This is handled the same way
4845 in both 31- and 64-bit code. */
4846
4847 if (reload_in_progress || reload_completed)
4848 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4849
4850 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
4851 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4852 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
4853 new_rtx = gen_const_mem (Pmode, new_rtx);
4854 temp = gen_reg_rtx (Pmode);
4855 emit_move_insn (temp, new_rtx);
4856 }
4857 else if (TARGET_CPU_ZARCH)
4858 {
4859 /* If the GOT offset might be >= 4k, we determine the position
4860 of the GOT entry via a PC-relative LARL. */
4861
4862 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
4863 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4864 temp = gen_reg_rtx (Pmode);
4865 emit_move_insn (temp, new_rtx);
4866
4867 new_rtx = gen_const_mem (Pmode, temp);
4868 temp = gen_reg_rtx (Pmode);
4869 emit_move_insn (temp, new_rtx);
4870 }
4871 else if (flag_pic)
4872 {
4873 /* If the GOT offset might be >= 4k, we have to load it
4874 from the literal pool. */
4875
4876 if (reload_in_progress || reload_completed)
4877 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4878
4879 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
4880 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4881 new_rtx = force_const_mem (Pmode, new_rtx);
4882 temp = gen_reg_rtx (Pmode);
4883 emit_move_insn (temp, new_rtx);
4884
4885 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4886 new_rtx = gen_const_mem (Pmode, new_rtx);
4887
4888 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
4889 temp = gen_reg_rtx (Pmode);
4890 emit_insn (gen_rtx_SET (temp, new_rtx));
4891 }
4892 else
4893 {
4894 /* In position-dependent code, load the absolute address of
4895 the GOT entry from the literal pool. */
4896
4897 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
4898 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4899 new_rtx = force_const_mem (Pmode, new_rtx);
4900 temp = gen_reg_rtx (Pmode);
4901 emit_move_insn (temp, new_rtx);
4902
4903 new_rtx = temp;
4904 new_rtx = gen_const_mem (Pmode, new_rtx);
4905 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
4906 temp = gen_reg_rtx (Pmode);
4907 emit_insn (gen_rtx_SET (temp, new_rtx));
4908 }
4909
4910 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4911 if (reg != 0)
4912 {
4913 s390_load_address (reg, new_rtx);
4914 new_rtx = reg;
4915 }
4916 break;
4917
4918 case TLS_MODEL_LOCAL_EXEC:
4919 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
4920 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4921 new_rtx = force_const_mem (Pmode, new_rtx);
4922 temp = gen_reg_rtx (Pmode);
4923 emit_move_insn (temp, new_rtx);
4924
4925 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4926 if (reg != 0)
4927 {
4928 s390_load_address (reg, new_rtx);
4929 new_rtx = reg;
4930 }
4931 break;
4932
4933 default:
4934 gcc_unreachable ();
4935 }
4936
4937 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
4938 {
4939 switch (XINT (XEXP (addr, 0), 1))
4940 {
4941 case UNSPEC_INDNTPOFF:
4942 gcc_assert (TARGET_CPU_ZARCH);
4943 new_rtx = addr;
4944 break;
4945
4946 default:
4947 gcc_unreachable ();
4948 }
4949 }
4950
4951 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
4952 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
4953 {
4954 new_rtx = XEXP (XEXP (addr, 0), 0);
4955 if (GET_CODE (new_rtx) != SYMBOL_REF)
4956 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4957
4958 new_rtx = legitimize_tls_address (new_rtx, reg);
4959 new_rtx = plus_constant (Pmode, new_rtx,
4960 INTVAL (XEXP (XEXP (addr, 0), 1)));
4961 new_rtx = force_operand (new_rtx, 0);
4962 }
4963
4964 else
4965 gcc_unreachable (); /* for now ... */
4966
4967 return new_rtx;
4968 }
4969
4970 /* Emit insns making the address in operands[1] valid for a standard
4971 move to operands[0]. operands[1] is replaced by an address which
4972 should be used instead of the former RTX to emit the move
4973 pattern. */
4974
4975 void
4976 emit_symbolic_move (rtx *operands)
4977 {
4978 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
4979
4980 if (GET_CODE (operands[0]) == MEM)
4981 operands[1] = force_reg (Pmode, operands[1]);
4982 else if (TLS_SYMBOLIC_CONST (operands[1]))
4983 operands[1] = legitimize_tls_address (operands[1], temp);
4984 else if (flag_pic)
4985 operands[1] = legitimize_pic_address (operands[1], temp);
4986 }
4987
4988 /* Try machine-dependent ways of modifying an illegitimate address X
4989 to be legitimate. If we find one, return the new, valid address.
4990
4991 OLDX is the address as it was before break_out_memory_refs was called.
4992 In some cases it is useful to look at this to decide what needs to be done.
4993
4994 MODE is the mode of the operand pointed to by X.
4995
4996 When -fpic is used, special handling is needed for symbolic references.
4997 See comments by legitimize_pic_address for details. */
4998
4999 static rtx
5000 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
5001 machine_mode mode ATTRIBUTE_UNUSED)
5002 {
5003 rtx constant_term = const0_rtx;
5004
5005 if (TLS_SYMBOLIC_CONST (x))
5006 {
5007 x = legitimize_tls_address (x, 0);
5008
5009 if (s390_legitimate_address_p (mode, x, FALSE))
5010 return x;
5011 }
5012 else if (GET_CODE (x) == PLUS
5013 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
5014 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
5015 {
5016 return x;
5017 }
5018 else if (flag_pic)
5019 {
5020 if (SYMBOLIC_CONST (x)
5021 || (GET_CODE (x) == PLUS
5022 && (SYMBOLIC_CONST (XEXP (x, 0))
5023 || SYMBOLIC_CONST (XEXP (x, 1)))))
5024 x = legitimize_pic_address (x, 0);
5025
5026 if (s390_legitimate_address_p (mode, x, FALSE))
5027 return x;
5028 }
5029
5030 x = eliminate_constant_term (x, &constant_term);
5031
5032 /* Optimize loading of large displacements by splitting them
5033 into the multiple of 4K and the rest; this allows the
5034 former to be CSE'd if possible.
5035
5036 Don't do this if the displacement is added to a register
5037 pointing into the stack frame, as the offsets will
5038 change later anyway. */
5039
5040 if (GET_CODE (constant_term) == CONST_INT
5041 && !TARGET_LONG_DISPLACEMENT
5042 && !DISP_IN_RANGE (INTVAL (constant_term))
5043 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
5044 {
5045 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
5046 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
5047
5048 rtx temp = gen_reg_rtx (Pmode);
5049 rtx val = force_operand (GEN_INT (upper), temp);
5050 if (val != temp)
5051 emit_move_insn (temp, val);
5052
5053 x = gen_rtx_PLUS (Pmode, x, temp);
5054 constant_term = GEN_INT (lower);
5055 }
5056
5057 if (GET_CODE (x) == PLUS)
5058 {
5059 if (GET_CODE (XEXP (x, 0)) == REG)
5060 {
5061 rtx temp = gen_reg_rtx (Pmode);
5062 rtx val = force_operand (XEXP (x, 1), temp);
5063 if (val != temp)
5064 emit_move_insn (temp, val);
5065
5066 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
5067 }
5068
5069 else if (GET_CODE (XEXP (x, 1)) == REG)
5070 {
5071 rtx temp = gen_reg_rtx (Pmode);
5072 rtx val = force_operand (XEXP (x, 0), temp);
5073 if (val != temp)
5074 emit_move_insn (temp, val);
5075
5076 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
5077 }
5078 }
5079
5080 if (constant_term != const0_rtx)
5081 x = gen_rtx_PLUS (Pmode, x, constant_term);
5082
5083 return x;
5084 }
5085
5086 /* Try a machine-dependent way of reloading an illegitimate address AD
5087 operand. If we find one, push the reload and return the new address.
5088
5089 MODE is the mode of the enclosing MEM. OPNUM is the operand number
5090 and TYPE is the reload type of the current reload. */
5091
5092 rtx
5093 legitimize_reload_address (rtx ad, machine_mode mode ATTRIBUTE_UNUSED,
5094 int opnum, int type)
5095 {
5096 if (!optimize || TARGET_LONG_DISPLACEMENT)
5097 return NULL_RTX;
5098
5099 if (GET_CODE (ad) == PLUS)
5100 {
5101 rtx tem = simplify_binary_operation (PLUS, Pmode,
5102 XEXP (ad, 0), XEXP (ad, 1));
5103 if (tem)
5104 ad = tem;
5105 }
5106
5107 if (GET_CODE (ad) == PLUS
5108 && GET_CODE (XEXP (ad, 0)) == REG
5109 && GET_CODE (XEXP (ad, 1)) == CONST_INT
5110 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
5111 {
5112 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
5113 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
5114 rtx cst, tem, new_rtx;
5115
5116 cst = GEN_INT (upper);
5117 if (!legitimate_reload_constant_p (cst))
5118 cst = force_const_mem (Pmode, cst);
5119
5120 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
5121 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
5122
5123 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
5124 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
5125 opnum, (enum reload_type) type);
5126 return new_rtx;
5127 }
5128
5129 return NULL_RTX;
5130 }
5131
5132 /* Emit code to move LEN bytes from DST to SRC. */
5133
5134 bool
5135 s390_expand_movmem (rtx dst, rtx src, rtx len)
5136 {
5137 /* When tuning for z10 or higher we rely on the Glibc functions to
5138 do the right thing. Only for constant lengths below 64k we will
5139 generate inline code. */
5140 if (s390_tune >= PROCESSOR_2097_Z10
5141 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
5142 return false;
5143
5144 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
5145 {
5146 if (INTVAL (len) > 0)
5147 emit_insn (gen_movmem_short (dst, src, GEN_INT (INTVAL (len) - 1)));
5148 }
5149
5150 else if (TARGET_MVCLE)
5151 {
5152 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
5153 }
5154
5155 else
5156 {
5157 rtx dst_addr, src_addr, count, blocks, temp;
5158 rtx_code_label *loop_start_label = gen_label_rtx ();
5159 rtx_code_label *loop_end_label = gen_label_rtx ();
5160 rtx_code_label *end_label = gen_label_rtx ();
5161 machine_mode mode;
5162
5163 mode = GET_MODE (len);
5164 if (mode == VOIDmode)
5165 mode = Pmode;
5166
5167 dst_addr = gen_reg_rtx (Pmode);
5168 src_addr = gen_reg_rtx (Pmode);
5169 count = gen_reg_rtx (mode);
5170 blocks = gen_reg_rtx (mode);
5171
5172 convert_move (count, len, 1);
5173 emit_cmp_and_jump_insns (count, const0_rtx,
5174 EQ, NULL_RTX, mode, 1, end_label);
5175
5176 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
5177 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
5178 dst = change_address (dst, VOIDmode, dst_addr);
5179 src = change_address (src, VOIDmode, src_addr);
5180
5181 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5182 OPTAB_DIRECT);
5183 if (temp != count)
5184 emit_move_insn (count, temp);
5185
5186 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5187 OPTAB_DIRECT);
5188 if (temp != blocks)
5189 emit_move_insn (blocks, temp);
5190
5191 emit_cmp_and_jump_insns (blocks, const0_rtx,
5192 EQ, NULL_RTX, mode, 1, loop_end_label);
5193
5194 emit_label (loop_start_label);
5195
5196 if (TARGET_Z10
5197 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
5198 {
5199 rtx prefetch;
5200
5201 /* Issue a read prefetch for the +3 cache line. */
5202 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
5203 const0_rtx, const0_rtx);
5204 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5205 emit_insn (prefetch);
5206
5207 /* Issue a write prefetch for the +3 cache line. */
5208 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
5209 const1_rtx, const0_rtx);
5210 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5211 emit_insn (prefetch);
5212 }
5213
5214 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
5215 s390_load_address (dst_addr,
5216 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
5217 s390_load_address (src_addr,
5218 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
5219
5220 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5221 OPTAB_DIRECT);
5222 if (temp != blocks)
5223 emit_move_insn (blocks, temp);
5224
5225 emit_cmp_and_jump_insns (blocks, const0_rtx,
5226 EQ, NULL_RTX, mode, 1, loop_end_label);
5227
5228 emit_jump (loop_start_label);
5229 emit_label (loop_end_label);
5230
5231 emit_insn (gen_movmem_short (dst, src,
5232 convert_to_mode (Pmode, count, 1)));
5233 emit_label (end_label);
5234 }
5235 return true;
5236 }
5237
5238 /* Emit code to set LEN bytes at DST to VAL.
5239 Make use of clrmem if VAL is zero. */
5240
5241 void
5242 s390_expand_setmem (rtx dst, rtx len, rtx val)
5243 {
5244 if (GET_CODE (len) == CONST_INT && INTVAL (len) == 0)
5245 return;
5246
5247 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
5248
5249 if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
5250 {
5251 if (val == const0_rtx && INTVAL (len) <= 256)
5252 emit_insn (gen_clrmem_short (dst, GEN_INT (INTVAL (len) - 1)));
5253 else
5254 {
5255 /* Initialize memory by storing the first byte. */
5256 emit_move_insn (adjust_address (dst, QImode, 0), val);
5257
5258 if (INTVAL (len) > 1)
5259 {
5260 /* Initiate 1 byte overlap move.
5261 The first byte of DST is propagated through DSTP1.
5262 Prepare a movmem for: DST+1 = DST (length = LEN - 1).
5263 DST is set to size 1 so the rest of the memory location
5264 does not count as source operand. */
5265 rtx dstp1 = adjust_address (dst, VOIDmode, 1);
5266 set_mem_size (dst, 1);
5267
5268 emit_insn (gen_movmem_short (dstp1, dst,
5269 GEN_INT (INTVAL (len) - 2)));
5270 }
5271 }
5272 }
5273
5274 else if (TARGET_MVCLE)
5275 {
5276 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
5277 if (TARGET_64BIT)
5278 emit_insn (gen_setmem_long_di (dst, convert_to_mode (Pmode, len, 1),
5279 val));
5280 else
5281 emit_insn (gen_setmem_long_si (dst, convert_to_mode (Pmode, len, 1),
5282 val));
5283 }
5284
5285 else
5286 {
5287 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
5288 rtx_code_label *loop_start_label = gen_label_rtx ();
5289 rtx_code_label *loop_end_label = gen_label_rtx ();
5290 rtx_code_label *end_label = gen_label_rtx ();
5291 machine_mode mode;
5292
5293 mode = GET_MODE (len);
5294 if (mode == VOIDmode)
5295 mode = Pmode;
5296
5297 dst_addr = gen_reg_rtx (Pmode);
5298 count = gen_reg_rtx (mode);
5299 blocks = gen_reg_rtx (mode);
5300
5301 convert_move (count, len, 1);
5302 emit_cmp_and_jump_insns (count, const0_rtx,
5303 EQ, NULL_RTX, mode, 1, end_label);
5304
5305 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
5306 dst = change_address (dst, VOIDmode, dst_addr);
5307
5308 if (val == const0_rtx)
5309 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5310 OPTAB_DIRECT);
5311 else
5312 {
5313 dstp1 = adjust_address (dst, VOIDmode, 1);
5314 set_mem_size (dst, 1);
5315
5316 /* Initialize memory by storing the first byte. */
5317 emit_move_insn (adjust_address (dst, QImode, 0), val);
5318
5319 /* If count is 1 we are done. */
5320 emit_cmp_and_jump_insns (count, const1_rtx,
5321 EQ, NULL_RTX, mode, 1, end_label);
5322
5323 temp = expand_binop (mode, add_optab, count, GEN_INT (-2), count, 1,
5324 OPTAB_DIRECT);
5325 }
5326 if (temp != count)
5327 emit_move_insn (count, temp);
5328
5329 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5330 OPTAB_DIRECT);
5331 if (temp != blocks)
5332 emit_move_insn (blocks, temp);
5333
5334 emit_cmp_and_jump_insns (blocks, const0_rtx,
5335 EQ, NULL_RTX, mode, 1, loop_end_label);
5336
5337 emit_label (loop_start_label);
5338
5339 if (TARGET_Z10
5340 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
5341 {
5342 /* Issue a write prefetch for the +4 cache line. */
5343 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
5344 GEN_INT (1024)),
5345 const1_rtx, const0_rtx);
5346 emit_insn (prefetch);
5347 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5348 }
5349
5350 if (val == const0_rtx)
5351 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
5352 else
5353 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (255)));
5354 s390_load_address (dst_addr,
5355 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
5356
5357 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5358 OPTAB_DIRECT);
5359 if (temp != blocks)
5360 emit_move_insn (blocks, temp);
5361
5362 emit_cmp_and_jump_insns (blocks, const0_rtx,
5363 EQ, NULL_RTX, mode, 1, loop_end_label);
5364
5365 emit_jump (loop_start_label);
5366 emit_label (loop_end_label);
5367
5368 if (val == const0_rtx)
5369 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
5370 else
5371 emit_insn (gen_movmem_short (dstp1, dst, convert_to_mode (Pmode, count, 1)));
5372 emit_label (end_label);
5373 }
5374 }
5375
5376 /* Emit code to compare LEN bytes at OP0 with those at OP1,
5377 and return the result in TARGET. */
5378
5379 bool
5380 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
5381 {
5382 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
5383 rtx tmp;
5384
5385 /* When tuning for z10 or higher we rely on the Glibc functions to
5386 do the right thing. Only for constant lengths below 64k we will
5387 generate inline code. */
5388 if (s390_tune >= PROCESSOR_2097_Z10
5389 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
5390 return false;
5391
5392 /* As the result of CMPINT is inverted compared to what we need,
5393 we have to swap the operands. */
5394 tmp = op0; op0 = op1; op1 = tmp;
5395
5396 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
5397 {
5398 if (INTVAL (len) > 0)
5399 {
5400 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
5401 emit_insn (gen_cmpint (target, ccreg));
5402 }
5403 else
5404 emit_move_insn (target, const0_rtx);
5405 }
5406 else if (TARGET_MVCLE)
5407 {
5408 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
5409 emit_insn (gen_cmpint (target, ccreg));
5410 }
5411 else
5412 {
5413 rtx addr0, addr1, count, blocks, temp;
5414 rtx_code_label *loop_start_label = gen_label_rtx ();
5415 rtx_code_label *loop_end_label = gen_label_rtx ();
5416 rtx_code_label *end_label = gen_label_rtx ();
5417 machine_mode mode;
5418
5419 mode = GET_MODE (len);
5420 if (mode == VOIDmode)
5421 mode = Pmode;
5422
5423 addr0 = gen_reg_rtx (Pmode);
5424 addr1 = gen_reg_rtx (Pmode);
5425 count = gen_reg_rtx (mode);
5426 blocks = gen_reg_rtx (mode);
5427
5428 convert_move (count, len, 1);
5429 emit_cmp_and_jump_insns (count, const0_rtx,
5430 EQ, NULL_RTX, mode, 1, end_label);
5431
5432 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
5433 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
5434 op0 = change_address (op0, VOIDmode, addr0);
5435 op1 = change_address (op1, VOIDmode, addr1);
5436
5437 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5438 OPTAB_DIRECT);
5439 if (temp != count)
5440 emit_move_insn (count, temp);
5441
5442 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5443 OPTAB_DIRECT);
5444 if (temp != blocks)
5445 emit_move_insn (blocks, temp);
5446
5447 emit_cmp_and_jump_insns (blocks, const0_rtx,
5448 EQ, NULL_RTX, mode, 1, loop_end_label);
5449
5450 emit_label (loop_start_label);
5451
5452 if (TARGET_Z10
5453 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
5454 {
5455 rtx prefetch;
5456
5457 /* Issue a read prefetch for the +2 cache line of operand 1. */
5458 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
5459 const0_rtx, const0_rtx);
5460 emit_insn (prefetch);
5461 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5462
5463 /* Issue a read prefetch for the +2 cache line of operand 2. */
5464 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
5465 const0_rtx, const0_rtx);
5466 emit_insn (prefetch);
5467 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5468 }
5469
5470 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
5471 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
5472 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
5473 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
5474 temp = gen_rtx_SET (pc_rtx, temp);
5475 emit_jump_insn (temp);
5476
5477 s390_load_address (addr0,
5478 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
5479 s390_load_address (addr1,
5480 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
5481
5482 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5483 OPTAB_DIRECT);
5484 if (temp != blocks)
5485 emit_move_insn (blocks, temp);
5486
5487 emit_cmp_and_jump_insns (blocks, const0_rtx,
5488 EQ, NULL_RTX, mode, 1, loop_end_label);
5489
5490 emit_jump (loop_start_label);
5491 emit_label (loop_end_label);
5492
5493 emit_insn (gen_cmpmem_short (op0, op1,
5494 convert_to_mode (Pmode, count, 1)));
5495 emit_label (end_label);
5496
5497 emit_insn (gen_cmpint (target, ccreg));
5498 }
5499 return true;
5500 }
5501
5502 /* Emit a conditional jump to LABEL for condition code mask MASK using
5503 comparsion operator COMPARISON. Return the emitted jump insn. */
5504
5505 static rtx
5506 s390_emit_ccraw_jump (HOST_WIDE_INT mask, enum rtx_code comparison, rtx label)
5507 {
5508 rtx temp;
5509
5510 gcc_assert (comparison == EQ || comparison == NE);
5511 gcc_assert (mask > 0 && mask < 15);
5512
5513 temp = gen_rtx_fmt_ee (comparison, VOIDmode,
5514 gen_rtx_REG (CCRAWmode, CC_REGNUM), GEN_INT (mask));
5515 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
5516 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
5517 temp = gen_rtx_SET (pc_rtx, temp);
5518 return emit_jump_insn (temp);
5519 }
5520
5521 /* Emit the instructions to implement strlen of STRING and store the
5522 result in TARGET. The string has the known ALIGNMENT. This
5523 version uses vector instructions and is therefore not appropriate
5524 for targets prior to z13. */
5525
5526 void
5527 s390_expand_vec_strlen (rtx target, rtx string, rtx alignment)
5528 {
5529 int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
5530 int very_likely = REG_BR_PROB_BASE - 1;
5531 rtx highest_index_to_load_reg = gen_reg_rtx (Pmode);
5532 rtx str_reg = gen_reg_rtx (V16QImode);
5533 rtx str_addr_base_reg = gen_reg_rtx (Pmode);
5534 rtx str_idx_reg = gen_reg_rtx (Pmode);
5535 rtx result_reg = gen_reg_rtx (V16QImode);
5536 rtx is_aligned_label = gen_label_rtx ();
5537 rtx into_loop_label = NULL_RTX;
5538 rtx loop_start_label = gen_label_rtx ();
5539 rtx temp;
5540 rtx len = gen_reg_rtx (QImode);
5541 rtx cond;
5542
5543 s390_load_address (str_addr_base_reg, XEXP (string, 0));
5544 emit_move_insn (str_idx_reg, const0_rtx);
5545
5546 if (INTVAL (alignment) < 16)
5547 {
5548 /* Check whether the address happens to be aligned properly so
5549 jump directly to the aligned loop. */
5550 emit_cmp_and_jump_insns (gen_rtx_AND (Pmode,
5551 str_addr_base_reg, GEN_INT (15)),
5552 const0_rtx, EQ, NULL_RTX,
5553 Pmode, 1, is_aligned_label);
5554
5555 temp = gen_reg_rtx (Pmode);
5556 temp = expand_binop (Pmode, and_optab, str_addr_base_reg,
5557 GEN_INT (15), temp, 1, OPTAB_DIRECT);
5558 gcc_assert (REG_P (temp));
5559 highest_index_to_load_reg =
5560 expand_binop (Pmode, sub_optab, GEN_INT (15), temp,
5561 highest_index_to_load_reg, 1, OPTAB_DIRECT);
5562 gcc_assert (REG_P (highest_index_to_load_reg));
5563 emit_insn (gen_vllv16qi (str_reg,
5564 convert_to_mode (SImode, highest_index_to_load_reg, 1),
5565 gen_rtx_MEM (BLKmode, str_addr_base_reg)));
5566
5567 into_loop_label = gen_label_rtx ();
5568 s390_emit_jump (into_loop_label, NULL_RTX);
5569 emit_barrier ();
5570 }
5571
5572 emit_label (is_aligned_label);
5573 LABEL_NUSES (is_aligned_label) = INTVAL (alignment) < 16 ? 2 : 1;
5574
5575 /* Reaching this point we are only performing 16 bytes aligned
5576 loads. */
5577 emit_move_insn (highest_index_to_load_reg, GEN_INT (15));
5578
5579 emit_label (loop_start_label);
5580 LABEL_NUSES (loop_start_label) = 1;
5581
5582 /* Load 16 bytes of the string into VR. */
5583 emit_move_insn (str_reg,
5584 gen_rtx_MEM (V16QImode,
5585 gen_rtx_PLUS (Pmode, str_idx_reg,
5586 str_addr_base_reg)));
5587 if (into_loop_label != NULL_RTX)
5588 {
5589 emit_label (into_loop_label);
5590 LABEL_NUSES (into_loop_label) = 1;
5591 }
5592
5593 /* Increment string index by 16 bytes. */
5594 expand_binop (Pmode, add_optab, str_idx_reg, GEN_INT (16),
5595 str_idx_reg, 1, OPTAB_DIRECT);
5596
5597 emit_insn (gen_vec_vfenesv16qi (result_reg, str_reg, str_reg,
5598 GEN_INT (VSTRING_FLAG_ZS | VSTRING_FLAG_CS)));
5599
5600 add_int_reg_note (s390_emit_ccraw_jump (8, NE, loop_start_label),
5601 REG_BR_PROB, very_likely);
5602 emit_insn (gen_vec_extractv16qi (len, result_reg, GEN_INT (7)));
5603
5604 /* If the string pointer wasn't aligned we have loaded less then 16
5605 bytes and the remaining bytes got filled with zeros (by vll).
5606 Now we have to check whether the resulting index lies within the
5607 bytes actually part of the string. */
5608
5609 cond = s390_emit_compare (GT, convert_to_mode (Pmode, len, 1),
5610 highest_index_to_load_reg);
5611 s390_load_address (highest_index_to_load_reg,
5612 gen_rtx_PLUS (Pmode, highest_index_to_load_reg,
5613 const1_rtx));
5614 if (TARGET_64BIT)
5615 emit_insn (gen_movdicc (str_idx_reg, cond,
5616 highest_index_to_load_reg, str_idx_reg));
5617 else
5618 emit_insn (gen_movsicc (str_idx_reg, cond,
5619 highest_index_to_load_reg, str_idx_reg));
5620
5621 add_int_reg_note (s390_emit_jump (is_aligned_label, cond), REG_BR_PROB,
5622 very_unlikely);
5623
5624 expand_binop (Pmode, add_optab, str_idx_reg,
5625 GEN_INT (-16), str_idx_reg, 1, OPTAB_DIRECT);
5626 /* FIXME: len is already zero extended - so avoid the llgcr emitted
5627 here. */
5628 temp = expand_binop (Pmode, add_optab, str_idx_reg,
5629 convert_to_mode (Pmode, len, 1),
5630 target, 1, OPTAB_DIRECT);
5631 if (temp != target)
5632 emit_move_insn (target, temp);
5633 }
5634
5635 void
5636 s390_expand_vec_movstr (rtx result, rtx dst, rtx src)
5637 {
5638 int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
5639 rtx temp = gen_reg_rtx (Pmode);
5640 rtx src_addr = XEXP (src, 0);
5641 rtx dst_addr = XEXP (dst, 0);
5642 rtx src_addr_reg = gen_reg_rtx (Pmode);
5643 rtx dst_addr_reg = gen_reg_rtx (Pmode);
5644 rtx offset = gen_reg_rtx (Pmode);
5645 rtx vsrc = gen_reg_rtx (V16QImode);
5646 rtx vpos = gen_reg_rtx (V16QImode);
5647 rtx loadlen = gen_reg_rtx (SImode);
5648 rtx gpos_qi = gen_reg_rtx(QImode);
5649 rtx gpos = gen_reg_rtx (SImode);
5650 rtx done_label = gen_label_rtx ();
5651 rtx loop_label = gen_label_rtx ();
5652 rtx exit_label = gen_label_rtx ();
5653 rtx full_label = gen_label_rtx ();
5654
5655 /* Perform a quick check for string ending on the first up to 16
5656 bytes and exit early if successful. */
5657
5658 emit_insn (gen_vlbb (vsrc, src, GEN_INT (6)));
5659 emit_insn (gen_lcbb (loadlen, src_addr, GEN_INT (6)));
5660 emit_insn (gen_vfenezv16qi (vpos, vsrc, vsrc));
5661 emit_insn (gen_vec_extractv16qi (gpos_qi, vpos, GEN_INT (7)));
5662 emit_move_insn (gpos, gen_rtx_SUBREG (SImode, gpos_qi, 0));
5663 /* gpos is the byte index if a zero was found and 16 otherwise.
5664 So if it is lower than the loaded bytes we have a hit. */
5665 emit_cmp_and_jump_insns (gpos, loadlen, GE, NULL_RTX, SImode, 1,
5666 full_label);
5667 emit_insn (gen_vstlv16qi (vsrc, gpos, dst));
5668
5669 force_expand_binop (Pmode, add_optab, dst_addr, gpos, result,
5670 1, OPTAB_DIRECT);
5671 emit_jump (exit_label);
5672 emit_barrier ();
5673
5674 emit_label (full_label);
5675 LABEL_NUSES (full_label) = 1;
5676
5677 /* Calculate `offset' so that src + offset points to the last byte
5678 before 16 byte alignment. */
5679
5680 /* temp = src_addr & 0xf */
5681 force_expand_binop (Pmode, and_optab, src_addr, GEN_INT (15), temp,
5682 1, OPTAB_DIRECT);
5683
5684 /* offset = 0xf - temp */
5685 emit_move_insn (offset, GEN_INT (15));
5686 force_expand_binop (Pmode, sub_optab, offset, temp, offset,
5687 1, OPTAB_DIRECT);
5688
5689 /* Store `offset' bytes in the dstination string. The quick check
5690 has loaded at least `offset' bytes into vsrc. */
5691
5692 emit_insn (gen_vstlv16qi (vsrc, gen_lowpart (SImode, offset), dst));
5693
5694 /* Advance to the next byte to be loaded. */
5695 force_expand_binop (Pmode, add_optab, offset, const1_rtx, offset,
5696 1, OPTAB_DIRECT);
5697
5698 /* Make sure the addresses are single regs which can be used as a
5699 base. */
5700 emit_move_insn (src_addr_reg, src_addr);
5701 emit_move_insn (dst_addr_reg, dst_addr);
5702
5703 /* MAIN LOOP */
5704
5705 emit_label (loop_label);
5706 LABEL_NUSES (loop_label) = 1;
5707
5708 emit_move_insn (vsrc,
5709 gen_rtx_MEM (V16QImode,
5710 gen_rtx_PLUS (Pmode, src_addr_reg, offset)));
5711
5712 emit_insn (gen_vec_vfenesv16qi (vpos, vsrc, vsrc,
5713 GEN_INT (VSTRING_FLAG_ZS | VSTRING_FLAG_CS)));
5714 add_int_reg_note (s390_emit_ccraw_jump (8, EQ, done_label),
5715 REG_BR_PROB, very_unlikely);
5716
5717 emit_move_insn (gen_rtx_MEM (V16QImode,
5718 gen_rtx_PLUS (Pmode, dst_addr_reg, offset)),
5719 vsrc);
5720 /* offset += 16 */
5721 force_expand_binop (Pmode, add_optab, offset, GEN_INT (16),
5722 offset, 1, OPTAB_DIRECT);
5723
5724 emit_jump (loop_label);
5725 emit_barrier ();
5726
5727 /* REGULAR EXIT */
5728
5729 /* We are done. Add the offset of the zero character to the dst_addr
5730 pointer to get the result. */
5731
5732 emit_label (done_label);
5733 LABEL_NUSES (done_label) = 1;
5734
5735 force_expand_binop (Pmode, add_optab, dst_addr_reg, offset, dst_addr_reg,
5736 1, OPTAB_DIRECT);
5737
5738 emit_insn (gen_vec_extractv16qi (gpos_qi, vpos, GEN_INT (7)));
5739 emit_move_insn (gpos, gen_rtx_SUBREG (SImode, gpos_qi, 0));
5740
5741 emit_insn (gen_vstlv16qi (vsrc, gpos, gen_rtx_MEM (BLKmode, dst_addr_reg)));
5742
5743 force_expand_binop (Pmode, add_optab, dst_addr_reg, gpos, result,
5744 1, OPTAB_DIRECT);
5745
5746 /* EARLY EXIT */
5747
5748 emit_label (exit_label);
5749 LABEL_NUSES (exit_label) = 1;
5750 }
5751
5752
5753 /* Expand conditional increment or decrement using alc/slb instructions.
5754 Should generate code setting DST to either SRC or SRC + INCREMENT,
5755 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
5756 Returns true if successful, false otherwise.
5757
5758 That makes it possible to implement some if-constructs without jumps e.g.:
5759 (borrow = CC0 | CC1 and carry = CC2 | CC3)
5760 unsigned int a, b, c;
5761 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
5762 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
5763 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
5764 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
5765
5766 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
5767 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
5768 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
5769 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
5770 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
5771
5772 bool
5773 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
5774 rtx dst, rtx src, rtx increment)
5775 {
5776 machine_mode cmp_mode;
5777 machine_mode cc_mode;
5778 rtx op_res;
5779 rtx insn;
5780 rtvec p;
5781 int ret;
5782
5783 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
5784 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
5785 cmp_mode = SImode;
5786 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
5787 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
5788 cmp_mode = DImode;
5789 else
5790 return false;
5791
5792 /* Try ADD LOGICAL WITH CARRY. */
5793 if (increment == const1_rtx)
5794 {
5795 /* Determine CC mode to use. */
5796 if (cmp_code == EQ || cmp_code == NE)
5797 {
5798 if (cmp_op1 != const0_rtx)
5799 {
5800 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
5801 NULL_RTX, 0, OPTAB_WIDEN);
5802 cmp_op1 = const0_rtx;
5803 }
5804
5805 cmp_code = cmp_code == EQ ? LEU : GTU;
5806 }
5807
5808 if (cmp_code == LTU || cmp_code == LEU)
5809 {
5810 rtx tem = cmp_op0;
5811 cmp_op0 = cmp_op1;
5812 cmp_op1 = tem;
5813 cmp_code = swap_condition (cmp_code);
5814 }
5815
5816 switch (cmp_code)
5817 {
5818 case GTU:
5819 cc_mode = CCUmode;
5820 break;
5821
5822 case GEU:
5823 cc_mode = CCL3mode;
5824 break;
5825
5826 default:
5827 return false;
5828 }
5829
5830 /* Emit comparison instruction pattern. */
5831 if (!register_operand (cmp_op0, cmp_mode))
5832 cmp_op0 = force_reg (cmp_mode, cmp_op0);
5833
5834 insn = gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
5835 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
5836 /* We use insn_invalid_p here to add clobbers if required. */
5837 ret = insn_invalid_p (emit_insn (insn), false);
5838 gcc_assert (!ret);
5839
5840 /* Emit ALC instruction pattern. */
5841 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
5842 gen_rtx_REG (cc_mode, CC_REGNUM),
5843 const0_rtx);
5844
5845 if (src != const0_rtx)
5846 {
5847 if (!register_operand (src, GET_MODE (dst)))
5848 src = force_reg (GET_MODE (dst), src);
5849
5850 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
5851 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
5852 }
5853
5854 p = rtvec_alloc (2);
5855 RTVEC_ELT (p, 0) =
5856 gen_rtx_SET (dst, op_res);
5857 RTVEC_ELT (p, 1) =
5858 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
5859 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
5860
5861 return true;
5862 }
5863
5864 /* Try SUBTRACT LOGICAL WITH BORROW. */
5865 if (increment == constm1_rtx)
5866 {
5867 /* Determine CC mode to use. */
5868 if (cmp_code == EQ || cmp_code == NE)
5869 {
5870 if (cmp_op1 != const0_rtx)
5871 {
5872 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
5873 NULL_RTX, 0, OPTAB_WIDEN);
5874 cmp_op1 = const0_rtx;
5875 }
5876
5877 cmp_code = cmp_code == EQ ? LEU : GTU;
5878 }
5879
5880 if (cmp_code == GTU || cmp_code == GEU)
5881 {
5882 rtx tem = cmp_op0;
5883 cmp_op0 = cmp_op1;
5884 cmp_op1 = tem;
5885 cmp_code = swap_condition (cmp_code);
5886 }
5887
5888 switch (cmp_code)
5889 {
5890 case LEU:
5891 cc_mode = CCUmode;
5892 break;
5893
5894 case LTU:
5895 cc_mode = CCL3mode;
5896 break;
5897
5898 default:
5899 return false;
5900 }
5901
5902 /* Emit comparison instruction pattern. */
5903 if (!register_operand (cmp_op0, cmp_mode))
5904 cmp_op0 = force_reg (cmp_mode, cmp_op0);
5905
5906 insn = gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
5907 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
5908 /* We use insn_invalid_p here to add clobbers if required. */
5909 ret = insn_invalid_p (emit_insn (insn), false);
5910 gcc_assert (!ret);
5911
5912 /* Emit SLB instruction pattern. */
5913 if (!register_operand (src, GET_MODE (dst)))
5914 src = force_reg (GET_MODE (dst), src);
5915
5916 op_res = gen_rtx_MINUS (GET_MODE (dst),
5917 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
5918 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
5919 gen_rtx_REG (cc_mode, CC_REGNUM),
5920 const0_rtx));
5921 p = rtvec_alloc (2);
5922 RTVEC_ELT (p, 0) =
5923 gen_rtx_SET (dst, op_res);
5924 RTVEC_ELT (p, 1) =
5925 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
5926 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
5927
5928 return true;
5929 }
5930
5931 return false;
5932 }
5933
5934 /* Expand code for the insv template. Return true if successful. */
5935
5936 bool
5937 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
5938 {
5939 int bitsize = INTVAL (op1);
5940 int bitpos = INTVAL (op2);
5941 machine_mode mode = GET_MODE (dest);
5942 machine_mode smode;
5943 int smode_bsize, mode_bsize;
5944 rtx op, clobber;
5945
5946 if (bitsize + bitpos > GET_MODE_BITSIZE (mode))
5947 return false;
5948
5949 /* Generate INSERT IMMEDIATE (IILL et al). */
5950 /* (set (ze (reg)) (const_int)). */
5951 if (TARGET_ZARCH
5952 && register_operand (dest, word_mode)
5953 && (bitpos % 16) == 0
5954 && (bitsize % 16) == 0
5955 && const_int_operand (src, VOIDmode))
5956 {
5957 HOST_WIDE_INT val = INTVAL (src);
5958 int regpos = bitpos + bitsize;
5959
5960 while (regpos > bitpos)
5961 {
5962 machine_mode putmode;
5963 int putsize;
5964
5965 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
5966 putmode = SImode;
5967 else
5968 putmode = HImode;
5969
5970 putsize = GET_MODE_BITSIZE (putmode);
5971 regpos -= putsize;
5972 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
5973 GEN_INT (putsize),
5974 GEN_INT (regpos)),
5975 gen_int_mode (val, putmode));
5976 val >>= putsize;
5977 }
5978 gcc_assert (regpos == bitpos);
5979 return true;
5980 }
5981
5982 smode = smallest_mode_for_size (bitsize, MODE_INT);
5983 smode_bsize = GET_MODE_BITSIZE (smode);
5984 mode_bsize = GET_MODE_BITSIZE (mode);
5985
5986 /* Generate STORE CHARACTERS UNDER MASK (STCM et al). */
5987 if (bitpos == 0
5988 && (bitsize % BITS_PER_UNIT) == 0
5989 && MEM_P (dest)
5990 && (register_operand (src, word_mode)
5991 || const_int_operand (src, VOIDmode)))
5992 {
5993 /* Emit standard pattern if possible. */
5994 if (smode_bsize == bitsize)
5995 {
5996 emit_move_insn (adjust_address (dest, smode, 0),
5997 gen_lowpart (smode, src));
5998 return true;
5999 }
6000
6001 /* (set (ze (mem)) (const_int)). */
6002 else if (const_int_operand (src, VOIDmode))
6003 {
6004 int size = bitsize / BITS_PER_UNIT;
6005 rtx src_mem = adjust_address (force_const_mem (word_mode, src),
6006 BLKmode,
6007 UNITS_PER_WORD - size);
6008
6009 dest = adjust_address (dest, BLKmode, 0);
6010 set_mem_size (dest, size);
6011 s390_expand_movmem (dest, src_mem, GEN_INT (size));
6012 return true;
6013 }
6014
6015 /* (set (ze (mem)) (reg)). */
6016 else if (register_operand (src, word_mode))
6017 {
6018 if (bitsize <= 32)
6019 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
6020 const0_rtx), src);
6021 else
6022 {
6023 /* Emit st,stcmh sequence. */
6024 int stcmh_width = bitsize - 32;
6025 int size = stcmh_width / BITS_PER_UNIT;
6026
6027 emit_move_insn (adjust_address (dest, SImode, size),
6028 gen_lowpart (SImode, src));
6029 set_mem_size (dest, size);
6030 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
6031 GEN_INT (stcmh_width),
6032 const0_rtx),
6033 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT (32)));
6034 }
6035 return true;
6036 }
6037 }
6038
6039 /* Generate INSERT CHARACTERS UNDER MASK (IC, ICM et al). */
6040 if ((bitpos % BITS_PER_UNIT) == 0
6041 && (bitsize % BITS_PER_UNIT) == 0
6042 && (bitpos & 32) == ((bitpos + bitsize - 1) & 32)
6043 && MEM_P (src)
6044 && (mode == DImode || mode == SImode)
6045 && register_operand (dest, mode))
6046 {
6047 /* Emit a strict_low_part pattern if possible. */
6048 if (smode_bsize == bitsize && bitpos == mode_bsize - smode_bsize)
6049 {
6050 op = gen_rtx_STRICT_LOW_PART (VOIDmode, gen_lowpart (smode, dest));
6051 op = gen_rtx_SET (op, gen_lowpart (smode, src));
6052 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6053 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
6054 return true;
6055 }
6056
6057 /* ??? There are more powerful versions of ICM that are not
6058 completely represented in the md file. */
6059 }
6060
6061 /* For z10, generate ROTATE THEN INSERT SELECTED BITS (RISBG et al). */
6062 if (TARGET_Z10 && (mode == DImode || mode == SImode))
6063 {
6064 machine_mode mode_s = GET_MODE (src);
6065
6066 if (CONSTANT_P (src))
6067 {
6068 /* For constant zero values the representation with AND
6069 appears to be folded in more situations than the (set
6070 (zero_extract) ...).
6071 We only do this when the start and end of the bitfield
6072 remain in the same SImode chunk. That way nihf or nilf
6073 can be used.
6074 The AND patterns might still generate a risbg for this. */
6075 if (src == const0_rtx && bitpos / 32 == (bitpos + bitsize - 1) / 32)
6076 return false;
6077 else
6078 src = force_reg (mode, src);
6079 }
6080 else if (mode_s != mode)
6081 {
6082 gcc_assert (GET_MODE_BITSIZE (mode_s) >= bitsize);
6083 src = force_reg (mode_s, src);
6084 src = gen_lowpart (mode, src);
6085 }
6086
6087 op = gen_rtx_ZERO_EXTRACT (mode, dest, op1, op2),
6088 op = gen_rtx_SET (op, src);
6089
6090 if (!TARGET_ZEC12)
6091 {
6092 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6093 op = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber));
6094 }
6095 emit_insn (op);
6096
6097 return true;
6098 }
6099
6100 return false;
6101 }
6102
6103 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
6104 register that holds VAL of mode MODE shifted by COUNT bits. */
6105
6106 static inline rtx
6107 s390_expand_mask_and_shift (rtx val, machine_mode mode, rtx count)
6108 {
6109 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
6110 NULL_RTX, 1, OPTAB_DIRECT);
6111 return expand_simple_binop (SImode, ASHIFT, val, count,
6112 NULL_RTX, 1, OPTAB_DIRECT);
6113 }
6114
6115 /* Generate a vector comparison COND of CMP_OP1 and CMP_OP2 and store
6116 the result in TARGET. */
6117
6118 void
6119 s390_expand_vec_compare (rtx target, enum rtx_code cond,
6120 rtx cmp_op1, rtx cmp_op2)
6121 {
6122 machine_mode mode = GET_MODE (target);
6123 bool neg_p = false, swap_p = false;
6124 rtx tmp;
6125
6126 if (GET_MODE (cmp_op1) == V2DFmode)
6127 {
6128 switch (cond)
6129 {
6130 /* NE a != b -> !(a == b) */
6131 case NE: cond = EQ; neg_p = true; break;
6132 /* UNGT a u> b -> !(b >= a) */
6133 case UNGT: cond = GE; neg_p = true; swap_p = true; break;
6134 /* UNGE a u>= b -> !(b > a) */
6135 case UNGE: cond = GT; neg_p = true; swap_p = true; break;
6136 /* LE: a <= b -> b >= a */
6137 case LE: cond = GE; swap_p = true; break;
6138 /* UNLE: a u<= b -> !(a > b) */
6139 case UNLE: cond = GT; neg_p = true; break;
6140 /* LT: a < b -> b > a */
6141 case LT: cond = GT; swap_p = true; break;
6142 /* UNLT: a u< b -> !(a >= b) */
6143 case UNLT: cond = GE; neg_p = true; break;
6144 case UNEQ:
6145 emit_insn (gen_vec_cmpuneqv2df (target, cmp_op1, cmp_op2));
6146 return;
6147 case LTGT:
6148 emit_insn (gen_vec_cmpltgtv2df (target, cmp_op1, cmp_op2));
6149 return;
6150 case ORDERED:
6151 emit_insn (gen_vec_orderedv2df (target, cmp_op1, cmp_op2));
6152 return;
6153 case UNORDERED:
6154 emit_insn (gen_vec_unorderedv2df (target, cmp_op1, cmp_op2));
6155 return;
6156 default: break;
6157 }
6158 }
6159 else
6160 {
6161 switch (cond)
6162 {
6163 /* NE: a != b -> !(a == b) */
6164 case NE: cond = EQ; neg_p = true; break;
6165 /* GE: a >= b -> !(b > a) */
6166 case GE: cond = GT; neg_p = true; swap_p = true; break;
6167 /* GEU: a >= b -> !(b > a) */
6168 case GEU: cond = GTU; neg_p = true; swap_p = true; break;
6169 /* LE: a <= b -> !(a > b) */
6170 case LE: cond = GT; neg_p = true; break;
6171 /* LEU: a <= b -> !(a > b) */
6172 case LEU: cond = GTU; neg_p = true; break;
6173 /* LT: a < b -> b > a */
6174 case LT: cond = GT; swap_p = true; break;
6175 /* LTU: a < b -> b > a */
6176 case LTU: cond = GTU; swap_p = true; break;
6177 default: break;
6178 }
6179 }
6180
6181 if (swap_p)
6182 {
6183 tmp = cmp_op1; cmp_op1 = cmp_op2; cmp_op2 = tmp;
6184 }
6185
6186 emit_insn (gen_rtx_SET (target, gen_rtx_fmt_ee (cond,
6187 mode,
6188 cmp_op1, cmp_op2)));
6189 if (neg_p)
6190 emit_insn (gen_rtx_SET (target, gen_rtx_NOT (mode, target)));
6191 }
6192
6193 /* Expand the comparison CODE of CMP1 and CMP2 and copy 1 or 0 into
6194 TARGET if either all (ALL_P is true) or any (ALL_P is false) of the
6195 elements in CMP1 and CMP2 fulfill the comparison. */
6196 void
6197 s390_expand_vec_compare_cc (rtx target, enum rtx_code code,
6198 rtx cmp1, rtx cmp2, bool all_p)
6199 {
6200 enum rtx_code new_code = code;
6201 machine_mode cmp_mode, full_cmp_mode, scratch_mode;
6202 rtx tmp_reg = gen_reg_rtx (SImode);
6203 bool swap_p = false;
6204
6205 if (GET_MODE_CLASS (GET_MODE (cmp1)) == MODE_VECTOR_INT)
6206 {
6207 switch (code)
6208 {
6209 case EQ: cmp_mode = CCVEQmode; break;
6210 case NE: cmp_mode = CCVEQmode; break;
6211 case GT: cmp_mode = CCVHmode; break;
6212 case GE: cmp_mode = CCVHmode; new_code = LE; swap_p = true; break;
6213 case LT: cmp_mode = CCVHmode; new_code = GT; swap_p = true; break;
6214 case LE: cmp_mode = CCVHmode; new_code = LE; break;
6215 case GTU: cmp_mode = CCVHUmode; break;
6216 case GEU: cmp_mode = CCVHUmode; new_code = LEU; swap_p = true; break;
6217 case LTU: cmp_mode = CCVHUmode; new_code = GTU; swap_p = true; break;
6218 case LEU: cmp_mode = CCVHUmode; new_code = LEU; break;
6219 default: gcc_unreachable ();
6220 }
6221 scratch_mode = GET_MODE (cmp1);
6222 }
6223 else if (GET_MODE (cmp1) == V2DFmode)
6224 {
6225 switch (code)
6226 {
6227 case EQ: cmp_mode = CCVEQmode; break;
6228 case NE: cmp_mode = CCVEQmode; break;
6229 case GT: cmp_mode = CCVFHmode; break;
6230 case GE: cmp_mode = CCVFHEmode; break;
6231 case UNLE: cmp_mode = CCVFHmode; break;
6232 case UNLT: cmp_mode = CCVFHEmode; break;
6233 case LT: cmp_mode = CCVFHmode; new_code = GT; swap_p = true; break;
6234 case LE: cmp_mode = CCVFHEmode; new_code = GE; swap_p = true; break;
6235 default: gcc_unreachable ();
6236 }
6237 scratch_mode = V2DImode;
6238 }
6239 else
6240 gcc_unreachable ();
6241
6242 if (!all_p)
6243 switch (cmp_mode)
6244 {
6245 case CCVEQmode: full_cmp_mode = CCVEQANYmode; break;
6246 case CCVHmode: full_cmp_mode = CCVHANYmode; break;
6247 case CCVHUmode: full_cmp_mode = CCVHUANYmode; break;
6248 case CCVFHmode: full_cmp_mode = CCVFHANYmode; break;
6249 case CCVFHEmode: full_cmp_mode = CCVFHEANYmode; break;
6250 default: gcc_unreachable ();
6251 }
6252 else
6253 /* The modes without ANY match the ALL modes. */
6254 full_cmp_mode = cmp_mode;
6255
6256 if (swap_p)
6257 {
6258 rtx tmp = cmp2;
6259 cmp2 = cmp1;
6260 cmp1 = tmp;
6261 }
6262
6263 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6264 gen_rtvec (2, gen_rtx_SET (
6265 gen_rtx_REG (cmp_mode, CC_REGNUM),
6266 gen_rtx_COMPARE (cmp_mode, cmp1, cmp2)),
6267 gen_rtx_CLOBBER (VOIDmode,
6268 gen_rtx_SCRATCH (scratch_mode)))));
6269 emit_move_insn (target, const0_rtx);
6270 emit_move_insn (tmp_reg, const1_rtx);
6271
6272 emit_move_insn (target,
6273 gen_rtx_IF_THEN_ELSE (SImode,
6274 gen_rtx_fmt_ee (new_code, VOIDmode,
6275 gen_rtx_REG (full_cmp_mode, CC_REGNUM),
6276 const0_rtx),
6277 target, tmp_reg));
6278 }
6279
6280 /* Generate a vector comparison expression loading either elements of
6281 THEN or ELS into TARGET depending on the comparison COND of CMP_OP1
6282 and CMP_OP2. */
6283
6284 void
6285 s390_expand_vcond (rtx target, rtx then, rtx els,
6286 enum rtx_code cond, rtx cmp_op1, rtx cmp_op2)
6287 {
6288 rtx tmp;
6289 machine_mode result_mode;
6290 rtx result_target;
6291
6292 machine_mode target_mode = GET_MODE (target);
6293 machine_mode cmp_mode = GET_MODE (cmp_op1);
6294 rtx op = (cond == LT) ? els : then;
6295
6296 /* Try to optimize x < 0 ? -1 : 0 into (signed) x >> 31
6297 and x < 0 ? 1 : 0 into (unsigned) x >> 31. Likewise
6298 for short and byte (x >> 15 and x >> 7 respectively). */
6299 if ((cond == LT || cond == GE)
6300 && target_mode == cmp_mode
6301 && cmp_op2 == CONST0_RTX (cmp_mode)
6302 && op == CONST0_RTX (target_mode)
6303 && s390_vector_mode_supported_p (target_mode)
6304 && GET_MODE_CLASS (target_mode) == MODE_VECTOR_INT)
6305 {
6306 rtx negop = (cond == LT) ? then : els;
6307
6308 int shift = GET_MODE_BITSIZE (GET_MODE_INNER (target_mode)) - 1;
6309
6310 /* if x < 0 ? 1 : 0 or if x >= 0 ? 0 : 1 */
6311 if (negop == CONST1_RTX (target_mode))
6312 {
6313 rtx res = expand_simple_binop (cmp_mode, LSHIFTRT, cmp_op1,
6314 GEN_INT (shift), target,
6315 1, OPTAB_DIRECT);
6316 if (res != target)
6317 emit_move_insn (target, res);
6318 return;
6319 }
6320
6321 /* if x < 0 ? -1 : 0 or if x >= 0 ? 0 : -1 */
6322 else if (all_ones_operand (negop, target_mode))
6323 {
6324 rtx res = expand_simple_binop (cmp_mode, ASHIFTRT, cmp_op1,
6325 GEN_INT (shift), target,
6326 0, OPTAB_DIRECT);
6327 if (res != target)
6328 emit_move_insn (target, res);
6329 return;
6330 }
6331 }
6332
6333 /* We always use an integral type vector to hold the comparison
6334 result. */
6335 result_mode = cmp_mode == V2DFmode ? V2DImode : cmp_mode;
6336 result_target = gen_reg_rtx (result_mode);
6337
6338 /* We allow vector immediates as comparison operands that
6339 can be handled by the optimization above but not by the
6340 following code. Hence, force them into registers here. */
6341 if (!REG_P (cmp_op1))
6342 cmp_op1 = force_reg (GET_MODE (cmp_op1), cmp_op1);
6343
6344 if (!REG_P (cmp_op2))
6345 cmp_op2 = force_reg (GET_MODE (cmp_op2), cmp_op2);
6346
6347 s390_expand_vec_compare (result_target, cond,
6348 cmp_op1, cmp_op2);
6349
6350 /* If the results are supposed to be either -1 or 0 we are done
6351 since this is what our compare instructions generate anyway. */
6352 if (all_ones_operand (then, GET_MODE (then))
6353 && const0_operand (els, GET_MODE (els)))
6354 {
6355 emit_move_insn (target, gen_rtx_SUBREG (target_mode,
6356 result_target, 0));
6357 return;
6358 }
6359
6360 /* Otherwise we will do a vsel afterwards. */
6361 /* This gets triggered e.g.
6362 with gcc.c-torture/compile/pr53410-1.c */
6363 if (!REG_P (then))
6364 then = force_reg (target_mode, then);
6365
6366 if (!REG_P (els))
6367 els = force_reg (target_mode, els);
6368
6369 tmp = gen_rtx_fmt_ee (EQ, VOIDmode,
6370 result_target,
6371 CONST0_RTX (result_mode));
6372
6373 /* We compared the result against zero above so we have to swap then
6374 and els here. */
6375 tmp = gen_rtx_IF_THEN_ELSE (target_mode, tmp, els, then);
6376
6377 gcc_assert (target_mode == GET_MODE (then));
6378 emit_insn (gen_rtx_SET (target, tmp));
6379 }
6380
6381 /* Emit the RTX necessary to initialize the vector TARGET with values
6382 in VALS. */
6383 void
6384 s390_expand_vec_init (rtx target, rtx vals)
6385 {
6386 machine_mode mode = GET_MODE (target);
6387 machine_mode inner_mode = GET_MODE_INNER (mode);
6388 int n_elts = GET_MODE_NUNITS (mode);
6389 bool all_same = true, all_regs = true, all_const_int = true;
6390 rtx x;
6391 int i;
6392
6393 for (i = 0; i < n_elts; ++i)
6394 {
6395 x = XVECEXP (vals, 0, i);
6396
6397 if (!CONST_INT_P (x))
6398 all_const_int = false;
6399
6400 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6401 all_same = false;
6402
6403 if (!REG_P (x))
6404 all_regs = false;
6405 }
6406
6407 /* Use vector gen mask or vector gen byte mask if possible. */
6408 if (all_same && all_const_int
6409 && (XVECEXP (vals, 0, 0) == const0_rtx
6410 || s390_contiguous_bitmask_vector_p (XVECEXP (vals, 0, 0),
6411 NULL, NULL)
6412 || s390_bytemask_vector_p (XVECEXP (vals, 0, 0), NULL)))
6413 {
6414 emit_insn (gen_rtx_SET (target,
6415 gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0))));
6416 return;
6417 }
6418
6419 if (all_same)
6420 {
6421 emit_insn (gen_rtx_SET (target,
6422 gen_rtx_VEC_DUPLICATE (mode,
6423 XVECEXP (vals, 0, 0))));
6424 return;
6425 }
6426
6427 if (all_regs && REG_P (target) && n_elts == 2 && inner_mode == DImode)
6428 {
6429 /* Use vector load pair. */
6430 emit_insn (gen_rtx_SET (target,
6431 gen_rtx_VEC_CONCAT (mode,
6432 XVECEXP (vals, 0, 0),
6433 XVECEXP (vals, 0, 1))));
6434 return;
6435 }
6436
6437 /* We are about to set the vector elements one by one. Zero out the
6438 full register first in order to help the data flow framework to
6439 detect it as full VR set. */
6440 emit_insn (gen_rtx_SET (target, CONST0_RTX (mode)));
6441
6442 /* Unfortunately the vec_init expander is not allowed to fail. So
6443 we have to implement the fallback ourselves. */
6444 for (i = 0; i < n_elts; i++)
6445 emit_insn (gen_rtx_SET (target,
6446 gen_rtx_UNSPEC (mode,
6447 gen_rtvec (3, XVECEXP (vals, 0, i),
6448 GEN_INT (i), target),
6449 UNSPEC_VEC_SET)));
6450 }
6451
6452 /* Structure to hold the initial parameters for a compare_and_swap operation
6453 in HImode and QImode. */
6454
6455 struct alignment_context
6456 {
6457 rtx memsi; /* SI aligned memory location. */
6458 rtx shift; /* Bit offset with regard to lsb. */
6459 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
6460 rtx modemaski; /* ~modemask */
6461 bool aligned; /* True if memory is aligned, false else. */
6462 };
6463
6464 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
6465 structure AC for transparent simplifying, if the memory alignment is known
6466 to be at least 32bit. MEM is the memory location for the actual operation
6467 and MODE its mode. */
6468
6469 static void
6470 init_alignment_context (struct alignment_context *ac, rtx mem,
6471 machine_mode mode)
6472 {
6473 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
6474 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
6475
6476 if (ac->aligned)
6477 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
6478 else
6479 {
6480 /* Alignment is unknown. */
6481 rtx byteoffset, addr, align;
6482
6483 /* Force the address into a register. */
6484 addr = force_reg (Pmode, XEXP (mem, 0));
6485
6486 /* Align it to SImode. */
6487 align = expand_simple_binop (Pmode, AND, addr,
6488 GEN_INT (-GET_MODE_SIZE (SImode)),
6489 NULL_RTX, 1, OPTAB_DIRECT);
6490 /* Generate MEM. */
6491 ac->memsi = gen_rtx_MEM (SImode, align);
6492 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
6493 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
6494 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
6495
6496 /* Calculate shiftcount. */
6497 byteoffset = expand_simple_binop (Pmode, AND, addr,
6498 GEN_INT (GET_MODE_SIZE (SImode) - 1),
6499 NULL_RTX, 1, OPTAB_DIRECT);
6500 /* As we already have some offset, evaluate the remaining distance. */
6501 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
6502 NULL_RTX, 1, OPTAB_DIRECT);
6503 }
6504
6505 /* Shift is the byte count, but we need the bitcount. */
6506 ac->shift = expand_simple_binop (SImode, ASHIFT, ac->shift, GEN_INT (3),
6507 NULL_RTX, 1, OPTAB_DIRECT);
6508
6509 /* Calculate masks. */
6510 ac->modemask = expand_simple_binop (SImode, ASHIFT,
6511 GEN_INT (GET_MODE_MASK (mode)),
6512 ac->shift, NULL_RTX, 1, OPTAB_DIRECT);
6513 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask,
6514 NULL_RTX, 1);
6515 }
6516
6517 /* A subroutine of s390_expand_cs_hqi. Insert INS into VAL. If possible,
6518 use a single insv insn into SEQ2. Otherwise, put prep insns in SEQ1 and
6519 perform the merge in SEQ2. */
6520
6521 static rtx
6522 s390_two_part_insv (struct alignment_context *ac, rtx *seq1, rtx *seq2,
6523 machine_mode mode, rtx val, rtx ins)
6524 {
6525 rtx tmp;
6526
6527 if (ac->aligned)
6528 {
6529 start_sequence ();
6530 tmp = copy_to_mode_reg (SImode, val);
6531 if (s390_expand_insv (tmp, GEN_INT (GET_MODE_BITSIZE (mode)),
6532 const0_rtx, ins))
6533 {
6534 *seq1 = NULL;
6535 *seq2 = get_insns ();
6536 end_sequence ();
6537 return tmp;
6538 }
6539 end_sequence ();
6540 }
6541
6542 /* Failed to use insv. Generate a two part shift and mask. */
6543 start_sequence ();
6544 tmp = s390_expand_mask_and_shift (ins, mode, ac->shift);
6545 *seq1 = get_insns ();
6546 end_sequence ();
6547
6548 start_sequence ();
6549 tmp = expand_simple_binop (SImode, IOR, tmp, val, NULL_RTX, 1, OPTAB_DIRECT);
6550 *seq2 = get_insns ();
6551 end_sequence ();
6552
6553 return tmp;
6554 }
6555
6556 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
6557 the memory location, CMP the old value to compare MEM with and NEW_RTX the
6558 value to set if CMP == MEM. */
6559
6560 void
6561 s390_expand_cs_hqi (machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
6562 rtx cmp, rtx new_rtx, bool is_weak)
6563 {
6564 struct alignment_context ac;
6565 rtx cmpv, newv, val, cc, seq0, seq1, seq2, seq3;
6566 rtx res = gen_reg_rtx (SImode);
6567 rtx_code_label *csloop = NULL, *csend = NULL;
6568
6569 gcc_assert (MEM_P (mem));
6570
6571 init_alignment_context (&ac, mem, mode);
6572
6573 /* Load full word. Subsequent loads are performed by CS. */
6574 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
6575 NULL_RTX, 1, OPTAB_DIRECT);
6576
6577 /* Prepare insertions of cmp and new_rtx into the loaded value. When
6578 possible, we try to use insv to make this happen efficiently. If
6579 that fails we'll generate code both inside and outside the loop. */
6580 cmpv = s390_two_part_insv (&ac, &seq0, &seq2, mode, val, cmp);
6581 newv = s390_two_part_insv (&ac, &seq1, &seq3, mode, val, new_rtx);
6582
6583 if (seq0)
6584 emit_insn (seq0);
6585 if (seq1)
6586 emit_insn (seq1);
6587
6588 /* Start CS loop. */
6589 if (!is_weak)
6590 {
6591 /* Begin assuming success. */
6592 emit_move_insn (btarget, const1_rtx);
6593
6594 csloop = gen_label_rtx ();
6595 csend = gen_label_rtx ();
6596 emit_label (csloop);
6597 }
6598
6599 /* val = "<mem>00..0<mem>"
6600 * cmp = "00..0<cmp>00..0"
6601 * new = "00..0<new>00..0"
6602 */
6603
6604 emit_insn (seq2);
6605 emit_insn (seq3);
6606
6607 cc = s390_emit_compare_and_swap (EQ, res, ac.memsi, cmpv, newv);
6608 if (is_weak)
6609 emit_insn (gen_cstorecc4 (btarget, cc, XEXP (cc, 0), XEXP (cc, 1)));
6610 else
6611 {
6612 rtx tmp;
6613
6614 /* Jump to end if we're done (likely?). */
6615 s390_emit_jump (csend, cc);
6616
6617 /* Check for changes outside mode, and loop internal if so.
6618 Arrange the moves so that the compare is adjacent to the
6619 branch so that we can generate CRJ. */
6620 tmp = copy_to_reg (val);
6621 force_expand_binop (SImode, and_optab, res, ac.modemaski, val,
6622 1, OPTAB_DIRECT);
6623 cc = s390_emit_compare (NE, val, tmp);
6624 s390_emit_jump (csloop, cc);
6625
6626 /* Failed. */
6627 emit_move_insn (btarget, const0_rtx);
6628 emit_label (csend);
6629 }
6630
6631 /* Return the correct part of the bitfield. */
6632 convert_move (vtarget, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
6633 NULL_RTX, 1, OPTAB_DIRECT), 1);
6634 }
6635
6636 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
6637 and VAL the value to play with. If AFTER is true then store the value
6638 MEM holds after the operation, if AFTER is false then store the value MEM
6639 holds before the operation. If TARGET is zero then discard that value, else
6640 store it to TARGET. */
6641
6642 void
6643 s390_expand_atomic (machine_mode mode, enum rtx_code code,
6644 rtx target, rtx mem, rtx val, bool after)
6645 {
6646 struct alignment_context ac;
6647 rtx cmp;
6648 rtx new_rtx = gen_reg_rtx (SImode);
6649 rtx orig = gen_reg_rtx (SImode);
6650 rtx_code_label *csloop = gen_label_rtx ();
6651
6652 gcc_assert (!target || register_operand (target, VOIDmode));
6653 gcc_assert (MEM_P (mem));
6654
6655 init_alignment_context (&ac, mem, mode);
6656
6657 /* Shift val to the correct bit positions.
6658 Preserve "icm", but prevent "ex icm". */
6659 if (!(ac.aligned && code == SET && MEM_P (val)))
6660 val = s390_expand_mask_and_shift (val, mode, ac.shift);
6661
6662 /* Further preparation insns. */
6663 if (code == PLUS || code == MINUS)
6664 emit_move_insn (orig, val);
6665 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
6666 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
6667 NULL_RTX, 1, OPTAB_DIRECT);
6668
6669 /* Load full word. Subsequent loads are performed by CS. */
6670 cmp = force_reg (SImode, ac.memsi);
6671
6672 /* Start CS loop. */
6673 emit_label (csloop);
6674 emit_move_insn (new_rtx, cmp);
6675
6676 /* Patch new with val at correct position. */
6677 switch (code)
6678 {
6679 case PLUS:
6680 case MINUS:
6681 val = expand_simple_binop (SImode, code, new_rtx, orig,
6682 NULL_RTX, 1, OPTAB_DIRECT);
6683 val = expand_simple_binop (SImode, AND, val, ac.modemask,
6684 NULL_RTX, 1, OPTAB_DIRECT);
6685 /* FALLTHRU */
6686 case SET:
6687 if (ac.aligned && MEM_P (val))
6688 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0,
6689 0, 0, SImode, val, false);
6690 else
6691 {
6692 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
6693 NULL_RTX, 1, OPTAB_DIRECT);
6694 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
6695 NULL_RTX, 1, OPTAB_DIRECT);
6696 }
6697 break;
6698 case AND:
6699 case IOR:
6700 case XOR:
6701 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
6702 NULL_RTX, 1, OPTAB_DIRECT);
6703 break;
6704 case MULT: /* NAND */
6705 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
6706 NULL_RTX, 1, OPTAB_DIRECT);
6707 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
6708 NULL_RTX, 1, OPTAB_DIRECT);
6709 break;
6710 default:
6711 gcc_unreachable ();
6712 }
6713
6714 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
6715 ac.memsi, cmp, new_rtx));
6716
6717 /* Return the correct part of the bitfield. */
6718 if (target)
6719 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
6720 after ? new_rtx : cmp, ac.shift,
6721 NULL_RTX, 1, OPTAB_DIRECT), 1);
6722 }
6723
6724 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
6725 We need to emit DTP-relative relocations. */
6726
6727 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
6728
6729 static void
6730 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
6731 {
6732 switch (size)
6733 {
6734 case 4:
6735 fputs ("\t.long\t", file);
6736 break;
6737 case 8:
6738 fputs ("\t.quad\t", file);
6739 break;
6740 default:
6741 gcc_unreachable ();
6742 }
6743 output_addr_const (file, x);
6744 fputs ("@DTPOFF", file);
6745 }
6746
6747 /* Return the proper mode for REGNO being represented in the dwarf
6748 unwind table. */
6749 machine_mode
6750 s390_dwarf_frame_reg_mode (int regno)
6751 {
6752 machine_mode save_mode = default_dwarf_frame_reg_mode (regno);
6753
6754 /* Make sure not to return DImode for any GPR with -m31 -mzarch. */
6755 if (GENERAL_REGNO_P (regno))
6756 save_mode = Pmode;
6757
6758 /* The rightmost 64 bits of vector registers are call-clobbered. */
6759 if (GET_MODE_SIZE (save_mode) > 8)
6760 save_mode = DImode;
6761
6762 return save_mode;
6763 }
6764
6765 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
6766 /* Implement TARGET_MANGLE_TYPE. */
6767
6768 static const char *
6769 s390_mangle_type (const_tree type)
6770 {
6771 type = TYPE_MAIN_VARIANT (type);
6772
6773 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
6774 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
6775 return NULL;
6776
6777 if (type == s390_builtin_types[BT_BV16QI]) return "U6__boolc";
6778 if (type == s390_builtin_types[BT_BV8HI]) return "U6__bools";
6779 if (type == s390_builtin_types[BT_BV4SI]) return "U6__booli";
6780 if (type == s390_builtin_types[BT_BV2DI]) return "U6__booll";
6781
6782 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
6783 && TARGET_LONG_DOUBLE_128)
6784 return "g";
6785
6786 /* For all other types, use normal C++ mangling. */
6787 return NULL;
6788 }
6789 #endif
6790
6791 /* In the name of slightly smaller debug output, and to cater to
6792 general assembler lossage, recognize various UNSPEC sequences
6793 and turn them back into a direct symbol reference. */
6794
6795 static rtx
6796 s390_delegitimize_address (rtx orig_x)
6797 {
6798 rtx x, y;
6799
6800 orig_x = delegitimize_mem_from_attrs (orig_x);
6801 x = orig_x;
6802
6803 /* Extract the symbol ref from:
6804 (plus:SI (reg:SI 12 %r12)
6805 (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
6806 UNSPEC_GOTOFF/PLTOFF)))
6807 and
6808 (plus:SI (reg:SI 12 %r12)
6809 (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
6810 UNSPEC_GOTOFF/PLTOFF)
6811 (const_int 4 [0x4])))) */
6812 if (GET_CODE (x) == PLUS
6813 && REG_P (XEXP (x, 0))
6814 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
6815 && GET_CODE (XEXP (x, 1)) == CONST)
6816 {
6817 HOST_WIDE_INT offset = 0;
6818
6819 /* The const operand. */
6820 y = XEXP (XEXP (x, 1), 0);
6821
6822 if (GET_CODE (y) == PLUS
6823 && GET_CODE (XEXP (y, 1)) == CONST_INT)
6824 {
6825 offset = INTVAL (XEXP (y, 1));
6826 y = XEXP (y, 0);
6827 }
6828
6829 if (GET_CODE (y) == UNSPEC
6830 && (XINT (y, 1) == UNSPEC_GOTOFF
6831 || XINT (y, 1) == UNSPEC_PLTOFF))
6832 return plus_constant (Pmode, XVECEXP (y, 0, 0), offset);
6833 }
6834
6835 if (GET_CODE (x) != MEM)
6836 return orig_x;
6837
6838 x = XEXP (x, 0);
6839 if (GET_CODE (x) == PLUS
6840 && GET_CODE (XEXP (x, 1)) == CONST
6841 && GET_CODE (XEXP (x, 0)) == REG
6842 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
6843 {
6844 y = XEXP (XEXP (x, 1), 0);
6845 if (GET_CODE (y) == UNSPEC
6846 && XINT (y, 1) == UNSPEC_GOT)
6847 y = XVECEXP (y, 0, 0);
6848 else
6849 return orig_x;
6850 }
6851 else if (GET_CODE (x) == CONST)
6852 {
6853 /* Extract the symbol ref from:
6854 (mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
6855 UNSPEC_PLT/GOTENT))) */
6856
6857 y = XEXP (x, 0);
6858 if (GET_CODE (y) == UNSPEC
6859 && (XINT (y, 1) == UNSPEC_GOTENT
6860 || XINT (y, 1) == UNSPEC_PLT))
6861 y = XVECEXP (y, 0, 0);
6862 else
6863 return orig_x;
6864 }
6865 else
6866 return orig_x;
6867
6868 if (GET_MODE (orig_x) != Pmode)
6869 {
6870 if (GET_MODE (orig_x) == BLKmode)
6871 return orig_x;
6872 y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
6873 if (y == NULL_RTX)
6874 return orig_x;
6875 }
6876 return y;
6877 }
6878
6879 /* Output operand OP to stdio stream FILE.
6880 OP is an address (register + offset) which is not used to address data;
6881 instead the rightmost bits are interpreted as the value. */
6882
6883 static void
6884 print_addrstyle_operand (FILE *file, rtx op)
6885 {
6886 HOST_WIDE_INT offset;
6887 rtx base;
6888
6889 /* Extract base register and offset. */
6890 if (!s390_decompose_addrstyle_without_index (op, &base, &offset))
6891 gcc_unreachable ();
6892
6893 /* Sanity check. */
6894 if (base)
6895 {
6896 gcc_assert (GET_CODE (base) == REG);
6897 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
6898 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
6899 }
6900
6901 /* Offsets are constricted to twelve bits. */
6902 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
6903 if (base)
6904 fprintf (file, "(%s)", reg_names[REGNO (base)]);
6905 }
6906
6907 /* Assigns the number of NOP halfwords to be emitted before and after the
6908 function label to *HW_BEFORE and *HW_AFTER. Both pointers must not be NULL.
6909 If hotpatching is disabled for the function, the values are set to zero.
6910 */
6911
6912 static void
6913 s390_function_num_hotpatch_hw (tree decl,
6914 int *hw_before,
6915 int *hw_after)
6916 {
6917 tree attr;
6918
6919 attr = lookup_attribute ("hotpatch", DECL_ATTRIBUTES (decl));
6920
6921 /* Handle the arguments of the hotpatch attribute. The values
6922 specified via attribute might override the cmdline argument
6923 values. */
6924 if (attr)
6925 {
6926 tree args = TREE_VALUE (attr);
6927
6928 *hw_before = TREE_INT_CST_LOW (TREE_VALUE (args));
6929 *hw_after = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (args)));
6930 }
6931 else
6932 {
6933 /* Use the values specified by the cmdline arguments. */
6934 *hw_before = s390_hotpatch_hw_before_label;
6935 *hw_after = s390_hotpatch_hw_after_label;
6936 }
6937 }
6938
6939 /* Write the current .machine and .machinemode specification to the assembler
6940 file. */
6941
6942 #ifdef HAVE_AS_MACHINE_MACHINEMODE
6943 static void
6944 s390_asm_output_machine_for_arch (FILE *asm_out_file)
6945 {
6946 fprintf (asm_out_file, "\t.machinemode %s\n",
6947 (TARGET_ZARCH) ? "zarch" : "esa");
6948 fprintf (asm_out_file, "\t.machine \"%s", processor_table[s390_arch].name);
6949 if (S390_USE_ARCHITECTURE_MODIFIERS)
6950 {
6951 int cpu_flags;
6952
6953 cpu_flags = processor_flags_table[(int) s390_arch];
6954 if (TARGET_HTM && !(cpu_flags & PF_TX))
6955 fprintf (asm_out_file, "+htm");
6956 else if (!TARGET_HTM && (cpu_flags & PF_TX))
6957 fprintf (asm_out_file, "+nohtm");
6958 if (TARGET_VX && !(cpu_flags & PF_VX))
6959 fprintf (asm_out_file, "+vx");
6960 else if (!TARGET_VX && (cpu_flags & PF_VX))
6961 fprintf (asm_out_file, "+novx");
6962 }
6963 fprintf (asm_out_file, "\"\n");
6964 }
6965
6966 /* Write an extra function header before the very start of the function. */
6967
6968 void
6969 s390_asm_output_function_prefix (FILE *asm_out_file,
6970 const char *fnname ATTRIBUTE_UNUSED)
6971 {
6972 if (DECL_FUNCTION_SPECIFIC_TARGET (current_function_decl) == NULL)
6973 return;
6974 /* Since only the function specific options are saved but not the indications
6975 which options are set, it's too much work here to figure out which options
6976 have actually changed. Thus, generate .machine and .machinemode whenever a
6977 function has the target attribute or pragma. */
6978 fprintf (asm_out_file, "\t.machinemode push\n");
6979 fprintf (asm_out_file, "\t.machine push\n");
6980 s390_asm_output_machine_for_arch (asm_out_file);
6981 }
6982
6983 /* Write an extra function footer after the very end of the function. */
6984
6985 void
6986 s390_asm_declare_function_size (FILE *asm_out_file,
6987 const char *fnname, tree decl)
6988 {
6989 if (!flag_inhibit_size_directive)
6990 ASM_OUTPUT_MEASURED_SIZE (asm_out_file, fnname);
6991 if (DECL_FUNCTION_SPECIFIC_TARGET (decl) == NULL)
6992 return;
6993 fprintf (asm_out_file, "\t.machine pop\n");
6994 fprintf (asm_out_file, "\t.machinemode pop\n");
6995 }
6996 #endif
6997
6998 /* Write the extra assembler code needed to declare a function properly. */
6999
7000 void
7001 s390_asm_output_function_label (FILE *asm_out_file, const char *fname,
7002 tree decl)
7003 {
7004 int hw_before, hw_after;
7005
7006 s390_function_num_hotpatch_hw (decl, &hw_before, &hw_after);
7007 if (hw_before > 0)
7008 {
7009 unsigned int function_alignment;
7010 int i;
7011
7012 /* Add a trampoline code area before the function label and initialize it
7013 with two-byte nop instructions. This area can be overwritten with code
7014 that jumps to a patched version of the function. */
7015 asm_fprintf (asm_out_file, "\tnopr\t%%r7"
7016 "\t# pre-label NOPs for hotpatch (%d halfwords)\n",
7017 hw_before);
7018 for (i = 1; i < hw_before; i++)
7019 fputs ("\tnopr\t%r7\n", asm_out_file);
7020
7021 /* Note: The function label must be aligned so that (a) the bytes of the
7022 following nop do not cross a cacheline boundary, and (b) a jump address
7023 (eight bytes for 64 bit targets, 4 bytes for 32 bit targets) can be
7024 stored directly before the label without crossing a cacheline
7025 boundary. All this is necessary to make sure the trampoline code can
7026 be changed atomically.
7027 This alignment is done automatically using the FOUNCTION_BOUNDARY, but
7028 if there are NOPs before the function label, the alignment is placed
7029 before them. So it is necessary to duplicate the alignment after the
7030 NOPs. */
7031 function_alignment = MAX (8, DECL_ALIGN (decl) / BITS_PER_UNIT);
7032 if (! DECL_USER_ALIGN (decl))
7033 function_alignment = MAX (function_alignment,
7034 (unsigned int) align_functions);
7035 fputs ("\t# alignment for hotpatch\n", asm_out_file);
7036 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (function_alignment));
7037 }
7038
7039 if (S390_USE_TARGET_ATTRIBUTE && TARGET_DEBUG_ARG)
7040 {
7041 asm_fprintf (asm_out_file, "\t# fn:%s ar%d\n", fname, s390_arch);
7042 asm_fprintf (asm_out_file, "\t# fn:%s tu%d\n", fname, s390_tune);
7043 asm_fprintf (asm_out_file, "\t# fn:%s sg%d\n", fname, s390_stack_guard);
7044 asm_fprintf (asm_out_file, "\t# fn:%s ss%d\n", fname, s390_stack_size);
7045 asm_fprintf (asm_out_file, "\t# fn:%s bc%d\n", fname, s390_branch_cost);
7046 asm_fprintf (asm_out_file, "\t# fn:%s wf%d\n", fname,
7047 s390_warn_framesize);
7048 asm_fprintf (asm_out_file, "\t# fn:%s ba%d\n", fname, TARGET_BACKCHAIN);
7049 asm_fprintf (asm_out_file, "\t# fn:%s hd%d\n", fname, TARGET_HARD_DFP);
7050 asm_fprintf (asm_out_file, "\t# fn:%s hf%d\n", fname, !TARGET_SOFT_FLOAT);
7051 asm_fprintf (asm_out_file, "\t# fn:%s ht%d\n", fname, TARGET_OPT_HTM);
7052 asm_fprintf (asm_out_file, "\t# fn:%s vx%d\n", fname, TARGET_OPT_VX);
7053 asm_fprintf (asm_out_file, "\t# fn:%s ps%d\n", fname,
7054 TARGET_PACKED_STACK);
7055 asm_fprintf (asm_out_file, "\t# fn:%s se%d\n", fname, TARGET_SMALL_EXEC);
7056 asm_fprintf (asm_out_file, "\t# fn:%s mv%d\n", fname, TARGET_MVCLE);
7057 asm_fprintf (asm_out_file, "\t# fn:%s zv%d\n", fname, TARGET_ZVECTOR);
7058 asm_fprintf (asm_out_file, "\t# fn:%s wd%d\n", fname,
7059 s390_warn_dynamicstack_p);
7060 }
7061 ASM_OUTPUT_LABEL (asm_out_file, fname);
7062 if (hw_after > 0)
7063 asm_fprintf (asm_out_file,
7064 "\t# post-label NOPs for hotpatch (%d halfwords)\n",
7065 hw_after);
7066 }
7067
7068 /* Output machine-dependent UNSPECs occurring in address constant X
7069 in assembler syntax to stdio stream FILE. Returns true if the
7070 constant X could be recognized, false otherwise. */
7071
7072 static bool
7073 s390_output_addr_const_extra (FILE *file, rtx x)
7074 {
7075 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
7076 switch (XINT (x, 1))
7077 {
7078 case UNSPEC_GOTENT:
7079 output_addr_const (file, XVECEXP (x, 0, 0));
7080 fprintf (file, "@GOTENT");
7081 return true;
7082 case UNSPEC_GOT:
7083 output_addr_const (file, XVECEXP (x, 0, 0));
7084 fprintf (file, "@GOT");
7085 return true;
7086 case UNSPEC_GOTOFF:
7087 output_addr_const (file, XVECEXP (x, 0, 0));
7088 fprintf (file, "@GOTOFF");
7089 return true;
7090 case UNSPEC_PLT:
7091 output_addr_const (file, XVECEXP (x, 0, 0));
7092 fprintf (file, "@PLT");
7093 return true;
7094 case UNSPEC_PLTOFF:
7095 output_addr_const (file, XVECEXP (x, 0, 0));
7096 fprintf (file, "@PLTOFF");
7097 return true;
7098 case UNSPEC_TLSGD:
7099 output_addr_const (file, XVECEXP (x, 0, 0));
7100 fprintf (file, "@TLSGD");
7101 return true;
7102 case UNSPEC_TLSLDM:
7103 assemble_name (file, get_some_local_dynamic_name ());
7104 fprintf (file, "@TLSLDM");
7105 return true;
7106 case UNSPEC_DTPOFF:
7107 output_addr_const (file, XVECEXP (x, 0, 0));
7108 fprintf (file, "@DTPOFF");
7109 return true;
7110 case UNSPEC_NTPOFF:
7111 output_addr_const (file, XVECEXP (x, 0, 0));
7112 fprintf (file, "@NTPOFF");
7113 return true;
7114 case UNSPEC_GOTNTPOFF:
7115 output_addr_const (file, XVECEXP (x, 0, 0));
7116 fprintf (file, "@GOTNTPOFF");
7117 return true;
7118 case UNSPEC_INDNTPOFF:
7119 output_addr_const (file, XVECEXP (x, 0, 0));
7120 fprintf (file, "@INDNTPOFF");
7121 return true;
7122 }
7123
7124 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
7125 switch (XINT (x, 1))
7126 {
7127 case UNSPEC_POOL_OFFSET:
7128 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
7129 output_addr_const (file, x);
7130 return true;
7131 }
7132 return false;
7133 }
7134
7135 /* Output address operand ADDR in assembler syntax to
7136 stdio stream FILE. */
7137
7138 void
7139 print_operand_address (FILE *file, rtx addr)
7140 {
7141 struct s390_address ad;
7142
7143 if (s390_loadrelative_operand_p (addr, NULL, NULL))
7144 {
7145 if (!TARGET_Z10)
7146 {
7147 output_operand_lossage ("symbolic memory references are "
7148 "only supported on z10 or later");
7149 return;
7150 }
7151 output_addr_const (file, addr);
7152 return;
7153 }
7154
7155 if (!s390_decompose_address (addr, &ad)
7156 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7157 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
7158 output_operand_lossage ("cannot decompose address");
7159
7160 if (ad.disp)
7161 output_addr_const (file, ad.disp);
7162 else
7163 fprintf (file, "0");
7164
7165 if (ad.base && ad.indx)
7166 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
7167 reg_names[REGNO (ad.base)]);
7168 else if (ad.base)
7169 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
7170 }
7171
7172 /* Output operand X in assembler syntax to stdio stream FILE.
7173 CODE specified the format flag. The following format flags
7174 are recognized:
7175
7176 'C': print opcode suffix for branch condition.
7177 'D': print opcode suffix for inverse branch condition.
7178 'E': print opcode suffix for branch on index instruction.
7179 'G': print the size of the operand in bytes.
7180 'J': print tls_load/tls_gdcall/tls_ldcall suffix
7181 'M': print the second word of a TImode operand.
7182 'N': print the second word of a DImode operand.
7183 'O': print only the displacement of a memory reference or address.
7184 'R': print only the base register of a memory reference or address.
7185 'S': print S-type memory reference (base+displacement).
7186 'Y': print address style operand without index (e.g. shift count or setmem
7187 operand).
7188
7189 'b': print integer X as if it's an unsigned byte.
7190 'c': print integer X as if it's an signed byte.
7191 'e': "end" contiguous bitmask X in either DImode or vector inner mode.
7192 'f': "end" contiguous bitmask X in SImode.
7193 'h': print integer X as if it's a signed halfword.
7194 'i': print the first nonzero HImode part of X.
7195 'j': print the first HImode part unequal to -1 of X.
7196 'k': print the first nonzero SImode part of X.
7197 'm': print the first SImode part unequal to -1 of X.
7198 'o': print integer X as if it's an unsigned 32bit word.
7199 's': "start" of contiguous bitmask X in either DImode or vector inner mode.
7200 't': CONST_INT: "start" of contiguous bitmask X in SImode.
7201 CONST_VECTOR: Generate a bitmask for vgbm instruction.
7202 'x': print integer X as if it's an unsigned halfword.
7203 'v': print register number as vector register (v1 instead of f1).
7204 */
7205
7206 void
7207 print_operand (FILE *file, rtx x, int code)
7208 {
7209 HOST_WIDE_INT ival;
7210
7211 switch (code)
7212 {
7213 case 'C':
7214 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
7215 return;
7216
7217 case 'D':
7218 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
7219 return;
7220
7221 case 'E':
7222 if (GET_CODE (x) == LE)
7223 fprintf (file, "l");
7224 else if (GET_CODE (x) == GT)
7225 fprintf (file, "h");
7226 else
7227 output_operand_lossage ("invalid comparison operator "
7228 "for 'E' output modifier");
7229 return;
7230
7231 case 'J':
7232 if (GET_CODE (x) == SYMBOL_REF)
7233 {
7234 fprintf (file, "%s", ":tls_load:");
7235 output_addr_const (file, x);
7236 }
7237 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
7238 {
7239 fprintf (file, "%s", ":tls_gdcall:");
7240 output_addr_const (file, XVECEXP (x, 0, 0));
7241 }
7242 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
7243 {
7244 fprintf (file, "%s", ":tls_ldcall:");
7245 const char *name = get_some_local_dynamic_name ();
7246 gcc_assert (name);
7247 assemble_name (file, name);
7248 }
7249 else
7250 output_operand_lossage ("invalid reference for 'J' output modifier");
7251 return;
7252
7253 case 'G':
7254 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
7255 return;
7256
7257 case 'O':
7258 {
7259 struct s390_address ad;
7260 int ret;
7261
7262 ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
7263
7264 if (!ret
7265 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7266 || ad.indx)
7267 {
7268 output_operand_lossage ("invalid address for 'O' output modifier");
7269 return;
7270 }
7271
7272 if (ad.disp)
7273 output_addr_const (file, ad.disp);
7274 else
7275 fprintf (file, "0");
7276 }
7277 return;
7278
7279 case 'R':
7280 {
7281 struct s390_address ad;
7282 int ret;
7283
7284 ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
7285
7286 if (!ret
7287 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7288 || ad.indx)
7289 {
7290 output_operand_lossage ("invalid address for 'R' output modifier");
7291 return;
7292 }
7293
7294 if (ad.base)
7295 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
7296 else
7297 fprintf (file, "0");
7298 }
7299 return;
7300
7301 case 'S':
7302 {
7303 struct s390_address ad;
7304 int ret;
7305
7306 if (!MEM_P (x))
7307 {
7308 output_operand_lossage ("memory reference expected for "
7309 "'S' output modifier");
7310 return;
7311 }
7312 ret = s390_decompose_address (XEXP (x, 0), &ad);
7313
7314 if (!ret
7315 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7316 || ad.indx)
7317 {
7318 output_operand_lossage ("invalid address for 'S' output modifier");
7319 return;
7320 }
7321
7322 if (ad.disp)
7323 output_addr_const (file, ad.disp);
7324 else
7325 fprintf (file, "0");
7326
7327 if (ad.base)
7328 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
7329 }
7330 return;
7331
7332 case 'N':
7333 if (GET_CODE (x) == REG)
7334 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
7335 else if (GET_CODE (x) == MEM)
7336 x = change_address (x, VOIDmode,
7337 plus_constant (Pmode, XEXP (x, 0), 4));
7338 else
7339 output_operand_lossage ("register or memory expression expected "
7340 "for 'N' output modifier");
7341 break;
7342
7343 case 'M':
7344 if (GET_CODE (x) == REG)
7345 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
7346 else if (GET_CODE (x) == MEM)
7347 x = change_address (x, VOIDmode,
7348 plus_constant (Pmode, XEXP (x, 0), 8));
7349 else
7350 output_operand_lossage ("register or memory expression expected "
7351 "for 'M' output modifier");
7352 break;
7353
7354 case 'Y':
7355 print_addrstyle_operand (file, x);
7356 return;
7357 }
7358
7359 switch (GET_CODE (x))
7360 {
7361 case REG:
7362 /* Print FP regs as fx instead of vx when they are accessed
7363 through non-vector mode. */
7364 if (code == 'v'
7365 || VECTOR_NOFP_REG_P (x)
7366 || (FP_REG_P (x) && VECTOR_MODE_P (GET_MODE (x)))
7367 || (VECTOR_REG_P (x)
7368 && (GET_MODE_SIZE (GET_MODE (x)) /
7369 s390_class_max_nregs (FP_REGS, GET_MODE (x))) > 8))
7370 fprintf (file, "%%v%s", reg_names[REGNO (x)] + 2);
7371 else
7372 fprintf (file, "%s", reg_names[REGNO (x)]);
7373 break;
7374
7375 case MEM:
7376 output_address (GET_MODE (x), XEXP (x, 0));
7377 break;
7378
7379 case CONST:
7380 case CODE_LABEL:
7381 case LABEL_REF:
7382 case SYMBOL_REF:
7383 output_addr_const (file, x);
7384 break;
7385
7386 case CONST_INT:
7387 ival = INTVAL (x);
7388 switch (code)
7389 {
7390 case 0:
7391 break;
7392 case 'b':
7393 ival &= 0xff;
7394 break;
7395 case 'c':
7396 ival = ((ival & 0xff) ^ 0x80) - 0x80;
7397 break;
7398 case 'x':
7399 ival &= 0xffff;
7400 break;
7401 case 'h':
7402 ival = ((ival & 0xffff) ^ 0x8000) - 0x8000;
7403 break;
7404 case 'i':
7405 ival = s390_extract_part (x, HImode, 0);
7406 break;
7407 case 'j':
7408 ival = s390_extract_part (x, HImode, -1);
7409 break;
7410 case 'k':
7411 ival = s390_extract_part (x, SImode, 0);
7412 break;
7413 case 'm':
7414 ival = s390_extract_part (x, SImode, -1);
7415 break;
7416 case 'o':
7417 ival &= 0xffffffff;
7418 break;
7419 case 'e': case 'f':
7420 case 's': case 't':
7421 {
7422 int pos, len;
7423 bool ok;
7424
7425 len = (code == 's' || code == 'e' ? 64 : 32);
7426 ok = s390_contiguous_bitmask_p (ival, len, &pos, &len);
7427 gcc_assert (ok);
7428 if (code == 's' || code == 't')
7429 ival = 64 - pos - len;
7430 else
7431 ival = 64 - 1 - pos;
7432 }
7433 break;
7434 default:
7435 output_operand_lossage ("invalid constant for output modifier '%c'", code);
7436 }
7437 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
7438 break;
7439
7440 case CONST_WIDE_INT:
7441 if (code == 'b')
7442 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7443 CONST_WIDE_INT_ELT (x, 0) & 0xff);
7444 else if (code == 'x')
7445 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7446 CONST_WIDE_INT_ELT (x, 0) & 0xffff);
7447 else if (code == 'h')
7448 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7449 ((CONST_WIDE_INT_ELT (x, 0) & 0xffff) ^ 0x8000) - 0x8000);
7450 else
7451 {
7452 if (code == 0)
7453 output_operand_lossage ("invalid constant - try using "
7454 "an output modifier");
7455 else
7456 output_operand_lossage ("invalid constant for output modifier '%c'",
7457 code);
7458 }
7459 break;
7460 case CONST_VECTOR:
7461 switch (code)
7462 {
7463 case 'h':
7464 gcc_assert (const_vec_duplicate_p (x));
7465 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7466 ((INTVAL (XVECEXP (x, 0, 0)) & 0xffff) ^ 0x8000) - 0x8000);
7467 break;
7468 case 'e':
7469 case 's':
7470 {
7471 int start, stop, inner_len;
7472 bool ok;
7473
7474 inner_len = GET_MODE_UNIT_BITSIZE (GET_MODE (x));
7475 ok = s390_contiguous_bitmask_vector_p (x, &start, &stop);
7476 gcc_assert (ok);
7477 if (code == 's' || code == 't')
7478 ival = inner_len - stop - 1;
7479 else
7480 ival = inner_len - start - 1;
7481 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
7482 }
7483 break;
7484 case 't':
7485 {
7486 unsigned mask;
7487 bool ok = s390_bytemask_vector_p (x, &mask);
7488 gcc_assert (ok);
7489 fprintf (file, "%u", mask);
7490 }
7491 break;
7492
7493 default:
7494 output_operand_lossage ("invalid constant vector for output "
7495 "modifier '%c'", code);
7496 }
7497 break;
7498
7499 default:
7500 if (code == 0)
7501 output_operand_lossage ("invalid expression - try using "
7502 "an output modifier");
7503 else
7504 output_operand_lossage ("invalid expression for output "
7505 "modifier '%c'", code);
7506 break;
7507 }
7508 }
7509
7510 /* Target hook for assembling integer objects. We need to define it
7511 here to work a round a bug in some versions of GAS, which couldn't
7512 handle values smaller than INT_MIN when printed in decimal. */
7513
7514 static bool
7515 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
7516 {
7517 if (size == 8 && aligned_p
7518 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
7519 {
7520 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
7521 INTVAL (x));
7522 return true;
7523 }
7524 return default_assemble_integer (x, size, aligned_p);
7525 }
7526
7527 /* Returns true if register REGNO is used for forming
7528 a memory address in expression X. */
7529
7530 static bool
7531 reg_used_in_mem_p (int regno, rtx x)
7532 {
7533 enum rtx_code code = GET_CODE (x);
7534 int i, j;
7535 const char *fmt;
7536
7537 if (code == MEM)
7538 {
7539 if (refers_to_regno_p (regno, XEXP (x, 0)))
7540 return true;
7541 }
7542 else if (code == SET
7543 && GET_CODE (SET_DEST (x)) == PC)
7544 {
7545 if (refers_to_regno_p (regno, SET_SRC (x)))
7546 return true;
7547 }
7548
7549 fmt = GET_RTX_FORMAT (code);
7550 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7551 {
7552 if (fmt[i] == 'e'
7553 && reg_used_in_mem_p (regno, XEXP (x, i)))
7554 return true;
7555
7556 else if (fmt[i] == 'E')
7557 for (j = 0; j < XVECLEN (x, i); j++)
7558 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
7559 return true;
7560 }
7561 return false;
7562 }
7563
7564 /* Returns true if expression DEP_RTX sets an address register
7565 used by instruction INSN to address memory. */
7566
7567 static bool
7568 addr_generation_dependency_p (rtx dep_rtx, rtx_insn *insn)
7569 {
7570 rtx target, pat;
7571
7572 if (NONJUMP_INSN_P (dep_rtx))
7573 dep_rtx = PATTERN (dep_rtx);
7574
7575 if (GET_CODE (dep_rtx) == SET)
7576 {
7577 target = SET_DEST (dep_rtx);
7578 if (GET_CODE (target) == STRICT_LOW_PART)
7579 target = XEXP (target, 0);
7580 while (GET_CODE (target) == SUBREG)
7581 target = SUBREG_REG (target);
7582
7583 if (GET_CODE (target) == REG)
7584 {
7585 int regno = REGNO (target);
7586
7587 if (s390_safe_attr_type (insn) == TYPE_LA)
7588 {
7589 pat = PATTERN (insn);
7590 if (GET_CODE (pat) == PARALLEL)
7591 {
7592 gcc_assert (XVECLEN (pat, 0) == 2);
7593 pat = XVECEXP (pat, 0, 0);
7594 }
7595 gcc_assert (GET_CODE (pat) == SET);
7596 return refers_to_regno_p (regno, SET_SRC (pat));
7597 }
7598 else if (get_attr_atype (insn) == ATYPE_AGEN)
7599 return reg_used_in_mem_p (regno, PATTERN (insn));
7600 }
7601 }
7602 return false;
7603 }
7604
7605 /* Return 1, if dep_insn sets register used in insn in the agen unit. */
7606
7607 int
7608 s390_agen_dep_p (rtx_insn *dep_insn, rtx_insn *insn)
7609 {
7610 rtx dep_rtx = PATTERN (dep_insn);
7611 int i;
7612
7613 if (GET_CODE (dep_rtx) == SET
7614 && addr_generation_dependency_p (dep_rtx, insn))
7615 return 1;
7616 else if (GET_CODE (dep_rtx) == PARALLEL)
7617 {
7618 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
7619 {
7620 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
7621 return 1;
7622 }
7623 }
7624 return 0;
7625 }
7626
7627
7628 /* A C statement (sans semicolon) to update the integer scheduling priority
7629 INSN_PRIORITY (INSN). Increase the priority to execute the INSN earlier,
7630 reduce the priority to execute INSN later. Do not define this macro if
7631 you do not need to adjust the scheduling priorities of insns.
7632
7633 A STD instruction should be scheduled earlier,
7634 in order to use the bypass. */
7635 static int
7636 s390_adjust_priority (rtx_insn *insn, int priority)
7637 {
7638 if (! INSN_P (insn))
7639 return priority;
7640
7641 if (s390_tune <= PROCESSOR_2064_Z900)
7642 return priority;
7643
7644 switch (s390_safe_attr_type (insn))
7645 {
7646 case TYPE_FSTOREDF:
7647 case TYPE_FSTORESF:
7648 priority = priority << 3;
7649 break;
7650 case TYPE_STORE:
7651 case TYPE_STM:
7652 priority = priority << 1;
7653 break;
7654 default:
7655 break;
7656 }
7657 return priority;
7658 }
7659
7660
7661 /* The number of instructions that can be issued per cycle. */
7662
7663 static int
7664 s390_issue_rate (void)
7665 {
7666 switch (s390_tune)
7667 {
7668 case PROCESSOR_2084_Z990:
7669 case PROCESSOR_2094_Z9_109:
7670 case PROCESSOR_2094_Z9_EC:
7671 case PROCESSOR_2817_Z196:
7672 return 3;
7673 case PROCESSOR_2097_Z10:
7674 return 2;
7675 case PROCESSOR_9672_G5:
7676 case PROCESSOR_9672_G6:
7677 case PROCESSOR_2064_Z900:
7678 /* Starting with EC12 we use the sched_reorder hook to take care
7679 of instruction dispatch constraints. The algorithm only
7680 picks the best instruction and assumes only a single
7681 instruction gets issued per cycle. */
7682 case PROCESSOR_2827_ZEC12:
7683 case PROCESSOR_2964_Z13:
7684 default:
7685 return 1;
7686 }
7687 }
7688
7689 static int
7690 s390_first_cycle_multipass_dfa_lookahead (void)
7691 {
7692 return 4;
7693 }
7694
7695 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
7696 Fix up MEMs as required. */
7697
7698 static void
7699 annotate_constant_pool_refs (rtx *x)
7700 {
7701 int i, j;
7702 const char *fmt;
7703
7704 gcc_assert (GET_CODE (*x) != SYMBOL_REF
7705 || !CONSTANT_POOL_ADDRESS_P (*x));
7706
7707 /* Literal pool references can only occur inside a MEM ... */
7708 if (GET_CODE (*x) == MEM)
7709 {
7710 rtx memref = XEXP (*x, 0);
7711
7712 if (GET_CODE (memref) == SYMBOL_REF
7713 && CONSTANT_POOL_ADDRESS_P (memref))
7714 {
7715 rtx base = cfun->machine->base_reg;
7716 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
7717 UNSPEC_LTREF);
7718
7719 *x = replace_equiv_address (*x, addr);
7720 return;
7721 }
7722
7723 if (GET_CODE (memref) == CONST
7724 && GET_CODE (XEXP (memref, 0)) == PLUS
7725 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
7726 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
7727 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
7728 {
7729 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
7730 rtx sym = XEXP (XEXP (memref, 0), 0);
7731 rtx base = cfun->machine->base_reg;
7732 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
7733 UNSPEC_LTREF);
7734
7735 *x = replace_equiv_address (*x, plus_constant (Pmode, addr, off));
7736 return;
7737 }
7738 }
7739
7740 /* ... or a load-address type pattern. */
7741 if (GET_CODE (*x) == SET)
7742 {
7743 rtx addrref = SET_SRC (*x);
7744
7745 if (GET_CODE (addrref) == SYMBOL_REF
7746 && CONSTANT_POOL_ADDRESS_P (addrref))
7747 {
7748 rtx base = cfun->machine->base_reg;
7749 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
7750 UNSPEC_LTREF);
7751
7752 SET_SRC (*x) = addr;
7753 return;
7754 }
7755
7756 if (GET_CODE (addrref) == CONST
7757 && GET_CODE (XEXP (addrref, 0)) == PLUS
7758 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
7759 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
7760 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
7761 {
7762 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
7763 rtx sym = XEXP (XEXP (addrref, 0), 0);
7764 rtx base = cfun->machine->base_reg;
7765 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
7766 UNSPEC_LTREF);
7767
7768 SET_SRC (*x) = plus_constant (Pmode, addr, off);
7769 return;
7770 }
7771 }
7772
7773 /* Annotate LTREL_BASE as well. */
7774 if (GET_CODE (*x) == UNSPEC
7775 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
7776 {
7777 rtx base = cfun->machine->base_reg;
7778 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
7779 UNSPEC_LTREL_BASE);
7780 return;
7781 }
7782
7783 fmt = GET_RTX_FORMAT (GET_CODE (*x));
7784 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
7785 {
7786 if (fmt[i] == 'e')
7787 {
7788 annotate_constant_pool_refs (&XEXP (*x, i));
7789 }
7790 else if (fmt[i] == 'E')
7791 {
7792 for (j = 0; j < XVECLEN (*x, i); j++)
7793 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
7794 }
7795 }
7796 }
7797
7798 /* Split all branches that exceed the maximum distance.
7799 Returns true if this created a new literal pool entry. */
7800
7801 static int
7802 s390_split_branches (void)
7803 {
7804 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
7805 int new_literal = 0, ret;
7806 rtx_insn *insn;
7807 rtx pat, target;
7808 rtx *label;
7809
7810 /* We need correct insn addresses. */
7811
7812 shorten_branches (get_insns ());
7813
7814 /* Find all branches that exceed 64KB, and split them. */
7815
7816 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7817 {
7818 if (! JUMP_P (insn) || tablejump_p (insn, NULL, NULL))
7819 continue;
7820
7821 pat = PATTERN (insn);
7822 if (GET_CODE (pat) == PARALLEL)
7823 pat = XVECEXP (pat, 0, 0);
7824 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
7825 continue;
7826
7827 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
7828 {
7829 label = &SET_SRC (pat);
7830 }
7831 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
7832 {
7833 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
7834 label = &XEXP (SET_SRC (pat), 1);
7835 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
7836 label = &XEXP (SET_SRC (pat), 2);
7837 else
7838 continue;
7839 }
7840 else
7841 continue;
7842
7843 if (get_attr_length (insn) <= 4)
7844 continue;
7845
7846 /* We are going to use the return register as scratch register,
7847 make sure it will be saved/restored by the prologue/epilogue. */
7848 cfun_frame_layout.save_return_addr_p = 1;
7849
7850 if (!flag_pic)
7851 {
7852 new_literal = 1;
7853 rtx mem = force_const_mem (Pmode, *label);
7854 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (temp_reg, mem),
7855 insn);
7856 INSN_ADDRESSES_NEW (set_insn, -1);
7857 annotate_constant_pool_refs (&PATTERN (set_insn));
7858
7859 target = temp_reg;
7860 }
7861 else
7862 {
7863 new_literal = 1;
7864 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
7865 UNSPEC_LTREL_OFFSET);
7866 target = gen_rtx_CONST (Pmode, target);
7867 target = force_const_mem (Pmode, target);
7868 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (temp_reg, target),
7869 insn);
7870 INSN_ADDRESSES_NEW (set_insn, -1);
7871 annotate_constant_pool_refs (&PATTERN (set_insn));
7872
7873 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
7874 cfun->machine->base_reg),
7875 UNSPEC_LTREL_BASE);
7876 target = gen_rtx_PLUS (Pmode, temp_reg, target);
7877 }
7878
7879 ret = validate_change (insn, label, target, 0);
7880 gcc_assert (ret);
7881 }
7882
7883 return new_literal;
7884 }
7885
7886
7887 /* Find an annotated literal pool symbol referenced in RTX X,
7888 and store it at REF. Will abort if X contains references to
7889 more than one such pool symbol; multiple references to the same
7890 symbol are allowed, however.
7891
7892 The rtx pointed to by REF must be initialized to NULL_RTX
7893 by the caller before calling this routine. */
7894
7895 static void
7896 find_constant_pool_ref (rtx x, rtx *ref)
7897 {
7898 int i, j;
7899 const char *fmt;
7900
7901 /* Ignore LTREL_BASE references. */
7902 if (GET_CODE (x) == UNSPEC
7903 && XINT (x, 1) == UNSPEC_LTREL_BASE)
7904 return;
7905 /* Likewise POOL_ENTRY insns. */
7906 if (GET_CODE (x) == UNSPEC_VOLATILE
7907 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
7908 return;
7909
7910 gcc_assert (GET_CODE (x) != SYMBOL_REF
7911 || !CONSTANT_POOL_ADDRESS_P (x));
7912
7913 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
7914 {
7915 rtx sym = XVECEXP (x, 0, 0);
7916 gcc_assert (GET_CODE (sym) == SYMBOL_REF
7917 && CONSTANT_POOL_ADDRESS_P (sym));
7918
7919 if (*ref == NULL_RTX)
7920 *ref = sym;
7921 else
7922 gcc_assert (*ref == sym);
7923
7924 return;
7925 }
7926
7927 fmt = GET_RTX_FORMAT (GET_CODE (x));
7928 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
7929 {
7930 if (fmt[i] == 'e')
7931 {
7932 find_constant_pool_ref (XEXP (x, i), ref);
7933 }
7934 else if (fmt[i] == 'E')
7935 {
7936 for (j = 0; j < XVECLEN (x, i); j++)
7937 find_constant_pool_ref (XVECEXP (x, i, j), ref);
7938 }
7939 }
7940 }
7941
7942 /* Replace every reference to the annotated literal pool
7943 symbol REF in X by its base plus OFFSET. */
7944
7945 static void
7946 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
7947 {
7948 int i, j;
7949 const char *fmt;
7950
7951 gcc_assert (*x != ref);
7952
7953 if (GET_CODE (*x) == UNSPEC
7954 && XINT (*x, 1) == UNSPEC_LTREF
7955 && XVECEXP (*x, 0, 0) == ref)
7956 {
7957 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
7958 return;
7959 }
7960
7961 if (GET_CODE (*x) == PLUS
7962 && GET_CODE (XEXP (*x, 1)) == CONST_INT
7963 && GET_CODE (XEXP (*x, 0)) == UNSPEC
7964 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
7965 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
7966 {
7967 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
7968 *x = plus_constant (Pmode, addr, INTVAL (XEXP (*x, 1)));
7969 return;
7970 }
7971
7972 fmt = GET_RTX_FORMAT (GET_CODE (*x));
7973 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
7974 {
7975 if (fmt[i] == 'e')
7976 {
7977 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
7978 }
7979 else if (fmt[i] == 'E')
7980 {
7981 for (j = 0; j < XVECLEN (*x, i); j++)
7982 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
7983 }
7984 }
7985 }
7986
7987 /* Check whether X contains an UNSPEC_LTREL_BASE.
7988 Return its constant pool symbol if found, NULL_RTX otherwise. */
7989
7990 static rtx
7991 find_ltrel_base (rtx x)
7992 {
7993 int i, j;
7994 const char *fmt;
7995
7996 if (GET_CODE (x) == UNSPEC
7997 && XINT (x, 1) == UNSPEC_LTREL_BASE)
7998 return XVECEXP (x, 0, 0);
7999
8000 fmt = GET_RTX_FORMAT (GET_CODE (x));
8001 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8002 {
8003 if (fmt[i] == 'e')
8004 {
8005 rtx fnd = find_ltrel_base (XEXP (x, i));
8006 if (fnd)
8007 return fnd;
8008 }
8009 else if (fmt[i] == 'E')
8010 {
8011 for (j = 0; j < XVECLEN (x, i); j++)
8012 {
8013 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
8014 if (fnd)
8015 return fnd;
8016 }
8017 }
8018 }
8019
8020 return NULL_RTX;
8021 }
8022
8023 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
8024
8025 static void
8026 replace_ltrel_base (rtx *x)
8027 {
8028 int i, j;
8029 const char *fmt;
8030
8031 if (GET_CODE (*x) == UNSPEC
8032 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
8033 {
8034 *x = XVECEXP (*x, 0, 1);
8035 return;
8036 }
8037
8038 fmt = GET_RTX_FORMAT (GET_CODE (*x));
8039 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
8040 {
8041 if (fmt[i] == 'e')
8042 {
8043 replace_ltrel_base (&XEXP (*x, i));
8044 }
8045 else if (fmt[i] == 'E')
8046 {
8047 for (j = 0; j < XVECLEN (*x, i); j++)
8048 replace_ltrel_base (&XVECEXP (*x, i, j));
8049 }
8050 }
8051 }
8052
8053
8054 /* We keep a list of constants which we have to add to internal
8055 constant tables in the middle of large functions. */
8056
8057 #define NR_C_MODES 32
8058 machine_mode constant_modes[NR_C_MODES] =
8059 {
8060 TFmode, TImode, TDmode,
8061 V16QImode, V8HImode, V4SImode, V2DImode, V1TImode,
8062 V4SFmode, V2DFmode, V1TFmode,
8063 DFmode, DImode, DDmode,
8064 V8QImode, V4HImode, V2SImode, V1DImode, V2SFmode, V1DFmode,
8065 SFmode, SImode, SDmode,
8066 V4QImode, V2HImode, V1SImode, V1SFmode,
8067 HImode,
8068 V2QImode, V1HImode,
8069 QImode,
8070 V1QImode
8071 };
8072
8073 struct constant
8074 {
8075 struct constant *next;
8076 rtx value;
8077 rtx_code_label *label;
8078 };
8079
8080 struct constant_pool
8081 {
8082 struct constant_pool *next;
8083 rtx_insn *first_insn;
8084 rtx_insn *pool_insn;
8085 bitmap insns;
8086 rtx_insn *emit_pool_after;
8087
8088 struct constant *constants[NR_C_MODES];
8089 struct constant *execute;
8090 rtx_code_label *label;
8091 int size;
8092 };
8093
8094 /* Allocate new constant_pool structure. */
8095
8096 static struct constant_pool *
8097 s390_alloc_pool (void)
8098 {
8099 struct constant_pool *pool;
8100 int i;
8101
8102 pool = (struct constant_pool *) xmalloc (sizeof *pool);
8103 pool->next = NULL;
8104 for (i = 0; i < NR_C_MODES; i++)
8105 pool->constants[i] = NULL;
8106
8107 pool->execute = NULL;
8108 pool->label = gen_label_rtx ();
8109 pool->first_insn = NULL;
8110 pool->pool_insn = NULL;
8111 pool->insns = BITMAP_ALLOC (NULL);
8112 pool->size = 0;
8113 pool->emit_pool_after = NULL;
8114
8115 return pool;
8116 }
8117
8118 /* Create new constant pool covering instructions starting at INSN
8119 and chain it to the end of POOL_LIST. */
8120
8121 static struct constant_pool *
8122 s390_start_pool (struct constant_pool **pool_list, rtx_insn *insn)
8123 {
8124 struct constant_pool *pool, **prev;
8125
8126 pool = s390_alloc_pool ();
8127 pool->first_insn = insn;
8128
8129 for (prev = pool_list; *prev; prev = &(*prev)->next)
8130 ;
8131 *prev = pool;
8132
8133 return pool;
8134 }
8135
8136 /* End range of instructions covered by POOL at INSN and emit
8137 placeholder insn representing the pool. */
8138
8139 static void
8140 s390_end_pool (struct constant_pool *pool, rtx_insn *insn)
8141 {
8142 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
8143
8144 if (!insn)
8145 insn = get_last_insn ();
8146
8147 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
8148 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8149 }
8150
8151 /* Add INSN to the list of insns covered by POOL. */
8152
8153 static void
8154 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
8155 {
8156 bitmap_set_bit (pool->insns, INSN_UID (insn));
8157 }
8158
8159 /* Return pool out of POOL_LIST that covers INSN. */
8160
8161 static struct constant_pool *
8162 s390_find_pool (struct constant_pool *pool_list, rtx insn)
8163 {
8164 struct constant_pool *pool;
8165
8166 for (pool = pool_list; pool; pool = pool->next)
8167 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
8168 break;
8169
8170 return pool;
8171 }
8172
8173 /* Add constant VAL of mode MODE to the constant pool POOL. */
8174
8175 static void
8176 s390_add_constant (struct constant_pool *pool, rtx val, machine_mode mode)
8177 {
8178 struct constant *c;
8179 int i;
8180
8181 for (i = 0; i < NR_C_MODES; i++)
8182 if (constant_modes[i] == mode)
8183 break;
8184 gcc_assert (i != NR_C_MODES);
8185
8186 for (c = pool->constants[i]; c != NULL; c = c->next)
8187 if (rtx_equal_p (val, c->value))
8188 break;
8189
8190 if (c == NULL)
8191 {
8192 c = (struct constant *) xmalloc (sizeof *c);
8193 c->value = val;
8194 c->label = gen_label_rtx ();
8195 c->next = pool->constants[i];
8196 pool->constants[i] = c;
8197 pool->size += GET_MODE_SIZE (mode);
8198 }
8199 }
8200
8201 /* Return an rtx that represents the offset of X from the start of
8202 pool POOL. */
8203
8204 static rtx
8205 s390_pool_offset (struct constant_pool *pool, rtx x)
8206 {
8207 rtx label;
8208
8209 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
8210 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
8211 UNSPEC_POOL_OFFSET);
8212 return gen_rtx_CONST (GET_MODE (x), x);
8213 }
8214
8215 /* Find constant VAL of mode MODE in the constant pool POOL.
8216 Return an RTX describing the distance from the start of
8217 the pool to the location of the new constant. */
8218
8219 static rtx
8220 s390_find_constant (struct constant_pool *pool, rtx val,
8221 machine_mode mode)
8222 {
8223 struct constant *c;
8224 int i;
8225
8226 for (i = 0; i < NR_C_MODES; i++)
8227 if (constant_modes[i] == mode)
8228 break;
8229 gcc_assert (i != NR_C_MODES);
8230
8231 for (c = pool->constants[i]; c != NULL; c = c->next)
8232 if (rtx_equal_p (val, c->value))
8233 break;
8234
8235 gcc_assert (c);
8236
8237 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
8238 }
8239
8240 /* Check whether INSN is an execute. Return the label_ref to its
8241 execute target template if so, NULL_RTX otherwise. */
8242
8243 static rtx
8244 s390_execute_label (rtx insn)
8245 {
8246 if (NONJUMP_INSN_P (insn)
8247 && GET_CODE (PATTERN (insn)) == PARALLEL
8248 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
8249 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
8250 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
8251
8252 return NULL_RTX;
8253 }
8254
8255 /* Add execute target for INSN to the constant pool POOL. */
8256
8257 static void
8258 s390_add_execute (struct constant_pool *pool, rtx insn)
8259 {
8260 struct constant *c;
8261
8262 for (c = pool->execute; c != NULL; c = c->next)
8263 if (INSN_UID (insn) == INSN_UID (c->value))
8264 break;
8265
8266 if (c == NULL)
8267 {
8268 c = (struct constant *) xmalloc (sizeof *c);
8269 c->value = insn;
8270 c->label = gen_label_rtx ();
8271 c->next = pool->execute;
8272 pool->execute = c;
8273 pool->size += 6;
8274 }
8275 }
8276
8277 /* Find execute target for INSN in the constant pool POOL.
8278 Return an RTX describing the distance from the start of
8279 the pool to the location of the execute target. */
8280
8281 static rtx
8282 s390_find_execute (struct constant_pool *pool, rtx insn)
8283 {
8284 struct constant *c;
8285
8286 for (c = pool->execute; c != NULL; c = c->next)
8287 if (INSN_UID (insn) == INSN_UID (c->value))
8288 break;
8289
8290 gcc_assert (c);
8291
8292 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
8293 }
8294
8295 /* For an execute INSN, extract the execute target template. */
8296
8297 static rtx
8298 s390_execute_target (rtx insn)
8299 {
8300 rtx pattern = PATTERN (insn);
8301 gcc_assert (s390_execute_label (insn));
8302
8303 if (XVECLEN (pattern, 0) == 2)
8304 {
8305 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
8306 }
8307 else
8308 {
8309 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
8310 int i;
8311
8312 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
8313 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
8314
8315 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
8316 }
8317
8318 return pattern;
8319 }
8320
8321 /* Indicate that INSN cannot be duplicated. This is the case for
8322 execute insns that carry a unique label. */
8323
8324 static bool
8325 s390_cannot_copy_insn_p (rtx_insn *insn)
8326 {
8327 rtx label = s390_execute_label (insn);
8328 return label && label != const0_rtx;
8329 }
8330
8331 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
8332 do not emit the pool base label. */
8333
8334 static void
8335 s390_dump_pool (struct constant_pool *pool, bool remote_label)
8336 {
8337 struct constant *c;
8338 rtx_insn *insn = pool->pool_insn;
8339 int i;
8340
8341 /* Switch to rodata section. */
8342 if (TARGET_CPU_ZARCH)
8343 {
8344 insn = emit_insn_after (gen_pool_section_start (), insn);
8345 INSN_ADDRESSES_NEW (insn, -1);
8346 }
8347
8348 /* Ensure minimum pool alignment. */
8349 if (TARGET_CPU_ZARCH)
8350 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
8351 else
8352 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
8353 INSN_ADDRESSES_NEW (insn, -1);
8354
8355 /* Emit pool base label. */
8356 if (!remote_label)
8357 {
8358 insn = emit_label_after (pool->label, insn);
8359 INSN_ADDRESSES_NEW (insn, -1);
8360 }
8361
8362 /* Dump constants in descending alignment requirement order,
8363 ensuring proper alignment for every constant. */
8364 for (i = 0; i < NR_C_MODES; i++)
8365 for (c = pool->constants[i]; c; c = c->next)
8366 {
8367 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
8368 rtx value = copy_rtx (c->value);
8369 if (GET_CODE (value) == CONST
8370 && GET_CODE (XEXP (value, 0)) == UNSPEC
8371 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
8372 && XVECLEN (XEXP (value, 0), 0) == 1)
8373 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
8374
8375 insn = emit_label_after (c->label, insn);
8376 INSN_ADDRESSES_NEW (insn, -1);
8377
8378 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
8379 gen_rtvec (1, value),
8380 UNSPECV_POOL_ENTRY);
8381 insn = emit_insn_after (value, insn);
8382 INSN_ADDRESSES_NEW (insn, -1);
8383 }
8384
8385 /* Ensure minimum alignment for instructions. */
8386 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
8387 INSN_ADDRESSES_NEW (insn, -1);
8388
8389 /* Output in-pool execute template insns. */
8390 for (c = pool->execute; c; c = c->next)
8391 {
8392 insn = emit_label_after (c->label, insn);
8393 INSN_ADDRESSES_NEW (insn, -1);
8394
8395 insn = emit_insn_after (s390_execute_target (c->value), insn);
8396 INSN_ADDRESSES_NEW (insn, -1);
8397 }
8398
8399 /* Switch back to previous section. */
8400 if (TARGET_CPU_ZARCH)
8401 {
8402 insn = emit_insn_after (gen_pool_section_end (), insn);
8403 INSN_ADDRESSES_NEW (insn, -1);
8404 }
8405
8406 insn = emit_barrier_after (insn);
8407 INSN_ADDRESSES_NEW (insn, -1);
8408
8409 /* Remove placeholder insn. */
8410 remove_insn (pool->pool_insn);
8411 }
8412
8413 /* Free all memory used by POOL. */
8414
8415 static void
8416 s390_free_pool (struct constant_pool *pool)
8417 {
8418 struct constant *c, *next;
8419 int i;
8420
8421 for (i = 0; i < NR_C_MODES; i++)
8422 for (c = pool->constants[i]; c; c = next)
8423 {
8424 next = c->next;
8425 free (c);
8426 }
8427
8428 for (c = pool->execute; c; c = next)
8429 {
8430 next = c->next;
8431 free (c);
8432 }
8433
8434 BITMAP_FREE (pool->insns);
8435 free (pool);
8436 }
8437
8438
8439 /* Collect main literal pool. Return NULL on overflow. */
8440
8441 static struct constant_pool *
8442 s390_mainpool_start (void)
8443 {
8444 struct constant_pool *pool;
8445 rtx_insn *insn;
8446
8447 pool = s390_alloc_pool ();
8448
8449 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8450 {
8451 if (NONJUMP_INSN_P (insn)
8452 && GET_CODE (PATTERN (insn)) == SET
8453 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
8454 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
8455 {
8456 /* There might be two main_pool instructions if base_reg
8457 is call-clobbered; one for shrink-wrapped code and one
8458 for the rest. We want to keep the first. */
8459 if (pool->pool_insn)
8460 {
8461 insn = PREV_INSN (insn);
8462 delete_insn (NEXT_INSN (insn));
8463 continue;
8464 }
8465 pool->pool_insn = insn;
8466 }
8467
8468 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
8469 {
8470 s390_add_execute (pool, insn);
8471 }
8472 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8473 {
8474 rtx pool_ref = NULL_RTX;
8475 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8476 if (pool_ref)
8477 {
8478 rtx constant = get_pool_constant (pool_ref);
8479 machine_mode mode = get_pool_mode (pool_ref);
8480 s390_add_constant (pool, constant, mode);
8481 }
8482 }
8483
8484 /* If hot/cold partitioning is enabled we have to make sure that
8485 the literal pool is emitted in the same section where the
8486 initialization of the literal pool base pointer takes place.
8487 emit_pool_after is only used in the non-overflow case on non
8488 Z cpus where we can emit the literal pool at the end of the
8489 function body within the text section. */
8490 if (NOTE_P (insn)
8491 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
8492 && !pool->emit_pool_after)
8493 pool->emit_pool_after = PREV_INSN (insn);
8494 }
8495
8496 gcc_assert (pool->pool_insn || pool->size == 0);
8497
8498 if (pool->size >= 4096)
8499 {
8500 /* We're going to chunkify the pool, so remove the main
8501 pool placeholder insn. */
8502 remove_insn (pool->pool_insn);
8503
8504 s390_free_pool (pool);
8505 pool = NULL;
8506 }
8507
8508 /* If the functions ends with the section where the literal pool
8509 should be emitted set the marker to its end. */
8510 if (pool && !pool->emit_pool_after)
8511 pool->emit_pool_after = get_last_insn ();
8512
8513 return pool;
8514 }
8515
8516 /* POOL holds the main literal pool as collected by s390_mainpool_start.
8517 Modify the current function to output the pool constants as well as
8518 the pool register setup instruction. */
8519
8520 static void
8521 s390_mainpool_finish (struct constant_pool *pool)
8522 {
8523 rtx base_reg = cfun->machine->base_reg;
8524
8525 /* If the pool is empty, we're done. */
8526 if (pool->size == 0)
8527 {
8528 /* We don't actually need a base register after all. */
8529 cfun->machine->base_reg = NULL_RTX;
8530
8531 if (pool->pool_insn)
8532 remove_insn (pool->pool_insn);
8533 s390_free_pool (pool);
8534 return;
8535 }
8536
8537 /* We need correct insn addresses. */
8538 shorten_branches (get_insns ());
8539
8540 /* On zSeries, we use a LARL to load the pool register. The pool is
8541 located in the .rodata section, so we emit it after the function. */
8542 if (TARGET_CPU_ZARCH)
8543 {
8544 rtx set = gen_main_base_64 (base_reg, pool->label);
8545 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
8546 INSN_ADDRESSES_NEW (insn, -1);
8547 remove_insn (pool->pool_insn);
8548
8549 insn = get_last_insn ();
8550 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8551 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8552
8553 s390_dump_pool (pool, 0);
8554 }
8555
8556 /* On S/390, if the total size of the function's code plus literal pool
8557 does not exceed 4096 bytes, we use BASR to set up a function base
8558 pointer, and emit the literal pool at the end of the function. */
8559 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
8560 + pool->size + 8 /* alignment slop */ < 4096)
8561 {
8562 rtx set = gen_main_base_31_small (base_reg, pool->label);
8563 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
8564 INSN_ADDRESSES_NEW (insn, -1);
8565 remove_insn (pool->pool_insn);
8566
8567 insn = emit_label_after (pool->label, insn);
8568 INSN_ADDRESSES_NEW (insn, -1);
8569
8570 /* emit_pool_after will be set by s390_mainpool_start to the
8571 last insn of the section where the literal pool should be
8572 emitted. */
8573 insn = pool->emit_pool_after;
8574
8575 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8576 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8577
8578 s390_dump_pool (pool, 1);
8579 }
8580
8581 /* Otherwise, we emit an inline literal pool and use BASR to branch
8582 over it, setting up the pool register at the same time. */
8583 else
8584 {
8585 rtx_code_label *pool_end = gen_label_rtx ();
8586
8587 rtx pat = gen_main_base_31_large (base_reg, pool->label, pool_end);
8588 rtx_insn *insn = emit_jump_insn_after (pat, pool->pool_insn);
8589 JUMP_LABEL (insn) = pool_end;
8590 INSN_ADDRESSES_NEW (insn, -1);
8591 remove_insn (pool->pool_insn);
8592
8593 insn = emit_label_after (pool->label, insn);
8594 INSN_ADDRESSES_NEW (insn, -1);
8595
8596 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8597 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8598
8599 insn = emit_label_after (pool_end, pool->pool_insn);
8600 INSN_ADDRESSES_NEW (insn, -1);
8601
8602 s390_dump_pool (pool, 1);
8603 }
8604
8605
8606 /* Replace all literal pool references. */
8607
8608 for (rtx_insn *insn = get_insns (); insn; insn = NEXT_INSN (insn))
8609 {
8610 if (INSN_P (insn))
8611 replace_ltrel_base (&PATTERN (insn));
8612
8613 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8614 {
8615 rtx addr, pool_ref = NULL_RTX;
8616 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8617 if (pool_ref)
8618 {
8619 if (s390_execute_label (insn))
8620 addr = s390_find_execute (pool, insn);
8621 else
8622 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
8623 get_pool_mode (pool_ref));
8624
8625 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
8626 INSN_CODE (insn) = -1;
8627 }
8628 }
8629 }
8630
8631
8632 /* Free the pool. */
8633 s390_free_pool (pool);
8634 }
8635
8636 /* POOL holds the main literal pool as collected by s390_mainpool_start.
8637 We have decided we cannot use this pool, so revert all changes
8638 to the current function that were done by s390_mainpool_start. */
8639 static void
8640 s390_mainpool_cancel (struct constant_pool *pool)
8641 {
8642 /* We didn't actually change the instruction stream, so simply
8643 free the pool memory. */
8644 s390_free_pool (pool);
8645 }
8646
8647
8648 /* Chunkify the literal pool. */
8649
8650 #define S390_POOL_CHUNK_MIN 0xc00
8651 #define S390_POOL_CHUNK_MAX 0xe00
8652
8653 static struct constant_pool *
8654 s390_chunkify_start (void)
8655 {
8656 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
8657 int extra_size = 0;
8658 bitmap far_labels;
8659 rtx pending_ltrel = NULL_RTX;
8660 rtx_insn *insn;
8661
8662 rtx (*gen_reload_base) (rtx, rtx) =
8663 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
8664
8665
8666 /* We need correct insn addresses. */
8667
8668 shorten_branches (get_insns ());
8669
8670 /* Scan all insns and move literals to pool chunks. */
8671
8672 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8673 {
8674 bool section_switch_p = false;
8675
8676 /* Check for pending LTREL_BASE. */
8677 if (INSN_P (insn))
8678 {
8679 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
8680 if (ltrel_base)
8681 {
8682 gcc_assert (ltrel_base == pending_ltrel);
8683 pending_ltrel = NULL_RTX;
8684 }
8685 }
8686
8687 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
8688 {
8689 if (!curr_pool)
8690 curr_pool = s390_start_pool (&pool_list, insn);
8691
8692 s390_add_execute (curr_pool, insn);
8693 s390_add_pool_insn (curr_pool, insn);
8694 }
8695 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8696 {
8697 rtx pool_ref = NULL_RTX;
8698 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8699 if (pool_ref)
8700 {
8701 rtx constant = get_pool_constant (pool_ref);
8702 machine_mode mode = get_pool_mode (pool_ref);
8703
8704 if (!curr_pool)
8705 curr_pool = s390_start_pool (&pool_list, insn);
8706
8707 s390_add_constant (curr_pool, constant, mode);
8708 s390_add_pool_insn (curr_pool, insn);
8709
8710 /* Don't split the pool chunk between a LTREL_OFFSET load
8711 and the corresponding LTREL_BASE. */
8712 if (GET_CODE (constant) == CONST
8713 && GET_CODE (XEXP (constant, 0)) == UNSPEC
8714 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
8715 {
8716 gcc_assert (!pending_ltrel);
8717 pending_ltrel = pool_ref;
8718 }
8719 }
8720 }
8721
8722 if (JUMP_P (insn) || JUMP_TABLE_DATA_P (insn) || LABEL_P (insn))
8723 {
8724 if (curr_pool)
8725 s390_add_pool_insn (curr_pool, insn);
8726 /* An LTREL_BASE must follow within the same basic block. */
8727 gcc_assert (!pending_ltrel);
8728 }
8729
8730 if (NOTE_P (insn))
8731 switch (NOTE_KIND (insn))
8732 {
8733 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
8734 section_switch_p = true;
8735 break;
8736 case NOTE_INSN_VAR_LOCATION:
8737 case NOTE_INSN_CALL_ARG_LOCATION:
8738 continue;
8739 default:
8740 break;
8741 }
8742
8743 if (!curr_pool
8744 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
8745 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
8746 continue;
8747
8748 if (TARGET_CPU_ZARCH)
8749 {
8750 if (curr_pool->size < S390_POOL_CHUNK_MAX)
8751 continue;
8752
8753 s390_end_pool (curr_pool, NULL);
8754 curr_pool = NULL;
8755 }
8756 else
8757 {
8758 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
8759 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
8760 + extra_size;
8761
8762 /* We will later have to insert base register reload insns.
8763 Those will have an effect on code size, which we need to
8764 consider here. This calculation makes rather pessimistic
8765 worst-case assumptions. */
8766 if (LABEL_P (insn))
8767 extra_size += 6;
8768
8769 if (chunk_size < S390_POOL_CHUNK_MIN
8770 && curr_pool->size < S390_POOL_CHUNK_MIN
8771 && !section_switch_p)
8772 continue;
8773
8774 /* Pool chunks can only be inserted after BARRIERs ... */
8775 if (BARRIER_P (insn))
8776 {
8777 s390_end_pool (curr_pool, insn);
8778 curr_pool = NULL;
8779 extra_size = 0;
8780 }
8781
8782 /* ... so if we don't find one in time, create one. */
8783 else if (chunk_size > S390_POOL_CHUNK_MAX
8784 || curr_pool->size > S390_POOL_CHUNK_MAX
8785 || section_switch_p)
8786 {
8787 rtx_insn *label, *jump, *barrier, *next, *prev;
8788
8789 if (!section_switch_p)
8790 {
8791 /* We can insert the barrier only after a 'real' insn. */
8792 if (! NONJUMP_INSN_P (insn) && ! CALL_P (insn))
8793 continue;
8794 if (get_attr_length (insn) == 0)
8795 continue;
8796 /* Don't separate LTREL_BASE from the corresponding
8797 LTREL_OFFSET load. */
8798 if (pending_ltrel)
8799 continue;
8800 next = insn;
8801 do
8802 {
8803 insn = next;
8804 next = NEXT_INSN (insn);
8805 }
8806 while (next
8807 && NOTE_P (next)
8808 && (NOTE_KIND (next) == NOTE_INSN_VAR_LOCATION
8809 || NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION));
8810 }
8811 else
8812 {
8813 gcc_assert (!pending_ltrel);
8814
8815 /* The old pool has to end before the section switch
8816 note in order to make it part of the current
8817 section. */
8818 insn = PREV_INSN (insn);
8819 }
8820
8821 label = gen_label_rtx ();
8822 prev = insn;
8823 if (prev && NOTE_P (prev))
8824 prev = prev_nonnote_insn (prev);
8825 if (prev)
8826 jump = emit_jump_insn_after_setloc (gen_jump (label), insn,
8827 INSN_LOCATION (prev));
8828 else
8829 jump = emit_jump_insn_after_noloc (gen_jump (label), insn);
8830 barrier = emit_barrier_after (jump);
8831 insn = emit_label_after (label, barrier);
8832 JUMP_LABEL (jump) = label;
8833 LABEL_NUSES (label) = 1;
8834
8835 INSN_ADDRESSES_NEW (jump, -1);
8836 INSN_ADDRESSES_NEW (barrier, -1);
8837 INSN_ADDRESSES_NEW (insn, -1);
8838
8839 s390_end_pool (curr_pool, barrier);
8840 curr_pool = NULL;
8841 extra_size = 0;
8842 }
8843 }
8844 }
8845
8846 if (curr_pool)
8847 s390_end_pool (curr_pool, NULL);
8848 gcc_assert (!pending_ltrel);
8849
8850 /* Find all labels that are branched into
8851 from an insn belonging to a different chunk. */
8852
8853 far_labels = BITMAP_ALLOC (NULL);
8854
8855 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8856 {
8857 rtx_jump_table_data *table;
8858
8859 /* Labels marked with LABEL_PRESERVE_P can be target
8860 of non-local jumps, so we have to mark them.
8861 The same holds for named labels.
8862
8863 Don't do that, however, if it is the label before
8864 a jump table. */
8865
8866 if (LABEL_P (insn)
8867 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
8868 {
8869 rtx_insn *vec_insn = NEXT_INSN (insn);
8870 if (! vec_insn || ! JUMP_TABLE_DATA_P (vec_insn))
8871 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
8872 }
8873 /* Check potential targets in a table jump (casesi_jump). */
8874 else if (tablejump_p (insn, NULL, &table))
8875 {
8876 rtx vec_pat = PATTERN (table);
8877 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
8878
8879 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
8880 {
8881 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
8882
8883 if (s390_find_pool (pool_list, label)
8884 != s390_find_pool (pool_list, insn))
8885 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
8886 }
8887 }
8888 /* If we have a direct jump (conditional or unconditional),
8889 check all potential targets. */
8890 else if (JUMP_P (insn))
8891 {
8892 rtx pat = PATTERN (insn);
8893
8894 if (GET_CODE (pat) == PARALLEL)
8895 pat = XVECEXP (pat, 0, 0);
8896
8897 if (GET_CODE (pat) == SET)
8898 {
8899 rtx label = JUMP_LABEL (insn);
8900 if (label && !ANY_RETURN_P (label))
8901 {
8902 if (s390_find_pool (pool_list, label)
8903 != s390_find_pool (pool_list, insn))
8904 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
8905 }
8906 }
8907 }
8908 }
8909
8910 /* Insert base register reload insns before every pool. */
8911
8912 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
8913 {
8914 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
8915 curr_pool->label);
8916 rtx_insn *insn = curr_pool->first_insn;
8917 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
8918 }
8919
8920 /* Insert base register reload insns at every far label. */
8921
8922 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8923 if (LABEL_P (insn)
8924 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
8925 {
8926 struct constant_pool *pool = s390_find_pool (pool_list, insn);
8927 if (pool)
8928 {
8929 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
8930 pool->label);
8931 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
8932 }
8933 }
8934
8935
8936 BITMAP_FREE (far_labels);
8937
8938
8939 /* Recompute insn addresses. */
8940
8941 init_insn_lengths ();
8942 shorten_branches (get_insns ());
8943
8944 return pool_list;
8945 }
8946
8947 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
8948 After we have decided to use this list, finish implementing
8949 all changes to the current function as required. */
8950
8951 static void
8952 s390_chunkify_finish (struct constant_pool *pool_list)
8953 {
8954 struct constant_pool *curr_pool = NULL;
8955 rtx_insn *insn;
8956
8957
8958 /* Replace all literal pool references. */
8959
8960 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8961 {
8962 if (INSN_P (insn))
8963 replace_ltrel_base (&PATTERN (insn));
8964
8965 curr_pool = s390_find_pool (pool_list, insn);
8966 if (!curr_pool)
8967 continue;
8968
8969 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8970 {
8971 rtx addr, pool_ref = NULL_RTX;
8972 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8973 if (pool_ref)
8974 {
8975 if (s390_execute_label (insn))
8976 addr = s390_find_execute (curr_pool, insn);
8977 else
8978 addr = s390_find_constant (curr_pool,
8979 get_pool_constant (pool_ref),
8980 get_pool_mode (pool_ref));
8981
8982 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
8983 INSN_CODE (insn) = -1;
8984 }
8985 }
8986 }
8987
8988 /* Dump out all literal pools. */
8989
8990 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
8991 s390_dump_pool (curr_pool, 0);
8992
8993 /* Free pool list. */
8994
8995 while (pool_list)
8996 {
8997 struct constant_pool *next = pool_list->next;
8998 s390_free_pool (pool_list);
8999 pool_list = next;
9000 }
9001 }
9002
9003 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
9004 We have decided we cannot use this list, so revert all changes
9005 to the current function that were done by s390_chunkify_start. */
9006
9007 static void
9008 s390_chunkify_cancel (struct constant_pool *pool_list)
9009 {
9010 struct constant_pool *curr_pool = NULL;
9011 rtx_insn *insn;
9012
9013 /* Remove all pool placeholder insns. */
9014
9015 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
9016 {
9017 /* Did we insert an extra barrier? Remove it. */
9018 rtx_insn *barrier = PREV_INSN (curr_pool->pool_insn);
9019 rtx_insn *jump = barrier? PREV_INSN (barrier) : NULL;
9020 rtx_insn *label = NEXT_INSN (curr_pool->pool_insn);
9021
9022 if (jump && JUMP_P (jump)
9023 && barrier && BARRIER_P (barrier)
9024 && label && LABEL_P (label)
9025 && GET_CODE (PATTERN (jump)) == SET
9026 && SET_DEST (PATTERN (jump)) == pc_rtx
9027 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
9028 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
9029 {
9030 remove_insn (jump);
9031 remove_insn (barrier);
9032 remove_insn (label);
9033 }
9034
9035 remove_insn (curr_pool->pool_insn);
9036 }
9037
9038 /* Remove all base register reload insns. */
9039
9040 for (insn = get_insns (); insn; )
9041 {
9042 rtx_insn *next_insn = NEXT_INSN (insn);
9043
9044 if (NONJUMP_INSN_P (insn)
9045 && GET_CODE (PATTERN (insn)) == SET
9046 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
9047 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
9048 remove_insn (insn);
9049
9050 insn = next_insn;
9051 }
9052
9053 /* Free pool list. */
9054
9055 while (pool_list)
9056 {
9057 struct constant_pool *next = pool_list->next;
9058 s390_free_pool (pool_list);
9059 pool_list = next;
9060 }
9061 }
9062
9063 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
9064
9065 void
9066 s390_output_pool_entry (rtx exp, machine_mode mode, unsigned int align)
9067 {
9068 switch (GET_MODE_CLASS (mode))
9069 {
9070 case MODE_FLOAT:
9071 case MODE_DECIMAL_FLOAT:
9072 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
9073
9074 assemble_real (*CONST_DOUBLE_REAL_VALUE (exp), mode, align);
9075 break;
9076
9077 case MODE_INT:
9078 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
9079 mark_symbol_refs_as_used (exp);
9080 break;
9081
9082 case MODE_VECTOR_INT:
9083 case MODE_VECTOR_FLOAT:
9084 {
9085 int i;
9086 machine_mode inner_mode;
9087 gcc_assert (GET_CODE (exp) == CONST_VECTOR);
9088
9089 inner_mode = GET_MODE_INNER (GET_MODE (exp));
9090 for (i = 0; i < XVECLEN (exp, 0); i++)
9091 s390_output_pool_entry (XVECEXP (exp, 0, i),
9092 inner_mode,
9093 i == 0
9094 ? align
9095 : GET_MODE_BITSIZE (inner_mode));
9096 }
9097 break;
9098
9099 default:
9100 gcc_unreachable ();
9101 }
9102 }
9103
9104
9105 /* Return an RTL expression representing the value of the return address
9106 for the frame COUNT steps up from the current frame. FRAME is the
9107 frame pointer of that frame. */
9108
9109 rtx
9110 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
9111 {
9112 int offset;
9113 rtx addr;
9114
9115 /* Without backchain, we fail for all but the current frame. */
9116
9117 if (!TARGET_BACKCHAIN && count > 0)
9118 return NULL_RTX;
9119
9120 /* For the current frame, we need to make sure the initial
9121 value of RETURN_REGNUM is actually saved. */
9122
9123 if (count == 0)
9124 {
9125 /* On non-z architectures branch splitting could overwrite r14. */
9126 if (TARGET_CPU_ZARCH)
9127 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
9128 else
9129 {
9130 cfun_frame_layout.save_return_addr_p = true;
9131 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
9132 }
9133 }
9134
9135 if (TARGET_PACKED_STACK)
9136 offset = -2 * UNITS_PER_LONG;
9137 else
9138 offset = RETURN_REGNUM * UNITS_PER_LONG;
9139
9140 addr = plus_constant (Pmode, frame, offset);
9141 addr = memory_address (Pmode, addr);
9142 return gen_rtx_MEM (Pmode, addr);
9143 }
9144
9145 /* Return an RTL expression representing the back chain stored in
9146 the current stack frame. */
9147
9148 rtx
9149 s390_back_chain_rtx (void)
9150 {
9151 rtx chain;
9152
9153 gcc_assert (TARGET_BACKCHAIN);
9154
9155 if (TARGET_PACKED_STACK)
9156 chain = plus_constant (Pmode, stack_pointer_rtx,
9157 STACK_POINTER_OFFSET - UNITS_PER_LONG);
9158 else
9159 chain = stack_pointer_rtx;
9160
9161 chain = gen_rtx_MEM (Pmode, chain);
9162 return chain;
9163 }
9164
9165 /* Find first call clobbered register unused in a function.
9166 This could be used as base register in a leaf function
9167 or for holding the return address before epilogue. */
9168
9169 static int
9170 find_unused_clobbered_reg (void)
9171 {
9172 int i;
9173 for (i = 0; i < 6; i++)
9174 if (!df_regs_ever_live_p (i))
9175 return i;
9176 return 0;
9177 }
9178
9179
9180 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
9181 clobbered hard regs in SETREG. */
9182
9183 static void
9184 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
9185 {
9186 char *regs_ever_clobbered = (char *)data;
9187 unsigned int i, regno;
9188 machine_mode mode = GET_MODE (setreg);
9189
9190 if (GET_CODE (setreg) == SUBREG)
9191 {
9192 rtx inner = SUBREG_REG (setreg);
9193 if (!GENERAL_REG_P (inner) && !FP_REG_P (inner))
9194 return;
9195 regno = subreg_regno (setreg);
9196 }
9197 else if (GENERAL_REG_P (setreg) || FP_REG_P (setreg))
9198 regno = REGNO (setreg);
9199 else
9200 return;
9201
9202 for (i = regno;
9203 i < regno + HARD_REGNO_NREGS (regno, mode);
9204 i++)
9205 regs_ever_clobbered[i] = 1;
9206 }
9207
9208 /* Walks through all basic blocks of the current function looking
9209 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
9210 of the passed integer array REGS_EVER_CLOBBERED are set to one for
9211 each of those regs. */
9212
9213 static void
9214 s390_regs_ever_clobbered (char regs_ever_clobbered[])
9215 {
9216 basic_block cur_bb;
9217 rtx_insn *cur_insn;
9218 unsigned int i;
9219
9220 memset (regs_ever_clobbered, 0, 32);
9221
9222 /* For non-leaf functions we have to consider all call clobbered regs to be
9223 clobbered. */
9224 if (!crtl->is_leaf)
9225 {
9226 for (i = 0; i < 32; i++)
9227 regs_ever_clobbered[i] = call_really_used_regs[i];
9228 }
9229
9230 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
9231 this work is done by liveness analysis (mark_regs_live_at_end).
9232 Special care is needed for functions containing landing pads. Landing pads
9233 may use the eh registers, but the code which sets these registers is not
9234 contained in that function. Hence s390_regs_ever_clobbered is not able to
9235 deal with this automatically. */
9236 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
9237 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
9238 if (crtl->calls_eh_return
9239 || (cfun->machine->has_landing_pad_p
9240 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
9241 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
9242
9243 /* For nonlocal gotos all call-saved registers have to be saved.
9244 This flag is also set for the unwinding code in libgcc.
9245 See expand_builtin_unwind_init. For regs_ever_live this is done by
9246 reload. */
9247 if (crtl->saves_all_registers)
9248 for (i = 0; i < 32; i++)
9249 if (!call_really_used_regs[i])
9250 regs_ever_clobbered[i] = 1;
9251
9252 FOR_EACH_BB_FN (cur_bb, cfun)
9253 {
9254 FOR_BB_INSNS (cur_bb, cur_insn)
9255 {
9256 rtx pat;
9257
9258 if (!INSN_P (cur_insn))
9259 continue;
9260
9261 pat = PATTERN (cur_insn);
9262
9263 /* Ignore GPR restore insns. */
9264 if (epilogue_completed && RTX_FRAME_RELATED_P (cur_insn))
9265 {
9266 if (GET_CODE (pat) == SET
9267 && GENERAL_REG_P (SET_DEST (pat)))
9268 {
9269 /* lgdr */
9270 if (GET_MODE (SET_SRC (pat)) == DImode
9271 && FP_REG_P (SET_SRC (pat)))
9272 continue;
9273
9274 /* l / lg */
9275 if (GET_CODE (SET_SRC (pat)) == MEM)
9276 continue;
9277 }
9278
9279 /* lm / lmg */
9280 if (GET_CODE (pat) == PARALLEL
9281 && load_multiple_operation (pat, VOIDmode))
9282 continue;
9283 }
9284
9285 note_stores (pat,
9286 s390_reg_clobbered_rtx,
9287 regs_ever_clobbered);
9288 }
9289 }
9290 }
9291
9292 /* Determine the frame area which actually has to be accessed
9293 in the function epilogue. The values are stored at the
9294 given pointers AREA_BOTTOM (address of the lowest used stack
9295 address) and AREA_TOP (address of the first item which does
9296 not belong to the stack frame). */
9297
9298 static void
9299 s390_frame_area (int *area_bottom, int *area_top)
9300 {
9301 int b, t;
9302
9303 b = INT_MAX;
9304 t = INT_MIN;
9305
9306 if (cfun_frame_layout.first_restore_gpr != -1)
9307 {
9308 b = (cfun_frame_layout.gprs_offset
9309 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
9310 t = b + (cfun_frame_layout.last_restore_gpr
9311 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
9312 }
9313
9314 if (TARGET_64BIT && cfun_save_high_fprs_p)
9315 {
9316 b = MIN (b, cfun_frame_layout.f8_offset);
9317 t = MAX (t, (cfun_frame_layout.f8_offset
9318 + cfun_frame_layout.high_fprs * 8));
9319 }
9320
9321 if (!TARGET_64BIT)
9322 {
9323 if (cfun_fpr_save_p (FPR4_REGNUM))
9324 {
9325 b = MIN (b, cfun_frame_layout.f4_offset);
9326 t = MAX (t, cfun_frame_layout.f4_offset + 8);
9327 }
9328 if (cfun_fpr_save_p (FPR6_REGNUM))
9329 {
9330 b = MIN (b, cfun_frame_layout.f4_offset + 8);
9331 t = MAX (t, cfun_frame_layout.f4_offset + 16);
9332 }
9333 }
9334 *area_bottom = b;
9335 *area_top = t;
9336 }
9337 /* Update gpr_save_slots in the frame layout trying to make use of
9338 FPRs as GPR save slots.
9339 This is a helper routine of s390_register_info. */
9340
9341 static void
9342 s390_register_info_gprtofpr ()
9343 {
9344 int save_reg_slot = FPR0_REGNUM;
9345 int i, j;
9346
9347 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
9348 return;
9349
9350 for (i = 15; i >= 6; i--)
9351 {
9352 if (cfun_gpr_save_slot (i) == SAVE_SLOT_NONE)
9353 continue;
9354
9355 /* Advance to the next FP register which can be used as a
9356 GPR save slot. */
9357 while ((!call_really_used_regs[save_reg_slot]
9358 || df_regs_ever_live_p (save_reg_slot)
9359 || cfun_fpr_save_p (save_reg_slot))
9360 && FP_REGNO_P (save_reg_slot))
9361 save_reg_slot++;
9362 if (!FP_REGNO_P (save_reg_slot))
9363 {
9364 /* We only want to use ldgr/lgdr if we can get rid of
9365 stm/lm entirely. So undo the gpr slot allocation in
9366 case we ran out of FPR save slots. */
9367 for (j = 6; j <= 15; j++)
9368 if (FP_REGNO_P (cfun_gpr_save_slot (j)))
9369 cfun_gpr_save_slot (j) = SAVE_SLOT_STACK;
9370 break;
9371 }
9372 cfun_gpr_save_slot (i) = save_reg_slot++;
9373 }
9374 }
9375
9376 /* Set the bits in fpr_bitmap for FPRs which need to be saved due to
9377 stdarg.
9378 This is a helper routine for s390_register_info. */
9379
9380 static void
9381 s390_register_info_stdarg_fpr ()
9382 {
9383 int i;
9384 int min_fpr;
9385 int max_fpr;
9386
9387 /* Save the FP argument regs for stdarg. f0, f2 for 31 bit and
9388 f0-f4 for 64 bit. */
9389 if (!cfun->stdarg
9390 || !TARGET_HARD_FLOAT
9391 || !cfun->va_list_fpr_size
9392 || crtl->args.info.fprs >= FP_ARG_NUM_REG)
9393 return;
9394
9395 min_fpr = crtl->args.info.fprs;
9396 max_fpr = min_fpr + cfun->va_list_fpr_size - 1;
9397 if (max_fpr >= FP_ARG_NUM_REG)
9398 max_fpr = FP_ARG_NUM_REG - 1;
9399
9400 /* FPR argument regs start at f0. */
9401 min_fpr += FPR0_REGNUM;
9402 max_fpr += FPR0_REGNUM;
9403
9404 for (i = min_fpr; i <= max_fpr; i++)
9405 cfun_set_fpr_save (i);
9406 }
9407
9408 /* Reserve the GPR save slots for GPRs which need to be saved due to
9409 stdarg.
9410 This is a helper routine for s390_register_info. */
9411
9412 static void
9413 s390_register_info_stdarg_gpr ()
9414 {
9415 int i;
9416 int min_gpr;
9417 int max_gpr;
9418
9419 if (!cfun->stdarg
9420 || !cfun->va_list_gpr_size
9421 || crtl->args.info.gprs >= GP_ARG_NUM_REG)
9422 return;
9423
9424 min_gpr = crtl->args.info.gprs;
9425 max_gpr = min_gpr + cfun->va_list_gpr_size - 1;
9426 if (max_gpr >= GP_ARG_NUM_REG)
9427 max_gpr = GP_ARG_NUM_REG - 1;
9428
9429 /* GPR argument regs start at r2. */
9430 min_gpr += GPR2_REGNUM;
9431 max_gpr += GPR2_REGNUM;
9432
9433 /* If r6 was supposed to be saved into an FPR and now needs to go to
9434 the stack for vararg we have to adjust the restore range to make
9435 sure that the restore is done from stack as well. */
9436 if (FP_REGNO_P (cfun_gpr_save_slot (GPR6_REGNUM))
9437 && min_gpr <= GPR6_REGNUM
9438 && max_gpr >= GPR6_REGNUM)
9439 {
9440 if (cfun_frame_layout.first_restore_gpr == -1
9441 || cfun_frame_layout.first_restore_gpr > GPR6_REGNUM)
9442 cfun_frame_layout.first_restore_gpr = GPR6_REGNUM;
9443 if (cfun_frame_layout.last_restore_gpr == -1
9444 || cfun_frame_layout.last_restore_gpr < GPR6_REGNUM)
9445 cfun_frame_layout.last_restore_gpr = GPR6_REGNUM;
9446 }
9447
9448 if (cfun_frame_layout.first_save_gpr == -1
9449 || cfun_frame_layout.first_save_gpr > min_gpr)
9450 cfun_frame_layout.first_save_gpr = min_gpr;
9451
9452 if (cfun_frame_layout.last_save_gpr == -1
9453 || cfun_frame_layout.last_save_gpr < max_gpr)
9454 cfun_frame_layout.last_save_gpr = max_gpr;
9455
9456 for (i = min_gpr; i <= max_gpr; i++)
9457 cfun_gpr_save_slot (i) = SAVE_SLOT_STACK;
9458 }
9459
9460 /* Calculate the save and restore ranges for stm(g) and lm(g) in the
9461 prologue and epilogue. */
9462
9463 static void
9464 s390_register_info_set_ranges ()
9465 {
9466 int i, j;
9467
9468 /* Find the first and the last save slot supposed to use the stack
9469 to set the restore range.
9470 Vararg regs might be marked as save to stack but only the
9471 call-saved regs really need restoring (i.e. r6). This code
9472 assumes that the vararg regs have not yet been recorded in
9473 cfun_gpr_save_slot. */
9474 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != SAVE_SLOT_STACK; i++);
9475 for (j = 15; j > i && cfun_gpr_save_slot (j) != SAVE_SLOT_STACK; j--);
9476 cfun_frame_layout.first_restore_gpr = (i == 16) ? -1 : i;
9477 cfun_frame_layout.last_restore_gpr = (i == 16) ? -1 : j;
9478 cfun_frame_layout.first_save_gpr = (i == 16) ? -1 : i;
9479 cfun_frame_layout.last_save_gpr = (i == 16) ? -1 : j;
9480 }
9481
9482 /* The GPR and FPR save slots in cfun->machine->frame_layout are set
9483 for registers which need to be saved in function prologue.
9484 This function can be used until the insns emitted for save/restore
9485 of the regs are visible in the RTL stream. */
9486
9487 static void
9488 s390_register_info ()
9489 {
9490 int i;
9491 char clobbered_regs[32];
9492
9493 gcc_assert (!epilogue_completed);
9494
9495 if (reload_completed)
9496 /* After reload we rely on our own routine to determine which
9497 registers need saving. */
9498 s390_regs_ever_clobbered (clobbered_regs);
9499 else
9500 /* During reload we use regs_ever_live as a base since reload
9501 does changes in there which we otherwise would not be aware
9502 of. */
9503 for (i = 0; i < 32; i++)
9504 clobbered_regs[i] = df_regs_ever_live_p (i);
9505
9506 for (i = 0; i < 32; i++)
9507 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
9508
9509 /* Mark the call-saved FPRs which need to be saved.
9510 This needs to be done before checking the special GPRs since the
9511 stack pointer usage depends on whether high FPRs have to be saved
9512 or not. */
9513 cfun_frame_layout.fpr_bitmap = 0;
9514 cfun_frame_layout.high_fprs = 0;
9515 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
9516 if (clobbered_regs[i] && !call_really_used_regs[i])
9517 {
9518 cfun_set_fpr_save (i);
9519 if (i >= FPR8_REGNUM)
9520 cfun_frame_layout.high_fprs++;
9521 }
9522
9523 /* Register 12 is used for GOT address, but also as temp in prologue
9524 for split-stack stdarg functions (unless r14 is available). */
9525 clobbered_regs[12]
9526 |= ((flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
9527 || (flag_split_stack && cfun->stdarg
9528 && (crtl->is_leaf || TARGET_TPF_PROFILING
9529 || has_hard_reg_initial_val (Pmode, RETURN_REGNUM))));
9530
9531 clobbered_regs[BASE_REGNUM]
9532 |= (cfun->machine->base_reg
9533 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
9534
9535 clobbered_regs[HARD_FRAME_POINTER_REGNUM]
9536 |= !!frame_pointer_needed;
9537
9538 /* On pre z900 machines this might take until machine dependent
9539 reorg to decide.
9540 save_return_addr_p will only be set on non-zarch machines so
9541 there is no risk that r14 goes into an FPR instead of a stack
9542 slot. */
9543 clobbered_regs[RETURN_REGNUM]
9544 |= (!crtl->is_leaf
9545 || TARGET_TPF_PROFILING
9546 || cfun->machine->split_branches_pending_p
9547 || cfun_frame_layout.save_return_addr_p
9548 || crtl->calls_eh_return);
9549
9550 clobbered_regs[STACK_POINTER_REGNUM]
9551 |= (!crtl->is_leaf
9552 || TARGET_TPF_PROFILING
9553 || cfun_save_high_fprs_p
9554 || get_frame_size () > 0
9555 || (reload_completed && cfun_frame_layout.frame_size > 0)
9556 || cfun->calls_alloca);
9557
9558 memset (cfun_frame_layout.gpr_save_slots, SAVE_SLOT_NONE, 16);
9559
9560 for (i = 6; i < 16; i++)
9561 if (clobbered_regs[i])
9562 cfun_gpr_save_slot (i) = SAVE_SLOT_STACK;
9563
9564 s390_register_info_stdarg_fpr ();
9565 s390_register_info_gprtofpr ();
9566 s390_register_info_set_ranges ();
9567 /* stdarg functions might need to save GPRs 2 to 6. This might
9568 override the GPR->FPR save decision made by
9569 s390_register_info_gprtofpr for r6 since vararg regs must go to
9570 the stack. */
9571 s390_register_info_stdarg_gpr ();
9572 }
9573
9574 /* This function is called by s390_optimize_prologue in order to get
9575 rid of unnecessary GPR save/restore instructions. The register info
9576 for the GPRs is re-computed and the ranges are re-calculated. */
9577
9578 static void
9579 s390_optimize_register_info ()
9580 {
9581 char clobbered_regs[32];
9582 int i;
9583
9584 gcc_assert (epilogue_completed);
9585 gcc_assert (!cfun->machine->split_branches_pending_p);
9586
9587 s390_regs_ever_clobbered (clobbered_regs);
9588
9589 for (i = 0; i < 32; i++)
9590 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
9591
9592 /* There is still special treatment needed for cases invisible to
9593 s390_regs_ever_clobbered. */
9594 clobbered_regs[RETURN_REGNUM]
9595 |= (TARGET_TPF_PROFILING
9596 /* When expanding builtin_return_addr in ESA mode we do not
9597 know whether r14 will later be needed as scratch reg when
9598 doing branch splitting. So the builtin always accesses the
9599 r14 save slot and we need to stick to the save/restore
9600 decision for r14 even if it turns out that it didn't get
9601 clobbered. */
9602 || cfun_frame_layout.save_return_addr_p
9603 || crtl->calls_eh_return);
9604
9605 memset (cfun_frame_layout.gpr_save_slots, SAVE_SLOT_NONE, 6);
9606
9607 for (i = 6; i < 16; i++)
9608 if (!clobbered_regs[i])
9609 cfun_gpr_save_slot (i) = SAVE_SLOT_NONE;
9610
9611 s390_register_info_set_ranges ();
9612 s390_register_info_stdarg_gpr ();
9613 }
9614
9615 /* Fill cfun->machine with info about frame of current function. */
9616
9617 static void
9618 s390_frame_info (void)
9619 {
9620 HOST_WIDE_INT lowest_offset;
9621
9622 cfun_frame_layout.first_save_gpr_slot = cfun_frame_layout.first_save_gpr;
9623 cfun_frame_layout.last_save_gpr_slot = cfun_frame_layout.last_save_gpr;
9624
9625 /* The va_arg builtin uses a constant distance of 16 *
9626 UNITS_PER_LONG (r0-r15) to reach the FPRs from the reg_save_area
9627 pointer. So even if we are going to save the stack pointer in an
9628 FPR we need the stack space in order to keep the offsets
9629 correct. */
9630 if (cfun->stdarg && cfun_save_arg_fprs_p)
9631 {
9632 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
9633
9634 if (cfun_frame_layout.first_save_gpr_slot == -1)
9635 cfun_frame_layout.first_save_gpr_slot = STACK_POINTER_REGNUM;
9636 }
9637
9638 cfun_frame_layout.frame_size = get_frame_size ();
9639 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
9640 fatal_error (input_location,
9641 "total size of local variables exceeds architecture limit");
9642
9643 if (!TARGET_PACKED_STACK)
9644 {
9645 /* Fixed stack layout. */
9646 cfun_frame_layout.backchain_offset = 0;
9647 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
9648 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
9649 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
9650 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
9651 * UNITS_PER_LONG);
9652 }
9653 else if (TARGET_BACKCHAIN)
9654 {
9655 /* Kernel stack layout - packed stack, backchain, no float */
9656 gcc_assert (TARGET_SOFT_FLOAT);
9657 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
9658 - UNITS_PER_LONG);
9659
9660 /* The distance between the backchain and the return address
9661 save slot must not change. So we always need a slot for the
9662 stack pointer which resides in between. */
9663 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
9664
9665 cfun_frame_layout.gprs_offset
9666 = cfun_frame_layout.backchain_offset - cfun_gprs_save_area_size;
9667
9668 /* FPRs will not be saved. Nevertheless pick sane values to
9669 keep area calculations valid. */
9670 cfun_frame_layout.f0_offset =
9671 cfun_frame_layout.f4_offset =
9672 cfun_frame_layout.f8_offset = cfun_frame_layout.gprs_offset;
9673 }
9674 else
9675 {
9676 int num_fprs;
9677
9678 /* Packed stack layout without backchain. */
9679
9680 /* With stdarg FPRs need their dedicated slots. */
9681 num_fprs = (TARGET_64BIT && cfun->stdarg ? 2
9682 : (cfun_fpr_save_p (FPR4_REGNUM) +
9683 cfun_fpr_save_p (FPR6_REGNUM)));
9684 cfun_frame_layout.f4_offset = STACK_POINTER_OFFSET - 8 * num_fprs;
9685
9686 num_fprs = (cfun->stdarg ? 2
9687 : (cfun_fpr_save_p (FPR0_REGNUM)
9688 + cfun_fpr_save_p (FPR2_REGNUM)));
9689 cfun_frame_layout.f0_offset = cfun_frame_layout.f4_offset - 8 * num_fprs;
9690
9691 cfun_frame_layout.gprs_offset
9692 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
9693
9694 cfun_frame_layout.f8_offset = (cfun_frame_layout.gprs_offset
9695 - cfun_frame_layout.high_fprs * 8);
9696 }
9697
9698 if (cfun_save_high_fprs_p)
9699 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
9700
9701 if (!crtl->is_leaf)
9702 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
9703
9704 /* In the following cases we have to allocate a STACK_POINTER_OFFSET
9705 sized area at the bottom of the stack. This is required also for
9706 leaf functions. When GCC generates a local stack reference it
9707 will always add STACK_POINTER_OFFSET to all these references. */
9708 if (crtl->is_leaf
9709 && !TARGET_TPF_PROFILING
9710 && cfun_frame_layout.frame_size == 0
9711 && !cfun->calls_alloca)
9712 return;
9713
9714 /* Calculate the number of bytes we have used in our own register
9715 save area. With the packed stack layout we can re-use the
9716 remaining bytes for normal stack elements. */
9717
9718 if (TARGET_PACKED_STACK)
9719 lowest_offset = MIN (MIN (cfun_frame_layout.f0_offset,
9720 cfun_frame_layout.f4_offset),
9721 cfun_frame_layout.gprs_offset);
9722 else
9723 lowest_offset = 0;
9724
9725 if (TARGET_BACKCHAIN)
9726 lowest_offset = MIN (lowest_offset, cfun_frame_layout.backchain_offset);
9727
9728 cfun_frame_layout.frame_size += STACK_POINTER_OFFSET - lowest_offset;
9729
9730 /* If under 31 bit an odd number of gprs has to be saved we have to
9731 adjust the frame size to sustain 8 byte alignment of stack
9732 frames. */
9733 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
9734 STACK_BOUNDARY / BITS_PER_UNIT - 1)
9735 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
9736 }
9737
9738 /* Generate frame layout. Fills in register and frame data for the current
9739 function in cfun->machine. This routine can be called multiple times;
9740 it will re-do the complete frame layout every time. */
9741
9742 static void
9743 s390_init_frame_layout (void)
9744 {
9745 HOST_WIDE_INT frame_size;
9746 int base_used;
9747
9748 /* After LRA the frame layout is supposed to be read-only and should
9749 not be re-computed. */
9750 if (reload_completed)
9751 return;
9752
9753 /* On S/390 machines, we may need to perform branch splitting, which
9754 will require both base and return address register. We have no
9755 choice but to assume we're going to need them until right at the
9756 end of the machine dependent reorg phase. */
9757 if (!TARGET_CPU_ZARCH)
9758 cfun->machine->split_branches_pending_p = true;
9759
9760 do
9761 {
9762 frame_size = cfun_frame_layout.frame_size;
9763
9764 /* Try to predict whether we'll need the base register. */
9765 base_used = cfun->machine->split_branches_pending_p
9766 || crtl->uses_const_pool
9767 || (!DISP_IN_RANGE (frame_size)
9768 && !CONST_OK_FOR_K (frame_size));
9769
9770 /* Decide which register to use as literal pool base. In small
9771 leaf functions, try to use an unused call-clobbered register
9772 as base register to avoid save/restore overhead. */
9773 if (!base_used)
9774 cfun->machine->base_reg = NULL_RTX;
9775 else
9776 {
9777 int br = 0;
9778
9779 if (crtl->is_leaf)
9780 /* Prefer r5 (most likely to be free). */
9781 for (br = 5; br >= 2 && df_regs_ever_live_p (br); br--)
9782 ;
9783 cfun->machine->base_reg =
9784 gen_rtx_REG (Pmode, (br >= 2) ? br : BASE_REGNUM);
9785 }
9786
9787 s390_register_info ();
9788 s390_frame_info ();
9789 }
9790 while (frame_size != cfun_frame_layout.frame_size);
9791 }
9792
9793 /* Remove the FPR clobbers from a tbegin insn if it can be proven that
9794 the TX is nonescaping. A transaction is considered escaping if
9795 there is at least one path from tbegin returning CC0 to the
9796 function exit block without an tend.
9797
9798 The check so far has some limitations:
9799 - only single tbegin/tend BBs are supported
9800 - the first cond jump after tbegin must separate the CC0 path from ~CC0
9801 - when CC is copied to a GPR and the CC0 check is done with the GPR
9802 this is not supported
9803 */
9804
9805 static void
9806 s390_optimize_nonescaping_tx (void)
9807 {
9808 const unsigned int CC0 = 1 << 3;
9809 basic_block tbegin_bb = NULL;
9810 basic_block tend_bb = NULL;
9811 basic_block bb;
9812 rtx_insn *insn;
9813 bool result = true;
9814 int bb_index;
9815 rtx_insn *tbegin_insn = NULL;
9816
9817 if (!cfun->machine->tbegin_p)
9818 return;
9819
9820 for (bb_index = 0; bb_index < n_basic_blocks_for_fn (cfun); bb_index++)
9821 {
9822 bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
9823
9824 if (!bb)
9825 continue;
9826
9827 FOR_BB_INSNS (bb, insn)
9828 {
9829 rtx ite, cc, pat, target;
9830 unsigned HOST_WIDE_INT mask;
9831
9832 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
9833 continue;
9834
9835 pat = PATTERN (insn);
9836
9837 if (GET_CODE (pat) == PARALLEL)
9838 pat = XVECEXP (pat, 0, 0);
9839
9840 if (GET_CODE (pat) != SET
9841 || GET_CODE (SET_SRC (pat)) != UNSPEC_VOLATILE)
9842 continue;
9843
9844 if (XINT (SET_SRC (pat), 1) == UNSPECV_TBEGIN)
9845 {
9846 rtx_insn *tmp;
9847
9848 tbegin_insn = insn;
9849
9850 /* Just return if the tbegin doesn't have clobbers. */
9851 if (GET_CODE (PATTERN (insn)) != PARALLEL)
9852 return;
9853
9854 if (tbegin_bb != NULL)
9855 return;
9856
9857 /* Find the next conditional jump. */
9858 for (tmp = NEXT_INSN (insn);
9859 tmp != NULL_RTX;
9860 tmp = NEXT_INSN (tmp))
9861 {
9862 if (reg_set_p (gen_rtx_REG (CCmode, CC_REGNUM), tmp))
9863 return;
9864 if (!JUMP_P (tmp))
9865 continue;
9866
9867 ite = SET_SRC (PATTERN (tmp));
9868 if (GET_CODE (ite) != IF_THEN_ELSE)
9869 continue;
9870
9871 cc = XEXP (XEXP (ite, 0), 0);
9872 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc))
9873 || GET_MODE (cc) != CCRAWmode
9874 || GET_CODE (XEXP (XEXP (ite, 0), 1)) != CONST_INT)
9875 return;
9876
9877 if (bb->succs->length () != 2)
9878 return;
9879
9880 mask = INTVAL (XEXP (XEXP (ite, 0), 1));
9881 if (GET_CODE (XEXP (ite, 0)) == NE)
9882 mask ^= 0xf;
9883
9884 if (mask == CC0)
9885 target = XEXP (ite, 1);
9886 else if (mask == (CC0 ^ 0xf))
9887 target = XEXP (ite, 2);
9888 else
9889 return;
9890
9891 {
9892 edge_iterator ei;
9893 edge e1, e2;
9894
9895 ei = ei_start (bb->succs);
9896 e1 = ei_safe_edge (ei);
9897 ei_next (&ei);
9898 e2 = ei_safe_edge (ei);
9899
9900 if (e2->flags & EDGE_FALLTHRU)
9901 {
9902 e2 = e1;
9903 e1 = ei_safe_edge (ei);
9904 }
9905
9906 if (!(e1->flags & EDGE_FALLTHRU))
9907 return;
9908
9909 tbegin_bb = (target == pc_rtx) ? e1->dest : e2->dest;
9910 }
9911 if (tmp == BB_END (bb))
9912 break;
9913 }
9914 }
9915
9916 if (XINT (SET_SRC (pat), 1) == UNSPECV_TEND)
9917 {
9918 if (tend_bb != NULL)
9919 return;
9920 tend_bb = bb;
9921 }
9922 }
9923 }
9924
9925 /* Either we successfully remove the FPR clobbers here or we are not
9926 able to do anything for this TX. Both cases don't qualify for
9927 another look. */
9928 cfun->machine->tbegin_p = false;
9929
9930 if (tbegin_bb == NULL || tend_bb == NULL)
9931 return;
9932
9933 calculate_dominance_info (CDI_POST_DOMINATORS);
9934 result = dominated_by_p (CDI_POST_DOMINATORS, tbegin_bb, tend_bb);
9935 free_dominance_info (CDI_POST_DOMINATORS);
9936
9937 if (!result)
9938 return;
9939
9940 PATTERN (tbegin_insn) = gen_rtx_PARALLEL (VOIDmode,
9941 gen_rtvec (2,
9942 XVECEXP (PATTERN (tbegin_insn), 0, 0),
9943 XVECEXP (PATTERN (tbegin_insn), 0, 1)));
9944 INSN_CODE (tbegin_insn) = -1;
9945 df_insn_rescan (tbegin_insn);
9946
9947 return;
9948 }
9949
9950 /* Return true if it is legal to put a value with MODE into REGNO. */
9951
9952 bool
9953 s390_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
9954 {
9955 if (!TARGET_VX && VECTOR_NOFP_REGNO_P (regno))
9956 return false;
9957
9958 switch (REGNO_REG_CLASS (regno))
9959 {
9960 case VEC_REGS:
9961 return ((GET_MODE_CLASS (mode) == MODE_INT
9962 && s390_class_max_nregs (VEC_REGS, mode) == 1)
9963 || mode == DFmode
9964 || s390_vector_mode_supported_p (mode));
9965 break;
9966 case FP_REGS:
9967 if (TARGET_VX
9968 && ((GET_MODE_CLASS (mode) == MODE_INT
9969 && s390_class_max_nregs (FP_REGS, mode) == 1)
9970 || mode == DFmode
9971 || s390_vector_mode_supported_p (mode)))
9972 return true;
9973
9974 if (REGNO_PAIR_OK (regno, mode))
9975 {
9976 if (mode == SImode || mode == DImode)
9977 return true;
9978
9979 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
9980 return true;
9981 }
9982 break;
9983 case ADDR_REGS:
9984 if (FRAME_REGNO_P (regno) && mode == Pmode)
9985 return true;
9986
9987 /* fallthrough */
9988 case GENERAL_REGS:
9989 if (REGNO_PAIR_OK (regno, mode))
9990 {
9991 if (TARGET_ZARCH
9992 || (mode != TFmode && mode != TCmode && mode != TDmode))
9993 return true;
9994 }
9995 break;
9996 case CC_REGS:
9997 if (GET_MODE_CLASS (mode) == MODE_CC)
9998 return true;
9999 break;
10000 case ACCESS_REGS:
10001 if (REGNO_PAIR_OK (regno, mode))
10002 {
10003 if (mode == SImode || mode == Pmode)
10004 return true;
10005 }
10006 break;
10007 default:
10008 return false;
10009 }
10010
10011 return false;
10012 }
10013
10014 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
10015
10016 bool
10017 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
10018 {
10019 /* Once we've decided upon a register to use as base register, it must
10020 no longer be used for any other purpose. */
10021 if (cfun->machine->base_reg)
10022 if (REGNO (cfun->machine->base_reg) == old_reg
10023 || REGNO (cfun->machine->base_reg) == new_reg)
10024 return false;
10025
10026 /* Prevent regrename from using call-saved regs which haven't
10027 actually been saved. This is necessary since regrename assumes
10028 the backend save/restore decisions are based on
10029 df_regs_ever_live. Since we have our own routine we have to tell
10030 regrename manually about it. */
10031 if (GENERAL_REGNO_P (new_reg)
10032 && !call_really_used_regs[new_reg]
10033 && cfun_gpr_save_slot (new_reg) == SAVE_SLOT_NONE)
10034 return false;
10035
10036 return true;
10037 }
10038
10039 /* Return nonzero if register REGNO can be used as a scratch register
10040 in peephole2. */
10041
10042 static bool
10043 s390_hard_regno_scratch_ok (unsigned int regno)
10044 {
10045 /* See s390_hard_regno_rename_ok. */
10046 if (GENERAL_REGNO_P (regno)
10047 && !call_really_used_regs[regno]
10048 && cfun_gpr_save_slot (regno) == SAVE_SLOT_NONE)
10049 return false;
10050
10051 return true;
10052 }
10053
10054 /* Maximum number of registers to represent a value of mode MODE
10055 in a register of class RCLASS. */
10056
10057 int
10058 s390_class_max_nregs (enum reg_class rclass, machine_mode mode)
10059 {
10060 int reg_size;
10061 bool reg_pair_required_p = false;
10062
10063 switch (rclass)
10064 {
10065 case FP_REGS:
10066 case VEC_REGS:
10067 reg_size = TARGET_VX ? 16 : 8;
10068
10069 /* TF and TD modes would fit into a VR but we put them into a
10070 register pair since we do not have 128bit FP instructions on
10071 full VRs. */
10072 if (TARGET_VX
10073 && SCALAR_FLOAT_MODE_P (mode)
10074 && GET_MODE_SIZE (mode) >= 16)
10075 reg_pair_required_p = true;
10076
10077 /* Even if complex types would fit into a single FPR/VR we force
10078 them into a register pair to deal with the parts more easily.
10079 (FIXME: What about complex ints?) */
10080 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
10081 reg_pair_required_p = true;
10082 break;
10083 case ACCESS_REGS:
10084 reg_size = 4;
10085 break;
10086 default:
10087 reg_size = UNITS_PER_WORD;
10088 break;
10089 }
10090
10091 if (reg_pair_required_p)
10092 return 2 * ((GET_MODE_SIZE (mode) / 2 + reg_size - 1) / reg_size);
10093
10094 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
10095 }
10096
10097 /* Return TRUE if changing mode from FROM to TO should not be allowed
10098 for register class CLASS. */
10099
10100 int
10101 s390_cannot_change_mode_class (machine_mode from_mode,
10102 machine_mode to_mode,
10103 enum reg_class rclass)
10104 {
10105 machine_mode small_mode;
10106 machine_mode big_mode;
10107
10108 if (GET_MODE_SIZE (from_mode) == GET_MODE_SIZE (to_mode))
10109 return 0;
10110
10111 if (GET_MODE_SIZE (from_mode) < GET_MODE_SIZE (to_mode))
10112 {
10113 small_mode = from_mode;
10114 big_mode = to_mode;
10115 }
10116 else
10117 {
10118 small_mode = to_mode;
10119 big_mode = from_mode;
10120 }
10121
10122 /* Values residing in VRs are little-endian style. All modes are
10123 placed left-aligned in an VR. This means that we cannot allow
10124 switching between modes with differing sizes. Also if the vector
10125 facility is available we still place TFmode values in VR register
10126 pairs, since the only instructions we have operating on TFmodes
10127 only deal with register pairs. Therefore we have to allow DFmode
10128 subregs of TFmodes to enable the TFmode splitters. */
10129 if (reg_classes_intersect_p (VEC_REGS, rclass)
10130 && (GET_MODE_SIZE (small_mode) < 8
10131 || s390_class_max_nregs (VEC_REGS, big_mode) == 1))
10132 return 1;
10133
10134 /* Likewise for access registers, since they have only half the
10135 word size on 64-bit. */
10136 if (reg_classes_intersect_p (ACCESS_REGS, rclass))
10137 return 1;
10138
10139 return 0;
10140 }
10141
10142 /* Return true if we use LRA instead of reload pass. */
10143 static bool
10144 s390_lra_p (void)
10145 {
10146 return s390_lra_flag;
10147 }
10148
10149 /* Return true if register FROM can be eliminated via register TO. */
10150
10151 static bool
10152 s390_can_eliminate (const int from, const int to)
10153 {
10154 /* On zSeries machines, we have not marked the base register as fixed.
10155 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
10156 If a function requires the base register, we say here that this
10157 elimination cannot be performed. This will cause reload to free
10158 up the base register (as if it were fixed). On the other hand,
10159 if the current function does *not* require the base register, we
10160 say here the elimination succeeds, which in turn allows reload
10161 to allocate the base register for any other purpose. */
10162 if (from == BASE_REGNUM && to == BASE_REGNUM)
10163 {
10164 if (TARGET_CPU_ZARCH)
10165 {
10166 s390_init_frame_layout ();
10167 return cfun->machine->base_reg == NULL_RTX;
10168 }
10169
10170 return false;
10171 }
10172
10173 /* Everything else must point into the stack frame. */
10174 gcc_assert (to == STACK_POINTER_REGNUM
10175 || to == HARD_FRAME_POINTER_REGNUM);
10176
10177 gcc_assert (from == FRAME_POINTER_REGNUM
10178 || from == ARG_POINTER_REGNUM
10179 || from == RETURN_ADDRESS_POINTER_REGNUM);
10180
10181 /* Make sure we actually saved the return address. */
10182 if (from == RETURN_ADDRESS_POINTER_REGNUM)
10183 if (!crtl->calls_eh_return
10184 && !cfun->stdarg
10185 && !cfun_frame_layout.save_return_addr_p)
10186 return false;
10187
10188 return true;
10189 }
10190
10191 /* Return offset between register FROM and TO initially after prolog. */
10192
10193 HOST_WIDE_INT
10194 s390_initial_elimination_offset (int from, int to)
10195 {
10196 HOST_WIDE_INT offset;
10197
10198 /* ??? Why are we called for non-eliminable pairs? */
10199 if (!s390_can_eliminate (from, to))
10200 return 0;
10201
10202 switch (from)
10203 {
10204 case FRAME_POINTER_REGNUM:
10205 offset = (get_frame_size()
10206 + STACK_POINTER_OFFSET
10207 + crtl->outgoing_args_size);
10208 break;
10209
10210 case ARG_POINTER_REGNUM:
10211 s390_init_frame_layout ();
10212 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
10213 break;
10214
10215 case RETURN_ADDRESS_POINTER_REGNUM:
10216 s390_init_frame_layout ();
10217
10218 if (cfun_frame_layout.first_save_gpr_slot == -1)
10219 {
10220 /* If it turns out that for stdarg nothing went into the reg
10221 save area we also do not need the return address
10222 pointer. */
10223 if (cfun->stdarg && !cfun_save_arg_fprs_p)
10224 return 0;
10225
10226 gcc_unreachable ();
10227 }
10228
10229 /* In order to make the following work it is not necessary for
10230 r14 to have a save slot. It is sufficient if one other GPR
10231 got one. Since the GPRs are always stored without gaps we
10232 are able to calculate where the r14 save slot would
10233 reside. */
10234 offset = (cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset +
10235 (RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot) *
10236 UNITS_PER_LONG);
10237 break;
10238
10239 case BASE_REGNUM:
10240 offset = 0;
10241 break;
10242
10243 default:
10244 gcc_unreachable ();
10245 }
10246
10247 return offset;
10248 }
10249
10250 /* Emit insn to save fpr REGNUM at offset OFFSET relative
10251 to register BASE. Return generated insn. */
10252
10253 static rtx
10254 save_fpr (rtx base, int offset, int regnum)
10255 {
10256 rtx addr;
10257 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
10258
10259 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
10260 set_mem_alias_set (addr, get_varargs_alias_set ());
10261 else
10262 set_mem_alias_set (addr, get_frame_alias_set ());
10263
10264 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
10265 }
10266
10267 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
10268 to register BASE. Return generated insn. */
10269
10270 static rtx
10271 restore_fpr (rtx base, int offset, int regnum)
10272 {
10273 rtx addr;
10274 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
10275 set_mem_alias_set (addr, get_frame_alias_set ());
10276
10277 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
10278 }
10279
10280 /* Return true if REGNO is a global register, but not one
10281 of the special ones that need to be saved/restored in anyway. */
10282
10283 static inline bool
10284 global_not_special_regno_p (int regno)
10285 {
10286 return (global_regs[regno]
10287 /* These registers are special and need to be
10288 restored in any case. */
10289 && !(regno == STACK_POINTER_REGNUM
10290 || regno == RETURN_REGNUM
10291 || regno == BASE_REGNUM
10292 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
10293 }
10294
10295 /* Generate insn to save registers FIRST to LAST into
10296 the register save area located at offset OFFSET
10297 relative to register BASE. */
10298
10299 static rtx
10300 save_gprs (rtx base, int offset, int first, int last)
10301 {
10302 rtx addr, insn, note;
10303 int i;
10304
10305 addr = plus_constant (Pmode, base, offset);
10306 addr = gen_rtx_MEM (Pmode, addr);
10307
10308 set_mem_alias_set (addr, get_frame_alias_set ());
10309
10310 /* Special-case single register. */
10311 if (first == last)
10312 {
10313 if (TARGET_64BIT)
10314 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
10315 else
10316 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
10317
10318 if (!global_not_special_regno_p (first))
10319 RTX_FRAME_RELATED_P (insn) = 1;
10320 return insn;
10321 }
10322
10323
10324 insn = gen_store_multiple (addr,
10325 gen_rtx_REG (Pmode, first),
10326 GEN_INT (last - first + 1));
10327
10328 if (first <= 6 && cfun->stdarg)
10329 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
10330 {
10331 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
10332
10333 if (first + i <= 6)
10334 set_mem_alias_set (mem, get_varargs_alias_set ());
10335 }
10336
10337 /* We need to set the FRAME_RELATED flag on all SETs
10338 inside the store-multiple pattern.
10339
10340 However, we must not emit DWARF records for registers 2..5
10341 if they are stored for use by variable arguments ...
10342
10343 ??? Unfortunately, it is not enough to simply not the
10344 FRAME_RELATED flags for those SETs, because the first SET
10345 of the PARALLEL is always treated as if it had the flag
10346 set, even if it does not. Therefore we emit a new pattern
10347 without those registers as REG_FRAME_RELATED_EXPR note. */
10348
10349 if (first >= 6 && !global_not_special_regno_p (first))
10350 {
10351 rtx pat = PATTERN (insn);
10352
10353 for (i = 0; i < XVECLEN (pat, 0); i++)
10354 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
10355 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
10356 0, i)))))
10357 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
10358
10359 RTX_FRAME_RELATED_P (insn) = 1;
10360 }
10361 else if (last >= 6)
10362 {
10363 int start;
10364
10365 for (start = first >= 6 ? first : 6; start <= last; start++)
10366 if (!global_not_special_regno_p (start))
10367 break;
10368
10369 if (start > last)
10370 return insn;
10371
10372 addr = plus_constant (Pmode, base,
10373 offset + (start - first) * UNITS_PER_LONG);
10374
10375 if (start == last)
10376 {
10377 if (TARGET_64BIT)
10378 note = gen_movdi (gen_rtx_MEM (Pmode, addr),
10379 gen_rtx_REG (Pmode, start));
10380 else
10381 note = gen_movsi (gen_rtx_MEM (Pmode, addr),
10382 gen_rtx_REG (Pmode, start));
10383 note = PATTERN (note);
10384
10385 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
10386 RTX_FRAME_RELATED_P (insn) = 1;
10387
10388 return insn;
10389 }
10390
10391 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
10392 gen_rtx_REG (Pmode, start),
10393 GEN_INT (last - start + 1));
10394 note = PATTERN (note);
10395
10396 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
10397
10398 for (i = 0; i < XVECLEN (note, 0); i++)
10399 if (GET_CODE (XVECEXP (note, 0, i)) == SET
10400 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
10401 0, i)))))
10402 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
10403
10404 RTX_FRAME_RELATED_P (insn) = 1;
10405 }
10406
10407 return insn;
10408 }
10409
10410 /* Generate insn to restore registers FIRST to LAST from
10411 the register save area located at offset OFFSET
10412 relative to register BASE. */
10413
10414 static rtx
10415 restore_gprs (rtx base, int offset, int first, int last)
10416 {
10417 rtx addr, insn;
10418
10419 addr = plus_constant (Pmode, base, offset);
10420 addr = gen_rtx_MEM (Pmode, addr);
10421 set_mem_alias_set (addr, get_frame_alias_set ());
10422
10423 /* Special-case single register. */
10424 if (first == last)
10425 {
10426 if (TARGET_64BIT)
10427 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
10428 else
10429 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
10430
10431 RTX_FRAME_RELATED_P (insn) = 1;
10432 return insn;
10433 }
10434
10435 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
10436 addr,
10437 GEN_INT (last - first + 1));
10438 RTX_FRAME_RELATED_P (insn) = 1;
10439 return insn;
10440 }
10441
10442 /* Return insn sequence to load the GOT register. */
10443
10444 static GTY(()) rtx got_symbol;
10445 rtx_insn *
10446 s390_load_got (void)
10447 {
10448 rtx_insn *insns;
10449
10450 /* We cannot use pic_offset_table_rtx here since we use this
10451 function also for non-pic if __tls_get_offset is called and in
10452 that case PIC_OFFSET_TABLE_REGNUM as well as pic_offset_table_rtx
10453 aren't usable. */
10454 rtx got_rtx = gen_rtx_REG (Pmode, 12);
10455
10456 if (!got_symbol)
10457 {
10458 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
10459 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
10460 }
10461
10462 start_sequence ();
10463
10464 if (TARGET_CPU_ZARCH)
10465 {
10466 emit_move_insn (got_rtx, got_symbol);
10467 }
10468 else
10469 {
10470 rtx offset;
10471
10472 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
10473 UNSPEC_LTREL_OFFSET);
10474 offset = gen_rtx_CONST (Pmode, offset);
10475 offset = force_const_mem (Pmode, offset);
10476
10477 emit_move_insn (got_rtx, offset);
10478
10479 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
10480 UNSPEC_LTREL_BASE);
10481 offset = gen_rtx_PLUS (Pmode, got_rtx, offset);
10482
10483 emit_move_insn (got_rtx, offset);
10484 }
10485
10486 insns = get_insns ();
10487 end_sequence ();
10488 return insns;
10489 }
10490
10491 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
10492 and the change to the stack pointer. */
10493
10494 static void
10495 s390_emit_stack_tie (void)
10496 {
10497 rtx mem = gen_frame_mem (BLKmode,
10498 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
10499
10500 emit_insn (gen_stack_tie (mem));
10501 }
10502
10503 /* Copy GPRS into FPR save slots. */
10504
10505 static void
10506 s390_save_gprs_to_fprs (void)
10507 {
10508 int i;
10509
10510 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
10511 return;
10512
10513 for (i = 6; i < 16; i++)
10514 {
10515 if (FP_REGNO_P (cfun_gpr_save_slot (i)))
10516 {
10517 rtx_insn *insn =
10518 emit_move_insn (gen_rtx_REG (DImode, cfun_gpr_save_slot (i)),
10519 gen_rtx_REG (DImode, i));
10520 RTX_FRAME_RELATED_P (insn) = 1;
10521 /* This prevents dwarf2cfi from interpreting the set. Doing
10522 so it might emit def_cfa_register infos setting an FPR as
10523 new CFA. */
10524 add_reg_note (insn, REG_CFA_REGISTER, PATTERN (insn));
10525 }
10526 }
10527 }
10528
10529 /* Restore GPRs from FPR save slots. */
10530
10531 static void
10532 s390_restore_gprs_from_fprs (void)
10533 {
10534 int i;
10535
10536 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
10537 return;
10538
10539 for (i = 6; i < 16; i++)
10540 {
10541 rtx_insn *insn;
10542
10543 if (!FP_REGNO_P (cfun_gpr_save_slot (i)))
10544 continue;
10545
10546 rtx fpr = gen_rtx_REG (DImode, cfun_gpr_save_slot (i));
10547
10548 if (i == STACK_POINTER_REGNUM)
10549 insn = emit_insn (gen_stack_restore_from_fpr (fpr));
10550 else
10551 insn = emit_move_insn (gen_rtx_REG (DImode, i), fpr);
10552
10553 df_set_regs_ever_live (i, true);
10554 add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, i));
10555 if (i == STACK_POINTER_REGNUM)
10556 add_reg_note (insn, REG_CFA_DEF_CFA,
10557 plus_constant (Pmode, stack_pointer_rtx,
10558 STACK_POINTER_OFFSET));
10559 RTX_FRAME_RELATED_P (insn) = 1;
10560 }
10561 }
10562
10563
10564 /* A pass run immediately before shrink-wrapping and prologue and epilogue
10565 generation. */
10566
10567 namespace {
10568
10569 const pass_data pass_data_s390_early_mach =
10570 {
10571 RTL_PASS, /* type */
10572 "early_mach", /* name */
10573 OPTGROUP_NONE, /* optinfo_flags */
10574 TV_MACH_DEP, /* tv_id */
10575 0, /* properties_required */
10576 0, /* properties_provided */
10577 0, /* properties_destroyed */
10578 0, /* todo_flags_start */
10579 ( TODO_df_verify | TODO_df_finish ), /* todo_flags_finish */
10580 };
10581
10582 class pass_s390_early_mach : public rtl_opt_pass
10583 {
10584 public:
10585 pass_s390_early_mach (gcc::context *ctxt)
10586 : rtl_opt_pass (pass_data_s390_early_mach, ctxt)
10587 {}
10588
10589 /* opt_pass methods: */
10590 virtual unsigned int execute (function *);
10591
10592 }; // class pass_s390_early_mach
10593
10594 unsigned int
10595 pass_s390_early_mach::execute (function *fun)
10596 {
10597 rtx_insn *insn;
10598
10599 /* Try to get rid of the FPR clobbers. */
10600 s390_optimize_nonescaping_tx ();
10601
10602 /* Re-compute register info. */
10603 s390_register_info ();
10604
10605 /* If we're using a base register, ensure that it is always valid for
10606 the first non-prologue instruction. */
10607 if (fun->machine->base_reg)
10608 emit_insn_at_entry (gen_main_pool (fun->machine->base_reg));
10609
10610 /* Annotate all constant pool references to let the scheduler know
10611 they implicitly use the base register. */
10612 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10613 if (INSN_P (insn))
10614 {
10615 annotate_constant_pool_refs (&PATTERN (insn));
10616 df_insn_rescan (insn);
10617 }
10618 return 0;
10619 }
10620
10621 } // anon namespace
10622
10623 /* Expand the prologue into a bunch of separate insns. */
10624
10625 void
10626 s390_emit_prologue (void)
10627 {
10628 rtx insn, addr;
10629 rtx temp_reg;
10630 int i;
10631 int offset;
10632 int next_fpr = 0;
10633
10634 /* Choose best register to use for temp use within prologue.
10635 TPF with profiling must avoid the register 14 - the tracing function
10636 needs the original contents of r14 to be preserved. */
10637
10638 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
10639 && !crtl->is_leaf
10640 && !TARGET_TPF_PROFILING)
10641 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
10642 else if (flag_split_stack && cfun->stdarg)
10643 temp_reg = gen_rtx_REG (Pmode, 12);
10644 else
10645 temp_reg = gen_rtx_REG (Pmode, 1);
10646
10647 s390_save_gprs_to_fprs ();
10648
10649 /* Save call saved gprs. */
10650 if (cfun_frame_layout.first_save_gpr != -1)
10651 {
10652 insn = save_gprs (stack_pointer_rtx,
10653 cfun_frame_layout.gprs_offset +
10654 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
10655 - cfun_frame_layout.first_save_gpr_slot),
10656 cfun_frame_layout.first_save_gpr,
10657 cfun_frame_layout.last_save_gpr);
10658 emit_insn (insn);
10659 }
10660
10661 /* Dummy insn to mark literal pool slot. */
10662
10663 if (cfun->machine->base_reg)
10664 emit_insn (gen_main_pool (cfun->machine->base_reg));
10665
10666 offset = cfun_frame_layout.f0_offset;
10667
10668 /* Save f0 and f2. */
10669 for (i = FPR0_REGNUM; i <= FPR0_REGNUM + 1; i++)
10670 {
10671 if (cfun_fpr_save_p (i))
10672 {
10673 save_fpr (stack_pointer_rtx, offset, i);
10674 offset += 8;
10675 }
10676 else if (!TARGET_PACKED_STACK || cfun->stdarg)
10677 offset += 8;
10678 }
10679
10680 /* Save f4 and f6. */
10681 offset = cfun_frame_layout.f4_offset;
10682 for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
10683 {
10684 if (cfun_fpr_save_p (i))
10685 {
10686 insn = save_fpr (stack_pointer_rtx, offset, i);
10687 offset += 8;
10688
10689 /* If f4 and f6 are call clobbered they are saved due to
10690 stdargs and therefore are not frame related. */
10691 if (!call_really_used_regs[i])
10692 RTX_FRAME_RELATED_P (insn) = 1;
10693 }
10694 else if (!TARGET_PACKED_STACK || call_really_used_regs[i])
10695 offset += 8;
10696 }
10697
10698 if (TARGET_PACKED_STACK
10699 && cfun_save_high_fprs_p
10700 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
10701 {
10702 offset = (cfun_frame_layout.f8_offset
10703 + (cfun_frame_layout.high_fprs - 1) * 8);
10704
10705 for (i = FPR15_REGNUM; i >= FPR8_REGNUM && offset >= 0; i--)
10706 if (cfun_fpr_save_p (i))
10707 {
10708 insn = save_fpr (stack_pointer_rtx, offset, i);
10709
10710 RTX_FRAME_RELATED_P (insn) = 1;
10711 offset -= 8;
10712 }
10713 if (offset >= cfun_frame_layout.f8_offset)
10714 next_fpr = i;
10715 }
10716
10717 if (!TARGET_PACKED_STACK)
10718 next_fpr = cfun_save_high_fprs_p ? FPR15_REGNUM : 0;
10719
10720 if (flag_stack_usage_info)
10721 current_function_static_stack_size = cfun_frame_layout.frame_size;
10722
10723 /* Decrement stack pointer. */
10724
10725 if (cfun_frame_layout.frame_size > 0)
10726 {
10727 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
10728 rtx real_frame_off;
10729
10730 if (s390_stack_size)
10731 {
10732 HOST_WIDE_INT stack_guard;
10733
10734 if (s390_stack_guard)
10735 stack_guard = s390_stack_guard;
10736 else
10737 {
10738 /* If no value for stack guard is provided the smallest power of 2
10739 larger than the current frame size is chosen. */
10740 stack_guard = 1;
10741 while (stack_guard < cfun_frame_layout.frame_size)
10742 stack_guard <<= 1;
10743 }
10744
10745 if (cfun_frame_layout.frame_size >= s390_stack_size)
10746 {
10747 warning (0, "frame size of function %qs is %wd"
10748 " bytes exceeding user provided stack limit of "
10749 "%d bytes. "
10750 "An unconditional trap is added.",
10751 current_function_name(), cfun_frame_layout.frame_size,
10752 s390_stack_size);
10753 emit_insn (gen_trap ());
10754 emit_barrier ();
10755 }
10756 else
10757 {
10758 /* stack_guard has to be smaller than s390_stack_size.
10759 Otherwise we would emit an AND with zero which would
10760 not match the test under mask pattern. */
10761 if (stack_guard >= s390_stack_size)
10762 {
10763 warning (0, "frame size of function %qs is %wd"
10764 " bytes which is more than half the stack size. "
10765 "The dynamic check would not be reliable. "
10766 "No check emitted for this function.",
10767 current_function_name(),
10768 cfun_frame_layout.frame_size);
10769 }
10770 else
10771 {
10772 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
10773 & ~(stack_guard - 1));
10774
10775 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
10776 GEN_INT (stack_check_mask));
10777 if (TARGET_64BIT)
10778 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
10779 t, const0_rtx),
10780 t, const0_rtx, const0_rtx));
10781 else
10782 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
10783 t, const0_rtx),
10784 t, const0_rtx, const0_rtx));
10785 }
10786 }
10787 }
10788
10789 if (s390_warn_framesize > 0
10790 && cfun_frame_layout.frame_size >= s390_warn_framesize)
10791 warning (0, "frame size of %qs is %wd bytes",
10792 current_function_name (), cfun_frame_layout.frame_size);
10793
10794 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
10795 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
10796
10797 /* Save incoming stack pointer into temp reg. */
10798 if (TARGET_BACKCHAIN || next_fpr)
10799 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
10800
10801 /* Subtract frame size from stack pointer. */
10802
10803 if (DISP_IN_RANGE (INTVAL (frame_off)))
10804 {
10805 insn = gen_rtx_SET (stack_pointer_rtx,
10806 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
10807 frame_off));
10808 insn = emit_insn (insn);
10809 }
10810 else
10811 {
10812 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
10813 frame_off = force_const_mem (Pmode, frame_off);
10814
10815 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
10816 annotate_constant_pool_refs (&PATTERN (insn));
10817 }
10818
10819 RTX_FRAME_RELATED_P (insn) = 1;
10820 real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
10821 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
10822 gen_rtx_SET (stack_pointer_rtx,
10823 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
10824 real_frame_off)));
10825
10826 /* Set backchain. */
10827
10828 if (TARGET_BACKCHAIN)
10829 {
10830 if (cfun_frame_layout.backchain_offset)
10831 addr = gen_rtx_MEM (Pmode,
10832 plus_constant (Pmode, stack_pointer_rtx,
10833 cfun_frame_layout.backchain_offset));
10834 else
10835 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
10836 set_mem_alias_set (addr, get_frame_alias_set ());
10837 insn = emit_insn (gen_move_insn (addr, temp_reg));
10838 }
10839
10840 /* If we support non-call exceptions (e.g. for Java),
10841 we need to make sure the backchain pointer is set up
10842 before any possibly trapping memory access. */
10843 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
10844 {
10845 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
10846 emit_clobber (addr);
10847 }
10848 }
10849
10850 /* Save fprs 8 - 15 (64 bit ABI). */
10851
10852 if (cfun_save_high_fprs_p && next_fpr)
10853 {
10854 /* If the stack might be accessed through a different register
10855 we have to make sure that the stack pointer decrement is not
10856 moved below the use of the stack slots. */
10857 s390_emit_stack_tie ();
10858
10859 insn = emit_insn (gen_add2_insn (temp_reg,
10860 GEN_INT (cfun_frame_layout.f8_offset)));
10861
10862 offset = 0;
10863
10864 for (i = FPR8_REGNUM; i <= next_fpr; i++)
10865 if (cfun_fpr_save_p (i))
10866 {
10867 rtx addr = plus_constant (Pmode, stack_pointer_rtx,
10868 cfun_frame_layout.frame_size
10869 + cfun_frame_layout.f8_offset
10870 + offset);
10871
10872 insn = save_fpr (temp_reg, offset, i);
10873 offset += 8;
10874 RTX_FRAME_RELATED_P (insn) = 1;
10875 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
10876 gen_rtx_SET (gen_rtx_MEM (DFmode, addr),
10877 gen_rtx_REG (DFmode, i)));
10878 }
10879 }
10880
10881 /* Set frame pointer, if needed. */
10882
10883 if (frame_pointer_needed)
10884 {
10885 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
10886 RTX_FRAME_RELATED_P (insn) = 1;
10887 }
10888
10889 /* Set up got pointer, if needed. */
10890
10891 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
10892 {
10893 rtx_insn *insns = s390_load_got ();
10894
10895 for (rtx_insn *insn = insns; insn; insn = NEXT_INSN (insn))
10896 annotate_constant_pool_refs (&PATTERN (insn));
10897
10898 emit_insn (insns);
10899 }
10900
10901 if (TARGET_TPF_PROFILING)
10902 {
10903 /* Generate a BAS instruction to serve as a function
10904 entry intercept to facilitate the use of tracing
10905 algorithms located at the branch target. */
10906 emit_insn (gen_prologue_tpf ());
10907
10908 /* Emit a blockage here so that all code
10909 lies between the profiling mechanisms. */
10910 emit_insn (gen_blockage ());
10911 }
10912 }
10913
10914 /* Expand the epilogue into a bunch of separate insns. */
10915
10916 void
10917 s390_emit_epilogue (bool sibcall)
10918 {
10919 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
10920 int area_bottom, area_top, offset = 0;
10921 int next_offset;
10922 rtvec p;
10923 int i;
10924
10925 if (TARGET_TPF_PROFILING)
10926 {
10927
10928 /* Generate a BAS instruction to serve as a function
10929 entry intercept to facilitate the use of tracing
10930 algorithms located at the branch target. */
10931
10932 /* Emit a blockage here so that all code
10933 lies between the profiling mechanisms. */
10934 emit_insn (gen_blockage ());
10935
10936 emit_insn (gen_epilogue_tpf ());
10937 }
10938
10939 /* Check whether to use frame or stack pointer for restore. */
10940
10941 frame_pointer = (frame_pointer_needed
10942 ? hard_frame_pointer_rtx : stack_pointer_rtx);
10943
10944 s390_frame_area (&area_bottom, &area_top);
10945
10946 /* Check whether we can access the register save area.
10947 If not, increment the frame pointer as required. */
10948
10949 if (area_top <= area_bottom)
10950 {
10951 /* Nothing to restore. */
10952 }
10953 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
10954 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
10955 {
10956 /* Area is in range. */
10957 offset = cfun_frame_layout.frame_size;
10958 }
10959 else
10960 {
10961 rtx insn, frame_off, cfa;
10962
10963 offset = area_bottom < 0 ? -area_bottom : 0;
10964 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
10965
10966 cfa = gen_rtx_SET (frame_pointer,
10967 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
10968 if (DISP_IN_RANGE (INTVAL (frame_off)))
10969 {
10970 insn = gen_rtx_SET (frame_pointer,
10971 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
10972 insn = emit_insn (insn);
10973 }
10974 else
10975 {
10976 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
10977 frame_off = force_const_mem (Pmode, frame_off);
10978
10979 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
10980 annotate_constant_pool_refs (&PATTERN (insn));
10981 }
10982 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
10983 RTX_FRAME_RELATED_P (insn) = 1;
10984 }
10985
10986 /* Restore call saved fprs. */
10987
10988 if (TARGET_64BIT)
10989 {
10990 if (cfun_save_high_fprs_p)
10991 {
10992 next_offset = cfun_frame_layout.f8_offset;
10993 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
10994 {
10995 if (cfun_fpr_save_p (i))
10996 {
10997 restore_fpr (frame_pointer,
10998 offset + next_offset, i);
10999 cfa_restores
11000 = alloc_reg_note (REG_CFA_RESTORE,
11001 gen_rtx_REG (DFmode, i), cfa_restores);
11002 next_offset += 8;
11003 }
11004 }
11005 }
11006
11007 }
11008 else
11009 {
11010 next_offset = cfun_frame_layout.f4_offset;
11011 /* f4, f6 */
11012 for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
11013 {
11014 if (cfun_fpr_save_p (i))
11015 {
11016 restore_fpr (frame_pointer,
11017 offset + next_offset, i);
11018 cfa_restores
11019 = alloc_reg_note (REG_CFA_RESTORE,
11020 gen_rtx_REG (DFmode, i), cfa_restores);
11021 next_offset += 8;
11022 }
11023 else if (!TARGET_PACKED_STACK)
11024 next_offset += 8;
11025 }
11026
11027 }
11028
11029 /* Return register. */
11030
11031 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
11032
11033 /* Restore call saved gprs. */
11034
11035 if (cfun_frame_layout.first_restore_gpr != -1)
11036 {
11037 rtx insn, addr;
11038 int i;
11039
11040 /* Check for global register and save them
11041 to stack location from where they get restored. */
11042
11043 for (i = cfun_frame_layout.first_restore_gpr;
11044 i <= cfun_frame_layout.last_restore_gpr;
11045 i++)
11046 {
11047 if (global_not_special_regno_p (i))
11048 {
11049 addr = plus_constant (Pmode, frame_pointer,
11050 offset + cfun_frame_layout.gprs_offset
11051 + (i - cfun_frame_layout.first_save_gpr_slot)
11052 * UNITS_PER_LONG);
11053 addr = gen_rtx_MEM (Pmode, addr);
11054 set_mem_alias_set (addr, get_frame_alias_set ());
11055 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
11056 }
11057 else
11058 cfa_restores
11059 = alloc_reg_note (REG_CFA_RESTORE,
11060 gen_rtx_REG (Pmode, i), cfa_restores);
11061 }
11062
11063 if (! sibcall)
11064 {
11065 /* Fetch return address from stack before load multiple,
11066 this will do good for scheduling.
11067
11068 Only do this if we already decided that r14 needs to be
11069 saved to a stack slot. (And not just because r14 happens to
11070 be in between two GPRs which need saving.) Otherwise it
11071 would be difficult to take that decision back in
11072 s390_optimize_prologue. */
11073 if (cfun_gpr_save_slot (RETURN_REGNUM) == SAVE_SLOT_STACK)
11074 {
11075 int return_regnum = find_unused_clobbered_reg();
11076 if (!return_regnum)
11077 return_regnum = 4;
11078 return_reg = gen_rtx_REG (Pmode, return_regnum);
11079
11080 addr = plus_constant (Pmode, frame_pointer,
11081 offset + cfun_frame_layout.gprs_offset
11082 + (RETURN_REGNUM
11083 - cfun_frame_layout.first_save_gpr_slot)
11084 * UNITS_PER_LONG);
11085 addr = gen_rtx_MEM (Pmode, addr);
11086 set_mem_alias_set (addr, get_frame_alias_set ());
11087 emit_move_insn (return_reg, addr);
11088
11089 /* Once we did that optimization we have to make sure
11090 s390_optimize_prologue does not try to remove the
11091 store of r14 since we will not be able to find the
11092 load issued here. */
11093 cfun_frame_layout.save_return_addr_p = true;
11094 }
11095 }
11096
11097 insn = restore_gprs (frame_pointer,
11098 offset + cfun_frame_layout.gprs_offset
11099 + (cfun_frame_layout.first_restore_gpr
11100 - cfun_frame_layout.first_save_gpr_slot)
11101 * UNITS_PER_LONG,
11102 cfun_frame_layout.first_restore_gpr,
11103 cfun_frame_layout.last_restore_gpr);
11104 insn = emit_insn (insn);
11105 REG_NOTES (insn) = cfa_restores;
11106 add_reg_note (insn, REG_CFA_DEF_CFA,
11107 plus_constant (Pmode, stack_pointer_rtx,
11108 STACK_POINTER_OFFSET));
11109 RTX_FRAME_RELATED_P (insn) = 1;
11110 }
11111
11112 s390_restore_gprs_from_fprs ();
11113
11114 if (! sibcall)
11115 {
11116
11117 /* Return to caller. */
11118
11119 p = rtvec_alloc (2);
11120
11121 RTVEC_ELT (p, 0) = ret_rtx;
11122 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
11123 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
11124 }
11125 }
11126
11127 /* Implement TARGET_SET_UP_BY_PROLOGUE. */
11128
11129 static void
11130 s300_set_up_by_prologue (hard_reg_set_container *regs)
11131 {
11132 if (cfun->machine->base_reg
11133 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
11134 SET_HARD_REG_BIT (regs->set, REGNO (cfun->machine->base_reg));
11135 }
11136
11137 /* -fsplit-stack support. */
11138
11139 /* A SYMBOL_REF for __morestack. */
11140 static GTY(()) rtx morestack_ref;
11141
11142 /* When using -fsplit-stack, the allocation routines set a field in
11143 the TCB to the bottom of the stack plus this much space, measured
11144 in bytes. */
11145
11146 #define SPLIT_STACK_AVAILABLE 1024
11147
11148 /* Emit -fsplit-stack prologue, which goes before the regular function
11149 prologue. */
11150
11151 void
11152 s390_expand_split_stack_prologue (void)
11153 {
11154 rtx r1, guard, cc = NULL;
11155 rtx_insn *insn;
11156 /* Offset from thread pointer to __private_ss. */
11157 int psso = TARGET_64BIT ? 0x38 : 0x20;
11158 /* Pointer size in bytes. */
11159 /* Frame size and argument size - the two parameters to __morestack. */
11160 HOST_WIDE_INT frame_size = cfun_frame_layout.frame_size;
11161 /* Align argument size to 8 bytes - simplifies __morestack code. */
11162 HOST_WIDE_INT args_size = crtl->args.size >= 0
11163 ? ((crtl->args.size + 7) & ~7)
11164 : 0;
11165 /* Label to be called by __morestack. */
11166 rtx_code_label *call_done = NULL;
11167 rtx_code_label *parm_base = NULL;
11168 rtx tmp;
11169
11170 gcc_assert (flag_split_stack && reload_completed);
11171 if (!TARGET_CPU_ZARCH)
11172 {
11173 sorry ("CPUs older than z900 are not supported for -fsplit-stack");
11174 return;
11175 }
11176
11177 r1 = gen_rtx_REG (Pmode, 1);
11178
11179 /* If no stack frame will be allocated, don't do anything. */
11180 if (!frame_size)
11181 {
11182 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11183 {
11184 /* If va_start is used, just use r15. */
11185 emit_move_insn (r1,
11186 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11187 GEN_INT (STACK_POINTER_OFFSET)));
11188
11189 }
11190 return;
11191 }
11192
11193 if (morestack_ref == NULL_RTX)
11194 {
11195 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
11196 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
11197 | SYMBOL_FLAG_FUNCTION);
11198 }
11199
11200 if (CONST_OK_FOR_K (frame_size) || CONST_OK_FOR_Op (frame_size))
11201 {
11202 /* If frame_size will fit in an add instruction, do a stack space
11203 check, and only call __morestack if there's not enough space. */
11204
11205 /* Get thread pointer. r1 is the only register we can always destroy - r0
11206 could contain a static chain (and cannot be used to address memory
11207 anyway), r2-r6 can contain parameters, and r6-r15 are callee-saved. */
11208 emit_move_insn (r1, gen_rtx_REG (Pmode, TP_REGNUM));
11209 /* Aim at __private_ss. */
11210 guard = gen_rtx_MEM (Pmode, plus_constant (Pmode, r1, psso));
11211
11212 /* If less that 1kiB used, skip addition and compare directly with
11213 __private_ss. */
11214 if (frame_size > SPLIT_STACK_AVAILABLE)
11215 {
11216 emit_move_insn (r1, guard);
11217 if (TARGET_64BIT)
11218 emit_insn (gen_adddi3 (r1, r1, GEN_INT (frame_size)));
11219 else
11220 emit_insn (gen_addsi3 (r1, r1, GEN_INT (frame_size)));
11221 guard = r1;
11222 }
11223
11224 /* Compare the (maybe adjusted) guard with the stack pointer. */
11225 cc = s390_emit_compare (LT, stack_pointer_rtx, guard);
11226 }
11227
11228 call_done = gen_label_rtx ();
11229 parm_base = gen_label_rtx ();
11230
11231 /* Emit the parameter block. */
11232 tmp = gen_split_stack_data (parm_base, call_done,
11233 GEN_INT (frame_size),
11234 GEN_INT (args_size));
11235 insn = emit_insn (tmp);
11236 add_reg_note (insn, REG_LABEL_OPERAND, call_done);
11237 LABEL_NUSES (call_done)++;
11238 add_reg_note (insn, REG_LABEL_OPERAND, parm_base);
11239 LABEL_NUSES (parm_base)++;
11240
11241 /* %r1 = litbase. */
11242 insn = emit_move_insn (r1, gen_rtx_LABEL_REF (VOIDmode, parm_base));
11243 add_reg_note (insn, REG_LABEL_OPERAND, parm_base);
11244 LABEL_NUSES (parm_base)++;
11245
11246 /* Now, we need to call __morestack. It has very special calling
11247 conventions: it preserves param/return/static chain registers for
11248 calling main function body, and looks for its own parameters at %r1. */
11249
11250 if (cc != NULL)
11251 {
11252 tmp = gen_split_stack_cond_call (morestack_ref, cc, call_done);
11253
11254 insn = emit_jump_insn (tmp);
11255 JUMP_LABEL (insn) = call_done;
11256 LABEL_NUSES (call_done)++;
11257
11258 /* Mark the jump as very unlikely to be taken. */
11259 add_int_reg_note (insn, REG_BR_PROB, REG_BR_PROB_BASE / 100);
11260
11261 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11262 {
11263 /* If va_start is used, and __morestack was not called, just use
11264 r15. */
11265 emit_move_insn (r1,
11266 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11267 GEN_INT (STACK_POINTER_OFFSET)));
11268 }
11269 }
11270 else
11271 {
11272 tmp = gen_split_stack_call (morestack_ref, call_done);
11273 insn = emit_jump_insn (tmp);
11274 JUMP_LABEL (insn) = call_done;
11275 LABEL_NUSES (call_done)++;
11276 emit_barrier ();
11277 }
11278
11279 /* __morestack will call us here. */
11280
11281 emit_label (call_done);
11282 }
11283
11284 /* We may have to tell the dataflow pass that the split stack prologue
11285 is initializing a register. */
11286
11287 static void
11288 s390_live_on_entry (bitmap regs)
11289 {
11290 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11291 {
11292 gcc_assert (flag_split_stack);
11293 bitmap_set_bit (regs, 1);
11294 }
11295 }
11296
11297 /* Return true if the function can use simple_return to return outside
11298 of a shrink-wrapped region. At present shrink-wrapping is supported
11299 in all cases. */
11300
11301 bool
11302 s390_can_use_simple_return_insn (void)
11303 {
11304 return true;
11305 }
11306
11307 /* Return true if the epilogue is guaranteed to contain only a return
11308 instruction and if a direct return can therefore be used instead.
11309 One of the main advantages of using direct return instructions
11310 is that we can then use conditional returns. */
11311
11312 bool
11313 s390_can_use_return_insn (void)
11314 {
11315 int i;
11316
11317 if (!reload_completed)
11318 return false;
11319
11320 if (crtl->profile)
11321 return false;
11322
11323 if (TARGET_TPF_PROFILING)
11324 return false;
11325
11326 for (i = 0; i < 16; i++)
11327 if (cfun_gpr_save_slot (i) != SAVE_SLOT_NONE)
11328 return false;
11329
11330 /* For 31 bit this is not covered by the frame_size check below
11331 since f4, f6 are saved in the register save area without needing
11332 additional stack space. */
11333 if (!TARGET_64BIT
11334 && (cfun_fpr_save_p (FPR4_REGNUM) || cfun_fpr_save_p (FPR6_REGNUM)))
11335 return false;
11336
11337 if (cfun->machine->base_reg
11338 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
11339 return false;
11340
11341 return cfun_frame_layout.frame_size == 0;
11342 }
11343
11344 /* The VX ABI differs for vararg functions. Therefore we need the
11345 prototype of the callee to be available when passing vector type
11346 values. */
11347 static const char *
11348 s390_invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
11349 {
11350 return ((TARGET_VX_ABI
11351 && typelist == 0
11352 && VECTOR_TYPE_P (TREE_TYPE (val))
11353 && (funcdecl == NULL_TREE
11354 || (TREE_CODE (funcdecl) == FUNCTION_DECL
11355 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
11356 ? N_("Vector argument passed to unprototyped function")
11357 : NULL);
11358 }
11359
11360
11361 /* Return the size in bytes of a function argument of
11362 type TYPE and/or mode MODE. At least one of TYPE or
11363 MODE must be specified. */
11364
11365 static int
11366 s390_function_arg_size (machine_mode mode, const_tree type)
11367 {
11368 if (type)
11369 return int_size_in_bytes (type);
11370
11371 /* No type info available for some library calls ... */
11372 if (mode != BLKmode)
11373 return GET_MODE_SIZE (mode);
11374
11375 /* If we have neither type nor mode, abort */
11376 gcc_unreachable ();
11377 }
11378
11379 /* Return true if a function argument of type TYPE and mode MODE
11380 is to be passed in a vector register, if available. */
11381
11382 bool
11383 s390_function_arg_vector (machine_mode mode, const_tree type)
11384 {
11385 if (!TARGET_VX_ABI)
11386 return false;
11387
11388 if (s390_function_arg_size (mode, type) > 16)
11389 return false;
11390
11391 /* No type info available for some library calls ... */
11392 if (!type)
11393 return VECTOR_MODE_P (mode);
11394
11395 /* The ABI says that record types with a single member are treated
11396 just like that member would be. */
11397 while (TREE_CODE (type) == RECORD_TYPE)
11398 {
11399 tree field, single = NULL_TREE;
11400
11401 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
11402 {
11403 if (TREE_CODE (field) != FIELD_DECL)
11404 continue;
11405
11406 if (single == NULL_TREE)
11407 single = TREE_TYPE (field);
11408 else
11409 return false;
11410 }
11411
11412 if (single == NULL_TREE)
11413 return false;
11414 else
11415 {
11416 /* If the field declaration adds extra byte due to
11417 e.g. padding this is not accepted as vector type. */
11418 if (int_size_in_bytes (single) <= 0
11419 || int_size_in_bytes (single) != int_size_in_bytes (type))
11420 return false;
11421 type = single;
11422 }
11423 }
11424
11425 return VECTOR_TYPE_P (type);
11426 }
11427
11428 /* Return true if a function argument of type TYPE and mode MODE
11429 is to be passed in a floating-point register, if available. */
11430
11431 static bool
11432 s390_function_arg_float (machine_mode mode, const_tree type)
11433 {
11434 if (s390_function_arg_size (mode, type) > 8)
11435 return false;
11436
11437 /* Soft-float changes the ABI: no floating-point registers are used. */
11438 if (TARGET_SOFT_FLOAT)
11439 return false;
11440
11441 /* No type info available for some library calls ... */
11442 if (!type)
11443 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
11444
11445 /* The ABI says that record types with a single member are treated
11446 just like that member would be. */
11447 while (TREE_CODE (type) == RECORD_TYPE)
11448 {
11449 tree field, single = NULL_TREE;
11450
11451 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
11452 {
11453 if (TREE_CODE (field) != FIELD_DECL)
11454 continue;
11455
11456 if (single == NULL_TREE)
11457 single = TREE_TYPE (field);
11458 else
11459 return false;
11460 }
11461
11462 if (single == NULL_TREE)
11463 return false;
11464 else
11465 type = single;
11466 }
11467
11468 return TREE_CODE (type) == REAL_TYPE;
11469 }
11470
11471 /* Return true if a function argument of type TYPE and mode MODE
11472 is to be passed in an integer register, or a pair of integer
11473 registers, if available. */
11474
11475 static bool
11476 s390_function_arg_integer (machine_mode mode, const_tree type)
11477 {
11478 int size = s390_function_arg_size (mode, type);
11479 if (size > 8)
11480 return false;
11481
11482 /* No type info available for some library calls ... */
11483 if (!type)
11484 return GET_MODE_CLASS (mode) == MODE_INT
11485 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
11486
11487 /* We accept small integral (and similar) types. */
11488 if (INTEGRAL_TYPE_P (type)
11489 || POINTER_TYPE_P (type)
11490 || TREE_CODE (type) == NULLPTR_TYPE
11491 || TREE_CODE (type) == OFFSET_TYPE
11492 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
11493 return true;
11494
11495 /* We also accept structs of size 1, 2, 4, 8 that are not
11496 passed in floating-point registers. */
11497 if (AGGREGATE_TYPE_P (type)
11498 && exact_log2 (size) >= 0
11499 && !s390_function_arg_float (mode, type))
11500 return true;
11501
11502 return false;
11503 }
11504
11505 /* Return 1 if a function argument of type TYPE and mode MODE
11506 is to be passed by reference. The ABI specifies that only
11507 structures of size 1, 2, 4, or 8 bytes are passed by value,
11508 all other structures (and complex numbers) are passed by
11509 reference. */
11510
11511 static bool
11512 s390_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
11513 machine_mode mode, const_tree type,
11514 bool named ATTRIBUTE_UNUSED)
11515 {
11516 int size = s390_function_arg_size (mode, type);
11517
11518 if (s390_function_arg_vector (mode, type))
11519 return false;
11520
11521 if (size > 8)
11522 return true;
11523
11524 if (type)
11525 {
11526 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
11527 return true;
11528
11529 if (TREE_CODE (type) == COMPLEX_TYPE
11530 || TREE_CODE (type) == VECTOR_TYPE)
11531 return true;
11532 }
11533
11534 return false;
11535 }
11536
11537 /* Update the data in CUM to advance over an argument of mode MODE and
11538 data type TYPE. (TYPE is null for libcalls where that information
11539 may not be available.). The boolean NAMED specifies whether the
11540 argument is a named argument (as opposed to an unnamed argument
11541 matching an ellipsis). */
11542
11543 static void
11544 s390_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
11545 const_tree type, bool named)
11546 {
11547 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11548
11549 if (s390_function_arg_vector (mode, type))
11550 {
11551 /* We are called for unnamed vector stdarg arguments which are
11552 passed on the stack. In this case this hook does not have to
11553 do anything since stack arguments are tracked by common
11554 code. */
11555 if (!named)
11556 return;
11557 cum->vrs += 1;
11558 }
11559 else if (s390_function_arg_float (mode, type))
11560 {
11561 cum->fprs += 1;
11562 }
11563 else if (s390_function_arg_integer (mode, type))
11564 {
11565 int size = s390_function_arg_size (mode, type);
11566 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
11567 }
11568 else
11569 gcc_unreachable ();
11570 }
11571
11572 /* Define where to put the arguments to a function.
11573 Value is zero to push the argument on the stack,
11574 or a hard register in which to store the argument.
11575
11576 MODE is the argument's machine mode.
11577 TYPE is the data type of the argument (as a tree).
11578 This is null for libcalls where that information may
11579 not be available.
11580 CUM is a variable of type CUMULATIVE_ARGS which gives info about
11581 the preceding args and about the function being called.
11582 NAMED is nonzero if this argument is a named parameter
11583 (otherwise it is an extra parameter matching an ellipsis).
11584
11585 On S/390, we use general purpose registers 2 through 6 to
11586 pass integer, pointer, and certain structure arguments, and
11587 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
11588 to pass floating point arguments. All remaining arguments
11589 are pushed to the stack. */
11590
11591 static rtx
11592 s390_function_arg (cumulative_args_t cum_v, machine_mode mode,
11593 const_tree type, bool named)
11594 {
11595 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11596
11597 if (!named)
11598 s390_check_type_for_vector_abi (type, true, false);
11599
11600 if (s390_function_arg_vector (mode, type))
11601 {
11602 /* Vector arguments being part of the ellipsis are passed on the
11603 stack. */
11604 if (!named || (cum->vrs + 1 > VEC_ARG_NUM_REG))
11605 return NULL_RTX;
11606
11607 return gen_rtx_REG (mode, cum->vrs + FIRST_VEC_ARG_REGNO);
11608 }
11609 else if (s390_function_arg_float (mode, type))
11610 {
11611 if (cum->fprs + 1 > FP_ARG_NUM_REG)
11612 return NULL_RTX;
11613 else
11614 return gen_rtx_REG (mode, cum->fprs + 16);
11615 }
11616 else if (s390_function_arg_integer (mode, type))
11617 {
11618 int size = s390_function_arg_size (mode, type);
11619 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
11620
11621 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
11622 return NULL_RTX;
11623 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
11624 return gen_rtx_REG (mode, cum->gprs + 2);
11625 else if (n_gprs == 2)
11626 {
11627 rtvec p = rtvec_alloc (2);
11628
11629 RTVEC_ELT (p, 0)
11630 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
11631 const0_rtx);
11632 RTVEC_ELT (p, 1)
11633 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
11634 GEN_INT (4));
11635
11636 return gen_rtx_PARALLEL (mode, p);
11637 }
11638 }
11639
11640 /* After the real arguments, expand_call calls us once again
11641 with a void_type_node type. Whatever we return here is
11642 passed as operand 2 to the call expanders.
11643
11644 We don't need this feature ... */
11645 else if (type == void_type_node)
11646 return const0_rtx;
11647
11648 gcc_unreachable ();
11649 }
11650
11651 /* Return true if return values of type TYPE should be returned
11652 in a memory buffer whose address is passed by the caller as
11653 hidden first argument. */
11654
11655 static bool
11656 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
11657 {
11658 /* We accept small integral (and similar) types. */
11659 if (INTEGRAL_TYPE_P (type)
11660 || POINTER_TYPE_P (type)
11661 || TREE_CODE (type) == OFFSET_TYPE
11662 || TREE_CODE (type) == REAL_TYPE)
11663 return int_size_in_bytes (type) > 8;
11664
11665 /* vector types which fit into a VR. */
11666 if (TARGET_VX_ABI
11667 && VECTOR_TYPE_P (type)
11668 && int_size_in_bytes (type) <= 16)
11669 return false;
11670
11671 /* Aggregates and similar constructs are always returned
11672 in memory. */
11673 if (AGGREGATE_TYPE_P (type)
11674 || TREE_CODE (type) == COMPLEX_TYPE
11675 || VECTOR_TYPE_P (type))
11676 return true;
11677
11678 /* ??? We get called on all sorts of random stuff from
11679 aggregate_value_p. We can't abort, but it's not clear
11680 what's safe to return. Pretend it's a struct I guess. */
11681 return true;
11682 }
11683
11684 /* Function arguments and return values are promoted to word size. */
11685
11686 static machine_mode
11687 s390_promote_function_mode (const_tree type, machine_mode mode,
11688 int *punsignedp,
11689 const_tree fntype ATTRIBUTE_UNUSED,
11690 int for_return ATTRIBUTE_UNUSED)
11691 {
11692 if (INTEGRAL_MODE_P (mode)
11693 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
11694 {
11695 if (type != NULL_TREE && POINTER_TYPE_P (type))
11696 *punsignedp = POINTERS_EXTEND_UNSIGNED;
11697 return Pmode;
11698 }
11699
11700 return mode;
11701 }
11702
11703 /* Define where to return a (scalar) value of type RET_TYPE.
11704 If RET_TYPE is null, define where to return a (scalar)
11705 value of mode MODE from a libcall. */
11706
11707 static rtx
11708 s390_function_and_libcall_value (machine_mode mode,
11709 const_tree ret_type,
11710 const_tree fntype_or_decl,
11711 bool outgoing ATTRIBUTE_UNUSED)
11712 {
11713 /* For vector return types it is important to use the RET_TYPE
11714 argument whenever available since the middle-end might have
11715 changed the mode to a scalar mode. */
11716 bool vector_ret_type_p = ((ret_type && VECTOR_TYPE_P (ret_type))
11717 || (!ret_type && VECTOR_MODE_P (mode)));
11718
11719 /* For normal functions perform the promotion as
11720 promote_function_mode would do. */
11721 if (ret_type)
11722 {
11723 int unsignedp = TYPE_UNSIGNED (ret_type);
11724 mode = promote_function_mode (ret_type, mode, &unsignedp,
11725 fntype_or_decl, 1);
11726 }
11727
11728 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT
11729 || SCALAR_FLOAT_MODE_P (mode)
11730 || (TARGET_VX_ABI && vector_ret_type_p));
11731 gcc_assert (GET_MODE_SIZE (mode) <= (TARGET_VX_ABI ? 16 : 8));
11732
11733 if (TARGET_VX_ABI && vector_ret_type_p)
11734 return gen_rtx_REG (mode, FIRST_VEC_ARG_REGNO);
11735 else if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
11736 return gen_rtx_REG (mode, 16);
11737 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
11738 || UNITS_PER_LONG == UNITS_PER_WORD)
11739 return gen_rtx_REG (mode, 2);
11740 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
11741 {
11742 /* This case is triggered when returning a 64 bit value with
11743 -m31 -mzarch. Although the value would fit into a single
11744 register it has to be forced into a 32 bit register pair in
11745 order to match the ABI. */
11746 rtvec p = rtvec_alloc (2);
11747
11748 RTVEC_ELT (p, 0)
11749 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
11750 RTVEC_ELT (p, 1)
11751 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
11752
11753 return gen_rtx_PARALLEL (mode, p);
11754 }
11755
11756 gcc_unreachable ();
11757 }
11758
11759 /* Define where to return a scalar return value of type RET_TYPE. */
11760
11761 static rtx
11762 s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
11763 bool outgoing)
11764 {
11765 return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
11766 fn_decl_or_type, outgoing);
11767 }
11768
11769 /* Define where to return a scalar libcall return value of mode
11770 MODE. */
11771
11772 static rtx
11773 s390_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
11774 {
11775 return s390_function_and_libcall_value (mode, NULL_TREE,
11776 NULL_TREE, true);
11777 }
11778
11779
11780 /* Create and return the va_list datatype.
11781
11782 On S/390, va_list is an array type equivalent to
11783
11784 typedef struct __va_list_tag
11785 {
11786 long __gpr;
11787 long __fpr;
11788 void *__overflow_arg_area;
11789 void *__reg_save_area;
11790 } va_list[1];
11791
11792 where __gpr and __fpr hold the number of general purpose
11793 or floating point arguments used up to now, respectively,
11794 __overflow_arg_area points to the stack location of the
11795 next argument passed on the stack, and __reg_save_area
11796 always points to the start of the register area in the
11797 call frame of the current function. The function prologue
11798 saves all registers used for argument passing into this
11799 area if the function uses variable arguments. */
11800
11801 static tree
11802 s390_build_builtin_va_list (void)
11803 {
11804 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
11805
11806 record = lang_hooks.types.make_type (RECORD_TYPE);
11807
11808 type_decl =
11809 build_decl (BUILTINS_LOCATION,
11810 TYPE_DECL, get_identifier ("__va_list_tag"), record);
11811
11812 f_gpr = build_decl (BUILTINS_LOCATION,
11813 FIELD_DECL, get_identifier ("__gpr"),
11814 long_integer_type_node);
11815 f_fpr = build_decl (BUILTINS_LOCATION,
11816 FIELD_DECL, get_identifier ("__fpr"),
11817 long_integer_type_node);
11818 f_ovf = build_decl (BUILTINS_LOCATION,
11819 FIELD_DECL, get_identifier ("__overflow_arg_area"),
11820 ptr_type_node);
11821 f_sav = build_decl (BUILTINS_LOCATION,
11822 FIELD_DECL, get_identifier ("__reg_save_area"),
11823 ptr_type_node);
11824
11825 va_list_gpr_counter_field = f_gpr;
11826 va_list_fpr_counter_field = f_fpr;
11827
11828 DECL_FIELD_CONTEXT (f_gpr) = record;
11829 DECL_FIELD_CONTEXT (f_fpr) = record;
11830 DECL_FIELD_CONTEXT (f_ovf) = record;
11831 DECL_FIELD_CONTEXT (f_sav) = record;
11832
11833 TYPE_STUB_DECL (record) = type_decl;
11834 TYPE_NAME (record) = type_decl;
11835 TYPE_FIELDS (record) = f_gpr;
11836 DECL_CHAIN (f_gpr) = f_fpr;
11837 DECL_CHAIN (f_fpr) = f_ovf;
11838 DECL_CHAIN (f_ovf) = f_sav;
11839
11840 layout_type (record);
11841
11842 /* The correct type is an array type of one element. */
11843 return build_array_type (record, build_index_type (size_zero_node));
11844 }
11845
11846 /* Implement va_start by filling the va_list structure VALIST.
11847 STDARG_P is always true, and ignored.
11848 NEXTARG points to the first anonymous stack argument.
11849
11850 The following global variables are used to initialize
11851 the va_list structure:
11852
11853 crtl->args.info:
11854 holds number of gprs and fprs used for named arguments.
11855 crtl->args.arg_offset_rtx:
11856 holds the offset of the first anonymous stack argument
11857 (relative to the virtual arg pointer). */
11858
11859 static void
11860 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
11861 {
11862 HOST_WIDE_INT n_gpr, n_fpr;
11863 int off;
11864 tree f_gpr, f_fpr, f_ovf, f_sav;
11865 tree gpr, fpr, ovf, sav, t;
11866
11867 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
11868 f_fpr = DECL_CHAIN (f_gpr);
11869 f_ovf = DECL_CHAIN (f_fpr);
11870 f_sav = DECL_CHAIN (f_ovf);
11871
11872 valist = build_simple_mem_ref (valist);
11873 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
11874 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
11875 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
11876 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
11877
11878 /* Count number of gp and fp argument registers used. */
11879
11880 n_gpr = crtl->args.info.gprs;
11881 n_fpr = crtl->args.info.fprs;
11882
11883 if (cfun->va_list_gpr_size)
11884 {
11885 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
11886 build_int_cst (NULL_TREE, n_gpr));
11887 TREE_SIDE_EFFECTS (t) = 1;
11888 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11889 }
11890
11891 if (cfun->va_list_fpr_size)
11892 {
11893 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
11894 build_int_cst (NULL_TREE, n_fpr));
11895 TREE_SIDE_EFFECTS (t) = 1;
11896 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11897 }
11898
11899 if (flag_split_stack
11900 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
11901 == NULL)
11902 && cfun->machine->split_stack_varargs_pointer == NULL_RTX)
11903 {
11904 rtx reg;
11905 rtx_insn *seq;
11906
11907 reg = gen_reg_rtx (Pmode);
11908 cfun->machine->split_stack_varargs_pointer = reg;
11909
11910 start_sequence ();
11911 emit_move_insn (reg, gen_rtx_REG (Pmode, 1));
11912 seq = get_insns ();
11913 end_sequence ();
11914
11915 push_topmost_sequence ();
11916 emit_insn_after (seq, entry_of_function ());
11917 pop_topmost_sequence ();
11918 }
11919
11920 /* Find the overflow area.
11921 FIXME: This currently is too pessimistic when the vector ABI is
11922 enabled. In that case we *always* set up the overflow area
11923 pointer. */
11924 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
11925 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG
11926 || TARGET_VX_ABI)
11927 {
11928 if (cfun->machine->split_stack_varargs_pointer == NULL_RTX)
11929 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
11930 else
11931 t = make_tree (TREE_TYPE (ovf), cfun->machine->split_stack_varargs_pointer);
11932
11933 off = INTVAL (crtl->args.arg_offset_rtx);
11934 off = off < 0 ? 0 : off;
11935 if (TARGET_DEBUG_ARG)
11936 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
11937 (int)n_gpr, (int)n_fpr, off);
11938
11939 t = fold_build_pointer_plus_hwi (t, off);
11940
11941 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
11942 TREE_SIDE_EFFECTS (t) = 1;
11943 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11944 }
11945
11946 /* Find the register save area. */
11947 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
11948 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
11949 {
11950 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
11951 t = fold_build_pointer_plus_hwi (t, -RETURN_REGNUM * UNITS_PER_LONG);
11952
11953 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
11954 TREE_SIDE_EFFECTS (t) = 1;
11955 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11956 }
11957 }
11958
11959 /* Implement va_arg by updating the va_list structure
11960 VALIST as required to retrieve an argument of type
11961 TYPE, and returning that argument.
11962
11963 Generates code equivalent to:
11964
11965 if (integral value) {
11966 if (size <= 4 && args.gpr < 5 ||
11967 size > 4 && args.gpr < 4 )
11968 ret = args.reg_save_area[args.gpr+8]
11969 else
11970 ret = *args.overflow_arg_area++;
11971 } else if (vector value) {
11972 ret = *args.overflow_arg_area;
11973 args.overflow_arg_area += size / 8;
11974 } else if (float value) {
11975 if (args.fgpr < 2)
11976 ret = args.reg_save_area[args.fpr+64]
11977 else
11978 ret = *args.overflow_arg_area++;
11979 } else if (aggregate value) {
11980 if (args.gpr < 5)
11981 ret = *args.reg_save_area[args.gpr]
11982 else
11983 ret = **args.overflow_arg_area++;
11984 } */
11985
11986 static tree
11987 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
11988 gimple_seq *post_p ATTRIBUTE_UNUSED)
11989 {
11990 tree f_gpr, f_fpr, f_ovf, f_sav;
11991 tree gpr, fpr, ovf, sav, reg, t, u;
11992 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
11993 tree lab_false, lab_over;
11994 tree addr = create_tmp_var (ptr_type_node, "addr");
11995 bool left_align_p; /* How a value < UNITS_PER_LONG is aligned within
11996 a stack slot. */
11997
11998 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
11999 f_fpr = DECL_CHAIN (f_gpr);
12000 f_ovf = DECL_CHAIN (f_fpr);
12001 f_sav = DECL_CHAIN (f_ovf);
12002
12003 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12004 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
12005 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
12006
12007 /* The tree for args* cannot be shared between gpr/fpr and ovf since
12008 both appear on a lhs. */
12009 valist = unshare_expr (valist);
12010 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
12011
12012 size = int_size_in_bytes (type);
12013
12014 s390_check_type_for_vector_abi (type, true, false);
12015
12016 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
12017 {
12018 if (TARGET_DEBUG_ARG)
12019 {
12020 fprintf (stderr, "va_arg: aggregate type");
12021 debug_tree (type);
12022 }
12023
12024 /* Aggregates are passed by reference. */
12025 indirect_p = 1;
12026 reg = gpr;
12027 n_reg = 1;
12028
12029 /* kernel stack layout on 31 bit: It is assumed here that no padding
12030 will be added by s390_frame_info because for va_args always an even
12031 number of gprs has to be saved r15-r2 = 14 regs. */
12032 sav_ofs = 2 * UNITS_PER_LONG;
12033 sav_scale = UNITS_PER_LONG;
12034 size = UNITS_PER_LONG;
12035 max_reg = GP_ARG_NUM_REG - n_reg;
12036 left_align_p = false;
12037 }
12038 else if (s390_function_arg_vector (TYPE_MODE (type), type))
12039 {
12040 if (TARGET_DEBUG_ARG)
12041 {
12042 fprintf (stderr, "va_arg: vector type");
12043 debug_tree (type);
12044 }
12045
12046 indirect_p = 0;
12047 reg = NULL_TREE;
12048 n_reg = 0;
12049 sav_ofs = 0;
12050 sav_scale = 8;
12051 max_reg = 0;
12052 left_align_p = true;
12053 }
12054 else if (s390_function_arg_float (TYPE_MODE (type), type))
12055 {
12056 if (TARGET_DEBUG_ARG)
12057 {
12058 fprintf (stderr, "va_arg: float type");
12059 debug_tree (type);
12060 }
12061
12062 /* FP args go in FP registers, if present. */
12063 indirect_p = 0;
12064 reg = fpr;
12065 n_reg = 1;
12066 sav_ofs = 16 * UNITS_PER_LONG;
12067 sav_scale = 8;
12068 max_reg = FP_ARG_NUM_REG - n_reg;
12069 left_align_p = false;
12070 }
12071 else
12072 {
12073 if (TARGET_DEBUG_ARG)
12074 {
12075 fprintf (stderr, "va_arg: other type");
12076 debug_tree (type);
12077 }
12078
12079 /* Otherwise into GP registers. */
12080 indirect_p = 0;
12081 reg = gpr;
12082 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
12083
12084 /* kernel stack layout on 31 bit: It is assumed here that no padding
12085 will be added by s390_frame_info because for va_args always an even
12086 number of gprs has to be saved r15-r2 = 14 regs. */
12087 sav_ofs = 2 * UNITS_PER_LONG;
12088
12089 if (size < UNITS_PER_LONG)
12090 sav_ofs += UNITS_PER_LONG - size;
12091
12092 sav_scale = UNITS_PER_LONG;
12093 max_reg = GP_ARG_NUM_REG - n_reg;
12094 left_align_p = false;
12095 }
12096
12097 /* Pull the value out of the saved registers ... */
12098
12099 if (reg != NULL_TREE)
12100 {
12101 /*
12102 if (reg > ((typeof (reg))max_reg))
12103 goto lab_false;
12104
12105 addr = sav + sav_ofs + reg * save_scale;
12106
12107 goto lab_over;
12108
12109 lab_false:
12110 */
12111
12112 lab_false = create_artificial_label (UNKNOWN_LOCATION);
12113 lab_over = create_artificial_label (UNKNOWN_LOCATION);
12114
12115 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
12116 t = build2 (GT_EXPR, boolean_type_node, reg, t);
12117 u = build1 (GOTO_EXPR, void_type_node, lab_false);
12118 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
12119 gimplify_and_add (t, pre_p);
12120
12121 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
12122 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
12123 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
12124 t = fold_build_pointer_plus (t, u);
12125
12126 gimplify_assign (addr, t, pre_p);
12127
12128 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
12129
12130 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
12131 }
12132
12133 /* ... Otherwise out of the overflow area. */
12134
12135 t = ovf;
12136 if (size < UNITS_PER_LONG && !left_align_p)
12137 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG - size);
12138
12139 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
12140
12141 gimplify_assign (addr, t, pre_p);
12142
12143 if (size < UNITS_PER_LONG && left_align_p)
12144 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG);
12145 else
12146 t = fold_build_pointer_plus_hwi (t, size);
12147
12148 gimplify_assign (ovf, t, pre_p);
12149
12150 if (reg != NULL_TREE)
12151 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
12152
12153
12154 /* Increment register save count. */
12155
12156 if (n_reg > 0)
12157 {
12158 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
12159 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
12160 gimplify_and_add (u, pre_p);
12161 }
12162
12163 if (indirect_p)
12164 {
12165 t = build_pointer_type_for_mode (build_pointer_type (type),
12166 ptr_mode, true);
12167 addr = fold_convert (t, addr);
12168 addr = build_va_arg_indirect_ref (addr);
12169 }
12170 else
12171 {
12172 t = build_pointer_type_for_mode (type, ptr_mode, true);
12173 addr = fold_convert (t, addr);
12174 }
12175
12176 return build_va_arg_indirect_ref (addr);
12177 }
12178
12179 /* Emit rtl for the tbegin or tbegin_retry (RETRY != NULL_RTX)
12180 expanders.
12181 DEST - Register location where CC will be stored.
12182 TDB - Pointer to a 256 byte area where to store the transaction.
12183 diagnostic block. NULL if TDB is not needed.
12184 RETRY - Retry count value. If non-NULL a retry loop for CC2
12185 is emitted
12186 CLOBBER_FPRS_P - If true clobbers for all FPRs are emitted as part
12187 of the tbegin instruction pattern. */
12188
12189 void
12190 s390_expand_tbegin (rtx dest, rtx tdb, rtx retry, bool clobber_fprs_p)
12191 {
12192 rtx retry_plus_two = gen_reg_rtx (SImode);
12193 rtx retry_reg = gen_reg_rtx (SImode);
12194 rtx_code_label *retry_label = NULL;
12195
12196 if (retry != NULL_RTX)
12197 {
12198 emit_move_insn (retry_reg, retry);
12199 emit_insn (gen_addsi3 (retry_plus_two, retry_reg, const2_rtx));
12200 emit_insn (gen_addsi3 (retry_reg, retry_reg, const1_rtx));
12201 retry_label = gen_label_rtx ();
12202 emit_label (retry_label);
12203 }
12204
12205 if (clobber_fprs_p)
12206 {
12207 if (TARGET_VX)
12208 emit_insn (gen_tbegin_1_z13 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
12209 tdb));
12210 else
12211 emit_insn (gen_tbegin_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
12212 tdb));
12213 }
12214 else
12215 emit_insn (gen_tbegin_nofloat_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
12216 tdb));
12217
12218 emit_move_insn (dest, gen_rtx_UNSPEC (SImode,
12219 gen_rtvec (1, gen_rtx_REG (CCRAWmode,
12220 CC_REGNUM)),
12221 UNSPEC_CC_TO_INT));
12222 if (retry != NULL_RTX)
12223 {
12224 const int CC0 = 1 << 3;
12225 const int CC1 = 1 << 2;
12226 const int CC3 = 1 << 0;
12227 rtx jump;
12228 rtx count = gen_reg_rtx (SImode);
12229 rtx_code_label *leave_label = gen_label_rtx ();
12230
12231 /* Exit for success and permanent failures. */
12232 jump = s390_emit_jump (leave_label,
12233 gen_rtx_EQ (VOIDmode,
12234 gen_rtx_REG (CCRAWmode, CC_REGNUM),
12235 gen_rtx_CONST_INT (VOIDmode, CC0 | CC1 | CC3)));
12236 LABEL_NUSES (leave_label) = 1;
12237
12238 /* CC2 - transient failure. Perform retry with ppa. */
12239 emit_move_insn (count, retry_plus_two);
12240 emit_insn (gen_subsi3 (count, count, retry_reg));
12241 emit_insn (gen_tx_assist (count));
12242 jump = emit_jump_insn (gen_doloop_si64 (retry_label,
12243 retry_reg,
12244 retry_reg));
12245 JUMP_LABEL (jump) = retry_label;
12246 LABEL_NUSES (retry_label) = 1;
12247 emit_label (leave_label);
12248 }
12249 }
12250
12251
12252 /* Return the decl for the target specific builtin with the function
12253 code FCODE. */
12254
12255 static tree
12256 s390_builtin_decl (unsigned fcode, bool initialized_p ATTRIBUTE_UNUSED)
12257 {
12258 if (fcode >= S390_BUILTIN_MAX)
12259 return error_mark_node;
12260
12261 return s390_builtin_decls[fcode];
12262 }
12263
12264 /* We call mcount before the function prologue. So a profiled leaf
12265 function should stay a leaf function. */
12266
12267 static bool
12268 s390_keep_leaf_when_profiled ()
12269 {
12270 return true;
12271 }
12272
12273 /* Output assembly code for the trampoline template to
12274 stdio stream FILE.
12275
12276 On S/390, we use gpr 1 internally in the trampoline code;
12277 gpr 0 is used to hold the static chain. */
12278
12279 static void
12280 s390_asm_trampoline_template (FILE *file)
12281 {
12282 rtx op[2];
12283 op[0] = gen_rtx_REG (Pmode, 0);
12284 op[1] = gen_rtx_REG (Pmode, 1);
12285
12286 if (TARGET_64BIT)
12287 {
12288 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
12289 output_asm_insn ("lmg\t%0,%1,14(%1)", op); /* 6 byte */
12290 output_asm_insn ("br\t%1", op); /* 2 byte */
12291 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
12292 }
12293 else
12294 {
12295 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
12296 output_asm_insn ("lm\t%0,%1,6(%1)", op); /* 4 byte */
12297 output_asm_insn ("br\t%1", op); /* 2 byte */
12298 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
12299 }
12300 }
12301
12302 /* Emit RTL insns to initialize the variable parts of a trampoline.
12303 FNADDR is an RTX for the address of the function's pure code.
12304 CXT is an RTX for the static chain value for the function. */
12305
12306 static void
12307 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
12308 {
12309 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
12310 rtx mem;
12311
12312 emit_block_move (m_tramp, assemble_trampoline_template (),
12313 GEN_INT (2 * UNITS_PER_LONG), BLOCK_OP_NORMAL);
12314
12315 mem = adjust_address (m_tramp, Pmode, 2 * UNITS_PER_LONG);
12316 emit_move_insn (mem, cxt);
12317 mem = adjust_address (m_tramp, Pmode, 3 * UNITS_PER_LONG);
12318 emit_move_insn (mem, fnaddr);
12319 }
12320
12321 /* Output assembler code to FILE to increment profiler label # LABELNO
12322 for profiling a function entry. */
12323
12324 void
12325 s390_function_profiler (FILE *file, int labelno)
12326 {
12327 rtx op[7];
12328
12329 char label[128];
12330 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
12331
12332 fprintf (file, "# function profiler \n");
12333
12334 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
12335 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
12336 op[1] = gen_rtx_MEM (Pmode, plus_constant (Pmode, op[1], UNITS_PER_LONG));
12337
12338 op[2] = gen_rtx_REG (Pmode, 1);
12339 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
12340 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
12341
12342 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
12343 if (flag_pic)
12344 {
12345 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
12346 op[4] = gen_rtx_CONST (Pmode, op[4]);
12347 }
12348
12349 if (TARGET_64BIT)
12350 {
12351 output_asm_insn ("stg\t%0,%1", op);
12352 output_asm_insn ("larl\t%2,%3", op);
12353 output_asm_insn ("brasl\t%0,%4", op);
12354 output_asm_insn ("lg\t%0,%1", op);
12355 }
12356 else if (TARGET_CPU_ZARCH)
12357 {
12358 output_asm_insn ("st\t%0,%1", op);
12359 output_asm_insn ("larl\t%2,%3", op);
12360 output_asm_insn ("brasl\t%0,%4", op);
12361 output_asm_insn ("l\t%0,%1", op);
12362 }
12363 else if (!flag_pic)
12364 {
12365 op[6] = gen_label_rtx ();
12366
12367 output_asm_insn ("st\t%0,%1", op);
12368 output_asm_insn ("bras\t%2,%l6", op);
12369 output_asm_insn (".long\t%4", op);
12370 output_asm_insn (".long\t%3", op);
12371 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
12372 output_asm_insn ("l\t%0,0(%2)", op);
12373 output_asm_insn ("l\t%2,4(%2)", op);
12374 output_asm_insn ("basr\t%0,%0", op);
12375 output_asm_insn ("l\t%0,%1", op);
12376 }
12377 else
12378 {
12379 op[5] = gen_label_rtx ();
12380 op[6] = gen_label_rtx ();
12381
12382 output_asm_insn ("st\t%0,%1", op);
12383 output_asm_insn ("bras\t%2,%l6", op);
12384 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
12385 output_asm_insn (".long\t%4-%l5", op);
12386 output_asm_insn (".long\t%3-%l5", op);
12387 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
12388 output_asm_insn ("lr\t%0,%2", op);
12389 output_asm_insn ("a\t%0,0(%2)", op);
12390 output_asm_insn ("a\t%2,4(%2)", op);
12391 output_asm_insn ("basr\t%0,%0", op);
12392 output_asm_insn ("l\t%0,%1", op);
12393 }
12394 }
12395
12396 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
12397 into its SYMBOL_REF_FLAGS. */
12398
12399 static void
12400 s390_encode_section_info (tree decl, rtx rtl, int first)
12401 {
12402 default_encode_section_info (decl, rtl, first);
12403
12404 if (TREE_CODE (decl) == VAR_DECL)
12405 {
12406 /* Store the alignment to be able to check if we can use
12407 a larl/load-relative instruction. We only handle the cases
12408 that can go wrong (i.e. no FUNC_DECLs). If a symref does
12409 not have any flag we assume it to be correctly aligned. */
12410
12411 if (DECL_ALIGN (decl) % 64)
12412 SYMBOL_FLAG_SET_NOTALIGN8 (XEXP (rtl, 0));
12413
12414 if (DECL_ALIGN (decl) % 32)
12415 SYMBOL_FLAG_SET_NOTALIGN4 (XEXP (rtl, 0));
12416
12417 if (DECL_ALIGN (decl) == 0 || DECL_ALIGN (decl) % 16)
12418 SYMBOL_FLAG_SET_NOTALIGN2 (XEXP (rtl, 0));
12419 }
12420
12421 /* Literal pool references don't have a decl so they are handled
12422 differently here. We rely on the information in the MEM_ALIGN
12423 entry to decide upon the alignment. */
12424 if (MEM_P (rtl)
12425 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
12426 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0))
12427 && MEM_ALIGN (rtl) != 0
12428 && GET_MODE_BITSIZE (GET_MODE (rtl)) != 0)
12429 {
12430 if (MEM_ALIGN (rtl) % 64)
12431 SYMBOL_FLAG_SET_NOTALIGN8 (XEXP (rtl, 0));
12432
12433 if (MEM_ALIGN (rtl) % 32)
12434 SYMBOL_FLAG_SET_NOTALIGN4 (XEXP (rtl, 0));
12435
12436 if (MEM_ALIGN (rtl) == 0 || MEM_ALIGN (rtl) % 16)
12437 SYMBOL_FLAG_SET_NOTALIGN2 (XEXP (rtl, 0));
12438 }
12439 }
12440
12441 /* Output thunk to FILE that implements a C++ virtual function call (with
12442 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
12443 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
12444 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
12445 relative to the resulting this pointer. */
12446
12447 static void
12448 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
12449 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
12450 tree function)
12451 {
12452 rtx op[10];
12453 int nonlocal = 0;
12454
12455 /* Make sure unwind info is emitted for the thunk if needed. */
12456 final_start_function (emit_barrier (), file, 1);
12457
12458 /* Operand 0 is the target function. */
12459 op[0] = XEXP (DECL_RTL (function), 0);
12460 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
12461 {
12462 nonlocal = 1;
12463 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
12464 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
12465 op[0] = gen_rtx_CONST (Pmode, op[0]);
12466 }
12467
12468 /* Operand 1 is the 'this' pointer. */
12469 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
12470 op[1] = gen_rtx_REG (Pmode, 3);
12471 else
12472 op[1] = gen_rtx_REG (Pmode, 2);
12473
12474 /* Operand 2 is the delta. */
12475 op[2] = GEN_INT (delta);
12476
12477 /* Operand 3 is the vcall_offset. */
12478 op[3] = GEN_INT (vcall_offset);
12479
12480 /* Operand 4 is the temporary register. */
12481 op[4] = gen_rtx_REG (Pmode, 1);
12482
12483 /* Operands 5 to 8 can be used as labels. */
12484 op[5] = NULL_RTX;
12485 op[6] = NULL_RTX;
12486 op[7] = NULL_RTX;
12487 op[8] = NULL_RTX;
12488
12489 /* Operand 9 can be used for temporary register. */
12490 op[9] = NULL_RTX;
12491
12492 /* Generate code. */
12493 if (TARGET_64BIT)
12494 {
12495 /* Setup literal pool pointer if required. */
12496 if ((!DISP_IN_RANGE (delta)
12497 && !CONST_OK_FOR_K (delta)
12498 && !CONST_OK_FOR_Os (delta))
12499 || (!DISP_IN_RANGE (vcall_offset)
12500 && !CONST_OK_FOR_K (vcall_offset)
12501 && !CONST_OK_FOR_Os (vcall_offset)))
12502 {
12503 op[5] = gen_label_rtx ();
12504 output_asm_insn ("larl\t%4,%5", op);
12505 }
12506
12507 /* Add DELTA to this pointer. */
12508 if (delta)
12509 {
12510 if (CONST_OK_FOR_J (delta))
12511 output_asm_insn ("la\t%1,%2(%1)", op);
12512 else if (DISP_IN_RANGE (delta))
12513 output_asm_insn ("lay\t%1,%2(%1)", op);
12514 else if (CONST_OK_FOR_K (delta))
12515 output_asm_insn ("aghi\t%1,%2", op);
12516 else if (CONST_OK_FOR_Os (delta))
12517 output_asm_insn ("agfi\t%1,%2", op);
12518 else
12519 {
12520 op[6] = gen_label_rtx ();
12521 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
12522 }
12523 }
12524
12525 /* Perform vcall adjustment. */
12526 if (vcall_offset)
12527 {
12528 if (DISP_IN_RANGE (vcall_offset))
12529 {
12530 output_asm_insn ("lg\t%4,0(%1)", op);
12531 output_asm_insn ("ag\t%1,%3(%4)", op);
12532 }
12533 else if (CONST_OK_FOR_K (vcall_offset))
12534 {
12535 output_asm_insn ("lghi\t%4,%3", op);
12536 output_asm_insn ("ag\t%4,0(%1)", op);
12537 output_asm_insn ("ag\t%1,0(%4)", op);
12538 }
12539 else if (CONST_OK_FOR_Os (vcall_offset))
12540 {
12541 output_asm_insn ("lgfi\t%4,%3", op);
12542 output_asm_insn ("ag\t%4,0(%1)", op);
12543 output_asm_insn ("ag\t%1,0(%4)", op);
12544 }
12545 else
12546 {
12547 op[7] = gen_label_rtx ();
12548 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
12549 output_asm_insn ("ag\t%4,0(%1)", op);
12550 output_asm_insn ("ag\t%1,0(%4)", op);
12551 }
12552 }
12553
12554 /* Jump to target. */
12555 output_asm_insn ("jg\t%0", op);
12556
12557 /* Output literal pool if required. */
12558 if (op[5])
12559 {
12560 output_asm_insn (".align\t4", op);
12561 targetm.asm_out.internal_label (file, "L",
12562 CODE_LABEL_NUMBER (op[5]));
12563 }
12564 if (op[6])
12565 {
12566 targetm.asm_out.internal_label (file, "L",
12567 CODE_LABEL_NUMBER (op[6]));
12568 output_asm_insn (".long\t%2", op);
12569 }
12570 if (op[7])
12571 {
12572 targetm.asm_out.internal_label (file, "L",
12573 CODE_LABEL_NUMBER (op[7]));
12574 output_asm_insn (".long\t%3", op);
12575 }
12576 }
12577 else
12578 {
12579 /* Setup base pointer if required. */
12580 if (!vcall_offset
12581 || (!DISP_IN_RANGE (delta)
12582 && !CONST_OK_FOR_K (delta)
12583 && !CONST_OK_FOR_Os (delta))
12584 || (!DISP_IN_RANGE (delta)
12585 && !CONST_OK_FOR_K (vcall_offset)
12586 && !CONST_OK_FOR_Os (vcall_offset)))
12587 {
12588 op[5] = gen_label_rtx ();
12589 output_asm_insn ("basr\t%4,0", op);
12590 targetm.asm_out.internal_label (file, "L",
12591 CODE_LABEL_NUMBER (op[5]));
12592 }
12593
12594 /* Add DELTA to this pointer. */
12595 if (delta)
12596 {
12597 if (CONST_OK_FOR_J (delta))
12598 output_asm_insn ("la\t%1,%2(%1)", op);
12599 else if (DISP_IN_RANGE (delta))
12600 output_asm_insn ("lay\t%1,%2(%1)", op);
12601 else if (CONST_OK_FOR_K (delta))
12602 output_asm_insn ("ahi\t%1,%2", op);
12603 else if (CONST_OK_FOR_Os (delta))
12604 output_asm_insn ("afi\t%1,%2", op);
12605 else
12606 {
12607 op[6] = gen_label_rtx ();
12608 output_asm_insn ("a\t%1,%6-%5(%4)", op);
12609 }
12610 }
12611
12612 /* Perform vcall adjustment. */
12613 if (vcall_offset)
12614 {
12615 if (CONST_OK_FOR_J (vcall_offset))
12616 {
12617 output_asm_insn ("l\t%4,0(%1)", op);
12618 output_asm_insn ("a\t%1,%3(%4)", op);
12619 }
12620 else if (DISP_IN_RANGE (vcall_offset))
12621 {
12622 output_asm_insn ("l\t%4,0(%1)", op);
12623 output_asm_insn ("ay\t%1,%3(%4)", op);
12624 }
12625 else if (CONST_OK_FOR_K (vcall_offset))
12626 {
12627 output_asm_insn ("lhi\t%4,%3", op);
12628 output_asm_insn ("a\t%4,0(%1)", op);
12629 output_asm_insn ("a\t%1,0(%4)", op);
12630 }
12631 else if (CONST_OK_FOR_Os (vcall_offset))
12632 {
12633 output_asm_insn ("iilf\t%4,%3", op);
12634 output_asm_insn ("a\t%4,0(%1)", op);
12635 output_asm_insn ("a\t%1,0(%4)", op);
12636 }
12637 else
12638 {
12639 op[7] = gen_label_rtx ();
12640 output_asm_insn ("l\t%4,%7-%5(%4)", op);
12641 output_asm_insn ("a\t%4,0(%1)", op);
12642 output_asm_insn ("a\t%1,0(%4)", op);
12643 }
12644
12645 /* We had to clobber the base pointer register.
12646 Re-setup the base pointer (with a different base). */
12647 op[5] = gen_label_rtx ();
12648 output_asm_insn ("basr\t%4,0", op);
12649 targetm.asm_out.internal_label (file, "L",
12650 CODE_LABEL_NUMBER (op[5]));
12651 }
12652
12653 /* Jump to target. */
12654 op[8] = gen_label_rtx ();
12655
12656 if (!flag_pic)
12657 output_asm_insn ("l\t%4,%8-%5(%4)", op);
12658 else if (!nonlocal)
12659 output_asm_insn ("a\t%4,%8-%5(%4)", op);
12660 /* We cannot call through .plt, since .plt requires %r12 loaded. */
12661 else if (flag_pic == 1)
12662 {
12663 output_asm_insn ("a\t%4,%8-%5(%4)", op);
12664 output_asm_insn ("l\t%4,%0(%4)", op);
12665 }
12666 else if (flag_pic == 2)
12667 {
12668 op[9] = gen_rtx_REG (Pmode, 0);
12669 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
12670 output_asm_insn ("a\t%4,%8-%5(%4)", op);
12671 output_asm_insn ("ar\t%4,%9", op);
12672 output_asm_insn ("l\t%4,0(%4)", op);
12673 }
12674
12675 output_asm_insn ("br\t%4", op);
12676
12677 /* Output literal pool. */
12678 output_asm_insn (".align\t4", op);
12679
12680 if (nonlocal && flag_pic == 2)
12681 output_asm_insn (".long\t%0", op);
12682 if (nonlocal)
12683 {
12684 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
12685 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
12686 }
12687
12688 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
12689 if (!flag_pic)
12690 output_asm_insn (".long\t%0", op);
12691 else
12692 output_asm_insn (".long\t%0-%5", op);
12693
12694 if (op[6])
12695 {
12696 targetm.asm_out.internal_label (file, "L",
12697 CODE_LABEL_NUMBER (op[6]));
12698 output_asm_insn (".long\t%2", op);
12699 }
12700 if (op[7])
12701 {
12702 targetm.asm_out.internal_label (file, "L",
12703 CODE_LABEL_NUMBER (op[7]));
12704 output_asm_insn (".long\t%3", op);
12705 }
12706 }
12707 final_end_function ();
12708 }
12709
12710 static bool
12711 s390_valid_pointer_mode (machine_mode mode)
12712 {
12713 return (mode == SImode || (TARGET_64BIT && mode == DImode));
12714 }
12715
12716 /* Checks whether the given CALL_EXPR would use a caller
12717 saved register. This is used to decide whether sibling call
12718 optimization could be performed on the respective function
12719 call. */
12720
12721 static bool
12722 s390_call_saved_register_used (tree call_expr)
12723 {
12724 CUMULATIVE_ARGS cum_v;
12725 cumulative_args_t cum;
12726 tree parameter;
12727 machine_mode mode;
12728 tree type;
12729 rtx parm_rtx;
12730 int reg, i;
12731
12732 INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
12733 cum = pack_cumulative_args (&cum_v);
12734
12735 for (i = 0; i < call_expr_nargs (call_expr); i++)
12736 {
12737 parameter = CALL_EXPR_ARG (call_expr, i);
12738 gcc_assert (parameter);
12739
12740 /* For an undeclared variable passed as parameter we will get
12741 an ERROR_MARK node here. */
12742 if (TREE_CODE (parameter) == ERROR_MARK)
12743 return true;
12744
12745 type = TREE_TYPE (parameter);
12746 gcc_assert (type);
12747
12748 mode = TYPE_MODE (type);
12749 gcc_assert (mode);
12750
12751 /* We assume that in the target function all parameters are
12752 named. This only has an impact on vector argument register
12753 usage none of which is call-saved. */
12754 if (pass_by_reference (&cum_v, mode, type, true))
12755 {
12756 mode = Pmode;
12757 type = build_pointer_type (type);
12758 }
12759
12760 parm_rtx = s390_function_arg (cum, mode, type, true);
12761
12762 s390_function_arg_advance (cum, mode, type, true);
12763
12764 if (!parm_rtx)
12765 continue;
12766
12767 if (REG_P (parm_rtx))
12768 {
12769 for (reg = 0;
12770 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
12771 reg++)
12772 if (!call_used_regs[reg + REGNO (parm_rtx)])
12773 return true;
12774 }
12775
12776 if (GET_CODE (parm_rtx) == PARALLEL)
12777 {
12778 int i;
12779
12780 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
12781 {
12782 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
12783
12784 gcc_assert (REG_P (r));
12785
12786 for (reg = 0;
12787 reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
12788 reg++)
12789 if (!call_used_regs[reg + REGNO (r)])
12790 return true;
12791 }
12792 }
12793
12794 }
12795 return false;
12796 }
12797
12798 /* Return true if the given call expression can be
12799 turned into a sibling call.
12800 DECL holds the declaration of the function to be called whereas
12801 EXP is the call expression itself. */
12802
12803 static bool
12804 s390_function_ok_for_sibcall (tree decl, tree exp)
12805 {
12806 /* The TPF epilogue uses register 1. */
12807 if (TARGET_TPF_PROFILING)
12808 return false;
12809
12810 /* The 31 bit PLT code uses register 12 (GOT pointer - caller saved)
12811 which would have to be restored before the sibcall. */
12812 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
12813 return false;
12814
12815 /* Register 6 on s390 is available as an argument register but unfortunately
12816 "caller saved". This makes functions needing this register for arguments
12817 not suitable for sibcalls. */
12818 return !s390_call_saved_register_used (exp);
12819 }
12820
12821 /* Return the fixed registers used for condition codes. */
12822
12823 static bool
12824 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
12825 {
12826 *p1 = CC_REGNUM;
12827 *p2 = INVALID_REGNUM;
12828
12829 return true;
12830 }
12831
12832 /* This function is used by the call expanders of the machine description.
12833 It emits the call insn itself together with the necessary operations
12834 to adjust the target address and returns the emitted insn.
12835 ADDR_LOCATION is the target address rtx
12836 TLS_CALL the location of the thread-local symbol
12837 RESULT_REG the register where the result of the call should be stored
12838 RETADDR_REG the register where the return address should be stored
12839 If this parameter is NULL_RTX the call is considered
12840 to be a sibling call. */
12841
12842 rtx_insn *
12843 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
12844 rtx retaddr_reg)
12845 {
12846 bool plt_call = false;
12847 rtx_insn *insn;
12848 rtx call;
12849 rtx clobber;
12850 rtvec vec;
12851
12852 /* Direct function calls need special treatment. */
12853 if (GET_CODE (addr_location) == SYMBOL_REF)
12854 {
12855 /* When calling a global routine in PIC mode, we must
12856 replace the symbol itself with the PLT stub. */
12857 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
12858 {
12859 if (TARGET_64BIT || retaddr_reg != NULL_RTX)
12860 {
12861 addr_location = gen_rtx_UNSPEC (Pmode,
12862 gen_rtvec (1, addr_location),
12863 UNSPEC_PLT);
12864 addr_location = gen_rtx_CONST (Pmode, addr_location);
12865 plt_call = true;
12866 }
12867 else
12868 /* For -fpic code the PLT entries might use r12 which is
12869 call-saved. Therefore we cannot do a sibcall when
12870 calling directly using a symbol ref. When reaching
12871 this point we decided (in s390_function_ok_for_sibcall)
12872 to do a sibcall for a function pointer but one of the
12873 optimizers was able to get rid of the function pointer
12874 by propagating the symbol ref into the call. This
12875 optimization is illegal for S/390 so we turn the direct
12876 call into a indirect call again. */
12877 addr_location = force_reg (Pmode, addr_location);
12878 }
12879
12880 /* Unless we can use the bras(l) insn, force the
12881 routine address into a register. */
12882 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
12883 {
12884 if (flag_pic)
12885 addr_location = legitimize_pic_address (addr_location, 0);
12886 else
12887 addr_location = force_reg (Pmode, addr_location);
12888 }
12889 }
12890
12891 /* If it is already an indirect call or the code above moved the
12892 SYMBOL_REF to somewhere else make sure the address can be found in
12893 register 1. */
12894 if (retaddr_reg == NULL_RTX
12895 && GET_CODE (addr_location) != SYMBOL_REF
12896 && !plt_call)
12897 {
12898 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
12899 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
12900 }
12901
12902 addr_location = gen_rtx_MEM (QImode, addr_location);
12903 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
12904
12905 if (result_reg != NULL_RTX)
12906 call = gen_rtx_SET (result_reg, call);
12907
12908 if (retaddr_reg != NULL_RTX)
12909 {
12910 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
12911
12912 if (tls_call != NULL_RTX)
12913 vec = gen_rtvec (3, call, clobber,
12914 gen_rtx_USE (VOIDmode, tls_call));
12915 else
12916 vec = gen_rtvec (2, call, clobber);
12917
12918 call = gen_rtx_PARALLEL (VOIDmode, vec);
12919 }
12920
12921 insn = emit_call_insn (call);
12922
12923 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
12924 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
12925 {
12926 /* s390_function_ok_for_sibcall should
12927 have denied sibcalls in this case. */
12928 gcc_assert (retaddr_reg != NULL_RTX);
12929 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, 12));
12930 }
12931 return insn;
12932 }
12933
12934 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
12935
12936 static void
12937 s390_conditional_register_usage (void)
12938 {
12939 int i;
12940
12941 if (flag_pic)
12942 {
12943 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
12944 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
12945 }
12946 if (TARGET_CPU_ZARCH)
12947 {
12948 fixed_regs[BASE_REGNUM] = 0;
12949 call_used_regs[BASE_REGNUM] = 0;
12950 fixed_regs[RETURN_REGNUM] = 0;
12951 call_used_regs[RETURN_REGNUM] = 0;
12952 }
12953 if (TARGET_64BIT)
12954 {
12955 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
12956 call_used_regs[i] = call_really_used_regs[i] = 0;
12957 }
12958 else
12959 {
12960 call_used_regs[FPR4_REGNUM] = call_really_used_regs[FPR4_REGNUM] = 0;
12961 call_used_regs[FPR6_REGNUM] = call_really_used_regs[FPR6_REGNUM] = 0;
12962 }
12963
12964 if (TARGET_SOFT_FLOAT)
12965 {
12966 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
12967 call_used_regs[i] = fixed_regs[i] = 1;
12968 }
12969
12970 /* Disable v16 - v31 for non-vector target. */
12971 if (!TARGET_VX)
12972 {
12973 for (i = VR16_REGNUM; i <= VR31_REGNUM; i++)
12974 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
12975 }
12976 }
12977
12978 /* Corresponding function to eh_return expander. */
12979
12980 static GTY(()) rtx s390_tpf_eh_return_symbol;
12981 void
12982 s390_emit_tpf_eh_return (rtx target)
12983 {
12984 rtx_insn *insn;
12985 rtx reg, orig_ra;
12986
12987 if (!s390_tpf_eh_return_symbol)
12988 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
12989
12990 reg = gen_rtx_REG (Pmode, 2);
12991 orig_ra = gen_rtx_REG (Pmode, 3);
12992
12993 emit_move_insn (reg, target);
12994 emit_move_insn (orig_ra, get_hard_reg_initial_val (Pmode, RETURN_REGNUM));
12995 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
12996 gen_rtx_REG (Pmode, RETURN_REGNUM));
12997 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
12998 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), orig_ra);
12999
13000 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
13001 }
13002
13003 /* Rework the prologue/epilogue to avoid saving/restoring
13004 registers unnecessarily. */
13005
13006 static void
13007 s390_optimize_prologue (void)
13008 {
13009 rtx_insn *insn, *new_insn, *next_insn;
13010
13011 /* Do a final recompute of the frame-related data. */
13012 s390_optimize_register_info ();
13013
13014 /* If all special registers are in fact used, there's nothing we
13015 can do, so no point in walking the insn list. */
13016
13017 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
13018 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
13019 && (TARGET_CPU_ZARCH
13020 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
13021 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
13022 return;
13023
13024 /* Search for prologue/epilogue insns and replace them. */
13025
13026 for (insn = get_insns (); insn; insn = next_insn)
13027 {
13028 int first, last, off;
13029 rtx set, base, offset;
13030 rtx pat;
13031
13032 next_insn = NEXT_INSN (insn);
13033
13034 if (! NONJUMP_INSN_P (insn) || ! RTX_FRAME_RELATED_P (insn))
13035 continue;
13036
13037 pat = PATTERN (insn);
13038
13039 /* Remove ldgr/lgdr instructions used for saving and restore
13040 GPRs if possible. */
13041 if (TARGET_Z10)
13042 {
13043 rtx tmp_pat = pat;
13044
13045 if (INSN_CODE (insn) == CODE_FOR_stack_restore_from_fpr)
13046 tmp_pat = XVECEXP (pat, 0, 0);
13047
13048 if (GET_CODE (tmp_pat) == SET
13049 && GET_MODE (SET_SRC (tmp_pat)) == DImode
13050 && REG_P (SET_SRC (tmp_pat))
13051 && REG_P (SET_DEST (tmp_pat)))
13052 {
13053 int src_regno = REGNO (SET_SRC (tmp_pat));
13054 int dest_regno = REGNO (SET_DEST (tmp_pat));
13055 int gpr_regno;
13056 int fpr_regno;
13057
13058 if (!((GENERAL_REGNO_P (src_regno)
13059 && FP_REGNO_P (dest_regno))
13060 || (FP_REGNO_P (src_regno)
13061 && GENERAL_REGNO_P (dest_regno))))
13062 continue;
13063
13064 gpr_regno = GENERAL_REGNO_P (src_regno) ? src_regno : dest_regno;
13065 fpr_regno = FP_REGNO_P (src_regno) ? src_regno : dest_regno;
13066
13067 /* GPR must be call-saved, FPR must be call-clobbered. */
13068 if (!call_really_used_regs[fpr_regno]
13069 || call_really_used_regs[gpr_regno])
13070 continue;
13071
13072 /* It must not happen that what we once saved in an FPR now
13073 needs a stack slot. */
13074 gcc_assert (cfun_gpr_save_slot (gpr_regno) != SAVE_SLOT_STACK);
13075
13076 if (cfun_gpr_save_slot (gpr_regno) == SAVE_SLOT_NONE)
13077 {
13078 remove_insn (insn);
13079 continue;
13080 }
13081 }
13082 }
13083
13084 if (GET_CODE (pat) == PARALLEL
13085 && store_multiple_operation (pat, VOIDmode))
13086 {
13087 set = XVECEXP (pat, 0, 0);
13088 first = REGNO (SET_SRC (set));
13089 last = first + XVECLEN (pat, 0) - 1;
13090 offset = const0_rtx;
13091 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
13092 off = INTVAL (offset);
13093
13094 if (GET_CODE (base) != REG || off < 0)
13095 continue;
13096 if (cfun_frame_layout.first_save_gpr != -1
13097 && (cfun_frame_layout.first_save_gpr < first
13098 || cfun_frame_layout.last_save_gpr > last))
13099 continue;
13100 if (REGNO (base) != STACK_POINTER_REGNUM
13101 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13102 continue;
13103 if (first > BASE_REGNUM || last < BASE_REGNUM)
13104 continue;
13105
13106 if (cfun_frame_layout.first_save_gpr != -1)
13107 {
13108 rtx s_pat = save_gprs (base,
13109 off + (cfun_frame_layout.first_save_gpr
13110 - first) * UNITS_PER_LONG,
13111 cfun_frame_layout.first_save_gpr,
13112 cfun_frame_layout.last_save_gpr);
13113 new_insn = emit_insn_before (s_pat, insn);
13114 INSN_ADDRESSES_NEW (new_insn, -1);
13115 }
13116
13117 remove_insn (insn);
13118 continue;
13119 }
13120
13121 if (cfun_frame_layout.first_save_gpr == -1
13122 && GET_CODE (pat) == SET
13123 && GENERAL_REG_P (SET_SRC (pat))
13124 && GET_CODE (SET_DEST (pat)) == MEM)
13125 {
13126 set = pat;
13127 first = REGNO (SET_SRC (set));
13128 offset = const0_rtx;
13129 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
13130 off = INTVAL (offset);
13131
13132 if (GET_CODE (base) != REG || off < 0)
13133 continue;
13134 if (REGNO (base) != STACK_POINTER_REGNUM
13135 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13136 continue;
13137
13138 remove_insn (insn);
13139 continue;
13140 }
13141
13142 if (GET_CODE (pat) == PARALLEL
13143 && load_multiple_operation (pat, VOIDmode))
13144 {
13145 set = XVECEXP (pat, 0, 0);
13146 first = REGNO (SET_DEST (set));
13147 last = first + XVECLEN (pat, 0) - 1;
13148 offset = const0_rtx;
13149 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
13150 off = INTVAL (offset);
13151
13152 if (GET_CODE (base) != REG || off < 0)
13153 continue;
13154
13155 if (cfun_frame_layout.first_restore_gpr != -1
13156 && (cfun_frame_layout.first_restore_gpr < first
13157 || cfun_frame_layout.last_restore_gpr > last))
13158 continue;
13159 if (REGNO (base) != STACK_POINTER_REGNUM
13160 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13161 continue;
13162 if (first > BASE_REGNUM || last < BASE_REGNUM)
13163 continue;
13164
13165 if (cfun_frame_layout.first_restore_gpr != -1)
13166 {
13167 rtx rpat = restore_gprs (base,
13168 off + (cfun_frame_layout.first_restore_gpr
13169 - first) * UNITS_PER_LONG,
13170 cfun_frame_layout.first_restore_gpr,
13171 cfun_frame_layout.last_restore_gpr);
13172
13173 /* Remove REG_CFA_RESTOREs for registers that we no
13174 longer need to save. */
13175 REG_NOTES (rpat) = REG_NOTES (insn);
13176 for (rtx *ptr = &REG_NOTES (rpat); *ptr; )
13177 if (REG_NOTE_KIND (*ptr) == REG_CFA_RESTORE
13178 && ((int) REGNO (XEXP (*ptr, 0))
13179 < cfun_frame_layout.first_restore_gpr))
13180 *ptr = XEXP (*ptr, 1);
13181 else
13182 ptr = &XEXP (*ptr, 1);
13183 new_insn = emit_insn_before (rpat, insn);
13184 RTX_FRAME_RELATED_P (new_insn) = 1;
13185 INSN_ADDRESSES_NEW (new_insn, -1);
13186 }
13187
13188 remove_insn (insn);
13189 continue;
13190 }
13191
13192 if (cfun_frame_layout.first_restore_gpr == -1
13193 && GET_CODE (pat) == SET
13194 && GENERAL_REG_P (SET_DEST (pat))
13195 && GET_CODE (SET_SRC (pat)) == MEM)
13196 {
13197 set = pat;
13198 first = REGNO (SET_DEST (set));
13199 offset = const0_rtx;
13200 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
13201 off = INTVAL (offset);
13202
13203 if (GET_CODE (base) != REG || off < 0)
13204 continue;
13205
13206 if (REGNO (base) != STACK_POINTER_REGNUM
13207 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13208 continue;
13209
13210 remove_insn (insn);
13211 continue;
13212 }
13213 }
13214 }
13215
13216 /* On z10 and later the dynamic branch prediction must see the
13217 backward jump within a certain windows. If not it falls back to
13218 the static prediction. This function rearranges the loop backward
13219 branch in a way which makes the static prediction always correct.
13220 The function returns true if it added an instruction. */
13221 static bool
13222 s390_fix_long_loop_prediction (rtx_insn *insn)
13223 {
13224 rtx set = single_set (insn);
13225 rtx code_label, label_ref, new_label;
13226 rtx_insn *uncond_jump;
13227 rtx_insn *cur_insn;
13228 rtx tmp;
13229 int distance;
13230
13231 /* This will exclude branch on count and branch on index patterns
13232 since these are correctly statically predicted. */
13233 if (!set
13234 || SET_DEST (set) != pc_rtx
13235 || GET_CODE (SET_SRC(set)) != IF_THEN_ELSE)
13236 return false;
13237
13238 /* Skip conditional returns. */
13239 if (ANY_RETURN_P (XEXP (SET_SRC (set), 1))
13240 && XEXP (SET_SRC (set), 2) == pc_rtx)
13241 return false;
13242
13243 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
13244 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
13245
13246 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
13247
13248 code_label = XEXP (label_ref, 0);
13249
13250 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
13251 || INSN_ADDRESSES (INSN_UID (insn)) == -1
13252 || (INSN_ADDRESSES (INSN_UID (insn))
13253 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
13254 return false;
13255
13256 for (distance = 0, cur_insn = PREV_INSN (insn);
13257 distance < PREDICT_DISTANCE - 6;
13258 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
13259 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
13260 return false;
13261
13262 new_label = gen_label_rtx ();
13263 uncond_jump = emit_jump_insn_after (
13264 gen_rtx_SET (pc_rtx,
13265 gen_rtx_LABEL_REF (VOIDmode, code_label)),
13266 insn);
13267 emit_label_after (new_label, uncond_jump);
13268
13269 tmp = XEXP (SET_SRC (set), 1);
13270 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
13271 XEXP (SET_SRC (set), 2) = tmp;
13272 INSN_CODE (insn) = -1;
13273
13274 XEXP (label_ref, 0) = new_label;
13275 JUMP_LABEL (insn) = new_label;
13276 JUMP_LABEL (uncond_jump) = code_label;
13277
13278 return true;
13279 }
13280
13281 /* Returns 1 if INSN reads the value of REG for purposes not related
13282 to addressing of memory, and 0 otherwise. */
13283 static int
13284 s390_non_addr_reg_read_p (rtx reg, rtx_insn *insn)
13285 {
13286 return reg_referenced_p (reg, PATTERN (insn))
13287 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
13288 }
13289
13290 /* Starting from INSN find_cond_jump looks downwards in the insn
13291 stream for a single jump insn which is the last user of the
13292 condition code set in INSN. */
13293 static rtx_insn *
13294 find_cond_jump (rtx_insn *insn)
13295 {
13296 for (; insn; insn = NEXT_INSN (insn))
13297 {
13298 rtx ite, cc;
13299
13300 if (LABEL_P (insn))
13301 break;
13302
13303 if (!JUMP_P (insn))
13304 {
13305 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
13306 break;
13307 continue;
13308 }
13309
13310 /* This will be triggered by a return. */
13311 if (GET_CODE (PATTERN (insn)) != SET)
13312 break;
13313
13314 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
13315 ite = SET_SRC (PATTERN (insn));
13316
13317 if (GET_CODE (ite) != IF_THEN_ELSE)
13318 break;
13319
13320 cc = XEXP (XEXP (ite, 0), 0);
13321 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
13322 break;
13323
13324 if (find_reg_note (insn, REG_DEAD, cc))
13325 return insn;
13326 break;
13327 }
13328
13329 return NULL;
13330 }
13331
13332 /* Swap the condition in COND and the operands in OP0 and OP1 so that
13333 the semantics does not change. If NULL_RTX is passed as COND the
13334 function tries to find the conditional jump starting with INSN. */
13335 static void
13336 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx_insn *insn)
13337 {
13338 rtx tmp = *op0;
13339
13340 if (cond == NULL_RTX)
13341 {
13342 rtx_insn *jump = find_cond_jump (NEXT_INSN (insn));
13343 rtx set = jump ? single_set (jump) : NULL_RTX;
13344
13345 if (set == NULL_RTX)
13346 return;
13347
13348 cond = XEXP (SET_SRC (set), 0);
13349 }
13350
13351 *op0 = *op1;
13352 *op1 = tmp;
13353 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
13354 }
13355
13356 /* On z10, instructions of the compare-and-branch family have the
13357 property to access the register occurring as second operand with
13358 its bits complemented. If such a compare is grouped with a second
13359 instruction that accesses the same register non-complemented, and
13360 if that register's value is delivered via a bypass, then the
13361 pipeline recycles, thereby causing significant performance decline.
13362 This function locates such situations and exchanges the two
13363 operands of the compare. The function return true whenever it
13364 added an insn. */
13365 static bool
13366 s390_z10_optimize_cmp (rtx_insn *insn)
13367 {
13368 rtx_insn *prev_insn, *next_insn;
13369 bool insn_added_p = false;
13370 rtx cond, *op0, *op1;
13371
13372 if (GET_CODE (PATTERN (insn)) == PARALLEL)
13373 {
13374 /* Handle compare and branch and branch on count
13375 instructions. */
13376 rtx pattern = single_set (insn);
13377
13378 if (!pattern
13379 || SET_DEST (pattern) != pc_rtx
13380 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
13381 return false;
13382
13383 cond = XEXP (SET_SRC (pattern), 0);
13384 op0 = &XEXP (cond, 0);
13385 op1 = &XEXP (cond, 1);
13386 }
13387 else if (GET_CODE (PATTERN (insn)) == SET)
13388 {
13389 rtx src, dest;
13390
13391 /* Handle normal compare instructions. */
13392 src = SET_SRC (PATTERN (insn));
13393 dest = SET_DEST (PATTERN (insn));
13394
13395 if (!REG_P (dest)
13396 || !CC_REGNO_P (REGNO (dest))
13397 || GET_CODE (src) != COMPARE)
13398 return false;
13399
13400 /* s390_swap_cmp will try to find the conditional
13401 jump when passing NULL_RTX as condition. */
13402 cond = NULL_RTX;
13403 op0 = &XEXP (src, 0);
13404 op1 = &XEXP (src, 1);
13405 }
13406 else
13407 return false;
13408
13409 if (!REG_P (*op0) || !REG_P (*op1))
13410 return false;
13411
13412 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
13413 return false;
13414
13415 /* Swap the COMPARE arguments and its mask if there is a
13416 conflicting access in the previous insn. */
13417 prev_insn = prev_active_insn (insn);
13418 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
13419 && reg_referenced_p (*op1, PATTERN (prev_insn)))
13420 s390_swap_cmp (cond, op0, op1, insn);
13421
13422 /* Check if there is a conflict with the next insn. If there
13423 was no conflict with the previous insn, then swap the
13424 COMPARE arguments and its mask. If we already swapped
13425 the operands, or if swapping them would cause a conflict
13426 with the previous insn, issue a NOP after the COMPARE in
13427 order to separate the two instuctions. */
13428 next_insn = next_active_insn (insn);
13429 if (next_insn != NULL_RTX && INSN_P (next_insn)
13430 && s390_non_addr_reg_read_p (*op1, next_insn))
13431 {
13432 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
13433 && s390_non_addr_reg_read_p (*op0, prev_insn))
13434 {
13435 if (REGNO (*op1) == 0)
13436 emit_insn_after (gen_nop1 (), insn);
13437 else
13438 emit_insn_after (gen_nop (), insn);
13439 insn_added_p = true;
13440 }
13441 else
13442 s390_swap_cmp (cond, op0, op1, insn);
13443 }
13444 return insn_added_p;
13445 }
13446
13447 /* Perform machine-dependent processing. */
13448
13449 static void
13450 s390_reorg (void)
13451 {
13452 bool pool_overflow = false;
13453 int hw_before, hw_after;
13454
13455 /* Make sure all splits have been performed; splits after
13456 machine_dependent_reorg might confuse insn length counts. */
13457 split_all_insns_noflow ();
13458
13459 /* Install the main literal pool and the associated base
13460 register load insns.
13461
13462 In addition, there are two problematic situations we need
13463 to correct:
13464
13465 - the literal pool might be > 4096 bytes in size, so that
13466 some of its elements cannot be directly accessed
13467
13468 - a branch target might be > 64K away from the branch, so that
13469 it is not possible to use a PC-relative instruction.
13470
13471 To fix those, we split the single literal pool into multiple
13472 pool chunks, reloading the pool base register at various
13473 points throughout the function to ensure it always points to
13474 the pool chunk the following code expects, and / or replace
13475 PC-relative branches by absolute branches.
13476
13477 However, the two problems are interdependent: splitting the
13478 literal pool can move a branch further away from its target,
13479 causing the 64K limit to overflow, and on the other hand,
13480 replacing a PC-relative branch by an absolute branch means
13481 we need to put the branch target address into the literal
13482 pool, possibly causing it to overflow.
13483
13484 So, we loop trying to fix up both problems until we manage
13485 to satisfy both conditions at the same time. Note that the
13486 loop is guaranteed to terminate as every pass of the loop
13487 strictly decreases the total number of PC-relative branches
13488 in the function. (This is not completely true as there
13489 might be branch-over-pool insns introduced by chunkify_start.
13490 Those never need to be split however.) */
13491
13492 for (;;)
13493 {
13494 struct constant_pool *pool = NULL;
13495
13496 /* Collect the literal pool. */
13497 if (!pool_overflow)
13498 {
13499 pool = s390_mainpool_start ();
13500 if (!pool)
13501 pool_overflow = true;
13502 }
13503
13504 /* If literal pool overflowed, start to chunkify it. */
13505 if (pool_overflow)
13506 pool = s390_chunkify_start ();
13507
13508 /* Split out-of-range branches. If this has created new
13509 literal pool entries, cancel current chunk list and
13510 recompute it. zSeries machines have large branch
13511 instructions, so we never need to split a branch. */
13512 if (!TARGET_CPU_ZARCH && s390_split_branches ())
13513 {
13514 if (pool_overflow)
13515 s390_chunkify_cancel (pool);
13516 else
13517 s390_mainpool_cancel (pool);
13518
13519 continue;
13520 }
13521
13522 /* If we made it up to here, both conditions are satisfied.
13523 Finish up literal pool related changes. */
13524 if (pool_overflow)
13525 s390_chunkify_finish (pool);
13526 else
13527 s390_mainpool_finish (pool);
13528
13529 /* We're done splitting branches. */
13530 cfun->machine->split_branches_pending_p = false;
13531 break;
13532 }
13533
13534 /* Generate out-of-pool execute target insns. */
13535 if (TARGET_CPU_ZARCH)
13536 {
13537 rtx_insn *insn, *target;
13538 rtx label;
13539
13540 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
13541 {
13542 label = s390_execute_label (insn);
13543 if (!label)
13544 continue;
13545
13546 gcc_assert (label != const0_rtx);
13547
13548 target = emit_label (XEXP (label, 0));
13549 INSN_ADDRESSES_NEW (target, -1);
13550
13551 target = emit_insn (s390_execute_target (insn));
13552 INSN_ADDRESSES_NEW (target, -1);
13553 }
13554 }
13555
13556 /* Try to optimize prologue and epilogue further. */
13557 s390_optimize_prologue ();
13558
13559 /* Walk over the insns and do some >=z10 specific changes. */
13560 if (s390_tune >= PROCESSOR_2097_Z10)
13561 {
13562 rtx_insn *insn;
13563 bool insn_added_p = false;
13564
13565 /* The insn lengths and addresses have to be up to date for the
13566 following manipulations. */
13567 shorten_branches (get_insns ());
13568
13569 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
13570 {
13571 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
13572 continue;
13573
13574 if (JUMP_P (insn))
13575 insn_added_p |= s390_fix_long_loop_prediction (insn);
13576
13577 if ((GET_CODE (PATTERN (insn)) == PARALLEL
13578 || GET_CODE (PATTERN (insn)) == SET)
13579 && s390_tune == PROCESSOR_2097_Z10)
13580 insn_added_p |= s390_z10_optimize_cmp (insn);
13581 }
13582
13583 /* Adjust branches if we added new instructions. */
13584 if (insn_added_p)
13585 shorten_branches (get_insns ());
13586 }
13587
13588 s390_function_num_hotpatch_hw (current_function_decl, &hw_before, &hw_after);
13589 if (hw_after > 0)
13590 {
13591 rtx_insn *insn;
13592
13593 /* Insert NOPs for hotpatching. */
13594 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
13595 /* Emit NOPs
13596 1. inside the area covered by debug information to allow setting
13597 breakpoints at the NOPs,
13598 2. before any insn which results in an asm instruction,
13599 3. before in-function labels to avoid jumping to the NOPs, for
13600 example as part of a loop,
13601 4. before any barrier in case the function is completely empty
13602 (__builtin_unreachable ()) and has neither internal labels nor
13603 active insns.
13604 */
13605 if (active_insn_p (insn) || BARRIER_P (insn) || LABEL_P (insn))
13606 break;
13607 /* Output a series of NOPs before the first active insn. */
13608 while (insn && hw_after > 0)
13609 {
13610 if (hw_after >= 3 && TARGET_CPU_ZARCH)
13611 {
13612 emit_insn_before (gen_nop_6_byte (), insn);
13613 hw_after -= 3;
13614 }
13615 else if (hw_after >= 2)
13616 {
13617 emit_insn_before (gen_nop_4_byte (), insn);
13618 hw_after -= 2;
13619 }
13620 else
13621 {
13622 emit_insn_before (gen_nop_2_byte (), insn);
13623 hw_after -= 1;
13624 }
13625 }
13626 }
13627 }
13628
13629 /* Return true if INSN is a fp load insn writing register REGNO. */
13630 static inline bool
13631 s390_fpload_toreg (rtx_insn *insn, unsigned int regno)
13632 {
13633 rtx set;
13634 enum attr_type flag = s390_safe_attr_type (insn);
13635
13636 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
13637 return false;
13638
13639 set = single_set (insn);
13640
13641 if (set == NULL_RTX)
13642 return false;
13643
13644 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
13645 return false;
13646
13647 if (REGNO (SET_DEST (set)) != regno)
13648 return false;
13649
13650 return true;
13651 }
13652
13653 /* This value describes the distance to be avoided between an
13654 aritmetic fp instruction and an fp load writing the same register.
13655 Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 is
13656 fine but the exact value has to be avoided. Otherwise the FP
13657 pipeline will throw an exception causing a major penalty. */
13658 #define Z10_EARLYLOAD_DISTANCE 7
13659
13660 /* Rearrange the ready list in order to avoid the situation described
13661 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
13662 moved to the very end of the ready list. */
13663 static void
13664 s390_z10_prevent_earlyload_conflicts (rtx_insn **ready, int *nready_p)
13665 {
13666 unsigned int regno;
13667 int nready = *nready_p;
13668 rtx_insn *tmp;
13669 int i;
13670 rtx_insn *insn;
13671 rtx set;
13672 enum attr_type flag;
13673 int distance;
13674
13675 /* Skip DISTANCE - 1 active insns. */
13676 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
13677 distance > 0 && insn != NULL_RTX;
13678 distance--, insn = prev_active_insn (insn))
13679 if (CALL_P (insn) || JUMP_P (insn))
13680 return;
13681
13682 if (insn == NULL_RTX)
13683 return;
13684
13685 set = single_set (insn);
13686
13687 if (set == NULL_RTX || !REG_P (SET_DEST (set))
13688 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
13689 return;
13690
13691 flag = s390_safe_attr_type (insn);
13692
13693 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
13694 return;
13695
13696 regno = REGNO (SET_DEST (set));
13697 i = nready - 1;
13698
13699 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
13700 i--;
13701
13702 if (!i)
13703 return;
13704
13705 tmp = ready[i];
13706 memmove (&ready[1], &ready[0], sizeof (rtx_insn *) * i);
13707 ready[0] = tmp;
13708 }
13709
13710
13711 /* The s390_sched_state variable tracks the state of the current or
13712 the last instruction group.
13713
13714 0,1,2 number of instructions scheduled in the current group
13715 3 the last group is complete - normal insns
13716 4 the last group was a cracked/expanded insn */
13717
13718 static int s390_sched_state;
13719
13720 #define S390_SCHED_STATE_NORMAL 3
13721 #define S390_SCHED_STATE_CRACKED 4
13722
13723 #define S390_SCHED_ATTR_MASK_CRACKED 0x1
13724 #define S390_SCHED_ATTR_MASK_EXPANDED 0x2
13725 #define S390_SCHED_ATTR_MASK_ENDGROUP 0x4
13726 #define S390_SCHED_ATTR_MASK_GROUPALONE 0x8
13727
13728 static unsigned int
13729 s390_get_sched_attrmask (rtx_insn *insn)
13730 {
13731 unsigned int mask = 0;
13732
13733 switch (s390_tune)
13734 {
13735 case PROCESSOR_2827_ZEC12:
13736 if (get_attr_zEC12_cracked (insn))
13737 mask |= S390_SCHED_ATTR_MASK_CRACKED;
13738 if (get_attr_zEC12_expanded (insn))
13739 mask |= S390_SCHED_ATTR_MASK_EXPANDED;
13740 if (get_attr_zEC12_endgroup (insn))
13741 mask |= S390_SCHED_ATTR_MASK_ENDGROUP;
13742 if (get_attr_zEC12_groupalone (insn))
13743 mask |= S390_SCHED_ATTR_MASK_GROUPALONE;
13744 break;
13745 case PROCESSOR_2964_Z13:
13746 if (get_attr_z13_cracked (insn))
13747 mask |= S390_SCHED_ATTR_MASK_CRACKED;
13748 if (get_attr_z13_expanded (insn))
13749 mask |= S390_SCHED_ATTR_MASK_EXPANDED;
13750 if (get_attr_z13_endgroup (insn))
13751 mask |= S390_SCHED_ATTR_MASK_ENDGROUP;
13752 if (get_attr_z13_groupalone (insn))
13753 mask |= S390_SCHED_ATTR_MASK_GROUPALONE;
13754 break;
13755 default:
13756 gcc_unreachable ();
13757 }
13758 return mask;
13759 }
13760
13761 static unsigned int
13762 s390_get_unit_mask (rtx_insn *insn, int *units)
13763 {
13764 unsigned int mask = 0;
13765
13766 switch (s390_tune)
13767 {
13768 case PROCESSOR_2964_Z13:
13769 *units = 3;
13770 if (get_attr_z13_unit_lsu (insn))
13771 mask |= 1 << 0;
13772 if (get_attr_z13_unit_fxu (insn))
13773 mask |= 1 << 1;
13774 if (get_attr_z13_unit_vfu (insn))
13775 mask |= 1 << 2;
13776 break;
13777 default:
13778 gcc_unreachable ();
13779 }
13780 return mask;
13781 }
13782
13783 /* Return the scheduling score for INSN. The higher the score the
13784 better. The score is calculated from the OOO scheduling attributes
13785 of INSN and the scheduling state s390_sched_state. */
13786 static int
13787 s390_sched_score (rtx_insn *insn)
13788 {
13789 unsigned int mask = s390_get_sched_attrmask (insn);
13790 int score = 0;
13791
13792 switch (s390_sched_state)
13793 {
13794 case 0:
13795 /* Try to put insns into the first slot which would otherwise
13796 break a group. */
13797 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
13798 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0)
13799 score += 5;
13800 if ((mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
13801 score += 10;
13802 case 1:
13803 /* Prefer not cracked insns while trying to put together a
13804 group. */
13805 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) == 0
13806 && (mask & S390_SCHED_ATTR_MASK_EXPANDED) == 0
13807 && (mask & S390_SCHED_ATTR_MASK_GROUPALONE) == 0)
13808 score += 10;
13809 if ((mask & S390_SCHED_ATTR_MASK_ENDGROUP) == 0)
13810 score += 5;
13811 break;
13812 case 2:
13813 /* Prefer not cracked insns while trying to put together a
13814 group. */
13815 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) == 0
13816 && (mask & S390_SCHED_ATTR_MASK_EXPANDED) == 0
13817 && (mask & S390_SCHED_ATTR_MASK_GROUPALONE) == 0)
13818 score += 10;
13819 /* Prefer endgroup insns in the last slot. */
13820 if ((mask & S390_SCHED_ATTR_MASK_ENDGROUP) != 0)
13821 score += 10;
13822 break;
13823 case S390_SCHED_STATE_NORMAL:
13824 /* Prefer not cracked insns if the last was not cracked. */
13825 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) == 0
13826 && (mask & S390_SCHED_ATTR_MASK_EXPANDED) == 0)
13827 score += 5;
13828 if ((mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
13829 score += 10;
13830 break;
13831 case S390_SCHED_STATE_CRACKED:
13832 /* Try to keep cracked insns together to prevent them from
13833 interrupting groups. */
13834 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
13835 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0)
13836 score += 5;
13837 break;
13838 }
13839
13840 if (s390_tune == PROCESSOR_2964_Z13)
13841 {
13842 int units, i;
13843 unsigned unit_mask, m = 1;
13844
13845 unit_mask = s390_get_unit_mask (insn, &units);
13846 gcc_assert (units <= MAX_SCHED_UNITS);
13847
13848 /* Add a score in range 0..MAX_SCHED_MIX_SCORE depending on how long
13849 ago the last insn of this unit type got scheduled. This is
13850 supposed to help providing a proper instruction mix to the
13851 CPU. */
13852 for (i = 0; i < units; i++, m <<= 1)
13853 if (m & unit_mask)
13854 score += (last_scheduled_unit_distance[i] * MAX_SCHED_MIX_SCORE /
13855 MAX_SCHED_MIX_DISTANCE);
13856 }
13857 return score;
13858 }
13859
13860 /* This function is called via hook TARGET_SCHED_REORDER before
13861 issuing one insn from list READY which contains *NREADYP entries.
13862 For target z10 it reorders load instructions to avoid early load
13863 conflicts in the floating point pipeline */
13864 static int
13865 s390_sched_reorder (FILE *file, int verbose,
13866 rtx_insn **ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
13867 {
13868 if (s390_tune == PROCESSOR_2097_Z10
13869 && reload_completed
13870 && *nreadyp > 1)
13871 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
13872
13873 if (s390_tune >= PROCESSOR_2827_ZEC12
13874 && reload_completed
13875 && *nreadyp > 1)
13876 {
13877 int i;
13878 int last_index = *nreadyp - 1;
13879 int max_index = -1;
13880 int max_score = -1;
13881 rtx_insn *tmp;
13882
13883 /* Just move the insn with the highest score to the top (the
13884 end) of the list. A full sort is not needed since a conflict
13885 in the hazard recognition cannot happen. So the top insn in
13886 the ready list will always be taken. */
13887 for (i = last_index; i >= 0; i--)
13888 {
13889 int score;
13890
13891 if (recog_memoized (ready[i]) < 0)
13892 continue;
13893
13894 score = s390_sched_score (ready[i]);
13895 if (score > max_score)
13896 {
13897 max_score = score;
13898 max_index = i;
13899 }
13900 }
13901
13902 if (max_index != -1)
13903 {
13904 if (max_index != last_index)
13905 {
13906 tmp = ready[max_index];
13907 ready[max_index] = ready[last_index];
13908 ready[last_index] = tmp;
13909
13910 if (verbose > 5)
13911 fprintf (file,
13912 ";;\t\tBACKEND: move insn %d to the top of list\n",
13913 INSN_UID (ready[last_index]));
13914 }
13915 else if (verbose > 5)
13916 fprintf (file,
13917 ";;\t\tBACKEND: best insn %d already on top\n",
13918 INSN_UID (ready[last_index]));
13919 }
13920
13921 if (verbose > 5)
13922 {
13923 fprintf (file, "ready list ooo attributes - sched state: %d\n",
13924 s390_sched_state);
13925
13926 for (i = last_index; i >= 0; i--)
13927 {
13928 unsigned int sched_mask;
13929 rtx_insn *insn = ready[i];
13930
13931 if (recog_memoized (insn) < 0)
13932 continue;
13933
13934 sched_mask = s390_get_sched_attrmask (insn);
13935 fprintf (file, ";;\t\tBACKEND: insn %d score: %d: ",
13936 INSN_UID (insn),
13937 s390_sched_score (insn));
13938 #define PRINT_SCHED_ATTR(M, ATTR) fprintf (file, "%s ",\
13939 ((M) & sched_mask) ? #ATTR : "");
13940 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_CRACKED, cracked);
13941 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_EXPANDED, expanded);
13942 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_ENDGROUP, endgroup);
13943 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_GROUPALONE, groupalone);
13944 #undef PRINT_SCHED_ATTR
13945 if (s390_tune == PROCESSOR_2964_Z13)
13946 {
13947 unsigned int unit_mask, m = 1;
13948 int units, j;
13949
13950 unit_mask = s390_get_unit_mask (insn, &units);
13951 fprintf (file, "(units:");
13952 for (j = 0; j < units; j++, m <<= 1)
13953 if (m & unit_mask)
13954 fprintf (file, " u%d", j);
13955 fprintf (file, ")");
13956 }
13957 fprintf (file, "\n");
13958 }
13959 }
13960 }
13961
13962 return s390_issue_rate ();
13963 }
13964
13965
13966 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
13967 the scheduler has issued INSN. It stores the last issued insn into
13968 last_scheduled_insn in order to make it available for
13969 s390_sched_reorder. */
13970 static int
13971 s390_sched_variable_issue (FILE *file, int verbose, rtx_insn *insn, int more)
13972 {
13973 last_scheduled_insn = insn;
13974
13975 if (s390_tune >= PROCESSOR_2827_ZEC12
13976 && reload_completed
13977 && recog_memoized (insn) >= 0)
13978 {
13979 unsigned int mask = s390_get_sched_attrmask (insn);
13980
13981 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
13982 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0)
13983 s390_sched_state = S390_SCHED_STATE_CRACKED;
13984 else if ((mask & S390_SCHED_ATTR_MASK_ENDGROUP) != 0
13985 || (mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
13986 s390_sched_state = S390_SCHED_STATE_NORMAL;
13987 else
13988 {
13989 /* Only normal insns are left (mask == 0). */
13990 switch (s390_sched_state)
13991 {
13992 case 0:
13993 case 1:
13994 case 2:
13995 case S390_SCHED_STATE_NORMAL:
13996 if (s390_sched_state == S390_SCHED_STATE_NORMAL)
13997 s390_sched_state = 1;
13998 else
13999 s390_sched_state++;
14000
14001 break;
14002 case S390_SCHED_STATE_CRACKED:
14003 s390_sched_state = S390_SCHED_STATE_NORMAL;
14004 break;
14005 }
14006 }
14007
14008 if (s390_tune == PROCESSOR_2964_Z13)
14009 {
14010 int units, i;
14011 unsigned unit_mask, m = 1;
14012
14013 unit_mask = s390_get_unit_mask (insn, &units);
14014 gcc_assert (units <= MAX_SCHED_UNITS);
14015
14016 for (i = 0; i < units; i++, m <<= 1)
14017 if (m & unit_mask)
14018 last_scheduled_unit_distance[i] = 0;
14019 else if (last_scheduled_unit_distance[i] < MAX_SCHED_MIX_DISTANCE)
14020 last_scheduled_unit_distance[i]++;
14021 }
14022
14023 if (verbose > 5)
14024 {
14025 unsigned int sched_mask;
14026
14027 sched_mask = s390_get_sched_attrmask (insn);
14028
14029 fprintf (file, ";;\t\tBACKEND: insn %d: ", INSN_UID (insn));
14030 #define PRINT_SCHED_ATTR(M, ATTR) fprintf (file, "%s ", ((M) & sched_mask) ? #ATTR : "");
14031 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_CRACKED, cracked);
14032 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_EXPANDED, expanded);
14033 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_ENDGROUP, endgroup);
14034 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_GROUPALONE, groupalone);
14035 #undef PRINT_SCHED_ATTR
14036
14037 if (s390_tune == PROCESSOR_2964_Z13)
14038 {
14039 unsigned int unit_mask, m = 1;
14040 int units, j;
14041
14042 unit_mask = s390_get_unit_mask (insn, &units);
14043 fprintf (file, "(units:");
14044 for (j = 0; j < units; j++, m <<= 1)
14045 if (m & unit_mask)
14046 fprintf (file, " %d", j);
14047 fprintf (file, ")");
14048 }
14049 fprintf (file, " sched state: %d\n", s390_sched_state);
14050
14051 if (s390_tune == PROCESSOR_2964_Z13)
14052 {
14053 int units, j;
14054
14055 s390_get_unit_mask (insn, &units);
14056
14057 fprintf (file, ";;\t\tBACKEND: units unused for: ");
14058 for (j = 0; j < units; j++)
14059 fprintf (file, "%d:%d ", j, last_scheduled_unit_distance[j]);
14060 fprintf (file, "\n");
14061 }
14062 }
14063 }
14064
14065 if (GET_CODE (PATTERN (insn)) != USE
14066 && GET_CODE (PATTERN (insn)) != CLOBBER)
14067 return more - 1;
14068 else
14069 return more;
14070 }
14071
14072 static void
14073 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
14074 int verbose ATTRIBUTE_UNUSED,
14075 int max_ready ATTRIBUTE_UNUSED)
14076 {
14077 last_scheduled_insn = NULL;
14078 memset (last_scheduled_unit_distance, 0, MAX_SCHED_UNITS * sizeof (int));
14079 s390_sched_state = 0;
14080 }
14081
14082 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
14083 a new number struct loop *loop should be unrolled if tuned for cpus with
14084 a built-in stride prefetcher.
14085 The loop is analyzed for memory accesses by calling check_dpu for
14086 each rtx of the loop. Depending on the loop_depth and the amount of
14087 memory accesses a new number <=nunroll is returned to improve the
14088 behavior of the hardware prefetch unit. */
14089 static unsigned
14090 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
14091 {
14092 basic_block *bbs;
14093 rtx_insn *insn;
14094 unsigned i;
14095 unsigned mem_count = 0;
14096
14097 if (s390_tune < PROCESSOR_2097_Z10)
14098 return nunroll;
14099
14100 /* Count the number of memory references within the loop body. */
14101 bbs = get_loop_body (loop);
14102 subrtx_iterator::array_type array;
14103 for (i = 0; i < loop->num_nodes; i++)
14104 FOR_BB_INSNS (bbs[i], insn)
14105 if (INSN_P (insn) && INSN_CODE (insn) != -1)
14106 FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
14107 if (MEM_P (*iter))
14108 mem_count += 1;
14109 free (bbs);
14110
14111 /* Prevent division by zero, and we do not need to adjust nunroll in this case. */
14112 if (mem_count == 0)
14113 return nunroll;
14114
14115 switch (loop_depth(loop))
14116 {
14117 case 1:
14118 return MIN (nunroll, 28 / mem_count);
14119 case 2:
14120 return MIN (nunroll, 22 / mem_count);
14121 default:
14122 return MIN (nunroll, 16 / mem_count);
14123 }
14124 }
14125
14126 /* Restore the current options. This is a hook function and also called
14127 internally. */
14128
14129 static void
14130 s390_function_specific_restore (struct gcc_options *opts,
14131 struct cl_target_option *ptr ATTRIBUTE_UNUSED)
14132 {
14133 opts->x_s390_cost_pointer = (long)processor_table[opts->x_s390_tune].cost;
14134 }
14135
14136 static void
14137 s390_option_override_internal (bool main_args_p,
14138 struct gcc_options *opts,
14139 const struct gcc_options *opts_set)
14140 {
14141 const char *prefix;
14142 const char *suffix;
14143
14144 /* Set up prefix/suffix so the error messages refer to either the command
14145 line argument, or the attribute(target). */
14146 if (main_args_p)
14147 {
14148 prefix = "-m";
14149 suffix = "";
14150 }
14151 else
14152 {
14153 prefix = "option(\"";
14154 suffix = "\")";
14155 }
14156
14157
14158 /* Architecture mode defaults according to ABI. */
14159 if (!(opts_set->x_target_flags & MASK_ZARCH))
14160 {
14161 if (TARGET_64BIT)
14162 opts->x_target_flags |= MASK_ZARCH;
14163 else
14164 opts->x_target_flags &= ~MASK_ZARCH;
14165 }
14166
14167 /* Set the march default in case it hasn't been specified on cmdline. */
14168 if (!opts_set->x_s390_arch)
14169 opts->x_s390_arch = PROCESSOR_2064_Z900;
14170 else if (opts->x_s390_arch == PROCESSOR_9672_G5
14171 || opts->x_s390_arch == PROCESSOR_9672_G6)
14172 warning (OPT_Wdeprecated, "%sarch=%s%s is deprecated and will be removed "
14173 "in future releases; use at least %sarch=z900%s",
14174 prefix, opts->x_s390_arch == PROCESSOR_9672_G5 ? "g5" : "g6",
14175 suffix, prefix, suffix);
14176
14177 opts->x_s390_arch_flags = processor_flags_table[(int) opts->x_s390_arch];
14178
14179 /* Determine processor to tune for. */
14180 if (!opts_set->x_s390_tune)
14181 opts->x_s390_tune = opts->x_s390_arch;
14182 else if (opts->x_s390_tune == PROCESSOR_9672_G5
14183 || opts->x_s390_tune == PROCESSOR_9672_G6)
14184 warning (OPT_Wdeprecated, "%stune=%s%s is deprecated and will be removed "
14185 "in future releases; use at least %stune=z900%s",
14186 prefix, opts->x_s390_tune == PROCESSOR_9672_G5 ? "g5" : "g6",
14187 suffix, prefix, suffix);
14188
14189 opts->x_s390_tune_flags = processor_flags_table[opts->x_s390_tune];
14190
14191 /* Sanity checks. */
14192 if (opts->x_s390_arch == PROCESSOR_NATIVE
14193 || opts->x_s390_tune == PROCESSOR_NATIVE)
14194 gcc_unreachable ();
14195 if (TARGET_ZARCH_P (opts->x_target_flags) && !TARGET_CPU_ZARCH_P (opts))
14196 error ("z/Architecture mode not supported on %s",
14197 processor_table[(int)opts->x_s390_arch].name);
14198 if (TARGET_64BIT && !TARGET_ZARCH_P (opts->x_target_flags))
14199 error ("64-bit ABI not supported in ESA/390 mode");
14200
14201 /* Enable hardware transactions if available and not explicitly
14202 disabled by user. E.g. with -m31 -march=zEC12 -mzarch */
14203 if (!TARGET_OPT_HTM_P (opts_set->x_target_flags))
14204 {
14205 if (TARGET_CPU_HTM_P (opts) && TARGET_ZARCH_P (opts->x_target_flags))
14206 opts->x_target_flags |= MASK_OPT_HTM;
14207 else
14208 opts->x_target_flags &= ~MASK_OPT_HTM;
14209 }
14210
14211 if (TARGET_OPT_VX_P (opts_set->x_target_flags))
14212 {
14213 if (TARGET_OPT_VX_P (opts->x_target_flags))
14214 {
14215 if (!TARGET_CPU_VX_P (opts))
14216 error ("hardware vector support not available on %s",
14217 processor_table[(int)opts->x_s390_arch].name);
14218 if (TARGET_SOFT_FLOAT_P (opts->x_target_flags))
14219 error ("hardware vector support not available with -msoft-float");
14220 }
14221 }
14222 else
14223 {
14224 if (TARGET_CPU_VX_P (opts))
14225 /* Enable vector support if available and not explicitly disabled
14226 by user. E.g. with -m31 -march=z13 -mzarch */
14227 opts->x_target_flags |= MASK_OPT_VX;
14228 else
14229 opts->x_target_flags &= ~MASK_OPT_VX;
14230 }
14231
14232 /* Use hardware DFP if available and not explicitly disabled by
14233 user. E.g. with -m31 -march=z10 -mzarch */
14234 if (!TARGET_HARD_DFP_P (opts_set->x_target_flags))
14235 {
14236 if (TARGET_DFP_P (opts))
14237 opts->x_target_flags |= MASK_HARD_DFP;
14238 else
14239 opts->x_target_flags &= ~MASK_HARD_DFP;
14240 }
14241
14242 if (TARGET_HARD_DFP_P (opts->x_target_flags) && !TARGET_DFP_P (opts))
14243 {
14244 if (TARGET_HARD_DFP_P (opts_set->x_target_flags))
14245 {
14246 if (!TARGET_CPU_DFP_P (opts))
14247 error ("hardware decimal floating point instructions"
14248 " not available on %s",
14249 processor_table[(int)opts->x_s390_arch].name);
14250 if (!TARGET_ZARCH_P (opts->x_target_flags))
14251 error ("hardware decimal floating point instructions"
14252 " not available in ESA/390 mode");
14253 }
14254 else
14255 opts->x_target_flags &= ~MASK_HARD_DFP;
14256 }
14257
14258 if (TARGET_SOFT_FLOAT_P (opts_set->x_target_flags)
14259 && TARGET_SOFT_FLOAT_P (opts->x_target_flags))
14260 {
14261 if (TARGET_HARD_DFP_P (opts_set->x_target_flags)
14262 && TARGET_HARD_DFP_P (opts->x_target_flags))
14263 error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");
14264
14265 opts->x_target_flags &= ~MASK_HARD_DFP;
14266 }
14267
14268 if (TARGET_BACKCHAIN_P (opts->x_target_flags)
14269 && TARGET_PACKED_STACK_P (opts->x_target_flags)
14270 && TARGET_HARD_FLOAT_P (opts->x_target_flags))
14271 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
14272 "in combination");
14273
14274 if (opts->x_s390_stack_size)
14275 {
14276 if (opts->x_s390_stack_guard >= opts->x_s390_stack_size)
14277 error ("stack size must be greater than the stack guard value");
14278 else if (opts->x_s390_stack_size > 1 << 16)
14279 error ("stack size must not be greater than 64k");
14280 }
14281 else if (opts->x_s390_stack_guard)
14282 error ("-mstack-guard implies use of -mstack-size");
14283
14284 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
14285 if (!TARGET_LONG_DOUBLE_128_P (opts_set->x_target_flags))
14286 opts->x_target_flags |= MASK_LONG_DOUBLE_128;
14287 #endif
14288
14289 if (opts->x_s390_tune >= PROCESSOR_2097_Z10)
14290 {
14291 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
14292 opts->x_param_values,
14293 opts_set->x_param_values);
14294 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
14295 opts->x_param_values,
14296 opts_set->x_param_values);
14297 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
14298 opts->x_param_values,
14299 opts_set->x_param_values);
14300 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
14301 opts->x_param_values,
14302 opts_set->x_param_values);
14303 }
14304
14305 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
14306 opts->x_param_values,
14307 opts_set->x_param_values);
14308 /* values for loop prefetching */
14309 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
14310 opts->x_param_values,
14311 opts_set->x_param_values);
14312 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
14313 opts->x_param_values,
14314 opts_set->x_param_values);
14315 /* s390 has more than 2 levels and the size is much larger. Since
14316 we are always running virtualized assume that we only get a small
14317 part of the caches above l1. */
14318 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
14319 opts->x_param_values,
14320 opts_set->x_param_values);
14321 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
14322 opts->x_param_values,
14323 opts_set->x_param_values);
14324 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
14325 opts->x_param_values,
14326 opts_set->x_param_values);
14327
14328 /* Use the alternative scheduling-pressure algorithm by default. */
14329 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
14330 opts->x_param_values,
14331 opts_set->x_param_values);
14332
14333 /* Call target specific restore function to do post-init work. At the moment,
14334 this just sets opts->x_s390_cost_pointer. */
14335 s390_function_specific_restore (opts, NULL);
14336 }
14337
14338 static void
14339 s390_option_override (void)
14340 {
14341 unsigned int i;
14342 cl_deferred_option *opt;
14343 vec<cl_deferred_option> *v =
14344 (vec<cl_deferred_option> *) s390_deferred_options;
14345
14346 if (v)
14347 FOR_EACH_VEC_ELT (*v, i, opt)
14348 {
14349 switch (opt->opt_index)
14350 {
14351 case OPT_mhotpatch_:
14352 {
14353 int val1;
14354 int val2;
14355 char s[256];
14356 char *t;
14357
14358 strncpy (s, opt->arg, 256);
14359 s[255] = 0;
14360 t = strchr (s, ',');
14361 if (t != NULL)
14362 {
14363 *t = 0;
14364 t++;
14365 val1 = integral_argument (s);
14366 val2 = integral_argument (t);
14367 }
14368 else
14369 {
14370 val1 = -1;
14371 val2 = -1;
14372 }
14373 if (val1 == -1 || val2 == -1)
14374 {
14375 /* argument is not a plain number */
14376 error ("arguments to %qs should be non-negative integers",
14377 "-mhotpatch=n,m");
14378 break;
14379 }
14380 else if (val1 > s390_hotpatch_hw_max
14381 || val2 > s390_hotpatch_hw_max)
14382 {
14383 error ("argument to %qs is too large (max. %d)",
14384 "-mhotpatch=n,m", s390_hotpatch_hw_max);
14385 break;
14386 }
14387 s390_hotpatch_hw_before_label = val1;
14388 s390_hotpatch_hw_after_label = val2;
14389 break;
14390 }
14391 default:
14392 gcc_unreachable ();
14393 }
14394 }
14395
14396 /* Set up function hooks. */
14397 init_machine_status = s390_init_machine_status;
14398
14399 s390_option_override_internal (true, &global_options, &global_options_set);
14400
14401 /* Save the initial options in case the user does function specific
14402 options. */
14403 target_option_default_node = build_target_option_node (&global_options);
14404 target_option_current_node = target_option_default_node;
14405
14406 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
14407 requires the arch flags to be evaluated already. Since prefetching
14408 is beneficial on s390, we enable it if available. */
14409 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
14410 flag_prefetch_loop_arrays = 1;
14411
14412 if (TARGET_TPF)
14413 {
14414 /* Don't emit DWARF3/4 unless specifically selected. The TPF
14415 debuggers do not yet support DWARF 3/4. */
14416 if (!global_options_set.x_dwarf_strict)
14417 dwarf_strict = 1;
14418 if (!global_options_set.x_dwarf_version)
14419 dwarf_version = 2;
14420 }
14421
14422 /* Register a target-specific optimization-and-lowering pass
14423 to run immediately before prologue and epilogue generation.
14424
14425 Registering the pass must be done at start up. It's
14426 convenient to do it here. */
14427 opt_pass *new_pass = new pass_s390_early_mach (g);
14428 struct register_pass_info insert_pass_s390_early_mach =
14429 {
14430 new_pass, /* pass */
14431 "pro_and_epilogue", /* reference_pass_name */
14432 1, /* ref_pass_instance_number */
14433 PASS_POS_INSERT_BEFORE /* po_op */
14434 };
14435 register_pass (&insert_pass_s390_early_mach);
14436 }
14437
14438 #if S390_USE_TARGET_ATTRIBUTE
14439 /* Inner function to process the attribute((target(...))), take an argument and
14440 set the current options from the argument. If we have a list, recursively go
14441 over the list. */
14442
14443 static bool
14444 s390_valid_target_attribute_inner_p (tree args,
14445 struct gcc_options *opts,
14446 struct gcc_options *new_opts_set,
14447 bool force_pragma)
14448 {
14449 char *next_optstr;
14450 bool ret = true;
14451
14452 #define S390_ATTRIB(S,O,A) { S, sizeof (S)-1, O, A, 0 }
14453 #define S390_PRAGMA(S,O,A) { S, sizeof (S)-1, O, A, 1 }
14454 static const struct
14455 {
14456 const char *string;
14457 size_t len;
14458 int opt;
14459 int has_arg;
14460 int only_as_pragma;
14461 } attrs[] = {
14462 /* enum options */
14463 S390_ATTRIB ("arch=", OPT_march_, 1),
14464 S390_ATTRIB ("tune=", OPT_mtune_, 1),
14465 /* uinteger options */
14466 S390_ATTRIB ("stack-guard=", OPT_mstack_guard_, 1),
14467 S390_ATTRIB ("stack-size=", OPT_mstack_size_, 1),
14468 S390_ATTRIB ("branch-cost=", OPT_mbranch_cost_, 1),
14469 S390_ATTRIB ("warn-framesize=", OPT_mwarn_framesize_, 1),
14470 /* flag options */
14471 S390_ATTRIB ("backchain", OPT_mbackchain, 0),
14472 S390_ATTRIB ("hard-dfp", OPT_mhard_dfp, 0),
14473 S390_ATTRIB ("hard-float", OPT_mhard_float, 0),
14474 S390_ATTRIB ("htm", OPT_mhtm, 0),
14475 S390_ATTRIB ("vx", OPT_mvx, 0),
14476 S390_ATTRIB ("packed-stack", OPT_mpacked_stack, 0),
14477 S390_ATTRIB ("small-exec", OPT_msmall_exec, 0),
14478 S390_ATTRIB ("soft-float", OPT_msoft_float, 0),
14479 S390_ATTRIB ("mvcle", OPT_mmvcle, 0),
14480 S390_PRAGMA ("zvector", OPT_mzvector, 0),
14481 /* boolean options */
14482 S390_ATTRIB ("warn-dynamicstack", OPT_mwarn_dynamicstack, 0),
14483 };
14484 #undef S390_ATTRIB
14485 #undef S390_PRAGMA
14486
14487 /* If this is a list, recurse to get the options. */
14488 if (TREE_CODE (args) == TREE_LIST)
14489 {
14490 bool ret = true;
14491 int num_pragma_values;
14492 int i;
14493
14494 /* Note: attribs.c:decl_attributes prepends the values from
14495 current_target_pragma to the list of target attributes. To determine
14496 whether we're looking at a value of the attribute or the pragma we
14497 assume that the first [list_length (current_target_pragma)] values in
14498 the list are the values from the pragma. */
14499 num_pragma_values = (!force_pragma && current_target_pragma != NULL)
14500 ? list_length (current_target_pragma) : 0;
14501 for (i = 0; args; args = TREE_CHAIN (args), i++)
14502 {
14503 bool is_pragma;
14504
14505 is_pragma = (force_pragma || i < num_pragma_values);
14506 if (TREE_VALUE (args)
14507 && !s390_valid_target_attribute_inner_p (TREE_VALUE (args),
14508 opts, new_opts_set,
14509 is_pragma))
14510 {
14511 ret = false;
14512 }
14513 }
14514 return ret;
14515 }
14516
14517 else if (TREE_CODE (args) != STRING_CST)
14518 {
14519 error ("attribute %<target%> argument not a string");
14520 return false;
14521 }
14522
14523 /* Handle multiple arguments separated by commas. */
14524 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
14525
14526 while (next_optstr && *next_optstr != '\0')
14527 {
14528 char *p = next_optstr;
14529 char *orig_p = p;
14530 char *comma = strchr (next_optstr, ',');
14531 size_t len, opt_len;
14532 int opt;
14533 bool opt_set_p;
14534 char ch;
14535 unsigned i;
14536 int mask = 0;
14537 enum cl_var_type var_type;
14538 bool found;
14539
14540 if (comma)
14541 {
14542 *comma = '\0';
14543 len = comma - next_optstr;
14544 next_optstr = comma + 1;
14545 }
14546 else
14547 {
14548 len = strlen (p);
14549 next_optstr = NULL;
14550 }
14551
14552 /* Recognize no-xxx. */
14553 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
14554 {
14555 opt_set_p = false;
14556 p += 3;
14557 len -= 3;
14558 }
14559 else
14560 opt_set_p = true;
14561
14562 /* Find the option. */
14563 ch = *p;
14564 found = false;
14565 for (i = 0; i < ARRAY_SIZE (attrs); i++)
14566 {
14567 opt_len = attrs[i].len;
14568 if (ch == attrs[i].string[0]
14569 && ((attrs[i].has_arg) ? len > opt_len : len == opt_len)
14570 && memcmp (p, attrs[i].string, opt_len) == 0)
14571 {
14572 opt = attrs[i].opt;
14573 if (!opt_set_p && cl_options[opt].cl_reject_negative)
14574 continue;
14575 mask = cl_options[opt].var_value;
14576 var_type = cl_options[opt].var_type;
14577 found = true;
14578 break;
14579 }
14580 }
14581
14582 /* Process the option. */
14583 if (!found)
14584 {
14585 error ("attribute(target(\"%s\")) is unknown", orig_p);
14586 return false;
14587 }
14588 else if (attrs[i].only_as_pragma && !force_pragma)
14589 {
14590 /* Value is not allowed for the target attribute. */
14591 error ("Value %qs is not supported by attribute %<target%>",
14592 attrs[i].string);
14593 return false;
14594 }
14595
14596 else if (var_type == CLVC_BIT_SET || var_type == CLVC_BIT_CLEAR)
14597 {
14598 if (var_type == CLVC_BIT_CLEAR)
14599 opt_set_p = !opt_set_p;
14600
14601 if (opt_set_p)
14602 opts->x_target_flags |= mask;
14603 else
14604 opts->x_target_flags &= ~mask;
14605 new_opts_set->x_target_flags |= mask;
14606 }
14607
14608 else if (cl_options[opt].var_type == CLVC_BOOLEAN)
14609 {
14610 int value;
14611
14612 if (cl_options[opt].cl_uinteger)
14613 {
14614 /* Unsigned integer argument. Code based on the function
14615 decode_cmdline_option () in opts-common.c. */
14616 value = integral_argument (p + opt_len);
14617 }
14618 else
14619 value = (opt_set_p) ? 1 : 0;
14620
14621 if (value != -1)
14622 {
14623 struct cl_decoded_option decoded;
14624
14625 /* Value range check; only implemented for numeric and boolean
14626 options at the moment. */
14627 generate_option (opt, NULL, value, CL_TARGET, &decoded);
14628 s390_handle_option (opts, new_opts_set, &decoded, input_location);
14629 set_option (opts, new_opts_set, opt, value,
14630 p + opt_len, DK_UNSPECIFIED, input_location,
14631 global_dc);
14632 }
14633 else
14634 {
14635 error ("attribute(target(\"%s\")) is unknown", orig_p);
14636 ret = false;
14637 }
14638 }
14639
14640 else if (cl_options[opt].var_type == CLVC_ENUM)
14641 {
14642 bool arg_ok;
14643 int value;
14644
14645 arg_ok = opt_enum_arg_to_value (opt, p + opt_len, &value, CL_TARGET);
14646 if (arg_ok)
14647 set_option (opts, new_opts_set, opt, value,
14648 p + opt_len, DK_UNSPECIFIED, input_location,
14649 global_dc);
14650 else
14651 {
14652 error ("attribute(target(\"%s\")) is unknown", orig_p);
14653 ret = false;
14654 }
14655 }
14656
14657 else
14658 gcc_unreachable ();
14659 }
14660 return ret;
14661 }
14662
14663 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
14664
14665 tree
14666 s390_valid_target_attribute_tree (tree args,
14667 struct gcc_options *opts,
14668 const struct gcc_options *opts_set,
14669 bool force_pragma)
14670 {
14671 tree t = NULL_TREE;
14672 struct gcc_options new_opts_set;
14673
14674 memset (&new_opts_set, 0, sizeof (new_opts_set));
14675
14676 /* Process each of the options on the chain. */
14677 if (! s390_valid_target_attribute_inner_p (args, opts, &new_opts_set,
14678 force_pragma))
14679 return error_mark_node;
14680
14681 /* If some option was set (even if it has not changed), rerun
14682 s390_option_override_internal, and then save the options away. */
14683 if (new_opts_set.x_target_flags
14684 || new_opts_set.x_s390_arch
14685 || new_opts_set.x_s390_tune
14686 || new_opts_set.x_s390_stack_guard
14687 || new_opts_set.x_s390_stack_size
14688 || new_opts_set.x_s390_branch_cost
14689 || new_opts_set.x_s390_warn_framesize
14690 || new_opts_set.x_s390_warn_dynamicstack_p)
14691 {
14692 const unsigned char *src = (const unsigned char *)opts_set;
14693 unsigned char *dest = (unsigned char *)&new_opts_set;
14694 unsigned int i;
14695
14696 /* Merge the original option flags into the new ones. */
14697 for (i = 0; i < sizeof(*opts_set); i++)
14698 dest[i] |= src[i];
14699
14700 /* Do any overrides, such as arch=xxx, or tune=xxx support. */
14701 s390_option_override_internal (false, opts, &new_opts_set);
14702 /* Save the current options unless we are validating options for
14703 #pragma. */
14704 t = build_target_option_node (opts);
14705 }
14706 return t;
14707 }
14708
14709 /* Hook to validate attribute((target("string"))). */
14710
14711 static bool
14712 s390_valid_target_attribute_p (tree fndecl,
14713 tree ARG_UNUSED (name),
14714 tree args,
14715 int ARG_UNUSED (flags))
14716 {
14717 struct gcc_options func_options;
14718 tree new_target, new_optimize;
14719 bool ret = true;
14720
14721 /* attribute((target("default"))) does nothing, beyond
14722 affecting multi-versioning. */
14723 if (TREE_VALUE (args)
14724 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
14725 && TREE_CHAIN (args) == NULL_TREE
14726 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
14727 return true;
14728
14729 tree old_optimize = build_optimization_node (&global_options);
14730
14731 /* Get the optimization options of the current function. */
14732 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
14733
14734 if (!func_optimize)
14735 func_optimize = old_optimize;
14736
14737 /* Init func_options. */
14738 memset (&func_options, 0, sizeof (func_options));
14739 init_options_struct (&func_options, NULL);
14740 lang_hooks.init_options_struct (&func_options);
14741
14742 cl_optimization_restore (&func_options, TREE_OPTIMIZATION (func_optimize));
14743
14744 /* Initialize func_options to the default before its target options can
14745 be set. */
14746 cl_target_option_restore (&func_options,
14747 TREE_TARGET_OPTION (target_option_default_node));
14748
14749 new_target = s390_valid_target_attribute_tree (args, &func_options,
14750 &global_options_set,
14751 (args ==
14752 current_target_pragma));
14753 new_optimize = build_optimization_node (&func_options);
14754 if (new_target == error_mark_node)
14755 ret = false;
14756 else if (fndecl && new_target)
14757 {
14758 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
14759 if (old_optimize != new_optimize)
14760 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
14761 }
14762 return ret;
14763 }
14764
14765 /* Restore targets globals from NEW_TREE and invalidate s390_previous_fndecl
14766 cache. */
14767
14768 void
14769 s390_activate_target_options (tree new_tree)
14770 {
14771 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
14772 if (TREE_TARGET_GLOBALS (new_tree))
14773 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
14774 else if (new_tree == target_option_default_node)
14775 restore_target_globals (&default_target_globals);
14776 else
14777 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
14778 s390_previous_fndecl = NULL_TREE;
14779 }
14780
14781 /* Establish appropriate back-end context for processing the function
14782 FNDECL. The argument might be NULL to indicate processing at top
14783 level, outside of any function scope. */
14784 static void
14785 s390_set_current_function (tree fndecl)
14786 {
14787 /* Only change the context if the function changes. This hook is called
14788 several times in the course of compiling a function, and we don't want to
14789 slow things down too much or call target_reinit when it isn't safe. */
14790 if (fndecl == s390_previous_fndecl)
14791 return;
14792
14793 tree old_tree;
14794 if (s390_previous_fndecl == NULL_TREE)
14795 old_tree = target_option_current_node;
14796 else if (DECL_FUNCTION_SPECIFIC_TARGET (s390_previous_fndecl))
14797 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (s390_previous_fndecl);
14798 else
14799 old_tree = target_option_default_node;
14800
14801 if (fndecl == NULL_TREE)
14802 {
14803 if (old_tree != target_option_current_node)
14804 s390_activate_target_options (target_option_current_node);
14805 return;
14806 }
14807
14808 tree new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
14809 if (new_tree == NULL_TREE)
14810 new_tree = target_option_default_node;
14811
14812 if (old_tree != new_tree)
14813 s390_activate_target_options (new_tree);
14814 s390_previous_fndecl = fndecl;
14815 }
14816 #endif
14817
14818 /* Implement TARGET_USE_BY_PIECES_INFRASTRUCTURE_P. */
14819
14820 static bool
14821 s390_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
14822 unsigned int align ATTRIBUTE_UNUSED,
14823 enum by_pieces_operation op ATTRIBUTE_UNUSED,
14824 bool speed_p ATTRIBUTE_UNUSED)
14825 {
14826 return (size == 1 || size == 2
14827 || size == 4 || (TARGET_ZARCH && size == 8));
14828 }
14829
14830 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
14831
14832 static void
14833 s390_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
14834 {
14835 tree sfpc = s390_builtin_decls[S390_BUILTIN_s390_sfpc];
14836 tree efpc = s390_builtin_decls[S390_BUILTIN_s390_efpc];
14837 tree call_efpc = build_call_expr (efpc, 0);
14838 tree fenv_var = create_tmp_var_raw (unsigned_type_node);
14839
14840 #define FPC_EXCEPTION_MASK HOST_WIDE_INT_UC (0xf8000000)
14841 #define FPC_FLAGS_MASK HOST_WIDE_INT_UC (0x00f80000)
14842 #define FPC_DXC_MASK HOST_WIDE_INT_UC (0x0000ff00)
14843 #define FPC_EXCEPTION_MASK_SHIFT HOST_WIDE_INT_UC (24)
14844 #define FPC_FLAGS_SHIFT HOST_WIDE_INT_UC (16)
14845 #define FPC_DXC_SHIFT HOST_WIDE_INT_UC (8)
14846
14847 /* Generates the equivalent of feholdexcept (&fenv_var)
14848
14849 fenv_var = __builtin_s390_efpc ();
14850 __builtin_s390_sfpc (fenv_var & mask) */
14851 tree old_fpc = build2 (MODIFY_EXPR, unsigned_type_node, fenv_var, call_efpc);
14852 tree new_fpc =
14853 build2 (BIT_AND_EXPR, unsigned_type_node, fenv_var,
14854 build_int_cst (unsigned_type_node,
14855 ~(FPC_DXC_MASK | FPC_FLAGS_MASK |
14856 FPC_EXCEPTION_MASK)));
14857 tree set_new_fpc = build_call_expr (sfpc, 1, new_fpc);
14858 *hold = build2 (COMPOUND_EXPR, void_type_node, old_fpc, set_new_fpc);
14859
14860 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT)
14861
14862 __builtin_s390_sfpc (__builtin_s390_efpc () & mask) */
14863 new_fpc = build2 (BIT_AND_EXPR, unsigned_type_node, call_efpc,
14864 build_int_cst (unsigned_type_node,
14865 ~(FPC_DXC_MASK | FPC_FLAGS_MASK)));
14866 *clear = build_call_expr (sfpc, 1, new_fpc);
14867
14868 /* Generates the equivalent of feupdateenv (fenv_var)
14869
14870 old_fpc = __builtin_s390_efpc ();
14871 __builtin_s390_sfpc (fenv_var);
14872 __atomic_feraiseexcept ((old_fpc & FPC_FLAGS_MASK) >> FPC_FLAGS_SHIFT); */
14873
14874 old_fpc = create_tmp_var_raw (unsigned_type_node);
14875 tree store_old_fpc = build2 (MODIFY_EXPR, void_type_node,
14876 old_fpc, call_efpc);
14877
14878 set_new_fpc = build_call_expr (sfpc, 1, fenv_var);
14879
14880 tree raise_old_except = build2 (BIT_AND_EXPR, unsigned_type_node, old_fpc,
14881 build_int_cst (unsigned_type_node,
14882 FPC_FLAGS_MASK));
14883 raise_old_except = build2 (RSHIFT_EXPR, unsigned_type_node, raise_old_except,
14884 build_int_cst (unsigned_type_node,
14885 FPC_FLAGS_SHIFT));
14886 tree atomic_feraiseexcept
14887 = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
14888 raise_old_except = build_call_expr (atomic_feraiseexcept,
14889 1, raise_old_except);
14890
14891 *update = build2 (COMPOUND_EXPR, void_type_node,
14892 build2 (COMPOUND_EXPR, void_type_node,
14893 store_old_fpc, set_new_fpc),
14894 raise_old_except);
14895
14896 #undef FPC_EXCEPTION_MASK
14897 #undef FPC_FLAGS_MASK
14898 #undef FPC_DXC_MASK
14899 #undef FPC_EXCEPTION_MASK_SHIFT
14900 #undef FPC_FLAGS_SHIFT
14901 #undef FPC_DXC_SHIFT
14902 }
14903
14904 /* Return the vector mode to be used for inner mode MODE when doing
14905 vectorization. */
14906 static machine_mode
14907 s390_preferred_simd_mode (machine_mode mode)
14908 {
14909 if (TARGET_VX)
14910 switch (mode)
14911 {
14912 case DFmode:
14913 return V2DFmode;
14914 case DImode:
14915 return V2DImode;
14916 case SImode:
14917 return V4SImode;
14918 case HImode:
14919 return V8HImode;
14920 case QImode:
14921 return V16QImode;
14922 default:;
14923 }
14924 return word_mode;
14925 }
14926
14927 /* Our hardware does not require vectors to be strictly aligned. */
14928 static bool
14929 s390_support_vector_misalignment (machine_mode mode ATTRIBUTE_UNUSED,
14930 const_tree type ATTRIBUTE_UNUSED,
14931 int misalignment ATTRIBUTE_UNUSED,
14932 bool is_packed ATTRIBUTE_UNUSED)
14933 {
14934 if (TARGET_VX)
14935 return true;
14936
14937 return default_builtin_support_vector_misalignment (mode, type, misalignment,
14938 is_packed);
14939 }
14940
14941 /* The vector ABI requires vector types to be aligned on an 8 byte
14942 boundary (our stack alignment). However, we allow this to be
14943 overriden by the user, while this definitely breaks the ABI. */
14944 static HOST_WIDE_INT
14945 s390_vector_alignment (const_tree type)
14946 {
14947 if (!TARGET_VX_ABI)
14948 return default_vector_alignment (type);
14949
14950 if (TYPE_USER_ALIGN (type))
14951 return TYPE_ALIGN (type);
14952
14953 return MIN (64, tree_to_shwi (TYPE_SIZE (type)));
14954 }
14955
14956 #ifdef HAVE_AS_MACHINE_MACHINEMODE
14957 /* Implement TARGET_ASM_FILE_START. */
14958 static void
14959 s390_asm_file_start (void)
14960 {
14961 s390_asm_output_machine_for_arch (asm_out_file);
14962 }
14963 #endif
14964
14965 /* Implement TARGET_ASM_FILE_END. */
14966 static void
14967 s390_asm_file_end (void)
14968 {
14969 #ifdef HAVE_AS_GNU_ATTRIBUTE
14970 varpool_node *vnode;
14971 cgraph_node *cnode;
14972
14973 FOR_EACH_VARIABLE (vnode)
14974 if (TREE_PUBLIC (vnode->decl))
14975 s390_check_type_for_vector_abi (TREE_TYPE (vnode->decl), false, false);
14976
14977 FOR_EACH_FUNCTION (cnode)
14978 if (TREE_PUBLIC (cnode->decl))
14979 s390_check_type_for_vector_abi (TREE_TYPE (cnode->decl), false, false);
14980
14981
14982 if (s390_vector_abi != 0)
14983 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
14984 s390_vector_abi);
14985 #endif
14986 file_end_indicate_exec_stack ();
14987
14988 if (flag_split_stack)
14989 file_end_indicate_split_stack ();
14990 }
14991
14992 /* Return true if TYPE is a vector bool type. */
14993 static inline bool
14994 s390_vector_bool_type_p (const_tree type)
14995 {
14996 return TYPE_VECTOR_OPAQUE (type);
14997 }
14998
14999 /* Return the diagnostic message string if the binary operation OP is
15000 not permitted on TYPE1 and TYPE2, NULL otherwise. */
15001 static const char*
15002 s390_invalid_binary_op (int op ATTRIBUTE_UNUSED, const_tree type1, const_tree type2)
15003 {
15004 bool bool1_p, bool2_p;
15005 bool plusminus_p;
15006 bool muldiv_p;
15007 bool compare_p;
15008 machine_mode mode1, mode2;
15009
15010 if (!TARGET_ZVECTOR)
15011 return NULL;
15012
15013 if (!VECTOR_TYPE_P (type1) || !VECTOR_TYPE_P (type2))
15014 return NULL;
15015
15016 bool1_p = s390_vector_bool_type_p (type1);
15017 bool2_p = s390_vector_bool_type_p (type2);
15018
15019 /* Mixing signed and unsigned types is forbidden for all
15020 operators. */
15021 if (!bool1_p && !bool2_p
15022 && TYPE_UNSIGNED (type1) != TYPE_UNSIGNED (type2))
15023 return N_("types differ in signess");
15024
15025 plusminus_p = (op == PLUS_EXPR || op == MINUS_EXPR);
15026 muldiv_p = (op == MULT_EXPR || op == RDIV_EXPR || op == TRUNC_DIV_EXPR
15027 || op == CEIL_DIV_EXPR || op == FLOOR_DIV_EXPR
15028 || op == ROUND_DIV_EXPR);
15029 compare_p = (op == LT_EXPR || op == LE_EXPR || op == GT_EXPR || op == GE_EXPR
15030 || op == EQ_EXPR || op == NE_EXPR);
15031
15032 if (bool1_p && bool2_p && (plusminus_p || muldiv_p))
15033 return N_("binary operator does not support two vector bool operands");
15034
15035 if (bool1_p != bool2_p && (muldiv_p || compare_p))
15036 return N_("binary operator does not support vector bool operand");
15037
15038 mode1 = TYPE_MODE (type1);
15039 mode2 = TYPE_MODE (type2);
15040
15041 if (bool1_p != bool2_p && plusminus_p
15042 && (GET_MODE_CLASS (mode1) == MODE_VECTOR_FLOAT
15043 || GET_MODE_CLASS (mode2) == MODE_VECTOR_FLOAT))
15044 return N_("binary operator does not support mixing vector "
15045 "bool with floating point vector operands");
15046
15047 return NULL;
15048 }
15049
15050 /* Initialize GCC target structure. */
15051
15052 #undef TARGET_ASM_ALIGNED_HI_OP
15053 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
15054 #undef TARGET_ASM_ALIGNED_DI_OP
15055 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
15056 #undef TARGET_ASM_INTEGER
15057 #define TARGET_ASM_INTEGER s390_assemble_integer
15058
15059 #undef TARGET_ASM_OPEN_PAREN
15060 #define TARGET_ASM_OPEN_PAREN ""
15061
15062 #undef TARGET_ASM_CLOSE_PAREN
15063 #define TARGET_ASM_CLOSE_PAREN ""
15064
15065 #undef TARGET_OPTION_OVERRIDE
15066 #define TARGET_OPTION_OVERRIDE s390_option_override
15067
15068 #undef TARGET_ENCODE_SECTION_INFO
15069 #define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
15070
15071 #undef TARGET_SCALAR_MODE_SUPPORTED_P
15072 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
15073
15074 #ifdef HAVE_AS_TLS
15075 #undef TARGET_HAVE_TLS
15076 #define TARGET_HAVE_TLS true
15077 #endif
15078 #undef TARGET_CANNOT_FORCE_CONST_MEM
15079 #define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem
15080
15081 #undef TARGET_DELEGITIMIZE_ADDRESS
15082 #define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address
15083
15084 #undef TARGET_LEGITIMIZE_ADDRESS
15085 #define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address
15086
15087 #undef TARGET_RETURN_IN_MEMORY
15088 #define TARGET_RETURN_IN_MEMORY s390_return_in_memory
15089
15090 #undef TARGET_INIT_BUILTINS
15091 #define TARGET_INIT_BUILTINS s390_init_builtins
15092 #undef TARGET_EXPAND_BUILTIN
15093 #define TARGET_EXPAND_BUILTIN s390_expand_builtin
15094 #undef TARGET_BUILTIN_DECL
15095 #define TARGET_BUILTIN_DECL s390_builtin_decl
15096
15097 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
15098 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra
15099
15100 #undef TARGET_ASM_OUTPUT_MI_THUNK
15101 #define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
15102 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
15103 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
15104
15105 #undef TARGET_SCHED_ADJUST_PRIORITY
15106 #define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
15107 #undef TARGET_SCHED_ISSUE_RATE
15108 #define TARGET_SCHED_ISSUE_RATE s390_issue_rate
15109 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
15110 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead
15111
15112 #undef TARGET_SCHED_VARIABLE_ISSUE
15113 #define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
15114 #undef TARGET_SCHED_REORDER
15115 #define TARGET_SCHED_REORDER s390_sched_reorder
15116 #undef TARGET_SCHED_INIT
15117 #define TARGET_SCHED_INIT s390_sched_init
15118
15119 #undef TARGET_CANNOT_COPY_INSN_P
15120 #define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
15121 #undef TARGET_RTX_COSTS
15122 #define TARGET_RTX_COSTS s390_rtx_costs
15123 #undef TARGET_ADDRESS_COST
15124 #define TARGET_ADDRESS_COST s390_address_cost
15125 #undef TARGET_REGISTER_MOVE_COST
15126 #define TARGET_REGISTER_MOVE_COST s390_register_move_cost
15127 #undef TARGET_MEMORY_MOVE_COST
15128 #define TARGET_MEMORY_MOVE_COST s390_memory_move_cost
15129
15130 #undef TARGET_MACHINE_DEPENDENT_REORG
15131 #define TARGET_MACHINE_DEPENDENT_REORG s390_reorg
15132
15133 #undef TARGET_VALID_POINTER_MODE
15134 #define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode
15135
15136 #undef TARGET_BUILD_BUILTIN_VA_LIST
15137 #define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
15138 #undef TARGET_EXPAND_BUILTIN_VA_START
15139 #define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
15140 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
15141 #define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg
15142
15143 #undef TARGET_PROMOTE_FUNCTION_MODE
15144 #define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
15145 #undef TARGET_PASS_BY_REFERENCE
15146 #define TARGET_PASS_BY_REFERENCE s390_pass_by_reference
15147
15148 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
15149 #define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
15150 #undef TARGET_FUNCTION_ARG
15151 #define TARGET_FUNCTION_ARG s390_function_arg
15152 #undef TARGET_FUNCTION_ARG_ADVANCE
15153 #define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
15154 #undef TARGET_FUNCTION_VALUE
15155 #define TARGET_FUNCTION_VALUE s390_function_value
15156 #undef TARGET_LIBCALL_VALUE
15157 #define TARGET_LIBCALL_VALUE s390_libcall_value
15158 #undef TARGET_STRICT_ARGUMENT_NAMING
15159 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
15160
15161 #undef TARGET_KEEP_LEAF_WHEN_PROFILED
15162 #define TARGET_KEEP_LEAF_WHEN_PROFILED s390_keep_leaf_when_profiled
15163
15164 #undef TARGET_FIXED_CONDITION_CODE_REGS
15165 #define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs
15166
15167 #undef TARGET_CC_MODES_COMPATIBLE
15168 #define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible
15169
15170 #undef TARGET_INVALID_WITHIN_DOLOOP
15171 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_insn_null
15172
15173 #ifdef HAVE_AS_TLS
15174 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
15175 #define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
15176 #endif
15177
15178 #undef TARGET_DWARF_FRAME_REG_MODE
15179 #define TARGET_DWARF_FRAME_REG_MODE s390_dwarf_frame_reg_mode
15180
15181 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
15182 #undef TARGET_MANGLE_TYPE
15183 #define TARGET_MANGLE_TYPE s390_mangle_type
15184 #endif
15185
15186 #undef TARGET_SCALAR_MODE_SUPPORTED_P
15187 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
15188
15189 #undef TARGET_VECTOR_MODE_SUPPORTED_P
15190 #define TARGET_VECTOR_MODE_SUPPORTED_P s390_vector_mode_supported_p
15191
15192 #undef TARGET_PREFERRED_RELOAD_CLASS
15193 #define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class
15194
15195 #undef TARGET_SECONDARY_RELOAD
15196 #define TARGET_SECONDARY_RELOAD s390_secondary_reload
15197
15198 #undef TARGET_LIBGCC_CMP_RETURN_MODE
15199 #define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode
15200
15201 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
15202 #define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode
15203
15204 #undef TARGET_LEGITIMATE_ADDRESS_P
15205 #define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p
15206
15207 #undef TARGET_LEGITIMATE_CONSTANT_P
15208 #define TARGET_LEGITIMATE_CONSTANT_P s390_legitimate_constant_p
15209
15210 #undef TARGET_LRA_P
15211 #define TARGET_LRA_P s390_lra_p
15212
15213 #undef TARGET_CAN_ELIMINATE
15214 #define TARGET_CAN_ELIMINATE s390_can_eliminate
15215
15216 #undef TARGET_CONDITIONAL_REGISTER_USAGE
15217 #define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage
15218
15219 #undef TARGET_LOOP_UNROLL_ADJUST
15220 #define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust
15221
15222 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
15223 #define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
15224 #undef TARGET_TRAMPOLINE_INIT
15225 #define TARGET_TRAMPOLINE_INIT s390_trampoline_init
15226
15227 #undef TARGET_UNWIND_WORD_MODE
15228 #define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode
15229
15230 #undef TARGET_CANONICALIZE_COMPARISON
15231 #define TARGET_CANONICALIZE_COMPARISON s390_canonicalize_comparison
15232
15233 #undef TARGET_HARD_REGNO_SCRATCH_OK
15234 #define TARGET_HARD_REGNO_SCRATCH_OK s390_hard_regno_scratch_ok
15235
15236 #undef TARGET_ATTRIBUTE_TABLE
15237 #define TARGET_ATTRIBUTE_TABLE s390_attribute_table
15238
15239 #undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
15240 #define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true
15241
15242 #undef TARGET_SET_UP_BY_PROLOGUE
15243 #define TARGET_SET_UP_BY_PROLOGUE s300_set_up_by_prologue
15244
15245 #undef TARGET_EXTRA_LIVE_ON_ENTRY
15246 #define TARGET_EXTRA_LIVE_ON_ENTRY s390_live_on_entry
15247
15248 #undef TARGET_USE_BY_PIECES_INFRASTRUCTURE_P
15249 #define TARGET_USE_BY_PIECES_INFRASTRUCTURE_P \
15250 s390_use_by_pieces_infrastructure_p
15251
15252 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
15253 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV s390_atomic_assign_expand_fenv
15254
15255 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
15256 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN s390_invalid_arg_for_unprototyped_fn
15257
15258 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
15259 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE s390_preferred_simd_mode
15260
15261 #undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
15262 #define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT s390_support_vector_misalignment
15263
15264 #undef TARGET_VECTOR_ALIGNMENT
15265 #define TARGET_VECTOR_ALIGNMENT s390_vector_alignment
15266
15267 #undef TARGET_INVALID_BINARY_OP
15268 #define TARGET_INVALID_BINARY_OP s390_invalid_binary_op
15269
15270 #ifdef HAVE_AS_MACHINE_MACHINEMODE
15271 #undef TARGET_ASM_FILE_START
15272 #define TARGET_ASM_FILE_START s390_asm_file_start
15273 #endif
15274
15275 #undef TARGET_ASM_FILE_END
15276 #define TARGET_ASM_FILE_END s390_asm_file_end
15277
15278 #if S390_USE_TARGET_ATTRIBUTE
15279 #undef TARGET_SET_CURRENT_FUNCTION
15280 #define TARGET_SET_CURRENT_FUNCTION s390_set_current_function
15281
15282 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
15283 #define TARGET_OPTION_VALID_ATTRIBUTE_P s390_valid_target_attribute_p
15284 #endif
15285
15286 #undef TARGET_OPTION_RESTORE
15287 #define TARGET_OPTION_RESTORE s390_function_specific_restore
15288
15289 struct gcc_target targetm = TARGET_INITIALIZER;
15290
15291 #include "gt-s390.h"
This page took 0.712877 seconds and 6 git commands to generate.