/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2015 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "recog.h"
#include "obstack.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "wide-int.h"
#include "inchash.h"
#include "tree.h"
#include "fold-const.h"
#include "stringpool.h"
#include "stor-layout.h"
#include "calls.h"
#include "print-tree.h"
#include "varasm.h"
#include "hashtab.h"
#include "function.h"
#include "statistics.h"
#include "real.h"
#include "fixed-value.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "emit-rtl.h"
#include "stmt.h"
#include "expr.h"
#include "insn-codes.h"
#include "optabs.h"
#include "except.h"
#include "output.h"
#include "dbxout.h"
#include "predict.h"
#include "dominance.h"
#include "cfg.h"
#include "cfgrtl.h"
#include "cfganal.h"
#include "lcm.h"
#include "cfgbuild.h"
#include "cfgcleanup.h"
#include "basic-block.h"
#include "diagnostic-core.h"
#include "toplev.h"
#include "ggc.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "cfgloop.h"
#include "sched-int.h"
#include "hash-table.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "ira.h"
#include "opts.h"
#include "tree-vectorizer.h"
#include "dumpfile.h"
#include "hash-map.h"
#include "plugin-api.h"
#include "ipa-ref.h"
#include "cgraph.h"
#include "target-globals.h"
#include "builtins.h"
#include "context.h"
#include "tree-pass.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"  /* for N_SLINE */
#endif

#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

#define min(A,B)  ((A) < (B) ? (A) : (B))
#define max(A,B)  ((A) > (B) ? (A) : (B))

/* Structure used to define the rs6000 stack */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31 */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int spe_gp_save_offset;	/* offset to save spe 64-bit gprs */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int ehcr_offset;		/* offset to EH CR field data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in save_size */
  int vrsave_size;		/* size to hold VRSAVE if not in save_size */
  int altivec_padding_size;	/* size of altivec alignment padding if
				   not in save_size */
  int spe_gp_size;		/* size of 64-bit GPR save size for SPE */
  int spe_padding_size;
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
  int spe_64bit_regs_used;
  int savres_strategy;
} rs6000_stack_t;

/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Whether the instruction chain has been scanned already.  */
  int insn_chain_scanned_p;
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Temporary stack slot to use for SDmode copies.  This slot is
     64-bits wide and is allocated early enough so that the offset
     does not overflow the 16-bit load/store offset field.  */
  rtx sdmode_stack_slot;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
} machine_function;

/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of the label created for -mrelocatable, which we call
   so we can get the address of the GOT section.  */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  The type is unsigned since not all things that
   include rs6000.h also include machmode.h.  */
unsigned rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
/* Flag whether floating point values have been passed/returned.  */
static bool rs6000_passes_float;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif

/* Value is TRUE if register/mode pair is acceptable.  */
bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized */
int toc_initialized;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;

struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};

/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];
/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};

/* -mrecip options.  */
static struct
{
  const char *string;		/* option name */
  unsigned int mask;		/* mask bits to set */
} recip_options[] = {
  { "all",	RECIP_ALL },
  { "none",	RECIP_NONE },
  { "div",	(RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		 | RECIP_V2DF_DIV) },
  { "divf",	(RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	(RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	(RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		 | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	(RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
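
/* Illustrative sketch only (compiled out): one way a single -mrecip
   suboption string such as "divf" could be mapped to its mask bits using
   the table above.  The function name is hypothetical; the real -mrecip
   parsing elsewhere in this file also handles comma-separated lists of
   suboptions and '!' negation.  */
#if 0
static unsigned int
recip_mask_for_suboption (const char *opt)
{
  for (size_t i = 0; i < ARRAY_SIZE (recip_options); i++)
    if (strcmp (opt, recip_options[i].string) == 0)
      return recip_options[i].mask;
  return RECIP_NONE;		/* Unrecognized suboption name.  */
}
#endif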

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);

/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */

enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE,
  SPE_ACC_TYPE,
  SPEFSCR_REG_TYPE
};

/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)
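
/* For example, IS_STD_REG_TYPE (VSX_REG_TYPE) is true because VSX_REG_TYPE
   lies between GPR_REG_TYPE and FPR_REG_TYPE in the enumeration above,
   while IS_FP_VECT_REG_TYPE (GPR_REG_TYPE) is false because the
   floating/vector range starts at VSX_REG_TYPE.  */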


/* Register classes we care about in secondary reload or when checking for a
   legitimate address.  We only need to worry about GPR, FPR, and Altivec
   registers here, along with an ANY field that is the OR of the 3 register
   classes.  */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,		/* General purpose registers.  */
  RELOAD_REG_FPR,		/* Traditional floating point regs.  */
  RELOAD_REG_VMX,		/* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,		/* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};

/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   bits.  */
#define FIRST_RELOAD_REG_CLASS	RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS	RELOAD_REG_VMX

/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;		/* Register class name.  */
  int reg;			/* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr",	FIRST_GPR_REGNO },	/* RELOAD_REG_GPR.  */
  { "Fpr",	FIRST_FPR_REGNO },	/* RELOAD_REG_FPR.  */
  { "VMX",	FIRST_ALTIVEC_REGNO },	/* RELOAD_REG_VMX.  */
  { "Any",	-1 },			/* RELOAD_REG_ANY.  */
};

/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID	0x01	/* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE	0x02	/* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED	0x04	/* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET	0x08	/* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC	0x10	/* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY	0x20	/* PRE_MODIFY valid.  */
#define RELOAD_REG_AND_M16	0x40	/* AND -16 addressing.  */

/* Reload insns and masks of valid addressing modes, based on the register
   type.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;		/* INSN to reload for loading.  */
  enum insn_code reload_store;		/* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;	/* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;	/* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;	/* INSN to move from VSX to GPR.  */
  addr_mask_type addr_mask[(int)N_RELOAD_REG]; /* Valid address masks.  */
  bool scalar_in_vmx_p;			/* Scalar value can go in VMX.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];

/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}
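
/* Illustrative sketch only (compiled out): queries against a single reload
   register class follow the same pattern as the two helpers above.  The
   helper name below is hypothetical; it would test whether MODE supports
   reg+offset addressing in GPRs specifically, rather than in any class.  */
#if 0
static inline bool
mode_gpr_offset_addressing_p (machine_mode mode)
{
  return (reg_addr[mode].addr_mask[RELOAD_REG_GPR] & RELOAD_REG_OFFSET) != 0;
}
#endif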

\f
/* Target cpu costs.  */

struct processor_costs {
  const int mulsi;	  /* cost of SImode multiplication.  */
  const int mulsi_const;  /* cost of SImode multiplication by constant.  */
  const int mulsi_const9; /* cost of SImode mult by short constant.  */
  const int muldi;	  /* cost of DImode multiplication.  */
  const int divsi;	  /* cost of SImode division.  */
  const int divdi;	  /* cost of DImode division.  */
  const int fp;		  /* cost of simple SFmode and DFmode insns.  */
  const int dmul;	  /* cost of DFmode multiplication (and fmadd).  */
  const int sdiv;	  /* cost of SFmode division (fdivs).  */
  const int ddiv;	  /* cost of DFmode division (fdiv).  */
  const int cache_line_size;	/* cache line size in bytes.  */
  const int l1_cache_size;	/* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;	/* size of l2 cache, in kilobytes.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
					operations.  */
  const int sfdf_convert;	/* cost of SF->DF conversion.  */
};

const struct processor_costs *rs6000_cost;

/* Processor costs (relative to an add) */

/* Instruction size costs on 32-bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  32,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction size costs on 64-bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  128,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),	/* mulsi */
  COSTS_N_INSNS (12),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (34),	/* muldi */
  COSTS_N_INSNS (65),	/* divsi */
  COSTS_N_INSNS (67),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (31),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  128,			/* cache line size */
  128,			/* l1 cache */
  2048,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (6),	/* divsi */
  COSTS_N_INSNS (6),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (10),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (33),	/* divsi */
  COSTS_N_INSNS (33),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (35),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (34),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (11),	/* divsi */
  COSTS_N_INSNS (11),	/* divdi */
  COSTS_N_INSNS (6),	/* fp */
  COSTS_N_INSNS (6),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* l1 cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (36),	/* divsi */
  COSTS_N_INSNS (36),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (37),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  8,			/* l1 cache */
  64,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (21),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),	    /* mulsi_const */
  COSTS_N_INSNS (6/2),	    /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),	    /* divsi */
  COSTS_N_INSNS (70/2),	    /* divdi */
  COSTS_N_INSNS (10/2),	    /* fp */
  COSTS_N_INSNS (10/2),	    /* dmul */
  COSTS_N_INSNS (74/2),	    /* sdiv */
  COSTS_N_INSNS (74/2),	    /* ddiv */
  128,			    /* cache line size */
  32,			    /* l1 cache */
  512,			    /* l2 cache */
  6,			    /* streams */
  0,			    /* SF->DF convert */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (17),	/* divsi */
  COSTS_N_INSNS (17),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (23),	/* divsi */
  COSTS_N_INSNS (23),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (21),	/* sdiv */
  COSTS_N_INSNS (35),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (29),	/* sdiv */
  COSTS_N_INSNS (29),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (8),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (18),	/* divdi */
  COSTS_N_INSNS (10),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (46),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),	/* mulsi */
  COSTS_N_INSNS (8),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (8),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (14),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),	/* mulsi */
  COSTS_N_INSNS (16),	/* mulsi_const */
  COSTS_N_INSNS (16),	/* mulsi_const9 */
  COSTS_N_INSNS (16),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (59),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  64,			/* cache line size */
  16,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};

\f
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (tree, tree, tree);

\f
static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
static bool spe_func_has_64bit_regs_p (void);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (tree, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, int, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, rtx, rtx_insn *, int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *, rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (machine_mode, machine_mode,
				   machine_mode, machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void spe_init_builtins (void);
static void htm_init_builtins (void);
static rtx spe_expand_predicate_builtin (enum insn_code, tree, rtx);
static rtx spe_expand_evsel_builtin (enum insn_code, tree, rtx);
static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
#endif
static rtx rs6000_legitimize_reload_address (rtx, machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_secondary_memory_needed (enum reg_class, enum reg_class,
					    machine_mode);
static bool rs6000_debug_secondary_memory_needed (enum reg_class,
						  enum reg_class,
						  machine_mode);
static bool rs6000_cannot_change_mode_class (machine_mode,
					     machine_mode,
					     enum reg_class);
static bool rs6000_debug_cannot_change_mode_class (machine_mode,
						   machine_mode,
						   enum reg_class);
static bool rs6000_save_toc_in_prologue_p (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

bool (*rs6000_secondary_memory_needed_ptr) (enum reg_class, enum reg_class,
					    machine_mode)
  = rs6000_secondary_memory_needed;

bool (*rs6000_cannot_change_mode_class_ptr) (machine_mode,
					     machine_mode,
					     enum reg_class)
  = rs6000_cannot_change_mode_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  machine_mode,
					  secondary_reload_info *,
					  bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context*);

/* Hash table stuff for keeping track of TOC entries.  */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_hasher<toc_hash_struct *>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;
  machine_mode mode[4];		/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

struct builtin_hasher : ggc_hasher<builtin_hash_struct *>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;

\f
/* Default register names.  */
char rs6000_reg_names[][8] =
{
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "mq", "lr", "ctr", "ap",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "ca",
  /* AltiVec registers.  */
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "vrsave", "vscr",
  /* SPE registers.  */
  "spe_acc", "spefscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr",
  /* SPE High registers.  */
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31"
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
  "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
  "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
  "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
  "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
  "mq", "lr", "ctr", "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
  "ca",
  /* AltiVec registers.  */
  "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7",
  "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* SPE registers.  */
  "spe_acc", "spefscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr",
  /* SPE High registers.  */
  "%rh0", "%rh1", "%rh2", "%rh3", "%rh4", "%rh5", "%rh6", "%rh7",
  "%rh8", "%rh9", "%rh10", "%rh11", "%rh12", "%rh13", "%rh14", "%rh15",
  "%rh16", "%rh17", "%rh18", "%rh19", "%rh20", "%rh21", "%rh22", "%rh23",
  "%rh24", "%rh25", "%rh26", "%rh27", "%rh28", "%rh29", "%rh30", "%rh31"
};
#endif

/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "altivec",   1, 1, false, true,  false, rs6000_handle_altivec_attribute,
    false },
  { "longcall",  0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "shortcall", 0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
  { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, NULL, false }
};
\f
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
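
/* For example, ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is 0x80000000 (%v0 in
   the most significant bit) and ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31)
   is 0x00000001 (%v31 in the least significant bit).  */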
1365 \f
1366 /* Initialize the GCC target structure. */
1367 #undef TARGET_ATTRIBUTE_TABLE
1368 #define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
1369 #undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
1370 #define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
1371 #undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
1372 #define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p
1373
1374 #undef TARGET_ASM_ALIGNED_DI_OP
1375 #define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP
1376
1377 /* Default unaligned ops are only provided for ELF. Find the ops needed
1378 for non-ELF systems. */
1379 #ifndef OBJECT_FORMAT_ELF
1380 #if TARGET_XCOFF
1381 /* For XCOFF. rs6000_assemble_integer will handle unaligned DIs on
1382 64-bit targets. */
1383 #undef TARGET_ASM_UNALIGNED_HI_OP
1384 #define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
1385 #undef TARGET_ASM_UNALIGNED_SI_OP
1386 #define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
1387 #undef TARGET_ASM_UNALIGNED_DI_OP
1388 #define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
1389 #else
1390 /* For Darwin. */
1391 #undef TARGET_ASM_UNALIGNED_HI_OP
1392 #define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
1393 #undef TARGET_ASM_UNALIGNED_SI_OP
1394 #define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
1395 #undef TARGET_ASM_UNALIGNED_DI_OP
1396 #define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
1397 #undef TARGET_ASM_ALIGNED_DI_OP
1398 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
1399 #endif
1400 #endif
1401
1402 /* This hook deals with fixups for relocatable code and DI-mode objects
1403 in 64-bit code. */
1404 #undef TARGET_ASM_INTEGER
1405 #define TARGET_ASM_INTEGER rs6000_assemble_integer
1406
1407 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
1408 #undef TARGET_ASM_ASSEMBLE_VISIBILITY
1409 #define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
1410 #endif
1411
1412 #undef TARGET_SET_UP_BY_PROLOGUE
1413 #define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue
1414
1415 #undef TARGET_HAVE_TLS
1416 #define TARGET_HAVE_TLS HAVE_AS_TLS
1417
1418 #undef TARGET_CANNOT_FORCE_CONST_MEM
1419 #define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem
1420
1421 #undef TARGET_DELEGITIMIZE_ADDRESS
1422 #define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address
1423
1424 #undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
1425 #define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p
1426
1427 #undef TARGET_ASM_FUNCTION_PROLOGUE
1428 #define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
1429 #undef TARGET_ASM_FUNCTION_EPILOGUE
1430 #define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue
1431
1432 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
1433 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra
1434
1435 #undef TARGET_LEGITIMIZE_ADDRESS
1436 #define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address
1437
1438 #undef TARGET_SCHED_VARIABLE_ISSUE
1439 #define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue
1440
1441 #undef TARGET_SCHED_ISSUE_RATE
1442 #define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
1443 #undef TARGET_SCHED_ADJUST_COST
1444 #define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
1445 #undef TARGET_SCHED_ADJUST_PRIORITY
1446 #define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
1447 #undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
1448 #define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
1449 #undef TARGET_SCHED_INIT
1450 #define TARGET_SCHED_INIT rs6000_sched_init
1451 #undef TARGET_SCHED_FINISH
1452 #define TARGET_SCHED_FINISH rs6000_sched_finish
1453 #undef TARGET_SCHED_REORDER
1454 #define TARGET_SCHED_REORDER rs6000_sched_reorder
1455 #undef TARGET_SCHED_REORDER2
1456 #define TARGET_SCHED_REORDER2 rs6000_sched_reorder2
1457
1458 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1459 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead
1460
1461 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
1462 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard
1463
1464 #undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
1465 #define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
1466 #undef TARGET_SCHED_INIT_SCHED_CONTEXT
1467 #define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
1468 #undef TARGET_SCHED_SET_SCHED_CONTEXT
1469 #define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
1470 #undef TARGET_SCHED_FREE_SCHED_CONTEXT
1471 #define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context
1472
1473 #undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
1474 #define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
1475 #undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
1476 #define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
1477 rs6000_builtin_support_vector_misalignment
1478 #undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
1479 #define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
1480 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
1481 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
1482 rs6000_builtin_vectorization_cost
1483 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
1484 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
1485 rs6000_preferred_simd_mode
1486 #undef TARGET_VECTORIZE_INIT_COST
1487 #define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
1488 #undef TARGET_VECTORIZE_ADD_STMT_COST
1489 #define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
1490 #undef TARGET_VECTORIZE_FINISH_COST
1491 #define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
1492 #undef TARGET_VECTORIZE_DESTROY_COST_DATA
1493 #define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data
1494
1495 #undef TARGET_INIT_BUILTINS
1496 #define TARGET_INIT_BUILTINS rs6000_init_builtins
1497 #undef TARGET_BUILTIN_DECL
1498 #define TARGET_BUILTIN_DECL rs6000_builtin_decl
1499
1500 #undef TARGET_EXPAND_BUILTIN
1501 #define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
1502
1503 #undef TARGET_MANGLE_TYPE
1504 #define TARGET_MANGLE_TYPE rs6000_mangle_type
1505
1506 #undef TARGET_INIT_LIBFUNCS
1507 #define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1508
1509 #if TARGET_MACHO
1510 #undef TARGET_BINDS_LOCAL_P
1511 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1512 #endif
1513
1514 #undef TARGET_MS_BITFIELD_LAYOUT_P
1515 #define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
1516
1517 #undef TARGET_ASM_OUTPUT_MI_THUNK
1518 #define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
1519
1520 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1521 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1522
1523 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1524 #define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
1525
1526 #undef TARGET_REGISTER_MOVE_COST
1527 #define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
1528 #undef TARGET_MEMORY_MOVE_COST
1529 #define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
1530 #undef TARGET_RTX_COSTS
1531 #define TARGET_RTX_COSTS rs6000_rtx_costs
1532 #undef TARGET_ADDRESS_COST
1533 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
1534
1535 #undef TARGET_DWARF_REGISTER_SPAN
1536 #define TARGET_DWARF_REGISTER_SPAN rs6000_dwarf_register_span
1537
1538 #undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
1539 #define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
1540
1541 #undef TARGET_MEMBER_TYPE_FORCES_BLK
1542 #define TARGET_MEMBER_TYPE_FORCES_BLK rs6000_member_type_forces_blk
1543
1544 #undef TARGET_PROMOTE_FUNCTION_MODE
1545 #define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode
1546
1547 #undef TARGET_RETURN_IN_MEMORY
1548 #define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
1549
1550 #undef TARGET_RETURN_IN_MSB
1551 #define TARGET_RETURN_IN_MSB rs6000_return_in_msb
1552
1553 #undef TARGET_SETUP_INCOMING_VARARGS
1554 #define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
1555
1556 /* Always strict argument naming on rs6000. */
1557 #undef TARGET_STRICT_ARGUMENT_NAMING
1558 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
1559 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
1560 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
1561 #undef TARGET_SPLIT_COMPLEX_ARG
1562 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
1563 #undef TARGET_MUST_PASS_IN_STACK
1564 #define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
1565 #undef TARGET_PASS_BY_REFERENCE
1566 #define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
1567 #undef TARGET_ARG_PARTIAL_BYTES
1568 #define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1569 #undef TARGET_FUNCTION_ARG_ADVANCE
1570 #define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1571 #undef TARGET_FUNCTION_ARG
1572 #define TARGET_FUNCTION_ARG rs6000_function_arg
1573 #undef TARGET_FUNCTION_ARG_BOUNDARY
1574 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1575
1576 #undef TARGET_BUILD_BUILTIN_VA_LIST
1577 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1578
1579 #undef TARGET_EXPAND_BUILTIN_VA_START
1580 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1581
1582 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1583 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1584
1585 #undef TARGET_EH_RETURN_FILTER_MODE
1586 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1587
1588 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1589 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1590
1591 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1592 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1593
1594 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1595 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1596
1597 #undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
1598 #define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip
1599
1600 #undef TARGET_MD_ASM_ADJUST
1601 #define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust
1602
1603 #undef TARGET_OPTION_OVERRIDE
1604 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1605
1606 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1607 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1608 rs6000_builtin_vectorized_function
1609
1610 #if !TARGET_MACHO
1611 #undef TARGET_STACK_PROTECT_FAIL
1612 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1613 #endif
1614
1615 /* MPC604EUM 3.5.2 Weak Consistency between Multiple Processors
1616 The PowerPC architecture requires only weak consistency among
1617 processors--that is, memory accesses between processors need not be
1618 sequentially consistent and memory accesses among processors can occur
1619 in any order. The ability to order memory accesses weakly provides
1620 opportunities for more efficient use of the system bus. Unless a
1621 dependency exists, the 604e allows read operations to precede store
1622 operations. */
1623 #undef TARGET_RELAXED_ORDERING
1624 #define TARGET_RELAXED_ORDERING true
1625
1626 #ifdef HAVE_AS_TLS
1627 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1628 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1629 #endif
1630
1631 /* Use a 32-bit anchor range. This leads to sequences like:
1632
1633 addis tmp,anchor,high
1634 add dest,tmp,low
1635
1636 where tmp itself acts as an anchor, and can be shared between
1637 accesses to the same 64k page. */
1638 #undef TARGET_MIN_ANCHOR_OFFSET
1639 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1640 #undef TARGET_MAX_ANCHOR_OFFSET
1641 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
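/* Editor's sketch (not part of the original port; the helper name is
   hypothetical): how an anchor offset in the 32-bit range above splits
   into the two halves consumed by the addis/add sequence.  LOW is the
   sign-extended bottom 16 bits, so HIGH is a multiple of 0x10000 and
   OFFSET == HIGH + LOW always holds.  */
static inline void
example_split_anchor_offset (long long offset, long long *high,
			     long long *low)
{
  *low = (short) (offset & 0xffff);	/* @l part, sign-extended.  */
  *high = offset - *low;		/* @ha part, a multiple of 64K.  */
}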
1642 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1643 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1644 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1645 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1646
1647 #undef TARGET_BUILTIN_RECIPROCAL
1648 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1649
1650 #undef TARGET_EXPAND_TO_RTL_HOOK
1651 #define TARGET_EXPAND_TO_RTL_HOOK rs6000_alloc_sdmode_stack_slot
1652
1653 #undef TARGET_INSTANTIATE_DECLS
1654 #define TARGET_INSTANTIATE_DECLS rs6000_instantiate_decls
1655
1656 #undef TARGET_SECONDARY_RELOAD
1657 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1658
1659 #undef TARGET_LEGITIMATE_ADDRESS_P
1660 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1661
1662 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1663 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1664
1665 #undef TARGET_LRA_P
1666 #define TARGET_LRA_P rs6000_lra_p
1667
1668 #undef TARGET_CAN_ELIMINATE
1669 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1670
1671 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1672 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1673
1674 #undef TARGET_TRAMPOLINE_INIT
1675 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1676
1677 #undef TARGET_FUNCTION_VALUE
1678 #define TARGET_FUNCTION_VALUE rs6000_function_value
1679
1680 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1681 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1682
1683 #undef TARGET_OPTION_SAVE
1684 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1685
1686 #undef TARGET_OPTION_RESTORE
1687 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1688
1689 #undef TARGET_OPTION_PRINT
1690 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1691
1692 #undef TARGET_CAN_INLINE_P
1693 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1694
1695 #undef TARGET_SET_CURRENT_FUNCTION
1696 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1697
1698 #undef TARGET_LEGITIMATE_CONSTANT_P
1699 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1700
1701 #undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
1702 #define TARGET_VECTORIZE_VEC_PERM_CONST_OK rs6000_vectorize_vec_perm_const_ok
1703
1704 #undef TARGET_CAN_USE_DOLOOP_P
1705 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1706
1707 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1708 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1709
1710 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1711 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1712 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1713 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1714 #undef TARGET_UNWIND_WORD_MODE
1715 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1716 \f
1717
1718 /* Processor table. */
1719 struct rs6000_ptt
1720 {
1721 const char *const name; /* Canonical processor name. */
1722 const enum processor_type processor; /* Processor type enum value. */
1723 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1724 };
1725
1726 static struct rs6000_ptt const processor_target_table[] =
1727 {
1728 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
1729 #include "rs6000-cpus.def"
1730 #undef RS6000_CPU
1731 };
1732
1733 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
1734 name is invalid. */
1735
1736 static int
1737 rs6000_cpu_name_lookup (const char *name)
1738 {
1739 size_t i;
1740
1741 if (name != NULL)
1742 {
1743 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
1744 if (! strcmp (name, processor_target_table[i].name))
1745 return (int)i;
1746 }
1747
1748 return -1;
1749 }
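/* For example, rs6000_cpu_name_lookup ("power7") returns the index of
   the "power7" row generated from rs6000-cpus.def, while a name with no
   table entry (or a NULL pointer) yields -1.  */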
1750
1751 \f
1752 /* Return number of consecutive hard regs needed starting at reg REGNO
1753 to hold something of mode MODE.
1754 This is ordinarily the length in words of a value of mode MODE
1755 but can be less for certain modes in special long registers.
1756
1757 For the SPE, GPRs are 64 bits but only 32 bits are visible in
1758 scalar instructions. The upper 32 bits are only available to the
1759 SIMD instructions.
1760
1761    POWER and PowerPC GPRs hold 32 bits worth;
1762    PowerPC64 GPRs and FPRs hold 64 bits worth.  */
1763
1764 static int
1765 rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
1766 {
1767 unsigned HOST_WIDE_INT reg_size;
1768
1769 /* TF/TD modes are special in that they always take 2 registers. */
1770 if (FP_REGNO_P (regno))
1771 reg_size = ((VECTOR_MEM_VSX_P (mode) && mode != TDmode && mode != TFmode)
1772 ? UNITS_PER_VSX_WORD
1773 : UNITS_PER_FP_WORD);
1774
1775 else if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
1776 reg_size = UNITS_PER_SPE_WORD;
1777
1778 else if (ALTIVEC_REGNO_P (regno))
1779 reg_size = UNITS_PER_ALTIVEC_WORD;
1780
1781 /* The value returned for SCmode in the E500 double case is 2 for
1782 ABI compatibility; storing an SCmode value in a single register
1783 would require function_arg and rs6000_spe_function_arg to handle
1784 SCmode so as to pass the value correctly in a pair of
1785 registers. */
1786 else if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode
1787 && !DECIMAL_FLOAT_MODE_P (mode) && SPE_SIMD_REGNO_P (regno))
1788 reg_size = UNITS_PER_FP_WORD;
1789
1790 else
1791 reg_size = UNITS_PER_WORD;
1792
1793 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
1794 }
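/* Worked example for the ceiling division above: on a 32-bit target,
   DFmode is 8 bytes, so a GPR (UNITS_PER_WORD == 4) needs
   (8 + 4 - 1) / 4 == 2 consecutive registers, while an FPR
   (UNITS_PER_FP_WORD == 8) needs only (8 + 8 - 1) / 8 == 1.  */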
1795
1796 /* Value is 1 if hard register REGNO can hold a value of machine-mode
1797 MODE. */
1798 static int
1799 rs6000_hard_regno_mode_ok (int regno, machine_mode mode)
1800 {
1801 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
1802
1803   /* PTImode can only go in GPRs.  Quad word memory operations require even/odd
1804      register combinations; we use PTImode where we need to deal with quad
1805      word memory operations.  Don't allow quad words in the argument or frame
1806      pointer registers, just registers 0..31.  */
1807 if (mode == PTImode)
1808 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
1809 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
1810 && ((regno & 1) == 0));
1811
1812 /* VSX registers that overlap the FPR registers are larger than for non-VSX
1813 implementations. Don't allow an item to be split between a FP register
1814 and an Altivec register. Allow TImode in all VSX registers if the user
1815 asked for it. */
1816 if (TARGET_VSX && VSX_REGNO_P (regno)
1817 && (VECTOR_MEM_VSX_P (mode)
1818 || reg_addr[mode].scalar_in_vmx_p
1819 || (TARGET_VSX_TIMODE && mode == TImode)
1820 || (TARGET_VADDUQM && mode == V1TImode)))
1821 {
1822 if (FP_REGNO_P (regno))
1823 return FP_REGNO_P (last_regno);
1824
1825 if (ALTIVEC_REGNO_P (regno))
1826 {
1827 if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
1828 return 0;
1829
1830 return ALTIVEC_REGNO_P (last_regno);
1831 }
1832 }
1833
1834 /* The GPRs can hold any mode, but values bigger than one register
1835 cannot go past R31. */
1836 if (INT_REGNO_P (regno))
1837 return INT_REGNO_P (last_regno);
1838
1839 /* The float registers (except for VSX vector modes) can only hold floating
1840 modes and DImode. */
1841 if (FP_REGNO_P (regno))
1842 {
1843 if (SCALAR_FLOAT_MODE_P (mode)
1844 && (mode != TDmode || (regno % 2) == 0)
1845 && FP_REGNO_P (last_regno))
1846 return 1;
1847
1848 if (GET_MODE_CLASS (mode) == MODE_INT
1849 && GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
1850 return 1;
1851
1852 if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
1853 && PAIRED_VECTOR_MODE (mode))
1854 return 1;
1855
1856 return 0;
1857 }
1858
1859 /* The CR register can only hold CC modes. */
1860 if (CR_REGNO_P (regno))
1861 return GET_MODE_CLASS (mode) == MODE_CC;
1862
1863 if (CA_REGNO_P (regno))
1864 return mode == Pmode || mode == SImode;
1865
1866   /* AltiVec modes can go only in AltiVec registers.  */
1867 if (ALTIVEC_REGNO_P (regno))
1868 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
1869 || mode == V1TImode);
1870
1871 /* ...but GPRs can hold SIMD data on the SPE in one register. */
1872 if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
1873 return 1;
1874
1875   /* We cannot put non-VSX TImode or PTImode anywhere except in the general
1876      registers, and the value must fit within the register set.  */
1877
1878 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
1879 }
1880
1881 /* Print interesting facts about registers. */
1882 static void
1883 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
1884 {
1885 int r, m;
1886
1887 for (r = first_regno; r <= last_regno; ++r)
1888 {
1889 const char *comma = "";
1890 int len;
1891
1892 if (first_regno == last_regno)
1893 fprintf (stderr, "%s:\t", reg_name);
1894 else
1895 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
1896
1897 len = 8;
1898 for (m = 0; m < NUM_MACHINE_MODES; ++m)
1899 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
1900 {
1901 if (len > 70)
1902 {
1903 fprintf (stderr, ",\n\t");
1904 len = 8;
1905 comma = "";
1906 }
1907
1908 if (rs6000_hard_regno_nregs[m][r] > 1)
1909 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
1910 rs6000_hard_regno_nregs[m][r]);
1911 else
1912 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
1913
1914 comma = ", ";
1915 }
1916
1917 if (call_used_regs[r])
1918 {
1919 if (len > 70)
1920 {
1921 fprintf (stderr, ",\n\t");
1922 len = 8;
1923 comma = "";
1924 }
1925
1926 len += fprintf (stderr, "%s%s", comma, "call-used");
1927 comma = ", ";
1928 }
1929
1930 if (fixed_regs[r])
1931 {
1932 if (len > 70)
1933 {
1934 fprintf (stderr, ",\n\t");
1935 len = 8;
1936 comma = "";
1937 }
1938
1939 len += fprintf (stderr, "%s%s", comma, "fixed");
1940 comma = ", ";
1941 }
1942
1943 if (len > 70)
1944 {
1945 fprintf (stderr, ",\n\t");
1946 comma = "";
1947 }
1948
1949 len += fprintf (stderr, "%sreg-class = %s", comma,
1950 reg_class_names[(int)rs6000_regno_regclass[r]]);
1951 comma = ", ";
1952
1953 if (len > 70)
1954 {
1955 fprintf (stderr, ",\n\t");
1956 comma = "";
1957 }
1958
1959 fprintf (stderr, "%sregno = %d\n", comma, r);
1960 }
1961 }
1962
1963 static const char *
1964 rs6000_debug_vector_unit (enum rs6000_vector v)
1965 {
1966 const char *ret;
1967
1968 switch (v)
1969 {
1970 case VECTOR_NONE: ret = "none"; break;
1971 case VECTOR_ALTIVEC: ret = "altivec"; break;
1972 case VECTOR_VSX: ret = "vsx"; break;
1973 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
1974 case VECTOR_PAIRED: ret = "paired"; break;
1975 case VECTOR_SPE: ret = "spe"; break;
1976 case VECTOR_OTHER: ret = "other"; break;
1977 default: ret = "unknown"; break;
1978 }
1979
1980 return ret;
1981 }
1982
1983 /* Inner function printing just the address mask for a particular reload
1984 register class. */
1985 DEBUG_FUNCTION char *
1986 rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
1987 {
1988 static char ret[8];
1989 char *p = ret;
1990
1991 if ((mask & RELOAD_REG_VALID) != 0)
1992 *p++ = 'v';
1993 else if (keep_spaces)
1994 *p++ = ' ';
1995
1996 if ((mask & RELOAD_REG_MULTIPLE) != 0)
1997 *p++ = 'm';
1998 else if (keep_spaces)
1999 *p++ = ' ';
2000
2001 if ((mask & RELOAD_REG_INDEXED) != 0)
2002 *p++ = 'i';
2003 else if (keep_spaces)
2004 *p++ = ' ';
2005
2006 if ((mask & RELOAD_REG_OFFSET) != 0)
2007 *p++ = 'o';
2008 else if (keep_spaces)
2009 *p++ = ' ';
2010
2011 if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
2012 *p++ = '+';
2013 else if (keep_spaces)
2014 *p++ = ' ';
2015
2016 if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
2017 *p++ = '+';
2018 else if (keep_spaces)
2019 *p++ = ' ';
2020
2021 if ((mask & RELOAD_REG_AND_M16) != 0)
2022 *p++ = '&';
2023 else if (keep_spaces)
2024 *p++ = ' ';
2025
2026 *p = '\0';
2027
2028 return ret;
2029 }
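/* For example, a mask of RELOAD_REG_VALID | RELOAD_REG_INDEXED
   | RELOAD_REG_OFFSET comes back as "v io   " when KEEP_SPACES is set
   (one column per flag, in the order tested above) and as "vio"
   otherwise.  */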
2030
2031 /* Print the address masks in a human readable fashion.  */
2032 DEBUG_FUNCTION void
2033 rs6000_debug_print_mode (ssize_t m)
2034 {
2035 ssize_t rc;
2036
2037 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
2038 for (rc = 0; rc < N_RELOAD_REG; rc++)
2039 fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
2040 rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));
2041
2042 if (rs6000_vector_unit[m] != VECTOR_NONE
2043 || rs6000_vector_mem[m] != VECTOR_NONE
2044 || (reg_addr[m].reload_store != CODE_FOR_nothing)
2045 || (reg_addr[m].reload_load != CODE_FOR_nothing)
2046 || reg_addr[m].scalar_in_vmx_p)
2047 {
2048 fprintf (stderr,
2049 " Vector-arith=%-10s Vector-mem=%-10s Reload=%c%c Upper=%c",
2050 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
2051 rs6000_debug_vector_unit (rs6000_vector_mem[m]),
2052 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
2053 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*',
2054 (reg_addr[m].scalar_in_vmx_p) ? 'y' : 'n');
2055 }
2056
2057 fputs ("\n", stderr);
2058 }
2059
2060 #define DEBUG_FMT_ID "%-32s= "
2061 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2062 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2063 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
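/* For example, fprintf (stderr, DEBUG_FMT_D, "tls_size", 16) expands to
   fprintf (stderr, "%-32s= " "%d\n", "tls_size", 16), printing the
   label left-justified in a 32-column field followed by "= 16".  */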
2064
2065 /* Print various interesting information with -mdebug=reg. */
2066 static void
2067 rs6000_debug_reg_global (void)
2068 {
2069 static const char *const tf[2] = { "false", "true" };
2070 const char *nl = (const char *)0;
2071 int m;
2072 size_t m1, m2, v;
2073 char costly_num[20];
2074 char nop_num[20];
2075 char flags_buffer[40];
2076 const char *costly_str;
2077 const char *nop_str;
2078 const char *trace_str;
2079 const char *abi_str;
2080 const char *cmodel_str;
2081 struct cl_target_option cl_opts;
2082
2083 /* Modes we want tieable information on. */
2084 static const machine_mode print_tieable_modes[] = {
2085 QImode,
2086 HImode,
2087 SImode,
2088 DImode,
2089 TImode,
2090 PTImode,
2091 SFmode,
2092 DFmode,
2093 TFmode,
2094 SDmode,
2095 DDmode,
2096 TDmode,
2097 V8QImode,
2098 V4HImode,
2099 V2SImode,
2100 V16QImode,
2101 V8HImode,
2102 V4SImode,
2103 V2DImode,
2104 V1TImode,
2105 V32QImode,
2106 V16HImode,
2107 V8SImode,
2108 V4DImode,
2109 V2TImode,
2110 V2SFmode,
2111 V4SFmode,
2112 V2DFmode,
2113 V8SFmode,
2114 V4DFmode,
2115 CCmode,
2116 CCUNSmode,
2117 CCEQmode,
2118 };
2119
2120 /* Virtual regs we are interested in. */
2121   static const struct {
2122 int regno; /* register number. */
2123 const char *name; /* register name. */
2124 } virtual_regs[] = {
2125 { STACK_POINTER_REGNUM, "stack pointer:" },
2126 { TOC_REGNUM, "toc: " },
2127 { STATIC_CHAIN_REGNUM, "static chain: " },
2128 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2129 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2130 { ARG_POINTER_REGNUM, "arg pointer: " },
2131 { FRAME_POINTER_REGNUM, "frame pointer:" },
2132 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2133 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2134 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2135 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2136 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2137 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2138 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2139     { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2140 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2141 };
2142
2143 fputs ("\nHard register information:\n", stderr);
2144 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2145 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2146 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2147 LAST_ALTIVEC_REGNO,
2148 "vs");
2149 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2150 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2151 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2152 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2153 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2154 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2155 rs6000_debug_reg_print (SPE_ACC_REGNO, SPE_ACC_REGNO, "spe_a");
2156 rs6000_debug_reg_print (SPEFSCR_REGNO, SPEFSCR_REGNO, "spe_f");
2157
2158 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2159 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2160 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2161
2162 fprintf (stderr,
2163 "\n"
2164 "d reg_class = %s\n"
2165 "f reg_class = %s\n"
2166 "v reg_class = %s\n"
2167 "wa reg_class = %s\n"
2168 "wd reg_class = %s\n"
2169 "wf reg_class = %s\n"
2170 "wg reg_class = %s\n"
2171 "wh reg_class = %s\n"
2172 "wi reg_class = %s\n"
2173 "wj reg_class = %s\n"
2174 "wk reg_class = %s\n"
2175 "wl reg_class = %s\n"
2176 "wm reg_class = %s\n"
2177 "wr reg_class = %s\n"
2178 "ws reg_class = %s\n"
2179 "wt reg_class = %s\n"
2180 "wu reg_class = %s\n"
2181 "wv reg_class = %s\n"
2182 "ww reg_class = %s\n"
2183 "wx reg_class = %s\n"
2184 "wy reg_class = %s\n"
2185 "wz reg_class = %s\n"
2186 "\n",
2187 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2188 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2189 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2190 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2191 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
2192 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
2193 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
2194 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
2195 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
2196 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
2197 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
2198 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
2199 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
2200 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2201 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
2202 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
2203 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
2204 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
2205 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
2206 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2207 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
2208 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]]);
2209
2210 nl = "\n";
2211 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2212 rs6000_debug_print_mode (m);
2213
2214 fputs ("\n", stderr);
2215
2216 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2217 {
2218 machine_mode mode1 = print_tieable_modes[m1];
2219 bool first_time = true;
2220
2221 nl = (const char *)0;
2222 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2223 {
2224 machine_mode mode2 = print_tieable_modes[m2];
2225 if (mode1 != mode2 && MODES_TIEABLE_P (mode1, mode2))
2226 {
2227 if (first_time)
2228 {
2229 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2230 nl = "\n";
2231 first_time = false;
2232 }
2233
2234 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2235 }
2236 }
2237
2238 if (!first_time)
2239 fputs ("\n", stderr);
2240 }
2241
2242 if (nl)
2243 fputs (nl, stderr);
2244
2245 if (rs6000_recip_control)
2246 {
2247 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2248
2249 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2250 if (rs6000_recip_bits[m])
2251 {
2252 fprintf (stderr,
2253 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2254 GET_MODE_NAME (m),
2255 (RS6000_RECIP_AUTO_RE_P (m)
2256 ? "auto"
2257 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2258 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2259 ? "auto"
2260 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2261 }
2262
2263 fputs ("\n", stderr);
2264 }
2265
2266 if (rs6000_cpu_index >= 0)
2267 {
2268 const char *name = processor_target_table[rs6000_cpu_index].name;
2269 HOST_WIDE_INT flags
2270 = processor_target_table[rs6000_cpu_index].target_enable;
2271
2272 sprintf (flags_buffer, "-mcpu=%s flags", name);
2273 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2274 }
2275 else
2276 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2277
2278 if (rs6000_tune_index >= 0)
2279 {
2280 const char *name = processor_target_table[rs6000_tune_index].name;
2281 HOST_WIDE_INT flags
2282 = processor_target_table[rs6000_tune_index].target_enable;
2283
2284 sprintf (flags_buffer, "-mtune=%s flags", name);
2285 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2286 }
2287 else
2288 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2289
2290 cl_target_option_save (&cl_opts, &global_options);
2291 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2292 rs6000_isa_flags);
2293
2294 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2295 rs6000_isa_flags_explicit);
2296
2297 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2298 rs6000_builtin_mask);
2299
2300 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2301
2302 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2303 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2304
2305 switch (rs6000_sched_costly_dep)
2306 {
2307 case max_dep_latency:
2308 costly_str = "max_dep_latency";
2309 break;
2310
2311 case no_dep_costly:
2312 costly_str = "no_dep_costly";
2313 break;
2314
2315 case all_deps_costly:
2316 costly_str = "all_deps_costly";
2317 break;
2318
2319 case true_store_to_load_dep_costly:
2320 costly_str = "true_store_to_load_dep_costly";
2321 break;
2322
2323 case store_to_load_dep_costly:
2324 costly_str = "store_to_load_dep_costly";
2325 break;
2326
2327 default:
2328 costly_str = costly_num;
2329 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2330 break;
2331 }
2332
2333 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2334
2335 switch (rs6000_sched_insert_nops)
2336 {
2337 case sched_finish_regroup_exact:
2338 nop_str = "sched_finish_regroup_exact";
2339 break;
2340
2341 case sched_finish_pad_groups:
2342 nop_str = "sched_finish_pad_groups";
2343 break;
2344
2345 case sched_finish_none:
2346 nop_str = "sched_finish_none";
2347 break;
2348
2349 default:
2350 nop_str = nop_num;
2351 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2352 break;
2353 }
2354
2355 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2356
2357 switch (rs6000_sdata)
2358 {
2359 default:
2360 case SDATA_NONE:
2361 break;
2362
2363 case SDATA_DATA:
2364 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2365 break;
2366
2367 case SDATA_SYSV:
2368 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2369 break;
2370
2371 case SDATA_EABI:
2372 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2373 break;
2374
2375 }
2376
2377 switch (rs6000_traceback)
2378 {
2379 case traceback_default: trace_str = "default"; break;
2380 case traceback_none: trace_str = "none"; break;
2381 case traceback_part: trace_str = "part"; break;
2382 case traceback_full: trace_str = "full"; break;
2383 default: trace_str = "unknown"; break;
2384 }
2385
2386 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2387
2388 switch (rs6000_current_cmodel)
2389 {
2390 case CMODEL_SMALL: cmodel_str = "small"; break;
2391 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2392 case CMODEL_LARGE: cmodel_str = "large"; break;
2393 default: cmodel_str = "unknown"; break;
2394 }
2395
2396 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2397
2398 switch (rs6000_current_abi)
2399 {
2400 case ABI_NONE: abi_str = "none"; break;
2401 case ABI_AIX: abi_str = "aix"; break;
2402 case ABI_ELFv2: abi_str = "ELFv2"; break;
2403 case ABI_V4: abi_str = "V4"; break;
2404 case ABI_DARWIN: abi_str = "darwin"; break;
2405 default: abi_str = "unknown"; break;
2406 }
2407
2408 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2409
2410 if (rs6000_altivec_abi)
2411 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2412
2413 if (rs6000_spe_abi)
2414 fprintf (stderr, DEBUG_FMT_S, "spe_abi", "true");
2415
2416 if (rs6000_darwin64_abi)
2417 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2418
2419 if (rs6000_float_gprs)
2420 fprintf (stderr, DEBUG_FMT_S, "float_gprs", "true");
2421
2422 fprintf (stderr, DEBUG_FMT_S, "fprs",
2423 (TARGET_FPRS ? "true" : "false"));
2424
2425 fprintf (stderr, DEBUG_FMT_S, "single_float",
2426 (TARGET_SINGLE_FLOAT ? "true" : "false"));
2427
2428 fprintf (stderr, DEBUG_FMT_S, "double_float",
2429 (TARGET_DOUBLE_FLOAT ? "true" : "false"));
2430
2431 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2432 (TARGET_SOFT_FLOAT ? "true" : "false"));
2433
2434 fprintf (stderr, DEBUG_FMT_S, "e500_single",
2435 (TARGET_E500_SINGLE ? "true" : "false"));
2436
2437 fprintf (stderr, DEBUG_FMT_S, "e500_double",
2438 (TARGET_E500_DOUBLE ? "true" : "false"));
2439
2440 if (TARGET_LINK_STACK)
2441 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2442
2443 if (targetm.lra_p ())
2444 fprintf (stderr, DEBUG_FMT_S, "lra", "true");
2445
2446 if (TARGET_P8_FUSION)
2447 fprintf (stderr, DEBUG_FMT_S, "p8 fusion",
2448 (TARGET_P8_FUSION_SIGN) ? "zero+sign" : "zero");
2449
2450 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2451 TARGET_SECURE_PLT ? "secure" : "bss");
2452 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2453 aix_struct_return ? "aix" : "sysv");
2454 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2455 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2456 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2457 tf[!!rs6000_align_branch_targets]);
2458 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2459 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2460 rs6000_long_double_type_size);
2461 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2462 (int)rs6000_sched_restricted_insns_priority);
2463 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2464 (int)END_BUILTINS);
2465 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2466 (int)RS6000_BUILTIN_COUNT);
2467
2468 if (TARGET_VSX)
2469 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2470 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2471 }
2472
2473 \f
2474 /* Update the addr mask bits in reg_addr to help secondary reload and the
2475    legitimate address support figure out the appropriate addressing to
2476    use.  */
2477
2478 static void
2479 rs6000_setup_reg_addr_masks (void)
2480 {
2481 ssize_t rc, reg, m, nregs;
2482 addr_mask_type any_addr_mask, addr_mask;
2483
2484 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2485 {
2486 machine_mode m2 = (machine_mode)m;
2487
2488       /* SDmode is special in that we want to access it only via REG+REG
2489 	 addressing on power7 and above, since we want to use the LFIWZX and
2490 	 STFIWX instructions to load and store it.  */
2491 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2492
2493 any_addr_mask = 0;
2494 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2495 {
2496 addr_mask = 0;
2497 reg = reload_reg_map[rc].reg;
2498
2499 /* Can mode values go in the GPR/FPR/Altivec registers? */
2500 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2501 {
2502 nregs = rs6000_hard_regno_nregs[m][reg];
2503 addr_mask |= RELOAD_REG_VALID;
2504
2505 /* Indicate if the mode takes more than 1 physical register. If
2506 it takes a single register, indicate it can do REG+REG
2507 addressing. */
2508 if (nregs > 1 || m == BLKmode)
2509 addr_mask |= RELOAD_REG_MULTIPLE;
2510 else
2511 addr_mask |= RELOAD_REG_INDEXED;
2512
2513 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2514 addressing. Restrict addressing on SPE for 64-bit types
2515 because of the SUBREG hackery used to address 64-bit floats in
2516 '32-bit' GPRs. */
2517
2518 if (TARGET_UPDATE
2519 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2520 && GET_MODE_SIZE (m2) <= 8
2521 && !VECTOR_MODE_P (m2)
2522 && !COMPLEX_MODE_P (m2)
2523 && !indexed_only_p
2524 && !(TARGET_E500_DOUBLE && GET_MODE_SIZE (m2) == 8))
2525 {
2526 addr_mask |= RELOAD_REG_PRE_INCDEC;
2527
2528 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2529 we don't allow PRE_MODIFY for some multi-register
2530 operations. */
2531 switch (m)
2532 {
2533 default:
2534 addr_mask |= RELOAD_REG_PRE_MODIFY;
2535 break;
2536
2537 case DImode:
2538 if (TARGET_POWERPC64)
2539 addr_mask |= RELOAD_REG_PRE_MODIFY;
2540 break;
2541
2542 case DFmode:
2543 case DDmode:
2544 if (TARGET_DF_INSN)
2545 addr_mask |= RELOAD_REG_PRE_MODIFY;
2546 break;
2547 }
2548 }
2549 }
2550
2551 /* GPR and FPR registers can do REG+OFFSET addressing, except
2552 possibly for SDmode. */
2553 if ((addr_mask != 0) && !indexed_only_p
2554 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR))
2555 addr_mask |= RELOAD_REG_OFFSET;
2556
2557 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
2558 addressing on 128-bit types. */
2559 if (rc == RELOAD_REG_VMX && GET_MODE_SIZE (m2) == 16
2560 && (addr_mask & RELOAD_REG_VALID) != 0)
2561 addr_mask |= RELOAD_REG_AND_M16;
2562
2563 reg_addr[m].addr_mask[rc] = addr_mask;
2564 any_addr_mask |= addr_mask;
2565 }
2566
2567 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
2568 }
2569 }
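/* Editor's sketch of how the masks built above are meant to be
   consumed (illustrative only): testing whether DFmode supports
   REG+OFFSET addressing in the GPRs reduces to

     if ((reg_addr[DFmode].addr_mask[RELOAD_REG_GPR]
	  & RELOAD_REG_OFFSET) != 0)
       ...  use a d-form (register + displacement) address  ...  */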
2570
2571 \f
2572 /* Initialize the various global tables that are based on register size. */
2573 static void
2574 rs6000_init_hard_regno_mode_ok (bool global_init_p)
2575 {
2576 ssize_t r, m, c;
2577 int align64;
2578 int align32;
2579
2580 /* Precalculate REGNO_REG_CLASS. */
2581 rs6000_regno_regclass[0] = GENERAL_REGS;
2582 for (r = 1; r < 32; ++r)
2583 rs6000_regno_regclass[r] = BASE_REGS;
2584
2585 for (r = 32; r < 64; ++r)
2586 rs6000_regno_regclass[r] = FLOAT_REGS;
2587
2588 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
2589 rs6000_regno_regclass[r] = NO_REGS;
2590
2591 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
2592 rs6000_regno_regclass[r] = ALTIVEC_REGS;
2593
2594 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
2595 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
2596 rs6000_regno_regclass[r] = CR_REGS;
2597
2598 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
2599 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
2600 rs6000_regno_regclass[CA_REGNO] = NO_REGS;
2601 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
2602 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
2603 rs6000_regno_regclass[SPE_ACC_REGNO] = SPE_ACC_REGS;
2604 rs6000_regno_regclass[SPEFSCR_REGNO] = SPEFSCR_REGS;
2605 rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
2606 rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
2607 rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
2608 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
2609 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
2610
2611   /* Precalculate the mapping from register class to the simpler reload
2612      register class.  We don't need all of the register classes that are
2613      combinations of different classes, just the simple ones that have constraint letters.  */
2614 for (c = 0; c < N_REG_CLASSES; c++)
2615 reg_class_to_reg_type[c] = NO_REG_TYPE;
2616
2617 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
2618 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
2619 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
2620 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
2621 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
2622 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
2623 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
2624 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
2625 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
2626 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
2627 reg_class_to_reg_type[(int)SPE_ACC_REGS] = SPE_ACC_TYPE;
2628 reg_class_to_reg_type[(int)SPEFSCR_REGS] = SPEFSCR_REG_TYPE;
2629
2630 if (TARGET_VSX)
2631 {
2632 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
2633 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
2634 }
2635 else
2636 {
2637 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
2638 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
2639 }
2640
2641   /* Precalculate the valid memory formats as well as the vector information;
2642      this must be set up before the rs6000_hard_regno_nregs_internal calls
2643      below.  */
2644 gcc_assert ((int)VECTOR_NONE == 0);
2645 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
2646   memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_mem));
2647
2648 gcc_assert ((int)CODE_FOR_nothing == 0);
2649 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
2650
2651 gcc_assert ((int)NO_REGS == 0);
2652 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
2653
2654   /* The VSX hardware allows native alignment for vectors; TARGET_VSX_ALIGN_128
2655      controls whether the compiler believes it can use native alignment or must still use 128-bit alignment.  */
2656 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
2657 {
2658 align64 = 64;
2659 align32 = 32;
2660 }
2661 else
2662 {
2663 align64 = 128;
2664 align32 = 128;
2665 }
2666
2667 /* V2DF mode, VSX only. */
2668 if (TARGET_VSX)
2669 {
2670 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
2671 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
2672 rs6000_vector_align[V2DFmode] = align64;
2673 }
2674
2675 /* V4SF mode, either VSX or Altivec. */
2676 if (TARGET_VSX)
2677 {
2678 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
2679 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
2680 rs6000_vector_align[V4SFmode] = align32;
2681 }
2682 else if (TARGET_ALTIVEC)
2683 {
2684 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
2685 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
2686 rs6000_vector_align[V4SFmode] = align32;
2687 }
2688
2689 /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
2690 and stores. */
2691 if (TARGET_ALTIVEC)
2692 {
2693 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
2694 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
2695 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
2696 rs6000_vector_align[V4SImode] = align32;
2697 rs6000_vector_align[V8HImode] = align32;
2698 rs6000_vector_align[V16QImode] = align32;
2699
2700 if (TARGET_VSX)
2701 {
2702 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
2703 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
2704 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
2705 }
2706 else
2707 {
2708 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
2709 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
2710 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
2711 }
2712 }
2713
2714 /* V2DImode, full mode depends on ISA 2.07 vector mode. Allow under VSX to
2715 do insert/splat/extract. Altivec doesn't have 64-bit integer support. */
2716 if (TARGET_VSX)
2717 {
2718 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
2719 rs6000_vector_unit[V2DImode]
2720 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
2721 rs6000_vector_align[V2DImode] = align64;
2722
2723 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
2724 rs6000_vector_unit[V1TImode]
2725 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
2726 rs6000_vector_align[V1TImode] = 128;
2727 }
2728
2729 /* DFmode, see if we want to use the VSX unit. Memory is handled
2730 differently, so don't set rs6000_vector_mem. */
2731 if (TARGET_VSX && TARGET_VSX_SCALAR_DOUBLE)
2732 {
2733 rs6000_vector_unit[DFmode] = VECTOR_VSX;
2734 rs6000_vector_align[DFmode] = 64;
2735 }
2736
2737 /* SFmode, see if we want to use the VSX unit. */
2738 if (TARGET_P8_VECTOR && TARGET_VSX_SCALAR_FLOAT)
2739 {
2740 rs6000_vector_unit[SFmode] = VECTOR_VSX;
2741 rs6000_vector_align[SFmode] = 32;
2742 }
2743
2744 /* Allow TImode in VSX register and set the VSX memory macros. */
2745 if (TARGET_VSX && TARGET_VSX_TIMODE)
2746 {
2747 rs6000_vector_mem[TImode] = VECTOR_VSX;
2748 rs6000_vector_align[TImode] = align64;
2749 }
2750
2751 /* TODO add SPE and paired floating point vector support. */
2752
2753 /* Register class constraints for the constraints that depend on compile
2754 switches. When the VSX code was added, different constraints were added
2755 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
2756 of the VSX registers are used. The register classes for scalar floating
2757 point types is set, based on whether we allow that type into the upper
2758 (Altivec) registers. GCC has register classes to target the Altivec
2759 registers for load/store operations, to select using a VSX memory
2760 operation instead of the traditional floating point operation. The
2761 constraints are:
2762
2763 d - Register class to use with traditional DFmode instructions.
2764 f - Register class to use with traditional SFmode instructions.
2765 v - Altivec register.
2766 wa - Any VSX register.
2767 wc - Reserved to represent individual CR bits (used in LLVM).
2768 wd - Preferred register class for V2DFmode.
2769 wf - Preferred register class for V4SFmode.
2770 wg - Float register for power6x move insns.
2771 wh - FP register for direct move instructions.
2772 wi - FP or VSX register to hold 64-bit integers for VSX insns.
2773 wj - FP or VSX register to hold 64-bit integers for direct moves.
2774 wk - FP or VSX register to hold 64-bit doubles for direct moves.
2775 wl - Float register if we can do 32-bit signed int loads.
2776 wm - VSX register for ISA 2.07 direct move operations.
2777 wn - always NO_REGS.
2778 wr - GPR if 64-bit mode is permitted.
2779 ws - Register class to do ISA 2.06 DF operations.
2780 wt - VSX register for TImode in VSX registers.
2781 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
2782 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
2783 ww - Register class to do SF conversions in with VSX operations.
2784 wx - Float register if we can do 32-bit int stores.
2785 wy - Register class to do ISA 2.07 SF operations.
2786 wz - Float register if we can do 32-bit unsigned int loads. */
2787
2788 if (TARGET_HARD_FLOAT && TARGET_FPRS)
2789 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */
2790
2791 if (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
2792 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */
2793
2794 if (TARGET_VSX)
2795 {
2796 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
2797 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS; /* V2DFmode */
2798 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS; /* V4SFmode */
2799 rs6000_constraints[RS6000_CONSTRAINT_wi] = FLOAT_REGS; /* DImode */
2800
2801 if (TARGET_VSX_TIMODE)
2802 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS; /* TImode */
2803
2804 if (TARGET_UPPER_REGS_DF) /* DFmode */
2805 {
2806 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS;
2807 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS;
2808 }
2809 else
2810 rs6000_constraints[RS6000_CONSTRAINT_ws] = FLOAT_REGS;
2811 }
2812
2813 /* Add conditional constraints based on various options, to allow us to
2814 collapse multiple insn patterns. */
2815 if (TARGET_ALTIVEC)
2816 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
2817
2818 if (TARGET_MFPGPR) /* DFmode */
2819 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
2820
2821 if (TARGET_LFIWAX)
2822 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS; /* DImode */
2823
2824 if (TARGET_DIRECT_MOVE)
2825 {
2826 rs6000_constraints[RS6000_CONSTRAINT_wh] = FLOAT_REGS;
2827 rs6000_constraints[RS6000_CONSTRAINT_wj] /* DImode */
2828 = rs6000_constraints[RS6000_CONSTRAINT_wi];
2829 rs6000_constraints[RS6000_CONSTRAINT_wk] /* DFmode */
2830 = rs6000_constraints[RS6000_CONSTRAINT_ws];
2831 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
2832 }
2833
2834 if (TARGET_POWERPC64)
2835 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
2836
2837 if (TARGET_P8_VECTOR && TARGET_UPPER_REGS_SF) /* SFmode */
2838 {
2839 rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
2840 rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
2841 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
2842 }
2843 else if (TARGET_P8_VECTOR)
2844 {
2845 rs6000_constraints[RS6000_CONSTRAINT_wy] = FLOAT_REGS;
2846 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
2847 }
2848 else if (TARGET_VSX)
2849 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
2850
2851 if (TARGET_STFIWX)
2852 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
2853
2854 if (TARGET_LFIWZX)
2855 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS; /* DImode */
2856
2857 /* Set up the reload helper and direct move functions. */
2858 if (TARGET_VSX || TARGET_ALTIVEC)
2859 {
2860 if (TARGET_64BIT)
2861 {
2862 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
2863 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
2864 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
2865 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
2866 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
2867 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
2868 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
2869 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
2870 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
2871 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
2872 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
2873 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
2874 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
2875 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
2876 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
2877 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
2878 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
2879 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
2880 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
2881 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
2882
2883 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
2884 available. */
2885 if (TARGET_NO_SDMODE_STACK)
2886 {
2887 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
2888 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
2889 }
2890
2891 if (TARGET_VSX_TIMODE)
2892 {
2893 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
2894 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
2895 }
2896
2897 if (TARGET_DIRECT_MOVE)
2898 {
2899 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
2900 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
2901 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
2902 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
2903 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
2904 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
2905 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
2906 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
2907 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
2908
2909 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
2910 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
2911 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
2912 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
2913 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
2914 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
2915 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
2916 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
2917 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
2918 }
2919 }
2920 else
2921 {
2922 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
2923 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
2924 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
2925 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
2926 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
2927 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
2928 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
2929 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
2930 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
2931 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
2932 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
2933 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
2934 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
2935 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
2936 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
2937 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
2938 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
2939 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
2940 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
2941 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
2942
2943 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
2944 available. */
2945 if (TARGET_NO_SDMODE_STACK)
2946 {
2947 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
2948 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
2949 }
2950
2951 if (TARGET_VSX_TIMODE)
2952 {
2953 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
2954 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
2955 }
2956
2957 if (TARGET_DIRECT_MOVE)
2958 {
2959 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
2960 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
2961 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
2962 }
2963 }
2964
2965 if (TARGET_UPPER_REGS_DF)
2966 reg_addr[DFmode].scalar_in_vmx_p = true;
2967
2968 if (TARGET_UPPER_REGS_SF)
2969 reg_addr[SFmode].scalar_in_vmx_p = true;
2970 }
2971
2972 /* Precalculate HARD_REGNO_NREGS. */
2973 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
2974 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2975 rs6000_hard_regno_nregs[m][r]
2976 = rs6000_hard_regno_nregs_internal (r, (machine_mode)m);
2977
2978 /* Precalculate HARD_REGNO_MODE_OK. */
2979 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
2980 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2981 if (rs6000_hard_regno_mode_ok (r, (machine_mode)m))
2982 rs6000_hard_regno_mode_ok_p[m][r] = true;
2983
2984 /* Precalculate CLASS_MAX_NREGS sizes. */
2985 for (c = 0; c < LIM_REG_CLASSES; ++c)
2986 {
2987 int reg_size;
2988
2989 if (TARGET_VSX && VSX_REG_CLASS_P (c))
2990 reg_size = UNITS_PER_VSX_WORD;
2991
2992 else if (c == ALTIVEC_REGS)
2993 reg_size = UNITS_PER_ALTIVEC_WORD;
2994
2995 else if (c == FLOAT_REGS)
2996 reg_size = UNITS_PER_FP_WORD;
2997
2998 else
2999 reg_size = UNITS_PER_WORD;
3000
3001 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3002 {
3003 machine_mode m2 = (machine_mode)m;
3004 int reg_size2 = reg_size;
3005
3006 /* TFmode/TDmode always takes 2 registers, even in VSX. */
3007 if (TARGET_VSX && VSX_REG_CLASS_P (c)
3008 && (m == TDmode || m == TFmode))
3009 reg_size2 = UNITS_PER_FP_WORD;
3010
3011 rs6000_class_max_nregs[m][c]
3012 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
3013 }
3014 }
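/* Worked example: TFmode is 16 bytes, and REG_SIZE2 is forced to
   UNITS_PER_FP_WORD (8) for it in the VSX classes, so the division
   gives (16 + 8 - 1) / 8 == 2 registers rather than the single 16-byte
   VSX register the unadjusted REG_SIZE would suggest.  */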
3015
3016 if (TARGET_E500_DOUBLE)
3017 rs6000_class_max_nregs[DFmode][GENERAL_REGS] = 1;
3018
3019   /* Calculate the modes for which to automatically generate code using the
3020      reciprocal divide and square root instructions.  In the future, possibly
3021      automatically generate the instructions even if the user did not specify
3022      -mrecip.  The older machines' double-precision reciprocal sqrt estimate is
3023      not accurate enough.  */
3024 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
3025 if (TARGET_FRES)
3026 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3027 if (TARGET_FRE)
3028 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3029 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3030 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3031 if (VECTOR_UNIT_VSX_P (V2DFmode))
3032 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3033
3034 if (TARGET_FRSQRTES)
3035 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3036 if (TARGET_FRSQRTE)
3037 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3038 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3039 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3040 if (VECTOR_UNIT_VSX_P (V2DFmode))
3041 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3042
3043 if (rs6000_recip_control)
3044 {
3045 if (!flag_finite_math_only)
3046 warning (0, "-mrecip requires -ffinite-math or -ffast-math");
3047 if (flag_trapping_math)
3048 warning (0, "-mrecip requires -fno-trapping-math or -ffast-math");
3049 if (!flag_reciprocal_math)
3050 warning (0, "-mrecip requires -freciprocal-math or -ffast-math");
3051 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
3052 {
3053 if (RS6000_RECIP_HAVE_RE_P (SFmode)
3054 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
3055 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3056
3057 if (RS6000_RECIP_HAVE_RE_P (DFmode)
3058 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
3059 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3060
3061 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
3062 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
3063 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3064
3065 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
3066 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
3067 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3068
3069 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
3070 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
3071 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3072
3073 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
3074 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
3075 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3076
3077 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
3078 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
3079 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3080
3081 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
3082 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
3083 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3084 }
3085 }
3086
3087   /* Update the addr mask bits in reg_addr to help secondary reload and the
3088      legitimate address support figure out the appropriate addressing to
3089      use.  */
3090 rs6000_setup_reg_addr_masks ();
3091
3092 if (global_init_p || TARGET_DEBUG_TARGET)
3093 {
3094 if (TARGET_DEBUG_REG)
3095 rs6000_debug_reg_global ();
3096
3097 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
3098 fprintf (stderr,
3099 "SImode variable mult cost = %d\n"
3100 "SImode constant mult cost = %d\n"
3101 "SImode short constant mult cost = %d\n"
3102 	     "DImode multiplication cost = %d\n"
3103 "SImode division cost = %d\n"
3104 "DImode division cost = %d\n"
3105 "Simple fp operation cost = %d\n"
3106 "DFmode multiplication cost = %d\n"
3107 "SFmode division cost = %d\n"
3108 "DFmode division cost = %d\n"
3109 "cache line size = %d\n"
3110 "l1 cache size = %d\n"
3111 "l2 cache size = %d\n"
3112 "simultaneous prefetches = %d\n"
3113 "\n",
3114 rs6000_cost->mulsi,
3115 rs6000_cost->mulsi_const,
3116 rs6000_cost->mulsi_const9,
3117 rs6000_cost->muldi,
3118 rs6000_cost->divsi,
3119 rs6000_cost->divdi,
3120 rs6000_cost->fp,
3121 rs6000_cost->dmul,
3122 rs6000_cost->sdiv,
3123 rs6000_cost->ddiv,
3124 rs6000_cost->cache_line_size,
3125 rs6000_cost->l1_cache_size,
3126 rs6000_cost->l2_cache_size,
3127 rs6000_cost->simultaneous_prefetches);
3128 }
3129 }
3130
3131 #if TARGET_MACHO
3132 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3133
3134 static void
3135 darwin_rs6000_override_options (void)
3136 {
3137   /* The Darwin ABI always includes AltiVec; it can't be (validly) turned
3138      off.  */
3139 rs6000_altivec_abi = 1;
3140 TARGET_ALTIVEC_VRSAVE = 1;
3141 rs6000_current_abi = ABI_DARWIN;
3142
3143 if (DEFAULT_ABI == ABI_DARWIN
3144 && TARGET_64BIT)
3145 darwin_one_byte_bool = 1;
3146
3147 if (TARGET_64BIT && ! TARGET_POWERPC64)
3148 {
3149 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
3150 warning (0, "-m64 requires PowerPC64 architecture, enabling");
3151 }
3152 if (flag_mkernel)
3153 {
3154 rs6000_default_long_calls = 1;
3155 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3156 }
3157
3158 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3159 Altivec. */
3160 if (!flag_mkernel && !flag_apple_kext
3161 && TARGET_64BIT
3162 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3163 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3164
3165   /* Unless the user (not the configurer) has explicitly overridden
3166      it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to the
3167      G4 unless targeting the kernel.  */
3168 if (!flag_mkernel
3169 && !flag_apple_kext
3170 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3171 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3172 && ! global_options_set.x_rs6000_cpu_index)
3173 {
3174 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3175 }
3176 }
3177 #endif
3178
3179 /* If not otherwise specified by a target, make 'long double' equivalent to
3180 'double'. */
3181
3182 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3183 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3184 #endif
3185
3186 /* Return the builtin mask of the various options used that could affect which
3187 builtins were used. In the past we used target_flags, but we've run out of
3188 bits, and some options like SPE and PAIRED are no longer in
3189 target_flags. */
3190
3191 HOST_WIDE_INT
3192 rs6000_builtin_mask_calculate (void)
3193 {
3194 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3195 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3196 | ((TARGET_SPE) ? RS6000_BTM_SPE : 0)
3197 | ((TARGET_PAIRED_FLOAT) ? RS6000_BTM_PAIRED : 0)
3198 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3199 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3200 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3201 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3202 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3203 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3204 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3205 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3206 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3207 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3208 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3209 | ((TARGET_LONG_DOUBLE_128) ? RS6000_BTM_LDBL128 : 0));
3210 }
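
/* For illustration (a hypothetical configuration; the exact value depends
   on the flags actually in effect): with -mcpu=power7, TARGET_ALTIVEC,
   TARGET_VSX, TARGET_POPCNTD and TARGET_HARD_FLOAT are all set, so the
   function above would return at least

     RS6000_BTM_ALTIVEC | RS6000_BTM_VSX | RS6000_BTM_POPCNTD
       | RS6000_BTM_HARD_FLOAT.  */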
3211
3212 /* Implement TARGET_MD_ASM_ADJUST. All asm statements are considered
3213 to clobber the XER[CA] bit because clobbering that bit without telling
3214 the compiler worked just fine with versions of GCC before GCC 5, and
3215 breaking a lot of older code in ways that are hard to track down is
3216 not such a great idea. */
3217
3218 static rtx_insn *
3219 rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
3220 vec<const char *> &/*constraints*/,
3221 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
3222 {
3223 clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
3224 SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
3225 return NULL;
3226 }
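
/* A minimal sketch of the kind of user code this protects (hypothetical
   example, not part of this file): the asm below modifies XER[CA] via
   "addic" without being able to declare that, which is safe because the
   hook above makes the compiler assume the clobber.

     long
     dec_with_carry_set (long x)
     {
       long r;
       __asm__ ("addic %0,%1,-1" : "=r" (r) : "r" (x));
       return r;
     }
*/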
3227
3228 /* Override command line options. Mostly we process the processor type and
3229 sometimes adjust other TARGET_ options. */
3230
3231 static bool
3232 rs6000_option_override_internal (bool global_init_p)
3233 {
3234 bool ret = true;
3235 bool have_cpu = false;
3236
3237 /* The default cpu requested at configure time, if any. */
3238 const char *implicit_cpu = OPTION_TARGET_CPU_DEFAULT;
3239
3240 HOST_WIDE_INT set_masks;
3241 int cpu_index;
3242 int tune_index;
3243 struct cl_target_option *main_target_opt
3244 = ((global_init_p || target_option_default_node == NULL)
3245 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
3246
3247 /* Print defaults. */
3248 if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
3249 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
3250
3251 /* Remember the explicit arguments. */
3252 if (global_init_p)
3253 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
3254
3255 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
3256 library functions, so warn about it. The flag may be useful for
3257 performance studies from time to time though, so don't disable it
3258 entirely. */
3259 if (global_options_set.x_rs6000_alignment_flags
3260 && rs6000_alignment_flags == MASK_ALIGN_POWER
3261 && DEFAULT_ABI == ABI_DARWIN
3262 && TARGET_64BIT)
3263 warning (0, "-malign-power is not supported for 64-bit Darwin;"
3264 " it is incompatible with the installed C and C++ libraries");
3265
3266 /* Numerous experiments show that IRA-based loop pressure
3267 calculation works better for RTL loop invariant motion on targets
3268 with enough (>= 32) registers. It is an expensive optimization,
3269 so it is enabled only when optimizing for peak performance. */
3270 if (optimize >= 3 && global_init_p
3271 && !global_options_set.x_flag_ira_loop_pressure)
3272 flag_ira_loop_pressure = 1;
3273
3274 /* Set the pointer size. */
3275 if (TARGET_64BIT)
3276 {
3277 rs6000_pmode = (int)DImode;
3278 rs6000_pointer_size = 64;
3279 }
3280 else
3281 {
3282 rs6000_pmode = (int)SImode;
3283 rs6000_pointer_size = 32;
3284 }
3285
3286 /* Some OSs don't support saving the high part of 64-bit registers on context
3287 switch. Other OSs don't support saving Altivec registers. On those OSs,
3288 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
3289 if the user wants either, the user must explicitly specify them and we
3290 won't interfere with the user's specification. */
3291
3292 set_masks = POWERPC_MASKS;
3293 #ifdef OS_MISSING_POWERPC64
3294 if (OS_MISSING_POWERPC64)
3295 set_masks &= ~OPTION_MASK_POWERPC64;
3296 #endif
3297 #ifdef OS_MISSING_ALTIVEC
3298 if (OS_MISSING_ALTIVEC)
3299 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX);
3300 #endif
3301
3302 /* Don't let the processor default override flags the user gave explicitly. */
3303 set_masks &= ~rs6000_isa_flags_explicit;
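
/* Worked example (hypothetical flags): if the user passed -mno-altivec,
   OPTION_MASK_ALTIVEC is in rs6000_isa_flags_explicit and is removed
   from set_masks here, so a later -mcpu default can no longer silently
   turn AltiVec back on.  */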
3304
3305 /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments. If the user changed
3306 the cpu in a target attribute or pragma, but did not specify a tuning
3307 option, use the cpu for the tuning option rather than the option specified
3308 with -mtune on the command line. Process a '--with-cpu' configuration
3309 request as an implicit --cpu. */
3310 if (rs6000_cpu_index >= 0)
3311 {
3312 cpu_index = rs6000_cpu_index;
3313 have_cpu = true;
3314 }
3315 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
3316 {
3317 rs6000_cpu_index = cpu_index = main_target_opt->x_rs6000_cpu_index;
3318 have_cpu = true;
3319 }
3320 else if (implicit_cpu)
3321 {
3322 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (implicit_cpu);
3323 have_cpu = true;
3324 }
3325 else
3326 {
3327 /* PowerPC 64-bit LE requires at least ISA 2.07. */
3328 const char *default_cpu = ((!TARGET_POWERPC64)
3329 ? "powerpc"
3330 : ((BYTES_BIG_ENDIAN)
3331 ? "powerpc64"
3332 : "powerpc64le"));
3333
3334 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
3335 have_cpu = false;
3336 }
3337
3338 gcc_assert (cpu_index >= 0);
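
/* Example (illustrative): "-mcpu=power8 -mtune=power6" takes the first
   branch above (have_cpu = true, cpu_index = power8), and the -mtune
   handling below picks tune_index = power6; with -mcpu=power8 alone,
   tune_index falls back to cpu_index.  */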
3339
3340 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
3341 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
3342 with those from the cpu, except for options that were explicitly set. If
3343 we don't have a cpu, do not override the target bits set in
3344 TARGET_DEFAULT. */
3345 if (have_cpu)
3346 {
3347 rs6000_isa_flags &= ~set_masks;
3348 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
3349 & set_masks);
3350 }
3351 else
3352 {
3353 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
3354 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
3355 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. Since we switched
3356 to using rs6000_isa_flags, we need to do the initialization here.
3357
3358 If there is a TARGET_DEFAULT, use that. Otherwise fall back to using
3359 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults. */
3360 HOST_WIDE_INT flags = ((TARGET_DEFAULT) ? TARGET_DEFAULT
3361 : processor_target_table[cpu_index].target_enable);
3362 rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
3363 }
3364
3365 if (rs6000_tune_index >= 0)
3366 tune_index = rs6000_tune_index;
3367 else if (have_cpu)
3368 rs6000_tune_index = tune_index = cpu_index;
3369 else
3370 {
3371 size_t i;
3372 enum processor_type tune_proc
3373 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
3374
3375 tune_index = -1;
3376 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
3377 if (processor_target_table[i].processor == tune_proc)
3378 {
3379 rs6000_tune_index = tune_index = i;
3380 break;
3381 }
3382 }
3383
3384 gcc_assert (tune_index >= 0);
3385 rs6000_cpu = processor_target_table[tune_index].processor;
3386
3387 /* Pick defaults for SPE-related control flags. Do this early to make sure
3388 that the TARGET_ macros are representative ASAP. */
3389 {
3390 int spe_capable_cpu =
3391 (rs6000_cpu == PROCESSOR_PPC8540
3392 || rs6000_cpu == PROCESSOR_PPC8548);
3393
3394 if (!global_options_set.x_rs6000_spe_abi)
3395 rs6000_spe_abi = spe_capable_cpu;
3396
3397 if (!global_options_set.x_rs6000_spe)
3398 rs6000_spe = spe_capable_cpu;
3399
3400 if (!global_options_set.x_rs6000_float_gprs)
3401 rs6000_float_gprs =
3402 (rs6000_cpu == PROCESSOR_PPC8540 ? 1
3403 : rs6000_cpu == PROCESSOR_PPC8548 ? 2
3404 : 0);
3405 }
3406
3407 if (global_options_set.x_rs6000_spe_abi
3408 && rs6000_spe_abi
3409 && !TARGET_SPE_ABI)
3410 error ("not configured for SPE ABI");
3411
3412 if (global_options_set.x_rs6000_spe
3413 && rs6000_spe
3414 && !TARGET_SPE)
3415 error ("not configured for SPE instruction set");
3416
3417 if (main_target_opt != NULL
3418 && ((main_target_opt->x_rs6000_spe_abi != rs6000_spe_abi)
3419 || (main_target_opt->x_rs6000_spe != rs6000_spe)
3420 || (main_target_opt->x_rs6000_float_gprs != rs6000_float_gprs)))
3421 error ("target attribute or pragma changes SPE ABI");
3422
3423 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
3424 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
3425 || rs6000_cpu == PROCESSOR_PPCE5500)
3426 {
3427 if (TARGET_ALTIVEC)
3428 error ("AltiVec not supported in this target");
3429 if (TARGET_SPE)
3430 error ("SPE not supported in this target");
3431 }
3432 if (rs6000_cpu == PROCESSOR_PPCE6500)
3433 {
3434 if (TARGET_SPE)
3435 error ("SPE not supported in this target");
3436 }
3437
3438 /* Disable Cell microcode if we are optimizing for the Cell
3439 and not optimizing for size. */
3440 if (rs6000_gen_cell_microcode == -1)
3441 rs6000_gen_cell_microcode = !(rs6000_cpu == PROCESSOR_CELL
3442 && !optimize_size);
3443
3444 /* If we are optimizing big endian systems for space and it's OK to
3445 use instructions that would be microcoded on the Cell, use the
3446 load/store multiple and string instructions. */
3447 if (BYTES_BIG_ENDIAN && optimize_size && rs6000_gen_cell_microcode)
3448 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & (OPTION_MASK_MULTIPLE
3449 | OPTION_MASK_STRING);
3450
3451 /* Don't allow -mmultiple or -mstring on little endian systems
3452 unless the cpu is a 750, because the hardware doesn't support the
3453 instructions used in little endian mode, and using them causes an
3454 alignment trap. The 750 does not cause an alignment trap (except
3455 when the target is unaligned). */
3456
3457 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
3458 {
3459 if (TARGET_MULTIPLE)
3460 {
3461 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
3462 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
3463 warning (0, "-mmultiple is not supported on little endian systems");
3464 }
3465
3466 if (TARGET_STRING)
3467 {
3468 rs6000_isa_flags &= ~OPTION_MASK_STRING;
3469 if ((rs6000_isa_flags_explicit & OPTION_MASK_STRING) != 0)
3470 warning (0, "-mstring is not supported on little endian systems");
3471 }
3472 }
3473
3474 /* If little-endian, default to -mstrict-align on older processors.
3475 Testing for htm matches power8 and later. */
3476 if (!BYTES_BIG_ENDIAN
3477 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
3478 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
3479
3480 /* -maltivec={le,be} implies -maltivec. */
3481 if (rs6000_altivec_element_order != 0)
3482 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3483
3484 /* Disallow -maltivec=le in big endian mode for now. This is not
3485 known to be useful for anyone. */
3486 if (BYTES_BIG_ENDIAN && rs6000_altivec_element_order == 1)
3487 {
3488 warning (0, N_("-maltivec=le not allowed for big-endian targets"));
3489 rs6000_altivec_element_order = 0;
3490 }
3491
3492 /* Add some warnings for VSX. */
3493 if (TARGET_VSX)
3494 {
3495 const char *msg = NULL;
3496 if (!TARGET_HARD_FLOAT || !TARGET_FPRS
3497 || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
3498 {
3499 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3500 msg = N_("-mvsx requires hardware floating point");
3501 else
3502 {
3503 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
3504 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
3505 }
3506 }
3507 else if (TARGET_PAIRED_FLOAT)
3508 msg = N_("-mvsx and -mpaired are incompatible");
3509 else if (TARGET_AVOID_XFORM > 0)
3510 msg = N_("-mvsx needs indexed addressing");
3511 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
3512 & OPTION_MASK_ALTIVEC))
3513 {
3514 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3515 msg = N_("-mvsx and -mno-altivec are incompatible");
3516 else
3517 msg = N_("-mno-altivec disables vsx");
3518 }
3519
3520 if (msg)
3521 {
3522 warning (0, msg);
3523 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
3524 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
3525 }
3526 }
3527
3528 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
3529 the -mcpu setting to enable options that conflict. */
3530 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
3531 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
3532 | OPTION_MASK_ALTIVEC
3533 | OPTION_MASK_VSX)) != 0)
3534 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
3535 | OPTION_MASK_DIRECT_MOVE)
3536 & ~rs6000_isa_flags_explicit);
3537
3538 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
3539 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
3540
3541 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
3542 unless the user explicitly used -mno-<option> to disable the code. */
3543 if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
3544 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~rs6000_isa_flags_explicit);
3545 else if (TARGET_VSX)
3546 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~rs6000_isa_flags_explicit);
3547 else if (TARGET_POPCNTD)
3548 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~rs6000_isa_flags_explicit);
3549 else if (TARGET_DFP)
3550 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~rs6000_isa_flags_explicit);
3551 else if (TARGET_CMPB)
3552 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~rs6000_isa_flags_explicit);
3553 else if (TARGET_FPRND)
3554 rs6000_isa_flags |= (ISA_2_4_MASKS & ~rs6000_isa_flags_explicit);
3555 else if (TARGET_POPCNTB)
3556 rs6000_isa_flags |= (ISA_2_2_MASKS & ~rs6000_isa_flags_explicit);
3557 else if (TARGET_ALTIVEC)
3558 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~rs6000_isa_flags_explicit);
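
/* For example (illustrative): plain -mvsx pulls in ISA_2_6_MASKS_SERVER
   (defined in rs6000.h; it includes options such as -mpopcntd and
   -maltivec), but explicit negations survive the masking above, so
   "-mvsx -mno-popcntd" still leaves popcntd disabled.  */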
3559
3560 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
3561 {
3562 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
3563 error ("-mcrypto requires -maltivec");
3564 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
3565 }
3566
3567 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
3568 {
3569 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
3570 error ("-mdirect-move requires -mvsx");
3571 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
3572 }
3573
3574 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
3575 {
3576 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
3577 error ("-mpower8-vector requires -maltivec");
3578 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
3579 }
3580
3581 if (TARGET_P8_VECTOR && !TARGET_VSX)
3582 {
3583 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
3584 error ("-mpower8-vector requires -mvsx");
3585 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
3586 }
3587
3588 if (TARGET_VSX_TIMODE && !TARGET_VSX)
3589 {
3590 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX_TIMODE)
3591 error ("-mvsx-timode requires -mvsx");
3592 rs6000_isa_flags &= ~OPTION_MASK_VSX_TIMODE;
3593 }
3594
3595 if (TARGET_DFP && !TARGET_HARD_FLOAT)
3596 {
3597 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
3598 error ("-mhard-dfp requires -mhard-float");
3599 rs6000_isa_flags &= ~OPTION_MASK_DFP;
3600 }
3601
3602 /* Allow an explicit -mupper-regs to set both -mupper-regs-df and
3603 -mupper-regs-sf, depending on the cpu, unless the user explicitly also set
3604 the individual option. */
3605 if (TARGET_UPPER_REGS > 0)
3606 {
3607 if (TARGET_VSX
3608 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DF))
3609 {
3610 rs6000_isa_flags |= OPTION_MASK_UPPER_REGS_DF;
3611 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_DF;
3612 }
3613 if (TARGET_P8_VECTOR
3614 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_SF))
3615 {
3616 rs6000_isa_flags |= OPTION_MASK_UPPER_REGS_SF;
3617 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_SF;
3618 }
3619 }
3620 else if (TARGET_UPPER_REGS == 0)
3621 {
3622 if (TARGET_VSX
3623 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DF))
3624 {
3625 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_DF;
3626 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_DF;
3627 }
3628 if (TARGET_P8_VECTOR
3629 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_SF))
3630 {
3631 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_SF;
3632 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_SF;
3633 }
3634 }
3635
3636 if (TARGET_UPPER_REGS_DF && !TARGET_VSX)
3637 {
3638 if (rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DF)
3639 error ("-mupper-regs-df requires -mvsx");
3640 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_DF;
3641 }
3642
3643 if (TARGET_UPPER_REGS_SF && !TARGET_P8_VECTOR)
3644 {
3645 if (rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_SF)
3646 error ("-mupper-regs-sf requires -mpower8-vector");
3647 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_SF;
3648 }
3649
3650 /* The quad memory instructions only work in 64-bit mode. In 32-bit mode,
3651 silently turn off quad memory mode. */
3652 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
3653 {
3654 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
3655 warning (0, N_("-mquad-memory requires 64-bit mode"));
3656
3657 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
3658 warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));
3659
3660 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
3661 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
3662 }
3663
3664 /* Non-atomic quad memory loads/stores are disabled for little endian, since
3665 the words are reversed, but atomic operations can still be done by
3666 swapping the words. */
3667 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
3668 {
3669 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
3670 warning (0, N_("-mquad-memory is not available in little endian mode"));
3671
3672 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
3673 }
3674
3675 /* Assume that if the user asked for normal quad memory instructions, they
3676 want the atomic versions as well, unless they explicitly told us not to
3677 use quad word atomic instructions. */
3678 if (TARGET_QUAD_MEMORY
3679 && !TARGET_QUAD_MEMORY_ATOMIC
3680 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
3681 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
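
/* For example (illustrative): plain -mquad-memory therefore acts like
   -mquad-memory -mquad-memory-atomic, while
   "-mquad-memory -mno-quad-memory-atomic" keeps the atomic forms off.  */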
3682
3683 /* Enable power8 fusion if we are tuning for power8, even if we aren't
3684 generating power8 instructions. */
3685 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
3686 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
3687 & OPTION_MASK_P8_FUSION);
3688
3689 /* Power8 does not fuse sign extended loads with the addis. If we are
3690 optimizing at high levels for speed, convert a sign extended load into a
3691 zero extending load, and an explicit sign extension. */
3692 if (TARGET_P8_FUSION
3693 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
3694 && optimize_function_for_speed_p (cfun)
3695 && optimize >= 3)
3696 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
3697
3698 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
3699 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
3700
3701 /* E500mc does "better" if we inline more aggressively. Respect the
3702 user's opinion, though. */
3703 if (rs6000_block_move_inline_limit == 0
3704 && (rs6000_cpu == PROCESSOR_PPCE500MC
3705 || rs6000_cpu == PROCESSOR_PPCE500MC64
3706 || rs6000_cpu == PROCESSOR_PPCE5500
3707 || rs6000_cpu == PROCESSOR_PPCE6500))
3708 rs6000_block_move_inline_limit = 128;
3709
3710 /* store_one_arg depends on expand_block_move to handle at least the
3711 size of reg_parm_stack_space. */
3712 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
3713 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
3714
3715 if (global_init_p)
3716 {
3717 /* If the appropriate debug option is enabled, replace the target hooks
3718 with debug versions that call the real version and then print
3719 debugging information. */
3720 if (TARGET_DEBUG_COST)
3721 {
3722 targetm.rtx_costs = rs6000_debug_rtx_costs;
3723 targetm.address_cost = rs6000_debug_address_cost;
3724 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
3725 }
3726
3727 if (TARGET_DEBUG_ADDR)
3728 {
3729 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
3730 targetm.legitimize_address = rs6000_debug_legitimize_address;
3731 rs6000_secondary_reload_class_ptr
3732 = rs6000_debug_secondary_reload_class;
3733 rs6000_secondary_memory_needed_ptr
3734 = rs6000_debug_secondary_memory_needed;
3735 rs6000_cannot_change_mode_class_ptr
3736 = rs6000_debug_cannot_change_mode_class;
3737 rs6000_preferred_reload_class_ptr
3738 = rs6000_debug_preferred_reload_class;
3739 rs6000_legitimize_reload_address_ptr
3740 = rs6000_debug_legitimize_reload_address;
3741 rs6000_mode_dependent_address_ptr
3742 = rs6000_debug_mode_dependent_address;
3743 }
3744
3745 if (rs6000_veclibabi_name)
3746 {
3747 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
3748 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
3749 else
3750 {
3751 error ("unknown vectorization library ABI type (%s) for "
3752 "-mveclibabi= switch", rs6000_veclibabi_name);
3753 ret = false;
3754 }
3755 }
3756 }
3757
3758 if (!global_options_set.x_rs6000_long_double_type_size)
3759 {
3760 if (main_target_opt != NULL
3761 && (main_target_opt->x_rs6000_long_double_type_size
3762 != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
3763 error ("target attribute or pragma changes long double size");
3764 else
3765 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
3766 }
3767
3768 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
3769 if (!global_options_set.x_rs6000_ieeequad)
3770 rs6000_ieeequad = 1;
3771 #endif
3772
3773 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
3774 target attribute or pragma which automatically enables both options,
3775 unless the altivec ABI was set. This is set by default for 64-bit, but
3776 not for 32-bit. */
3777 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
3778 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC)
3779 & ~rs6000_isa_flags_explicit);
3780
3781 /* Enable Altivec ABI for AIX -maltivec. */
3782 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
3783 {
3784 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
3785 error ("target attribute or pragma changes AltiVec ABI");
3786 else
3787 rs6000_altivec_abi = 1;
3788 }
3789
3790 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
3791 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
3792 be explicitly overridden in either case. */
3793 if (TARGET_ELF)
3794 {
3795 if (!global_options_set.x_rs6000_altivec_abi
3796 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
3797 {
3798 if (main_target_opt != NULL &&
3799 !main_target_opt->x_rs6000_altivec_abi)
3800 error ("target attribute or pragma changes AltiVec ABI");
3801 else
3802 rs6000_altivec_abi = 1;
3803 }
3804 }
3805
3806 /* Set the Darwin64 ABI as default for 64-bit Darwin.
3807 So far, the only darwin64 targets are also Mach-O. */
3808 if (TARGET_MACHO
3809 && DEFAULT_ABI == ABI_DARWIN
3810 && TARGET_64BIT)
3811 {
3812 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
3813 error ("target attribute or pragma changes darwin64 ABI");
3814 else
3815 {
3816 rs6000_darwin64_abi = 1;
3817 /* Default to natural alignment, for better performance. */
3818 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
3819 }
3820 }
3821
3822 /* Place FP constants in the constant pool instead of TOC
3823 if section anchors are enabled. */
3824 if (flag_section_anchors
3825 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
3826 TARGET_NO_FP_IN_TOC = 1;
3827
3828 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
3829 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
3830
3831 #ifdef SUBTARGET_OVERRIDE_OPTIONS
3832 SUBTARGET_OVERRIDE_OPTIONS;
3833 #endif
3834 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
3835 SUBSUBTARGET_OVERRIDE_OPTIONS;
3836 #endif
3837 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
3838 SUB3TARGET_OVERRIDE_OPTIONS;
3839 #endif
3840
3841 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
3842 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
3843
3844 /* For the E500 family of cores, reset the single/double FP flags to let us
3845 check that they remain constant across attributes or pragmas. Also,
3846 clear a possible request for string instructions, which are not supported
3847 there and which we might have silently enabled above for -Os.
3848
3849 For other families, clear ISEL in case it was set implicitly.
3850 */
3851
3852 switch (rs6000_cpu)
3853 {
3854 case PROCESSOR_PPC8540:
3855 case PROCESSOR_PPC8548:
3856 case PROCESSOR_PPCE500MC:
3857 case PROCESSOR_PPCE500MC64:
3858 case PROCESSOR_PPCE5500:
3859 case PROCESSOR_PPCE6500:
3860
3861 rs6000_single_float = TARGET_E500_SINGLE || TARGET_E500_DOUBLE;
3862 rs6000_double_float = TARGET_E500_DOUBLE;
3863
3864 rs6000_isa_flags &= ~OPTION_MASK_STRING;
3865
3866 break;
3867
3868 default:
3869
3870 if (have_cpu && !(rs6000_isa_flags_explicit & OPTION_MASK_ISEL))
3871 rs6000_isa_flags &= ~OPTION_MASK_ISEL;
3872
3873 break;
3874 }
3875
3876 if (main_target_opt)
3877 {
3878 if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
3879 error ("target attribute or pragma changes single precision floating "
3880 "point");
3881 if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
3882 error ("target attribute or pragma changes double precision floating "
3883 "point");
3884 }
3885
3886 /* Detect invalid option combinations with E500. */
3887 CHECK_E500_OPTIONS;
3888
3889 rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
3890 && rs6000_cpu != PROCESSOR_POWER5
3891 && rs6000_cpu != PROCESSOR_POWER6
3892 && rs6000_cpu != PROCESSOR_POWER7
3893 && rs6000_cpu != PROCESSOR_POWER8
3894 && rs6000_cpu != PROCESSOR_PPCA2
3895 && rs6000_cpu != PROCESSOR_CELL
3896 && rs6000_cpu != PROCESSOR_PPC476);
3897 rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
3898 || rs6000_cpu == PROCESSOR_POWER5
3899 || rs6000_cpu == PROCESSOR_POWER7
3900 || rs6000_cpu == PROCESSOR_POWER8);
3901 rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
3902 || rs6000_cpu == PROCESSOR_POWER5
3903 || rs6000_cpu == PROCESSOR_POWER6
3904 || rs6000_cpu == PROCESSOR_POWER7
3905 || rs6000_cpu == PROCESSOR_POWER8
3906 || rs6000_cpu == PROCESSOR_PPCE500MC
3907 || rs6000_cpu == PROCESSOR_PPCE500MC64
3908 || rs6000_cpu == PROCESSOR_PPCE5500
3909 || rs6000_cpu == PROCESSOR_PPCE6500);
3910
3911 /* Allow debug switches to override the above settings. These are set to -1
3912 in rs6000.opt to indicate the user hasn't directly set the switch. */
3913 if (TARGET_ALWAYS_HINT >= 0)
3914 rs6000_always_hint = TARGET_ALWAYS_HINT;
3915
3916 if (TARGET_SCHED_GROUPS >= 0)
3917 rs6000_sched_groups = TARGET_SCHED_GROUPS;
3918
3919 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
3920 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
3921
3922 rs6000_sched_restricted_insns_priority
3923 = (rs6000_sched_groups ? 1 : 0);
3924
3925 /* Handle -msched-costly-dep option. */
3926 rs6000_sched_costly_dep
3927 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
3928
3929 if (rs6000_sched_costly_dep_str)
3930 {
3931 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
3932 rs6000_sched_costly_dep = no_dep_costly;
3933 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
3934 rs6000_sched_costly_dep = all_deps_costly;
3935 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
3936 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
3937 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
3938 rs6000_sched_costly_dep = store_to_load_dep_costly;
3939 else
3940 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
3941 atoi (rs6000_sched_costly_dep_str));
3942 }
3943
3944 /* Handle -minsert-sched-nops option. */
3945 rs6000_sched_insert_nops
3946 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
3947
3948 if (rs6000_sched_insert_nops_str)
3949 {
3950 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
3951 rs6000_sched_insert_nops = sched_finish_none;
3952 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
3953 rs6000_sched_insert_nops = sched_finish_pad_groups;
3954 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
3955 rs6000_sched_insert_nops = sched_finish_regroup_exact;
3956 else
3957 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
3958 atoi (rs6000_sched_insert_nops_str));
3959 }
3960
3961 if (global_init_p)
3962 {
3963 #ifdef TARGET_REGNAMES
3964 /* If the user desires alternate register names, copy in the
3965 alternate names now. */
3966 if (TARGET_REGNAMES)
3967 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
3968 #endif
3969
3970 /* Set aix_struct_return last, after the ABI is determined.
3971 If -maix-struct-return or -msvr4-struct-return was explicitly
3972 used, don't override with the ABI default. */
3973 if (!global_options_set.x_aix_struct_return)
3974 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
3975
3976 #if 0
3977 /* IBM XL compiler defaults to unsigned bitfields. */
3978 if (TARGET_XL_COMPAT)
3979 flag_signed_bitfields = 0;
3980 #endif
3981
3982 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
3983 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
3984
3985 if (TARGET_TOC)
3986 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
3987
3988 /* We can only guarantee the availability of DI pseudo-ops when
3989 assembling for 64-bit targets. */
3990 if (!TARGET_64BIT)
3991 {
3992 targetm.asm_out.aligned_op.di = NULL;
3993 targetm.asm_out.unaligned_op.di = NULL;
3994 }
3995
3996
3997 /* Set branch target alignment, if not optimizing for size. */
3998 if (!optimize_size)
3999 {
4000 /* Cell wants to be 8-byte aligned for dual issue. Titan wants to be
4001 8-byte aligned to avoid misprediction by the branch predictor. */
4002 if (rs6000_cpu == PROCESSOR_TITAN
4003 || rs6000_cpu == PROCESSOR_CELL)
4004 {
4005 if (align_functions <= 0)
4006 align_functions = 8;
4007 if (align_jumps <= 0)
4008 align_jumps = 8;
4009 if (align_loops <= 0)
4010 align_loops = 8;
4011 }
4012 if (rs6000_align_branch_targets)
4013 {
4014 if (align_functions <= 0)
4015 align_functions = 16;
4016 if (align_jumps <= 0)
4017 align_jumps = 16;
4018 if (align_loops <= 0)
4019 {
4020 can_override_loop_align = 1;
4021 align_loops = 16;
4022 }
4023 }
4024 if (align_jumps_max_skip <= 0)
4025 align_jumps_max_skip = 15;
4026 if (align_loops_max_skip <= 0)
4027 align_loops_max_skip = 15;
4028 }
4029
4030 /* Arrange to save and restore machine status around nested functions. */
4031 init_machine_status = rs6000_init_machine_status;
4032
4033 /* We should always be splitting complex arguments, but we can't break
4034 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
4035 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
4036 targetm.calls.split_complex_arg = NULL;
4037 }
4038
4039 /* Initialize rs6000_cost with the appropriate target costs. */
4040 if (optimize_size)
4041 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
4042 else
4043 switch (rs6000_cpu)
4044 {
4045 case PROCESSOR_RS64A:
4046 rs6000_cost = &rs64a_cost;
4047 break;
4048
4049 case PROCESSOR_MPCCORE:
4050 rs6000_cost = &mpccore_cost;
4051 break;
4052
4053 case PROCESSOR_PPC403:
4054 rs6000_cost = &ppc403_cost;
4055 break;
4056
4057 case PROCESSOR_PPC405:
4058 rs6000_cost = &ppc405_cost;
4059 break;
4060
4061 case PROCESSOR_PPC440:
4062 rs6000_cost = &ppc440_cost;
4063 break;
4064
4065 case PROCESSOR_PPC476:
4066 rs6000_cost = &ppc476_cost;
4067 break;
4068
4069 case PROCESSOR_PPC601:
4070 rs6000_cost = &ppc601_cost;
4071 break;
4072
4073 case PROCESSOR_PPC603:
4074 rs6000_cost = &ppc603_cost;
4075 break;
4076
4077 case PROCESSOR_PPC604:
4078 rs6000_cost = &ppc604_cost;
4079 break;
4080
4081 case PROCESSOR_PPC604e:
4082 rs6000_cost = &ppc604e_cost;
4083 break;
4084
4085 case PROCESSOR_PPC620:
4086 rs6000_cost = &ppc620_cost;
4087 break;
4088
4089 case PROCESSOR_PPC630:
4090 rs6000_cost = &ppc630_cost;
4091 break;
4092
4093 case PROCESSOR_CELL:
4094 rs6000_cost = &ppccell_cost;
4095 break;
4096
4097 case PROCESSOR_PPC750:
4098 case PROCESSOR_PPC7400:
4099 rs6000_cost = &ppc750_cost;
4100 break;
4101
4102 case PROCESSOR_PPC7450:
4103 rs6000_cost = &ppc7450_cost;
4104 break;
4105
4106 case PROCESSOR_PPC8540:
4107 case PROCESSOR_PPC8548:
4108 rs6000_cost = &ppc8540_cost;
4109 break;
4110
4111 case PROCESSOR_PPCE300C2:
4112 case PROCESSOR_PPCE300C3:
4113 rs6000_cost = &ppce300c2c3_cost;
4114 break;
4115
4116 case PROCESSOR_PPCE500MC:
4117 rs6000_cost = &ppce500mc_cost;
4118 break;
4119
4120 case PROCESSOR_PPCE500MC64:
4121 rs6000_cost = &ppce500mc64_cost;
4122 break;
4123
4124 case PROCESSOR_PPCE5500:
4125 rs6000_cost = &ppce5500_cost;
4126 break;
4127
4128 case PROCESSOR_PPCE6500:
4129 rs6000_cost = &ppce6500_cost;
4130 break;
4131
4132 case PROCESSOR_TITAN:
4133 rs6000_cost = &titan_cost;
4134 break;
4135
4136 case PROCESSOR_POWER4:
4137 case PROCESSOR_POWER5:
4138 rs6000_cost = &power4_cost;
4139 break;
4140
4141 case PROCESSOR_POWER6:
4142 rs6000_cost = &power6_cost;
4143 break;
4144
4145 case PROCESSOR_POWER7:
4146 rs6000_cost = &power7_cost;
4147 break;
4148
4149 case PROCESSOR_POWER8:
4150 rs6000_cost = &power8_cost;
4151 break;
4152
4153 case PROCESSOR_PPCA2:
4154 rs6000_cost = &ppca2_cost;
4155 break;
4156
4157 default:
4158 gcc_unreachable ();
4159 }
4160
4161 if (global_init_p)
4162 {
4163 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
4164 rs6000_cost->simultaneous_prefetches,
4165 global_options.x_param_values,
4166 global_options_set.x_param_values);
4167 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
4168 global_options.x_param_values,
4169 global_options_set.x_param_values);
4170 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
4171 rs6000_cost->cache_line_size,
4172 global_options.x_param_values,
4173 global_options_set.x_param_values);
4174 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
4175 global_options.x_param_values,
4176 global_options_set.x_param_values);
4177
4178 /* Increase loop peeling limits based on performance analysis. */
4179 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
4180 global_options.x_param_values,
4181 global_options_set.x_param_values);
4182 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
4183 global_options.x_param_values,
4184 global_options_set.x_param_values);
4185
4186 /* If using typedef char *va_list, signal that
4187 __builtin_va_start (&ap, 0) can be optimized to
4188 ap = __builtin_next_arg (0). */
4189 if (DEFAULT_ABI != ABI_V4)
4190 targetm.expand_builtin_va_start = NULL;
4191 }
4192
4193 /* Set up single/double float flags.
4194 If TARGET_HARD_FLOAT is set, but neither single nor double is set,
4195 then set both flags. */
4196 if (TARGET_HARD_FLOAT && TARGET_FPRS
4197 && rs6000_single_float == 0 && rs6000_double_float == 0)
4198 rs6000_single_float = rs6000_double_float = 1;
4199
4200 /* If not explicitly specified via option, decide whether to generate indexed
4201 load/store instructions. */
4202 if (TARGET_AVOID_XFORM == -1)
4203 /* Avoid indexed addressing when targeting Power6 in order to avoid the
4204 DERAT mispredict penalty. However the LVE and STVE altivec instructions
4205 need indexed accesses and the type used is the scalar type of the element
4206 being loaded or stored. */
4207 TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB
4208 && !TARGET_ALTIVEC);
4209
4210 /* Set the -mrecip options. */
4211 if (rs6000_recip_name)
4212 {
4213 char *p = ASTRDUP (rs6000_recip_name);
4214 char *q;
4215 unsigned int mask, i;
4216 bool invert;
4217
4218 while ((q = strtok (p, ",")) != NULL)
4219 {
4220 p = NULL;
4221 if (*q == '!')
4222 {
4223 invert = true;
4224 q++;
4225 }
4226 else
4227 invert = false;
4228
4229 if (!strcmp (q, "default"))
4230 mask = ((TARGET_RECIP_PRECISION)
4231 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
4232 else
4233 {
4234 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
4235 if (!strcmp (q, recip_options[i].string))
4236 {
4237 mask = recip_options[i].mask;
4238 break;
4239 }
4240
4241 if (i == ARRAY_SIZE (recip_options))
4242 {
4243 error ("unknown option for -mrecip=%s", q);
4244 invert = false;
4245 mask = 0;
4246 ret = false;
4247 }
4248 }
4249
4250 if (invert)
4251 rs6000_recip_control &= ~mask;
4252 else
4253 rs6000_recip_control |= mask;
4254 }
4255 }
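
/* Usage sketch (illustrative): "-mrecip=all,!rsqrtd" first sets every
   estimate bit via the "all" entry of recip_options, then the '!' form
   takes the invert path above and clears the mask for double-precision
   reciprocal square root.  */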
4256
4257 /* Determine when unaligned vector accesses are permitted, and when
4258 they are preferred over masked Altivec loads. Note that if
4259 TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
4260 TARGET_EFFICIENT_UNALIGNED_VSX must be as well. The converse is
4261 not true. */
4262 if (TARGET_EFFICIENT_UNALIGNED_VSX == -1)
4263 {
4264 if (TARGET_VSX && rs6000_cpu == PROCESSOR_POWER8
4265 && TARGET_ALLOW_MOVMISALIGN != 0)
4266 TARGET_EFFICIENT_UNALIGNED_VSX = 1;
4267 else
4268 TARGET_EFFICIENT_UNALIGNED_VSX = 0;
4269 }
4270 if (TARGET_ALLOW_MOVMISALIGN == -1 && rs6000_cpu == PROCESSOR_POWER8)
4271 TARGET_ALLOW_MOVMISALIGN = 1;
4272
4273 /* Set the builtin mask of the various options used that could affect which
4274 builtins were used. In the past we used target_flags, but we've run out
4275 of bits, and some options like SPE and PAIRED are no longer in
4276 target_flags. */
4277 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
4278 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
4279 rs6000_print_builtin_options (stderr, 0, "builtin mask",
4280 rs6000_builtin_mask);
4281
4282 /* Initialize all of the registers. */
4283 rs6000_init_hard_regno_mode_ok (global_init_p);
4284
4285 /* Save the initial options in case the user uses function-specific options. */
4286 if (global_init_p)
4287 target_option_default_node = target_option_current_node
4288 = build_target_option_node (&global_options);
4289
4290 /* If not explicitly specified via option, decide whether to generate the
4291 extra blrs required to preserve the link stack on some cpus (e.g., 476). */
4292 if (TARGET_LINK_STACK == -1)
4293 SET_TARGET_LINK_STACK (rs6000_cpu == PROCESSOR_PPC476 && flag_pic);
4294
4295 return ret;
4296 }
4297
4298 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
4299 define the target cpu type. */
4300
4301 static void
4302 rs6000_option_override (void)
4303 {
4304 (void) rs6000_option_override_internal (true);
4305
4306 /* Register machine-specific passes. This needs to be done at start-up.
4307 It's convenient to do it here (like i386 does). */
4308 opt_pass *pass_analyze_swaps = make_pass_analyze_swaps (g);
4309
4310 struct register_pass_info analyze_swaps_info
4311 = { pass_analyze_swaps, "cse1", 1, PASS_POS_INSERT_BEFORE };
4312
4313 register_pass (&analyze_swaps_info);
4314 }
4315
4316 \f
4317 /* Implement targetm.vectorize.builtin_mask_for_load. */
4318 static tree
4319 rs6000_builtin_mask_for_load (void)
4320 {
4321 /* Don't use lvsl/vperm for P8 and similarly efficient machines. */
4322 if ((TARGET_ALTIVEC && !TARGET_VSX)
4323 || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
4324 return altivec_builtin_mask_for_load;
4325 else
4326 return 0;
4327 }
4328
4329 /* Implement LOOP_ALIGN. */
4330 int
4331 rs6000_loop_align (rtx label)
4332 {
4333 basic_block bb;
4334 int ninsns;
4335
4336 /* Don't override loop alignment if -falign-loops was specified. */
4337 if (!can_override_loop_align)
4338 return align_loops_log;
4339
4340 bb = BLOCK_FOR_INSN (label);
4341 ninsns = num_loop_insns (bb->loop_father);
4342
4343 /* Align small loops to 32 bytes to fit in an icache sector; otherwise return the default. */
4344 if (ninsns > 4 && ninsns <= 8
4345 && (rs6000_cpu == PROCESSOR_POWER4
4346 || rs6000_cpu == PROCESSOR_POWER5
4347 || rs6000_cpu == PROCESSOR_POWER6
4348 || rs6000_cpu == PROCESSOR_POWER7
4349 || rs6000_cpu == PROCESSOR_POWER8))
4350 return 5;
4351 else
4352 return align_loops_log;
4353 }
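
/* Worked example (illustrative): on power7, a 6-insn loop returns 5,
   i.e. 2^5 = 32-byte alignment, one icache sector; a 12-insn loop falls
   through to align_loops_log (4, i.e. 16 bytes, when the override code
   above set align_loops to 16).  */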
4354
4355 /* Implement TARGET_LOOP_ALIGN_MAX_SKIP. */
4356 static int
4357 rs6000_loop_align_max_skip (rtx_insn *label)
4358 {
4359 return (1 << rs6000_loop_align (label)) - 1;
4360 }
4361
4362 /* Return true iff a data reference of TYPE can reach vector alignment (16)
4363 after applying N iterations. This routine does not determine
4364 how many iterations are required to reach the desired alignment. */
4365
4366 static bool
4367 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
4368 {
4369 if (is_packed)
4370 return false;
4371
4372 if (TARGET_32BIT)
4373 {
4374 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
4375 return true;
4376
4377 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
4378 return true;
4379
4380 return false;
4381 }
4382 else
4383 {
4384 if (TARGET_MACHO)
4385 return false;
4386
4387 /* Assume that all other types are naturally aligned. CHECKME! */
4388 return true;
4389 }
4390 }
4391
4392 /* Return true if the vector misalignment factor is supported by the
4393 target. */
4394 static bool
4395 rs6000_builtin_support_vector_misalignment (machine_mode mode,
4396 const_tree type,
4397 int misalignment,
4398 bool is_packed)
4399 {
4400 if (TARGET_VSX)
4401 {
4402 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4403 return true;
4404
4405 /* Return false if the movmisalign pattern is not supported for this mode. */
4406 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
4407 return false;
4408
4409 if (misalignment == -1)
4410 {
4411 /* Misalignment factor is unknown at compile time but we know
4412 it's word aligned. */
4413 if (rs6000_vector_alignment_reachable (type, is_packed))
4414 {
4415 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
4416
4417 if (element_size == 64 || element_size == 32)
4418 return true;
4419 }
4420
4421 return false;
4422 }
4423
4424 /* VSX supports word-aligned vectors. */
4425 if (misalignment % 4 == 0)
4426 return true;
4427 }
4428 return false;
4429 }
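
/* Example (illustrative): on VSX without efficient unaligned accesses,
   a known misalignment of 8 bytes is accepted above (8 % 4 == 0), while
   a misalignment of 2 bytes is rejected and the vectorizer must realign
   or give up.  */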
4430
4431 /* Implement targetm.vectorize.builtin_vectorization_cost. */
4432 static int
4433 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
4434 tree vectype, int misalign)
4435 {
4436 unsigned elements;
4437 tree elem_type;
4438
4439 switch (type_of_cost)
4440 {
4441 case scalar_stmt:
4442 case scalar_load:
4443 case scalar_store:
4444 case vector_stmt:
4445 case vector_load:
4446 case vector_store:
4447 case vec_to_scalar:
4448 case scalar_to_vec:
4449 case cond_branch_not_taken:
4450 return 1;
4451
4452 case vec_perm:
4453 if (TARGET_VSX)
4454 return 3;
4455 else
4456 return 1;
4457
4458 case vec_promote_demote:
4459 if (TARGET_VSX)
4460 return 4;
4461 else
4462 return 1;
4463
4464 case cond_branch_taken:
4465 return 3;
4466
4467 case unaligned_load:
4468 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4469 return 1;
4470
4471 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
4472 {
4473 elements = TYPE_VECTOR_SUBPARTS (vectype);
4474 if (elements == 2)
4475 /* Double word aligned. */
4476 return 2;
4477
4478 if (elements == 4)
4479 {
4480 switch (misalign)
4481 {
4482 case 8:
4483 /* Double word aligned. */
4484 return 2;
4485
4486 case -1:
4487 /* Unknown misalignment. */
4488 case 4:
4489 case 12:
4490 /* Word aligned. */
4491 return 22;
4492
4493 default:
4494 gcc_unreachable ();
4495 }
4496 }
4497 }
4498
4499 if (TARGET_ALTIVEC)
4500 /* Misaligned loads are not supported. */
4501 gcc_unreachable ();
4502
4503 return 2;
4504
4505 case unaligned_store:
4506 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4507 return 1;
4508
4509 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
4510 {
4511 elements = TYPE_VECTOR_SUBPARTS (vectype);
4512 if (elements == 2)
4513 /* Double word aligned. */
4514 return 2;
4515
4516 if (elements == 4)
4517 {
4518 switch (misalign)
4519 {
4520 case 8:
4521 /* Double word aligned. */
4522 return 2;
4523
4524 case -1:
4525 /* Unknown misalignment. */
4526 case 4:
4527 case 12:
4528 /* Word aligned. */
4529 return 23;
4530
4531 default:
4532 gcc_unreachable ();
4533 }
4534 }
4535 }
4536
4537 if (TARGET_ALTIVEC)
4538 /* Misaligned stores are not supported. */
4539 gcc_unreachable ();
4540
4541 return 2;
4542
4543 case vec_construct:
4544 elements = TYPE_VECTOR_SUBPARTS (vectype);
4545 elem_type = TREE_TYPE (vectype);
4546 /* 32-bit vectors loaded into registers are stored as double
4547 precision, so we need n/2 converts in addition to the usual
4548 n/2 merges to construct a vector of short floats from them. */
4549 if (SCALAR_FLOAT_TYPE_P (elem_type)
4550 && TYPE_PRECISION (elem_type) == 32)
4551 return elements + 1;
4552 else
4553 return elements / 2 + 1;
4554
4555 default:
4556 gcc_unreachable ();
4557 }
4558 }
4559
4560 /* Implement targetm.vectorize.preferred_simd_mode. */
4561
4562 static machine_mode
4563 rs6000_preferred_simd_mode (machine_mode mode)
4564 {
4565 if (TARGET_VSX)
4566 switch (mode)
4567 {
4568 case DFmode:
4569 return V2DFmode;
4570 default:;
4571 }
4572 if (TARGET_ALTIVEC || TARGET_VSX)
4573 switch (mode)
4574 {
4575 case SFmode:
4576 return V4SFmode;
4577 case TImode:
4578 return V1TImode;
4579 case DImode:
4580 return V2DImode;
4581 case SImode:
4582 return V4SImode;
4583 case HImode:
4584 return V8HImode;
4585 case QImode:
4586 return V16QImode;
4587 default:;
4588 }
4589 if (TARGET_SPE)
4590 switch (mode)
4591 {
4592 case SFmode:
4593 return V2SFmode;
4594 case SImode:
4595 return V2SImode;
4596 default:;
4597 }
4598 if (TARGET_PAIRED_FLOAT
4599 && mode == SFmode)
4600 return V2SFmode;
4601 return word_mode;
4602 }
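
/* Example (illustrative): with -mvsx, DFmode loops vectorize in
   V2DFmode; with only -maltivec, DFmode falls through to word_mode (no
   vectorization) while SFmode still maps to V4SFmode.  */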
4603
4604 typedef struct _rs6000_cost_data
4605 {
4606 struct loop *loop_info;
4607 unsigned cost[3];
4608 } rs6000_cost_data;
4609
4610 /* Test for likely overcommitment of vector hardware resources. If a
4611 loop iteration is relatively large, and too large a percentage of
4612 instructions in the loop are vectorized, the cost model may not
4613 adequately reflect delays from unavailable vector resources.
4614 Penalize the loop body cost for this case. */
4615
4616 static void
4617 rs6000_density_test (rs6000_cost_data *data)
4618 {
4619 const int DENSITY_PCT_THRESHOLD = 85;
4620 const int DENSITY_SIZE_THRESHOLD = 70;
4621 const int DENSITY_PENALTY = 10;
4622 struct loop *loop = data->loop_info;
4623 basic_block *bbs = get_loop_body (loop);
4624 int nbbs = loop->num_nodes;
4625 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
4626 int i, density_pct;
4627
4628 for (i = 0; i < nbbs; i++)
4629 {
4630 basic_block bb = bbs[i];
4631 gimple_stmt_iterator gsi;
4632
4633 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4634 {
4635 gimple stmt = gsi_stmt (gsi);
4636 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4637
4638 if (!STMT_VINFO_RELEVANT_P (stmt_info)
4639 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
4640 not_vec_cost++;
4641 }
4642 }
4643
4644 free (bbs);
4645 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
4646
4647 if (density_pct > DENSITY_PCT_THRESHOLD
4648 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
4649 {
4650 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
4651 if (dump_enabled_p ())
4652 dump_printf_loc (MSG_NOTE, vect_location,
4653 "density %d%%, cost %d exceeds threshold, penalizing "
4654 "loop body cost by %d%%", density_pct,
4655 vec_cost + not_vec_cost, DENSITY_PENALTY);
4656 }
4657 }
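
/* Worked example (hypothetical costs): vec_cost = 90 and not_vec_cost
   = 10 give density_pct = 90 and a total size of 100; both thresholds
   are exceeded, so the body cost becomes 90 * (100 + 10) / 100 = 99.  */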
4658
4659 /* Implement targetm.vectorize.init_cost. */
4660
4661 static void *
4662 rs6000_init_cost (struct loop *loop_info)
4663 {
4664 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
4665 data->loop_info = loop_info;
4666 data->cost[vect_prologue] = 0;
4667 data->cost[vect_body] = 0;
4668 data->cost[vect_epilogue] = 0;
4669 return data;
4670 }
4671
4672 /* Implement targetm.vectorize.add_stmt_cost. */
4673
4674 static unsigned
4675 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
4676 struct _stmt_vec_info *stmt_info, int misalign,
4677 enum vect_cost_model_location where)
4678 {
4679 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
4680 unsigned retval = 0;
4681
4682 if (flag_vect_cost_model)
4683 {
4684 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
4685 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
4686 misalign);
4687 /* Statements in an inner loop relative to the loop being
4688 vectorized are weighted more heavily. The value here is
4689 arbitrary and could potentially be improved with analysis. */
4690 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
4691 count *= 50; /* FIXME. */
4692
4693 retval = (unsigned) (count * stmt_cost);
4694 cost_data->cost[where] += retval;
4695 }
4696
4697 return retval;
4698 }
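
/* Example (illustrative, assuming VSX with TARGET_ALLOW_MOVMISALIGN and
   the vector cost model enabled): one unaligned_load of a V4SF with
   misalign == 4 costs 22 in rs6000_builtin_vectorization_cost above, so
   the vect_body bucket grows by 22; in an inner loop the count is first
   scaled by the factor of 50.  */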
4699
4700 /* Implement targetm.vectorize.finish_cost. */
4701
4702 static void
4703 rs6000_finish_cost (void *data, unsigned *prologue_cost,
4704 unsigned *body_cost, unsigned *epilogue_cost)
4705 {
4706 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
4707
4708 if (cost_data->loop_info)
4709 rs6000_density_test (cost_data);
4710
4711 *prologue_cost = cost_data->cost[vect_prologue];
4712 *body_cost = cost_data->cost[vect_body];
4713 *epilogue_cost = cost_data->cost[vect_epilogue];
4714 }
4715
4716 /* Implement targetm.vectorize.destroy_cost_data. */
4717
4718 static void
4719 rs6000_destroy_cost_data (void *data)
4720 {
4721 free (data);
4722 }
4723
4724 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
4725 library with vectorized intrinsics. */
4726
4727 static tree
4728 rs6000_builtin_vectorized_libmass (tree fndecl, tree type_out, tree type_in)
4729 {
4730 char name[32];
4731 const char *suffix = NULL;
4732 tree fntype, new_fndecl, bdecl = NULL_TREE;
4733 int n_args = 1;
4734 const char *bname;
4735 machine_mode el_mode, in_mode;
4736 int n, in_n;
4737
4738 /* Libmass is suitable only for unsafe math, since it does not correctly
4739 support parts of IEEE (such as denormals) with the required precision.
4740 Only support it if we have VSX, so the simd d2 or f4 functions can be used.
4741 XXX: Add variable length support. */
4742 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
4743 return NULL_TREE;
4744
4745 el_mode = TYPE_MODE (TREE_TYPE (type_out));
4746 n = TYPE_VECTOR_SUBPARTS (type_out);
4747 in_mode = TYPE_MODE (TREE_TYPE (type_in));
4748 in_n = TYPE_VECTOR_SUBPARTS (type_in);
4749 if (el_mode != in_mode
4750 || n != in_n)
4751 return NULL_TREE;
4752
4753 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
4754 {
4755 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
4756 switch (fn)
4757 {
4758 case BUILT_IN_ATAN2:
4759 case BUILT_IN_HYPOT:
4760 case BUILT_IN_POW:
4761 n_args = 2;
4762 /* fall through */
4763
4764 case BUILT_IN_ACOS:
4765 case BUILT_IN_ACOSH:
4766 case BUILT_IN_ASIN:
4767 case BUILT_IN_ASINH:
4768 case BUILT_IN_ATAN:
4769 case BUILT_IN_ATANH:
4770 case BUILT_IN_CBRT:
4771 case BUILT_IN_COS:
4772 case BUILT_IN_COSH:
4773 case BUILT_IN_ERF:
4774 case BUILT_IN_ERFC:
4775 case BUILT_IN_EXP2:
4776 case BUILT_IN_EXP:
4777 case BUILT_IN_EXPM1:
4778 case BUILT_IN_LGAMMA:
4779 case BUILT_IN_LOG10:
4780 case BUILT_IN_LOG1P:
4781 case BUILT_IN_LOG2:
4782 case BUILT_IN_LOG:
4783 case BUILT_IN_SIN:
4784 case BUILT_IN_SINH:
4785 case BUILT_IN_SQRT:
4786 case BUILT_IN_TAN:
4787 case BUILT_IN_TANH:
4788 bdecl = builtin_decl_implicit (fn);
4789 suffix = "d2"; /* pow -> powd2 */
4790 if (el_mode != DFmode
4791 || n != 2
4792 || !bdecl)
4793 return NULL_TREE;
4794 break;
4795
4796 case BUILT_IN_ATAN2F:
4797 case BUILT_IN_HYPOTF:
4798 case BUILT_IN_POWF:
4799 n_args = 2;
4800 /* fall through */
4801
4802 case BUILT_IN_ACOSF:
4803 case BUILT_IN_ACOSHF:
4804 case BUILT_IN_ASINF:
4805 case BUILT_IN_ASINHF:
4806 case BUILT_IN_ATANF:
4807 case BUILT_IN_ATANHF:
4808 case BUILT_IN_CBRTF:
4809 case BUILT_IN_COSF:
4810 case BUILT_IN_COSHF:
4811 case BUILT_IN_ERFF:
4812 case BUILT_IN_ERFCF:
4813 case BUILT_IN_EXP2F:
4814 case BUILT_IN_EXPF:
4815 case BUILT_IN_EXPM1F:
4816 case BUILT_IN_LGAMMAF:
4817 case BUILT_IN_LOG10F:
4818 case BUILT_IN_LOG1PF:
4819 case BUILT_IN_LOG2F:
4820 case BUILT_IN_LOGF:
4821 case BUILT_IN_SINF:
4822 case BUILT_IN_SINHF:
4823 case BUILT_IN_SQRTF:
4824 case BUILT_IN_TANF:
4825 case BUILT_IN_TANHF:
4826 bdecl = builtin_decl_implicit (fn);
4827 suffix = "4"; /* powf -> powf4 */
4828 if (el_mode != SFmode
4829 || n != 4
4830 || !bdecl)
4831 return NULL_TREE;
4832 break;
4833
4834 default:
4835 return NULL_TREE;
4836 }
4837 }
4838 else
4839 return NULL_TREE;
4840
4841 gcc_assert (suffix != NULL);
4842 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
4843 if (!bname)
4844 return NULL_TREE;
4845
4846 strcpy (name, bname + sizeof ("__builtin_") - 1);
4847 strcat (name, suffix);
4848
4849 if (n_args == 1)
4850 fntype = build_function_type_list (type_out, type_in, NULL);
4851 else if (n_args == 2)
4852 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
4853 else
4854 gcc_unreachable ();
4855
4856 /* Build a function declaration for the vectorized function. */
4857 new_fndecl = build_decl (BUILTINS_LOCATION,
4858 FUNCTION_DECL, get_identifier (name), fntype);
4859 TREE_PUBLIC (new_fndecl) = 1;
4860 DECL_EXTERNAL (new_fndecl) = 1;
4861 DECL_IS_NOVOPS (new_fndecl) = 1;
4862 TREE_READONLY (new_fndecl) = 1;
4863
4864 return new_fndecl;
4865 }
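
/* Example (illustrative): vectorizing __builtin_pow for V2DF strips
   "__builtin_" and appends the "d2" suffix above, yielding a public
   extern decl for the MASS routine "powd2" of type V2DF (*) (V2DF, V2DF).  */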
4866
4867 /* Returns a function decl for a vectorized version of the builtin function
4868 FNDECL, taking TYPE_IN and returning TYPE_OUT, or NULL_TREE if a
4869 vectorized version is not available. */
4870
4871 static tree
4872 rs6000_builtin_vectorized_function (tree fndecl, tree type_out,
4873 tree type_in)
4874 {
4875 machine_mode in_mode, out_mode;
4876 int in_n, out_n;
4877
4878 if (TARGET_DEBUG_BUILTIN)
4879 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
4880 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
4881 GET_MODE_NAME (TYPE_MODE (type_out)),
4882 GET_MODE_NAME (TYPE_MODE (type_in)));
4883
4884 if (TREE_CODE (type_out) != VECTOR_TYPE
4885 || TREE_CODE (type_in) != VECTOR_TYPE
4886 || !TARGET_VECTORIZE_BUILTINS)
4887 return NULL_TREE;
4888
4889 out_mode = TYPE_MODE (TREE_TYPE (type_out));
4890 out_n = TYPE_VECTOR_SUBPARTS (type_out);
4891 in_mode = TYPE_MODE (TREE_TYPE (type_in));
4892 in_n = TYPE_VECTOR_SUBPARTS (type_in);
4893
4894 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
4895 {
4896 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
4897 switch (fn)
4898 {
4899 case BUILT_IN_CLZIMAX:
4900 case BUILT_IN_CLZLL:
4901 case BUILT_IN_CLZL:
4902 case BUILT_IN_CLZ:
4903 if (TARGET_P8_VECTOR && in_mode == out_mode && out_n == in_n)
4904 {
4905 if (out_mode == QImode && out_n == 16)
4906 return rs6000_builtin_decls[P8V_BUILTIN_VCLZB];
4907 else if (out_mode == HImode && out_n == 8)
4908 return rs6000_builtin_decls[P8V_BUILTIN_VCLZH];
4909 else if (out_mode == SImode && out_n == 4)
4910 return rs6000_builtin_decls[P8V_BUILTIN_VCLZW];
4911 else if (out_mode == DImode && out_n == 2)
4912 return rs6000_builtin_decls[P8V_BUILTIN_VCLZD];
4913 }
4914 break;
4915 case BUILT_IN_COPYSIGN:
4916 if (VECTOR_UNIT_VSX_P (V2DFmode)
4917 && out_mode == DFmode && out_n == 2
4918 && in_mode == DFmode && in_n == 2)
4919 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
4920 break;
4921 case BUILT_IN_COPYSIGNF:
4922 if (out_mode != SFmode || out_n != 4
4923 || in_mode != SFmode || in_n != 4)
4924 break;
4925 if (VECTOR_UNIT_VSX_P (V4SFmode))
4926 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
4927 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
4928 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
4929 break;
4930 case BUILT_IN_POPCOUNTIMAX:
4931 case BUILT_IN_POPCOUNTLL:
4932 case BUILT_IN_POPCOUNTL:
4933 case BUILT_IN_POPCOUNT:
4934 if (TARGET_P8_VECTOR && in_mode == out_mode && out_n == in_n)
4935 {
4936 if (out_mode == QImode && out_n == 16)
4937 return rs6000_builtin_decls[P8V_BUILTIN_VPOPCNTB];
4938 else if (out_mode == HImode && out_n == 8)
4939 return rs6000_builtin_decls[P8V_BUILTIN_VPOPCNTH];
4940 else if (out_mode == SImode && out_n == 4)
4941 return rs6000_builtin_decls[P8V_BUILTIN_VPOPCNTW];
4942 else if (out_mode == DImode && out_n == 2)
4943 return rs6000_builtin_decls[P8V_BUILTIN_VPOPCNTD];
4944 }
4945 break;
4946 case BUILT_IN_SQRT:
4947 if (VECTOR_UNIT_VSX_P (V2DFmode)
4948 && out_mode == DFmode && out_n == 2
4949 && in_mode == DFmode && in_n == 2)
4950 return rs6000_builtin_decls[VSX_BUILTIN_XVSQRTDP];
4951 break;
4952 case BUILT_IN_SQRTF:
4953 if (VECTOR_UNIT_VSX_P (V4SFmode)
4954 && out_mode == SFmode && out_n == 4
4955 && in_mode == SFmode && in_n == 4)
4956 return rs6000_builtin_decls[VSX_BUILTIN_XVSQRTSP];
4957 break;
4958 case BUILT_IN_CEIL:
4959 if (VECTOR_UNIT_VSX_P (V2DFmode)
4960 && out_mode == DFmode && out_n == 2
4961 && in_mode == DFmode && in_n == 2)
4962 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
4963 break;
4964 case BUILT_IN_CEILF:
4965 if (out_mode != SFmode || out_n != 4
4966 || in_mode != SFmode || in_n != 4)
4967 break;
4968 if (VECTOR_UNIT_VSX_P (V4SFmode))
4969 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
4970 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
4971 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
4972 break;
4973 case BUILT_IN_FLOOR:
4974 if (VECTOR_UNIT_VSX_P (V2DFmode)
4975 && out_mode == DFmode && out_n == 2
4976 && in_mode == DFmode && in_n == 2)
4977 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
4978 break;
4979 case BUILT_IN_FLOORF:
4980 if (out_mode != SFmode || out_n != 4
4981 || in_mode != SFmode || in_n != 4)
4982 break;
4983 if (VECTOR_UNIT_VSX_P (V4SFmode))
4984 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
4985 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
4986 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
4987 break;
4988 case BUILT_IN_FMA:
4989 if (VECTOR_UNIT_VSX_P (V2DFmode)
4990 && out_mode == DFmode && out_n == 2
4991 && in_mode == DFmode && in_n == 2)
4992 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
4993 break;
4994 case BUILT_IN_FMAF:
4995 if (VECTOR_UNIT_VSX_P (V4SFmode)
4996 && out_mode == SFmode && out_n == 4
4997 && in_mode == SFmode && in_n == 4)
4998 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
4999 else if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5000 && out_mode == SFmode && out_n == 4
5001 && in_mode == SFmode && in_n == 4)
5002 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
5003 break;
5004 case BUILT_IN_TRUNC:
5005 if (VECTOR_UNIT_VSX_P (V2DFmode)
5006 && out_mode == DFmode && out_n == 2
5007 && in_mode == DFmode && in_n == 2)
5008 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
5009 break;
5010 case BUILT_IN_TRUNCF:
5011 if (out_mode != SFmode || out_n != 4
5012 || in_mode != SFmode || in_n != 4)
5013 break;
5014 if (VECTOR_UNIT_VSX_P (V4SFmode))
5015 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
5016 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
5017 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
5018 break;
5019 case BUILT_IN_NEARBYINT:
5020 if (VECTOR_UNIT_VSX_P (V2DFmode)
5021 && flag_unsafe_math_optimizations
5022 && out_mode == DFmode && out_n == 2
5023 && in_mode == DFmode && in_n == 2)
5024 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
5025 break;
5026 case BUILT_IN_NEARBYINTF:
5027 if (VECTOR_UNIT_VSX_P (V4SFmode)
5028 && flag_unsafe_math_optimizations
5029 && out_mode == SFmode && out_n == 4
5030 && in_mode == SFmode && in_n == 4)
5031 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
5032 break;
5033 case BUILT_IN_RINT:
5034 if (VECTOR_UNIT_VSX_P (V2DFmode)
5035 && !flag_trapping_math
5036 && out_mode == DFmode && out_n == 2
5037 && in_mode == DFmode && in_n == 2)
5038 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
5039 break;
5040 case BUILT_IN_RINTF:
5041 if (VECTOR_UNIT_VSX_P (V4SFmode)
5042 && !flag_trapping_math
5043 && out_mode == SFmode && out_n == 4
5044 && in_mode == SFmode && in_n == 4)
5045 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
5046 break;
5047 default:
5048 break;
5049 }
5050 }
5051
5052 else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
5053 {
5054 enum rs6000_builtins fn
5055 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
5056 switch (fn)
5057 {
5058 case RS6000_BUILTIN_RSQRTF:
5059 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5060 && out_mode == SFmode && out_n == 4
5061 && in_mode == SFmode && in_n == 4)
5062 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
5063 break;
5064 case RS6000_BUILTIN_RSQRT:
5065 if (VECTOR_UNIT_VSX_P (V2DFmode)
5066 && out_mode == DFmode && out_n == 2
5067 && in_mode == DFmode && in_n == 2)
5068 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
5069 break;
5070 case RS6000_BUILTIN_RECIPF:
5071 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5072 && out_mode == SFmode && out_n == 4
5073 && in_mode == SFmode && in_n == 4)
5074 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
5075 break;
5076 case RS6000_BUILTIN_RECIP:
5077 if (VECTOR_UNIT_VSX_P (V2DFmode)
5078 && out_mode == DFmode && out_n == 2
5079 && in_mode == DFmode && in_n == 2)
5080 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
5081 break;
5082 default:
5083 break;
5084 }
5085 }
5086
5087 /* Generate calls to libmass if appropriate. */
5088 if (rs6000_veclib_handler)
5089 return rs6000_veclib_handler (fndecl, type_out, type_in);
5090
5091 return NULL_TREE;
5092 }
5093 \f
5094 /* Default CPU string for rs6000*_file_start functions. */
5095 static const char *rs6000_default_cpu;
5096
5097 /* Do anything needed at the start of the asm file. */
5098
5099 static void
5100 rs6000_file_start (void)
5101 {
5102 char buffer[80];
5103 const char *start = buffer;
5104 FILE *file = asm_out_file;
5105
5106 rs6000_default_cpu = TARGET_CPU_DEFAULT;
5107
5108 default_file_start ();
5109
5110 if (flag_verbose_asm)
5111 {
5112 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
5113
5114 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
5115 {
5116 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
5117 start = "";
5118 }
5119
5120 if (global_options_set.x_rs6000_cpu_index)
5121 {
5122 fprintf (file, "%s -mcpu=%s", start,
5123 processor_target_table[rs6000_cpu_index].name);
5124 start = "";
5125 }
5126
5127 if (global_options_set.x_rs6000_tune_index)
5128 {
5129 fprintf (file, "%s -mtune=%s", start,
5130 processor_target_table[rs6000_tune_index].name);
5131 start = "";
5132 }
5133
5134 if (PPC405_ERRATUM77)
5135 {
5136 fprintf (file, "%s PPC405CR_ERRATUM77", start);
5137 start = "";
5138 }
5139
5140 #ifdef USING_ELFOS_H
5141 switch (rs6000_sdata)
5142 {
5143 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
5144 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
5145 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
5146 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
5147 }
5148
5149 if (rs6000_sdata && g_switch_value)
5150 {
5151 fprintf (file, "%s -G %d", start,
5152 g_switch_value);
5153 start = "";
5154 }
5155 #endif
5156
5157 if (*start == '\0')
5158 putc ('\n', file);
5159 }
5160
5161 #ifdef USING_ELFOS_H
5162 if (rs6000_default_cpu == 0 || rs6000_default_cpu[0] == '\0'
5163 || !global_options_set.x_rs6000_cpu_index)
5164 {
5165 fputs ("\t.machine ", asm_out_file);
5166 if ((rs6000_isa_flags & OPTION_MASK_DIRECT_MOVE) != 0)
5167 fputs ("power8\n", asm_out_file);
5168 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTD) != 0)
5169 fputs ("power7\n", asm_out_file);
5170 else if ((rs6000_isa_flags & OPTION_MASK_CMPB) != 0)
5171 fputs ("power6\n", asm_out_file);
5172 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTB) != 0)
5173 fputs ("power5\n", asm_out_file);
5174 else if ((rs6000_isa_flags & OPTION_MASK_MFCRF) != 0)
5175 fputs ("power4\n", asm_out_file);
5176 else if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
5177 fputs ("ppc64\n", asm_out_file);
5178 else
5179 fputs ("ppc\n", asm_out_file);
5180 }
5181 #endif
5182
5183 if (DEFAULT_ABI == ABI_ELFv2)
5184 fprintf (file, "\t.abiversion 2\n");
5185
5186 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2
5187 || (TARGET_ELF && flag_pic == 2))
5188 {
5189 switch_to_section (toc_section);
5190 switch_to_section (text_section);
5191 }
5192 }
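/* Illustrative sketch, not taken from the original sources: on a 64-bit
   ELFv2 Linux target built with -mcpu=power8 -fverbose-asm and no
   --with-cpu default, the code above would typically emit something
   like the following at the top of the assembly file:

	# rs6000/powerpc options: -mcpu=power8
	.machine power8
	.abiversion 2

   The exact option list and .machine value depend on the configuration
   and the command line.  */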
5193
5194 \f
5195 /* Return nonzero if this function is known to have a null epilogue. */
5196
5197 int
5198 direct_return (void)
5199 {
5200 if (reload_completed)
5201 {
5202 rs6000_stack_t *info = rs6000_stack_info ();
5203
5204 if (info->first_gp_reg_save == 32
5205 && info->first_fp_reg_save == 64
5206 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
5207 && ! info->lr_save_p
5208 && ! info->cr_save_p
5209 && info->vrsave_mask == 0
5210 && ! info->push_p)
5211 return 1;
5212 }
5213
5214 return 0;
5215 }
5216
5217 /* Return the number of instructions it takes to form a constant in an
5218 integer register. */
5219
5220 int
5221 num_insns_constant_wide (HOST_WIDE_INT value)
5222 {
5223 /* signed constant loadable with addi */
5224 if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
5225 return 1;
5226
5227 /* constant loadable with addis */
5228 else if ((value & 0xffff) == 0
5229 && (value >> 31 == -1 || value >> 31 == 0))
5230 return 1;
5231
5232 else if (TARGET_POWERPC64)
5233 {
5234 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
5235 HOST_WIDE_INT high = value >> 31;
5236
5237 if (high == 0 || high == -1)
5238 return 2;
5239
5240 high >>= 1;
5241
5242 if (low == 0)
5243 return num_insns_constant_wide (high) + 1;
5244 else if (high == 0)
5245 return num_insns_constant_wide (low) + 1;
5246 else
5247 return (num_insns_constant_wide (high)
5248 + num_insns_constant_wide (low) + 1);
5249 }
5250
5251 else
5252 return 2;
5253 }
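/* Worked examples for num_insns_constant_wide, illustrative only:

	0x7fff             -> 1  li   (passes the signed 16-bit addi test)
	0x12340000         -> 1  lis  (low 16 bits zero, top sign-extends)
	0x12345678         -> 2  lis + ori
	0x123456789abcdef0 -> 5  on 64-bit: two 2-insn 32-bit halves plus
				 the combining shift counted by the "+ 1".  */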
5254
5255 int
5256 num_insns_constant (rtx op, machine_mode mode)
5257 {
5258 HOST_WIDE_INT low, high;
5259
5260 switch (GET_CODE (op))
5261 {
5262 case CONST_INT:
5263 if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
5264 && mask64_operand (op, mode))
5265 return 2;
5266 else
5267 return num_insns_constant_wide (INTVAL (op));
5268
5269 case CONST_WIDE_INT:
5270 {
5271 int i;
5272 int ins = CONST_WIDE_INT_NUNITS (op) - 1;
5273 for (i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
5274 ins += num_insns_constant_wide (CONST_WIDE_INT_ELT (op, i));
5275 return ins;
5276 }
5277
5278 case CONST_DOUBLE:
5279 if (mode == SFmode || mode == SDmode)
5280 {
5281 long l;
5282 REAL_VALUE_TYPE rv;
5283
5284 REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
5285 if (DECIMAL_FLOAT_MODE_P (mode))
5286 REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
5287 else
5288 REAL_VALUE_TO_TARGET_SINGLE (rv, l);
5289 return num_insns_constant_wide ((HOST_WIDE_INT) l);
5290 }
5291
5292 long l[2];
5293 REAL_VALUE_TYPE rv;
5294
5295 REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
5296 if (DECIMAL_FLOAT_MODE_P (mode))
5297 REAL_VALUE_TO_TARGET_DECIMAL64 (rv, l);
5298 else
5299 REAL_VALUE_TO_TARGET_DOUBLE (rv, l);
5300 high = l[WORDS_BIG_ENDIAN == 0];
5301 low = l[WORDS_BIG_ENDIAN != 0];
5302
5303 if (TARGET_32BIT)
5304 return (num_insns_constant_wide (low)
5305 + num_insns_constant_wide (high));
5306 else
5307 {
5308 if ((high == 0 && low >= 0)
5309 || (high == -1 && low < 0))
5310 return num_insns_constant_wide (low);
5311
5312 else if (mask64_operand (op, mode))
5313 return 2;
5314
5315 else if (low == 0)
5316 return num_insns_constant_wide (high) + 1;
5317
5318 else
5319 return (num_insns_constant_wide (high)
5320 + num_insns_constant_wide (low) + 1);
5321 }
5322
5323 default:
5324 gcc_unreachable ();
5325 }
5326 }
5327
5328 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
5329 If the mode of OP is MODE_VECTOR_INT, this simply returns the
5330 corresponding element of the vector, but for V4SFmode and V2SFmode,
5331 the corresponding "float" is interpreted as an SImode integer. */
5332
5333 HOST_WIDE_INT
5334 const_vector_elt_as_int (rtx op, unsigned int elt)
5335 {
5336 rtx tmp;
5337
5338 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
5339 gcc_assert (GET_MODE (op) != V2DImode
5340 && GET_MODE (op) != V2DFmode);
5341
5342 tmp = CONST_VECTOR_ELT (op, elt);
5343 if (GET_MODE (op) == V4SFmode
5344 || GET_MODE (op) == V2SFmode)
5345 tmp = gen_lowpart (SImode, tmp);
5346 return INTVAL (tmp);
5347 }
5348
5349 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
5350 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
5351 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
5352 all items are set to the same value and contain COPIES replicas of the
5353 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
5354 operand and the others are set to the value of the operand's msb. */
5355
5356 static bool
5357 vspltis_constant (rtx op, unsigned step, unsigned copies)
5358 {
5359 machine_mode mode = GET_MODE (op);
5360 machine_mode inner = GET_MODE_INNER (mode);
5361
5362 unsigned i;
5363 unsigned nunits;
5364 unsigned bitsize;
5365 unsigned mask;
5366
5367 HOST_WIDE_INT val;
5368 HOST_WIDE_INT splat_val;
5369 HOST_WIDE_INT msb_val;
5370
5371 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
5372 return false;
5373
5374 nunits = GET_MODE_NUNITS (mode);
5375 bitsize = GET_MODE_BITSIZE (inner);
5376 mask = GET_MODE_MASK (inner);
5377
5378 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
5379 splat_val = val;
5380 msb_val = val >= 0 ? 0 : -1;
5381
5382 /* Construct the value to be splatted, if possible. If not, return 0. */
5383 for (i = 2; i <= copies; i *= 2)
5384 {
5385 HOST_WIDE_INT small_val;
5386 bitsize /= 2;
5387 small_val = splat_val >> bitsize;
5388 mask >>= bitsize;
5389 if (splat_val != ((small_val << bitsize) | (small_val & mask)))
5390 return false;
5391 splat_val = small_val;
5392 }
5393
5394 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
5395 if (EASY_VECTOR_15 (splat_val))
5396 ;
5397
5398 /* Also check if we can splat, and then add the result to itself. Do so if
5399 the value is positive, or if the splat instruction is using OP's mode;
5400 for splat_val < 0, the splat and the add should use the same mode. */
5401 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
5402 && (splat_val >= 0 || (step == 1 && copies == 1)))
5403 ;
5404
5405 /* Also check if we are loading up the most significant bit, which can be done
5406 by loading up -1 and shifting it left by -1 (i.e. by the element size - 1). */
5407 else if (EASY_VECTOR_MSB (splat_val, inner))
5408 ;
5409
5410 else
5411 return false;
5412
5413 /* Check if VAL is present in every STEP-th element, and the
5414 other elements are filled with its most significant bit. */
5415 for (i = 1; i < nunits; ++i)
5416 {
5417 HOST_WIDE_INT desired_val;
5418 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
5419 if ((i & (step - 1)) == 0)
5420 desired_val = val;
5421 else
5422 desired_val = msb_val;
5423
5424 if (desired_val != const_vector_elt_as_int (op, elt))
5425 return false;
5426 }
5427
5428 return true;
5429 }
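/* Illustrative examples for vspltis_constant: a V16QImode vector with
   every byte 5 satisfies (step 1, copies 1) and becomes "vspltisb 5";
   a V8HImode vector with 0x0505 in every halfword satisfies (step 1,
   copies 2), since splatting the byte 5 yields the same bit pattern;
   and a big-endian V8HImode vector {0,5,0,5,0,5,0,5} satisfies
   (step 2, copies 1), because each 32-bit word is 0x00000005 and can
   be produced by "vspltisw 5".  */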
5430
5431
5432 /* Return true if OP is of the given MODE and can be synthesized
5433 with a vspltisb, vspltish or vspltisw. */
5434
5435 bool
5436 easy_altivec_constant (rtx op, machine_mode mode)
5437 {
5438 unsigned step, copies;
5439
5440 if (mode == VOIDmode)
5441 mode = GET_MODE (op);
5442 else if (mode != GET_MODE (op))
5443 return false;
5444
5445 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
5446 constants. */
5447 if (mode == V2DFmode)
5448 return zero_constant (op, mode);
5449
5450 else if (mode == V2DImode)
5451 {
5452 if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
5453 || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
5454 return false;
5455
5456 if (zero_constant (op, mode))
5457 return true;
5458
5459 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
5460 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
5461 return true;
5462
5463 return false;
5464 }
5465
5466 /* V1TImode is a special container for TImode. Ignore for now. */
5467 else if (mode == V1TImode)
5468 return false;
5469
5470 /* Start with a vspltisw. */
5471 step = GET_MODE_NUNITS (mode) / 4;
5472 copies = 1;
5473
5474 if (vspltis_constant (op, step, copies))
5475 return true;
5476
5477 /* Then try with a vspltish. */
5478 if (step == 1)
5479 copies <<= 1;
5480 else
5481 step >>= 1;
5482
5483 if (vspltis_constant (op, step, copies))
5484 return true;
5485
5486 /* And finally a vspltisb. */
5487 if (step == 1)
5488 copies <<= 1;
5489 else
5490 step >>= 1;
5491
5492 if (vspltis_constant (op, step, copies))
5493 return true;
5494
5495 return false;
5496 }
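/* The step/copies progression above encodes the three candidate splat
   widths.  For V8HImode, for instance, the tries are (step 2, copies 1)
   for vspltisw, then (step 1, copies 1) for vspltish, then (step 1,
   copies 2) for vspltisb, where each halfword must be two copies of
   the same byte.  */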
5497
5498 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
5499 result is OP. Abort if it is not possible. */
5500
5501 rtx
5502 gen_easy_altivec_constant (rtx op)
5503 {
5504 machine_mode mode = GET_MODE (op);
5505 int nunits = GET_MODE_NUNITS (mode);
5506 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
5507 unsigned step = nunits / 4;
5508 unsigned copies = 1;
5509
5510 /* Start with a vspltisw. */
5511 if (vspltis_constant (op, step, copies))
5512 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
5513
5514 /* Then try with a vspltish. */
5515 if (step == 1)
5516 copies <<= 1;
5517 else
5518 step >>= 1;
5519
5520 if (vspltis_constant (op, step, copies))
5521 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
5522
5523 /* And finally a vspltisb. */
5524 if (step == 1)
5525 copies <<= 1;
5526 else
5527 step >>= 1;
5528
5529 if (vspltis_constant (op, step, copies))
5530 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
5531
5532 gcc_unreachable ();
5533 }
5534
5535 const char *
5536 output_vec_const_move (rtx *operands)
5537 {
5538 int cst, cst2;
5539 machine_mode mode;
5540 rtx dest, vec;
5541
5542 dest = operands[0];
5543 vec = operands[1];
5544 mode = GET_MODE (dest);
5545
5546 if (TARGET_VSX)
5547 {
5548 if (zero_constant (vec, mode))
5549 return "xxlxor %x0,%x0,%x0";
5550
5551 if ((mode == V2DImode || mode == V1TImode)
5552 && INTVAL (CONST_VECTOR_ELT (vec, 0)) == -1
5553 && INTVAL (CONST_VECTOR_ELT (vec, 1)) == -1)
5554 return "vspltisw %0,-1";
5555 }
5556
5557 if (TARGET_ALTIVEC)
5558 {
5559 rtx splat_vec;
5560 if (zero_constant (vec, mode))
5561 return "vxor %0,%0,%0";
5562
5563 splat_vec = gen_easy_altivec_constant (vec);
5564 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
5565 operands[1] = XEXP (splat_vec, 0);
5566 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
5567 return "#";
5568
5569 switch (GET_MODE (splat_vec))
5570 {
5571 case V4SImode:
5572 return "vspltisw %0,%1";
5573
5574 case V8HImode:
5575 return "vspltish %0,%1";
5576
5577 case V16QImode:
5578 return "vspltisb %0,%1";
5579
5580 default:
5581 gcc_unreachable ();
5582 }
5583 }
5584
5585 gcc_assert (TARGET_SPE);
5586
5587 /* Vector constant 0 is handled as a splitter of V2SI, and in the
5588 pattern of V1DI, V4HI, and V2SF.
5589
5590 FIXME: We should probably return # and add post reload
5591 splitters for these, but this way is so easy ;-). */
5592 cst = INTVAL (CONST_VECTOR_ELT (vec, 0));
5593 cst2 = INTVAL (CONST_VECTOR_ELT (vec, 1));
5594 operands[1] = CONST_VECTOR_ELT (vec, 0);
5595 operands[2] = CONST_VECTOR_ELT (vec, 1);
5596 if (cst == cst2)
5597 return "li %0,%1\n\tevmergelo %0,%0,%0";
5598 else if (WORDS_BIG_ENDIAN)
5599 return "li %0,%1\n\tevmergelo %0,%0,%0\n\tli %0,%2";
5600 else
5601 return "li %0,%2\n\tevmergelo %0,%0,%0\n\tli %0,%1";
5602 }
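/* Illustrative examples of the templates above: an all-zero V4SImode
   constant prints as "xxlxor %x0,%x0,%x0" under VSX or "vxor %0,%0,%0"
   under plain AltiVec, while {4,4,4,4} prints as "vspltisw %0,4".  */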
5603
5604 /* Initialize the PAIRED vector TARGET to VALS. */
5605
5606 void
5607 paired_expand_vector_init (rtx target, rtx vals)
5608 {
5609 machine_mode mode = GET_MODE (target);
5610 int n_elts = GET_MODE_NUNITS (mode);
5611 int n_var = 0;
5612 rtx x, new_rtx, tmp, constant_op, op1, op2;
5613 int i;
5614
5615 for (i = 0; i < n_elts; ++i)
5616 {
5617 x = XVECEXP (vals, 0, i);
5618 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
5619 ++n_var;
5620 }
5621 if (n_var == 0)
5622 {
5623 /* Load from constant pool. */
5624 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
5625 return;
5626 }
5627
5628 if (n_var == 2)
5629 {
5630 /* The vector is initialized only with non-constants. */
5631 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
5632 XVECEXP (vals, 0, 1));
5633
5634 emit_move_insn (target, new_rtx);
5635 return;
5636 }
5637
5638 /* One field is non-constant and the other one is a constant. Load the
5639 constant from the constant pool and use ps_merge instruction to
5640 construct the whole vector. */
5641 op1 = XVECEXP (vals, 0, 0);
5642 op2 = XVECEXP (vals, 0, 1);
5643
5644 constant_op = (CONSTANT_P (op1)) ? op1 : op2;
5645
5646 tmp = gen_reg_rtx (GET_MODE (constant_op));
5647 emit_move_insn (tmp, constant_op);
5648
5649 if (CONSTANT_P (op1))
5650 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
5651 else
5652 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);
5653
5654 emit_move_insn (target, new_rtx);
5655 }
5656
5657 void
5658 paired_expand_vector_move (rtx operands[])
5659 {
5660 rtx op0 = operands[0], op1 = operands[1];
5661
5662 emit_move_insn (op0, op1);
5663 }
5664
5665 /* Emit vector compare for code RCODE. DEST is destination, OP1 and
5666 OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
5667 operands for the relation operation COND. This is a recursive
5668 function. */
5669
5670 static void
5671 paired_emit_vector_compare (enum rtx_code rcode,
5672 rtx dest, rtx op0, rtx op1,
5673 rtx cc_op0, rtx cc_op1)
5674 {
5675 rtx tmp = gen_reg_rtx (V2SFmode);
5676 rtx tmp1, max, min;
5677
5678 gcc_assert (TARGET_PAIRED_FLOAT);
5679 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
5680
5681 switch (rcode)
5682 {
5683 case LT:
5684 case LTU:
5685 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
5686 return;
5687 case GE:
5688 case GEU:
5689 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
5690 emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
5691 return;
5692 case LE:
5693 case LEU:
5694 paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
5695 return;
5696 case GT:
5697 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
5698 return;
5699 case EQ:
5700 tmp1 = gen_reg_rtx (V2SFmode);
5701 max = gen_reg_rtx (V2SFmode);
5702 min = gen_reg_rtx (V2SFmode);
5704
5705 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
5706 emit_insn (gen_selv2sf4
5707 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
5708 emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
5709 emit_insn (gen_selv2sf4
5710 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
5711 emit_insn (gen_subv2sf3 (tmp1, min, max));
5712 emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
5713 return;
5714 case NE:
5715 paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
5716 return;
5717 case UNLE:
5718 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
5719 return;
5720 case UNLT:
5721 paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
5722 return;
5723 case UNGE:
5724 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
5725 return;
5726 case UNGT:
5727 paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
5728 return;
5729 default:
5730 gcc_unreachable ();
5731 }
5732
5733 return;
5734 }
5735
5736 /* Emit vector conditional expression.
5737 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
5738 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
5739
5740 int
5741 paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
5742 rtx cond, rtx cc_op0, rtx cc_op1)
5743 {
5744 enum rtx_code rcode = GET_CODE (cond);
5745
5746 if (!TARGET_PAIRED_FLOAT)
5747 return 0;
5748
5749 paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);
5750
5751 return 1;
5752 }
5753
5754 /* Initialize vector TARGET to VALS. */
5755
5756 void
5757 rs6000_expand_vector_init (rtx target, rtx vals)
5758 {
5759 machine_mode mode = GET_MODE (target);
5760 machine_mode inner_mode = GET_MODE_INNER (mode);
5761 int n_elts = GET_MODE_NUNITS (mode);
5762 int n_var = 0, one_var = -1;
5763 bool all_same = true, all_const_zero = true;
5764 rtx x, mem;
5765 int i;
5766
5767 for (i = 0; i < n_elts; ++i)
5768 {
5769 x = XVECEXP (vals, 0, i);
5770 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
5771 ++n_var, one_var = i;
5772 else if (x != CONST0_RTX (inner_mode))
5773 all_const_zero = false;
5774
5775 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
5776 all_same = false;
5777 }
5778
5779 if (n_var == 0)
5780 {
5781 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
5782 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
5783 if ((int_vector_p || TARGET_VSX) && all_const_zero)
5784 {
5785 /* Zero register. */
5786 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (mode, target, target)));
5787 return;
5788 }
5789 else if (int_vector_p && easy_vector_constant (const_vec, mode))
5790 {
5791 /* Splat immediate. */
5792 emit_insn (gen_rtx_SET (target, const_vec));
5793 return;
5794 }
5795 else
5796 {
5797 /* Load from constant pool. */
5798 emit_move_insn (target, const_vec);
5799 return;
5800 }
5801 }
5802
5803 /* Double word values on VSX can use xxpermdi or lxvdsx. */
5804 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
5805 {
5806 rtx op0 = XVECEXP (vals, 0, 0);
5807 rtx op1 = XVECEXP (vals, 0, 1);
5808 if (all_same)
5809 {
5810 if (!MEM_P (op0) && !REG_P (op0))
5811 op0 = force_reg (inner_mode, op0);
5812 if (mode == V2DFmode)
5813 emit_insn (gen_vsx_splat_v2df (target, op0));
5814 else
5815 emit_insn (gen_vsx_splat_v2di (target, op0));
5816 }
5817 else
5818 {
5819 op0 = force_reg (inner_mode, op0);
5820 op1 = force_reg (inner_mode, op1);
5821 if (mode == V2DFmode)
5822 emit_insn (gen_vsx_concat_v2df (target, op0, op1));
5823 else
5824 emit_insn (gen_vsx_concat_v2di (target, op0, op1));
5825 }
5826 return;
5827 }
5828
5829 /* With single precision floating point on VSX, note that internally single
5830 precision is actually represented as a double. Either make two V2DF
5831 vectors and convert those to single precision, or do one conversion and
5832 splat the result to the other elements. */
5833 if (mode == V4SFmode && VECTOR_MEM_VSX_P (mode))
5834 {
5835 if (all_same)
5836 {
5837 rtx freg = gen_reg_rtx (V4SFmode);
5838 rtx sreg = force_reg (SFmode, XVECEXP (vals, 0, 0));
5839 rtx cvt = ((TARGET_XSCVDPSPN)
5840 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
5841 : gen_vsx_xscvdpsp_scalar (freg, sreg));
5842
5843 emit_insn (cvt);
5844 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg, const0_rtx));
5845 }
5846 else
5847 {
5848 rtx dbl_even = gen_reg_rtx (V2DFmode);
5849 rtx dbl_odd = gen_reg_rtx (V2DFmode);
5850 rtx flt_even = gen_reg_rtx (V4SFmode);
5851 rtx flt_odd = gen_reg_rtx (V4SFmode);
5852 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
5853 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
5854 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
5855 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
5856
5857 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
5858 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
5859 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
5860 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
5861 rs6000_expand_extract_even (target, flt_even, flt_odd);
5862 }
5863 return;
5864 }
5865
5866 /* Store value to stack temp. Load vector element. Splat. However, splat
5867 of 64-bit items is not supported on Altivec. */
5868 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
5869 {
5870 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
5871 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
5872 XVECEXP (vals, 0, 0));
5873 x = gen_rtx_UNSPEC (VOIDmode,
5874 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
5875 emit_insn (gen_rtx_PARALLEL (VOIDmode,
5876 gen_rtvec (2,
5877 gen_rtx_SET (target, mem),
5878 x)));
5879 x = gen_rtx_VEC_SELECT (inner_mode, target,
5880 gen_rtx_PARALLEL (VOIDmode,
5881 gen_rtvec (1, const0_rtx)));
5882 emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
5883 return;
5884 }
5885
5886 /* One field is non-constant. Load constant then overwrite
5887 varying field. */
5888 if (n_var == 1)
5889 {
5890 rtx copy = copy_rtx (vals);
5891
5892 /* Load constant part of vector, substitute neighboring value for
5893 varying element. */
5894 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
5895 rs6000_expand_vector_init (target, copy);
5896
5897 /* Insert variable. */
5898 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
5899 return;
5900 }
5901
5902 /* Construct the vector in memory one field at a time
5903 and load the whole vector. */
5904 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
5905 for (i = 0; i < n_elts; i++)
5906 emit_move_insn (adjust_address_nv (mem, inner_mode,
5907 i * GET_MODE_SIZE (inner_mode)),
5908 XVECEXP (vals, 0, i));
5909 emit_move_insn (target, mem);
5910 }
5911
5912 /* Set field ELT of TARGET to VAL. */
5913
5914 void
5915 rs6000_expand_vector_set (rtx target, rtx val, int elt)
5916 {
5917 machine_mode mode = GET_MODE (target);
5918 machine_mode inner_mode = GET_MODE_INNER (mode);
5919 rtx reg = gen_reg_rtx (mode);
5920 rtx mask, mem, x;
5921 int width = GET_MODE_SIZE (inner_mode);
5922 int i;
5923
5924 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
5925 {
5926 rtx (*set_func) (rtx, rtx, rtx, rtx)
5927 = ((mode == V2DFmode) ? gen_vsx_set_v2df : gen_vsx_set_v2di);
5928 emit_insn (set_func (target, target, val, GEN_INT (elt)));
5929 return;
5930 }
5931
5932 /* Simplify setting single element vectors like V1TImode. */
5933 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
5934 {
5935 emit_move_insn (target, gen_lowpart (mode, val));
5936 return;
5937 }
5938
5939 /* Load single variable value. */
5940 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
5941 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
5942 x = gen_rtx_UNSPEC (VOIDmode,
5943 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
5944 emit_insn (gen_rtx_PARALLEL (VOIDmode,
5945 gen_rtvec (2,
5946 gen_rtx_SET (reg, mem),
5947 x)));
5948
5949 /* Linear sequence. */
5950 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
5951 for (i = 0; i < 16; ++i)
5952 XVECEXP (mask, 0, i) = GEN_INT (i);
5953
5954 /* Set permute mask to insert element into target. */
5955 for (i = 0; i < width; ++i)
5956 XVECEXP (mask, 0, elt*width + i)
5957 = GEN_INT (i + 0x10);
5958 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
5959
5960 if (BYTES_BIG_ENDIAN)
5961 x = gen_rtx_UNSPEC (mode,
5962 gen_rtvec (3, target, reg,
5963 force_reg (V16QImode, x)),
5964 UNSPEC_VPERM);
5965 else
5966 {
5967 /* Invert selector. We prefer to generate VNAND on P8 so
5968 that future fusion opportunities can kick in, but must
5969 generate VNOR elsewhere. */
5970 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
5971 rtx iorx = (TARGET_P8_VECTOR
5972 ? gen_rtx_IOR (V16QImode, notx, notx)
5973 : gen_rtx_AND (V16QImode, notx, notx));
5974 rtx tmp = gen_reg_rtx (V16QImode);
5975 emit_insn (gen_rtx_SET (tmp, iorx));
5976
5977 /* Permute with operands reversed and adjusted selector. */
5978 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
5979 UNSPEC_VPERM);
5980 }
5981
5982 emit_insn (gen_rtx_SET (target, x));
5983 }
5984
5985 /* Extract field ELT from VEC into TARGET. */
5986
5987 void
5988 rs6000_expand_vector_extract (rtx target, rtx vec, int elt)
5989 {
5990 machine_mode mode = GET_MODE (vec);
5991 machine_mode inner_mode = GET_MODE_INNER (mode);
5992 rtx mem;
5993
5994 if (VECTOR_MEM_VSX_P (mode))
5995 {
5996 switch (mode)
5997 {
5998 default:
5999 break;
6000 case V1TImode:
6001 gcc_assert (elt == 0 && inner_mode == TImode);
6002 emit_move_insn (target, gen_lowpart (TImode, vec));
6003 return;
6004 case V2DFmode:
6005 emit_insn (gen_vsx_extract_v2df (target, vec, GEN_INT (elt)));
6006 return;
6007 case V2DImode:
6008 emit_insn (gen_vsx_extract_v2di (target, vec, GEN_INT (elt)));
6009 return;
6010 case V4SFmode:
6011 emit_insn (gen_vsx_extract_v4sf (target, vec, GEN_INT (elt)));
6012 return;
6013 }
6014 }
6015
6016 /* Allocate mode-sized buffer. */
6017 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6018
6019 emit_move_insn (mem, vec);
6020
6021 /* Add offset to field within buffer matching vector element. */
6022 mem = adjust_address_nv (mem, inner_mode, elt * GET_MODE_SIZE (inner_mode));
6023
6024 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
6025 }
6026
6027 /* Generates shifts and masks for a pair of rldicl or rldicr insns to
6028 implement ANDing by the mask IN. */
6029 void
6030 build_mask64_2_operands (rtx in, rtx *out)
6031 {
6032 unsigned HOST_WIDE_INT c, lsb, m1, m2;
6033 int shift;
6034
6035 gcc_assert (GET_CODE (in) == CONST_INT);
6036
6037 c = INTVAL (in);
6038 if (c & 1)
6039 {
6040 /* Assume c initially something like 0x00fff000000fffff. The idea
6041 is to rotate the word so that the middle ^^^^^^ group of zeros
6042 is at the MS end and can be cleared with an rldicl mask. We then
6043 rotate back and clear off the MS ^^ group of zeros with a
6044 second rldicl. */
6045 c = ~c; /* c == 0xff000ffffff00000 */
6046 lsb = c & -c; /* lsb == 0x0000000000100000 */
6047 m1 = -lsb; /* m1 == 0xfffffffffff00000 */
6048 c = ~c; /* c == 0x00fff000000fffff */
6049 c &= -lsb; /* c == 0x00fff00000000000 */
6050 lsb = c & -c; /* lsb == 0x0000100000000000 */
6051 c = ~c; /* c == 0xff000fffffffffff */
6052 c &= -lsb; /* c == 0xff00000000000000 */
6053 shift = 0;
6054 while ((lsb >>= 1) != 0)
6055 shift++; /* shift == 44 on exit from loop */
6056 m1 <<= 64 - shift; /* m1 == 0xffffff0000000000 */
6057 m1 = ~m1; /* m1 == 0x000000ffffffffff */
6058 m2 = ~c; /* m2 == 0x00ffffffffffffff */
6059 }
6060 else
6061 {
6062 /* Assume c initially something like 0xff000f0000000000. The idea
6063 is to rotate the word so that the ^^^ middle group of zeros
6064 is at the LS end and can be cleared with an rldicr mask. We then
6065 rotate back and clear off the LS group of ^^^^^^^^^^ zeros with
6066 a second rldicr. */
6067 lsb = c & -c; /* lsb == 0x0000010000000000 */
6068 m2 = -lsb; /* m2 == 0xffffff0000000000 */
6069 c = ~c; /* c == 0x00fff0ffffffffff */
6070 c &= -lsb; /* c == 0x00fff00000000000 */
6071 lsb = c & -c; /* lsb == 0x0000100000000000 */
6072 c = ~c; /* c == 0xff000fffffffffff */
6073 c &= -lsb; /* c == 0xff00000000000000 */
6074 shift = 0;
6075 while ((lsb >>= 1) != 0)
6076 shift++; /* shift == 44 on exit from loop */
6077 m1 = ~c; /* m1 == 0x00ffffffffffffff */
6078 m1 >>= shift; /* m1 == 0x0000000000000fff */
6079 m1 = ~m1; /* m1 == 0xfffffffffffff000 */
6080 }
6081
6082 /* Note that when we only have two 0->1 and 1->0 transitions, one of the
6083 masks will be all 1's. We are guaranteed more than one transition. */
6084 out[0] = GEN_INT (64 - shift);
6085 out[1] = GEN_INT (m1);
6086 out[2] = GEN_INT (shift);
6087 out[3] = GEN_INT (m2);
6088 }
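/* A rough sketch of how the four outputs are consumed (the consumers
   are assumed to be the 64-bit AND splitters in rs6000.md): they
   describe two rotate-and-mask instructions, conceptually

	x = ROTL64 (x, INTVAL (out[0])) & INTVAL (out[1]);
	x = ROTL64 (x, INTVAL (out[2])) & INTVAL (out[3]);

   each line a single rldicl/rldicr, where ROTL64 is a hypothetical
   64-bit rotate-left: rotating by 64 - shift clears one group of zeros
   with m1, and rotating back by shift clears the other with m2.  */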
6089
6090 /* Return TRUE if OP is an invalid SUBREG operation on the e500. */
6091
6092 bool
6093 invalid_e500_subreg (rtx op, machine_mode mode)
6094 {
6095 if (TARGET_E500_DOUBLE)
6096 {
6097 /* Reject (subreg:SI (reg:DF)); likewise with subreg:DI or
6098 subreg:TI and reg:TF. Decimal float modes are like integer
6099 modes (only low part of each register used) for this
6100 purpose. */
6101 if (GET_CODE (op) == SUBREG
6102 && (mode == SImode || mode == DImode || mode == TImode
6103 || mode == DDmode || mode == TDmode || mode == PTImode)
6104 && REG_P (SUBREG_REG (op))
6105 && (GET_MODE (SUBREG_REG (op)) == DFmode
6106 || GET_MODE (SUBREG_REG (op)) == TFmode))
6107 return true;
6108
6109 /* Reject (subreg:DF (reg:DI)); likewise with subreg:TF and
6110 reg:TI. */
6111 if (GET_CODE (op) == SUBREG
6112 && (mode == DFmode || mode == TFmode)
6113 && REG_P (SUBREG_REG (op))
6114 && (GET_MODE (SUBREG_REG (op)) == DImode
6115 || GET_MODE (SUBREG_REG (op)) == TImode
6116 || GET_MODE (SUBREG_REG (op)) == PTImode
6117 || GET_MODE (SUBREG_REG (op)) == DDmode
6118 || GET_MODE (SUBREG_REG (op)) == TDmode))
6119 return true;
6120 }
6121
6122 if (TARGET_SPE
6123 && GET_CODE (op) == SUBREG
6124 && mode == SImode
6125 && REG_P (SUBREG_REG (op))
6126 && SPE_VECTOR_MODE (GET_MODE (SUBREG_REG (op))))
6127 return true;
6128
6129 return false;
6130 }
6131
6132 /* Return the alignment of TYPE. Existing alignment is ALIGN. HOW
6133 selects whether the alignment is ABI-mandated, optional, or
6134 both ABI-mandated and optional alignment. */
6135
6136 unsigned int
6137 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
6138 {
6139 if (how != align_opt)
6140 {
6141 if (TREE_CODE (type) == VECTOR_TYPE)
6142 {
6143 if ((TARGET_SPE && SPE_VECTOR_MODE (TYPE_MODE (type)))
6144 || (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (TYPE_MODE (type))))
6145 {
6146 if (align < 64)
6147 align = 64;
6148 }
6149 else if (align < 128)
6150 align = 128;
6151 }
6152 else if (TARGET_E500_DOUBLE
6153 && TREE_CODE (type) == REAL_TYPE
6154 && TYPE_MODE (type) == DFmode)
6155 {
6156 if (align < 64)
6157 align = 64;
6158 }
6159 }
6160
6161 if (how != align_abi)
6162 {
6163 if (TREE_CODE (type) == ARRAY_TYPE
6164 && TYPE_MODE (TREE_TYPE (type)) == QImode)
6165 {
6166 if (align < BITS_PER_WORD)
6167 align = BITS_PER_WORD;
6168 }
6169 }
6170
6171 return align;
6172 }
6173
6174 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
6175
6176 bool
6177 rs6000_special_adjust_field_align_p (tree field, unsigned int computed)
6178 {
6179 if (TARGET_ALTIVEC && TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
6180 {
6181 if (computed != 128)
6182 {
6183 static bool warned;
6184 if (!warned && warn_psabi)
6185 {
6186 warned = true;
6187 inform (input_location,
6188 "the layout of aggregates containing vectors with"
6189 " %d-byte alignment has changed in GCC 5",
6190 computed / BITS_PER_UNIT);
6191 }
6192 }
6193 /* In current GCC there is no special case. */
6194 return false;
6195 }
6196
6197 return false;
6198 }
6199
6200 /* AIX increases natural record alignment to doubleword if the first
6201 field is an FP double while the FP fields remain word aligned. */
6202
6203 unsigned int
6204 rs6000_special_round_type_align (tree type, unsigned int computed,
6205 unsigned int specified)
6206 {
6207 unsigned int align = MAX (computed, specified);
6208 tree field = TYPE_FIELDS (type);
6209
6210 /* Skip all non-field decls. */
6211 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
6212 field = DECL_CHAIN (field);
6213
6214 if (field != NULL && field != type)
6215 {
6216 type = TREE_TYPE (field);
6217 while (TREE_CODE (type) == ARRAY_TYPE)
6218 type = TREE_TYPE (type);
6219
6220 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
6221 align = MAX (align, 64);
6222 }
6223
6224 return align;
6225 }
6226
6227 /* Darwin increases record alignment to the natural alignment of
6228 the first field. */
6229
6230 unsigned int
6231 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
6232 unsigned int specified)
6233 {
6234 unsigned int align = MAX (computed, specified);
6235
6236 if (TYPE_PACKED (type))
6237 return align;
6238
6239 /* Find the first field, looking down into aggregates. */
6240 do {
6241 tree field = TYPE_FIELDS (type);
6242 /* Skip all non-field decls. */
6243 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
6244 field = DECL_CHAIN (field);
6245 if (! field)
6246 break;
6247 /* A packed field does not contribute any extra alignment. */
6248 if (DECL_PACKED (field))
6249 return align;
6250 type = TREE_TYPE (field);
6251 while (TREE_CODE (type) == ARRAY_TYPE)
6252 type = TREE_TYPE (type);
6253 } while (AGGREGATE_TYPE_P (type));
6254
6255 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
6256 align = MAX (align, TYPE_ALIGN (type));
6257
6258 return align;
6259 }
6260
6261 /* Return 1 for an operand in small memory on V.4/eabi. */
6262
6263 int
6264 small_data_operand (rtx op ATTRIBUTE_UNUSED,
6265 machine_mode mode ATTRIBUTE_UNUSED)
6266 {
6267 #if TARGET_ELF
6268 rtx sym_ref;
6269
6270 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
6271 return 0;
6272
6273 if (DEFAULT_ABI != ABI_V4)
6274 return 0;
6275
6276 /* Vector and float memory instructions have a limited offset on the
6277 SPE, so using a vector or float variable directly as an operand is
6278 not useful. */
6279 if (TARGET_SPE
6280 && (SPE_VECTOR_MODE (mode) || FLOAT_MODE_P (mode)))
6281 return 0;
6282
6283 if (GET_CODE (op) == SYMBOL_REF)
6284 sym_ref = op;
6285
6286 else if (GET_CODE (op) != CONST
6287 || GET_CODE (XEXP (op, 0)) != PLUS
6288 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
6289 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
6290 return 0;
6291
6292 else
6293 {
6294 rtx sum = XEXP (op, 0);
6295 HOST_WIDE_INT summand;
6296
6297 /* We have to be careful here, because it is the referenced address
6298 that must be 32k from _SDA_BASE_, not just the symbol. */
6299 summand = INTVAL (XEXP (sum, 1));
6300 if (summand < 0 || summand > g_switch_value)
6301 return 0;
6302
6303 sym_ref = XEXP (sum, 0);
6304 }
6305
6306 return SYMBOL_REF_SMALL_P (sym_ref);
6307 #else
6308 return 0;
6309 #endif
6310 }
6311
6312 /* Return true if either operand is a general purpose register. */
6313
6314 bool
6315 gpr_or_gpr_p (rtx op0, rtx op1)
6316 {
6317 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
6318 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
6319 }
6320
6321 /* Return true if this is a move direct operation between GPR registers and
6322 floating point/VSX registers. */
6323
6324 bool
6325 direct_move_p (rtx op0, rtx op1)
6326 {
6327 int regno0, regno1;
6328
6329 if (!REG_P (op0) || !REG_P (op1))
6330 return false;
6331
6332 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
6333 return false;
6334
6335 regno0 = REGNO (op0);
6336 regno1 = REGNO (op1);
6337 if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
6338 return false;
6339
6340 if (INT_REGNO_P (regno0))
6341 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
6342
6343 else if (INT_REGNO_P (regno1))
6344 {
6345 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
6346 return true;
6347
6348 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
6349 return true;
6350 }
6351
6352 return false;
6353 }
6354
6355 /* Return true if this is a load or store quad operation. This function does
6356 not handle the atomic quad memory instructions. */
6357
6358 bool
6359 quad_load_store_p (rtx op0, rtx op1)
6360 {
6361 bool ret;
6362
6363 if (!TARGET_QUAD_MEMORY)
6364 ret = false;
6365
6366 else if (REG_P (op0) && MEM_P (op1))
6367 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
6368 && quad_memory_operand (op1, GET_MODE (op1))
6369 && !reg_overlap_mentioned_p (op0, op1));
6370
6371 else if (MEM_P (op0) && REG_P (op1))
6372 ret = (quad_memory_operand (op0, GET_MODE (op0))
6373 && quad_int_reg_operand (op1, GET_MODE (op1)));
6374
6375 else
6376 ret = false;
6377
6378 if (TARGET_DEBUG_ADDR)
6379 {
6380 fprintf (stderr, "\n========== quad_load_store, return %s\n",
6381 ret ? "true" : "false");
6382 debug_rtx (gen_rtx_SET (op0, op1));
6383 }
6384
6385 return ret;
6386 }
6387
6388 /* Given an address, return a constant offset term if one exists. */
6389
6390 static rtx
6391 address_offset (rtx op)
6392 {
6393 if (GET_CODE (op) == PRE_INC
6394 || GET_CODE (op) == PRE_DEC)
6395 op = XEXP (op, 0);
6396 else if (GET_CODE (op) == PRE_MODIFY
6397 || GET_CODE (op) == LO_SUM)
6398 op = XEXP (op, 1);
6399
6400 if (GET_CODE (op) == CONST)
6401 op = XEXP (op, 0);
6402
6403 if (GET_CODE (op) == PLUS)
6404 op = XEXP (op, 1);
6405
6406 if (CONST_INT_P (op))
6407 return op;
6408
6409 return NULL_RTX;
6410 }
6411
6412 /* Return true if the MEM operand is a memory operand suitable for use
6413 with a (full width, possibly multiple) gpr load/store. On
6414 powerpc64 this means the offset must be divisible by 4.
6415 Implements 'Y' constraint.
6416
6417 Accept direct, indexed, offset, lo_sum and tocref. Since this is
6418 a constraint function we know the operand has satisfied a suitable
6419 memory predicate. Also accept some odd rtl generated by reload
6420 (see rs6000_legitimize_reload_address for various forms). It is
6421 important that reload rtl be accepted by appropriate constraints
6422 but not by the operand predicate.
6423
6424 Offsetting a lo_sum should not be allowed, except where we know by
6425 alignment that a 32k boundary is not crossed, but see the ???
6426 comment in rs6000_legitimize_reload_address. Note that by
6427 "offsetting" here we mean a further offset to access parts of the
6428 MEM. It's fine to have a lo_sum where the inner address is offset
6429 from a sym, since the same sym+offset will appear in the high part
6430 of the address calculation. */
6431
6432 bool
6433 mem_operand_gpr (rtx op, machine_mode mode)
6434 {
6435 unsigned HOST_WIDE_INT offset;
6436 int extra;
6437 rtx addr = XEXP (op, 0);
6438
6439 op = address_offset (addr);
6440 if (op == NULL_RTX)
6441 return true;
6442
6443 offset = INTVAL (op);
6444 if (TARGET_POWERPC64 && (offset & 3) != 0)
6445 return false;
6446
6447 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
6448 if (extra < 0)
6449 extra = 0;
6450
6451 if (GET_CODE (addr) == LO_SUM)
6452 /* For lo_sum addresses, we must allow any offset except one that
6453 causes a wrap, so test only the low 16 bits. */
6454 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
6455
6456 return offset + 0x8000 < 0x10000u - extra;
6457 }
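/* Worked examples, illustrative only: for DImode on powerpc64, extra
   is 0, so "reg + 0x7ffc" is accepted while "reg + 0x7ffe" fails the
   multiple-of-4 test; for TImode (size 16), extra is 8, so 0x7ff0 is
   the largest accepted positive offset, since 0x7ff8 would push the
   second doubleword access past the 16-bit displacement range.  */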
6458 \f
6459 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
6460
6461 static bool
6462 reg_offset_addressing_ok_p (machine_mode mode)
6463 {
6464 switch (mode)
6465 {
6466 case V16QImode:
6467 case V8HImode:
6468 case V4SFmode:
6469 case V4SImode:
6470 case V2DFmode:
6471 case V2DImode:
6472 case V1TImode:
6473 case TImode:
6474 /* AltiVec/VSX vector modes. Only reg+reg addressing is valid. While
6475 TImode is not a vector mode, if we want to use the VSX registers to
6476 move it around, we need to restrict ourselves to reg+reg
6477 addressing. */
6478 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
6479 return false;
6480 break;
6481
6482 case V4HImode:
6483 case V2SImode:
6484 case V1DImode:
6485 case V2SFmode:
6486 /* Paired vector modes. Only reg+reg addressing is valid. */
6487 if (TARGET_PAIRED_FLOAT)
6488 return false;
6489 break;
6490
6491 case SDmode:
6492 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
6493 addressing for the LFIWZX and STFIWX instructions. */
6494 if (TARGET_NO_SDMODE_STACK)
6495 return false;
6496 break;
6497
6498 default:
6499 break;
6500 }
6501
6502 return true;
6503 }
6504
6505 static bool
6506 virtual_stack_registers_memory_p (rtx op)
6507 {
6508 int regnum;
6509
6510 if (GET_CODE (op) == REG)
6511 regnum = REGNO (op);
6512
6513 else if (GET_CODE (op) == PLUS
6514 && GET_CODE (XEXP (op, 0)) == REG
6515 && GET_CODE (XEXP (op, 1)) == CONST_INT)
6516 regnum = REGNO (XEXP (op, 0));
6517
6518 else
6519 return false;
6520
6521 return (regnum >= FIRST_VIRTUAL_REGISTER
6522 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
6523 }
6524
6525 /* Return true if a MODE-sized memory access to OP plus OFFSET
6526 is known not to straddle a 32k boundary. This function is used
6527 to determine whether -mcmodel=medium code can use TOC pointer
6528 relative addressing for OP. This means the alignment of the TOC
6529 pointer must also be taken into account, and unfortunately that is
6530 only 8 bytes. */
6531
6532 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
6533 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
6534 #endif
6535
6536 static bool
6537 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
6538 machine_mode mode)
6539 {
6540 tree decl;
6541 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
6542
6543 if (GET_CODE (op) != SYMBOL_REF)
6544 return false;
6545
6546 dsize = GET_MODE_SIZE (mode);
6547 decl = SYMBOL_REF_DECL (op);
6548 if (!decl)
6549 {
6550 if (dsize == 0)
6551 return false;
6552
6553 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
6554 replacing memory addresses with an anchor plus offset. We
6555 could find the decl by rummaging around in the block->objects
6556 VEC for the given offset but that seems like too much work. */
6557 dalign = BITS_PER_UNIT;
6558 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
6559 && SYMBOL_REF_ANCHOR_P (op)
6560 && SYMBOL_REF_BLOCK (op) != NULL)
6561 {
6562 struct object_block *block = SYMBOL_REF_BLOCK (op);
6563
6564 dalign = block->alignment;
6565 offset += SYMBOL_REF_BLOCK_OFFSET (op);
6566 }
6567 else if (CONSTANT_POOL_ADDRESS_P (op))
6568 {
6569 /* It would be nice to have get_pool_align().. */
6570 machine_mode cmode = get_pool_mode (op);
6571
6572 dalign = GET_MODE_ALIGNMENT (cmode);
6573 }
6574 }
6575 else if (DECL_P (decl))
6576 {
6577 dalign = DECL_ALIGN (decl);
6578
6579 if (dsize == 0)
6580 {
6581 /* Allow BLKmode when the entire object is known to not
6582 cross a 32k boundary. */
6583 if (!DECL_SIZE_UNIT (decl))
6584 return false;
6585
6586 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
6587 return false;
6588
6589 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
6590 if (dsize > 32768)
6591 return false;
6592
6593 dalign /= BITS_PER_UNIT;
6594 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
6595 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
6596 return dalign >= dsize;
6597 }
6598 }
6599 else
6600 gcc_unreachable ();
6601
6602 /* Find how many bits of the alignment we know for this access. */
6603 dalign /= BITS_PER_UNIT;
6604 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
6605 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
6606 mask = dalign - 1;
6607 lsb = offset & -offset;
6608 mask &= lsb - 1;
6609 dalign = mask + 1;
6610
6611 return dalign >= dsize;
6612 }
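/* A worked example of the bit fiddling above, illustrative only: with
   dalign == 8 and offset == 20, lsb is 4 (the lowest set bit of the
   offset) and mask becomes 7 & 3 == 3, so dalign ends up as 4.  Only
   four bytes of alignment survive an offset of 20, so a 4-byte access
   passes while an 8-byte access is rejected.  */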
6613
6614 static bool
6615 constant_pool_expr_p (rtx op)
6616 {
6617 rtx base, offset;
6618
6619 split_const (op, &base, &offset);
6620 return (GET_CODE (base) == SYMBOL_REF
6621 && CONSTANT_POOL_ADDRESS_P (base)
6622 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
6623 }
6624
6625 static const_rtx tocrel_base, tocrel_offset;
6626
6627 /* Return true if OP is a toc pointer relative address (the output
6628 of create_TOC_reference). If STRICT, do not match high part or
6629 non-split -mcmodel=large/medium toc pointer relative addresses. */
6630
6631 bool
6632 toc_relative_expr_p (const_rtx op, bool strict)
6633 {
6634 if (!TARGET_TOC)
6635 return false;
6636
6637 if (TARGET_CMODEL != CMODEL_SMALL)
6638 {
6639 /* Only match the low part. */
6640 if (GET_CODE (op) == LO_SUM
6641 && REG_P (XEXP (op, 0))
6642 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict))
6643 op = XEXP (op, 1);
6644 else if (strict)
6645 return false;
6646 }
6647
6648 tocrel_base = op;
6649 tocrel_offset = const0_rtx;
6650 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
6651 {
6652 tocrel_base = XEXP (op, 0);
6653 tocrel_offset = XEXP (op, 1);
6654 }
6655
6656 return (GET_CODE (tocrel_base) == UNSPEC
6657 && XINT (tocrel_base, 1) == UNSPEC_TOCREL);
6658 }
6659
6660 /* Return true if X is a constant pool address, and also for cmodel=medium
6661 if X is a toc-relative address known to be offsettable within MODE. */
6662
6663 bool
6664 legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
6665 bool strict)
6666 {
6667 return (toc_relative_expr_p (x, strict)
6668 && (TARGET_CMODEL != CMODEL_MEDIUM
6669 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
6670 || mode == QImode
6671 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
6672 INTVAL (tocrel_offset), mode)));
6673 }
6674
6675 static bool
6676 legitimate_small_data_p (machine_mode mode, rtx x)
6677 {
6678 return (DEFAULT_ABI == ABI_V4
6679 && !flag_pic && !TARGET_TOC
6680 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
6681 && small_data_operand (x, mode));
6682 }
6683
6684 /* SPE offset addressing is limited to 5 bits' worth of doublewords. */
6685 #define SPE_CONST_OFFSET_OK(x) (((x) & ~0xf8) == 0)
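/* That is, the legal offsets are exactly 0, 8, 16, ..., 248: a 5-bit
   doubleword count scaled by 8 (0xf8 == 31 * 8).  Any offset with its
   low three bits set, or anything above bit 7, fails the mask test.  */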
6686
6687 bool
6688 rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
6689 bool strict, bool worst_case)
6690 {
6691 unsigned HOST_WIDE_INT offset;
6692 unsigned int extra;
6693
6694 if (GET_CODE (x) != PLUS)
6695 return false;
6696 if (!REG_P (XEXP (x, 0)))
6697 return false;
6698 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
6699 return false;
6700 if (!reg_offset_addressing_ok_p (mode))
6701 return virtual_stack_registers_memory_p (x);
6702 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
6703 return true;
6704 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
6705 return false;
6706
6707 offset = INTVAL (XEXP (x, 1));
6708 extra = 0;
6709 switch (mode)
6710 {
6711 case V4HImode:
6712 case V2SImode:
6713 case V1DImode:
6714 case V2SFmode:
6715 /* SPE vector modes. */
6716 return SPE_CONST_OFFSET_OK (offset);
6717
6718 case DFmode:
6719 case DDmode:
6720 case DImode:
6721 /* On e500v2, we may have:
6722
6723 (subreg:DF (mem:DI (plus (reg) (const_int))) 0).
6724
6725 Which gets addressed with evldd instructions. */
6726 if (TARGET_E500_DOUBLE)
6727 return SPE_CONST_OFFSET_OK (offset);
6728
6729 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
6730 addressing. */
6731 if (VECTOR_MEM_VSX_P (mode))
6732 return false;
6733
6734 if (!worst_case)
6735 break;
6736 if (!TARGET_POWERPC64)
6737 extra = 4;
6738 else if (offset & 3)
6739 return false;
6740 break;
6741
6742 case TFmode:
6743 if (TARGET_E500_DOUBLE)
6744 return (SPE_CONST_OFFSET_OK (offset)
6745 && SPE_CONST_OFFSET_OK (offset + 8));
6746 /* fall through */
6747
6748 case TDmode:
6749 case TImode:
6750 case PTImode:
6751 extra = 8;
6752 if (!worst_case)
6753 break;
6754 if (!TARGET_POWERPC64)
6755 extra = 12;
6756 else if (offset & 3)
6757 return false;
6758 break;
6759
6760 default:
6761 break;
6762 }
6763
6764 offset += 0x8000;
6765 return offset < 0x10000 - extra;
6766 }
6767
6768 bool
6769 legitimate_indexed_address_p (rtx x, int strict)
6770 {
6771 rtx op0, op1;
6772
6773 if (GET_CODE (x) != PLUS)
6774 return false;
6775
6776 op0 = XEXP (x, 0);
6777 op1 = XEXP (x, 1);
6778
6779 /* Recognize the rtl generated by reload which we know will later be
6780 replaced with proper base and index regs. */
6781 if (!strict
6782 && reload_in_progress
6783 && (REG_P (op0) || GET_CODE (op0) == PLUS)
6784 && REG_P (op1))
6785 return true;
6786
6787 return (REG_P (op0) && REG_P (op1)
6788 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
6789 && INT_REG_OK_FOR_INDEX_P (op1, strict))
6790 || (INT_REG_OK_FOR_BASE_P (op1, strict)
6791 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
6792 }
6793
6794 bool
6795 avoiding_indexed_address_p (machine_mode mode)
6796 {
6797 /* Avoid indexed addressing for modes that have non-indexed
6798 load/store instruction forms. */
6799 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
6800 }
6801
6802 bool
6803 legitimate_indirect_address_p (rtx x, int strict)
6804 {
6805 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
6806 }
6807
6808 bool
6809 macho_lo_sum_memory_operand (rtx x, machine_mode mode)
6810 {
6811 if (!TARGET_MACHO || !flag_pic
6812 || mode != SImode || GET_CODE (x) != MEM)
6813 return false;
6814 x = XEXP (x, 0);
6815
6816 if (GET_CODE (x) != LO_SUM)
6817 return false;
6818 if (GET_CODE (XEXP (x, 0)) != REG)
6819 return false;
6820 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
6821 return false;
6822 x = XEXP (x, 1);
6823
6824 return CONSTANT_P (x);
6825 }
6826
6827 static bool
6828 legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
6829 {
6830 if (GET_CODE (x) != LO_SUM)
6831 return false;
6832 if (GET_CODE (XEXP (x, 0)) != REG)
6833 return false;
6834 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
6835 return false;
6836 /* Restrict addressing for DI because of our SUBREG hackery. */
6837 if (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
6838 return false;
6839 x = XEXP (x, 1);
6840
6841 if (TARGET_ELF || TARGET_MACHO)
6842 {
6843 bool large_toc_ok;
6844
6845 if (DEFAULT_ABI == ABI_V4 && flag_pic)
6846 return false;
6847 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS, since that hook usually
6848 calls push_reload from the old reload pass. LEGITIMIZE_RELOAD_ADDRESS
6849 recognizes some LO_SUM addresses as valid although this
6850 function says the opposite. In most cases LRA can generate
6851 correct code for address reloads through its own transformations;
6852 only a few LO_SUM cases defeat it. So we need to add code here,
6853 analogous to that in rs6000_legitimize_reload_address for
6854 LO_SUM, saying that such addresses are still valid. */
6855 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
6856 && small_toc_ref (x, VOIDmode));
6857 if (TARGET_TOC && ! large_toc_ok)
6858 return false;
6859 if (GET_MODE_NUNITS (mode) != 1)
6860 return false;
6861 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
6862 && !(/* ??? Assume floating point reg based on mode? */
6863 TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
6864 && (mode == DFmode || mode == DDmode)))
6865 return false;
6866
6867 return CONSTANT_P (x) || large_toc_ok;
6868 }
6869
6870 return false;
6871 }
6872
6873
6874 /* Try machine-dependent ways of modifying an illegitimate address
6875 to be legitimate. If we find one, return the new, valid address.
6876 This is used from only one place: `memory_address' in explow.c.
6877
6878 OLDX is the address as it was before break_out_memory_refs was
6879 called. In some cases it is useful to look at this to decide what
6880 needs to be done.
6881
6882 It is always safe for this function to do nothing. It exists to
6883 recognize opportunities to optimize the output.
6884
6885 On RS/6000, first check for the sum of a register with a constant
6886 integer that is out of range. If so, generate code to add the
6887 constant with the low-order 16 bits masked to the register and force
6888 this result into another register (this can be done with `cau').
6889 Then generate an address of REG+(CONST&0xffff), allowing for the
6890 possibility of bit 16 being a one.
6891
6892 Then check for the sum of a register and something not constant, try to
6893 load the other things into a register and return the sum. */
6894
6895 static rtx
6896 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
6897 machine_mode mode)
6898 {
6899 unsigned int extra;
6900
6901 if (!reg_offset_addressing_ok_p (mode))
6902 {
6903 if (virtual_stack_registers_memory_p (x))
6904 return x;
6905
6906 /* In theory we should not be seeing addresses of the form reg+0,
6907 but just in case it is generated, optimize it away. */
6908 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
6909 return force_reg (Pmode, XEXP (x, 0));
6910
6911 /* For TImode with load/store quad, restrict addresses to just a single
6912 pointer, so it works with both GPRs and VSX registers. */
6913 /* Make sure both operands are registers. */
6914 else if (GET_CODE (x) == PLUS
6915 && (mode != TImode || !TARGET_QUAD_MEMORY))
6916 return gen_rtx_PLUS (Pmode,
6917 force_reg (Pmode, XEXP (x, 0)),
6918 force_reg (Pmode, XEXP (x, 1)));
6919 else
6920 return force_reg (Pmode, x);
6921 }
6922 if (GET_CODE (x) == SYMBOL_REF)
6923 {
6924 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
6925 if (model != 0)
6926 return rs6000_legitimize_tls_address (x, model);
6927 }
6928
6929 extra = 0;
6930 switch (mode)
6931 {
6932 case TFmode:
6933 case TDmode:
6934 case TImode:
6935 case PTImode:
6936 /* As in legitimate_offset_address_p we do not assume
6937 worst-case. The mode here is just a hint as to the registers
6938 used. A TImode is usually in gprs, but may actually be in
6939 fprs. Leave worst-case scenario for reload to handle via
6940 insn constraints. PTImode is only GPRs. */
6941 extra = 8;
6942 break;
6943 default:
6944 break;
6945 }
6946
6947 if (GET_CODE (x) == PLUS
6948 && GET_CODE (XEXP (x, 0)) == REG
6949 && GET_CODE (XEXP (x, 1)) == CONST_INT
6950 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
6951 >= 0x10000 - extra)
6952 && !(SPE_VECTOR_MODE (mode)
6953 || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)))
6954 {
6955 HOST_WIDE_INT high_int, low_int;
6956 rtx sum;
6957 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
6958 if (low_int >= 0x8000 - extra)
6959 low_int = 0;
6960 high_int = INTVAL (XEXP (x, 1)) - low_int;
6961 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
6962 GEN_INT (high_int)), 0);
6963 return plus_constant (Pmode, sum, low_int);
6964 }
6965 else if (GET_CODE (x) == PLUS
6966 && GET_CODE (XEXP (x, 0)) == REG
6967 && GET_CODE (XEXP (x, 1)) != CONST_INT
6968 && GET_MODE_NUNITS (mode) == 1
6969 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
6970 || (/* ??? Assume floating point reg based on mode? */
6971 (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
6972 && (mode == DFmode || mode == DDmode)))
6973 && !avoiding_indexed_address_p (mode))
6974 {
6975 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
6976 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
6977 }
6978 else if (SPE_VECTOR_MODE (mode)
6979 || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD))
6980 {
6981 if (mode == DImode)
6982 return x;
6983 /* We accept [reg + reg] and [reg + OFFSET]. */
6984
6985 if (GET_CODE (x) == PLUS)
6986 {
6987 rtx op1 = XEXP (x, 0);
6988 rtx op2 = XEXP (x, 1);
6989 rtx y;
6990
6991 op1 = force_reg (Pmode, op1);
6992
6993 if (GET_CODE (op2) != REG
6994 && (GET_CODE (op2) != CONST_INT
6995 || !SPE_CONST_OFFSET_OK (INTVAL (op2))
6996 || (GET_MODE_SIZE (mode) > 8
6997 && !SPE_CONST_OFFSET_OK (INTVAL (op2) + 8))))
6998 op2 = force_reg (Pmode, op2);
6999
7000 /* We can't always do [reg + reg] for these, because [reg +
7001 reg + offset] is not a legitimate addressing mode. */
7002 y = gen_rtx_PLUS (Pmode, op1, op2);
7003
7004 if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
7005 return force_reg (Pmode, y);
7006 else
7007 return y;
7008 }
7009
7010 return force_reg (Pmode, x);
7011 }
7012 else if ((TARGET_ELF
7013 #if TARGET_MACHO
7014 || !MACHO_DYNAMIC_NO_PIC_P
7015 #endif
7016 )
7017 && TARGET_32BIT
7018 && TARGET_NO_TOC
7019 && ! flag_pic
7020 && GET_CODE (x) != CONST_INT
7021 && GET_CODE (x) != CONST_WIDE_INT
7022 && GET_CODE (x) != CONST_DOUBLE
7023 && CONSTANT_P (x)
7024 && GET_MODE_NUNITS (mode) == 1
7025 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
7026 || (/* ??? Assume floating point reg based on mode? */
7027 (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
7028 && (mode == DFmode || mode == DDmode))))
7029 {
7030 rtx reg = gen_reg_rtx (Pmode);
7031 if (TARGET_ELF)
7032 emit_insn (gen_elf_high (reg, x));
7033 else
7034 emit_insn (gen_macho_high (reg, x));
7035 return gen_rtx_LO_SUM (Pmode, reg, x);
7036 }
7037 else if (TARGET_TOC
7038 && GET_CODE (x) == SYMBOL_REF
7039 && constant_pool_expr_p (x)
7040 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
7041 return create_TOC_reference (x, NULL_RTX);
7042 else
7043 return x;
7044 }
7045
7046 /* Debug version of rs6000_legitimize_address. */
7047 static rtx
7048 rs6000_debug_legitimize_address (rtx x, rtx oldx, machine_mode mode)
7049 {
7050 rtx ret;
7051 rtx_insn *insns;
7052
7053 start_sequence ();
7054 ret = rs6000_legitimize_address (x, oldx, mode);
7055 insns = get_insns ();
7056 end_sequence ();
7057
7058 if (ret != x)
7059 {
7060 fprintf (stderr,
7061 "\nrs6000_legitimize_address: mode %s, old code %s, "
7062 "new code %s, modified\n",
7063 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
7064 GET_RTX_NAME (GET_CODE (ret)));
7065
7066 fprintf (stderr, "Original address:\n");
7067 debug_rtx (x);
7068
7069 fprintf (stderr, "oldx:\n");
7070 debug_rtx (oldx);
7071
7072 fprintf (stderr, "New address:\n");
7073 debug_rtx (ret);
7074
7075 if (insns)
7076 {
7077 fprintf (stderr, "Insns added:\n");
7078 debug_rtx_list (insns, 20);
7079 }
7080 }
7081 else
7082 {
7083 fprintf (stderr,
7084 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
7085 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
7086
7087 debug_rtx (x);
7088 }
7089
7090 if (insns)
7091 emit_insn (insns);
7092
7093 return ret;
7094 }
7095
7096 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
7097 We need to emit DTP-relative relocations. */
7098
7099 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
7100 static void
7101 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
7102 {
7103 switch (size)
7104 {
7105 case 4:
7106 fputs ("\t.long\t", file);
7107 break;
7108 case 8:
7109 fputs (DOUBLE_INT_ASM_OP, file);
7110 break;
7111 default:
7112 gcc_unreachable ();
7113 }
7114 output_addr_const (file, x);
7115 fputs ("@dtprel+0x8000", file);
7116 }
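
/* For example (illustrative): with SIZE == 4 and X a SYMBOL_REF for
   "x", this emits
       .long   x@dtprel+0x8000
   while for SIZE == 8 the 8-byte directive is spelled per
   DOUBLE_INT_ASM_OP (".quad" or ".llong", depending on the target).  */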
7117
7118 /* Return true if X is a symbol that refers to real (rather than emulated)
7119 TLS. */
7120
7121 static bool
7122 rs6000_real_tls_symbol_ref_p (rtx x)
7123 {
7124 return (GET_CODE (x) == SYMBOL_REF
7125 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
7126 }
7127
7128 /* In the name of slightly smaller debug output, and to cater to
7129 general assembler lossage, recognize various UNSPEC sequences
7130 and turn them back into a direct symbol reference. */
7131
7132 static rtx
7133 rs6000_delegitimize_address (rtx orig_x)
7134 {
7135 rtx x, y, offset;
7136
7137 orig_x = delegitimize_mem_from_attrs (orig_x);
7138 x = orig_x;
7139 if (MEM_P (x))
7140 x = XEXP (x, 0);
7141
7142 y = x;
7143 if (TARGET_CMODEL != CMODEL_SMALL
7144 && GET_CODE (y) == LO_SUM)
7145 y = XEXP (y, 1);
7146
7147 offset = NULL_RTX;
7148 if (GET_CODE (y) == PLUS
7149 && GET_MODE (y) == Pmode
7150 && CONST_INT_P (XEXP (y, 1)))
7151 {
7152 offset = XEXP (y, 1);
7153 y = XEXP (y, 0);
7154 }
7155
7156 if (GET_CODE (y) == UNSPEC
7157 && XINT (y, 1) == UNSPEC_TOCREL)
7158 {
7159 y = XVECEXP (y, 0, 0);
7160
7161 #ifdef HAVE_AS_TLS
7162 /* Do not associate thread-local symbols with the original
7163 constant pool symbol. */
7164 if (TARGET_XCOFF
7165 && GET_CODE (y) == SYMBOL_REF
7166 && CONSTANT_POOL_ADDRESS_P (y)
7167 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
7168 return orig_x;
7169 #endif
7170
7171 if (offset != NULL_RTX)
7172 y = gen_rtx_PLUS (Pmode, y, offset);
7173 if (!MEM_P (orig_x))
7174 return y;
7175 else
7176 return replace_equiv_address_nv (orig_x, y);
7177 }
7178
7179 if (TARGET_MACHO
7180 && GET_CODE (orig_x) == LO_SUM
7181 && GET_CODE (XEXP (orig_x, 1)) == CONST)
7182 {
7183 y = XEXP (XEXP (orig_x, 1), 0);
7184 if (GET_CODE (y) == UNSPEC
7185 && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
7186 return XVECEXP (y, 0, 0);
7187 }
7188
7189 return orig_x;
7190 }
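
/* E.g. a TOC-relative reference such as
       (mem (lo_sum (reg) (unspec [(symbol_ref "x")] UNSPEC_TOCREL)))
   delegitimizes back to a mem of plain (symbol_ref "x"), giving the
   debug info a direct symbol reference again.  */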
7191
7192 /* Return true if X shouldn't be emitted into the debug info.
7193 The linker doesn't like .toc section references from
7194 .debug_* sections, so reject .toc section symbols. */
7195
7196 static bool
7197 rs6000_const_not_ok_for_debug_p (rtx x)
7198 {
7199 if (GET_CODE (x) == SYMBOL_REF
7200 && CONSTANT_POOL_ADDRESS_P (x))
7201 {
7202 rtx c = get_pool_constant (x);
7203 machine_mode cmode = get_pool_mode (x);
7204 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
7205 return true;
7206 }
7207
7208 return false;
7209 }
7210
7211 /* Construct the SYMBOL_REF for the tls_get_addr function. */
7212
7213 static GTY(()) rtx rs6000_tls_symbol;
7214 static rtx
7215 rs6000_tls_get_addr (void)
7216 {
7217 if (!rs6000_tls_symbol)
7218 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
7219
7220 return rs6000_tls_symbol;
7221 }
7222
7223 /* Construct the SYMBOL_REF for TLS GOT references. */
7224
7225 static GTY(()) rtx rs6000_got_symbol;
7226 static rtx
7227 rs6000_got_sym (void)
7228 {
7229 if (!rs6000_got_symbol)
7230 {
7231 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
7232 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
7233 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
7234 }
7235
7236 return rs6000_got_symbol;
7237 }
7238
7239 /* AIX Thread-Local Address support. */
7240
7241 static rtx
7242 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
7243 {
7244 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
7245 const char *name;
7246 char *tlsname;
7247
7248 name = XSTR (addr, 0);
7249 /* Append TLS CSECT qualifier, unless the symbol already is qualified
7250 or the symbol will be in TLS private data section. */
7251 if (name[strlen (name) - 1] != ']'
7252 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
7253 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
7254 {
7255 tlsname = XALLOCAVEC (char, strlen (name) + 4);
7256 strcpy (tlsname, name);
7257 strcat (tlsname,
7258 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
7259 tlsaddr = copy_rtx (addr);
7260 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
7261 }
7262 else
7263 tlsaddr = addr;
7264
7265 /* Place addr into TOC constant pool. */
7266 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
7267
7268 /* Output the TOC entry and create the MEM referencing the value. */
7269 if (constant_pool_expr_p (XEXP (sym, 0))
7270 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
7271 {
7272 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
7273 mem = gen_const_mem (Pmode, tocref);
7274 set_mem_alias_set (mem, get_TOC_alias_set ());
7275 }
7276 else
7277 return sym;
7278
7279 /* Use global-dynamic for local-dynamic. */
7280 if (model == TLS_MODEL_GLOBAL_DYNAMIC
7281 || model == TLS_MODEL_LOCAL_DYNAMIC)
7282 {
7283 /* Create new TOC reference for @m symbol. */
7284 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
7285 tlsname = XALLOCAVEC (char, strlen (name) + 1);
7286 strcpy (tlsname, "*LCM");
7287 strcat (tlsname, name + 3);
7288 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
7289 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
7290 tocref = create_TOC_reference (modaddr, NULL_RTX);
7291 rtx modmem = gen_const_mem (Pmode, tocref);
7292 set_mem_alias_set (modmem, get_TOC_alias_set ());
7293
7294 rtx modreg = gen_reg_rtx (Pmode);
7295 emit_insn (gen_rtx_SET (modreg, modmem));
7296
7297 tmpreg = gen_reg_rtx (Pmode);
7298 emit_insn (gen_rtx_SET (tmpreg, mem));
7299
7300 dest = gen_reg_rtx (Pmode);
7301 if (TARGET_32BIT)
7302 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
7303 else
7304 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
7305 return dest;
7306 }
7307 /* Obtain the TLS pointer: a call on 32-bit, GPR 13 on 64-bit.  */
7308 else if (TARGET_32BIT)
7309 {
7310 tlsreg = gen_reg_rtx (SImode);
7311 emit_insn (gen_tls_get_tpointer (tlsreg));
7312 }
7313 else
7314 tlsreg = gen_rtx_REG (DImode, 13);
7315
7316 /* Load the TOC value into temporary register. */
7317 tmpreg = gen_reg_rtx (Pmode);
7318 emit_insn (gen_rtx_SET (tmpreg, mem));
7319 set_unique_reg_note (get_last_insn (), REG_EQUAL,
7320 gen_rtx_MINUS (Pmode, addr, tlsreg));
7321
7322 /* Add TOC symbol value to TLS pointer. */
7323 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
7324
7325 return dest;
7326 }
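
/* Example of the CSECT renaming above (a sketch): a public
   thread-local symbol "foo" becomes "foo[TL]", a BSS-initialized one
   becomes "foo[UL]", and a name already ending in ']' is used
   unchanged.  */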
7327
7328 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
7329 this (thread-local) address. */
7330
7331 static rtx
7332 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
7333 {
7334 rtx dest, insn;
7335
7336 if (TARGET_XCOFF)
7337 return rs6000_legitimize_tls_address_aix (addr, model);
7338
7339 dest = gen_reg_rtx (Pmode);
7340 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
7341 {
7342 rtx tlsreg;
7343
7344 if (TARGET_64BIT)
7345 {
7346 tlsreg = gen_rtx_REG (Pmode, 13);
7347 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
7348 }
7349 else
7350 {
7351 tlsreg = gen_rtx_REG (Pmode, 2);
7352 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
7353 }
7354 emit_insn (insn);
7355 }
7356 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
7357 {
7358 rtx tlsreg, tmp;
7359
7360 tmp = gen_reg_rtx (Pmode);
7361 if (TARGET_64BIT)
7362 {
7363 tlsreg = gen_rtx_REG (Pmode, 13);
7364 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
7365 }
7366 else
7367 {
7368 tlsreg = gen_rtx_REG (Pmode, 2);
7369 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
7370 }
7371 emit_insn (insn);
7372 if (TARGET_64BIT)
7373 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
7374 else
7375 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
7376 emit_insn (insn);
7377 }
7378 else
7379 {
7380 rtx r3, got, tga, tmp1, tmp2, call_insn;
7381
7382 /* We currently use relocations like @got@tlsgd for tls, which
7383 means the linker will handle allocation of tls entries, placing
7384 them in the .got section. So use a pointer to the .got section,
7385 not one to secondary TOC sections used by 64-bit -mminimal-toc,
7386 or to secondary GOT sections used by 32-bit -fPIC. */
7387 if (TARGET_64BIT)
7388 got = gen_rtx_REG (Pmode, 2);
7389 else
7390 {
7391 if (flag_pic == 1)
7392 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
7393 else
7394 {
7395 rtx gsym = rs6000_got_sym ();
7396 got = gen_reg_rtx (Pmode);
7397 if (flag_pic == 0)
7398 rs6000_emit_move (got, gsym, Pmode);
7399 else
7400 {
7401 rtx mem, lab, last;
7402
7403 tmp1 = gen_reg_rtx (Pmode);
7404 tmp2 = gen_reg_rtx (Pmode);
7405 mem = gen_const_mem (Pmode, tmp1);
7406 lab = gen_label_rtx ();
7407 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
7408 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
7409 if (TARGET_LINK_STACK)
7410 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
7411 emit_move_insn (tmp2, mem);
7412 last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
7413 set_unique_reg_note (last, REG_EQUAL, gsym);
7414 }
7415 }
7416 }
7417
7418 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
7419 {
7420 tga = rs6000_tls_get_addr ();
7421 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
7422 1, const0_rtx, Pmode);
7423
7424 r3 = gen_rtx_REG (Pmode, 3);
7425 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
7426 {
7427 if (TARGET_64BIT)
7428 insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
7429 else
7430 insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
7431 }
7432 else if (DEFAULT_ABI == ABI_V4)
7433 insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
7434 else
7435 gcc_unreachable ();
7436 call_insn = last_call_insn ();
7437 PATTERN (call_insn) = insn;
7438 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
7439 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
7440 pic_offset_table_rtx);
7441 }
7442 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
7443 {
7444 tga = rs6000_tls_get_addr ();
7445 tmp1 = gen_reg_rtx (Pmode);
7446 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
7447 1, const0_rtx, Pmode);
7448
7449 r3 = gen_rtx_REG (Pmode, 3);
7450 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
7451 {
7452 if (TARGET_64BIT)
7453 insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
7454 else
7455 insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
7456 }
7457 else if (DEFAULT_ABI == ABI_V4)
7458 insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
7459 else
7460 gcc_unreachable ();
7461 call_insn = last_call_insn ();
7462 PATTERN (call_insn) = insn;
7463 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
7464 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
7465 pic_offset_table_rtx);
7466
7467 if (rs6000_tls_size == 16)
7468 {
7469 if (TARGET_64BIT)
7470 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
7471 else
7472 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
7473 }
7474 else if (rs6000_tls_size == 32)
7475 {
7476 tmp2 = gen_reg_rtx (Pmode);
7477 if (TARGET_64BIT)
7478 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
7479 else
7480 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
7481 emit_insn (insn);
7482 if (TARGET_64BIT)
7483 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
7484 else
7485 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
7486 }
7487 else
7488 {
7489 tmp2 = gen_reg_rtx (Pmode);
7490 if (TARGET_64BIT)
7491 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
7492 else
7493 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
7494 emit_insn (insn);
7495 insn = gen_rtx_SET (dest, gen_rtx_PLUS (Pmode, tmp2, tmp1));
7496 }
7497 emit_insn (insn);
7498 }
7499 else
7500 {
7501 /* IE, or 64-bit offset LE. */
7502 tmp2 = gen_reg_rtx (Pmode);
7503 if (TARGET_64BIT)
7504 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
7505 else
7506 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
7507 emit_insn (insn);
7508 if (TARGET_64BIT)
7509 insn = gen_tls_tls_64 (dest, tmp2, addr);
7510 else
7511 insn = gen_tls_tls_32 (dest, tmp2, addr);
7512 emit_insn (insn);
7513 }
7514 }
7515
7516 return dest;
7517 }
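
/* As an illustration (a sketch, not verified output), the 64-bit
   local-exec case with rs6000_tls_size == 16 reduces to a single
       addi dest,13,x@tprel
   while the 32-bit-offset form uses a high/low pair:
       addis tmp,13,x@tprel@ha
       addi  dest,tmp,x@tprel@l  */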
7518
7519 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
7520
7521 static bool
7522 rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
7523 {
7524 if (GET_CODE (x) == HIGH
7525 && GET_CODE (XEXP (x, 0)) == UNSPEC)
7526 return true;
7527
7528 /* A TLS symbol in the TOC cannot contain a sum. */
7529 if (GET_CODE (x) == CONST
7530 && GET_CODE (XEXP (x, 0)) == PLUS
7531 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
7532 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
7533 return true;
7534
7535 /* Do not place an ELF TLS symbol in the constant pool. */
7536 return TARGET_ELF && tls_referenced_p (x);
7537 }
7538
7539 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
7540 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
7541 can be addressed relative to the toc pointer. */
7542
7543 static bool
7544 use_toc_relative_ref (rtx sym, machine_mode mode)
7545 {
7546 return ((constant_pool_expr_p (sym)
7547 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
7548 get_pool_mode (sym)))
7549 || (TARGET_CMODEL == CMODEL_MEDIUM
7550 && SYMBOL_REF_LOCAL_P (sym)
7551 && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
7552 }
7553
7554 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
7555 replace the input X, or the original X if no replacement is called for.
7556 The output parameter *WIN is 1 if the calling macro should goto WIN,
7557 0 if it should not.
7558
7559 For RS/6000, we wish to handle large displacements off a base
7560 register by splitting the addend across an addis and the mem insn.
7561 This cuts the number of extra insns needed from 3 to 1.
7562
7563 On Darwin, we use this to generate code for floating point constants.
7564 A movsf_low is generated so we wind up with 2 instructions rather than 3.
7565 The Darwin code is inside #if TARGET_MACHO because only then are the
7566 machopic_* functions defined. */
7567 static rtx
7568 rs6000_legitimize_reload_address (rtx x, machine_mode mode,
7569 int opnum, int type,
7570 int ind_levels ATTRIBUTE_UNUSED, int *win)
7571 {
7572 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
7573
7574 /* Nasty hack for vsx_splat_V2DF/V2DI load from mem, which takes a
7575 DFmode/DImode MEM. */
7576 if (reg_offset_p
7577 && opnum == 1
7578 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
7579 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)))
7580 reg_offset_p = false;
7581
7582 /* We must recognize output that we have already generated ourselves. */
7583 if (GET_CODE (x) == PLUS
7584 && GET_CODE (XEXP (x, 0)) == PLUS
7585 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
7586 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
7587 && GET_CODE (XEXP (x, 1)) == CONST_INT)
7588 {
7589 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7590 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
7591 opnum, (enum reload_type) type);
7592 *win = 1;
7593 return x;
7594 }
7595
7596 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
7597 if (GET_CODE (x) == LO_SUM
7598 && GET_CODE (XEXP (x, 0)) == HIGH)
7599 {
7600 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7601 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
7602 opnum, (enum reload_type) type);
7603 *win = 1;
7604 return x;
7605 }
7606
7607 #if TARGET_MACHO
7608 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
7609 && GET_CODE (x) == LO_SUM
7610 && GET_CODE (XEXP (x, 0)) == PLUS
7611 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
7612 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
7613 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
7614 && machopic_operand_p (XEXP (x, 1)))
7615 {
7616 /* Result of previous invocation of this function on Darwin
7617 floating point constant. */
7618 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7619 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
7620 opnum, (enum reload_type) type);
7621 *win = 1;
7622 return x;
7623 }
7624 #endif
7625
7626 if (TARGET_CMODEL != CMODEL_SMALL
7627 && reg_offset_p
7628 && small_toc_ref (x, VOIDmode))
7629 {
7630 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
7631 x = gen_rtx_LO_SUM (Pmode, hi, x);
7632 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7633 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
7634 opnum, (enum reload_type) type);
7635 *win = 1;
7636 return x;
7637 }
7638
7639 if (GET_CODE (x) == PLUS
7640 && GET_CODE (XEXP (x, 0)) == REG
7641 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
7642 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
7643 && GET_CODE (XEXP (x, 1)) == CONST_INT
7644 && reg_offset_p
7645 && !SPE_VECTOR_MODE (mode)
7646 && !(TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
7647 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
7648 {
7649 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
7650 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
7651 HOST_WIDE_INT high
7652 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
7653
7654 /* Check for 32-bit overflow. */
7655 if (high + low != val)
7656 {
7657 *win = 0;
7658 return x;
7659 }
7660
7661 /* Reload the high part into a base reg; leave the low part
7662 in the mem directly. */
7663
7664 x = gen_rtx_PLUS (GET_MODE (x),
7665 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
7666 GEN_INT (high)),
7667 GEN_INT (low));
7668
7669 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7670 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
7671 opnum, (enum reload_type) type);
7672 *win = 1;
7673 return x;
7674 }
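
  /* E.g. (a sketch) val = 0x7fffffff gives low = -1 and
     high = ((0x80000000 & 0xffffffff) ^ 0x80000000) - 0x80000000
          = -0x80000000,
     so high + low != val and the overflow check above correctly
     refuses to split an offset whose high part cannot be produced by
     a sign-extended addis.  */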
7675
7676 if (GET_CODE (x) == SYMBOL_REF
7677 && reg_offset_p
7678 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
7679 && !SPE_VECTOR_MODE (mode)
7680 #if TARGET_MACHO
7681 && DEFAULT_ABI == ABI_DARWIN
7682 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
7683 && machopic_symbol_defined_p (x)
7684 #else
7685 && DEFAULT_ABI == ABI_V4
7686 && !flag_pic
7687 #endif
7688 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
7689 The same goes for DImode without 64-bit gprs and DFmode and DDmode
7690 without fprs.
7691 ??? Assume floating point reg based on mode?  This assumption is
7692 violated by e.g. a powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
7693 where reload ends up doing a DFmode load of a constant from
7694 mem using two gprs. Unfortunately, at this point reload
7695 hasn't yet selected regs so poking around in reload data
7696 won't help and even if we could figure out the regs reliably,
7697 we'd still want to allow this transformation when the mem is
7698 naturally aligned. Since we say the address is good here, we
7699 can't disable offsets from LO_SUMs in mem_operand_gpr.
7700 FIXME: Allow offset from lo_sum for other modes too, when
7701 mem is sufficiently aligned.
7702
7703 Also disallow this if the type can go in VMX/Altivec registers, since
7704 those registers do not have d-form (reg+offset) address modes. */
7705 && !reg_addr[mode].scalar_in_vmx_p
7706 && mode != TFmode
7707 && mode != TDmode
7708 && (mode != TImode || !TARGET_VSX_TIMODE)
7709 && mode != PTImode
7710 && (mode != DImode || TARGET_POWERPC64)
7711 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
7712 || (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)))
7713 {
7714 #if TARGET_MACHO
7715 if (flag_pic)
7716 {
7717 rtx offset = machopic_gen_offset (x);
7718 x = gen_rtx_LO_SUM (GET_MODE (x),
7719 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
7720 gen_rtx_HIGH (Pmode, offset)), offset);
7721 }
7722 else
7723 #endif
7724 x = gen_rtx_LO_SUM (GET_MODE (x),
7725 gen_rtx_HIGH (Pmode, x), x);
7726
7727 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7728 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
7729 opnum, (enum reload_type) type);
7730 *win = 1;
7731 return x;
7732 }
7733
7734 /* Reload an offset address wrapped by an AND that represents the
7735 masking of the lower bits. Strip the outer AND and let reload
7736 convert the offset address into an indirect address. For VSX,
7737 force reload to create the address with an AND in a separate
7738 register, because we can't guarantee an altivec register will
7739 be used. */
7740 if (VECTOR_MEM_ALTIVEC_P (mode)
7741 && GET_CODE (x) == AND
7742 && GET_CODE (XEXP (x, 0)) == PLUS
7743 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
7744 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
7745 && GET_CODE (XEXP (x, 1)) == CONST_INT
7746 && INTVAL (XEXP (x, 1)) == -16)
7747 {
7748 x = XEXP (x, 0);
7749 *win = 1;
7750 return x;
7751 }
7752
7753 if (TARGET_TOC
7754 && reg_offset_p
7755 && GET_CODE (x) == SYMBOL_REF
7756 && use_toc_relative_ref (x, mode))
7757 {
7758 x = create_TOC_reference (x, NULL_RTX);
7759 if (TARGET_CMODEL != CMODEL_SMALL)
7760 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7761 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
7762 opnum, (enum reload_type) type);
7763 *win = 1;
7764 return x;
7765 }
7766 *win = 0;
7767 return x;
7768 }
7769
7770 /* Debug version of rs6000_legitimize_reload_address. */
7771 static rtx
7772 rs6000_debug_legitimize_reload_address (rtx x, machine_mode mode,
7773 int opnum, int type,
7774 int ind_levels, int *win)
7775 {
7776 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
7777 ind_levels, win);
7778 fprintf (stderr,
7779 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
7780 "type = %d, ind_levels = %d, win = %d, original addr:\n",
7781 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
7782 debug_rtx (x);
7783
7784 if (x == ret)
7785 fprintf (stderr, "Same address returned\n");
7786 else if (!ret)
7787 fprintf (stderr, "NULL returned\n");
7788 else
7789 {
7790 fprintf (stderr, "New address:\n");
7791 debug_rtx (ret);
7792 }
7793
7794 return ret;
7795 }
7796
7797 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
7798 that is a valid memory address for an instruction.
7799 The MODE argument is the machine mode for the MEM expression
7800 that wants to use this address.
7801
7802 On the RS/6000, there are four valid addresses: a SYMBOL_REF that
7803 refers to a constant pool entry of an address (or the sum of it
7804 plus a constant), a short (16-bit signed) constant plus a register,
7805 the sum of two registers, or a register indirect, possibly with an
7806 auto-increment. For DFmode, DDmode and DImode with a constant plus
7807 register, we must ensure that both words are addressable, or on
7808 PowerPC64 that the offset is word-aligned.
7809
7810 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
7811 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
7812 because adjacent memory cells are accessed by adding word-sized offsets
7813 during assembly output. */
7814 static bool
7815 rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
7816 {
7817 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
7818
7819 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
7820 if (VECTOR_MEM_ALTIVEC_P (mode)
7821 && GET_CODE (x) == AND
7822 && GET_CODE (XEXP (x, 1)) == CONST_INT
7823 && INTVAL (XEXP (x, 1)) == -16)
7824 x = XEXP (x, 0);
7825
7826 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
7827 return 0;
7828 if (legitimate_indirect_address_p (x, reg_ok_strict))
7829 return 1;
7830 if (TARGET_UPDATE
7831 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
7832 && mode_supports_pre_incdec_p (mode)
7833 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
7834 return 1;
7835 if (virtual_stack_registers_memory_p (x))
7836 return 1;
7837 if (reg_offset_p && legitimate_small_data_p (mode, x))
7838 return 1;
7839 if (reg_offset_p
7840 && legitimate_constant_pool_address_p (x, mode,
7841 reg_ok_strict || lra_in_progress))
7842 return 1;
7843 /* For TImode, if we have load/store quad and TImode in VSX registers, only
7844 allow register indirect addresses. This will allow the values to go in
7845 either GPRs or VSX registers without reloading. The vector types would
7846 tend to go into VSX registers, so we allow REG+REG, while TImode seems
7847 somewhat split, in that some uses are GPR based, and some VSX based. */
7848 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX_TIMODE)
7849 return 0;
7850 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
7851 if (! reg_ok_strict
7852 && reg_offset_p
7853 && GET_CODE (x) == PLUS
7854 && GET_CODE (XEXP (x, 0)) == REG
7855 && (XEXP (x, 0) == virtual_stack_vars_rtx
7856 || XEXP (x, 0) == arg_pointer_rtx)
7857 && GET_CODE (XEXP (x, 1)) == CONST_INT)
7858 return 1;
7859 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
7860 return 1;
7861 if (mode != TFmode
7862 && mode != TDmode
7863 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
7864 || TARGET_POWERPC64
7865 || (mode != DFmode && mode != DDmode)
7866 || (TARGET_E500_DOUBLE && mode != DDmode))
7867 && (TARGET_POWERPC64 || mode != DImode)
7868 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
7869 && mode != PTImode
7870 && !avoiding_indexed_address_p (mode)
7871 && legitimate_indexed_address_p (x, reg_ok_strict))
7872 return 1;
7873 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
7874 && mode_supports_pre_modify_p (mode)
7875 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
7876 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
7877 reg_ok_strict, false)
7878 || (!avoiding_indexed_address_p (mode)
7879 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
7880 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
7881 return 1;
7882 if (reg_offset_p && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
7883 return 1;
7884 return 0;
7885 }
7886
7887 /* Debug version of rs6000_legitimate_address_p. */
7888 static bool
7889 rs6000_debug_legitimate_address_p (machine_mode mode, rtx x,
7890 bool reg_ok_strict)
7891 {
7892 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
7893 fprintf (stderr,
7894 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
7895 "strict = %d, reload = %s, code = %s\n",
7896 ret ? "true" : "false",
7897 GET_MODE_NAME (mode),
7898 reg_ok_strict,
7899 (reload_completed
7900 ? "after"
7901 : (reload_in_progress ? "progress" : "before")),
7902 GET_RTX_NAME (GET_CODE (x)));
7903 debug_rtx (x);
7904
7905 return ret;
7906 }
7907
7908 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
7909
7910 static bool
7911 rs6000_mode_dependent_address_p (const_rtx addr,
7912 addr_space_t as ATTRIBUTE_UNUSED)
7913 {
7914 return rs6000_mode_dependent_address_ptr (addr);
7915 }
7916
7917 /* Go to LABEL if ADDR (a legitimate address expression)
7918 has an effect that depends on the machine mode it is used for.
7919
7920 On the RS/6000 this is true of all integral offsets (since AltiVec
7921 and VSX modes don't allow them) or is a pre-increment or decrement.
7922
7923 ??? Except that due to conceptual problems in offsettable_address_p
7924 we can't really report the problems of integral offsets. So leave
7925 this assuming that the adjustable offset must be valid for the
7926 sub-words of a TFmode operand, which is what we had before. */
7927
7928 static bool
7929 rs6000_mode_dependent_address (const_rtx addr)
7930 {
7931 switch (GET_CODE (addr))
7932 {
7933 case PLUS:
7934 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
7935 is considered a legitimate address before reload, so there
7936 are no offset restrictions in that case. Note that this
7937 condition is safe in strict mode because any address involving
7938 virtual_stack_vars_rtx or arg_pointer_rtx would already have
7939 been rejected as illegitimate. */
7940 if (XEXP (addr, 0) != virtual_stack_vars_rtx
7941 && XEXP (addr, 0) != arg_pointer_rtx
7942 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
7943 {
7944 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
7945 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
7946 }
7947 break;
7948
7949 case LO_SUM:
7950 /* Anything in the constant pool is sufficiently aligned that
7951 all bytes have the same high part address. */
7952 return !legitimate_constant_pool_address_p (addr, QImode, false);
7953
7954 /* Auto-increment cases are now treated generically in recog.c. */
7955 case PRE_MODIFY:
7956 return TARGET_UPDATE;
7957
7958 /* AND is only allowed in Altivec loads. */
7959 case AND:
7960 return true;
7961
7962 default:
7963 break;
7964 }
7965
7966 return false;
7967 }
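
/* Concretely, for the PLUS case above in 32-bit mode an offset is
   mode-independent only in [-0x8000, 0x7ff4): a TFmode access may add
   up to 12 bytes to the displacement when split into word accesses,
   and the result must still fit in a signed 16-bit offset (8 bytes,
   and hence a 0x7ff8 bound, on PowerPC64).  */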
7968
7969 /* Debug version of rs6000_mode_dependent_address. */
7970 static bool
7971 rs6000_debug_mode_dependent_address (const_rtx addr)
7972 {
7973 bool ret = rs6000_mode_dependent_address (addr);
7974
7975 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
7976 ret ? "true" : "false");
7977 debug_rtx (addr);
7978
7979 return ret;
7980 }
7981
7982 /* Implement FIND_BASE_TERM. */
7983
7984 rtx
7985 rs6000_find_base_term (rtx op)
7986 {
7987 rtx base;
7988
7989 base = op;
7990 if (GET_CODE (base) == CONST)
7991 base = XEXP (base, 0);
7992 if (GET_CODE (base) == PLUS)
7993 base = XEXP (base, 0);
7994 if (GET_CODE (base) == UNSPEC)
7995 switch (XINT (base, 1))
7996 {
7997 case UNSPEC_TOCREL:
7998 case UNSPEC_MACHOPIC_OFFSET:
7999 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
8000 for aliasing purposes. */
8001 return XVECEXP (base, 0, 0);
8002 }
8003
8004 return op;
8005 }
8006
8007 /* More elaborate version of recog's offsettable_memref_p predicate
8008 that works around the ??? note of rs6000_mode_dependent_address.
8009 In particular it accepts
8010
8011 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
8012
8013 in 32-bit mode, which the recog predicate rejects.  */
8014
8015 static bool
8016 rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode)
8017 {
8018 bool worst_case;
8019
8020 if (!MEM_P (op))
8021 return false;
8022
8023 /* First mimic offsettable_memref_p. */
8024 if (offsettable_address_p (true, GET_MODE (op), XEXP (op, 0)))
8025 return true;
8026
8027 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
8028 the latter predicate knows nothing about the mode of the memory
8029 reference and, therefore, assumes that it is the largest supported
8030 mode (TFmode). As a consequence, legitimate offsettable memory
8031 references are rejected. rs6000_legitimate_offset_address_p contains
8032 the correct logic for the PLUS case of rs6000_mode_dependent_address,
8033 at least with a little bit of help here given that we know the
8034 actual registers used. */
8035 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
8036 || GET_MODE_SIZE (reg_mode) == 4);
8037 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
8038 true, worst_case);
8039 }
8040
8041 /* Change register usage conditional on target flags. */
8042 static void
8043 rs6000_conditional_register_usage (void)
8044 {
8045 int i;
8046
8047 if (TARGET_DEBUG_TARGET)
8048 fprintf (stderr, "rs6000_conditional_register_usage called\n");
8049
8050 /* Set MQ register fixed (already call_used) so that it will not be
8051 allocated. */
8052 fixed_regs[64] = 1;
8053
8054 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
8055 if (TARGET_64BIT)
8056 fixed_regs[13] = call_used_regs[13]
8057 = call_really_used_regs[13] = 1;
8058
8059 /* Conditionally disable FPRs. */
8060 if (TARGET_SOFT_FLOAT || !TARGET_FPRS)
8061 for (i = 32; i < 64; i++)
8062 fixed_regs[i] = call_used_regs[i]
8063 = call_really_used_regs[i] = 1;
8064
8065 /* The TOC register is not killed across calls in a way that is
8066 visible to the compiler. */
8067 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
8068 call_really_used_regs[2] = 0;
8069
8070 if (DEFAULT_ABI == ABI_V4
8071 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
8072 && flag_pic == 2)
8073 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
8074
8075 if (DEFAULT_ABI == ABI_V4
8076 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
8077 && flag_pic == 1)
8078 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
8079 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
8080 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
8081
8082 if (DEFAULT_ABI == ABI_DARWIN
8083 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
8084 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
8085 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
8086 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
8087
8088 if (TARGET_TOC && TARGET_MINIMAL_TOC)
8089 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
8090 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
8091
8092 if (TARGET_SPE)
8093 {
8094 global_regs[SPEFSCR_REGNO] = 1;
8095 /* We used to use r14 as FIXED_SCRATCH to address SPE 64-bit
8096 registers in prologues and epilogues. We no longer use r14
8097 for FIXED_SCRATCH, but we're keeping r14 out of the allocation
8098 pool for link-compatibility with older versions of GCC. Once
8099 "old" code has died out, we can return r14 to the allocation
8100 pool. */
8101 fixed_regs[14]
8102 = call_used_regs[14]
8103 = call_really_used_regs[14] = 1;
8104 }
8105
8106 if (!TARGET_ALTIVEC && !TARGET_VSX)
8107 {
8108 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
8109 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
8110 call_really_used_regs[VRSAVE_REGNO] = 1;
8111 }
8112
8113 if (TARGET_ALTIVEC || TARGET_VSX)
8114 global_regs[VSCR_REGNO] = 1;
8115
8116 if (TARGET_ALTIVEC_ABI)
8117 {
8118 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
8119 call_used_regs[i] = call_really_used_regs[i] = 1;
8120
8121 /* AIX reserves VR20:31 in non-extended ABI mode. */
8122 if (TARGET_XCOFF)
8123 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
8124 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
8125 }
8126 }
8127
8128 \f
8129 /* Output insns to set DEST equal to the constant SOURCE as a series of
8130 lis, ori and shl instructions and return TRUE. */
8131
8132 bool
8133 rs6000_emit_set_const (rtx dest, rtx source)
8134 {
8135 machine_mode mode = GET_MODE (dest);
8136 rtx temp, set;
8137 rtx_insn *insn;
8138 HOST_WIDE_INT c;
8139
8140 gcc_checking_assert (CONST_INT_P (source));
8141 c = INTVAL (source);
8142 switch (mode)
8143 {
8144 case QImode:
8145 case HImode:
8146 emit_insn (gen_rtx_SET (dest, source));
8147 return true;
8148
8149 case SImode:
8150 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
8151
8152 emit_insn (gen_rtx_SET (copy_rtx (temp),
8153 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
8154 emit_insn (gen_rtx_SET (dest,
8155 gen_rtx_IOR (SImode, copy_rtx (temp),
8156 GEN_INT (c & 0xffff))));
8157 break;
8158
8159 case DImode:
8160 if (!TARGET_POWERPC64)
8161 {
8162 rtx hi, lo;
8163
8164 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
8165 DImode);
8166 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
8167 DImode);
8168 emit_move_insn (hi, GEN_INT (c >> 32));
8169 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
8170 emit_move_insn (lo, GEN_INT (c));
8171 }
8172 else
8173 rs6000_emit_set_long_const (dest, c);
8174 break;
8175
8176 default:
8177 gcc_unreachable ();
8178 }
8179
8180 insn = get_last_insn ();
8181 set = single_set (insn);
8182 if (! CONSTANT_P (SET_SRC (set)))
8183 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
8184
8185 return true;
8186 }
8187
8188 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
8189 Output insns to set DEST equal to the constant C as a series of
8190 lis, ori and shl instructions. */
8191
8192 static void
8193 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
8194 {
8195 rtx temp;
8196 HOST_WIDE_INT ud1, ud2, ud3, ud4;
8197
8198 ud1 = c & 0xffff;
8199 c = c >> 16;
8200 ud2 = c & 0xffff;
8201 c = c >> 16;
8202 ud3 = c & 0xffff;
8203 c = c >> 16;
8204 ud4 = c & 0xffff;
8205
8206 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
8207 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
8208 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
8209
8210 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
8211 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
8212 {
8213 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
8214
8215 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
8216 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
8217 if (ud1 != 0)
8218 emit_move_insn (dest,
8219 gen_rtx_IOR (DImode, copy_rtx (temp),
8220 GEN_INT (ud1)));
8221 }
8222 else if (ud3 == 0 && ud4 == 0)
8223 {
8224 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
8225
8226 gcc_assert (ud2 & 0x8000);
8227 emit_move_insn (copy_rtx (temp),
8228 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
8229 if (ud1 != 0)
8230 emit_move_insn (copy_rtx (temp),
8231 gen_rtx_IOR (DImode, copy_rtx (temp),
8232 GEN_INT (ud1)));
8233 emit_move_insn (dest,
8234 gen_rtx_ZERO_EXTEND (DImode,
8235 gen_lowpart (SImode,
8236 copy_rtx (temp))));
8237 }
8238 else if ((ud4 == 0xffff && (ud3 & 0x8000))
8239 || (ud4 == 0 && ! (ud3 & 0x8000)))
8240 {
8241 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
8242
8243 emit_move_insn (copy_rtx (temp),
8244 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
8245 if (ud2 != 0)
8246 emit_move_insn (copy_rtx (temp),
8247 gen_rtx_IOR (DImode, copy_rtx (temp),
8248 GEN_INT (ud2)));
8249 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
8250 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
8251 GEN_INT (16)));
8252 if (ud1 != 0)
8253 emit_move_insn (dest,
8254 gen_rtx_IOR (DImode, copy_rtx (temp),
8255 GEN_INT (ud1)));
8256 }
8257 else
8258 {
8259 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
8260
8261 emit_move_insn (copy_rtx (temp),
8262 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
8263 if (ud3 != 0)
8264 emit_move_insn (copy_rtx (temp),
8265 gen_rtx_IOR (DImode, copy_rtx (temp),
8266 GEN_INT (ud3)));
8267
8268 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
8269 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
8270 GEN_INT (32)));
8271 if (ud2 != 0)
8272 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
8273 gen_rtx_IOR (DImode, copy_rtx (temp),
8274 GEN_INT (ud2 << 16)));
8275 if (ud1 != 0)
8276 emit_move_insn (dest,
8277 gen_rtx_IOR (DImode, copy_rtx (temp),
8278 GEN_INT (ud1)));
8279 }
8280 }
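
/* Worked example for the general case above (a sketch): for
   c = 0x123456789abcdef0 we get ud4 = 0x1234, ud3 = 0x5678,
   ud2 = 0x9abc and ud1 = 0xdef0, and the emitted RTL corresponds to
       lis   tmp,0x1234
       ori   tmp,tmp,0x5678
       sldi  tmp,tmp,32
       oris  tmp,tmp,0x9abc
       ori   dest,tmp,0xdef0  */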
8281
8282 /* Helper for the following. Get rid of [r+r] memory refs
8283 in cases where it won't work (TImode, TFmode, TDmode, PTImode). */
8284
8285 static void
8286 rs6000_eliminate_indexed_memrefs (rtx operands[2])
8287 {
8288 if (reload_in_progress)
8289 return;
8290
8291 if (GET_CODE (operands[0]) == MEM
8292 && GET_CODE (XEXP (operands[0], 0)) != REG
8293 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
8294 GET_MODE (operands[0]), false))
8295 operands[0]
8296 = replace_equiv_address (operands[0],
8297 copy_addr_to_reg (XEXP (operands[0], 0)));
8298
8299 if (GET_CODE (operands[1]) == MEM
8300 && GET_CODE (XEXP (operands[1], 0)) != REG
8301 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
8302 GET_MODE (operands[1]), false))
8303 operands[1]
8304 = replace_equiv_address (operands[1],
8305 copy_addr_to_reg (XEXP (operands[1], 0)));
8306 }
8307
8308 /* Generate a vector of constants to permute MODE for a little-endian
8309 storage operation by swapping the two halves of a vector. */
8310 static rtvec
8311 rs6000_const_vec (machine_mode mode)
8312 {
8313 int i, subparts;
8314 rtvec v;
8315
8316 switch (mode)
8317 {
8318 case V1TImode:
8319 subparts = 1;
8320 break;
8321 case V2DFmode:
8322 case V2DImode:
8323 subparts = 2;
8324 break;
8325 case V4SFmode:
8326 case V4SImode:
8327 subparts = 4;
8328 break;
8329 case V8HImode:
8330 subparts = 8;
8331 break;
8332 case V16QImode:
8333 subparts = 16;
8334 break;
8335 default:
8336 gcc_unreachable();
8337 }
8338
8339 v = rtvec_alloc (subparts);
8340
8341 for (i = 0; i < subparts / 2; ++i)
8342 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
8343 for (i = subparts / 2; i < subparts; ++i)
8344 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
8345
8346 return v;
8347 }
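
/* E.g. for V4SImode this returns the selector { 2, 3, 0, 1 }: the two
   doubleword halves of the vector swap places, which is exactly the
   exchange lxvd2x and stxvd2x perform on little-endian.  */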
8348
8349 /* Generate a permute rtx that represents an lxvd2x, stxvd2x, or xxpermdi
8350 for a VSX load or store operation. */
8351 rtx
8352 rs6000_gen_le_vsx_permute (rtx source, machine_mode mode)
8353 {
8354 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
8355 return gen_rtx_VEC_SELECT (mode, source, par);
8356 }
8357
8358 /* Emit a little-endian load from vector memory location SOURCE to VSX
8359 register DEST in mode MODE. The load is done with two permuting
8360 insns that represent an lxvd2x and an xxpermdi.  */
8361 void
8362 rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
8363 {
8364 rtx tmp, permute_mem, permute_reg;
8365
8366 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
8367 V1TImode). */
8368 if (mode == TImode || mode == V1TImode)
8369 {
8370 mode = V2DImode;
8371 dest = gen_lowpart (V2DImode, dest);
8372 source = adjust_address (source, V2DImode, 0);
8373 }
8374
8375 tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
8376 permute_mem = rs6000_gen_le_vsx_permute (source, mode);
8377 permute_reg = rs6000_gen_le_vsx_permute (tmp, mode);
8378 emit_insn (gen_rtx_SET (tmp, permute_mem));
8379 emit_insn (gen_rtx_SET (dest, permute_reg));
8380 }
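
/* Illustrative RTL for the above (a sketch): a little-endian V4SImode
   load expands to
       (set (reg tmp)  (vec_select:V4SI (mem ...) [2 3 0 1]))
       (set (reg dest) (vec_select:V4SI (reg tmp) [2 3 0 1]))
   i.e. an lxvd2x followed by an xxpermdi; the two doubleword swaps
   compose to the identity, restoring element order in the register.  */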
8381
8382 /* Emit a little-endian store to vector memory location DEST from VSX
8383 register SOURCE in mode MODE. The store is done with two permuting
8384 insns that represent an xxpermdi and an stxvd2x.  */
8385 void
8386 rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
8387 {
8388 rtx tmp, permute_src, permute_tmp;
8389
8390 /* This should never be called during or after reload, because it does
8391 not re-permute the source register. It is intended only for use
8392 during expand. */
8393 gcc_assert (!reload_in_progress && !lra_in_progress && !reload_completed);
8394
8395 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
8396 V1TImode). */
8397 if (mode == TImode || mode == V1TImode)
8398 {
8399 mode = V2DImode;
8400 dest = adjust_address (dest, V2DImode, 0);
8401 source = gen_lowpart (V2DImode, source);
8402 }
8403
8404 tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
8405 permute_src = rs6000_gen_le_vsx_permute (source, mode);
8406 permute_tmp = rs6000_gen_le_vsx_permute (tmp, mode);
8407 emit_insn (gen_rtx_SET (tmp, permute_src));
8408 emit_insn (gen_rtx_SET (dest, permute_tmp));
8409 }
8410
8411 /* Emit a sequence representing a little-endian VSX load or store,
8412 moving data from SOURCE to DEST in mode MODE. This is done
8413 separately from rs6000_emit_move to ensure it is called only
8414 during expand. LE VSX loads and stores introduced later are
8415 handled with a split. The expand-time RTL generation allows
8416 us to optimize away redundant pairs of register-permutes. */
8417 void
8418 rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
8419 {
8420 gcc_assert (!BYTES_BIG_ENDIAN
8421 && VECTOR_MEM_VSX_P (mode)
8422 && !gpr_or_gpr_p (dest, source)
8423 && (MEM_P (source) ^ MEM_P (dest)));
8424
8425 if (MEM_P (source))
8426 {
8427 gcc_assert (REG_P (dest) || GET_CODE (dest) == SUBREG);
8428 rs6000_emit_le_vsx_load (dest, source, mode);
8429 }
8430 else
8431 {
8432 if (!REG_P (source))
8433 source = force_reg (mode, source);
8434 rs6000_emit_le_vsx_store (dest, source, mode);
8435 }
8436 }
8437
8438 /* Emit a move from SOURCE to DEST in mode MODE. */
8439 void
8440 rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
8441 {
8442 rtx operands[2];
8443 operands[0] = dest;
8444 operands[1] = source;
8445
8446 if (TARGET_DEBUG_ADDR)
8447 {
8448 fprintf (stderr,
8449 "\nrs6000_emit_move: mode = %s, reload_in_progress = %d, "
8450 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
8451 GET_MODE_NAME (mode),
8452 reload_in_progress,
8453 reload_completed,
8454 can_create_pseudo_p ());
8455 debug_rtx (dest);
8456 fprintf (stderr, "source:\n");
8457 debug_rtx (source);
8458 }
8459
8460 /* Sanity checks.  Check that we get CONST_WIDE_INT only when we should.  */
8461 if (CONST_WIDE_INT_P (operands[1])
8462 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
8463 {
8464 /* This should be fixed with the introduction of CONST_WIDE_INT. */
8465 gcc_unreachable ();
8466 }
8467
8468 /* Check if GCC is setting up a block move that will end up using FP
8469 registers as temporaries. We must make sure this is acceptable. */
8470 if (GET_CODE (operands[0]) == MEM
8471 && GET_CODE (operands[1]) == MEM
8472 && mode == DImode
8473 && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
8474 || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
8475 && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
8476 ? 32 : MEM_ALIGN (operands[0])))
8477 || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
8478 ? 32
8479 : MEM_ALIGN (operands[1]))))
8480 && ! MEM_VOLATILE_P (operands [0])
8481 && ! MEM_VOLATILE_P (operands [1]))
8482 {
8483 emit_move_insn (adjust_address (operands[0], SImode, 0),
8484 adjust_address (operands[1], SImode, 0));
8485 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
8486 adjust_address (copy_rtx (operands[1]), SImode, 4));
8487 return;
8488 }
8489
8490 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
8491 && !gpc_reg_operand (operands[1], mode))
8492 operands[1] = force_reg (mode, operands[1]);
8493
8494 /* Recognize the case where operand[1] is a reference to thread-local
8495 data and load its address to a register. */
8496 if (tls_referenced_p (operands[1]))
8497 {
8498 enum tls_model model;
8499 rtx tmp = operands[1];
8500 rtx addend = NULL;
8501
8502 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
8503 {
8504 addend = XEXP (XEXP (tmp, 0), 1);
8505 tmp = XEXP (XEXP (tmp, 0), 0);
8506 }
8507
8508 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
8509 model = SYMBOL_REF_TLS_MODEL (tmp);
8510 gcc_assert (model != 0);
8511
8512 tmp = rs6000_legitimize_tls_address (tmp, model);
8513 if (addend)
8514 {
8515 tmp = gen_rtx_PLUS (mode, tmp, addend);
8516 tmp = force_operand (tmp, operands[0]);
8517 }
8518 operands[1] = tmp;
8519 }
8520
8521 /* Handle the case where reload calls us with an invalid address. */
8522 if (reload_in_progress && mode == Pmode
8523 && (! general_operand (operands[1], mode)
8524 || ! nonimmediate_operand (operands[0], mode)))
8525 goto emit_set;
8526
8527 /* 128-bit constant floating-point values on Darwin should really be loaded
8528 as two parts. However, this premature splitting is a problem when DFmode
8529 values can go into Altivec registers. */
8530 if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
8531 && !reg_addr[DFmode].scalar_in_vmx_p
8532 && mode == TFmode && GET_CODE (operands[1]) == CONST_DOUBLE)
8533 {
8534 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
8535 simplify_gen_subreg (DFmode, operands[1], mode, 0),
8536 DFmode);
8537 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
8538 GET_MODE_SIZE (DFmode)),
8539 simplify_gen_subreg (DFmode, operands[1], mode,
8540 GET_MODE_SIZE (DFmode)),
8541 DFmode);
8542 return;
8543 }
8544
8545 if (reload_in_progress && cfun->machine->sdmode_stack_slot != NULL_RTX)
8546 cfun->machine->sdmode_stack_slot =
8547 eliminate_regs (cfun->machine->sdmode_stack_slot, VOIDmode, NULL_RTX);
8548
8549
8550 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
8551 p1:SD) if p1 is not of floating point class and p0 is spilled,
8552 since we have no analogous movsd_store for this case.  */
8553 if (lra_in_progress && mode == DDmode
8554 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
8555 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
8556 && GET_CODE (operands[1]) == SUBREG && REG_P (SUBREG_REG (operands[1]))
8557 && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
8558 {
8559 enum reg_class cl;
8560 int regno = REGNO (SUBREG_REG (operands[1]));
8561
8562 if (regno >= FIRST_PSEUDO_REGISTER)
8563 {
8564 cl = reg_preferred_class (regno);
8565 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
8566 }
8567 if (regno >= 0 && ! FP_REGNO_P (regno))
8568 {
8569 mode = SDmode;
8570 operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
8571 operands[1] = SUBREG_REG (operands[1]);
8572 }
8573 }
8574 if (lra_in_progress
8575 && mode == SDmode
8576 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
8577 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
8578 && (REG_P (operands[1])
8579 || (GET_CODE (operands[1]) == SUBREG
8580 && REG_P (SUBREG_REG (operands[1])))))
8581 {
8582 int regno = REGNO (GET_CODE (operands[1]) == SUBREG
8583 ? SUBREG_REG (operands[1]) : operands[1]);
8584 enum reg_class cl;
8585
8586 if (regno >= FIRST_PSEUDO_REGISTER)
8587 {
8588 cl = reg_preferred_class (regno);
8589 gcc_assert (cl != NO_REGS);
8590 regno = ira_class_hard_regs[cl][0];
8591 }
8592 if (FP_REGNO_P (regno))
8593 {
8594 if (GET_MODE (operands[0]) != DDmode)
8595 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
8596 emit_insn (gen_movsd_store (operands[0], operands[1]));
8597 }
8598 else if (INT_REGNO_P (regno))
8599 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
8600 else
8601 gcc_unreachable();
8602 return;
8603 }
8604 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
8605 p1:DD)) if p0 is not of floating point class and p1 is spilled,
8606 since we have no analogous movsd_load for this case.  */
8607 if (lra_in_progress && mode == DDmode
8608 && GET_CODE (operands[0]) == SUBREG && REG_P (SUBREG_REG (operands[0]))
8609 && GET_MODE (SUBREG_REG (operands[0])) == SDmode
8610 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
8611 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
8612 {
8613 enum reg_class cl;
8614 int regno = REGNO (SUBREG_REG (operands[0]));
8615
8616 if (regno >= FIRST_PSEUDO_REGISTER)
8617 {
8618 cl = reg_preferred_class (regno);
8619 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
8620 }
8621 if (regno >= 0 && ! FP_REGNO_P (regno))
8622 {
8623 mode = SDmode;
8624 operands[0] = SUBREG_REG (operands[0]);
8625 operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
8626 }
8627 }
8628 if (lra_in_progress
8629 && mode == SDmode
8630 && (REG_P (operands[0])
8631 || (GET_CODE (operands[0]) == SUBREG
8632 && REG_P (SUBREG_REG (operands[0]))))
8633 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
8634 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
8635 {
8636 int regno = REGNO (GET_CODE (operands[0]) == SUBREG
8637 ? SUBREG_REG (operands[0]) : operands[0]);
8638 enum reg_class cl;
8639
8640 if (regno >= FIRST_PSEUDO_REGISTER)
8641 {
8642 cl = reg_preferred_class (regno);
8643 gcc_assert (cl != NO_REGS);
8644 regno = ira_class_hard_regs[cl][0];
8645 }
8646 if (FP_REGNO_P (regno))
8647 {
8648 if (GET_MODE (operands[1]) != DDmode)
8649 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
8650 emit_insn (gen_movsd_load (operands[0], operands[1]));
8651 }
8652 else if (INT_REGNO_P (regno))
8653 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
8654 else
8655 gcc_unreachable();
8656 return;
8657 }
8658
8659 if (reload_in_progress
8660 && mode == SDmode
8661 && cfun->machine->sdmode_stack_slot != NULL_RTX
8662 && MEM_P (operands[0])
8663 && rtx_equal_p (operands[0], cfun->machine->sdmode_stack_slot)
8664 && REG_P (operands[1]))
8665 {
8666 if (FP_REGNO_P (REGNO (operands[1])))
8667 {
8668 rtx mem = adjust_address_nv (operands[0], DDmode, 0);
8669 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
8670 emit_insn (gen_movsd_store (mem, operands[1]));
8671 }
8672 else if (INT_REGNO_P (REGNO (operands[1])))
8673 {
8674 rtx mem = operands[0];
8675 if (BYTES_BIG_ENDIAN)
8676 mem = adjust_address_nv (mem, mode, 4);
8677 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
8678 emit_insn (gen_movsd_hardfloat (mem, operands[1]));
8679 }
8680 else
8681 gcc_unreachable ();
8682 return;
8683 }
8684 if (reload_in_progress
8685 && mode == SDmode
8686 && REG_P (operands[0])
8687 && MEM_P (operands[1])
8688 && cfun->machine->sdmode_stack_slot != NULL_RTX
8689 && rtx_equal_p (operands[1], cfun->machine->sdmode_stack_slot))
8690 {
8691 if (FP_REGNO_P (REGNO (operands[0])))
8692 {
8693 rtx mem = adjust_address_nv (operands[1], DDmode, 0);
8694 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
8695 emit_insn (gen_movsd_load (operands[0], mem));
8696 }
8697 else if (INT_REGNO_P (REGNO (operands[0])))
8698 {
8699 rtx mem = operands[1];
8700 if (BYTES_BIG_ENDIAN)
8701 mem = adjust_address_nv (mem, mode, 4);
8702 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
8703 emit_insn (gen_movsd_hardfloat (operands[0], mem));
8704 }
8705 else
8706 gcc_unreachable ();
8707 return;
8708 }
8709
8710 /* FIXME: In the long term, this switch statement should go away
8711 and be replaced by a sequence of tests based on things like
8712 mode == Pmode. */
8713 switch (mode)
8714 {
8715 case HImode:
8716 case QImode:
8717 if (CONSTANT_P (operands[1])
8718 && GET_CODE (operands[1]) != CONST_INT)
8719 operands[1] = force_const_mem (mode, operands[1]);
8720 break;
8721
8722 case TFmode:
8723 case TDmode:
8724 rs6000_eliminate_indexed_memrefs (operands);
8725 /* fall through */
8726
8727 case DFmode:
8728 case DDmode:
8729 case SFmode:
8730 case SDmode:
8731 if (CONSTANT_P (operands[1])
8732 && ! easy_fp_constant (operands[1], mode))
8733 operands[1] = force_const_mem (mode, operands[1]);
8734 break;
8735
8736 case V16QImode:
8737 case V8HImode:
8738 case V4SFmode:
8739 case V4SImode:
8740 case V4HImode:
8741 case V2SFmode:
8742 case V2SImode:
8743 case V1DImode:
8744 case V2DFmode:
8745 case V2DImode:
8746 case V1TImode:
8747 if (CONSTANT_P (operands[1])
8748 && !easy_vector_constant (operands[1], mode))
8749 operands[1] = force_const_mem (mode, operands[1]);
8750 break;
8751
8752 case SImode:
8753 case DImode:
8754 /* Use the default pattern for addresses of ELF small data. */
8755 if (TARGET_ELF
8756 && mode == Pmode
8757 && DEFAULT_ABI == ABI_V4
8758 && (GET_CODE (operands[1]) == SYMBOL_REF
8759 || GET_CODE (operands[1]) == CONST)
8760 && small_data_operand (operands[1], mode))
8761 {
8762 emit_insn (gen_rtx_SET (operands[0], operands[1]));
8763 return;
8764 }
8765
8766 if (DEFAULT_ABI == ABI_V4
8767 && mode == Pmode && mode == SImode
8768 && flag_pic == 1 && got_operand (operands[1], mode))
8769 {
8770 emit_insn (gen_movsi_got (operands[0], operands[1]));
8771 return;
8772 }
8773
8774 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
8775 && TARGET_NO_TOC
8776 && ! flag_pic
8777 && mode == Pmode
8778 && CONSTANT_P (operands[1])
8779 && GET_CODE (operands[1]) != HIGH
8780 && GET_CODE (operands[1]) != CONST_INT)
8781 {
8782 rtx target = (!can_create_pseudo_p ()
8783 ? operands[0]
8784 : gen_reg_rtx (mode));
8785
8786 /* If this is a function address on -mcall-aixdesc,
8787 convert it to the address of the descriptor. */
8788 if (DEFAULT_ABI == ABI_AIX
8789 && GET_CODE (operands[1]) == SYMBOL_REF
8790 && XSTR (operands[1], 0)[0] == '.')
8791 {
8792 const char *name = XSTR (operands[1], 0);
8793 rtx new_ref;
8794 while (*name == '.')
8795 name++;
8796 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
8797 CONSTANT_POOL_ADDRESS_P (new_ref)
8798 = CONSTANT_POOL_ADDRESS_P (operands[1]);
8799 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
8800 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
8801 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
8802 operands[1] = new_ref;
8803 }
8804
8805 if (DEFAULT_ABI == ABI_DARWIN)
8806 {
8807 #if TARGET_MACHO
8808 if (MACHO_DYNAMIC_NO_PIC_P)
8809 {
8810 /* Take care of any required data indirection. */
8811 operands[1] = rs6000_machopic_legitimize_pic_address (
8812 operands[1], mode, operands[0]);
8813 if (operands[0] != operands[1])
8814 emit_insn (gen_rtx_SET (operands[0], operands[1]));
8815 return;
8816 }
8817 #endif
8818 emit_insn (gen_macho_high (target, operands[1]));
8819 emit_insn (gen_macho_low (operands[0], target, operands[1]));
8820 return;
8821 }
8822
8823 emit_insn (gen_elf_high (target, operands[1]));
8824 emit_insn (gen_elf_low (operands[0], target, operands[1]));
8825 return;
8826 }
8827
8828 /* If this is a SYMBOL_REF that refers to a constant pool entry,
8829 and we have put it in the TOC, we just need to make a TOC-relative
8830 reference to it. */
8831 if (TARGET_TOC
8832 && GET_CODE (operands[1]) == SYMBOL_REF
8833 && use_toc_relative_ref (operands[1], mode))
8834 operands[1] = create_TOC_reference (operands[1], operands[0]);
8835 else if (mode == Pmode
8836 && CONSTANT_P (operands[1])
8837 && GET_CODE (operands[1]) != HIGH
8838 && ((GET_CODE (operands[1]) != CONST_INT
8839 && ! easy_fp_constant (operands[1], mode))
8840 || (GET_CODE (operands[1]) == CONST_INT
8841 && (num_insns_constant (operands[1], mode)
8842 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
8843 || (GET_CODE (operands[0]) == REG
8844 && FP_REGNO_P (REGNO (operands[0]))))
8845 && !toc_relative_expr_p (operands[1], false)
8846 && (TARGET_CMODEL == CMODEL_SMALL
8847 || can_create_pseudo_p ()
8848 || (REG_P (operands[0])
8849 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
8850 {
8851
8852 #if TARGET_MACHO
8853 /* Darwin uses a special PIC legitimizer. */
8854 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
8855 {
8856 operands[1] =
8857 rs6000_machopic_legitimize_pic_address (operands[1], mode,
8858 operands[0]);
8859 if (operands[0] != operands[1])
8860 emit_insn (gen_rtx_SET (operands[0], operands[1]));
8861 return;
8862 }
8863 #endif
8864
8865 /* If we are to limit the number of things we put in the TOC and
8866 this is a symbol plus a constant we can add in one insn,
8867 just put the symbol in the TOC and add the constant. Don't do
8868 this if reload is in progress. */
8869 if (GET_CODE (operands[1]) == CONST
8870 && TARGET_NO_SUM_IN_TOC && ! reload_in_progress
8871 && GET_CODE (XEXP (operands[1], 0)) == PLUS
8872 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
8873 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
8874 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
8875 && ! side_effects_p (operands[0]))
8876 {
8877 rtx sym =
8878 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
8879 rtx other = XEXP (XEXP (operands[1], 0), 1);
8880
8881 sym = force_reg (mode, sym);
8882 emit_insn (gen_add3_insn (operands[0], sym, other));
8883 return;
8884 }
8885
8886 operands[1] = force_const_mem (mode, operands[1]);
8887
8888 if (TARGET_TOC
8889 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
8890 && constant_pool_expr_p (XEXP (operands[1], 0))
8891 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (
8892 get_pool_constant (XEXP (operands[1], 0)),
8893 get_pool_mode (XEXP (operands[1], 0))))
8894 {
8895 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
8896 operands[0]);
8897 operands[1] = gen_const_mem (mode, tocref);
8898 set_mem_alias_set (operands[1], get_TOC_alias_set ());
8899 }
8900 }
8901 break;
8902
8903 case TImode:
8904 if (!VECTOR_MEM_VSX_P (TImode))
8905 rs6000_eliminate_indexed_memrefs (operands);
8906 break;
8907
8908 case PTImode:
8909 rs6000_eliminate_indexed_memrefs (operands);
8910 break;
8911
8912 default:
8913 fatal_insn ("bad move", gen_rtx_SET (dest, source));
8914 }
8915
8916 /* Above, we may have called force_const_mem which may have returned
8917 an invalid address. If we can, fix this up; otherwise, reload will
8918 have to deal with it. */
8919 if (GET_CODE (operands[1]) == MEM && ! reload_in_progress)
8920 operands[1] = validize_mem (operands[1]);
8921
8922 emit_set:
8923 emit_insn (gen_rtx_SET (operands[0], operands[1]));
8924 }
8925
8926 /* Return true if a structure, union or array containing FIELD should be
8927 accessed using `BLKmode'.
8928
8929 For the SPE, SIMD types are V2SI, and GCC can be tempted to put the
8930 entire thing in a DI and use subregs to access the internals.
8931 store_bit_field() will force (subreg:DI (reg:V2SI x))'s to the
8932 back-end. Because a single GPR can hold a V2SI, but not a DI, the
8933 best thing to do is set such structs to BLKmode and avoid Severe Tire
8934 Damage.
8935
8936 On e500 v2, DF and DI modes suffer from the same anomaly. DF can
8937 fit in one GPR, whereas DI still needs two. */
8938
8939 static bool
8940 rs6000_member_type_forces_blk (const_tree field, machine_mode mode)
8941 {
8942 return ((TARGET_SPE && TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
8943 || (TARGET_E500_DOUBLE && mode == DFmode));
8944 }
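/* Illustrative example (editorial addition, not in the original source):
with TARGET_SPE, a struct wrapping a V2SI vector has a VECTOR_TYPE field,
so the hook above returns true and the struct gets BLKmode instead of
being placed in a DImode pseudo. The type names below are hypothetical. */
#if 0
typedef int v2si_example __attribute__ ((vector_size (8)));
struct spe_wrap_example { v2si_example v; };
#endif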
8945 \f
8946 /* Nonzero if we can use a floating-point register to pass this arg. */
8947 #define USE_FP_FOR_ARG_P(CUM,MODE) \
8948 (SCALAR_FLOAT_MODE_P (MODE) \
8949 && (CUM)->fregno <= FP_ARG_MAX_REG \
8950 && TARGET_HARD_FLOAT && TARGET_FPRS)
8951
8952 /* Nonzero if we can use an AltiVec register to pass this arg. */
8953 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
8954 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
8955 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
8956 && TARGET_ALTIVEC_ABI \
8957 && (NAMED))
8958
8959 /* Walk down the type tree of TYPE counting consecutive base elements.
8960 If *MODEP is VOIDmode, then set it to the first valid floating point
8961 or vector type. If a type is found that is neither floating point
8962 nor vector, or a floating point or vector type that doesn't match a
8963 non-VOIDmode *MODEP, then return -1, otherwise return the count in
8964 the sub-tree. */
8965
8966 static int
8967 rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
8968 {
8969 machine_mode mode;
8970 HOST_WIDE_INT size;
8971
8972 switch (TREE_CODE (type))
8973 {
8974 case REAL_TYPE:
8975 mode = TYPE_MODE (type);
8976 if (!SCALAR_FLOAT_MODE_P (mode))
8977 return -1;
8978
8979 if (*modep == VOIDmode)
8980 *modep = mode;
8981
8982 if (*modep == mode)
8983 return 1;
8984
8985 break;
8986
8987 case COMPLEX_TYPE:
8988 mode = TYPE_MODE (TREE_TYPE (type));
8989 if (!SCALAR_FLOAT_MODE_P (mode))
8990 return -1;
8991
8992 if (*modep == VOIDmode)
8993 *modep = mode;
8994
8995 if (*modep == mode)
8996 return 2;
8997
8998 break;
8999
9000 case VECTOR_TYPE:
9001 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
9002 return -1;
9003
9004 /* Use V4SImode as representative of all 128-bit vector types. */
9005 size = int_size_in_bytes (type);
9006 switch (size)
9007 {
9008 case 16:
9009 mode = V4SImode;
9010 break;
9011 default:
9012 return -1;
9013 }
9014
9015 if (*modep == VOIDmode)
9016 *modep = mode;
9017
9018 /* Vector modes are considered to be opaque: two vectors are
9019 equivalent for the purposes of being homogeneous aggregates
9020 if they are the same size. */
9021 if (*modep == mode)
9022 return 1;
9023
9024 break;
9025
9026 case ARRAY_TYPE:
9027 {
9028 int count;
9029 tree index = TYPE_DOMAIN (type);
9030
9031 /* Can't handle incomplete types nor sizes that are not
9032 fixed. */
9033 if (!COMPLETE_TYPE_P (type)
9034 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
9035 return -1;
9036
9037 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
9038 if (count == -1
9039 || !index
9040 || !TYPE_MAX_VALUE (index)
9041 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
9042 || !TYPE_MIN_VALUE (index)
9043 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
9044 || count < 0)
9045 return -1;
9046
9047 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
9048 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
9049
9050 /* There must be no padding. */
9051 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
9052 return -1;
9053
9054 return count;
9055 }
9056
9057 case RECORD_TYPE:
9058 {
9059 int count = 0;
9060 int sub_count;
9061 tree field;
9062
9063 /* Can't handle incomplete types nor sizes that are not
9064 fixed. */
9065 if (!COMPLETE_TYPE_P (type)
9066 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
9067 return -1;
9068
9069 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
9070 {
9071 if (TREE_CODE (field) != FIELD_DECL)
9072 continue;
9073
9074 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
9075 if (sub_count < 0)
9076 return -1;
9077 count += sub_count;
9078 }
9079
9080 /* There must be no padding. */
9081 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
9082 return -1;
9083
9084 return count;
9085 }
9086
9087 case UNION_TYPE:
9088 case QUAL_UNION_TYPE:
9089 {
9090 /* These aren't very interesting except in a degenerate case. */
9091 int count = 0;
9092 int sub_count;
9093 tree field;
9094
9095 /* Can't handle incomplete types nor sizes that are not
9096 fixed. */
9097 if (!COMPLETE_TYPE_P (type)
9098 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
9099 return -1;
9100
9101 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
9102 {
9103 if (TREE_CODE (field) != FIELD_DECL)
9104 continue;
9105
9106 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
9107 if (sub_count < 0)
9108 return -1;
9109 count = count > sub_count ? count : sub_count;
9110 }
9111
9112 /* There must be no padding. */
9113 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
9114 return -1;
9115
9116 return count;
9117 }
9118
9119 default:
9120 break;
9121 }
9122
9123 return -1;
9124 }
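/* Worked example (editorial addition) of the walk above: for
struct hfa_example below, the RECORD_TYPE case visits two REAL_TYPE
fields; the first sets *MODEP to DFmode, each counts as 1, and the
record yields count == 2 with no padding (128 bits == 2 * 64 bits).
struct mixed_example fails: its int field is neither floating point
nor vector, so the walk returns -1. Type names are hypothetical. */
#if 0
struct hfa_example { double x; double y; };   /* count == 2, mode DFmode */
struct mixed_example { double x; int i; };    /* count == -1 */
#endif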
9125
9126 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
9127 float or vector aggregate that shall be passed in FP/vector registers
9128 according to the ELFv2 ABI, return the homogeneous element mode in
9129 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
9130
9131 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
9132
9133 static bool
9134 rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
9135 machine_mode *elt_mode,
9136 int *n_elts)
9137 {
9138 /* Note that we do not accept complex types at the top level as
9139 homogeneous aggregates; these types are handled via the
9140 targetm.calls.split_complex_arg mechanism. Complex types
9141 can be elements of homogeneous aggregates, however. */
9142 if (DEFAULT_ABI == ABI_ELFv2 && type && AGGREGATE_TYPE_P (type))
9143 {
9144 machine_mode field_mode = VOIDmode;
9145 int field_count = rs6000_aggregate_candidate (type, &field_mode);
9146
9147 if (field_count > 0)
9148 {
9149 int n_regs = (SCALAR_FLOAT_MODE_P (field_mode)
9150 ? (GET_MODE_SIZE (field_mode) + 7) >> 3 : 1);
9151
9152 /* The ELFv2 ABI allows homogeneous aggregates to occupy
9153 up to AGGR_ARG_NUM_REG registers. */
9154 if (field_count * n_regs <= AGGR_ARG_NUM_REG)
9155 {
9156 if (elt_mode)
9157 *elt_mode = field_mode;
9158 if (n_elts)
9159 *n_elts = field_count;
9160 return true;
9161 }
9162 }
9163 }
9164
9165 if (elt_mode)
9166 *elt_mode = mode;
9167 if (n_elts)
9168 *n_elts = 1;
9169 return false;
9170 }
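/* Worked example (editorial addition): for struct hfa_example above,
field_mode is DFmode and field_count is 2, so n_regs is
(8 + 7) >> 3 == 1 per element and 2 * 1 <= AGGR_ARG_NUM_REG (8 under
ELFv2); *ELT_MODE becomes DFmode and *N_ELTS becomes 2. A struct of
nine doubles would need nine registers, so it is rejected and falls
back to *ELT_MODE == MODE and *N_ELTS == 1. */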
9171
9172 /* Return a nonzero value to indicate that the function value should be
9173 returned in memory, just as large structures always are. TYPE will be
9174 the data type of the value, and FNTYPE will be the type of the
9175 function doing the returning, or NULL for libcalls.
9176
9177 The AIX ABI for the RS/6000 specifies that all structures are
9178 returned in memory. The Darwin ABI does the same.
9179
9180 For the Darwin 64 Bit ABI, a function result can be returned in
9181 registers or in memory, depending on the size of the return data
9182 type. If it is returned in registers, the value occupies the same
9183 registers as it would if it were the first and only function
9184 argument. Otherwise, the function places its result in memory at
9185 the location pointed to by GPR3.
9186
9187 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
9188 but a draft put them in memory, and GCC used to implement the draft
9189 instead of the final standard. Therefore, aix_struct_return
9190 controls this instead of DEFAULT_ABI; V.4 targets needing backward
9191 compatibility can change DRAFT_V4_STRUCT_RET to override the
9192 default, and -m switches get the final word. See
9193 rs6000_option_override_internal for more details.
9194
9195 The PPC32 SVR4 ABI uses the IEEE 128-bit format for long double if
9196 128-bit long double support is enabled. These values are returned in memory.
9197
9198 int_size_in_bytes returns -1 for variable size objects, which go in
9199 memory always. The cast to unsigned makes -1 > 8. */
9200
9201 static bool
9202 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
9203 {
9204 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
9205 if (TARGET_MACHO
9206 && rs6000_darwin64_abi
9207 && TREE_CODE (type) == RECORD_TYPE
9208 && int_size_in_bytes (type) > 0)
9209 {
9210 CUMULATIVE_ARGS valcum;
9211 rtx valret;
9212
9213 valcum.words = 0;
9214 valcum.fregno = FP_ARG_MIN_REG;
9215 valcum.vregno = ALTIVEC_ARG_MIN_REG;
9216 /* Do a trial code generation as if this were going to be passed
9217 as an argument; if any part goes in memory, we return NULL. */
9218 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
9219 if (valret)
9220 return false;
9221 /* Otherwise fall through to more conventional ABI rules. */
9222 }
9223
9224 /* The ELFv2 ABI returns homogeneous FP/vector aggregates in registers. */
9225 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
9226 NULL, NULL))
9227 return false;
9228
9229 /* The ELFv2 ABI returns aggregates of up to 16 bytes in registers. */
9230 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
9231 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
9232 return false;
9233
9234 if (AGGREGATE_TYPE_P (type)
9235 && (aix_struct_return
9236 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
9237 return true;
9238
9239 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
9240 modes only exist for GCC vector types if -maltivec. */
9241 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
9242 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
9243 return false;
9244
9245 /* Return synthetic vectors in memory. */
9246 if (TREE_CODE (type) == VECTOR_TYPE
9247 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
9248 {
9249 static bool warned_for_return_big_vectors = false;
9250 if (!warned_for_return_big_vectors)
9251 {
9252 warning (0, "GCC vector returned by reference: "
9253 "non-standard ABI extension with no compatibility guarantee");
9254 warned_for_return_big_vectors = true;
9255 }
9256 return true;
9257 }
9258
9259 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && TYPE_MODE (type) == TFmode)
9260 return true;
9261
9262 return false;
9263 }
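/* Examples (editorial addition) of how the rules above combine under
the ELFv2 ABI, using the hypothetical types below:

    elfv2_hfa (homogeneous, two doubles)  -> FP registers
    elfv2_s16 (16 bytes, not homogeneous) -> GPRs r3/r4
    elfv2_s24 (24 bytes, not homogeneous) -> memory

Under the AIX ABI (aix_struct_return), all three are returned in
memory. */
#if 0
struct elfv2_hfa { double x; double y; };
struct elfv2_s16 { long a; long b; };
struct elfv2_s24 { long a; long b; long c; };
#endif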
9264
9265 /* Specify whether values returned in registers should be at the most
9266 significant end of a register. We want aggregates returned by
9267 value to match the way aggregates are passed to functions. */
9268
9269 static bool
9270 rs6000_return_in_msb (const_tree valtype)
9271 {
9272 return (DEFAULT_ABI == ABI_ELFv2
9273 && BYTES_BIG_ENDIAN
9274 && AGGREGATE_TYPE_P (valtype)
9275 && FUNCTION_ARG_PADDING (TYPE_MODE (valtype), valtype) == upward);
9276 }
9277
9278 #ifdef HAVE_AS_GNU_ATTRIBUTE
9279 /* Return TRUE if a call to function FNDECL may be one that
9280 potentially affects the function calling ABI of the object file. */
9281
9282 static bool
9283 call_ABI_of_interest (tree fndecl)
9284 {
9285 if (symtab->state == EXPANSION)
9286 {
9287 struct cgraph_node *c_node;
9288
9289 /* Libcalls are always interesting. */
9290 if (fndecl == NULL_TREE)
9291 return true;
9292
9293 /* Any call to an external function is interesting. */
9294 if (DECL_EXTERNAL (fndecl))
9295 return true;
9296
9297 /* Interesting functions that we are emitting in this object file. */
9298 c_node = cgraph_node::get (fndecl);
9299 c_node = c_node->ultimate_alias_target ();
9300 return !c_node->only_called_directly_p ();
9301 }
9302 return false;
9303 }
9304 #endif
9305
9306 /* Initialize a variable CUM of type CUMULATIVE_ARGS
9307 for a call to a function whose data type is FNTYPE.
9308 For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.
9309
9310 For incoming args we set the number of prototyped arguments high
9311 enough that we never return a PARALLEL. */
9312
9313 void
9314 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
9315 rtx libname ATTRIBUTE_UNUSED, int incoming,
9316 int libcall, int n_named_args,
9317 tree fndecl ATTRIBUTE_UNUSED,
9318 machine_mode return_mode ATTRIBUTE_UNUSED)
9319 {
9320 static CUMULATIVE_ARGS zero_cumulative;
9321
9322 *cum = zero_cumulative;
9323 cum->words = 0;
9324 cum->fregno = FP_ARG_MIN_REG;
9325 cum->vregno = ALTIVEC_ARG_MIN_REG;
9326 cum->prototype = (fntype && prototype_p (fntype));
9327 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
9328 ? CALL_LIBCALL : CALL_NORMAL);
9329 cum->sysv_gregno = GP_ARG_MIN_REG;
9330 cum->stdarg = stdarg_p (fntype);
9331
9332 cum->nargs_prototype = 0;
9333 if (incoming || cum->prototype)
9334 cum->nargs_prototype = n_named_args;
9335
9336 /* Check for a longcall attribute. */
9337 if ((!fntype && rs6000_default_long_calls)
9338 || (fntype
9339 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
9340 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
9341 cum->call_cookie |= CALL_LONG;
9342
9343 if (TARGET_DEBUG_ARG)
9344 {
9345 fprintf (stderr, "\ninit_cumulative_args:");
9346 if (fntype)
9347 {
9348 tree ret_type = TREE_TYPE (fntype);
9349 fprintf (stderr, " ret code = %s,",
9350 get_tree_code_name (TREE_CODE (ret_type)));
9351 }
9352
9353 if (cum->call_cookie & CALL_LONG)
9354 fprintf (stderr, " longcall,");
9355
9356 fprintf (stderr, " proto = %d, nargs = %d\n",
9357 cum->prototype, cum->nargs_prototype);
9358 }
9359
9360 #ifdef HAVE_AS_GNU_ATTRIBUTE
9361 if (DEFAULT_ABI == ABI_V4)
9362 {
9363 cum->escapes = call_ABI_of_interest (fndecl);
9364 if (cum->escapes)
9365 {
9366 tree return_type;
9367
9368 if (fntype)
9369 {
9370 return_type = TREE_TYPE (fntype);
9371 return_mode = TYPE_MODE (return_type);
9372 }
9373 else
9374 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
9375
9376 if (return_type != NULL)
9377 {
9378 if (TREE_CODE (return_type) == RECORD_TYPE
9379 && TYPE_TRANSPARENT_AGGR (return_type))
9380 {
9381 return_type = TREE_TYPE (first_field (return_type));
9382 return_mode = TYPE_MODE (return_type);
9383 }
9384 if (AGGREGATE_TYPE_P (return_type)
9385 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
9386 <= 8))
9387 rs6000_returns_struct = true;
9388 }
9389 if (SCALAR_FLOAT_MODE_P (return_mode))
9390 rs6000_passes_float = true;
9391 else if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)
9392 || SPE_VECTOR_MODE (return_mode))
9393 rs6000_passes_vector = true;
9394 }
9395 }
9396 #endif
9397
9398 if (fntype
9399 && !TARGET_ALTIVEC
9400 && TARGET_ALTIVEC_ABI
9401 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
9402 {
9403 error ("cannot return value in vector register because"
9404 " altivec instructions are disabled, use -maltivec"
9405 " to enable them");
9406 }
9407 }
9408 \f
9409 /* The mode the ABI uses for a word. This is not the same as word_mode
9410 for -m32 -mpowerpc64. This is used to implement various target hooks. */
9411
9412 static machine_mode
9413 rs6000_abi_word_mode (void)
9414 {
9415 return TARGET_32BIT ? SImode : DImode;
9416 }
9417
9418 /* On rs6000, function arguments are promoted, as are function return
9419 values. */
9420
9421 static machine_mode
9422 rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
9423 machine_mode mode,
9424 int *punsignedp ATTRIBUTE_UNUSED,
9425 const_tree, int)
9426 {
9427 PROMOTE_MODE (mode, *punsignedp, type);
9428
9429 return mode;
9430 }
9431
9432 /* Return true if TYPE must be passed on the stack and not in registers. */
9433
9434 static bool
9435 rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
9436 {
9437 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
9438 return must_pass_in_stack_var_size (mode, type);
9439 else
9440 return must_pass_in_stack_var_size_or_pad (mode, type);
9441 }
9442
9443 /* If defined, a C expression which determines whether, and in which
9444 direction, to pad out an argument with extra space. The value
9445 should be of type `enum direction': either `upward' to pad above
9446 the argument, `downward' to pad below, or `none' to inhibit
9447 padding.
9448
9449 For the AIX ABI structs are always stored left shifted in their
9450 argument slot. */
9451
9452 enum direction
9453 function_arg_padding (machine_mode mode, const_tree type)
9454 {
9455 #ifndef AGGREGATE_PADDING_FIXED
9456 #define AGGREGATE_PADDING_FIXED 0
9457 #endif
9458 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
9459 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
9460 #endif
9461
9462 if (!AGGREGATE_PADDING_FIXED)
9463 {
9464 /* GCC used to pass structures of the same size as integer types as
9465 if they were in fact integers, ignoring FUNCTION_ARG_PADDING,
9466 i.e. structures of size 1 or 2 (or 4 when TARGET_64BIT) were
9467 passed padded downward, except that -mstrict-align further
9468 muddied the water in that multi-component structures of 2 and 4
9469 bytes in size were passed padded upward.
9470
9471 The following arranges for best compatibility with previous
9472 versions of gcc, but removes the -mstrict-align dependency. */
9473 if (BYTES_BIG_ENDIAN)
9474 {
9475 HOST_WIDE_INT size = 0;
9476
9477 if (mode == BLKmode)
9478 {
9479 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
9480 size = int_size_in_bytes (type);
9481 }
9482 else
9483 size = GET_MODE_SIZE (mode);
9484
9485 if (size == 1 || size == 2 || size == 4)
9486 return downward;
9487 }
9488 return upward;
9489 }
9490
9491 if (AGGREGATES_PAD_UPWARD_ALWAYS)
9492 {
9493 if (type != 0 && AGGREGATE_TYPE_P (type))
9494 return upward;
9495 }
9496
9497 /* Fall back to the default. */
9498 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
9499 }
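/* Examples (editorial addition): on a big-endian target a 2-byte
struct is padded downward by the code above (it sits in the most
significant end of its slot, as an integer of that size would),
while a 3-byte struct is not in the {1, 2, 4} set and so is padded
upward. */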
9500
9501 /* If defined, a C expression that gives the alignment boundary, in bits,
9502 of an argument with the specified mode and type. If it is not defined,
9503 PARM_BOUNDARY is used for all arguments.
9504
9505 V.4 wants long longs and doubles to be double word aligned. Just
9506 testing the mode size is a boneheaded way to do this as it means
9507 that other types such as complex int are also double word aligned.
9508 However, we're stuck with this because changing the ABI might break
9509 existing library interfaces.
9510
9511 Doubleword align SPE vectors.
9512 Quadword align Altivec/VSX vectors.
9513 Quadword align large synthetic vector types. */
9514
9515 static unsigned int
9516 rs6000_function_arg_boundary (machine_mode mode, const_tree type)
9517 {
9518 machine_mode elt_mode;
9519 int n_elts;
9520
9521 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
9522
9523 if (DEFAULT_ABI == ABI_V4
9524 && (GET_MODE_SIZE (mode) == 8
9525 || (TARGET_HARD_FLOAT
9526 && TARGET_FPRS
9527 && (mode == TFmode || mode == TDmode))))
9528 return 64;
9529 else if (SPE_VECTOR_MODE (mode)
9530 || (type && TREE_CODE (type) == VECTOR_TYPE
9531 && int_size_in_bytes (type) >= 8
9532 && int_size_in_bytes (type) < 16))
9533 return 64;
9534 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
9535 || (type && TREE_CODE (type) == VECTOR_TYPE
9536 && int_size_in_bytes (type) >= 16))
9537 return 128;
9538
9539 /* Aggregate types that need > 8 byte alignment are quadword-aligned
9540 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
9541 -mcompat-align-parm is used. */
9542 if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
9543 || DEFAULT_ABI == ABI_ELFv2)
9544 && type && TYPE_ALIGN (type) > 64)
9545 {
9546 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
9547 or homogeneous float/vector aggregates here. We already handled
9548 vector aggregates above, but still need to check for float here. */
9549 bool aggregate_p = (AGGREGATE_TYPE_P (type)
9550 && !SCALAR_FLOAT_MODE_P (elt_mode));
9551
9552 /* We used to check for BLKmode instead of the above aggregate type
9553 check. Warn when this results in any difference to the ABI. */
9554 if (aggregate_p != (mode == BLKmode))
9555 {
9556 static bool warned;
9557 if (!warned && warn_psabi)
9558 {
9559 warned = true;
9560 inform (input_location,
9561 "the ABI of passing aggregates with %d-byte alignment"
9562 " has changed in GCC 5",
9563 (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
9564 }
9565 }
9566
9567 if (aggregate_p)
9568 return 128;
9569 }
9570
9571 /* Similar for the Darwin64 ABI. Note that for historical reasons we
9572 implement the "aggregate type" check as a BLKmode check here; this
9573 means certain aggregate types are in fact not aligned. */
9574 if (TARGET_MACHO && rs6000_darwin64_abi
9575 && mode == BLKmode
9576 && type && TYPE_ALIGN (type) > 64)
9577 return 128;
9578
9579 return PARM_BOUNDARY;
9580 }
9581
9582 /* The offset in words to the start of the parameter save area. */
9583
9584 static unsigned int
9585 rs6000_parm_offset (void)
9586 {
9587 return (DEFAULT_ABI == ABI_V4 ? 2
9588 : DEFAULT_ABI == ABI_ELFv2 ? 4
9589 : 6);
9590 }
9591
9592 /* For a function parm of MODE and TYPE, return the starting word in
9593 the parameter area. NWORDS of the parameter area are already used. */
9594
9595 static unsigned int
9596 rs6000_parm_start (machine_mode mode, const_tree type,
9597 unsigned int nwords)
9598 {
9599 unsigned int align;
9600
9601 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
9602 return nwords + (-(rs6000_parm_offset () + nwords) & align);
9603 }
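/* Worked example (editorial addition): under ELFv2 (parameter save
area offset of 4 words) on a 64-bit target, a quadword-aligned
argument with NWORDS == 3 words already used gives
align == 128 / 64 - 1 == 1, so the result is
3 + (-(4 + 3) & 1) == 4; parameter word 4 then sits at byte offset
(4 + 4) * 8 == 64 from the aligned stack base, which is 16-byte
aligned as required. */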
9604
9605 /* Compute the size (in words) of a function argument. */
9606
9607 static unsigned long
9608 rs6000_arg_size (machine_mode mode, const_tree type)
9609 {
9610 unsigned long size;
9611
9612 if (mode != BLKmode)
9613 size = GET_MODE_SIZE (mode);
9614 else
9615 size = int_size_in_bytes (type);
9616
9617 if (TARGET_32BIT)
9618 return (size + 3) >> 2;
9619 else
9620 return (size + 7) >> 3;
9621 }
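/* Examples (editorial addition): a 10-byte BLKmode argument occupies
(10 + 3) >> 2 == 3 words on a 32-bit target and (10 + 7) >> 3 == 2
doublewords on a 64-bit one; a DFmode argument occupies 2 words and
1 doubleword respectively. */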
9622 \f
9623 /* Use this to flush pending int fields. */
9624
9625 static void
9626 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
9627 HOST_WIDE_INT bitpos, int final)
9628 {
9629 unsigned int startbit, endbit;
9630 int intregs, intoffset;
9631 machine_mode mode;
9632
9633 /* Handle the situations where a float is taking up the first half
9634 of the GPR, and the other half is empty (typically due to
9635 alignment restrictions). We can detect this by an 8-byte-aligned
9636 int field, or by seeing that this is the final flush for this
9637 argument. Count the word and continue on. */
9638 if (cum->floats_in_gpr == 1
9639 && (cum->intoffset % 64 == 0
9640 || (cum->intoffset == -1 && final)))
9641 {
9642 cum->words++;
9643 cum->floats_in_gpr = 0;
9644 }
9645
9646 if (cum->intoffset == -1)
9647 return;
9648
9649 intoffset = cum->intoffset;
9650 cum->intoffset = -1;
9651 cum->floats_in_gpr = 0;
9652
9653 if (intoffset % BITS_PER_WORD != 0)
9654 {
9655 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
9656 MODE_INT, 0);
9657 if (mode == BLKmode)
9658 {
9659 /* We couldn't find an appropriate mode, which happens,
9660 e.g., in packed structs when there are 3 bytes to load.
9661 Move intoffset back to the beginning of the word in this
9662 case. */
9663 intoffset = intoffset & -BITS_PER_WORD;
9664 }
9665 }
9666
9667 startbit = intoffset & -BITS_PER_WORD;
9668 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
9669 intregs = (endbit - startbit) / BITS_PER_WORD;
9670 cum->words += intregs;
9671 /* words should be unsigned. */
9672 if ((unsigned) cum->words < (endbit / BITS_PER_WORD))
9673 {
9674 int pad = (endbit / BITS_PER_WORD) - cum->words;
9675 cum->words += pad;
9676 }
9677 }
9678
9679 /* The darwin64 ABI calls for us to recurse down through structs,
9680 looking for elements passed in registers. Unfortunately, we have
9681 to track int register count here also because of misalignments
9682 in powerpc alignment mode. */
9683
9684 static void
9685 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
9686 const_tree type,
9687 HOST_WIDE_INT startbitpos)
9688 {
9689 tree f;
9690
9691 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
9692 if (TREE_CODE (f) == FIELD_DECL)
9693 {
9694 HOST_WIDE_INT bitpos = startbitpos;
9695 tree ftype = TREE_TYPE (f);
9696 machine_mode mode;
9697 if (ftype == error_mark_node)
9698 continue;
9699 mode = TYPE_MODE (ftype);
9700
9701 if (DECL_SIZE (f) != 0
9702 && tree_fits_uhwi_p (bit_position (f)))
9703 bitpos += int_bit_position (f);
9704
9705 /* ??? FIXME: else assume zero offset. */
9706
9707 if (TREE_CODE (ftype) == RECORD_TYPE)
9708 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
9709 else if (USE_FP_FOR_ARG_P (cum, mode))
9710 {
9711 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
9712 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
9713 cum->fregno += n_fpregs;
9714 /* Single-precision floats present a special problem for
9715 us, because they are smaller than an 8-byte GPR, and so
9716 the structure-packing rules combined with the standard
9717 varargs behavior mean that we want to pack float/float
9718 and float/int combinations into a single register's
9719 space. This is complicated by the arg advance flushing,
9720 which works on arbitrarily large groups of int-type
9721 fields. */
9722 if (mode == SFmode)
9723 {
9724 if (cum->floats_in_gpr == 1)
9725 {
9726 /* Two floats in a word; count the word and reset
9727 the float count. */
9728 cum->words++;
9729 cum->floats_in_gpr = 0;
9730 }
9731 else if (bitpos % 64 == 0)
9732 {
9733 /* A float at the beginning of an 8-byte word;
9734 count it and put off adjusting cum->words until
9735 we see if an arg advance flush is going to do it
9736 for us. */
9737 cum->floats_in_gpr++;
9738 }
9739 else
9740 {
9741 /* The float is at the end of a word, preceded
9742 by integer fields, so the arg advance flush
9743 just above has already set cum->words and
9744 everything is taken care of. */
9745 }
9746 }
9747 else
9748 cum->words += n_fpregs;
9749 }
9750 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
9751 {
9752 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
9753 cum->vregno++;
9754 cum->words += 2;
9755 }
9756 else if (cum->intoffset == -1)
9757 cum->intoffset = bitpos;
9758 }
9759 }
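/* Worked example (editorial addition) of the SFmode packing above,
for the hypothetical Darwin64 struct below: field 'a' starts at
bitpos 0 (a multiple of 64), so floats_in_gpr becomes 1; field 'b'
then sees floats_in_gpr == 1, counts the shared word and resets it,
so both floats occupy a single GPR-sized slot. */
#if 0
struct darwin64_pack_example { float a; float b; };
#endif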
9760
9761 /* Check for an item that needs to be considered specially under the Darwin
9762 64-bit ABI. These are record types where the mode is BLKmode or the
9763 structure is 8 bytes in size. */
9764 static int
9765 rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
9766 {
9767 return rs6000_darwin64_abi
9768 && ((mode == BLKmode
9769 && TREE_CODE (type) == RECORD_TYPE
9770 && int_size_in_bytes (type) > 0)
9771 || (type && TREE_CODE (type) == RECORD_TYPE
9772 && int_size_in_bytes (type) == 8)) ? 1 : 0;
9773 }
9774
9775 /* Update the data in CUM to advance over an argument
9776 of mode MODE and data type TYPE.
9777 (TYPE is null for libcalls where that information may not be available.)
9778
9779 Note that for args passed by reference, function_arg will be called
9780 with MODE and TYPE set to that of the pointer to the arg, not the arg
9781 itself. */
9782
9783 static void
9784 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
9785 const_tree type, bool named, int depth)
9786 {
9787 machine_mode elt_mode;
9788 int n_elts;
9789
9790 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
9791
9792 /* Only tick off an argument if we're not recursing. */
9793 if (depth == 0)
9794 cum->nargs_prototype--;
9795
9796 #ifdef HAVE_AS_GNU_ATTRIBUTE
9797 if (DEFAULT_ABI == ABI_V4
9798 && cum->escapes)
9799 {
9800 if (SCALAR_FLOAT_MODE_P (mode))
9801 rs6000_passes_float = true;
9802 else if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
9803 rs6000_passes_vector = true;
9804 else if (SPE_VECTOR_MODE (mode)
9805 && !cum->stdarg
9806 && cum->sysv_gregno <= GP_ARG_MAX_REG)
9807 rs6000_passes_vector = true;
9808 }
9809 #endif
9810
9811 if (TARGET_ALTIVEC_ABI
9812 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
9813 || (type && TREE_CODE (type) == VECTOR_TYPE
9814 && int_size_in_bytes (type) == 16)))
9815 {
9816 bool stack = false;
9817
9818 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
9819 {
9820 cum->vregno += n_elts;
9821
9822 if (!TARGET_ALTIVEC)
9823 error ("cannot pass argument in vector register because"
9824 " altivec instructions are disabled, use -maltivec"
9825 " to enable them");
9826
9827 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
9828 even if it is going to be passed in a vector register.
9829 Darwin does the same for variable-argument functions. */
9830 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9831 && TARGET_64BIT)
9832 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
9833 stack = true;
9834 }
9835 else
9836 stack = true;
9837
9838 if (stack)
9839 {
9840 int align;
9841
9842 /* Vector parameters must be 16-byte aligned. In 32-bit
9843 mode this means we need to take into account the offset
9844 to the parameter save area. In 64-bit mode, they just
9845 have to start on an even word, since the parameter save
9846 area is 16-byte aligned. */
9847 if (TARGET_32BIT)
9848 align = -(rs6000_parm_offset () + cum->words) & 3;
9849 else
9850 align = cum->words & 1;
9851 cum->words += align + rs6000_arg_size (mode, type);
9852
9853 if (TARGET_DEBUG_ARG)
9854 {
9855 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
9856 cum->words, align);
9857 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
9858 cum->nargs_prototype, cum->prototype,
9859 GET_MODE_NAME (mode));
9860 }
9861 }
9862 }
9863 else if (TARGET_SPE_ABI && TARGET_SPE && SPE_VECTOR_MODE (mode)
9864 && !cum->stdarg
9865 && cum->sysv_gregno <= GP_ARG_MAX_REG)
9866 cum->sysv_gregno++;
9867
9868 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
9869 {
9870 int size = int_size_in_bytes (type);
9871 /* Variable sized types have size == -1 and are
9872 treated as if consisting entirely of ints.
9873 Pad to 16 byte boundary if needed. */
9874 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
9875 && (cum->words % 2) != 0)
9876 cum->words++;
9877 /* For varargs, we can just go up by the size of the struct. */
9878 if (!named)
9879 cum->words += (size + 7) / 8;
9880 else
9881 {
9882 /* It is tempting to say int register count just goes up by
9883 sizeof(type)/8, but this is wrong in a case such as
9884 { int; double; int; } [powerpc alignment]. We have to
9885 grovel through the fields for these too. */
9886 cum->intoffset = 0;
9887 cum->floats_in_gpr = 0;
9888 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
9889 rs6000_darwin64_record_arg_advance_flush (cum,
9890 size * BITS_PER_UNIT, 1);
9891 }
9892 if (TARGET_DEBUG_ARG)
9893 {
9894 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d, ",
9895 cum->words, TYPE_ALIGN (type), size);
9896 fprintf (stderr,
9897 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
9898 cum->nargs_prototype, cum->prototype,
9899 GET_MODE_NAME (mode));
9900 }
9901 }
9902 else if (DEFAULT_ABI == ABI_V4)
9903 {
9904 if (TARGET_HARD_FLOAT && TARGET_FPRS
9905 && ((TARGET_SINGLE_FLOAT && mode == SFmode)
9906 || (TARGET_DOUBLE_FLOAT && mode == DFmode)
9907 || (mode == TFmode && !TARGET_IEEEQUAD)
9908 || mode == SDmode || mode == DDmode || mode == TDmode))
9909 {
9910 /* _Decimal128 must use an even/odd register pair. This assumes
9911 that the register number is odd when fregno is odd. */
9912 if (mode == TDmode && (cum->fregno % 2) == 1)
9913 cum->fregno++;
9914
9915 if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
9916 <= FP_ARG_V4_MAX_REG)
9917 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
9918 else
9919 {
9920 cum->fregno = FP_ARG_V4_MAX_REG + 1;
9921 if (mode == DFmode || mode == TFmode
9922 || mode == DDmode || mode == TDmode)
9923 cum->words += cum->words & 1;
9924 cum->words += rs6000_arg_size (mode, type);
9925 }
9926 }
9927 else
9928 {
9929 int n_words = rs6000_arg_size (mode, type);
9930 int gregno = cum->sysv_gregno;
9931
9932 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
9933 (r7,r8) or (r9,r10), as is any other 2-word item such
9934 as complex int, due to a historical mistake. */
9935 if (n_words == 2)
9936 gregno += (1 - gregno) & 1;
9937
9938 /* Multi-reg args are not split between registers and stack. */
9939 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
9940 {
9941 /* Long long and SPE vectors are aligned on the stack.
9942 So are other 2-word items such as complex int, due to
9943 a historical mistake. */
9944 if (n_words == 2)
9945 cum->words += cum->words & 1;
9946 cum->words += n_words;
9947 }
9948
9949 /* Note: we continue to accumulate gregno even after we have started
9950 spilling to the stack; this lets expand_builtin_saveregs see that
9951 spilling to the stack has started. */
9952 cum->sysv_gregno = gregno + n_words;
9953 }
9954
9955 if (TARGET_DEBUG_ARG)
9956 {
9957 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
9958 cum->words, cum->fregno);
9959 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
9960 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
9961 fprintf (stderr, "mode = %4s, named = %d\n",
9962 GET_MODE_NAME (mode), named);
9963 }
9964 }
9965 else
9966 {
9967 int n_words = rs6000_arg_size (mode, type);
9968 int start_words = cum->words;
9969 int align_words = rs6000_parm_start (mode, type, start_words);
9970
9971 cum->words = align_words + n_words;
9972
9973 if (SCALAR_FLOAT_MODE_P (elt_mode)
9974 && TARGET_HARD_FLOAT && TARGET_FPRS)
9975 {
9976 /* _Decimal128 must be passed in an even/odd float register pair.
9977 This assumes that the register number is odd when fregno is
9978 odd. */
9979 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
9980 cum->fregno++;
9981 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
9982 }
9983
9984 if (TARGET_DEBUG_ARG)
9985 {
9986 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
9987 cum->words, cum->fregno);
9988 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
9989 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
9990 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
9991 named, align_words - start_words, depth);
9992 }
9993 }
9994 }
9995
9996 static void
9997 rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
9998 const_tree type, bool named)
9999 {
10000 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
10001 0);
10002 }
10003
10004 static rtx
10005 spe_build_register_parallel (machine_mode mode, int gregno)
10006 {
10007 rtx r1, r3, r5, r7;
10008
10009 switch (mode)
10010 {
10011 case DFmode:
10012 r1 = gen_rtx_REG (DImode, gregno);
10013 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
10014 return gen_rtx_PARALLEL (mode, gen_rtvec (1, r1));
10015
10016 case DCmode:
10017 case TFmode:
10018 r1 = gen_rtx_REG (DImode, gregno);
10019 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
10020 r3 = gen_rtx_REG (DImode, gregno + 2);
10021 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
10022 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r3));
10023
10024 case TCmode:
10025 r1 = gen_rtx_REG (DImode, gregno);
10026 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
10027 r3 = gen_rtx_REG (DImode, gregno + 2);
10028 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
10029 r5 = gen_rtx_REG (DImode, gregno + 4);
10030 r5 = gen_rtx_EXPR_LIST (VOIDmode, r5, GEN_INT (16));
10031 r7 = gen_rtx_REG (DImode, gregno + 6);
10032 r7 = gen_rtx_EXPR_LIST (VOIDmode, r7, GEN_INT (24));
10033 return gen_rtx_PARALLEL (mode, gen_rtvec (4, r1, r3, r5, r7));
10034
10035 default:
10036 gcc_unreachable ();
10037 }
10038 }
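/* Illustrative result (editorial addition): for TFmode with
gregno == 5, the function above builds

    (parallel:TF [(expr_list (reg:DI 5) (const_int 0))
                  (expr_list (reg:DI 7) (const_int 8))])

i.e. on this 32-bit target each DImode register spans a GPR pair, so
the value lives in r5/r6 and r7/r8 at byte offsets 0 and 8. */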
10039
10040 /* Determine where to put a SIMD argument on the SPE. */
10041 static rtx
10042 rs6000_spe_function_arg (const CUMULATIVE_ARGS *cum, machine_mode mode,
10043 const_tree type)
10044 {
10045 int gregno = cum->sysv_gregno;
10046
10047 /* On E500 v2, double arithmetic is done on the full 64-bit GPR, but
10048 doubles are passed and returned in a pair of GPRs for ABI compatibility. */
10049 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
10050 || mode == DCmode || mode == TCmode))
10051 {
10052 int n_words = rs6000_arg_size (mode, type);
10053
10054 /* Doubles go in an odd/even register pair (r5/r6, etc). */
10055 if (mode == DFmode)
10056 gregno += (1 - gregno) & 1;
10057
10058 /* Multi-reg args are not split between registers and stack. */
10059 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
10060 return NULL_RTX;
10061
10062 return spe_build_register_parallel (mode, gregno);
10063 }
10064 if (cum->stdarg)
10065 {
10066 int n_words = rs6000_arg_size (mode, type);
10067
10068 /* SPE vectors are put in odd registers. */
10069 if (n_words == 2 && (gregno & 1) == 0)
10070 gregno += 1;
10071
10072 if (gregno + n_words - 1 <= GP_ARG_MAX_REG)
10073 {
10074 rtx r1, r2;
10075 machine_mode m = SImode;
10076
10077 r1 = gen_rtx_REG (m, gregno);
10078 r1 = gen_rtx_EXPR_LIST (m, r1, const0_rtx);
10079 r2 = gen_rtx_REG (m, gregno + 1);
10080 r2 = gen_rtx_EXPR_LIST (m, r2, GEN_INT (4));
10081 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
10082 }
10083 else
10084 return NULL_RTX;
10085 }
10086 else
10087 {
10088 if (gregno <= GP_ARG_MAX_REG)
10089 return gen_rtx_REG (mode, gregno);
10090 else
10091 return NULL_RTX;
10092 }
10093 }
10094
10095 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
10096 structure between cum->intoffset and bitpos to integer registers. */
10097
10098 static void
10099 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
10100 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
10101 {
10102 machine_mode mode;
10103 unsigned int regno;
10104 unsigned int startbit, endbit;
10105 int this_regno, intregs, intoffset;
10106 rtx reg;
10107
10108 if (cum->intoffset == -1)
10109 return;
10110
10111 intoffset = cum->intoffset;
10112 cum->intoffset = -1;
10113
10114 /* If this is the trailing part of a word, try to load only that
10115 much into the register. Otherwise load the whole register. Note
10116 that in the latter case we may pick up unwanted bits. It's not a
10117 problem at the moment, but we may wish to revisit this. */
10118
10119 if (intoffset % BITS_PER_WORD != 0)
10120 {
10121 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
10122 MODE_INT, 0);
10123 if (mode == BLKmode)
10124 {
10125 /* We couldn't find an appropriate mode, which happens,
10126 e.g., in packed structs when there are 3 bytes to load.
10127 Move intoffset back to the beginning of the word in this
10128 case. */
10129 intoffset = intoffset & -BITS_PER_WORD;
10130 mode = word_mode;
10131 }
10132 }
10133 else
10134 mode = word_mode;
10135
10136 startbit = intoffset & -BITS_PER_WORD;
10137 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
10138 intregs = (endbit - startbit) / BITS_PER_WORD;
10139 this_regno = cum->words + intoffset / BITS_PER_WORD;
10140
10141 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
10142 cum->use_stack = 1;
10143
10144 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
10145 if (intregs <= 0)
10146 return;
10147
10148 intoffset /= BITS_PER_UNIT;
10149 do
10150 {
10151 regno = GP_ARG_MIN_REG + this_regno;
10152 reg = gen_rtx_REG (mode, regno);
10153 rvec[(*k)++] =
10154 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
10155
10156 this_regno += 1;
10157 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
10158 mode = word_mode;
10159 intregs -= 1;
10160 }
10161 while (intregs > 0);
10162 }
10163
10164 /* Recursive workhorse for the following. */
10165
10166 static void
10167 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
10168 HOST_WIDE_INT startbitpos, rtx rvec[],
10169 int *k)
10170 {
10171 tree f;
10172
10173 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
10174 if (TREE_CODE (f) == FIELD_DECL)
10175 {
10176 HOST_WIDE_INT bitpos = startbitpos;
10177 tree ftype = TREE_TYPE (f);
10178 machine_mode mode;
10179 if (ftype == error_mark_node)
10180 continue;
10181 mode = TYPE_MODE (ftype);
10182
10183 if (DECL_SIZE (f) != 0
10184 && tree_fits_uhwi_p (bit_position (f)))
10185 bitpos += int_bit_position (f);
10186
10187 /* ??? FIXME: else assume zero offset. */
10188
10189 if (TREE_CODE (ftype) == RECORD_TYPE)
10190 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
10191 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
10192 {
10193 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
10194 #if 0
10195 switch (mode)
10196 {
10197 case SCmode: mode = SFmode; break;
10198 case DCmode: mode = DFmode; break;
10199 case TCmode: mode = TFmode; break;
10200 default: break;
10201 }
10202 #endif
10203 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
10204 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
10205 {
10206 gcc_assert (cum->fregno == FP_ARG_MAX_REG
10207 && (mode == TFmode || mode == TDmode));
10208 /* Long double or _Decimal128 split over regs and memory. */
10209 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
10210 cum->use_stack = 1;
10211 }
10212 rvec[(*k)++]
10213 = gen_rtx_EXPR_LIST (VOIDmode,
10214 gen_rtx_REG (mode, cum->fregno++),
10215 GEN_INT (bitpos / BITS_PER_UNIT));
10216 if (mode == TFmode || mode == TDmode)
10217 cum->fregno++;
10218 }
10219 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
10220 {
10221 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
10222 rvec[(*k)++]
10223 = gen_rtx_EXPR_LIST (VOIDmode,
10224 gen_rtx_REG (mode, cum->vregno++),
10225 GEN_INT (bitpos / BITS_PER_UNIT));
10226 }
10227 else if (cum->intoffset == -1)
10228 cum->intoffset = bitpos;
10229 }
10230 }
10231
10232 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
10233 the register(s) to be used for each field and subfield of a struct
10234 being passed by value, along with the offset at which the
10235 register's value may be found in the block. FP fields go in FP
10236 registers, vector fields go in vector registers, and everything
10237 else goes in int registers, packed as in memory.
10238
10239 This code is also used for function return values. RETVAL indicates
10240 whether this is the case.
10241
10242 Much of this is taken from the SPARC V9 port, which has a similar
10243 calling convention. */
10244
10245 static rtx
10246 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
10247 bool named, bool retval)
10248 {
10249 rtx rvec[FIRST_PSEUDO_REGISTER];
10250 int k = 1, kbase = 1;
10251 HOST_WIDE_INT typesize = int_size_in_bytes (type);
10252 /* This is a copy; modifications are not visible to our caller. */
10253 CUMULATIVE_ARGS copy_cum = *orig_cum;
10254 CUMULATIVE_ARGS *cum = &copy_cum;
10255
10256 /* Pad to 16 byte boundary if needed. */
10257 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
10258 && (cum->words % 2) != 0)
10259 cum->words++;
10260
10261 cum->intoffset = 0;
10262 cum->use_stack = 0;
10263 cum->named = named;
10264
10265 /* Put entries into rvec[] for individual FP and vector fields, and
10266 for the chunks of memory that go in int regs. Note we start at
10267 element 1; 0 is reserved for an indication of using memory, and
10268 may or may not be filled in below. */
10269 rs6000_darwin64_record_arg_recurse (cum, type, /* startbitpos= */ 0, rvec, &k);
10270 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
10271
10272 /* If any part of the struct went on the stack put all of it there.
10273 This hack is because the generic code for
10274 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
10275 parts of the struct are not at the beginning. */
10276 if (cum->use_stack)
10277 {
10278 if (retval)
10279 return NULL_RTX; /* doesn't go in registers at all */
10280 kbase = 0;
10281 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
10282 }
10283 if (k > 1 || cum->use_stack)
10284 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
10285 else
10286 return NULL_RTX;
10287 }
10288
10289 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
10290
10291 static rtx
10292 rs6000_mixed_function_arg (machine_mode mode, const_tree type,
10293 int align_words)
10294 {
10295 int n_units;
10296 int i, k;
10297 rtx rvec[GP_ARG_NUM_REG + 1];
10298
10299 if (align_words >= GP_ARG_NUM_REG)
10300 return NULL_RTX;
10301
10302 n_units = rs6000_arg_size (mode, type);
10303
10304 /* Optimize the simple case where the arg fits in one gpr, except in
10305 the case of BLKmode due to assign_parms assuming that registers are
10306 BITS_PER_WORD wide. */
10307 if (n_units == 0
10308 || (n_units == 1 && mode != BLKmode))
10309 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
10310
10311 k = 0;
10312 if (align_words + n_units > GP_ARG_NUM_REG)
10313 /* Not all of the arg fits in gprs. Say that it goes in memory too,
10314 using a magic NULL_RTX component.
10315 This is not strictly correct. Only some of the arg belongs in
10316 memory, not all of it. However, the normal scheme using
10317 function_arg_partial_nregs can result in unusual subregs, e.g.
10318 (subreg:SI (reg:DF) 4), which are not handled well. The code to
10319 store the whole arg to memory is often more efficient than code
10320 to store pieces, and we know that space is available in the right
10321 place for the whole arg. */
10322 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
10323
10324 i = 0;
10325 do
10326 {
10327 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
10328 rtx off = GEN_INT (i++ * 4);
10329 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
10330 }
10331 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
10332
10333 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
10334 }
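/* Illustrative result (editorial addition): with -m32 -mpowerpc64, a
DFmode argument at align_words == 7 has n_units == 2 but only GPR
r10 left, so the PARALLEL gets the NULL_RTX memory marker plus
(reg:SI 10) at offset 0; on a big-endian target the high word goes
in r10 and the low word goes to memory. */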
10335
10336 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
10337 but must also be copied into the parameter save area starting at
10338 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
10339 to the GPRs and/or memory. Return the number of elements used. */
10340
10341 static int
10342 rs6000_psave_function_arg (machine_mode mode, const_tree type,
10343 int align_words, rtx *rvec)
10344 {
10345 int k = 0;
10346
10347 if (align_words < GP_ARG_NUM_REG)
10348 {
10349 int n_words = rs6000_arg_size (mode, type);
10350
10351 if (align_words + n_words > GP_ARG_NUM_REG
10352 || mode == BLKmode
10353 || (TARGET_32BIT && TARGET_POWERPC64))
10354 {
10355 /* If this is partially on the stack, then we only
10356 include the portion actually in registers here. */
10357 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
10358 int i = 0;
10359
10360 if (align_words + n_words > GP_ARG_NUM_REG)
10361 {
10362 /* Not all of the arg fits in gprs. Say that it goes in memory
10363 too, using a magic NULL_RTX component. Also see comment in
10364 rs6000_mixed_function_arg for why the normal
10365 function_arg_partial_nregs scheme doesn't work in this case. */
10366 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
10367 }
10368
10369 do
10370 {
10371 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
10372 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
10373 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
10374 }
10375 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
10376 }
10377 else
10378 {
10379 /* The whole arg fits in gprs. */
10380 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
10381 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
10382 }
10383 }
10384 else
10385 {
10386 /* It's entirely in memory. */
10387 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
10388 }
10389
10390 return k;
10391 }
10392
10393 /* RVEC is a vector of K components of an argument of mode MODE.
10394 Construct the final function_arg return value from it. */
10395
10396 static rtx
10397 rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
10398 {
10399 gcc_assert (k >= 1);
10400
10401 /* Avoid returning a PARALLEL in the trivial cases. */
10402 if (k == 1)
10403 {
10404 if (XEXP (rvec[0], 0) == NULL_RTX)
10405 return NULL_RTX;
10406
10407 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
10408 return XEXP (rvec[0], 0);
10409 }
10410
10411 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
10412 }
10413
10414 /* Determine where to put an argument to a function.
10415 Value is zero to push the argument on the stack,
10416 or a hard register in which to store the argument.
10417
10418 MODE is the argument's machine mode.
10419 TYPE is the data type of the argument (as a tree).
10420 This is null for libcalls where that information may
10421 not be available.
10422 CUM is a variable of type CUMULATIVE_ARGS which gives info about
10423 the preceding args and about the function being called. It is
10424 not modified in this routine.
10425 NAMED is nonzero if this argument is a named parameter
10426 (otherwise it is an extra parameter matching an ellipsis).
10427
10428 On RS/6000 the first eight words of non-FP args are normally in registers
10429 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
10430 Under V.4, the first 8 FP args are in registers.
10431
10432 If this is floating-point and no prototype is specified, we use
10433 both an FP and integer register (or possibly FP reg and stack). Library
10434 functions (when CALL_LIBCALL is set) always have the proper types for args,
10435 so we can pass the FP value just in one register. emit_library_function
10436 doesn't support PARALLEL anyway.
10437
10438 Note that for args passed by reference, function_arg will be called
10439 with MODE and TYPE set to that of the pointer to the arg, not the arg
10440 itself. */
10441
10442 static rtx
10443 rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
10444 const_tree type, bool named)
10445 {
10446 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
10447 enum rs6000_abi abi = DEFAULT_ABI;
10448 machine_mode elt_mode;
10449 int n_elts;
10450
10451 /* Return a marker to indicate whether the bit in CR1 that V.4 uses
10452 to say FP args were passed in registers needs to be set or cleared.
10453 Assume that we don't need the marker for software floating point,
10454 or compiler generated library calls. */
10455 if (mode == VOIDmode)
10456 {
10457 if (abi == ABI_V4
10458 && (cum->call_cookie & CALL_LIBCALL) == 0
10459 && (cum->stdarg
10460 || (cum->nargs_prototype < 0
10461 && (cum->prototype || TARGET_NO_PROTOTYPE))))
10462 {
10463 /* For the SPE, we need to crxor CR6 always. */
10464 if (TARGET_SPE_ABI)
10465 return GEN_INT (cum->call_cookie | CALL_V4_SET_FP_ARGS);
10466 else if (TARGET_HARD_FLOAT && TARGET_FPRS)
10467 return GEN_INT (cum->call_cookie
10468 | ((cum->fregno == FP_ARG_MIN_REG)
10469 ? CALL_V4_SET_FP_ARGS
10470 : CALL_V4_CLEAR_FP_ARGS));
10471 }
10472
10473 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
10474 }
10475
10476 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
10477
10478 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
10479 {
10480 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval=*/ false);
10481 if (rslt != NULL_RTX)
10482 return rslt;
10483 /* Else fall through to usual handling. */
10484 }
10485
10486 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
10487 {
10488 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
10489 rtx r, off;
10490 int i, k = 0;
10491
10492 /* Do we also need to pass this argument in the parameter
10493 save area? */
10494 if (TARGET_64BIT && ! cum->prototype)
10495 {
10496 int align_words = (cum->words + 1) & ~1;
10497 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
10498 }
10499
10500 /* Describe where this argument goes in the vector registers. */
10501 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
10502 {
10503 r = gen_rtx_REG (elt_mode, cum->vregno + i);
10504 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
10505 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
10506 }
10507
10508 return rs6000_finish_function_arg (mode, rvec, k);
10509 }
10510 else if (TARGET_ALTIVEC_ABI
10511 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
10512 || (type && TREE_CODE (type) == VECTOR_TYPE
10513 && int_size_in_bytes (type) == 16)))
10514 {
10515 if (named || abi == ABI_V4)
10516 return NULL_RTX;
10517 else
10518 {
10519 /* Vector parameters to varargs functions under AIX or Darwin
10520 get passed in memory and possibly also in GPRs. */
10521 int align, align_words, n_words;
10522 machine_mode part_mode;
10523
10524 /* Vector parameters must be 16-byte aligned. In 32-bit
10525 mode this means we need to take into account the offset
10526 to the parameter save area. In 64-bit mode, they just
10527 have to start on an even word, since the parameter save
10528 area is 16-byte aligned. */
10529 if (TARGET_32BIT)
10530 align = -(rs6000_parm_offset () + cum->words) & 3;
10531 else
10532 align = cum->words & 1;
10533 align_words = cum->words + align;
10534
10535 /* Out of registers? Memory, then. */
10536 if (align_words >= GP_ARG_NUM_REG)
10537 return NULL_RTX;
10538
10539 if (TARGET_32BIT && TARGET_POWERPC64)
10540 return rs6000_mixed_function_arg (mode, type, align_words);
10541
10542 /* The vector value goes in GPRs. Only the part of the
10543 value in GPRs is reported here. */
10544 part_mode = mode;
10545 n_words = rs6000_arg_size (mode, type);
10546 if (align_words + n_words > GP_ARG_NUM_REG)
10547 /* Fortunately, there are only two possibilities: the value
10548 is either wholly in GPRs or half in GPRs and half not. */
10549 part_mode = DImode;
10550
10551 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
10552 }
10553 }
10554 else if (TARGET_SPE_ABI && TARGET_SPE
10555 && (SPE_VECTOR_MODE (mode)
10556 || (TARGET_E500_DOUBLE && (mode == DFmode
10557 || mode == DCmode
10558 || mode == TFmode
10559 || mode == TCmode))))
10560 return rs6000_spe_function_arg (cum, mode, type);
10561
10562 else if (abi == ABI_V4)
10563 {
10564 if (TARGET_HARD_FLOAT && TARGET_FPRS
10565 && ((TARGET_SINGLE_FLOAT && mode == SFmode)
10566 || (TARGET_DOUBLE_FLOAT && mode == DFmode)
10567 || (mode == TFmode && !TARGET_IEEEQUAD)
10568 || mode == SDmode || mode == DDmode || mode == TDmode))
10569 {
10570 /* _Decimal128 must use an even/odd register pair. This assumes
10571 that the register number is odd when fregno is odd. */
10572 if (mode == TDmode && (cum->fregno % 2) == 1)
10573 cum->fregno++;
10574
10575 if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
10576 <= FP_ARG_V4_MAX_REG)
10577 return gen_rtx_REG (mode, cum->fregno);
10578 else
10579 return NULL_RTX;
10580 }
10581 else
10582 {
10583 int n_words = rs6000_arg_size (mode, type);
10584 int gregno = cum->sysv_gregno;
10585
10586 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
10587 (r7,r8) or (r9,r10), as is any other 2-word item such
10588 as complex int due to a historical mistake. */
10589 if (n_words == 2)
10590 gregno += (1 - gregno) & 1;
10591
10592 /* Multi-reg args are not split between registers and stack. */
10593 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
10594 return NULL_RTX;
10595
10596 if (TARGET_32BIT && TARGET_POWERPC64)
10597 return rs6000_mixed_function_arg (mode, type,
10598 gregno - GP_ARG_MIN_REG);
10599 return gen_rtx_REG (mode, gregno);
10600 }
10601 }
10602 else
10603 {
10604 int align_words = rs6000_parm_start (mode, type, cum->words);
10605
10606 /* _Decimal128 must be passed in an even/odd float register pair.
10607 This assumes that the register number is odd when fregno is odd. */
10608 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
10609 cum->fregno++;
10610
10611 if (USE_FP_FOR_ARG_P (cum, elt_mode))
10612 {
10613 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
10614 rtx r, off;
10615 int i, k = 0;
10616 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
10617 int fpr_words;
10618
10619 /* Do we also need to pass this argument in the parameter
10620 save area? */
10621 if (type && (cum->nargs_prototype <= 0
10622 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
10623 && TARGET_XL_COMPAT
10624 && align_words >= GP_ARG_NUM_REG)))
10625 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
10626
10627 /* Describe where this argument goes in the fprs. */
10628 for (i = 0; i < n_elts
10629 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
10630 {
10631 /* Check if the argument is split over registers and memory.
10632 This can only ever happen for long double or _Decimal128;
10633 complex types are handled via split_complex_arg. */
10634 machine_mode fmode = elt_mode;
10635 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
10636 {
10637 gcc_assert (fmode == TFmode || fmode == TDmode);
10638 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
10639 }
10640
10641 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
10642 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
10643 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
10644 }
10645
10646 /* If there were not enough FPRs to hold the argument, the rest
10647 usually goes into memory. However, if the current position
10648 is still within the register parameter area, a portion may
10649 actually have to go into GPRs.
10650
10651 Note that it may happen that the portion of the argument
10652 passed in the first "half" of the first GPR was already
10653 passed in the last FPR as well.
10654
10655 For unnamed arguments, we already set up GPRs to cover the
10656 whole argument in rs6000_psave_function_arg, so there is
10657 nothing further to do at this point. */
10658 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
10659 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
10660 && cum->nargs_prototype > 0)
10661 {
10662 static bool warned;
10663
10664 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
10665 int n_words = rs6000_arg_size (mode, type);
10666
10667 align_words += fpr_words;
10668 n_words -= fpr_words;
10669
10670 do
10671 {
10672 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
10673 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
10674 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
10675 }
10676 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
10677
10678 if (!warned && warn_psabi)
10679 {
10680 warned = true;
10681 inform (input_location,
10682 "the ABI of passing homogeneous float aggregates"
10683 " has changed in GCC 5");
10684 }
10685 }
10686
10687 return rs6000_finish_function_arg (mode, rvec, k);
10688 }
10689 else if (align_words < GP_ARG_NUM_REG)
10690 {
10691 if (TARGET_32BIT && TARGET_POWERPC64)
10692 return rs6000_mixed_function_arg (mode, type, align_words);
10693
10694 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
10695 }
10696 else
10697 return NULL_RTX;
10698 }
10699 }
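/* A worked example, valid only under the stated assumptions (64-bit
   AIX-style parameter passing, hard float, prototype in scope):

     void f (double a, int b, double c);

   'a' goes in f1, 'b' in r4, and 'c' in f2.  'b' skips r3 because 'a'
   already consumed the first doubleword of the parameter area; GPR
   slots are used up in step with the argument words even when the
   value itself travels in an FPR.  */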
10700 \f
10701 /* For an arg passed partly in registers and partly in memory, this is
10702 the number of bytes passed in registers. For args passed entirely in
10703 registers or entirely in memory, zero. When an arg is described by a
10704 PARALLEL, perhaps using more than one register type, this function
10705 returns the number of bytes used by the first element of the PARALLEL. */
10706
10707 static int
10708 rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
10709 tree type, bool named)
10710 {
10711 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
10712 bool passed_in_gprs = true;
10713 int ret = 0;
10714 int align_words;
10715 machine_mode elt_mode;
10716 int n_elts;
10717
10718 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
10719
10720 if (DEFAULT_ABI == ABI_V4)
10721 return 0;
10722
10723 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
10724 {
10725 /* If we are passing this arg in the fixed parameter save area
10726 (gprs or memory) as well as VRs, we do not use the partial
10727 bytes mechanism; instead, rs6000_function_arg will return a
10728 PARALLEL including a memory element as necessary. */
10729 if (TARGET_64BIT && ! cum->prototype)
10730 return 0;
10731
10732 /* Otherwise, we pass in VRs only. Check for partial copies. */
10733 passed_in_gprs = false;
10734 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
10735 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
10736 }
10737
10738 /* In this complicated case we just disable the partial_nregs code. */
10739 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
10740 return 0;
10741
10742 align_words = rs6000_parm_start (mode, type, cum->words);
10743
10744 if (USE_FP_FOR_ARG_P (cum, elt_mode))
10745 {
10746 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
10747
10748 /* If we are passing this arg in the fixed parameter save area
10749 (gprs or memory) as well as FPRs, we do not use the partial
10750 bytes mechanism; instead, rs6000_function_arg will return a
10751 PARALLEL including a memory element as necessary. */
10752 if (type
10753 && (cum->nargs_prototype <= 0
10754 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
10755 && TARGET_XL_COMPAT
10756 && align_words >= GP_ARG_NUM_REG)))
10757 return 0;
10758
10759 /* Otherwise, we pass in FPRs only. Check for partial copies. */
10760 passed_in_gprs = false;
10761 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
10762 {
10763 /* Compute number of bytes / words passed in FPRs. If there
10764 is still space available in the register parameter area
10765 *after* that amount, a part of the argument will be passed
10766 in GPRs. In that case, the total amount passed in any
10767 registers is equal to the amount that would have been passed
10768 in GPRs if everything were passed there, so we fall back to
10769 the GPR code below to compute the appropriate value. */
10770 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
10771 * MIN (8, GET_MODE_SIZE (elt_mode)));
10772 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
10773
10774 if (align_words + fpr_words < GP_ARG_NUM_REG)
10775 passed_in_gprs = true;
10776 else
10777 ret = fpr;
10778 }
10779 }
10780
10781 if (passed_in_gprs
10782 && align_words < GP_ARG_NUM_REG
10783 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
10784 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
10785
10786 if (ret != 0 && TARGET_DEBUG_ARG)
10787 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
10788
10789 return ret;
10790 }
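/* A hypothetical worked example on a 64-bit target: a 16-byte argument
   whose first doubleword lands in the last argument GPR has
   align_words == GP_ARG_NUM_REG - 1, so the computation above gives

     ret = (GP_ARG_NUM_REG - align_words) * 8 == 1 * 8 == 8

   i.e. 8 bytes in registers and the remaining 8 bytes on the stack.  */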
10791 \f
10792 /* A C expression that indicates when an argument must be passed by
10793 reference. If nonzero for an argument, a copy of that argument is
10794 made in memory and a pointer to the argument is passed instead of
10795 the argument itself. The pointer is passed in whatever way is
10796 appropriate for passing a pointer to that type.
10797
10798 Under V.4, aggregates and long double are passed by reference.
10799
10800 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
10801 reference unless the AltiVec vector extension ABI is in force.
10802
10803 As an extension to all ABIs, variable sized types are passed by
10804 reference. */
10805
10806 static bool
10807 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
10808 machine_mode mode, const_tree type,
10809 bool named ATTRIBUTE_UNUSED)
10810 {
10811 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && mode == TFmode)
10812 {
10813 if (TARGET_DEBUG_ARG)
10814 fprintf (stderr, "function_arg_pass_by_reference: V4 long double\n");
10815 return 1;
10816 }
10817
10818 if (!type)
10819 return 0;
10820
10821 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
10822 {
10823 if (TARGET_DEBUG_ARG)
10824 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
10825 return 1;
10826 }
10827
10828 if (int_size_in_bytes (type) < 0)
10829 {
10830 if (TARGET_DEBUG_ARG)
10831 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
10832 return 1;
10833 }
10834
10835 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
10836 modes only exist for GCC vector types if -maltivec. */
10837 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
10838 {
10839 if (TARGET_DEBUG_ARG)
10840 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
10841 return 1;
10842 }
10843
10844 /* Pass synthetic vectors in memory. */
10845 if (TREE_CODE (type) == VECTOR_TYPE
10846 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
10847 {
10848 static bool warned_for_pass_big_vectors = false;
10849 if (TARGET_DEBUG_ARG)
10850 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
10851 if (!warned_for_pass_big_vectors)
10852 {
10853 warning (0, "GCC vector passed by reference: "
10854 "non-standard ABI extension with no compatibility guarantee");
10855 warned_for_pass_big_vectors = true;
10856 }
10857 return 1;
10858 }
10859
10860 return 0;
10861 }
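/* A minimal illustration (the types here are hypothetical): under the
   V.4 ABI,

     struct s { int x, y; };
     void f (struct s arg);

   is compiled as if it were  void f (struct s *arg)  with the caller
   making a temporary copy, because AGGREGATE_TYPE_P holds for
   'struct s'.  A scalar such as 'double' still goes by value.  */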
10862
10863 /* Process parameter of type TYPE after ARGS_SO_FAR parameters were
10864 already processed. Return true if the parameter must be passed
10865 (fully or partially) on the stack. */
10866
10867 static bool
10868 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
10869 {
10870 machine_mode mode;
10871 int unsignedp;
10872 rtx entry_parm;
10873
10874 /* Catch errors. */
10875 if (type == NULL || type == error_mark_node)
10876 return true;
10877
10878 /* Handle types with no storage requirement. */
10879 if (TYPE_MODE (type) == VOIDmode)
10880 return false;
10881
10882 /* Handle complex types: the real and imaginary parts are passed separately, so check (and advance past) the element type twice. */
10883 if (TREE_CODE (type) == COMPLEX_TYPE)
10884 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
10885 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
10886
10887 /* Handle transparent aggregates. */
10888 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
10889 && TYPE_TRANSPARENT_AGGR (type))
10890 type = TREE_TYPE (first_field (type));
10891
10892 /* See if this arg was passed by invisible reference. */
10893 if (pass_by_reference (get_cumulative_args (args_so_far),
10894 TYPE_MODE (type), type, true))
10895 type = build_pointer_type (type);
10896
10897 /* Find mode as it is passed by the ABI. */
10898 unsignedp = TYPE_UNSIGNED (type);
10899 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
10900
10901 /* If we must pass in stack, we need a stack. */
10902 if (rs6000_must_pass_in_stack (mode, type))
10903 return true;
10904
10905 /* If there is no incoming register, we need a stack. */
10906 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
10907 if (entry_parm == NULL)
10908 return true;
10909
10910 /* Likewise if we need to pass both in registers and on the stack. */
10911 if (GET_CODE (entry_parm) == PARALLEL
10912 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
10913 return true;
10914
10915 /* Also true if we're partially in registers and partially not. */
10916 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
10917 return true;
10918
10919 /* Update info on where next arg arrives in registers. */
10920 rs6000_function_arg_advance (args_so_far, mode, type, true);
10921 return false;
10922 }
10923
10924 /* Return true if FUN has no prototype, has a variable argument
10925 list, or passes any parameter in memory. */
10926
10927 static bool
10928 rs6000_function_parms_need_stack (tree fun, bool incoming)
10929 {
10930 tree fntype, result;
10931 CUMULATIVE_ARGS args_so_far_v;
10932 cumulative_args_t args_so_far;
10933
10934 if (!fun)
10935 /* Must be a libcall, all of which only use reg parms. */
10936 return false;
10937
10938 fntype = fun;
10939 if (!TYPE_P (fun))
10940 fntype = TREE_TYPE (fun);
10941
10942 /* Varargs functions need the parameter save area. */
10943 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
10944 return true;
10945
10946 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
10947 args_so_far = pack_cumulative_args (&args_so_far_v);
10948
10949 /* When incoming, we will have been passed the function decl.
10950 It is necessary to use the decl to handle K&R style functions,
10951 where TYPE_ARG_TYPES may not be available. */
10952 if (incoming)
10953 {
10954 gcc_assert (DECL_P (fun));
10955 result = DECL_RESULT (fun);
10956 }
10957 else
10958 result = TREE_TYPE (fntype);
10959
10960 if (result && aggregate_value_p (result, fntype))
10961 {
10962 if (!TYPE_P (result))
10963 result = TREE_TYPE (result);
10964 result = build_pointer_type (result);
10965 rs6000_parm_needs_stack (args_so_far, result);
10966 }
10967
10968 if (incoming)
10969 {
10970 tree parm;
10971
10972 for (parm = DECL_ARGUMENTS (fun);
10973 parm && parm != void_list_node;
10974 parm = TREE_CHAIN (parm))
10975 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
10976 return true;
10977 }
10978 else
10979 {
10980 function_args_iterator args_iter;
10981 tree arg_type;
10982
10983 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
10984 if (rs6000_parm_needs_stack (args_so_far, arg_type))
10985 return true;
10986 }
10987
10988 return false;
10989 }
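/* Examples (function signatures hypothetical): under ELFv2,
   'int f (int, int);' with the prototype in scope needs no parameter
   save area; 'int f ();' with no prototype in scope, or
   'int f (int, ...);', always does, as does any function with a
   parameter that spills (fully or partially) to memory.  */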
10990
10991 /* Return the size of the REG_PARM_STACK_SPACE area for FUN. This is
10992 usually a constant depending on the ABI. However, in the ELFv2 ABI
10993 the register parameter area is optional when calling a function that
10994 has a prototype in scope, has no variable argument list, and passes
10995 all parameters in registers. */
10996
10997 int
10998 rs6000_reg_parm_stack_space (tree fun, bool incoming)
10999 {
11000 int reg_parm_stack_space;
11001
11002 switch (DEFAULT_ABI)
11003 {
11004 default:
11005 reg_parm_stack_space = 0;
11006 break;
11007
11008 case ABI_AIX:
11009 case ABI_DARWIN:
11010 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
11011 break;
11012
11013 case ABI_ELFv2:
11014 /* ??? Recomputing this every time is a bit expensive. Is there
11015 a place to cache this information? */
11016 if (rs6000_function_parms_need_stack (fun, incoming))
11017 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
11018 else
11019 reg_parm_stack_space = 0;
11020 break;
11021 }
11022
11023 return reg_parm_stack_space;
11024 }
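/* So, for example, a 64-bit ELFv2 call to a fully prototyped
   'int f (int, int)' (hypothetical) reserves no register parameter
   area at all, while the same call under AIX always reserves
   64 bytes (8 doublewords), whether or not the callee ever spills
   its incoming registers there.  */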
11025
11026 static void
11027 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
11028 {
11029 int i;
11030 machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
11031
11032 if (nregs == 0)
11033 return;
11034
11035 for (i = 0; i < nregs; i++)
11036 {
11037 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
11038 if (reload_completed)
11039 {
11040 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
11041 tem = NULL_RTX;
11042 else
11043 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
11044 i * GET_MODE_SIZE (reg_mode));
11045 }
11046 else
11047 tem = replace_equiv_address (tem, XEXP (tem, 0));
11048
11049 gcc_assert (tem);
11050
11051 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
11052 }
11053 }
11054 \f
11055 /* Perform any actions needed for a function that is receiving a
11056 variable number of arguments.
11057
11058 CUM is as above.
11059
11060 MODE and TYPE are the mode and type of the current parameter.
11061
11062 PRETEND_SIZE is a variable that should be set to the amount of stack
11063 that must be pushed by the prolog to pretend that our caller pushed
11064 it.
11065
11066 Normally, this macro will push all remaining incoming registers on the
11067 stack and set PRETEND_SIZE to the length of the registers pushed. */
11068
11069 static void
11070 setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
11071 tree type, int *pretend_size ATTRIBUTE_UNUSED,
11072 int no_rtl)
11073 {
11074 CUMULATIVE_ARGS next_cum;
11075 int reg_size = TARGET_32BIT ? 4 : 8;
11076 rtx save_area = NULL_RTX, mem;
11077 int first_reg_offset;
11078 alias_set_type set;
11079
11080 /* Skip the last named argument. */
11081 next_cum = *get_cumulative_args (cum);
11082 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
11083
11084 if (DEFAULT_ABI == ABI_V4)
11085 {
11086 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
11087
11088 if (! no_rtl)
11089 {
11090 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
11091 HOST_WIDE_INT offset = 0;
11092
11093 /* Try to optimize the size of the varargs save area.
11094 The ABI requires that ap.reg_save_area is doubleword
11095 aligned, but we don't need to allocate space for all
11096 the bytes, only for those to which we will actually save
11097 anything. */
11098 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
11099 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
11100 if (TARGET_HARD_FLOAT && TARGET_FPRS
11101 && next_cum.fregno <= FP_ARG_V4_MAX_REG
11102 && cfun->va_list_fpr_size)
11103 {
11104 if (gpr_reg_num)
11105 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
11106 * UNITS_PER_FP_WORD;
11107 if (cfun->va_list_fpr_size
11108 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
11109 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
11110 else
11111 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
11112 * UNITS_PER_FP_WORD;
11113 }
11114 if (gpr_reg_num)
11115 {
11116 offset = -((first_reg_offset * reg_size) & ~7);
11117 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
11118 {
11119 gpr_reg_num = cfun->va_list_gpr_size;
11120 if (reg_size == 4 && (first_reg_offset & 1))
11121 gpr_reg_num++;
11122 }
11123 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
11124 }
11125 else if (fpr_size)
11126 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
11127 * UNITS_PER_FP_WORD
11128 - (int) (GP_ARG_NUM_REG * reg_size);
11129
11130 if (gpr_size + fpr_size)
11131 {
11132 rtx reg_save_area
11133 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
11134 gcc_assert (GET_CODE (reg_save_area) == MEM);
11135 reg_save_area = XEXP (reg_save_area, 0);
11136 if (GET_CODE (reg_save_area) == PLUS)
11137 {
11138 gcc_assert (XEXP (reg_save_area, 0)
11139 == virtual_stack_vars_rtx);
11140 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
11141 offset += INTVAL (XEXP (reg_save_area, 1));
11142 }
11143 else
11144 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
11145 }
11146
11147 cfun->machine->varargs_save_offset = offset;
11148 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
11149 }
11150 }
11151 else
11152 {
11153 first_reg_offset = next_cum.words;
11154 save_area = virtual_incoming_args_rtx;
11155
11156 if (targetm.calls.must_pass_in_stack (mode, type))
11157 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
11158 }
11159
11160 set = get_varargs_alias_set ();
11161 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
11162 && cfun->va_list_gpr_size)
11163 {
11164 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
11165
11166 if (va_list_gpr_counter_field)
11167 /* V4 va_list_gpr_size counts number of registers needed. */
11168 n_gpr = cfun->va_list_gpr_size;
11169 else
11170 /* char * va_list instead counts number of bytes needed. */
11171 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
11172
11173 if (nregs > n_gpr)
11174 nregs = n_gpr;
11175
11176 mem = gen_rtx_MEM (BLKmode,
11177 plus_constant (Pmode, save_area,
11178 first_reg_offset * reg_size));
11179 MEM_NOTRAP_P (mem) = 1;
11180 set_mem_alias_set (mem, set);
11181 set_mem_align (mem, BITS_PER_WORD);
11182
11183 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
11184 nregs);
11185 }
11186
11187 /* Save FP registers if needed. */
11188 if (DEFAULT_ABI == ABI_V4
11189 && TARGET_HARD_FLOAT && TARGET_FPRS
11190 && ! no_rtl
11191 && next_cum.fregno <= FP_ARG_V4_MAX_REG
11192 && cfun->va_list_fpr_size)
11193 {
11194 int fregno = next_cum.fregno, nregs;
11195 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
11196 rtx lab = gen_label_rtx ();
11197 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
11198 * UNITS_PER_FP_WORD);
11199
11200 emit_jump_insn
11201 (gen_rtx_SET (pc_rtx,
11202 gen_rtx_IF_THEN_ELSE (VOIDmode,
11203 gen_rtx_NE (VOIDmode, cr1,
11204 const0_rtx),
11205 gen_rtx_LABEL_REF (VOIDmode, lab),
11206 pc_rtx)));
11207
11208 for (nregs = 0;
11209 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
11210 fregno++, off += UNITS_PER_FP_WORD, nregs++)
11211 {
11212 mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
11213 ? DFmode : SFmode,
11214 plus_constant (Pmode, save_area, off));
11215 MEM_NOTRAP_P (mem) = 1;
11216 set_mem_alias_set (mem, set);
11217 set_mem_align (mem, GET_MODE_ALIGNMENT (
11218 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
11219 ? DFmode : SFmode));
11220 emit_move_insn (mem, gen_rtx_REG (
11221 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
11222 ? DFmode : SFmode, fregno));
11223 }
11224
11225 emit_label (lab);
11226 }
11227 }
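/* As a sketch of the effect (signature hypothetical): for a V.4
   'void f (int n, ...)', next_cum has been advanced past 'n', so
   r4..r10 are dumped into the GPR part of the save area, and f1..f8
   into the FPR part under the CR1 test emitted above, which the
   caller set to say whether any FP args travelled in registers.
   rs6000_gimplify_va_arg below then replays the values from
   ap->reg_save_area.  */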
11228
11229 /* Create the va_list data type. */
11230
11231 static tree
11232 rs6000_build_builtin_va_list (void)
11233 {
11234 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
11235
11236 /* For AIX, prefer 'char *' because that's what the system
11237 header files like. */
11238 if (DEFAULT_ABI != ABI_V4)
11239 return build_pointer_type (char_type_node);
11240
11241 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
11242 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
11243 get_identifier ("__va_list_tag"), record);
11244
11245 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
11246 unsigned_char_type_node);
11247 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
11248 unsigned_char_type_node);
11249 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
11250 every user file. */
11251 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
11252 get_identifier ("reserved"), short_unsigned_type_node);
11253 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
11254 get_identifier ("overflow_arg_area"),
11255 ptr_type_node);
11256 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
11257 get_identifier ("reg_save_area"),
11258 ptr_type_node);
11259
11260 va_list_gpr_counter_field = f_gpr;
11261 va_list_fpr_counter_field = f_fpr;
11262
11263 DECL_FIELD_CONTEXT (f_gpr) = record;
11264 DECL_FIELD_CONTEXT (f_fpr) = record;
11265 DECL_FIELD_CONTEXT (f_res) = record;
11266 DECL_FIELD_CONTEXT (f_ovf) = record;
11267 DECL_FIELD_CONTEXT (f_sav) = record;
11268
11269 TYPE_STUB_DECL (record) = type_decl;
11270 TYPE_NAME (record) = type_decl;
11271 TYPE_FIELDS (record) = f_gpr;
11272 DECL_CHAIN (f_gpr) = f_fpr;
11273 DECL_CHAIN (f_fpr) = f_res;
11274 DECL_CHAIN (f_res) = f_ovf;
11275 DECL_CHAIN (f_ovf) = f_sav;
11276
11277 layout_type (record);
11278
11279 /* The correct type is an array type of one element. */
11280 return build_array_type (record, build_index_type (size_zero_node));
11281 }
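/* In C terms, the V.4 va_list built above corresponds to this sketch
   (the real type is constructed through the tree machinery, never
   declared in a header):

     typedef struct __va_list_tag {
       unsigned char gpr;        // index of next GPR slot, 0..8
       unsigned char fpr;        // index of next FPR slot, 0..8
       unsigned short reserved;  // named padding, placates -Wpadded
       void *overflow_arg_area;  // next argument passed on the stack
       void *reg_save_area;      // base of the block saved by
                                 // setup_incoming_varargs above
     } va_list[1];  */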
11282
11283 /* Implement va_start. */
11284
11285 static void
11286 rs6000_va_start (tree valist, rtx nextarg)
11287 {
11288 HOST_WIDE_INT words, n_gpr, n_fpr;
11289 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
11290 tree gpr, fpr, ovf, sav, t;
11291
11292 /* Only SVR4 needs something special. */
11293 if (DEFAULT_ABI != ABI_V4)
11294 {
11295 std_expand_builtin_va_start (valist, nextarg);
11296 return;
11297 }
11298
11299 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
11300 f_fpr = DECL_CHAIN (f_gpr);
11301 f_res = DECL_CHAIN (f_fpr);
11302 f_ovf = DECL_CHAIN (f_res);
11303 f_sav = DECL_CHAIN (f_ovf);
11304
11305 valist = build_simple_mem_ref (valist);
11306 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
11307 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
11308 f_fpr, NULL_TREE);
11309 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
11310 f_ovf, NULL_TREE);
11311 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
11312 f_sav, NULL_TREE);
11313
11314 /* Count number of gp and fp argument registers used. */
11315 words = crtl->args.info.words;
11316 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
11317 GP_ARG_NUM_REG);
11318 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
11319 FP_ARG_NUM_REG);
11320
11321 if (TARGET_DEBUG_ARG)
11322 fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
11323 HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
11324 words, n_gpr, n_fpr);
11325
11326 if (cfun->va_list_gpr_size)
11327 {
11328 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
11329 build_int_cst (NULL_TREE, n_gpr));
11330 TREE_SIDE_EFFECTS (t) = 1;
11331 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11332 }
11333
11334 if (cfun->va_list_fpr_size)
11335 {
11336 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
11337 build_int_cst (NULL_TREE, n_fpr));
11338 TREE_SIDE_EFFECTS (t) = 1;
11339 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11340
11341 #ifdef HAVE_AS_GNU_ATTRIBUTE
11342 if (call_ABI_of_interest (cfun->decl))
11343 rs6000_passes_float = true;
11344 #endif
11345 }
11346
11347 /* Find the overflow area. */
11348 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
11349 if (words != 0)
11350 t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
11351 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
11352 TREE_SIDE_EFFECTS (t) = 1;
11353 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11354
11355 /* If there were no va_arg invocations, don't set up the register
11356 save area. */
11357 if (!cfun->va_list_gpr_size
11358 && !cfun->va_list_fpr_size
11359 && n_gpr < GP_ARG_NUM_REG
11360 && n_fpr < FP_ARG_V4_MAX_REG)
11361 return;
11362
11363 /* Find the register save area. */
11364 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
11365 if (cfun->machine->varargs_save_offset)
11366 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
11367 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
11368 TREE_SIDE_EFFECTS (t) = 1;
11369 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11370 }
11371
11372 /* Implement va_arg. */
11373
11374 static tree
11375 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
11376 gimple_seq *post_p)
11377 {
11378 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
11379 tree gpr, fpr, ovf, sav, reg, t, u;
11380 int size, rsize, n_reg, sav_ofs, sav_scale;
11381 tree lab_false, lab_over, addr;
11382 int align;
11383 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
11384 int regalign = 0;
11385 gimple stmt;
11386
11387 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
11388 {
11389 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
11390 return build_va_arg_indirect_ref (t);
11391 }
11392
11393 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
11394 earlier version of gcc, with the property that it always applied alignment
11395 adjustments to the va-args (even for zero-sized types). The cheapest way
11396 to deal with this is to replicate the effect of the part of
11397 std_gimplify_va_arg_expr that carries out the align adjust, for the case
11398 of relevance.
11399 We don't need to check for pass-by-reference because of the test above.
11400 We can return a simplified answer, since we know there's no offset to add. */
11401
11402 if (((TARGET_MACHO
11403 && rs6000_darwin64_abi)
11404 || DEFAULT_ABI == ABI_ELFv2
11405 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
11406 && integer_zerop (TYPE_SIZE (type)))
11407 {
11408 unsigned HOST_WIDE_INT align, boundary;
11409 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
11410 align = PARM_BOUNDARY / BITS_PER_UNIT;
11411 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
11412 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
11413 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
11414 boundary /= BITS_PER_UNIT;
11415 if (boundary > align)
11416 {
11417 tree t;
11418 /* This updates arg ptr by the amount that would be necessary
11419 to align the zero-sized (but not zero-alignment) item. */
11420 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
11421 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
11422 gimplify_and_add (t, pre_p);
11423
11424 t = fold_convert (sizetype, valist_tmp);
11425 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
11426 fold_convert (TREE_TYPE (valist),
11427 fold_build2 (BIT_AND_EXPR, sizetype, t,
11428 size_int (-boundary))));
11429 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
11430 gimplify_and_add (t, pre_p);
11431 }
11432 /* Since it is zero-sized there's no increment for the item itself. */
11433 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
11434 return build_va_arg_indirect_ref (valist_tmp);
11435 }
11436
11437 if (DEFAULT_ABI != ABI_V4)
11438 {
11439 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
11440 {
11441 tree elem_type = TREE_TYPE (type);
11442 machine_mode elem_mode = TYPE_MODE (elem_type);
11443 int elem_size = GET_MODE_SIZE (elem_mode);
11444
11445 if (elem_size < UNITS_PER_WORD)
11446 {
11447 tree real_part, imag_part;
11448 gimple_seq post = NULL;
11449
11450 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
11451 &post);
11452 /* Copy the value into a temporary, lest the formal temporary
11453 be reused out from under us. */
11454 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
11455 gimple_seq_add_seq (pre_p, post);
11456
11457 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
11458 post_p);
11459
11460 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
11461 }
11462 }
11463
11464 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
11465 }
11466
11467 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
11468 f_fpr = DECL_CHAIN (f_gpr);
11469 f_res = DECL_CHAIN (f_fpr);
11470 f_ovf = DECL_CHAIN (f_res);
11471 f_sav = DECL_CHAIN (f_ovf);
11472
11473 valist = build_va_arg_indirect_ref (valist);
11474 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
11475 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
11476 f_fpr, NULL_TREE);
11477 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
11478 f_ovf, NULL_TREE);
11479 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
11480 f_sav, NULL_TREE);
11481
11482 size = int_size_in_bytes (type);
11483 rsize = (size + 3) / 4;
11484 align = 1;
11485
11486 if (TARGET_HARD_FLOAT && TARGET_FPRS
11487 && ((TARGET_SINGLE_FLOAT && TYPE_MODE (type) == SFmode)
11488 || (TARGET_DOUBLE_FLOAT
11489 && (TYPE_MODE (type) == DFmode
11490 || TYPE_MODE (type) == TFmode
11491 || TYPE_MODE (type) == SDmode
11492 || TYPE_MODE (type) == DDmode
11493 || TYPE_MODE (type) == TDmode))))
11494 {
11495 /* FP args go in FP registers, if present. */
11496 reg = fpr;
11497 n_reg = (size + 7) / 8;
11498 sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
11499 sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
11500 if (TYPE_MODE (type) != SFmode && TYPE_MODE (type) != SDmode)
11501 align = 8;
11502 }
11503 else
11504 {
11505 /* Otherwise into GP registers. */
11506 reg = gpr;
11507 n_reg = rsize;
11508 sav_ofs = 0;
11509 sav_scale = 4;
11510 if (n_reg == 2)
11511 align = 8;
11512 }
11513
11514 /* Pull the value out of the saved registers.... */
11515
11516 lab_over = NULL;
11517 addr = create_tmp_var (ptr_type_node, "addr");
11518
11519 /* AltiVec vectors never go in registers when -mabi=altivec. */
11520 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
11521 align = 16;
11522 else
11523 {
11524 lab_false = create_artificial_label (input_location);
11525 lab_over = create_artificial_label (input_location);
11526
11527 /* Long long and SPE vectors are aligned in the registers.
11528 As is any other 2-gpr item, such as complex int, due to a
11529 historical mistake. */
11530 u = reg;
11531 if (n_reg == 2 && reg == gpr)
11532 {
11533 regalign = 1;
11534 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
11535 build_int_cst (TREE_TYPE (reg), n_reg - 1));
11536 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
11537 unshare_expr (reg), u);
11538 }
11539 /* _Decimal128 is passed in even/odd fpr pairs; the stored
11540 reg number is 0 for f1, so we want to make it odd. */
11541 else if (reg == fpr && TYPE_MODE (type) == TDmode)
11542 {
11543 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
11544 build_int_cst (TREE_TYPE (reg), 1));
11545 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
11546 }
11547
11548 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
11549 t = build2 (GE_EXPR, boolean_type_node, u, t);
11550 u = build1 (GOTO_EXPR, void_type_node, lab_false);
11551 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
11552 gimplify_and_add (t, pre_p);
11553
11554 t = sav;
11555 if (sav_ofs)
11556 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
11557
11558 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
11559 build_int_cst (TREE_TYPE (reg), n_reg));
11560 u = fold_convert (sizetype, u);
11561 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
11562 t = fold_build_pointer_plus (t, u);
11563
11564 /* _Decimal32 varargs are located in the second word of the 64-bit
11565 FP register for 32-bit binaries. */
11566 if (TARGET_32BIT
11567 && TARGET_HARD_FLOAT && TARGET_FPRS
11568 && TYPE_MODE (type) == SDmode)
11569 t = fold_build_pointer_plus_hwi (t, size);
11570
11571 gimplify_assign (addr, t, pre_p);
11572
11573 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
11574
11575 stmt = gimple_build_label (lab_false);
11576 gimple_seq_add_stmt (pre_p, stmt);
11577
11578 if ((n_reg == 2 && !regalign) || n_reg > 2)
11579 {
11580 /* Ensure that we don't find any more args in regs.
11581 Alignment has already taken care of the special cases. */
11582 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
11583 }
11584 }
11585
11586 /* ... otherwise out of the overflow area. */
11587
11588 /* Care for on-stack alignment if needed. */
11589 t = ovf;
11590 if (align != 1)
11591 {
11592 t = fold_build_pointer_plus_hwi (t, align - 1);
11593 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
11594 build_int_cst (TREE_TYPE (t), -align));
11595 }
11596 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
11597
11598 gimplify_assign (unshare_expr (addr), t, pre_p);
11599
11600 t = fold_build_pointer_plus_hwi (t, size);
11601 gimplify_assign (unshare_expr (ovf), t, pre_p);
11602
11603 if (lab_over)
11604 {
11605 stmt = gimple_build_label (lab_over);
11606 gimple_seq_add_stmt (pre_p, stmt);
11607 }
11608
11609 if (STRICT_ALIGNMENT
11610 && (TYPE_ALIGN (type)
11611 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
11612 {
11613 /* The value (of type complex double, for example) may not be
11614 aligned in memory in the saved registers, so copy via a
11615 temporary. (This is the same code as used for SPARC.) */
11616 tree tmp = create_tmp_var (type, "va_arg_tmp");
11617 tree dest_addr = build_fold_addr_expr (tmp);
11618
11619 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
11620 3, dest_addr, addr, size_int (rsize * 4));
11621
11622 gimplify_and_add (copy, pre_p);
11623 addr = dest_addr;
11624 }
11625
11626 addr = fold_convert (ptrtype, addr);
11627 return build_va_arg_indirect_ref (addr);
11628 }
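/* For a plain 4-byte integer under V.4, the gimple emitted above
   corresponds roughly to this sketch (names illustrative only):

     if (ap->gpr < 8)
       addr = ap->reg_save_area + ap->gpr++ * 4;
     else
       {
         addr = ap->overflow_arg_area;
         ap->overflow_arg_area += 4;
       }
     result = *(int *) addr;

   with the extra alignment, register-pair and FPR cases layered on
   top of the same skeleton.  */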
11629
11630 /* Builtins. */
11631
11632 static void
11633 def_builtin (const char *name, tree type, enum rs6000_builtins code)
11634 {
11635 tree t;
11636 unsigned classify = rs6000_builtin_info[(int)code].attr;
11637 const char *attr_string = "";
11638
11639 gcc_assert (name != NULL);
11640 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));
11641
11642 if (rs6000_builtin_decls[(int)code])
11643 fatal_error (input_location,
11644 "internal error: builtin function %s already processed", name);
11645
11646 rs6000_builtin_decls[(int)code] = t =
11647 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
11648
11649 /* Set any special attributes. */
11650 if ((classify & RS6000_BTC_CONST) != 0)
11651 {
11652 /* const function, function only depends on the inputs. */
11653 TREE_READONLY (t) = 1;
11654 TREE_NOTHROW (t) = 1;
11655 attr_string = ", pure";
11656 }
11657 else if ((classify & RS6000_BTC_PURE) != 0)
11658 {
11659 /* pure function, function can read global memory, but does not set any
11660 external state. */
11661 DECL_PURE_P (t) = 1;
11662 TREE_NOTHROW (t) = 1;
11663 attr_string = ", const";
11664 }
11665 else if ((classify & RS6000_BTC_FP) != 0)
11666 {
11667 /* Function is a math function. If rounding mode is on, then treat the
11668 function as not reading global memory, but it can have arbitrary side
11669 effects. If it is off, then assume the function is a const function.
11670 This mimics the ATTR_MATHFN_FPROUNDING attribute in
11671 builtin-attribute.def that is used for the math functions. */
11672 TREE_NOTHROW (t) = 1;
11673 if (flag_rounding_math)
11674 {
11675 DECL_PURE_P (t) = 1;
11676 DECL_IS_NOVOPS (t) = 1;
11677 attr_string = ", fp, pure";
11678 }
11679 else
11680 {
11681 TREE_READONLY (t) = 1;
11682 attr_string = ", fp, const";
11683 }
11684 }
11685 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
11686 gcc_unreachable ();
11687
11688 if (TARGET_DEBUG_BUILTIN)
11689 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
11690 (int)code, name, attr_string);
11691 }
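/* A representative call, hypothetical but typical of the ones made by
   the *_init_builtins routines later in this file:

     def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si,
                  ALTIVEC_BUILTIN_MTVSCR);

   which registers the function with the front end, records its decl
   in rs6000_builtin_decls, and applies the const/pure/fp attributes
   recorded in rs6000_builtin_info.  */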
11692
11693 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
11694
11695 #undef RS6000_BUILTIN_1
11696 #undef RS6000_BUILTIN_2
11697 #undef RS6000_BUILTIN_3
11698 #undef RS6000_BUILTIN_A
11699 #undef RS6000_BUILTIN_D
11700 #undef RS6000_BUILTIN_E
11701 #undef RS6000_BUILTIN_H
11702 #undef RS6000_BUILTIN_P
11703 #undef RS6000_BUILTIN_Q
11704 #undef RS6000_BUILTIN_S
11705 #undef RS6000_BUILTIN_X
11706
11707 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11708 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11709 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
11710 { MASK, ICODE, NAME, ENUM },
11711
11712 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11713 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11714 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11715 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11716 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11717 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11718 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11719 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11720
11721 static const struct builtin_description bdesc_3arg[] =
11722 {
11723 #include "rs6000-builtin.def"
11724 };
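/* Sketch of the X-macro mechanism above (the entry shown is
   illustrative, not quoted verbatim from rs6000-builtin.def): an
   entry there of the form

     RS6000_BUILTIN_3 (ALTIVEC_BUILTIN_VMADDFP,
                       "__builtin_altivec_vmaddfp", MASK, ATTR, ICODE)

   expands, with the definitions in force here, to the initializer
   { MASK, ICODE, "__builtin_altivec_vmaddfp",
     ALTIVEC_BUILTIN_VMADDFP }, while the other ten RS6000_BUILTIN_*
   macros expand to nothing, so each bdesc_* table that follows picks
   out exactly one class of builtins from the same .def file.  */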
11725
11726 /* DST operations: void foo (void *, const int, const char). */
11727
11728 #undef RS6000_BUILTIN_1
11729 #undef RS6000_BUILTIN_2
11730 #undef RS6000_BUILTIN_3
11731 #undef RS6000_BUILTIN_A
11732 #undef RS6000_BUILTIN_D
11733 #undef RS6000_BUILTIN_E
11734 #undef RS6000_BUILTIN_H
11735 #undef RS6000_BUILTIN_P
11736 #undef RS6000_BUILTIN_Q
11737 #undef RS6000_BUILTIN_S
11738 #undef RS6000_BUILTIN_X
11739
11740 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11741 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11742 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11743 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11744 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
11745 { MASK, ICODE, NAME, ENUM },
11746
11747 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11748 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11749 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11750 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11751 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11752 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11753
11754 static const struct builtin_description bdesc_dst[] =
11755 {
11756 #include "rs6000-builtin.def"
11757 };
11758
11759 /* Simple binary operations: VECc = foo (VECa, VECb). */
11760
11761 #undef RS6000_BUILTIN_1
11762 #undef RS6000_BUILTIN_2
11763 #undef RS6000_BUILTIN_3
11764 #undef RS6000_BUILTIN_A
11765 #undef RS6000_BUILTIN_D
11766 #undef RS6000_BUILTIN_E
11767 #undef RS6000_BUILTIN_H
11768 #undef RS6000_BUILTIN_P
11769 #undef RS6000_BUILTIN_Q
11770 #undef RS6000_BUILTIN_S
11771 #undef RS6000_BUILTIN_X
11772
11773 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11774 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
11775 { MASK, ICODE, NAME, ENUM },
11776
11777 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11778 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11779 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11780 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11781 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11782 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11783 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11784 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11785 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11786
11787 static const struct builtin_description bdesc_2arg[] =
11788 {
11789 #include "rs6000-builtin.def"
11790 };
11791
11792 #undef RS6000_BUILTIN_1
11793 #undef RS6000_BUILTIN_2
11794 #undef RS6000_BUILTIN_3
11795 #undef RS6000_BUILTIN_A
11796 #undef RS6000_BUILTIN_D
11797 #undef RS6000_BUILTIN_E
11798 #undef RS6000_BUILTIN_H
11799 #undef RS6000_BUILTIN_P
11800 #undef RS6000_BUILTIN_Q
11801 #undef RS6000_BUILTIN_S
11802 #undef RS6000_BUILTIN_X
11803
11804 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11805 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11806 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11807 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11808 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11809 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11810 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11811 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
11812 { MASK, ICODE, NAME, ENUM },
11813
11814 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11815 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11816 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11817
11818 /* AltiVec predicates. */
11819
11820 static const struct builtin_description bdesc_altivec_preds[] =
11821 {
11822 #include "rs6000-builtin.def"
11823 };
11824
11825 /* SPE predicates. */
11826 #undef RS6000_BUILTIN_1
11827 #undef RS6000_BUILTIN_2
11828 #undef RS6000_BUILTIN_3
11829 #undef RS6000_BUILTIN_A
11830 #undef RS6000_BUILTIN_D
11831 #undef RS6000_BUILTIN_E
11832 #undef RS6000_BUILTIN_H
11833 #undef RS6000_BUILTIN_P
11834 #undef RS6000_BUILTIN_Q
11835 #undef RS6000_BUILTIN_S
11836 #undef RS6000_BUILTIN_X
11837
11838 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11839 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11840 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11841 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11842 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11843 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11844 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11845 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11846 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11847 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
11848 { MASK, ICODE, NAME, ENUM },
11849
11850 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11851
11852 static const struct builtin_description bdesc_spe_predicates[] =
11853 {
11854 #include "rs6000-builtin.def"
11855 };
11856
11857 /* SPE evsel predicates. */
11858 #undef RS6000_BUILTIN_1
11859 #undef RS6000_BUILTIN_2
11860 #undef RS6000_BUILTIN_3
11861 #undef RS6000_BUILTIN_A
11862 #undef RS6000_BUILTIN_D
11863 #undef RS6000_BUILTIN_E
11864 #undef RS6000_BUILTIN_H
11865 #undef RS6000_BUILTIN_P
11866 #undef RS6000_BUILTIN_Q
11867 #undef RS6000_BUILTIN_S
11868 #undef RS6000_BUILTIN_X
11869
11870 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11871 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11872 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11873 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11874 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11875 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
11876 { MASK, ICODE, NAME, ENUM },
11877
11878 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11879 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11880 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11881 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11882 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11883
11884 static const struct builtin_description bdesc_spe_evsel[] =
11885 {
11886 #include "rs6000-builtin.def"
11887 };
11888
11889 /* PAIRED predicates. */
11890 #undef RS6000_BUILTIN_1
11891 #undef RS6000_BUILTIN_2
11892 #undef RS6000_BUILTIN_3
11893 #undef RS6000_BUILTIN_A
11894 #undef RS6000_BUILTIN_D
11895 #undef RS6000_BUILTIN_E
11896 #undef RS6000_BUILTIN_H
11897 #undef RS6000_BUILTIN_P
11898 #undef RS6000_BUILTIN_Q
11899 #undef RS6000_BUILTIN_S
11900 #undef RS6000_BUILTIN_X
11901
11902 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11903 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11904 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11905 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11906 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11907 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11908 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11909 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11910 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
11911 { MASK, ICODE, NAME, ENUM },
11912
11913 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11914 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11915
11916 static const struct builtin_description bdesc_paired_preds[] =
11917 {
11918 #include "rs6000-builtin.def"
11919 };
11920
11921 /* ABS* operations. */
11922
11923 #undef RS6000_BUILTIN_1
11924 #undef RS6000_BUILTIN_2
11925 #undef RS6000_BUILTIN_3
11926 #undef RS6000_BUILTIN_A
11927 #undef RS6000_BUILTIN_D
11928 #undef RS6000_BUILTIN_E
11929 #undef RS6000_BUILTIN_H
11930 #undef RS6000_BUILTIN_P
11931 #undef RS6000_BUILTIN_Q
11932 #undef RS6000_BUILTIN_S
11933 #undef RS6000_BUILTIN_X
11934
11935 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11936 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11937 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11938 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
11939 { MASK, ICODE, NAME, ENUM },
11940
11941 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11942 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11943 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11944 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11945 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11946 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11947 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11948
11949 static const struct builtin_description bdesc_abs[] =
11950 {
11951 #include "rs6000-builtin.def"
11952 };
11953
11954 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
11955 foo (VECa). */
11956
11957 #undef RS6000_BUILTIN_1
11958 #undef RS6000_BUILTIN_2
11959 #undef RS6000_BUILTIN_3
11960 #undef RS6000_BUILTIN_A
11961 #undef RS6000_BUILTIN_D
11962 #undef RS6000_BUILTIN_E
11963 #undef RS6000_BUILTIN_H
11964 #undef RS6000_BUILTIN_P
11965 #undef RS6000_BUILTIN_Q
11966 #undef RS6000_BUILTIN_S
11967 #undef RS6000_BUILTIN_X
11968
11969 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
11970 { MASK, ICODE, NAME, ENUM },
11971
11972 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11973 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11974 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11975 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11976 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11977 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11978 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11979 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11980 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11981 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11982
11983 static const struct builtin_description bdesc_1arg[] =
11984 {
11985 #include "rs6000-builtin.def"
11986 };
11987
11988 /* HTM builtins. */
11989 #undef RS6000_BUILTIN_1
11990 #undef RS6000_BUILTIN_2
11991 #undef RS6000_BUILTIN_3
11992 #undef RS6000_BUILTIN_A
11993 #undef RS6000_BUILTIN_D
11994 #undef RS6000_BUILTIN_E
11995 #undef RS6000_BUILTIN_H
11996 #undef RS6000_BUILTIN_P
11997 #undef RS6000_BUILTIN_Q
11998 #undef RS6000_BUILTIN_S
11999 #undef RS6000_BUILTIN_X
12000
12001 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12002 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12003 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12004 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12005 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12006 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
12007 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
12008 { MASK, ICODE, NAME, ENUM },
12009
12010 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12011 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
12012 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
12013 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12014
12015 static const struct builtin_description bdesc_htm[] =
12016 {
12017 #include "rs6000-builtin.def"
12018 };
12019
12020 #undef RS6000_BUILTIN_1
12021 #undef RS6000_BUILTIN_2
12022 #undef RS6000_BUILTIN_3
12023 #undef RS6000_BUILTIN_A
12024 #undef RS6000_BUILTIN_D
12025 #undef RS6000_BUILTIN_E
12026 #undef RS6000_BUILTIN_H
12027 #undef RS6000_BUILTIN_P
12028 #undef RS6000_BUILTIN_Q
12029 #undef RS6000_BUILTIN_S
12030
12031 /* Return true if a builtin function is overloaded. */
12032 bool
12033 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
12034 {
12035 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
12036 }
12037
12038 /* Expand an expression EXP that calls a builtin without arguments. */
12039 static rtx
12040 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
12041 {
12042 rtx pat;
12043 machine_mode tmode = insn_data[icode].operand[0].mode;
12044
12045 if (icode == CODE_FOR_nothing)
12046 /* Builtin not supported on this processor. */
12047 return 0;
12048
12049 if (target == 0
12050 || GET_MODE (target) != tmode
12051 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12052 target = gen_reg_rtx (tmode);
12053
12054 pat = GEN_FCN (icode) (target);
12055 if (! pat)
12056 return 0;
12057 emit_insn (pat);
12058
12059 return target;
12060 }
12061
12062
12063 static rtx
12064 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
12065 {
12066 rtx pat;
12067 tree arg0 = CALL_EXPR_ARG (exp, 0);
12068 tree arg1 = CALL_EXPR_ARG (exp, 1);
12069 rtx op0 = expand_normal (arg0);
12070 rtx op1 = expand_normal (arg1);
12071 machine_mode mode0 = insn_data[icode].operand[0].mode;
12072 machine_mode mode1 = insn_data[icode].operand[1].mode;
12073
12074 if (icode == CODE_FOR_nothing)
12075 /* Builtin not supported on this processor. */
12076 return 0;
12077
/* If we got invalid arguments, bail out before generating bad rtl. */
12079 if (arg0 == error_mark_node || arg1 == error_mark_node)
12080 return const0_rtx;
12081
12082 if (GET_CODE (op0) != CONST_INT
12083 || INTVAL (op0) > 255
12084 || INTVAL (op0) < 0)
12085 {
12086 error ("argument 1 must be an 8-bit field value");
12087 return const0_rtx;
12088 }
12089
12090 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
12091 op0 = copy_to_mode_reg (mode0, op0);
12092
12093 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
12094 op1 = copy_to_mode_reg (mode1, op1);
12095
12096 pat = GEN_FCN (icode) (op0, op1);
12097 if (! pat)
12098 return const0_rtx;
12099 emit_insn (pat);
12100
12101 return NULL_RTX;
12102 }
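
/* Illustrative sketch (standalone user code, not part of this file):
   a call reaching the expander above via __builtin_mtfsf must pass a
   literal 0..255 field mask as argument 1, or the "8-bit field value"
   error fires.  Assumes a PowerPC target with hardware floating
   point.  */

void
update_all_fpscr_fields (double bits)
{
  __builtin_mtfsf (0xff, bits);	/* mask 0xff selects every FPSCR field */
}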
12103
12104
12105 static rtx
12106 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
12107 {
12108 rtx pat;
12109 tree arg0 = CALL_EXPR_ARG (exp, 0);
12110 rtx op0 = expand_normal (arg0);
12111 machine_mode tmode = insn_data[icode].operand[0].mode;
12112 machine_mode mode0 = insn_data[icode].operand[1].mode;
12113
12114 if (icode == CODE_FOR_nothing)
12115 /* Builtin not supported on this processor. */
12116 return 0;
12117
/* If we got invalid arguments, bail out before generating bad rtl. */
12119 if (arg0 == error_mark_node)
12120 return const0_rtx;
12121
12122 if (icode == CODE_FOR_altivec_vspltisb
12123 || icode == CODE_FOR_altivec_vspltish
12124 || icode == CODE_FOR_altivec_vspltisw
12125 || icode == CODE_FOR_spe_evsplatfi
12126 || icode == CODE_FOR_spe_evsplati)
12127 {
12128 /* Only allow 5-bit *signed* literals. */
12129 if (GET_CODE (op0) != CONST_INT
12130 || INTVAL (op0) > 15
12131 || INTVAL (op0) < -16)
12132 {
12133 error ("argument 1 must be a 5-bit signed literal");
12134 return const0_rtx;
12135 }
12136 }
12137
12138 if (target == 0
12139 || GET_MODE (target) != tmode
12140 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12141 target = gen_reg_rtx (tmode);
12142
12143 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12144 op0 = copy_to_mode_reg (mode0, op0);
12145
12146 pat = GEN_FCN (icode) (target, op0);
12147 if (! pat)
12148 return 0;
12149 emit_insn (pat);
12150
12151 return target;
12152 }
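
/* Illustrative sketch (standalone user code, not part of this file):
   the vspltis* range check above means vec_splat_s32 only accepts
   literals in -16..15, so vec_splat_s32 (16) would be rejected with
   the "5-bit signed literal" error.  Assumes <altivec.h> and
   -maltivec.  */

#include <altivec.h>

vector signed int
splat_max_immediate (void)
{
  return vec_splat_s32 (15);	/* in range; a single vspltisw */
}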
12153
12154 static rtx
12155 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
12156 {
12157 rtx pat, scratch1, scratch2;
12158 tree arg0 = CALL_EXPR_ARG (exp, 0);
12159 rtx op0 = expand_normal (arg0);
12160 machine_mode tmode = insn_data[icode].operand[0].mode;
12161 machine_mode mode0 = insn_data[icode].operand[1].mode;
12162
12163 /* If we have invalid arguments, bail out before generating bad rtl. */
12164 if (arg0 == error_mark_node)
12165 return const0_rtx;
12166
12167 if (target == 0
12168 || GET_MODE (target) != tmode
12169 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12170 target = gen_reg_rtx (tmode);
12171
12172 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12173 op0 = copy_to_mode_reg (mode0, op0);
12174
12175 scratch1 = gen_reg_rtx (mode0);
12176 scratch2 = gen_reg_rtx (mode0);
12177
12178 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
12179 if (! pat)
12180 return 0;
12181 emit_insn (pat);
12182
12183 return target;
12184 }
12185
12186 static rtx
12187 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
12188 {
12189 rtx pat;
12190 tree arg0 = CALL_EXPR_ARG (exp, 0);
12191 tree arg1 = CALL_EXPR_ARG (exp, 1);
12192 rtx op0 = expand_normal (arg0);
12193 rtx op1 = expand_normal (arg1);
12194 machine_mode tmode = insn_data[icode].operand[0].mode;
12195 machine_mode mode0 = insn_data[icode].operand[1].mode;
12196 machine_mode mode1 = insn_data[icode].operand[2].mode;
12197
12198 if (icode == CODE_FOR_nothing)
12199 /* Builtin not supported on this processor. */
12200 return 0;
12201
/* If we got invalid arguments, bail out before generating bad rtl. */
12203 if (arg0 == error_mark_node || arg1 == error_mark_node)
12204 return const0_rtx;
12205
12206 if (icode == CODE_FOR_altivec_vcfux
12207 || icode == CODE_FOR_altivec_vcfsx
12208 || icode == CODE_FOR_altivec_vctsxs
12209 || icode == CODE_FOR_altivec_vctuxs
12210 || icode == CODE_FOR_altivec_vspltb
12211 || icode == CODE_FOR_altivec_vsplth
12212 || icode == CODE_FOR_altivec_vspltw
12213 || icode == CODE_FOR_spe_evaddiw
12214 || icode == CODE_FOR_spe_evldd
12215 || icode == CODE_FOR_spe_evldh
12216 || icode == CODE_FOR_spe_evldw
12217 || icode == CODE_FOR_spe_evlhhesplat
12218 || icode == CODE_FOR_spe_evlhhossplat
12219 || icode == CODE_FOR_spe_evlhhousplat
12220 || icode == CODE_FOR_spe_evlwhe
12221 || icode == CODE_FOR_spe_evlwhos
12222 || icode == CODE_FOR_spe_evlwhou
12223 || icode == CODE_FOR_spe_evlwhsplat
12224 || icode == CODE_FOR_spe_evlwwsplat
12225 || icode == CODE_FOR_spe_evrlwi
12226 || icode == CODE_FOR_spe_evslwi
12227 || icode == CODE_FOR_spe_evsrwis
12228 || icode == CODE_FOR_spe_evsubifw
12229 || icode == CODE_FOR_spe_evsrwiu)
12230 {
12231 /* Only allow 5-bit unsigned literals. */
12232 STRIP_NOPS (arg1);
12233 if (TREE_CODE (arg1) != INTEGER_CST
12234 || TREE_INT_CST_LOW (arg1) & ~0x1f)
12235 {
12236 error ("argument 2 must be a 5-bit unsigned literal");
12237 return const0_rtx;
12238 }
12239 }
12240
12241 if (target == 0
12242 || GET_MODE (target) != tmode
12243 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12244 target = gen_reg_rtx (tmode);
12245
12246 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12247 op0 = copy_to_mode_reg (mode0, op0);
12248 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12249 op1 = copy_to_mode_reg (mode1, op1);
12250
12251 pat = GEN_FCN (icode) (target, op0, op1);
12252 if (! pat)
12253 return 0;
12254 emit_insn (pat);
12255
12256 return target;
12257 }
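
/* Illustrative sketch (standalone user code, not part of this file):
   vec_splat is one of the binops whose second operand must pass the
   5-bit unsigned literal check above (altivec_vspltw and friends).
   Assumes <altivec.h> and -maltivec.  */

#include <altivec.h>

vector signed int
broadcast_element_two (vector signed int v)
{
  return vec_splat (v, 2);	/* selector must be a literal */
}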
12258
12259 static rtx
12260 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
12261 {
12262 rtx pat, scratch;
12263 tree cr6_form = CALL_EXPR_ARG (exp, 0);
12264 tree arg0 = CALL_EXPR_ARG (exp, 1);
12265 tree arg1 = CALL_EXPR_ARG (exp, 2);
12266 rtx op0 = expand_normal (arg0);
12267 rtx op1 = expand_normal (arg1);
12268 machine_mode tmode = SImode;
12269 machine_mode mode0 = insn_data[icode].operand[1].mode;
12270 machine_mode mode1 = insn_data[icode].operand[2].mode;
12271 int cr6_form_int;
12272
12273 if (TREE_CODE (cr6_form) != INTEGER_CST)
12274 {
12275 error ("argument 1 of __builtin_altivec_predicate must be a constant");
12276 return const0_rtx;
12277 }
12278 else
12279 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
12280
12281 gcc_assert (mode0 == mode1);
12282
12283 /* If we have invalid arguments, bail out before generating bad rtl. */
12284 if (arg0 == error_mark_node || arg1 == error_mark_node)
12285 return const0_rtx;
12286
12287 if (target == 0
12288 || GET_MODE (target) != tmode
12289 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12290 target = gen_reg_rtx (tmode);
12291
12292 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12293 op0 = copy_to_mode_reg (mode0, op0);
12294 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12295 op1 = copy_to_mode_reg (mode1, op1);
12296
12297 scratch = gen_reg_rtx (mode0);
12298
12299 pat = GEN_FCN (icode) (scratch, op0, op1);
12300 if (! pat)
12301 return 0;
12302 emit_insn (pat);
12303
12304 /* The vec_any* and vec_all* predicates use the same opcodes for two
12305 different operations, but the bits in CR6 will be different
12306 depending on what information we want. So we have to play tricks
12307 with CR6 to get the right bits out.
12308
12309 If you think this is disgusting, look at the specs for the
12310 AltiVec predicates. */
12311
12312 switch (cr6_form_int)
12313 {
12314 case 0:
12315 emit_insn (gen_cr6_test_for_zero (target));
12316 break;
12317 case 1:
12318 emit_insn (gen_cr6_test_for_zero_reverse (target));
12319 break;
12320 case 2:
12321 emit_insn (gen_cr6_test_for_lt (target));
12322 break;
12323 case 3:
12324 emit_insn (gen_cr6_test_for_lt_reverse (target));
12325 break;
12326 default:
12327 error ("argument 1 of __builtin_altivec_predicate is out of range");
12328 break;
12329 }
12330
12331 return target;
12332 }
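
/* Illustrative sketch (standalone user code, not part of this file):
   both predicates below expand to the same vcmpequw. pattern; only the
   CR6 test emitted above differs between the vec_all_* and vec_any_*
   forms.  Assumes <altivec.h> and -maltivec.  */

#include <altivec.h>

int
all_equal (vector signed int a, vector signed int b)
{
  return vec_all_eq (a, b);
}

int
any_equal (vector signed int a, vector signed int b)
{
  return vec_any_eq (a, b);
}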
12333
12334 static rtx
12335 paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
12336 {
12337 rtx pat, addr;
12338 tree arg0 = CALL_EXPR_ARG (exp, 0);
12339 tree arg1 = CALL_EXPR_ARG (exp, 1);
12340 machine_mode tmode = insn_data[icode].operand[0].mode;
12341 machine_mode mode0 = Pmode;
12342 machine_mode mode1 = Pmode;
12343 rtx op0 = expand_normal (arg0);
12344 rtx op1 = expand_normal (arg1);
12345
12346 if (icode == CODE_FOR_nothing)
12347 /* Builtin not supported on this processor. */
12348 return 0;
12349
/* If we got invalid arguments, bail out before generating bad rtl. */
12351 if (arg0 == error_mark_node || arg1 == error_mark_node)
12352 return const0_rtx;
12353
12354 if (target == 0
12355 || GET_MODE (target) != tmode
12356 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12357 target = gen_reg_rtx (tmode);
12358
12359 op1 = copy_to_mode_reg (mode1, op1);
12360
12361 if (op0 == const0_rtx)
12362 {
12363 addr = gen_rtx_MEM (tmode, op1);
12364 }
12365 else
12366 {
12367 op0 = copy_to_mode_reg (mode0, op0);
12368 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
12369 }
12370
12371 pat = GEN_FCN (icode) (target, addr);
12372
12373 if (! pat)
12374 return 0;
12375 emit_insn (pat);
12376
12377 return target;
12378 }
12379
12380 /* Return a constant vector for use as a little-endian permute control vector
12381 to reverse the order of elements of the given vector mode. */
12382 static rtx
12383 swap_selector_for_mode (machine_mode mode)
12384 {
12385 /* These are little endian vectors, so their elements are reversed
12386 from what you would normally expect for a permute control vector. */
12387 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
12388 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
12389 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
12390 unsigned int swap16[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
12391 unsigned int *swaparray, i;
12392 rtx perm[16];
12393
12394 switch (mode)
12395 {
12396 case V2DFmode:
12397 case V2DImode:
12398 swaparray = swap2;
12399 break;
12400 case V4SFmode:
12401 case V4SImode:
12402 swaparray = swap4;
12403 break;
12404 case V8HImode:
12405 swaparray = swap8;
12406 break;
12407 case V16QImode:
12408 swaparray = swap16;
12409 break;
12410 default:
12411 gcc_unreachable ();
12412 }
12413
12414 for (i = 0; i < 16; ++i)
12415 perm[i] = GEN_INT (swaparray[i]);
12416
12417 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm)));
12418 }
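
/* Illustrative sketch (standalone user code, not part of this file):
   the selectors built above are vperm control vectors; a hand-written
   analogue that reverses the four words of a V4SI on a big-endian
   target looks like this.  Assumes <altivec.h> and -maltivec.  */

#include <altivec.h>

vector unsigned int
reverse_words (vector unsigned int v)
{
  /* Each group of four byte indices selects one word, last word
     first.  */
  vector unsigned char sel =
    { 12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3 };
  return vec_perm (v, v, sel);
}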
12419
12420 /* Generate code for an "lvx", "lvxl", or "lve*x" built-in for a little endian target
12421 with -maltivec=be specified. Issue the load followed by an element-reversing
12422 permute. */
12423 void
12424 altivec_expand_lvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
12425 {
12426 rtx tmp = gen_reg_rtx (mode);
12427 rtx load = gen_rtx_SET (tmp, op1);
12428 rtx lvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
12429 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, load, lvx));
12430 rtx sel = swap_selector_for_mode (mode);
12431 rtx vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, tmp, tmp, sel), UNSPEC_VPERM);
12432
12433 gcc_assert (REG_P (op0));
12434 emit_insn (par);
12435 emit_insn (gen_rtx_SET (op0, vperm));
12436 }
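
/* Illustrative sketch (standalone user code, not part of this file):
   with -maltivec=be on a little-endian target, a plain vec_ld is
   expanded through a sequence like the one above, so element 0 ends up
   being the lowest-addressed element, as it would be on big-endian.  */

#include <altivec.h>

vector signed int
load_in_be_element_order (const signed int *p)
{
  return vec_ld (0, p);	/* lvx followed by an element-reversing vperm */
}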
12437
12438 /* Generate code for a "stvx" or "stvxl" built-in for a little endian target
12439 with -maltivec=be specified. Issue the store preceded by an element-reversing
12440 permute. */
12441 void
12442 altivec_expand_stvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
12443 {
12444 rtx tmp = gen_reg_rtx (mode);
12445 rtx store = gen_rtx_SET (op0, tmp);
12446 rtx stvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
12447 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, store, stvx));
12448 rtx sel = swap_selector_for_mode (mode);
12449 rtx vperm;
12450
12451 gcc_assert (REG_P (op1));
12452 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
12453 emit_insn (gen_rtx_SET (tmp, vperm));
12454 emit_insn (par);
12455 }
12456
12457 /* Generate code for a "stve*x" built-in for a little endian target with -maltivec=be
12458 specified. Issue the store preceded by an element-reversing permute. */
12459 void
12460 altivec_expand_stvex_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
12461 {
12462 machine_mode inner_mode = GET_MODE_INNER (mode);
12463 rtx tmp = gen_reg_rtx (mode);
12464 rtx stvx = gen_rtx_UNSPEC (inner_mode, gen_rtvec (1, tmp), unspec);
12465 rtx sel = swap_selector_for_mode (mode);
12466 rtx vperm;
12467
12468 gcc_assert (REG_P (op1));
12469 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
12470 emit_insn (gen_rtx_SET (tmp, vperm));
12471 emit_insn (gen_rtx_SET (op0, stvx));
12472 }
12473
12474 static rtx
12475 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
12476 {
12477 rtx pat, addr;
12478 tree arg0 = CALL_EXPR_ARG (exp, 0);
12479 tree arg1 = CALL_EXPR_ARG (exp, 1);
12480 machine_mode tmode = insn_data[icode].operand[0].mode;
12481 machine_mode mode0 = Pmode;
12482 machine_mode mode1 = Pmode;
12483 rtx op0 = expand_normal (arg0);
12484 rtx op1 = expand_normal (arg1);
12485
12486 if (icode == CODE_FOR_nothing)
12487 /* Builtin not supported on this processor. */
12488 return 0;
12489
/* If we got invalid arguments, bail out before generating bad rtl. */
12491 if (arg0 == error_mark_node || arg1 == error_mark_node)
12492 return const0_rtx;
12493
12494 if (target == 0
12495 || GET_MODE (target) != tmode
12496 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12497 target = gen_reg_rtx (tmode);
12498
12499 op1 = copy_to_mode_reg (mode1, op1);
12500
12501 if (op0 == const0_rtx)
12502 {
12503 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
12504 }
12505 else
12506 {
12507 op0 = copy_to_mode_reg (mode0, op0);
12508 addr = gen_rtx_MEM (blk ? BLKmode : tmode, gen_rtx_PLUS (Pmode, op0, op1));
12509 }
12510
12511 pat = GEN_FCN (icode) (target, addr);
12512
12513 if (! pat)
12514 return 0;
12515 emit_insn (pat);
12516
12517 return target;
12518 }
12519
12520 static rtx
12521 spe_expand_stv_builtin (enum insn_code icode, tree exp)
12522 {
12523 tree arg0 = CALL_EXPR_ARG (exp, 0);
12524 tree arg1 = CALL_EXPR_ARG (exp, 1);
12525 tree arg2 = CALL_EXPR_ARG (exp, 2);
12526 rtx op0 = expand_normal (arg0);
12527 rtx op1 = expand_normal (arg1);
12528 rtx op2 = expand_normal (arg2);
12529 rtx pat;
12530 machine_mode mode0 = insn_data[icode].operand[0].mode;
12531 machine_mode mode1 = insn_data[icode].operand[1].mode;
12532 machine_mode mode2 = insn_data[icode].operand[2].mode;
12533
/* Invalid arguments. Bail before doing anything stupid! */
12535 if (arg0 == error_mark_node
12536 || arg1 == error_mark_node
12537 || arg2 == error_mark_node)
12538 return const0_rtx;
12539
12540 if (! (*insn_data[icode].operand[2].predicate) (op0, mode2))
12541 op0 = copy_to_mode_reg (mode2, op0);
12542 if (! (*insn_data[icode].operand[0].predicate) (op1, mode0))
12543 op1 = copy_to_mode_reg (mode0, op1);
12544 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
12545 op2 = copy_to_mode_reg (mode1, op2);
12546
12547 pat = GEN_FCN (icode) (op1, op2, op0);
12548 if (pat)
12549 emit_insn (pat);
12550 return NULL_RTX;
12551 }
12552
12553 static rtx
12554 paired_expand_stv_builtin (enum insn_code icode, tree exp)
12555 {
12556 tree arg0 = CALL_EXPR_ARG (exp, 0);
12557 tree arg1 = CALL_EXPR_ARG (exp, 1);
12558 tree arg2 = CALL_EXPR_ARG (exp, 2);
12559 rtx op0 = expand_normal (arg0);
12560 rtx op1 = expand_normal (arg1);
12561 rtx op2 = expand_normal (arg2);
12562 rtx pat, addr;
12563 machine_mode tmode = insn_data[icode].operand[0].mode;
12564 machine_mode mode1 = Pmode;
12565 machine_mode mode2 = Pmode;
12566
/* Invalid arguments. Bail before doing anything stupid! */
12568 if (arg0 == error_mark_node
12569 || arg1 == error_mark_node
12570 || arg2 == error_mark_node)
12571 return const0_rtx;
12572
12573 if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
12574 op0 = copy_to_mode_reg (tmode, op0);
12575
12576 op2 = copy_to_mode_reg (mode2, op2);
12577
12578 if (op1 == const0_rtx)
12579 {
12580 addr = gen_rtx_MEM (tmode, op2);
12581 }
12582 else
12583 {
12584 op1 = copy_to_mode_reg (mode1, op1);
12585 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
12586 }
12587
12588 pat = GEN_FCN (icode) (addr, op0);
12589 if (pat)
12590 emit_insn (pat);
12591 return NULL_RTX;
12592 }
12593
12594 static rtx
12595 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
12596 {
12597 tree arg0 = CALL_EXPR_ARG (exp, 0);
12598 tree arg1 = CALL_EXPR_ARG (exp, 1);
12599 tree arg2 = CALL_EXPR_ARG (exp, 2);
12600 rtx op0 = expand_normal (arg0);
12601 rtx op1 = expand_normal (arg1);
12602 rtx op2 = expand_normal (arg2);
12603 rtx pat, addr;
12604 machine_mode tmode = insn_data[icode].operand[0].mode;
12605 machine_mode smode = insn_data[icode].operand[1].mode;
12606 machine_mode mode1 = Pmode;
12607 machine_mode mode2 = Pmode;
12608
/* Invalid arguments. Bail before doing anything stupid! */
12610 if (arg0 == error_mark_node
12611 || arg1 == error_mark_node
12612 || arg2 == error_mark_node)
12613 return const0_rtx;
12614
12615 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
12616 op0 = copy_to_mode_reg (smode, op0);
12617
12618 op2 = copy_to_mode_reg (mode2, op2);
12619
12620 if (op1 == const0_rtx)
12621 {
12622 addr = gen_rtx_MEM (tmode, op2);
12623 }
12624 else
12625 {
12626 op1 = copy_to_mode_reg (mode1, op1);
12627 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
12628 }
12629
12630 pat = GEN_FCN (icode) (addr, op0);
12631 if (pat)
12632 emit_insn (pat);
12633 return NULL_RTX;
12634 }
12635
12636 /* Return the appropriate SPR number associated with the given builtin. */
12637 static inline HOST_WIDE_INT
12638 htm_spr_num (enum rs6000_builtins code)
12639 {
12640 if (code == HTM_BUILTIN_GET_TFHAR
12641 || code == HTM_BUILTIN_SET_TFHAR)
12642 return TFHAR_SPR;
12643 else if (code == HTM_BUILTIN_GET_TFIAR
12644 || code == HTM_BUILTIN_SET_TFIAR)
12645 return TFIAR_SPR;
12646 else if (code == HTM_BUILTIN_GET_TEXASR
12647 || code == HTM_BUILTIN_SET_TEXASR)
12648 return TEXASR_SPR;
12649 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
12650 || code == HTM_BUILTIN_SET_TEXASRU);
12651 return TEXASRU_SPR;
12652 }
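
/* Illustrative sketch (standalone user code, not part of this file):
   the HTM get/set builtins map directly onto mfspr/mtspr of the SPR
   numbers returned above, e.g. reading the transaction failure status.
   Assumes -mhtm.  */

unsigned long
read_texasr (void)
{
  return __builtin_get_texasr ();	/* mfspr from TEXASR_SPR */
}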
12653
12654 /* Return the appropriate SPR regno associated with the given builtin. */
12655 static inline HOST_WIDE_INT
12656 htm_spr_regno (enum rs6000_builtins code)
12657 {
12658 if (code == HTM_BUILTIN_GET_TFHAR
12659 || code == HTM_BUILTIN_SET_TFHAR)
12660 return TFHAR_REGNO;
12661 else if (code == HTM_BUILTIN_GET_TFIAR
12662 || code == HTM_BUILTIN_SET_TFIAR)
12663 return TFIAR_REGNO;
12664 gcc_assert (code == HTM_BUILTIN_GET_TEXASR
12665 || code == HTM_BUILTIN_SET_TEXASR
12666 || code == HTM_BUILTIN_GET_TEXASRU
12667 || code == HTM_BUILTIN_SET_TEXASRU);
12668 return TEXASR_REGNO;
12669 }
12670
12671 /* Return the correct ICODE value depending on whether we are
12672 setting or reading the HTM SPRs. */
12673 static inline enum insn_code
12674 rs6000_htm_spr_icode (bool nonvoid)
12675 {
12676 if (nonvoid)
12677 return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
12678 else
12679 return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
12680 }
12681
12682 /* Expand the HTM builtin in EXP and store the result in TARGET.
12683 Store true in *EXPANDEDP if we found a builtin to expand. */
12684 static rtx
12685 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
12686 {
12687 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
12688 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
12689 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
12690 const struct builtin_description *d;
12691 size_t i;
12692
12693 *expandedp = true;
12694
12695 if (!TARGET_POWERPC64
12696 && (fcode == HTM_BUILTIN_TABORTDC
12697 || fcode == HTM_BUILTIN_TABORTDCI))
12698 {
12699 size_t uns_fcode = (size_t)fcode;
12700 const char *name = rs6000_builtin_info[uns_fcode].name;
12701 error ("builtin %s is only valid in 64-bit mode", name);
12702 return const0_rtx;
12703 }
12704
12705 /* Expand the HTM builtins. */
12706 d = bdesc_htm;
12707 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
12708 if (d->code == fcode)
12709 {
12710 rtx op[MAX_HTM_OPERANDS], pat;
12711 int nopnds = 0;
12712 tree arg;
12713 call_expr_arg_iterator iter;
12714 unsigned attr = rs6000_builtin_info[fcode].attr;
12715 enum insn_code icode = d->icode;
12716 const struct insn_operand_data *insn_op;
12717 bool uses_spr = (attr & RS6000_BTC_SPR);
12718 rtx cr = NULL_RTX;
12719
12720 if (uses_spr)
12721 icode = rs6000_htm_spr_icode (nonvoid);
12722 insn_op = &insn_data[icode].operand[0];
12723
12724 if (nonvoid)
12725 {
12726 machine_mode tmode = (uses_spr) ? insn_op->mode : SImode;
12727 if (!target
12728 || GET_MODE (target) != tmode
12729 || (uses_spr && !(*insn_op->predicate) (target, tmode)))
12730 target = gen_reg_rtx (tmode);
12731 if (uses_spr)
12732 op[nopnds++] = target;
12733 }
12734
12735 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
12736 {
12737 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
12738 return const0_rtx;
12739
12740 insn_op = &insn_data[icode].operand[nopnds];
12741
12742 op[nopnds] = expand_normal (arg);
12743
12744 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
12745 {
12746 if (!strcmp (insn_op->constraint, "n"))
12747 {
12748 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
12749 if (!CONST_INT_P (op[nopnds]))
12750 error ("argument %d must be an unsigned literal", arg_num);
12751 else
12752 error ("argument %d is an unsigned literal that is "
12753 "out of range", arg_num);
12754 return const0_rtx;
12755 }
12756 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
12757 }
12758
12759 nopnds++;
12760 }
12761
12762 /* Handle the builtins for extended mnemonics. These accept
12763 no arguments, but map to builtins that take arguments. */
12764 switch (fcode)
12765 {
12766 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
12767 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
12768 op[nopnds++] = GEN_INT (1);
12769 #ifdef ENABLE_CHECKING
12770 attr |= RS6000_BTC_UNARY;
12771 #endif
12772 break;
12773 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
12774 op[nopnds++] = GEN_INT (0);
12775 #ifdef ENABLE_CHECKING
12776 attr |= RS6000_BTC_UNARY;
12777 #endif
12778 break;
12779 default:
12780 break;
12781 }
12782
12783 /* If this builtin accesses SPRs, then pass in the appropriate
12784 SPR number and SPR regno as the last two operands. */
12785 if (uses_spr)
12786 {
12787 machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
12788 op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
12789 op[nopnds++] = gen_rtx_REG (mode, htm_spr_regno (fcode));
12790 }
12791 /* If this builtin accesses a CR, then pass in a scratch
12792 CR as the last operand. */
12793 else if (attr & RS6000_BTC_CR)
{
cr = gen_reg_rtx (CCmode);
op[nopnds++] = cr;
}
12797
12798 #ifdef ENABLE_CHECKING
12799 int expected_nopnds = 0;
12800 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
12801 expected_nopnds = 1;
12802 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
12803 expected_nopnds = 2;
12804 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
12805 expected_nopnds = 3;
12806 if (!(attr & RS6000_BTC_VOID))
12807 expected_nopnds += 1;
12808 if (uses_spr)
12809 expected_nopnds += 2;
12810
12811 gcc_assert (nopnds == expected_nopnds && nopnds <= MAX_HTM_OPERANDS);
12812 #endif
12813
12814 switch (nopnds)
12815 {
12816 case 1:
12817 pat = GEN_FCN (icode) (op[0]);
12818 break;
12819 case 2:
12820 pat = GEN_FCN (icode) (op[0], op[1]);
12821 break;
12822 case 3:
12823 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
12824 break;
12825 case 4:
12826 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
12827 break;
12828 default:
12829 gcc_unreachable ();
12830 }
12831 if (!pat)
12832 return NULL_RTX;
12833 emit_insn (pat);
12834
12835 if (attr & RS6000_BTC_CR)
12836 {
12837 if (fcode == HTM_BUILTIN_TBEGIN)
12838 {
/* Emit code to set TARGET to true or false depending on
whether the tbegin. instruction succeeded or failed
to start a transaction. We do this by placing the 1's
complement of CR's EQ bit into TARGET. */
12843 rtx scratch = gen_reg_rtx (SImode);
12844 emit_insn (gen_rtx_SET (scratch,
12845 gen_rtx_EQ (SImode, cr,
12846 const0_rtx)));
12847 emit_insn (gen_rtx_SET (target,
12848 gen_rtx_XOR (SImode, scratch,
12849 GEN_INT (1))));
12850 }
12851 else
12852 {
12853 /* Emit code to copy the 4-bit condition register field
12854 CR into the least significant end of register TARGET. */
12855 rtx scratch1 = gen_reg_rtx (SImode);
12856 rtx scratch2 = gen_reg_rtx (SImode);
12857 rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
12858 emit_insn (gen_movcc (subreg, cr));
12859 emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
12860 emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
12861 }
12862 }
12863
12864 if (nonvoid)
12865 return target;
12866 return const0_rtx;
12867 }
12868
12869 *expandedp = false;
12870 return NULL_RTX;
12871 }
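
/* Illustrative sketch (standalone user code, not part of this file):
   per the CR handling above, __builtin_tbegin yields the complement of
   the EQ bit, i.e. nonzero when a transaction actually started.
   Assumes -mhtm.  */

int
add_transactionally (int *p)
{
  if (__builtin_tbegin (0))
    {
      *p += 1;			/* runs inside the transaction */
      __builtin_tend (0);
      return 1;
    }
  return 0;			/* tbegin. failed to start a transaction */
}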
12872
12873 static rtx
12874 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
12875 {
12876 rtx pat;
12877 tree arg0 = CALL_EXPR_ARG (exp, 0);
12878 tree arg1 = CALL_EXPR_ARG (exp, 1);
12879 tree arg2 = CALL_EXPR_ARG (exp, 2);
12880 rtx op0 = expand_normal (arg0);
12881 rtx op1 = expand_normal (arg1);
12882 rtx op2 = expand_normal (arg2);
12883 machine_mode tmode = insn_data[icode].operand[0].mode;
12884 machine_mode mode0 = insn_data[icode].operand[1].mode;
12885 machine_mode mode1 = insn_data[icode].operand[2].mode;
12886 machine_mode mode2 = insn_data[icode].operand[3].mode;
12887
12888 if (icode == CODE_FOR_nothing)
12889 /* Builtin not supported on this processor. */
12890 return 0;
12891
/* If we got invalid arguments, bail out before generating bad rtl. */
12893 if (arg0 == error_mark_node
12894 || arg1 == error_mark_node
12895 || arg2 == error_mark_node)
12896 return const0_rtx;
12897
/* Check and prepare the argument depending on the instruction code.

Note that a switch statement instead of the sequence of tests
would be incorrect, as many of the CODE_FOR values could be
CODE_FOR_nothing, and duplicate case labels with identical values
are not allowed. We never reach this point at runtime for such an
icode anyway, since CODE_FOR_nothing was rejected above. */
12905 if (icode == CODE_FOR_altivec_vsldoi_v4sf
12906 || icode == CODE_FOR_altivec_vsldoi_v4si
12907 || icode == CODE_FOR_altivec_vsldoi_v8hi
12908 || icode == CODE_FOR_altivec_vsldoi_v16qi)
12909 {
12910 /* Only allow 4-bit unsigned literals. */
12911 STRIP_NOPS (arg2);
12912 if (TREE_CODE (arg2) != INTEGER_CST
12913 || TREE_INT_CST_LOW (arg2) & ~0xf)
12914 {
12915 error ("argument 3 must be a 4-bit unsigned literal");
12916 return const0_rtx;
12917 }
12918 }
12919 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
12920 || icode == CODE_FOR_vsx_xxpermdi_v2di
12921 || icode == CODE_FOR_vsx_xxsldwi_v16qi
12922 || icode == CODE_FOR_vsx_xxsldwi_v8hi
12923 || icode == CODE_FOR_vsx_xxsldwi_v4si
12924 || icode == CODE_FOR_vsx_xxsldwi_v4sf
12925 || icode == CODE_FOR_vsx_xxsldwi_v2di
12926 || icode == CODE_FOR_vsx_xxsldwi_v2df)
12927 {
12928 /* Only allow 2-bit unsigned literals. */
12929 STRIP_NOPS (arg2);
12930 if (TREE_CODE (arg2) != INTEGER_CST
12931 || TREE_INT_CST_LOW (arg2) & ~0x3)
12932 {
12933 error ("argument 3 must be a 2-bit unsigned literal");
12934 return const0_rtx;
12935 }
12936 }
12937 else if (icode == CODE_FOR_vsx_set_v2df
12938 || icode == CODE_FOR_vsx_set_v2di
12939 || icode == CODE_FOR_bcdadd
12940 || icode == CODE_FOR_bcdadd_lt
12941 || icode == CODE_FOR_bcdadd_eq
12942 || icode == CODE_FOR_bcdadd_gt
12943 || icode == CODE_FOR_bcdsub
12944 || icode == CODE_FOR_bcdsub_lt
12945 || icode == CODE_FOR_bcdsub_eq
12946 || icode == CODE_FOR_bcdsub_gt)
12947 {
12948 /* Only allow 1-bit unsigned literals. */
12949 STRIP_NOPS (arg2);
12950 if (TREE_CODE (arg2) != INTEGER_CST
12951 || TREE_INT_CST_LOW (arg2) & ~0x1)
12952 {
12953 error ("argument 3 must be a 1-bit unsigned literal");
12954 return const0_rtx;
12955 }
12956 }
12957 else if (icode == CODE_FOR_dfp_ddedpd_dd
12958 || icode == CODE_FOR_dfp_ddedpd_td)
12959 {
12960 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
12961 STRIP_NOPS (arg0);
12962 if (TREE_CODE (arg0) != INTEGER_CST
|| TREE_INT_CST_LOW (arg0) & ~0x3)
12964 {
12965 error ("argument 1 must be 0 or 2");
12966 return const0_rtx;
12967 }
12968 }
12969 else if (icode == CODE_FOR_dfp_denbcd_dd
12970 || icode == CODE_FOR_dfp_denbcd_td)
12971 {
12972 /* Only allow 1-bit unsigned literals. */
12973 STRIP_NOPS (arg0);
12974 if (TREE_CODE (arg0) != INTEGER_CST
12975 || TREE_INT_CST_LOW (arg0) & ~0x1)
12976 {
12977 error ("argument 1 must be a 1-bit unsigned literal");
12978 return const0_rtx;
12979 }
12980 }
12981 else if (icode == CODE_FOR_dfp_dscli_dd
12982 || icode == CODE_FOR_dfp_dscli_td
12983 || icode == CODE_FOR_dfp_dscri_dd
12984 || icode == CODE_FOR_dfp_dscri_td)
12985 {
12986 /* Only allow 6-bit unsigned literals. */
12987 STRIP_NOPS (arg1);
12988 if (TREE_CODE (arg1) != INTEGER_CST
12989 || TREE_INT_CST_LOW (arg1) & ~0x3f)
12990 {
12991 error ("argument 2 must be a 6-bit unsigned literal");
12992 return const0_rtx;
12993 }
12994 }
12995 else if (icode == CODE_FOR_crypto_vshasigmaw
12996 || icode == CODE_FOR_crypto_vshasigmad)
12997 {
/* Check whether the 2nd and 3rd arguments are integer constants in
range, and prepare the arguments. */
13000 STRIP_NOPS (arg1);
13001 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (arg1, 2))
13002 {
13003 error ("argument 2 must be 0 or 1");
13004 return const0_rtx;
13005 }
13006
13007 STRIP_NOPS (arg2);
if (TREE_CODE (arg2) != INTEGER_CST || wi::geu_p (arg2, 16))
13009 {
13010 error ("argument 3 must be in the range 0..15");
13011 return const0_rtx;
13012 }
13013 }
13014
13015 if (target == 0
13016 || GET_MODE (target) != tmode
13017 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13018 target = gen_reg_rtx (tmode);
13019
13020 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13021 op0 = copy_to_mode_reg (mode0, op0);
13022 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13023 op1 = copy_to_mode_reg (mode1, op1);
13024 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
13025 op2 = copy_to_mode_reg (mode2, op2);
13026
13027 if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
13028 pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
13029 else
13030 pat = GEN_FCN (icode) (target, op0, op1, op2);
13031 if (! pat)
13032 return 0;
13033 emit_insn (pat);
13034
13035 return target;
13036 }
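
/* Illustrative sketch (standalone user code, not part of this file):
   vec_sld is one of the vsldoi ternops above, so its third operand
   must be a 4-bit unsigned literal; a variable shift count would hit
   the error path.  Assumes <altivec.h> and -maltivec.  */

#include <altivec.h>

vector signed int
shift_left_four_bytes (vector signed int a, vector signed int b)
{
  return vec_sld (a, b, 4);	/* literal 0..15 required */
}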
13037
13038 /* Expand the lvx builtins. */
13039 static rtx
13040 altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
13041 {
13042 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
13043 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
13044 tree arg0;
13045 machine_mode tmode, mode0;
13046 rtx pat, op0;
13047 enum insn_code icode;
13048
13049 switch (fcode)
13050 {
13051 case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
13052 icode = CODE_FOR_vector_altivec_load_v16qi;
13053 break;
13054 case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
13055 icode = CODE_FOR_vector_altivec_load_v8hi;
13056 break;
13057 case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
13058 icode = CODE_FOR_vector_altivec_load_v4si;
13059 break;
13060 case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
13061 icode = CODE_FOR_vector_altivec_load_v4sf;
13062 break;
13063 case ALTIVEC_BUILTIN_LD_INTERNAL_2df:
13064 icode = CODE_FOR_vector_altivec_load_v2df;
13065 break;
13066 case ALTIVEC_BUILTIN_LD_INTERNAL_2di:
icode = CODE_FOR_vector_altivec_load_v2di;
break;
13068 case ALTIVEC_BUILTIN_LD_INTERNAL_1ti:
13069 icode = CODE_FOR_vector_altivec_load_v1ti;
13070 break;
13071 default:
13072 *expandedp = false;
13073 return NULL_RTX;
13074 }
13075
13076 *expandedp = true;
13077
13078 arg0 = CALL_EXPR_ARG (exp, 0);
13079 op0 = expand_normal (arg0);
13080 tmode = insn_data[icode].operand[0].mode;
13081 mode0 = insn_data[icode].operand[1].mode;
13082
13083 if (target == 0
13084 || GET_MODE (target) != tmode
13085 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13086 target = gen_reg_rtx (tmode);
13087
13088 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13089 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
13090
13091 pat = GEN_FCN (icode) (target, op0);
13092 if (! pat)
13093 return 0;
13094 emit_insn (pat);
13095 return target;
13096 }
13097
13098 /* Expand the stvx builtins. */
13099 static rtx
13100 altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
13101 bool *expandedp)
13102 {
13103 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
13104 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
13105 tree arg0, arg1;
13106 machine_mode mode0, mode1;
13107 rtx pat, op0, op1;
13108 enum insn_code icode;
13109
13110 switch (fcode)
13111 {
13112 case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
13113 icode = CODE_FOR_vector_altivec_store_v16qi;
13114 break;
13115 case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
13116 icode = CODE_FOR_vector_altivec_store_v8hi;
13117 break;
13118 case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
13119 icode = CODE_FOR_vector_altivec_store_v4si;
13120 break;
13121 case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
13122 icode = CODE_FOR_vector_altivec_store_v4sf;
13123 break;
13124 case ALTIVEC_BUILTIN_ST_INTERNAL_2df:
13125 icode = CODE_FOR_vector_altivec_store_v2df;
13126 break;
13127 case ALTIVEC_BUILTIN_ST_INTERNAL_2di:
icode = CODE_FOR_vector_altivec_store_v2di;
break;
13129 case ALTIVEC_BUILTIN_ST_INTERNAL_1ti:
13130 icode = CODE_FOR_vector_altivec_store_v1ti;
13131 break;
13132 default:
13133 *expandedp = false;
13134 return NULL_RTX;
13135 }
13136
13137 arg0 = CALL_EXPR_ARG (exp, 0);
13138 arg1 = CALL_EXPR_ARG (exp, 1);
13139 op0 = expand_normal (arg0);
13140 op1 = expand_normal (arg1);
13141 mode0 = insn_data[icode].operand[0].mode;
13142 mode1 = insn_data[icode].operand[1].mode;
13143
13144 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13145 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
13146 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13147 op1 = copy_to_mode_reg (mode1, op1);
13148
13149 pat = GEN_FCN (icode) (op0, op1);
13150 if (pat)
13151 emit_insn (pat);
13152
13153 *expandedp = true;
13154 return NULL_RTX;
13155 }
13156
13157 /* Expand the dst builtins. */
13158 static rtx
13159 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
13160 bool *expandedp)
13161 {
13162 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
13163 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
13164 tree arg0, arg1, arg2;
13165 machine_mode mode0, mode1;
13166 rtx pat, op0, op1, op2;
13167 const struct builtin_description *d;
13168 size_t i;
13169
13170 *expandedp = false;
13171
13172 /* Handle DST variants. */
13173 d = bdesc_dst;
13174 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
13175 if (d->code == fcode)
13176 {
13177 arg0 = CALL_EXPR_ARG (exp, 0);
13178 arg1 = CALL_EXPR_ARG (exp, 1);
13179 arg2 = CALL_EXPR_ARG (exp, 2);
13180 op0 = expand_normal (arg0);
13181 op1 = expand_normal (arg1);
13182 op2 = expand_normal (arg2);
13183 mode0 = insn_data[d->icode].operand[0].mode;
13184 mode1 = insn_data[d->icode].operand[1].mode;
13185
13186 /* Invalid arguments, bail out before generating bad rtl. */
13187 if (arg0 == error_mark_node
13188 || arg1 == error_mark_node
13189 || arg2 == error_mark_node)
13190 return const0_rtx;
13191
13192 *expandedp = true;
13193 STRIP_NOPS (arg2);
13194 if (TREE_CODE (arg2) != INTEGER_CST
13195 || TREE_INT_CST_LOW (arg2) & ~0x3)
13196 {
13197 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
13198 return const0_rtx;
13199 }
13200
13201 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
13202 op0 = copy_to_mode_reg (Pmode, op0);
13203 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
13204 op1 = copy_to_mode_reg (mode1, op1);
13205
13206 pat = GEN_FCN (d->icode) (op0, op1, op2);
13207 if (pat != 0)
13208 emit_insn (pat);
13209
13210 return NULL_RTX;
13211 }
13212
13213 return NULL_RTX;
13214 }
13215
13216 /* Expand vec_init builtin. */
13217 static rtx
13218 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
13219 {
13220 machine_mode tmode = TYPE_MODE (type);
13221 machine_mode inner_mode = GET_MODE_INNER (tmode);
13222 int i, n_elt = GET_MODE_NUNITS (tmode);
13223
13224 gcc_assert (VECTOR_MODE_P (tmode));
13225 gcc_assert (n_elt == call_expr_nargs (exp));
13226
13227 if (!target || !register_operand (target, tmode))
13228 target = gen_reg_rtx (tmode);
13229
/* If we have a vector comprised of a single element, such as V1TImode, do
the initialization directly. */
13232 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
13233 {
13234 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
13235 emit_move_insn (target, gen_lowpart (tmode, x));
13236 }
13237 else
13238 {
13239 rtvec v = rtvec_alloc (n_elt);
13240
13241 for (i = 0; i < n_elt; ++i)
13242 {
13243 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
13244 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
13245 }
13246
13247 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
13248 }
13249
13250 return target;
13251 }
13252
13253 /* Return the integer constant in ARG. Constrain it to be in the range
13254 of the subparts of VEC_TYPE; issue an error if not. */
13255
13256 static int
13257 get_element_number (tree vec_type, tree arg)
13258 {
13259 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
13260
13261 if (!tree_fits_uhwi_p (arg)
13262 || (elt = tree_to_uhwi (arg), elt > max))
13263 {
13264 error ("selector must be an integer constant in the range 0..%wi", max);
13265 return 0;
13266 }
13267
13268 return elt;
13269 }
13270
13271 /* Expand vec_set builtin. */
13272 static rtx
13273 altivec_expand_vec_set_builtin (tree exp)
13274 {
13275 machine_mode tmode, mode1;
13276 tree arg0, arg1, arg2;
13277 int elt;
13278 rtx op0, op1;
13279
13280 arg0 = CALL_EXPR_ARG (exp, 0);
13281 arg1 = CALL_EXPR_ARG (exp, 1);
13282 arg2 = CALL_EXPR_ARG (exp, 2);
13283
13284 tmode = TYPE_MODE (TREE_TYPE (arg0));
13285 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
13286 gcc_assert (VECTOR_MODE_P (tmode));
13287
13288 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
13289 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
13290 elt = get_element_number (TREE_TYPE (arg0), arg2);
13291
13292 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
13293 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
13294
13295 op0 = force_reg (tmode, op0);
13296 op1 = force_reg (mode1, op1);
13297
13298 rs6000_expand_vector_set (op0, op1, elt);
13299
13300 return op0;
13301 }
13302
13303 /* Expand vec_ext builtin. */
13304 static rtx
13305 altivec_expand_vec_ext_builtin (tree exp, rtx target)
13306 {
13307 machine_mode tmode, mode0;
13308 tree arg0, arg1;
13309 int elt;
13310 rtx op0;
13311
13312 arg0 = CALL_EXPR_ARG (exp, 0);
13313 arg1 = CALL_EXPR_ARG (exp, 1);
13314
13315 op0 = expand_normal (arg0);
13316 elt = get_element_number (TREE_TYPE (arg0), arg1);
13317
13318 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
13319 mode0 = TYPE_MODE (TREE_TYPE (arg0));
13320 gcc_assert (VECTOR_MODE_P (mode0));
13321
13322 op0 = force_reg (mode0, op0);
13323
13324 if (optimize || !target || !register_operand (target, tmode))
13325 target = gen_reg_rtx (tmode);
13326
13327 rs6000_expand_vector_extract (target, op0, elt);
13328
13329 return target;
13330 }
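
/* Illustrative sketch (standalone user code, not part of this file):
   vec_insert and vec_extract funnel into the vec_set/vec_ext expanders
   above; get_element_number rejects any selector outside the vector's
   subparts.  Assumes <altivec.h> and -maltivec.  */

#include <altivec.h>

int
third_word (vector signed int v)
{
  return vec_extract (v, 2);	/* selector checked against 0..3 */
}

vector signed int
set_third_word (vector signed int v, int x)
{
  return vec_insert (x, v, 2);
}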
13331
13332 /* Expand the builtin in EXP and store the result in TARGET. Store
13333 true in *EXPANDEDP if we found a builtin to expand. */
13334 static rtx
13335 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
13336 {
13337 const struct builtin_description *d;
13338 size_t i;
13339 enum insn_code icode;
13340 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
13341 tree arg0;
13342 rtx op0, pat;
13343 machine_mode tmode, mode0;
13344 enum rs6000_builtins fcode
13345 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
13346
13347 if (rs6000_overloaded_builtin_p (fcode))
13348 {
13349 *expandedp = true;
13350 error ("unresolved overload for Altivec builtin %qF", fndecl);
13351
13352 /* Given it is invalid, just generate a normal call. */
13353 return expand_call (exp, target, false);
13354 }
13355
13356 target = altivec_expand_ld_builtin (exp, target, expandedp);
13357 if (*expandedp)
13358 return target;
13359
13360 target = altivec_expand_st_builtin (exp, target, expandedp);
13361 if (*expandedp)
13362 return target;
13363
13364 target = altivec_expand_dst_builtin (exp, target, expandedp);
13365 if (*expandedp)
13366 return target;
13367
13368 *expandedp = true;
13369
13370 switch (fcode)
13371 {
13372 case ALTIVEC_BUILTIN_STVX_V2DF:
13373 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df, exp);
13374 case ALTIVEC_BUILTIN_STVX_V2DI:
13375 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di, exp);
13376 case ALTIVEC_BUILTIN_STVX_V4SF:
13377 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf, exp);
13378 case ALTIVEC_BUILTIN_STVX:
13379 case ALTIVEC_BUILTIN_STVX_V4SI:
13380 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
13381 case ALTIVEC_BUILTIN_STVX_V8HI:
13382 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi, exp);
13383 case ALTIVEC_BUILTIN_STVX_V16QI:
13384 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi, exp);
13385 case ALTIVEC_BUILTIN_STVEBX:
13386 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
13387 case ALTIVEC_BUILTIN_STVEHX:
13388 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
13389 case ALTIVEC_BUILTIN_STVEWX:
13390 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
13391 case ALTIVEC_BUILTIN_STVXL_V2DF:
13392 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
13393 case ALTIVEC_BUILTIN_STVXL_V2DI:
13394 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
13395 case ALTIVEC_BUILTIN_STVXL_V4SF:
13396 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
13397 case ALTIVEC_BUILTIN_STVXL:
13398 case ALTIVEC_BUILTIN_STVXL_V4SI:
13399 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
13400 case ALTIVEC_BUILTIN_STVXL_V8HI:
13401 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
13402 case ALTIVEC_BUILTIN_STVXL_V16QI:
13403 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
13404
13405 case ALTIVEC_BUILTIN_STVLX:
13406 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
13407 case ALTIVEC_BUILTIN_STVLXL:
13408 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
13409 case ALTIVEC_BUILTIN_STVRX:
13410 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
13411 case ALTIVEC_BUILTIN_STVRXL:
13412 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
13413
13414 case VSX_BUILTIN_STXVD2X_V1TI:
13415 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
13416 case VSX_BUILTIN_STXVD2X_V2DF:
13417 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
13418 case VSX_BUILTIN_STXVD2X_V2DI:
13419 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
13420 case VSX_BUILTIN_STXVW4X_V4SF:
13421 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
13422 case VSX_BUILTIN_STXVW4X_V4SI:
13423 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
13424 case VSX_BUILTIN_STXVW4X_V8HI:
13425 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
13426 case VSX_BUILTIN_STXVW4X_V16QI:
13427 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
13428
13429 case ALTIVEC_BUILTIN_MFVSCR:
13430 icode = CODE_FOR_altivec_mfvscr;
13431 tmode = insn_data[icode].operand[0].mode;
13432
13433 if (target == 0
13434 || GET_MODE (target) != tmode
13435 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13436 target = gen_reg_rtx (tmode);
13437
13438 pat = GEN_FCN (icode) (target);
13439 if (! pat)
13440 return 0;
13441 emit_insn (pat);
13442 return target;
13443
13444 case ALTIVEC_BUILTIN_MTVSCR:
13445 icode = CODE_FOR_altivec_mtvscr;
13446 arg0 = CALL_EXPR_ARG (exp, 0);
13447 op0 = expand_normal (arg0);
13448 mode0 = insn_data[icode].operand[0].mode;
13449
/* If we got invalid arguments, bail out before generating bad rtl. */
13451 if (arg0 == error_mark_node)
13452 return const0_rtx;
13453
13454 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13455 op0 = copy_to_mode_reg (mode0, op0);
13456
13457 pat = GEN_FCN (icode) (op0);
13458 if (pat)
13459 emit_insn (pat);
13460 return NULL_RTX;
13461
13462 case ALTIVEC_BUILTIN_DSSALL:
13463 emit_insn (gen_altivec_dssall ());
13464 return NULL_RTX;
13465
13466 case ALTIVEC_BUILTIN_DSS:
13467 icode = CODE_FOR_altivec_dss;
13468 arg0 = CALL_EXPR_ARG (exp, 0);
13469 STRIP_NOPS (arg0);
13470 op0 = expand_normal (arg0);
13471 mode0 = insn_data[icode].operand[0].mode;
13472
/* If we got invalid arguments, bail out before generating bad rtl. */
13474 if (arg0 == error_mark_node)
13475 return const0_rtx;
13476
13477 if (TREE_CODE (arg0) != INTEGER_CST
13478 || TREE_INT_CST_LOW (arg0) & ~0x3)
13479 {
13480 error ("argument to dss must be a 2-bit unsigned literal");
13481 return const0_rtx;
13482 }
13483
13484 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13485 op0 = copy_to_mode_reg (mode0, op0);
13486
13487 emit_insn (gen_altivec_dss (op0));
13488 return NULL_RTX;
13489
13490 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
13491 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
13492 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
13493 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
13494 case VSX_BUILTIN_VEC_INIT_V2DF:
13495 case VSX_BUILTIN_VEC_INIT_V2DI:
13496 case VSX_BUILTIN_VEC_INIT_V1TI:
13497 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
13498
13499 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
13500 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
13501 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
13502 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
13503 case VSX_BUILTIN_VEC_SET_V2DF:
13504 case VSX_BUILTIN_VEC_SET_V2DI:
13505 case VSX_BUILTIN_VEC_SET_V1TI:
13506 return altivec_expand_vec_set_builtin (exp);
13507
13508 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
13509 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
13510 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
13511 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
13512 case VSX_BUILTIN_VEC_EXT_V2DF:
13513 case VSX_BUILTIN_VEC_EXT_V2DI:
13514 case VSX_BUILTIN_VEC_EXT_V1TI:
13515 return altivec_expand_vec_ext_builtin (exp, target);
13516
default:
break;
13520 }
13521
13522 /* Expand abs* operations. */
13523 d = bdesc_abs;
13524 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
13525 if (d->code == fcode)
13526 return altivec_expand_abs_builtin (d->icode, exp, target);
13527
13528 /* Expand the AltiVec predicates. */
13529 d = bdesc_altivec_preds;
13530 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
13531 if (d->code == fcode)
13532 return altivec_expand_predicate_builtin (d->icode, exp, target);
13533
/* The LV* builtins are funky; they were initialized differently from the
other builtins, so expand them by hand here. */
13535 switch (fcode)
13536 {
13537 case ALTIVEC_BUILTIN_LVSL:
13538 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
13539 exp, target, false);
13540 case ALTIVEC_BUILTIN_LVSR:
13541 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
13542 exp, target, false);
13543 case ALTIVEC_BUILTIN_LVEBX:
13544 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
13545 exp, target, false);
13546 case ALTIVEC_BUILTIN_LVEHX:
13547 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
13548 exp, target, false);
13549 case ALTIVEC_BUILTIN_LVEWX:
13550 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
13551 exp, target, false);
13552 case ALTIVEC_BUILTIN_LVXL_V2DF:
13553 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
13554 exp, target, false);
13555 case ALTIVEC_BUILTIN_LVXL_V2DI:
13556 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
13557 exp, target, false);
13558 case ALTIVEC_BUILTIN_LVXL_V4SF:
13559 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
13560 exp, target, false);
13561 case ALTIVEC_BUILTIN_LVXL:
13562 case ALTIVEC_BUILTIN_LVXL_V4SI:
13563 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
13564 exp, target, false);
13565 case ALTIVEC_BUILTIN_LVXL_V8HI:
13566 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
13567 exp, target, false);
13568 case ALTIVEC_BUILTIN_LVXL_V16QI:
13569 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
13570 exp, target, false);
13571 case ALTIVEC_BUILTIN_LVX_V2DF:
13572 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df,
13573 exp, target, false);
13574 case ALTIVEC_BUILTIN_LVX_V2DI:
13575 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di,
13576 exp, target, false);
13577 case ALTIVEC_BUILTIN_LVX_V4SF:
13578 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf,
13579 exp, target, false);
13580 case ALTIVEC_BUILTIN_LVX:
13581 case ALTIVEC_BUILTIN_LVX_V4SI:
13582 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
13583 exp, target, false);
13584 case ALTIVEC_BUILTIN_LVX_V8HI:
13585 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi,
13586 exp, target, false);
13587 case ALTIVEC_BUILTIN_LVX_V16QI:
13588 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi,
13589 exp, target, false);
13590 case ALTIVEC_BUILTIN_LVLX:
13591 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
13592 exp, target, true);
13593 case ALTIVEC_BUILTIN_LVLXL:
13594 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
13595 exp, target, true);
13596 case ALTIVEC_BUILTIN_LVRX:
13597 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
13598 exp, target, true);
13599 case ALTIVEC_BUILTIN_LVRXL:
13600 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
13601 exp, target, true);
13602 case VSX_BUILTIN_LXVD2X_V1TI:
13603 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
13604 exp, target, false);
13605 case VSX_BUILTIN_LXVD2X_V2DF:
13606 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
13607 exp, target, false);
13608 case VSX_BUILTIN_LXVD2X_V2DI:
13609 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
13610 exp, target, false);
13611 case VSX_BUILTIN_LXVW4X_V4SF:
13612 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
13613 exp, target, false);
13614 case VSX_BUILTIN_LXVW4X_V4SI:
13615 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
13616 exp, target, false);
13617 case VSX_BUILTIN_LXVW4X_V8HI:
13618 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
13619 exp, target, false);
13620 case VSX_BUILTIN_LXVW4X_V16QI:
13621 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
13622 exp, target, false);
default:
break;
13627 }
13628
13629 *expandedp = false;
13630 return NULL_RTX;
13631 }
13632
13633 /* Expand the builtin in EXP and store the result in TARGET. Store
13634 true in *EXPANDEDP if we found a builtin to expand. */
13635 static rtx
13636 paired_expand_builtin (tree exp, rtx target, bool * expandedp)
13637 {
13638 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
13639 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
13640 const struct builtin_description *d;
13641 size_t i;
13642
13643 *expandedp = true;
13644
13645 switch (fcode)
13646 {
13647 case PAIRED_BUILTIN_STX:
13648 return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
13649 case PAIRED_BUILTIN_LX:
13650 return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
default:
break;
13654 }
13655
13656 /* Expand the paired predicates. */
13657 d = bdesc_paired_preds;
13658 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
13659 if (d->code == fcode)
13660 return paired_expand_predicate_builtin (d->icode, exp, target);
13661
13662 *expandedp = false;
13663 return NULL_RTX;
13664 }
13665
13666 /* Binops that need to be initialized manually, but can be expanded
13667 automagically by rs6000_expand_binop_builtin. */
13668 static const struct builtin_description bdesc_2arg_spe[] =
13669 {
13670 { RS6000_BTM_SPE, CODE_FOR_spe_evlddx, "__builtin_spe_evlddx", SPE_BUILTIN_EVLDDX },
13671 { RS6000_BTM_SPE, CODE_FOR_spe_evldwx, "__builtin_spe_evldwx", SPE_BUILTIN_EVLDWX },
13672 { RS6000_BTM_SPE, CODE_FOR_spe_evldhx, "__builtin_spe_evldhx", SPE_BUILTIN_EVLDHX },
13673 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhex, "__builtin_spe_evlwhex", SPE_BUILTIN_EVLWHEX },
13674 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhoux, "__builtin_spe_evlwhoux", SPE_BUILTIN_EVLWHOUX },
13675 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhosx, "__builtin_spe_evlwhosx", SPE_BUILTIN_EVLWHOSX },
13676 { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplatx, "__builtin_spe_evlwwsplatx", SPE_BUILTIN_EVLWWSPLATX },
13677 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplatx, "__builtin_spe_evlwhsplatx", SPE_BUILTIN_EVLWHSPLATX },
13678 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplatx, "__builtin_spe_evlhhesplatx", SPE_BUILTIN_EVLHHESPLATX },
13679 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplatx, "__builtin_spe_evlhhousplatx", SPE_BUILTIN_EVLHHOUSPLATX },
13680 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplatx, "__builtin_spe_evlhhossplatx", SPE_BUILTIN_EVLHHOSSPLATX },
13681 { RS6000_BTM_SPE, CODE_FOR_spe_evldd, "__builtin_spe_evldd", SPE_BUILTIN_EVLDD },
13682 { RS6000_BTM_SPE, CODE_FOR_spe_evldw, "__builtin_spe_evldw", SPE_BUILTIN_EVLDW },
13683 { RS6000_BTM_SPE, CODE_FOR_spe_evldh, "__builtin_spe_evldh", SPE_BUILTIN_EVLDH },
13684 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhe, "__builtin_spe_evlwhe", SPE_BUILTIN_EVLWHE },
13685 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhou, "__builtin_spe_evlwhou", SPE_BUILTIN_EVLWHOU },
13686 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhos, "__builtin_spe_evlwhos", SPE_BUILTIN_EVLWHOS },
13687 { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplat, "__builtin_spe_evlwwsplat", SPE_BUILTIN_EVLWWSPLAT },
13688 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplat, "__builtin_spe_evlwhsplat", SPE_BUILTIN_EVLWHSPLAT },
13689 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplat, "__builtin_spe_evlhhesplat", SPE_BUILTIN_EVLHHESPLAT },
13690 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplat, "__builtin_spe_evlhhousplat", SPE_BUILTIN_EVLHHOUSPLAT },
13691 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplat, "__builtin_spe_evlhhossplat", SPE_BUILTIN_EVLHHOSSPLAT }
13692 };
13693
13694 /* Expand the builtin in EXP and store the result in TARGET. Store
13695 true in *EXPANDEDP if we found a builtin to expand.
13696
13697 This expands the SPE builtins that are not simple unary and binary
13698 operations. */
13699 static rtx
13700 spe_expand_builtin (tree exp, rtx target, bool *expandedp)
13701 {
13702 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
13703 tree arg1, arg0;
13704 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
13705 enum insn_code icode;
13706 machine_mode tmode, mode0;
13707 rtx pat, op0;
13708 const struct builtin_description *d;
13709 size_t i;
13710
13711 *expandedp = true;
13712
13713 /* Syntax check for a 5-bit unsigned immediate. */
13714 switch (fcode)
13715 {
13716 case SPE_BUILTIN_EVSTDD:
13717 case SPE_BUILTIN_EVSTDH:
13718 case SPE_BUILTIN_EVSTDW:
13719 case SPE_BUILTIN_EVSTWHE:
13720 case SPE_BUILTIN_EVSTWHO:
13721 case SPE_BUILTIN_EVSTWWE:
13722 case SPE_BUILTIN_EVSTWWO:
13723 arg1 = CALL_EXPR_ARG (exp, 2);
13724 if (TREE_CODE (arg1) != INTEGER_CST
13725 || TREE_INT_CST_LOW (arg1) & ~0x1f)
13726 {
13727 error ("argument 2 must be a 5-bit unsigned literal");
13728 return const0_rtx;
13729 }
13730 break;
13731 default:
13732 break;
13733 }
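  /* Illustrative example (assumed user code, not from this file):
     with suitably typed V and P,

	 __builtin_spe_evstdd (V, P, 31);

     passes this check, while an immediate of 32 has bits outside the
     low five and is rejected with the diagnostic above.  */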
13734
13735 /* The evsplat*i instructions are not quite generic. */
13736 switch (fcode)
13737 {
13738 case SPE_BUILTIN_EVSPLATFI:
13739 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplatfi,
13740 exp, target);
13741 case SPE_BUILTIN_EVSPLATI:
13742 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplati,
13743 exp, target);
13744 default:
13745 break;
13746 }
13747
13748 d = bdesc_2arg_spe;
13749 for (i = 0; i < ARRAY_SIZE (bdesc_2arg_spe); ++i, ++d)
13750 if (d->code == fcode)
13751 return rs6000_expand_binop_builtin (d->icode, exp, target);
13752
13753 d = bdesc_spe_predicates;
13754 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, ++d)
13755 if (d->code == fcode)
13756 return spe_expand_predicate_builtin (d->icode, exp, target);
13757
13758 d = bdesc_spe_evsel;
13759 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, ++d)
13760 if (d->code == fcode)
13761 return spe_expand_evsel_builtin (d->icode, exp, target);
13762
13763 switch (fcode)
13764 {
13765 case SPE_BUILTIN_EVSTDDX:
13766 return spe_expand_stv_builtin (CODE_FOR_spe_evstddx, exp);
13767 case SPE_BUILTIN_EVSTDHX:
13768 return spe_expand_stv_builtin (CODE_FOR_spe_evstdhx, exp);
13769 case SPE_BUILTIN_EVSTDWX:
13770 return spe_expand_stv_builtin (CODE_FOR_spe_evstdwx, exp);
13771 case SPE_BUILTIN_EVSTWHEX:
13772 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhex, exp);
13773 case SPE_BUILTIN_EVSTWHOX:
13774 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhox, exp);
13775 case SPE_BUILTIN_EVSTWWEX:
13776 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwex, exp);
13777 case SPE_BUILTIN_EVSTWWOX:
13778 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwox, exp);
13779 case SPE_BUILTIN_EVSTDD:
13780 return spe_expand_stv_builtin (CODE_FOR_spe_evstdd, exp);
13781 case SPE_BUILTIN_EVSTDH:
13782 return spe_expand_stv_builtin (CODE_FOR_spe_evstdh, exp);
13783 case SPE_BUILTIN_EVSTDW:
13784 return spe_expand_stv_builtin (CODE_FOR_spe_evstdw, exp);
13785 case SPE_BUILTIN_EVSTWHE:
13786 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhe, exp);
13787 case SPE_BUILTIN_EVSTWHO:
13788 return spe_expand_stv_builtin (CODE_FOR_spe_evstwho, exp);
13789 case SPE_BUILTIN_EVSTWWE:
13790 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwe, exp);
13791 case SPE_BUILTIN_EVSTWWO:
13792 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwo, exp);
13793 case SPE_BUILTIN_MFSPEFSCR:
13794 icode = CODE_FOR_spe_mfspefscr;
13795 tmode = insn_data[icode].operand[0].mode;
13796
13797 if (target == 0
13798 || GET_MODE (target) != tmode
13799 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13800 target = gen_reg_rtx (tmode);
13801
13802 pat = GEN_FCN (icode) (target);
13803 if (! pat)
13804 return 0;
13805 emit_insn (pat);
13806 return target;
13807 case SPE_BUILTIN_MTSPEFSCR:
13808 icode = CODE_FOR_spe_mtspefscr;
13809 arg0 = CALL_EXPR_ARG (exp, 0);
13810 op0 = expand_normal (arg0);
13811 mode0 = insn_data[icode].operand[0].mode;
13812
13813 if (arg0 == error_mark_node)
13814 return const0_rtx;
13815
13816 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13817 op0 = copy_to_mode_reg (mode0, op0);
13818
13819 pat = GEN_FCN (icode) (op0);
13820 if (pat)
13821 emit_insn (pat);
13822 return NULL_RTX;
13823 default:
13824 break;
13825 }
13826
13827 *expandedp = false;
13828 return NULL_RTX;
13829 }
13830
13831 static rtx
13832 paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
13833 {
13834 rtx pat, scratch, tmp;
13835 tree form = CALL_EXPR_ARG (exp, 0);
13836 tree arg0 = CALL_EXPR_ARG (exp, 1);
13837 tree arg1 = CALL_EXPR_ARG (exp, 2);
13838 rtx op0 = expand_normal (arg0);
13839 rtx op1 = expand_normal (arg1);
13840 machine_mode mode0 = insn_data[icode].operand[1].mode;
13841 machine_mode mode1 = insn_data[icode].operand[2].mode;
13842 int form_int;
13843 enum rtx_code code;
13844
13845 if (TREE_CODE (form) != INTEGER_CST)
13846 {
13847 error ("argument 1 of __builtin_paired_predicate must be a constant");
13848 return const0_rtx;
13849 }
13850 else
13851 form_int = TREE_INT_CST_LOW (form);
13852
13853 gcc_assert (mode0 == mode1);
13854
13855 if (arg0 == error_mark_node || arg1 == error_mark_node)
13856 return const0_rtx;
13857
13858 if (target == 0
13859 || GET_MODE (target) != SImode
13860 || !(*insn_data[icode].operand[0].predicate) (target, SImode))
13861 target = gen_reg_rtx (SImode);
13862 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
13863 op0 = copy_to_mode_reg (mode0, op0);
13864 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
13865 op1 = copy_to_mode_reg (mode1, op1);
13866
13867 scratch = gen_reg_rtx (CCFPmode);
13868
13869 pat = GEN_FCN (icode) (scratch, op0, op1);
13870 if (!pat)
13871 return const0_rtx;
13872
13873 emit_insn (pat);
13874
13875 switch (form_int)
13876 {
13877 /* LT bit. */
13878 case 0:
13879 code = LT;
13880 break;
13881 /* GT bit. */
13882 case 1:
13883 code = GT;
13884 break;
13885 /* EQ bit. */
13886 case 2:
13887 code = EQ;
13888 break;
13889 /* UN bit. */
13890 case 3:
13891 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
13892 return target;
13893 default:
13894 error ("argument 1 of __builtin_paired_predicate is out of range");
13895 return const0_rtx;
13896 }
13897
13898 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
13899 emit_move_insn (target, tmp);
13900 return target;
13901 }
13902
13903 static rtx
13904 spe_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
13905 {
13906 rtx pat, scratch, tmp;
13907 tree form = CALL_EXPR_ARG (exp, 0);
13908 tree arg0 = CALL_EXPR_ARG (exp, 1);
13909 tree arg1 = CALL_EXPR_ARG (exp, 2);
13910 rtx op0 = expand_normal (arg0);
13911 rtx op1 = expand_normal (arg1);
13912 machine_mode mode0 = insn_data[icode].operand[1].mode;
13913 machine_mode mode1 = insn_data[icode].operand[2].mode;
13914 int form_int;
13915 enum rtx_code code;
13916
13917 if (TREE_CODE (form) != INTEGER_CST)
13918 {
13919 error ("argument 1 of __builtin_spe_predicate must be a constant");
13920 return const0_rtx;
13921 }
13922 else
13923 form_int = TREE_INT_CST_LOW (form);
13924
13925 gcc_assert (mode0 == mode1);
13926
13927 if (arg0 == error_mark_node || arg1 == error_mark_node)
13928 return const0_rtx;
13929
13930 if (target == 0
13931 || GET_MODE (target) != SImode
13932 || ! (*insn_data[icode].operand[0].predicate) (target, SImode))
13933 target = gen_reg_rtx (SImode);
13934
13935 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13936 op0 = copy_to_mode_reg (mode0, op0);
13937 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13938 op1 = copy_to_mode_reg (mode1, op1);
13939
13940 scratch = gen_reg_rtx (CCmode);
13941
13942 pat = GEN_FCN (icode) (scratch, op0, op1);
13943 if (! pat)
13944 return const0_rtx;
13945 emit_insn (pat);
13946
13947 /* There are 4 variants for each predicate: _any_, _all_, _upper_,
13948 _lower_. We use one compare, but look in different bits of the
13949 CR for each variant.
13950
13951 There are 2 elements in each SPE simd type (upper/lower). The CR
13952 bits are set as follows:
13953
13954 BIT 0 | BIT 1 | BIT 2 | BIT 3
13955 U | L | (U | L) | (U & L)
13956
13957 So, for an "all" relationship, BIT 3 would be set.
13958 For an "any" relationship, BIT 2 would be set. Etc.
13959
13960 Following traditional nomenclature, these bits map to:
13961
13962 BIT 0 | BIT 1 | BIT 2 | BIT 3
13963 LT | GT | EQ | OV
13964
13965 Later, we will generate rtl to look in the OV/EQ/LT/GT bits, for the all/any/upper/lower variants respectively.
13966 */
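  /* Sketch (illustrative, not verbatim RTL): for the "upper" variant
     (form 2) the code below ends up emitting roughly

       (set (reg:SI target) (lt:SI (reg:CC scratch) (const_int 0)))

     i.e. TARGET is loaded from the LT bit of the CR field written by
     the compare.  */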
13967
13968 switch (form_int)
13969 {
13970 /* All variant. OV bit. */
13971 case 0:
13972 /* We need to get to the OV bit, which is the ORDERED bit. We
13973 could generate (ordered:SI (reg:CC xx) (const_int 0)), but
13974 that's ugly and will make validate_condition_mode die.
13975 So let's just use another pattern. */
13976 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
13977 return target;
13978 /* Any variant. EQ bit. */
13979 case 1:
13980 code = EQ;
13981 break;
13982 /* Upper variant. LT bit. */
13983 case 2:
13984 code = LT;
13985 break;
13986 /* Lower variant. GT bit. */
13987 case 3:
13988 code = GT;
13989 break;
13990 default:
13991 error ("argument 1 of __builtin_spe_predicate is out of range");
13992 return const0_rtx;
13993 }
13994
13995 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
13996 emit_move_insn (target, tmp);
13997
13998 return target;
13999 }
14000
14001 /* The evsel builtins look like this:
14002
14003 e = __builtin_spe_evsel_OP (a, b, c, d);
14004
14005 and work like this:
14006
14007 e[upper] = a[upper] *OP* b[upper] ? c[upper] : d[upper];
14008 e[lower] = a[lower] *OP* b[lower] ? c[lower] : d[lower];
14009 */
14010
14011 static rtx
14012 spe_expand_evsel_builtin (enum insn_code icode, tree exp, rtx target)
14013 {
14014 rtx pat, scratch;
14015 tree arg0 = CALL_EXPR_ARG (exp, 0);
14016 tree arg1 = CALL_EXPR_ARG (exp, 1);
14017 tree arg2 = CALL_EXPR_ARG (exp, 2);
14018 tree arg3 = CALL_EXPR_ARG (exp, 3);
14019 rtx op0 = expand_normal (arg0);
14020 rtx op1 = expand_normal (arg1);
14021 rtx op2 = expand_normal (arg2);
14022 rtx op3 = expand_normal (arg3);
14023 machine_mode mode0 = insn_data[icode].operand[1].mode;
14024 machine_mode mode1 = insn_data[icode].operand[2].mode;
14025
14026 gcc_assert (mode0 == mode1);
14027
14028 if (arg0 == error_mark_node || arg1 == error_mark_node
14029 || arg2 == error_mark_node || arg3 == error_mark_node)
14030 return const0_rtx;
14031
14032 if (target == 0
14033 || GET_MODE (target) != mode0
14034 || ! (*insn_data[icode].operand[0].predicate) (target, mode0))
14035 target = gen_reg_rtx (mode0);
14036
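  /* All four vector operands share a single mode (asserted above), so
     the operand-1 predicate and MODE0 are reused for each of them.  */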
14037 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14038 op0 = copy_to_mode_reg (mode0, op0);
14039 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
14040 op1 = copy_to_mode_reg (mode0, op1);
14041 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
14042 op2 = copy_to_mode_reg (mode0, op2);
14043 if (! (*insn_data[icode].operand[1].predicate) (op3, mode1))
14044 op3 = copy_to_mode_reg (mode0, op3);
14045
14046 /* Generate the compare. */
14047 scratch = gen_reg_rtx (CCmode);
14048 pat = GEN_FCN (icode) (scratch, op0, op1);
14049 if (! pat)
14050 return const0_rtx;
14051 emit_insn (pat);
14052
14053 if (mode0 == V2SImode)
14054 emit_insn (gen_spe_evsel (target, op2, op3, scratch));
14055 else
14056 emit_insn (gen_spe_evsel_fs (target, op2, op3, scratch));
14057
14058 return target;
14059 }
14060
14061 /* Raise an error message for a builtin function that is called without the
14062 appropriate target options being set. */
14063
14064 static void
14065 rs6000_invalid_builtin (enum rs6000_builtins fncode)
14066 {
14067 size_t uns_fncode = (size_t)fncode;
14068 const char *name = rs6000_builtin_info[uns_fncode].name;
14069 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
14070
14071 gcc_assert (name != NULL);
14072 if ((fnmask & RS6000_BTM_CELL) != 0)
14073 error ("Builtin function %s is only valid for the cell processor", name);
14074 else if ((fnmask & RS6000_BTM_VSX) != 0)
14075 error ("Builtin function %s requires the -mvsx option", name);
14076 else if ((fnmask & RS6000_BTM_HTM) != 0)
14077 error ("Builtin function %s requires the -mhtm option", name);
14078 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
14079 error ("Builtin function %s requires the -maltivec option", name);
14080 else if ((fnmask & RS6000_BTM_PAIRED) != 0)
14081 error ("Builtin function %s requires the -mpaired option", name);
14082 else if ((fnmask & RS6000_BTM_SPE) != 0)
14083 error ("Builtin function %s requires the -mspe option", name);
14084 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
14085 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
14086 error ("Builtin function %s requires the -mhard-dfp and"
14087 " -mpower8-vector options", name);
14088 else if ((fnmask & RS6000_BTM_DFP) != 0)
14089 error ("Builtin function %s requires the -mhard-dfp option", name);
14090 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
14091 error ("Builtin function %s requires the -mpower8-vector option", name);
14092 else if ((fnmask & (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
14093 == (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
14094 error ("Builtin function %s requires the -mhard-float and"
14095 " -mlong-double-128 options", name);
14096 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
14097 error ("Builtin function %s requires the -mhard-float option", name);
14098 else
14099 error ("Builtin function %s is not supported with the current options",
14100 name);
14101 }
14102
14103 /* Expand an expression EXP that calls a built-in function,
14104 with result going to TARGET if that's convenient
14105 (and in mode MODE if that's convenient).
14106 SUBTARGET may be used as the target for computing one of EXP's operands.
14107 IGNORE is nonzero if the value is to be ignored. */
14108
14109 static rtx
14110 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
14111 machine_mode mode ATTRIBUTE_UNUSED,
14112 int ignore ATTRIBUTE_UNUSED)
14113 {
14114 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14115 enum rs6000_builtins fcode
14116 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
14117 size_t uns_fcode = (size_t)fcode;
14118 const struct builtin_description *d;
14119 size_t i;
14120 rtx ret;
14121 bool success;
14122 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
14123 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
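  /* A builtin is usable only when every feature bit in its mask is
     enabled; e.g. (illustrative) a mask of
     RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR requires both bits to be
     set in rs6000_builtin_mask.  */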
14124
14125 if (TARGET_DEBUG_BUILTIN)
14126 {
14127 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
14128 const char *name1 = rs6000_builtin_info[uns_fcode].name;
14129 const char *name2 = ((icode != CODE_FOR_nothing)
14130 ? get_insn_name ((int)icode)
14131 : "nothing");
14132 const char *name3;
14133
14134 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
14135 {
14136 default: name3 = "unknown"; break;
14137 case RS6000_BTC_SPECIAL: name3 = "special"; break;
14138 case RS6000_BTC_UNARY: name3 = "unary"; break;
14139 case RS6000_BTC_BINARY: name3 = "binary"; break;
14140 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
14141 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
14142 case RS6000_BTC_ABS: name3 = "abs"; break;
14143 case RS6000_BTC_EVSEL: name3 = "evsel"; break;
14144 case RS6000_BTC_DST: name3 = "dst"; break;
14145 }
14146
14148 fprintf (stderr,
14149 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
14150 (name1) ? name1 : "---", fcode,
14151 (name2) ? name2 : "---", (int)icode,
14152 name3,
14153 func_valid_p ? "" : ", not valid");
14154 }
14155
14156 if (!func_valid_p)
14157 {
14158 rs6000_invalid_builtin (fcode);
14159
14160 /* Given it is invalid, just generate a normal call. */
14161 return expand_call (exp, target, ignore);
14162 }
14163
14164 switch (fcode)
14165 {
14166 case RS6000_BUILTIN_RECIP:
14167 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
14168
14169 case RS6000_BUILTIN_RECIPF:
14170 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
14171
14172 case RS6000_BUILTIN_RSQRTF:
14173 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
14174
14175 case RS6000_BUILTIN_RSQRT:
14176 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
14177
14178 case POWER7_BUILTIN_BPERMD:
14179 return rs6000_expand_binop_builtin (((TARGET_64BIT)
14180 ? CODE_FOR_bpermd_di
14181 : CODE_FOR_bpermd_si), exp, target);
14182
14183 case RS6000_BUILTIN_GET_TB:
14184 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
14185 target);
14186
14187 case RS6000_BUILTIN_MFTB:
14188 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
14189 ? CODE_FOR_rs6000_mftb_di
14190 : CODE_FOR_rs6000_mftb_si),
14191 target);
14192
14193 case RS6000_BUILTIN_MFFS:
14194 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
14195
14196 case RS6000_BUILTIN_MTFSF:
14197 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
14198
14199 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
14200 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
14201 {
14202 int icode = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
14203 : (int) CODE_FOR_altivec_lvsl_direct);
14204 machine_mode tmode = insn_data[icode].operand[0].mode;
14205 machine_mode mode = insn_data[icode].operand[1].mode;
14206 tree arg;
14207 rtx op, addr, pat;
14208
14209 gcc_assert (TARGET_ALTIVEC);
14210
14211 arg = CALL_EXPR_ARG (exp, 0);
14212 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
14213 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
14214 addr = memory_address (mode, op);
14215 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
14216 op = addr;
14217 else
14218 {
14219 /* For the load case we need to negate the address.  */
14220 op = gen_reg_rtx (GET_MODE (addr));
14221 emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
14222 }
14223 op = gen_rtx_MEM (mode, op);
14224
14225 if (target == 0
14226 || GET_MODE (target) != tmode
14227 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14228 target = gen_reg_rtx (tmode);
14229
14230 pat = GEN_FCN (icode) (target, op);
14231 if (!pat)
14232 return 0;
14233 emit_insn (pat);
14234
14235 return target;
14236 }
14237
14238 case ALTIVEC_BUILTIN_VCFUX:
14239 case ALTIVEC_BUILTIN_VCFSX:
14240 case ALTIVEC_BUILTIN_VCTUXS:
14241 case ALTIVEC_BUILTIN_VCTSXS:
14242 /* FIXME: There's got to be a nicer way to handle this case than
14243 constructing a new CALL_EXPR. */
14244 if (call_expr_nargs (exp) == 1)
14245 {
14246 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
14247 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
14248 }
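	/* E.g. (illustrative reading of the code above): a one-argument
	   __builtin_altivec_vcfux (V) is rewritten as the two-argument
	   form with a zero scale factor before normal expansion runs.  */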
14249 break;
14250
14251 default:
14252 break;
14253 }
14254
14255 if (TARGET_ALTIVEC)
14256 {
14257 ret = altivec_expand_builtin (exp, target, &success);
14258
14259 if (success)
14260 return ret;
14261 }
14262 if (TARGET_SPE)
14263 {
14264 ret = spe_expand_builtin (exp, target, &success);
14265
14266 if (success)
14267 return ret;
14268 }
14269 if (TARGET_PAIRED_FLOAT)
14270 {
14271 ret = paired_expand_builtin (exp, target, &success);
14272
14273 if (success)
14274 return ret;
14275 }
14276 if (TARGET_HTM)
14277 {
14278 ret = htm_expand_builtin (exp, target, &success);
14279
14280 if (success)
14281 return ret;
14282 }
14283
14284 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
14285 gcc_assert (attr == RS6000_BTC_UNARY
14286 || attr == RS6000_BTC_BINARY
14287 || attr == RS6000_BTC_TERNARY);
14288
14289 /* Handle simple unary operations. */
14290 d = bdesc_1arg;
14291 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
14292 if (d->code == fcode)
14293 return rs6000_expand_unop_builtin (d->icode, exp, target);
14294
14295 /* Handle simple binary operations. */
14296 d = bdesc_2arg;
14297 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
14298 if (d->code == fcode)
14299 return rs6000_expand_binop_builtin (d->icode, exp, target);
14300
14301 /* Handle simple ternary operations. */
14302 d = bdesc_3arg;
14303 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
14304 if (d->code == fcode)
14305 return rs6000_expand_ternop_builtin (d->icode, exp, target);
14306
14307 gcc_unreachable ();
14308 }
14309
14310 static void
14311 rs6000_init_builtins (void)
14312 {
14313 tree tdecl;
14314 tree ftype;
14315 machine_mode mode;
14316
14317 if (TARGET_DEBUG_BUILTIN)
14318 fprintf (stderr, "rs6000_init_builtins%s%s%s%s\n",
14319 (TARGET_PAIRED_FLOAT) ? ", paired" : "",
14320 (TARGET_SPE) ? ", spe" : "",
14321 (TARGET_ALTIVEC) ? ", altivec" : "",
14322 (TARGET_VSX) ? ", vsx" : "");
14323
14324 V2SI_type_node = build_vector_type (intSI_type_node, 2);
14325 V2SF_type_node = build_vector_type (float_type_node, 2);
14326 V2DI_type_node = build_vector_type (intDI_type_node, 2);
14327 V2DF_type_node = build_vector_type (double_type_node, 2);
14328 V4HI_type_node = build_vector_type (intHI_type_node, 4);
14329 V4SI_type_node = build_vector_type (intSI_type_node, 4);
14330 V4SF_type_node = build_vector_type (float_type_node, 4);
14331 V8HI_type_node = build_vector_type (intHI_type_node, 8);
14332 V16QI_type_node = build_vector_type (intQI_type_node, 16);
14333
14334 unsigned_V16QI_type_node = build_vector_type (unsigned_intQI_type_node, 16);
14335 unsigned_V8HI_type_node = build_vector_type (unsigned_intHI_type_node, 8);
14336 unsigned_V4SI_type_node = build_vector_type (unsigned_intSI_type_node, 4);
14337 unsigned_V2DI_type_node = build_vector_type (unsigned_intDI_type_node, 2);
14338
14339 opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
14340 opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
14341 opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
14342 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
14343
14344 /* We use V1TI mode as a special container to hold __int128_t items that
14345 must live in VSX registers. */
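  /* (Illustrative: a source-level 'vector __int128' therefore has a
     single element that fills one 128-bit VSX register.)  */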
14346 if (intTI_type_node)
14347 {
14348 V1TI_type_node = build_vector_type (intTI_type_node, 1);
14349 unsigned_V1TI_type_node = build_vector_type (unsigned_intTI_type_node, 1);
14350 }
14351
14352 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
14353 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
14354 'vector unsigned short'. */
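  /* For example (illustrative): in C++ an overload taking
     'vector bool int' must remain distinguishable from one taking
     'vector unsigned int', so the two may not share a type node even
     though both have unsigned 32-bit elements underneath.  */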
14355
14356 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
14357 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
14358 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
14359 bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
14360 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
14361
14362 long_integer_type_internal_node = long_integer_type_node;
14363 long_unsigned_type_internal_node = long_unsigned_type_node;
14364 long_long_integer_type_internal_node = long_long_integer_type_node;
14365 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
14366 intQI_type_internal_node = intQI_type_node;
14367 uintQI_type_internal_node = unsigned_intQI_type_node;
14368 intHI_type_internal_node = intHI_type_node;
14369 uintHI_type_internal_node = unsigned_intHI_type_node;
14370 intSI_type_internal_node = intSI_type_node;
14371 uintSI_type_internal_node = unsigned_intSI_type_node;
14372 intDI_type_internal_node = intDI_type_node;
14373 uintDI_type_internal_node = unsigned_intDI_type_node;
14374 intTI_type_internal_node = intTI_type_node;
14375 uintTI_type_internal_node = unsigned_intTI_type_node;
14376 float_type_internal_node = float_type_node;
14377 double_type_internal_node = double_type_node;
14378 long_double_type_internal_node = long_double_type_node;
14379 dfloat64_type_internal_node = dfloat64_type_node;
14380 dfloat128_type_internal_node = dfloat128_type_node;
14381 void_type_internal_node = void_type_node;
14382
14383 /* Initialize the modes for builtin_function_type, mapping a machine mode to
14384 the corresponding tree type node. */
14385 builtin_mode_to_type[QImode][0] = integer_type_node;
14386 builtin_mode_to_type[HImode][0] = integer_type_node;
14387 builtin_mode_to_type[SImode][0] = intSI_type_node;
14388 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
14389 builtin_mode_to_type[DImode][0] = intDI_type_node;
14390 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
14391 builtin_mode_to_type[TImode][0] = intTI_type_node;
14392 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
14393 builtin_mode_to_type[SFmode][0] = float_type_node;
14394 builtin_mode_to_type[DFmode][0] = double_type_node;
14395 builtin_mode_to_type[TFmode][0] = long_double_type_node;
14396 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
14397 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
14398 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
14399 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
14400 builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
14401 builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
14402 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
14403 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
14404 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
14405 builtin_mode_to_type[V4HImode][0] = V4HI_type_node;
14406 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
14407 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
14408 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
14409 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
14410 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
14411 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
14412 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
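  /* Example lookup (illustrative): for an unsigned operand in
     V4SImode, builtin_function_type consults
     builtin_mode_to_type[V4SImode][1] and obtains
     unsigned_V4SI_type_node, i.e. 'vector unsigned int'.  */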
14413
14414 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
14415 TYPE_NAME (bool_char_type_node) = tdecl;
14416
14417 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
14418 TYPE_NAME (bool_short_type_node) = tdecl;
14419
14420 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
14421 TYPE_NAME (bool_int_type_node) = tdecl;
14422
14423 tdecl = add_builtin_type ("__pixel", pixel_type_node);
14424 TYPE_NAME (pixel_type_node) = tdecl;
14425
14426 bool_V16QI_type_node = build_vector_type (bool_char_type_node, 16);
14427 bool_V8HI_type_node = build_vector_type (bool_short_type_node, 8);
14428 bool_V4SI_type_node = build_vector_type (bool_int_type_node, 4);
14429 bool_V2DI_type_node = build_vector_type (bool_long_type_node, 2);
14430 pixel_V8HI_type_node = build_vector_type (pixel_type_node, 8);
14431
14432 tdecl = add_builtin_type ("__vector unsigned char", unsigned_V16QI_type_node);
14433 TYPE_NAME (unsigned_V16QI_type_node) = tdecl;
14434
14435 tdecl = add_builtin_type ("__vector signed char", V16QI_type_node);
14436 TYPE_NAME (V16QI_type_node) = tdecl;
14437
14438 tdecl = add_builtin_type ("__vector __bool char", bool_V16QI_type_node);
14439 TYPE_NAME (bool_V16QI_type_node) = tdecl;
14440
14441 tdecl = add_builtin_type ("__vector unsigned short", unsigned_V8HI_type_node);
14442 TYPE_NAME (unsigned_V8HI_type_node) = tdecl;
14443
14444 tdecl = add_builtin_type ("__vector signed short", V8HI_type_node);
14445 TYPE_NAME (V8HI_type_node) = tdecl;
14446
14447 tdecl = add_builtin_type ("__vector __bool short", bool_V8HI_type_node);
14448 TYPE_NAME (bool_V8HI_type_node) = tdecl;
14449
14450 tdecl = add_builtin_type ("__vector unsigned int", unsigned_V4SI_type_node);
14451 TYPE_NAME (unsigned_V4SI_type_node) = tdecl;
14452
14453 tdecl = add_builtin_type ("__vector signed int", V4SI_type_node);
14454 TYPE_NAME (V4SI_type_node) = tdecl;
14455
14456 tdecl = add_builtin_type ("__vector __bool int", bool_V4SI_type_node);
14457 TYPE_NAME (bool_V4SI_type_node) = tdecl;
14458
14459 tdecl = add_builtin_type ("__vector float", V4SF_type_node);
14460 TYPE_NAME (V4SF_type_node) = tdecl;
14461
14462 tdecl = add_builtin_type ("__vector __pixel", pixel_V8HI_type_node);
14463 TYPE_NAME (pixel_V8HI_type_node) = tdecl;
14464
14465 tdecl = add_builtin_type ("__vector double", V2DF_type_node);
14466 TYPE_NAME (V2DF_type_node) = tdecl;
14467
14468 if (TARGET_POWERPC64)
14469 {
14470 tdecl = add_builtin_type ("__vector long", V2DI_type_node);
14471 TYPE_NAME (V2DI_type_node) = tdecl;
14472
14473 tdecl = add_builtin_type ("__vector unsigned long",
14474 unsigned_V2DI_type_node);
14475 TYPE_NAME (unsigned_V2DI_type_node) = tdecl;
14476
14477 tdecl = add_builtin_type ("__vector __bool long", bool_V2DI_type_node);
14478 TYPE_NAME (bool_V2DI_type_node) = tdecl;
14479 }
14480 else
14481 {
14482 tdecl = add_builtin_type ("__vector long long", V2DI_type_node);
14483 TYPE_NAME (V2DI_type_node) = tdecl;
14484
14485 tdecl = add_builtin_type ("__vector unsigned long long",
14486 unsigned_V2DI_type_node);
14487 TYPE_NAME (unsigned_V2DI_type_node) = tdecl;
14488
14489 tdecl = add_builtin_type ("__vector __bool long long",
14490 bool_V2DI_type_node);
14491 TYPE_NAME (bool_V2DI_type_node) = tdecl;
14492 }
14493
14494 if (V1TI_type_node)
14495 {
14496 tdecl = add_builtin_type ("__vector __int128", V1TI_type_node);
14497 TYPE_NAME (V1TI_type_node) = tdecl;
14498
14499 tdecl = add_builtin_type ("__vector unsigned __int128",
14500 unsigned_V1TI_type_node);
14501 TYPE_NAME (unsigned_V1TI_type_node) = tdecl;
14502 }
14503
14504 /* Paired and SPE builtins are only available if you build a compiler with
14505 the appropriate options, so only create those builtins with the
14506 appropriate compiler option. Create Altivec and VSX builtins on machines
14507 with at least the general purpose extensions (970 and newer) to allow the
14508 use of the target attribute. */
14509 if (TARGET_PAIRED_FLOAT)
14510 paired_init_builtins ();
14511 if (TARGET_SPE)
14512 spe_init_builtins ();
14513 if (TARGET_EXTRA_BUILTINS)
14514 altivec_init_builtins ();
14515 if (TARGET_HTM)
14516 htm_init_builtins ();
14517
14518 if (TARGET_EXTRA_BUILTINS || TARGET_SPE || TARGET_PAIRED_FLOAT)
14519 rs6000_common_init_builtins ();
14520
14521 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
14522 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
14523 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
14524
14525 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
14526 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
14527 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
14528
14529 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
14530 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
14531 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
14532
14533 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
14534 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
14535 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
14536
14537 mode = (TARGET_64BIT) ? DImode : SImode;
14538 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
14539 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
14540 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
14541
14542 ftype = build_function_type_list (unsigned_intDI_type_node,
14543 NULL_TREE);
14544 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
14545
14546 if (TARGET_64BIT)
14547 ftype = build_function_type_list (unsigned_intDI_type_node,
14548 NULL_TREE);
14549 else
14550 ftype = build_function_type_list (unsigned_intSI_type_node,
14551 NULL_TREE);
14552 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
14553
14554 ftype = build_function_type_list (double_type_node, NULL_TREE);
14555 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
14556
14557 ftype = build_function_type_list (void_type_node,
14558 intSI_type_node, double_type_node,
14559 NULL_TREE);
14560 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
14561
14562 #if TARGET_XCOFF
14563 /* AIX libm provides clog as __clog. */
14564 if ((tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
14565 set_user_assembler_name (tdecl, "__clog");
14566 #endif
14567
14568 #ifdef SUBTARGET_INIT_BUILTINS
14569 SUBTARGET_INIT_BUILTINS;
14570 #endif
14571 }
14572
14573 /* Returns the rs6000 builtin decl for CODE. */
14574
14575 static tree
14576 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
14577 {
14578 HOST_WIDE_INT fnmask;
14579
14580 if (code >= RS6000_BUILTIN_COUNT)
14581 return error_mark_node;
14582
14583 fnmask = rs6000_builtin_info[code].mask;
14584 if ((fnmask & rs6000_builtin_mask) != fnmask)
14585 {
14586 rs6000_invalid_builtin ((enum rs6000_builtins)code);
14587 return error_mark_node;
14588 }
14589
14590 return rs6000_builtin_decls[code];
14591 }
14592
14593 static void
14594 spe_init_builtins (void)
14595 {
14596 tree puint_type_node = build_pointer_type (unsigned_type_node);
14597 tree pushort_type_node = build_pointer_type (short_unsigned_type_node);
14598 const struct builtin_description *d;
14599 size_t i;
14600
14601 tree v2si_ftype_4_v2si
14602 = build_function_type_list (opaque_V2SI_type_node,
14603 opaque_V2SI_type_node,
14604 opaque_V2SI_type_node,
14605 opaque_V2SI_type_node,
14606 opaque_V2SI_type_node,
14607 NULL_TREE);
14608
14609 tree v2sf_ftype_4_v2sf
14610 = build_function_type_list (opaque_V2SF_type_node,
14611 opaque_V2SF_type_node,
14612 opaque_V2SF_type_node,
14613 opaque_V2SF_type_node,
14614 opaque_V2SF_type_node,
14615 NULL_TREE);
14616
14617 tree int_ftype_int_v2si_v2si
14618 = build_function_type_list (integer_type_node,
14619 integer_type_node,
14620 opaque_V2SI_type_node,
14621 opaque_V2SI_type_node,
14622 NULL_TREE);
14623
14624 tree int_ftype_int_v2sf_v2sf
14625 = build_function_type_list (integer_type_node,
14626 integer_type_node,
14627 opaque_V2SF_type_node,
14628 opaque_V2SF_type_node,
14629 NULL_TREE);
14630
14631 tree void_ftype_v2si_puint_int
14632 = build_function_type_list (void_type_node,
14633 opaque_V2SI_type_node,
14634 puint_type_node,
14635 integer_type_node,
14636 NULL_TREE);
14637
14638 tree void_ftype_v2si_puint_char
14639 = build_function_type_list (void_type_node,
14640 opaque_V2SI_type_node,
14641 puint_type_node,
14642 char_type_node,
14643 NULL_TREE);
14644
14645 tree void_ftype_v2si_pv2si_int
14646 = build_function_type_list (void_type_node,
14647 opaque_V2SI_type_node,
14648 opaque_p_V2SI_type_node,
14649 integer_type_node,
14650 NULL_TREE);
14651
14652 tree void_ftype_v2si_pv2si_char
14653 = build_function_type_list (void_type_node,
14654 opaque_V2SI_type_node,
14655 opaque_p_V2SI_type_node,
14656 char_type_node,
14657 NULL_TREE);
14658
14659 tree void_ftype_int
14660 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
14661
14662 tree int_ftype_void
14663 = build_function_type_list (integer_type_node, NULL_TREE);
14664
14665 tree v2si_ftype_pv2si_int
14666 = build_function_type_list (opaque_V2SI_type_node,
14667 opaque_p_V2SI_type_node,
14668 integer_type_node,
14669 NULL_TREE);
14670
14671 tree v2si_ftype_puint_int
14672 = build_function_type_list (opaque_V2SI_type_node,
14673 puint_type_node,
14674 integer_type_node,
14675 NULL_TREE);
14676
14677 tree v2si_ftype_pushort_int
14678 = build_function_type_list (opaque_V2SI_type_node,
14679 pushort_type_node,
14680 integer_type_node,
14681 NULL_TREE);
14682
14683 tree v2si_ftype_signed_char
14684 = build_function_type_list (opaque_V2SI_type_node,
14685 signed_char_type_node,
14686 NULL_TREE);
14687
14688 add_builtin_type ("__ev64_opaque__", opaque_V2SI_type_node);
14689
14690 /* Initialize irregular SPE builtins. */
14691
14692 def_builtin ("__builtin_spe_mtspefscr", void_ftype_int, SPE_BUILTIN_MTSPEFSCR);
14693 def_builtin ("__builtin_spe_mfspefscr", int_ftype_void, SPE_BUILTIN_MFSPEFSCR);
14694 def_builtin ("__builtin_spe_evstddx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDDX);
14695 def_builtin ("__builtin_spe_evstdhx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDHX);
14696 def_builtin ("__builtin_spe_evstdwx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDWX);
14697 def_builtin ("__builtin_spe_evstwhex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHEX);
14698 def_builtin ("__builtin_spe_evstwhox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHOX);
14699 def_builtin ("__builtin_spe_evstwwex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWEX);
14700 def_builtin ("__builtin_spe_evstwwox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWOX);
14701 def_builtin ("__builtin_spe_evstdd", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDD);
14702 def_builtin ("__builtin_spe_evstdh", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDH);
14703 def_builtin ("__builtin_spe_evstdw", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDW);
14704 def_builtin ("__builtin_spe_evstwhe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHE);
14705 def_builtin ("__builtin_spe_evstwho", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHO);
14706 def_builtin ("__builtin_spe_evstwwe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWE);
14707 def_builtin ("__builtin_spe_evstwwo", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWO);
14708 def_builtin ("__builtin_spe_evsplatfi", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATFI);
14709 def_builtin ("__builtin_spe_evsplati", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATI);
14710
14711 /* Loads. */
14712 def_builtin ("__builtin_spe_evlddx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDDX);
14713 def_builtin ("__builtin_spe_evldwx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDWX);
14714 def_builtin ("__builtin_spe_evldhx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDHX);
14715 def_builtin ("__builtin_spe_evlwhex", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHEX);
14716 def_builtin ("__builtin_spe_evlwhoux", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOUX);
14717 def_builtin ("__builtin_spe_evlwhosx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOSX);
14718 def_builtin ("__builtin_spe_evlwwsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLATX);
14719 def_builtin ("__builtin_spe_evlwhsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLATX);
14720 def_builtin ("__builtin_spe_evlhhesplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLATX);
14721 def_builtin ("__builtin_spe_evlhhousplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLATX);
14722 def_builtin ("__builtin_spe_evlhhossplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLATX);
14723 def_builtin ("__builtin_spe_evldd", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDD);
14724 def_builtin ("__builtin_spe_evldw", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDW);
14725 def_builtin ("__builtin_spe_evldh", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDH);
14726 def_builtin ("__builtin_spe_evlhhesplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLAT);
14727 def_builtin ("__builtin_spe_evlhhossplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLAT);
14728 def_builtin ("__builtin_spe_evlhhousplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLAT);
14729 def_builtin ("__builtin_spe_evlwhe", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHE);
14730 def_builtin ("__builtin_spe_evlwhos", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOS);
14731 def_builtin ("__builtin_spe_evlwhou", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOU);
14732 def_builtin ("__builtin_spe_evlwhsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLAT);
14733 def_builtin ("__builtin_spe_evlwwsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLAT);
14734
14735 /* Predicates. */
14736 d = bdesc_spe_predicates;
14737 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, d++)
14738 {
14739 tree type;
14740
14741 switch (insn_data[d->icode].operand[1].mode)
14742 {
14743 case V2SImode:
14744 type = int_ftype_int_v2si_v2si;
14745 break;
14746 case V2SFmode:
14747 type = int_ftype_int_v2sf_v2sf;
14748 break;
14749 default:
14750 gcc_unreachable ();
14751 }
14752
14753 def_builtin (d->name, type, d->code);
14754 }
14755
14756 /* Evsel predicates. */
14757 d = bdesc_spe_evsel;
14758 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, d++)
14759 {
14760 tree type;
14761
14762 switch (insn_data[d->icode].operand[1].mode)
14763 {
14764 case V2SImode:
14765 type = v2si_ftype_4_v2si;
14766 break;
14767 case V2SFmode:
14768 type = v2sf_ftype_4_v2sf;
14769 break;
14770 default:
14771 gcc_unreachable ();
14772 }
14773
14774 def_builtin (d->name, type, d->code);
14775 }
14776 }
14777
14778 static void
14779 paired_init_builtins (void)
14780 {
14781 const struct builtin_description *d;
14782 size_t i;
14783
14784 tree int_ftype_int_v2sf_v2sf
14785 = build_function_type_list (integer_type_node,
14786 integer_type_node,
14787 V2SF_type_node,
14788 V2SF_type_node,
14789 NULL_TREE);
14790 tree pcfloat_type_node =
14791 build_pointer_type (build_qualified_type
14792 (float_type_node, TYPE_QUAL_CONST));
14793
14794 tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
14795 long_integer_type_node,
14796 pcfloat_type_node,
14797 NULL_TREE);
14798 tree void_ftype_v2sf_long_pcfloat =
14799 build_function_type_list (void_type_node,
14800 V2SF_type_node,
14801 long_integer_type_node,
14802 pcfloat_type_node,
14803 NULL_TREE);
14804
14806 def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
14807 PAIRED_BUILTIN_LX);
14808
14810 def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
14811 PAIRED_BUILTIN_STX);
14812
14813 /* Predicates. */
14814 d = bdesc_paired_preds;
14815 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
14816 {
14817 tree type;
14818
14819 if (TARGET_DEBUG_BUILTIN)
14820 fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
14821 (int)i, get_insn_name (d->icode), (int)d->icode,
14822 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));
14823
14824 switch (insn_data[d->icode].operand[1].mode)
14825 {
14826 case V2SFmode:
14827 type = int_ftype_int_v2sf_v2sf;
14828 break;
14829 default:
14830 gcc_unreachable ();
14831 }
14832
14833 def_builtin (d->name, type, d->code);
14834 }
14835 }
14836
14837 static void
14838 altivec_init_builtins (void)
14839 {
14840 const struct builtin_description *d;
14841 size_t i;
14842 tree ftype;
14843 tree decl;
14844
14845 tree pvoid_type_node = build_pointer_type (void_type_node);
14846
14847 tree pcvoid_type_node
14848 = build_pointer_type (build_qualified_type (void_type_node,
14849 TYPE_QUAL_CONST));
14850
14851 tree int_ftype_opaque
14852 = build_function_type_list (integer_type_node,
14853 opaque_V4SI_type_node, NULL_TREE);
14854 tree opaque_ftype_opaque
14855 = build_function_type_list (integer_type_node, NULL_TREE);
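  /* Note (an assumed reading, not stated in this file): despite its
     name this is a dummy signature; vec_splats and vec_promote are
     resolved by the overload machinery in the front end, so the exact
     type recorded here is not what callers see.  */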
14856 tree opaque_ftype_opaque_int
14857 = build_function_type_list (opaque_V4SI_type_node,
14858 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
14859 tree opaque_ftype_opaque_opaque_int
14860 = build_function_type_list (opaque_V4SI_type_node,
14861 opaque_V4SI_type_node, opaque_V4SI_type_node,
14862 integer_type_node, NULL_TREE);
14863 tree int_ftype_int_opaque_opaque
14864 = build_function_type_list (integer_type_node,
14865 integer_type_node, opaque_V4SI_type_node,
14866 opaque_V4SI_type_node, NULL_TREE);
14867 tree int_ftype_int_v4si_v4si
14868 = build_function_type_list (integer_type_node,
14869 integer_type_node, V4SI_type_node,
14870 V4SI_type_node, NULL_TREE);
14871 tree int_ftype_int_v2di_v2di
14872 = build_function_type_list (integer_type_node,
14873 integer_type_node, V2DI_type_node,
14874 V2DI_type_node, NULL_TREE);
14875 tree void_ftype_v4si
14876 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
14877 tree v8hi_ftype_void
14878 = build_function_type_list (V8HI_type_node, NULL_TREE);
14879 tree void_ftype_void
14880 = build_function_type_list (void_type_node, NULL_TREE);
14881 tree void_ftype_int
14882 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
14883
14884 tree opaque_ftype_long_pcvoid
14885 = build_function_type_list (opaque_V4SI_type_node,
14886 long_integer_type_node, pcvoid_type_node,
14887 NULL_TREE);
14888 tree v16qi_ftype_long_pcvoid
14889 = build_function_type_list (V16QI_type_node,
14890 long_integer_type_node, pcvoid_type_node,
14891 NULL_TREE);
14892 tree v8hi_ftype_long_pcvoid
14893 = build_function_type_list (V8HI_type_node,
14894 long_integer_type_node, pcvoid_type_node,
14895 NULL_TREE);
14896 tree v4si_ftype_long_pcvoid
14897 = build_function_type_list (V4SI_type_node,
14898 long_integer_type_node, pcvoid_type_node,
14899 NULL_TREE);
14900 tree v4sf_ftype_long_pcvoid
14901 = build_function_type_list (V4SF_type_node,
14902 long_integer_type_node, pcvoid_type_node,
14903 NULL_TREE);
14904 tree v2df_ftype_long_pcvoid
14905 = build_function_type_list (V2DF_type_node,
14906 long_integer_type_node, pcvoid_type_node,
14907 NULL_TREE);
14908 tree v2di_ftype_long_pcvoid
14909 = build_function_type_list (V2DI_type_node,
14910 long_integer_type_node, pcvoid_type_node,
14911 NULL_TREE);
14912
14913 tree void_ftype_opaque_long_pvoid
14914 = build_function_type_list (void_type_node,
14915 opaque_V4SI_type_node, long_integer_type_node,
14916 pvoid_type_node, NULL_TREE);
14917 tree void_ftype_v4si_long_pvoid
14918 = build_function_type_list (void_type_node,
14919 V4SI_type_node, long_integer_type_node,
14920 pvoid_type_node, NULL_TREE);
14921 tree void_ftype_v16qi_long_pvoid
14922 = build_function_type_list (void_type_node,
14923 V16QI_type_node, long_integer_type_node,
14924 pvoid_type_node, NULL_TREE);
14925 tree void_ftype_v8hi_long_pvoid
14926 = build_function_type_list (void_type_node,
14927 V8HI_type_node, long_integer_type_node,
14928 pvoid_type_node, NULL_TREE);
14929 tree void_ftype_v4sf_long_pvoid
14930 = build_function_type_list (void_type_node,
14931 V4SF_type_node, long_integer_type_node,
14932 pvoid_type_node, NULL_TREE);
14933 tree void_ftype_v2df_long_pvoid
14934 = build_function_type_list (void_type_node,
14935 V2DF_type_node, long_integer_type_node,
14936 pvoid_type_node, NULL_TREE);
14937 tree void_ftype_v2di_long_pvoid
14938 = build_function_type_list (void_type_node,
14939 V2DI_type_node, long_integer_type_node,
14940 pvoid_type_node, NULL_TREE);
14941 tree int_ftype_int_v8hi_v8hi
14942 = build_function_type_list (integer_type_node,
14943 integer_type_node, V8HI_type_node,
14944 V8HI_type_node, NULL_TREE);
14945 tree int_ftype_int_v16qi_v16qi
14946 = build_function_type_list (integer_type_node,
14947 integer_type_node, V16QI_type_node,
14948 V16QI_type_node, NULL_TREE);
14949 tree int_ftype_int_v4sf_v4sf
14950 = build_function_type_list (integer_type_node,
14951 integer_type_node, V4SF_type_node,
14952 V4SF_type_node, NULL_TREE);
14953 tree int_ftype_int_v2df_v2df
14954 = build_function_type_list (integer_type_node,
14955 integer_type_node, V2DF_type_node,
14956 V2DF_type_node, NULL_TREE);
14957 tree v2di_ftype_v2di
14958 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
14959 tree v4si_ftype_v4si
14960 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
14961 tree v8hi_ftype_v8hi
14962 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
14963 tree v16qi_ftype_v16qi
14964 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
14965 tree v4sf_ftype_v4sf
14966 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
14967 tree v2df_ftype_v2df
14968 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
14969 tree void_ftype_pcvoid_int_int
14970 = build_function_type_list (void_type_node,
14971 pcvoid_type_node, integer_type_node,
14972 integer_type_node, NULL_TREE);
14973
14974 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
14975 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
14976 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
14977 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
14978 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
14979 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
14980 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
14981 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
14982 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
14983 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
14984 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
14985 ALTIVEC_BUILTIN_LVXL_V2DF);
14986 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
14987 ALTIVEC_BUILTIN_LVXL_V2DI);
14988 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
14989 ALTIVEC_BUILTIN_LVXL_V4SF);
14990 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
14991 ALTIVEC_BUILTIN_LVXL_V4SI);
14992 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
14993 ALTIVEC_BUILTIN_LVXL_V8HI);
14994 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
14995 ALTIVEC_BUILTIN_LVXL_V16QI);
14996 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
14997 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
14998 ALTIVEC_BUILTIN_LVX_V2DF);
14999 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
15000 ALTIVEC_BUILTIN_LVX_V2DI);
15001 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
15002 ALTIVEC_BUILTIN_LVX_V4SF);
15003 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
15004 ALTIVEC_BUILTIN_LVX_V4SI);
15005 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
15006 ALTIVEC_BUILTIN_LVX_V8HI);
15007 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
15008 ALTIVEC_BUILTIN_LVX_V16QI);
15009 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
15010 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
15011 ALTIVEC_BUILTIN_STVX_V2DF);
15012 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
15013 ALTIVEC_BUILTIN_STVX_V2DI);
15014 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
15015 ALTIVEC_BUILTIN_STVX_V4SF);
15016 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
15017 ALTIVEC_BUILTIN_STVX_V4SI);
15018 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
15019 ALTIVEC_BUILTIN_STVX_V8HI);
15020 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
15021 ALTIVEC_BUILTIN_STVX_V16QI);
15022 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
15023 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
15024 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
15025 ALTIVEC_BUILTIN_STVXL_V2DF);
15026 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
15027 ALTIVEC_BUILTIN_STVXL_V2DI);
15028 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
15029 ALTIVEC_BUILTIN_STVXL_V4SF);
15030 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
15031 ALTIVEC_BUILTIN_STVXL_V4SI);
15032 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
15033 ALTIVEC_BUILTIN_STVXL_V8HI);
15034 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
15035 ALTIVEC_BUILTIN_STVXL_V16QI);
15036 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
15037 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
15038 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
15039 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
15040 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
15041 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
15042 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
15043 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
15044 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
15045 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
15046 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
15047 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
15048 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
15049 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
15050 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
15051 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
15052
15053 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
15054 VSX_BUILTIN_LXVD2X_V2DF);
15055 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
15056 VSX_BUILTIN_LXVD2X_V2DI);
15057 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
15058 VSX_BUILTIN_LXVW4X_V4SF);
15059 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
15060 VSX_BUILTIN_LXVW4X_V4SI);
15061 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
15062 VSX_BUILTIN_LXVW4X_V8HI);
15063 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
15064 VSX_BUILTIN_LXVW4X_V16QI);
15065 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
15066 VSX_BUILTIN_STXVD2X_V2DF);
15067 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
15068 VSX_BUILTIN_STXVD2X_V2DI);
15069 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
15070 VSX_BUILTIN_STXVW4X_V4SF);
15071 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
15072 VSX_BUILTIN_STXVW4X_V4SI);
15073 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
15074 VSX_BUILTIN_STXVW4X_V8HI);
15075 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
15076 VSX_BUILTIN_STXVW4X_V16QI);
15077 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
15078 VSX_BUILTIN_VEC_LD);
15079 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
15080 VSX_BUILTIN_VEC_ST);
15081
15082 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
15083 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
15084 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
15085
15086 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
15087 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
15088 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
15089 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
15090 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
15091 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
15092 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
15093 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
15094 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
15095 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
15096 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
15097 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
15098
15099 /* Cell builtins. */
15100 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
15101 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
15102 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
15103 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
15104
15105 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
15106 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
15107 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
15108 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
15109
15110 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
15111 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
15112 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
15113 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
15114
15115 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
15116 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
15117 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
15118 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
15119
15120 /* Add the DST variants. */
15121 d = bdesc_dst;
15122 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
15123 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
15124
15125 /* Initialize the predicates. */
15126 d = bdesc_altivec_preds;
15127 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
15128 {
15129 machine_mode mode1;
15130 tree type;
15131
15132 if (rs6000_overloaded_builtin_p (d->code))
15133 mode1 = VOIDmode;
15134 else
15135 mode1 = insn_data[d->icode].operand[1].mode;
15136
15137 switch (mode1)
15138 {
15139 case VOIDmode:
15140 type = int_ftype_int_opaque_opaque;
15141 break;
15142 case V2DImode:
15143 type = int_ftype_int_v2di_v2di;
15144 break;
15145 case V4SImode:
15146 type = int_ftype_int_v4si_v4si;
15147 break;
15148 case V8HImode:
15149 type = int_ftype_int_v8hi_v8hi;
15150 break;
15151 case V16QImode:
15152 type = int_ftype_int_v16qi_v16qi;
15153 break;
15154 case V4SFmode:
15155 type = int_ftype_int_v4sf_v4sf;
15156 break;
15157 case V2DFmode:
15158 type = int_ftype_int_v2df_v2df;
15159 break;
15160 default:
15161 gcc_unreachable ();
15162 }
15163
15164 def_builtin (d->name, type, d->code);
15165 }
15166
15167 /* Initialize the abs* operators. */
15168 d = bdesc_abs;
15169 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
15170 {
15171 machine_mode mode0;
15172 tree type;
15173
15174 mode0 = insn_data[d->icode].operand[0].mode;
15175
15176 switch (mode0)
15177 {
15178 case V2DImode:
15179 type = v2di_ftype_v2di;
15180 break;
15181 case V4SImode:
15182 type = v4si_ftype_v4si;
15183 break;
15184 case V8HImode:
15185 type = v8hi_ftype_v8hi;
15186 break;
15187 case V16QImode:
15188 type = v16qi_ftype_v16qi;
15189 break;
15190 case V4SFmode:
15191 type = v4sf_ftype_v4sf;
15192 break;
15193 case V2DFmode:
15194 type = v2df_ftype_v2df;
15195 break;
15196 default:
15197 gcc_unreachable ();
15198 }
15199
15200 def_builtin (d->name, type, d->code);
15201 }
15202
15203 /* Initialize target builtin that implements
15204 targetm.vectorize.builtin_mask_for_load. */
15205
15206 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
15207 v16qi_ftype_long_pcvoid,
15208 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
15209 BUILT_IN_MD, NULL, NULL_TREE);
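/* Marking the decl TREE_READONLY declares the builtin "const" (its
result depends only on its arguments), so the optimizers may reuse a
previously computed mask for the same address.  */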
15210 TREE_READONLY (decl) = 1;
15211 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
15212 altivec_builtin_mask_for_load = decl;
15213
15214 /* Access to the vec_init patterns. */
15215 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
15216 integer_type_node, integer_type_node,
15217 integer_type_node, NULL_TREE);
15218 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
15219
15220 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
15221 short_integer_type_node,
15222 short_integer_type_node,
15223 short_integer_type_node,
15224 short_integer_type_node,
15225 short_integer_type_node,
15226 short_integer_type_node,
15227 short_integer_type_node, NULL_TREE);
15228 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
15229
15230 ftype = build_function_type_list (V16QI_type_node, char_type_node,
15231 char_type_node, char_type_node,
15232 char_type_node, char_type_node,
15233 char_type_node, char_type_node,
15234 char_type_node, char_type_node,
15235 char_type_node, char_type_node,
15236 char_type_node, char_type_node,
15237 char_type_node, char_type_node,
15238 char_type_node, NULL_TREE);
15239 def_builtin ("__builtin_vec_init_v16qi", ftype,
15240 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
15241
15242 ftype = build_function_type_list (V4SF_type_node, float_type_node,
15243 float_type_node, float_type_node,
15244 float_type_node, NULL_TREE);
15245 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
15246
15247 /* VSX builtins. */
15248 ftype = build_function_type_list (V2DF_type_node, double_type_node,
15249 double_type_node, NULL_TREE);
15250 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
15251
15252 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
15253 intDI_type_node, NULL_TREE);
15254 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
15255
15256 /* Access to the vec_set patterns. */
15257 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
15258 intSI_type_node,
15259 integer_type_node, NULL_TREE);
15260 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
15261
15262 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
15263 intHI_type_node,
15264 integer_type_node, NULL_TREE);
15265 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
15266
15267 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
15268 intQI_type_node,
15269 integer_type_node, NULL_TREE);
15270 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
15271
15272 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
15273 float_type_node,
15274 integer_type_node, NULL_TREE);
15275 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
15276
15277 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
15278 double_type_node,
15279 integer_type_node, NULL_TREE);
15280 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
15281
15282 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
15283 intDI_type_node,
15284 integer_type_node, NULL_TREE);
15285 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
15286
15287 /* Access to the vec_extract patterns. */
15288 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
15289 integer_type_node, NULL_TREE);
15290 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
15291
15292 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
15293 integer_type_node, NULL_TREE);
15294 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
15295
15296 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
15297 integer_type_node, NULL_TREE);
15298 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
15299
15300 ftype = build_function_type_list (float_type_node, V4SF_type_node,
15301 integer_type_node, NULL_TREE);
15302 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
15303
15304 ftype = build_function_type_list (double_type_node, V2DF_type_node,
15305 integer_type_node, NULL_TREE);
15306 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
15307
15308 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
15309 integer_type_node, NULL_TREE);
15310 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
15311
15312
15313 if (V1TI_type_node)
15314 {
15315 tree v1ti_ftype_long_pcvoid
15316 = build_function_type_list (V1TI_type_node,
15317 long_integer_type_node, pcvoid_type_node,
15318 NULL_TREE);
15319 tree void_ftype_v1ti_long_pvoid
15320 = build_function_type_list (void_type_node,
15321 V1TI_type_node, long_integer_type_node,
15322 pvoid_type_node, NULL_TREE);
15323 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
15324 VSX_BUILTIN_LXVD2X_V1TI);
15325 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
15326 VSX_BUILTIN_STXVD2X_V1TI);
15327 ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
15328 NULL_TREE, NULL_TREE);
15329 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
15330 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
15331 intTI_type_node,
15332 integer_type_node, NULL_TREE);
15333 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
15334 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
15335 integer_type_node, NULL_TREE);
15336 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
15337 }
15338
15339 }
15340
15341 static void
15342 htm_init_builtins (void)
15343 {
15344 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
15345 const struct builtin_description *d;
15346 size_t i;
15347
15348 d = bdesc_htm;
15349 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
15350 {
15351 tree op[MAX_HTM_OPERANDS], type;
15352 HOST_WIDE_INT mask = d->mask;
15353 unsigned attr = rs6000_builtin_info[d->code].attr;
15354 bool void_func = (attr & RS6000_BTC_VOID);
15355 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
15356 int nopnds = 0;
15357 tree gpr_type_node;
15358 tree rettype;
15359 tree argtype;
15360
15361 if (TARGET_32BIT && TARGET_POWERPC64)
15362 gpr_type_node = long_long_unsigned_type_node;
15363 else
15364 gpr_type_node = long_unsigned_type_node;
15365
15366 if (attr & RS6000_BTC_SPR)
15367 {
15368 rettype = gpr_type_node;
15369 argtype = gpr_type_node;
15370 }
15371 else if (d->code == HTM_BUILTIN_TABORTDC
15372 || d->code == HTM_BUILTIN_TABORTDCI)
15373 {
15374 rettype = unsigned_type_node;
15375 argtype = gpr_type_node;
15376 }
15377 else
15378 {
15379 rettype = unsigned_type_node;
15380 argtype = unsigned_type_node;
15381 }
15382
15383 if ((mask & builtin_mask) != mask)
15384 {
15385 if (TARGET_DEBUG_BUILTIN)
15386 fprintf (stderr, "htm_builtin, skip %s\n", d->name);
15387 continue;
15388 }
15389
15390 if (d->name == 0)
15391 {
15392 if (TARGET_DEBUG_BUILTIN)
15393 fprintf (stderr, "htm_builtin, bdesc_htm[%lu] no name\n",
15394 (long unsigned) i);
15395 continue;
15396 }
15397
15398 op[nopnds++] = (void_func) ? void_type_node : rettype;
15399
15400 if (attr_args == RS6000_BTC_UNARY)
15401 op[nopnds++] = argtype;
15402 else if (attr_args == RS6000_BTC_BINARY)
15403 {
15404 op[nopnds++] = argtype;
15405 op[nopnds++] = argtype;
15406 }
15407 else if (attr_args == RS6000_BTC_TERNARY)
15408 {
15409 op[nopnds++] = argtype;
15410 op[nopnds++] = argtype;
15411 op[nopnds++] = argtype;
15412 }
15413
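/* op[] now holds the return type followed by the argument types;
e.g. a binary SPR builtin ends up as { gpr, gpr, gpr } with
nopnds == 3.  */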
15414 switch (nopnds)
15415 {
15416 case 1:
15417 type = build_function_type_list (op[0], NULL_TREE);
15418 break;
15419 case 2:
15420 type = build_function_type_list (op[0], op[1], NULL_TREE);
15421 break;
15422 case 3:
15423 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
15424 break;
15425 case 4:
15426 type = build_function_type_list (op[0], op[1], op[2], op[3],
15427 NULL_TREE);
15428 break;
15429 default:
15430 gcc_unreachable ();
15431 }
15432
15433 def_builtin (d->name, type, d->code);
15434 }
15435 }
15436
15437 /* Hash function for builtin functions with up to 3 arguments and a return
15438 type. */
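/* The hash folds the four (mode, uns_p) pairs in mixed radix: multiply the
running value by MAX_MACHINE_MODE and add the mode, then multiply by 2 and
add the signedness flag.  Collisions are resolved by builtin_hasher::equal
below.  */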
15439 hashval_t
15440 builtin_hasher::hash (builtin_hash_struct *bh)
15441 {
15442 unsigned ret = 0;
15443 int i;
15444
15445 for (i = 0; i < 4; i++)
15446 {
15447 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
15448 ret = (ret * 2) + bh->uns_p[i];
15449 }
15450
15451 return ret;
15452 }
15453
15454 /* Compare builtin hash entries H1 and H2 for equivalence. */
15455 bool
15456 builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
15457 {
15458 return ((p1->mode[0] == p2->mode[0])
15459 && (p1->mode[1] == p2->mode[1])
15460 && (p1->mode[2] == p2->mode[2])
15461 && (p1->mode[3] == p2->mode[3])
15462 && (p1->uns_p[0] == p2->uns_p[0])
15463 && (p1->uns_p[1] == p2->uns_p[1])
15464 && (p1->uns_p[2] == p2->uns_p[2])
15465 && (p1->uns_p[3] == p2->uns_p[3]));
15466 }
15467
15468 /* Map types for builtin functions with an explicit return type and up to 3
15469 arguments. Functions with fewer than 3 arguments pass VOIDmode as the
15470 mode of each unused argument. */
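/* For example, a two-operand V4SI operation would be requested as
builtin_function_type (V4SImode, V4SImode, V4SImode, VOIDmode, ...) and
yields the type V4SI (*) (V4SI, V4SI).  */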
15471 static tree
15472 builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
15473 machine_mode mode_arg1, machine_mode mode_arg2,
15474 enum rs6000_builtins builtin, const char *name)
15475 {
15476 struct builtin_hash_struct h;
15477 struct builtin_hash_struct *h2;
15478 int num_args = 3;
15479 int i;
15480 tree ret_type = NULL_TREE;
15481 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
15482
15483 /* Create builtin_hash_table. */
15484 if (builtin_hash_table == NULL)
15485 builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);
15486
15487 h.type = NULL_TREE;
15488 h.mode[0] = mode_ret;
15489 h.mode[1] = mode_arg0;
15490 h.mode[2] = mode_arg1;
15491 h.mode[3] = mode_arg2;
15492 h.uns_p[0] = 0;
15493 h.uns_p[1] = 0;
15494 h.uns_p[2] = 0;
15495 h.uns_p[3] = 0;
15496
15497 /* If the builtin produces unsigned results or takes unsigned arguments,
15498 and it may be returned as a decl for the vectorizer (such as the
15499 widening multiplies and permutes), make sure the arguments and return
15500 value are type correct. */
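/* For example, the unsigned vperm cases below mark the return value and
all three arguments unsigned, while the signed vperm cases only force
the final permute-mask argument (uns_p[3]) to be unsigned.  */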
15501 switch (builtin)
15502 {
15503 /* unsigned 1 argument functions. */
15504 case CRYPTO_BUILTIN_VSBOX:
15505 case P8V_BUILTIN_VGBBD:
15506 case MISC_BUILTIN_CDTBCD:
15507 case MISC_BUILTIN_CBCDTD:
15508 h.uns_p[0] = 1;
15509 h.uns_p[1] = 1;
15510 break;
15511
15512 /* unsigned 2 argument functions. */
15513 case ALTIVEC_BUILTIN_VMULEUB_UNS:
15514 case ALTIVEC_BUILTIN_VMULEUH_UNS:
15515 case ALTIVEC_BUILTIN_VMULOUB_UNS:
15516 case ALTIVEC_BUILTIN_VMULOUH_UNS:
15517 case CRYPTO_BUILTIN_VCIPHER:
15518 case CRYPTO_BUILTIN_VCIPHERLAST:
15519 case CRYPTO_BUILTIN_VNCIPHER:
15520 case CRYPTO_BUILTIN_VNCIPHERLAST:
15521 case CRYPTO_BUILTIN_VPMSUMB:
15522 case CRYPTO_BUILTIN_VPMSUMH:
15523 case CRYPTO_BUILTIN_VPMSUMW:
15524 case CRYPTO_BUILTIN_VPMSUMD:
15525 case CRYPTO_BUILTIN_VPMSUM:
15526 case MISC_BUILTIN_ADDG6S:
15527 case MISC_BUILTIN_DIVWEU:
15528 case MISC_BUILTIN_DIVWEUO:
15529 case MISC_BUILTIN_DIVDEU:
15530 case MISC_BUILTIN_DIVDEUO:
15531 h.uns_p[0] = 1;
15532 h.uns_p[1] = 1;
15533 h.uns_p[2] = 1;
15534 break;
15535
15536 /* unsigned 3 argument functions. */
15537 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
15538 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
15539 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
15540 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
15541 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
15542 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
15543 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
15544 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
15545 case VSX_BUILTIN_VPERM_16QI_UNS:
15546 case VSX_BUILTIN_VPERM_8HI_UNS:
15547 case VSX_BUILTIN_VPERM_4SI_UNS:
15548 case VSX_BUILTIN_VPERM_2DI_UNS:
15549 case VSX_BUILTIN_XXSEL_16QI_UNS:
15550 case VSX_BUILTIN_XXSEL_8HI_UNS:
15551 case VSX_BUILTIN_XXSEL_4SI_UNS:
15552 case VSX_BUILTIN_XXSEL_2DI_UNS:
15553 case CRYPTO_BUILTIN_VPERMXOR:
15554 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
15555 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
15556 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
15557 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
15558 case CRYPTO_BUILTIN_VSHASIGMAW:
15559 case CRYPTO_BUILTIN_VSHASIGMAD:
15560 case CRYPTO_BUILTIN_VSHASIGMA:
15561 h.uns_p[0] = 1;
15562 h.uns_p[1] = 1;
15563 h.uns_p[2] = 1;
15564 h.uns_p[3] = 1;
15565 break;
15566
15567 /* signed permute functions with unsigned char mask. */
15568 case ALTIVEC_BUILTIN_VPERM_16QI:
15569 case ALTIVEC_BUILTIN_VPERM_8HI:
15570 case ALTIVEC_BUILTIN_VPERM_4SI:
15571 case ALTIVEC_BUILTIN_VPERM_4SF:
15572 case ALTIVEC_BUILTIN_VPERM_2DI:
15573 case ALTIVEC_BUILTIN_VPERM_2DF:
15574 case VSX_BUILTIN_VPERM_16QI:
15575 case VSX_BUILTIN_VPERM_8HI:
15576 case VSX_BUILTIN_VPERM_4SI:
15577 case VSX_BUILTIN_VPERM_4SF:
15578 case VSX_BUILTIN_VPERM_2DI:
15579 case VSX_BUILTIN_VPERM_2DF:
15580 h.uns_p[3] = 1;
15581 break;
15582
15583 /* unsigned args, signed return. */
15584 case VSX_BUILTIN_XVCVUXDDP_UNS:
15585 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
15586 h.uns_p[1] = 1;
15587 break;
15588
15589 /* signed args, unsigned return. */
15590 case VSX_BUILTIN_XVCVDPUXDS_UNS:
15591 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
15592 case MISC_BUILTIN_UNPACK_TD:
15593 case MISC_BUILTIN_UNPACK_V1TI:
15594 h.uns_p[0] = 1;
15595 break;
15596
15597 /* unsigned arguments for 128-bit pack instructions. */
15598 case MISC_BUILTIN_PACK_TD:
15599 case MISC_BUILTIN_PACK_V1TI:
15600 h.uns_p[1] = 1;
15601 h.uns_p[2] = 1;
15602 break;
15603
15604 default:
15605 break;
15606 }
15607
15608 /* Figure out how many args are present. */
15609 while (num_args > 0 && h.mode[num_args] == VOIDmode)
15610 num_args--;
15611
15612 if (num_args == 0)
15613 fatal_error (input_location,
15614 "internal error: builtin function %s had no type", name);
15615
15616 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
15617 if (!ret_type && h.uns_p[0])
15618 ret_type = builtin_mode_to_type[h.mode[0]][0];
15619
15620 if (!ret_type)
15621 fatal_error (input_location,
15622 "internal error: builtin function %s had an unexpected "
15623 "return type %s", name, GET_MODE_NAME (h.mode[0]));
15624
15625 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
15626 arg_type[i] = NULL_TREE;
15627
15628 for (i = 0; i < num_args; i++)
15629 {
15630 int m = (int) h.mode[i+1];
15631 int uns_p = h.uns_p[i+1];
15632
15633 arg_type[i] = builtin_mode_to_type[m][uns_p];
15634 if (!arg_type[i] && uns_p)
15635 arg_type[i] = builtin_mode_to_type[m][0];
15636
15637 if (!arg_type[i])
15638 fatal_error (input_location,
15639 "internal error: builtin function %s, argument %d "
15640 "had unexpected argument type %s", name, i,
15641 GET_MODE_NAME (m));
15642 }
15643
15644 builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
15645 if (*found == NULL)
15646 {
15647 h2 = ggc_alloc<builtin_hash_struct> ();
15648 *h2 = h;
15649 *found = h2;
15650
15651 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
15652 arg_type[2], NULL_TREE);
15653 }
15654
15655 return (*found)->type;
15656 }
15657
15658 static void
15659 rs6000_common_init_builtins (void)
15660 {
15661 const struct builtin_description *d;
15662 size_t i;
15663
15664 tree opaque_ftype_opaque = NULL_TREE;
15665 tree opaque_ftype_opaque_opaque = NULL_TREE;
15666 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
15667 tree v2si_ftype_qi = NULL_TREE;
15668 tree v2si_ftype_v2si_qi = NULL_TREE;
15669 tree v2si_ftype_int_qi = NULL_TREE;
15670 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
15671
15672 if (!TARGET_PAIRED_FLOAT)
15673 {
15674 builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
15675 builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
15676 }
15677
15678 /* Paired and SPE builtins are only available if the compiler was built
15679 with the appropriate options, so only create those builtins when the
15680 corresponding option is enabled. Create Altivec and VSX builtins on
15681 machines with at least the general purpose extensions (970 and newer) to
15682 allow the use of the target attribute. */
15683
15684 if (TARGET_EXTRA_BUILTINS)
15685 builtin_mask |= RS6000_BTM_COMMON;
15686
15687 /* Add the ternary operators. */
15688 d = bdesc_3arg;
15689 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
15690 {
15691 tree type;
15692 HOST_WIDE_INT mask = d->mask;
15693
15694 if ((mask & builtin_mask) != mask)
15695 {
15696 if (TARGET_DEBUG_BUILTIN)
15697 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
15698 continue;
15699 }
15700
15701 if (rs6000_overloaded_builtin_p (d->code))
15702 {
15703 if (! (type = opaque_ftype_opaque_opaque_opaque))
15704 type = opaque_ftype_opaque_opaque_opaque
15705 = build_function_type_list (opaque_V4SI_type_node,
15706 opaque_V4SI_type_node,
15707 opaque_V4SI_type_node,
15708 opaque_V4SI_type_node,
15709 NULL_TREE);
15710 }
15711 else
15712 {
15713 enum insn_code icode = d->icode;
15714 if (d->name == 0)
15715 {
15716 if (TARGET_DEBUG_BUILTIN)
15717 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%lu] no name\n",
15718 (long unsigned)i);
15719
15720 continue;
15721 }
15722
15723 if (icode == CODE_FOR_nothing)
15724 {
15725 if (TARGET_DEBUG_BUILTIN)
15726 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
15727 d->name);
15728
15729 continue;
15730 }
15731
15732 type = builtin_function_type (insn_data[icode].operand[0].mode,
15733 insn_data[icode].operand[1].mode,
15734 insn_data[icode].operand[2].mode,
15735 insn_data[icode].operand[3].mode,
15736 d->code, d->name);
15737 }
15738
15739 def_builtin (d->name, type, d->code);
15740 }
15741
15742 /* Add the binary operators. */
15743 d = bdesc_2arg;
15744 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
15745 {
15746 machine_mode mode0, mode1, mode2;
15747 tree type;
15748 HOST_WIDE_INT mask = d->mask;
15749
15750 if ((mask & builtin_mask) != mask)
15751 {
15752 if (TARGET_DEBUG_BUILTIN)
15753 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
15754 continue;
15755 }
15756
15757 if (rs6000_overloaded_builtin_p (d->code))
15758 {
15759 if (! (type = opaque_ftype_opaque_opaque))
15760 type = opaque_ftype_opaque_opaque
15761 = build_function_type_list (opaque_V4SI_type_node,
15762 opaque_V4SI_type_node,
15763 opaque_V4SI_type_node,
15764 NULL_TREE);
15765 }
15766 else
15767 {
15768 enum insn_code icode = d->icode;
15769 if (d->name == 0)
15770 {
15771 if (TARGET_DEBUG_BUILTIN)
15772 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%lu] no name\n",
15773 (long unsigned)i);
15774
15775 continue;
15776 }
15777
15778 if (icode == CODE_FOR_nothing)
15779 {
15780 if (TARGET_DEBUG_BUILTIN)
15781 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
15782 d->name);
15783
15784 continue;
15785 }
15786
15787 mode0 = insn_data[icode].operand[0].mode;
15788 mode1 = insn_data[icode].operand[1].mode;
15789 mode2 = insn_data[icode].operand[2].mode;
15790
15791 if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
15792 {
15793 if (! (type = v2si_ftype_v2si_qi))
15794 type = v2si_ftype_v2si_qi
15795 = build_function_type_list (opaque_V2SI_type_node,
15796 opaque_V2SI_type_node,
15797 char_type_node,
15798 NULL_TREE);
15799 }
15800
15801 else if (mode0 == V2SImode && GET_MODE_CLASS (mode1) == MODE_INT
15802 && mode2 == QImode)
15803 {
15804 if (! (type = v2si_ftype_int_qi))
15805 type = v2si_ftype_int_qi
15806 = build_function_type_list (opaque_V2SI_type_node,
15807 integer_type_node,
15808 char_type_node,
15809 NULL_TREE);
15810 }
15811
15812 else
15813 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
15814 d->code, d->name);
15815 }
15816
15817 def_builtin (d->name, type, d->code);
15818 }
15819
15820 /* Add the simple unary operators. */
15821 d = bdesc_1arg;
15822 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
15823 {
15824 machine_mode mode0, mode1;
15825 tree type;
15826 HOST_WIDE_INT mask = d->mask;
15827
15828 if ((mask & builtin_mask) != mask)
15829 {
15830 if (TARGET_DEBUG_BUILTIN)
15831 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
15832 continue;
15833 }
15834
15835 if (rs6000_overloaded_builtin_p (d->code))
15836 {
15837 if (! (type = opaque_ftype_opaque))
15838 type = opaque_ftype_opaque
15839 = build_function_type_list (opaque_V4SI_type_node,
15840 opaque_V4SI_type_node,
15841 NULL_TREE);
15842 }
15843 else
15844 {
15845 enum insn_code icode = d->icode;
15846 if (d->name == 0)
15847 {
15848 if (TARGET_DEBUG_BUILTIN)
15849 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%lu] no name\n",
15850 (long unsigned)i);
15851
15852 continue;
15853 }
15854
15855 if (icode == CODE_FOR_nothing)
15856 {
15857 if (TARGET_DEBUG_BUILTIN)
15858 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
15859 d->name);
15860
15861 continue;
15862 }
15863
15864 mode0 = insn_data[icode].operand[0].mode;
15865 mode1 = insn_data[icode].operand[1].mode;
15866
15867 if (mode0 == V2SImode && mode1 == QImode)
15868 {
15869 if (! (type = v2si_ftype_qi))
15870 type = v2si_ftype_qi
15871 = build_function_type_list (opaque_V2SI_type_node,
15872 char_type_node,
15873 NULL_TREE);
15874 }
15875
15876 else
15877 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
15878 d->code, d->name);
15879 }
15880
15881 def_builtin (d->name, type, d->code);
15882 }
15883 }
15884
15885 static void
15886 rs6000_init_libfuncs (void)
15887 {
15888 if (!TARGET_IEEEQUAD)
15889 /* AIX/Darwin/64-bit Linux quad floating point routines. */
15890 if (!TARGET_XL_COMPAT)
15891 {
15892 set_optab_libfunc (add_optab, TFmode, "__gcc_qadd");
15893 set_optab_libfunc (sub_optab, TFmode, "__gcc_qsub");
15894 set_optab_libfunc (smul_optab, TFmode, "__gcc_qmul");
15895 set_optab_libfunc (sdiv_optab, TFmode, "__gcc_qdiv");
15896
15897 if (!(TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)))
15898 {
15899 set_optab_libfunc (neg_optab, TFmode, "__gcc_qneg");
15900 set_optab_libfunc (eq_optab, TFmode, "__gcc_qeq");
15901 set_optab_libfunc (ne_optab, TFmode, "__gcc_qne");
15902 set_optab_libfunc (gt_optab, TFmode, "__gcc_qgt");
15903 set_optab_libfunc (ge_optab, TFmode, "__gcc_qge");
15904 set_optab_libfunc (lt_optab, TFmode, "__gcc_qlt");
15905 set_optab_libfunc (le_optab, TFmode, "__gcc_qle");
15906
15907 set_conv_libfunc (sext_optab, TFmode, SFmode, "__gcc_stoq");
15908 set_conv_libfunc (sext_optab, TFmode, DFmode, "__gcc_dtoq");
15909 set_conv_libfunc (trunc_optab, SFmode, TFmode, "__gcc_qtos");
15910 set_conv_libfunc (trunc_optab, DFmode, TFmode, "__gcc_qtod");
15911 set_conv_libfunc (sfix_optab, SImode, TFmode, "__gcc_qtoi");
15912 set_conv_libfunc (ufix_optab, SImode, TFmode, "__gcc_qtou");
15913 set_conv_libfunc (sfloat_optab, TFmode, SImode, "__gcc_itoq");
15914 set_conv_libfunc (ufloat_optab, TFmode, SImode, "__gcc_utoq");
15915 }
15916
15917 if (!(TARGET_HARD_FLOAT && TARGET_FPRS))
15918 set_optab_libfunc (unord_optab, TFmode, "__gcc_qunord");
15919 }
15920 else
15921 {
15922 set_optab_libfunc (add_optab, TFmode, "_xlqadd");
15923 set_optab_libfunc (sub_optab, TFmode, "_xlqsub");
15924 set_optab_libfunc (smul_optab, TFmode, "_xlqmul");
15925 set_optab_libfunc (sdiv_optab, TFmode, "_xlqdiv");
15926 }
15927 else
15928 {
15929 /* 32-bit SVR4 quad floating point routines. */
15930
15931 set_optab_libfunc (add_optab, TFmode, "_q_add");
15932 set_optab_libfunc (sub_optab, TFmode, "_q_sub");
15933 set_optab_libfunc (neg_optab, TFmode, "_q_neg");
15934 set_optab_libfunc (smul_optab, TFmode, "_q_mul");
15935 set_optab_libfunc (sdiv_optab, TFmode, "_q_div");
15936 if (TARGET_PPC_GPOPT)
15937 set_optab_libfunc (sqrt_optab, TFmode, "_q_sqrt");
15938
15939 set_optab_libfunc (eq_optab, TFmode, "_q_feq");
15940 set_optab_libfunc (ne_optab, TFmode, "_q_fne");
15941 set_optab_libfunc (gt_optab, TFmode, "_q_fgt");
15942 set_optab_libfunc (ge_optab, TFmode, "_q_fge");
15943 set_optab_libfunc (lt_optab, TFmode, "_q_flt");
15944 set_optab_libfunc (le_optab, TFmode, "_q_fle");
15945
15946 set_conv_libfunc (sext_optab, TFmode, SFmode, "_q_stoq");
15947 set_conv_libfunc (sext_optab, TFmode, DFmode, "_q_dtoq");
15948 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_q_qtos");
15949 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_q_qtod");
15950 set_conv_libfunc (sfix_optab, SImode, TFmode, "_q_qtoi");
15951 set_conv_libfunc (ufix_optab, SImode, TFmode, "_q_qtou");
15952 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_q_itoq");
15953 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_q_utoq");
15954 }
15955 }
15956
15957 \f
15958 /* Expand a block clear operation, and return 1 if successful. Return 0
15959 if we should let the compiler generate normal code.
15960
15961 operands[0] is the destination
15962 operands[1] is the length
15963 operands[3] is the alignment */
15964
15965 int
15966 expand_block_clear (rtx operands[])
15967 {
15968 rtx orig_dest = operands[0];
15969 rtx bytes_rtx = operands[1];
15970 rtx align_rtx = operands[3];
15971 bool constp = (GET_CODE (bytes_rtx) == CONST_INT);
15972 HOST_WIDE_INT align;
15973 HOST_WIDE_INT bytes;
15974 int offset;
15975 int clear_bytes;
15976 int clear_step;
15977
15978 /* If this is not a fixed size clear, just call memset. */
15979 if (! constp)
15980 return 0;
15981
15982 /* This must be a fixed size alignment */
15983 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
15984 align = INTVAL (align_rtx) * BITS_PER_UNIT;
15985
15986 /* Anything to clear? */
15987 bytes = INTVAL (bytes_rtx);
15988 if (bytes <= 0)
15989 return 1;
15990
15991 /* Use the builtin memset after a point, to avoid huge code bloat.
15992 When optimize_size, avoid any significant code bloat; calling
15993 memset is about 4 instructions, so allow for one instruction to
15994 load zero and three to do clearing. */
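/* With the limits below, a 16-byte clear_step (Altivec) thus inlines
clears of up to 48 bytes at -Os and up to 128 bytes otherwise.  */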
15995 if (TARGET_ALTIVEC && align >= 128)
15996 clear_step = 16;
15997 else if (TARGET_POWERPC64 && (align >= 64 || !STRICT_ALIGNMENT))
15998 clear_step = 8;
15999 else if (TARGET_SPE && align >= 64)
16000 clear_step = 8;
16001 else
16002 clear_step = 4;
16003
16004 if (optimize_size && bytes > 3 * clear_step)
16005 return 0;
16006 if (! optimize_size && bytes > 8 * clear_step)
16007 return 0;
16008
16009 for (offset = 0; bytes > 0; offset += clear_bytes, bytes -= clear_bytes)
16010 {
16011 machine_mode mode = BLKmode;
16012 rtx dest;
16013
16014 if (bytes >= 16 && TARGET_ALTIVEC && align >= 128)
16015 {
16016 clear_bytes = 16;
16017 mode = V4SImode;
16018 }
16019 else if (bytes >= 8 && TARGET_SPE && align >= 64)
16020 {
16021 clear_bytes = 8;
16022 mode = V2SImode;
16023 }
16024 else if (bytes >= 8 && TARGET_POWERPC64
16025 && (align >= 64 || !STRICT_ALIGNMENT))
16026 {
16027 clear_bytes = 8;
16028 mode = DImode;
16029 if (offset == 0 && align < 64)
16030 {
16031 rtx addr;
16032
16033 /* If the address form is reg+offset with offset not a
16034 multiple of four, reload into reg indirect form here
16035 rather than waiting for reload. This way we get one
16036 reload, not one per store. */
16037 addr = XEXP (orig_dest, 0);
16038 if ((GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
16039 && GET_CODE (XEXP (addr, 1)) == CONST_INT
16040 && (INTVAL (XEXP (addr, 1)) & 3) != 0)
16041 {
16042 addr = copy_addr_to_reg (addr);
16043 orig_dest = replace_equiv_address (orig_dest, addr);
16044 }
16045 }
16046 }
16047 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
16048 { /* clear 4 bytes */
16049 clear_bytes = 4;
16050 mode = SImode;
16051 }
16052 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
16053 { /* clear 2 bytes */
16054 clear_bytes = 2;
16055 mode = HImode;
16056 }
16057 else /* clear 1 byte at a time */
16058 {
16059 clear_bytes = 1;
16060 mode = QImode;
16061 }
16062
16063 dest = adjust_address (orig_dest, mode, offset);
16064
16065 emit_move_insn (dest, CONST0_RTX (mode));
16066 }
16067
16068 return 1;
16069 }
16070
16071 \f
16072 /* Expand a block move operation, and return 1 if successful. Return 0
16073 if we should let the compiler generate normal code.
16074
16075 operands[0] is the destination
16076 operands[1] is the source
16077 operands[2] is the length
16078 operands[3] is the alignment */
16079
16080 #define MAX_MOVE_REG 4
16081
16082 int
16083 expand_block_move (rtx operands[])
16084 {
16085 rtx orig_dest = operands[0];
16086 rtx orig_src = operands[1];
16087 rtx bytes_rtx = operands[2];
16088 rtx align_rtx = operands[3];
16089 int constp = (GET_CODE (bytes_rtx) == CONST_INT);
16090 int align;
16091 int bytes;
16092 int offset;
16093 int move_bytes;
16094 rtx stores[MAX_MOVE_REG];
16095 int num_reg = 0;
16096
16097 /* If this is not a fixed size move, just call memcpy */
16098 if (! constp)
16099 return 0;
16100
16101 /* This must be a fixed size alignment */
16102 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
16103 align = INTVAL (align_rtx) * BITS_PER_UNIT;
16104
16105 /* Anything to move? */
16106 bytes = INTVAL (bytes_rtx);
16107 if (bytes <= 0)
16108 return 1;
16109
16110 if (bytes > rs6000_block_move_inline_limit)
16111 return 0;
16112
16113 for (offset = 0; bytes > 0; offset += move_bytes, bytes -= move_bytes)
16114 {
16115 union {
16116 rtx (*movmemsi) (rtx, rtx, rtx, rtx);
16117 rtx (*mov) (rtx, rtx);
16118 } gen_func;
16119 machine_mode mode = BLKmode;
16120 rtx src, dest;
16121
16122 /* Altivec first, since it will be faster than a string move
16123 when it applies, and usually not significantly larger. */
16124 if (TARGET_ALTIVEC && bytes >= 16 && align >= 128)
16125 {
16126 move_bytes = 16;
16127 mode = V4SImode;
16128 gen_func.mov = gen_movv4si;
16129 }
16130 else if (TARGET_SPE && bytes >= 8 && align >= 64)
16131 {
16132 move_bytes = 8;
16133 mode = V2SImode;
16134 gen_func.mov = gen_movv2si;
16135 }
16136 else if (TARGET_STRING
16137 && bytes > 24 /* move up to 32 bytes at a time */
16138 && ! fixed_regs[5]
16139 && ! fixed_regs[6]
16140 && ! fixed_regs[7]
16141 && ! fixed_regs[8]
16142 && ! fixed_regs[9]
16143 && ! fixed_regs[10]
16144 && ! fixed_regs[11]
16145 && ! fixed_regs[12])
16146 {
16147 move_bytes = (bytes > 32) ? 32 : bytes;
16148 gen_func.movmemsi = gen_movmemsi_8reg;
16149 }
16150 else if (TARGET_STRING
16151 && bytes > 16 /* move up to 24 bytes at a time */
16152 && ! fixed_regs[5]
16153 && ! fixed_regs[6]
16154 && ! fixed_regs[7]
16155 && ! fixed_regs[8]
16156 && ! fixed_regs[9]
16157 && ! fixed_regs[10])
16158 {
16159 move_bytes = (bytes > 24) ? 24 : bytes;
16160 gen_func.movmemsi = gen_movmemsi_6reg;
16161 }
16162 else if (TARGET_STRING
16163 && bytes > 8 /* move up to 16 bytes at a time */
16164 && ! fixed_regs[5]
16165 && ! fixed_regs[6]
16166 && ! fixed_regs[7]
16167 && ! fixed_regs[8])
16168 {
16169 move_bytes = (bytes > 16) ? 16 : bytes;
16170 gen_func.movmemsi = gen_movmemsi_4reg;
16171 }
16172 else if (bytes >= 8 && TARGET_POWERPC64
16173 && (align >= 64 || !STRICT_ALIGNMENT))
16174 {
16175 move_bytes = 8;
16176 mode = DImode;
16177 gen_func.mov = gen_movdi;
16178 if (offset == 0 && align < 64)
16179 {
16180 rtx addr;
16181
16182 /* If the address form is reg+offset with offset not a
16183 multiple of four, reload into reg indirect form here
16184 rather than waiting for reload. This way we get one
16185 reload, not one per load and/or store. */
16186 addr = XEXP (orig_dest, 0);
16187 if ((GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
16188 && GET_CODE (XEXP (addr, 1)) == CONST_INT
16189 && (INTVAL (XEXP (addr, 1)) & 3) != 0)
16190 {
16191 addr = copy_addr_to_reg (addr);
16192 orig_dest = replace_equiv_address (orig_dest, addr);
16193 }
16194 addr = XEXP (orig_src, 0);
16195 if ((GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
16196 && GET_CODE (XEXP (addr, 1)) == CONST_INT
16197 && (INTVAL (XEXP (addr, 1)) & 3) != 0)
16198 {
16199 addr = copy_addr_to_reg (addr);
16200 orig_src = replace_equiv_address (orig_src, addr);
16201 }
16202 }
16203 }
16204 else if (TARGET_STRING && bytes > 4 && !TARGET_POWERPC64)
16205 { /* move up to 8 bytes at a time */
16206 move_bytes = (bytes > 8) ? 8 : bytes;
16207 gen_func.movmemsi = gen_movmemsi_2reg;
16208 }
16209 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
16210 { /* move 4 bytes */
16211 move_bytes = 4;
16212 mode = SImode;
16213 gen_func.mov = gen_movsi;
16214 }
16215 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
16216 { /* move 2 bytes */
16217 move_bytes = 2;
16218 mode = HImode;
16219 gen_func.mov = gen_movhi;
16220 }
16221 else if (TARGET_STRING && bytes > 1)
16222 { /* move up to 4 bytes at a time */
16223 move_bytes = (bytes > 4) ? 4 : bytes;
16224 gen_func.movmemsi = gen_movmemsi_1reg;
16225 }
16226 else /* move 1 byte at a time */
16227 {
16228 move_bytes = 1;
16229 mode = QImode;
16230 gen_func.mov = gen_movqi;
16231 }
16232
16233 src = adjust_address (orig_src, mode, offset);
16234 dest = adjust_address (orig_dest, mode, offset);
16235
16236 if (mode != BLKmode)
16237 {
16238 rtx tmp_reg = gen_reg_rtx (mode);
16239
16240 emit_insn ((*gen_func.mov) (tmp_reg, src));
16241 stores[num_reg++] = (*gen_func.mov) (dest, tmp_reg);
16242 }
16243
16244 if (mode == BLKmode || num_reg >= MAX_MOVE_REG || bytes == move_bytes)
16245 {
16246 int i;
16247 for (i = 0; i < num_reg; i++)
16248 emit_insn (stores[i]);
16249 num_reg = 0;
16250 }
16251
16252 if (mode == BLKmode)
16253 {
16254 /* Move the address into scratch registers. The movmemsi
16255 patterns require zero offset. */
16256 if (!REG_P (XEXP (src, 0)))
16257 {
16258 rtx src_reg = copy_addr_to_reg (XEXP (src, 0));
16259 src = replace_equiv_address (src, src_reg);
16260 }
16261 set_mem_size (src, move_bytes);
16262
16263 if (!REG_P (XEXP (dest, 0)))
16264 {
16265 rtx dest_reg = copy_addr_to_reg (XEXP (dest, 0));
16266 dest = replace_equiv_address (dest, dest_reg);
16267 }
16268 set_mem_size (dest, move_bytes);
16269
16270 emit_insn ((*gen_func.movmemsi) (dest, src,
16271 GEN_INT (move_bytes & 31),
16272 align_rtx));
16273 }
16274 }
16275
16276 return 1;
16277 }
16278
16279 \f
16280 /* Return a string to perform a load_multiple operation.
16281 operands[0] is the vector.
16282 operands[1] is the source address.
16283 operands[2] is the first destination register. */
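/* For example, loading four words starting at r3 from memory addressed by
r5 would clobber the base register (r5 == r3 + 2) partway through a plain
lswi, so the sequences below load the overlapping word last.  */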
16284
16285 const char *
16286 rs6000_output_load_multiple (rtx operands[3])
16287 {
16288 /* We have to handle the case where the pseudo used to contain the address
16289 is assigned to one of the output registers. */
16290 int i, j;
16291 int words = XVECLEN (operands[0], 0);
16292 rtx xop[10];
16293
16294 if (XVECLEN (operands[0], 0) == 1)
16295 return "lwz %2,0(%1)";
16296
16297 for (i = 0; i < words; i++)
16298 if (refers_to_regno_p (REGNO (operands[2]) + i, operands[1]))
16299 {
16300 if (i == words-1)
16301 {
16302 xop[0] = GEN_INT (4 * (words-1));
16303 xop[1] = operands[1];
16304 xop[2] = operands[2];
16305 output_asm_insn ("lswi %2,%1,%0\n\tlwz %1,%0(%1)", xop);
16306 return "";
16307 }
16308 else if (i == 0)
16309 {
16310 xop[0] = GEN_INT (4 * (words-1));
16311 xop[1] = operands[1];
16312 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
16313 output_asm_insn ("addi %1,%1,4\n\tlswi %2,%1,%0\n\tlwz %1,-4(%1)", xop);
16314 return "";
16315 }
16316 else
16317 {
16318 for (j = 0; j < words; j++)
16319 if (j != i)
16320 {
16321 xop[0] = GEN_INT (j * 4);
16322 xop[1] = operands[1];
16323 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + j);
16324 output_asm_insn ("lwz %2,%0(%1)", xop);
16325 }
16326 xop[0] = GEN_INT (i * 4);
16327 xop[1] = operands[1];
16328 output_asm_insn ("lwz %1,%0(%1)", xop);
16329 return "";
16330 }
16331 }
16332
16333 return "lswi %2,%1,%N0";
16334 }
16335
16336 \f
16337 /* A validation routine: say whether CODE, a condition code, and MODE
16338 match. The other alternatives either don't make sense or should
16339 never be generated. */
16340
16341 void
16342 validate_condition_mode (enum rtx_code code, machine_mode mode)
16343 {
16344 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
16345 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
16346 && GET_MODE_CLASS (mode) == MODE_CC);
16347
16348 /* These don't make sense. */
16349 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
16350 || mode != CCUNSmode);
16351
16352 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
16353 || mode == CCUNSmode);
16354
16355 gcc_assert (mode == CCFPmode
16356 || (code != ORDERED && code != UNORDERED
16357 && code != UNEQ && code != LTGT
16358 && code != UNGT && code != UNLT
16359 && code != UNGE && code != UNLE));
16360
16361 /* These should never be generated except for
16362 flag_finite_math_only. */
16363 gcc_assert (mode != CCFPmode
16364 || flag_finite_math_only
16365 || (code != LE && code != GE
16366 && code != UNEQ && code != LTGT
16367 && code != UNGT && code != UNLT));
16368
16369 /* These are invalid; the information is not there. */
16370 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
16371 }
16372
16373 \f
16374 /* Return 1 if ANDOP is a mask that has no bits on that are not in the
16375 mask required to convert the result of a rotate insn into a shift
16376 left insn of SHIFTOP bits. Both are known to be SImode CONST_INT. */
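/* For example, with SHIFTOP = 8 the mask may only have bits within
0xffffff00; ANDOP = 0x0000ff00 qualifies, while ANDOP = 0x000000f0 does
not, since it keeps bits the shift would have cleared.  */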
16377
16378 int
16379 includes_lshift_p (rtx shiftop, rtx andop)
16380 {
16381 unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;
16382
16383 shift_mask <<= INTVAL (shiftop);
16384
16385 return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
16386 }
16387
16388 /* Similar, but for right shift. */
16389
16390 int
16391 includes_rshift_p (rtx shiftop, rtx andop)
16392 {
16393 unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;
16394
16395 shift_mask >>= INTVAL (shiftop);
16396
16397 return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
16398 }
16399
16400 /* Return 1 if ANDOP is a mask suitable for use with an rldic insn
16401 to perform a left shift. It must have exactly SHIFTOP least
16402 significant 0's, then one or more 1's, then zero or more 0's. */
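/* For example, with SHIFTOP = 4, ANDOP = 0x0ff0 qualifies: four low 0's,
one contiguous run of 1's, then 0's.  ANDOP = 0x0f0f is rejected because
its lowest 1 bit falls inside the SHIFTOP low bits that must be zero.  */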
16403
16404 int
16405 includes_rldic_lshift_p (rtx shiftop, rtx andop)
16406 {
16407 if (GET_CODE (andop) == CONST_INT)
16408 {
16409 unsigned HOST_WIDE_INT c, lsb, shift_mask;
16410
16411 c = INTVAL (andop);
16412 if (c == 0 || c == HOST_WIDE_INT_M1U)
16413 return 0;
16414
16415 shift_mask = HOST_WIDE_INT_M1U;
16416 shift_mask <<= INTVAL (shiftop);
16417
16418 /* Find the least significant one bit. */
16419 lsb = c & -c;
16420
16421 /* It must coincide with the LSB of the shift mask. */
16422 if (-lsb != shift_mask)
16423 return 0;
16424
16425 /* Invert to look for the next transition (if any). */
16426 c = ~c;
16427
16428 /* Remove the low group of ones (originally low group of zeros). */
16429 c &= -lsb;
16430
16431 /* Again find the lsb, and check we have all 1's above. */
16432 lsb = c & -c;
16433 return c == -lsb;
16434 }
16435 else
16436 return 0;
16437 }
16438
16439 /* Return 1 if ANDOP is a mask suitable for use with an rldicr insn
16440 to perform a left shift. It must have SHIFTOP or more least
16441 significant 0's, with the remainder of the word 1's. */
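/* For example, with SHIFTOP = 4 the constant -16 (...fff0) qualifies: the
low four bits are clear and everything above is 1's.  0x0ff0 is rejected
because of the 0's above its run of 1's.  */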
16442
16443 int
16444 includes_rldicr_lshift_p (rtx shiftop, rtx andop)
16445 {
16446 if (GET_CODE (andop) == CONST_INT)
16447 {
16448 unsigned HOST_WIDE_INT c, lsb, shift_mask;
16449
16450 shift_mask = HOST_WIDE_INT_M1U;
16451 shift_mask <<= INTVAL (shiftop);
16452 c = INTVAL (andop);
16453
16454 /* Find the least significant one bit. */
16455 lsb = c & -c;
16456
16457 /* It must be covered by the shift mask.
16458 This test also rejects c == 0. */
16459 if ((lsb & shift_mask) == 0)
16460 return 0;
16461
16462 /* Check we have all 1's above the transition, and reject all 1's. */
16463 return c == -lsb && lsb != 1;
16464 }
16465 else
16466 return 0;
16467 }
16468
16469 /* Return 1 if the operands will generate valid arguments to the rlwimi
16470 instruction for an insert with right shift in 64-bit mode. The mask may
16471 not start on the first bit or stop on the last bit because the wrap-around
16472 effects of the instruction do not match the semantics of the RTL insn. */
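/* For example, SIZEOP = 8, STARTOP = 40, SHIFTOP = 16 satisfies every
check below, while STARTOP = 32 fails the first check because the field
would start on the low word boundary.  */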
16473
16474 int
16475 insvdi_rshift_rlwimi_p (rtx sizeop, rtx startop, rtx shiftop)
16476 {
16477 if (INTVAL (startop) > 32
16478 && INTVAL (startop) < 64
16479 && INTVAL (sizeop) > 1
16480 && INTVAL (sizeop) + INTVAL (startop) < 64
16481 && INTVAL (shiftop) > 0
16482 && INTVAL (sizeop) + INTVAL (shiftop) < 32
16483 && (64 - (INTVAL (shiftop) & 63)) >= INTVAL (sizeop))
16484 return 1;
16485
16486 return 0;
16487 }
16488
16489 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
16490 for lfq and stfq insns iff the registers are hard registers. */
16491
16492 int
16493 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
16494 {
16495 /* We might have been passed a SUBREG. */
16496 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
16497 return 0;
16498
16499 /* We might have been passed non floating point registers. */
16500 if (!FP_REGNO_P (REGNO (reg1))
16501 || !FP_REGNO_P (REGNO (reg2)))
16502 return 0;
16503
16504 return (REGNO (reg1) == REGNO (reg2) - 1);
16505 }
16506
16507 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
16508 addr1 and addr2 must be in consecutive memory locations
16509 (addr2 == addr1 + 8). */
16510
16511 int
16512 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
16513 {
16514 rtx addr1, addr2;
16515 unsigned int reg1, reg2;
16516 int offset1, offset2;
16517
16518 /* The mems cannot be volatile. */
16519 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
16520 return 0;
16521
16522 addr1 = XEXP (mem1, 0);
16523 addr2 = XEXP (mem2, 0);
16524
16525 /* Extract an offset (if used) from the first addr. */
16526 if (GET_CODE (addr1) == PLUS)
16527 {
16528 /* If not a REG, return zero. */
16529 if (GET_CODE (XEXP (addr1, 0)) != REG)
16530 return 0;
16531 else
16532 {
16533 reg1 = REGNO (XEXP (addr1, 0));
16534 /* The offset must be constant! */
16535 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
16536 return 0;
16537 offset1 = INTVAL (XEXP (addr1, 1));
16538 }
16539 }
16540 else if (GET_CODE (addr1) != REG)
16541 return 0;
16542 else
16543 {
16544 reg1 = REGNO (addr1);
16545 /* This was a simple (mem (reg)) expression. Offset is 0. */
16546 offset1 = 0;
16547 }
16548
16549 /* And now for the second addr. */
16550 if (GET_CODE (addr2) == PLUS)
16551 {
16552 /* If not a REG, return zero. */
16553 if (GET_CODE (XEXP (addr2, 0)) != REG)
16554 return 0;
16555 else
16556 {
16557 reg2 = REGNO (XEXP (addr2, 0));
16558 /* The offset must be constant. */
16559 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
16560 return 0;
16561 offset2 = INTVAL (XEXP (addr2, 1));
16562 }
16563 }
16564 else if (GET_CODE (addr2) != REG)
16565 return 0;
16566 else
16567 {
16568 reg2 = REGNO (addr2);
16569 /* This was a simple (mem (reg)) expression. Offset is 0. */
16570 offset2 = 0;
16571 }
16572
16573 /* Both of these must have the same base register. */
16574 if (reg1 != reg2)
16575 return 0;
16576
16577 /* The offset for the second addr must be 8 more than the first addr. */
16578 if (offset2 != offset1 + 8)
16579 return 0;
16580
16581 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
16582 instructions. */
16583 return 1;
16584 }
16585 \f
16586
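/* Return an rtx for the stack location to use when a value of mode MODE
must be copied through memory.  SDmode values normally reuse the slot
recorded in cfun->machine->sdmode_stack_slot (with its virtual registers
eliminated on first use); all other modes get a fresh stack local.  */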
16587 rtx
16588 rs6000_secondary_memory_needed_rtx (machine_mode mode)
16589 {
16590 static bool eliminated = false;
16591 rtx ret;
16592
16593 if (mode != SDmode || TARGET_NO_SDMODE_STACK)
16594 ret = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
16595 else
16596 {
16597 rtx mem = cfun->machine->sdmode_stack_slot;
16598 gcc_assert (mem != NULL_RTX);
16599
16600 if (!eliminated)
16601 {
16602 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
16603 cfun->machine->sdmode_stack_slot = mem;
16604 eliminated = true;
16605 }
16606 ret = mem;
16607 }
16608
16609 if (TARGET_DEBUG_ADDR)
16610 {
16611 fprintf (stderr, "\nrs6000_secondary_memory_needed_rtx, mode %s, rtx:\n",
16612 GET_MODE_NAME (mode));
16613 if (!ret)
16614 fprintf (stderr, "\tNULL_RTX\n");
16615 else
16616 debug_rtx (ret);
16617 }
16618
16619 return ret;
16620 }
16621
16622 /* Return the mode to be used for memory when a secondary memory
16623 location is needed. For SDmode values we need to use DDmode, in
16624 all other cases we can use the same mode. */
16625 machine_mode
16626 rs6000_secondary_memory_needed_mode (machine_mode mode)
16627 {
16628 if (lra_in_progress && mode == SDmode)
16629 return DDmode;
16630 return mode;
16631 }
16632
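/* Tree-walk callback: return the sub-tree *TP if it references something
with SDmode, and otherwise continue the walk without descending into
bare types.  */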
16633 static tree
16634 rs6000_check_sdmode (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
16635 {
16636 /* Don't walk into types. */
16637 if (*tp == NULL_TREE || *tp == error_mark_node || TYPE_P (*tp))
16638 {
16639 *walk_subtrees = 0;
16640 return NULL_TREE;
16641 }
16642
16643 switch (TREE_CODE (*tp))
16644 {
16645 case VAR_DECL:
16646 case PARM_DECL:
16647 case FIELD_DECL:
16648 case RESULT_DECL:
16649 case SSA_NAME:
16650 case REAL_CST:
16651 case MEM_REF:
16652 case VIEW_CONVERT_EXPR:
16653 if (TYPE_MODE (TREE_TYPE (*tp)) == SDmode)
16654 return *tp;
16655 break;
16656 default:
16657 break;
16658 }
16659
16660 return NULL_TREE;
16661 }
16662
16663 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
16664 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
16665 only work on the traditional altivec registers, note if an altivec register
16666 was chosen. */
16667
16668 static enum rs6000_reg_type
16669 register_to_reg_type (rtx reg, bool *is_altivec)
16670 {
16671 HOST_WIDE_INT regno;
16672 enum reg_class rclass;
16673
16674 if (GET_CODE (reg) == SUBREG)
16675 reg = SUBREG_REG (reg);
16676
16677 if (!REG_P (reg))
16678 return NO_REG_TYPE;
16679
16680 regno = REGNO (reg);
16681 if (regno >= FIRST_PSEUDO_REGISTER)
16682 {
16683 if (!lra_in_progress && !reload_in_progress && !reload_completed)
16684 return PSEUDO_REG_TYPE;
16685
16686 regno = true_regnum (reg);
16687 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
16688 return PSEUDO_REG_TYPE;
16689 }
16690
16691 gcc_assert (regno >= 0);
16692
16693 if (is_altivec && ALTIVEC_REGNO_P (regno))
16694 *is_altivec = true;
16695
16696 rclass = rs6000_regno_regclass[regno];
16697 return reg_class_to_reg_type[(int)rclass];
16698 }
16699
16700 /* Helper function to return the cost of adding a TOC entry address. */
16701
16702 static inline int
16703 rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
16704 {
16705 int ret;
16706
16707 if (TARGET_CMODEL != CMODEL_SMALL)
16708 ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
16709
16710 else
16711 ret = (TARGET_MINIMAL_TOC) ? 6 : 3;
16712
16713 return ret;
16714 }
16715
16716 /* Helper function for rs6000_secondary_reload to determine whether the memory
16717 address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
16718 needs reloading. Return a negative value if the memory is not handled by
16719 the memory helper functions and a different reload method should be tried,
16720 0 if no additional instructions are needed, and a positive value giving
16721 the extra cost of the memory access. */
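/* For example, an indexed address (plus (reg) (reg)) in a register class
that lacks indexed addressing for MODE reports an extra cost of 1: one
add to form the address in a base register.  */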
16722
16723 static int
16724 rs6000_secondary_reload_memory (rtx addr,
16725 enum reg_class rclass,
16726 machine_mode mode)
16727 {
16728 int extra_cost = 0;
16729 rtx reg, and_arg, plus_arg0, plus_arg1;
16730 addr_mask_type addr_mask;
16731 const char *type = NULL;
16732 const char *fail_msg = NULL;
16733
16734 if (GPR_REG_CLASS_P (rclass))
16735 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
16736
16737 else if (rclass == FLOAT_REGS)
16738 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
16739
16740 else if (rclass == ALTIVEC_REGS)
16741 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
16742
16743 /* For the combined VSX_REGS, turn off Altivec AND -16. */
16744 else if (rclass == VSX_REGS)
16745 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
16746 & ~RELOAD_REG_AND_M16);
16747
16748 else
16749 {
16750 if (TARGET_DEBUG_ADDR)
16751 fprintf (stderr,
16752 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
16753 "class is not GPR, FPR, VMX\n",
16754 GET_MODE_NAME (mode), reg_class_names[rclass]);
16755
16756 return -1;
16757 }
16758
16759 /* If the register isn't valid in this register class, just return now. */
16760 if ((addr_mask & RELOAD_REG_VALID) == 0)
16761 {
16762 if (TARGET_DEBUG_ADDR)
16763 fprintf (stderr,
16764 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
16765 "not valid in class\n",
16766 GET_MODE_NAME (mode), reg_class_names[rclass]);
16767
16768 return -1;
16769 }
16770
16771 switch (GET_CODE (addr))
16772 {
16773 /* Does the register class support auto update forms for this mode? We
16774 don't need a scratch register, since the powerpc only supports
16775 PRE_INC, PRE_DEC, and PRE_MODIFY. */
16776 case PRE_INC:
16777 case PRE_DEC:
16778 reg = XEXP (addr, 0);
16779 if (!base_reg_operand (reg, GET_MODE (reg)))
16780 {
16781 fail_msg = "no base register #1";
16782 extra_cost = -1;
16783 }
16784
16785 else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
16786 {
16787 extra_cost = 1;
16788 type = "update";
16789 }
16790 break;
16791
16792 case PRE_MODIFY:
16793 reg = XEXP (addr, 0);
16794 plus_arg1 = XEXP (addr, 1);
16795 if (!base_reg_operand (reg, GET_MODE (reg))
16796 || GET_CODE (plus_arg1) != PLUS
16797 || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
16798 {
16799 fail_msg = "bad PRE_MODIFY";
16800 extra_cost = -1;
16801 }
16802
16803 else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
16804 {
16805 extra_cost = 1;
16806 type = "update";
16807 }
16808 break;
16809
16810 /* Do we need to simulate AND -16 to clear the bottom address bits used
16811 in VMX load/stores? Only allow the AND for vector sizes. */
16812 case AND:
16813 and_arg = XEXP (addr, 0);
16814 if (GET_MODE_SIZE (mode) != 16
16815 || GET_CODE (XEXP (addr, 1)) != CONST_INT
16816 || INTVAL (XEXP (addr, 1)) != -16)
16817 {
16818 fail_msg = "bad Altivec AND #1";
16819 extra_cost = -1;
16820 }
16821
16822 else if (rclass != ALTIVEC_REGS)
16823 {
16824 if (legitimate_indirect_address_p (and_arg, false))
16825 extra_cost = 1;
16826
16827 else if (legitimate_indexed_address_p (and_arg, false))
16828 extra_cost = 2;
16829
16830 else
16831 {
16832 fail_msg = "bad Altivec AND #2";
16833 extra_cost = -1;
16834 }
16835
16836 type = "and";
16837 }
16838 break;
16839
16840 /* If this is an indirect address, make sure it is a base register. */
16841 case REG:
16842 case SUBREG:
16843 if (!legitimate_indirect_address_p (addr, false))
16844 {
16845 extra_cost = 1;
16846 type = "move";
16847 }
16848 break;
16849
16850 /* If this is an indexed address, make sure the register class can handle
16851 indexed addresses for this mode. */
16852 case PLUS:
16853 plus_arg0 = XEXP (addr, 0);
16854 plus_arg1 = XEXP (addr, 1);
16855
16856 /* (plus (plus (reg) (constant)) (constant)) is generated during
16857 push_reload processing, so handle it now. */
16858 if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
16859 {
16860 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
16861 {
16862 extra_cost = 1;
16863 type = "offset";
16864 }
16865 }
16866
16867 /* (plus (plus (reg) (constant)) (reg)) is also generated during
16868 push_reload processing, so handle it now. */
16869 else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
16870 {
16871 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
16872 {
16873 extra_cost = 1;
16874 type = "indexed #2";
16875 }
16876 }
16877
16878 else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
16879 {
16880 fail_msg = "no base register #2";
16881 extra_cost = -1;
16882 }
16883
16884 else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
16885 {
16886 if ((addr_mask & RELOAD_REG_INDEXED) == 0
16887 || !legitimate_indexed_address_p (addr, false))
16888 {
16889 extra_cost = 1;
16890 type = "indexed";
16891 }
16892 }
16893
16894 /* Make sure the register class can handle offset addresses. */
16895 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
16896 {
16897 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
16898 {
16899 extra_cost = 1;
16900 type = "offset";
16901 }
16902 }
16903
16904 else
16905 {
16906 fail_msg = "bad PLUS";
16907 extra_cost = -1;
16908 }
16909
16910 break;
16911
16912 case LO_SUM:
16913 if (!legitimate_lo_sum_address_p (mode, addr, false))
16914 {
16915 fail_msg = "bad LO_SUM";
16916 extra_cost = -1;
16917 }
16918
16919 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
16920 {
16921 extra_cost = 1;
16922 type = "lo_sum";
16923 }
16924 break;
16925
16926 /* Static addresses need to create a TOC entry. */
16927 case CONST:
16928 case SYMBOL_REF:
16929 case LABEL_REF:
16930 type = "address";
16931 extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
16932 break;
16933
16934 /* TOC references look like offsetable memory. */
16935 case UNSPEC:
16936 if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
16937 {
16938 fail_msg = "bad UNSPEC";
16939 extra_cost = -1;
16940 }
16941
16942 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
16943 {
16944 extra_cost = 1;
16945 type = "toc reference";
16946 }
16947 break;
16948
16949 default:
16950 {
16951 fail_msg = "bad address";
16952 extra_cost = -1;
16953 }
16954 }
16955
16956 if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
16957 {
16958 if (extra_cost < 0)
16959 fprintf (stderr,
16960 "rs6000_secondary_reload_memory error: mode = %s, "
16961 "class = %s, addr_mask = '%s', %s\n",
16962 GET_MODE_NAME (mode),
16963 reg_class_names[rclass],
16964 rs6000_debug_addr_mask (addr_mask, false),
16965 (fail_msg != NULL) ? fail_msg : "<bad address>");
16966
16967 else
16968 fprintf (stderr,
16969 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
16970 "addr_mask = '%s', extra cost = %d, %s\n",
16971 GET_MODE_NAME (mode),
16972 reg_class_names[rclass],
16973 rs6000_debug_addr_mask (addr_mask, false),
16974 extra_cost,
16975 (type) ? type : "<none>");
16976
16977 debug_rtx (addr);
16978 }
16979
16980 return extra_cost;
16981 }
16982
16983 /* Helper function for rs6000_secondary_reload to return true if a move to a
16984 different register class is really a simple move. */
16985
16986 static bool
16987 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
16988 enum rs6000_reg_type from_type,
16989 machine_mode mode)
16990 {
16991 int size;
16992
16993 /* Add support for various direct moves available. In this function, we only
16994 look at cases where we don't need any extra registers, and one or more
16995 simple move insns are issued. At present, 32-bit integers are not allowed
16996      in FPR/VSX registers.  Single precision binary floating point is not a
16997      simple move because we need to convert to the single precision memory
16998      layout.  The 4-byte SDmode can be moved.  */
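  /* For example, on a 64-bit power8, a DImode copy from a GPR to a VSX
     register is the single instruction "mtvsrd", and the reverse is a single
     "mfvsrd", so no scratch register is needed for those moves.  */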
16999 size = GET_MODE_SIZE (mode);
17000 if (TARGET_DIRECT_MOVE
17001 && ((mode == SDmode) || (TARGET_POWERPC64 && size == 8))
17002 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
17003 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
17004 return true;
17005
17006 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
17007 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
17008 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
17009 return true;
17010
17011 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
17012 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
17013 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
17014 return true;
17015
17016 return false;
17017 }
17018
17019 /* Power8 helper function for rs6000_secondary_reload.  Handle all of the
17020    special direct moves that involve allocating an extra register.  Return
17021    true if there is such a move, storing the insn code and extra cost of
17022    the helper in SRI; return false otherwise.  */
17023
17024 static bool
17025 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
17026 enum rs6000_reg_type from_type,
17027 machine_mode mode,
17028 secondary_reload_info *sri,
17029 bool altivec_p)
17030 {
17031 bool ret = false;
17032 enum insn_code icode = CODE_FOR_nothing;
17033 int cost = 0;
17034 int size = GET_MODE_SIZE (mode);
17035
17036 if (TARGET_POWERPC64)
17037 {
17038 if (size == 16)
17039 {
17040 	  /* Handle moving 128-bit values from GPRs to VSX registers on power8
17041 	     when running in 64-bit mode, using XXPERMDI to glue the two 64-bit
17042 	     values back together.  */
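	  /* Roughly: one mtvsrd per 64-bit half into two VSX registers, then
	     one xxpermdi merges them; the exact operand order differs between
	     big- and little-endian.  */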
17043 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
17044 {
17045 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
17046 icode = reg_addr[mode].reload_vsx_gpr;
17047 }
17048
17049 	  /* Handle moving 128-bit values from VSX registers to GPRs on power8
17050 	     when running in 64-bit mode, using XXPERMDI to get access to the
17051 	     bottom 64-bit value.  */
17052 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
17053 {
17054 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
17055 icode = reg_addr[mode].reload_gpr_vsx;
17056 }
17057 }
17058
17059 else if (mode == SFmode)
17060 {
17061 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
17062 {
17063 cost = 3; /* xscvdpspn, mfvsrd, and. */
17064 icode = reg_addr[mode].reload_gpr_vsx;
17065 }
17066
17067 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
17068 {
17069 cost = 2; /* mtvsrz, xscvspdpn. */
17070 icode = reg_addr[mode].reload_vsx_gpr;
17071 }
17072 }
17073 }
17074
17096   if (!TARGET_POWERPC64 && size == 8)
17097 {
17098 /* Handle moving 64-bit values from GPRs to floating point registers on
17099 power8 when running in 32-bit mode using FMRGOW to glue the two 32-bit
17100 values back together. Altivec register classes must be handled
17101 specially since a different instruction is used, and the secondary
17102 reload support requires a single instruction class in the scratch
17103 register constraint. However, right now TFmode is not allowed in
17104 Altivec registers, so the pattern will never match. */
17105 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
17106 {
17107 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
17108 icode = reg_addr[mode].reload_fpr_gpr;
17109 }
17110 }
17111
17112 if (icode != CODE_FOR_nothing)
17113 {
17114 ret = true;
17115 if (sri)
17116 {
17117 sri->icode = icode;
17118 sri->extra_cost = cost;
17119 }
17120 }
17121
17122 return ret;
17123 }
17124
17125 /* Return whether a move between two register classes can be done either
17126 directly (simple move) or via a pattern that uses a single extra temporary
17127    (using power8's direct move in this case).  */
17128
17129 static bool
17130 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
17131 enum rs6000_reg_type from_type,
17132 machine_mode mode,
17133 secondary_reload_info *sri,
17134 bool altivec_p)
17135 {
17136 /* Fall back to load/store reloads if either type is not a register. */
17137 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
17138 return false;
17139
17140 /* If we haven't allocated registers yet, assume the move can be done for the
17141 standard register types. */
17142 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
17143 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
17144 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
17145 return true;
17146
17147   /* A move within the same set of registers is a simple move for
17148      non-specialized registers.  */
17149 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
17150 return true;
17151
17152 /* Check whether a simple move can be done directly. */
17153 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
17154 {
17155 if (sri)
17156 {
17157 sri->icode = CODE_FOR_nothing;
17158 sri->extra_cost = 0;
17159 }
17160 return true;
17161 }
17162
17163 /* Now check if we can do it in a few steps. */
17164 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
17165 altivec_p);
17166 }
17167
17168 /* Inform reload about cases where moving X with a mode MODE to a register in
17169 RCLASS requires an extra scratch or immediate register. Return the class
17170 needed for the immediate register.
17171
17172 For VSX and Altivec, we may need a register to convert sp+offset into
17173 reg+sp.
17174
17175 For misaligned 64-bit gpr loads and stores we need a register to
17176 convert an offset address to indirect. */
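/* For example, an Altivec V4SI spill whose slot address is
   (plus (reg sp) (const_int 64)) cannot be used by lvx/stvx directly; the
   reload computes the address (or its offset part) into a GPR scratch so
   that the final access uses a plain register or a reg+reg form.  */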
17177
17178 static reg_class_t
17179 rs6000_secondary_reload (bool in_p,
17180 rtx x,
17181 reg_class_t rclass_i,
17182 machine_mode mode,
17183 secondary_reload_info *sri)
17184 {
17185 enum reg_class rclass = (enum reg_class) rclass_i;
17186 reg_class_t ret = ALL_REGS;
17187 enum insn_code icode;
17188 bool default_p = false;
17189 bool done_p = false;
17190
17191 /* Allow subreg of memory before/during reload. */
17192 bool memory_p = (MEM_P (x)
17193 || (!reload_completed && GET_CODE (x) == SUBREG
17194 && MEM_P (SUBREG_REG (x))));
17195
17196 sri->icode = CODE_FOR_nothing;
17197 sri->extra_cost = 0;
17198 icode = ((in_p)
17199 ? reg_addr[mode].reload_load
17200 : reg_addr[mode].reload_store);
17201
17202 if (REG_P (x) || register_operand (x, mode))
17203 {
17204 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
17205 bool altivec_p = (rclass == ALTIVEC_REGS);
17206 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
17207
17208 if (!in_p)
17209 {
17210 enum rs6000_reg_type exchange = to_type;
17211 to_type = from_type;
17212 from_type = exchange;
17213 }
17214
17215 /* Can we do a direct move of some sort? */
17216 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
17217 altivec_p))
17218 {
17219 icode = (enum insn_code)sri->icode;
17220 default_p = false;
17221 done_p = true;
17222 ret = NO_REGS;
17223 }
17224 }
17225
17226 /* Make sure 0.0 is not reloaded or forced into memory. */
17227 if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
17228 {
17229 ret = NO_REGS;
17230 default_p = false;
17231 done_p = true;
17232 }
17233
17234   /* If this is a scalar floating point value and we want to load it into the
17235      traditional Altivec registers, do it through a traditional floating point
17236      register.  Also make sure that non-zero constants use an FPR.  */
17237 if (!done_p && reg_addr[mode].scalar_in_vmx_p
17238 && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
17239 && (memory_p || (GET_CODE (x) == CONST_DOUBLE)))
17240 {
17241 ret = FLOAT_REGS;
17242 default_p = false;
17243 done_p = true;
17244 }
17245
17246 /* Handle reload of load/stores if we have reload helper functions. */
17247 if (!done_p && icode != CODE_FOR_nothing && memory_p)
17248 {
17249 int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
17250 mode);
17251
17252 if (extra_cost >= 0)
17253 {
17254 done_p = true;
17255 ret = NO_REGS;
17256 if (extra_cost > 0)
17257 {
17258 sri->extra_cost = extra_cost;
17259 sri->icode = icode;
17260 }
17261 }
17262 }
17263
17264 /* Handle unaligned loads and stores of integer registers. */
17265 if (!done_p && TARGET_POWERPC64
17266 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
17267 && memory_p
17268 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
17269 {
17270 rtx addr = XEXP (x, 0);
17271 rtx off = address_offset (addr);
17272
17273 if (off != NULL_RTX)
17274 {
17275 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
17276 unsigned HOST_WIDE_INT offset = INTVAL (off);
17277
17278 /* We need a secondary reload when our legitimate_address_p
17279 says the address is good (as otherwise the entire address
17280 will be reloaded), and the offset is not a multiple of
17281 four or we have an address wrap. Address wrap will only
17282 occur for LO_SUMs since legitimate_offset_address_p
17283 rejects addresses for 16-byte mems that will wrap. */
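	  /* E.g., a DImode access at sp+6: "ld" and "std" are DS-form
	     instructions whose 16-bit displacement must be a multiple of 4,
	     so the offset has to be moved into a register and an indexed
	     "ldx"/"stdx" used instead.  */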
17284 if (GET_CODE (addr) == LO_SUM
17285 ? (1 /* legitimate_address_p allows any offset for lo_sum */
17286 && ((offset & 3) != 0
17287 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
17288 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
17289 && (offset & 3) != 0))
17290 {
17291 /* -m32 -mpowerpc64 needs to use a 32-bit scratch register. */
17292 if (in_p)
17293 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
17294 : CODE_FOR_reload_di_load);
17295 else
17296 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
17297 : CODE_FOR_reload_di_store);
17298 sri->extra_cost = 2;
17299 ret = NO_REGS;
17300 done_p = true;
17301 }
17302 else
17303 default_p = true;
17304 }
17305 else
17306 default_p = true;
17307 }
17308
17309 if (!done_p && !TARGET_POWERPC64
17310 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
17311 && memory_p
17312 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
17313 {
17314 rtx addr = XEXP (x, 0);
17315 rtx off = address_offset (addr);
17316
17317 if (off != NULL_RTX)
17318 {
17319 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
17320 unsigned HOST_WIDE_INT offset = INTVAL (off);
17321
17322 /* We need a secondary reload when our legitimate_address_p
17323 says the address is good (as otherwise the entire address
17324 will be reloaded), and we have a wrap.
17325
17326 legitimate_lo_sum_address_p allows LO_SUM addresses to
17327 have any offset so test for wrap in the low 16 bits.
17328
17329 legitimate_offset_address_p checks for the range
17330 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
17331 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
17332 [0x7ff4,0x7fff] respectively, so test for the
17333 intersection of these ranges, [0x7ffc,0x7fff] and
17334 [0x7ff4,0x7ff7] respectively.
17335
17336 Note that the address we see here may have been
17337 manipulated by legitimize_reload_address. */
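	  /* Worked example: for an 8-byte access UNITS_PER_WORD is 4 here,
	     so extra = 4 and the unsigned test "offset - (0x8000 - 4) < 4"
	     accepts exactly offsets 0x7ffc..0x7fff, where the second word's
	     displacement would overflow the 16-bit field.  */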
17338 if (GET_CODE (addr) == LO_SUM
17339 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
17340 : offset - (0x8000 - extra) < UNITS_PER_WORD)
17341 {
17342 if (in_p)
17343 sri->icode = CODE_FOR_reload_si_load;
17344 else
17345 sri->icode = CODE_FOR_reload_si_store;
17346 sri->extra_cost = 2;
17347 ret = NO_REGS;
17348 done_p = true;
17349 }
17350 else
17351 default_p = true;
17352 }
17353 else
17354 default_p = true;
17355 }
17356
17357 if (!done_p)
17358 default_p = true;
17359
17360 if (default_p)
17361 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
17362
17363 gcc_assert (ret != ALL_REGS);
17364
17365 if (TARGET_DEBUG_ADDR)
17366 {
17367 fprintf (stderr,
17368 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
17369 "mode = %s",
17370 reg_class_names[ret],
17371 in_p ? "true" : "false",
17372 reg_class_names[rclass],
17373 GET_MODE_NAME (mode));
17374
17375 if (reload_completed)
17376 fputs (", after reload", stderr);
17377
17378 if (!done_p)
17379 fputs (", done_p not set", stderr);
17380
17381 if (default_p)
17382 fputs (", default secondary reload", stderr);
17383
17384 if (sri->icode != CODE_FOR_nothing)
17385 fprintf (stderr, ", reload func = %s, extra cost = %d",
17386 insn_data[sri->icode].name, sri->extra_cost);
17387
17388 fputs ("\n", stderr);
17389 debug_rtx (x);
17390 }
17391
17392 return ret;
17393 }
17394
17395 /* Better tracing for rs6000_secondary_reload_inner. */
17396
17397 static void
17398 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
17399 bool store_p)
17400 {
17401 rtx set, clobber;
17402
17403 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
17404
17405 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
17406 store_p ? "store" : "load");
17407
17408 if (store_p)
17409 set = gen_rtx_SET (mem, reg);
17410 else
17411 set = gen_rtx_SET (reg, mem);
17412
17413 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
17414 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
17415 }
17416
17417 static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
17418 ATTRIBUTE_NORETURN;
17419
17420 static void
17421 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
17422 bool store_p)
17423 {
17424 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
17425 gcc_unreachable ();
17426 }
17427
17428 /* Fixup reload addresses for values in GPR, FPR, and VMX registers that have
17429 reload helper functions. These were identified in
17430 rs6000_secondary_reload_memory, and if reload decided to use the secondary
17431 reload, it calls the insns:
17432 reload_<RELOAD:mode>_<P:mptrsize>_store
17433 reload_<RELOAD:mode>_<P:mptrsize>_load
17434
17435 which in turn calls this function, to do whatever is necessary to create
17436 valid addresses. */
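/* For example, when a V4SF value is reloaded from
   (mem (plus (reg sp) (const_int 48))) into a register class that has no
   offset addressing, the PLUS case below copies sp+48 into the GPR scratch
   and rewrites the access as (mem (reg scratch)).  */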
17437
17438 void
17439 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
17440 {
17441 int regno = true_regnum (reg);
17442 machine_mode mode = GET_MODE (reg);
17443 addr_mask_type addr_mask;
17444 rtx addr;
17445 rtx new_addr;
17446 rtx op_reg, op0, op1;
17447 rtx and_op;
17448 rtx cc_clobber;
17449 rtvec rv;
17450
17451 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER || !MEM_P (mem)
17452 || !base_reg_operand (scratch, GET_MODE (scratch)))
17453 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
17454
17455 if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
17456 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
17457
17458 else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
17459 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
17460
17461 else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
17462 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
17463
17464 else
17465 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
17466
17467 /* Make sure the mode is valid in this register class. */
17468 if ((addr_mask & RELOAD_REG_VALID) == 0)
17469 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
17470
17471 if (TARGET_DEBUG_ADDR)
17472 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
17473
17474 new_addr = addr = XEXP (mem, 0);
17475 switch (GET_CODE (addr))
17476 {
17477 /* Does the register class support auto update forms for this mode? If
17478 not, do the update now. We don't need a scratch register, since the
17479 powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
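    /* E.g., for DFmode (pre_inc (reg 9)) when the class lacks update forms,
       gen_add2_insn emits "addi 9,9,8" here and the memory access then uses
       the plain (reg 9) as its address.  */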
17480 case PRE_INC:
17481 case PRE_DEC:
17482 op_reg = XEXP (addr, 0);
17483 if (!base_reg_operand (op_reg, Pmode))
17484 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
17485
17486 if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
17487 {
17488 emit_insn (gen_add2_insn (op_reg, GEN_INT (GET_MODE_SIZE (mode))));
17489 new_addr = op_reg;
17490 }
17491 break;
17492
17493 case PRE_MODIFY:
17494 op0 = XEXP (addr, 0);
17495 op1 = XEXP (addr, 1);
17496 if (!base_reg_operand (op0, Pmode)
17497 || GET_CODE (op1) != PLUS
17498 || !rtx_equal_p (op0, XEXP (op1, 0)))
17499 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
17500
17501 if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
17502 {
17503 emit_insn (gen_rtx_SET (op0, op1));
17504 	      new_addr = op0;
17505 }
17506 break;
17507
17508 /* Do we need to simulate AND -16 to clear the bottom address bits used
17509 in VMX load/stores? */
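    /* E.g., (mem:V4SI (and:DI (plus:DI (reg 3) (reg 4)) (const_int -16))):
       if the class cannot use the AND form directly, the sum is computed
       into the scratch and then ANDed with -16; the AND pattern may set a
       CR field, hence the CC scratch clobber below.  */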
17510 case AND:
17511 op0 = XEXP (addr, 0);
17512 op1 = XEXP (addr, 1);
17513 if ((addr_mask & RELOAD_REG_AND_M16) == 0)
17514 {
17515 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
17516 op_reg = op0;
17517
17518 	  else if (GET_CODE (op0) == PLUS)
17519 	    {
17520 	      emit_insn (gen_rtx_SET (scratch, op0));
17521 op_reg = scratch;
17522 }
17523
17524 else
17525 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
17526
17527 and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
17528 cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
17529 rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
17530 emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
17531 new_addr = scratch;
17532 }
17533 break;
17534
17535 /* If this is an indirect address, make sure it is a base register. */
17536 case REG:
17537 case SUBREG:
17538 if (!base_reg_operand (addr, GET_MODE (addr)))
17539 {
17540 emit_insn (gen_rtx_SET (scratch, addr));
17541 new_addr = scratch;
17542 }
17543 break;
17544
17545 /* If this is an indexed address, make sure the register class can handle
17546 indexed addresses for this mode. */
17547 case PLUS:
17548 op0 = XEXP (addr, 0);
17549 op1 = XEXP (addr, 1);
17550 if (!base_reg_operand (op0, Pmode))
17551 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
17552
17553 else if (int_reg_operand (op1, Pmode))
17554 {
17555 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
17556 {
17557 emit_insn (gen_rtx_SET (scratch, addr));
17558 new_addr = scratch;
17559 }
17560 }
17561
17562 /* Make sure the register class can handle offset addresses. */
17563 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
17564 {
17565 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
17566 {
17567 emit_insn (gen_rtx_SET (scratch, addr));
17568 new_addr = scratch;
17569 }
17570 }
17571
17572 else
17573 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
17574
17575 break;
17576
17577 case LO_SUM:
17578 op0 = XEXP (addr, 0);
17579 op1 = XEXP (addr, 1);
17580 if (!base_reg_operand (op0, Pmode))
17581 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
17582
17583 else if (int_reg_operand (op1, Pmode))
17584 {
17585 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
17586 {
17587 emit_insn (gen_rtx_SET (scratch, addr));
17588 new_addr = scratch;
17589 }
17590 }
17591
17592 /* Make sure the register class can handle offset addresses. */
17593 else if (legitimate_lo_sum_address_p (mode, addr, false))
17594 {
17595 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
17596 {
17597 emit_insn (gen_rtx_SET (scratch, addr));
17598 new_addr = scratch;
17599 }
17600 }
17601
17602 else
17603 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
17604
17605 break;
17606
17607 case SYMBOL_REF:
17608 case CONST:
17609 case LABEL_REF:
17610 rs6000_emit_move (scratch, addr, Pmode);
17611 new_addr = scratch;
17612 break;
17613
17614 default:
17615 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
17616 }
17617
17618 /* Adjust the address if it changed. */
17619 if (addr != new_addr)
17620 {
17621 mem = replace_equiv_address_nv (mem, new_addr);
17622 if (TARGET_DEBUG_ADDR)
17623 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
17624 }
17625
17626 /* Now create the move. */
17627 if (store_p)
17628 emit_insn (gen_rtx_SET (mem, reg));
17629 else
17630 emit_insn (gen_rtx_SET (reg, mem));
17631
17632 return;
17633 }
17634
17635 /* Convert reloads involving 64-bit gprs and misaligned offset
17636 addressing, or multiple 32-bit gprs and offsets that are too large,
17637 to use indirect addressing. */
17638
17639 void
17640 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
17641 {
17642 int regno = true_regnum (reg);
17643 enum reg_class rclass;
17644 rtx addr;
17645 rtx scratch_or_premodify = scratch;
17646
17647 if (TARGET_DEBUG_ADDR)
17648 {
17649 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
17650 store_p ? "store" : "load");
17651 fprintf (stderr, "reg:\n");
17652 debug_rtx (reg);
17653 fprintf (stderr, "mem:\n");
17654 debug_rtx (mem);
17655 fprintf (stderr, "scratch:\n");
17656 debug_rtx (scratch);
17657 }
17658
17659 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
17660 gcc_assert (GET_CODE (mem) == MEM);
17661 rclass = REGNO_REG_CLASS (regno);
17662 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
17663 addr = XEXP (mem, 0);
17664
17665 if (GET_CODE (addr) == PRE_MODIFY)
17666 {
17667 scratch_or_premodify = XEXP (addr, 0);
17668 gcc_assert (REG_P (scratch_or_premodify));
17669 addr = XEXP (addr, 1);
17670 }
17671 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
17672
17673 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
17674
17675 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
17676
17677 /* Now create the move. */
17678 if (store_p)
17679 emit_insn (gen_rtx_SET (mem, reg));
17680 else
17681 emit_insn (gen_rtx_SET (reg, mem));
17682
17683 return;
17684 }
17685
17686 /* Allocate a 64-bit stack slot to be used for copying SDmode values through if
17687 this function has any SDmode references. If we are on a power7 or later, we
17688    don't need the 64-bit stack slot since the LFIWZX and STFIWX instructions
17689 can load/store the value. */
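/* Without LFIWZX/STFIWX, moving an SDmode value between a GPR and an FPR has
   to go through this slot, roughly an stw/lfd pair in one direction and an
   stfd/lwz pair in the other, because the FPR side only has 8-byte loads and
   stores that keep the value in its in-register format.  */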
17690
17691 static void
17692 rs6000_alloc_sdmode_stack_slot (void)
17693 {
17694 tree t;
17695 basic_block bb;
17696 gimple_stmt_iterator gsi;
17697
17698 gcc_assert (cfun->machine->sdmode_stack_slot == NULL_RTX);
17699 /* We use a different approach for dealing with the secondary
17700 memory in LRA. */
17701 if (ira_use_lra_p)
17702 return;
17703
17704 if (TARGET_NO_SDMODE_STACK)
17705 return;
17706
17707 FOR_EACH_BB_FN (bb, cfun)
17708 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
17709 {
17710 tree ret = walk_gimple_op (gsi_stmt (gsi), rs6000_check_sdmode, NULL);
17711 if (ret)
17712 {
17713 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
17714 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
17715 SDmode, 0);
17716 return;
17717 }
17718 }
17719
17720 /* Check for any SDmode parameters of the function. */
17721 for (t = DECL_ARGUMENTS (cfun->decl); t; t = DECL_CHAIN (t))
17722 {
17723 if (TREE_TYPE (t) == error_mark_node)
17724 continue;
17725
17726 if (TYPE_MODE (TREE_TYPE (t)) == SDmode
17727 || TYPE_MODE (DECL_ARG_TYPE (t)) == SDmode)
17728 {
17729 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
17730 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
17731 SDmode, 0);
17732 return;
17733 }
17734 }
17735 }
17736
17737 static void
17738 rs6000_instantiate_decls (void)
17739 {
17740 if (cfun->machine->sdmode_stack_slot != NULL_RTX)
17741 instantiate_decl_rtl (cfun->machine->sdmode_stack_slot);
17742 }
17743
17744 /* Given an rtx X being reloaded into a reg required to be
17745 in class CLASS, return the class of reg to actually use.
17746 In general this is just CLASS; but on some machines
17747 in some cases it is preferable to use a more restrictive class.
17748
17749 On the RS/6000, we have to return NO_REGS when we want to reload a
17750 floating-point CONST_DOUBLE to force it to be copied to memory.
17751
17752 We also don't want to reload integer values into floating-point
17753 registers if we can at all help it. In fact, this can
17754 cause reload to die, if it tries to generate a reload of CTR
17755 into a FP register and discovers it doesn't have the memory location
17756 required.
17757
17758 ??? Would it be a good idea to have reload do the converse, that is
17759 try to reload floating modes into FP registers if possible?
17760 */
17761
17762 static enum reg_class
17763 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
17764 {
17765 machine_mode mode = GET_MODE (x);
17766 bool is_constant = CONSTANT_P (x);
17767
17768 /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS. Do not allow
17769 the reloading of address expressions using PLUS into floating point
17770 registers. */
17771 if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
17772 {
17773 if (is_constant)
17774 {
17775 /* Zero is always allowed in all VSX registers. */
17776 if (x == CONST0_RTX (mode))
17777 return rclass;
17778
17779 /* If this is a vector constant that can be formed with a few Altivec
17780 instructions, we want altivec registers. */
17781 if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
17782 return ALTIVEC_REGS;
17783
17784 /* Force constant to memory. */
17785 return NO_REGS;
17786 }
17787
17788 /* If this is a scalar floating point value, prefer the traditional
17789 floating point registers so that we can use D-form (register+offset)
17790 addressing. */
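  /* E.g., a DFmode value in f1 can be saved with "stfd 1,16(1)", while the
     Altivec registers only have the indexed lvx/stvx forms.  */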
17791 if (GET_MODE_SIZE (mode) < 16)
17792 return FLOAT_REGS;
17793
17794 /* Prefer the Altivec registers if Altivec is handling the vector
17795 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
17796 loads. */
17797 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
17798 || mode == V1TImode)
17799 return ALTIVEC_REGS;
17800
17801 return rclass;
17802 }
17803
17804 if (is_constant || GET_CODE (x) == PLUS)
17805 {
17806 if (reg_class_subset_p (GENERAL_REGS, rclass))
17807 return GENERAL_REGS;
17808 if (reg_class_subset_p (BASE_REGS, rclass))
17809 return BASE_REGS;
17810 return NO_REGS;
17811 }
17812
17813 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
17814 return GENERAL_REGS;
17815
17816 return rclass;
17817 }
17818
17819 /* Debug version of rs6000_preferred_reload_class. */
17820 static enum reg_class
17821 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
17822 {
17823 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
17824
17825 fprintf (stderr,
17826 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
17827 "mode = %s, x:\n",
17828 reg_class_names[ret], reg_class_names[rclass],
17829 GET_MODE_NAME (GET_MODE (x)));
17830 debug_rtx (x);
17831
17832 return ret;
17833 }
17834
17835 /* If we are copying between FP or AltiVec registers and anything else, we need
17836    a memory location.  The exception is when we are targeting ppc64 and the
17837    direct-move instructions between GPRs and FPRs are available.  Also,
17838    under VSX, you can copy vector registers from the FP register set to the
17839    Altivec register set and vice versa.  */
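/* Without direct moves, an SFmode copy from f1 to r3, for instance, becomes
   a store ("stfs") followed by a load ("lwz") through a stack temporary.  */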
17840
17841 static bool
17842 rs6000_secondary_memory_needed (enum reg_class from_class,
17843 enum reg_class to_class,
17844 machine_mode mode)
17845 {
17846 enum rs6000_reg_type from_type, to_type;
17847 bool altivec_p = ((from_class == ALTIVEC_REGS)
17848 || (to_class == ALTIVEC_REGS));
17849
17850   /* If a simple/direct move is available, we don't need secondary memory.  */
17851 from_type = reg_class_to_reg_type[(int)from_class];
17852 to_type = reg_class_to_reg_type[(int)to_class];
17853
17854 if (rs6000_secondary_reload_move (to_type, from_type, mode,
17855 (secondary_reload_info *)0, altivec_p))
17856 return false;
17857
17858 /* If we have a floating point or vector register class, we need to use
17859 memory to transfer the data. */
17860 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
17861 return true;
17862
17863 return false;
17864 }
17865
17866 /* Debug version of rs6000_secondary_memory_needed. */
17867 static bool
17868 rs6000_debug_secondary_memory_needed (enum reg_class from_class,
17869 enum reg_class to_class,
17870 machine_mode mode)
17871 {
17872 bool ret = rs6000_secondary_memory_needed (from_class, to_class, mode);
17873
17874 fprintf (stderr,
17875 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
17876 "to_class = %s, mode = %s\n",
17877 ret ? "true" : "false",
17878 reg_class_names[from_class],
17879 reg_class_names[to_class],
17880 GET_MODE_NAME (mode));
17881
17882 return ret;
17883 }
17884
17885 /* Return the register class of a scratch register needed to copy IN into
17886 or out of a register in RCLASS in MODE. If it can be done directly,
17887 NO_REGS is returned. */
17888
17889 static enum reg_class
17890 rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
17891 rtx in)
17892 {
17893 int regno;
17894
17895 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
17896 #if TARGET_MACHO
17897 && MACHOPIC_INDIRECT
17898 #endif
17899 ))
17900 {
17901 /* We cannot copy a symbolic operand directly into anything
17902 other than BASE_REGS for TARGET_ELF. So indicate that a
17903 register from BASE_REGS is needed as an intermediate
17904 register.
17905
17906 On Darwin, pic addresses require a load from memory, which
17907 needs a base register. */
17908 if (rclass != BASE_REGS
17909 && (GET_CODE (in) == SYMBOL_REF
17910 || GET_CODE (in) == HIGH
17911 || GET_CODE (in) == LABEL_REF
17912 || GET_CODE (in) == CONST))
17913 return BASE_REGS;
17914 }
17915
17916 if (GET_CODE (in) == REG)
17917 {
17918 regno = REGNO (in);
17919 if (regno >= FIRST_PSEUDO_REGISTER)
17920 {
17921 regno = true_regnum (in);
17922 if (regno >= FIRST_PSEUDO_REGISTER)
17923 regno = -1;
17924 }
17925 }
17926 else if (GET_CODE (in) == SUBREG)
17927 {
17928 regno = true_regnum (in);
17929 if (regno >= FIRST_PSEUDO_REGISTER)
17930 regno = -1;
17931 }
17932 else
17933 regno = -1;
17934
17935 /* If we have VSX register moves, prefer moving scalar values between
17936 Altivec registers and GPR by going via an FPR (and then via memory)
17937 instead of reloading the secondary memory address for Altivec moves. */
17938 if (TARGET_VSX
17939 && GET_MODE_SIZE (mode) < 16
17940 && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
17941 && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
17942 || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
17943 && (regno >= 0 && INT_REGNO_P (regno)))))
17944 return FLOAT_REGS;
17945
17946 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
17947 into anything. */
17948 if (rclass == GENERAL_REGS || rclass == BASE_REGS
17949 || (regno >= 0 && INT_REGNO_P (regno)))
17950 return NO_REGS;
17951
17952 /* Constants, memory, and VSX registers can go into VSX registers (both the
17953 traditional floating point and the altivec registers). */
17954 if (rclass == VSX_REGS
17955 && (regno == -1 || VSX_REGNO_P (regno)))
17956 return NO_REGS;
17957
17958 /* Constants, memory, and FP registers can go into FP registers. */
17959 if ((regno == -1 || FP_REGNO_P (regno))
17960 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
17961 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
17962
17963   /* Memory and AltiVec registers can go into AltiVec registers.  */
17964 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
17965 && rclass == ALTIVEC_REGS)
17966 return NO_REGS;
17967
17968 /* We can copy among the CR registers. */
17969 if ((rclass == CR_REGS || rclass == CR0_REGS)
17970 && regno >= 0 && CR_REGNO_P (regno))
17971 return NO_REGS;
17972
17973 /* Otherwise, we need GENERAL_REGS. */
17974 return GENERAL_REGS;
17975 }
17976
17977 /* Debug version of rs6000_secondary_reload_class. */
17978 static enum reg_class
17979 rs6000_debug_secondary_reload_class (enum reg_class rclass,
17980 machine_mode mode, rtx in)
17981 {
17982 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
17983 fprintf (stderr,
17984 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
17985 "mode = %s, input rtx:\n",
17986 reg_class_names[ret], reg_class_names[rclass],
17987 GET_MODE_NAME (mode));
17988 debug_rtx (in);
17989
17990 return ret;
17991 }
17992
17993 /* Return nonzero if for CLASS a mode change from FROM to TO is invalid. */
17994
17995 static bool
17996 rs6000_cannot_change_mode_class (machine_mode from,
17997 machine_mode to,
17998 enum reg_class rclass)
17999 {
18000 unsigned from_size = GET_MODE_SIZE (from);
18001 unsigned to_size = GET_MODE_SIZE (to);
18002
18003 if (from_size != to_size)
18004 {
18005 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
18006
18007 if (reg_classes_intersect_p (xclass, rclass))
18008 {
18009 unsigned to_nregs = hard_regno_nregs[FIRST_FPR_REGNO][to];
18010 unsigned from_nregs = hard_regno_nregs[FIRST_FPR_REGNO][from];
18011
18012 /* Don't allow 64-bit types to overlap with 128-bit types that take a
18013 single register under VSX because the scalar part of the register
18014 is in the upper 64-bits, and not the lower 64-bits. Types like
18015 	     TFmode/TDmode that take 2 scalar registers can overlap.  128-bit
18016 IEEE floating point can't overlap, and neither can small
18017 values. */
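	  /* E.g., a DFmode scalar occupies the upper 64 bits of its VSX
	     register, so a 64-bit subreg of a one-register 128-bit value
	     would not name the bits that the scalar instructions use.  */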
18018
18019 if (TARGET_IEEEQUAD && (to == TFmode || from == TFmode))
18020 return true;
18021
18022 /* TDmode in floating-mode registers must always go into a register
18023 pair with the most significant word in the even-numbered register
18024 to match ISA requirements. In little-endian mode, this does not
18025 match subreg numbering, so we cannot allow subregs. */
18026 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
18027 return true;
18028
18029 if (from_size < 8 || to_size < 8)
18030 return true;
18031
18032 if (from_size == 8 && (8 * to_nregs) != to_size)
18033 return true;
18034
18035 if (to_size == 8 && (8 * from_nregs) != from_size)
18036 return true;
18037
18038 return false;
18039 }
18040 else
18041 return false;
18042 }
18043
18044 if (TARGET_E500_DOUBLE
18045 && ((((to) == DFmode) + ((from) == DFmode)) == 1
18046 || (((to) == TFmode) + ((from) == TFmode)) == 1
18047 || (((to) == DDmode) + ((from) == DDmode)) == 1
18048 || (((to) == TDmode) + ((from) == TDmode)) == 1
18049 || (((to) == DImode) + ((from) == DImode)) == 1))
18050 return true;
18051
18052 /* Since the VSX register set includes traditional floating point registers
18053 and altivec registers, just check for the size being different instead of
18054 trying to check whether the modes are vector modes. Otherwise it won't
18055 allow say DF and DI to change classes. For types like TFmode and TDmode
18056 that take 2 64-bit registers, rather than a single 128-bit register, don't
18057 allow subregs of those types to other 128 bit types. */
18058 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
18059 {
18060 unsigned num_regs = (from_size + 15) / 16;
18061 if (hard_regno_nregs[FIRST_FPR_REGNO][to] > num_regs
18062 || hard_regno_nregs[FIRST_FPR_REGNO][from] > num_regs)
18063 return true;
18064
18065 return (from_size != 8 && from_size != 16);
18066 }
18067
18068 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
18069 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
18070 return true;
18071
18072 if (TARGET_SPE && (SPE_VECTOR_MODE (from) + SPE_VECTOR_MODE (to)) == 1
18073 && reg_classes_intersect_p (GENERAL_REGS, rclass))
18074 return true;
18075
18076 return false;
18077 }
18078
18079 /* Debug version of rs6000_cannot_change_mode_class. */
18080 static bool
18081 rs6000_debug_cannot_change_mode_class (machine_mode from,
18082 machine_mode to,
18083 enum reg_class rclass)
18084 {
18085 bool ret = rs6000_cannot_change_mode_class (from, to, rclass);
18086
18087 fprintf (stderr,
18088 "rs6000_cannot_change_mode_class, return %s, from = %s, "
18089 "to = %s, rclass = %s\n",
18090 ret ? "true" : "false",
18091 GET_MODE_NAME (from), GET_MODE_NAME (to),
18092 reg_class_names[rclass]);
18093
18094 return ret;
18095 }
18096 \f
18097 /* Return a string to do a move operation of 128 bits of data. */
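/* An output template of "#" tells final that the insn must first be split
   into its component word moves (see rs6000_split_128bit_ok_p below).  */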
18098
18099 const char *
18100 rs6000_output_move_128bit (rtx operands[])
18101 {
18102 rtx dest = operands[0];
18103 rtx src = operands[1];
18104 machine_mode mode = GET_MODE (dest);
18105 int dest_regno;
18106 int src_regno;
18107 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
18108 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
18109
18110 if (REG_P (dest))
18111 {
18112 dest_regno = REGNO (dest);
18113 dest_gpr_p = INT_REGNO_P (dest_regno);
18114 dest_fp_p = FP_REGNO_P (dest_regno);
18115 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
18116 dest_vsx_p = dest_fp_p | dest_vmx_p;
18117 }
18118 else
18119 {
18120 dest_regno = -1;
18121 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
18122 }
18123
18124 if (REG_P (src))
18125 {
18126 src_regno = REGNO (src);
18127 src_gpr_p = INT_REGNO_P (src_regno);
18128 src_fp_p = FP_REGNO_P (src_regno);
18129 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
18130 src_vsx_p = src_fp_p | src_vmx_p;
18131 }
18132 else
18133 {
18134 src_regno = -1;
18135 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
18136 }
18137
18138 /* Register moves. */
18139 if (dest_regno >= 0 && src_regno >= 0)
18140 {
18141 if (dest_gpr_p)
18142 {
18143 if (src_gpr_p)
18144 return "#";
18145
18146 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
18147 return "#";
18148 }
18149
18150 else if (TARGET_VSX && dest_vsx_p)
18151 {
18152 if (src_vsx_p)
18153 return "xxlor %x0,%x1,%x1";
18154
18155 else if (TARGET_DIRECT_MOVE && src_gpr_p)
18156 return "#";
18157 }
18158
18159 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
18160 return "vor %0,%1,%1";
18161
18162 else if (dest_fp_p && src_fp_p)
18163 return "#";
18164 }
18165
18166 /* Loads. */
18167 else if (dest_regno >= 0 && MEM_P (src))
18168 {
18169 if (dest_gpr_p)
18170 {
18171 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
18172 return "lq %0,%1";
18173 else
18174 return "#";
18175 }
18176
18177 else if (TARGET_ALTIVEC && dest_vmx_p
18178 && altivec_indexed_or_indirect_operand (src, mode))
18179 return "lvx %0,%y1";
18180
18181 else if (TARGET_VSX && dest_vsx_p)
18182 {
18183 if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
18184 return "lxvw4x %x0,%y1";
18185 else
18186 return "lxvd2x %x0,%y1";
18187 }
18188
18189 else if (TARGET_ALTIVEC && dest_vmx_p)
18190 return "lvx %0,%y1";
18191
18192 else if (dest_fp_p)
18193 return "#";
18194 }
18195
18196 /* Stores. */
18197 else if (src_regno >= 0 && MEM_P (dest))
18198 {
18199 if (src_gpr_p)
18200 {
18201 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
18202 return "stq %1,%0";
18203 else
18204 return "#";
18205 }
18206
18207 else if (TARGET_ALTIVEC && src_vmx_p
18208 	       && altivec_indexed_or_indirect_operand (dest, mode))
18209 return "stvx %1,%y0";
18210
18211 else if (TARGET_VSX && src_vsx_p)
18212 {
18213 if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
18214 return "stxvw4x %x1,%y0";
18215 else
18216 return "stxvd2x %x1,%y0";
18217 }
18218
18219 else if (TARGET_ALTIVEC && src_vmx_p)
18220 return "stvx %1,%y0";
18221
18222 else if (src_fp_p)
18223 return "#";
18224 }
18225
18226 /* Constants. */
18227 else if (dest_regno >= 0
18228 && (GET_CODE (src) == CONST_INT
18229 || GET_CODE (src) == CONST_WIDE_INT
18230 || GET_CODE (src) == CONST_DOUBLE
18231 || GET_CODE (src) == CONST_VECTOR))
18232 {
18233 if (dest_gpr_p)
18234 return "#";
18235
18236 else if (TARGET_VSX && dest_vsx_p && zero_constant (src, mode))
18237 return "xxlxor %x0,%x0,%x0";
18238
18239 else if (TARGET_ALTIVEC && dest_vmx_p)
18240 return output_vec_const_move (operands);
18241 }
18242
18243 if (TARGET_DEBUG_ADDR)
18244 {
18245 fprintf (stderr, "\n===== Bad 128 bit move:\n");
18246 debug_rtx (gen_rtx_SET (dest, src));
18247 }
18248
18249 gcc_unreachable ();
18250 }
18251
18252 /* Validate a 128-bit move. */
18253 bool
18254 rs6000_move_128bit_ok_p (rtx operands[])
18255 {
18256 machine_mode mode = GET_MODE (operands[0]);
18257 return (gpc_reg_operand (operands[0], mode)
18258 || gpc_reg_operand (operands[1], mode));
18259 }
18260
18261 /* Return true if a 128-bit move needs to be split. */
18262 bool
18263 rs6000_split_128bit_ok_p (rtx operands[])
18264 {
18265 if (!reload_completed)
18266 return false;
18267
18268 if (!gpr_or_gpr_p (operands[0], operands[1]))
18269 return false;
18270
18271 if (quad_load_store_p (operands[0], operands[1]))
18272 return false;
18273
18274 return true;
18275 }
18276
18277 \f
18278 /* Given a comparison operation, return the bit number in CCR to test. We
18279 know this is a valid comparison.
18280
18281 SCC_P is 1 if this is for an scc. That means that %D will have been
18282 used instead of %C, so the bits will be in different places.
18283
18284 Return -1 if OP isn't a valid comparison for some reason. */
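/* For instance, a GT comparison whose result lives in field CR1 has
   base_bit = 4 * 1 = 4, so the bit to test is 4 + 1 = 5.  */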
18285
18286 int
18287 ccr_bit (rtx op, int scc_p)
18288 {
18289 enum rtx_code code = GET_CODE (op);
18290 machine_mode cc_mode;
18291 int cc_regnum;
18292 int base_bit;
18293 rtx reg;
18294
18295 if (!COMPARISON_P (op))
18296 return -1;
18297
18298 reg = XEXP (op, 0);
18299
18300 gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
18301
18302 cc_mode = GET_MODE (reg);
18303 cc_regnum = REGNO (reg);
18304 base_bit = 4 * (cc_regnum - CR0_REGNO);
18305
18306 validate_condition_mode (code, cc_mode);
18307
18308 /* When generating a sCOND operation, only positive conditions are
18309 allowed. */
18310 gcc_assert (!scc_p
18311 || code == EQ || code == GT || code == LT || code == UNORDERED
18312 || code == GTU || code == LTU);
18313
18314 switch (code)
18315 {
18316 case NE:
18317 return scc_p ? base_bit + 3 : base_bit + 2;
18318 case EQ:
18319 return base_bit + 2;
18320 case GT: case GTU: case UNLE:
18321 return base_bit + 1;
18322 case LT: case LTU: case UNGE:
18323 return base_bit;
18324 case ORDERED: case UNORDERED:
18325 return base_bit + 3;
18326
18327 case GE: case GEU:
18328 /* If scc, we will have done a cror to put the bit in the
18329 unordered position. So test that bit. For integer, this is ! LT
18330 unless this is an scc insn. */
18331 return scc_p ? base_bit + 3 : base_bit;
18332
18333 case LE: case LEU:
18334 return scc_p ? base_bit + 3 : base_bit + 1;
18335
18336 default:
18337 gcc_unreachable ();
18338 }
18339 }
18340 \f
18341 /* Return the GOT register. */
18342
18343 rtx
18344 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
18345 {
18346 /* The second flow pass currently (June 1999) can't update
18347 regs_ever_live without disturbing other parts of the compiler, so
18348 update it here to make the prolog/epilogue code happy. */
18349 if (!can_create_pseudo_p ()
18350 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
18351 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
18352
18353 crtl->uses_pic_offset_table = 1;
18354
18355 return pic_offset_table_rtx;
18356 }
18357 \f
18358 static rs6000_stack_t stack_info;
18359
18360 /* Function to init struct machine_function.
18361 This will be called, via a pointer variable,
18362 from push_function_context. */
18363
18364 static struct machine_function *
18365 rs6000_init_machine_status (void)
18366 {
18367 stack_info.reload_completed = 0;
18368 return ggc_cleared_alloc<machine_function> ();
18369 }
18370 \f
18371 #define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)
18372
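/* Extract the mask-begin (MB) and mask-end (ME) fields of an rlwinm-style
   32-bit mask.  For example, the mask 0x0000fff0 has its 1 bits in positions
   16..27 (counting from the most significant bit), so extract_MB returns 16
   and extract_ME returns 27.  */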
18373 int
18374 extract_MB (rtx op)
18375 {
18376 int i;
18377 unsigned long val = INTVAL (op);
18378
18379   /* If the high bit is zero, the result is the position of the first
18380      1 bit we find from the left.  */
18381 if ((val & 0x80000000) == 0)
18382 {
18383 gcc_assert (val & 0xffffffff);
18384
18385 i = 1;
18386 while (((val <<= 1) & 0x80000000) == 0)
18387 ++i;
18388 return i;
18389 }
18390
18391 /* If the high bit is set and the low bit is not, or the mask is all
18392 1's, the value is zero. */
18393 if ((val & 1) == 0 || (val & 0xffffffff) == 0xffffffff)
18394 return 0;
18395
18396 /* Otherwise we have a wrap-around mask. Look for the first 0 bit
18397 from the right. */
18398 i = 31;
18399 while (((val >>= 1) & 1) != 0)
18400 --i;
18401
18402 return i;
18403 }
18404
18405 int
18406 extract_ME (rtx op)
18407 {
18408 int i;
18409 unsigned long val = INTVAL (op);
18410
18411   /* If the low bit is zero, the result is the position of the first 1 bit
18412      we find from the right.  */
18413 if ((val & 1) == 0)
18414 {
18415 gcc_assert (val & 0xffffffff);
18416
18417 i = 30;
18418 while (((val >>= 1) & 1) == 0)
18419 --i;
18420
18421 return i;
18422 }
18423
18424 /* If the low bit is set and the high bit is not, or the mask is all
18425 1's, the value is 31. */
18426 if ((val & 0x80000000) == 0 || (val & 0xffffffff) == 0xffffffff)
18427 return 31;
18428
18429 /* Otherwise we have a wrap-around mask. Look for the first 0 bit
18430 from the left. */
18431 i = 0;
18432 while (((val <<= 1) & 0x80000000) != 0)
18433 ++i;
18434
18435 return i;
18436 }
18437
18438 /* Write out a function code label. */
18439
18440 void
18441 rs6000_output_function_entry (FILE *file, const char *fname)
18442 {
18443 if (fname[0] != '.')
18444 {
18445 switch (DEFAULT_ABI)
18446 {
18447 default:
18448 gcc_unreachable ();
18449
18450 case ABI_AIX:
18451 if (DOT_SYMBOLS)
18452 putc ('.', file);
18453 else
18454 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
18455 break;
18456
18457 case ABI_ELFv2:
18458 case ABI_V4:
18459 case ABI_DARWIN:
18460 break;
18461 }
18462 }
18463
18464 RS6000_OUTPUT_BASENAME (file, fname);
18465 }
18466
18467 /* Print an operand. Recognize special options, documented below. */
18468
18469 #if TARGET_ELF
18470 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
18471 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
18472 #else
18473 #define SMALL_DATA_RELOC "sda21"
18474 #define SMALL_DATA_REG 0
18475 #endif
18476
18477 void
18478 print_operand (FILE *file, rtx x, int code)
18479 {
18480 int i;
18481 unsigned HOST_WIDE_INT uval;
18482
18483 switch (code)
18484 {
18485 /* %a is output_address. */
18486
18487 case 'b':
18488 /* If constant, low-order 16 bits of constant, unsigned.
18489 Otherwise, write normally. */
18490 if (INT_P (x))
18491 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffff);
18492 else
18493 print_operand (file, x, 0);
18494 return;
18495
18496 case 'B':
18497 /* If the low-order bit is zero, write 'r'; otherwise, write 'l'
18498 for 64-bit mask direction. */
18499 putc (((INTVAL (x) & 1) == 0 ? 'r' : 'l'), file);
18500 return;
18501
18502 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
18503 output_operand. */
18504
18505 case 'D':
18506 /* Like 'J' but get to the GT bit only. */
18507 gcc_assert (REG_P (x));
18508
18509 /* Bit 1 is GT bit. */
18510 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
18511
18512 /* Add one for shift count in rlinm for scc. */
18513 fprintf (file, "%d", i + 1);
18514 return;
18515
18516 case 'e':
18517 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
18518 if (! INT_P (x))
18519 {
18520 output_operand_lossage ("invalid %%e value");
18521 return;
18522 }
18523
18524 uval = INTVAL (x);
18525 if ((uval & 0xffff) == 0 && uval != 0)
18526 putc ('s', file);
18527 return;
18528
18529 case 'E':
18530       /* X is a CR register.  Print the number of the EQ bit of the CR.  */
18531 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
18532 output_operand_lossage ("invalid %%E value");
18533 else
18534 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
18535 return;
18536
18537 case 'f':
18538 /* X is a CR register. Print the shift count needed to move it
18539 to the high-order four bits. */
18540 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
18541 output_operand_lossage ("invalid %%f value");
18542 else
18543 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
18544 return;
18545
18546 case 'F':
18547 /* Similar, but print the count for the rotate in the opposite
18548 direction. */
18549 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
18550 output_operand_lossage ("invalid %%F value");
18551 else
18552 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
18553 return;
18554
18555 case 'G':
18556 /* X is a constant integer. If it is negative, print "m",
18557 otherwise print "z". This is to make an aze or ame insn. */
18558 if (GET_CODE (x) != CONST_INT)
18559 output_operand_lossage ("invalid %%G value");
18560 else if (INTVAL (x) >= 0)
18561 putc ('z', file);
18562 else
18563 putc ('m', file);
18564 return;
18565
18566 case 'h':
18567 /* If constant, output low-order five bits. Otherwise, write
18568 normally. */
18569 if (INT_P (x))
18570 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
18571 else
18572 print_operand (file, x, 0);
18573 return;
18574
18575 case 'H':
18576 /* If constant, output low-order six bits. Otherwise, write
18577 normally. */
18578 if (INT_P (x))
18579 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
18580 else
18581 print_operand (file, x, 0);
18582 return;
18583
18584 case 'I':
18585 /* Print `i' if this is a constant, else nothing. */
18586 if (INT_P (x))
18587 putc ('i', file);
18588 return;
18589
18590 case 'j':
18591 /* Write the bit number in CCR for jump. */
18592 i = ccr_bit (x, 0);
18593 if (i == -1)
18594 output_operand_lossage ("invalid %%j code");
18595 else
18596 fprintf (file, "%d", i);
18597 return;
18598
18599 case 'J':
18600 /* Similar, but add one for shift count in rlinm for scc and pass
18601 scc flag to `ccr_bit'. */
18602 i = ccr_bit (x, 1);
18603 if (i == -1)
18604 output_operand_lossage ("invalid %%J code");
18605 else
18606 /* If we want bit 31, write a shift count of zero, not 32. */
18607 fprintf (file, "%d", i == 31 ? 0 : i + 1);
18608 return;
18609
18610 case 'k':
18611 /* X must be a constant. Write the 1's complement of the
18612 constant. */
18613 if (! INT_P (x))
18614 output_operand_lossage ("invalid %%k value");
18615 else
18616 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
18617 return;
18618
18619 case 'K':
18620 /* X must be a symbolic constant on ELF. Write an
18621 expression suitable for an 'addi' that adds in the low 16
18622 bits of the MEM. */
18623 if (GET_CODE (x) == CONST)
18624 {
18625 if (GET_CODE (XEXP (x, 0)) != PLUS
18626 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
18627 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
18628 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
18629 output_operand_lossage ("invalid %%K value");
18630 }
18631 print_operand_address (file, x);
18632 fputs ("@l", file);
18633 return;
18634
18635 /* %l is output_asm_label. */
18636
18637 case 'L':
18638 /* Write second word of DImode or DFmode reference. Works on register
18639 or non-indexed memory only. */
18640 if (REG_P (x))
18641 fputs (reg_names[REGNO (x) + 1], file);
18642 else if (MEM_P (x))
18643 {
18644 /* Handle possible auto-increment. Since it is pre-increment and
18645 we have already done it, we can just use an offset of word. */
18646 if (GET_CODE (XEXP (x, 0)) == PRE_INC
18647 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
18648 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
18649 UNITS_PER_WORD));
18650 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
18651 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
18652 UNITS_PER_WORD));
18653 else
18654 output_address (XEXP (adjust_address_nv (x, SImode,
18655 UNITS_PER_WORD),
18656 0));
18657
18658 if (small_data_operand (x, GET_MODE (x)))
18659 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
18660 reg_names[SMALL_DATA_REG]);
18661 }
18662 return;
18663
18664 case 'm':
18665 /* MB value for a mask operand. */
18666 if (! mask_operand (x, SImode))
18667 output_operand_lossage ("invalid %%m value");
18668
18669 fprintf (file, "%d", extract_MB (x));
18670 return;
18671
18672 case 'M':
18673 /* ME value for a mask operand. */
18674 if (! mask_operand (x, SImode))
18675 output_operand_lossage ("invalid %%M value");
18676
18677 fprintf (file, "%d", extract_ME (x));
18678 return;
18679
18680 /* %n outputs the negative of its operand. */
18681
18682 case 'N':
18683 /* Write the number of elements in the vector times 4. */
18684 if (GET_CODE (x) != PARALLEL)
18685 output_operand_lossage ("invalid %%N value");
18686 else
18687 fprintf (file, "%d", XVECLEN (x, 0) * 4);
18688 return;
18689
18690 case 'O':
18691 /* Similar, but subtract 1 first. */
18692 if (GET_CODE (x) != PARALLEL)
18693 output_operand_lossage ("invalid %%O value");
18694 else
18695 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
18696 return;
18697
18698 case 'p':
18699 /* X is a CONST_INT that is a power of two. Output the logarithm. */
18700 if (! INT_P (x)
18701 || INTVAL (x) < 0
18702 || (i = exact_log2 (INTVAL (x))) < 0)
18703 output_operand_lossage ("invalid %%p value");
18704 else
18705 fprintf (file, "%d", i);
18706 return;
18707
18708 case 'P':
18709 /* The operand must be an indirect memory reference. The result
18710 is the register name. */
18711 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
18712 || REGNO (XEXP (x, 0)) >= 32)
18713 output_operand_lossage ("invalid %%P value");
18714 else
18715 fputs (reg_names[REGNO (XEXP (x, 0))], file);
18716 return;
18717
18718 case 'q':
18719 /* This outputs the logical code corresponding to a boolean
18720 expression. The expression may have one or both operands
18721 negated (if one, only the first one). For condition register
18722 logical operations, it will also treat the negated
18723 CR codes as NOTs, but not handle NOTs of them. */
18724 {
18725 const char *const *t = 0;
18726 const char *s;
18727 enum rtx_code code = GET_CODE (x);
18728 static const char * const tbl[3][3] = {
18729 { "and", "andc", "nor" },
18730 { "or", "orc", "nand" },
18731 { "xor", "eqv", "xor" } };
18732
18733 if (code == AND)
18734 t = tbl[0];
18735 else if (code == IOR)
18736 t = tbl[1];
18737 else if (code == XOR)
18738 t = tbl[2];
18739 else
18740 output_operand_lossage ("invalid %%q value");
18741
18742 if (GET_CODE (XEXP (x, 0)) != NOT)
18743 s = t[0];
18744 else
18745 {
18746 if (GET_CODE (XEXP (x, 1)) == NOT)
18747 s = t[2];
18748 else
18749 s = t[1];
18750 }
18751
18752 fputs (s, file);
18753 }
18754 return;
18755
18756 case 'Q':
18757 if (! TARGET_MFCRF)
18758 return;
18759 fputc (',', file);
18760 /* FALLTHRU */
18761
18762 case 'R':
18763 /* X is a CR register. Print the mask for `mtcrf'. */
18764 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
18765 output_operand_lossage ("invalid %%R value");
18766 else
18767 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
18768 return;
18769
18770 case 's':
18771       /* Low 5 bits of 32 - value.  */
18772 if (! INT_P (x))
18773 output_operand_lossage ("invalid %%s value");
18774 else
18775 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
18776 return;
18777
18778 case 'S':
18779 /* PowerPC64 mask position. All 0's is excluded.
18780 CONST_INT 32-bit mask is considered sign-extended so any
18781 transition must occur within the CONST_INT, not on the boundary. */
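      /* E.g., 0x00000000ffffffff is a "clear left" mask and prints 32 (an
	 rldicl MB value), while 0xffffffff00000000 is "clear right" and
	 prints 31 (an rldicr ME value).  */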
18782 if (! mask64_operand (x, DImode))
18783 output_operand_lossage ("invalid %%S value");
18784
18785 uval = INTVAL (x);
18786
18787 if (uval & 1) /* Clear Left */
18788 {
18789 #if HOST_BITS_PER_WIDE_INT > 64
18790 uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
18791 #endif
18792 i = 64;
18793 }
18794 else /* Clear Right */
18795 {
18796 uval = ~uval;
18797 #if HOST_BITS_PER_WIDE_INT > 64
18798 uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
18799 #endif
18800 i = 63;
18801 }
18802 while (uval != 0)
18803 --i, uval >>= 1;
18804 gcc_assert (i >= 0);
18805 fprintf (file, "%d", i);
18806 return;
18807
18808 case 't':
18809 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
18810 gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);
18811
18812 /* Bit 3 is OV bit. */
18813 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
18814
18815 /* If we want bit 31, write a shift count of zero, not 32. */
18816 fprintf (file, "%d", i == 31 ? 0 : i + 1);
18817 return;
18818
18819 case 'T':
18820 /* Print the symbolic name of a branch target register. */
18821 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
18822 && REGNO (x) != CTR_REGNO))
18823 output_operand_lossage ("invalid %%T value");
18824 else if (REGNO (x) == LR_REGNO)
18825 fputs ("lr", file);
18826 else
18827 fputs ("ctr", file);
18828 return;
18829
18830 case 'u':
18831 /* High-order or low-order 16 bits of constant, whichever is non-zero,
18832 for use in unsigned operand. */
18833 if (! INT_P (x))
18834 {
18835 output_operand_lossage ("invalid %%u value");
18836 return;
18837 }
18838
18839 uval = INTVAL (x);
18840 if ((uval & 0xffff) == 0)
18841 uval >>= 16;
18842
18843 fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
18844 return;
18845
18846 case 'v':
18847 /* High-order 16 bits of constant for use in signed operand. */
18848 if (! INT_P (x))
18849 output_operand_lossage ("invalid %%v value");
18850 else
18851 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
18852 (INTVAL (x) >> 16) & 0xffff);
18853 return;
18854
18855 case 'U':
18856 /* Print `u' if this has an auto-increment or auto-decrement. */
18857 if (MEM_P (x)
18858 && (GET_CODE (XEXP (x, 0)) == PRE_INC
18859 || GET_CODE (XEXP (x, 0)) == PRE_DEC
18860 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
18861 putc ('u', file);
18862 return;
18863
18864 case 'V':
18865 /* Print the trap code for this operand. */
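	 /* The bracketed numbers in the comments below are the
	    corresponding TO-field encodings: lt=16, gt=8, eq=4, llt=2,
	    lgt=1, OR-ed together for the compound conditions.  */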
18866 switch (GET_CODE (x))
18867 {
18868 case EQ:
18869 fputs ("eq", file); /* 4 */
18870 break;
18871 case NE:
18872 fputs ("ne", file); /* 24 */
18873 break;
18874 case LT:
18875 fputs ("lt", file); /* 16 */
18876 break;
18877 case LE:
18878 fputs ("le", file); /* 20 */
18879 break;
18880 case GT:
18881 fputs ("gt", file); /* 8 */
18882 break;
18883 case GE:
18884 fputs ("ge", file); /* 12 */
18885 break;
18886 case LTU:
18887 fputs ("llt", file); /* 2 */
18888 break;
18889 case LEU:
18890 fputs ("lle", file); /* 6 */
18891 break;
18892 case GTU:
18893 fputs ("lgt", file); /* 1 */
18894 break;
18895 case GEU:
18896 fputs ("lge", file); /* 5 */
18897 break;
18898 default:
18899 gcc_unreachable ();
18900 }
18901 break;
18902
18903 case 'w':
18904 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
18905 normally. */
18906 if (INT_P (x))
18907 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
18908 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
18909 else
18910 print_operand (file, x, 0);
18911 return;
18912
18913 case 'W':
18914 /* MB value for a PowerPC64 rldic operand. */
18915 i = clz_hwi (INTVAL (x));
18916
18917 fprintf (file, "%d", i);
18918 return;
18919
18920 case 'x':
18921 /* X is a FPR or Altivec register used in a VSX context. */
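      /* FPRs correspond to VSX registers 0..31 and AltiVec registers
	 to VSX registers 32..63, which is what the arithmetic below
	 computes.  */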
18922 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
18923 output_operand_lossage ("invalid %%x value");
18924 else
18925 {
18926 int reg = REGNO (x);
18927 int vsx_reg = (FP_REGNO_P (reg)
18928 ? reg - 32
18929 : reg - FIRST_ALTIVEC_REGNO + 32);
18930
18931 #ifdef TARGET_REGNAMES
18932 if (TARGET_REGNAMES)
18933 fprintf (file, "%%vs%d", vsx_reg);
18934 else
18935 #endif
18936 fprintf (file, "%d", vsx_reg);
18937 }
18938 return;
18939
18940 case 'X':
18941 if (MEM_P (x)
18942 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
18943 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
18944 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
18945 putc ('x', file);
18946 return;
18947
18948 case 'Y':
18949 /* Like 'L', for third word of TImode/PTImode */
18950 if (REG_P (x))
18951 fputs (reg_names[REGNO (x) + 2], file);
18952 else if (MEM_P (x))
18953 {
18954 if (GET_CODE (XEXP (x, 0)) == PRE_INC
18955 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
18956 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 8));
18957 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
18958 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 8));
18959 else
18960 output_address (XEXP (adjust_address_nv (x, SImode, 8), 0));
18961 if (small_data_operand (x, GET_MODE (x)))
18962 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
18963 reg_names[SMALL_DATA_REG]);
18964 }
18965 return;
18966
18967 case 'z':
18968 /* X is a SYMBOL_REF. Write out the name preceded by a
18969 period and without any trailing data in brackets. Used for function
18970 names. If we are configured for System V (or the embedded ABI) on
18971 the PowerPC, do not emit the period, since those systems do not use
18972 TOCs and the like. */
18973 gcc_assert (GET_CODE (x) == SYMBOL_REF);
18974
18975 /* For macho, check to see if we need a stub. */
18976 if (TARGET_MACHO)
18977 {
18978 const char *name = XSTR (x, 0);
18979 #if TARGET_MACHO
18980 if (darwin_emit_branch_islands
18981 && MACHOPIC_INDIRECT
18982 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
18983 name = machopic_indirection_name (x, /*stub_p=*/true);
18984 #endif
18985 assemble_name (file, name);
18986 }
18987 else if (!DOT_SYMBOLS)
18988 assemble_name (file, XSTR (x, 0));
18989 else
18990 rs6000_output_function_entry (file, XSTR (x, 0));
18991 return;
18992
18993 case 'Z':
18994 /* Like 'L', for last word of TImode/PTImode. */
18995 if (REG_P (x))
18996 fputs (reg_names[REGNO (x) + 3], file);
18997 else if (MEM_P (x))
18998 {
18999 if (GET_CODE (XEXP (x, 0)) == PRE_INC
19000 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
19001 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 12));
19002 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
19003 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 12));
19004 else
19005 output_address (XEXP (adjust_address_nv (x, SImode, 12), 0));
19006 if (small_data_operand (x, GET_MODE (x)))
19007 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
19008 reg_names[SMALL_DATA_REG]);
19009 }
19010 return;
19011
19012 /* Print AltiVec or SPE memory operand. */
19013 case 'y':
19014 {
19015 rtx tmp;
19016
19017 gcc_assert (MEM_P (x));
19018
19019 tmp = XEXP (x, 0);
19020
19021 /* Ugly hack because %y is overloaded. */
19022 if ((TARGET_SPE || TARGET_E500_DOUBLE)
19023 && (GET_MODE_SIZE (GET_MODE (x)) == 8
19024 || GET_MODE (x) == TFmode
19025 || GET_MODE (x) == TImode
19026 || GET_MODE (x) == PTImode))
19027 {
19028 /* Handle [reg]. */
19029 if (REG_P (tmp))
19030 {
19031 fprintf (file, "0(%s)", reg_names[REGNO (tmp)]);
19032 break;
19033 }
19034 /* Handle [reg+UIMM]. */
19035 	    else if (GET_CODE (tmp) == PLUS
19036 		     && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
19037 {
19038 	      int offset;
19039
19040 	      gcc_assert (REG_P (XEXP (tmp, 0)));
19041
19042 	      offset = INTVAL (XEXP (tmp, 1));
19043 	      fprintf (file, "%d(%s)", offset, reg_names[REGNO (XEXP (tmp, 0))]);
19044 break;
19045 }
19046
19047 /* Fall through. Must be [reg+reg]. */
19048 }
19049 if (VECTOR_MEM_ALTIVEC_P (GET_MODE (x))
19050 && GET_CODE (tmp) == AND
19051 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
19052 && INTVAL (XEXP (tmp, 1)) == -16)
19053 tmp = XEXP (tmp, 0);
19054 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
19055 && GET_CODE (tmp) == PRE_MODIFY)
19056 tmp = XEXP (tmp, 1);
19057 if (REG_P (tmp))
19058 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
19059 else
19060 {
19061 if (GET_CODE (tmp) != PLUS
19062 || !REG_P (XEXP (tmp, 0))
19063 || !REG_P (XEXP (tmp, 1)))
19064 {
19065 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
19066 break;
19067 }
19068
19069 if (REGNO (XEXP (tmp, 0)) == 0)
19070 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
19071 reg_names[ REGNO (XEXP (tmp, 0)) ]);
19072 else
19073 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
19074 reg_names[ REGNO (XEXP (tmp, 1)) ]);
19075 }
19076 break;
19077 }
19078
19079 case 0:
19080 if (REG_P (x))
19081 fprintf (file, "%s", reg_names[REGNO (x)]);
19082 else if (MEM_P (x))
19083 {
19084 /* We need to handle PRE_INC and PRE_DEC here, since we need to
19085 know the width from the mode. */
19086 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
19087 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
19088 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
19089 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
19090 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
19091 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
19092 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
19093 output_address (XEXP (XEXP (x, 0), 1));
19094 else
19095 output_address (XEXP (x, 0));
19096 }
19097 else
19098 {
19099 if (toc_relative_expr_p (x, false))
19100 /* This hack along with a corresponding hack in
19101 rs6000_output_addr_const_extra arranges to output addends
19102 	       where the assembler expects to find them.  E.g.
19103 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
19104 without this hack would be output as "x@toc+4". We
19105 want "x+4@toc". */
19106 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
19107 else
19108 output_addr_const (file, x);
19109 }
19110 return;
19111
19112 case '&':
19113 if (const char *name = get_some_local_dynamic_name ())
19114 assemble_name (file, name);
19115 else
19116 output_operand_lossage ("'%%&' used without any "
19117 "local dynamic TLS references");
19118 return;
19119
19120 default:
19121 output_operand_lossage ("invalid %%xn code");
19122 }
19123 }
19124 \f
19125 /* Print the address of an operand. */
19126
19127 void
19128 print_operand_address (FILE *file, rtx x)
19129 {
19130 if (REG_P (x))
19131 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
19132 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
19133 || GET_CODE (x) == LABEL_REF)
19134 {
19135 output_addr_const (file, x);
19136 if (small_data_operand (x, GET_MODE (x)))
19137 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
19138 reg_names[SMALL_DATA_REG]);
19139 else
19140 gcc_assert (!TARGET_TOC);
19141 }
19142 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
19143 && REG_P (XEXP (x, 1)))
19144 {
19145 if (REGNO (XEXP (x, 0)) == 0)
19146 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
19147 reg_names[ REGNO (XEXP (x, 0)) ]);
19148 else
19149 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
19150 reg_names[ REGNO (XEXP (x, 1)) ]);
19151 }
19152 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
19153 && GET_CODE (XEXP (x, 1)) == CONST_INT)
19154 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
19155 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
19156 #if TARGET_MACHO
19157 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
19158 && CONSTANT_P (XEXP (x, 1)))
19159 {
19160 fprintf (file, "lo16(");
19161 output_addr_const (file, XEXP (x, 1));
19162 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
19163 }
19164 #endif
19165 #if TARGET_ELF
19166 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
19167 && CONSTANT_P (XEXP (x, 1)))
19168 {
19169 output_addr_const (file, XEXP (x, 1));
19170 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
19171 }
19172 #endif
19173 else if (toc_relative_expr_p (x, false))
19174 {
19175       /* This hack along with a corresponding hack in
19176 	 rs6000_output_addr_const_extra arranges to output addends
19177 	 where the assembler expects to find them.  E.g.
19178 	 (lo_sum (reg 9)
19179 		 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
19180 	 without this hack would be output as "x@toc+8@l(9)".  We
19181 	 want "x+8@toc@l(9)".  */
19182 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
19183 if (GET_CODE (x) == LO_SUM)
19184 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
19185 else
19186 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base, 0, 1))]);
19187 }
19188 else
19189 gcc_unreachable ();
19190 }
19191 \f
19192 /* Implement TARGET_OUTPUT_ADDR_CONST_EXTRA. */
19193
19194 static bool
19195 rs6000_output_addr_const_extra (FILE *file, rtx x)
19196 {
19197 if (GET_CODE (x) == UNSPEC)
19198 switch (XINT (x, 1))
19199 {
19200 case UNSPEC_TOCREL:
19201 gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
19202 && REG_P (XVECEXP (x, 0, 1))
19203 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
19204 output_addr_const (file, XVECEXP (x, 0, 0));
19205 if (x == tocrel_base && tocrel_offset != const0_rtx)
19206 {
19207 if (INTVAL (tocrel_offset) >= 0)
19208 fprintf (file, "+");
19209 output_addr_const (file, CONST_CAST_RTX (tocrel_offset));
19210 }
19211 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
19212 {
19213 putc ('-', file);
19214 assemble_name (file, toc_label_name);
19215 }
19216 else if (TARGET_ELF)
19217 fputs ("@toc", file);
19218 return true;
19219
19220 #if TARGET_MACHO
19221 case UNSPEC_MACHOPIC_OFFSET:
19222 output_addr_const (file, XVECEXP (x, 0, 0));
19223 putc ('-', file);
19224 machopic_output_function_base_name (file);
19225 return true;
19226 #endif
19227 }
19228 return false;
19229 }
19230 \f
19231 /* Target hook for assembling integer objects. The PowerPC version has
19232 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
19233 is defined. It also needs to handle DI-mode objects on 64-bit
19234 targets. */
19235
19236 static bool
19237 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
19238 {
19239 #ifdef RELOCATABLE_NEEDS_FIXUP
19240 /* Special handling for SI values. */
19241 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
19242 {
19243 static int recurse = 0;
19244
19245 /* For -mrelocatable, we mark all addresses that need to be fixed up in
19246 the .fixup section. Since the TOC section is already relocated, we
19247 don't need to mark it here. We used to skip the text section, but it
19248 should never be valid for relocated addresses to be placed in the text
19249 section. */
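      /* As an illustration, for a data word referencing "x" this emits
	 roughly:
		.LCP0:	.long (x)@fixup
			.section ".fixup","aw"
			.align 2
			.long .LCP0
			.previous
	 (label name illustrative; see ASM_GENERATE_INTERNAL_LABEL below).  */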
19250 if (TARGET_RELOCATABLE
19251 && in_section != toc_section
19252 && !recurse
19253 && !CONST_SCALAR_INT_P (x)
19254 && CONSTANT_P (x))
19255 {
19256 char buf[256];
19257
19258 recurse = 1;
19259 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
19260 fixuplabelno++;
19261 ASM_OUTPUT_LABEL (asm_out_file, buf);
19262 fprintf (asm_out_file, "\t.long\t(");
19263 output_addr_const (asm_out_file, x);
19264 fprintf (asm_out_file, ")@fixup\n");
19265 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
19266 ASM_OUTPUT_ALIGN (asm_out_file, 2);
19267 fprintf (asm_out_file, "\t.long\t");
19268 assemble_name (asm_out_file, buf);
19269 fprintf (asm_out_file, "\n\t.previous\n");
19270 recurse = 0;
19271 return true;
19272 }
19273 /* Remove initial .'s to turn a -mcall-aixdesc function
19274 address into the address of the descriptor, not the function
19275 itself. */
19276 else if (GET_CODE (x) == SYMBOL_REF
19277 && XSTR (x, 0)[0] == '.'
19278 && DEFAULT_ABI == ABI_AIX)
19279 {
19280 const char *name = XSTR (x, 0);
19281 while (*name == '.')
19282 name++;
19283
19284 fprintf (asm_out_file, "\t.long\t%s\n", name);
19285 return true;
19286 }
19287 }
19288 #endif /* RELOCATABLE_NEEDS_FIXUP */
19289 return default_assemble_integer (x, size, aligned_p);
19290 }
19291
19292 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
19293 /* Emit an assembler directive to set symbol visibility for DECL to
19294 VISIBILITY_TYPE. */
19295
19296 static void
19297 rs6000_assemble_visibility (tree decl, int vis)
19298 {
19299 if (TARGET_XCOFF)
19300 return;
19301
19302 /* Functions need to have their entry point symbol visibility set as
19303 well as their descriptor symbol visibility. */
19304 if (DEFAULT_ABI == ABI_AIX
19305 && DOT_SYMBOLS
19306 && TREE_CODE (decl) == FUNCTION_DECL)
19307 {
19308 static const char * const visibility_types[] = {
19309 NULL, "internal", "hidden", "protected"
19310 };
19311
19312 const char *name, *type;
19313
19314 name = ((* targetm.strip_name_encoding)
19315 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
19316 type = visibility_types[vis];
19317
19318 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
19319 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
19320 }
19321 else
19322 default_assemble_visibility (decl, vis);
19323 }
19324 #endif
19325 \f
19326 enum rtx_code
19327 rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
19328 {
19329   /* Reversal of FP compares needs care -- an ordered compare
19330      becomes an unordered compare and vice versa.  */
19331 if (mode == CCFPmode
19332 && (!flag_finite_math_only
19333 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
19334 || code == UNEQ || code == LTGT))
19335 return reverse_condition_maybe_unordered (code);
19336 else
19337 return reverse_condition (code);
19338 }
19339
19340 /* Generate a compare for CODE. Return a brand-new rtx that
19341 represents the result of the compare. */
19342
19343 static rtx
19344 rs6000_generate_compare (rtx cmp, machine_mode mode)
19345 {
19346 machine_mode comp_mode;
19347 rtx compare_result;
19348 enum rtx_code code = GET_CODE (cmp);
19349 rtx op0 = XEXP (cmp, 0);
19350 rtx op1 = XEXP (cmp, 1);
19351
19352 if (FLOAT_MODE_P (mode))
19353 comp_mode = CCFPmode;
19354 else if (code == GTU || code == LTU
19355 || code == GEU || code == LEU)
19356 comp_mode = CCUNSmode;
19357 else if ((code == EQ || code == NE)
19358 && unsigned_reg_p (op0)
19359 && (unsigned_reg_p (op1)
19360 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
19361     /* These are unsigned values; perhaps there will be a later
19362        ordering compare that can be shared with this one.  */
19363 comp_mode = CCUNSmode;
19364 else
19365 comp_mode = CCmode;
19366
19367 /* If we have an unsigned compare, make sure we don't have a signed value as
19368 an immediate. */
19369 if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
19370 && INTVAL (op1) < 0)
19371 {
19372 op0 = copy_rtx_if_shared (op0);
19373 op1 = force_reg (GET_MODE (op0), op1);
19374 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
19375 }
19376
19377 /* First, the compare. */
19378 compare_result = gen_reg_rtx (comp_mode);
19379
19380 /* E500 FP compare instructions on the GPRs. Yuck! */
19381 if ((!TARGET_FPRS && TARGET_HARD_FLOAT)
19382 && FLOAT_MODE_P (mode))
19383 {
19384 rtx cmp, or_result, compare_result2;
19385 machine_mode op_mode = GET_MODE (op0);
19386 bool reverse_p;
19387
19388 if (op_mode == VOIDmode)
19389 op_mode = GET_MODE (op1);
19390
19391 /* First reverse the condition codes that aren't directly supported. */
19392 switch (code)
19393 {
19394 case NE:
19395 case UNLT:
19396 case UNLE:
19397 case UNGT:
19398 case UNGE:
19399 code = reverse_condition_maybe_unordered (code);
19400 reverse_p = true;
19401 break;
19402
19403 case EQ:
19404 case LT:
19405 case LE:
19406 case GT:
19407 case GE:
19408 reverse_p = false;
19409 break;
19410
19411 default:
19412 gcc_unreachable ();
19413 }
19414
19415 /* The E500 FP compare instructions toggle the GT bit (CR bit 1) only.
19416 This explains the following mess. */
19417
19418 switch (code)
19419 {
19420 case EQ:
19421 switch (op_mode)
19422 {
19423 case SFmode:
19424 cmp = (flag_finite_math_only && !flag_trapping_math)
19425 ? gen_tstsfeq_gpr (compare_result, op0, op1)
19426 : gen_cmpsfeq_gpr (compare_result, op0, op1);
19427 break;
19428
19429 case DFmode:
19430 cmp = (flag_finite_math_only && !flag_trapping_math)
19431 ? gen_tstdfeq_gpr (compare_result, op0, op1)
19432 : gen_cmpdfeq_gpr (compare_result, op0, op1);
19433 break;
19434
19435 case TFmode:
19436 cmp = (flag_finite_math_only && !flag_trapping_math)
19437 ? gen_tsttfeq_gpr (compare_result, op0, op1)
19438 : gen_cmptfeq_gpr (compare_result, op0, op1);
19439 break;
19440
19441 default:
19442 gcc_unreachable ();
19443 }
19444 break;
19445
19446 case GT:
19447 case GE:
19448 switch (op_mode)
19449 {
19450 case SFmode:
19451 cmp = (flag_finite_math_only && !flag_trapping_math)
19452 ? gen_tstsfgt_gpr (compare_result, op0, op1)
19453 : gen_cmpsfgt_gpr (compare_result, op0, op1);
19454 break;
19455
19456 case DFmode:
19457 cmp = (flag_finite_math_only && !flag_trapping_math)
19458 ? gen_tstdfgt_gpr (compare_result, op0, op1)
19459 : gen_cmpdfgt_gpr (compare_result, op0, op1);
19460 break;
19461
19462 case TFmode:
19463 cmp = (flag_finite_math_only && !flag_trapping_math)
19464 ? gen_tsttfgt_gpr (compare_result, op0, op1)
19465 : gen_cmptfgt_gpr (compare_result, op0, op1);
19466 break;
19467
19468 default:
19469 gcc_unreachable ();
19470 }
19471 break;
19472
19473 case LT:
19474 case LE:
19475 switch (op_mode)
19476 {
19477 case SFmode:
19478 cmp = (flag_finite_math_only && !flag_trapping_math)
19479 ? gen_tstsflt_gpr (compare_result, op0, op1)
19480 : gen_cmpsflt_gpr (compare_result, op0, op1);
19481 break;
19482
19483 case DFmode:
19484 cmp = (flag_finite_math_only && !flag_trapping_math)
19485 ? gen_tstdflt_gpr (compare_result, op0, op1)
19486 : gen_cmpdflt_gpr (compare_result, op0, op1);
19487 break;
19488
19489 case TFmode:
19490 cmp = (flag_finite_math_only && !flag_trapping_math)
19491 ? gen_tsttflt_gpr (compare_result, op0, op1)
19492 : gen_cmptflt_gpr (compare_result, op0, op1);
19493 break;
19494
19495 default:
19496 gcc_unreachable ();
19497 }
19498 break;
19499
19500 default:
19501 gcc_unreachable ();
19502 }
19503
19504 /* Synthesize LE and GE from LT/GT || EQ. */
19505 if (code == LE || code == GE)
19506 {
19507 emit_insn (cmp);
19508
19509 compare_result2 = gen_reg_rtx (CCFPmode);
19510
19511 /* Do the EQ. */
19512 switch (op_mode)
19513 {
19514 case SFmode:
19515 cmp = (flag_finite_math_only && !flag_trapping_math)
19516 ? gen_tstsfeq_gpr (compare_result2, op0, op1)
19517 : gen_cmpsfeq_gpr (compare_result2, op0, op1);
19518 break;
19519
19520 case DFmode:
19521 cmp = (flag_finite_math_only && !flag_trapping_math)
19522 ? gen_tstdfeq_gpr (compare_result2, op0, op1)
19523 : gen_cmpdfeq_gpr (compare_result2, op0, op1);
19524 break;
19525
19526 case TFmode:
19527 cmp = (flag_finite_math_only && !flag_trapping_math)
19528 ? gen_tsttfeq_gpr (compare_result2, op0, op1)
19529 : gen_cmptfeq_gpr (compare_result2, op0, op1);
19530 break;
19531
19532 default:
19533 gcc_unreachable ();
19534 }
19535
19536 emit_insn (cmp);
19537
19538 /* OR them together. */
19539 or_result = gen_reg_rtx (CCFPmode);
19540 cmp = gen_e500_cr_ior_compare (or_result, compare_result,
19541 compare_result2);
19542 compare_result = or_result;
19543 }
19544
19545 code = reverse_p ? NE : EQ;
19546
19547 emit_insn (cmp);
19548 }
19549 else
19550 {
19551 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
19552 CLOBBERs to match cmptf_internal2 pattern. */
19553 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
19554 && GET_MODE (op0) == TFmode
19555 && !TARGET_IEEEQUAD
19556 && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128)
19557 emit_insn (gen_rtx_PARALLEL (VOIDmode,
19558 gen_rtvec (10,
19559 gen_rtx_SET (compare_result,
19560 gen_rtx_COMPARE (comp_mode, op0, op1)),
19561 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
19562 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
19563 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
19564 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
19565 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
19566 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
19567 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
19568 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
19569 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
19570 else if (GET_CODE (op1) == UNSPEC
19571 && XINT (op1, 1) == UNSPEC_SP_TEST)
19572 {
19573 rtx op1b = XVECEXP (op1, 0, 0);
19574 comp_mode = CCEQmode;
19575 compare_result = gen_reg_rtx (CCEQmode);
19576 if (TARGET_64BIT)
19577 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
19578 else
19579 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
19580 }
19581 else
19582 emit_insn (gen_rtx_SET (compare_result,
19583 gen_rtx_COMPARE (comp_mode, op0, op1)));
19584 }
19585
19586 /* Some kinds of FP comparisons need an OR operation;
19587 under flag_finite_math_only we don't bother. */
19588 if (FLOAT_MODE_P (mode)
19589 && !flag_finite_math_only
19590 && !(TARGET_HARD_FLOAT && !TARGET_FPRS)
19591 && (code == LE || code == GE
19592 || code == UNEQ || code == LTGT
19593 || code == UNGT || code == UNLT))
19594 {
19595 enum rtx_code or1, or2;
19596 rtx or1_rtx, or2_rtx, compare2_rtx;
19597 rtx or_result = gen_reg_rtx (CCEQmode);
19598
19599 switch (code)
19600 {
19601 case LE: or1 = LT; or2 = EQ; break;
19602 case GE: or1 = GT; or2 = EQ; break;
19603 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
19604 case LTGT: or1 = LT; or2 = GT; break;
19605 case UNGT: or1 = UNORDERED; or2 = GT; break;
19606 case UNLT: or1 = UNORDERED; or2 = LT; break;
19607 default: gcc_unreachable ();
19608 }
19609 validate_condition_mode (or1, comp_mode);
19610 validate_condition_mode (or2, comp_mode);
19611 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
19612 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
19613 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
19614 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
19615 const_true_rtx);
19616 emit_insn (gen_rtx_SET (or_result, compare2_rtx));
19617
19618 compare_result = or_result;
19619 code = EQ;
19620 }
19621
19622 validate_condition_mode (code, GET_MODE (compare_result));
19623
19624 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
19625 }
19626
19627
19628 /* Emit the RTL for an sISEL pattern. */
19629
19630 void
19631 rs6000_emit_sISEL (machine_mode mode ATTRIBUTE_UNUSED, rtx operands[])
19632 {
19633 rs6000_emit_int_cmove (operands[0], operands[1], const1_rtx, const0_rtx);
19634 }
19635
19636 /* Emit RTL that sets a register to zero if OP1 and OP2 are equal. SCRATCH
19637 can be used as that dest register. Return the dest register. */
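/* E.g. (illustrative, register numbers arbitrary): comparing r3 against
   the logical constant 0x20 emits "xori 9,3,0x20", leaving zero in r9
   exactly when the operands are equal; constants that are not logical
   operands are handled by a subtract instead.  */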
19638
19639 rtx
19640 rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
19641 {
19642 if (op2 == const0_rtx)
19643 return op1;
19644
19645 if (GET_CODE (scratch) == SCRATCH)
19646 scratch = gen_reg_rtx (mode);
19647
19648 if (logical_operand (op2, mode))
19649 emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
19650 else
19651 emit_insn (gen_rtx_SET (scratch,
19652 gen_rtx_PLUS (mode, op1, negate_rtx (mode, op2))));
19653
19654 return scratch;
19655 }
19656
19657 void
19658 rs6000_emit_sCOND (machine_mode mode, rtx operands[])
19659 {
19660 rtx condition_rtx;
19661 machine_mode op_mode;
19662 enum rtx_code cond_code;
19663 rtx result = operands[0];
19664
19665 condition_rtx = rs6000_generate_compare (operands[1], mode);
19666 cond_code = GET_CODE (condition_rtx);
19667
19668 if (FLOAT_MODE_P (mode)
19669 && !TARGET_FPRS && TARGET_HARD_FLOAT)
19670 {
19671 rtx t;
19672
19673 PUT_MODE (condition_rtx, SImode);
19674 t = XEXP (condition_rtx, 0);
19675
19676 gcc_assert (cond_code == NE || cond_code == EQ);
19677
19678 if (cond_code == NE)
19679 emit_insn (gen_e500_flip_gt_bit (t, t));
19680
19681 emit_insn (gen_move_from_CR_gt_bit (result, t));
19682 return;
19683 }
19684
19685 if (cond_code == NE
19686 || cond_code == GE || cond_code == LE
19687 || cond_code == GEU || cond_code == LEU
19688 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
19689 {
19690 rtx not_result = gen_reg_rtx (CCEQmode);
19691 rtx not_op, rev_cond_rtx;
19692 machine_mode cc_mode;
19693
19694 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
19695
19696 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
19697 SImode, XEXP (condition_rtx, 0), const0_rtx);
19698 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
19699 emit_insn (gen_rtx_SET (not_result, not_op));
19700 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
19701 }
19702
19703 op_mode = GET_MODE (XEXP (operands[1], 0));
19704 if (op_mode == VOIDmode)
19705 op_mode = GET_MODE (XEXP (operands[1], 1));
19706
19707 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
19708 {
19709 PUT_MODE (condition_rtx, DImode);
19710 convert_move (result, condition_rtx, 0);
19711 }
19712 else
19713 {
19714 PUT_MODE (condition_rtx, SImode);
19715 emit_insn (gen_rtx_SET (result, condition_rtx));
19716 }
19717 }
19718
19719 /* Emit a conditional branch to the label in OPERANDS[3], testing the
   comparison in OPERANDS[0].  */
19720
19721 void
19722 rs6000_emit_cbranch (machine_mode mode, rtx operands[])
19723 {
19724 rtx condition_rtx, loc_ref;
19725
19726 condition_rtx = rs6000_generate_compare (operands[0], mode);
19727 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
19728 emit_jump_insn (gen_rtx_SET (pc_rtx,
19729 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
19730 loc_ref, pc_rtx)));
19731 }
19732
19733 /* Return the string to output a conditional branch to LABEL, which is
19734 the operand template of the label, or NULL if the branch is really a
19735 conditional return.
19736
19737 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
19738 condition code register and its mode specifies what kind of
19739 comparison we made.
19740
19741 REVERSED is nonzero if we should reverse the sense of the comparison.
19742
19743 INSN is the insn. */
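/* E.g. (illustrative): a probable equality branch on cr0 comes back as
   "beq+ 0,.L5", while an out-of-range branch is inverted around an
   unconditional one, "bne 0,$+8" followed by "b .L5".  */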
19744
19745 char *
19746 output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
19747 {
19748 static char string[64];
19749 enum rtx_code code = GET_CODE (op);
19750 rtx cc_reg = XEXP (op, 0);
19751 machine_mode mode = GET_MODE (cc_reg);
19752 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
19753 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
19754 int really_reversed = reversed ^ need_longbranch;
19755 char *s = string;
19756 const char *ccode;
19757 const char *pred;
19758 rtx note;
19759
19760 validate_condition_mode (code, mode);
19761
19762 /* Work out which way this really branches. We could use
19763 reverse_condition_maybe_unordered here always but this
19764 makes the resulting assembler clearer. */
19765 if (really_reversed)
19766 {
19767       /* Reversal of FP compares needs care -- an ordered compare
19768 	 becomes an unordered compare and vice versa.  */
19769 if (mode == CCFPmode)
19770 code = reverse_condition_maybe_unordered (code);
19771 else
19772 code = reverse_condition (code);
19773 }
19774
19775 if ((!TARGET_FPRS && TARGET_HARD_FLOAT) && mode == CCFPmode)
19776 {
19777 /* The efscmp/tst* instructions twiddle bit 2, which maps nicely
19778 to the GT bit. */
19779 switch (code)
19780 {
19781 case EQ:
19782 /* Opposite of GT. */
19783 code = GT;
19784 break;
19785
19786 case NE:
19787 code = UNLE;
19788 break;
19789
19790 default:
19791 gcc_unreachable ();
19792 }
19793 }
19794
19795 switch (code)
19796 {
19797 /* Not all of these are actually distinct opcodes, but
19798 we distinguish them for clarity of the resulting assembler. */
19799 case NE: case LTGT:
19800 ccode = "ne"; break;
19801 case EQ: case UNEQ:
19802 ccode = "eq"; break;
19803 case GE: case GEU:
19804 ccode = "ge"; break;
19805 case GT: case GTU: case UNGT:
19806 ccode = "gt"; break;
19807 case LE: case LEU:
19808 ccode = "le"; break;
19809 case LT: case LTU: case UNLT:
19810 ccode = "lt"; break;
19811 case UNORDERED: ccode = "un"; break;
19812 case ORDERED: ccode = "nu"; break;
19813 case UNGE: ccode = "nl"; break;
19814 case UNLE: ccode = "ng"; break;
19815 default:
19816 gcc_unreachable ();
19817 }
19818
19819 /* Maybe we have a guess as to how likely the branch is. */
19820 pred = "";
19821 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
19822 if (note != NULL_RTX)
19823 {
19824 /* PROB is the difference from 50%. */
19825 int prob = XINT (note, 0) - REG_BR_PROB_BASE / 2;
19826
19827 /* Only hint for highly probable/improbable branches on newer
19828 cpus as static prediction overrides processor dynamic
19829 prediction. For older cpus we may as well always hint, but
19830 assume not taken for branches that are very close to 50% as a
19831 mispredicted taken branch is more expensive than a
19832 mispredicted not-taken branch. */
19833 if (rs6000_always_hint
19834 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
19835 && br_prob_note_reliable_p (note)))
19836 {
19837 if (abs (prob) > REG_BR_PROB_BASE / 20
19838 && ((prob > 0) ^ need_longbranch))
19839 pred = "+";
19840 else
19841 pred = "-";
19842 }
19843 }
19844
19845 if (label == NULL)
19846 s += sprintf (s, "b%slr%s ", ccode, pred);
19847 else
19848 s += sprintf (s, "b%s%s ", ccode, pred);
19849
19850 /* We need to escape any '%' characters in the reg_names string.
19851 Assume they'd only be the first character.... */
19852 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
19853 *s++ = '%';
19854 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
19855
19856 if (label != NULL)
19857 {
19858 /* If the branch distance was too far, we may have to use an
19859 unconditional branch to go the distance. */
19860 if (need_longbranch)
19861 s += sprintf (s, ",$+8\n\tb %s", label);
19862 else
19863 s += sprintf (s, ",%s", label);
19864 }
19865
19866 return string;
19867 }
19868
19869 /* Return the string to flip the GT bit on a CR. */
19870 char *
19871 output_e500_flip_gt_bit (rtx dst, rtx src)
19872 {
19873 static char string[64];
19874 int a, b;
19875
19876 gcc_assert (GET_CODE (dst) == REG && CR_REGNO_P (REGNO (dst))
19877 && GET_CODE (src) == REG && CR_REGNO_P (REGNO (src)));
19878
19879 /* GT bit. */
19880 a = 4 * (REGNO (dst) - CR0_REGNO) + 1;
19881 b = 4 * (REGNO (src) - CR0_REGNO) + 1;
19882
19883 sprintf (string, "crnot %d,%d", a, b);
19884 return string;
19885 }
19886
19887 /* Return insn for VSX or Altivec comparisons. */
19888
19889 static rtx
19890 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
19891 {
19892 rtx mask;
19893 machine_mode mode = GET_MODE (op0);
19894
19895 switch (code)
19896 {
19897 default:
19898 break;
19899
19900 case GE:
19901 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
19902 return NULL_RTX;
19903
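      /* FALLTHRU */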
19904 case EQ:
19905 case GT:
19906 case GTU:
19907 case ORDERED:
19908 case UNORDERED:
19909 case UNEQ:
19910 case LTGT:
19911 mask = gen_reg_rtx (mode);
19912 emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
19913 return mask;
19914 }
19915
19916 return NULL_RTX;
19917 }
19918
19919 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
19920 DMODE is expected destination mode. This is a recursive function. */
19921
19922 static rtx
19923 rs6000_emit_vector_compare (enum rtx_code rcode,
19924 rtx op0, rtx op1,
19925 machine_mode dmode)
19926 {
19927 rtx mask;
19928 bool swap_operands = false;
19929 bool try_again = false;
19930
19931 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
19932 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
19933
19934 /* See if the comparison works as is. */
19935 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
19936 if (mask)
19937 return mask;
19938
19939 switch (rcode)
19940 {
19941 case LT:
19942 rcode = GT;
19943 swap_operands = true;
19944 try_again = true;
19945 break;
19946 case LTU:
19947 rcode = GTU;
19948 swap_operands = true;
19949 try_again = true;
19950 break;
19951 case NE:
19952 case UNLE:
19953 case UNLT:
19954 case UNGE:
19955 case UNGT:
19956 /* Invert condition and try again.
19957 e.g., A != B becomes ~(A==B). */
19958 {
19959 enum rtx_code rev_code;
19960 enum insn_code nor_code;
19961 rtx mask2;
19962
19963 rev_code = reverse_condition_maybe_unordered (rcode);
19964 if (rev_code == UNKNOWN)
19965 return NULL_RTX;
19966
19967 nor_code = optab_handler (one_cmpl_optab, dmode);
19968 if (nor_code == CODE_FOR_nothing)
19969 return NULL_RTX;
19970
19971 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
19972 if (!mask2)
19973 return NULL_RTX;
19974
19975 mask = gen_reg_rtx (dmode);
19976 emit_insn (GEN_FCN (nor_code) (mask, mask2));
19977 return mask;
19978 }
19979 break;
19980 case GE:
19981 case GEU:
19982 case LE:
19983 case LEU:
19984 /* Try GT/GTU/LT/LTU OR EQ */
19985 {
19986 rtx c_rtx, eq_rtx;
19987 enum insn_code ior_code;
19988 enum rtx_code new_code;
19989
19990 switch (rcode)
19991 {
19992 case GE:
19993 new_code = GT;
19994 break;
19995
19996 case GEU:
19997 new_code = GTU;
19998 break;
19999
20000 case LE:
20001 new_code = LT;
20002 break;
20003
20004 case LEU:
20005 new_code = LTU;
20006 break;
20007
20008 default:
20009 gcc_unreachable ();
20010 }
20011
20012 ior_code = optab_handler (ior_optab, dmode);
20013 if (ior_code == CODE_FOR_nothing)
20014 return NULL_RTX;
20015
20016 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
20017 if (!c_rtx)
20018 return NULL_RTX;
20019
20020 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
20021 if (!eq_rtx)
20022 return NULL_RTX;
20023
20024 mask = gen_reg_rtx (dmode);
20025 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
20026 return mask;
20027 }
20028 break;
20029 default:
20030 return NULL_RTX;
20031 }
20032
20033 if (try_again)
20034 {
20035 if (swap_operands)
20036 std::swap (op0, op1);
20037
20038 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
20039 if (mask)
20040 return mask;
20041 }
20042
20043 /* You only get two chances. */
20044 return NULL_RTX;
20045 }
20046
20047 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
20048 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
20049 operands for the relation operation COND. */
20050
20051 int
20052 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
20053 rtx cond, rtx cc_op0, rtx cc_op1)
20054 {
20055 machine_mode dest_mode = GET_MODE (dest);
20056 machine_mode mask_mode = GET_MODE (cc_op0);
20057 enum rtx_code rcode = GET_CODE (cond);
20058 machine_mode cc_mode = CCmode;
20059 rtx mask;
20060   rtx cond2;
20062 bool invert_move = false;
20063
20064 if (VECTOR_UNIT_NONE_P (dest_mode))
20065 return 0;
20066
20067 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
20068 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
20069
20070 switch (rcode)
20071 {
20072     /* Swap operands if we can; otherwise fall back to doing the
20073        operation as specified and using a NOR to invert the test.  */
20074 case NE:
20075 case UNLE:
20076 case UNLT:
20077 case UNGE:
20078 case UNGT:
20079 /* Invert condition and try again.
20080 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
20081 invert_move = true;
20082 rcode = reverse_condition_maybe_unordered (rcode);
20083 if (rcode == UNKNOWN)
20084 return 0;
20085 break;
20086
20087 /* Mark unsigned tests with CCUNSmode. */
20088 case GTU:
20089 case GEU:
20090 case LTU:
20091 case LEU:
20092 cc_mode = CCUNSmode;
20093 break;
20094
20095 default:
20096 break;
20097 }
20098
20099 /* Get the vector mask for the given relational operations. */
20100 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
20101
20102 if (!mask)
20103 return 0;
20104
20105   if (invert_move)
20106     std::swap (op_true, op_false);
20111
20112 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
20113 CONST0_RTX (dest_mode));
20114 emit_insn (gen_rtx_SET (dest,
20115 gen_rtx_IF_THEN_ELSE (dest_mode,
20116 cond2,
20117 op_true,
20118 op_false)));
20119 return 1;
20120 }
20121
20122 /* Emit a conditional move: move TRUE_COND to DEST if OP, applied to the
20123    operands of the last comparison, is nonzero/true; FALSE_COND if it
20124    is zero/false.  Return 0 if the hardware has no such operation.  */
20125
20126 int
20127 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
20128 {
20129 enum rtx_code code = GET_CODE (op);
20130 rtx op0 = XEXP (op, 0);
20131 rtx op1 = XEXP (op, 1);
20132 REAL_VALUE_TYPE c1;
20133 machine_mode compare_mode = GET_MODE (op0);
20134 machine_mode result_mode = GET_MODE (dest);
20135 rtx temp;
20136 bool is_against_zero;
20137
20138 /* These modes should always match. */
20139 if (GET_MODE (op1) != compare_mode
20140 /* In the isel case however, we can use a compare immediate, so
20141 op1 may be a small constant. */
20142 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
20143 return 0;
20144 if (GET_MODE (true_cond) != result_mode)
20145 return 0;
20146 if (GET_MODE (false_cond) != result_mode)
20147 return 0;
20148
20149 /* Don't allow using floating point comparisons for integer results for
20150 now. */
20151 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
20152 return 0;
20153
20154 /* First, work out if the hardware can do this at all, or
20155 if it's too slow.... */
20156 if (!FLOAT_MODE_P (compare_mode))
20157 {
20158 if (TARGET_ISEL)
20159 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
20160 return 0;
20161 }
20162 else if (TARGET_HARD_FLOAT && !TARGET_FPRS
20163 && SCALAR_FLOAT_MODE_P (compare_mode))
20164 return 0;
20165
20166 is_against_zero = op1 == CONST0_RTX (compare_mode);
20167
20168 /* A floating-point subtract might overflow, underflow, or produce
20169 an inexact result, thus changing the floating-point flags, so it
20170 can't be generated if we care about that. It's safe if one side
20171 of the construct is zero, since then no subtract will be
20172 generated. */
20173 if (SCALAR_FLOAT_MODE_P (compare_mode)
20174 && flag_trapping_math && ! is_against_zero)
20175 return 0;
20176
20177 /* Eliminate half of the comparisons by switching operands, this
20178 makes the remaining code simpler. */
20179 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
20180 || code == LTGT || code == LT || code == UNLE)
20181 {
20182 code = reverse_condition_maybe_unordered (code);
20183 temp = true_cond;
20184 true_cond = false_cond;
20185 false_cond = temp;
20186 }
20187
20188   /* UNEQ and LTGT take four instructions for a comparison with zero;
20189      it'll probably be faster to use a branch here too.  */
20190 if (code == UNEQ && HONOR_NANS (compare_mode))
20191 return 0;
20192
20193 if (GET_CODE (op1) == CONST_DOUBLE)
20194 REAL_VALUE_FROM_CONST_DOUBLE (c1, op1);
20195
20196 /* We're going to try to implement comparisons by performing
20197 a subtract, then comparing against zero. Unfortunately,
20198 Inf - Inf is NaN which is not zero, and so if we don't
20199 know that the operand is finite and the comparison
20200      would treat EQ differently from UNORDERED, we can't do it.  */
20201 if (HONOR_INFINITIES (compare_mode)
20202 && code != GT && code != UNGE
20203 && (GET_CODE (op1) != CONST_DOUBLE || real_isinf (&c1))
20204 /* Constructs of the form (a OP b ? a : b) are safe. */
20205 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
20206 || (! rtx_equal_p (op0, true_cond)
20207 && ! rtx_equal_p (op1, true_cond))))
20208 return 0;
20209
20210 /* At this point we know we can use fsel. */
20211
20212 /* Reduce the comparison to a comparison against zero. */
20213 if (! is_against_zero)
20214 {
20215 temp = gen_reg_rtx (compare_mode);
20216 emit_insn (gen_rtx_SET (temp, gen_rtx_MINUS (compare_mode, op0, op1)));
20217 op0 = temp;
20218 op1 = CONST0_RTX (compare_mode);
20219 }
20220
20221 /* If we don't care about NaNs we can reduce some of the comparisons
20222 down to faster ones. */
20223 if (! HONOR_NANS (compare_mode))
20224 switch (code)
20225 {
20226 case GT:
20227 code = LE;
20228 temp = true_cond;
20229 true_cond = false_cond;
20230 false_cond = temp;
20231 break;
20232 case UNGE:
20233 code = GE;
20234 break;
20235 case UNEQ:
20236 code = EQ;
20237 break;
20238 default:
20239 break;
20240 }
20241
20242 /* Now, reduce everything down to a GE. */
20243 switch (code)
20244 {
20245 case GE:
20246 break;
20247
20248 case LE:
20249 temp = gen_reg_rtx (compare_mode);
20250 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
20251 op0 = temp;
20252 break;
20253
20254 case ORDERED:
20255 temp = gen_reg_rtx (compare_mode);
20256 emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
20257 op0 = temp;
20258 break;
20259
20260 case EQ:
20261 temp = gen_reg_rtx (compare_mode);
20262 emit_insn (gen_rtx_SET (temp,
20263 gen_rtx_NEG (compare_mode,
20264 gen_rtx_ABS (compare_mode, op0))));
20265 op0 = temp;
20266 break;
20267
20268 case UNGE:
20269 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
20270 temp = gen_reg_rtx (result_mode);
20271 emit_insn (gen_rtx_SET (temp,
20272 gen_rtx_IF_THEN_ELSE (result_mode,
20273 gen_rtx_GE (VOIDmode,
20274 op0, op1),
20275 true_cond, false_cond)));
20276 false_cond = true_cond;
20277 true_cond = temp;
20278
20279 temp = gen_reg_rtx (compare_mode);
20280 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
20281 op0 = temp;
20282 break;
20283
20284 case GT:
20285 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
20286 temp = gen_reg_rtx (result_mode);
20287 emit_insn (gen_rtx_SET (temp,
20288 gen_rtx_IF_THEN_ELSE (result_mode,
20289 gen_rtx_GE (VOIDmode,
20290 op0, op1),
20291 true_cond, false_cond)));
20292 true_cond = false_cond;
20293 false_cond = temp;
20294
20295 temp = gen_reg_rtx (compare_mode);
20296 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
20297 op0 = temp;
20298 break;
20299
20300 default:
20301 gcc_unreachable ();
20302 }
20303
20304 emit_insn (gen_rtx_SET (dest,
20305 gen_rtx_IF_THEN_ELSE (result_mode,
20306 gen_rtx_GE (VOIDmode,
20307 op0, op1),
20308 true_cond, false_cond)));
20309 return 1;
20310 }
20311
20312 /* Same as above, but for ints (isel). */
20313
20314 static int
20315 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
20316 {
20317 rtx condition_rtx, cr;
20318 machine_mode mode = GET_MODE (dest);
20319 enum rtx_code cond_code;
20320 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
20321 bool signedp;
20322
20323 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
20324 return 0;
20325
20326 /* We still have to do the compare, because isel doesn't do a
20327      compare; it just looks at the CRx bits set by a previous compare
20328 instruction. */
20329 condition_rtx = rs6000_generate_compare (op, mode);
20330 cond_code = GET_CODE (condition_rtx);
20331 cr = XEXP (condition_rtx, 0);
20332 signedp = GET_MODE (cr) == CCmode;
20333
20334 isel_func = (mode == SImode
20335 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
20336 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
20337
20338 switch (cond_code)
20339 {
20340 case LT: case GT: case LTU: case GTU: case EQ:
20341 /* isel handles these directly. */
20342 break;
20343
20344 default:
20345 /* We need to swap the sense of the comparison. */
20346 {
20347 std::swap (false_cond, true_cond);
20348 PUT_CODE (condition_rtx, reverse_condition (cond_code));
20349 }
20350 break;
20351 }
20352
20353 false_cond = force_reg (mode, false_cond);
20354 if (true_cond != const0_rtx)
20355 true_cond = force_reg (mode, true_cond);
20356
20357 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
20358
20359 return 1;
20360 }
20361
20362 const char *
20363 output_isel (rtx *operands)
20364 {
20365 enum rtx_code code;
20366
20367 code = GET_CODE (operands[1]);
20368
20369 if (code == GE || code == GEU || code == LE || code == LEU || code == NE)
20370 {
20371 gcc_assert (GET_CODE (operands[2]) == REG
20372 && GET_CODE (operands[3]) == REG);
20373 PUT_CODE (operands[1], reverse_condition (code));
20374 return "isel %0,%3,%2,%j1";
20375 }
20376
20377 return "isel %0,%2,%3,%j1";
20378 }
20379
20380 void
20381 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
20382 {
20383 machine_mode mode = GET_MODE (op0);
20384 enum rtx_code c;
20385 rtx target;
20386
20387 /* VSX/altivec have direct min/max insns. */
20388 if ((code == SMAX || code == SMIN)
20389 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
20390 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
20391 {
20392 emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
20393 return;
20394 }
20395
20396 if (code == SMAX || code == SMIN)
20397 c = GE;
20398 else
20399 c = GEU;
20400
20401 if (code == SMAX || code == UMAX)
20402 target = emit_conditional_move (dest, c, op0, op1, mode,
20403 op0, op1, mode, 0);
20404 else
20405 target = emit_conditional_move (dest, c, op0, op1, mode,
20406 op1, op0, mode, 0);
20407 gcc_assert (target);
20408 if (target != dest)
20409 emit_move_insn (dest, target);
20410 }
20411
20412 /* A subroutine of the atomic operation splitters. Jump to LABEL if
20413 COND is true. Mark the jump as unlikely to be taken. */
20414
20415 static void
20416 emit_unlikely_jump (rtx cond, rtx label)
20417 {
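  /* A branch probability just under 1% of REG_BR_PROB_BASE.  */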
20418 int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
20419 rtx x;
20420
20421 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
20422 x = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
20423 add_int_reg_note (x, REG_BR_PROB, very_unlikely);
20424 }
20425
20426 /* A subroutine of the atomic operation splitters. Emit a load-locked
20427    instruction in MODE.  For QI/HImode, possibly use a pattern that includes
20428 the zero_extend operation. */
20429
20430 static void
20431 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
20432 {
20433 rtx (*fn) (rtx, rtx) = NULL;
20434
20435 switch (mode)
20436 {
20437 case QImode:
20438 fn = gen_load_lockedqi;
20439 break;
20440 case HImode:
20441 fn = gen_load_lockedhi;
20442 break;
20443 case SImode:
20444 if (GET_MODE (mem) == QImode)
20445 fn = gen_load_lockedqi_si;
20446 else if (GET_MODE (mem) == HImode)
20447 fn = gen_load_lockedhi_si;
20448 else
20449 fn = gen_load_lockedsi;
20450 break;
20451 case DImode:
20452 fn = gen_load_lockeddi;
20453 break;
20454 case TImode:
20455 fn = gen_load_lockedti;
20456 break;
20457 default:
20458 gcc_unreachable ();
20459 }
20460 emit_insn (fn (reg, mem));
20461 }
20462
20463 /* A subroutine of the atomic operation splitters. Emit a store-conditional
20464 instruction in MODE. */
20465
20466 static void
20467 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
20468 {
20469 rtx (*fn) (rtx, rtx, rtx) = NULL;
20470
20471 switch (mode)
20472 {
20473 case QImode:
20474 fn = gen_store_conditionalqi;
20475 break;
20476 case HImode:
20477 fn = gen_store_conditionalhi;
20478 break;
20479 case SImode:
20480 fn = gen_store_conditionalsi;
20481 break;
20482 case DImode:
20483 fn = gen_store_conditionaldi;
20484 break;
20485 case TImode:
20486 fn = gen_store_conditionalti;
20487 break;
20488 default:
20489 gcc_unreachable ();
20490 }
20491
20492 /* Emit sync before stwcx. to address PPC405 Erratum. */
20493 if (PPC405_ERRATUM77)
20494 emit_insn (gen_hwsync ());
20495
20496 emit_insn (fn (res, mem, val));
20497 }
20498
20499 /* Expand barriers before and after a load_locked/store_cond sequence. */
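/* In outline (matching the code below): release and acq_rel orderings
   get an lwsync before the sequence and seq_cst a full hwsync, while
   the acquire side is handled by the isync emitted afterwards.  */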
20500
20501 static rtx
20502 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
20503 {
20504 rtx addr = XEXP (mem, 0);
20505 int strict_p = (reload_in_progress || reload_completed);
20506
20507 if (!legitimate_indirect_address_p (addr, strict_p)
20508 && !legitimate_indexed_address_p (addr, strict_p))
20509 {
20510 addr = force_reg (Pmode, addr);
20511 mem = replace_equiv_address_nv (mem, addr);
20512 }
20513
20514 switch (model)
20515 {
20516 case MEMMODEL_RELAXED:
20517 case MEMMODEL_CONSUME:
20518 case MEMMODEL_ACQUIRE:
20519 case MEMMODEL_SYNC_ACQUIRE:
20520 break;
20521 case MEMMODEL_RELEASE:
20522 case MEMMODEL_SYNC_RELEASE:
20523 case MEMMODEL_ACQ_REL:
20524 emit_insn (gen_lwsync ());
20525 break;
20526 case MEMMODEL_SEQ_CST:
20527 case MEMMODEL_SYNC_SEQ_CST:
20528 emit_insn (gen_hwsync ());
20529 break;
20530 default:
20531 gcc_unreachable ();
20532 }
20533 return mem;
20534 }
20535
20536 static void
20537 rs6000_post_atomic_barrier (enum memmodel model)
20538 {
20539 switch (model)
20540 {
20541 case MEMMODEL_RELAXED:
20542 case MEMMODEL_CONSUME:
20543 case MEMMODEL_RELEASE:
20544 case MEMMODEL_SYNC_RELEASE:
20545 break;
20546 case MEMMODEL_ACQUIRE:
20547 case MEMMODEL_SYNC_ACQUIRE:
20548 case MEMMODEL_ACQ_REL:
20549 case MEMMODEL_SEQ_CST:
20550 case MEMMODEL_SYNC_SEQ_CST:
20551 emit_insn (gen_isync ());
20552 break;
20553 default:
20554 gcc_unreachable ();
20555 }
20556 }
20557
20558 /* A subroutine of the various atomic expanders. For sub-word operations,
20559 we must adjust things to operate on SImode. Given the original MEM,
20560 return a new aligned memory. Also build and return the quantities by
20561 which to shift and mask. */
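/* Worked example (illustrative): for a HImode access at byte address A,
   the containing word is at A & -4, the shift within it is
   (A << 3) & 16 (XOR-ed with 16 on big-endian), and the insertion mask
   is 0xffff shifted left by that amount.  */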
20562
20563 static rtx
20564 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
20565 {
20566 rtx addr, align, shift, mask, mem;
20567 HOST_WIDE_INT shift_mask;
20568 machine_mode mode = GET_MODE (orig_mem);
20569
20570 /* For smaller modes, we have to implement this via SImode. */
20571 shift_mask = (mode == QImode ? 0x18 : 0x10);
20572
20573 addr = XEXP (orig_mem, 0);
20574 addr = force_reg (GET_MODE (addr), addr);
20575
20576 /* Aligned memory containing subword. Generate a new memory. We
20577 do not want any of the existing MEM_ATTR data, as we're now
20578 accessing memory outside the original object. */
20579 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
20580 NULL_RTX, 1, OPTAB_LIB_WIDEN);
20581 mem = gen_rtx_MEM (SImode, align);
20582 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
20583 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
20584 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
20585
20586 /* Shift amount for subword relative to aligned word. */
20587 shift = gen_reg_rtx (SImode);
20588 addr = gen_lowpart (SImode, addr);
20589 rtx tmp = gen_reg_rtx (SImode);
20590 emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
20591 emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
20592 if (BYTES_BIG_ENDIAN)
20593 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
20594 shift, 1, OPTAB_LIB_WIDEN);
20595 *pshift = shift;
20596
20597 /* Mask for insertion. */
20598 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
20599 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
20600 *pmask = mask;
20601
20602 return mem;
20603 }
20604
20605 /* A subroutine of the various atomic expanders.  For sub-word operands,
20606    combine OLDVAL and NEWVAL via MASK, i.e. (OLDVAL & ~MASK) | NEWVAL,
      NEWVAL having already been shifted into position.  Returns a new
      pseudo.  */
20607
20608 static rtx
20609 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
20610 {
20611 rtx x;
20612
20613 x = gen_reg_rtx (SImode);
20614 emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
20615 gen_rtx_NOT (SImode, mask),
20616 oldval)));
20617
20618 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
20619
20620 return x;
20621 }
20622
20623 /* A subroutine of the various atomic expanders. For sub-word operands,
20624 extract WIDE to NARROW via SHIFT. */
20625
20626 static void
20627 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
20628 {
20629 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
20630 wide, 1, OPTAB_LIB_WIDEN);
20631 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
20632 }
20633
20634 /* Expand an atomic compare and swap operation. */
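/* E.g. (illustrative, register numbers arbitrary) a seq_cst SImode
   compare-and-swap expands to roughly:
	sync
   .L1:	lwarx  9,0,3
	cmpw   0,9,4
	bne-   0,.L2
	stwcx. 5,0,3
	bne-   0,.L1
   .L2:	isync
   with CR0 holding EQ on success.  */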
20635
20636 void
20637 rs6000_expand_atomic_compare_and_swap (rtx operands[])
20638 {
20639 rtx boolval, retval, mem, oldval, newval, cond;
20640 rtx label1, label2, x, mask, shift;
20641 machine_mode mode, orig_mode;
20642 enum memmodel mod_s, mod_f;
20643 bool is_weak;
20644
20645 boolval = operands[0];
20646 retval = operands[1];
20647 mem = operands[2];
20648 oldval = operands[3];
20649 newval = operands[4];
20650 is_weak = (INTVAL (operands[5]) != 0);
20651 mod_s = memmodel_from_int (INTVAL (operands[6]));
20652 mod_f = memmodel_from_int (INTVAL (operands[7]));
20653 orig_mode = mode = GET_MODE (mem);
20654
20655 mask = shift = NULL_RTX;
20656 if (mode == QImode || mode == HImode)
20657 {
20658 /* Before power8, we didn't have access to lbarx/lharx, so generate a
20659 lwarx and shift/mask operations. With power8, we need to do the
20660 comparison in SImode, but the store is still done in QI/HImode. */
20661 oldval = convert_modes (SImode, mode, oldval, 1);
20662
20663 if (!TARGET_SYNC_HI_QI)
20664 {
20665 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
20666
20667 	  /* Shift and mask OLDVAL into position within the word.  */
20668 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
20669 NULL_RTX, 1, OPTAB_LIB_WIDEN);
20670
20671 /* Shift and mask NEWVAL into position within the word. */
20672 newval = convert_modes (SImode, mode, newval, 1);
20673 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
20674 NULL_RTX, 1, OPTAB_LIB_WIDEN);
20675 }
20676
20677 /* Prepare to adjust the return value. */
20678 retval = gen_reg_rtx (SImode);
20679 mode = SImode;
20680 }
20681 else if (reg_overlap_mentioned_p (retval, oldval))
20682 oldval = copy_to_reg (oldval);
20683
20684 mem = rs6000_pre_atomic_barrier (mem, mod_s);
20685
20686 label1 = NULL_RTX;
20687 if (!is_weak)
20688 {
20689 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
20690 emit_label (XEXP (label1, 0));
20691 }
20692 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
20693
20694 emit_load_locked (mode, retval, mem);
20695
20696 x = retval;
20697 if (mask)
20698 {
20699 x = expand_simple_binop (SImode, AND, retval, mask,
20700 NULL_RTX, 1, OPTAB_LIB_WIDEN);
20701 }
20702
20703 cond = gen_reg_rtx (CCmode);
20704 /* If we have TImode, synthesize a comparison. */
20705 if (mode != TImode)
20706 x = gen_rtx_COMPARE (CCmode, x, oldval);
20707 else
20708 {
20709 rtx xor1_result = gen_reg_rtx (DImode);
20710 rtx xor2_result = gen_reg_rtx (DImode);
20711 rtx or_result = gen_reg_rtx (DImode);
20712 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
20713 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
20714 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
20715 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
20716
20717 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
20718 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
20719 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
20720 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
20721 }
20722
20723 emit_insn (gen_rtx_SET (cond, x));
20724
20725 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
20726 emit_unlikely_jump (x, label2);
20727
20728 x = newval;
20729 if (mask)
20730 x = rs6000_mask_atomic_subword (retval, newval, mask);
20731
20732 emit_store_conditional (orig_mode, cond, mem, x);
20733
20734 if (!is_weak)
20735 {
20736 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
20737 emit_unlikely_jump (x, label1);
20738 }
20739
20740 if (!is_mm_relaxed (mod_f))
20741 emit_label (XEXP (label2, 0));
20742
20743 rs6000_post_atomic_barrier (mod_s);
20744
20745 if (is_mm_relaxed (mod_f))
20746 emit_label (XEXP (label2, 0));
20747
20748 if (shift)
20749 rs6000_finish_atomic_subword (operands[1], retval, shift);
20750 else if (mode != GET_MODE (operands[1]))
20751 convert_move (operands[1], retval, 1);
20752
20753 /* In all cases, CR0 contains EQ on success, and NE on failure. */
20754 x = gen_rtx_EQ (SImode, cond, const0_rtx);
20755 emit_insn (gen_rtx_SET (boolval, x));
20756 }
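/* The sequence emitted above is the usual load-reserve /
   store-conditional loop.  In C-like pseudocode (a sketch only;
   __load_locked and __store_conditional are illustrative names for
   lwarx/ldarx and stwcx./stdcx., not real builtins):

     pre_atomic_barrier (mod_s);
     do
       {
         retval = __load_locked (mem);
         if (retval != oldval)
           break;                                  comparison failed, CR0 = NE
         ok = __store_conditional (mem, newval);   CR0 = EQ iff it stuck
       }
     while (!ok && !is_weak);                      strong CAS retries
     post_atomic_barrier (mod_s);
     boolval = (CR0 is EQ);

   A weak CAS reports failure on a lost reservation instead of
   retrying.  */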
20757
20758 /* Expand an atomic exchange operation. */
20759
20760 void
20761 rs6000_expand_atomic_exchange (rtx operands[])
20762 {
20763 rtx retval, mem, val, cond;
20764 machine_mode mode;
20765 enum memmodel model;
20766 rtx label, x, mask, shift;
20767
20768 retval = operands[0];
20769 mem = operands[1];
20770 val = operands[2];
20771 model = (enum memmodel) INTVAL (operands[3]);
20772 mode = GET_MODE (mem);
20773
20774 mask = shift = NULL_RTX;
20775 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
20776 {
20777 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
20778
20779 /* Shift and mask VAL into position within the word. */
20780 val = convert_modes (SImode, mode, val, 1);
20781 val = expand_simple_binop (SImode, ASHIFT, val, shift,
20782 NULL_RTX, 1, OPTAB_LIB_WIDEN);
20783
20784 /* Prepare to adjust the return value. */
20785 retval = gen_reg_rtx (SImode);
20786 mode = SImode;
20787 }
20788
20789 mem = rs6000_pre_atomic_barrier (mem, model);
20790
20791 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
20792 emit_label (XEXP (label, 0));
20793
20794 emit_load_locked (mode, retval, mem);
20795
20796 x = val;
20797 if (mask)
20798 x = rs6000_mask_atomic_subword (retval, val, mask);
20799
20800 cond = gen_reg_rtx (CCmode);
20801 emit_store_conditional (mode, cond, mem, x);
20802
20803 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
20804 emit_unlikely_jump (x, label);
20805
20806 rs6000_post_atomic_barrier (model);
20807
20808 if (shift)
20809 rs6000_finish_atomic_subword (operands[0], retval, shift);
20810 }
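/* Without lbarx/lharx, a QImode/HImode exchange degenerates into a
   full-word loop that replaces only the addressed field.  Per
   iteration it computes, roughly (a sketch only):

     word = __load_locked (aligned_mem);
     word = (word & ~mask) | (val << shift);
     __store_conditional (aligned_mem, word);      retry until it sticks

   and the old field is then recovered with
   rs6000_finish_atomic_subword.  */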
20811
20812 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
20813 to perform. MEM is the memory on which to operate. VAL is the second
20814 operand of the binary operator. BEFORE and AFTER are optional locations to
20815 return the value of MEM either before of after the operation. MODEL_RTX
20816 is a CONST_INT containing the memory model to use. */
20817
20818 void
20819 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
20820 rtx orig_before, rtx orig_after, rtx model_rtx)
20821 {
20822 enum memmodel model = (enum memmodel) INTVAL (model_rtx);
20823 machine_mode mode = GET_MODE (mem);
20824 machine_mode store_mode = mode;
20825 rtx label, x, cond, mask, shift;
20826 rtx before = orig_before, after = orig_after;
20827
20828 mask = shift = NULL_RTX;
20829 /* On power8, we want to use SImode for the operation. On previous systems,
20830 do the operation on the containing word and shift/mask to get the proper
20831 byte or halfword. */
20832 if (mode == QImode || mode == HImode)
20833 {
20834 if (TARGET_SYNC_HI_QI)
20835 {
20836 val = convert_modes (SImode, mode, val, 1);
20837
20838 /* Prepare to adjust the return value. */
20839 before = gen_reg_rtx (SImode);
20840 if (after)
20841 after = gen_reg_rtx (SImode);
20842 mode = SImode;
20843 }
20844 else
20845 {
20846 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
20847
20848 /* Shift and mask VAL into position within the word. */
20849 val = convert_modes (SImode, mode, val, 1);
20850 val = expand_simple_binop (SImode, ASHIFT, val, shift,
20851 NULL_RTX, 1, OPTAB_LIB_WIDEN);
20852
20853 switch (code)
20854 {
20855 case IOR:
20856 case XOR:
20857 /* We've already zero-extended VAL. That is sufficient to
20858 make certain that it does not affect other bits. */
20859 mask = NULL;
20860 break;
20861
20862 case AND:
20863 /* If we make certain that all of the other bits in VAL are set,
20864 the AND cannot affect other bits; see the example after this function. */
20865 x = gen_rtx_NOT (SImode, mask);
20866 x = gen_rtx_IOR (SImode, x, val);
20867 emit_insn (gen_rtx_SET (val, x));
20868 mask = NULL;
20869 break;
20870
20871 case NOT:
20872 case PLUS:
20873 case MINUS:
20874 /* These will all affect bits outside the field and need
20875 adjustment via MASK within the loop. */
20876 break;
20877
20878 default:
20879 gcc_unreachable ();
20880 }
20881
20882 /* Prepare to adjust the return value. */
20883 before = gen_reg_rtx (SImode);
20884 if (after)
20885 after = gen_reg_rtx (SImode);
20886 store_mode = mode = SImode;
20887 }
20888 }
20889
20890 mem = rs6000_pre_atomic_barrier (mem, model);
20891
20892 label = gen_label_rtx ();
20893 emit_label (label);
20894 label = gen_rtx_LABEL_REF (VOIDmode, label);
20895
20896 if (before == NULL_RTX)
20897 before = gen_reg_rtx (mode);
20898
20899 emit_load_locked (mode, before, mem);
20900
20901 if (code == NOT)
20902 {
20903 x = expand_simple_binop (mode, AND, before, val,
20904 NULL_RTX, 1, OPTAB_LIB_WIDEN);
20905 after = expand_simple_unop (mode, NOT, x, after, 1);
20906 }
20907 else
20908 {
20909 after = expand_simple_binop (mode, code, before, val,
20910 after, 1, OPTAB_LIB_WIDEN);
20911 }
20912
20913 x = after;
20914 if (mask)
20915 {
20916 x = expand_simple_binop (SImode, AND, after, mask,
20917 NULL_RTX, 1, OPTAB_LIB_WIDEN);
20918 x = rs6000_mask_atomic_subword (before, x, mask);
20919 }
20920 else if (store_mode != mode)
20921 x = convert_modes (store_mode, mode, x, 1);
20922
20923 cond = gen_reg_rtx (CCmode);
20924 emit_store_conditional (store_mode, cond, mem, x);
20925
20926 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
20927 emit_unlikely_jump (x, label);
20928
20929 rs6000_post_atomic_barrier (model);
20930
20931 if (shift)
20932 {
20933 /* QImode/HImode on machines without lbarx/lharx, where we do a lwarx and
20934 then do the calculations in an SImode register. */
20935 if (orig_before)
20936 rs6000_finish_atomic_subword (orig_before, before, shift);
20937 if (orig_after)
20938 rs6000_finish_atomic_subword (orig_after, after, shift);
20939 }
20940 else if (store_mode != mode)
20941 {
20942 /* QImode/HImode on machines with lbarx/lharx, where we do the native
20943 operation and then do the calculations in an SImode register. */
20944 if (orig_before)
20945 convert_move (orig_before, before, 1);
20946 if (orig_after)
20947 convert_move (orig_after, after, 1);
20948 }
20949 else if (orig_after && after != orig_after)
20950 emit_move_insn (orig_after, after);
20951 }
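/* A worked example of the AND special case above (a sketch; assume a
   byte field with MASK = 0x00ff0000 and VAL already shifted into
   position).  Setting VAL |= ~MASK turns the full-word AND into

     word & (val_shifted | 0xff00ffff)

   which is an identity on every bit outside the field, so no merge
   with MASK is needed inside the loop.  PLUS and MINUS, by contrast,
   can carry or borrow across the field boundary, which is why they
   keep MASK and merge via rs6000_mask_atomic_subword before the
   store.  */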
20952
20953 /* Emit instructions to move SRC to DST. Called by splitters for
20954 multi-register moves. It will emit at most one instruction for
20955 each register that is accessed; that is, it won't emit li/lis pairs
20956 (or equivalent for 64-bit code). One of SRC or DST must be a hard
20957 register. */
20958
20959 void
20960 rs6000_split_multireg_move (rtx dst, rtx src)
20961 {
20962 /* The register number of the first register being moved. */
20963 int reg;
20964 /* The mode that is to be moved. */
20965 machine_mode mode;
20966 /* The mode that the move is being done in, and its size. */
20967 machine_mode reg_mode;
20968 int reg_mode_size;
20969 /* The number of registers that will be moved. */
20970 int nregs;
20971
20972 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
20973 mode = GET_MODE (dst);
20974 nregs = hard_regno_nregs[reg][mode];
20975 if (FP_REGNO_P (reg))
20976 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
20977 ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
20978 else if (ALTIVEC_REGNO_P (reg))
20979 reg_mode = V16QImode;
20980 else if (TARGET_E500_DOUBLE && mode == TFmode)
20981 reg_mode = DFmode;
20982 else
20983 reg_mode = word_mode;
20984 reg_mode_size = GET_MODE_SIZE (reg_mode);
20985
20986 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
20987
20988 /* TDmode residing in FP registers is special, since the ISA requires that
20989 the lower-numbered word of a register pair is always the most significant
20990 word, even in little-endian mode. This does not match the usual subreg
20991 semantics, so we cannot use simplify_gen_subreg in those cases. Access
20992 the appropriate constituent registers "by hand" in little-endian mode.
20993
20994 Note we do not need to check for destructive overlap here since TDmode
20995 can only reside in even/odd register pairs. */
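/* For example (a sketch): a TDmode value in fr10:fr11 keeps its most
   significant word in fr10 on both endiannesses, so the little-endian
   loop below pairs subreg word I with hard register REGNO + NREGS - 1 - I.  */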
20996 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
20997 {
20998 rtx p_src, p_dst;
20999 int i;
21000
21001 for (i = 0; i < nregs; i++)
21002 {
21003 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
21004 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
21005 else
21006 p_src = simplify_gen_subreg (reg_mode, src, mode,
21007 i * reg_mode_size);
21008
21009 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
21010 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
21011 else
21012 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
21013 i * reg_mode_size);
21014
21015 emit_insn (gen_rtx_SET (p_dst, p_src));
21016 }
21017
21018 return;
21019 }
21020
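/* A sketch of the destructive-overlap case handled below: splitting a
   two-register move from r10:r11 into r11:r12 in ascending order would
   clobber r11 before it is read, so when REGNO (src) < REGNO (dst) the
   constituent words are copied highest-numbered first.  */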
21021 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
21022 {
21023 /* Move register range backwards, if we might have destructive
21024 overlap. */
21025 int i;
21026 for (i = nregs - 1; i >= 0; i--)
21027 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
21028 i * reg_mode_size),
21029 simplify_gen_subreg (reg_mode, src, mode,
21030 i * reg_mode_size)));
21031 }
21032 else
21033 {
21034 int i;
21035 int j = -1;
21036 bool used_update = false;
21037 rtx restore_basereg = NULL_RTX;
21038
21039 if (MEM_P (src) && INT_REGNO_P (reg))
21040 {
21041 rtx breg;
21042
21043 if (GET_CODE (XEXP (src, 0)) == PRE_INC
21044 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
21045 {
21046 rtx delta_rtx;
21047 breg = XEXP (XEXP (src, 0), 0);
21048 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
21049 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
21050 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
21051 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
21052 src = replace_equiv_address (src, breg);
21053 }
21054 else if (! rs6000_offsettable_memref_p (src, reg_mode))
21055 {
21056 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
21057 {
21058 rtx basereg = XEXP (XEXP (src, 0), 0);
21059 if (TARGET_UPDATE)
21060 {
21061 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
21062 emit_insn (gen_rtx_SET (ndst,
21063 gen_rtx_MEM (reg_mode,
21064 XEXP (src, 0))));
21065 used_update = true;
21066 }
21067 else
21068 emit_insn (gen_rtx_SET (basereg,
21069 XEXP (XEXP (src, 0), 1)));
21070 src = replace_equiv_address (src, basereg);
21071 }
21072 else
21073 {
21074 rtx basereg = gen_rtx_REG (Pmode, reg);
21075 emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
21076 src = replace_equiv_address (src, basereg);
21077 }
21078 }
21079
21080 breg = XEXP (src, 0);
21081 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
21082 breg = XEXP (breg, 0);
21083
21084 /* If the base register we are using to address memory is
21085 also a destination reg, then change that register last. */
21086 if (REG_P (breg)
21087 && REGNO (breg) >= REGNO (dst)
21088 && REGNO (breg) < REGNO (dst) + nregs)
21089 j = REGNO (breg) - REGNO (dst);
21090 }
21091 else if (MEM_P (dst) && INT_REGNO_P (reg))
21092 {
21093 rtx breg;
21094
21095 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
21096 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
21097 {
21098 rtx delta_rtx;
21099 breg = XEXP (XEXP (dst, 0), 0);
21100 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
21101 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
21102 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
21103
21104 /* We have to update the breg before doing the store.
21105 Use store with update, if available. */
21106
21107 if (TARGET_UPDATE)
21108 {
21109 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
21110 emit_insn (TARGET_32BIT
21111 ? (TARGET_POWERPC64
21112 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
21113 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
21114 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
21115 used_update = true;
21116 }
21117 else
21118 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
21119 dst = replace_equiv_address (dst, breg);
21120 }
21121 else if (!rs6000_offsettable_memref_p (dst, reg_mode)
21122 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
21123 {
21124 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
21125 {
21126 rtx basereg = XEXP (XEXP (dst, 0), 0);
21127 if (TARGET_UPDATE)
21128 {
21129 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
21130 emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
21131 XEXP (dst, 0)),
21132 nsrc));
21133 used_update = true;
21134 }
21135 else
21136 emit_insn (gen_rtx_SET (basereg,
21137 XEXP (XEXP (dst, 0), 1)));
21138 dst = replace_equiv_address (dst, basereg);
21139 }
21140 else
21141 {
21142 rtx basereg = XEXP (XEXP (dst, 0), 0);
21143 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
21144 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
21145 && REG_P (basereg)
21146 && REG_P (offsetreg)
21147 && REGNO (basereg) != REGNO (offsetreg));
21148 if (REGNO (basereg) == 0)
21149 {
21150 rtx tmp = offsetreg;
21151 offsetreg = basereg;
21152 basereg = tmp;
21153 }
21154 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
21155 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
21156 dst = replace_equiv_address (dst, basereg);
21157 }
21158 }
21159 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
21160 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode));
21161 }
21162
21163 for (i = 0; i < nregs; i++)
21164 {
21165 /* Calculate index to next subword. */
21166 ++j;
21167 if (j == nregs)
21168 j = 0;
21169
21170 /* If the compiler already emitted the move of the first word by a
21171 store with update, there is no need to do anything. */
21172 if (j == 0 && used_update)
21173 continue;
21174
21175 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
21176 j * reg_mode_size),
21177 simplify_gen_subreg (reg_mode, src, mode,
21178 j * reg_mode_size)));
21179 }
21180 if (restore_basereg != NULL_RTX)
21181 emit_insn (restore_basereg);
21182 }
21183 }
21184
21185 \f
21186 /* This page contains routines that are used to determine what the
21187 function prologue and epilogue code will do and write them out. */
21188
21189 static inline bool
21190 save_reg_p (int r)
21191 {
21192 return !call_used_regs[r] && df_regs_ever_live_p (r);
21193 }
21194
21195 /* Return the first fixed-point register that is required to be
21196 saved. 32 if none. */
21197
21198 int
21199 first_reg_to_save (void)
21200 {
21201 int first_reg;
21202
21203 /* Find lowest numbered live register. */
21204 for (first_reg = 13; first_reg <= 31; first_reg++)
21205 if (save_reg_p (first_reg))
21206 break;
21207
21208 if (first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM
21209 && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
21210 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
21211 || (TARGET_TOC && TARGET_MINIMAL_TOC))
21212 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
21213 first_reg = RS6000_PIC_OFFSET_TABLE_REGNUM;
21214
21215 #if TARGET_MACHO
21216 if (flag_pic
21217 && crtl->uses_pic_offset_table
21218 && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
21219 return RS6000_PIC_OFFSET_TABLE_REGNUM;
21220 #endif
21221
21222 return first_reg;
21223 }
21224
21225 /* Similar, for FP regs. */
21226
21227 int
21228 first_fp_reg_to_save (void)
21229 {
21230 int first_reg;
21231
21232 /* Find lowest numbered live register. */
21233 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
21234 if (save_reg_p (first_reg))
21235 break;
21236
21237 return first_reg;
21238 }
21239
21240 /* Similar, for AltiVec regs. */
21241
21242 static int
21243 first_altivec_reg_to_save (void)
21244 {
21245 int i;
21246
21247 /* Stack frame remains as is unless we are in AltiVec ABI. */
21248 if (! TARGET_ALTIVEC_ABI)
21249 return LAST_ALTIVEC_REGNO + 1;
21250
21251 /* On Darwin, the unwind routines are compiled without
21252 TARGET_ALTIVEC, and use save_world to save/restore the
21253 altivec registers when necessary. */
21254 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
21255 && ! TARGET_ALTIVEC)
21256 return FIRST_ALTIVEC_REGNO + 20;
21257
21258 /* Find lowest numbered live register. */
21259 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
21260 if (save_reg_p (i))
21261 break;
21262
21263 return i;
21264 }
21265
21266 /* Return a 32-bit mask of the AltiVec registers we need to set in
21267 VRSAVE. Bit n of the return value is 1 if Vn is live. The MSB in
21268 the 32-bit word is 0. */
21269
21270 static unsigned int
21271 compute_vrsave_mask (void)
21272 {
21273 unsigned int i, mask = 0;
21274
21275 /* On Darwin, the unwind routines are compiled without
21276 TARGET_ALTIVEC, and use save_world to save/restore the
21277 call-saved altivec registers when necessary. */
21278 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
21279 && ! TARGET_ALTIVEC)
21280 mask |= 0xFFF;
21281
21282 /* First, find out if we use _any_ altivec registers. */
21283 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
21284 if (df_regs_ever_live_p (i))
21285 mask |= ALTIVEC_REG_BIT (i);
21286
21287 if (mask == 0)
21288 return mask;
21289
21290 /* Next, remove the argument registers from the set. These must
21291 be in the VRSAVE mask set by the caller, so we don't need to add
21292 them in again. More importantly, the mask we compute here is
21293 used to generate CLOBBERs in the set_vrsave insn, and we do not
21294 wish the argument registers to die. */
21295 for (i = ALTIVEC_ARG_MIN_REG; i < (unsigned) crtl->args.info.vregno; i++)
21296 mask &= ~ALTIVEC_REG_BIT (i);
21297
21298 /* Similarly, remove the return value from the set. */
21299 {
21300 bool yes = false;
21301 diddle_return_value (is_altivec_return_reg, &yes);
21302 if (yes)
21303 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
21304 }
21305
21306 return mask;
21307 }
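/* An example of the mask layout (a sketch, assuming the usual
   ALTIVEC_REG_BIT encoding of 0x80000000 >> (regno - FIRST_ALTIVEC_REGNO)):
   if only V20 is live, the function returns 0x800, i.e. PowerPC bit 20
   counted from the MSB.  The Darwin save_world case above ORs in 0xFFF,
   which is exactly bits 20..31, covering the call-saved V20..V31.  */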
21308
21309 /* For a very restricted set of circumstances, we can cut down the
21310 size of prologues/epilogues by calling our own save/restore-the-world
21311 routines. */
21312
21313 static void
21314 compute_save_world_info (rs6000_stack_t *info_ptr)
21315 {
21316 info_ptr->world_save_p = 1;
21317 info_ptr->world_save_p
21318 = (WORLD_SAVE_P (info_ptr)
21319 && DEFAULT_ABI == ABI_DARWIN
21320 && !cfun->has_nonlocal_label
21321 && info_ptr->first_fp_reg_save == FIRST_SAVED_FP_REGNO
21322 && info_ptr->first_gp_reg_save == FIRST_SAVED_GP_REGNO
21323 && info_ptr->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
21324 && info_ptr->cr_save_p);
21325
21326 /* This will not work in conjunction with sibcalls. Make sure there
21327 are none. (This check is expensive, but seldom executed.) */
21328 if (WORLD_SAVE_P (info_ptr))
21329 {
21330 rtx_insn *insn;
21331 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
21332 if (CALL_P (insn) && SIBLING_CALL_P (insn))
21333 {
21334 info_ptr->world_save_p = 0;
21335 break;
21336 }
21337 }
21338
21339 if (WORLD_SAVE_P (info_ptr))
21340 {
21341 /* Even if we're not touching VRsave, make sure there's room on the
21342 stack for it, if it looks like we're calling SAVE_WORLD, which
21343 will attempt to save it. */
21344 info_ptr->vrsave_size = 4;
21345
21346 /* If we are going to save the world, we need to save the link register too. */
21347 info_ptr->lr_save_p = 1;
21348
21349 /* "Save" the VRsave register too if we're saving the world. */
21350 if (info_ptr->vrsave_mask == 0)
21351 info_ptr->vrsave_mask = compute_vrsave_mask ();
21352
21353 /* Because the Darwin register save/restore routines only handle
21354 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
21355 check. */
21356 gcc_assert (info_ptr->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
21357 && (info_ptr->first_altivec_reg_save
21358 >= FIRST_SAVED_ALTIVEC_REGNO));
21359 }
21360 return;
21361 }
21362
21363
21364 static void
21365 is_altivec_return_reg (rtx reg, void *xyes)
21366 {
21367 bool *yes = (bool *) xyes;
21368 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
21369 *yes = true;
21370 }
21371
21372 \f
21373 /* Look for user-defined global regs in the range FIRST to LAST-1.
21374 We should not restore these, and so cannot use lmw or out-of-line
21375 restore functions if there are any. We also can't save them
21376 (well, emit frame notes for them), because frame unwinding during
21377 exception handling will restore saved registers. */
21378
21379 static bool
21380 global_regs_p (unsigned first, unsigned last)
21381 {
21382 while (first < last)
21383 if (global_regs[first++])
21384 return true;
21385 return false;
21386 }
21387
21388 /* Determine the strategy for saving/restoring registers. */
21389
21390 enum {
21391 SAVRES_MULTIPLE = 0x1,
21392 SAVE_INLINE_FPRS = 0x2,
21393 SAVE_INLINE_GPRS = 0x4,
21394 REST_INLINE_FPRS = 0x8,
21395 REST_INLINE_GPRS = 0x10,
21396 SAVE_NOINLINE_GPRS_SAVES_LR = 0x20,
21397 SAVE_NOINLINE_FPRS_SAVES_LR = 0x40,
21398 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x80,
21399 SAVE_INLINE_VRS = 0x100,
21400 REST_INLINE_VRS = 0x200
21401 };
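/* In the strategy mask above, a set SAVE_INLINE_* or REST_INLINE_* bit
   means the corresponding registers are saved/restored with inline
   code, a clear bit means an out-of-line routine is used, and
   SAVRES_MULTIPLE selects store/load multiple.  A typical test (a
   sketch) for "inline save but out-of-line restore" of the FPRs is:

     (strategy & (SAVE_INLINE_FPRS | REST_INLINE_FPRS))
       == SAVE_INLINE_FPRS  */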
21402
21403 static int
21404 rs6000_savres_strategy (rs6000_stack_t *info,
21405 bool using_static_chain_p)
21406 {
21407 int strategy = 0;
21408 bool lr_save_p;
21409
21410 if (TARGET_MULTIPLE
21411 && !TARGET_POWERPC64
21412 && !(TARGET_SPE_ABI && info->spe_64bit_regs_used)
21413 && info->first_gp_reg_save < 31
21414 && !global_regs_p (info->first_gp_reg_save, 32))
21415 strategy |= SAVRES_MULTIPLE;
21416
21417 if (crtl->calls_eh_return
21418 || cfun->machine->ra_need_lr)
21419 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
21420 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
21421 | SAVE_INLINE_VRS | REST_INLINE_VRS);
21422
21423 if (info->first_fp_reg_save == 64
21424 /* The out-of-line FP routines use double-precision stores;
21425 we can't use those routines if we don't have such stores. */
21426 || (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT)
21427 || global_regs_p (info->first_fp_reg_save, 64))
21428 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
21429
21430 if (info->first_gp_reg_save == 32
21431 || (!(strategy & SAVRES_MULTIPLE)
21432 && global_regs_p (info->first_gp_reg_save, 32)))
21433 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
21434
21435 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
21436 || global_regs_p (info->first_altivec_reg_save, LAST_ALTIVEC_REGNO + 1))
21437 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
21438
21439 /* Define cutoff for using out-of-line functions to save registers. */
21440 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
21441 {
21442 if (!optimize_size)
21443 {
21444 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
21445 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
21446 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
21447 }
21448 else
21449 {
21450 /* Prefer out-of-line restore if it will exit. */
21451 if (info->first_fp_reg_save > 61)
21452 strategy |= SAVE_INLINE_FPRS;
21453 if (info->first_gp_reg_save > 29)
21454 {
21455 if (info->first_fp_reg_save == 64)
21456 strategy |= SAVE_INLINE_GPRS;
21457 else
21458 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
21459 }
21460 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
21461 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
21462 }
21463 }
21464 else if (DEFAULT_ABI == ABI_DARWIN)
21465 {
21466 if (info->first_fp_reg_save > 60)
21467 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
21468 if (info->first_gp_reg_save > 29)
21469 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
21470 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
21471 }
21472 else
21473 {
21474 gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
21475 if (info->first_fp_reg_save > 61)
21476 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
21477 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
21478 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
21479 }
21480
21481 /* Don't bother to try to save things out-of-line if r11 is occupied
21482 by the static chain. It would require too much fiddling and the
21483 static chain is rarely used anyway. FPRs are saved w.r.t. the stack
21484 pointer on Darwin, and AIX uses r1 or r12. */
21485 if (using_static_chain_p
21486 && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
21487 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
21488 | SAVE_INLINE_GPRS
21489 | SAVE_INLINE_VRS | REST_INLINE_VRS);
21490
21491 /* We can only use the out-of-line routines to restore if we've
21492 saved all the registers from first_fp_reg_save in the prologue.
21493 Otherwise, we risk loading garbage. */
21494 if ((strategy & (SAVE_INLINE_FPRS | REST_INLINE_FPRS)) == SAVE_INLINE_FPRS)
21495 {
21496 int i;
21497
21498 for (i = info->first_fp_reg_save; i < 64; i++)
21499 if (!save_reg_p (i))
21500 {
21501 strategy |= REST_INLINE_FPRS;
21502 break;
21503 }
21504 }
21505
21506 /* If we are going to use store multiple, then don't even bother
21507 with the out-of-line routines, since the store-multiple
21508 instruction will always be smaller. */
21509 if ((strategy & SAVRES_MULTIPLE))
21510 strategy |= SAVE_INLINE_GPRS;
21511
21512 /* info->lr_save_p isn't yet set if the only reason lr needs to be
21513 saved is an out-of-line save or restore. Set up the value for
21514 the next test (excluding out-of-line gpr restore). */
21515 lr_save_p = (info->lr_save_p
21516 || !(strategy & SAVE_INLINE_GPRS)
21517 || !(strategy & SAVE_INLINE_FPRS)
21518 || !(strategy & SAVE_INLINE_VRS)
21519 || !(strategy & REST_INLINE_FPRS)
21520 || !(strategy & REST_INLINE_VRS));
21521
21522 /* The situation is more complicated with load multiple. We'd
21523 prefer to use the out-of-line routines for restores, since the
21524 "exit" out-of-line routines can handle the restore of LR and the
21525 frame teardown. However, it doesn't make sense to use the
21526 out-of-line routine if that is the only reason we'd need to save
21527 LR, and we can't use the "exit" out-of-line gpr restore if we
21528 have saved some fprs. In those cases it is advantageous to use
21529 load multiple when available. */
21530 if ((strategy & SAVRES_MULTIPLE)
21531 && (!lr_save_p
21532 || info->first_fp_reg_save != 64))
21533 strategy |= REST_INLINE_GPRS;
21534
21535 /* Saving CR interferes with the exit routines used on the SPE, so
21536 just punt here. */
21537 if (TARGET_SPE_ABI
21538 && info->spe_64bit_regs_used
21539 && info->cr_save_p)
21540 strategy |= REST_INLINE_GPRS;
21541
21542 /* We can only use load multiple or the out-of-line routines to
21543 restore if we've used store multiple or out-of-line routines
21544 in the prologue, i.e. if we've saved all the registers from
21545 first_gp_reg_save. Otherwise, we risk loading garbage. */
21546 if ((strategy & (SAVE_INLINE_GPRS | REST_INLINE_GPRS | SAVRES_MULTIPLE))
21547 == SAVE_INLINE_GPRS)
21548 {
21549 int i;
21550
21551 for (i = info->first_gp_reg_save; i < 32; i++)
21552 if (!save_reg_p (i))
21553 {
21554 strategy |= REST_INLINE_GPRS;
21555 break;
21556 }
21557 }
21558
21559 if (TARGET_ELF && TARGET_64BIT)
21560 {
21561 if (!(strategy & SAVE_INLINE_FPRS))
21562 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
21563 else if (!(strategy & SAVE_INLINE_GPRS)
21564 && info->first_fp_reg_save == 64)
21565 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
21566 }
21567 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
21568 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
21569
21570 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
21571 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
21572
21573 return strategy;
21574 }
21575
21576 /* Calculate the stack information for the current function. This is
21577 complicated by having two separate calling sequences, the AIX calling
21578 sequence and the V.4 calling sequence.
21579
21580 AIX (and Darwin/Mac OS X) stack frames look like:
21581 32-bit 64-bit
21582 SP----> +---------------------------------------+
21583 | back chain to caller | 0 0
21584 +---------------------------------------+
21585 | saved CR | 4 8 (8-11)
21586 +---------------------------------------+
21587 | saved LR | 8 16
21588 +---------------------------------------+
21589 | reserved for compilers | 12 24
21590 +---------------------------------------+
21591 | reserved for binders | 16 32
21592 +---------------------------------------+
21593 | saved TOC pointer | 20 40
21594 +---------------------------------------+
21595 | Parameter save area (P) | 24 48
21596 +---------------------------------------+
21597 | Alloca space (A) | 24+P etc.
21598 +---------------------------------------+
21599 | Local variable space (L) | 24+P+A
21600 +---------------------------------------+
21601 | Float/int conversion temporary (X) | 24+P+A+L
21602 +---------------------------------------+
21603 | Save area for AltiVec registers (W) | 24+P+A+L+X
21604 +---------------------------------------+
21605 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
21606 +---------------------------------------+
21607 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
21608 +---------------------------------------+
21609 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
21610 +---------------------------------------+
21611 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
21612 +---------------------------------------+
21613 old SP->| back chain to caller's caller |
21614 +---------------------------------------+
21615
21616 The required alignment for AIX configurations is two words (i.e., 8
21617 or 16 bytes).
21618
21619 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
21620
21621 SP----> +---------------------------------------+
21622 | Back chain to caller | 0
21623 +---------------------------------------+
21624 | Save area for CR | 8
21625 +---------------------------------------+
21626 | Saved LR | 16
21627 +---------------------------------------+
21628 | Saved TOC pointer | 24
21629 +---------------------------------------+
21630 | Parameter save area (P) | 32
21631 +---------------------------------------+
21632 | Alloca space (A) | 32+P
21633 +---------------------------------------+
21634 | Local variable space (L) | 32+P+A
21635 +---------------------------------------+
21636 | Save area for AltiVec registers (W) | 32+P+A+L
21637 +---------------------------------------+
21638 | AltiVec alignment padding (Y) | 32+P+A+L+W
21639 +---------------------------------------+
21640 | Save area for GP registers (G) | 32+P+A+L+W+Y
21641 +---------------------------------------+
21642 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
21643 +---------------------------------------+
21644 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
21645 +---------------------------------------+
21646
21647
21648 V.4 stack frames look like:
21649
21650 SP----> +---------------------------------------+
21651 | back chain to caller | 0
21652 +---------------------------------------+
21653 | caller's saved LR | 4
21654 +---------------------------------------+
21655 | Parameter save area (P) | 8
21656 +---------------------------------------+
21657 | Alloca space (A) | 8+P
21658 +---------------------------------------+
21659 | Varargs save area (V) | 8+P+A
21660 +---------------------------------------+
21661 | Local variable space (L) | 8+P+A+V
21662 +---------------------------------------+
21663 | Float/int conversion temporary (X) | 8+P+A+V+L
21664 +---------------------------------------+
21665 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
21666 +---------------------------------------+
21667 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
21668 +---------------------------------------+
21669 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
21670 +---------------------------------------+
21671 | SPE: area for 64-bit GP registers |
21672 +---------------------------------------+
21673 | SPE alignment padding |
21674 +---------------------------------------+
21675 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
21676 +---------------------------------------+
21677 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
21678 +---------------------------------------+
21679 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
21680 +---------------------------------------+
21681 old SP->| back chain to caller's caller |
21682 +---------------------------------------+
21683
21684 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
21685 given. (But note below and in sysv4.h that we require only 8 and
21686 may round up the size of our stack frame anyway. The historical
21687 reason is early versions of powerpc-linux which didn't properly
21688 align the stack at program startup. A happy side-effect is that
21689 -mno-eabi libraries can be used with -meabi programs.)
21690
21691 The EABI configuration defaults to the V.4 layout. However,
21692 the stack alignment requirements may differ. If -mno-eabi is not
21693 given, the required stack alignment is 8 bytes; if -mno-eabi is
21694 given, the required alignment is 16 bytes. (But see V.4 comment
21695 above.) */
21696
21697 #ifndef ABI_STACK_BOUNDARY
21698 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
21699 #endif
21700
21701 static rs6000_stack_t *
21702 rs6000_stack_info (void)
21703 {
21704 /* We should never be called for thunks, we are not set up for that. */
21705 gcc_assert (!cfun->is_thunk);
21706
21707 rs6000_stack_t *info_ptr = &stack_info;
21708 int reg_size = TARGET_32BIT ? 4 : 8;
21709 int ehrd_size;
21710 int ehcr_size;
21711 int save_align;
21712 int first_gp;
21713 HOST_WIDE_INT non_fixed_size;
21714 bool using_static_chain_p;
21715
21716 if (reload_completed && info_ptr->reload_completed)
21717 return info_ptr;
21718
21719 memset (info_ptr, 0, sizeof (*info_ptr));
21720 info_ptr->reload_completed = reload_completed;
21721
21722 if (TARGET_SPE)
21723 {
21724 /* Cache value so we don't rescan instruction chain over and over. */
21725 if (cfun->machine->insn_chain_scanned_p == 0)
21726 cfun->machine->insn_chain_scanned_p
21727 = spe_func_has_64bit_regs_p () + 1;
21728 info_ptr->spe_64bit_regs_used = cfun->machine->insn_chain_scanned_p - 1;
21729 }
21730
21731 /* Select which calling sequence. */
21732 info_ptr->abi = DEFAULT_ABI;
21733
21734 /* Calculate which registers need to be saved & save area size. */
21735 info_ptr->first_gp_reg_save = first_reg_to_save ();
21736 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
21737 even if it currently looks like we won't. Reload may need it to
21738 get at a constant; if so, it will have already created a constant
21739 pool entry for it. */
21740 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
21741 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
21742 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
21743 && crtl->uses_const_pool
21744 && info_ptr->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
21745 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
21746 else
21747 first_gp = info_ptr->first_gp_reg_save;
21748
21749 info_ptr->gp_size = reg_size * (32 - first_gp);
21750
21751 /* For the SPE, we have an additional upper 32-bits on each GPR.
21752 Ideally we should save the entire 64-bits only when the upper
21753 half is used in SIMD instructions. Since we only record
21754 registers live (not the size they are used in), this proves
21755 difficult because we'd have to traverse the instruction chain at
21756 the right time, taking reload into account. This is a real pain,
21757 so we opt to save all of the GPRs in 64-bits if even one register
21758 is used in 64-bits. Otherwise, all the registers in the frame
21759 get saved in 32-bits.
21760
21761 So, when we save all GPRs (except the SP) in 64-bits, the
21762 traditional GP save area will be empty.
21763 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
21764 info_ptr->gp_size = 0;
21765
21766 info_ptr->first_fp_reg_save = first_fp_reg_to_save ();
21767 info_ptr->fp_size = 8 * (64 - info_ptr->first_fp_reg_save);
21768
21769 info_ptr->first_altivec_reg_save = first_altivec_reg_to_save ();
21770 info_ptr->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
21771 - info_ptr->first_altivec_reg_save);
21772
21773 /* Does this function call anything? */
21774 info_ptr->calls_p = (! crtl->is_leaf
21775 || cfun->machine->ra_needs_full_frame);
21776
21777 /* Determine if we need to save the condition code registers. */
21778 if (df_regs_ever_live_p (CR2_REGNO)
21779 || df_regs_ever_live_p (CR3_REGNO)
21780 || df_regs_ever_live_p (CR4_REGNO))
21781 {
21782 info_ptr->cr_save_p = 1;
21783 if (DEFAULT_ABI == ABI_V4)
21784 info_ptr->cr_size = reg_size;
21785 }
21786
21787 /* If the current function calls __builtin_eh_return, then we need
21788 to allocate stack space for registers that will hold data for
21789 the exception handler. */
21790 if (crtl->calls_eh_return)
21791 {
21792 unsigned int i;
21793 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
21794 continue;
21795
21796 /* SPE saves EH registers in 64-bits. */
21797 ehrd_size = i * (TARGET_SPE_ABI
21798 && info_ptr->spe_64bit_regs_used != 0
21799 ? UNITS_PER_SPE_WORD : UNITS_PER_WORD);
21800 }
21801 else
21802 ehrd_size = 0;
21803
21804 /* In the ELFv2 ABI, we also need to allocate space for separate
21805 CR field save areas if the function calls __builtin_eh_return. */
21806 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
21807 {
21808 /* This hard-codes that we have three call-saved CR fields. */
21809 ehcr_size = 3 * reg_size;
21810 /* We do *not* use the regular CR save mechanism. */
21811 info_ptr->cr_save_p = 0;
21812 }
21813 else
21814 ehcr_size = 0;
21815
21816 /* Determine various sizes. */
21817 info_ptr->reg_size = reg_size;
21818 info_ptr->fixed_size = RS6000_SAVE_AREA;
21819 info_ptr->vars_size = RS6000_ALIGN (get_frame_size (), 8);
21820 info_ptr->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
21821 TARGET_ALTIVEC ? 16 : 8);
21822 if (FRAME_GROWS_DOWNWARD)
21823 info_ptr->vars_size
21824 += RS6000_ALIGN (info_ptr->fixed_size + info_ptr->vars_size
21825 + info_ptr->parm_size,
21826 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
21827 - (info_ptr->fixed_size + info_ptr->vars_size
21828 + info_ptr->parm_size);
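/* A worked example of the adjustment above (a sketch, assuming the
   usual round-up RS6000_ALIGN (n, a) = (n + a - 1) & ~(a - 1) and a
   16-byte ABI_STACK_BOUNDARY): with fixed_size + vars_size + parm_size
   = 100, the aligned total is 112, so 12 bytes of padding are folded
   into vars_size.  */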
21829
21830 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
21831 info_ptr->spe_gp_size = 8 * (32 - first_gp);
21832 else
21833 info_ptr->spe_gp_size = 0;
21834
21835 if (TARGET_ALTIVEC_ABI)
21836 info_ptr->vrsave_mask = compute_vrsave_mask ();
21837 else
21838 info_ptr->vrsave_mask = 0;
21839
21840 if (TARGET_ALTIVEC_VRSAVE && info_ptr->vrsave_mask)
21841 info_ptr->vrsave_size = 4;
21842 else
21843 info_ptr->vrsave_size = 0;
21844
21845 compute_save_world_info (info_ptr);
21846
21847 /* Calculate the offsets. */
21848 switch (DEFAULT_ABI)
21849 {
21850 case ABI_NONE:
21851 default:
21852 gcc_unreachable ();
21853
21854 case ABI_AIX:
21855 case ABI_ELFv2:
21856 case ABI_DARWIN:
21857 info_ptr->fp_save_offset = - info_ptr->fp_size;
21858 info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
21859
21860 if (TARGET_ALTIVEC_ABI)
21861 {
21862 info_ptr->vrsave_save_offset
21863 = info_ptr->gp_save_offset - info_ptr->vrsave_size;
21864
21865 /* Align stack so vector save area is on a quadword boundary.
21866 The padding goes above the vectors. */
21867 if (info_ptr->altivec_size != 0)
21868 info_ptr->altivec_padding_size
21869 = info_ptr->vrsave_save_offset & 0xF;
21870 else
21871 info_ptr->altivec_padding_size = 0;
21872
21873 info_ptr->altivec_save_offset
21874 = info_ptr->vrsave_save_offset
21875 - info_ptr->altivec_padding_size
21876 - info_ptr->altivec_size;
21877 gcc_assert (info_ptr->altivec_size == 0
21878 || info_ptr->altivec_save_offset % 16 == 0);
21879
21880 /* Adjust for AltiVec case. */
21881 info_ptr->ehrd_offset = info_ptr->altivec_save_offset - ehrd_size;
21882 }
21883 else
21884 info_ptr->ehrd_offset = info_ptr->gp_save_offset - ehrd_size;
21885
21886 info_ptr->ehcr_offset = info_ptr->ehrd_offset - ehcr_size;
21887 info_ptr->cr_save_offset = reg_size; /* first word when 64-bit. */
21888 info_ptr->lr_save_offset = 2*reg_size;
21889 break;
21890
21891 case ABI_V4:
21892 info_ptr->fp_save_offset = - info_ptr->fp_size;
21893 info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
21894 info_ptr->cr_save_offset = info_ptr->gp_save_offset - info_ptr->cr_size;
21895
21896 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
21897 {
21898 /* Align stack so SPE GPR save area is aligned on a
21899 double-word boundary. */
21900 if (info_ptr->spe_gp_size != 0 && info_ptr->cr_save_offset != 0)
21901 info_ptr->spe_padding_size
21902 = 8 - (-info_ptr->cr_save_offset % 8);
21903 else
21904 info_ptr->spe_padding_size = 0;
21905
21906 info_ptr->spe_gp_save_offset
21907 = info_ptr->cr_save_offset
21908 - info_ptr->spe_padding_size
21909 - info_ptr->spe_gp_size;
21910
21911 /* Adjust for SPE case. */
21912 info_ptr->ehrd_offset = info_ptr->spe_gp_save_offset;
21913 }
21914 else if (TARGET_ALTIVEC_ABI)
21915 {
21916 info_ptr->vrsave_save_offset
21917 = info_ptr->cr_save_offset - info_ptr->vrsave_size;
21918
21919 /* Align stack so vector save area is on a quadword boundary. */
21920 if (info_ptr->altivec_size != 0)
21921 info_ptr->altivec_padding_size
21922 = 16 - (-info_ptr->vrsave_save_offset % 16);
21923 else
21924 info_ptr->altivec_padding_size = 0;
21925
21926 info_ptr->altivec_save_offset
21927 = info_ptr->vrsave_save_offset
21928 - info_ptr->altivec_padding_size
21929 - info_ptr->altivec_size;
21930
21931 /* Adjust for AltiVec case. */
21932 info_ptr->ehrd_offset = info_ptr->altivec_save_offset;
21933 }
21934 else
21935 info_ptr->ehrd_offset = info_ptr->cr_save_offset;
21936 info_ptr->ehrd_offset -= ehrd_size;
21937 info_ptr->lr_save_offset = reg_size;
21938 break;
21939 }
21940
21941 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
21942 info_ptr->save_size = RS6000_ALIGN (info_ptr->fp_size
21943 + info_ptr->gp_size
21944 + info_ptr->altivec_size
21945 + info_ptr->altivec_padding_size
21946 + info_ptr->spe_gp_size
21947 + info_ptr->spe_padding_size
21948 + ehrd_size
21949 + ehcr_size
21950 + info_ptr->cr_size
21951 + info_ptr->vrsave_size,
21952 save_align);
21953
21954 non_fixed_size = (info_ptr->vars_size
21955 + info_ptr->parm_size
21956 + info_ptr->save_size);
21957
21958 info_ptr->total_size = RS6000_ALIGN (non_fixed_size + info_ptr->fixed_size,
21959 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
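/* A small worked example (a sketch: 64-bit ELFv2, saving only f30-f31
   and r30-r31, with no AltiVec, SPE, or EH state): fp_size = 16 and
   gp_size = 16 give fp_save_offset = -16 and gp_save_offset = -32
   relative to the frame base (the "old SP" of the diagrams above),
   and save_size = RS6000_ALIGN (32, save_align) = 32 whether
   save_align is 8 or 16.  */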
21960
21961 /* Determine if we need to save the link register. */
21962 if (info_ptr->calls_p
21963 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
21964 && crtl->profile
21965 && !TARGET_PROFILE_KERNEL)
21966 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
21967 #ifdef TARGET_RELOCATABLE
21968 || (TARGET_RELOCATABLE && (get_pool_size () != 0))
21969 #endif
21970 || rs6000_ra_ever_killed ())
21971 info_ptr->lr_save_p = 1;
21972
21973 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
21974 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
21975 && call_used_regs[STATIC_CHAIN_REGNUM]);
21976 info_ptr->savres_strategy = rs6000_savres_strategy (info_ptr,
21977 using_static_chain_p);
21978
21979 if (!(info_ptr->savres_strategy & SAVE_INLINE_GPRS)
21980 || !(info_ptr->savres_strategy & SAVE_INLINE_FPRS)
21981 || !(info_ptr->savres_strategy & SAVE_INLINE_VRS)
21982 || !(info_ptr->savres_strategy & REST_INLINE_GPRS)
21983 || !(info_ptr->savres_strategy & REST_INLINE_FPRS)
21984 || !(info_ptr->savres_strategy & REST_INLINE_VRS))
21985 info_ptr->lr_save_p = 1;
21986
21987 if (info_ptr->lr_save_p)
21988 df_set_regs_ever_live (LR_REGNO, true);
21989
21990 /* Determine if we need to allocate any stack frame:
21991
21992 For AIX we need to push the stack if a frame pointer is needed
21993 (because the stack might be dynamically adjusted), if we are
21994 debugging, if we make calls, or if the sum of fp_save, gp_save,
21995 and local variables are more than the space needed to save all
21996 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
21997 + 18*8 = 288 (GPR13 reserved).
21998
21999 For V.4 we don't have the stack cushion that AIX uses, but assume
22000 that the debugger can handle stackless frames. */
22001
22002 if (info_ptr->calls_p)
22003 info_ptr->push_p = 1;
22004
22005 else if (DEFAULT_ABI == ABI_V4)
22006 info_ptr->push_p = non_fixed_size != 0;
22007
22008 else if (frame_pointer_needed)
22009 info_ptr->push_p = 1;
22010
22011 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
22012 info_ptr->push_p = 1;
22013
22014 else
22015 info_ptr->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
22016
22017 /* Zero offsets if we're not saving those registers. */
22018 if (info_ptr->fp_size == 0)
22019 info_ptr->fp_save_offset = 0;
22020
22021 if (info_ptr->gp_size == 0)
22022 info_ptr->gp_save_offset = 0;
22023
22024 if (! TARGET_ALTIVEC_ABI || info_ptr->altivec_size == 0)
22025 info_ptr->altivec_save_offset = 0;
22026
22027 /* Zero VRSAVE offset if not saved and restored. */
22028 if (! TARGET_ALTIVEC_VRSAVE || info_ptr->vrsave_mask == 0)
22029 info_ptr->vrsave_save_offset = 0;
22030
22031 if (! TARGET_SPE_ABI
22032 || info_ptr->spe_64bit_regs_used == 0
22033 || info_ptr->spe_gp_size == 0)
22034 info_ptr->spe_gp_save_offset = 0;
22035
22036 if (! info_ptr->lr_save_p)
22037 info_ptr->lr_save_offset = 0;
22038
22039 if (! info_ptr->cr_save_p)
22040 info_ptr->cr_save_offset = 0;
22041
22042 return info_ptr;
22043 }
22044
22045 /* Return true if the current function uses any GPRs in 64-bit SIMD
22046 mode. */
22047
22048 static bool
22049 spe_func_has_64bit_regs_p (void)
22050 {
22051 rtx_insn *insns, *insn;
22052
22053 /* Functions that save and restore all the call-saved registers will
22054 need to save/restore the registers in 64-bits. */
22055 if (crtl->calls_eh_return
22056 || cfun->calls_setjmp
22057 || crtl->has_nonlocal_goto)
22058 return true;
22059
22060 insns = get_insns ();
22061
22062 for (insn = NEXT_INSN (insns); insn != NULL_RTX; insn = NEXT_INSN (insn))
22063 {
22064 if (INSN_P (insn))
22065 {
22066 rtx i;
22067
22068 /* FIXME: This should be implemented with attributes...
22069
22070 (set_attr "spe64" "true")....then,
22071 if (get_spe64(insn)) return true;
22072
22073 It's the only reliable way to do the stuff below. */
22074
22075 i = PATTERN (insn);
22076 if (GET_CODE (i) == SET)
22077 {
22078 machine_mode mode = GET_MODE (SET_SRC (i));
22079
22080 if (SPE_VECTOR_MODE (mode))
22081 return true;
22082 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode))
22083 return true;
22084 }
22085 }
22086 }
22087
22088 return false;
22089 }
22090
22091 static void
22092 debug_stack_info (rs6000_stack_t *info)
22093 {
22094 const char *abi_string;
22095
22096 if (! info)
22097 info = rs6000_stack_info ();
22098
22099 fprintf (stderr, "\nStack information for function %s:\n",
22100 ((current_function_decl && DECL_NAME (current_function_decl))
22101 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
22102 : "<unknown>"));
22103
22104 switch (info->abi)
22105 {
22106 default: abi_string = "Unknown"; break;
22107 case ABI_NONE: abi_string = "NONE"; break;
22108 case ABI_AIX: abi_string = "AIX"; break;
22109 case ABI_ELFv2: abi_string = "ELFv2"; break;
22110 case ABI_DARWIN: abi_string = "Darwin"; break;
22111 case ABI_V4: abi_string = "V.4"; break;
22112 }
22113
22114 fprintf (stderr, "\tABI = %5s\n", abi_string);
22115
22116 if (TARGET_ALTIVEC_ABI)
22117 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
22118
22119 if (TARGET_SPE_ABI)
22120 fprintf (stderr, "\tSPE ABI extensions enabled.\n");
22121
22122 if (info->first_gp_reg_save != 32)
22123 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
22124
22125 if (info->first_fp_reg_save != 64)
22126 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
22127
22128 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
22129 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
22130 info->first_altivec_reg_save);
22131
22132 if (info->lr_save_p)
22133 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
22134
22135 if (info->cr_save_p)
22136 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
22137
22138 if (info->vrsave_mask)
22139 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
22140
22141 if (info->push_p)
22142 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
22143
22144 if (info->calls_p)
22145 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
22146
22147 if (info->gp_save_offset)
22148 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
22149
22150 if (info->fp_save_offset)
22151 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
22152
22153 if (info->altivec_save_offset)
22154 fprintf (stderr, "\taltivec_save_offset = %5d\n",
22155 info->altivec_save_offset);
22156
22157 if (info->spe_gp_save_offset)
22158 fprintf (stderr, "\tspe_gp_save_offset = %5d\n",
22159 info->spe_gp_save_offset);
22160
22161 if (info->vrsave_save_offset)
22162 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
22163 info->vrsave_save_offset);
22164
22165 if (info->lr_save_offset)
22166 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
22167
22168 if (info->cr_save_offset)
22169 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
22170
22171 if (info->varargs_save_offset)
22172 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
22173
22174 if (info->total_size)
22175 fprintf (stderr, "\ttotal_size = " HOST_WIDE_INT_PRINT_DEC"\n",
22176 info->total_size);
22177
22178 if (info->vars_size)
22179 fprintf (stderr, "\tvars_size = " HOST_WIDE_INT_PRINT_DEC"\n",
22180 info->vars_size);
22181
22182 if (info->parm_size)
22183 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
22184
22185 if (info->fixed_size)
22186 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
22187
22188 if (info->gp_size)
22189 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
22190
22191 if (info->spe_gp_size)
22192 fprintf (stderr, "\tspe_gp_size = %5d\n", info->spe_gp_size);
22193
22194 if (info->fp_size)
22195 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
22196
22197 if (info->altivec_size)
22198 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
22199
22200 if (info->vrsave_size)
22201 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
22202
22203 if (info->altivec_padding_size)
22204 fprintf (stderr, "\taltivec_padding_size= %5d\n",
22205 info->altivec_padding_size);
22206
22207 if (info->spe_padding_size)
22208 fprintf (stderr, "\tspe_padding_size = %5d\n",
22209 info->spe_padding_size);
22210
22211 if (info->cr_size)
22212 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
22213
22214 if (info->save_size)
22215 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
22216
22217 if (info->reg_size != 4)
22218 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
22219
22220 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
22221
22222 fprintf (stderr, "\n");
22223 }
22224
22225 rtx
22226 rs6000_return_addr (int count, rtx frame)
22227 {
22228 /* Currently we don't optimize very well between prologue and body
22229 code, and for PIC the generated code can actually be quite bad, so
22230 don't try to be too clever here. */
22231 if (count != 0
22232 || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
22233 {
22234 cfun->machine->ra_needs_full_frame = 1;
22235
22236 return
22237 gen_rtx_MEM
22238 (Pmode,
22239 memory_address
22240 (Pmode,
22241 plus_constant (Pmode,
22242 copy_to_reg
22243 (gen_rtx_MEM (Pmode,
22244 memory_address (Pmode, frame))),
22245 RETURN_ADDRESS_OFFSET)));
22246 }
22247
22248 cfun->machine->ra_need_lr = 1;
22249 return get_hard_reg_initial_val (Pmode, LR_REGNO);
22250 }
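/* For illustration (a sketch): __builtin_return_address (0) normally
   reduces to the value LR had on entry, while the COUNT != 0 expansion
   above amounts to

     *(void **) (*(void **) frame + RETURN_ADDRESS_OFFSET)

   i.e. follow the back chain once from FRAME and load the LR save
   slot, relying on every intervening frame actually having been
   pushed.  */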
22251
22252 /* Say whether a function is a candidate for sibcall handling or not. */
22253
22254 static bool
22255 rs6000_function_ok_for_sibcall (tree decl, tree exp)
22256 {
22257 tree fntype;
22258
22259 if (decl)
22260 fntype = TREE_TYPE (decl);
22261 else
22262 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
22263
22264 /* We can't do it if the called function has more vector parameters
22265 than the current function; there's nowhere to put the VRsave code. */
22266 if (TARGET_ALTIVEC_ABI
22267 && TARGET_ALTIVEC_VRSAVE
22268 && !(decl && decl == current_function_decl))
22269 {
22270 function_args_iterator args_iter;
22271 tree type;
22272 int nvreg = 0;
22273
22274 /* Functions with vector parameters are required to have a
22275 prototype, so the argument type info must be available
22276 here. */
22277 FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
22278 if (TREE_CODE (type) == VECTOR_TYPE
22279 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
22280 nvreg++;
22281
22282 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
22283 if (TREE_CODE (type) == VECTOR_TYPE
22284 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
22285 nvreg--;
22286
22287 if (nvreg > 0)
22288 return false;
22289 }
22290
22291 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
22292 functions, because the callee may have a different TOC pointer from
22293 the caller and there's no way to ensure we restore the TOC when
22294 we return. With the secure-plt SYSV ABI we can't make non-local
22295 calls when -fpic/PIC because the plt call stubs use r30. */
22296 if (DEFAULT_ABI == ABI_DARWIN
22297 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
22298 && decl
22299 && !DECL_EXTERNAL (decl)
22300 && (*targetm.binds_local_p) (decl))
22301 || (DEFAULT_ABI == ABI_V4
22302 && (!TARGET_SECURE_PLT
22303 || !flag_pic
22304 || (decl
22305 && (*targetm.binds_local_p) (decl)))))
22306 {
22307 tree attr_list = TYPE_ATTRIBUTES (fntype);
22308
22309 if (!lookup_attribute ("longcall", attr_list)
22310 || lookup_attribute ("shortcall", attr_list))
22311 return true;
22312 }
22313
22314 return false;
22315 }
22316
22317 static int
22318 rs6000_ra_ever_killed (void)
22319 {
22320 rtx_insn *top;
22321 rtx reg;
22322 rtx_insn *insn;
22323
22324 if (cfun->is_thunk)
22325 return 0;
22326
22327 if (cfun->machine->lr_save_state)
22328 return cfun->machine->lr_save_state - 1;
22329
22330 /* regs_ever_live has LR marked as used if any sibcalls are present,
22331 but this should not force saving and restoring in the
22332 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
22333 clobbers LR, so that is inappropriate. */
22334
22335 /* Also, the prologue can generate a store into LR that
22336 doesn't really count, like this:
22337
22338 move LR->R0
22339 bcl to set PIC register
22340 move LR->R31
22341 move R0->LR
22342
22343 When we're called from the epilogue, we need to avoid counting
22344 this as a store. */
22345
22346 push_topmost_sequence ();
22347 top = get_insns ();
22348 pop_topmost_sequence ();
22349 reg = gen_rtx_REG (Pmode, LR_REGNO);
22350
22351 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
22352 {
22353 if (INSN_P (insn))
22354 {
22355 if (CALL_P (insn))
22356 {
22357 if (!SIBLING_CALL_P (insn))
22358 return 1;
22359 }
22360 else if (find_regno_note (insn, REG_INC, LR_REGNO))
22361 return 1;
22362 else if (set_of (reg, insn) != NULL_RTX
22363 && !prologue_epilogue_contains (insn))
22364 return 1;
22365 }
22366 }
22367 return 0;
22368 }
22369 \f
22370 /* Emit instructions needed to load the TOC register.
22371 This is only needed when TARGET_TOC, TARGET_MINIMAL_TOC, and there is
22372 a constant pool; or for SVR4 -fpic. */
22373
22374 void
22375 rs6000_emit_load_toc_table (int fromprolog)
22376 {
22377 rtx dest;
22378 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
22379
22380 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
22381 {
22382 char buf[30];
22383 rtx lab, tmp1, tmp2, got;
22384
22385 lab = gen_label_rtx ();
22386 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
22387 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
22388 if (flag_pic == 2)
22389 got = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
22390 else
22391 got = rs6000_got_sym ();
22392 tmp1 = tmp2 = dest;
22393 if (!fromprolog)
22394 {
22395 tmp1 = gen_reg_rtx (Pmode);
22396 tmp2 = gen_reg_rtx (Pmode);
22397 }
22398 emit_insn (gen_load_toc_v4_PIC_1 (lab));
22399 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
22400 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
22401 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
22402 }
22403 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
22404 {
22405 emit_insn (gen_load_toc_v4_pic_si ());
22406 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
22407 }
22408 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
22409 {
22410 char buf[30];
22411 rtx temp0 = (fromprolog
22412 ? gen_rtx_REG (Pmode, 0)
22413 : gen_reg_rtx (Pmode));
22414
22415 if (fromprolog)
22416 {
22417 rtx symF, symL;
22418
22419 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
22420 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
22421
22422 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
22423 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
22424
22425 emit_insn (gen_load_toc_v4_PIC_1 (symF));
22426 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
22427 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
22428 }
22429 else
22430 {
22431 rtx tocsym, lab;
22432
22433 tocsym = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
22434 lab = gen_label_rtx ();
22435 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
22436 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
22437 if (TARGET_LINK_STACK)
22438 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
22439 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
22440 }
22441 emit_insn (gen_addsi3 (dest, temp0, dest));
22442 }
22443 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
22444 {
22445	      /* This is for AIX-style TOC code running as non-PIC ELF32.  */
22446 char buf[30];
22447 rtx realsym;
22448 ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
22449 realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
22450
22451 emit_insn (gen_elf_high (dest, realsym));
22452 emit_insn (gen_elf_low (dest, dest, realsym));
22453 }
22454 else
22455 {
22456 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
22457
22458 if (TARGET_32BIT)
22459 emit_insn (gen_load_toc_aix_si (dest));
22460 else
22461 emit_insn (gen_load_toc_aix_di (dest));
22462 }
22463 }
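/* For illustration only (a sketch, not emitted verbatim): the V.4
   -fPIC path above boils down to a "bcl 20,31,<next>; mflr 30" pair
   to obtain the current address, followed by add/load arithmetic
   against an .LCTOC/GOT label, while the AIX/ELFv2 path is a single
   lwz/ld that reloads the TOC pointer.  The precise instructions come
   from the load_toc_* patterns in rs6000.md.  */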
22464
22465 /* Emit instructions to restore the link register after determining where
22466 its value has been stored. */
22467
22468 void
22469 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
22470 {
22471 rs6000_stack_t *info = rs6000_stack_info ();
22472 rtx operands[2];
22473
22474 operands[0] = source;
22475 operands[1] = scratch;
22476
22477 if (info->lr_save_p)
22478 {
22479 rtx frame_rtx = stack_pointer_rtx;
22480 HOST_WIDE_INT sp_offset = 0;
22481 rtx tmp;
22482
22483 if (frame_pointer_needed
22484 || cfun->calls_alloca
22485 || info->total_size > 32767)
22486 {
22487 tmp = gen_frame_mem (Pmode, frame_rtx);
22488 emit_move_insn (operands[1], tmp);
22489 frame_rtx = operands[1];
22490 }
22491 else if (info->push_p)
22492 sp_offset = info->total_size;
22493
22494 tmp = plus_constant (Pmode, frame_rtx,
22495 info->lr_save_offset + sp_offset);
22496 tmp = gen_frame_mem (Pmode, tmp);
22497 emit_move_insn (tmp, operands[0]);
22498 }
22499 else
22500 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
22501
22502 /* Freeze lr_save_p. We've just emitted rtl that depends on the
22503 state of lr_save_p so any change from here on would be a bug. In
22504 particular, stop rs6000_ra_ever_killed from considering the SET
22505 of lr we may have added just above. */
22506 cfun->machine->lr_save_state = info->lr_save_p + 1;
22507 }
22508
22509 static GTY(()) alias_set_type set = -1;
22510
22511 alias_set_type
22512 get_TOC_alias_set (void)
22513 {
22514 if (set == -1)
22515 set = new_alias_set ();
22516 return set;
22517 }
22518
22519 /* This returns nonzero if the current function uses the TOC. This is
22520 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
22521 is generated by the ABI_V4 load_toc_* patterns. */
22522 #if TARGET_ELF
22523 static int
22524 uses_TOC (void)
22525 {
22526 rtx_insn *insn;
22527
22528 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
22529 if (INSN_P (insn))
22530 {
22531 rtx pat = PATTERN (insn);
22532 int i;
22533
22534 if (GET_CODE (pat) == PARALLEL)
22535 for (i = 0; i < XVECLEN (pat, 0); i++)
22536 {
22537 rtx sub = XVECEXP (pat, 0, i);
22538 if (GET_CODE (sub) == USE)
22539 {
22540 sub = XEXP (sub, 0);
22541 if (GET_CODE (sub) == UNSPEC
22542 && XINT (sub, 1) == UNSPEC_TOC)
22543 return 1;
22544 }
22545 }
22546 }
22547 return 0;
22548 }
22549 #endif
22550
22551 rtx
22552 create_TOC_reference (rtx symbol, rtx largetoc_reg)
22553 {
22554 rtx tocrel, tocreg, hi;
22555
22556 if (TARGET_DEBUG_ADDR)
22557 {
22558 if (GET_CODE (symbol) == SYMBOL_REF)
22559 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
22560 XSTR (symbol, 0));
22561 else
22562 {
22563 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
22564 GET_RTX_NAME (GET_CODE (symbol)));
22565 debug_rtx (symbol);
22566 }
22567 }
22568
22569 if (!can_create_pseudo_p ())
22570 df_set_regs_ever_live (TOC_REGISTER, true);
22571
22572 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
22573 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
22574 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
22575 return tocrel;
22576
22577 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
22578 if (largetoc_reg != NULL)
22579 {
22580 emit_move_insn (largetoc_reg, hi);
22581 hi = largetoc_reg;
22582 }
22583 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
22584 }
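/* Illustrative only: with -mcmodel=small the UNSPEC_TOCREL address
   returned above prints as a 16-bit TOC-relative access such as

	ld 9,sym@toc(2)

   whereas the HIGH/LO_SUM split built for larger code models
   corresponds to the familiar two-instruction form

	addis 9,2,sym@toc@ha
	ld 9,sym@toc@l(9)

   The register numbers here are made-up examples.  */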
22585
22586 /* Issue assembly directives that create a reference to the given DWARF
22587 FRAME_TABLE_LABEL from the current function section. */
22588 void
22589 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
22590 {
22591 fprintf (asm_out_file, "\t.ref %s\n",
22592 (* targetm.strip_name_encoding) (frame_table_label));
22593 }
22594 \f
22595 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
22596 and the change to the stack pointer. */
22597
22598 static void
22599 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
22600 {
22601 rtvec p;
22602 int i;
22603 rtx regs[3];
22604
22605 i = 0;
22606 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
22607 if (hard_frame_needed)
22608 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
22609 if (!(REGNO (fp) == STACK_POINTER_REGNUM
22610 || (hard_frame_needed
22611 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
22612 regs[i++] = fp;
22613
22614 p = rtvec_alloc (i);
22615 while (--i >= 0)
22616 {
22617 rtx mem = gen_frame_mem (BLKmode, regs[i]);
22618 RTVEC_ELT (p, i) = gen_rtx_SET (mem, const0_rtx);
22619 }
22620
22621 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
22622 }
22623
22624	/* Emit the correct code for allocating stack space, as insns.
22625	   If COPY_REG, leave a copy of the old frame base there, offset by
22626	   COPY_OFF.  The generated code may use hard register 0 as a temporary.  */
22627
22628 static void
22629 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
22630 {
22631 rtx_insn *insn;
22632 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
22633 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
22634 rtx todec = gen_int_mode (-size, Pmode);
22635 rtx par, set, mem;
22636
22637 if (INTVAL (todec) != -size)
22638 {
22639 warning (0, "stack frame too large");
22640 emit_insn (gen_trap ());
22641 return;
22642 }
22643
22644 if (crtl->limit_stack)
22645 {
22646 if (REG_P (stack_limit_rtx)
22647 && REGNO (stack_limit_rtx) > 1
22648 && REGNO (stack_limit_rtx) <= 31)
22649 {
22650 emit_insn (gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size)));
22651 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
22652 const0_rtx));
22653 }
22654 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
22655 && TARGET_32BIT
22656 && DEFAULT_ABI == ABI_V4)
22657 {
22658 rtx toload = gen_rtx_CONST (VOIDmode,
22659 gen_rtx_PLUS (Pmode,
22660 stack_limit_rtx,
22661 GEN_INT (size)));
22662
22663 emit_insn (gen_elf_high (tmp_reg, toload));
22664 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
22665 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
22666 const0_rtx));
22667 }
22668 else
22669 warning (0, "stack limit expression is not supported");
22670 }
22671
22672 if (copy_reg)
22673 {
22674 if (copy_off != 0)
22675 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
22676 else
22677 emit_move_insn (copy_reg, stack_reg);
22678 }
22679
22680 if (size > 32767)
22681 {
22682 /* Need a note here so that try_split doesn't get confused. */
22683 if (get_last_insn () == NULL_RTX)
22684 emit_note (NOTE_INSN_DELETED);
22685 insn = emit_move_insn (tmp_reg, todec);
22686 try_split (PATTERN (insn), insn, 0);
22687 todec = tmp_reg;
22688 }
22689
22690 insn = emit_insn (TARGET_32BIT
22691 ? gen_movsi_update_stack (stack_reg, stack_reg,
22692 todec, stack_reg)
22693 : gen_movdi_di_update_stack (stack_reg, stack_reg,
22694 todec, stack_reg));
22695 /* Since we didn't use gen_frame_mem to generate the MEM, grab
22696 it now and set the alias set/attributes. The above gen_*_update
22697 calls will generate a PARALLEL with the MEM set being the first
22698 operation. */
22699 par = PATTERN (insn);
22700 gcc_assert (GET_CODE (par) == PARALLEL);
22701 set = XVECEXP (par, 0, 0);
22702 gcc_assert (GET_CODE (set) == SET);
22703 mem = SET_DEST (set);
22704 gcc_assert (MEM_P (mem));
22705 MEM_NOTRAP_P (mem) = 1;
22706 set_mem_alias_set (mem, get_frame_alias_set ());
22707
22708 RTX_FRAME_RELATED_P (insn) = 1;
22709 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
22710 gen_rtx_SET (stack_reg, gen_rtx_PLUS (Pmode, stack_reg,
22711 GEN_INT (-size))));
22712 }
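/* A rough sketch of what the update insn above expands to: for frames
   no larger than 32767 bytes it is a single allocate-and-backchain
   store,

	stwu 1,-SIZE(1)		(32-bit)
	stdu 1,-SIZE(1)		(64-bit)

   and for larger frames the negated size is first loaded into r0 and
   the indexed forms "stwux 1,1,0" / "stdux 1,1,0" are used instead.
   SIZE stands for the actual frame size.  */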
22713
22714 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
22715
22716 #if PROBE_INTERVAL > 32768
22717 #error Cannot use indexed addressing mode for stack probing
22718 #endif
22719
22720 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
22721 inclusive. These are offsets from the current stack pointer. */
22722
22723 static void
22724 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
22725 {
22726 /* See if we have a constant small number of probes to generate. If so,
22727 that's the easy case. */
22728 if (first + size <= 32768)
22729 {
22730 HOST_WIDE_INT i;
22731
22732 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
22733 it exceeds SIZE. If only one probe is needed, this will not
22734 generate any code. Then probe at FIRST + SIZE. */
22735 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
22736 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
22737 -(first + i)));
22738
22739 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
22740 -(first + size)));
22741 }
22742
22743 /* Otherwise, do the same as above, but in a loop. Note that we must be
22744 extra careful with variables wrapping around because we might be at
22745 the very top (or the very bottom) of the address space and we have
22746 to be able to handle this case properly; in particular, we use an
22747 equality test for the loop condition. */
22748 else
22749 {
22750 HOST_WIDE_INT rounded_size;
22751 rtx r12 = gen_rtx_REG (Pmode, 12);
22752 rtx r0 = gen_rtx_REG (Pmode, 0);
22753
22754 /* Sanity check for the addressing mode we're going to use. */
22755 gcc_assert (first <= 32768);
22756
22757 /* Step 1: round SIZE to the previous multiple of the interval. */
22758
22759 rounded_size = size & -PROBE_INTERVAL;
22760
22761
22762 /* Step 2: compute initial and final value of the loop counter. */
22763
22764 /* TEST_ADDR = SP + FIRST. */
22765 emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, stack_pointer_rtx,
22766 -first)));
22767
22768 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
22769 if (rounded_size > 32768)
22770 {
22771 emit_move_insn (r0, GEN_INT (-rounded_size));
22772 emit_insn (gen_rtx_SET (r0, gen_rtx_PLUS (Pmode, r12, r0)));
22773 }
22774 else
22775 emit_insn (gen_rtx_SET (r0, plus_constant (Pmode, r12,
22776 -rounded_size)));
22777
22778
22779 /* Step 3: the loop
22780
22781 while (TEST_ADDR != LAST_ADDR)
22782 {
22783 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
22784 probe at TEST_ADDR
22785 }
22786
22787 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
22788 until it is equal to ROUNDED_SIZE. */
22789
22790 if (TARGET_64BIT)
22791 emit_insn (gen_probe_stack_rangedi (r12, r12, r0));
22792 else
22793 emit_insn (gen_probe_stack_rangesi (r12, r12, r0));
22794
22795
22796 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
22797 that SIZE is equal to ROUNDED_SIZE. */
22798
22799 if (size != rounded_size)
22800 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
22801 }
22802 }
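/* Worked example, assuming a 4096-byte probe interval: for
   FIRST = 16384 and SIZE = 12288 the small-count path above emits
   probes at sp-20480 and sp-24576 from the loop, followed by the
   final probe at sp-28672 (= FIRST + SIZE).  Larger sizes visit the
   same sequence of offsets via the probe_stack_range* loop insn.  */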
22803
22804 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
22805 absolute addresses. */
22806
22807 const char *
22808 output_probe_stack_range (rtx reg1, rtx reg2)
22809 {
22810 static int labelno = 0;
22811 char loop_lab[32], end_lab[32];
22812 rtx xops[2];
22813
22814 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
22815 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
22816
22817 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
22818
22819 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
22820 xops[0] = reg1;
22821 xops[1] = reg2;
22822 if (TARGET_64BIT)
22823 output_asm_insn ("cmpd 0,%0,%1", xops);
22824 else
22825 output_asm_insn ("cmpw 0,%0,%1", xops);
22826
22827 fputs ("\tbeq 0,", asm_out_file);
22828 assemble_name_raw (asm_out_file, end_lab);
22829 fputc ('\n', asm_out_file);
22830
22831 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
22832 xops[1] = GEN_INT (-PROBE_INTERVAL);
22833 output_asm_insn ("addi %0,%0,%1", xops);
22834
22835 /* Probe at TEST_ADDR and branch. */
22836 xops[1] = gen_rtx_REG (Pmode, 0);
22837 output_asm_insn ("stw %1,0(%0)", xops);
22838 fprintf (asm_out_file, "\tb ");
22839 assemble_name_raw (asm_out_file, loop_lab);
22840 fputc ('\n', asm_out_file);
22841
22842 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
22843
22844 return "";
22845 }
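/* For reference, the loop printed above looks roughly like this
   (64-bit flavour, with r12 as TEST_ADDR, r0 as LAST_ADDR and a
   hypothetical 4096-byte probe interval):

   .LPSRL0:
	cmpd 0,12,0
	beq 0,.LPSRE0
	addi 12,12,-4096
	stw 0,0(12)
	b .LPSRL0
   .LPSRE0:
*/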
22846
22847 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
22848 with (plus:P (reg 1) VAL), and with REG2 replaced with RREG if REG2
22849 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
22850 deduce these equivalences by itself so it wasn't necessary to hold
22851 its hand so much. Don't be tempted to always supply d2_f_d_e with
22852	   the actual cfa register, i.e. r31 when we are using a hard frame
22853 pointer. That fails when saving regs off r1, and sched moves the
22854 r31 setup past the reg saves. */
22855
22856 static rtx
22857 rs6000_frame_related (rtx insn, rtx reg, HOST_WIDE_INT val,
22858 rtx reg2, rtx rreg)
22859 {
22860 rtx real, temp;
22861
22862 if (REGNO (reg) == STACK_POINTER_REGNUM && reg2 == NULL_RTX)
22863 {
22864 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
22865 int i;
22866
22867 gcc_checking_assert (val == 0);
22868 real = PATTERN (insn);
22869 if (GET_CODE (real) == PARALLEL)
22870 for (i = 0; i < XVECLEN (real, 0); i++)
22871 if (GET_CODE (XVECEXP (real, 0, i)) == SET)
22872 {
22873 rtx set = XVECEXP (real, 0, i);
22874
22875 RTX_FRAME_RELATED_P (set) = 1;
22876 }
22877 RTX_FRAME_RELATED_P (insn) = 1;
22878 return insn;
22879 }
22880
22881 /* copy_rtx will not make unique copies of registers, so we need to
22882 ensure we don't have unwanted sharing here. */
22883 if (reg == reg2)
22884 reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));
22885
22886 if (reg == rreg)
22887 reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));
22888
22889 real = copy_rtx (PATTERN (insn));
22890
22891 if (reg2 != NULL_RTX)
22892 real = replace_rtx (real, reg2, rreg);
22893
22894 if (REGNO (reg) == STACK_POINTER_REGNUM)
22895 gcc_checking_assert (val == 0);
22896 else
22897 real = replace_rtx (real, reg,
22898 gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode,
22899 STACK_POINTER_REGNUM),
22900 GEN_INT (val)));
22901
22902 /* We expect that 'real' is either a SET or a PARALLEL containing
22903 SETs (and possibly other stuff). In a PARALLEL, all the SETs
22904 are important so they all have to be marked RTX_FRAME_RELATED_P. */
22905
22906 if (GET_CODE (real) == SET)
22907 {
22908 rtx set = real;
22909
22910 temp = simplify_rtx (SET_SRC (set));
22911 if (temp)
22912 SET_SRC (set) = temp;
22913 temp = simplify_rtx (SET_DEST (set));
22914 if (temp)
22915 SET_DEST (set) = temp;
22916 if (GET_CODE (SET_DEST (set)) == MEM)
22917 {
22918 temp = simplify_rtx (XEXP (SET_DEST (set), 0));
22919 if (temp)
22920 XEXP (SET_DEST (set), 0) = temp;
22921 }
22922 }
22923 else
22924 {
22925 int i;
22926
22927 gcc_assert (GET_CODE (real) == PARALLEL);
22928 for (i = 0; i < XVECLEN (real, 0); i++)
22929 if (GET_CODE (XVECEXP (real, 0, i)) == SET)
22930 {
22931 rtx set = XVECEXP (real, 0, i);
22932
22933 temp = simplify_rtx (SET_SRC (set));
22934 if (temp)
22935 SET_SRC (set) = temp;
22936 temp = simplify_rtx (SET_DEST (set));
22937 if (temp)
22938 SET_DEST (set) = temp;
22939 if (GET_CODE (SET_DEST (set)) == MEM)
22940 {
22941 temp = simplify_rtx (XEXP (SET_DEST (set), 0));
22942 if (temp)
22943 XEXP (SET_DEST (set), 0) = temp;
22944 }
22945 RTX_FRAME_RELATED_P (set) = 1;
22946 }
22947 }
22948
22949 RTX_FRAME_RELATED_P (insn) = 1;
22950 add_reg_note (insn, REG_FRAME_RELATED_EXPR, real);
22951
22952 return insn;
22953 }
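/* Hypothetical example of the rewrite above: a save emitted as

	(set (mem:DI (plus:DI (reg 31) (const_int 16))) (reg:DI 30))

   with REG = r31 and VAL = 112 gets a REG_FRAME_RELATED_EXPR note of

	(set (mem:DI (plus:DI (reg 1) (const_int 128))) (reg:DI 30))

   i.e. the same store expressed relative to the stack pointer, which
   dwarf2out can translate into CFI.  The numbers are invented.  */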
22954
22955 /* Returns an insn that has a vrsave set operation with the
22956 appropriate CLOBBERs. */
22957
22958 static rtx
22959 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
22960 {
22961 int nclobs, i;
22962 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
22963 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
22964
22965 clobs[0]
22966 = gen_rtx_SET (vrsave,
22967 gen_rtx_UNSPEC_VOLATILE (SImode,
22968 gen_rtvec (2, reg, vrsave),
22969 UNSPECV_SET_VRSAVE));
22970
22971 nclobs = 1;
22972
22973 /* We need to clobber the registers in the mask so the scheduler
22974 does not move sets to VRSAVE before sets of AltiVec registers.
22975
22976 However, if the function receives nonlocal gotos, reload will set
22977	     all call-saved registers live.  We will end up with:
22978
22979 (set (reg 999) (mem))
22980 (parallel [ (set (reg vrsave) (unspec blah))
22981 (clobber (reg 999))])
22982
22983 The clobber will cause the store into reg 999 to be dead, and
22984 flow will attempt to delete an epilogue insn. In this case, we
22985 need an unspec use/set of the register. */
22986
22987 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
22988 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
22989 {
22990 if (!epiloguep || call_used_regs [i])
22991 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
22992 gen_rtx_REG (V4SImode, i));
22993 else
22994 {
22995 rtx reg = gen_rtx_REG (V4SImode, i);
22996
22997 clobs[nclobs++]
22998 = gen_rtx_SET (reg,
22999 gen_rtx_UNSPEC (V4SImode,
23000 gen_rtvec (1, reg), 27));
23001 }
23002 }
23003
23004 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
23005
23006 for (i = 0; i < nclobs; ++i)
23007 XVECEXP (insn, 0, i) = clobs[i];
23008
23009 return insn;
23010 }
23011
23012 static rtx
23013 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
23014 {
23015 rtx addr, mem;
23016
23017 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
23018 mem = gen_frame_mem (GET_MODE (reg), addr);
23019 return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
23020 }
23021
23022 static rtx
23023 gen_frame_load (rtx reg, rtx frame_reg, int offset)
23024 {
23025 return gen_frame_set (reg, frame_reg, offset, false);
23026 }
23027
23028 static rtx
23029 gen_frame_store (rtx reg, rtx frame_reg, int offset)
23030 {
23031 return gen_frame_set (reg, frame_reg, offset, true);
23032 }
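/* For example (a sketch, on a 64-bit target where f14 is hard reg 46):
   gen_frame_store (f14, r1, -144) builds

	(set (mem:DF (plus:DI (reg 1) (const_int -144))) (reg:DF 46))

   and gen_frame_load simply swaps the two operands.  */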
23033
23034 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
23035 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
23036
23037 static rtx
23038 emit_frame_save (rtx frame_reg, machine_mode mode,
23039 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
23040 {
23041 rtx reg, insn;
23042
23043	  /* Assert we are not given a case needing register indexed addressing.  */
23044 gcc_checking_assert (!((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
23045 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
23046 || (TARGET_E500_DOUBLE && mode == DFmode)
23047 || (TARGET_SPE_ABI
23048 && SPE_VECTOR_MODE (mode)
23049 && !SPE_CONST_OFFSET_OK (offset))));
23050
23051 reg = gen_rtx_REG (mode, regno);
23052 insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
23053 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
23054 NULL_RTX, NULL_RTX);
23055 }
23056
23057 /* Emit an offset memory reference suitable for a frame store, while
23058 converting to a valid addressing mode. */
23059
23060 static rtx
23061 gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
23062 {
23063 rtx int_rtx, offset_rtx;
23064
23065 int_rtx = GEN_INT (offset);
23066
23067 if ((TARGET_SPE_ABI && SPE_VECTOR_MODE (mode) && !SPE_CONST_OFFSET_OK (offset))
23068 || (TARGET_E500_DOUBLE && mode == DFmode))
23069 {
23070 offset_rtx = gen_rtx_REG (Pmode, FIXED_SCRATCH);
23071 emit_move_insn (offset_rtx, int_rtx);
23072 }
23073 else
23074 offset_rtx = int_rtx;
23075
23076 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, offset_rtx));
23077 }
23078
23079 #ifndef TARGET_FIX_AND_CONTINUE
23080 #define TARGET_FIX_AND_CONTINUE 0
23081 #endif
23082
23083	/* The first saved register is really GPR 13 or 14, FPR 14 and VR 20;
	   we need the smallest of these.  */
23084 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
23085 #define LAST_SAVRES_REGISTER 31
23086 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
23087
23088 enum {
23089 SAVRES_LR = 0x1,
23090 SAVRES_SAVE = 0x2,
23091 SAVRES_REG = 0x0c,
23092 SAVRES_GPR = 0,
23093 SAVRES_FPR = 4,
23094 SAVRES_VR = 8
23095 };
23096
23097 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
23098
23099 /* Temporary holding space for an out-of-line register save/restore
23100 routine name. */
23101 static char savres_routine_name[30];
23102
23103	/* Return the name for an out-of-line register save/restore routine.
23104	   SEL selects the register class and whether we save or restore.  */
23105
23106 static char *
23107 rs6000_savres_routine_name (rs6000_stack_t *info, int regno, int sel)
23108 {
23109 const char *prefix = "";
23110 const char *suffix = "";
23111
23112 /* Different targets are supposed to define
23113 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
23114 routine name could be defined with:
23115
23116 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
23117
23118	   This is a nice idea in theory, but in practice, things are
23119	   complicated in several ways:
23120
23121 - ELF targets have save/restore routines for GPRs.
23122
23123 - SPE targets use different prefixes for 32/64-bit registers, and
23124 neither of them fit neatly in the FOO_{PREFIX,SUFFIX} regimen.
23125
23126 - PPC64 ELF targets have routines for save/restore of GPRs that
23127 differ in what they do with the link register, so having a set
23128 prefix doesn't work. (We only use one of the save routines at
23129 the moment, though.)
23130
23131	   - PPC32 ELF targets have "exit" versions of the restore routines
23132 that restore the link register and can save some extra space.
23133 These require an extra suffix. (There are also "tail" versions
23134 of the restore routines and "GOT" versions of the save routines,
23135 but we don't generate those at present. Same problems apply,
23136 though.)
23137
23138 We deal with all this by synthesizing our own prefix/suffix and
23139 using that for the simple sprintf call shown above. */
23140 if (TARGET_SPE)
23141 {
23142 /* No floating point saves on the SPE. */
23143 gcc_assert ((sel & SAVRES_REG) == SAVRES_GPR);
23144
23145 if ((sel & SAVRES_SAVE))
23146 prefix = info->spe_64bit_regs_used ? "_save64gpr_" : "_save32gpr_";
23147 else
23148 prefix = info->spe_64bit_regs_used ? "_rest64gpr_" : "_rest32gpr_";
23149
23150 if ((sel & SAVRES_LR))
23151 suffix = "_x";
23152 }
23153 else if (DEFAULT_ABI == ABI_V4)
23154 {
23155 if (TARGET_64BIT)
23156 goto aix_names;
23157
23158 if ((sel & SAVRES_REG) == SAVRES_GPR)
23159 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
23160 else if ((sel & SAVRES_REG) == SAVRES_FPR)
23161 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
23162 else if ((sel & SAVRES_REG) == SAVRES_VR)
23163 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
23164 else
23165 abort ();
23166
23167 if ((sel & SAVRES_LR))
23168 suffix = "_x";
23169 }
23170 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
23171 {
23172 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
23173 /* No out-of-line save/restore routines for GPRs on AIX. */
23174 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
23175 #endif
23176
23177 aix_names:
23178 if ((sel & SAVRES_REG) == SAVRES_GPR)
23179 prefix = ((sel & SAVRES_SAVE)
23180 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
23181 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
23182 else if ((sel & SAVRES_REG) == SAVRES_FPR)
23183 {
23184 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
23185 if ((sel & SAVRES_LR))
23186 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
23187 else
23188 #endif
23189 {
23190 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
23191 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
23192 }
23193 }
23194 else if ((sel & SAVRES_REG) == SAVRES_VR)
23195 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
23196 else
23197 abort ();
23198 }
23199
23200 if (DEFAULT_ABI == ABI_DARWIN)
23201 {
23202 /* The Darwin approach is (slightly) different, in order to be
23203 compatible with code generated by the system toolchain. There is a
23204	      single symbol for the start of the save sequence, and the code here
23205 embeds an offset into that code on the basis of the first register
23206 to be saved. */
23207 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
23208 if ((sel & SAVRES_REG) == SAVRES_GPR)
23209 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
23210 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
23211 (regno - 13) * 4, prefix, regno);
23212 else if ((sel & SAVRES_REG) == SAVRES_FPR)
23213 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
23214 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
23215 else if ((sel & SAVRES_REG) == SAVRES_VR)
23216 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
23217 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
23218 else
23219 abort ();
23220 }
23221 else
23222 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
23223
23224 return savres_routine_name;
23225 }
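/* Sample results, for illustration: on 32-bit SVR4 a GPR save starting
   at r29 that also saves LR yields "_savegpr_29_x"; on AIX/ELFv2 a GPR
   restore from r14 that returns through LR yields "_restgpr0_14"; and
   a vector save starting at v20 yields "_savevr_20".  */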
23226
23227	/* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
23228	   SEL selects the register class and whether we save or restore.  */
23229
23230 static rtx
23231 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
23232 {
23233 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
23234 ? info->first_gp_reg_save
23235 : (sel & SAVRES_REG) == SAVRES_FPR
23236 ? info->first_fp_reg_save - 32
23237 : (sel & SAVRES_REG) == SAVRES_VR
23238 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
23239 : -1);
23240 rtx sym;
23241 int select = sel;
23242
23243 /* On the SPE, we never have any FPRs, but we do have 32/64-bit
23244 versions of the gpr routines. */
23245 if (TARGET_SPE_ABI && (sel & SAVRES_REG) == SAVRES_GPR
23246 && info->spe_64bit_regs_used)
23247 select ^= SAVRES_FPR ^ SAVRES_GPR;
23248
23249 /* Don't generate bogus routine names. */
23250 gcc_assert (FIRST_SAVRES_REGISTER <= regno
23251 && regno <= LAST_SAVRES_REGISTER
23252 && select >= 0 && select <= 12);
23253
23254 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
23255
23256 if (sym == NULL)
23257 {
23258 char *name;
23259
23260 name = rs6000_savres_routine_name (info, regno, sel);
23261
23262 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
23263 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
23264 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
23265 }
23266
23267 return sym;
23268 }
23269
23270 /* Emit a sequence of insns, including a stack tie if needed, for
23271 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
23272 reset the stack pointer, but move the base of the frame into
23273 reg UPDT_REGNO for use by out-of-line register restore routines. */
23274
23275 static rtx
23276 rs6000_emit_stack_reset (rs6000_stack_t *info,
23277 rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
23278 unsigned updt_regno)
23279 {
23280 rtx updt_reg_rtx;
23281
23282 /* This blockage is needed so that sched doesn't decide to move
23283 the sp change before the register restores. */
23284 if (DEFAULT_ABI == ABI_V4
23285 || (TARGET_SPE_ABI
23286 && info->spe_64bit_regs_used != 0
23287 && info->first_gp_reg_save != 32))
23288 rs6000_emit_stack_tie (frame_reg_rtx, frame_pointer_needed);
23289
23290 /* If we are restoring registers out-of-line, we will be using the
23291 "exit" variants of the restore routines, which will reset the
23292 stack for us. But we do need to point updt_reg into the
23293 right place for those routines. */
23294 updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
23295
23296 if (frame_off != 0)
23297 return emit_insn (gen_add3_insn (updt_reg_rtx,
23298 frame_reg_rtx, GEN_INT (frame_off)));
23299 else if (REGNO (frame_reg_rtx) != updt_regno)
23300 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
23301
23302 return NULL_RTX;
23303 }
23304
23305 /* Return the register number used as a pointer by out-of-line
23306 save/restore functions. */
23307
23308 static inline unsigned
23309 ptr_regno_for_savres (int sel)
23310 {
23311 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
23312 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
23313 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
23314 }
23315
23316 /* Construct a parallel rtx describing the effect of a call to an
23317 out-of-line register save/restore routine, and emit the insn
23318 or jump_insn as appropriate. */
23319
23320 static rtx
23321 rs6000_emit_savres_rtx (rs6000_stack_t *info,
23322 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
23323 machine_mode reg_mode, int sel)
23324 {
23325 int i;
23326 int offset, start_reg, end_reg, n_regs, use_reg;
23327 int reg_size = GET_MODE_SIZE (reg_mode);
23328 rtx sym;
23329 rtvec p;
23330 rtx par, insn;
23331
23332 offset = 0;
23333 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
23334 ? info->first_gp_reg_save
23335 : (sel & SAVRES_REG) == SAVRES_FPR
23336 ? info->first_fp_reg_save
23337 : (sel & SAVRES_REG) == SAVRES_VR
23338 ? info->first_altivec_reg_save
23339 : -1);
23340 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
23341 ? 32
23342 : (sel & SAVRES_REG) == SAVRES_FPR
23343 ? 64
23344 : (sel & SAVRES_REG) == SAVRES_VR
23345 ? LAST_ALTIVEC_REGNO + 1
23346 : -1);
23347 n_regs = end_reg - start_reg;
23348 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
23349 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
23350 + n_regs);
23351
23352 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
23353 RTVEC_ELT (p, offset++) = ret_rtx;
23354
23355 RTVEC_ELT (p, offset++)
23356 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
23357
23358 sym = rs6000_savres_routine_sym (info, sel);
23359 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
23360
23361 use_reg = ptr_regno_for_savres (sel);
23362 if ((sel & SAVRES_REG) == SAVRES_VR)
23363 {
23364 /* Vector regs are saved/restored using [reg+reg] addressing. */
23365 RTVEC_ELT (p, offset++)
23366 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
23367 RTVEC_ELT (p, offset++)
23368 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
23369 }
23370 else
23371 RTVEC_ELT (p, offset++)
23372 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
23373
23374 for (i = 0; i < end_reg - start_reg; i++)
23375 RTVEC_ELT (p, i + offset)
23376 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
23377 frame_reg_rtx, save_area_offset + reg_size * i,
23378 (sel & SAVRES_SAVE) != 0);
23379
23380 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
23381 RTVEC_ELT (p, i + offset)
23382 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
23383
23384 par = gen_rtx_PARALLEL (VOIDmode, p);
23385
23386 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
23387 {
23388 insn = emit_jump_insn (par);
23389 JUMP_LABEL (insn) = ret_rtx;
23390 }
23391 else
23392 insn = emit_insn (par);
23393 return insn;
23394 }
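/* Shape of the PARALLEL built above, sketched for a 32-bit SVR4 GPR
   save via "_savegpr_29_x" (element order as constructed; offsets are
   placeholders):

	(parallel [(clobber (reg:SI LR_REGNO))
		   (use (symbol_ref "_savegpr_29_x"))
		   (use (reg:SI 11))
		   (set (mem (plus (reg 1) ...)) (reg 29))
		   (set (mem (plus (reg 1) ...)) (reg 30))
		   (set (mem (plus (reg 1) ...)) (reg 31))
		   (set (mem (plus (reg 1) ...)) (reg 0))])

   the final element being the LR store added for SAVRES_LR.  For a
   restore with SAVRES_LR, a (return) comes first and the PARALLEL is
   emitted as a jump_insn.  */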
23395
23396 /* Emit code to store CR fields that need to be saved into REG. */
23397
23398 static void
23399 rs6000_emit_move_from_cr (rtx reg)
23400 {
23401	  /* Only the ELFv2 ABI allows saving just a subset of the CR fields.  */
23402 if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
23403 {
23404 int i, cr_reg[8], count = 0;
23405
23406 /* Collect CR fields that must be saved. */
23407 for (i = 0; i < 8; i++)
23408 if (save_reg_p (CR0_REGNO + i))
23409 cr_reg[count++] = i;
23410
23411 /* If it's just a single one, use mfcrf. */
23412 if (count == 1)
23413 {
23414 rtvec p = rtvec_alloc (1);
23415 rtvec r = rtvec_alloc (2);
23416 RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
23417 RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
23418 RTVEC_ELT (p, 0)
23419 = gen_rtx_SET (reg,
23420 gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));
23421
23422 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
23423 return;
23424 }
23425
23426 /* ??? It might be better to handle count == 2 / 3 cases here
23427 as well, using logical operations to combine the values. */
23428 }
23429
23430 emit_insn (gen_movesi_from_cr (reg));
23431 }
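/* Concretely (an illustration): if CR2 is the only field needing a
   save under ELFv2, the single-field path above assembles to

	mfocrf 12,32

   with mask 1 << (7 - 2) = 0x20, whereas the gen_movesi_from_cr
   fallback is a full "mfcr 12".  r12 stands in for whatever REG is.  */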
23432
23433	/* Determine whether the GP register REG is really used.  */
23434
23435 static bool
23436 rs6000_reg_live_or_pic_offset_p (int reg)
23437 {
23438	  /* If the function calls eh_return, treat all registers that would
23439	     otherwise be checked for liveness as used.  This is required for the
23440	     PIC offset register with -mminimal-toc on AIX, as it is advertised
23441	     as "fixed" for register allocation purposes in this case.  */
23442
23443 return (((crtl->calls_eh_return || df_regs_ever_live_p (reg))
23444 && (!call_used_regs[reg]
23445 || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
23446 && !TARGET_SINGLE_PIC_BASE
23447 && TARGET_TOC && TARGET_MINIMAL_TOC)))
23448 || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
23449 && !TARGET_SINGLE_PIC_BASE
23450 && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
23451 || (DEFAULT_ABI == ABI_DARWIN && flag_pic))));
23452 }
23453
23454 /* Emit function prologue as insns. */
23455
23456 void
23457 rs6000_emit_prologue (void)
23458 {
23459 rs6000_stack_t *info = rs6000_stack_info ();
23460 machine_mode reg_mode = Pmode;
23461 int reg_size = TARGET_32BIT ? 4 : 8;
23462 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
23463 rtx frame_reg_rtx = sp_reg_rtx;
23464 unsigned int cr_save_regno;
23465 rtx cr_save_rtx = NULL_RTX;
23466 rtx insn;
23467 int strategy;
23468 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
23469 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
23470 && call_used_regs[STATIC_CHAIN_REGNUM]);
23471 /* Offset to top of frame for frame_reg and sp respectively. */
23472 HOST_WIDE_INT frame_off = 0;
23473 HOST_WIDE_INT sp_off = 0;
23474
23475 #ifdef ENABLE_CHECKING
23476 /* Track and check usage of r0, r11, r12. */
23477 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
23478 #define START_USE(R) do \
23479 { \
23480 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
23481 reg_inuse |= 1 << (R); \
23482 } while (0)
23483 #define END_USE(R) do \
23484 { \
23485 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
23486 reg_inuse &= ~(1 << (R)); \
23487 } while (0)
23488 #define NOT_INUSE(R) do \
23489 { \
23490 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
23491 } while (0)
23492 #else
23493 #define START_USE(R) do {} while (0)
23494 #define END_USE(R) do {} while (0)
23495 #define NOT_INUSE(R) do {} while (0)
23496 #endif
23497
23498 if (DEFAULT_ABI == ABI_ELFv2)
23499 {
23500 cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);
23501
23502 /* With -mminimal-toc we may generate an extra use of r2 below. */
23503 if (!TARGET_SINGLE_PIC_BASE
23504 && TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
23505 cfun->machine->r2_setup_needed = true;
23506 }
23507
23508
23509 if (flag_stack_usage_info)
23510 current_function_static_stack_size = info->total_size;
23511
23512 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
23513 {
23514 HOST_WIDE_INT size = info->total_size;
23515
23516 if (crtl->is_leaf && !cfun->calls_alloca)
23517 {
23518 if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
23519 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT,
23520 size - STACK_CHECK_PROTECT);
23521 }
23522 else if (size > 0)
23523 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
23524 }
23525
23526 if (TARGET_FIX_AND_CONTINUE)
23527 {
23528 /* gdb on darwin arranges to forward a function from the old
23529 address by modifying the first 5 instructions of the function
23530 to branch to the overriding function. This is necessary to
23531 permit function pointers that point to the old function to
23532 actually forward to the new function. */
23533 emit_insn (gen_nop ());
23534 emit_insn (gen_nop ());
23535 emit_insn (gen_nop ());
23536 emit_insn (gen_nop ());
23537 emit_insn (gen_nop ());
23538 }
23539
23540 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
23541 {
23542 reg_mode = V2SImode;
23543 reg_size = 8;
23544 }
23545
23546 /* Handle world saves specially here. */
23547 if (WORLD_SAVE_P (info))
23548 {
23549 int i, j, sz;
23550 rtx treg;
23551 rtvec p;
23552 rtx reg0;
23553
23554 /* save_world expects lr in r0. */
23555 reg0 = gen_rtx_REG (Pmode, 0);
23556 if (info->lr_save_p)
23557 {
23558 insn = emit_move_insn (reg0,
23559 gen_rtx_REG (Pmode, LR_REGNO));
23560 RTX_FRAME_RELATED_P (insn) = 1;
23561 }
23562
23563 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
23564 assumptions about the offsets of various bits of the stack
23565 frame. */
23566 gcc_assert (info->gp_save_offset == -220
23567 && info->fp_save_offset == -144
23568 && info->lr_save_offset == 8
23569 && info->cr_save_offset == 4
23570 && info->push_p
23571 && info->lr_save_p
23572 && (!crtl->calls_eh_return
23573 || info->ehrd_offset == -432)
23574 && info->vrsave_save_offset == -224
23575 && info->altivec_save_offset == -416);
23576
23577 treg = gen_rtx_REG (SImode, 11);
23578 emit_move_insn (treg, GEN_INT (-info->total_size));
23579
23580 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
23581 in R11. It also clobbers R12, so beware! */
23582
23583	      /* Preserve CR2 for save_world prologues.  */
23584 sz = 5;
23585 sz += 32 - info->first_gp_reg_save;
23586 sz += 64 - info->first_fp_reg_save;
23587 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
23588 p = rtvec_alloc (sz);
23589 j = 0;
23590 RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
23591 gen_rtx_REG (SImode,
23592 LR_REGNO));
23593 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
23594 gen_rtx_SYMBOL_REF (Pmode,
23595 "*save_world"));
23596 /* We do floats first so that the instruction pattern matches
23597 properly. */
23598 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
23599 RTVEC_ELT (p, j++)
23600 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
23601 ? DFmode : SFmode,
23602 info->first_fp_reg_save + i),
23603 frame_reg_rtx,
23604 info->fp_save_offset + frame_off + 8 * i);
23605 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
23606 RTVEC_ELT (p, j++)
23607 = gen_frame_store (gen_rtx_REG (V4SImode,
23608 info->first_altivec_reg_save + i),
23609 frame_reg_rtx,
23610 info->altivec_save_offset + frame_off + 16 * i);
23611 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
23612 RTVEC_ELT (p, j++)
23613 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
23614 frame_reg_rtx,
23615 info->gp_save_offset + frame_off + reg_size * i);
23616
23617 /* CR register traditionally saved as CR2. */
23618 RTVEC_ELT (p, j++)
23619 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
23620 frame_reg_rtx, info->cr_save_offset + frame_off);
23621	      /* Record the store of R0, which holds the saved LR.  */
23622 if (info->lr_save_p)
23623 RTVEC_ELT (p, j++)
23624 = gen_frame_store (reg0,
23625 frame_reg_rtx, info->lr_save_offset + frame_off);
23626 /* Explain what happens to the stack pointer. */
23627 {
23628 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
23629 RTVEC_ELT (p, j++) = gen_rtx_SET (sp_reg_rtx, newval);
23630 }
23631
23632 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
23633 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
23634 treg, GEN_INT (-info->total_size));
23635 sp_off = frame_off = info->total_size;
23636 }
23637
23638 strategy = info->savres_strategy;
23639
23640 /* For V.4, update stack before we do any saving and set back pointer. */
23641 if (! WORLD_SAVE_P (info)
23642 && info->push_p
23643 && (DEFAULT_ABI == ABI_V4
23644 || crtl->calls_eh_return))
23645 {
23646 bool need_r11 = (TARGET_SPE
23647 ? (!(strategy & SAVE_INLINE_GPRS)
23648 && info->spe_64bit_regs_used == 0)
23649 : (!(strategy & SAVE_INLINE_FPRS)
23650 || !(strategy & SAVE_INLINE_GPRS)
23651 || !(strategy & SAVE_INLINE_VRS)));
23652 int ptr_regno = -1;
23653 rtx ptr_reg = NULL_RTX;
23654 int ptr_off = 0;
23655
23656 if (info->total_size < 32767)
23657 frame_off = info->total_size;
23658 else if (need_r11)
23659 ptr_regno = 11;
23660 else if (info->cr_save_p
23661 || info->lr_save_p
23662 || info->first_fp_reg_save < 64
23663 || info->first_gp_reg_save < 32
23664 || info->altivec_size != 0
23665 || info->vrsave_mask != 0
23666 || crtl->calls_eh_return)
23667 ptr_regno = 12;
23668 else
23669 {
23670 /* The prologue won't be saving any regs so there is no need
23671 to set up a frame register to access any frame save area.
23672 We also won't be using frame_off anywhere below, but set
23673 the correct value anyway to protect against future
23674 changes to this function. */
23675 frame_off = info->total_size;
23676 }
23677 if (ptr_regno != -1)
23678 {
23679 /* Set up the frame offset to that needed by the first
23680 out-of-line save function. */
23681 START_USE (ptr_regno);
23682 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
23683 frame_reg_rtx = ptr_reg;
23684 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
23685 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
23686 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
23687 ptr_off = info->gp_save_offset + info->gp_size;
23688 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
23689 ptr_off = info->altivec_save_offset + info->altivec_size;
23690 frame_off = -ptr_off;
23691 }
23692 rs6000_emit_allocate_stack (info->total_size, ptr_reg, ptr_off);
23693 sp_off = info->total_size;
23694 if (frame_reg_rtx != sp_reg_rtx)
23695 rs6000_emit_stack_tie (frame_reg_rtx, false);
23696 }
23697
23698 /* If we use the link register, get it into r0. */
23699 if (!WORLD_SAVE_P (info) && info->lr_save_p)
23700 {
23701 rtx addr, reg, mem;
23702
23703 reg = gen_rtx_REG (Pmode, 0);
23704 START_USE (0);
23705 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
23706 RTX_FRAME_RELATED_P (insn) = 1;
23707
23708 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
23709 | SAVE_NOINLINE_FPRS_SAVES_LR)))
23710 {
23711 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
23712 GEN_INT (info->lr_save_offset + frame_off));
23713 mem = gen_rtx_MEM (Pmode, addr);
23714 /* This should not be of rs6000_sr_alias_set, because of
23715 __builtin_return_address. */
23716
23717 insn = emit_move_insn (mem, reg);
23718 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
23719 NULL_RTX, NULL_RTX);
23720 END_USE (0);
23721 }
23722 }
23723
23724	  /* If we need to save CR, put it into r12 or r11.  Choose r12 except when
23725	     r12 will be needed by the out-of-line GPR save.  */
23726 cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
23727 && !(strategy & (SAVE_INLINE_GPRS
23728 | SAVE_NOINLINE_GPRS_SAVES_LR))
23729 ? 11 : 12);
23730 if (!WORLD_SAVE_P (info)
23731 && info->cr_save_p
23732 && REGNO (frame_reg_rtx) != cr_save_regno
23733 && !(using_static_chain_p && cr_save_regno == 11))
23734 {
23735 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
23736 START_USE (cr_save_regno);
23737 rs6000_emit_move_from_cr (cr_save_rtx);
23738 }
23739
23740	  /* Do any required saving of FPRs.  If only one or two to save, do
23741	     it ourselves.  Otherwise, call an out-of-line save function.  */
23742 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
23743 {
23744 int i;
23745 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
23746 if (save_reg_p (info->first_fp_reg_save + i))
23747 emit_frame_save (frame_reg_rtx,
23748 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
23749 ? DFmode : SFmode),
23750 info->first_fp_reg_save + i,
23751 info->fp_save_offset + frame_off + 8 * i,
23752 sp_off - frame_off);
23753 }
23754 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
23755 {
23756 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
23757 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
23758 unsigned ptr_regno = ptr_regno_for_savres (sel);
23759 rtx ptr_reg = frame_reg_rtx;
23760
23761 if (REGNO (frame_reg_rtx) == ptr_regno)
23762 gcc_checking_assert (frame_off == 0);
23763 else
23764 {
23765 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
23766 NOT_INUSE (ptr_regno);
23767 emit_insn (gen_add3_insn (ptr_reg,
23768 frame_reg_rtx, GEN_INT (frame_off)));
23769 }
23770 insn = rs6000_emit_savres_rtx (info, ptr_reg,
23771 info->fp_save_offset,
23772 info->lr_save_offset,
23773 DFmode, sel);
23774 rs6000_frame_related (insn, ptr_reg, sp_off,
23775 NULL_RTX, NULL_RTX);
23776 if (lr)
23777 END_USE (0);
23778 }
23779
23780 /* Save GPRs. This is done as a PARALLEL if we are using
23781 the store-multiple instructions. */
23782 if (!WORLD_SAVE_P (info)
23783 && TARGET_SPE_ABI
23784 && info->spe_64bit_regs_used != 0
23785 && info->first_gp_reg_save != 32)
23786 {
23787 int i;
23788 rtx spe_save_area_ptr;
23789 HOST_WIDE_INT save_off;
23790 int ool_adjust = 0;
23791
23792 /* Determine whether we can address all of the registers that need
23793 to be saved with an offset from frame_reg_rtx that fits in
23794 the small const field for SPE memory instructions. */
23795 int spe_regs_addressable
23796 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
23797 + reg_size * (32 - info->first_gp_reg_save - 1))
23798 && (strategy & SAVE_INLINE_GPRS));
23799
23800 if (spe_regs_addressable)
23801 {
23802 spe_save_area_ptr = frame_reg_rtx;
23803 save_off = frame_off;
23804 }
23805 else
23806 {
23807 /* Make r11 point to the start of the SPE save area. We need
23808 to be careful here if r11 is holding the static chain. If
23809 it is, then temporarily save it in r0. */
23810 HOST_WIDE_INT offset;
23811
23812 if (!(strategy & SAVE_INLINE_GPRS))
23813 ool_adjust = 8 * (info->first_gp_reg_save - FIRST_SAVED_GP_REGNO);
23814 offset = info->spe_gp_save_offset + frame_off - ool_adjust;
23815 spe_save_area_ptr = gen_rtx_REG (Pmode, 11);
23816 save_off = frame_off - offset;
23817
23818 if (using_static_chain_p)
23819 {
23820 rtx r0 = gen_rtx_REG (Pmode, 0);
23821
23822 START_USE (0);
23823 gcc_assert (info->first_gp_reg_save > 11);
23824
23825 emit_move_insn (r0, spe_save_area_ptr);
23826 }
23827 else if (REGNO (frame_reg_rtx) != 11)
23828 START_USE (11);
23829
23830 emit_insn (gen_addsi3 (spe_save_area_ptr,
23831 frame_reg_rtx, GEN_INT (offset)));
23832 if (!using_static_chain_p && REGNO (frame_reg_rtx) == 11)
23833 frame_off = -info->spe_gp_save_offset + ool_adjust;
23834 }
23835
23836 if ((strategy & SAVE_INLINE_GPRS))
23837 {
23838 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
23839 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
23840 emit_frame_save (spe_save_area_ptr, reg_mode,
23841 info->first_gp_reg_save + i,
23842 (info->spe_gp_save_offset + save_off
23843 + reg_size * i),
23844 sp_off - save_off);
23845 }
23846 else
23847 {
23848 insn = rs6000_emit_savres_rtx (info, spe_save_area_ptr,
23849 info->spe_gp_save_offset + save_off,
23850 0, reg_mode,
23851 SAVRES_SAVE | SAVRES_GPR);
23852
23853 rs6000_frame_related (insn, spe_save_area_ptr, sp_off - save_off,
23854 NULL_RTX, NULL_RTX);
23855 }
23856
23857 /* Move the static chain pointer back. */
23858 if (!spe_regs_addressable)
23859 {
23860 if (using_static_chain_p)
23861 {
23862 emit_move_insn (spe_save_area_ptr, gen_rtx_REG (Pmode, 0));
23863 END_USE (0);
23864 }
23865 else if (REGNO (frame_reg_rtx) != 11)
23866 END_USE (11);
23867 }
23868 }
23869 else if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
23870 {
23871 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
23872 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
23873 unsigned ptr_regno = ptr_regno_for_savres (sel);
23874 rtx ptr_reg = frame_reg_rtx;
23875 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
23876 int end_save = info->gp_save_offset + info->gp_size;
23877 int ptr_off;
23878
23879 if (!ptr_set_up)
23880 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
23881
23882 /* Need to adjust r11 (r12) if we saved any FPRs. */
23883 if (end_save + frame_off != 0)
23884 {
23885 rtx offset = GEN_INT (end_save + frame_off);
23886
23887 if (ptr_set_up)
23888 frame_off = -end_save;
23889 else
23890 NOT_INUSE (ptr_regno);
23891 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
23892 }
23893 else if (!ptr_set_up)
23894 {
23895 NOT_INUSE (ptr_regno);
23896 emit_move_insn (ptr_reg, frame_reg_rtx);
23897 }
23898 ptr_off = -end_save;
23899 insn = rs6000_emit_savres_rtx (info, ptr_reg,
23900 info->gp_save_offset + ptr_off,
23901 info->lr_save_offset + ptr_off,
23902 reg_mode, sel);
23903 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
23904 NULL_RTX, NULL_RTX);
23905 if (lr)
23906 END_USE (0);
23907 }
23908 else if (!WORLD_SAVE_P (info) && (strategy & SAVRES_MULTIPLE))
23909 {
23910 rtvec p;
23911 int i;
23912 p = rtvec_alloc (32 - info->first_gp_reg_save);
23913 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
23914 RTVEC_ELT (p, i)
23915 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
23916 frame_reg_rtx,
23917 info->gp_save_offset + frame_off + reg_size * i);
23918 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
23919 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
23920 NULL_RTX, NULL_RTX);
23921 }
23922 else if (!WORLD_SAVE_P (info))
23923 {
23924 int i;
23925 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
23926 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
23927 emit_frame_save (frame_reg_rtx, reg_mode,
23928 info->first_gp_reg_save + i,
23929 info->gp_save_offset + frame_off + reg_size * i,
23930 sp_off - frame_off);
23931 }
23932
23933 if (crtl->calls_eh_return)
23934 {
23935 unsigned int i;
23936 rtvec p;
23937
23938 for (i = 0; ; ++i)
23939 {
23940 unsigned int regno = EH_RETURN_DATA_REGNO (i);
23941 if (regno == INVALID_REGNUM)
23942 break;
23943 }
23944
23945 p = rtvec_alloc (i);
23946
23947 for (i = 0; ; ++i)
23948 {
23949 unsigned int regno = EH_RETURN_DATA_REGNO (i);
23950 if (regno == INVALID_REGNUM)
23951 break;
23952
23953 insn
23954 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
23955 sp_reg_rtx,
23956 info->ehrd_offset + sp_off + reg_size * (int) i);
23957 RTVEC_ELT (p, i) = insn;
23958 RTX_FRAME_RELATED_P (insn) = 1;
23959 }
23960
23961 insn = emit_insn (gen_blockage ());
23962 RTX_FRAME_RELATED_P (insn) = 1;
23963 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
23964 }
23965
23966	  /* In the AIX ABI we need to make sure r2 is really saved.  */
23967 if (TARGET_AIX && crtl->calls_eh_return)
23968 {
23969 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
23970 rtx save_insn, join_insn, note;
23971 long toc_restore_insn;
23972
23973 tmp_reg = gen_rtx_REG (Pmode, 11);
23974 tmp_reg_si = gen_rtx_REG (SImode, 11);
23975 if (using_static_chain_p)
23976 {
23977 START_USE (0);
23978 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
23979 }
23980 else
23981 START_USE (11);
23982 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
23983	      /* Peek at the instruction to which this function returns.  If it's
23984 restoring r2, then we know we've already saved r2. We can't
23985 unconditionally save r2 because the value we have will already
23986 be updated if we arrived at this function via a plt call or
23987 toc adjusting stub. */
23988 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
23989 toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
23990 + RS6000_TOC_SAVE_SLOT);
23991 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
23992 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
23993 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
23994 validate_condition_mode (EQ, CCUNSmode);
23995 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
23996 emit_insn (gen_rtx_SET (compare_result,
23997 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
23998 toc_save_done = gen_label_rtx ();
23999 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
24000 gen_rtx_EQ (VOIDmode, compare_result,
24001 const0_rtx),
24002 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
24003 pc_rtx);
24004 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
24005 JUMP_LABEL (jump) = toc_save_done;
24006 LABEL_NUSES (toc_save_done) += 1;
24007
24008 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
24009 TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
24010 sp_off - frame_off);
24011
24012 emit_label (toc_save_done);
24013
24014	      /* ??? If we leave SAVE_INSN marked as saving R2, then we'll
24015 have a CFG that has different saves along different paths.
24016 Move the note to a dummy blockage insn, which describes that
24017 R2 is unconditionally saved after the label. */
24018 /* ??? An alternate representation might be a special insn pattern
24019	 containing both the branch and the store.  That might give the
24020	 code that minimizes the number of DW_CFA_advance opcodes more
24021	 freedom in placing the annotations.  */
24022 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
24023 if (note)
24024 remove_note (save_insn, note);
24025 else
24026 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
24027 copy_rtx (PATTERN (save_insn)), NULL_RTX);
24028 RTX_FRAME_RELATED_P (save_insn) = 0;
24029
24030 join_insn = emit_insn (gen_blockage ());
24031 REG_NOTES (join_insn) = note;
24032 RTX_FRAME_RELATED_P (join_insn) = 1;
24033
24034 if (using_static_chain_p)
24035 {
24036 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
24037 END_USE (0);
24038 }
24039 else
24040 END_USE (11);
24041 }
24042
24043 /* Save CR if we use any that must be preserved. */
24044 if (!WORLD_SAVE_P (info) && info->cr_save_p)
24045 {
24046 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
24047 GEN_INT (info->cr_save_offset + frame_off));
24048 rtx mem = gen_frame_mem (SImode, addr);
24049
24050 /* If we didn't copy cr before, do so now using r0. */
24051 if (cr_save_rtx == NULL_RTX)
24052 {
24053 START_USE (0);
24054 cr_save_rtx = gen_rtx_REG (SImode, 0);
24055 rs6000_emit_move_from_cr (cr_save_rtx);
24056 }
24057
24058 /* Saving CR requires a two-instruction sequence: one instruction
24059 to move the CR to a general-purpose register, and a second
24060 instruction that stores the GPR to memory.
24061
24062 We do not emit any DWARF CFI records for the first of these,
24063 because we cannot properly represent the fact that CR is saved in
24064 a register. One reason is that we cannot express that multiple
24065 CR fields are saved; another reason is that on 64-bit, the size
24066 of the CR register in DWARF (4 bytes) differs from the size of
24067 a general-purpose register.
24068
24069 This means if any intervening instruction were to clobber one of
24070 the call-saved CR fields, we'd have incorrect CFI. To prevent
24071 this from happening, we mark the store to memory as a use of
24072 those CR fields, which prevents any such instruction from being
24073 scheduled in between the two instructions. */
24074 rtx crsave_v[9];
24075 int n_crsave = 0;
24076 int i;
24077
24078 crsave_v[n_crsave++] = gen_rtx_SET (mem, cr_save_rtx);
24079 for (i = 0; i < 8; i++)
24080 if (save_reg_p (CR0_REGNO + i))
24081 crsave_v[n_crsave++]
24082 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
24083
24084 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
24085 gen_rtvec_v (n_crsave, crsave_v)));
24086 END_USE (REGNO (cr_save_rtx));
24087
24088 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
24089 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
24090 so we need to construct a frame expression manually. */
24091 RTX_FRAME_RELATED_P (insn) = 1;
24092
24093 /* Update address to be stack-pointer relative, like
24094 rs6000_frame_related would do. */
24095 addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
24096 GEN_INT (info->cr_save_offset + sp_off));
24097 mem = gen_frame_mem (SImode, addr);
24098
24099 if (DEFAULT_ABI == ABI_ELFv2)
24100 {
24101 /* In the ELFv2 ABI we generate separate CFI records for each
24102 CR field that was actually saved. They all point to the
24103 same 32-bit stack slot. */
24104 rtx crframe[8];
24105 int n_crframe = 0;
24106
24107 for (i = 0; i < 8; i++)
24108 if (save_reg_p (CR0_REGNO + i))
24109 {
24110 crframe[n_crframe]
24111 = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR0_REGNO + i));
24112
24113 RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
24114 n_crframe++;
24115 }
24116
24117 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
24118 gen_rtx_PARALLEL (VOIDmode,
24119 gen_rtvec_v (n_crframe, crframe)));
24120 }
24121 else
24122 {
24123 /* In other ABIs, by convention, we use a single CR regnum to
24124 represent the fact that all call-saved CR fields are saved.
24125 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
24126 rtx set = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR2_REGNO));
24127 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
24128 }
24129 }
24130
24131 /* In the ELFv2 ABI we need to save all call-saved CR fields into
24132 *separate* slots if the routine calls __builtin_eh_return, so
24133 that they can be independently restored by the unwinder. */
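  /* Conceptually the loop below emits (a sketch; "ehcr" stands for
     ehcr_offset):
         mfcr 0              # one move from CR ...
         stw  0,ehcr+0(1)    # ... then one store per call-saved field,
         stw  0,ehcr+4(1)    # each tagged with a USE of its CR field
     so the unwinder sees an independent slot for every field.  */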
24134 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
24135 {
24136 int i, cr_off = info->ehcr_offset;
24137 rtx crsave;
24138
24139 /* ??? We might get better performance by using multiple mfocrf
24140 instructions. */
24141 crsave = gen_rtx_REG (SImode, 0);
24142 emit_insn (gen_movesi_from_cr (crsave));
24143
24144 for (i = 0; i < 8; i++)
24145 if (!call_used_regs[CR0_REGNO + i])
24146 {
24147 rtvec p = rtvec_alloc (2);
24148 RTVEC_ELT (p, 0)
24149 = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
24150 RTVEC_ELT (p, 1)
24151 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
24152
24153 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
24154
24155 RTX_FRAME_RELATED_P (insn) = 1;
24156 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
24157 gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
24158 sp_reg_rtx, cr_off + sp_off));
24159
24160 cr_off += reg_size;
24161 }
24162 }
24163
24164 /* Update stack and set back pointer unless this is V.4,
24165 for which it was done previously. */
24166 if (!WORLD_SAVE_P (info) && info->push_p
24167 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
24168 {
24169 rtx ptr_reg = NULL;
24170 int ptr_off = 0;
24171
24172 /* If saving altivec regs, we need to be able to address all save
24173 locations using a 16-bit offset. */
24174 if ((strategy & SAVE_INLINE_VRS) == 0
24175 || (info->altivec_size != 0
24176 && (info->altivec_save_offset + info->altivec_size - 16
24177 + info->total_size - frame_off) > 32767)
24178 || (info->vrsave_size != 0
24179 && (info->vrsave_save_offset
24180 + info->total_size - frame_off) > 32767))
24181 {
24182 int sel = SAVRES_SAVE | SAVRES_VR;
24183 unsigned ptr_regno = ptr_regno_for_savres (sel);
24184
24185 if (using_static_chain_p
24186 && ptr_regno == STATIC_CHAIN_REGNUM)
24187 ptr_regno = 12;
24188 if (REGNO (frame_reg_rtx) != ptr_regno)
24189 START_USE (ptr_regno);
24190 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
24191 frame_reg_rtx = ptr_reg;
24192 ptr_off = info->altivec_save_offset + info->altivec_size;
24193 frame_off = -ptr_off;
24194 }
24195 else if (REGNO (frame_reg_rtx) == 1)
24196 frame_off = info->total_size;
24197 rs6000_emit_allocate_stack (info->total_size, ptr_reg, ptr_off);
24198 sp_off = info->total_size;
24199 if (frame_reg_rtx != sp_reg_rtx)
24200 rs6000_emit_stack_tie (frame_reg_rtx, false);
24201 }
24202
24203 /* Set frame pointer, if needed. */
24204 if (frame_pointer_needed)
24205 {
24206 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
24207 sp_reg_rtx);
24208 RTX_FRAME_RELATED_P (insn) = 1;
24209 }
24210
24211 /* Save AltiVec registers if needed. Save here because the red zone does
24212 not always include AltiVec registers. */
24213 if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI
24214 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
24215 {
24216 int end_save = info->altivec_save_offset + info->altivec_size;
24217 int ptr_off;
24218 /* Oddly, the vector save/restore functions point r0 at the end
24219 of the save area, then use r11 or r12 to load offsets for
24220 [reg+reg] addressing. */
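      /* Roughly, the out-of-line path below amounts to (a sketch; the
         _savevr_* name follows the usual libgcc convention, and the
         offset is illustrative):
             addi 0,1,end_save    # r0 = end of the VR save area
             bl   _savevr_20      # r11/r12 used internally for [reg+reg]  */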
24221 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
24222 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
24223 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
24224
24225 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
24226 NOT_INUSE (0);
24227 if (end_save + frame_off != 0)
24228 {
24229 rtx offset = GEN_INT (end_save + frame_off);
24230
24231 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
24232 }
24233 else
24234 emit_move_insn (ptr_reg, frame_reg_rtx);
24235
24236 ptr_off = -end_save;
24237 insn = rs6000_emit_savres_rtx (info, scratch_reg,
24238 info->altivec_save_offset + ptr_off,
24239 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
24240 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
24241 NULL_RTX, NULL_RTX);
24242 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
24243 {
24244 /* The oddity mentioned above clobbered our frame reg. */
24245 emit_move_insn (frame_reg_rtx, ptr_reg);
24246 frame_off = ptr_off;
24247 }
24248 }
24249 else if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI
24250 && info->altivec_size != 0)
24251 {
24252 int i;
24253
24254 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
24255 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
24256 {
24257 rtx areg, savereg, mem;
24258 int offset;
24259
24260 offset = (info->altivec_save_offset + frame_off
24261 + 16 * (i - info->first_altivec_reg_save));
24262
24263 savereg = gen_rtx_REG (V4SImode, i);
24264
24265 NOT_INUSE (0);
24266 areg = gen_rtx_REG (Pmode, 0);
24267 emit_move_insn (areg, GEN_INT (offset));
24268
24269 /* AltiVec addressing mode is [reg+reg]. */
24270 mem = gen_frame_mem (V4SImode,
24271 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
24272
24273 /* Rather than emitting a generic move, force use of the stvx
24274 instruction, which we always want. In particular we don't
24275 want xxpermdi/stxvd2x for little endian. */
24276 insn = emit_insn (gen_altivec_stvx_v4si_internal (mem, savereg));
24277
24278 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
24279 areg, GEN_INT (offset));
24280 }
24281 }
24282
24283 /* VRSAVE is a bit vector representing which AltiVec registers
24284 are used. The OS uses this to determine which vector
24285 registers to save on a context switch. We need to save
24286 VRSAVE on the stack frame, add whatever AltiVec registers we
24287 used in this function, and do the corresponding magic in the
24288 epilogue. */
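  /* Conceptually the emitted sequence is (a sketch; the mask and
     offset are illustrative, the real ones come from vrsave_mask and
     vrsave_save_offset):
         mfvrsave 12            # old VRSAVE -> r12
         stw      12,-4(1)      # save it in the frame
         oris     12,12,0xc000  # OR in the VRs this function uses
         mtvrsave 12            # install the updated mask  */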
24289
24290 if (!WORLD_SAVE_P (info)
24291 && TARGET_ALTIVEC
24292 && TARGET_ALTIVEC_VRSAVE
24293 && info->vrsave_mask != 0)
24294 {
24295 rtx reg, vrsave;
24296 int offset;
24297 int save_regno;
24298
24299 /* Get VRSAVE onto a GPR. Note that ABI_V4 and ABI_DARWIN might
24300 be using r12 as frame_reg_rtx and r11 as the static chain
24301 pointer for nested functions. */
24302 save_regno = 12;
24303 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24304 && !using_static_chain_p)
24305 save_regno = 11;
24306 else if (REGNO (frame_reg_rtx) == 12)
24307 {
24308 save_regno = 11;
24309 if (using_static_chain_p)
24310 save_regno = 0;
24311 }
24312
24313 NOT_INUSE (save_regno);
24314 reg = gen_rtx_REG (SImode, save_regno);
24315 vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
24316 if (TARGET_MACHO)
24317 emit_insn (gen_get_vrsave_internal (reg));
24318 else
24319 emit_insn (gen_rtx_SET (reg, vrsave));
24320
24321 /* Save VRSAVE. */
24322 offset = info->vrsave_save_offset + frame_off;
24323 insn = emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
24324
24325 /* Include the registers in the mask. */
24326 emit_insn (gen_iorsi3 (reg, reg, GEN_INT ((int) info->vrsave_mask)));
24327
24328 insn = emit_insn (generate_set_vrsave (reg, info, 0));
24329 }
24330
24331 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
24332 if (!TARGET_SINGLE_PIC_BASE
24333 && ((TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
24334 || (DEFAULT_ABI == ABI_V4
24335 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
24336 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
24337 {
24338 /* If emit_load_toc_table will use the link register, we need to save
24339 it. We use R12 for this purpose because emit_load_toc_table
24340 can use register 0. This allows us to use a plain 'blr' to return
24341 from the procedure more often. */
24342 int save_LR_around_toc_setup = (TARGET_ELF
24343 && DEFAULT_ABI == ABI_V4
24344 && flag_pic
24345 && ! info->lr_save_p
24346 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
24347 if (save_LR_around_toc_setup)
24348 {
24349 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
24350 rtx tmp = gen_rtx_REG (Pmode, 12);
24351
24352 insn = emit_move_insn (tmp, lr);
24353 RTX_FRAME_RELATED_P (insn) = 1;
24354
24355 rs6000_emit_load_toc_table (TRUE);
24356
24357 insn = emit_move_insn (lr, tmp);
24358 add_reg_note (insn, REG_CFA_RESTORE, lr);
24359 RTX_FRAME_RELATED_P (insn) = 1;
24360 }
24361 else
24362 rs6000_emit_load_toc_table (TRUE);
24363 }
24364
24365 #if TARGET_MACHO
24366 if (!TARGET_SINGLE_PIC_BASE
24367 && DEFAULT_ABI == ABI_DARWIN
24368 && flag_pic && crtl->uses_pic_offset_table)
24369 {
24370 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
24371 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
24372
24373 /* Save and restore LR locally around this call (in R0). */
24374 if (!info->lr_save_p)
24375 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
24376
24377 emit_insn (gen_load_macho_picbase (src));
24378
24379 emit_move_insn (gen_rtx_REG (Pmode,
24380 RS6000_PIC_OFFSET_TABLE_REGNUM),
24381 lr);
24382
24383 if (!info->lr_save_p)
24384 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
24385 }
24386 #endif
24387
24388 /* If we need to, save the TOC register after doing the stack setup.
24389 Do not emit eh frame info for this save. The unwinder wants info,
24390 conceptually attached to instructions in this function, about
24391 register values in the caller of this function. This R2 may have
24392 already been changed from the value in the caller.
24393 We don't attempt to write accurate DWARF EH frame info for R2
24394 because code emitted by gcc for a (non-pointer) function call
24395 doesn't save and restore R2. Instead, R2 is managed out-of-line
24396 by a linker generated plt call stub when the function resides in
24397 a shared library. This behaviour is costly to describe in DWARF,
24398 both in terms of the size of DWARF info and the time taken in the
24399 unwinder to interpret it. R2 changes, apart from the
24400 calls_eh_return case earlier in this function, are handled by
24401 linux-unwind.h frob_update_context. */
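  /* The store itself is a single instruction, e.g. for 64-bit ELFv1,
     where RS6000_TOC_SAVE_SLOT is 40 (ELFv2 uses 24):
         std 2,40(1)  */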
24402 if (rs6000_save_toc_in_prologue_p ())
24403 {
24404 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
24405 emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
24406 }
24407 }
24408
24409 /* Output .extern statements for the save/restore routines we use. */
24410
24411 static void
24412 rs6000_output_savres_externs (FILE *file)
24413 {
24414 rs6000_stack_t *info = rs6000_stack_info ();
24415
24416 if (TARGET_DEBUG_STACK)
24417 debug_stack_info (info);
24418
24419 /* Write .extern for any function we will call to save and restore
24420 fp values. */
24421 if (info->first_fp_reg_save < 64
24422 && !TARGET_MACHO
24423 && !TARGET_ELF)
24424 {
24425 char *name;
24426 int regno = info->first_fp_reg_save - 32;
24427
24428 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
24429 {
24430 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
24431 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
24432 name = rs6000_savres_routine_name (info, regno, sel);
24433 fprintf (file, "\t.extern %s\n", name);
24434 }
24435 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
24436 {
24437 bool lr = (info->savres_strategy
24438 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
24439 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
24440 name = rs6000_savres_routine_name (info, regno, sel);
24441 fprintf (file, "\t.extern %s\n", name);
24442 }
24443 }
24444 }
24445
24446 /* Write function prologue. */
24447
24448 static void
24449 rs6000_output_function_prologue (FILE *file,
24450 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
24451 {
24452 if (!cfun->is_thunk)
24453 rs6000_output_savres_externs (file);
24454
24455 /* ELFv2 ABI r2 setup code and local entry point. This must follow
24456 immediately after the global entry point label. */
24457 if (DEFAULT_ABI == ABI_ELFv2 && cfun->machine->r2_setup_needed)
24458 {
24459 const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
24460
24461 fprintf (file, "0:\taddis 2,12,.TOC.-0b@ha\n");
24462 fprintf (file, "\taddi 2,2,.TOC.-0b@l\n");
24463
24464 fputs ("\t.localentry\t", file);
24465 assemble_name (file, name);
24466 fputs (",.-", file);
24467 assemble_name (file, name);
24468 fputs ("\n", file);
24469 }
24470
24471 /* Output -mprofile-kernel code. This needs to be done here instead of
24472 in output_function_profile since it must go after the ELFv2 ABI
24473 local entry point. */
24474 if (TARGET_PROFILE_KERNEL && crtl->profile)
24475 {
24476 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
24477 gcc_assert (!TARGET_32BIT);
24478
24479 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
24480
24481 /* In the ELFv2 ABI we have no compiler stack word. It must be
24482 the responsibility of _mcount to preserve the static chain
24483 register if required. */
24484 if (DEFAULT_ABI != ABI_ELFv2
24485 && cfun->static_chain_decl != NULL)
24486 {
24487 asm_fprintf (file, "\tstd %s,24(%s)\n",
24488 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
24489 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
24490 asm_fprintf (file, "\tld %s,24(%s)\n",
24491 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
24492 }
24493 else
24494 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
24495 }
24496
24497 rs6000_pic_labelno++;
24498 }
24499
24500 /* Non-zero if vmx regs are restored before the frame pop, zero if
24501 we restore after the pop when possible. */
24502 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
24503
24504 /* Restoring cr is a two-step process: loading a reg from the frame
24505 save, then moving the reg to cr. For ABI_V4 we must let the
24506 unwinder know that the stack location is no longer valid at or
24507 before the stack deallocation, but we can't emit a cfa_restore for
24508 cr at the stack deallocation like we do for other registers.
24509 The trouble is that it is possible for the move to cr to be
24510 scheduled after the stack deallocation. So say exactly where cr
24511 is located on each of the two insns. */
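/* A sketch of the two steps (the offset is illustrative):
       lwz   12,8(1)     # load_cr_save: frame slot -> GPR
       mtcrf 0xff,12     # restore_saved_cr: GPR -> CR fields
   Each insn carries its own note saying where cr lives at that point.  */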
24512
24513 static rtx
24514 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
24515 {
24516 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
24517 rtx reg = gen_rtx_REG (SImode, regno);
24518 rtx_insn *insn = emit_move_insn (reg, mem);
24519
24520 if (!exit_func && DEFAULT_ABI == ABI_V4)
24521 {
24522 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
24523 rtx set = gen_rtx_SET (reg, cr);
24524
24525 add_reg_note (insn, REG_CFA_REGISTER, set);
24526 RTX_FRAME_RELATED_P (insn) = 1;
24527 }
24528 return reg;
24529 }
24530
24531 /* Reload CR from REG. */
24532
24533 static void
24534 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
24535 {
24536 int count = 0;
24537 int i;
24538
24539 if (using_mfcr_multiple)
24540 {
24541 for (i = 0; i < 8; i++)
24542 if (save_reg_p (CR0_REGNO + i))
24543 count++;
24544 gcc_assert (count);
24545 }
24546
24547 if (using_mfcr_multiple && count > 1)
24548 {
24549 rtx_insn *insn;
24550 rtvec p;
24551 int ndx;
24552
24553 p = rtvec_alloc (count);
24554
24555 ndx = 0;
24556 for (i = 0; i < 8; i++)
24557 if (save_reg_p (CR0_REGNO + i))
24558 {
24559 rtvec r = rtvec_alloc (2);
24560 RTVEC_ELT (r, 0) = reg;
24561 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
24562 RTVEC_ELT (p, ndx) =
24563 gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
24564 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
24565 ndx++;
24566 }
24567 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
24568 gcc_assert (ndx == count);
24569
24570 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
24571 CR field separately. */
24572 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
24573 {
24574 for (i = 0; i < 8; i++)
24575 if (save_reg_p (CR0_REGNO + i))
24576 add_reg_note (insn, REG_CFA_RESTORE,
24577 gen_rtx_REG (SImode, CR0_REGNO + i));
24578
24579 RTX_FRAME_RELATED_P (insn) = 1;
24580 }
24581 }
24582 else
24583 for (i = 0; i < 8; i++)
24584 if (save_reg_p (CR0_REGNO + i))
24585 {
24586 rtx insn = emit_insn (gen_movsi_to_cr_one
24587 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
24588
24589 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
24590 CR field separately, attached to the insn that in fact
24591 restores this particular CR field. */
24592 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
24593 {
24594 add_reg_note (insn, REG_CFA_RESTORE,
24595 gen_rtx_REG (SImode, CR0_REGNO + i));
24596
24597 RTX_FRAME_RELATED_P (insn) = 1;
24598 }
24599 }
24600
24601 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
24602 if (!exit_func && DEFAULT_ABI != ABI_ELFv2
24603 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
24604 {
24605 rtx_insn *insn = get_last_insn ();
24606 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
24607
24608 add_reg_note (insn, REG_CFA_RESTORE, cr);
24609 RTX_FRAME_RELATED_P (insn) = 1;
24610 }
24611 }
24612
24613 /* Like cr, the move to lr instruction can be scheduled after the
24614 stack deallocation, but unlike cr, its stack frame save is still
24615 valid. So we only need to emit the cfa_restore on the correct
24616 instruction. */
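/* I.e. the pair is simply (64-bit ELF keeps the LR save slot at 16(1)):
       ld   0,16(1)   # load_lr_save
       mtlr 0         # restore_saved_lr, which gets the REG_CFA_RESTORE  */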
24617
24618 static void
24619 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
24620 {
24621 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
24622 rtx reg = gen_rtx_REG (Pmode, regno);
24623
24624 emit_move_insn (reg, mem);
24625 }
24626
24627 static void
24628 restore_saved_lr (int regno, bool exit_func)
24629 {
24630 rtx reg = gen_rtx_REG (Pmode, regno);
24631 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
24632 rtx_insn *insn = emit_move_insn (lr, reg);
24633
24634 if (!exit_func && flag_shrink_wrap)
24635 {
24636 add_reg_note (insn, REG_CFA_RESTORE, lr);
24637 RTX_FRAME_RELATED_P (insn) = 1;
24638 }
24639 }
24640
24641 static rtx
24642 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
24643 {
24644 if (DEFAULT_ABI == ABI_ELFv2)
24645 {
24646 int i;
24647 for (i = 0; i < 8; i++)
24648 if (save_reg_p (CR0_REGNO + i))
24649 {
24650 rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
24651 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
24652 cfa_restores);
24653 }
24654 }
24655 else if (info->cr_save_p)
24656 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
24657 gen_rtx_REG (SImode, CR2_REGNO),
24658 cfa_restores);
24659
24660 if (info->lr_save_p)
24661 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
24662 gen_rtx_REG (Pmode, LR_REGNO),
24663 cfa_restores);
24664 return cfa_restores;
24665 }
24666
24667 /* Return true if OFFSET from stack pointer can be clobbered by signals.
24668 V.4 doesn't have any stack cushion; the AIX ABIs have 220 or 288 bytes
24669 below the stack pointer that are not clobbered by signals. */
24670
24671 static inline bool
24672 offset_below_red_zone_p (HOST_WIDE_INT offset)
24673 {
24674 return offset < (DEFAULT_ABI == ABI_V4
24675 ? 0
24676 : TARGET_32BIT ? -220 : -288);
24677 }
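/* For instance, on 64-bit AIX/ELF offset_below_red_zone_p (-300) is
   true (a signal handler may clobber that slot), while -272 lies
   within the 288-byte cushion and is safe until the frame is popped.  */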
24678
24679 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
24680
24681 static void
24682 emit_cfa_restores (rtx cfa_restores)
24683 {
24684 rtx_insn *insn = get_last_insn ();
24685 rtx *loc = &REG_NOTES (insn);
24686
24687 while (*loc)
24688 loc = &XEXP (*loc, 1);
24689 *loc = cfa_restores;
24690 RTX_FRAME_RELATED_P (insn) = 1;
24691 }
24692
24693 /* Emit function epilogue as insns. */
24694
24695 void
24696 rs6000_emit_epilogue (int sibcall)
24697 {
24698 rs6000_stack_t *info;
24699 int restoring_GPRs_inline;
24700 int restoring_FPRs_inline;
24701 int using_load_multiple;
24702 int using_mtcr_multiple;
24703 int use_backchain_to_restore_sp;
24704 int restore_lr;
24705 int strategy;
24706 HOST_WIDE_INT frame_off = 0;
24707 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
24708 rtx frame_reg_rtx = sp_reg_rtx;
24709 rtx cfa_restores = NULL_RTX;
24710 rtx insn;
24711 rtx cr_save_reg = NULL_RTX;
24712 machine_mode reg_mode = Pmode;
24713 int reg_size = TARGET_32BIT ? 4 : 8;
24714 int i;
24715 bool exit_func;
24716 unsigned ptr_regno;
24717
24718 info = rs6000_stack_info ();
24719
24720 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
24721 {
24722 reg_mode = V2SImode;
24723 reg_size = 8;
24724 }
24725
24726 strategy = info->savres_strategy;
24727 using_load_multiple = strategy & SAVRES_MULTIPLE;
24728 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
24729 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
24730 using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
24731 || rs6000_cpu == PROCESSOR_PPC603
24732 || rs6000_cpu == PROCESSOR_PPC750
24733 || optimize_size);
24734 /* Restore via the backchain when we have a large frame, since this
24735 is more efficient than an addis, addi pair. The second condition
24736 here will not trigger at the moment; we don't actually need a
24737 frame pointer for alloca, but the generic parts of the compiler
24738 give us one anyway. */
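  /* I.e. one load through the backchain word:
         ld 1,0(1)             # r1 = saved backchain (a sketch)
     instead of materializing a constant that won't fit in 16 bits:
         addis 12,1,hi ; addi 12,12,lo  */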
24739 use_backchain_to_restore_sp = (info->total_size > 32767 - info->lr_save_offset
24740 || (cfun->calls_alloca
24741 && !frame_pointer_needed));
24742 restore_lr = (info->lr_save_p
24743 && (restoring_FPRs_inline
24744 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
24745 && (restoring_GPRs_inline
24746 || info->first_fp_reg_save < 64));
24747
24748 if (WORLD_SAVE_P (info))
24749 {
24750 int i, j;
24751 char rname[30];
24752 const char *alloc_rname;
24753 rtvec p;
24754
24755 /* eh_rest_world_r10 will return to the location saved in the LR
24756 stack slot (which is not likely to be our caller).
24757 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
24758 rest_world is similar, except any R10 parameter is ignored.
24759 The exception-handling stuff that was here in 2.95 is no
24760 longer necessary. */
24761
24762 p = rtvec_alloc (9
24763 + 1
24764 + 32 - info->first_gp_reg_save
24765 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
24766 + 63 + 1 - info->first_fp_reg_save);
24767
24768 strcpy (rname, ((crtl->calls_eh_return) ?
24769 "*eh_rest_world_r10" : "*rest_world"));
24770 alloc_rname = ggc_strdup (rname);
24771
24772 j = 0;
24773 RTVEC_ELT (p, j++) = ret_rtx;
24774 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
24775 gen_rtx_REG (Pmode,
24776 LR_REGNO));
24777 RTVEC_ELT (p, j++)
24778 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
24779 /* The instruction pattern requires a clobber here;
24780 it is shared with the restVEC helper. */
24781 RTVEC_ELT (p, j++)
24782 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
24783
24784 {
24785 /* CR register traditionally saved as CR2. */
24786 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
24787 RTVEC_ELT (p, j++)
24788 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
24789 if (flag_shrink_wrap)
24790 {
24791 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
24792 gen_rtx_REG (Pmode, LR_REGNO),
24793 cfa_restores);
24794 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
24795 }
24796 }
24797
24798 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
24799 {
24800 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
24801 RTVEC_ELT (p, j++)
24802 = gen_frame_load (reg,
24803 frame_reg_rtx, info->gp_save_offset + reg_size * i);
24804 if (flag_shrink_wrap)
24805 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
24806 }
24807 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
24808 {
24809 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
24810 RTVEC_ELT (p, j++)
24811 = gen_frame_load (reg,
24812 frame_reg_rtx, info->altivec_save_offset + 16 * i);
24813 if (flag_shrink_wrap)
24814 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
24815 }
24816 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
24817 {
24818 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
24819 ? DFmode : SFmode),
24820 info->first_fp_reg_save + i);
24821 RTVEC_ELT (p, j++)
24822 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
24823 if (flag_shrink_wrap)
24824 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
24825 }
24826 RTVEC_ELT (p, j++)
24827 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
24828 RTVEC_ELT (p, j++)
24829 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
24830 RTVEC_ELT (p, j++)
24831 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
24832 RTVEC_ELT (p, j++)
24833 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
24834 RTVEC_ELT (p, j++)
24835 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
24836 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
24837
24838 if (flag_shrink_wrap)
24839 {
24840 REG_NOTES (insn) = cfa_restores;
24841 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
24842 RTX_FRAME_RELATED_P (insn) = 1;
24843 }
24844 return;
24845 }
24846
24847 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
24848 if (info->push_p)
24849 frame_off = info->total_size;
24850
24851 /* Restore AltiVec registers if we must do so before adjusting the
24852 stack. */
24853 if (TARGET_ALTIVEC_ABI
24854 && info->altivec_size != 0
24855 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
24856 || (DEFAULT_ABI != ABI_V4
24857 && offset_below_red_zone_p (info->altivec_save_offset))))
24858 {
24859 int i;
24860 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
24861
24862 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
24863 if (use_backchain_to_restore_sp)
24864 {
24865 int frame_regno = 11;
24866
24867 if ((strategy & REST_INLINE_VRS) == 0)
24868 {
24869 /* Of r11 and r12, select the one not clobbered by an
24870 out-of-line restore function for the frame register. */
24871 frame_regno = 11 + 12 - scratch_regno;
24872 }
24873 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
24874 emit_move_insn (frame_reg_rtx,
24875 gen_rtx_MEM (Pmode, sp_reg_rtx));
24876 frame_off = 0;
24877 }
24878 else if (frame_pointer_needed)
24879 frame_reg_rtx = hard_frame_pointer_rtx;
24880
24881 if ((strategy & REST_INLINE_VRS) == 0)
24882 {
24883 int end_save = info->altivec_save_offset + info->altivec_size;
24884 int ptr_off;
24885 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
24886 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
24887
24888 if (end_save + frame_off != 0)
24889 {
24890 rtx offset = GEN_INT (end_save + frame_off);
24891
24892 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
24893 }
24894 else
24895 emit_move_insn (ptr_reg, frame_reg_rtx);
24896
24897 ptr_off = -end_save;
24898 insn = rs6000_emit_savres_rtx (info, scratch_reg,
24899 info->altivec_save_offset + ptr_off,
24900 0, V4SImode, SAVRES_VR);
24901 }
24902 else
24903 {
24904 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
24905 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
24906 {
24907 rtx addr, areg, mem, reg;
24908
24909 areg = gen_rtx_REG (Pmode, 0);
24910 emit_move_insn
24911 (areg, GEN_INT (info->altivec_save_offset
24912 + frame_off
24913 + 16 * (i - info->first_altivec_reg_save)));
24914
24915 /* AltiVec addressing mode is [reg+reg]. */
24916 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
24917 mem = gen_frame_mem (V4SImode, addr);
24918
24919 reg = gen_rtx_REG (V4SImode, i);
24920 /* Rather than emitting a generic move, force use of the
24921 lvx instruction, which we always want. In particular
24922 we don't want lxvd2x/xxpermdi for little endian. */
24923 (void) emit_insn (gen_altivec_lvx_v4si_internal (reg, mem));
24924 }
24925 }
24926
24927 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
24928 if (((strategy & REST_INLINE_VRS) == 0
24929 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
24930 && (flag_shrink_wrap
24931 || (offset_below_red_zone_p
24932 (info->altivec_save_offset
24933 + 16 * (i - info->first_altivec_reg_save)))))
24934 {
24935 rtx reg = gen_rtx_REG (V4SImode, i);
24936 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
24937 }
24938 }
24939
24940 /* Restore VRSAVE if we must do so before adjusting the stack. */
24941 if (TARGET_ALTIVEC
24942 && TARGET_ALTIVEC_VRSAVE
24943 && info->vrsave_mask != 0
24944 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
24945 || (DEFAULT_ABI != ABI_V4
24946 && offset_below_red_zone_p (info->vrsave_save_offset))))
24947 {
24948 rtx reg;
24949
24950 if (frame_reg_rtx == sp_reg_rtx)
24951 {
24952 if (use_backchain_to_restore_sp)
24953 {
24954 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
24955 emit_move_insn (frame_reg_rtx,
24956 gen_rtx_MEM (Pmode, sp_reg_rtx));
24957 frame_off = 0;
24958 }
24959 else if (frame_pointer_needed)
24960 frame_reg_rtx = hard_frame_pointer_rtx;
24961 }
24962
24963 reg = gen_rtx_REG (SImode, 12);
24964 emit_insn (gen_frame_load (reg, frame_reg_rtx,
24965 info->vrsave_save_offset + frame_off));
24966
24967 emit_insn (generate_set_vrsave (reg, info, 1));
24968 }
24969
24970 insn = NULL_RTX;
24971 /* If we have a large stack frame, restore the old stack pointer
24972 using the backchain. */
24973 if (use_backchain_to_restore_sp)
24974 {
24975 if (frame_reg_rtx == sp_reg_rtx)
24976 {
24977 /* Under V.4, don't reset the stack pointer until after we're done
24978 loading the saved registers. */
24979 if (DEFAULT_ABI == ABI_V4)
24980 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
24981
24982 insn = emit_move_insn (frame_reg_rtx,
24983 gen_rtx_MEM (Pmode, sp_reg_rtx));
24984 frame_off = 0;
24985 }
24986 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
24987 && DEFAULT_ABI == ABI_V4)
24988 /* frame_reg_rtx has been set up by the altivec restore. */
24989 ;
24990 else
24991 {
24992 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
24993 frame_reg_rtx = sp_reg_rtx;
24994 }
24995 }
24996 /* If we have a frame pointer, we can restore the old stack pointer
24997 from it. */
24998 else if (frame_pointer_needed)
24999 {
25000 frame_reg_rtx = sp_reg_rtx;
25001 if (DEFAULT_ABI == ABI_V4)
25002 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
25003 /* Prevent reordering memory accesses against stack pointer restore. */
25004 else if (cfun->calls_alloca
25005 || offset_below_red_zone_p (-info->total_size))
25006 rs6000_emit_stack_tie (frame_reg_rtx, true);
25007
25008 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
25009 GEN_INT (info->total_size)));
25010 frame_off = 0;
25011 }
25012 else if (info->push_p
25013 && DEFAULT_ABI != ABI_V4
25014 && !crtl->calls_eh_return)
25015 {
25016 /* Prevent reordering memory accesses against stack pointer restore. */
25017 if (cfun->calls_alloca
25018 || offset_below_red_zone_p (-info->total_size))
25019 rs6000_emit_stack_tie (frame_reg_rtx, false);
25020 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
25021 GEN_INT (info->total_size)));
25022 frame_off = 0;
25023 }
25024 if (insn && frame_reg_rtx == sp_reg_rtx)
25025 {
25026 if (cfa_restores)
25027 {
25028 REG_NOTES (insn) = cfa_restores;
25029 cfa_restores = NULL_RTX;
25030 }
25031 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
25032 RTX_FRAME_RELATED_P (insn) = 1;
25033 }
25034
25035 /* Restore AltiVec registers if we have not done so already. */
25036 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
25037 && TARGET_ALTIVEC_ABI
25038 && info->altivec_size != 0
25039 && (DEFAULT_ABI == ABI_V4
25040 || !offset_below_red_zone_p (info->altivec_save_offset)))
25041 {
25042 int i;
25043
25044 if ((strategy & REST_INLINE_VRS) == 0)
25045 {
25046 int end_save = info->altivec_save_offset + info->altivec_size;
25047 int ptr_off;
25048 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
25049 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
25050 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
25051
25052 if (end_save + frame_off != 0)
25053 {
25054 rtx offset = GEN_INT (end_save + frame_off);
25055
25056 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
25057 }
25058 else
25059 emit_move_insn (ptr_reg, frame_reg_rtx);
25060
25061 ptr_off = -end_save;
25062 insn = rs6000_emit_savres_rtx (info, scratch_reg,
25063 info->altivec_save_offset + ptr_off,
25064 0, V4SImode, SAVRES_VR);
25065 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
25066 {
25067 /* Frame reg was clobbered by out-of-line save. Restore it
25068 from ptr_reg, and if we are calling out-of-line gpr or
25069 fpr restore, set up the correct pointer and offset. */
25070 unsigned newptr_regno = 1;
25071 if (!restoring_GPRs_inline)
25072 {
25073 bool lr = info->gp_save_offset + info->gp_size == 0;
25074 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
25075 newptr_regno = ptr_regno_for_savres (sel);
25076 end_save = info->gp_save_offset + info->gp_size;
25077 }
25078 else if (!restoring_FPRs_inline)
25079 {
25080 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
25081 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
25082 newptr_regno = ptr_regno_for_savres (sel);
25083 end_save = info->fp_save_offset + info->fp_size;
25084 }
25085
25086 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
25087 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
25088
25089 if (end_save + ptr_off != 0)
25090 {
25091 rtx offset = GEN_INT (end_save + ptr_off);
25092
25093 frame_off = -end_save;
25094 if (TARGET_32BIT)
25095 emit_insn (gen_addsi3_carry (frame_reg_rtx,
25096 ptr_reg, offset));
25097 else
25098 emit_insn (gen_adddi3_carry (frame_reg_rtx,
25099 ptr_reg, offset));
25100 }
25101 else
25102 {
25103 frame_off = ptr_off;
25104 emit_move_insn (frame_reg_rtx, ptr_reg);
25105 }
25106 }
25107 }
25108 else
25109 {
25110 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
25111 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
25112 {
25113 rtx addr, areg, mem, reg;
25114
25115 areg = gen_rtx_REG (Pmode, 0);
25116 emit_move_insn
25117 (areg, GEN_INT (info->altivec_save_offset
25118 + frame_off
25119 + 16 * (i - info->first_altivec_reg_save)));
25120
25121 /* AltiVec addressing mode is [reg+reg]. */
25122 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
25123 mem = gen_frame_mem (V4SImode, addr);
25124
25125 reg = gen_rtx_REG (V4SImode, i);
25126 /* Rather than emitting a generic move, force use of the
25127 lvx instruction, which we always want. In particular
25128 we don't want lxvd2x/xxpermdi for little endian. */
25129 (void) emit_insn (gen_altivec_lvx_v4si_internal (reg, mem));
25130 }
25131 }
25132
25133 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
25134 if (((strategy & REST_INLINE_VRS) == 0
25135 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
25136 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
25137 {
25138 rtx reg = gen_rtx_REG (V4SImode, i);
25139 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
25140 }
25141 }
25142
25143 /* Restore VRSAVE if we have not done so already. */
25144 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
25145 && TARGET_ALTIVEC
25146 && TARGET_ALTIVEC_VRSAVE
25147 && info->vrsave_mask != 0
25148 && (DEFAULT_ABI == ABI_V4
25149 || !offset_below_red_zone_p (info->vrsave_save_offset)))
25150 {
25151 rtx reg;
25152
25153 reg = gen_rtx_REG (SImode, 12);
25154 emit_insn (gen_frame_load (reg, frame_reg_rtx,
25155 info->vrsave_save_offset + frame_off));
25156
25157 emit_insn (generate_set_vrsave (reg, info, 1));
25158 }
25159
25160 /* If we exit by an out-of-line restore function on ABI_V4 then that
25161 function will deallocate the stack, so we don't need to worry
25162 about the unwinder restoring cr from an invalid stack frame
25163 location. */
25164 exit_func = (!restoring_FPRs_inline
25165 || (!restoring_GPRs_inline
25166 && info->first_fp_reg_save == 64));
25167
25168 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
25169 *separate* slots if the routine calls __builtin_eh_return, so
25170 that they can be independently restored by the unwinder. */
25171 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
25172 {
25173 int i, cr_off = info->ehcr_offset;
25174
25175 for (i = 0; i < 8; i++)
25176 if (!call_used_regs[CR0_REGNO + i])
25177 {
25178 rtx reg = gen_rtx_REG (SImode, 0);
25179 emit_insn (gen_frame_load (reg, frame_reg_rtx,
25180 cr_off + frame_off));
25181
25182 insn = emit_insn (gen_movsi_to_cr_one
25183 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
25184
25185 if (!exit_func && flag_shrink_wrap)
25186 {
25187 add_reg_note (insn, REG_CFA_RESTORE,
25188 gen_rtx_REG (SImode, CR0_REGNO + i));
25189
25190 RTX_FRAME_RELATED_P (insn) = 1;
25191 }
25192
25193 cr_off += reg_size;
25194 }
25195 }
25196
25197 /* Get the old lr if we saved it. If we are restoring registers
25198 out-of-line, then the out-of-line routines can do this for us. */
25199 if (restore_lr && restoring_GPRs_inline)
25200 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
25201
25202 /* Get the old cr if we saved it. */
25203 if (info->cr_save_p)
25204 {
25205 unsigned cr_save_regno = 12;
25206
25207 if (!restoring_GPRs_inline)
25208 {
25209 /* Ensure we don't use the register used by the out-of-line
25210 gpr register restore below. */
25211 bool lr = info->gp_save_offset + info->gp_size == 0;
25212 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
25213 int gpr_ptr_regno = ptr_regno_for_savres (sel);
25214
25215 if (gpr_ptr_regno == 12)
25216 cr_save_regno = 11;
25217 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
25218 }
25219 else if (REGNO (frame_reg_rtx) == 12)
25220 cr_save_regno = 11;
25221
25222 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
25223 info->cr_save_offset + frame_off,
25224 exit_func);
25225 }
25226
25227 /* Set LR here to try to overlap restores below. */
25228 if (restore_lr && restoring_GPRs_inline)
25229 restore_saved_lr (0, exit_func);
25230
25231 /* Load exception handler data registers, if needed. */
25232 if (crtl->calls_eh_return)
25233 {
25234 unsigned int i, regno;
25235
25236 if (TARGET_AIX)
25237 {
25238 rtx reg = gen_rtx_REG (reg_mode, 2);
25239 emit_insn (gen_frame_load (reg, frame_reg_rtx,
25240 frame_off + RS6000_TOC_SAVE_SLOT));
25241 }
25242
25243 for (i = 0; ; ++i)
25244 {
25245 rtx mem;
25246
25247 regno = EH_RETURN_DATA_REGNO (i);
25248 if (regno == INVALID_REGNUM)
25249 break;
25250
25251 /* Note: possible use of r0 here to address SPE regs. */
25252 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
25253 info->ehrd_offset + frame_off
25254 + reg_size * (int) i);
25255
25256 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
25257 }
25258 }
25259
25260 /* Restore GPRs. This is done as a PARALLEL if we are using
25261 the load-multiple instructions. */
25262 if (TARGET_SPE_ABI
25263 && info->spe_64bit_regs_used
25264 && info->first_gp_reg_save != 32)
25265 {
25266 /* Determine whether we can address all of the registers that need
25267 to be saved with an offset from frame_reg_rtx that fits in
25268 the small const field for SPE memory instructions. */
25269 int spe_regs_addressable
25270 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
25271 + reg_size * (32 - info->first_gp_reg_save - 1))
25272 && restoring_GPRs_inline);
25273
25274 if (!spe_regs_addressable)
25275 {
25276 int ool_adjust = 0;
25277 rtx old_frame_reg_rtx = frame_reg_rtx;
25278 /* Make r11 point to the start of the SPE save area. We worried about
25279 not clobbering it when we were saving registers in the prologue.
25280 There's no need to worry here because the static chain is passed
25281 anew to every function. */
25282
25283 if (!restoring_GPRs_inline)
25284 ool_adjust = 8 * (info->first_gp_reg_save - FIRST_SAVED_GP_REGNO);
25285 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
25286 emit_insn (gen_addsi3 (frame_reg_rtx, old_frame_reg_rtx,
25287 GEN_INT (info->spe_gp_save_offset
25288 + frame_off
25289 - ool_adjust)));
25290 /* Keep the invariant that frame_reg_rtx + frame_off points
25291 at the top of the stack frame. */
25292 frame_off = -info->spe_gp_save_offset + ool_adjust;
25293 }
25294
25295 if (restoring_GPRs_inline)
25296 {
25297 HOST_WIDE_INT spe_offset = info->spe_gp_save_offset + frame_off;
25298
25299 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
25300 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
25301 {
25302 rtx offset, addr, mem, reg;
25303
25304 /* We're doing all this to ensure that the immediate offset
25305 fits into the immediate field of 'evldd'. */
25306 gcc_assert (SPE_CONST_OFFSET_OK (spe_offset + reg_size * i));
25307
25308 offset = GEN_INT (spe_offset + reg_size * i);
25309 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, offset);
25310 mem = gen_rtx_MEM (V2SImode, addr);
25311 reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
25312
25313 emit_move_insn (reg, mem);
25314 }
25315 }
25316 else
25317 rs6000_emit_savres_rtx (info, frame_reg_rtx,
25318 info->spe_gp_save_offset + frame_off,
25319 info->lr_save_offset + frame_off,
25320 reg_mode,
25321 SAVRES_GPR | SAVRES_LR);
25322 }
25323 else if (!restoring_GPRs_inline)
25324 {
25325 /* We are jumping to an out-of-line function. */
25326 rtx ptr_reg;
25327 int end_save = info->gp_save_offset + info->gp_size;
25328 bool can_use_exit = end_save == 0;
25329 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
25330 int ptr_off;
25331
25332 /* Emit stack reset code if we need it. */
25333 ptr_regno = ptr_regno_for_savres (sel);
25334 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
25335 if (can_use_exit)
25336 rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
25337 else if (end_save + frame_off != 0)
25338 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
25339 GEN_INT (end_save + frame_off)));
25340 else if (REGNO (frame_reg_rtx) != ptr_regno)
25341 emit_move_insn (ptr_reg, frame_reg_rtx);
25342 if (REGNO (frame_reg_rtx) == ptr_regno)
25343 frame_off = -end_save;
25344
25345 if (can_use_exit && info->cr_save_p)
25346 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
25347
25348 ptr_off = -end_save;
25349 rs6000_emit_savres_rtx (info, ptr_reg,
25350 info->gp_save_offset + ptr_off,
25351 info->lr_save_offset + ptr_off,
25352 reg_mode, sel);
25353 }
25354 else if (using_load_multiple)
25355 {
25356 rtvec p;
25357 p = rtvec_alloc (32 - info->first_gp_reg_save);
25358 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
25359 RTVEC_ELT (p, i)
25360 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
25361 frame_reg_rtx,
25362 info->gp_save_offset + frame_off + reg_size * i);
25363 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
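      /* The PARALLEL just emitted matches the 32-bit load-multiple
         pattern and assembles to a single instruction, conceptually:
             lmw 30,8(1)    # reload r30..r31 (sketch; offsets vary)  */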
25364 }
25365 else
25366 {
25367 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
25368 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
25369 emit_insn (gen_frame_load
25370 (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
25371 frame_reg_rtx,
25372 info->gp_save_offset + frame_off + reg_size * i));
25373 }
25374
25375 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
25376 {
25377 /* If the frame pointer was used then we can't delay emitting
25378 a REG_CFA_DEF_CFA note. This must happen on the insn that
25379 restores the frame pointer, r31. We may have already emitted
25380 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
25381 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
25382 be harmless if emitted. */
25383 if (frame_pointer_needed)
25384 {
25385 insn = get_last_insn ();
25386 add_reg_note (insn, REG_CFA_DEF_CFA,
25387 plus_constant (Pmode, frame_reg_rtx, frame_off));
25388 RTX_FRAME_RELATED_P (insn) = 1;
25389 }
25390
25391 /* Set up cfa_restores. We always need these when
25392 shrink-wrapping. If not shrink-wrapping then we only need
25393 the cfa_restore when the stack location is no longer valid.
25394 The cfa_restores must be emitted on or before the insn that
25395 invalidates the stack, and of course must not be emitted
25396 before the insn that actually does the restore. The latter
25397 is why it is a bad idea to emit the cfa_restores as a group
25398 on the last instruction here that actually does a restore:
25399 that insn may be reordered with respect to others doing
25400 restores. */
25401 if (flag_shrink_wrap
25402 && !restoring_GPRs_inline
25403 && info->first_fp_reg_save == 64)
25404 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
25405
25406 for (i = info->first_gp_reg_save; i < 32; i++)
25407 if (!restoring_GPRs_inline
25408 || using_load_multiple
25409 || rs6000_reg_live_or_pic_offset_p (i))
25410 {
25411 rtx reg = gen_rtx_REG (reg_mode, i);
25412
25413 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
25414 }
25415 }
25416
25417 if (!restoring_GPRs_inline
25418 && info->first_fp_reg_save == 64)
25419 {
25420 /* We are jumping to an out-of-line function. */
25421 if (cfa_restores)
25422 emit_cfa_restores (cfa_restores);
25423 return;
25424 }
25425
25426 if (restore_lr && !restoring_GPRs_inline)
25427 {
25428 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
25429 restore_saved_lr (0, exit_func);
25430 }
25431
25432 /* Restore fpr's if we need to do it without calling a function. */
25433 if (restoring_FPRs_inline)
25434 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
25435 if (save_reg_p (info->first_fp_reg_save + i))
25436 {
25437 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
25438 ? DFmode : SFmode),
25439 info->first_fp_reg_save + i);
25440 emit_insn (gen_frame_load (reg, frame_reg_rtx,
25441 info->fp_save_offset + frame_off + 8 * i));
25442 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
25443 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
25444 }
25445
25446 /* If we saved cr, restore it here. Just those that were used. */
25447 if (info->cr_save_p)
25448 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
25449
25450 /* If this is V.4, unwind the stack pointer after all of the loads
25451 have been done, or set up r11 if we are restoring fp out of line. */
25452 ptr_regno = 1;
25453 if (!restoring_FPRs_inline)
25454 {
25455 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
25456 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
25457 ptr_regno = ptr_regno_for_savres (sel);
25458 }
25459
25460 insn = rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
25461 if (REGNO (frame_reg_rtx) == ptr_regno)
25462 frame_off = 0;
25463
25464 if (insn && restoring_FPRs_inline)
25465 {
25466 if (cfa_restores)
25467 {
25468 REG_NOTES (insn) = cfa_restores;
25469 cfa_restores = NULL_RTX;
25470 }
25471 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
25472 RTX_FRAME_RELATED_P (insn) = 1;
25473 }
25474
25475 if (crtl->calls_eh_return)
25476 {
25477 rtx sa = EH_RETURN_STACKADJ_RTX;
25478 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
25479 }
25480
25481 if (!sibcall)
25482 {
25483 rtvec p;
25484 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
25485 if (! restoring_FPRs_inline)
25486 {
25487 p = rtvec_alloc (4 + 64 - info->first_fp_reg_save);
25488 RTVEC_ELT (p, 0) = ret_rtx;
25489 }
25490 else
25491 {
25492 if (cfa_restores)
25493 {
25494 /* We can't hang the cfa_restores off a simple return,
25495 since the shrink-wrap code sometimes uses an existing
25496 return. This means there might be a path from
25497 pre-prologue code to this return, and dwarf2cfi code
25498 wants the eh_frame unwinder state to be the same on
25499 all paths to any point. So we need to emit the
25500 cfa_restores before the return. For -m64 we really
25501 don't need epilogue cfa_restores at all, except for
25502 this irritating dwarf2cfi-with-shrink-wrap
25503 requirement; the stack red zone means eh_frame info
25504 from the prologue telling the unwinder to restore
25505 from the stack is perfectly good right to the end of
25506 the function. */
25507 emit_insn (gen_blockage ());
25508 emit_cfa_restores (cfa_restores);
25509 cfa_restores = NULL_RTX;
25510 }
25511 p = rtvec_alloc (2);
25512 RTVEC_ELT (p, 0) = simple_return_rtx;
25513 }
25514
25515 RTVEC_ELT (p, 1) = ((restoring_FPRs_inline || !lr)
25516 ? gen_rtx_USE (VOIDmode,
25517 gen_rtx_REG (Pmode, LR_REGNO))
25518 : gen_rtx_CLOBBER (VOIDmode,
25519 gen_rtx_REG (Pmode, LR_REGNO)));
25520
25521 /* If we have to restore more than two FP registers, branch to the
25522 restore function. It will return to our caller. */
25523 if (! restoring_FPRs_inline)
25524 {
25525 int i;
25526 int reg;
25527 rtx sym;
25528
25529 if (flag_shrink_wrap)
25530 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
25531
25532 sym = rs6000_savres_routine_sym (info,
25533 SAVRES_FPR | (lr ? SAVRES_LR : 0));
25534 RTVEC_ELT (p, 2) = gen_rtx_USE (VOIDmode, sym);
25535 reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)? 1 : 11;
25536 RTVEC_ELT (p, 3) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));
25537
25538 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
25539 {
25540 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
25541
25542 RTVEC_ELT (p, i + 4)
25543 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
25544 if (flag_shrink_wrap)
25545 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
25546 cfa_restores);
25547 }
25548 }
25549
25550 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
25551 }
25552
25553 if (cfa_restores)
25554 {
25555 if (sibcall)
25556 /* Ensure the cfa_restores are hung off an insn that won't
25557 be reordered above other restores. */
25558 emit_insn (gen_blockage ());
25559
25560 emit_cfa_restores (cfa_restores);
25561 }
25562 }
25563
25564 /* Write function epilogue. */
25565
25566 static void
25567 rs6000_output_function_epilogue (FILE *file,
25568 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
25569 {
25570 #if TARGET_MACHO
25571 macho_branch_islands ();
25572 /* Mach-O doesn't support labels at the end of objects, so if
25573 it looks like we might want one, insert a NOP. */
25574 {
25575 rtx_insn *insn = get_last_insn ();
25576 rtx_insn *deleted_debug_label = NULL;
25577 while (insn
25578 && NOTE_P (insn)
25579 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
25580 {
25581 /* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
25582 notes; instead set their CODE_LABEL_NUMBER to -1.
25583 Otherwise there would be code generation differences
25584 between -g and -g0. */
25585 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
25586 deleted_debug_label = insn;
25587 insn = PREV_INSN (insn);
25588 }
25589 if (insn
25590 && (LABEL_P (insn)
25591 || (NOTE_P (insn)
25592 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
25593 fputs ("\tnop\n", file);
25594 else if (deleted_debug_label)
25595 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
25596 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
25597 CODE_LABEL_NUMBER (insn) = -1;
25598 }
25599 #endif
25600
25601 /* Output a traceback table here. See /usr/include/sys/debug.h for info
25602 on its format.
25603
25604 We don't output a traceback table if -finhibit-size-directive was
25605 used. The documentation for -finhibit-size-directive reads
25606 ``don't output a @code{.size} assembler directive, or anything
25607 else that would cause trouble if the function is split in the
25608 middle, and the two halves are placed at locations far apart in
25609 memory.'' The traceback table has this property, since it
25610 includes the offset from the start of the function to the
25611 traceback table itself.
25612
25613 System V.4 PowerPC (and the embedded ABI derived from it) uses a
25614 different traceback table. */
25615 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25616 && ! flag_inhibit_size_directive
25617 && rs6000_traceback != traceback_none && !cfun->is_thunk)
25618 {
25619 const char *fname = NULL;
25620 const char *language_string = lang_hooks.name;
25621 int fixed_parms = 0, float_parms = 0, parm_info = 0;
25622 int i;
25623 int optional_tbtab;
25624 rs6000_stack_t *info = rs6000_stack_info ();
25625
25626 if (rs6000_traceback == traceback_full)
25627 optional_tbtab = 1;
25628 else if (rs6000_traceback == traceback_part)
25629 optional_tbtab = 0;
25630 else
25631 optional_tbtab = !optimize_size && !TARGET_ELF;
25632
25633 if (optional_tbtab)
25634 {
25635 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
25636 while (*fname == '.') /* V.4 encodes . in the name */
25637 fname++;
25638
25639 /* Need label immediately before tbtab, so we can compute
25640 its offset from the function start. */
25641 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
25642 ASM_OUTPUT_LABEL (file, fname);
25643 }
25644
25645 /* The .tbtab pseudo-op can only be used for the first eight
25646 expressions, since it can't handle the possibly variable-length
25647 fields that follow. However, if you omit the optional
25648 fields, the assembler outputs zeros for all optional fields
25649 anyway, giving each variable-length field its minimum length
25650 (as defined in sys/debug.h). Thus we cannot use the .tbtab
25651 pseudo-op at all. */
25652
25653 /* An all-zero word flags the start of the tbtab, for debuggers
25654 that have to find it by searching forward from the entry
25655 point or from the current pc. */
25656 fputs ("\t.long 0\n", file);
25657
25658 /* Tbtab format type. Use format type 0. */
25659 fputs ("\t.byte 0,", file);
25660
25661 /* Language type. Unfortunately, there does not seem to be any
25662 official way to discover the language being compiled, so we
25663 use language_string.
25664 C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
25665 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
25666 a number, so for now use 9. LTO, Go and JIT aren't assigned numbers
25667 either, so for now use 0. */
25668 if (lang_GNU_C ()
25669 || ! strcmp (language_string, "GNU GIMPLE")
25670 || ! strcmp (language_string, "GNU Go")
25671 || ! strcmp (language_string, "libgccjit"))
25672 i = 0;
25673 else if (! strcmp (language_string, "GNU F77")
25674 || lang_GNU_Fortran ())
25675 i = 1;
25676 else if (! strcmp (language_string, "GNU Pascal"))
25677 i = 2;
25678 else if (! strcmp (language_string, "GNU Ada"))
25679 i = 3;
25680 else if (lang_GNU_CXX ()
25681 || ! strcmp (language_string, "GNU Objective-C++"))
25682 i = 9;
25683 else if (! strcmp (language_string, "GNU Java"))
25684 i = 13;
25685 else if (! strcmp (language_string, "GNU Objective-C"))
25686 i = 14;
25687 else
25688 gcc_unreachable ();
25689 fprintf (file, "%d,", i);
25690
25691 /* 8 single bit fields: global linkage (not set for C extern linkage,
25692 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
25693 from start of procedure stored in tbtab, internal function, function
25694 has controlled storage, function has no toc, function uses fp,
25695 function logs/aborts fp operations. */
25696 /* Assume that fp operations are used if any fp reg must be saved. */
25697 fprintf (file, "%d,",
25698 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
25699
25700 /* 6 bitfields: function is interrupt handler, name present in
25701 proc table, function calls alloca, on condition directives
25702 (controls stack walks, 3 bits), saves condition reg, saves
25703 link reg. */
25704 /* The `function calls alloca' bit seems to be set whenever reg 31 is
25705 set up as a frame pointer, even when there is no alloca call. */
25706 fprintf (file, "%d,",
25707 ((optional_tbtab << 6)
25708 | ((optional_tbtab & frame_pointer_needed) << 5)
25709 | (info->cr_save_p << 1)
25710 | (info->lr_save_p)));
25711
25712 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
25713 (6 bits). */
25714 fprintf (file, "%d,",
25715 (info->push_p << 7) | (64 - info->first_fp_reg_save));
25716
25717 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
25718 fprintf (file, "%d,", (32 - first_reg_to_save ()));
25719
25720 if (optional_tbtab)
25721 {
25722 /* Compute the parameter info from the function decl argument
25723 list. */
25724 tree decl;
25725 int next_parm_info_bit = 31;
25726
25727 for (decl = DECL_ARGUMENTS (current_function_decl);
25728 decl; decl = DECL_CHAIN (decl))
25729 {
25730 rtx parameter = DECL_INCOMING_RTL (decl);
25731 machine_mode mode = GET_MODE (parameter);
25732
25733 if (GET_CODE (parameter) == REG)
25734 {
25735 if (SCALAR_FLOAT_MODE_P (mode))
25736 {
25737 int bits;
25738
25739 float_parms++;
25740
25741 switch (mode)
25742 {
25743 case SFmode:
25744 case SDmode:
25745 bits = 0x2;
25746 break;
25747
25748 case DFmode:
25749 case DDmode:
25750 case TFmode:
25751 case TDmode:
25752 bits = 0x3;
25753 break;
25754
25755 default:
25756 gcc_unreachable ();
25757 }
25758
25759 /* If only one bit will fit, don't or in this entry. */
25760 if (next_parm_info_bit > 0)
25761 parm_info |= (bits << (next_parm_info_bit - 1));
25762 next_parm_info_bit -= 2;
25763 }
25764 else
25765 {
25766 fixed_parms += ((GET_MODE_SIZE (mode)
25767 + (UNITS_PER_WORD - 1))
25768 / UNITS_PER_WORD);
25769 next_parm_info_bit -= 1;
25770 }
25771 }
25772 }
25773 }
25774
25775 /* Number of fixed point parameters. */
25776 /* This is actually the number of words of fixed point parameters; thus
25777 an 8-byte struct counts as 2, and thus the maximum value is 8. */
25778 fprintf (file, "%d,", fixed_parms);
25779
25780 /* 2 bitfields: number of floating point parameters (7 bits), parameters
25781 all on stack. */
25782 /* This is actually the number of fp registers that hold parameters;
25783 and thus the maximum value is 13. */
25784 /* Set parameters on stack bit if parameters are not in their original
25785 registers, regardless of whether they are on the stack? Xlc
25786 seems to set the bit when not optimizing. */
25787 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
25788
25789 if (! optional_tbtab)
25790 return;
25791
25792 /* Optional fields follow. Some are variable length. */
25793
25794 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single float,
25795 11 double float. */
25796 /* There is an entry for each parameter in a register, in the order that
25797 they occur in the parameter list. Any intervening arguments on the
25798 stack are ignored. If the list overflows a long (max possible length
25799 34 bits) then completely leave off all elements that don't fit. */
25800 /* Only emit this long if there was at least one parameter. */
25801 if (fixed_parms || float_parms)
25802 fprintf (file, "\t.long %d\n", parm_info);
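/* E.g. a function whose register parameters are (int, double, float)
packs the bit patterns 0, 11 and 10 downwards from bit 31, giving a
parm_info of 0x70000000. */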
25803
25804 /* Offset from start of code to tb table. */
25805 fputs ("\t.long ", file);
25806 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
25807 RS6000_OUTPUT_BASENAME (file, fname);
25808 putc ('-', file);
25809 rs6000_output_function_entry (file, fname);
25810 putc ('\n', file);
25811
25812 /* Interrupt handler mask. */
25813 /* Omit this long, since we never set the interrupt handler bit
25814 above. */
25815
25816 /* Number of CTL (controlled storage) anchors. */
25817 /* Omit this long, since the has_ctl bit is never set above. */
25818
25819 /* Displacement into stack of each CTL anchor. */
25820 /* Omit this list of longs, because there are no CTL anchors. */
25821
25822 /* Length of function name. */
25823 if (*fname == '*')
25824 ++fname;
25825 fprintf (file, "\t.short %d\n", (int) strlen (fname));
25826
25827 /* Function name. */
25828 assemble_string (fname, strlen (fname));
25829
25830 /* Register for alloca automatic storage; this is always reg 31.
25831 Only emit this if the alloca bit was set above. */
25832 if (frame_pointer_needed)
25833 fputs ("\t.byte 31\n", file);
25834
25835 fputs ("\t.align 2\n", file);
25836 }
25837 }
25838 \f
25839 /* A C compound statement that outputs the assembler code for a thunk
25840 function, used to implement C++ virtual function calls with
25841 multiple inheritance. The thunk acts as a wrapper around a virtual
25842 function, adjusting the implicit object parameter before handing
25843 control off to the real function.
25844
25845 First, emit code to add the integer DELTA to the location that
25846 contains the incoming first argument. Assume that this argument
25847 contains a pointer, and is the one used to pass the `this' pointer
25848 in C++. This is the incoming argument *before* the function
25849 prologue, e.g. `%o0' on a sparc. The addition must preserve the
25850 values of all other incoming arguments.
25851
25852 After the addition, emit code to jump to FUNCTION, which is a
25853 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
25854 not touch the return address. Hence returning from FUNCTION will
25855 return to whoever called the current `thunk'.
25856
25857 The effect must be as if FUNCTION had been called directly with the
25858 adjusted first argument. This macro is responsible for emitting
25859 all of the code for a thunk function; output_function_prologue()
25860 and output_function_epilogue() are not invoked.
25861
25862 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
25863 been extracted from it.) It might possibly be useful on some
25864 targets, but probably not.
25865
25866 If you do not define this macro, the target-independent code in the
25867 C++ frontend will generate a less efficient heavyweight thunk that
25868 calls FUNCTION instead of jumping to it. The generic approach does
25869 not support varargs. */
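/* For instance, with a small DELTA and no VCALL_OFFSET, the emitted
body reduces to roughly "addi r3,r3,DELTA" followed by a direct
branch to FUNCTION. */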
25870
25871 static void
25872 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
25873 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
25874 tree function)
25875 {
25876 rtx this_rtx, funexp;
25877 rtx_insn *insn;
25878
25879 reload_completed = 1;
25880 epilogue_completed = 1;
25881
25882 /* Mark the end of the (empty) prologue. */
25883 emit_note (NOTE_INSN_PROLOGUE_END);
25884
25885 /* Find the "this" pointer. If the function returns a structure,
25886 the structure return pointer is in r3. */
25887 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
25888 this_rtx = gen_rtx_REG (Pmode, 4);
25889 else
25890 this_rtx = gen_rtx_REG (Pmode, 3);
25891
25892 /* Apply the constant offset, if required. */
25893 if (delta)
25894 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
25895
25896 /* Apply the offset from the vtable, if required. */
25897 if (vcall_offset)
25898 {
25899 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
25900 rtx tmp = gen_rtx_REG (Pmode, 12);
25901
25902 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
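/* If VCALL_OFFSET does not fit in a signed 16-bit displacement, add
it to the vtable pointer first and load with displacement 0;
otherwise fold it into the load's displacement. */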
25903 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
25904 {
25905 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
25906 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
25907 }
25908 else
25909 {
25910 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
25911
25912 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
25913 }
25914 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
25915 }
25916
25917 /* Generate a tail call to the target function. */
25918 if (!TREE_USED (function))
25919 {
25920 assemble_external (function);
25921 TREE_USED (function) = 1;
25922 }
25923 funexp = XEXP (DECL_RTL (function), 0);
25924 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
25925
25926 #if TARGET_MACHO
25927 if (MACHOPIC_INDIRECT)
25928 funexp = machopic_indirect_call_target (funexp);
25929 #endif
25930
25931 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
25932 generate sibcall RTL explicitly. */
25933 insn = emit_call_insn (
25934 gen_rtx_PARALLEL (VOIDmode,
25935 gen_rtvec (4,
25936 gen_rtx_CALL (VOIDmode,
25937 funexp, const0_rtx),
25938 gen_rtx_USE (VOIDmode, const0_rtx),
25939 gen_rtx_USE (VOIDmode,
25940 gen_rtx_REG (SImode,
25941 LR_REGNO)),
25942 simple_return_rtx)));
25943 SIBLING_CALL_P (insn) = 1;
25944 emit_barrier ();
25945
25946 /* Ensure we have a global entry point for the thunk. ??? We could
25947 avoid that if the target routine doesn't need a global entry point,
25948 but we do not know whether this is the case at this point. */
25949 if (DEFAULT_ABI == ABI_ELFv2)
25950 cfun->machine->r2_setup_needed = true;
25951
25952 /* Run just enough of rest_of_compilation to get the insns emitted.
25953 There's not really enough bulk here to make other passes such as
25954 instruction scheduling worthwhile. Note that use_thunk calls
25955 assemble_start_function and assemble_end_function. */
25956 insn = get_insns ();
25957 shorten_branches (insn);
25958 final_start_function (insn, file, 1);
25959 final (insn, file, 1);
25960 final_end_function ();
25961
25962 reload_completed = 0;
25963 epilogue_completed = 0;
25964 }
25965 \f
25966 /* A quick summary of the various types of 'constant-pool tables'
25967 under PowerPC:
25968
25969 Target Flags Name One table per
25970 AIX (none) AIX TOC object file
25971 AIX -mfull-toc AIX TOC object file
25972 AIX -mminimal-toc AIX minimal TOC translation unit
25973 SVR4/EABI (none) SVR4 SDATA object file
25974 SVR4/EABI -fpic SVR4 pic object file
25975 SVR4/EABI -fPIC SVR4 PIC translation unit
25976 SVR4/EABI -mrelocatable EABI TOC function
25977 SVR4/EABI -maix AIX TOC object file
25978 SVR4/EABI -maix -mminimal-toc
25979 AIX minimal TOC translation unit
25980
25981 Name Reg. Set by entries contains:
25982 made by addrs? fp? sum?
25983
25984 AIX TOC 2 crt0 as Y option option
25985 AIX minimal TOC 30 prolog gcc Y Y option
25986 SVR4 SDATA 13 crt0 gcc N Y N
25987 SVR4 pic 30 prolog ld Y not yet N
25988 SVR4 PIC 30 prolog gcc Y option option
25989 EABI TOC 30 prolog gcc Y option option
25990
25991 */
25992
25993 /* Hash functions for the hash table. */
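/* rs6000_hash_constant below folds every field of the rtx into the
hash, mixing each component in with the prime multipliers 613 and
1231. */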
25994
25995 static unsigned
25996 rs6000_hash_constant (rtx k)
25997 {
25998 enum rtx_code code = GET_CODE (k);
25999 machine_mode mode = GET_MODE (k);
26000 unsigned result = (code << 3) ^ mode;
26001 const char *format;
26002 int flen, fidx;
26003
26004 format = GET_RTX_FORMAT (code);
26005 flen = strlen (format);
26006 fidx = 0;
26007
26008 switch (code)
26009 {
26010 case LABEL_REF:
26011 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
26012
26013 case CONST_WIDE_INT:
26014 {
26015 int i;
26016 flen = CONST_WIDE_INT_NUNITS (k);
26017 for (i = 0; i < flen; i++)
26018 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
26019 return result;
26020 }
26021
26022 case CONST_DOUBLE:
26023 if (mode != VOIDmode)
26024 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
26025 flen = 2;
26026 break;
26027
26028 case CODE_LABEL:
26029 fidx = 3;
26030 break;
26031
26032 default:
26033 break;
26034 }
26035
26036 for (; fidx < flen; fidx++)
26037 switch (format[fidx])
26038 {
26039 case 's':
26040 {
26041 unsigned i, len;
26042 const char *str = XSTR (k, fidx);
26043 len = strlen (str);
26044 result = result * 613 + len;
26045 for (i = 0; i < len; i++)
26046 result = result * 613 + (unsigned) str[i];
26047 break;
26048 }
26049 case 'u':
26050 case 'e':
26051 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
26052 break;
26053 case 'i':
26054 case 'n':
26055 result = result * 613 + (unsigned) XINT (k, fidx);
26056 break;
26057 case 'w':
26058 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
26059 result = result * 613 + (unsigned) XWINT (k, fidx);
26060 else
26061 {
26062 size_t i;
26063 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
26064 result = result * 613 + (unsigned) (XWINT (k, fidx)
26065 >> CHAR_BIT * i);
26066 }
26067 break;
26068 case '0':
26069 break;
26070 default:
26071 gcc_unreachable ();
26072 }
26073
26074 return result;
26075 }
26076
26077 hashval_t
26078 toc_hasher::hash (toc_hash_struct *thc)
26079 {
26080 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
26081 }
26082
26083 /* Compare H1 and H2 for equivalence. */
26084
26085 bool
26086 toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
26087 {
26088 rtx r1 = h1->key;
26089 rtx r2 = h2->key;
26090
26091 if (h1->key_mode != h2->key_mode)
26092 return 0;
26093
26094 return rtx_equal_p (r1, r2);
26095 }
26096
26097 /* These are the names given by the C++ front-end to vtables and
26098 vtable-like objects. Ideally, this logic should not be here;
26099 instead, there should be some programmatic way of inquiring as
26100 to whether or not an object is a vtable. */
26101
26102 #define VTABLE_NAME_P(NAME) \
26103 (strncmp ("_vt.", NAME, strlen ("_vt.")) == 0 \
26104 || strncmp ("_ZTV", NAME, strlen ("_ZTV")) == 0 \
26105 || strncmp ("_ZTT", NAME, strlen ("_ZTT")) == 0 \
26106 || strncmp ("_ZTI", NAME, strlen ("_ZTI")) == 0 \
26107 || strncmp ("_ZTC", NAME, strlen ("_ZTC")) == 0)
26108
26109 #ifdef NO_DOLLAR_IN_LABEL
26110 /* Return a GGC-allocated character string translating dollar signs in
26111 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
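/* For example, "foo$bar$baz" becomes "foo_bar_baz"; a name that
starts with '$' is returned unchanged. */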
26112
26113 const char *
26114 rs6000_xcoff_strip_dollar (const char *name)
26115 {
26116 char *strip, *p;
26117 const char *q;
26118 size_t len;
26119
26120 q = (const char *) strchr (name, '$');
26121
26122 if (q == 0 || q == name)
26123 return name;
26124
26125 len = strlen (name);
26126 strip = XALLOCAVEC (char, len + 1);
26127 strcpy (strip, name);
26128 p = strip + (q - name);
26129 while (p)
26130 {
26131 *p = '_';
26132 p = strchr (p + 1, '$');
26133 }
26134
26135 return ggc_alloc_string (strip, len);
26136 }
26137 #endif
26138
26139 void
26140 rs6000_output_symbol_ref (FILE *file, rtx x)
26141 {
26142 /* Currently C++ toc references to vtables can be emitted before it
26143 is decided whether the vtable is public or private. If this is
26144 the case, then the linker will eventually complain that there is
26145 a reference to an unknown section. Thus, for vtables only,
26146 we emit the TOC reference to reference the symbol and not the
26147 section. */
26148 const char *name = XSTR (x, 0);
26149
26150 tree decl = SYMBOL_REF_DECL (x);
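/* Public external functions and variables that are not already
decorated get an XCOFF mapping class appended: [DS] (function
descriptor csect) for functions, [UA] (unclassified) for data. */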
26151 if (decl /* sync condition with assemble_external () */
26152 && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
26153 && (TREE_CODE (decl) == VAR_DECL
26154 || TREE_CODE (decl) == FUNCTION_DECL)
26155 && name[strlen (name) - 1] != ']')
26156 {
26157 name = concat (name,
26158 (TREE_CODE (decl) == FUNCTION_DECL
26159 ? "[DS]" : "[UA]"),
26160 NULL);
26161 XSTR (x, 0) = name;
26162 }
26163
26164 if (VTABLE_NAME_P (name))
26165 {
26166 RS6000_OUTPUT_BASENAME (file, name);
26167 }
26168 else
26169 assemble_name (file, name);
26170 }
26171
26172 /* Output a TOC entry. We derive the entry name from what is being
26173 written. */
26174
26175 void
26176 output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
26177 {
26178 char buf[256];
26179 const char *name = buf;
26180 rtx base = x;
26181 HOST_WIDE_INT offset = 0;
26182
26183 gcc_assert (!TARGET_NO_TOC);
26184
26185 /* When the linker won't eliminate them, don't output duplicate
26186 TOC entries (this happens on AIX if there is any kind of TOC,
26187 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
26188 CODE_LABELs. */
26189 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
26190 {
26191 struct toc_hash_struct *h;
26192
26193 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
26194 time because GGC is not initialized at that point. */
26195 if (toc_hash_table == NULL)
26196 toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);
26197
26198 h = ggc_alloc<toc_hash_struct> ();
26199 h->key = x;
26200 h->key_mode = mode;
26201 h->labelno = labelno;
26202
26203 toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
26204 if (*found == NULL)
26205 *found = h;
26206 else /* This is indeed a duplicate.
26207 Set this label equal to that label. */
26208 {
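/* E.g. on XCOFF this emits ".set LC..<labelno>,LC..<first>", making
the duplicate label an alias for the entry already emitted. */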
26209 fputs ("\t.set ", file);
26210 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
26211 fprintf (file, "%d,", labelno);
26212 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
26213 fprintf (file, "%d\n", ((*found)->labelno));
26214
26215 #ifdef HAVE_AS_TLS
26216 if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
26217 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
26218 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
26219 {
26220 fputs ("\t.set ", file);
26221 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
26222 fprintf (file, "%d,", labelno);
26223 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
26224 fprintf (file, "%d\n", ((*found)->labelno));
26225 }
26226 #endif
26227 return;
26228 }
26229 }
26230
26231 /* If we're going to put a double constant in the TOC, make sure it's
26232 aligned properly when strict alignment is on. */
26233 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
26234 && STRICT_ALIGNMENT
26235 && GET_MODE_BITSIZE (mode) >= 64
26236 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC))
26237 ASM_OUTPUT_ALIGN (file, 3);
26239
26240 (*targetm.asm_out.internal_label) (file, "LC", labelno);
26241
26242 /* Handle FP constants specially. Note that if we have a minimal
26243 TOC, things we put here aren't actually in the TOC, so we can allow
26244 FP constants. */
26245 if (GET_CODE (x) == CONST_DOUBLE
26246 && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode))
26247 {
26248 REAL_VALUE_TYPE rv;
26249 long k[4];
26250
26251 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
26252 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
26253 REAL_VALUE_TO_TARGET_DECIMAL128 (rv, k);
26254 else
26255 REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, k);
26256
26257 if (TARGET_64BIT)
26258 {
26259 if (TARGET_ELF || TARGET_MINIMAL_TOC)
26260 fputs (DOUBLE_INT_ASM_OP, file);
26261 else
26262 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
26263 k[0] & 0xffffffff, k[1] & 0xffffffff,
26264 k[2] & 0xffffffff, k[3] & 0xffffffff);
26265 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
26266 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
26267 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
26268 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
26269 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
26270 return;
26271 }
26272 else
26273 {
26274 if (TARGET_ELF || TARGET_MINIMAL_TOC)
26275 fputs ("\t.long ", file);
26276 else
26277 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
26278 k[0] & 0xffffffff, k[1] & 0xffffffff,
26279 k[2] & 0xffffffff, k[3] & 0xffffffff);
26280 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
26281 k[0] & 0xffffffff, k[1] & 0xffffffff,
26282 k[2] & 0xffffffff, k[3] & 0xffffffff);
26283 return;
26284 }
26285 }
26286 else if (GET_CODE (x) == CONST_DOUBLE
26287 && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
26288 {
26289 REAL_VALUE_TYPE rv;
26290 long k[2];
26291
26292 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
26293
26294 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
26295 REAL_VALUE_TO_TARGET_DECIMAL64 (rv, k);
26296 else
26297 REAL_VALUE_TO_TARGET_DOUBLE (rv, k);
26298
26299 if (TARGET_64BIT)
26300 {
26301 if (TARGET_ELF || TARGET_MINIMAL_TOC)
26302 fputs (DOUBLE_INT_ASM_OP, file);
26303 else
26304 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
26305 k[0] & 0xffffffff, k[1] & 0xffffffff);
26306 fprintf (file, "0x%lx%08lx\n",
26307 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
26308 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
26309 return;
26310 }
26311 else
26312 {
26313 if (TARGET_ELF || TARGET_MINIMAL_TOC)
26314 fputs ("\t.long ", file);
26315 else
26316 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
26317 k[0] & 0xffffffff, k[1] & 0xffffffff);
26318 fprintf (file, "0x%lx,0x%lx\n",
26319 k[0] & 0xffffffff, k[1] & 0xffffffff);
26320 return;
26321 }
26322 }
26323 else if (GET_CODE (x) == CONST_DOUBLE
26324 && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
26325 {
26326 REAL_VALUE_TYPE rv;
26327 long l;
26328
26329 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
26330 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
26331 REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
26332 else
26333 REAL_VALUE_TO_TARGET_SINGLE (rv, l);
26334
26335 if (TARGET_64BIT)
26336 {
26337 if (TARGET_ELF || TARGET_MINIMAL_TOC)
26338 fputs (DOUBLE_INT_ASM_OP, file);
26339 else
26340 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
26341 if (WORDS_BIG_ENDIAN)
26342 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
26343 else
26344 fprintf (file, "0x%lx\n", l & 0xffffffff);
26345 return;
26346 }
26347 else
26348 {
26349 if (TARGET_ELF || TARGET_MINIMAL_TOC)
26350 fputs ("\t.long ", file);
26351 else
26352 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
26353 fprintf (file, "0x%lx\n", l & 0xffffffff);
26354 return;
26355 }
26356 }
26357 else if (GET_MODE (x) == VOIDmode && GET_CODE (x) == CONST_INT)
26358 {
26359 unsigned HOST_WIDE_INT low;
26360 HOST_WIDE_INT high;
26361
26362 low = INTVAL (x) & 0xffffffff;
26363 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
26364
26365 /* TOC entries are always Pmode-sized, so when big-endian
26366 smaller integer constants in the TOC need to be padded.
26367 (This is still a win over putting the constants in
26368 a separate constant pool, because then we'd have
26369 to have both a TOC entry _and_ the actual constant.)
26370
26371 For a 32-bit target, CONST_INT values are loaded and shifted
26372 entirely within `low' and can be stored in one TOC entry. */
26373
26374 /* It would be easy to make this work, but it doesn't now. */
26375 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
26376
26377 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
26378 {
26379 low |= high << 32;
26380 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
26381 high = (HOST_WIDE_INT) low >> 32;
26382 low &= 0xffffffff;
26383 }
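/* E.g. a 32-bit SImode constant 0x12345678 in a 64-bit big-endian
TOC is emitted below as the doubleword 0x1234567800000000. */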
26384
26385 if (TARGET_64BIT)
26386 {
26387 if (TARGET_ELF || TARGET_MINIMAL_TOC)
26388 fputs (DOUBLE_INT_ASM_OP, file);
26389 else
26390 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
26391 (long) high & 0xffffffff, (long) low & 0xffffffff);
26392 fprintf (file, "0x%lx%08lx\n",
26393 (long) high & 0xffffffff, (long) low & 0xffffffff);
26394 return;
26395 }
26396 else
26397 {
26398 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
26399 {
26400 if (TARGET_ELF || TARGET_MINIMAL_TOC)
26401 fputs ("\t.long ", file);
26402 else
26403 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
26404 (long) high & 0xffffffff, (long) low & 0xffffffff);
26405 fprintf (file, "0x%lx,0x%lx\n",
26406 (long) high & 0xffffffff, (long) low & 0xffffffff);
26407 }
26408 else
26409 {
26410 if (TARGET_ELF || TARGET_MINIMAL_TOC)
26411 fputs ("\t.long ", file);
26412 else
26413 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
26414 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
26415 }
26416 return;
26417 }
26418 }
26419
26420 if (GET_CODE (x) == CONST)
26421 {
26422 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
26423 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
26424
26425 base = XEXP (XEXP (x, 0), 0);
26426 offset = INTVAL (XEXP (XEXP (x, 0), 1));
26427 }
26428
26429 switch (GET_CODE (base))
26430 {
26431 case SYMBOL_REF:
26432 name = XSTR (base, 0);
26433 break;
26434
26435 case LABEL_REF:
26436 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
26437 CODE_LABEL_NUMBER (XEXP (base, 0)));
26438 break;
26439
26440 case CODE_LABEL:
26441 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
26442 break;
26443
26444 default:
26445 gcc_unreachable ();
26446 }
26447
26448 if (TARGET_ELF || TARGET_MINIMAL_TOC)
26449 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
26450 else
26451 {
26452 fputs ("\t.tc ", file);
26453 RS6000_OUTPUT_BASENAME (file, name);
26454
26455 if (offset < 0)
26456 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
26457 else if (offset)
26458 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
26459
26460 /* Mark large TOC symbols on AIX with [TE] so they are mapped
26461 after other TOC symbols, reducing overflow of small TOC access
26462 to [TC] symbols. */
26463 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
26464 ? "[TE]," : "[TC],", file);
26465 }
26466
26467 /* Currently C++ toc references to vtables can be emitted before it
26468 is decided whether the vtable is public or private. If this is
26469 the case, then the linker will eventually complain that there is
26470 a TOC reference to an unknown section. Thus, for vtables only,
26471 we emit the TOC reference to reference the symbol and not the
26472 section. */
26473 if (VTABLE_NAME_P (name))
26474 {
26475 RS6000_OUTPUT_BASENAME (file, name);
26476 if (offset < 0)
26477 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
26478 else if (offset > 0)
26479 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
26480 }
26481 else
26482 output_addr_const (file, x);
26483
26484 #if HAVE_AS_TLS
26485 if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF
26486 && SYMBOL_REF_TLS_MODEL (base) != 0)
26487 {
26488 if (SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_LOCAL_EXEC)
26489 fputs ("@le", file);
26490 else if (SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_INITIAL_EXEC)
26491 fputs ("@ie", file);
26492 /* Use global-dynamic for local-dynamic. */
26493 else if (SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_GLOBAL_DYNAMIC
26494 || SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_LOCAL_DYNAMIC)
26495 {
26496 putc ('\n', file);
26497 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
26498 fputs ("\t.tc .", file);
26499 RS6000_OUTPUT_BASENAME (file, name);
26500 fputs ("[TC],", file);
26501 output_addr_const (file, x);
26502 fputs ("@m", file);
26503 }
26504 }
26505 #endif
26506
26507 putc ('\n', file);
26508 }
26509 \f
26510 /* Output an assembler pseudo-op to write an ASCII string of N characters
26511 starting at P to FILE.
26512
26513 On the RS/6000, we have to do this using the .byte operation and
26514 write out special characters outside the quoted string.
26515 Also, the assembler is broken; very long strings are truncated,
26516 so we must artificially break them up early. */
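/* For example, the three input bytes 'a', '\n', 'b' come out as
	.byte "a"
	.byte 10
	.byte "b"
with printable characters quoted and the rest emitted in decimal. */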
26517
26518 void
26519 output_ascii (FILE *file, const char *p, int n)
26520 {
26521 char c;
26522 int i, count_string;
26523 const char *for_string = "\t.byte \"";
26524 const char *for_decimal = "\t.byte ";
26525 const char *to_close = NULL;
26526
26527 count_string = 0;
26528 for (i = 0; i < n; i++)
26529 {
26530 c = *p++;
26531 if (c >= ' ' && c < 0177)
26532 {
26533 if (for_string)
26534 fputs (for_string, file);
26535 putc (c, file);
26536
26537 /* Write two quotes to get one. */
26538 if (c == '"')
26539 {
26540 putc (c, file);
26541 ++count_string;
26542 }
26543
26544 for_string = NULL;
26545 for_decimal = "\"\n\t.byte ";
26546 to_close = "\"\n";
26547 ++count_string;
26548
26549 if (count_string >= 512)
26550 {
26551 fputs (to_close, file);
26552
26553 for_string = "\t.byte \"";
26554 for_decimal = "\t.byte ";
26555 to_close = NULL;
26556 count_string = 0;
26557 }
26558 }
26559 else
26560 {
26561 if (for_decimal)
26562 fputs (for_decimal, file);
26563 fprintf (file, "%d", c);
26564
26565 for_string = "\n\t.byte \"";
26566 for_decimal = ", ";
26567 to_close = "\n";
26568 count_string = 0;
26569 }
26570 }
26571
26572 /* Now close the string if we have written one. Then end the line. */
26573 if (to_close)
26574 fputs (to_close, file);
26575 }
26576 \f
26577 /* Generate a unique section name for FILENAME for a section type
26578 represented by SECTION_DESC. Output goes into BUF.
26579
26580 SECTION_DESC can be any string, as long as it is different for each
26581 possible section type.
26582
26583 We name the section in the same manner as xlc. The name begins with an
26584 underscore followed by the filename (after stripping any leading directory
26585 names) with the last period replaced by the string SECTION_DESC. If
26586 FILENAME does not contain a period, SECTION_DESC is appended to the end of
26587 the name. */
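/* For example, FILENAME "dir/foo.c" with SECTION_DESC "bss" yields
the section name "_foobss". */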
26588
26589 void
26590 rs6000_gen_section_name (char **buf, const char *filename,
26591 const char *section_desc)
26592 {
26593 const char *q, *after_last_slash, *last_period = 0;
26594 char *p;
26595 int len;
26596
26597 after_last_slash = filename;
26598 for (q = filename; *q; q++)
26599 {
26600 if (*q == '/')
26601 after_last_slash = q + 1;
26602 else if (*q == '.')
26603 last_period = q;
26604 }
26605
26606 len = strlen (after_last_slash) + strlen (section_desc) + 2;
26607 *buf = (char *) xmalloc (len);
26608
26609 p = *buf;
26610 *p++ = '_';
26611
26612 for (q = after_last_slash; *q; q++)
26613 {
26614 if (q == last_period)
26615 {
26616 strcpy (p, section_desc);
26617 p += strlen (section_desc);
26618 break;
26619 }
26620
26621 else if (ISALNUM (*q))
26622 *p++ = *q;
26623 }
26624
26625 if (last_period == 0)
26626 strcpy (p, section_desc);
26627 else
26628 *p = '\0';
26629 }
26630 \f
26631 /* Emit profile function. */
26632
26633 void
26634 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
26635 {
26636 /* Non-standard profiling for kernels, which just saves LR then calls
26637 _mcount without worrying about arg saves. The idea is to change
26638 the function prologue as little as possible as it isn't easy to
26639 account for arg save/restore code added just for _mcount. */
26640 if (TARGET_PROFILE_KERNEL)
26641 return;
26642
26643 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26644 {
26645 #ifndef NO_PROFILE_COUNTERS
26646 # define NO_PROFILE_COUNTERS 0
26647 #endif
26648 if (NO_PROFILE_COUNTERS)
26649 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
26650 LCT_NORMAL, VOIDmode, 0);
26651 else
26652 {
26653 char buf[30];
26654 const char *label_name;
26655 rtx fun;
26656
26657 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
26658 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
26659 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
26660
26661 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
26662 LCT_NORMAL, VOIDmode, 1, fun, Pmode);
26663 }
26664 }
26665 else if (DEFAULT_ABI == ABI_DARWIN)
26666 {
26667 const char *mcount_name = RS6000_MCOUNT;
26668 int caller_addr_regno = LR_REGNO;
26669
26670 /* Be conservative and always set this, at least for now. */
26671 crtl->uses_pic_offset_table = 1;
26672
26673 #if TARGET_MACHO
26674 /* For PIC code, set up a stub and collect the caller's address
26675 from r0, which is where the prologue puts it. */
26676 if (MACHOPIC_INDIRECT
26677 && crtl->uses_pic_offset_table)
26678 caller_addr_regno = 0;
26679 #endif
26680 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
26681 LCT_NORMAL, VOIDmode, 1,
26682 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
26683 }
26684 }
26685
26686 /* Write function profiler code. */
26687
26688 void
26689 output_function_profiler (FILE *file, int labelno)
26690 {
26691 char buf[100];
26692
26693 switch (DEFAULT_ABI)
26694 {
26695 default:
26696 gcc_unreachable ();
26697
26698 case ABI_V4:
26699 if (!TARGET_32BIT)
26700 {
26701 warning (0, "no profiling of 64-bit code for this ABI");
26702 return;
26703 }
26704 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
26705 fprintf (file, "\tmflr %s\n", reg_names[0]);
26706 if (NO_PROFILE_COUNTERS)
26707 {
26708 asm_fprintf (file, "\tstw %s,4(%s)\n",
26709 reg_names[0], reg_names[1]);
26710 }
26711 else if (TARGET_SECURE_PLT && flag_pic)
26712 {
26713 if (TARGET_LINK_STACK)
26714 {
26715 char name[32];
26716 get_ppc476_thunk_name (name);
26717 asm_fprintf (file, "\tbl %s\n", name);
26718 }
26719 else
26720 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
26721 asm_fprintf (file, "\tstw %s,4(%s)\n",
26722 reg_names[0], reg_names[1]);
26723 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
26724 asm_fprintf (file, "\taddis %s,%s,",
26725 reg_names[12], reg_names[12]);
26726 assemble_name (file, buf);
26727 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
26728 assemble_name (file, buf);
26729 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
26730 }
26731 else if (flag_pic == 1)
26732 {
26733 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
26734 asm_fprintf (file, "\tstw %s,4(%s)\n",
26735 reg_names[0], reg_names[1]);
26736 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
26737 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
26738 assemble_name (file, buf);
26739 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
26740 }
26741 else if (flag_pic > 1)
26742 {
26743 asm_fprintf (file, "\tstw %s,4(%s)\n",
26744 reg_names[0], reg_names[1]);
26745 /* Now, we need to get the address of the label. */
26746 if (TARGET_LINK_STACK)
26747 {
26748 char name[32];
26749 get_ppc476_thunk_name (name);
26750 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
26751 assemble_name (file, buf);
26752 fputs ("-.\n1:", file);
26753 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
26754 asm_fprintf (file, "\taddi %s,%s,4\n",
26755 reg_names[11], reg_names[11]);
26756 }
26757 else
26758 {
26759 fputs ("\tbcl 20,31,1f\n\t.long ", file);
26760 assemble_name (file, buf);
26761 fputs ("-.\n1:", file);
26762 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
26763 }
26764 asm_fprintf (file, "\tlwz %s,0(%s)\n",
26765 reg_names[0], reg_names[11]);
26766 asm_fprintf (file, "\tadd %s,%s,%s\n",
26767 reg_names[0], reg_names[0], reg_names[11]);
26768 }
26769 else
26770 {
26771 asm_fprintf (file, "\tlis %s,", reg_names[12]);
26772 assemble_name (file, buf);
26773 fputs ("@ha\n", file);
26774 asm_fprintf (file, "\tstw %s,4(%s)\n",
26775 reg_names[0], reg_names[1]);
26776 asm_fprintf (file, "\tla %s,", reg_names[0]);
26777 assemble_name (file, buf);
26778 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
26779 }
26780
26781 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
26782 fprintf (file, "\tbl %s%s\n",
26783 RS6000_MCOUNT, flag_pic ? "@plt" : "");
26784 break;
26785
26786 case ABI_AIX:
26787 case ABI_ELFv2:
26788 case ABI_DARWIN:
26789 /* Don't do anything, done in output_profile_hook (). */
26790 break;
26791 }
26792 }
26793
26794 \f
26795
26796 /* The following variable holds the last issued insn. */
26797
26798 static rtx last_scheduled_insn;
26799
26800 /* The following variable helps to balance the issuing of load and
26801 store instructions. */
26802
26803 static int load_store_pendulum;
26804
26805 /* Power4 load update and store update instructions are cracked into a
26806 load or store and an integer insn which are executed in the same cycle.
26807 Branches have their own dispatch slot which does not count against the
26808 GCC issue rate, but it changes the program flow so there are no other
26809 instructions to issue in this cycle. */
26810
26811 static int
26812 rs6000_variable_issue_1 (rtx_insn *insn, int more)
26813 {
26814 last_scheduled_insn = insn;
26815 if (GET_CODE (PATTERN (insn)) == USE
26816 || GET_CODE (PATTERN (insn)) == CLOBBER)
26817 {
26818 cached_can_issue_more = more;
26819 return cached_can_issue_more;
26820 }
26821
26822 if (insn_terminates_group_p (insn, current_group))
26823 {
26824 cached_can_issue_more = 0;
26825 return cached_can_issue_more;
26826 }
26827
26828 /* If the insn is not recognized it has no reservation; leave the issue count unchanged. */
26829 if (recog_memoized (insn) < 0)
26830 return more;
26831
26832 if (rs6000_sched_groups)
26833 {
26834 if (is_microcoded_insn (insn))
26835 cached_can_issue_more = 0;
26836 else if (is_cracked_insn (insn))
26837 cached_can_issue_more = more > 2 ? more - 2 : 0;
26838 else
26839 cached_can_issue_more = more - 1;
26840
26841 return cached_can_issue_more;
26842 }
26843
26844 if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
26845 return 0;
26846
26847 cached_can_issue_more = more - 1;
26848 return cached_can_issue_more;
26849 }
26850
26851 static int
26852 rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
26853 {
26854 int r = rs6000_variable_issue_1 (insn, more);
26855 if (verbose)
26856 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
26857 return r;
26858 }
26859
26860 /* Adjust the cost of a scheduling dependency. Return the new cost of
26861 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
26862
26863 static int
26864 rs6000_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn, int cost)
26865 {
26866 enum attr_type attr_type;
26867
26868 if (! recog_memoized (insn))
26869 return 0;
26870
26871 switch (REG_NOTE_KIND (link))
26872 {
26873 case REG_DEP_TRUE:
26874 {
26875 /* Data dependency; DEP_INSN writes a register that INSN reads
26876 some cycles later. */
26877
26878 /* Separate a load from a narrower, dependent store. */
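/* A load wider than a pending store to the same location typically
cannot use store forwarding, so keep the two well apart to hide the
resulting stall. */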
26879 if (rs6000_sched_groups
26880 && GET_CODE (PATTERN (insn)) == SET
26881 && GET_CODE (PATTERN (dep_insn)) == SET
26882 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
26883 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
26884 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
26885 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
26886 return cost + 14;
26887
26888 attr_type = get_attr_type (insn);
26889
26890 switch (attr_type)
26891 {
26892 case TYPE_JMPREG:
26893 /* Tell the first scheduling pass about the latency between
26894 a mtctr and bctr (and mtlr and br/blr). The first
26895 scheduling pass will not know about this latency since
26896 the mtctr instruction, which has the latency associated
26897 to it, will be generated by reload. */
26898 return 4;
26899 case TYPE_BRANCH:
26900 /* Leave some extra cycles between a compare and its
26901 dependent branch, to inhibit expensive mispredicts. */
26902 if ((rs6000_cpu_attr == CPU_PPC603
26903 || rs6000_cpu_attr == CPU_PPC604
26904 || rs6000_cpu_attr == CPU_PPC604E
26905 || rs6000_cpu_attr == CPU_PPC620
26906 || rs6000_cpu_attr == CPU_PPC630
26907 || rs6000_cpu_attr == CPU_PPC750
26908 || rs6000_cpu_attr == CPU_PPC7400
26909 || rs6000_cpu_attr == CPU_PPC7450
26910 || rs6000_cpu_attr == CPU_PPCE5500
26911 || rs6000_cpu_attr == CPU_PPCE6500
26912 || rs6000_cpu_attr == CPU_POWER4
26913 || rs6000_cpu_attr == CPU_POWER5
26914 || rs6000_cpu_attr == CPU_POWER7
26915 || rs6000_cpu_attr == CPU_POWER8
26916 || rs6000_cpu_attr == CPU_CELL)
26917 && recog_memoized (dep_insn)
26918 && (INSN_CODE (dep_insn) >= 0))
26919
26920 switch (get_attr_type (dep_insn))
26921 {
26922 case TYPE_CMP:
26923 case TYPE_FPCOMPARE:
26924 case TYPE_CR_LOGICAL:
26925 case TYPE_DELAYED_CR:
26926 return cost + 2;
26927 case TYPE_EXTS:
26928 case TYPE_MUL:
26929 if (get_attr_dot (dep_insn) == DOT_YES)
26930 return cost + 2;
26931 else
26932 break;
26933 case TYPE_SHIFT:
26934 if (get_attr_dot (dep_insn) == DOT_YES
26935 && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
26936 return cost + 2;
26937 else
26938 break;
26939 default:
26940 break;
26941 }
26942 break;
26943
26944 case TYPE_STORE:
26945 case TYPE_FPSTORE:
26946 if ((rs6000_cpu == PROCESSOR_POWER6)
26947 && recog_memoized (dep_insn)
26948 && (INSN_CODE (dep_insn) >= 0))
26949 {
26950
26951 if (GET_CODE (PATTERN (insn)) != SET)
26952 /* If this happens, we have to extend this to schedule
26953 optimally. Return default for now. */
26954 return cost;
26955
26956 /* Adjust the cost for the case where the value written
26957 by a fixed point operation is used as the address
26958 gen value on a store. */
26959 switch (get_attr_type (dep_insn))
26960 {
26961 case TYPE_LOAD:
26962 case TYPE_CNTLZ:
26963 {
26964 if (! store_data_bypass_p (dep_insn, insn))
26965 return get_attr_sign_extend (dep_insn)
26966 == SIGN_EXTEND_YES ? 6 : 4;
26967 break;
26968 }
26969 case TYPE_SHIFT:
26970 {
26971 if (! store_data_bypass_p (dep_insn, insn))
26972 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
26973 6 : 3;
26974 break;
26975 }
26976 case TYPE_INTEGER:
26977 case TYPE_ADD:
26978 case TYPE_LOGICAL:
26979 case TYPE_EXTS:
26980 case TYPE_INSERT:
26981 {
26982 if (! store_data_bypass_p (dep_insn, insn))
26983 return 3;
26984 break;
26985 }
26986 case TYPE_STORE:
26987 case TYPE_FPLOAD:
26988 case TYPE_FPSTORE:
26989 {
26990 if (get_attr_update (dep_insn) == UPDATE_YES
26991 && ! store_data_bypass_p (dep_insn, insn))
26992 return 3;
26993 break;
26994 }
26995 case TYPE_MUL:
26996 {
26997 if (! store_data_bypass_p (dep_insn, insn))
26998 return 17;
26999 break;
27000 }
27001 case TYPE_DIV:
27002 {
27003 if (! store_data_bypass_p (dep_insn, insn))
27004 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
27005 break;
27006 }
27007 default:
27008 break;
27009 }
27010 }
27011 break;
27012
27013 case TYPE_LOAD:
27014 if ((rs6000_cpu == PROCESSOR_POWER6)
27015 && recog_memoized (dep_insn)
27016 && (INSN_CODE (dep_insn) >= 0))
27017 {
27018
27019 /* Adjust the cost for the case where the value written
27020 by a fixed point instruction is used within the address
27021 gen portion of a subsequent load(u)(x) */
27022 switch (get_attr_type (dep_insn))
27023 {
27024 case TYPE_LOAD:
27025 case TYPE_CNTLZ:
27026 {
27027 if (set_to_load_agen (dep_insn, insn))
27028 return get_attr_sign_extend (dep_insn)
27029 == SIGN_EXTEND_YES ? 6 : 4;
27030 break;
27031 }
27032 case TYPE_SHIFT:
27033 {
27034 if (set_to_load_agen (dep_insn, insn))
27035 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
27036 6 : 3;
27037 break;
27038 }
27039 case TYPE_INTEGER:
27040 case TYPE_ADD:
27041 case TYPE_LOGICAL:
27042 case TYPE_EXTS:
27043 case TYPE_INSERT:
27044 {
27045 if (set_to_load_agen (dep_insn, insn))
27046 return 3;
27047 break;
27048 }
27049 case TYPE_STORE:
27050 case TYPE_FPLOAD:
27051 case TYPE_FPSTORE:
27052 {
27053 if (get_attr_update (dep_insn) == UPDATE_YES
27054 && set_to_load_agen (dep_insn, insn))
27055 return 3;
27056 break;
27057 }
27058 case TYPE_MUL:
27059 {
27060 if (set_to_load_agen (dep_insn, insn))
27061 return 17;
27062 break;
27063 }
27064 case TYPE_DIV:
27065 {
27066 if (set_to_load_agen (dep_insn, insn))
27067 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
27068 break;
27069 }
27070 default:
27071 break;
27072 }
27073 }
27074 break;
27075
27076 case TYPE_FPLOAD:
27077 if ((rs6000_cpu == PROCESSOR_POWER6)
27078 && get_attr_update (insn) == UPDATE_NO
27079 && recog_memoized (dep_insn)
27080 && (INSN_CODE (dep_insn) >= 0)
27081 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
27082 return 2;
27083
27084 default:
27085 break;
27086 }
27087
27088 /* Fall out to return default cost. */
27089 }
27090 break;
27091
27092 case REG_DEP_OUTPUT:
27093 /* Output dependency; DEP_INSN writes a register that INSN writes some
27094 cycles later. */
27095 if ((rs6000_cpu == PROCESSOR_POWER6)
27096 && recog_memoized (dep_insn)
27097 && (INSN_CODE (dep_insn) >= 0))
27098 {
27099 attr_type = get_attr_type (insn);
27100
27101 switch (attr_type)
27102 {
27103 case TYPE_FP:
27104 if (get_attr_type (dep_insn) == TYPE_FP)
27105 return 1;
27106 break;
27107 case TYPE_FPLOAD:
27108 if (get_attr_update (insn) == UPDATE_NO
27109 && get_attr_type (dep_insn) == TYPE_MFFGPR)
27110 return 2;
27111 break;
27112 default:
27113 break;
27114 }
27115 }
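/* Fall through: any remaining output dependency is treated as zero
cost, like an anti dependency. */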
27116 case REG_DEP_ANTI:
27117 /* Anti dependency; DEP_INSN reads a register that INSN writes some
27118 cycles later. */
27119 return 0;
27120
27121 default:
27122 gcc_unreachable ();
27123 }
27124
27125 return cost;
27126 }
27127
27128 /* Debug version of rs6000_adjust_cost. */
27129
27130 static int
27131 rs6000_debug_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn,
27132 int cost)
27133 {
27134 int ret = rs6000_adjust_cost (insn, link, dep_insn, cost);
27135
27136 if (ret != cost)
27137 {
27138 const char *dep;
27139
27140 switch (REG_NOTE_KIND (link))
27141 {
27142 default: dep = "unknown dependency"; break;
27143 case REG_DEP_TRUE: dep = "data dependency"; break;
27144 case REG_DEP_OUTPUT: dep = "output dependency"; break;
27145 case REG_DEP_ANTI: dep = "anti dependency"; break;
27146 }
27147
27148 fprintf (stderr,
27149 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
27150 "%s, insn:\n", ret, cost, dep);
27151
27152 debug_rtx (insn);
27153 }
27154
27155 return ret;
27156 }
27157
27158 /* Return true if INSN is microcoded.
27159 Return false otherwise. */
27160
27161 static bool
27162 is_microcoded_insn (rtx_insn *insn)
27163 {
27164 if (!insn || !NONDEBUG_INSN_P (insn)
27165 || GET_CODE (PATTERN (insn)) == USE
27166 || GET_CODE (PATTERN (insn)) == CLOBBER)
27167 return false;
27168
27169 if (rs6000_cpu_attr == CPU_CELL)
27170 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
27171
27172 if (rs6000_sched_groups
27173 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
27174 {
27175 enum attr_type type = get_attr_type (insn);
27176 if ((type == TYPE_LOAD
27177 && get_attr_update (insn) == UPDATE_YES
27178 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
27179 || ((type == TYPE_LOAD || type == TYPE_STORE)
27180 && get_attr_update (insn) == UPDATE_YES
27181 && get_attr_indexed (insn) == INDEXED_YES)
27182 || type == TYPE_MFCR)
27183 return true;
27184 }
27185
27186 return false;
27187 }
27188
27189 /* The function returns true if INSN is cracked into 2 instructions
27190 by the processor (and therefore occupies 2 issue slots). */
27191
27192 static bool
27193 is_cracked_insn (rtx_insn *insn)
27194 {
27195 if (!insn || !NONDEBUG_INSN_P (insn)
27196 || GET_CODE (PATTERN (insn)) == USE
27197 || GET_CODE (PATTERN (insn)) == CLOBBER)
27198 return false;
27199
27200 if (rs6000_sched_groups
27201 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
27202 {
27203 enum attr_type type = get_attr_type (insn);
27204 if ((type == TYPE_LOAD
27205 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
27206 && get_attr_update (insn) == UPDATE_NO)
27207 || (type == TYPE_LOAD
27208 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
27209 && get_attr_update (insn) == UPDATE_YES
27210 && get_attr_indexed (insn) == INDEXED_NO)
27211 || (type == TYPE_STORE
27212 && get_attr_update (insn) == UPDATE_YES
27213 && get_attr_indexed (insn) == INDEXED_NO)
27214 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
27215 && get_attr_update (insn) == UPDATE_YES)
27216 || type == TYPE_DELAYED_CR
27217 || (type == TYPE_EXTS
27218 && get_attr_dot (insn) == DOT_YES)
27219 || (type == TYPE_SHIFT
27220 && get_attr_dot (insn) == DOT_YES
27221 && get_attr_var_shift (insn) == VAR_SHIFT_NO)
27222 || (type == TYPE_MUL
27223 && get_attr_dot (insn) == DOT_YES)
27224 || type == TYPE_DIV
27225 || (type == TYPE_INSERT
27226 && get_attr_size (insn) == SIZE_32))
27227 return true;
27228 }
27229
27230 return false;
27231 }
27232
27233 /* The function returns true if INSN can be issued only from
27234 the branch slot. */
27235
27236 static bool
27237 is_branch_slot_insn (rtx_insn *insn)
27238 {
27239 if (!insn || !NONDEBUG_INSN_P (insn)
27240 || GET_CODE (PATTERN (insn)) == USE
27241 || GET_CODE (PATTERN (insn)) == CLOBBER)
27242 return false;
27243
27244 if (rs6000_sched_groups)
27245 {
27246 enum attr_type type = get_attr_type (insn);
27247 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
27248 return true;
27249 return false;
27250 }
27251
27252 return false;
27253 }
27254
27255 /* Return true if OUT_INSN sets a value that is
27256 used in the address generation computation of IN_INSN. */
27257 static bool
27258 set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
27259 {
27260 rtx out_set, in_set;
27261
27262 /* For performance reasons, only handle the simple case where
27263 both insns are a single_set. */
27264 out_set = single_set (out_insn);
27265 if (out_set)
27266 {
27267 in_set = single_set (in_insn);
27268 if (in_set)
27269 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
27270 }
27271
27272 return false;
27273 }
27274
27275 /* Try to determine base/offset/size parts of the given MEM.
27276 Return true if successful, false if all the values couldn't
27277 be determined.
27278
27279 This function only looks for REG or REG+CONST address forms.
27280 REG+REG address form will return false. */
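/* E.g. for (mem:DI (plus:DI (reg 9) (const_int 16))) this sets *BASE
to r9, *OFFSET to 16 and *SIZE to 8. */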
27281
27282 static bool
27283 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
27284 HOST_WIDE_INT *size)
27285 {
27286 rtx addr_rtx;
27287 if (MEM_SIZE_KNOWN_P (mem))
27288 *size = MEM_SIZE (mem);
27289 else
27290 return false;
27291
27292 addr_rtx = (XEXP (mem, 0));
27293 if (GET_CODE (addr_rtx) == PRE_MODIFY)
27294 addr_rtx = XEXP (addr_rtx, 1);
27295
27296 *offset = 0;
27297 while (GET_CODE (addr_rtx) == PLUS
27298 && CONST_INT_P (XEXP (addr_rtx, 1)))
27299 {
27300 *offset += INTVAL (XEXP (addr_rtx, 1));
27301 addr_rtx = XEXP (addr_rtx, 0);
27302 }
27303 if (!REG_P (addr_rtx))
27304 return false;
27305
27306 *base = addr_rtx;
27307 return true;
27308 }
27309
27310 /* Return true if the target storage location of MEM1 is adjacent
27311 to the target storage location of MEM2. */
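/* For example, two 4-byte accesses at r9+0 and r9+4 are adjacent in
either order. */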
27313
27314 static bool
27315 adjacent_mem_locations (rtx mem1, rtx mem2)
27316 {
27317 rtx reg1, reg2;
27318 HOST_WIDE_INT off1, size1, off2, size2;
27319
27320 if (get_memref_parts (mem1, &reg1, &off1, &size1)
27321 && get_memref_parts (mem2, &reg2, &off2, &size2))
27322 return ((REGNO (reg1) == REGNO (reg2))
27323 && ((off1 + size1 == off2)
27324 || (off2 + size2 == off1)));
27325
27326 return false;
27327 }
27328
27329 /* This function returns true if it can be determined that the two MEM
27330 locations overlap by at least 1 byte based on base reg/offset/size. */
27331
27332 static bool
27333 mem_locations_overlap (rtx mem1, rtx mem2)
27334 {
27335 rtx reg1, reg2;
27336 HOST_WIDE_INT off1, size1, off2, size2;
27337
27338 if (get_memref_parts (mem1, &reg1, &off1, &size1)
27339 && get_memref_parts (mem2, &reg2, &off2, &size2))
27340 return ((REGNO (reg1) == REGNO (reg2))
27341 && (((off1 <= off2) && (off1 + size1 > off2))
27342 || ((off2 <= off1) && (off2 + size2 > off1))));
27343
27344 return false;
27345 }
27346
27347 /* A C statement (sans semicolon) to update the integer scheduling
27348 priority INSN_PRIORITY (INSN). Increase the priority to execute the
27349 INSN earlier, reduce the priority to execute INSN later. Do not
27350 define this macro if you do not need to adjust the scheduling
27351 priorities of insns. */
27352
27353 static int
27354 rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
27355 {
27356 rtx load_mem, str_mem;
27357 /* On machines (like the 750) which have asymmetric integer units,
27358 where one integer unit can do multiply and divides and the other
27359 can't, reduce the priority of multiply/divide so it is scheduled
27360 before other integer operations. */
27361
27362 #if 0
27363 if (! INSN_P (insn))
27364 return priority;
27365
27366 if (GET_CODE (PATTERN (insn)) == USE)
27367 return priority;
27368
27369 switch (rs6000_cpu_attr) {
27370 case CPU_PPC750:
27371 switch (get_attr_type (insn))
27372 {
27373 default:
27374 break;
27375
27376 case TYPE_MUL:
27377 case TYPE_DIV:
27378 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
27379 priority, priority);
27380 if (priority >= 0 && priority < 0x01000000)
27381 priority >>= 3;
27382 break;
27383 }
27384 }
27385 #endif
27386
27387 if (insn_must_be_first_in_group (insn)
27388 && reload_completed
27389 && current_sched_info->sched_max_insns_priority
27390 && rs6000_sched_restricted_insns_priority)
27391 {
27392
27393 /* Prioritize insns that can be dispatched only in the first
27394 dispatch slot. */
27395 if (rs6000_sched_restricted_insns_priority == 1)
27396 /* Attach highest priority to insn. This means that in
27397 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
27398 precede 'priority' (critical path) considerations. */
27399 return current_sched_info->sched_max_insns_priority;
27400 else if (rs6000_sched_restricted_insns_priority == 2)
27401 /* Increase priority of insn by a minimal amount. This means that in
27402 haifa-sched.c:ready_sort(), only 'priority' (critical path)
27403 considerations precede dispatch-slot restriction considerations. */
27404 return (priority + 1);
27405 }
27406
27407 if (rs6000_cpu == PROCESSOR_POWER6
27408 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
27409 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
27410 /* Attach highest priority to insn if the scheduler has just issued two
27411 stores and this instruction is a load, or two loads and this instruction
27412 is a store. Power6 wants loads and stores scheduled alternately
27413 when possible. */
27414 return current_sched_info->sched_max_insns_priority;
27415
27416 return priority;
27417 }
27418
27419 /* Return true if the instruction is nonpipelined on the Cell. */
27420 static bool
27421 is_nonpipeline_insn (rtx_insn *insn)
27422 {
27423 enum attr_type type;
27424 if (!insn || !NONDEBUG_INSN_P (insn)
27425 || GET_CODE (PATTERN (insn)) == USE
27426 || GET_CODE (PATTERN (insn)) == CLOBBER)
27427 return false;
27428
27429 type = get_attr_type (insn);
27430 if (type == TYPE_MUL
27431 || type == TYPE_DIV
27432 || type == TYPE_SDIV
27433 || type == TYPE_DDIV
27434 || type == TYPE_SSQRT
27435 || type == TYPE_DSQRT
27436 || type == TYPE_MFCR
27437 || type == TYPE_MFCRF
27438 || type == TYPE_MFJMPR)
27439 {
27440 return true;
27441 }
27442 return false;
27443 }
27444
27445
27446 /* Return how many instructions the machine can issue per cycle. */
27447
27448 static int
27449 rs6000_issue_rate (void)
27450 {
27451 /* Unless scheduling for register pressure, use issue rate of 1 for
27452 first scheduling pass to decrease degradation. */
27453 if (!reload_completed && !flag_sched_pressure)
27454 return 1;
27455
27456 switch (rs6000_cpu_attr) {
27457 case CPU_RS64A:
27458 case CPU_PPC601: /* ? */
27459 case CPU_PPC7450:
27460 return 3;
27461 case CPU_PPC440:
27462 case CPU_PPC603:
27463 case CPU_PPC750:
27464 case CPU_PPC7400:
27465 case CPU_PPC8540:
27466 case CPU_PPC8548:
27467 case CPU_CELL:
27468 case CPU_PPCE300C2:
27469 case CPU_PPCE300C3:
27470 case CPU_PPCE500MC:
27471 case CPU_PPCE500MC64:
27472 case CPU_PPCE5500:
27473 case CPU_PPCE6500:
27474 case CPU_TITAN:
27475 return 2;
27476 case CPU_PPC476:
27477 case CPU_PPC604:
27478 case CPU_PPC604E:
27479 case CPU_PPC620:
27480 case CPU_PPC630:
27481 return 4;
27482 case CPU_POWER4:
27483 case CPU_POWER5:
27484 case CPU_POWER6:
27485 case CPU_POWER7:
27486 return 5;
27487 case CPU_POWER8:
27488 return 7;
27489 default:
27490 return 1;
27491 }
27492 }
27493
27494 /* Return how many instructions to look ahead for better insn
27495 scheduling. */
27496
27497 static int
27498 rs6000_use_sched_lookahead (void)
27499 {
27500 switch (rs6000_cpu_attr)
27501 {
27502 case CPU_PPC8540:
27503 case CPU_PPC8548:
27504 return 4;
27505
27506 case CPU_CELL:
27507 return (reload_completed ? 8 : 0);
27508
27509 default:
27510 return 0;
27511 }
27512 }
27513
27514 /* We are choosing insn from the ready queue. Return zero if INSN can be
27515 chosen. */
27516 static int
27517 rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
27518 {
27519 if (ready_index == 0)
27520 return 0;
27521
27522 if (rs6000_cpu_attr != CPU_CELL)
27523 return 0;
27524
27525 gcc_assert (insn != NULL_RTX && INSN_P (insn));
27526
27527 if (!reload_completed
27528 || is_nonpipeline_insn (insn)
27529 || is_microcoded_insn (insn))
27530 return 1;
27531
27532 return 0;
27533 }
27534
27535 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
27536 and return true. */
27537
27538 static bool
27539 find_mem_ref (rtx pat, rtx *mem_ref)
27540 {
27541 const char * fmt;
27542 int i, j;
27543
27544 /* stack_tie does not produce any real memory traffic. */
27545 if (tie_operand (pat, VOIDmode))
27546 return false;
27547
27548 if (GET_CODE (pat) == MEM)
27549 {
27550 *mem_ref = pat;
27551 return true;
27552 }
27553
27554 /* Recursively process the pattern. */
27555 fmt = GET_RTX_FORMAT (GET_CODE (pat));
27556
27557 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
27558 {
27559 if (fmt[i] == 'e')
27560 {
27561 if (find_mem_ref (XEXP (pat, i), mem_ref))
27562 return true;
27563 }
27564 else if (fmt[i] == 'E')
27565 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
27566 {
27567 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
27568 return true;
27569 }
27570 }
27571
27572 return false;
27573 }
27574
27575 /* Determine if PAT is a PATTERN of a load insn. */
27576
27577 static bool
27578 is_load_insn1 (rtx pat, rtx *load_mem)
27579 {
27580 if (!pat)
27581 return false;
27582
27583 if (GET_CODE (pat) == SET)
27584 return find_mem_ref (SET_SRC (pat), load_mem);
27585
27586 if (GET_CODE (pat) == PARALLEL)
27587 {
27588 int i;
27589
27590 for (i = 0; i < XVECLEN (pat, 0); i++)
27591 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
27592 return true;
27593 }
27594
27595 return false;
27596 }
27597
27598 /* Determine if INSN loads from memory. */
27599
27600 static bool
27601 is_load_insn (rtx insn, rtx *load_mem)
27602 {
27603 if (!insn || !INSN_P (insn))
27604 return false;
27605
27606 if (CALL_P (insn))
27607 return false;
27608
27609 return is_load_insn1 (PATTERN (insn), load_mem);
27610 }
27611
27612 /* Determine if PAT is a PATTERN of a store insn. */
27613
27614 static bool
27615 is_store_insn1 (rtx pat, rtx *str_mem)
27616 {
27617 if (!pat)
27618 return false;
27619
27620 if (GET_CODE (pat) == SET)
27621 return find_mem_ref (SET_DEST (pat), str_mem);
27622
27623 if (GET_CODE (pat) == PARALLEL)
27624 {
27625 int i;
27626
27627 for (i = 0; i < XVECLEN (pat, 0); i++)
27628 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
27629 return true;
27630 }
27631
27632 return false;
27633 }
27634
27635 /* Determine if INSN stores to memory. */
27636
27637 static bool
27638 is_store_insn (rtx insn, rtx *str_mem)
27639 {
27640 if (!insn || !INSN_P (insn))
27641 return false;
27642
27643 return is_store_insn1 (PATTERN (insn), str_mem);
27644 }
27645
27646 /* Returns whether the dependence between INSN and NEXT is considered
27647 costly by the given target. */
27648
27649 static bool
27650 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
27651 {
27652 rtx insn;
27653 rtx next;
27654 rtx load_mem, str_mem;
27655
27656 /* If the flag is not enabled, no dependence is considered costly;
27657 allow all dependent insns in the same group.
27658 This is the most aggressive option.  */
27659 if (rs6000_sched_costly_dep == no_dep_costly)
27660 return false;
27661
27662 /* If the flag is set to 1, a dependence is always considered costly;
27663 do not allow dependent instructions in the same group.
27664 This is the most conservative option.  */
27665 if (rs6000_sched_costly_dep == all_deps_costly)
27666 return true;
27667
27668 insn = DEP_PRO (dep);
27669 next = DEP_CON (dep);
27670
27671 if (rs6000_sched_costly_dep == store_to_load_dep_costly
27672 && is_load_insn (next, &load_mem)
27673 && is_store_insn (insn, &str_mem))
27674 /* Prevent load after store in the same group. */
27675 return true;
27676
27677 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
27678 && is_load_insn (next, &load_mem)
27679 && is_store_insn (insn, &str_mem)
27680 && DEP_TYPE (dep) == REG_DEP_TRUE
27681 && mem_locations_overlap (str_mem, load_mem))
27682 /* Prevent load after store in the same group if it is a true
27683 dependence. */
27684 return true;
27685
27686 /* The flag is set to X; dependences with latency >= X are considered costly,
27687 and will not be scheduled in the same group. */
27688 if (rs6000_sched_costly_dep <= max_dep_latency
27689 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
27690 return true;
27691
27692 return false;
27693 }
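
/* For reference, the policy checked above is chosen on the command line
   with -msched-costly-dep=<value>; as documented for this option, the
   recognized settings map onto the cases handled above:

     no                  no dependence is costly (no_dep_costly)
     all                 all dependences are costly (all_deps_costly)
     store_to_load       any store->load dependence
     true_store_to_load  only true store->load dependences to
			 overlapping locations
     <number>            dependences with latency >= number  */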
27694
27695 /* Return the next insn after INSN that is found before TAIL is reached,
27696 skipping any "non-active" insns - insns that will not actually occupy
27697 an issue slot. Return NULL_RTX if such an insn is not found. */
27698
27699 static rtx_insn *
27700 get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
27701 {
27702 if (insn == NULL_RTX || insn == tail)
27703 return NULL;
27704
27705 while (1)
27706 {
27707 insn = NEXT_INSN (insn);
27708 if (insn == NULL_RTX || insn == tail)
27709 return NULL;
27710
27711 if (CALL_P (insn)
27712 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
27713 || (NONJUMP_INSN_P (insn)
27714 && GET_CODE (PATTERN (insn)) != USE
27715 && GET_CODE (PATTERN (insn)) != CLOBBER
27716 && INSN_CODE (insn) != CODE_FOR_stack_tie))
27717 break;
27718 }
27719 return insn;
27720 }
27721
27722 /* We are about to begin issuing insns for this clock cycle. */
27723
27724 static int
27725 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
27726 rtx_insn **ready ATTRIBUTE_UNUSED,
27727 int *pn_ready ATTRIBUTE_UNUSED,
27728 int clock_var ATTRIBUTE_UNUSED)
27729 {
27730 int n_ready = *pn_ready;
27731
27732 if (sched_verbose)
27733 fprintf (dump, "// rs6000_sched_reorder :\n");
27734
27735 /* Reorder the ready list, if the first ready insn
27736 is a non-pipelined insn.  */
27737 if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
27738 {
27739 if (is_nonpipeline_insn (ready[n_ready - 1])
27740 && (recog_memoized (ready[n_ready - 2]) > 0))
27741 /* Simply swap the first two insns.  */
27742 std::swap (ready[n_ready - 1], ready[n_ready - 2]);
27743 }
27744
27745 if (rs6000_cpu == PROCESSOR_POWER6)
27746 load_store_pendulum = 0;
27747
27748 return rs6000_issue_rate ();
27749 }
27750
27751 /* Like rs6000_sched_reorder, but called after issuing each insn. */
27752
27753 static int
27754 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
27755 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
27756 {
27757 if (sched_verbose)
27758 fprintf (dump, "// rs6000_sched_reorder2 :\n");
27759
27760 /* For Power6, we need to handle some special cases to try to keep the
27761 store queue from overflowing and triggering expensive flushes.
27762
27763 This code monitors how load and store instructions are being issued
27764 and skews the ready list one way or the other to increase the likelihood
27765 that a desired instruction is issued at the proper time.
27766
27767 A couple of things are done. First, we maintain a "load_store_pendulum"
27768 to track the current state of load/store issue.
27769
27770 - If the pendulum is at zero, then no loads or stores have been
27771 issued in the current cycle so we do nothing.
27772
27773 - If the pendulum is 1, then a single load has been issued in this
27774 cycle and we attempt to locate another load in the ready list to
27775 issue with it.
27776
27777 - If the pendulum is -2, then two stores have already been
27778 issued in this cycle, so we increase the priority of the first load
27779 in the ready list to increase its likelihood of being chosen first
27780 in the next cycle.
27781
27782 - If the pendulum is -1, then a single store has been issued in this
27783 cycle and we attempt to locate another store in the ready list to
27784 issue with it, preferring a store to an adjacent memory location to
27785 facilitate store pairing in the store queue.
27786
27787 - If the pendulum is 2, then two loads have already been
27788 issued in this cycle, so we increase the priority of the first store
27789 in the ready list to increase its likelihood of being chosen first
27790 in the next cycle.
27791
27792 - If the pendulum < -2 or > 2, then do nothing.
27793
27794 Note: This code covers the most common scenarios.  There exist
27795 non-load/store instructions which make use of the LSU and which
27796 would need to be accounted for to strictly model the behavior
27797 of the machine.  Those instructions are currently unaccounted
27798 for, to help minimize the compile-time overhead of this code.
27799 */
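
/* A short worked example of the pendulum (illustrative only): given loads
   L1, L2 and store S1 becoming ready in one cycle, issuing L1 swings the
   pendulum 0 -> 1, so the code below hunts the ready list for another
   load and rotates L2 to the head; issuing L2 swings it 1 -> 2, so the
   first store found (S1) gets a priority bump so that it is favored in
   the next cycle.  */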
27800 if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
27801 {
27802 int pos;
27803 int i;
27804 rtx_insn *tmp;
27805 rtx load_mem, str_mem;
27806
27807 if (is_store_insn (last_scheduled_insn, &str_mem))
27808 /* Issuing a store, swing the load_store_pendulum to the left */
27809 load_store_pendulum--;
27810 else if (is_load_insn (last_scheduled_insn, &load_mem))
27811 /* Issuing a load, swing the load_store_pendulum to the right */
27812 load_store_pendulum++;
27813 else
27814 return cached_can_issue_more;
27815
27816 /* If the pendulum is balanced, or there is only one instruction on
27817 the ready list, then all is well, so return. */
27818 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
27819 return cached_can_issue_more;
27820
27821 if (load_store_pendulum == 1)
27822 {
27823 /* A load has been issued in this cycle. Scan the ready list
27824 for another load to issue with it */
27825 pos = *pn_ready - 1;
27826
27827 while (pos >= 0)
27828 {
27829 if (is_load_insn (ready[pos], &load_mem))
27830 {
27831 /* Found a load. Move it to the head of the ready list,
27832 and adjust its priority so that it is more likely to
27833 stay there.  */
27834 tmp = ready[pos];
27835 for (i = pos; i < *pn_ready - 1; i++)
27836 ready[i] = ready[i + 1];
27837 ready[*pn_ready - 1] = tmp;
27838
27839 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
27840 INSN_PRIORITY (tmp)++;
27841 break;
27842 }
27843 pos--;
27844 }
27845 }
27846 else if (load_store_pendulum == -2)
27847 {
27848 /* Two stores have been issued in this cycle. Increase the
27849 priority of the first load in the ready list to favor it for
27850 issuing in the next cycle. */
27851 pos = *pn_ready - 1;
27852
27853 while (pos >= 0)
27854 {
27855 if (is_load_insn (ready[pos], &load_mem)
27856 && !sel_sched_p ()
27857 && INSN_PRIORITY_KNOWN (ready[pos]))
27858 {
27859 INSN_PRIORITY (ready[pos])++;
27860
27861 /* Adjust the pendulum to account for the fact that a load
27862 was found and increased in priority. This is to prevent
27863 increasing the priority of multiple loads */
27864 load_store_pendulum--;
27865
27866 break;
27867 }
27868 pos--;
27869 }
27870 }
27871 else if (load_store_pendulum == -1)
27872 {
27873 /* A store has been issued in this cycle. Scan the ready list for
27874 another store to issue with it, preferring a store to an adjacent
27875 memory location */
27876 int first_store_pos = -1;
27877
27878 pos = *pn_ready - 1;
27879
27880 while (pos >= 0)
27881 {
27882 if (is_store_insn (ready[pos], &str_mem))
27883 {
27884 rtx str_mem2;
27885 /* Maintain the index of the first store found on the
27886 list */
27887 if (first_store_pos == -1)
27888 first_store_pos = pos;
27889
27890 if (is_store_insn (last_scheduled_insn, &str_mem2)
27891 && adjacent_mem_locations (str_mem, str_mem2))
27892 {
27893 /* Found an adjacent store. Move it to the head of the
27894 ready list, and adjust its priority so that it is
27895 more likely to stay there.  */
27896 tmp = ready[pos];
27897 for (i = pos; i < *pn_ready - 1; i++)
27898 ready[i] = ready[i + 1];
27899 ready[*pn_ready - 1] = tmp;
27900
27901 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
27902 INSN_PRIORITY (tmp)++;
27903
27904 first_store_pos = -1;
27905
27906 break;
27907 }
27908 }
27909 pos--;
27910 }
27911
27912 if (first_store_pos >= 0)
27913 {
27914 /* An adjacent store wasn't found, but a non-adjacent store was,
27915 so move the non-adjacent store to the front of the ready
27916 list, and adjust its priority so that it is more likely to
27917 stay there. */
27918 tmp = ready[first_store_pos];
27919 for (i = first_store_pos; i < *pn_ready - 1; i++)
27920 ready[i] = ready[i + 1];
27921 ready[*pn_ready - 1] = tmp;
27922 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
27923 INSN_PRIORITY (tmp)++;
27924 }
27925 }
27926 else if (load_store_pendulum == 2)
27927 {
27928 /* Two loads have been issued in this cycle. Increase the priority
27929 of the first store in the ready list to favor it for issuing in
27930 the next cycle. */
27931 pos = *pn_ready - 1;
27932
27933 while (pos >= 0)
27934 {
27935 if (is_store_insn (ready[pos], &str_mem)
27936 && !sel_sched_p ()
27937 && INSN_PRIORITY_KNOWN (ready[pos]))
27938 {
27939 INSN_PRIORITY (ready[pos])++;
27940
27941 /* Adjust the pendulum to account for the fact that a store
27942 was found and increased in priority. This is to prevent
27943 increasing the priority of multiple stores */
27944 load_store_pendulum++;
27945
27946 break;
27947 }
27948 pos--;
27949 }
27950 }
27951 }
27952
27953 return cached_can_issue_more;
27954 }
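
/* The three "move it to the head" loops above share one idiom: delete the
   element at POS and re-append it at the end of the array (the end is the
   head of the scheduler's ready list).  A stand-alone sketch of that
   rotation, equivalent to the explicit copy loops used above:

     static void
     rotate_to_head (rtx_insn **ready, int n_ready, int pos)
     {
       rtx_insn *tmp = ready[pos];
       memmove (&ready[pos], &ready[pos + 1],
		(n_ready - 1 - pos) * sizeof (rtx_insn *));
       ready[n_ready - 1] = tmp;
     }  */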
27955
27956 /* Return whether the presence of INSN causes a dispatch group termination
27957 of group WHICH_GROUP.
27958
27959 If WHICH_GROUP == current_group, this function will return true if INSN
27960 causes the termination of the current group (i.e, the dispatch group to
27961 which INSN belongs). This means that INSN will be the last insn in the
27962 group it belongs to.
27963
27964 If WHICH_GROUP == previous_group, this function will return true if INSN
27965 causes the termination of the previous group (i.e., the dispatch group that
27966 precedes the group to which INSN belongs). This means that INSN will be
27967 the first insn in the group it belongs to.  */
27968
27969 static bool
27970 insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
27971 {
27972 bool first, last;
27973
27974 if (! insn)
27975 return false;
27976
27977 first = insn_must_be_first_in_group (insn);
27978 last = insn_must_be_last_in_group (insn);
27979
27980 if (first && last)
27981 return true;
27982
27983 if (which_group == current_group)
27984 return last;
27985 else if (which_group == previous_group)
27986 return first;
27987
27988 return false;
27989 }
27990
27991
27992 static bool
27993 insn_must_be_first_in_group (rtx_insn *insn)
27994 {
27995 enum attr_type type;
27996
27997 if (!insn
27998 || NOTE_P (insn)
27999 || DEBUG_INSN_P (insn)
28000 || GET_CODE (PATTERN (insn)) == USE
28001 || GET_CODE (PATTERN (insn)) == CLOBBER)
28002 return false;
28003
28004 switch (rs6000_cpu)
28005 {
28006 case PROCESSOR_POWER5:
28007 if (is_cracked_insn (insn))
28008 return true;
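/* FALLTHRU */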
28009 case PROCESSOR_POWER4:
28010 if (is_microcoded_insn (insn))
28011 return true;
28012
28013 if (!rs6000_sched_groups)
28014 return false;
28015
28016 type = get_attr_type (insn);
28017
28018 switch (type)
28019 {
28020 case TYPE_MFCR:
28021 case TYPE_MFCRF:
28022 case TYPE_MTCR:
28023 case TYPE_DELAYED_CR:
28024 case TYPE_CR_LOGICAL:
28025 case TYPE_MTJMPR:
28026 case TYPE_MFJMPR:
28027 case TYPE_DIV:
28028 case TYPE_LOAD_L:
28029 case TYPE_STORE_C:
28030 case TYPE_ISYNC:
28031 case TYPE_SYNC:
28032 return true;
28033 default:
28034 break;
28035 }
28036 break;
28037 case PROCESSOR_POWER6:
28038 type = get_attr_type (insn);
28039
28040 switch (type)
28041 {
28042 case TYPE_EXTS:
28043 case TYPE_CNTLZ:
28044 case TYPE_TRAP:
28045 case TYPE_MUL:
28046 case TYPE_INSERT:
28047 case TYPE_FPCOMPARE:
28048 case TYPE_MFCR:
28049 case TYPE_MTCR:
28050 case TYPE_MFJMPR:
28051 case TYPE_MTJMPR:
28052 case TYPE_ISYNC:
28053 case TYPE_SYNC:
28054 case TYPE_LOAD_L:
28055 case TYPE_STORE_C:
28056 return true;
28057 case TYPE_SHIFT:
28058 if (get_attr_dot (insn) == DOT_NO
28059 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
28060 return true;
28061 else
28062 break;
28063 case TYPE_DIV:
28064 if (get_attr_size (insn) == SIZE_32)
28065 return true;
28066 else
28067 break;
28068 case TYPE_LOAD:
28069 case TYPE_STORE:
28070 case TYPE_FPLOAD:
28071 case TYPE_FPSTORE:
28072 if (get_attr_update (insn) == UPDATE_YES)
28073 return true;
28074 else
28075 break;
28076 default:
28077 break;
28078 }
28079 break;
28080 case PROCESSOR_POWER7:
28081 type = get_attr_type (insn);
28082
28083 switch (type)
28084 {
28085 case TYPE_CR_LOGICAL:
28086 case TYPE_MFCR:
28087 case TYPE_MFCRF:
28088 case TYPE_MTCR:
28089 case TYPE_DIV:
28090 case TYPE_ISYNC:
28091 case TYPE_LOAD_L:
28092 case TYPE_STORE_C:
28093 case TYPE_MFJMPR:
28094 case TYPE_MTJMPR:
28095 return true;
28096 case TYPE_MUL:
28097 case TYPE_SHIFT:
28098 case TYPE_EXTS:
28099 if (get_attr_dot (insn) == DOT_YES)
28100 return true;
28101 else
28102 break;
28103 case TYPE_LOAD:
28104 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
28105 || get_attr_update (insn) == UPDATE_YES)
28106 return true;
28107 else
28108 break;
28109 case TYPE_STORE:
28110 case TYPE_FPLOAD:
28111 case TYPE_FPSTORE:
28112 if (get_attr_update (insn) == UPDATE_YES)
28113 return true;
28114 else
28115 break;
28116 default:
28117 break;
28118 }
28119 break;
28120 case PROCESSOR_POWER8:
28121 type = get_attr_type (insn);
28122
28123 switch (type)
28124 {
28125 case TYPE_CR_LOGICAL:
28126 case TYPE_DELAYED_CR:
28127 case TYPE_MFCR:
28128 case TYPE_MFCRF:
28129 case TYPE_MTCR:
28130 case TYPE_SYNC:
28131 case TYPE_ISYNC:
28132 case TYPE_LOAD_L:
28133 case TYPE_STORE_C:
28134 case TYPE_VECSTORE:
28135 case TYPE_MFJMPR:
28136 case TYPE_MTJMPR:
28137 return true;
28138 case TYPE_SHIFT:
28139 case TYPE_EXTS:
28140 case TYPE_MUL:
28141 if (get_attr_dot (insn) == DOT_YES)
28142 return true;
28143 else
28144 break;
28145 case TYPE_LOAD:
28146 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
28147 || get_attr_update (insn) == UPDATE_YES)
28148 return true;
28149 else
28150 break;
28151 case TYPE_STORE:
28152 if (get_attr_update (insn) == UPDATE_YES
28153 && get_attr_indexed (insn) == INDEXED_YES)
28154 return true;
28155 else
28156 break;
28157 default:
28158 break;
28159 }
28160 break;
28161 default:
28162 break;
28163 }
28164
28165 return false;
28166 }
28167
28168 static bool
28169 insn_must_be_last_in_group (rtx_insn *insn)
28170 {
28171 enum attr_type type;
28172
28173 if (!insn
28174 || NOTE_P (insn)
28175 || DEBUG_INSN_P (insn)
28176 || GET_CODE (PATTERN (insn)) == USE
28177 || GET_CODE (PATTERN (insn)) == CLOBBER)
28178 return false;
28179
28180 switch (rs6000_cpu)
{
28181 case PROCESSOR_POWER4:
28182 case PROCESSOR_POWER5:
28183 if (is_microcoded_insn (insn))
28184 return true;
28185
28186 if (is_branch_slot_insn (insn))
28187 return true;
28188
28189 break;
28190 case PROCESSOR_POWER6:
28191 type = get_attr_type (insn);
28192
28193 switch (type)
28194 {
28195 case TYPE_EXTS:
28196 case TYPE_CNTLZ:
28197 case TYPE_TRAP:
28198 case TYPE_MUL:
28199 case TYPE_FPCOMPARE:
28200 case TYPE_MFCR:
28201 case TYPE_MTCR:
28202 case TYPE_MFJMPR:
28203 case TYPE_MTJMPR:
28204 case TYPE_ISYNC:
28205 case TYPE_SYNC:
28206 case TYPE_LOAD_L:
28207 case TYPE_STORE_C:
28208 return true;
28209 case TYPE_SHIFT:
28210 if (get_attr_dot (insn) == DOT_NO
28211 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
28212 return true;
28213 else
28214 break;
28215 case TYPE_DIV:
28216 if (get_attr_size (insn) == SIZE_32)
28217 return true;
28218 else
28219 break;
28220 default:
28221 break;
28222 }
28223 break;
28224 case PROCESSOR_POWER7:
28225 type = get_attr_type (insn);
28226
28227 switch (type)
28228 {
28229 case TYPE_ISYNC:
28230 case TYPE_SYNC:
28231 case TYPE_LOAD_L:
28232 case TYPE_STORE_C:
28233 return true;
28234 case TYPE_LOAD:
28235 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
28236 && get_attr_update (insn) == UPDATE_YES)
28237 return true;
28238 else
28239 break;
28240 case TYPE_STORE:
28241 if (get_attr_update (insn) == UPDATE_YES
28242 && get_attr_indexed (insn) == INDEXED_YES)
28243 return true;
28244 else
28245 break;
28246 default:
28247 break;
28248 }
28249 break;
28250 case PROCESSOR_POWER8:
28251 type = get_attr_type (insn);
28252
28253 switch (type)
28254 {
28255 case TYPE_MFCR:
28256 case TYPE_MTCR:
28257 case TYPE_ISYNC:
28258 case TYPE_SYNC:
28259 case TYPE_LOAD_L:
28260 case TYPE_STORE_C:
28261 return true;
28262 case TYPE_LOAD:
28263 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
28264 && get_attr_update (insn) == UPDATE_YES)
28265 return true;
28266 else
28267 break;
28268 case TYPE_STORE:
28269 if (get_attr_update (insn) == UPDATE_YES
28270 && get_attr_indexed (insn) == INDEXED_YES)
28271 return true;
28272 else
28273 break;
28274 default:
28275 break;
28276 }
28277 break;
28278 default:
28279 break;
28280 }
28281
28282 return false;
28283 }
28284
28285 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
28286 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
28287
28288 static bool
28289 is_costly_group (rtx *group_insns, rtx next_insn)
28290 {
28291 int i;
28292 int issue_rate = rs6000_issue_rate ();
28293
28294 for (i = 0; i < issue_rate; i++)
28295 {
28296 sd_iterator_def sd_it;
28297 dep_t dep;
28298 rtx insn = group_insns[i];
28299
28300 if (!insn)
28301 continue;
28302
28303 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
28304 {
28305 rtx next = DEP_CON (dep);
28306
28307 if (next == next_insn
28308 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
28309 return true;
28310 }
28311 }
28312
28313 return false;
28314 }
28315
28316 /* Utility function used by redefine_groups.
28317 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
28318 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
28319 to keep it "far" (in a separate group) from GROUP_INSNS, following
28320 one of the following schemes, depending on the value of the flag
28321 -minsert-sched-nops = X:
28322 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
28323 in order to force NEXT_INSN into a separate group.
28324 (2) X < sched_finish_regroup_exact: insert exactly X nops.
28325 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
28326 insertion (has a group just ended, how many vacant issue slots remain in the
28327 last group, and how many dispatch groups were encountered so far). */
28328
28329 static int
28330 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
28331 rtx_insn *next_insn, bool *group_end, int can_issue_more,
28332 int *group_count)
28333 {
28334 rtx nop;
28335 bool force;
28336 int issue_rate = rs6000_issue_rate ();
28337 bool end = *group_end;
28338 int i;
28339
28340 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
28341 return can_issue_more;
28342
28343 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
28344 return can_issue_more;
28345
28346 force = is_costly_group (group_insns, next_insn);
28347 if (!force)
28348 return can_issue_more;
28349
28350 if (sched_verbose > 6)
28351 fprintf (dump, "force: group count = %d, can_issue_more = %d\n",
28352 *group_count, can_issue_more);
28353
28354 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
28355 {
28356 if (*group_end)
28357 can_issue_more = 0;
28358
28359 /* Since only a branch can be issued in the last issue_slot, it is
28360 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
28361 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
28362 in this case the last nop will start a new group and the branch
28363 will be forced to the new group. */
28364 if (can_issue_more && !is_branch_slot_insn (next_insn))
28365 can_issue_more--;
28366
28367 /* Do we have a special group ending nop? */
28368 if (rs6000_cpu_attr == CPU_POWER6 || rs6000_cpu_attr == CPU_POWER7
28369 || rs6000_cpu_attr == CPU_POWER8)
28370 {
28371 nop = gen_group_ending_nop ();
28372 emit_insn_before (nop, next_insn);
28373 can_issue_more = 0;
28374 }
28375 else
28376 while (can_issue_more > 0)
28377 {
28378 nop = gen_nop ();
28379 emit_insn_before (nop, next_insn);
28380 can_issue_more--;
28381 }
28382
28383 *group_end = true;
28384 return 0;
28385 }
28386
28387 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
28388 {
28389 int n_nops = rs6000_sched_insert_nops;
28390
28391 /* Nops can't be issued from the branch slot, so the effective
28392 issue_rate for nops is 'issue_rate - 1'. */
28393 if (can_issue_more == 0)
28394 can_issue_more = issue_rate;
28395 can_issue_more--;
28396 if (can_issue_more == 0)
28397 {
28398 can_issue_more = issue_rate - 1;
28399 (*group_count)++;
28400 end = true;
28401 for (i = 0; i < issue_rate; i++)
28402 {
28403 group_insns[i] = 0;
28404 }
28405 }
28406
28407 while (n_nops > 0)
28408 {
28409 nop = gen_nop ();
28410 emit_insn_before (nop, next_insn);
28411 if (can_issue_more == issue_rate - 1) /* new group begins */
28412 end = false;
28413 can_issue_more--;
28414 if (can_issue_more == 0)
28415 {
28416 can_issue_more = issue_rate - 1;
28417 (*group_count)++;
28418 end = true;
28419 for (i = 0; i < issue_rate; i++)
28420 {
28421 group_insns[i] = 0;
28422 }
28423 }
28424 n_nops--;
28425 }
28426
28427 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
28428 can_issue_more++;
28429
28430 /* Is next_insn going to start a new group? */
28431 *group_end
28432 = (end
28433 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
28434 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
28435 || (can_issue_more < issue_rate &&
28436 insn_terminates_group_p (next_insn, previous_group)));
28437 if (*group_end && end)
28438 (*group_count)--;
28439
28440 if (sched_verbose > 6)
28441 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
28442 *group_count, can_issue_more);
28443 return can_issue_more;
28444 }
28445
28446 return can_issue_more;
28447 }
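
/* An illustrative trace of the fixed-nops scheme above: with
   issue_rate == 4 and -minsert-sched-nops=2, suppose can_issue_more is 2
   on entry.  The initial decrement rescales it to the nop budget out of
   'issue_rate - 1', giving 1.  The first nop exhausts that budget
   (1 -> 0), which ends the current group and resets the budget to 3; the
   second nop then begins the new group (3 -> 2).  The final
   "can_issue_more++" rescales back, so the caller sees 3 vacant slots out
   of issue_rate == 4.  */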
28448
28449 /* This function tries to synch the dispatch groups that the compiler "sees"
28450 with the dispatch groups that the processor dispatcher is expected to
28451 form in practice. It tries to achieve this synchronization by forcing the
28452 estimated processor grouping on the compiler (as opposed to the function
28453 'pad_groups', which tries to force the scheduler's grouping on the processor).
28454
28455 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
28456 examines the (estimated) dispatch groups that will be formed by the processor
28457 dispatcher. It marks these group boundaries to reflect the estimated
28458 processor grouping, overriding the grouping that the scheduler had marked.
28459 Depending on the value of the flag '-minsert-sched-nops' this function can
28460 force certain insns into separate groups or force a certain distance between
28461 them by inserting nops, for example, if there exists a "costly dependence"
28462 between the insns.
28463
28464 The function estimates the group boundaries that the processor will form as
28465 follows: It keeps track of how many vacant issue slots are available after
28466 each insn. A subsequent insn will start a new group if one of the following
28467 4 cases applies:
28468 - no more vacant issue slots remain in the current dispatch group.
28469 - only the last issue slot, which is the branch slot, is vacant, but the next
28470 insn is not a branch.
28471 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
28472 which means that a cracked insn (which occupies two issue slots) can't be
28473 issued in this group.
28474 - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
28475 start a new group. */
28476
28477 static int
28478 redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
28479 rtx_insn *tail)
28480 {
28481 rtx_insn *insn, *next_insn;
28482 int issue_rate;
28483 int can_issue_more;
28484 int slot, i;
28485 bool group_end;
28486 int group_count = 0;
28487 rtx *group_insns;
28488
28489 /* Initialize. */
28490 issue_rate = rs6000_issue_rate ();
28491 group_insns = XALLOCAVEC (rtx, issue_rate);
28492 for (i = 0; i < issue_rate; i++)
28493 {
28494 group_insns[i] = 0;
28495 }
28496 can_issue_more = issue_rate;
28497 slot = 0;
28498 insn = get_next_active_insn (prev_head_insn, tail);
28499 group_end = false;
28500
28501 while (insn != NULL_RTX)
28502 {
28503 slot = (issue_rate - can_issue_more);
28504 group_insns[slot] = insn;
28505 can_issue_more =
28506 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
28507 if (insn_terminates_group_p (insn, current_group))
28508 can_issue_more = 0;
28509
28510 next_insn = get_next_active_insn (insn, tail);
28511 if (next_insn == NULL_RTX)
28512 return group_count + 1;
28513
28514 /* Is next_insn going to start a new group? */
28515 group_end
28516 = (can_issue_more == 0
28517 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
28518 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
28519 || (can_issue_more < issue_rate &&
28520 insn_terminates_group_p (next_insn, previous_group)));
28521
28522 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
28523 next_insn, &group_end, can_issue_more,
28524 &group_count);
28525
28526 if (group_end)
28527 {
28528 group_count++;
28529 can_issue_more = 0;
28530 for (i = 0; i < issue_rate; i++)
28531 {
28532 group_insns[i] = 0;
28533 }
28534 }
28535
28536 if (GET_MODE (next_insn) == TImode && can_issue_more)
28537 PUT_MODE (next_insn, VOIDmode);
28538 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
28539 PUT_MODE (next_insn, TImode);
28540
28541 insn = next_insn;
28542 if (can_issue_more == 0)
28543 can_issue_more = issue_rate;
28544 } /* while */
28545
28546 return group_count;
28547 }
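
/* Note on the mode juggling above: the scheduler marks the first insn of
   each dispatch group by giving it TImode (VOIDmode otherwise), so group
   boundaries live on the insns themselves rather than in a side table.
   A sketch of the test this encodes:

     static bool
     starts_group_p (rtx_insn *insn)
     {
       return GET_MODE (insn) == TImode;
     }

   (starts_group_p is hypothetical; pad_groups below applies the same
   GET_MODE test directly.)  */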
28548
28549 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
28550 dispatch group boundaries that the scheduler had marked. Pad with nops
28551 any dispatch groups which have vacant issue slots, in order to force the
28552 scheduler's grouping on the processor dispatcher. The function
28553 returns the number of dispatch groups found. */
28554
28555 static int
28556 pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
28557 rtx_insn *tail)
28558 {
28559 rtx_insn *insn, *next_insn;
28560 rtx nop;
28561 int issue_rate;
28562 int can_issue_more;
28563 int group_end;
28564 int group_count = 0;
28565
28566 /* Initialize issue_rate. */
28567 issue_rate = rs6000_issue_rate ();
28568 can_issue_more = issue_rate;
28569
28570 insn = get_next_active_insn (prev_head_insn, tail);
28571 next_insn = get_next_active_insn (insn, tail);
28572
28573 while (insn != NULL_RTX)
28574 {
28575 can_issue_more =
28576 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
28577
28578 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
28579
28580 if (next_insn == NULL_RTX)
28581 break;
28582
28583 if (group_end)
28584 {
28585 /* If the scheduler had marked group termination at this location
28586 (between insn and next_insn), and neither insn nor next_insn will
28587 force group termination, pad the group with nops to force group
28588 termination. */
28589 if (can_issue_more
28590 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
28591 && !insn_terminates_group_p (insn, current_group)
28592 && !insn_terminates_group_p (next_insn, previous_group))
28593 {
28594 if (!is_branch_slot_insn (next_insn))
28595 can_issue_more--;
28596
28597 while (can_issue_more)
28598 {
28599 nop = gen_nop ();
28600 emit_insn_before (nop, next_insn);
28601 can_issue_more--;
28602 }
28603 }
28604
28605 can_issue_more = issue_rate;
28606 group_count++;
28607 }
28608
28609 insn = next_insn;
28610 next_insn = get_next_active_insn (insn, tail);
28611 }
28612
28613 return group_count;
28614 }
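
/* For reference, which of pad_groups and redefine_groups runs is selected
   with -minsert-sched-nops=<scheme>:

     no             do not insert nops
     pad            pad vacant slots in scheduler groups (pad_groups)
     regroup_exact  force the estimated processor grouping, inserting
		    exactly as many nops as needed (redefine_groups)
     <number>       insert exactly <number> nops at each costly
		    dependence (also via redefine_groups)

   rs6000_sched_finish below dispatches on this setting.  */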
28615
28616 /* We're beginning a new block. Initialize data structures as necessary. */
28617
28618 static void
28619 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
28620 int sched_verbose ATTRIBUTE_UNUSED,
28621 int max_ready ATTRIBUTE_UNUSED)
28622 {
28623 last_scheduled_insn = NULL_RTX;
28624 load_store_pendulum = 0;
28625 }
28626
28627 /* The following function is called at the end of scheduling a BB.
28628 After reload, it inserts nops as needed to enforce insn group bundling.  */
28629
28630 static void
28631 rs6000_sched_finish (FILE *dump, int sched_verbose)
28632 {
28633 int n_groups;
28634
28635 if (sched_verbose)
28636 fprintf (dump, "=== Finishing schedule.\n");
28637
28638 if (reload_completed && rs6000_sched_groups)
28639 {
28640 /* Do not run the sched_finish hook when selective scheduling is enabled.  */
28641 if (sel_sched_p ())
28642 return;
28643
28644 if (rs6000_sched_insert_nops == sched_finish_none)
28645 return;
28646
28647 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
28648 n_groups = pad_groups (dump, sched_verbose,
28649 current_sched_info->prev_head,
28650 current_sched_info->next_tail);
28651 else
28652 n_groups = redefine_groups (dump, sched_verbose,
28653 current_sched_info->prev_head,
28654 current_sched_info->next_tail);
28655
28656 if (sched_verbose >= 6)
28657 {
28658 fprintf (dump, "ngroups = %d\n", n_groups);
28659 print_rtl (dump, current_sched_info->prev_head);
28660 fprintf (dump, "Done finish_sched\n");
28661 }
28662 }
28663 }
28664
28665 struct _rs6000_sched_context
28666 {
28667 short cached_can_issue_more;
28668 rtx last_scheduled_insn;
28669 int load_store_pendulum;
28670 };
28671
28672 typedef struct _rs6000_sched_context rs6000_sched_context_def;
28673 typedef rs6000_sched_context_def *rs6000_sched_context_t;
28674
28675 /* Allocate storage for a new scheduling context.  */
28676 static void *
28677 rs6000_alloc_sched_context (void)
28678 {
28679 return xmalloc (sizeof (rs6000_sched_context_def));
28680 }
28681
28682 /* If CLEAN_P is true, initialize _SC with clean data;
28683 otherwise initialize it from the global context.  */
28684 static void
28685 rs6000_init_sched_context (void *_sc, bool clean_p)
28686 {
28687 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
28688
28689 if (clean_p)
28690 {
28691 sc->cached_can_issue_more = 0;
28692 sc->last_scheduled_insn = NULL_RTX;
28693 sc->load_store_pendulum = 0;
28694 }
28695 else
28696 {
28697 sc->cached_can_issue_more = cached_can_issue_more;
28698 sc->last_scheduled_insn = last_scheduled_insn;
28699 sc->load_store_pendulum = load_store_pendulum;
28700 }
28701 }
28702
28703 /* Sets the global scheduling context to the one pointed to by _SC. */
28704 static void
28705 rs6000_set_sched_context (void *_sc)
28706 {
28707 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
28708
28709 gcc_assert (sc != NULL);
28710
28711 cached_can_issue_more = sc->cached_can_issue_more;
28712 last_scheduled_insn = sc->last_scheduled_insn;
28713 load_store_pendulum = sc->load_store_pendulum;
28714 }
28715
28716 /* Free _SC. */
28717 static void
28718 rs6000_free_sched_context (void *_sc)
28719 {
28720 gcc_assert (_sc != NULL);
28721
28722 free (_sc);
28723 }
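
/* Together these four hooks implement the scheduling-context save/restore
   protocol used by the selective scheduler.  A minimal sketch of the
   calling pattern (hypothetical caller, shown only to illustrate the hook
   contract):

     void *ctx = rs6000_alloc_sched_context ();
     rs6000_init_sched_context (ctx, false);   (snapshot the globals)
     rs6000_set_sched_context (ctx);           (restore the snapshot)
     rs6000_free_sched_context (ctx);

   Only cached_can_issue_more, last_scheduled_insn and load_store_pendulum
   make up this target's context.  */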
28724
28725 \f
28726 /* Length in units of the trampoline for entering a nested function. */
28727
28728 int
28729 rs6000_trampoline_size (void)
28730 {
28731 int ret = 0;
28732
28733 switch (DEFAULT_ABI)
28734 {
28735 default:
28736 gcc_unreachable ();
28737
28738 case ABI_AIX:
28739 ret = (TARGET_32BIT) ? 12 : 24;
28740 break;
28741
28742 case ABI_ELFv2:
28743 gcc_assert (!TARGET_32BIT);
28744 ret = 32;
28745 break;
28746
28747 case ABI_DARWIN:
28748 case ABI_V4:
28749 ret = (TARGET_32BIT) ? 40 : 48;
28750 break;
28751 }
28752
28753 return ret;
28754 }
28755
28756 /* Emit RTL insns to initialize the variable parts of a trampoline.
28757 FNADDR is an RTX for the address of the function's pure code.
28758 CXT is an RTX for the static chain value for the function. */
28759
28760 static void
28761 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
28762 {
28763 int regsize = (TARGET_32BIT) ? 4 : 8;
28764 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
28765 rtx ctx_reg = force_reg (Pmode, cxt);
28766 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
28767
28768 switch (DEFAULT_ABI)
28769 {
28770 default:
28771 gcc_unreachable ();
28772
28773 /* Under AIX, just build the 3-word function descriptor.  */
28774 case ABI_AIX:
28775 {
28776 rtx fnmem, fn_reg, toc_reg;
28777
28778 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
28779 error ("you cannot take the address of a nested function if you use "
28780 "the -mno-pointers-to-nested-functions option");
28781
28782 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
28783 fn_reg = gen_reg_rtx (Pmode);
28784 toc_reg = gen_reg_rtx (Pmode);
28785
28786 /* Macro to shorten the code expansions below. */
28787 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
28788
28789 m_tramp = replace_equiv_address (m_tramp, addr);
28790
28791 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
28792 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
28793 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
28794 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
28795 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
28796
28797 # undef MEM_PLUS
28798 }
28799 break;
28800
28801 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
28802 case ABI_ELFv2:
28803 case ABI_DARWIN:
28804 case ABI_V4:
28805 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
28806 LCT_NORMAL, VOIDmode, 4,
28807 addr, Pmode,
28808 GEN_INT (rs6000_trampoline_size ()), SImode,
28809 fnaddr, Pmode,
28810 ctx_reg, Pmode);
28811 break;
28812 }
28813 }
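
/* For the AIX case above, what gets built is just a 3-word function
   descriptor; viewed as a struct (hypothetical type, for illustration
   only), the trampoline memory laid out by the three MEM_PLUS stores is:

     struct aix_trampoline
     {
       void *entry;	(code address, copied from FNADDR's descriptor)
       void *toc;	(TOC pointer, copied likewise)
       void *chain;	(static chain value, CXT)
     };

   with each field regsize (4 or 8) bytes wide.  */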
28814
28815 \f
28816 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
28817 identifier as an argument, so the front end shouldn't look it up. */
28818
28819 static bool
28820 rs6000_attribute_takes_identifier_p (const_tree attr_id)
28821 {
28822 return is_attribute_p ("altivec", attr_id);
28823 }
28824
28825 /* Handle the "altivec" attribute. The attribute may have
28826 arguments as follows:
28827
28828 __attribute__((altivec(vector__)))
28829 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
28830 __attribute__((altivec(bool__))) (always followed by 'unsigned')
28831
28832 and may appear more than once (e.g., 'vector bool char') in a
28833 given declaration. */
28834
28835 static tree
28836 rs6000_handle_altivec_attribute (tree *node,
28837 tree name ATTRIBUTE_UNUSED,
28838 tree args,
28839 int flags ATTRIBUTE_UNUSED,
28840 bool *no_add_attrs)
28841 {
28842 tree type = *node, result = NULL_TREE;
28843 machine_mode mode;
28844 int unsigned_p;
28845 char altivec_type
28846 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
28847 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
28848 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
28849 : '?');
28850
28851 while (POINTER_TYPE_P (type)
28852 || TREE_CODE (type) == FUNCTION_TYPE
28853 || TREE_CODE (type) == METHOD_TYPE
28854 || TREE_CODE (type) == ARRAY_TYPE)
28855 type = TREE_TYPE (type);
28856
28857 mode = TYPE_MODE (type);
28858
28859 /* Check for invalid AltiVec type qualifiers. */
28860 if (type == long_double_type_node)
28861 error ("use of %<long double%> in AltiVec types is invalid");
28862 else if (type == boolean_type_node)
28863 error ("use of boolean types in AltiVec types is invalid");
28864 else if (TREE_CODE (type) == COMPLEX_TYPE)
28865 error ("use of %<complex%> in AltiVec types is invalid");
28866 else if (DECIMAL_FLOAT_MODE_P (mode))
28867 error ("use of decimal floating point types in AltiVec types is invalid");
28868 else if (!TARGET_VSX)
28869 {
28870 if (type == long_unsigned_type_node || type == long_integer_type_node)
28871 {
28872 if (TARGET_64BIT)
28873 error ("use of %<long%> in AltiVec types is invalid for "
28874 "64-bit code without -mvsx");
28875 else if (rs6000_warn_altivec_long)
28876 warning (0, "use of %<long%> in AltiVec types is deprecated; "
28877 "use %<int%>");
28878 }
28879 else if (type == long_long_unsigned_type_node
28880 || type == long_long_integer_type_node)
28881 error ("use of %<long long%> in AltiVec types is invalid without "
28882 "-mvsx");
28883 else if (type == double_type_node)
28884 error ("use of %<double%> in AltiVec types is invalid without -mvsx");
28885 }
28886
28887 switch (altivec_type)
28888 {
28889 case 'v':
28890 unsigned_p = TYPE_UNSIGNED (type);
28891 switch (mode)
28892 {
28893 case TImode:
28894 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
28895 break;
28896 case DImode:
28897 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
28898 break;
28899 case SImode:
28900 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
28901 break;
28902 case HImode:
28903 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
28904 break;
28905 case QImode:
28906 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
28907 break;
28908 case SFmode: result = V4SF_type_node; break;
28909 case DFmode: result = V2DF_type_node; break;
28910 /* If the user says 'vector int bool', we may be handed the 'bool'
28911 attribute _before_ the 'vector' attribute, and so select the
28912 proper type in the 'b' case below. */
28913 case V4SImode: case V8HImode: case V16QImode: case V4SFmode:
28914 case V2DImode: case V2DFmode:
28915 result = type;
28916 default: break;
28917 }
28918 break;
28919 case 'b':
28920 switch (mode)
28921 {
28922 case DImode: case V2DImode: result = bool_V2DI_type_node; break;
28923 case SImode: case V4SImode: result = bool_V4SI_type_node; break;
28924 case HImode: case V8HImode: result = bool_V8HI_type_node; break;
28925 case QImode: case V16QImode: result = bool_V16QI_type_node;
28926 default: break;
28927 }
28928 break;
28929 case 'p':
28930 switch (mode)
28931 {
28932 case V8HImode: result = pixel_V8HI_type_node;
28933 default: break;
28934 }
28935 default: break;
28936 }
28937
28938 /* Propagate qualifiers attached to the element type
28939 onto the vector type. */
28940 if (result && result != type && TYPE_QUALS (type))
28941 result = build_qualified_type (result, TYPE_QUALS (type));
28942
28943 *no_add_attrs = true; /* No need to hang on to the attribute. */
28944
28945 if (result)
28946 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
28947
28948 return NULL_TREE;
28949 }
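
/* Usage example (illustrative): the internal attribute forms handled
   above are what the AltiVec type keywords reduce to, e.g.

     __attribute__ ((altivec (vector__))) unsigned int   vui;
     __attribute__ ((altivec (bool__)))   unsigned int   bi;
     __attribute__ ((altivec (pixel__)))  unsigned short px;

   declare what users write as "vector unsigned int", "vector bool int"
   and "vector pixel", yielding the unsigned_V4SI, bool_V4SI and
   pixel_V8HI type nodes selected above.  */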
28950
28951 /* AltiVec defines four built-in scalar types that serve as vector
28952 elements; we must teach the compiler how to mangle them. */
28953
28954 static const char *
28955 rs6000_mangle_type (const_tree type)
28956 {
28957 type = TYPE_MAIN_VARIANT (type);
28958
28959 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
28960 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
28961 return NULL;
28962
28963 if (type == bool_char_type_node) return "U6__boolc";
28964 if (type == bool_short_type_node) return "U6__bools";
28965 if (type == pixel_type_node) return "u7__pixel";
28966 if (type == bool_int_type_node) return "U6__booli";
28967 if (type == bool_long_type_node) return "U6__booll";
28968
28969 /* Mangle IBM extended float long double as `g' (__float128) on
28970 powerpc*-linux where long-double-64 previously was the default. */
28971 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
28972 && TARGET_ELF
28973 && TARGET_LONG_DOUBLE_128
28974 && !TARGET_IEEEQUAD)
28975 return "g";
28976
28977 /* For all other types, use normal C++ mangling. */
28978 return NULL;
28979 }
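
/* Illustrative mangling components produced above, following the Itanium
   C++ ABI's vendor-extension encodings ('u' introduces a vendor builtin
   type, 'U' a vendor extended qualifier):

     vector pixel     element mangles via "u7__pixel"
     vector bool int  element mangles via "U6__booli"
     long double      mangles as "g" (the __float128 slot) when it is
		      the 128-bit IBM extended format on ELF  */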
28980
28981 /* Handle a "longcall" or "shortcall" attribute; arguments as in
28982 struct attribute_spec.handler. */
28983
28984 static tree
28985 rs6000_handle_longcall_attribute (tree *node, tree name,
28986 tree args ATTRIBUTE_UNUSED,
28987 int flags ATTRIBUTE_UNUSED,
28988 bool *no_add_attrs)
28989 {
28990 if (TREE_CODE (*node) != FUNCTION_TYPE
28991 && TREE_CODE (*node) != FIELD_DECL
28992 && TREE_CODE (*node) != TYPE_DECL)
28993 {
28994 warning (OPT_Wattributes, "%qE attribute only applies to functions",
28995 name);
28996 *no_add_attrs = true;
28997 }
28998
28999 return NULL_TREE;
29000 }
29001
29002 /* Set longcall attributes on all functions declared when
29003 rs6000_default_long_calls is true. */
29004 static void
29005 rs6000_set_default_type_attributes (tree type)
29006 {
29007 if (rs6000_default_long_calls
29008 && (TREE_CODE (type) == FUNCTION_TYPE
29009 || TREE_CODE (type) == METHOD_TYPE))
29010 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
29011 NULL_TREE,
29012 TYPE_ATTRIBUTES (type));
29013
29014 #if TARGET_MACHO
29015 darwin_set_default_type_attributes (type);
29016 #endif
29017 }
29018
29019 /* Return a reference suitable for calling a function with the
29020 longcall attribute. */
29021
29022 rtx
29023 rs6000_longcall_ref (rtx call_ref)
29024 {
29025 const char *call_name;
29026 tree node;
29027
29028 if (GET_CODE (call_ref) != SYMBOL_REF)
29029 return call_ref;
29030
29031 /* System V adds '.' to the internal name, so skip them. */
29032 call_name = XSTR (call_ref, 0);
29033 if (*call_name == '.')
29034 {
29035 while (*call_name == '.')
29036 call_name++;
29037
29038 node = get_identifier (call_name);
29039 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
29040 }
29041
29042 return force_reg (Pmode, call_ref);
29043 }
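
/* Usage example (illustrative): the longcall machinery above lets a call
   reach beyond the +/-32MB span of a direct "bl":

     extern void far_away (void) __attribute__ ((longcall));

   Calls to far_away are then made through a register, via the force_reg
   path just above; the -mlongcall option makes this the default for
   every call (rs6000_default_long_calls).  */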
29044 \f
29045 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
29046 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
29047 #endif
29048
29049 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
29050 struct attribute_spec.handler. */
29051 static tree
29052 rs6000_handle_struct_attribute (tree *node, tree name,
29053 tree args ATTRIBUTE_UNUSED,
29054 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
29055 {
29056 tree *type = NULL;
29057 if (DECL_P (*node))
29058 {
29059 if (TREE_CODE (*node) == TYPE_DECL)
29060 type = &TREE_TYPE (*node);
29061 }
29062 else
29063 type = node;
29064
29065 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
29066 || TREE_CODE (*type) == UNION_TYPE)))
29067 {
29068 warning (OPT_Wattributes, "%qE attribute ignored", name);
29069 *no_add_attrs = true;
29070 }
29071
29072 else if ((is_attribute_p ("ms_struct", name)
29073 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
29074 || ((is_attribute_p ("gcc_struct", name)
29075 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
29076 {
29077 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
29078 name);
29079 *no_add_attrs = true;
29080 }
29081
29082 return NULL_TREE;
29083 }
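
/* Usage example (illustrative): the attributes validated above choose the
   record layout per type:

     struct __attribute__ ((ms_struct))  m { char c; int i : 8; };
     struct __attribute__ ((gcc_struct)) g { char c; int i : 8; };

   Only struct/union types accept them, and the pair is mutually
   exclusive, as the warnings above enforce; rs6000_ms_bitfield_layout_p
   below consults the surviving attribute.  */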
29084
29085 static bool
29086 rs6000_ms_bitfield_layout_p (const_tree record_type)
29087 {
29088 return (TARGET_USE_MS_BITFIELD_LAYOUT &&
29089 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
29090 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
29091 }
29092 \f
29093 #ifdef USING_ELFOS_H
29094
29095 /* A get_unnamed_section callback, used for switching to toc_section. */
29096
29097 static void
29098 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
29099 {
29100 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
29101 && TARGET_MINIMAL_TOC
29102 && !TARGET_RELOCATABLE)
29103 {
29104 if (!toc_initialized)
29105 {
29106 toc_initialized = 1;
29107 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
29108 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
29109 fprintf (asm_out_file, "\t.tc ");
29110 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
29111 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
29112 fprintf (asm_out_file, "\n");
29113
29114 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
29115 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
29116 fprintf (asm_out_file, " = .+32768\n");
29117 }
29118 else
29119 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
29120 }
29121 else if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
29122 && !TARGET_RELOCATABLE)
29123 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
29124 else
29125 {
29126 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
29127 if (!toc_initialized)
29128 {
29129 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
29130 fprintf (asm_out_file, " = .+32768\n");
29131 toc_initialized = 1;
29132 }
29133 }
29134 }
29135
29136 /* Implement TARGET_ASM_INIT_SECTIONS. */
29137
29138 static void
29139 rs6000_elf_asm_init_sections (void)
29140 {
29141 toc_section
29142 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
29143
29144 sdata2_section
29145 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
29146 SDATA2_SECTION_ASM_OP);
29147 }
29148
29149 /* Implement TARGET_SELECT_RTX_SECTION. */
29150
29151 static section *
29152 rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
29153 unsigned HOST_WIDE_INT align)
29154 {
29155 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
29156 return toc_section;
29157 else
29158 return default_elf_select_rtx_section (mode, x, align);
29159 }
29160 \f
29161 /* For a SYMBOL_REF, set generic flags and then perform some
29162 target-specific processing.
29163
29164 When the AIX ABI is requested on a non-AIX system, replace the
29165 function name with the real name (with a leading .) rather than the
29166 function descriptor name. This saves a lot of overriding code to
29167 read the prefixes. */
29168
29169 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
29170 static void
29171 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
29172 {
29173 default_encode_section_info (decl, rtl, first);
29174
29175 if (first
29176 && TREE_CODE (decl) == FUNCTION_DECL
29177 && !TARGET_AIX
29178 && DEFAULT_ABI == ABI_AIX)
29179 {
29180 rtx sym_ref = XEXP (rtl, 0);
29181 size_t len = strlen (XSTR (sym_ref, 0));
29182 char *str = XALLOCAVEC (char, len + 2);
29183 str[0] = '.';
29184 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
29185 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
29186 }
29187 }
29188
29189 static inline bool
29190 compare_section_name (const char *section, const char *templ)
29191 {
29192 int len;
29193
29194 len = strlen (templ);
29195 return (strncmp (section, templ, len) == 0
29196 && (section[len] == 0 || section[len] == '.'));
29197 }
29198
29199 bool
29200 rs6000_elf_in_small_data_p (const_tree decl)
29201 {
29202 if (rs6000_sdata == SDATA_NONE)
29203 return false;
29204
29205 /* We want to merge strings, so we never consider them small data. */
29206 if (TREE_CODE (decl) == STRING_CST)
29207 return false;
29208
29209 /* Functions are never in the small data area. */
29210 if (TREE_CODE (decl) == FUNCTION_DECL)
29211 return false;
29212
29213 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
29214 {
29215 const char *section = DECL_SECTION_NAME (decl);
29216 if (compare_section_name (section, ".sdata")
29217 || compare_section_name (section, ".sdata2")
29218 || compare_section_name (section, ".gnu.linkonce.s")
29219 || compare_section_name (section, ".sbss")
29220 || compare_section_name (section, ".sbss2")
29221 || compare_section_name (section, ".gnu.linkonce.sb")
29222 || strcmp (section, ".PPC.EMB.sdata0") == 0
29223 || strcmp (section, ".PPC.EMB.sbss0") == 0)
29224 return true;
29225 }
29226 else
29227 {
29228 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
29229
29230 if (size > 0
29231 && size <= g_switch_value
29232 /* If it's not public, and we're not going to reference it there,
29233 there's no need to put it in the small data section. */
29234 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
29235 return true;
29236 }
29237
29238 return false;
29239 }
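
/* Illustration of the two routes into small data tested above: an
   explicit small-data section name, or a size no larger than the -G
   threshold (g_switch_value).  Hypothetical declarations:

     int counter __attribute__ ((section (".sdata")));
     static short tiny;

   The first qualifies by section name; the second qualifies by size
   under, e.g., -G 8 (2 bytes <= 8).  */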
29240
29241 #endif /* USING_ELFOS_H */
29242 \f
29243 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
29244
29245 static bool
29246 rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
29247 {
29248 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
29249 }
29250
29251 /* Do not place thread-local symbols refs in the object blocks. */
29252
29253 static bool
29254 rs6000_use_blocks_for_decl_p (const_tree decl)
29255 {
29256 return !DECL_THREAD_LOCAL_P (decl);
29257 }
29258 \f
29259 /* Return a REG that occurs in ADDR with coefficient 1.
29260 ADDR can be effectively incremented by incrementing REG.
29261
29262 r0 is special and we must not select it as an address
29263 register by this routine since our caller will try to
29264 increment the returned register via an "la" instruction. */
29265
29266 rtx
29267 find_addr_reg (rtx addr)
29268 {
29269 while (GET_CODE (addr) == PLUS)
29270 {
29271 if (GET_CODE (XEXP (addr, 0)) == REG
29272 && REGNO (XEXP (addr, 0)) != 0)
29273 addr = XEXP (addr, 0);
29274 else if (GET_CODE (XEXP (addr, 1)) == REG
29275 && REGNO (XEXP (addr, 1)) != 0)
29276 addr = XEXP (addr, 1);
29277 else if (CONSTANT_P (XEXP (addr, 0)))
29278 addr = XEXP (addr, 1);
29279 else if (CONSTANT_P (XEXP (addr, 1)))
29280 addr = XEXP (addr, 0);
29281 else
29282 gcc_unreachable ();
29283 }
29284 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
29285 return addr;
29286 }
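
/* Example (illustrative): for an ADDR of the form

     (plus:SI (reg:SI 9) (const_int 8))

   the loop above strips the constant term and returns (reg:SI 9), which
   the caller can then advance with an "la 9,8(9)"-style update.  r0 is
   rejected because in such addressing forms the hardware reads RA=0 as
   the literal value zero.  */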
29287
29288 void
29289 rs6000_fatal_bad_address (rtx op)
29290 {
29291 fatal_insn ("bad address", op);
29292 }
29293
29294 #if TARGET_MACHO
29295
29296 typedef struct branch_island_d {
29297 tree function_name;
29298 tree label_name;
29299 int line_number;
29300 } branch_island;
29301
29302
29303 static vec<branch_island, va_gc> *branch_islands;
29304
29305 /* Remember to generate a branch island for far calls to the given
29306 function. */
29307
29308 static void
29309 add_compiler_branch_island (tree label_name, tree function_name,
29310 int line_number)
29311 {
29312 branch_island bi = {function_name, label_name, line_number};
29313 vec_safe_push (branch_islands, bi);
29314 }
29315
29316 /* Generate far-jump branch islands for everything recorded in
29317 branch_islands. Invoked immediately after the last instruction of
29318 the epilogue has been emitted; the branch islands must be appended
29319 to, and contiguous with, the function body. Mach-O stubs are
29320 generated in machopic_output_stub(). */
29321
29322 static void
29323 macho_branch_islands (void)
29324 {
29325 char tmp_buf[512];
29326
29327 while (!vec_safe_is_empty (branch_islands))
29328 {
29329 branch_island *bi = &branch_islands->last ();
29330 const char *label = IDENTIFIER_POINTER (bi->label_name);
29331 const char *name = IDENTIFIER_POINTER (bi->function_name);
29332 char name_buf[512];
29333 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
29334 if (name[0] == '*' || name[0] == '&')
29335 strcpy (name_buf, name+1);
29336 else
29337 {
29338 name_buf[0] = '_';
29339 strcpy (name_buf+1, name);
29340 }
29341 strcpy (tmp_buf, "\n");
29342 strcat (tmp_buf, label);
29343 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
29344 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
29345 dbxout_stabd (N_SLINE, bi->line_number);
29346 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
29347 if (flag_pic)
29348 {
29349 if (TARGET_LINK_STACK)
29350 {
29351 char name[32];
29352 get_ppc476_thunk_name (name);
29353 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
29354 strcat (tmp_buf, name);
29355 strcat (tmp_buf, "\n");
29356 strcat (tmp_buf, label);
29357 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
29358 }
29359 else
29360 {
29361 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
29362 strcat (tmp_buf, label);
29363 strcat (tmp_buf, "_pic\n");
29364 strcat (tmp_buf, label);
29365 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
29366 }
29367
29368 strcat (tmp_buf, "\taddis r11,r11,ha16(");
29369 strcat (tmp_buf, name_buf);
29370 strcat (tmp_buf, " - ");
29371 strcat (tmp_buf, label);
29372 strcat (tmp_buf, "_pic)\n");
29373
29374 strcat (tmp_buf, "\tmtlr r0\n");
29375
29376 strcat (tmp_buf, "\taddi r12,r11,lo16(");
29377 strcat (tmp_buf, name_buf);
29378 strcat (tmp_buf, " - ");
29379 strcat (tmp_buf, label);
29380 strcat (tmp_buf, "_pic)\n");
29381
29382 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
29383 }
29384 else
29385 {
29386 strcat (tmp_buf, ":\nlis r12,hi16(");
29387 strcat (tmp_buf, name_buf);
29388 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
29389 strcat (tmp_buf, name_buf);
29390 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
29391 }
29392 output_asm_insn (tmp_buf, 0);
29393 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
29394 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
29395 dbxout_stabd (N_SLINE, bi->line_number);
29396 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
29397 branch_islands->pop ();
29398 }
29399 }
29400
29401 /* NO_PREVIOUS_DEF checks whether the function name is already in the
29402 branch island list.  */
29403
29404 static int
29405 no_previous_def (tree function_name)
29406 {
29407 branch_island *bi;
29408 unsigned ix;
29409
29410 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
29411 if (function_name == bi->function_name)
29412 return 0;
29413 return 1;
29414 }
29415
29416 /* GET_PREV_LABEL gets the label name from the previous definition of
29417 the function. */
29418
29419 static tree
29420 get_prev_label (tree function_name)
29421 {
29422 branch_island *bi;
29423 unsigned ix;
29424
29425 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
29426 if (function_name == bi->function_name)
29427 return bi->label_name;
29428 return NULL_TREE;
29429 }
29430
29431 /* INSN is either a function call or a millicode call. It may have an
29432 unconditional jump in its delay slot.
29433
29434 CALL_DEST is the routine we are calling. */
29435
29436 char *
29437 output_call (rtx_insn *insn, rtx *operands, int dest_operand_number,
29438 int cookie_operand_number)
29439 {
29440 static char buf[256];
29441 if (darwin_emit_branch_islands
29442 && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
29443 && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
29444 {
29445 tree labelname;
29446 tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
29447
29448 if (no_previous_def (funname))
29449 {
29450 rtx label_rtx = gen_label_rtx ();
29451 char *label_buf, temp_buf[256];
29452 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
29453 CODE_LABEL_NUMBER (label_rtx));
29454 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
29455 labelname = get_identifier (label_buf);
29456 add_compiler_branch_island (labelname, funname, insn_line (insn));
29457 }
29458 else
29459 labelname = get_prev_label (funname);
29460
29461 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
29462 instruction will reach 'foo', otherwise link as 'bl L42'".
29463 "L42" should be a 'branch island', that will do a far jump to
29464 'foo'. Branch islands are generated in
29465 macho_branch_islands(). */
29466 sprintf (buf, "jbsr %%z%d,%.246s",
29467 dest_operand_number, IDENTIFIER_POINTER (labelname));
29468 }
29469 else
29470 sprintf (buf, "bl %%z%d", dest_operand_number);
29471 return buf;
29472 }
29473
29474 /* Generate PIC and indirect symbol stubs. */
29475
29476 void
29477 machopic_output_stub (FILE *file, const char *symb, const char *stub)
29478 {
29479 unsigned int length;
29480 char *symbol_name, *lazy_ptr_name;
29481 char *local_label_0;
29482 static int label = 0;
29483
29484 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
29485 symb = (*targetm.strip_name_encoding) (symb);
29486
29487
29488 length = strlen (symb);
29489 symbol_name = XALLOCAVEC (char, length + 32);
29490 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
29491
29492 lazy_ptr_name = XALLOCAVEC (char, length + 32);
29493 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
29494
29495 if (flag_pic == 2)
29496 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
29497 else
29498 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
29499
29500 if (flag_pic == 2)
29501 {
29502 fprintf (file, "\t.align 5\n");
29503
29504 fprintf (file, "%s:\n", stub);
29505 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
29506
29507 label++;
29508 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
29509 sprintf (local_label_0, "\"L%011d$spb\"", label);
29510
29511 fprintf (file, "\tmflr r0\n");
29512 if (TARGET_LINK_STACK)
29513 {
29514 char name[32];
29515 get_ppc476_thunk_name (name);
29516 fprintf (file, "\tbl %s\n", name);
29517 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
29518 }
29519 else
29520 {
29521 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
29522 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
29523 }
29524 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
29525 lazy_ptr_name, local_label_0);
29526 fprintf (file, "\tmtlr r0\n");
29527 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
29528 (TARGET_64BIT ? "ldu" : "lwzu"),
29529 lazy_ptr_name, local_label_0);
29530 fprintf (file, "\tmtctr r12\n");
29531 fprintf (file, "\tbctr\n");
29532 }
29533 else
29534 {
29535 fprintf (file, "\t.align 4\n");
29536
29537 fprintf (file, "%s:\n", stub);
29538 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
29539
29540 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
29541 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
29542 (TARGET_64BIT ? "ldu" : "lwzu"),
29543 lazy_ptr_name);
29544 fprintf (file, "\tmtctr r12\n");
29545 fprintf (file, "\tbctr\n");
29546 }
29547
29548 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
29549 fprintf (file, "%s:\n", lazy_ptr_name);
29550 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
29551 fprintf (file, "%sdyld_stub_binding_helper\n",
29552 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
29553 }
29554
29555 /* Legitimize PIC addresses. If the address is already
29556 position-independent, we return ORIG. Newly generated
29557 position-independent addresses go into a reg. This is REG if
29558 nonzero, otherwise we allocate register(s) as necessary. */
29559
29560 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
29561
29562 rtx
29563 rs6000_machopic_legitimize_pic_address (rtx orig, machine_mode mode,
29564 rtx reg)
29565 {
29566 rtx base, offset;
29567
29568 if (reg == NULL && ! reload_in_progress && ! reload_completed)
29569 reg = gen_reg_rtx (Pmode);
29570
29571 if (GET_CODE (orig) == CONST)
29572 {
29573 rtx reg_temp;
29574
29575 if (GET_CODE (XEXP (orig, 0)) == PLUS
29576 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
29577 return orig;
29578
29579 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
29580
29581 /* Use a different reg for the intermediate value, as
29582 it will be marked UNCHANGING. */
29583 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
29584 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
29585 Pmode, reg_temp);
29586 offset =
29587 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
29588 Pmode, reg);
29589
29590 if (GET_CODE (offset) == CONST_INT)
29591 {
29592 if (SMALL_INT (offset))
29593 return plus_constant (Pmode, base, INTVAL (offset));
29594 else if (! reload_in_progress && ! reload_completed)
29595 offset = force_reg (Pmode, offset);
29596 else
29597 {
29598 rtx mem = force_const_mem (Pmode, orig);
29599 return machopic_legitimize_pic_address (mem, Pmode, reg);
29600 }
29601 }
29602 return gen_rtx_PLUS (Pmode, base, offset);
29603 }
29604
29605 /* Fall back on generic machopic code. */
29606 return machopic_legitimize_pic_address (orig, mode, reg);
29607 }
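
/* A sketch of the CONST case above, under the assumption of a small
   offset: for ORIG = (const (plus (symbol_ref "x") (const_int 8))),
   the symbol is legitimized into a base register BASE, and since 8
   satisfies SMALL_INT the result is just (plus BASE (const_int 8)).
   An offset outside the signed 16-bit range is instead forced into a
   register, or, when no new registers may be created, the whole
   constant is spilled to the constant pool.  */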
29608
29609 /* Output a .machine directive for the Darwin assembler, and call
29610 the generic start_file routine. */
29611
29612 static void
29613 rs6000_darwin_file_start (void)
29614 {
29615 static const struct
29616 {
29617 const char *arg;
29618 const char *name;
29619 HOST_WIDE_INT if_set;
29620 } mapping[] = {
29621 { "ppc64", "ppc64", MASK_64BIT },
29622 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
29623 { "power4", "ppc970", 0 },
29624 { "G5", "ppc970", 0 },
29625 { "7450", "ppc7450", 0 },
29626 { "7400", "ppc7400", MASK_ALTIVEC },
29627 { "G4", "ppc7400", 0 },
29628 { "750", "ppc750", 0 },
29629 { "740", "ppc750", 0 },
29630 { "G3", "ppc750", 0 },
29631 { "604e", "ppc604e", 0 },
29632 { "604", "ppc604", 0 },
29633 { "603e", "ppc603", 0 },
29634 { "603", "ppc603", 0 },
29635 { "601", "ppc601", 0 },
29636 { NULL, "ppc", 0 } };
29637 const char *cpu_id = "";
29638 size_t i;
29639
29640 rs6000_file_start ();
29641 darwin_file_start ();
29642
29643 /* Determine the argument to -mcpu=: the configured default CPU, if any, overridden by an explicit -mcpu= option. */
29644
29645 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
29646 cpu_id = rs6000_default_cpu;
29647
29648 if (global_options_set.x_rs6000_cpu_index)
29649 cpu_id = processor_target_table[rs6000_cpu_index].name;
29650
29651 /* Look through the mapping array. Pick the first name that either
29652 matches the argument, has a bit set in IF_SET that is also set
29653 in the target flags, or has a NULL name. */
29654
29655 i = 0;
29656 while (mapping[i].arg != NULL
29657 && strcmp (mapping[i].arg, cpu_id) != 0
29658 && (mapping[i].if_set & rs6000_isa_flags) == 0)
29659 i++;
29660
29661 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
29662 }
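
/* For example, "-mcpu=7400" matches the "7400" entry and emits
   "\t.machine ppc7400"; a 64-bit compilation matches the "ppc64"
   entry through MASK_64BIT even without a matching -mcpu string; and
   an unrecognized CPU name with no matching flag bits falls through
   to the final { NULL, "ppc", 0 } entry.  */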
29663
29664 #endif /* TARGET_MACHO */
29665
29666 #if TARGET_ELF
29667 static int
29668 rs6000_elf_reloc_rw_mask (void)
29669 {
29670 if (flag_pic)
29671 return 3;
29672 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
29673 return 2;
29674 else
29675 return 0;
29676 }
29677
29678 /* Record an element in the table of global constructors. SYMBOL is
29679 a SYMBOL_REF of the function to be called; PRIORITY is a number
29680 between 0 and MAX_INIT_PRIORITY.
29681
29682 This differs from default_named_section_asm_out_constructor in
29683 that we have special handling for -mrelocatable. */
29684
29685 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
29686 static void
29687 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
29688 {
29689 const char *section = ".ctors";
29690 char buf[16];
29691
29692 if (priority != DEFAULT_INIT_PRIORITY)
29693 {
29694 sprintf (buf, ".ctors.%.5u",
29695 /* Invert the numbering so the linker puts us in the proper
29696 order; constructors are run from right to left, and the
29697 linker sorts in increasing order. */
29698 MAX_INIT_PRIORITY - priority);
29699 section = buf;
29700 }
29701
29702 switch_to_section (get_section (section, SECTION_WRITE, NULL));
29703 assemble_align (POINTER_SIZE);
29704
29705 if (TARGET_RELOCATABLE)
29706 {
29707 fputs ("\t.long (", asm_out_file);
29708 output_addr_const (asm_out_file, symbol);
29709 fputs (")@fixup\n", asm_out_file);
29710 }
29711 else
29712 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
29713 }
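
/* Worked example: with MAX_INIT_PRIORITY == 65535, a constructor of
   priority 101 is placed in ".ctors.65434" and one of priority 65535
   in ".ctors.00000".  The linker sorts these in increasing order, so
   the priority-101 entry lands to the right and, with constructors
   running right to left, executes first, as its lower priority number
   requires.  */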
29714
29715 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
29716 static void
29717 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
29718 {
29719 const char *section = ".dtors";
29720 char buf[16];
29721
29722 if (priority != DEFAULT_INIT_PRIORITY)
29723 {
29724 sprintf (buf, ".dtors.%.5u",
29725 /* Invert the numbering so the linker puts us in the proper
29726 order; destructors must run in reverse order of construction,
29727 and the linker sorts in increasing order. */
29728 MAX_INIT_PRIORITY - priority);
29729 section = buf;
29730 }
29731
29732 switch_to_section (get_section (section, SECTION_WRITE, NULL));
29733 assemble_align (POINTER_SIZE);
29734
29735 if (TARGET_RELOCATABLE)
29736 {
29737 fputs ("\t.long (", asm_out_file);
29738 output_addr_const (asm_out_file, symbol);
29739 fputs (")@fixup\n", asm_out_file);
29740 }
29741 else
29742 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
29743 }
29744
29745 void
29746 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
29747 {
29748 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
29749 {
29750 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
29751 ASM_OUTPUT_LABEL (file, name);
29752 fputs (DOUBLE_INT_ASM_OP, file);
29753 rs6000_output_function_entry (file, name);
29754 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
29755 if (DOT_SYMBOLS)
29756 {
29757 fputs ("\t.size\t", file);
29758 assemble_name (file, name);
29759 fputs (",24\n\t.type\t.", file);
29760 assemble_name (file, name);
29761 fputs (",@function\n", file);
29762 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
29763 {
29764 fputs ("\t.globl\t.", file);
29765 assemble_name (file, name);
29766 putc ('\n', file);
29767 }
29768 }
29769 else
29770 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
29771 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
29772 rs6000_output_function_entry (file, name);
29773 fputs (":\n", file);
29774 return;
29775 }
29776
29777 if (TARGET_RELOCATABLE
29778 && !TARGET_SECURE_PLT
29779 && (get_pool_size () != 0 || crtl->profile)
29780 && uses_TOC ())
29781 {
29782 char buf[256];
29783
29784 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
29785
29786 ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
29787 fprintf (file, "\t.long ");
29788 assemble_name (file, buf);
29789 putc ('-', file);
29790 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
29791 assemble_name (file, buf);
29792 putc ('\n', file);
29793 }
29794
29795 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
29796 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
29797
29798 if (DEFAULT_ABI == ABI_AIX)
29799 {
29800 const char *desc_name, *orig_name;
29801
29802 orig_name = (*targetm.strip_name_encoding) (name);
29803 desc_name = orig_name;
29804 while (*desc_name == '.')
29805 desc_name++;
29806
29807 if (TREE_PUBLIC (decl))
29808 fprintf (file, "\t.globl %s\n", desc_name);
29809
29810 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
29811 fprintf (file, "%s:\n", desc_name);
29812 fprintf (file, "\t.long %s\n", orig_name);
29813 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
29814 fputs ("\t.long 0\n", file);
29815 fprintf (file, "\t.previous\n");
29816 }
29817 ASM_OUTPUT_LABEL (file, name);
29818 }
29819
29820 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
29821 static void
29822 rs6000_elf_file_end (void)
29823 {
29824 #ifdef HAVE_AS_GNU_ATTRIBUTE
29825 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
29826 {
29827 if (rs6000_passes_float)
29828 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
29829 ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT) ? 1
29830 : (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT) ? 3
29831 : 2));
29832 if (rs6000_passes_vector)
29833 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
29834 (TARGET_ALTIVEC_ABI ? 2
29835 : TARGET_SPE_ABI ? 3
29836 : 1));
29837 if (rs6000_returns_struct)
29838 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
29839 aix_struct_return ? 2 : 1);
29840 }
29841 #endif
29842 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
29843 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
29844 file_end_indicate_exec_stack ();
29845 #endif
29846 }
29847 #endif
29848
29849 #if TARGET_XCOFF
29850 static void
29851 rs6000_xcoff_asm_output_anchor (rtx symbol)
29852 {
29853 char buffer[100];
29854
29855 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
29856 SYMBOL_REF_BLOCK_OFFSET (symbol));
29857 fprintf (asm_out_file, "%s", SET_ASM_OP);
29858 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
29859 fprintf (asm_out_file, ",");
29860 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
29861 fprintf (asm_out_file, "\n");
29862 }
29863
29864 static void
29865 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
29866 {
29867 fputs (GLOBAL_ASM_OP, stream);
29868 RS6000_OUTPUT_BASENAME (stream, name);
29869 putc ('\n', stream);
29870 }
29871
29872 /* A get_unnamed_section callback, used for read-only sections.
29873 DIRECTIVE points to the section string variable. */
29874
29875 static void
29876 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
29877 {
29878 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
29879 *(const char *const *) directive,
29880 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
29881 }
29882
29883 /* Likewise for read-write sections. */
29884
29885 static void
29886 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
29887 {
29888 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
29889 *(const char *const *) directive,
29890 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
29891 }
29892
29893 static void
29894 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
29895 {
29896 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
29897 *(const char *const *) directive,
29898 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
29899 }
29900
29901 /* A get_unnamed_section callback, used for switching to toc_section. */
29902
29903 static void
29904 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
29905 {
29906 if (TARGET_MINIMAL_TOC)
29907 {
29908 /* toc_section is always selected at least once from
29909 rs6000_xcoff_file_start, so this is guaranteed to
29910 always be defined once and only once in each file. */
29911 if (!toc_initialized)
29912 {
29913 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
29914 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
29915 toc_initialized = 1;
29916 }
29917 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
29918 (TARGET_32BIT ? "" : ",3"));
29919 }
29920 else
29921 fputs ("\t.toc\n", asm_out_file);
29922 }
29923
29924 /* Implement TARGET_ASM_INIT_SECTIONS. */
29925
29926 static void
29927 rs6000_xcoff_asm_init_sections (void)
29928 {
29929 read_only_data_section
29930 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
29931 &xcoff_read_only_section_name);
29932
29933 private_data_section
29934 = get_unnamed_section (SECTION_WRITE,
29935 rs6000_xcoff_output_readwrite_section_asm_op,
29936 &xcoff_private_data_section_name);
29937
29938 tls_data_section
29939 = get_unnamed_section (SECTION_TLS,
29940 rs6000_xcoff_output_tls_section_asm_op,
29941 &xcoff_tls_data_section_name);
29942
29943 tls_private_data_section
29944 = get_unnamed_section (SECTION_TLS,
29945 rs6000_xcoff_output_tls_section_asm_op,
29946 &xcoff_private_data_section_name);
29947
29948 read_only_private_data_section
29949 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
29950 &xcoff_private_data_section_name);
29951
29952 toc_section
29953 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
29954
29955 readonly_data_section = read_only_data_section;
29956 exception_section = data_section;
29957 }
29958
29959 static int
29960 rs6000_xcoff_reloc_rw_mask (void)
29961 {
29962 return 3;
29963 }
29964
29965 static void
29966 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
29967 tree decl ATTRIBUTE_UNUSED)
29968 {
29969 int smclass;
29970 static const char * const suffix[4] = { "PR", "RO", "RW", "TL" };
29971
29972 if (flags & SECTION_CODE)
29973 smclass = 0;
29974 else if (flags & SECTION_TLS)
29975 smclass = 3;
29976 else if (flags & SECTION_WRITE)
29977 smclass = 2;
29978 else
29979 smclass = 1;
29980
29981 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
29982 (flags & SECTION_CODE) ? "." : "",
29983 name, suffix[smclass], flags & SECTION_ENTSIZE);
29984 }
29985
29986 #define IN_NAMED_SECTION(DECL) \
29987 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
29988 && DECL_SECTION_NAME (DECL) != NULL)
29989
29990 static section *
29991 rs6000_xcoff_select_section (tree decl, int reloc,
29992 unsigned HOST_WIDE_INT align)
29993 {
29994 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
29995 named section. */
29996 if (align > BIGGEST_ALIGNMENT)
29997 {
29998 resolve_unique_section (decl, reloc, true);
29999 if (IN_NAMED_SECTION (decl))
30000 return get_named_section (decl, NULL, reloc);
30001 }
30002
30003 if (decl_readonly_section (decl, reloc))
30004 {
30005 if (TREE_PUBLIC (decl))
30006 return read_only_data_section;
30007 else
30008 return read_only_private_data_section;
30009 }
30010 else
30011 {
30012 #if HAVE_AS_TLS
30013 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
30014 {
30015 if (TREE_PUBLIC (decl))
30016 return tls_data_section;
30017 else if (bss_initializer_p (decl))
30018 {
30019 /* Convert to COMMON to emit in BSS. */
30020 DECL_COMMON (decl) = 1;
30021 return tls_comm_section;
30022 }
30023 else
30024 return tls_private_data_section;
30025 }
30026 else
30027 #endif
30028 if (TREE_PUBLIC (decl))
30029 return data_section;
30030 else
30031 return private_data_section;
30032 }
30033 }
30034
30035 static void
30036 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
30037 {
30038 const char *name;
30039
30040 /* Use select_section for private data and uninitialized data with
30041 alignment <= BIGGEST_ALIGNMENT. */
30042 if (!TREE_PUBLIC (decl)
30043 || DECL_COMMON (decl)
30044 || (DECL_INITIAL (decl) == NULL_TREE
30045 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
30046 || DECL_INITIAL (decl) == error_mark_node
30047 || (flag_zero_initialized_in_bss
30048 && initializer_zerop (DECL_INITIAL (decl))))
30049 return;
30050
30051 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
30052 name = (*targetm.strip_name_encoding) (name);
30053 set_decl_section_name (decl, name);
30054 }
30055
30056 /* Select section for constant in constant pool.
30057
30058 On RS/6000, all constants are in the private read-only data area.
30059 However, if this is being placed in the TOC it must be output as a
30060 toc entry. */
30061
30062 static section *
30063 rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
30064 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
30065 {
30066 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
30067 return toc_section;
30068 else
30069 return read_only_private_data_section;
30070 }
30071
30072 /* Remove any trailing [DS] or the like from the symbol name. */
30073
30074 static const char *
30075 rs6000_xcoff_strip_name_encoding (const char *name)
30076 {
30077 size_t len;
30078 if (*name == '*')
30079 name++;
30080 len = strlen (name);
30081 if (name[len - 1] == ']')
30082 return ggc_alloc_string (name, len - 4);
30083 else
30084 return name;
30085 }
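
/* For example, "foo[DS]" is returned as "foo".  Only the trailing ']'
   is checked; every mapping-class suffix this port attaches ("[DS]",
   "[RO]", "[RW]", "[TL]", ...) is four characters long, so dropping
   the last four characters is sufficient.  A leading '*' is skipped
   first.  */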
30086
30087 /* Section attributes. AIX is always PIC. */
30088
30089 static unsigned int
30090 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
30091 {
30092 unsigned int align;
30093 unsigned int flags = default_section_type_flags (decl, name, reloc);
30094
30095 /* Align to at least UNIT size. */
30096 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
30097 align = MIN_UNITS_PER_WORD;
30098 else
30099 /* Increase alignment of large objects if not already stricter. */
30100 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
30101 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
30102 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
30103
30104 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
30105 }
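
/* For instance, on a 32-bit configuration (MIN_UNITS_PER_WORD of 4) a
   code section gets align == 4, so exact_log2 (align) == 2 is folded
   into the SECTION_ENTSIZE bits, while a variable larger than a word
   gets UNITS_PER_FP_WORD == 8, i.e. a log2 of 3.  The value is read
   back out of the flags in rs6000_xcoff_asm_named_section, which
   prints it as the alignment operand of .csect.  */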
30106
30107 /* Output at beginning of assembler file.
30108
30109 Initialize the section names for the RS/6000 at this point.
30110
30111 Specify filename, including full path, to assembler.
30112
30113 We want to go into the TOC section so at least one .toc will be emitted.
30114 Also, in order to output proper .bs/.es pairs, we need at least one static
30115 [RW] section emitted.
30116
30117 Finally, declare mcount when profiling to make the assembler happy. */
30118
30119 static void
30120 rs6000_xcoff_file_start (void)
30121 {
30122 rs6000_gen_section_name (&xcoff_bss_section_name,
30123 main_input_filename, ".bss_");
30124 rs6000_gen_section_name (&xcoff_private_data_section_name,
30125 main_input_filename, ".rw_");
30126 rs6000_gen_section_name (&xcoff_read_only_section_name,
30127 main_input_filename, ".ro_");
30128 rs6000_gen_section_name (&xcoff_tls_data_section_name,
30129 main_input_filename, ".tls_");
30130 rs6000_gen_section_name (&xcoff_tbss_section_name,
30131 main_input_filename, ".tbss_[UL]");
30132
30133 fputs ("\t.file\t", asm_out_file);
30134 output_quoted_string (asm_out_file, main_input_filename);
30135 fputc ('\n', asm_out_file);
30136 if (write_symbols != NO_DEBUG)
30137 switch_to_section (private_data_section);
30138 switch_to_section (text_section);
30139 if (profile_flag)
30140 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
30141 rs6000_file_start ();
30142 }
30143
30144 /* Output at end of assembler file.
30145 On the RS/6000, referencing data should automatically pull in text. */
30146
30147 static void
30148 rs6000_xcoff_file_end (void)
30149 {
30150 switch_to_section (text_section);
30151 fputs ("_section_.text:\n", asm_out_file);
30152 switch_to_section (data_section);
30153 fputs (TARGET_32BIT
30154 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
30155 asm_out_file);
30156 }
30157
30158 struct declare_alias_data
30159 {
30160 FILE *file;
30161 bool function_descriptor;
30162 };
30163
30164 /* Declare alias N. A helper function for call_for_symbol_and_aliases. */
30165
30166 static bool
30167 rs6000_declare_alias (struct symtab_node *n, void *d)
30168 {
30169 struct declare_alias_data *data = (struct declare_alias_data *)d;
30170 /* Main symbol is output specially, because varasm machinery does part of
30171 the job for us - we do not need to declare .globl/lglobs and such. */
30172 if (!n->alias || n->weakref)
30173 return false;
30174
30175 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
30176 return false;
30177
30178 /* Prevent assemble_alias from trying to use .set pseudo operation
30179 that does not behave as expected by the middle-end. */
30180 TREE_ASM_WRITTEN (n->decl) = true;
30181
30182 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
30183 char *buffer = (char *) alloca (strlen (name) + 2);
30184 char *p;
30185 int dollar_inside = 0;
30186
30187 strcpy (buffer, name);
30188 p = strchr (buffer, '$');
30189 while (p) {
30190 *p = '_';
30191 dollar_inside++;
30192 p = strchr (p + 1, '$');
30193 }
30194 if (TREE_PUBLIC (n->decl))
30195 {
30196 if (!RS6000_WEAK || !DECL_WEAK (n->decl))
30197 {
30198 if (dollar_inside) {
30199 if (data->function_descriptor)
30200 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
30201 else
30202 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
30203 }
30204 if (data->function_descriptor)
30205 fputs ("\t.globl .", data->file);
30206 else
30207 fputs ("\t.globl ", data->file);
30208 RS6000_OUTPUT_BASENAME (data->file, buffer);
30209 putc ('\n', data->file);
30210 }
30211 #ifdef ASM_WEAKEN_DECL
30212 else if (DECL_WEAK (n->decl) && !data->function_descriptor)
30213 ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
30214 #endif
30215 }
30216 else
30217 {
30218 if (dollar_inside)
30219 {
30220 if (data->function_descriptor)
30221 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
30222 else
30223 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
30224 }
30225 if (data->function_descriptor)
30226 fputs ("\t.lglobl .", data->file);
30227 else
30228 fputs ("\t.lglobl ", data->file);
30229 RS6000_OUTPUT_BASENAME (data->file, buffer);
30230 putc ('\n', data->file);
30231 }
30232 if (data->function_descriptor)
30233 fputs (".", data->file);
30234 RS6000_OUTPUT_BASENAME (data->file, buffer);
30235 fputs (":\n", data->file);
30236 return false;
30237 }
30238
30239 /* This macro produces the initial definition of a function name.
30240 On the RS/6000, we need to place an extra '.' in the function name and
30241 output the function descriptor.
30242 Dollar signs are converted to underscores.
30243
30244 The csect for the function will have already been created when
30245 text_section was selected. We do have to go back to that csect, however.
30246
30247 The third and fourth parameters to the .function pseudo-op (16 and 044)
30248 are placeholders which no longer have any use.
30249
30250 Because AIX assembler's .set command has unexpected semantics, we output
30251 all aliases as alternative labels in front of the definition. */
30252
30253 void
30254 rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
30255 {
30256 char *buffer = (char *) alloca (strlen (name) + 1);
30257 char *p;
30258 int dollar_inside = 0;
30259 struct declare_alias_data data = {file, false};
30260
30261 strcpy (buffer, name);
30262 p = strchr (buffer, '$');
30263 while (p) {
30264 *p = '_';
30265 dollar_inside++;
30266 p = strchr (p + 1, '$');
30267 }
30268 if (TREE_PUBLIC (decl))
30269 {
30270 if (!RS6000_WEAK || !DECL_WEAK (decl))
30271 {
30272 if (dollar_inside) {
30273 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
30274 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
30275 }
30276 fputs ("\t.globl .", file);
30277 RS6000_OUTPUT_BASENAME (file, buffer);
30278 putc ('\n', file);
30279 }
30280 }
30281 else
30282 {
30283 if (dollar_inside) {
30284 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
30285 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
30286 }
30287 fputs ("\t.lglobl .", file);
30288 RS6000_OUTPUT_BASENAME (file, buffer);
30289 putc ('\n', file);
30290 }
30291 fputs ("\t.csect ", file);
30292 RS6000_OUTPUT_BASENAME (file, buffer);
30293 fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
30294 RS6000_OUTPUT_BASENAME (file, buffer);
30295 fputs (":\n", file);
30296 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias, &data, true);
30297 fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
30298 RS6000_OUTPUT_BASENAME (file, buffer);
30299 fputs (", TOC[tc0], 0\n", file);
30300 in_section = NULL;
30301 switch_to_section (function_section (decl));
30302 putc ('.', file);
30303 RS6000_OUTPUT_BASENAME (file, buffer);
30304 fputs (":\n", file);
30305 data.function_descriptor = true;
30306 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias, &data, true);
30307 if (write_symbols != NO_DEBUG && !DECL_IGNORED_P (decl))
30308 xcoffout_declare_function (file, decl, buffer);
30309 return;
30310 }
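
/* Illustrative 32-bit output for a public function "foo" with no
   aliases and no '$' in its name:

	.globl .foo
	.csect foo[DS]
   foo:
	.long .foo, TOC[tc0], 0
	.csect .text[PR]
   .foo:

   "foo" labels the function descriptor and ".foo" the code entry
   point; the code csect actually comes from function_section (decl),
   so .text[PR] is only the common case.  */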
30311
30312 /* This macro produces the initial definition of an object (variable) name.
30313 Because AIX assembler's .set command has unexpected semantics, we output
30314 all aliases as alternative labels in front of the definition. */
30315
30316 void
30317 rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
30318 {
30319 struct declare_alias_data data = {file, false};
30320 RS6000_OUTPUT_BASENAME (file, name);
30321 fputs (":\n", file);
30322 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias, &data, true);
30323 }
30324
30325 #ifdef HAVE_AS_TLS
30326 static void
30327 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
30328 {
30329 rtx symbol;
30330 int flags;
30331
30332 default_encode_section_info (decl, rtl, first);
30333
30334 /* Careful not to prod global register variables. */
30335 if (!MEM_P (rtl))
30336 return;
30337 symbol = XEXP (rtl, 0);
30338 if (GET_CODE (symbol) != SYMBOL_REF)
30339 return;
30340
30341 flags = SYMBOL_REF_FLAGS (symbol);
30342
30343 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
30344 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
30345
30346 SYMBOL_REF_FLAGS (symbol) = flags;
30347 }
30348 #endif /* HAVE_AS_TLS */
30349 #endif /* TARGET_XCOFF */
30350
30351 /* Compute a (partial) cost for rtx X. Return true if the complete
30352 cost has been computed, and false if subexpressions should be
30353 scanned. In either case, *TOTAL contains the cost result. */
30354
30355 static bool
30356 rs6000_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
30357 int *total, bool speed)
30358 {
30359 machine_mode mode = GET_MODE (x);
30360
30361 switch (code)
30362 {
30363 /* On the RS/6000, if it is valid in the insn, it is free. */
30364 case CONST_INT:
30365 if (((outer_code == SET
30366 || outer_code == PLUS
30367 || outer_code == MINUS)
30368 && (satisfies_constraint_I (x)
30369 || satisfies_constraint_L (x)))
30370 || (outer_code == AND
30371 && (satisfies_constraint_K (x)
30372 || (mode == SImode
30373 ? satisfies_constraint_L (x)
30374 : satisfies_constraint_J (x))
30375 || mask_operand (x, mode)
30376 || (mode == DImode
30377 && mask64_operand (x, DImode))))
30378 || ((outer_code == IOR || outer_code == XOR)
30379 && (satisfies_constraint_K (x)
30380 || (mode == SImode
30381 ? satisfies_constraint_L (x)
30382 : satisfies_constraint_J (x))))
30383 || outer_code == ASHIFT
30384 || outer_code == ASHIFTRT
30385 || outer_code == LSHIFTRT
30386 || outer_code == ROTATE
30387 || outer_code == ROTATERT
30388 || outer_code == ZERO_EXTRACT
30389 || (outer_code == MULT
30390 && satisfies_constraint_I (x))
30391 || ((outer_code == DIV || outer_code == UDIV
30392 || outer_code == MOD || outer_code == UMOD)
30393 && exact_log2 (INTVAL (x)) >= 0)
30394 || (outer_code == COMPARE
30395 && (satisfies_constraint_I (x)
30396 || satisfies_constraint_K (x)))
30397 || ((outer_code == EQ || outer_code == NE)
30398 && (satisfies_constraint_I (x)
30399 || satisfies_constraint_K (x)
30400 || (mode == SImode
30401 ? satisfies_constraint_L (x)
30402 : satisfies_constraint_J (x))))
30403 || (outer_code == GTU
30404 && satisfies_constraint_I (x))
30405 || (outer_code == LTU
30406 && satisfies_constraint_P (x)))
30407 {
30408 *total = 0;
30409 return true;
30410 }
30411 else if ((outer_code == PLUS
30412 && reg_or_add_cint_operand (x, VOIDmode))
30413 || (outer_code == MINUS
30414 && reg_or_sub_cint_operand (x, VOIDmode))
30415 || ((outer_code == SET
30416 || outer_code == IOR
30417 || outer_code == XOR)
30418 && (INTVAL (x)
30419 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
30420 {
30421 *total = COSTS_N_INSNS (1);
30422 return true;
30423 }
30424 /* FALLTHRU */
30425
30426 case CONST_DOUBLE:
30427 case CONST_WIDE_INT:
30428 case CONST:
30429 case HIGH:
30430 case SYMBOL_REF:
30431 case MEM:
30432 /* When optimizing for size, MEM should be slightly more expensive
30433 than generating the address, e.g., (plus (reg) (const)).
30434 L1 cache latency is about two instructions. */
30435 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
30436 return true;
30437
30438 case LABEL_REF:
30439 *total = 0;
30440 return true;
30441
30442 case PLUS:
30443 case MINUS:
30444 if (FLOAT_MODE_P (mode))
30445 *total = rs6000_cost->fp;
30446 else
30447 *total = COSTS_N_INSNS (1);
30448 return false;
30449
30450 case MULT:
30451 if (GET_CODE (XEXP (x, 1)) == CONST_INT
30452 && satisfies_constraint_I (XEXP (x, 1)))
30453 {
30454 if (INTVAL (XEXP (x, 1)) >= -256
30455 && INTVAL (XEXP (x, 1)) <= 255)
30456 *total = rs6000_cost->mulsi_const9;
30457 else
30458 *total = rs6000_cost->mulsi_const;
30459 }
30460 else if (mode == SFmode)
30461 *total = rs6000_cost->fp;
30462 else if (FLOAT_MODE_P (mode))
30463 *total = rs6000_cost->dmul;
30464 else if (mode == DImode)
30465 *total = rs6000_cost->muldi;
30466 else
30467 *total = rs6000_cost->mulsi;
30468 return false;
30469
30470 case FMA:
30471 if (mode == SFmode)
30472 *total = rs6000_cost->fp;
30473 else
30474 *total = rs6000_cost->dmul;
30475 break;
30476
30477 case DIV:
30478 case MOD:
30479 if (FLOAT_MODE_P (mode))
30480 {
30481 *total = mode == DFmode ? rs6000_cost->ddiv
30482 : rs6000_cost->sdiv;
30483 return false;
30484 }
30485 /* FALLTHRU */
30486
30487 case UDIV:
30488 case UMOD:
30489 if (GET_CODE (XEXP (x, 1)) == CONST_INT
30490 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
30491 {
30492 if (code == DIV || code == MOD)
30493 /* Shift, addze */
30494 *total = COSTS_N_INSNS (2);
30495 else
30496 /* Shift */
30497 *total = COSTS_N_INSNS (1);
30498 }
30499 else
30500 {
30501 if (GET_MODE (XEXP (x, 1)) == DImode)
30502 *total = rs6000_cost->divdi;
30503 else
30504 *total = rs6000_cost->divsi;
30505 }
30506 /* Add in shift and subtract for MOD. */
30507 if (code == MOD || code == UMOD)
30508 *total += COSTS_N_INSNS (2);
30509 return false;
30510
30511 case CTZ:
30512 case FFS:
30513 *total = COSTS_N_INSNS (4);
30514 return false;
30515
30516 case POPCOUNT:
30517 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
30518 return false;
30519
30520 case PARITY:
30521 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
30522 return false;
30523
30524 case NOT:
30525 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
30526 {
30527 *total = 0;
30528 return false;
30529 }
30530 /* FALLTHRU */
30531
30532 case AND:
30533 case CLZ:
30534 case IOR:
30535 case XOR:
30536 case ZERO_EXTRACT:
30537 *total = COSTS_N_INSNS (1);
30538 return false;
30539
30540 case ASHIFT:
30541 case ASHIFTRT:
30542 case LSHIFTRT:
30543 case ROTATE:
30544 case ROTATERT:
30545 /* Handle mul_highpart. */
30546 if (outer_code == TRUNCATE
30547 && GET_CODE (XEXP (x, 0)) == MULT)
30548 {
30549 if (mode == DImode)
30550 *total = rs6000_cost->muldi;
30551 else
30552 *total = rs6000_cost->mulsi;
30553 return true;
30554 }
30555 else if (outer_code == AND)
30556 *total = 0;
30557 else
30558 *total = COSTS_N_INSNS (1);
30559 return false;
30560
30561 case SIGN_EXTEND:
30562 case ZERO_EXTEND:
30563 if (GET_CODE (XEXP (x, 0)) == MEM)
30564 *total = 0;
30565 else
30566 *total = COSTS_N_INSNS (1);
30567 return false;
30568
30569 case COMPARE:
30570 case NEG:
30571 case ABS:
30572 if (!FLOAT_MODE_P (mode))
30573 {
30574 *total = COSTS_N_INSNS (1);
30575 return false;
30576 }
30577 /* FALLTHRU */
30578
30579 case FLOAT:
30580 case UNSIGNED_FLOAT:
30581 case FIX:
30582 case UNSIGNED_FIX:
30583 case FLOAT_TRUNCATE:
30584 *total = rs6000_cost->fp;
30585 return false;
30586
30587 case FLOAT_EXTEND:
30588 if (mode == DFmode)
30589 *total = rs6000_cost->sfdf_convert;
30590 else
30591 *total = rs6000_cost->fp;
30592 return false;
30593
30594 case UNSPEC:
30595 switch (XINT (x, 1))
30596 {
30597 case UNSPEC_FRSP:
30598 *total = rs6000_cost->fp;
30599 return true;
30600
30601 default:
30602 break;
30603 }
30604 break;
30605
30606 case CALL:
30607 case IF_THEN_ELSE:
30608 if (!speed)
30609 {
30610 *total = COSTS_N_INSNS (1);
30611 return true;
30612 }
30613 else if (FLOAT_MODE_P (mode)
30614 && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS)
30615 {
30616 *total = rs6000_cost->fp;
30617 return false;
30618 }
30619 break;
30620
30621 case NE:
30622 case EQ:
30623 case GTU:
30624 case LTU:
30625 /* Carry bit requires mode == Pmode.
30626 NEG or PLUS already counted so only add one. */
30627 if (mode == Pmode
30628 && (outer_code == NEG || outer_code == PLUS))
30629 {
30630 *total = COSTS_N_INSNS (1);
30631 return true;
30632 }
30633 if (outer_code == SET)
30634 {
30635 if (XEXP (x, 1) == const0_rtx)
30636 {
30637 if (TARGET_ISEL && !TARGET_MFCRF)
30638 *total = COSTS_N_INSNS (8);
30639 else
30640 *total = COSTS_N_INSNS (2);
30641 return true;
30642 }
30643 else
30644 {
30645 *total = COSTS_N_INSNS (3);
30646 return false;
30647 }
30648 }
30649 /* FALLTHRU */
30650
30651 case GT:
30652 case LT:
30653 case UNORDERED:
30654 if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
30655 {
30656 if (TARGET_ISEL && !TARGET_MFCRF)
30657 *total = COSTS_N_INSNS (8);
30658 else
30659 *total = COSTS_N_INSNS (2);
30660 return true;
30661 }
30662 /* CC COMPARE. */
30663 if (outer_code == COMPARE)
30664 {
30665 *total = 0;
30666 return true;
30667 }
30668 break;
30669
30670 default:
30671 break;
30672 }
30673
30674 return false;
30675 }
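
/* For example, costing (plus (reg) (const_int 100)) recurses into the
   CONST_INT with outer_code == PLUS; 100 satisfies constraint "I" (a
   signed 16-bit immediate) and can ride along in an addi, so the
   CONST_INT case reports a cost of 0.  A SET of a constant that only
   fits in 32 bits instead falls through to the COSTS_N_INSNS (1)
   estimate.  */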
30676
30677 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
30678
30679 static bool
30680 rs6000_debug_rtx_costs (rtx x, int code, int outer_code, int opno, int *total,
30681 bool speed)
30682 {
30683 bool ret = rs6000_rtx_costs (x, code, outer_code, opno, total, speed);
30684
30685 fprintf (stderr,
30686 "\nrs6000_rtx_costs, return = %s, code = %s, outer_code = %s, "
30687 "opno = %d, total = %d, speed = %s, x:\n",
30688 ret ? "complete" : "scan inner",
30689 GET_RTX_NAME (code),
30690 GET_RTX_NAME (outer_code),
30691 opno,
30692 *total,
30693 speed ? "true" : "false");
30694
30695 debug_rtx (x);
30696
30697 return ret;
30698 }
30699
30700 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
30701
30702 static int
30703 rs6000_debug_address_cost (rtx x, machine_mode mode,
30704 addr_space_t as, bool speed)
30705 {
30706 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
30707
30708 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
30709 ret, speed ? "true" : "false");
30710 debug_rtx (x);
30711
30712 return ret;
30713 }
30714
30715
30716 /* A C expression returning the cost of moving data from a register of class
30717 CLASS1 to one of CLASS2. */
30718
30719 static int
30720 rs6000_register_move_cost (machine_mode mode,
30721 reg_class_t from, reg_class_t to)
30722 {
30723 int ret;
30724
30725 if (TARGET_DEBUG_COST)
30726 dbg_cost_ctrl++;
30727
30728 /* Moves from/to GENERAL_REGS. */
30729 if (reg_classes_intersect_p (to, GENERAL_REGS)
30730 || reg_classes_intersect_p (from, GENERAL_REGS))
30731 {
30732 reg_class_t rclass = from;
30733
30734 if (! reg_classes_intersect_p (to, GENERAL_REGS))
30735 rclass = to;
30736
30737 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
30738 ret = (rs6000_memory_move_cost (mode, rclass, false)
30739 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
30740
30741 /* It's more expensive to move CR_REGS than CR0_REGS because of the
30742 shift. */
30743 else if (rclass == CR_REGS)
30744 ret = 4;
30745
30746 /* For those processors that have slow LR/CTR moves, make them more
30747 expensive than memory in order to bias spills to memory. */
30748 else if ((rs6000_cpu == PROCESSOR_POWER6
30749 || rs6000_cpu == PROCESSOR_POWER7
30750 || rs6000_cpu == PROCESSOR_POWER8)
30751 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
30752 ret = 6 * hard_regno_nregs[0][mode];
30753
30754 else
30755 /* A move will cost one instruction per GPR moved. */
30756 ret = 2 * hard_regno_nregs[0][mode];
30757 }
30758
30759 /* If we have VSX, we can easily move between FPR or Altivec registers. */
30760 else if (VECTOR_MEM_VSX_P (mode)
30761 && reg_classes_intersect_p (to, VSX_REGS)
30762 && reg_classes_intersect_p (from, VSX_REGS))
30763 ret = 2 * hard_regno_nregs[32][mode];
30764
30765 /* Moving between two similar registers is just one instruction. */
30766 else if (reg_classes_intersect_p (to, from))
30767 ret = (mode == TFmode || mode == TDmode) ? 4 : 2;
30768
30769 /* Everything else has to go through GENERAL_REGS. */
30770 else
30771 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
30772 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
30773
30774 if (TARGET_DEBUG_COST)
30775 {
30776 if (dbg_cost_ctrl == 1)
30777 fprintf (stderr,
30778 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
30779 ret, GET_MODE_NAME (mode), reg_class_names[from],
30780 reg_class_names[to]);
30781 dbg_cost_ctrl--;
30782 }
30783
30784 return ret;
30785 }
30786
30787 /* A C expression returning the cost of moving data of MODE from a register to
30788 or from memory. */
30789
30790 static int
30791 rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
30792 bool in ATTRIBUTE_UNUSED)
30793 {
30794 int ret;
30795
30796 if (TARGET_DEBUG_COST)
30797 dbg_cost_ctrl++;
30798
30799 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
30800 ret = 4 * hard_regno_nregs[0][mode];
30801 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
30802 || reg_classes_intersect_p (rclass, VSX_REGS)))
30803 ret = 4 * hard_regno_nregs[32][mode];
30804 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
30805 ret = 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
30806 else
30807 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
30808
30809 if (TARGET_DEBUG_COST)
30810 {
30811 if (dbg_cost_ctrl == 1)
30812 fprintf (stderr,
30813 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
30814 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
30815 dbg_cost_ctrl--;
30816 }
30817
30818 return ret;
30819 }
30820
30821 /* Returns a code for a target-specific builtin that implements
30822 reciprocal of the function, or NULL_TREE if not available. */
30823
30824 static tree
30825 rs6000_builtin_reciprocal (unsigned int fn, bool md_fn,
30826 bool sqrt ATTRIBUTE_UNUSED)
30827 {
30828 if (optimize_insn_for_size_p ())
30829 return NULL_TREE;
30830
30831 if (md_fn)
30832 switch (fn)
30833 {
30834 case VSX_BUILTIN_XVSQRTDP:
30835 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
30836 return NULL_TREE;
30837
30838 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
30839
30840 case VSX_BUILTIN_XVSQRTSP:
30841 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
30842 return NULL_TREE;
30843
30844 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
30845
30846 default:
30847 return NULL_TREE;
30848 }
30849
30850 else
30851 switch (fn)
30852 {
30853 case BUILT_IN_SQRT:
30854 if (!RS6000_RECIP_AUTO_RSQRTE_P (DFmode))
30855 return NULL_TREE;
30856
30857 return rs6000_builtin_decls[RS6000_BUILTIN_RSQRT];
30858
30859 case BUILT_IN_SQRTF:
30860 if (!RS6000_RECIP_AUTO_RSQRTE_P (SFmode))
30861 return NULL_TREE;
30862
30863 return rs6000_builtin_decls[RS6000_BUILTIN_RSQRTF];
30864
30865 default:
30866 return NULL_TREE;
30867 }
30868 }
30869
30870 /* Load up a constant. If the mode is a vector mode, splat the value across
30871 all of the vector elements. */
30872
30873 static rtx
30874 rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
30875 {
30876 rtx reg;
30877
30878 if (mode == SFmode || mode == DFmode)
30879 {
30880 rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, mode);
30881 reg = force_reg (mode, d);
30882 }
30883 else if (mode == V4SFmode)
30884 {
30885 rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, SFmode);
30886 rtvec v = gen_rtvec (4, d, d, d, d);
30887 reg = gen_reg_rtx (mode);
30888 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
30889 }
30890 else if (mode == V2DFmode)
30891 {
30892 rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, DFmode);
30893 rtvec v = gen_rtvec (2, d, d);
30894 reg = gen_reg_rtx (mode);
30895 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
30896 }
30897 else
30898 gcc_unreachable ();
30899
30900 return reg;
30901 }
30902
30903 /* Generate an FMA instruction. */
30904
30905 static void
30906 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
30907 {
30908 machine_mode mode = GET_MODE (target);
30909 rtx dst;
30910
30911 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
30912 gcc_assert (dst != NULL);
30913
30914 if (dst != target)
30915 emit_move_insn (target, dst);
30916 }
30917
30918 /* Generate a FMSUB instruction: dst = fma(m1, m2, -a). */
30919
30920 static void
30921 rs6000_emit_msub (rtx target, rtx m1, rtx m2, rtx a)
30922 {
30923 machine_mode mode = GET_MODE (target);
30924 rtx dst;
30925
30926 /* Altivec does not support fms directly;
30927 generate in terms of fma in that case. */
30928 if (optab_handler (fms_optab, mode) != CODE_FOR_nothing)
30929 dst = expand_ternary_op (mode, fms_optab, m1, m2, a, target, 0);
30930 else
30931 {
30932 a = expand_unop (mode, neg_optab, a, NULL_RTX, 0);
30933 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
30934 }
30935 gcc_assert (dst != NULL);
30936
30937 if (dst != target)
30938 emit_move_insn (target, dst);
30939 }
30940
30941 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
30942
30943 static void
30944 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
30945 {
30946 machine_mode mode = GET_MODE (dst);
30947 rtx r;
30948
30949 /* This is a tad more complicated, since the fnma_optab is for
30950 a different expression: fma(-m1, m2, a), which is the same
30951 thing except in the case of signed zeros.
30952
30953 Fortunately we know that if FMA is supported that FNMSUB is
30954 also supported in the ISA. Just expand it directly. */
30955
30956 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
30957
30958 r = gen_rtx_NEG (mode, a);
30959 r = gen_rtx_FMA (mode, m1, m2, r);
30960 r = gen_rtx_NEG (mode, r);
30961 emit_insn (gen_rtx_SET (dst, r));
30962 }
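
/* The signed-zero difference mentioned above, made concrete: with
   m1 = m2 = a = 1.0, fnma computes fma (-1.0, 1.0, 1.0) = +0.0, while
   FNMSUB computes -fma (1.0, 1.0, -1.0) = -(+0.0) = -0.0.  The two
   forms agree whenever the result is nonzero; to preserve the sign of
   zero we expand the FNMSUB form directly rather than going through
   fnma_optab.  */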
30963
30964 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
30965 add a reg_note saying that this was a division. Support both scalar and
30966 vector divide. Assumes no trapping math and finite arguments. */
30967
30968 void
30969 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
30970 {
30971 machine_mode mode = GET_MODE (dst);
30972 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
30973 int i;
30974
30975 /* Low precision estimates guarantee 5 bits of accuracy. High
30976 precision estimates guarantee 14 bits of accuracy. SFmode
30977 requires 23 bits of accuracy. DFmode requires 52 bits of
30978 accuracy. Each pass at least doubles the accuracy, leading
30979 to the following. */
30980 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
30981 if (mode == DFmode || mode == V2DFmode)
30982 passes++;
30983
30984 enum insn_code code = optab_handler (smul_optab, mode);
30985 insn_gen_fn gen_mul = GEN_FCN (code);
30986
30987 gcc_assert (code != CODE_FOR_nothing);
30988
30989 one = rs6000_load_constant_and_splat (mode, dconst1);
30990
30991 /* x0 = 1./d estimate */
30992 x0 = gen_reg_rtx (mode);
30993 emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
30994 UNSPEC_FRES)));
30995
30996 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
30997 if (passes > 1) {
30998
30999 /* e0 = 1. - d * x0 */
31000 e0 = gen_reg_rtx (mode);
31001 rs6000_emit_nmsub (e0, d, x0, one);
31002
31003 /* x1 = x0 + e0 * x0 */
31004 x1 = gen_reg_rtx (mode);
31005 rs6000_emit_madd (x1, e0, x0, x0);
31006
31007 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
31008 ++i, xprev = xnext, eprev = enext) {
31009
31010 /* enext = eprev * eprev */
31011 enext = gen_reg_rtx (mode);
31012 emit_insn (gen_mul (enext, eprev, eprev));
31013
31014 /* xnext = xprev + enext * xprev */
31015 xnext = gen_reg_rtx (mode);
31016 rs6000_emit_madd (xnext, enext, xprev, xprev);
31017 }
31018
31019 } else
31020 xprev = x0;
31021
31022 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
31023
31024 /* u = n * xprev */
31025 u = gen_reg_rtx (mode);
31026 emit_insn (gen_mul (u, n, xprev));
31027
31028 /* v = n - (d * u) */
31029 v = gen_reg_rtx (mode);
31030 rs6000_emit_nmsub (v, d, u, n);
31031
31032 /* dst = (v * xprev) + u */
31033 rs6000_emit_madd (dst, v, xprev, u);
31034
31035 if (note_p)
31036 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
31037 }
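
/* Worked example of the pass count above: the fres-style estimate is
   good to ~5 bits and each refinement pass at least doubles that, so
   the accuracy goes 5 -> 10 -> 20 -> 40 -> 80.  SFmode needs 23 bits,
   reached after 3 passes; DFmode needs 52, reached after 4.  With
   TARGET_RECIP_PRECISION the estimate is ~14 bits, so one pass
   (14 -> 28) covers SFmode and two (14 -> 28 -> 56) cover DFmode.  */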
31038
31039 /* Newton-Raphson approximation of single/double-precision floating point
31040 rsqrt. Assumes no trapping math and finite arguments. */
31041
31042 void
31043 rs6000_emit_swrsqrt (rtx dst, rtx src)
31044 {
31045 machine_mode mode = GET_MODE (src);
31046 rtx x0 = gen_reg_rtx (mode);
31047 rtx y = gen_reg_rtx (mode);
31048
31049 /* Low precision estimates guarantee 5 bits of accuracy. High
31050 precision estimates guarantee 14 bits of accuracy. SFmode
31051 requires 23 bits of accuracy. DFmode requires 52 bits of
31052 accuracy. Each pass at least doubles the accuracy, leading
31053 to the following. */
31054 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
31055 if (mode == DFmode || mode == V2DFmode)
31056 passes++;
31057
31058 REAL_VALUE_TYPE dconst3_2;
31059 int i;
31060 rtx halfthree;
31061 enum insn_code code = optab_handler (smul_optab, mode);
31062 insn_gen_fn gen_mul = GEN_FCN (code);
31063
31064 gcc_assert (code != CODE_FOR_nothing);
31065
31066 /* Load up the constant 1.5 either as a scalar, or as a vector. */
31067 real_from_integer (&dconst3_2, VOIDmode, 3, SIGNED);
31068 SET_REAL_EXP (&dconst3_2, REAL_EXP (&dconst3_2) - 1);
31069
31070 halfthree = rs6000_load_constant_and_splat (mode, dconst3_2);
31071
31072 /* x0 = rsqrt estimate */
31073 emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
31074 UNSPEC_RSQRT)));
31075
31076 /* y = 0.5 * src = 1.5 * src - src -> fewer constants */
31077 rs6000_emit_msub (y, src, halfthree, src);
31078
31079 for (i = 0; i < passes; i++)
31080 {
31081 rtx x1 = gen_reg_rtx (mode);
31082 rtx u = gen_reg_rtx (mode);
31083 rtx v = gen_reg_rtx (mode);
31084
31085 /* x1 = x0 * (1.5 - y * (x0 * x0)) */
31086 emit_insn (gen_mul (u, x0, x0));
31087 rs6000_emit_nmsub (v, y, u, halfthree);
31088 emit_insn (gen_mul (x1, x0, v));
31089 x0 = x1;
31090 }
31091
31092 emit_move_insn (dst, x0);
31093 return;
31094 }
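
/* A note on the recurrence: with y = 0.5 * src, each pass computes
   x1 = x0 * (1.5 - y * x0 * x0), the Newton-Raphson step for
   1/sqrt(src).  For example, with src = 4.0 and a crude estimate
   x0 = 0.4: y = 2.0 and x1 = 0.4 * (1.5 - 2.0 * 0.16) = 0.472,
   already much closer to the exact 0.5.  Convergence is quadratic,
   which is what lets each pass double the number of correct bits.  */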
31095
31096 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
31097 (Power7) targets. DST is the target, and SRC is the argument operand. */
31098
31099 void
31100 rs6000_emit_popcount (rtx dst, rtx src)
31101 {
31102 machine_mode mode = GET_MODE (dst);
31103 rtx tmp1, tmp2;
31104
31105 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
31106 if (TARGET_POPCNTD)
31107 {
31108 if (mode == SImode)
31109 emit_insn (gen_popcntdsi2 (dst, src));
31110 else
31111 emit_insn (gen_popcntddi2 (dst, src));
31112 return;
31113 }
31114
31115 tmp1 = gen_reg_rtx (mode);
31116
31117 if (mode == SImode)
31118 {
31119 emit_insn (gen_popcntbsi2 (tmp1, src));
31120 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
31121 NULL_RTX, 0);
31122 tmp2 = force_reg (SImode, tmp2);
31123 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
31124 }
31125 else
31126 {
31127 emit_insn (gen_popcntbdi2 (tmp1, src));
31128 tmp2 = expand_mult (DImode, tmp1,
31129 GEN_INT ((HOST_WIDE_INT)
31130 0x01010101 << 32 | 0x01010101),
31131 NULL_RTX, 0);
31132 tmp2 = force_reg (DImode, tmp2);
31133 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
31134 }
31135 }
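
/* Worked example of the SImode fallback above: popcntb leaves a
   per-byte population count in each byte, e.g. tmp1 = 0x02010301 for
   a word with 7 bits set.  Multiplying by 0x01010101 makes byte N of
   the product the sum of bytes 0..N (no byte can carry, since the
   total is at most 32), giving 0x07050401 here, and the shift right
   by 24 extracts the full count, 7, from the top byte.  */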
31136
31137
31138 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
31139 target, and SRC is the argument operand. */
31140
31141 void
31142 rs6000_emit_parity (rtx dst, rtx src)
31143 {
31144 machine_mode mode = GET_MODE (dst);
31145 rtx tmp;
31146
31147 tmp = gen_reg_rtx (mode);
31148
31149 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
31150 if (TARGET_CMPB)
31151 {
31152 if (mode == SImode)
31153 {
31154 emit_insn (gen_popcntbsi2 (tmp, src));
31155 emit_insn (gen_paritysi2_cmpb (dst, tmp));
31156 }
31157 else
31158 {
31159 emit_insn (gen_popcntbdi2 (tmp, src));
31160 emit_insn (gen_paritydi2_cmpb (dst, tmp));
31161 }
31162 return;
31163 }
31164
31165 if (mode == SImode)
31166 {
31167 /* Is mult+shift >= shift+xor+shift+xor? */
31168 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
31169 {
31170 rtx tmp1, tmp2, tmp3, tmp4;
31171
31172 tmp1 = gen_reg_rtx (SImode);
31173 emit_insn (gen_popcntbsi2 (tmp1, src));
31174
31175 tmp2 = gen_reg_rtx (SImode);
31176 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
31177 tmp3 = gen_reg_rtx (SImode);
31178 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
31179
31180 tmp4 = gen_reg_rtx (SImode);
31181 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
31182 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
31183 }
31184 else
31185 rs6000_emit_popcount (tmp, src);
31186 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
31187 }
31188 else
31189 {
31190 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
31191 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
31192 {
31193 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
31194
31195 tmp1 = gen_reg_rtx (DImode);
31196 emit_insn (gen_popcntbdi2 (tmp1, src));
31197
31198 tmp2 = gen_reg_rtx (DImode);
31199 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
31200 tmp3 = gen_reg_rtx (DImode);
31201 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
31202
31203 tmp4 = gen_reg_rtx (DImode);
31204 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
31205 tmp5 = gen_reg_rtx (DImode);
31206 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
31207
31208 tmp6 = gen_reg_rtx (DImode);
31209 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
31210 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
31211 }
31212 else
31213 rs6000_emit_popcount (tmp, src);
31214 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
31215 }
31216 }
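
/* A note on the xor-folding path: bit 0 of (a ^ b) equals
   (a + b) mod 2, so xoring the per-byte counts together preserves the
   parity of their sum.  E.g. byte counts {2,1,3,1} sum to 7 (odd),
   and 2^1^3^1 == 1 has its low bit set as well.  The shift/xor steps
   fold all of the byte counts into the low byte, and the final AND
   with 1 extracts the parity.  */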
31217
31218 /* Expand an Altivec constant permutation for little endian mode.
31219 There are two issues: First, the two input operands must be
31220 swapped so that together they form a double-wide array in LE
31221 order. Second, the vperm instruction has surprising behavior
31222 in LE mode: it interprets the elements of the source vectors
31223 in BE mode ("left to right") and interprets the elements of
31224 the destination vector in LE mode ("right to left"). To
31225 correct for this, we must subtract each element of the permute
31226 control vector from 31.
31227
31228 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
31229 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
31230 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
31231 serve as the permute control vector. Then, in BE mode,
31232
31233 vperm 9,10,11,12
31234
31235 places the desired result in vr9. However, in LE mode the
31236 vector contents will be
31237
31238 vr10 = 00000003 00000002 00000001 00000000
31239 vr11 = 00000007 00000006 00000005 00000004
31240
31241 The result of the vperm using the same permute control vector is
31242
31243 vr9 = 05000000 07000000 01000000 03000000
31244
31245 That is, the leftmost 4 bytes of vr10 are interpreted as the
31246 source for the rightmost 4 bytes of vr9, and so on.
31247
31248 If we change the permute control vector to
31249
31250 vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
31251
31252 and issue
31253
31254 vperm 9,11,10,12
31255
31256 we get the desired
31257
31258 vr9 = 00000006 00000004 00000002 00000000. */
31259
31260 void
31261 altivec_expand_vec_perm_const_le (rtx operands[4])
31262 {
31263 unsigned int i;
31264 rtx perm[16];
31265 rtx constv, unspec;
31266 rtx target = operands[0];
31267 rtx op0 = operands[1];
31268 rtx op1 = operands[2];
31269 rtx sel = operands[3];
31270
31271 /* Unpack and adjust the constant selector. */
31272 for (i = 0; i < 16; ++i)
31273 {
31274 rtx e = XVECEXP (sel, 0, i);
31275 unsigned int elt = 31 - (INTVAL (e) & 31);
31276 perm[i] = GEN_INT (elt);
31277 }
31278
31279 /* Expand to a permute, swapping the inputs and using the
31280 adjusted selector. */
31281 if (!REG_P (op0))
31282 op0 = force_reg (V16QImode, op0);
31283 if (!REG_P (op1))
31284 op1 = force_reg (V16QImode, op1);
31285
31286 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
31287 constv = force_reg (V16QImode, constv);
31288 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
31289 UNSPEC_VPERM);
31290 if (!REG_P (target))
31291 {
31292 rtx tmp = gen_reg_rtx (V16QImode);
31293 emit_move_insn (tmp, unspec);
31294 unspec = tmp;
31295 }
31296
31297 emit_move_insn (target, unspec);
31298 }
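
/* An arithmetic note that connects this to the non-constant case
   below: for a 5-bit element index e, 31 - e == (~e & 31), and vperm
   only examines the low five bits of each selector byte.  So the
   element-wise "subtract from 31" performed on the constant selector
   above can be done with a single bitwise NOT when the selector lives
   in a register, which is what altivec_expand_vec_perm_le does with
   its VNAND/VNOR.  */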
31299
31300 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
31301 permute control vector. But here it's not a constant, so we must
31302 generate a vector NAND or NOR to do the adjustment. */
31303
31304 void
31305 altivec_expand_vec_perm_le (rtx operands[4])
31306 {
31307 rtx notx, iorx, unspec;
31308 rtx target = operands[0];
31309 rtx op0 = operands[1];
31310 rtx op1 = operands[2];
31311 rtx sel = operands[3];
31312 rtx tmp = target;
31313 rtx norreg = gen_reg_rtx (V16QImode);
31314 machine_mode mode = GET_MODE (target);
31315
31316 /* Get everything in regs so the pattern matches. */
31317 if (!REG_P (op0))
31318 op0 = force_reg (mode, op0);
31319 if (!REG_P (op1))
31320 op1 = force_reg (mode, op1);
31321 if (!REG_P (sel))
31322 sel = force_reg (V16QImode, sel);
31323 if (!REG_P (target))
31324 tmp = gen_reg_rtx (mode);
31325
31326 /* Invert the selector with a VNAND if available, else a VNOR.
31327 The VNAND is preferred for future fusion opportunities. */
31328 notx = gen_rtx_NOT (V16QImode, sel);
31329 iorx = (TARGET_P8_VECTOR
31330 ? gen_rtx_IOR (V16QImode, notx, notx)
31331 : gen_rtx_AND (V16QImode, notx, notx));
31332 emit_insn (gen_rtx_SET (norreg, iorx));
31333
31334 /* Permute with operands reversed and adjusted selector. */
31335 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
31336 UNSPEC_VPERM);
31337
31338 /* Copy into target, possibly by way of a register. */
31339 if (!REG_P (target))
31340 {
31341 emit_move_insn (tmp, unspec);
31342 unspec = tmp;
31343 }
31344
31345 emit_move_insn (target, unspec);
31346 }
31347
31348 /* Expand an Altivec constant permutation. Return true if we match
31349 an efficient implementation; false to fall back to VPERM. */
31350
31351 bool
31352 altivec_expand_vec_perm_const (rtx operands[4])
31353 {
31354 struct altivec_perm_insn {
31355 HOST_WIDE_INT mask;
31356 enum insn_code impl;
31357 unsigned char perm[16];
31358 };
31359 static const struct altivec_perm_insn patterns[] = {
31360 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
31361 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
31362 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
31363 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
31364 { OPTION_MASK_ALTIVEC,
31365 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
31366 : CODE_FOR_altivec_vmrglb_direct),
31367 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
31368 { OPTION_MASK_ALTIVEC,
31369 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
31370 : CODE_FOR_altivec_vmrglh_direct),
31371 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
31372 { OPTION_MASK_ALTIVEC,
31373 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
31374 : CODE_FOR_altivec_vmrglw_direct),
31375 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
31376 { OPTION_MASK_ALTIVEC,
31377 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
31378 : CODE_FOR_altivec_vmrghb_direct),
31379 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
31380 { OPTION_MASK_ALTIVEC,
31381 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
31382 : CODE_FOR_altivec_vmrghh_direct),
31383 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
31384 { OPTION_MASK_ALTIVEC,
31385 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
31386 : CODE_FOR_altivec_vmrghw_direct),
31387 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
31388 { OPTION_MASK_P8_VECTOR, CODE_FOR_p8_vmrgew,
31389 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
31390 { OPTION_MASK_P8_VECTOR, CODE_FOR_p8_vmrgow,
31391 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
31392 };
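  /* Each perm[] above lists, in BE order, the byte indices the insn
     selects from the 32-byte concatenation of its two inputs
     (0-15 from the first operand, 16-31 from the second).  */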
31393
31394 unsigned int i, j, elt, which;
31395 unsigned char perm[16];
31396 rtx target, op0, op1, sel, x;
31397 bool one_vec;
31398
31399 target = operands[0];
31400 op0 = operands[1];
31401 op1 = operands[2];
31402 sel = operands[3];
31403
31404 /* Unpack the constant selector. */
31405 for (i = which = 0; i < 16; ++i)
31406 {
31407 rtx e = XVECEXP (sel, 0, i);
31408 elt = INTVAL (e) & 31;
31409 which |= (elt < 16 ? 1 : 2);
31410 perm[i] = elt;
31411 }
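  /* E.g. the vmrghb selector { 0, 16, 1, 17, ... } references both
     inputs, giving which == 3; which == 1 means only op0 is used
     and which == 2 means only op1.  */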
31412
31413 /* Simplify the constant selector based on operands. */
31414 switch (which)
31415 {
31416 default:
31417 gcc_unreachable ();
31418
31419 case 3:
31420 one_vec = false;
31421 if (!rtx_equal_p (op0, op1))
31422 break;
31423 /* FALLTHRU */
31424
31425 case 2:
31426 for (i = 0; i < 16; ++i)
31427 perm[i] &= 15;
31428 op0 = op1;
31429 one_vec = true;
31430 break;
31431
31432 case 1:
31433 op1 = op0;
31434 one_vec = true;
31435 break;
31436 }
31437
31438 /* Look for splat patterns. */
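  /* E.g. a selector of all 3s is a byte splat (vspltb 3 on BE), and
     { 4, 5, 4, 5, ... } is a halfword splat (vsplth 2 on BE).  */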
31439 if (one_vec)
31440 {
31441 elt = perm[0];
31442
31443 for (i = 0; i < 16; ++i)
31444 if (perm[i] != elt)
31445 break;
31446 if (i == 16)
31447 {
31448 if (!BYTES_BIG_ENDIAN)
31449 elt = 15 - elt;
31450 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
31451 return true;
31452 }
31453
31454 if (elt % 2 == 0)
31455 {
31456 for (i = 0; i < 16; i += 2)
31457 if (perm[i] != elt || perm[i + 1] != elt + 1)
31458 break;
31459 if (i == 16)
31460 {
31461 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
31462 x = gen_reg_rtx (V8HImode);
31463 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
31464 GEN_INT (field)));
31465 emit_move_insn (target, gen_lowpart (V16QImode, x));
31466 return true;
31467 }
31468 }
31469
31470 if (elt % 4 == 0)
31471 {
31472 for (i = 0; i < 16; i += 4)
31473 if (perm[i] != elt
31474 || perm[i + 1] != elt + 1
31475 || perm[i + 2] != elt + 2
31476 || perm[i + 3] != elt + 3)
31477 break;
31478 if (i == 16)
31479 {
31480 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
31481 x = gen_reg_rtx (V4SImode);
31482 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
31483 GEN_INT (field)));
31484 emit_move_insn (target, gen_lowpart (V16QImode, x));
31485 return true;
31486 }
31487 }
31488 }
31489
31490 /* Look for merge and pack patterns. */
31491 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
31492 {
31493 bool swapped;
31494
31495 if ((patterns[j].mask & rs6000_isa_flags) == 0)
31496 continue;
31497
31498 elt = patterns[j].perm[0];
31499 if (perm[0] == elt)
31500 swapped = false;
31501 else if (perm[0] == elt + 16)
31502 swapped = true;
31503 else
31504 continue;
31505 for (i = 1; i < 16; ++i)
31506 {
31507 elt = patterns[j].perm[i];
31508 if (swapped)
31509 elt = (elt >= 16 ? elt - 16 : elt + 16);
31510 else if (one_vec && elt >= 16)
31511 elt -= 16;
31512 if (perm[i] != elt)
31513 break;
31514 }
31515 if (i == 16)
31516 {
31517 enum insn_code icode = patterns[j].impl;
31518 machine_mode omode = insn_data[icode].operand[0].mode;
31519 machine_mode imode = insn_data[icode].operand[1].mode;
31520
31521 /* For little-endian, don't use vpkuwum and vpkuhum if the
31523 underlying vector type is not V4SI or V8HI, respectively.
31523 For example, using vpkuwum with a V8HI picks up the even
31524 halfwords (BE numbering) when the even halfwords (LE
31525 numbering) are what we need. */
31526 if (!BYTES_BIG_ENDIAN
31527 && icode == CODE_FOR_altivec_vpkuwum_direct
31528 && ((GET_CODE (op0) == REG
31529 && GET_MODE (op0) != V4SImode)
31530 || (GET_CODE (op0) == SUBREG
31531 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
31532 continue;
31533 if (!BYTES_BIG_ENDIAN
31534 && icode == CODE_FOR_altivec_vpkuhum_direct
31535 && ((GET_CODE (op0) == REG
31536 && GET_MODE (op0) != V8HImode)
31537 || (GET_CODE (op0) == SUBREG
31538 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
31539 continue;
31540
31541 /* For little-endian, the two input operands must be swapped
31542 (or swapped back) to ensure proper right-to-left numbering
31543 from 0 to 2N-1. */
31544 if (swapped ^ !BYTES_BIG_ENDIAN)
31545 std::swap (op0, op1);
31546 if (imode != V16QImode)
31547 {
31548 op0 = gen_lowpart (imode, op0);
31549 op1 = gen_lowpart (imode, op1);
31550 }
31551 if (omode == V16QImode)
31552 x = target;
31553 else
31554 x = gen_reg_rtx (omode);
31555 emit_insn (GEN_FCN (icode) (x, op0, op1));
31556 if (omode != V16QImode)
31557 emit_move_insn (target, gen_lowpart (V16QImode, x));
31558 return true;
31559 }
31560 }
31561
31562 if (!BYTES_BIG_ENDIAN)
31563 {
31564 altivec_expand_vec_perm_const_le (operands);
31565 return true;
31566 }
31567
31568 return false;
31569 }
31570
31571 /* Expand a Paired Single, VSX Permute Doubleword, or SPE constant permutation.
31572 Return true if we match an efficient implementation. */
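/* PERM0 and PERM1 are two-bit indices into the four elements of the
   virtual concatenation { op0, op1 }: bit 1 selects the operand and
   bit 0 the element within it.  */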
31573
31574 static bool
31575 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
31576 unsigned char perm0, unsigned char perm1)
31577 {
31578 rtx x;
31579
31580 /* If both selectors come from the same operand, fold to single op. */
31581 if ((perm0 & 2) == (perm1 & 2))
31582 {
31583 if (perm0 & 2)
31584 op0 = op1;
31585 else
31586 op1 = op0;
31587 }
31588 /* If both operands are equal, fold to simpler permutation. */
31589 if (rtx_equal_p (op0, op1))
31590 {
31591 perm0 = perm0 & 1;
31592 perm1 = (perm1 & 1) + 2;
31593 }
31594 /* If the first selector comes from the second operand, swap. */
31595 else if (perm0 & 2)
31596 {
31597 if (perm1 & 2)
31598 return false;
31599 perm0 -= 2;
31600 perm1 += 2;
31601 std::swap (op0, op1);
31602 }
31603 /* If the second selector does not come from the second operand, fail. */
31604 else if ((perm1 & 2) == 0)
31605 return false;
31606
31607 /* Success! */
31608 if (target != NULL)
31609 {
31610 machine_mode vmode, dmode;
31611 rtvec v;
31612
31613 vmode = GET_MODE (target);
31614 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
31615 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4);
31616 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
31617 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
31618 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
31619 emit_insn (gen_rtx_SET (target, x));
31620 }
31621 return true;
31622 }
31623
31624 bool
31625 rs6000_expand_vec_perm_const (rtx operands[4])
31626 {
31627 rtx target, op0, op1, sel;
31628 unsigned char perm0, perm1;
31629
31630 target = operands[0];
31631 op0 = operands[1];
31632 op1 = operands[2];
31633 sel = operands[3];
31634
31635 /* Unpack the constant selector. */
31636 perm0 = INTVAL (XVECEXP (sel, 0, 0)) & 3;
31637 perm1 = INTVAL (XVECEXP (sel, 0, 1)) & 3;
31638
31639 return rs6000_expand_vec_perm_const_1 (target, op0, op1, perm0, perm1);
31640 }
31641
31642 /* Test whether a constant permutation is supported. */
31643
31644 static bool
31645 rs6000_vectorize_vec_perm_const_ok (machine_mode vmode,
31646 const unsigned char *sel)
31647 {
31648 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
31649 if (TARGET_ALTIVEC)
31650 return true;
31651
31652 /* Check for ps_merge* or evmerge* insns. */
31653 if ((TARGET_PAIRED_FLOAT && vmode == V2SFmode)
31654 || (TARGET_SPE && vmode == V2SImode))
31655 {
31656 rtx op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
31657 rtx op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
31658 return rs6000_expand_vec_perm_const_1 (NULL, op0, op1, sel[0], sel[1]);
31659 }
31660
31661 return false;
31662 }
31663
31664 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave. */
31665
31666 static void
31667 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
31668 machine_mode vmode, unsigned nelt, rtx perm[])
31669 {
31670 machine_mode imode;
31671 rtx x;
31672
31673 imode = vmode;
31674 if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT)
31675 {
31676 imode = GET_MODE_INNER (vmode);
31677 imode = mode_for_size (GET_MODE_BITSIZE (imode), MODE_INT, 0);
31678 imode = mode_for_vector (imode, nelt);
31679 }
31680
31681 x = gen_rtx_CONST_VECTOR (imode, gen_rtvec_v (nelt, perm));
31682 x = expand_vec_perm (vmode, op0, op1, x, target);
31683 if (x != target)
31684 emit_move_insn (target, x);
31685 }
31686
31687 /* Expand an extract even operation. */
31688
31689 void
31690 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
31691 {
31692 machine_mode vmode = GET_MODE (target);
31693 unsigned i, nelt = GET_MODE_NUNITS (vmode);
31694 rtx perm[16];
31695
31696 for (i = 0; i < nelt; i++)
31697 perm[i] = GEN_INT (i * 2);
31698
31699 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
31700 }
31701
31702 /* Expand a vector interleave operation. */
31703
31704 void
31705 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
31706 {
31707 machine_mode vmode = GET_MODE (target);
31708 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
31709 rtx perm[16];
31710
31711 high = (highp ? 0 : nelt / 2);
31712 for (i = 0; i < nelt / 2; i++)
31713 {
31714 perm[i * 2] = GEN_INT (i + high);
31715 perm[i * 2 + 1] = GEN_INT (i + nelt + high);
31716 }
31717
31718 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
31719 }
31720
31721 /* Scale a V2DF vector SRC by two to the SCALE and place in TGT. */
31722 void
31723 rs6000_scale_v2df (rtx tgt, rtx src, int scale)
31724 {
31725 HOST_WIDE_INT hwi_scale (scale);
31726 REAL_VALUE_TYPE r_pow;
31727 rtvec v = rtvec_alloc (2);
31728 rtx elt;
31729 rtx scale_vec = gen_reg_rtx (V2DFmode);
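  /* Build the constant vector { 2.0**scale, 2.0**scale } and multiply
     SRC by it.  */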
31730 (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
31731 elt = CONST_DOUBLE_FROM_REAL_VALUE (r_pow, DFmode);
31732 RTVEC_ELT (v, 0) = elt;
31733 RTVEC_ELT (v, 1) = elt;
31734 rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
31735 emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
31736 }
31737
31738 /* Return an RTX representing where to find the function value of a
31739 function returning MODE. */
31740 static rtx
31741 rs6000_complex_function_value (machine_mode mode)
31742 {
31743 unsigned int regno;
31744 rtx r1, r2;
31745 machine_mode inner = GET_MODE_INNER (mode);
31746 unsigned int inner_bytes = GET_MODE_SIZE (inner);
31747
31748 if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
31749 regno = FP_ARG_RETURN;
31750 else
31751 {
31752 regno = GP_ARG_RETURN;
31753
31754 /* 32-bit is OK since it'll go in r3/r4. */
31755 if (TARGET_32BIT && inner_bytes >= 4)
31756 return gen_rtx_REG (mode, regno);
31757 }
31758
31759 if (inner_bytes >= 8)
31760 return gen_rtx_REG (mode, regno);
31761
31762 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
31763 const0_rtx);
31764 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
31765 GEN_INT (inner_bytes));
31766 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
31767 }
31768
31769 /* Return an rtx describing a return value of MODE as a PARALLEL
31770 in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
31771 stride REG_STRIDE. */
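/* For example, splitting a 32-bit DImode return yields a PARALLEL of
   r3 at byte offset 0 and r4 at byte offset 4.  */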
31772
31773 static rtx
31774 rs6000_parallel_return (machine_mode mode,
31775 int n_elts, machine_mode elt_mode,
31776 unsigned int regno, unsigned int reg_stride)
31777 {
31778 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
31779
31780 int i;
31781 for (i = 0; i < n_elts; i++)
31782 {
31783 rtx r = gen_rtx_REG (elt_mode, regno);
31784 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
31785 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
31786 regno += reg_stride;
31787 }
31788
31789 return par;
31790 }
31791
31792 /* Target hook for TARGET_FUNCTION_VALUE.
31793
31794 On the SPE, both FPs and vectors are returned in r3.
31795
31796 On RS/6000 an integer value is in r3 and a floating-point value is in
31797 fp1, unless -msoft-float. */
31798
31799 static rtx
31800 rs6000_function_value (const_tree valtype,
31801 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
31802 bool outgoing ATTRIBUTE_UNUSED)
31803 {
31804 machine_mode mode;
31805 unsigned int regno;
31806 machine_mode elt_mode;
31807 int n_elts;
31808
31809 /* Special handling for structs in darwin64. */
31810 if (TARGET_MACHO
31811 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
31812 {
31813 CUMULATIVE_ARGS valcum;
31814 rtx valret;
31815
31816 valcum.words = 0;
31817 valcum.fregno = FP_ARG_MIN_REG;
31818 valcum.vregno = ALTIVEC_ARG_MIN_REG;
31819 /* Do a trial code generation as if this were going to be passed as
31820 an argument; if any part goes in memory, we return NULL. */
31821 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
31822 if (valret)
31823 return valret;
31824 /* Otherwise fall through to standard ABI rules. */
31825 }
31826
31827 mode = TYPE_MODE (valtype);
31828
31829 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
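  /* For instance, struct { double x, y; } comes back in fp1 and fp2,
     and a homogeneous vector aggregate starts at v2.  */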
31830 if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
31831 {
31832 int first_reg, n_regs;
31833
31834 if (SCALAR_FLOAT_MODE_P (elt_mode))
31835 {
31836 /* _Decimal128 must use even/odd register pairs. */
31837 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
31838 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
31839 }
31840 else
31841 {
31842 first_reg = ALTIVEC_ARG_RETURN;
31843 n_regs = 1;
31844 }
31845
31846 return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
31847 }
31848
31849 /* Some return value types need to be split in the 32-bit ABI with -mpowerpc64. */
31850 if (TARGET_32BIT && TARGET_POWERPC64)
31851 switch (mode)
31852 {
31853 default:
31854 break;
31855 case DImode:
31856 case SCmode:
31857 case DCmode:
31858 case TCmode:
31859 int count = GET_MODE_SIZE (mode) / 4;
31860 return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
31861 }
31862
31863 if ((INTEGRAL_TYPE_P (valtype)
31864 && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
31865 || POINTER_TYPE_P (valtype))
31866 mode = TARGET_32BIT ? SImode : DImode;
31867
31868 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
31869 /* _Decimal128 must use an even/odd register pair. */
31870 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
31871 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT && TARGET_FPRS
31872 && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
31873 regno = FP_ARG_RETURN;
31874 else if (TREE_CODE (valtype) == COMPLEX_TYPE
31875 && targetm.calls.split_complex_arg)
31876 return rs6000_complex_function_value (mode);
31877 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
31878 return register is used in both cases, and we won't see V2DImode/V2DFmode
31879 for pure altivec, combine the two cases. */
31880 else if (TREE_CODE (valtype) == VECTOR_TYPE
31881 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
31882 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
31883 regno = ALTIVEC_ARG_RETURN;
31884 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
31885 && (mode == DFmode || mode == DCmode
31886 || mode == TFmode || mode == TCmode))
31887 return spe_build_register_parallel (mode, GP_ARG_RETURN);
31888 else
31889 regno = GP_ARG_RETURN;
31890
31891 return gen_rtx_REG (mode, regno);
31892 }
31893
31894 /* Define how to find the value returned by a library function
31895 assuming the value has mode MODE. */
31896 rtx
31897 rs6000_libcall_value (machine_mode mode)
31898 {
31899 unsigned int regno;
31900
31901 /* Long long return values need to be split in the 32-bit ABI with -mpowerpc64. */
31902 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
31903 return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);
31904
31905 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
31906 /* _Decimal128 must use an even/odd register pair. */
31907 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
31908 else if (SCALAR_FLOAT_MODE_P (mode)
31909 && TARGET_HARD_FLOAT && TARGET_FPRS
31910 && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
31911 regno = FP_ARG_RETURN;
31912 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
31913 return register is used in both cases, and we won't see V2DImode/V2DFmode
31914 for pure altivec, combine the two cases. */
31915 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
31916 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
31917 regno = ALTIVEC_ARG_RETURN;
31918 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
31919 return rs6000_complex_function_value (mode);
31920 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
31921 && (mode == DFmode || mode == DCmode
31922 || mode == TFmode || mode == TCmode))
31923 return spe_build_register_parallel (mode, GP_ARG_RETURN);
31924 else
31925 regno = GP_ARG_RETURN;
31926
31927 return gen_rtx_REG (mode, regno);
31928 }
31929
31930
31931 /* Return true if we use LRA instead of the reload pass. */
31932 static bool
31933 rs6000_lra_p (void)
31934 {
31935 return rs6000_lra_flag;
31936 }
31937
31938 /* Given FROM and TO register numbers, say whether this elimination is allowed.
31939 Frame pointer elimination is automatically handled.
31940
31941 For the RS/6000, if frame pointer elimination is being done, we would like
31942 to convert ap into fp, not sp.
31943
31944 We need r30 if -mminimal-toc was specified, and there are constant pool
31945 references. */
31946
31947 static bool
31948 rs6000_can_eliminate (const int from, const int to)
31949 {
31950 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
31951 ? ! frame_pointer_needed
31952 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
31953 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC || get_pool_size () == 0
31954 : true);
31955 }
31956
31957 /* Define the offset between two registers, FROM to be eliminated and its
31958 replacement TO, at the start of a routine. */
31959 HOST_WIDE_INT
31960 rs6000_initial_elimination_offset (int from, int to)
31961 {
31962 rs6000_stack_t *info = rs6000_stack_info ();
31963 HOST_WIDE_INT offset;
31964
31965 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
31966 offset = info->push_p ? 0 : -info->total_size;
31967 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
31968 {
31969 offset = info->push_p ? 0 : -info->total_size;
31970 if (FRAME_GROWS_DOWNWARD)
31971 offset += info->fixed_size + info->vars_size + info->parm_size;
31972 }
31973 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
31974 offset = FRAME_GROWS_DOWNWARD
31975 ? info->fixed_size + info->vars_size + info->parm_size
31976 : 0;
31977 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
31978 offset = info->total_size;
31979 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
31980 offset = info->push_p ? info->total_size : 0;
31981 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
31982 offset = 0;
31983 else
31984 gcc_unreachable ();
31985
31986 return offset;
31987 }
31988
31989 static rtx
31990 rs6000_dwarf_register_span (rtx reg)
31991 {
31992 rtx parts[8];
31993 int i, words;
31994 unsigned regno = REGNO (reg);
31995 machine_mode mode = GET_MODE (reg);
31996
31997 if (TARGET_SPE
31998 && regno < 32
31999 && (SPE_VECTOR_MODE (GET_MODE (reg))
32000 || (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode)
32001 && mode != SFmode && mode != SDmode && mode != SCmode)))
32002 ;
32003 else
32004 return NULL_RTX;
32005
32006 regno = REGNO (reg);
32007
32008 /* The duality of the SPE register size wreaks all kinds of havoc.
32009 This is a way of distinguishing r0 in 32-bits from r0 in
32010 64-bits. */
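  /* Each 64-bit word is described as the pair { SPE high half, GPR },
     high half first on big-endian and GPR first on little-endian.  */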
32011 words = (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
32012 gcc_assert (words <= 4);
32013 for (i = 0; i < words; i++, regno++)
32014 {
32015 if (BYTES_BIG_ENDIAN)
32016 {
32017 parts[2 * i] = gen_rtx_REG (SImode, regno + FIRST_SPE_HIGH_REGNO);
32018 parts[2 * i + 1] = gen_rtx_REG (SImode, regno);
32019 }
32020 else
32021 {
32022 parts[2 * i] = gen_rtx_REG (SImode, regno);
32023 parts[2 * i + 1] = gen_rtx_REG (SImode, regno + FIRST_SPE_HIGH_REGNO);
32024 }
32025 }
32026
32027 return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (words * 2, parts));
32028 }
32029
32030 /* Fill in sizes for SPE register high parts in the table used by the unwinder. */
32031
32032 static void
32033 rs6000_init_dwarf_reg_sizes_extra (tree address)
32034 {
32035 if (TARGET_SPE)
32036 {
32037 int i;
32038 machine_mode mode = TYPE_MODE (char_type_node);
32039 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
32040 rtx mem = gen_rtx_MEM (BLKmode, addr);
32041 rtx value = gen_int_mode (4, mode);
32042
32043 for (i = FIRST_SPE_HIGH_REGNO; i < LAST_SPE_HIGH_REGNO+1; i++)
32044 {
32045 int column = DWARF_REG_TO_UNWIND_COLUMN
32046 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
32047 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
32048
32049 emit_move_insn (adjust_address (mem, mode, offset), value);
32050 }
32051 }
32052
32053 if (TARGET_MACHO && ! TARGET_ALTIVEC)
32054 {
32055 int i;
32056 machine_mode mode = TYPE_MODE (char_type_node);
32057 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
32058 rtx mem = gen_rtx_MEM (BLKmode, addr);
32059 rtx value = gen_int_mode (16, mode);
32060
32061 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
32062 The unwinder still needs to know the size of Altivec registers. */
32063
32064 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
32065 {
32066 int column = DWARF_REG_TO_UNWIND_COLUMN
32067 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
32068 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
32069
32070 emit_move_insn (adjust_address (mem, mode, offset), value);
32071 }
32072 }
32073 }
32074
32075 /* Map internal gcc register numbers to debug format register numbers.
32076 FORMAT specifies the type of debug register number to use:
32077 0 -- debug information, except for frame-related sections
32078 1 -- DWARF .debug_frame section
32079 2 -- DWARF .eh_frame section */
32080
32081 unsigned int
32082 rs6000_dbx_register_number (unsigned int regno, unsigned int format)
32083 {
32084 /* We never use the GCC internal number for SPE high registers.
32085 Those are mapped to the 1200..1231 range for all debug formats. */
32086 if (SPE_HIGH_REGNO_P (regno))
32087 return regno - FIRST_SPE_HIGH_REGNO + 1200;
32088
32089 /* Except for the above, we use the internal number for non-DWARF
32090 debug information, and also for .eh_frame. */
32091 if ((format == 0 && write_symbols != DWARF2_DEBUG) || format == 2)
32092 return regno;
32093
32094 /* On some platforms, we use the standard DWARF register
32095 numbering for .debug_info and .debug_frame. */
32096 #ifdef RS6000_USE_DWARF_NUMBERING
32097 if (regno <= 63)
32098 return regno;
32099 if (regno == LR_REGNO)
32100 return 108;
32101 if (regno == CTR_REGNO)
32102 return 109;
32103 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
32104 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
32105 The actual code emitted saves the whole of CR, so we map CR2_REGNO
32106 to the DWARF reg for CR. */
32107 if (format == 1 && regno == CR2_REGNO)
32108 return 64;
32109 if (CR_REGNO_P (regno))
32110 return regno - CR0_REGNO + 86;
32111 if (regno == CA_REGNO)
32112 return 101; /* XER */
32113 if (ALTIVEC_REGNO_P (regno))
32114 return regno - FIRST_ALTIVEC_REGNO + 1124;
32115 if (regno == VRSAVE_REGNO)
32116 return 356;
32117 if (regno == VSCR_REGNO)
32118 return 67;
32119 if (regno == SPE_ACC_REGNO)
32120 return 99;
32121 if (regno == SPEFSCR_REGNO)
32122 return 612;
32123 #endif
32124 return regno;
32125 }
32126
32127 /* Target hook for eh_return_filter_mode. */
32128 static machine_mode
32129 rs6000_eh_return_filter_mode (void)
32130 {
32131 return TARGET_32BIT ? SImode : word_mode;
32132 }
32133
32134 /* Target hook for scalar_mode_supported_p. */
32135 static bool
32136 rs6000_scalar_mode_supported_p (machine_mode mode)
32137 {
32138 /* -m32 does not support TImode. This is the default, from
32139 default_scalar_mode_supported_p. For -m32 -mpowerpc64 we want the
32140 same ABI as for -m32. But default_scalar_mode_supported_p allows
32141 integer modes of precision 2 * BITS_PER_WORD, which matches TImode
32142 for -mpowerpc64. */
32143 if (TARGET_32BIT && mode == TImode)
32144 return false;
32145
32146 if (DECIMAL_FLOAT_MODE_P (mode))
32147 return default_decimal_float_supported_p ();
32148 else
32149 return default_scalar_mode_supported_p (mode);
32150 }
32151
32152 /* Target hook for vector_mode_supported_p. */
32153 static bool
32154 rs6000_vector_mode_supported_p (machine_mode mode)
32155 {
32156
32157 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
32158 return true;
32159
32160 if (TARGET_SPE && SPE_VECTOR_MODE (mode))
32161 return true;
32162
32163 else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
32164 return true;
32165
32166 else
32167 return false;
32168 }
32169
32170 /* Target hook for invalid_arg_for_unprototyped_fn. */
32171 static const char *
32172 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
32173 {
32174 return (!rs6000_darwin64_abi
32175 && typelist == 0
32176 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
32177 && (funcdecl == NULL_TREE
32178 || (TREE_CODE (funcdecl) == FUNCTION_DECL
32179 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
32180 ? N_("AltiVec argument passed to unprototyped function")
32181 : NULL;
32182 }
32183
32184 /* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
32185 setup by using __stack_chk_fail_local hidden function instead of
32186 calling __stack_chk_fail directly. Otherwise it is better to call
32187 __stack_chk_fail directly. */
32188
32189 static tree ATTRIBUTE_UNUSED
32190 rs6000_stack_protect_fail (void)
32191 {
32192 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
32193 ? default_hidden_stack_protect_fail ()
32194 : default_external_stack_protect_fail ();
32195 }
32196
32197 void
32198 rs6000_final_prescan_insn (rtx_insn *insn, rtx *operand ATTRIBUTE_UNUSED,
32199 int num_operands ATTRIBUTE_UNUSED)
32200 {
32201 if (rs6000_warn_cell_microcode)
32202 {
32203 const char *temp;
32204 int insn_code_number = recog_memoized (insn);
32205 location_t location = INSN_LOCATION (insn);
32206
32207 /* Punt on insns we cannot recognize. */
32208 if (insn_code_number < 0)
32209 return;
32210
32211 temp = get_insn_template (insn_code_number, insn);
32212
32213 if (get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS)
32214 warning_at (location, OPT_mwarn_cell_microcode,
32215 "emitting microcode insn %s\t[%s] #%d",
32216 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
32217 else if (get_attr_cell_micro (insn) == CELL_MICRO_CONDITIONAL)
32218 warning_at (location, OPT_mwarn_cell_microcode,
32219 "emitting conditional microcode insn %s\t[%s] #%d",
32220 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
32221 }
32222 }
32223
32224 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
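/* With the default shadow scale of 3, AddressSanitizer computes the
   shadow address of a byte as (address >> 3) + this offset.  */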
32225
32226 #if TARGET_ELF
32227 static unsigned HOST_WIDE_INT
32228 rs6000_asan_shadow_offset (void)
32229 {
32230 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
32231 }
32232 #endif
32233 \f
32234 /* Mask options that we want to support inside of attribute((target)) and
32235 #pragma GCC target operations. Note, we do not include things like
32236 64/32-bit, endianness, hard/soft floating point, etc. that would have
32237 different calling sequences. */
32238
32239 struct rs6000_opt_mask {
32240 const char *name; /* option name */
32241 HOST_WIDE_INT mask; /* mask to set */
32242 bool invert; /* invert sense of mask */
32243 bool valid_target; /* option is a target option */
32244 };
32245
32246 static struct rs6000_opt_mask const rs6000_opt_masks[] =
32247 {
32248 { "altivec", OPTION_MASK_ALTIVEC, false, true },
32249 { "cmpb", OPTION_MASK_CMPB, false, true },
32250 { "crypto", OPTION_MASK_CRYPTO, false, true },
32251 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
32252 { "dlmzb", OPTION_MASK_DLMZB, false, true },
32253 { "fprnd", OPTION_MASK_FPRND, false, true },
32254 { "hard-dfp", OPTION_MASK_DFP, false, true },
32255 { "htm", OPTION_MASK_HTM, false, true },
32256 { "isel", OPTION_MASK_ISEL, false, true },
32257 { "mfcrf", OPTION_MASK_MFCRF, false, true },
32258 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
32259 { "mulhw", OPTION_MASK_MULHW, false, true },
32260 { "multiple", OPTION_MASK_MULTIPLE, false, true },
32261 { "popcntb", OPTION_MASK_POPCNTB, false, true },
32262 { "popcntd", OPTION_MASK_POPCNTD, false, true },
32263 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
32264 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
32265 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
32266 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
32267 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
32268 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
32269 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
32270 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
32271 { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT, false, true },
32272 { "string", OPTION_MASK_STRING, false, true },
32273 { "update", OPTION_MASK_NO_UPDATE, true , true },
32274 { "upper-regs-df", OPTION_MASK_UPPER_REGS_DF, false, true },
32275 { "upper-regs-sf", OPTION_MASK_UPPER_REGS_SF, false, true },
32276 { "vsx", OPTION_MASK_VSX, false, true },
32277 { "vsx-timode", OPTION_MASK_VSX_TIMODE, false, true },
32278 #ifdef OPTION_MASK_64BIT
32279 #if TARGET_AIX_OS
32280 { "aix64", OPTION_MASK_64BIT, false, false },
32281 { "aix32", OPTION_MASK_64BIT, true, false },
32282 #else
32283 { "64", OPTION_MASK_64BIT, false, false },
32284 { "32", OPTION_MASK_64BIT, true, false },
32285 #endif
32286 #endif
32287 #ifdef OPTION_MASK_EABI
32288 { "eabi", OPTION_MASK_EABI, false, false },
32289 #endif
32290 #ifdef OPTION_MASK_LITTLE_ENDIAN
32291 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
32292 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
32293 #endif
32294 #ifdef OPTION_MASK_RELOCATABLE
32295 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
32296 #endif
32297 #ifdef OPTION_MASK_STRICT_ALIGN
32298 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
32299 #endif
32300 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
32301 { "string", OPTION_MASK_STRING, false, false },
32302 };
32303
32304 /* Builtin mask mapping for printing the flags. */
32305 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
32306 {
32307 { "altivec", RS6000_BTM_ALTIVEC, false, false },
32308 { "vsx", RS6000_BTM_VSX, false, false },
32309 { "spe", RS6000_BTM_SPE, false, false },
32310 { "paired", RS6000_BTM_PAIRED, false, false },
32311 { "fre", RS6000_BTM_FRE, false, false },
32312 { "fres", RS6000_BTM_FRES, false, false },
32313 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
32314 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
32315 { "popcntd", RS6000_BTM_POPCNTD, false, false },
32316 { "cell", RS6000_BTM_CELL, false, false },
32317 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
32318 { "crypto", RS6000_BTM_CRYPTO, false, false },
32319 { "htm", RS6000_BTM_HTM, false, false },
32320 { "hard-dfp", RS6000_BTM_DFP, false, false },
32321 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
32322 { "long-double-128", RS6000_BTM_LDBL128, false, false },
32323 };
32324
32325 /* Option variables that we want to support inside attribute((target)) and
32326 #pragma GCC target operations. */
32327
32328 struct rs6000_opt_var {
32329 const char *name; /* option name */
32330 size_t global_offset; /* offset of the option in global_options. */
32331 size_t target_offset; /* offset of the option in target options. */
32332 };
32333
32334 static struct rs6000_opt_var const rs6000_opt_vars[] =
32335 {
32336 { "friz",
32337 offsetof (struct gcc_options, x_TARGET_FRIZ),
32338 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
32339 { "avoid-indexed-addresses",
32340 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
32341 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
32342 { "paired",
32343 offsetof (struct gcc_options, x_rs6000_paired_float),
32344 offsetof (struct cl_target_option, x_rs6000_paired_float), },
32345 { "longcall",
32346 offsetof (struct gcc_options, x_rs6000_default_long_calls),
32347 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
32348 { "optimize-swaps",
32349 offsetof (struct gcc_options, x_rs6000_optimize_swaps),
32350 offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
32351 { "allow-movmisalign",
32352 offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
32353 offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
32354 { "allow-df-permute",
32355 offsetof (struct gcc_options, x_TARGET_ALLOW_DF_PERMUTE),
32356 offsetof (struct cl_target_option, x_TARGET_ALLOW_DF_PERMUTE), },
32357 { "sched-groups",
32358 offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
32359 offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
32360 { "always-hint",
32361 offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
32362 offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
32363 { "align-branch-targets",
32364 offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
32365 offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
32366 { "vectorize-builtins",
32367 offsetof (struct gcc_options, x_TARGET_VECTORIZE_BUILTINS),
32368 offsetof (struct cl_target_option, x_TARGET_VECTORIZE_BUILTINS), },
32369 { "tls-markers",
32370 offsetof (struct gcc_options, x_tls_markers),
32371 offsetof (struct cl_target_option, x_tls_markers), },
32372 { "sched-prolog",
32373 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
32374 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
32375 { "sched-epilog",
32376 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
32377 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
32378 { "gen-cell-microcode",
32379 offsetof (struct gcc_options, x_rs6000_gen_cell_microcode),
32380 offsetof (struct cl_target_option, x_rs6000_gen_cell_microcode), },
32381 { "warn-cell-microcode",
32382 offsetof (struct gcc_options, x_rs6000_warn_cell_microcode),
32383 offsetof (struct cl_target_option, x_rs6000_warn_cell_microcode), },
32384 };
32385
32386 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
32387 parsing. Return true if there were no errors. */
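/* Accepted strings look like the command-line forms without the -m
   prefix, e.g. "vsx", "no-crypto", "cpu=power8" or "tune=power7".  */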
32388
32389 static bool
32390 rs6000_inner_target_options (tree args, bool attr_p)
32391 {
32392 bool ret = true;
32393
32394 if (args == NULL_TREE)
32395 ;
32396
32397 else if (TREE_CODE (args) == STRING_CST)
32398 {
32399 char *p = ASTRDUP (TREE_STRING_POINTER (args));
32400 char *q;
32401
32402 while ((q = strtok (p, ",")) != NULL)
32403 {
32404 bool error_p = false;
32405 bool not_valid_p = false;
32406 const char *cpu_opt = NULL;
32407
32408 p = NULL;
32409 if (strncmp (q, "cpu=", 4) == 0)
32410 {
32411 int cpu_index = rs6000_cpu_name_lookup (q+4);
32412 if (cpu_index >= 0)
32413 rs6000_cpu_index = cpu_index;
32414 else
32415 {
32416 error_p = true;
32417 cpu_opt = q+4;
32418 }
32419 }
32420 else if (strncmp (q, "tune=", 5) == 0)
32421 {
32422 int tune_index = rs6000_cpu_name_lookup (q+5);
32423 if (tune_index >= 0)
32424 rs6000_tune_index = tune_index;
32425 else
32426 {
32427 error_p = true;
32428 cpu_opt = q+5;
32429 }
32430 }
32431 else
32432 {
32433 size_t i;
32434 bool invert = false;
32435 char *r = q;
32436
32437 error_p = true;
32438 if (strncmp (r, "no-", 3) == 0)
32439 {
32440 invert = true;
32441 r += 3;
32442 }
32443
32444 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
32445 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
32446 {
32447 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
32448
32449 if (!rs6000_opt_masks[i].valid_target)
32450 not_valid_p = true;
32451 else
32452 {
32453 error_p = false;
32454 rs6000_isa_flags_explicit |= mask;
32455
32456 /* VSX needs altivec, so -mvsx automagically sets
32457 altivec and disables -mavoid-indexed-addresses. */
32458 if (!invert)
32459 {
32460 if (mask == OPTION_MASK_VSX)
32461 {
32462 mask |= OPTION_MASK_ALTIVEC;
32463 TARGET_AVOID_XFORM = 0;
32464 }
32465 }
32466
32467 if (rs6000_opt_masks[i].invert)
32468 invert = !invert;
32469
32470 if (invert)
32471 rs6000_isa_flags &= ~mask;
32472 else
32473 rs6000_isa_flags |= mask;
32474 }
32475 break;
32476 }
32477
32478 if (error_p && !not_valid_p)
32479 {
32480 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
32481 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
32482 {
32483 size_t j = rs6000_opt_vars[i].global_offset;
32484 *((int *) ((char *)&global_options + j)) = !invert;
32485 error_p = false;
32486 not_valid_p = false;
32487 break;
32488 }
32489 }
32490 }
32491
32492 if (error_p)
32493 {
32494 const char *eprefix, *esuffix;
32495
32496 ret = false;
32497 if (attr_p)
32498 {
32499 eprefix = "__attribute__((__target__(";
32500 esuffix = ")))";
32501 }
32502 else
32503 {
32504 eprefix = "#pragma GCC target ";
32505 esuffix = "";
32506 }
32507
32508 if (cpu_opt)
32509 error ("invalid cpu \"%s\" for %s\"%s\"%s", cpu_opt, eprefix,
32510 q, esuffix);
32511 else if (not_valid_p)
32512 error ("%s\"%s\"%s is not allowed", eprefix, q, esuffix);
32513 else
32514 error ("%s\"%s\"%s is invalid", eprefix, q, esuffix);
32515 }
32516 }
32517 }
32518
32519 else if (TREE_CODE (args) == TREE_LIST)
32520 {
32521 do
32522 {
32523 tree value = TREE_VALUE (args);
32524 if (value)
32525 {
32526 bool ret2 = rs6000_inner_target_options (value, attr_p);
32527 if (!ret2)
32528 ret = false;
32529 }
32530 args = TREE_CHAIN (args);
32531 }
32532 while (args != NULL_TREE);
32533 }
32534
32535 else
32536 gcc_unreachable ();
32537
32538 return ret;
32539 }
32540
32541 /* Print out the target options as a list for -mdebug=target. */
32542
32543 static void
32544 rs6000_debug_target_options (tree args, const char *prefix)
32545 {
32546 if (args == NULL_TREE)
32547 fprintf (stderr, "%s<NULL>", prefix);
32548
32549 else if (TREE_CODE (args) == STRING_CST)
32550 {
32551 char *p = ASTRDUP (TREE_STRING_POINTER (args));
32552 char *q;
32553
32554 while ((q = strtok (p, ",")) != NULL)
32555 {
32556 p = NULL;
32557 fprintf (stderr, "%s\"%s\"", prefix, q);
32558 prefix = ", ";
32559 }
32560 }
32561
32562 else if (TREE_CODE (args) == TREE_LIST)
32563 {
32564 do
32565 {
32566 tree value = TREE_VALUE (args);
32567 if (value)
32568 {
32569 rs6000_debug_target_options (value, prefix);
32570 prefix = ", ";
32571 }
32572 args = TREE_CHAIN (args);
32573 }
32574 while (args != NULL_TREE);
32575 }
32576
32577 else
32578 gcc_unreachable ();
32579
32580 return;
32581 }
32582
32583 \f
32584 /* Hook to validate attribute((target("..."))). */
32585
32586 static bool
32587 rs6000_valid_attribute_p (tree fndecl,
32588 tree ARG_UNUSED (name),
32589 tree args,
32590 int flags)
32591 {
32592 struct cl_target_option cur_target;
32593 bool ret;
32594 tree old_optimize = build_optimization_node (&global_options);
32595 tree new_target, new_optimize;
32596 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
32597
32598 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
32599
32600 if (TARGET_DEBUG_TARGET)
32601 {
32602 tree tname = DECL_NAME (fndecl);
32603 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
32604 if (tname)
32605 fprintf (stderr, "function: %.*s\n",
32606 (int) IDENTIFIER_LENGTH (tname),
32607 IDENTIFIER_POINTER (tname));
32608 else
32609 fprintf (stderr, "function: unknown\n");
32610
32611 fprintf (stderr, "args:");
32612 rs6000_debug_target_options (args, " ");
32613 fprintf (stderr, "\n");
32614
32615 if (flags)
32616 fprintf (stderr, "flags: 0x%x\n", flags);
32617
32618 fprintf (stderr, "--------------------\n");
32619 }
32620
32621 old_optimize = build_optimization_node (&global_options);
32622 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
32623
32624 /* If the function changed the optimization levels as well as setting target
32625 options, start with the optimizations specified. */
32626 if (func_optimize && func_optimize != old_optimize)
32627 cl_optimization_restore (&global_options,
32628 TREE_OPTIMIZATION (func_optimize));
32629
32630 /* The target attributes may also change some optimization flags, so update
32631 the optimization options if necessary. */
32632 cl_target_option_save (&cur_target, &global_options);
32633 rs6000_cpu_index = rs6000_tune_index = -1;
32634 ret = rs6000_inner_target_options (args, true);
32635
32636 /* Set up any additional state. */
32637 if (ret)
32638 {
32639 ret = rs6000_option_override_internal (false);
32640 new_target = build_target_option_node (&global_options);
32641 }
32642 else
32643 new_target = NULL;
32644
32645 new_optimize = build_optimization_node (&global_options);
32646
32647 if (!new_target)
32648 ret = false;
32649
32650 else if (fndecl)
32651 {
32652 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
32653
32654 if (old_optimize != new_optimize)
32655 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
32656 }
32657
32658 cl_target_option_restore (&global_options, &cur_target);
32659
32660 if (old_optimize != new_optimize)
32661 cl_optimization_restore (&global_options,
32662 TREE_OPTIMIZATION (old_optimize));
32663
32664 return ret;
32665 }
32666
32667 \f
32668 /* Hook to validate the current #pragma GCC target and set the state, and
32669 update the macros based on what was changed. If ARGS is NULL, then
32670 POP_TARGET is used to reset the options. */
32671
32672 bool
32673 rs6000_pragma_target_parse (tree args, tree pop_target)
32674 {
32675 tree prev_tree = build_target_option_node (&global_options);
32676 tree cur_tree;
32677 struct cl_target_option *prev_opt, *cur_opt;
32678 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
32679 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
32680
32681 if (TARGET_DEBUG_TARGET)
32682 {
32683 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
32684 fprintf (stderr, "args:");
32685 rs6000_debug_target_options (args, " ");
32686 fprintf (stderr, "\n");
32687
32688 if (pop_target)
32689 {
32690 fprintf (stderr, "pop_target:\n");
32691 debug_tree (pop_target);
32692 }
32693 else
32694 fprintf (stderr, "pop_target: <NULL>\n");
32695
32696 fprintf (stderr, "--------------------\n");
32697 }
32698
32699 if (! args)
32700 {
32701 cur_tree = ((pop_target)
32702 ? pop_target
32703 : target_option_default_node);
32704 cl_target_option_restore (&global_options,
32705 TREE_TARGET_OPTION (cur_tree));
32706 }
32707 else
32708 {
32709 rs6000_cpu_index = rs6000_tune_index = -1;
32710 if (!rs6000_inner_target_options (args, false)
32711 || !rs6000_option_override_internal (false)
32712 || (cur_tree = build_target_option_node (&global_options))
32713 == NULL_TREE)
32714 {
32715 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
32716 fprintf (stderr, "invalid pragma\n");
32717
32718 return false;
32719 }
32720 }
32721
32722 target_option_current_node = cur_tree;
32723
32724 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
32725 change the macros that are defined. */
32726 if (rs6000_target_modify_macros_ptr)
32727 {
32728 prev_opt = TREE_TARGET_OPTION (prev_tree);
32729 prev_bumask = prev_opt->x_rs6000_builtin_mask;
32730 prev_flags = prev_opt->x_rs6000_isa_flags;
32731
32732 cur_opt = TREE_TARGET_OPTION (cur_tree);
32733 cur_flags = cur_opt->x_rs6000_isa_flags;
32734 cur_bumask = cur_opt->x_rs6000_builtin_mask;
32735
32736 diff_bumask = (prev_bumask ^ cur_bumask);
32737 diff_flags = (prev_flags ^ cur_flags);
32738
32739 if ((diff_flags != 0) || (diff_bumask != 0))
32740 {
32741 /* Delete old macros. */
32742 rs6000_target_modify_macros_ptr (false,
32743 prev_flags & diff_flags,
32744 prev_bumask & diff_bumask);
32745
32746 /* Define new macros. */
32747 rs6000_target_modify_macros_ptr (true,
32748 cur_flags & diff_flags,
32749 cur_bumask & diff_bumask);
32750 }
32751 }
32752
32753 return true;
32754 }
32755
32756 \f
32757 /* Remember the last target of rs6000_set_current_function. */
32758 static GTY(()) tree rs6000_previous_fndecl;
32759
32760 /* Establish appropriate back-end context for processing the function
32761 FNDECL. The argument might be NULL to indicate processing at top
32762 level, outside of any function scope. */
32763 static void
32764 rs6000_set_current_function (tree fndecl)
32765 {
32766 tree old_tree = (rs6000_previous_fndecl
32767 ? DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl)
32768 : NULL_TREE);
32769
32770 tree new_tree = (fndecl
32771 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
32772 : NULL_TREE);
32773
32774 if (TARGET_DEBUG_TARGET)
32775 {
32776 bool print_final = false;
32777 fprintf (stderr, "\n==================== rs6000_set_current_function");
32778
32779 if (fndecl)
32780 fprintf (stderr, ", fndecl %s (%p)",
32781 (DECL_NAME (fndecl)
32782 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
32783 : "<unknown>"), (void *)fndecl);
32784
32785 if (rs6000_previous_fndecl)
32786 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
32787
32788 fprintf (stderr, "\n");
32789 if (new_tree)
32790 {
32791 fprintf (stderr, "\nnew fndecl target specific options:\n");
32792 debug_tree (new_tree);
32793 print_final = true;
32794 }
32795
32796 if (old_tree)
32797 {
32798 fprintf (stderr, "\nold fndecl target specific options:\n");
32799 debug_tree (old_tree);
32800 print_final = true;
32801 }
32802
32803 if (print_final)
32804 fprintf (stderr, "--------------------\n");
32805 }
32806
32807 /* Only change the context if the function changes. This hook is called
32808 several times in the course of compiling a function, and we don't want to
32809 slow things down too much or call target_reinit when it isn't safe. */
32810 if (fndecl && fndecl != rs6000_previous_fndecl)
32811 {
32812 rs6000_previous_fndecl = fndecl;
32813 if (old_tree == new_tree)
32814 ;
32815
32816 else if (new_tree && new_tree != target_option_default_node)
32817 {
32818 cl_target_option_restore (&global_options,
32819 TREE_TARGET_OPTION (new_tree));
32820 if (TREE_TARGET_GLOBALS (new_tree))
32821 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
32822 else
32823 TREE_TARGET_GLOBALS (new_tree)
32824 = save_target_globals_default_opts ();
32825 }
32826
32827 else if (old_tree && old_tree != target_option_default_node)
32828 {
32829 new_tree = target_option_current_node;
32830 cl_target_option_restore (&global_options,
32831 TREE_TARGET_OPTION (new_tree));
32832 if (TREE_TARGET_GLOBALS (new_tree))
32833 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
32834 else if (new_tree == target_option_default_node)
32835 restore_target_globals (&default_target_globals);
32836 else
32837 TREE_TARGET_GLOBALS (new_tree)
32838 = save_target_globals_default_opts ();
32839 }
32840 }
32841 }
32842
32843 \f
32844 /* Save the current options. */
32845
32846 static void
32847 rs6000_function_specific_save (struct cl_target_option *ptr,
32848 struct gcc_options *opts)
32849 {
32850 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
32851 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
32852 }
32853
32854 /* Restore the current options. */
32855
32856 static void
32857 rs6000_function_specific_restore (struct gcc_options *opts,
32858 struct cl_target_option *ptr)
32859
32860 {
32861 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
32862 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
32863 (void) rs6000_option_override_internal (false);
32864 }
32865
32866 /* Print the current options. */
32867
32868 static void
32869 rs6000_function_specific_print (FILE *file, int indent,
32870 struct cl_target_option *ptr)
32871 {
32872 rs6000_print_isa_options (file, indent, "Isa options set",
32873 ptr->x_rs6000_isa_flags);
32874
32875 rs6000_print_isa_options (file, indent, "Isa options explicit",
32876 ptr->x_rs6000_isa_flags_explicit);
32877 }
32878
32879 /* Helper function to print the current isa or misc options on a line. */
32880
32881 static void
32882 rs6000_print_options_internal (FILE *file,
32883 int indent,
32884 const char *string,
32885 HOST_WIDE_INT flags,
32886 const char *prefix,
32887 const struct rs6000_opt_mask *opts,
32888 size_t num_elements)
32889 {
32890 size_t i;
32891 size_t start_column = 0;
32892 size_t cur_column;
32893 size_t max_column = 76;
32894 const char *comma = "";
32895
32896 if (indent)
32897 start_column += fprintf (file, "%*s", indent, "");
32898
32899 if (!flags)
32900 {
32901 fprintf (file, DEBUG_FMT_S, string, "<none>");
32902 return;
32903 }
32904
32905 start_column += fprintf (file, DEBUG_FMT_WX, string, flags);
32906
32907 /* Print the various mask options. */
32908 cur_column = start_column;
32909 for (i = 0; i < num_elements; i++)
32910 {
32911 if ((flags & opts[i].mask) != 0)
32912 {
32913 const char *no_str = opts[i].invert ? "no-" : "";
32914 size_t len = (strlen (comma)
32915 + strlen (prefix)
32916 + strlen (no_str)
32917 + strlen (opts[i].name));
32918
32919 cur_column += len;
32920 if (cur_column > max_column)
32921 {
32922 fprintf (file, ", \\\n%*s", (int)start_column, "");
32923 cur_column = start_column + len;
32924 comma = "";
32925 }
32926
32927 fprintf (file, "%s%s%s%s", comma, prefix, no_str,
32928 opts[i].name);
32929 flags &= ~ opts[i].mask;
32930 comma = ", ";
32931 }
32932 }
32933
32934 fputs ("\n", file);
32935 }
32936
32937 /* Helper function to print the current isa options on a line. */
32938
32939 static void
32940 rs6000_print_isa_options (FILE *file, int indent, const char *string,
32941 HOST_WIDE_INT flags)
32942 {
32943 rs6000_print_options_internal (file, indent, string, flags, "-m",
32944 &rs6000_opt_masks[0],
32945 ARRAY_SIZE (rs6000_opt_masks));
32946 }
32947
32948 static void
32949 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
32950 HOST_WIDE_INT flags)
32951 {
32952 rs6000_print_options_internal (file, indent, string, flags, "",
32953 &rs6000_builtin_mask_names[0],
32954 ARRAY_SIZE (rs6000_builtin_mask_names));
32955 }
32956
32957 \f
32958 /* Hook to determine if one function can safely inline another. */
32959
32960 static bool
32961 rs6000_can_inline_p (tree caller, tree callee)
32962 {
32963 bool ret = false;
32964 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
32965 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
32966
32967 /* If callee has no option attributes, then it is ok to inline. */
32968 if (!callee_tree)
32969 ret = true;
32970
32971 /* If the caller has no option attributes but the callee does, then it is
32972 not ok to inline. */
32973 else if (!caller_tree)
32974 ret = false;
32975
32976 else
32977 {
32978 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
32979 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
32980
32981 /* Callee's options should be a subset of the caller's, i.e. a vsx function
32982 can inline an altivec function but a non-vsx function can't inline a
32983 vsx function. */
32984 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
32985 == callee_opts->x_rs6000_isa_flags)
32986 ret = true;
32987 }
32988
32989 if (TARGET_DEBUG_TARGET)
32990 fprintf (stderr, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
32991 (DECL_NAME (caller)
32992 ? IDENTIFIER_POINTER (DECL_NAME (caller))
32993 : "<unknown>"),
32994 (DECL_NAME (callee)
32995 ? IDENTIFIER_POINTER (DECL_NAME (callee))
32996 : "<unknown>"),
32997 (ret ? "can" : "cannot"));
32998
32999 return ret;
33000 }
33001 \f
33002 /* Allocate a stack temp and fix up the address so it meets the particular
33003 memory requirements (either offsettable or REG+REG addressing). */
33004
33005 rtx
33006 rs6000_allocate_stack_temp (machine_mode mode,
33007 bool offsettable_p,
33008 bool reg_reg_p)
33009 {
33010 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
33011 rtx addr = XEXP (stack, 0);
33012 int strict_p = (reload_in_progress || reload_completed);
33013
33014 if (!legitimate_indirect_address_p (addr, strict_p))
33015 {
33016 if (offsettable_p
33017 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
33018 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
33019
33020 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
33021 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
33022 }
33023
33024 return stack;
33025 }
33026
33027 /* Given a memory reference, if it is not a reg or reg+reg addressing, convert
33028 to such a form to deal with memory reference instructions like STFIWX that
33029 only take reg+reg addressing. */
33030
33031 rtx
33032 rs6000_address_for_fpconvert (rtx x)
33033 {
33034 int strict_p = (reload_in_progress || reload_completed);
33035 rtx addr;
33036
33037 gcc_assert (MEM_P (x));
33038 addr = XEXP (x, 0);
33039 if (! legitimate_indirect_address_p (addr, strict_p)
33040 && ! legitimate_indexed_address_p (addr, strict_p))
33041 {
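      /* Resolve auto-increment addresses by emitting the pointer update
         now and then addressing through the plain base register.  */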
33042 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
33043 {
33044 rtx reg = XEXP (addr, 0);
33045 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
33046 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
33047 gcc_assert (REG_P (reg));
33048 emit_insn (gen_add3_insn (reg, reg, size_rtx));
33049 addr = reg;
33050 }
33051 else if (GET_CODE (addr) == PRE_MODIFY)
33052 {
33053 rtx reg = XEXP (addr, 0);
33054 rtx expr = XEXP (addr, 1);
33055 gcc_assert (REG_P (reg));
33056 gcc_assert (GET_CODE (expr) == PLUS);
33057 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
33058 addr = reg;
33059 }
33060
33061 x = replace_equiv_address (x, copy_addr_to_reg (addr));
33062 }
33063
33064 return x;
33065 }
33066
33067 /* Given a memory reference, if it is not in the form for altivec memory
33068 reference instructions (i.e. reg or reg+reg addressing with AND of -16),
33069 convert to the altivec format. */
33070
33071 rtx
33072 rs6000_address_for_altivec (rtx x)
33073 {
33074 gcc_assert (MEM_P (x));
33075 if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
33076 {
33077 rtx addr = XEXP (x, 0);
33078 int strict_p = (reload_in_progress || reload_completed);
33079
33080 if (!legitimate_indexed_address_p (addr, strict_p)
33081 && !legitimate_indirect_address_p (addr, strict_p))
33082 addr = copy_to_mode_reg (Pmode, addr);
33083
33084 addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
33085 x = change_address (x, GET_MODE (x), addr);
33086 }
33087
33088 return x;
33089 }
33090
33091 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
33092
33093 On the RS/6000, all integer constants are acceptable, though most won't
33094 be valid for particular insns. Only easy FP constants are acceptable. */
33095
33096 static bool
33097 rs6000_legitimate_constant_p (machine_mode mode, rtx x)
33098 {
33099 if (TARGET_ELF && tls_referenced_p (x))
33100 return false;
33101
33102 return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
33103 || GET_MODE (x) == VOIDmode
33104 || (TARGET_POWERPC64 && mode == DImode)
33105 || easy_fp_constant (x, mode)
33106 || easy_vector_constant (x, mode));
33107 }
33108
33109 \f
33110 /* Return TRUE iff the sequence ending in LAST sets the static chain. */
33111
33112 static bool
33113 chain_already_loaded (rtx_insn *last)
33114 {
33115 for (; last != NULL; last = PREV_INSN (last))
33116 {
33117 if (NONJUMP_INSN_P (last))
33118 {
33119 rtx patt = PATTERN (last);
33120
33121 if (GET_CODE (patt) == SET)
33122 {
33123 rtx lhs = XEXP (patt, 0);
33124
33125 if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
33126 return true;
33127 }
33128 }
33129 }
33130 return false;
33131 }
33132
33133 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
33134
33135 void
33136 rs6000_call_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
33137 {
33138 const bool direct_call_p
33139 = GET_CODE (func_desc) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (func_desc);
33140 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
33141 rtx toc_load = NULL_RTX;
33142 rtx toc_restore = NULL_RTX;
33143 rtx func_addr;
33144 rtx abi_reg = NULL_RTX;
33145 rtx call[4];
33146 int n_call;
33147 rtx insn;
33148
33149 /* Handle longcall attributes. */
33150 if (INTVAL (cookie) & CALL_LONG)
33151 func_desc = rs6000_longcall_ref (func_desc);
33152
33153 /* Handle indirect calls. */
33154 if (GET_CODE (func_desc) != SYMBOL_REF
33155 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func_desc)))
33156 {
33157 /* Save the TOC into its reserved slot before the call,
33158 and prepare to restore it after the call. */
33159 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
33160 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
33161 rtx stack_toc_mem = gen_frame_mem (Pmode,
33162 gen_rtx_PLUS (Pmode, stack_ptr,
33163 stack_toc_offset));
33164 rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
33165 gen_rtvec (1, stack_toc_offset),
33166 UNSPEC_TOCSLOT);
33167 toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);
33168
33169 /* Can we optimize saving the TOC in the prologue or
33170 do we need to do it at every call? */
33171 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
33172 cfun->machine->save_toc_in_prologue = true;
33173 else
33174 {
33175 MEM_VOLATILE_P (stack_toc_mem) = 1;
33176 emit_move_insn (stack_toc_mem, toc_reg);
33177 }
33178
33179 if (DEFAULT_ABI == ABI_ELFv2)
33180 {
33181 /* A function pointer in the ELFv2 ABI is just a plain address, but
33182 the ABI requires it to be loaded into r12 before the call. */
33183 func_addr = gen_rtx_REG (Pmode, 12);
33184 emit_move_insn (func_addr, func_desc);
33185 abi_reg = func_addr;
33186 }
33187 else
33188 {
33189 /* A function pointer under AIX is a pointer to a data area whose
33190 first word contains the actual address of the function, whose
33191 second word contains a pointer to its TOC, and whose third word
33192 contains a value to place in the static chain register (r11).
33193 Note that if we load the static chain, our "trampoline" need
33194 not have any executable code. */
33195
33196 /* Load up address of the actual function. */
33197 func_desc = force_reg (Pmode, func_desc);
33198 func_addr = gen_reg_rtx (Pmode);
33199 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));
33200
33201 /* Prepare to load the TOC of the called function. Note that the
33202 TOC load must happen immediately before the actual call so
33203 that unwinding the TOC registers works correctly. See the
33204 comment in frob_update_context. */
33205 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
33206 rtx func_toc_mem = gen_rtx_MEM (Pmode,
33207 gen_rtx_PLUS (Pmode, func_desc,
33208 func_toc_offset));
33209 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
33210
33211 /* If we have a static chain, load it up. But, if the call was
33212 originally direct, the third word has not been written since no
33213 trampoline has been built, so we ought not to load it, lest we
33214 overwrite a static chain value. */
33215 if (!direct_call_p
33216 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
33217 && !chain_already_loaded (get_current_sequence ()->next->last))
33218 {
33219 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
33220 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
33221 rtx func_sc_mem = gen_rtx_MEM (Pmode,
33222 gen_rtx_PLUS (Pmode, func_desc,
33223 func_sc_offset));
33224 emit_move_insn (sc_reg, func_sc_mem);
33225 abi_reg = sc_reg;
33226 }
33227 }
33228 }
33229 else
33230 {
33231 /* Direct calls use the TOC: for local calls, the callee will
33232 assume the TOC register is set; for non-local calls, the
33233 PLT stub needs the TOC register. */
33234 abi_reg = toc_reg;
33235 func_addr = func_desc;
33236 }
33237
33238 /* Create the call. */
33239 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), flag);
33240 if (value != NULL_RTX)
33241 call[0] = gen_rtx_SET (value, call[0]);
33242 n_call = 1;
33243
33244 if (toc_load)
33245 call[n_call++] = toc_load;
33246 if (toc_restore)
33247 call[n_call++] = toc_restore;
33248
33249 call[n_call++] = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
33250
33251 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
33252 insn = emit_call_insn (insn);
33253
33254 /* Mention all registers defined by the ABI to hold information
33255 as uses in CALL_INSN_FUNCTION_USAGE. */
33256 if (abi_reg)
33257 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
33258 }
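
/* As a rough illustration (registers and the TOC save offset vary by ABI
   and register allocation), an indirect call through an AIX function
   descriptor held in r9 expands to something like

	std 2,40(1)	# save TOC to its reserved stack slot
	ld 0,0(9)	# actual function address
	ld 2,8(9)	# callee's TOC
	ld 11,16(9)	# static chain
	mtctr 0
	bctrl
	ld 2,40(1)	# restore our TOC

   while under ELFv2 the plain function address is simply moved into r12
   before the call.  */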
33259
33260 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
33261
33262 void
33263 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
33264 {
33265 rtx call[2];
33266 rtx insn;
33267
33268 gcc_assert (INTVAL (cookie) == 0);
33269
33270 /* Create the call. */
33271 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), flag);
33272 if (value != NULL_RTX)
33273 call[0] = gen_rtx_SET (value, call[0]);
33274
33275 call[1] = simple_return_rtx;
33276
33277 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
33278 insn = emit_call_insn (insn);
33279
33280 /* Note use of the TOC register. */
33281 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
33282 /* We need to also mark a use of the link register since the function we
33283 sibling-call to will use it to return to our caller. */
33284 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, LR_REGNO));
33285 }
33286
33287 /* Return whether the prologue should save the TOC pointer into its stack
33288 slot, rather than saving it before each indirect call. */
33289
33290 static bool
33291 rs6000_save_toc_in_prologue_p (void)
33292 {
33293 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
33294 }
33295
33296 #ifdef HAVE_GAS_HIDDEN
33297 # define USE_HIDDEN_LINKONCE 1
33298 #else
33299 # define USE_HIDDEN_LINKONCE 0
33300 #endif
33301
33302 /* Fills in the label name that should be used for a 476 link stack thunk. */
33303
33304 void
33305 get_ppc476_thunk_name (char name[32])
33306 {
33307 gcc_assert (TARGET_LINK_STACK);
33308
33309 if (USE_HIDDEN_LINKONCE)
33310 sprintf (name, "__ppc476.get_thunk");
33311 else
33312 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
33313 }
33314
33315 /* This function emits the simple thunk routine that is used to preserve
33316 the link stack on the 476 cpu. */
33317
33318 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
33319 static void
33320 rs6000_code_end (void)
33321 {
33322 char name[32];
33323 tree decl;
33324
33325 if (!TARGET_LINK_STACK)
33326 return;
33327
33328 get_ppc476_thunk_name (name);
33329
33330 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
33331 build_function_type_list (void_type_node, NULL_TREE));
33332 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
33333 NULL_TREE, void_type_node);
33334 TREE_PUBLIC (decl) = 1;
33335 TREE_STATIC (decl) = 1;
33336
33337 #if RS6000_WEAK
33338 if (USE_HIDDEN_LINKONCE)
33339 {
33340 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
33341 targetm.asm_out.unique_section (decl, 0);
33342 switch_to_section (get_named_section (decl, NULL, 0));
33343 DECL_WEAK (decl) = 1;
33344 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
33345 targetm.asm_out.globalize_label (asm_out_file, name);
33346 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
33347 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
33348 }
33349 else
33350 #endif
33351 {
33352 switch_to_section (text_section);
33353 ASM_OUTPUT_LABEL (asm_out_file, name);
33354 }
33355
33356 DECL_INITIAL (decl) = make_node (BLOCK);
33357 current_function_decl = decl;
33358 init_function_start (decl);
33359 first_function_block_is_cold = false;
33360 /* Make sure unwind info is emitted for the thunk if needed. */
33361 final_start_function (emit_barrier (), asm_out_file, 1);
33362
33363 fputs ("\tblr\n", asm_out_file);
33364
33365 final_end_function ();
33366 init_insn_lengths ();
33367 free_after_compilation (cfun);
33368 set_cfun (NULL);
33369 current_function_decl = NULL;
33370 }
33371
33372 /* Add r30 to hard reg set if the prologue sets it up and it is not
33373 pic_offset_table_rtx. */
33374
33375 static void
33376 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
33377 {
33378 if (!TARGET_SINGLE_PIC_BASE
33379 && TARGET_TOC
33380 && TARGET_MINIMAL_TOC
33381 && get_pool_size () != 0)
33382 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
33383 }
33384
33385 \f
33386 /* Helper function for rs6000_split_logical to emit a logical instruction after
33387 splitting the operation into single GPR registers.
33388
33389 DEST is the destination register.
33390 OP1 and OP2 are the input source registers.
33391 CODE is the base operation (AND, IOR, XOR, NOT).
33392 MODE is the machine mode.
33393 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
33394 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
33395 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
33396
33397 static void
33398 rs6000_split_logical_inner (rtx dest,
33399 rtx op1,
33400 rtx op2,
33401 enum rtx_code code,
33402 machine_mode mode,
33403 bool complement_final_p,
33404 bool complement_op1_p,
33405 bool complement_op2_p)
33406 {
33407 rtx bool_rtx;
33408
33409 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
33410 if (op2 && GET_CODE (op2) == CONST_INT
33411 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
33412 && !complement_final_p && !complement_op1_p && !complement_op2_p)
33413 {
33414 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
33415 HOST_WIDE_INT value = INTVAL (op2) & mask;
33416
33417 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
33418 if (code == AND)
33419 {
33420 if (value == 0)
33421 {
33422 emit_insn (gen_rtx_SET (dest, const0_rtx));
33423 return;
33424 }
33425
33426 else if (value == mask)
33427 {
33428 if (!rtx_equal_p (dest, op1))
33429 emit_insn (gen_rtx_SET (dest, op1));
33430 return;
33431 }
33432 }
33433
33434 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
33435 into separate ORI/ORIS or XORI/XORIS instructions. */
33436 else if (code == IOR || code == XOR)
33437 {
33438 if (value == 0)
33439 {
33440 if (!rtx_equal_p (dest, op1))
33441 emit_insn (gen_rtx_SET (dest, op1));
33442 return;
33443 }
33444 }
33445 }
33446
33447 if (code == AND && mode == SImode
33448 && !complement_final_p && !complement_op1_p && !complement_op2_p)
33449 {
33450 emit_insn (gen_andsi3 (dest, op1, op2));
33451 return;
33452 }
33453
33454 if (complement_op1_p)
33455 op1 = gen_rtx_NOT (mode, op1);
33456
33457 if (complement_op2_p)
33458 op2 = gen_rtx_NOT (mode, op2);
33459
33460 /* For canonical RTL, if only one arm is inverted it is the first. */
33461 if (!complement_op1_p && complement_op2_p)
33462 std::swap (op1, op2);
33463
33464 bool_rtx = ((code == NOT)
33465 ? gen_rtx_NOT (mode, op1)
33466 : gen_rtx_fmt_ee (code, mode, op1, op2));
33467
33468 if (complement_final_p)
33469 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
33470
33471 emit_insn (gen_rtx_SET (dest, bool_rtx));
33472 }
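
/* As an illustrative note: the complement flags let callers form the full
   set of fused boolean instructions.  E.g. CODE == AND with
   COMPLEMENT_FINAL_P yields (set d (not (and a b))), matching nand, while
   CODE == AND with COMPLEMENT_OP2_P yields the canonical
   (set d (and (not b) a)) after the operand swap above, matching andc.  */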
33473
33474 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
33475 operations are split immediately during RTL generation to allow for more
33476 optimizations of the AND/IOR/XOR.
33477
33478 OPERANDS is an array containing the destination and two input operands.
33479 CODE is the base operation (AND, IOR, XOR, NOT).
33481 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
33482 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
33483 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
33486
33487 static void
33488 rs6000_split_logical_di (rtx operands[3],
33489 enum rtx_code code,
33490 bool complement_final_p,
33491 bool complement_op1_p,
33492 bool complement_op2_p)
33493 {
33494 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
33495 const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
33496 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
33497 enum hi_lo { hi = 0, lo = 1 };
33498 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
33499 size_t i;
33500
33501 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
33502 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
33503 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
33504 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
33505
33506 if (code == NOT)
33507 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
33508 else
33509 {
33510 if (GET_CODE (operands[2]) != CONST_INT)
33511 {
33512 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
33513 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
33514 }
33515 else
33516 {
33517 HOST_WIDE_INT value = INTVAL (operands[2]);
33518 HOST_WIDE_INT value_hi_lo[2];
33519
33520 gcc_assert (!complement_final_p);
33521 gcc_assert (!complement_op1_p);
33522 gcc_assert (!complement_op2_p);
33523
33524 value_hi_lo[hi] = value >> 32;
33525 value_hi_lo[lo] = value & lower_32bits;
33526
33527 for (i = 0; i < 2; i++)
33528 {
33529 HOST_WIDE_INT sub_value = value_hi_lo[i];
33530
33531 if (sub_value & sign_bit)
33532 sub_value |= upper_32bits;
33533
33534 op2_hi_lo[i] = GEN_INT (sub_value);
33535
33536 /* If this is an AND instruction, check to see if we need to load
33537 the value in a register. */
33538 if (code == AND && sub_value != -1 && sub_value != 0
33539 && !and_operand (op2_hi_lo[i], SImode))
33540 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
33541 }
33542 }
33543 }
33544
33545 for (i = 0; i < 2; i++)
33546 {
33547 /* Split large IOR/XOR operations. */
33548 if ((code == IOR || code == XOR)
33549 && GET_CODE (op2_hi_lo[i]) == CONST_INT
33550 && !complement_final_p
33551 && !complement_op1_p
33552 && !complement_op2_p
33553 && !logical_const_operand (op2_hi_lo[i], SImode))
33554 {
33555 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
33556 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
33557 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
33558 rtx tmp = gen_reg_rtx (SImode);
33559
33560 /* Make sure the constant is sign extended. */
33561 if ((hi_16bits & sign_bit) != 0)
33562 hi_16bits |= upper_32bits;
33563
33564 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
33565 code, SImode, false, false, false);
33566
33567 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
33568 code, SImode, false, false, false);
33569 }
33570 else
33571 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
33572 code, SImode, complement_final_p,
33573 complement_op1_p, complement_op2_p);
33574 }
33575
33576 return;
33577 }
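
/* For example (illustrative registers): on a 32-bit target, a DImode IOR
   with the constant 0x12345678 leaves the high word untouched (its half of
   the constant is 0) and, since 0x12345678 is not a 16-bit logical
   constant, splits the low word into

	oris 4,4,0x1234
	ori 4,4,0x5678

   via two calls to rs6000_split_logical_inner above.  */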
33578
33579 /* Split the insns that make up boolean operations operating on multiple GPR
33580 registers. The boolean MD patterns ensure that the inputs either are
33581 exactly the same as the output registers, or there is no overlap.
33582
33583 OPERANDS is an array containing the destination and two input operands.
33584 CODE is the base operation (AND, IOR, XOR, NOT).
33585 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
33586 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
33587 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
33588
33589 void
33590 rs6000_split_logical (rtx operands[3],
33591 enum rtx_code code,
33592 bool complement_final_p,
33593 bool complement_op1_p,
33594 bool complement_op2_p)
33595 {
33596 machine_mode mode = GET_MODE (operands[0]);
33597 machine_mode sub_mode;
33598 rtx op0, op1, op2;
33599 int sub_size, regno0, regno1, nregs, i;
33600
33601 /* If this is DImode, use the specialized version that can run before
33602 register allocation. */
33603 if (mode == DImode && !TARGET_POWERPC64)
33604 {
33605 rs6000_split_logical_di (operands, code, complement_final_p,
33606 complement_op1_p, complement_op2_p);
33607 return;
33608 }
33609
33610 op0 = operands[0];
33611 op1 = operands[1];
33612 op2 = (code == NOT) ? NULL_RTX : operands[2];
33613 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
33614 sub_size = GET_MODE_SIZE (sub_mode);
33615 regno0 = REGNO (op0);
33616 regno1 = REGNO (op1);
33617
33618 gcc_assert (reload_completed);
33619 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
33620 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
33621
33622 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
33623 gcc_assert (nregs > 1);
33624
33625 if (op2 && REG_P (op2))
33626 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
33627
33628 for (i = 0; i < nregs; i++)
33629 {
33630 int offset = i * sub_size;
33631 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
33632 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
33633 rtx sub_op2 = ((code == NOT)
33634 ? NULL_RTX
33635 : simplify_subreg (sub_mode, op2, mode, offset));
33636
33637 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
33638 complement_final_p, complement_op1_p,
33639 complement_op2_p);
33640 }
33641
33642 return;
33643 }
33644
33645 \f
33646 /* Return true if the peephole2 pass can combine an addis instruction and a
33647 load with an offset into a pair that can be fused together on
33648 a power8. */
33649
33650 bool
33651 fusion_gpr_load_p (rtx addis_reg, /* register set via addis. */
33652 rtx addis_value, /* addis value. */
33653 rtx target, /* target register that is loaded. */
33654 rtx mem) /* bottom part of the memory addr. */
33655 {
33656 rtx addr;
33657 rtx base_reg;
33658
33659 /* Validate arguments. */
33660 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
33661 return false;
33662
33663 if (!base_reg_operand (target, GET_MODE (target)))
33664 return false;
33665
33666 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
33667 return false;
33668
33669 /* Allow sign/zero extension. */
33670 if (GET_CODE (mem) == ZERO_EXTEND
33671 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
33672 mem = XEXP (mem, 0);
33673
33674 if (!MEM_P (mem))
33675 return false;
33676
33677 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
33678 return false;
33679
33680 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
33681 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
33682 return false;
33683
33684 /* Validate that the register used to load the high value is either the
33685 register being loaded, or we can safely replace its use.
33686
33687 This function is only called from the peephole2 pass and we assume that
33688 there are 2 instructions in the peephole (addis and load), so we
33689 check that the target register is not used in the memory address and that
33690 the register holding the addis result is dead after the peephole. */
33691 if (REGNO (addis_reg) != REGNO (target))
33692 {
33693 if (reg_mentioned_p (target, mem))
33694 return false;
33695
33696 if (!peep2_reg_dead_p (2, addis_reg))
33697 return false;
33698
33699 /* If the target register being loaded is the stack pointer, we must
33700 avoid loading any other value into it, even temporarily. */
33701 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
33702 return false;
33703 }
33704
33705 base_reg = XEXP (addr, 0);
33706 return REGNO (addis_reg) == REGNO (base_reg);
33707 }
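
/* Illustrative shape of the two-insn peephole that is validated here
   (pseudo register numbers invented for the example):

	(set (reg:DI 9) (plus:DI (reg:DI 2) (const_int 65536)))      ; addis 9,2,1
	(set (reg:DI 10) (mem:DI (plus:DI (reg:DI 9) (const_int 8)))) ; ld 10,8(9)

   which qualifies provided r9 is dead after the load (or r9 and r10 are
   the same register).  */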
33708
33709 /* During the peephole2 pass, adjust and expand the insns for a load fusion
33710 sequence. We adjust the addis register to use the target register. If the
33711 load sign extends, we rewrite it as a zero-extending load followed by an
33712 explicit sign extension, since the fusion only covers zero-extending
33713 loads.
33714
33715 The operands are:
33716 operands[0] register set with addis (to be replaced with target)
33717 operands[1] value set via addis
33718 operands[2] target register being loaded
33719 operands[3] D-form memory reference using operands[0]. */
33720
33721 void
33722 expand_fusion_gpr_load (rtx *operands)
33723 {
33724 rtx addis_value = operands[1];
33725 rtx target = operands[2];
33726 rtx orig_mem = operands[3];
33727 rtx new_addr, new_mem, orig_addr, offset;
33728 enum rtx_code plus_or_lo_sum;
33729 machine_mode target_mode = GET_MODE (target);
33730 machine_mode extend_mode = target_mode;
33731 machine_mode ptr_mode = Pmode;
33732 enum rtx_code extend = UNKNOWN;
33733
33734 if (GET_CODE (orig_mem) == ZERO_EXTEND
33735 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
33736 {
33737 extend = GET_CODE (orig_mem);
33738 orig_mem = XEXP (orig_mem, 0);
33739 target_mode = GET_MODE (orig_mem);
33740 }
33741
33742 gcc_assert (MEM_P (orig_mem));
33743
33744 orig_addr = XEXP (orig_mem, 0);
33745 plus_or_lo_sum = GET_CODE (orig_addr);
33746 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
33747
33748 offset = XEXP (orig_addr, 1);
33749 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
33750 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
33751
33752 if (extend != UNKNOWN)
33753 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
33754
33755 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
33756 UNSPEC_FUSION_GPR);
33757 emit_insn (gen_rtx_SET (target, new_mem));
33758
33759 if (extend == SIGN_EXTEND)
33760 {
33761 int sub_off = ((BYTES_BIG_ENDIAN)
33762 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
33763 : 0);
33764 rtx sign_reg
33765 = simplify_subreg (target_mode, target, extend_mode, sub_off);
33766
33767 emit_insn (gen_rtx_SET (target,
33768 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
33769 }
33770
33771 return;
33772 }
33773
33774 /* Return a string to fuse an addis instruction with a gpr load into the same
33775 register that the addis instruction set. The address that is used
33776 is the logical address that was formed during peephole2:
33777 (lo_sum (high) (low-part))
33778
33779 The code is complicated, so we call output_asm_insn directly, and just
33780 return "". */
33781
33782 const char *
33783 emit_fusion_gpr_load (rtx target, rtx mem)
33784 {
33785 rtx addis_value;
33786 rtx fuse_ops[10];
33787 rtx addr;
33788 rtx load_offset;
33789 const char *addis_str = NULL;
33790 const char *load_str = NULL;
33791 const char *mode_name = NULL;
33792 char insn_template[80];
33793 machine_mode mode;
33794 const char *comment_str = ASM_COMMENT_START;
33795
33796 if (GET_CODE (mem) == ZERO_EXTEND)
33797 mem = XEXP (mem, 0);
33798
33799 gcc_assert (REG_P (target) && MEM_P (mem));
33800
33801 if (*comment_str == ' ')
33802 comment_str++;
33803
33804 addr = XEXP (mem, 0);
33805 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
33806 gcc_unreachable ();
33807
33808 addis_value = XEXP (addr, 0);
33809 load_offset = XEXP (addr, 1);
33810
33811 /* Now emit the load instruction to the same register. */
33812 mode = GET_MODE (mem);
33813 switch (mode)
33814 {
33815 case QImode:
33816 mode_name = "char";
33817 load_str = "lbz";
33818 break;
33819
33820 case HImode:
33821 mode_name = "short";
33822 load_str = "lhz";
33823 break;
33824
33825 case SImode:
33826 mode_name = "int";
33827 load_str = "lwz";
33828 break;
33829
33830 case DImode:
33831 gcc_assert (TARGET_POWERPC64);
33832 mode_name = "long";
33833 load_str = "ld";
33834 break;
33835
33836 default:
33837 gcc_unreachable ();
33838 }
33839
33840 /* Emit the addis instruction. */
33841 fuse_ops[0] = target;
33842 if (satisfies_constraint_L (addis_value))
33843 {
33844 fuse_ops[1] = addis_value;
33845 addis_str = "lis %0,%v1";
33846 }
33847
33848 else if (GET_CODE (addis_value) == PLUS)
33849 {
33850 rtx op0 = XEXP (addis_value, 0);
33851 rtx op1 = XEXP (addis_value, 1);
33852
33853 if (REG_P (op0) && CONST_INT_P (op1)
33854 && satisfies_constraint_L (op1))
33855 {
33856 fuse_ops[1] = op0;
33857 fuse_ops[2] = op1;
33858 addis_str = "addis %0,%1,%v2";
33859 }
33860 }
33861
33862 else if (GET_CODE (addis_value) == HIGH)
33863 {
33864 rtx value = XEXP (addis_value, 0);
33865 if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
33866 {
33867 fuse_ops[1] = XVECEXP (value, 0, 0); /* symbol ref. */
33868 fuse_ops[2] = XVECEXP (value, 0, 1); /* TOC register. */
33869 if (TARGET_ELF)
33870 addis_str = "addis %0,%2,%1@toc@ha";
33871
33872 else if (TARGET_XCOFF)
33873 addis_str = "addis %0,%1@u(%2)";
33874
33875 else
33876 gcc_unreachable ();
33877 }
33878
33879 else if (GET_CODE (value) == PLUS)
33880 {
33881 rtx op0 = XEXP (value, 0);
33882 rtx op1 = XEXP (value, 1);
33883
33884 if (GET_CODE (op0) == UNSPEC
33885 && XINT (op0, 1) == UNSPEC_TOCREL
33886 && CONST_INT_P (op1))
33887 {
33888 fuse_ops[1] = XVECEXP (op0, 0, 0); /* symbol ref. */
33889 fuse_ops[2] = XVECEXP (op0, 0, 1); /* TOC register. */
33890 fuse_ops[3] = op1;
33891 if (TARGET_ELF)
33892 addis_str = "addis %0,%2,%1+%3@toc@ha";
33893
33894 else if (TARGET_XCOFF)
33895 addis_str = "addis %0,%1+%3@u(%2)";
33896
33897 else
33898 gcc_unreachable ();
33899 }
33900 }
33901
33902 else if (satisfies_constraint_L (value))
33903 {
33904 fuse_ops[1] = value;
33905 addis_str = "lis %0,%v1";
33906 }
33907
33908 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
33909 {
33910 fuse_ops[1] = value;
33911 addis_str = "lis %0,%1@ha";
33912 }
33913 }
33914
33915 if (!addis_str)
33916 fatal_insn ("Could not generate addis value for fusion", addis_value);
33917
33918 sprintf (insn_template, "%s\t\t%s gpr load fusion, type %s", addis_str,
33919 comment_str, mode_name);
33920 output_asm_insn (insn_template, fuse_ops);
33921
33922 /* Emit the D-form load instruction. */
33923 if (CONST_INT_P (load_offset) && satisfies_constraint_I (load_offset))
33924 {
33925 sprintf (insn_template, "%s %%0,%%1(%%0)", load_str);
33926 fuse_ops[1] = load_offset;
33927 output_asm_insn (insn_template, fuse_ops);
33928 }
33929
33930 else if (GET_CODE (load_offset) == UNSPEC
33931 && XINT (load_offset, 1) == UNSPEC_TOCREL)
33932 {
33933 if (TARGET_ELF)
33934 sprintf (insn_template, "%s %%0,%%1@toc@l(%%0)", load_str);
33935
33936 else if (TARGET_XCOFF)
33937 sprintf (insn_template, "%s %%0,%%1@l(%%0)", load_str);
33938
33939 else
33940 gcc_unreachable ();
33941
33942 fuse_ops[1] = XVECEXP (load_offset, 0, 0);
33943 output_asm_insn (insn_template, fuse_ops);
33944 }
33945
33946 else if (GET_CODE (load_offset) == PLUS
33947 && GET_CODE (XEXP (load_offset, 0)) == UNSPEC
33948 && XINT (XEXP (load_offset, 0), 1) == UNSPEC_TOCREL
33949 && CONST_INT_P (XEXP (load_offset, 1)))
33950 {
33951 rtx tocrel_unspec = XEXP (load_offset, 0);
33952 if (TARGET_ELF)
33953 sprintf (insn_template, "%s %%0,%%1+%%2@toc@l(%%0)", load_str);
33954
33955 else if (TARGET_XCOFF)
33956 sprintf (insn_template, "%s %%0,%%1+%%2@l(%%0)", load_str);
33957
33958 else
33959 gcc_unreachable ();
33960
33961 fuse_ops[1] = XVECEXP (tocrel_unspec, 0, 0);
33962 fuse_ops[2] = XEXP (load_offset, 1);
33963 output_asm_insn (insn_template, fuse_ops);
33964 }
33965
33966 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (load_offset))
33967 {
33968 sprintf (insn_template, "%s %%0,%%1@l(%%0)", load_str);
33969
33970 fuse_ops[1] = load_offset;
33971 output_asm_insn (insn_template, fuse_ops);
33972 }
33973
33974 else
33975 fatal_insn ("Unable to generate load offset for fusion", load_offset);
33976
33977 return "";
33978 }
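
/* For a TOC-relative int load on ELF, the routine above prints a fused
   pair along the lines of (illustrative output):

	addis 9,2,.LC0@toc@ha	# gpr load fusion, type int
	lwz 9,.LC0@toc@l(9)

   so the addis target and the loaded register are one and the same.  */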
33979 \f
33980 /* Analyze vector computations and remove unnecessary doubleword
33981 swaps (xxswapdi instructions). This pass is performed only
33982 for little-endian VSX code generation.
33983
33984 For this specific case, loads and stores of 4x32 and 2x64 vectors
33985 are inefficient. These are implemented using the lvxd2x and
33986 stvxd2x instructions, which invert the order of doublewords in
33987 a vector register. Thus the code generation inserts an xxswapdi
33988 after each such load, and prior to each such store. (For spill
33989 code after register assignment, an additional xxswapdi is inserted
33990 following each store in order to return a hard register to its
33991 unpermuted value.)
33992
33993 The extra xxswapdi instructions reduce performance. This can be
33994 particularly bad for vectorized code. The purpose of this pass
33995 is to reduce the number of xxswapdi instructions required for
33996 correctness.
33997
33998 The primary insight is that much code that operates on vectors
33999 does not care about the relative order of elements in a register,
34000 so long as the correct memory order is preserved. If we have
34001 a computation where all input values are provided by lvxd2x/xxswapdi
34002 sequences, all outputs are stored using xxswapdi/stvxd2x sequences,
34003 and all intermediate computations are pure SIMD (independent of
34004 element order), then all the xxswapdi's associated with the loads
34005 and stores may be removed.
34006
34007 This pass uses some of the infrastructure and logical ideas from
34008 the "web" pass in web.c. We create maximal webs of computations
34009 fitting the description above using union-find. Each such web is
34010 then optimized by removing its unnecessary xxswapdi instructions.
34011
34012 The pass is placed prior to global optimization so that we can
34013 perform the optimization in the safest and simplest way possible;
34014 that is, by replacing each xxswapdi insn with a register copy insn.
34015 Subsequent forward propagation will remove copies where possible.
34016
34017 There are some operations sensitive to element order for which we
34018 can still allow the operation, provided we modify those operations.
34019 These include CONST_VECTORs, for which we must swap the first and
34020 second halves of the constant vector; and SUBREGs, for which we
34021 must adjust the byte offset to account for the swapped doublewords.
34022 A remaining opportunity would be non-immediate-form splats, for
34023 which we should adjust the selected lane of the input. We should
34024 also make code generation adjustments for sum-across operations,
34025 since this is a common vectorizer reduction.
34026
34027 Because we run prior to the first split, we can see loads and stores
34028 here that match *vsx_le_perm_{load,store}_<mode>. These are vanilla
34029 vector loads and stores that have not yet been split into a permuting
34030 load/store and a swap. (One way this can happen is with a builtin
34031 call to vec_vsx_{ld,st}.) We can handle these as well, but rather
34032 than deleting a swap, we convert the load/store into a permuting
34033 load/store (which effectively removes the swap). */
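
/* A small before/after sketch of the intended effect (illustrative
   little-endian code; registers invented).  Before:

	lxvd2x 0,0,3
	xxswapd 0,0
	xvadddp 0,0,0
	xxswapd 0,0
	stxvd2x 0,0,4

   After, once every insn in the web is either a removable swap or is
   independent of element order:

	lxvd2x 0,0,3
	xvadddp 0,0,0
	stxvd2x 0,0,4  */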
34034
34035 /* Notes on Permutes
34036
34037 We do not currently handle computations that contain permutes. There
34038 is a general transformation that can be performed correctly, but it
34039 may introduce more expensive code than it replaces. To handle these
34040 would require a cost model to determine when to perform the optimization.
34041 This commentary records how this could be done if desired.
34042
34043 The most general permute is something like this (example for V16QI):
34044
34045 (vec_select:V16QI (vec_concat:V32QI (op1:V16QI) (op2:V16QI))
34046 (parallel [(const_int a0) (const_int a1)
34047 ...
34048 (const_int a14) (const_int a15)]))
34049
34050 where a0,...,a15 are in [0,31] and select the elements from op1 and op2
34051 that appear in the result.
34052
34053 Regardless of mode, we can convert the PARALLEL to a mask of 16
34054 byte-element selectors. Let's call this M, with M[i] representing
34055 the ith byte-element selector value. Then if we swap doublewords
34056 throughout the computation, we can get correct behavior by replacing
34057 M with M' as follows:
34058
34059           { M[i+8]+8 : i < 8, M[i+8] in [0,7] U [16,23]
34060 M'[i] =   { M[i+8]-8 : i < 8, M[i+8] in [8,15] U [24,31]
34061           { M[i-8]+8 : i >= 8, M[i-8] in [0,7] U [16,23]
34062           { M[i-8]-8 : i >= 8, M[i-8] in [8,15] U [24,31]
34063
34064 This seems promising at first, since we are just replacing one mask
34065 with another. But certain masks are preferable to others. If M
34066 is a mask that matches a vmrghh pattern, for example, M' certainly
34067 will not. Instead of a single vmrghh, we would generate a load of
34068 M' and a vperm. So we would need to know how many xxswapd's we can
34069 remove as a result of this transformation to determine if it's
34070 profitable; and preferably the logic would need to be aware of all
34071 the special preferable masks.
34072
34073 Another form of permute is an UNSPEC_VPERM, in which the mask is
34074 already in a register. In some cases, this mask may be a constant
34075 that we can discover with ud-chains, in which case the above
34076 transformation is ok. However, the common usage here is for the
34077 mask to be produced by an UNSPEC_LVSL, in which case the mask
34078 cannot be known at compile time. In such a case we would have to
34079 generate several instructions to compute M' as above at run time,
34080 and a cost model is needed again. */
34081
34082 /* This is based on the union-find logic in web.c. web_entry_base is
34083 defined in df.h. */
34084 class swap_web_entry : public web_entry_base
34085 {
34086 public:
34087 /* Pointer to the insn. */
34088 rtx_insn *insn;
34089 /* Set if insn contains a mention of a vector register. All other
34090 fields are undefined if this field is unset. */
34091 unsigned int is_relevant : 1;
34092 /* Set if insn is a load. */
34093 unsigned int is_load : 1;
34094 /* Set if insn is a store. */
34095 unsigned int is_store : 1;
34096 /* Set if insn is a doubleword swap. This can either be a register swap
34097 or a permuting load or store (test is_load and is_store for this). */
34098 unsigned int is_swap : 1;
34099 /* Set if the insn has a live-in use of a parameter register. */
34100 unsigned int is_live_in : 1;
34101 /* Set if the insn has a live-out def of a return register. */
34102 unsigned int is_live_out : 1;
34103 /* Set if the insn contains a subreg reference of a vector register. */
34104 unsigned int contains_subreg : 1;
34105 /* Set if the insn contains a 128-bit integer operand. */
34106 unsigned int is_128_int : 1;
34107 /* Set if this is a call-insn. */
34108 unsigned int is_call : 1;
34109 /* Set if this insn does not perform a vector operation for which
34110 element order matters, or if we know how to fix it up if it does.
34111 Undefined if is_swap is set. */
34112 unsigned int is_swappable : 1;
34113 /* A nonzero value indicates what kind of special handling for this
34114 insn is required if doublewords are swapped. Undefined if
34115 is_swappable is not set. */
34116 unsigned int special_handling : 3;
34117 /* Set if the web represented by this entry cannot be optimized. */
34118 unsigned int web_not_optimizable : 1;
34119 /* Set if this insn should be deleted. */
34120 unsigned int will_delete : 1;
34121 };
34122
34123 enum special_handling_values {
34124 SH_NONE = 0,
34125 SH_CONST_VECTOR,
34126 SH_SUBREG,
34127 SH_NOSWAP_LD,
34128 SH_NOSWAP_ST,
34129 SH_EXTRACT,
34130 SH_SPLAT
34131 };
34132
34133 /* Union INSN with all insns containing definitions that reach USE.
34134 Detect whether USE is live-in to the current function. */
34135 static void
34136 union_defs (swap_web_entry *insn_entry, rtx insn, df_ref use)
34137 {
34138 struct df_link *link = DF_REF_CHAIN (use);
34139
34140 if (!link)
34141 insn_entry[INSN_UID (insn)].is_live_in = 1;
34142
34143 while (link)
34144 {
34145 if (DF_REF_IS_ARTIFICIAL (link->ref))
34146 insn_entry[INSN_UID (insn)].is_live_in = 1;
34147
34148 if (DF_REF_INSN_INFO (link->ref))
34149 {
34150 rtx def_insn = DF_REF_INSN (link->ref);
34151 (void)unionfind_union (insn_entry + INSN_UID (insn),
34152 insn_entry + INSN_UID (def_insn));
34153 }
34154
34155 link = link->next;
34156 }
34157 }
34158
34159 /* Union INSN with all insns containing uses reached from DEF.
34160 Detect whether DEF is live-out from the current function. */
34161 static void
34162 union_uses (swap_web_entry *insn_entry, rtx insn, df_ref def)
34163 {
34164 struct df_link *link = DF_REF_CHAIN (def);
34165
34166 if (!link)
34167 insn_entry[INSN_UID (insn)].is_live_out = 1;
34168
34169 while (link)
34170 {
34171 /* This could be an eh use or some other artificial use;
34172 we treat these all the same (killing the optimization). */
34173 if (DF_REF_IS_ARTIFICIAL (link->ref))
34174 insn_entry[INSN_UID (insn)].is_live_out = 1;
34175
34176 if (DF_REF_INSN_INFO (link->ref))
34177 {
34178 rtx use_insn = DF_REF_INSN (link->ref);
34179 (void)unionfind_union (insn_entry + INSN_UID (insn),
34180 insn_entry + INSN_UID (use_insn));
34181 }
34182
34183 link = link->next;
34184 }
34185 }
34186
34187 /* Return 1 iff INSN is a load insn, including permuting loads that
34188 represent an lvxd2x instruction; else return 0. */
34189 static unsigned int
34190 insn_is_load_p (rtx insn)
34191 {
34192 rtx body = PATTERN (insn);
34193
34194 if (GET_CODE (body) == SET)
34195 {
34196 if (GET_CODE (SET_SRC (body)) == MEM)
34197 return 1;
34198
34199 if (GET_CODE (SET_SRC (body)) == VEC_SELECT
34200 && GET_CODE (XEXP (SET_SRC (body), 0)) == MEM)
34201 return 1;
34202
34203 return 0;
34204 }
34205
34206 if (GET_CODE (body) != PARALLEL)
34207 return 0;
34208
34209 rtx set = XVECEXP (body, 0, 0);
34210
34211 if (GET_CODE (set) == SET && GET_CODE (SET_SRC (set)) == MEM)
34212 return 1;
34213
34214 return 0;
34215 }
34216
34217 /* Return 1 iff INSN is a store insn, including permuting stores that
34218 represent an stvxd2x instruction; else return 0. */
34219 static unsigned int
34220 insn_is_store_p (rtx insn)
34221 {
34222 rtx body = PATTERN (insn);
34223 if (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == MEM)
34224 return 1;
34225 if (GET_CODE (body) != PARALLEL)
34226 return 0;
34227 rtx set = XVECEXP (body, 0, 0);
34228 if (GET_CODE (set) == SET && GET_CODE (SET_DEST (set)) == MEM)
34229 return 1;
34230 return 0;
34231 }
34232
34233 /* Return 1 iff INSN swaps doublewords. This may be a reg-reg swap,
34234 a permuting load, or a permuting store. */
34235 static unsigned int
34236 insn_is_swap_p (rtx insn)
34237 {
34238 rtx body = PATTERN (insn);
34239 if (GET_CODE (body) != SET)
34240 return 0;
34241 rtx rhs = SET_SRC (body);
34242 if (GET_CODE (rhs) != VEC_SELECT)
34243 return 0;
34244 rtx parallel = XEXP (rhs, 1);
34245 if (GET_CODE (parallel) != PARALLEL)
34246 return 0;
34247 unsigned int len = XVECLEN (parallel, 0);
34248 if (len != 2 && len != 4 && len != 8 && len != 16)
34249 return 0;
34250 for (unsigned int i = 0; i < len / 2; ++i)
34251 {
34252 rtx op = XVECEXP (parallel, 0, i);
34253 if (GET_CODE (op) != CONST_INT || INTVAL (op) != len / 2 + i)
34254 return 0;
34255 }
34256 for (unsigned int i = len / 2; i < len; ++i)
34257 {
34258 rtx op = XVECEXP (parallel, 0, i);
34259 if (GET_CODE (op) != CONST_INT || INTVAL (op) != i - len / 2)
34260 return 0;
34261 }
34262 return 1;
34263 }
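
/* For instance, the V4SI register swap recognized above has the form

   (set (reg:V4SI 0)
	(vec_select:V4SI (reg:V4SI 1)
			 (parallel [(const_int 2) (const_int 3)
				    (const_int 0) (const_int 1)])))

   (pseudo register numbers are illustrative).  */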
34264
34265 /* Return 1 iff OP is an operand that will not be affected by having
34266 vector doublewords swapped in memory. */
34267 static unsigned int
34268 rtx_is_swappable_p (rtx op, unsigned int *special)
34269 {
34270 enum rtx_code code = GET_CODE (op);
34271 int i, j;
34272 rtx parallel;
34273
34274 switch (code)
34275 {
34276 case LABEL_REF:
34277 case SYMBOL_REF:
34278 case CLOBBER:
34279 case REG:
34280 return 1;
34281
34282 case VEC_CONCAT:
34283 case ASM_INPUT:
34284 case ASM_OPERANDS:
34285 return 0;
34286
34287 case CONST_VECTOR:
34288 {
34289 *special = SH_CONST_VECTOR;
34290 return 1;
34291 }
34292
34293 case VEC_DUPLICATE:
34294 /* Opportunity: If XEXP (op, 0) has the same mode as the result,
34295 and XEXP (op, 1) is a PARALLEL with a single QImode const int,
34296 it represents a vector splat for which we can do special
34297 handling. */
34298 if (GET_CODE (XEXP (op, 0)) == CONST_INT)
34299 return 1;
34300 else if (GET_CODE (XEXP (op, 0)) == REG
34301 && GET_MODE_INNER (GET_MODE (op)) == GET_MODE (XEXP (op, 0)))
34302 /* This catches V2DF and V2DI splat, at a minimum. */
34303 return 1;
34304 else if (GET_CODE (XEXP (op, 0)) == VEC_SELECT)
34305 /* If the duplicated item is from a select, defer to the select
34306 processing to see if we can change the lane for the splat. */
34307 return rtx_is_swappable_p (XEXP (op, 0), special);
34308 else
34309 return 0;
34310
34311 case VEC_SELECT:
34312 /* A vec_extract operation is ok if we change the lane. */
34313 if (GET_CODE (XEXP (op, 0)) == REG
34314 && GET_MODE_INNER (GET_MODE (XEXP (op, 0))) == GET_MODE (op)
34315 && GET_CODE ((parallel = XEXP (op, 1))) == PARALLEL
34316 && XVECLEN (parallel, 0) == 1
34317 && GET_CODE (XVECEXP (parallel, 0, 0)) == CONST_INT)
34318 {
34319 *special = SH_EXTRACT;
34320 return 1;
34321 }
34322 else
34323 return 0;
34324
34325 case UNSPEC:
34326 {
34327 /* Various operations are unsafe for this optimization, at least
34328 without significant additional work. Permutes are obviously
34329 problematic, as both the permute control vector and the ordering
34330 of the target values are invalidated by doubleword swapping.
34331 Vector pack and unpack modify the number of vector lanes.
34332 Merge-high/low will not operate correctly on swapped operands.
34333 Vector shifts across element boundaries are clearly uncool,
34334 as are vector select and concatenate operations. Vector
34335 sum-across instructions define one operand with a specific
34336 order-dependent element, so additional fixup code would be
34337 needed to make those work. Vector set and non-immediate-form
34338 vector splat are element-order sensitive. A few of these
34339 cases might be workable with special handling if required.
34340 Adding cost modeling would be appropriate in some cases. */
34341 int val = XINT (op, 1);
34342 switch (val)
34343 {
34344 default:
34345 break;
34346 case UNSPEC_VMRGH_DIRECT:
34347 case UNSPEC_VMRGL_DIRECT:
34348 case UNSPEC_VPACK_SIGN_SIGN_SAT:
34349 case UNSPEC_VPACK_SIGN_UNS_SAT:
34350 case UNSPEC_VPACK_UNS_UNS_MOD:
34351 case UNSPEC_VPACK_UNS_UNS_MOD_DIRECT:
34352 case UNSPEC_VPACK_UNS_UNS_SAT:
34353 case UNSPEC_VPERM:
34354 case UNSPEC_VPERM_UNS:
34355 case UNSPEC_VPERMHI:
34356 case UNSPEC_VPERMSI:
34357 case UNSPEC_VPKPX:
34358 case UNSPEC_VSLDOI:
34359 case UNSPEC_VSLO:
34360 case UNSPEC_VSRO:
34361 case UNSPEC_VSUM2SWS:
34362 case UNSPEC_VSUM4S:
34363 case UNSPEC_VSUM4UBS:
34364 case UNSPEC_VSUMSWS:
34365 case UNSPEC_VSUMSWS_DIRECT:
34366 case UNSPEC_VSX_CONCAT:
34367 case UNSPEC_VSX_SET:
34368 case UNSPEC_VSX_SLDWI:
34369 case UNSPEC_VUNPACK_HI_SIGN:
34370 case UNSPEC_VUNPACK_HI_SIGN_DIRECT:
34371 case UNSPEC_VUNPACK_LO_SIGN:
34372 case UNSPEC_VUNPACK_LO_SIGN_DIRECT:
34373 case UNSPEC_VUPKHPX:
34374 case UNSPEC_VUPKHS_V4SF:
34375 case UNSPEC_VUPKHU_V4SF:
34376 case UNSPEC_VUPKLPX:
34377 case UNSPEC_VUPKLS_V4SF:
34378 case UNSPEC_VUPKLU_V4SF:
34379 case UNSPEC_VSX_CVDPSPN:
34380 case UNSPEC_VSX_CVSPDP:
34381 case UNSPEC_VSX_CVSPDPN:
34382 return 0;
34383 case UNSPEC_VSPLT_DIRECT:
34384 *special = SH_SPLAT;
34385 return 1;
34386 }
34387 }
34388
34389 default:
34390 break;
34391 }
34392
34393 const char *fmt = GET_RTX_FORMAT (code);
34394 int ok = 1;
34395
34396 for (i = 0; i < GET_RTX_LENGTH (code); ++i)
34397 if (fmt[i] == 'e' || fmt[i] == 'u')
34398 {
34399 unsigned int special_op = SH_NONE;
34400 ok &= rtx_is_swappable_p (XEXP (op, i), &special_op);
34401 if (special_op == SH_NONE)
34402 continue;
34403 /* Ensure we never have two kinds of special handling
34404 for the same insn. */
34405 if (*special != SH_NONE && *special != special_op)
34406 return 0;
34407 *special = special_op;
34408 }
34409 else if (fmt[i] == 'E')
34410 for (j = 0; j < XVECLEN (op, i); ++j)
34411 {
34412 unsigned int special_op = SH_NONE;
34413 ok &= rtx_is_swappable_p (XVECEXP (op, i, j), &special_op);
34414 if (special_op == SH_NONE)
34415 continue;
34416 /* Ensure we never have two kinds of special handling
34417 for the same insn. */
34418 if (*special != SH_NONE && *special != special_op)
34419 return 0;
34420 *special = special_op;
34421 }
34422
34423 return ok;
34424 }
34425
34426 /* Return 1 iff INSN is an insn that will not be affected by
34427 having vector doublewords swapped in memory (in which case
34428 *SPECIAL is unchanged), or that can be modified to be correct
34429 if vector doublewords are swapped in memory (in which case
34430 *SPECIAL is changed to a value indicating how). */
34431 static unsigned int
34432 insn_is_swappable_p (swap_web_entry *insn_entry, rtx insn,
34433 unsigned int *special)
34434 {
34435 /* Calls are always bad. */
34436 if (GET_CODE (insn) == CALL_INSN)
34437 return 0;
34438
34439 /* Loads and stores seen here are not permuting, but we can still
34440 fix them up by converting them to permuting ones. Exceptions:
34441 UNSPEC_LVE, UNSPEC_LVX, and UNSPEC_STVX, which have a PARALLEL
34442 body instead of a SET; and UNSPEC_STVE, which has an UNSPEC
34443 for the SET source. */
34444 rtx body = PATTERN (insn);
34445 int i = INSN_UID (insn);
34446
34447 if (insn_entry[i].is_load)
34448 {
34449 if (GET_CODE (body) == SET)
34450 {
34451 *special = SH_NOSWAP_LD;
34452 return 1;
34453 }
34454 else
34455 return 0;
34456 }
34457
34458 if (insn_entry[i].is_store)
34459 {
34460 if (GET_CODE (body) == SET && GET_CODE (SET_SRC (body)) != UNSPEC)
34461 {
34462 *special = SH_NOSWAP_ST;
34463 return 1;
34464 }
34465 else
34466 return 0;
34467 }
34468
34469 /* A convert to single precision can be left as is provided that
34470 all of its uses are in xxspltw instructions that splat BE element
34471 zero. */
34472 if (GET_CODE (body) == SET
34473 && GET_CODE (SET_SRC (body)) == UNSPEC
34474 && XINT (SET_SRC (body), 1) == UNSPEC_VSX_CVDPSPN)
34475 {
34476 df_ref def;
34477 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
34478
34479 FOR_EACH_INSN_INFO_DEF (def, insn_info)
34480 {
34481 struct df_link *link = DF_REF_CHAIN (def);
34482 if (!link)
34483 return 0;
34484
34485 for (; link; link = link->next)
    {
34486 rtx use_insn = DF_REF_INSN (link->ref);
34487 rtx use_body = PATTERN (use_insn);
34488 if (GET_CODE (use_body) != SET
34489 || GET_CODE (SET_SRC (use_body)) != UNSPEC
34490 || XINT (SET_SRC (use_body), 1) != UNSPEC_VSX_XXSPLTW
34491 || XEXP (XEXP (SET_SRC (use_body), 0), 1) != const0_rtx)
34492 return 0;
34493 }
34494 }
34495
34496 return 1;
34497 }
34498
34499 /* Otherwise check the operands for vector lane violations. */
34500 return rtx_is_swappable_p (body, special);
34501 }
34502
34503 enum chain_purpose { FOR_LOADS, FOR_STORES };
34504
34505 /* Return true if the UD or DU chain headed by LINK is non-empty,
34506 and every entry on the chain references an insn that is a
34507 register swap. Furthermore, if PURPOSE is FOR_LOADS, each such
34508 register swap must have only permuting loads as reaching defs.
34509 If PURPOSE is FOR_STORES, each such register swap must have only
34510 register swaps or permuting stores as reached uses. */
34511 static bool
34512 chain_contains_only_swaps (swap_web_entry *insn_entry, struct df_link *link,
34513 enum chain_purpose purpose)
34514 {
34515 if (!link)
34516 return false;
34517
34518 for (; link; link = link->next)
34519 {
34520 if (!VECTOR_MODE_P (GET_MODE (DF_REF_REG (link->ref))))
34521 continue;
34522
34523 if (DF_REF_IS_ARTIFICIAL (link->ref))
34524 return false;
34525
34526 rtx reached_insn = DF_REF_INSN (link->ref);
34527 unsigned uid = INSN_UID (reached_insn);
34528 struct df_insn_info *insn_info = DF_INSN_INFO_GET (reached_insn);
34529
34530 if (!insn_entry[uid].is_swap || insn_entry[uid].is_load
34531 || insn_entry[uid].is_store)
34532 return false;
34533
34534 if (purpose == FOR_LOADS)
34535 {
34536 df_ref use;
34537 FOR_EACH_INSN_INFO_USE (use, insn_info)
34538 {
34539 struct df_link *swap_link = DF_REF_CHAIN (use);
34540
34541 while (swap_link)
34542 {
34543 if (DF_REF_IS_ARTIFICIAL (swap_link->ref))
34544 return false;
34545
34546 rtx swap_def_insn = DF_REF_INSN (swap_link->ref);
34547 unsigned uid2 = INSN_UID (swap_def_insn);
34548
34549 /* Only permuting loads are allowed. */
34550 if (!insn_entry[uid2].is_swap || !insn_entry[uid2].is_load)
34551 return false;
34552
34553 swap_link = swap_link->next;
34554 }
34555 }
34556 }
34557 else if (purpose == FOR_STORES)
34558 {
34559 df_ref def;
34560 FOR_EACH_INSN_INFO_DEF (def, insn_info)
34561 {
34562 struct df_link *swap_link = DF_REF_CHAIN (def);
34563
34564 while (swap_link)
34565 {
34566 if (DF_REF_IS_ARTIFICIAL (swap_link->ref))
34567 return false;
34568
34569 rtx swap_use_insn = DF_REF_INSN (swap_link->ref);
34570 unsigned uid2 = INSN_UID (swap_use_insn);
34571
34572 /* Permuting stores or register swaps are allowed. */
34573 if (!insn_entry[uid2].is_swap || insn_entry[uid2].is_load)
34574 return false;
34575
34576 swap_link = swap_link->next;
34577 }
34578 }
34579 }
34580 }
34581
34582 return true;
34583 }
34584
34585 /* Mark the xxswapdi instructions associated with permuting loads and
34586 stores for removal. Note that we only flag them for deletion here,
34587 as there is a possibility of a swap being reached from multiple
34588 loads, etc. */
34589 static void
34590 mark_swaps_for_removal (swap_web_entry *insn_entry, unsigned int i)
34591 {
34592 rtx insn = insn_entry[i].insn;
34593 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
34594
34595 if (insn_entry[i].is_load)
34596 {
34597 df_ref def;
34598 FOR_EACH_INSN_INFO_DEF (def, insn_info)
34599 {
34600 struct df_link *link = DF_REF_CHAIN (def);
34601
34602 /* We know by now that these are swaps, so we can delete
34603 them confidently. */
34604 while (link)
34605 {
34606 rtx use_insn = DF_REF_INSN (link->ref);
34607 insn_entry[INSN_UID (use_insn)].will_delete = 1;
34608 link = link->next;
34609 }
34610 }
34611 }
34612 else if (insn_entry[i].is_store)
34613 {
34614 df_ref use;
34615 FOR_EACH_INSN_INFO_USE (use, insn_info)
34616 {
34617 /* Ignore uses for addressability. */
34618 machine_mode mode = GET_MODE (DF_REF_REG (use));
34619 if (!VECTOR_MODE_P (mode))
34620 continue;
34621
34622 struct df_link *link = DF_REF_CHAIN (use);
34623
34624 /* We know by now that these are swaps, so we can delete
34625 them confidently. */
34626 while (link)
34627 {
34628 rtx def_insn = DF_REF_INSN (link->ref);
34629 insn_entry[INSN_UID (def_insn)].will_delete = 1;
34630 link = link->next;
34631 }
34632 }
34633 }
34634 }
34635
34636 /* OP is either a CONST_VECTOR or an expression containing one.
34637 Swap the first half of the vector with the second in the first
34638 case. Recurse to find it in the second. */
34639 static void
34640 swap_const_vector_halves (rtx op)
34641 {
34642 int i;
34643 enum rtx_code code = GET_CODE (op);
34644 if (GET_CODE (op) == CONST_VECTOR)
34645 {
34646 int half_units = GET_MODE_NUNITS (GET_MODE (op)) / 2;
34647 for (i = 0; i < half_units; ++i)
34648 {
34649 rtx temp = CONST_VECTOR_ELT (op, i);
34650 CONST_VECTOR_ELT (op, i) = CONST_VECTOR_ELT (op, i + half_units);
34651 CONST_VECTOR_ELT (op, i + half_units) = temp;
34652 }
34653 }
34654 else
34655 {
34656 int j;
34657 const char *fmt = GET_RTX_FORMAT (code);
34658 for (i = 0; i < GET_RTX_LENGTH (code); ++i)
34659 if (fmt[i] == 'e' || fmt[i] == 'u')
34660 swap_const_vector_halves (XEXP (op, i));
34661 else if (fmt[i] == 'E')
34662 for (j = 0; j < XVECLEN (op, i); ++j)
34663 swap_const_vector_halves (XVECEXP (op, i, j));
34664 }
34665 }
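
/* E.g. the V4SI constant { 0, 1, 2, 3 } is rewritten in place as
   { 2, 3, 0, 1 }, so that the swapped doublewords yield the original
   element order.  */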
34666
34667 /* Find all subregs of a vector expression that perform a narrowing,
34668 and adjust the subreg index to account for doubleword swapping. */
34669 static void
34670 adjust_subreg_index (rtx op)
34671 {
34672 enum rtx_code code = GET_CODE (op);
34673 if (code == SUBREG
34674 && (GET_MODE_SIZE (GET_MODE (op))
34675 < GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))))
34676 {
34677 unsigned int index = SUBREG_BYTE (op);
34678 if (index < 8)
34679 index += 8;
34680 else
34681 index -= 8;
34682 SUBREG_BYTE (op) = index;
34683 }
34684
34685 const char *fmt = GET_RTX_FORMAT (code);
34686 int i,j;
34687 for (i = 0; i < GET_RTX_LENGTH (code); ++i)
34688 if (fmt[i] == 'e' || fmt[i] == 'u')
34689 adjust_subreg_index (XEXP (op, i));
34690 else if (fmt[i] == 'E')
34691 for (j = 0; j < XVECLEN (op, i); ++j)
34692 adjust_subreg_index (XVECEXP (op, i, j));
34693 }
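
/* E.g. (subreg:DF (reg:V2DF) 0) becomes (subreg:DF (reg:V2DF) 8) and vice
   versa, since the doubleword holding the data has moved to the other half
   of the register.  */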
34694
34695 /* Convert the non-permuting load INSN to a permuting one. */
34696 static void
34697 permute_load (rtx_insn *insn)
34698 {
34699 rtx body = PATTERN (insn);
34700 rtx mem_op = SET_SRC (body);
34701 rtx tgt_reg = SET_DEST (body);
34702 machine_mode mode = GET_MODE (tgt_reg);
34703 int n_elts = GET_MODE_NUNITS (mode);
34704 int half_elts = n_elts / 2;
34705 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
34706 int i, j;
34707 for (i = 0, j = half_elts; i < half_elts; ++i, ++j)
34708 XVECEXP (par, 0, i) = GEN_INT (j);
34709 for (i = half_elts, j = 0; j < half_elts; ++i, ++j)
34710 XVECEXP (par, 0, i) = GEN_INT (j);
34711 rtx sel = gen_rtx_VEC_SELECT (mode, mem_op, par);
34712 SET_SRC (body) = sel;
34713 INSN_CODE (insn) = -1; /* Force re-recognition. */
34714 df_insn_rescan (insn);
34715
34716 if (dump_file)
34717 fprintf (dump_file, "Replacing load %d with permuted load\n",
34718 INSN_UID (insn));
34719 }
34720
34721 /* Convert the non-permuting store INSN to a permuting one. */
34722 static void
34723 permute_store (rtx_insn *insn)
34724 {
34725 rtx body = PATTERN (insn);
34726 rtx src_reg = SET_SRC (body);
34727 machine_mode mode = GET_MODE (src_reg);
34728 int n_elts = GET_MODE_NUNITS (mode);
34729 int half_elts = n_elts / 2;
34730 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
34731 int i, j;
34732 for (i = 0, j = half_elts; i < half_elts; ++i, ++j)
34733 XVECEXP (par, 0, i) = GEN_INT (j);
34734 for (i = half_elts, j = 0; j < half_elts; ++i, ++j)
34735 XVECEXP (par, 0, i) = GEN_INT (j);
34736 rtx sel = gen_rtx_VEC_SELECT (mode, src_reg, par);
34737 SET_SRC (body) = sel;
34738 INSN_CODE (insn) = -1; /* Force re-recognition. */
34739 df_insn_rescan (insn);
34740
34741 if (dump_file)
34742 fprintf (dump_file, "Replacing store %d with permuted store\n",
34743 INSN_UID (insn));
34744 }
34745
34746 /* Given INSN, which contains a vector extract operation, adjust the index
34747 of the extracted lane to account for the doubleword swap. */
34748 static void
34749 adjust_extract (rtx_insn *insn)
34750 {
34751 rtx pattern = PATTERN (insn);
34752 if (GET_CODE (pattern) == PARALLEL)
34753 pattern = XVECEXP (pattern, 0, 0);
34754 rtx src = SET_SRC (pattern);
34755 /* The vec_select may be wrapped in a vec_duplicate for a splat, so
34756 account for that. */
34757 rtx sel = GET_CODE (src) == VEC_DUPLICATE ? XEXP (src, 0) : src;
34758 rtx par = XEXP (sel, 1);
34759 int half_elts = GET_MODE_NUNITS (GET_MODE (XEXP (sel, 0))) >> 1;
34760 int lane = INTVAL (XVECEXP (par, 0, 0));
34761 lane = lane >= half_elts ? lane - half_elts : lane + half_elts;
34762 XVECEXP (par, 0, 0) = GEN_INT (lane);
34763 INSN_CODE (insn) = -1; /* Force re-recognition. */
34764 df_insn_rescan (insn);
34765
34766 if (dump_file)
34767 fprintf (dump_file, "Changing lane for extract %d\n", INSN_UID (insn));
34768 }
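
/* E.g. extracting lane 0 of a V2DI register becomes extracting lane 1 once
   the doublewords are swapped, and conversely.  */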
34769
34770 /* Given INSN, which contains a vector direct-splat operation, adjust the index
34771 of the source lane to account for the doubleword swap. */
34772 static void
34773 adjust_splat (rtx_insn *insn)
34774 {
34775 rtx body = PATTERN (insn);
34776 rtx unspec = XEXP (body, 1);
34777 int half_elts = GET_MODE_NUNITS (GET_MODE (unspec)) >> 1;
34778 int lane = INTVAL (XVECEXP (unspec, 0, 1));
34779 lane = lane >= half_elts ? lane - half_elts : lane + half_elts;
34780 XVECEXP (unspec, 0, 1) = GEN_INT (lane);
34781 INSN_CODE (insn) = -1; /* Force re-recognition. */
34782 df_insn_rescan (insn);
34783
34784 if (dump_file)
34785 fprintf (dump_file, "Changing lane for splat %d\n", INSN_UID (insn));
34786 }
34787
34788 /* The insn described by INSN_ENTRY[I] can be swapped, but only
34789 with special handling. Take care of that here. */
34790 static void
34791 handle_special_swappables (swap_web_entry *insn_entry, unsigned i)
34792 {
34793 rtx_insn *insn = insn_entry[i].insn;
34794 rtx body = PATTERN (insn);
34795
34796 switch (insn_entry[i].special_handling)
34797 {
34798 default:
34799 gcc_unreachable ();
34800 case SH_CONST_VECTOR:
34801 {
34802 /* A CONST_VECTOR will only show up somewhere in the RHS of a SET. */
34803 gcc_assert (GET_CODE (body) == SET);
34804 rtx rhs = SET_SRC (body);
34805 swap_const_vector_halves (rhs);
34806 if (dump_file)
34807 fprintf (dump_file, "Swapping constant halves in insn %d\n", i);
34808 break;
34809 }
34810 case SH_SUBREG:
34811 /* A subreg of the same size is already safe. For subregs that
34812 select a smaller portion of a reg, adjust the index for
34813 swapped doublewords. */
34814 adjust_subreg_index (body);
34815 if (dump_file)
34816 fprintf (dump_file, "Adjusting subreg in insn %d\n", i);
34817 break;
34818 case SH_NOSWAP_LD:
34819 /* Convert a non-permuting load to a permuting one. */
34820 permute_load (insn);
34821 break;
34822 case SH_NOSWAP_ST:
34823 /* Convert a non-permuting store to a permuting one. */
34824 permute_store (insn);
34825 break;
34826 case SH_EXTRACT:
34827 /* Change the lane on an extract operation. */
34828 adjust_extract (insn);
34829 break;
34830 case SH_SPLAT:
34831 /* Change the lane on a direct-splat operation. */
34832 adjust_splat (insn);
34833 break;
34834 }
34835 }
34836
34837 /* Find the insn from the Ith table entry, which is known to be a
34838 register swap Y = SWAP(X). Replace it with a copy Y = X. */
34839 static void
34840 replace_swap_with_copy (swap_web_entry *insn_entry, unsigned i)
34841 {
34842 rtx_insn *insn = insn_entry[i].insn;
34843 rtx body = PATTERN (insn);
34844 rtx src_reg = XEXP (SET_SRC (body), 0);
34845 rtx copy = gen_rtx_SET (SET_DEST (body), src_reg);
34846 rtx_insn *new_insn = emit_insn_before (copy, insn);
34847 set_block_for_insn (new_insn, BLOCK_FOR_INSN (insn));
34848 df_insn_rescan (new_insn);
34849
34850 if (dump_file)
34851 {
34852 unsigned int new_uid = INSN_UID (new_insn);
34853 fprintf (dump_file, "Replacing swap %d with copy %d\n", i, new_uid);
34854 }
34855
34856 df_insn_delete (insn);
34857 remove_insn (insn);
34858 insn->set_deleted ();
34859 }
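/* For example, a doubleword swap such as
     (set (reg:V4SI Y) (vec_select:V4SI (reg:V4SI X) (parallel [2 3 0 1])))
   is replaced by the plain copy (set (reg:V4SI Y) (reg:V4SI X)), which
   later passes can propagate and delete.  */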
34860
34861 /* Dump the swap table to DUMP_FILE. */
34862 static void
34863 dump_swap_insn_table (swap_web_entry *insn_entry)
34864 {
34865 int e = get_max_uid ();
34866 fprintf (dump_file, "\nRelevant insns with their flag settings\n\n");
34867
34868 for (int i = 0; i < e; ++i)
34869 if (insn_entry[i].is_relevant)
34870 {
34871 swap_web_entry *pred_entry = (swap_web_entry *)insn_entry[i].pred ();
34872 fprintf (dump_file, "%6d %6d ", i,
34873 pred_entry && pred_entry->insn
34874 ? INSN_UID (pred_entry->insn) : 0);
34875 if (insn_entry[i].is_load)
34876 fputs ("load ", dump_file);
34877 if (insn_entry[i].is_store)
34878 fputs ("store ", dump_file);
34879 if (insn_entry[i].is_swap)
34880 fputs ("swap ", dump_file);
34881 if (insn_entry[i].is_live_in)
34882 fputs ("live-in ", dump_file);
34883 if (insn_entry[i].is_live_out)
34884 fputs ("live-out ", dump_file);
34885 if (insn_entry[i].contains_subreg)
34886 fputs ("subreg ", dump_file);
34887 if (insn_entry[i].is_128_int)
34888 fputs ("int128 ", dump_file);
34889 if (insn_entry[i].is_call)
34890 fputs ("call ", dump_file);
34891 if (insn_entry[i].is_swappable)
34892 {
34893 fputs ("swappable ", dump_file);
34894 if (insn_entry[i].special_handling == SH_CONST_VECTOR)
34895 fputs ("special:constvec ", dump_file);
34896 else if (insn_entry[i].special_handling == SH_SUBREG)
34897 fputs ("special:subreg ", dump_file);
34898 else if (insn_entry[i].special_handling == SH_NOSWAP_LD)
34899 fputs ("special:load ", dump_file);
34900 else if (insn_entry[i].special_handling == SH_NOSWAP_ST)
34901 fputs ("special:store ", dump_file);
34902 else if (insn_entry[i].special_handling == SH_EXTRACT)
34903 fputs ("special:extract ", dump_file);
34904 else if (insn_entry[i].special_handling == SH_SPLAT)
34905 fputs ("special:splat ", dump_file);
34906 }
34907 if (insn_entry[i].web_not_optimizable)
34908 fputs ("unoptimizable ", dump_file);
34909 if (insn_entry[i].will_delete)
34910 fputs ("delete ", dump_file);
34911 fputs ("\n", dump_file);
34912 }
34913 fputs ("\n", dump_file);
34914 }
34915
34916 /* Main entry point for this pass. */
34917 unsigned int
34918 rs6000_analyze_swaps (function *fun)
34919 {
34920 swap_web_entry *insn_entry;
34921 basic_block bb;
34922 rtx_insn *insn;
34923
34924 /* Dataflow analysis for use-def chains. */
34925 df_set_flags (DF_RD_PRUNE_DEAD_DEFS);
34926 df_chain_add_problem (DF_DU_CHAIN | DF_UD_CHAIN);
34927 df_analyze ();
34928 df_set_flags (DF_DEFER_INSN_RESCAN);
34929
34930 /* Allocate structure to represent webs of insns. */
34931 insn_entry = XCNEWVEC (swap_web_entry, get_max_uid ());
34932
34933 /* Walk the insns to gather basic data. */
34934 FOR_ALL_BB_FN (bb, fun)
34935 FOR_BB_INSNS (bb, insn)
34936 {
34937 unsigned int uid = INSN_UID (insn);
34938 if (NONDEBUG_INSN_P (insn))
34939 {
34940 insn_entry[uid].insn = insn;
34941
34942 if (GET_CODE (insn) == CALL_INSN)
34943 insn_entry[uid].is_call = 1;
34944
34945 /* Walk the uses and defs to see if we mention vector regs.
34946 Record any constraints on optimization of such mentions. */
34947 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
34948 df_ref mention;
34949 FOR_EACH_INSN_INFO_USE (mention, insn_info)
34950 {
34951 /* We use DF_REF_REAL_REG here to get inside any subregs. */
34952 machine_mode mode = GET_MODE (DF_REF_REAL_REG (mention));
34953
34954 /* If a use gets its value from a call insn, it will be
34955 a hard register and will look like (reg:V4SI 3 3).
34956 The df analysis creates two mentions for GPR3 and GPR4,
34957 both DImode. We must recognize this and treat it as a
34958 vector mention to ensure the call is unioned with this
34959 use. */
34960 if (mode == DImode && DF_REF_INSN_INFO (mention))
34961 {
34962 rtx feeder = DF_REF_INSN (mention);
34963 /* FIXME: It is pretty hard to get from the df mention
34964 to the mode of the use in the insn. We arbitrarily
34965 pick a vector mode here, even though the use might
34966 be a real DImode. We can be too conservative
34967 (create a web larger than necessary) because of
34968 this, so consider eventually fixing this. */
34969 if (GET_CODE (feeder) == CALL_INSN)
34970 mode = V4SImode;
34971 }
34972
34973 if (VECTOR_MODE_P (mode) || mode == TImode)
34974 {
34975 insn_entry[uid].is_relevant = 1;
34976 if (mode == TImode || mode == V1TImode)
34977 insn_entry[uid].is_128_int = 1;
34978 if (DF_REF_INSN_INFO (mention))
34979 insn_entry[uid].contains_subreg
34980 = !rtx_equal_p (DF_REF_REG (mention),
34981 DF_REF_REAL_REG (mention));
34982 union_defs (insn_entry, insn, mention);
34983 }
34984 }
34985 FOR_EACH_INSN_INFO_DEF (mention, insn_info)
34986 {
34987 /* We use DF_REF_REAL_REG here to get inside any subregs. */
34988 machine_mode mode = GET_MODE (DF_REF_REAL_REG (mention));
34989
34990 /* If we're loading up a hard vector register for a call,
34991 it looks like (set (reg:V4SI 9 9) (...)). The df
34992 analysis creates two mentions for GPR9 and GPR10, both
34993 DImode. So relying on the mode from the mentions
34994 isn't sufficient to ensure we union the call into the
34995 web with the parameter setup code. */
34996 	      if (mode == DImode && GET_CODE (PATTERN (insn)) == SET
34997 		  && VECTOR_MODE_P (GET_MODE (SET_DEST (PATTERN (insn)))))
34998 		mode = GET_MODE (SET_DEST (PATTERN (insn)));
34999
35000 if (VECTOR_MODE_P (mode) || mode == TImode)
35001 {
35002 insn_entry[uid].is_relevant = 1;
35003 if (mode == TImode || mode == V1TImode)
35004 insn_entry[uid].is_128_int = 1;
35005 if (DF_REF_INSN_INFO (mention))
35006 insn_entry[uid].contains_subreg
35007 = !rtx_equal_p (DF_REF_REG (mention),
35008 DF_REF_REAL_REG (mention));
35009 /* REG_FUNCTION_VALUE_P is not valid for subregs. */
35010 else if (REG_FUNCTION_VALUE_P (DF_REF_REG (mention)))
35011 insn_entry[uid].is_live_out = 1;
35012 union_uses (insn_entry, insn, mention);
35013 }
35014 }
35015
35016 if (insn_entry[uid].is_relevant)
35017 {
35018 /* Determine if this is a load or store. */
35019 insn_entry[uid].is_load = insn_is_load_p (insn);
35020 insn_entry[uid].is_store = insn_is_store_p (insn);
35021
35022 /* Determine if this is a doubleword swap. If not,
35023 determine whether it can legally be swapped. */
35024 if (insn_is_swap_p (insn))
35025 insn_entry[uid].is_swap = 1;
35026 else
35027 {
35028 unsigned int special = SH_NONE;
35029 insn_entry[uid].is_swappable
35030 = insn_is_swappable_p (insn_entry, insn, &special);
35031 if (special != SH_NONE && insn_entry[uid].contains_subreg)
35032 insn_entry[uid].is_swappable = 0;
35033 else if (special != SH_NONE)
35034 insn_entry[uid].special_handling = special;
35035 else if (insn_entry[uid].contains_subreg)
35036 insn_entry[uid].special_handling = SH_SUBREG;
35037 }
35038 }
35039 }
35040 }
35041
35042 if (dump_file)
35043 {
35044 fprintf (dump_file, "\nSwap insn entry table when first built\n");
35045 dump_swap_insn_table (insn_entry);
35046 }
35047
35048 /* Record unoptimizable webs. */
35049 unsigned e = get_max_uid (), i;
35050 for (i = 0; i < e; ++i)
35051 {
35052 if (!insn_entry[i].is_relevant)
35053 continue;
35054
35055 swap_web_entry *root
35056 = (swap_web_entry*)(&insn_entry[i])->unionfind_root ();
35057
35058 if (insn_entry[i].is_live_in || insn_entry[i].is_live_out
35059 || (insn_entry[i].contains_subreg
35060 && insn_entry[i].special_handling != SH_SUBREG)
35061 || insn_entry[i].is_128_int || insn_entry[i].is_call
35062 || !(insn_entry[i].is_swappable || insn_entry[i].is_swap))
35063 root->web_not_optimizable = 1;
35064
35065 	  /* If we have loads or stores that aren't permuting, then the
35066 	     optimization isn't appropriate.  */
35067 else if ((insn_entry[i].is_load || insn_entry[i].is_store)
35068 && !insn_entry[i].is_swap && !insn_entry[i].is_swappable)
35069 root->web_not_optimizable = 1;
35070
35071 /* If we have permuting loads or stores that are not accompanied
35072 by a register swap, the optimization isn't appropriate. */
35073 else if (insn_entry[i].is_load && insn_entry[i].is_swap)
35074 {
35075 rtx insn = insn_entry[i].insn;
35076 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
35077 df_ref def;
35078
35079 FOR_EACH_INSN_INFO_DEF (def, insn_info)
35080 {
35081 struct df_link *link = DF_REF_CHAIN (def);
35082
35083 if (!chain_contains_only_swaps (insn_entry, link, FOR_LOADS))
35084 {
35085 root->web_not_optimizable = 1;
35086 break;
35087 }
35088 }
35089 }
35090 else if (insn_entry[i].is_store && insn_entry[i].is_swap)
35091 {
35092 rtx insn = insn_entry[i].insn;
35093 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
35094 df_ref use;
35095
35096 FOR_EACH_INSN_INFO_USE (use, insn_info)
35097 {
35098 struct df_link *link = DF_REF_CHAIN (use);
35099
35100 if (!chain_contains_only_swaps (insn_entry, link, FOR_STORES))
35101 {
35102 root->web_not_optimizable = 1;
35103 break;
35104 }
35105 }
35106 }
35107 }
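  /* In the loop above, a web is the union-find class of insns connected
     by vector def-use chains; one tainted member (live across the
     function boundary, a 128-bit integer op, a call, or a non-swappable
     insn) marks the entire class unoptimizable via its root.  */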
35108
35109 if (dump_file)
35110 {
35111 fprintf (dump_file, "\nSwap insn entry table after web analysis\n");
35112 dump_swap_insn_table (insn_entry);
35113 }
35114
35115 /* For each load and store in an optimizable web (which implies
35116 the loads and stores are permuting), find the associated
35117 register swaps and mark them for removal. Due to various
35118 optimizations we may mark the same swap more than once. Also
35119 perform special handling for swappable insns that require it. */
35120 for (i = 0; i < e; ++i)
35121 if ((insn_entry[i].is_load || insn_entry[i].is_store)
35122 && insn_entry[i].is_swap)
35123 {
35124 swap_web_entry* root_entry
35125 = (swap_web_entry*)((&insn_entry[i])->unionfind_root ());
35126 if (!root_entry->web_not_optimizable)
35127 mark_swaps_for_removal (insn_entry, i);
35128 }
35129 else if (insn_entry[i].is_swappable && insn_entry[i].special_handling)
35130 {
35131 swap_web_entry* root_entry
35132 = (swap_web_entry*)((&insn_entry[i])->unionfind_root ());
35133 if (!root_entry->web_not_optimizable)
35134 handle_special_swappables (insn_entry, i);
35135 }
35136
35137 /* Now delete the swaps marked for removal. */
35138 for (i = 0; i < e; ++i)
35139 if (insn_entry[i].will_delete)
35140 replace_swap_with_copy (insn_entry, i);
35141
35142 /* Clean up. */
35143 free (insn_entry);
35144 return 0;
35145 }
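/* Roughly, the net effect on a POWER8 little-endian target: vector loads
   and stores are permuting lxvd2x/stxvd2x instructions, each paired with
   an xxswapd-style register swap to restore the expected element order.
   When every insn in a web is insensitive to element order, the swaps
   are replaced by copies and vanish, leaving just the loads and stores.  */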
35146
35147 const pass_data pass_data_analyze_swaps =
35148 {
35149 RTL_PASS, /* type */
35150 "swaps", /* name */
35151 OPTGROUP_NONE, /* optinfo_flags */
35152 TV_NONE, /* tv_id */
35153 0, /* properties_required */
35154 0, /* properties_provided */
35155 0, /* properties_destroyed */
35156 0, /* todo_flags_start */
35157 TODO_df_finish, /* todo_flags_finish */
35158 };
35159
35160 class pass_analyze_swaps : public rtl_opt_pass
35161 {
35162 public:
35163   pass_analyze_swaps (gcc::context *ctxt)
35164     : rtl_opt_pass (pass_data_analyze_swaps, ctxt)
35165 {}
35166
35167 /* opt_pass methods: */
35168 virtual bool gate (function *)
35169 {
35170 return (optimize > 0 && !BYTES_BIG_ENDIAN && TARGET_VSX
35171 && rs6000_optimize_swaps);
35172 }
35173
35174 virtual unsigned int execute (function *fun)
35175 {
35176 return rs6000_analyze_swaps (fun);
35177 }
35178
35179 }; // class pass_analyze_swaps
35180
35181 rtl_opt_pass *
35182 make_pass_analyze_swaps (gcc::context *ctxt)
35183 {
35184 return new pass_analyze_swaps (ctxt);
35185 }
35186
35187 #ifdef RS6000_GLIBC_ATOMIC_FENV
35188 /* Function declarations for rs6000_atomic_assign_expand_fenv. */
35189 static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
35190 #endif
35191
35192 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
35193
35194 static void
35195 rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
35196 {
35197 if (!TARGET_HARD_FLOAT || !TARGET_FPRS)
35198 {
35199 #ifdef RS6000_GLIBC_ATOMIC_FENV
35200 if (atomic_hold_decl == NULL_TREE)
35201 {
35202 atomic_hold_decl
35203 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
35204 get_identifier ("__atomic_feholdexcept"),
35205 build_function_type_list (void_type_node,
35206 double_ptr_type_node,
35207 NULL_TREE));
35208 TREE_PUBLIC (atomic_hold_decl) = 1;
35209 DECL_EXTERNAL (atomic_hold_decl) = 1;
35210 }
35211
35212 if (atomic_clear_decl == NULL_TREE)
35213 {
35214 atomic_clear_decl
35215 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
35216 get_identifier ("__atomic_feclearexcept"),
35217 build_function_type_list (void_type_node,
35218 NULL_TREE));
35219 TREE_PUBLIC (atomic_clear_decl) = 1;
35220 DECL_EXTERNAL (atomic_clear_decl) = 1;
35221 }
35222
35223 tree const_double = build_qualified_type (double_type_node,
35224 TYPE_QUAL_CONST);
35225 tree const_double_ptr = build_pointer_type (const_double);
35226 if (atomic_update_decl == NULL_TREE)
35227 {
35228 atomic_update_decl
35229 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
35230 get_identifier ("__atomic_feupdateenv"),
35231 build_function_type_list (void_type_node,
35232 const_double_ptr,
35233 NULL_TREE));
35234 TREE_PUBLIC (atomic_update_decl) = 1;
35235 DECL_EXTERNAL (atomic_update_decl) = 1;
35236 }
35237
35238 tree fenv_var = create_tmp_var (double_type_node);
35239 mark_addressable (fenv_var);
35240 tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);
35241
35242 *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
35243 *clear = build_call_expr (atomic_clear_decl, 0);
35244 *update = build_call_expr (atomic_update_decl, 1,
35245 fold_convert (const_double_ptr, fenv_addr));
35246 #endif
35247 return;
35248 }
35249
35250 tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
35251 tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
35252 tree call_mffs = build_call_expr (mffs, 0);
35253
35254 /* Generates the equivalent of feholdexcept (&fenv_var)
35255
35256 *fenv_var = __builtin_mffs ();
35257 double fenv_hold;
35258 *(uint64_t*)&fenv_hold = *(uint64_t*)fenv_var & 0xffffffff00000007LL;
35259 __builtin_mtfsf (0xff, fenv_hold); */
35260
35261 /* Mask to clear everything except for the rounding modes and non-IEEE
35262 arithmetic flag. */
35263 const unsigned HOST_WIDE_INT hold_exception_mask =
35264 HOST_WIDE_INT_C (0xffffffff00000007);
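  /* The FPSCR occupies the low 32 bits of the double image returned by
     mffs, so in this mask 0x3 covers the RN rounding-control bits and
     0x4 the NI non-IEEE mode bit, hence the low 0x7.  */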
35265
35266 tree fenv_var = create_tmp_var (double_type_node);
35267
35268 tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);
35269
35270 tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
35271 tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
35272 build_int_cst (uint64_type_node,
35273 hold_exception_mask));
35274
35275 tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
35276 fenv_llu_and);
35277
35278 tree hold_mtfsf = build_call_expr (mtfsf, 2,
35279 build_int_cst (unsigned_type_node, 0xff),
35280 fenv_hold_mtfsf);
35281
35282 *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);
35283
35284 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):
35285
35286 double fenv_clear = __builtin_mffs ();
35287 	     *(uint64_t*)&fenv_clear &= 0xffffffff00000000LL;
35288 __builtin_mtfsf (0xff, fenv_clear); */
35289
35290 	  /* Mask to clear the entire lower word of the FPSCR image, i.e. all
35291 	     the exception flags and enables (the rounding modes included).  */
35292 const unsigned HOST_WIDE_INT clear_exception_mask =
35293 HOST_WIDE_INT_C (0xffffffff00000000);
35294
35295 tree fenv_clear = create_tmp_var (double_type_node);
35296
35297 tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);
35298
35299 tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
35300 tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
35301 fenv_clean_llu,
35302 build_int_cst (uint64_type_node,
35303 clear_exception_mask));
35304
35305 tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
35306 fenv_clear_llu_and);
35307
35308 tree clear_mtfsf = build_call_expr (mtfsf, 2,
35309 build_int_cst (unsigned_type_node, 0xff),
35310 fenv_clear_mtfsf);
35311
35312 *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);
35313
35314 /* Generates the equivalent of feupdateenv (&fenv_var)
35315
35316 double old_fenv = __builtin_mffs ();
35317 double fenv_update;
35318 	     *(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL) |
35319 	                                (*(uint64_t*)fenv_var & 0x1ff80fff);
35320 __builtin_mtfsf (0xff, fenv_update); */
35321
35322 const unsigned HOST_WIDE_INT update_exception_mask =
35323 HOST_WIDE_INT_C (0xffffffff1fffff00);
35324 const unsigned HOST_WIDE_INT new_exception_mask =
35325 HOST_WIDE_INT_C (0x1ff80fff);
35326
35327 tree old_fenv = create_tmp_var (double_type_node);
35328 tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);
35329
35330 tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
35331 tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
35332 build_int_cst (uint64_type_node,
35333 update_exception_mask));
35334
35335 tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
35336 build_int_cst (uint64_type_node,
35337 new_exception_mask));
35338
35339 tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
35340 old_llu_and, new_llu_and);
35341
35342 tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
35343 new_llu_mask);
35344
35345 tree update_mtfsf = build_call_expr (mtfsf, 2,
35346 build_int_cst (unsigned_type_node, 0xff),
35347 fenv_update_mtfsf);
35348
35349 *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
35350 }
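/* The middle end emits these sequences around the compare-and-exchange
   loop of a C11 atomic compound assignment on a floating-point type:
   HOLD before the loop, CLEAR before each retry, and UPDATE once the
   store succeeds, so only the successful iteration's exceptions are
   raised (see the TARGET_ATOMIC_ASSIGN_EXPAND_FENV documentation).  */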
35351
35352 \f
35353 struct gcc_target targetm = TARGET_INITIALIZER;
35354
35355 #include "gt-rs6000.h"