1 /* Subroutines for insn-output.c for SPARC.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
4 Contributed by Michael Tiemann (tiemann@cygnus.com)
5 64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
6 at Cygnus Support.
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2, or (at your option)
13 any later version.
14
15 GCC is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING. If not, write to
22 the Free Software Foundation, 59 Temple Place - Suite 330,
23 Boston, MA 02111-1307, USA. */
24
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "tree.h"
30 #include "rtl.h"
31 #include "regs.h"
32 #include "hard-reg-set.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "conditions.h"
36 #include "output.h"
37 #include "insn-attr.h"
38 #include "flags.h"
39 #include "function.h"
40 #include "expr.h"
41 #include "optabs.h"
42 #include "recog.h"
43 #include "toplev.h"
44 #include "ggc.h"
45 #include "tm_p.h"
46 #include "debug.h"
47 #include "target.h"
48 #include "target-def.h"
49 #include "cfglayout.h"
50 #include "tree-gimple.h"
51
52 #ifdef HAVE_AS_RELAX_OPTION
53 /* If 'as' and 'ld' are relaxing tail call insns into branch always, use
54    "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
55    With sethi/jmp, neither 'as' nor 'ld' has an easy way to find out whether
56    somebody branches between the sethi and jmp. */
57 #define LEAF_SIBCALL_SLOT_RESERVED_P 1
58 #else
59 #define LEAF_SIBCALL_SLOT_RESERVED_P \
60 ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
61 #endif
62
63 /* Global variables for machine-dependent things. */
64
65 /* Size of frame.  We need to know this to emit return insns from leaf
66    procedures.  ACTUAL_FSIZE is set by sparc_compute_frame_size(), which is
67    called during the reload pass.  This is important as the value is later
68    used for scheduling (to see what can go in a delay slot).
69    APPARENT_FSIZE is the size of the stack less the register save area and
70    less the outgoing argument area.  It is used when saving call preserved regs. */
71 static HOST_WIDE_INT apparent_fsize;
72 static HOST_WIDE_INT actual_fsize;
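/* A minimal sketch (hypothetical helper, illustrative only) of the
   relationship described above; the authoritative computation lives in
   sparc_compute_frame_size.  */
static HOST_WIDE_INT ATTRIBUTE_UNUSED
sketch_actual_fsize (HOST_WIDE_INT apparent, HOST_WIDE_INT reg_save_area,
                     HOST_WIDE_INT outgoing_args)
{
  /* APPARENT_FSIZE excludes the register save area and the outgoing
     argument area, so adding them back yields ACTUAL_FSIZE.  */
  return apparent + reg_save_area + outgoing_args;
}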
73
74 /* Number of live general or floating point registers that need to be
75    saved (as 4-byte quantities). */
76 static int num_gfregs;
77
78 /* The alias set for prologue/epilogue register save/restore. */
79 static GTY(()) int sparc_sr_alias_set;
80
81 /* Save the operands last given to a compare for use when we
82 generate a scc or bcc insn. */
83 rtx sparc_compare_op0, sparc_compare_op1;
84
85 /* Vector to say how input registers are mapped to output registers.
86    HARD_FRAME_POINTER_REGNUM cannot be remapped by this mapping to
87    eliminate it.  You must use -fomit-frame-pointer to get that. */
88 char leaf_reg_remap[] =
89 { 0, 1, 2, 3, 4, 5, 6, 7,
90 -1, -1, -1, -1, -1, -1, 14, -1,
91 -1, -1, -1, -1, -1, -1, -1, -1,
92 8, 9, 10, 11, 12, 13, -1, 15,
93
94 32, 33, 34, 35, 36, 37, 38, 39,
95 40, 41, 42, 43, 44, 45, 46, 47,
96 48, 49, 50, 51, 52, 53, 54, 55,
97 56, 57, 58, 59, 60, 61, 62, 63,
98 64, 65, 66, 67, 68, 69, 70, 71,
99 72, 73, 74, 75, 76, 77, 78, 79,
100 80, 81, 82, 83, 84, 85, 86, 87,
101 88, 89, 90, 91, 92, 93, 94, 95,
102 96, 97, 98, 99, 100};
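/* For example (reading the table above, illustrative only): in a leaf
   function, input register %i0 (hard reg 24) is remapped to output
   register %o0 (hard reg 8) and %i7 (31) to %o7 (15), while %fp (30)
   maps to -1, i.e. it cannot be remapped.  */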
103
104 /* Vector, indexed by hard register number, which contains 1
105 for a register that is allowable in a candidate for leaf
106 function treatment. */
107 char sparc_leaf_regs[] =
108 { 1, 1, 1, 1, 1, 1, 1, 1,
109 0, 0, 0, 0, 0, 0, 1, 0,
110 0, 0, 0, 0, 0, 0, 0, 0,
111 1, 1, 1, 1, 1, 1, 0, 1,
112 1, 1, 1, 1, 1, 1, 1, 1,
113 1, 1, 1, 1, 1, 1, 1, 1,
114 1, 1, 1, 1, 1, 1, 1, 1,
115 1, 1, 1, 1, 1, 1, 1, 1,
116 1, 1, 1, 1, 1, 1, 1, 1,
117 1, 1, 1, 1, 1, 1, 1, 1,
118 1, 1, 1, 1, 1, 1, 1, 1,
119 1, 1, 1, 1, 1, 1, 1, 1,
120 1, 1, 1, 1, 1};
121
122 struct machine_function GTY(())
123 {
124 /* Some local-dynamic TLS symbol name. */
125 const char *some_ld_name;
126 };
127
128 /* Register to which we pretend the frame pointer is allocated.
129    Normally, this is %fp, but if we are in a leaf procedure, this
130    is %sp+"something".  We record "something" separately as it may
131    be too big for reg+constant addressing. */
132
133 static rtx frame_base_reg;
134 static HOST_WIDE_INT frame_base_offset;
135
136 static void sparc_init_modes (void);
137 static void scan_record_type (tree, int *, int *, int *);
138 static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode,
139 tree, int, int, int *, int *);
140
141 static int supersparc_adjust_cost (rtx, rtx, rtx, int);
142 static int hypersparc_adjust_cost (rtx, rtx, rtx, int);
143
144 static void sparc_output_addr_vec (rtx);
145 static void sparc_output_addr_diff_vec (rtx);
146 static void sparc_output_deferred_case_vectors (void);
147 static rtx sparc_builtin_saveregs (void);
148 static int epilogue_renumber (rtx *, int);
149 static bool sparc_assemble_integer (rtx, unsigned int, int);
150 static int set_extends (rtx);
151 static void load_pic_register (void);
152 static int save_or_restore_regs (int, int, rtx, int, int);
153 static void emit_save_regs (void);
154 static void emit_restore_regs (void);
155 static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
156 static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
157 #ifdef OBJECT_FORMAT_ELF
158 static void sparc_elf_asm_named_section (const char *, unsigned int);
159 #endif
160
161 static int sparc_adjust_cost (rtx, rtx, rtx, int);
162 static int sparc_issue_rate (void);
163 static void sparc_sched_init (FILE *, int, int);
164 static int sparc_use_sched_lookahead (void);
165
166 static void emit_soft_tfmode_libcall (const char *, int, rtx *);
167 static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
168 static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
169 static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
170 static void emit_hard_tfmode_operation (enum rtx_code, rtx *);
171
172 static bool sparc_function_ok_for_sibcall (tree, tree);
173 static void sparc_init_libfuncs (void);
174 static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
175 HOST_WIDE_INT, tree);
176 static struct machine_function * sparc_init_machine_status (void);
177 static bool sparc_cannot_force_const_mem (rtx);
178 static rtx sparc_tls_get_addr (void);
179 static rtx sparc_tls_got (void);
180 static const char *get_some_local_dynamic_name (void);
181 static int get_some_local_dynamic_name_1 (rtx *, void *);
182 static bool sparc_rtx_costs (rtx, int, int, int *);
183 static bool sparc_promote_prototypes (tree);
184 static rtx sparc_struct_value_rtx (tree, int);
185 static bool sparc_return_in_memory (tree, tree);
186 static bool sparc_strict_argument_naming (CUMULATIVE_ARGS *);
187 static tree sparc_gimplify_va_arg (tree, tree, tree *, tree *);
188 \f
189 /* Option handling. */
190
191 /* Code model option as passed by user. */
192 const char *sparc_cmodel_string;
193 /* Parsed value. */
194 enum cmodel sparc_cmodel;
195
196 char sparc_hard_reg_printed[8];
197
198 struct sparc_cpu_select sparc_select[] =
199 {
200 /* switch string, name, set_tune_p, set_arch_p */
201 { (char *)0, "default", 1, 1 },
202 { (char *)0, "-mcpu=", 1, 1 },
203 { (char *)0, "-mtune=", 1, 0 },
204 { 0, 0, 0, 0 }
205 };
206
207 /* CPU type. This is set from TARGET_CPU_DEFAULT and -m{cpu,tune}=xxx. */
208 enum processor_type sparc_cpu;
209 \f
210 /* Initialize the GCC target structure. */
211
212 /* The sparc default is to use .half rather than .short for aligned
213 HI objects. Use .word instead of .long on non-ELF systems. */
214 #undef TARGET_ASM_ALIGNED_HI_OP
215 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
216 #ifndef OBJECT_FORMAT_ELF
217 #undef TARGET_ASM_ALIGNED_SI_OP
218 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
219 #endif
220
221 #undef TARGET_ASM_UNALIGNED_HI_OP
222 #define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
223 #undef TARGET_ASM_UNALIGNED_SI_OP
224 #define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
225 #undef TARGET_ASM_UNALIGNED_DI_OP
226 #define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"
227
228 /* The target hook has to handle DI-mode values. */
229 #undef TARGET_ASM_INTEGER
230 #define TARGET_ASM_INTEGER sparc_assemble_integer
231
232 #undef TARGET_ASM_FUNCTION_PROLOGUE
233 #define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
234 #undef TARGET_ASM_FUNCTION_EPILOGUE
235 #define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue
236
237 #undef TARGET_SCHED_ADJUST_COST
238 #define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
239 #undef TARGET_SCHED_ISSUE_RATE
240 #define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
241 #undef TARGET_SCHED_INIT
242 #define TARGET_SCHED_INIT sparc_sched_init
243 #undef TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE
244 #define TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE hook_int_void_1
245 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
246 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead
247
248 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
249 #define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall
250
251 #undef TARGET_INIT_LIBFUNCS
252 #define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
253
254 #ifdef HAVE_AS_TLS
255 #undef TARGET_HAVE_TLS
256 #define TARGET_HAVE_TLS true
257 #endif
258 #undef TARGET_CANNOT_FORCE_CONST_MEM
259 #define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem
260
261 #undef TARGET_ASM_OUTPUT_MI_THUNK
262 #define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
263 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
264 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall
265
266 #undef TARGET_RTX_COSTS
267 #define TARGET_RTX_COSTS sparc_rtx_costs
268 #undef TARGET_ADDRESS_COST
269 #define TARGET_ADDRESS_COST hook_int_rtx_0
270
271 /* This is only needed for TARGET_ARCH64, but since PROMOTE_FUNCTION_MODE is a
272 no-op for TARGET_ARCH32 this is ok. Otherwise we'd need to add a runtime
273 test for this value. */
274 #undef TARGET_PROMOTE_FUNCTION_ARGS
275 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
276
277 /* This is only needed for TARGET_ARCH64, but since PROMOTE_FUNCTION_MODE is a
278 no-op for TARGET_ARCH32 this is ok. Otherwise we'd need to add a runtime
279 test for this value. */
280 #undef TARGET_PROMOTE_FUNCTION_RETURN
281 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
282
283 #undef TARGET_PROMOTE_PROTOTYPES
284 #define TARGET_PROMOTE_PROTOTYPES sparc_promote_prototypes
285
286 #undef TARGET_STRUCT_VALUE_RTX
287 #define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
288 #undef TARGET_RETURN_IN_MEMORY
289 #define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
290
291 #undef TARGET_EXPAND_BUILTIN_SAVEREGS
292 #define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
293 #undef TARGET_STRICT_ARGUMENT_NAMING
294 #define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming
295
296 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
297 #define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg
298
299 #undef TARGET_LATE_RTL_PROLOGUE_EPILOGUE
300 #define TARGET_LATE_RTL_PROLOGUE_EPILOGUE true
301
302 struct gcc_target targetm = TARGET_INITIALIZER;
303 \f
304 /* Validate and override various options, and do some machine dependent
305 initialization. */
306
307 void
308 sparc_override_options (void)
309 {
310 static struct code_model {
311 const char *const name;
312 const int value;
313 } const cmodels[] = {
314 { "32", CM_32 },
315 { "medlow", CM_MEDLOW },
316 { "medmid", CM_MEDMID },
317 { "medany", CM_MEDANY },
318 { "embmedany", CM_EMBMEDANY },
319 { 0, 0 }
320 };
321 const struct code_model *cmodel;
322 /* Map TARGET_CPU_DEFAULT to value for -m{arch,tune}=. */
323 static struct cpu_default {
324 const int cpu;
325 const char *const name;
326 } const cpu_default[] = {
327 /* There must be one entry here for each TARGET_CPU value. */
328 { TARGET_CPU_sparc, "cypress" },
329 { TARGET_CPU_sparclet, "tsc701" },
330 { TARGET_CPU_sparclite, "f930" },
331 { TARGET_CPU_v8, "v8" },
332 { TARGET_CPU_hypersparc, "hypersparc" },
333 { TARGET_CPU_sparclite86x, "sparclite86x" },
334 { TARGET_CPU_supersparc, "supersparc" },
335 { TARGET_CPU_v9, "v9" },
336 { TARGET_CPU_ultrasparc, "ultrasparc" },
337 { TARGET_CPU_ultrasparc3, "ultrasparc3" },
338 { 0, 0 }
339 };
340 const struct cpu_default *def;
341 /* Table of values for -m{cpu,tune}=. */
342 static struct cpu_table {
343 const char *const name;
344 const enum processor_type processor;
345 const int disable;
346 const int enable;
347 } const cpu_table[] = {
348 { "v7", PROCESSOR_V7, MASK_ISA, 0 },
349 { "cypress", PROCESSOR_CYPRESS, MASK_ISA, 0 },
350 { "v8", PROCESSOR_V8, MASK_ISA, MASK_V8 },
351 /* TI TMS390Z55 supersparc */
352 { "supersparc", PROCESSOR_SUPERSPARC, MASK_ISA, MASK_V8 },
353 { "sparclite", PROCESSOR_SPARCLITE, MASK_ISA, MASK_SPARCLITE },
354 /* The Fujitsu MB86930 is the original sparclite chip, with no fpu.
355    The Fujitsu MB86934 is a more recent sparclite chip, with an fpu. */
356 { "f930", PROCESSOR_F930, MASK_ISA|MASK_FPU, MASK_SPARCLITE },
357 { "f934", PROCESSOR_F934, MASK_ISA, MASK_SPARCLITE|MASK_FPU },
358 { "hypersparc", PROCESSOR_HYPERSPARC, MASK_ISA, MASK_V8|MASK_FPU },
359 { "sparclite86x", PROCESSOR_SPARCLITE86X, MASK_ISA|MASK_FPU,
360 MASK_SPARCLITE },
361 { "sparclet", PROCESSOR_SPARCLET, MASK_ISA, MASK_SPARCLET },
362 /* TEMIC sparclet */
363 { "tsc701", PROCESSOR_TSC701, MASK_ISA, MASK_SPARCLET },
364 { "v9", PROCESSOR_V9, MASK_ISA, MASK_V9 },
365 /* TI ultrasparc I, II, IIi */
366 { "ultrasparc", PROCESSOR_ULTRASPARC, MASK_ISA, MASK_V9
367 /* Although insns using %y are deprecated, they are a clear win on
368    current ultrasparcs. */
369 |MASK_DEPRECATED_V8_INSNS},
370 /* TI ultrasparc III */
371 /* ??? Check if %y issue still holds true in ultra3. */
372 { "ultrasparc3", PROCESSOR_ULTRASPARC3, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
373 { 0, 0, 0, 0 }
374 };
375 const struct cpu_table *cpu;
376 const struct sparc_cpu_select *sel;
377 int fpu;
378
379 #ifndef SPARC_BI_ARCH
380 /* Check for unsupported architecture size. */
381 if (! TARGET_64BIT != DEFAULT_ARCH32_P)
382 error ("%s is not supported by this configuration",
383 DEFAULT_ARCH32_P ? "-m64" : "-m32");
384 #endif
385
386 /* We force all 64-bit archs to use 128-bit long double.  */
387 if (TARGET_64BIT && ! TARGET_LONG_DOUBLE_128)
388 {
389 error ("-mlong-double-64 not allowed with -m64");
390 target_flags |= MASK_LONG_DOUBLE_128;
391 }
392
393 /* Code model selection. */
394 sparc_cmodel = SPARC_DEFAULT_CMODEL;
395
396 #ifdef SPARC_BI_ARCH
397 if (TARGET_ARCH32)
398 sparc_cmodel = CM_32;
399 #endif
400
401 if (sparc_cmodel_string != NULL)
402 {
403 if (TARGET_ARCH64)
404 {
405 for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
406 if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
407 break;
408 if (cmodel->name == NULL)
409 error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
410 else
411 sparc_cmodel = cmodel->value;
412 }
413 else
414 error ("-mcmodel= is not supported on 32 bit systems");
415 }
416
417 fpu = TARGET_FPU; /* save current -mfpu status */
418
419 /* Set the default CPU. */
420 for (def = &cpu_default[0]; def->name; ++def)
421 if (def->cpu == TARGET_CPU_DEFAULT)
422 break;
423 if (! def->name)
424 abort ();
425 sparc_select[0].string = def->name;
426
427 for (sel = &sparc_select[0]; sel->name; ++sel)
428 {
429 if (sel->string)
430 {
431 for (cpu = &cpu_table[0]; cpu->name; ++cpu)
432 if (! strcmp (sel->string, cpu->name))
433 {
434 if (sel->set_tune_p)
435 sparc_cpu = cpu->processor;
436
437 if (sel->set_arch_p)
438 {
439 target_flags &= ~cpu->disable;
440 target_flags |= cpu->enable;
441 }
442 break;
443 }
444
445 if (! cpu->name)
446 error ("bad value (%s) for %s switch", sel->string, sel->name);
447 }
448 }
449
450 /* If -mfpu or -mno-fpu was explicitly used, don't override with
451 the processor default. Clear MASK_FPU_SET to avoid confusing
452 the reverse mapping from switch values to names. */
453 if (TARGET_FPU_SET)
454 {
455 target_flags = (target_flags & ~MASK_FPU) | fpu;
456 target_flags &= ~MASK_FPU_SET;
457 }
458
459 /* Don't allow -mvis if FPU is disabled. */
460 if (! TARGET_FPU)
461 target_flags &= ~MASK_VIS;
462
463 /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
464 are available.
465 -m64 also implies v9. */
466 if (TARGET_VIS || TARGET_ARCH64)
467 {
468 target_flags |= MASK_V9;
469 target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
470 }
471
472 /* Use the deprecated v8 insns for sparc64 in 32 bit mode. */
473 if (TARGET_V9 && TARGET_ARCH32)
474 target_flags |= MASK_DEPRECATED_V8_INSNS;
475
476 /* V8PLUS requires V9 and makes no sense in 64-bit mode. */
477 if (! TARGET_V9 || TARGET_ARCH64)
478 target_flags &= ~MASK_V8PLUS;
479
480 /* Don't use stack biasing in 32 bit mode. */
481 if (TARGET_ARCH32)
482 target_flags &= ~MASK_STACK_BIAS;
483
484 /* Supply a default value for align_functions. */
485 if (align_functions == 0
486 && (sparc_cpu == PROCESSOR_ULTRASPARC
487 || sparc_cpu == PROCESSOR_ULTRASPARC3))
488 align_functions = 32;
489
490 /* Validate PCC_STRUCT_RETURN. */
491 if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
492 flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);
493
494 /* Only use .uaxword when compiling for a 64-bit target. */
495 if (!TARGET_ARCH64)
496 targetm.asm_out.unaligned_op.di = NULL;
497
498 /* Do various machine dependent initializations. */
499 sparc_init_modes ();
500
501 /* Acquire a unique set number for our register saves and restores. */
502 sparc_sr_alias_set = new_alias_set ();
503
504 /* Set up function hooks. */
505 init_machine_status = sparc_init_machine_status;
506 }
507 \f
508 /* Miscellaneous utilities. */
509
510 /* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
511 or branch on register contents instructions. */
512
513 int
514 v9_regcmp_p (enum rtx_code code)
515 {
516 return (code == EQ || code == NE || code == GE || code == LT
517 || code == LE || code == GT);
518 }
519
520 \f
521 /* Operand constraints. */
522
523 /* Return nonzero only if OP is a register of mode MODE,
524 or const0_rtx. */
525
526 int
527 reg_or_0_operand (rtx op, enum machine_mode mode)
528 {
529 if (register_operand (op, mode))
530 return 1;
531 if (op == const0_rtx)
532 return 1;
533 if (GET_MODE (op) == VOIDmode && GET_CODE (op) == CONST_DOUBLE
534 && CONST_DOUBLE_HIGH (op) == 0
535 && CONST_DOUBLE_LOW (op) == 0)
536 return 1;
537 if (fp_zero_operand (op, mode))
538 return 1;
539 return 0;
540 }
541
542 /* Return nonzero only if OP is const1_rtx. */
543
544 int
545 const1_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
546 {
547 return op == const1_rtx;
548 }
549
550 /* Nonzero if OP is a floating point constant with the value 0.0. */
551
552 int
553 fp_zero_operand (rtx op, enum machine_mode mode)
554 {
555 if (GET_MODE_CLASS (GET_MODE (op)) != MODE_FLOAT)
556 return 0;
557 return op == CONST0_RTX (mode);
558 }
559
560 /* Nonzero if OP is a register operand in a floating point register. */
561
562 int
563 fp_register_operand (rtx op, enum machine_mode mode)
564 {
565 if (! register_operand (op, mode))
566 return 0;
567 if (GET_CODE (op) == SUBREG)
568 op = SUBREG_REG (op);
569 return GET_CODE (op) == REG && SPARC_FP_REG_P (REGNO (op));
570 }
571
572 /* Nonzero if OP is a floating point constant which can
573 be loaded into an integer register using a single
574 sethi instruction. */
575
576 int
577 fp_sethi_p (rtx op)
578 {
579 if (GET_CODE (op) == CONST_DOUBLE)
580 {
581 REAL_VALUE_TYPE r;
582 long i;
583
584 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
585 if (REAL_VALUES_EQUAL (r, dconst0) &&
586 ! REAL_VALUE_MINUS_ZERO (r))
587 return 0;
588 REAL_VALUE_TO_TARGET_SINGLE (r, i);
589 if (SPARC_SETHI_P (i))
590 return 1;
591 }
592
593 return 0;
594 }
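/* Worked example (illustrative): the SFmode constant 1.0 has the IEEE
   single-precision bit pattern 0x3F800000, whose low 10 bits are clear,
   so fp_sethi_p accepts it and it can be materialized with a single
   "sethi %hi(0x3F800000), %reg".  */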
595
596 /* Nonzero if OP is a floating point constant which can
597 be loaded into an integer register using a single
598 mov instruction. */
599
600 int
601 fp_mov_p (rtx op)
602 {
603 if (GET_CODE (op) == CONST_DOUBLE)
604 {
605 REAL_VALUE_TYPE r;
606 long i;
607
608 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
609 if (REAL_VALUES_EQUAL (r, dconst0) &&
610 ! REAL_VALUE_MINUS_ZERO (r))
611 return 0;
612 REAL_VALUE_TO_TARGET_SINGLE (r, i);
613 if (SPARC_SIMM13_P (i))
614 return 1;
615 }
616
617 return 0;
618 }
619
620 /* Nonzero if OP is a floating point constant which can
621 be loaded into an integer register using a high/losum
622 instruction sequence. */
623
624 int
625 fp_high_losum_p (rtx op)
626 {
627 /* The constraints calling this should only be in
628 SFmode move insns, so any constant which cannot
629 be moved using a single insn will do. */
630 if (GET_CODE (op) == CONST_DOUBLE)
631 {
632 REAL_VALUE_TYPE r;
633 long i;
634
635 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
636 if (REAL_VALUES_EQUAL (r, dconst0) &&
637 ! REAL_VALUE_MINUS_ZERO (r))
638 return 0;
639 REAL_VALUE_TO_TARGET_SINGLE (r, i);
640 if (! SPARC_SETHI_P (i)
641 && ! SPARC_SIMM13_P (i))
642 return 1;
643 }
644
645 return 0;
646 }
647
648 /* Nonzero if OP is an integer register. */
649
650 int
651 intreg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
652 {
653 return (register_operand (op, SImode)
654 || (TARGET_ARCH64 && register_operand (op, DImode)));
655 }
656
657 /* Nonzero if OP is a floating point condition code register. */
658
659 int
660 fcc_reg_operand (rtx op, enum machine_mode mode)
661 {
662 /* This can happen when recog is called from combine. Op may be a MEM.
663 Fail instead of calling abort in this case. */
664 if (GET_CODE (op) != REG)
665 return 0;
666
667 if (mode != VOIDmode && mode != GET_MODE (op))
668 return 0;
669 if (mode == VOIDmode
670 && (GET_MODE (op) != CCFPmode && GET_MODE (op) != CCFPEmode))
671 return 0;
672
673 #if 0 /* ??? ==> 1 when %fcc0-3 are pseudos first. See gen_compare_reg(). */
674 if (reg_renumber == 0)
675 return REGNO (op) >= FIRST_PSEUDO_REGISTER;
676 return REGNO_OK_FOR_CCFP_P (REGNO (op));
677 #else
678 return (unsigned) REGNO (op) - SPARC_FIRST_V9_FCC_REG < 4;
679 #endif
680 }
681
682 /* Nonzero if OP is a floating point condition code fcc0 register. */
683
684 int
685 fcc0_reg_operand (rtx op, enum machine_mode mode)
686 {
687 /* This can happen when recog is called from combine. Op may be a MEM.
688 Fail instead of calling abort in this case. */
689 if (GET_CODE (op) != REG)
690 return 0;
691
692 if (mode != VOIDmode && mode != GET_MODE (op))
693 return 0;
694 if (mode == VOIDmode
695 && (GET_MODE (op) != CCFPmode && GET_MODE (op) != CCFPEmode))
696 return 0;
697
698 return REGNO (op) == SPARC_FCC_REG;
699 }
700
701 /* Nonzero if OP is an integer or floating point condition code register. */
702
703 int
704 icc_or_fcc_reg_operand (rtx op, enum machine_mode mode)
705 {
706 if (GET_CODE (op) == REG && REGNO (op) == SPARC_ICC_REG)
707 {
708 if (mode != VOIDmode && mode != GET_MODE (op))
709 return 0;
710 if (mode == VOIDmode
711 && GET_MODE (op) != CCmode && GET_MODE (op) != CCXmode)
712 return 0;
713 return 1;
714 }
715
716 return fcc_reg_operand (op, mode);
717 }
718
719 /* A call insn on SPARC can take a PC-relative constant address, or any
720    regular memory address. */
721
722 int
723 call_operand (rtx op, enum machine_mode mode)
724 {
725 if (GET_CODE (op) != MEM)
726 abort ();
727 op = XEXP (op, 0);
728 return (symbolic_operand (op, mode) || memory_address_p (Pmode, op));
729 }
730
731 int
732 call_operand_address (rtx op, enum machine_mode mode)
733 {
734 return (symbolic_operand (op, mode) || memory_address_p (Pmode, op));
735 }
736
737 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
738 otherwise return 0. */
739
740 int
741 tls_symbolic_operand (rtx op)
742 {
743 if (GET_CODE (op) != SYMBOL_REF)
744 return 0;
745 return SYMBOL_REF_TLS_MODEL (op);
746 }
747
748 int
749 tgd_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
750 {
751 return tls_symbolic_operand (op) == TLS_MODEL_GLOBAL_DYNAMIC;
752 }
753
754 int
755 tld_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
756 {
757 return tls_symbolic_operand (op) == TLS_MODEL_LOCAL_DYNAMIC;
758 }
759
760 int
761 tie_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
762 {
763 return tls_symbolic_operand (op) == TLS_MODEL_INITIAL_EXEC;
764 }
765
766 int
767 tle_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
768 {
769 return tls_symbolic_operand (op) == TLS_MODEL_LOCAL_EXEC;
770 }
771
772 /* Returns 1 if OP is either a symbol reference or a sum of a symbol
773 reference and a constant. */
774
775 int
776 symbolic_operand (register rtx op, enum machine_mode mode)
777 {
778 enum machine_mode omode = GET_MODE (op);
779
780 if (omode != mode && omode != VOIDmode && mode != VOIDmode)
781 return 0;
782
783 switch (GET_CODE (op))
784 {
785 case SYMBOL_REF:
786 return !SYMBOL_REF_TLS_MODEL (op);
787
788 case LABEL_REF:
789 return 1;
790
791 case CONST:
792 op = XEXP (op, 0);
793 return (((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
794 && !SYMBOL_REF_TLS_MODEL (XEXP (op, 0)))
795 || GET_CODE (XEXP (op, 0)) == LABEL_REF)
796 && GET_CODE (XEXP (op, 1)) == CONST_INT);
797
798 default:
799 return 0;
800 }
801 }
802
803 /* Return truth value of statement that OP is a symbolic memory
804 operand of mode MODE. */
805
806 int
807 symbolic_memory_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
808 {
809 if (GET_CODE (op) == SUBREG)
810 op = SUBREG_REG (op);
811 if (GET_CODE (op) != MEM)
812 return 0;
813 op = XEXP (op, 0);
814 return ((GET_CODE (op) == SYMBOL_REF && !SYMBOL_REF_TLS_MODEL (op))
815 || GET_CODE (op) == CONST || GET_CODE (op) == HIGH
816 || GET_CODE (op) == LABEL_REF);
817 }
818
819 /* Return truth value of statement that OP is a LABEL_REF of mode MODE. */
820
821 int
822 label_ref_operand (rtx op, enum machine_mode mode)
823 {
824 if (GET_CODE (op) != LABEL_REF)
825 return 0;
826 if (GET_MODE (op) != mode)
827 return 0;
828 return 1;
829 }
830
831 /* Return 1 if the operand is an argument used in generating pic references
832 in either the medium/low or medium/anywhere code models of sparc64. */
833
834 int
835 sp64_medium_pic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
836 {
837 /* Check for (const (minus (symbol_ref:GOT)
838 (const (minus (label) (pc))))). */
839 if (GET_CODE (op) != CONST)
840 return 0;
841 op = XEXP (op, 0);
842 if (GET_CODE (op) != MINUS)
843 return 0;
844 if (GET_CODE (XEXP (op, 0)) != SYMBOL_REF)
845 return 0;
846 /* ??? Ensure symbol is GOT. */
847 if (GET_CODE (XEXP (op, 1)) != CONST)
848 return 0;
849 if (GET_CODE (XEXP (XEXP (op, 1), 0)) != MINUS)
850 return 0;
851 return 1;
852 }
853
854 /* Return 1 if the operand is a data segment reference. This includes
855 the readonly data segment, or in other words anything but the text segment.
856 This is needed in the medium/anywhere code model on v9. These values
857 are accessed with EMBMEDANY_BASE_REG. */
858
859 int
860 data_segment_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
861 {
862 switch (GET_CODE (op))
863 {
864 case SYMBOL_REF :
865 return ! SYMBOL_REF_FUNCTION_P (op);
866 case PLUS :
867 /* Assume canonical format of symbol + constant.
868 Fall through. */
869 case CONST :
870 return data_segment_operand (XEXP (op, 0), VOIDmode);
871 default :
872 return 0;
873 }
874 }
875
876 /* Return 1 if the operand is a text segment reference.
877 This is needed in the medium/anywhere code model on v9. */
878
879 int
880 text_segment_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
881 {
882 switch (GET_CODE (op))
883 {
884 case LABEL_REF :
885 return 1;
886 case SYMBOL_REF :
887 return SYMBOL_REF_FUNCTION_P (op);
888 case PLUS :
889 /* Assume canonical format of symbol + constant.
890 Fall through. */
891 case CONST :
892 return text_segment_operand (XEXP (op, 0), VOIDmode);
893 default :
894 return 0;
895 }
896 }
897
898 /* Return 1 if the operand is either a register or a memory operand that is
899 not symbolic. */
900
901 int
902 reg_or_nonsymb_mem_operand (register rtx op, enum machine_mode mode)
903 {
904 if (register_operand (op, mode))
905 return 1;
906
907 if (memory_operand (op, mode) && ! symbolic_memory_operand (op, mode))
908 return 1;
909
910 return 0;
911 }
912
913 int
914 splittable_symbolic_memory_operand (rtx op,
915 enum machine_mode mode ATTRIBUTE_UNUSED)
916 {
917 if (GET_CODE (op) != MEM)
918 return 0;
919 if (! symbolic_operand (XEXP (op, 0), Pmode))
920 return 0;
921 return 1;
922 }
923
924 int
925 splittable_immediate_memory_operand (rtx op,
926 enum machine_mode mode ATTRIBUTE_UNUSED)
927 {
928 if (GET_CODE (op) != MEM)
929 return 0;
930 if (! immediate_operand (XEXP (op, 0), Pmode))
931 return 0;
932 return 1;
933 }
934
935 /* Return truth value of whether OP is EQ or NE. */
936
937 int
938 eq_or_neq (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
939 {
940 return (GET_CODE (op) == EQ || GET_CODE (op) == NE);
941 }
942
943 /* Return 1 if this is a comparison operator, but not an EQ, NE, GEU,
944 or LTU for non-floating-point. We handle those specially. */
945
946 int
947 normal_comp_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
948 {
949 enum rtx_code code;
950
951 if (!COMPARISON_P (op))
952 return 0;
953
954 if (GET_MODE (XEXP (op, 0)) == CCFPmode
955 || GET_MODE (XEXP (op, 0)) == CCFPEmode)
956 return 1;
957
958 code = GET_CODE (op);
959 return (code != NE && code != EQ && code != GEU && code != LTU);
960 }
961
962 /* Return 1 if this is a comparison operator. This allows the use of
963 MATCH_OPERATOR to recognize all the branch insns. */
964
965 int
966 noov_compare_op (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
967 {
968 enum rtx_code code;
969
970 if (!COMPARISON_P (op))
971 return 0;
972
973 code = GET_CODE (op);
974 if (GET_MODE (XEXP (op, 0)) == CC_NOOVmode
975 || GET_MODE (XEXP (op, 0)) == CCX_NOOVmode)
976 /* These are the only branches which work with CC_NOOVmode. */
977 return (code == EQ || code == NE || code == GE || code == LT);
978 return 1;
979 }
980
981 /* Return 1 if this is a 64-bit comparison operator. This allows the use of
982 MATCH_OPERATOR to recognize all the branch insns. */
983
984 int
985 noov_compare64_op (register rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
986 {
987 enum rtx_code code;
988
989 if (! TARGET_V9)
990 return 0;
991
992 if (!COMPARISON_P (op))
993 return 0;
994
995 code = GET_CODE (op);
996 if (GET_MODE (XEXP (op, 0)) == CCX_NOOVmode)
997 /* These are the only branches which work with CCX_NOOVmode. */
998 return (code == EQ || code == NE || code == GE || code == LT);
999 return (GET_MODE (XEXP (op, 0)) == CCXmode);
1000 }
1001
1002 /* Nonzero if OP is a comparison operator suitable for use in v9
1003 conditional move or branch on register contents instructions. */
1004
1005 int
1006 v9_regcmp_op (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1007 {
1008 enum rtx_code code;
1009
1010 if (!COMPARISON_P (op))
1011 return 0;
1012
1013 code = GET_CODE (op);
1014 return v9_regcmp_p (code);
1015 }
1016
1017 /* Return 1 if this is a SIGN_EXTEND or ZERO_EXTEND operation. */
1018
1019 int
1020 extend_op (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1021 {
1022 return GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND;
1023 }
1024
1025 /* Return nonzero if OP is an operator of mode MODE which can set
1026 the condition codes explicitly. We do not include PLUS and MINUS
1027 because these require CC_NOOVmode, which we handle explicitly. */
1028
1029 int
1030 cc_arithop (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1031 {
1032 if (GET_CODE (op) == AND
1033 || GET_CODE (op) == IOR
1034 || GET_CODE (op) == XOR)
1035 return 1;
1036
1037 return 0;
1038 }
1039
1040 /* Return nonzero if OP is an operator of mode MODE which can bitwise
1041 complement its second operand and set the condition codes explicitly. */
1042
1043 int
1044 cc_arithopn (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1045 {
1046 /* XOR is not here because combine canonicalizes (xor (not ...) ...)
1047 and (xor ... (not ...)) to (not (xor ...)). */
1048 return (GET_CODE (op) == AND
1049 || GET_CODE (op) == IOR);
1050 }
1051 \f
1052 /* Return true if OP is a register, or is a CONST_INT that can fit in a
1053 signed 13 bit immediate field. This is an acceptable SImode operand for
1054 most 3 address instructions. */
1055
1056 int
1057 arith_operand (rtx op, enum machine_mode mode)
1058 {
1059 if (register_operand (op, mode))
1060 return 1;
1061 if (GET_CODE (op) != CONST_INT)
1062 return 0;
1063 return SMALL_INT32 (op);
1064 }
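/* For illustration only: a hypothetical restatement of the signed 13-bit
   immediate test used throughout this file (see SPARC_SIMM13_P in the
   target headers for the authoritative definition).  */
static int ATTRIBUTE_UNUSED
sketch_simm13_fits (HOST_WIDE_INT v)
{
  /* Shift the signed range [-0x1000, 0xfff] onto [0, 0x1fff] and do a
     single unsigned comparison, just as the CONST_DOUBLE checks below
     do with their "+ 0x1000 < 0x2000" idiom.  */
  return (unsigned HOST_WIDE_INT) (v + 0x1000) < 0x2000;
}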
1065
1066 /* Return true if OP is the constant 4096.  */
1067
1068 int
1069 arith_4096_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1070 {
1071 if (GET_CODE (op) != CONST_INT)
1072 return 0;
1073 else
1074 return INTVAL (op) == 4096;
1075 }
1076
1077 /* Return true if OP is suitable as the second operand for add/sub.  */
1078
1079 int
1080 arith_add_operand (rtx op, enum machine_mode mode)
1081 {
1082 return arith_operand (op, mode) || arith_4096_operand (op, mode);
1083 }
1084
1085 /* Return true if OP is a CONST_INT or a CONST_DOUBLE which can fit in the
1086 immediate field of OR and XOR instructions. Used for 64-bit
1087 constant formation patterns. */
1088 int
1089 const64_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1090 {
1091 return ((GET_CODE (op) == CONST_INT
1092 && SPARC_SIMM13_P (INTVAL (op)))
1093 #if HOST_BITS_PER_WIDE_INT != 64
1094 || (GET_CODE (op) == CONST_DOUBLE
1095 && SPARC_SIMM13_P (CONST_DOUBLE_LOW (op))
1096 && (CONST_DOUBLE_HIGH (op) ==
1097 ((CONST_DOUBLE_LOW (op) & 0x80000000) != 0 ?
1098 (HOST_WIDE_INT)-1 : 0)))
1099 #endif
1100 );
1101 }
1102
1103 /* The same, but only for sethi instructions. */
1104 int
1105 const64_high_operand (rtx op, enum machine_mode mode)
1106 {
1107 return ((GET_CODE (op) == CONST_INT
1108 && (INTVAL (op) & ~(HOST_WIDE_INT)0x3ff) != 0
1109 && SPARC_SETHI_P (INTVAL (op) & GET_MODE_MASK (mode))
1110 )
1111 || (GET_CODE (op) == CONST_DOUBLE
1112 && CONST_DOUBLE_HIGH (op) == 0
1113 && (CONST_DOUBLE_LOW (op) & ~(HOST_WIDE_INT)0x3ff) != 0
1114 && SPARC_SETHI_P (CONST_DOUBLE_LOW (op))));
1115 }
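/* For illustration only: sethi places a 22-bit immediate into bits 31..10
   of the destination and clears the rest, so a constant is sethi-loadable
   roughly when its low 10 bits are zero and it fits in 32 bits.  This is
   a rough, hypothetical restatement; see SPARC_SETHI_P in the target
   headers for the authoritative test.  */
static int ATTRIBUTE_UNUSED
sketch_sethi_loadable (unsigned HOST_WIDE_INT x)
{
  return (x & ~(unsigned HOST_WIDE_INT) 0xfffffc00) == 0;
}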
1116
1117 /* Return true if OP is a register, or is a CONST_INT that can fit in a
1118 signed 11 bit immediate field. This is an acceptable SImode operand for
1119 the movcc instructions. */
1120
1121 int
1122 arith11_operand (rtx op, enum machine_mode mode)
1123 {
1124 return (register_operand (op, mode)
1125 || (GET_CODE (op) == CONST_INT && SPARC_SIMM11_P (INTVAL (op))));
1126 }
1127
1128 /* Return true if OP is a register, or is a CONST_INT that can fit in a
1129 signed 10 bit immediate field. This is an acceptable SImode operand for
1130 the movrcc instructions. */
1131
1132 int
1133 arith10_operand (rtx op, enum machine_mode mode)
1134 {
1135 return (register_operand (op, mode)
1136 || (GET_CODE (op) == CONST_INT && SPARC_SIMM10_P (INTVAL (op))));
1137 }
1138
1139 /* Return true if OP is a register, is a CONST_INT that fits in a 13 bit
1140    immediate field, or is a CONST_DOUBLE both of whose parts fit in a 13 bit
1141    immediate field.
1142    ARCH64: Return true if OP is a register, or is a CONST_INT or CONST_DOUBLE
1143    that can fit in a 13 bit immediate field.  This is an acceptable DImode
1144    operand for most 3 address instructions. */
1145
1146 int
1147 arith_double_operand (rtx op, enum machine_mode mode)
1148 {
1149 return (register_operand (op, mode)
1150 || (GET_CODE (op) == CONST_INT && SMALL_INT (op))
1151 || (! TARGET_ARCH64
1152 && GET_CODE (op) == CONST_DOUBLE
1153 && (unsigned HOST_WIDE_INT) (CONST_DOUBLE_LOW (op) + 0x1000) < 0x2000
1154 && (unsigned HOST_WIDE_INT) (CONST_DOUBLE_HIGH (op) + 0x1000) < 0x2000)
1155 || (TARGET_ARCH64
1156 && GET_CODE (op) == CONST_DOUBLE
1157 && (unsigned HOST_WIDE_INT) (CONST_DOUBLE_LOW (op) + 0x1000) < 0x2000
1158 && ((CONST_DOUBLE_HIGH (op) == -1
1159 && (CONST_DOUBLE_LOW (op) & 0x1000) == 0x1000)
1160 || (CONST_DOUBLE_HIGH (op) == 0
1161 && (CONST_DOUBLE_LOW (op) & 0x1000) == 0))));
1162 }
1163
1164 /* Return true if OP is the constant 4096 for DImode on ARCH64.  */
1165
1166 int
1167 arith_double_4096_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1168 {
1169 return (TARGET_ARCH64 &&
1170 ((GET_CODE (op) == CONST_INT && INTVAL (op) == 4096) ||
1171 (GET_CODE (op) == CONST_DOUBLE &&
1172 CONST_DOUBLE_LOW (op) == 4096 &&
1173 CONST_DOUBLE_HIGH (op) == 0)));
1174 }
1175
1176 /* Return true if OP is suitable as the second operand for add/sub in DImode.  */
1177
1178 int
1179 arith_double_add_operand (rtx op, enum machine_mode mode)
1180 {
1181 return arith_double_operand (op, mode) || arith_double_4096_operand (op, mode);
1182 }
1183
1184 /* Return true if OP is a register, or is a CONST_INT or CONST_DOUBLE that
1185 can fit in an 11 bit immediate field. This is an acceptable DImode
1186 operand for the movcc instructions. */
1187 /* ??? Replace with arith11_operand? */
1188
1189 int
1190 arith11_double_operand (rtx op, enum machine_mode mode)
1191 {
1192 return (register_operand (op, mode)
1193 || (GET_CODE (op) == CONST_DOUBLE
1194 && (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
1195 && (unsigned HOST_WIDE_INT) (CONST_DOUBLE_LOW (op) + 0x400) < 0x800
1196 && ((CONST_DOUBLE_HIGH (op) == -1
1197 && (CONST_DOUBLE_LOW (op) & 0x400) == 0x400)
1198 || (CONST_DOUBLE_HIGH (op) == 0
1199 && (CONST_DOUBLE_LOW (op) & 0x400) == 0)))
1200 || (GET_CODE (op) == CONST_INT
1201 && (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
1202 && (unsigned HOST_WIDE_INT) (INTVAL (op) + 0x400) < 0x800));
1203 }
1204
1205 /* Return true if OP is a register, or is a CONST_INT or CONST_DOUBLE that
1206    can fit in a 10 bit immediate field.  This is an acceptable DImode
1207    operand for the movrcc instructions. */
1208 /* ??? Replace with arith10_operand? */
1209
1210 int
1211 arith10_double_operand (rtx op, enum machine_mode mode)
1212 {
1213 return (register_operand (op, mode)
1214 || (GET_CODE (op) == CONST_DOUBLE
1215 && (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
1216 && (unsigned) (CONST_DOUBLE_LOW (op) + 0x200) < 0x400
1217 && ((CONST_DOUBLE_HIGH (op) == -1
1218 && (CONST_DOUBLE_LOW (op) & 0x200) == 0x200)
1219 || (CONST_DOUBLE_HIGH (op) == 0
1220 && (CONST_DOUBLE_LOW (op) & 0x200) == 0)))
1221 || (GET_CODE (op) == CONST_INT
1222 && (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
1223 && (unsigned HOST_WIDE_INT) (INTVAL (op) + 0x200) < 0x400));
1224 }
1225
1226 /* Return truth value of whether OP is an integer which fits the
1227 range constraining immediate operands in most three-address insns,
1228 which have a 13 bit immediate field. */
1229
1230 int
1231 small_int (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1232 {
1233 return (GET_CODE (op) == CONST_INT && SMALL_INT (op));
1234 }
1235
1236 int
1237 small_int_or_double (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1238 {
1239 return ((GET_CODE (op) == CONST_INT && SMALL_INT (op))
1240 || (GET_CODE (op) == CONST_DOUBLE
1241 && CONST_DOUBLE_HIGH (op) == 0
1242 && SPARC_SIMM13_P (CONST_DOUBLE_LOW (op))));
1243 }
1244
1245 /* Recognize operand values for the umul instruction.  That instruction
1246    sign-extends immediate values just like all other sparc instructions,
1247    but interprets the extended result as an unsigned number. */
1248
1249 int
1250 uns_small_int (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1251 {
1252 #if HOST_BITS_PER_WIDE_INT > 32
1253 /* All allowed constants will fit a CONST_INT. */
1254 return (GET_CODE (op) == CONST_INT
1255 && ((INTVAL (op) >= 0 && INTVAL (op) < 0x1000)
1256 || (INTVAL (op) >= 0xFFFFF000
1257 && INTVAL (op) <= 0xFFFFFFFF)));
1258 #else
1259 return ((GET_CODE (op) == CONST_INT && (unsigned) INTVAL (op) < 0x1000)
1260 || (GET_CODE (op) == CONST_DOUBLE
1261 && CONST_DOUBLE_HIGH (op) == 0
1262 && (unsigned) CONST_DOUBLE_LOW (op) - 0xFFFFF000 < 0x1000));
1263 #endif
1264 }
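/* Worked example (illustrative): the immediate -2048 is sign-extended to
   0xFFFFF800 and read by umul as the unsigned number 4294965248, which
   is why the test above accepts [0, 0xFFF] together with
   [0xFFFFF000, 0xFFFFFFFF].  */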
1265
1266 int
1267 uns_arith_operand (rtx op, enum machine_mode mode)
1268 {
1269 return register_operand (op, mode) || uns_small_int (op, mode);
1270 }
1271
1272 /* Return truth value of statement that OP is a call-clobbered register. */
1273 int
1274 clobbered_register (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1275 {
1276 return (GET_CODE (op) == REG && call_used_regs[REGNO (op)]);
1277 }
1278
1279 /* Return 1 if OP is a valid operand for the source of a move insn. */
1280
1281 int
1282 input_operand (rtx op, enum machine_mode mode)
1283 {
1284 /* If both modes are non-void they must be the same. */
1285 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
1286 return 0;
1287
1288 /* Allow any one instruction integer constant, and all CONST_INT
1289 variants when we are working in DImode and !arch64. */
1290 if (GET_MODE_CLASS (mode) == MODE_INT
1291 && ((GET_CODE (op) == CONST_INT
1292 && (SPARC_SETHI_P (INTVAL (op) & GET_MODE_MASK (mode))
1293 || SPARC_SIMM13_P (INTVAL (op))
1294 || (mode == DImode
1295 && ! TARGET_ARCH64)))
1296 || (TARGET_ARCH64
1297 && GET_CODE (op) == CONST_DOUBLE
1298 && ((CONST_DOUBLE_HIGH (op) == 0
1299 && SPARC_SETHI_P (CONST_DOUBLE_LOW (op)))
1300 ||
1301 #if HOST_BITS_PER_WIDE_INT == 64
1302 (CONST_DOUBLE_HIGH (op) == 0
1303 && SPARC_SIMM13_P (CONST_DOUBLE_LOW (op)))
1304 #else
1305 (SPARC_SIMM13_P (CONST_DOUBLE_LOW (op))
1306 && (((CONST_DOUBLE_LOW (op) & 0x80000000) == 0
1307 && CONST_DOUBLE_HIGH (op) == 0)
1308 || (CONST_DOUBLE_HIGH (op) == -1
1309 && (CONST_DOUBLE_LOW (op) & 0x80000000) != 0)))
1310 #endif
1311 ))))
1312 return 1;
1313
1314 /* If !arch64 and this is a DImode const, allow it so that
1315 the splits can be generated. */
1316 if (! TARGET_ARCH64
1317 && mode == DImode
1318 && GET_CODE (op) == CONST_DOUBLE)
1319 return 1;
1320
1321 if (register_operand (op, mode))
1322 return 1;
1323
1324 if (GET_MODE_CLASS (mode) == MODE_FLOAT
1325 && GET_CODE (op) == CONST_DOUBLE)
1326 return 1;
1327
1328 /* If this is a SUBREG, look inside so that we handle
1329 paradoxical ones. */
1330 if (GET_CODE (op) == SUBREG)
1331 op = SUBREG_REG (op);
1332
1333 /* Check for valid MEM forms. */
1334 if (GET_CODE (op) == MEM)
1335 {
1336 rtx inside = XEXP (op, 0);
1337
1338 if (GET_CODE (inside) == LO_SUM)
1339 {
1340 /* We can't allow these because all of the splits
1341 (eventually as they trickle down into DFmode
1342 splits) require offsettable memory references. */
1343 if (! TARGET_V9
1344 && GET_MODE (op) == TFmode)
1345 return 0;
1346
1347 return (register_operand (XEXP (inside, 0), Pmode)
1348 && CONSTANT_P (XEXP (inside, 1)));
1349 }
1350 return memory_address_p (mode, inside);
1351 }
1352
1353 return 0;
1354 }
1355
1356 /* Return 1 if OP is valid for the lhs of a compare insn. */
1357
1358 int
1359 compare_operand (rtx op, enum machine_mode mode)
1360 {
1361 if (GET_CODE (op) == ZERO_EXTRACT)
1362 return (register_operand (XEXP (op, 0), mode)
1363 && small_int_or_double (XEXP (op, 1), mode)
1364 && small_int_or_double (XEXP (op, 2), mode)
1365 /* This matches cmp_zero_extract. */
1366 && ((mode == SImode
1367 && ((GET_CODE (XEXP (op, 2)) == CONST_INT
1368 && INTVAL (XEXP (op, 2)) > 19)
1369 || (GET_CODE (XEXP (op, 2)) == CONST_DOUBLE
1370 && CONST_DOUBLE_LOW (XEXP (op, 2)) > 19)))
1371 /* This matches cmp_zero_extract_sp64. */
1372 || (mode == DImode
1373 && TARGET_ARCH64
1374 && ((GET_CODE (XEXP (op, 2)) == CONST_INT
1375 && INTVAL (XEXP (op, 2)) > 51)
1376 || (GET_CODE (XEXP (op, 2)) == CONST_DOUBLE
1377 && CONST_DOUBLE_LOW (XEXP (op, 2)) > 51)))));
1378 else
1379 return register_operand (op, mode);
1380 }
1381
1382 \f
1383 /* We know it can't be done in one insn when we get here;
1384    the movsi expander guarantees this. */
1385 void
1386 sparc_emit_set_const32 (rtx op0, rtx op1)
1387 {
1388 enum machine_mode mode = GET_MODE (op0);
1389 rtx temp;
1390
1391 if (GET_CODE (op1) == CONST_INT)
1392 {
1393 HOST_WIDE_INT value = INTVAL (op1);
1394
1395 if (SPARC_SETHI_P (value & GET_MODE_MASK (mode))
1396 || SPARC_SIMM13_P (value))
1397 abort ();
1398 }
1399
1400 /* Full 2-insn decomposition is needed. */
1401 if (reload_in_progress || reload_completed)
1402 temp = op0;
1403 else
1404 temp = gen_reg_rtx (mode);
1405
1406 if (GET_CODE (op1) == CONST_INT)
1407 {
1408 /* Emit them as real moves instead of a HIGH/LO_SUM;
1409    this way CSE can see everything and reuse intermediate
1410    values if it wants. */
1411 if (TARGET_ARCH64
1412 && HOST_BITS_PER_WIDE_INT != 64
1413 && (INTVAL (op1) & 0x80000000) != 0)
1414 emit_insn (gen_rtx_SET
1415 (VOIDmode, temp,
1416 immed_double_const (INTVAL (op1) & ~(HOST_WIDE_INT)0x3ff,
1417 0, DImode)));
1418 else
1419 emit_insn (gen_rtx_SET (VOIDmode, temp,
1420 GEN_INT (INTVAL (op1)
1421 & ~(HOST_WIDE_INT)0x3ff)));
1422
1423 emit_insn (gen_rtx_SET (VOIDmode,
1424 op0,
1425 gen_rtx_IOR (mode, temp,
1426 GEN_INT (INTVAL (op1) & 0x3ff))));
1427 }
1428 else
1429 {
1430 /* A symbol, emit in the traditional way. */
1431 emit_insn (gen_rtx_SET (VOIDmode, temp,
1432 gen_rtx_HIGH (mode, op1)));
1433 emit_insn (gen_rtx_SET (VOIDmode,
1434 op0, gen_rtx_LO_SUM (mode, temp, op1)));
1435
1436 }
1437 }
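/* Worked example (illustrative; %temp and %op0 stand for whatever hard
   registers are chosen): for op1 == 0x12345678 the CONST_INT path above
   emits the equivalent of

       sethi   %hi(0x12345678), %temp  ! temp = 0x12345400
       or      %temp, 0x278, %op0      ! 0x12345400 | 0x278 == 0x12345678

   i.e. the constant is split as (x & ~(HOST_WIDE_INT)0x3ff) | (x & 0x3ff).  */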
1438
1439 \f
1440 /* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
1441 If TEMP is non-zero, we are forbidden to use any other scratch
1442 registers. Otherwise, we are allowed to generate them as needed.
1443
1444 Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
1445 or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns). */
1446 void
1447 sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
1448 {
1449 rtx temp1, temp2, temp3, temp4, temp5;
1450 rtx ti_temp = 0;
1451
1452 if (temp && GET_MODE (temp) == TImode)
1453 {
1454 ti_temp = temp;
1455 temp = gen_rtx_REG (DImode, REGNO (temp));
1456 }
1457
1458 /* SPARC-V9 code-model support. */
1459 switch (sparc_cmodel)
1460 {
1461 case CM_MEDLOW:
1462 /* The range spanned by all instructions in the object is less
1463 than 2^31 bytes (2GB) and the distance from any instruction
1464 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1465 than 2^31 bytes (2GB).
1466
1467 The executable must be in the low 4TB of the virtual address
1468 space.
1469
1470 sethi %hi(symbol), %temp1
1471 or %temp1, %lo(symbol), %reg */
1472 if (temp)
1473 temp1 = temp; /* op0 is allowed. */
1474 else
1475 temp1 = gen_reg_rtx (DImode);
1476
1477 emit_insn (gen_rtx_SET (VOIDmode, temp1, gen_rtx_HIGH (DImode, op1)));
1478 emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
1479 break;
1480
1481 case CM_MEDMID:
1482 /* The range spanned by all instructions in the object is less
1483 than 2^31 bytes (2GB) and the distance from any instruction
1484 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1485 than 2^31 bytes (2GB).
1486
1487 The executable must be in the low 16TB of the virtual address
1488 space.
1489
1490 sethi %h44(symbol), %temp1
1491 or %temp1, %m44(symbol), %temp2
1492 sllx %temp2, 12, %temp3
1493 or %temp3, %l44(symbol), %reg */
1494 if (temp)
1495 {
1496 temp1 = op0;
1497 temp2 = op0;
1498 temp3 = temp; /* op0 is allowed. */
1499 }
1500 else
1501 {
1502 temp1 = gen_reg_rtx (DImode);
1503 temp2 = gen_reg_rtx (DImode);
1504 temp3 = gen_reg_rtx (DImode);
1505 }
1506
1507 emit_insn (gen_seth44 (temp1, op1));
1508 emit_insn (gen_setm44 (temp2, temp1, op1));
1509 emit_insn (gen_rtx_SET (VOIDmode, temp3,
1510 gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
1511 emit_insn (gen_setl44 (op0, temp3, op1));
1512 break;
1513
1514 case CM_MEDANY:
1515 /* The range spanned by all instructions in the object is less
1516 than 2^31 bytes (2GB) and the distance from any instruction
1517 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1518 than 2^31 bytes (2GB).
1519
1520 The executable can be placed anywhere in the virtual address
1521 space.
1522
1523 sethi %hh(symbol), %temp1
1524 sethi %lm(symbol), %temp2
1525 or %temp1, %hm(symbol), %temp3
1526 sllx %temp3, 32, %temp4
1527 or %temp4, %temp2, %temp5
1528 or %temp5, %lo(symbol), %reg */
1529 if (temp)
1530 {
1531 /* It is possible that one of the registers we got for operands[2]
1532 might coincide with that of operands[0] (which is why we made
1533 it TImode). Pick the other one to use as our scratch. */
1534 if (rtx_equal_p (temp, op0))
1535 {
1536 if (ti_temp)
1537 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
1538 else
1539 abort();
1540 }
1541 temp1 = op0;
1542 temp2 = temp; /* op0 is _not_ allowed, see above. */
1543 temp3 = op0;
1544 temp4 = op0;
1545 temp5 = op0;
1546 }
1547 else
1548 {
1549 temp1 = gen_reg_rtx (DImode);
1550 temp2 = gen_reg_rtx (DImode);
1551 temp3 = gen_reg_rtx (DImode);
1552 temp4 = gen_reg_rtx (DImode);
1553 temp5 = gen_reg_rtx (DImode);
1554 }
1555
1556 emit_insn (gen_sethh (temp1, op1));
1557 emit_insn (gen_setlm (temp2, op1));
1558 emit_insn (gen_sethm (temp3, temp1, op1));
1559 emit_insn (gen_rtx_SET (VOIDmode, temp4,
1560 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
1561 emit_insn (gen_rtx_SET (VOIDmode, temp5,
1562 gen_rtx_PLUS (DImode, temp4, temp2)));
1563 emit_insn (gen_setlo (op0, temp5, op1));
1564 break;
1565
1566 case CM_EMBMEDANY:
1567 /* Old old old backwards compatibility kruft here.
1568 Essentially it is MEDLOW with a fixed 64-bit
1569 virtual base added to all data segment addresses.
1570 Text-segment stuff is computed like MEDANY, we can't
1571 reuse the code above because the relocation knobs
1572 look different.
1573
1574 Data segment: sethi %hi(symbol), %temp1
1575 add %temp1, EMBMEDANY_BASE_REG, %temp2
1576 or %temp2, %lo(symbol), %reg */
1577 if (data_segment_operand (op1, GET_MODE (op1)))
1578 {
1579 if (temp)
1580 {
1581 temp1 = temp; /* op0 is allowed. */
1582 temp2 = op0;
1583 }
1584 else
1585 {
1586 temp1 = gen_reg_rtx (DImode);
1587 temp2 = gen_reg_rtx (DImode);
1588 }
1589
1590 emit_insn (gen_embmedany_sethi (temp1, op1));
1591 emit_insn (gen_embmedany_brsum (temp2, temp1));
1592 emit_insn (gen_embmedany_losum (op0, temp2, op1));
1593 }
1594
1595 /* Text segment: sethi %uhi(symbol), %temp1
1596 sethi %hi(symbol), %temp2
1597 or %temp1, %ulo(symbol), %temp3
1598 sllx %temp3, 32, %temp4
1599 or %temp4, %temp2, %temp5
1600 or %temp5, %lo(symbol), %reg */
1601 else
1602 {
1603 if (temp)
1604 {
1605 /* It is possible that one of the registers we got for operands[2]
1606 might coincide with that of operands[0] (which is why we made
1607 it TImode). Pick the other one to use as our scratch. */
1608 if (rtx_equal_p (temp, op0))
1609 {
1610 if (ti_temp)
1611 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
1612 else
1613 abort();
1614 }
1615 temp1 = op0;
1616 temp2 = temp; /* op0 is _not_ allowed, see above. */
1617 temp3 = op0;
1618 temp4 = op0;
1619 temp5 = op0;
1620 }
1621 else
1622 {
1623 temp1 = gen_reg_rtx (DImode);
1624 temp2 = gen_reg_rtx (DImode);
1625 temp3 = gen_reg_rtx (DImode);
1626 temp4 = gen_reg_rtx (DImode);
1627 temp5 = gen_reg_rtx (DImode);
1628 }
1629
1630 emit_insn (gen_embmedany_textuhi (temp1, op1));
1631 emit_insn (gen_embmedany_texthi (temp2, op1));
1632 emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
1633 emit_insn (gen_rtx_SET (VOIDmode, temp4,
1634 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
1635 emit_insn (gen_rtx_SET (VOIDmode, temp5,
1636 gen_rtx_PLUS (DImode, temp4, temp2)));
1637 emit_insn (gen_embmedany_textlo (op0, temp5, op1));
1638 }
1639 break;
1640
1641 default:
1642 abort();
1643 }
1644 }
1645
1646 /* These avoid problems when cross compiling.  If we do not
1647    go through all this hair, the optimizer will see
1648    invalid REG_EQUAL notes or in some cases none at all. */
1649 static void sparc_emit_set_safe_HIGH64 (rtx, HOST_WIDE_INT);
1650 static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
1651 static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
1652 static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);
1653
1654 #if HOST_BITS_PER_WIDE_INT == 64
1655 #define GEN_HIGHINT64(__x) GEN_INT ((__x) & ~(HOST_WIDE_INT)0x3ff)
1656 #define GEN_INT64(__x) GEN_INT (__x)
1657 #else
1658 #define GEN_HIGHINT64(__x) \
1659 immed_double_const ((__x) & ~(HOST_WIDE_INT)0x3ff, 0, DImode)
1660 #define GEN_INT64(__x) \
1661 immed_double_const ((__x) & 0xffffffff, \
1662 ((__x) & 0x80000000 ? -1 : 0), DImode)
1663 #endif
1664
1665 /* The optimizer is not to assume anything about exactly
1666    which bits are set for a HIGH; they are unspecified.
1667    Unfortunately this leads to many missed optimizations
1668    during CSE.  We mask out the non-HIGH bits and match
1669    a plain movdi to alleviate this problem. */
1670 static void
1671 sparc_emit_set_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
1672 {
1673 emit_insn (gen_rtx_SET (VOIDmode, dest, GEN_HIGHINT64 (val)));
1674 }
1675
1676 static rtx
1677 gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
1678 {
1679 return gen_rtx_SET (VOIDmode, dest, GEN_INT64 (val));
1680 }
1681
1682 static rtx
1683 gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
1684 {
1685 return gen_rtx_IOR (DImode, src, GEN_INT64 (val));
1686 }
1687
1688 static rtx
1689 gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
1690 {
1691 return gen_rtx_XOR (DImode, src, GEN_INT64 (val));
1692 }
1693
1694 /* Worker routines for 64-bit constant formation on arch64.
1695    A key goal in these emissions is to create as many
1696    temporary REGs as possible.  This makes it possible
1697    for half-built constants to be reused later when
1698    similar values are required again.  Without doing
1699    this, the optimizer cannot see such reuse
1700    opportunities. */
1701
1702 static void sparc_emit_set_const64_quick1 (rtx, rtx,
1703 unsigned HOST_WIDE_INT, int);
1704
1705 static void
1706 sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
1707 unsigned HOST_WIDE_INT low_bits, int is_neg)
1708 {
1709 unsigned HOST_WIDE_INT high_bits;
1710
1711 if (is_neg)
1712 high_bits = (~low_bits) & 0xffffffff;
1713 else
1714 high_bits = low_bits;
1715
1716 sparc_emit_set_safe_HIGH64 (temp, high_bits);
1717 if (!is_neg)
1718 {
1719 emit_insn (gen_rtx_SET (VOIDmode, op0,
1720 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1721 }
1722 else
1723 {
1724 /* If we are XOR'ing with -1, then we should emit a one's complement
1725 instead. This way the combiner will notice logical operations
1726 such as ANDN later on and substitute. */
1727 if ((low_bits & 0x3ff) == 0x3ff)
1728 {
1729 emit_insn (gen_rtx_SET (VOIDmode, op0,
1730 gen_rtx_NOT (DImode, temp)));
1731 }
1732 else
1733 {
1734 emit_insn (gen_rtx_SET (VOIDmode, op0,
1735 gen_safe_XOR64 (temp,
1736 (-(HOST_WIDE_INT)0x400
1737 | (low_bits & 0x3ff)))));
1738 }
1739 }
1740 }
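/* Worked example (illustrative, TARGET_ARCH64): for the constant
   0xffffffff89abcdef we have low_bits == 0x89abcdef with is_neg set, so
   high_bits == ~low_bits & 0xffffffff == 0x76543210 and the XOR path
   emits the equivalent of

       sethi   %hi(0x76543210), %temp  ! temp = 0x76543000
       xor     %temp, -0x211, %op0     ! -0x211 == sext (0xfffffdef)

   and 0x76543000 ^ 0xfffffffffffffdef == 0xffffffff89abcdef.  */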
1741
1742 static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
1743 unsigned HOST_WIDE_INT, int);
1744
1745 static void
1746 sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
1747 unsigned HOST_WIDE_INT high_bits,
1748 unsigned HOST_WIDE_INT low_immediate,
1749 int shift_count)
1750 {
1751 rtx temp2 = op0;
1752
1753 if ((high_bits & 0xfffffc00) != 0)
1754 {
1755 sparc_emit_set_safe_HIGH64 (temp, high_bits);
1756 if ((high_bits & ~0xfffffc00) != 0)
1757 emit_insn (gen_rtx_SET (VOIDmode, op0,
1758 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1759 else
1760 temp2 = temp;
1761 }
1762 else
1763 {
1764 emit_insn (gen_safe_SET64 (temp, high_bits));
1765 temp2 = temp;
1766 }
1767
1768 /* Now shift it up into place. */
1769 emit_insn (gen_rtx_SET (VOIDmode, op0,
1770 gen_rtx_ASHIFT (DImode, temp2,
1771 GEN_INT (shift_count))));
1772
1773 /* If there is a low immediate part piece, finish up by
1774 putting that in as well. */
1775 if (low_immediate != 0)
1776 emit_insn (gen_rtx_SET (VOIDmode, op0,
1777 gen_safe_OR64 (op0, low_immediate)));
1778 }
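/* Worked example (illustrative): with high_bits == 0x12345678,
   shift_count == 32 and low_immediate == 0x2ab, the code above emits the
   equivalent of

       sethi   %hi(0x12345678), %temp
       or      %temp, 0x278, %op0
       sllx    %op0, 32, %op0
       or      %op0, 0x2ab, %op0

   producing 0x12345678000002ab.  */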
1779
1780 static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
1781 unsigned HOST_WIDE_INT);
1782
1783 /* Full 64-bit constant decomposition. Even though this is the
1784 'worst' case, we still optimize a few things away. */
1785 static void
1786 sparc_emit_set_const64_longway (rtx op0, rtx temp,
1787 unsigned HOST_WIDE_INT high_bits,
1788 unsigned HOST_WIDE_INT low_bits)
1789 {
1790 rtx sub_temp;
1791
1792 if (reload_in_progress || reload_completed)
1793 sub_temp = op0;
1794 else
1795 sub_temp = gen_reg_rtx (DImode);
1796
1797 if ((high_bits & 0xfffffc00) != 0)
1798 {
1799 sparc_emit_set_safe_HIGH64 (temp, high_bits);
1800 if ((high_bits & ~0xfffffc00) != 0)
1801 emit_insn (gen_rtx_SET (VOIDmode,
1802 sub_temp,
1803 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1804 else
1805 sub_temp = temp;
1806 }
1807 else
1808 {
1809 emit_insn (gen_safe_SET64 (temp, high_bits));
1810 sub_temp = temp;
1811 }
1812
1813 if (!reload_in_progress && !reload_completed)
1814 {
1815 rtx temp2 = gen_reg_rtx (DImode);
1816 rtx temp3 = gen_reg_rtx (DImode);
1817 rtx temp4 = gen_reg_rtx (DImode);
1818
1819 emit_insn (gen_rtx_SET (VOIDmode, temp4,
1820 gen_rtx_ASHIFT (DImode, sub_temp,
1821 GEN_INT (32))));
1822
1823 sparc_emit_set_safe_HIGH64 (temp2, low_bits);
1824 if ((low_bits & ~0xfffffc00) != 0)
1825 {
1826 emit_insn (gen_rtx_SET (VOIDmode, temp3,
1827 gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
1828 emit_insn (gen_rtx_SET (VOIDmode, op0,
1829 gen_rtx_PLUS (DImode, temp4, temp3)));
1830 }
1831 else
1832 {
1833 emit_insn (gen_rtx_SET (VOIDmode, op0,
1834 gen_rtx_PLUS (DImode, temp4, temp2)));
1835 }
1836 }
1837 else
1838 {
1839 rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
1840 rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
1841 rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
1842 int to_shift = 12;
1843
1844 /* We are in the middle of reload, so this is really
1845 painful. However, we still make an attempt to
1846 avoid emitting truly stupid code. */
1847 if (low1 != const0_rtx)
1848 {
1849 emit_insn (gen_rtx_SET (VOIDmode, op0,
1850 gen_rtx_ASHIFT (DImode, sub_temp,
1851 GEN_INT (to_shift))));
1852 emit_insn (gen_rtx_SET (VOIDmode, op0,
1853 gen_rtx_IOR (DImode, op0, low1)));
1854 sub_temp = op0;
1855 to_shift = 12;
1856 }
1857 else
1858 {
1859 to_shift += 12;
1860 }
1861 if (low2 != const0_rtx)
1862 {
1863 emit_insn (gen_rtx_SET (VOIDmode, op0,
1864 gen_rtx_ASHIFT (DImode, sub_temp,
1865 GEN_INT (to_shift))));
1866 emit_insn (gen_rtx_SET (VOIDmode, op0,
1867 gen_rtx_IOR (DImode, op0, low2)));
1868 sub_temp = op0;
1869 to_shift = 8;
1870 }
1871 else
1872 {
1873 to_shift += 8;
1874 }
1875 emit_insn (gen_rtx_SET (VOIDmode, op0,
1876 gen_rtx_ASHIFT (DImode, sub_temp,
1877 GEN_INT (to_shift))));
1878 if (low3 != const0_rtx)
1879 emit_insn (gen_rtx_SET (VOIDmode, op0,
1880 gen_rtx_IOR (DImode, op0, low3)));
1881 /* phew... */
1882 }
1883 }
1884
1885 /* Analyze a 64-bit constant for certain properties. */
1886 static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
1887 unsigned HOST_WIDE_INT,
1888 int *, int *, int *);
1889
1890 static void
1891 analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
1892 unsigned HOST_WIDE_INT low_bits,
1893 int *hbsp, int *lbsp, int *abbasp)
1894 {
1895 int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
1896 int i;
1897
1898 lowest_bit_set = highest_bit_set = -1;
1899 i = 0;
1900 do
1901 {
1902 if ((lowest_bit_set == -1)
1903 && ((low_bits >> i) & 1))
1904 lowest_bit_set = i;
1905 if ((highest_bit_set == -1)
1906 && ((high_bits >> (32 - i - 1)) & 1))
1907 highest_bit_set = (64 - i - 1);
1908 }
1909 while (++i < 32
1910 && ((highest_bit_set == -1)
1911 || (lowest_bit_set == -1)));
1912 if (i == 32)
1913 {
1914 i = 0;
1915 do
1916 {
1917 if ((lowest_bit_set == -1)
1918 && ((high_bits >> i) & 1))
1919 lowest_bit_set = i + 32;
1920 if ((highest_bit_set == -1)
1921 && ((low_bits >> (32 - i - 1)) & 1))
1922 highest_bit_set = 32 - i - 1;
1923 }
1924 while (++i < 32
1925 && ((highest_bit_set == -1)
1926 || (lowest_bit_set == -1)));
1927 }
1928 /* If there are no bits set this should have gone out
1929 as one instruction! */
1930 if (lowest_bit_set == -1
1931 || highest_bit_set == -1)
1932 abort ();
1933 all_bits_between_are_set = 1;
1934 for (i = lowest_bit_set; i <= highest_bit_set; i++)
1935 {
1936 if (i < 32)
1937 {
1938 if ((low_bits & (1 << i)) != 0)
1939 continue;
1940 }
1941 else
1942 {
1943 if ((high_bits & (1 << (i - 32))) != 0)
1944 continue;
1945 }
1946 all_bits_between_are_set = 0;
1947 break;
1948 }
1949 *hbsp = highest_bit_set;
1950 *lbsp = lowest_bit_set;
1951 *abbasp = all_bits_between_are_set;
1952 }
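/* A minimal host-side sketch of the same analysis on a single 64-bit
   value (illustrative only, not part of the compiler; assumes a C99
   host with <stdint.h>):

       static void
       analyze_64bit (uint64_t val, int *hbsp, int *lbsp, int *abbasp)
       {
         int i, lo = -1, hi = -1;

         for (i = 0; i < 64; i++)
           if ((val >> i) & 1)
             {
               if (lo < 0)
                 lo = i;
               hi = i;
             }
         *hbsp = hi;
         *lbsp = lo;
         *abbasp = lo >= 0 && (((val >> lo) + 1) & (val >> lo)) == 0;
       }

   The last line tests whether VAL is one contiguous run of set bits;
   unlike the function above, the sketch returns -1/-1 for zero rather
   than aborting.  */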
1953
1954 static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);
1955
1956 static int
1957 const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
1958 unsigned HOST_WIDE_INT low_bits)
1959 {
1960 int highest_bit_set, lowest_bit_set, all_bits_between_are_set;
1961
1962 if (high_bits == 0
1963 || high_bits == 0xffffffff)
1964 return 1;
1965
1966 analyze_64bit_constant (high_bits, low_bits,
1967 &highest_bit_set, &lowest_bit_set,
1968 &all_bits_between_are_set);
1969
1970 if ((highest_bit_set == 63
1971 || lowest_bit_set == 0)
1972 && all_bits_between_are_set != 0)
1973 return 1;
1974
1975 if ((highest_bit_set - lowest_bit_set) < 21)
1976 return 1;
1977
1978 return 0;
1979 }
1980
1981 static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
1982 unsigned HOST_WIDE_INT,
1983 int, int);
1984
1985 static unsigned HOST_WIDE_INT
1986 create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
1987 unsigned HOST_WIDE_INT low_bits,
1988 int lowest_bit_set, int shift)
1989 {
1990 HOST_WIDE_INT hi, lo;
1991
1992 if (lowest_bit_set < 32)
1993 {
1994 lo = (low_bits >> lowest_bit_set) << shift;
1995 hi = ((high_bits << (32 - lowest_bit_set)) << shift);
1996 }
1997 else
1998 {
1999 lo = 0;
2000 hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
2001 }
2002 if (hi & lo)
2003 abort ();
2004 return (hi | lo);
2005 }
2006
2007 /* Here we are sure to be arch64 and this is an integer constant
2008 being loaded into a register. Emit the most efficient
2009 insn sequence possible. Detection of all the 1-insn cases
2010 has been done already. */
2011 void
2012 sparc_emit_set_const64 (rtx op0, rtx op1)
2013 {
2014 unsigned HOST_WIDE_INT high_bits, low_bits;
2015 int lowest_bit_set, highest_bit_set;
2016 int all_bits_between_are_set;
2017 rtx temp = 0;
2018
2019 /* Sanity check that we know what we are working with. */
2020 if (! TARGET_ARCH64)
2021 abort ();
2022
2023 if (GET_CODE (op0) != SUBREG)
2024 {
2025 if (GET_CODE (op0) != REG
2026 || (REGNO (op0) >= SPARC_FIRST_FP_REG
2027 && REGNO (op0) <= SPARC_LAST_V9_FP_REG))
2028 abort ();
2029 }
2030
2031 if (reload_in_progress || reload_completed)
2032 temp = op0;
2033
2034 if (GET_CODE (op1) != CONST_DOUBLE
2035 && GET_CODE (op1) != CONST_INT)
2036 {
2037 sparc_emit_set_symbolic_const64 (op0, op1, temp);
2038 return;
2039 }
2040
2041 if (! temp)
2042 temp = gen_reg_rtx (DImode);
2043
2044 if (GET_CODE (op1) == CONST_DOUBLE)
2045 {
2046 #if HOST_BITS_PER_WIDE_INT == 64
2047 high_bits = (CONST_DOUBLE_LOW (op1) >> 32) & 0xffffffff;
2048 low_bits = CONST_DOUBLE_LOW (op1) & 0xffffffff;
2049 #else
2050 high_bits = CONST_DOUBLE_HIGH (op1);
2051 low_bits = CONST_DOUBLE_LOW (op1);
2052 #endif
2053 }
2054 else
2055 {
2056 #if HOST_BITS_PER_WIDE_INT == 64
2057 high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
2058 low_bits = (INTVAL (op1) & 0xffffffff);
2059 #else
2060 high_bits = ((INTVAL (op1) < 0) ?
2061 0xffffffff :
2062 0x00000000);
2063 low_bits = INTVAL (op1);
2064 #endif
2065 }
2066
2067 /* low_bits bits 0 --> 31
2068 high_bits bits 32 --> 63 */
2069
2070 analyze_64bit_constant (high_bits, low_bits,
2071 &highest_bit_set, &lowest_bit_set,
2072 &all_bits_between_are_set);
2073
2074 /* First try for a 2-insn sequence. */
2075
2076 /* These situations are preferred because the optimizer can
2077 * do more things with them:
2078 * 1) mov -1, %reg
2079 * sllx %reg, shift, %reg
2080 * 2) mov -1, %reg
2081 * srlx %reg, shift, %reg
2082 * 3) mov some_small_const, %reg
2083 * sllx %reg, shift, %reg
2084 */
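/* For instance (example values not from the original source),
   0xfff0000000000000 (bits 52..63 set) is loaded as

        mov     -1, %reg
        sllx    %reg, 52, %reg

   and 0x000001ffffffffff (bits 0..40 set) as

        mov     -1, %reg
        srlx    %reg, 23, %reg  ! 23 == 63 - 40
 */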
2085 if (((highest_bit_set == 63
2086 || lowest_bit_set == 0)
2087 && all_bits_between_are_set != 0)
2088 || ((highest_bit_set - lowest_bit_set) < 12))
2089 {
2090 HOST_WIDE_INT the_const = -1;
2091 int shift = lowest_bit_set;
2092
2093 if ((highest_bit_set != 63
2094 && lowest_bit_set != 0)
2095 || all_bits_between_are_set == 0)
2096 {
2097 the_const =
2098 create_simple_focus_bits (high_bits, low_bits,
2099 lowest_bit_set, 0);
2100 }
2101 else if (lowest_bit_set == 0)
2102 shift = -(63 - highest_bit_set);
2103
2104 if (! SPARC_SIMM13_P (the_const))
2105 abort ();
2106
2107 emit_insn (gen_safe_SET64 (temp, the_const));
2108 if (shift > 0)
2109 emit_insn (gen_rtx_SET (VOIDmode,
2110 op0,
2111 gen_rtx_ASHIFT (DImode,
2112 temp,
2113 GEN_INT (shift))));
2114 else if (shift < 0)
2115 emit_insn (gen_rtx_SET (VOIDmode,
2116 op0,
2117 gen_rtx_LSHIFTRT (DImode,
2118 temp,
2119 GEN_INT (-shift))));
2120 else
2121 abort ();
2122 return;
2123 }
2124
2125 /* Now handle a range of 22 or fewer bits set somewhere.
2126 * 1) sethi %hi(focus_bits), %reg
2127 * sllx %reg, shift, %reg
2128 * 2) sethi %hi(focus_bits), %reg
2129 * srlx %reg, shift, %reg
2130 */
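/* For instance (example value not from the original source),
   0x00000000003ff800 (bits 11..21 set) yields focus_bits 0x1ffc00,
   so we emit

        sethi   %hi(0x1ffc00), %temp
        sllx    %temp, 1, %op0  ! 1 == lowest_bit_set - 10
 */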
2131 if ((highest_bit_set - lowest_bit_set) < 21)
2132 {
2133 unsigned HOST_WIDE_INT focus_bits =
2134 create_simple_focus_bits (high_bits, low_bits,
2135 lowest_bit_set, 10);
2136
2137 if (! SPARC_SETHI_P (focus_bits))
2138 abort ();
2139
2140 sparc_emit_set_safe_HIGH64 (temp, focus_bits);
2141
2142 /* If lowest_bit_set == 10 then a sethi alone could have done it. */
2143 if (lowest_bit_set < 10)
2144 emit_insn (gen_rtx_SET (VOIDmode,
2145 op0,
2146 gen_rtx_LSHIFTRT (DImode, temp,
2147 GEN_INT (10 - lowest_bit_set))));
2148 else if (lowest_bit_set > 10)
2149 emit_insn (gen_rtx_SET (VOIDmode,
2150 op0,
2151 gen_rtx_ASHIFT (DImode, temp,
2152 GEN_INT (lowest_bit_set - 10))));
2153 else
2154 abort ();
2155 return;
2156 }
2157
2158 /* 1) sethi %hi(low_bits), %reg
2159 * or %reg, %lo(low_bits), %reg
2160 * 2) sethi %hi(~low_bits), %reg
2161 * xor %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
2162 */
2163 if (high_bits == 0
2164 || high_bits == 0xffffffff)
2165 {
2166 sparc_emit_set_const64_quick1 (op0, temp, low_bits,
2167 (high_bits == 0xffffffff));
2168 return;
2169 }
2170
2171 /* Now, try 3-insn sequences. */
2172
2173 /* 1) sethi %hi(high_bits), %reg
2174 * or %reg, %lo(high_bits), %reg
2175 * sllx %reg, 32, %reg
2176 */
2177 if (low_bits == 0)
2178 {
2179 sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
2180 return;
2181 }
2182
2183 /* We may be able to do something quick
2184 when the constant is negated, so try that. */
2185 if (const64_is_2insns ((~high_bits) & 0xffffffff,
2186 (~low_bits) & 0xfffffc00))
2187 {
2188 /* NOTE: The trailing bits get XOR'd so we need the
2189 non-negated bits, not the negated ones. */
2190 unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;
2191
2192 if ((((~high_bits) & 0xffffffff) == 0
2193 && ((~low_bits) & 0x80000000) == 0)
2194 || (((~high_bits) & 0xffffffff) == 0xffffffff
2195 && ((~low_bits) & 0x80000000) != 0))
2196 {
2197 int fast_int = (~low_bits & 0xffffffff);
2198
2199 if ((SPARC_SETHI_P (fast_int)
2200 && (~high_bits & 0xffffffff) == 0)
2201 || SPARC_SIMM13_P (fast_int))
2202 emit_insn (gen_safe_SET64 (temp, fast_int));
2203 else
2204 sparc_emit_set_const64 (temp, GEN_INT64 (fast_int));
2205 }
2206 else
2207 {
2208 rtx negated_const;
2209 #if HOST_BITS_PER_WIDE_INT == 64
2210 negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
2211 (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
2212 #else
2213 negated_const = immed_double_const ((~low_bits) & 0xfffffc00,
2214 (~high_bits) & 0xffffffff,
2215 DImode);
2216 #endif
2217 sparc_emit_set_const64 (temp, negated_const);
2218 }
2219
2220 /* If we are XOR'ing with -1, then we should emit a one's complement
2221 instead. This way the combiner will notice logical operations
2222 such as ANDN later on and substitute. */
2223 if (trailing_bits == 0x3ff)
2224 {
2225 emit_insn (gen_rtx_SET (VOIDmode, op0,
2226 gen_rtx_NOT (DImode, temp)));
2227 }
2228 else
2229 {
2230 emit_insn (gen_rtx_SET (VOIDmode,
2231 op0,
2232 gen_safe_XOR64 (temp,
2233 (-0x400 | trailing_bits))));
2234 }
2235 return;
2236 }
2237
2238 /* 1) sethi %hi(xxx), %reg
2239 * or %reg, %lo(xxx), %reg
2240 * sllx %reg, yyy, %reg
2241 *
2242 * ??? This is just a generalized version of the low_bits==0
2243 * thing above, FIXME...
2244 */
2245 if ((highest_bit_set - lowest_bit_set) < 32)
2246 {
2247 unsigned HOST_WIDE_INT focus_bits =
2248 create_simple_focus_bits (high_bits, low_bits,
2249 lowest_bit_set, 0);
2250
2251 /* We can't get here in this state. */
2252 if (highest_bit_set < 32
2253 || lowest_bit_set >= 32)
2254 abort ();
2255
2256 /* So what we know is that the set bits straddle the
2257 middle of the 64-bit word. */
2258 sparc_emit_set_const64_quick2 (op0, temp,
2259 focus_bits, 0,
2260 lowest_bit_set);
2261 return;
2262 }
2263
2264 /* 1) sethi %hi(high_bits), %reg
2265 * or %reg, %lo(high_bits), %reg
2266 * sllx %reg, 32, %reg
2267 * or %reg, low_bits, %reg
2268 */
2269 if (SPARC_SIMM13_P (low_bits)
2270 && ((int) low_bits > 0))
2271 {
2272 sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
2273 return;
2274 }
2275
2276 /* The easiest way, when all else fails, is full decomposition. */
2277 #if 0
2278 printf ("sparc_emit_set_const64: Hard constant [%08lx%08lx] neg[%08lx%08lx]\n",
2279 high_bits, low_bits, ~high_bits, ~low_bits);
2280 #endif
2281 sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
2282 }
2283
2284 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
2285 return the mode to be used for the comparison. For floating-point,
2286 CCFP[E]mode is used. CC_NOOVmode should be used when the first operand
2287 is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
2288 processing is needed. */
2289
2290 enum machine_mode
2291 select_cc_mode (enum rtx_code op, rtx x, rtx y ATTRIBUTE_UNUSED)
2292 {
2293 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2294 {
2295 switch (op)
2296 {
2297 case EQ:
2298 case NE:
2299 case UNORDERED:
2300 case ORDERED:
2301 case UNLT:
2302 case UNLE:
2303 case UNGT:
2304 case UNGE:
2305 case UNEQ:
2306 case LTGT:
2307 return CCFPmode;
2308
2309 case LT:
2310 case LE:
2311 case GT:
2312 case GE:
2313 return CCFPEmode;
2314
2315 default:
2316 abort ();
2317 }
2318 }
2319 else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
2320 || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
2321 {
2322 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2323 return CCX_NOOVmode;
2324 else
2325 return CC_NOOVmode;
2326 }
2327 else
2328 {
2329 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2330 return CCXmode;
2331 else
2332 return CCmode;
2333 }
2334 }
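/* For example (an illustrative case, following the logic above): on
   arch64, a first operand of the form (plus:DI a b) yields
   CCX_NOOVmode, recording that the condition codes come from an
   arithmetic insn whose overflow bit cannot be relied upon, while a
   plain DImode register operand yields CCXmode.  */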
2335
2336 /* X and Y are two things to compare using CODE. Emit the compare insn and
2337 return the rtx for the cc reg in the proper mode. */
2338
2339 rtx
2340 gen_compare_reg (enum rtx_code code, rtx x, rtx y)
2341 {
2342 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
2343 rtx cc_reg;
2344
2345 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
2346 fcc regs (cse can't tell they're really call clobbered regs and will
2347 remove a duplicate comparison even if there is an intervening function
2348 call - it will then try to reload the cc reg via an int reg which is why
2349 we need the movcc patterns). It is possible to provide the movcc
2350 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
2351 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
2352 to tell cse that CCFPE mode registers (even pseudos) are call
2353 clobbered. */
2354
2355 /* ??? This is an experiment. Rather than making changes to cse which may
2356 or may not be easy/clean, we do our own cse. This is possible because
2357 we will generate hard registers. Cse knows they're call clobbered (it
2358 doesn't know the same thing about pseudos). If we guess wrong, no big
2359 deal, but if we win, great! */
2360
2361 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2362 #if 1 /* experiment */
2363 {
2364 int reg;
2365 /* We cycle through the registers to ensure they're all exercised. */
2366 static int next_fcc_reg = 0;
2367 /* Previous x,y for each fcc reg. */
2368 static rtx prev_args[4][2];
2369
2370 /* Scan prev_args for x,y. */
2371 for (reg = 0; reg < 4; reg++)
2372 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
2373 break;
2374 if (reg == 4)
2375 {
2376 reg = next_fcc_reg;
2377 prev_args[reg][0] = x;
2378 prev_args[reg][1] = y;
2379 next_fcc_reg = (next_fcc_reg + 1) & 3;
2380 }
2381 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
2382 }
2383 #else
2384 cc_reg = gen_reg_rtx (mode);
2385 #endif /* ! experiment */
2386 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2387 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
2388 else
2389 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
2390
2391 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
2392 gen_rtx_COMPARE (mode, x, y)));
2393
2394 return cc_reg;
2395 }
2396
2397 /* This function is used for v9 only.
2398 CODE is the code for an Scc's comparison.
2399 OPERANDS[0] is the target of the Scc insn.
2400 OPERANDS[1] is the value we compare against const0_rtx (which hasn't
2401 been generated yet).
2402
2403 This function is needed to turn
2404
2405 (set (reg:SI 110)
2406 (gt (reg:CCX 100 %icc)
2407 (const_int 0)))
2408 into
2409 (set (reg:SI 110)
2410 (gt:DI (reg:CCX 100 %icc)
2411 (const_int 0)))
2412
2413 I.e., the instruction recognizer needs to see the mode of the comparison to
2414 find the right instruction. We could use "gt:DI" right in the
2415 define_expand, but leaving it out allows us to handle DI, SI, etc.
2416
2417 We refer to the global sparc compare operands sparc_compare_op0 and
2418 sparc_compare_op1. */
2419
2420 int
2421 gen_v9_scc (enum rtx_code compare_code, register rtx *operands)
2422 {
2423 rtx temp, op0, op1;
2424
2425 if (! TARGET_ARCH64
2426 && (GET_MODE (sparc_compare_op0) == DImode
2427 || GET_MODE (operands[0]) == DImode))
2428 return 0;
2429
2430 op0 = sparc_compare_op0;
2431 op1 = sparc_compare_op1;
2432
2433 /* Try to use the movrCC insns. */
2434 if (TARGET_ARCH64
2435 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
2436 && op1 == const0_rtx
2437 && v9_regcmp_p (compare_code))
2438 {
2439 /* Special case for op0 != 0. This can be done with one instruction if
2440 operands[0] == sparc_compare_op0. */
2441
2442 if (compare_code == NE
2443 && GET_MODE (operands[0]) == DImode
2444 && rtx_equal_p (op0, operands[0]))
2445 {
2446 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2447 gen_rtx_IF_THEN_ELSE (DImode,
2448 gen_rtx_fmt_ee (compare_code, DImode,
2449 op0, const0_rtx),
2450 const1_rtx,
2451 operands[0])));
2452 return 1;
2453 }
2454
2455 if (reg_overlap_mentioned_p (operands[0], op0))
2456 {
2457 /* Handle the case where operands[0] == sparc_compare_op0.
2458 We "early clobber" the result. */
2459 op0 = gen_reg_rtx (GET_MODE (sparc_compare_op0));
2460 emit_move_insn (op0, sparc_compare_op0);
2461 }
2462
2463 emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx));
2464 if (GET_MODE (op0) != DImode)
2465 {
2466 temp = gen_reg_rtx (DImode);
2467 convert_move (temp, op0, 0);
2468 }
2469 else
2470 temp = op0;
2471 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2472 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
2473 gen_rtx_fmt_ee (compare_code, DImode,
2474 temp, const0_rtx),
2475 const1_rtx,
2476 operands[0])));
2477 return 1;
2478 }
2479 else
2480 {
2481 operands[1] = gen_compare_reg (compare_code, op0, op1);
2482
2483 switch (GET_MODE (operands[1]))
2484 {
2485 case CCmode :
2486 case CCXmode :
2487 case CCFPEmode :
2488 case CCFPmode :
2489 break;
2490 default :
2491 abort ();
2492 }
2493 emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx));
2494 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2495 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
2496 gen_rtx_fmt_ee (compare_code,
2497 GET_MODE (operands[1]),
2498 operands[1], const0_rtx),
2499 const1_rtx, operands[0])));
2500 return 1;
2501 }
2502 }
2503
2504 /* Emit a conditional jump insn for the v9 architecture using comparison code
2505 CODE and jump target LABEL.
2506 This function exists to take advantage of the v9 brxx insns. */
2507
2508 void
2509 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
2510 {
2511 emit_jump_insn (gen_rtx_SET (VOIDmode,
2512 pc_rtx,
2513 gen_rtx_IF_THEN_ELSE (VOIDmode,
2514 gen_rtx_fmt_ee (code, GET_MODE (op0),
2515 op0, const0_rtx),
2516 gen_rtx_LABEL_REF (VOIDmode, label),
2517 pc_rtx)));
2518 }
2519
2520 /* Generate a DFmode part of a hard TFmode register.
2521 REG is the TFmode hard register, LOW is 1 for the
2522 low 64 bits of the register and 0 otherwise.
2523 */
2524 rtx
2525 gen_df_reg (rtx reg, int low)
2526 {
2527 int regno = REGNO (reg);
2528
2529 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
2530 regno += (TARGET_ARCH64 && regno < 32) ? 1 : 2;
2531 return gen_rtx_REG (DFmode, regno);
2532 }
2533 \f
2534 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
2535 Unlike normal calls, TFmode operands are passed by reference. It is
2536 assumed that no more than 3 operands are required. */
2537
2538 static void
2539 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
2540 {
2541 rtx ret_slot = NULL, arg[3], func_sym;
2542 int i;
2543
2544 /* We only expect to be called for conversions, unary, and binary ops. */
2545 if (nargs < 2 || nargs > 3)
2546 abort ();
2547
2548 for (i = 0; i < nargs; ++i)
2549 {
2550 rtx this_arg = operands[i];
2551 rtx this_slot;
2552
2553 /* TFmode arguments and return values are passed by reference. */
2554 if (GET_MODE (this_arg) == TFmode)
2555 {
2556 int force_stack_temp;
2557
2558 force_stack_temp = 0;
2559 if (TARGET_BUGGY_QP_LIB && i == 0)
2560 force_stack_temp = 1;
2561
2562 if (GET_CODE (this_arg) == MEM
2563 && ! force_stack_temp)
2564 this_arg = XEXP (this_arg, 0);
2565 else if (CONSTANT_P (this_arg)
2566 && ! force_stack_temp)
2567 {
2568 this_slot = force_const_mem (TFmode, this_arg);
2569 this_arg = XEXP (this_slot, 0);
2570 }
2571 else
2572 {
2573 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
2574
2575 /* Operand 0 is the return value. We'll copy it out later. */
2576 if (i > 0)
2577 emit_move_insn (this_slot, this_arg);
2578 else
2579 ret_slot = this_slot;
2580
2581 this_arg = XEXP (this_slot, 0);
2582 }
2583 }
2584
2585 arg[i] = this_arg;
2586 }
2587
2588 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
2589
2590 if (GET_MODE (operands[0]) == TFmode)
2591 {
2592 if (nargs == 2)
2593 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2,
2594 arg[0], GET_MODE (arg[0]),
2595 arg[1], GET_MODE (arg[1]));
2596 else
2597 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3,
2598 arg[0], GET_MODE (arg[0]),
2599 arg[1], GET_MODE (arg[1]),
2600 arg[2], GET_MODE (arg[2]));
2601
2602 if (ret_slot)
2603 emit_move_insn (operands[0], ret_slot);
2604 }
2605 else
2606 {
2607 rtx ret;
2608
2609 if (nargs != 2)
2610 abort ();
2611
2612 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
2613 GET_MODE (operands[0]), 1,
2614 arg[1], GET_MODE (arg[1]));
2615
2616 if (ret != operands[0])
2617 emit_move_insn (operands[0], ret);
2618 }
2619 }
2620
2621 /* Expand soft-float TFmode calls to sparc abi routines. */
2622
2623 static void
2624 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
2625 {
2626 const char *func;
2627
2628 switch (code)
2629 {
2630 case PLUS:
2631 func = "_Qp_add";
2632 break;
2633 case MINUS:
2634 func = "_Qp_sub";
2635 break;
2636 case MULT:
2637 func = "_Qp_mul";
2638 break;
2639 case DIV:
2640 func = "_Qp_div";
2641 break;
2642 default:
2643 abort ();
2644 }
2645
2646 emit_soft_tfmode_libcall (func, 3, operands);
2647 }
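/* E.g. a TFmode addition "x = y + z" becomes, in effect,

       _Qp_add (&x, &y, &z);

   since TFmode operands and the TFmode result are all passed by
   reference through emit_soft_tfmode_libcall above (illustrative;
   the exact argument lowering depends on where the operands live).  */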
2648
2649 static void
2650 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
2651 {
2652 const char *func;
2653
2654 switch (code)
2655 {
2656 case SQRT:
2657 func = "_Qp_sqrt";
2658 break;
2659 default:
2660 abort ();
2661 }
2662
2663 emit_soft_tfmode_libcall (func, 2, operands);
2664 }
2665
2666 static void
2667 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
2668 {
2669 const char *func;
2670
2671 switch (code)
2672 {
2673 case FLOAT_EXTEND:
2674 switch (GET_MODE (operands[1]))
2675 {
2676 case SFmode:
2677 func = "_Qp_stoq";
2678 break;
2679 case DFmode:
2680 func = "_Qp_dtoq";
2681 break;
2682 default:
2683 abort ();
2684 }
2685 break;
2686
2687 case FLOAT_TRUNCATE:
2688 switch (GET_MODE (operands[0]))
2689 {
2690 case SFmode:
2691 func = "_Qp_qtos";
2692 break;
2693 case DFmode:
2694 func = "_Qp_qtod";
2695 break;
2696 default:
2697 abort ();
2698 }
2699 break;
2700
2701 case FLOAT:
2702 switch (GET_MODE (operands[1]))
2703 {
2704 case SImode:
2705 func = "_Qp_itoq";
2706 break;
2707 case DImode:
2708 func = "_Qp_xtoq";
2709 break;
2710 default:
2711 abort ();
2712 }
2713 break;
2714
2715 case UNSIGNED_FLOAT:
2716 switch (GET_MODE (operands[1]))
2717 {
2718 case SImode:
2719 func = "_Qp_uitoq";
2720 break;
2721 case DImode:
2722 func = "_Qp_uxtoq";
2723 break;
2724 default:
2725 abort ();
2726 }
2727 break;
2728
2729 case FIX:
2730 switch (GET_MODE (operands[0]))
2731 {
2732 case SImode:
2733 func = "_Qp_qtoi";
2734 break;
2735 case DImode:
2736 func = "_Qp_qtox";
2737 break;
2738 default:
2739 abort ();
2740 }
2741 break;
2742
2743 case UNSIGNED_FIX:
2744 switch (GET_MODE (operands[0]))
2745 {
2746 case SImode:
2747 func = "_Qp_qtoui";
2748 break;
2749 case DImode:
2750 func = "_Qp_qtoux";
2751 break;
2752 default:
2753 abort ();
2754 }
2755 break;
2756
2757 default:
2758 abort ();
2759 }
2760
2761 emit_soft_tfmode_libcall (func, 2, operands);
2762 }
2763
2764 /* Expand a hard-float tfmode operation. All arguments must be in
2765 registers. */
2766
2767 static void
2768 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
2769 {
2770 rtx op, dest;
2771
2772 if (GET_RTX_CLASS (code) == RTX_UNARY)
2773 {
2774 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2775 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
2776 }
2777 else
2778 {
2779 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2780 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
2781 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
2782 operands[1], operands[2]);
2783 }
2784
2785 if (register_operand (operands[0], VOIDmode))
2786 dest = operands[0];
2787 else
2788 dest = gen_reg_rtx (GET_MODE (operands[0]));
2789
2790 emit_insn (gen_rtx_SET (VOIDmode, dest, op));
2791
2792 if (dest != operands[0])
2793 emit_move_insn (operands[0], dest);
2794 }
2795
2796 void
2797 emit_tfmode_binop (enum rtx_code code, rtx *operands)
2798 {
2799 if (TARGET_HARD_QUAD)
2800 emit_hard_tfmode_operation (code, operands);
2801 else
2802 emit_soft_tfmode_binop (code, operands);
2803 }
2804
2805 void
2806 emit_tfmode_unop (enum rtx_code code, rtx *operands)
2807 {
2808 if (TARGET_HARD_QUAD)
2809 emit_hard_tfmode_operation (code, operands);
2810 else
2811 emit_soft_tfmode_unop (code, operands);
2812 }
2813
2814 void
2815 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
2816 {
2817 if (TARGET_HARD_QUAD)
2818 emit_hard_tfmode_operation (code, operands);
2819 else
2820 emit_soft_tfmode_cvt (code, operands);
2821 }
2822 \f
2823 /* Return nonzero if a branch/jump/call instruction will emit a
2824 nop into its delay slot. */
2825
2826 int
2827 empty_delay_slot (rtx insn)
2828 {
2829 rtx seq;
2830
2831 /* If there is no previous instruction (should not happen), return true. */
2832 if (PREV_INSN (insn) == NULL)
2833 return 1;
2834
2835 seq = NEXT_INSN (PREV_INSN (insn));
2836 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
2837 return 0;
2838
2839 return 1;
2840 }
2841
2842 /* Return nonzero if TRIAL can go into the call delay slot. */
2843
2844 int
2845 tls_call_delay (rtx trial)
2846 {
2847 rtx pat, unspec;
2848
2849 /* Binutils allows
2850 call __tls_get_addr, %tgd_call (foo)
2851 add %l7, %o0, %o0, %tgd_add (foo)
2852 while Sun as/ld does not. */
2853 if (TARGET_GNU_TLS || !TARGET_TLS)
2854 return 1;
2855
2856 pat = PATTERN (trial);
2857 if (GET_CODE (pat) != SET || GET_CODE (SET_DEST (pat)) != PLUS)
2858 return 1;
2859
2860 unspec = XEXP (SET_DEST (pat), 1);
2861 if (GET_CODE (unspec) != UNSPEC
2862 || (XINT (unspec, 1) != UNSPEC_TLSGD
2863 && XINT (unspec, 1) != UNSPEC_TLSLDM))
2864 return 1;
2865
2866 return 0;
2867 }
2868
2869 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
2870 instruction. RETURN_P is true if the v9 variant 'return' is to be
2871 considered in the test too.
2872
2873 TRIAL must be a SET whose destination is a REG appropriate for the
2874 'restore' instruction or, if RETURN_P is true, for the 'return'
2875 instruction. */
2876
2877 static int
2878 eligible_for_restore_insn (rtx trial, bool return_p)
2879 {
2880 rtx pat = PATTERN (trial);
2881 rtx src = SET_SRC (pat);
2882
2883 /* The 'restore src,%g0,dest' pattern for word mode and below. */
2884 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2885 && arith_operand (src, GET_MODE (src)))
2886 {
2887 if (TARGET_ARCH64)
2888 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2889 else
2890 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
2891 }
2892
2893 /* The 'restore src,%g0,dest' pattern for double-word mode. */
2894 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2895 && arith_double_operand (src, GET_MODE (src)))
2896 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2897
2898 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
2899 else if (! TARGET_FPU && register_operand (src, SFmode))
2900 return 1;
2901
2902 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
2903 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
2904 return 1;
2905
2906 /* If we have the 'return' instruction, anything that does not use
2907 local or output registers and can go into a delay slot wins. */
2908 else if (return_p && TARGET_V9 && ! epilogue_renumber (&pat, 1)
2909 && (get_attr_in_uncond_branch_delay (trial)
2910 == IN_UNCOND_BRANCH_DELAY_TRUE))
2911 return 1;
2912
2913 /* The 'restore src1,src2,dest' pattern for SImode. */
2914 else if (GET_CODE (src) == PLUS
2915 && register_operand (XEXP (src, 0), SImode)
2916 && arith_operand (XEXP (src, 1), SImode))
2917 return 1;
2918
2919 /* The 'restore src1,src2,dest' pattern for DImode. */
2920 else if (GET_CODE (src) == PLUS
2921 && register_operand (XEXP (src, 0), DImode)
2922 && arith_double_operand (XEXP (src, 1), DImode))
2923 return 1;
2924
2925 /* The 'restore src1,%lo(src2),dest' pattern. */
2926 else if (GET_CODE (src) == LO_SUM
2927 && ! TARGET_CM_MEDMID
2928 && ((register_operand (XEXP (src, 0), SImode)
2929 && immediate_operand (XEXP (src, 1), SImode))
2930 || (TARGET_ARCH64
2931 && register_operand (XEXP (src, 0), DImode)
2932 && immediate_operand (XEXP (src, 1), DImode))))
2933 return 1;
2934
2935 /* The 'restore src,src,dest' pattern. */
2936 else if (GET_CODE (src) == ASHIFT
2937 && (register_operand (XEXP (src, 0), SImode)
2938 || register_operand (XEXP (src, 0), DImode))
2939 && XEXP (src, 1) == const1_rtx)
2940 return 1;
2941
2942 return 0;
2943 }
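/* For example (an illustrative case): a final copy "%i0 = %o1" matches
   the first pattern above, so instead of a separate move the compiler
   can emit

        restore %o1, %g0, %o0

   performing the move for free as part of the register-window pop
   (the callee's %i0 is the caller's %o0).  */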
2944
2945 /* Return nonzero if TRIAL can go into the function return's
2946 delay slot. */
2947
2948 int
2949 eligible_for_return_delay (rtx trial)
2950 {
2951 int leaf_function_p = current_function_uses_only_leaf_regs;
2952 rtx pat;
2953
2954 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2955 return 0;
2956
2957 if (get_attr_length (trial) != 1)
2958 return 0;
2959
2960 /* If there are any call-saved registers, we should check that TRIAL
2961 does not reference them. For now, just be conservative. */
2962 if (num_gfregs)
2963 return 0;
2964
2965 /* If the function uses __builtin_eh_return, the eh_return machinery
2966 occupies the delay slot. */
2967 if (current_function_calls_eh_return)
2968 return 0;
2969
2970 /* In the case of a true leaf function, anything can go into the slot. */
2971 if (leaf_function_p)
2972 return get_attr_in_uncond_branch_delay (trial)
2973 == IN_UNCOND_BRANCH_DELAY_TRUE;
2974
2975 pat = PATTERN (trial);
2976
2977 /* Otherwise, only operations which can be done in tandem with
2978 a `restore' or `return' insn can go into the delay slot. */
2979 if (GET_CODE (SET_DEST (pat)) != REG
2980 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24))
2981 return 0;
2982
2983 /* If this instruction sets a floating-point register and we have a return
2984 instruction, it can probably go in. But restore will not work
2985 with FP_REGS. */
2986 if (REGNO (SET_DEST (pat)) >= 32)
2987 return (TARGET_V9
2988 && ! epilogue_renumber (&pat, 1)
2989 && (get_attr_in_uncond_branch_delay (trial)
2990 == IN_UNCOND_BRANCH_DELAY_TRUE));
2991
2992 return eligible_for_restore_insn (trial, true);
2993 }
2994
2995 /* Return nonzero if TRIAL can go into the sibling call's
2996 delay slot. */
2997
2998 int
2999 eligible_for_sibcall_delay (rtx trial)
3000 {
3001 int leaf_function_p = current_function_uses_only_leaf_regs;
3002 rtx pat;
3003
3004 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
3005 return 0;
3006
3007 if (get_attr_length (trial) != 1)
3008 return 0;
3009
3010 pat = PATTERN (trial);
3011
3012 if (leaf_function_p)
3013 {
3014 /* If the tail call is done using the call instruction,
3015 we have to restore %o7 in the delay slot. */
3016 if (LEAF_SIBCALL_SLOT_RESERVED_P)
3017 return 0;
3018
3019 /* %g1 is used to build the function address. */
3020 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
3021 return 0;
3022
3023 return 1;
3024 }
3025
3026 /* Otherwise, only operations which can be done in tandem with
3027 a `restore' insn can go into the delay slot. */
3028 if (GET_CODE (SET_DEST (pat)) != REG
3029 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
3030 || REGNO (SET_DEST (pat)) >= 32)
3031 return 0;
3032
3033 /* If it mentions %o7, it can't go in, because sibcall will clobber it
3034 in most cases. */
3035 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
3036 return 0;
3037
3038 return eligible_for_restore_insn (trial, false);
3039 }
3040
3041 int
3042 short_branch (int uid1, int uid2)
3043 {
3044 int delta = INSN_ADDRESSES (uid1) - INSN_ADDRESSES (uid2);
3045
3046 /* Leave a few words of "slop". */
3047 if (delta >= -1023 && delta <= 1022)
3048 return 1;
3049
3050 return 0;
3051 }
3052
3053 /* Return nonzero if REG is not used after INSN.
3054 We assume REG is a reload reg, and therefore does
3055 not live past labels or calls or jumps. */
3056 int
3057 reg_unused_after (rtx reg, rtx insn)
3058 {
3059 enum rtx_code code, prev_code = UNKNOWN;
3060
3061 while ((insn = NEXT_INSN (insn)))
3062 {
3063 if (prev_code == CALL_INSN && call_used_regs[REGNO (reg)])
3064 return 1;
3065
3066 code = GET_CODE (insn);
3067 if (GET_CODE (insn) == CODE_LABEL)
3068 return 1;
3069
3070 if (INSN_P (insn))
3071 {
3072 rtx set = single_set (insn);
3073 int in_src = set && reg_overlap_mentioned_p (reg, SET_SRC (set));
3074 if (set && in_src)
3075 return 0;
3076 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
3077 return 1;
3078 if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
3079 return 0;
3080 }
3081 prev_code = code;
3082 }
3083 return 1;
3084 }
3085 \f
3086 /* Determine if it's legal to put X into the constant pool. This
3087 is not possible if X contains the address of a symbol that is
3088 not constant (TLS) or not known at final link time (PIC). */
3089
3090 static bool
3091 sparc_cannot_force_const_mem (rtx x)
3092 {
3093 switch (GET_CODE (x))
3094 {
3095 case CONST_INT:
3096 case CONST_DOUBLE:
3097 /* Accept all non-symbolic constants. */
3098 return false;
3099
3100 case LABEL_REF:
3101 /* Labels are OK iff we are non-PIC. */
3102 return flag_pic != 0;
3103
3104 case SYMBOL_REF:
3105 /* 'Naked' TLS symbol references are never OK,
3106 non-TLS symbols are OK iff we are non-PIC. */
3107 if (SYMBOL_REF_TLS_MODEL (x))
3108 return true;
3109 else
3110 return flag_pic != 0;
3111
3112 case CONST:
3113 return sparc_cannot_force_const_mem (XEXP (x, 0));
3114 case PLUS:
3115 case MINUS:
3116 return sparc_cannot_force_const_mem (XEXP (x, 0))
3117 || sparc_cannot_force_const_mem (XEXP (x, 1));
3118 case UNSPEC:
3119 return true;
3120 default:
3121 abort ();
3122 }
3123 }
3124 \f
3125 /* The table we use to reference PIC data. */
3126 static GTY(()) rtx global_offset_table;
3127
3128 /* The function we use to get at it. */
3129 static GTY(()) rtx add_pc_to_pic_symbol;
3130 static GTY(()) char add_pc_to_pic_symbol_name[256];
3131
3132 /* Ensure that we are not using patterns that are not OK with PIC. */
3133
3134 int
3135 check_pic (int i)
3136 {
3137 switch (flag_pic)
3138 {
3139 case 1:
3140 if (GET_CODE (recog_data.operand[i]) == SYMBOL_REF
3141 || (GET_CODE (recog_data.operand[i]) == CONST
3142 && ! (GET_CODE (XEXP (recog_data.operand[i], 0)) == MINUS
3143 && (XEXP (XEXP (recog_data.operand[i], 0), 0)
3144 == global_offset_table)
3145 && (GET_CODE (XEXP (XEXP (recog_data.operand[i], 0), 1))
3146 == CONST))))
3147 abort ();
3148 case 2:
3149 default:
3150 return 1;
3151 }
3152 }
3153
3154 /* Return true if X is an address which needs a temporary register when
3155 reloaded while generating PIC code. */
3156
3157 int
3158 pic_address_needs_scratch (rtx x)
3159 {
3160 /* An address which is a symbolic operand plus a non-SMALL_INT constant needs a temp reg. */
3161 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
3162 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
3163 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3164 && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
3165 return 1;
3166
3167 return 0;
3168 }
3169
3170 /* Determine if a given RTX is a valid constant. We already know this
3171 satisfies CONSTANT_P. */
3172
3173 bool
3174 legitimate_constant_p (rtx x)
3175 {
3176 rtx inner;
3177
3178 switch (GET_CODE (x))
3179 {
3180 case SYMBOL_REF:
3181 /* TLS symbols are not constant. */
3182 if (SYMBOL_REF_TLS_MODEL (x))
3183 return false;
3184 break;
3185
3186 case CONST:
3187 inner = XEXP (x, 0);
3188
3189 /* Offsets of TLS symbols are never valid.
3190 Discourage CSE from creating them. */
3191 if (GET_CODE (inner) == PLUS
3192 && tls_symbolic_operand (XEXP (inner, 0)))
3193 return false;
3194 break;
3195
3196 case CONST_DOUBLE:
3197 if (GET_MODE (x) == VOIDmode)
3198 return true;
3199
3200 /* Floating point constants are generally not ok.
3201 The only exception is 0.0 in VIS. */
3202 if (TARGET_VIS
3203 && (GET_MODE (x) == SFmode
3204 || GET_MODE (x) == DFmode
3205 || GET_MODE (x) == TFmode)
3206 && fp_zero_operand (x, GET_MODE (x)))
3207 return true;
3208
3209 return false;
3210
3211 default:
3212 break;
3213 }
3214
3215 return true;
3216 }
3217
3218 /* Determine if a given RTX is a valid constant address. */
3219
3220 bool
3221 constant_address_p (rtx x)
3222 {
3223 switch (GET_CODE (x))
3224 {
3225 case LABEL_REF:
3226 case CONST_INT:
3227 case HIGH:
3228 return true;
3229
3230 case CONST:
3231 if (flag_pic && pic_address_needs_scratch (x))
3232 return false;
3233 return legitimate_constant_p (x);
3234
3235 case SYMBOL_REF:
3236 return !flag_pic && legitimate_constant_p (x);
3237
3238 default:
3239 return false;
3240 }
3241 }
3242
3243 /* Nonzero if the constant value X is a legitimate general operand
3244 when generating PIC code. It is given that flag_pic is on and
3245 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
3246
3247 bool
3248 legitimate_pic_operand_p (rtx x)
3249 {
3250 if (pic_address_needs_scratch (x))
3251 return false;
3252 if (tls_symbolic_operand (x)
3253 || (GET_CODE (x) == CONST
3254 && GET_CODE (XEXP (x, 0)) == PLUS
3255 && tls_symbolic_operand (XEXP (XEXP (x, 0), 0))))
3256 return false;
3257 return true;
3258 }
3259
3260 /* Return nonzero if ADDR is a valid memory address.
3261 STRICT specifies whether strict register checking applies. */
3262
3263 int
3264 legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
3265 {
3266 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL, imm2;
3267
3268 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
3269 rs1 = addr;
3270 else if (GET_CODE (addr) == PLUS)
3271 {
3272 rs1 = XEXP (addr, 0);
3273 rs2 = XEXP (addr, 1);
3274
3275 /* Canonicalize: REG comes first; if there are no regs,
3276 LO_SUM comes first. */
3277 if (!REG_P (rs1)
3278 && GET_CODE (rs1) != SUBREG
3279 && (REG_P (rs2)
3280 || GET_CODE (rs2) == SUBREG
3281 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
3282 {
3283 rs1 = XEXP (addr, 1);
3284 rs2 = XEXP (addr, 0);
3285 }
3286
3287 if ((flag_pic == 1
3288 && rs1 == pic_offset_table_rtx
3289 && !REG_P (rs2)
3290 && GET_CODE (rs2) != SUBREG
3291 && GET_CODE (rs2) != LO_SUM
3292 && GET_CODE (rs2) != MEM
3293 && !tls_symbolic_operand (rs2)
3294 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
3295 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
3296 || ((REG_P (rs1)
3297 || GET_CODE (rs1) == SUBREG)
3298 && RTX_OK_FOR_OFFSET_P (rs2)))
3299 {
3300 imm1 = rs2;
3301 rs2 = NULL;
3302 }
3303 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
3304 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
3305 {
3306 /* We prohibit REG + REG for TFmode when there are no instructions
3307 which accept REG+REG addresses. We do this because REG+REG
3308 is not an offsetable address. If we get the situation in reload
3309 where source and destination of a movtf pattern are both MEMs with
3310 REG+REG address, then only one of them gets converted to an
3311 offsetable address. */
3312 if (mode == TFmode
3313 && !(TARGET_FPU && TARGET_ARCH64 && TARGET_V9
3314 && TARGET_HARD_QUAD))
3315 return 0;
3316
3317 /* We prohibit REG + REG on ARCH32 if not optimizing for
3318 DFmode/DImode because then mem_min_alignment is likely to be zero
3319 after reload and the forced split would lack a matching splitter
3320 pattern. */
3321 if (TARGET_ARCH32 && !optimize
3322 && (mode == DFmode || mode == DImode))
3323 return 0;
3324 }
3325 else if (USE_AS_OFFSETABLE_LO10
3326 && GET_CODE (rs1) == LO_SUM
3327 && TARGET_ARCH64
3328 && ! TARGET_CM_MEDMID
3329 && RTX_OK_FOR_OLO10_P (rs2))
3330 {
3331 imm2 = rs2;
3332 rs2 = NULL;
3333 imm1 = XEXP (rs1, 1);
3334 rs1 = XEXP (rs1, 0);
3335 if (! CONSTANT_P (imm1) || tls_symbolic_operand (rs1))
3336 return 0;
3337 }
3338 }
3339 else if (GET_CODE (addr) == LO_SUM)
3340 {
3341 rs1 = XEXP (addr, 0);
3342 imm1 = XEXP (addr, 1);
3343
3344 if (! CONSTANT_P (imm1) || tls_symbolic_operand (rs1))
3345 return 0;
3346
3347 /* We can't allow TFmode, because an offset greater than or equal to the
3348 alignment (8) may cause the LO_SUM to overflow if !v9. */
3349 if (mode == TFmode && !TARGET_V9)
3350 return 0;
3351 }
3352 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
3353 return 1;
3354 else
3355 return 0;
3356
3357 if (GET_CODE (rs1) == SUBREG)
3358 rs1 = SUBREG_REG (rs1);
3359 if (!REG_P (rs1))
3360 return 0;
3361
3362 if (rs2)
3363 {
3364 if (GET_CODE (rs2) == SUBREG)
3365 rs2 = SUBREG_REG (rs2);
3366 if (!REG_P (rs2))
3367 return 0;
3368 }
3369
3370 if (strict)
3371 {
3372 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
3373 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
3374 return 0;
3375 }
3376 else
3377 {
3378 if ((REGNO (rs1) >= 32
3379 && REGNO (rs1) != FRAME_POINTER_REGNUM
3380 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
3381 || (rs2
3382 && (REGNO (rs2) >= 32
3383 && REGNO (rs2) != FRAME_POINTER_REGNUM
3384 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
3385 return 0;
3386 }
3387 return 1;
3388 }
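/* In other words (an illustrative summary of the checks above): the
   accepted addresses are REG, REG + REG, REG + simm13,
   LO_SUM (REG, imm) and small constant integers, subject to the PIC,
   TLS and mode restrictions tested along the way.  */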
3389
3390 /* Construct the SYMBOL_REF for the __tls_get_addr function. */
3391
3392 static GTY(()) rtx sparc_tls_symbol;
3393 static rtx
3394 sparc_tls_get_addr (void)
3395 {
3396 if (!sparc_tls_symbol)
3397 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
3398
3399 return sparc_tls_symbol;
3400 }
3401
3402 static rtx
3403 sparc_tls_got (void)
3404 {
3405 rtx temp;
3406 if (flag_pic)
3407 {
3408 current_function_uses_pic_offset_table = 1;
3409 return pic_offset_table_rtx;
3410 }
3411
3412 if (!global_offset_table)
3413 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3414 temp = gen_reg_rtx (Pmode);
3415 emit_move_insn (temp, global_offset_table);
3416 return temp;
3417 }
3418
3419
3420 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3421 this (thread-local) address. */
3422
3423 rtx
3424 legitimize_tls_address (rtx addr)
3425 {
3426 rtx temp1, temp2, temp3, ret, o0, got, insn;
3427
3428 if (no_new_pseudos)
3429 abort ();
3430
3431 if (GET_CODE (addr) == SYMBOL_REF)
3432 switch (SYMBOL_REF_TLS_MODEL (addr))
3433 {
3434 case TLS_MODEL_GLOBAL_DYNAMIC:
3435 start_sequence ();
3436 temp1 = gen_reg_rtx (SImode);
3437 temp2 = gen_reg_rtx (SImode);
3438 ret = gen_reg_rtx (Pmode);
3439 o0 = gen_rtx_REG (Pmode, 8);
3440 got = sparc_tls_got ();
3441 emit_insn (gen_tgd_hi22 (temp1, addr));
3442 emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
3443 if (TARGET_ARCH32)
3444 {
3445 emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
3446 insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
3447 addr, const1_rtx));
3448 }
3449 else
3450 {
3451 emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
3452 insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
3453 addr, const1_rtx));
3454 }
3455 CALL_INSN_FUNCTION_USAGE (insn)
3456 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3457 CALL_INSN_FUNCTION_USAGE (insn));
3458 insn = get_insns ();
3459 end_sequence ();
3460 emit_libcall_block (insn, ret, o0, addr);
3461 break;
3462
3463 case TLS_MODEL_LOCAL_DYNAMIC:
3464 start_sequence ();
3465 temp1 = gen_reg_rtx (SImode);
3466 temp2 = gen_reg_rtx (SImode);
3467 temp3 = gen_reg_rtx (Pmode);
3468 ret = gen_reg_rtx (Pmode);
3469 o0 = gen_rtx_REG (Pmode, 8);
3470 got = sparc_tls_got ();
3471 emit_insn (gen_tldm_hi22 (temp1));
3472 emit_insn (gen_tldm_lo10 (temp2, temp1));
3473 if (TARGET_ARCH32)
3474 {
3475 emit_insn (gen_tldm_add32 (o0, got, temp2));
3476 insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
3477 const1_rtx));
3478 }
3479 else
3480 {
3481 emit_insn (gen_tldm_add64 (o0, got, temp2));
3482 insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
3483 const1_rtx));
3484 }
3485 CALL_INSN_FUNCTION_USAGE (insn)
3486 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3487 CALL_INSN_FUNCTION_USAGE (insn));
3488 insn = get_insns ();
3489 end_sequence ();
3490 emit_libcall_block (insn, temp3, o0,
3491 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
3492 UNSPEC_TLSLD_BASE));
3493 temp1 = gen_reg_rtx (SImode);
3494 temp2 = gen_reg_rtx (SImode);
3495 emit_insn (gen_tldo_hix22 (temp1, addr));
3496 emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
3497 if (TARGET_ARCH32)
3498 emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
3499 else
3500 emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
3501 break;
3502
3503 case TLS_MODEL_INITIAL_EXEC:
3504 temp1 = gen_reg_rtx (SImode);
3505 temp2 = gen_reg_rtx (SImode);
3506 temp3 = gen_reg_rtx (Pmode);
3507 got = sparc_tls_got ();
3508 emit_insn (gen_tie_hi22 (temp1, addr));
3509 emit_insn (gen_tie_lo10 (temp2, temp1, addr));
3510 if (TARGET_ARCH32)
3511 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
3512 else
3513 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
3514 if (TARGET_SUN_TLS)
3515 {
3516 ret = gen_reg_rtx (Pmode);
3517 if (TARGET_ARCH32)
3518 emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
3519 temp3, addr));
3520 else
3521 emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
3522 temp3, addr));
3523 }
3524 else
3525 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
3526 break;
3527
3528 case TLS_MODEL_LOCAL_EXEC:
3529 temp1 = gen_reg_rtx (Pmode);
3530 temp2 = gen_reg_rtx (Pmode);
3531 if (TARGET_ARCH32)
3532 {
3533 emit_insn (gen_tle_hix22_sp32 (temp1, addr));
3534 emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
3535 }
3536 else
3537 {
3538 emit_insn (gen_tle_hix22_sp64 (temp1, addr));
3539 emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
3540 }
3541 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
3542 break;
3543
3544 default:
3545 abort ();
3546 }
3547
3548 else
3549 abort (); /* for now ... */
3550
3551 return ret;
3552 }
3553
3554
3555 /* Legitimize PIC addresses. If the address is already position-independent,
3556 we return ORIG. Newly generated position-independent addresses go into a
3557 reg. This is REG if nonzero, otherwise we allocate register(s) as
3558 necessary. */
3559
3560 rtx
3561 legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
3562 rtx reg)
3563 {
3564 if (GET_CODE (orig) == SYMBOL_REF)
3565 {
3566 rtx pic_ref, address;
3567 rtx insn;
3568
3569 if (reg == 0)
3570 {
3571 if (reload_in_progress || reload_completed)
3572 abort ();
3573 else
3574 reg = gen_reg_rtx (Pmode);
3575 }
3576
3577 if (flag_pic == 2)
3578 {
3579 /* If not during reload, allocate another temp reg here for loading
3580 in the address, so that these instructions can be optimized
3581 properly. */
3582 rtx temp_reg = ((reload_in_progress || reload_completed)
3583 ? reg : gen_reg_rtx (Pmode));
3584
3585 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
3586 won't get confused into thinking that these two instructions
3587 are loading in the true address of the symbol. If in the
3588 future a PIC rtx exists, that should be used instead. */
3589 if (Pmode == SImode)
3590 {
3591 emit_insn (gen_movsi_high_pic (temp_reg, orig));
3592 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
3593 }
3594 else
3595 {
3596 emit_insn (gen_movdi_high_pic (temp_reg, orig));
3597 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
3598 }
3599 address = temp_reg;
3600 }
3601 else
3602 address = orig;
3603
3604 pic_ref = gen_rtx_MEM (Pmode,
3605 gen_rtx_PLUS (Pmode,
3606 pic_offset_table_rtx, address));
3607 current_function_uses_pic_offset_table = 1;
3608 RTX_UNCHANGING_P (pic_ref) = 1;
3609 insn = emit_move_insn (reg, pic_ref);
3610 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3611 by loop. */
3612 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
3613 REG_NOTES (insn));
3614 return reg;
3615 }
3616 else if (GET_CODE (orig) == CONST)
3617 {
3618 rtx base, offset;
3619
3620 if (GET_CODE (XEXP (orig, 0)) == PLUS
3621 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3622 return orig;
3623
3624 if (reg == 0)
3625 {
3626 if (reload_in_progress || reload_completed)
3627 abort ();
3628 else
3629 reg = gen_reg_rtx (Pmode);
3630 }
3631
3632 if (GET_CODE (XEXP (orig, 0)) == PLUS)
3633 {
3634 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3635 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3636 base == reg ? 0 : reg);
3637 }
3638 else
3639 abort ();
3640
3641 if (GET_CODE (offset) == CONST_INT)
3642 {
3643 if (SMALL_INT (offset))
3644 return plus_constant (base, INTVAL (offset));
3645 else if (! reload_in_progress && ! reload_completed)
3646 offset = force_reg (Pmode, offset);
3647 else
3648 /* If we reach here, then something is seriously wrong. */
3649 abort ();
3650 }
3651 return gen_rtx_PLUS (Pmode, base, offset);
3652 }
3653 else if (GET_CODE (orig) == LABEL_REF)
3654 /* ??? Why do we do this? */
3655 /* Now movsi_pic_label_ref uses it, but we ought to be checking that
3656 the register is live instead, in case it is eliminated. */
3657 current_function_uses_pic_offset_table = 1;
3658
3659 return orig;
3660 }
3661
3662 /* Try machine-dependent ways of modifying an illegitimate address X
3663 to be legitimate. If we find one, return the new, valid address.
3664
3665 OLDX is the address as it was before break_out_memory_refs was called.
3666 In some cases it is useful to look at this to decide what needs to be done.
3667
3668 MODE is the mode of the operand pointed to by X. */
3669
3670 rtx
3671 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
3672 {
3673 rtx orig_x = x;
3674
3675 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
3676 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3677 force_operand (XEXP (x, 0), NULL_RTX));
3678 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
3679 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3680 force_operand (XEXP (x, 1), NULL_RTX));
3681 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
3682 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
3683 XEXP (x, 1));
3684 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
3685 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3686 force_operand (XEXP (x, 1), NULL_RTX));
3687
3688 if (x != orig_x && legitimate_address_p (mode, x, FALSE))
3689 return x;
3690
3691 if (tls_symbolic_operand (x))
3692 x = legitimize_tls_address (x);
3693 else if (flag_pic)
3694 x = legitimize_pic_address (x, mode, 0);
3695 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
3696 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3697 copy_to_mode_reg (Pmode, XEXP (x, 1)));
3698 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
3699 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3700 copy_to_mode_reg (Pmode, XEXP (x, 0)));
3701 else if (GET_CODE (x) == SYMBOL_REF
3702 || GET_CODE (x) == CONST
3703 || GET_CODE (x) == LABEL_REF)
3704 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
3705 return x;
3706 }
3707
3708 /* Emit the special PIC prologue. */
3709
3710 static void
3711 load_pic_register (void)
3712 {
3713 int orig_flag_pic = flag_pic;
3714
3715 /* If we haven't emitted the special helper function, do so now. */
3716 if (add_pc_to_pic_symbol_name[0] == 0)
3717 {
3718 const char *pic_name = reg_names[REGNO (pic_offset_table_rtx)];
3719 int align;
3720
3721 ASM_GENERATE_INTERNAL_LABEL (add_pc_to_pic_symbol_name, "LADDPC", 0);
3722 text_section ();
3723
3724 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
3725 if (align > 0)
3726 ASM_OUTPUT_ALIGN (asm_out_file, align);
3727 ASM_OUTPUT_LABEL (asm_out_file, add_pc_to_pic_symbol_name);
3728 if (flag_delayed_branch)
3729 fprintf (asm_out_file, "\tjmp %%o7+8\n\t add\t%%o7, %s, %s\n",
3730 pic_name, pic_name);
3731 else
3732 fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp %%o7+8\n\t nop\n",
3733 pic_name, pic_name);
3734 }
3735
3736 /* Initialize every time through, since we can't easily
3737 know this to be permanent. */
3738 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3739 add_pc_to_pic_symbol = gen_rtx_SYMBOL_REF (Pmode, add_pc_to_pic_symbol_name);
3740
3741 flag_pic = 0;
3742 emit_insn (gen_load_pcrel_sym (pic_offset_table_rtx, global_offset_table,
3743 add_pc_to_pic_symbol));
3744 flag_pic = orig_flag_pic;
3745
3746 /* Need to emit this whether or not we obey regdecls,
3747 since setjmp/longjmp can cause life info to screw up.
3748 ??? In the case where we don't obey regdecls, this is not sufficient
3749 since we may not fall out the bottom. */
3750 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
3751 }
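/* The emitted sequence is roughly (illustrative; the exact output comes
   from the load_pcrel_sym pattern, here with %l7 as the PIC register):

        sethi   %hi(_GLOBAL_OFFSET_TABLE_-4), %l7
        call    <the LADDPC helper above>
         add    %l7, %lo(_GLOBAL_OFFSET_TABLE_+4), %l7

   after which %l7 holds the GOT address, the helper having added %o7
   (the address of the call itself) into %l7.  */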
3752 \f
3753 /* Return 1 if RTX is a MEM which is known to be aligned to at
3754 least a DESIRED byte boundary. */
3755
3756 int
3757 mem_min_alignment (rtx mem, int desired)
3758 {
3759 rtx addr, base, offset;
3760
3761 /* If it's not a MEM we can't accept it. */
3762 if (GET_CODE (mem) != MEM)
3763 return 0;
3764
3765 addr = XEXP (mem, 0);
3766 base = offset = NULL_RTX;
3767 if (GET_CODE (addr) == PLUS)
3768 {
3769 if (GET_CODE (XEXP (addr, 0)) == REG)
3770 {
3771 base = XEXP (addr, 0);
3772
3773 /* The point here is that if the base REG is
3774 aligned properly, the compiler will make sure
3775 any REG-based index added to it will be aligned
3776 as well. */
3777 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
3778 offset = XEXP (addr, 1);
3779 else
3780 offset = const0_rtx;
3781 }
3782 }
3783 else if (GET_CODE (addr) == REG)
3784 {
3785 base = addr;
3786 offset = const0_rtx;
3787 }
3788
3789 if (base != NULL_RTX)
3790 {
3791 int regno = REGNO (base);
3792
3793 if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
3794 {
3795 /* Check if the compiler has recorded some information
3796 about the alignment of the base REG. If reload has
3797 completed, we already matched with proper alignments.
3798 If not running global_alloc, reload might give us an
3799 unaligned pointer to the local stack, though. */
3800 if (((cfun != 0
3801 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
3802 || (optimize && reload_completed))
3803 && (INTVAL (offset) & (desired - 1)) == 0)
3804 return 1;
3805 }
3806 else
3807 {
3808 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
3809 return 1;
3810 }
3811 }
3812 else if (! TARGET_UNALIGNED_DOUBLES
3813 || CONSTANT_P (addr)
3814 || GET_CODE (addr) == LO_SUM)
3815 {
3816 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
3817 is true, in which case we can only assume that an access is aligned if
3818 it is to a constant address, or the address involves a LO_SUM. */
3819 return 1;
3820 }
3821
3822 /* An obviously unaligned address. */
3823 return 0;
3824 }
3825
3826 \f
3827 /* Vectors to keep interesting information about registers where it can easily
3828 be got. We used to use the actual mode value as the bit number, but there
3829 are more than 32 modes now. Instead we use two tables: one indexed by
3830 hard register number, and one indexed by mode. */
3831
3832 /* The purpose of sparc_mode_class is to shrink the range of modes so that
3833 they all fit (as bit numbers) in a 32 bit word (again). Each real mode is
3834 mapped into one sparc_mode_class mode. */
3835
3836 enum sparc_mode_class {
3837 S_MODE, D_MODE, T_MODE, O_MODE,
3838 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
3839 CC_MODE, CCFP_MODE
3840 };
3841
3842 /* Modes for single-word and smaller quantities. */
3843 #define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
3844
3845 /* Modes for double-word and smaller quantities. */
3846 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << (int) DF_MODE))
3847
3848 /* Modes for quad-word and smaller quantities. */
3849 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
3850
3851 /* Modes for 8-word and smaller quantities. */
3852 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
3853
3854 /* Modes for single-float quantities. We must allow any single word or
3855 smaller quantity. This is because the fix/float conversion instructions
3856 take integer inputs/outputs from the float registers. */
3857 #define SF_MODES (S_MODES)
3858
3859 /* Modes for double-float and smaller quantities. */
3860 #define DF_MODES (S_MODES | D_MODES)
3861
3862 /* Modes for double-float only quantities. */
3863 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
3864
3865 /* Modes for quad-float only quantities. */
3866 #define TF_ONLY_MODES (1 << (int) TF_MODE)
3867
3868 /* Modes for quad-float and smaller quantities. */
3869 #define TF_MODES (DF_MODES | TF_ONLY_MODES)
3870
3871 /* Modes for quad-float and double-float quantities. */
3872 #define TF_MODES_NO_S (DF_MODES_NO_S | TF_ONLY_MODES)
3873
3874 /* Modes for quad-float pair only quantities. */
3875 #define OF_ONLY_MODES (1 << (int) OF_MODE)
3876
3877 /* Modes for quad-float pairs and smaller quantities. */
3878 #define OF_MODES (TF_MODES | OF_ONLY_MODES)
3879
3880 #define OF_MODES_NO_S (TF_MODES_NO_S | OF_ONLY_MODES)
3881
3882 /* Modes for condition codes. */
3883 #define CC_MODES (1 << (int) CC_MODE)
3884 #define CCFP_MODES (1 << (int) CCFP_MODE)
3885
3886 /* Value is 1 if register/mode pair is acceptable on sparc.
3887 The funny mixture of D and T modes is because integer operations
3888 do not specially operate on tetra quantities, so non-quad-aligned
3889 registers can hold quadword quantities (except %o4 and %i4 because
3890 they cross fixed registers). */
3891
3892 /* This points to either the 32 bit or the 64 bit version. */
3893 const int *hard_regno_mode_classes;
3894
3895 static const int hard_32bit_mode_classes[] = {
3896 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3897 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3898 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3899 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3900
3901 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3902 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3903 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3904 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3905
3906 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3907 and none can hold SFmode/SImode values. */
3908 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3909 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3910 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3911 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3912
3913 /* %fcc[0123] */
3914 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3915
3916 /* %icc */
3917 CC_MODES
3918 };
3919
3920 static const int hard_64bit_mode_classes[] = {
3921 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3922 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3923 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3924 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3925
3926 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3927 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3928 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3929 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3930
3931 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3932 and none can hold SFmode/SImode values. */
3933 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3934 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3935 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3936 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3937
3938 /* %fcc[0123] */
3939 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3940
3941 /* %icc */
3942 CC_MODES
3943 };
3944
3945 int sparc_mode_class [NUM_MACHINE_MODES];
3946
3947 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
3948
3949 static void
3950 sparc_init_modes (void)
3951 {
3952 int i;
3953
3954 for (i = 0; i < NUM_MACHINE_MODES; i++)
3955 {
3956 switch (GET_MODE_CLASS (i))
3957 {
3958 case MODE_INT:
3959 case MODE_PARTIAL_INT:
3960 case MODE_COMPLEX_INT:
3961 if (GET_MODE_SIZE (i) <= 4)
3962 sparc_mode_class[i] = 1 << (int) S_MODE;
3963 else if (GET_MODE_SIZE (i) == 8)
3964 sparc_mode_class[i] = 1 << (int) D_MODE;
3965 else if (GET_MODE_SIZE (i) == 16)
3966 sparc_mode_class[i] = 1 << (int) T_MODE;
3967 else if (GET_MODE_SIZE (i) == 32)
3968 sparc_mode_class[i] = 1 << (int) O_MODE;
3969 else
3970 sparc_mode_class[i] = 0;
3971 break;
3972 case MODE_FLOAT:
3973 case MODE_COMPLEX_FLOAT:
3974 if (GET_MODE_SIZE (i) <= 4)
3975 sparc_mode_class[i] = 1 << (int) SF_MODE;
3976 else if (GET_MODE_SIZE (i) == 8)
3977 sparc_mode_class[i] = 1 << (int) DF_MODE;
3978 else if (GET_MODE_SIZE (i) == 16)
3979 sparc_mode_class[i] = 1 << (int) TF_MODE;
3980 else if (GET_MODE_SIZE (i) == 32)
3981 sparc_mode_class[i] = 1 << (int) OF_MODE;
3982 else
3983 sparc_mode_class[i] = 0;
3984 break;
3985 case MODE_CC:
3986 if (i == (int) CCFPmode || i == (int) CCFPEmode)
3987 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
3988 else
3989 sparc_mode_class[i] = 1 << (int) CC_MODE;
3990 break;
3991 default:
3992 sparc_mode_class[i] = 0;
3993 break;
3994 }
3995 }
3996
3997 if (TARGET_ARCH64)
3998 hard_regno_mode_classes = hard_64bit_mode_classes;
3999 else
4000 hard_regno_mode_classes = hard_32bit_mode_classes;
4001
4002 /* Initialize the array used by REGNO_REG_CLASS. */
4003 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4004 {
4005 if (i < 16 && TARGET_V8PLUS)
4006 sparc_regno_reg_class[i] = I64_REGS;
4007 else if (i < 32 || i == FRAME_POINTER_REGNUM)
4008 sparc_regno_reg_class[i] = GENERAL_REGS;
4009 else if (i < 64)
4010 sparc_regno_reg_class[i] = FP_REGS;
4011 else if (i < 96)
4012 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
4013 else if (i < 100)
4014 sparc_regno_reg_class[i] = FPCC_REGS;
4015 else
4016 sparc_regno_reg_class[i] = NO_REGS;
4017 }
4018 }
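
/* Illustrative sketch of how the two tables just initialized are meant
   to be consulted; the real test lives in the HARD_REGNO_MODE_OK macro
   in sparc.h.  */
#if 0
  /* Register REGNO can hold a value of mode MODE iff the bit for
     MODE's shrunken class is set in the register's class mask.  */
  ok = (hard_regno_mode_classes[regno] & sparc_mode_class[mode]) != 0;
#endif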
4019 \f
4020 /* Compute the frame size required by the function. This function is called
4021 during the reload pass and also by sparc_expand_prologue. */
4022
4023 HOST_WIDE_INT
4024 sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function_p)
4025 {
4026 int outgoing_args_size = (current_function_outgoing_args_size
4027 + REG_PARM_STACK_SPACE (current_function_decl));
4028 int n_regs = 0; /* N_REGS is the number of 4-byte regs saved thus far. */
4029 int i;
4030
4031 if (TARGET_ARCH64)
4032 {
4033 for (i = 0; i < 8; i++)
4034 if (regs_ever_live[i] && ! call_used_regs[i])
4035 n_regs += 2;
4036 }
4037 else
4038 {
4039 for (i = 0; i < 8; i += 2)
4040 if ((regs_ever_live[i] && ! call_used_regs[i])
4041 || (regs_ever_live[i+1] && ! call_used_regs[i+1]))
4042 n_regs += 2;
4043 }
4044
4045 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
4046 if ((regs_ever_live[i] && ! call_used_regs[i])
4047 || (regs_ever_live[i+1] && ! call_used_regs[i+1]))
4048 n_regs += 2;
4049
4050 /* Set up values for use in prologue and epilogue. */
4051 num_gfregs = n_regs;
4052
4053 if (leaf_function_p
4054 && n_regs == 0
4055 && size == 0
4056 && current_function_outgoing_args_size == 0)
4057 actual_fsize = apparent_fsize = 0;
4058 else
4059 {
4060 /* We subtract STARTING_FRAME_OFFSET, remember it's negative. */
4061 apparent_fsize = (size - STARTING_FRAME_OFFSET + 7) & -8;
4062 apparent_fsize += n_regs * 4;
4063 actual_fsize = apparent_fsize + ((outgoing_args_size + 7) & -8);
4064 }
4065
4066 /* Make sure nothing can clobber our register windows.
4067 If a SAVE must be done, or there is a stack-local variable,
4068 the register window area must be allocated.
4069 ??? For v8 we apparently need an additional 8 bytes of reserved space. */
4070 if (! leaf_function_p || size > 0)
4071 actual_fsize += (16 * UNITS_PER_WORD) + (TARGET_ARCH64 ? 0 : 8);
4072
4073 return SPARC_STACK_ALIGN (actual_fsize);
4074 }
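
/* Worked example (32-bit, non-leaf; STARTING_FRAME_OFFSET taken as 0
   and REG_PARM_STACK_SPACE ignored, purely to keep the arithmetic
   simple): with 20 bytes of locals, 4 bytes of outgoing arguments and
   no call-saved GP/FP registers live,
       apparent_fsize = round_up (20, 8)              = 24
       actual_fsize   = 24 + round_up (4, 8)          = 32
       + register window area (16 * 4 + 8)            = 104
   and SPARC_STACK_ALIGN keeps the 8-byte-aligned 104 unchanged.  */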
4075
4076 /* Output any necessary .register pseudo-ops. */
4077
4078 void
4079 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
4080 {
4081 #ifdef HAVE_AS_REGISTER_PSEUDO_OP
4082 int i;
4083
4084 if (TARGET_ARCH32)
4085 return;
4086
4087 /* Check if %g[2367] were used without
4088 .register being printed for them already. */
4089 for (i = 2; i < 8; i++)
4090 {
4091 if (regs_ever_live [i]
4092 && ! sparc_hard_reg_printed [i])
4093 {
4094 sparc_hard_reg_printed [i] = 1;
4095 fprintf (file, "\t.register\t%%g%d, #scratch\n", i);
4096 }
4097 if (i == 3) i = 5;
4098 }
4099 #endif
4100 }
4101
4102 /* Save/restore call-saved registers from LOW to HIGH at BASE+OFFSET
4103 as needed. LOW should be double-word aligned for 32-bit registers.
4104 Return the new OFFSET. */
4105
4106 #define SORR_SAVE 0
4107 #define SORR_RESTORE 1
4108
4109 static int
4110 save_or_restore_regs (int low, int high, rtx base, int offset, int action)
4111 {
4112 rtx mem, insn;
4113 int i;
4114
4115 if (TARGET_ARCH64 && high <= 32)
4116 {
4117 for (i = low; i < high; i++)
4118 {
4119 if (regs_ever_live[i] && ! call_used_regs[i])
4120 {
4121 mem = gen_rtx_MEM (DImode, plus_constant (base, offset));
4122 set_mem_alias_set (mem, sparc_sr_alias_set);
4123 if (action == SORR_SAVE)
4124 {
4125 insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
4126 RTX_FRAME_RELATED_P (insn) = 1;
4127 }
4128 else /* action == SORR_RESTORE */
4129 emit_move_insn (gen_rtx_REG (DImode, i), mem);
4130 offset += 8;
4131 }
4132 }
4133 }
4134 else
4135 {
4136 for (i = low; i < high; i += 2)
4137 {
4138 bool reg0 = regs_ever_live[i] && ! call_used_regs[i];
4139 bool reg1 = regs_ever_live[i+1] && ! call_used_regs[i+1];
4140 enum machine_mode mode;
4141 int regno;
4142
4143 if (reg0 && reg1)
4144 {
4145 mode = i < 32 ? DImode : DFmode;
4146 regno = i;
4147 }
4148 else if (reg0)
4149 {
4150 mode = i < 32 ? SImode : SFmode;
4151 regno = i;
4152 }
4153 else if (reg1)
4154 {
4155 mode = i < 32 ? SImode : SFmode;
4156 regno = i + 1;
4157 offset += 4;
4158 }
4159 else
4160 continue;
4161
4162 mem = gen_rtx_MEM (mode, plus_constant (base, offset));
4163 set_mem_alias_set (mem, sparc_sr_alias_set);
4164 if (action == SORR_SAVE)
4165 {
4166 insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
4167 RTX_FRAME_RELATED_P (insn) = 1;
4168 }
4169 else /* action == SORR_RESTORE */
4170 emit_move_insn (gen_rtx_REG (mode, regno), mem);
4171
4172 /* Always preserve double-word alignment: bump by 8 and round down, so an earlier bump of 4 for an odd register still lands on the next slot. */
4173 offset = (offset + 8) & -8;
4174 }
4175 }
4176
4177 return offset;
4178 }
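
/* Example: if %f2 and %f3 are both live, the loop above emits a single
   DFmode save of the pair at OFFSET; if only %f3 is live, it emits an
   SFmode save at OFFSET+4.  Either way OFFSET then advances to the
   next doubleword boundary, matching the two-registers-per-pair
   accounting done in sparc_compute_frame_size.  */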
4179
4180 /* Emit code to save call-saved registers. */
4181
4182 static void
4183 emit_save_regs (void)
4184 {
4185 HOST_WIDE_INT offset;
4186 rtx base;
4187
4188 offset = frame_base_offset - apparent_fsize;
4189
4190 if (offset < -4096 || offset + num_gfregs * 4 > 4096)
4191 {
4192 /* ??? This might be optimized a little as %g1 might already have a
4193 value close enough that a single add insn will do. */
4194 /* ??? Although, all of this is probably only a temporary fix
4195 because if %g1 can hold a function result, then
4196 sparc_expand_epilogue will lose (the result will be
4197 clobbered). */
4198 base = gen_rtx_REG (Pmode, 1);
4199 emit_move_insn (base, GEN_INT (offset));
4200 emit_insn (gen_rtx_SET (VOIDmode,
4201 base,
4202 gen_rtx_PLUS (Pmode, frame_base_reg, base)));
4203 offset = 0;
4204 }
4205 else
4206 base = frame_base_reg;
4207
4208 offset = save_or_restore_regs (0, 8, base, offset, SORR_SAVE);
4209 save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, SORR_SAVE);
4210 }
4211
4212 /* Emit code to restore call-saved registers. */
4213
4214 static void
4215 emit_restore_regs (void)
4216 {
4217 HOST_WIDE_INT offset;
4218 rtx base;
4219
4220 offset = frame_base_offset - apparent_fsize;
4221
4222 if (offset < -4096 || offset + num_gfregs * 4 > 4096 - 8 /*double*/)
4223 {
4224 base = gen_rtx_REG (Pmode, 1);
4225 emit_move_insn (base, GEN_INT (offset));
4226 emit_insn (gen_rtx_SET (VOIDmode,
4227 base,
4228 gen_rtx_PLUS (Pmode, frame_base_reg, base)));
4229 offset = 0;
4230 }
4231 else
4232 base = frame_base_reg;
4233
4234 offset = save_or_restore_regs (0, 8, base, offset, SORR_RESTORE);
4235 save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, SORR_RESTORE);
4236 }
4237
4238 /* Emit an increment for the stack pointer. */
4239
4240 static void
4241 emit_stack_pointer_increment (rtx increment)
4242 {
4243 if (TARGET_ARCH64)
4244 emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx, increment));
4245 else
4246 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, increment));
4247 }
4248
4249 /* Emit a decrement for the stack pointer. */
4250
4251 static void
4252 emit_stack_pointer_decrement (rtx decrement)
4253 {
4254 if (TARGET_ARCH64)
4255 emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, decrement));
4256 else
4257 emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, decrement));
4258 }
4259
4260 /* Expand the function prologue. The prologue is responsible for reserving
4261 storage for the frame, saving the call-saved registers and loading the
4262 PIC register if needed. */
4263
4264 void
4265 sparc_expand_prologue (void)
4266 {
4267 int leaf_function_p = current_function_uses_only_leaf_regs;
4268
4269 /* Need to use actual_fsize, since we are also allocating
4270 space for our callee (and our own register save area). */
4271 actual_fsize = sparc_compute_frame_size (get_frame_size(), leaf_function_p);
4272
4273 if (leaf_function_p)
4274 {
4275 frame_base_reg = stack_pointer_rtx;
4276 frame_base_offset = actual_fsize + SPARC_STACK_BIAS;
4277 }
4278 else
4279 {
4280 frame_base_reg = hard_frame_pointer_rtx;
4281 frame_base_offset = SPARC_STACK_BIAS;
4282 }
4283
4284 if (actual_fsize == 0)
4285 /* do nothing. */ ;
4286 else if (leaf_function_p)
4287 {
4288 if (actual_fsize <= 4096)
4289 emit_stack_pointer_increment (GEN_INT (- actual_fsize));
4290 else if (actual_fsize <= 8192)
4291 {
4292 emit_stack_pointer_increment (GEN_INT (-4096));
4293 emit_stack_pointer_increment (GEN_INT (4096 - actual_fsize));
4294 }
4295 else
4296 {
4297 rtx reg = gen_rtx_REG (Pmode, 1);
4298 emit_move_insn (reg, GEN_INT (-actual_fsize));
4299 emit_stack_pointer_increment (reg);
4300 }
4301 }
4302 else
4303 {
4304 if (actual_fsize <= 4096)
4305 emit_insn (gen_save_register_window (GEN_INT (-actual_fsize)));
4306 else if (actual_fsize <= 8192)
4307 {
4308 emit_insn (gen_save_register_window (GEN_INT (-4096)));
4309 emit_stack_pointer_increment (GEN_INT (4096 - actual_fsize));
4310 }
4311 else
4312 {
4313 rtx reg = gen_rtx_REG (Pmode, 1);
4314 emit_move_insn (reg, GEN_INT (-actual_fsize));
4315 emit_insn (gen_save_register_window (reg));
4316 }
4317 }
4318
4319 /* Call-saved registers are saved just above the outgoing argument area. */
4320 if (num_gfregs)
4321 emit_save_regs ();
4322
4323 /* Load the PIC register if needed. */
4324 if (flag_pic && current_function_uses_pic_offset_table)
4325 load_pic_register ();
4326 }
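
/* For illustration, the three size cases above yield prologues like
   these (32-bit, non-leaf; a hedged sketch of the eventual assembly):

       fsize <= 4096:  save    %sp, -fsize, %sp
       fsize <= 8192:  save    %sp, -4096, %sp
                       add     %sp, 4096-fsize, %sp
       fsize >  8192:  sethi   %hi(-fsize), %g1
                       or      %g1, %lo(-fsize), %g1
                       save    %sp, %g1, %sp  */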
4327
4328 /* This function generates the assembly code for function entry, which boils
4329 down to emitting the necessary .register directives. It also informs the
4330 DWARF-2 back-end on the layout of the frame.
4331
4332 ??? Historical cruft: "On SPARC, move-double insns between fpu and cpu need
4333 an 8-byte block of memory. If any fpu reg is used in the function, we
4334 allocate such a block here, at the bottom of the frame, just in case it's
4335 needed." Could this explain the -8 in emit_restore_regs? */
4336
4337 static void
4338 sparc_asm_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4339 {
4340 int leaf_function_p = current_function_uses_only_leaf_regs;
4341
4342 sparc_output_scratch_registers (file);
4343
4344 if (dwarf2out_do_frame () && actual_fsize)
4345 {
4346 char *label = dwarf2out_cfi_label ();
4347
4348 /* The canonical frame address refers to the top of the frame. */
4349 dwarf2out_def_cfa (label,
4350 leaf_function_p
4351 ? STACK_POINTER_REGNUM
4352 : HARD_FRAME_POINTER_REGNUM,
4353 frame_base_offset);
4354
4355 if (! leaf_function_p)
4356 {
4357 /* Note the register window save. This tells the unwinder that
4358 it needs to restore the window registers from the previous
4359 frame's window save area at 0(cfa). */
4360 dwarf2out_window_save (label);
4361
4362 /* The return address (-8) is now in %i7. */
4363 dwarf2out_return_reg (label, 31);
4364 }
4365 }
4366 }
4367
4368 /* Expand the function epilogue, either normal or part of a sibcall.
4369 We emit all the instructions except the return or the call. */
4370
4371 void
4372 sparc_expand_epilogue (void)
4373 {
4374 int leaf_function_p = current_function_uses_only_leaf_regs;
4375
4376 if (num_gfregs)
4377 emit_restore_regs ();
4378
4379 if (actual_fsize == 0)
4380 /* do nothing. */ ;
4381 else if (leaf_function_p)
4382 {
4383 if (actual_fsize <= 4096)
4384 emit_stack_pointer_decrement (GEN_INT (- actual_fsize));
4385 else if (actual_fsize <= 8192)
4386 {
4387 emit_stack_pointer_decrement (GEN_INT (-4096));
4388 emit_stack_pointer_decrement (GEN_INT (4096 - actual_fsize));
4389 }
4390 else
4391 {
4392 rtx reg = gen_rtx_REG (Pmode, 1);
4393 emit_move_insn (reg, GEN_INT (-actual_fsize));
4394 emit_stack_pointer_decrement (reg);
4395 }
4396 }
4397 }
4398
4399 /* This function generates the assembly code for function exit. */
4400
4401 static void
4402 sparc_asm_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4403 {
4404 /* If code does not drop into the epilogue, we still have to output
4405 a dummy nop for the sake of sane backtraces. Otherwise, if the
4406 last two instructions of a function were "call foo; dslot;" this
4407 can make the return PC of foo (i.e. the address of the call
4408 instruction plus 8) point to the first instruction in the next function. */
4409
4410 rtx insn, last_real_insn;
4411
4412 insn = get_last_insn ();
4413
4414 last_real_insn = prev_real_insn (insn);
4415 if (last_real_insn
4416 && GET_CODE (last_real_insn) == INSN
4417 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
4418 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
4419
4420 if (last_real_insn && GET_CODE (last_real_insn) == CALL_INSN)
4421 fputs ("\tnop\n", file);
4422
4423 sparc_output_deferred_case_vectors ();
4424 }
4425
4426 /* Output a 'restore' instruction. */
4427
4428 static void
4429 output_restore (rtx pat)
4430 {
4431 rtx operands[3];
4432
4433 if (! pat)
4434 {
4435 fputs ("\t restore\n", asm_out_file);
4436 return;
4437 }
4438
4439 if (GET_CODE (pat) != SET)
4440 abort ();
4441
4442 operands[0] = SET_DEST (pat);
4443 pat = SET_SRC (pat);
4444
4445 switch (GET_CODE (pat))
4446 {
4447 case PLUS:
4448 operands[1] = XEXP (pat, 0);
4449 operands[2] = XEXP (pat, 1);
4450 output_asm_insn (" restore %r1, %2, %Y0", operands);
4451 break;
4452 case LO_SUM:
4453 operands[1] = XEXP (pat, 0);
4454 operands[2] = XEXP (pat, 1);
4455 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
4456 break;
4457 case ASHIFT:
4458 operands[1] = XEXP (pat, 0);
4459 if (XEXP (pat, 1) != const1_rtx)
4460 abort ();
4461 output_asm_insn (" restore %r1, %r1, %Y0", operands);
4462 break;
4463 default:
4464 operands[1] = pat;
4465 output_asm_insn (" restore %%g0, %1, %Y0", operands);
4466 break;
4467 }
4468 }
4469
4470 /* Output a return. */
4471
4472 const char *
4473 output_return (rtx insn)
4474 {
4475 int leaf_function_p = current_function_uses_only_leaf_regs;
4476 bool delay_slot_filled_p = dbr_sequence_length () > 0;
4477 /* True if the caller has placed an "unimp" insn immediately after the call.
4478 This insn is used in the 32-bit ABI when calling a function that returns
4479 a non-zero-sized structure. The 64-bit ABI doesn't have it. Be careful
4480 to have this test be the same as that used on the call. */
4481 bool sparc_skip_caller_unimp
4482 = ! TARGET_ARCH64
4483 && current_function_returns_struct
4484 && (TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
4485 == INTEGER_CST)
4486 && ! integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl)));
4487
4488 if (leaf_function_p)
4489 {
4490 /* This is a leaf function so we don't have to bother restoring the
4491 register window, which frees us from dealing with the convoluted
4492 semantics of restore/return. We simply output the jump to the
4493 return address and the insn in the delay slot, which usually is
4494 the subtraction restoring the stack pointer %sp. */
4495
4496 if (current_function_calls_eh_return)
4497 abort ();
4498
4499 fprintf (asm_out_file, "\tjmp\t%%o7+%d\n", sparc_skip_caller_unimp ? 12 : 8);
4500
4501 if (delay_slot_filled_p)
4502 {
4503 rtx delay = NEXT_INSN (insn);
4504 if (! delay)
4505 abort ();
4506
4507 final_scan_insn (delay, asm_out_file, 1, 0, 1, NULL);
4508 PATTERN (delay) = gen_blockage ();
4509 INSN_CODE (delay) = -1;
4510 }
4511 else
4512 fputs ("\t nop\n", asm_out_file);
4513 }
4514 else
4515 {
4516 /* This is a regular function so we have to restore the register window.
4517 We may have a pending insn for the delay slot, which will be either
4518 combined with the 'restore' instruction or put in the delay slot of
4519 the 'return' instruction. */
4520
4521 if (current_function_calls_eh_return)
4522 {
4523 /* If the function uses __builtin_eh_return, the eh_return
4524 machinery occupies the delay slot. */
4525 if (delay_slot_filled_p || sparc_skip_caller_unimp)
4526 abort ();
4527
4528 if (! flag_delayed_branch)
4529 fputs ("\tadd\t%fp, %g1, %fp\n", asm_out_file);
4530
4531 if (TARGET_V9)
4532 fputs ("\treturn\t%i7+8\n", asm_out_file);
4533 else
4534 fputs ("\trestore\n\tjmp\t%o7+8\n", asm_out_file);
4535
4536 if (flag_delayed_branch)
4537 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
4538 else
4539 fputs ("\t nop\n", asm_out_file);
4540 }
4541 else if (delay_slot_filled_p)
4542 {
4543 rtx delay, pat;
4544
4545 delay = NEXT_INSN (insn);
4546 if (! delay)
4547 abort ();
4548
4549 pat = PATTERN (delay);
4550
4551 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
4552 {
4553 epilogue_renumber (&pat, 0);
4554 fprintf (asm_out_file, "\treturn\t%%i7+%d\n",
4555 sparc_skip_caller_unimp ? 12 : 8);
4556 final_scan_insn (delay, asm_out_file, 1, 0, 1, NULL);
4557 }
4558 else
4559 {
4560 fprintf (asm_out_file, "\tjmp\t%%i7+%d\n",
4561 sparc_skip_caller_unimp ? 12 : 8);
4562 output_restore (pat);
4563 }
4564
4565 PATTERN (delay) = gen_blockage ();
4566 INSN_CODE (delay) = -1;
4567 }
4568 else
4569 {
4570 /* The delay slot is empty. */
4571 if (TARGET_V9)
4572 fprintf (asm_out_file, "\treturn\t%%i7+%d\n\t nop\n",
4573 sparc_skip_caller_unimp ? 12 : 8);
4574 else if (flag_delayed_branch)
4575 fprintf (asm_out_file, "\tjmp\t%%i7+%d\n\t restore\n",
4576 sparc_skip_caller_unimp ? 12 : 8);
4577 else
4578 fprintf (asm_out_file, "\trestore\n\tjmp\t%%o7+%d\n\t nop\n",
4579 sparc_skip_caller_unimp ? 12 : 8);
4580 }
4581 }
4582
4583 return "";
4584 }
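
/* Example: for a non-leaf V9 function whose delay slot holds the insn
   %i0 = %i1 + 1, the path above lets epilogue_renumber rewrite it in
   terms of output registers and then emits, roughly:

       return  %i7+8
        add    %o1, 1, %o0

   so the addition still executes correctly after the implicit
   register window restore.  */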
4585
4586 /* Output a sibling call. */
4587
4588 const char *
4589 output_sibcall (rtx insn, rtx call_operand)
4590 {
4591 int leaf_function_p = current_function_uses_only_leaf_regs;
4592 bool delay_slot_filled_p = dbr_sequence_length () > 0;
4593 rtx operands[1];
4594
4595 if (! flag_delayed_branch)
4596 abort ();
4597
4598 operands[0] = call_operand;
4599
4600 if (leaf_function_p)
4601 {
4602 /* This is a leaf function so we don't have to bother restoring the
4603 register window. We simply output the jump to the function and
4604 the insn in the delay slot (if any). */
4605
4606 if (LEAF_SIBCALL_SLOT_RESERVED_P && delay_slot_filled_p)
4607 abort ();
4608
4609 if (delay_slot_filled_p)
4610 {
4611 rtx delay = NEXT_INSN (insn);
4612 if (! delay)
4613 abort ();
4614
4615 output_asm_insn ("sethi\t%%hi(%a0), %%g1", operands);
4616 output_asm_insn ("jmp\t%%g1 + %%lo(%a0)", operands);
4617 final_scan_insn (delay, asm_out_file, 1, 0, 1, NULL);
4618
4619 PATTERN (delay) = gen_blockage ();
4620 INSN_CODE (delay) = -1;
4621 }
4622 else
4623 {
4624 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
4625 it into a branch if possible. */
4626 output_asm_insn ("or\t%%o7, %%g0, %%g1", operands);
4627 output_asm_insn ("call\t%a0, 0", operands);
4628 output_asm_insn (" or\t%%g1, %%g0, %%o7", operands);
4629 }
4630 }
4631 else
4632 {
4633 /* This is a regular function so we have to restore the register window.
4634 We may have a pending insn for the delay slot, which will be combined
4635 with the 'restore' instruction. */
4636
4637 output_asm_insn ("call\t%a0, 0", operands);
4638
4639 if (delay_slot_filled_p)
4640 {
4641 rtx delay = NEXT_INSN (insn);
4642 if (! delay)
4643 abort ();
4644
4645 output_restore (PATTERN (delay));
4646
4647 PATTERN (delay) = gen_blockage ();
4648 INSN_CODE (delay) = -1;
4649 }
4650 else
4651 output_restore (NULL_RTX);
4652 }
4653
4654 return "";
4655 }
4656 \f
4657 /* Functions for handling argument passing.
4658
4659 For 32-bit, the first 6 args are normally in registers and the rest are
4660 pushed. Any arg that starts within the first 6 words is at least
4661 partially passed in a register unless its data type forbids it.
4662
4663 For 64-bit, the argument registers are laid out as an array of 16 elements
4664 and arguments are added sequentially. The first 6 int args and up to the
4665 first 16 fp args (depending on size) are passed in regs.
4666
4667 Slot Stack Integral Float Float in structure Double Long Double
4668 ---- ----- -------- ----- ------------------ ------ -----------
4669 15 [SP+248] %f31 %f30,%f31 %d30
4670 14 [SP+240] %f29 %f28,%f29 %d28 %q28
4671 13 [SP+232] %f27 %f26,%f27 %d26
4672 12 [SP+224] %f25 %f24,%f25 %d24 %q24
4673 11 [SP+216] %f23 %f22,%f23 %d22
4674 10 [SP+208] %f21 %f20,%f21 %d20 %q20
4675 9 [SP+200] %f19 %f18,%f19 %d18
4676 8 [SP+192] %f17 %f16,%f17 %d16 %q16
4677 7 [SP+184] %f15 %f14,%f15 %d14
4678 6 [SP+176] %f13 %f12,%f13 %d12 %q12
4679 5 [SP+168] %o5 %f11 %f10,%f11 %d10
4680 4 [SP+160] %o4 %f9 %f8,%f9 %d8 %q8
4681 3 [SP+152] %o3 %f7 %f6,%f7 %d6
4682 2 [SP+144] %o2 %f5 %f4,%f5 %d4 %q4
4683 1 [SP+136] %o1 %f3 %f2,%f3 %d2
4684 0 [SP+128] %o0 %f1 %f0,%f1 %d0 %q0
4685
4686 Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.
4687
4688 Integral arguments are always passed as 64-bit quantities appropriately
4689 extended.
4690
4691 Passing of floating point values is handled as follows.
4692 If a prototype is in scope:
4693 If the value is in a named argument (i.e. not a stdarg function or a
4694 value not part of the `...') then the value is passed in the appropriate
4695 fp reg.
4696 If the value is part of the `...' and is passed in one of the first 6
4697 slots then the value is passed in the appropriate int reg.
4698 If the value is part of the `...' and is not passed in one of the first 6
4699 slots then the value is passed in memory.
4700 If a prototype is not in scope:
4701 If the value is one of the first 6 arguments the value is passed in the
4702 appropriate integer reg and the appropriate fp reg.
4703 If the value is not one of the first 6 arguments the value is passed in
4704 the appropriate fp reg and in memory.
4705
4706
4707 Summary of the calling conventions implemented by GCC on SPARC:
4708
4709 32-bit ABI:
4710 size argument return value
4711
4712 small integer <4 int. reg. int. reg.
4713 word 4 int. reg. int. reg.
4714 double word 8 int. reg. int. reg.
4715
4716 _Complex small integer <8 int. reg. int. reg.
4717 _Complex word 8 int. reg. int. reg.
4718 _Complex double word 16 memory int. reg.
4719
4720 vector integer <=8 int. reg. FP reg.
4721 vector integer >8 memory memory
4722
4723 float 4 int. reg. FP reg.
4724 double 8 int. reg. FP reg.
4725 long double 16 memory memory
4726
4727 _Complex float 8 memory FP reg.
4728 _Complex double 16 memory FP reg.
4729 _Complex long double 32 memory FP reg.
4730
4731 vector float <=32 memory FP reg.
4732 vector float >32 memory memory
4733
4734 aggregate any memory memory
4735
4736
4737
4738 64-bit ABI:
4739 size argument return value
4740
4741 small integer <8 int. reg. int. reg.
4742 word 8 int. reg. int. reg.
4743 double word 16 int. reg. int. reg.
4744
4745 _Complex small integer <16 int. reg. int. reg.
4746 _Complex word 16 int. reg. int. reg.
4747 _Complex double word 32 memory int. reg.
4748
4749 vector integer <=16 FP reg. FP reg.
4750 vector integer 16<s<=32 memory FP reg.
4751 vector integer >32 memory memory
4752
4753 float 4 FP reg. FP reg.
4754 double 8 FP reg. FP reg.
4755 long double 16 FP reg. FP reg.
4756
4757 _Complex float 8 FP reg. FP reg.
4758 _Complex double 16 FP reg. FP reg.
4759 _Complex long double 32 memory FP reg.
4760
4761 vector float <=16 FP reg. FP reg.
4762 vector float 16<s<=32 memory FP reg.
4763 vector float >32 memory memory
4764
4765 aggregate <=16 reg. reg.
4766 aggregate 16<s<=32 memory reg.
4767 aggregate >32 memory memory
4768
4769
4770
4771 Note #1: complex floating-point types follow the extended SPARC ABIs as
4772 implemented by the Sun compiler.
4773
4774 Note #2: integral vector types follow the scalar floating-point types
4775 conventions to match what is implemented by the Sun VIS SDK.
4776
4777 Note #3: floating-point vector types follow the complex floating-point
4778 types conventions. */
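
/* Reading the 64-bit tables above on a concrete prototype (purely
   illustrative, not additional specification):

       void f (int i, double d, struct { float x, y; } s);

   I is passed in %o0 (slot 0), D in %d2 (slot 1), and the 8-byte
   struct S is promoted into the pair %f4/%f5 (slot 2); each slot also
   has a stack home at [SP+128+8*slot].  */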
4779
4780
4781 /* Maximum number of int regs for args. */
4782 #define SPARC_INT_ARG_MAX 6
4783 /* Maximum number of fp regs for args. */
4784 #define SPARC_FP_ARG_MAX 16
4785
4786 #define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
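/* E.g. with 8-byte words, ROUND_ADVANCE maps sizes 1..8 to one slot
   and 9..16 to two slots.  */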
4787
4788 /* Handle the INIT_CUMULATIVE_ARGS macro.
4789 Initialize a variable CUM of type CUMULATIVE_ARGS
4790 for a call to a function whose data type is FNTYPE.
4791 For a library call, FNTYPE is 0. */
4792
4793 void
4794 init_cumulative_args (struct sparc_args *cum, tree fntype,
4795 rtx libname ATTRIBUTE_UNUSED,
4796 tree fndecl ATTRIBUTE_UNUSED)
4797 {
4798 cum->words = 0;
4799 cum->prototype_p = fntype && TYPE_ARG_TYPES (fntype);
4800 cum->libcall_p = fntype == 0;
4801 }
4802
4803 /* Handle the TARGET_PROMOTE_PROTOTYPES target hook.
4804 When a prototype says `char' or `short', really pass an `int'. */
4805
4806 static bool
4807 sparc_promote_prototypes (tree fntype ATTRIBUTE_UNUSED)
4808 {
4809 return TARGET_ARCH32 ? true : false;
4810 }
4811
4812 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
4813
4814 static bool
4815 sparc_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
4816 {
4817 return TARGET_ARCH64 ? true : false;
4818 }
4819
4820 /* Scan the record type TYPE and return the following predicates:
4821 - INTREGS_P: the record contains at least one field or sub-field
4822 that is eligible for promotion in integer registers.
4823 - FP_REGS_P: the record contains at least one field or sub-field
4824 that is eligible for promotion in floating-point registers.
4825 - PACKED_P: the record contains at least one field that is packed.
4826
4827 Sub-fields are not taken into account for the PACKED_P predicate. */
4828
4829 static void
4830 scan_record_type (tree type, int *intregs_p, int *fpregs_p, int *packed_p)
4831 {
4832 tree field;
4833
4834 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4835 {
4836 if (TREE_CODE (field) == FIELD_DECL)
4837 {
4838 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4839 scan_record_type (TREE_TYPE (field), intregs_p, fpregs_p, 0);
4840 else if (FLOAT_TYPE_P (TREE_TYPE (field)) && TARGET_FPU)
4841 *fpregs_p = 1;
4842 else
4843 *intregs_p = 1;
4844
4845 if (packed_p && DECL_PACKED (field))
4846 *packed_p = 1;
4847 }
4848 }
4849 }
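
/* Example (hypothetical type): for
       struct s { int i; struct { double d; } n; };
   the scan sets *INTREGS_P for I, recurses into N and sets *FPREGS_P
   for N.D (given TARGET_FPU), and leaves *PACKED_P clear since no
   field is packed.  */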
4850
4851 /* Compute the slot number to pass an argument in.
4852 Return the slot number or -1 if passing on the stack.
4853
4854 CUM is a variable of type CUMULATIVE_ARGS which gives info about
4855 the preceding args and about the function being called.
4856 MODE is the argument's machine mode.
4857 TYPE is the data type of the argument (as a tree).
4858 This is null for libcalls where that information may
4859 not be available.
4860 NAMED is nonzero if this argument is a named parameter
4861 (otherwise it is an extra parameter matching an ellipsis).
4862 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
4863 *PREGNO records the register number to use if scalar type.
4864 *PPADDING records the amount of padding needed in words. */
4865
4866 static int
4867 function_arg_slotno (const struct sparc_args *cum, enum machine_mode mode,
4868 tree type, int named, int incoming_p,
4869 int *pregno, int *ppadding)
4870 {
4871 int regbase = (incoming_p
4872 ? SPARC_INCOMING_INT_ARG_FIRST
4873 : SPARC_OUTGOING_INT_ARG_FIRST);
4874 int slotno = cum->words;
4875 int regno;
4876
4877 *ppadding = 0;
4878
4879 if (type && TREE_ADDRESSABLE (type))
4880 return -1;
4881
4882 if (TARGET_ARCH32
4883 && mode == BLKmode
4884 && type
4885 && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
4886 return -1;
4887
4888 /* For SPARC64, objects requiring 16-byte alignment get it. */
4889 if (TARGET_ARCH64
4890 && GET_MODE_ALIGNMENT (mode) >= 2 * BITS_PER_WORD
4891 && (slotno & 1) != 0)
4892 slotno++, *ppadding = 1;
4893
4894 switch (GET_MODE_CLASS (mode))
4895 {
4896 case MODE_FLOAT:
4897 case MODE_COMPLEX_FLOAT:
4898 case MODE_VECTOR_INT:
4899 case MODE_VECTOR_FLOAT:
4900 if (TARGET_ARCH64 && TARGET_FPU && named)
4901 {
4902 if (slotno >= SPARC_FP_ARG_MAX)
4903 return -1;
4904 regno = SPARC_FP_ARG_FIRST + slotno * 2;
4905 /* Arguments filling only one single FP register are
4906 right-justified in the outer double FP register. */
4907 if (GET_MODE_SIZE (mode) <= 4)
4908 regno++;
4909 break;
4910 }
4911 /* fallthrough */
4912
4913 case MODE_INT:
4914 case MODE_COMPLEX_INT:
4915 if (slotno >= SPARC_INT_ARG_MAX)
4916 return -1;
4917 regno = regbase + slotno;
4918 break;
4919
4920 case MODE_RANDOM:
4921 if (mode == VOIDmode)
4922 /* MODE is VOIDmode when generating the actual call. */
4923 return -1;
4924
4925 if (mode != BLKmode)
4926 abort ();
4927
4928 /* For SPARC64, objects requiring 16-byte alignment get it. */
4929 if (TARGET_ARCH64
4930 && type
4931 && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
4932 && (slotno & 1) != 0)
4933 slotno++, *ppadding = 1;
4934
4935 if (TARGET_ARCH32 || (type && TREE_CODE (type) == UNION_TYPE))
4936 {
4937 if (slotno >= SPARC_INT_ARG_MAX)
4938 return -1;
4939 regno = regbase + slotno;
4940 }
4941 else /* TARGET_ARCH64 && type && TREE_CODE (type) == RECORD_TYPE */
4942 {
4943 int intregs_p = 0, fpregs_p = 0, packed_p = 0;
4944
4945 /* First see what kinds of registers we would need. */
4946 scan_record_type (type, &intregs_p, &fpregs_p, &packed_p);
4947
4948 /* The ABI obviously doesn't specify how packed structures
4949 are passed. These are defined to be passed in int regs
4950 if possible, otherwise memory. */
4951 if (packed_p || !named)
4952 fpregs_p = 0, intregs_p = 1;
4953
4954 /* If all arg slots are filled, then must pass on stack. */
4955 if (fpregs_p && slotno >= SPARC_FP_ARG_MAX)
4956 return -1;
4957
4958 /* If there are only int args and all int arg slots are filled,
4959 then must pass on stack. */
4960 if (!fpregs_p && intregs_p && slotno >= SPARC_INT_ARG_MAX)
4961 return -1;
4962
4963 /* Note that even if all int arg slots are filled, fp members may
4964 still be passed in regs if such regs are available.
4965 *PREGNO isn't set because there may be more than one, it's up
4966 to the caller to compute them. */
4967 return slotno;
4968 }
4969 break;
4970
4971 default :
4972 abort ();
4973 }
4974
4975 *pregno = regno;
4976 return slotno;
4977 }
4978
4979 /* Handle recursive register counting for structure field layout. */
4980
4981 struct function_arg_record_value_parms
4982 {
4983 rtx ret; /* return expression being built. */
4984 int slotno; /* slot number of the argument. */
4985 int named; /* whether the argument is named. */
4986 int regbase; /* regno of the base register. */
4987 int stack; /* 1 if part of the argument is on the stack. */
4988 int intoffset; /* offset of the first pending integer field. */
4989 unsigned int nregs; /* number of words passed in registers. */
4990 };
4991
4992 static void function_arg_record_value_3
4993 (HOST_WIDE_INT, struct function_arg_record_value_parms *);
4994 static void function_arg_record_value_2
4995 (tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
4996 static void function_arg_record_value_1
4997 (tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
4998 static rtx function_arg_record_value (tree, enum machine_mode, int, int, int);
4999 static rtx function_arg_union_value (int, enum machine_mode, int);
5000
5001 /* A subroutine of function_arg_record_value. Traverse the structure
5002 recursively and determine how many registers will be required. */
5003
5004 static void
5005 function_arg_record_value_1 (tree type, HOST_WIDE_INT startbitpos,
5006 struct function_arg_record_value_parms *parms,
5007 bool packed_p)
5008 {
5009 tree field;
5010
5011 /* We need to compute how many registers are needed so we can
5012 allocate the PARALLEL, but before we can do that we need to know
5013 whether there are any packed fields. The ABI obviously doesn't
5014 specify how structures are passed in this case, so they are
5015 defined to be passed in int regs if possible, otherwise memory,
5016 regardless of whether there are fp values present. */
5017
5018 if (! packed_p)
5019 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5020 {
5021 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
5022 {
5023 packed_p = true;
5024 break;
5025 }
5026 }
5027
5028 /* Compute how many registers we need. */
5029 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5030 {
5031 if (TREE_CODE (field) == FIELD_DECL)
5032 {
5033 HOST_WIDE_INT bitpos = startbitpos;
5034
5035 if (DECL_SIZE (field) != 0
5036 && host_integerp (bit_position (field), 1))
5037 bitpos += int_bit_position (field);
5038
5039 /* ??? FIXME: else assume zero offset. */
5040
5041 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5042 function_arg_record_value_1 (TREE_TYPE (field),
5043 bitpos,
5044 parms,
5045 packed_p);
5046 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5047 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5048 && TARGET_FPU
5049 && parms->named
5050 && ! packed_p)
5051 {
5052 if (parms->intoffset != -1)
5053 {
5054 unsigned int startbit, endbit;
5055 int intslots, this_slotno;
5056
5057 startbit = parms->intoffset & -BITS_PER_WORD;
5058 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5059
5060 intslots = (endbit - startbit) / BITS_PER_WORD;
5061 this_slotno = parms->slotno + parms->intoffset
5062 / BITS_PER_WORD;
5063
5064 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
5065 {
5066 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
5067 /* We need to pass this field on the stack. */
5068 parms->stack = 1;
5069 }
5070
5071 parms->nregs += intslots;
5072 parms->intoffset = -1;
5073 }
5074
5075 /* There's no need to check this_slotno < SPARC_FP_ARG_MAX;
5076 if it weren't true we wouldn't be here. */
5077 parms->nregs += 1;
5078 if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
5079 parms->nregs += 1;
5080 }
5081 else
5082 {
5083 if (parms->intoffset == -1)
5084 parms->intoffset = bitpos;
5085 }
5086 }
5087 }
5088 }
5089
5090 /* A subroutine of function_arg_record_value. Assign the bits of the
5091 structure between parms->intoffset and bitpos to integer registers. */
5092
5093 static void
5094 function_arg_record_value_3 (HOST_WIDE_INT bitpos,
5095 struct function_arg_record_value_parms *parms)
5096 {
5097 enum machine_mode mode;
5098 unsigned int regno;
5099 unsigned int startbit, endbit;
5100 int this_slotno, intslots, intoffset;
5101 rtx reg;
5102
5103 if (parms->intoffset == -1)
5104 return;
5105
5106 intoffset = parms->intoffset;
5107 parms->intoffset = -1;
5108
5109 startbit = intoffset & -BITS_PER_WORD;
5110 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5111 intslots = (endbit - startbit) / BITS_PER_WORD;
5112 this_slotno = parms->slotno + intoffset / BITS_PER_WORD;
5113
5114 intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
5115 if (intslots <= 0)
5116 return;
5117
5118 /* If this is the trailing part of a word, only load that much into
5119 the register. Otherwise load the whole register. Note that in
5120 the latter case we may pick up unwanted bits. It's not a problem
5121 at the moment, but we may wish to revisit this. */
5122
5123 if (intoffset % BITS_PER_WORD != 0)
5124 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
5125 MODE_INT, 0);
5126 else
5127 mode = word_mode;
5128
5129 intoffset /= BITS_PER_UNIT;
5130 do
5131 {
5132 regno = parms->regbase + this_slotno;
5133 reg = gen_rtx_REG (mode, regno);
5134 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5135 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
5136
5137 this_slotno += 1;
5138 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
5139 mode = word_mode;
5140 parms->nregs += 1;
5141 intslots -= 1;
5142 }
5143 while (intslots > 0);
5144 }
5145
5146 /* A subroutine of function_arg_record_value. Traverse the structure
5147 recursively and assign bits to floating point registers. Track which
5148 bits in between need integer registers; invoke function_arg_record_value_3
5149 to make that happen. */
5150
5151 static void
5152 function_arg_record_value_2 (tree type, HOST_WIDE_INT startbitpos,
5153 struct function_arg_record_value_parms *parms,
5154 bool packed_p)
5155 {
5156 tree field;
5157
5158 if (! packed_p)
5159 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5160 {
5161 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
5162 {
5163 packed_p = true;
5164 break;
5165 }
5166 }
5167
5168 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5169 {
5170 if (TREE_CODE (field) == FIELD_DECL)
5171 {
5172 HOST_WIDE_INT bitpos = startbitpos;
5173
5174 if (DECL_SIZE (field) != 0
5175 && host_integerp (bit_position (field), 1))
5176 bitpos += int_bit_position (field);
5177
5178 /* ??? FIXME: else assume zero offset. */
5179
5180 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5181 function_arg_record_value_2 (TREE_TYPE (field),
5182 bitpos,
5183 parms,
5184 packed_p);
5185 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5186 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5187 && TARGET_FPU
5188 && parms->named
5189 && ! packed_p)
5190 {
5191 int this_slotno = parms->slotno + bitpos / BITS_PER_WORD;
5192 int regno;
5193 enum machine_mode mode = DECL_MODE (field);
5194 rtx reg;
5195
5196 function_arg_record_value_3 (bitpos, parms);
5197 switch (mode)
5198 {
5199 case SCmode: mode = SFmode; break;
5200 case DCmode: mode = DFmode; break;
5201 case TCmode: mode = TFmode; break;
5202 default: break;
5203 }
5204 regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
5205 if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
5206 regno++;
5207 reg = gen_rtx_REG (mode, regno);
5208 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5209 = gen_rtx_EXPR_LIST (VOIDmode, reg,
5210 GEN_INT (bitpos / BITS_PER_UNIT));
5211 parms->nregs += 1;
5212 if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
5213 {
5214 regno += GET_MODE_SIZE (mode) / 4;
5215 reg = gen_rtx_REG (mode, regno);
5216 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5217 = gen_rtx_EXPR_LIST (VOIDmode, reg,
5218 GEN_INT ((bitpos + GET_MODE_BITSIZE (mode))
5219 / BITS_PER_UNIT));
5220 parms->nregs += 1;
5221 }
5222 }
5223 else
5224 {
5225 if (parms->intoffset == -1)
5226 parms->intoffset = bitpos;
5227 }
5228 }
5229 }
5230 }
5231
5232 /* Used by function_arg and function_value to implement the complex
5233 conventions of the 64-bit ABI for passing and returning structures.
5234 Return an expression valid as a return value for the two macros
5235 FUNCTION_ARG and FUNCTION_VALUE.
5236
5237 TYPE is the data type of the argument (as a tree).
5238 This is null for libcalls where that information may
5239 not be available.
5240 MODE is the argument's machine mode.
5241 SLOTNO is the index number of the argument's slot in the parameter array.
5242 NAMED is nonzero if this argument is a named parameter
5243 (otherwise it is an extra parameter matching an ellipsis).
5244 REGBASE is the regno of the base register for the parameter array. */
5245
5246 static rtx
5247 function_arg_record_value (tree type, enum machine_mode mode,
5248 int slotno, int named, int regbase)
5249 {
5250 HOST_WIDE_INT typesize = int_size_in_bytes (type);
5251 struct function_arg_record_value_parms parms;
5252 unsigned int nregs;
5253
5254 parms.ret = NULL_RTX;
5255 parms.slotno = slotno;
5256 parms.named = named;
5257 parms.regbase = regbase;
5258 parms.stack = 0;
5259
5260 /* Compute how many registers we need. */
5261 parms.nregs = 0;
5262 parms.intoffset = 0;
5263 function_arg_record_value_1 (type, 0, &parms, false);
5264
5265 /* Take into account pending integer fields. */
5266 if (parms.intoffset != -1)
5267 {
5268 unsigned int startbit, endbit;
5269 int intslots, this_slotno;
5270
5271 startbit = parms.intoffset & -BITS_PER_WORD;
5272 endbit = (typesize * BITS_PER_UNIT + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5273 intslots = (endbit - startbit) / BITS_PER_WORD;
5274 this_slotno = slotno + parms.intoffset / BITS_PER_WORD;
5275
5276 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
5277 {
5278 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
5279 /* We need to pass this field on the stack. */
5280 parms.stack = 1;
5281 }
5282
5283 parms.nregs += intslots;
5284 }
5285 nregs = parms.nregs;
5286
5287 /* Allocate the vector and handle some annoying special cases. */
5288 if (nregs == 0)
5289 {
5290 /* ??? Empty structure has no value? Duh? */
5291 if (typesize <= 0)
5292 {
5293 /* Though there's nothing really to store, return a word register
5294 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
5295 leads to breakage because there are zero bytes to
5296 load. */
5297 return gen_rtx_REG (mode, regbase);
5298 }
5299 else
5300 {
5301 /* ??? C++ has structures with no fields, and yet a size. Give up
5302 for now and pass everything back in integer registers. */
5303 nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5304 }
5305 if (nregs + slotno > SPARC_INT_ARG_MAX)
5306 nregs = SPARC_INT_ARG_MAX - slotno;
5307 }
5308 if (nregs == 0)
5309 abort ();
5310
5311 parms.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (parms.stack + nregs));
5312
5313 /* If at least one field must be passed on the stack, generate
5314 (parallel [(expr_list (nil) ...) ...]) so that all fields will
5315 also be passed on the stack. We can't do much better because the
5316 semantics of FUNCTION_ARG_PARTIAL_NREGS doesn't handle the case
5317 of structures for which the fields passed exclusively in registers
5318 are not at the beginning of the structure. */
5319 if (parms.stack)
5320 XVECEXP (parms.ret, 0, 0)
5321 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5322
5323 /* Fill in the entries. */
5324 parms.nregs = 0;
5325 parms.intoffset = 0;
5326 function_arg_record_value_2 (type, 0, &parms, false);
5327 function_arg_record_value_3 (typesize * BITS_PER_UNIT, &parms);
5328
5329 if (parms.nregs != nregs)
5330 abort ();
5331
5332 return parms.ret;
5333 }
5334
5335 /* Used by function_arg and function_value to implement the conventions
5336 of the 64-bit ABI for passing and returning unions.
5337 Return an expression valid as a return value for the two macros
5338 FUNCTION_ARG and FUNCTION_VALUE.
5339
5340 SIZE is the size in bytes of the union.
5341 MODE is the argument's machine mode.
5342 REGNO is the hard register the union will be passed in. */
5343
5344 static rtx
5345 function_arg_union_value (int size, enum machine_mode mode, int regno)
5346 {
5347 int nwords = ROUND_ADVANCE (size), i;
5348 rtx regs;
5349
5350 /* Unions are passed left-justified. */
5351 regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
5352
5353 for (i = 0; i < nwords; i++)
5354 XVECEXP (regs, 0, i)
5355 = gen_rtx_EXPR_LIST (VOIDmode,
5356 gen_rtx_REG (word_mode, regno + i),
5357 GEN_INT (UNITS_PER_WORD * i));
5358
5359 return regs;
5360 }
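
/* Example: a 12-byte union passed in slot 0 of the outgoing area comes
   back as (illustrative RTL, with %o0 as the base register):
       (parallel [(expr_list (reg:DI %o0) (const_int 0))
                  (expr_list (reg:DI %o1) (const_int 8))])
   i.e. two left-justified doubleword registers.  */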
5361
5362 /* Handle the FUNCTION_ARG macro.
5363 Determine where to put an argument to a function.
5364 Value is zero to push the argument on the stack,
5365 or a hard register in which to store the argument.
5366
5367 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5368 the preceding args and about the function being called.
5369 MODE is the argument's machine mode.
5370 TYPE is the data type of the argument (as a tree).
5371 This is null for libcalls where that information may
5372 not be available.
5373 NAMED is nonzero if this argument is a named parameter
5374 (otherwise it is an extra parameter matching an ellipsis).
5375 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG. */
5376
5377 rtx
5378 function_arg (const struct sparc_args *cum, enum machine_mode mode,
5379 tree type, int named, int incoming_p)
5380 {
5381 int regbase = (incoming_p
5382 ? SPARC_INCOMING_INT_ARG_FIRST
5383 : SPARC_OUTGOING_INT_ARG_FIRST);
5384 int slotno, regno, padding;
5385 rtx reg;
5386
5387 slotno = function_arg_slotno (cum, mode, type, named, incoming_p,
5388 &regno, &padding);
5389
5390 if (slotno == -1)
5391 return 0;
5392
5393 if (TARGET_ARCH32)
5394 {
5395 reg = gen_rtx_REG (mode, regno);
5396 return reg;
5397 }
5398
5399 if (type && TREE_CODE (type) == RECORD_TYPE)
5400 {
5401 /* Structures up to 16 bytes in size are passed in arg slots on the
5402 stack and are promoted to registers where possible. */
5403
5404 if (int_size_in_bytes (type) > 16)
5405 abort (); /* shouldn't get here */
5406
5407 return function_arg_record_value (type, mode, slotno, named, regbase);
5408 }
5409 else if (type && TREE_CODE (type) == UNION_TYPE)
5410 {
5411 HOST_WIDE_INT size = int_size_in_bytes (type);
5412
5413 if (size > 16)
5414 abort (); /* shouldn't get here */
5415
5416 return function_arg_union_value (size, mode, regno);
5417 }
5418 /* v9 fp args in reg slots beyond the int reg slots get passed in regs
5419 but also have the slot allocated for them.
5420 If no prototype is in scope fp values in register slots get passed
5421 in two places, either fp regs and int regs or fp regs and memory. */
5422 else if ((GET_MODE_CLASS (mode) == MODE_FLOAT
5423 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
5424 || GET_MODE_CLASS (mode) == MODE_VECTOR_INT
5425 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
5426 && SPARC_FP_REG_P (regno))
5427 {
5428 reg = gen_rtx_REG (mode, regno);
5429 if (cum->prototype_p || cum->libcall_p)
5430 {
5431 /* "* 2" because fp reg numbers are recorded in 4 byte
5432 quantities. */
5433 #if 0
5434 /* ??? This will cause the value to be passed in the fp reg and
5435 in the stack. When a prototype exists we want to pass the
5436 value in the reg but reserve space on the stack. That's an
5437 optimization, and is deferred [for a bit]. */
5438 if ((regno - SPARC_FP_ARG_FIRST) >= SPARC_INT_ARG_MAX * 2)
5439 return gen_rtx_PARALLEL (mode,
5440 gen_rtvec (2,
5441 gen_rtx_EXPR_LIST (VOIDmode,
5442 NULL_RTX, const0_rtx),
5443 gen_rtx_EXPR_LIST (VOIDmode,
5444 reg, const0_rtx)));
5445 else
5446 #else
5447 /* ??? It seems that passing back a register even when past
5448 the area declared by REG_PARM_STACK_SPACE will allocate
5449 space appropriately, and will not copy the data onto the
5450 stack, exactly as we desire.
5451
5452 This is due to locate_and_pad_parm being called in
5453 expand_call whenever reg_parm_stack_space > 0, which
5454 while beneficial to our example here, would seem to be
5455 in error from what had been intended. Ho hum... -- r~ */
5456 #endif
5457 return reg;
5458 }
5459 else
5460 {
5461 rtx v0, v1;
5462
5463 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
5464 {
5465 int intreg;
5466
5467 /* On incoming, we don't need to know that the value
5468 is passed in %f0 and %i0; knowing it confuses other parts,
5469 causing needless spillage even in the simplest cases. */
5470 if (incoming_p)
5471 return reg;
5472
5473 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
5474 + (regno - SPARC_FP_ARG_FIRST) / 2);
5475
5476 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5477 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
5478 const0_rtx);
5479 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5480 }
5481 else
5482 {
5483 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5484 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5485 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5486 }
5487 }
5488 }
5489 else
5490 {
5491 /* Scalar or complex int. */
5492 reg = gen_rtx_REG (mode, regno);
5493 }
5494
5495 return reg;
5496 }
5497
5498 /* Handle the FUNCTION_ARG_PARTIAL_NREGS macro.
5499 For an arg passed partly in registers and partly in memory,
5500 this is the number of registers used.
5501 For args passed entirely in registers or entirely in memory, zero.
5502
5503 Any arg that starts in the first 6 regs but won't entirely fit in them
5504 needs partial registers on v8. On v9, structures with integer
5505 values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
5506 values that begin in the last fp reg [where "last fp reg" varies with the
5507 mode] will be split between that reg and memory. */
5508
5509 int
5510 function_arg_partial_nregs (const struct sparc_args *cum,
5511 enum machine_mode mode, tree type, int named)
5512 {
5513 int slotno, regno, padding;
5514
5515 /* We pass 0 for incoming_p here; it doesn't matter. */
5516 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5517
5518 if (slotno == -1)
5519 return 0;
5520
5521 if (TARGET_ARCH32)
5522 {
5523 if ((slotno + (mode == BLKmode
5524 ? ROUND_ADVANCE (int_size_in_bytes (type))
5525 : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
5526 > SPARC_INT_ARG_MAX)
5527 return SPARC_INT_ARG_MAX - slotno;
5528 }
5529 else
5530 {
5531 /* We are guaranteed by function_arg_pass_by_reference that the size
5532 of the argument is not greater than 16 bytes, so we only need to
5533 return 1 if the argument is partially passed in registers. */
5534
5535 if (type && AGGREGATE_TYPE_P (type))
5536 {
5537 int size = int_size_in_bytes (type);
5538
5539 if (size > UNITS_PER_WORD
5540 && slotno == SPARC_INT_ARG_MAX - 1)
5541 return 1;
5542 }
5543 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
5544 || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
5545 && ! (TARGET_FPU && named)))
5546 {
5547 /* The complex types are passed as packed types. */
5548 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
5549 && slotno == SPARC_INT_ARG_MAX - 1)
5550 return 1;
5551 }
5552 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5553 {
5554 if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
5555 > SPARC_FP_ARG_MAX)
5556 return 1;
5557 }
5558 }
5559
5560 return 0;
5561 }
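
/* Example (64-bit): a 16-byte aggregate assigned slot number
   SPARC_INT_ARG_MAX - 1 straddles the register/stack boundary, so 1 is
   returned: its first half travels in the last int register and its
   second half on the stack.  */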
5562
5563 /* Handle the FUNCTION_ARG_PASS_BY_REFERENCE macro.
5564 !v9: The SPARC ABI stipulates passing struct arguments (of any size) and
5565 quad-precision floats by invisible reference.
5566 v9: Aggregates greater than 16 bytes are passed by reference.
5567 For Pascal, also pass arrays by reference. */
5568
5569 int
5570 function_arg_pass_by_reference (const struct sparc_args *cum ATTRIBUTE_UNUSED,
5571 enum machine_mode mode, tree type,
5572 int named ATTRIBUTE_UNUSED)
5573 {
5574 if (TARGET_ARCH32)
5575 {
5576 return ((type && AGGREGATE_TYPE_P (type))
5577 /* Extended ABI (as implemented by the Sun compiler) says
5578 that all complex floats are passed in memory. */
5579 || mode == SCmode
5580 /* Enforce the 2-word cap for passing arguments in registers.
5581 This affects CDImode, TFmode, DCmode, TCmode and large
5582 vector modes. */
5583 || GET_MODE_SIZE (mode) > 8);
5584 }
5585 else
5586 {
5587 return ((type && TREE_CODE (type) == ARRAY_TYPE)
5588 || (type
5589 && AGGREGATE_TYPE_P (type)
5590 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
5591 /* Enforce the 2-word cap for passing arguments in registers.
5592 This affects CTImode, TCmode and large vector modes. */
5593 || GET_MODE_SIZE (mode) > 16);
5594 }
5595 }
5596
5597 /* Handle the FUNCTION_ARG_ADVANCE macro.
5598 Update the data in CUM to advance over an argument
5599 of mode MODE and data type TYPE.
5600 TYPE is null for libcalls where that information may not be available. */
5601
5602 void
5603 function_arg_advance (struct sparc_args *cum, enum machine_mode mode,
5604 tree type, int named)
5605 {
5606 int slotno, regno, padding;
5607
5608 /* We pass 0 for incoming_p here; it doesn't matter. */
5609 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5610
5611 /* If the argument required leading padding, add it. */
5612 if (slotno != -1)
5613 cum->words += padding;
5614
5615 if (TARGET_ARCH32)
5616 {
5617 cum->words += (mode != BLKmode
5618 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5619 : ROUND_ADVANCE (int_size_in_bytes (type)));
5620 }
5621 else
5622 {
5623 if (type && AGGREGATE_TYPE_P (type))
5624 {
5625 int size = int_size_in_bytes (type);
5626
5627 if (size <= 8)
5628 ++cum->words;
5629 else if (size <= 16)
5630 cum->words += 2;
5631 else /* passed by reference */
5632 ++cum->words;
5633 }
5634 else
5635 {
5636 cum->words += (mode != BLKmode
5637 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5638 : ROUND_ADVANCE (int_size_in_bytes (type)));
5639 }
5640 }
5641 }
5642
5643 /* Handle the FUNCTION_ARG_PADDING macro.
5644 For the 64 bit ABI structs are always stored left shifted in their
5645 argument slot. */
5646
5647 enum direction
5648 function_arg_padding (enum machine_mode mode, tree type)
5649 {
5650 if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
5651 return upward;
5652
5653 /* Fall back to the default. */
5654 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
5655 }
5656
5657 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
5658 Specify whether to return the return value in memory. */
5659
5660 static bool
5661 sparc_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
5662 {
5663 if (TARGET_ARCH32)
5664 /* Original SPARC 32-bit ABI says that quad-precision floats
5665 and all structures are returned in memory. Extended ABI
5666 (as implemented by the Sun compiler) says that all complex
5667 floats are returned in registers (8 FP registers at most
5668 for '_Complex long double'). Return all complex integers
5669 in registers (4 at most for '_Complex long long'). */
5670 return (TYPE_MODE (type) == BLKmode
5671 || TYPE_MODE (type) == TFmode
5672 /* Integral vector types follow the scalar FP types conventions. */
5673 || (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_VECTOR_INT
5674 && GET_MODE_SIZE (TYPE_MODE (type)) > 8)
5675 /* FP vector types follow the complex FP types conventions. */
5676 || (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_VECTOR_FLOAT
5677 && GET_MODE_SIZE (TYPE_MODE (type)) > 32));
5678 else
5679 /* Original SPARC 64-bit ABI says that structures and unions
5680 smaller than 32 bytes are returned in registers. Extended
5681 ABI (as implemented by the Sun compiler) says that all complex
5682 floats are returned in registers (8 FP registers at most
5683 for '_Complex long double'). Return all complex integers
5684 in registers (4 at most for '_Complex TItype'). */
5685 return ((TYPE_MODE (type) == BLKmode
5686 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32)
5687 || GET_MODE_SIZE (TYPE_MODE (type)) > 32);
5688 }
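/* Concrete cases implied by the tests above: in 32-bit mode a struct
   (BLKmode) or a TFmode quad float is returned in memory while a
   _Complex double (DCmode, 16 bytes) stays in registers; in 64-bit
   mode a 40-byte struct goes to memory but a 32-byte one is returned
   in registers. */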
5689
5690 /* Handle the TARGET_STRUCT_VALUE target hook.
5691 Return where to find the structure return value address. */
5692
5693 static rtx
5694 sparc_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED, int incoming)
5695 {
5696 if (TARGET_ARCH64)
5697 return 0;
5698 else
5699 {
5700 if (incoming)
5701 return gen_rtx_MEM (Pmode, plus_constant (frame_pointer_rtx,
5702 STRUCT_VALUE_OFFSET));
5703 else
5704 return gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx,
5705 STRUCT_VALUE_OFFSET));
5706 }
5707 }
5708
5709 /* Handle FUNCTION_VALUE, FUNCTION_OUTGOING_VALUE, and LIBCALL_VALUE macros.
5710 For v9, function return values are subject to the same rules as arguments,
5711 except that up to 32 bytes may be returned in registers. */
5712
5713 rtx
5714 function_value (tree type, enum machine_mode mode, int incoming_p)
5715 {
5716 /* Beware that the two values are swapped here with respect to function_arg. */
5717 int regbase = (incoming_p
5718 ? SPARC_OUTGOING_INT_ARG_FIRST
5719 : SPARC_INCOMING_INT_ARG_FIRST);
5720 int regno;
5721
5722 if (TARGET_ARCH64 && type)
5723 {
5724 if (TREE_CODE (type) == RECORD_TYPE)
5725 {
5726 /* Structures up to 32 bytes in size are passed in registers,
5727 promoted to fp registers where possible. */
5728
5729 if (int_size_in_bytes (type) > 32)
5730 abort (); /* shouldn't get here */
5731
5732 return function_arg_record_value (type, mode, 0, 1, regbase);
5733 }
5734 else if (TREE_CODE (type) == UNION_TYPE)
5735 {
5736 HOST_WIDE_INT size = int_size_in_bytes (type);
5737
5738 if (size > 32)
5739 abort (); /* shouldn't get here */
5740
5741 return function_arg_union_value (size, mode, regbase);
5742 }
5743 else if (AGGREGATE_TYPE_P (type))
5744 {
5745 /* All other aggregate types are passed in an integer register
5746 in a mode corresponding to the size of the type. */
5747 HOST_WIDE_INT bytes = int_size_in_bytes (type);
5748
5749 if (bytes > 32)
5750 abort (); /* shouldn't get here */
5751
5752 mode = mode_for_size (bytes * BITS_PER_UNIT, MODE_INT, 0);
5753
5754 /* ??? We probably should have made the same ABI change in
5755 3.4.0 as the one we made for unions. The latter was
5756 required by the SCD though, while the former is not
5757 specified, so we favored compatibility and efficiency.
5758
5759 Now we're stuck for aggregates larger than 16 bytes,
5760 because OImode vanished in the meantime. Let's not
5761 try to be unduly clever, and simply follow the ABI
5762 for unions in that case. */
5763 if (mode == BLKmode)
5764 return function_arg_union_value (bytes, mode, regbase);
5765 }
5766 else if (GET_MODE_CLASS (mode) == MODE_INT
5767 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5768 mode = word_mode;
5769 }
5770
5771 if (TARGET_FPU && (FLOAT_MODE_P (mode) || VECTOR_MODE_P (mode)))
5772 regno = SPARC_FP_ARG_FIRST;
5773 else
5774 regno = regbase;
5775
5776 return gen_rtx_REG (mode, regno);
5777 }
5778
5779 /* Do what is necessary for `va_start'. We look at the current function
5780 to determine if stdarg or varargs is used and return the address of
5781 the first unnamed parameter. */
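/* A sketch of the effect in 32-bit mode: for f (int a, int b, ...) the
   first two argument words are named, so the loop below stores %i2
   through %i5 into their reserved stack slots and the returned address
   points at the slot of the first anonymous argument. */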
5782
5783 static rtx
5784 sparc_builtin_saveregs (void)
5785 {
5786 int first_reg = current_function_args_info.words;
5787 rtx address;
5788 int regno;
5789
5790 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
5791 emit_move_insn (gen_rtx_MEM (word_mode,
5792 gen_rtx_PLUS (Pmode,
5793 frame_pointer_rtx,
5794 GEN_INT (FIRST_PARM_OFFSET (0)
5795 + (UNITS_PER_WORD
5796 * regno)))),
5797 gen_rtx_REG (word_mode,
5798 SPARC_INCOMING_INT_ARG_FIRST + regno));
5799
5800 address = gen_rtx_PLUS (Pmode,
5801 frame_pointer_rtx,
5802 GEN_INT (FIRST_PARM_OFFSET (0)
5803 + UNITS_PER_WORD * first_reg));
5804
5805 return address;
5806 }
5807
5808 /* Implement `va_start' for stdarg. */
5809
5810 void
5811 sparc_va_start (tree valist, rtx nextarg)
5812 {
5813 nextarg = expand_builtin_saveregs ();
5814 std_expand_builtin_va_start (valist, nextarg);
5815 }
5816
5817 /* Implement `va_arg' for stdarg. */
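/* A worked example of the expansion below (64-bit, big-endian): for an
   `int' we get size = 4 and rsize = 8, so the emitted sequence is
   addr = valist + 4; valist = valist + 8; followed by a dereference of
   addr. A 16-byte-aligned type would first round valist up with
   valist = (valist + 15) & -16. */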
5818
5819 static tree
5820 sparc_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
5821 {
5822 HOST_WIDE_INT size, rsize, align;
5823 tree addr, incr;
5824 bool indirect;
5825 tree ptrtype = build_pointer_type (type);
5826
5827 if (function_arg_pass_by_reference (0, TYPE_MODE (type), type, 0))
5828 {
5829 indirect = true;
5830 size = rsize = UNITS_PER_WORD;
5831 align = 0;
5832 }
5833 else
5834 {
5835 indirect = false;
5836 size = int_size_in_bytes (type);
5837 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
5838 align = 0;
5839
5840 if (TARGET_ARCH64)
5841 {
5842 /* For SPARC64, objects requiring 16-byte alignment get it. */
5843 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
5844 align = 2 * UNITS_PER_WORD;
5845
5846 /* SPARC-V9 ABI states that structures up to 16 bytes in size
5847 are given whole slots as needed. */
5848 if (AGGREGATE_TYPE_P (type))
5849 {
5850 if (size == 0)
5851 size = rsize = UNITS_PER_WORD;
5852 else
5853 size = rsize;
5854 }
5855 }
5856 }
5857
5858 incr = valist;
5859 if (align)
5860 {
5861 incr = fold (build2 (PLUS_EXPR, ptr_type_node, incr,
5862 ssize_int (align - 1)));
5863 incr = fold (build2 (BIT_AND_EXPR, ptr_type_node, incr,
5864 ssize_int (-align)));
5865 }
5866
5867 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
5868 addr = incr;
5869
5870 if (BYTES_BIG_ENDIAN && size < rsize)
5871 addr = fold (build2 (PLUS_EXPR, ptr_type_node, incr,
5872 ssize_int (rsize - size)));
5873
5874 if (indirect)
5875 {
5876 addr = fold_convert (build_pointer_type (ptrtype), addr);
5877 addr = build_fold_indirect_ref (addr);
5878 }
5879 /* If the address isn't aligned properly for the type,
5880 we may need to copy to a temporary.
5881 FIXME: This is inefficient. Usually we can do this
5882 in registers. */
5883 else if (align == 0
5884 && TYPE_ALIGN (type) > BITS_PER_WORD)
5885 {
5886 tree tmp = create_tmp_var (type, "va_arg_tmp");
5887 tree dest_addr = build_fold_addr_expr (tmp);
5888
5889 tree copy = build_function_call_expr
5890 (implicit_built_in_decls[BUILT_IN_MEMCPY],
5891 tree_cons (NULL_TREE, dest_addr,
5892 tree_cons (NULL_TREE, addr,
5893 tree_cons (NULL_TREE, size_int (rsize),
5894 NULL_TREE))));
5895
5896 gimplify_and_add (copy, pre_p);
5897 addr = dest_addr;
5898 }
5899 else
5900 addr = fold_convert (ptrtype, addr);
5901
5902 incr = fold (build2 (PLUS_EXPR, ptr_type_node, incr, ssize_int (rsize)));
5903 incr = build2 (MODIFY_EXPR, ptr_type_node, valist, incr);
5904 gimplify_and_add (incr, post_p);
5905
5906 return build_fold_indirect_ref (addr);
5907 }
5908 \f
5909 /* Return the string to output a conditional branch to LABEL, which is
5910 the operand number of the label. OP is the conditional expression.
5911 XEXP (OP, 0) is assumed to be a condition code register (integer or
5912 floating point) and its mode specifies what kind of comparison we made.
5913
5914 REVERSED is nonzero if we should reverse the sense of the comparison.
5915
5916 ANNUL is nonzero if we should generate an annulling branch.
5917
5918 NOOP is nonzero if we have to follow this branch by a noop.
5919
5920 INSN, if set, is the insn. */
5921
5922 const char *
5923 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
5924 int noop, rtx insn)
5925 {
5926 static char string[64];
5927 enum rtx_code code = GET_CODE (op);
5928 rtx cc_reg = XEXP (op, 0);
5929 enum machine_mode mode = GET_MODE (cc_reg);
5930 const char *labelno, *branch;
5931 int spaces = 8, far;
5932 char *p;
5933
5934 /* v9 branches are limited to +-1MB. If it is too far away,
5935 change
5936
5937 bne,pt %xcc, .LC30
5938
5939 to
5940
5941 be,pn %xcc, .+12
5942 nop
5943 ba .LC30
5944
5945 and
5946
5947 fbne,a,pn %fcc2, .LC29
5948
5949 to
5950
5951 fbe,pt %fcc2, .+16
5952 nop
5953 ba .LC29 */
5954
5955 far = get_attr_length (insn) >= 3;
5956 if (reversed ^ far)
5957 {
5958 /* Reversal of FP compares takes care -- an ordered compare
5959 becomes an unordered compare and vice versa. */
5960 if (mode == CCFPmode || mode == CCFPEmode)
5961 code = reverse_condition_maybe_unordered (code);
5962 else
5963 code = reverse_condition (code);
5964 }
5965
5966 /* Start by writing the branch condition. */
5967 if (mode == CCFPmode || mode == CCFPEmode)
5968 {
5969 switch (code)
5970 {
5971 case NE:
5972 branch = "fbne";
5973 break;
5974 case EQ:
5975 branch = "fbe";
5976 break;
5977 case GE:
5978 branch = "fbge";
5979 break;
5980 case GT:
5981 branch = "fbg";
5982 break;
5983 case LE:
5984 branch = "fble";
5985 break;
5986 case LT:
5987 branch = "fbl";
5988 break;
5989 case UNORDERED:
5990 branch = "fbu";
5991 break;
5992 case ORDERED:
5993 branch = "fbo";
5994 break;
5995 case UNGT:
5996 branch = "fbug";
5997 break;
5998 case UNLT:
5999 branch = "fbul";
6000 break;
6001 case UNEQ:
6002 branch = "fbue";
6003 break;
6004 case UNGE:
6005 branch = "fbuge";
6006 break;
6007 case UNLE:
6008 branch = "fbule";
6009 break;
6010 case LTGT:
6011 branch = "fblg";
6012 break;
6013
6014 default:
6015 abort ();
6016 }
6017
6018 /* ??? !v9: FP branches cannot be preceded by another floating point
6019 insn. Because there is currently no concept of pre-delay slots,
6020 we can fix this only by always emitting a nop before a floating
6021 point branch. */
6022
6023 string[0] = '\0';
6024 if (! TARGET_V9)
6025 strcpy (string, "nop\n\t");
6026 strcat (string, branch);
6027 }
6028 else
6029 {
6030 switch (code)
6031 {
6032 case NE:
6033 branch = "bne";
6034 break;
6035 case EQ:
6036 branch = "be";
6037 break;
6038 case GE:
6039 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
6040 branch = "bpos";
6041 else
6042 branch = "bge";
6043 break;
6044 case GT:
6045 branch = "bg";
6046 break;
6047 case LE:
6048 branch = "ble";
6049 break;
6050 case LT:
6051 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
6052 branch = "bneg";
6053 else
6054 branch = "bl";
6055 break;
6056 case GEU:
6057 branch = "bgeu";
6058 break;
6059 case GTU:
6060 branch = "bgu";
6061 break;
6062 case LEU:
6063 branch = "bleu";
6064 break;
6065 case LTU:
6066 branch = "blu";
6067 break;
6068
6069 default:
6070 abort ();
6071 }
6072 strcpy (string, branch);
6073 }
6074 spaces -= strlen (branch);
6075 p = strchr (string, '\0');
6076
6077 /* Now add the annulling, the label, and a possible noop. */
6078 if (annul && ! far)
6079 {
6080 strcpy (p, ",a");
6081 p += 2;
6082 spaces -= 2;
6083 }
6084
6085 if (! TARGET_V9)
6086 labelno = "";
6087 else
6088 {
6089 rtx note;
6090 int v8 = 0;
6091
6092 if (! far && insn && INSN_ADDRESSES_SET_P ())
6093 {
6094 int delta = (INSN_ADDRESSES (INSN_UID (dest))
6095 - INSN_ADDRESSES (INSN_UID (insn)));
6096 /* Leave some instructions for "slop". */
6097 if (delta < -260000 || delta >= 260000)
6098 v8 = 1;
6099 }
6100
6101 if (mode == CCFPmode || mode == CCFPEmode)
6102 {
6103 static char v9_fcc_labelno[] = "%%fccX, ";
6104 /* Set the char indicating the number of the fcc reg to use. */
6105 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
6106 labelno = v9_fcc_labelno;
6107 if (v8)
6108 {
6109 if (REGNO (cc_reg) == SPARC_FCC_REG)
6110 labelno = "";
6111 else
6112 abort ();
6113 }
6114 }
6115 else if (mode == CCXmode || mode == CCX_NOOVmode)
6116 {
6117 labelno = "%%xcc, ";
6118 if (v8)
6119 abort ();
6120 }
6121 else
6122 {
6123 labelno = "%%icc, ";
6124 if (v8)
6125 labelno = "";
6126 }
6127
6128 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6129 {
6130 strcpy (p,
6131 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6132 ? ",pt" : ",pn");
6133 p += 3;
6134 spaces -= 3;
6135 }
6136 }
6137 if (spaces > 0)
6138 *p++ = '\t';
6139 else
6140 *p++ = ' ';
6141 strcpy (p, labelno);
6142 p = strchr (p, '\0');
6143 if (far)
6144 {
6145 strcpy (p, ".+12\n\t nop\n\tb\t");
6146 if (annul || noop)
6147 p[3] = '6';
6148 p += 14;
6149 }
6150 *p++ = '%';
6151 *p++ = 'l';
6152 /* Set the char indicating the number of the operand containing the
6153 label_ref. */
6154 *p++ = label + '0';
6155 *p = '\0';
6156 if (noop)
6157 strcpy (p, "\n\t nop");
6158
6159 return string;
6160 }
6161
6162 /* Emit a library call comparison between floating point X and Y.
6163 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
6164 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
6165 values as arguments instead of the TFmode registers themselves,
6166 that's why we cannot call emit_float_lib_cmp. */
6167 void
6168 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
6169 {
6170 const char *qpfunc;
6171 rtx slot0, slot1, result, tem, tem2;
6172 enum machine_mode mode;
6173
6174 switch (comparison)
6175 {
6176 case EQ:
6177 qpfunc = (TARGET_ARCH64) ? "_Qp_feq" : "_Q_feq";
6178 break;
6179
6180 case NE:
6181 qpfunc = (TARGET_ARCH64) ? "_Qp_fne" : "_Q_fne";
6182 break;
6183
6184 case GT:
6185 qpfunc = (TARGET_ARCH64) ? "_Qp_fgt" : "_Q_fgt";
6186 break;
6187
6188 case GE:
6189 qpfunc = (TARGET_ARCH64) ? "_Qp_fge" : "_Q_fge";
6190 break;
6191
6192 case LT:
6193 qpfunc = (TARGET_ARCH64) ? "_Qp_flt" : "_Q_flt";
6194 break;
6195
6196 case LE:
6197 qpfunc = (TARGET_ARCH64) ? "_Qp_fle" : "_Q_fle";
6198 break;
6199
6200 case ORDERED:
6201 case UNORDERED:
6202 case UNGT:
6203 case UNLT:
6204 case UNEQ:
6205 case UNGE:
6206 case UNLE:
6207 case LTGT:
6208 qpfunc = (TARGET_ARCH64) ? "_Qp_cmp" : "_Q_cmp";
6209 break;
6210
6211 default:
6212 abort ();
6213 break;
6214 }
6215
6216 if (TARGET_ARCH64)
6217 {
6218 if (GET_CODE (x) != MEM)
6219 {
6220 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
6221 emit_insn (gen_rtx_SET (VOIDmode, slot0, x));
6222 }
6223 else
6224 slot0 = x;
6225
6226 if (GET_CODE (y) != MEM)
6227 {
6228 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
6229 emit_insn (gen_rtx_SET (VOIDmode, slot1, y));
6230 }
6231 else
6232 slot1 = y;
6233
6234 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL,
6235 DImode, 2,
6236 XEXP (slot0, 0), Pmode,
6237 XEXP (slot1, 0), Pmode);
6238
6239 mode = DImode;
6240 }
6241 else
6242 {
6243 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL,
6244 SImode, 2,
6245 x, TFmode, y, TFmode);
6246
6247 mode = SImode;
6248 }
6249
6250
6251 /* Immediately move the result of the libcall into a pseudo
6252 register so reload doesn't clobber the value if it needs
6253 the return register for a spill reg. */
6254 result = gen_reg_rtx (mode);
6255 emit_move_insn (result, hard_libcall_value (mode));
6256
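  /* The _Q_cmp/_Qp_cmp routines used for the unordered comparisons
     return 0 for equal, 1 for less, 2 for greater and 3 for unordered,
     as documented for the Sun quad-float support routines; the
     decoding below depends on that encoding. */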
6257 switch (comparison)
6258 {
6259 default:
6260 emit_cmp_insn (result, const0_rtx, NE, NULL_RTX, mode, 0);
6261 break;
6262 case ORDERED:
6263 case UNORDERED:
6264 emit_cmp_insn (result, GEN_INT (3), comparison == UNORDERED ? EQ : NE,
6265 NULL_RTX, mode, 0);
6266 break;
6267 case UNGT:
6268 case UNGE:
6269 emit_cmp_insn (result, const1_rtx,
6270 comparison == UNGT ? GT : NE, NULL_RTX, mode, 0);
6271 break;
6272 case UNLE:
6273 emit_cmp_insn (result, const2_rtx, NE, NULL_RTX, mode, 0);
6274 break;
6275 case UNLT:
6276 tem = gen_reg_rtx (mode);
6277 if (TARGET_ARCH32)
6278 emit_insn (gen_andsi3 (tem, result, const1_rtx));
6279 else
6280 emit_insn (gen_anddi3 (tem, result, const1_rtx));
6281 emit_cmp_insn (tem, const0_rtx, NE, NULL_RTX, mode, 0);
6282 break;
6283 case UNEQ:
6284 case LTGT:
6285 tem = gen_reg_rtx (mode);
6286 if (TARGET_ARCH32)
6287 emit_insn (gen_addsi3 (tem, result, const1_rtx));
6288 else
6289 emit_insn (gen_adddi3 (tem, result, const1_rtx));
6290 tem2 = gen_reg_rtx (mode);
6291 if (TARGET_ARCH32)
6292 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
6293 else
6294 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
6295 emit_cmp_insn (tem2, const0_rtx, comparison == UNEQ ? EQ : NE,
6296 NULL_RTX, mode, 0);
6297 break;
6298 }
6299 }
6300
6301 /* Generate an unsigned DImode to FP conversion. This is the same code
6302 optabs would emit if we didn't have TFmode patterns. */
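/* The expansion sketched here: if the input is nonnegative, convert it
   directly; otherwise compute i0 = (in >> 1) | (in & 1), which halves
   the value while keeping the low bit sticky for correct rounding,
   convert i0 and double the result with f0 + f0. */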
6303
6304 void
6305 sparc_emit_floatunsdi (rtx *operands, enum machine_mode mode)
6306 {
6307 rtx neglab, donelab, i0, i1, f0, in, out;
6308
6309 out = operands[0];
6310 in = force_reg (DImode, operands[1]);
6311 neglab = gen_label_rtx ();
6312 donelab = gen_label_rtx ();
6313 i0 = gen_reg_rtx (DImode);
6314 i1 = gen_reg_rtx (DImode);
6315 f0 = gen_reg_rtx (mode);
6316
6317 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
6318
6319 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
6320 emit_jump_insn (gen_jump (donelab));
6321 emit_barrier ();
6322
6323 emit_label (neglab);
6324
6325 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
6326 emit_insn (gen_anddi3 (i1, in, const1_rtx));
6327 emit_insn (gen_iordi3 (i0, i0, i1));
6328 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
6329 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
6330
6331 emit_label (donelab);
6332 }
6333
6334 /* Generate an FP to unsigned DImode conversion. This is the same code
6335 optabs would emit if we didn't have TFmode patterns. */
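/* The expansion: if the input is below 2^63 (the `limit' constant
   below), a signed fix suffices; otherwise convert in - 2^63 and XOR
   bit 63 into the result, which is equivalent to adding 2^63 back. */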
6336
6337 void
6338 sparc_emit_fixunsdi (rtx *operands, enum machine_mode mode)
6339 {
6340 rtx neglab, donelab, i0, i1, f0, in, out, limit;
6341
6342 out = operands[0];
6343 in = force_reg (mode, operands[1]);
6344 neglab = gen_label_rtx ();
6345 donelab = gen_label_rtx ();
6346 i0 = gen_reg_rtx (DImode);
6347 i1 = gen_reg_rtx (DImode);
6348 limit = gen_reg_rtx (mode);
6349 f0 = gen_reg_rtx (mode);
6350
6351 emit_move_insn (limit,
6352 CONST_DOUBLE_FROM_REAL_VALUE (
6353 REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
6354 emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
6355
6356 emit_insn (gen_rtx_SET (VOIDmode,
6357 out,
6358 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
6359 emit_jump_insn (gen_jump (donelab));
6360 emit_barrier ();
6361
6362 emit_label (neglab);
6363
6364 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_MINUS (mode, in, limit)));
6365 emit_insn (gen_rtx_SET (VOIDmode,
6366 i0,
6367 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
6368 emit_insn (gen_movdi (i1, const1_rtx));
6369 emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
6370 emit_insn (gen_xordi3 (out, i0, i1));
6371
6372 emit_label (donelab);
6373 }
6374
6375 /* Return the string to output a conditional branch to LABEL, testing
6376 register REG. LABEL is the operand number of the label; REG is the
6377 operand number of the reg. OP is the conditional expression. The mode
6378 of REG says what kind of comparison we made.
6379
6380 REVERSED is nonzero if we should reverse the sense of the comparison.
6381
6382 ANNUL is nonzero if we should generate an annulling branch.
6383
6384 NOOP is nonzero if we have to follow this branch by a noop. */
6385
6386 const char *
6387 output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
6388 int annul, int noop, rtx insn)
6389 {
6390 static char string[64];
6391 enum rtx_code code = GET_CODE (op);
6392 enum machine_mode mode = GET_MODE (XEXP (op, 0));
6393 rtx note;
6394 int far;
6395 char *p;
6396
6397 /* Branches on a register are limited to +-128KB. If it is too far away,
6398 change
6399
6400 brnz,pt %g1, .LC30
6401
6402 to
6403
6404 brz,pn %g1, .+12
6405 nop
6406 ba,pt %xcc, .LC30
6407
6408 and
6409
6410 brgez,a,pn %o1, .LC29
6411
6412 to
6413
6414 brlz,pt %o1, .+16
6415 nop
6416 ba,pt %xcc, .LC29 */
6417
6418 far = get_attr_length (insn) >= 3;
6419
6420 /* If not floating-point or if EQ or NE, we can just reverse the code. */
6421 if (reversed ^ far)
6422 code = reverse_condition (code);
6423
6424 /* Only 64 bit versions of these instructions exist. */
6425 if (mode != DImode)
6426 abort ();
6427
6428 /* Start by writing the branch condition. */
6429
6430 switch (code)
6431 {
6432 case NE:
6433 strcpy (string, "brnz");
6434 break;
6435
6436 case EQ:
6437 strcpy (string, "brz");
6438 break;
6439
6440 case GE:
6441 strcpy (string, "brgez");
6442 break;
6443
6444 case LT:
6445 strcpy (string, "brlz");
6446 break;
6447
6448 case LE:
6449 strcpy (string, "brlez");
6450 break;
6451
6452 case GT:
6453 strcpy (string, "brgz");
6454 break;
6455
6456 default:
6457 abort ();
6458 }
6459
6460 p = strchr (string, '\0');
6461
6462 /* Now add the annulling, reg, label, and nop. */
6463 if (annul && ! far)
6464 {
6465 strcpy (p, ",a");
6466 p += 2;
6467 }
6468
6469 if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6470 {
6471 strcpy (p,
6472 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6473 ? ",pt" : ",pn");
6474 p += 3;
6475 }
6476
6477 *p = p < string + 8 ? '\t' : ' ';
6478 p++;
6479 *p++ = '%';
6480 *p++ = '0' + reg;
6481 *p++ = ',';
6482 *p++ = ' ';
6483 if (far)
6484 {
6485 int veryfar = 1, delta;
6486
6487 if (INSN_ADDRESSES_SET_P ())
6488 {
6489 delta = (INSN_ADDRESSES (INSN_UID (dest))
6490 - INSN_ADDRESSES (INSN_UID (insn)));
6491 /* Leave some instructions for "slop". */
6492 if (delta >= -260000 && delta < 260000)
6493 veryfar = 0;
6494 }
6495
6496 strcpy (p, ".+12\n\t nop\n\t");
6497 if (annul || noop)
6498 p[3] = '6';
6499 p += 12;
6500 if (veryfar)
6501 {
6502 strcpy (p, "b\t");
6503 p += 2;
6504 }
6505 else
6506 {
6507 strcpy (p, "ba,pt\t%%xcc, ");
6508 p += 13;
6509 }
6510 }
6511 *p++ = '%';
6512 *p++ = 'l';
6513 *p++ = '0' + label;
6514 *p = '\0';
6515
6516 if (noop)
6517 strcpy (p, "\n\t nop");
6518
6519 return string;
6520 }
6521
6522 /* Return 1 if any of the registers of the instruction are %l[0-7] or %o[0-7].
6523 Such instructions cannot be used in the delay slot of a return insn on v9.
6524 If TEST is 0, also rename all %i[0-7] registers to their %o[0-7] counterparts.
6525 */
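/* For example, with TEST == 0 a delay-slot insn such as
   (set (reg:SI %i0) (reg:SI %g1))
   is rewritten to set %o0 instead, since the register window will
   already have been shifted when the slot executes. */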
6526
6527 static int
6528 epilogue_renumber (register rtx *where, int test)
6529 {
6530 register const char *fmt;
6531 register int i;
6532 register enum rtx_code code;
6533
6534 if (*where == 0)
6535 return 0;
6536
6537 code = GET_CODE (*where);
6538
6539 switch (code)
6540 {
6541 case REG:
6542 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
6543 return 1;
6544 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
6545 *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO(*where)));
6546 case SCRATCH:
6547 case CC0:
6548 case PC:
6549 case CONST_INT:
6550 case CONST_DOUBLE:
6551 return 0;
6552
6553 /* Do not replace the frame pointer with the stack pointer because
6554 it can cause the delayed instruction to load below the stack.
6555 This occurs when instructions like:
6556
6557 (set (reg/i:SI 24 %i0)
6558 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
6559 (const_int -20 [0xffffffec])) 0))
6560
6561 are in the return delay slot. */
6562 case PLUS:
6563 if (GET_CODE (XEXP (*where, 0)) == REG
6564 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
6565 && (GET_CODE (XEXP (*where, 1)) != CONST_INT
6566 || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
6567 return 1;
6568 break;
6569
6570 case MEM:
6571 if (SPARC_STACK_BIAS
6572 && GET_CODE (XEXP (*where, 0)) == REG
6573 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
6574 return 1;
6575 break;
6576
6577 default:
6578 break;
6579 }
6580
6581 fmt = GET_RTX_FORMAT (code);
6582
6583 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6584 {
6585 if (fmt[i] == 'E')
6586 {
6587 register int j;
6588 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
6589 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
6590 return 1;
6591 }
6592 else if (fmt[i] == 'e'
6593 && epilogue_renumber (&(XEXP (*where, i)), test))
6594 return 1;
6595 }
6596 return 0;
6597 }
6598 \f
6599 /* Leaf functions and non-leaf functions have different needs. */
6600
6601 static const int
6602 reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
6603
6604 static const int
6605 reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
6606
6607 static const int *const reg_alloc_orders[] = {
6608 reg_leaf_alloc_order,
6609 reg_nonleaf_alloc_order};
6610
6611 void
6612 order_regs_for_local_alloc (void)
6613 {
6614 static int last_order_nonleaf = 1;
6615
6616 if (regs_ever_live[15] != last_order_nonleaf)
6617 {
6618 last_order_nonleaf = !last_order_nonleaf;
6619 memcpy ((char *) reg_alloc_order,
6620 (const char *) reg_alloc_orders[last_order_nonleaf],
6621 FIRST_PSEUDO_REGISTER * sizeof (int));
6622 }
6623 }
6624 \f
6625 /* Return 1 if REG and MEM are legitimate enough to allow the various
6626 mem<-->reg splits to be run. */
6627
6628 int
6629 sparc_splitdi_legitimate (rtx reg, rtx mem)
6630 {
6631 /* Punt if we are here by mistake. */
6632 if (! reload_completed)
6633 abort ();
6634
6635 /* We must have an offsettable memory reference. */
6636 if (! offsettable_memref_p (mem))
6637 return 0;
6638
6639 /* If we have legitimate args for ldd/std, we do not want
6640 the split to happen. */
6641 if ((REGNO (reg) % 2) == 0
6642 && mem_min_alignment (mem, 8))
6643 return 0;
6644
6645 /* Success. */
6646 return 1;
6647 }
6648
6649 /* Return 1 if x and y are some kind of REG and they refer to
6650 different hard registers. This test is guaranteed to be
6651 run after reload. */
6652
6653 int
6654 sparc_absnegfloat_split_legitimate (rtx x, rtx y)
6655 {
6656 if (GET_CODE (x) != REG)
6657 return 0;
6658 if (GET_CODE (y) != REG)
6659 return 0;
6660 if (REGNO (x) == REGNO (y))
6661 return 0;
6662 return 1;
6663 }
6664
6665 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
6666 This makes them candidates for using ldd and std insns.
6667
6668 Note reg1 and reg2 *must* be hard registers. */
6669
6670 int
6671 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
6672 {
6673 /* We might have been passed a SUBREG. */
6674 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
6675 return 0;
6676
6677 if (REGNO (reg1) % 2 != 0)
6678 return 0;
6679
6680 /* Integer ldd is deprecated in SPARC V9. */
6681 if (TARGET_V9 && REGNO (reg1) < 32)
6682 return 0;
6683
6684 return (REGNO (reg1) == REGNO (reg2) - 1);
6685 }
6686
6687 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in
6688 an ldd or std insn.
6689
6690 This can only happen when addr1 and addr2, the addresses in mem1
6691 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
6692 addr1 must also be aligned on a 64-bit boundary.
6693
6694 Also, if dependent_reg_rtx is not null, it should not be used to
6695 compute the address for mem1, i.e. we cannot optimize a sequence
6696 like:
6697 ld [%o0], %o0
6698 ld [%o0 + 4], %o1
6699 to
6700 ldd [%o0], %o0
6701 nor:
6702 ld [%g3 + 4], %g3
6703 ld [%g3], %g2
6704 to
6705 ldd [%g3], %g2
6706
6707 But, note that the transformation from:
6708 ld [%g2 + 4], %g3
6709 ld [%g2], %g2
6710 to
6711 ldd [%g2], %g2
6712 is perfectly fine. Thus, the peephole2 patterns always pass us
6713 the destination register of the first load, never the second one.
6714
6715 For stores we don't have a similar problem, so dependent_reg_rtx is
6716 NULL_RTX. */
6717
6718 int
6719 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
6720 {
6721 rtx addr1, addr2;
6722 unsigned int reg1;
6723 HOST_WIDE_INT offset1;
6724
6725 /* The mems cannot be volatile. */
6726 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
6727 return 0;
6728
6729 /* MEM1 should be aligned on a 64-bit boundary. */
6730 if (MEM_ALIGN (mem1) < 64)
6731 return 0;
6732
6733 addr1 = XEXP (mem1, 0);
6734 addr2 = XEXP (mem2, 0);
6735
6736 /* Extract a register number and offset (if used) from the first addr. */
6737 if (GET_CODE (addr1) == PLUS)
6738 {
6739 /* If not a REG, return zero. */
6740 if (GET_CODE (XEXP (addr1, 0)) != REG)
6741 return 0;
6742 else
6743 {
6744 reg1 = REGNO (XEXP (addr1, 0));
6745 /* The offset must be constant! */
6746 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
6747 return 0;
6748 offset1 = INTVAL (XEXP (addr1, 1));
6749 }
6750 }
6751 else if (GET_CODE (addr1) != REG)
6752 return 0;
6753 else
6754 {
6755 reg1 = REGNO (addr1);
6756 /* This was a simple (mem (reg)) expression. Offset is 0. */
6757 offset1 = 0;
6758 }
6759
6760 /* Make sure the second address is a (mem (plus (reg) (const_int))). */
6761 if (GET_CODE (addr2) != PLUS)
6762 return 0;
6763
6764 if (GET_CODE (XEXP (addr2, 0)) != REG
6765 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
6766 return 0;
6767
6768 if (reg1 != REGNO (XEXP (addr2, 0)))
6769 return 0;
6770
6771 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
6772 return 0;
6773
6774 /* The first offset must be evenly divisible by 8 to ensure the
6775 address is 64-bit aligned. */
6776 if (offset1 % 8 != 0)
6777 return 0;
6778
6779 /* The offset of the second addr must be 4 more than that of the first. */
6780 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
6781 return 0;
6782
6783 /* All the tests passed. addr1 and addr2 are valid for ldd and std
6784 instructions. */
6785 return 1;
6786 }
6787
6788 /* Return 1 if reg is a pseudo, or is the first register in
6789 a hard register pair. This makes it a candidate for use in
6790 ldd and std insns. */
6791
6792 int
6793 register_ok_for_ldd (rtx reg)
6794 {
6795 /* We might have been passed a SUBREG. */
6796 if (GET_CODE (reg) != REG)
6797 return 0;
6798
6799 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
6800 return (REGNO (reg) % 2 == 0);
6801 else
6802 return 1;
6803 }
6804 \f
6805 /* Print operand X (an rtx) in assembler syntax to file FILE.
6806 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
6807 For `%' followed by punctuation, CODE is the punctuation and X is null. */
6808
6809 void
6810 print_operand (FILE *file, rtx x, int code)
6811 {
6812 switch (code)
6813 {
6814 case '#':
6815 /* Output a 'nop' if there's nothing for the delay slot. */
6816 if (dbr_sequence_length () == 0)
6817 fputs ("\n\t nop", file);
6818 return;
6819 case '*':
6820 /* Output an annul flag if there's nothing for the delay slot and we
6821 are optimizing. This is always used with '(' below. */
6822 /* Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
6823 this is a dbx bug. So, we only do this when optimizing. */
6824 /* On UltraSPARC, a branch in a delay slot causes a pipeline flush.
6825 Always emit a nop in case the next instruction is a branch. */
6826 if (dbr_sequence_length () == 0
6827 && (optimize && (int)sparc_cpu < PROCESSOR_V9))
6828 fputs (",a", file);
6829 return;
6830 case '(':
6831 /* Output a 'nop' if there's nothing for the delay slot and we are
6832 not optimizing. This is always used with '*' above. */
6833 if (dbr_sequence_length () == 0
6834 && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
6835 fputs ("\n\t nop", file);
6836 return;
6837 case '_':
6838 /* Output the Embedded Medium/Anywhere code model base register. */
6839 fputs (EMBMEDANY_BASE_REG, file);
6840 return;
6841 case '@':
6842 /* Print out what we are using as the frame pointer. This might
6843 be %fp, or might be %sp+offset. */
6844 /* ??? What if offset is too big? Perhaps the caller knows it isn't? */
6845 fprintf (file, "%s+"HOST_WIDE_INT_PRINT_DEC,
6846 reg_names[REGNO (frame_base_reg)], frame_base_offset);
6847 return;
6848 case '&':
6849 /* Print some local dynamic TLS name. */
6850 assemble_name (file, get_some_local_dynamic_name ());
6851 return;
6852 case 'Y':
6853 /* Adjust the operand to take into account a RESTORE operation. */
6854 if (GET_CODE (x) == CONST_INT)
6855 break;
6856 else if (GET_CODE (x) != REG)
6857 output_operand_lossage ("invalid %%Y operand");
6858 else if (REGNO (x) < 8)
6859 fputs (reg_names[REGNO (x)], file);
6860 else if (REGNO (x) >= 24 && REGNO (x) < 32)
6861 fputs (reg_names[REGNO (x)-16], file);
6862 else
6863 output_operand_lossage ("invalid %%Y operand");
6864 return;
6865 case 'L':
6866 /* Print out the low order register name of a register pair. */
6867 if (WORDS_BIG_ENDIAN)
6868 fputs (reg_names[REGNO (x)+1], file);
6869 else
6870 fputs (reg_names[REGNO (x)], file);
6871 return;
6872 case 'H':
6873 /* Print out the high order register name of a register pair. */
6874 if (WORDS_BIG_ENDIAN)
6875 fputs (reg_names[REGNO (x)], file);
6876 else
6877 fputs (reg_names[REGNO (x)+1], file);
6878 return;
6879 case 'R':
6880 /* Print out the second register name of a register pair or quad.
6881 I.e., R (%o0) => %o1. */
6882 fputs (reg_names[REGNO (x)+1], file);
6883 return;
6884 case 'S':
6885 /* Print out the third register name of a register quad.
6886 I.e., S (%o0) => %o2. */
6887 fputs (reg_names[REGNO (x)+2], file);
6888 return;
6889 case 'T':
6890 /* Print out the fourth register name of a register quad.
6891 I.e., T (%o0) => %o3. */
6892 fputs (reg_names[REGNO (x)+3], file);
6893 return;
6894 case 'x':
6895 /* Print a condition code register. */
6896 if (REGNO (x) == SPARC_ICC_REG)
6897 {
6898 /* We don't handle CC[X]_NOOVmode because they're not supposed
6899 to occur here. */
6900 if (GET_MODE (x) == CCmode)
6901 fputs ("%icc", file);
6902 else if (GET_MODE (x) == CCXmode)
6903 fputs ("%xcc", file);
6904 else
6905 abort ();
6906 }
6907 else
6908 /* %fccN register */
6909 fputs (reg_names[REGNO (x)], file);
6910 return;
6911 case 'm':
6912 /* Print the operand's address only. */
6913 output_address (XEXP (x, 0));
6914 return;
6915 case 'r':
6916 /* In this case we need a register. Use %g0 if the
6917 operand is const0_rtx. */
6918 if (x == const0_rtx
6919 || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
6920 {
6921 fputs ("%g0", file);
6922 return;
6923 }
6924 else
6925 break;
6926
6927 case 'A':
6928 switch (GET_CODE (x))
6929 {
6930 case IOR: fputs ("or", file); break;
6931 case AND: fputs ("and", file); break;
6932 case XOR: fputs ("xor", file); break;
6933 default: output_operand_lossage ("invalid %%A operand");
6934 }
6935 return;
6936
6937 case 'B':
6938 switch (GET_CODE (x))
6939 {
6940 case IOR: fputs ("orn", file); break;
6941 case AND: fputs ("andn", file); break;
6942 case XOR: fputs ("xnor", file); break;
6943 default: output_operand_lossage ("invalid %%B operand");
6944 }
6945 return;
6946
6947 /* These are used by the conditional move instructions. */
6948 case 'c':
6949 case 'C':
6950 {
6951 enum rtx_code rc = GET_CODE (x);
6952
6953 if (code == 'c')
6954 {
6955 enum machine_mode mode = GET_MODE (XEXP (x, 0));
6956 if (mode == CCFPmode || mode == CCFPEmode)
6957 rc = reverse_condition_maybe_unordered (GET_CODE (x));
6958 else
6959 rc = reverse_condition (GET_CODE (x));
6960 }
6961 switch (rc)
6962 {
6963 case NE: fputs ("ne", file); break;
6964 case EQ: fputs ("e", file); break;
6965 case GE: fputs ("ge", file); break;
6966 case GT: fputs ("g", file); break;
6967 case LE: fputs ("le", file); break;
6968 case LT: fputs ("l", file); break;
6969 case GEU: fputs ("geu", file); break;
6970 case GTU: fputs ("gu", file); break;
6971 case LEU: fputs ("leu", file); break;
6972 case LTU: fputs ("lu", file); break;
6973 case LTGT: fputs ("lg", file); break;
6974 case UNORDERED: fputs ("u", file); break;
6975 case ORDERED: fputs ("o", file); break;
6976 case UNLT: fputs ("ul", file); break;
6977 case UNLE: fputs ("ule", file); break;
6978 case UNGT: fputs ("ug", file); break;
6979 case UNGE: fputs ("uge", file); break;
6980 case UNEQ: fputs ("ue", file); break;
6981 default: output_operand_lossage (code == 'c'
6982 ? "invalid %%c operand"
6983 : "invalid %%C operand");
6984 }
6985 return;
6986 }
6987
6988 /* These are used by the movr instruction pattern. */
6989 case 'd':
6990 case 'D':
6991 {
6992 enum rtx_code rc = (code == 'd'
6993 ? reverse_condition (GET_CODE (x))
6994 : GET_CODE (x));
6995 switch (rc)
6996 {
6997 case NE: fputs ("ne", file); break;
6998 case EQ: fputs ("e", file); break;
6999 case GE: fputs ("gez", file); break;
7000 case LT: fputs ("lz", file); break;
7001 case LE: fputs ("lez", file); break;
7002 case GT: fputs ("gz", file); break;
7003 default: output_operand_lossage (code == 'd'
7004 ? "invalid %%d operand"
7005 : "invalid %%D operand");
7006 }
7007 return;
7008 }
7009
7010 case 'b':
7011 {
7012 /* Print a sign-extended character. */
7013 int i = trunc_int_for_mode (INTVAL (x), QImode);
7014 fprintf (file, "%d", i);
7015 return;
7016 }
7017
7018 case 'f':
7019 /* Operand must be a MEM; write its address. */
7020 if (GET_CODE (x) != MEM)
7021 output_operand_lossage ("invalid %%f operand");
7022 output_address (XEXP (x, 0));
7023 return;
7024
7025 case 's':
7026 {
7027 /* Print a sign-extended 32-bit value. */
7028 HOST_WIDE_INT i;
7029 if (GET_CODE (x) == CONST_INT)
7030 i = INTVAL (x);
7031 else if (GET_CODE (x) == CONST_DOUBLE)
7032 i = CONST_DOUBLE_LOW (x);
7033 else
7034 {
7035 output_operand_lossage ("invalid %%s operand");
7036 return;
7037 }
7038 i = trunc_int_for_mode (i, SImode);
7039 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
7040 return;
7041 }
7042
7043 case 0:
7044 /* Do nothing special. */
7045 break;
7046
7047 default:
7048 /* Undocumented flag. */
7049 output_operand_lossage ("invalid operand output code");
7050 }
7051
7052 if (GET_CODE (x) == REG)
7053 fputs (reg_names[REGNO (x)], file);
7054 else if (GET_CODE (x) == MEM)
7055 {
7056 fputc ('[', file);
7057 /* Poor Sun assembler doesn't understand absolute addressing. */
7058 if (CONSTANT_P (XEXP (x, 0)))
7059 fputs ("%g0+", file);
7060 output_address (XEXP (x, 0));
7061 fputc (']', file);
7062 }
7063 else if (GET_CODE (x) == HIGH)
7064 {
7065 fputs ("%hi(", file);
7066 output_addr_const (file, XEXP (x, 0));
7067 fputc (')', file);
7068 }
7069 else if (GET_CODE (x) == LO_SUM)
7070 {
7071 print_operand (file, XEXP (x, 0), 0);
7072 if (TARGET_CM_MEDMID)
7073 fputs ("+%l44(", file);
7074 else
7075 fputs ("+%lo(", file);
7076 output_addr_const (file, XEXP (x, 1));
7077 fputc (')', file);
7078 }
7079 else if (GET_CODE (x) == CONST_DOUBLE
7080 && (GET_MODE (x) == VOIDmode
7081 || GET_MODE_CLASS (GET_MODE (x)) == MODE_INT))
7082 {
7083 if (CONST_DOUBLE_HIGH (x) == 0)
7084 fprintf (file, "%u", (unsigned int) CONST_DOUBLE_LOW (x));
7085 else if (CONST_DOUBLE_HIGH (x) == -1
7086 && CONST_DOUBLE_LOW (x) < 0)
7087 fprintf (file, "%d", (int) CONST_DOUBLE_LOW (x));
7088 else
7089 output_operand_lossage ("long long constant not a valid immediate operand");
7090 }
7091 else if (GET_CODE (x) == CONST_DOUBLE)
7092 output_operand_lossage ("floating point constant not a valid immediate operand");
7093 else { output_addr_const (file, x); }
7094 }
7095 \f
7096 /* Target hook for assembling integer objects. The sparc version has
7097 special handling for aligned DI-mode objects. */
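/* E.g. an aligned 8-byte symbolic address is emitted as a single
   "\t.xword\t" directive on v9, and otherwise as a zero word followed
   by a ".word" of the symbol, since the high half of such an address
   is always zero there. */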
7098
7099 static bool
7100 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
7101 {
7102 /* ??? We only output .xword's for symbols and only then in environments
7103 where the assembler can handle them. */
7104 if (aligned_p && size == 8
7105 && (GET_CODE (x) != CONST_INT && GET_CODE (x) != CONST_DOUBLE))
7106 {
7107 if (TARGET_V9)
7108 {
7109 assemble_integer_with_op ("\t.xword\t", x);
7110 return true;
7111 }
7112 else
7113 {
7114 assemble_aligned_integer (4, const0_rtx);
7115 assemble_aligned_integer (4, x);
7116 return true;
7117 }
7118 }
7119 return default_assemble_integer (x, size, aligned_p);
7120 }
7121 \f
7122 /* Return the value of a code used in the .proc pseudo-op that says
7123 what kind of result this function returns. For non-C types, we pick
7124 the closest C type. */
7125
7126 #ifndef SHORT_TYPE_SIZE
7127 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
7128 #endif
7129
7130 #ifndef INT_TYPE_SIZE
7131 #define INT_TYPE_SIZE BITS_PER_WORD
7132 #endif
7133
7134 #ifndef LONG_TYPE_SIZE
7135 #define LONG_TYPE_SIZE BITS_PER_WORD
7136 #endif
7137
7138 #ifndef LONG_LONG_TYPE_SIZE
7139 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
7140 #endif
7141
7142 #ifndef FLOAT_TYPE_SIZE
7143 #define FLOAT_TYPE_SIZE BITS_PER_WORD
7144 #endif
7145
7146 #ifndef DOUBLE_TYPE_SIZE
7147 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7148 #endif
7149
7150 #ifndef LONG_DOUBLE_TYPE_SIZE
7151 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7152 #endif
7153
7154 unsigned long
7155 sparc_type_code (register tree type)
7156 {
7157 register unsigned long qualifiers = 0;
7158 register unsigned shift;
7159
7160 /* Only the first 30 bits of the qualifier are valid. We must refrain from
7161 setting more, since some assemblers will give an error for this. Also,
7162 we must be careful to avoid shifts of 32 bits or more to avoid getting
7163 unpredictable results. */
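/* A worked example: for `int *a[10]' (array of pointer to int) the
   loop records 3 (array) at shift 6 and 1 (pointer) at shift 8, and
   the INTEGER_TYPE case then ORs in 4 for signed int, giving
   (3 << 6) | (1 << 8) | 4 = 0x1c4. */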
7164
7165 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
7166 {
7167 switch (TREE_CODE (type))
7168 {
7169 case ERROR_MARK:
7170 return qualifiers;
7171
7172 case ARRAY_TYPE:
7173 qualifiers |= (3 << shift);
7174 break;
7175
7176 case FUNCTION_TYPE:
7177 case METHOD_TYPE:
7178 qualifiers |= (2 << shift);
7179 break;
7180
7181 case POINTER_TYPE:
7182 case REFERENCE_TYPE:
7183 case OFFSET_TYPE:
7184 qualifiers |= (1 << shift);
7185 break;
7186
7187 case RECORD_TYPE:
7188 return (qualifiers | 8);
7189
7190 case UNION_TYPE:
7191 case QUAL_UNION_TYPE:
7192 return (qualifiers | 9);
7193
7194 case ENUMERAL_TYPE:
7195 return (qualifiers | 10);
7196
7197 case VOID_TYPE:
7198 return (qualifiers | 16);
7199
7200 case INTEGER_TYPE:
7201 /* If this is a range type, consider it to be the underlying
7202 type. */
7203 if (TREE_TYPE (type) != 0)
7204 break;
7205
7206 /* Carefully distinguish all the standard types of C,
7207 without messing up if the language is not C. We do this by
7208 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
7209 look at both the names and the above fields, but that's redundant.
7210 Any type whose size is between two C types will be considered
7211 to be the wider of the two types. Also, we do not have a
7212 special code to use for "long long", so anything wider than
7213 long is treated the same. Note that we can't distinguish
7214 between "int" and "long" in this code if they are the same
7215 size, but that's fine, since neither can the assembler. */
7216
7217 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
7218 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
7219
7220 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
7221 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
7222
7223 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
7224 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
7225
7226 else
7227 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
7228
7229 case REAL_TYPE:
7230 /* If this is a range type, consider it to be the underlying
7231 type. */
7232 if (TREE_TYPE (type) != 0)
7233 break;
7234
7235 /* Carefully distinguish all the standard types of C,
7236 without messing up if the language is not C. */
7237
7238 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
7239 return (qualifiers | 6);
7240
7241 else
7242 return (qualifiers | 7);
7243
7244 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
7245 /* ??? We need to distinguish between double and float complex types,
7246 but I don't know how yet because I can't reach this code from
7247 existing front-ends. */
7248 return (qualifiers | 7); /* Who knows? */
7249
7250 case VECTOR_TYPE:
7251 case CHAR_TYPE: /* GNU Pascal CHAR type. Not used in C. */
7252 case BOOLEAN_TYPE: /* GNU Fortran BOOLEAN type. */
7253 case FILE_TYPE: /* GNU Pascal FILE type. */
7254 case SET_TYPE: /* GNU Pascal SET type. */
7255 case LANG_TYPE: /* ? */
7256 return qualifiers;
7257
7258 default:
7259 abort (); /* Not a type! */
7260 }
7261 }
7262
7263 return qualifiers;
7264 }
7265 \f
7266 /* Nested function support. */
7267
7268 /* Emit RTL insns to initialize the variable parts of a trampoline.
7269 FNADDR is an RTX for the address of the function's pure code.
7270 CXT is an RTX for the static chain value for the function.
7271
7272 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
7273 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
7274 (to store insns). This is a bit excessive. Perhaps a different
7275 mechanism would be better here.
7276
7277 Emit enough FLUSH insns to synchronize the data and instruction caches. */
7278
7279 void
7280 sparc_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
7281 {
7282 /* SPARC 32-bit trampoline:
7283
7284 sethi %hi(fn), %g1
7285 sethi %hi(static), %g2
7286 jmp %g1+%lo(fn)
7287 or %g2, %lo(static), %g2
7288
7289 SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
7290 JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
7291 */
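  /* Each word stored below is assembled from the encodings above; for
     instance the first one builds `sethi %hi(fn), %g1' as
     0x03000000 | (fn >> 10) and the third builds `jmp %g1+%lo(fn)' as
     0x81c06000 | (fn & 0x3ff). */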
7292
7293 emit_move_insn
7294 (gen_rtx_MEM (SImode, plus_constant (tramp, 0)),
7295 expand_binop (SImode, ior_optab,
7296 expand_shift (RSHIFT_EXPR, SImode, fnaddr,
7297 size_int (10), 0, 1),
7298 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
7299 NULL_RTX, 1, OPTAB_DIRECT));
7300
7301 emit_move_insn
7302 (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
7303 expand_binop (SImode, ior_optab,
7304 expand_shift (RSHIFT_EXPR, SImode, cxt,
7305 size_int (10), 0, 1),
7306 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
7307 NULL_RTX, 1, OPTAB_DIRECT));
7308
7309 emit_move_insn
7310 (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
7311 expand_binop (SImode, ior_optab,
7312 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
7313 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
7314 NULL_RTX, 1, OPTAB_DIRECT));
7315
7316 emit_move_insn
7317 (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
7318 expand_binop (SImode, ior_optab,
7319 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
7320 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
7321 NULL_RTX, 1, OPTAB_DIRECT));
7322
7323 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
7324 aligned on a 16 byte boundary so one flush clears it all. */
7325 emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode, tramp))));
7326 if (sparc_cpu != PROCESSOR_ULTRASPARC
7327 && sparc_cpu != PROCESSOR_ULTRASPARC3)
7328 emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode,
7329 plus_constant (tramp, 8)))));
7330
7331 /* Call __enable_execute_stack after writing onto the stack to make sure
7332 the stack address is accessible. */
7333 #ifdef TRANSFER_FROM_TRAMPOLINE
7334 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7335 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
7336 #endif
7337
7338 }
7339
7340 /* The 64-bit version is simpler because it makes more sense to load the
7341 values as "immediate" data out of the trampoline. It's also easier since
7342 we can read the PC without clobbering a register. */
7343
7344 void
7345 sparc64_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
7346 {
7347 /* SPARC 64-bit trampoline:
7348
7349 rd %pc, %g1
7350 ldx [%g1+24], %g5
7351 jmp %g5
7352 ldx [%g1+16], %g5
7353 +16 bytes data
7354 */
7355
7356 emit_move_insn (gen_rtx_MEM (SImode, tramp),
7357 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
7358 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
7359 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
7360 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
7361 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
7362 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
7363 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
7364 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 16)), cxt);
7365 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 24)), fnaddr);
7366 emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, tramp))));
7367
7368 if (sparc_cpu != PROCESSOR_ULTRASPARC
7369 && sparc_cpu != PROCESSOR_ULTRASPARC3)
7370 emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, plus_constant (tramp, 8)))));
7371
7372 /* Call __enable_execute_stack after writing onto the stack to make sure
7373 the stack address is accessible. */
7374 #ifdef TRANSFER_FROM_TRAMPOLINE
7375 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7376 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
7377 #endif
7378 }
7379 \f
7380 /* Adjust the cost of a scheduling dependency. Return the new cost of
7381 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
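/* For instance, on SuperSPARC a load whose address depends on the
   result of DEP_INSN is charged COST + 3 below, while an anti- or
   output dependence ending in an integer ALU or shift insn costs
   nothing. */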
7382
7383 static int
7384 supersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7385 {
7386 enum attr_type insn_type;
7387
7388 if (! recog_memoized (insn))
7389 return 0;
7390
7391 insn_type = get_attr_type (insn);
7392
7393 if (REG_NOTE_KIND (link) == 0)
7394 {
7395 /* Data dependency; DEP_INSN writes a register that INSN reads some
7396 cycles later. */
7397
7398 /* If a load, then the dependence must be on the memory address;
7399 add an extra "cycle". Note that the cost could be two cycles
7400 if the reg was written late in an instruction group; we cannot
7401 tell that here. */
7402 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
7403 return cost + 3;
7404
7405 /* Get the delay only if the address of the store is the dependence. */
7406 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
7407 {
7408 rtx pat = PATTERN (insn);
7409 rtx dep_pat = PATTERN (dep_insn);
7410
7411 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7412 return cost; /* This should not happen! */
7413
7414 /* The dependency between the two instructions was on the data that
7415 is being stored. Assume that this implies that the address of the
7416 store is not dependent. */
7417 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7418 return cost;
7419
7420 return cost + 3; /* An approximation. */
7421 }
7422
7423 /* A shift instruction cannot receive its data from an instruction
7424 in the same cycle; add a one cycle penalty. */
7425 if (insn_type == TYPE_SHIFT)
7426 return cost + 3; /* Split before cascade into shift. */
7427 }
7428 else
7429 {
7430 /* Anti- or output- dependency; DEP_INSN reads/writes a register that
7431 INSN writes some cycles later. */
7432
7433 /* These are only significant for the fpu unit; writing an fp reg before
7434 the fpu has finished with it stalls the processor. */
7435
7436 /* Reusing an integer register causes no problems. */
7437 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7438 return 0;
7439 }
7440
7441 return cost;
7442 }
7443
7444 static int
7445 hypersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7446 {
7447 enum attr_type insn_type, dep_type;
7448 rtx pat = PATTERN (insn);
7449 rtx dep_pat = PATTERN (dep_insn);
7450
7451 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
7452 return cost;
7453
7454 insn_type = get_attr_type (insn);
7455 dep_type = get_attr_type (dep_insn);
7456
7457 switch (REG_NOTE_KIND (link))
7458 {
7459 case 0:
7460 /* Data dependency; DEP_INSN writes a register that INSN reads some
7461 cycles later. */
7462
7463 switch (insn_type)
7464 {
7465 case TYPE_STORE:
7466 case TYPE_FPSTORE:
7467 /* Get the delay iff the address of the store is the dependence. */
7468 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7469 return cost;
7470
7471 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7472 return cost;
7473 return cost + 3;
7474
7475 case TYPE_LOAD:
7476 case TYPE_SLOAD:
7477 case TYPE_FPLOAD:
7478 /* If a load, then the dependence must be on the memory address. If
7479 the addresses aren't equal, then it might be a false dependency. */
7480 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
7481 {
7482 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
7483 || GET_CODE (SET_DEST (dep_pat)) != MEM
7484 || GET_CODE (SET_SRC (pat)) != MEM
7485 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
7486 XEXP (SET_SRC (pat), 0)))
7487 return cost + 2;
7488
7489 return cost + 8;
7490 }
7491 break;
7492
7493 case TYPE_BRANCH:
7494 /* Compare to branch latency is 0. There is no benefit from
7495 separating compare and branch. */
7496 if (dep_type == TYPE_COMPARE)
7497 return 0;
7498 /* Floating point compare to branch latency is less than
7499 compare to conditional move. */
7500 if (dep_type == TYPE_FPCMP)
7501 return cost - 1;
7502 break;
7503 default:
7504 break;
7505 }
7506 break;
7507
7508 case REG_DEP_ANTI:
7509 /* Anti-dependencies only penalize the fpu unit. */
7510 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7511 return 0;
7512 break;
7513
7514 default:
7515 break;
7516 }
7517
7518 return cost;
7519 }
7520
7521 static int
7522 sparc_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
7523 {
7524 switch (sparc_cpu)
7525 {
7526 case PROCESSOR_SUPERSPARC:
7527 cost = supersparc_adjust_cost (insn, link, dep, cost);
7528 break;
7529 case PROCESSOR_HYPERSPARC:
7530 case PROCESSOR_SPARCLITE86X:
7531 cost = hypersparc_adjust_cost (insn, link, dep, cost);
7532 break;
7533 default:
7534 break;
7535 }
7536 return cost;
7537 }
7538
7539 static void
7540 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
7541 int sched_verbose ATTRIBUTE_UNUSED,
7542 int max_ready ATTRIBUTE_UNUSED)
7543 {
7544 }
7545
7546 static int
7547 sparc_use_sched_lookahead (void)
7548 {
7549 if (sparc_cpu == PROCESSOR_ULTRASPARC
7550 || sparc_cpu == PROCESSOR_ULTRASPARC3)
7551 return 4;
7552 if ((1 << sparc_cpu) &
7553 ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
7554 (1 << PROCESSOR_SPARCLITE86X)))
7555 return 3;
7556 return 0;
7557 }
7558
7559 static int
7560 sparc_issue_rate (void)
7561 {
7562 switch (sparc_cpu)
7563 {
7564 default:
7565 return 1;
7566 case PROCESSOR_V9:
7567 /* Assume V9 processors are capable of at least dual-issue. */
7568 return 2;
7569 case PROCESSOR_SUPERSPARC:
7570 return 3;
7571 case PROCESSOR_HYPERSPARC:
7572 case PROCESSOR_SPARCLITE86X:
7573 return 2;
7574 case PROCESSOR_ULTRASPARC:
7575 case PROCESSOR_ULTRASPARC3:
7576 return 4;
7577 }
7578 }
7579
7580 static int
7581 set_extends (rtx insn)
7582 {
7583 register rtx pat = PATTERN (insn);
7584
7585 switch (GET_CODE (SET_SRC (pat)))
7586 {
7587 /* Load and some shift instructions zero extend. */
7588 case MEM:
7589 case ZERO_EXTEND:
7590 /* sethi clears the high bits. */
7591 case HIGH:
7592 /* LO_SUM is used with sethi. sethi cleared the high
7593 bits and the values used with lo_sum are positive. */
7594 case LO_SUM:
7595 /* Store flag stores 0 or 1. */
7596 case LT: case LTU:
7597 case GT: case GTU:
7598 case LE: case LEU:
7599 case GE: case GEU:
7600 case EQ:
7601 case NE:
7602 return 1;
7603 case AND:
7604 {
7605 rtx op0 = XEXP (SET_SRC (pat), 0);
7606 rtx op1 = XEXP (SET_SRC (pat), 1);
7607 if (GET_CODE (op1) == CONST_INT)
7608 return INTVAL (op1) >= 0;
7609 if (GET_CODE (op0) != REG)
7610 return 0;
7611 if (sparc_check_64 (op0, insn) == 1)
7612 return 1;
7613 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
7614 }
7615 case IOR:
7616 case XOR:
7617 {
7618 rtx op0 = XEXP (SET_SRC (pat), 0);
7619 rtx op1 = XEXP (SET_SRC (pat), 1);
7620 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
7621 return 0;
7622 if (GET_CODE (op1) == CONST_INT)
7623 return INTVAL (op1) >= 0;
7624 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
7625 }
7626 case LSHIFTRT:
7627 return GET_MODE (SET_SRC (pat)) == SImode;
7628 /* Positive integers leave the high bits zero. */
7629 case CONST_DOUBLE:
7630 return ! (CONST_DOUBLE_LOW (SET_SRC (pat)) & 0x80000000);
7631 case CONST_INT:
7632 return ! (INTVAL (SET_SRC (pat)) & 0x80000000);
7633 case ASHIFTRT:
7634 case SIGN_EXTEND:
7635 return - (GET_MODE (SET_SRC (pat)) == SImode);
7636 case REG:
7637 return sparc_check_64 (SET_SRC (pat), insn);
7638 default:
7639 return 0;
7640 }
7641 }
7642
7643 /* We _ought_ to have only one kind per function, but... */
7644 static GTY(()) rtx sparc_addr_diff_list;
7645 static GTY(()) rtx sparc_addr_list;
7646
7647 void
7648 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
7649 {
7650 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
7651 if (diff)
7652 sparc_addr_diff_list
7653 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
7654 else
7655 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
7656 }
7657
7658 static void
7659 sparc_output_addr_vec (rtx vec)
7660 {
7661 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
7662 int idx, vlen = XVECLEN (body, 0);
7663
7664 #ifdef ASM_OUTPUT_ADDR_VEC_START
7665 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
7666 #endif
7667
7668 #ifdef ASM_OUTPUT_CASE_LABEL
7669 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
7670 NEXT_INSN (lab));
7671 #else
7672 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
7673 #endif
7674
7675 for (idx = 0; idx < vlen; idx++)
7676 {
7677 ASM_OUTPUT_ADDR_VEC_ELT
7678 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
7679 }
7680
7681 #ifdef ASM_OUTPUT_ADDR_VEC_END
7682 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
7683 #endif
7684 }
7685
7686 static void
7687 sparc_output_addr_diff_vec (rtx vec)
7688 {
7689 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
7690 rtx base = XEXP (XEXP (body, 0), 0);
7691 int idx, vlen = XVECLEN (body, 1);
7692
7693 #ifdef ASM_OUTPUT_ADDR_VEC_START
7694 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
7695 #endif
7696
7697 #ifdef ASM_OUTPUT_CASE_LABEL
7698 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
7699 NEXT_INSN (lab));
7700 #else
7701 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
7702 #endif
7703
7704 for (idx = 0; idx < vlen; idx++)
7705 {
7706 ASM_OUTPUT_ADDR_DIFF_ELT
7707 (asm_out_file,
7708 body,
7709 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
7710 CODE_LABEL_NUMBER (base));
7711 }
7712
7713 #ifdef ASM_OUTPUT_ADDR_VEC_END
7714 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
7715 #endif
7716 }
7717
7718 static void
7719 sparc_output_deferred_case_vectors (void)
7720 {
7721 rtx t;
7722 int align;
7723
7724 if (sparc_addr_list == NULL_RTX
7725 && sparc_addr_diff_list == NULL_RTX)
7726 return;
7727
7728 /* Align to cache line in the function's code section. */
7729 function_section (current_function_decl);
7730
7731 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
7732 if (align > 0)
7733 ASM_OUTPUT_ALIGN (asm_out_file, align);
7734
7735 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
7736 sparc_output_addr_vec (XEXP (t, 0));
7737 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
7738 sparc_output_addr_diff_vec (XEXP (t, 0));
7739
7740 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
7741 }
7742
7743 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
7744 unknown. Return 1 if the high bits are zero, -1 if the register is
7745 sign extended. */
7746 int
7747 sparc_check_64 (rtx x, rtx insn)
7748 {
7749 /* If a register is set only once, it is safe to ignore insns this
7750 code does not know how to handle. The loop will either recognize
7751 the single set and return the correct value or fail to recognize
7752 it and return 0. */
7753 int set_once = 0;
7754 rtx y = x;
7755
7756 if (GET_CODE (x) != REG)
7757 abort ();
7758
7759 if (GET_MODE (x) == DImode)
7760 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
7761
7762 if (flag_expensive_optimizations
7763 && REG_N_SETS (REGNO (y)) == 1)
7764 set_once = 1;
7765
7766 if (insn == 0)
7767 {
7768 if (set_once)
7769 insn = get_last_insn_anywhere ();
7770 else
7771 return 0;
7772 }
7773
7774 while ((insn = PREV_INSN (insn)))
7775 {
7776 switch (GET_CODE (insn))
7777 {
7778 case JUMP_INSN:
7779 case NOTE:
7780 break;
7781 case CODE_LABEL:
7782 case CALL_INSN:
7783 default:
7784 if (! set_once)
7785 return 0;
7786 break;
7787 case INSN:
7788 {
7789 rtx pat = PATTERN (insn);
7790 if (GET_CODE (pat) != SET)
7791 return 0;
7792 if (rtx_equal_p (x, SET_DEST (pat)))
7793 return set_extends (insn);
7794 if (y && rtx_equal_p (y, SET_DEST (pat)))
7795 return set_extends (insn);
7796 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
7797 return 0;
7798 }
7799 }
7800 }
7801 return 0;
7802 }
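/* A concrete use of sparc_check_64 appears in output_v8plus_shift below:
   when it reports that the low word of a DImode input is already known
   to be zero-extended, the explicit "srl %L1, 0, %L1" is skipped.  */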
7803
7804 /* Returns assembly code to perform a DImode shift using
7805 a 64-bit global or out register on SPARC-V8+. */
7806 const char *
7807 output_v8plus_shift (rtx *operands, rtx insn, const char *opcode)
7808 {
7809 static char asm_code[60];
7810
7811 /* The scratch register is only required when the destination
7812 register is not a 64-bit global or out register. */
7813 if (which_alternative != 2)
7814 operands[3] = operands[0];
7815
7816 /* We can only shift by constants <= 63. */
7817 if (GET_CODE (operands[2]) == CONST_INT)
7818 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
7819
7820 if (GET_CODE (operands[1]) == CONST_INT)
7821 {
7822 output_asm_insn ("mov\t%1, %3", operands);
7823 }
7824 else
7825 {
7826 output_asm_insn ("sllx\t%H1, 32, %3", operands);
7827 if (sparc_check_64 (operands[1], insn) <= 0)
7828 output_asm_insn ("srl\t%L1, 0, %L1", operands);
7829 output_asm_insn ("or\t%L1, %3, %3", operands);
7830 }
7831
7832 strcpy (asm_code, opcode);
7833
7834 if (which_alternative != 2)
7835 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
7836 else
7837 return strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
7838 }
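/* For illustration, with a register input whose extension state is
   unknown, which_alternative != 2 and OPCODE "sllx", the returned
   templates expand to something like:

	sllx	%H1, 32, %0	! move the high word into bits 63:32
	srl	%L1, 0, %L1	! zero-extend the low word
	or	%L1, %0, %0	! %0 now holds the whole 64-bit value
	sllx	%0, %2, %L0	! the requested 64-bit shift
	srlx	%L0, 32, %H0	! split the result back into two words  */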
7839 \f
7840 /* Output rtl to increment the profiler label LABELNO
7841 for profiling a function entry. */
7842
7843 void
7844 sparc_profile_hook (int labelno)
7845 {
7846 char buf[32];
7847 rtx lab, fun;
7848
7849 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
7850 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
7851 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
7852
7853 emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
7854 }
7855 \f
7856 #ifdef OBJECT_FORMAT_ELF
7857 static void
7858 sparc_elf_asm_named_section (const char *name, unsigned int flags)
7859 {
7860 if (flags & SECTION_MERGE)
7861 {
7862 /* entsize cannot be expressed in this section attribute
7863 encoding style. */
7864 default_elf_asm_named_section (name, flags);
7865 return;
7866 }
7867
7868 fprintf (asm_out_file, "\t.section\t\"%s\"", name);
7869
7870 if (!(flags & SECTION_DEBUG))
7871 fputs (",#alloc", asm_out_file);
7872 if (flags & SECTION_WRITE)
7873 fputs (",#write", asm_out_file);
7874 if (flags & SECTION_TLS)
7875 fputs (",#tls", asm_out_file);
7876 if (flags & SECTION_CODE)
7877 fputs (",#execinstr", asm_out_file);
7878
7879 /* ??? Handle SECTION_BSS. */
7880
7881 fputc ('\n', asm_out_file);
7882 }
7883 #endif /* OBJECT_FORMAT_ELF */
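/* For example, an ordinary writable data section comes out in the Solaris
   assembler syntax emitted above as

	.section	".data",#alloc,#write

   while a SECTION_MERGE section falls back to the generic GAS style via
   default_elf_asm_named_section.  */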
7884
7885 /* We do not allow indirect calls to be optimized into sibling calls.
7886
7887 We cannot use sibling calls when delayed branches are disabled
7888 because they will likely require the call delay slot to be filled.
7889
7890 Also, on SPARC 32-bit we cannot emit a sibling call when the
7891 current function returns a structure. This is because the "unimp
7892 after call" convention would cause the callee to return to the
7893 wrong place. The generic code already disallows cases where the
7894 function being called returns a structure.
7895
7896 It may seem strange how this last case could occur. Usually there
7897 is code after the call that jumps to epilogue code, which stores the
7898 return value into the struct return area. That ought to invalidate
7899 the sibling call, right? Well, in the C++ case we can end up passing
7900 the pointer to the struct return area to a constructor (which returns
7901 void) and then nothing else happens. Such a sibling call would look
7902 valid without the added check here. */
7903 static bool
7904 sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
7905 {
7906 return (decl
7907 && flag_delayed_branch
7908 && (TARGET_ARCH64 || ! current_function_returns_struct));
7909 }
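/* A minimal C++ sketch of the constructor case described above (purely
   illustrative source, not from this file):

	struct S { S (); };
	S f (void) { return S (); }

   Here f just hands its struct-return pointer to the constructor, which
   returns void, so nothing follows the call and it would otherwise look
   like a valid sibling call.  */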
7910 \f
7911 /* libfunc renaming. */
7912 #include "config/gofast.h"
7913
7914 static void
7915 sparc_init_libfuncs (void)
7916 {
7917 if (TARGET_ARCH32)
7918 {
7919 /* Use the subroutines that Sun's library provides for integer
7920 multiply and divide. The `*' prevents an underscore from
7921 being prepended by the compiler. .umul is a little faster
7922 than .mul. */
7923 set_optab_libfunc (smul_optab, SImode, "*.umul");
7924 set_optab_libfunc (sdiv_optab, SImode, "*.div");
7925 set_optab_libfunc (udiv_optab, SImode, "*.udiv");
7926 set_optab_libfunc (smod_optab, SImode, "*.rem");
7927 set_optab_libfunc (umod_optab, SImode, "*.urem");
7928
7929 /* TFmode arithmetic. These names are part of the SPARC 32-bit ABI. */
7930 set_optab_libfunc (add_optab, TFmode, "_Q_add");
7931 set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
7932 set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
7933 set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
7934 set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
7935
7936 /* We can define the TFmode sqrt optab only if TARGET_FPU. This
7937 is because with soft-float, the SFmode and DFmode sqrt
7938 instructions will be absent, and the compiler will notice and
7939 try to use the TFmode sqrt instruction for calls to the
7940 builtin function sqrt, but this fails. */
7941 if (TARGET_FPU)
7942 set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
7943
7944 set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
7945 set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
7946 set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
7947 set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
7948 set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
7949 set_optab_libfunc (le_optab, TFmode, "_Q_fle");
7950
7951 set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
7952 set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
7953 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
7954 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
7955
7956 set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
7957 set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
7958 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
7959
7960 if (DITF_CONVERSION_LIBFUNCS)
7961 {
7962 set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
7963 set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
7964 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
7965 }
7966
7967 if (SUN_CONVERSION_LIBFUNCS)
7968 {
7969 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
7970 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
7971 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
7972 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
7973 }
7974 }
7975 if (TARGET_ARCH64)
7976 {
7977 /* In the SPARC 64-bit ABI, SImode multiply and divide functions
7978 do not exist in the library. Make sure the compiler does not
7979 emit calls to them by accident. (It should always use the
7980 hardware instructions.) */
7981 set_optab_libfunc (smul_optab, SImode, 0);
7982 set_optab_libfunc (sdiv_optab, SImode, 0);
7983 set_optab_libfunc (udiv_optab, SImode, 0);
7984 set_optab_libfunc (smod_optab, SImode, 0);
7985 set_optab_libfunc (umod_optab, SImode, 0);
7986
7987 if (SUN_INTEGER_MULTIPLY_64)
7988 {
7989 set_optab_libfunc (smul_optab, DImode, "__mul64");
7990 set_optab_libfunc (sdiv_optab, DImode, "__div64");
7991 set_optab_libfunc (udiv_optab, DImode, "__udiv64");
7992 set_optab_libfunc (smod_optab, DImode, "__rem64");
7993 set_optab_libfunc (umod_optab, DImode, "__urem64");
7994 }
7995
7996 if (SUN_CONVERSION_LIBFUNCS)
7997 {
7998 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
7999 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
8000 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
8001 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
8002 }
8003 }
8004
8005 gofast_maybe_init_libfuncs ();
8006 }
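/* To see the effect on 32-bit code, a plain SImode multiply such as

	int mul (int a, int b) { return a * b; }

   compiled for a CPU without hardware multiply emits "call .umul"; the
   leading '*' in "*.umul" keeps the compiler from prepending an
   underscore.  (Illustrative; the exact output depends on -mcpu.)  */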
8007 \f
8008 int
8009 sparc_extra_constraint_check (rtx op, int c, int strict)
8010 {
8011 int reload_ok_mem;
8012
8013 if (TARGET_ARCH64
8014 && (c == 'T' || c == 'U'))
8015 return 0;
8016
8017 switch (c)
8018 {
8019 case 'Q':
8020 return fp_sethi_p (op);
8021
8022 case 'R':
8023 return fp_mov_p (op);
8024
8025 case 'S':
8026 return fp_high_losum_p (op);
8027
8028 case 'U':
8029 if (! strict
8030 || (GET_CODE (op) == REG
8031 && (REGNO (op) < FIRST_PSEUDO_REGISTER
8032 || reg_renumber[REGNO (op)] >= 0)))
8033 return register_ok_for_ldd (op);
8034
8035 return 0;
8036
8037 case 'W':
8038 case 'T':
8039 break;
8040
8041 default:
8042 return 0;
8043 }
8044
8045 /* Our memory extra constraints have to emulate the
8046 behavior of 'm' and 'o' in order for reload to work
8047 correctly. */
8048 if (GET_CODE (op) == MEM)
8049 {
8050 reload_ok_mem = 0;
8051 if ((TARGET_ARCH64 || mem_min_alignment (op, 8))
8052 && (! strict
8053 || strict_memory_address_p (Pmode, XEXP (op, 0))))
8054 reload_ok_mem = 1;
8055 }
8056 else
8057 {
8058 reload_ok_mem = (reload_in_progress
8059 && GET_CODE (op) == REG
8060 && REGNO (op) >= FIRST_PSEUDO_REGISTER
8061 && reg_renumber [REGNO (op)] < 0);
8062 }
8063
8064 return reload_ok_mem;
8065 }
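/* Sketch of how these letters are used in sparc.md (illustrative, not a
   verbatim pattern): an ldd/std alternative constrains its memory operand
   with 'T' so that only 8-byte-aligned memory is accepted, roughly

	(match_operand:DI 0 "memory_operand" "T")

   while 'U' accepts an even/odd integer register pair suitable for ldd.  */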
8066
8067 /* ??? This duplicates information provided to the compiler by the
8068 ??? scheduler description. Some day, teach genautomata to output
8069 ??? the latencies and then CSE will just use that. */
8070
8071 static bool
8072 sparc_rtx_costs (rtx x, int code, int outer_code, int *total)
8073 {
8074 switch (code)
8075 {
8076 case PLUS: case MINUS: case ABS: case NEG:
8077 case FLOAT: case UNSIGNED_FLOAT:
8078 case FIX: case UNSIGNED_FIX:
8079 case FLOAT_EXTEND: case FLOAT_TRUNCATE:
8080 if (FLOAT_MODE_P (GET_MODE (x)))
8081 {
8082 switch (sparc_cpu)
8083 {
8084 case PROCESSOR_ULTRASPARC:
8085 case PROCESSOR_ULTRASPARC3:
8086 *total = COSTS_N_INSNS (4);
8087 return true;
8088
8089 case PROCESSOR_SUPERSPARC:
8090 *total = COSTS_N_INSNS (3);
8091 return true;
8092
8093 case PROCESSOR_CYPRESS:
8094 *total = COSTS_N_INSNS (5);
8095 return true;
8096
8097 case PROCESSOR_HYPERSPARC:
8098 case PROCESSOR_SPARCLITE86X:
8099 default:
8100 *total = COSTS_N_INSNS (1);
8101 return true;
8102 }
8103 }
8104
8105 *total = COSTS_N_INSNS (1);
8106 return true;
8107
8108 case SQRT:
8109 switch (sparc_cpu)
8110 {
8111 case PROCESSOR_ULTRASPARC:
8112 if (GET_MODE (x) == SFmode)
8113 *total = COSTS_N_INSNS (13);
8114 else
8115 *total = COSTS_N_INSNS (23);
8116 return true;
8117
8118 case PROCESSOR_ULTRASPARC3:
8119 if (GET_MODE (x) == SFmode)
8120 *total = COSTS_N_INSNS (20);
8121 else
8122 *total = COSTS_N_INSNS (29);
8123 return true;
8124
8125 case PROCESSOR_SUPERSPARC:
8126 *total = COSTS_N_INSNS (12);
8127 return true;
8128
8129 case PROCESSOR_CYPRESS:
8130 *total = COSTS_N_INSNS (63);
8131 return true;
8132
8133 case PROCESSOR_HYPERSPARC:
8134 case PROCESSOR_SPARCLITE86X:
8135 *total = COSTS_N_INSNS (17);
8136 return true;
8137
8138 default:
8139 *total = COSTS_N_INSNS (30);
8140 return true;
8141 }
8142
8143 case COMPARE:
8144 if (FLOAT_MODE_P (GET_MODE (x)))
8145 {
8146 switch (sparc_cpu)
8147 {
8148 case PROCESSOR_ULTRASPARC:
8149 case PROCESSOR_ULTRASPARC3:
8150 *total = COSTS_N_INSNS (1);
8151 return true;
8152
8153 case PROCESSOR_SUPERSPARC:
8154 *total = COSTS_N_INSNS (3);
8155 return true;
8156
8157 case PROCESSOR_CYPRESS:
8158 *total = COSTS_N_INSNS (5);
8159 return true;
8160
8161 case PROCESSOR_HYPERSPARC:
8162 case PROCESSOR_SPARCLITE86X:
8163 default:
8164 *total = COSTS_N_INSNS (1);
8165 return true;
8166 }
8167 }
8168
8169 /* ??? Maybe mark integer compares as zero cost on
8170 ??? all UltraSPARC processors because the result
8171 ??? can be bypassed to a branch in the same group. */
8172
8173 *total = COSTS_N_INSNS (1);
8174 return true;
8175
8176 case MULT:
8177 if (FLOAT_MODE_P (GET_MODE (x)))
8178 {
8179 switch (sparc_cpu)
8180 {
8181 case PROCESSOR_ULTRASPARC:
8182 case PROCESSOR_ULTRASPARC3:
8183 *total = COSTS_N_INSNS (4);
8184 return true;
8185
8186 case PROCESSOR_SUPERSPARC:
8187 *total = COSTS_N_INSNS (3);
8188 return true;
8189
8190 case PROCESSOR_CYPRESS:
8191 *total = COSTS_N_INSNS (7);
8192 return true;
8193
8194 case PROCESSOR_HYPERSPARC:
8195 case PROCESSOR_SPARCLITE86X:
8196 *total = COSTS_N_INSNS (1);
8197 return true;
8198
8199 default:
8200 *total = COSTS_N_INSNS (5);
8201 return true;
8202 }
8203 }
8204
8205 /* The latency is actually variable for Ultra-I/II.
8206 If one of the inputs has a known constant
8207 value, we could calculate this precisely.
8208
8209 However, for that to be useful we would need to
8210 add some machine description changes which would
8211 make sure small constants ended up in rs1 of the
8212 multiply instruction. This is because the multiply
8213 latency is determined by the number of clear (or
8214 set if the value is negative) bits starting from
8215 the most significant bit of the first input.
8216
8217 The algorithm for computing num_cycles of a multiply
8218 on Ultra-I/II is:
8219
8220 if (rs1 < 0)
8221 highest_bit = highest_clear_bit(rs1);
8222 else
8223 highest_bit = highest_set_bit(rs1);
8224 if (highest_bit < 3)
8225 highest_bit = 3;
8226 num_cycles = 4 + ((highest_bit - 3) / 2);
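
For example, rs1 == 5 has its highest set bit at position 2, which is
clamped to 3, giving the minimum of 4 cycles, while rs1 == 0x10000
(bit 16) gives 4 + (16 - 3) / 2 == 10 cycles.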
8227
8228 If we did that we would have to also consider register
8229 allocation issues that would result from forcing such
8230 a value into a register.
8231
8232 There are other similar tricks we could play if we
8233 knew, for example, that one input was an array index.
8234
8235 Since we do not play any such tricks currently the
8236 safest thing to do is report the worst case latency. */
8237 if (sparc_cpu == PROCESSOR_ULTRASPARC)
8238 {
8239 *total = (GET_MODE (x) == DImode
8240 ? COSTS_N_INSNS (34) : COSTS_N_INSNS (19));
8241 return true;
8242 }
8243
8244 /* Multiply latency on Ultra-III, fortunately, is constant. */
8245 if (sparc_cpu == PROCESSOR_ULTRASPARC3)
8246 {
8247 *total = COSTS_N_INSNS (6);
8248 return true;
8249 }
8250
8251 if (sparc_cpu == PROCESSOR_HYPERSPARC
8252 || sparc_cpu == PROCESSOR_SPARCLITE86X)
8253 {
8254 *total = COSTS_N_INSNS (17);
8255 return true;
8256 }
8257
8258 *total = (TARGET_HARD_MUL ? COSTS_N_INSNS (5) : COSTS_N_INSNS (25));
8259 return true;
8260
8261 case DIV:
8262 case UDIV:
8263 case MOD:
8264 case UMOD:
8265 if (FLOAT_MODE_P (GET_MODE (x)))
8266 {
8267 switch (sparc_cpu)
8268 {
8269 case PROCESSOR_ULTRASPARC:
8270 if (GET_MODE (x) == SFmode)
8271 *total = COSTS_N_INSNS (13);
8272 else
8273 *total = COSTS_N_INSNS (23);
8274 return true;
8275
8276 case PROCESSOR_ULTRASPARC3:
8277 if (GET_MODE (x) == SFmode)
8278 *total = COSTS_N_INSNS (17);
8279 else
8280 *total = COSTS_N_INSNS (20);
8281 return true;
8282
8283 case PROCESSOR_SUPERSPARC:
8284 if (GET_MODE (x) == SFmode)
8285 *total = COSTS_N_INSNS (6);
8286 else
8287 *total = COSTS_N_INSNS (9);
8288 return true;
8289
8290 case PROCESSOR_HYPERSPARC:
8291 case PROCESSOR_SPARCLITE86X:
8292 if (GET_MODE (x) == SFmode)
8293 *total = COSTS_N_INSNS (8);
8294 else
8295 *total = COSTS_N_INSNS (12);
8296 return true;
8297
8298 default:
8299 *total = COSTS_N_INSNS (7);
8300 return true;
8301 }
8302 }
8303
8304 if (sparc_cpu == PROCESSOR_ULTRASPARC)
8305 *total = (GET_MODE (x) == DImode
8306 ? COSTS_N_INSNS (68) : COSTS_N_INSNS (37));
8307 else if (sparc_cpu == PROCESSOR_ULTRASPARC3)
8308 *total = (GET_MODE (x) == DImode
8309 ? COSTS_N_INSNS (71) : COSTS_N_INSNS (40));
8310 else
8311 *total = COSTS_N_INSNS (25);
8312 return true;
8313
8314 case IF_THEN_ELSE:
8315 /* Conditional moves. */
8316 switch (sparc_cpu)
8317 {
8318 case PROCESSOR_ULTRASPARC:
8319 *total = COSTS_N_INSNS (2);
8320 return true;
8321
8322 case PROCESSOR_ULTRASPARC3:
8323 if (FLOAT_MODE_P (GET_MODE (x)))
8324 *total = COSTS_N_INSNS (3);
8325 else
8326 *total = COSTS_N_INSNS (2);
8327 return true;
8328
8329 default:
8330 *total = COSTS_N_INSNS (1);
8331 return true;
8332 }
8333
8334 case MEM:
8335 /* If outer-code is SIGN/ZERO extension, we have to subtract
8336 out COSTS_N_INSNS (1) from whatever we return in determining
8337 the cost. */
8338 switch (sparc_cpu)
8339 {
8340 case PROCESSOR_ULTRASPARC:
8341 if (outer_code == ZERO_EXTEND)
8342 *total = COSTS_N_INSNS (1);
8343 else
8344 *total = COSTS_N_INSNS (2);
8345 return true;
8346
8347 case PROCESSOR_ULTRASPARC3:
8348 if (outer_code == ZERO_EXTEND)
8349 {
8350 if (GET_MODE (x) == QImode
8351 || GET_MODE (x) == HImode)
8353 *total = COSTS_N_INSNS (2);
8354 else
8355 *total = COSTS_N_INSNS (1);
8356 }
8357 else
8358 {
8359 /* This covers both sign extension (3 cycles on the
8360 hardware) and everything else (2 cycles); we charge 2 for both. */
8361 *total = COSTS_N_INSNS (2);
8362 }
8363 return true;
8364
8365 case PROCESSOR_SUPERSPARC:
8366 if (FLOAT_MODE_P (GET_MODE (x))
8367 || outer_code == ZERO_EXTEND
8368 || outer_code == SIGN_EXTEND)
8369 *total = COSTS_N_INSNS (0);
8370 else
8371 *total = COSTS_N_INSNS (1);
8372 return true;
8373
8374 case PROCESSOR_TSC701:
8375 if (outer_code == ZERO_EXTEND
8376 || outer_code == SIGN_EXTEND)
8377 *total = COSTS_N_INSNS (2);
8378 else
8379 *total = COSTS_N_INSNS (3);
8380 return true;
8381
8382 case PROCESSOR_CYPRESS:
8383 if (outer_code == ZERO_EXTEND
8384 || outer_code == SIGN_EXTEND)
8385 *total = COSTS_N_INSNS (1);
8386 else
8387 *total = COSTS_N_INSNS (2);
8388 return true;
8389
8390 case PROCESSOR_HYPERSPARC:
8391 case PROCESSOR_SPARCLITE86X:
8392 default:
8393 if (outer_code == ZERO_EXTEND
8394 || outer_code == SIGN_EXTEND)
8395 *total = COSTS_N_INSNS (0);
8396 else
8397 *total = COSTS_N_INSNS (1);
8398 return true;
8399 }
8400
8401 case CONST_INT:
8402 if (INTVAL (x) < 0x1000 && INTVAL (x) >= -0x1000)
8403 {
8404 *total = 0;
8405 return true;
8406 }
8407 /* FALLTHRU */
8408
8409 case HIGH:
8410 *total = 2;
8411 return true;
8412
8413 case CONST:
8414 case LABEL_REF:
8415 case SYMBOL_REF:
8416 *total = 4;
8417 return true;
8418
8419 case CONST_DOUBLE:
8420 if (GET_MODE (x) == DImode
8421 && ((XINT (x, 3) == 0
8422 && (unsigned HOST_WIDE_INT) XINT (x, 2) < 0x1000)
8423 || (XINT (x, 3) == -1
8424 && XINT (x, 2) < 0
8425 && XINT (x, 2) >= -0x1000)))
8426 *total = 0;
8427 else
8428 *total = 8;
8429 return true;
8430
8431 default:
8432 return false;
8433 }
8434 }
8435
8436 /* Emit the sequence of insns SEQ while preserving the register REG. */
8437
8438 static void
8439 emit_and_preserve (rtx seq, rtx reg)
8440 {
8441 rtx slot = gen_rtx_MEM (word_mode,
8442 plus_constant (stack_pointer_rtx, SPARC_STACK_BIAS));
8443
8444 emit_stack_pointer_decrement (GEN_INT (UNITS_PER_WORD));
8445 emit_insn (gen_rtx_SET (VOIDmode, slot, reg));
8446 emit_insn (seq);
8447 emit_insn (gen_rtx_SET (VOIDmode, reg, slot));
8448 emit_stack_pointer_increment (GEN_INT (UNITS_PER_WORD));
8449 }
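/* In other words, the sequence is a one-word spill around SEQ: allocate a
   word below the stack pointer, store REG at the (biased) stack address,
   run SEQ, then reload REG and pop the word.  */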
8450
8451 /* Output code to add DELTA to the first argument, and then jump to FUNCTION.
8452 Used for C++ multiple inheritance. */
8453
8454 static void
8455 sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8456 HOST_WIDE_INT delta,
8457 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
8458 tree function)
8459 {
8460 rtx this, insn, funexp, delta_rtx;
8461 unsigned int int_arg_first;
8462
8463 reload_completed = 1;
8464 epilogue_completed = 1;
8465 no_new_pseudos = 1;
8466 reset_block_changes ();
8467
8468 emit_note (NOTE_INSN_PROLOGUE_END);
8469
8470 if (flag_delayed_branch)
8471 {
8472 /* We will emit a regular sibcall below, so we need to instruct
8473 output_sibcall that we are in a leaf function. */
8474 current_function_uses_only_leaf_regs = 1;
8475
8476 /* This will cause final.c to invoke leaf_renumber_regs so we
8477 must behave as if we were in a not-yet-leafified function. */
8478 int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
8479 }
8480 else
8481 {
8482 /* We will emit the sibcall manually below, so we will need to
8483 manually spill non-leaf registers. */
8484 current_function_uses_only_leaf_regs = 0;
8485
8486 /* We really are in a leaf function. */
8487 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
8488 }
8489
8490 /* Find the "this" pointer. Normally in %o0, but in ARCH64 if the function
8491 returns a structure, the structure return pointer is there instead. */
8492 if (TARGET_ARCH64 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8493 this = gen_rtx_REG (Pmode, int_arg_first + 1);
8494 else
8495 this = gen_rtx_REG (Pmode, int_arg_first);
8496
8497 /* Add DELTA. When possible use a plain add, otherwise load it into
8498 a register first. */
8499 delta_rtx = GEN_INT (delta);
8500 if (!SPARC_SIMM13_P (delta))
8501 {
8502 rtx scratch = gen_rtx_REG (Pmode, 1);
8503
8504 if (input_operand (delta_rtx, GET_MODE (scratch)))
8505 emit_insn (gen_rtx_SET (VOIDmode, scratch, delta_rtx));
8506 else
8507 {
8508 if (TARGET_ARCH64)
8509 sparc_emit_set_const64 (scratch, delta_rtx);
8510 else
8511 sparc_emit_set_const32 (scratch, delta_rtx);
8512 }
8513
8514 delta_rtx = scratch;
8515 }
8516
8517 emit_insn (gen_rtx_SET (VOIDmode,
8518 this,
8519 gen_rtx_PLUS (Pmode, this, delta_rtx)));
8520
8521 /* Generate a tail call to the target function. */
8522 if (! TREE_USED (function))
8523 {
8524 assemble_external (function);
8525 TREE_USED (function) = 1;
8526 }
8527 funexp = XEXP (DECL_RTL (function), 0);
8528
8529 if (flag_delayed_branch)
8530 {
8531 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8532 insn = emit_call_insn (gen_sibcall (funexp));
8533 SIBLING_CALL_P (insn) = 1;
8534 }
8535 else
8536 {
8537 /* The hoops we have to jump through in order to generate a sibcall
8538 without using delay slots... */
8539 rtx spill_reg, seq, scratch = gen_rtx_REG (Pmode, 1);
8540
8541 if (flag_pic)
8542 {
8543 spill_reg = gen_rtx_REG (word_mode, 15); /* %o7 */
8544 start_sequence ();
8545 load_pic_register (); /* clobbers %o7 */
8546 scratch = legitimize_pic_address (funexp, Pmode, scratch);
8547 seq = get_insns ();
8548 end_sequence ();
8549 emit_and_preserve (seq, spill_reg);
8550 }
8551 else if (TARGET_ARCH32)
8552 {
8553 emit_insn (gen_rtx_SET (VOIDmode,
8554 scratch,
8555 gen_rtx_HIGH (SImode, funexp)));
8556 emit_insn (gen_rtx_SET (VOIDmode,
8557 scratch,
8558 gen_rtx_LO_SUM (SImode, scratch, funexp)));
8559 }
8560 else /* TARGET_ARCH64 */
8561 {
8562 switch (sparc_cmodel)
8563 {
8564 case CM_MEDLOW:
8565 case CM_MEDMID:
8566 /* The destination can serve as a temporary. */
8567 sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
8568 break;
8569
8570 case CM_MEDANY:
8571 case CM_EMBMEDANY:
8572 /* The destination cannot serve as a temporary. */
8573 spill_reg = gen_rtx_REG (DImode, 15); /* %o7 */
8574 start_sequence ();
8575 sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
8576 seq = get_insns ();
8577 end_sequence ();
8578 emit_and_preserve (seq, spill_reg);
8579 break;
8580
8581 default:
8582 abort ();
8583 }
8584 }
8585
8586 emit_jump_insn (gen_indirect_jump (scratch));
8587 }
8588
8589 emit_barrier ();
8590
8591 /* Run just enough of rest_of_compilation to get the insns emitted.
8592 There's not really enough bulk here to make other passes such as
8593 instruction scheduling worth while. Note that use_thunk calls
8594 assemble_start_function and assemble_end_function. */
8595 insn = get_insns ();
8596 insn_locators_initialize ();
8597 shorten_branches (insn);
8598 final_start_function (insn, file, 1);
8599 final (insn, file, 1, 0);
8600 final_end_function ();
8601
8602 reload_completed = 0;
8603 epilogue_completed = 0;
8604 no_new_pseudos = 0;
8605 }
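/* Thunks like the one above arise from C++ multiple inheritance, e.g.
   (hypothetical source):

	struct A { virtual void f (); };
	struct B { virtual void g (); };
	struct C : A, B { void g (); };

   Calling g through a B* that points into a C object requires adjusting
   "this" by the offset of the B subobject before entering C::g -- which
   is exactly the DELTA addition performed here.  */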
8606
8607 /* How to allocate a 'struct machine_function'. */
8608
8609 static struct machine_function *
8610 sparc_init_machine_status (void)
8611 {
8612 return ggc_alloc_cleared (sizeof (struct machine_function));
8613 }
8614
8615 /* Locate some local-dynamic symbol still in use by this function
8616 so that we can print its name in local-dynamic base patterns. */
8617
8618 static const char *
8619 get_some_local_dynamic_name (void)
8620 {
8621 rtx insn;
8622
8623 if (cfun->machine->some_ld_name)
8624 return cfun->machine->some_ld_name;
8625
8626 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
8627 if (INSN_P (insn)
8628 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
8629 return cfun->machine->some_ld_name;
8630
8631 abort ();
8632 }
8633
8634 static int
8635 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
8636 {
8637 rtx x = *px;
8638
8639 if (x
8640 && GET_CODE (x) == SYMBOL_REF
8641 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
8642 {
8643 cfun->machine->some_ld_name = XSTR (x, 0);
8644 return 1;
8645 }
8646
8647 return 0;
8648 }
8649
8650 /* This is called from dwarf2out.c via ASM_OUTPUT_DWARF_DTPREL.
8651 We need to emit DTP-relative relocations. */
8652
8653 void
8654 sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
8655 {
8656 switch (size)
8657 {
8658 case 4:
8659 fputs ("\t.word\t%r_tls_dtpoff32(", file);
8660 break;
8661 case 8:
8662 fputs ("\t.xword\t%r_tls_dtpoff64(", file);
8663 break;
8664 default:
8665 abort ();
8666 }
8667 output_addr_const (file, x);
8668 fputs (")", file);
8669 }
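/* For a local-dynamic TLS variable "x", the 32-bit case above therefore
   emits something like

	.word	%r_tls_dtpoff32(x)

   i.e. x's offset from the start of its module's dynamic TLS block.  */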
8670
8671 #include "gt-sparc.h"