/* Subroutines for insn-output.c for Sun SPARC.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   64 bit SPARC V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
   at Cygnus Support.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

#include "config.h"
#include "system.h"
#include "tree.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-flags.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "recog.h"
#include "toplev.h"
#include "ggc.h"
#include "tm_p.h"

/* 1 if the caller has placed an "unimp" insn immediately after the call.
   This is used in v8 code when calling a function that returns a structure.
   v9 doesn't have this.  Be careful to have this test be the same as that
   used on the call.  */

#define SKIP_CALLERS_UNIMP_P  \
(!TARGET_ARCH64 && current_function_returns_struct			\
 && ! integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl)))	\
 && (TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))	\
     == INTEGER_CST))
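
/* Illustrative sketch (not from this file): per the SPARC V8 ABI, a
   caller of a struct-returning function emits roughly

	call	f
	 nop
	unimp	<size of struct in bytes>

   and a callee honoring the convention returns with "jmp %i7+12"
   instead of "jmp %i7+8", skipping the unimp word.  The macro above
   decides whether those extra 4 bytes must be skipped.  */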
55
56 /* Global variables for machine-dependent things. */
57
58 /* Size of frame. Need to know this to emit return insns from leaf procedures.
59 ACTUAL_FSIZE is set by compute_frame_size() which is called during the
60 reload pass. This is important as the value is later used in insn
61 scheduling (to see what can go in a delay slot).
62 APPARENT_FSIZE is the size of the stack less the register save area and less
63 the outgoing argument area. It is used when saving call preserved regs. */
64 static int apparent_fsize;
65 static int actual_fsize;
66
67 /* Number of live general or floating point registers needed to be saved
68 (as 4-byte quantities). This is only done if TARGET_EPILOGUE. */
69 static int num_gfregs;
70
71 /* Save the operands last given to a compare for use when we
72 generate a scc or bcc insn. */
73
74 rtx sparc_compare_op0, sparc_compare_op1;
75
76 /* We may need an epilogue if we spill too many registers.
77 If this is non-zero, then we branch here for the epilogue. */
78 static rtx leaf_label;
79
80 #ifdef LEAF_REGISTERS
81
82 /* Vector to say how input registers are mapped to output
83 registers. FRAME_POINTER_REGNUM cannot be remapped by
84 this function to eliminate it. You must use -fomit-frame-pointer
85 to get that. */
86 char leaf_reg_remap[] =
87 { 0, 1, 2, 3, 4, 5, 6, 7,
88 -1, -1, -1, -1, -1, -1, 14, -1,
89 -1, -1, -1, -1, -1, -1, -1, -1,
90 8, 9, 10, 11, 12, 13, -1, 15,
91
92 32, 33, 34, 35, 36, 37, 38, 39,
93 40, 41, 42, 43, 44, 45, 46, 47,
94 48, 49, 50, 51, 52, 53, 54, 55,
95 56, 57, 58, 59, 60, 61, 62, 63,
96 64, 65, 66, 67, 68, 69, 70, 71,
97 72, 73, 74, 75, 76, 77, 78, 79,
98 80, 81, 82, 83, 84, 85, 86, 87,
99 88, 89, 90, 91, 92, 93, 94, 95,
100 96, 97, 98, 99, 100};
101
102 /* Vector, indexed by hard register number, which contains 1
103 for a register that is allowable in a candidate for leaf
104 function treatment. */
105 char sparc_leaf_regs[] =
106 { 1, 1, 1, 1, 1, 1, 1, 1,
107 0, 0, 0, 0, 0, 0, 1, 0,
108 0, 0, 0, 0, 0, 0, 0, 0,
109 1, 1, 1, 1, 1, 1, 0, 1,
110 1, 1, 1, 1, 1, 1, 1, 1,
111 1, 1, 1, 1, 1, 1, 1, 1,
112 1, 1, 1, 1, 1, 1, 1, 1,
113 1, 1, 1, 1, 1, 1, 1, 1,
114 1, 1, 1, 1, 1, 1, 1, 1,
115 1, 1, 1, 1, 1, 1, 1, 1,
116 1, 1, 1, 1, 1, 1, 1, 1,
117 1, 1, 1, 1, 1, 1, 1, 1,
118 1, 1, 1, 1, 1};
119
120 #endif
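
/* Illustrative reading of the tables above: in a leaf function the
   incoming argument register %i0 (hard reg 24) is renumbered to the
   caller's %o0 (hard reg 8), while the %l registers and most %o
   registers carry -1 remap entries and 0 "allowable" entries, so a
   leaf routine effectively runs in its caller's register window
   without a save/restore pair.  */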

/* Name of where we pretend to think the frame pointer points.
   Normally, this is "%fp", but if we are in a leaf procedure,
   this is "%sp+something".  We record "something" separately as it may be
   too big for reg+constant addressing.  */

static const char *frame_base_name;
static int frame_base_offset;

static rtx pic_setup_code	PARAMS ((void));
static void sparc_init_modes	PARAMS ((void));
static int save_regs		PARAMS ((FILE *, int, int, const char *,
					 int, int, int));
static int restore_regs		PARAMS ((FILE *, int, int, const char *, int, int));
static void build_big_number	PARAMS ((FILE *, int, const char *));
static int function_arg_slotno	PARAMS ((const CUMULATIVE_ARGS *,
					 enum machine_mode, tree, int, int,
					 int *, int *));

static int supersparc_adjust_cost PARAMS ((rtx, rtx, rtx, int));
static int hypersparc_adjust_cost PARAMS ((rtx, rtx, rtx, int));
static int ultrasparc_adjust_cost PARAMS ((rtx, rtx, rtx, int));

static void sparc_output_addr_vec PARAMS ((rtx));
static void sparc_output_addr_diff_vec PARAMS ((rtx));
static void sparc_output_deferred_case_vectors PARAMS ((void));
static void sparc_add_gc_roots PARAMS ((void));
static void mark_ultrasparc_pipeline_state PARAMS ((void *));
static int check_return_regs PARAMS ((rtx));
static int epilogue_renumber PARAMS ((rtx *, int));
static int ultra_cmove_results_ready_p PARAMS ((rtx));
static int ultra_fpmode_conflict_exists PARAMS ((enum machine_mode));
static rtx *ultra_find_type PARAMS ((int, rtx *, int));
static void ultra_build_types_avail PARAMS ((rtx *, int));
static void ultra_flush_pipeline PARAMS ((void));
static void ultra_rescan_pipeline_state PARAMS ((rtx *, int));
static int set_extends PARAMS ((rtx, rtx));
static void output_restore_regs PARAMS ((FILE *, int));
\f
/* Option handling.  */

/* Code model option as passed by user.  */
const char *sparc_cmodel_string;
/* Parsed value.  */
enum cmodel sparc_cmodel;

char sparc_hard_reg_printed[8];

struct sparc_cpu_select sparc_select[] =
{
  /* switch	name,		tune	arch */
  { (char *)0,	"default",	1,	1 },
  { (char *)0,	"-mcpu=",	1,	1 },
  { (char *)0,	"-mtune=",	1,	0 },
  { 0, 0, 0, 0 }
};

/* CPU type.  This is set from TARGET_CPU_DEFAULT and -m{cpu,tune}=xxx.  */
enum processor_type sparc_cpu;

/* Validate and override various options, and do some machine dependent
   initialization.  */

void
sparc_override_options ()
{
  static struct code_model {
    const char *name;
    int value;
  } cmodels[] = {
    { "32", CM_32 },
    { "medlow", CM_MEDLOW },
    { "medmid", CM_MEDMID },
    { "medany", CM_MEDANY },
    { "embmedany", CM_EMBMEDANY },
    { 0, 0 }
  };
  struct code_model *cmodel;
  /* Map TARGET_CPU_DEFAULT to value for -m{arch,tune}=.  */
  static struct cpu_default {
    int cpu;
    const char *name;
  } cpu_default[] = {
    /* There must be one entry here for each TARGET_CPU value.  */
    { TARGET_CPU_sparc, "cypress" },
    { TARGET_CPU_sparclet, "tsc701" },
    { TARGET_CPU_sparclite, "f930" },
    { TARGET_CPU_v8, "v8" },
    { TARGET_CPU_hypersparc, "hypersparc" },
    { TARGET_CPU_sparclite86x, "sparclite86x" },
    { TARGET_CPU_supersparc, "supersparc" },
    { TARGET_CPU_v9, "v9" },
    { TARGET_CPU_ultrasparc, "ultrasparc" },
    { 0, 0 }
  };
  struct cpu_default *def;
  /* Table of values for -m{cpu,tune}=.  */
  static struct cpu_table {
    const char *name;
    enum processor_type processor;
    int disable;
    int enable;
  } cpu_table[] = {
    { "v7",         PROCESSOR_V7, MASK_ISA, 0 },
    { "cypress",    PROCESSOR_CYPRESS, MASK_ISA, 0 },
    { "v8",         PROCESSOR_V8, MASK_ISA, MASK_V8 },
    /* TI TMS390Z55 supersparc */
    { "supersparc", PROCESSOR_SUPERSPARC, MASK_ISA, MASK_V8 },
    { "sparclite",  PROCESSOR_SPARCLITE, MASK_ISA, MASK_SPARCLITE },
    /* The Fujitsu MB86930 is the original sparclite chip, with no fpu.
       The Fujitsu MB86934 is the recent sparclite chip, with an fpu.  */
    { "f930",       PROCESSOR_F930, MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    { "f934",       PROCESSOR_F934, MASK_ISA, MASK_SPARCLITE|MASK_FPU },
    { "hypersparc", PROCESSOR_HYPERSPARC, MASK_ISA, MASK_V8|MASK_FPU },
    { "sparclite86x",  PROCESSOR_SPARCLITE86X, MASK_ISA|MASK_FPU,
      MASK_SPARCLITE },
    { "sparclet",   PROCESSOR_SPARCLET, MASK_ISA, MASK_SPARCLET },
    /* TEMIC sparclet */
    { "tsc701",     PROCESSOR_TSC701, MASK_ISA, MASK_SPARCLET },
    { "v9",         PROCESSOR_V9, MASK_ISA, MASK_V9 },
    /* TI ultrasparc I, II, IIi */
    { "ultrasparc", PROCESSOR_ULTRASPARC, MASK_ISA, MASK_V9
    /* Although insns using %y are deprecated, it is a clear win on current
       ultrasparcs.  */
						    |MASK_DEPRECATED_V8_INSNS },
    { 0, 0, 0, 0 }
  };
  struct cpu_table *cpu;
  struct sparc_cpu_select *sel;
  int fpu;

#ifndef SPARC_BI_ARCH
  /* Check for unsupported architecture size.  */
  if (! TARGET_64BIT != DEFAULT_ARCH32_P)
    {
      error ("%s is not supported by this configuration",
	     DEFAULT_ARCH32_P ? "-m64" : "-m32");
    }
#endif

  /* At the moment we don't allow the pointer size and the architecture
     size to differ.  */
  if (! TARGET_64BIT != ! TARGET_PTR64)
    {
      error ("-mptr%d not allowed on -m%d",
	     TARGET_PTR64 ? 64 : 32, TARGET_64BIT ? 64 : 32);
      if (TARGET_64BIT)
	target_flags |= MASK_PTR64;
      else
	target_flags &= ~MASK_PTR64;
    }

  /* We force all 64-bit architectures to use a 128-bit long double.  */
  if (TARGET_64BIT && ! TARGET_LONG_DOUBLE_128)
    {
      error ("-mlong-double-64 not allowed with -m64");
      target_flags |= MASK_LONG_DOUBLE_128;
    }

  /* Code model selection.  */
  sparc_cmodel = SPARC_DEFAULT_CMODEL;

#ifdef SPARC_BI_ARCH
  if (TARGET_ARCH32)
    sparc_cmodel = CM_32;
#endif

  if (sparc_cmodel_string != NULL)
    {
      if (TARGET_ARCH64)
	{
	  for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
	    if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
	      break;
	  if (cmodel->name == NULL)
	    error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
	  else
	    sparc_cmodel = cmodel->value;
	}
      else
	error ("-mcmodel= is not supported on 32 bit systems");
    }

  fpu = TARGET_FPU; /* save current -mfpu status */

  /* Set the default CPU.  */
  for (def = &cpu_default[0]; def->name; ++def)
    if (def->cpu == TARGET_CPU_DEFAULT)
      break;
  if (! def->name)
    abort ();
  sparc_select[0].string = def->name;

  for (sel = &sparc_select[0]; sel->name; ++sel)
    {
      if (sel->string)
	{
	  for (cpu = &cpu_table[0]; cpu->name; ++cpu)
	    if (! strcmp (sel->string, cpu->name))
	      {
		if (sel->set_tune_p)
		  sparc_cpu = cpu->processor;

		if (sel->set_arch_p)
		  {
		    target_flags &= ~cpu->disable;
		    target_flags |= cpu->enable;
		  }
		break;
	      }

	  if (! cpu->name)
	    error ("bad value (%s) for %s switch", sel->string, sel->name);
	}
    }

  /* If -mfpu or -mno-fpu was explicitly used, don't override with
     the processor default.  Clear MASK_FPU_SET to avoid confusing
     the reverse mapping from switch values to names.  */
  if (TARGET_FPU_SET)
    {
      target_flags = (target_flags & ~MASK_FPU) | fpu;
      target_flags &= ~MASK_FPU_SET;
    }

  /* Don't allow -mvis if FPU is disabled.  */
  if (! TARGET_FPU)
    target_flags &= ~MASK_VIS;

  /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
     are available.
     -m64 also implies v9.  */
  if (TARGET_VIS || TARGET_ARCH64)
    target_flags |= MASK_V9;

  /* Use the deprecated v8 insns for sparc64 in 32 bit mode.  */
  if (TARGET_V9 && TARGET_ARCH32)
    target_flags |= MASK_DEPRECATED_V8_INSNS;

  /* V8PLUS requires V9, makes no sense in 64 bit mode.  */
  if (! TARGET_V9 || TARGET_ARCH64)
    target_flags &= ~MASK_V8PLUS;

  /* Don't use stack biasing in 32 bit mode.  */
  if (TARGET_ARCH32)
    target_flags &= ~MASK_STACK_BIAS;

  /* Supply a default value for align_functions.  */
  if (align_functions == 0 && sparc_cpu == PROCESSOR_ULTRASPARC)
    align_functions = 32;

  /* Validate PCC_STRUCT_RETURN.  */
  if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
    flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);

  /* Do various machine dependent initializations.  */
  sparc_init_modes ();

  if ((profile_flag || profile_block_flag)
      && sparc_cmodel != CM_32 && sparc_cmodel != CM_MEDLOW)
    {
      error ("profiling does not support code models other than medlow");
    }

  /* Register global variables with the garbage collector.  */
  sparc_add_gc_roots ();
}
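
/* Example of how the tables above combine (illustrative): given
   "-mcpu=ultrasparc", the cpu_table lookup clears MASK_ISA from
   target_flags and sets MASK_V9|MASK_DEPRECATED_V8_INSNS, and since
   the "-mcpu=" row of sparc_select has both set_tune_p and set_arch_p,
   sparc_cpu also becomes PROCESSOR_ULTRASPARC.  */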
\f
/* Miscellaneous utilities.  */

/* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
   or branch on register contents instructions.  */

int
v9_regcmp_p (code)
     enum rtx_code code;
{
  return (code == EQ || code == NE || code == GE || code == LT
	  || code == LE || code == GT);
}

\f
/* Operand constraints.  */

/* Return non-zero only if OP is a register of mode MODE,
   or const0_rtx.  */

int
reg_or_0_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;
  if (op == const0_rtx)
    return 1;
  if (GET_MODE (op) == VOIDmode && GET_CODE (op) == CONST_DOUBLE
      && CONST_DOUBLE_HIGH (op) == 0
      && CONST_DOUBLE_LOW (op) == 0)
    return 1;
  if (fp_zero_operand (op, mode))
    return 1;
  return 0;
}

/* Nonzero if OP is a floating point value with value 0.0.  */

int
fp_zero_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (GET_MODE_CLASS (GET_MODE (op)) != MODE_FLOAT)
    return 0;
  return op == CONST0_RTX (mode);
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   sethi instruction.  */

int
fp_sethi_p (op)
     rtx op;
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      if (REAL_VALUES_EQUAL (r, dconst0)
	  && ! REAL_VALUE_MINUS_ZERO (r))
	return 0;
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      if (SPARC_SETHI_P (i))
	return 1;
    }

  return 0;
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   mov instruction.  */

int
fp_mov_p (op)
     rtx op;
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      if (REAL_VALUES_EQUAL (r, dconst0)
	  && ! REAL_VALUE_MINUS_ZERO (r))
	return 0;
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      if (SPARC_SIMM13_P (i))
	return 1;
    }

  return 0;
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a high/losum
   instruction sequence.  */

int
fp_high_losum_p (op)
     rtx op;
{
  /* The constraints calling this should only be in
     SFmode move insns, so any constant which cannot
     be moved using a single insn will do.  */
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      if (REAL_VALUES_EQUAL (r, dconst0)
	  && ! REAL_VALUE_MINUS_ZERO (r))
	return 0;
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      if (! SPARC_SETHI_P (i)
          && ! SPARC_SIMM13_P (i))
	return 1;
    }

  return 0;
}
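
/* Worked single-float images for the three predicates above
   (illustrative): 1.0f is 0x3f800000, whose low 10 bits are clear, so
   SPARC_SETHI_P holds and fp_sethi_p accepts it; the denormal image
   0x00000123 fits a signed 13-bit immediate, so fp_mov_p accepts it;
   an image such as 0x3f800123 satisfies neither test and therefore
   needs the sethi/or pair recognized by fp_high_losum_p.  */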

/* Nonzero if OP is an integer register.  */

int
intreg_operand (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  return (register_operand (op, SImode)
	  || (TARGET_ARCH64 && register_operand (op, DImode)));
}

/* Nonzero if OP is a floating point condition code register.  */

int
fcc_reg_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  /* This can happen when recog is called from combine.  Op may be a MEM.
     Fail instead of calling abort in this case.  */
  if (GET_CODE (op) != REG)
    return 0;

  if (mode != VOIDmode && mode != GET_MODE (op))
    return 0;
  if (mode == VOIDmode
      && (GET_MODE (op) != CCFPmode && GET_MODE (op) != CCFPEmode))
    return 0;

#if 0	/* ??? ==> 1 when %fcc0-3 are pseudos first.  See gen_compare_reg().  */
  if (reg_renumber == 0)
    return REGNO (op) >= FIRST_PSEUDO_REGISTER;
  return REGNO_OK_FOR_CCFP_P (REGNO (op));
#else
  return (unsigned) REGNO (op) - SPARC_FIRST_V9_FCC_REG < 4;
#endif
}

/* Nonzero if OP is an integer or floating point condition code register.  */

int
icc_or_fcc_reg_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (GET_CODE (op) == REG && REGNO (op) == SPARC_ICC_REG)
    {
      if (mode != VOIDmode && mode != GET_MODE (op))
	return 0;
      if (mode == VOIDmode
	  && GET_MODE (op) != CCmode && GET_MODE (op) != CCXmode)
	return 0;
      return 1;
    }

  return fcc_reg_operand (op, mode);
}

/* Nonzero if OP can appear as the dest of a RESTORE insn.  */
int
restore_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  return (GET_CODE (op) == REG && GET_MODE (op) == mode
	  && (REGNO (op) < 8 || (REGNO (op) >= 24 && REGNO (op) < 32)));
}

/* Call insn on SPARC can take a PC-relative constant address, or any regular
   memory address.  */

int
call_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (GET_CODE (op) != MEM)
    abort ();
  op = XEXP (op, 0);
  return (symbolic_operand (op, mode) || memory_address_p (Pmode, op));
}

int
call_operand_address (op, mode)
     rtx op;
     enum machine_mode mode;
{
  return (symbolic_operand (op, mode) || memory_address_p (Pmode, op));
}

/* Returns 1 if OP is either a symbol reference or a sum of a symbol
   reference and a constant.  */

int
symbolic_operand (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  enum machine_mode omode = GET_MODE (op);

  if (omode != mode && omode != VOIDmode && mode != VOIDmode)
    return 0;

  switch (GET_CODE (op))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return 1;

    case CONST:
      op = XEXP (op, 0);
      return ((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
	       || GET_CODE (XEXP (op, 0)) == LABEL_REF)
	      && GET_CODE (XEXP (op, 1)) == CONST_INT);

    default:
      return 0;
    }
}
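
/* E.g. the address expression "&foo + 4" reaches this predicate as
   (const (plus (symbol_ref "foo") (const_int 4))) and is accepted by
   the CONST arm above.  */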

/* Return truth value of statement that OP is a symbolic memory
   operand of mode MODE.  */

int
symbolic_memory_operand (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  if (GET_CODE (op) != MEM)
    return 0;
  op = XEXP (op, 0);
  return (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST
	  || GET_CODE (op) == HIGH || GET_CODE (op) == LABEL_REF);
}

/* Return truth value of statement that OP is a LABEL_REF of mode MODE.  */

int
label_ref_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (GET_CODE (op) != LABEL_REF)
    return 0;
  if (GET_MODE (op) != mode)
    return 0;
  return 1;
}

/* Return 1 if the operand is an argument used in generating pic references
   in either the medium/low or medium/anywhere code models of sparc64.  */

int
sp64_medium_pic_operand (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  /* Check for (const (minus (symbol_ref:GOT)
                             (const (minus (label) (pc))))).  */
  if (GET_CODE (op) != CONST)
    return 0;
  op = XEXP (op, 0);
  if (GET_CODE (op) != MINUS)
    return 0;
  if (GET_CODE (XEXP (op, 0)) != SYMBOL_REF)
    return 0;
  /* ??? Ensure symbol is GOT.  */
  if (GET_CODE (XEXP (op, 1)) != CONST)
    return 0;
  if (GET_CODE (XEXP (XEXP (op, 1), 0)) != MINUS)
    return 0;
  return 1;
}

/* Return 1 if the operand is a data segment reference.  This includes
   the readonly data segment, or in other words anything but the text segment.
   This is needed in the medium/anywhere code model on v9.  These values
   are accessed with EMBMEDANY_BASE_REG.  */

int
data_segment_operand (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  switch (GET_CODE (op))
    {
    case SYMBOL_REF :
      return ! SYMBOL_REF_FLAG (op);
    case PLUS :
      /* Assume canonical format of symbol + constant.
	 Fall through.  */
    case CONST :
      return data_segment_operand (XEXP (op, 0), VOIDmode);
    default :
      return 0;
    }
}

/* Return 1 if the operand is a text segment reference.
   This is needed in the medium/anywhere code model on v9.  */

int
text_segment_operand (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  switch (GET_CODE (op))
    {
    case LABEL_REF :
      return 1;
    case SYMBOL_REF :
      return SYMBOL_REF_FLAG (op);
    case PLUS :
      /* Assume canonical format of symbol + constant.
	 Fall through.  */
    case CONST :
      return text_segment_operand (XEXP (op, 0), VOIDmode);
    default :
      return 0;
    }
}

/* Return 1 if the operand is either a register or a memory operand that is
   not symbolic.  */

int
reg_or_nonsymb_mem_operand (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (memory_operand (op, mode) && ! symbolic_memory_operand (op, mode))
    return 1;

  return 0;
}

int
splittable_symbolic_memory_operand (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  if (GET_CODE (op) != MEM)
    return 0;
  if (! symbolic_operand (XEXP (op, 0), Pmode))
    return 0;
  return 1;
}

int
splittable_immediate_memory_operand (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  if (GET_CODE (op) != MEM)
    return 0;
  if (! immediate_operand (XEXP (op, 0), Pmode))
    return 0;
  return 1;
}

/* Return truth value of whether OP is EQ or NE.  */

int
eq_or_neq (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  return (GET_CODE (op) == EQ || GET_CODE (op) == NE);
}

/* Return 1 if this is a comparison operator, but not an EQ, NE, GEU,
   or LTU for non-floating-point.  We handle those specially.  */

int
normal_comp_operator (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  enum rtx_code code = GET_CODE (op);

  if (GET_RTX_CLASS (code) != '<')
    return 0;

  if (GET_MODE (XEXP (op, 0)) == CCFPmode
      || GET_MODE (XEXP (op, 0)) == CCFPEmode)
    return 1;

  return (code != NE && code != EQ && code != GEU && code != LTU);
}

/* Return 1 if this is a comparison operator.  This allows the use of
   MATCH_OPERATOR to recognize all the branch insns.  */

int
noov_compare_op (op, mode)
     register rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  enum rtx_code code = GET_CODE (op);

  if (GET_RTX_CLASS (code) != '<')
    return 0;

  if (GET_MODE (XEXP (op, 0)) == CC_NOOVmode)
    /* These are the only branches which work with CC_NOOVmode.  */
    return (code == EQ || code == NE || code == GE || code == LT);
  return 1;
}

/* Nonzero if OP is a comparison operator suitable for use in v9
   conditional move or branch on register contents instructions.  */

int
v9_regcmp_op (op, mode)
     register rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  enum rtx_code code = GET_CODE (op);

  if (GET_RTX_CLASS (code) != '<')
    return 0;

  return v9_regcmp_p (code);
}

/* Return 1 if this is a SIGN_EXTEND or ZERO_EXTEND operation.  */

int
extend_op (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  return GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND;
}

/* Return nonzero if OP is an operator of mode MODE which can set
   the condition codes explicitly.  We do not include PLUS and MINUS
   because these require CC_NOOVmode, which we handle explicitly.  */

int
cc_arithop (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  if (GET_CODE (op) == AND
      || GET_CODE (op) == IOR
      || GET_CODE (op) == XOR)
    return 1;

  return 0;
}

/* Return nonzero if OP is an operator of mode MODE which can bitwise
   complement its second operand and set the condition codes explicitly.  */

int
cc_arithopn (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  /* XOR is not here because combine canonicalizes (xor (not ...) ...)
     and (xor ... (not ...)) to (not (xor ...)).  */
  return (GET_CODE (op) == AND
	  || GET_CODE (op) == IOR);
}
\f
/* Return true if OP is a register, or is a CONST_INT that can fit in a
   signed 13 bit immediate field.  This is an acceptable SImode operand for
   most 3 address instructions.  */

int
arith_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  int val;
  if (register_operand (op, mode))
    return 1;
  if (GET_CODE (op) != CONST_INT)
    return 0;
  val = INTVAL (op) & 0xffffffff;
  return SPARC_SIMM13_P (val);
}
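
/* For reference (illustrative): SPARC_SIMM13_P accepts the signed
   13-bit range -4096 .. 4095, so "add %o1, 4095, %o0" needs no scratch
   register.  The value 4096, one past the range, is recognized by
   arith_4096_operand below; a sketch of the intent is that the add/sub
   patterns can still handle it cheaply, e.g. by rewriting x + 4096 as
   x - (-4096), since -4096 is itself a valid simm13 (the actual
   handling lives in sparc.md).  */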

/* Return true if OP is a constant 4096 */

int
arith_4096_operand (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  int val;
  if (GET_CODE (op) != CONST_INT)
    return 0;
  val = INTVAL (op) & 0xffffffff;
  return val == 4096;
}

/* Return true if OP is suitable as second operand for add/sub */

int
arith_add_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  return arith_operand (op, mode) || arith_4096_operand (op, mode);
}

/* Return true if OP is a CONST_INT or a CONST_DOUBLE which can fit in the
   immediate field of OR and XOR instructions.  Used for 64-bit
   constant formation patterns.  */
int
const64_operand (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  return ((GET_CODE (op) == CONST_INT
	   && SPARC_SIMM13_P (INTVAL (op)))
#if HOST_BITS_PER_WIDE_INT != 64
	  || (GET_CODE (op) == CONST_DOUBLE
	      && SPARC_SIMM13_P (CONST_DOUBLE_LOW (op))
	      && (CONST_DOUBLE_HIGH (op) ==
		  ((CONST_DOUBLE_LOW (op) & 0x80000000) != 0 ?
		   (HOST_WIDE_INT)0xffffffff : 0)))
#endif
	  );
}

/* The same, but only for sethi instructions.  */
int
const64_high_operand (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  return ((GET_CODE (op) == CONST_INT
	   && (INTVAL (op) & 0xfffffc00) != 0
	   && SPARC_SETHI_P (INTVAL (op))
#if HOST_BITS_PER_WIDE_INT != 64
	   /* Must be positive on non-64bit host else the
	      optimizer is fooled into thinking that sethi
	      sign extends, even though it does not.  */
	   && INTVAL (op) >= 0
#endif
	   )
	  || (GET_CODE (op) == CONST_DOUBLE
	      && CONST_DOUBLE_HIGH (op) == 0
	      && (CONST_DOUBLE_LOW (op) & 0xfffffc00) != 0
	      && SPARC_SETHI_P (CONST_DOUBLE_LOW (op))));
}

/* Return true if OP is a register, or is a CONST_INT that can fit in a
   signed 11 bit immediate field.  This is an acceptable SImode operand for
   the movcc instructions.  */

int
arith11_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  return (register_operand (op, mode)
	  || (GET_CODE (op) == CONST_INT && SPARC_SIMM11_P (INTVAL (op))));
}

/* Return true if OP is a register, or is a CONST_INT that can fit in a
   signed 10 bit immediate field.  This is an acceptable SImode operand for
   the movrcc instructions.  */

int
arith10_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  return (register_operand (op, mode)
	  || (GET_CODE (op) == CONST_INT && SPARC_SIMM10_P (INTVAL (op))));
}

/* Return true if OP is a register, is a CONST_INT that fits in a 13 bit
   immediate field, or is a CONST_DOUBLE whose both parts fit in a 13 bit
   immediate field.
   v9: Return true if OP is a register, or is a CONST_INT or CONST_DOUBLE that
   can fit in a 13 bit immediate field.  This is an acceptable DImode operand
   for most 3 address instructions.  */

int
arith_double_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  return (register_operand (op, mode)
	  || (GET_CODE (op) == CONST_INT && SMALL_INT (op))
	  || (! TARGET_ARCH64
	      && GET_CODE (op) == CONST_DOUBLE
	      && (unsigned HOST_WIDE_INT) (CONST_DOUBLE_LOW (op) + 0x1000) < 0x2000
	      && (unsigned HOST_WIDE_INT) (CONST_DOUBLE_HIGH (op) + 0x1000) < 0x2000)
	  || (TARGET_ARCH64
	      && GET_CODE (op) == CONST_DOUBLE
	      && (unsigned HOST_WIDE_INT) (CONST_DOUBLE_LOW (op) + 0x1000) < 0x2000
	      && ((CONST_DOUBLE_HIGH (op) == -1
		   && (CONST_DOUBLE_LOW (op) & 0x1000) == 0x1000)
		  || (CONST_DOUBLE_HIGH (op) == 0
		      && (CONST_DOUBLE_LOW (op) & 0x1000) == 0))));
}
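
/* Example of the CONST_DOUBLE case above (illustrative): when cross
   compiling from a 32-bit host, DImode -5 may arrive as a CONST_DOUBLE
   with high = -1 and low = 0xfffffffb; low + 0x1000 wraps to
   0xffb < 0x2000, bit 12 of low is set, and high == -1, so the
   TARGET_ARCH64 arm accepts it as a valid simm13.  */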

/* Return true if OP is a constant 4096 for DImode on ARCH64 */

int
arith_double_4096_operand (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  return (TARGET_ARCH64
	  && ((GET_CODE (op) == CONST_INT && INTVAL (op) == 4096)
	      || (GET_CODE (op) == CONST_DOUBLE
		  && CONST_DOUBLE_LOW (op) == 4096
		  && CONST_DOUBLE_HIGH (op) == 0)));
}

/* Return true if OP is suitable as second operand for add/sub in DImode */

int
arith_double_add_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  return arith_double_operand (op, mode) || arith_double_4096_operand (op, mode);
}

/* Return true if OP is a register, or is a CONST_INT or CONST_DOUBLE that
   can fit in an 11 bit immediate field.  This is an acceptable DImode
   operand for the movcc instructions.  */
/* ??? Replace with arith11_operand?  */

int
arith11_double_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  return (register_operand (op, mode)
	  || (GET_CODE (op) == CONST_DOUBLE
	      && (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
	      && (unsigned HOST_WIDE_INT) (CONST_DOUBLE_LOW (op) + 0x400) < 0x800
	      && ((CONST_DOUBLE_HIGH (op) == -1
		   && (CONST_DOUBLE_LOW (op) & 0x400) == 0x400)
		  || (CONST_DOUBLE_HIGH (op) == 0
		      && (CONST_DOUBLE_LOW (op) & 0x400) == 0)))
	  || (GET_CODE (op) == CONST_INT
	      && (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
	      && (unsigned HOST_WIDE_INT) (INTVAL (op) + 0x400) < 0x800));
}

/* Return true if OP is a register, or is a CONST_INT or CONST_DOUBLE that
   can fit in a 10 bit immediate field.  This is an acceptable DImode
   operand for the movrcc instructions.  */
/* ??? Replace with arith10_operand?  */

int
arith10_double_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  return (register_operand (op, mode)
	  || (GET_CODE (op) == CONST_DOUBLE
	      && (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
	      && (unsigned) (CONST_DOUBLE_LOW (op) + 0x200) < 0x400
	      && ((CONST_DOUBLE_HIGH (op) == -1
		   && (CONST_DOUBLE_LOW (op) & 0x200) == 0x200)
		  || (CONST_DOUBLE_HIGH (op) == 0
		      && (CONST_DOUBLE_LOW (op) & 0x200) == 0)))
	  || (GET_CODE (op) == CONST_INT
	      && (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
	      && (unsigned HOST_WIDE_INT) (INTVAL (op) + 0x200) < 0x400));
}

/* Return truth value of whether OP is an integer which fits the
   range constraining immediate operands in most three-address insns,
   which have a 13 bit immediate field.  */

int
small_int (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  return (GET_CODE (op) == CONST_INT && SMALL_INT (op));
}

int
small_int_or_double (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  return ((GET_CODE (op) == CONST_INT && SMALL_INT (op))
	  || (GET_CODE (op) == CONST_DOUBLE
	      && CONST_DOUBLE_HIGH (op) == 0
	      && SPARC_SIMM13_P (CONST_DOUBLE_LOW (op))));
}

/* Recognize operand values for the umul instruction.  That instruction sign
   extends immediate values just like all other sparc instructions, but
   interprets the extended result as an unsigned number.  */

int
uns_small_int (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
#if HOST_BITS_PER_WIDE_INT > 32
  /* All allowed constants will fit a CONST_INT.  */
  return (GET_CODE (op) == CONST_INT
	  && ((INTVAL (op) >= 0 && INTVAL (op) < 0x1000)
	      || (INTVAL (op) >= 0xFFFFF000
		  && INTVAL (op) < 0x100000000)));
#else
  return ((GET_CODE (op) == CONST_INT && (unsigned) INTVAL (op) < 0x1000)
	  || (GET_CODE (op) == CONST_DOUBLE
	      && CONST_DOUBLE_HIGH (op) == 0
	      && (unsigned) CONST_DOUBLE_LOW (op) - 0xFFFFF000 < 0x1000));
#endif
}
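
/* Concrete case (illustrative): "umul %o0, -4096, %o1" encodes the
   simm13 value -4096; the hardware sign extends it and then treats the
   result as the unsigned multiplier 0xfffff000, which is why the range
   0xFFFFF000 .. 0xFFFFFFFF is accepted above alongside 0 .. 0xFFF.  */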

int
uns_arith_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  return register_operand (op, mode) || uns_small_int (op, mode);
}

/* Return truth value of statement that OP is a call-clobbered register.  */
int
clobbered_register (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  return (GET_CODE (op) == REG && call_used_regs[REGNO (op)]);
}

/* Return 1 if OP is a valid operand for the source of a move insn.  */

int
input_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  /* If both modes are non-void they must be the same.  */
  if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
    return 0;

  /* Only a tiny bit of handling for CONSTANT_P_RTX is necessary.  */
  if (GET_CODE (op) == CONST && GET_CODE (XEXP (op, 0)) == CONSTANT_P_RTX)
    return 1;

  /* Allow any one instruction integer constant, and all CONST_INT
     variants when we are working in DImode and !arch64.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && ((GET_CODE (op) == CONST_INT
	   && ((SPARC_SETHI_P (INTVAL (op))
		&& (! TARGET_ARCH64
		    || (INTVAL (op) >= 0)
		    || mode == SImode
		    || mode == HImode
		    || mode == QImode))
	       || SPARC_SIMM13_P (INTVAL (op))
	       || (mode == DImode
		   && ! TARGET_ARCH64)))
	  || (TARGET_ARCH64
	      && GET_CODE (op) == CONST_DOUBLE
	      && ((CONST_DOUBLE_HIGH (op) == 0
		   && SPARC_SETHI_P (CONST_DOUBLE_LOW (op)))
		  ||
#if HOST_BITS_PER_WIDE_INT == 64
		  (CONST_DOUBLE_HIGH (op) == 0
		   && SPARC_SIMM13_P (CONST_DOUBLE_LOW (op)))
#else
		  (SPARC_SIMM13_P (CONST_DOUBLE_LOW (op))
		   && (((CONST_DOUBLE_LOW (op) & 0x80000000) == 0
			&& CONST_DOUBLE_HIGH (op) == 0)
		       || (CONST_DOUBLE_HIGH (op) == -1)))
#endif
		  ))))
    return 1;

  /* If !arch64 and this is a DImode const, allow it so that
     the splits can be generated.  */
  if (! TARGET_ARCH64
      && mode == DImode
      && GET_CODE (op) == CONST_DOUBLE)
    return 1;

  if (register_operand (op, mode))
    return 1;

  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (op) == CONST_DOUBLE)
    return 1;

  /* If this is a SUBREG, look inside so that we handle
     paradoxical ones.  */
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  /* Check for valid MEM forms.  */
  if (GET_CODE (op) == MEM)
    {
      rtx inside = XEXP (op, 0);

      if (GET_CODE (inside) == LO_SUM)
	{
	  /* We can't allow these because all of the splits
	     (eventually as they trickle down into DFmode
	     splits) require offsettable memory references.  */
	  if (! TARGET_V9
	      && GET_MODE (op) == TFmode)
	    return 0;

	  return (register_operand (XEXP (inside, 0), Pmode)
		  && CONSTANT_P (XEXP (inside, 1)));
	}
      return memory_address_p (mode, inside);
    }

  return 0;
}

\f
/* We know it can't be done in one insn when we get here; the movsi
   expander guarantees this.  */
void
sparc_emit_set_const32 (op0, op1)
     rtx op0;
     rtx op1;
{
  enum machine_mode mode = GET_MODE (op0);
  rtx temp;

  if (GET_CODE (op1) == CONST_INT)
    {
      HOST_WIDE_INT value = INTVAL (op1);

      if (SPARC_SETHI_P (value)
	  || SPARC_SIMM13_P (value))
	abort ();
    }

  /* Full 2-insn decomposition is needed.  */
  if (reload_in_progress || reload_completed)
    temp = op0;
  else
    temp = gen_reg_rtx (mode);

  if (GET_CODE (op1) == CONST_INT)
    {
      /* Emit them as real moves instead of a HIGH/LO_SUM,
	 this way CSE can see everything and reuse intermediate
	 values if it wants.  */
      if (TARGET_ARCH64
	  && HOST_BITS_PER_WIDE_INT != 64
	  && (INTVAL (op1) & 0x80000000) != 0)
	{
	  emit_insn (gen_rtx_SET (VOIDmode,
				  temp,
				  gen_rtx_CONST_DOUBLE (VOIDmode, const0_rtx,
							INTVAL (op1) & 0xfffffc00, 0)));
	}
      else
	{
	  emit_insn (gen_rtx_SET (VOIDmode,
				  temp,
				  GEN_INT (INTVAL (op1) & 0xfffffc00)));
	}
      emit_insn (gen_rtx_SET (VOIDmode,
			      op0,
			      gen_rtx_IOR (mode,
					   temp,
					   GEN_INT (INTVAL (op1) & 0x3ff))));
    }
  else
    {
      /* A symbol, emit in the traditional way.  */
      emit_insn (gen_rtx_SET (VOIDmode,
			      temp,
			      gen_rtx_HIGH (mode,
					    op1)));
      emit_insn (gen_rtx_SET (VOIDmode,
			      op0,
			      gen_rtx_LO_SUM (mode,
					      temp,
					      op1)));

    }
}
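
/* Worked example (illustrative): loading 0x12345678, which is neither
   a simm13 nor a sethi-only value, goes through the CONST_INT arm
   above and becomes

	sethi	%hi(0x12345678), %temp	! %temp = 0x12345400
	or	%temp, 0x278, %op0	! the low 10 bits

   matching the GEN_INT (... & 0xfffffc00) / GEN_INT (... & 0x3ff)
   split in the code.  */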

\f
/* Sparc-v9 code-model support.  */
void
sparc_emit_set_symbolic_const64 (op0, op1, temp1)
     rtx op0;
     rtx op1;
     rtx temp1;
{
  switch (sparc_cmodel)
    {
    case CM_MEDLOW:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable must be in the low 4TB of the virtual address
	 space.

	 sethi	%hi(symbol), %temp
	 or	%temp, %lo(symbol), %reg  */
      emit_insn (gen_rtx_SET (VOIDmode, temp1, gen_rtx_HIGH (DImode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
      break;

    case CM_MEDMID:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable must be in the low 16TB of the virtual address
	 space.

	 sethi	%h44(symbol), %temp1
	 or	%temp1, %m44(symbol), %temp2
	 sllx	%temp2, 12, %temp3
	 or	%temp3, %l44(symbol), %reg  */
      emit_insn (gen_seth44 (op0, op1));
      emit_insn (gen_setm44 (op0, op0, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp1,
			      gen_rtx_ASHIFT (DImode, op0, GEN_INT (12))));
      emit_insn (gen_setl44 (op0, temp1, op1));
      break;

    case CM_MEDANY:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable can be placed anywhere in the virtual address
	 space.

	 sethi	%hh(symbol), %temp1
	 sethi	%lm(symbol), %temp2
	 or	%temp1, %hm(symbol), %temp3
	 or	%temp2, %lo(symbol), %temp4
	 sllx	%temp3, 32, %temp5
	 or	%temp4, %temp5, %reg  */

      /* Getting this right wrt. reloading is really tricky.
	 We _MUST_ have a separate temporary at this point, so
	 if we don't, we barf immediately instead of generating
	 incorrect code.  */
      if (temp1 == op0)
	abort ();

      emit_insn (gen_sethh (op0, op1));
      emit_insn (gen_setlm (temp1, op1));
      emit_insn (gen_sethm (op0, op0, op1));
      emit_insn (gen_rtx_SET (VOIDmode, op0,
			      gen_rtx_ASHIFT (DImode, op0, GEN_INT (32))));
      emit_insn (gen_rtx_SET (VOIDmode, op0,
			      gen_rtx_PLUS (DImode, op0, temp1)));
      emit_insn (gen_setlo (op0, op0, op1));
      break;

    case CM_EMBMEDANY:
      /* Old old old backwards compatibility kruft here.
	 Essentially it is MEDLOW with a fixed 64-bit
	 virtual base added to all data segment addresses.
	 Text-segment stuff is computed like MEDANY, we can't
	 reuse the code above because the relocation knobs
	 look different.

	 Data segment:	sethi	%hi(symbol), %temp1
			or	%temp1, %lo(symbol), %temp2
			add	%temp2, EMBMEDANY_BASE_REG, %reg

	 Text segment:	sethi	%uhi(symbol), %temp1
			sethi	%hi(symbol), %temp2
			or	%temp1, %ulo(symbol), %temp3
			or	%temp2, %lo(symbol), %temp4
			sllx	%temp3, 32, %temp5
			or	%temp4, %temp5, %reg  */
      if (data_segment_operand (op1, GET_MODE (op1)))
	{
	  emit_insn (gen_embmedany_sethi (temp1, op1));
	  emit_insn (gen_embmedany_brsum (op0, temp1));
	  emit_insn (gen_embmedany_losum (op0, op0, op1));
	}
      else
	{
	  /* Getting this right wrt. reloading is really tricky.
	     We _MUST_ have a separate temporary at this point,
	     so we barf immediately instead of generating
	     incorrect code.  */
	  if (temp1 == op0)
	    abort ();

	  emit_insn (gen_embmedany_textuhi (op0, op1));
	  emit_insn (gen_embmedany_texthi  (temp1, op1));
	  emit_insn (gen_embmedany_textulo (op0, op0, op1));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_ASHIFT (DImode, op0, GEN_INT (32))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_PLUS (DImode, op0, temp1)));
	  emit_insn (gen_embmedany_textlo  (op0, op0, op1));
	}
      break;

    default:
      abort();
    }
}

/* These avoid problems when cross compiling.  If we do not
   go through all this hair then the optimizer will see
   invalid REG_EQUAL notes or in some cases none at all.  */
static void sparc_emit_set_safe_HIGH64 PARAMS ((rtx, HOST_WIDE_INT));
static rtx gen_safe_SET64 PARAMS ((rtx, HOST_WIDE_INT));
static rtx gen_safe_OR64 PARAMS ((rtx, HOST_WIDE_INT));
static rtx gen_safe_XOR64 PARAMS ((rtx, HOST_WIDE_INT));

#if HOST_BITS_PER_WIDE_INT == 64
#define GEN_HIGHINT64(__x)		GEN_INT ((__x) & 0xfffffc00)
#define GEN_INT64(__x)			GEN_INT (__x)
#else
#define GEN_HIGHINT64(__x) \
	gen_rtx_CONST_DOUBLE (VOIDmode, const0_rtx, \
			      (__x) & 0xfffffc00, 0)
#define GEN_INT64(__x) \
	gen_rtx_CONST_DOUBLE (VOIDmode, const0_rtx, \
			      (__x) & 0xffffffff, \
			      ((__x) & 0x80000000 \
			       ? 0xffffffff : 0))
#endif

/* The optimizer is not to assume anything about exactly
   which bits are set for a HIGH; they are unspecified.
   Unfortunately this leads to many missed optimizations
   during CSE.  We mask out the non-HIGH bits, and match
   a plain movdi, to alleviate this problem.  */
static void
sparc_emit_set_safe_HIGH64 (dest, val)
     rtx dest;
     HOST_WIDE_INT val;
{
  emit_insn (gen_rtx_SET (VOIDmode, dest, GEN_HIGHINT64 (val)));
}

static rtx
gen_safe_SET64 (dest, val)
     rtx dest;
     HOST_WIDE_INT val;
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT64 (val));
}

static rtx
gen_safe_OR64 (src, val)
     rtx src;
     HOST_WIDE_INT val;
{
  return gen_rtx_IOR (DImode, src, GEN_INT64 (val));
}

static rtx
gen_safe_XOR64 (src, val)
     rtx src;
     HOST_WIDE_INT val;
{
  return gen_rtx_XOR (DImode, src, GEN_INT64 (val));
}

/* Worker routines for 64-bit constant formation on arch64.
   One of the key things to be doing in these emissions is
   to create as many temp REGs as possible.  This makes it
   possible for half-built constants to be used later when
   such values are similar to something required later on.
   Without doing this, the optimizer cannot see such
   opportunities.  */

static void sparc_emit_set_const64_quick1
	PARAMS ((rtx, rtx, unsigned HOST_WIDE_INT, int));

static void
sparc_emit_set_const64_quick1 (op0, temp, low_bits, is_neg)
  rtx op0;
  rtx temp;
  unsigned HOST_WIDE_INT low_bits;
  int is_neg;
{
  unsigned HOST_WIDE_INT high_bits;

  if (is_neg)
    high_bits = (~low_bits) & 0xffffffff;
  else
    high_bits = low_bits;

  sparc_emit_set_safe_HIGH64 (temp, high_bits);
  if (!is_neg)
    {
      emit_insn (gen_rtx_SET (VOIDmode, op0,
			      gen_safe_OR64 (temp, (high_bits & 0x3ff))));
    }
  else
    {
      /* If we are XOR'ing with -1, then we should emit a one's complement
	 instead.  This way the combiner will notice logical operations
	 such as ANDN later on and substitute.  */
      if ((low_bits & 0x3ff) == 0x3ff)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_NOT (DImode, temp)));
	}
      else
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_safe_XOR64 (temp,
						  (-0x400 | (low_bits & 0x3ff)))));
	}
    }
}
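
/* Worked is_neg case (illustrative): to load 0xffffffff80000001 we get
   high_bits = ~0x80000001 & 0xffffffff = 0x7ffffffe and emit

	sethi	%hi(0x7ffffffe), %temp	! %temp = 0x7ffffc00
	xor	%temp, -0x3ff, %op0	! -0x400 | (0x1 & 0x3ff)

   where the sign-extending xor flips the upper 32 bits back to all
   ones, yielding the original constant.  */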

static void sparc_emit_set_const64_quick2
	PARAMS ((rtx, rtx, unsigned HOST_WIDE_INT,
		 unsigned HOST_WIDE_INT, int));

static void
sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_immediate, shift_count)
  rtx op0;
  rtx temp;
  unsigned HOST_WIDE_INT high_bits;
  unsigned HOST_WIDE_INT low_immediate;
  int shift_count;
{
  rtx temp2 = op0;

  if ((high_bits & 0xfffffc00) != 0)
    {
      sparc_emit_set_safe_HIGH64 (temp, high_bits);
      if ((high_bits & ~0xfffffc00) != 0)
	emit_insn (gen_rtx_SET (VOIDmode, op0,
				gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
	temp2 = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      temp2 = temp;
    }

  /* Now shift it up into place.  */
  emit_insn (gen_rtx_SET (VOIDmode, op0,
			  gen_rtx_ASHIFT (DImode, temp2,
					  GEN_INT (shift_count))));

  /* If there is a low immediate part piece, finish up by
     putting that in as well.  */
  if (low_immediate != 0)
    emit_insn (gen_rtx_SET (VOIDmode, op0,
			    gen_safe_OR64 (op0, low_immediate)));
}

static void sparc_emit_set_const64_longway
	PARAMS ((rtx, rtx, unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT));

/* Full 64-bit constant decomposition.  Even though this is the
   'worst' case, we still optimize a few things away.  */
static void
sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits)
     rtx op0;
     rtx temp;
     unsigned HOST_WIDE_INT high_bits;
     unsigned HOST_WIDE_INT low_bits;
{
  rtx sub_temp;

  if (reload_in_progress || reload_completed)
    sub_temp = op0;
  else
    sub_temp = gen_reg_rtx (DImode);

  if ((high_bits & 0xfffffc00) != 0)
    {
      sparc_emit_set_safe_HIGH64 (temp, high_bits);
      if ((high_bits & ~0xfffffc00) != 0)
	emit_insn (gen_rtx_SET (VOIDmode,
				sub_temp,
				gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
	sub_temp = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      sub_temp = temp;
    }

  if (!reload_in_progress && !reload_completed)
    {
      rtx temp2 = gen_reg_rtx (DImode);
      rtx temp3 = gen_reg_rtx (DImode);
      rtx temp4 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp4,
			      gen_rtx_ASHIFT (DImode, sub_temp,
					      GEN_INT (32))));

      sparc_emit_set_safe_HIGH64 (temp2, low_bits);
      if ((low_bits & ~0xfffffc00) != 0)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, temp3,
				  gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_PLUS (DImode, temp4, temp3)));
	}
      else
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_PLUS (DImode, temp4, temp2)));
	}
    }
  else
    {
      rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
      rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
      rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
      int to_shift = 12;

      /* We are in the middle of reload, so this is really
	 painful.  However we do still make an attempt to
	 avoid emitting truly stupid code.  */
      if (low1 != const0_rtx)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_ASHIFT (DImode, sub_temp,
						  GEN_INT (to_shift))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_IOR (DImode, op0, low1)));
	  sub_temp = op0;
	  to_shift = 12;
	}
      else
	{
	  to_shift += 12;
	}
      if (low2 != const0_rtx)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_ASHIFT (DImode, sub_temp,
						  GEN_INT (to_shift))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_IOR (DImode, op0, low2)));
	  sub_temp = op0;
	  to_shift = 8;
	}
      else
	{
	  to_shift += 8;
	}
      emit_insn (gen_rtx_SET (VOIDmode, op0,
			      gen_rtx_ASHIFT (DImode, sub_temp,
					      GEN_INT (to_shift))));
      if (low3 != const0_rtx)
	emit_insn (gen_rtx_SET (VOIDmode, op0,
				gen_rtx_IOR (DImode, op0, low3)));
      /* phew...  */
    }
}

/* Analyze a 64-bit constant for certain properties.  */
static void analyze_64bit_constant
	PARAMS ((unsigned HOST_WIDE_INT,
		 unsigned HOST_WIDE_INT,
		 int *, int *, int *));

static void
analyze_64bit_constant (high_bits, low_bits, hbsp, lbsp, abbasp)
     unsigned HOST_WIDE_INT high_bits, low_bits;
     int *hbsp, *lbsp, *abbasp;
{
  int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
  int i;

  lowest_bit_set = highest_bit_set = -1;
  i = 0;
  do
    {
      if ((lowest_bit_set == -1)
	  && ((low_bits >> i) & 1))
	lowest_bit_set = i;
      if ((highest_bit_set == -1)
	  && ((high_bits >> (32 - i - 1)) & 1))
	highest_bit_set = (64 - i - 1);
    }
  while (++i < 32
	 && ((highest_bit_set == -1)
	     || (lowest_bit_set == -1)));
  if (i == 32)
    {
      i = 0;
      do
	{
	  if ((lowest_bit_set == -1)
	      && ((high_bits >> i) & 1))
	    lowest_bit_set = i + 32;
	  if ((highest_bit_set == -1)
	      && ((low_bits >> (32 - i - 1)) & 1))
	    highest_bit_set = 32 - i - 1;
	}
      while (++i < 32
	     && ((highest_bit_set == -1)
		 || (lowest_bit_set == -1)));
    }
  /* If there are no bits set this should have gone out
     as one instruction!  */
  if (lowest_bit_set == -1
      || highest_bit_set == -1)
    abort ();
  all_bits_between_are_set = 1;
  for (i = lowest_bit_set; i <= highest_bit_set; i++)
    {
      if (i < 32)
	{
	  if ((low_bits & (1 << i)) != 0)
	    continue;
	}
      else
	{
	  if ((high_bits & (1 << (i - 32))) != 0)
	    continue;
	}
      all_bits_between_are_set = 0;
      break;
    }
  *hbsp = highest_bit_set;
  *lbsp = lowest_bit_set;
  *abbasp = all_bits_between_are_set;
}
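
/* Example (illustrative): for the constant 0x0000000ff0000000 we have
   high_bits = 0x0000000f and low_bits = 0xf0000000, giving
   lowest_bit_set = 28, highest_bit_set = 35 and
   all_bits_between_are_set = 1; a span this narrow is handled by the
   mov/sllx path in sparc_emit_set_const64 below.  */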

static int const64_is_2insns
	PARAMS ((unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT));

static int
const64_is_2insns (high_bits, low_bits)
     unsigned HOST_WIDE_INT high_bits, low_bits;
{
  int highest_bit_set, lowest_bit_set, all_bits_between_are_set;

  if (high_bits == 0
      || high_bits == 0xffffffff)
    return 1;

  analyze_64bit_constant (high_bits, low_bits,
			  &highest_bit_set, &lowest_bit_set,
			  &all_bits_between_are_set);

  if ((highest_bit_set == 63
       || lowest_bit_set == 0)
      && all_bits_between_are_set != 0)
    return 1;

  if ((highest_bit_set - lowest_bit_set) < 21)
    return 1;

  return 0;
}

static unsigned HOST_WIDE_INT create_simple_focus_bits
	PARAMS ((unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
		 int, int));

static unsigned HOST_WIDE_INT
create_simple_focus_bits (high_bits, low_bits, lowest_bit_set, shift)
     unsigned HOST_WIDE_INT high_bits, low_bits;
     int lowest_bit_set, shift;
{
  HOST_WIDE_INT hi, lo;

  if (lowest_bit_set < 32)
    {
      lo = (low_bits >> lowest_bit_set) << shift;
      hi = ((high_bits << (32 - lowest_bit_set)) << shift);
    }
  else
    {
      lo = 0;
      hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
    }
  if (hi & lo)
    abort ();
  return (hi | lo);
}
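
/* Continuing the example above (illustrative):
   create_simple_focus_bits (0xf, 0xf0000000, 28, 0) shifts the set
   bits down to the bottom of the word and returns 0xff, so the
   constant 0x0000000ff0000000 is materialized by the 2-insn path
   below as

	mov	0xff, %reg
	sllx	%reg, 28, %reg  */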

/* Here we are sure to be arch64 and this is an integer constant
   being loaded into a register.  Emit the most efficient
   insn sequence possible.  Detection of all the 1-insn cases
   has been done already.  */
void
sparc_emit_set_const64 (op0, op1)
     rtx op0;
     rtx op1;
{
  unsigned HOST_WIDE_INT high_bits, low_bits;
  int lowest_bit_set, highest_bit_set;
  int all_bits_between_are_set;
  rtx temp;

  /* Sanity check that we know what we are working with.  */
  if (! TARGET_ARCH64
      || GET_CODE (op0) != REG
      || (REGNO (op0) >= SPARC_FIRST_FP_REG
	  && REGNO (op0) <= SPARC_LAST_V9_FP_REG))
    abort ();

  if (reload_in_progress || reload_completed)
    temp = op0;
  else
    temp = gen_reg_rtx (DImode);

  if (GET_CODE (op1) != CONST_DOUBLE
      && GET_CODE (op1) != CONST_INT)
    {
      sparc_emit_set_symbolic_const64 (op0, op1, temp);
      return;
    }

  if (GET_CODE (op1) == CONST_DOUBLE)
    {
#if HOST_BITS_PER_WIDE_INT == 64
      high_bits = (CONST_DOUBLE_LOW (op1) >> 32) & 0xffffffff;
      low_bits  = CONST_DOUBLE_LOW (op1) & 0xffffffff;
#else
      high_bits = CONST_DOUBLE_HIGH (op1);
      low_bits = CONST_DOUBLE_LOW (op1);
#endif
    }
  else
    {
#if HOST_BITS_PER_WIDE_INT == 64
      high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
      low_bits = (INTVAL (op1) & 0xffffffff);
#else
      high_bits = ((INTVAL (op1) < 0) ?
		   0xffffffff :
		   0x00000000);
      low_bits = INTVAL (op1);
#endif
    }

  /* low_bits	bits 0  --> 31
     high_bits	bits 32 --> 63  */

  analyze_64bit_constant (high_bits, low_bits,
			  &highest_bit_set, &lowest_bit_set,
			  &all_bits_between_are_set);

  /* First try for a 2-insn sequence.  */

  /* These situations are preferred because the optimizer can
   * do more things with them:
   * 1) mov	-1, %reg
   *    sllx	%reg, shift, %reg
   * 2) mov	-1, %reg
   *    srlx	%reg, shift, %reg
   * 3) mov	some_small_const, %reg
   *    sllx	%reg, shift, %reg
   */
  if (((highest_bit_set == 63
	|| lowest_bit_set == 0)
       && all_bits_between_are_set != 0)
      || ((highest_bit_set - lowest_bit_set) < 12))
    {
      HOST_WIDE_INT the_const = -1;
      int shift = lowest_bit_set;

      if ((highest_bit_set != 63
	   && lowest_bit_set != 0)
	  || all_bits_between_are_set == 0)
	{
	  the_const =
	    create_simple_focus_bits (high_bits, low_bits,
				      lowest_bit_set, 0);
	}
      else if (lowest_bit_set == 0)
	shift = -(63 - highest_bit_set);

      if (! SPARC_SIMM13_P (the_const))
	abort ();

      emit_insn (gen_safe_SET64 (temp, the_const));
      if (shift > 0)
	emit_insn (gen_rtx_SET (VOIDmode,
				op0,
				gen_rtx_ASHIFT (DImode,
						temp,
						GEN_INT (shift))));
      else if (shift < 0)
	emit_insn (gen_rtx_SET (VOIDmode,
				op0,
				gen_rtx_LSHIFTRT (DImode,
						  temp,
						  GEN_INT (-shift))));
      else
	abort ();
      return;
    }

  /* Now a range of 22 or fewer bits set somewhere.
   * 1) sethi	%hi(focus_bits), %reg
   *    sllx	%reg, shift, %reg
   * 2) sethi	%hi(focus_bits), %reg
   *    srlx	%reg, shift, %reg
   */
1940 if ((highest_bit_set - lowest_bit_set) < 21)
1941 {
1942 unsigned HOST_WIDE_INT focus_bits =
1943 create_simple_focus_bits (high_bits, low_bits,
1944 lowest_bit_set, 10);
1945
1946 if (! SPARC_SETHI_P (focus_bits))
1947 abort ();
1948
1949 sparc_emit_set_safe_HIGH64 (temp, focus_bits);
1950
1951 /* If lowest_bit_set == 10 then a sethi alone could have done it. */
1952 if (lowest_bit_set < 10)
1953 emit_insn (gen_rtx_SET (VOIDmode,
1954 op0,
1955 gen_rtx_LSHIFTRT (DImode, temp,
1956 GEN_INT (10 - lowest_bit_set))));
1957 else if (lowest_bit_set > 10)
1958 emit_insn (gen_rtx_SET (VOIDmode,
1959 op0,
1960 gen_rtx_ASHIFT (DImode, temp,
1961 GEN_INT (lowest_bit_set - 10))));
1962 else
1963 abort ();
1964 return;
1965 }
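/* Illustrative example (not part of the original source): op1 ==
   0x0000000ffffc0000 has lowest_bit_set == 18 and highest_bit_set == 35.
   The focus bits are placed at bit 10, giving focus_bits == 0x0ffffc00,
   and we emit
	sethi	%hi(0x0ffffc00), %reg
	sllx	%reg, 8, %reg		! 18 - 10 == 8  */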
1966
1967 /* 1) sethi %hi(low_bits), %reg
1968 * or %reg, %lo(low_bits), %reg
1969 * 2) sethi %hi(~low_bits), %reg
1970 * xor %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
1971 */
1972 if (high_bits == 0
1973 || high_bits == 0xffffffff)
1974 {
1975 sparc_emit_set_const64_quick1 (op0, temp, low_bits,
1976 (high_bits == 0xffffffff));
1977 return;
1978 }
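/* Illustrative example (not part of the original source): op1 ==
   0x000000007ffffd23 has high_bits == 0, so quick1 emits
	sethi	%hi(0x7ffffc00), %reg
	or	%reg, 0x123, %reg
   relying on v9 sethi zero-extending into the upper 32 bits.  */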
1979
1980 /* Now, try 3-insn sequences. */
1981
1982 /* 1) sethi %hi(high_bits), %reg
1983 * or %reg, %lo(high_bits), %reg
1984 * sllx %reg, 32, %reg
1985 */
1986 if (low_bits == 0)
1987 {
1988 sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
1989 return;
1990 }
1991
1992 /* We may be able to do something quick
1993 when the constant is negated, so try that. */
1994 if (const64_is_2insns ((~high_bits) & 0xffffffff,
1995 (~low_bits) & 0xfffffc00))
1996 {
1997 /* NOTE: The trailing bits get XOR'd so we need the
1998 non-negated bits, not the negated ones. */
1999 unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;
2000
2001 if ((((~high_bits) & 0xffffffff) == 0
2002 && ((~low_bits) & 0x80000000) == 0)
2003 || (((~high_bits) & 0xffffffff) == 0xffffffff
2004 && ((~low_bits) & 0x80000000) != 0))
2005 {
2006 int fast_int = (~low_bits & 0xffffffff);
2007
2008 if ((SPARC_SETHI_P (fast_int)
2009 && (~high_bits & 0xffffffff) == 0)
2010 || SPARC_SIMM13_P (fast_int))
2011 emit_insn (gen_safe_SET64 (temp, fast_int));
2012 else
2013 sparc_emit_set_const64 (temp, GEN_INT64 (fast_int));
2014 }
2015 else
2016 {
2017 rtx negated_const;
2018 #if HOST_BITS_PER_WIDE_INT == 64
2019 negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
2020 (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
2021 #else
2022 negated_const = gen_rtx_CONST_DOUBLE (DImode, const0_rtx,
2023 (~low_bits) & 0xfffffc00,
2024 (~high_bits) & 0xffffffff);
2025 #endif
2026 sparc_emit_set_const64 (temp, negated_const);
2027 }
2028
2029 /* If we are XOR'ing with -1, then we should emit a one's complement
2030 instead. This way the combiner will notice logical operations
2031 such as ANDN later on and substitute. */
2032 if (trailing_bits == 0x3ff)
2033 {
2034 emit_insn (gen_rtx_SET (VOIDmode, op0,
2035 gen_rtx_NOT (DImode, temp)));
2036 }
2037 else
2038 {
2039 emit_insn (gen_rtx_SET (VOIDmode,
2040 op0,
2041 gen_safe_XOR64 (temp,
2042 (-0x400 | trailing_bits))));
2043 }
2044 return;
2045 }
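/* Illustrative example (not part of the original source, and assuming
   const64_is_2insns accepts any constant whose set bits span fewer
   than 21 positions): op1 == 0xfffffe003fffffff is not 2-insn material
   itself, but its negation 0x000001ffc0000000 is, and the trailing 10
   bits of op1 are all ones, so we end up with
	mov	2047, %temp
	sllx	%temp, 30, %temp
	not	%temp, %reg		! xnor %temp, %g0, %reg  */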
2046
2047 /* 1) sethi %hi(xxx), %reg
2048 * or %reg, %lo(xxx), %reg
2049 * sllx %reg, yyy, %reg
2050 *
2051 * ??? This is just a generalized version of the low_bits==0
2052 * thing above, FIXME...
2053 */
2054 if ((highest_bit_set - lowest_bit_set) < 32)
2055 {
2056 unsigned HOST_WIDE_INT focus_bits =
2057 create_simple_focus_bits (high_bits, low_bits,
2058 lowest_bit_set, 0);
2059
2060 /* We can't get here in this state. */
2061 if (highest_bit_set < 32
2062 || lowest_bit_set >= 32)
2063 abort ();
2064
2065 /* So what we know is that the set bits straddle the
2066 middle of the 64-bit word. */
2067 sparc_emit_set_const64_quick2 (op0, temp,
2068 focus_bits, 0,
2069 lowest_bit_set);
2070 return;
2071 }
2072
2073 /* 1) sethi %hi(high_bits), %reg
2074 * or %reg, %lo(high_bits), %reg
2075 * sllx %reg, 32, %reg
2076 * or %reg, low_bits, %reg
2077 */
2078 if (SPARC_SIMM13_P (low_bits)
2079 && ((int) low_bits > 0))
2080 {
2081 sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
2082 return;
2083 }
2084
2085 /* The easiest way when all else fails, is full decomposition. */
2086 #if 0
2087 printf ("sparc_emit_set_const64: Hard constant [%08lx%08lx] neg[%08lx%08lx]\n",
2088 high_bits, low_bits, ~high_bits, ~low_bits);
2089 #endif
2090 sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
2091 }
2092
2093 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
2094 return the mode to be used for the comparison. For floating-point,
2095 CCFP[E]mode is used. CC_NOOVmode should be used when the first operand
2096 is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
2097 processing is needed. */
2098
2099 enum machine_mode
2100 select_cc_mode (op, x, y)
2101 enum rtx_code op;
2102 rtx x;
2103 rtx y ATTRIBUTE_UNUSED;
2104 {
2105 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2106 {
2107 switch (op)
2108 {
2109 case EQ:
2110 case NE:
2111 case UNORDERED:
2112 case ORDERED:
2113 case UNLT:
2114 case UNLE:
2115 case UNGT:
2116 case UNGE:
2117 case UNEQ:
2118 case LTGT:
2119 return CCFPmode;
2120
2121 case LT:
2122 case LE:
2123 case GT:
2124 case GE:
2125 return CCFPEmode;
2126
2127 default:
2128 abort ();
2129 }
2130 }
2131 else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
2132 || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
2133 {
2134 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2135 return CCX_NOOVmode;
2136 else
2137 return CC_NOOVmode;
2138 }
2139 else
2140 {
2141 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2142 return CCXmode;
2143 else
2144 return CCmode;
2145 }
2146 }
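/* Illustrative note (not part of the original source): for a compare
   such as (compare (plus:SI a b) (const_int 0)) this returns
   CC_NOOVmode, recording that the condition codes come from the
   arithmetic insn itself (e.g. addcc) and that the overflow bit cannot
   be relied upon, so only conditions valid under that constraint are
   matched.  */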
2147
2148 /* X and Y are two things to compare using CODE. Emit the compare insn and
2149 return the rtx for the cc reg in the proper mode. */
2150
2151 rtx
2152 gen_compare_reg (code, x, y)
2153 enum rtx_code code;
2154 rtx x, y;
2155 {
2156 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
2157 rtx cc_reg;
2158
2159 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
2160 fcc regs (cse can't tell they're really call clobbered regs and will
2161 remove a duplicate comparison even if there is an intervening function
2162 call - it will then try to reload the cc reg via an int reg which is why
2163 we need the movcc patterns). It is possible to provide the movcc
2164 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
2165 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
2166 to tell cse that CCFPE mode registers (even pseudos) are call
2167 clobbered. */
2168
2169 /* ??? This is an experiment. Rather than making changes to cse which may
2170 or may not be easy/clean, we do our own cse. This is possible because
2171 we will generate hard registers. Cse knows they're call clobbered (it
2172 doesn't know the same thing about pseudos). If we guess wrong, no big
2173 deal, but if we win, great! */
2174
2175 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2176 #if 1 /* experiment */
2177 {
2178 int reg;
2179 /* We cycle through the registers to ensure they're all exercised. */
2180 static int next_fcc_reg = 0;
2181 /* Previous x,y for each fcc reg. */
2182 static rtx prev_args[4][2];
2183
2184 /* Scan prev_args for x,y. */
2185 for (reg = 0; reg < 4; reg++)
2186 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
2187 break;
2188 if (reg == 4)
2189 {
2190 reg = next_fcc_reg;
2191 prev_args[reg][0] = x;
2192 prev_args[reg][1] = y;
2193 next_fcc_reg = (next_fcc_reg + 1) & 3;
2194 }
2195 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
2196 }
2197 #else
2198 cc_reg = gen_reg_rtx (mode);
2199 #endif /* ! experiment */
2200 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2201 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
2202 else
2203 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
2204
2205 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
2206 gen_rtx_COMPARE (mode, x, y)));
2207
2208 return cc_reg;
2209 }
2210
2211 /* This function is used for v9 only.
2212 CODE is the code for an Scc's comparison.
2213 OPERANDS[0] is the target of the Scc insn.
2214 OPERANDS[1] is the value we compare against const0_rtx (which hasn't
2215 been generated yet).
2216
2217 This function is needed to turn
2218
2219 (set (reg:SI 110)
2220 (gt (reg:CCX 100 %icc)
2221 (const_int 0)))
2222 into
2223 (set (reg:SI 110)
2224 (gt:DI (reg:CCX 100 %icc)
2225 (const_int 0)))
2226
2227 I.e.: the instruction recognizer needs to see the mode of the comparison to
2228 find the right instruction. We could use "gt:DI" right in the
2229 define_expand, but leaving it out allows us to handle DI, SI, etc.
2230
2231 We refer to the global sparc compare operands sparc_compare_op0 and
2232 sparc_compare_op1. */
2233
2234 int
2235 gen_v9_scc (compare_code, operands)
2236 enum rtx_code compare_code;
2237 register rtx *operands;
2238 {
2239 rtx temp, op0, op1;
2240
2241 if (! TARGET_ARCH64
2242 && (GET_MODE (sparc_compare_op0) == DImode
2243 || GET_MODE (operands[0]) == DImode))
2244 return 0;
2245
2246 /* Handle the case where operands[0] == sparc_compare_op0.
2247 We "early clobber" the result. */
2248 if (REGNO (operands[0]) == REGNO (sparc_compare_op0))
2249 {
2250 op0 = gen_reg_rtx (GET_MODE (sparc_compare_op0));
2251 emit_move_insn (op0, sparc_compare_op0);
2252 }
2253 else
2254 op0 = sparc_compare_op0;
2255 /* For consistency in the following. */
2256 op1 = sparc_compare_op1;
2257
2258 /* Try to use the movrCC insns. */
2259 if (TARGET_ARCH64
2260 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
2261 && op1 == const0_rtx
2262 && v9_regcmp_p (compare_code))
2263 {
2264 /* Special case for op0 != 0, doable with one insn when operands[0] ==
2265 sparc_compare_op0 (not assumed here). Copying op0 into the target
2266 first makes the movr below yield 1 for non-zero op0, else 0. */
2267
2268 if (compare_code == NE
2269 && GET_MODE (operands[0]) == DImode
2270 && GET_MODE (op0) == DImode)
2271 {
2272 emit_insn (gen_rtx_SET (VOIDmode, operands[0], op0));
2273 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2274 gen_rtx_IF_THEN_ELSE (DImode,
2275 gen_rtx_fmt_ee (compare_code, DImode,
2276 op0, const0_rtx),
2277 const1_rtx,
2278 operands[0])));
2279 return 1;
2280 }
2281
2282 emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx));
2283 if (GET_MODE (op0) != DImode)
2284 {
2285 temp = gen_reg_rtx (DImode);
2286 convert_move (temp, op0, 0);
2287 }
2288 else
2289 temp = op0;
2290 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2291 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
2292 gen_rtx_fmt_ee (compare_code, DImode,
2293 temp, const0_rtx),
2294 const1_rtx,
2295 operands[0])));
2296 return 1;
2297 }
2298 else
2299 {
2300 operands[1] = gen_compare_reg (compare_code, op0, op1);
2301
2302 switch (GET_MODE (operands[1]))
2303 {
2304 case CCmode :
2305 case CCXmode :
2306 case CCFPEmode :
2307 case CCFPmode :
2308 break;
2309 default :
2310 abort ();
2311 }
2312 emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx));
2313 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2314 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
2315 gen_rtx_fmt_ee (compare_code,
2316 GET_MODE (operands[1]),
2317 operands[1], const0_rtx),
2318 const1_rtx, operands[0])));
2319 return 1;
2320 }
2321 }
2322
2323 /* Emit a conditional jump insn for the v9 architecture using comparison code
2324 CODE and jump target LABEL.
2325 This function exists to take advantage of the v9 brxx insns. */
2326
2327 void
2328 emit_v9_brxx_insn (code, op0, label)
2329 enum rtx_code code;
2330 rtx op0, label;
2331 {
2332 emit_jump_insn (gen_rtx_SET (VOIDmode,
2333 pc_rtx,
2334 gen_rtx_IF_THEN_ELSE (VOIDmode,
2335 gen_rtx_fmt_ee (code, GET_MODE (op0),
2336 op0, const0_rtx),
2337 gen_rtx_LABEL_REF (VOIDmode, label),
2338 pc_rtx)));
2339 }
2340
2341 /* Generate a DFmode part of a hard TFmode register.
2342 REG is the TFmode hard register, LOW is 1 for the
2343 low 64 bits of the register and 0 otherwise. */
2345 rtx
2346 gen_df_reg (reg, low)
2347 rtx reg;
2348 int low;
2349 {
2350 int regno = REGNO (reg);
2351
2352 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
2353 regno += (TARGET_ARCH64 && regno < 32) ? 1 : 2;
2354 return gen_rtx_REG (DFmode, regno);
2355 }
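/* Illustrative example (not part of the original source): SPARC is
   WORDS_BIG_ENDIAN, so for a TFmode value in %f8 (regno 40),
   gen_df_reg (reg, 0) returns DFmode regno 40 (%f8, the high half)
   and gen_df_reg (reg, 1) returns regno 42 (%f10, the low half).  */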
2356 \f
2357 /* Return nonzero if a return peephole merging return with
2358 setting of output register is ok. */
2359 int
2360 leaf_return_peephole_ok ()
2361 {
2362 return (actual_fsize == 0);
2363 }
2364
2365 /* Return nonzero if TRIAL can go into the function epilogue's
2366 delay slot. SLOT is the slot we are trying to fill. */
2367
2368 int
2369 eligible_for_epilogue_delay (trial, slot)
2370 rtx trial;
2371 int slot;
2372 {
2373 rtx pat, src;
2374
2375 if (slot >= 1)
2376 return 0;
2377
2378 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2379 return 0;
2380
2381 if (get_attr_length (trial) != 1)
2382 return 0;
2383
2384 /* If there are any call-saved registers, we should scan TRIAL to verify
2385 that it does not reference them. For now just make it easy. */
2386 if (num_gfregs)
2387 return 0;
2388
2389 /* In the case of a true leaf function, anything can go into the delay slot.
2390 However, a delay slot only exists if the frame size is zero; otherwise
2391 we will put an insn to adjust the stack after the return. */
2392 if (current_function_uses_only_leaf_regs)
2393 {
2394 if (leaf_return_peephole_ok ())
2395 return ((get_attr_in_uncond_branch_delay (trial)
2396 == IN_BRANCH_DELAY_TRUE));
2397 return 0;
2398 }
2399
2400 pat = PATTERN (trial);
2401
2402 /* Otherwise, only operations which can be done in tandem with
2403 a `restore' or `return' insn can go into the delay slot. */
2404 if (GET_CODE (SET_DEST (pat)) != REG
2405 || REGNO (SET_DEST (pat)) < 24)
2406 return 0;
2407
2408 /* If this instruction sets up a floating point register and we have a return
2409 instruction, it can probably go in. But `restore' will not work
2410 with FP_REGS. */
2411 if (REGNO (SET_DEST (pat)) >= 32)
2412 {
2413 if (TARGET_V9 && ! epilogue_renumber (&pat, 1)
2414 && (get_attr_in_uncond_branch_delay (trial) == IN_BRANCH_DELAY_TRUE))
2415 return 1;
2416 return 0;
2417 }
2418
2419 /* The set of insns matched here must agree precisely with the set of
2420 patterns paired with a RETURN in sparc.md. */
2421
2422 src = SET_SRC (pat);
2423
2424 /* This matches "*return_[qhs]i" or even "*return_di" on TARGET_ARCH64. */
2425 if (arith_operand (src, GET_MODE (src)))
2426 {
2427 if (TARGET_ARCH64)
2428 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2429 else
2430 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
2431 }
2432
2433 /* This matches "*return_di". */
2434 else if (arith_double_operand (src, GET_MODE (src)))
2435 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2436
2437 /* This matches "*return_sf_no_fpu". */
2438 else if (! TARGET_FPU && restore_operand (SET_DEST (pat), SFmode)
2439 && register_operand (src, SFmode))
2440 return 1;
2441
2442 /* If we have a return instruction, anything that does not use
2443 local or output registers and can go into a delay slot wins. */
2444 else if (TARGET_V9 && ! epilogue_renumber (&pat, 1)
2445 && (get_attr_in_uncond_branch_delay (trial) == IN_BRANCH_DELAY_TRUE))
2446 return 1;
2447
2448 /* This matches "*return_addsi". */
2449 else if (GET_CODE (src) == PLUS
2450 && arith_operand (XEXP (src, 0), SImode)
2451 && arith_operand (XEXP (src, 1), SImode)
2452 && (register_operand (XEXP (src, 0), SImode)
2453 || register_operand (XEXP (src, 1), SImode)))
2454 return 1;
2455
2456 /* This matches "*return_adddi". */
2457 else if (GET_CODE (src) == PLUS
2458 && arith_double_operand (XEXP (src, 0), DImode)
2459 && arith_double_operand (XEXP (src, 1), DImode)
2460 && (register_operand (XEXP (src, 0), DImode)
2461 || register_operand (XEXP (src, 1), DImode)))
2462 return 1;
2463
2464 /* This can match "*return_losum_[sd]i".
2465 Catch only some cases, so that return_losum* don't have
2466 to be too big. */
2467 else if (GET_CODE (src) == LO_SUM
2468 && ! TARGET_CM_MEDMID
2469 && ((register_operand (XEXP (src, 0), SImode)
2470 && immediate_operand (XEXP (src, 1), SImode))
2471 || (TARGET_ARCH64
2472 && register_operand (XEXP (src, 0), DImode)
2473 && immediate_operand (XEXP (src, 1), DImode))))
2474 return 1;
2475
2476 /* sll{,x} reg,1,reg2 is add reg,reg,reg2 as well. */
2477 else if (GET_CODE (src) == ASHIFT
2478 && (register_operand (XEXP (src, 0), SImode)
2479 || register_operand (XEXP (src, 0), DImode))
2480 && XEXP (src, 1) == const1_rtx)
2481 return 1;
2482
2483 return 0;
2484 }
2485
2486 /* Return nonzero if TRIAL can go into the sibling call
2487 delay slot. */
2488
2489 int
2490 eligible_for_sibcall_delay (trial)
2491 rtx trial;
2492 {
2493 rtx pat, src;
2494
2495 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2496 return 0;
2497
2498 if (get_attr_length (trial) != 1 || profile_block_flag == 2)
2499 return 0;
2500
2501 pat = PATTERN (trial);
2502
2503 if (current_function_uses_only_leaf_regs)
2504 {
2505 /* If the tail call is done using the call instruction,
2506 we have to restore %o7 in the delay slot. */
2507 if ((TARGET_ARCH64 && ! TARGET_CM_MEDLOW) || flag_pic)
2508 return 0;
2509
2510 /* %g1 is used to build the function address. */
2511 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
2512 return 0;
2513
2514 return 1;
2515 }
2516
2517 /* Otherwise, only operations which can be done in tandem with
2518 a `restore' insn can go into the delay slot. */
2519 if (GET_CODE (SET_DEST (pat)) != REG
2520 || REGNO (SET_DEST (pat)) < 24
2521 || REGNO (SET_DEST (pat)) >= 32)
2522 return 0;
2523
2524 /* If it mentions %o7, it can't go in, because sibcall will clobber it
2525 in most cases. */
2526 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
2527 return 0;
2528
2529 src = SET_SRC (pat);
2530
2531 if (arith_operand (src, GET_MODE (src)))
2532 {
2533 if (TARGET_ARCH64)
2534 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2535 else
2536 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
2537 }
2538
2539 else if (arith_double_operand (src, GET_MODE (src)))
2540 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2541
2542 else if (! TARGET_FPU && restore_operand (SET_DEST (pat), SFmode)
2543 && register_operand (src, SFmode))
2544 return 1;
2545
2546 else if (GET_CODE (src) == PLUS
2547 && arith_operand (XEXP (src, 0), SImode)
2548 && arith_operand (XEXP (src, 1), SImode)
2549 && (register_operand (XEXP (src, 0), SImode)
2550 || register_operand (XEXP (src, 1), SImode)))
2551 return 1;
2552
2553 else if (GET_CODE (src) == PLUS
2554 && arith_double_operand (XEXP (src, 0), DImode)
2555 && arith_double_operand (XEXP (src, 1), DImode)
2556 && (register_operand (XEXP (src, 0), DImode)
2557 || register_operand (XEXP (src, 1), DImode)))
2558 return 1;
2559
2560 else if (GET_CODE (src) == LO_SUM
2561 && ! TARGET_CM_MEDMID
2562 && ((register_operand (XEXP (src, 0), SImode)
2563 && immediate_operand (XEXP (src, 1), SImode))
2564 || (TARGET_ARCH64
2565 && register_operand (XEXP (src, 0), DImode)
2566 && immediate_operand (XEXP (src, 1), DImode))))
2567 return 1;
2568
2569 else if (GET_CODE (src) == ASHIFT
2570 && (register_operand (XEXP (src, 0), SImode)
2571 || register_operand (XEXP (src, 0), DImode))
2572 && XEXP (src, 1) == const1_rtx)
2573 return 1;
2574
2575 return 0;
2576 }
2577
2578 static int
2579 check_return_regs (x)
2580 rtx x;
2581 {
2582 switch (GET_CODE (x))
2583 {
2584 case REG:
2585 return IN_OR_GLOBAL_P (x);
2586
2587 case CONST_INT:
2588 case CONST_DOUBLE:
2589 case CONST:
2590 case SYMBOL_REF:
2591 case LABEL_REF:
2592 return 1;
2593
2594 case SET:
2595 case IOR:
2596 case AND:
2597 case XOR:
2598 case PLUS:
2599 case MINUS:
2600 if (check_return_regs (XEXP (x, 1)) == 0)
2601 return 0;
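/* Fall through to check the first operand as well. */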
2602 case NOT:
2603 case NEG:
2604 case MEM:
2605 return check_return_regs (XEXP (x, 0));
2606
2607 default:
2608 return 0;
2609 }
2610
2611 }
2612
2613 /* Return 1 if TRIAL references only in and global registers. */
2614 int
2615 eligible_for_return_delay (trial)
2616 rtx trial;
2617 {
2618 if (GET_CODE (PATTERN (trial)) != SET)
2619 return 0;
2620
2621 return check_return_regs (PATTERN (trial));
2622 }
2623
2624 int
2625 short_branch (uid1, uid2)
2626 int uid1, uid2;
2627 {
2628 int delta = insn_addresses[uid1] - insn_addresses[uid2];
2629
2630 /* Leave a few words of "slop". */
2631 if (delta >= -1023 && delta <= 1022)
2632 return 1;
2633
2634 return 0;
2635 }
2636
2637 /* Return non-zero if REG is not used after INSN.
2638 We assume REG is a reload reg, and therefore does
2639 not live past labels or calls or jumps. */
2640 int
2641 reg_unused_after (reg, insn)
2642 rtx reg;
2643 rtx insn;
2644 {
2645 enum rtx_code code, prev_code = UNKNOWN;
2646
2647 while ((insn = NEXT_INSN (insn)))
2648 {
2649 if (prev_code == CALL_INSN && call_used_regs[REGNO (reg)])
2650 return 1;
2651
2652 code = GET_CODE (insn);
2653 if (GET_CODE (insn) == CODE_LABEL)
2654 return 1;
2655
2656 if (GET_RTX_CLASS (code) == 'i')
2657 {
2658 rtx set = single_set (insn);
2659 int in_src = set && reg_overlap_mentioned_p (reg, SET_SRC (set));
2660 if (set && in_src)
2661 return 0;
2662 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
2663 return 1;
2664 if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
2665 return 0;
2666 }
2667 prev_code = code;
2668 }
2669 return 1;
2670 }
2671 \f
2672 /* The table we use to reference PIC data. */
2673 static rtx global_offset_table;
2674
2675 /* The function we use to get at it. */
2676 static rtx get_pc_symbol;
2677 static char get_pc_symbol_name[256];
2678
2679 /* Ensure that we are not using patterns that are not OK with PIC. */
2680
2681 int
2682 check_pic (i)
2683 int i;
2684 {
2685 switch (flag_pic)
2686 {
2687 case 1:
2688 if (GET_CODE (recog_data.operand[i]) == SYMBOL_REF
2689 || (GET_CODE (recog_data.operand[i]) == CONST
2690 && ! (GET_CODE (XEXP (recog_data.operand[i], 0)) == MINUS
2691 && (XEXP (XEXP (recog_data.operand[i], 0), 0)
2692 == global_offset_table)
2693 && (GET_CODE (XEXP (XEXP (recog_data.operand[i], 0), 1))
2694 == CONST))))
2695 abort ();
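/* Fall through: an operand that passes the check above is OK, as
anything is for flag_pic == 2. */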
2696 case 2:
2697 default:
2698 return 1;
2699 }
2700 }
2701
2702 /* Return true if X is an address which needs a temporary register when
2703 reloaded while generating PIC code. */
2704
2705 int
2706 pic_address_needs_scratch (x)
2707 rtx x;
2708 {
2709 /* An address which is a symbolic operand plus a non-SMALL_INT constant needs a temp reg. */
2710 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
2711 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
2712 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2713 && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
2714 return 1;
2715
2716 return 0;
2717 }
2718
2719 /* Legitimize PIC addresses. If the address is already position-independent,
2720 we return ORIG. Newly generated position-independent addresses go into a
2721 reg. This is REG if nonzero, otherwise we allocate register(s) as
2722 necessary. */
2723
2724 rtx
2725 legitimize_pic_address (orig, mode, reg)
2726 rtx orig;
2727 enum machine_mode mode ATTRIBUTE_UNUSED;
2728 rtx reg;
2729 {
2730 if (GET_CODE (orig) == SYMBOL_REF)
2731 {
2732 rtx pic_ref, address;
2733 rtx insn;
2734
2735 if (reg == 0)
2736 {
2737 if (reload_in_progress || reload_completed)
2738 abort ();
2739 else
2740 reg = gen_reg_rtx (Pmode);
2741 }
2742
2743 if (flag_pic == 2)
2744 {
2745 /* If not during reload, allocate another temp reg here for loading
2746 in the address, so that these instructions can be optimized
2747 properly. */
2748 rtx temp_reg = ((reload_in_progress || reload_completed)
2749 ? reg : gen_reg_rtx (Pmode));
2750
2751 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
2752 won't get confused into thinking that these two instructions
2753 are loading in the true address of the symbol. If in the
2754 future a PIC rtx exists, that should be used instead. */
2755 if (Pmode == SImode)
2756 {
2757 emit_insn (gen_movsi_high_pic (temp_reg, orig));
2758 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
2759 }
2760 else
2761 {
2762 emit_insn (gen_movdi_high_pic (temp_reg, orig));
2763 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
2764 }
2765 address = temp_reg;
2766 }
2767 else
2768 address = orig;
2769
2770 pic_ref = gen_rtx_MEM (Pmode,
2771 gen_rtx_PLUS (Pmode,
2772 pic_offset_table_rtx, address));
2773 current_function_uses_pic_offset_table = 1;
2774 RTX_UNCHANGING_P (pic_ref) = 1;
2775 insn = emit_move_insn (reg, pic_ref);
2776 /* Put a REG_EQUAL note on this insn, so that it can be optimized
2777 by loop. */
2778 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
2779 REG_NOTES (insn));
2780 return reg;
2781 }
2782 else if (GET_CODE (orig) == CONST)
2783 {
2784 rtx base, offset;
2785
2786 if (GET_CODE (XEXP (orig, 0)) == PLUS
2787 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
2788 return orig;
2789
2790 if (reg == 0)
2791 {
2792 if (reload_in_progress || reload_completed)
2793 abort ();
2794 else
2795 reg = gen_reg_rtx (Pmode);
2796 }
2797
2798 if (GET_CODE (XEXP (orig, 0)) == PLUS)
2799 {
2800 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
2801 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
2802 base == reg ? 0 : reg);
2803 }
2804 else
2805 abort ();
2806
2807 if (GET_CODE (offset) == CONST_INT)
2808 {
2809 if (SMALL_INT (offset))
2810 return plus_constant_for_output (base, INTVAL (offset));
2811 else if (! reload_in_progress && ! reload_completed)
2812 offset = force_reg (Pmode, offset);
2813 else
2814 /* If we reach here, then something is seriously wrong. */
2815 abort ();
2816 }
2817 return gen_rtx_PLUS (Pmode, base, offset);
2818 }
2819 else if (GET_CODE (orig) == LABEL_REF)
2820 /* ??? Why do we do this? */
2821 /* Now movsi_pic_label_ref uses it, but we ought to be checking that
2822 the register is live instead, in case it is eliminated. */
2823 current_function_uses_pic_offset_table = 1;
2824
2825 return orig;
2826 }
2827
2828 /* Return the RTX for insns to set the PIC register. */
2829
2830 static rtx
2831 pic_setup_code ()
2832 {
2833 rtx seq;
2834
2835 start_sequence ();
2836 emit_insn (gen_get_pc (pic_offset_table_rtx, global_offset_table,
2837 get_pc_symbol));
2838 seq = gen_sequence ();
2839 end_sequence ();
2840
2841 return seq;
2842 }
2843
2844 /* Emit special PIC prologues and epilogues. */
2845
2846 void
2847 finalize_pic ()
2848 {
2849 /* Labels to get the PC in the prologue of this function. */
2850 int orig_flag_pic = flag_pic;
2851 rtx insn;
2852
2853 if (current_function_uses_pic_offset_table == 0)
2854 return;
2855
2856 if (! flag_pic)
2857 abort ();
2858
2859 /* If we haven't emitted the special get_pc helper function, do so now. */
2860 if (get_pc_symbol_name[0] == 0)
2861 {
2862 int align;
2863
2864 ASM_GENERATE_INTERNAL_LABEL (get_pc_symbol_name, "LGETPC", 0);
2865 text_section ();
2866
2867 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
2868 if (align > 0)
2869 ASM_OUTPUT_ALIGN (asm_out_file, align);
2870 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "LGETPC", 0);
2871 fputs ("\tretl\n\tadd %o7,%l7,%l7\n", asm_out_file);
2872 }
2873
2874 /* Initialize every time through, since we can't easily
2875 know this to be permanent. */
2876 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
2877 get_pc_symbol = gen_rtx_SYMBOL_REF (Pmode, get_pc_symbol_name);
2878 flag_pic = 0;
2879
2880 emit_insn_after (pic_setup_code (), get_insns ());
2881
2882 /* Insert the code in each nonlocal goto receiver.
2883 If you make changes here or to the nonlocal_goto_receiver
2884 pattern, make sure the unspec_volatile numbers still
2885 match. */
2886 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
2887 if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
2888 && XINT (PATTERN (insn), 1) == 5)
2889 emit_insn_after (pic_setup_code (), insn);
2890
2891 flag_pic = orig_flag_pic;
2892
2893 /* Need to emit this whether or not we obey regdecls,
2894 since setjmp/longjmp can cause life info to screw up.
2895 ??? In the case where we don't obey regdecls, this is not sufficient
2896 since we may not fall out the bottom. */
2897 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
2898 }
2899 \f
2900 /* Return 1 if RTX is a MEM which is known to be aligned to at
2901 least a DESIRED byte boundary. */
2902
2903 int
2904 mem_min_alignment (mem, desired)
2905 rtx mem;
2906 int desired;
2907 {
2908 rtx addr, base, offset;
2909
2910 /* If it's not a MEM we can't accept it. */
2911 if (GET_CODE (mem) != MEM)
2912 return 0;
2913
2914 addr = XEXP (mem, 0);
2915 base = offset = NULL_RTX;
2916 if (GET_CODE (addr) == PLUS)
2917 {
2918 if (GET_CODE (XEXP (addr, 0)) == REG)
2919 {
2920 base = XEXP (addr, 0);
2921
2922 /* What we are saying here is that if the base
2923 REG is aligned properly, the compiler will make
2924 sure any REG based index upon it will be so
2925 as well. */
2926 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
2927 offset = XEXP (addr, 1);
2928 else
2929 offset = const0_rtx;
2930 }
2931 }
2932 else if (GET_CODE (addr) == REG)
2933 {
2934 base = addr;
2935 offset = const0_rtx;
2936 }
2937
2938 if (base != NULL_RTX)
2939 {
2940 int regno = REGNO (base);
2941
2942 if (regno != FRAME_POINTER_REGNUM
2943 && regno != STACK_POINTER_REGNUM)
2944 {
2945 /* Check if the compiler has recorded some information
2946 about the alignment of the base REG. If reload has
2947 completed, we already matched with proper alignments.
2948 If not running global_alloc, reload might give us an
2949 unaligned pointer to the local stack, though. */
2950 if (((cfun != 0
2951 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
2952 || (optimize && reload_completed))
2953 && (INTVAL (offset) & (desired - 1)) == 0)
2954 return 1;
2955 }
2956 else
2957 {
2958 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
2959 return 1;
2960 }
2961 }
2962 else if (! TARGET_UNALIGNED_DOUBLES
2963 || CONSTANT_P (addr)
2964 || GET_CODE (addr) == LO_SUM)
2965 {
2966 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
2967 is true, in which case we can only assume that an access is aligned if
2968 it is to a constant address, or the address involves a LO_SUM. */
2969 return 1;
2970 }
2971
2972 /* An obviously unaligned address. */
2973 return 0;
2974 }
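/* Illustrative example (not part of the original source, assuming a
   32-bit target where SPARC_STACK_BIAS == 0): for
   (mem:DF (plus (reg %fp) (const_int -8))) with DESIRED == 8, the base
   is the frame pointer and ((-8 - 0) & 7) == 0, so we return 1.  */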
2975
2976 \f
2977 /* Vectors to keep interesting information about registers where it can easily
2978 be got. We used to use the actual mode value as the bit number, but there
2979 are more than 32 modes now. Instead we use two tables: one indexed by
2980 hard register number, and one indexed by mode. */
2981
2982 /* The purpose of sparc_mode_class is to shrink the range of modes so that
2983 they all fit (as bit numbers) in a 32 bit word (again). Each real mode is
2984 mapped into one sparc_mode_class mode. */
2985
2986 enum sparc_mode_class {
2987 S_MODE, D_MODE, T_MODE, O_MODE,
2988 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
2989 CC_MODE, CCFP_MODE
2990 };
2991
2992 /* Modes for single-word and smaller quantities. */
2993 #define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
2994
2995 /* Modes for double-word and smaller quantities. */
2996 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << DF_MODE))
2997
2998 /* Modes for quad-word and smaller quantities. */
2999 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
3000
3001 /* Modes for 8-word and smaller quantities. */
3002 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
3003
3004 /* Modes for single-float quantities. We must allow any single word or
3005 smaller quantity. This is because the fix/float conversion instructions
3006 take integer inputs/outputs from the float registers. */
3007 #define SF_MODES (S_MODES)
3008
3009 /* Modes for double-float and smaller quantities. */
3010 #define DF_MODES (S_MODES | D_MODES)
3011
3012 /* Modes for double-float only quantities. */
3013 #define DF_MODES_NO_S (D_MODES)
3014
3015 /* Modes for quad-float only quantities. */
3016 #define TF_ONLY_MODES (1 << (int) TF_MODE)
3017
3018 /* Modes for quad-float and smaller quantities. */
3019 #define TF_MODES (DF_MODES | TF_ONLY_MODES)
3020
3021 /* Modes for quad-float and double-float quantities. */
3022 #define TF_MODES_NO_S (DF_MODES_NO_S | TF_ONLY_MODES)
3023
3024 /* Modes for quad-float pair only quantities. */
3025 #define OF_ONLY_MODES (1 << (int) OF_MODE)
3026
3027 /* Modes for quad-float pairs and smaller quantities. */
3028 #define OF_MODES (TF_MODES | OF_ONLY_MODES)
3029
3030 #define OF_MODES_NO_S (TF_MODES_NO_S | OF_ONLY_MODES)
3031
3032 /* Modes for condition codes. */
3033 #define CC_MODES (1 << (int) CC_MODE)
3034 #define CCFP_MODES (1 << (int) CCFP_MODE)
3035
3036 /* Value is 1 if register/mode pair is acceptable on sparc.
3037 The funny mixture of D and T modes is because integer operations
3038 do not specially operate on tetra quantities, so non-quad-aligned
3039 registers can hold quadword quantities (except %o4 and %i4 because
3040 they cross fixed registers). */
3041
3042 /* This points to either the 32 bit or the 64 bit version. */
3043 int *hard_regno_mode_classes;
3044
3045 static int hard_32bit_mode_classes[] = {
3046 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3047 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3048 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3049 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3050
3051 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3052 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3053 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3054 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3055
3056 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3057 and none can hold SFmode/SImode values. */
3058 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3059 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3060 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3061 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3062
3063 /* %fcc[0123] */
3064 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3065
3066 /* %icc */
3067 CC_MODES
3068 };
3069
3070 static int hard_64bit_mode_classes[] = {
3071 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3072 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3073 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3074 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3075
3076 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3077 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3078 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3079 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3080
3081 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3082 and none can hold SFmode/SImode values. */
3083 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3084 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3085 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3086 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3087
3088 /* %fcc[0123] */
3089 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3090
3091 /* %icc */
3092 CC_MODES
3093 };
3094
3095 int sparc_mode_class [NUM_MACHINE_MODES];
3096
3097 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
3098
3099 static void
3100 sparc_init_modes ()
3101 {
3102 int i;
3103
3104 for (i = 0; i < NUM_MACHINE_MODES; i++)
3105 {
3106 switch (GET_MODE_CLASS (i))
3107 {
3108 case MODE_INT:
3109 case MODE_PARTIAL_INT:
3110 case MODE_COMPLEX_INT:
3111 if (GET_MODE_SIZE (i) <= 4)
3112 sparc_mode_class[i] = 1 << (int) S_MODE;
3113 else if (GET_MODE_SIZE (i) == 8)
3114 sparc_mode_class[i] = 1 << (int) D_MODE;
3115 else if (GET_MODE_SIZE (i) == 16)
3116 sparc_mode_class[i] = 1 << (int) T_MODE;
3117 else if (GET_MODE_SIZE (i) == 32)
3118 sparc_mode_class[i] = 1 << (int) O_MODE;
3119 else
3120 sparc_mode_class[i] = 0;
3121 break;
3122 case MODE_FLOAT:
3123 case MODE_COMPLEX_FLOAT:
3124 if (GET_MODE_SIZE (i) <= 4)
3125 sparc_mode_class[i] = 1 << (int) SF_MODE;
3126 else if (GET_MODE_SIZE (i) == 8)
3127 sparc_mode_class[i] = 1 << (int) DF_MODE;
3128 else if (GET_MODE_SIZE (i) == 16)
3129 sparc_mode_class[i] = 1 << (int) TF_MODE;
3130 else if (GET_MODE_SIZE (i) == 32)
3131 sparc_mode_class[i] = 1 << (int) OF_MODE;
3132 else
3133 sparc_mode_class[i] = 0;
3134 break;
3135 case MODE_CC:
3136 default:
3137 /* mode_class hasn't been initialized yet for EXTRA_CC_MODES, so
3138 we must explicitly check for them here. */
3139 if (i == (int) CCFPmode || i == (int) CCFPEmode)
3140 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
3141 else if (i == (int) CCmode || i == (int) CC_NOOVmode
3142 || i == (int) CCXmode || i == (int) CCX_NOOVmode)
3143 sparc_mode_class[i] = 1 << (int) CC_MODE;
3144 else
3145 sparc_mode_class[i] = 0;
3146 break;
3147 }
3148 }
3149
3150 if (TARGET_ARCH64)
3151 hard_regno_mode_classes = hard_64bit_mode_classes;
3152 else
3153 hard_regno_mode_classes = hard_32bit_mode_classes;
3154
3155 /* Initialize the array used by REGNO_REG_CLASS. */
3156 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3157 {
3158 if (i < 16 && TARGET_V8PLUS)
3159 sparc_regno_reg_class[i] = I64_REGS;
3160 else if (i < 32)
3161 sparc_regno_reg_class[i] = GENERAL_REGS;
3162 else if (i < 64)
3163 sparc_regno_reg_class[i] = FP_REGS;
3164 else if (i < 96)
3165 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
3166 else if (i < 100)
3167 sparc_regno_reg_class[i] = FPCC_REGS;
3168 else
3169 sparc_regno_reg_class[i] = NO_REGS;
3170 }
3171 }
3172 \f
3173 /* Save non-call-used registers from LOW to HIGH at BASE+OFFSET.
3174 N_REGS is the number of 4-byte regs saved thus far. This applies even to
3175 v9 int regs as it simplifies the code. */
3176
3177 static int
3178 save_regs (file, low, high, base, offset, n_regs, real_offset)
3179 FILE *file;
3180 int low, high;
3181 const char *base;
3182 int offset;
3183 int n_regs;
3184 int real_offset;
3185 {
3186 int i;
3187
3188 if (TARGET_ARCH64 && high <= 32)
3189 {
3190 for (i = low; i < high; i++)
3191 {
3192 if (regs_ever_live[i] && ! call_used_regs[i])
3193 {
3194 fprintf (file, "\tstx\t%s, [%s+%d]\n",
3195 reg_names[i], base, offset + 4 * n_regs);
3196 if (dwarf2out_do_frame ())
3197 dwarf2out_reg_save ("", i, real_offset + 4 * n_regs);
3198 n_regs += 2;
3199 }
3200 }
3201 }
3202 else
3203 {
3204 for (i = low; i < high; i += 2)
3205 {
3206 if (regs_ever_live[i] && ! call_used_regs[i])
3207 {
3208 if (regs_ever_live[i+1] && ! call_used_regs[i+1])
3209 {
3210 fprintf (file, "\tstd\t%s, [%s+%d]\n",
3211 reg_names[i], base, offset + 4 * n_regs);
3212 if (dwarf2out_do_frame ())
3213 {
3214 char *l = dwarf2out_cfi_label ();
3215 dwarf2out_reg_save (l, i, real_offset + 4 * n_regs);
3216 dwarf2out_reg_save (l, i+1, real_offset + 4 * n_regs + 4);
3217 }
3218 n_regs += 2;
3219 }
3220 else
3221 {
3222 fprintf (file, "\tst\t%s, [%s+%d]\n",
3223 reg_names[i], base, offset + 4 * n_regs);
3224 if (dwarf2out_do_frame ())
3225 dwarf2out_reg_save ("", i, real_offset + 4 * n_regs);
3226 n_regs += 2;
3227 }
3228 }
3229 else
3230 {
3231 if (regs_ever_live[i+1] && ! call_used_regs[i+1])
3232 {
3233 fprintf (file, "\tst\t%s, [%s+%d]\n",
3234 reg_names[i+1], base, offset + 4 * n_regs + 4);
3235 if (dwarf2out_do_frame ())
3236 dwarf2out_reg_save ("", i + 1, real_offset + 4 * n_regs + 4);
3237 n_regs += 2;
3238 }
3239 }
3240 }
3241 }
3242 return n_regs;
3243 }
3244
3245 /* Restore non-call-used registers from LOW to HIGH at BASE+OFFSET.
3246
3247 N_REGS is the number of 4-byte regs saved thus far. This applies even to
3248 v9 int regs as it simplifies the code. */
3249
3250 static int
3251 restore_regs (file, low, high, base, offset, n_regs)
3252 FILE *file;
3253 int low, high;
3254 const char *base;
3255 int offset;
3256 int n_regs;
3257 {
3258 int i;
3259
3260 if (TARGET_ARCH64 && high <= 32)
3261 {
3262 for (i = low; i < high; i++)
3263 {
3264 if (regs_ever_live[i] && ! call_used_regs[i])
3265 {
3266 fprintf (file, "\tldx\t[%s+%d], %s\n",
3267 base, offset + 4 * n_regs, reg_names[i]);
n_regs += 2;
}
3268 }
3269 }
3270 else
3271 {
3272 for (i = low; i < high; i += 2)
3273 {
3274 if (regs_ever_live[i] && ! call_used_regs[i])
3275 {
3276 if (regs_ever_live[i+1] && ! call_used_regs[i+1])
3277 fprintf (file, "\tldd\t[%s+%d], %s\n",
3278 base, offset + 4 * n_regs, reg_names[i]);
3279 else
3280 fprintf (file, "\tld\t[%s+%d],%s\n",
3281 base, offset + 4 * n_regs, reg_names[i]);
3282 n_regs += 2;
3283 }
3284 else if (regs_ever_live[i+1] && ! call_used_regs[i+1])
3285 {
3286 fprintf (file, "\tld\t[%s+%d],%s\n",
base, offset + 4 * n_regs + 4, reg_names[i+1]);
n_regs += 2;
}
3287 }
3288 }
3289 return n_regs;
3290 }
3291
3292 /* Compute the frame size required by the function. This function is called
3293 during the reload pass and also by output_function_prologue(). */
3294
3295 int
3296 compute_frame_size (size, leaf_function)
3297 int size;
3298 int leaf_function;
3299 {
3300 int n_regs = 0, i;
3301 int outgoing_args_size = (current_function_outgoing_args_size
3302 + REG_PARM_STACK_SPACE (current_function_decl));
3303
3304 if (TARGET_EPILOGUE)
3305 {
3306 /* N_REGS is the number of 4-byte regs saved thus far. This applies
3307 even to v9 int regs to be consistent with save_regs/restore_regs. */
3308
3309 if (TARGET_ARCH64)
3310 {
3311 for (i = 0; i < 8; i++)
3312 if (regs_ever_live[i] && ! call_used_regs[i])
3313 n_regs += 2;
3314 }
3315 else
3316 {
3317 for (i = 0; i < 8; i += 2)
3318 if ((regs_ever_live[i] && ! call_used_regs[i])
3319 || (regs_ever_live[i+1] && ! call_used_regs[i+1]))
3320 n_regs += 2;
3321 }
3322
3323 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
3324 if ((regs_ever_live[i] && ! call_used_regs[i])
3325 || (regs_ever_live[i+1] && ! call_used_regs[i+1]))
3326 n_regs += 2;
3327 }
3328
3329 /* Set up values for use in `function_epilogue'. */
3330 num_gfregs = n_regs;
3331
3332 if (leaf_function && n_regs == 0
3333 && size == 0 && current_function_outgoing_args_size == 0)
3334 {
3335 actual_fsize = apparent_fsize = 0;
3336 }
3337 else
3338 {
3339 /* We subtract STARTING_FRAME_OFFSET, remember it's negative.
3340 The stack bias (if any) is taken out to undo its effects. */
3341 apparent_fsize = (size - STARTING_FRAME_OFFSET + SPARC_STACK_BIAS + 7) & -8;
3342 apparent_fsize += n_regs * 4;
3343 actual_fsize = apparent_fsize + ((outgoing_args_size + 7) & -8);
3344 }
3345
3346 /* Make sure nothing can clobber our register windows.
3347 If a SAVE must be done, or there is a stack-local variable,
3348 the register window area must be allocated.
3349 ??? For v8 we apparently need an additional 8 bytes of reserved space. */
3350 if (leaf_function == 0 || size > 0)
3351 actual_fsize += (16 * UNITS_PER_WORD) + (TARGET_ARCH64 ? 0 : 8);
3352
3353 return SPARC_STACK_ALIGN (actual_fsize);
3354 }
3355
3356 /* Build a (32 bit) big number in a register. */
3357 /* ??? We may be able to use the set macro here too. */
3358
3359 static void
3360 build_big_number (file, num, reg)
3361 FILE *file;
3362 int num;
3363 const char *reg;
3364 {
3365 if (num >= 0 || ! TARGET_ARCH64)
3366 {
3367 fprintf (file, "\tsethi\t%%hi(%d), %s\n", num, reg);
3368 if ((num & 0x3ff) != 0)
3369 fprintf (file, "\tor\t%s, %%lo(%d), %s\n", reg, num, reg);
3370 }
3371 else /* num < 0 && TARGET_ARCH64 */
3372 {
3373 /* Sethi does not sign extend, so we must use a little trickery
3374 to use it for negative numbers. Invert the constant before
3375 loading it in, then use xor immediate to invert the loaded bits
3376 (along with the upper 32 bits) to the desired constant. This
3377 works because the sethi and immediate fields overlap. */
3378 int asize = num;
3379 int inv = ~asize;
3380 int low = -0x400 + (asize & 0x3FF);
3381
3382 fprintf (file, "\tsethi\t%%hi(%d), %s\n\txor\t%s, %d, %s\n",
3383 inv, reg, reg, low, reg);
3384 }
3385 }
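/* Worked example of the trick above (illustrative, not part of the
   original source): num == -0x12345678 gives inv == 0x12345677 and
   low == -0x400 + 0x188 == -632, so we emit
	sethi	%hi(0x12345677), %g1	! %g1 = 0x12345400
	xor	%g1, -632, %g1
   and xor'ing with the sign-extended -632 (0xff...fd88) yields
   0xffffffffedcba988, i.e. num.  */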
3386
3387 /* Output any necessary .register pseudo-ops. */
3388 void
3389 sparc_output_scratch_registers (file)
3390 FILE *file ATTRIBUTE_UNUSED;
3391 {
3392 #ifdef HAVE_AS_REGISTER_PSEUDO_OP
3393 int i;
3394
3395 if (TARGET_ARCH32)
3396 return;
3397
3398 /* Check if %g[2367] were used without
3399 .register being printed for them already. */
3400 for (i = 2; i < 8; i++)
3401 {
3402 if (regs_ever_live [i]
3403 && ! sparc_hard_reg_printed [i])
3404 {
3405 sparc_hard_reg_printed [i] = 1;
3406 fprintf (file, "\t.register\t%%g%d, #scratch\n", i);
3407 }
3408 if (i == 3) i = 5; /* Skip %g4 and %g5. */
3409 }
3410 #endif
3411 }
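/* For example (illustrative): if %g2 and %g7 are live in a 64-bit
   function, the loop above emits
	.register	%g2, #scratch
	.register	%g7, #scratch  */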
3412
3413 /* Output code for the function prologue. */
3414
3415 void
3416 output_function_prologue (file, size, leaf_function)
3417 FILE *file;
3418 int size;
3419 int leaf_function;
3420 {
3421 sparc_output_scratch_registers (file);
3422
3423 /* Need to use actual_fsize, since we are also allocating
3424 space for our callee (and our own register save area). */
3425 actual_fsize = compute_frame_size (size, leaf_function);
3426
3427 if (leaf_function)
3428 {
3429 frame_base_name = "%sp";
3430 frame_base_offset = actual_fsize + SPARC_STACK_BIAS;
3431 }
3432 else
3433 {
3434 frame_base_name = "%fp";
3435 frame_base_offset = SPARC_STACK_BIAS;
3436 }
3437
3438 /* This is only for the human reader. */
3439 fprintf (file, "\t%s#PROLOGUE# 0\n", ASM_COMMENT_START);
3440
3441 if (actual_fsize == 0)
3442 /* do nothing. */ ;
3443 else if (! leaf_function)
3444 {
3445 if (actual_fsize <= 4096)
3446 fprintf (file, "\tsave\t%%sp, -%d, %%sp\n", actual_fsize);
3447 else if (actual_fsize <= 8192)
3448 {
3449 fprintf (file, "\tsave\t%%sp, -4096, %%sp\n");
3450 fprintf (file, "\tadd\t%%sp, -%d, %%sp\n", actual_fsize - 4096);
3451 }
3452 else
3453 {
3454 build_big_number (file, -actual_fsize, "%g1");
3455 fprintf (file, "\tsave\t%%sp, %%g1, %%sp\n");
3456 }
3457 }
3458 else /* leaf function */
3459 {
3460 if (actual_fsize <= 4096)
3461 fprintf (file, "\tadd\t%%sp, -%d, %%sp\n", actual_fsize);
3462 else if (actual_fsize <= 8192)
3463 {
3464 fprintf (file, "\tadd\t%%sp, -4096, %%sp\n");
3465 fprintf (file, "\tadd\t%%sp, -%d, %%sp\n", actual_fsize - 4096);
3466 }
3467 else
3468 {
3469 build_big_number (file, -actual_fsize, "%g1");
3470 fprintf (file, "\tadd\t%%sp, %%g1, %%sp\n");
3471 }
3472 }
3473
3474 if (dwarf2out_do_frame () && actual_fsize)
3475 {
3476 char *label = dwarf2out_cfi_label ();
3477
3478 /* The canonical frame address refers to the top of the frame. */
3479 dwarf2out_def_cfa (label, (leaf_function ? STACK_POINTER_REGNUM
3480 : FRAME_POINTER_REGNUM),
3481 frame_base_offset);
3482
3483 if (! leaf_function)
3484 {
3485 /* Note the register window save. This tells the unwinder that
3486 it needs to restore the window registers from the previous
3487 frame's window save area at 0(cfa). */
3488 dwarf2out_window_save (label);
3489
3490 /* The return address (-8) is now in %i7. */
3491 dwarf2out_return_reg (label, 31);
3492 }
3493 }
3494
3495 /* If doing anything with PIC, do it now. */
3496 if (! flag_pic)
3497 fprintf (file, "\t%s#PROLOGUE# 1\n", ASM_COMMENT_START);
3498
3499 /* Call saved registers are saved just above the outgoing argument area. */
3500 if (num_gfregs)
3501 {
3502 int offset, real_offset, n_regs;
3503 const char *base;
3504
3505 real_offset = -apparent_fsize;
3506 offset = -apparent_fsize + frame_base_offset;
3507 if (offset < -4096 || offset + num_gfregs * 4 > 4096)
3508 {
3509 /* ??? This might be optimized a little as %g1 might already have a
3510 value close enough that a single add insn will do. */
3511 /* ??? Although, all of this is probably only a temporary fix
3512 because if %g1 can hold a function result, then
3513 output_function_epilogue will lose (the result will get
3514 clobbered). */
3515 build_big_number (file, offset, "%g1");
3516 fprintf (file, "\tadd\t%s, %%g1, %%g1\n", frame_base_name);
3517 base = "%g1";
3518 offset = 0;
3519 }
3520 else
3521 {
3522 base = frame_base_name;
3523 }
3524
3525 n_regs = 0;
3526 if (TARGET_EPILOGUE && ! leaf_function)
3527 /* ??? Originally saved regs 0-15 here. */
3528 n_regs = save_regs (file, 0, 8, base, offset, 0, real_offset);
3529 else if (leaf_function)
3530 /* ??? Originally saved regs 0-31 here. */
3531 n_regs = save_regs (file, 0, 8, base, offset, 0, real_offset);
3532 if (TARGET_EPILOGUE)
3533 save_regs (file, 32, TARGET_V9 ? 96 : 64, base, offset, n_regs,
3534 real_offset);
3535 }
3536
3537 leaf_label = 0;
3538 if (leaf_function && actual_fsize != 0)
3539 {
3540 /* warning ("leaf procedure with frame size %d", actual_fsize); */
3541 if (! TARGET_EPILOGUE)
3542 leaf_label = gen_label_rtx ();
3543 }
3544 }
3545
3546 /* Output code to restore any call saved registers. */
3547
3548 static void
3549 output_restore_regs (file, leaf_function)
3550 FILE *file;
3551 int leaf_function;
3552 {
3553 int offset, n_regs;
3554 const char *base;
3555
3556 offset = -apparent_fsize + frame_base_offset;
3557 if (offset < -4096 || offset + num_gfregs * 4 > 4096 - 8 /*double*/)
3558 {
3559 build_big_number (file, offset, "%g1");
3560 fprintf (file, "\tadd\t%s, %%g1, %%g1\n", frame_base_name);
3561 base = "%g1";
3562 offset = 0;
3563 }
3564 else
3565 {
3566 base = frame_base_name;
3567 }
3568
3569 n_regs = 0;
3570 if (TARGET_EPILOGUE && ! leaf_function)
3571 /* ??? Originally saved regs 0-15 here. */
3572 n_regs = restore_regs (file, 0, 8, base, offset, 0);
3573 else if (leaf_function)
3574 /* ??? Originally saved regs 0-31 here. */
3575 n_regs = restore_regs (file, 0, 8, base, offset, 0);
3576 if (TARGET_EPILOGUE)
3577 restore_regs (file, 32, TARGET_V9 ? 96 : 64, base, offset, n_regs);
3578 }
3579
3580 /* Output code for the function epilogue. */
3581
3582 void
3583 output_function_epilogue (file, size, leaf_function)
3584 FILE *file;
3585 int size ATTRIBUTE_UNUSED;
3586 int leaf_function;
3587 {
3588 const char *ret;
3589
3590 if (leaf_label)
3591 {
3592 emit_label_after (leaf_label, get_last_insn ());
3593 final_scan_insn (get_last_insn (), file, 0, 0, 1);
3594 }
3595
3596 #ifdef FUNCTION_BLOCK_PROFILER_EXIT
3597 else if (profile_block_flag == 2)
3598 {
3599 FUNCTION_BLOCK_PROFILER_EXIT(file);
3600 }
3601 #endif
3602
3603 else if (current_function_epilogue_delay_list == 0)
3604 {
3605 /* If code does not drop into the epilogue, we need
3606 do nothing except output pending case vectors. */
3607 rtx insn = get_last_insn ();
3608 if (GET_CODE (insn) == NOTE)
3609 insn = prev_nonnote_insn (insn);
3610 if (insn && GET_CODE (insn) == BARRIER)
3611 goto output_vectors;
3612 }
3613
3614 if (num_gfregs)
3615 output_restore_regs (file, leaf_function);
3616
3617 /* Work out how to skip the caller's unimp instruction if required. */
3618 if (leaf_function)
3619 ret = (SKIP_CALLERS_UNIMP_P ? "jmp\t%o7+12" : "retl");
3620 else
3621 ret = (SKIP_CALLERS_UNIMP_P ? "jmp\t%i7+12" : "ret");
3622
3623 if (TARGET_EPILOGUE || leaf_label)
3624 {
3625 int old_target_epilogue = TARGET_EPILOGUE;
3626 target_flags &= ~old_target_epilogue;
3627
3628 if (! leaf_function)
3629 {
3630 /* If we wound up with things in our delay slot, flush them here. */
3631 if (current_function_epilogue_delay_list)
3632 {
3633 rtx delay = PATTERN (XEXP (current_function_epilogue_delay_list, 0));
3634
3635 if (TARGET_V9 && ! epilogue_renumber (&delay, 1))
3636 {
3637 epilogue_renumber (&delay, 0);
3638 fputs (SKIP_CALLERS_UNIMP_P
3639 ? "\treturn\t%i7+12\n"
3640 : "\treturn\t%i7+8\n", file);
3641 final_scan_insn (XEXP (current_function_epilogue_delay_list, 0), file, 1, 0, 0);
3642 }
3643 else
3644 {
3645 rtx insn = emit_jump_insn_after (gen_rtx_RETURN (VOIDmode),
3646 get_last_insn ());
3647 rtx src;
3648
3649 if (GET_CODE (delay) != SET)
3650 abort();
3651
3652 src = SET_SRC (delay);
3653 if (GET_CODE (src) == ASHIFT)
3654 {
3655 if (XEXP (src, 1) != const1_rtx)
3656 abort();
3657 SET_SRC (delay) = gen_rtx_PLUS (GET_MODE (src), XEXP (src, 0),
3658 XEXP (src, 0));
3659 }
3660
3661 PATTERN (insn) = gen_rtx_PARALLEL (VOIDmode,
3662 gen_rtvec (2, delay, PATTERN (insn)));
3663 final_scan_insn (insn, file, 1, 0, 1);
3664 }
3665 }
3666 else if (TARGET_V9 && ! SKIP_CALLERS_UNIMP_P)
3667 fputs ("\treturn\t%i7+8\n\tnop\n", file);
3668 else
3669 fprintf (file, "\t%s\n\trestore\n", ret);
3670 }
3671 /* All of the following cases are for leaf functions. */
3672 else if (current_function_epilogue_delay_list)
3673 {
3674 /* eligible_for_epilogue_delay ensures that if this is a
3675 leaf function, then we will only have an insn in the delay slot
3676 if the frame size is zero, thus no adjustment for the stack is
3677 needed here. */
3678 if (actual_fsize != 0)
3679 abort ();
3680 fprintf (file, "\t%s\n", ret);
3681 final_scan_insn (XEXP (current_function_epilogue_delay_list, 0),
3682 file, 1, 0, 1);
3683 }
3684 /* Output 'nop' instead of 'sub %sp,-0,%sp' when no frame, so as to
3685 avoid generating confusing assembly language output. */
3686 else if (actual_fsize == 0)
3687 fprintf (file, "\t%s\n\tnop\n", ret);
3688 else if (actual_fsize <= 4096)
3689 fprintf (file, "\t%s\n\tsub\t%%sp, -%d, %%sp\n", ret, actual_fsize);
3690 else if (actual_fsize <= 8192)
3691 fprintf (file, "\tsub\t%%sp, -4096, %%sp\n\t%s\n\tsub\t%%sp, -%d, %%sp\n",
3692 ret, actual_fsize - 4096);
3693 else if ((actual_fsize & 0x3ff) == 0)
3694 fprintf (file, "\tsethi\t%%hi(%d), %%g1\n\t%s\n\tadd\t%%sp, %%g1, %%sp\n",
3695 actual_fsize, ret);
3696 else
3697 fprintf (file, "\tsethi\t%%hi(%d), %%g1\n\tor\t%%g1, %%lo(%d), %%g1\n\t%s\n\tadd\t%%sp, %%g1, %%sp\n",
3698 actual_fsize, actual_fsize, ret);
3699 target_flags |= old_target_epilogue;
3700 }
3701
3702 output_vectors:
3703 sparc_output_deferred_case_vectors ();
3704 }
3705
3706 /* Output a sibling call. */
3707
3708 const char *
3709 output_sibcall (insn, call_operand)
3710 rtx insn, call_operand;
3711 {
3712 int leaf_regs = current_function_uses_only_leaf_regs;
3713 rtx operands[3];
3714 int delay_slot = dbr_sequence_length () > 0;
3715
3716 if (num_gfregs)
3717 {
3718 /* The code emitted to restore global regs might clobber
3719 the delay slot. Instead of checking for this,
3720 output the delay slot now. */
3721 if (delay_slot)
3722 {
3723 rtx delay = NEXT_INSN (insn);
3724
3725 if (! delay)
3726 abort ();
3727
3728 final_scan_insn (delay, asm_out_file, 1, 0, 1);
3729 PATTERN (delay) = gen_blockage ();
3730 INSN_CODE (delay) = -1;
3731 delay_slot = 0;
3732 }
3733 output_restore_regs (asm_out_file, leaf_regs);
3734 }
3735
3736 operands[0] = call_operand;
3737
3738 if (leaf_regs)
3739 {
3740 #ifdef HAVE_AS_RELAX_OPTION
3741 /* If as and ld are relaxing tail call insns into branch always,
3742 use or %o7,%g0,X; call Y; or X,%g0,%o7 always, so that it can
3743 be optimized. With sethi/jmpl, neither as nor ld has an easy way
3744 to find out whether somebody branches between the sethi and jmpl. */
3745 int spare_slot = 0;
3746 #else
3747 int spare_slot = ((TARGET_ARCH32 || TARGET_CM_MEDLOW) && ! flag_pic);
3748 #endif
3749 int size = 0;
3750
3751 if ((actual_fsize || ! spare_slot) && delay_slot)
3752 {
3753 rtx delay = NEXT_INSN (insn);
3754
3755 if (! delay)
3756 abort ();
3757
3758 final_scan_insn (delay, asm_out_file, 1, 0, 1);
3759 PATTERN (delay) = gen_blockage ();
3760 INSN_CODE (delay) = -1;
3761 delay_slot = 0;
3762 }
3763 if (actual_fsize)
3764 {
3765 if (actual_fsize <= 4096)
3766 size = actual_fsize;
3767 else if (actual_fsize <= 8192)
3768 {
3769 fputs ("\tsub\t%sp, -4096, %sp\n", asm_out_file);
3770 size = actual_fsize - 4096;
3771 }
3772 else if ((actual_fsize & 0x3ff) == 0)
3773 fprintf (asm_out_file,
3774 "\tsethi\t%%hi(%d), %%g1\n\tadd\t%%sp, %%g1, %%sp\n",
3775 actual_fsize);
3776 else
3777 {
3778 fprintf (asm_out_file,
3779 "\tsethi\t%%hi(%d), %%g1\n\tor\t%%g1, %%lo(%d), %%g1\n",
3780 actual_fsize, actual_fsize);
3781 fputs ("\tadd\t%sp, %g1, %sp\n", asm_out_file);
3782 }
3783 }
3784 if (spare_slot)
3785 {
3786 output_asm_insn ("sethi\t%%hi(%a0), %%g1", operands);
3787 output_asm_insn ("jmpl\t%%g1 + %%lo(%a0), %%g0", operands);
3788 if (size)
3789 fprintf (asm_out_file, "\t sub\t%%sp, -%d, %%sp\n", size);
3790 else if (! delay_slot)
3791 fputs ("\t nop\n", asm_out_file);
3792 }
3793 else
3794 {
3795 if (size)
3796 fprintf (asm_out_file, "\tsub\t%%sp, -%d, %%sp\n", size);
3797 /* Use or with rs2 %g0 instead of mov, so that as/ld can optimize
3798 it into a branch if possible. */
3799 output_asm_insn ("or\t%%o7, %%g0, %%g1", operands);
3800 output_asm_insn ("call\t%a0, 0", operands);
3801 output_asm_insn (" or\t%%g1, %%g0, %%o7", operands);
3802 }
3803 return "";
3804 }
3805
3806 output_asm_insn ("call\t%a0, 0", operands);
3807 if (delay_slot)
3808 {
3809 rtx delay = NEXT_INSN (insn), pat;
3810
3811 if (! delay)
3812 abort ();
3813
3814 pat = PATTERN (delay);
3815 if (GET_CODE (pat) != SET)
3816 abort ();
3817
3818 operands[0] = SET_DEST (pat);
3819 pat = SET_SRC (pat);
3820 switch (GET_CODE (pat))
3821 {
3822 case PLUS:
3823 operands[1] = XEXP (pat, 0);
3824 operands[2] = XEXP (pat, 1);
3825 output_asm_insn (" restore %r1, %2, %Y0", operands);
3826 break;
3827 case LO_SUM:
3828 operands[1] = XEXP (pat, 0);
3829 operands[2] = XEXP (pat, 1);
3830 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
3831 break;
3832 case ASHIFT:
3833 operands[1] = XEXP (pat, 0);
3834 output_asm_insn (" restore %r1, %r1, %Y0", operands);
3835 break;
3836 default:
3837 operands[1] = pat;
3838 output_asm_insn (" restore %%g0, %1, %Y0", operands);
3839 break;
3840 }
3841 PATTERN (delay) = gen_blockage ();
3842 INSN_CODE (delay) = -1;
3843 }
3844 else
3845 fputs ("\t restore\n", asm_out_file);
3846 return "";
3847 }
3848 \f
3849 /* Functions for handling argument passing.
3850
3851 For v8 the first six args are normally in registers and the rest are
3852 pushed. Any arg that starts within the first 6 words is at least
3853 partially passed in a register unless its data type forbids it.
3854
3855 For v9, the argument registers are laid out as an array of 16 elements
3856 and arguments are added sequentially. The first 6 int args and up to the
3857 first 16 fp args (depending on size) are passed in regs.
3858
3859 Slot Stack Integral Float Float in structure Double Long Double
3860 ---- ----- -------- ----- ------------------ ------ -----------
3861 15 [SP+248] %f31 %f30,%f31 %d30
3862 14 [SP+240] %f29 %f28,%f29 %d28 %q28
3863 13 [SP+232] %f27 %f26,%f27 %d26
3864 12 [SP+224] %f25 %f24,%f25 %d24 %q24
3865 11 [SP+216] %f23 %f22,%f23 %d22
3866 10 [SP+208] %f21 %f20,%f21 %d20 %q20
3867 9 [SP+200] %f19 %f18,%f19 %d18
3868 8 [SP+192] %f17 %f16,%f17 %d16 %q16
3869 7 [SP+184] %f15 %f14,%f15 %d14
3870 6 [SP+176] %f13 %f12,%f13 %d12 %q12
3871 5 [SP+168] %o5 %f11 %f10,%f11 %d10
3872 4 [SP+160] %o4 %f9 %f8,%f9 %d8 %q8
3873 3 [SP+152] %o3 %f7 %f6,%f7 %d6
3874 2 [SP+144] %o2 %f5 %f4,%f5 %d4 %q4
3875 1 [SP+136] %o1 %f3 %f2,%f3 %d2
3876 0 [SP+128] %o0 %f1 %f0,%f1 %d0 %q0
3877
3878 Here SP is %sp if -mno-stack-bias, and %sp+stack_bias otherwise.
3879
3880 Integral arguments are always passed as 64 bit quantities appropriately
3881 extended.
3882
3883 Passing of floating point values is handled as follows.
3884 If a prototype is in scope:
3885 If the value is a named argument (i.e. the function is not stdarg, or
3886 the value is not part of the `...') then the value is passed in the
3887 appropriate fp reg.
3888 If the value is part of the `...' and is passed in one of the first 6
3889 slots then the value is passed in the appropriate int reg.
3890 If the value is part of the `...' and is not passed in one of the first 6
3891 slots then the value is passed in memory.
3892 If a prototype is not in scope:
3893 If the value is one of the first 6 arguments the value is passed in the
3894 appropriate integer reg and the appropriate fp reg.
3895 If the value is not one of the first 6 arguments the value is passed in
3896 the appropriate fp reg and in memory.
3897 */
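/* For illustration, a hypothetical worked example of the v9 rules
   above (not part of the original source).  Given

       struct fp2 { float a, b; };
       void f (int i, double d, float s, struct fp2 p);

   with the prototype in scope, the named args land per the table as
   i -> %o0 (slot 0), d -> %d2 (slot 1), s -> %f5 (slot 2) and
   p -> %f6,%f7 (slot 3); each argument still owns its 8-byte slot
   starting at [SP+128].  */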
3898
3899 /* Maximum number of int regs for args. */
3900 #define SPARC_INT_ARG_MAX 6
3901 /* Maximum number of fp regs for args. */
3902 #define SPARC_FP_ARG_MAX 16
3903
3904 #define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
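/* A quick worked check of ROUND_ADVANCE, assuming UNITS_PER_WORD == 4
   as on 32-bit SPARC: ROUND_ADVANCE (1) == 1 and ROUND_ADVANCE (4) == 1
   (anything up to a word takes one slot), while ROUND_ADVANCE (5) == 2
   and ROUND_ADVANCE (13) == 4 (sizes always round up to whole words).  */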
3905
3906 /* Handle the INIT_CUMULATIVE_ARGS macro.
3907 Initialize a variable CUM of type CUMULATIVE_ARGS
3908 for a call to a function whose data type is FNTYPE.
3909 For a library call, FNTYPE is 0. */
3910
3911 void
3912 init_cumulative_args (cum, fntype, libname, indirect)
3913 CUMULATIVE_ARGS *cum;
3914 tree fntype;
3915 rtx libname ATTRIBUTE_UNUSED;
3916 int indirect ATTRIBUTE_UNUSED;
3917 {
3918 cum->words = 0;
3919 cum->prototype_p = fntype && TYPE_ARG_TYPES (fntype);
3920 cum->libcall_p = fntype == 0;
3921 }
3922
3923 /* Compute the slot number to pass an argument in.
3924 Returns the slot number or -1 if passing on the stack.
3925
3926 CUM is a variable of type CUMULATIVE_ARGS which gives info about
3927 the preceding args and about the function being called.
3928 MODE is the argument's machine mode.
3929 TYPE is the data type of the argument (as a tree).
3930 This is null for libcalls where that information may
3931 not be available.
3932 NAMED is nonzero if this argument is a named parameter
3933 (otherwise it is an extra parameter matching an ellipsis).
3934 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
3935 *PREGNO records the register number to use if scalar type.
3936 *PPADDING records the amount of padding needed in words. */
3937
3938 static int
3939 function_arg_slotno (cum, mode, type, named, incoming_p, pregno, ppadding)
3940 const CUMULATIVE_ARGS *cum;
3941 enum machine_mode mode;
3942 tree type;
3943 int named;
3944 int incoming_p;
3945 int *pregno;
3946 int *ppadding;
3947 {
3948 int regbase = (incoming_p
3949 ? SPARC_INCOMING_INT_ARG_FIRST
3950 : SPARC_OUTGOING_INT_ARG_FIRST);
3951 int slotno = cum->words;
3952 int regno;
3953
3954 *ppadding = 0;
3955
3956 if (type != 0 && TREE_ADDRESSABLE (type))
3957 return -1;
3958 if (TARGET_ARCH32
3959 && type != 0 && mode == BLKmode
3960 && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
3961 return -1;
3962
3963 switch (mode)
3964 {
3965 case VOIDmode :
3966 /* MODE is VOIDmode when generating the actual call.
3967 See emit_call_1. */
3968 return -1;
3969
3970 case QImode : case CQImode :
3971 case HImode : case CHImode :
3972 case SImode : case CSImode :
3973 case DImode : case CDImode :
3974 if (slotno >= SPARC_INT_ARG_MAX)
3975 return -1;
3976 regno = regbase + slotno;
3977 break;
3978
3979 case SFmode : case SCmode :
3980 case DFmode : case DCmode :
3981 case TFmode : case TCmode :
3982 if (TARGET_ARCH32)
3983 {
3984 if (slotno >= SPARC_INT_ARG_MAX)
3985 return -1;
3986 regno = regbase + slotno;
3987 }
3988 else
3989 {
3990 if ((mode == TFmode || mode == TCmode)
3991 && (slotno & 1) != 0)
3992 slotno++, *ppadding = 1;
3993 if (TARGET_FPU && named)
3994 {
3995 if (slotno >= SPARC_FP_ARG_MAX)
3996 return -1;
3997 regno = SPARC_FP_ARG_FIRST + slotno * 2;
3998 if (mode == SFmode)
3999 regno++;
4000 }
4001 else
4002 {
4003 if (slotno >= SPARC_INT_ARG_MAX)
4004 return -1;
4005 regno = regbase + slotno;
4006 }
4007 }
4008 break;
4009
4010 case BLKmode :
4011 /* For sparc64, objects requiring 16 byte alignment get it. */
4012 if (TARGET_ARCH64)
4013 {
4014 if (type && TYPE_ALIGN (type) == 128 && (slotno & 1) != 0)
4015 slotno++, *ppadding = 1;
4016 }
4017
4018 if (TARGET_ARCH32
4019 || (type && TREE_CODE (type) == UNION_TYPE))
4020 {
4021 if (slotno >= SPARC_INT_ARG_MAX)
4022 return -1;
4023 regno = regbase + slotno;
4024 }
4025 else
4026 {
4027 tree field;
4028 int intregs_p = 0, fpregs_p = 0;
4029 /* The ABI obviously doesn't specify how packed
4030 structures are passed. These are defined to be passed
4031 in int regs if possible, otherwise memory. */
4032 int packed_p = 0;
4033
4034 /* First see what kinds of registers we need. */
4035 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4036 {
4037 if (TREE_CODE (field) == FIELD_DECL)
4038 {
4039 if (TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
4040 && TARGET_FPU)
4041 fpregs_p = 1;
4042 else
4043 intregs_p = 1;
4044 if (DECL_PACKED (field))
4045 packed_p = 1;
4046 }
4047 }
4048 if (packed_p || !named)
4049 fpregs_p = 0, intregs_p = 1;
4050
4051 /* If all arg slots are filled, then must pass on stack. */
4052 if (fpregs_p && slotno >= SPARC_FP_ARG_MAX)
4053 return -1;
4054 /* If there are only int args and all int arg slots are filled,
4055 then must pass on stack. */
4056 if (!fpregs_p && intregs_p && slotno >= SPARC_INT_ARG_MAX)
4057 return -1;
4058 /* Note that even if all int arg slots are filled, fp members may
4059 still be passed in regs if such regs are available.
4060 *PREGNO isn't set because there may be more than one register;
4061 it's up to the caller to compute them. */
4062 return slotno;
4063 }
4064 break;
4065
4066 default :
4067 abort ();
4068 }
4069
4070 *pregno = regno;
4071 return slotno;
4072 }
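/* A hypothetical worked example of the slot logic above: on v9 with
   TARGET_FPU, a named TFmode arg arriving when cum->words == 3 is
   bumped to slot 4 (*PPADDING == 1) and gets
   regno == SPARC_FP_ARG_FIRST + 8, i.e. the %q8 quad from the table.  */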
4073
4074 /* Handle recursive register counting for structure field layout. */
4075
4076 struct function_arg_record_value_parms
4077 {
4078 rtx ret;
4079 int slotno, named, regbase;
4080 unsigned int nregs;
4081 int intoffset;
4082 };
4083
4084 static void function_arg_record_value_3
4085 PARAMS ((HOST_WIDE_INT, struct function_arg_record_value_parms *));
4086 static void function_arg_record_value_2
4087 PARAMS ((tree, HOST_WIDE_INT,
4088 struct function_arg_record_value_parms *));
4089 static void function_arg_record_value_1
4090 PARAMS ((tree, HOST_WIDE_INT,
4091 struct function_arg_record_value_parms *));
4092 static rtx function_arg_record_value
4093 PARAMS ((tree, enum machine_mode, int, int, int));
4094
4095 static void
4096 function_arg_record_value_1 (type, startbitpos, parms)
4097 tree type;
4098 HOST_WIDE_INT startbitpos;
4099 struct function_arg_record_value_parms *parms;
4100 {
4101 tree field;
4102
4103 /* The ABI obviously doesn't specify how packed structures are
4104 passed. These are defined to be passed in int regs if possible,
4105 otherwise memory. */
4106 int packed_p = 0;
4107
4108 /* We need to compute how many registers are needed so we can
4109 allocate the PARALLEL, but before we can do that we need to know
4110 whether there are any packed fields. If there are, int regs are
4111 used regardless of whether there are fp values present. */
4112 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4113 {
4114 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
4115 {
4116 packed_p = 1;
4117 break;
4118 }
4119 }
4120
4121 /* Compute how many registers we need. */
4122 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4123 {
4124 if (TREE_CODE (field) == FIELD_DECL)
4125 {
4126 HOST_WIDE_INT bitpos = startbitpos;
4127
4128 if (DECL_SIZE (field) != 0
4129 && host_integerp (bit_position (field), 1))
4130 bitpos += int_bit_position (field);
4131
4132 /* ??? FIXME: else assume zero offset. */
4133
4134 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4135 function_arg_record_value_1 (TREE_TYPE (field), bitpos, parms);
4136 else if (TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
4137 && TARGET_FPU
4138 && ! packed_p
4139 && parms->named)
4140 {
4141 if (parms->intoffset != -1)
4142 {
4143 int intslots, this_slotno;
4144
4145 intslots = (bitpos - parms->intoffset + BITS_PER_WORD - 1)
4146 / BITS_PER_WORD;
4147 this_slotno = parms->slotno + parms->intoffset
4148 / BITS_PER_WORD;
4149
4150 intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
4151 intslots = MAX (intslots, 0);
4152 parms->nregs += intslots;
4153 parms->intoffset = -1;
4154 }
4155
4156 /* There's no need to check this_slotno < SPARC_FP_ARG_MAX.
4157 If it weren't true we wouldn't be here. */
4158 parms->nregs += 1;
4159 }
4160 else
4161 {
4162 if (parms->intoffset == -1)
4163 parms->intoffset = bitpos;
4164 }
4165 }
4166 }
4167 }
4168
4169 /* Handle recursive structure field register assignment. */
4170
4171 static void
4172 function_arg_record_value_3 (bitpos, parms)
4173 HOST_WIDE_INT bitpos;
4174 struct function_arg_record_value_parms *parms;
4175 {
4176 enum machine_mode mode;
4177 unsigned int regno;
4178 int this_slotno, intslots, intoffset;
4179 rtx reg;
4180
4181 if (parms->intoffset == -1)
4182 return;
4183
4184 intoffset = parms->intoffset;
4185 parms->intoffset = -1;
4186
4187 intslots = (bitpos - intoffset + BITS_PER_WORD - 1) / BITS_PER_WORD;
4188 this_slotno = parms->slotno + intoffset / BITS_PER_WORD;
4189
4190 intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
4191 if (intslots <= 0)
4192 return;
4193
4194 /* If this is the trailing part of a word, only load that much into
4195 the register. Otherwise load the whole register. Note that in
4196 the latter case we may pick up unwanted bits. It's not a problem
4197 at the moment, but we may wish to revisit this. */
4198
4199 if (intoffset % BITS_PER_WORD != 0)
4200 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
4201 MODE_INT, 0);
4202 else
4203 mode = word_mode;
4204
4205 intoffset /= BITS_PER_UNIT;
4206 do
4207 {
4208 regno = parms->regbase + this_slotno;
4209 reg = gen_rtx_REG (mode, regno);
4210 XVECEXP (parms->ret, 0, parms->nregs)
4211 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
4212
4213 this_slotno += 1;
4214 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
4215 parms->nregs += 1;
4216 intslots -= 1;
4217 }
4218 while (intslots > 0);
4219 }
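/* A hypothetical example of the arithmetic above: with
   BITS_PER_WORD == 64, an integer region running from intoffset == 32
   to bitpos == 64 spans intslots == (64 - 32 + 63) / 64 == 1 slot, and
   since 32 % 64 != 0 it is loaded in a 32-bit MODE_INT mode -- only
   the trailing half of that word carries data.  */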
4220
4221 static void
4222 function_arg_record_value_2 (type, startbitpos, parms)
4223 tree type;
4224 HOST_WIDE_INT startbitpos;
4225 struct function_arg_record_value_parms *parms;
4226 {
4227 tree field;
4228 int packed_p = 0;
4229
4230 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4231 {
4232 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
4233 {
4234 packed_p = 1;
4235 break;
4236 }
4237 }
4238
4239 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4240 {
4241 if (TREE_CODE (field) == FIELD_DECL)
4242 {
4243 HOST_WIDE_INT bitpos = startbitpos;
4244
4245 if (DECL_SIZE (field) != 0
4246 && host_integerp (bit_position (field), 1))
4247 bitpos += int_bit_position (field);
4248
4249 /* ??? FIXME: else assume zero offset. */
4250
4251 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4252 function_arg_record_value_2 (TREE_TYPE (field), bitpos, parms);
4253 else if (TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
4254 && TARGET_FPU
4255 && ! packed_p
4256 && parms->named)
4257 {
4258 int this_slotno = parms->slotno + bitpos / BITS_PER_WORD;
4259 rtx reg;
4260
4261 function_arg_record_value_3 (bitpos, parms);
4262
4263 reg = gen_rtx_REG (DECL_MODE (field),
4264 (SPARC_FP_ARG_FIRST + this_slotno * 2
4265 + (DECL_MODE (field) == SFmode
4266 && (bitpos & 32) != 0)));
4267 XVECEXP (parms->ret, 0, parms->nregs)
4268 = gen_rtx_EXPR_LIST (VOIDmode, reg,
4269 GEN_INT (bitpos / BITS_PER_UNIT));
4270 parms->nregs += 1;
4271 }
4272 else
4273 {
4274 if (parms->intoffset == -1)
4275 parms->intoffset = bitpos;
4276 }
4277 }
4278 }
4279 }
4280
4281 static rtx
4282 function_arg_record_value (type, mode, slotno, named, regbase)
4283 tree type;
4284 enum machine_mode mode;
4285 int slotno, named, regbase;
4286 {
4287 HOST_WIDE_INT typesize = int_size_in_bytes (type);
4288 struct function_arg_record_value_parms parms;
4289 unsigned int nregs;
4290
4291 parms.ret = NULL_RTX;
4292 parms.slotno = slotno;
4293 parms.named = named;
4294 parms.regbase = regbase;
4295
4296 /* Compute how many registers we need. */
4297 parms.nregs = 0;
4298 parms.intoffset = 0;
4299 function_arg_record_value_1 (type, 0, &parms);
4300
4301 if (parms.intoffset != -1)
4302 {
4303 int intslots, this_slotno;
4304
4305 intslots = (typesize*BITS_PER_UNIT - parms.intoffset + BITS_PER_WORD - 1)
4306 / BITS_PER_WORD;
4307 this_slotno = slotno + parms.intoffset / BITS_PER_WORD;
4308
4309 intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
4310 intslots = MAX (intslots, 0);
4311
4312 parms.nregs += intslots;
4313 }
4314 nregs = parms.nregs;
4315
4316 /* Allocate the vector and handle some annoying special cases. */
4317 if (nregs == 0)
4318 {
4319 /* ??? Empty structure has no value? Duh? */
4320 if (typesize <= 0)
4321 {
4322 /* Though there's nothing really to store, return a word register
4323 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
4324 leads to breakage because there are zero bytes to
4325 load. */
4326 return gen_rtx_REG (mode, regbase);
4327 }
4328 else
4329 {
4330 /* ??? C++ has structures with no fields, and yet a size. Give up
4331 for now and pass everything back in integer registers. */
4332 nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4333 }
4334 if (nregs + slotno > SPARC_INT_ARG_MAX)
4335 nregs = SPARC_INT_ARG_MAX - slotno;
4336 }
4337 if (nregs == 0)
4338 abort ();
4339
4340 parms.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nregs));
4341
4342 /* Fill in the entries. */
4343 parms.nregs = 0;
4344 parms.intoffset = 0;
4345 function_arg_record_value_2 (type, 0, &parms);
4346 function_arg_record_value_3 (typesize * BITS_PER_UNIT, &parms);
4347
4348 if (parms.nregs != nregs)
4349 abort ();
4350
4351 return parms.ret;
4352 }
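/* A hypothetical example of the whole scheme: for
       struct { double d; int i; }
   passed in slot 0 of an outgoing call with a prototype in scope, the
   two passes above build a two-entry PARALLEL: %d0 at byte offset 0
   for the fp field and %o1 at byte offset 8 for the integer word.  */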
4353
4354 /* Handle the FUNCTION_ARG macro.
4355 Determine where to put an argument to a function.
4356 Value is zero to push the argument on the stack,
4357 or a hard register in which to store the argument.
4358
4359 CUM is a variable of type CUMULATIVE_ARGS which gives info about
4360 the preceding args and about the function being called.
4361 MODE is the argument's machine mode.
4362 TYPE is the data type of the argument (as a tree).
4363 This is null for libcalls where that information may
4364 not be available.
4365 NAMED is nonzero if this argument is a named parameter
4366 (otherwise it is an extra parameter matching an ellipsis).
4367 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG. */
4368
4369 rtx
4370 function_arg (cum, mode, type, named, incoming_p)
4371 const CUMULATIVE_ARGS *cum;
4372 enum machine_mode mode;
4373 tree type;
4374 int named;
4375 int incoming_p;
4376 {
4377 int regbase = (incoming_p
4378 ? SPARC_INCOMING_INT_ARG_FIRST
4379 : SPARC_OUTGOING_INT_ARG_FIRST);
4380 int slotno, regno, padding;
4381 rtx reg;
4382
4383 slotno = function_arg_slotno (cum, mode, type, named, incoming_p,
4384 &regno, &padding);
4385
4386 if (slotno == -1)
4387 return 0;
4388
4389 if (TARGET_ARCH32)
4390 {
4391 reg = gen_rtx_REG (mode, regno);
4392 return reg;
4393 }
4394
4395 /* v9 fp args in reg slots beyond the int reg slots get passed in regs
4396 but also have the slot allocated for them.
4397 If no prototype is in scope, fp values in register slots get passed
4398 in two places, either fp regs and int regs or fp regs and memory. */
4399 if ((GET_MODE_CLASS (mode) == MODE_FLOAT
4400 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4401 && SPARC_FP_REG_P (regno))
4402 {
4403 reg = gen_rtx_REG (mode, regno);
4404 if (cum->prototype_p || cum->libcall_p)
4405 {
4406 /* "* 2" because fp reg numbers are recorded in 4 byte
4407 quantities. */
4408 #if 0
4409 /* ??? This will cause the value to be passed in the fp reg and
4410 in the stack. When a prototype exists we want to pass the
4411 value in the reg but reserve space on the stack. That's an
4412 optimization, and is deferred [for a bit]. */
4413 if ((regno - SPARC_FP_ARG_FIRST) >= SPARC_INT_ARG_MAX * 2)
4414 return gen_rtx_PARALLEL (mode,
4415 gen_rtvec (2,
4416 gen_rtx_EXPR_LIST (VOIDmode,
4417 NULL_RTX, const0_rtx),
4418 gen_rtx_EXPR_LIST (VOIDmode,
4419 reg, const0_rtx)));
4420 else
4421 #else
4422 /* ??? It seems that passing back a register even when past
4423 the area declared by REG_PARM_STACK_SPACE will allocate
4424 space appropriately, and will not copy the data onto the
4425 stack, exactly as we desire.
4426
4427 This is due to locate_and_pad_parm being called in
4428 expand_call whenever reg_parm_stack_space > 0, which
4429 while beneficial to our example here, would seem to be
4430 in error from what had been intended. Ho hum... -- r~ */
4431 #endif
4432 return reg;
4433 }
4434 else
4435 {
4436 rtx v0, v1;
4437
4438 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
4439 {
4440 int intreg;
4441
4442 /* On incoming, we don't need to know that the value
4443 is passed in %f0 and %i0, and it confuses other parts,
4444 causing needless spillage even in the simplest cases. */
4445 if (incoming_p)
4446 return reg;
4447
4448 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
4449 + (regno - SPARC_FP_ARG_FIRST) / 2);
4450
4451 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
4452 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
4453 const0_rtx);
4454 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
4455 }
4456 else
4457 {
4458 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
4459 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
4460 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
4461 }
4462 }
4463 }
4464 else if (type && TREE_CODE (type) == RECORD_TYPE)
4465 {
4466 /* Structures up to 16 bytes in size are passed in arg slots on the
4467 stack and are promoted to registers where possible. */
4468
4469 if (int_size_in_bytes (type) > 16)
4470 abort (); /* shouldn't get here */
4471
4472 return function_arg_record_value (type, mode, slotno, named, regbase);
4473 }
4474 else if (type && TREE_CODE (type) == UNION_TYPE)
4475 {
4476 enum machine_mode mode;
4477 int bytes = int_size_in_bytes (type);
4478
4479 if (bytes > 16)
4480 abort ();
4481
4482 mode = mode_for_size (bytes * BITS_PER_UNIT, MODE_INT, 0);
4483 reg = gen_rtx_REG (mode, regno);
4484 }
4485 else
4486 {
4487 /* Scalar or complex int. */
4488 reg = gen_rtx_REG (mode, regno);
4489 }
4490
4491 return reg;
4492 }
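/* For illustration, the unprototyped case above in action
   (hypothetical): calling  g (1.5)  with no prototype in scope yields
   a two-entry PARALLEL of %d0 and %o0, so the value travels in both
   an fp reg and an int reg and the callee may pick it up from either.  */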
4493
4494 /* Handle the FUNCTION_ARG_PARTIAL_NREGS macro.
4495 For an arg passed partly in registers and partly in memory,
4496 this is the number of registers used.
4497 For args passed entirely in registers or entirely in memory, zero.
4498
4499 Any arg that starts in the first 6 regs but won't entirely fit in them
4500 needs partial registers on v8. On v9, structures with integer
4501 values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
4502 values that begin in the last fp reg [where "last fp reg" varies with the
4503 mode] will be split between that reg and memory. */
4504
4505 int
4506 function_arg_partial_nregs (cum, mode, type, named)
4507 const CUMULATIVE_ARGS *cum;
4508 enum machine_mode mode;
4509 tree type;
4510 int named;
4511 {
4512 int slotno, regno, padding;
4513
4514 /* We pass 0 for incoming_p here; it doesn't matter. */
4515 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
4516
4517 if (slotno == -1)
4518 return 0;
4519
4520 if (TARGET_ARCH32)
4521 {
4522 if ((slotno + (mode == BLKmode
4523 ? ROUND_ADVANCE (int_size_in_bytes (type))
4524 : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
4525 > NPARM_REGS (SImode))
4526 return NPARM_REGS (SImode) - slotno;
4527 return 0;
4528 }
4529 else
4530 {
4531 if (type && AGGREGATE_TYPE_P (type))
4532 {
4533 int size = int_size_in_bytes (type);
4534 int align = TYPE_ALIGN (type);
4535
4536 if (align == 16)
4537 slotno += slotno & 1;
4538 if (size > 8 && size <= 16
4539 && slotno == SPARC_INT_ARG_MAX - 1)
4540 return 1;
4541 }
4542 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
4543 || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4544 && ! TARGET_FPU))
4545 {
4546 if (GET_MODE_ALIGNMENT (mode) == 128)
4547 {
4548 slotno += slotno & 1;
4549 if (slotno == SPARC_INT_ARG_MAX - 2)
4550 return 1;
4551 }
4552 else
4553 {
4554 if (slotno == SPARC_INT_ARG_MAX - 1)
4555 return 1;
4556 }
4557 }
4558 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4559 {
4560 if (GET_MODE_ALIGNMENT (mode) == 128)
4561 slotno += slotno & 1;
4562 if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
4563 > SPARC_FP_ARG_MAX)
4564 return 1;
4565 }
4566 return 0;
4567 }
4568 }
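/* A hypothetical worked example: on v9, a 16-byte aggregate starting
   in slot 5 (the last int slot) puts its first 8 bytes in %o5 and the
   remainder at [SP+176], so the function above returns 1 partial
   register for it.  */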
4569
4570 /* Handle the FUNCTION_ARG_PASS_BY_REFERENCE macro.
4571 !v9: The SPARC ABI stipulates passing struct arguments (of any size) and
4572 quad-precision floats by invisible reference.
4573 v9: Aggregates greater than 16 bytes are passed by reference.
4574 For Pascal, also pass arrays by reference. */
4575
4576 int
4577 function_arg_pass_by_reference (cum, mode, type, named)
4578 const CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED;
4579 enum machine_mode mode;
4580 tree type;
4581 int named ATTRIBUTE_UNUSED;
4582 {
4583 if (TARGET_ARCH32)
4584 {
4585 return ((type && AGGREGATE_TYPE_P (type))
4586 || mode == TFmode || mode == TCmode);
4587 }
4588 else
4589 {
4590 return ((type && TREE_CODE (type) == ARRAY_TYPE)
4591 /* Consider complex values as aggregates, so care for TCmode. */
4592 || GET_MODE_SIZE (mode) > 16
4593 || (type && AGGREGATE_TYPE_P (type)
4594 && int_size_in_bytes (type) > 16));
4595 }
4596 }
4597
4598 /* Handle the FUNCTION_ARG_ADVANCE macro.
4599 Update the data in CUM to advance over an argument
4600 of mode MODE and data type TYPE.
4601 TYPE is null for libcalls where that information may not be available. */
4602
4603 void
4604 function_arg_advance (cum, mode, type, named)
4605 CUMULATIVE_ARGS *cum;
4606 enum machine_mode mode;
4607 tree type;
4608 int named;
4609 {
4610 int slotno, regno, padding;
4611
4612 /* We pass 0 for incoming_p here; it doesn't matter. */
4613 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
4614
4615 /* If the register required leading padding, add it. */
4616 if (slotno != -1)
4617 cum->words += padding;
4618
4619 if (TARGET_ARCH32)
4620 {
4621 cum->words += (mode != BLKmode
4622 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
4623 : ROUND_ADVANCE (int_size_in_bytes (type)));
4624 }
4625 else
4626 {
4627 if (type && AGGREGATE_TYPE_P (type))
4628 {
4629 int size = int_size_in_bytes (type);
4630
4631 if (size <= 8)
4632 ++cum->words;
4633 else if (size <= 16)
4634 cum->words += 2;
4635 else /* passed by reference */
4636 ++cum->words;
4637 }
4638 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
4639 {
4640 cum->words += 2;
4641 }
4642 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4643 {
4644 cum->words += GET_MODE_SIZE (mode) / UNITS_PER_WORD;
4645 }
4646 else
4647 {
4648 cum->words += (mode != BLKmode
4649 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
4650 : ROUND_ADVANCE (int_size_in_bytes (type)));
4651 }
4652 }
4653 }
4654
4655 /* Handle the FUNCTION_ARG_PADDING macro.
4656 For the 64 bit ABI structs are always stored left shifted in their
4657 argument slot. */
4658
4659 enum direction
4660 function_arg_padding (mode, type)
4661 enum machine_mode mode;
4662 tree type;
4663 {
4664 if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
4665 return upward;
4666
4667 /* This is the default definition. */
4668 return (! BYTES_BIG_ENDIAN
4669 ? upward
4670 : ((mode == BLKmode
4671 ? (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
4672 && int_size_in_bytes (type) < (PARM_BOUNDARY / BITS_PER_UNIT))
4673 : GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
4674 ? downward : upward));
4675 }
4676
4677 /* Handle FUNCTION_VALUE, FUNCTION_OUTGOING_VALUE, and LIBCALL_VALUE macros.
4678 For v9, function return values are subject to the same rules as arguments,
4679 except that up to 32-bytes may be returned in registers. */
4680
4681 rtx
4682 function_value (type, mode, incoming_p)
4683 tree type;
4684 enum machine_mode mode;
4685 int incoming_p;
4686 {
4687 int regno;
4688 int regbase = (incoming_p
4689 ? SPARC_OUTGOING_INT_ARG_FIRST
4690 : SPARC_INCOMING_INT_ARG_FIRST);
4691
4692 if (TARGET_ARCH64 && type)
4693 {
4694 if (TREE_CODE (type) == RECORD_TYPE)
4695 {
4696 /* Structures up to 32 bytes in size are returned in registers,
4697 promoted to fp registers where possible. */
4698
4699 if (int_size_in_bytes (type) > 32)
4700 abort (); /* shouldn't get here */
4701
4702 return function_arg_record_value (type, mode, 0, 1, regbase);
4703 }
4704 else if (TREE_CODE (type) == UNION_TYPE)
4705 {
4706 int bytes = int_size_in_bytes (type);
4707
4708 if (bytes > 32)
4709 abort ();
4710
4711 mode = mode_for_size (bytes * BITS_PER_UNIT, MODE_INT, 0);
4712 }
4713 }
4714
4715 if (TARGET_ARCH64
4716 && GET_MODE_CLASS (mode) == MODE_INT
4717 && GET_MODE_SIZE (mode) < UNITS_PER_WORD
4718 && type && TREE_CODE (type) != UNION_TYPE)
4719 mode = DImode;
4720
4721 if (incoming_p)
4722 regno = BASE_RETURN_VALUE_REG (mode);
4723 else
4724 regno = BASE_OUTGOING_VALUE_REG (mode);
4725
4726 return gen_rtx_REG (mode, regno);
4727 }
4728
4729 /* Do what is necessary for `va_start'. We look at the current function
4730 to determine if stdarg or varargs is used and return the address of
4731 the first unnamed parameter. */
4732
4733 rtx
4734 sparc_builtin_saveregs ()
4735 {
4736 int first_reg = current_function_args_info.words;
4737 rtx address;
4738 int regno;
4739
4740 for (regno = first_reg; regno < NPARM_REGS (word_mode); regno++)
4741 emit_move_insn (gen_rtx_MEM (word_mode,
4742 gen_rtx_PLUS (Pmode,
4743 frame_pointer_rtx,
4744 GEN_INT (STACK_POINTER_OFFSET
4745 + UNITS_PER_WORD * regno))),
4746 gen_rtx_REG (word_mode,
4747 BASE_INCOMING_ARG_REG (word_mode) + regno));
4748
4749 address = gen_rtx_PLUS (Pmode,
4750 frame_pointer_rtx,
4751 GEN_INT (STACK_POINTER_OFFSET
4752 + UNITS_PER_WORD * first_reg));
4753
4754 if (current_function_check_memory_usage
4755 && first_reg < NPARM_REGS (word_mode))
4756 emit_library_call (chkr_set_right_libfunc, 1, VOIDmode, 3,
4757 address, ptr_mode,
4758 GEN_INT (UNITS_PER_WORD
4759 * (NPARM_REGS (word_mode) - first_reg)),
4760 TYPE_MODE (sizetype), GEN_INT (MEMORY_USE_RW),
4761 TYPE_MODE (integer_type_node));
4762
4763 return address;
4764 }
4765
4766 /* Implement `va_start' for varargs and stdarg. */
4767
4768 void
4769 sparc_va_start (stdarg_p, valist, nextarg)
4770 int stdarg_p ATTRIBUTE_UNUSED;
4771 tree valist;
4772 rtx nextarg;
4773 {
4774 nextarg = expand_builtin_saveregs ();
4775 std_expand_builtin_va_start (1, valist, nextarg);
4776 }
4777
4778 /* Implement `va_arg'. */
4779
4780 rtx
4781 sparc_va_arg (valist, type)
4782 tree valist, type;
4783 {
4784 HOST_WIDE_INT size, rsize, align;
4785 tree addr, incr;
4786 rtx addr_rtx;
4787 int indirect = 0;
4788
4789 /* Round up sizeof(type) to a word. */
4790 size = int_size_in_bytes (type);
4791 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
4792 align = 0;
4793
4794 if (TARGET_ARCH64)
4795 {
4796 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
4797 align = 2 * UNITS_PER_WORD;
4798
4799 if (AGGREGATE_TYPE_P (type))
4800 {
4801 if (size > 16)
4802 {
4803 indirect = 1;
4804 size = rsize = UNITS_PER_WORD;
4805 }
4806 else
4807 size = rsize;
4808 }
4809 }
4810 else
4811 {
4812 if (AGGREGATE_TYPE_P (type)
4813 || TYPE_MODE (type) == TFmode
4814 || TYPE_MODE (type) == TCmode)
4815 {
4816 indirect = 1;
4817 size = rsize = UNITS_PER_WORD;
4818 }
4819 else
4820 {
4821 /* ??? The old va-sparc.h implementation, for 8 byte objects,
4822 copied the value to a temporary -- I don't see that this
4823 provides any more alignment than the stack slot did. */
4824 }
4825 }
4826
4827 incr = valist;
4828 if (align)
4829 {
4830 incr = fold (build (PLUS_EXPR, ptr_type_node, incr,
4831 build_int_2 (align - 1, 0)));
4832 incr = fold (build (BIT_AND_EXPR, ptr_type_node, incr,
4833 build_int_2 (-align, -1)));
4834 }
4835
4836 addr = incr = save_expr (incr);
4837 if (BYTES_BIG_ENDIAN && size < rsize)
4838 {
4839 addr = fold (build (PLUS_EXPR, ptr_type_node, incr,
4840 build_int_2 (rsize - size, 0)));
4841 }
4842 incr = fold (build (PLUS_EXPR, ptr_type_node, incr,
4843 build_int_2 (rsize, 0)));
4844
4845 incr = build (MODIFY_EXPR, ptr_type_node, valist, incr);
4846 TREE_SIDE_EFFECTS (incr) = 1;
4847 expand_expr (incr, const0_rtx, VOIDmode, EXPAND_NORMAL);
4848
4849 addr_rtx = expand_expr (addr, NULL, Pmode, EXPAND_NORMAL);
4850
4851 if (indirect)
4852 {
4853 addr_rtx = force_reg (Pmode, addr_rtx);
4854 addr_rtx = gen_rtx_MEM (Pmode, addr_rtx);
4855 MEM_ALIAS_SET (addr_rtx) = get_varargs_alias_set ();
4856 }
4857
4858 return addr_rtx;
4859 }
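/* A hypothetical worked example of the rounding above: with
   UNITS_PER_WORD == 8, a 12-byte type gives
   rsize == (12 + 7) & -8 == 16, and for a 4-byte scalar on this
   big-endian target the value is fetched at incr + (8 - 4), i.e. from
   the tail of its 8-byte slot.  */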
4860 \f
4861 /* Return the string to output a conditional branch to LABEL, which is
4862 the operand number of the label. OP is the conditional expression.
4863 XEXP (OP, 0) is assumed to be a condition code register (integer or
4864 floating point) and its mode specifies what kind of comparison we made.
4865
4866 REVERSED is non-zero if we should reverse the sense of the comparison.
4867
4868 ANNUL is non-zero if we should generate an annulling branch.
4869
4870 NOOP is non-zero if we have to follow this branch by a noop.
4871
4872 INSN, if set, is the insn. */
4873
4874 char *
4875 output_cbranch (op, label, reversed, annul, noop, insn)
4876 rtx op;
4877 int label;
4878 int reversed, annul, noop;
4879 rtx insn;
4880 {
4881 static char string[32];
4882 enum rtx_code code = GET_CODE (op);
4883 rtx cc_reg = XEXP (op, 0);
4884 enum machine_mode mode = GET_MODE (cc_reg);
4885 static char v8_labelno[] = "%lX";
4886 static char v9_icc_labelno[] = "%%icc, %lX";
4887 static char v9_xcc_labelno[] = "%%xcc, %lX";
4888 static char v9_fcc_labelno[] = "%%fccX, %lY";
4889 char *labelno;
4890 const char *branch;
4891 int labeloff, spaces = 8;
4892
4893 if (reversed)
4894 {
4895 /* Reversing an FP compare takes extra care -- an ordered compare
4896 becomes an unordered compare and vice versa. */
4897 if (mode == CCFPmode || mode == CCFPEmode)
4898 code = reverse_condition_maybe_unordered (code);
4899 else
4900 code = reverse_condition (code);
4901 }
4902
4903 /* Start by writing the branch condition. */
4904 if (mode == CCFPmode || mode == CCFPEmode)
4905 {
4906 switch (code)
4907 {
4908 case NE:
4909 branch = "fbne";
4910 break;
4911 case EQ:
4912 branch = "fbe";
4913 break;
4914 case GE:
4915 branch = "fbge";
4916 break;
4917 case GT:
4918 branch = "fbg";
4919 break;
4920 case LE:
4921 branch = "fble";
4922 break;
4923 case LT:
4924 branch = "fbl";
4925 break;
4926 case UNORDERED:
4927 branch = "fbu";
4928 break;
4929 case ORDERED:
4930 branch = "fbo";
4931 break;
4932 case UNGT:
4933 branch = "fbug";
4934 break;
4935 case UNLT:
4936 branch = "fbul";
4937 break;
4938 case UNEQ:
4939 branch = "fbue";
4940 break;
4941 case UNGE:
4942 branch = "fbuge";
4943 break;
4944 case UNLE:
4945 branch = "fbule";
4946 break;
4947 case LTGT:
4948 branch = "fblg";
4949 break;
4950
4951 default:
4952 abort ();
4953 }
4954
4955 /* ??? !v9: FP branches cannot be preceded by another floating point
4956 insn. Because there is currently no concept of pre-delay slots,
4957 we can fix this only by always emitting a nop before a floating
4958 point branch. */
4959
4960 string[0] = '\0';
4961 if (! TARGET_V9)
4962 strcpy (string, "nop\n\t");
4963 strcat (string, branch);
4964 }
4965 else
4966 {
4967 switch (code)
4968 {
4969 case NE:
4970 branch = "bne";
4971 break;
4972 case EQ:
4973 branch = "be";
4974 break;
4975 case GE:
4976 if (mode == CC_NOOVmode)
4977 branch = "bpos";
4978 else
4979 branch = "bge";
4980 break;
4981 case GT:
4982 branch = "bg";
4983 break;
4984 case LE:
4985 branch = "ble";
4986 break;
4987 case LT:
4988 if (mode == CC_NOOVmode)
4989 branch = "bneg";
4990 else
4991 branch = "bl";
4992 break;
4993 case GEU:
4994 branch = "bgeu";
4995 break;
4996 case GTU:
4997 branch = "bgu";
4998 break;
4999 case LEU:
5000 branch = "bleu";
5001 break;
5002 case LTU:
5003 branch = "blu";
5004 break;
5005
5006 default:
5007 abort ();
5008 }
5009 strcpy (string, branch);
5010 }
5011 spaces -= strlen (branch);
5012
5013 /* Now add the annulling, the label, and a possible noop. */
5014 if (annul)
5015 {
5016 strcat (string, ",a");
5017 spaces -= 2;
5018 }
5019
5020 if (! TARGET_V9)
5021 {
5022 labeloff = 2;
5023 labelno = v8_labelno;
5024 }
5025 else
5026 {
5027 rtx note;
5028
5029 if (insn && (note = find_reg_note (insn, REG_BR_PRED, NULL_RTX)))
5030 {
5031 strcat (string,
5032 INTVAL (XEXP (note, 0)) & ATTR_FLAG_likely ? ",pt" : ",pn");
5033 spaces -= 3;
5034 }
5035
5036 labeloff = 9;
5037 if (mode == CCFPmode || mode == CCFPEmode)
5038 {
5039 labeloff = 10;
5040 labelno = v9_fcc_labelno;
5041 /* Set the char indicating the number of the fcc reg to use. */
5042 labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
5043 }
5044 else if (mode == CCXmode || mode == CCX_NOOVmode)
5045 labelno = v9_xcc_labelno;
5046 else
5047 labelno = v9_icc_labelno;
5048 }
5049 /* Set the char indicating the number of the operand containing the
5050 label_ref. */
5051 labelno[labeloff] = label + '0';
5052 if (spaces > 0)
5053 strcat (string, "\t");
5054 else
5055 strcat (string, " ");
5056 strcat (string, labelno);
5057
5058 if (noop)
5059 strcat (string, "\n\tnop");
5060
5061 return string;
5062 }
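/* A hypothetical example of the strings built above: an annulled v9
   integer NE branch on %icc with label operand 0 comes out as
   "bne,a\t%%icc, %l0" (the %%/%l sequences are expanded later by
   final), and on !v9 an FP branch is prefixed with "nop\n\t" as the
   comment above explains.  */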
5063
5064 /* Emit a library call comparison between floating point X and Y.
5065 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
5066 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
5067 values as arguments instead of the TFmode registers themselves;
5068 that's why we cannot call emit_float_lib_cmp. */
5069 void
5070 sparc_emit_float_lib_cmp (x, y, comparison)
5071 rtx x, y;
5072 enum rtx_code comparison;
5073 {
5074 const char *qpfunc;
5075 rtx slot0, slot1, result, tem, tem2;
5076 enum machine_mode mode;
5077
5078 switch (comparison)
5079 {
5080 case EQ:
5081 qpfunc = (TARGET_ARCH64) ? "_Qp_feq" : "_Q_feq";
5082 break;
5083
5084 case NE:
5085 qpfunc = (TARGET_ARCH64) ? "_Qp_fne" : "_Q_fne";
5086 break;
5087
5088 case GT:
5089 qpfunc = (TARGET_ARCH64) ? "_Qp_fgt" : "_Q_fgt";
5090 break;
5091
5092 case GE:
5093 qpfunc = (TARGET_ARCH64) ? "_Qp_fge" : "_Q_fge";
5094 break;
5095
5096 case LT:
5097 qpfunc = (TARGET_ARCH64) ? "_Qp_flt" : "_Q_flt";
5098 break;
5099
5100 case LE:
5101 qpfunc = (TARGET_ARCH64) ? "_Qp_fle" : "_Q_fle";
5102 break;
5103
5104 case ORDERED:
5105 case UNORDERED:
5106 case UNGT:
5107 case UNLT:
5108 case UNEQ:
5109 case UNGE:
5110 case UNLE:
5111 case LTGT:
5112 qpfunc = (TARGET_ARCH64) ? "_Qp_cmp" : "_Q_cmp";
5113 break;
5114
5115 default:
5116 abort();
5117 break;
5118 }
5119
5120 if (TARGET_ARCH64)
5121 {
5122 if (GET_CODE (x) != MEM)
5123 {
5124 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
5125 emit_insn (gen_rtx_SET (VOIDmode, slot0, x));
5126 }
5127 else
5128 slot0 = x;
5129
5130 if (GET_CODE (y) != MEM)
5131 {
5132 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
5133 emit_insn (gen_rtx_SET (VOIDmode, slot1, y));
5134 }
5135 else
5136 slot1 = y;
5137
5138 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), 1,
5139 DImode, 2,
5140 XEXP (slot0, 0), Pmode,
5141 XEXP (slot1, 0), Pmode);
5142
5143 mode = DImode;
5144 }
5145 else
5146 {
5147 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), 1,
5148 SImode, 2,
5149 x, TFmode, y, TFmode);
5150
5151 mode = SImode;
5152 }
5153
5154
5155 /* Immediately move the result of the libcall into a pseudo
5156 register so reload doesn't clobber the value if it needs
5157 the return register for a spill reg. */
5158 result = gen_reg_rtx (mode);
5159 emit_move_insn (result, hard_libcall_value (mode));
5160
5161 switch (comparison)
5162 {
5163 default:
5164 emit_cmp_insn (result, const0_rtx, NE,
5165 NULL_RTX, mode, 0, 0);
5166 break;
5167 case ORDERED:
5168 case UNORDERED:
5169 emit_cmp_insn (result, GEN_INT(3),
5170 (comparison == UNORDERED) ? EQ : NE,
5171 NULL_RTX, mode, 0, 0);
5172 break;
5173 case UNGT:
5174 case UNGE:
5175 emit_cmp_insn (result, const1_rtx,
5176 (comparison == UNGT) ? GT : NE,
5177 NULL_RTX, mode, 0, 0);
5178 break;
5179 case UNLE:
5180 emit_cmp_insn (result, const2_rtx, NE,
5181 NULL_RTX, mode, 0, 0);
5182 break;
5183 case UNLT:
5184 tem = gen_reg_rtx (mode);
5185 if (TARGET_ARCH32)
5186 emit_insn (gen_andsi3 (tem, result, const1_rtx));
5187 else
5188 emit_insn (gen_anddi3 (tem, result, const1_rtx));
5189 emit_cmp_insn (tem, const0_rtx, NE,
5190 NULL_RTX, mode, 0, 0);
5191 break;
5192 case UNEQ:
5193 case LTGT:
5194 tem = gen_reg_rtx (mode);
5195 if (TARGET_ARCH32)
5196 emit_insn (gen_addsi3 (tem, result, const1_rtx));
5197 else
5198 emit_insn (gen_adddi3 (tem, result, const1_rtx));
5199 tem2 = gen_reg_rtx (mode);
5200 if (TARGET_ARCH32)
5201 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
5202 else
5203 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
5204 emit_cmp_insn (tem2, const0_rtx,
5205 (comparison == UNEQ) ? EQ : NE,
5206 NULL_RTX, mode, 0, 0);
5207 break;
5208 }
5209 }
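/* For illustration: as the cases above imply, _Q_cmp/_Qp_cmp return
   0 (equal), 1 (less), 2 (greater) or 3 (unordered).  UNLT therefore
   tests (result & 1) != 0 -- true for "less" and "unordered" alike --
   while UNORDERED simply tests result == 3.  */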
5210
5211 /* Return the string to output a conditional branch to LABEL, testing
5212 register REG. LABEL is the operand number of the label; REG is the
5213 operand number of the reg. OP is the conditional expression. The mode
5214 of REG says what kind of comparison we made.
5215
5216 REVERSED is non-zero if we should reverse the sense of the comparison.
5217
5218 ANNUL is non-zero if we should generate an annulling branch.
5219
5220 NOOP is non-zero if we have to follow this branch by a noop. */
5221
5222 char *
5223 output_v9branch (op, reg, label, reversed, annul, noop, insn)
5224 rtx op;
5225 int reg, label;
5226 int reversed, annul, noop;
5227 rtx insn;
5228 {
5229 static char string[20];
5230 enum rtx_code code = GET_CODE (op);
5231 enum machine_mode mode = GET_MODE (XEXP (op, 0));
5232 static char labelno[] = "%X, %lX";
5233 rtx note;
5234 int spaces = 8;
5235
5236 /* If not floating-point or if EQ or NE, we can just reverse the code. */
5237 if (reversed)
5238 code = reverse_condition (code), reversed = 0;
5239
5240 /* Only 64 bit versions of these instructions exist. */
5241 if (mode != DImode)
5242 abort ();
5243
5244 /* Start by writing the branch condition. */
5245
5246 switch (code)
5247 {
5248 case NE:
5249 strcpy (string, "brnz");
5250 spaces -= 4;
5251 break;
5252
5253 case EQ:
5254 strcpy (string, "brz");
5255 spaces -= 3;
5256 break;
5257
5258 case GE:
5259 strcpy (string, "brgez");
5260 spaces -= 5;
5261 break;
5262
5263 case LT:
5264 strcpy (string, "brlz");
5265 spaces -= 4;
5266 break;
5267
5268 case LE:
5269 strcpy (string, "brlez");
5270 spaces -= 5;
5271 break;
5272
5273 case GT:
5274 strcpy (string, "brgz");
5275 spaces -= 4;
5276 break;
5277
5278 default:
5279 abort ();
5280 }
5281
5282 /* Now add the annulling, reg, label, and nop. */
5283 if (annul)
5284 {
5285 strcat (string, ",a");
5286 spaces -= 2;
5287 }
5288
5289 if (insn && (note = find_reg_note (insn, REG_BR_PRED, NULL_RTX)))
5290 {
5291 strcat (string,
5292 INTVAL (XEXP (note, 0)) & ATTR_FLAG_likely ? ",pt" : ",pn");
5293 spaces -= 3;
5294 }
5295
5296 labelno[1] = reg + '0';
5297 labelno[6] = label + '0';
5298 if (spaces > 0)
5299 strcat (string, "\t");
5300 else
5301 strcat (string, " ");
5302 strcat (string, labelno);
5303
5304 if (noop)
5305 strcat (string, "\n\tnop");
5306
5307 return string;
5308 }
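/* A hypothetical example of the output above: an EQ test of register
   operand 0 against zero, branching to label operand 1, comes out as
   "brz\t%0, %l1"; with annulling and a "likely" prediction note it
   becomes "brz,a,pt %0, %l1" (a plain space once the 8-column tab
   budget is exhausted).  */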
5309
5310 /* Return 1 if any of the registers of the instruction are %l[0-7] or %o[0-7].
5311 Such instructions cannot be used in the delay slot of a return insn on v9.
5312 If TEST is 0, also rename all %i[0-7] registers to their %o[0-7] counterparts.
5313 */
5314
5315 static int
5316 epilogue_renumber (where, test)
5317 register rtx *where;
5318 int test;
5319 {
5320 register const char *fmt;
5321 register int i;
5322 register enum rtx_code code;
5323
5324 if (*where == 0)
5325 return 0;
5326
5327 code = GET_CODE (*where);
5328
5329 switch (code)
5330 {
5331 case REG:
5332 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
5333 return 1;
5334 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
5335 *where = gen_rtx (REG, GET_MODE (*where), OUTGOING_REGNO (REGNO(*where)));
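/* Fall through.  */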
5336 case SCRATCH:
5337 case CC0:
5338 case PC:
5339 case CONST_INT:
5340 case CONST_DOUBLE:
5341 return 0;
5342
5343 default:
5344 break;
5345 }
5346
5347 fmt = GET_RTX_FORMAT (code);
5348
5349 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5350 {
5351 if (fmt[i] == 'E')
5352 {
5353 register int j;
5354 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
5355 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
5356 return 1;
5357 }
5358 else if (fmt[i] == 'e'
5359 && epilogue_renumber (&(XEXP (*where, i)), test))
5360 return 1;
5361 }
5362 return 0;
5363 }
5364
5365 /* Output assembler code to return from a function. */
5366
5367 const char *
5368 output_return (operands)
5369 rtx *operands;
5370 {
5371 rtx delay = final_sequence ? XVECEXP (final_sequence, 0, 1) : 0;
5372
5373 if (leaf_label)
5374 {
5375 operands[0] = leaf_label;
5376 return "b%* %l0%(";
5377 }
5378 else if (current_function_uses_only_leaf_regs)
5379 {
5380 /* No delay slot in a leaf function. */
5381 if (delay)
5382 abort ();
5383
5384 /* If we didn't allocate a frame pointer for the current function,
5385 the stack pointer might have been adjusted. Output code to
5386 restore it now. */
5387
5388 operands[0] = GEN_INT (actual_fsize);
5389
5390 /* Use sub of negated value in first two cases instead of add to
5391 allow actual_fsize == 4096. */
5392
5393 if (actual_fsize <= 4096)
5394 {
5395 if (SKIP_CALLERS_UNIMP_P)
5396 return "jmp\t%%o7+12\n\tsub\t%%sp, -%0, %%sp";
5397 else
5398 return "retl\n\tsub\t%%sp, -%0, %%sp";
5399 }
5400 else if (actual_fsize <= 8192)
5401 {
5402 operands[0] = GEN_INT (actual_fsize - 4096);
5403 if (SKIP_CALLERS_UNIMP_P)
5404 return "sub\t%%sp, -4096, %%sp\n\tjmp\t%%o7+12\n\tsub\t%%sp, -%0, %%sp";
5405 else
5406 return "sub\t%%sp, -4096, %%sp\n\tretl\n\tsub\t%%sp, -%0, %%sp";
5407 }
5408 else if (SKIP_CALLERS_UNIMP_P)
5409 {
5410 if ((actual_fsize & 0x3ff) != 0)
5411 return "sethi\t%%hi(%a0), %%g1\n\tor\t%%g1, %%lo(%a0), %%g1\n\tjmp\t%%o7+12\n\tadd\t%%sp, %%g1, %%sp";
5412 else
5413 return "sethi\t%%hi(%a0), %%g1\n\tjmp\t%%o7+12\n\tadd\t%%sp, %%g1, %%sp";
5414 }
5415 else
5416 {
5417 if ((actual_fsize & 0x3ff) != 0)
5418 return "sethi %%hi(%a0),%%g1\n\tor %%g1,%%lo(%a0),%%g1\n\tretl\n\tadd %%sp,%%g1,%%sp";
5419 else
5420 return "sethi %%hi(%a0),%%g1\n\tretl\n\tadd %%sp,%%g1,%%sp";
5421 }
5422 }
5423 else if (TARGET_V9)
5424 {
5425 if (delay)
5426 {
5427 epilogue_renumber (&SET_DEST (PATTERN (delay)), 0);
5428 epilogue_renumber (&SET_SRC (PATTERN (delay)), 0);
5429 }
5430 if (SKIP_CALLERS_UNIMP_P)
5431 return "return\t%%i7+12%#";
5432 else
5433 return "return\t%%i7+8%#";
5434 }
5435 else
5436 {
5437 if (delay)
5438 abort ();
5439 if (SKIP_CALLERS_UNIMP_P)
5440 return "jmp\t%%i7+12\n\trestore";
5441 else
5442 return "ret\n\trestore";
5443 }
5444 }
5445 \f
5446 /* Leaf functions and non-leaf functions have different needs. */
5447
5448 static int
5449 reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
5450
5451 static int
5452 reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
5453
5454 static int *reg_alloc_orders[] = {
5455 reg_leaf_alloc_order,
5456 reg_nonleaf_alloc_order};
5457
5458 void
5459 order_regs_for_local_alloc ()
5460 {
5461 static int last_order_nonleaf = 1;
5462
5463 if (regs_ever_live[15] != last_order_nonleaf)
5464 {
5465 last_order_nonleaf = !last_order_nonleaf;
5466 bcopy ((char *) reg_alloc_orders[last_order_nonleaf],
5467 (char *) reg_alloc_order, FIRST_PSEUDO_REGISTER * sizeof (int));
5468 }
5469 }
5470 \f
5471 /* Return 1 if REG and MEM are legitimate enough to allow the various
5472 mem<-->reg splits to be run. */
5473
5474 int
5475 sparc_splitdi_legitimate (reg, mem)
5476 rtx reg;
5477 rtx mem;
5478 {
5479 /* Punt if we are here by mistake. */
5480 if (! reload_completed)
5481 abort ();
5482
5483 /* We must have an offsettable memory reference. */
5484 if (! offsettable_memref_p (mem))
5485 return 0;
5486
5487 /* If we have legitimate args for ldd/std, we do not want
5488 the split to happen. */
5489 if ((REGNO (reg) % 2) == 0
5490 && mem_min_alignment (mem, 8))
5491 return 0;
5492
5493 /* Success. */
5494 return 1;
5495 }
5496
5497 /* Return 1 if x and y are some kind of REG and they refer to
5498 different hard registers. This test is guaranteed to be
5499 run after reload. */
5500
5501 int
5502 sparc_absnegfloat_split_legitimate (x, y)
5503 rtx x, y;
5504 {
5505 if (GET_CODE (x) == SUBREG)
5506 x = alter_subreg (x);
5507 if (GET_CODE (x) != REG)
5508 return 0;
5509 if (GET_CODE (y) == SUBREG)
5510 y = alter_subreg (y);
5511 if (GET_CODE (y) != REG)
5512 return 0;
5513 if (REGNO (x) == REGNO (y))
5514 return 0;
5515 return 1;
5516 }
5517
5518 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
5519 This makes them candidates for using ldd and std insns.
5520
5521 Note reg1 and reg2 *must* be hard registers. */
5522
5523 int
5524 registers_ok_for_ldd_peep (reg1, reg2)
5525 rtx reg1, reg2;
5526 {
5527 /* We might have been passed a SUBREG. */
5528 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
5529 return 0;
5530
5531 if (REGNO (reg1) % 2 != 0)
5532 return 0;
5533
5534 /* Integer ldd is deprecated in SPARC V9. */
5535 if (TARGET_V9 && REGNO (reg1) < 32)
5536 return 0;
5537
5538 return (REGNO (reg1) == REGNO (reg2) - 1);
5539 }
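/* For illustration (hypothetical): %o0/%o1 (regnos 8/9) qualify on
   v8, %o1/%o2 fail the evenness test, any integer pair is rejected on
   v9 where integer ldd is deprecated, and fp pairs such as %f2/%f3
   (regnos 34/35) remain acceptable everywhere.  */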
5540
5541 /* Return 1 if addr1 and addr2 are suitable for use in an ldd or
5542 std insn.
5543
5544 This can only happen when addr1 and addr2 are consecutive memory
5545 locations (addr1 + 4 == addr2). addr1 must also be aligned on a
5546 64 bit boundary (addr1 % 8 == 0).
5547
5548 We know %sp and %fp are kept aligned on a 64 bit boundary. Other
5549 registers are assumed to *never* be properly aligned and are
5550 rejected.
5551
5552 Knowing %sp and %fp are kept aligned on a 64 bit boundary, we
5553 need only check that addr1's offset satisfies offset % 8 == 0. */
5554
5555 int
5556 addrs_ok_for_ldd_peep (addr1, addr2)
5557 rtx addr1, addr2;
5558 {
5559 int reg1, offset1;
5560
5561 /* Extract a register number and offset (if used) from the first addr. */
5562 if (GET_CODE (addr1) == PLUS)
5563 {
5564 /* If not a REG, return zero. */
5565 if (GET_CODE (XEXP (addr1, 0)) != REG)
5566 return 0;
5567 else
5568 {
5569 reg1 = REGNO (XEXP (addr1, 0));
5570 /* The offset must be constant! */
5571 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
5572 return 0;
5573 offset1 = INTVAL (XEXP (addr1, 1));
5574 }
5575 }
5576 else if (GET_CODE (addr1) != REG)
5577 return 0;
5578 else
5579 {
5580 reg1 = REGNO (addr1);
5581 /* This was a simple (mem (reg)) expression. Offset is 0. */
5582 offset1 = 0;
5583 }
5584
5585 /* Make sure the second address is a (mem (plus (reg) (const_int))). */
5586 if (GET_CODE (addr2) != PLUS)
5587 return 0;
5588
5589 if (GET_CODE (XEXP (addr2, 0)) != REG
5590 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
5591 return 0;
5592
5593 /* Only %fp and %sp are allowed. Additionally both addresses must
5594 use the same register. */
5595 if (reg1 != FRAME_POINTER_REGNUM && reg1 != STACK_POINTER_REGNUM)
5596 return 0;
5597
5598 if (reg1 != REGNO (XEXP (addr2, 0)))
5599 return 0;
5600
5601 /* The first offset must be evenly divisible by 8 to ensure the
5602 address is 64 bit aligned. */
5603 if (offset1 % 8 != 0)
5604 return 0;
5605
5606 /* The offset for the second addr must be 4 more than the first addr. */
5607 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
5608 return 0;
5609
5610 /* All the tests passed. addr1 and addr2 are valid for ldd and std
5611 instructions. */
5612 return 1;
5613 }
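/* For illustration (hypothetical): [%sp+8] and [%sp+12] pair up --
   same base, offset1 % 8 == 0, offset2 == offset1 + 4 -- while
   [%sp+4]/[%sp+8] fail the alignment test and [%sp+8]/[%fp+12] fail
   the same-base test.  */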
5614
5615 /* Return 1 if reg is a pseudo, or is the first register in
5616 a hard register pair. This makes it a candidate for use in
5617 ldd and std insns. */
5618
5619 int
5620 register_ok_for_ldd (reg)
5621 rtx reg;
5622 {
5623 /* We might have been passed a SUBREG. */
5624 if (GET_CODE (reg) != REG)
5625 return 0;
5626
5627 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
5628 return (REGNO (reg) % 2 == 0);
5629 else
5630 return 1;
5631 }
5632 \f
5633 /* Print operand X (an rtx) in assembler syntax to file FILE.
5634 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
5635 For `%' followed by punctuation, CODE is the punctuation and X is null. */
5636
5637 void
5638 print_operand (file, x, code)
5639 FILE *file;
5640 rtx x;
5641 int code;
5642 {
5643 switch (code)
5644 {
5645 case '#':
5646 /* Output a 'nop' if there's nothing for the delay slot. */
5647 if (dbr_sequence_length () == 0)
5648 fputs ("\n\t nop", file);
5649 return;
5650 case '*':
5651 /* Output an annul flag if there's nothing for the delay slot and we
5652 are optimizing. This is always used with '(' below. */
5653 /* Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
5654 this is a dbx bug. So, we only do this when optimizing. */
5655 /* On UltraSPARC, a branch in a delay slot causes a pipeline flush.
5656 Always emit a nop in case the next instruction is a branch. */
5657 if (dbr_sequence_length () == 0
5658 && (optimize && (int)sparc_cpu < PROCESSOR_V9))
5659 fputs (",a", file);
5660 return;
5661 case '(':
5662 /* Output a 'nop' if there's nothing for the delay slot and we are
5663 not optimizing. This is always used with '*' above. */
5664 if (dbr_sequence_length () == 0
5665 && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
5666 fputs ("\n\t nop", file);
5667 return;
5668 case '_':
5669 /* Output the Embedded Medium/Anywhere code model base register. */
5670 fputs (EMBMEDANY_BASE_REG, file);
5671 return;
5672 case '@':
5673 /* Print out what we are using as the frame pointer. This might
5674 be %fp, or might be %sp+offset. */
5675 /* ??? What if offset is too big? Perhaps the caller knows it isn't? */
5676 fprintf (file, "%s+%d", frame_base_name, frame_base_offset);
5677 return;
5678 case 'Y':
5679 /* Adjust the operand to take into account a RESTORE operation. */
5680 if (GET_CODE (x) == CONST_INT)
5681 break;
5682 else if (GET_CODE (x) != REG)
5683 output_operand_lossage ("Invalid %%Y operand");
5684 else if (REGNO (x) < 8)
5685 fputs (reg_names[REGNO (x)], file);
5686 else if (REGNO (x) >= 24 && REGNO (x) < 32)
5687 fputs (reg_names[REGNO (x)-16], file);
5688 else
5689 output_operand_lossage ("Invalid %%Y operand");
5690 return;
5691 case 'L':
5692 /* Print out the low order register name of a register pair. */
5693 if (WORDS_BIG_ENDIAN)
5694 fputs (reg_names[REGNO (x)+1], file);
5695 else
5696 fputs (reg_names[REGNO (x)], file);
5697 return;
5698 case 'H':
5699 /* Print out the high order register name of a register pair. */
5700 if (WORDS_BIG_ENDIAN)
5701 fputs (reg_names[REGNO (x)], file);
5702 else
5703 fputs (reg_names[REGNO (x)+1], file);
5704 return;
5705 case 'R':
5706 /* Print out the second register name of a register pair or quad.
5707 I.e., R (%o0) => %o1. */
5708 fputs (reg_names[REGNO (x)+1], file);
5709 return;
5710 case 'S':
5711 /* Print out the third register name of a register quad.
5712 I.e., S (%o0) => %o2. */
5713 fputs (reg_names[REGNO (x)+2], file);
5714 return;
5715 case 'T':
5716 /* Print out the fourth register name of a register quad.
5717 I.e., T (%o0) => %o3. */
5718 fputs (reg_names[REGNO (x)+3], file);
5719 return;
5720 case 'x':
5721 /* Print a condition code register. */
5722 if (REGNO (x) == SPARC_ICC_REG)
5723 {
5724 /* We don't handle CC[X]_NOOVmode because they're not supposed
5725 to occur here. */
5726 if (GET_MODE (x) == CCmode)
5727 fputs ("%icc", file);
5728 else if (GET_MODE (x) == CCXmode)
5729 fputs ("%xcc", file);
5730 else
5731 abort ();
5732 }
5733 else
5734 /* %fccN register */
5735 fputs (reg_names[REGNO (x)], file);
5736 return;
5737 case 'm':
5738 /* Print the operand's address only. */
5739 output_address (XEXP (x, 0));
5740 return;
5741 case 'r':
5742 /* In this case we need a register. Use %g0 if the
5743 operand is const0_rtx. */
5744 if (x == const0_rtx
5745 || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
5746 {
5747 fputs ("%g0", file);
5748 return;
5749 }
5750 else
5751 break;
5752
5753 case 'A':
5754 switch (GET_CODE (x))
5755 {
5756 case IOR: fputs ("or", file); break;
5757 case AND: fputs ("and", file); break;
5758 case XOR: fputs ("xor", file); break;
5759 default: output_operand_lossage ("Invalid %%A operand");
5760 }
5761 return;
5762
5763 case 'B':
5764 switch (GET_CODE (x))
5765 {
5766 case IOR: fputs ("orn", file); break;
5767 case AND: fputs ("andn", file); break;
5768 case XOR: fputs ("xnor", file); break;
5769 default: output_operand_lossage ("Invalid %%B operand");
5770 }
5771 return;
5772
5773 /* These are used by the conditional move instructions. */
5774 case 'c' :
5775 case 'C':
5776 {
5777 enum rtx_code rc = GET_CODE (x);
5778
5779 if (code == 'c')
5780 {
5781 enum machine_mode mode = GET_MODE (XEXP (x, 0));
5782 if (mode == CCFPmode || mode == CCFPEmode)
5783 rc = reverse_condition_maybe_unordered (GET_CODE (x));
5784 else
5785 rc = reverse_condition (GET_CODE (x));
5786 }
5787 switch (rc)
5788 {
5789 case NE: fputs ("ne", file); break;
5790 case EQ: fputs ("e", file); break;
5791 case GE: fputs ("ge", file); break;
5792 case GT: fputs ("g", file); break;
5793 case LE: fputs ("le", file); break;
5794 case LT: fputs ("l", file); break;
5795 case GEU: fputs ("geu", file); break;
5796 case GTU: fputs ("gu", file); break;
5797 case LEU: fputs ("leu", file); break;
5798 case LTU: fputs ("lu", file); break;
5799 case LTGT: fputs ("lg", file); break;
5800 case UNORDERED: fputs ("u", file); break;
5801 case ORDERED: fputs ("o", file); break;
5802 case UNLT: fputs ("ul", file); break;
5803 case UNLE: fputs ("ule", file); break;
5804 case UNGT: fputs ("ug", file); break;
5805 case UNGE: fputs ("uge", file); break;
5806 case UNEQ: fputs ("ue", file); break;
5807 default: output_operand_lossage (code == 'c'
5808 ? "Invalid %%c operand"
5809 : "Invalid %%C operand");
5810 }
5811 return;
5812 }
5813
5814 /* These are used by the movr instruction pattern. */
5815 case 'd':
5816 case 'D':
5817 {
5818 enum rtx_code rc = (code == 'd'
5819 ? reverse_condition (GET_CODE (x))
5820 : GET_CODE (x));
5821 switch (rc)
5822 {
5823 case NE: fputs ("ne", file); break;
5824 case EQ: fputs ("e", file); break;
5825 case GE: fputs ("gez", file); break;
5826 case LT: fputs ("lz", file); break;
5827 case LE: fputs ("lez", file); break;
5828 case GT: fputs ("gz", file); break;
5829 default: output_operand_lossage (code == 'd'
5830 ? "Invalid %%d operand"
5831 : "Invalid %%D operand");
5832 }
5833 return;
5834 }
5835
5836 case 'b':
5837 {
5838 /* Print a sign-extended character. */
5839 int i = INTVAL (x) & 0xff;
5840 if (i & 0x80)
5841 i |= 0xffffff00;
5842 fprintf (file, "%d", i);
5843 return;
5844 }
5845
5846 case 'f':
5847 /* Operand must be a MEM; write its address. */
5848 if (GET_CODE (x) != MEM)
5849 output_operand_lossage ("Invalid %%f operand");
5850 output_address (XEXP (x, 0));
5851 return;
5852
5853 case 0:
5854 /* Do nothing special. */
5855 break;
5856
5857 default:
5858 /* Undocumented flag. */
5859 output_operand_lossage ("invalid operand output code");
5860 }
5861
5862 if (GET_CODE (x) == REG)
5863 fputs (reg_names[REGNO (x)], file);
5864 else if (GET_CODE (x) == MEM)
5865 {
5866 fputc ('[', file);
5867 /* Poor Sun assembler doesn't understand absolute addressing. */
5868 if (CONSTANT_P (XEXP (x, 0)))
5869 fputs ("%g0+", file);
5870 output_address (XEXP (x, 0));
5871 fputc (']', file);
5872 }
5873 else if (GET_CODE (x) == HIGH)
5874 {
5875 fputs ("%hi(", file);
5876 output_addr_const (file, XEXP (x, 0));
5877 fputc (')', file);
5878 }
5879 else if (GET_CODE (x) == LO_SUM)
5880 {
5881 print_operand (file, XEXP (x, 0), 0);
5882 if (TARGET_CM_MEDMID)
5883 fputs ("+%l44(", file);
5884 else
5885 fputs ("+%lo(", file);
5886 output_addr_const (file, XEXP (x, 1));
5887 fputc (')', file);
5888 }
5889 else if (GET_CODE (x) == CONST_DOUBLE
5890 && (GET_MODE (x) == VOIDmode
5891 || GET_MODE_CLASS (GET_MODE (x)) == MODE_INT))
5892 {
5893 if (CONST_DOUBLE_HIGH (x) == 0)
5894 fprintf (file, "%u", (unsigned int) CONST_DOUBLE_LOW (x));
5895 else if (CONST_DOUBLE_HIGH (x) == -1
5896 && CONST_DOUBLE_LOW (x) < 0)
5897 fprintf (file, "%d", (int) CONST_DOUBLE_LOW (x));
5898 else
5899 output_operand_lossage ("long long constant not a valid immediate operand");
5900 }
5901 else if (GET_CODE (x) == CONST_DOUBLE)
5902 output_operand_lossage ("floating point constant not a valid immediate operand");
5903 else { output_addr_const (file, x); }
5904 }
5905 \f
5906 /* This function outputs assembler code for VALUE to FILE, where VALUE is
5907 a 64 bit (DImode) value. */
5908
5909 /* ??? If there is a 64 bit counterpart to .word that the assembler
5910 understands, then using that would simplify this code greatly. */
5911 /* ??? We only output .xword's for symbols and only then in environments
5912 where the assembler can handle them. */
5913
5914 void
5915 output_double_int (file, value)
5916 FILE *file;
5917 rtx value;
5918 {
5919 if (GET_CODE (value) == CONST_INT)
5920 {
5921 /* ??? This has endianness issues. */
5922 #if HOST_BITS_PER_WIDE_INT == 64
5923 HOST_WIDE_INT xword = INTVAL (value);
5924 HOST_WIDE_INT high, low;
5925
5926 high = (xword >> 32) & 0xffffffff;
5927 low = xword & 0xffffffff;
5928 ASM_OUTPUT_INT (file, GEN_INT (high));
5929 ASM_OUTPUT_INT (file, GEN_INT (low));
5930 #else
5931 if (INTVAL (value) < 0)
5932 ASM_OUTPUT_INT (file, constm1_rtx);
5933 else
5934 ASM_OUTPUT_INT (file, const0_rtx);
5935 ASM_OUTPUT_INT (file, value);
5936 #endif
5937 }
5938 else if (GET_CODE (value) == CONST_DOUBLE)
5939 {
5940 ASM_OUTPUT_INT (file, GEN_INT (CONST_DOUBLE_HIGH (value)));
5941 ASM_OUTPUT_INT (file, GEN_INT (CONST_DOUBLE_LOW (value)));
5942 }
5943 else if (GET_CODE (value) == SYMBOL_REF
5944 || GET_CODE (value) == CONST
5945 || GET_CODE (value) == PLUS
5946 || (TARGET_ARCH64 &&
5947 (GET_CODE (value) == LABEL_REF
5948 || GET_CODE (value) == CODE_LABEL
5949 || GET_CODE (value) == MINUS)))
5950 {
5951 if (! TARGET_V9)
5952 {
5953 ASM_OUTPUT_INT (file, const0_rtx);
5954 ASM_OUTPUT_INT (file, value);
5955 }
5956 else
5957 {
5958 fprintf (file, "\t%s\t", ASM_LONGLONG);
5959 output_addr_const (file, value);
5960 fprintf (file, "\n");
5961 }
5962 }
5963 else
5964 abort ();
5965 }
5966 \f
5967 /* Return the value of a code used in the .proc pseudo-op that says
5968 what kind of result this function returns. For non-C types, we pick
5969 the closest C type. */
5970
5971 #ifndef CHAR_TYPE_SIZE
5972 #define CHAR_TYPE_SIZE BITS_PER_UNIT
5973 #endif
5974
5975 #ifndef SHORT_TYPE_SIZE
5976 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
5977 #endif
5978
5979 #ifndef INT_TYPE_SIZE
5980 #define INT_TYPE_SIZE BITS_PER_WORD
5981 #endif
5982
5983 #ifndef LONG_TYPE_SIZE
5984 #define LONG_TYPE_SIZE BITS_PER_WORD
5985 #endif
5986
5987 #ifndef LONG_LONG_TYPE_SIZE
5988 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
5989 #endif
5990
5991 #ifndef FLOAT_TYPE_SIZE
5992 #define FLOAT_TYPE_SIZE BITS_PER_WORD
5993 #endif
5994
5995 #ifndef DOUBLE_TYPE_SIZE
5996 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
5997 #endif
5998
5999 #ifndef LONG_DOUBLE_TYPE_SIZE
6000 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
6001 #endif
6002
6003 unsigned long
6004 sparc_type_code (type)
6005 register tree type;
6006 {
6007 register unsigned long qualifiers = 0;
6008 register unsigned shift;
6009
6010 /* Only the first 30 bits of the qualifier are valid. We must refrain from
6011 setting more, since some assemblers will give an error for this. Also,
6012 we must be careful to avoid shifts of 32 bits or more to avoid getting
6013 unpredictable results. */
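/* A hand-traced example of the encoding, assuming 8-bit chars: for
   "unsigned char **" the two POINTER_TYPE levels contribute
   (1 << 6) | (1 << 8), and the INTEGER_TYPE case then returns
   qualifiers | 12, i.e. 0x14c in total. */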
6014
6015 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
6016 {
6017 switch (TREE_CODE (type))
6018 {
6019 case ERROR_MARK:
6020 return qualifiers;
6021
6022 case ARRAY_TYPE:
6023 qualifiers |= (3 << shift);
6024 break;
6025
6026 case FUNCTION_TYPE:
6027 case METHOD_TYPE:
6028 qualifiers |= (2 << shift);
6029 break;
6030
6031 case POINTER_TYPE:
6032 case REFERENCE_TYPE:
6033 case OFFSET_TYPE:
6034 qualifiers |= (1 << shift);
6035 break;
6036
6037 case RECORD_TYPE:
6038 return (qualifiers | 8);
6039
6040 case UNION_TYPE:
6041 case QUAL_UNION_TYPE:
6042 return (qualifiers | 9);
6043
6044 case ENUMERAL_TYPE:
6045 return (qualifiers | 10);
6046
6047 case VOID_TYPE:
6048 return (qualifiers | 16);
6049
6050 case INTEGER_TYPE:
6051 /* If this is a range type, consider it to be the underlying
6052 type. */
6053 if (TREE_TYPE (type) != 0)
6054 break;
6055
6056 /* Carefully distinguish all the standard types of C,
6057 without messing up if the language is not C. We do this by
6058 testing TYPE_PRECISION and TREE_UNSIGNED. The old code used to
6059 look at both the names and the above fields, but that's redundant.
6060 Any type whose size is between two C types will be considered
6061 to be the wider of the two types. Also, we do not have a
6062 special code to use for "long long", so anything wider than
6063 long is treated the same. Note that we can't distinguish
6064 between "int" and "long" in this code if they are the same
6065 size, but that's fine, since neither can the assembler. */
6066
6067 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
6068 return (qualifiers | (TREE_UNSIGNED (type) ? 12 : 2));
6069
6070 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
6071 return (qualifiers | (TREE_UNSIGNED (type) ? 13 : 3));
6072
6073 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
6074 return (qualifiers | (TREE_UNSIGNED (type) ? 14 : 4));
6075
6076 else
6077 return (qualifiers | (TREE_UNSIGNED (type) ? 15 : 5));
6078
6079 case REAL_TYPE:
6080 /* If this is a range type, consider it to be the underlying
6081 type. */
6082 if (TREE_TYPE (type) != 0)
6083 break;
6084
6085 /* Carefully distinguish all the standard types of C,
6086 without messing up if the language is not C. */
6087
6088 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
6089 return (qualifiers | 6);
6090
6091 else
6092 return (qualifiers | 7);
6093
6094 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
6095 /* ??? We need to distinguish between double and float complex types,
6096 but I don't know how yet because I can't reach this code from
6097 existing front-ends. */
6098 return (qualifiers | 7); /* Who knows? */
6099
6100 case CHAR_TYPE: /* GNU Pascal CHAR type. Not used in C. */
6101 case BOOLEAN_TYPE: /* GNU Fortran BOOLEAN type. */
6102 case FILE_TYPE: /* GNU Pascal FILE type. */
6103 case SET_TYPE: /* GNU Pascal SET type. */
6104 case LANG_TYPE: /* ? */
6105 return qualifiers;
6106
6107 default:
6108 abort (); /* Not a type! */
6109 }
6110 }
6111
6112 return qualifiers;
6113 }
6114 \f
6115 /* Nested function support. */
6116
6117 /* Emit RTL insns to initialize the variable parts of a trampoline.
6118 FNADDR is an RTX for the address of the function's pure code.
6119 CXT is an RTX for the static chain value for the function.
6120
6121 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
6122 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
6123 (to store insns). This is a bit excessive. Perhaps a different
6124 mechanism would be better here.
6125
6126 Emit enough FLUSH insns to synchronize the data and instruction caches. */
6127
6128 void
6129 sparc_initialize_trampoline (tramp, fnaddr, cxt)
6130 rtx tramp, fnaddr, cxt;
6131 {
6132 /* SPARC 32 bit trampoline:
6133
6134 sethi %hi(fn), %g1
6135 sethi %hi(static), %g2
6136 jmp %g1+%lo(fn)
6137 or %g2, %lo(static), %g2
6138
6139 SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
6140 JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
6141 */
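/* For reference, my decoding of the opcode constants used below
   (a sketch from the V8 instruction formats -- worth double-checking):
     0x03000000  sethi %hi(0), %g1        0x05000000  sethi %hi(0), %g2
     0x81c06000  jmpl  %g1+0, %g0         0x8410a000  or    %g2, 0, %g2
   The iors then fill in the immediates: addr >> 10 gives the 22 high
   bits for a sethi field, addr & 0x3ff the 10 low bits for a simm13. */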
6142 #ifdef TRANSFER_FROM_TRAMPOLINE
6143 emit_library_call (gen_rtx (SYMBOL_REF, Pmode, "__enable_execute_stack"),
6144 0, VOIDmode, 1, tramp, Pmode);
6145 #endif
6146
6147 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 0)),
6148 expand_binop (SImode, ior_optab,
6149 expand_shift (RSHIFT_EXPR, SImode, fnaddr,
6150 size_int (10), 0, 1),
6151 GEN_INT (0x03000000),
6152 NULL_RTX, 1, OPTAB_DIRECT));
6153
6154 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
6155 expand_binop (SImode, ior_optab,
6156 expand_shift (RSHIFT_EXPR, SImode, cxt,
6157 size_int (10), 0, 1),
6158 GEN_INT (0x05000000),
6159 NULL_RTX, 1, OPTAB_DIRECT));
6160
6161 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
6162 expand_binop (SImode, ior_optab,
6163 expand_and (fnaddr, GEN_INT (0x3ff), NULL_RTX),
6164 GEN_INT (0x81c06000),
6165 NULL_RTX, 1, OPTAB_DIRECT));
6166
6167 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
6168 expand_binop (SImode, ior_optab,
6169 expand_and (cxt, GEN_INT (0x3ff), NULL_RTX),
6170 GEN_INT (0x8410a000),
6171 NULL_RTX, 1, OPTAB_DIRECT));
6172
6173 emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode, tramp))));
6174 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
6175 aligned on a 16 byte boundary so one flush clears it all. */
6176 if (sparc_cpu != PROCESSOR_ULTRASPARC)
6177 emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode,
6178 plus_constant (tramp, 8)))));
6179 }
6180
6181 /* The 64 bit version is simpler because it makes more sense to load the
6182 values as "immediate" data out of the trampoline. It's also easier since
6183 we can read the PC without clobbering a register. */
6184
6185 void
6186 sparc64_initialize_trampoline (tramp, fnaddr, cxt)
6187 rtx tramp, fnaddr, cxt;
6188 {
6189 #ifdef TRANSFER_FROM_TRAMPOLINE
6190 emit_library_call (gen_rtx (SYMBOL_REF, Pmode, "__enable_execute_stack"),
6191 0, VOIDmode, 1, tramp, Pmode);
6192 #endif
6193
6194 /*
6195 rd %pc, %g1
6196 ldx [%g1+24], %g5
6197 jmp %g5
6198 ldx [%g1+16], %g5
6199 +16 bytes data
6200 */
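/* My decoding of the constants stored below (from the V9 formats;
   worth double-checking against the architecture manual):
     0x83414000  rd  %pc, %g1             0xca586018  ldx [%g1+24], %g5
     0x81c14000  jmpl %g5, %g0            0xca586010  ldx [%g1+16], %g5 */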
6201
6202 emit_move_insn (gen_rtx_MEM (SImode, tramp),
6203 GEN_INT (0x83414000));
6204 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
6205 GEN_INT (0xca586018));
6206 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
6207 GEN_INT (0x81c14000));
6208 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
6209 GEN_INT (0xca586010));
6210 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 16)), cxt);
6211 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 24)), fnaddr);
6212 emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, tramp))));
6213
6214 if (sparc_cpu != PROCESSOR_ULTRASPARC)
6215 emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, plus_constant (tramp, 8)))));
6216 }
6217 \f
6218 /* Subroutines to support a flat (single) register window calling
6219 convention. */
6220
6221 /* Single-register window sparc stack frames look like:
6222
6223 Before call After call
6224 +-----------------------+ +-----------------------+
6225 high | | | |
6226 mem | caller's temps. | | caller's temps. |
6227 | | | |
6228 +-----------------------+ +-----------------------+
6229 | | | |
6230 | arguments on stack. | | arguments on stack. |
6231 | | | |
6232 +-----------------------+FP+92->+-----------------------+
6233 | 6 words to save | | 6 words to save |
6234 | arguments passed | | arguments passed |
6235 | in registers, even | | in registers, even |
6236 | if not passed. | | if not passed. |
6237 SP+68->+-----------------------+FP+68->+-----------------------+
6238 | 1 word struct addr | | 1 word struct addr |
6239 +-----------------------+FP+64->+-----------------------+
6240 | | | |
6241 | 16 word reg save area | | 16 word reg save area |
6242 | | | |
6243 SP->+-----------------------+ FP->+-----------------------+
6244 | 4 word area for |
6245 | fp/alu reg moves |
6246 FP-16->+-----------------------+
6247 | |
6248 | local variables |
6249 | |
6250 +-----------------------+
6251 | |
6252 | fp register save |
6253 | |
6254 +-----------------------+
6255 | |
6256 | gp register save |
6257 | |
6258 +-----------------------+
6259 | |
6260 | alloca allocations |
6261 | |
6262 +-----------------------+
6263 | |
6264 | arguments on stack |
6265 | |
6266 SP+92->+-----------------------+
6267 | 6 words to save |
6268 | arguments passed |
6269 | in registers, even |
6270 low | if not passed. |
6271 memory SP+68->+-----------------------+
6272 | 1 word struct addr |
6273 SP+64->+-----------------------+
6274 | |
6275 | 16 word reg save area |
6276 | |
6277 SP->+-----------------------+ */
6278
6279 /* Structure to be filled in by sparc_flat_compute_frame_size with register
6280 save masks, and offsets for the current function. */
6281
6282 struct sparc_frame_info
6283 {
6284 unsigned long total_size; /* # bytes that the entire frame takes up. */
6285 unsigned long var_size; /* # bytes that variables take up. */
6286 unsigned long args_size; /* # bytes that outgoing arguments take up. */
6287 unsigned long extra_size; /* # bytes of extra gunk. */
6288 unsigned int gp_reg_size; /* # bytes needed to store gp regs. */
6289 unsigned int fp_reg_size; /* # bytes needed to store fp regs. */
6290 unsigned long gmask; /* Mask of saved gp registers. */
6291 unsigned long fmask; /* Mask of saved fp registers. */
6292 unsigned long reg_offset; /* Offset from new sp to store regs. */
6293 int initialized; /* Nonzero if frame size already calculated. */
6294 };
6295
6296 /* Current frame information calculated by sparc_flat_compute_frame_size. */
6297 struct sparc_frame_info current_frame_info;
6298
6299 /* Zero structure to initialize current_frame_info. */
6300 struct sparc_frame_info zero_frame_info;
6301
6302 /* Tell prologue and epilogue if register REGNO should be saved / restored. */
6303
6304 #define RETURN_ADDR_REGNUM 15
6305 #define FRAME_POINTER_MASK (1 << (FRAME_POINTER_REGNUM))
6306 #define RETURN_ADDR_MASK (1 << (RETURN_ADDR_REGNUM))
6307
6308 #define MUST_SAVE_REGISTER(regno) \
6309 ((regs_ever_live[regno] && !call_used_regs[regno]) \
6310 || (regno == FRAME_POINTER_REGNUM && frame_pointer_needed) \
6311 || (regno == RETURN_ADDR_REGNUM && regs_ever_live[RETURN_ADDR_REGNUM]))
6312
6313 /* Return the bytes needed to compute the frame pointer from the current
6314 stack pointer. */
6315
6316 unsigned long
6317 sparc_flat_compute_frame_size (size)
6318 int size; /* # of var. bytes allocated. */
6319 {
6320 int regno;
6321 unsigned long total_size; /* # bytes that the entire frame takes up. */
6322 unsigned long var_size; /* # bytes that variables take up. */
6323 unsigned long args_size; /* # bytes that outgoing arguments take up. */
6324 unsigned long extra_size; /* # extra bytes. */
6325 unsigned int gp_reg_size; /* # bytes needed to store gp regs. */
6326 unsigned int fp_reg_size; /* # bytes needed to store fp regs. */
6327 unsigned long gmask; /* Mask of saved gp registers. */
6328 unsigned long fmask; /* Mask of saved fp registers. */
6329 unsigned long reg_offset; /* Offset to register save area. */
6330 int need_aligned_p; /* 1 if need the save area 8 byte aligned. */
6331
6332 /* This is the size of the 16 word reg save area, 1 word struct addr
6333 area, and 4 word fp/alu register copy area. */
6334 extra_size = -STARTING_FRAME_OFFSET + FIRST_PARM_OFFSET(0);
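/* With the usual 32-bit -mflat values (STARTING_FRAME_OFFSET == -16
   and FIRST_PARM_OFFSET (0) == 68 -- an assumption worth checking
   against the target macros), this is 84 bytes: 64 + 4 + 16. */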
6335 var_size = size;
6336 gp_reg_size = 0;
6337 fp_reg_size = 0;
6338 gmask = 0;
6339 fmask = 0;
6340 reg_offset = 0;
6341 need_aligned_p = 0;
6342
6343 args_size = 0;
6344 if (!leaf_function_p ())
6345 {
6346 /* Also include the size needed for the 6 parameter registers. */
6347 args_size = current_function_outgoing_args_size + 24;
6348 }
6349 total_size = var_size + args_size;
6350
6351 /* Calculate space needed for gp registers. */
6352 for (regno = 1; regno <= 31; regno++)
6353 {
6354 if (MUST_SAVE_REGISTER (regno))
6355 {
6356 /* If we need to save two regs in a row, ensure there's room to bump
6357 up the address to align it to a doubleword boundary. */
6358 if ((regno & 0x1) == 0 && MUST_SAVE_REGISTER (regno+1))
6359 {
6360 if (gp_reg_size % 8 != 0)
6361 gp_reg_size += 4;
6362 gp_reg_size += 2 * UNITS_PER_WORD;
6363 gmask |= 3 << regno;
6364 regno++;
6365 need_aligned_p = 1;
6366 }
6367 else
6368 {
6369 gp_reg_size += UNITS_PER_WORD;
6370 gmask |= 1 << regno;
6371 }
6372 }
6373 }
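/* An example of the pairing above: if %l0/%l1 (regnos 16/17) are both
   live while gp_reg_size is currently 4, it is first bumped to 8 so
   the pair starts doubleword aligned, then grows by 8 for the two
   registers, and (3 << 16) sets bits 16 and 17 of gmask. */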
6374
6375 /* Calculate space needed for fp registers. */
6376 for (regno = 32; regno <= 63; regno++)
6377 {
6378 if (regs_ever_live[regno] && !call_used_regs[regno])
6379 {
6380 fp_reg_size += UNITS_PER_WORD;
6381 fmask |= 1 << (regno - 32);
6382 }
6383 }
6384
6385 if (gmask || fmask)
6386 {
6387 int n;
6388 reg_offset = FIRST_PARM_OFFSET(0) + args_size;
6389 /* Ensure save area is 8 byte aligned if we need it. */
6390 n = reg_offset % 8;
6391 if (need_aligned_p && n != 0)
6392 {
6393 total_size += 8 - n;
6394 reg_offset += 8 - n;
6395 }
6396 total_size += gp_reg_size + fp_reg_size;
6397 }
6398
6399 /* If we must allocate a stack frame at all, we must also allocate
6400 room for register window spillage, so as to be binary compatible
6401 with libraries and operating systems that do not use -mflat. */
6402 if (total_size > 0)
6403 total_size += extra_size;
6404 else
6405 extra_size = 0;
6406
6407 total_size = SPARC_STACK_ALIGN (total_size);
6408
6409 /* Save other computed information. */
6410 current_frame_info.total_size = total_size;
6411 current_frame_info.var_size = var_size;
6412 current_frame_info.args_size = args_size;
6413 current_frame_info.extra_size = extra_size;
6414 current_frame_info.gp_reg_size = gp_reg_size;
6415 current_frame_info.fp_reg_size = fp_reg_size;
6416 current_frame_info.gmask = gmask;
6417 current_frame_info.fmask = fmask;
6418 current_frame_info.reg_offset = reg_offset;
6419 current_frame_info.initialized = reload_completed;
6420
6421 /* Ok, we're done. */
6422 return total_size;
6423 }
6424 \f
6425 /* Save/restore registers in GMASK and FMASK at register BASE_REG plus offset
6426 OFFSET.
6427
6428 BASE_REG must be 8 byte aligned. This allows us to test OFFSET for
6429 appropriate alignment and use DOUBLEWORD_OP when we can. We assume
6430 [BASE_REG+OFFSET] will always be a valid address.
6431
6432 WORD_OP is either "st" for save, "ld" for restore.
6433 DOUBLEWORD_OP is either "std" for save, "ldd" for restore. */
6434
6435 void
6436 sparc_flat_save_restore (file, base_reg, offset, gmask, fmask, word_op,
6437 doubleword_op, base_offset)
6438 FILE *file;
6439 const char *base_reg;
6440 unsigned int offset;
6441 unsigned long gmask;
6442 unsigned long fmask;
6443 const char *word_op;
6444 const char *doubleword_op;
6445 unsigned long base_offset;
6446 {
6447 int regno;
6448
6449 if (gmask == 0 && fmask == 0)
6450 return;
6451
6452 /* Save registers starting from high to low. We've already saved the
6453 previous frame pointer and previous return address for the debugger's
6454 sake. The debugger lets us omit the nop in the epilogue if at least
6455 one register is reloaded in addition to the return address. */
6456
6457 if (gmask)
6458 {
6459 for (regno = 1; regno <= 31; regno++)
6460 {
6461 if ((gmask & (1L << regno)) != 0)
6462 {
6463 if ((regno & 0x1) == 0 && ((gmask & (1L << (regno+1))) != 0))
6464 {
6465 /* We can save two registers in a row. If we're not at a
6466 double word boundary, move to one.
6467 sparc_flat_compute_frame_size ensures there's room to do
6468 this. */
6469 if (offset % 8 != 0)
6470 offset += UNITS_PER_WORD;
6471
6472 if (word_op[0] == 's')
6473 {
6474 fprintf (file, "\t%s\t%s, [%s+%d]\n",
6475 doubleword_op, reg_names[regno],
6476 base_reg, offset);
6477 if (dwarf2out_do_frame ())
6478 {
6479 char *l = dwarf2out_cfi_label ();
6480 dwarf2out_reg_save (l, regno, offset + base_offset);
6481 dwarf2out_reg_save
6482 (l, regno+1, offset+base_offset + UNITS_PER_WORD);
6483 }
6484 }
6485 else
6486 fprintf (file, "\t%s\t[%s+%d], %s\n",
6487 doubleword_op, base_reg, offset,
6488 reg_names[regno]);
6489
6490 offset += 2 * UNITS_PER_WORD;
6491 regno++;
6492 }
6493 else
6494 {
6495 if (word_op[0] == 's')
6496 {
6497 fprintf (file, "\t%s\t%s, [%s+%d]\n",
6498 word_op, reg_names[regno],
6499 base_reg, offset);
6500 if (dwarf2out_do_frame ())
6501 dwarf2out_reg_save ("", regno, offset + base_offset);
6502 }
6503 else
6504 fprintf (file, "\t%s\t[%s+%d], %s\n",
6505 word_op, base_reg, offset, reg_names[regno]);
6506
6507 offset += UNITS_PER_WORD;
6508 }
6509 }
6510 }
6511 }
6512
6513 if (fmask)
6514 {
6515 for (regno = 32; regno <= 63; regno++)
6516 {
6517 if ((fmask & (1L << (regno - 32))) != 0)
6518 {
6519 if (word_op[0] == 's')
6520 {
6521 fprintf (file, "\t%s\t%s, [%s+%d]\n",
6522 word_op, reg_names[regno],
6523 base_reg, offset);
6524 if (dwarf2out_do_frame ())
6525 dwarf2out_reg_save ("", regno, offset + base_offset);
6526 }
6527 else
6528 fprintf (file, "\t%s\t[%s+%d], %s\n",
6529 word_op, base_reg, offset, reg_names[regno]);
6530
6531 offset += UNITS_PER_WORD;
6532 }
6533 }
6534 }
6535 }
6536 \f
6537 /* Set up the stack and frame (if desired) for the function. */
6538
6539 void
6540 sparc_flat_output_function_prologue (file, size)
6541 FILE *file;
6542 int size;
6543 {
6544 const char *sp_str = reg_names[STACK_POINTER_REGNUM];
6545 unsigned long gmask = current_frame_info.gmask;
6546
6547 sparc_output_scratch_registers (file);
6548
6549 /* This is only for the human reader. */
6550 fprintf (file, "\t%s#PROLOGUE# 0\n", ASM_COMMENT_START);
6551 fprintf (file, "\t%s# vars= %ld, regs= %d/%d, args= %d, extra= %ld\n",
6552 ASM_COMMENT_START,
6553 current_frame_info.var_size,
6554 current_frame_info.gp_reg_size / 4,
6555 current_frame_info.fp_reg_size / 4,
6556 current_function_outgoing_args_size,
6557 current_frame_info.extra_size);
6558
6559 size = SPARC_STACK_ALIGN (size);
6560 size = (! current_frame_info.initialized
6561 ? sparc_flat_compute_frame_size (size)
6562 : current_frame_info.total_size);
6563
6564 /* These cases shouldn't happen. Catch them now. */
6565 if (size == 0 && (gmask || current_frame_info.fmask))
6566 abort ();
6567
6568 /* Allocate our stack frame by decrementing %sp.
6569 At present, the only algorithm gdb can use to determine if this is a
6570 flat frame is if we always set %i7 if we set %sp. This can be optimized
6571 in the future by putting in some sort of debugging information that says
6572 this is a `flat' function. However, there is still the case of debugging
6573 code without such debugging information (including cases where most fns
6574 have such info, but there is one that doesn't). So, always do this now
6575 so we don't get a lot of code out there that gdb can't handle.
6576 If the frame pointer isn't needed then that's OK - gdb won't be able to
6577 distinguish us from a non-flat function but there won't (and shouldn't)
6578 be any differences anyway. The return pc is saved (if necessary) right
6579 after %i7 so gdb won't have to look too far to find it. */
6580 if (size > 0)
6581 {
6582 unsigned int reg_offset = current_frame_info.reg_offset;
6583 const char *fp_str = reg_names[FRAME_POINTER_REGNUM];
6584 const char *t1_str = "%g1";
6585
6586 /* Things get a little tricky if local variables take up more than ~4096
6587 bytes and outgoing arguments take up more than ~4096 bytes. When that
6588 happens, the register save area can't be accessed from either end of
6589 the frame. Handle this by decrementing %sp to the start of the gp
6590 register save area, save the regs, update %i7, and then set %sp to its
6591 final value. Given that we only have one scratch register to play
6592 with, this is the cheapest solution, and it helps gdb out as it won't
6593 slow down recognition of flat functions.
6594 Don't change the order of insns emitted here without checking with
6595 the gdb folk first. */
6596
6597 /* Is the entire register save area offsettable from %sp? */
6598 if (reg_offset < 4096 - 64 * (unsigned) UNITS_PER_WORD)
6599 {
6600 if (size <= 4096)
6601 {
6602 fprintf (file, "\tadd\t%s, %d, %s\n",
6603 sp_str, -size, sp_str);
6604 if (gmask & FRAME_POINTER_MASK)
6605 {
6606 fprintf (file, "\tst\t%s, [%s+%d]\n",
6607 fp_str, sp_str, reg_offset);
6608 fprintf (file, "\tsub\t%s, %d, %s\t%s# set up frame pointer\n",
6609 sp_str, -size, fp_str, ASM_COMMENT_START);
6610 reg_offset += 4;
6611 }
6612 }
6613 else
6614 {
6615 fprintf (file, "\tset\t%d, %s\n\tsub\t%s, %s, %s\n",
6616 size, t1_str, sp_str, t1_str, sp_str);
6617 if (gmask & FRAME_POINTER_MASK)
6618 {
6619 fprintf (file, "\tst\t%s, [%s+%d]\n",
6620 fp_str, sp_str, reg_offset);
6621 fprintf (file, "\tadd\t%s, %s, %s\t%s# set up frame pointer\n",
6622 sp_str, t1_str, fp_str, ASM_COMMENT_START);
6623 reg_offset += 4;
6624 }
6625 }
6626 if (dwarf2out_do_frame ())
6627 {
6628 char *l = dwarf2out_cfi_label ();
6629 if (gmask & FRAME_POINTER_MASK)
6630 {
6631 dwarf2out_reg_save (l, FRAME_POINTER_REGNUM,
6632 reg_offset - 4 - size);
6633 dwarf2out_def_cfa (l, FRAME_POINTER_REGNUM, 0);
6634 }
6635 else
6636 dwarf2out_def_cfa (l, STACK_POINTER_REGNUM, size);
6637 }
6638 if (gmask & RETURN_ADDR_MASK)
6639 {
6640 fprintf (file, "\tst\t%s, [%s+%d]\n",
6641 reg_names[RETURN_ADDR_REGNUM], sp_str, reg_offset);
6642 if (dwarf2out_do_frame ())
6643 dwarf2out_return_save ("", reg_offset - size);
6644 reg_offset += 4;
6645 }
6646 sparc_flat_save_restore (file, sp_str, reg_offset,
6647 gmask & ~(FRAME_POINTER_MASK | RETURN_ADDR_MASK),
6648 current_frame_info.fmask,
6649 "st", "std", -size);
6650 }
6651 else
6652 {
6653 /* Subtract %sp in two steps, but make sure there is always a
6654 64 byte register save area, and %sp is properly aligned. */
6655 /* Amount to decrement %sp by, the first time. */
6656 unsigned int size1 = ((size - reg_offset + 64) + 15) & -16;
6657 /* Offset to register save area from %sp. */
6658 unsigned int offset = size1 - (size - reg_offset);
6659
6660 if (size1 <= 4096)
6661 {
6662 fprintf (file, "\tadd\t%s, %d, %s\n",
6663 sp_str, -size1, sp_str);
6664 if (gmask & FRAME_POINTER_MASK)
6665 {
6666 fprintf (file, "\tst\t%s, [%s+%d]\n\tsub\t%s, %d, %s\t%s# set up frame pointer\n",
6667 fp_str, sp_str, offset, sp_str, -size1, fp_str,
6668 ASM_COMMENT_START);
6669 offset += 4;
6670 }
6671 }
6672 else
6673 {
6674 fprintf (file, "\tset\t%d, %s\n\tsub\t%s, %s, %s\n",
6675 size1, t1_str, sp_str, t1_str, sp_str);
6676 if (gmask & FRAME_POINTER_MASK)
6677 {
6678 fprintf (file, "\tst\t%s, [%s+%d]\n\tadd\t%s, %s, %s\t%s# set up frame pointer\n",
6679 fp_str, sp_str, offset, sp_str, t1_str, fp_str,
6680 ASM_COMMENT_START);
6681 offset += 4;
6682 }
6683 }
6684 if (dwarf2out_do_frame ())
6685 {
6686 char *l = dwarf2out_cfi_label ();
6687 if (gmask & FRAME_POINTER_MASK)
6688 {
6689 dwarf2out_reg_save (l, FRAME_POINTER_REGNUM,
6690 offset - 4 - size1);
6691 dwarf2out_def_cfa (l, FRAME_POINTER_REGNUM, 0);
6692 }
6693 else
6694 dwarf2out_def_cfa (l, STACK_POINTER_REGNUM, size1);
6695 }
6696 if (gmask & RETURN_ADDR_MASK)
6697 {
6698 fprintf (file, "\tst\t%s, [%s+%d]\n",
6699 reg_names[RETURN_ADDR_REGNUM], sp_str, offset);
6700 if (dwarf2out_do_frame ())
6701 /* offset - size1 == reg_offset - size
6702 if reg_offset were updated above like offset. */
6703 dwarf2out_return_save ("", offset - size1);
6704 offset += 4;
6705 }
6706 sparc_flat_save_restore (file, sp_str, offset,
6707 gmask & ~(FRAME_POINTER_MASK | RETURN_ADDR_MASK),
6708 current_frame_info.fmask,
6709 "st", "std", -size1);
6710 fprintf (file, "\tset\t%d, %s\n\tsub\t%s, %s, %s\n",
6711 size - size1, t1_str, sp_str, t1_str, sp_str);
6712 if (dwarf2out_do_frame ())
6713 if (! (gmask & FRAME_POINTER_MASK))
6714 dwarf2out_def_cfa ("", STACK_POINTER_REGNUM, size);
6715 }
6716 }
6717
6718 fprintf (file, "\t%s#PROLOGUE# 1\n", ASM_COMMENT_START);
6719 }
6720 \f
6721 /* Do any necessary cleanup after a function to restore stack, frame,
6722 and regs. */
6723
6724 void
6725 sparc_flat_output_function_epilogue (file, size)
6726 FILE *file;
6727 int size;
6728 {
6729 rtx epilogue_delay = current_function_epilogue_delay_list;
6730 int noepilogue = FALSE;
6731
6732 /* This is only for the human reader. */
6733 fprintf (file, "\t%s#EPILOGUE#\n", ASM_COMMENT_START);
6734
6735 /* The epilogue does not depend on any registers other than the stack
6736 registers, so we assume that if we have 1 pending nop, it can be
6737 ignored, and that if we have 2 they must be filled (2 nops occur for
6738 integer multiply and divide). */
6739
6740 size = SPARC_STACK_ALIGN (size);
6741 size = (!current_frame_info.initialized
6742 ? sparc_flat_compute_frame_size (size)
6743 : current_frame_info.total_size);
6744
6745 if (size == 0 && epilogue_delay == 0)
6746 {
6747 rtx insn = get_last_insn ();
6748
6749 /* If the last insn was a BARRIER, we don't have to write any code
6750 because a jump (aka return) was put there. */
6751 if (GET_CODE (insn) == NOTE)
6752 insn = prev_nonnote_insn (insn);
6753 if (insn && GET_CODE (insn) == BARRIER)
6754 noepilogue = TRUE;
6755 }
6756
6757 if (!noepilogue)
6758 {
6759 unsigned int reg_offset = current_frame_info.reg_offset;
6760 unsigned int size1;
6761 const char *sp_str = reg_names[STACK_POINTER_REGNUM];
6762 const char *fp_str = reg_names[FRAME_POINTER_REGNUM];
6763 const char *t1_str = "%g1";
6764
6765 /* In the reload sequence, we don't need to fill the load delay
6766 slots for most of the loads; also, see if we can fill the final
6767 delay slot if it is not otherwise filled by the reload sequence. */
6768
6769 if (size > 4095)
6770 fprintf (file, "\tset\t%d, %s\n", size, t1_str);
6771
6772 if (frame_pointer_needed)
6773 {
6774 if (size > 4095)
6775 fprintf (file,"\tsub\t%s, %s, %s\t\t%s# sp not trusted here\n",
6776 fp_str, t1_str, sp_str, ASM_COMMENT_START);
6777 else
6778 fprintf (file,"\tsub\t%s, %d, %s\t\t%s# sp not trusted here\n",
6779 fp_str, size, sp_str, ASM_COMMENT_START);
6780 }
6781
6782 /* Is the entire register save area offsettable from %sp? */
6783 if (reg_offset < 4096 - 64 * (unsigned) UNITS_PER_WORD)
6784 {
6785 size1 = 0;
6786 }
6787 else
6788 {
6789 /* Restore %sp in two steps, but make sure there is always a
6790 64 byte register save area, and %sp is properly aligned. */
6791 /* Amount to increment %sp by, the first time. */
6792 size1 = ((reg_offset - 64 - 16) + 15) & -16;
6793 /* Offset to register save area from %sp. */
6794 reg_offset = size1 - reg_offset;
6795
6796 fprintf (file, "\tset\t%d, %s\n\tadd\t%s, %s, %s\n",
6797 size1, t1_str, sp_str, t1_str, sp_str);
6798 }
6799
6800 /* We must restore the frame pointer and return address reg first
6801 because they are treated specially by the prologue output code. */
6802 if (current_frame_info.gmask & FRAME_POINTER_MASK)
6803 {
6804 fprintf (file, "\tld\t[%s+%d], %s\n",
6805 sp_str, reg_offset, fp_str);
6806 reg_offset += 4;
6807 }
6808 if (current_frame_info.gmask & RETURN_ADDR_MASK)
6809 {
6810 fprintf (file, "\tld\t[%s+%d], %s\n",
6811 sp_str, reg_offset, reg_names[RETURN_ADDR_REGNUM]);
6812 reg_offset += 4;
6813 }
6814
6815 /* Restore any remaining saved registers. */
6816 sparc_flat_save_restore (file, sp_str, reg_offset,
6817 current_frame_info.gmask & ~(FRAME_POINTER_MASK | RETURN_ADDR_MASK),
6818 current_frame_info.fmask,
6819 "ld", "ldd", 0);
6820
6821 /* If we had to increment %sp in two steps, compute the amount still
6822 to be added, and load it into %g1 if it is too big for an immediate. */
6823 if (size1 > 0)
6824 {
6825 size -= size1;
6826 if (size > 4095)
6827 fprintf (file, "\tset\t%d, %s\n",
6828 size, t1_str);
6829 }
6830
6831 if (current_function_returns_struct)
6832 fprintf (file, "\tjmp\t%%o7+12\n");
6833 else
6834 fprintf (file, "\tretl\n");
6835
6836 /* If the only register saved is the return address, we need a
6837 nop, unless we have an instruction to put into it. Otherwise
6838 we don't since reloading multiple registers doesn't reference
6839 the register being loaded. */
6840
6841 if (epilogue_delay)
6842 {
6843 if (size)
6844 abort ();
6845 final_scan_insn (XEXP (epilogue_delay, 0), file, 1, -2, 1);
6846 }
6847
6848 else if (size > 4095)
6849 fprintf (file, "\tadd\t%s, %s, %s\n", sp_str, t1_str, sp_str);
6850
6851 else if (size > 0)
6852 fprintf (file, "\tadd\t%s, %d, %s\n", sp_str, size, sp_str);
6853
6854 else
6855 fprintf (file, "\tnop\n");
6856 }
6857
6858 /* Reset state info for each function. */
6859 current_frame_info = zero_frame_info;
6860
6861 sparc_output_deferred_case_vectors ();
6862 }
6863 \f
6864 /* Define the number of delay slots needed for the function epilogue.
6865
6866 On the sparc, we need a slot if either no stack has been allocated,
6867 or the only register saved is the return register. */
6868
6869 int
6870 sparc_flat_epilogue_delay_slots ()
6871 {
6872 if (!current_frame_info.initialized)
6873 (void) sparc_flat_compute_frame_size (get_frame_size ());
6874
6875 if (current_frame_info.total_size == 0)
6876 return 1;
6877
6878 return 0;
6879 }
6880
6881 /* Return true if TRIAL is a valid insn for the epilogue delay slot.
6882 Any single length instruction which doesn't reference the stack or frame
6883 pointer is OK. */
6884
6885 int
6886 sparc_flat_eligible_for_epilogue_delay (trial, slot)
6887 rtx trial;
6888 int slot ATTRIBUTE_UNUSED;
6889 {
6890 rtx pat = PATTERN (trial);
6891
6892 if (get_attr_length (trial) != 1)
6893 return 0;
6894
6895 if (! reg_mentioned_p (stack_pointer_rtx, pat)
6896 && ! reg_mentioned_p (frame_pointer_rtx, pat))
6897 return 1;
6898
6899 return 0;
6900 }
6901 \f
6902 /* Adjust the cost of a scheduling dependency. Return the new cost of
6903 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
6904
6905 static int
6906 supersparc_adjust_cost (insn, link, dep_insn, cost)
6907 rtx insn;
6908 rtx link;
6909 rtx dep_insn;
6910 int cost;
6911 {
6912 enum attr_type insn_type;
6913
6914 if (! recog_memoized (insn))
6915 return 0;
6916
6917 insn_type = get_attr_type (insn);
6918
6919 if (REG_NOTE_KIND (link) == 0)
6920 {
6921 /* Data dependency; DEP_INSN writes a register that INSN reads some
6922 cycles later. */
6923
6924 /* If a load, then the dependence must be on the memory address;
6925 add an extra "cycle". Note that the cost could be two cycles
6926 if the reg was written late in an instruction group; we cannot tell
6927 here. */
6928 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
6929 return cost + 3;
6930
6931 /* Get the delay only if the address of the store is the dependence. */
6932 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
6933 {
6934 rtx pat = PATTERN (insn);
6935 rtx dep_pat = PATTERN (dep_insn);
6936
6937 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
6938 return cost; /* This should not happen! */
6939
6940 /* The dependency between the two instructions was on the data that
6941 is being stored. Assume that this implies that the address of the
6942 store is not dependent. */
6943 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
6944 return cost;
6945
6946 return cost + 3; /* An approximation. */
6947 }
6948
6949 /* A shift instruction cannot receive its data from an instruction
6950 in the same cycle; add a one cycle penalty. */
6951 if (insn_type == TYPE_SHIFT)
6952 return cost + 3; /* Split before cascade into shift. */
6953 }
6954 else
6955 {
6956 /* Anti- or output- dependency; DEP_INSN reads/writes a register that
6957 INSN writes some cycles later. */
6958
6959 /* These are only significant for the fpu unit; writing a fp reg before
6960 the fpu has finished with it stalls the processor. */
6961
6962 /* Reusing an integer register causes no problems. */
6963 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
6964 return 0;
6965 }
6966
6967 return cost;
6968 }
6969
6970 static int
6971 hypersparc_adjust_cost (insn, link, dep_insn, cost)
6972 rtx insn;
6973 rtx link;
6974 rtx dep_insn;
6975 int cost;
6976 {
6977 enum attr_type insn_type, dep_type;
6978 rtx pat = PATTERN (insn);
6979 rtx dep_pat = PATTERN (dep_insn);
6980
6981 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
6982 return cost;
6983
6984 insn_type = get_attr_type (insn);
6985 dep_type = get_attr_type (dep_insn);
6986
6987 switch (REG_NOTE_KIND (link))
6988 {
6989 case 0:
6990 /* Data dependency; DEP_INSN writes a register that INSN reads some
6991 cycles later. */
6992
6993 switch (insn_type)
6994 {
6995 case TYPE_STORE:
6996 case TYPE_FPSTORE:
6997 /* Get the delay iff the address of the store is the dependence. */
6998 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
6999 return cost;
7000
7001 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7002 return cost;
7003 return cost + 3;
7004
7005 case TYPE_LOAD:
7006 case TYPE_SLOAD:
7007 case TYPE_FPLOAD:
7008 /* If a load, then the dependence must be on the memory address. If
7009 the addresses aren't equal, then it might be a false dependency. */
7010 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
7011 {
7012 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
7013 || GET_CODE (SET_DEST (dep_pat)) != MEM
7014 || GET_CODE (SET_SRC (pat)) != MEM
7015 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
7016 XEXP (SET_SRC (pat), 0)))
7017 return cost + 2;
7018
7019 return cost + 8;
7020 }
7021 break;
7022
7023 case TYPE_BRANCH:
7024 /* Compare to branch latency is 0. There is no benefit from
7025 separating compare and branch. */
7026 if (dep_type == TYPE_COMPARE)
7027 return 0;
7028 /* Floating point compare to branch latency is less than
7029 compare to conditional move. */
7030 if (dep_type == TYPE_FPCMP)
7031 return cost - 1;
7032 break;
7033 default:
7034 break;
7035 }
7036 break;
7037
7038 case REG_DEP_ANTI:
7039 /* Anti-dependencies only penalize the fpu unit. */
7040 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7041 return 0;
7042 break;
7043
7044 default:
7045 break;
7046 }
7047
7048 return cost;
7049 }
7050
7051 static int
7052 ultrasparc_adjust_cost (insn, link, dep_insn, cost)
7053 rtx insn;
7054 rtx link;
7055 rtx dep_insn;
7056 int cost;
7057 {
7058 enum attr_type insn_type, dep_type;
7059 rtx pat = PATTERN (insn);
7060 rtx dep_pat = PATTERN (dep_insn);
7061
7062 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
7063 return cost;
7064
7065 insn_type = get_attr_type (insn);
7066 dep_type = get_attr_type (dep_insn);
7067
7068 /* Nothing issues in parallel with integer multiplies, so
7069 mark as zero cost since the scheduler cannot do anything
7070 about it. */
7071 if (insn_type == TYPE_IMUL)
7072 return 0;
7073
7074 #define SLOW_FP(dep_type) \
7075 (dep_type == TYPE_FPSQRTS || dep_type == TYPE_FPSQRTD || \
7076 dep_type == TYPE_FPDIVS || dep_type == TYPE_FPDIVD)
7077
7078 switch (REG_NOTE_KIND (link))
7079 {
7080 case 0:
7081 /* Data dependency; DEP_INSN writes a register that INSN reads some
7082 cycles later. */
7083
7084 if (dep_type == TYPE_CMOVE)
7085 {
7086 /* Instructions that read the result of conditional moves cannot
7087 be in the same group or the following group. */
7088 return cost + 1;
7089 }
7090
7091 switch (insn_type)
7092 {
7093 /* UltraSPARC can dual issue a store and an instruction setting
7094 the value stored, except for divide and square root. */
7095 case TYPE_FPSTORE:
7096 if (! SLOW_FP (dep_type))
7097 return 0;
7098 return cost;
7099
7100 case TYPE_STORE:
7101 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7102 return cost;
7103
7104 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7105 /* The dependency between the two instructions is on the data
7106 that is being stored. Assume that the address of the store
7107 is not also dependent. */
7108 return 0;
7109 return cost;
7110
7111 case TYPE_LOAD:
7112 case TYPE_SLOAD:
7113 case TYPE_FPLOAD:
7114 /* A load does not return data until at least 11 cycles after
7115 a store to the same location. 3 cycles are accounted for
7116 in the load latency; add the other 8 here. */
7117 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
7118 {
7119 /* If the addresses are not equal this may be a false
7120 dependency because pointer aliasing could not be
7121 determined. Add only 2 cycles in that case. 2 is
7122 an arbitrary compromise between 8, which would cause
7123 the scheduler to generate worse code elsewhere to
7124 compensate for a dependency which might not really
7125 exist, and 0. */
7126 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
7127 || GET_CODE (SET_SRC (pat)) != MEM
7128 || GET_CODE (SET_DEST (dep_pat)) != MEM
7129 || ! rtx_equal_p (XEXP (SET_SRC (pat), 0),
7130 XEXP (SET_DEST (dep_pat), 0)))
7131 return cost + 2;
7132
7133 return cost + 8;
7134 }
7135 return cost;
7136
7137 case TYPE_BRANCH:
7138 /* Compare to branch latency is 0. There is no benefit from
7139 separating compare and branch. */
7140 if (dep_type == TYPE_COMPARE)
7141 return 0;
7142 /* Floating point compare to branch latency is less than
7143 compare to conditional move. */
7144 if (dep_type == TYPE_FPCMP)
7145 return cost - 1;
7146 return cost;
7147
7148 case TYPE_FPCMOVE:
7149 /* FMOVR class instructions cannot issue in the same cycle
7150 or the cycle after an instruction which writes any
7151 integer register. Model this as cost 2 for dependent
7152 instructions. */
7153 if ((dep_type == TYPE_IALU || dep_type == TYPE_UNARY
7154 || dep_type == TYPE_BINARY)
7155 && cost < 2)
7156 return 2;
7157 /* Otherwise check as for integer conditional moves. */
7158
7159 case TYPE_CMOVE:
7160 /* Conditional moves involving integer registers wait until
7161 3 cycles after loads return data. The interlock applies
7162 to all loads, not just dependent loads, but that is hard
7163 to model. */
7164 if (dep_type == TYPE_LOAD || dep_type == TYPE_SLOAD)
7165 return cost + 3;
7166 return cost;
7167
7168 default:
7169 break;
7170 }
7171 break;
7172
7173 case REG_DEP_ANTI:
7174 /* Divide and square root lock destination registers for full latency. */
7175 if (! SLOW_FP (dep_type))
7176 return 0;
7177 break;
7178
7179 case REG_DEP_OUTPUT:
7180 /* IEU and FPU instructions that have the same destination
7181 register cannot be grouped together. */
7182 return cost + 1;
7183
7184 default:
7185 break;
7186 }
7187
7188 /* Other costs not accounted for:
7189 - Single precision floating point loads lock the other half of
7190 the even/odd register pair.
7191 - Several hazards associated with ldd/std are ignored because these
7192 instructions are rarely generated for V9.
7193 - The floating point pipeline cannot have both a single and double
7194 precision operation active at the same time. Format conversions
7195 and graphics instructions are given honorary double precision status.
7196 - call and jmpl are always the first instruction in a group. */
7197
7198 return cost;
7199
7200 #undef SLOW_FP
7201 }
7202
7203 int
7204 sparc_adjust_cost(insn, link, dep, cost)
7205 rtx insn;
7206 rtx link;
7207 rtx dep;
7208 int cost;
7209 {
7210 switch (sparc_cpu)
7211 {
7212 case PROCESSOR_SUPERSPARC:
7213 cost = supersparc_adjust_cost (insn, link, dep, cost);
7214 break;
7215 case PROCESSOR_HYPERSPARC:
7216 case PROCESSOR_SPARCLITE86X:
7217 cost = hypersparc_adjust_cost (insn, link, dep, cost);
7218 break;
7219 case PROCESSOR_ULTRASPARC:
7220 cost = ultrasparc_adjust_cost (insn, link, dep, cost);
7221 break;
7222 default:
7223 break;
7224 }
7225 return cost;
7226 }
7227
7228 /* This describes the state of the UltraSPARC pipeline during
7229 instruction scheduling. */
7230
7231 #define TMASK(__x) ((unsigned)1 << ((int)(__x)))
7232 #define UMASK(__x) ((unsigned)1 << ((int)(__x)))
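/* Both expand to a one-bit set: TMASK is indexed by insn type
   attribute, UMASK by ultra_code. E.g. TMASK (TYPE_LOAD)
   | TMASK (TYPE_STORE) builds a type set testable with one AND. */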
7233
7234 enum ultra_code { NONE=0, /* no insn at all */
7235 IEU0, /* shifts and conditional moves */
7236 IEU1, /* condition code setting insns, calls+jumps */
7237 IEUN, /* all other single cycle ieu insns */
7238 LSU, /* loads and stores */
7239 CTI, /* branches */
7240 FPM, /* FPU pipeline 1, multiplies and divides */
7241 FPA, /* FPU pipeline 2, all other operations */
7242 SINGLE, /* single issue instructions */
7243 NUM_ULTRA_CODES };
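/* The model below dispatches at most four insns per group, no more
   than two of them IEU operations; free_slot_mask and num_ieu_insns
   in the pipeline state enforce these limits. */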
7244
7245 static enum ultra_code ultra_code_from_mask PARAMS ((int));
7246 static void ultra_schedule_insn PARAMS ((rtx *, rtx *, int, enum ultra_code));
7247
7248 static const char *ultra_code_names[NUM_ULTRA_CODES] = {
7249 "NONE", "IEU0", "IEU1", "IEUN", "LSU", "CTI",
7250 "FPM", "FPA", "SINGLE" };
7251
7252 struct ultrasparc_pipeline_state {
7253 /* The insns in this group. */
7254 rtx group[4];
7255
7256 /* The code for each insn. */
7257 enum ultra_code codes[4];
7258
7259 /* Which insns in this group have been committed by the
7260 scheduler. This is how we determine how many more
7261 can issue this cycle. */
7262 char commit[4];
7263
7264 /* How many insns in this group. */
7265 char group_size;
7266
7267 /* Mask of free slots still in this group. */
7268 char free_slot_mask;
7269
7270 /* The slotter uses the following to determine what other
7271 insn types can still make their way into this group. */
7272 char contents [NUM_ULTRA_CODES];
7273 char num_ieu_insns;
7274 };
7275
7276 #define ULTRA_NUM_HIST 8
7277 static struct ultrasparc_pipeline_state ultra_pipe_hist[ULTRA_NUM_HIST];
7278 static int ultra_cur_hist;
7279 static int ultra_cycles_elapsed;
7280
7281 #define ultra_pipe (ultra_pipe_hist[ultra_cur_hist])
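/* The history array is used as a ring buffer; indices wrap via
   "& (ULTRA_NUM_HIST - 1)", which relies on ULTRA_NUM_HIST being
   a power of two. */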
7282
7283 /* Given TYPE_MASK compute the ultra_code it has. */
7284 static enum ultra_code
7285 ultra_code_from_mask (type_mask)
7286 int type_mask;
7287 {
7288 if (type_mask & (TMASK (TYPE_SHIFT) | TMASK (TYPE_CMOVE)))
7289 return IEU0;
7290 else if (type_mask & (TMASK (TYPE_COMPARE) |
7291 TMASK (TYPE_CALL) |
7292 TMASK (TYPE_SIBCALL) |
7293 TMASK (TYPE_UNCOND_BRANCH)))
7294 return IEU1;
7295 else if (type_mask & (TMASK (TYPE_IALU) | TMASK (TYPE_BINARY) |
7296 TMASK (TYPE_MOVE) | TMASK (TYPE_UNARY)))
7297 return IEUN;
7298 else if (type_mask & (TMASK (TYPE_LOAD) | TMASK (TYPE_SLOAD) |
7299 TMASK (TYPE_STORE) | TMASK (TYPE_FPLOAD) |
7300 TMASK (TYPE_FPSTORE)))
7301 return LSU;
7302 else if (type_mask & (TMASK (TYPE_FPMUL) | TMASK (TYPE_FPDIVS) |
7303 TMASK (TYPE_FPDIVD) | TMASK (TYPE_FPSQRTS) |
7304 TMASK (TYPE_FPSQRTD)))
7305 return FPM;
7306 else if (type_mask & (TMASK (TYPE_FPMOVE) | TMASK (TYPE_FPCMOVE) |
7307 TMASK (TYPE_FP) | TMASK (TYPE_FPCMP)))
7308 return FPA;
7309 else if (type_mask & TMASK (TYPE_BRANCH))
7310 return CTI;
7311
7312 return SINGLE;
7313 }
7314
7315 /* Check INSN (a conditional move) and make sure that its
7316 results are available at this cycle. Return 1 if the
7317 results are in fact ready. */
7318 static int
7319 ultra_cmove_results_ready_p (insn)
7320 rtx insn;
7321 {
7322 struct ultrasparc_pipeline_state *up;
7323 int entry, slot;
7324
7325 /* If this got dispatched in the previous
7326 group, the results are not ready. */
7327 entry = (ultra_cur_hist - 1) & (ULTRA_NUM_HIST - 1);
7328 up = &ultra_pipe_hist[entry];
7329 slot = 4;
7330 while (--slot >= 0)
7331 if (up->group[slot] == insn)
7332 return 0;
7333
7334 return 1;
7335 }
7336
7337 /* Walk backwards in pipeline history looking for FPU
7338 operations which use a mode different from FPMODE and
7339 will create a stall if an insn using FPMODE were to be
7340 dispatched this cycle. */
7341 static int
7342 ultra_fpmode_conflict_exists (fpmode)
7343 enum machine_mode fpmode;
7344 {
7345 int hist_ent;
7346 int hist_lim;
7347
7348 hist_ent = (ultra_cur_hist - 1) & (ULTRA_NUM_HIST - 1);
7349 if (ultra_cycles_elapsed < 4)
7350 hist_lim = ultra_cycles_elapsed;
7351 else
7352 hist_lim = 4;
7353 while (hist_lim > 0)
7354 {
7355 struct ultrasparc_pipeline_state *up = &ultra_pipe_hist[hist_ent];
7356 int slot = 4;
7357
7358 while (--slot >= 0)
7359 {
7360 rtx insn = up->group[slot];
7361 enum machine_mode this_mode;
7362 rtx pat;
7363
7364 if (! insn
7365 || GET_CODE (insn) != INSN
7366 || (pat = PATTERN (insn)) == 0
7367 || GET_CODE (pat) != SET)
7368 continue;
7369
7370 this_mode = GET_MODE (SET_DEST (pat));
7371 if ((this_mode != SFmode
7372 && this_mode != DFmode)
7373 || this_mode == fpmode)
7374 continue;
7375
7376 /* If it is not FMOV, FABS, FNEG, FDIV, or FSQRT then
7377 we will get a stall. Loads and stores are independent
7378 of these rules. */
7379 if (GET_CODE (SET_SRC (pat)) != ABS
7380 && GET_CODE (SET_SRC (pat)) != NEG
7381 && ((TMASK (get_attr_type (insn)) &
7382 (TMASK (TYPE_FPDIVS) | TMASK (TYPE_FPDIVD) |
7383 TMASK (TYPE_FPMOVE) | TMASK (TYPE_FPSQRTS) |
7384 TMASK (TYPE_FPSQRTD) |
7385 TMASK (TYPE_LOAD) | TMASK (TYPE_STORE))) == 0))
7386 return 1;
7387 }
7388 hist_lim--;
7389 hist_ent = (hist_ent - 1) & (ULTRA_NUM_HIST - 1);
7390 }
7391
7392 /* No conflicts, safe to dispatch. */
7393 return 0;
7394 }
7395
7396 /* Find an instruction in LIST which has one of the
7397 type attributes enumerated in TYPE_MASK. START
7398 says where to begin the search.
7399
7400 NOTE: This scheme depends upon the fact that we
7401 have less than 32 distinct type attributes. */
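/* The return value points into LIST itself, so a caller can hand it
   straight to ultra_schedule_insn, which rotates the chosen insn into
   the slot currently being filled. */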
7402
7403 static int ultra_types_avail;
7404
7405 static rtx *
7406 ultra_find_type (type_mask, list, start)
7407 int type_mask;
7408 rtx *list;
7409 int start;
7410 {
7411 int i;
7412
7413 /* Short circuit if no such insn exists in the ready list
7414 at the moment. */
7415 if ((type_mask & ultra_types_avail) == 0)
7416 return 0;
7417
7418 for (i = start; i >= 0; i--)
7419 {
7420 rtx insn = list[i];
7421
7422 if (recog_memoized (insn) >= 0
7423 && (TMASK (get_attr_type (insn)) & type_mask))
7424 {
7425 enum machine_mode fpmode = SFmode;
7426 rtx pat = 0;
7427 int slot;
7428 int check_depend = 0;
7429 int check_fpmode_conflict = 0;
7430
7431 if (GET_CODE (insn) == INSN
7432 && (pat = PATTERN (insn)) != 0
7433 && GET_CODE (pat) == SET
7434 && !(type_mask & (TMASK (TYPE_STORE) |
7435 TMASK (TYPE_FPSTORE))))
7436 {
7437 check_depend = 1;
7438 if (GET_MODE (SET_DEST (pat)) == SFmode
7439 || GET_MODE (SET_DEST (pat)) == DFmode)
7440 {
7441 fpmode = GET_MODE (SET_DEST (pat));
7442 check_fpmode_conflict = 1;
7443 }
7444 }
7445
7446 slot = 4;
7447 while (--slot >= 0)
7448 {
7449 rtx slot_insn = ultra_pipe.group[slot];
7450 rtx slot_pat;
7451
7452 /* Already issued, bad dependency, or FPU
7453 mode conflict. */
7454 if (slot_insn != 0
7455 && (slot_pat = PATTERN (slot_insn)) != 0
7456 && ((insn == slot_insn)
7457 || (check_depend == 1
7458 && GET_CODE (slot_insn) == INSN
7459 && GET_CODE (slot_pat) == SET
7460 && ((GET_CODE (SET_DEST (slot_pat)) == REG
7461 && GET_CODE (SET_SRC (pat)) == REG
7462 && REGNO (SET_DEST (slot_pat)) ==
7463 REGNO (SET_SRC (pat)))
7464 || (GET_CODE (SET_DEST (slot_pat)) == SUBREG
7465 && GET_CODE (SET_SRC (pat)) == SUBREG
7466 && REGNO (SUBREG_REG (SET_DEST (slot_pat))) ==
7467 REGNO (SUBREG_REG (SET_SRC (pat)))
7468 && SUBREG_WORD (SET_DEST (slot_pat)) ==
7469 SUBREG_WORD (SET_SRC (pat)))))
7470 || (check_fpmode_conflict == 1
7471 && GET_CODE (slot_insn) == INSN
7472 && GET_CODE (slot_pat) == SET
7473 && (GET_MODE (SET_DEST (slot_pat)) == SFmode
7474 || GET_MODE (SET_DEST (slot_pat)) == DFmode)
7475 && GET_MODE (SET_DEST (slot_pat)) != fpmode)))
7476 goto next;
7477 }
7478
7479 /* Check for peculiar result availability and dispatch
7480 interference situations. */
7481 if (pat != 0
7482 && ultra_cycles_elapsed > 0)
7483 {
7484 rtx link;
7485
7486 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
7487 {
7488 rtx link_insn = XEXP (link, 0);
7489 if (GET_CODE (link_insn) == INSN
7490 && recog_memoized (link_insn) >= 0
7491 && (TMASK (get_attr_type (link_insn)) &
7492 (TMASK (TYPE_CMOVE) | TMASK (TYPE_FPCMOVE)))
7493 && ! ultra_cmove_results_ready_p (link_insn))
7494 goto next;
7495 }
7496
7497 if (check_fpmode_conflict
7498 && ultra_fpmode_conflict_exists (fpmode))
7499 goto next;
7500 }
7501
7502 return &list[i];
7503 }
7504 next:
7505 ;
7506 }
7507 return 0;
7508 }
7509
7510 static void
7511 ultra_build_types_avail (ready, n_ready)
7512 rtx *ready;
7513 int n_ready;
7514 {
7515 int i = n_ready - 1;
7516
7517 ultra_types_avail = 0;
7518 while (i >= 0)
7519 {
7520 rtx insn = ready[i];
7521
7522 if (recog_memoized (insn) >= 0)
7523 ultra_types_avail |= TMASK (get_attr_type (insn));
7524
7525 i -= 1;
7526 }
7527 }
7528
7529 /* Place insn pointed to by IP into the pipeline.
7530 Make element THIS of READY be that insn if it
7531 is not already. TYPE indicates the pipeline class
7532 this insn falls into. */
7533 static void
7534 ultra_schedule_insn (ip, ready, this, type)
7535 rtx *ip;
7536 rtx *ready;
7537 int this;
7538 enum ultra_code type;
7539 {
7540 int pipe_slot;
7541 char mask = ultra_pipe.free_slot_mask;
7542 rtx temp;
7543
7544 /* Obtain free slot. */
7545 for (pipe_slot = 0; pipe_slot < 4; pipe_slot++)
7546 if ((mask & (1 << pipe_slot)) != 0)
7547 break;
7548 if (pipe_slot == 4)
7549 abort ();
7550
7551 /* In it goes, and it hasn't been committed yet. */
7552 ultra_pipe.group[pipe_slot] = *ip;
7553 ultra_pipe.codes[pipe_slot] = type;
7554 ultra_pipe.contents[type] = 1;
7555 if (UMASK (type) &
7556 (UMASK (IEUN) | UMASK (IEU0) | UMASK (IEU1)))
7557 ultra_pipe.num_ieu_insns += 1;
7558
7559 ultra_pipe.free_slot_mask = (mask & ~(1 << pipe_slot));
7560 ultra_pipe.group_size += 1;
7561 ultra_pipe.commit[pipe_slot] = 0;
7562
7563 /* Update ready list. */
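/* Rotate the chosen insn from its current position up to index THIS,
   sliding the insns in between down one slot each. */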
7564 temp = *ip;
7565 while (ip != &ready[this])
7566 {
7567 ip[0] = ip[1];
7568 ++ip;
7569 }
7570 *ip = temp;
7571 }
7572
7573 /* Advance to the next pipeline group. */
7574 static void
7575 ultra_flush_pipeline ()
7576 {
7577 ultra_cur_hist = (ultra_cur_hist + 1) & (ULTRA_NUM_HIST - 1);
7578 ultra_cycles_elapsed += 1;
7579 bzero ((char *) &ultra_pipe, sizeof ultra_pipe);
7580 ultra_pipe.free_slot_mask = 0xf;
7581 }
7582
7583 /* Init our data structures for this current block. */
7584 void
7585 ultrasparc_sched_init (dump, sched_verbose)
7586 FILE *dump ATTRIBUTE_UNUSED;
7587 int sched_verbose ATTRIBUTE_UNUSED;
7588 {
7589 bzero ((char *) ultra_pipe_hist, sizeof ultra_pipe_hist);
7590 ultra_cur_hist = 0;
7591 ultra_cycles_elapsed = 0;
7592 ultra_pipe.free_slot_mask = 0xf;
7593 }
7594
7595 /* INSN has been scheduled, update pipeline commit state
7596 and return how many instructions are still to be
7597 scheduled in this group. */
7598 int
7599 ultrasparc_variable_issue (insn)
7600 rtx insn;
7601 {
7602 struct ultrasparc_pipeline_state *up = &ultra_pipe;
7603 int i, left_to_fire;
7604
7605 left_to_fire = 0;
7606 for (i = 0; i < 4; i++)
7607 {
7608 if (up->group[i] == 0)
7609 continue;
7610
7611 if (up->group[i] == insn)
7612 {
7613 up->commit[i] = 1;
7614 }
7615 else if (! up->commit[i])
7616 left_to_fire++;
7617 }
7618
7619 return left_to_fire;
7620 }
7621
7622 /* In actual_hazard_this_instance, we may have yanked some
7623 instructions from the ready list due to conflict cost
7624 adjustments. If so, and such an insn was in our pipeline
7625 group, remove it and update state. */
7626 static void
7627 ultra_rescan_pipeline_state (ready, n_ready)
7628 rtx *ready;
7629 int n_ready;
7630 {
7631 struct ultrasparc_pipeline_state *up = &ultra_pipe;
7632 int i;
7633
7634 for (i = 0; i < 4; i++)
7635 {
7636 rtx insn = up->group[i];
7637 int j;
7638
7639 if (! insn)
7640 continue;
7641
7642 /* If it has been committed, then it was removed from
7643 the ready list because it was actually scheduled,
7644 and that is not the case we are searching for here. */
7645 if (up->commit[i] != 0)
7646 continue;
7647
7648 for (j = n_ready - 1; j >= 0; j--)
7649 if (ready[j] == insn)
7650 break;
7651
7652 /* If we didn't find it, toss it. */
7653 if (j < 0)
7654 {
7655 enum ultra_code ucode = up->codes[i];
7656
7657 up->group[i] = 0;
7658 up->codes[i] = NONE;
7659 up->contents[ucode] = 0;
7660 if (UMASK (ucode) &
7661 (UMASK (IEUN) | UMASK (IEU0) | UMASK (IEU1)))
7662 up->num_ieu_insns -= 1;
7663
7664 up->free_slot_mask |= (1 << i);
7665 up->group_size -= 1;
7666 up->commit[i] = 0;
7667 }
7668 }
7669 }
7670
7671 void
7672 ultrasparc_sched_reorder (dump, sched_verbose, ready, n_ready)
7673 FILE *dump;
7674 int sched_verbose;
7675 rtx *ready;
7676 int n_ready;
7677 {
7678 struct ultrasparc_pipeline_state *up = &ultra_pipe;
7679 int i, this_insn;
7680
7681 if (sched_verbose)
7682 {
7683 int n;
7684
7685 fprintf (dump, "\n;;\tUltraSPARC Looking at [");
7686 for (n = n_ready - 1; n >= 0; n--)
7687 {
7688 rtx insn = ready[n];
7689 enum ultra_code ucode;
7690
7691 if (recog_memoized (insn) < 0)
7692 continue;
7693 ucode = ultra_code_from_mask (TMASK (get_attr_type (insn)));
7694 if (n != 0)
7695 fprintf (dump, "%s(%d) ",
7696 ultra_code_names[ucode],
7697 INSN_UID (insn));
7698 else
7699 fprintf (dump, "%s(%d)",
7700 ultra_code_names[ucode],
7701 INSN_UID (insn));
7702 }
7703 fprintf (dump, "]\n");
7704 }
7705
7706 this_insn = n_ready - 1;
7707
7708 /* Skip over junk we don't understand. */
7709 while ((this_insn >= 0)
7710 && recog_memoized (ready[this_insn]) < 0)
7711 this_insn--;
7712
7713 ultra_build_types_avail (ready, this_insn + 1);
7714
7715 while (this_insn >= 0) {
7716 int old_group_size = up->group_size;
7717
7718 if (up->group_size != 0)
7719 {
7720 int num_committed;
7721
7722 num_committed = (up->commit[0] + up->commit[1] +
7723 up->commit[2] + up->commit[3]);
7724 /* If nothing has been committed from our group, or all of
7725 them have been, clear out the (current cycle's) pipeline
7726 state and start afresh. */
7727 if (num_committed == 0
7728 || num_committed == up->group_size)
7729 {
7730 ultra_flush_pipeline ();
7731 up = &ultra_pipe;
7732 old_group_size = 0;
7733 }
7734 else
7735 {
7736 /* OK, some ready list insns got requeued and thus removed
7737 from the ready list. Account for this fact. */
7738 ultra_rescan_pipeline_state (ready, n_ready);
7739
7740 /* Something "changed", make this look like a newly
7741 formed group so the code at the end of the loop
7742 knows that progress was in fact made. */
7743 if (up->group_size != old_group_size)
7744 old_group_size = 0;
7745 }
7746 }
7747
7748 if (up->group_size == 0)
7749 {
7750 /* If the pipeline is (still) empty and we have any single-group
7751 insns, get them out now, as this is a good time. */
7752 rtx *ip = ultra_find_type ((TMASK (TYPE_RETURN) | TMASK (TYPE_ADDRESS) |
7753 TMASK (TYPE_IMUL) | TMASK (TYPE_CMOVE) |
7754 TMASK (TYPE_MULTI) | TMASK (TYPE_MISC)),
7755 ready, this_insn);
7756 if (ip)
7757 {
7758 ultra_schedule_insn (ip, ready, this_insn, SINGLE);
7759 break;
7760 }
7761
7762 /* If we are not in the process of emptying out the pipe, try to
7763 obtain an instruction which must be the first in its group. */
7764 ip = ultra_find_type ((TMASK (TYPE_CALL) |
7765 TMASK (TYPE_SIBCALL) |
7766 TMASK (TYPE_CALL_NO_DELAY_SLOT) |
7767 TMASK (TYPE_UNCOND_BRANCH)),
7768 ready, this_insn);
7769 if (ip)
7770 {
7771 ultra_schedule_insn (ip, ready, this_insn, IEU1);
7772 this_insn--;
7773 }
7774 else if ((ip = ultra_find_type ((TMASK (TYPE_FPDIVS) |
7775 TMASK (TYPE_FPDIVD) |
7776 TMASK (TYPE_FPSQRTS) |
7777 TMASK (TYPE_FPSQRTD)),
7778 ready, this_insn)) != 0)
7779 {
7780 ultra_schedule_insn (ip, ready, this_insn, FPM);
7781 this_insn--;
7782 }
7783 }
7784
7785 /* Try to fill the integer pipeline. First, look for an IEU0-specific
7786 operation. We can't do more IEU operations if the first 3 slots are
7787 all full or we have dispatched two IEU insns already. */
7788 if ((up->free_slot_mask & 0x7) != 0
7789 && up->num_ieu_insns < 2
7790 && up->contents[IEU0] == 0
7791 && up->contents[IEUN] == 0)
7792 {
7793 rtx *ip = ultra_find_type (TMASK (TYPE_SHIFT), ready, this_insn);
7794 if (ip)
7795 {
7796 ultra_schedule_insn (ip, ready, this_insn, IEU0);
7797 this_insn--;
7798 }
7799 }
7800
7801 /* If we can, try to find an IEU1-specific or an unnamed
7802 IEU instruction. */
7803 if ((up->free_slot_mask & 0x7) != 0
7804 && up->num_ieu_insns < 2)
7805 {
7806 rtx *ip = ultra_find_type ((TMASK (TYPE_IALU) | TMASK (TYPE_BINARY) |
7807 TMASK (TYPE_MOVE) | TMASK (TYPE_UNARY) |
7808 (up->contents[IEU1] == 0 ? TMASK (TYPE_COMPARE) : 0)),
7809 ready, this_insn);
7810 if (ip)
7811 {
7812 rtx insn = *ip;
7813
7814 ultra_schedule_insn (ip, ready, this_insn,
7815 (!up->contents[IEU1]
7816 && get_attr_type (insn) == TYPE_COMPARE)
7817 ? IEU1 : IEUN);
7818 this_insn--;
7819 }
7820 }
7821
7822 /* If only one IEU insn has been found so far, try to find another
7823 unnamed IEU operation or an IEU1-specific one. */
7824 if ((up->free_slot_mask & 0x7) != 0
7825 && up->num_ieu_insns < 2)
7826 {
7827 rtx *ip;
7828 int tmask = (TMASK (TYPE_IALU) | TMASK (TYPE_BINARY) |
7829 TMASK (TYPE_MOVE) | TMASK (TYPE_UNARY));
7830
7831 if (!up->contents[IEU1])
7832 tmask |= TMASK (TYPE_COMPARE);
7833 ip = ultra_find_type (tmask, ready, this_insn);
7834 if (ip)
7835 {
7836 rtx insn = *ip;
7837
7838 ultra_schedule_insn (ip, ready, this_insn,
7839 (!up->contents[IEU1]
7840 && get_attr_type (insn) == TYPE_COMPARE)
7841 ? IEU1 : IEUN);
7842 this_insn--;
7843 }
7844 }
7845
7846 /* Try for a load or store, but such an insn can only be issued
7847 if it is within one of the first 3 slots. */
7848 if ((up->free_slot_mask & 0x7) != 0
7849 && up->contents[LSU] == 0)
7850 {
7851 rtx *ip = ultra_find_type ((TMASK (TYPE_LOAD) | TMASK (TYPE_SLOAD) |
7852 TMASK (TYPE_STORE) | TMASK (TYPE_FPLOAD) |
7853 TMASK (TYPE_FPSTORE)), ready, this_insn);
7854 if (ip)
7855 {
7856 ultra_schedule_insn (ip, ready, this_insn, LSU);
7857 this_insn--;
7858 }
7859 }
7860
7861 /* Now find FPU operations, first FPM class. But not divisions or
7862 square-roots because those will break the group up. Unlike all
7863 the previous types, these can go in any slot. */
7864 if (up->free_slot_mask != 0
7865 && up->contents[FPM] == 0)
7866 {
7867 rtx *ip = ultra_find_type (TMASK (TYPE_FPMUL), ready, this_insn);
7868 if (ip)
7869 {
7870 ultra_schedule_insn (ip, ready, this_insn, FPM);
7871 this_insn--;
7872 }
7873 }
7874
7875 /* Continue on with FPA class if we have not filled the group already. */
7876 if (up->free_slot_mask != 0
7877 && up->contents[FPA] == 0)
7878 {
7879 rtx *ip = ultra_find_type ((TMASK (TYPE_FPMOVE) | TMASK (TYPE_FPCMOVE) |
7880 TMASK (TYPE_FP) | TMASK (TYPE_FPCMP)),
7881 ready, this_insn);
7882 if (ip)
7883 {
7884 ultra_schedule_insn (ip, ready, this_insn, FPA);
7885 this_insn--;
7886 }
7887 }
7888
7889 /* Finally, maybe stick a branch in here. */
7890 if (up->free_slot_mask != 0
7891 && up->contents[CTI] == 0)
7892 {
7893 rtx *ip = ultra_find_type (TMASK (TYPE_BRANCH), ready, this_insn);
7894
7895 /* Try to slip in a branch only if it is one of the
7896 next 2 in the ready list. */
7897 if (ip && ((&ready[this_insn] - ip) < 2))
7898 {
7899 ultra_schedule_insn (ip, ready, this_insn, CTI);
7900 this_insn--;
7901 }
7902 }
7903
7904 up->group_size = 0;
7905 for (i = 0; i < 4; i++)
7906 if ((up->free_slot_mask & (1 << i)) == 0)
7907 up->group_size++;
7908
7909 /* See if we made any progress... */
7910 if (old_group_size != up->group_size)
7911 break;
7912
7913 /* Clean out the (current cycle's) pipeline state
7914 and try once more. If we placed no instructions
7915 into the pipeline at all, it means a real hard
7916 conflict exists with some earlier issued instruction,
7917 so we must advance to the next cycle to clear it up. */
7918 if (up->group_size == 0)
7919 {
7920 ultra_flush_pipeline ();
7921 up = &ultra_pipe;
7922 }
7923 else
7924 {
7925 bzero ((char *) &ultra_pipe, sizeof ultra_pipe);
7926 ultra_pipe.free_slot_mask = 0xf;
7927 }
7928 }
7929
7930 if (sched_verbose)
7931 {
7932 int n, gsize;
7933
7934 fprintf (dump, ";;\tUltraSPARC Launched [");
7935 gsize = up->group_size;
7936 for (n = 0; n < 4; n++)
7937 {
7938 rtx insn = up->group[n];
7939
7940 if (! insn)
7941 continue;
7942
7943 gsize -= 1;
7944 if (gsize != 0)
7945 fprintf (dump, "%s(%d) ",
7946 ultra_code_names[up->codes[n]],
7947 INSN_UID (insn));
7948 else
7949 fprintf (dump, "%s(%d)",
7950 ultra_code_names[up->codes[n]],
7951 INSN_UID (insn));
7952 }
7953 fprintf (dump, "]\n");
7954 }
7955 }
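
/* The reorder pass above leans on ultra_find_type, defined earlier in
   this file.  A simplified sketch of the search it performs follows;
   the real routine also checks dependences against insns already
   placed in the current group, which this sketch omits (illustrative
   only, not used by the compiler).  */
#if 0
static rtx *
find_type_sketch (type_mask, ready, start)
     int type_mask;
     rtx *ready;
     int start;
{
  int i;

  /* Scan from the most-ready end of the list toward the front.  */
  for (i = start; i >= 0; i--)
    if (recog_memoized (ready[i]) >= 0
	&& (TMASK (get_attr_type (ready[i])) & type_mask) != 0)
      return &ready[i];
  return 0;
}
#endif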
7956
7957 int
7958 sparc_issue_rate ()
7959 {
7960 switch (sparc_cpu)
7961 {
7962 default:
7963 return 1;
7964 case PROCESSOR_V9:
7965 /* Assume V9 processors are capable of at least dual-issue. */
7966 return 2;
7967 case PROCESSOR_SUPERSPARC:
7968 return 3;
7969 case PROCESSOR_HYPERSPARC:
7970 case PROCESSOR_SPARCLITE86X:
7971 return 2;
7972 case PROCESSOR_ULTRASPARC:
7973 return 4;
7974 }
7975 }
7976
7977 static int
7978 set_extends (x, insn)
7979 rtx x, insn;
7980 {
7981 register rtx pat = PATTERN (insn);
7982
7983 switch (GET_CODE (SET_SRC (pat)))
7984 {
7985 /* Loads and some shift instructions zero extend. */
7986 case MEM:
7987 case ZERO_EXTEND:
7988 /* sethi clears the high bits. */
7989 case HIGH:
7990 /* LO_SUM is used with sethi; sethi has already cleared the high
7991 bits, and the values used with lo_sum are positive. */
7992 case LO_SUM:
7993 /* The store-flag patterns store only 0 or 1. */
7994 case LT: case LTU:
7995 case GT: case GTU:
7996 case LE: case LEU:
7997 case GE: case GEU:
7998 case EQ:
7999 case NE:
8000 return 1;
8001 case AND:
8002 {
8003 rtx op1 = XEXP (SET_SRC (pat), 1);
8004 if (GET_CODE (op1) == CONST_INT)
8005 return INTVAL (op1) >= 0;
8006 if (GET_CODE (XEXP (SET_SRC (pat), 0)) == REG
8007 && sparc_check_64 (XEXP (SET_SRC (pat), 0), insn) == 1)
8008 return 1;
8009 if (GET_CODE (op1) == REG
8010 && sparc_check_64 ((op1), insn) == 1)
8011 return 1;
return 0; /* Don't fall through to the shift cases below. */
8012 }
8013 case ASHIFT:
8014 case LSHIFTRT:
8015 return GET_MODE (SET_SRC (pat)) == SImode;
8016 /* Positive integers leave the high bits zero. */
8017 case CONST_DOUBLE:
8018 return ! (CONST_DOUBLE_LOW (SET_SRC (pat)) & 0x80000000);
8019 case CONST_INT:
8020 return ! (INTVAL (SET_SRC (pat)) & 0x80000000);
8021 case ASHIFTRT:
8022 case SIGN_EXTEND:
8023 return - (GET_MODE (SET_SRC (pat)) == SImode);
8024 default:
8025 return 0;
8026 }
8027 }
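
/* The CONST_INT and CONST_DOUBLE cases above test only bit 31 of the
   low word.  A standalone restatement of that test (illustrative
   only, not used by the compiler):  */
#if 0
/* Return 1 if VAL, viewed as a 32-bit quantity, has a clear sign bit
   and therefore already zero extends to 64 bits.  */
static int
high_bits_zero_p (val)
     long val;
{
  return ! (val & 0x80000000L);
}
#endif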
8028
8029 /* We _ought_ to have only one kind per function, but... */
8030 static rtx sparc_addr_diff_list;
8031 static rtx sparc_addr_list;
8032
8033 void
8034 sparc_defer_case_vector (lab, vec, diff)
8035 rtx lab, vec;
8036 int diff;
8037 {
8038 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
8039 if (diff)
8040 sparc_addr_diff_list
8041 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
8042 else
8043 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
8044 }
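
/* gen_rtx_EXPR_LIST is used above as a cons cell: each deferral
   prepends to the list, so the vectors are later emitted in reverse
   order of deferral.  The same push idiom in plain C (illustrative
   only; the struct name and the use of xmalloc are assumptions):  */
#if 0
struct cons { void *car; struct cons *cdr; };

static struct cons *
cons_push (item, list)
     void *item;
     struct cons *list;
{
  struct cons *c = (struct cons *) xmalloc (sizeof *c);
  c->car = item;
  c->cdr = list;		/* New head points at the old head.  */
  return c;
}
#endif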
8045
8046 static void
8047 sparc_output_addr_vec (vec)
8048 rtx vec;
8049 {
8050 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
8051 int idx, vlen = XVECLEN (body, 0);
8052
8053 #ifdef ASM_OUTPUT_ADDR_VEC_START
8054 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
8055 #endif
8056
8057 #ifdef ASM_OUTPUT_CASE_LABEL
8058 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
8059 NEXT_INSN (lab));
8060 #else
8061 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
8062 #endif
8063
8064 for (idx = 0; idx < vlen; idx++)
8065 {
8066 ASM_OUTPUT_ADDR_VEC_ELT
8067 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
8068 }
8069
8070 #ifdef ASM_OUTPUT_ADDR_VEC_END
8071 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
8072 #endif
8073 }
8074
8075 static void
8076 sparc_output_addr_diff_vec (vec)
8077 rtx vec;
8078 {
8079 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
8080 rtx base = XEXP (XEXP (body, 0), 0);
8081 int idx, vlen = XVECLEN (body, 1);
8082
8083 #ifdef ASM_OUTPUT_ADDR_VEC_START
8084 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
8085 #endif
8086
8087 #ifdef ASM_OUTPUT_CASE_LABEL
8088 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
8089 NEXT_INSN (lab));
8090 #else
8091 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
8092 #endif
8093
8094 for (idx = 0; idx < vlen; idx++)
8095 {
8096 ASM_OUTPUT_ADDR_DIFF_ELT
8097 (asm_out_file,
8098 body,
8099 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
8100 CODE_LABEL_NUMBER (base));
8101 }
8102
8103 #ifdef ASM_OUTPUT_ADDR_VEC_END
8104 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
8105 #endif
8106 }
8107
8108 static void
8109 sparc_output_deferred_case_vectors ()
8110 {
8111 rtx t;
8112 int align;
8113
8114 if (sparc_addr_list == NULL_RTX
8115 && sparc_addr_diff_list == NULL_RTX)
8116 return;
8117
8118 /* Align to cache line in the function's code section. */
8119 function_section (current_function_decl);
8120
8121 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
8122 if (align > 0)
8123 ASM_OUTPUT_ALIGN (asm_out_file, align);
8124
8125 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
8126 sparc_output_addr_vec (XEXP (t, 0));
8127 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
8128 sparc_output_addr_diff_vec (XEXP (t, 0));
8129
8130 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
8131 }
8132
8133 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
8134 unknown. Return 1 if the high bits are zero, -1 if the register is
8135 sign extended. */
8136 int
8137 sparc_check_64 (x, insn)
8138 rtx x, insn;
8139 {
8140 /* If a register is set only once it is safe to ignore insns this
8141 code does not know how to handle. The loop will either recognize
8142 the single set and return the correct value or fail to recognize
8143 it and return 0. */
8144 int set_once = 0;
8145
8146 if (GET_CODE (x) == REG
8147 && flag_expensive_optimizations
8148 && REG_N_SETS (REGNO (x)) == 1)
8149 set_once = 1;
8150
8151 if (insn == 0)
8152 {
8153 if (set_once)
8154 insn = get_last_insn_anywhere ();
8155 else
8156 return 0;
8157 }
8158
8159 while ((insn = PREV_INSN (insn)))
8160 {
8161 switch (GET_CODE (insn))
8162 {
8163 case JUMP_INSN:
8164 case NOTE:
8165 break;
8166 case CODE_LABEL:
8167 case CALL_INSN:
8168 default:
8169 if (! set_once)
8170 return 0;
8171 break;
8172 case INSN:
8173 {
8174 rtx pat = PATTERN (insn);
8175 if (GET_CODE (pat) != SET)
8176 return 0;
8177 if (rtx_equal_p (x, SET_DEST (pat)))
8178 return set_extends (x, insn);
8179 if (reg_overlap_mentioned_p (SET_DEST (pat), x))
8180 return 0;
8181 }
8182 }
8183 }
8184 return 0;
8185 }
8186
8187 char *
8188 sparc_v8plus_shift (operands, insn, opcode)
8189 rtx *operands;
8190 rtx insn;
8191 const char *opcode;
8192 {
8193 static char asm_code[60];
8194
8195 if (GET_CODE (operands[3]) == SCRATCH)
8196 operands[3] = operands[0];
8197 if (GET_CODE (operands[1]) == CONST_INT)
8198 {
8199 output_asm_insn ("mov %1,%3", operands);
8200 }
8201 else
8202 {
8203 output_asm_insn ("sllx %H1,32,%3", operands);
8204 if (sparc_check_64 (operands[1], insn) <= 0)
8205 output_asm_insn ("srl %L1,0,%L1", operands);
8206 output_asm_insn ("or %L1,%3,%3", operands);
8207 }
8208
8209 strcpy (asm_code, opcode);
8210 if (which_alternative != 2)
8211 return strcat (asm_code, " %0,%2,%L0\n\tsrlx %L0,32,%H0");
8212 else
8213 return strcat (asm_code, " %3,%2,%3\n\tsrlx %3,32,%H0\n\tmov %3,%L0");
8214 }
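
/* Taking OPCODE == "sllx" and which_alternative != 2 as an example,
   the function above emits roughly the following for a register
   source (operand letters as in the templates; illustrative only):

	sllx	%H1,32,%3	! high word into bits 63..32
	srl	%L1,0,%L1	! zero-extend the low word if need be
	or	%L1,%3,%3	! assemble the full 64-bit input
	sllx	%0,%2,%L0	! the shift itself
	srlx	%L0,32,%H0	! split the result back into two words  */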
8215
8216
8217 /* Return 1 if DEST and SRC reference only global and "in" registers. */
8218
8219 int
8220 sparc_return_peephole_ok (dest, src)
8221 rtx dest, src;
8222 {
8223 if (! TARGET_V9)
8224 return 0;
8225 if (current_function_uses_only_leaf_regs)
8226 return 0;
8227 if (GET_CODE (src) != CONST_INT
8228 && (GET_CODE (src) != REG || ! IN_OR_GLOBAL_P (src)))
8229 return 0;
8230 return IN_OR_GLOBAL_P (dest);
8231 }
8232 \f
8233 /* Output assembler code to FILE to increment profiler label # LABELNO
8234 for profiling a function entry.
8235
8236 32-bit SPARC uses %g2 as the STATIC_CHAIN_REGNUM, which gets clobbered
8237 during profiling, so we need to save/restore it around the call to mcount.
8238 We're guaranteed that a save has just been done, and we use the space
8239 allocated for intreg/fpreg value passing. */
8240
8241 void
8242 sparc_function_profiler (file, labelno)
8243 FILE *file;
8244 int labelno;
8245 {
8246 char buf[32];
8247 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
8248
8249 if (! TARGET_ARCH64)
8250 fputs ("\tst\t%g2,[%fp-4]\n", file);
8251
8252 fputs ("\tsethi\t%hi(", file);
8253 assemble_name (file, buf);
8254 fputs ("),%o0\n", file);
8255
8256 fputs ("\tcall\t", file);
8257 assemble_name (file, MCOUNT_FUNCTION);
8258 putc ('\n', file);
8259
8260 fputs ("\t or\t%o0,%lo(", file);
8261 assemble_name (file, buf);
8262 fputs ("),%o0\n", file);
8263
8264 if (! TARGET_ARCH64)
8265 fputs ("\tld\t[%fp-4],%g2\n", file);
8266 }
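
/* On 32-bit SPARC with LABELNO == 1, the function above emits
   approximately the following (label spelling and the mcount entry
   point vary with the target; illustrative only):

	st	%g2,[%fp-4]		! preserve the static chain reg
	sethi	%hi(.LP1),%o0
	call	mcount
	 or	%o0,%lo(.LP1),%o0	! executed in the delay slot
	ld	[%fp-4],%g2		! restore %g2  */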
8267
8268
8269 /* The following macro shall output assembler code to FILE
8270 to initialize basic-block profiling.
8271
8272 If profile_block_flag == 2
8273
8274 Output code to call the subroutine `__bb_init_trace_func'
8275 and pass two parameters to it. The first parameter is
8276 the address of a block allocated in the object module.
8277 The second parameter is the number of the first basic block
8278 of the function.
8279
8280 The name of the block is a local symbol made with this statement:
8281
8282 ASM_GENERATE_INTERNAL_LABEL (BUFFER, "LPBX", 0);
8283
8284 Of course, since you are writing the definition of
8285 `ASM_GENERATE_INTERNAL_LABEL' as well as that of this macro, you
8286 can take a short cut in the definition of this macro and use the
8287 name that you know will result.
8288
8289 The number of the first basic block of the function is
8290 passed to the macro in BLOCK_OR_LABEL.
8291
8292 If described in a virtual assembler language the code to be
8293 output looks like:
8294
8295 parameter1 <- LPBX0
8296 parameter2 <- BLOCK_OR_LABEL
8297 call __bb_init_trace_func
8298
8299 else if profile_block_flag != 0
8300
8301 Output code to call the subroutine `__bb_init_func'
8302 and pass one single parameter to it, which is the same
8303 as the first parameter to `__bb_init_trace_func'.
8304
8305 The first word of this parameter is a flag which will be nonzero if
8306 the object module has already been initialized. So test this word
8307 first, and do not call `__bb_init_func' if the flag is nonzero.
8308 Note: When profile_block_flag == 2 the test need not be done
8309 but `__bb_init_trace_func' *must* be called.
8310
8311 BLOCK_OR_LABEL may be used to generate a label number as a
8312 branch destination in case `__bb_init_func' will not be called.
8313
8314 If described in a virtual assembler language the code to be
8315 output looks like:
8316
8317 cmp (LPBX0),0
8318 jne local_label
8319 parameter1 <- LPBX0
8320 call __bb_init_func
8321 local_label:
8322
8323 */
8324
8325 void
8326 sparc_function_block_profiler (file, block_or_label)
8327 FILE *file;
8328 int block_or_label;
8329 {
8330 char LPBX[32];
8331 ASM_GENERATE_INTERNAL_LABEL (LPBX, "LPBX", 0);
8332
8333 if (profile_block_flag == 2)
8334 {
8335 fputs ("\tsethi\t%hi(", file);
8336 assemble_name (file, LPBX);
8337 fputs ("),%o0\n", file);
8338
8339 fprintf (file, "\tsethi\t%%hi(%d),%%o1\n", block_or_label);
8340
8341 fputs ("\tor\t%o0,%lo(", file);
8342 assemble_name (file, LPBX);
8343 fputs ("),%o0\n", file);
8344
8345 fprintf (file, "\tcall\t%s__bb_init_trace_func\n", user_label_prefix);
8346
8347 fprintf (file, "\t or\t%%o1,%%lo(%d),%%o1\n", block_or_label);
8348 }
8349 else if (profile_block_flag != 0)
8350 {
8351 char LPBY[32];
8352 ASM_GENERATE_INTERNAL_LABEL (LPBY, "LPBY", block_or_label);
8353
8354 fputs ("\tsethi\t%hi(", file);
8355 assemble_name (file, LPBX);
8356 fputs ("),%o0\n", file);
8357
8358 fputs ("\tld\t[%lo(", file);
8359 assemble_name (file, LPBX);
8360 fputs (")+%o0],%o1\n", file);
8361
8362 fputs ("\ttst\t%o1\n", file);
8363
8364 if (TARGET_V9)
8365 {
8366 fputs ("\tbne,pn\t%icc,", file);
8367 assemble_name (file, LPBY);
8368 putc ('\n', file);
8369 }
8370 else
8371 {
8372 fputs ("\tbne\t", file);
8373 assemble_name (file, LPBY);
8374 putc ('\n', file);
8375 }
8376
8377 fputs ("\t or\t%o0,%lo(", file);
8378 assemble_name (file, LPBX);
8379 fputs ("),%o0\n", file);
8380
8381 fprintf (file, "\tcall\t%s__bb_init_func\n\t nop\n", user_label_prefix);
8382
8383 ASM_OUTPUT_INTERNAL_LABEL (file, "LPBY", block_or_label);
8384 }
8385 }
8386
8387 /* The following macro shall output assembler code to FILE
8388 to increment a counter associated with basic block number BLOCKNO.
8389
8390 If profile_block_flag == 2
8391
8392 Output code to initialize the global structure `__bb' and
8393 call the function `__bb_trace_func' which will increment the
8394 counter.
8395
8396 `__bb' consists of two words. In the first word the number
8397 of the basic block has to be stored. In the second word
8398 the address of a block allocated in the object module
8399 has to be stored.
8400
8401 The basic block number is given by BLOCKNO.
8402
8403 The address of the block is given by the label created with
8404
8405 ASM_GENERATE_INTERNAL_LABEL (BUFFER, "LPBX", 0);
8406
8407 by FUNCTION_BLOCK_PROFILER.
8408
8409 Of course, since you are writing the definition of
8410 `ASM_GENERATE_INTERNAL_LABEL' as well as that of this macro, you
8411 can take a short cut in the definition of this macro and use the
8412 name that you know will result.
8413
8414 If described in a virtual assembler language the code to be
8415 output looks like:
8416
8417 move BLOCKNO -> (__bb)
8418 move LPBX0 -> (__bb+4)
8419 call __bb_trace_func
8420
8421 Note that function `__bb_trace_func' must not change the
8422 machine state, especially the flag register. To grant
8423 this, you must output code to save and restore registers
8424 either in this macro or in the macros MACHINE_STATE_SAVE
8425 and MACHINE_STATE_RESTORE. The last two macros will be
8426 used in the function `__bb_trace_func', so you must make
8427 sure that the function prologue does not change any
8428 register prior to saving it with MACHINE_STATE_SAVE.
8429
8430 else if profile_block_flag != 0
8431
8432 Output code to increment the counter directly.
8433 Basic blocks are numbered separately from zero within each
8434 compiled object module. The count associated with block number
8435 BLOCKNO is at index BLOCKNO in an array of words; the name of
8436 this array is a local symbol made with this statement:
8437
8438 ASM_GENERATE_INTERNAL_LABEL (BUFFER, "LPBX", 2);
8439
8440 Of course, since you are writing the definition of
8441 `ASM_GENERATE_INTERNAL_LABEL' as well as that of this macro, you
8442 can take a short cut in the definition of this macro and use the
8443 name that you know will result.
8444
8445 If described in a virtual assembler language, the code to be
8446 output looks like:
8447
8448 inc (LPBX2+4*BLOCKNO)
8449
8450 */
8451
8452 void
8453 sparc_block_profiler (file, blockno)
8454 FILE *file;
8455 int blockno;
8456 {
8457 char LPBX[32];
8458 int bbreg = TARGET_ARCH64 ? 4 : 2;
8459
8460 if (profile_block_flag == 2)
8461 {
8462 ASM_GENERATE_INTERNAL_LABEL (LPBX, "LPBX", 0);
8463
8464 fprintf (file, "\tsethi\t%%hi(%s__bb),%%g1\n", user_label_prefix);
8465 fprintf (file, "\tsethi\t%%hi(%d),%%g%d\n", blockno, bbreg);
8466 fprintf (file, "\tor\t%%g1,%%lo(%s__bb),%%g1\n", user_label_prefix);
8467 fprintf (file, "\tor\t%%g%d,%%lo(%d),%%g%d\n", bbreg, blockno, bbreg);
8468
8469 fprintf (file, "\tst\t%%g%d,[%%g1]\n", bbreg);
8470
8471 fputs ("\tsethi\t%hi(", file);
8472 assemble_name (file, LPBX);
8473 fprintf (file, "),%%g%d\n", bbreg);
8474
8475 fputs ("\tor\t%o2,%lo(", file);
8476 assemble_name (file, LPBX);
8477 fprintf (file, "),%%g%d\n", bbreg);
8478
8479 fprintf (file, "\tst\t%%g%d,[%%g1+4]\n", bbreg);
8480 fprintf (file, "\tmov\t%%o7,%%g%d\n", bbreg);
8481
8482 fprintf (file, "\tcall\t%s__bb_trace_func\n\t nop\n", user_label_prefix);
8483
8484 fprintf (file, "\tmov\t%%g%d,%%o7\n", bbreg);
8485 }
8486 else if (profile_block_flag != 0)
8487 {
8488 ASM_GENERATE_INTERNAL_LABEL (LPBX, "LPBX", 2);
8489
8490 fputs ("\tsethi\t%hi(", file);
8491 assemble_name (file, LPBX);
8492 fprintf (file, "+%d),%%g1\n", blockno*4);
8493
8494 fputs ("\tld\t[%g1+%lo(", file);
8495 assemble_name (file, LPBX);
8496 if (TARGET_ARCH64 && USE_AS_OFFSETABLE_LO10)
8497 fprintf (file, ")+%d],%%g%d\n", blockno*4, bbreg);
8498 else
8499 fprintf (file, "+%d)],%%g%d\n", blockno*4, bbreg);
8500
8501 fprintf (file, "\tadd\t%%g%d,1,%%g%d\n", bbreg, bbreg);
8502
8503 fprintf (file, "\tst\t%%g%d,[%%g1+%%lo(", bbreg);
8504 assemble_name (file, LPBX);
8505 if (TARGET_ARCH64 && USE_AS_OFFSETABLE_LO10)
8506 fprintf (file, ")+%d]\n", blockno*4);
8507 else
8508 fprintf (file, "+%d)]\n", blockno*4);
8509 }
8510 }
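
/* With profile_block_flag == 1 on 32-bit SPARC and BLOCKNO == 3, the
   function above reduces to a load/increment/store of word 3 of the
   LPBX2 counter array, roughly (illustrative only):

	sethi	%hi(.LPBX2+12),%g1
	ld	[%g1+%lo(.LPBX2+12)],%g2
	add	%g2,1,%g2
	st	%g2,[%g1+%lo(.LPBX2+12)]  */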
8511
8512 /* The following macro shall output assembler code to FILE
8513 to indicate a return from function during basic-block profiling.
8514
8515 If profile_block_flag == 2:
8516
8517 Output assembler code to call function `__bb_trace_ret'.
8518
8519 Note that function `__bb_trace_ret' must not change the
8520 machine state, especially the flag register. To grant
8521 this, you must output code to save and restore registers
8522 either in this macro or in the macros MACHINE_STATE_SAVE_RET
8523 and MACHINE_STATE_RESTORE_RET. The last two macros will be
8524 used in the function `__bb_trace_ret', so you must make
8525 sure that the function prologue does not change any
8526 register prior to saving it with MACHINE_STATE_SAVE_RET.
8527
8528 else if profile_block_flag != 0:
8529
8530 The macro will not be used, so it need not distinguish
8531 these cases.
8532 */
8533
8534 void
8535 sparc_function_block_profiler_exit (file)
8536 FILE *file;
8537 {
8538 if (profile_block_flag == 2)
8539 fprintf (file, "\tcall\t%s__bb_trace_ret\n\t nop\n", user_label_prefix);
8540 else
8541 abort ();
8542 }
8543
8544 /* Mark ARG, which is really a struct ultrasparc_pipeline_state *, for
8545 GC. */
8546
8547 static void
8548 mark_ultrasparc_pipeline_state (arg)
8549 void *arg;
8550 {
8551 struct ultrasparc_pipeline_state *ups;
8552 size_t i;
8553
8554 ups = (struct ultrasparc_pipeline_state *) arg;
8555 for (i = 0; i < sizeof (ups->group) / sizeof (rtx); ++i)
8556 ggc_mark_rtx (ups->group[i]);
8557 }
8558
8559 /* Called to register all of our global variables with the garbage
8560 collector. */
8561
8562 static void
8563 sparc_add_gc_roots ()
8564 {
8565 ggc_add_rtx_root (&sparc_compare_op0, 1);
8566 ggc_add_rtx_root (&sparc_compare_op1, 1);
8567 ggc_add_rtx_root (&leaf_label, 1);
8568 ggc_add_rtx_root (&global_offset_table, 1);
8569 ggc_add_rtx_root (&get_pc_symbol, 1);
8570 ggc_add_rtx_root (&sparc_addr_diff_list, 1);
8571 ggc_add_rtx_root (&sparc_addr_list, 1);
8572 ggc_add_root (ultra_pipe_hist,
8573 sizeof (ultra_pipe_hist) / sizeof (ultra_pipe_hist[0]),
8574 sizeof (ultra_pipe_hist[0]),
8575 &mark_ultrasparc_pipeline_state);
8576 }