/* Subroutines used for code generation on the DEC Alpha.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
   2000, 2001 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "integrate.h"
#include "target-def.h"
extern int rtx_equal_function_value_matters;
/* Specify which cpu to schedule for.  */

enum processor_type alpha_cpu;
static const char * const alpha_cpu_name[] =
{
  "ev4", "ev5", "ev6"
};

/* Specify how accurate floating-point traps need to be.  */

enum alpha_trap_precision alpha_tp;

/* Specify the floating-point rounding mode.  */

enum alpha_fp_rounding_mode alpha_fprm;

/* Specify which things cause traps.  */

enum alpha_fp_trap_mode alpha_fptm;

/* Strings decoded into the above options.  */

const char *alpha_cpu_string;	/* -mcpu= */
const char *alpha_tune_string;	/* -mtune= */
const char *alpha_tp_string;	/* -mtrap-precision=[p|s|i] */
const char *alpha_fprm_string;	/* -mfp-rounding-mode=[n|m|c|d] */
const char *alpha_fptm_string;	/* -mfp-trap-mode=[n|u|su|sui] */
const char *alpha_mlat_string;	/* -mmemory-latency= */

/* Save information from a "cmpxx" operation until the branch or scc is
   emitted.  */

struct alpha_compare alpha_compare;

/* Non-zero if inside of a function, because the Alpha asm can't
   handle .files inside of functions.  */

static int inside_function = FALSE;

/* The number of cycles of latency we should assume on memory reads.  */

int alpha_memory_latency = 3;

/* Whether the function needs the GP.  */

static int alpha_function_needs_gp;

/* The alias set for prologue/epilogue register save/restore.  */

static int alpha_sr_alias_set;

/* The assembler name of the current function.  */

static const char *alpha_fnname;

/* The next explicit relocation sequence number.  */
int alpha_next_sequence_number = 1;

/* The literal and gpdisp sequence numbers for this insn, as printed
   by %# and %* respectively.  */
int alpha_this_literal_sequence_number;
int alpha_this_gpdisp_sequence_number;
/* Declarations of static functions.  */
static bool decl_in_text_section
  PARAMS ((tree));
static bool local_symbol_p
  PARAMS ((rtx));
static void alpha_set_memflags_1
  PARAMS ((rtx, int, int, int));
static rtx alpha_emit_set_const_1
  PARAMS ((rtx, enum machine_mode, HOST_WIDE_INT, int));
static void alpha_expand_unaligned_load_words
  PARAMS ((rtx *out_regs, rtx smem, HOST_WIDE_INT words, HOST_WIDE_INT ofs));
static void alpha_expand_unaligned_store_words
  PARAMS ((rtx *out_regs, rtx smem, HOST_WIDE_INT words, HOST_WIDE_INT ofs));
static void alpha_sa_mask
  PARAMS ((unsigned long *imaskP, unsigned long *fmaskP));
static int find_lo_sum
  PARAMS ((rtx *, void *));
static int alpha_does_function_need_gp
  PARAMS ((void));
static int alpha_ra_ever_killed
  PARAMS ((void));
static const char *get_trap_mode_suffix
  PARAMS ((void));
static const char *get_round_mode_suffix
  PARAMS ((void));
static rtx set_frame_related_p
  PARAMS ((rtx));
static const char *alpha_lookup_xfloating_lib_func
  PARAMS ((enum rtx_code));
static int alpha_compute_xfloating_mode_arg
  PARAMS ((enum rtx_code, enum alpha_fp_rounding_mode));
static void alpha_emit_xfloating_libcall
  PARAMS ((const char *, rtx, rtx[], int, rtx));
static rtx alpha_emit_xfloating_compare
  PARAMS ((enum rtx_code, rtx, rtx));
static void alpha_output_function_end_prologue
  PARAMS ((FILE *));
static int alpha_adjust_cost
  PARAMS ((rtx, rtx, rtx, int));
static int alpha_issue_rate
  PARAMS ((void));
static int alpha_variable_issue
  PARAMS ((FILE *, int, rtx, int));

#if TARGET_ABI_UNICOSMK
static void alpha_init_machine_status
  PARAMS ((struct function *p));
static void alpha_mark_machine_status
  PARAMS ((struct function *p));
static void alpha_free_machine_status
  PARAMS ((struct function *p));
static void unicosmk_output_deferred_case_vectors PARAMS ((FILE *));
static void unicosmk_gen_dsib PARAMS ((unsigned long *imaskP));
static void unicosmk_output_ssib PARAMS ((FILE *, const char *));
static int unicosmk_need_dex PARAMS ((rtx));
#endif
/* Get the number of args of a function in one of two ways.  */
#if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
#define NUM_ARGS current_function_args_info.num_args
#else
#define NUM_ARGS current_function_args_info
#endif
/* Initialize the GCC target structure.  */
#if TARGET_ABI_OPEN_VMS
const struct attribute_spec vms_attribute_table[];
static unsigned int vms_section_type_flags PARAMS ((tree, const char *, int));
static void vms_asm_named_section PARAMS ((const char *, unsigned int));
static void vms_asm_out_constructor PARAMS ((rtx, int));
static void vms_asm_out_destructor PARAMS ((rtx, int));
# undef TARGET_ATTRIBUTE_TABLE
# define TARGET_ATTRIBUTE_TABLE vms_attribute_table
# undef TARGET_SECTION_TYPE_FLAGS
# define TARGET_SECTION_TYPE_FLAGS vms_section_type_flags
#endif
#if TARGET_ABI_UNICOSMK
static void unicosmk_asm_named_section PARAMS ((const char *, unsigned int));
static void unicosmk_insert_attributes PARAMS ((tree, tree *));
static unsigned int unicosmk_section_type_flags PARAMS ((tree, const char *,
                                                         int));
# undef TARGET_INSERT_ATTRIBUTES
# define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
# undef TARGET_SECTION_TYPE_FLAGS
# define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
#endif
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"

/* Default unaligned ops are provided for ELF systems.  To get unaligned
   data for non-ELF systems, we have to turn off auto alignment.  */
#ifndef OBJECT_FORMAT_ELF
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
#endif
#undef TARGET_ASM_FUNCTION_END_PROLOGUE
#define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE alpha_variable_issue

struct gcc_target targetm = TARGET_INITIALIZER;
/* Parse target option strings.  */

void
override_options ()
{
  int i;
  static const struct cpu_table {
    const char *const name;
    const enum processor_type processor;
    const int flags;
  } cpu_table[] = {
#define EV5_MASK (MASK_CPU_EV5)
#define EV6_MASK (MASK_CPU_EV6|MASK_BWX|MASK_MAX|MASK_FIX)
    { "ev4",    PROCESSOR_EV4, 0 },
    { "ev45",   PROCESSOR_EV4, 0 },
    { "21064",  PROCESSOR_EV4, 0 },
    { "ev5",    PROCESSOR_EV5, EV5_MASK },
    { "21164",  PROCESSOR_EV5, EV5_MASK },
    { "ev56",   PROCESSOR_EV5, EV5_MASK|MASK_BWX },
    { "21164a", PROCESSOR_EV5, EV5_MASK|MASK_BWX },
    { "pca56",  PROCESSOR_EV5, EV5_MASK|MASK_BWX|MASK_MAX },
    { "21164PC",PROCESSOR_EV5, EV5_MASK|MASK_BWX|MASK_MAX },
    { "21164pc",PROCESSOR_EV5, EV5_MASK|MASK_BWX|MASK_MAX },
    { "ev6",    PROCESSOR_EV6, EV6_MASK },
    { "21264",  PROCESSOR_EV6, EV6_MASK },
    { "ev67",   PROCESSOR_EV6, EV6_MASK|MASK_CIX },
    { "21264a", PROCESSOR_EV6, EV6_MASK|MASK_CIX },
    { 0, }
  };
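  /* For example, -mcpu=ev56 selects PROCESSOR_EV5 for scheduling and also
     turns on MASK_BWX (the byte/word extension) in target_flags through the
     table above.  Illustrative note, not part of the original source.  */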
  /* Unicos/Mk doesn't have shared libraries.  */
  if (TARGET_ABI_UNICOSMK && flag_pic)
    {
      warning ("-f%s ignored for Unicos/Mk (not supported)",
               (flag_pic > 1) ? "PIC" : "pic");
      flag_pic = 0;
    }

  /* On Unicos/Mk, the native compiler consistently generates /d suffices for
     floating-point instructions.  Make that the default for this target.  */
  if (TARGET_ABI_UNICOSMK)
    alpha_fprm = ALPHA_FPRM_DYN;
  else
    alpha_fprm = ALPHA_FPRM_NORM;

  alpha_tp = ALPHA_TP_PROG;
  alpha_fptm = ALPHA_FPTM_N;
  /* We cannot use su and sui qualifiers for conversion instructions on
     Unicos/Mk.  I'm not sure if this is due to assembler or hardware
     limitations.  Right now, we issue a warning if -mieee is specified
     and then ignore it; eventually, we should either get it right or
     disable the option altogether.  */

  if (TARGET_IEEE)
    {
      if (TARGET_ABI_UNICOSMK)
        warning ("-mieee not supported on Unicos/Mk");
      else
        {
          alpha_tp = ALPHA_TP_INSN;
          alpha_fptm = ALPHA_FPTM_SU;
        }
    }

  if (TARGET_IEEE_WITH_INEXACT)
    {
      if (TARGET_ABI_UNICOSMK)
        warning ("-mieee-with-inexact not supported on Unicos/Mk");
      else
        {
          alpha_tp = ALPHA_TP_INSN;
          alpha_fptm = ALPHA_FPTM_SUI;
        }
    }
  if (alpha_tp_string)
    {
      if (! strcmp (alpha_tp_string, "p"))
        alpha_tp = ALPHA_TP_PROG;
      else if (! strcmp (alpha_tp_string, "f"))
        alpha_tp = ALPHA_TP_FUNC;
      else if (! strcmp (alpha_tp_string, "i"))
        alpha_tp = ALPHA_TP_INSN;
      else
        error ("bad value `%s' for -mtrap-precision switch", alpha_tp_string);
    }
  if (alpha_fprm_string)
    {
      if (! strcmp (alpha_fprm_string, "n"))
        alpha_fprm = ALPHA_FPRM_NORM;
      else if (! strcmp (alpha_fprm_string, "m"))
        alpha_fprm = ALPHA_FPRM_MINF;
      else if (! strcmp (alpha_fprm_string, "c"))
        alpha_fprm = ALPHA_FPRM_CHOP;
      else if (! strcmp (alpha_fprm_string, "d"))
        alpha_fprm = ALPHA_FPRM_DYN;
      else
        error ("bad value `%s' for -mfp-rounding-mode switch",
               alpha_fprm_string);
    }
  if (alpha_fptm_string)
    {
      if (strcmp (alpha_fptm_string, "n") == 0)
        alpha_fptm = ALPHA_FPTM_N;
      else if (strcmp (alpha_fptm_string, "u") == 0)
        alpha_fptm = ALPHA_FPTM_U;
      else if (strcmp (alpha_fptm_string, "su") == 0)
        alpha_fptm = ALPHA_FPTM_SU;
      else if (strcmp (alpha_fptm_string, "sui") == 0)
        alpha_fptm = ALPHA_FPTM_SUI;
      else
        error ("bad value `%s' for -mfp-trap-mode switch", alpha_fptm_string);
    }
  alpha_cpu
    = TARGET_CPU_DEFAULT & MASK_CPU_EV6 ? PROCESSOR_EV6
      : (TARGET_CPU_DEFAULT & MASK_CPU_EV5 ? PROCESSOR_EV5 : PROCESSOR_EV4);
  if (alpha_cpu_string)
    {
      for (i = 0; cpu_table [i].name; i++)
        if (! strcmp (alpha_cpu_string, cpu_table [i].name))
          {
            alpha_cpu = cpu_table [i].processor;
            target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX
                               | MASK_CPU_EV5 | MASK_CPU_EV6);
            target_flags |= cpu_table [i].flags;
            break;
          }
      if (! cpu_table [i].name)
        error ("bad value `%s' for -mcpu switch", alpha_cpu_string);
    }
  if (alpha_tune_string)
    {
      for (i = 0; cpu_table [i].name; i++)
        if (! strcmp (alpha_tune_string, cpu_table [i].name))
          {
            alpha_cpu = cpu_table [i].processor;
            break;
          }
      if (! cpu_table [i].name)
        error ("bad value `%s' for -mcpu switch", alpha_tune_string);
    }
  /* Do some sanity checks on the above options.  */

  if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
    {
      warning ("trap mode not supported on Unicos/Mk");
      alpha_fptm = ALPHA_FPTM_N;
    }

  if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
      && alpha_tp != ALPHA_TP_INSN && ! TARGET_CPU_EV6)
    {
      warning ("fp software completion requires -mtrap-precision=i");
      alpha_tp = ALPHA_TP_INSN;
    }

  if (TARGET_CPU_EV6)
    {
      /* Except for EV6 pass 1 (not released), we always have precise
         arithmetic traps.  Which means we can do software completion
         without minding trap shadows.  */
      alpha_tp = ALPHA_TP_PROG;
    }
  if (TARGET_FLOAT_VAX)
    {
      if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
        {
          warning ("rounding mode not supported for VAX floats");
          alpha_fprm = ALPHA_FPRM_NORM;
        }
      if (alpha_fptm == ALPHA_FPTM_SUI)
        {
          warning ("trap mode not supported for VAX floats");
          alpha_fptm = ALPHA_FPTM_SU;
        }
    }
  {
    char *end;
    int lat;

    if (!alpha_mlat_string)
      alpha_mlat_string = "L1";

    if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
        && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
      ;
    else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
             && ISDIGIT ((unsigned char)alpha_mlat_string[1])
             && alpha_mlat_string[2] == '\0')
      {
        static int const cache_latency[][4] =
        {
          { 3, 30, -1 },        /* ev4 -- Bcache is a guess */
          { 2, 12, 38 },        /* ev5 -- Bcache from PC164 LMbench numbers */
          { 3, 12, 30 },        /* ev6 -- Bcache from DS20 LMbench.  */
        };

        lat = alpha_mlat_string[1] - '0';
        if (lat <= 0 || lat > 3 || cache_latency[alpha_cpu][lat-1] == -1)
          {
            warning ("L%d cache latency unknown for %s",
                     lat, alpha_cpu_name[alpha_cpu]);
            lat = 3;
          }
        else
          lat = cache_latency[alpha_cpu][lat-1];
      }
    else if (! strcmp (alpha_mlat_string, "main"))
      {
        /* Most current memories have about 370ns latency.  This is
           a reasonable guess for a fast cpu.  */
        lat = 150;
      }
    else
      {
        warning ("bad value `%s' for -mmemory-latency", alpha_mlat_string);
        lat = 3;
      }

    alpha_memory_latency = lat;
  }
  /* Default the definition of "small data" to 8 bytes.  */
  if (!g_switch_set)
    g_switch_value = 8;

  /* Infer TARGET_SMALL_DATA from -fpic/-fPIC.  */
  if (flag_pic == 1)
    target_flags |= MASK_SMALL_DATA;
  else if (flag_pic == 2)
    target_flags &= ~MASK_SMALL_DATA;
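  /* E.g. with -fpic (flag_pic == 1) an 8-byte global can live in .sdata and
     be reached with a single 16-bit displacement off the GP register; with
     -fPIC that assumption does not hold, so small data is turned back off.
     Illustrative note, not part of the original source.  */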
  /* Align labels and loops for optimal branching.  */
  /* ??? Kludge these by not doing anything if we don't optimize and also if
     we are writing ECOFF symbols to work around a bug in DEC's assembler.  */
  if (optimize > 0 && write_symbols != SDB_DEBUG)
    {
      if (align_loops <= 0)
        align_loops = 16;
      if (align_jumps <= 0)
        align_jumps = 16;
    }
  if (align_functions <= 0)
    align_functions = 16;
  /* Acquire a unique set number for our register saves and restores.  */
  alpha_sr_alias_set = new_alias_set ();

  /* Register variables and functions with the garbage collector.  */

#if TARGET_ABI_UNICOSMK
  /* Set up function hooks.  */
  init_machine_status = alpha_init_machine_status;
  mark_machine_status = alpha_mark_machine_status;
  free_machine_status = alpha_free_machine_status;
#endif
}
/* Returns 1 if VALUE is a mask that contains full bytes of zero or ones.  */

int
zap_mask (value)
     HOST_WIDE_INT value;
{
  int i;

  for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
       i++, value >>= 8)
    if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
      return 0;

  return 1;
}
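/* For example, (HOST_WIDE_INT) 0xffffff00 is such a mask (every byte is
   0x00 or 0xff), while 0x0000ff80 is not, since the byte 0x80 is neither.
   Illustration only, not from the original source.  */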
/* Returns 1 if OP is either the constant zero or a register.  If a
   register, it must be in the proper mode unless MODE is VOIDmode.  */

int
reg_or_0_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  return op == const0_rtx || register_operand (op, mode);
}

/* Return 1 if OP is a constant in the range of 0-63 (for a shift) or
   any register.  */

int
reg_or_6bit_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  return ((GET_CODE (op) == CONST_INT
           && (unsigned HOST_WIDE_INT) INTVAL (op) < 64)
          || register_operand (op, mode));
}

/* Return 1 if OP is an 8-bit constant or any register.  */

int
reg_or_8bit_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  return ((GET_CODE (op) == CONST_INT
           && (unsigned HOST_WIDE_INT) INTVAL (op) < 0x100)
          || register_operand (op, mode));
}

/* Return 1 if OP is an 8-bit constant.  */

int
cint8_operand (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  return ((GET_CODE (op) == CONST_INT
           && (unsigned HOST_WIDE_INT) INTVAL (op) < 0x100));
}

/* Return 1 if the operand is a valid second operand to an add insn.  */

int
add_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (GET_CODE (op) == CONST_INT)
    /* Constraints I, J, O and P are covered by K.  */
    return (CONST_OK_FOR_LETTER_P (INTVAL (op), 'K')
            || CONST_OK_FOR_LETTER_P (INTVAL (op), 'L'));

  return register_operand (op, mode);
}

/* Return 1 if the operand is a valid second operand to a sign-extending
   add insn.  */

int
sext_add_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (GET_CODE (op) == CONST_INT)
    return (CONST_OK_FOR_LETTER_P (INTVAL (op), 'I')
            || CONST_OK_FOR_LETTER_P (INTVAL (op), 'O'));

  return reg_not_elim_operand (op, mode);
}

/* Return 1 if OP is the constant 4 or 8.  */

int
const48_operand (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  return (GET_CODE (op) == CONST_INT
          && (INTVAL (op) == 4 || INTVAL (op) == 8));
}
/* Return 1 if OP is a valid first operand to an AND insn.  */

int
and_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == VOIDmode)
    return (zap_mask (CONST_DOUBLE_LOW (op))
            && zap_mask (CONST_DOUBLE_HIGH (op)));

  if (GET_CODE (op) == CONST_INT)
    return ((unsigned HOST_WIDE_INT) INTVAL (op) < 0x100
            || (unsigned HOST_WIDE_INT) ~ INTVAL (op) < 0x100
            || zap_mask (INTVAL (op)));

  return register_operand (op, mode);
}

/* Return 1 if OP is a valid first operand to an IOR or XOR insn.  */

int
or_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (GET_CODE (op) == CONST_INT)
    return ((unsigned HOST_WIDE_INT) INTVAL (op) < 0x100
            || (unsigned HOST_WIDE_INT) ~ INTVAL (op) < 0x100);

  return register_operand (op, mode);
}

/* Return 1 if OP is a constant that is the width, in bits, of an integral
   mode smaller than DImode.  */

int
mode_width_operand (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  return (GET_CODE (op) == CONST_INT
          && (INTVAL (op) == 8 || INTVAL (op) == 16
              || INTVAL (op) == 32 || INTVAL (op) == 64));
}
/* Return 1 if OP is a constant that is the width of an integral machine mode
   smaller than an integer.  */

int
mode_mask_operand (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
#if HOST_BITS_PER_WIDE_INT == 32
  if (GET_CODE (op) == CONST_DOUBLE)
    return (CONST_DOUBLE_LOW (op) == -1
            && (CONST_DOUBLE_HIGH (op) == -1
                || CONST_DOUBLE_HIGH (op) == 0));
#else
  if (GET_CODE (op) == CONST_DOUBLE)
    return (CONST_DOUBLE_LOW (op) == -1 && CONST_DOUBLE_HIGH (op) == 0);
#endif

  return (GET_CODE (op) == CONST_INT
          && (INTVAL (op) == 0xff
              || INTVAL (op) == 0xffff
              || INTVAL (op) == (HOST_WIDE_INT)0xffffffff
#if HOST_BITS_PER_WIDE_INT == 64
              || INTVAL (op) == -1
#endif
              ));
}
/* Return 1 if OP is a multiple of 8 less than 64.  */

int
mul8_operand (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  return (GET_CODE (op) == CONST_INT
          && (unsigned HOST_WIDE_INT) INTVAL (op) < 64
          && (INTVAL (op) & 7) == 0);
}

/* Return 1 if OP is the constant zero in floating-point.  */

int
fp0_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  return (GET_MODE (op) == mode
          && GET_MODE_CLASS (mode) == MODE_FLOAT && op == CONST0_RTX (mode));
}

/* Return 1 if OP is the floating-point constant zero or a register.  */

int
reg_or_fp0_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  return fp0_operand (op, mode) || register_operand (op, mode);
}
/* Return 1 if OP is a hard floating-point register.  */

int
hard_fp_register_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
    return 0;

  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  return GET_CODE (op) == REG && REGNO_REG_CLASS (REGNO (op)) == FLOAT_REGS;
}

/* Return 1 if OP is a hard general register.  */

int
hard_int_register_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
    return 0;

  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  return GET_CODE (op) == REG && REGNO_REG_CLASS (REGNO (op)) == GENERAL_REGS;
}
/* Return 1 if OP is a register or a constant integer.  */

int
reg_or_cint_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  return (GET_CODE (op) == CONST_INT
          || register_operand (op, mode));
}

/* Return 1 if OP is something that can be reloaded into a register;
   if it is a MEM, it need not be valid.  */

int
some_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
    return 0;

  switch (GET_CODE (op))
    {
    case REG:  case MEM:  case CONST_DOUBLE:  case CONST_INT:  case LABEL_REF:
    case SYMBOL_REF:  case CONST:  case HIGH:
      return 1;

    case SUBREG:
      return some_operand (SUBREG_REG (op), VOIDmode);

    default:
      break;
    }

  return 0;
}
/* Likewise, but don't accept constants.  */

int
some_ni_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (GET_MODE (op) != mode && mode != VOIDmode)
    return 0;

  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  return (GET_CODE (op) == REG || GET_CODE (op) == MEM);
}
803 input_operand (op
, mode
)
805 enum machine_mode mode
;
807 if (mode
!= VOIDmode
&& GET_MODE (op
) != VOIDmode
&& mode
!= GET_MODE (op
))
810 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
&& GET_MODE (op
) != mode
)
813 switch (GET_CODE (op
))
818 if (TARGET_EXPLICIT_RELOCS
)
820 /* We don't split symbolic operands into something unintelligable
821 until after reload, but we do not wish non-small, non-global
822 symbolic operands to be reconstructed from their high/lo_sum
824 return (small_symbolic_operand (op
, mode
)
825 || global_symbolic_operand (op
, mode
));
828 /* This handles both the Windows/NT and OSF cases. */
829 return mode
== ptr_mode
|| mode
== DImode
;
832 return (TARGET_EXPLICIT_RELOCS
833 && local_symbolic_operand (XEXP (op
, 0), mode
));
840 if (register_operand (op
, mode
))
842 /* ... fall through ... */
844 return ((TARGET_BWX
|| (mode
!= HImode
&& mode
!= QImode
))
845 && general_operand (op
, mode
));
848 return GET_MODE_CLASS (mode
) == MODE_FLOAT
&& op
== CONST0_RTX (mode
);
851 return mode
== QImode
|| mode
== HImode
|| add_operand (op
, mode
);
/* Return 1 if OP is a SYMBOL_REF for a function known to be in this
   file, and in the same section as the current function.  */

int
current_file_function_operand (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  if (GET_CODE (op) != SYMBOL_REF)
    return 0;

  /* Easy test for recursion.  */
  if (op == XEXP (DECL_RTL (current_function_decl), 0))
    return 1;

  /* Otherwise, we need the DECL for the SYMBOL_REF, which we can't get.
     So SYMBOL_REF_FLAG has been declared to imply that the function is
     in the default text section.  So we must also check that the current
     function is also in the text section.  */
  if (SYMBOL_REF_FLAG (op) && decl_in_text_section (current_function_decl))
    return 1;

  return 0;
}
/* Return 1 if OP is a SYMBOL_REF for which we can make a call via bsr.  */

int
direct_call_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  /* Must be defined in this file.  */
  if (! current_file_function_operand (op, mode))
    return 0;

  /* If profiling is implemented via linker tricks, we can't jump
     to the nogp alternate entry point.  */
  /* ??? TARGET_PROFILING_NEEDS_GP isn't really the right test,
     but is approximately correct for the OSF ABIs.  Don't know
     what to do for VMS, NT, or UMK.  */
  if (! TARGET_PROFILING_NEEDS_GP
      && ! current_function_profile)
    return 1;

  return 0;
}
/* Return true if OP is a LABEL_REF, or SYMBOL_REF or CONST referencing
   a variable known to be defined in this file.  */

static bool
local_symbol_p (op)
     rtx op;
{
  const char *str = XSTR (op, 0);

  /* ??? SYMBOL_REF_FLAG is set for local function symbols, but we
     run into problems with the rtl inliner in that the symbol was
     once external, but is local after inlining, which results in
     unrecognizable insns.  */

  return (CONSTANT_POOL_ADDRESS_P (op)
          /* If @, then ENCODE_SECTION_INFO sez it's local.  */
          || str[0] == '@'
          /* If *$, then ASM_GENERATE_INTERNAL_LABEL sez it's local.  */
          || (str[0] == '*' && str[1] == '$'));
}
int
local_symbolic_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
    return 0;

  if (GET_CODE (op) == LABEL_REF)
    return 1;

  if (GET_CODE (op) == CONST
      && GET_CODE (XEXP (op, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
    op = XEXP (XEXP (op, 0), 0);

  if (GET_CODE (op) != SYMBOL_REF)
    return 0;

  return local_symbol_p (op);
}
/* Return true if OP is a SYMBOL_REF or CONST referencing a variable
   known to be defined in this file in the small data area.  */

int
small_symbolic_operand (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  const char *str;

  if (! TARGET_SMALL_DATA)
    return 0;

  if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
    return 0;

  if (GET_CODE (op) == CONST
      && GET_CODE (XEXP (op, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
    op = XEXP (XEXP (op, 0), 0);

  if (GET_CODE (op) != SYMBOL_REF)
    return 0;

  if (CONSTANT_POOL_ADDRESS_P (op))
    return GET_MODE_SIZE (get_pool_mode (op)) <= (unsigned) g_switch_value;
  else
    {
      str = XSTR (op, 0);
      return str[0] == '@' && str[1] == 's';
    }
}
/* Return true if OP is a SYMBOL_REF or CONST referencing a variable
   not known (or known not) to be defined in this file.  */

int
global_symbolic_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
    return 0;

  if (GET_CODE (op) == CONST
      && GET_CODE (XEXP (op, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
    op = XEXP (XEXP (op, 0), 0);

  if (GET_CODE (op) != SYMBOL_REF)
    return 0;

  return ! local_symbol_p (op);
}
/* Return 1 if OP is a valid operand for the MEM of a CALL insn.  */

int
call_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (mode != Pmode)
    return 0;

  if (GET_CODE (op) == REG)
    {
      if (TARGET_ABI_OSF)
        /* Disallow virtual registers to cope with pathological test cases
           such as compile/930117-1.c in which the virtual reg decomposes
           to the frame pointer.  Which is a hard reg that is not $27.  */
        return (REGNO (op) == 27 || REGNO (op) > LAST_VIRTUAL_REGISTER);
      else
        return 1;
    }
  if (TARGET_ABI_UNICOSMK)
    return 0;
  if (GET_CODE (op) == SYMBOL_REF)
    return 1;

  return 0;
}
/* Returns 1 if OP is a symbolic operand, i.e. a symbol_ref or a label_ref,
   possibly with an offset.  */

int
symbolic_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
    return 0;
  if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
    return 1;
  if (GET_CODE (op) == CONST
      && GET_CODE (XEXP (op,0)) == PLUS
      && GET_CODE (XEXP (XEXP (op,0), 0)) == SYMBOL_REF
      && GET_CODE (XEXP (XEXP (op,0), 1)) == CONST_INT)
    return 1;
  return 0;
}
/* Return 1 if OP is a valid Alpha comparison operator.  Here we know which
   comparisons are valid in which insn.  */

int
alpha_comparison_operator (op, mode)
     rtx op;
     enum machine_mode mode;
{
  enum rtx_code code = GET_CODE (op);

  if (mode != GET_MODE (op) && mode != VOIDmode)
    return 0;

  return (code == EQ || code == LE || code == LT
          || code == LEU || code == LTU);
}
/* Return 1 if OP is a valid Alpha comparison operator against zero.
   Here we know which comparisons are valid in which insn.  */

int
alpha_zero_comparison_operator (op, mode)
     rtx op;
     enum machine_mode mode;
{
  enum rtx_code code = GET_CODE (op);

  if (mode != GET_MODE (op) && mode != VOIDmode)
    return 0;

  return (code == EQ || code == NE || code == LE || code == LT
          || code == LEU || code == LTU);
}
/* Return 1 if OP is a valid Alpha swapped comparison operator.  */

int
alpha_swapped_comparison_operator (op, mode)
     rtx op;
     enum machine_mode mode;
{
  enum rtx_code code = GET_CODE (op);

  if ((mode != GET_MODE (op) && mode != VOIDmode)
      || GET_RTX_CLASS (code) != '<')
    return 0;

  code = swap_condition (code);
  return (code == EQ || code == LE || code == LT
          || code == LEU || code == LTU);
}
/* Return 1 if OP is a signed comparison operation.  */

int
signed_comparison_operator (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  enum rtx_code code = GET_CODE (op);

  if (mode != GET_MODE (op) && mode != VOIDmode)
    return 0;

  return (code == EQ || code == NE
          || code == LE || code == LT
          || code == GE || code == GT);
}
/* Return 1 if OP is a valid Alpha floating point comparison operator.
   Here we know which comparisons are valid in which insn.  */

int
alpha_fp_comparison_operator (op, mode)
     rtx op;
     enum machine_mode mode;
{
  enum rtx_code code = GET_CODE (op);

  if (mode != GET_MODE (op) && mode != VOIDmode)
    return 0;

  return (code == EQ || code == LE || code == LT || code == UNORDERED);
}
/* Return 1 if this is a divide or modulus operator.  */

int
divmod_operator (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  switch (GET_CODE (op))
    {
    case DIV:  case MOD:  case UDIV:  case UMOD:
      return 1;

    default:
      break;
    }

  return 0;
}
/* Return 1 if this memory address is a known aligned register plus
   a constant.  It must be a valid address.  This means that we can do
   this as an aligned reference plus some offset.

   Take into account what reload will do.  */

int
aligned_memory_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  rtx base;

  if (reload_in_progress)
    {
      rtx tmp = op;
      if (GET_CODE (tmp) == SUBREG)
        tmp = SUBREG_REG (tmp);
      if (GET_CODE (tmp) == REG
          && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
        {
          op = reg_equiv_memory_loc[REGNO (tmp)];
          if (op == 0)
            return 0;
        }
    }

  if (GET_CODE (op) != MEM
      || GET_MODE (op) != mode)
    return 0;
  op = XEXP (op, 0);

  /* LEGITIMIZE_RELOAD_ADDRESS creates (plus (plus reg const_hi) const_lo)
     sorts of constructs.  Dig for the real base register.  */
  if (reload_in_progress
      && GET_CODE (op) == PLUS
      && GET_CODE (XEXP (op, 0)) == PLUS)
    base = XEXP (XEXP (op, 0), 0);
  else
    {
      if (! memory_address_p (mode, op))
        return 0;
      base = (GET_CODE (op) == PLUS ? XEXP (op, 0) : op);
    }

  return (GET_CODE (base) == REG && REGNO_POINTER_ALIGN (REGNO (base)) >= 32);
}
/* Similar, but return 1 if OP is a MEM which is not alignable.  */

int
unaligned_memory_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  rtx base;

  if (reload_in_progress)
    {
      rtx tmp = op;
      if (GET_CODE (tmp) == SUBREG)
        tmp = SUBREG_REG (tmp);
      if (GET_CODE (tmp) == REG
          && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
        {
          op = reg_equiv_memory_loc[REGNO (tmp)];
          if (op == 0)
            return 0;
        }
    }

  if (GET_CODE (op) != MEM
      || GET_MODE (op) != mode)
    return 0;
  op = XEXP (op, 0);

  /* LEGITIMIZE_RELOAD_ADDRESS creates (plus (plus reg const_hi) const_lo)
     sorts of constructs.  Dig for the real base register.  */
  if (reload_in_progress
      && GET_CODE (op) == PLUS
      && GET_CODE (XEXP (op, 0)) == PLUS)
    base = XEXP (XEXP (op, 0), 0);
  else
    {
      if (! memory_address_p (mode, op))
        return 0;
      base = (GET_CODE (op) == PLUS ? XEXP (op, 0) : op);
    }

  return (GET_CODE (base) == REG && REGNO_POINTER_ALIGN (REGNO (base)) < 32);
}
/* Return 1 if OP is either a register or an unaligned memory location.  */

int
reg_or_unaligned_mem_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  return register_operand (op, mode) || unaligned_memory_operand (op, mode);
}
/* Return 1 if OP is any memory location.  During reload a pseudo matches.  */

int
any_memory_operand (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  return (GET_CODE (op) == MEM
          || (GET_CODE (op) == SUBREG && GET_CODE (SUBREG_REG (op)) == REG)
          || (reload_in_progress && GET_CODE (op) == REG
              && REGNO (op) >= FIRST_PSEUDO_REGISTER)
          || (reload_in_progress && GET_CODE (op) == SUBREG
              && GET_CODE (SUBREG_REG (op)) == REG
              && REGNO (SUBREG_REG (op)) >= FIRST_PSEUDO_REGISTER));
}
/* Returns 1 if OP is not an eliminable register.

   This exists to cure a pathological abort in the s8addq (et al) patterns,

	long foo () { long t; bar(); return (long) &t * 26107; }

   which run afoul of a hack in reload to cure a (presumably) similar
   problem with lea-type instructions on other targets.  But there is
   one of us and many of them, so work around the problem by selectively
   preventing combine from making the optimization.  */

int
reg_not_elim_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  rtx inner = op;
  if (GET_CODE (op) == SUBREG)
    inner = SUBREG_REG (op);
  if (inner == frame_pointer_rtx || inner == arg_pointer_rtx)
    return 0;

  return register_operand (op, mode);
}
/* Return 1 if OP is a memory location that is not a reference (using
   an AND) to an unaligned location.  Take into account what reload
   will do.  */

int
normal_memory_operand (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  if (reload_in_progress)
    {
      rtx tmp = op;
      if (GET_CODE (tmp) == SUBREG)
        tmp = SUBREG_REG (tmp);
      if (GET_CODE (tmp) == REG
          && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
        {
          op = reg_equiv_memory_loc[REGNO (tmp)];

          /* This may not have been assigned an equivalent address if it will
             be eliminated.  In that case, it doesn't matter what we do.  */
          if (op == 0)
            return 1;
        }
    }

  return GET_CODE (op) == MEM && GET_CODE (XEXP (op, 0)) != AND;
}
/* Accept a register, but not a subreg of any kind.  This allows us to
   avoid pathological cases in reload wrt data movement common in
   int->fp conversion.  */

int
reg_no_subreg_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (GET_CODE (op) != REG)
    return 0;
  return register_operand (op, mode);
}
/* Recognize an addition operation that includes a constant.  Used to
   convince reload to canonize (plus (plus reg c1) c2) during register
   elimination.  */

int
addition_operation (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (GET_MODE (op) != mode && mode != VOIDmode)
    return 0;
  if (GET_CODE (op) == PLUS
      && register_operand (XEXP (op, 0), mode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && CONST_OK_FOR_LETTER_P (INTVAL (XEXP (op, 1)), 'K'))
    return 1;
  return 0;
}
/* Implements CONST_OK_FOR_LETTER_P.  Return true if the value matches
   the range defined for C in [I-P].  */

bool
alpha_const_ok_for_letter_p (value, c)
     HOST_WIDE_INT value;
     int c;
{
  switch (c)
    {
    case 'I':
      /* An unsigned 8 bit constant.  */
      return (unsigned HOST_WIDE_INT) value < 0x100;
    case 'J':
      /* The constant zero.  */
      return value == 0;
    case 'K':
      /* A signed 16 bit constant.  */
      return (unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000;
    case 'L':
      /* A shifted signed 16 bit constant appropriate for LDAH.  */
      return ((value & 0xffff) == 0
              && ((value) >> 31 == -1 || value >> 31 == 0));
    case 'M':
      /* A constant that can be AND'ed with using a ZAP insn.  */
      return zap_mask (value);
    case 'N':
      /* A complemented unsigned 8 bit constant.  */
      return (unsigned HOST_WIDE_INT) (~ value) < 0x100;
    case 'O':
      /* A negated unsigned 8 bit constant.  */
      return (unsigned HOST_WIDE_INT) (- value) < 0x100;
    case 'P':
      /* The constant 1, 2 or 3.  */
      return value == 1 || value == 2 || value == 3;

    default:
      return false;
    }
}
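/* For example, 0x7fff0000 satisfies 'L' (low 16 bits clear, fits an ldah),
   0xfffffffffffffff0 satisfies 'N' (its complement, 0xf, is an unsigned
   8-bit value), and 0xffff00000000ffff satisfies 'M' since every byte is
   all-zeros or all-ones.  Illustrative values, not from the original
   source.  */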
/* Implements CONST_DOUBLE_OK_FOR_LETTER_P.  Return true if VALUE
   matches for C in [GH].  */

bool
alpha_const_double_ok_for_letter_p (value, c)
     rtx value;
     int c;
{
  switch (c)
    {
    case 'G':
      /* The floating point zero constant.  */
      return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
              && value == CONST0_RTX (GET_MODE (value)));

    case 'H':
      /* A valid operand of a ZAP insn.  */
      return (GET_MODE (value) == VOIDmode
              && zap_mask (CONST_DOUBLE_LOW (value))
              && zap_mask (CONST_DOUBLE_HIGH (value)));

    default:
      return false;
    }
}
/* Implements EXTRA_CONSTRAINT.  Return true if VALUE matches for C
   in [QRSTU].  */

bool
alpha_extra_constraint (value, c)
     rtx value;
     int c;
{
  switch (c)
    {
    case 'Q':
      return normal_memory_operand (value, VOIDmode);
    case 'R':
      return direct_call_operand (value, Pmode);
    case 'S':
      return (GET_CODE (value) == CONST_INT
              && (unsigned HOST_WIDE_INT) INTVAL (value) < 64);
    case 'T':
      return GET_CODE (value) == HIGH;
    case 'U':
      return TARGET_ABI_UNICOSMK && symbolic_operand (value, VOIDmode);

    default:
      return false;
    }
}
/* Return 1 if this function can directly return via $26.  */

int
direct_return ()
{
  return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
          && reload_completed
          && alpha_sa_size () == 0
          && get_frame_size () == 0
          && current_function_outgoing_args_size == 0
          && current_function_pretend_args_size == 0);
}
/* Return the ADDR_VEC associated with a tablejump insn.  */

rtx
alpha_tablejump_addr_vec (insn)
     rtx insn;
{
  rtx tmp;

  tmp = JUMP_LABEL (insn);
  if (! tmp)
    return NULL_RTX;
  tmp = NEXT_INSN (tmp);
  if (! tmp)
    return NULL_RTX;
  if (GET_CODE (tmp) == JUMP_INSN
      && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
    return PATTERN (tmp);
  return NULL_RTX;
}
/* Return the label of the predicted edge, or CONST0_RTX if we don't know.  */

rtx
alpha_tablejump_best_label (insn)
     rtx insn;
{
  rtx jump_table = alpha_tablejump_addr_vec (insn);
  rtx best_label = NULL_RTX;

  /* ??? Once the CFG doesn't keep getting completely rebuilt, look
     there for edge frequency counts from profile data.  */

  if (jump_table)
    {
      int n_labels = XVECLEN (jump_table, 1);
      int best_count = -1;
      int i, j;

      for (i = 0; i < n_labels; i++)
        {
          int count = 1;

          for (j = i + 1; j < n_labels; j++)
            if (XEXP (XVECEXP (jump_table, 1, i), 0)
                == XEXP (XVECEXP (jump_table, 1, j), 0))
              count++;

          if (count > best_count)
            best_count = count, best_label = XVECEXP (jump_table, 1, i);
        }
    }

  return best_label ? best_label : const0_rtx;
}
/* Return true if the function DECL will be placed in the default text
   section.  */
/* ??? Ideally we'd be able to always move from a SYMBOL_REF back to the
   decl, as that would allow us to determine if two functions are in the
   same section, which is what we really want to know.  */

static bool
decl_in_text_section (decl)
     tree decl;
{
  return (DECL_SECTION_NAME (decl) == NULL_TREE
          && ! (flag_function_sections
                || (targetm.have_named_sections
                    && DECL_ONE_ONLY (decl))));
}
/* If we are referencing a function that is static, make the SYMBOL_REF
   special.  We use this to indicate that we can branch to this function
   without setting PV or restoring GP.

   If this is a variable that is known to be defined locally, add "@v"
   to the name.  If in addition the variable is to go in .sdata/.sbss,
   then add "@s" instead.  */
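/* For instance (illustration only, not in the original source): a locally
   defined "int big[100];" would have its SYMBOL_REF name rewritten to
   "@vbig", while a locally defined "int tiny;" small enough for -G would
   become "@stiny"; small_symbolic_operand above keys off that "@s" prefix.  */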
void
alpha_encode_section_info (decl)
     tree decl;
{
  const char *symbol_str;
  bool is_local, is_small;

  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      /* We mark public functions once they are emitted; otherwise we
         don't know that they exist in this unit of translation.  */
      if (TREE_PUBLIC (decl))
        return;
      /* Do not mark functions that are not in .text; otherwise we
         don't know that they are near enough for a direct branch.  */
      if (! decl_in_text_section (decl))
        return;

      SYMBOL_REF_FLAG (XEXP (DECL_RTL (decl), 0)) = 1;
      return;
    }

  /* Early out if we're not going to do anything with this data.  */
  if (! TARGET_EXPLICIT_RELOCS)
    return;

  /* Careful not to prod global register variables.  */
  if (TREE_CODE (decl) != VAR_DECL
      || GET_CODE (DECL_RTL (decl)) != MEM
      || GET_CODE (XEXP (DECL_RTL (decl), 0)) != SYMBOL_REF)
    return;

  symbol_str = XSTR (XEXP (DECL_RTL (decl), 0), 0);

  /* A variable is considered "local" if it is defined in this module.  */

  if (DECL_EXTERNAL (decl))
    is_local = false;
  /* Linkonce and weak data is never local.  */
  else if (DECL_ONE_ONLY (decl) || DECL_WEAK (decl))
    is_local = false;
  else if (! TREE_PUBLIC (decl))
    is_local = true;
  /* If PIC, then assume that any global name can be overridden by
     symbols resolved from other modules.  */
  else if (flag_pic)
    is_local = false;
  /* Uninitialized COMMON variable may be unified with symbols
     resolved from other modules.  */
  else if (DECL_COMMON (decl)
           && (DECL_INITIAL (decl) == NULL
               || DECL_INITIAL (decl) == error_mark_node))
    is_local = false;
  /* Otherwise we're left with initialized (or non-common) global data
     which is of necessity defined locally.  */
  else
    is_local = true;

  /* Determine if DECL will wind up in .sdata/.sbss.  */

  is_small = false;
  if (DECL_SECTION_NAME (decl))
    {
      const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
      if (strcmp (section, ".sdata") == 0
          || strcmp (section, ".sbss") == 0)
        is_small = true;
    }
  else
    {
      HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));

      /* If the variable has already been defined in the output file, then it
         is too late to put it in sdata if it wasn't put there in the first
         place.  The test is here rather than above, because if it is already
         in sdata, then it can stay there.  */

      if (TREE_ASM_WRITTEN (decl))
        ;
      /* If this is an incomplete type with size 0, then we can't put it in
         sdata because it might be too big when completed.  */
      else if (size > 0 && size <= g_switch_value)
        is_small = true;
    }

  /* Finally, encode this into the symbol string.  */
  if (is_local)
    {
      const char *string;
      char *newstr;
      size_t len;

      if (symbol_str[0] == '@')
        {
          if (symbol_str[1] == (is_small ? 's' : 'v'))
            return;
          symbol_str += 2;
        }

      len = strlen (symbol_str) + 1;
      newstr = alloca (len + 2);

      newstr[0] = '@';
      newstr[1] = (is_small ? 's' : 'v');
      memcpy (newstr + 2, symbol_str, len);

      string = ggc_alloc_string (newstr, len + 2 - 1);
      XSTR (XEXP (DECL_RTL (decl), 0), 0) = string;
    }
  else if (symbol_str[0] == '@')
    abort ();
}
/* legitimate_address_p recognizes an RTL expression that is a valid
   memory address for an instruction.  The MODE argument is the
   machine mode for the MEM expression that wants to use this address.

   For Alpha, we have either a constant address or the sum of a
   register and a constant address, or just a register.  For DImode,
   any of those forms can be surrounded with an AND that clears the
   low-order three bits; this is an "unaligned" access.  */
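/* Typical accepted forms (illustration only, not in the original source):
     (reg $16)
     (plus (reg $16) (const_int 48))
     (and (plus (reg $16) (const_int 5)) (const_int -8))   -- ldq_u style
   The last form is what the "discard the outer AND" case below handles.  */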
bool
alpha_legitimate_address_p (mode, x, strict)
     enum machine_mode mode;
     rtx x;
     int strict;
{
  /* If this is an ldq_u type address, discard the outer AND.  */
  if (mode == DImode
      && GET_CODE (x) == AND
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && INTVAL (XEXP (x, 1)) == -8)
    x = XEXP (x, 0);

  /* Discard non-paradoxical subregs.  */
  if (GET_CODE (x) == SUBREG
      && (GET_MODE_SIZE (GET_MODE (x))
          < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
    x = SUBREG_REG (x);

  /* Unadorned general registers are valid.  */
  if (REG_P (x))
    return (strict
            ? STRICT_REG_OK_FOR_BASE_P (x)
            : NONSTRICT_REG_OK_FOR_BASE_P (x));

  /* Constant addresses (i.e. +/- 32k) are valid.  */
  if (CONSTANT_ADDRESS_P (x))
    return true;

  /* Register plus a small constant offset is valid.  */
  if (GET_CODE (x) == PLUS)
    {
      rtx ofs = XEXP (x, 1);
      x = XEXP (x, 0);

      /* Discard non-paradoxical subregs.  */
      if (GET_CODE (x) == SUBREG
          && (GET_MODE_SIZE (GET_MODE (x))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
        x = SUBREG_REG (x);

      if (REG_P (x))
        {
          if (! strict
              && NONSTRICT_REG_OK_FP_BASE_P (x)
              && GET_CODE (ofs) == CONST_INT)
            return true;
          if ((strict
               ? STRICT_REG_OK_FOR_BASE_P (x)
               : NONSTRICT_REG_OK_FOR_BASE_P (x))
              && CONSTANT_ADDRESS_P (ofs))
            return true;
        }
      else if (GET_CODE (x) == ADDRESSOF
               && GET_CODE (ofs) == CONST_INT)
        return true;
    }

  /* If we're managing explicit relocations, LO_SUM is valid, as
     are small data symbols.  */
  else if (TARGET_EXPLICIT_RELOCS)
    {
      if (small_symbolic_operand (x, Pmode))
        return true;

      if (GET_CODE (x) == LO_SUM)
        {
          rtx ofs = XEXP (x, 1);
          x = XEXP (x, 0);

          /* Discard non-paradoxical subregs.  */
          if (GET_CODE (x) == SUBREG
              && (GET_MODE_SIZE (GET_MODE (x))
                  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
            x = SUBREG_REG (x);

          /* Must have a valid base register.  */
          if (! (REG_P (x)
                 && (strict
                     ? STRICT_REG_OK_FOR_BASE_P (x)
                     : NONSTRICT_REG_OK_FOR_BASE_P (x))))
            return false;

          /* The symbol must be local.  */
          if (local_symbolic_operand (ofs, Pmode))
            return true;
        }
    }

  return false;
}
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.  */

rtx
alpha_legitimize_address (x, scratch, mode)
     rtx x;
     rtx scratch;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  HOST_WIDE_INT addend;

  /* If the address is (plus reg const_int) and the CONST_INT is not a
     valid offset, compute the high part of the constant and add it to
     the register.  Then our address is (plus temp low-part-const).  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
    {
      addend = INTVAL (XEXP (x, 1));
      x = XEXP (x, 0);
      goto split_addend;
    }

  /* If the address is (const (plus FOO const_int)), find the low-order
     part of the CONST_INT.  Then load FOO plus any high-order part of the
     CONST_INT into a register.  Our address is (plus reg low-part-const).
     This is done to reduce the number of GOT entries.  */
  if (!no_new_pseudos
      && GET_CODE (x) == CONST
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
    {
      addend = INTVAL (XEXP (XEXP (x, 0), 1));
      x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
      goto split_addend;
    }

  /* If we have a (plus reg const), emit the load as in (2), then add
     the two registers, and finally generate (plus reg low-part-const) as
     our address.  */
  if (!no_new_pseudos
      && GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == CONST
      && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
      && GET_CODE (XEXP (XEXP (XEXP (x, 1), 0), 1)) == CONST_INT)
    {
      addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
      x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
                               XEXP (XEXP (XEXP (x, 1), 0), 0),
                               NULL_RTX, 1, OPTAB_LIB_WIDEN);
      goto split_addend;
    }

  /* If this is a local symbol, split the address into HIGH/LO_SUM parts.  */
  if (TARGET_EXPLICIT_RELOCS && symbolic_operand (x, Pmode))
    {
      if (local_symbolic_operand (x, Pmode))
        {
          if (small_symbolic_operand (x, Pmode))
            return x;
          else
            {
              if (!no_new_pseudos)
                scratch = gen_reg_rtx (Pmode);
              emit_insn (gen_rtx_SET (VOIDmode, scratch,
                                      gen_rtx_HIGH (Pmode, x)));
              return gen_rtx_LO_SUM (Pmode, scratch, x);
            }
        }
    }

  return NULL;

 split_addend:
  {
    HOST_WIDE_INT low, high;

    low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
    addend -= low;
    high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
    addend -= high;

    if (addend)
      x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
                               (no_new_pseudos ? scratch : NULL_RTX),
                               1, OPTAB_LIB_WIDEN);
    if (high)
      x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
                               (no_new_pseudos ? scratch : NULL_RTX),
                               1, OPTAB_LIB_WIDEN);

    return plus_constant (x, low);
  }
}
/* For TARGET_EXPLICIT_RELOCS, we don't obfuscate a SYMBOL_REF to a
   small symbolic operand until after reload.  At which point we need
   to replace (mem (symbol_ref)) with (mem (lo_sum $29 symbol_ref))
   so that sched2 has the proper dependency information.  */
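/* Illustration (not in the original source): after reload a load such as
     (set (reg $1) (mem (symbol_ref "@sfoo")))
   is rewritten to
     (set (reg $1) (mem (lo_sum (reg $29) (symbol_ref "@sfoo"))))
   so the scheduler sees the dependence on the GP register ($29).  */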
int
some_small_symbolic_mem_operand (x, mode)
     rtx x;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  /* Get rid of SIGN_EXTEND, etc.  */
  while (GET_RTX_CLASS (GET_CODE (x)) == '1')
    x = XEXP (x, 0);

  return (GET_CODE (x) == MEM
          && small_symbolic_operand (XEXP (x, 0), Pmode));
}
rtx
split_small_symbolic_mem_operand (x)
     rtx x;
{
  rtx *p;

  if (GET_CODE (x) == MEM)
    {
      rtx tmp = gen_rtx_LO_SUM (DImode, pic_offset_table_rtx, XEXP (x, 0));
      return replace_equiv_address (x, tmp);
    }

  p = &x;
  while (GET_RTX_CLASS (GET_CODE (*p)) == '1')
    p = &XEXP (*p, 0);

  *p = split_small_symbolic_mem_operand (*p);
  return x;
}
/* Try a machine-dependent way of reloading an illegitimate address
   operand.  If we find one, push the reload and return the new rtx.  */

rtx
alpha_legitimize_reload_address (x, mode, opnum, type, ind_levels)
     rtx x;
     enum machine_mode mode ATTRIBUTE_UNUSED;
     int opnum;
     int type;
     int ind_levels ATTRIBUTE_UNUSED;
{
  /* We must recognize output that we have already generated ourselves.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    {
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
                   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
                   opnum, type);
      return x;
    }

  /* We wish to handle large displacements off a base register by
     splitting the addend across an ldah and the mem insn.  This
     cuts number of extra insns needed from 3 to 1.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
      && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    {
      HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT high
        = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
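      /* E.g. a stack slot at (plus $30 69640): low becomes 0x1008 and high
         0x10000, so reload loads $30+0x10000 with a single ldah and the
         memory insn keeps the 0x1008 displacement.  Worked example only,
         not from the original source.  */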
      /* Check for 32-bit overflow.  */
      if (high + low != val)
        return NULL_RTX;

      /* Reload the high part into a base reg; leave the low part
         in the mem directly.  */
      x = gen_rtx_PLUS (GET_MODE (x),
                        gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
                                      GEN_INT (high)),
                        GEN_INT (low));

      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
                   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
                   opnum, type);
      return x;
    }

  return NULL_RTX;
}
/* REF is an alignable memory location.  Place an aligned SImode
   reference into *PALIGNED_MEM and the number of bits to shift into
   *PBITNUM.  SCRATCH is a free register for use in reloading out
   of range stack slots.  */

void
get_aligned_mem (ref, paligned_mem, pbitnum)
     rtx ref;
     rtx *paligned_mem, *pbitnum;
{
  rtx base;
  HOST_WIDE_INT offset = 0;

  if (GET_CODE (ref) != MEM)
    abort ();

  if (reload_in_progress
      && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
    {
      base = find_replacement (&XEXP (ref, 0));

      if (! memory_address_p (GET_MODE (ref), base))
        abort ();
    }
  else
    {
      base = XEXP (ref, 0);
    }

  if (GET_CODE (base) == PLUS)
    offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);

  *paligned_mem
    = widen_memory_access (ref, SImode, (offset & ~3) - offset);

  if (WORDS_BIG_ENDIAN)
    *pbitnum = GEN_INT (32 - (GET_MODE_BITSIZE (GET_MODE (ref))
                              + (offset & 3) * 8));
  else
    *pbitnum = GEN_INT ((offset & 3) * 8);
}
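/* For example, a QImode ref at offset 6 from an aligned base yields an
   SImode *PALIGNED_MEM covering offsets 4..7 and, on a little-endian
   target, *PBITNUM of 16 (the byte sits two bytes into the word).
   Illustration only, not from the original source.  */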
/* Similar, but just get the address.  Handle the two reload cases.
   Add EXTRA_OFFSET to the address we return.  */

rtx
get_unaligned_address (ref, extra_offset)
     rtx ref;
     int extra_offset;
{
  rtx base;
  HOST_WIDE_INT offset = 0;

  if (GET_CODE (ref) != MEM)
    abort ();

  if (reload_in_progress
      && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
    {
      base = find_replacement (&XEXP (ref, 0));

      if (! memory_address_p (GET_MODE (ref), base))
        abort ();
    }
  else
    {
      base = XEXP (ref, 0);
    }

  if (GET_CODE (base) == PLUS)
    offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);

  return plus_constant (base, offset + extra_offset);
}
/* On the Alpha, all (non-symbolic) constants except zero go into
   a floating-point register via memory.  Note that we cannot
   return anything that is not a subset of CLASS, and that some
   symbolic constants cannot be dropped to memory.  */

enum reg_class
alpha_preferred_reload_class(x, class)
     rtx x;
     enum reg_class class;
{
  /* Zero is present in any register class.  */
  if (x == CONST0_RTX (GET_MODE (x)))
    return class;

  /* These sorts of constants we can easily drop to memory.  */
  if (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE)
    {
      if (class == FLOAT_REGS)
        return NO_REGS;
      if (class == ALL_REGS)
        return GENERAL_REGS;
      return class;
    }

  /* All other kinds of constants should not (and in the case of HIGH
     cannot) be dropped to memory -- instead we use a GENERAL_REGS
     secondary reload.  */

  return (class == ALL_REGS ? GENERAL_REGS : class);
}
/* Loading and storing HImode or QImode values to and from memory
   usually requires a scratch register.  The exceptions are loading
   QImode and HImode from an aligned address to a general register
   unless byte instructions are permitted.

   We also cannot load an unaligned address or a paradoxical SUBREG
   into an FP register.

   We also cannot do integral arithmetic into FP regs, as might result
   from register elimination into a DImode fp register.  */
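/* For instance, without byte/word extensions (-mbwx) a QImode load from an
   address not known to be aligned has to go through an integer scratch
   register (ldq_u/extbl), which is why the cases below hand back
   GENERAL_REGS.  Illustrative note, not part of the original source.  */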
enum reg_class
secondary_reload_class (class, mode, x, in)
     enum reg_class class;
     enum machine_mode mode;
     rtx x;
     int in;
{
  if ((mode == QImode || mode == HImode) && ! TARGET_BWX)
    {
      if (GET_CODE (x) == MEM
          || (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER)
          || (GET_CODE (x) == SUBREG
              && (GET_CODE (SUBREG_REG (x)) == MEM
                  || (GET_CODE (SUBREG_REG (x)) == REG
                      && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER))))
        {
          if (!in || !aligned_memory_operand(x, mode))
            return GENERAL_REGS;
        }
    }

  if (class == FLOAT_REGS)
    {
      if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
        return GENERAL_REGS;

      if (GET_CODE (x) == SUBREG
          && (GET_MODE_SIZE (GET_MODE (x))
              > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
        return GENERAL_REGS;

      if (in && INTEGRAL_MODE_P (mode)
          && ! (memory_operand (x, mode) || x == const0_rtx))
        return GENERAL_REGS;
    }

  return NO_REGS;
}
/* Subfunction of the following function.  Update the flags of any MEM
   found in part of X.  */

static void
alpha_set_memflags_1 (x, in_struct_p, volatile_p, unchanging_p)
     rtx x;
     int in_struct_p, volatile_p, unchanging_p;
{
  int i;

  switch (GET_CODE (x))
    {
    case SEQUENCE:
    case PARALLEL:
      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
        alpha_set_memflags_1 (XVECEXP (x, 0, i), in_struct_p, volatile_p,
                              unchanging_p);
      break;

    case INSN:
      alpha_set_memflags_1 (PATTERN (x), in_struct_p, volatile_p,
                            unchanging_p);
      break;

    case SET:
      alpha_set_memflags_1 (SET_DEST (x), in_struct_p, volatile_p,
                            unchanging_p);
      alpha_set_memflags_1 (SET_SRC (x), in_struct_p, volatile_p,
                            unchanging_p);
      break;

    case MEM:
      MEM_IN_STRUCT_P (x) = in_struct_p;
      MEM_VOLATILE_P (x) = volatile_p;
      RTX_UNCHANGING_P (x) = unchanging_p;
      /* Sadly, we cannot use alias sets because the extra aliasing
         produced by the AND interferes.  Given that two-byte quantities
         are the only thing we would be able to differentiate anyway,
         there does not seem to be any point in convoluting the early
         out of the alias check.  */
      break;

    default:
      break;
    }
}
/* Given INSN, which is either an INSN or a SEQUENCE generated to
   perform a memory operation, look for any MEMs in either a SET_DEST or
   a SET_SRC and copy the in-struct, unchanging, and volatile flags from
   REF into each of the MEMs found.  If REF is not a MEM, don't do
   anything.  */

void
alpha_set_memflags (insn, ref)
     rtx insn, ref;
{
  int in_struct_p, volatile_p, unchanging_p;

  if (GET_CODE (ref) != MEM)
    return;

  in_struct_p = MEM_IN_STRUCT_P (ref);
  volatile_p = MEM_VOLATILE_P (ref);
  unchanging_p = RTX_UNCHANGING_P (ref);

  /* This is only called from alpha.md, after having had something
     generated from one of the insn patterns.  So if everything is
     zero, the pattern is already up-to-date.  */
  if (! in_struct_p && ! volatile_p && ! unchanging_p)
    return;

  alpha_set_memflags_1 (insn, in_struct_p, volatile_p, unchanging_p);
}
/* Try to output insns to set TARGET equal to the constant C if it can be
   done in less than N insns.  Do all computations in MODE.  Returns the place
   where the output has been placed if it can be done and the insns have been
   emitted.  If it would take more than N insns, zero is returned and no
   insns are emitted.  */
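/* For example, c = 0x12345678 can be synthesized in two insns: an
   ldah-style add of (0x1234 << 16) followed by an lda adding 0x5678.
   Only constants needing more than N insns make this return zero.
   Worked example, not from the original source.  */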
rtx
alpha_emit_set_const (target, mode, c, n)
     rtx target;
     enum machine_mode mode;
     HOST_WIDE_INT c;
     int n;
{
  rtx pat;
  int i;

  /* Try 1 insn, then 2, then up to N.  */
  for (i = 1; i <= n; i++)
    if ((pat = alpha_emit_set_const_1 (target, mode, c, i)) != 0)
      return pat;

  return 0;
}
2225 alpha_emit_set_const_1 (target
, mode
, c
, n
)
2227 enum machine_mode mode
;
2233 /* Use a pseudo if highly optimizing and still generating RTL. */
2235 = (flag_expensive_optimizations
&& rtx_equal_function_value_matters
2239 #if HOST_BITS_PER_WIDE_INT == 64
2240 /* We are only called for SImode and DImode. If this is SImode, ensure that
2241 we are sign extended to a full word. This does not make any sense when
2242 cross-compiling on a narrow machine. */
2245 c
= ((c
& 0xffffffff) ^ 0x80000000) - 0x80000000;
2248 /* If this is a sign-extended 32-bit constant, we can do this in at most
2249 three insns, so do it if we have enough insns left. We always have
2250 a sign-extended 32-bit constant when compiling on a narrow machine. */
2252 if (HOST_BITS_PER_WIDE_INT
!= 64
2253 || c
>> 31 == -1 || c
>> 31 == 0)
2255 HOST_WIDE_INT low
= ((c
& 0xffff) ^ 0x8000) - 0x8000;
2256 HOST_WIDE_INT tmp1
= c
- low
;
2257 HOST_WIDE_INT high
= (((tmp1
>> 16) & 0xffff) ^ 0x8000) - 0x8000;
2258 HOST_WIDE_INT extra
= 0;
2260 /* If HIGH will be interpreted as negative but the constant is
2261 positive, we must adjust it to do two ldha insns. */
2263 if ((high
& 0x8000) != 0 && c
>= 0)
2267 high
= ((tmp1
>> 16) & 0xffff) - 2 * ((tmp1
>> 16) & 0x8000);
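      /* Worked example (not in the original source): for c = 0x7fff8000,
         low is -0x8000 and the first-pass high is negative, so the
         adjustment yields high = 0x4000 with extra = 0x4000; the constant
         is then built as two ldah's of 0x4000 plus an lda of -0x8000:
         0x40000000 + 0x40000000 - 0x8000 = 0x7fff8000.  */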
2270 if (c
== low
|| (low
== 0 && extra
== 0))
2272 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
2273 but that meant that we can't handle INT_MIN on 32-bit machines
2274 (like NT/Alpha), because we recurse indefinitely through
2275 emit_move_insn to gen_movdi. So instead, since we know exactly
2276 what we want, create it explicitly. */
2279 target
= gen_reg_rtx (mode
);
2280 emit_insn (gen_rtx_SET (VOIDmode
, target
, GEN_INT (c
)));
2283 else if (n
>= 2 + (extra
!= 0))
2285 temp
= copy_to_suggested_reg (GEN_INT (high
<< 16), subtarget
, mode
);
2288 temp
= expand_binop (mode
, add_optab
, temp
, GEN_INT (extra
<< 16),
2289 subtarget
, 0, OPTAB_WIDEN
);
2291 return expand_binop (mode
, add_optab
, temp
, GEN_INT (low
),
2292 target
, 0, OPTAB_WIDEN
);
2296 /* If we couldn't do it that way, try some other methods. But if we have
2297 no instructions left, don't bother. Likewise, if this is SImode and
2298 we can't make pseudos, we can't do anything since the expand_binop
2299 and expand_unop calls will widen and try to make pseudos. */
2302 || (mode
== SImode
&& ! rtx_equal_function_value_matters
))
2305 /* Next, see if we can load a related constant and then shift and possibly
2306 negate it to get the constant we want. Try this once each increasing
2307 numbers of insns. */
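  /* For instance, 0x12340000000 has 30 low zero bits, so it can be built
     by loading 0x48d with a single lda and shifting it left by 30.  */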
2309 for (i
= 1; i
< n
; i
++)
      /* First, see if minus some low bits, we've an easy load of
	 high bits.  */
2314 new = ((c
& 0xffff) ^ 0x8000) - 0x8000;
2316 && (temp
= alpha_emit_set_const (subtarget
, mode
, c
- new, i
)) != 0)
2317 return expand_binop (mode
, add_optab
, temp
, GEN_INT (new),
2318 target
, 0, OPTAB_WIDEN
);
2320 /* Next try complementing. */
2321 if ((temp
= alpha_emit_set_const (subtarget
, mode
, ~ c
, i
)) != 0)
2322 return expand_unop (mode
, one_cmpl_optab
, temp
, target
, 0);
2324 /* Next try to form a constant and do a left shift. We can do this
2325 if some low-order bits are zero; the exact_log2 call below tells
2326 us that information. The bits we are shifting out could be any
2327 value, but here we'll just try the 0- and sign-extended forms of
2328 the constant. To try to increase the chance of having the same
2329 constant in more than one insn, start at the highest number of
2330 bits to shift, but try all possibilities in case a ZAPNOT will
2333 if ((bits
= exact_log2 (c
& - c
)) > 0)
2334 for (; bits
> 0; bits
--)
2335 if ((temp
= (alpha_emit_set_const
2336 (subtarget
, mode
, c
>> bits
, i
))) != 0
2337 || ((temp
= (alpha_emit_set_const
2339 ((unsigned HOST_WIDE_INT
) c
) >> bits
, i
)))
2341 return expand_binop (mode
, ashl_optab
, temp
, GEN_INT (bits
),
2342 target
, 0, OPTAB_WIDEN
);
2344 /* Now try high-order zero bits. Here we try the shifted-in bits as
2345 all zero and all ones. Be careful to avoid shifting outside the
2346 mode and to avoid shifting outside the host wide int size. */
2347 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
2348 confuse the recursive call and set all of the high 32 bits. */
2350 if ((bits
= (MIN (HOST_BITS_PER_WIDE_INT
, GET_MODE_SIZE (mode
) * 8)
2351 - floor_log2 (c
) - 1 - (HOST_BITS_PER_WIDE_INT
< 64))) > 0)
2352 for (; bits
> 0; bits
--)
2353 if ((temp
= alpha_emit_set_const (subtarget
, mode
,
2355 || ((temp
= (alpha_emit_set_const
2357 ((c
<< bits
) | (((HOST_WIDE_INT
) 1 << bits
) - 1)),
2360 return expand_binop (mode
, lshr_optab
, temp
, GEN_INT (bits
),
2361 target
, 1, OPTAB_WIDEN
);
2363 /* Now try high-order 1 bits. We get that with a sign-extension.
2364 But one bit isn't enough here. Be careful to avoid shifting outside
2365 the mode and to avoid shifting outside the host wide int size. */
2367 if ((bits
= (MIN (HOST_BITS_PER_WIDE_INT
, GET_MODE_SIZE (mode
) * 8)
2368 - floor_log2 (~ c
) - 2)) > 0)
2369 for (; bits
> 0; bits
--)
2370 if ((temp
= alpha_emit_set_const (subtarget
, mode
,
2372 || ((temp
= (alpha_emit_set_const
2374 ((c
<< bits
) | (((HOST_WIDE_INT
) 1 << bits
) - 1)),
2377 return expand_binop (mode
, ashr_optab
, temp
, GEN_INT (bits
),
2378 target
, 0, OPTAB_WIDEN
);
2381 #if HOST_BITS_PER_WIDE_INT == 64
2382 /* Finally, see if can load a value into the target that is the same as the
2383 constant except that all bytes that are 0 are changed to be 0xff. If we
2384 can, then we can do a ZAPNOT to obtain the desired constant. */
2387 for (i
= 0; i
< 64; i
+= 8)
2388 if ((new & ((HOST_WIDE_INT
) 0xff << i
)) == 0)
2389 new |= (HOST_WIDE_INT
) 0xff << i
;
2391 /* We are only called for SImode and DImode. If this is SImode, ensure that
2392 we are sign extended to a full word. */
2395 new = ((new & 0xffffffff) ^ 0x80000000) - 0x80000000;
2397 if (new != c
&& new != -1
2398 && (temp
= alpha_emit_set_const (subtarget
, mode
, new, n
- 1)) != 0)
2399 return expand_binop (mode
, and_optab
, temp
, GEN_INT (c
| ~ new),
2400 target
, 0, OPTAB_WIDEN
);
/* Having failed to find a 3 insn sequence in alpha_emit_set_const,
   fall back to a straightforward decomposition.  We do this to avoid
   exponential run times encountered when looking for longer sequences
   with alpha_emit_set_const.  */
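/* The decomposition below breaks the constant into two ldah/lda pairs:
   d3/d4 describe the high 32 bits and d1/d2 the low 32 bits.  The high
   half is materialized first, shifted left by 32, and then the low half
   is added in.  For example, 0x1234567898765432 becomes
   ldah 0x1234; lda 0x5679; sll 32; ldah -0x678a; lda 0x5432.  */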
2412 alpha_emit_set_long_const (target
, c1
, c2
)
2414 HOST_WIDE_INT c1
, c2
;
2416 HOST_WIDE_INT d1
, d2
, d3
, d4
;
2418 /* Decompose the entire word */
2419 #if HOST_BITS_PER_WIDE_INT >= 64
2420 if (c2
!= -(c1
< 0))
2422 d1
= ((c1
& 0xffff) ^ 0x8000) - 0x8000;
2424 d2
= ((c1
& 0xffffffff) ^ 0x80000000) - 0x80000000;
2425 c1
= (c1
- d2
) >> 32;
2426 d3
= ((c1
& 0xffff) ^ 0x8000) - 0x8000;
2428 d4
= ((c1
& 0xffffffff) ^ 0x80000000) - 0x80000000;
2432 d1
= ((c1
& 0xffff) ^ 0x8000) - 0x8000;
2434 d2
= ((c1
& 0xffffffff) ^ 0x80000000) - 0x80000000;
2438 d3
= ((c2
& 0xffff) ^ 0x8000) - 0x8000;
2440 d4
= ((c2
& 0xffffffff) ^ 0x80000000) - 0x80000000;
2445 /* Construct the high word */
2448 emit_move_insn (target
, GEN_INT (d4
));
2450 emit_move_insn (target
, gen_rtx_PLUS (DImode
, target
, GEN_INT (d3
)));
2453 emit_move_insn (target
, GEN_INT (d3
));
2455 /* Shift it into place */
2456 emit_move_insn (target
, gen_rtx_ASHIFT (DImode
, target
, GEN_INT (32)));
2458 /* Add in the low bits. */
2460 emit_move_insn (target
, gen_rtx_PLUS (DImode
, target
, GEN_INT (d2
)));
2462 emit_move_insn (target
, gen_rtx_PLUS (DImode
, target
, GEN_INT (d1
)));
2467 /* Expand a move instruction; return true if all work is done.
2468 We don't handle non-bwx subword loads here. */
2471 alpha_expand_mov (mode
, operands
)
2472 enum machine_mode mode
;
2475 /* If the output is not a register, the input must be. */
2476 if (GET_CODE (operands
[0]) == MEM
2477 && ! reg_or_0_operand (operands
[1], mode
))
2478 operands
[1] = force_reg (mode
, operands
[1]);
2480 /* Allow legitimize_address to perform some simplifications. */
2481 if (symbolic_operand (operands
[1], mode
))
2483 rtx tmp
= alpha_legitimize_address (operands
[1], operands
[0], mode
);
2491 /* Early out for non-constants and valid constants. */
2492 if (! CONSTANT_P (operands
[1]) || input_operand (operands
[1], mode
))
2495 /* Split large integers. */
2496 if (GET_CODE (operands
[1]) == CONST_INT
2497 || GET_CODE (operands
[1]) == CONST_DOUBLE
)
2499 HOST_WIDE_INT i0
, i1
;
2500 rtx temp
= NULL_RTX
;
2502 if (GET_CODE (operands
[1]) == CONST_INT
)
2504 i0
= INTVAL (operands
[1]);
2507 else if (HOST_BITS_PER_WIDE_INT
>= 64)
2509 i0
= CONST_DOUBLE_LOW (operands
[1]);
2514 i0
= CONST_DOUBLE_LOW (operands
[1]);
2515 i1
= CONST_DOUBLE_HIGH (operands
[1]);
2518 if (HOST_BITS_PER_WIDE_INT
>= 64 || i1
== -(i0
< 0))
2519 temp
= alpha_emit_set_const (operands
[0], mode
, i0
, 3);
2521 if (!temp
&& TARGET_BUILD_CONSTANTS
)
2522 temp
= alpha_emit_set_long_const (operands
[0], i0
, i1
);
2526 if (rtx_equal_p (operands
[0], temp
))
2533 /* Otherwise we've nothing left but to drop the thing to memory. */
2534 operands
[1] = force_const_mem (DImode
, operands
[1]);
2535 if (reload_in_progress
)
2537 emit_move_insn (operands
[0], XEXP (operands
[1], 0));
2538 operands
[1] = copy_rtx (operands
[1]);
2539 XEXP (operands
[1], 0) = operands
[0];
2542 operands
[1] = validize_mem (operands
[1]);
2546 /* Expand a non-bwx QImode or HImode move instruction;
2547 return true if all work is done. */
2550 alpha_expand_mov_nobwx (mode
, operands
)
2551 enum machine_mode mode
;
2554 /* If the output is not a register, the input must be. */
2555 if (GET_CODE (operands
[0]) == MEM
)
2556 operands
[1] = force_reg (mode
, operands
[1]);
2558 /* Handle four memory cases, unaligned and aligned for either the input
2559 or the output. The only case where we can be called during reload is
2560 for aligned loads; all other cases require temporaries. */
2562 if (GET_CODE (operands
[1]) == MEM
2563 || (GET_CODE (operands
[1]) == SUBREG
2564 && GET_CODE (SUBREG_REG (operands
[1])) == MEM
)
2565 || (reload_in_progress
&& GET_CODE (operands
[1]) == REG
2566 && REGNO (operands
[1]) >= FIRST_PSEUDO_REGISTER
)
2567 || (reload_in_progress
&& GET_CODE (operands
[1]) == SUBREG
2568 && GET_CODE (SUBREG_REG (operands
[1])) == REG
2569 && REGNO (SUBREG_REG (operands
[1])) >= FIRST_PSEUDO_REGISTER
))
2571 if (aligned_memory_operand (operands
[1], mode
))
2573 if (reload_in_progress
)
2575 emit_insn ((mode
== QImode
2576 ? gen_reload_inqi_help
2577 : gen_reload_inhi_help
)
2578 (operands
[0], operands
[1],
2579 gen_rtx_REG (SImode
, REGNO (operands
[0]))));
2583 rtx aligned_mem
, bitnum
;
2584 rtx scratch
= gen_reg_rtx (SImode
);
2586 get_aligned_mem (operands
[1], &aligned_mem
, &bitnum
);
2588 emit_insn ((mode
== QImode
2589 ? gen_aligned_loadqi
2590 : gen_aligned_loadhi
)
2591 (operands
[0], aligned_mem
, bitnum
, scratch
));
2596 /* Don't pass these as parameters since that makes the generated
2597 code depend on parameter evaluation order which will cause
2598 bootstrap failures. */
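      /* (If the gen_reg_rtx calls were written as arguments, their
	 evaluation order -- and hence the pseudo register numbering --
	 would be compiler-dependent, and the stage2/stage3 object
	 comparison during bootstrap could fail.)  */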
2600 rtx temp1
= gen_reg_rtx (DImode
);
2601 rtx temp2
= gen_reg_rtx (DImode
);
2602 rtx seq
= ((mode
== QImode
2603 ? gen_unaligned_loadqi
2604 : gen_unaligned_loadhi
)
2605 (operands
[0], get_unaligned_address (operands
[1], 0),
2608 alpha_set_memflags (seq
, operands
[1]);
2614 if (GET_CODE (operands
[0]) == MEM
2615 || (GET_CODE (operands
[0]) == SUBREG
2616 && GET_CODE (SUBREG_REG (operands
[0])) == MEM
)
2617 || (reload_in_progress
&& GET_CODE (operands
[0]) == REG
2618 && REGNO (operands
[0]) >= FIRST_PSEUDO_REGISTER
)
2619 || (reload_in_progress
&& GET_CODE (operands
[0]) == SUBREG
2620 && GET_CODE (SUBREG_REG (operands
[0])) == REG
2621 && REGNO (operands
[0]) >= FIRST_PSEUDO_REGISTER
))
2623 if (aligned_memory_operand (operands
[0], mode
))
2625 rtx aligned_mem
, bitnum
;
2626 rtx temp1
= gen_reg_rtx (SImode
);
2627 rtx temp2
= gen_reg_rtx (SImode
);
2629 get_aligned_mem (operands
[0], &aligned_mem
, &bitnum
);
2631 emit_insn (gen_aligned_store (aligned_mem
, operands
[1], bitnum
,
2636 rtx temp1
= gen_reg_rtx (DImode
);
2637 rtx temp2
= gen_reg_rtx (DImode
);
2638 rtx temp3
= gen_reg_rtx (DImode
);
2639 rtx seq
= ((mode
== QImode
2640 ? gen_unaligned_storeqi
2641 : gen_unaligned_storehi
)
2642 (get_unaligned_address (operands
[0], 0),
2643 operands
[1], temp1
, temp2
, temp3
));
2645 alpha_set_memflags (seq
, operands
[0]);
2654 /* Generate an unsigned DImode to FP conversion. This is the same code
2655 optabs would emit if we didn't have TFmode patterns.
2657 For SFmode, this is the only construction I've found that can pass
2658 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2659 intermediates will work, because you'll get intermediate rounding
2660 that ruins the end result. Some of this could be fixed by turning
2661 on round-to-positive-infinity, but that requires diddling the fpsr,
2662 which kills performance. I tried turning this around and converting
   to a negative number, so that I could turn on /m, but either I did
   it wrong or there's something else going on, because I wound up with
   the exact same single-bit error.  There is a branch-less form of this
   same code:
2676 fcmoveq $f10,$f11,$f0
2678 I'm not using it because it's the same number of instructions as
2679 this branch-full form, and it has more serialized long latency
2680 instructions on the critical path.
2682 For DFmode, we can avoid rounding errors by breaking up the word
2683 into two pieces, converting them separately, and adding them back:
2685 LC0: .long 0,0x5f800000
2690 cpyse $f11,$f31,$f10
2691 cpyse $f31,$f11,$f11
2699 This doesn't seem to be a clear-cut win over the optabs form.
2700 It probably all depends on the distribution of numbers being
2701 converted -- in the optabs form, all but high-bit-set has a
2702 much lower minimum execution time. */
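/* In short, the expansion below branches on the sign of the input: a
   non-negative value is converted directly, while a negative (i.e. large
   unsigned) value is halved with the low bit folded back in via OR,
   converted, and then doubled, which preserves correct rounding.  */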
void
alpha_emit_floatuns (operands)
     rtx operands[2];
{
  rtx neglab, donelab, i0, i1, f0, in, out;
  enum machine_mode mode;

  out = operands[0];
  in = force_reg (DImode, operands[1]);
  mode = GET_MODE (out);
  neglab = gen_label_rtx ();
  donelab = gen_label_rtx ();
  i0 = gen_reg_rtx (DImode);
  i1 = gen_reg_rtx (DImode);
  f0 = gen_reg_rtx (mode);

  emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);

  emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
  emit_jump_insn (gen_jump (donelab));
  emit_barrier ();

  emit_label (neglab);

  emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
  emit_insn (gen_anddi3 (i1, in, const1_rtx));
  emit_insn (gen_iordi3 (i0, i0, i1));
  emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
  emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));

  emit_label (donelab);
}
2737 /* Generate the comparison for a conditional branch. */
2740 alpha_emit_conditional_branch (code
)
2743 enum rtx_code cmp_code
, branch_code
;
2744 enum machine_mode cmp_mode
, branch_mode
= VOIDmode
;
2745 rtx op0
= alpha_compare
.op0
, op1
= alpha_compare
.op1
;
2748 if (alpha_compare
.fp_p
&& GET_MODE (op0
) == TFmode
)
2750 if (! TARGET_HAS_XFLOATING_LIBS
)
2753 /* X_floating library comparison functions return
2757 Convert the compare against the raw return value. */
2759 if (code
== UNORDERED
|| code
== ORDERED
)
2764 op0
= alpha_emit_xfloating_compare (cmp_code
, op0
, op1
);
2766 alpha_compare
.fp_p
= 0;
2768 if (code
== UNORDERED
)
2770 else if (code
== ORDERED
)
2776 /* The general case: fold the comparison code to the types of compares
2777 that we have, choosing the branch as necessary. */
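  /* Alpha only has cmpeq, cmplt, cmple, cmpult and cmpule (and their
     floating-point counterparts), so the GT/GE-style codes below are
     handled by reversing or swapping the comparison.  */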
2780 case EQ
: case LE
: case LT
: case LEU
: case LTU
:
2782 /* We have these compares: */
2783 cmp_code
= code
, branch_code
= NE
;
2788 /* These must be reversed. */
2789 cmp_code
= reverse_condition (code
), branch_code
= EQ
;
2792 case GE
: case GT
: case GEU
: case GTU
:
2793 /* For FP, we swap them, for INT, we reverse them. */
2794 if (alpha_compare
.fp_p
)
2796 cmp_code
= swap_condition (code
);
2798 tem
= op0
, op0
= op1
, op1
= tem
;
2802 cmp_code
= reverse_condition (code
);
2811 if (alpha_compare
.fp_p
)
2814 if (flag_unsafe_math_optimizations
)
2816 /* When we are not as concerned about non-finite values, and we
2817 are comparing against zero, we can branch directly. */
2818 if (op1
== CONST0_RTX (DFmode
))
2819 cmp_code
= NIL
, branch_code
= code
;
2820 else if (op0
== CONST0_RTX (DFmode
))
2822 /* Undo the swap we probably did just above. */
2823 tem
= op0
, op0
= op1
, op1
= tem
;
2824 branch_code
= swap_condition (cmp_code
);
	  /* ??? We mark the branch mode to be CCmode to prevent the
	     compare and branch from being combined, since the compare
	     insn follows IEEE rules that the branch does not.  */
2833 branch_mode
= CCmode
;
2840 /* The following optimizations are only for signed compares. */
2841 if (code
!= LEU
&& code
!= LTU
&& code
!= GEU
&& code
!= GTU
)
2843 /* Whee. Compare and branch against 0 directly. */
2844 if (op1
== const0_rtx
)
2845 cmp_code
= NIL
, branch_code
= code
;
2847 /* We want to use cmpcc/bcc when we can, since there is a zero delay
2848 bypass between logicals and br/cmov on EV5. But we don't want to
2849 force valid immediate constants into registers needlessly. */
2850 else if (GET_CODE (op1
) == CONST_INT
)
2852 HOST_WIDE_INT v
= INTVAL (op1
), n
= -v
;
2854 if (! CONST_OK_FOR_LETTER_P (v
, 'I')
2855 && (CONST_OK_FOR_LETTER_P (n
, 'K')
2856 || CONST_OK_FOR_LETTER_P (n
, 'L')))
2858 cmp_code
= PLUS
, branch_code
= code
;
2864 if (!reg_or_0_operand (op0
, DImode
))
2865 op0
= force_reg (DImode
, op0
);
2866 if (cmp_code
!= PLUS
&& !reg_or_8bit_operand (op1
, DImode
))
2867 op1
= force_reg (DImode
, op1
);
2870 /* Emit an initial compare instruction, if necessary. */
2872 if (cmp_code
!= NIL
)
2874 tem
= gen_reg_rtx (cmp_mode
);
2875 emit_move_insn (tem
, gen_rtx_fmt_ee (cmp_code
, cmp_mode
, op0
, op1
));
2878 /* Zero the operands. */
2879 memset (&alpha_compare
, 0, sizeof (alpha_compare
));
2881 /* Return the branch comparison. */
2882 return gen_rtx_fmt_ee (branch_code
, branch_mode
, tem
, CONST0_RTX (cmp_mode
));
2885 /* Certain simplifications can be done to make invalid setcc operations
2886 valid. Return the final comparison, or NULL if we can't work. */
2889 alpha_emit_setcc (code
)
2892 enum rtx_code cmp_code
;
2893 rtx op0
= alpha_compare
.op0
, op1
= alpha_compare
.op1
;
2894 int fp_p
= alpha_compare
.fp_p
;
2897 /* Zero the operands. */
2898 memset (&alpha_compare
, 0, sizeof (alpha_compare
));
2900 if (fp_p
&& GET_MODE (op0
) == TFmode
)
2902 if (! TARGET_HAS_XFLOATING_LIBS
)
2905 /* X_floating library comparison functions return
2909 Convert the compare against the raw return value. */
2911 if (code
== UNORDERED
|| code
== ORDERED
)
2916 op0
= alpha_emit_xfloating_compare (cmp_code
, op0
, op1
);
2920 if (code
== UNORDERED
)
2922 else if (code
== ORDERED
)
2928 if (fp_p
&& !TARGET_FIX
)
2931 /* The general case: fold the comparison code to the types of compares
2932 that we have, choosing the branch as necessary. */
2937 case EQ
: case LE
: case LT
: case LEU
: case LTU
:
2939 /* We have these compares. */
2941 cmp_code
= code
, code
= NE
;
2945 if (!fp_p
&& op1
== const0_rtx
)
2950 cmp_code
= reverse_condition (code
);
2954 case GE
: case GT
: case GEU
: case GTU
:
2955 /* These normally need swapping, but for integer zero we have
2956 special patterns that recognize swapped operands. */
2957 if (!fp_p
&& op1
== const0_rtx
)
2959 code
= swap_condition (code
);
2961 cmp_code
= code
, code
= NE
;
2962 tmp
= op0
, op0
= op1
, op1
= tmp
;
2971 if (!register_operand (op0
, DImode
))
2972 op0
= force_reg (DImode
, op0
);
2973 if (!reg_or_8bit_operand (op1
, DImode
))
2974 op1
= force_reg (DImode
, op1
);
2977 /* Emit an initial compare instruction, if necessary. */
2978 if (cmp_code
!= NIL
)
2980 enum machine_mode mode
= fp_p
? DFmode
: DImode
;
2982 tmp
= gen_reg_rtx (mode
);
2983 emit_insn (gen_rtx_SET (VOIDmode
, tmp
,
2984 gen_rtx_fmt_ee (cmp_code
, mode
, op0
, op1
)));
2986 op0
= fp_p
? gen_lowpart (DImode
, tmp
) : tmp
;
2990 /* Return the setcc comparison. */
2991 return gen_rtx_fmt_ee (code
, DImode
, op0
, op1
);
2995 /* Rewrite a comparison against zero CMP of the form
2996 (CODE (cc0) (const_int 0)) so it can be written validly in
2997 a conditional move (if_then_else CMP ...).
2998 If both of the operands that set cc0 are non-zero we must emit
2999 an insn to perform the compare (it can't be done within
3000 the conditional move). */
3002 alpha_emit_conditional_move (cmp
, mode
)
3004 enum machine_mode mode
;
3006 enum rtx_code code
= GET_CODE (cmp
);
3007 enum rtx_code cmov_code
= NE
;
3008 rtx op0
= alpha_compare
.op0
;
3009 rtx op1
= alpha_compare
.op1
;
3010 int fp_p
= alpha_compare
.fp_p
;
3011 enum machine_mode cmp_mode
3012 = (GET_MODE (op0
) == VOIDmode
? DImode
: GET_MODE (op0
));
3013 enum machine_mode cmp_op_mode
= fp_p
? DFmode
: DImode
;
3014 enum machine_mode cmov_mode
= VOIDmode
;
3015 int local_fast_math
= flag_unsafe_math_optimizations
;
3018 /* Zero the operands. */
3019 memset (&alpha_compare
, 0, sizeof (alpha_compare
));
3021 if (fp_p
!= FLOAT_MODE_P (mode
))
3023 enum rtx_code cmp_code
;
3028 /* If we have fp<->int register move instructions, do a cmov by
3029 performing the comparison in fp registers, and move the
3030 zero/non-zero value to integer registers, where we can then
3031 use a normal cmov, or vice-versa. */
3035 case EQ
: case LE
: case LT
: case LEU
: case LTU
:
3036 /* We have these compares. */
3037 cmp_code
= code
, code
= NE
;
3041 /* This must be reversed. */
3042 cmp_code
= EQ
, code
= EQ
;
3045 case GE
: case GT
: case GEU
: case GTU
:
3046 /* These normally need swapping, but for integer zero we have
3047 special patterns that recognize swapped operands. */
3048 if (!fp_p
&& op1
== const0_rtx
)
3049 cmp_code
= code
, code
= NE
;
3052 cmp_code
= swap_condition (code
);
3054 tem
= op0
, op0
= op1
, op1
= tem
;
3062 tem
= gen_reg_rtx (cmp_op_mode
);
3063 emit_insn (gen_rtx_SET (VOIDmode
, tem
,
3064 gen_rtx_fmt_ee (cmp_code
, cmp_op_mode
,
3067 cmp_mode
= cmp_op_mode
= fp_p
? DImode
: DFmode
;
3068 op0
= gen_lowpart (cmp_op_mode
, tem
);
3069 op1
= CONST0_RTX (cmp_op_mode
);
3071 local_fast_math
= 1;
3074 /* We may be able to use a conditional move directly.
3075 This avoids emitting spurious compares. */
3076 if (signed_comparison_operator (cmp
, VOIDmode
)
3077 && (!fp_p
|| local_fast_math
)
3078 && (op0
== CONST0_RTX (cmp_mode
) || op1
== CONST0_RTX (cmp_mode
)))
3079 return gen_rtx_fmt_ee (code
, VOIDmode
, op0
, op1
);
3081 /* We can't put the comparison inside the conditional move;
3082 emit a compare instruction and put that inside the
3083 conditional move. Make sure we emit only comparisons we have;
3084 swap or reverse as necessary. */
3091 case EQ
: case LE
: case LT
: case LEU
: case LTU
:
3092 /* We have these compares: */
3096 /* This must be reversed. */
3097 code
= reverse_condition (code
);
3101 case GE
: case GT
: case GEU
: case GTU
:
3102 /* These must be swapped. */
3103 if (op1
!= CONST0_RTX (cmp_mode
))
3105 code
= swap_condition (code
);
3106 tem
= op0
, op0
= op1
, op1
= tem
;
3116 if (!reg_or_0_operand (op0
, DImode
))
3117 op0
= force_reg (DImode
, op0
);
3118 if (!reg_or_8bit_operand (op1
, DImode
))
3119 op1
= force_reg (DImode
, op1
);
3122 /* ??? We mark the branch mode to be CCmode to prevent the compare
3123 and cmov from being combined, since the compare insn follows IEEE
3124 rules that the cmov does not. */
3125 if (fp_p
&& !local_fast_math
)
3128 tem
= gen_reg_rtx (cmp_op_mode
);
3129 emit_move_insn (tem
, gen_rtx_fmt_ee (code
, cmp_op_mode
, op0
, op1
));
3130 return gen_rtx_fmt_ee (cmov_code
, cmov_mode
, tem
, CONST0_RTX (cmp_op_mode
));
3133 /* Simplify a conditional move of two constants into a setcc with
3134 arithmetic. This is done with a splitter since combine would
3135 just undo the work if done during code generation. It also catches
3136 cases we wouldn't have before cse. */
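/* For instance, "x = cond ? 8 : 0" has diff == 8, a power of two, so it
   is rewritten below as a setcc followed by a left shift:
   t = (cond != 0); x = t << 3;  */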
3139 alpha_split_conditional_move (code
, dest
, cond
, t_rtx
, f_rtx
)
3141 rtx dest
, cond
, t_rtx
, f_rtx
;
3143 HOST_WIDE_INT t
, f
, diff
;
3144 enum machine_mode mode
;
3145 rtx target
, subtarget
, tmp
;
3147 mode
= GET_MODE (dest
);
3152 if (((code
== NE
|| code
== EQ
) && diff
< 0)
3153 || (code
== GE
|| code
== GT
))
3155 code
= reverse_condition (code
);
3156 diff
= t
, t
= f
, f
= diff
;
3160 subtarget
= target
= dest
;
3163 target
= gen_lowpart (DImode
, dest
);
3164 if (! no_new_pseudos
)
3165 subtarget
= gen_reg_rtx (DImode
);
3170 if (f
== 0 && exact_log2 (diff
) > 0
      /* On EV6, we've got enough shifters to make non-arithmetic shifts
	 viable over a longer latency cmove.  On EV5, the E0 slot is a
	 scarce resource, and on EV4 shift has the same latency as a cmove.  */
3174 && (diff
<= 8 || alpha_cpu
== PROCESSOR_EV6
))
3176 tmp
= gen_rtx_fmt_ee (code
, DImode
, cond
, const0_rtx
);
3177 emit_insn (gen_rtx_SET (VOIDmode
, subtarget
, tmp
));
3179 tmp
= gen_rtx_ASHIFT (DImode
, subtarget
, GEN_INT (exact_log2 (t
)));
3180 emit_insn (gen_rtx_SET (VOIDmode
, target
, tmp
));
3182 else if (f
== 0 && t
== -1)
3184 tmp
= gen_rtx_fmt_ee (code
, DImode
, cond
, const0_rtx
);
3185 emit_insn (gen_rtx_SET (VOIDmode
, subtarget
, tmp
));
3187 emit_insn (gen_negdi2 (target
, subtarget
));
3189 else if (diff
== 1 || diff
== 4 || diff
== 8)
3193 tmp
= gen_rtx_fmt_ee (code
, DImode
, cond
, const0_rtx
);
3194 emit_insn (gen_rtx_SET (VOIDmode
, subtarget
, tmp
));
3197 emit_insn (gen_adddi3 (target
, subtarget
, GEN_INT (f
)));
3200 add_op
= GEN_INT (f
);
3201 if (sext_add_operand (add_op
, mode
))
3203 tmp
= gen_rtx_MULT (DImode
, subtarget
, GEN_INT (diff
));
3204 tmp
= gen_rtx_PLUS (DImode
, tmp
, add_op
);
3205 emit_insn (gen_rtx_SET (VOIDmode
, target
, tmp
));
3217 /* Look up the function X_floating library function name for the
3221 alpha_lookup_xfloating_lib_func (code
)
3226 const enum rtx_code code
;
3227 const char *const func
;
3230 static const struct xfloating_op vms_xfloating_ops
[] =
3232 { PLUS
, "OTS$ADD_X" },
3233 { MINUS
, "OTS$SUB_X" },
3234 { MULT
, "OTS$MUL_X" },
3235 { DIV
, "OTS$DIV_X" },
3236 { EQ
, "OTS$EQL_X" },
3237 { NE
, "OTS$NEQ_X" },
3238 { LT
, "OTS$LSS_X" },
3239 { LE
, "OTS$LEQ_X" },
3240 { GT
, "OTS$GTR_X" },
3241 { GE
, "OTS$GEQ_X" },
3242 { FIX
, "OTS$CVTXQ" },
3243 { FLOAT
, "OTS$CVTQX" },
3244 { UNSIGNED_FLOAT
, "OTS$CVTQUX" },
3245 { FLOAT_EXTEND
, "OTS$CVT_FLOAT_T_X" },
3246 { FLOAT_TRUNCATE
, "OTS$CVT_FLOAT_X_T" },
3249 static const struct xfloating_op osf_xfloating_ops
[] =
3251 { PLUS
, "_OtsAddX" },
3252 { MINUS
, "_OtsSubX" },
3253 { MULT
, "_OtsMulX" },
3254 { DIV
, "_OtsDivX" },
3261 { FIX
, "_OtsCvtXQ" },
3262 { FLOAT
, "_OtsCvtQX" },
3263 { UNSIGNED_FLOAT
, "_OtsCvtQUX" },
3264 { FLOAT_EXTEND
, "_OtsConvertFloatTX" },
3265 { FLOAT_TRUNCATE
, "_OtsConvertFloatXT" },
3268 const struct xfloating_op
*ops
;
3269 const long n
= ARRAY_SIZE (osf_xfloating_ops
);
3272 /* How irritating. Nothing to key off for the table. Hardcode
3273 knowledge of the G_floating routines. */
3274 if (TARGET_FLOAT_VAX
)
3276 if (TARGET_ABI_OPEN_VMS
)
3278 if (code
== FLOAT_EXTEND
)
3279 return "OTS$CVT_FLOAT_G_X";
3280 if (code
== FLOAT_TRUNCATE
)
3281 return "OTS$CVT_FLOAT_X_G";
3285 if (code
== FLOAT_EXTEND
)
3286 return "_OtsConvertFloatGX";
3287 if (code
== FLOAT_TRUNCATE
)
3288 return "_OtsConvertFloatXG";
3292 if (TARGET_ABI_OPEN_VMS
)
3293 ops
= vms_xfloating_ops
;
3295 ops
= osf_xfloating_ops
;
3297 for (i
= 0; i
< n
; ++i
)
3298 if (ops
[i
].code
== code
)
3304 /* Most X_floating operations take the rounding mode as an argument.
3305 Compute that here. */
3308 alpha_compute_xfloating_mode_arg (code
, round
)
3310 enum alpha_fp_rounding_mode round
;
3316 case ALPHA_FPRM_NORM
:
3319 case ALPHA_FPRM_MINF
:
3322 case ALPHA_FPRM_CHOP
:
3325 case ALPHA_FPRM_DYN
:
3331 /* XXX For reference, round to +inf is mode = 3. */
3334 if (code
== FLOAT_TRUNCATE
&& alpha_fptm
== ALPHA_FPTM_N
)
3340 /* Emit an X_floating library function call.
3342 Note that these functions do not follow normal calling conventions:
3343 TFmode arguments are passed in two integer registers (as opposed to
3344 indirect); TFmode return values appear in R16+R17.
3346 FUNC is the function name to call.
3347 TARGET is where the output belongs.
3348 OPERANDS are the inputs.
3349 NOPERANDS is the count of inputs.
3350 EQUIV is the expression equivalent for the function.
3354 alpha_emit_xfloating_libcall (func
, target
, operands
, noperands
, equiv
)
3361 rtx usage
= NULL_RTX
, tmp
, reg
;
3366 for (i
= 0; i
< noperands
; ++i
)
3368 switch (GET_MODE (operands
[i
]))
3371 reg
= gen_rtx_REG (TFmode
, regno
);
3376 reg
= gen_rtx_REG (DFmode
, regno
+ 32);
3381 if (GET_CODE (operands
[i
]) != CONST_INT
)
3385 reg
= gen_rtx_REG (DImode
, regno
);
3393 emit_move_insn (reg
, operands
[i
]);
3394 usage
= alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode
, reg
), usage
);
3397 switch (GET_MODE (target
))
3400 reg
= gen_rtx_REG (TFmode
, 16);
3403 reg
= gen_rtx_REG (DFmode
, 32);
3406 reg
= gen_rtx_REG (DImode
, 0);
3412 tmp
= gen_rtx_MEM (QImode
, gen_rtx_SYMBOL_REF (Pmode
, (char *) func
));
3413 tmp
= emit_call_insn (GEN_CALL_VALUE (reg
, tmp
, const0_rtx
,
3414 const0_rtx
, const0_rtx
));
3415 CALL_INSN_FUNCTION_USAGE (tmp
) = usage
;
3420 emit_libcall_block (tmp
, target
, reg
, equiv
);
3423 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3426 alpha_emit_xfloating_arith (code
, operands
)
3432 rtx out_operands
[3];
3434 func
= alpha_lookup_xfloating_lib_func (code
);
3435 mode
= alpha_compute_xfloating_mode_arg (code
, alpha_fprm
);
3437 out_operands
[0] = operands
[1];
3438 out_operands
[1] = operands
[2];
3439 out_operands
[2] = GEN_INT (mode
);
3440 alpha_emit_xfloating_libcall (func
, operands
[0], out_operands
, 3,
3441 gen_rtx_fmt_ee (code
, TFmode
, operands
[1],
3445 /* Emit an X_floating library function call for a comparison. */
3448 alpha_emit_xfloating_compare (code
, op0
, op1
)
3453 rtx out
, operands
[2];
3455 func
= alpha_lookup_xfloating_lib_func (code
);
3459 out
= gen_reg_rtx (DImode
);
3461 /* ??? Strange mode for equiv because what's actually returned
3462 is -1,0,1, not a proper boolean value. */
3463 alpha_emit_xfloating_libcall (func
, out
, operands
, 2,
3464 gen_rtx_fmt_ee (code
, CCmode
, op0
, op1
));
3469 /* Emit an X_floating library function call for a conversion. */
3472 alpha_emit_xfloating_cvt (code
, operands
)
3476 int noperands
= 1, mode
;
3477 rtx out_operands
[2];
3480 func
= alpha_lookup_xfloating_lib_func (code
);
3482 out_operands
[0] = operands
[1];
3487 mode
= alpha_compute_xfloating_mode_arg (code
, ALPHA_FPRM_CHOP
);
3488 out_operands
[1] = GEN_INT (mode
);
3491 case FLOAT_TRUNCATE
:
3492 mode
= alpha_compute_xfloating_mode_arg (code
, alpha_fprm
);
3493 out_operands
[1] = GEN_INT (mode
);
3500 alpha_emit_xfloating_libcall (func
, operands
[0], out_operands
, noperands
,
3501 gen_rtx_fmt_e (code
, GET_MODE (operands
[0]),
/* Split a TFmode OP[1] into DImode OP[2,3] and likewise for
   OP[0] into OP[0,1].  Naturally, output operand ordering is
   little-endian.  */

void
alpha_split_tfmode_pair (operands)
     rtx operands[4];
{
  if (GET_CODE (operands[1]) == REG)
    {
      operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
      operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
    }
  else if (GET_CODE (operands[1]) == MEM)
    {
      operands[3] = adjust_address (operands[1], DImode, 8);
      operands[2] = adjust_address (operands[1], DImode, 0);
    }
  else if (operands[1] == CONST0_RTX (TFmode))
    operands[2] = operands[3] = const0_rtx;
  else
    abort ();

  if (GET_CODE (operands[0]) == REG)
    {
      operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
      operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
    }
  else if (GET_CODE (operands[0]) == MEM)
    {
      operands[1] = adjust_address (operands[0], DImode, 8);
      operands[0] = adjust_address (operands[0], DImode, 0);
    }
  else
    abort ();
}
3542 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3543 op2 is a register containing the sign bit, operation is the
3544 logical operation to be performed. */
3547 alpha_split_tfmode_frobsign (operands
, operation
)
3549 rtx (*operation
) PARAMS ((rtx
, rtx
, rtx
));
3551 rtx high_bit
= operands
[2];
3555 alpha_split_tfmode_pair (operands
);
3557 /* Detect three flavours of operand overlap. */
3559 if (rtx_equal_p (operands
[0], operands
[2]))
3561 else if (rtx_equal_p (operands
[1], operands
[2]))
3563 if (rtx_equal_p (operands
[0], high_bit
))
3570 emit_move_insn (operands
[0], operands
[2]);
3572 /* ??? If the destination overlaps both source tf and high_bit, then
3573 assume source tf is dead in its entirety and use the other half
3574 for a scratch register. Otherwise "scratch" is just the proper
3575 destination register. */
3576 scratch
= operands
[move
< 2 ? 1 : 3];
3578 emit_insn ((*operation
) (scratch
, high_bit
, operands
[3]));
3582 emit_move_insn (operands
[0], operands
[2]);
3584 emit_move_insn (operands
[1], scratch
);
3588 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3592 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3593 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3594 lda r3,X(r11) lda r3,X+2(r11)
3595 extwl r1,r3,r1 extql r1,r3,r1
3596 extwh r2,r3,r2 extqh r2,r3,r2
	 or     r1,r2,r1		or     r1,r2,r1
3600 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3601 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3602 lda r3,X(r11) lda r3,X(r11)
3603 extll r1,r3,r1 extll r1,r3,r1
3604 extlh r2,r3,r2 extlh r2,r3,r2
	 or     r1,r2,r1		addl   r1,r2,r1
3607 quad: ldq_u r1,X(r11)
3616 alpha_expand_unaligned_load (tgt
, mem
, size
, ofs
, sign
)
3618 HOST_WIDE_INT size
, ofs
;
3621 rtx meml
, memh
, addr
, extl
, exth
, tmp
, mema
;
3622 enum machine_mode mode
;
3624 meml
= gen_reg_rtx (DImode
);
3625 memh
= gen_reg_rtx (DImode
);
3626 addr
= gen_reg_rtx (DImode
);
3627 extl
= gen_reg_rtx (DImode
);
3628 exth
= gen_reg_rtx (DImode
);
3630 mema
= XEXP (mem
, 0);
3631 if (GET_CODE (mema
) == LO_SUM
)
3632 mema
= force_reg (Pmode
, mema
);
3634 /* AND addresses cannot be in any alias set, since they may implicitly
3635 alias surrounding code. Ideally we'd have some alias set that
3636 covered all types except those with alignment 8 or higher. */
3638 tmp
= change_address (mem
, DImode
,
3639 gen_rtx_AND (DImode
,
3640 plus_constant (mema
, ofs
),
3642 set_mem_alias_set (tmp
, 0);
3643 emit_move_insn (meml
, tmp
);
3645 tmp
= change_address (mem
, DImode
,
3646 gen_rtx_AND (DImode
,
3647 plus_constant (mema
, ofs
+ size
- 1),
3649 set_mem_alias_set (tmp
, 0);
3650 emit_move_insn (memh
, tmp
);
3652 if (WORDS_BIG_ENDIAN
&& sign
&& (size
== 2 || size
== 4))
3654 emit_move_insn (addr
, plus_constant (mema
, -1));
3656 emit_insn (gen_extqh_be (extl
, meml
, addr
));
3657 emit_insn (gen_extxl_be (exth
, memh
, GEN_INT (64), addr
));
3659 addr
= expand_binop (DImode
, ior_optab
, extl
, exth
, tgt
, 1, OPTAB_WIDEN
);
3660 addr
= expand_binop (DImode
, ashr_optab
, addr
, GEN_INT (64 - size
*8),
3661 addr
, 1, OPTAB_WIDEN
);
3663 else if (sign
&& size
== 2)
3665 emit_move_insn (addr
, plus_constant (mema
, ofs
+2));
3667 emit_insn (gen_extxl_le (extl
, meml
, GEN_INT (64), addr
));
3668 emit_insn (gen_extqh_le (exth
, memh
, addr
));
3670 /* We must use tgt here for the target. Alpha-vms port fails if we use
3671 addr for the target, because addr is marked as a pointer and combine
3672 knows that pointers are always sign-extended 32 bit values. */
3673 addr
= expand_binop (DImode
, ior_optab
, extl
, exth
, tgt
, 1, OPTAB_WIDEN
);
3674 addr
= expand_binop (DImode
, ashr_optab
, addr
, GEN_INT (48),
3675 addr
, 1, OPTAB_WIDEN
);
3679 if (WORDS_BIG_ENDIAN
)
3681 emit_move_insn (addr
, plus_constant (mema
, ofs
+size
-1));
3685 emit_insn (gen_extwh_be (extl
, meml
, addr
));
3690 emit_insn (gen_extlh_be (extl
, meml
, addr
));
3695 emit_insn (gen_extqh_be (extl
, meml
, addr
));
3702 emit_insn (gen_extxl_be (exth
, memh
, GEN_INT (size
*8), addr
));
3706 emit_move_insn (addr
, plus_constant (mema
, ofs
));
3707 emit_insn (gen_extxl_le (extl
, meml
, GEN_INT (size
*8), addr
));
3711 emit_insn (gen_extwh_le (exth
, memh
, addr
));
3716 emit_insn (gen_extlh_le (exth
, memh
, addr
));
3721 emit_insn (gen_extqh_le (exth
, memh
, addr
));
3730 addr
= expand_binop (mode
, ior_optab
, gen_lowpart (mode
, extl
),
3731 gen_lowpart (mode
, exth
), gen_lowpart (mode
, tgt
),
3736 emit_move_insn (tgt
, gen_lowpart(GET_MODE (tgt
), addr
));
3739 /* Similarly, use ins and msk instructions to perform unaligned stores. */
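/* That is: load the two quadwords that contain the destination bytes,
   mask the destination bytes out of them (msk*), shift the source bytes
   into position (ins*), OR the halves together, and store the high
   quadword before the low one so the fully-aligned case still works.  */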
3742 alpha_expand_unaligned_store (dst
, src
, size
, ofs
)
3744 HOST_WIDE_INT size
, ofs
;
3746 rtx dstl
, dsth
, addr
, insl
, insh
, meml
, memh
, dsta
;
3748 dstl
= gen_reg_rtx (DImode
);
3749 dsth
= gen_reg_rtx (DImode
);
3750 insl
= gen_reg_rtx (DImode
);
3751 insh
= gen_reg_rtx (DImode
);
3753 dsta
= XEXP (dst
, 0);
3754 if (GET_CODE (dsta
) == LO_SUM
)
3755 dsta
= force_reg (Pmode
, dsta
);
3757 /* AND addresses cannot be in any alias set, since they may implicitly
3758 alias surrounding code. Ideally we'd have some alias set that
3759 covered all types except those with alignment 8 or higher. */
3761 meml
= change_address (dst
, DImode
,
3762 gen_rtx_AND (DImode
,
3763 plus_constant (dsta
, ofs
),
3765 set_mem_alias_set (meml
, 0);
3767 memh
= change_address (dst
, DImode
,
3768 gen_rtx_AND (DImode
,
3769 plus_constant (dsta
, ofs
+ size
- 1),
3771 set_mem_alias_set (memh
, 0);
3773 emit_move_insn (dsth
, memh
);
3774 emit_move_insn (dstl
, meml
);
3775 if (WORDS_BIG_ENDIAN
)
3777 addr
= copy_addr_to_reg (plus_constant (dsta
, ofs
+size
-1));
3779 if (src
!= const0_rtx
)
3784 emit_insn (gen_inswl_be (insh
, gen_lowpart (HImode
,src
), addr
));
3787 emit_insn (gen_insll_be (insh
, gen_lowpart (SImode
,src
), addr
));
3790 emit_insn (gen_insql_be (insh
, gen_lowpart (DImode
,src
), addr
));
3793 emit_insn (gen_insxh (insl
, gen_lowpart (DImode
, src
),
3794 GEN_INT (size
*8), addr
));
3800 emit_insn (gen_mskxl_be (dsth
, dsth
, GEN_INT (0xffff), addr
));
3803 emit_insn (gen_mskxl_be (dsth
, dsth
, GEN_INT (0xffffffff), addr
));
3807 #if HOST_BITS_PER_WIDE_INT == 32
3808 rtx msk
= immed_double_const (0xffffffff, 0xffffffff, DImode
);
3810 rtx msk
= constm1_rtx
;
3812 emit_insn (gen_mskxl_be (dsth
, dsth
, msk
, addr
));
3817 emit_insn (gen_mskxh (dstl
, dstl
, GEN_INT (size
*8), addr
));
3821 addr
= copy_addr_to_reg (plus_constant (dsta
, ofs
));
3823 if (src
!= const0_rtx
)
3825 emit_insn (gen_insxh (insh
, gen_lowpart (DImode
, src
),
3826 GEN_INT (size
*8), addr
));
3831 emit_insn (gen_inswl_le (insl
, gen_lowpart (HImode
, src
), addr
));
3834 emit_insn (gen_insll_le (insl
, gen_lowpart (SImode
, src
), addr
));
3837 emit_insn (gen_insql_le (insl
, src
, addr
));
3842 emit_insn (gen_mskxh (dsth
, dsth
, GEN_INT (size
*8), addr
));
3847 emit_insn (gen_mskxl_le (dstl
, dstl
, GEN_INT (0xffff), addr
));
3850 emit_insn (gen_mskxl_le (dstl
, dstl
, GEN_INT (0xffffffff), addr
));
3854 #if HOST_BITS_PER_WIDE_INT == 32
3855 rtx msk
= immed_double_const (0xffffffff, 0xffffffff, DImode
);
3857 rtx msk
= constm1_rtx
;
3859 emit_insn (gen_mskxl_le (dstl
, dstl
, msk
, addr
));
3865 if (src
!= const0_rtx
)
3867 dsth
= expand_binop (DImode
, ior_optab
, insh
, dsth
, dsth
, 0, OPTAB_WIDEN
);
3868 dstl
= expand_binop (DImode
, ior_optab
, insl
, dstl
, dstl
, 0, OPTAB_WIDEN
);
3871 if (WORDS_BIG_ENDIAN
)
3873 emit_move_insn (meml
, dstl
);
3874 emit_move_insn (memh
, dsth
);
3878 /* Must store high before low for degenerate case of aligned. */
3879 emit_move_insn (memh
, dsth
);
3880 emit_move_insn (meml
, dstl
);
/* The block move code tries to maximize speed by separating loads and
   stores at the expense of register pressure: we load all of the data
   before we store it back out.  There are two secondary effects worth
   mentioning: it speeds copying to/from aligned and unaligned buffers,
   and it makes the code significantly easier to write.  */
3890 #define MAX_MOVE_WORDS 8
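/* MAX_MOVE_WORDS bounds how many quadwords the block move and block clear
   expanders will handle inline -- at most 8 * 8 = 64 bytes; larger
   requests are not expanded here.  */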
3892 /* Load an integral number of consecutive unaligned quadwords. */
3895 alpha_expand_unaligned_load_words (out_regs
, smem
, words
, ofs
)
3898 HOST_WIDE_INT words
, ofs
;
3900 rtx
const im8
= GEN_INT (-8);
3901 rtx
const i64
= GEN_INT (64);
3902 rtx ext_tmps
[MAX_MOVE_WORDS
], data_regs
[MAX_MOVE_WORDS
+1];
3903 rtx sreg
, areg
, tmp
, smema
;
3906 smema
= XEXP (smem
, 0);
3907 if (GET_CODE (smema
) == LO_SUM
)
3908 smema
= force_reg (Pmode
, smema
);
3910 /* Generate all the tmp registers we need. */
3911 for (i
= 0; i
< words
; ++i
)
3913 data_regs
[i
] = out_regs
[i
];
3914 ext_tmps
[i
] = gen_reg_rtx (DImode
);
3916 data_regs
[words
] = gen_reg_rtx (DImode
);
3919 smem
= adjust_address (smem
, GET_MODE (smem
), ofs
);
3921 /* Load up all of the source data. */
3922 for (i
= 0; i
< words
; ++i
)
3924 tmp
= change_address (smem
, DImode
,
3925 gen_rtx_AND (DImode
,
3926 plus_constant (smema
, 8*i
),
3928 set_mem_alias_set (tmp
, 0);
3929 emit_move_insn (data_regs
[i
], tmp
);
3932 tmp
= change_address (smem
, DImode
,
3933 gen_rtx_AND (DImode
,
3934 plus_constant (smema
, 8*words
- 1),
3936 set_mem_alias_set (tmp
, 0);
3937 emit_move_insn (data_regs
[words
], tmp
);
3939 /* Extract the half-word fragments. Unfortunately DEC decided to make
3940 extxh with offset zero a noop instead of zeroing the register, so
3941 we must take care of that edge condition ourselves with cmov. */
3943 sreg
= copy_addr_to_reg (smema
);
3944 areg
= expand_binop (DImode
, and_optab
, sreg
, GEN_INT (7), NULL
,
3946 if (WORDS_BIG_ENDIAN
)
3947 emit_move_insn (sreg
, plus_constant (sreg
, 7));
3948 for (i
= 0; i
< words
; ++i
)
3950 if (WORDS_BIG_ENDIAN
)
3952 emit_insn (gen_extqh_be (data_regs
[i
], data_regs
[i
], sreg
));
3953 emit_insn (gen_extxl_be (ext_tmps
[i
], data_regs
[i
+1], i64
, sreg
));
3957 emit_insn (gen_extxl_le (data_regs
[i
], data_regs
[i
], i64
, sreg
));
3958 emit_insn (gen_extqh_le (ext_tmps
[i
], data_regs
[i
+1], sreg
));
3960 emit_insn (gen_rtx_SET (VOIDmode
, ext_tmps
[i
],
3961 gen_rtx_IF_THEN_ELSE (DImode
,
3962 gen_rtx_EQ (DImode
, areg
,
3964 const0_rtx
, ext_tmps
[i
])));
3967 /* Merge the half-words into whole words. */
3968 for (i
= 0; i
< words
; ++i
)
3970 out_regs
[i
] = expand_binop (DImode
, ior_optab
, data_regs
[i
],
3971 ext_tmps
[i
], data_regs
[i
], 1, OPTAB_WIDEN
);
3975 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3976 may be NULL to store zeros. */
3979 alpha_expand_unaligned_store_words (data_regs
, dmem
, words
, ofs
)
3982 HOST_WIDE_INT words
, ofs
;
3984 rtx
const im8
= GEN_INT (-8);
3985 rtx
const i64
= GEN_INT (64);
3986 #if HOST_BITS_PER_WIDE_INT == 32
3987 rtx
const im1
= immed_double_const (0xffffffff, 0xffffffff, DImode
);
3989 rtx
const im1
= constm1_rtx
;
3991 rtx ins_tmps
[MAX_MOVE_WORDS
];
3992 rtx st_tmp_1
, st_tmp_2
, dreg
;
3993 rtx st_addr_1
, st_addr_2
, dmema
;
3996 dmema
= XEXP (dmem
, 0);
3997 if (GET_CODE (dmema
) == LO_SUM
)
3998 dmema
= force_reg (Pmode
, dmema
);
4000 /* Generate all the tmp registers we need. */
4001 if (data_regs
!= NULL
)
4002 for (i
= 0; i
< words
; ++i
)
4003 ins_tmps
[i
] = gen_reg_rtx(DImode
);
4004 st_tmp_1
= gen_reg_rtx(DImode
);
4005 st_tmp_2
= gen_reg_rtx(DImode
);
4008 dmem
= adjust_address (dmem
, GET_MODE (dmem
), ofs
);
4010 st_addr_2
= change_address (dmem
, DImode
,
4011 gen_rtx_AND (DImode
,
4012 plus_constant (dmema
, words
*8 - 1),
4014 set_mem_alias_set (st_addr_2
, 0);
4016 st_addr_1
= change_address (dmem
, DImode
,
4017 gen_rtx_AND (DImode
, dmema
, im8
));
4018 set_mem_alias_set (st_addr_1
, 0);
4020 /* Load up the destination end bits. */
4021 emit_move_insn (st_tmp_2
, st_addr_2
);
4022 emit_move_insn (st_tmp_1
, st_addr_1
);
4024 /* Shift the input data into place. */
4025 dreg
= copy_addr_to_reg (dmema
);
4026 if (WORDS_BIG_ENDIAN
)
4027 emit_move_insn (dreg
, plus_constant (dreg
, 7));
4028 if (data_regs
!= NULL
)
4030 for (i
= words
-1; i
>= 0; --i
)
4032 if (WORDS_BIG_ENDIAN
)
4034 emit_insn (gen_insql_be (ins_tmps
[i
], data_regs
[i
], dreg
));
4035 emit_insn (gen_insxh (data_regs
[i
], data_regs
[i
], i64
, dreg
));
4039 emit_insn (gen_insxh (ins_tmps
[i
], data_regs
[i
], i64
, dreg
));
4040 emit_insn (gen_insql_le (data_regs
[i
], data_regs
[i
], dreg
));
4043 for (i
= words
-1; i
> 0; --i
)
4045 ins_tmps
[i
-1] = expand_binop (DImode
, ior_optab
, data_regs
[i
],
4046 ins_tmps
[i
-1], ins_tmps
[i
-1], 1,
4051 /* Split and merge the ends with the destination data. */
4052 if (WORDS_BIG_ENDIAN
)
4054 emit_insn (gen_mskxl_be (st_tmp_2
, st_tmp_2
, im1
, dreg
));
4055 emit_insn (gen_mskxh (st_tmp_1
, st_tmp_1
, i64
, dreg
));
4059 emit_insn (gen_mskxh (st_tmp_2
, st_tmp_2
, i64
, dreg
));
4060 emit_insn (gen_mskxl_le (st_tmp_1
, st_tmp_1
, im1
, dreg
));
4063 if (data_regs
!= NULL
)
4065 st_tmp_2
= expand_binop (DImode
, ior_optab
, st_tmp_2
, ins_tmps
[words
-1],
4066 st_tmp_2
, 1, OPTAB_WIDEN
);
4067 st_tmp_1
= expand_binop (DImode
, ior_optab
, st_tmp_1
, data_regs
[0],
4068 st_tmp_1
, 1, OPTAB_WIDEN
);
4072 if (WORDS_BIG_ENDIAN
)
4073 emit_move_insn (st_addr_1
, st_tmp_1
);
4075 emit_move_insn (st_addr_2
, st_tmp_2
);
4076 for (i
= words
-1; i
> 0; --i
)
4078 rtx tmp
= change_address (dmem
, DImode
,
4079 gen_rtx_AND (DImode
,
4080 plus_constant(dmema
,
4081 WORDS_BIG_ENDIAN
? i
*8-1 : i
*8),
4083 set_mem_alias_set (tmp
, 0);
4084 emit_move_insn (tmp
, data_regs
? ins_tmps
[i
-1] : const0_rtx
);
4086 if (WORDS_BIG_ENDIAN
)
4087 emit_move_insn (st_addr_2
, st_tmp_2
);
4089 emit_move_insn (st_addr_1
, st_tmp_1
);
4093 /* Expand string/block move operations.
4095 operands[0] is the pointer to the destination.
4096 operands[1] is the pointer to the source.
4097 operands[2] is the number of bytes to move.
4098 operands[3] is the alignment. */
4101 alpha_expand_block_move (operands
)
4104 rtx bytes_rtx
= operands
[2];
4105 rtx align_rtx
= operands
[3];
4106 HOST_WIDE_INT orig_bytes
= INTVAL (bytes_rtx
);
4107 HOST_WIDE_INT bytes
= orig_bytes
;
4108 HOST_WIDE_INT src_align
= INTVAL (align_rtx
) * BITS_PER_UNIT
;
4109 HOST_WIDE_INT dst_align
= src_align
;
4110 rtx orig_src
= operands
[1];
4111 rtx orig_dst
= operands
[0];
4112 rtx data_regs
[2 * MAX_MOVE_WORDS
+ 16];
4114 unsigned int i
, words
, ofs
, nregs
= 0;
4116 if (orig_bytes
<= 0)
4118 else if (orig_bytes
> MAX_MOVE_WORDS
* UNITS_PER_WORD
)
4121 /* Look for additional alignment information from recorded register info. */
4123 tmp
= XEXP (orig_src
, 0);
4124 if (GET_CODE (tmp
) == REG
)
4125 src_align
= MAX (src_align
, REGNO_POINTER_ALIGN (REGNO (tmp
)));
4126 else if (GET_CODE (tmp
) == PLUS
4127 && GET_CODE (XEXP (tmp
, 0)) == REG
4128 && GET_CODE (XEXP (tmp
, 1)) == CONST_INT
)
4130 unsigned HOST_WIDE_INT c
= INTVAL (XEXP (tmp
, 1));
4131 unsigned int a
= REGNO_POINTER_ALIGN (REGNO (XEXP (tmp
, 0)));
4135 if (a
>= 64 && c
% 8 == 0)
4137 else if (a
>= 32 && c
% 4 == 0)
4139 else if (a
>= 16 && c
% 2 == 0)
4144 tmp
= XEXP (orig_dst
, 0);
4145 if (GET_CODE (tmp
) == REG
)
4146 dst_align
= MAX (dst_align
, REGNO_POINTER_ALIGN (REGNO (tmp
)));
4147 else if (GET_CODE (tmp
) == PLUS
4148 && GET_CODE (XEXP (tmp
, 0)) == REG
4149 && GET_CODE (XEXP (tmp
, 1)) == CONST_INT
)
4151 unsigned HOST_WIDE_INT c
= INTVAL (XEXP (tmp
, 1));
4152 unsigned int a
= REGNO_POINTER_ALIGN (REGNO (XEXP (tmp
, 0)));
4156 if (a
>= 64 && c
% 8 == 0)
4158 else if (a
>= 32 && c
% 4 == 0)
4160 else if (a
>= 16 && c
% 2 == 0)
4165 /* Load the entire block into registers. */
4166 if (GET_CODE (XEXP (orig_src
, 0)) == ADDRESSOF
)
4168 enum machine_mode mode
;
4170 tmp
= XEXP (XEXP (orig_src
, 0), 0);
4172 /* Don't use the existing register if we're reading more than
4173 is held in the register. Nor if there is not a mode that
4174 handles the exact size. */
4175 mode
= mode_for_size (bytes
* BITS_PER_UNIT
, MODE_INT
, 1);
4177 && GET_MODE_SIZE (GET_MODE (tmp
)) >= bytes
)
4181 data_regs
[nregs
] = gen_lowpart (DImode
, tmp
);
4182 data_regs
[nregs
+ 1] = gen_highpart (DImode
, tmp
);
4186 data_regs
[nregs
++] = gen_lowpart (mode
, tmp
);
4191 /* No appropriate mode; fall back on memory. */
4192 orig_src
= replace_equiv_address (orig_src
,
4193 copy_addr_to_reg (XEXP (orig_src
, 0)));
4194 src_align
= GET_MODE_BITSIZE (GET_MODE (tmp
));
4198 if (src_align
>= 64 && bytes
>= 8)
4202 for (i
= 0; i
< words
; ++i
)
4203 data_regs
[nregs
+ i
] = gen_reg_rtx (DImode
);
4205 for (i
= 0; i
< words
; ++i
)
4206 emit_move_insn (data_regs
[nregs
+ i
],
4207 adjust_address (orig_src
, DImode
, ofs
+ i
* 8));
4214 if (src_align
>= 32 && bytes
>= 4)
4218 for (i
= 0; i
< words
; ++i
)
4219 data_regs
[nregs
+ i
] = gen_reg_rtx (SImode
);
4221 for (i
= 0; i
< words
; ++i
)
4222 emit_move_insn (data_regs
[nregs
+ i
],
4223 adjust_address (orig_src
, SImode
, ofs
+ i
* 4));
4234 for (i
= 0; i
< words
+1; ++i
)
4235 data_regs
[nregs
+ i
] = gen_reg_rtx (DImode
);
4237 alpha_expand_unaligned_load_words (data_regs
+ nregs
, orig_src
,
4245 if (! TARGET_BWX
&& bytes
>= 4)
4247 data_regs
[nregs
++] = tmp
= gen_reg_rtx (SImode
);
4248 alpha_expand_unaligned_load (tmp
, orig_src
, 4, ofs
, 0);
4255 if (src_align
>= 16)
4258 data_regs
[nregs
++] = tmp
= gen_reg_rtx (HImode
);
4259 emit_move_insn (tmp
, adjust_address (orig_src
, HImode
, ofs
));
4262 } while (bytes
>= 2);
4264 else if (! TARGET_BWX
)
4266 data_regs
[nregs
++] = tmp
= gen_reg_rtx (HImode
);
4267 alpha_expand_unaligned_load (tmp
, orig_src
, 2, ofs
, 0);
4275 data_regs
[nregs
++] = tmp
= gen_reg_rtx (QImode
);
4276 emit_move_insn (tmp
, adjust_address (orig_src
, QImode
, ofs
));
4283 if (nregs
> ARRAY_SIZE (data_regs
))
4286 /* Now save it back out again. */
4290 if (GET_CODE (XEXP (orig_dst
, 0)) == ADDRESSOF
)
4292 enum machine_mode mode
;
4293 tmp
= XEXP (XEXP (orig_dst
, 0), 0);
4295 mode
= mode_for_size (orig_bytes
* BITS_PER_UNIT
, MODE_INT
, 1);
4296 if (GET_MODE (tmp
) == mode
)
4300 emit_move_insn (tmp
, data_regs
[0]);
4305 else if (nregs
== 2 && mode
== TImode
)
4307 /* Undo the subregging done above when copying between
4308 two TImode registers. */
4309 if (GET_CODE (data_regs
[0]) == SUBREG
4310 && GET_MODE (SUBREG_REG (data_regs
[0])) == TImode
)
4311 emit_move_insn (tmp
, SUBREG_REG (data_regs
[0]));
4317 emit_move_insn (gen_lowpart (DImode
, tmp
), data_regs
[0]);
4318 emit_move_insn (gen_highpart (DImode
, tmp
), data_regs
[1]);
4322 emit_no_conflict_block (seq
, tmp
, data_regs
[0],
4323 data_regs
[1], NULL_RTX
);
4331 /* ??? If nregs > 1, consider reconstructing the word in regs. */
4332 /* ??? Optimize mode < dst_mode with strict_low_part. */
4334 /* No appropriate mode; fall back on memory. We can speed things
4335 up by recognizing extra alignment information. */
4336 orig_dst
= replace_equiv_address (orig_dst
,
4337 copy_addr_to_reg (XEXP (orig_dst
, 0)));
4338 dst_align
= GET_MODE_BITSIZE (GET_MODE (tmp
));
4341 /* Write out the data in whatever chunks reading the source allowed. */
4342 if (dst_align
>= 64)
4344 while (i
< nregs
&& GET_MODE (data_regs
[i
]) == DImode
)
4346 emit_move_insn (adjust_address (orig_dst
, DImode
, ofs
),
4353 if (dst_align
>= 32)
4355 /* If the source has remaining DImode regs, write them out in
4357 while (i
< nregs
&& GET_MODE (data_regs
[i
]) == DImode
)
4359 tmp
= expand_binop (DImode
, lshr_optab
, data_regs
[i
], GEN_INT (32),
4360 NULL_RTX
, 1, OPTAB_WIDEN
);
4362 emit_move_insn (adjust_address (orig_dst
, SImode
, ofs
),
4363 gen_lowpart (SImode
, data_regs
[i
]));
4364 emit_move_insn (adjust_address (orig_dst
, SImode
, ofs
+ 4),
4365 gen_lowpart (SImode
, tmp
));
4370 while (i
< nregs
&& GET_MODE (data_regs
[i
]) == SImode
)
4372 emit_move_insn (adjust_address (orig_dst
, SImode
, ofs
),
4379 if (i
< nregs
&& GET_MODE (data_regs
[i
]) == DImode
)
4381 /* Write out a remaining block of words using unaligned methods. */
4383 for (words
= 1; i
+ words
< nregs
; words
++)
4384 if (GET_MODE (data_regs
[i
+ words
]) != DImode
)
4388 alpha_expand_unaligned_store (orig_dst
, data_regs
[i
], 8, ofs
);
4390 alpha_expand_unaligned_store_words (data_regs
+ i
, orig_dst
,
4397 /* Due to the above, this won't be aligned. */
4398 /* ??? If we have more than one of these, consider constructing full
4399 words in registers and using alpha_expand_unaligned_store_words. */
4400 while (i
< nregs
&& GET_MODE (data_regs
[i
]) == SImode
)
4402 alpha_expand_unaligned_store (orig_dst
, data_regs
[i
], 4, ofs
);
4407 if (dst_align
>= 16)
4408 while (i
< nregs
&& GET_MODE (data_regs
[i
]) == HImode
)
4410 emit_move_insn (adjust_address (orig_dst
, HImode
, ofs
), data_regs
[i
]);
4415 while (i
< nregs
&& GET_MODE (data_regs
[i
]) == HImode
)
4417 alpha_expand_unaligned_store (orig_dst
, data_regs
[i
], 2, ofs
);
4422 while (i
< nregs
&& GET_MODE (data_regs
[i
]) == QImode
)
4424 emit_move_insn (adjust_address (orig_dst
, QImode
, ofs
), data_regs
[i
]);
4438 alpha_expand_block_clear (operands
)
4441 rtx bytes_rtx
= operands
[1];
4442 rtx align_rtx
= operands
[2];
4443 HOST_WIDE_INT orig_bytes
= INTVAL (bytes_rtx
);
4444 HOST_WIDE_INT bytes
= orig_bytes
;
4445 HOST_WIDE_INT align
= INTVAL (align_rtx
) * BITS_PER_UNIT
;
4446 HOST_WIDE_INT alignofs
= 0;
4447 rtx orig_dst
= operands
[0];
4449 int i
, words
, ofs
= 0;
4451 if (orig_bytes
<= 0)
4453 if (orig_bytes
> MAX_MOVE_WORDS
* UNITS_PER_WORD
)
4456 /* Look for stricter alignment. */
4457 tmp
= XEXP (orig_dst
, 0);
4458 if (GET_CODE (tmp
) == REG
)
4459 align
= MAX (align
, REGNO_POINTER_ALIGN (REGNO (tmp
)));
4460 else if (GET_CODE (tmp
) == PLUS
4461 && GET_CODE (XEXP (tmp
, 0)) == REG
4462 && GET_CODE (XEXP (tmp
, 1)) == CONST_INT
)
4464 HOST_WIDE_INT c
= INTVAL (XEXP (tmp
, 1));
4465 int a
= REGNO_POINTER_ALIGN (REGNO (XEXP (tmp
, 0)));
4470 align
= a
, alignofs
= 8 - c
% 8;
4472 align
= a
, alignofs
= 4 - c
% 4;
4474 align
= a
, alignofs
= 2 - c
% 2;
4477 else if (GET_CODE (tmp
) == ADDRESSOF
)
4479 enum machine_mode mode
;
4481 mode
= mode_for_size (bytes
* BITS_PER_UNIT
, MODE_INT
, 1);
4482 if (GET_MODE (XEXP (tmp
, 0)) == mode
)
4484 emit_move_insn (XEXP (tmp
, 0), const0_rtx
);
4488 /* No appropriate mode; fall back on memory. */
4489 orig_dst
= replace_equiv_address (orig_dst
, copy_addr_to_reg (tmp
));
4490 align
= GET_MODE_BITSIZE (GET_MODE (XEXP (tmp
, 0)));
4493 /* Handle an unaligned prefix first. */
4497 #if HOST_BITS_PER_WIDE_INT >= 64
4498 /* Given that alignofs is bounded by align, the only time BWX could
4499 generate three stores is for a 7 byte fill. Prefer two individual
4500 stores over a load/mask/store sequence. */
4501 if ((!TARGET_BWX
|| alignofs
== 7)
4503 && !(alignofs
== 4 && bytes
>= 4))
4505 enum machine_mode mode
= (align
>= 64 ? DImode
: SImode
);
4506 int inv_alignofs
= (align
>= 64 ? 8 : 4) - alignofs
;
4510 mem
= adjust_address (orig_dst
, mode
, ofs
- inv_alignofs
);
4511 set_mem_alias_set (mem
, 0);
4513 mask
= ~(~(HOST_WIDE_INT
)0 << (inv_alignofs
* 8));
4514 if (bytes
< alignofs
)
4516 mask
|= ~(HOST_WIDE_INT
)0 << ((inv_alignofs
+ bytes
) * 8);
4527 tmp
= expand_binop (mode
, and_optab
, mem
, GEN_INT (mask
),
4528 NULL_RTX
, 1, OPTAB_WIDEN
);
4530 emit_move_insn (mem
, tmp
);
4534 if (TARGET_BWX
&& (alignofs
& 1) && bytes
>= 1)
4536 emit_move_insn (adjust_address (orig_dst
, QImode
, ofs
), const0_rtx
);
4541 if (TARGET_BWX
&& align
>= 16 && (alignofs
& 3) == 2 && bytes
>= 2)
4543 emit_move_insn (adjust_address (orig_dst
, HImode
, ofs
), const0_rtx
);
4548 if (alignofs
== 4 && bytes
>= 4)
4550 emit_move_insn (adjust_address (orig_dst
, SImode
, ofs
), const0_rtx
);
4556 /* If we've not used the extra lead alignment information by now,
4557 we won't be able to. Downgrade align to match what's left over. */
4560 alignofs
= alignofs
& -alignofs
;
4561 align
= MIN (align
, alignofs
* BITS_PER_UNIT
);
4565 /* Handle a block of contiguous long-words. */
4567 if (align
>= 64 && bytes
>= 8)
4571 for (i
= 0; i
< words
; ++i
)
4572 emit_move_insn (adjust_address (orig_dst
, DImode
, ofs
+ i
* 8),
4579 /* If the block is large and appropriately aligned, emit a single
4580 store followed by a sequence of stq_u insns. */
4582 if (align
>= 32 && bytes
> 16)
4586 emit_move_insn (adjust_address (orig_dst
, SImode
, ofs
), const0_rtx
);
4590 orig_dsta
= XEXP (orig_dst
, 0);
4591 if (GET_CODE (orig_dsta
) == LO_SUM
)
4592 orig_dsta
= force_reg (Pmode
, orig_dsta
);
4595 for (i
= 0; i
< words
; ++i
)
4598 = change_address (orig_dst
, DImode
,
4599 gen_rtx_AND (DImode
,
4600 plus_constant (orig_dsta
, ofs
+ i
*8),
4602 set_mem_alias_set (mem
, 0);
4603 emit_move_insn (mem
, const0_rtx
);
4606 /* Depending on the alignment, the first stq_u may have overlapped
4607 with the initial stl, which means that the last stq_u didn't
4608 write as much as it would appear. Leave those questionable bytes
4610 bytes
-= words
* 8 - 4;
4611 ofs
+= words
* 8 - 4;
4614 /* Handle a smaller block of aligned words. */
4616 if ((align
>= 64 && bytes
== 4)
4617 || (align
== 32 && bytes
>= 4))
4621 for (i
= 0; i
< words
; ++i
)
4622 emit_move_insn (adjust_address (orig_dst
, SImode
, ofs
+ i
* 4),
4629 /* An unaligned block uses stq_u stores for as many as possible. */
4635 alpha_expand_unaligned_store_words (NULL
, orig_dst
, words
, ofs
);
4641 /* Next clean up any trailing pieces. */
4643 #if HOST_BITS_PER_WIDE_INT >= 64
4644 /* Count the number of bits in BYTES for which aligned stores could
4647 for (i
= (TARGET_BWX
? 1 : 4); i
* BITS_PER_UNIT
<= align
; i
<<= 1)
4651 /* If we have appropriate alignment (and it wouldn't take too many
4652 instructions otherwise), mask out the bytes we need. */
4653 if (TARGET_BWX
? words
> 2 : bytes
> 0)
4660 mem
= adjust_address (orig_dst
, DImode
, ofs
);
4661 set_mem_alias_set (mem
, 0);
4663 mask
= ~(HOST_WIDE_INT
)0 << (bytes
* 8);
4665 tmp
= expand_binop (DImode
, and_optab
, mem
, GEN_INT (mask
),
4666 NULL_RTX
, 1, OPTAB_WIDEN
);
4668 emit_move_insn (mem
, tmp
);
4671 else if (align
>= 32 && bytes
< 4)
4676 mem
= adjust_address (orig_dst
, SImode
, ofs
);
4677 set_mem_alias_set (mem
, 0);
4679 mask
= ~(HOST_WIDE_INT
)0 << (bytes
* 8);
4681 tmp
= expand_binop (SImode
, and_optab
, mem
, GEN_INT (mask
),
4682 NULL_RTX
, 1, OPTAB_WIDEN
);
4684 emit_move_insn (mem
, tmp
);
4690 if (!TARGET_BWX
&& bytes
>= 4)
4692 alpha_expand_unaligned_store (orig_dst
, const0_rtx
, 4, ofs
);
4702 emit_move_insn (adjust_address (orig_dst
, HImode
, ofs
),
4706 } while (bytes
>= 2);
4708 else if (! TARGET_BWX
)
4710 alpha_expand_unaligned_store (orig_dst
, const0_rtx
, 2, ofs
);
4718 emit_move_insn (adjust_address (orig_dst
, QImode
, ofs
), const0_rtx
);
4726 /* Adjust the cost of a scheduling dependency. Return the new cost of
4727 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4730 alpha_adjust_cost (insn
, link
, dep_insn
, cost
)
4737 enum attr_type insn_type
, dep_insn_type
;
4739 /* If the dependence is an anti-dependence, there is no cost. For an
4740 output dependence, there is sometimes a cost, but it doesn't seem
4741 worth handling those few cases. */
4743 if (REG_NOTE_KIND (link
) != 0)
4746 /* If we can't recognize the insns, we can't really do anything. */
4747 if (recog_memoized (insn
) < 0 || recog_memoized (dep_insn
) < 0)
4750 insn_type
= get_attr_type (insn
);
4751 dep_insn_type
= get_attr_type (dep_insn
);
4753 /* Bring in the user-defined memory latency. */
4754 if (dep_insn_type
== TYPE_ILD
4755 || dep_insn_type
== TYPE_FLD
4756 || dep_insn_type
== TYPE_LDSYM
)
4757 cost
+= alpha_memory_latency
-1;
  /* On EV4, if INSN is a store insn and DEP_INSN is setting the data
     being stored, we can sometimes lower the cost.  */

  if ((insn_type == TYPE_IST || insn_type == TYPE_FST)
      && (set = single_set (dep_insn)) != 0
      && GET_CODE (PATTERN (insn)) == SET
      && rtx_equal_p (SET_DEST (set), SET_SRC (PATTERN (insn))))
    {
      switch (dep_insn_type)
        {
          /* No savings here.  */

          /* In these cases, we save one cycle.  */

        default:
          /* In all other cases, we save two cycles.  */
          return MAX (0, cost - 2);
        }
    }
  /* Another case that needs adjustment is an arithmetic or logical
     operation.  Its cost is usually one cycle, but we default it to
     two in the MD file.  The only case that it is actually two is
     for the address in loads, stores, and jumps.  */

  if (dep_insn_type == TYPE_IADD || dep_insn_type == TYPE_ILOG)

  /* The final case is when a compare feeds into an integer branch;
     the cost is only one cycle in that case.  */

  if (dep_insn_type == TYPE_ICMP && insn_type == TYPE_IBR)

  /* And the lord DEC saith:  "A special bypass provides an effective
     latency of 0 cycles for an ICMP or ILOG insn producing the test
     operand of an IBR or ICMOV insn."  */

  if ((dep_insn_type == TYPE_ICMP || dep_insn_type == TYPE_ILOG)
      && (set = single_set (dep_insn)) != 0)
    {
      /* A branch only has one input.  This must be it.  */
      if (insn_type == TYPE_IBR)
        return 0;
      /* A conditional move has three, make sure it is the test.  */
      if (insn_type == TYPE_ICMOV
          && GET_CODE (set_src = PATTERN (insn)) == SET
          && GET_CODE (set_src = SET_SRC (set_src)) == IF_THEN_ELSE
          && rtx_equal_p (SET_DEST (set), XEXP (set_src, 0)))
        return 0;
    }

  /* "The multiplier is unable to receive data from IEU bypass paths.
     The instruction issues at the expected time, but its latency is
     increased by the time it takes for the input data to become
     available to the multiplier" -- which happens in pipeline stage
     six, when results are committed to the register file.  */

  if (insn_type == TYPE_IMUL)
    {
      switch (dep_insn_type)
        {
          /* These insns produce their results in pipeline stage five.  */

          /* Other integer insns produce results in pipeline stage four.  */
        }
    }

  /* There is additional latency to move the result of (most) FP
     operations anywhere but the FP register file.  */

  if ((insn_type == TYPE_FST || insn_type == TYPE_FTOI)
      && (dep_insn_type == TYPE_FADD
          || dep_insn_type == TYPE_FMUL
          || dep_insn_type == TYPE_FCMOV))

  /* Otherwise, return the default cost.  */
  return cost;
}
/* Function to initialize the issue rate used by the scheduler.  */

static int
alpha_issue_rate ()
{
  return (alpha_cpu == PROCESSOR_EV4 ? 2 : 4);
}

static int
alpha_variable_issue (dump, verbose, insn, cim)
     FILE *dump ATTRIBUTE_UNUSED;
     int verbose ATTRIBUTE_UNUSED;
     rtx insn;
     int cim;
{
  if (recog_memoized (insn) < 0 || get_attr_type (insn) == TYPE_MULTI)
    return 0;

  return cim - 1;
}
/* Register global variables and machine-specific functions with the
   garbage collector.  */

#if TARGET_ABI_UNICOSMK
static void
alpha_init_machine_status (p)
     struct function *p;
{
  p->machine =
    (struct machine_function *) xcalloc (1, sizeof (struct machine_function));

  p->machine->first_ciw = NULL_RTX;
  p->machine->last_ciw = NULL_RTX;
  p->machine->ciw_count = 0;
  p->machine->addr_list = NULL_RTX;
}

static void
alpha_mark_machine_status (p)
     struct function *p;
{
  struct machine_function *machine = p->machine;

  if (machine)
    {
      ggc_mark_rtx (machine->first_ciw);
      ggc_mark_rtx (machine->addr_list);
    }
}

static void
alpha_free_machine_status (p)
     struct function *p;
{
  free (p->machine);
  p->machine = NULL;
}
#endif /* TARGET_ABI_UNICOSMK */
/* Functions to save and restore alpha_return_addr_rtx.  */

/* Start the ball rolling with RETURN_ADDR_RTX.  */

rtx
alpha_return_addr (count, frame)
     int count;
     rtx frame ATTRIBUTE_UNUSED;
{
  if (count != 0)
    return const0_rtx;

  return get_hard_reg_initial_val (Pmode, REG_RA);
}

/* Return or create a pseudo containing the gp value for the current
   function.  Needed only if TARGET_LD_BUGGY_LDGP.  */

rtx
alpha_gp_save_rtx ()
{
  return get_hard_reg_initial_val (DImode, 29);
}

static int
alpha_ra_ever_killed ()
{
  rtx top;

#ifdef ASM_OUTPUT_MI_THUNK
  if (current_function_is_thunk)
    return 0;
#endif
  if (!has_hard_reg_initial_val (Pmode, REG_RA))
    return regs_ever_live[REG_RA];

  push_topmost_sequence ();
  top = get_insns ();
  pop_topmost_sequence ();

  return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
}
4978 /* Return the trap mode suffix applicable to the current
4979 instruction, or NULL. */
4982 get_trap_mode_suffix ()
4984 enum attr_trap_suffix s
= get_attr_trap_suffix (current_output_insn
);
4988 case TRAP_SUFFIX_NONE
:
4991 case TRAP_SUFFIX_SU
:
4992 if (alpha_fptm
>= ALPHA_FPTM_SU
)
4996 case TRAP_SUFFIX_SUI
:
4997 if (alpha_fptm
>= ALPHA_FPTM_SUI
)
5001 case TRAP_SUFFIX_V_SV
:
5009 case ALPHA_FPTM_SUI
:
5014 case TRAP_SUFFIX_V_SV_SVI
:
5023 case ALPHA_FPTM_SUI
:
5028 case TRAP_SUFFIX_U_SU_SUI
:
5037 case ALPHA_FPTM_SUI
:
5045 /* Return the rounding mode suffix applicable to the current
5046 instruction, or NULL. */
5049 get_round_mode_suffix ()
5051 enum attr_round_suffix s
= get_attr_round_suffix (current_output_insn
);
5055 case ROUND_SUFFIX_NONE
:
5057 case ROUND_SUFFIX_NORMAL
:
5060 case ALPHA_FPRM_NORM
:
5062 case ALPHA_FPRM_MINF
:
5064 case ALPHA_FPRM_CHOP
:
5066 case ALPHA_FPRM_DYN
:
5071 case ROUND_SUFFIX_C
:
/* Print an operand.  Recognize special options, documented below.  */

void
print_operand (file, x, code)
     FILE *file;
     rtx x;
     int code;
{
  int i;

  switch (code)
    {
    case '~':
      /* Print the assembler name of the current function.  */
      assemble_name (file, alpha_fnname);
      break;

    case '/':
      {
        const char *trap = get_trap_mode_suffix ();
        const char *round = get_round_mode_suffix ();

        if (trap || round)
          fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
                   (trap ? trap : ""), (round ? round : ""));
        break;
      }

    case ',':
      /* Generates single precision instruction suffix.  */
      fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
      break;

    case '-':
      /* Generates double precision instruction suffix.  */
      fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
      break;

    case '#':
      if (alpha_this_literal_sequence_number == 0)
        alpha_this_literal_sequence_number = alpha_next_sequence_number++;
      fprintf (file, "%d", alpha_this_literal_sequence_number);
      break;

    case '*':
      if (alpha_this_gpdisp_sequence_number == 0)
        alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
      fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
      break;
    case 'H':
      if (GET_CODE (x) == HIGH)
        output_addr_const (file, XEXP (x, 0));
      else
        output_operand_lossage ("invalid %%H value");
      break;

    case 'r':
      /* If this operand is the constant zero, write it as "$31".  */
      if (GET_CODE (x) == REG)
        fprintf (file, "%s", reg_names[REGNO (x)]);
      else if (x == CONST0_RTX (GET_MODE (x)))
        fprintf (file, "$31");
      else
        output_operand_lossage ("invalid %%r value");
      break;

    case 'R':
      /* Similar, but for floating-point.  */
      if (GET_CODE (x) == REG)
        fprintf (file, "%s", reg_names[REGNO (x)]);
      else if (x == CONST0_RTX (GET_MODE (x)))
        fprintf (file, "$f31");
      else
        output_operand_lossage ("invalid %%R value");
      break;

    case 'N':
      /* Write the 1's complement of a constant.  */
      if (GET_CODE (x) != CONST_INT)
        output_operand_lossage ("invalid %%N value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
      break;

    case 'P':
      /* Write 1 << C, for a constant C.  */
      if (GET_CODE (x) != CONST_INT)
        output_operand_lossage ("invalid %%P value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
      break;

    case 'h':
      /* Write the high-order 16 bits of a constant, sign-extended.  */
      if (GET_CODE (x) != CONST_INT)
        output_operand_lossage ("invalid %%h value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
      break;

    case 'L':
      /* Write the low-order 16 bits of a constant, sign-extended.  */
      if (GET_CODE (x) != CONST_INT)
        output_operand_lossage ("invalid %%L value");

      fprintf (file, HOST_WIDE_INT_PRINT_DEC,
               (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
      break;
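      /* For example, for the constant 0x12348765, %h prints
         0x12348765 >> 16 = 0x1234, while %L prints
         (0x8765) - 2 * (0x8000) = -0x789b, i.e. the low 16 bits
         interpreted as a signed value.  */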
    case 'm':
      /* Write mask for ZAP insn.  */
      if (GET_CODE (x) == CONST_DOUBLE)
        {
          HOST_WIDE_INT mask = 0;
          HOST_WIDE_INT value;

          value = CONST_DOUBLE_LOW (x);
          for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
               i++, value >>= 8)
            if (value & 0xff)
              mask |= (1 << i);

          value = CONST_DOUBLE_HIGH (x);
          for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
               i++, value >>= 8)
            if (value & 0xff)
              mask |= (1 << (i + sizeof (int)));

          fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
        }

      else if (GET_CODE (x) == CONST_INT)
        {
          HOST_WIDE_INT mask = 0, value = INTVAL (x);

          for (i = 0; i < 8; i++, value >>= 8)
            if (value & 0xff)
              mask |= (1 << i);

          fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
        }
      else
        output_operand_lossage ("invalid %%m value");
      break;
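      /* For example, the CONST_INT 0x00ff00ff has nonzero bytes in lanes 0
         and 2, so the ZAP mask printed by %m is (1 << 0) | (1 << 2) = 5.  */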
5224 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5225 if (GET_CODE (x
) != CONST_INT
5226 || (INTVAL (x
) != 8 && INTVAL (x
) != 16
5227 && INTVAL (x
) != 32 && INTVAL (x
) != 64))
5228 output_operand_lossage ("invalid %%M value");
5230 fprintf (file
, "%s",
5231 (INTVAL (x
) == 8 ? "b"
5232 : INTVAL (x
) == 16 ? "w"
5233 : INTVAL (x
) == 32 ? "l"
5238 /* Similar, except do it from the mask. */
5239 if (GET_CODE (x
) == CONST_INT
&& INTVAL (x
) == 0xff)
5240 fprintf (file
, "b");
5241 else if (GET_CODE (x
) == CONST_INT
&& INTVAL (x
) == 0xffff)
5242 fprintf (file
, "w");
5243 else if (GET_CODE (x
) == CONST_INT
&& INTVAL (x
) == 0xffffffff)
5244 fprintf (file
, "l");
5245 #if HOST_BITS_PER_WIDE_INT == 32
5246 else if (GET_CODE (x
) == CONST_DOUBLE
5247 && CONST_DOUBLE_HIGH (x
) == 0
5248 && CONST_DOUBLE_LOW (x
) == -1)
5249 fprintf (file
, "l");
5250 else if (GET_CODE (x
) == CONST_DOUBLE
5251 && CONST_DOUBLE_HIGH (x
) == -1
5252 && CONST_DOUBLE_LOW (x
) == -1)
5253 fprintf (file
, "q");
5255 else if (GET_CODE (x
) == CONST_INT
&& INTVAL (x
) == -1)
5256 fprintf (file
, "q");
5257 else if (GET_CODE (x
) == CONST_DOUBLE
5258 && CONST_DOUBLE_HIGH (x
) == 0
5259 && CONST_DOUBLE_LOW (x
) == -1)
5260 fprintf (file
, "q");
5263 output_operand_lossage ("invalid %%U value");
5267 /* Write the constant value divided by 8 for little-endian mode or
5268 (56 - value) / 8 for big-endian mode. */
5270 if (GET_CODE (x
) != CONST_INT
5271 || (unsigned HOST_WIDE_INT
) INTVAL (x
) >= (WORDS_BIG_ENDIAN
5274 || (INTVAL (x
) & 7) != 0)
5275 output_operand_lossage ("invalid %%s value");
5277 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
,
5279 ? (56 - INTVAL (x
)) / 8
5284 /* Same, except compute (64 - c) / 8 */
5286 if (GET_CODE (x
) != CONST_INT
5287 && (unsigned HOST_WIDE_INT
) INTVAL (x
) >= 64
5288 && (INTVAL (x
) & 7) != 8)
5289 output_operand_lossage ("invalid %%s value");
5291 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, (64 - INTVAL (x
)) / 8);
5296 /* On Unicos/Mk systems: use a DEX expression if the symbol
5297 clashes with a register name. */
5298 int dex
= unicosmk_need_dex (x
);
5300 fprintf (file
, "DEX(%d)", dex
);
5302 output_addr_const (file
, x
);
5306 case 'C': case 'D': case 'c': case 'd':
5307 /* Write out comparison name. */
5309 enum rtx_code c
= GET_CODE (x
);
5311 if (GET_RTX_CLASS (c
) != '<')
5312 output_operand_lossage ("invalid %%C value");
5314 else if (code
== 'D')
5315 c
= reverse_condition (c
);
5316 else if (code
== 'c')
5317 c
= swap_condition (c
);
5318 else if (code
== 'd')
5319 c
= swap_condition (reverse_condition (c
));
5322 fprintf (file
, "ule");
5324 fprintf (file
, "ult");
5325 else if (c
== UNORDERED
)
5326 fprintf (file
, "un");
5328 fprintf (file
, "%s", GET_RTX_NAME (c
));
5333 /* Write the divide or modulus operator. */
5334 switch (GET_CODE (x
))
5337 fprintf (file
, "div%s", GET_MODE (x
) == SImode
? "l" : "q");
5340 fprintf (file
, "div%su", GET_MODE (x
) == SImode
? "l" : "q");
5343 fprintf (file
, "rem%s", GET_MODE (x
) == SImode
? "l" : "q");
5346 fprintf (file
, "rem%su", GET_MODE (x
) == SImode
? "l" : "q");
5349 output_operand_lossage ("invalid %%E value");
5355 /* Write "_u" for unaligned access. */
5356 if (GET_CODE (x
) == MEM
&& GET_CODE (XEXP (x
, 0)) == AND
)
5357 fprintf (file
, "_u");
5361 if (GET_CODE (x
) == REG
)
5362 fprintf (file
, "%s", reg_names
[REGNO (x
)]);
5363 else if (GET_CODE (x
) == MEM
)
5364 output_address (XEXP (x
, 0));
5366 output_addr_const (file
, x
);
5370 output_operand_lossage ("invalid %%xn code");
5375 print_operand_address (file
, addr
)
5380 HOST_WIDE_INT offset
= 0;
5382 if (GET_CODE (addr
) == AND
)
5383 addr
= XEXP (addr
, 0);
5385 if (GET_CODE (addr
) == PLUS
5386 && GET_CODE (XEXP (addr
, 1)) == CONST_INT
)
5388 offset
= INTVAL (XEXP (addr
, 1));
5389 addr
= XEXP (addr
, 0);
5392 if (GET_CODE (addr
) == LO_SUM
)
5394 output_addr_const (file
, XEXP (addr
, 1));
5398 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, offset
);
5401 addr
= XEXP (addr
, 0);
5402 if (GET_CODE (addr
) == REG
)
5403 basereg
= REGNO (addr
);
5404 else if (GET_CODE (addr
) == SUBREG
5405 && GET_CODE (SUBREG_REG (addr
)) == REG
)
5406 basereg
= subreg_regno (addr
);
5410 fprintf (file
, "($%d)\t\t!%s", basereg
,
5411 (basereg
== 29 ? "gprel" : "gprellow"));
5415 if (GET_CODE (addr
) == REG
)
5416 basereg
= REGNO (addr
);
5417 else if (GET_CODE (addr
) == SUBREG
5418 && GET_CODE (SUBREG_REG (addr
)) == REG
)
5419 basereg
= subreg_regno (addr
);
5420 else if (GET_CODE (addr
) == CONST_INT
)
5421 offset
= INTVAL (addr
);
5425 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, offset
);
5426 fprintf (file
, "($%d)", basereg
);
/* Emit RTL insns to initialize the variable parts of a trampoline at
   TRAMP.  FNADDR is an RTX for the address of the function's pure
   code.  CXT is an RTX for the static chain value for the function.

   The three offset parameters are for the individual template's
   layout.  A JMPOFS < 0 indicates that the trampoline does not
   contain instructions at all.

   We assume here that a function will be called many more times than
   its address is taken (e.g., it might be passed to qsort), so we
   take the trouble to initialize the "hint" field in the JMP insn.
   Note that the hint field is PC (new) + 4 * bits 13:0.  */

void
alpha_initialize_trampoline (tramp, fnaddr, cxt, fnofs, cxtofs, jmpofs)
     rtx tramp, fnaddr, cxt;
     int fnofs, cxtofs, jmpofs;
{
  rtx temp, temp1, addr;
  /* VMS really uses DImode pointers in memory at this point.  */
  enum machine_mode mode = TARGET_ABI_OPEN_VMS ? Pmode : ptr_mode;

#ifdef POINTERS_EXTEND_UNSIGNED
  fnaddr = convert_memory_address (mode, fnaddr);
  cxt = convert_memory_address (mode, cxt);
#endif

  /* Store function address and CXT.  */
  addr = memory_address (mode, plus_constant (tramp, fnofs));
  emit_move_insn (gen_rtx_MEM (mode, addr), fnaddr);
  addr = memory_address (mode, plus_constant (tramp, cxtofs));
  emit_move_insn (gen_rtx_MEM (mode, addr), cxt);

  /* This has been disabled since the hint only has a 32k range, and in
     no existing OS is the stack within 32k of the text segment.  */
  if (0 && jmpofs >= 0)
    {
      /* Compute hint value.  */
      temp = force_operand (plus_constant (tramp, jmpofs+4), NULL_RTX);
      temp = expand_binop (DImode, sub_optab, fnaddr, temp, temp, 1,
                           OPTAB_WIDEN);
      temp = expand_shift (RSHIFT_EXPR, Pmode, temp,
                           build_int_2 (2, 0), NULL_RTX, 1);
      temp = expand_and (gen_lowpart (SImode, temp), GEN_INT (0x3fff), 0);

      /* Merge in the hint.  */
      addr = memory_address (SImode, plus_constant (tramp, jmpofs));
      temp1 = force_reg (SImode, gen_rtx_MEM (SImode, addr));
      temp1 = expand_and (temp1, GEN_INT (0xffffc000), NULL_RTX);
      temp1 = expand_binop (SImode, ior_optab, temp1, temp, temp1, 1,
                            OPTAB_WIDEN);
      emit_move_insn (gen_rtx_MEM (SImode, addr), temp1);
    }

#ifdef TRANSFER_FROM_TRAMPOLINE
  emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
                     0, VOIDmode, 1, addr, Pmode);
#endif

  if (jmpofs >= 0)
    emit_insn (gen_imb ());
}
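/* Illustrative sketch (kept under #if 0 so it never affects the build) of
   the jmp hint computation performed by the disabled block above.  The
   function name and parameters are hypothetical; only the arithmetic mirrors
   the code: the hint is the word offset from the instruction after the jmp
   to the target, truncated to 14 bits.  */
#if 0
static unsigned int
jmp_hint_example (unsigned long target_pc, unsigned long jmp_pc)
{
  /* Hint semantics: target = PC (new) + 4 * bits 13:0.  */
  return (unsigned int) (((target_pc - (jmp_pc + 4)) >> 2) & 0x3fff);
}
#endif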
/* Determine where to put an argument to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
    This is null for libcalls where that information may
    not be available.
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
    the preceding args and about the function being called.
   NAMED is nonzero if this argument is a named parameter
    (otherwise it is an extra parameter matching an ellipsis).

   On Alpha the first 6 words of args are normally in registers
   and the rest are pushed.  */
5509 function_arg (cum
, mode
, type
, named
)
5510 CUMULATIVE_ARGS cum
;
5511 enum machine_mode mode
;
5513 int named ATTRIBUTE_UNUSED
;
5518 /* Set up defaults for FP operands passed in FP registers, and
5519 integral operands passed in integer registers. */
5521 && (GET_MODE_CLASS (mode
) == MODE_COMPLEX_FLOAT
5522 || GET_MODE_CLASS (mode
) == MODE_FLOAT
))
5527 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5528 the three platforms, so we can't avoid conditional compilation. */
5529 #if TARGET_ABI_OPEN_VMS
5531 if (mode
== VOIDmode
)
5532 return alpha_arg_info_reg_val (cum
);
5534 num_args
= cum
.num_args
;
5535 if (num_args
>= 6 || MUST_PASS_IN_STACK (mode
, type
))
5539 #if TARGET_ABI_UNICOSMK
5543 /* If this is the last argument, generate the call info word (CIW). */
5544 /* ??? We don't include the caller's line number in the CIW because
5545 I don't know how to determine it if debug infos are turned off. */
5546 if (mode
== VOIDmode
)
5555 for (i
= 0; i
< cum
.num_reg_words
&& i
< 5; i
++)
5556 if (cum
.reg_args_type
[i
])
5557 lo
|= (1 << (7 - i
));
5559 if (cum
.num_reg_words
== 6 && cum
.reg_args_type
[5])
5562 lo
|= cum
.num_reg_words
;
5564 #if HOST_BITS_PER_WIDE_INT == 32
5565 hi
= (cum
.num_args
<< 20) | cum
.num_arg_words
;
5567 lo
= lo
| ((HOST_WIDE_INT
) cum
.num_args
<< 52)
5568 | ((HOST_WIDE_INT
) cum
.num_arg_words
<< 32);
5571 ciw
= immed_double_const (lo
, hi
, DImode
);
5573 return gen_rtx_UNSPEC (DImode
, gen_rtvec (1, ciw
),
5574 UNSPEC_UMK_LOAD_CIW
);
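          /* Reading off the packing above (64-bit host case): bits 63:52 of
             the CIW hold cum.num_args, bits 51:32 hold cum.num_arg_words,
             bits 7:3 flag the first five register arguments whose
             reg_args_type is set, and the low bits hold cum.num_reg_words,
             as required by the Unicos/Mk calling convention.  */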
5577 size
= ALPHA_ARG_SIZE (mode
, type
, named
);
5578 num_args
= cum
.num_reg_words
;
5579 if (MUST_PASS_IN_STACK (mode
, type
)
5580 || cum
.num_reg_words
+ size
> 6 || cum
.force_stack
)
5582 else if (type
&& TYPE_MODE (type
) == BLKmode
)
5586 reg1
= gen_rtx_REG (DImode
, num_args
+ 16);
5587 reg1
= gen_rtx_EXPR_LIST (DImode
, reg1
, const0_rtx
);
5589 /* The argument fits in two registers. Note that we still need to
5590 reserve a register for empty structures. */
5594 return gen_rtx_PARALLEL (mode
, gen_rtvec (1, reg1
));
5597 reg2
= gen_rtx_REG (DImode
, num_args
+ 17);
5598 reg2
= gen_rtx_EXPR_LIST (DImode
, reg2
, GEN_INT (8));
5599 return gen_rtx_PARALLEL (mode
, gen_rtvec (2, reg1
, reg2
));
5609 /* VOID is passed as a special flag for "last argument". */
5610 if (type
== void_type_node
)
5612 else if (MUST_PASS_IN_STACK (mode
, type
))
5614 else if (FUNCTION_ARG_PASS_BY_REFERENCE (cum
, mode
, type
, named
))
5617 #endif /* TARGET_ABI_UNICOSMK */
5618 #endif /* TARGET_ABI_OPEN_VMS */
5620 return gen_rtx_REG (mode
, num_args
+ basereg
);
tree
alpha_build_va_list ()
{
  tree base, ofs, record, type_decl;

  if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
    return ptr_type_node;

  record = make_lang_type (RECORD_TYPE);
  type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
  TREE_CHAIN (record) = type_decl;
  TYPE_NAME (record) = type_decl;

  /* C++?  SET_IS_AGGR_TYPE (record, 1);  */

  ofs = build_decl (FIELD_DECL, get_identifier ("__offset"),
                    integer_type_node);
  DECL_FIELD_CONTEXT (ofs) = record;

  base = build_decl (FIELD_DECL, get_identifier ("__base"),
                     ptr_type_node);
  DECL_FIELD_CONTEXT (base) = record;
  TREE_CHAIN (base) = ofs;

  TYPE_FIELDS (record) = base;
  layout_type (record);

  return record;
}
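/* For illustration only (kept under #if 0): the record built above
   corresponds roughly to the following C declaration.  The field types are
   assumptions read off the FIELD_DECLs, not something this file defines.  */
#if 0
struct __va_list_tag
{
  char *__base;                 /* start of the saved-argument area */
  int __offset;                 /* byte offset of the next argument */
};
#endif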
5654 alpha_va_start (stdarg_p
, valist
, nextarg
)
5657 rtx nextarg ATTRIBUTE_UNUSED
;
5659 HOST_WIDE_INT offset
;
5660 tree t
, offset_field
, base_field
;
5662 if (TREE_CODE (TREE_TYPE (valist
)) == ERROR_MARK
)
5665 if (TARGET_ABI_UNICOSMK
)
5666 std_expand_builtin_va_start (stdarg_p
, valist
, nextarg
);
5668 /* For Unix, SETUP_INCOMING_VARARGS moves the starting address base
5669 up by 48, storing fp arg registers in the first 48 bytes, and the
5670 integer arg registers in the next 48 bytes. This is only done,
5671 however, if any integer registers need to be stored.
5673 If no integer registers need be stored, then we must subtract 48
5674 in order to account for the integer arg registers which are counted
5675 in argsize above, but which are not actually stored on the stack. */
5677 if (NUM_ARGS
<= 5 + stdarg_p
)
5678 offset
= TARGET_ABI_OPEN_VMS
? UNITS_PER_WORD
: 6 * UNITS_PER_WORD
;
5680 offset
= -6 * UNITS_PER_WORD
;
5682 if (TARGET_ABI_OPEN_VMS
)
5684 nextarg
= plus_constant (nextarg
, offset
);
5685 nextarg
= plus_constant (nextarg
, NUM_ARGS
* UNITS_PER_WORD
);
5686 t
= build (MODIFY_EXPR
, TREE_TYPE (valist
), valist
,
5687 make_tree (ptr_type_node
, nextarg
));
5688 TREE_SIDE_EFFECTS (t
) = 1;
5690 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
5694 base_field
= TYPE_FIELDS (TREE_TYPE (valist
));
5695 offset_field
= TREE_CHAIN (base_field
);
5697 base_field
= build (COMPONENT_REF
, TREE_TYPE (base_field
),
5698 valist
, base_field
);
5699 offset_field
= build (COMPONENT_REF
, TREE_TYPE (offset_field
),
5700 valist
, offset_field
);
5702 t
= make_tree (ptr_type_node
, virtual_incoming_args_rtx
);
5703 t
= build (PLUS_EXPR
, ptr_type_node
, t
, build_int_2 (offset
, 0));
5704 t
= build (MODIFY_EXPR
, TREE_TYPE (base_field
), base_field
, t
);
5705 TREE_SIDE_EFFECTS (t
) = 1;
5706 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
5708 t
= build_int_2 (NUM_ARGS
* UNITS_PER_WORD
, 0);
5709 t
= build (MODIFY_EXPR
, TREE_TYPE (offset_field
), offset_field
, t
);
5710 TREE_SIDE_EFFECTS (t
) = 1;
5711 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
5716 alpha_va_arg (valist
, type
)
5719 HOST_WIDE_INT tsize
;
5722 tree offset_field
, base_field
, addr_tree
, addend
;
5723 tree wide_type
, wide_ofs
;
5726 if (TARGET_ABI_OPEN_VMS
|| TARGET_ABI_UNICOSMK
)
5727 return std_expand_builtin_va_arg (valist
, type
);
5729 tsize
= ((TREE_INT_CST_LOW (TYPE_SIZE (type
)) / BITS_PER_UNIT
+ 7) / 8) * 8;
5731 base_field
= TYPE_FIELDS (TREE_TYPE (valist
));
5732 offset_field
= TREE_CHAIN (base_field
);
5734 base_field
= build (COMPONENT_REF
, TREE_TYPE (base_field
),
5735 valist
, base_field
);
5736 offset_field
= build (COMPONENT_REF
, TREE_TYPE (offset_field
),
5737 valist
, offset_field
);
5739 wide_type
= make_signed_type (64);
5740 wide_ofs
= save_expr (build1 (CONVERT_EXPR
, wide_type
, offset_field
));
5744 if (TYPE_MODE (type
) == TFmode
|| TYPE_MODE (type
) == TCmode
)
5747 tsize
= UNITS_PER_WORD
;
5749 else if (FLOAT_TYPE_P (type
))
5751 tree fpaddend
, cond
;
5753 fpaddend
= fold (build (PLUS_EXPR
, TREE_TYPE (addend
),
5754 addend
, build_int_2 (-6*8, 0)));
5756 cond
= fold (build (LT_EXPR
, integer_type_node
,
5757 wide_ofs
, build_int_2 (6*8, 0)));
5759 addend
= fold (build (COND_EXPR
, TREE_TYPE (addend
), cond
,
5763 addr_tree
= build (PLUS_EXPR
, TREE_TYPE (base_field
),
5764 base_field
, addend
);
5766 addr
= expand_expr (addr_tree
, NULL_RTX
, Pmode
, EXPAND_NORMAL
);
5767 addr
= copy_to_reg (addr
);
5769 t
= build (MODIFY_EXPR
, TREE_TYPE (offset_field
), offset_field
,
5770 build (PLUS_EXPR
, TREE_TYPE (offset_field
),
5771 offset_field
, build_int_2 (tsize
, 0)));
5772 TREE_SIDE_EFFECTS (t
) = 1;
5773 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
5777 addr
= force_reg (Pmode
, addr
);
5778 addr
= gen_rtx_MEM (Pmode
, addr
);
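  /* Reading the logic above: for a floating-point argument whose saved
     __offset is still below 6*8 = 48, the addend is biased by -48 so the
     value is loaded from the FP register save area; once __offset reaches
     48, the argument is read from the integer/stack area unmodified.  */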
/* This page contains routines that are used to determine what the function
   prologue and epilogue code will do and write them out.  */

/* Compute the size of the save area in the stack.  */

/* These variables are used for communication between the following functions.
   They indicate various things about the current function being compiled
   that are used to tell what kind of prologue, epilogue and procedure
   descriptor to generate.  */

/* Nonzero if we need a stack procedure.  */
static int alpha_is_stack_procedure;

/* Register number (either FP or SP) that is used to unwind the frame.  */
static int vms_unwind_regno;

/* Register number used to save FP.  We need not have one for RA since
   we don't modify it for register procedures.  This is only defined
   for register frame procedures.  */
static int vms_save_fp_regno;

/* Register number used to reference objects off our PV.  */
static int vms_base_regno;
5808 /* Compute register masks for saved registers. */
5811 alpha_sa_mask (imaskP
, fmaskP
)
5812 unsigned long *imaskP
;
5813 unsigned long *fmaskP
;
5815 unsigned long imask
= 0;
5816 unsigned long fmask
= 0;
5819 #ifdef ASM_OUTPUT_MI_THUNK
5820 if (!current_function_is_thunk
)
5823 if (TARGET_ABI_OPEN_VMS
&& alpha_is_stack_procedure
)
5824 imask
|= (1L << HARD_FRAME_POINTER_REGNUM
);
5826 /* One for every register we have to save. */
5827 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
5828 if (! fixed_regs
[i
] && ! call_used_regs
[i
]
5829 && regs_ever_live
[i
] && i
!= REG_RA
5830 && (!TARGET_ABI_UNICOSMK
|| i
!= HARD_FRAME_POINTER_REGNUM
))
5835 fmask
|= (1L << (i
- 32));
5838 /* We need to restore these for the handler. */
5839 if (current_function_calls_eh_return
)
5843 unsigned regno
= EH_RETURN_DATA_REGNO (i
);
5844 if (regno
== INVALID_REGNUM
)
5846 imask
|= 1L << regno
;
5850 if (!TARGET_ABI_UNICOSMK
)
5852 /* If any register spilled, then spill the return address also. */
5853 /* ??? This is required by the Digital stack unwind specification
5854 and isn't needed if we're doing Dwarf2 unwinding. */
5855 if (imask
|| fmask
|| alpha_ra_ever_killed ())
5856 imask
|= (1L << REG_RA
);
5870 #ifdef ASM_OUTPUT_MI_THUNK
5871 if (current_function_is_thunk
)
5876 if (TARGET_ABI_UNICOSMK
)
5878 for (i
= 9; i
< 15 && sa_size
== 0; i
++)
5879 if (! fixed_regs
[i
] && ! call_used_regs
[i
]
5880 && regs_ever_live
[i
])
5882 for (i
= 32 + 2; i
< 32 + 10 && sa_size
== 0; i
++)
5883 if (! fixed_regs
[i
] && ! call_used_regs
[i
]
5884 && regs_ever_live
[i
])
5889 /* One for every register we have to save. */
5890 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
5891 if (! fixed_regs
[i
] && ! call_used_regs
[i
]
5892 && regs_ever_live
[i
] && i
!= REG_RA
)
5897 if (TARGET_ABI_UNICOSMK
)
5899 /* We might not need to generate a frame if we don't make any calls
5900 (including calls to __T3E_MISMATCH if this is a vararg function),
5901 don't have any local variables which require stack slots, don't
5902 use alloca and have not determined that we need a frame for other
5905 alpha_is_stack_procedure
= sa_size
!= 0
5906 || alpha_ra_ever_killed ()
5907 || get_frame_size() != 0
5908 || current_function_outgoing_args_size
5909 || current_function_varargs
5910 || current_function_stdarg
5911 || current_function_calls_alloca
5912 || frame_pointer_needed
;
5914 /* Always reserve space for saving callee-saved registers if we
5915 need a frame as required by the calling convention. */
5916 if (alpha_is_stack_procedure
)
5919 else if (TARGET_ABI_OPEN_VMS
)
5921 /* Start by assuming we can use a register procedure if we don't
5922 make any calls (REG_RA not used) or need to save any
5923 registers and a stack procedure if we do. */
5924 alpha_is_stack_procedure
= sa_size
!= 0 || alpha_ra_ever_killed ();
5926 /* Decide whether to refer to objects off our PV via FP or PV.
5927 If we need FP for something else or if we receive a nonlocal
5928 goto (which expects PV to contain the value), we must use PV.
5929 Otherwise, start by assuming we can use FP. */
5930 vms_base_regno
= (frame_pointer_needed
5931 || current_function_has_nonlocal_label
5932 || alpha_is_stack_procedure
5933 || current_function_outgoing_args_size
5934 ? REG_PV
: HARD_FRAME_POINTER_REGNUM
);
5936 /* If we want to copy PV into FP, we need to find some register
5937 in which to save FP. */
5939 vms_save_fp_regno
= -1;
5940 if (vms_base_regno
== HARD_FRAME_POINTER_REGNUM
)
5941 for (i
= 0; i
< 32; i
++)
5942 if (! fixed_regs
[i
] && call_used_regs
[i
] && ! regs_ever_live
[i
])
5943 vms_save_fp_regno
= i
;
5945 if (vms_save_fp_regno
== -1)
5946 vms_base_regno
= REG_PV
, alpha_is_stack_procedure
= 1;
5948 /* Stack unwinding should be done via FP unless we use it for PV. */
5949 vms_unwind_regno
= (vms_base_regno
== REG_PV
5950 ? HARD_FRAME_POINTER_REGNUM
: STACK_POINTER_REGNUM
);
5952 /* If this is a stack procedure, allow space for saving FP and RA. */
5953 if (alpha_is_stack_procedure
)
5958 /* If some registers were saved but not RA, RA must also be saved,
5959 so leave space for it. */
5960 if (!TARGET_ABI_UNICOSMK
&& (sa_size
!= 0 || alpha_ra_ever_killed ()))
5963 /* Our size must be even (multiple of 16 bytes). */
5972 alpha_pv_save_size ()
5975 return alpha_is_stack_procedure
? 8 : 0;
5982 return vms_unwind_regno
== HARD_FRAME_POINTER_REGNUM
;
#if TARGET_ABI_OPEN_VMS

const struct attribute_spec vms_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  { "overlaid",   0, 0, true,  false, false, NULL },
  { "global",     0, 0, true,  false, false, NULL },
  { "initialize", 0, 0, true,  false, false, NULL },
  { NULL,         0, 0, false, false, false, NULL }
};
static int
find_lo_sum (px, data)
     rtx *px;
     void *data ATTRIBUTE_UNUSED;
{
  return GET_CODE (*px) == LO_SUM;
}

static int
alpha_does_function_need_gp ()
{
  rtx insn;

  /* The GP being variable is an OSF abi thing.  */
  if (! TARGET_ABI_OSF)
    return 0;

  if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
    return 1;

#ifdef ASM_OUTPUT_MI_THUNK
  if (current_function_is_thunk)
    return 1;
#endif

  /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
     Even if we are a static function, we still need to do this in case
     our address is taken and passed to something like qsort.  */

  push_topmost_sequence ();
  insn = get_insns ();
  pop_topmost_sequence ();

  for (; insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn)
        && GET_CODE (PATTERN (insn)) != USE
        && GET_CODE (PATTERN (insn)) != CLOBBER)
      {
        enum attr_type type = get_attr_type (insn);
        if (type == TYPE_LDSYM || type == TYPE_JSR)
          return 1;
        if (TARGET_EXPLICIT_RELOCS
            && for_each_rtx (&PATTERN (insn), find_lo_sum, NULL) > 0)
          return 1;
      }

  return 0;
}
/* Write a version stamp.  Don't write anything if we are running as a
   cross-compiler.  Otherwise, use the versions in /usr/include/stamp.h.  */

static void
alpha_write_verstamp (file)
     FILE *file ATTRIBUTE_UNUSED;
{
#ifdef MS_STAMP
  fprintf (file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
#endif
}
/* Helper function to set RTX_FRAME_RELATED_P on instructions, including
   sequences.  */

static rtx
set_frame_related_p ()
{
  rtx seq = gen_sequence ();
  end_sequence ();

  if (GET_CODE (seq) == SEQUENCE)
    {
      int i = XVECLEN (seq, 0);
      while (--i >= 0)
        RTX_FRAME_RELATED_P (XVECEXP (seq, 0, i)) = 1;
      return emit_insn (seq);
    }
  else
    {
      seq = emit_insn (seq);
      RTX_FRAME_RELATED_P (seq) = 1;
      return seq;
    }
}

#define FRP(exp)  (start_sequence (), exp, set_frame_related_p ())
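/* Usage example: wrapping an emit in FRP, e.g.

     FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));

   expands to

     (start_sequence (), emit_move_insn (...), set_frame_related_p ())

   so every insn emitted for the move ends up with RTX_FRAME_RELATED_P set
   for the unwind info.  */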
/* Write function prologue.  */

/* On vms we have two kinds of functions:

   - stack frame (PROC_STACK)
       these are 'normal' functions with local variables which call
       other functions
   - register frame (PROC_REGISTER)
       keeps all data in registers, needs no stack

   We must pass this to the assembler so it can generate the
   proper pdsc (procedure descriptor).
   This is done with the '.pdesc' command.

   On non-VMS targets, we don't really differentiate between the two, as we
   can simply allocate stack without saving registers.  */
6107 alpha_expand_prologue ()
6109 /* Registers to save. */
6110 unsigned long imask
= 0;
6111 unsigned long fmask
= 0;
6112 /* Stack space needed for pushing registers clobbered by us. */
6113 HOST_WIDE_INT sa_size
;
6114 /* Complete stack size needed. */
6115 HOST_WIDE_INT frame_size
;
6116 /* Offset from base reg to register save area. */
6117 HOST_WIDE_INT reg_offset
;
6121 sa_size
= alpha_sa_size ();
6123 frame_size
= get_frame_size ();
6124 if (TARGET_ABI_OPEN_VMS
)
6125 frame_size
= ALPHA_ROUND (sa_size
6126 + (alpha_is_stack_procedure
? 8 : 0)
6128 + current_function_pretend_args_size
);
6129 else if (TARGET_ABI_UNICOSMK
)
6130 /* We have to allocate space for the DSIB if we generate a frame. */
6131 frame_size
= ALPHA_ROUND (sa_size
6132 + (alpha_is_stack_procedure
? 48 : 0))
6133 + ALPHA_ROUND (frame_size
6134 + current_function_outgoing_args_size
);
6136 frame_size
= (ALPHA_ROUND (current_function_outgoing_args_size
)
6138 + ALPHA_ROUND (frame_size
6139 + current_function_pretend_args_size
));
6141 if (TARGET_ABI_OPEN_VMS
)
6144 reg_offset
= ALPHA_ROUND (current_function_outgoing_args_size
);
6146 alpha_sa_mask (&imask
, &fmask
);
6148 /* Emit an insn to reload GP, if needed. */
6151 alpha_function_needs_gp
= alpha_does_function_need_gp ();
6152 if (alpha_function_needs_gp
)
6153 emit_insn (gen_prologue_ldgp ());
6156 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
6157 the call to mcount ourselves, rather than having the linker do it
6158 magically in response to -pg. Since _mcount has special linkage,
6159 don't represent the call as a call. */
6160 if (TARGET_PROFILING_NEEDS_GP
&& current_function_profile
)
6161 emit_insn (gen_prologue_mcount ());
6163 if (TARGET_ABI_UNICOSMK
)
6164 unicosmk_gen_dsib (&imask
);
  /* Adjust the stack by the frame size.  If the frame size is > 4096
     bytes, we need to be sure we probe somewhere in the first and last
     4096 bytes (we can probably get away without the latter test) and
     every 8192 bytes in between.  If the frame size is > 32768, we
     do this in a loop.  Otherwise, we generate the explicit probe
     instructions.

     Note that we are only allowed to adjust sp once in the prologue.  */
6175 if (frame_size
<= 32768)
6177 if (frame_size
> 4096)
6182 emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
6185 while ((probed
+= 8192) < frame_size
);
6187 /* We only have to do this probe if we aren't saving registers. */
6188 if (sa_size
== 0 && probed
+ 4096 < frame_size
)
6189 emit_insn (gen_probe_stack (GEN_INT (-frame_size
)));
6192 if (frame_size
!= 0)
6193 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx
, stack_pointer_rtx
,
6194 GEN_INT (TARGET_ABI_UNICOSMK
      /* Here we generate code to set R22 to SP + 4096 and set R23 to the
         number of 8192 byte blocks to probe.  We then probe each block
         in the loop and then set SP to the proper location.  If the
         amount remaining is > 4096, we have to do one more probe if we
         are not saving any registers.  */

      HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
      HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
      rtx ptr = gen_rtx_REG (DImode, 22);
      rtx count = gen_rtx_REG (DImode, 23);
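      /* Worked example: for frame_size = 20000, blocks = (20000 + 4096)/8192
         = 2 and leftover = 24096 - 2*8192 = 7712; two 8192-byte blocks are
         probed in the loop and, since leftover > 4096, one extra probe is
         emitted below when no registers are being saved.  */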
6212 emit_move_insn (count
, GEN_INT (blocks
));
6213 emit_insn (gen_adddi3 (ptr
, stack_pointer_rtx
,
6214 GEN_INT (TARGET_ABI_UNICOSMK
? 4096 - 64 : 4096)));
6216 /* Because of the difficulty in emitting a new basic block this
6217 late in the compilation, generate the loop as a single insn. */
6218 emit_insn (gen_prologue_stack_probe_loop (count
, ptr
));
6220 if (leftover
> 4096 && sa_size
== 0)
6222 rtx last
= gen_rtx_MEM (DImode
, plus_constant (ptr
, -leftover
));
6223 MEM_VOLATILE_P (last
) = 1;
6224 emit_move_insn (last
, const0_rtx
);
6227 if (TARGET_ABI_WINDOWS_NT
)
          /* For NT stack unwind (done by 'reverse execution'), it's
             not OK to take the result of a loop, even though the value
             is already in ptr, so we reload it via a single operation
             and subtract it from sp.

             Yes, that's correct -- we have to reload the whole constant
             into a temporary via ldah+lda then subtract from sp.  To
             ensure we get ldah+lda, we use a special pattern.  */
6238 HOST_WIDE_INT lo
, hi
;
6239 lo
= ((frame_size
& 0xffff) ^ 0x8000) - 0x8000;
6240 hi
= frame_size
- lo
;
6242 emit_move_insn (ptr
, GEN_INT (hi
));
6243 emit_insn (gen_nt_lda (ptr
, GEN_INT (lo
)));
6244 seq
= emit_insn (gen_subdi3 (stack_pointer_rtx
, stack_pointer_rtx
,
6249 seq
= emit_insn (gen_adddi3 (stack_pointer_rtx
, ptr
,
6250 GEN_INT (-leftover
)));
6253 /* This alternative is special, because the DWARF code cannot
6254 possibly intuit through the loop above. So we invent this
6255 note it looks at instead. */
6256 RTX_FRAME_RELATED_P (seq
) = 1;
6258 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR
,
6259 gen_rtx_SET (VOIDmode
, stack_pointer_rtx
,
6260 gen_rtx_PLUS (Pmode
, stack_pointer_rtx
,
6261 GEN_INT (TARGET_ABI_UNICOSMK
6267 if (!TARGET_ABI_UNICOSMK
)
6269 /* Cope with very large offsets to the register save area. */
6270 sa_reg
= stack_pointer_rtx
;
6271 if (reg_offset
+ sa_size
> 0x8000)
6273 int low
= ((reg_offset
& 0xffff) ^ 0x8000) - 0x8000;
6276 if (low
+ sa_size
<= 0x8000)
6277 bias
= reg_offset
- low
, reg_offset
= low
;
6279 bias
= reg_offset
, reg_offset
= 0;
6281 sa_reg
= gen_rtx_REG (DImode
, 24);
6282 FRP (emit_insn (gen_adddi3 (sa_reg
, stack_pointer_rtx
,
6286 /* Save regs in stack order. Beginning with VMS PV. */
6287 if (TARGET_ABI_OPEN_VMS
&& alpha_is_stack_procedure
)
6289 mem
= gen_rtx_MEM (DImode
, stack_pointer_rtx
);
6290 set_mem_alias_set (mem
, alpha_sr_alias_set
);
6291 FRP (emit_move_insn (mem
, gen_rtx_REG (DImode
, REG_PV
)));
6294 /* Save register RA next. */
6295 if (imask
& (1L << REG_RA
))
6297 mem
= gen_rtx_MEM (DImode
, plus_constant (sa_reg
, reg_offset
));
6298 set_mem_alias_set (mem
, alpha_sr_alias_set
);
6299 FRP (emit_move_insn (mem
, gen_rtx_REG (DImode
, REG_RA
)));
6300 imask
&= ~(1L << REG_RA
);
6304 /* Now save any other registers required to be saved. */
6305 for (i
= 0; i
< 32; i
++)
6306 if (imask
& (1L << i
))
6308 mem
= gen_rtx_MEM (DImode
, plus_constant (sa_reg
, reg_offset
));
6309 set_mem_alias_set (mem
, alpha_sr_alias_set
);
6310 FRP (emit_move_insn (mem
, gen_rtx_REG (DImode
, i
)));
6314 for (i
= 0; i
< 32; i
++)
6315 if (fmask
& (1L << i
))
6317 mem
= gen_rtx_MEM (DFmode
, plus_constant (sa_reg
, reg_offset
));
6318 set_mem_alias_set (mem
, alpha_sr_alias_set
);
6319 FRP (emit_move_insn (mem
, gen_rtx_REG (DFmode
, i
+32)));
6323 else if (TARGET_ABI_UNICOSMK
&& alpha_is_stack_procedure
)
6325 /* The standard frame on the T3E includes space for saving registers.
6326 We just have to use it. We don't have to save the return address and
6327 the old frame pointer here - they are saved in the DSIB. */
6330 for (i
= 9; i
< 15; i
++)
6331 if (imask
& (1L << i
))
6333 mem
= gen_rtx_MEM (DImode
, plus_constant(hard_frame_pointer_rtx
,
6335 set_mem_alias_set (mem
, alpha_sr_alias_set
);
6336 FRP (emit_move_insn (mem
, gen_rtx_REG (DImode
, i
)));
6339 for (i
= 2; i
< 10; i
++)
6340 if (fmask
& (1L << i
))
6342 mem
= gen_rtx_MEM (DFmode
, plus_constant (hard_frame_pointer_rtx
,
6344 set_mem_alias_set (mem
, alpha_sr_alias_set
);
6345 FRP (emit_move_insn (mem
, gen_rtx_REG (DFmode
, i
+32)));
6350 if (TARGET_ABI_OPEN_VMS
)
6352 if (!alpha_is_stack_procedure
)
6353 /* Register frame procedures save the fp. */
6354 /* ??? Ought to have a dwarf2 save for this. */
6355 emit_move_insn (gen_rtx_REG (DImode
, vms_save_fp_regno
),
6356 hard_frame_pointer_rtx
);
6358 if (vms_base_regno
!= REG_PV
)
6359 emit_insn (gen_force_movdi (gen_rtx_REG (DImode
, vms_base_regno
),
6360 gen_rtx_REG (DImode
, REG_PV
)));
6362 if (vms_unwind_regno
== HARD_FRAME_POINTER_REGNUM
)
6363 FRP (emit_move_insn (hard_frame_pointer_rtx
, stack_pointer_rtx
));
6365 /* If we have to allocate space for outgoing args, do it now. */
6366 if (current_function_outgoing_args_size
!= 0)
6369 plus_constant (hard_frame_pointer_rtx
,
6371 (current_function_outgoing_args_size
)))));
6373 else if (!TARGET_ABI_UNICOSMK
)
6375 /* If we need a frame pointer, set it from the stack pointer. */
6376 if (frame_pointer_needed
)
6378 if (TARGET_CAN_FAULT_IN_PROLOGUE
)
6379 FRP (emit_move_insn (hard_frame_pointer_rtx
, stack_pointer_rtx
));
6381 /* This must always be the last instruction in the
6382 prologue, thus we emit a special move + clobber. */
6383 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx
,
6384 stack_pointer_rtx
, sa_reg
)));
6388 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
6389 the prologue, for exception handling reasons, we cannot do this for
6390 any insn that might fault. We could prevent this for mems with a
6391 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
6392 have to prevent all such scheduling with a blockage.
6394 Linux, on the other hand, never bothered to implement OSF/1's
6395 exception handling, and so doesn't care about such things. Anyone
6396 planning to use dwarf2 frame-unwind info can also omit the blockage. */
6398 if (! TARGET_CAN_FAULT_IN_PROLOGUE
)
6399 emit_insn (gen_blockage ());
6402 /* Output the textual info surrounding the prologue. */
6405 alpha_start_function (file
, fnname
, decl
)
6408 tree decl ATTRIBUTE_UNUSED
;
6410 unsigned long imask
= 0;
6411 unsigned long fmask
= 0;
6412 /* Stack space needed for pushing registers clobbered by us. */
6413 HOST_WIDE_INT sa_size
;
6414 /* Complete stack size needed. */
6415 HOST_WIDE_INT frame_size
;
6416 /* Offset from base reg to register save area. */
6417 HOST_WIDE_INT reg_offset
;
6418 char *entry_label
= (char *) alloca (strlen (fnname
) + 6);
6421 /* Don't emit an extern directive for functions defined in the same file. */
6422 if (TARGET_ABI_UNICOSMK
)
6425 name_tree
= get_identifier (fnname
);
6426 TREE_ASM_WRITTEN (name_tree
) = 1;
6429 alpha_fnname
= fnname
;
6430 sa_size
= alpha_sa_size ();
6432 frame_size
= get_frame_size ();
6433 if (TARGET_ABI_OPEN_VMS
)
6434 frame_size
= ALPHA_ROUND (sa_size
6435 + (alpha_is_stack_procedure
? 8 : 0)
6437 + current_function_pretend_args_size
);
6438 else if (TARGET_ABI_UNICOSMK
)
6439 frame_size
= ALPHA_ROUND (sa_size
6440 + (alpha_is_stack_procedure
? 48 : 0))
6441 + ALPHA_ROUND (frame_size
6442 + current_function_outgoing_args_size
);
6444 frame_size
= (ALPHA_ROUND (current_function_outgoing_args_size
)
6446 + ALPHA_ROUND (frame_size
6447 + current_function_pretend_args_size
));
6449 if (TARGET_ABI_OPEN_VMS
)
6452 reg_offset
= ALPHA_ROUND (current_function_outgoing_args_size
);
6454 alpha_sa_mask (&imask
, &fmask
);
6456 /* Ecoff can handle multiple .file directives, so put out file and lineno.
6457 We have to do that before the .ent directive as we cannot switch
6458 files within procedures with native ecoff because line numbers are
6459 linked to procedure descriptors.
6460 Outputting the lineno helps debugging of one line functions as they
6461 would otherwise get no line number at all. Please note that we would
6462 like to put out last_linenum from final.c, but it is not accessible. */
6464 if (write_symbols
== SDB_DEBUG
)
6466 #ifdef ASM_OUTPUT_SOURCE_FILENAME
6467 ASM_OUTPUT_SOURCE_FILENAME (file
,
6468 DECL_SOURCE_FILE (current_function_decl
));
6470 #ifdef ASM_OUTPUT_SOURCE_LINE
6471 if (debug_info_level
!= DINFO_LEVEL_TERSE
)
6472 ASM_OUTPUT_SOURCE_LINE (file
,
6473 DECL_SOURCE_LINE (current_function_decl
));
6477 /* Issue function start and label. */
6478 if (TARGET_ABI_OPEN_VMS
6479 || (!TARGET_ABI_UNICOSMK
&& !flag_inhibit_size_directive
))
6481 fputs ("\t.ent ", file
);
6482 assemble_name (file
, fnname
);
6485 /* If the function needs GP, we'll write the "..ng" label there.
6486 Otherwise, do it here. */
6487 if (TARGET_ABI_OSF
&& ! alpha_function_needs_gp
)
6490 assemble_name (file
, fnname
);
6491 fputs ("..ng:\n", file
);
6495 strcpy (entry_label
, fnname
);
6496 if (TARGET_ABI_OPEN_VMS
)
6497 strcat (entry_label
, "..en");
6499 /* For public functions, the label must be globalized by appending an
6500 additional colon. */
6501 if (TARGET_ABI_UNICOSMK
&& TREE_PUBLIC (decl
))
6502 strcat (entry_label
, ":");
6504 ASM_OUTPUT_LABEL (file
, entry_label
);
6505 inside_function
= TRUE
;
6507 if (TARGET_ABI_OPEN_VMS
)
6508 fprintf (file
, "\t.base $%d\n", vms_base_regno
);
6510 if (!TARGET_ABI_OPEN_VMS
&& !TARGET_ABI_UNICOSMK
&& TARGET_IEEE_CONFORMANT
6511 && !flag_inhibit_size_directive
)
6513 /* Set flags in procedure descriptor to request IEEE-conformant
6514 math-library routines. The value we set it to is PDSC_EXC_IEEE
6515 (/usr/include/pdsc.h). */
6516 fputs ("\t.eflag 48\n", file
);
6519 /* Set up offsets to alpha virtual arg/local debugging pointer. */
6520 alpha_auto_offset
= -frame_size
+ current_function_pretend_args_size
;
6521 alpha_arg_offset
= -frame_size
+ 48;
6523 /* Describe our frame. If the frame size is larger than an integer,
6524 print it as zero to avoid an assembler error. We won't be
6525 properly describing such a frame, but that's the best we can do. */
6526 if (TARGET_ABI_UNICOSMK
)
6528 else if (TARGET_ABI_OPEN_VMS
)
6530 fprintf (file
, "\t.frame $%d,", vms_unwind_regno
);
6531 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
,
6532 frame_size
>= ((HOST_WIDE_INT
) 1 << 31) ? 0 : frame_size
);
6533 fputs (",$26,", file
);
6534 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, reg_offset
);
6537 else if (!flag_inhibit_size_directive
)
6539 fprintf (file
, "\t.frame $%d,",
6540 (frame_pointer_needed
6541 ? HARD_FRAME_POINTER_REGNUM
: STACK_POINTER_REGNUM
));
6542 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
,
6543 frame_size
>= (1l << 31) ? 0 : frame_size
);
6544 fprintf (file
, ",$26,%d\n", current_function_pretend_args_size
);
6547 /* Describe which registers were spilled. */
6548 if (TARGET_ABI_UNICOSMK
)
6550 else if (TARGET_ABI_OPEN_VMS
)
6553 /* ??? Does VMS care if mask contains ra? The old code didn't
6554 set it, so I don't here. */
6555 fprintf (file
, "\t.mask 0x%lx,0\n", imask
& ~(1L << REG_RA
));
6557 fprintf (file
, "\t.fmask 0x%lx,0\n", fmask
);
6558 if (!alpha_is_stack_procedure
)
6559 fprintf (file
, "\t.fp_save $%d\n", vms_save_fp_regno
);
6561 else if (!flag_inhibit_size_directive
)
6565 fprintf (file
, "\t.mask 0x%lx,", imask
);
6566 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
,
6567 frame_size
>= (1l << 31) ? 0 : reg_offset
- frame_size
);
6570 for (i
= 0; i
< 32; ++i
)
6571 if (imask
& (1L << i
))
6577 fprintf (file
, "\t.fmask 0x%lx,", fmask
);
6578 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
,
6579 frame_size
>= (1l << 31) ? 0 : reg_offset
- frame_size
);
6584 #if TARGET_ABI_OPEN_VMS
6585 /* Ifdef'ed cause readonly_section and link_section are only
6587 readonly_section ();
6588 fprintf (file
, "\t.align 3\n");
6589 assemble_name (file
, fnname
); fputs ("..na:\n", file
);
6590 fputs ("\t.ascii \"", file
);
6591 assemble_name (file
, fnname
);
6592 fputs ("\\0\"\n", file
);
6595 fprintf (file
, "\t.align 3\n");
6596 fputs ("\t.name ", file
);
6597 assemble_name (file
, fnname
);
6598 fputs ("..na\n", file
);
6599 ASM_OUTPUT_LABEL (file
, fnname
);
6600 fprintf (file
, "\t.pdesc ");
6601 assemble_name (file
, fnname
);
6602 fprintf (file
, "..en,%s\n", alpha_is_stack_procedure
? "stack" : "reg");
6603 alpha_need_linkage (fnname
, 1);
/* Emit the .prologue note at the scheduled end of the prologue.  */

static void
alpha_output_function_end_prologue (file)
     FILE *file;
{
  if (TARGET_ABI_UNICOSMK)
    ;
  else if (TARGET_ABI_OPEN_VMS)
    fputs ("\t.prologue\n", file);
  else if (TARGET_ABI_WINDOWS_NT)
    fputs ("\t.prologue 0\n", file);
  else if (!flag_inhibit_size_directive)
    fprintf (file, "\t.prologue %d\n", alpha_function_needs_gp);
}
6624 /* Write function epilogue. */
6626 /* ??? At some point we will want to support full unwind, and so will
6627 need to mark the epilogue as well. At the moment, we just confuse
6630 #define FRP(exp) exp
6633 alpha_expand_epilogue ()
6635 /* Registers to save. */
6636 unsigned long imask
= 0;
6637 unsigned long fmask
= 0;
6638 /* Stack space needed for pushing registers clobbered by us. */
6639 HOST_WIDE_INT sa_size
;
6640 /* Complete stack size needed. */
6641 HOST_WIDE_INT frame_size
;
6642 /* Offset from base reg to register save area. */
6643 HOST_WIDE_INT reg_offset
;
6644 int fp_is_frame_pointer
, fp_offset
;
6645 rtx sa_reg
, sa_reg_exp
= NULL
;
6646 rtx sp_adj1
, sp_adj2
, mem
;
6650 sa_size
= alpha_sa_size ();
6652 frame_size
= get_frame_size ();
6653 if (TARGET_ABI_OPEN_VMS
)
6654 frame_size
= ALPHA_ROUND (sa_size
6655 + (alpha_is_stack_procedure
? 8 : 0)
6657 + current_function_pretend_args_size
);
6658 else if (TARGET_ABI_UNICOSMK
)
6659 frame_size
= ALPHA_ROUND (sa_size
6660 + (alpha_is_stack_procedure
? 48 : 0))
6661 + ALPHA_ROUND (frame_size
6662 + current_function_outgoing_args_size
);
6664 frame_size
= (ALPHA_ROUND (current_function_outgoing_args_size
)
6666 + ALPHA_ROUND (frame_size
6667 + current_function_pretend_args_size
));
6669 if (TARGET_ABI_OPEN_VMS
)
6672 reg_offset
= ALPHA_ROUND (current_function_outgoing_args_size
);
6674 alpha_sa_mask (&imask
, &fmask
);
6676 fp_is_frame_pointer
= ((TARGET_ABI_OPEN_VMS
&& alpha_is_stack_procedure
)
6677 || (!TARGET_ABI_OPEN_VMS
&& frame_pointer_needed
));
6679 sa_reg
= stack_pointer_rtx
;
6681 if (current_function_calls_eh_return
)
6682 eh_ofs
= EH_RETURN_STACKADJ_RTX
;
6686 if (!TARGET_ABI_UNICOSMK
&& sa_size
)
6688 /* If we have a frame pointer, restore SP from it. */
6689 if ((TARGET_ABI_OPEN_VMS
6690 && vms_unwind_regno
== HARD_FRAME_POINTER_REGNUM
)
6691 || (!TARGET_ABI_OPEN_VMS
&& frame_pointer_needed
))
6692 FRP (emit_move_insn (stack_pointer_rtx
, hard_frame_pointer_rtx
));
6694 /* Cope with very large offsets to the register save area. */
6695 if (reg_offset
+ sa_size
> 0x8000)
6697 int low
= ((reg_offset
& 0xffff) ^ 0x8000) - 0x8000;
6700 if (low
+ sa_size
<= 0x8000)
6701 bias
= reg_offset
- low
, reg_offset
= low
;
6703 bias
= reg_offset
, reg_offset
= 0;
6705 sa_reg
= gen_rtx_REG (DImode
, 22);
6706 sa_reg_exp
= plus_constant (stack_pointer_rtx
, bias
);
6708 FRP (emit_move_insn (sa_reg
, sa_reg_exp
));
6711 /* Restore registers in order, excepting a true frame pointer. */
6713 mem
= gen_rtx_MEM (DImode
, plus_constant (sa_reg
, reg_offset
));
6715 set_mem_alias_set (mem
, alpha_sr_alias_set
);
6716 FRP (emit_move_insn (gen_rtx_REG (DImode
, REG_RA
), mem
));
6719 imask
&= ~(1L << REG_RA
);
6721 for (i
= 0; i
< 32; ++i
)
6722 if (imask
& (1L << i
))
6724 if (i
== HARD_FRAME_POINTER_REGNUM
&& fp_is_frame_pointer
)
6725 fp_offset
= reg_offset
;
6728 mem
= gen_rtx_MEM (DImode
, plus_constant(sa_reg
, reg_offset
));
6729 set_mem_alias_set (mem
, alpha_sr_alias_set
);
6730 FRP (emit_move_insn (gen_rtx_REG (DImode
, i
), mem
));
6735 for (i
= 0; i
< 32; ++i
)
6736 if (fmask
& (1L << i
))
6738 mem
= gen_rtx_MEM (DFmode
, plus_constant(sa_reg
, reg_offset
));
6739 set_mem_alias_set (mem
, alpha_sr_alias_set
);
6740 FRP (emit_move_insn (gen_rtx_REG (DFmode
, i
+32), mem
));
6744 else if (TARGET_ABI_UNICOSMK
&& alpha_is_stack_procedure
)
6746 /* Restore callee-saved general-purpose registers. */
6750 for (i
= 9; i
< 15; i
++)
6751 if (imask
& (1L << i
))
6753 mem
= gen_rtx_MEM (DImode
, plus_constant(hard_frame_pointer_rtx
,
6755 set_mem_alias_set (mem
, alpha_sr_alias_set
);
6756 FRP (emit_move_insn (gen_rtx_REG (DImode
, i
), mem
));
6760 for (i
= 2; i
< 10; i
++)
6761 if (fmask
& (1L << i
))
6763 mem
= gen_rtx_MEM (DFmode
, plus_constant(hard_frame_pointer_rtx
,
6765 set_mem_alias_set (mem
, alpha_sr_alias_set
);
6766 FRP (emit_move_insn (gen_rtx_REG (DFmode
, i
+32), mem
));
6770 /* Restore the return address from the DSIB. */
6772 mem
= gen_rtx_MEM (DImode
, plus_constant(hard_frame_pointer_rtx
, -8));
6773 set_mem_alias_set (mem
, alpha_sr_alias_set
);
6774 FRP (emit_move_insn (gen_rtx_REG (DImode
, REG_RA
), mem
));
6777 if (frame_size
|| eh_ofs
)
6779 sp_adj1
= stack_pointer_rtx
;
6783 sp_adj1
= gen_rtx_REG (DImode
, 23);
6784 emit_move_insn (sp_adj1
,
6785 gen_rtx_PLUS (Pmode
, stack_pointer_rtx
, eh_ofs
));
6788 /* If the stack size is large, begin computation into a temporary
6789 register so as not to interfere with a potential fp restore,
6790 which must be consecutive with an SP restore. */
6791 if (frame_size
< 32768
6792 && ! (TARGET_ABI_UNICOSMK
&& current_function_calls_alloca
))
6793 sp_adj2
= GEN_INT (frame_size
);
6794 else if (TARGET_ABI_UNICOSMK
)
6796 sp_adj1
= gen_rtx_REG (DImode
, 23);
6797 FRP (emit_move_insn (sp_adj1
, hard_frame_pointer_rtx
));
6798 sp_adj2
= const0_rtx
;
6800 else if (frame_size
< 0x40007fffL
)
6802 int low
= ((frame_size
& 0xffff) ^ 0x8000) - 0x8000;
6804 sp_adj2
= plus_constant (sp_adj1
, frame_size
- low
);
6805 if (sa_reg_exp
&& rtx_equal_p (sa_reg_exp
, sp_adj2
))
6809 sp_adj1
= gen_rtx_REG (DImode
, 23);
6810 FRP (emit_move_insn (sp_adj1
, sp_adj2
));
6812 sp_adj2
= GEN_INT (low
);
6816 rtx tmp
= gen_rtx_REG (DImode
, 23);
6817 FRP (sp_adj2
= alpha_emit_set_const (tmp
, DImode
, frame_size
, 3));
6820 /* We can't drop new things to memory this late, afaik,
6821 so build it up by pieces. */
6822 FRP (sp_adj2
= alpha_emit_set_long_const (tmp
, frame_size
,
6823 -(frame_size
< 0)));
6829 /* From now on, things must be in order. So emit blockages. */
6831 /* Restore the frame pointer. */
6832 if (TARGET_ABI_UNICOSMK
)
6834 emit_insn (gen_blockage ());
6835 mem
= gen_rtx_MEM (DImode
,
6836 plus_constant (hard_frame_pointer_rtx
, -16));
6837 set_mem_alias_set (mem
, alpha_sr_alias_set
);
6838 FRP (emit_move_insn (hard_frame_pointer_rtx
, mem
));
6840 else if (fp_is_frame_pointer
)
6842 emit_insn (gen_blockage ());
6843 mem
= gen_rtx_MEM (DImode
, plus_constant (sa_reg
, fp_offset
));
6844 set_mem_alias_set (mem
, alpha_sr_alias_set
);
6845 FRP (emit_move_insn (hard_frame_pointer_rtx
, mem
));
6847 else if (TARGET_ABI_OPEN_VMS
)
6849 emit_insn (gen_blockage ());
6850 FRP (emit_move_insn (hard_frame_pointer_rtx
,
6851 gen_rtx_REG (DImode
, vms_save_fp_regno
)));
6854 /* Restore the stack pointer. */
6855 emit_insn (gen_blockage ());
6856 if (sp_adj2
== const0_rtx
)
6857 FRP (emit_move_insn (stack_pointer_rtx
, sp_adj1
));
6859 FRP (emit_move_insn (stack_pointer_rtx
,
6860 gen_rtx_PLUS (DImode
, sp_adj1
, sp_adj2
)));
6864 if (TARGET_ABI_OPEN_VMS
&& !alpha_is_stack_procedure
)
6866 emit_insn (gen_blockage ());
6867 FRP (emit_move_insn (hard_frame_pointer_rtx
,
6868 gen_rtx_REG (DImode
, vms_save_fp_regno
)));
6870 else if (TARGET_ABI_UNICOSMK
&& !alpha_is_stack_procedure
)
6872 /* Decrement the frame pointer if the function does not have a
6875 emit_insn (gen_blockage ());
6876 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx
,
6877 hard_frame_pointer_rtx
, GEN_INT (-1))));
6882 /* Output the rest of the textual info surrounding the epilogue. */
6885 alpha_end_function (file
, fnname
, decl
)
6888 tree decl ATTRIBUTE_UNUSED
;
6890 /* End the function. */
6891 if (!TARGET_ABI_UNICOSMK
&& !flag_inhibit_size_directive
)
6893 fputs ("\t.end ", file
);
6894 assemble_name (file
, fnname
);
6897 inside_function
= FALSE
;
6899 /* Show that we know this function if it is called again.
6901 Don't do this for global functions in object files destined for a
6902 shared library because the function may be overridden by the application
6903 or other libraries. Similarly, don't do this for weak functions.
6905 Don't do this for functions not defined in the .text section, as
6906 otherwise it's not unlikely that the destination is out of range
6907 for a direct branch. */
6909 if (!DECL_WEAK (current_function_decl
)
6910 && (!flag_pic
|| !TREE_PUBLIC (current_function_decl
))
6911 && decl_in_text_section (current_function_decl
))
6912 SYMBOL_REF_FLAG (XEXP (DECL_RTL (current_function_decl
), 0)) = 1;
6914 /* Output jump tables and the static subroutine information block. */
6915 if (TARGET_ABI_UNICOSMK
)
6917 unicosmk_output_ssib (file
, fnname
);
6918 unicosmk_output_deferred_case_vectors (file
);
/* Debugging support.  */

/* Count the number of sdb-related labels that are generated (to find block
   start and end boundaries).  */

int sdb_label_count = 0;

/* Next label # for each statement.  */

static int sym_lineno = 0;

/* Count the number of .file directives, so that .loc is up to date.  */

static int num_source_filenames = 0;

/* Name of the file containing the current function.  */

static const char *current_function_file = "";

/* Offsets to alpha virtual arg/local debugging pointers.  */

long alpha_arg_offset;
long alpha_auto_offset;

/* Emit a new filename to a stream.  */
6951 alpha_output_filename (stream
, name
)
6955 static int first_time
= TRUE
;
6956 char ltext_label_name
[100];
6961 ++num_source_filenames
;
6962 current_function_file
= name
;
6963 fprintf (stream
, "\t.file\t%d ", num_source_filenames
);
6964 output_quoted_string (stream
, name
);
6965 fprintf (stream
, "\n");
6966 if (!TARGET_GAS
&& write_symbols
== DBX_DEBUG
)
6967 fprintf (stream
, "\t#@stabs\n");
6970 else if (write_symbols
== DBX_DEBUG
)
6972 ASM_GENERATE_INTERNAL_LABEL (ltext_label_name
, "Ltext", 0);
6973 fprintf (stream
, "%s", ASM_STABS_OP
);
6974 output_quoted_string (stream
, name
);
6975 fprintf (stream
, ",%d,0,0,%s\n", N_SOL
, <ext_label_name
[1]);
6978 else if (name
!= current_function_file
6979 && strcmp (name
, current_function_file
) != 0)
6981 if (inside_function
&& ! TARGET_GAS
)
6982 fprintf (stream
, "\t#.file\t%d ", num_source_filenames
);
6985 ++num_source_filenames
;
6986 current_function_file
= name
;
6987 fprintf (stream
, "\t.file\t%d ", num_source_filenames
);
6990 output_quoted_string (stream
, name
);
6991 fprintf (stream
, "\n");
/* Emit a linenumber to a stream.  */

void
alpha_output_lineno (stream, line)
     FILE *stream;
     int line;
{
  if (write_symbols == DBX_DEBUG)
    {
      /* mips-tfile doesn't understand .stabd directives.  */
      ++sym_lineno;
      fprintf (stream, "$LM%d:\n%s%d,0,%d,$LM%d\n",
               sym_lineno, ASM_STABN_OP, N_SLINE, line, sym_lineno);
    }
  else
    fprintf (stream, "\n\t.loc\t%d %d\n", num_source_filenames, line);
}
/* Structure to show the current status of registers and memory.  */

struct shadow_summary
{
  struct {
    unsigned int i     : 31;	/* Mask of int regs */
    unsigned int fp    : 31;	/* Mask of fp regs */
    unsigned int mem   : 1;	/* mem == imem | fpmem */
  } used, defd;
};

static void summarize_insn
  PARAMS ((rtx, struct shadow_summary *, int));
static void alpha_handle_trap_shadows
  PARAMS ((rtx));
/* Summarize the effects of expression X on the machine.  Update SUM, a
   pointer to the summary structure.  SET is nonzero if the insn is setting
   the object, otherwise zero.  */

static void
summarize_insn (x, sum, set)
     rtx x;
     struct shadow_summary *sum;
     int set;
{
  const char *format_ptr;
  int i, j;

  switch (GET_CODE (x))
    {
      /* ??? Note that this case would be incorrect if the Alpha had a
         ZERO_EXTRACT in SET_DEST.  */
    case SET:
      summarize_insn (SET_SRC (x), sum, 0);
      summarize_insn (SET_DEST (x), sum, 1);
      break;

    case CLOBBER:
      summarize_insn (XEXP (x, 0), sum, 1);
      break;

    case USE:
      summarize_insn (XEXP (x, 0), sum, 0);
      break;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
        summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
      break;

    case PARALLEL:
      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
        summarize_insn (XVECEXP (x, 0, i), sum, 0);
      break;

    case SUBREG:
      summarize_insn (SUBREG_REG (x), sum, 0);
      break;

    case REG:
      {
        int regno = REGNO (x);
        unsigned long mask = ((unsigned long) 1) << (regno % 32);

        if (regno == 31 || regno == 63)
          break;

        if (set)
          {
            if (regno < 32)
              sum->defd.i |= mask;
            else
              sum->defd.fp |= mask;
          }
        else
          {
            if (regno < 32)
              sum->used.i |= mask;
            else
              sum->used.fp |= mask;
          }
      }
      break;

    case MEM:
      if (set)
        sum->defd.mem = 1;
      else
        sum->used.mem = 1;

      /* Find the regs used in memory address computation: */
      summarize_insn (XEXP (x, 0), sum, 0);
      break;

    case CONST_INT:   case CONST_DOUBLE:
    case SYMBOL_REF:  case LABEL_REF:     case CONST:
    case SCRATCH:     case ASM_INPUT:
      break;

      /* Handle common unary and binary ops for efficiency.  */
    case COMPARE:  case PLUS:    case MINUS:   case MULT:      case DIV:
    case MOD:      case UDIV:    case UMOD:    case AND:       case IOR:
    case XOR:      case ASHIFT:  case ROTATE:  case ASHIFTRT:  case LSHIFTRT:
    case ROTATERT: case SMIN:    case SMAX:    case UMIN:      case UMAX:
    case NE:       case EQ:      case GE:      case GT:        case LE:
    case LT:       case GEU:     case GTU:     case LEU:       case LTU:
      summarize_insn (XEXP (x, 0), sum, 0);
      summarize_insn (XEXP (x, 1), sum, 0);
      break;

    case NEG:  case NOT:  case SIGN_EXTEND:  case ZERO_EXTEND:
    case TRUNCATE:  case FLOAT_EXTEND:  case FLOAT_TRUNCATE:  case FLOAT:
    case FIX:  case UNSIGNED_FLOAT:  case UNSIGNED_FIX:  case ABS:
    case SQRT:  case FFS:
      summarize_insn (XEXP (x, 0), sum, 0);
      break;

    default:
      format_ptr = GET_RTX_FORMAT (GET_CODE (x));
      for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
        switch (format_ptr[i])
          {
          case 'e':
            summarize_insn (XEXP (x, i), sum, 0);
            break;

          case 'E':
            for (j = XVECLEN (x, i) - 1; j >= 0; j--)
              summarize_insn (XVECEXP (x, i, j), sum, 0);
            break;

          case 'i':
            break;

          default:
            abort ();
          }
    }
}
/* Ensure a sufficient number of `trapb' insns are in the code when
   the user requests code with a trap precision of functions or
   instructions.

   In naive mode, when the user requests a trap-precision of
   "instruction", a trapb is needed after every instruction that may
   generate a trap.  This ensures that the code is resumption safe but
   it is also slow.

   When optimizations are turned on, we delay issuing a trapb as long
   as possible.  In this context, a trap shadow is the sequence of
   instructions that starts with a (potentially) trap generating
   instruction and extends to the next trapb or call_pal instruction
   (but GCC never generates call_pal by itself).  We can delay (and
   therefore sometimes omit) a trapb subject to the following
   conditions:

   (a) On entry to the trap shadow, if any Alpha register or memory
   location contains a value that is used as an operand value by some
   instruction in the trap shadow (live on entry), then no instruction
   in the trap shadow may modify the register or memory location.

   (b) Within the trap shadow, the computation of the base register
   for a memory load or store instruction may not involve using the
   result of an instruction that might generate an UNPREDICTABLE
   result.

   (c) Within the trap shadow, no register may be used more than once
   as a destination register.  (This is to make life easier for the
   trap-handler.)

   (d) The trap shadow may not include any branch instructions.  */
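
/* Illustrative example, not part of the original sources: with
   -mtrap-precision=i a trap shadow in the emitted assembly might be

        addt/su $f1,$f2,$f3     # may trap; the shadow begins here
        addq    $1,$2,$3        # ok: operands unmodified, $3 written once
        trapb                   # the shadow ends; traps are now precise

   If a later insn in the shadow wrote $3 again, condition (c) above
   would be violated and alpha_handle_trap_shadows would have to close
   the shadow (emit the trapb) before that insn.  */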
static void
alpha_handle_trap_shadows (insns)
     rtx insns;
{
  struct shadow_summary shadow;
  int trap_pending, exception_nesting;
  rtx i, n;

  trap_pending = 0;
  exception_nesting = 0;
  shadow.used.i = 0;
  shadow.used.fp = 0;
  shadow.used.mem = 0;
  shadow.defd = shadow.used;

  for (i = insns; i; i = NEXT_INSN (i))
    {
      if (GET_CODE (i) == NOTE)
        {
          switch (NOTE_LINE_NUMBER (i))
            {
            case NOTE_INSN_EH_REGION_BEG:
              exception_nesting++;
              if (trap_pending)
                goto close_shadow;
              break;

            case NOTE_INSN_EH_REGION_END:
              exception_nesting--;
              if (trap_pending)
                goto close_shadow;
              break;

            case NOTE_INSN_EPILOGUE_BEG:
              if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
                goto close_shadow;
              break;
            }
        }
      else if (trap_pending)
        {
          if (alpha_tp == ALPHA_TP_FUNC)
            {
              if (GET_CODE (i) == JUMP_INSN
                  && GET_CODE (PATTERN (i)) == RETURN)
                goto close_shadow;
            }
          else if (alpha_tp == ALPHA_TP_INSN)
            {
              if (optimize > 0)
                {
                  struct shadow_summary sum;

                  sum.used.i = 0;
                  sum.used.fp = 0;
                  sum.used.mem = 0;
                  sum.defd = sum.used;

                  switch (GET_CODE (i))
                    {
                    case INSN:
                      /* Annoyingly, get_attr_trap will abort on these.  */
                      if (GET_CODE (PATTERN (i)) == USE
                          || GET_CODE (PATTERN (i)) == CLOBBER)
                        break;

                      summarize_insn (PATTERN (i), &sum, 0);

                      if ((sum.defd.i & shadow.defd.i)
                          || (sum.defd.fp & shadow.defd.fp))
                        {
                          /* (c) would be violated */
                          goto close_shadow;
                        }

                      /* Combine shadow with summary of current insn: */
                      shadow.used.i   |= sum.used.i;
                      shadow.used.fp  |= sum.used.fp;
                      shadow.used.mem |= sum.used.mem;
                      shadow.defd.i   |= sum.defd.i;
                      shadow.defd.fp  |= sum.defd.fp;
                      shadow.defd.mem |= sum.defd.mem;

                      if ((sum.defd.i & shadow.used.i)
                          || (sum.defd.fp & shadow.used.fp)
                          || (sum.defd.mem & shadow.used.mem))
                        {
                          /* (a) would be violated (also takes care of (b))  */
                          if (get_attr_trap (i) == TRAP_YES
                              && ((sum.defd.i & sum.used.i)
                                  || (sum.defd.fp & sum.used.fp)))
                            abort ();

                          goto close_shadow;
                        }
                      break;

                    case JUMP_INSN:
                    case CALL_INSN:
                    case CODE_LABEL:
                      goto close_shadow;

                    default:
                      abort ();
                    }
                }
              else
                {
                close_shadow:
                  n = emit_insn_before (gen_trapb (), i);
                  PUT_MODE (n, TImode);
                  PUT_MODE (i, TImode);
                  trap_pending = 0;
                  shadow.used.i = 0;
                  shadow.used.fp = 0;
                  shadow.used.mem = 0;
                  shadow.defd = shadow.used;
                }
            }
        }

      if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
          && GET_CODE (i) == INSN
          && GET_CODE (PATTERN (i)) != USE
          && GET_CODE (PATTERN (i)) != CLOBBER
          && get_attr_trap (i) == TRAP_YES)
        {
          if (optimize && !trap_pending)
            summarize_insn (PATTERN (i), &shadow, 0);
          trap_pending = 1;
        }
    }
}
/* Alpha can only issue instruction groups simultaneously if they are
   suitably aligned.  This is very processor-specific.  */
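
/* For reference, and only summarizing the alpha_align_insns calls made
   from the reorg pass further below (no new information): the EV4 issues
   from aligned 8-byte fetch blocks (two insns) and the EV5 from aligned
   16-byte fetch blocks (four insns), which is why max_align is passed
   as 8 or 16 respectively.  */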
7325 enum alphaev4_pipe
{
7332 enum alphaev5_pipe
{
7343 static enum alphaev4_pipe alphaev4_insn_pipe
PARAMS ((rtx
));
7344 static enum alphaev5_pipe alphaev5_insn_pipe
PARAMS ((rtx
));
7345 static rtx alphaev4_next_group
PARAMS ((rtx
, int *, int *));
7346 static rtx alphaev5_next_group
PARAMS ((rtx
, int *, int *));
7347 static rtx alphaev4_next_nop
PARAMS ((int *));
7348 static rtx alphaev5_next_nop
PARAMS ((int *));
7350 static void alpha_align_insns
7351 PARAMS ((rtx
, unsigned int, rtx (*)(rtx
, int *, int *), rtx (*)(int *)));
7353 static enum alphaev4_pipe
7354 alphaev4_insn_pipe (insn
)
7357 if (recog_memoized (insn
) < 0)
7359 if (get_attr_length (insn
) != 4)
7362 switch (get_attr_type (insn
))
7395 static enum alphaev5_pipe
7396 alphaev5_insn_pipe (insn
)
7399 if (recog_memoized (insn
) < 0)
7401 if (get_attr_length (insn
) != 4)
7404 switch (get_attr_type (insn
))
/* IN_USE is a mask of the slots currently filled within the insn group.
   The mask bits come from alphaev4_pipe above.  If EV4_IBX is set, then
   the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.

   LEN is, of course, the length of the group in bytes.  */
7451 alphaev4_next_group (insn
, pin_use
, plen
)
7453 int *pin_use
, *plen
;
7460 || GET_CODE (PATTERN (insn
)) == CLOBBER
7461 || GET_CODE (PATTERN (insn
)) == USE
)
7466 enum alphaev4_pipe pipe
;
7468 pipe
= alphaev4_insn_pipe (insn
);
7472 /* Force complex instructions to start new groups. */
      /* If this is a completely unrecognized insn, it's an asm.
         We don't know how long it is, so record length as -1 to
         signal a needed realignment.  */
7479 if (recog_memoized (insn
) < 0)
7482 len
= get_attr_length (insn
);
7486 if (in_use
& EV4_IB0
)
7488 if (in_use
& EV4_IB1
)
7493 in_use
|= EV4_IB0
| EV4_IBX
;
7497 if (in_use
& EV4_IB0
)
7499 if (!(in_use
& EV4_IBX
) || (in_use
& EV4_IB1
))
7507 if (in_use
& EV4_IB1
)
7517 /* Haifa doesn't do well scheduling branches. */
7518 if (GET_CODE (insn
) == JUMP_INSN
)
7522 insn
= next_nonnote_insn (insn
);
7524 if (!insn
|| ! INSN_P (insn
))
7527 /* Let Haifa tell us where it thinks insn group boundaries are. */
7528 if (GET_MODE (insn
) == TImode
)
7531 if (GET_CODE (insn
) == CLOBBER
|| GET_CODE (insn
) == USE
)
7536 insn
= next_nonnote_insn (insn
);
/* IN_USE is a mask of the slots currently filled within the insn group.
   The mask bits come from alphaev5_pipe above.  If EV5_E01 is set, then
   the insn in EV5_E0 can be swapped by the hardware into EV5_E1.

   LEN is, of course, the length of the group in bytes.  */
7551 alphaev5_next_group (insn
, pin_use
, plen
)
7553 int *pin_use
, *plen
;
7560 || GET_CODE (PATTERN (insn
)) == CLOBBER
7561 || GET_CODE (PATTERN (insn
)) == USE
)
7566 enum alphaev5_pipe pipe
;
7568 pipe
= alphaev5_insn_pipe (insn
);
7572 /* Force complex instructions to start new groups. */
      /* If this is a completely unrecognized insn, it's an asm.
         We don't know how long it is, so record length as -1 to
         signal a needed realignment.  */
7579 if (recog_memoized (insn
) < 0)
7582 len
= get_attr_length (insn
);
7585 /* ??? Most of the places below, we would like to abort, as
7586 it would indicate an error either in Haifa, or in the
7587 scheduling description. Unfortunately, Haifa never
7588 schedules the last instruction of the BB, so we don't
7589 have an accurate TI bit to go off. */
7591 if (in_use
& EV5_E0
)
7593 if (in_use
& EV5_E1
)
7598 in_use
|= EV5_E0
| EV5_E01
;
7602 if (in_use
& EV5_E0
)
7604 if (!(in_use
& EV5_E01
) || (in_use
& EV5_E1
))
7612 if (in_use
& EV5_E1
)
7618 if (in_use
& EV5_FA
)
7620 if (in_use
& EV5_FM
)
7625 in_use
|= EV5_FA
| EV5_FAM
;
7629 if (in_use
& EV5_FA
)
7635 if (in_use
& EV5_FM
)
7648 /* Haifa doesn't do well scheduling branches. */
7649 /* ??? If this is predicted not-taken, slotting continues, except
7650 that no more IBR, FBR, or JSR insns may be slotted. */
7651 if (GET_CODE (insn
) == JUMP_INSN
)
7655 insn
= next_nonnote_insn (insn
);
7657 if (!insn
|| ! INSN_P (insn
))
7660 /* Let Haifa tell us where it thinks insn group boundaries are. */
7661 if (GET_MODE (insn
) == TImode
)
7664 if (GET_CODE (insn
) == CLOBBER
|| GET_CODE (insn
) == USE
)
7669 insn
= next_nonnote_insn (insn
);
7678 alphaev4_next_nop (pin_use
)
7681 int in_use
= *pin_use
;
7684 if (!(in_use
& EV4_IB0
))
7689 else if ((in_use
& (EV4_IBX
|EV4_IB1
)) == EV4_IBX
)
7694 else if (TARGET_FP
&& !(in_use
& EV4_IB1
))
7707 alphaev5_next_nop (pin_use
)
7710 int in_use
= *pin_use
;
7713 if (!(in_use
& EV5_E1
))
7718 else if (TARGET_FP
&& !(in_use
& EV5_FA
))
7723 else if (TARGET_FP
&& !(in_use
& EV5_FM
))
7735 /* The instruction group alignment main loop. */
7738 alpha_align_insns (insns
, max_align
, next_group
, next_nop
)
7740 unsigned int max_align
;
7741 rtx (*next_group
) PARAMS ((rtx
, int *, int *));
7742 rtx (*next_nop
) PARAMS ((int *));
7744 /* ALIGN is the known alignment for the insn group. */
7746 /* OFS is the offset of the current insn in the insn group. */
7748 int prev_in_use
, in_use
, len
;
7751 /* Let shorten branches care for assigning alignments to code labels. */
7752 shorten_branches (insns
);
7754 if (align_functions
< 4)
7756 else if ((unsigned int) align_functions
< max_align
)
7757 align
= align_functions
;
7761 ofs
= prev_in_use
= 0;
7763 if (GET_CODE (i
) == NOTE
)
7764 i
= next_nonnote_insn (i
);
7768 next
= (*next_group
) (i
, &in_use
, &len
);
7770 /* When we see a label, resync alignment etc. */
7771 if (GET_CODE (i
) == CODE_LABEL
)
7773 unsigned int new_align
= 1 << label_to_alignment (i
);
7775 if (new_align
>= align
)
7777 align
= new_align
< max_align
? new_align
: max_align
;
7781 else if (ofs
& (new_align
-1))
7782 ofs
= (ofs
| (new_align
-1)) + 1;
      /* Handle complex instructions specially.  */
7788 else if (in_use
== 0)
7790 /* Asms will have length < 0. This is a signal that we have
7791 lost alignment knowledge. Assume, however, that the asm
7792 will not mis-align instructions. */
7801 /* If the known alignment is smaller than the recognized insn group,
7802 realign the output. */
7803 else if ((int) align
< len
)
7805 unsigned int new_log_align
= len
> 8 ? 4 : 3;
7808 where
= prev
= prev_nonnote_insn (i
);
7809 if (!where
|| GET_CODE (where
) != CODE_LABEL
)
7812 /* Can't realign between a call and its gp reload. */
7813 if (! (TARGET_EXPLICIT_RELOCS
7814 && prev
&& GET_CODE (prev
) == CALL_INSN
))
7816 emit_insn_before (gen_realign (GEN_INT (new_log_align
)), where
);
7817 align
= 1 << new_log_align
;
7822 /* If the group won't fit in the same INT16 as the previous,
7823 we need to add padding to keep the group together. Rather
7824 than simply leaving the insn filling to the assembler, we
7825 can make use of the knowledge of what sorts of instructions
7826 were issued in the previous group to make sure that all of
7827 the added nops are really free. */
7828 else if (ofs
+ len
> (int) align
)
7830 int nop_count
= (align
- ofs
) / 4;
          /* Insert nops before labels, branches, and calls to truly merge
             the execution of the nops with the previous instruction group.  */
7835 where
= prev_nonnote_insn (i
);
7838 if (GET_CODE (where
) == CODE_LABEL
)
7840 rtx where2
= prev_nonnote_insn (where
);
7841 if (where2
&& GET_CODE (where2
) == JUMP_INSN
)
7844 else if (GET_CODE (where
) == INSN
)
7851 emit_insn_before ((*next_nop
)(&prev_in_use
), where
);
7852 while (--nop_count
);
7856 ofs
= (ofs
+ len
) & (align
- 1);
7857 prev_in_use
= in_use
;
/* Machine dependent reorg pass.  */

void
alpha_reorg (insns)
     rtx insns;
{
  if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
    alpha_handle_trap_shadows (insns);

  /* Due to the number of extra trapb insns, don't bother fixing up
     alignment when trap precision is instruction.  Moreover, we can
     only do our job when sched2 is run.  */
  if (optimize && !optimize_size
      && alpha_tp != ALPHA_TP_INSN
      && flag_schedule_insns_after_reload)
    {
      if (alpha_cpu == PROCESSOR_EV4)
        alpha_align_insns (insns, 8, alphaev4_next_group, alphaev4_next_nop);
      else if (alpha_cpu == PROCESSOR_EV5)
        alpha_align_insns (insns, 16, alphaev5_next_group, alphaev5_next_nop);
    }
}
/* Check a floating-point value for validity for a particular machine mode.  */

static const char * const float_strings[] =
{
  /* These are for FLOAT_VAX.  */
  "1.70141173319264430e+38", /* 2^127 (2^24 - 1) / 2^24 */
  "-1.70141173319264430e+38",
  "2.93873587705571877e-39", /* 2^-128 */
  "-2.93873587705571877e-39",
  /* These are for the default broken IEEE mode, which traps
     on infinity or denormal numbers.  */
  "3.402823466385288598117e+38", /* 2^128 (1 - 2^-24) */
  "-3.402823466385288598117e+38",
  "1.1754943508222875079687e-38", /* 2^-126 */
  "-1.1754943508222875079687e-38",
};

static REAL_VALUE_TYPE float_values[8];
static int inited_float_values = 0;
int
check_float_value (mode, d, overflow)
     enum machine_mode mode;
     REAL_VALUE_TYPE *d;
     int overflow ATTRIBUTE_UNUSED;
{
  if (TARGET_IEEE || TARGET_IEEE_CONFORMANT || TARGET_IEEE_WITH_INEXACT)
    return 0;

  if (inited_float_values == 0)
    {
      int i;
      for (i = 0; i < 8; i++)
        float_values[i] = REAL_VALUE_ATOF (float_strings[i], DFmode);

      inited_float_values = 1;
    }

  if (mode == SFmode)
    {
      REAL_VALUE_TYPE r;
      REAL_VALUE_TYPE *fvptr;

      if (TARGET_FLOAT_VAX)
        fvptr = &float_values[0];
      else
        fvptr = &float_values[4];

      memcpy (&r, d, sizeof (REAL_VALUE_TYPE));
      if (REAL_VALUES_LESS (fvptr[0], r))
        {
          memcpy (d, &fvptr[0], sizeof (REAL_VALUE_TYPE));
          return 1;
        }
      else if (REAL_VALUES_LESS (r, fvptr[1]))
        {
          memcpy (d, &fvptr[1], sizeof (REAL_VALUE_TYPE));
          return 1;
        }
      else if (REAL_VALUES_LESS (dconst0, r)
               && REAL_VALUES_LESS (r, fvptr[2]))
        {
          memcpy (d, &dconst0, sizeof (REAL_VALUE_TYPE));
          return 1;
        }
      else if (REAL_VALUES_LESS (r, dconst0)
               && REAL_VALUES_LESS (fvptr[3], r))
        {
          memcpy (d, &dconst0, sizeof (REAL_VALUE_TYPE));
          return 1;
        }
    }

  return 0;
}
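
/* Illustrative example, not part of the original sources: with
   -mfloat-vax an SFmode constant such as 1.0e39 is larger than the
   first limit above (1.70141173319264430e+38), so check_float_value
   replaces it with that maximum value, while a nonzero constant whose
   magnitude is below 2.93873587705571877e-39 is flushed to zero.  */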
#if TARGET_ABI_OPEN_VMS

/* Return the VMS argument type corresponding to MODE.  */

enum avms_arg_type
alpha_arg_type (mode)
     enum machine_mode mode;
{
  switch (mode)
    {
    case SFmode:
      return TARGET_FLOAT_VAX ? FF : FS;
    case DFmode:
      return TARGET_FLOAT_VAX ? FD : FT;
    default:
      return I64;
    }
}

/* Return an rtx for an integer representing the VMS Argument Information
   register.  */

rtx
alpha_arg_info_reg_val (cum)
     CUMULATIVE_ARGS cum;
{
  unsigned HOST_WIDE_INT regval = cum.num_args;
  int i;

  for (i = 0; i < 6; i++)
    regval |= ((int) cum.atypes[i]) << (i * 3 + 8);

  return GEN_INT (regval);
}
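
/* Illustrative example, not part of the original sources, and assuming
   the I64/FF/FD/FS/FT argument type codes declared in alpha.h: for a
   call f (int, double) under the default IEEE float format, num_args
   is 2 and the loop above forms

        regval = 2 | (I64 << 8) | (FT << 11);

   i.e. the low byte carries the argument count and each subsequent
   3-bit field describes one of the first six arguments.  */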
#include <splay-tree.h>

/* Structure to collect function names for final output in link section.  */

enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};

struct alpha_links
{
  rtx linkage;
  enum links_kind kind;
};

static splay_tree alpha_links;

static int mark_alpha_links_node
  PARAMS ((splay_tree_node, void *));
static void mark_alpha_links
  PARAMS ((void *));
static int alpha_write_one_linkage
  PARAMS ((splay_tree_node, void *));

/* Protect alpha_links from garbage collection.  */
static int
mark_alpha_links_node (node, data)
     splay_tree_node node;
     void *data ATTRIBUTE_UNUSED;
{
  struct alpha_links *links = (struct alpha_links *) node->value;
  ggc_mark_rtx (links->linkage);
  return 0;
}

static void
mark_alpha_links (ptr)
     void *ptr;
{
  splay_tree tree = *(splay_tree *) ptr;
  splay_tree_foreach (tree, mark_alpha_links_node, NULL);
}
/* Make (or fake) .linkage entry for function call.

   IS_LOCAL is 0 if name is used in call, 1 if name is used in definition.

   Return a SYMBOL_REF rtx for the linkage.  */
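
/* Illustrative example, not part of the original sources: for a routine
   `foo', the SYMBOL_REF returned below names `$foo..lk', and
   alpha_write_one_linkage later emits either

        $foo..lk:
                .quad foo..en
                .quad foo

   when foo is defined in this module (KIND_LOCAL), or

        $foo..lk:
                .linkage foo

   when it is external, matching the fprintf calls further down.  */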
rtx
alpha_need_linkage (name, is_local)
     const char *name;
     int is_local;
{
  splay_tree_node node;
  struct alpha_links *al;

  if (alpha_links)
    {
      /* Is this name already defined?  */

      node = splay_tree_lookup (alpha_links, (splay_tree_key) name);
      if (node)
        {
          al = (struct alpha_links *) node->value;
          if (is_local)
            {
              /* Defined here but external assumed.  */
              if (al->kind == KIND_EXTERN)
                al->kind = KIND_LOCAL;
            }
          else
            {
              /* Used here but unused assumed.  */
              if (al->kind == KIND_UNUSED)
                al->kind = KIND_LOCAL;
            }
          return al->linkage;
        }
    }
  else
    {
      alpha_links = splay_tree_new ((splay_tree_compare_fn) strcmp,
                                    (splay_tree_delete_key_fn) free,
                                    (splay_tree_delete_key_fn) free);
      ggc_add_root (&alpha_links, 1, 1, mark_alpha_links);
    }

  al = (struct alpha_links *) xmalloc (sizeof (struct alpha_links));
  name = xstrdup (name);

  /* Assume external if no definition.  */
  al->kind = (is_local ? KIND_UNUSED : KIND_EXTERN);

  /* Ensure we have an IDENTIFIER so assemble_name can mark it used.  */
  get_identifier (name);

  /* Construct a SYMBOL_REF for us to call.  */
  {
    size_t name_len = strlen (name);
    char *linksym = alloca (name_len + 6);

    linksym[0] = '$';
    memcpy (linksym + 1, name, name_len);
    memcpy (linksym + 1 + name_len, "..lk", 5);
    al->linkage = gen_rtx_SYMBOL_REF (Pmode,
                                      ggc_alloc_string (linksym, name_len + 5));
  }

  splay_tree_insert (alpha_links, (splay_tree_key) name,
                     (splay_tree_value) al);

  return al->linkage;
}
8111 alpha_write_one_linkage (node
, data
)
8112 splay_tree_node node
;
8115 const char *const name
= (const char *) node
->key
;
8116 struct alpha_links
*links
= (struct alpha_links
*) node
->value
;
8117 FILE *stream
= (FILE *) data
;
8119 if (links
->kind
== KIND_UNUSED
8120 || ! TREE_SYMBOL_REFERENCED (get_identifier (name
)))
8123 fprintf (stream
, "$%s..lk:\n", name
);
8124 if (links
->kind
== KIND_LOCAL
)
8126 /* Local and used, build linkage pair. */
8127 fprintf (stream
, "\t.quad %s..en\n", name
);
8128 fprintf (stream
, "\t.quad %s\n", name
);
8132 /* External and used, request linkage pair. */
8133 fprintf (stream
, "\t.linkage %s\n", name
);
8140 alpha_write_linkage (stream
)
8145 readonly_section ();
8146 fprintf (stream
, "\t.align 3\n");
8147 splay_tree_foreach (alpha_links
, alpha_write_one_linkage
, stream
);
8151 /* Given a decl, a section name, and whether the decl initializer
8152 has relocs, choose attributes for the section. */
8154 #define SECTION_VMS_OVERLAY SECTION_FORGET
8155 #define SECTION_VMS_GLOBAL SECTION_MACH_DEP
8156 #define SECTION_VMS_INITIALIZE (SECTION_VMS_GLOBAL << 1)
8159 vms_section_type_flags (decl
, name
, reloc
)
8164 unsigned int flags
= default_section_type_flags (decl
, name
, reloc
);
8166 if (decl
&& DECL_ATTRIBUTES (decl
)
8167 && lookup_attribute ("overlaid", DECL_ATTRIBUTES (decl
)))
8168 flags
|= SECTION_VMS_OVERLAY
;
8169 if (decl
&& DECL_ATTRIBUTES (decl
)
8170 && lookup_attribute ("global", DECL_ATTRIBUTES (decl
)))
8171 flags
|= SECTION_VMS_GLOBAL
;
8172 if (decl
&& DECL_ATTRIBUTES (decl
)
8173 && lookup_attribute ("initialize", DECL_ATTRIBUTES (decl
)))
8174 flags
|= SECTION_VMS_INITIALIZE
;
8179 /* Switch to an arbitrary section NAME with attributes as specified
8180 by FLAGS. ALIGN specifies any known alignment requirements for
8181 the section; 0 if the default should be used. */
8184 vms_asm_named_section (name
, flags
)
8188 fputc ('\n', asm_out_file
);
8189 fprintf (asm_out_file
, ".section\t%s", name
);
8191 if (flags
& SECTION_VMS_OVERLAY
)
8192 fprintf (asm_out_file
, ",OVR");
8193 if (flags
& SECTION_VMS_GLOBAL
)
8194 fprintf (asm_out_file
, ",GBL");
8195 if (flags
& SECTION_VMS_INITIALIZE
)
8196 fprintf (asm_out_file
, ",NOMOD");
8197 if (flags
& SECTION_DEBUG
)
8198 fprintf (asm_out_file
, ",NOWRT");
8200 fputc ('\n', asm_out_file
);
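
/* Illustrative example, not part of the original sources: a VMS
   declaration carrying the `overlaid' and `global' attributes is placed
   by the code above in a directive of the form

        .section        NAME,OVR,GBL

   and a debug section additionally receives `,NOWRT'.  */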
/* Record an element in the table of global constructors.  SYMBOL is
   a SYMBOL_REF of the function to be called; PRIORITY is a number
   between 0 and MAX_INIT_PRIORITY.

   Differs from default_ctors_section_asm_out_constructor in that the
   width of the .ctors entry is always 64 bits, rather than the 32 bits
   used by a normal pointer.  */
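
/* Illustrative example, not part of the original sources (the symbol
   name is made up): the entry emitted below occupies a full 64-bit
   word, e.g.

        .align 3
        .quad   my_ctor

   rather than the 32-bit entry that a .long would give.  */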
8212 vms_asm_out_constructor (symbol
, priority
)
8214 int priority ATTRIBUTE_UNUSED
;
8217 assemble_align (BITS_PER_WORD
);
8218 assemble_integer (symbol
, UNITS_PER_WORD
, BITS_PER_WORD
, 1);
8222 vms_asm_out_destructor (symbol
, priority
)
8224 int priority ATTRIBUTE_UNUSED
;
8227 assemble_align (BITS_PER_WORD
);
8228 assemble_integer (symbol
, UNITS_PER_WORD
, BITS_PER_WORD
, 1);
8233 alpha_need_linkage (name
, is_local
)
8234 const char *name ATTRIBUTE_UNUSED
;
8235 int is_local ATTRIBUTE_UNUSED
;
8240 #endif /* TARGET_ABI_OPEN_VMS */
8242 #if TARGET_ABI_UNICOSMK
8244 static void unicosmk_output_module_name
PARAMS ((FILE *));
8245 static void unicosmk_output_default_externs
PARAMS ((FILE *));
8246 static void unicosmk_output_dex
PARAMS ((FILE *));
8247 static void unicosmk_output_externs
PARAMS ((FILE *));
8248 static void unicosmk_output_addr_vec
PARAMS ((FILE *, rtx
));
8249 static const char *unicosmk_ssib_name
PARAMS ((void));
8250 static int unicosmk_special_name
PARAMS ((const char *));
8252 /* Define the offset between two registers, one to be eliminated, and the
8253 other its replacement, at the start of a routine. */
8256 unicosmk_initial_elimination_offset (from
, to
)
8262 fixed_size
= alpha_sa_size();
8263 if (fixed_size
!= 0)
8266 if (from
== FRAME_POINTER_REGNUM
&& to
== HARD_FRAME_POINTER_REGNUM
)
8268 else if (from
== ARG_POINTER_REGNUM
&& to
== HARD_FRAME_POINTER_REGNUM
)
8270 else if (from
== FRAME_POINTER_REGNUM
&& to
== STACK_POINTER_REGNUM
)
8271 return (ALPHA_ROUND (current_function_outgoing_args_size
)
8272 + ALPHA_ROUND (get_frame_size()));
8273 else if (from
== ARG_POINTER_REGNUM
&& to
== STACK_POINTER_REGNUM
)
8274 return (ALPHA_ROUND (fixed_size
)
8275 + ALPHA_ROUND (get_frame_size()
8276 + current_function_outgoing_args_size
));
/* Output the module name for .ident and .end directives.  We have to strip
   directories and make sure that the module name starts with a letter
   or '$'.  */
8286 unicosmk_output_module_name (file
)
8291 /* Strip directories. */
8293 name
= strrchr (main_input_filename
, '/');
8297 name
= main_input_filename
;
8299 /* CAM only accepts module names that start with a letter or '$'. We
8300 prefix the module name with a '$' if necessary. */
8302 if (!ISALPHA (*name
))
8303 fprintf (file
, "$%s", name
);
/* Output text to appear at the beginning of an assembler file.  */
8311 unicosmk_asm_file_start (file
)
8316 fputs ("\t.ident\t", file
);
8317 unicosmk_output_module_name (file
);
8318 fputs ("\n\n", file
);
8320 /* The Unicos/Mk assembler uses different register names. Instead of trying
8321 to support them, we simply use micro definitions. */
8323 /* CAM has different register names: rN for the integer register N and fN
8324 for the floating-point register N. Instead of trying to use these in
8325 alpha.md, we define the symbols $N and $fN to refer to the appropriate
8328 for (i
= 0; i
< 32; ++i
)
8329 fprintf (file
, "$%d <- r%d\n", i
, i
);
8331 for (i
= 0; i
< 32; ++i
)
8332 fprintf (file
, "$f%d <- f%d\n", i
, i
);
  /* The .align directive fills unused space with zeroes, which does not work
     in code sections.  We define the macro 'gcc@code@align' which uses nops
     instead.  Note that it assumes that code sections always have the
     biggest possible alignment since . refers to the current offset from
     the beginning of the section.  */
8342 fputs ("\t.macro gcc@code@align n\n", file
);
8343 fputs ("gcc@n@bytes = 1 << n\n", file
);
8344 fputs ("gcc@here = . % gcc@n@bytes\n", file
);
8345 fputs ("\t.if ne, gcc@here, 0\n", file
);
8346 fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", file
);
8347 fputs ("\tbis r31,r31,r31\n", file
);
8348 fputs ("\t.endr\n", file
);
8349 fputs ("\t.endif\n", file
);
8350 fputs ("\t.endm gcc@code@align\n\n", file
);
8352 /* Output extern declarations which should always be visible. */
8353 unicosmk_output_default_externs (file
);
8355 /* Open a dummy section. We always need to be inside a section for the
8356 section-switching code to work correctly.
8357 ??? This should be a module id or something like that. I still have to
8358 figure out what the rules for those are. */
8359 fputs ("\n\t.psect\t$SG00000,data\n", file
);
8362 /* Output text to appear at the end of an assembler file. This includes all
8363 pending extern declarations and DEX expressions. */
8366 unicosmk_asm_file_end (file
)
8369 fputs ("\t.endp\n\n", file
);
8371 /* Output all pending externs. */
8373 unicosmk_output_externs (file
);
8375 /* Output dex definitions used for functions whose names conflict with
8378 unicosmk_output_dex (file
);
8380 fputs ("\t.end\t", file
);
8381 unicosmk_output_module_name (file
);
8385 /* Output the definition of a common variable. */
8388 unicosmk_output_common (file
, name
, size
, align
)
8395 printf ("T3E__: common %s\n", name
);
8398 fputs("\t.endp\n\n\t.psect ", file
);
8399 assemble_name(file
, name
);
8400 fprintf(file
, ",%d,common\n", floor_log2 (align
/ BITS_PER_UNIT
));
8401 fprintf(file
, "\t.byte\t0:%d\n", size
);
8403 /* Mark the symbol as defined in this module. */
8404 name_tree
= get_identifier (name
);
8405 TREE_ASM_WRITTEN (name_tree
) = 1;
8408 #define SECTION_PUBLIC SECTION_MACH_DEP
8409 #define SECTION_MAIN (SECTION_PUBLIC << 1)
8410 static int current_section_align
;
8413 unicosmk_section_type_flags (decl
, name
, reloc
)
8416 int reloc ATTRIBUTE_UNUSED
;
8418 unsigned int flags
= default_section_type_flags (decl
, name
, reloc
);
8423 if (TREE_CODE (decl
) == FUNCTION_DECL
)
8425 current_section_align
= floor_log2 (FUNCTION_BOUNDARY
/ BITS_PER_UNIT
);
8426 if (align_functions_log
> current_section_align
)
8427 current_section_align
= align_functions_log
;
8429 if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl
)), "main"))
8430 flags
|= SECTION_MAIN
;
8433 current_section_align
= floor_log2 (DECL_ALIGN (decl
) / BITS_PER_UNIT
);
8435 if (TREE_PUBLIC (decl
))
8436 flags
|= SECTION_PUBLIC
;
8441 /* Generate a section name for decl and associate it with the
8445 unicosmk_unique_section (decl
, reloc
)
8447 int reloc ATTRIBUTE_UNUSED
;
8455 name
= IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl
));
8456 STRIP_NAME_ENCODING (name
, name
);
8457 len
= strlen (name
);
8459 if (TREE_CODE (decl
) == FUNCTION_DECL
)
8463 /* It is essential that we prefix the section name here because
8464 otherwise the section names generated for constructors and
8465 destructors confuse collect2. */
8467 string
= alloca (len
+ 6);
8468 sprintf (string
, "code@%s", name
);
8469 DECL_SECTION_NAME (decl
) = build_string (len
+ 5, string
);
8471 else if (TREE_PUBLIC (decl
))
8472 DECL_SECTION_NAME (decl
) = build_string (len
, name
);
8477 string
= alloca (len
+ 6);
8478 sprintf (string
, "data@%s", name
);
8479 DECL_SECTION_NAME (decl
) = build_string (len
+ 5, string
);
8483 /* Switch to an arbitrary section NAME with attributes as specified
8484 by FLAGS. ALIGN specifies any known alignment requirements for
8485 the section; 0 if the default should be used. */
8488 unicosmk_asm_named_section (name
, flags
)
8494 /* Close the previous section. */
8496 fputs ("\t.endp\n\n", asm_out_file
);
8498 /* Find out what kind of section we are opening. */
8500 if (flags
& SECTION_MAIN
)
8501 fputs ("\t.start\tmain\n", asm_out_file
);
8503 if (flags
& SECTION_CODE
)
8505 else if (flags
& SECTION_PUBLIC
)
8510 if (current_section_align
!= 0)
8511 fprintf (asm_out_file
, "\t.psect\t%s,%d,%s\n", name
,
8512 current_section_align
, kind
);
8514 fprintf (asm_out_file
, "\t.psect\t%s,%s\n", name
, kind
);
8518 unicosmk_insert_attributes (decl
, attr_ptr
)
8520 tree
*attr_ptr ATTRIBUTE_UNUSED
;
8523 && (TREE_PUBLIC (decl
) || TREE_CODE (decl
) == FUNCTION_DECL
))
8524 UNIQUE_SECTION (decl
, 0);
/* Output an alignment directive.  We have to use the macro 'gcc@code@align'
   in code sections because .align fills unused space with zeroes.  */
8531 unicosmk_output_align (file
, align
)
8535 if (inside_function
)
8536 fprintf (file
, "\tgcc@code@align\t%d\n", align
);
8538 fprintf (file
, "\t.align\t%d\n", align
);
8541 /* Add a case vector to the current function's list of deferred case
8542 vectors. Case vectors have to be put into a separate section because CAM
8543 does not allow data definitions in code sections. */
8546 unicosmk_defer_case_vector (lab
, vec
)
8550 struct machine_function
*machine
= cfun
->machine
;
8552 vec
= gen_rtx_EXPR_LIST (VOIDmode
, lab
, vec
);
8553 machine
->addr_list
= gen_rtx_EXPR_LIST (VOIDmode
, vec
,
8554 machine
->addr_list
);
8557 /* Output a case vector. */
8560 unicosmk_output_addr_vec (file
, vec
)
8564 rtx lab
= XEXP (vec
, 0);
8565 rtx body
= XEXP (vec
, 1);
8566 int vlen
= XVECLEN (body
, 0);
8569 ASM_OUTPUT_INTERNAL_LABEL (file
, "L", CODE_LABEL_NUMBER (lab
));
8571 for (idx
= 0; idx
< vlen
; idx
++)
8573 ASM_OUTPUT_ADDR_VEC_ELT
8574 (file
, CODE_LABEL_NUMBER (XEXP (XVECEXP (body
, 0, idx
), 0)));
8578 /* Output current function's deferred case vectors. */
8581 unicosmk_output_deferred_case_vectors (file
)
8584 struct machine_function
*machine
= cfun
->machine
;
8587 if (machine
->addr_list
== NULL_RTX
)
8591 for (t
= machine
->addr_list
; t
; t
= XEXP (t
, 1))
8592 unicosmk_output_addr_vec (file
, XEXP (t
, 0));
/* Set up the dynamic subprogram information block (DSIB) and update the
   frame pointer register ($15) for subroutines which have a frame.  If the
   subroutine doesn't have a frame, simply increment $15.  */
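
/* For reference, and only summarizing the stores made below (no new
   information): the 64-byte DSIB is laid out relative to the incoming
   stack pointer with the return address at sp+56, the caller's frame
   pointer at sp+48, the SSIB pointer at sp+32 and the CIW index at
   sp+24; $15 is then set to sp+64.  */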
8600 unicosmk_gen_dsib (imaskP
)
8601 unsigned long * imaskP
;
8603 if (alpha_is_stack_procedure
)
8605 const char *ssib_name
;
8608 /* Allocate 64 bytes for the DSIB. */
8610 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx
, stack_pointer_rtx
,
8612 emit_insn (gen_blockage ());
8614 /* Save the return address. */
8616 mem
= gen_rtx_MEM (DImode
, plus_constant (stack_pointer_rtx
, 56));
8617 set_mem_alias_set (mem
, alpha_sr_alias_set
);
8618 FRP (emit_move_insn (mem
, gen_rtx_REG (DImode
, REG_RA
)));
8619 (*imaskP
) &= ~(1L << REG_RA
);
8621 /* Save the old frame pointer. */
8623 mem
= gen_rtx_MEM (DImode
, plus_constant (stack_pointer_rtx
, 48));
8624 set_mem_alias_set (mem
, alpha_sr_alias_set
);
8625 FRP (emit_move_insn (mem
, hard_frame_pointer_rtx
));
8626 (*imaskP
) &= ~(1L << HARD_FRAME_POINTER_REGNUM
);
8628 emit_insn (gen_blockage ());
8630 /* Store the SSIB pointer. */
8632 ssib_name
= ggc_strdup (unicosmk_ssib_name ());
8633 mem
= gen_rtx_MEM (DImode
, plus_constant (stack_pointer_rtx
, 32));
8634 set_mem_alias_set (mem
, alpha_sr_alias_set
);
8636 FRP (emit_move_insn (gen_rtx_REG (DImode
, 5),
8637 gen_rtx_SYMBOL_REF (Pmode
, ssib_name
)));
8638 FRP (emit_move_insn (mem
, gen_rtx_REG (DImode
, 5)));
8640 /* Save the CIW index. */
8642 mem
= gen_rtx_MEM (DImode
, plus_constant (stack_pointer_rtx
, 24));
8643 set_mem_alias_set (mem
, alpha_sr_alias_set
);
8644 FRP (emit_move_insn (mem
, gen_rtx_REG (DImode
, 25)));
8646 emit_insn (gen_blockage ());
8648 /* Set the new frame pointer. */
8650 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx
,
8651 stack_pointer_rtx
, GEN_INT (64))));
8656 /* Increment the frame pointer register to indicate that we do not
8659 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx
,
8660 hard_frame_pointer_rtx
, GEN_INT (1))));
8664 #define SSIB_PREFIX "__SSIB_"
8665 #define SSIB_PREFIX_LEN 7
8667 /* Generate the name of the SSIB section for the current function. */
8670 unicosmk_ssib_name ()
8672 /* This is ok since CAM won't be able to deal with names longer than that
8675 static char name
[256];
8681 x
= DECL_RTL (cfun
->decl
);
8682 if (GET_CODE (x
) != MEM
)
8685 if (GET_CODE (x
) != SYMBOL_REF
)
8687 fnname
= XSTR (x
, 0);
8688 STRIP_NAME_ENCODING (fnname
, fnname
);
8690 len
= strlen (fnname
);
8691 if (len
+ SSIB_PREFIX_LEN
> 255)
8692 len
= 255 - SSIB_PREFIX_LEN
;
8694 strcpy (name
, SSIB_PREFIX
);
8695 strncpy (name
+ SSIB_PREFIX_LEN
, fnname
, len
);
8696 name
[len
+ SSIB_PREFIX_LEN
] = 0;
8701 /* Output the static subroutine information block for the current
8705 unicosmk_output_ssib (file
, fnname
)
8713 struct machine_function
*machine
= cfun
->machine
;
8716 fprintf (file
, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix
,
8717 unicosmk_ssib_name ());
8719 /* Some required stuff and the function name length. */
8721 len
= strlen (fnname
);
8722 fprintf (file
, "\t.quad\t^X20008%2.2X28\n", len
);
8725 ??? We don't do that yet. */
8727 fputs ("\t.quad\t0\n", file
);
8729 /* Function address. */
8731 fputs ("\t.quad\t", file
);
8732 assemble_name (file
, fnname
);
8735 fputs ("\t.quad\t0\n", file
);
8736 fputs ("\t.quad\t0\n", file
);
8739 ??? We do it the same way Cray CC does it but this could be
8742 for( i
= 0; i
< len
; i
++ )
8743 fprintf (file
, "\t.byte\t%d\n", (int)(fnname
[i
]));
8744 if( (len
% 8) == 0 )
8745 fputs ("\t.quad\t0\n", file
);
8747 fprintf (file
, "\t.bits\t%d : 0\n", (8 - (len
% 8))*8);
8749 /* All call information words used in the function. */
8751 for (x
= machine
->first_ciw
; x
; x
= XEXP (x
, 1))
8754 fprintf (file
, "\t.quad\t");
8755 #if HOST_BITS_PER_WIDE_INT == 32
8756 fprintf (file
, HOST_WIDE_INT_PRINT_DOUBLE_HEX
,
8757 CONST_DOUBLE_HIGH (ciw
), CONST_DOUBLE_LOW (ciw
));
8759 fprintf (file
, HOST_WIDE_INT_PRINT_HEX
, INTVAL (ciw
));
8761 fprintf (file
, "\n");
/* Add a call information word (CIW) to the list of the current function's
   CIWs and return its index.

   X is a CONST_INT or CONST_DOUBLE representing the CIW.  */
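
/* Illustrative example, not part of the original sources: for the first
   CIW of a function whose assembler name is 13 characters long, the
   value returned below is

        1 + 13/8 + 5 == 7

   since ciw_count has just been incremented to 1; the added terms
   account for the fixed SSIB header quadwords and the space occupied
   by the function name.  */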
8771 unicosmk_add_call_info_word (x
)
8775 struct machine_function
*machine
= cfun
->machine
;
8777 node
= gen_rtx_EXPR_LIST (VOIDmode
, x
, NULL_RTX
);
8778 if (machine
->first_ciw
== NULL_RTX
)
8779 machine
->first_ciw
= node
;
8781 XEXP (machine
->last_ciw
, 1) = node
;
8783 machine
->last_ciw
= node
;
8784 ++machine
->ciw_count
;
8786 return GEN_INT (machine
->ciw_count
8787 + strlen (current_function_name
)/8 + 5);
8790 static char unicosmk_section_buf
[100];
8793 unicosmk_text_section ()
8795 static int count
= 0;
8796 sprintf (unicosmk_section_buf
, "\t.endp\n\n\t.psect\tgcc@text___%d,code",
8798 return unicosmk_section_buf
;
8802 unicosmk_data_section ()
8804 static int count
= 1;
8805 sprintf (unicosmk_section_buf
, "\t.endp\n\n\t.psect\tgcc@data___%d,data",
8807 return unicosmk_section_buf
;
/* The Cray assembler doesn't accept extern declarations for symbols which
   are defined in the same file.  We have to keep track of all global
   symbols which are referenced and/or defined in a source file and output
   extern declarations for those which are referenced but not defined at
   the end of the file.  */
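
/* Illustrative example, not part of the original sources: if a module
   references `bar' but never defines it, unicosmk_output_externs below
   emits

        .extern bar

   at the end of the file, while symbols whose definition has been
   written in the meantime (TREE_ASM_WRITTEN set) are skipped.  */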
8816 /* List of identifiers for which an extern declaration might have to be
8819 struct unicosmk_extern_list
8821 struct unicosmk_extern_list
*next
;
8825 static struct unicosmk_extern_list
*unicosmk_extern_head
= 0;
8827 /* Output extern declarations which are required for every asm file. */
8830 unicosmk_output_default_externs (file
)
8833 static const char *const externs
[] =
8834 { "__T3E_MISMATCH" };
8839 n
= ARRAY_SIZE (externs
);
8841 for (i
= 0; i
< n
; i
++)
8842 fprintf (file
, "\t.extern\t%s\n", externs
[i
]);
/* Output extern declarations for global symbols which have been
   referenced but not defined.  */
8849 unicosmk_output_externs (file
)
8852 struct unicosmk_extern_list
*p
;
8853 const char *real_name
;
8857 len
= strlen (user_label_prefix
);
8858 for (p
= unicosmk_extern_head
; p
!= 0; p
= p
->next
)
8860 /* We have to strip the encoding and possibly remove user_label_prefix
8861 from the identifier in order to handle -fleading-underscore and
8862 explicit asm names correctly (cf. gcc.dg/asm-names-1.c). */
8863 STRIP_NAME_ENCODING (real_name
, p
->name
);
8864 if (len
&& p
->name
[0] == '*'
8865 && !memcmp (real_name
, user_label_prefix
, len
))
8868 name_tree
= get_identifier (real_name
);
8869 if (! TREE_ASM_WRITTEN (name_tree
))
8871 TREE_ASM_WRITTEN (name_tree
) = 1;
8872 fputs ("\t.extern\t", file
);
8873 assemble_name (file
, p
->name
);
8879 /* Record an extern. */
8882 unicosmk_add_extern (name
)
8885 struct unicosmk_extern_list
*p
;
8887 p
= (struct unicosmk_extern_list
*)
8888 permalloc (sizeof (struct unicosmk_extern_list
));
8889 p
->next
= unicosmk_extern_head
;
8891 unicosmk_extern_head
= p
;
/* The Cray assembler generates incorrect code if identifiers which
   conflict with register names are used as instruction operands.  We have
   to replace such identifiers with DEX expressions.  */
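
/* Illustrative example, not part of the original sources: a global
   variable named `r3' would be parsed by CAM as integer register 3 if
   it appeared as an operand, so it is referenced as DEX (1) instead
   and unicosmk_output_dex below terminates the file with

        .dexstart
        DEX (1) = r3
        .dexend

   where the number is the symbol's position in unicosmk_dex_list.  */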
8898 /* Structure to collect identifiers which have been replaced by DEX
8901 struct unicosmk_dex
{
8902 struct unicosmk_dex
*next
;
8906 /* List of identifiers which have been replaced by DEX expressions. The DEX
8907 number is determined by the position in the list. */
8909 static struct unicosmk_dex
*unicosmk_dex_list
= NULL
;
8911 /* The number of elements in the DEX list. */
8913 static int unicosmk_dex_count
= 0;
8915 /* Check if NAME must be replaced by a DEX expression. */
8918 unicosmk_special_name (name
)
8927 if (name
[0] != 'r' && name
[0] != 'f' && name
[0] != 'R' && name
[0] != 'F')
8933 return (name
[2] == '\0' || (ISDIGIT (name
[2]) && name
[3] == '\0'));
8936 return (name
[2] == '\0'
8937 || ((name
[2] == '0' || name
[2] == '1') && name
[3] == '\0'));
8940 return (ISDIGIT (name
[1]) && name
[2] == '\0');
8944 /* Return the DEX number if X must be replaced by a DEX expression and 0
8948 unicosmk_need_dex (x
)
8951 struct unicosmk_dex
*dex
;
8955 if (GET_CODE (x
) != SYMBOL_REF
)
8959 if (! unicosmk_special_name (name
))
8962 i
= unicosmk_dex_count
;
8963 for (dex
= unicosmk_dex_list
; dex
; dex
= dex
->next
)
8965 if (! strcmp (name
, dex
->name
))
8970 dex
= (struct unicosmk_dex
*) permalloc (sizeof (struct unicosmk_dex
));
8972 dex
->next
= unicosmk_dex_list
;
8973 unicosmk_dex_list
= dex
;
8975 ++unicosmk_dex_count
;
8976 return unicosmk_dex_count
;
8979 /* Output the DEX definitions for this file. */
8982 unicosmk_output_dex (file
)
8985 struct unicosmk_dex
*dex
;
8988 if (unicosmk_dex_list
== NULL
)
8991 fprintf (file
, "\t.dexstart\n");
8993 i
= unicosmk_dex_count
;
8994 for (dex
= unicosmk_dex_list
; dex
; dex
= dex
->next
)
8996 fprintf (file
, "\tDEX (%d) = ", i
);
8997 assemble_name (file
, dex
->name
);
9002 fprintf (file
, "\t.dexend\n");
9008 unicosmk_output_deferred_case_vectors (file
)
9009 FILE *file ATTRIBUTE_UNUSED
;
9013 unicosmk_gen_dsib (imaskP
)
9014 unsigned long * imaskP ATTRIBUTE_UNUSED
;
9018 unicosmk_output_ssib (file
, fnname
)
9019 FILE * file ATTRIBUTE_UNUSED
;
9020 const char * fnname ATTRIBUTE_UNUSED
;
9024 unicosmk_add_call_info_word (x
)
9025 rtx x ATTRIBUTE_UNUSED
;
9031 unicosmk_need_dex (x
)
9032 rtx x ATTRIBUTE_UNUSED
;
9037 #endif /* TARGET_ABI_UNICOSMK */