1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
4 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
6 This file is part of GNU CC.
8 GNU CC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
13 GNU CC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GNU CC; see the file COPYING. If not, write to
20 the Free Software Foundation, 59 Temple Place - Suite 330,
21 Boston, MA 02111-1307, USA. */
26 #include "coretypes.h"
31 #include "hard-reg-set.h"
33 #include "insn-config.h"
34 #include "conditions.h"
36 #include "insn-attr.h"
47 #include "integrate.h"
50 #include "target-def.h"
52 #include "langhooks.h"
53 #include <splay-tree.h>
54 #include "cfglayout.h"
56 /* Specify which cpu to schedule for. */
58 enum processor_type alpha_cpu
;
59 static const char * const alpha_cpu_name
[] =
64 /* Specify how accurate floating-point traps need to be. */
66 enum alpha_trap_precision alpha_tp
;
68 /* Specify the floating-point rounding mode. */
70 enum alpha_fp_rounding_mode alpha_fprm
;
72 /* Specify which things cause traps. */
74 enum alpha_fp_trap_mode alpha_fptm
;
76 /* Specify bit size of immediate TLS offsets. */
78 int alpha_tls_size
= 32;
80 /* Strings decoded into the above options. */
82 const char *alpha_cpu_string
; /* -mcpu= */
83 const char *alpha_tune_string
; /* -mtune= */
84 const char *alpha_tp_string
; /* -mtrap-precision=[p|s|i] */
85 const char *alpha_fprm_string
; /* -mfp-rounding-mode=[n|m|c|d] */
86 const char *alpha_fptm_string
; /* -mfp-trap-mode=[n|u|su|sui] */
87 const char *alpha_mlat_string
; /* -mmemory-latency= */
88 const char *alpha_tls_size_string
; /* -mtls-size=[16|32|64] */
90 /* Save information from a "cmpxx" operation until the branch or scc is
93 struct alpha_compare alpha_compare
;
95 /* Nonzero if inside of a function, because the Alpha asm can't
96 handle .files inside of functions. */
98 static int inside_function
= FALSE
;
100 /* The number of cycles of latency we should assume on memory reads. */
102 int alpha_memory_latency
= 3;
104 /* Whether the function needs the GP. */
106 static int alpha_function_needs_gp
;
108 /* The alias set for prologue/epilogue register save/restore. */
110 static GTY(()) int alpha_sr_alias_set
;
112 /* The assembler name of the current function. */
114 static const char *alpha_fnname
;
116 /* The next explicit relocation sequence number. */
117 extern GTY(()) int alpha_next_sequence_number
;
118 int alpha_next_sequence_number
= 1;
120 /* The literal and gpdisp sequence numbers for this insn, as printed
121 by %# and %* respectively. */
122 extern GTY(()) int alpha_this_literal_sequence_number
;
123 extern GTY(()) int alpha_this_gpdisp_sequence_number
;
124 int alpha_this_literal_sequence_number
;
125 int alpha_this_gpdisp_sequence_number
;
127 /* Costs of various operations on the different architectures. */
129 struct alpha_rtx_cost_data
131 unsigned char fp_add
;
132 unsigned char fp_mult
;
133 unsigned char fp_div_sf
;
134 unsigned char fp_div_df
;
135 unsigned char int_mult_si
;
136 unsigned char int_mult_di
;
137 unsigned char int_shift
;
138 unsigned char int_cmov
;
141 static struct alpha_rtx_cost_data
const alpha_rtx_cost_data
[PROCESSOR_MAX
] =
144 COSTS_N_INSNS (6), /* fp_add */
145 COSTS_N_INSNS (6), /* fp_mult */
146 COSTS_N_INSNS (34), /* fp_div_sf */
147 COSTS_N_INSNS (63), /* fp_div_df */
148 COSTS_N_INSNS (23), /* int_mult_si */
149 COSTS_N_INSNS (23), /* int_mult_di */
150 COSTS_N_INSNS (2), /* int_shift */
151 COSTS_N_INSNS (2), /* int_cmov */
154 COSTS_N_INSNS (4), /* fp_add */
155 COSTS_N_INSNS (4), /* fp_mult */
156 COSTS_N_INSNS (15), /* fp_div_sf */
157 COSTS_N_INSNS (22), /* fp_div_df */
158 COSTS_N_INSNS (8), /* int_mult_si */
159 COSTS_N_INSNS (12), /* int_mult_di */
160 COSTS_N_INSNS (1) + 1, /* int_shift */
161 COSTS_N_INSNS (1), /* int_cmov */
164 COSTS_N_INSNS (4), /* fp_add */
165 COSTS_N_INSNS (4), /* fp_mult */
166 COSTS_N_INSNS (12), /* fp_div_sf */
167 COSTS_N_INSNS (15), /* fp_div_df */
168 COSTS_N_INSNS (7), /* int_mult_si */
169 COSTS_N_INSNS (7), /* int_mult_di */
170 COSTS_N_INSNS (1), /* int_shift */
171 COSTS_N_INSNS (2), /* int_cmov */
175 /* Declarations of static functions. */
176 static bool alpha_function_ok_for_sibcall
177 PARAMS ((tree
, tree
));
178 static int tls_symbolic_operand_1
179 PARAMS ((rtx
, enum machine_mode
, int, int));
180 static enum tls_model tls_symbolic_operand_type
182 static bool decl_has_samegp
184 static bool alpha_in_small_data_p
186 static rtx get_tls_get_addr
188 static int some_small_symbolic_operand_1
189 PARAMS ((rtx
*, void *));
190 static int split_small_symbolic_operand_1
191 PARAMS ((rtx
*, void *));
192 static bool alpha_cannot_copy_insn_p
194 static bool alpha_rtx_costs
195 PARAMS ((rtx
, int, int, int *));
196 static void alpha_set_memflags_1
197 PARAMS ((rtx
, int, int, int));
198 static rtx alpha_emit_set_const_1
199 PARAMS ((rtx
, enum machine_mode
, HOST_WIDE_INT
, int));
200 static void alpha_expand_unaligned_load_words
201 PARAMS ((rtx
*out_regs
, rtx smem
, HOST_WIDE_INT words
, HOST_WIDE_INT ofs
));
202 static void alpha_expand_unaligned_store_words
203 PARAMS ((rtx
*out_regs
, rtx smem
, HOST_WIDE_INT words
, HOST_WIDE_INT ofs
));
204 static void alpha_init_builtins
206 static rtx alpha_expand_builtin
207 PARAMS ((tree
, rtx
, rtx
, enum machine_mode
, int));
208 static void alpha_sa_mask
209 PARAMS ((unsigned long *imaskP
, unsigned long *fmaskP
));
210 static int find_lo_sum_using_gp
211 PARAMS ((rtx
*, void *));
212 static int alpha_does_function_need_gp
214 static int alpha_ra_ever_killed
216 static const char *get_trap_mode_suffix
218 static const char *get_round_mode_suffix
220 static const char *get_some_local_dynamic_name
222 static int get_some_local_dynamic_name_1
223 PARAMS ((rtx
*, void *));
224 static rtx set_frame_related_p
226 static const char *alpha_lookup_xfloating_lib_func
227 PARAMS ((enum rtx_code
));
228 static int alpha_compute_xfloating_mode_arg
229 PARAMS ((enum rtx_code
, enum alpha_fp_rounding_mode
));
230 static void alpha_emit_xfloating_libcall
231 PARAMS ((const char *, rtx
, rtx
[], int, rtx
));
232 static rtx alpha_emit_xfloating_compare
233 PARAMS ((enum rtx_code
, rtx
, rtx
));
234 static void alpha_output_function_end_prologue
236 static int alpha_adjust_cost
237 PARAMS ((rtx
, rtx
, rtx
, int));
238 static int alpha_issue_rate
240 static int alpha_use_dfa_pipeline_interface
242 static int alpha_multipass_dfa_lookahead
244 static void alpha_reorg
247 #ifdef OBJECT_FORMAT_ELF
248 static void alpha_elf_select_rtx_section
249 PARAMS ((enum machine_mode
, rtx
, unsigned HOST_WIDE_INT
));
252 #if TARGET_ABI_OPEN_VMS
253 static bool alpha_linkage_symbol_p
254 PARAMS ((const char *symname
));
255 static int alpha_write_one_linkage
256 PARAMS ((splay_tree_node
, void *));
257 static void alpha_write_linkage
258 PARAMS ((FILE *, const char *, tree
));
262 static void alpha_output_mi_thunk_osf
263 PARAMS ((FILE *, tree
, HOST_WIDE_INT
, HOST_WIDE_INT
, tree
));
266 static struct machine_function
* alpha_init_machine_status
269 static void unicosmk_output_deferred_case_vectors
PARAMS ((FILE *));
270 static void unicosmk_gen_dsib
PARAMS ((unsigned long *imaskP
));
271 static void unicosmk_output_ssib
PARAMS ((FILE *, const char *));
272 static int unicosmk_need_dex
PARAMS ((rtx
));
273 #if TARGET_ABI_UNICOSMK
274 static void unicosmk_file_end
PARAMS ((void));
277 /* Get the number of args of a function in one of two ways. */
278 #if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
279 #define NUM_ARGS current_function_args_info.num_args
281 #define NUM_ARGS current_function_args_info
287 /* Initialize the GCC target structure. */
288 #if TARGET_ABI_OPEN_VMS
289 const struct attribute_spec vms_attribute_table
[];
290 static unsigned int vms_section_type_flags
PARAMS ((tree
, const char *, int));
291 static void vms_asm_named_section
PARAMS ((const char *, unsigned int));
292 static void vms_asm_out_constructor
PARAMS ((rtx
, int));
293 static void vms_asm_out_destructor
PARAMS ((rtx
, int));
294 # undef TARGET_ATTRIBUTE_TABLE
295 # define TARGET_ATTRIBUTE_TABLE vms_attribute_table
296 # undef TARGET_SECTION_TYPE_FLAGS
297 # define TARGET_SECTION_TYPE_FLAGS vms_section_type_flags
300 #undef TARGET_IN_SMALL_DATA_P
301 #define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
303 #if TARGET_ABI_UNICOSMK
304 static void unicosmk_asm_named_section
PARAMS ((const char *, unsigned int));
305 static void unicosmk_insert_attributes
PARAMS ((tree
, tree
*));
306 static unsigned int unicosmk_section_type_flags
PARAMS ((tree
, const char *,
308 static void unicosmk_unique_section
PARAMS ((tree
, int));
309 # undef TARGET_INSERT_ATTRIBUTES
310 # define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
311 # undef TARGET_SECTION_TYPE_FLAGS
312 # define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
313 # undef TARGET_ASM_UNIQUE_SECTION
314 # define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
315 # undef TARGET_ASM_GLOBALIZE_LABEL
316 # define TARGET_ASM_GLOBALIZE_LABEL hook_void_FILEptr_constcharptr
319 #undef TARGET_ASM_ALIGNED_HI_OP
320 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
321 #undef TARGET_ASM_ALIGNED_DI_OP
322 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
324 /* Default unaligned ops are provided for ELF systems. To get unaligned
325 data for non-ELF systems, we have to turn off auto alignment. */
326 #ifndef OBJECT_FORMAT_ELF
327 #undef TARGET_ASM_UNALIGNED_HI_OP
328 #define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
329 #undef TARGET_ASM_UNALIGNED_SI_OP
330 #define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
331 #undef TARGET_ASM_UNALIGNED_DI_OP
332 #define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
335 #ifdef OBJECT_FORMAT_ELF
336 #undef TARGET_ASM_SELECT_RTX_SECTION
337 #define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
340 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
341 #define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
343 #undef TARGET_SCHED_ADJUST_COST
344 #define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
345 #undef TARGET_SCHED_ISSUE_RATE
346 #define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
347 #undef TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE
348 #define TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE \
349 alpha_use_dfa_pipeline_interface
350 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
351 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
352 alpha_multipass_dfa_lookahead
354 #undef TARGET_HAVE_TLS
355 #define TARGET_HAVE_TLS HAVE_AS_TLS
357 #undef TARGET_INIT_BUILTINS
358 #define TARGET_INIT_BUILTINS alpha_init_builtins
359 #undef TARGET_EXPAND_BUILTIN
360 #define TARGET_EXPAND_BUILTIN alpha_expand_builtin
362 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
363 #define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
364 #undef TARGET_CANNOT_COPY_INSN_P
365 #define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
368 #undef TARGET_ASM_OUTPUT_MI_THUNK
369 #define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
370 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
371 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
374 #undef TARGET_RTX_COSTS
375 #define TARGET_RTX_COSTS alpha_rtx_costs
376 #undef TARGET_ADDRESS_COST
377 #define TARGET_ADDRESS_COST hook_int_rtx_0
379 #undef TARGET_MACHINE_DEPENDENT_REORG
380 #define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
382 struct gcc_target targetm
= TARGET_INITIALIZER
;
384 /* Parse target option strings. */
390 static const struct cpu_table
{
391 const char *const name
;
392 const enum processor_type processor
;
395 #define EV5_MASK (MASK_CPU_EV5)
396 #define EV6_MASK (MASK_CPU_EV6|MASK_BWX|MASK_MAX|MASK_FIX)
397 { "ev4", PROCESSOR_EV4
, 0 },
398 { "ev45", PROCESSOR_EV4
, 0 },
399 { "21064", PROCESSOR_EV4
, 0 },
400 { "ev5", PROCESSOR_EV5
, EV5_MASK
},
401 { "21164", PROCESSOR_EV5
, EV5_MASK
},
402 { "ev56", PROCESSOR_EV5
, EV5_MASK
|MASK_BWX
},
403 { "21164a", PROCESSOR_EV5
, EV5_MASK
|MASK_BWX
},
404 { "pca56", PROCESSOR_EV5
, EV5_MASK
|MASK_BWX
|MASK_MAX
},
405 { "21164PC",PROCESSOR_EV5
, EV5_MASK
|MASK_BWX
|MASK_MAX
},
406 { "21164pc",PROCESSOR_EV5
, EV5_MASK
|MASK_BWX
|MASK_MAX
},
407 { "ev6", PROCESSOR_EV6
, EV6_MASK
},
408 { "21264", PROCESSOR_EV6
, EV6_MASK
},
409 { "ev67", PROCESSOR_EV6
, EV6_MASK
|MASK_CIX
},
410 { "21264a", PROCESSOR_EV6
, EV6_MASK
|MASK_CIX
},
414 /* Unicos/Mk doesn't have shared libraries. */
415 if (TARGET_ABI_UNICOSMK
&& flag_pic
)
417 warning ("-f%s ignored for Unicos/Mk (not supported)",
418 (flag_pic
> 1) ? "PIC" : "pic");
422 /* On Unicos/Mk, the native compiler consistenly generates /d suffices for
423 floating-point instructions. Make that the default for this target. */
424 if (TARGET_ABI_UNICOSMK
)
425 alpha_fprm
= ALPHA_FPRM_DYN
;
427 alpha_fprm
= ALPHA_FPRM_NORM
;
429 alpha_tp
= ALPHA_TP_PROG
;
430 alpha_fptm
= ALPHA_FPTM_N
;
432 /* We cannot use su and sui qualifiers for conversion instructions on
433 Unicos/Mk. I'm not sure if this is due to assembler or hardware
434 limitations. Right now, we issue a warning if -mieee is specified
435 and then ignore it; eventually, we should either get it right or
436 disable the option altogether. */
440 if (TARGET_ABI_UNICOSMK
)
441 warning ("-mieee not supported on Unicos/Mk");
444 alpha_tp
= ALPHA_TP_INSN
;
445 alpha_fptm
= ALPHA_FPTM_SU
;
449 if (TARGET_IEEE_WITH_INEXACT
)
451 if (TARGET_ABI_UNICOSMK
)
452 warning ("-mieee-with-inexact not supported on Unicos/Mk");
455 alpha_tp
= ALPHA_TP_INSN
;
456 alpha_fptm
= ALPHA_FPTM_SUI
;
462 if (! strcmp (alpha_tp_string
, "p"))
463 alpha_tp
= ALPHA_TP_PROG
;
464 else if (! strcmp (alpha_tp_string
, "f"))
465 alpha_tp
= ALPHA_TP_FUNC
;
466 else if (! strcmp (alpha_tp_string
, "i"))
467 alpha_tp
= ALPHA_TP_INSN
;
469 error ("bad value `%s' for -mtrap-precision switch", alpha_tp_string
);
472 if (alpha_fprm_string
)
474 if (! strcmp (alpha_fprm_string
, "n"))
475 alpha_fprm
= ALPHA_FPRM_NORM
;
476 else if (! strcmp (alpha_fprm_string
, "m"))
477 alpha_fprm
= ALPHA_FPRM_MINF
;
478 else if (! strcmp (alpha_fprm_string
, "c"))
479 alpha_fprm
= ALPHA_FPRM_CHOP
;
480 else if (! strcmp (alpha_fprm_string
,"d"))
481 alpha_fprm
= ALPHA_FPRM_DYN
;
483 error ("bad value `%s' for -mfp-rounding-mode switch",
487 if (alpha_fptm_string
)
489 if (strcmp (alpha_fptm_string
, "n") == 0)
490 alpha_fptm
= ALPHA_FPTM_N
;
491 else if (strcmp (alpha_fptm_string
, "u") == 0)
492 alpha_fptm
= ALPHA_FPTM_U
;
493 else if (strcmp (alpha_fptm_string
, "su") == 0)
494 alpha_fptm
= ALPHA_FPTM_SU
;
495 else if (strcmp (alpha_fptm_string
, "sui") == 0)
496 alpha_fptm
= ALPHA_FPTM_SUI
;
498 error ("bad value `%s' for -mfp-trap-mode switch", alpha_fptm_string
);
501 if (alpha_tls_size_string
)
503 if (strcmp (alpha_tls_size_string
, "16") == 0)
505 else if (strcmp (alpha_tls_size_string
, "32") == 0)
507 else if (strcmp (alpha_tls_size_string
, "64") == 0)
510 error ("bad value `%s' for -mtls-size switch", alpha_tls_size_string
);
514 = TARGET_CPU_DEFAULT
& MASK_CPU_EV6
? PROCESSOR_EV6
515 : (TARGET_CPU_DEFAULT
& MASK_CPU_EV5
? PROCESSOR_EV5
: PROCESSOR_EV4
);
517 if (alpha_cpu_string
)
519 for (i
= 0; cpu_table
[i
].name
; i
++)
520 if (! strcmp (alpha_cpu_string
, cpu_table
[i
].name
))
522 alpha_cpu
= cpu_table
[i
].processor
;
523 target_flags
&= ~ (MASK_BWX
| MASK_MAX
| MASK_FIX
| MASK_CIX
524 | MASK_CPU_EV5
| MASK_CPU_EV6
);
525 target_flags
|= cpu_table
[i
].flags
;
528 if (! cpu_table
[i
].name
)
529 error ("bad value `%s' for -mcpu switch", alpha_cpu_string
);
532 if (alpha_tune_string
)
534 for (i
= 0; cpu_table
[i
].name
; i
++)
535 if (! strcmp (alpha_tune_string
, cpu_table
[i
].name
))
537 alpha_cpu
= cpu_table
[i
].processor
;
540 if (! cpu_table
[i
].name
)
541 error ("bad value `%s' for -mcpu switch", alpha_tune_string
);
544 /* Do some sanity checks on the above options. */
546 if (TARGET_ABI_UNICOSMK
&& alpha_fptm
!= ALPHA_FPTM_N
)
548 warning ("trap mode not supported on Unicos/Mk");
549 alpha_fptm
= ALPHA_FPTM_N
;
552 if ((alpha_fptm
== ALPHA_FPTM_SU
|| alpha_fptm
== ALPHA_FPTM_SUI
)
553 && alpha_tp
!= ALPHA_TP_INSN
&& ! TARGET_CPU_EV6
)
555 warning ("fp software completion requires -mtrap-precision=i");
556 alpha_tp
= ALPHA_TP_INSN
;
561 /* Except for EV6 pass 1 (not released), we always have precise
562 arithmetic traps. Which means we can do software completion
563 without minding trap shadows. */
564 alpha_tp
= ALPHA_TP_PROG
;
567 if (TARGET_FLOAT_VAX
)
569 if (alpha_fprm
== ALPHA_FPRM_MINF
|| alpha_fprm
== ALPHA_FPRM_DYN
)
571 warning ("rounding mode not supported for VAX floats");
572 alpha_fprm
= ALPHA_FPRM_NORM
;
574 if (alpha_fptm
== ALPHA_FPTM_SUI
)
576 warning ("trap mode not supported for VAX floats");
577 alpha_fptm
= ALPHA_FPTM_SU
;
585 if (!alpha_mlat_string
)
586 alpha_mlat_string
= "L1";
588 if (ISDIGIT ((unsigned char)alpha_mlat_string
[0])
589 && (lat
= strtol (alpha_mlat_string
, &end
, 10), *end
== '\0'))
591 else if ((alpha_mlat_string
[0] == 'L' || alpha_mlat_string
[0] == 'l')
592 && ISDIGIT ((unsigned char)alpha_mlat_string
[1])
593 && alpha_mlat_string
[2] == '\0')
595 static int const cache_latency
[][4] =
597 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
598 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
599 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
602 lat
= alpha_mlat_string
[1] - '0';
603 if (lat
<= 0 || lat
> 3 || cache_latency
[alpha_cpu
][lat
-1] == -1)
605 warning ("L%d cache latency unknown for %s",
606 lat
, alpha_cpu_name
[alpha_cpu
]);
610 lat
= cache_latency
[alpha_cpu
][lat
-1];
612 else if (! strcmp (alpha_mlat_string
, "main"))
614 /* Most current memories have about 370ns latency. This is
615 a reasonable guess for a fast cpu. */
620 warning ("bad value `%s' for -mmemory-latency", alpha_mlat_string
);
624 alpha_memory_latency
= lat
;
627 /* Default the definition of "small data" to 8 bytes. */
631 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
633 target_flags
|= MASK_SMALL_DATA
;
634 else if (flag_pic
== 2)
635 target_flags
&= ~MASK_SMALL_DATA
;
637 /* Align labels and loops for optimal branching. */
638 /* ??? Kludge these by not doing anything if we don't optimize and also if
639 we are writing ECOFF symbols to work around a bug in DEC's assembler. */
640 if (optimize
> 0 && write_symbols
!= SDB_DEBUG
)
642 if (align_loops
<= 0)
644 if (align_jumps
<= 0)
647 if (align_functions
<= 0)
648 align_functions
= 16;
650 /* Acquire a unique set number for our register saves and restores. */
651 alpha_sr_alias_set
= new_alias_set ();
653 /* Register variables and functions with the garbage collector. */
655 /* Set up function hooks. */
656 init_machine_status
= alpha_init_machine_status
;
658 /* Tell the compiler when we're using VAX floating point. */
659 if (TARGET_FLOAT_VAX
)
661 real_format_for_mode
[SFmode
- QFmode
] = &vax_f_format
;
662 real_format_for_mode
[DFmode
- QFmode
] = &vax_g_format
;
663 real_format_for_mode
[TFmode
- QFmode
] = NULL
;
667 /* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
675 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
/ HOST_BITS_PER_CHAR
;
677 if ((value
& 0xff) != 0 && (value
& 0xff) != 0xff)
683 /* Returns 1 if OP is either the constant zero or a register. If a
684 register, it must be in the proper mode unless MODE is VOIDmode. */
687 reg_or_0_operand (op
, mode
)
689 enum machine_mode mode
;
691 return op
== CONST0_RTX (mode
) || register_operand (op
, mode
);
694 /* Return 1 if OP is a constant in the range of 0-63 (for a shift) or
698 reg_or_6bit_operand (op
, mode
)
700 enum machine_mode mode
;
702 return ((GET_CODE (op
) == CONST_INT
703 && (unsigned HOST_WIDE_INT
) INTVAL (op
) < 64)
704 || register_operand (op
, mode
));
708 /* Return 1 if OP is an 8-bit constant or any register. */
711 reg_or_8bit_operand (op
, mode
)
713 enum machine_mode mode
;
715 return ((GET_CODE (op
) == CONST_INT
716 && (unsigned HOST_WIDE_INT
) INTVAL (op
) < 0x100)
717 || register_operand (op
, mode
));
720 /* Return 1 if OP is a constant or any register. */
723 reg_or_const_int_operand (op
, mode
)
725 enum machine_mode mode
;
727 return GET_CODE (op
) == CONST_INT
|| register_operand (op
, mode
);
730 /* Return 1 if OP is an 8-bit constant. */
733 cint8_operand (op
, mode
)
735 enum machine_mode mode ATTRIBUTE_UNUSED
;
737 return ((GET_CODE (op
) == CONST_INT
738 && (unsigned HOST_WIDE_INT
) INTVAL (op
) < 0x100));
741 /* Return 1 if the operand is a valid second operand to an add insn. */
744 add_operand (op
, mode
)
746 enum machine_mode mode
;
748 if (GET_CODE (op
) == CONST_INT
)
749 /* Constraints I, J, O and P are covered by K. */
750 return (CONST_OK_FOR_LETTER_P (INTVAL (op
), 'K')
751 || CONST_OK_FOR_LETTER_P (INTVAL (op
), 'L'));
753 return register_operand (op
, mode
);
756 /* Return 1 if the operand is a valid second operand to a sign-extending
760 sext_add_operand (op
, mode
)
762 enum machine_mode mode
;
764 if (GET_CODE (op
) == CONST_INT
)
765 return (CONST_OK_FOR_LETTER_P (INTVAL (op
), 'I')
766 || CONST_OK_FOR_LETTER_P (INTVAL (op
), 'O'));
768 return reg_not_elim_operand (op
, mode
);
771 /* Return 1 if OP is the constant 4 or 8. */
774 const48_operand (op
, mode
)
776 enum machine_mode mode ATTRIBUTE_UNUSED
;
778 return (GET_CODE (op
) == CONST_INT
779 && (INTVAL (op
) == 4 || INTVAL (op
) == 8));
782 /* Return 1 if OP is a valid first operand to an AND insn. */
785 and_operand (op
, mode
)
787 enum machine_mode mode
;
789 if (GET_CODE (op
) == CONST_DOUBLE
&& GET_MODE (op
) == VOIDmode
)
790 return (zap_mask (CONST_DOUBLE_LOW (op
))
791 && zap_mask (CONST_DOUBLE_HIGH (op
)));
793 if (GET_CODE (op
) == CONST_INT
)
794 return ((unsigned HOST_WIDE_INT
) INTVAL (op
) < 0x100
795 || (unsigned HOST_WIDE_INT
) ~ INTVAL (op
) < 0x100
796 || zap_mask (INTVAL (op
)));
798 return register_operand (op
, mode
);
801 /* Return 1 if OP is a valid first operand to an IOR or XOR insn. */
804 or_operand (op
, mode
)
806 enum machine_mode mode
;
808 if (GET_CODE (op
) == CONST_INT
)
809 return ((unsigned HOST_WIDE_INT
) INTVAL (op
) < 0x100
810 || (unsigned HOST_WIDE_INT
) ~ INTVAL (op
) < 0x100);
812 return register_operand (op
, mode
);
815 /* Return 1 if OP is a constant that is the width, in bits, of an integral
816 mode smaller than DImode. */
819 mode_width_operand (op
, mode
)
821 enum machine_mode mode ATTRIBUTE_UNUSED
;
823 return (GET_CODE (op
) == CONST_INT
824 && (INTVAL (op
) == 8 || INTVAL (op
) == 16
825 || INTVAL (op
) == 32 || INTVAL (op
) == 64));
828 /* Return 1 if OP is a constant that is the width of an integral machine mode
829 smaller than an integer. */
832 mode_mask_operand (op
, mode
)
834 enum machine_mode mode ATTRIBUTE_UNUSED
;
836 if (GET_CODE (op
) == CONST_INT
)
838 HOST_WIDE_INT value
= INTVAL (op
);
844 if (value
== 0xffffffff)
849 else if (HOST_BITS_PER_WIDE_INT
== 32 && GET_CODE (op
) == CONST_DOUBLE
)
851 if (CONST_DOUBLE_LOW (op
) == 0xffffffff && CONST_DOUBLE_HIGH (op
) == 0)
858 /* Return 1 if OP is a multiple of 8 less than 64. */
861 mul8_operand (op
, mode
)
863 enum machine_mode mode ATTRIBUTE_UNUSED
;
865 return (GET_CODE (op
) == CONST_INT
866 && (unsigned HOST_WIDE_INT
) INTVAL (op
) < 64
867 && (INTVAL (op
) & 7) == 0);
870 /* Return 1 if OP is the zero constant for MODE. */
873 const0_operand (op
, mode
)
875 enum machine_mode mode
;
877 return op
== CONST0_RTX (mode
);
880 /* Return 1 if OP is a hard floating-point register. */
883 hard_fp_register_operand (op
, mode
)
885 enum machine_mode mode
;
887 if (mode
!= VOIDmode
&& GET_MODE (op
) != VOIDmode
&& mode
!= GET_MODE (op
))
890 if (GET_CODE (op
) == SUBREG
)
891 op
= SUBREG_REG (op
);
892 return GET_CODE (op
) == REG
&& REGNO_REG_CLASS (REGNO (op
)) == FLOAT_REGS
;
895 /* Return 1 if OP is a hard general register. */
898 hard_int_register_operand (op
, mode
)
900 enum machine_mode mode
;
902 if (mode
!= VOIDmode
&& GET_MODE (op
) != VOIDmode
&& mode
!= GET_MODE (op
))
905 if (GET_CODE (op
) == SUBREG
)
906 op
= SUBREG_REG (op
);
907 return GET_CODE (op
) == REG
&& REGNO_REG_CLASS (REGNO (op
)) == GENERAL_REGS
;
910 /* Return 1 if OP is a register or a constant integer. */
914 reg_or_cint_operand (op
, mode
)
916 enum machine_mode mode
;
918 return (GET_CODE (op
) == CONST_INT
919 || register_operand (op
, mode
));
922 /* Return 1 if OP is something that can be reloaded into a register;
923 if it is a MEM, it need not be valid. */
/* NOTE(review): extraction artifact — the interior of this function is
   incomplete in this view.  The visible fragments show only the mode
   consistency check, the switch on GET_CODE (op), and the recursive
   SUBREG case; the switch's case labels and its return values were
   dropped by the extraction.  Code below is left byte-identical.  */
926 some_operand (op
, mode
)
928 enum machine_mode mode
;
/* Reject OP if a specific MODE was requested and OP's mode differs.  */
930 if (mode
!= VOIDmode
&& GET_MODE (op
) != VOIDmode
&& mode
!= GET_MODE (op
))
933 switch (GET_CODE (op
))
/* SUBREG case: look through to the inner expression, any mode.  */
947 return some_operand (SUBREG_REG (op
), VOIDmode
);
956 /* Likewise, but don't accept constants. */
959 some_ni_operand (op
, mode
)
961 enum machine_mode mode
;
963 if (GET_MODE (op
) != mode
&& mode
!= VOIDmode
)
966 if (GET_CODE (op
) == SUBREG
)
967 op
= SUBREG_REG (op
);
969 return (GET_CODE (op
) == REG
|| GET_CODE (op
) == MEM
);
972 /* Return 1 if OP is a valid operand for the source of a move insn. */
/* NOTE(review): extraction artifact — this function's switch is
   incomplete in this view: the case labels (SYMBOL_REF, LABEL_REF,
   CONST, HIGH, REG, SUBREG, MEM, CONST_DOUBLE, CONST_INT, ...) and
   several return statements were dropped.  The visible fragments show
   the mode checks and the per-case bodies.  Code below is left
   byte-identical; only review comments were added.  */
975 input_operand (op
, mode
)
977 enum machine_mode mode
;
/* Reject OP if a specific MODE was requested and OP's mode differs.  */
979 if (mode
!= VOIDmode
&& GET_MODE (op
) != VOIDmode
&& mode
!= GET_MODE (op
))
/* Floating-point modes must match exactly.  */
982 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
&& GET_MODE (op
) != mode
)
985 switch (GET_CODE (op
))
/* Symbolic case (label/symbol), with explicit relocations.  */
990 if (TARGET_EXPLICIT_RELOCS
)
992 /* We don't split symbolic operands into something unintelligable
993 until after reload, but we do not wish non-small, non-global
994 symbolic operands to be reconstructed from their high/lo_sum
996 return (small_symbolic_operand (op
, mode
)
997 || global_symbolic_operand (op
, mode
)
998 || gotdtp_symbolic_operand (op
, mode
)
999 || gottp_symbolic_operand (op
, mode
));
1002 /* This handles both the Windows/NT and OSF cases. */
1003 return mode
== ptr_mode
|| mode
== DImode
;
/* HIGH is only a valid input with explicit relocations on a local
   symbol.  */
1006 return (TARGET_EXPLICIT_RELOCS
1007 && local_symbolic_operand (XEXP (op
, 0), mode
));
1014 if (register_operand (op
, mode
))
1016 /* ... fall through ... */
/* MEM: sub-word memory accesses require BWX.  */
1018 return ((TARGET_BWX
|| (mode
!= HImode
&& mode
!= QImode
))
1019 && general_operand (op
, mode
));
1023 return op
== CONST0_RTX (mode
);
1026 return mode
== QImode
|| mode
== HImode
|| add_operand (op
, mode
);
1028 case CONSTANT_P_RTX
:
1038 /* Return 1 if OP is a SYMBOL_REF for a function known to be in this
1039 file, and in the same section as the current function. */
1042 samegp_function_operand (op
, mode
)
1044 enum machine_mode mode ATTRIBUTE_UNUSED
;
1046 if (GET_CODE (op
) != SYMBOL_REF
)
1049 /* Easy test for recursion. */
1050 if (op
== XEXP (DECL_RTL (current_function_decl
), 0))
1053 /* Functions that are not local can be overridden, and thus may
1054 not share the same gp. */
1055 if (! SYMBOL_REF_LOCAL_P (op
))
1058 /* If -msmall-data is in effect, assume that there is only one GP
1059 for the module, and so any local symbol has this property. We
1060 need explicit relocations to be able to enforce this for symbols
1061 not defined in this unit of translation, however. */
1062 if (TARGET_EXPLICIT_RELOCS
&& TARGET_SMALL_DATA
)
1065 /* Functions that are not external are defined in this UoT,
1066 and thus must share the same gp. */
1067 return ! SYMBOL_REF_EXTERNAL_P (op
);
1070 /* Return 1 if OP is a SYMBOL_REF for which we can make a call via bsr. */
/* NOTE(review): extraction artifact — the early-return statements of
   each guard (presumably `return 0;` / `return 1;`) and the braces
   were dropped from this view; the guard conditions and the final
   section comparison are visible.  Code below is left byte-identical;
   only review comments were added.  */
1073 direct_call_operand (op
, mode
)
1075 enum machine_mode mode
;
1077 tree op_decl
, cfun_sec
, op_sec
;
1079 /* Must share the same GP. */
1080 if (!samegp_function_operand (op
, mode
))
1083 /* If profiling is implemented via linker tricks, we can't jump
1084 to the nogp alternate entry point. Note that current_function_profile
1085 would not be correct, since that doesn't indicate if the target
1086 function uses profiling. */
1087 /* ??? TARGET_PROFILING_NEEDS_GP isn't really the right test,
1088 but is approximately correct for the OSF ABIs. Don't know
1089 what to do for VMS, NT, or UMK. */
1090 if (!TARGET_PROFILING_NEEDS_GP
&& profile_flag
)
1093 /* Must be a function. In some cases folks create thunks in static
1094 data structures and then make calls to them. If we allow the
1095 direct call, we'll get an error from the linker about !samegp reloc
1096 against a symbol without a .prologue directive. */
1097 if (!SYMBOL_REF_FUNCTION_P (op
))
1100 /* Must be "near" so that the branch is assumed to reach. With
1101 -msmall-text, this is assumed true of all local symbols. Since
1102 we've already checked samegp, locality is already assured. */
1103 if (TARGET_SMALL_TEXT
)
1106 /* Otherwise, a decl is "near" if it is defined in the same section. */
1107 if (flag_function_sections
)
1110 op_decl
= SYMBOL_REF_DECL (op
);
1111 if (DECL_ONE_ONLY (current_function_decl
)
1112 || (op_decl
&& DECL_ONE_ONLY (op_decl
)))
/* Same section iff both have no section name, or the names match.  */
1115 cfun_sec
= DECL_SECTION_NAME (current_function_decl
);
1116 op_sec
= op_decl
? DECL_SECTION_NAME (op_decl
) : NULL
;
1117 return ((!cfun_sec
&& !op_sec
)
1118 || (cfun_sec
&& op_sec
1119 && strcmp (TREE_STRING_POINTER (cfun_sec
),
1120 TREE_STRING_POINTER (op_sec
)) == 0));
1123 /* Return true if OP is a LABEL_REF, or SYMBOL_REF or CONST referencing
1124 a (non-tls) variable known to be defined in this file. */
1127 local_symbolic_operand (op
, mode
)
1129 enum machine_mode mode
;
1131 if (mode
!= VOIDmode
&& GET_MODE (op
) != VOIDmode
&& mode
!= GET_MODE (op
))
1134 if (GET_CODE (op
) == LABEL_REF
)
1137 if (GET_CODE (op
) == CONST
1138 && GET_CODE (XEXP (op
, 0)) == PLUS
1139 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == CONST_INT
)
1140 op
= XEXP (XEXP (op
, 0), 0);
1142 if (GET_CODE (op
) != SYMBOL_REF
)
1145 return SYMBOL_REF_LOCAL_P (op
) && !SYMBOL_REF_TLS_MODEL (op
);
1148 /* Return true if OP is a SYMBOL_REF or CONST referencing a variable
1149 known to be defined in this file in the small data area. */
1152 small_symbolic_operand (op
, mode
)
1154 enum machine_mode mode ATTRIBUTE_UNUSED
;
1156 if (! TARGET_SMALL_DATA
)
1159 if (mode
!= VOIDmode
&& GET_MODE (op
) != VOIDmode
&& mode
!= GET_MODE (op
))
1162 if (GET_CODE (op
) == CONST
1163 && GET_CODE (XEXP (op
, 0)) == PLUS
1164 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == CONST_INT
)
1165 op
= XEXP (XEXP (op
, 0), 0);
1167 if (GET_CODE (op
) != SYMBOL_REF
)
1170 /* ??? There's no encode_section_info equivalent for the rtl
1171 constant pool, so SYMBOL_FLAG_SMALL never gets set. */
1172 if (CONSTANT_POOL_ADDRESS_P (op
))
1173 return GET_MODE_SIZE (get_pool_mode (op
)) <= g_switch_value
;
1175 return (SYMBOL_REF_LOCAL_P (op
)
1176 && SYMBOL_REF_SMALL_P (op
)
1177 && SYMBOL_REF_TLS_MODEL (op
) == 0);
1180 /* Return true if OP is a SYMBOL_REF or CONST referencing a variable
1181 not known (or known not) to be defined in this file. */
1184 global_symbolic_operand (op
, mode
)
1186 enum machine_mode mode
;
1188 if (mode
!= VOIDmode
&& GET_MODE (op
) != VOIDmode
&& mode
!= GET_MODE (op
))
1191 if (GET_CODE (op
) == CONST
1192 && GET_CODE (XEXP (op
, 0)) == PLUS
1193 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == CONST_INT
)
1194 op
= XEXP (XEXP (op
, 0), 0);
1196 if (GET_CODE (op
) != SYMBOL_REF
)
1199 return !SYMBOL_REF_LOCAL_P (op
) && !SYMBOL_REF_TLS_MODEL (op
);
1202 /* Return 1 if OP is a valid operand for the MEM of a CALL insn. */
/* NOTE(review): extraction artifact — the interior of this function is
   incomplete in this view: the TARGET_ABI_OSF guard, the returns for
   the SYMBOL_REF / Unicos/Mk branches, and the final return were
   dropped.  The visible fragments show the REG case (must be $27 or a
   pseudo, not a virtual register) and the Unicos/Mk + SYMBOL_REF
   guards.  Code below is left byte-identical.  */
1205 call_operand (op
, mode
)
1207 enum machine_mode mode
;
1212 if (GET_CODE (op
) == REG
)
1216 /* Disallow virtual registers to cope with pathalogical test cases
1217 such as compile/930117-1.c in which the virtual reg decomposes
1218 to the frame pointer. Which is a hard reg that is not $27. */
1219 return (REGNO (op
) == 27 || REGNO (op
) > LAST_VIRTUAL_REGISTER
);
1224 if (TARGET_ABI_UNICOSMK
)
1226 if (GET_CODE (op
) == SYMBOL_REF
)
1232 /* Returns 1 if OP is a symbolic operand, i.e. a symbol_ref or a label_ref,
1233 possibly with an offset. */
1236 symbolic_operand (op
, mode
)
1238 enum machine_mode mode
;
1240 if (mode
!= VOIDmode
&& GET_MODE (op
) != VOIDmode
&& mode
!= GET_MODE (op
))
1242 if (GET_CODE (op
) == SYMBOL_REF
|| GET_CODE (op
) == LABEL_REF
)
1244 if (GET_CODE (op
) == CONST
1245 && GET_CODE (XEXP (op
,0)) == PLUS
1246 && GET_CODE (XEXP (XEXP (op
,0), 0)) == SYMBOL_REF
1247 && GET_CODE (XEXP (XEXP (op
,0), 1)) == CONST_INT
)
1252 /* Return true if OP is valid for a particular TLS relocation. */
1255 tls_symbolic_operand_1 (op
, mode
, size
, unspec
)
1257 enum machine_mode mode
;
1260 if (mode
!= VOIDmode
&& GET_MODE (op
) != VOIDmode
&& mode
!= GET_MODE (op
))
1263 if (GET_CODE (op
) != CONST
)
1267 if (GET_CODE (op
) != UNSPEC
|| XINT (op
, 1) != unspec
)
1269 op
= XVECEXP (op
, 0, 0);
1271 if (GET_CODE (op
) != SYMBOL_REF
)
1274 if (SYMBOL_REF_LOCAL_P (op
))
1276 if (alpha_tls_size
> size
)
1285 switch (SYMBOL_REF_TLS_MODEL (op
))
1287 case TLS_MODEL_LOCAL_DYNAMIC
:
1288 return unspec
== UNSPEC_DTPREL
;
1289 case TLS_MODEL_INITIAL_EXEC
:
1290 return unspec
== UNSPEC_TPREL
&& size
== 64;
1291 case TLS_MODEL_LOCAL_EXEC
:
1292 return unspec
== UNSPEC_TPREL
;
1298 /* Return true if OP is valid for 16-bit DTP relative relocations. */
1301 dtp16_symbolic_operand (op
, mode
)
1303 enum machine_mode mode
;
1305 return tls_symbolic_operand_1 (op
, mode
, 16, UNSPEC_DTPREL
);
1308 /* Return true if OP is valid for 32-bit DTP relative relocations. */
1311 dtp32_symbolic_operand (op
, mode
)
1313 enum machine_mode mode
;
1315 return tls_symbolic_operand_1 (op
, mode
, 32, UNSPEC_DTPREL
);
1318 /* Return true if OP is valid for 64-bit DTP relative relocations. */
1321 gotdtp_symbolic_operand (op
, mode
)
1323 enum machine_mode mode
;
1325 return tls_symbolic_operand_1 (op
, mode
, 64, UNSPEC_DTPREL
);
1328 /* Return true if OP is valid for 16-bit TP relative relocations. */
1331 tp16_symbolic_operand (op
, mode
)
1333 enum machine_mode mode
;
1335 return tls_symbolic_operand_1 (op
, mode
, 16, UNSPEC_TPREL
);
1338 /* Return true if OP is valid for 32-bit TP relative relocations. */
1341 tp32_symbolic_operand (op
, mode
)
1343 enum machine_mode mode
;
1345 return tls_symbolic_operand_1 (op
, mode
, 32, UNSPEC_TPREL
);
1348 /* Return true if OP is valid for 64-bit TP relative relocations. */
1351 gottp_symbolic_operand (op
, mode
)
1353 enum machine_mode mode
;
1355 return tls_symbolic_operand_1 (op
, mode
, 64, UNSPEC_TPREL
);
1358 /* Return 1 if OP is a valid Alpha comparison operator. Here we know which
1359 comparisons are valid in which insn. */
1362 alpha_comparison_operator (op
, mode
)
1364 enum machine_mode mode
;
1366 enum rtx_code code
= GET_CODE (op
);
1368 if (mode
!= GET_MODE (op
) && mode
!= VOIDmode
)
1371 return (code
== EQ
|| code
== LE
|| code
== LT
1372 || code
== LEU
|| code
== LTU
);
1375 /* Return 1 if OP is a valid Alpha comparison operator against zero.
1376 Here we know which comparisons are valid in which insn. */
1379 alpha_zero_comparison_operator (op
, mode
)
1381 enum machine_mode mode
;
1383 enum rtx_code code
= GET_CODE (op
);
1385 if (mode
!= GET_MODE (op
) && mode
!= VOIDmode
)
1388 return (code
== EQ
|| code
== NE
|| code
== LE
|| code
== LT
1389 || code
== LEU
|| code
== LTU
);
1392 /* Return 1 if OP is a valid Alpha swapped comparison operator. */
1395 alpha_swapped_comparison_operator (op
, mode
)
1397 enum machine_mode mode
;
1399 enum rtx_code code
= GET_CODE (op
);
1401 if ((mode
!= GET_MODE (op
) && mode
!= VOIDmode
)
1402 || GET_RTX_CLASS (code
) != '<')
1405 code
= swap_condition (code
);
1406 return (code
== EQ
|| code
== LE
|| code
== LT
1407 || code
== LEU
|| code
== LTU
);
1410 /* Return 1 if OP is a signed comparison operation. */
1413 signed_comparison_operator (op
, mode
)
1415 enum machine_mode mode ATTRIBUTE_UNUSED
;
1417 enum rtx_code code
= GET_CODE (op
);
1419 if (mode
!= GET_MODE (op
) && mode
!= VOIDmode
)
1422 return (code
== EQ
|| code
== NE
1423 || code
== LE
|| code
== LT
1424 || code
== GE
|| code
== GT
);
1427 /* Return 1 if OP is a valid Alpha floating point comparison operator.
1428 Here we know which comparisons are valid in which insn. */
1431 alpha_fp_comparison_operator (op
, mode
)
1433 enum machine_mode mode
;
1435 enum rtx_code code
= GET_CODE (op
);
1437 if (mode
!= GET_MODE (op
) && mode
!= VOIDmode
)
1440 return (code
== EQ
|| code
== LE
|| code
== LT
|| code
== UNORDERED
);
1443 /* Return 1 if this is a divide or modulus operator. */
1446 divmod_operator (op
, mode
)
1448 enum machine_mode mode ATTRIBUTE_UNUSED
;
1450 switch (GET_CODE (op
))
1452 case DIV
: case MOD
: case UDIV
: case UMOD
:
1462 /* Return 1 if this memory address is a known aligned register plus
1463 a constant. It must be a valid address. This means that we can do
1464 this as an aligned reference plus some offset.
1466 Take into account what reload will do. */
1469 aligned_memory_operand (op
, mode
)
1471 enum machine_mode mode
;
1475 if (reload_in_progress
)
1478 if (GET_CODE (tmp
) == SUBREG
)
1479 tmp
= SUBREG_REG (tmp
);
1480 if (GET_CODE (tmp
) == REG
1481 && REGNO (tmp
) >= FIRST_PSEUDO_REGISTER
)
1483 op
= reg_equiv_memory_loc
[REGNO (tmp
)];
1489 if (GET_CODE (op
) != MEM
1490 || GET_MODE (op
) != mode
)
1494 /* LEGITIMIZE_RELOAD_ADDRESS creates (plus (plus reg const_hi) const_lo)
1495 sorts of constructs. Dig for the real base register. */
1496 if (reload_in_progress
1497 && GET_CODE (op
) == PLUS
1498 && GET_CODE (XEXP (op
, 0)) == PLUS
)
1499 base
= XEXP (XEXP (op
, 0), 0);
1502 if (! memory_address_p (mode
, op
))
1504 base
= (GET_CODE (op
) == PLUS
? XEXP (op
, 0) : op
);
1507 return (GET_CODE (base
) == REG
&& REGNO_POINTER_ALIGN (REGNO (base
)) >= 32);
1510 /* Similar, but return 1 if OP is a MEM which is not alignable. */
1513 unaligned_memory_operand (op
, mode
)
1515 enum machine_mode mode
;
1519 if (reload_in_progress
)
1522 if (GET_CODE (tmp
) == SUBREG
)
1523 tmp
= SUBREG_REG (tmp
);
1524 if (GET_CODE (tmp
) == REG
1525 && REGNO (tmp
) >= FIRST_PSEUDO_REGISTER
)
1527 op
= reg_equiv_memory_loc
[REGNO (tmp
)];
1533 if (GET_CODE (op
) != MEM
1534 || GET_MODE (op
) != mode
)
1538 /* LEGITIMIZE_RELOAD_ADDRESS creates (plus (plus reg const_hi) const_lo)
1539 sorts of constructs. Dig for the real base register. */
1540 if (reload_in_progress
1541 && GET_CODE (op
) == PLUS
1542 && GET_CODE (XEXP (op
, 0)) == PLUS
)
1543 base
= XEXP (XEXP (op
, 0), 0);
1546 if (! memory_address_p (mode
, op
))
1548 base
= (GET_CODE (op
) == PLUS
? XEXP (op
, 0) : op
);
1551 return (GET_CODE (base
) == REG
&& REGNO_POINTER_ALIGN (REGNO (base
)) < 32);
1554 /* Return 1 if OP is either a register or an unaligned memory location. */
1557 reg_or_unaligned_mem_operand (op
, mode
)
1559 enum machine_mode mode
;
1561 return register_operand (op
, mode
) || unaligned_memory_operand (op
, mode
);
1564 /* Return 1 if OP is any memory location. During reload a pseudo matches. */
1567 any_memory_operand (op
, mode
)
1569 enum machine_mode mode ATTRIBUTE_UNUSED
;
1571 return (GET_CODE (op
) == MEM
1572 || (GET_CODE (op
) == SUBREG
&& GET_CODE (SUBREG_REG (op
)) == REG
)
1573 || (reload_in_progress
&& GET_CODE (op
) == REG
1574 && REGNO (op
) >= FIRST_PSEUDO_REGISTER
)
1575 || (reload_in_progress
&& GET_CODE (op
) == SUBREG
1576 && GET_CODE (SUBREG_REG (op
)) == REG
1577 && REGNO (SUBREG_REG (op
)) >= FIRST_PSEUDO_REGISTER
));
1580 /* Returns 1 if OP is not an eliminable register.
1582 This exists to cure a pathological abort in the s8addq (et al) patterns,
1584 long foo () { long t; bar(); return (long) &t * 26107; }
1586 which run afoul of a hack in reload to cure a (presumably) similar
1587 problem with lea-type instructions on other targets. But there is
1588 one of us and many of them, so work around the problem by selectively
1589 preventing combine from making the optimization. */
1592 reg_not_elim_operand (op
, mode
)
1594 enum machine_mode mode
;
1597 if (GET_CODE (op
) == SUBREG
)
1598 inner
= SUBREG_REG (op
);
1599 if (inner
== frame_pointer_rtx
|| inner
== arg_pointer_rtx
)
1602 return register_operand (op
, mode
);
1605 /* Return 1 is OP is a memory location that is not a reference (using
1606 an AND) to an unaligned location. Take into account what reload
1610 normal_memory_operand (op
, mode
)
1612 enum machine_mode mode ATTRIBUTE_UNUSED
;
1614 if (reload_in_progress
)
1617 if (GET_CODE (tmp
) == SUBREG
)
1618 tmp
= SUBREG_REG (tmp
);
1619 if (GET_CODE (tmp
) == REG
1620 && REGNO (tmp
) >= FIRST_PSEUDO_REGISTER
)
1622 op
= reg_equiv_memory_loc
[REGNO (tmp
)];
1624 /* This may not have been assigned an equivalent address if it will
1625 be eliminated. In that case, it doesn't matter what we do. */
1631 return GET_CODE (op
) == MEM
&& GET_CODE (XEXP (op
, 0)) != AND
;
1634 /* Accept a register, but not a subreg of any kind. This allows us to
1635 avoid pathological cases in reload wrt data movement common in
1636 int->fp conversion. */
1639 reg_no_subreg_operand (op
, mode
)
1641 enum machine_mode mode
;
1643 if (GET_CODE (op
) != REG
)
1645 return register_operand (op
, mode
);
1648 /* Recognize an addition operation that includes a constant. Used to
1649 convince reload to canonize (plus (plus reg c1) c2) during register
1653 addition_operation (op
, mode
)
1655 enum machine_mode mode
;
1657 if (GET_MODE (op
) != mode
&& mode
!= VOIDmode
)
1659 if (GET_CODE (op
) == PLUS
1660 && register_operand (XEXP (op
, 0), mode
)
1661 && GET_CODE (XEXP (op
, 1)) == CONST_INT
1662 && CONST_OK_FOR_LETTER_P (INTVAL (XEXP (op
, 1)), 'K'))
1667 /* Implements CONST_OK_FOR_LETTER_P. Return true if the value matches
1668 the range defined for C in [I-P]. */
1671 alpha_const_ok_for_letter_p (value
, c
)
1672 HOST_WIDE_INT value
;
1678 /* An unsigned 8 bit constant. */
1679 return (unsigned HOST_WIDE_INT
) value
< 0x100;
1681 /* The constant zero. */
1684 /* A signed 16 bit constant. */
1685 return (unsigned HOST_WIDE_INT
) (value
+ 0x8000) < 0x10000;
1687 /* A shifted signed 16 bit constant appropriate for LDAH. */
1688 return ((value
& 0xffff) == 0
1689 && ((value
) >> 31 == -1 || value
>> 31 == 0));
1691 /* A constant that can be AND'ed with using a ZAP insn. */
1692 return zap_mask (value
);
1694 /* A complemented unsigned 8 bit constant. */
1695 return (unsigned HOST_WIDE_INT
) (~ value
) < 0x100;
1697 /* A negated unsigned 8 bit constant. */
1698 return (unsigned HOST_WIDE_INT
) (- value
) < 0x100;
1700 /* The constant 1, 2 or 3. */
1701 return value
== 1 || value
== 2 || value
== 3;
1708 /* Implements CONST_DOUBLE_OK_FOR_LETTER_P. Return true if VALUE
1709 matches for C in [GH]. */
1712 alpha_const_double_ok_for_letter_p (value
, c
)
1719 /* The floating point zero constant. */
1720 return (GET_MODE_CLASS (GET_MODE (value
)) == MODE_FLOAT
1721 && value
== CONST0_RTX (GET_MODE (value
)));
1724 /* A valid operand of a ZAP insn. */
1725 return (GET_MODE (value
) == VOIDmode
1726 && zap_mask (CONST_DOUBLE_LOW (value
))
1727 && zap_mask (CONST_DOUBLE_HIGH (value
)));
1734 /* Implements CONST_DOUBLE_OK_FOR_LETTER_P. Return true if VALUE
1738 alpha_extra_constraint (value
, c
)
1745 return normal_memory_operand (value
, VOIDmode
);
1747 return direct_call_operand (value
, Pmode
);
1749 return (GET_CODE (value
) == CONST_INT
1750 && (unsigned HOST_WIDE_INT
) INTVAL (value
) < 64);
1752 return GET_CODE (value
) == HIGH
;
1754 return TARGET_ABI_UNICOSMK
&& symbolic_operand (value
, VOIDmode
);
1756 return (GET_CODE (value
) == CONST_VECTOR
1757 && value
== CONST0_RTX (GET_MODE (value
)));
1763 /* Return 1 if this function can directly return via $26. */
1768 return (! TARGET_ABI_OPEN_VMS
&& ! TARGET_ABI_UNICOSMK
1770 && alpha_sa_size () == 0
1771 && get_frame_size () == 0
1772 && current_function_outgoing_args_size
== 0
1773 && current_function_pretend_args_size
== 0);
1776 /* Return the ADDR_VEC associated with a tablejump insn. */
1779 alpha_tablejump_addr_vec (insn
)
1784 tmp
= JUMP_LABEL (insn
);
1787 tmp
= NEXT_INSN (tmp
);
1790 if (GET_CODE (tmp
) == JUMP_INSN
1791 && GET_CODE (PATTERN (tmp
)) == ADDR_DIFF_VEC
)
1792 return PATTERN (tmp
);
1796 /* Return the label of the predicted edge, or CONST0_RTX if we don't know. */
1799 alpha_tablejump_best_label (insn
)
1802 rtx jump_table
= alpha_tablejump_addr_vec (insn
);
1803 rtx best_label
= NULL_RTX
;
1805 /* ??? Once the CFG doesn't keep getting completely rebuilt, look
1806 there for edge frequency counts from profile data. */
1810 int n_labels
= XVECLEN (jump_table
, 1);
1811 int best_count
= -1;
1814 for (i
= 0; i
< n_labels
; i
++)
1818 for (j
= i
+ 1; j
< n_labels
; j
++)
1819 if (XEXP (XVECEXP (jump_table
, 1, i
), 0)
1820 == XEXP (XVECEXP (jump_table
, 1, j
), 0))
1823 if (count
> best_count
)
1824 best_count
= count
, best_label
= XVECEXP (jump_table
, 1, i
);
1828 return best_label
? best_label
: const0_rtx
;
1831 /* Return the TLS model to use for SYMBOL. */
1833 static enum tls_model
1834 tls_symbolic_operand_type (symbol
)
1837 enum tls_model model
;
1839 if (GET_CODE (symbol
) != SYMBOL_REF
)
1841 model
= SYMBOL_REF_TLS_MODEL (symbol
);
1843 /* Local-exec with a 64-bit size is the same code as initial-exec. */
1844 if (model
== TLS_MODEL_LOCAL_EXEC
&& alpha_tls_size
== 64)
1845 model
= TLS_MODEL_INITIAL_EXEC
;
1850 /* Return true if the function DECL will share the same GP as any
1851 function in the current unit of translation. */
1854 decl_has_samegp (decl
)
1857 /* Functions that are not local can be overridden, and thus may
1858 not share the same gp. */
1859 if (!(*targetm
.binds_local_p
) (decl
))
1862 /* If -msmall-data is in effect, assume that there is only one GP
1863 for the module, and so any local symbol has this property. We
1864 need explicit relocations to be able to enforce this for symbols
1865 not defined in this unit of translation, however. */
1866 if (TARGET_EXPLICIT_RELOCS
&& TARGET_SMALL_DATA
)
1869 /* Functions that are not external are defined in this UoT. */
1870 /* ??? Irritatingly, static functions not yet emitted are still
1871 marked "external". Apply this to non-static functions only. */
1872 return !TREE_PUBLIC (decl
) || !DECL_EXTERNAL (decl
);
1875 /* Return true if EXP should be placed in the small data section. */
1878 alpha_in_small_data_p (exp
)
1881 /* We want to merge strings, so we never consider them small data. */
1882 if (TREE_CODE (exp
) == STRING_CST
)
1885 if (TREE_CODE (exp
) == VAR_DECL
&& DECL_SECTION_NAME (exp
))
1887 const char *section
= TREE_STRING_POINTER (DECL_SECTION_NAME (exp
));
1888 if (strcmp (section
, ".sdata") == 0
1889 || strcmp (section
, ".sbss") == 0)
1894 HOST_WIDE_INT size
= int_size_in_bytes (TREE_TYPE (exp
));
1896 /* If this is an incomplete type with size 0, then we can't put it
1897 in sdata because it might be too big when completed. */
1898 if (size
> 0 && (unsigned HOST_WIDE_INT
) size
<= g_switch_value
)
#if TARGET_ABI_OPEN_VMS
/* Return true if SYMNAME names a VMS linkage symbol (suffix "..lk").  */

static int
alpha_linkage_symbol_p (symname)
     const char *symname;
{
  int symlen = strlen (symname);

  if (symlen > 4)
    return strcmp (&symname [symlen - 4], "..lk") == 0;

  return false;
}

#define LINKAGE_SYMBOL_REF_P(X) \
  ((GET_CODE (X) == SYMBOL_REF		\
    && alpha_linkage_symbol_p (XSTR (X, 0))) \
   || (GET_CODE (X) == CONST                 \
       && GET_CODE (XEXP (X, 0)) == PLUS     \
       && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
       && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
#endif
1927 /* legitimate_address_p recognizes an RTL expression that is a valid
1928 memory address for an instruction. The MODE argument is the
1929 machine mode for the MEM expression that wants to use this address.
1931 For Alpha, we have either a constant address or the sum of a
1932 register and a constant address, or just a register. For DImode,
1933 any of those forms can be surrounded with an AND that clear the
1934 low-order three bits; this is an "unaligned" access. */
1937 alpha_legitimate_address_p (mode
, x
, strict
)
1938 enum machine_mode mode
;
1942 /* If this is an ldq_u type address, discard the outer AND. */
1944 && GET_CODE (x
) == AND
1945 && GET_CODE (XEXP (x
, 1)) == CONST_INT
1946 && INTVAL (XEXP (x
, 1)) == -8)
1949 /* Discard non-paradoxical subregs. */
1950 if (GET_CODE (x
) == SUBREG
1951 && (GET_MODE_SIZE (GET_MODE (x
))
1952 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x
)))))
1955 /* Unadorned general registers are valid. */
1958 ? STRICT_REG_OK_FOR_BASE_P (x
)
1959 : NONSTRICT_REG_OK_FOR_BASE_P (x
)))
1962 /* Constant addresses (i.e. +/- 32k) are valid. */
1963 if (CONSTANT_ADDRESS_P (x
))
1966 #if TARGET_ABI_OPEN_VMS
1967 if (LINKAGE_SYMBOL_REF_P (x
))
1971 /* Register plus a small constant offset is valid. */
1972 if (GET_CODE (x
) == PLUS
)
1974 rtx ofs
= XEXP (x
, 1);
1977 /* Discard non-paradoxical subregs. */
1978 if (GET_CODE (x
) == SUBREG
1979 && (GET_MODE_SIZE (GET_MODE (x
))
1980 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x
)))))
1986 && NONSTRICT_REG_OK_FP_BASE_P (x
)
1987 && GET_CODE (ofs
) == CONST_INT
)
1990 ? STRICT_REG_OK_FOR_BASE_P (x
)
1991 : NONSTRICT_REG_OK_FOR_BASE_P (x
))
1992 && CONSTANT_ADDRESS_P (ofs
))
1995 else if (GET_CODE (x
) == ADDRESSOF
1996 && GET_CODE (ofs
) == CONST_INT
)
2000 /* If we're managing explicit relocations, LO_SUM is valid, as
2001 are small data symbols. */
2002 else if (TARGET_EXPLICIT_RELOCS
)
2004 if (small_symbolic_operand (x
, Pmode
))
2007 if (GET_CODE (x
) == LO_SUM
)
2009 rtx ofs
= XEXP (x
, 1);
2012 /* Discard non-paradoxical subregs. */
2013 if (GET_CODE (x
) == SUBREG
2014 && (GET_MODE_SIZE (GET_MODE (x
))
2015 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x
)))))
2018 /* Must have a valid base register. */
2021 ? STRICT_REG_OK_FOR_BASE_P (x
)
2022 : NONSTRICT_REG_OK_FOR_BASE_P (x
))))
2025 /* The symbol must be local. */
2026 if (local_symbolic_operand (ofs
, Pmode
)
2027 || dtp32_symbolic_operand (ofs
, Pmode
)
2028 || tp32_symbolic_operand (ofs
, Pmode
))
2036 /* Build the SYMBOL_REF for __tls_get_addr. */
2038 static GTY(()) rtx tls_get_addr_libfunc
;
2043 if (!tls_get_addr_libfunc
)
2044 tls_get_addr_libfunc
= init_one_libfunc ("__tls_get_addr");
2045 return tls_get_addr_libfunc
;
2048 /* Try machine-dependent ways of modifying an illegitimate address
2049 to be legitimate. If we find one, return the new, valid address. */
2052 alpha_legitimize_address (x
, scratch
, mode
)
2055 enum machine_mode mode ATTRIBUTE_UNUSED
;
2057 HOST_WIDE_INT addend
;
2059 /* If the address is (plus reg const_int) and the CONST_INT is not a
2060 valid offset, compute the high part of the constant and add it to
2061 the register. Then our address is (plus temp low-part-const). */
2062 if (GET_CODE (x
) == PLUS
2063 && GET_CODE (XEXP (x
, 0)) == REG
2064 && GET_CODE (XEXP (x
, 1)) == CONST_INT
2065 && ! CONSTANT_ADDRESS_P (XEXP (x
, 1)))
2067 addend
= INTVAL (XEXP (x
, 1));
2072 /* If the address is (const (plus FOO const_int)), find the low-order
2073 part of the CONST_INT. Then load FOO plus any high-order part of the
2074 CONST_INT into a register. Our address is (plus reg low-part-const).
2075 This is done to reduce the number of GOT entries. */
2077 && GET_CODE (x
) == CONST
2078 && GET_CODE (XEXP (x
, 0)) == PLUS
2079 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
)
2081 addend
= INTVAL (XEXP (XEXP (x
, 0), 1));
2082 x
= force_reg (Pmode
, XEXP (XEXP (x
, 0), 0));
2086 /* If we have a (plus reg const), emit the load as in (2), then add
2087 the two registers, and finally generate (plus reg low-part-const) as
2090 && GET_CODE (x
) == PLUS
2091 && GET_CODE (XEXP (x
, 0)) == REG
2092 && GET_CODE (XEXP (x
, 1)) == CONST
2093 && GET_CODE (XEXP (XEXP (x
, 1), 0)) == PLUS
2094 && GET_CODE (XEXP (XEXP (XEXP (x
, 1), 0), 1)) == CONST_INT
)
2096 addend
= INTVAL (XEXP (XEXP (XEXP (x
, 1), 0), 1));
2097 x
= expand_simple_binop (Pmode
, PLUS
, XEXP (x
, 0),
2098 XEXP (XEXP (XEXP (x
, 1), 0), 0),
2099 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2103 /* If this is a local symbol, split the address into HIGH/LO_SUM parts. */
2104 if (TARGET_EXPLICIT_RELOCS
&& symbolic_operand (x
, Pmode
))
2106 rtx r0
, r16
, eqv
, tga
, tp
, insn
, dest
, seq
;
2108 switch (tls_symbolic_operand_type (x
))
2110 case TLS_MODEL_GLOBAL_DYNAMIC
:
2113 r0
= gen_rtx_REG (Pmode
, 0);
2114 r16
= gen_rtx_REG (Pmode
, 16);
2115 tga
= get_tls_get_addr ();
2116 dest
= gen_reg_rtx (Pmode
);
2117 seq
= GEN_INT (alpha_next_sequence_number
++);
2119 emit_insn (gen_movdi_er_tlsgd (r16
, pic_offset_table_rtx
, x
, seq
));
2120 insn
= gen_call_value_osf_tlsgd (r0
, tga
, seq
);
2121 insn
= emit_call_insn (insn
);
2122 CONST_OR_PURE_CALL_P (insn
) = 1;
2123 use_reg (&CALL_INSN_FUNCTION_USAGE (insn
), r16
);
2125 insn
= get_insns ();
2128 emit_libcall_block (insn
, dest
, r0
, x
);
2131 case TLS_MODEL_LOCAL_DYNAMIC
:
2134 r0
= gen_rtx_REG (Pmode
, 0);
2135 r16
= gen_rtx_REG (Pmode
, 16);
2136 tga
= get_tls_get_addr ();
2137 scratch
= gen_reg_rtx (Pmode
);
2138 seq
= GEN_INT (alpha_next_sequence_number
++);
2140 emit_insn (gen_movdi_er_tlsldm (r16
, pic_offset_table_rtx
, seq
));
2141 insn
= gen_call_value_osf_tlsldm (r0
, tga
, seq
);
2142 insn
= emit_call_insn (insn
);
2143 CONST_OR_PURE_CALL_P (insn
) = 1;
2144 use_reg (&CALL_INSN_FUNCTION_USAGE (insn
), r16
);
2146 insn
= get_insns ();
2149 eqv
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, const0_rtx
),
2150 UNSPEC_TLSLDM_CALL
);
2151 emit_libcall_block (insn
, scratch
, r0
, eqv
);
2153 eqv
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, x
), UNSPEC_DTPREL
);
2154 eqv
= gen_rtx_CONST (Pmode
, eqv
);
2156 if (alpha_tls_size
== 64)
2158 dest
= gen_reg_rtx (Pmode
);
2159 emit_insn (gen_rtx_SET (VOIDmode
, dest
, eqv
));
2160 emit_insn (gen_adddi3 (dest
, dest
, scratch
));
2163 if (alpha_tls_size
== 32)
2165 insn
= gen_rtx_HIGH (Pmode
, eqv
);
2166 insn
= gen_rtx_PLUS (Pmode
, scratch
, insn
);
2167 scratch
= gen_reg_rtx (Pmode
);
2168 emit_insn (gen_rtx_SET (VOIDmode
, scratch
, insn
));
2170 return gen_rtx_LO_SUM (Pmode
, scratch
, eqv
);
2172 case TLS_MODEL_INITIAL_EXEC
:
2173 eqv
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, x
), UNSPEC_TPREL
);
2174 eqv
= gen_rtx_CONST (Pmode
, eqv
);
2175 tp
= gen_reg_rtx (Pmode
);
2176 scratch
= gen_reg_rtx (Pmode
);
2177 dest
= gen_reg_rtx (Pmode
);
2179 emit_insn (gen_load_tp (tp
));
2180 emit_insn (gen_rtx_SET (VOIDmode
, scratch
, eqv
));
2181 emit_insn (gen_adddi3 (dest
, tp
, scratch
));
2184 case TLS_MODEL_LOCAL_EXEC
:
2185 eqv
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, x
), UNSPEC_TPREL
);
2186 eqv
= gen_rtx_CONST (Pmode
, eqv
);
2187 tp
= gen_reg_rtx (Pmode
);
2189 emit_insn (gen_load_tp (tp
));
2190 if (alpha_tls_size
== 32)
2192 insn
= gen_rtx_HIGH (Pmode
, eqv
);
2193 insn
= gen_rtx_PLUS (Pmode
, tp
, insn
);
2194 tp
= gen_reg_rtx (Pmode
);
2195 emit_insn (gen_rtx_SET (VOIDmode
, tp
, insn
));
2197 return gen_rtx_LO_SUM (Pmode
, tp
, eqv
);
2200 if (local_symbolic_operand (x
, Pmode
))
2202 if (small_symbolic_operand (x
, Pmode
))
2206 if (!no_new_pseudos
)
2207 scratch
= gen_reg_rtx (Pmode
);
2208 emit_insn (gen_rtx_SET (VOIDmode
, scratch
,
2209 gen_rtx_HIGH (Pmode
, x
)));
2210 return gen_rtx_LO_SUM (Pmode
, scratch
, x
);
2219 HOST_WIDE_INT low
, high
;
2221 low
= ((addend
& 0xffff) ^ 0x8000) - 0x8000;
2223 high
= ((addend
& 0xffffffff) ^ 0x80000000) - 0x80000000;
2227 x
= expand_simple_binop (Pmode
, PLUS
, x
, GEN_INT (addend
),
2228 (no_new_pseudos
? scratch
: NULL_RTX
),
2229 1, OPTAB_LIB_WIDEN
);
2231 x
= expand_simple_binop (Pmode
, PLUS
, x
, GEN_INT (high
),
2232 (no_new_pseudos
? scratch
: NULL_RTX
),
2233 1, OPTAB_LIB_WIDEN
);
2235 return plus_constant (x
, low
);
2239 /* We do not allow indirect calls to be optimized into sibling calls, nor
2240 can we allow a call to a function with a different GP to be optimized
2244 alpha_function_ok_for_sibcall (decl
, exp
)
2246 tree exp ATTRIBUTE_UNUSED
;
2248 /* Can't do indirect tail calls, since we don't know if the target
2249 uses the same GP. */
2253 /* Otherwise, we can make a tail call if the target function shares
2255 return decl_has_samegp (decl
);
2258 /* For TARGET_EXPLICIT_RELOCS, we don't obfuscate a SYMBOL_REF to a
2259 small symbolic operand until after reload. At which point we need
2260 to replace (mem (symbol_ref)) with (mem (lo_sum $29 symbol_ref))
2261 so that sched2 has the proper dependency information. */
2264 some_small_symbolic_operand (x
, mode
)
2266 enum machine_mode mode ATTRIBUTE_UNUSED
;
2268 return for_each_rtx (&x
, some_small_symbolic_operand_1
, NULL
);
2272 some_small_symbolic_operand_1 (px
, data
)
2274 void *data ATTRIBUTE_UNUSED
;
2278 /* Don't re-split. */
2279 if (GET_CODE (x
) == LO_SUM
)
2282 return small_symbolic_operand (x
, Pmode
) != 0;
2286 split_small_symbolic_operand (x
)
2290 for_each_rtx (&x
, split_small_symbolic_operand_1
, NULL
);
2295 split_small_symbolic_operand_1 (px
, data
)
2297 void *data ATTRIBUTE_UNUSED
;
2301 /* Don't re-split. */
2302 if (GET_CODE (x
) == LO_SUM
)
2305 if (small_symbolic_operand (x
, Pmode
))
2307 x
= gen_rtx_LO_SUM (Pmode
, pic_offset_table_rtx
, x
);
2315 /* Indicate that INSN cannot be duplicated. This is true for any insn
2316 that we've marked with gpdisp relocs, since those have to stay in
2317 1-1 correspondence with one another.
2319 Techinically we could copy them if we could set up a mapping from one
2320 sequence number to another, across the set of insns to be duplicated.
2321 This seems overly complicated and error-prone since interblock motion
2322 from sched-ebb could move one of the pair of insns to a different block. */
2325 alpha_cannot_copy_insn_p (insn
)
2330 if (!reload_completed
|| !TARGET_EXPLICIT_RELOCS
)
2333 if (GET_CODE (insn
) != INSN
)
2335 if (asm_noperands (insn
) >= 0)
2338 pat
= PATTERN (insn
);
2339 if (GET_CODE (pat
) != SET
)
2341 pat
= SET_SRC (pat
);
2342 if (GET_CODE (pat
) == UNSPEC_VOLATILE
)
2344 if (XINT (pat
, 1) == UNSPECV_LDGP1
2345 || XINT (pat
, 1) == UNSPECV_PLDGP2
)
2348 else if (GET_CODE (pat
) == UNSPEC
)
2350 if (XINT (pat
, 1) == UNSPEC_LDGP2
)
2358 /* Try a machine-dependent way of reloading an illegitimate address
2359 operand. If we find one, push the reload and return the new rtx. */
2362 alpha_legitimize_reload_address (x
, mode
, opnum
, type
, ind_levels
)
2364 enum machine_mode mode ATTRIBUTE_UNUSED
;
2367 int ind_levels ATTRIBUTE_UNUSED
;
2369 /* We must recognize output that we have already generated ourselves. */
2370 if (GET_CODE (x
) == PLUS
2371 && GET_CODE (XEXP (x
, 0)) == PLUS
2372 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == REG
2373 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
2374 && GET_CODE (XEXP (x
, 1)) == CONST_INT
)
2376 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
2377 BASE_REG_CLASS
, GET_MODE (x
), VOIDmode
, 0, 0,
2382 /* We wish to handle large displacements off a base register by
2383 splitting the addend across an ldah and the mem insn. This
2384 cuts number of extra insns needed from 3 to 1. */
2385 if (GET_CODE (x
) == PLUS
2386 && GET_CODE (XEXP (x
, 0)) == REG
2387 && REGNO (XEXP (x
, 0)) < FIRST_PSEUDO_REGISTER
2388 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x
, 0)))
2389 && GET_CODE (XEXP (x
, 1)) == CONST_INT
)
2391 HOST_WIDE_INT val
= INTVAL (XEXP (x
, 1));
2392 HOST_WIDE_INT low
= ((val
& 0xffff) ^ 0x8000) - 0x8000;
2394 = (((val
- low
) & 0xffffffff) ^ 0x80000000) - 0x80000000;
2396 /* Check for 32-bit overflow. */
2397 if (high
+ low
!= val
)
2400 /* Reload the high part into a base reg; leave the low part
2401 in the mem directly. */
2402 x
= gen_rtx_PLUS (GET_MODE (x
),
2403 gen_rtx_PLUS (GET_MODE (x
), XEXP (x
, 0),
2407 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
2408 BASE_REG_CLASS
, GET_MODE (x
), VOIDmode
, 0, 0,
2416 /* Compute a (partial) cost for rtx X. Return true if the complete
2417 cost has been computed, and false if subexpressions should be
2418 scanned. In either case, *TOTAL contains the cost result. */
2421 alpha_rtx_costs (x
, code
, outer_code
, total
)
2423 int code
, outer_code
;
2426 enum machine_mode mode
= GET_MODE (x
);
2427 bool float_mode_p
= FLOAT_MODE_P (mode
);
2431 /* If this is an 8-bit constant, return zero since it can be used
2432 nearly anywhere with no cost. If it is a valid operand for an
2433 ADD or AND, likewise return 0 if we know it will be used in that
2434 context. Otherwise, return 2 since it might be used there later.
2435 All other constants take at least two insns. */
2437 if (INTVAL (x
) >= 0 && INTVAL (x
) < 256)
2445 if (x
== CONST0_RTX (mode
))
2447 else if ((outer_code
== PLUS
&& add_operand (x
, VOIDmode
))
2448 || (outer_code
== AND
&& and_operand (x
, VOIDmode
)))
2450 else if (add_operand (x
, VOIDmode
) || and_operand (x
, VOIDmode
))
2453 *total
= COSTS_N_INSNS (2);
2459 if (TARGET_EXPLICIT_RELOCS
&& small_symbolic_operand (x
, VOIDmode
))
2460 *total
= COSTS_N_INSNS (outer_code
!= MEM
);
2461 else if (TARGET_EXPLICIT_RELOCS
&& local_symbolic_operand (x
, VOIDmode
))
2462 *total
= COSTS_N_INSNS (1 + (outer_code
!= MEM
));
2463 else if (tls_symbolic_operand_type (x
))
2464 /* Estimate of cost for call_pal rduniq. */
2465 *total
= COSTS_N_INSNS (15);
2467 /* Otherwise we do a load from the GOT. */
2468 *total
= COSTS_N_INSNS (alpha_memory_latency
);
2474 *total
= alpha_rtx_cost_data
[alpha_cpu
].fp_add
;
2475 else if (GET_CODE (XEXP (x
, 0)) == MULT
2476 && const48_operand (XEXP (XEXP (x
, 0), 1), VOIDmode
))
2478 *total
= (rtx_cost (XEXP (XEXP (x
, 0), 0), outer_code
)
2479 + rtx_cost (XEXP (x
, 1), outer_code
) + 2);
2486 *total
= alpha_rtx_cost_data
[alpha_cpu
].fp_mult
;
2487 else if (mode
== DImode
)
2488 *total
= alpha_rtx_cost_data
[alpha_cpu
].int_mult_di
;
2490 *total
= alpha_rtx_cost_data
[alpha_cpu
].int_mult_si
;
2494 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
2495 && INTVAL (XEXP (x
, 1)) <= 3)
2497 *total
= COSTS_N_INSNS (1);
2504 *total
= alpha_rtx_cost_data
[alpha_cpu
].int_shift
;
2509 *total
= alpha_rtx_cost_data
[alpha_cpu
].fp_add
;
2511 *total
= alpha_rtx_cost_data
[alpha_cpu
].int_cmov
;
2519 *total
= COSTS_N_INSNS (70); /* ??? */
2520 else if (mode
== SFmode
)
2521 *total
= alpha_rtx_cost_data
[alpha_cpu
].fp_div_sf
;
2523 *total
= alpha_rtx_cost_data
[alpha_cpu
].fp_div_df
;
2527 *total
= COSTS_N_INSNS (alpha_memory_latency
);
2533 *total
= COSTS_N_INSNS (1);
2541 *total
= COSTS_N_INSNS (1) + alpha_rtx_cost_data
[alpha_cpu
].int_cmov
;
2547 case UNSIGNED_FLOAT
:
2551 case FLOAT_TRUNCATE
:
2552 *total
= alpha_rtx_cost_data
[alpha_cpu
].fp_add
;
2560 /* REF is an alignable memory location. Place an aligned SImode
2561 reference into *PALIGNED_MEM and the number of bits to shift into
2562 *PBITNUM. SCRATCH is a free register for use in reloading out
2563 of range stack slots. */
2566 get_aligned_mem (ref
, paligned_mem
, pbitnum
)
2568 rtx
*paligned_mem
, *pbitnum
;
2571 HOST_WIDE_INT offset
= 0;
2573 if (GET_CODE (ref
) != MEM
)
2576 if (reload_in_progress
2577 && ! memory_address_p (GET_MODE (ref
), XEXP (ref
, 0)))
2579 base
= find_replacement (&XEXP (ref
, 0));
2581 if (! memory_address_p (GET_MODE (ref
), base
))
2586 base
= XEXP (ref
, 0);
2589 if (GET_CODE (base
) == PLUS
)
2590 offset
+= INTVAL (XEXP (base
, 1)), base
= XEXP (base
, 0);
2593 = widen_memory_access (ref
, SImode
, (offset
& ~3) - offset
);
2595 if (WORDS_BIG_ENDIAN
)
2596 *pbitnum
= GEN_INT (32 - (GET_MODE_BITSIZE (GET_MODE (ref
))
2597 + (offset
& 3) * 8));
2599 *pbitnum
= GEN_INT ((offset
& 3) * 8);
2602 /* Similar, but just get the address. Handle the two reload cases.
2603 Add EXTRA_OFFSET to the address we return. */
2606 get_unaligned_address (ref
, extra_offset
)
2611 HOST_WIDE_INT offset
= 0;
2613 if (GET_CODE (ref
) != MEM
)
2616 if (reload_in_progress
2617 && ! memory_address_p (GET_MODE (ref
), XEXP (ref
, 0)))
2619 base
= find_replacement (&XEXP (ref
, 0));
2621 if (! memory_address_p (GET_MODE (ref
), base
))
2626 base
= XEXP (ref
, 0);
2629 if (GET_CODE (base
) == PLUS
)
2630 offset
+= INTVAL (XEXP (base
, 1)), base
= XEXP (base
, 0);
2632 return plus_constant (base
, offset
+ extra_offset
);
2635 /* On the Alpha, all (non-symbolic) constants except zero go into
2636 a floating-point register via memory. Note that we cannot
2637 return anything that is not a subset of CLASS, and that some
2638 symbolic constants cannot be dropped to memory. */
2641 alpha_preferred_reload_class(x
, class)
2643 enum reg_class
class;
2645 /* Zero is present in any register class. */
2646 if (x
== CONST0_RTX (GET_MODE (x
)))
2649 /* These sorts of constants we can easily drop to memory. */
2650 if (GET_CODE (x
) == CONST_INT
|| GET_CODE (x
) == CONST_DOUBLE
)
2652 if (class == FLOAT_REGS
)
2654 if (class == ALL_REGS
)
2655 return GENERAL_REGS
;
2659 /* All other kinds of constants should not (and in the case of HIGH
2660 cannot) be dropped to memory -- instead we use a GENERAL_REGS
2661 secondary reload. */
2663 return (class == ALL_REGS
? GENERAL_REGS
: class);
2668 /* Loading and storing HImode or QImode values to and from memory
2669 usually requires a scratch register. The exceptions are loading
2670 QImode and HImode from an aligned address to a general register
2671 unless byte instructions are permitted.
2673 We also cannot load an unaligned address or a paradoxical SUBREG
2674 into an FP register.
2676 We also cannot do integral arithmetic into FP regs, as might result
2677 from register elimination into a DImode fp register. */
2680 secondary_reload_class (class, mode
, x
, in
)
2681 enum reg_class
class;
2682 enum machine_mode mode
;
2686 if ((mode
== QImode
|| mode
== HImode
) && ! TARGET_BWX
)
2688 if (GET_CODE (x
) == MEM
2689 || (GET_CODE (x
) == REG
&& REGNO (x
) >= FIRST_PSEUDO_REGISTER
)
2690 || (GET_CODE (x
) == SUBREG
2691 && (GET_CODE (SUBREG_REG (x
)) == MEM
2692 || (GET_CODE (SUBREG_REG (x
)) == REG
2693 && REGNO (SUBREG_REG (x
)) >= FIRST_PSEUDO_REGISTER
))))
2695 if (!in
|| !aligned_memory_operand(x
, mode
))
2696 return GENERAL_REGS
;
2700 if (class == FLOAT_REGS
)
2702 if (GET_CODE (x
) == MEM
&& GET_CODE (XEXP (x
, 0)) == AND
)
2703 return GENERAL_REGS
;
2705 if (GET_CODE (x
) == SUBREG
2706 && (GET_MODE_SIZE (GET_MODE (x
))
2707 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x
)))))
2708 return GENERAL_REGS
;
2710 if (in
&& INTEGRAL_MODE_P (mode
)
2711 && ! (memory_operand (x
, mode
) || x
== const0_rtx
))
2712 return GENERAL_REGS
;
2718 /* Subfunction of the following function. Update the flags of any MEM
2719 found in part of X. */
2722 alpha_set_memflags_1 (x
, in_struct_p
, volatile_p
, unchanging_p
)
2724 int in_struct_p
, volatile_p
, unchanging_p
;
2728 switch (GET_CODE (x
))
2734 for (i
= XVECLEN (x
, 0) - 1; i
>= 0; i
--)
2735 alpha_set_memflags_1 (XVECEXP (x
, 0, i
), in_struct_p
, volatile_p
,
2740 alpha_set_memflags_1 (PATTERN (x
), in_struct_p
, volatile_p
,
2745 alpha_set_memflags_1 (SET_DEST (x
), in_struct_p
, volatile_p
,
2747 alpha_set_memflags_1 (SET_SRC (x
), in_struct_p
, volatile_p
,
2752 MEM_IN_STRUCT_P (x
) = in_struct_p
;
2753 MEM_VOLATILE_P (x
) = volatile_p
;
2754 RTX_UNCHANGING_P (x
) = unchanging_p
;
2755 /* Sadly, we cannot use alias sets because the extra aliasing
2756 produced by the AND interferes. Given that two-byte quantities
2757 are the only thing we would be able to differentiate anyway,
2758 there does not seem to be any point in convoluting the early
2759 out of the alias check. */
2767 /* Given INSN, which is an INSN list or the PATTERN of a single insn
2768 generated to perform a memory operation, look for any MEMs in either
2769 a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
2770 volatile flags from REF into each of the MEMs found. If REF is not
2771 a MEM, don't do anything. */
2774 alpha_set_memflags (insn
, ref
)
2778 int in_struct_p
, volatile_p
, unchanging_p
;
2780 if (GET_CODE (ref
) != MEM
)
2783 in_struct_p
= MEM_IN_STRUCT_P (ref
);
2784 volatile_p
= MEM_VOLATILE_P (ref
);
2785 unchanging_p
= RTX_UNCHANGING_P (ref
);
2787 /* This is only called from alpha.md, after having had something
2788 generated from one of the insn patterns. So if everything is
2789 zero, the pattern is already up-to-date. */
2790 if (! in_struct_p
&& ! volatile_p
&& ! unchanging_p
)
2793 alpha_set_memflags_1 (insn
, in_struct_p
, volatile_p
, unchanging_p
);
2796 /* Try to output insns to set TARGET equal to the constant C if it can be
2797 done in less than N insns. Do all computations in MODE. Returns the place
2798 where the output has been placed if it can be done and the insns have been
2799 emitted. If it would take more than N insns, zero is returned and no
2800 insns and emitted. */
2803 alpha_emit_set_const (target
, mode
, c
, n
)
2805 enum machine_mode mode
;
2810 rtx orig_target
= target
;
2813 /* If we can't make any pseudos, TARGET is an SImode hard register, we
2814 can't load this constant in one insn, do this in DImode. */
2815 if (no_new_pseudos
&& mode
== SImode
2816 && GET_CODE (target
) == REG
&& REGNO (target
) < FIRST_PSEUDO_REGISTER
2817 && (result
= alpha_emit_set_const_1 (target
, mode
, c
, 1)) == 0)
2819 target
= gen_lowpart (DImode
, target
);
2823 /* Try 1 insn, then 2, then up to N. */
2824 for (i
= 1; i
<= n
; i
++)
2826 result
= alpha_emit_set_const_1 (target
, mode
, c
, i
);
2829 rtx insn
= get_last_insn ();
2830 rtx set
= single_set (insn
);
2831 if (! CONSTANT_P (SET_SRC (set
)))
2832 set_unique_reg_note (get_last_insn (), REG_EQUAL
, GEN_INT (c
));
2837 /* Allow for the case where we changed the mode of TARGET. */
2838 if (result
== target
)
2839 result
= orig_target
;
2844 /* Internal routine for the above to check for N or below insns. */
2847 alpha_emit_set_const_1 (target
, mode
, c
, n
)
2849 enum machine_mode mode
;
2855 /* Use a pseudo if highly optimizing and still generating RTL. */
2857 = (flag_expensive_optimizations
&& !no_new_pseudos
? 0 : target
);
2860 /* If this is a sign-extended 32-bit constant, we can do this in at most
2861 three insns, so do it if we have enough insns left. We always have
2862 a sign-extended 32-bit constant when compiling on a narrow machine. */
2864 if (HOST_BITS_PER_WIDE_INT
!= 64
2865 || c
>> 31 == -1 || c
>> 31 == 0)
2867 HOST_WIDE_INT low
= ((c
& 0xffff) ^ 0x8000) - 0x8000;
2868 HOST_WIDE_INT tmp1
= c
- low
;
2869 HOST_WIDE_INT high
= (((tmp1
>> 16) & 0xffff) ^ 0x8000) - 0x8000;
2870 HOST_WIDE_INT extra
= 0;
2872 /* If HIGH will be interpreted as negative but the constant is
2873 positive, we must adjust it to do two ldha insns. */
2875 if ((high
& 0x8000) != 0 && c
>= 0)
2879 high
= ((tmp1
>> 16) & 0xffff) - 2 * ((tmp1
>> 16) & 0x8000);
2882 if (c
== low
|| (low
== 0 && extra
== 0))
2884 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
2885 but that meant that we can't handle INT_MIN on 32-bit machines
2886 (like NT/Alpha), because we recurse indefinitely through
2887 emit_move_insn to gen_movdi. So instead, since we know exactly
2888 what we want, create it explicitly. */
2891 target
= gen_reg_rtx (mode
);
2892 emit_insn (gen_rtx_SET (VOIDmode
, target
, GEN_INT (c
)));
2895 else if (n
>= 2 + (extra
!= 0))
2897 temp
= copy_to_suggested_reg (GEN_INT (high
<< 16), subtarget
, mode
);
2899 /* As of 2002-02-23, addsi3 is only available when not optimizing.
2900 This means that if we go through expand_binop, we'll try to
2901 generate extensions, etc, which will require new pseudos, which
2902 will fail during some split phases. The SImode add patterns
2903 still exist, but are not named. So build the insns by hand. */
2908 subtarget
= gen_reg_rtx (mode
);
2909 insn
= gen_rtx_PLUS (mode
, temp
, GEN_INT (extra
<< 16));
2910 insn
= gen_rtx_SET (VOIDmode
, subtarget
, insn
);
2916 target
= gen_reg_rtx (mode
);
2917 insn
= gen_rtx_PLUS (mode
, temp
, GEN_INT (low
));
2918 insn
= gen_rtx_SET (VOIDmode
, target
, insn
);
2924 /* If we couldn't do it that way, try some other methods. But if we have
2925 no instructions left, don't bother. Likewise, if this is SImode and
2926 we can't make pseudos, we can't do anything since the expand_binop
2927 and expand_unop calls will widen and try to make pseudos. */
2929 if (n
== 1 || (mode
== SImode
&& no_new_pseudos
))
2932 /* Next, see if we can load a related constant and then shift and possibly
2933 negate it to get the constant we want. Try this once each increasing
2934 numbers of insns. */
2936 for (i
= 1; i
< n
; i
++)
2938 /* First, see if minus some low bits, we've an easy load of
2941 new = ((c
& 0xffff) ^ 0x8000) - 0x8000;
2943 && (temp
= alpha_emit_set_const (subtarget
, mode
, c
- new, i
)) != 0)
2944 return expand_binop (mode
, add_optab
, temp
, GEN_INT (new),
2945 target
, 0, OPTAB_WIDEN
);
2947 /* Next try complementing. */
2948 if ((temp
= alpha_emit_set_const (subtarget
, mode
, ~ c
, i
)) != 0)
2949 return expand_unop (mode
, one_cmpl_optab
, temp
, target
, 0);
2951 /* Next try to form a constant and do a left shift. We can do this
2952 if some low-order bits are zero; the exact_log2 call below tells
2953 us that information. The bits we are shifting out could be any
2954 value, but here we'll just try the 0- and sign-extended forms of
2955 the constant. To try to increase the chance of having the same
2956 constant in more than one insn, start at the highest number of
2957 bits to shift, but try all possibilities in case a ZAPNOT will
2960 if ((bits
= exact_log2 (c
& - c
)) > 0)
2961 for (; bits
> 0; bits
--)
2962 if ((temp
= (alpha_emit_set_const
2963 (subtarget
, mode
, c
>> bits
, i
))) != 0
2964 || ((temp
= (alpha_emit_set_const
2966 ((unsigned HOST_WIDE_INT
) c
) >> bits
, i
)))
2968 return expand_binop (mode
, ashl_optab
, temp
, GEN_INT (bits
),
2969 target
, 0, OPTAB_WIDEN
);
2971 /* Now try high-order zero bits. Here we try the shifted-in bits as
2972 all zero and all ones. Be careful to avoid shifting outside the
2973 mode and to avoid shifting outside the host wide int size. */
2974 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
2975 confuse the recursive call and set all of the high 32 bits. */
2977 if ((bits
= (MIN (HOST_BITS_PER_WIDE_INT
, GET_MODE_SIZE (mode
) * 8)
2978 - floor_log2 (c
) - 1 - (HOST_BITS_PER_WIDE_INT
< 64))) > 0)
2979 for (; bits
> 0; bits
--)
2980 if ((temp
= alpha_emit_set_const (subtarget
, mode
,
2982 || ((temp
= (alpha_emit_set_const
2984 ((c
<< bits
) | (((HOST_WIDE_INT
) 1 << bits
) - 1)),
2987 return expand_binop (mode
, lshr_optab
, temp
, GEN_INT (bits
),
2988 target
, 1, OPTAB_WIDEN
);
2990 /* Now try high-order 1 bits. We get that with a sign-extension.
2991 But one bit isn't enough here. Be careful to avoid shifting outside
2992 the mode and to avoid shifting outside the host wide int size. */
2994 if ((bits
= (MIN (HOST_BITS_PER_WIDE_INT
, GET_MODE_SIZE (mode
) * 8)
2995 - floor_log2 (~ c
) - 2)) > 0)
2996 for (; bits
> 0; bits
--)
2997 if ((temp
= alpha_emit_set_const (subtarget
, mode
,
2999 || ((temp
= (alpha_emit_set_const
3001 ((c
<< bits
) | (((HOST_WIDE_INT
) 1 << bits
) - 1)),
3004 return expand_binop (mode
, ashr_optab
, temp
, GEN_INT (bits
),
3005 target
, 0, OPTAB_WIDEN
);
3008 #if HOST_BITS_PER_WIDE_INT == 64
3009 /* Finally, see if can load a value into the target that is the same as the
3010 constant except that all bytes that are 0 are changed to be 0xff. If we
3011 can, then we can do a ZAPNOT to obtain the desired constant. */
3014 for (i
= 0; i
< 64; i
+= 8)
3015 if ((new & ((HOST_WIDE_INT
) 0xff << i
)) == 0)
3016 new |= (HOST_WIDE_INT
) 0xff << i
;
3018 /* We are only called for SImode and DImode. If this is SImode, ensure that
3019 we are sign extended to a full word. */
3022 new = ((new & 0xffffffff) ^ 0x80000000) - 0x80000000;
3024 if (new != c
&& new != -1
3025 && (temp
= alpha_emit_set_const (subtarget
, mode
, new, n
- 1)) != 0)
3026 return expand_binop (mode
, and_optab
, temp
, GEN_INT (c
| ~ new),
3027 target
, 0, OPTAB_WIDEN
);
3033 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
3034 fall back to a straight forward decomposition. We do this to avoid
3035 exponential run times encountered when looking for longer sequences
3036 with alpha_emit_set_const. */
3039 alpha_emit_set_long_const (target
, c1
, c2
)
3041 HOST_WIDE_INT c1
, c2
;
3043 HOST_WIDE_INT d1
, d2
, d3
, d4
;
3045 /* Decompose the entire word */
3046 #if HOST_BITS_PER_WIDE_INT >= 64
3047 if (c2
!= -(c1
< 0))
3049 d1
= ((c1
& 0xffff) ^ 0x8000) - 0x8000;
3051 d2
= ((c1
& 0xffffffff) ^ 0x80000000) - 0x80000000;
3052 c1
= (c1
- d2
) >> 32;
3053 d3
= ((c1
& 0xffff) ^ 0x8000) - 0x8000;
3055 d4
= ((c1
& 0xffffffff) ^ 0x80000000) - 0x80000000;
3059 d1
= ((c1
& 0xffff) ^ 0x8000) - 0x8000;
3061 d2
= ((c1
& 0xffffffff) ^ 0x80000000) - 0x80000000;
3065 d3
= ((c2
& 0xffff) ^ 0x8000) - 0x8000;
3067 d4
= ((c2
& 0xffffffff) ^ 0x80000000) - 0x80000000;
3072 /* Construct the high word */
3075 emit_move_insn (target
, GEN_INT (d4
));
3077 emit_move_insn (target
, gen_rtx_PLUS (DImode
, target
, GEN_INT (d3
)));
3080 emit_move_insn (target
, GEN_INT (d3
));
3082 /* Shift it into place */
3083 emit_move_insn (target
, gen_rtx_ASHIFT (DImode
, target
, GEN_INT (32)));
3085 /* Add in the low bits. */
3087 emit_move_insn (target
, gen_rtx_PLUS (DImode
, target
, GEN_INT (d2
)));
3089 emit_move_insn (target
, gen_rtx_PLUS (DImode
, target
, GEN_INT (d1
)));
3094 /* Expand a move instruction; return true if all work is done.
3095 We don't handle non-bwx subword loads here. */
3098 alpha_expand_mov (mode
, operands
)
3099 enum machine_mode mode
;
3102 /* If the output is not a register, the input must be. */
3103 if (GET_CODE (operands
[0]) == MEM
3104 && ! reg_or_0_operand (operands
[1], mode
))
3105 operands
[1] = force_reg (mode
, operands
[1]);
3107 /* Allow legitimize_address to perform some simplifications. */
3108 if (mode
== Pmode
&& symbolic_operand (operands
[1], mode
))
3112 /* With RTL inlining, at -O3, rtl is generated, stored, then actually
3113 compiled at the end of compilation. In the meantime, someone can
3114 re-encode-section-info on some symbol changing it e.g. from global
3115 to local-not-small. If this happens, we'd have emitted a plain
3116 load rather than a high+losum load and not recognize the insn.
3118 So if rtl inlining is in effect, we delay the global/not-global
3119 decision until rest_of_compilation by wrapping it in an
3121 if (TARGET_EXPLICIT_RELOCS
&& flag_inline_functions
3122 && rtx_equal_function_value_matters
3123 && global_symbolic_operand (operands
[1], mode
))
3125 emit_insn (gen_movdi_er_maybe_g (operands
[0], operands
[1]));
3129 tmp
= alpha_legitimize_address (operands
[1], operands
[0], mode
);
3132 if (tmp
== operands
[0])
3139 /* Early out for non-constants and valid constants. */
3140 if (! CONSTANT_P (operands
[1]) || input_operand (operands
[1], mode
))
3143 /* Split large integers. */
3144 if (GET_CODE (operands
[1]) == CONST_INT
3145 || GET_CODE (operands
[1]) == CONST_DOUBLE
)
3147 HOST_WIDE_INT i0
, i1
;
3148 rtx temp
= NULL_RTX
;
3150 if (GET_CODE (operands
[1]) == CONST_INT
)
3152 i0
= INTVAL (operands
[1]);
3155 else if (HOST_BITS_PER_WIDE_INT
>= 64)
3157 i0
= CONST_DOUBLE_LOW (operands
[1]);
3162 i0
= CONST_DOUBLE_LOW (operands
[1]);
3163 i1
= CONST_DOUBLE_HIGH (operands
[1]);
3166 if (HOST_BITS_PER_WIDE_INT
>= 64 || i1
== -(i0
< 0))
3167 temp
= alpha_emit_set_const (operands
[0], mode
, i0
, 3);
3169 if (!temp
&& TARGET_BUILD_CONSTANTS
)
3170 temp
= alpha_emit_set_long_const (operands
[0], i0
, i1
);
3174 if (rtx_equal_p (operands
[0], temp
))
3181 /* Otherwise we've nothing left but to drop the thing to memory. */
3182 operands
[1] = force_const_mem (mode
, operands
[1]);
3183 if (reload_in_progress
)
3185 emit_move_insn (operands
[0], XEXP (operands
[1], 0));
3186 operands
[1] = copy_rtx (operands
[1]);
3187 XEXP (operands
[1], 0) = operands
[0];
3190 operands
[1] = validize_mem (operands
[1]);
3194 /* Expand a non-bwx QImode or HImode move instruction;
3195 return true if all work is done. */
3198 alpha_expand_mov_nobwx (mode
, operands
)
3199 enum machine_mode mode
;
3202 /* If the output is not a register, the input must be. */
3203 if (GET_CODE (operands
[0]) == MEM
)
3204 operands
[1] = force_reg (mode
, operands
[1]);
3206 /* Handle four memory cases, unaligned and aligned for either the input
3207 or the output. The only case where we can be called during reload is
3208 for aligned loads; all other cases require temporaries. */
3210 if (GET_CODE (operands
[1]) == MEM
3211 || (GET_CODE (operands
[1]) == SUBREG
3212 && GET_CODE (SUBREG_REG (operands
[1])) == MEM
)
3213 || (reload_in_progress
&& GET_CODE (operands
[1]) == REG
3214 && REGNO (operands
[1]) >= FIRST_PSEUDO_REGISTER
)
3215 || (reload_in_progress
&& GET_CODE (operands
[1]) == SUBREG
3216 && GET_CODE (SUBREG_REG (operands
[1])) == REG
3217 && REGNO (SUBREG_REG (operands
[1])) >= FIRST_PSEUDO_REGISTER
))
3219 if (aligned_memory_operand (operands
[1], mode
))
3221 if (reload_in_progress
)
3223 emit_insn ((mode
== QImode
3224 ? gen_reload_inqi_help
3225 : gen_reload_inhi_help
)
3226 (operands
[0], operands
[1],
3227 gen_rtx_REG (SImode
, REGNO (operands
[0]))));
3231 rtx aligned_mem
, bitnum
;
3232 rtx scratch
= gen_reg_rtx (SImode
);
3234 get_aligned_mem (operands
[1], &aligned_mem
, &bitnum
);
3236 emit_insn ((mode
== QImode
3237 ? gen_aligned_loadqi
3238 : gen_aligned_loadhi
)
3239 (operands
[0], aligned_mem
, bitnum
, scratch
));
3244 /* Don't pass these as parameters since that makes the generated
3245 code depend on parameter evaluation order which will cause
3246 bootstrap failures. */
3248 rtx temp1
= gen_reg_rtx (DImode
);
3249 rtx temp2
= gen_reg_rtx (DImode
);
3250 rtx seq
= ((mode
== QImode
3251 ? gen_unaligned_loadqi
3252 : gen_unaligned_loadhi
)
3253 (operands
[0], get_unaligned_address (operands
[1], 0),
3256 alpha_set_memflags (seq
, operands
[1]);
3262 if (GET_CODE (operands
[0]) == MEM
3263 || (GET_CODE (operands
[0]) == SUBREG
3264 && GET_CODE (SUBREG_REG (operands
[0])) == MEM
)
3265 || (reload_in_progress
&& GET_CODE (operands
[0]) == REG
3266 && REGNO (operands
[0]) >= FIRST_PSEUDO_REGISTER
)
3267 || (reload_in_progress
&& GET_CODE (operands
[0]) == SUBREG
3268 && GET_CODE (SUBREG_REG (operands
[0])) == REG
3269 && REGNO (operands
[0]) >= FIRST_PSEUDO_REGISTER
))
3271 if (aligned_memory_operand (operands
[0], mode
))
3273 rtx aligned_mem
, bitnum
;
3274 rtx temp1
= gen_reg_rtx (SImode
);
3275 rtx temp2
= gen_reg_rtx (SImode
);
3277 get_aligned_mem (operands
[0], &aligned_mem
, &bitnum
);
3279 emit_insn (gen_aligned_store (aligned_mem
, operands
[1], bitnum
,
3284 rtx temp1
= gen_reg_rtx (DImode
);
3285 rtx temp2
= gen_reg_rtx (DImode
);
3286 rtx temp3
= gen_reg_rtx (DImode
);
3287 rtx seq
= ((mode
== QImode
3288 ? gen_unaligned_storeqi
3289 : gen_unaligned_storehi
)
3290 (get_unaligned_address (operands
[0], 0),
3291 operands
[1], temp1
, temp2
, temp3
));
3293 alpha_set_memflags (seq
, operands
[0]);
3302 /* Generate an unsigned DImode to FP conversion. This is the same code
3303 optabs would emit if we didn't have TFmode patterns.
3305 For SFmode, this is the only construction I've found that can pass
3306 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
3307 intermediates will work, because you'll get intermediate rounding
3308 that ruins the end result. Some of this could be fixed by turning
3309 on round-to-positive-infinity, but that requires diddling the fpsr,
3310 which kills performance. I tried turning this around and converting
3311 to a negative number, so that I could turn on /m, but either I did
3312 it wrong or there's something else cause I wound up with the exact
3313 same single-bit error. There is a branch-less form of this same code:
3324 fcmoveq $f10,$f11,$f0
3326 I'm not using it because it's the same number of instructions as
3327 this branch-full form, and it has more serialized long latency
3328 instructions on the critical path.
3330 For DFmode, we can avoid rounding errors by breaking up the word
3331 into two pieces, converting them separately, and adding them back:
3333 LC0: .long 0,0x5f800000
3338 cpyse $f11,$f31,$f10
3339 cpyse $f31,$f11,$f11
3347 This doesn't seem to be a clear-cut win over the optabs form.
3348 It probably all depends on the distribution of numbers being
3349 converted -- in the optabs form, all but high-bit-set has a
3350 much lower minimum execution time. */
3353 alpha_emit_floatuns (operands
)
3356 rtx neglab
, donelab
, i0
, i1
, f0
, in
, out
;
3357 enum machine_mode mode
;
3360 in
= force_reg (DImode
, operands
[1]);
3361 mode
= GET_MODE (out
);
3362 neglab
= gen_label_rtx ();
3363 donelab
= gen_label_rtx ();
3364 i0
= gen_reg_rtx (DImode
);
3365 i1
= gen_reg_rtx (DImode
);
3366 f0
= gen_reg_rtx (mode
);
3368 emit_cmp_and_jump_insns (in
, const0_rtx
, LT
, const0_rtx
, DImode
, 0, neglab
);
3370 emit_insn (gen_rtx_SET (VOIDmode
, out
, gen_rtx_FLOAT (mode
, in
)));
3371 emit_jump_insn (gen_jump (donelab
));
3374 emit_label (neglab
);
3376 emit_insn (gen_lshrdi3 (i0
, in
, const1_rtx
));
3377 emit_insn (gen_anddi3 (i1
, in
, const1_rtx
));
3378 emit_insn (gen_iordi3 (i0
, i0
, i1
));
3379 emit_insn (gen_rtx_SET (VOIDmode
, f0
, gen_rtx_FLOAT (mode
, i0
)));
3380 emit_insn (gen_rtx_SET (VOIDmode
, out
, gen_rtx_PLUS (mode
, f0
, f0
)));
3382 emit_label (donelab
);
3385 /* Generate the comparison for a conditional branch. */
3388 alpha_emit_conditional_branch (code
)
3391 enum rtx_code cmp_code
, branch_code
;
3392 enum machine_mode cmp_mode
, branch_mode
= VOIDmode
;
3393 rtx op0
= alpha_compare
.op0
, op1
= alpha_compare
.op1
;
3396 if (alpha_compare
.fp_p
&& GET_MODE (op0
) == TFmode
)
3398 if (! TARGET_HAS_XFLOATING_LIBS
)
3401 /* X_floating library comparison functions return
3405 Convert the compare against the raw return value. */
3427 op0
= alpha_emit_xfloating_compare (cmp_code
, op0
, op1
);
3429 alpha_compare
.fp_p
= 0;
3432 /* The general case: fold the comparison code to the types of compares
3433 that we have, choosing the branch as necessary. */
3436 case EQ
: case LE
: case LT
: case LEU
: case LTU
:
3438 /* We have these compares: */
3439 cmp_code
= code
, branch_code
= NE
;
3444 /* These must be reversed. */
3445 cmp_code
= reverse_condition (code
), branch_code
= EQ
;
3448 case GE
: case GT
: case GEU
: case GTU
:
3449 /* For FP, we swap them, for INT, we reverse them. */
3450 if (alpha_compare
.fp_p
)
3452 cmp_code
= swap_condition (code
);
3454 tem
= op0
, op0
= op1
, op1
= tem
;
3458 cmp_code
= reverse_condition (code
);
3467 if (alpha_compare
.fp_p
)
3470 if (flag_unsafe_math_optimizations
)
3472 /* When we are not as concerned about non-finite values, and we
3473 are comparing against zero, we can branch directly. */
3474 if (op1
== CONST0_RTX (DFmode
))
3475 cmp_code
= NIL
, branch_code
= code
;
3476 else if (op0
== CONST0_RTX (DFmode
))
3478 /* Undo the swap we probably did just above. */
3479 tem
= op0
, op0
= op1
, op1
= tem
;
3480 branch_code
= swap_condition (cmp_code
);
3486 /* ??? We mark the branch mode to be CCmode to prevent the
3487 compare and branch from being combined, since the compare
3488 insn follows IEEE rules that the branch does not. */
3489 branch_mode
= CCmode
;
3496 /* The following optimizations are only for signed compares. */
3497 if (code
!= LEU
&& code
!= LTU
&& code
!= GEU
&& code
!= GTU
)
3499 /* Whee. Compare and branch against 0 directly. */
3500 if (op1
== const0_rtx
)
3501 cmp_code
= NIL
, branch_code
= code
;
3503 /* We want to use cmpcc/bcc when we can, since there is a zero delay
3504 bypass between logicals and br/cmov on EV5. But we don't want to
3505 force valid immediate constants into registers needlessly. */
3506 else if (GET_CODE (op1
) == CONST_INT
)
3508 HOST_WIDE_INT v
= INTVAL (op1
), n
= -v
;
3510 if (! CONST_OK_FOR_LETTER_P (v
, 'I')
3511 && (CONST_OK_FOR_LETTER_P (n
, 'K')
3512 || CONST_OK_FOR_LETTER_P (n
, 'L')))
3514 cmp_code
= PLUS
, branch_code
= code
;
3520 if (!reg_or_0_operand (op0
, DImode
))
3521 op0
= force_reg (DImode
, op0
);
3522 if (cmp_code
!= PLUS
&& !reg_or_8bit_operand (op1
, DImode
))
3523 op1
= force_reg (DImode
, op1
);
3526 /* Emit an initial compare instruction, if necessary. */
3528 if (cmp_code
!= NIL
)
3530 tem
= gen_reg_rtx (cmp_mode
);
3531 emit_move_insn (tem
, gen_rtx_fmt_ee (cmp_code
, cmp_mode
, op0
, op1
));
3534 /* Zero the operands. */
3535 memset (&alpha_compare
, 0, sizeof (alpha_compare
));
3537 /* Return the branch comparison. */
3538 return gen_rtx_fmt_ee (branch_code
, branch_mode
, tem
, CONST0_RTX (cmp_mode
));
3541 /* Certain simplifications can be done to make invalid setcc operations
3542 valid. Return the final comparison, or NULL if we can't work. */
3545 alpha_emit_setcc (code
)
3548 enum rtx_code cmp_code
;
3549 rtx op0
= alpha_compare
.op0
, op1
= alpha_compare
.op1
;
3550 int fp_p
= alpha_compare
.fp_p
;
3553 /* Zero the operands. */
3554 memset (&alpha_compare
, 0, sizeof (alpha_compare
));
3556 if (fp_p
&& GET_MODE (op0
) == TFmode
)
3558 if (! TARGET_HAS_XFLOATING_LIBS
)
3561 /* X_floating library comparison functions return
3565 Convert the compare against the raw return value. */
3567 if (code
== UNORDERED
|| code
== ORDERED
)
3572 op0
= alpha_emit_xfloating_compare (cmp_code
, op0
, op1
);
3576 if (code
== UNORDERED
)
3578 else if (code
== ORDERED
)
3584 if (fp_p
&& !TARGET_FIX
)
3587 /* The general case: fold the comparison code to the types of compares
3588 that we have, choosing the branch as necessary. */
3593 case EQ
: case LE
: case LT
: case LEU
: case LTU
:
3595 /* We have these compares. */
3597 cmp_code
= code
, code
= NE
;
3601 if (!fp_p
&& op1
== const0_rtx
)
3606 cmp_code
= reverse_condition (code
);
3610 case GE
: case GT
: case GEU
: case GTU
:
3611 /* These normally need swapping, but for integer zero we have
3612 special patterns that recognize swapped operands. */
3613 if (!fp_p
&& op1
== const0_rtx
)
3615 code
= swap_condition (code
);
3617 cmp_code
= code
, code
= NE
;
3618 tmp
= op0
, op0
= op1
, op1
= tmp
;
3627 if (!register_operand (op0
, DImode
))
3628 op0
= force_reg (DImode
, op0
);
3629 if (!reg_or_8bit_operand (op1
, DImode
))
3630 op1
= force_reg (DImode
, op1
);
3633 /* Emit an initial compare instruction, if necessary. */
3634 if (cmp_code
!= NIL
)
3636 enum machine_mode mode
= fp_p
? DFmode
: DImode
;
3638 tmp
= gen_reg_rtx (mode
);
3639 emit_insn (gen_rtx_SET (VOIDmode
, tmp
,
3640 gen_rtx_fmt_ee (cmp_code
, mode
, op0
, op1
)));
3642 op0
= fp_p
? gen_lowpart (DImode
, tmp
) : tmp
;
3646 /* Return the setcc comparison. */
3647 return gen_rtx_fmt_ee (code
, DImode
, op0
, op1
);
3651 /* Rewrite a comparison against zero CMP of the form
3652 (CODE (cc0) (const_int 0)) so it can be written validly in
3653 a conditional move (if_then_else CMP ...).
3654 If both of the operands that set cc0 are nonzero we must emit
3655 an insn to perform the compare (it can't be done within
3656 the conditional move). */
3658 alpha_emit_conditional_move (cmp
, mode
)
3660 enum machine_mode mode
;
3662 enum rtx_code code
= GET_CODE (cmp
);
3663 enum rtx_code cmov_code
= NE
;
3664 rtx op0
= alpha_compare
.op0
;
3665 rtx op1
= alpha_compare
.op1
;
3666 int fp_p
= alpha_compare
.fp_p
;
3667 enum machine_mode cmp_mode
3668 = (GET_MODE (op0
) == VOIDmode
? DImode
: GET_MODE (op0
));
3669 enum machine_mode cmp_op_mode
= fp_p
? DFmode
: DImode
;
3670 enum machine_mode cmov_mode
= VOIDmode
;
3671 int local_fast_math
= flag_unsafe_math_optimizations
;
3674 /* Zero the operands. */
3675 memset (&alpha_compare
, 0, sizeof (alpha_compare
));
3677 if (fp_p
!= FLOAT_MODE_P (mode
))
3679 enum rtx_code cmp_code
;
3684 /* If we have fp<->int register move instructions, do a cmov by
3685 performing the comparison in fp registers, and move the
3686 zero/nonzero value to integer registers, where we can then
3687 use a normal cmov, or vice-versa. */
3691 case EQ
: case LE
: case LT
: case LEU
: case LTU
:
3692 /* We have these compares. */
3693 cmp_code
= code
, code
= NE
;
3697 /* This must be reversed. */
3698 cmp_code
= EQ
, code
= EQ
;
3701 case GE
: case GT
: case GEU
: case GTU
:
3702 /* These normally need swapping, but for integer zero we have
3703 special patterns that recognize swapped operands. */
3704 if (!fp_p
&& op1
== const0_rtx
)
3705 cmp_code
= code
, code
= NE
;
3708 cmp_code
= swap_condition (code
);
3710 tem
= op0
, op0
= op1
, op1
= tem
;
3718 tem
= gen_reg_rtx (cmp_op_mode
);
3719 emit_insn (gen_rtx_SET (VOIDmode
, tem
,
3720 gen_rtx_fmt_ee (cmp_code
, cmp_op_mode
,
3723 cmp_mode
= cmp_op_mode
= fp_p
? DImode
: DFmode
;
3724 op0
= gen_lowpart (cmp_op_mode
, tem
);
3725 op1
= CONST0_RTX (cmp_op_mode
);
3727 local_fast_math
= 1;
3730 /* We may be able to use a conditional move directly.
3731 This avoids emitting spurious compares. */
3732 if (signed_comparison_operator (cmp
, VOIDmode
)
3733 && (!fp_p
|| local_fast_math
)
3734 && (op0
== CONST0_RTX (cmp_mode
) || op1
== CONST0_RTX (cmp_mode
)))
3735 return gen_rtx_fmt_ee (code
, VOIDmode
, op0
, op1
);
3737 /* We can't put the comparison inside the conditional move;
3738 emit a compare instruction and put that inside the
3739 conditional move. Make sure we emit only comparisons we have;
3740 swap or reverse as necessary. */
3747 case EQ
: case LE
: case LT
: case LEU
: case LTU
:
3748 /* We have these compares: */
3752 /* This must be reversed. */
3753 code
= reverse_condition (code
);
3757 case GE
: case GT
: case GEU
: case GTU
:
3758 /* These must be swapped. */
3759 if (op1
!= CONST0_RTX (cmp_mode
))
3761 code
= swap_condition (code
);
3762 tem
= op0
, op0
= op1
, op1
= tem
;
3772 if (!reg_or_0_operand (op0
, DImode
))
3773 op0
= force_reg (DImode
, op0
);
3774 if (!reg_or_8bit_operand (op1
, DImode
))
3775 op1
= force_reg (DImode
, op1
);
3778 /* ??? We mark the branch mode to be CCmode to prevent the compare
3779 and cmov from being combined, since the compare insn follows IEEE
3780 rules that the cmov does not. */
3781 if (fp_p
&& !local_fast_math
)
3784 tem
= gen_reg_rtx (cmp_op_mode
);
3785 emit_move_insn (tem
, gen_rtx_fmt_ee (code
, cmp_op_mode
, op0
, op1
));
3786 return gen_rtx_fmt_ee (cmov_code
, cmov_mode
, tem
, CONST0_RTX (cmp_op_mode
));
3789 /* Simplify a conditional move of two constants into a setcc with
3790 arithmetic. This is done with a splitter since combine would
3791 just undo the work if done during code generation. It also catches
3792 cases we wouldn't have before cse. */
3795 alpha_split_conditional_move (code
, dest
, cond
, t_rtx
, f_rtx
)
3797 rtx dest
, cond
, t_rtx
, f_rtx
;
3799 HOST_WIDE_INT t
, f
, diff
;
3800 enum machine_mode mode
;
3801 rtx target
, subtarget
, tmp
;
3803 mode
= GET_MODE (dest
);
3808 if (((code
== NE
|| code
== EQ
) && diff
< 0)
3809 || (code
== GE
|| code
== GT
))
3811 code
= reverse_condition (code
);
3812 diff
= t
, t
= f
, f
= diff
;
3816 subtarget
= target
= dest
;
3819 target
= gen_lowpart (DImode
, dest
);
3820 if (! no_new_pseudos
)
3821 subtarget
= gen_reg_rtx (DImode
);
3825 /* Below, we must be careful to use copy_rtx on target and subtarget
3826 in intermediate insns, as they may be a subreg rtx, which may not
3829 if (f
== 0 && exact_log2 (diff
) > 0
3830 /* On EV6, we've got enough shifters to make non-arithmatic shifts
3831 viable over a longer latency cmove. On EV5, the E0 slot is a
3832 scarce resource, and on EV4 shift has the same latency as a cmove. */
3833 && (diff
<= 8 || alpha_cpu
== PROCESSOR_EV6
))
3835 tmp
= gen_rtx_fmt_ee (code
, DImode
, cond
, const0_rtx
);
3836 emit_insn (gen_rtx_SET (VOIDmode
, copy_rtx (subtarget
), tmp
));
3838 tmp
= gen_rtx_ASHIFT (DImode
, copy_rtx (subtarget
),
3839 GEN_INT (exact_log2 (t
)));
3840 emit_insn (gen_rtx_SET (VOIDmode
, target
, tmp
));
3842 else if (f
== 0 && t
== -1)
3844 tmp
= gen_rtx_fmt_ee (code
, DImode
, cond
, const0_rtx
);
3845 emit_insn (gen_rtx_SET (VOIDmode
, copy_rtx (subtarget
), tmp
));
3847 emit_insn (gen_negdi2 (target
, copy_rtx (subtarget
)));
3849 else if (diff
== 1 || diff
== 4 || diff
== 8)
3853 tmp
= gen_rtx_fmt_ee (code
, DImode
, cond
, const0_rtx
);
3854 emit_insn (gen_rtx_SET (VOIDmode
, copy_rtx (subtarget
), tmp
));
3857 emit_insn (gen_adddi3 (target
, copy_rtx (subtarget
), GEN_INT (f
)));
3860 add_op
= GEN_INT (f
);
3861 if (sext_add_operand (add_op
, mode
))
3863 tmp
= gen_rtx_MULT (DImode
, copy_rtx (subtarget
),
3865 tmp
= gen_rtx_PLUS (DImode
, tmp
, add_op
);
3866 emit_insn (gen_rtx_SET (VOIDmode
, target
, tmp
));
3878 /* Look up the function X_floating library function name for the
3882 alpha_lookup_xfloating_lib_func (code
)
3887 const enum rtx_code code
;
3888 const char *const func
;
3891 static const struct xfloating_op vms_xfloating_ops
[] =
3893 { PLUS
, "OTS$ADD_X" },
3894 { MINUS
, "OTS$SUB_X" },
3895 { MULT
, "OTS$MUL_X" },
3896 { DIV
, "OTS$DIV_X" },
3897 { EQ
, "OTS$EQL_X" },
3898 { NE
, "OTS$NEQ_X" },
3899 { LT
, "OTS$LSS_X" },
3900 { LE
, "OTS$LEQ_X" },
3901 { GT
, "OTS$GTR_X" },
3902 { GE
, "OTS$GEQ_X" },
3903 { FIX
, "OTS$CVTXQ" },
3904 { FLOAT
, "OTS$CVTQX" },
3905 { UNSIGNED_FLOAT
, "OTS$CVTQUX" },
3906 { FLOAT_EXTEND
, "OTS$CVT_FLOAT_T_X" },
3907 { FLOAT_TRUNCATE
, "OTS$CVT_FLOAT_X_T" },
3910 static const struct xfloating_op osf_xfloating_ops
[] =
3912 { PLUS
, "_OtsAddX" },
3913 { MINUS
, "_OtsSubX" },
3914 { MULT
, "_OtsMulX" },
3915 { DIV
, "_OtsDivX" },
3922 { FIX
, "_OtsCvtXQ" },
3923 { FLOAT
, "_OtsCvtQX" },
3924 { UNSIGNED_FLOAT
, "_OtsCvtQUX" },
3925 { FLOAT_EXTEND
, "_OtsConvertFloatTX" },
3926 { FLOAT_TRUNCATE
, "_OtsConvertFloatXT" },
3929 const struct xfloating_op
*ops
;
3930 const long n
= ARRAY_SIZE (osf_xfloating_ops
);
3933 /* How irritating. Nothing to key off for the table. Hardcode
3934 knowledge of the G_floating routines. */
3935 if (TARGET_FLOAT_VAX
)
3937 if (TARGET_ABI_OPEN_VMS
)
3939 if (code
== FLOAT_EXTEND
)
3940 return "OTS$CVT_FLOAT_G_X";
3941 if (code
== FLOAT_TRUNCATE
)
3942 return "OTS$CVT_FLOAT_X_G";
3946 if (code
== FLOAT_EXTEND
)
3947 return "_OtsConvertFloatGX";
3948 if (code
== FLOAT_TRUNCATE
)
3949 return "_OtsConvertFloatXG";
3953 if (TARGET_ABI_OPEN_VMS
)
3954 ops
= vms_xfloating_ops
;
3956 ops
= osf_xfloating_ops
;
3958 for (i
= 0; i
< n
; ++i
)
3959 if (ops
[i
].code
== code
)
3965 /* Most X_floating operations take the rounding mode as an argument.
3966 Compute that here. */
3969 alpha_compute_xfloating_mode_arg (code
, round
)
3971 enum alpha_fp_rounding_mode round
;
3977 case ALPHA_FPRM_NORM
:
3980 case ALPHA_FPRM_MINF
:
3983 case ALPHA_FPRM_CHOP
:
3986 case ALPHA_FPRM_DYN
:
3992 /* XXX For reference, round to +inf is mode = 3. */
3995 if (code
== FLOAT_TRUNCATE
&& alpha_fptm
== ALPHA_FPTM_N
)
4001 /* Emit an X_floating library function call.
4003 Note that these functions do not follow normal calling conventions:
4004 TFmode arguments are passed in two integer registers (as opposed to
4005 indirect); TFmode return values appear in R16+R17.
4007 FUNC is the function name to call.
4008 TARGET is where the output belongs.
4009 OPERANDS are the inputs.
4010 NOPERANDS is the count of inputs.
4011 EQUIV is the expression equivalent for the function.
4015 alpha_emit_xfloating_libcall (func
, target
, operands
, noperands
, equiv
)
4022 rtx usage
= NULL_RTX
, tmp
, reg
;
4027 for (i
= 0; i
< noperands
; ++i
)
4029 switch (GET_MODE (operands
[i
]))
4032 reg
= gen_rtx_REG (TFmode
, regno
);
4037 reg
= gen_rtx_REG (DFmode
, regno
+ 32);
4042 if (GET_CODE (operands
[i
]) != CONST_INT
)
4046 reg
= gen_rtx_REG (DImode
, regno
);
4054 emit_move_insn (reg
, operands
[i
]);
4055 usage
= alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode
, reg
), usage
);
4058 switch (GET_MODE (target
))
4061 reg
= gen_rtx_REG (TFmode
, 16);
4064 reg
= gen_rtx_REG (DFmode
, 32);
4067 reg
= gen_rtx_REG (DImode
, 0);
4073 tmp
= gen_rtx_MEM (QImode
, init_one_libfunc (func
));
4074 tmp
= emit_call_insn (GEN_CALL_VALUE (reg
, tmp
, const0_rtx
,
4075 const0_rtx
, const0_rtx
));
4076 CALL_INSN_FUNCTION_USAGE (tmp
) = usage
;
4081 emit_libcall_block (tmp
, target
, reg
, equiv
);
4084 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
4087 alpha_emit_xfloating_arith (code
, operands
)
4093 rtx out_operands
[3];
4095 func
= alpha_lookup_xfloating_lib_func (code
);
4096 mode
= alpha_compute_xfloating_mode_arg (code
, alpha_fprm
);
4098 out_operands
[0] = operands
[1];
4099 out_operands
[1] = operands
[2];
4100 out_operands
[2] = GEN_INT (mode
);
4101 alpha_emit_xfloating_libcall (func
, operands
[0], out_operands
, 3,
4102 gen_rtx_fmt_ee (code
, TFmode
, operands
[1],
4106 /* Emit an X_floating library function call for a comparison. */
4109 alpha_emit_xfloating_compare (code
, op0
, op1
)
4114 rtx out
, operands
[2];
4116 func
= alpha_lookup_xfloating_lib_func (code
);
4120 out
= gen_reg_rtx (DImode
);
4122 /* ??? Strange mode for equiv because what's actually returned
4123 is -1,0,1, not a proper boolean value. */
4124 alpha_emit_xfloating_libcall (func
, out
, operands
, 2,
4125 gen_rtx_fmt_ee (code
, CCmode
, op0
, op1
));
4130 /* Emit an X_floating library function call for a conversion. */
4133 alpha_emit_xfloating_cvt (code
, operands
)
4137 int noperands
= 1, mode
;
4138 rtx out_operands
[2];
4141 func
= alpha_lookup_xfloating_lib_func (code
);
4143 out_operands
[0] = operands
[1];
4148 mode
= alpha_compute_xfloating_mode_arg (code
, ALPHA_FPRM_CHOP
);
4149 out_operands
[1] = GEN_INT (mode
);
4152 case FLOAT_TRUNCATE
:
4153 mode
= alpha_compute_xfloating_mode_arg (code
, alpha_fprm
);
4154 out_operands
[1] = GEN_INT (mode
);
4161 alpha_emit_xfloating_libcall (func
, operands
[0], out_operands
, noperands
,
4162 gen_rtx_fmt_e (code
, GET_MODE (operands
[0]),
4166 /* Split a TFmode OP[1] into DImode OP[2,3] and likewise for
4167 OP[0] into OP[0,1]. Naturally, output operand ordering is
4171 alpha_split_tfmode_pair (operands
)
4174 if (GET_CODE (operands
[1]) == REG
)
4176 operands
[3] = gen_rtx_REG (DImode
, REGNO (operands
[1]) + 1);
4177 operands
[2] = gen_rtx_REG (DImode
, REGNO (operands
[1]));
4179 else if (GET_CODE (operands
[1]) == MEM
)
4181 operands
[3] = adjust_address (operands
[1], DImode
, 8);
4182 operands
[2] = adjust_address (operands
[1], DImode
, 0);
4184 else if (operands
[1] == CONST0_RTX (TFmode
))
4185 operands
[2] = operands
[3] = const0_rtx
;
4189 if (GET_CODE (operands
[0]) == REG
)
4191 operands
[1] = gen_rtx_REG (DImode
, REGNO (operands
[0]) + 1);
4192 operands
[0] = gen_rtx_REG (DImode
, REGNO (operands
[0]));
4194 else if (GET_CODE (operands
[0]) == MEM
)
4196 operands
[1] = adjust_address (operands
[0], DImode
, 8);
4197 operands
[0] = adjust_address (operands
[0], DImode
, 0);
4203 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
4204 op2 is a register containing the sign bit, operation is the
4205 logical operation to be performed. */
4208 alpha_split_tfmode_frobsign (operands
, operation
)
4210 rtx (*operation
) PARAMS ((rtx
, rtx
, rtx
));
4212 rtx high_bit
= operands
[2];
4216 alpha_split_tfmode_pair (operands
);
4218 /* Detect three flavors of operand overlap. */
4220 if (rtx_equal_p (operands
[0], operands
[2]))
4222 else if (rtx_equal_p (operands
[1], operands
[2]))
4224 if (rtx_equal_p (operands
[0], high_bit
))
4231 emit_move_insn (operands
[0], operands
[2]);
4233 /* ??? If the destination overlaps both source tf and high_bit, then
4234 assume source tf is dead in its entirety and use the other half
4235 for a scratch register. Otherwise "scratch" is just the proper
4236 destination register. */
4237 scratch
= operands
[move
< 2 ? 1 : 3];
4239 emit_insn ((*operation
) (scratch
, high_bit
, operands
[3]));
4243 emit_move_insn (operands
[0], operands
[2]);
4245 emit_move_insn (operands
[1], scratch
);
4249 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
4253 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
4254 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
4255 lda r3,X(r11) lda r3,X+2(r11)
4256 extwl r1,r3,r1 extql r1,r3,r1
4257 extwh r2,r3,r2 extqh r2,r3,r2
4258 or r1.r2.r1 or r1,r2,r1
4261 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
4262 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
4263 lda r3,X(r11) lda r3,X(r11)
4264 extll r1,r3,r1 extll r1,r3,r1
4265 extlh r2,r3,r2 extlh r2,r3,r2
4266 or r1.r2.r1 addl r1,r2,r1
4268 quad: ldq_u r1,X(r11)
4277 alpha_expand_unaligned_load (tgt
, mem
, size
, ofs
, sign
)
4279 HOST_WIDE_INT size
, ofs
;
4282 rtx meml
, memh
, addr
, extl
, exth
, tmp
, mema
;
4283 enum machine_mode mode
;
4285 meml
= gen_reg_rtx (DImode
);
4286 memh
= gen_reg_rtx (DImode
);
4287 addr
= gen_reg_rtx (DImode
);
4288 extl
= gen_reg_rtx (DImode
);
4289 exth
= gen_reg_rtx (DImode
);
4291 mema
= XEXP (mem
, 0);
4292 if (GET_CODE (mema
) == LO_SUM
)
4293 mema
= force_reg (Pmode
, mema
);
4295 /* AND addresses cannot be in any alias set, since they may implicitly
4296 alias surrounding code. Ideally we'd have some alias set that
4297 covered all types except those with alignment 8 or higher. */
4299 tmp
= change_address (mem
, DImode
,
4300 gen_rtx_AND (DImode
,
4301 plus_constant (mema
, ofs
),
4303 set_mem_alias_set (tmp
, 0);
4304 emit_move_insn (meml
, tmp
);
4306 tmp
= change_address (mem
, DImode
,
4307 gen_rtx_AND (DImode
,
4308 plus_constant (mema
, ofs
+ size
- 1),
4310 set_mem_alias_set (tmp
, 0);
4311 emit_move_insn (memh
, tmp
);
4313 if (WORDS_BIG_ENDIAN
&& sign
&& (size
== 2 || size
== 4))
4315 emit_move_insn (addr
, plus_constant (mema
, -1));
4317 emit_insn (gen_extqh_be (extl
, meml
, addr
));
4318 emit_insn (gen_extxl_be (exth
, memh
, GEN_INT (64), addr
));
4320 addr
= expand_binop (DImode
, ior_optab
, extl
, exth
, tgt
, 1, OPTAB_WIDEN
);
4321 addr
= expand_binop (DImode
, ashr_optab
, addr
, GEN_INT (64 - size
*8),
4322 addr
, 1, OPTAB_WIDEN
);
4324 else if (sign
&& size
== 2)
4326 emit_move_insn (addr
, plus_constant (mema
, ofs
+2));
4328 emit_insn (gen_extxl_le (extl
, meml
, GEN_INT (64), addr
));
4329 emit_insn (gen_extqh_le (exth
, memh
, addr
));
4331 /* We must use tgt here for the target. Alpha-vms port fails if we use
4332 addr for the target, because addr is marked as a pointer and combine
4333 knows that pointers are always sign-extended 32 bit values. */
4334 addr
= expand_binop (DImode
, ior_optab
, extl
, exth
, tgt
, 1, OPTAB_WIDEN
);
4335 addr
= expand_binop (DImode
, ashr_optab
, addr
, GEN_INT (48),
4336 addr
, 1, OPTAB_WIDEN
);
4340 if (WORDS_BIG_ENDIAN
)
4342 emit_move_insn (addr
, plus_constant (mema
, ofs
+size
-1));
4346 emit_insn (gen_extwh_be (extl
, meml
, addr
));
4351 emit_insn (gen_extlh_be (extl
, meml
, addr
));
4356 emit_insn (gen_extqh_be (extl
, meml
, addr
));
4363 emit_insn (gen_extxl_be (exth
, memh
, GEN_INT (size
*8), addr
));
4367 emit_move_insn (addr
, plus_constant (mema
, ofs
));
4368 emit_insn (gen_extxl_le (extl
, meml
, GEN_INT (size
*8), addr
));
4372 emit_insn (gen_extwh_le (exth
, memh
, addr
));
4377 emit_insn (gen_extlh_le (exth
, memh
, addr
));
4382 emit_insn (gen_extqh_le (exth
, memh
, addr
));
4391 addr
= expand_binop (mode
, ior_optab
, gen_lowpart (mode
, extl
),
4392 gen_lowpart (mode
, exth
), gen_lowpart (mode
, tgt
),
4397 emit_move_insn (tgt
, gen_lowpart(GET_MODE (tgt
), addr
));
4400 /* Similarly, use ins and msk instructions to perform unaligned stores. */
4403 alpha_expand_unaligned_store (dst
, src
, size
, ofs
)
4405 HOST_WIDE_INT size
, ofs
;
4407 rtx dstl
, dsth
, addr
, insl
, insh
, meml
, memh
, dsta
;
4409 dstl
= gen_reg_rtx (DImode
);
4410 dsth
= gen_reg_rtx (DImode
);
4411 insl
= gen_reg_rtx (DImode
);
4412 insh
= gen_reg_rtx (DImode
);
4414 dsta
= XEXP (dst
, 0);
4415 if (GET_CODE (dsta
) == LO_SUM
)
4416 dsta
= force_reg (Pmode
, dsta
);
4418 /* AND addresses cannot be in any alias set, since they may implicitly
4419 alias surrounding code. Ideally we'd have some alias set that
4420 covered all types except those with alignment 8 or higher. */
4422 meml
= change_address (dst
, DImode
,
4423 gen_rtx_AND (DImode
,
4424 plus_constant (dsta
, ofs
),
4426 set_mem_alias_set (meml
, 0);
4428 memh
= change_address (dst
, DImode
,
4429 gen_rtx_AND (DImode
,
4430 plus_constant (dsta
, ofs
+ size
- 1),
4432 set_mem_alias_set (memh
, 0);
4434 emit_move_insn (dsth
, memh
);
4435 emit_move_insn (dstl
, meml
);
4436 if (WORDS_BIG_ENDIAN
)
4438 addr
= copy_addr_to_reg (plus_constant (dsta
, ofs
+size
-1));
4440 if (src
!= const0_rtx
)
4445 emit_insn (gen_inswl_be (insh
, gen_lowpart (HImode
,src
), addr
));
4448 emit_insn (gen_insll_be (insh
, gen_lowpart (SImode
,src
), addr
));
4451 emit_insn (gen_insql_be (insh
, gen_lowpart (DImode
,src
), addr
));
4454 emit_insn (gen_insxh (insl
, gen_lowpart (DImode
, src
),
4455 GEN_INT (size
*8), addr
));
4461 emit_insn (gen_mskxl_be (dsth
, dsth
, GEN_INT (0xffff), addr
));
4465 rtx msk
= immed_double_const (0xffffffff, 0, DImode
);
4466 emit_insn (gen_mskxl_be (dsth
, dsth
, msk
, addr
));
4470 emit_insn (gen_mskxl_be (dsth
, dsth
, constm1_rtx
, addr
));
4474 emit_insn (gen_mskxh (dstl
, dstl
, GEN_INT (size
*8), addr
));
4478 addr
= copy_addr_to_reg (plus_constant (dsta
, ofs
));
4480 if (src
!= const0_rtx
)
4482 emit_insn (gen_insxh (insh
, gen_lowpart (DImode
, src
),
4483 GEN_INT (size
*8), addr
));
4488 emit_insn (gen_inswl_le (insl
, gen_lowpart (HImode
, src
), addr
));
4491 emit_insn (gen_insll_le (insl
, gen_lowpart (SImode
, src
), addr
));
4494 emit_insn (gen_insql_le (insl
, src
, addr
));
4499 emit_insn (gen_mskxh (dsth
, dsth
, GEN_INT (size
*8), addr
));
4504 emit_insn (gen_mskxl_le (dstl
, dstl
, GEN_INT (0xffff), addr
));
4508 rtx msk
= immed_double_const (0xffffffff, 0, DImode
);
4509 emit_insn (gen_mskxl_le (dstl
, dstl
, msk
, addr
));
4513 emit_insn (gen_mskxl_le (dstl
, dstl
, constm1_rtx
, addr
));
4518 if (src
!= const0_rtx
)
4520 dsth
= expand_binop (DImode
, ior_optab
, insh
, dsth
, dsth
, 0, OPTAB_WIDEN
);
4521 dstl
= expand_binop (DImode
, ior_optab
, insl
, dstl
, dstl
, 0, OPTAB_WIDEN
);
4524 if (WORDS_BIG_ENDIAN
)
4526 emit_move_insn (meml
, dstl
);
4527 emit_move_insn (memh
, dsth
);
4531 /* Must store high before low for degenerate case of aligned. */
4532 emit_move_insn (memh
, dsth
);
4533 emit_move_insn (meml
, dstl
);
4537 /* The block move code tries to maximize speed by separating loads and
4538 stores at the expense of register pressure: we load all of the data
4539 before we store it back out. There are two secondary effects worth
4540 mentioning, that this speeds copying to/from aligned and unaligned
4541 buffers, and that it makes the code significantly easier to write. */
4543 #define MAX_MOVE_WORDS 8
4545 /* Load an integral number of consecutive unaligned quadwords. */
4548 alpha_expand_unaligned_load_words (out_regs
, smem
, words
, ofs
)
4551 HOST_WIDE_INT words
, ofs
;
4553 rtx
const im8
= GEN_INT (-8);
4554 rtx
const i64
= GEN_INT (64);
4555 rtx ext_tmps
[MAX_MOVE_WORDS
], data_regs
[MAX_MOVE_WORDS
+1];
4556 rtx sreg
, areg
, tmp
, smema
;
4559 smema
= XEXP (smem
, 0);
4560 if (GET_CODE (smema
) == LO_SUM
)
4561 smema
= force_reg (Pmode
, smema
);
4563 /* Generate all the tmp registers we need. */
4564 for (i
= 0; i
< words
; ++i
)
4566 data_regs
[i
] = out_regs
[i
];
4567 ext_tmps
[i
] = gen_reg_rtx (DImode
);
4569 data_regs
[words
] = gen_reg_rtx (DImode
);
4572 smem
= adjust_address (smem
, GET_MODE (smem
), ofs
);
4574 /* Load up all of the source data. */
4575 for (i
= 0; i
< words
; ++i
)
4577 tmp
= change_address (smem
, DImode
,
4578 gen_rtx_AND (DImode
,
4579 plus_constant (smema
, 8*i
),
4581 set_mem_alias_set (tmp
, 0);
4582 emit_move_insn (data_regs
[i
], tmp
);
4585 tmp
= change_address (smem
, DImode
,
4586 gen_rtx_AND (DImode
,
4587 plus_constant (smema
, 8*words
- 1),
4589 set_mem_alias_set (tmp
, 0);
4590 emit_move_insn (data_regs
[words
], tmp
);
4592 /* Extract the half-word fragments. Unfortunately DEC decided to make
4593 extxh with offset zero a noop instead of zeroing the register, so
4594 we must take care of that edge condition ourselves with cmov. */
4596 sreg
= copy_addr_to_reg (smema
);
4597 areg
= expand_binop (DImode
, and_optab
, sreg
, GEN_INT (7), NULL
,
4599 if (WORDS_BIG_ENDIAN
)
4600 emit_move_insn (sreg
, plus_constant (sreg
, 7));
4601 for (i
= 0; i
< words
; ++i
)
4603 if (WORDS_BIG_ENDIAN
)
4605 emit_insn (gen_extqh_be (data_regs
[i
], data_regs
[i
], sreg
));
4606 emit_insn (gen_extxl_be (ext_tmps
[i
], data_regs
[i
+1], i64
, sreg
));
4610 emit_insn (gen_extxl_le (data_regs
[i
], data_regs
[i
], i64
, sreg
));
4611 emit_insn (gen_extqh_le (ext_tmps
[i
], data_regs
[i
+1], sreg
));
4613 emit_insn (gen_rtx_SET (VOIDmode
, ext_tmps
[i
],
4614 gen_rtx_IF_THEN_ELSE (DImode
,
4615 gen_rtx_EQ (DImode
, areg
,
4617 const0_rtx
, ext_tmps
[i
])));
4620 /* Merge the half-words into whole words. */
4621 for (i
= 0; i
< words
; ++i
)
4623 out_regs
[i
] = expand_binop (DImode
, ior_optab
, data_regs
[i
],
4624 ext_tmps
[i
], data_regs
[i
], 1, OPTAB_WIDEN
);
4628 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
4629 may be NULL to store zeros. */
4632 alpha_expand_unaligned_store_words (data_regs
, dmem
, words
, ofs
)
4635 HOST_WIDE_INT words
, ofs
;
4637 rtx
const im8
= GEN_INT (-8);
4638 rtx
const i64
= GEN_INT (64);
4639 rtx ins_tmps
[MAX_MOVE_WORDS
];
4640 rtx st_tmp_1
, st_tmp_2
, dreg
;
4641 rtx st_addr_1
, st_addr_2
, dmema
;
4644 dmema
= XEXP (dmem
, 0);
4645 if (GET_CODE (dmema
) == LO_SUM
)
4646 dmema
= force_reg (Pmode
, dmema
);
4648 /* Generate all the tmp registers we need. */
4649 if (data_regs
!= NULL
)
4650 for (i
= 0; i
< words
; ++i
)
4651 ins_tmps
[i
] = gen_reg_rtx(DImode
);
4652 st_tmp_1
= gen_reg_rtx(DImode
);
4653 st_tmp_2
= gen_reg_rtx(DImode
);
4656 dmem
= adjust_address (dmem
, GET_MODE (dmem
), ofs
);
4658 st_addr_2
= change_address (dmem
, DImode
,
4659 gen_rtx_AND (DImode
,
4660 plus_constant (dmema
, words
*8 - 1),
4662 set_mem_alias_set (st_addr_2
, 0);
4664 st_addr_1
= change_address (dmem
, DImode
,
4665 gen_rtx_AND (DImode
, dmema
, im8
));
4666 set_mem_alias_set (st_addr_1
, 0);
4668 /* Load up the destination end bits. */
4669 emit_move_insn (st_tmp_2
, st_addr_2
);
4670 emit_move_insn (st_tmp_1
, st_addr_1
);
4672 /* Shift the input data into place. */
4673 dreg
= copy_addr_to_reg (dmema
);
4674 if (WORDS_BIG_ENDIAN
)
4675 emit_move_insn (dreg
, plus_constant (dreg
, 7));
4676 if (data_regs
!= NULL
)
4678 for (i
= words
-1; i
>= 0; --i
)
4680 if (WORDS_BIG_ENDIAN
)
4682 emit_insn (gen_insql_be (ins_tmps
[i
], data_regs
[i
], dreg
));
4683 emit_insn (gen_insxh (data_regs
[i
], data_regs
[i
], i64
, dreg
));
4687 emit_insn (gen_insxh (ins_tmps
[i
], data_regs
[i
], i64
, dreg
));
4688 emit_insn (gen_insql_le (data_regs
[i
], data_regs
[i
], dreg
));
4691 for (i
= words
-1; i
> 0; --i
)
4693 ins_tmps
[i
-1] = expand_binop (DImode
, ior_optab
, data_regs
[i
],
4694 ins_tmps
[i
-1], ins_tmps
[i
-1], 1,
4699 /* Split and merge the ends with the destination data. */
4700 if (WORDS_BIG_ENDIAN
)
4702 emit_insn (gen_mskxl_be (st_tmp_2
, st_tmp_2
, constm1_rtx
, dreg
));
4703 emit_insn (gen_mskxh (st_tmp_1
, st_tmp_1
, i64
, dreg
));
4707 emit_insn (gen_mskxh (st_tmp_2
, st_tmp_2
, i64
, dreg
));
4708 emit_insn (gen_mskxl_le (st_tmp_1
, st_tmp_1
, constm1_rtx
, dreg
));
4711 if (data_regs
!= NULL
)
4713 st_tmp_2
= expand_binop (DImode
, ior_optab
, st_tmp_2
, ins_tmps
[words
-1],
4714 st_tmp_2
, 1, OPTAB_WIDEN
);
4715 st_tmp_1
= expand_binop (DImode
, ior_optab
, st_tmp_1
, data_regs
[0],
4716 st_tmp_1
, 1, OPTAB_WIDEN
);
4720 if (WORDS_BIG_ENDIAN
)
4721 emit_move_insn (st_addr_1
, st_tmp_1
);
4723 emit_move_insn (st_addr_2
, st_tmp_2
);
4724 for (i
= words
-1; i
> 0; --i
)
4726 rtx tmp
= change_address (dmem
, DImode
,
4727 gen_rtx_AND (DImode
,
4728 plus_constant(dmema
,
4729 WORDS_BIG_ENDIAN
? i
*8-1 : i
*8),
4731 set_mem_alias_set (tmp
, 0);
4732 emit_move_insn (tmp
, data_regs
? ins_tmps
[i
-1] : const0_rtx
);
4734 if (WORDS_BIG_ENDIAN
)
4735 emit_move_insn (st_addr_2
, st_tmp_2
);
4737 emit_move_insn (st_addr_1
, st_tmp_1
);
4741 /* Expand string/block move operations.
4743 operands[0] is the pointer to the destination.
4744 operands[1] is the pointer to the source.
4745 operands[2] is the number of bytes to move.
4746 operands[3] is the alignment. */
4749 alpha_expand_block_move (operands
)
4752 rtx bytes_rtx
= operands
[2];
4753 rtx align_rtx
= operands
[3];
4754 HOST_WIDE_INT orig_bytes
= INTVAL (bytes_rtx
);
4755 HOST_WIDE_INT bytes
= orig_bytes
;
4756 HOST_WIDE_INT src_align
= INTVAL (align_rtx
) * BITS_PER_UNIT
;
4757 HOST_WIDE_INT dst_align
= src_align
;
4758 rtx orig_src
= operands
[1];
4759 rtx orig_dst
= operands
[0];
4760 rtx data_regs
[2 * MAX_MOVE_WORDS
+ 16];
4762 unsigned int i
, words
, ofs
, nregs
= 0;
4764 if (orig_bytes
<= 0)
4766 else if (orig_bytes
> MAX_MOVE_WORDS
* UNITS_PER_WORD
)
4769 /* Look for additional alignment information from recorded register info. */
4771 tmp
= XEXP (orig_src
, 0);
4772 if (GET_CODE (tmp
) == REG
)
4773 src_align
= MAX (src_align
, REGNO_POINTER_ALIGN (REGNO (tmp
)));
4774 else if (GET_CODE (tmp
) == PLUS
4775 && GET_CODE (XEXP (tmp
, 0)) == REG
4776 && GET_CODE (XEXP (tmp
, 1)) == CONST_INT
)
4778 unsigned HOST_WIDE_INT c
= INTVAL (XEXP (tmp
, 1));
4779 unsigned int a
= REGNO_POINTER_ALIGN (REGNO (XEXP (tmp
, 0)));
4783 if (a
>= 64 && c
% 8 == 0)
4785 else if (a
>= 32 && c
% 4 == 0)
4787 else if (a
>= 16 && c
% 2 == 0)
4792 tmp
= XEXP (orig_dst
, 0);
4793 if (GET_CODE (tmp
) == REG
)
4794 dst_align
= MAX (dst_align
, REGNO_POINTER_ALIGN (REGNO (tmp
)));
4795 else if (GET_CODE (tmp
) == PLUS
4796 && GET_CODE (XEXP (tmp
, 0)) == REG
4797 && GET_CODE (XEXP (tmp
, 1)) == CONST_INT
)
4799 unsigned HOST_WIDE_INT c
= INTVAL (XEXP (tmp
, 1));
4800 unsigned int a
= REGNO_POINTER_ALIGN (REGNO (XEXP (tmp
, 0)));
4804 if (a
>= 64 && c
% 8 == 0)
4806 else if (a
>= 32 && c
% 4 == 0)
4808 else if (a
>= 16 && c
% 2 == 0)
4813 /* Load the entire block into registers. */
4814 if (GET_CODE (XEXP (orig_src
, 0)) == ADDRESSOF
)
4816 enum machine_mode mode
;
4818 tmp
= XEXP (XEXP (orig_src
, 0), 0);
4820 /* Don't use the existing register if we're reading more than
4821 is held in the register. Nor if there is not a mode that
4822 handles the exact size. */
4823 mode
= mode_for_size (bytes
* BITS_PER_UNIT
, MODE_INT
, 1);
4825 && GET_MODE_SIZE (GET_MODE (tmp
)) >= bytes
)
4829 data_regs
[nregs
] = gen_lowpart (DImode
, tmp
);
4830 data_regs
[nregs
+ 1] = gen_highpart (DImode
, tmp
);
4834 data_regs
[nregs
++] = gen_lowpart (mode
, tmp
);
4839 /* No appropriate mode; fall back on memory. */
4840 orig_src
= replace_equiv_address (orig_src
,
4841 copy_addr_to_reg (XEXP (orig_src
, 0)));
4842 src_align
= GET_MODE_BITSIZE (GET_MODE (tmp
));
4846 if (src_align
>= 64 && bytes
>= 8)
4850 for (i
= 0; i
< words
; ++i
)
4851 data_regs
[nregs
+ i
] = gen_reg_rtx (DImode
);
4853 for (i
= 0; i
< words
; ++i
)
4854 emit_move_insn (data_regs
[nregs
+ i
],
4855 adjust_address (orig_src
, DImode
, ofs
+ i
* 8));
4862 if (src_align
>= 32 && bytes
>= 4)
4866 for (i
= 0; i
< words
; ++i
)
4867 data_regs
[nregs
+ i
] = gen_reg_rtx (SImode
);
4869 for (i
= 0; i
< words
; ++i
)
4870 emit_move_insn (data_regs
[nregs
+ i
],
4871 adjust_address (orig_src
, SImode
, ofs
+ i
* 4));
4882 for (i
= 0; i
< words
+1; ++i
)
4883 data_regs
[nregs
+ i
] = gen_reg_rtx (DImode
);
4885 alpha_expand_unaligned_load_words (data_regs
+ nregs
, orig_src
,
4893 if (! TARGET_BWX
&& bytes
>= 4)
4895 data_regs
[nregs
++] = tmp
= gen_reg_rtx (SImode
);
4896 alpha_expand_unaligned_load (tmp
, orig_src
, 4, ofs
, 0);
4903 if (src_align
>= 16)
4906 data_regs
[nregs
++] = tmp
= gen_reg_rtx (HImode
);
4907 emit_move_insn (tmp
, adjust_address (orig_src
, HImode
, ofs
));
4910 } while (bytes
>= 2);
4912 else if (! TARGET_BWX
)
4914 data_regs
[nregs
++] = tmp
= gen_reg_rtx (HImode
);
4915 alpha_expand_unaligned_load (tmp
, orig_src
, 2, ofs
, 0);
4923 data_regs
[nregs
++] = tmp
= gen_reg_rtx (QImode
);
4924 emit_move_insn (tmp
, adjust_address (orig_src
, QImode
, ofs
));
4931 if (nregs
> ARRAY_SIZE (data_regs
))
4934 /* Now save it back out again. */
4938 if (GET_CODE (XEXP (orig_dst
, 0)) == ADDRESSOF
)
4940 enum machine_mode mode
;
4941 tmp
= XEXP (XEXP (orig_dst
, 0), 0);
4943 mode
= mode_for_size (orig_bytes
* BITS_PER_UNIT
, MODE_INT
, 1);
4944 if (GET_MODE (tmp
) == mode
)
4948 emit_move_insn (tmp
, data_regs
[0]);
4953 else if (nregs
== 2 && mode
== TImode
)
4955 /* Undo the subregging done above when copying between
4956 two TImode registers. */
4957 if (GET_CODE (data_regs
[0]) == SUBREG
4958 && GET_MODE (SUBREG_REG (data_regs
[0])) == TImode
)
4959 emit_move_insn (tmp
, SUBREG_REG (data_regs
[0]));
4965 emit_move_insn (gen_lowpart (DImode
, tmp
), data_regs
[0]);
4966 emit_move_insn (gen_highpart (DImode
, tmp
), data_regs
[1]);
4970 emit_no_conflict_block (seq
, tmp
, data_regs
[0],
4971 data_regs
[1], NULL_RTX
);
4979 /* ??? If nregs > 1, consider reconstructing the word in regs. */
4980 /* ??? Optimize mode < dst_mode with strict_low_part. */
4982 /* No appropriate mode; fall back on memory. We can speed things
4983 up by recognizing extra alignment information. */
4984 orig_dst
= replace_equiv_address (orig_dst
,
4985 copy_addr_to_reg (XEXP (orig_dst
, 0)));
4986 dst_align
= GET_MODE_BITSIZE (GET_MODE (tmp
));
4989 /* Write out the data in whatever chunks reading the source allowed. */
4990 if (dst_align
>= 64)
4992 while (i
< nregs
&& GET_MODE (data_regs
[i
]) == DImode
)
4994 emit_move_insn (adjust_address (orig_dst
, DImode
, ofs
),
5001 if (dst_align
>= 32)
5003 /* If the source has remaining DImode regs, write them out in
5005 while (i
< nregs
&& GET_MODE (data_regs
[i
]) == DImode
)
5007 tmp
= expand_binop (DImode
, lshr_optab
, data_regs
[i
], GEN_INT (32),
5008 NULL_RTX
, 1, OPTAB_WIDEN
);
5010 emit_move_insn (adjust_address (orig_dst
, SImode
, ofs
),
5011 gen_lowpart (SImode
, data_regs
[i
]));
5012 emit_move_insn (adjust_address (orig_dst
, SImode
, ofs
+ 4),
5013 gen_lowpart (SImode
, tmp
));
5018 while (i
< nregs
&& GET_MODE (data_regs
[i
]) == SImode
)
5020 emit_move_insn (adjust_address (orig_dst
, SImode
, ofs
),
5027 if (i
< nregs
&& GET_MODE (data_regs
[i
]) == DImode
)
5029 /* Write out a remaining block of words using unaligned methods. */
5031 for (words
= 1; i
+ words
< nregs
; words
++)
5032 if (GET_MODE (data_regs
[i
+ words
]) != DImode
)
5036 alpha_expand_unaligned_store (orig_dst
, data_regs
[i
], 8, ofs
);
5038 alpha_expand_unaligned_store_words (data_regs
+ i
, orig_dst
,
5045 /* Due to the above, this won't be aligned. */
5046 /* ??? If we have more than one of these, consider constructing full
5047 words in registers and using alpha_expand_unaligned_store_words. */
5048 while (i
< nregs
&& GET_MODE (data_regs
[i
]) == SImode
)
5050 alpha_expand_unaligned_store (orig_dst
, data_regs
[i
], 4, ofs
);
5055 if (dst_align
>= 16)
5056 while (i
< nregs
&& GET_MODE (data_regs
[i
]) == HImode
)
5058 emit_move_insn (adjust_address (orig_dst
, HImode
, ofs
), data_regs
[i
]);
5063 while (i
< nregs
&& GET_MODE (data_regs
[i
]) == HImode
)
5065 alpha_expand_unaligned_store (orig_dst
, data_regs
[i
], 2, ofs
);
5070 while (i
< nregs
&& GET_MODE (data_regs
[i
]) == QImode
)
5072 emit_move_insn (adjust_address (orig_dst
, QImode
, ofs
), data_regs
[i
]);
5086 alpha_expand_block_clear (operands
)
5089 rtx bytes_rtx
= operands
[1];
5090 rtx align_rtx
= operands
[2];
5091 HOST_WIDE_INT orig_bytes
= INTVAL (bytes_rtx
);
5092 HOST_WIDE_INT bytes
= orig_bytes
;
5093 HOST_WIDE_INT align
= INTVAL (align_rtx
) * BITS_PER_UNIT
;
5094 HOST_WIDE_INT alignofs
= 0;
5095 rtx orig_dst
= operands
[0];
5097 int i
, words
, ofs
= 0;
5099 if (orig_bytes
<= 0)
5101 if (orig_bytes
> MAX_MOVE_WORDS
* UNITS_PER_WORD
)
5104 /* Look for stricter alignment. */
5105 tmp
= XEXP (orig_dst
, 0);
5106 if (GET_CODE (tmp
) == REG
)
5107 align
= MAX (align
, REGNO_POINTER_ALIGN (REGNO (tmp
)));
5108 else if (GET_CODE (tmp
) == PLUS
5109 && GET_CODE (XEXP (tmp
, 0)) == REG
5110 && GET_CODE (XEXP (tmp
, 1)) == CONST_INT
)
5112 HOST_WIDE_INT c
= INTVAL (XEXP (tmp
, 1));
5113 int a
= REGNO_POINTER_ALIGN (REGNO (XEXP (tmp
, 0)));
5118 align
= a
, alignofs
= 8 - c
% 8;
5120 align
= a
, alignofs
= 4 - c
% 4;
5122 align
= a
, alignofs
= 2 - c
% 2;
5125 else if (GET_CODE (tmp
) == ADDRESSOF
)
5127 enum machine_mode mode
;
5129 mode
= mode_for_size (bytes
* BITS_PER_UNIT
, MODE_INT
, 1);
5130 if (GET_MODE (XEXP (tmp
, 0)) == mode
)
5132 emit_move_insn (XEXP (tmp
, 0), const0_rtx
);
5136 /* No appropriate mode; fall back on memory. */
5137 orig_dst
= replace_equiv_address (orig_dst
, copy_addr_to_reg (tmp
));
5138 align
= GET_MODE_BITSIZE (GET_MODE (XEXP (tmp
, 0)));
5141 /* Handle an unaligned prefix first. */
5145 #if HOST_BITS_PER_WIDE_INT >= 64
5146 /* Given that alignofs is bounded by align, the only time BWX could
5147 generate three stores is for a 7 byte fill. Prefer two individual
5148 stores over a load/mask/store sequence. */
5149 if ((!TARGET_BWX
|| alignofs
== 7)
5151 && !(alignofs
== 4 && bytes
>= 4))
5153 enum machine_mode mode
= (align
>= 64 ? DImode
: SImode
);
5154 int inv_alignofs
= (align
>= 64 ? 8 : 4) - alignofs
;
5158 mem
= adjust_address (orig_dst
, mode
, ofs
- inv_alignofs
);
5159 set_mem_alias_set (mem
, 0);
5161 mask
= ~(~(HOST_WIDE_INT
)0 << (inv_alignofs
* 8));
5162 if (bytes
< alignofs
)
5164 mask
|= ~(HOST_WIDE_INT
)0 << ((inv_alignofs
+ bytes
) * 8);
5175 tmp
= expand_binop (mode
, and_optab
, mem
, GEN_INT (mask
),
5176 NULL_RTX
, 1, OPTAB_WIDEN
);
5178 emit_move_insn (mem
, tmp
);
5182 if (TARGET_BWX
&& (alignofs
& 1) && bytes
>= 1)
5184 emit_move_insn (adjust_address (orig_dst
, QImode
, ofs
), const0_rtx
);
5189 if (TARGET_BWX
&& align
>= 16 && (alignofs
& 3) == 2 && bytes
>= 2)
5191 emit_move_insn (adjust_address (orig_dst
, HImode
, ofs
), const0_rtx
);
5196 if (alignofs
== 4 && bytes
>= 4)
5198 emit_move_insn (adjust_address (orig_dst
, SImode
, ofs
), const0_rtx
);
5204 /* If we've not used the extra lead alignment information by now,
5205 we won't be able to. Downgrade align to match what's left over. */
5208 alignofs
= alignofs
& -alignofs
;
5209 align
= MIN (align
, alignofs
* BITS_PER_UNIT
);
5213 /* Handle a block of contiguous long-words. */
5215 if (align
>= 64 && bytes
>= 8)
5219 for (i
= 0; i
< words
; ++i
)
5220 emit_move_insn (adjust_address (orig_dst
, DImode
, ofs
+ i
* 8),
5227 /* If the block is large and appropriately aligned, emit a single
5228 store followed by a sequence of stq_u insns. */
5230 if (align
>= 32 && bytes
> 16)
5234 emit_move_insn (adjust_address (orig_dst
, SImode
, ofs
), const0_rtx
);
5238 orig_dsta
= XEXP (orig_dst
, 0);
5239 if (GET_CODE (orig_dsta
) == LO_SUM
)
5240 orig_dsta
= force_reg (Pmode
, orig_dsta
);
5243 for (i
= 0; i
< words
; ++i
)
5246 = change_address (orig_dst
, DImode
,
5247 gen_rtx_AND (DImode
,
5248 plus_constant (orig_dsta
, ofs
+ i
*8),
5250 set_mem_alias_set (mem
, 0);
5251 emit_move_insn (mem
, const0_rtx
);
5254 /* Depending on the alignment, the first stq_u may have overlapped
5255 with the initial stl, which means that the last stq_u didn't
5256 write as much as it would appear. Leave those questionable bytes
5258 bytes
-= words
* 8 - 4;
5259 ofs
+= words
* 8 - 4;
5262 /* Handle a smaller block of aligned words. */
5264 if ((align
>= 64 && bytes
== 4)
5265 || (align
== 32 && bytes
>= 4))
5269 for (i
= 0; i
< words
; ++i
)
5270 emit_move_insn (adjust_address (orig_dst
, SImode
, ofs
+ i
* 4),
5277 /* An unaligned block uses stq_u stores for as many as possible. */
5283 alpha_expand_unaligned_store_words (NULL
, orig_dst
, words
, ofs
);
5289 /* Next clean up any trailing pieces. */
5291 #if HOST_BITS_PER_WIDE_INT >= 64
5292 /* Count the number of bits in BYTES for which aligned stores could
5295 for (i
= (TARGET_BWX
? 1 : 4); i
* BITS_PER_UNIT
<= align
; i
<<= 1)
5299 /* If we have appropriate alignment (and it wouldn't take too many
5300 instructions otherwise), mask out the bytes we need. */
5301 if (TARGET_BWX
? words
> 2 : bytes
> 0)
5308 mem
= adjust_address (orig_dst
, DImode
, ofs
);
5309 set_mem_alias_set (mem
, 0);
5311 mask
= ~(HOST_WIDE_INT
)0 << (bytes
* 8);
5313 tmp
= expand_binop (DImode
, and_optab
, mem
, GEN_INT (mask
),
5314 NULL_RTX
, 1, OPTAB_WIDEN
);
5316 emit_move_insn (mem
, tmp
);
5319 else if (align
>= 32 && bytes
< 4)
5324 mem
= adjust_address (orig_dst
, SImode
, ofs
);
5325 set_mem_alias_set (mem
, 0);
5327 mask
= ~(HOST_WIDE_INT
)0 << (bytes
* 8);
5329 tmp
= expand_binop (SImode
, and_optab
, mem
, GEN_INT (mask
),
5330 NULL_RTX
, 1, OPTAB_WIDEN
);
5332 emit_move_insn (mem
, tmp
);
5338 if (!TARGET_BWX
&& bytes
>= 4)
5340 alpha_expand_unaligned_store (orig_dst
, const0_rtx
, 4, ofs
);
5350 emit_move_insn (adjust_address (orig_dst
, HImode
, ofs
),
5354 } while (bytes
>= 2);
5356 else if (! TARGET_BWX
)
5358 alpha_expand_unaligned_store (orig_dst
, const0_rtx
, 2, ofs
);
5366 emit_move_insn (adjust_address (orig_dst
, QImode
, ofs
), const0_rtx
);
5374 /* Returns a mask so that zap(x, value) == x & mask. */
5377 alpha_expand_zap_mask (value
)
5378 HOST_WIDE_INT value
;
5383 if (HOST_BITS_PER_WIDE_INT
>= 64)
5385 HOST_WIDE_INT mask
= 0;
5387 for (i
= 7; i
>= 0; --i
)
5390 if (!((value
>> i
) & 1))
5394 result
= gen_int_mode (mask
, DImode
);
5396 else if (HOST_BITS_PER_WIDE_INT
== 32)
5398 HOST_WIDE_INT mask_lo
= 0, mask_hi
= 0;
5400 for (i
= 7; i
>= 4; --i
)
5403 if (!((value
>> i
) & 1))
5407 for (i
= 3; i
>= 0; --i
)
5410 if (!((value
>> i
) & 1))
5414 result
= immed_double_const (mask_lo
, mask_hi
, DImode
);
5423 alpha_expand_builtin_vector_binop (gen
, mode
, op0
, op1
, op2
)
5424 rtx (*gen
) PARAMS ((rtx
, rtx
, rtx
));
5425 enum machine_mode mode
;
5428 op0
= gen_lowpart (mode
, op0
);
5430 if (op1
== const0_rtx
)
5431 op1
= CONST0_RTX (mode
);
5433 op1
= gen_lowpart (mode
, op1
);
5435 if (op2
== const0_rtx
)
5436 op2
= CONST0_RTX (mode
);
5438 op2
= gen_lowpart (mode
, op2
);
5440 emit_insn ((*gen
) (op0
, op1
, op2
));
5443 /* Adjust the cost of a scheduling dependency. Return the new cost of
5444 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
5447 alpha_adjust_cost (insn
, link
, dep_insn
, cost
)
5453 enum attr_type insn_type
, dep_insn_type
;
5455 /* If the dependence is an anti-dependence, there is no cost. For an
5456 output dependence, there is sometimes a cost, but it doesn't seem
5457 worth handling those few cases. */
5458 if (REG_NOTE_KIND (link
) != 0)
5461 /* If we can't recognize the insns, we can't really do anything. */
5462 if (recog_memoized (insn
) < 0 || recog_memoized (dep_insn
) < 0)
5465 insn_type
= get_attr_type (insn
);
5466 dep_insn_type
= get_attr_type (dep_insn
);
5468 /* Bring in the user-defined memory latency. */
5469 if (dep_insn_type
== TYPE_ILD
5470 || dep_insn_type
== TYPE_FLD
5471 || dep_insn_type
== TYPE_LDSYM
)
5472 cost
+= alpha_memory_latency
-1;
5474 /* Everything else handled in DFA bypasses now. */
5479 /* The number of instructions that can be issued per cycle. */
5484 return (alpha_cpu
== PROCESSOR_EV4
? 2 : 4);
5488 alpha_use_dfa_pipeline_interface ()
5493 /* How many alternative schedules to try. This should be as wide as the
5494 scheduling freedom in the DFA, but no wider. Making this value too
5495 large results extra work for the scheduler.
5497 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
5498 alternative schedules. For EV5, we can choose between E0/E1 and
5499 FA/FM. For EV6, an arithmatic insn can be issued to U0/U1/L0/L1. */
5502 alpha_multipass_dfa_lookahead ()
5504 return (alpha_cpu
== PROCESSOR_EV6
? 4 : 2);
5507 /* Machine-specific function data. */
5509 struct machine_function
GTY(())
5512 /* List of call information words for calls from this function. */
5513 struct rtx_def
*first_ciw
;
5514 struct rtx_def
*last_ciw
;
5517 /* List of deferred case vectors. */
5518 struct rtx_def
*addr_list
;
5521 const char *some_ld_name
;
5524 /* How to allocate a 'struct machine_function'. */
5526 static struct machine_function
*
5527 alpha_init_machine_status ()
5529 return ((struct machine_function
*)
5530 ggc_alloc_cleared (sizeof (struct machine_function
)));
5533 /* Functions to save and restore alpha_return_addr_rtx. */
5535 /* Start the ball rolling with RETURN_ADDR_RTX. */
5538 alpha_return_addr (count
, frame
)
5540 rtx frame ATTRIBUTE_UNUSED
;
5545 return get_hard_reg_initial_val (Pmode
, REG_RA
);
5548 /* Return or create a pseudo containing the gp value for the current
5549 function. Needed only if TARGET_LD_BUGGY_LDGP. */
5552 alpha_gp_save_rtx ()
5554 rtx r
= get_hard_reg_initial_val (DImode
, 29);
5555 if (GET_CODE (r
) != MEM
)
5556 r
= gen_mem_addressof (r
, NULL_TREE
, /*rescan=*/true);
5561 alpha_ra_ever_killed ()
5565 if (!has_hard_reg_initial_val (Pmode
, REG_RA
))
5566 return regs_ever_live
[REG_RA
];
5568 push_topmost_sequence ();
5570 pop_topmost_sequence ();
5572 return reg_set_between_p (gen_rtx_REG (Pmode
, REG_RA
), top
, NULL_RTX
);
5576 /* Return the trap mode suffix applicable to the current
5577 instruction, or NULL. */
5580 get_trap_mode_suffix ()
5582 enum attr_trap_suffix s
= get_attr_trap_suffix (current_output_insn
);
5586 case TRAP_SUFFIX_NONE
:
5589 case TRAP_SUFFIX_SU
:
5590 if (alpha_fptm
>= ALPHA_FPTM_SU
)
5594 case TRAP_SUFFIX_SUI
:
5595 if (alpha_fptm
>= ALPHA_FPTM_SUI
)
5599 case TRAP_SUFFIX_V_SV
:
5607 case ALPHA_FPTM_SUI
:
5612 case TRAP_SUFFIX_V_SV_SVI
:
5621 case ALPHA_FPTM_SUI
:
5626 case TRAP_SUFFIX_U_SU_SUI
:
5635 case ALPHA_FPTM_SUI
:
5643 /* Return the rounding mode suffix applicable to the current
5644 instruction, or NULL. */
5647 get_round_mode_suffix ()
5649 enum attr_round_suffix s
= get_attr_round_suffix (current_output_insn
);
5653 case ROUND_SUFFIX_NONE
:
5655 case ROUND_SUFFIX_NORMAL
:
5658 case ALPHA_FPRM_NORM
:
5660 case ALPHA_FPRM_MINF
:
5662 case ALPHA_FPRM_CHOP
:
5664 case ALPHA_FPRM_DYN
:
5669 case ROUND_SUFFIX_C
:
5675 /* Locate some local-dynamic symbol still in use by this function
5676 so that we can print its name in some movdi_er_tlsldm pattern. */
5679 get_some_local_dynamic_name ()
5683 if (cfun
->machine
->some_ld_name
)
5684 return cfun
->machine
->some_ld_name
;
5686 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
5688 && for_each_rtx (&PATTERN (insn
), get_some_local_dynamic_name_1
, 0))
5689 return cfun
->machine
->some_ld_name
;
5695 get_some_local_dynamic_name_1 (px
, data
)
5697 void *data ATTRIBUTE_UNUSED
;
5701 if (GET_CODE (x
) == SYMBOL_REF
5702 && SYMBOL_REF_TLS_MODEL (x
) == TLS_MODEL_LOCAL_DYNAMIC
)
5704 cfun
->machine
->some_ld_name
= XSTR (x
, 0);
5711 /* Print an operand. Recognize special options, documented below. */
5714 print_operand (file
, x
, code
)
5724 /* Print the assembler name of the current function. */
5725 assemble_name (file
, alpha_fnname
);
5729 assemble_name (file
, get_some_local_dynamic_name ());
5734 const char *trap
= get_trap_mode_suffix ();
5735 const char *round
= get_round_mode_suffix ();
5738 fprintf (file
, (TARGET_AS_SLASH_BEFORE_SUFFIX
? "/%s%s" : "%s%s"),
5739 (trap
? trap
: ""), (round
? round
: ""));
5744 /* Generates single precision instruction suffix. */
5745 fputc ((TARGET_FLOAT_VAX
? 'f' : 's'), file
);
5749 /* Generates double precision instruction suffix. */
5750 fputc ((TARGET_FLOAT_VAX
? 'g' : 't'), file
);
5754 /* Generates a nop after a noreturn call at the very end of the
5756 if (next_real_insn (current_output_insn
) == 0)
5757 fprintf (file
, "\n\tnop");
5761 if (alpha_this_literal_sequence_number
== 0)
5762 alpha_this_literal_sequence_number
= alpha_next_sequence_number
++;
5763 fprintf (file
, "%d", alpha_this_literal_sequence_number
);
5767 if (alpha_this_gpdisp_sequence_number
== 0)
5768 alpha_this_gpdisp_sequence_number
= alpha_next_sequence_number
++;
5769 fprintf (file
, "%d", alpha_this_gpdisp_sequence_number
);
5773 if (GET_CODE (x
) == HIGH
)
5774 output_addr_const (file
, XEXP (x
, 0));
5776 output_operand_lossage ("invalid %%H value");
5783 if (GET_CODE (x
) == UNSPEC
&& XINT (x
, 1) == UNSPEC_TLSGD_CALL
)
5785 x
= XVECEXP (x
, 0, 0);
5786 lituse
= "lituse_tlsgd";
5788 else if (GET_CODE (x
) == UNSPEC
&& XINT (x
, 1) == UNSPEC_TLSLDM_CALL
)
5790 x
= XVECEXP (x
, 0, 0);
5791 lituse
= "lituse_tlsldm";
5793 else if (GET_CODE (x
) == CONST_INT
)
5794 lituse
= "lituse_jsr";
5797 output_operand_lossage ("invalid %%J value");
5801 if (x
!= const0_rtx
)
5802 fprintf (file
, "\t\t!%s!%d", lituse
, (int) INTVAL (x
));
5807 /* If this operand is the constant zero, write it as "$31". */
5808 if (GET_CODE (x
) == REG
)
5809 fprintf (file
, "%s", reg_names
[REGNO (x
)]);
5810 else if (x
== CONST0_RTX (GET_MODE (x
)))
5811 fprintf (file
, "$31");
5813 output_operand_lossage ("invalid %%r value");
5817 /* Similar, but for floating-point. */
5818 if (GET_CODE (x
) == REG
)
5819 fprintf (file
, "%s", reg_names
[REGNO (x
)]);
5820 else if (x
== CONST0_RTX (GET_MODE (x
)))
5821 fprintf (file
, "$f31");
5823 output_operand_lossage ("invalid %%R value");
5827 /* Write the 1's complement of a constant. */
5828 if (GET_CODE (x
) != CONST_INT
)
5829 output_operand_lossage ("invalid %%N value");
5831 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, ~ INTVAL (x
));
5835 /* Write 1 << C, for a constant C. */
5836 if (GET_CODE (x
) != CONST_INT
)
5837 output_operand_lossage ("invalid %%P value");
5839 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, (HOST_WIDE_INT
) 1 << INTVAL (x
));
5843 /* Write the high-order 16 bits of a constant, sign-extended. */
5844 if (GET_CODE (x
) != CONST_INT
)
5845 output_operand_lossage ("invalid %%h value");
5847 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (x
) >> 16);
5851 /* Write the low-order 16 bits of a constant, sign-extended. */
5852 if (GET_CODE (x
) != CONST_INT
)
5853 output_operand_lossage ("invalid %%L value");
5855 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
,
5856 (INTVAL (x
) & 0xffff) - 2 * (INTVAL (x
) & 0x8000));
5860 /* Write mask for ZAP insn. */
5861 if (GET_CODE (x
) == CONST_DOUBLE
)
5863 HOST_WIDE_INT mask
= 0;
5864 HOST_WIDE_INT value
;
5866 value
= CONST_DOUBLE_LOW (x
);
5867 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
/ HOST_BITS_PER_CHAR
;
5872 value
= CONST_DOUBLE_HIGH (x
);
5873 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
/ HOST_BITS_PER_CHAR
;
5876 mask
|= (1 << (i
+ sizeof (int)));
5878 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, mask
& 0xff);
5881 else if (GET_CODE (x
) == CONST_INT
)
5883 HOST_WIDE_INT mask
= 0, value
= INTVAL (x
);
5885 for (i
= 0; i
< 8; i
++, value
>>= 8)
5889 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, mask
);
5892 output_operand_lossage ("invalid %%m value");
5896 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5897 if (GET_CODE (x
) != CONST_INT
5898 || (INTVAL (x
) != 8 && INTVAL (x
) != 16
5899 && INTVAL (x
) != 32 && INTVAL (x
) != 64))
5900 output_operand_lossage ("invalid %%M value");
5902 fprintf (file
, "%s",
5903 (INTVAL (x
) == 8 ? "b"
5904 : INTVAL (x
) == 16 ? "w"
5905 : INTVAL (x
) == 32 ? "l"
5910 /* Similar, except do it from the mask. */
5911 if (GET_CODE (x
) == CONST_INT
)
5913 HOST_WIDE_INT value
= INTVAL (x
);
5920 if (value
== 0xffff)
5925 if (value
== 0xffffffff)
5936 else if (HOST_BITS_PER_WIDE_INT
== 32
5937 && GET_CODE (x
) == CONST_DOUBLE
5938 && CONST_DOUBLE_LOW (x
) == 0xffffffff
5939 && CONST_DOUBLE_HIGH (x
) == 0)
5944 output_operand_lossage ("invalid %%U value");
5948 /* Write the constant value divided by 8 for little-endian mode or
5949 (56 - value) / 8 for big-endian mode. */
5951 if (GET_CODE (x
) != CONST_INT
5952 || (unsigned HOST_WIDE_INT
) INTVAL (x
) >= (WORDS_BIG_ENDIAN
5955 || (INTVAL (x
) & 7) != 0)
5956 output_operand_lossage ("invalid %%s value");
5958 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
,
5960 ? (56 - INTVAL (x
)) / 8
5965 /* Same, except compute (64 - c) / 8 */
5967 if (GET_CODE (x
) != CONST_INT
5968 && (unsigned HOST_WIDE_INT
) INTVAL (x
) >= 64
5969 && (INTVAL (x
) & 7) != 8)
5970 output_operand_lossage ("invalid %%s value");
5972 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, (64 - INTVAL (x
)) / 8);
5977 /* On Unicos/Mk systems: use a DEX expression if the symbol
5978 clashes with a register name. */
5979 int dex
= unicosmk_need_dex (x
);
5981 fprintf (file
, "DEX(%d)", dex
);
5983 output_addr_const (file
, x
);
5987 case 'C': case 'D': case 'c': case 'd':
5988 /* Write out comparison name. */
5990 enum rtx_code c
= GET_CODE (x
);
5992 if (GET_RTX_CLASS (c
) != '<')
5993 output_operand_lossage ("invalid %%C value");
5995 else if (code
== 'D')
5996 c
= reverse_condition (c
);
5997 else if (code
== 'c')
5998 c
= swap_condition (c
);
5999 else if (code
== 'd')
6000 c
= swap_condition (reverse_condition (c
));
6003 fprintf (file
, "ule");
6005 fprintf (file
, "ult");
6006 else if (c
== UNORDERED
)
6007 fprintf (file
, "un");
6009 fprintf (file
, "%s", GET_RTX_NAME (c
));
6014 /* Write the divide or modulus operator. */
6015 switch (GET_CODE (x
))
6018 fprintf (file
, "div%s", GET_MODE (x
) == SImode
? "l" : "q");
6021 fprintf (file
, "div%su", GET_MODE (x
) == SImode
? "l" : "q");
6024 fprintf (file
, "rem%s", GET_MODE (x
) == SImode
? "l" : "q");
6027 fprintf (file
, "rem%su", GET_MODE (x
) == SImode
? "l" : "q");
6030 output_operand_lossage ("invalid %%E value");
6036 /* Write "_u" for unaligned access. */
6037 if (GET_CODE (x
) == MEM
&& GET_CODE (XEXP (x
, 0)) == AND
)
6038 fprintf (file
, "_u");
6042 if (GET_CODE (x
) == REG
)
6043 fprintf (file
, "%s", reg_names
[REGNO (x
)]);
6044 else if (GET_CODE (x
) == MEM
)
6045 output_address (XEXP (x
, 0));
6046 else if (GET_CODE (x
) == CONST
&& GET_CODE (XEXP (x
, 0)) == UNSPEC
)
6048 switch (XINT (XEXP (x
, 0), 1))
6052 output_addr_const (file
, XVECEXP (XEXP (x
, 0), 0, 0));
6055 output_operand_lossage ("unknown relocation unspec");
6060 output_addr_const (file
, x
);
6064 output_operand_lossage ("invalid %%xn code");
6069 print_operand_address (file
, addr
)
6074 HOST_WIDE_INT offset
= 0;
6076 if (GET_CODE (addr
) == AND
)
6077 addr
= XEXP (addr
, 0);
6079 if (GET_CODE (addr
) == PLUS
6080 && GET_CODE (XEXP (addr
, 1)) == CONST_INT
)
6082 offset
= INTVAL (XEXP (addr
, 1));
6083 addr
= XEXP (addr
, 0);
6086 if (GET_CODE (addr
) == LO_SUM
)
6088 const char *reloc16
, *reloclo
;
6089 rtx op1
= XEXP (addr
, 1);
6091 if (GET_CODE (op1
) == CONST
&& GET_CODE (XEXP (op1
, 0)) == UNSPEC
)
6093 op1
= XEXP (op1
, 0);
6094 switch (XINT (op1
, 1))
6098 reloclo
= (alpha_tls_size
== 16 ? "dtprel" : "dtprello");
6102 reloclo
= (alpha_tls_size
== 16 ? "tprel" : "tprello");
6105 output_operand_lossage ("unknown relocation unspec");
6109 output_addr_const (file
, XVECEXP (op1
, 0, 0));
6114 reloclo
= "gprellow";
6115 output_addr_const (file
, op1
);
6119 fprintf (file
, "+" HOST_WIDE_INT_PRINT_DEC
, offset
);
6121 addr
= XEXP (addr
, 0);
6122 if (GET_CODE (addr
) == REG
)
6123 basereg
= REGNO (addr
);
6124 else if (GET_CODE (addr
) == SUBREG
6125 && GET_CODE (SUBREG_REG (addr
)) == REG
)
6126 basereg
= subreg_regno (addr
);
6130 fprintf (file
, "($%d)\t\t!%s", basereg
,
6131 (basereg
== 29 ? reloc16
: reloclo
));
6135 if (GET_CODE (addr
) == REG
)
6136 basereg
= REGNO (addr
);
6137 else if (GET_CODE (addr
) == SUBREG
6138 && GET_CODE (SUBREG_REG (addr
)) == REG
)
6139 basereg
= subreg_regno (addr
);
6140 else if (GET_CODE (addr
) == CONST_INT
)
6141 offset
= INTVAL (addr
);
6143 #if TARGET_ABI_OPEN_VMS
6144 else if (GET_CODE (addr
) == SYMBOL_REF
)
6146 fprintf (file
, "%s", XSTR (addr
, 0));
6149 else if (GET_CODE (addr
) == CONST
6150 && GET_CODE (XEXP (addr
, 0)) == PLUS
6151 && GET_CODE (XEXP (XEXP (addr
, 0), 0)) == SYMBOL_REF
)
6153 fprintf (file
, "%s+" HOST_WIDE_INT_PRINT_DEC
,
6154 XSTR (XEXP (XEXP (addr
, 0), 0), 0),
6155 INTVAL (XEXP (XEXP (addr
, 0), 1)));
6163 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
"($%d)", offset
, basereg
);
6166 /* Emit RTL insns to initialize the variable parts of a trampoline at
6167 TRAMP. FNADDR is an RTX for the address of the function's pure
6168 code. CXT is an RTX for the static chain value for the function.
6170 The three offset parameters are for the individual template's
6171 layout. A JMPOFS < 0 indicates that the trampoline does not
6172 contain instructions at all.
6174 We assume here that a function will be called many more times than
6175 its address is taken (e.g., it might be passed to qsort), so we
6176 take the trouble to initialize the "hint" field in the JMP insn.
6177 Note that the hint field is PC (new) + 4 * bits 13:0. */
6180 alpha_initialize_trampoline (tramp
, fnaddr
, cxt
, fnofs
, cxtofs
, jmpofs
)
6181 rtx tramp
, fnaddr
, cxt
;
6182 int fnofs
, cxtofs
, jmpofs
;
6184 rtx temp
, temp1
, addr
;
6185 /* VMS really uses DImode pointers in memory at this point. */
6186 enum machine_mode mode
= TARGET_ABI_OPEN_VMS
? Pmode
: ptr_mode
;
6188 #ifdef POINTERS_EXTEND_UNSIGNED
6189 fnaddr
= convert_memory_address (mode
, fnaddr
);
6190 cxt
= convert_memory_address (mode
, cxt
);
6193 /* Store function address and CXT. */
6194 addr
= memory_address (mode
, plus_constant (tramp
, fnofs
));
6195 emit_move_insn (gen_rtx_MEM (mode
, addr
), fnaddr
);
6196 addr
= memory_address (mode
, plus_constant (tramp
, cxtofs
));
6197 emit_move_insn (gen_rtx_MEM (mode
, addr
), cxt
);
6199 /* This has been disabled since the hint only has a 32k range, and in
6200 no existing OS is the stack within 32k of the text segment. */
6201 if (0 && jmpofs
>= 0)
6203 /* Compute hint value. */
6204 temp
= force_operand (plus_constant (tramp
, jmpofs
+4), NULL_RTX
);
6205 temp
= expand_binop (DImode
, sub_optab
, fnaddr
, temp
, temp
, 1,
6207 temp
= expand_shift (RSHIFT_EXPR
, Pmode
, temp
,
6208 build_int_2 (2, 0), NULL_RTX
, 1);
6209 temp
= expand_and (SImode
, gen_lowpart (SImode
, temp
),
6210 GEN_INT (0x3fff), 0);
6212 /* Merge in the hint. */
6213 addr
= memory_address (SImode
, plus_constant (tramp
, jmpofs
));
6214 temp1
= force_reg (SImode
, gen_rtx_MEM (SImode
, addr
));
6215 temp1
= expand_and (SImode
, temp1
, GEN_INT (0xffffc000), NULL_RTX
);
6216 temp1
= expand_binop (SImode
, ior_optab
, temp1
, temp
, temp1
, 1,
6218 emit_move_insn (gen_rtx_MEM (SImode
, addr
), temp1
);
6221 #ifdef TRANSFER_FROM_TRAMPOLINE
6222 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
6223 0, VOIDmode
, 1, tramp
, Pmode
);
6227 emit_insn (gen_imb ());
6230 /* Determine where to put an argument to a function.
6231 Value is zero to push the argument on the stack,
6232 or a hard register in which to store the argument.
6234 MODE is the argument's machine mode.
6235 TYPE is the data type of the argument (as a tree).
6236 This is null for libcalls where that information may
6238 CUM is a variable of type CUMULATIVE_ARGS which gives info about
6239 the preceding args and about the function being called.
6240 NAMED is nonzero if this argument is a named parameter
6241 (otherwise it is an extra parameter matching an ellipsis).
6243 On Alpha the first 6 words of args are normally in registers
6244 and the rest are pushed. */
6247 function_arg (cum
, mode
, type
, named
)
6248 CUMULATIVE_ARGS cum
;
6249 enum machine_mode mode
;
6251 int named ATTRIBUTE_UNUSED
;
6256 /* Set up defaults for FP operands passed in FP registers, and
6257 integral operands passed in integer registers. */
6259 && (GET_MODE_CLASS (mode
) == MODE_COMPLEX_FLOAT
6260 || GET_MODE_CLASS (mode
) == MODE_FLOAT
))
6265 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
6266 the three platforms, so we can't avoid conditional compilation. */
6267 #if TARGET_ABI_OPEN_VMS
6269 if (mode
== VOIDmode
)
6270 return alpha_arg_info_reg_val (cum
);
6272 num_args
= cum
.num_args
;
6273 if (num_args
>= 6 || MUST_PASS_IN_STACK (mode
, type
))
6277 #if TARGET_ABI_UNICOSMK
6281 /* If this is the last argument, generate the call info word (CIW). */
6282 /* ??? We don't include the caller's line number in the CIW because
6283 I don't know how to determine it if debug infos are turned off. */
6284 if (mode
== VOIDmode
)
6293 for (i
= 0; i
< cum
.num_reg_words
&& i
< 5; i
++)
6294 if (cum
.reg_args_type
[i
])
6295 lo
|= (1 << (7 - i
));
6297 if (cum
.num_reg_words
== 6 && cum
.reg_args_type
[5])
6300 lo
|= cum
.num_reg_words
;
6302 #if HOST_BITS_PER_WIDE_INT == 32
6303 hi
= (cum
.num_args
<< 20) | cum
.num_arg_words
;
6305 lo
= lo
| ((HOST_WIDE_INT
) cum
.num_args
<< 52)
6306 | ((HOST_WIDE_INT
) cum
.num_arg_words
<< 32);
6309 ciw
= immed_double_const (lo
, hi
, DImode
);
6311 return gen_rtx_UNSPEC (DImode
, gen_rtvec (1, ciw
),
6312 UNSPEC_UMK_LOAD_CIW
);
6315 size
= ALPHA_ARG_SIZE (mode
, type
, named
);
6316 num_args
= cum
.num_reg_words
;
6317 if (MUST_PASS_IN_STACK (mode
, type
)
6318 || cum
.num_reg_words
+ size
> 6 || cum
.force_stack
)
6320 else if (type
&& TYPE_MODE (type
) == BLKmode
)
6324 reg1
= gen_rtx_REG (DImode
, num_args
+ 16);
6325 reg1
= gen_rtx_EXPR_LIST (DImode
, reg1
, const0_rtx
);
6327 /* The argument fits in two registers. Note that we still need to
6328 reserve a register for empty structures. */
6332 return gen_rtx_PARALLEL (mode
, gen_rtvec (1, reg1
));
6335 reg2
= gen_rtx_REG (DImode
, num_args
+ 17);
6336 reg2
= gen_rtx_EXPR_LIST (DImode
, reg2
, GEN_INT (8));
6337 return gen_rtx_PARALLEL (mode
, gen_rtvec (2, reg1
, reg2
));
6347 /* VOID is passed as a special flag for "last argument". */
6348 if (type
== void_type_node
)
6350 else if (MUST_PASS_IN_STACK (mode
, type
))
6352 else if (FUNCTION_ARG_PASS_BY_REFERENCE (cum
, mode
, type
, named
))
6355 #endif /* TARGET_ABI_UNICOSMK */
6356 #endif /* TARGET_ABI_OPEN_VMS */
6358 return gen_rtx_REG (mode
, num_args
+ basereg
);
6362 alpha_build_va_list ()
6364 tree base
, ofs
, record
, type_decl
;
6366 if (TARGET_ABI_OPEN_VMS
|| TARGET_ABI_UNICOSMK
)
6367 return ptr_type_node
;
6369 record
= (*lang_hooks
.types
.make_type
) (RECORD_TYPE
);
6370 type_decl
= build_decl (TYPE_DECL
, get_identifier ("__va_list_tag"), record
);
6371 TREE_CHAIN (record
) = type_decl
;
6372 TYPE_NAME (record
) = type_decl
;
6374 /* C++? SET_IS_AGGR_TYPE (record, 1); */
6376 ofs
= build_decl (FIELD_DECL
, get_identifier ("__offset"),
6378 DECL_FIELD_CONTEXT (ofs
) = record
;
6380 base
= build_decl (FIELD_DECL
, get_identifier ("__base"),
6382 DECL_FIELD_CONTEXT (base
) = record
;
6383 TREE_CHAIN (base
) = ofs
;
6385 TYPE_FIELDS (record
) = base
;
6386 layout_type (record
);
6391 /* Perform any needed actions needed for a function that is receiving a
6392 variable number of arguments.
6394 On the Alpha, we allocate space for all 12 arg registers, but only
6395 push those that are remaining. However, if NO registers need to be
6396 saved, don't allocate any space. This is not only because we won't
6397 need the space, but because AP includes the current_pretend_args_size
6398 and we don't want to mess up any ap-relative addresses already made.
6400 If we are not to use the floating-point registers, save the integer
6401 registers where we would put the floating-point registers. This is
6402 not the most efficient way to implement varargs with just one register
6403 class, but it isn't worth doing anything more efficient in this rare
6408 alpha_setup_incoming_varargs(cum
, mode
, type
, pretend_size
, no_rtl
)
6409 CUMULATIVE_ARGS cum
;
6410 enum machine_mode mode ATTRIBUTE_UNUSED
;
6411 tree type ATTRIBUTE_UNUSED
;
6420 int set
= get_varargs_alias_set ();
6423 tmp
= gen_rtx_MEM (BLKmode
,
6424 plus_constant (virtual_incoming_args_rtx
,
6425 (cum
+ 6) * UNITS_PER_WORD
));
6426 set_mem_alias_set (tmp
, set
);
6427 move_block_from_reg (16 + cum
, tmp
, 6 - cum
);
6429 tmp
= gen_rtx_MEM (BLKmode
,
6430 plus_constant (virtual_incoming_args_rtx
,
6431 cum
* UNITS_PER_WORD
));
6432 set_mem_alias_set (tmp
, set
);
6433 move_block_from_reg (16 + (TARGET_FPREGS
? 32 : 0) + cum
, tmp
,
6436 *pretend_size
= 12 * UNITS_PER_WORD
;
6441 alpha_va_start (valist
, nextarg
)
6443 rtx nextarg ATTRIBUTE_UNUSED
;
6445 HOST_WIDE_INT offset
;
6446 tree t
, offset_field
, base_field
;
6448 if (TREE_CODE (TREE_TYPE (valist
)) == ERROR_MARK
)
6451 if (TARGET_ABI_UNICOSMK
)
6452 std_expand_builtin_va_start (valist
, nextarg
);
6454 /* For Unix, SETUP_INCOMING_VARARGS moves the starting address base
6455 up by 48, storing fp arg registers in the first 48 bytes, and the
6456 integer arg registers in the next 48 bytes. This is only done,
6457 however, if any integer registers need to be stored.
6459 If no integer registers need be stored, then we must subtract 48
6460 in order to account for the integer arg registers which are counted
6461 in argsize above, but which are not actually stored on the stack.
6462 Must further be careful here about structures straddling the last
6463 integer argument register; that futzes with pretend_args_size,
6464 which changes the meaning of AP. */
6467 offset
= TARGET_ABI_OPEN_VMS
? UNITS_PER_WORD
: 6 * UNITS_PER_WORD
;
6469 offset
= -6 * UNITS_PER_WORD
+ current_function_pretend_args_size
;
6471 if (TARGET_ABI_OPEN_VMS
)
6473 nextarg
= plus_constant (nextarg
, offset
);
6474 nextarg
= plus_constant (nextarg
, NUM_ARGS
* UNITS_PER_WORD
);
6475 t
= build (MODIFY_EXPR
, TREE_TYPE (valist
), valist
,
6476 make_tree (ptr_type_node
, nextarg
));
6477 TREE_SIDE_EFFECTS (t
) = 1;
6479 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
6483 base_field
= TYPE_FIELDS (TREE_TYPE (valist
));
6484 offset_field
= TREE_CHAIN (base_field
);
6486 base_field
= build (COMPONENT_REF
, TREE_TYPE (base_field
),
6487 valist
, base_field
);
6488 offset_field
= build (COMPONENT_REF
, TREE_TYPE (offset_field
),
6489 valist
, offset_field
);
6491 t
= make_tree (ptr_type_node
, virtual_incoming_args_rtx
);
6492 t
= build (PLUS_EXPR
, ptr_type_node
, t
, build_int_2 (offset
, 0));
6493 t
= build (MODIFY_EXPR
, TREE_TYPE (base_field
), base_field
, t
);
6494 TREE_SIDE_EFFECTS (t
) = 1;
6495 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
6497 t
= build_int_2 (NUM_ARGS
* UNITS_PER_WORD
, 0);
6498 t
= build (MODIFY_EXPR
, TREE_TYPE (offset_field
), offset_field
, t
);
6499 TREE_SIDE_EFFECTS (t
) = 1;
6500 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
6505 alpha_va_arg (valist
, type
)
6509 tree t
, type_size
, rounded_size
;
6510 tree offset_field
, base_field
, addr_tree
, addend
;
6511 tree wide_type
, wide_ofs
;
6514 if (TARGET_ABI_OPEN_VMS
|| TARGET_ABI_UNICOSMK
)
6515 return std_expand_builtin_va_arg (valist
, type
);
6517 if (type
== error_mark_node
6518 || (type_size
= TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type
))) == NULL
6519 || TREE_OVERFLOW (type_size
))
6520 rounded_size
= size_zero_node
;
6522 rounded_size
= fold (build (MULT_EXPR
, sizetype
,
6523 fold (build (TRUNC_DIV_EXPR
, sizetype
,
6524 fold (build (PLUS_EXPR
, sizetype
,
6530 base_field
= TYPE_FIELDS (TREE_TYPE (valist
));
6531 offset_field
= TREE_CHAIN (base_field
);
6533 base_field
= build (COMPONENT_REF
, TREE_TYPE (base_field
),
6534 valist
, base_field
);
6535 offset_field
= build (COMPONENT_REF
, TREE_TYPE (offset_field
),
6536 valist
, offset_field
);
6538 /* If the type could not be passed in registers, skip the block
6539 reserved for the registers. */
6540 if (MUST_PASS_IN_STACK (TYPE_MODE (type
), type
))
6542 t
= build (MODIFY_EXPR
, TREE_TYPE (offset_field
), offset_field
,
6543 build (MAX_EXPR
, TREE_TYPE (offset_field
),
6544 offset_field
, build_int_2 (6*8, 0)));
6545 TREE_SIDE_EFFECTS (t
) = 1;
6546 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
6549 wide_type
= make_signed_type (64);
6550 wide_ofs
= save_expr (build1 (CONVERT_EXPR
, wide_type
, offset_field
));
6554 if (TYPE_MODE (type
) == TFmode
|| TYPE_MODE (type
) == TCmode
)
6557 rounded_size
= size_int (UNITS_PER_WORD
);
6559 else if (FLOAT_TYPE_P (type
))
6561 tree fpaddend
, cond
;
6563 fpaddend
= fold (build (PLUS_EXPR
, TREE_TYPE (addend
),
6564 addend
, build_int_2 (-6*8, 0)));
6566 cond
= fold (build (LT_EXPR
, integer_type_node
,
6567 wide_ofs
, build_int_2 (6*8, 0)));
6569 addend
= fold (build (COND_EXPR
, TREE_TYPE (addend
), cond
,
6573 addr_tree
= build (PLUS_EXPR
, TREE_TYPE (base_field
),
6574 base_field
, addend
);
6576 addr
= expand_expr (addr_tree
, NULL_RTX
, Pmode
, EXPAND_NORMAL
);
6577 addr
= copy_to_reg (addr
);
6579 t
= build (MODIFY_EXPR
, TREE_TYPE (offset_field
), offset_field
,
6580 build (PLUS_EXPR
, TREE_TYPE (offset_field
),
6581 offset_field
, rounded_size
));
6582 TREE_SIDE_EFFECTS (t
) = 1;
6583 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
6587 addr
= force_reg (Pmode
, addr
);
6588 addr
= gen_rtx_MEM (Pmode
, addr
);
6598 ALPHA_BUILTIN_CMPBGE
,
6599 ALPHA_BUILTIN_EXTBL
,
6600 ALPHA_BUILTIN_EXTWL
,
6601 ALPHA_BUILTIN_EXTLL
,
6602 ALPHA_BUILTIN_EXTQL
,
6603 ALPHA_BUILTIN_EXTWH
,
6604 ALPHA_BUILTIN_EXTLH
,
6605 ALPHA_BUILTIN_EXTQH
,
6606 ALPHA_BUILTIN_INSBL
,
6607 ALPHA_BUILTIN_INSWL
,
6608 ALPHA_BUILTIN_INSLL
,
6609 ALPHA_BUILTIN_INSQL
,
6610 ALPHA_BUILTIN_INSWH
,
6611 ALPHA_BUILTIN_INSLH
,
6612 ALPHA_BUILTIN_INSQH
,
6613 ALPHA_BUILTIN_MSKBL
,
6614 ALPHA_BUILTIN_MSKWL
,
6615 ALPHA_BUILTIN_MSKLL
,
6616 ALPHA_BUILTIN_MSKQL
,
6617 ALPHA_BUILTIN_MSKWH
,
6618 ALPHA_BUILTIN_MSKLH
,
6619 ALPHA_BUILTIN_MSKQH
,
6620 ALPHA_BUILTIN_UMULH
,
6622 ALPHA_BUILTIN_ZAPNOT
,
6623 ALPHA_BUILTIN_AMASK
,
6624 ALPHA_BUILTIN_IMPLVER
,
6626 ALPHA_BUILTIN_THREAD_POINTER
,
6627 ALPHA_BUILTIN_SET_THREAD_POINTER
,
6630 ALPHA_BUILTIN_MINUB8
,
6631 ALPHA_BUILTIN_MINSB8
,
6632 ALPHA_BUILTIN_MINUW4
,
6633 ALPHA_BUILTIN_MINSW4
,
6634 ALPHA_BUILTIN_MAXUB8
,
6635 ALPHA_BUILTIN_MAXSB8
,
6636 ALPHA_BUILTIN_MAXUW4
,
6637 ALPHA_BUILTIN_MAXSW4
,
6641 ALPHA_BUILTIN_UNPKBL
,
6642 ALPHA_BUILTIN_UNPKBW
,
6647 ALPHA_BUILTIN_CTPOP
,
6652 static unsigned int const code_for_builtin
[ALPHA_BUILTIN_max
] = {
6653 CODE_FOR_builtin_cmpbge
,
6654 CODE_FOR_builtin_extbl
,
6655 CODE_FOR_builtin_extwl
,
6656 CODE_FOR_builtin_extll
,
6657 CODE_FOR_builtin_extql
,
6658 CODE_FOR_builtin_extwh
,
6659 CODE_FOR_builtin_extlh
,
6660 CODE_FOR_builtin_extqh
,
6661 CODE_FOR_builtin_insbl
,
6662 CODE_FOR_builtin_inswl
,
6663 CODE_FOR_builtin_insll
,
6664 CODE_FOR_builtin_insql
,
6665 CODE_FOR_builtin_inswh
,
6666 CODE_FOR_builtin_inslh
,
6667 CODE_FOR_builtin_insqh
,
6668 CODE_FOR_builtin_mskbl
,
6669 CODE_FOR_builtin_mskwl
,
6670 CODE_FOR_builtin_mskll
,
6671 CODE_FOR_builtin_mskql
,
6672 CODE_FOR_builtin_mskwh
,
6673 CODE_FOR_builtin_msklh
,
6674 CODE_FOR_builtin_mskqh
,
6675 CODE_FOR_umuldi3_highpart
,
6676 CODE_FOR_builtin_zap
,
6677 CODE_FOR_builtin_zapnot
,
6678 CODE_FOR_builtin_amask
,
6679 CODE_FOR_builtin_implver
,
6680 CODE_FOR_builtin_rpcc
,
6685 CODE_FOR_builtin_minub8
,
6686 CODE_FOR_builtin_minsb8
,
6687 CODE_FOR_builtin_minuw4
,
6688 CODE_FOR_builtin_minsw4
,
6689 CODE_FOR_builtin_maxub8
,
6690 CODE_FOR_builtin_maxsb8
,
6691 CODE_FOR_builtin_maxuw4
,
6692 CODE_FOR_builtin_maxsw4
,
6693 CODE_FOR_builtin_perr
,
6694 CODE_FOR_builtin_pklb
,
6695 CODE_FOR_builtin_pkwb
,
6696 CODE_FOR_builtin_unpkbl
,
6697 CODE_FOR_builtin_unpkbw
,
6700 CODE_FOR_builtin_cttz
,
6701 CODE_FOR_builtin_ctlz
,
6702 CODE_FOR_builtin_ctpop
6705 struct alpha_builtin_def
6708 enum alpha_builtin code
;
6709 unsigned int target_mask
;
6712 static struct alpha_builtin_def
const zero_arg_builtins
[] = {
6713 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER
, 0 },
6714 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC
, 0 }
6717 static struct alpha_builtin_def
const one_arg_builtins
[] = {
6718 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK
, 0 },
6719 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB
, MASK_MAX
},
6720 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB
, MASK_MAX
},
6721 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL
, MASK_MAX
},
6722 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW
, MASK_MAX
},
6723 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ
, MASK_CIX
},
6724 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ
, MASK_CIX
},
6725 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP
, MASK_CIX
}
6728 static struct alpha_builtin_def
const two_arg_builtins
[] = {
6729 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE
, 0 },
6730 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL
, 0 },
6731 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL
, 0 },
6732 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL
, 0 },
6733 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL
, 0 },
6734 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH
, 0 },
6735 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH
, 0 },
6736 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH
, 0 },
6737 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL
, 0 },
6738 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL
, 0 },
6739 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL
, 0 },
6740 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL
, 0 },
6741 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH
, 0 },
6742 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH
, 0 },
6743 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH
, 0 },
6744 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL
, 0 },
6745 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL
, 0 },
6746 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL
, 0 },
6747 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL
, 0 },
6748 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH
, 0 },
6749 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH
, 0 },
6750 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH
, 0 },
6751 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH
, 0 },
6752 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP
, 0 },
6753 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT
, 0 },
6754 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8
, MASK_MAX
},
6755 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8
, MASK_MAX
},
6756 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4
, MASK_MAX
},
6757 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4
, MASK_MAX
},
6758 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8
, MASK_MAX
},
6759 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8
, MASK_MAX
},
6760 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4
, MASK_MAX
},
6761 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4
, MASK_MAX
},
6762 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR
, MASK_MAX
}
6766 alpha_init_builtins ()
6768 const struct alpha_builtin_def
*p
;
6772 ftype
= build_function_type (long_integer_type_node
, void_list_node
);
6774 p
= zero_arg_builtins
;
6775 for (i
= 0; i
< ARRAY_SIZE (zero_arg_builtins
); ++i
, ++p
)
6776 if ((target_flags
& p
->target_mask
) == p
->target_mask
)
6777 builtin_function (p
->name
, ftype
, p
->code
, BUILT_IN_MD
,
6780 ftype
= build_function_type_list (long_integer_type_node
,
6781 long_integer_type_node
, NULL_TREE
);
6783 p
= one_arg_builtins
;
6784 for (i
= 0; i
< ARRAY_SIZE (one_arg_builtins
); ++i
, ++p
)
6785 if ((target_flags
& p
->target_mask
) == p
->target_mask
)
6786 builtin_function (p
->name
, ftype
, p
->code
, BUILT_IN_MD
,
6789 ftype
= build_function_type_list (long_integer_type_node
,
6790 long_integer_type_node
,
6791 long_integer_type_node
, NULL_TREE
);
6793 p
= two_arg_builtins
;
6794 for (i
= 0; i
< ARRAY_SIZE (two_arg_builtins
); ++i
, ++p
)
6795 if ((target_flags
& p
->target_mask
) == p
->target_mask
)
6796 builtin_function (p
->name
, ftype
, p
->code
, BUILT_IN_MD
,
6799 ftype
= build_function_type (ptr_type_node
, void_list_node
);
6800 builtin_function ("__builtin_thread_pointer", ftype
,
6801 ALPHA_BUILTIN_THREAD_POINTER
, BUILT_IN_MD
,
6804 ftype
= build_function_type_list (void_type_node
, ptr_type_node
, NULL_TREE
);
6805 builtin_function ("__builtin_set_thread_pointer", ftype
,
6806 ALPHA_BUILTIN_SET_THREAD_POINTER
, BUILT_IN_MD
,
6810 /* Expand an expression EXP that calls a built-in function,
6811 with result going to TARGET if that's convenient
6812 (and in mode MODE if that's convenient).
6813 SUBTARGET may be used as the target for computing one of EXP's operands.
6814 IGNORE is nonzero if the value is to be ignored. */
6817 alpha_expand_builtin (exp
, target
, subtarget
, mode
, ignore
)
6820 rtx subtarget ATTRIBUTE_UNUSED
;
6821 enum machine_mode mode ATTRIBUTE_UNUSED
;
6822 int ignore ATTRIBUTE_UNUSED
;
6826 tree fndecl
= TREE_OPERAND (TREE_OPERAND (exp
, 0), 0);
6827 unsigned int fcode
= DECL_FUNCTION_CODE (fndecl
);
6828 tree arglist
= TREE_OPERAND (exp
, 1);
6829 enum insn_code icode
;
6830 rtx op
[MAX_ARGS
], pat
;
6834 if (fcode
>= ALPHA_BUILTIN_max
)
6835 internal_error ("bad builtin fcode");
6836 icode
= code_for_builtin
[fcode
];
6838 internal_error ("bad builtin fcode");
6840 nonvoid
= TREE_TYPE (TREE_TYPE (fndecl
)) != void_type_node
;
6842 for (arglist
= TREE_OPERAND (exp
, 1), arity
= 0;
6844 arglist
= TREE_CHAIN (arglist
), arity
++)
6846 const struct insn_operand_data
*insn_op
;
6848 tree arg
= TREE_VALUE (arglist
);
6849 if (arg
== error_mark_node
)
6851 if (arity
> MAX_ARGS
)
6854 insn_op
= &insn_data
[icode
].operand
[arity
+ nonvoid
];
6856 op
[arity
] = expand_expr (arg
, NULL_RTX
, insn_op
->mode
, 0);
6858 if (!(*insn_op
->predicate
) (op
[arity
], insn_op
->mode
))
6859 op
[arity
] = copy_to_mode_reg (insn_op
->mode
, op
[arity
]);
6864 enum machine_mode tmode
= insn_data
[icode
].operand
[0].mode
;
6866 || GET_MODE (target
) != tmode
6867 || !(*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
6868 target
= gen_reg_rtx (tmode
);
6874 pat
= GEN_FCN (icode
) (target
);
6878 pat
= GEN_FCN (icode
) (target
, op
[0]);
6880 pat
= GEN_FCN (icode
) (op
[0]);
6883 pat
= GEN_FCN (icode
) (target
, op
[0], op
[1]);
6898 /* This page contains routines that are used to determine what the function
6899 prologue and epilogue code will do and write them out. */
6901 /* Compute the size of the save area in the stack. */
6903 /* These variables are used for communication between the following functions.
6904 They indicate various things about the current function being compiled
6905 that are used to tell what kind of prologue, epilogue and procedure
6906 descriptor to generate. */
/* Kind of procedure we are compiling, per the VMS calling standard:
   PT_NULL needs no frame at all, PT_REGISTER keeps everything in
   registers, PT_STACK needs a real stack frame.  */
enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
static enum alpha_procedure_types alpha_procedure_type;

/* Register number (either FP or SP) that is used to unwind the frame.  */
static int vms_unwind_regno;

/* Register number used to save FP.  We need not have one for RA since
   we don't modify it for register procedures.  This is only defined
   for register frame procedures.  */
static int vms_save_fp_regno;

/* Register number used to reference objects off our PV.  */
static int vms_base_regno;
6923 /* Compute register masks for saved registers. */
6926 alpha_sa_mask (imaskP
, fmaskP
)
6927 unsigned long *imaskP
;
6928 unsigned long *fmaskP
;
6930 unsigned long imask
= 0;
6931 unsigned long fmask
= 0;
6934 /* Irritatingly, there are two kinds of thunks -- those created with
6935 TARGET_ASM_OUTPUT_MI_THUNK and those with DECL_THUNK_P that go
6936 through the regular part of the compiler. In the
6937 TARGET_ASM_OUTPUT_MI_THUNK case we don't have valid register life
6938 info, but assemble_start_function wants to output .frame and
6939 .mask directives. */
6940 if (current_function_is_thunk
&& !no_new_pseudos
)
6947 if (TARGET_ABI_OPEN_VMS
&& alpha_procedure_type
== PT_STACK
)
6948 imask
|= (1UL << HARD_FRAME_POINTER_REGNUM
);
6950 /* One for every register we have to save. */
6951 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
6952 if (! fixed_regs
[i
] && ! call_used_regs
[i
]
6953 && regs_ever_live
[i
] && i
!= REG_RA
6954 && (!TARGET_ABI_UNICOSMK
|| i
!= HARD_FRAME_POINTER_REGNUM
))
6957 imask
|= (1UL << i
);
6959 fmask
|= (1UL << (i
- 32));
6962 /* We need to restore these for the handler. */
6963 if (current_function_calls_eh_return
)
6966 unsigned regno
= EH_RETURN_DATA_REGNO (i
);
6967 if (regno
== INVALID_REGNUM
)
6969 imask
|= 1UL << regno
;
6972 /* If any register spilled, then spill the return address also. */
6973 /* ??? This is required by the Digital stack unwind specification
6974 and isn't needed if we're doing Dwarf2 unwinding. */
6975 if (imask
|| fmask
|| alpha_ra_ever_killed ())
6976 imask
|= (1UL << REG_RA
);
6985 unsigned long mask
[2];
6989 alpha_sa_mask (&mask
[0], &mask
[1]);
6991 if (TARGET_ABI_UNICOSMK
)
6993 if (mask
[0] || mask
[1])
6998 for (j
= 0; j
< 2; ++j
)
6999 for (i
= 0; i
< 32; ++i
)
7000 if ((mask
[j
] >> i
) & 1)
7004 if (TARGET_ABI_UNICOSMK
)
7006 /* We might not need to generate a frame if we don't make any calls
7007 (including calls to __T3E_MISMATCH if this is a vararg function),
7008 don't have any local variables which require stack slots, don't
7009 use alloca and have not determined that we need a frame for other
7012 alpha_procedure_type
7013 = (sa_size
|| get_frame_size() != 0
7014 || current_function_outgoing_args_size
7015 || current_function_stdarg
|| current_function_calls_alloca
7016 || frame_pointer_needed
)
7017 ? PT_STACK
: PT_REGISTER
;
7019 /* Always reserve space for saving callee-saved registers if we
7020 need a frame as required by the calling convention. */
7021 if (alpha_procedure_type
== PT_STACK
)
7024 else if (TARGET_ABI_OPEN_VMS
)
7026 /* Start by assuming we can use a register procedure if we don't
7027 make any calls (REG_RA not used) or need to save any
7028 registers and a stack procedure if we do. */
7029 if ((mask
[0] >> REG_RA
) & 1)
7030 alpha_procedure_type
= PT_STACK
;
7031 else if (get_frame_size() != 0)
7032 alpha_procedure_type
= PT_REGISTER
;
7034 alpha_procedure_type
= PT_NULL
;
7036 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7037 made the final decision on stack procedure vs register procedure. */
7038 if (alpha_procedure_type
== PT_STACK
)
7041 /* Decide whether to refer to objects off our PV via FP or PV.
7042 If we need FP for something else or if we receive a nonlocal
7043 goto (which expects PV to contain the value), we must use PV.
7044 Otherwise, start by assuming we can use FP. */
7047 = (frame_pointer_needed
7048 || current_function_has_nonlocal_label
7049 || alpha_procedure_type
== PT_STACK
7050 || current_function_outgoing_args_size
)
7051 ? REG_PV
: HARD_FRAME_POINTER_REGNUM
;
7053 /* If we want to copy PV into FP, we need to find some register
7054 in which to save FP. */
7056 vms_save_fp_regno
= -1;
7057 if (vms_base_regno
== HARD_FRAME_POINTER_REGNUM
)
7058 for (i
= 0; i
< 32; i
++)
7059 if (! fixed_regs
[i
] && call_used_regs
[i
] && ! regs_ever_live
[i
])
7060 vms_save_fp_regno
= i
;
7062 if (vms_save_fp_regno
== -1 && alpha_procedure_type
== PT_REGISTER
)
7063 vms_base_regno
= REG_PV
, alpha_procedure_type
= PT_STACK
;
7064 else if (alpha_procedure_type
== PT_NULL
)
7065 vms_base_regno
= REG_PV
;
7067 /* Stack unwinding should be done via FP unless we use it for PV. */
7068 vms_unwind_regno
= (vms_base_regno
== REG_PV
7069 ? HARD_FRAME_POINTER_REGNUM
: STACK_POINTER_REGNUM
);
7071 /* If this is a stack procedure, allow space for saving FP and RA. */
7072 if (alpha_procedure_type
== PT_STACK
)
7077 /* Our size must be even (multiple of 16 bytes). */
7085 /* Define the offset between two registers, one to be eliminated,
7086 and the other its replacement, at the start of a routine. */
7089 alpha_initial_elimination_offset (from
, to
)
7090 unsigned int from
, to ATTRIBUTE_UNUSED
;
7094 ret
= alpha_sa_size ();
7095 ret
+= ALPHA_ROUND (current_function_outgoing_args_size
);
7097 if (from
== FRAME_POINTER_REGNUM
)
7099 else if (from
== ARG_POINTER_REGNUM
)
7100 ret
+= (ALPHA_ROUND (get_frame_size ()
7101 + current_function_pretend_args_size
)
7102 - current_function_pretend_args_size
);
7110 alpha_pv_save_size ()
7113 return alpha_procedure_type
== PT_STACK
? 8 : 0;
7120 return vms_unwind_regno
== HARD_FRAME_POINTER_REGNUM
;
7123 #if TARGET_ABI_OPEN_VMS
7125 const struct attribute_spec vms_attribute_table
[] =
7127 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
7128 { "overlaid", 0, 0, true, false, false, NULL
},
7129 { "global", 0, 0, true, false, false, NULL
},
7130 { "initialize", 0, 0, true, false, false, NULL
},
7131 { NULL
, 0, 0, false, false, false, NULL
}
7137 find_lo_sum_using_gp (px
, data
)
7139 void *data ATTRIBUTE_UNUSED
;
7141 return GET_CODE (*px
) == LO_SUM
&& XEXP (*px
, 0) == pic_offset_table_rtx
;
7145 alpha_find_lo_sum_using_gp (insn
)
7148 return for_each_rtx (&PATTERN (insn
), find_lo_sum_using_gp
, NULL
) > 0;
7152 alpha_does_function_need_gp ()
7156 /* The GP being variable is an OSF abi thing. */
7157 if (! TARGET_ABI_OSF
)
7160 if (TARGET_PROFILING_NEEDS_GP
&& current_function_profile
)
7163 if (current_function_is_thunk
)
7166 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7167 Even if we are a static function, we still need to do this in case
7168 our address is taken and passed to something like qsort. */
7170 push_topmost_sequence ();
7171 insn
= get_insns ();
7172 pop_topmost_sequence ();
7174 for (; insn
; insn
= NEXT_INSN (insn
))
7176 && GET_CODE (PATTERN (insn
)) != USE
7177 && GET_CODE (PATTERN (insn
)) != CLOBBER
7178 && get_attr_usegp (insn
))
7184 /* Write a version stamp. Don't write anything if we are running as a
7185 cross-compiler. Otherwise, use the versions in /usr/include/stamp.h. */
7192 alpha_write_verstamp (file
)
7193 FILE *file ATTRIBUTE_UNUSED
;
7196 fprintf (file
, "\t.verstamp %d %d\n", MS_STAMP
, LS_STAMP
);
7200 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7204 set_frame_related_p ()
7206 rtx seq
= get_insns ();
7217 while (insn
!= NULL_RTX
)
7219 RTX_FRAME_RELATED_P (insn
) = 1;
7220 insn
= NEXT_INSN (insn
);
7222 seq
= emit_insn (seq
);
7226 seq
= emit_insn (seq
);
7227 RTX_FRAME_RELATED_P (seq
) = 1;
7232 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
7234 /* Write function prologue. */
7236 /* On vms we have two kinds of functions:
7238 - stack frame (PROC_STACK)
7239 these are 'normal' functions with local vars and which are
7240 calling other functions
7241 - register frame (PROC_REGISTER)
7242 keeps all data in registers, needs no stack
7244 We must pass this to the assembler so it can generate the
7245 proper pdsc (procedure descriptor)
7246 This is done with the '.pdesc' command.
7248 On not-vms, we don't really differentiate between the two, as we can
7249 simply allocate stack without saving registers. */
7252 alpha_expand_prologue ()
7254 /* Registers to save. */
7255 unsigned long imask
= 0;
7256 unsigned long fmask
= 0;
7257 /* Stack space needed for pushing registers clobbered by us. */
7258 HOST_WIDE_INT sa_size
;
7259 /* Complete stack size needed. */
7260 HOST_WIDE_INT frame_size
;
7261 /* Offset from base reg to register save area. */
7262 HOST_WIDE_INT reg_offset
;
7266 sa_size
= alpha_sa_size ();
7268 frame_size
= get_frame_size ();
7269 if (TARGET_ABI_OPEN_VMS
)
7270 frame_size
= ALPHA_ROUND (sa_size
7271 + (alpha_procedure_type
== PT_STACK
? 8 : 0)
7273 + current_function_pretend_args_size
);
7274 else if (TARGET_ABI_UNICOSMK
)
7275 /* We have to allocate space for the DSIB if we generate a frame. */
7276 frame_size
= ALPHA_ROUND (sa_size
7277 + (alpha_procedure_type
== PT_STACK
? 48 : 0))
7278 + ALPHA_ROUND (frame_size
7279 + current_function_outgoing_args_size
);
7281 frame_size
= (ALPHA_ROUND (current_function_outgoing_args_size
)
7283 + ALPHA_ROUND (frame_size
7284 + current_function_pretend_args_size
));
7286 if (TARGET_ABI_OPEN_VMS
)
7289 reg_offset
= ALPHA_ROUND (current_function_outgoing_args_size
);
7291 alpha_sa_mask (&imask
, &fmask
);
7293 /* Emit an insn to reload GP, if needed. */
7296 alpha_function_needs_gp
= alpha_does_function_need_gp ();
7297 if (alpha_function_needs_gp
)
7298 emit_insn (gen_prologue_ldgp ());
7301 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7302 the call to mcount ourselves, rather than having the linker do it
7303 magically in response to -pg. Since _mcount has special linkage,
7304 don't represent the call as a call. */
7305 if (TARGET_PROFILING_NEEDS_GP
&& current_function_profile
)
7306 emit_insn (gen_prologue_mcount ());
7308 if (TARGET_ABI_UNICOSMK
)
7309 unicosmk_gen_dsib (&imask
);
7311 /* Adjust the stack by the frame size. If the frame size is > 4096
7312 bytes, we need to be sure we probe somewhere in the first and last
7313 4096 bytes (we can probably get away without the latter test) and
7314 every 8192 bytes in between. If the frame size is > 32768, we
7315 do this in a loop. Otherwise, we generate the explicit probe
7318 Note that we are only allowed to adjust sp once in the prologue. */
7320 if (frame_size
<= 32768)
7322 if (frame_size
> 4096)
7327 emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
7330 while ((probed
+= 8192) < frame_size
);
7332 /* We only have to do this probe if we aren't saving registers. */
7333 if (sa_size
== 0 && probed
+ 4096 < frame_size
)
7334 emit_insn (gen_probe_stack (GEN_INT (-frame_size
)));
7337 if (frame_size
!= 0)
7338 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx
, stack_pointer_rtx
,
7339 GEN_INT (TARGET_ABI_UNICOSMK
7345 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7346 number of 8192 byte blocks to probe. We then probe each block
7347 in the loop and then set SP to the proper location. If the
7348 amount remaining is > 4096, we have to do one more probe if we
7349 are not saving any registers. */
7351 HOST_WIDE_INT blocks
= (frame_size
+ 4096) / 8192;
7352 HOST_WIDE_INT leftover
= frame_size
+ 4096 - blocks
* 8192;
7353 rtx ptr
= gen_rtx_REG (DImode
, 22);
7354 rtx count
= gen_rtx_REG (DImode
, 23);
7357 emit_move_insn (count
, GEN_INT (blocks
));
7358 emit_insn (gen_adddi3 (ptr
, stack_pointer_rtx
,
7359 GEN_INT (TARGET_ABI_UNICOSMK
? 4096 - 64 : 4096)));
7361 /* Because of the difficulty in emitting a new basic block this
7362 late in the compilation, generate the loop as a single insn. */
7363 emit_insn (gen_prologue_stack_probe_loop (count
, ptr
));
7365 if (leftover
> 4096 && sa_size
== 0)
7367 rtx last
= gen_rtx_MEM (DImode
, plus_constant (ptr
, -leftover
));
7368 MEM_VOLATILE_P (last
) = 1;
7369 emit_move_insn (last
, const0_rtx
);
7372 if (TARGET_ABI_WINDOWS_NT
)
7374 /* For NT stack unwind (done by 'reverse execution'), it's
7375 not OK to take the result of a loop, even though the value
7376 is already in ptr, so we reload it via a single operation
7377 and subtract it to sp.
7379 Yes, that's correct -- we have to reload the whole constant
7380 into a temporary via ldah+lda then subtract from sp. To
7381 ensure we get ldah+lda, we use a special pattern. */
7383 HOST_WIDE_INT lo
, hi
;
7384 lo
= ((frame_size
& 0xffff) ^ 0x8000) - 0x8000;
7385 hi
= frame_size
- lo
;
7387 emit_move_insn (ptr
, GEN_INT (hi
));
7388 emit_insn (gen_nt_lda (ptr
, GEN_INT (lo
)));
7389 seq
= emit_insn (gen_subdi3 (stack_pointer_rtx
, stack_pointer_rtx
,
7394 seq
= emit_insn (gen_adddi3 (stack_pointer_rtx
, ptr
,
7395 GEN_INT (-leftover
)));
7398 /* This alternative is special, because the DWARF code cannot
7399 possibly intuit through the loop above. So we invent this
7400 note it looks at instead. */
7401 RTX_FRAME_RELATED_P (seq
) = 1;
7403 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR
,
7404 gen_rtx_SET (VOIDmode
, stack_pointer_rtx
,
7405 gen_rtx_PLUS (Pmode
, stack_pointer_rtx
,
7406 GEN_INT (TARGET_ABI_UNICOSMK
7412 if (!TARGET_ABI_UNICOSMK
)
7414 /* Cope with very large offsets to the register save area. */
7415 sa_reg
= stack_pointer_rtx
;
7416 if (reg_offset
+ sa_size
> 0x8000)
7418 int low
= ((reg_offset
& 0xffff) ^ 0x8000) - 0x8000;
7421 if (low
+ sa_size
<= 0x8000)
7422 bias
= reg_offset
- low
, reg_offset
= low
;
7424 bias
= reg_offset
, reg_offset
= 0;
7426 sa_reg
= gen_rtx_REG (DImode
, 24);
7427 FRP (emit_insn (gen_adddi3 (sa_reg
, stack_pointer_rtx
,
7431 /* Save regs in stack order. Beginning with VMS PV. */
7432 if (TARGET_ABI_OPEN_VMS
&& alpha_procedure_type
== PT_STACK
)
7434 mem
= gen_rtx_MEM (DImode
, stack_pointer_rtx
);
7435 set_mem_alias_set (mem
, alpha_sr_alias_set
);
7436 FRP (emit_move_insn (mem
, gen_rtx_REG (DImode
, REG_PV
)));
7439 /* Save register RA next. */
7440 if (imask
& (1UL << REG_RA
))
7442 mem
= gen_rtx_MEM (DImode
, plus_constant (sa_reg
, reg_offset
));
7443 set_mem_alias_set (mem
, alpha_sr_alias_set
);
7444 FRP (emit_move_insn (mem
, gen_rtx_REG (DImode
, REG_RA
)));
7445 imask
&= ~(1UL << REG_RA
);
7449 /* Now save any other registers required to be saved. */
7450 for (i
= 0; i
< 32; i
++)
7451 if (imask
& (1UL << i
))
7453 mem
= gen_rtx_MEM (DImode
, plus_constant (sa_reg
, reg_offset
));
7454 set_mem_alias_set (mem
, alpha_sr_alias_set
);
7455 FRP (emit_move_insn (mem
, gen_rtx_REG (DImode
, i
)));
7459 for (i
= 0; i
< 32; i
++)
7460 if (fmask
& (1UL << i
))
7462 mem
= gen_rtx_MEM (DFmode
, plus_constant (sa_reg
, reg_offset
));
7463 set_mem_alias_set (mem
, alpha_sr_alias_set
);
7464 FRP (emit_move_insn (mem
, gen_rtx_REG (DFmode
, i
+32)));
7468 else if (TARGET_ABI_UNICOSMK
&& alpha_procedure_type
== PT_STACK
)
7470 /* The standard frame on the T3E includes space for saving registers.
7471 We just have to use it. We don't have to save the return address and
7472 the old frame pointer here - they are saved in the DSIB. */
7475 for (i
= 9; i
< 15; i
++)
7476 if (imask
& (1UL << i
))
7478 mem
= gen_rtx_MEM (DImode
, plus_constant(hard_frame_pointer_rtx
,
7480 set_mem_alias_set (mem
, alpha_sr_alias_set
);
7481 FRP (emit_move_insn (mem
, gen_rtx_REG (DImode
, i
)));
7484 for (i
= 2; i
< 10; i
++)
7485 if (fmask
& (1UL << i
))
7487 mem
= gen_rtx_MEM (DFmode
, plus_constant (hard_frame_pointer_rtx
,
7489 set_mem_alias_set (mem
, alpha_sr_alias_set
);
7490 FRP (emit_move_insn (mem
, gen_rtx_REG (DFmode
, i
+32)));
7495 if (TARGET_ABI_OPEN_VMS
)
7497 if (alpha_procedure_type
== PT_REGISTER
)
7498 /* Register frame procedures save the fp.
7499 ?? Ought to have a dwarf2 save for this. */
7500 emit_move_insn (gen_rtx_REG (DImode
, vms_save_fp_regno
),
7501 hard_frame_pointer_rtx
);
7503 if (alpha_procedure_type
!= PT_NULL
&& vms_base_regno
!= REG_PV
)
7504 emit_insn (gen_force_movdi (gen_rtx_REG (DImode
, vms_base_regno
),
7505 gen_rtx_REG (DImode
, REG_PV
)));
7507 if (alpha_procedure_type
!= PT_NULL
7508 && vms_unwind_regno
== HARD_FRAME_POINTER_REGNUM
)
7509 FRP (emit_move_insn (hard_frame_pointer_rtx
, stack_pointer_rtx
));
7511 /* If we have to allocate space for outgoing args, do it now. */
7512 if (current_function_outgoing_args_size
!= 0)
7515 = emit_move_insn (stack_pointer_rtx
,
7517 (hard_frame_pointer_rtx
,
7519 (current_function_outgoing_args_size
))));
7521 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
7522 if ! frame_pointer_needed. Setting the bit will change the CFA
7523 computation rule to use sp again, which would be wrong if we had
7524 frame_pointer_needed, as this means sp might move unpredictably
7528 frame_pointer_needed
7529 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7531 current_function_outgoing_args_size != 0
7532 => alpha_procedure_type != PT_NULL,
7534 so when we are not setting the bit here, we are guaranteed to
7535 have emitted an FRP frame pointer update just before. */
7536 RTX_FRAME_RELATED_P (seq
) = ! frame_pointer_needed
;
7539 else if (!TARGET_ABI_UNICOSMK
)
7541 /* If we need a frame pointer, set it from the stack pointer. */
7542 if (frame_pointer_needed
)
7544 if (TARGET_CAN_FAULT_IN_PROLOGUE
)
7545 FRP (emit_move_insn (hard_frame_pointer_rtx
, stack_pointer_rtx
));
7547 /* This must always be the last instruction in the
7548 prologue, thus we emit a special move + clobber. */
7549 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx
,
7550 stack_pointer_rtx
, sa_reg
)));
7554 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
7555 the prologue, for exception handling reasons, we cannot do this for
7556 any insn that might fault. We could prevent this for mems with a
7557 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
7558 have to prevent all such scheduling with a blockage.
7560 Linux, on the other hand, never bothered to implement OSF/1's
7561 exception handling, and so doesn't care about such things. Anyone
7562 planning to use dwarf2 frame-unwind info can also omit the blockage. */
7564 if (! TARGET_CAN_FAULT_IN_PROLOGUE
)
7565 emit_insn (gen_blockage ());
7568 /* Output the textual info surrounding the prologue. */
7571 alpha_start_function (file
, fnname
, decl
)
7574 tree decl ATTRIBUTE_UNUSED
;
7576 unsigned long imask
= 0;
7577 unsigned long fmask
= 0;
7578 /* Stack space needed for pushing registers clobbered by us. */
7579 HOST_WIDE_INT sa_size
;
7580 /* Complete stack size needed. */
7581 unsigned HOST_WIDE_INT frame_size
;
7582 /* Offset from base reg to register save area. */
7583 HOST_WIDE_INT reg_offset
;
7584 char *entry_label
= (char *) alloca (strlen (fnname
) + 6);
7587 /* Don't emit an extern directive for functions defined in the same file. */
7588 if (TARGET_ABI_UNICOSMK
)
7591 name_tree
= get_identifier (fnname
);
7592 TREE_ASM_WRITTEN (name_tree
) = 1;
7595 alpha_fnname
= fnname
;
7596 sa_size
= alpha_sa_size ();
7598 frame_size
= get_frame_size ();
7599 if (TARGET_ABI_OPEN_VMS
)
7600 frame_size
= ALPHA_ROUND (sa_size
7601 + (alpha_procedure_type
== PT_STACK
? 8 : 0)
7603 + current_function_pretend_args_size
);
7604 else if (TARGET_ABI_UNICOSMK
)
7605 frame_size
= ALPHA_ROUND (sa_size
7606 + (alpha_procedure_type
== PT_STACK
? 48 : 0))
7607 + ALPHA_ROUND (frame_size
7608 + current_function_outgoing_args_size
);
7610 frame_size
= (ALPHA_ROUND (current_function_outgoing_args_size
)
7612 + ALPHA_ROUND (frame_size
7613 + current_function_pretend_args_size
));
7615 if (TARGET_ABI_OPEN_VMS
)
7618 reg_offset
= ALPHA_ROUND (current_function_outgoing_args_size
);
7620 alpha_sa_mask (&imask
, &fmask
);
7622 /* Ecoff can handle multiple .file directives, so put out file and lineno.
7623 We have to do that before the .ent directive as we cannot switch
7624 files within procedures with native ecoff because line numbers are
7625 linked to procedure descriptors.
7626 Outputting the lineno helps debugging of one line functions as they
7627 would otherwise get no line number at all. Please note that we would
7628 like to put out last_linenum from final.c, but it is not accessible. */
7630 if (write_symbols
== SDB_DEBUG
)
7632 #ifdef ASM_OUTPUT_SOURCE_FILENAME
7633 ASM_OUTPUT_SOURCE_FILENAME (file
,
7634 DECL_SOURCE_FILE (current_function_decl
));
7636 #ifdef ASM_OUTPUT_SOURCE_LINE
7637 if (debug_info_level
!= DINFO_LEVEL_TERSE
)
7638 ASM_OUTPUT_SOURCE_LINE (file
,
7639 DECL_SOURCE_LINE (current_function_decl
));
7643 /* Issue function start and label. */
7644 if (TARGET_ABI_OPEN_VMS
7645 || (!TARGET_ABI_UNICOSMK
&& !flag_inhibit_size_directive
))
7647 fputs ("\t.ent ", file
);
7648 assemble_name (file
, fnname
);
7651 /* If the function needs GP, we'll write the "..ng" label there.
7652 Otherwise, do it here. */
7654 && ! alpha_function_needs_gp
7655 && ! current_function_is_thunk
)
7658 assemble_name (file
, fnname
);
7659 fputs ("..ng:\n", file
);
7663 strcpy (entry_label
, fnname
);
7664 if (TARGET_ABI_OPEN_VMS
)
7665 strcat (entry_label
, "..en");
7667 /* For public functions, the label must be globalized by appending an
7668 additional colon. */
7669 if (TARGET_ABI_UNICOSMK
&& TREE_PUBLIC (decl
))
7670 strcat (entry_label
, ":");
7672 ASM_OUTPUT_LABEL (file
, entry_label
);
7673 inside_function
= TRUE
;
7675 if (TARGET_ABI_OPEN_VMS
)
7676 fprintf (file
, "\t.base $%d\n", vms_base_regno
);
7678 if (!TARGET_ABI_OPEN_VMS
&& !TARGET_ABI_UNICOSMK
&& TARGET_IEEE_CONFORMANT
7679 && !flag_inhibit_size_directive
)
7681 /* Set flags in procedure descriptor to request IEEE-conformant
7682 math-library routines. The value we set it to is PDSC_EXC_IEEE
7683 (/usr/include/pdsc.h). */
7684 fputs ("\t.eflag 48\n", file
);
7687 /* Set up offsets to alpha virtual arg/local debugging pointer. */
7688 alpha_auto_offset
= -frame_size
+ current_function_pretend_args_size
;
7689 alpha_arg_offset
= -frame_size
+ 48;
7691 /* Describe our frame. If the frame size is larger than an integer,
7692 print it as zero to avoid an assembler error. We won't be
7693 properly describing such a frame, but that's the best we can do. */
7694 if (TARGET_ABI_UNICOSMK
)
7696 else if (TARGET_ABI_OPEN_VMS
)
7697 fprintf (file
, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC
",$26,"
7698 HOST_WIDE_INT_PRINT_DEC
"\n",
7700 frame_size
>= (1UL << 31) ? 0 : frame_size
,
7702 else if (!flag_inhibit_size_directive
)
7703 fprintf (file
, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC
",$26,%d\n",
7704 (frame_pointer_needed
7705 ? HARD_FRAME_POINTER_REGNUM
: STACK_POINTER_REGNUM
),
7706 frame_size
>= (1UL << 31) ? 0 : frame_size
,
7707 current_function_pretend_args_size
);
7709 /* Describe which registers were spilled. */
7710 if (TARGET_ABI_UNICOSMK
)
7712 else if (TARGET_ABI_OPEN_VMS
)
7715 /* ??? Does VMS care if mask contains ra? The old code didn't
7716 set it, so I don't here. */
7717 fprintf (file
, "\t.mask 0x%lx,0\n", imask
& ~(1UL << REG_RA
));
7719 fprintf (file
, "\t.fmask 0x%lx,0\n", fmask
);
7720 if (alpha_procedure_type
== PT_REGISTER
)
7721 fprintf (file
, "\t.fp_save $%d\n", vms_save_fp_regno
);
7723 else if (!flag_inhibit_size_directive
)
7727 fprintf (file
, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC
"\n", imask
,
7728 frame_size
>= (1UL << 31) ? 0 : reg_offset
- frame_size
);
7730 for (i
= 0; i
< 32; ++i
)
7731 if (imask
& (1UL << i
))
7736 fprintf (file
, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC
"\n", fmask
,
7737 frame_size
>= (1UL << 31) ? 0 : reg_offset
- frame_size
);
7740 #if TARGET_ABI_OPEN_VMS
7741 /* Ifdef'ed cause link_section are only available then. */
7742 readonly_data_section ();
7743 fprintf (file
, "\t.align 3\n");
7744 assemble_name (file
, fnname
); fputs ("..na:\n", file
);
7745 fputs ("\t.ascii \"", file
);
7746 assemble_name (file
, fnname
);
7747 fputs ("\\0\"\n", file
);
7748 alpha_need_linkage (fnname
, 1);
7753 /* Emit the .prologue note at the scheduled end of the prologue. */
7756 alpha_output_function_end_prologue (file
)
7759 if (TARGET_ABI_UNICOSMK
)
7761 else if (TARGET_ABI_OPEN_VMS
)
7762 fputs ("\t.prologue\n", file
);
7763 else if (TARGET_ABI_WINDOWS_NT
)
7764 fputs ("\t.prologue 0\n", file
);
7765 else if (!flag_inhibit_size_directive
)
7766 fprintf (file
, "\t.prologue %d\n",
7767 alpha_function_needs_gp
|| current_function_is_thunk
);
7770 /* Write function epilogue. */
7772 /* ??? At some point we will want to support full unwind, and so will
7773 need to mark the epilogue as well. At the moment, we just confuse
7776 #define FRP(exp) exp
7779 alpha_expand_epilogue ()
7781 /* Registers to save. */
7782 unsigned long imask
= 0;
7783 unsigned long fmask
= 0;
7784 /* Stack space needed for pushing registers clobbered by us. */
7785 HOST_WIDE_INT sa_size
;
7786 /* Complete stack size needed. */
7787 HOST_WIDE_INT frame_size
;
7788 /* Offset from base reg to register save area. */
7789 HOST_WIDE_INT reg_offset
;
7790 int fp_is_frame_pointer
, fp_offset
;
7791 rtx sa_reg
, sa_reg_exp
= NULL
;
7792 rtx sp_adj1
, sp_adj2
, mem
;
7796 sa_size
= alpha_sa_size ();
7798 frame_size
= get_frame_size ();
7799 if (TARGET_ABI_OPEN_VMS
)
7800 frame_size
= ALPHA_ROUND (sa_size
7801 + (alpha_procedure_type
== PT_STACK
? 8 : 0)
7803 + current_function_pretend_args_size
);
7804 else if (TARGET_ABI_UNICOSMK
)
7805 frame_size
= ALPHA_ROUND (sa_size
7806 + (alpha_procedure_type
== PT_STACK
? 48 : 0))
7807 + ALPHA_ROUND (frame_size
7808 + current_function_outgoing_args_size
);
7810 frame_size
= (ALPHA_ROUND (current_function_outgoing_args_size
)
7812 + ALPHA_ROUND (frame_size
7813 + current_function_pretend_args_size
));
7815 if (TARGET_ABI_OPEN_VMS
)
7817 if (alpha_procedure_type
== PT_STACK
)
7823 reg_offset
= ALPHA_ROUND (current_function_outgoing_args_size
);
7825 alpha_sa_mask (&imask
, &fmask
);
7828 = ((TARGET_ABI_OPEN_VMS
&& alpha_procedure_type
== PT_STACK
)
7829 || (!TARGET_ABI_OPEN_VMS
&& frame_pointer_needed
));
7831 sa_reg
= stack_pointer_rtx
;
7833 if (current_function_calls_eh_return
)
7834 eh_ofs
= EH_RETURN_STACKADJ_RTX
;
7838 if (!TARGET_ABI_UNICOSMK
&& sa_size
)
7840 /* If we have a frame pointer, restore SP from it. */
7841 if ((TARGET_ABI_OPEN_VMS
7842 && vms_unwind_regno
== HARD_FRAME_POINTER_REGNUM
)
7843 || (!TARGET_ABI_OPEN_VMS
&& frame_pointer_needed
))
7844 FRP (emit_move_insn (stack_pointer_rtx
, hard_frame_pointer_rtx
));
7846 /* Cope with very large offsets to the register save area. */
7847 if (reg_offset
+ sa_size
> 0x8000)
7849 int low
= ((reg_offset
& 0xffff) ^ 0x8000) - 0x8000;
7852 if (low
+ sa_size
<= 0x8000)
7853 bias
= reg_offset
- low
, reg_offset
= low
;
7855 bias
= reg_offset
, reg_offset
= 0;
7857 sa_reg
= gen_rtx_REG (DImode
, 22);
7858 sa_reg_exp
= plus_constant (stack_pointer_rtx
, bias
);
7860 FRP (emit_move_insn (sa_reg
, sa_reg_exp
));
7863 /* Restore registers in order, excepting a true frame pointer. */
7865 mem
= gen_rtx_MEM (DImode
, plus_constant (sa_reg
, reg_offset
));
7867 set_mem_alias_set (mem
, alpha_sr_alias_set
);
7868 FRP (emit_move_insn (gen_rtx_REG (DImode
, REG_RA
), mem
));
7871 imask
&= ~(1UL << REG_RA
);
7873 for (i
= 0; i
< 32; ++i
)
7874 if (imask
& (1UL << i
))
7876 if (i
== HARD_FRAME_POINTER_REGNUM
&& fp_is_frame_pointer
)
7877 fp_offset
= reg_offset
;
7880 mem
= gen_rtx_MEM (DImode
, plus_constant(sa_reg
, reg_offset
));
7881 set_mem_alias_set (mem
, alpha_sr_alias_set
);
7882 FRP (emit_move_insn (gen_rtx_REG (DImode
, i
), mem
));
7887 for (i
= 0; i
< 32; ++i
)
7888 if (fmask
& (1UL << i
))
7890 mem
= gen_rtx_MEM (DFmode
, plus_constant(sa_reg
, reg_offset
));
7891 set_mem_alias_set (mem
, alpha_sr_alias_set
);
7892 FRP (emit_move_insn (gen_rtx_REG (DFmode
, i
+32), mem
));
7896 else if (TARGET_ABI_UNICOSMK
&& alpha_procedure_type
== PT_STACK
)
7898 /* Restore callee-saved general-purpose registers. */
7902 for (i
= 9; i
< 15; i
++)
7903 if (imask
& (1UL << i
))
7905 mem
= gen_rtx_MEM (DImode
, plus_constant(hard_frame_pointer_rtx
,
7907 set_mem_alias_set (mem
, alpha_sr_alias_set
);
7908 FRP (emit_move_insn (gen_rtx_REG (DImode
, i
), mem
));
7912 for (i
= 2; i
< 10; i
++)
7913 if (fmask
& (1UL << i
))
7915 mem
= gen_rtx_MEM (DFmode
, plus_constant(hard_frame_pointer_rtx
,
7917 set_mem_alias_set (mem
, alpha_sr_alias_set
);
7918 FRP (emit_move_insn (gen_rtx_REG (DFmode
, i
+32), mem
));
7922 /* Restore the return address from the DSIB. */
7924 mem
= gen_rtx_MEM (DImode
, plus_constant(hard_frame_pointer_rtx
, -8));
7925 set_mem_alias_set (mem
, alpha_sr_alias_set
);
7926 FRP (emit_move_insn (gen_rtx_REG (DImode
, REG_RA
), mem
));
7929 if (frame_size
|| eh_ofs
)
7931 sp_adj1
= stack_pointer_rtx
;
7935 sp_adj1
= gen_rtx_REG (DImode
, 23);
7936 emit_move_insn (sp_adj1
,
7937 gen_rtx_PLUS (Pmode
, stack_pointer_rtx
, eh_ofs
));
7940 /* If the stack size is large, begin computation into a temporary
7941 register so as not to interfere with a potential fp restore,
7942 which must be consecutive with an SP restore. */
7943 if (frame_size
< 32768
7944 && ! (TARGET_ABI_UNICOSMK
&& current_function_calls_alloca
))
7945 sp_adj2
= GEN_INT (frame_size
);
7946 else if (TARGET_ABI_UNICOSMK
)
7948 sp_adj1
= gen_rtx_REG (DImode
, 23);
7949 FRP (emit_move_insn (sp_adj1
, hard_frame_pointer_rtx
));
7950 sp_adj2
= const0_rtx
;
7952 else if (frame_size
< 0x40007fffL
)
7954 int low
= ((frame_size
& 0xffff) ^ 0x8000) - 0x8000;
7956 sp_adj2
= plus_constant (sp_adj1
, frame_size
- low
);
7957 if (sa_reg_exp
&& rtx_equal_p (sa_reg_exp
, sp_adj2
))
7961 sp_adj1
= gen_rtx_REG (DImode
, 23);
7962 FRP (emit_move_insn (sp_adj1
, sp_adj2
));
7964 sp_adj2
= GEN_INT (low
);
7968 rtx tmp
= gen_rtx_REG (DImode
, 23);
7969 FRP (sp_adj2
= alpha_emit_set_const (tmp
, DImode
, frame_size
, 3));
7972 /* We can't drop new things to memory this late, afaik,
7973 so build it up by pieces. */
7974 FRP (sp_adj2
= alpha_emit_set_long_const (tmp
, frame_size
,
7975 -(frame_size
< 0)));
7981 /* From now on, things must be in order. So emit blockages. */
7983 /* Restore the frame pointer. */
7984 if (TARGET_ABI_UNICOSMK
)
7986 emit_insn (gen_blockage ());
7987 mem
= gen_rtx_MEM (DImode
,
7988 plus_constant (hard_frame_pointer_rtx
, -16));
7989 set_mem_alias_set (mem
, alpha_sr_alias_set
);
7990 FRP (emit_move_insn (hard_frame_pointer_rtx
, mem
));
7992 else if (fp_is_frame_pointer
)
7994 emit_insn (gen_blockage ());
7995 mem
= gen_rtx_MEM (DImode
, plus_constant (sa_reg
, fp_offset
));
7996 set_mem_alias_set (mem
, alpha_sr_alias_set
);
7997 FRP (emit_move_insn (hard_frame_pointer_rtx
, mem
));
7999 else if (TARGET_ABI_OPEN_VMS
)
8001 emit_insn (gen_blockage ());
8002 FRP (emit_move_insn (hard_frame_pointer_rtx
,
8003 gen_rtx_REG (DImode
, vms_save_fp_regno
)));
8006 /* Restore the stack pointer. */
8007 emit_insn (gen_blockage ());
8008 if (sp_adj2
== const0_rtx
)
8009 FRP (emit_move_insn (stack_pointer_rtx
, sp_adj1
));
8011 FRP (emit_move_insn (stack_pointer_rtx
,
8012 gen_rtx_PLUS (DImode
, sp_adj1
, sp_adj2
)));
8016 if (TARGET_ABI_OPEN_VMS
&& alpha_procedure_type
== PT_REGISTER
)
8018 emit_insn (gen_blockage ());
8019 FRP (emit_move_insn (hard_frame_pointer_rtx
,
8020 gen_rtx_REG (DImode
, vms_save_fp_regno
)));
8022 else if (TARGET_ABI_UNICOSMK
&& alpha_procedure_type
!= PT_STACK
)
8024 /* Decrement the frame pointer if the function does not have a
8027 emit_insn (gen_blockage ());
8028 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx
,
8029 hard_frame_pointer_rtx
, GEN_INT (-1))));
8034 /* Output the rest of the textual info surrounding the epilogue. */
/* NOTE(review): this chunk was damaged in extraction -- statements are
   split across lines, original line numbers are fused into the text,
   and some lines (return type, parameter decls, braces) are missing.
   Code bytes are preserved exactly; only comments were added.  */
/* Close a function in the assembly output: emit ".end <name>" (unless
   Unicos/Mk or -finhibit-size-directive), clear inside_function,
   write VMS linkage records, and emit the Unicos/Mk SSIB plus any
   deferred case vectors.  */
8037 alpha_end_function (file
, fnname
, decl
)
8040 tree decl ATTRIBUTE_UNUSED
;
8042 /* End the function. */
8043 if (!TARGET_ABI_UNICOSMK
&& !flag_inhibit_size_directive
)
8045 fputs ("\t.end ", file
);
8046 assemble_name (file
, fnname
);
/* Tell the debug-output helpers we are between functions now.  */
8049 inside_function
= FALSE
;
8051 #if TARGET_ABI_OPEN_VMS
/* On VMS, flush the accumulated linkage/.pdesc records.  */
8052 alpha_write_linkage (file
, fnname
, decl
);
8055 /* Output jump tables and the static subroutine information block. */
8056 if (TARGET_ABI_UNICOSMK
)
8058 unicosmk_output_ssib (file
, fnname
);
8059 unicosmk_output_deferred_case_vectors (file
);
8064 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8066 In order to avoid the hordes of differences between generated code
8067 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8068 lots of code loading up large constants, generate rtl and emit it
8069 instead of going straight to text.
8071 Not sure why this idea hasn't been explored before... */
8074 alpha_output_mi_thunk_osf (file
, thunk_fndecl
, delta
, vcall_offset
, function
)
8076 tree thunk_fndecl ATTRIBUTE_UNUSED
;
8077 HOST_WIDE_INT delta
;
8078 HOST_WIDE_INT vcall_offset
;
8081 HOST_WIDE_INT hi
, lo
;
8082 rtx
this, insn
, funexp
;
8084 /* We always require a valid GP. */
8085 emit_insn (gen_prologue_ldgp ());
8086 emit_note (NULL
, NOTE_INSN_PROLOGUE_END
);
8088 /* Find the "this" pointer. If the function returns a structure,
8089 the structure return pointer is in $16. */
8090 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function
))))
8091 this = gen_rtx_REG (Pmode
, 17);
8093 this = gen_rtx_REG (Pmode
, 16);
8095 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8096 entire constant for the add. */
8097 lo
= ((delta
& 0xffff) ^ 0x8000) - 0x8000;
8098 hi
= (((delta
- lo
) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8099 if (hi
+ lo
== delta
)
8102 emit_insn (gen_adddi3 (this, this, GEN_INT (hi
)));
8104 emit_insn (gen_adddi3 (this, this, GEN_INT (lo
)));
8108 rtx tmp
= alpha_emit_set_long_const (gen_rtx_REG (Pmode
, 0),
8109 delta
, -(delta
< 0));
8110 emit_insn (gen_adddi3 (this, this, tmp
));
8113 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8118 tmp
= gen_rtx_REG (Pmode
, 0);
8119 emit_move_insn (tmp
, gen_rtx_MEM (Pmode
, this));
8121 lo
= ((vcall_offset
& 0xffff) ^ 0x8000) - 0x8000;
8122 hi
= (((vcall_offset
- lo
) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8123 if (hi
+ lo
== vcall_offset
)
8126 emit_insn (gen_adddi3 (tmp
, tmp
, GEN_INT (hi
)));
8130 tmp2
= alpha_emit_set_long_const (gen_rtx_REG (Pmode
, 1),
8131 vcall_offset
, -(vcall_offset
< 0));
8132 emit_insn (gen_adddi3 (tmp
, tmp
, tmp2
));
8136 tmp2
= gen_rtx_PLUS (Pmode
, tmp
, GEN_INT (lo
));
8139 emit_move_insn (tmp
, gen_rtx_MEM (Pmode
, tmp2
));
8141 emit_insn (gen_adddi3 (this, this, tmp
));
8144 /* Generate a tail call to the target function. */
8145 if (! TREE_USED (function
))
8147 assemble_external (function
);
8148 TREE_USED (function
) = 1;
8150 funexp
= XEXP (DECL_RTL (function
), 0);
8151 funexp
= gen_rtx_MEM (FUNCTION_MODE
, funexp
);
8152 insn
= emit_call_insn (gen_sibcall (funexp
, const0_rtx
));
8153 SIBLING_CALL_P (insn
) = 1;
8155 /* Run just enough of rest_of_compilation to get the insns emitted.
8156 There's not really enough bulk here to make other passes such as
8157 instruction scheduling worth while. Note that use_thunk calls
8158 assemble_start_function and assemble_end_function. */
8159 insn
= get_insns ();
8160 insn_locators_initialize ();
8161 shorten_branches (insn
);
8162 final_start_function (insn
, file
, 1);
8163 final (insn
, file
, 1, 0);
8164 final_end_function ();
8166 #endif /* TARGET_ABI_OSF */
8168 /* Debugging support. */
8172 /* Count of sdb-related labels generated (used to find block
8173 start and end boundaries). */
8175 int sdb_label_count
= 0;
8177 /* Next label # for each statement. */
8179 static int sym_lineno
= 0;
8181 /* Count the number of .file directives, so that .loc is up to date. */
8183 static int num_source_filenames
= 0;
8185 /* Name of the file containing the current function. */
8187 static const char *current_function_file
= "";
8189 /* Offsets to alpha virtual arg/local debugging pointers. */
8191 long alpha_arg_offset
;
8192 long alpha_auto_offset
;
8194 /* Emit a new filename to a stream. */
/* Write a ".file"/stabs record for NAME to STREAM, tracking state in
   num_source_filenames and current_function_file.  NOTE(review): the
   "if (first_time)" control flow that the first_time static implies
   was partly lost in extraction -- confirm against the original.  */
8197 alpha_output_filename (stream
, name
)
8201 static int first_time
= TRUE
;
8202 char ltext_label_name
[100];
/* Number the file and emit .file <n> "name".  */
8207 ++num_source_filenames
;
8208 current_function_file
= name
;
8209 fprintf (stream
, "\t.file\t%d ", num_source_filenames
);
8210 output_quoted_string (stream
, name
);
8211 fprintf (stream
, "\n");
8212 if (!TARGET_GAS
&& write_symbols
== DBX_DEBUG
)
8213 fprintf (stream
, "\t#@stabs\n");
8216 else if (write_symbols
== DBX_DEBUG
)
/* DBX: emit an N_SOL stab referencing an internal text label.  */
8218 ASM_GENERATE_INTERNAL_LABEL (ltext_label_name
, "Ltext", 0);
8219 fprintf (stream
, "%s", ASM_STABS_OP
);
8220 output_quoted_string (stream
, name
);
/* NOTE(review): "<ext_label_name" below is an extraction artifact:
   the original token "&ltext_label_name" had its "&lt" sequence
   decoded as "<".  Restore "&ltext_label_name" when repairing.  */
8221 fprintf (stream
, ",%d,0,0,%s\n", N_SOL
, <ext_label_name
[1]);
8224 else if (name
!= current_function_file
8225 && strcmp (name
, current_function_file
) != 0)
/* Filename changed since the last call.  */
8227 if (inside_function
&& ! TARGET_GAS
)
8228 fprintf (stream
, "\t#.file\t%d ", num_source_filenames
);
8231 ++num_source_filenames
;
8232 current_function_file
= name
;
8233 fprintf (stream
, "\t.file\t%d ", num_source_filenames
);
8236 output_quoted_string (stream
, name
);
8237 fprintf (stream
, "\n");
8241 /* Emit a linenumber to a stream. */
/* For DBX debugging emit a "$LMn:" label plus a .stabn N_SLINE that
   refers to it; otherwise emit a .loc with the current file number.
   NOTE(review): the "++sym_lineno;" statement that the $LM label
   numbering implies appears to have been dropped in extraction --
   confirm against the original file.  */
8244 alpha_output_lineno (stream
, line
)
8248 if (write_symbols
== DBX_DEBUG
)
8250 /* mips-tfile doesn't understand .stabd directives. */
8252 fprintf (stream
, "$LM%d:\n%s%d,0,%d,$LM%d\n",
8253 sym_lineno
, ASM_STABN_OP
, N_SLINE
, line
, sym_lineno
);
8256 fprintf (stream
, "\n\t.loc\t%d %d\n", num_source_filenames
, line
);
8259 /* Structure to show the current status of registers and memory. */
/* Bit-masks recording which integer registers, floating registers and
   memory are touched.  NOTE(review): the full struct has "used" and
   "defd" members of this shape (see shadow.used / shadow.defd in
   alpha_handle_trap_shadows below); those member declarations were
   lost in extraction.  */
8261 struct shadow_summary
8264 unsigned int i
: 31; /* Mask of int regs */
8265 unsigned int fp
: 31; /* Mask of fp regs */
8266 unsigned int mem
: 1; /* mem == imem | fpmem */
/* Forward declarations for the trap-shadow analysis below.  */
8270 static void summarize_insn
PARAMS ((rtx
, struct shadow_summary
*, int));
8271 static void alpha_handle_trap_shadows
PARAMS ((void));
8273 /* Summary the effects of expression X on the machine. Update SUM, a pointer
8274 to the summary structure. SET is nonzero if the insn is setting the
8275 object, otherwise zero. */
8278 summarize_insn (x
, sum
, set
)
8280 struct shadow_summary
*sum
;
8283 const char *format_ptr
;
8289 switch (GET_CODE (x
))
8291 /* ??? Note that this case would be incorrect if the Alpha had a
8292 ZERO_EXTRACT in SET_DEST. */
8294 summarize_insn (SET_SRC (x
), sum
, 0);
8295 summarize_insn (SET_DEST (x
), sum
, 1);
8299 summarize_insn (XEXP (x
, 0), sum
, 1);
8303 summarize_insn (XEXP (x
, 0), sum
, 0);
8307 for (i
= ASM_OPERANDS_INPUT_LENGTH (x
) - 1; i
>= 0; i
--)
8308 summarize_insn (ASM_OPERANDS_INPUT (x
, i
), sum
, 0);
8312 for (i
= XVECLEN (x
, 0) - 1; i
>= 0; i
--)
8313 summarize_insn (XVECEXP (x
, 0, i
), sum
, 0);
8317 summarize_insn (SUBREG_REG (x
), sum
, 0);
8322 int regno
= REGNO (x
);
8323 unsigned long mask
= ((unsigned long) 1) << (regno
% 32);
8325 if (regno
== 31 || regno
== 63)
8331 sum
->defd
.i
|= mask
;
8333 sum
->defd
.fp
|= mask
;
8338 sum
->used
.i
|= mask
;
8340 sum
->used
.fp
|= mask
;
8351 /* Find the regs used in memory address computation: */
8352 summarize_insn (XEXP (x
, 0), sum
, 0);
8355 case CONST_INT
: case CONST_DOUBLE
:
8356 case SYMBOL_REF
: case LABEL_REF
: case CONST
:
8357 case SCRATCH
: case ASM_INPUT
:
8360 /* Handle common unary and binary ops for efficiency. */
8361 case COMPARE
: case PLUS
: case MINUS
: case MULT
: case DIV
:
8362 case MOD
: case UDIV
: case UMOD
: case AND
: case IOR
:
8363 case XOR
: case ASHIFT
: case ROTATE
: case ASHIFTRT
: case LSHIFTRT
:
8364 case ROTATERT
: case SMIN
: case SMAX
: case UMIN
: case UMAX
:
8365 case NE
: case EQ
: case GE
: case GT
: case LE
:
8366 case LT
: case GEU
: case GTU
: case LEU
: case LTU
:
8367 summarize_insn (XEXP (x
, 0), sum
, 0);
8368 summarize_insn (XEXP (x
, 1), sum
, 0);
8371 case NEG
: case NOT
: case SIGN_EXTEND
: case ZERO_EXTEND
:
8372 case TRUNCATE
: case FLOAT_EXTEND
: case FLOAT_TRUNCATE
: case FLOAT
:
8373 case FIX
: case UNSIGNED_FLOAT
: case UNSIGNED_FIX
: case ABS
:
8374 case SQRT
: case FFS
:
8375 summarize_insn (XEXP (x
, 0), sum
, 0);
8379 format_ptr
= GET_RTX_FORMAT (GET_CODE (x
));
8380 for (i
= GET_RTX_LENGTH (GET_CODE (x
)) - 1; i
>= 0; i
--)
8381 switch (format_ptr
[i
])
8384 summarize_insn (XEXP (x
, i
), sum
, 0);
8388 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
8389 summarize_insn (XVECEXP (x
, i
, j
), sum
, 0);
8401 /* Ensure a sufficient number of `trapb' insns are in the code when
8402 the user requests code with a trap precision of functions or
8405 In naive mode, when the user requests a trap-precision of
8406 "instruction", a trapb is needed after every instruction that may
8407 generate a trap. This ensures that the code is resumption safe but
8410 When optimizations are turned on, we delay issuing a trapb as long
8411 as possible. In this context, a trap shadow is the sequence of
8412 instructions that starts with a (potentially) trap generating
8413 instruction and extends to the next trapb or call_pal instruction
8414 (but GCC never generates call_pal by itself). We can delay (and
8415 therefore sometimes omit) a trapb subject to the following
8418 (a) On entry to the trap shadow, if any Alpha register or memory
8419 location contains a value that is used as an operand value by some
8420 instruction in the trap shadow (live on entry), then no instruction
8421 in the trap shadow may modify the register or memory location.
8423 (b) Within the trap shadow, the computation of the base register
8424 for a memory load or store instruction may not involve using the
8425 result of an instruction that might generate an UNPREDICTABLE
8428 (c) Within the trap shadow, no register may be used more than once
8429 as a destination register. (This is to make life easier for the
8432 (d) The trap shadow may not include any branch instructions. */
8435 alpha_handle_trap_shadows ()
8437 struct shadow_summary shadow
;
8438 int trap_pending
, exception_nesting
;
8442 exception_nesting
= 0;
8445 shadow
.used
.mem
= 0;
8446 shadow
.defd
= shadow
.used
;
8448 for (i
= get_insns (); i
; i
= NEXT_INSN (i
))
8450 if (GET_CODE (i
) == NOTE
)
8452 switch (NOTE_LINE_NUMBER (i
))
8454 case NOTE_INSN_EH_REGION_BEG
:
8455 exception_nesting
++;
8460 case NOTE_INSN_EH_REGION_END
:
8461 exception_nesting
--;
8466 case NOTE_INSN_EPILOGUE_BEG
:
8467 if (trap_pending
&& alpha_tp
>= ALPHA_TP_FUNC
)
8472 else if (trap_pending
)
8474 if (alpha_tp
== ALPHA_TP_FUNC
)
8476 if (GET_CODE (i
) == JUMP_INSN
8477 && GET_CODE (PATTERN (i
)) == RETURN
)
8480 else if (alpha_tp
== ALPHA_TP_INSN
)
8484 struct shadow_summary sum
;
8489 sum
.defd
= sum
.used
;
8491 switch (GET_CODE (i
))
8494 /* Annoyingly, get_attr_trap will abort on these. */
8495 if (GET_CODE (PATTERN (i
)) == USE
8496 || GET_CODE (PATTERN (i
)) == CLOBBER
)
8499 summarize_insn (PATTERN (i
), &sum
, 0);
8501 if ((sum
.defd
.i
& shadow
.defd
.i
)
8502 || (sum
.defd
.fp
& shadow
.defd
.fp
))
8504 /* (c) would be violated */
8508 /* Combine shadow with summary of current insn: */
8509 shadow
.used
.i
|= sum
.used
.i
;
8510 shadow
.used
.fp
|= sum
.used
.fp
;
8511 shadow
.used
.mem
|= sum
.used
.mem
;
8512 shadow
.defd
.i
|= sum
.defd
.i
;
8513 shadow
.defd
.fp
|= sum
.defd
.fp
;
8514 shadow
.defd
.mem
|= sum
.defd
.mem
;
8516 if ((sum
.defd
.i
& shadow
.used
.i
)
8517 || (sum
.defd
.fp
& shadow
.used
.fp
)
8518 || (sum
.defd
.mem
& shadow
.used
.mem
))
8520 /* (a) would be violated (also takes care of (b)) */
8521 if (get_attr_trap (i
) == TRAP_YES
8522 && ((sum
.defd
.i
& sum
.used
.i
)
8523 || (sum
.defd
.fp
& sum
.used
.fp
)))
8542 n
= emit_insn_before (gen_trapb (), i
);
8543 PUT_MODE (n
, TImode
);
8544 PUT_MODE (i
, TImode
);
8548 shadow
.used
.mem
= 0;
8549 shadow
.defd
= shadow
.used
;
8554 if ((exception_nesting
> 0 || alpha_tp
>= ALPHA_TP_FUNC
)
8555 && GET_CODE (i
) == INSN
8556 && GET_CODE (PATTERN (i
)) != USE
8557 && GET_CODE (PATTERN (i
)) != CLOBBER
8558 && get_attr_trap (i
) == TRAP_YES
)
8560 if (optimize
&& !trap_pending
)
8561 summarize_insn (PATTERN (i
), &shadow
, 0);
8567 /* Alpha can only issue instruction groups simultaneously if they are
8568 suitably aligned. This is very processor-specific. */
/* Issue-pipe classifications for EV4 and EV5 scheduling.
   NOTE(review): the enumerator lists (the EV4_*/EV5_* slot constants
   used below) were dropped in extraction; only the enum headers
   remain here.  */
8570 enum alphaev4_pipe
{
8577 enum alphaev5_pipe
{
/* Helpers for the insn-group alignment pass (alpha_align_insns).  */
8588 static enum alphaev4_pipe alphaev4_insn_pipe
PARAMS ((rtx
));
8589 static enum alphaev5_pipe alphaev5_insn_pipe
PARAMS ((rtx
));
8590 static rtx alphaev4_next_group
PARAMS ((rtx
, int *, int *));
8591 static rtx alphaev5_next_group
PARAMS ((rtx
, int *, int *));
8592 static rtx alphaev4_next_nop
PARAMS ((int *));
8593 static rtx alphaev5_next_nop
PARAMS ((int *));
8595 static void alpha_align_insns
8596 PARAMS ((unsigned int, rtx (*)(rtx
, int *, int *), rtx (*)(int *)));
8598 static enum alphaev4_pipe
/* Classify INSN into an EV4 issue pipe from its scheduling attribute
   type; unrecognized insns and insns whose length is not 4 bytes are
   special-cased first.  NOTE(review): the return statements and the
   switch body (per-type pipe mapping) were dropped in extraction.  */
8599 alphaev4_insn_pipe (insn
)
8602 if (recog_memoized (insn
) < 0)
8604 if (get_attr_length (insn
) != 4)
8607 switch (get_attr_type (insn
))
8641 static enum alphaev5_pipe
/* EV5 counterpart of alphaev4_insn_pipe: classify INSN into an issue
   pipe from its attribute type.  NOTE(review): the return statements
   and the switch body were dropped in extraction.  */
8642 alphaev5_insn_pipe (insn
)
8645 if (recog_memoized (insn
) < 0)
8647 if (get_attr_length (insn
) != 4)
8650 switch (get_attr_type (insn
))
8691 /* IN_USE is a mask of the slots currently filled within the insn group.
8692 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
8693 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
8695 LEN is, of course, the length of the group in bytes. */
8698 alphaev4_next_group (insn
, pin_use
, plen
)
8700 int *pin_use
, *plen
;
8707 || GET_CODE (PATTERN (insn
)) == CLOBBER
8708 || GET_CODE (PATTERN (insn
)) == USE
)
8713 enum alphaev4_pipe pipe
;
8715 pipe
= alphaev4_insn_pipe (insn
);
8719 /* Force complex instructions to start new groups. */
8723 /* If this is a completely unrecognized insn, its an asm.
8724 We don't know how long it is, so record length as -1 to
8725 signal a needed realignment. */
8726 if (recog_memoized (insn
) < 0)
8729 len
= get_attr_length (insn
);
8733 if (in_use
& EV4_IB0
)
8735 if (in_use
& EV4_IB1
)
8740 in_use
|= EV4_IB0
| EV4_IBX
;
8744 if (in_use
& EV4_IB0
)
8746 if (!(in_use
& EV4_IBX
) || (in_use
& EV4_IB1
))
8754 if (in_use
& EV4_IB1
)
8764 /* Haifa doesn't do well scheduling branches. */
8765 if (GET_CODE (insn
) == JUMP_INSN
)
8769 insn
= next_nonnote_insn (insn
);
8771 if (!insn
|| ! INSN_P (insn
))
8774 /* Let Haifa tell us where it thinks insn group boundaries are. */
8775 if (GET_MODE (insn
) == TImode
)
8778 if (GET_CODE (insn
) == CLOBBER
|| GET_CODE (insn
) == USE
)
8783 insn
= next_nonnote_insn (insn
);
8791 /* IN_USE is a mask of the slots currently filled within the insn group.
8792 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
8793 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
8795 LEN is, of course, the length of the group in bytes. */
8798 alphaev5_next_group (insn
, pin_use
, plen
)
8800 int *pin_use
, *plen
;
8807 || GET_CODE (PATTERN (insn
)) == CLOBBER
8808 || GET_CODE (PATTERN (insn
)) == USE
)
8813 enum alphaev5_pipe pipe
;
8815 pipe
= alphaev5_insn_pipe (insn
);
8819 /* Force complex instructions to start new groups. */
8823 /* If this is a completely unrecognized insn, its an asm.
8824 We don't know how long it is, so record length as -1 to
8825 signal a needed realignment. */
8826 if (recog_memoized (insn
) < 0)
8829 len
= get_attr_length (insn
);
8832 /* ??? Most of the places below, we would like to abort, as
8833 it would indicate an error either in Haifa, or in the
8834 scheduling description. Unfortunately, Haifa never
8835 schedules the last instruction of the BB, so we don't
8836 have an accurate TI bit to go off. */
8838 if (in_use
& EV5_E0
)
8840 if (in_use
& EV5_E1
)
8845 in_use
|= EV5_E0
| EV5_E01
;
8849 if (in_use
& EV5_E0
)
8851 if (!(in_use
& EV5_E01
) || (in_use
& EV5_E1
))
8859 if (in_use
& EV5_E1
)
8865 if (in_use
& EV5_FA
)
8867 if (in_use
& EV5_FM
)
8872 in_use
|= EV5_FA
| EV5_FAM
;
8876 if (in_use
& EV5_FA
)
8882 if (in_use
& EV5_FM
)
8895 /* Haifa doesn't do well scheduling branches. */
8896 /* ??? If this is predicted not-taken, slotting continues, except
8897 that no more IBR, FBR, or JSR insns may be slotted. */
8898 if (GET_CODE (insn
) == JUMP_INSN
)
8902 insn
= next_nonnote_insn (insn
);
8904 if (!insn
|| ! INSN_P (insn
))
8907 /* Let Haifa tell us where it thinks insn group boundaries are. */
8908 if (GET_MODE (insn
) == TImode
)
8911 if (GET_CODE (insn
) == CLOBBER
|| GET_CODE (insn
) == USE
)
8916 insn
= next_nonnote_insn (insn
);
/* Pick a nop variant that fills an unused EV4 issue slot, consulting
   and updating the slot mask at *PIN_USE.  NOTE(review): the branch
   bodies (mask updates and the gen_nop/gen_fnop/gen_unop calls) and
   the final return were dropped in extraction.  */
8925 alphaev4_next_nop (pin_use
)
8928 int in_use
= *pin_use
;
/* First integer box free?  */
8931 if (!(in_use
& EV4_IB0
))
/* IB0 filled but swappable into IB1?  */
8936 else if ((in_use
& (EV4_IBX
|EV4_IB1
)) == EV4_IBX
)
/* FP nop usable in the second box?  */
8941 else if (TARGET_FP
&& !(in_use
& EV4_IB1
))
/* EV5 counterpart of alphaev4_next_nop: pick a nop variant for an
   unused EV5 slot, updating *PIN_USE.  NOTE(review): the branch
   bodies and the final return were dropped in extraction.  */
8954 alphaev5_next_nop (pin_use
)
8957 int in_use
= *pin_use
;
/* Second integer pipe free?  */
8960 if (!(in_use
& EV5_E1
))
/* FP add pipe free?  */
8965 else if (TARGET_FP
&& !(in_use
& EV5_FA
))
/* FP multiply pipe free?  */
8970 else if (TARGET_FP
&& !(in_use
& EV5_FM
))
8982 /* The instruction group alignment main loop. */
8985 alpha_align_insns (max_align
, next_group
, next_nop
)
8986 unsigned int max_align
;
8987 rtx (*next_group
) PARAMS ((rtx
, int *, int *));
8988 rtx (*next_nop
) PARAMS ((int *));
8990 /* ALIGN is the known alignment for the insn group. */
8992 /* OFS is the offset of the current insn in the insn group. */
8994 int prev_in_use
, in_use
, len
;
8997 /* Let shorten branches care for assigning alignments to code labels. */
8998 shorten_branches (get_insns ());
9000 if (align_functions
< 4)
9002 else if ((unsigned int) align_functions
< max_align
)
9003 align
= align_functions
;
9007 ofs
= prev_in_use
= 0;
9009 if (GET_CODE (i
) == NOTE
)
9010 i
= next_nonnote_insn (i
);
9014 next
= (*next_group
) (i
, &in_use
, &len
);
9016 /* When we see a label, resync alignment etc. */
9017 if (GET_CODE (i
) == CODE_LABEL
)
9019 unsigned int new_align
= 1 << label_to_alignment (i
);
9021 if (new_align
>= align
)
9023 align
= new_align
< max_align
? new_align
: max_align
;
9027 else if (ofs
& (new_align
-1))
9028 ofs
= (ofs
| (new_align
-1)) + 1;
9033 /* Handle complex instructions special. */
9034 else if (in_use
== 0)
9036 /* Asms will have length < 0. This is a signal that we have
9037 lost alignment knowledge. Assume, however, that the asm
9038 will not mis-align instructions. */
9047 /* If the known alignment is smaller than the recognized insn group,
9048 realign the output. */
9049 else if ((int) align
< len
)
9051 unsigned int new_log_align
= len
> 8 ? 4 : 3;
9054 where
= prev
= prev_nonnote_insn (i
);
9055 if (!where
|| GET_CODE (where
) != CODE_LABEL
)
9058 /* Can't realign between a call and its gp reload. */
9059 if (! (TARGET_EXPLICIT_RELOCS
9060 && prev
&& GET_CODE (prev
) == CALL_INSN
))
9062 emit_insn_before (gen_realign (GEN_INT (new_log_align
)), where
);
9063 align
= 1 << new_log_align
;
9068 /* If the group won't fit in the same INT16 as the previous,
9069 we need to add padding to keep the group together. Rather
9070 than simply leaving the insn filling to the assembler, we
9071 can make use of the knowledge of what sorts of instructions
9072 were issued in the previous group to make sure that all of
9073 the added nops are really free. */
9074 else if (ofs
+ len
> (int) align
)
9076 int nop_count
= (align
- ofs
) / 4;
9079 /* Insert nops before labels, branches, and calls to truely merge
9080 the execution of the nops with the previous instruction group. */
9081 where
= prev_nonnote_insn (i
);
9084 if (GET_CODE (where
) == CODE_LABEL
)
9086 rtx where2
= prev_nonnote_insn (where
);
9087 if (where2
&& GET_CODE (where2
) == JUMP_INSN
)
9090 else if (GET_CODE (where
) == INSN
)
9097 emit_insn_before ((*next_nop
)(&prev_in_use
), where
);
9098 while (--nop_count
);
9102 ofs
= (ofs
+ len
) & (align
- 1);
9103 prev_in_use
= in_use
;
9108 /* Machine dependent reorg pass. */
/* Post-scheduling cleanup: close trap shadows with trapb insns
   whenever trap precision is stricter than per-program or exceptions
   are enabled, then (at -O, not -Os, and only when sched2 ran) align
   instruction groups to the EV4 (8-byte) or EV5 (16-byte) issue
   width.  NOTE(review): the function header line itself was lost in
   extraction.  */
9113 if (alpha_tp
!= ALPHA_TP_PROG
|| flag_exceptions
)
9114 alpha_handle_trap_shadows ();
9116 /* Due to the number of extra trapb insns, don't bother fixing up
9117 alignment when trap precision is instruction. Moreover, we can
9118 only do our job when sched2 is run. */
9119 if (optimize
&& !optimize_size
9120 && alpha_tp
!= ALPHA_TP_INSN
9121 && flag_schedule_insns_after_reload
)
9123 if (alpha_cpu
== PROCESSOR_EV4
)
9124 alpha_align_insns (8, alphaev4_next_group
, alphaev4_next_nop
);
9125 else if (alpha_cpu
== PROCESSOR_EV5
)
9126 alpha_align_insns (16, alphaev5_next_group
, alphaev5_next_nop
);
9130 #ifdef OBJECT_FORMAT_ELF
9132 /* Switch to the section to which we should output X. The only thing
9133 special we do here is to honor small data. */
/* Constants no larger than the -G threshold go to small data when
   TARGET_SMALL_DATA; everything else takes the generic ELF choice.
   NOTE(review): the statement inside the if (presumably a switch to
   the sdata section) was dropped in extraction -- confirm.  */
9136 alpha_elf_select_rtx_section (mode
, x
, align
)
9137 enum machine_mode mode
;
9139 unsigned HOST_WIDE_INT align
;
9141 if (TARGET_SMALL_DATA
&& GET_MODE_SIZE (mode
) <= g_switch_value
)
9142 /* ??? Consider using mergable sdata sections. */
9145 default_elf_select_rtx_section (mode
, x
, align
);
9148 #endif /* OBJECT_FORMAT_ELF */
9150 /* Structure to collect function names for final output in link section. */
9151 /* Note that items marked with GTY can't be ifdef'ed out. */
/* State of a linkage entry: never referenced / defined in this unit /
   assumed external.  */
9153 enum links_kind
{KIND_UNUSED
, KIND_LOCAL
, KIND_EXTERN
};
/* Relocation style a linkage entry requests.  */
9154 enum reloc_kind
{KIND_LINKAGE
, KIND_CODEADDR
};
/* One linkage entry.  NOTE(review): the "linkage" SYMBOL_REF and
   "num" members referenced elsewhere (al->linkage, link->num) were
   lost in extraction.  */
9156 struct alpha_links
GTY(())
9160 enum links_kind lkind
;
9161 enum reloc_kind rkind
;
/* Per-function record holding that function's splay tree of links.  */
9164 struct alpha_funcs
GTY(())
9167 splay_tree
GTY ((param1_is (char *), param2_is (struct alpha_links
*)))
/* Global GC-rooted trees: name -> alpha_links and fndecl ->
   alpha_funcs, plus a running count of functions seen.  */
9171 static GTY ((param1_is (char *), param2_is (struct alpha_links
*)))
9172 splay_tree alpha_links_tree
;
9173 static GTY ((param1_is (tree
), param2_is (struct alpha_funcs
*)))
9174 splay_tree alpha_funcs_tree
;
9176 static GTY(()) int alpha_funcs_num
;
9178 #if TARGET_ABI_OPEN_VMS
9180 /* Return the VMS argument type corresponding to MODE. */
/* NOTE(review): the switch over MODE was lost in extraction; the two
   surviving returns presumably correspond to single- and
   double-precision float modes, choosing the VAX (FF/FD) or IEEE
   (FS/FT) encodings -- confirm against the original file.  */
9183 alpha_arg_type (mode
)
9184 enum machine_mode mode
;
9189 return TARGET_FLOAT_VAX
? FF
: FS
;
9191 return TARGET_FLOAT_VAX
? FD
: FT
;
9197 /* Return an rtx for an integer representing the VMS Argument Information
/* Builds the AI register value: the argument count occupies the low
   bits, and a 3-bit type code (from cum.atypes) for each of the first
   six arguments is packed starting at bit 8.  */
9201 alpha_arg_info_reg_val (cum
)
9202 CUMULATIVE_ARGS cum
;
9204 unsigned HOST_WIDE_INT regval
= cum
.num_args
;
9207 for (i
= 0; i
< 6; i
++)
9208 regval
|= ((int) cum
.atypes
[i
]) << (i
* 3 + 8);
9210 return GEN_INT (regval
);
9213 /* Make (or fake) .linkage entry for function call.
9215 IS_LOCAL is 0 if name is used in call, 1 if name is used in definition.
9217 Return an SYMBOL_REF rtx for the linkage. */
9220 alpha_need_linkage (name
, is_local
)
9224 splay_tree_node node
;
9225 struct alpha_links
*al
;
9232 struct alpha_funcs
*cfaf
;
9234 if (!alpha_funcs_tree
)
9235 alpha_funcs_tree
= splay_tree_new_ggc ((splay_tree_compare_fn
)
9236 splay_tree_compare_pointers
);
9238 cfaf
= (struct alpha_funcs
*) ggc_alloc (sizeof (struct alpha_funcs
));
9241 cfaf
->num
= ++alpha_funcs_num
;
9243 splay_tree_insert (alpha_funcs_tree
,
9244 (splay_tree_key
) current_function_decl
,
9245 (splay_tree_value
) cfaf
);
9248 if (alpha_links_tree
)
9250 /* Is this name already defined? */
9252 node
= splay_tree_lookup (alpha_links_tree
, (splay_tree_key
) name
);
9255 al
= (struct alpha_links
*) node
->value
;
9258 /* Defined here but external assumed. */
9259 if (al
->lkind
== KIND_EXTERN
)
9260 al
->lkind
= KIND_LOCAL
;
9264 /* Used here but unused assumed. */
9265 if (al
->lkind
== KIND_UNUSED
)
9266 al
->lkind
= KIND_LOCAL
;
9272 alpha_links_tree
= splay_tree_new_ggc ((splay_tree_compare_fn
) strcmp
);
9274 al
= (struct alpha_links
*) ggc_alloc (sizeof (struct alpha_links
));
9275 name
= ggc_strdup (name
);
9277 /* Assume external if no definition. */
9278 al
->lkind
= (is_local
? KIND_UNUSED
: KIND_EXTERN
);
9280 /* Ensure we have an IDENTIFIER so assemble_name can mark it used. */
9281 get_identifier (name
);
9283 /* Construct a SYMBOL_REF for us to call. */
9285 size_t name_len
= strlen (name
);
9286 char *linksym
= alloca (name_len
+ 6);
9288 memcpy (linksym
+ 1, name
, name_len
);
9289 memcpy (linksym
+ 1 + name_len
, "..lk", 5);
9290 al
->linkage
= gen_rtx_SYMBOL_REF (Pmode
,
9291 ggc_alloc_string (linksym
, name_len
+ 5));
9294 splay_tree_insert (alpha_links_tree
, (splay_tree_key
) name
,
9295 (splay_tree_value
) al
);
9301 alpha_use_linkage (linkage
, cfundecl
, lflag
, rflag
)
9307 splay_tree_node cfunnode
;
9308 struct alpha_funcs
*cfaf
;
9309 struct alpha_links
*al
;
9310 const char *name
= XSTR (linkage
, 0);
9312 cfaf
= (struct alpha_funcs
*) 0;
9313 al
= (struct alpha_links
*) 0;
9315 cfunnode
= splay_tree_lookup (alpha_funcs_tree
, (splay_tree_key
) cfundecl
);
9316 cfaf
= (struct alpha_funcs
*) cfunnode
->value
;
9320 splay_tree_node lnode
;
9322 /* Is this name already defined? */
9324 lnode
= splay_tree_lookup (cfaf
->links
, (splay_tree_key
) name
);
9326 al
= (struct alpha_links
*) lnode
->value
;
9329 cfaf
->links
= splay_tree_new_ggc ((splay_tree_compare_fn
) strcmp
);
9337 splay_tree_node node
= 0;
9338 struct alpha_links
*anl
;
9343 name_len
= strlen (name
);
9345 al
= (struct alpha_links
*) ggc_alloc (sizeof (struct alpha_links
));
9346 al
->num
= cfaf
->num
;
9348 node
= splay_tree_lookup (alpha_links_tree
, (splay_tree_key
) name
);
9351 anl
= (struct alpha_links
*) node
->value
;
9352 al
->lkind
= anl
->lkind
;
9355 sprintf (buf
, "$%d..%s..lk", cfaf
->num
, name
);
9356 buflen
= strlen (buf
);
9357 linksym
= alloca (buflen
+ 1);
9358 memcpy (linksym
, buf
, buflen
+ 1);
9360 al
->linkage
= gen_rtx_SYMBOL_REF
9361 (Pmode
, ggc_alloc_string (linksym
, buflen
+ 1));
9363 splay_tree_insert (cfaf
->links
, (splay_tree_key
) name
,
9364 (splay_tree_value
) al
);
9368 al
->rkind
= KIND_CODEADDR
;
9370 al
->rkind
= KIND_LINKAGE
;
9373 return gen_rtx_MEM (Pmode
, plus_constant (al
->linkage
, 8));
/* splay_tree_foreach callback: write one linkage entry to the FILE
   passed in DATA.  Code-address entries emit one quadword (local) or
   a .code_address request (external); linkage-pair entries emit two
   quadwords (local) or a .linkage request (external).  */
9379 alpha_write_one_linkage (node
, data
)
9380 splay_tree_node node
;
9383 const char *const name
= (const char *) node
->key
;
9384 struct alpha_links
*link
= (struct alpha_links
*) node
->value
;
9385 FILE *stream
= (FILE *) data
;
/* Label the entry: "$<funcnum>..<name>..lk:".  */
9387 fprintf (stream
, "$%d..%s..lk:\n", link
->num
, name
);
9388 if (link
->rkind
== KIND_CODEADDR
)
9390 if (link
->lkind
== KIND_LOCAL
)
9392 /* Local and used */
9393 fprintf (stream
, "\t.quad %s..en\n", name
);
9397 /* External and used, request code address. */
9398 fprintf (stream
, "\t.code_address %s\n", name
);
9403 if (link
->lkind
== KIND_LOCAL
)
9405 /* Local and used, build linkage pair. */
9406 fprintf (stream
, "\t.quad %s..en\n", name
);
9407 fprintf (stream
, "\t.quad %s\n", name
);
9411 /* External and used, request linkage pair. */
9412 fprintf (stream
, "\t.linkage %s\n", name
);
/* Emit the VMS link-section data for FUNNAME/FUNDECL: the ..na name
   record, the function label, a .pdesc whose kind mirrors
   alpha_procedure_type, then every linkage entry collected for this
   function via alpha_write_one_linkage.  */
9420 alpha_write_linkage (stream
, funname
, fundecl
)
9422 const char *funname
;
9425 splay_tree_node node
;
9426 struct alpha_funcs
*func
;
9429 fprintf (stream
, "\t.align 3\n");
/* Look up the per-function record built by alpha_need_linkage.  */
9430 node
= splay_tree_lookup (alpha_funcs_tree
, (splay_tree_key
) fundecl
);
9431 func
= (struct alpha_funcs
*) node
->value
;
9433 fputs ("\t.name ", stream
);
9434 assemble_name (stream
, funname
);
9435 fputs ("..na\n", stream
);
9436 ASM_OUTPUT_LABEL (stream
, funname
);
9437 fprintf (stream
, "\t.pdesc ");
9438 assemble_name (stream
, funname
);
9439 fprintf (stream
, "..en,%s\n",
9440 alpha_procedure_type
== PT_STACK
? "stack"
9441 : alpha_procedure_type
== PT_REGISTER
? "reg" : "null");
/* Emit each collected linkage entry for this function.  */
9445 splay_tree_foreach (func
->links
, alpha_write_one_linkage
, stream
);
9446 /* splay_tree_delete (func->links); */
9450 /* Given a decl, a section name, and whether the decl initializer
9451 has relocs, choose attributes for the section. */
9453 #define SECTION_VMS_OVERLAY SECTION_FORGET
9454 #define SECTION_VMS_GLOBAL SECTION_MACH_DEP
9455 #define SECTION_VMS_INITIALIZE (SECTION_VMS_GLOBAL << 1)
9458 vms_section_type_flags (decl
, name
, reloc
)
9463 unsigned int flags
= default_section_type_flags (decl
, name
, reloc
);
9465 if (decl
&& DECL_ATTRIBUTES (decl
)
9466 && lookup_attribute ("overlaid", DECL_ATTRIBUTES (decl
)))
9467 flags
|= SECTION_VMS_OVERLAY
;
9468 if (decl
&& DECL_ATTRIBUTES (decl
)
9469 && lookup_attribute ("global", DECL_ATTRIBUTES (decl
)))
9470 flags
|= SECTION_VMS_GLOBAL
;
9471 if (decl
&& DECL_ATTRIBUTES (decl
)
9472 && lookup_attribute ("initialize", DECL_ATTRIBUTES (decl
)))
9473 flags
|= SECTION_VMS_INITIALIZE
;
9478 /* Switch to an arbitrary section NAME with attributes as specified
9479 by FLAGS. ALIGN specifies any known alignment requirements for
9480 the section; 0 if the default should be used. */
9483 vms_asm_named_section (name
, flags
)
9487 fputc ('\n', asm_out_file
);
9488 fprintf (asm_out_file
, ".section\t%s", name
);
9490 if (flags
& SECTION_VMS_OVERLAY
)
9491 fprintf (asm_out_file
, ",OVR");
9492 if (flags
& SECTION_VMS_GLOBAL
)
9493 fprintf (asm_out_file
, ",GBL");
9494 if (flags
& SECTION_VMS_INITIALIZE
)
9495 fprintf (asm_out_file
, ",NOMOD");
9496 if (flags
& SECTION_DEBUG
)
9497 fprintf (asm_out_file
, ",NOWRT");
9499 fputc ('\n', asm_out_file
);
9502 /* Record an element in the table of global constructors. SYMBOL is
9503 a SYMBOL_REF of the function to be called; PRIORITY is a number
9504 between 0 and MAX_INIT_PRIORITY.
9506 Differs from default_ctors_section_asm_out_constructor in that the
9507 width of the .ctors entry is always 64 bits, rather than the 32 bits
9508 used by a normal pointer. */
9511 vms_asm_out_constructor (symbol
, priority
)
9513 int priority ATTRIBUTE_UNUSED
;
9516 assemble_align (BITS_PER_WORD
);
9517 assemble_integer (symbol
, UNITS_PER_WORD
, BITS_PER_WORD
, 1);
9521 vms_asm_out_destructor (symbol
, priority
)
9523 int priority ATTRIBUTE_UNUSED
;
9526 assemble_align (BITS_PER_WORD
);
9527 assemble_integer (symbol
, UNITS_PER_WORD
, BITS_PER_WORD
, 1);
9532 alpha_need_linkage (name
, is_local
)
9533 const char *name ATTRIBUTE_UNUSED
;
9534 int is_local ATTRIBUTE_UNUSED
;
9540 alpha_use_linkage (linkage
, cfundecl
, lflag
, rflag
)
9541 rtx linkage ATTRIBUTE_UNUSED
;
9542 tree cfundecl ATTRIBUTE_UNUSED
;
9543 int lflag ATTRIBUTE_UNUSED
;
9544 int rflag ATTRIBUTE_UNUSED
;
9549 #endif /* TARGET_ABI_OPEN_VMS */
9551 #if TARGET_ABI_UNICOSMK
9553 static void unicosmk_output_module_name
PARAMS ((FILE *));
9554 static void unicosmk_output_default_externs
PARAMS ((FILE *));
9555 static void unicosmk_output_dex
PARAMS ((FILE *));
9556 static void unicosmk_output_externs
PARAMS ((FILE *));
9557 static void unicosmk_output_addr_vec
PARAMS ((FILE *, rtx
));
9558 static const char *unicosmk_ssib_name
PARAMS ((void));
9559 static int unicosmk_special_name
PARAMS ((const char *));
9561 /* Define the offset between two registers, one to be eliminated, and the
9562 other its replacement, at the start of a routine. */
9565 unicosmk_initial_elimination_offset (from
, to
)
9571 fixed_size
= alpha_sa_size();
9572 if (fixed_size
!= 0)
9575 if (from
== FRAME_POINTER_REGNUM
&& to
== HARD_FRAME_POINTER_REGNUM
)
9577 else if (from
== ARG_POINTER_REGNUM
&& to
== HARD_FRAME_POINTER_REGNUM
)
9579 else if (from
== FRAME_POINTER_REGNUM
&& to
== STACK_POINTER_REGNUM
)
9580 return (ALPHA_ROUND (current_function_outgoing_args_size
)
9581 + ALPHA_ROUND (get_frame_size()));
9582 else if (from
== ARG_POINTER_REGNUM
&& to
== STACK_POINTER_REGNUM
)
9583 return (ALPHA_ROUND (fixed_size
)
9584 + ALPHA_ROUND (get_frame_size()
9585 + current_function_outgoing_args_size
));
9590 /* Output the module name for .ident and .end directives. We have to strip
9591 directories and add make sure that the module name starts with a letter
9595 unicosmk_output_module_name (file
)
9600 /* Strip directories. */
9602 name
= strrchr (main_input_filename
, '/');
9606 name
= main_input_filename
;
9608 /* CAM only accepts module names that start with a letter or '$'. We
9609 prefix the module name with a '$' if necessary. */
9611 if (!ISALPHA (*name
))
9613 output_clean_symbol_name (file
, name
);
9616 /* Output text that to appear at the beginning of an assembler file. */
9619 unicosmk_asm_file_start (file
)
9624 fputs ("\t.ident\t", file
);
9625 unicosmk_output_module_name (file
);
9626 fputs ("\n\n", file
);
9628 /* The Unicos/Mk assembler uses different register names. Instead of trying
9629 to support them, we simply use micro definitions. */
9631 /* CAM has different register names: rN for the integer register N and fN
9632 for the floating-point register N. Instead of trying to use these in
9633 alpha.md, we define the symbols $N and $fN to refer to the appropriate
9636 for (i
= 0; i
< 32; ++i
)
9637 fprintf (file
, "$%d <- r%d\n", i
, i
);
9639 for (i
= 0; i
< 32; ++i
)
9640 fprintf (file
, "$f%d <- f%d\n", i
, i
);
9644 /* The .align directive fill unused space with zeroes which does not work
9645 in code sections. We define the macro 'gcc@code@align' which uses nops
9646 instead. Note that it assumes that code sections always have the
9647 biggest possible alignment since . refers to the current offset from
9648 the beginning of the section. */
9650 fputs ("\t.macro gcc@code@align n\n", file
);
9651 fputs ("gcc@n@bytes = 1 << n\n", file
);
9652 fputs ("gcc@here = . % gcc@n@bytes\n", file
);
9653 fputs ("\t.if ne, gcc@here, 0\n", file
);
9654 fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", file
);
9655 fputs ("\tbis r31,r31,r31\n", file
);
9656 fputs ("\t.endr\n", file
);
9657 fputs ("\t.endif\n", file
);
9658 fputs ("\t.endm gcc@code@align\n\n", file
);
9660 /* Output extern declarations which should always be visible. */
9661 unicosmk_output_default_externs (file
);
9663 /* Open a dummy section. We always need to be inside a section for the
9664 section-switching code to work correctly.
9665 ??? This should be a module id or something like that. I still have to
9666 figure out what the rules for those are. */
9667 fputs ("\n\t.psect\t$SG00000,data\n", file
);
9670 /* Output text to appear at the end of an assembler file. This includes all
9671 pending extern declarations and DEX expressions. */
9674 unicosmk_file_end ()
9676 fputs ("\t.endp\n\n", asm_out_file
);
9678 /* Output all pending externs. */
9680 unicosmk_output_externs (asm_out_file
);
9682 /* Output dex definitions used for functions whose names conflict with
9685 unicosmk_output_dex (asm_out_file
);
9687 fputs ("\t.end\t", asm_out_file
);
9688 unicosmk_output_module_name (asm_out_file
);
9689 putc ('\n', asm_out_file
);
9692 /* Output the definition of a common variable. */
9695 unicosmk_output_common (file
, name
, size
, align
)
9702 printf ("T3E__: common %s\n", name
);
9705 fputs("\t.endp\n\n\t.psect ", file
);
9706 assemble_name(file
, name
);
9707 fprintf(file
, ",%d,common\n", floor_log2 (align
/ BITS_PER_UNIT
));
9708 fprintf(file
, "\t.byte\t0:%d\n", size
);
9710 /* Mark the symbol as defined in this module. */
9711 name_tree
= get_identifier (name
);
9712 TREE_ASM_WRITTEN (name_tree
) = 1;
9715 #define SECTION_PUBLIC SECTION_MACH_DEP
9716 #define SECTION_MAIN (SECTION_PUBLIC << 1)
9717 static int current_section_align
;
9720 unicosmk_section_type_flags (decl
, name
, reloc
)
9723 int reloc ATTRIBUTE_UNUSED
;
9725 unsigned int flags
= default_section_type_flags (decl
, name
, reloc
);
9730 if (TREE_CODE (decl
) == FUNCTION_DECL
)
9732 current_section_align
= floor_log2 (FUNCTION_BOUNDARY
/ BITS_PER_UNIT
);
9733 if (align_functions_log
> current_section_align
)
9734 current_section_align
= align_functions_log
;
9736 if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl
)), "main"))
9737 flags
|= SECTION_MAIN
;
9740 current_section_align
= floor_log2 (DECL_ALIGN (decl
) / BITS_PER_UNIT
);
9742 if (TREE_PUBLIC (decl
))
9743 flags
|= SECTION_PUBLIC
;
9748 /* Generate a section name for decl and associate it with the
9752 unicosmk_unique_section (decl
, reloc
)
9754 int reloc ATTRIBUTE_UNUSED
;
9762 name
= IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl
));
9763 name
= default_strip_name_encoding (name
);
9764 len
= strlen (name
);
9766 if (TREE_CODE (decl
) == FUNCTION_DECL
)
9770 /* It is essential that we prefix the section name here because
9771 otherwise the section names generated for constructors and
9772 destructors confuse collect2. */
9774 string
= alloca (len
+ 6);
9775 sprintf (string
, "code@%s", name
);
9776 DECL_SECTION_NAME (decl
) = build_string (len
+ 5, string
);
9778 else if (TREE_PUBLIC (decl
))
9779 DECL_SECTION_NAME (decl
) = build_string (len
, name
);
9784 string
= alloca (len
+ 6);
9785 sprintf (string
, "data@%s", name
);
9786 DECL_SECTION_NAME (decl
) = build_string (len
+ 5, string
);
9790 /* Switch to an arbitrary section NAME with attributes as specified
9791 by FLAGS. ALIGN specifies any known alignment requirements for
9792 the section; 0 if the default should be used. */
9795 unicosmk_asm_named_section (name
, flags
)
9801 /* Close the previous section. */
9803 fputs ("\t.endp\n\n", asm_out_file
);
9805 /* Find out what kind of section we are opening. */
9807 if (flags
& SECTION_MAIN
)
9808 fputs ("\t.start\tmain\n", asm_out_file
);
9810 if (flags
& SECTION_CODE
)
9812 else if (flags
& SECTION_PUBLIC
)
9817 if (current_section_align
!= 0)
9818 fprintf (asm_out_file
, "\t.psect\t%s,%d,%s\n", name
,
9819 current_section_align
, kind
);
9821 fprintf (asm_out_file
, "\t.psect\t%s,%s\n", name
, kind
);
9825 unicosmk_insert_attributes (decl
, attr_ptr
)
9827 tree
*attr_ptr ATTRIBUTE_UNUSED
;
9830 && (TREE_PUBLIC (decl
) || TREE_CODE (decl
) == FUNCTION_DECL
))
9831 unicosmk_unique_section (decl
, 0);
9834 /* Output an alignment directive. We have to use the macro 'gcc@code@align'
9835 in code sections because .align fill unused space with zeroes. */
9838 unicosmk_output_align (file
, align
)
9842 if (inside_function
)
9843 fprintf (file
, "\tgcc@code@align\t%d\n", align
);
9845 fprintf (file
, "\t.align\t%d\n", align
);
9848 /* Add a case vector to the current function's list of deferred case
9849 vectors. Case vectors have to be put into a separate section because CAM
9850 does not allow data definitions in code sections. */
9853 unicosmk_defer_case_vector (lab
, vec
)
9857 struct machine_function
*machine
= cfun
->machine
;
9859 vec
= gen_rtx_EXPR_LIST (VOIDmode
, lab
, vec
);
9860 machine
->addr_list
= gen_rtx_EXPR_LIST (VOIDmode
, vec
,
9861 machine
->addr_list
);
9864 /* Output a case vector. */
9867 unicosmk_output_addr_vec (file
, vec
)
9871 rtx lab
= XEXP (vec
, 0);
9872 rtx body
= XEXP (vec
, 1);
9873 int vlen
= XVECLEN (body
, 0);
9876 (*targetm
.asm_out
.internal_label
) (file
, "L", CODE_LABEL_NUMBER (lab
));
9878 for (idx
= 0; idx
< vlen
; idx
++)
9880 ASM_OUTPUT_ADDR_VEC_ELT
9881 (file
, CODE_LABEL_NUMBER (XEXP (XVECEXP (body
, 0, idx
), 0)));
9885 /* Output current function's deferred case vectors. */
9888 unicosmk_output_deferred_case_vectors (file
)
9891 struct machine_function
*machine
= cfun
->machine
;
9894 if (machine
->addr_list
== NULL_RTX
)
9898 for (t
= machine
->addr_list
; t
; t
= XEXP (t
, 1))
9899 unicosmk_output_addr_vec (file
, XEXP (t
, 0));
9902 /* Set up the dynamic subprogram information block (DSIB) and update the
9903 frame pointer register ($15) for subroutines which have a frame. If the
9904 subroutine doesn't have a frame, simply increment $15. */
9907 unicosmk_gen_dsib (imaskP
)
9908 unsigned long * imaskP
;
9910 if (alpha_procedure_type
== PT_STACK
)
9912 const char *ssib_name
;
9915 /* Allocate 64 bytes for the DSIB. */
9917 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx
, stack_pointer_rtx
,
9919 emit_insn (gen_blockage ());
9921 /* Save the return address. */
9923 mem
= gen_rtx_MEM (DImode
, plus_constant (stack_pointer_rtx
, 56));
9924 set_mem_alias_set (mem
, alpha_sr_alias_set
);
9925 FRP (emit_move_insn (mem
, gen_rtx_REG (DImode
, REG_RA
)));
9926 (*imaskP
) &= ~(1UL << REG_RA
);
9928 /* Save the old frame pointer. */
9930 mem
= gen_rtx_MEM (DImode
, plus_constant (stack_pointer_rtx
, 48));
9931 set_mem_alias_set (mem
, alpha_sr_alias_set
);
9932 FRP (emit_move_insn (mem
, hard_frame_pointer_rtx
));
9933 (*imaskP
) &= ~(1UL << HARD_FRAME_POINTER_REGNUM
);
9935 emit_insn (gen_blockage ());
9937 /* Store the SSIB pointer. */
9939 ssib_name
= ggc_strdup (unicosmk_ssib_name ());
9940 mem
= gen_rtx_MEM (DImode
, plus_constant (stack_pointer_rtx
, 32));
9941 set_mem_alias_set (mem
, alpha_sr_alias_set
);
9943 FRP (emit_move_insn (gen_rtx_REG (DImode
, 5),
9944 gen_rtx_SYMBOL_REF (Pmode
, ssib_name
)));
9945 FRP (emit_move_insn (mem
, gen_rtx_REG (DImode
, 5)));
9947 /* Save the CIW index. */
9949 mem
= gen_rtx_MEM (DImode
, plus_constant (stack_pointer_rtx
, 24));
9950 set_mem_alias_set (mem
, alpha_sr_alias_set
);
9951 FRP (emit_move_insn (mem
, gen_rtx_REG (DImode
, 25)));
9953 emit_insn (gen_blockage ());
9955 /* Set the new frame pointer. */
9957 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx
,
9958 stack_pointer_rtx
, GEN_INT (64))));
9963 /* Increment the frame pointer register to indicate that we do not
9966 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx
,
9967 hard_frame_pointer_rtx
, GEN_INT (1))));
9971 #define SSIB_PREFIX "__SSIB_"
9972 #define SSIB_PREFIX_LEN 7
9974 /* Generate the name of the SSIB section for the current function. */
9977 unicosmk_ssib_name ()
9979 /* This is ok since CAM won't be able to deal with names longer than that
9982 static char name
[256];
9988 x
= DECL_RTL (cfun
->decl
);
9989 if (GET_CODE (x
) != MEM
)
9992 if (GET_CODE (x
) != SYMBOL_REF
)
9994 fnname
= XSTR (x
, 0);
9996 len
= strlen (fnname
);
9997 if (len
+ SSIB_PREFIX_LEN
> 255)
9998 len
= 255 - SSIB_PREFIX_LEN
;
10000 strcpy (name
, SSIB_PREFIX
);
10001 strncpy (name
+ SSIB_PREFIX_LEN
, fnname
, len
);
10002 name
[len
+ SSIB_PREFIX_LEN
] = 0;
10007 /* Output the static subroutine information block for the current
10011 unicosmk_output_ssib (file
, fnname
)
10013 const char *fnname
;
10019 struct machine_function
*machine
= cfun
->machine
;
10022 fprintf (file
, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix
,
10023 unicosmk_ssib_name ());
10025 /* Some required stuff and the function name length. */
10027 len
= strlen (fnname
);
10028 fprintf (file
, "\t.quad\t^X20008%2.2X28\n", len
);
10031 ??? We don't do that yet. */
10033 fputs ("\t.quad\t0\n", file
);
10035 /* Function address. */
10037 fputs ("\t.quad\t", file
);
10038 assemble_name (file
, fnname
);
10041 fputs ("\t.quad\t0\n", file
);
10042 fputs ("\t.quad\t0\n", file
);
10045 ??? We do it the same way Cray CC does it but this could be
10048 for( i
= 0; i
< len
; i
++ )
10049 fprintf (file
, "\t.byte\t%d\n", (int)(fnname
[i
]));
10050 if( (len
% 8) == 0 )
10051 fputs ("\t.quad\t0\n", file
);
10053 fprintf (file
, "\t.bits\t%d : 0\n", (8 - (len
% 8))*8);
10055 /* All call information words used in the function. */
10057 for (x
= machine
->first_ciw
; x
; x
= XEXP (x
, 1))
10060 #if HOST_BITS_PER_WIDE_INT == 32
10061 fprintf (file
, "\t.quad\t" HOST_WIDE_INT_PRINT_DOUBLE_HEX
"\n",
10062 CONST_DOUBLE_HIGH (ciw
), CONST_DOUBLE_LOW (ciw
));
10064 fprintf (file
, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX
"\n", INTVAL (ciw
));
10069 /* Add a call information word (CIW) to the list of the current function's
10070 CIWs and return its index.
10072 X is a CONST_INT or CONST_DOUBLE representing the CIW. */
10075 unicosmk_add_call_info_word (x
)
10079 struct machine_function
*machine
= cfun
->machine
;
10081 node
= gen_rtx_EXPR_LIST (VOIDmode
, x
, NULL_RTX
);
10082 if (machine
->first_ciw
== NULL_RTX
)
10083 machine
->first_ciw
= node
;
10085 XEXP (machine
->last_ciw
, 1) = node
;
10087 machine
->last_ciw
= node
;
10088 ++machine
->ciw_count
;
10090 return GEN_INT (machine
->ciw_count
10091 + strlen (current_function_name
)/8 + 5);
10094 static char unicosmk_section_buf
[100];
10097 unicosmk_text_section ()
10099 static int count
= 0;
10100 sprintf (unicosmk_section_buf
, "\t.endp\n\n\t.psect\tgcc@text___%d,code",
10102 return unicosmk_section_buf
;
10106 unicosmk_data_section ()
10108 static int count
= 1;
10109 sprintf (unicosmk_section_buf
, "\t.endp\n\n\t.psect\tgcc@data___%d,data",
10111 return unicosmk_section_buf
;
10114 /* The Cray assembler doesn't accept extern declarations for symbols which
10115 are defined in the same file. We have to keep track of all global
10116 symbols which are referenced and/or defined in a source file and output
10117 extern declarations for those which are referenced but not defined at
10118 the end of file. */
10120 /* List of identifiers for which an extern declaration might have to be
10122 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10124 struct unicosmk_extern_list
10126 struct unicosmk_extern_list
*next
;
10130 static struct unicosmk_extern_list
*unicosmk_extern_head
= 0;
10132 /* Output extern declarations which are required for every asm file. */
10135 unicosmk_output_default_externs (file
)
10138 static const char *const externs
[] =
10139 { "__T3E_MISMATCH" };
10144 n
= ARRAY_SIZE (externs
);
10146 for (i
= 0; i
< n
; i
++)
10147 fprintf (file
, "\t.extern\t%s\n", externs
[i
]);
10150 /* Output extern declarations for global symbols which are have been
10151 referenced but not defined. */
10154 unicosmk_output_externs (file
)
10157 struct unicosmk_extern_list
*p
;
10158 const char *real_name
;
10162 len
= strlen (user_label_prefix
);
10163 for (p
= unicosmk_extern_head
; p
!= 0; p
= p
->next
)
10165 /* We have to strip the encoding and possibly remove user_label_prefix
10166 from the identifier in order to handle -fleading-underscore and
10167 explicit asm names correctly (cf. gcc.dg/asm-names-1.c). */
10168 real_name
= default_strip_name_encoding (p
->name
);
10169 if (len
&& p
->name
[0] == '*'
10170 && !memcmp (real_name
, user_label_prefix
, len
))
10173 name_tree
= get_identifier (real_name
);
10174 if (! TREE_ASM_WRITTEN (name_tree
))
10176 TREE_ASM_WRITTEN (name_tree
) = 1;
10177 fputs ("\t.extern\t", file
);
10178 assemble_name (file
, p
->name
);
10184 /* Record an extern. */
10187 unicosmk_add_extern (name
)
10190 struct unicosmk_extern_list
*p
;
10192 p
= (struct unicosmk_extern_list
*)
10193 xmalloc (sizeof (struct unicosmk_extern_list
));
10194 p
->next
= unicosmk_extern_head
;
10196 unicosmk_extern_head
= p
;
10199 /* The Cray assembler generates incorrect code if identifiers which
10200 conflict with register names are used as instruction operands. We have
10201 to replace such identifiers with DEX expressions. */
10203 /* Structure to collect identifiers which have been replaced by DEX
10205 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10207 struct unicosmk_dex
{
10208 struct unicosmk_dex
*next
;
10212 /* List of identifiers which have been replaced by DEX expressions. The DEX
10213 number is determined by the position in the list. */
10215 static struct unicosmk_dex
*unicosmk_dex_list
= NULL
;
10217 /* The number of elements in the DEX list. */
10219 static int unicosmk_dex_count
= 0;
10221 /* Check if NAME must be replaced by a DEX expression. */
10224 unicosmk_special_name (name
)
10227 if (name
[0] == '*')
10230 if (name
[0] == '$')
10233 if (name
[0] != 'r' && name
[0] != 'f' && name
[0] != 'R' && name
[0] != 'F')
10238 case '1': case '2':
10239 return (name
[2] == '\0' || (ISDIGIT (name
[2]) && name
[3] == '\0'));
10242 return (name
[2] == '\0'
10243 || ((name
[2] == '0' || name
[2] == '1') && name
[3] == '\0'));
10246 return (ISDIGIT (name
[1]) && name
[2] == '\0');
10250 /* Return the DEX number if X must be replaced by a DEX expression and 0
10254 unicosmk_need_dex (x
)
10257 struct unicosmk_dex
*dex
;
10261 if (GET_CODE (x
) != SYMBOL_REF
)
10265 if (! unicosmk_special_name (name
))
10268 i
= unicosmk_dex_count
;
10269 for (dex
= unicosmk_dex_list
; dex
; dex
= dex
->next
)
10271 if (! strcmp (name
, dex
->name
))
10276 dex
= (struct unicosmk_dex
*) xmalloc (sizeof (struct unicosmk_dex
));
10278 dex
->next
= unicosmk_dex_list
;
10279 unicosmk_dex_list
= dex
;
10281 ++unicosmk_dex_count
;
10282 return unicosmk_dex_count
;
10285 /* Output the DEX definitions for this file. */
10288 unicosmk_output_dex (file
)
10291 struct unicosmk_dex
*dex
;
10294 if (unicosmk_dex_list
== NULL
)
10297 fprintf (file
, "\t.dexstart\n");
10299 i
= unicosmk_dex_count
;
10300 for (dex
= unicosmk_dex_list
; dex
; dex
= dex
->next
)
10302 fprintf (file
, "\tDEX (%d) = ", i
);
10303 assemble_name (file
, dex
->name
);
10308 fprintf (file
, "\t.dexend\n");
10314 unicosmk_output_deferred_case_vectors (file
)
10315 FILE *file ATTRIBUTE_UNUSED
;
10319 unicosmk_gen_dsib (imaskP
)
10320 unsigned long * imaskP ATTRIBUTE_UNUSED
;
10324 unicosmk_output_ssib (file
, fnname
)
10325 FILE * file ATTRIBUTE_UNUSED
;
10326 const char * fnname ATTRIBUTE_UNUSED
;
10330 unicosmk_add_call_info_word (x
)
10331 rtx x ATTRIBUTE_UNUSED
;
10337 unicosmk_need_dex (x
)
10338 rtx x ATTRIBUTE_UNUSED
;
10343 #endif /* TARGET_ABI_UNICOSMK */
10345 #include "gt-alpha.h"