1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
4 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
6 This file is part of GNU CC.
8 GNU CC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
13 GNU CC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GNU CC; see the file COPYING. If not, write to
20 the Free Software Foundation, 59 Temple Place - Suite 330,
21 Boston, MA 02111-1307, USA. */
26 #include "coretypes.h"
31 #include "hard-reg-set.h"
33 #include "insn-config.h"
34 #include "conditions.h"
36 #include "insn-attr.h"
47 #include "integrate.h"
50 #include "target-def.h"
52 #include "langhooks.h"
53 #include <splay-tree.h>
55 /* Specify which cpu to schedule for. */
57 enum processor_type alpha_cpu
;
58 static const char * const alpha_cpu_name
[] =
63 /* Specify how accurate floating-point traps need to be. */
65 enum alpha_trap_precision alpha_tp
;
67 /* Specify the floating-point rounding mode. */
69 enum alpha_fp_rounding_mode alpha_fprm
;
71 /* Specify which things cause traps. */
73 enum alpha_fp_trap_mode alpha_fptm
;
75 /* Specify bit size of immediate TLS offsets. */
77 int alpha_tls_size
= 32;
79 /* Strings decoded into the above options. */
81 const char *alpha_cpu_string
; /* -mcpu= */
82 const char *alpha_tune_string
; /* -mtune= */
83 const char *alpha_tp_string
; /* -mtrap-precision=[p|s|i] */
84 const char *alpha_fprm_string
; /* -mfp-rounding-mode=[n|m|c|d] */
85 const char *alpha_fptm_string
; /* -mfp-trap-mode=[n|u|su|sui] */
86 const char *alpha_mlat_string
; /* -mmemory-latency= */
87 const char *alpha_tls_size_string
; /* -mtls-size=[16|32|64] */
89 /* Save information from a "cmpxx" operation until the branch or scc is
92 struct alpha_compare alpha_compare
;
94 /* Nonzero if inside of a function, because the Alpha asm can't
95 handle .files inside of functions. */
97 static int inside_function
= FALSE
;
99 /* The number of cycles of latency we should assume on memory reads. */
101 int alpha_memory_latency
= 3;
103 /* Whether the function needs the GP. */
105 static int alpha_function_needs_gp
;
107 /* The alias set for prologue/epilogue register save/restore. */
109 static GTY(()) int alpha_sr_alias_set
;
111 /* The assembler name of the current function. */
113 static const char *alpha_fnname
;
115 /* The next explicit relocation sequence number. */
116 extern GTY(()) int alpha_next_sequence_number
;
117 int alpha_next_sequence_number
= 1;
119 /* The literal and gpdisp sequence numbers for this insn, as printed
120 by %# and %* respectively. */
121 extern GTY(()) int alpha_this_literal_sequence_number
;
122 extern GTY(()) int alpha_this_gpdisp_sequence_number
;
123 int alpha_this_literal_sequence_number
;
124 int alpha_this_gpdisp_sequence_number
;
126 /* Costs of various operations on the different architectures. */
128 struct alpha_rtx_cost_data
130 unsigned char fp_add
;
131 unsigned char fp_mult
;
132 unsigned char fp_div_sf
;
133 unsigned char fp_div_df
;
134 unsigned char int_mult_si
;
135 unsigned char int_mult_di
;
136 unsigned char int_shift
;
137 unsigned char int_cmov
;
140 static struct alpha_rtx_cost_data
const alpha_rtx_cost_data
[PROCESSOR_MAX
] =
143 COSTS_N_INSNS (6), /* fp_add */
144 COSTS_N_INSNS (6), /* fp_mult */
145 COSTS_N_INSNS (34), /* fp_div_sf */
146 COSTS_N_INSNS (63), /* fp_div_df */
147 COSTS_N_INSNS (23), /* int_mult_si */
148 COSTS_N_INSNS (23), /* int_mult_di */
149 COSTS_N_INSNS (2), /* int_shift */
150 COSTS_N_INSNS (2), /* int_cmov */
153 COSTS_N_INSNS (4), /* fp_add */
154 COSTS_N_INSNS (4), /* fp_mult */
155 COSTS_N_INSNS (15), /* fp_div_sf */
156 COSTS_N_INSNS (22), /* fp_div_df */
157 COSTS_N_INSNS (8), /* int_mult_si */
158 COSTS_N_INSNS (12), /* int_mult_di */
159 COSTS_N_INSNS (1) + 1, /* int_shift */
160 COSTS_N_INSNS (1), /* int_cmov */
163 COSTS_N_INSNS (4), /* fp_add */
164 COSTS_N_INSNS (4), /* fp_mult */
165 COSTS_N_INSNS (12), /* fp_div_sf */
166 COSTS_N_INSNS (15), /* fp_div_df */
167 COSTS_N_INSNS (7), /* int_mult_si */
168 COSTS_N_INSNS (7), /* int_mult_di */
169 COSTS_N_INSNS (1), /* int_shift */
170 COSTS_N_INSNS (2), /* int_cmov */
174 /* Declarations of static functions. */
175 static bool alpha_function_ok_for_sibcall
176 PARAMS ((tree
, tree
));
177 static int tls_symbolic_operand_1
178 PARAMS ((rtx
, enum machine_mode
, int, int));
179 static enum tls_model tls_symbolic_operand_type
181 static bool decl_has_samegp
183 static bool alpha_in_small_data_p
185 static rtx get_tls_get_addr
187 static int some_small_symbolic_operand_1
188 PARAMS ((rtx
*, void *));
189 static int split_small_symbolic_operand_1
190 PARAMS ((rtx
*, void *));
191 static bool alpha_cannot_copy_insn_p
193 static bool alpha_rtx_costs
194 PARAMS ((rtx
, int, int, int *));
195 static void alpha_set_memflags_1
196 PARAMS ((rtx
, int, int, int));
197 static rtx alpha_emit_set_const_1
198 PARAMS ((rtx
, enum machine_mode
, HOST_WIDE_INT
, int));
199 static void alpha_expand_unaligned_load_words
200 PARAMS ((rtx
*out_regs
, rtx smem
, HOST_WIDE_INT words
, HOST_WIDE_INT ofs
));
201 static void alpha_expand_unaligned_store_words
202 PARAMS ((rtx
*out_regs
, rtx smem
, HOST_WIDE_INT words
, HOST_WIDE_INT ofs
));
203 static void alpha_init_builtins
205 static rtx alpha_expand_builtin
206 PARAMS ((tree
, rtx
, rtx
, enum machine_mode
, int));
207 static void alpha_sa_mask
208 PARAMS ((unsigned long *imaskP
, unsigned long *fmaskP
));
209 static int find_lo_sum_using_gp
210 PARAMS ((rtx
*, void *));
211 static int alpha_does_function_need_gp
213 static int alpha_ra_ever_killed
215 static const char *get_trap_mode_suffix
217 static const char *get_round_mode_suffix
219 static const char *get_some_local_dynamic_name
221 static int get_some_local_dynamic_name_1
222 PARAMS ((rtx
*, void *));
223 static rtx set_frame_related_p
225 static const char *alpha_lookup_xfloating_lib_func
226 PARAMS ((enum rtx_code
));
227 static int alpha_compute_xfloating_mode_arg
228 PARAMS ((enum rtx_code
, enum alpha_fp_rounding_mode
));
229 static void alpha_emit_xfloating_libcall
230 PARAMS ((const char *, rtx
, rtx
[], int, rtx
));
231 static rtx alpha_emit_xfloating_compare
232 PARAMS ((enum rtx_code
, rtx
, rtx
));
233 static void alpha_output_function_end_prologue
235 static int alpha_adjust_cost
236 PARAMS ((rtx
, rtx
, rtx
, int));
237 static int alpha_issue_rate
239 static int alpha_use_dfa_pipeline_interface
241 static int alpha_multipass_dfa_lookahead
243 static void alpha_reorg
246 #ifdef OBJECT_FORMAT_ELF
247 static void alpha_elf_select_rtx_section
248 PARAMS ((enum machine_mode
, rtx
, unsigned HOST_WIDE_INT
));
251 #if TARGET_ABI_OPEN_VMS
252 static bool alpha_linkage_symbol_p
253 PARAMS ((const char *symname
));
254 static int alpha_write_one_linkage
255 PARAMS ((splay_tree_node
, void *));
256 static void alpha_write_linkage
257 PARAMS ((FILE *, const char *, tree
));
261 static void alpha_output_mi_thunk_osf
262 PARAMS ((FILE *, tree
, HOST_WIDE_INT
, HOST_WIDE_INT
, tree
));
265 static struct machine_function
* alpha_init_machine_status
268 static void unicosmk_output_deferred_case_vectors
PARAMS ((FILE *));
269 static void unicosmk_gen_dsib
PARAMS ((unsigned long *imaskP
));
270 static void unicosmk_output_ssib
PARAMS ((FILE *, const char *));
271 static int unicosmk_need_dex
PARAMS ((rtx
));
273 /* Get the number of args of a function in one of two ways. */
274 #if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
275 #define NUM_ARGS current_function_args_info.num_args
277 #define NUM_ARGS current_function_args_info
283 /* Initialize the GCC target structure. */
284 #if TARGET_ABI_OPEN_VMS
285 const struct attribute_spec vms_attribute_table
[];
286 static unsigned int vms_section_type_flags
PARAMS ((tree
, const char *, int));
287 static void vms_asm_named_section
PARAMS ((const char *, unsigned int));
288 static void vms_asm_out_constructor
PARAMS ((rtx
, int));
289 static void vms_asm_out_destructor
PARAMS ((rtx
, int));
290 # undef TARGET_ATTRIBUTE_TABLE
291 # define TARGET_ATTRIBUTE_TABLE vms_attribute_table
292 # undef TARGET_SECTION_TYPE_FLAGS
293 # define TARGET_SECTION_TYPE_FLAGS vms_section_type_flags
296 #undef TARGET_IN_SMALL_DATA_P
297 #define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
299 #if TARGET_ABI_UNICOSMK
300 static void unicosmk_asm_named_section
PARAMS ((const char *, unsigned int));
301 static void unicosmk_insert_attributes
PARAMS ((tree
, tree
*));
302 static unsigned int unicosmk_section_type_flags
PARAMS ((tree
, const char *,
304 static void unicosmk_unique_section
PARAMS ((tree
, int));
305 # undef TARGET_INSERT_ATTRIBUTES
306 # define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
307 # undef TARGET_SECTION_TYPE_FLAGS
308 # define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
309 # undef TARGET_ASM_UNIQUE_SECTION
310 # define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
311 # undef TARGET_ASM_GLOBALIZE_LABEL
312 # define TARGET_ASM_GLOBALIZE_LABEL hook_FILEptr_constcharptr_void
315 #undef TARGET_ASM_ALIGNED_HI_OP
316 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
317 #undef TARGET_ASM_ALIGNED_DI_OP
318 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
320 /* Default unaligned ops are provided for ELF systems. To get unaligned
321 data for non-ELF systems, we have to turn off auto alignment. */
322 #ifndef OBJECT_FORMAT_ELF
323 #undef TARGET_ASM_UNALIGNED_HI_OP
324 #define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
325 #undef TARGET_ASM_UNALIGNED_SI_OP
326 #define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
327 #undef TARGET_ASM_UNALIGNED_DI_OP
328 #define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
331 #ifdef OBJECT_FORMAT_ELF
332 #undef TARGET_ASM_SELECT_RTX_SECTION
333 #define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
336 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
337 #define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
339 #undef TARGET_SCHED_ADJUST_COST
340 #define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
341 #undef TARGET_SCHED_ISSUE_RATE
342 #define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
343 #undef TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE
344 #define TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE \
345 alpha_use_dfa_pipeline_interface
346 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
347 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
348 alpha_multipass_dfa_lookahead
350 #undef TARGET_HAVE_TLS
351 #define TARGET_HAVE_TLS HAVE_AS_TLS
353 #undef TARGET_INIT_BUILTINS
354 #define TARGET_INIT_BUILTINS alpha_init_builtins
355 #undef TARGET_EXPAND_BUILTIN
356 #define TARGET_EXPAND_BUILTIN alpha_expand_builtin
358 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
359 #define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
360 #undef TARGET_CANNOT_COPY_INSN_P
361 #define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
364 #undef TARGET_ASM_OUTPUT_MI_THUNK
365 #define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
366 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
367 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
370 #undef TARGET_RTX_COSTS
371 #define TARGET_RTX_COSTS alpha_rtx_costs
372 #undef TARGET_ADDRESS_COST
373 #define TARGET_ADDRESS_COST hook_int_rtx_0
375 #undef TARGET_MACHINE_DEPENDENT_REORG
376 #define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
378 struct gcc_target targetm
= TARGET_INITIALIZER
;
380 /* Parse target option strings. */
386 static const struct cpu_table
{
387 const char *const name
;
388 const enum processor_type processor
;
391 #define EV5_MASK (MASK_CPU_EV5)
392 #define EV6_MASK (MASK_CPU_EV6|MASK_BWX|MASK_MAX|MASK_FIX)
393 { "ev4", PROCESSOR_EV4
, 0 },
394 { "ev45", PROCESSOR_EV4
, 0 },
395 { "21064", PROCESSOR_EV4
, 0 },
396 { "ev5", PROCESSOR_EV5
, EV5_MASK
},
397 { "21164", PROCESSOR_EV5
, EV5_MASK
},
398 { "ev56", PROCESSOR_EV5
, EV5_MASK
|MASK_BWX
},
399 { "21164a", PROCESSOR_EV5
, EV5_MASK
|MASK_BWX
},
400 { "pca56", PROCESSOR_EV5
, EV5_MASK
|MASK_BWX
|MASK_MAX
},
401 { "21164PC",PROCESSOR_EV5
, EV5_MASK
|MASK_BWX
|MASK_MAX
},
402 { "21164pc",PROCESSOR_EV5
, EV5_MASK
|MASK_BWX
|MASK_MAX
},
403 { "ev6", PROCESSOR_EV6
, EV6_MASK
},
404 { "21264", PROCESSOR_EV6
, EV6_MASK
},
405 { "ev67", PROCESSOR_EV6
, EV6_MASK
|MASK_CIX
},
406 { "21264a", PROCESSOR_EV6
, EV6_MASK
|MASK_CIX
},
410 /* Unicos/Mk doesn't have shared libraries. */
411 if (TARGET_ABI_UNICOSMK
&& flag_pic
)
413 warning ("-f%s ignored for Unicos/Mk (not supported)",
414 (flag_pic
> 1) ? "PIC" : "pic");
  /* On Unicos/Mk, the native compiler consistently generates /d suffixes for
     floating-point instructions.  Make that the default for this target.  */
420 if (TARGET_ABI_UNICOSMK
)
421 alpha_fprm
= ALPHA_FPRM_DYN
;
423 alpha_fprm
= ALPHA_FPRM_NORM
;
425 alpha_tp
= ALPHA_TP_PROG
;
426 alpha_fptm
= ALPHA_FPTM_N
;
428 /* We cannot use su and sui qualifiers for conversion instructions on
429 Unicos/Mk. I'm not sure if this is due to assembler or hardware
430 limitations. Right now, we issue a warning if -mieee is specified
431 and then ignore it; eventually, we should either get it right or
432 disable the option altogether. */
436 if (TARGET_ABI_UNICOSMK
)
437 warning ("-mieee not supported on Unicos/Mk");
440 alpha_tp
= ALPHA_TP_INSN
;
441 alpha_fptm
= ALPHA_FPTM_SU
;
445 if (TARGET_IEEE_WITH_INEXACT
)
447 if (TARGET_ABI_UNICOSMK
)
448 warning ("-mieee-with-inexact not supported on Unicos/Mk");
451 alpha_tp
= ALPHA_TP_INSN
;
452 alpha_fptm
= ALPHA_FPTM_SUI
;
458 if (! strcmp (alpha_tp_string
, "p"))
459 alpha_tp
= ALPHA_TP_PROG
;
460 else if (! strcmp (alpha_tp_string
, "f"))
461 alpha_tp
= ALPHA_TP_FUNC
;
462 else if (! strcmp (alpha_tp_string
, "i"))
463 alpha_tp
= ALPHA_TP_INSN
;
465 error ("bad value `%s' for -mtrap-precision switch", alpha_tp_string
);
468 if (alpha_fprm_string
)
470 if (! strcmp (alpha_fprm_string
, "n"))
471 alpha_fprm
= ALPHA_FPRM_NORM
;
472 else if (! strcmp (alpha_fprm_string
, "m"))
473 alpha_fprm
= ALPHA_FPRM_MINF
;
474 else if (! strcmp (alpha_fprm_string
, "c"))
475 alpha_fprm
= ALPHA_FPRM_CHOP
;
476 else if (! strcmp (alpha_fprm_string
,"d"))
477 alpha_fprm
= ALPHA_FPRM_DYN
;
479 error ("bad value `%s' for -mfp-rounding-mode switch",
483 if (alpha_fptm_string
)
485 if (strcmp (alpha_fptm_string
, "n") == 0)
486 alpha_fptm
= ALPHA_FPTM_N
;
487 else if (strcmp (alpha_fptm_string
, "u") == 0)
488 alpha_fptm
= ALPHA_FPTM_U
;
489 else if (strcmp (alpha_fptm_string
, "su") == 0)
490 alpha_fptm
= ALPHA_FPTM_SU
;
491 else if (strcmp (alpha_fptm_string
, "sui") == 0)
492 alpha_fptm
= ALPHA_FPTM_SUI
;
494 error ("bad value `%s' for -mfp-trap-mode switch", alpha_fptm_string
);
497 if (alpha_tls_size_string
)
499 if (strcmp (alpha_tls_size_string
, "16") == 0)
501 else if (strcmp (alpha_tls_size_string
, "32") == 0)
503 else if (strcmp (alpha_tls_size_string
, "64") == 0)
506 error ("bad value `%s' for -mtls-size switch", alpha_tls_size_string
);
510 = TARGET_CPU_DEFAULT
& MASK_CPU_EV6
? PROCESSOR_EV6
511 : (TARGET_CPU_DEFAULT
& MASK_CPU_EV5
? PROCESSOR_EV5
: PROCESSOR_EV4
);
513 if (alpha_cpu_string
)
515 for (i
= 0; cpu_table
[i
].name
; i
++)
516 if (! strcmp (alpha_cpu_string
, cpu_table
[i
].name
))
518 alpha_cpu
= cpu_table
[i
].processor
;
519 target_flags
&= ~ (MASK_BWX
| MASK_MAX
| MASK_FIX
| MASK_CIX
520 | MASK_CPU_EV5
| MASK_CPU_EV6
);
521 target_flags
|= cpu_table
[i
].flags
;
524 if (! cpu_table
[i
].name
)
525 error ("bad value `%s' for -mcpu switch", alpha_cpu_string
);
528 if (alpha_tune_string
)
530 for (i
= 0; cpu_table
[i
].name
; i
++)
531 if (! strcmp (alpha_tune_string
, cpu_table
[i
].name
))
533 alpha_cpu
= cpu_table
[i
].processor
;
536 if (! cpu_table
[i
].name
)
537 error ("bad value `%s' for -mcpu switch", alpha_tune_string
);
540 /* Do some sanity checks on the above options. */
542 if (TARGET_ABI_UNICOSMK
&& alpha_fptm
!= ALPHA_FPTM_N
)
544 warning ("trap mode not supported on Unicos/Mk");
545 alpha_fptm
= ALPHA_FPTM_N
;
548 if ((alpha_fptm
== ALPHA_FPTM_SU
|| alpha_fptm
== ALPHA_FPTM_SUI
)
549 && alpha_tp
!= ALPHA_TP_INSN
&& ! TARGET_CPU_EV6
)
551 warning ("fp software completion requires -mtrap-precision=i");
552 alpha_tp
= ALPHA_TP_INSN
;
557 /* Except for EV6 pass 1 (not released), we always have precise
558 arithmetic traps. Which means we can do software completion
559 without minding trap shadows. */
560 alpha_tp
= ALPHA_TP_PROG
;
563 if (TARGET_FLOAT_VAX
)
565 if (alpha_fprm
== ALPHA_FPRM_MINF
|| alpha_fprm
== ALPHA_FPRM_DYN
)
567 warning ("rounding mode not supported for VAX floats");
568 alpha_fprm
= ALPHA_FPRM_NORM
;
570 if (alpha_fptm
== ALPHA_FPTM_SUI
)
572 warning ("trap mode not supported for VAX floats");
573 alpha_fptm
= ALPHA_FPTM_SU
;
581 if (!alpha_mlat_string
)
582 alpha_mlat_string
= "L1";
584 if (ISDIGIT ((unsigned char)alpha_mlat_string
[0])
585 && (lat
= strtol (alpha_mlat_string
, &end
, 10), *end
== '\0'))
587 else if ((alpha_mlat_string
[0] == 'L' || alpha_mlat_string
[0] == 'l')
588 && ISDIGIT ((unsigned char)alpha_mlat_string
[1])
589 && alpha_mlat_string
[2] == '\0')
591 static int const cache_latency
[][4] =
593 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
594 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
595 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
598 lat
= alpha_mlat_string
[1] - '0';
599 if (lat
<= 0 || lat
> 3 || cache_latency
[alpha_cpu
][lat
-1] == -1)
601 warning ("L%d cache latency unknown for %s",
602 lat
, alpha_cpu_name
[alpha_cpu
]);
606 lat
= cache_latency
[alpha_cpu
][lat
-1];
608 else if (! strcmp (alpha_mlat_string
, "main"))
610 /* Most current memories have about 370ns latency. This is
611 a reasonable guess for a fast cpu. */
616 warning ("bad value `%s' for -mmemory-latency", alpha_mlat_string
);
620 alpha_memory_latency
= lat
;
623 /* Default the definition of "small data" to 8 bytes. */
627 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
629 target_flags
|= MASK_SMALL_DATA
;
630 else if (flag_pic
== 2)
631 target_flags
&= ~MASK_SMALL_DATA
;
633 /* Align labels and loops for optimal branching. */
634 /* ??? Kludge these by not doing anything if we don't optimize and also if
635 we are writing ECOFF symbols to work around a bug in DEC's assembler. */
636 if (optimize
> 0 && write_symbols
!= SDB_DEBUG
)
638 if (align_loops
<= 0)
640 if (align_jumps
<= 0)
643 if (align_functions
<= 0)
644 align_functions
= 16;
646 /* Acquire a unique set number for our register saves and restores. */
647 alpha_sr_alias_set
= new_alias_set ();
649 /* Register variables and functions with the garbage collector. */
651 /* Set up function hooks. */
652 init_machine_status
= alpha_init_machine_status
;
654 /* Tell the compiler when we're using VAX floating point. */
655 if (TARGET_FLOAT_VAX
)
657 real_format_for_mode
[SFmode
- QFmode
] = &vax_f_format
;
658 real_format_for_mode
[DFmode
- QFmode
] = &vax_g_format
;
659 real_format_for_mode
[TFmode
- QFmode
] = NULL
;
663 /* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
671 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
/ HOST_BITS_PER_CHAR
;
673 if ((value
& 0xff) != 0 && (value
& 0xff) != 0xff)
679 /* Returns 1 if OP is either the constant zero or a register. If a
680 register, it must be in the proper mode unless MODE is VOIDmode. */
683 reg_or_0_operand (op
, mode
)
685 enum machine_mode mode
;
687 return op
== CONST0_RTX (mode
) || register_operand (op
, mode
);
690 /* Return 1 if OP is a constant in the range of 0-63 (for a shift) or
694 reg_or_6bit_operand (op
, mode
)
696 enum machine_mode mode
;
698 return ((GET_CODE (op
) == CONST_INT
699 && (unsigned HOST_WIDE_INT
) INTVAL (op
) < 64)
700 || register_operand (op
, mode
));
704 /* Return 1 if OP is an 8-bit constant or any register. */
707 reg_or_8bit_operand (op
, mode
)
709 enum machine_mode mode
;
711 return ((GET_CODE (op
) == CONST_INT
712 && (unsigned HOST_WIDE_INT
) INTVAL (op
) < 0x100)
713 || register_operand (op
, mode
));
716 /* Return 1 if OP is a constant or any register. */
719 reg_or_const_int_operand (op
, mode
)
721 enum machine_mode mode
;
723 return GET_CODE (op
) == CONST_INT
|| register_operand (op
, mode
);
726 /* Return 1 if OP is an 8-bit constant. */
729 cint8_operand (op
, mode
)
731 enum machine_mode mode ATTRIBUTE_UNUSED
;
733 return ((GET_CODE (op
) == CONST_INT
734 && (unsigned HOST_WIDE_INT
) INTVAL (op
) < 0x100));
737 /* Return 1 if the operand is a valid second operand to an add insn. */
740 add_operand (op
, mode
)
742 enum machine_mode mode
;
744 if (GET_CODE (op
) == CONST_INT
)
745 /* Constraints I, J, O and P are covered by K. */
746 return (CONST_OK_FOR_LETTER_P (INTVAL (op
), 'K')
747 || CONST_OK_FOR_LETTER_P (INTVAL (op
), 'L'));
749 return register_operand (op
, mode
);
752 /* Return 1 if the operand is a valid second operand to a sign-extending
756 sext_add_operand (op
, mode
)
758 enum machine_mode mode
;
760 if (GET_CODE (op
) == CONST_INT
)
761 return (CONST_OK_FOR_LETTER_P (INTVAL (op
), 'I')
762 || CONST_OK_FOR_LETTER_P (INTVAL (op
), 'O'));
764 return reg_not_elim_operand (op
, mode
);
767 /* Return 1 if OP is the constant 4 or 8. */
770 const48_operand (op
, mode
)
772 enum machine_mode mode ATTRIBUTE_UNUSED
;
774 return (GET_CODE (op
) == CONST_INT
775 && (INTVAL (op
) == 4 || INTVAL (op
) == 8));
778 /* Return 1 if OP is a valid first operand to an AND insn. */
781 and_operand (op
, mode
)
783 enum machine_mode mode
;
785 if (GET_CODE (op
) == CONST_DOUBLE
&& GET_MODE (op
) == VOIDmode
)
786 return (zap_mask (CONST_DOUBLE_LOW (op
))
787 && zap_mask (CONST_DOUBLE_HIGH (op
)));
789 if (GET_CODE (op
) == CONST_INT
)
790 return ((unsigned HOST_WIDE_INT
) INTVAL (op
) < 0x100
791 || (unsigned HOST_WIDE_INT
) ~ INTVAL (op
) < 0x100
792 || zap_mask (INTVAL (op
)));
794 return register_operand (op
, mode
);
797 /* Return 1 if OP is a valid first operand to an IOR or XOR insn. */
800 or_operand (op
, mode
)
802 enum machine_mode mode
;
804 if (GET_CODE (op
) == CONST_INT
)
805 return ((unsigned HOST_WIDE_INT
) INTVAL (op
) < 0x100
806 || (unsigned HOST_WIDE_INT
) ~ INTVAL (op
) < 0x100);
808 return register_operand (op
, mode
);
811 /* Return 1 if OP is a constant that is the width, in bits, of an integral
812 mode smaller than DImode. */
815 mode_width_operand (op
, mode
)
817 enum machine_mode mode ATTRIBUTE_UNUSED
;
819 return (GET_CODE (op
) == CONST_INT
820 && (INTVAL (op
) == 8 || INTVAL (op
) == 16
821 || INTVAL (op
) == 32 || INTVAL (op
) == 64));
824 /* Return 1 if OP is a constant that is the width of an integral machine mode
825 smaller than an integer. */
828 mode_mask_operand (op
, mode
)
830 enum machine_mode mode ATTRIBUTE_UNUSED
;
832 if (GET_CODE (op
) == CONST_INT
)
834 HOST_WIDE_INT value
= INTVAL (op
);
840 if (value
== 0xffffffff)
845 else if (HOST_BITS_PER_WIDE_INT
== 32 && GET_CODE (op
) == CONST_DOUBLE
)
847 if (CONST_DOUBLE_LOW (op
) == 0xffffffff && CONST_DOUBLE_HIGH (op
) == 0)
854 /* Return 1 if OP is a multiple of 8 less than 64. */
857 mul8_operand (op
, mode
)
859 enum machine_mode mode ATTRIBUTE_UNUSED
;
861 return (GET_CODE (op
) == CONST_INT
862 && (unsigned HOST_WIDE_INT
) INTVAL (op
) < 64
863 && (INTVAL (op
) & 7) == 0);
866 /* Return 1 if OP is the zero constant for MODE. */
869 const0_operand (op
, mode
)
871 enum machine_mode mode
;
873 return op
== CONST0_RTX (mode
);
876 /* Return 1 if OP is a hard floating-point register. */
879 hard_fp_register_operand (op
, mode
)
881 enum machine_mode mode
;
883 if (mode
!= VOIDmode
&& GET_MODE (op
) != VOIDmode
&& mode
!= GET_MODE (op
))
886 if (GET_CODE (op
) == SUBREG
)
887 op
= SUBREG_REG (op
);
888 return GET_CODE (op
) == REG
&& REGNO_REG_CLASS (REGNO (op
)) == FLOAT_REGS
;
891 /* Return 1 if OP is a hard general register. */
894 hard_int_register_operand (op
, mode
)
896 enum machine_mode mode
;
898 if (mode
!= VOIDmode
&& GET_MODE (op
) != VOIDmode
&& mode
!= GET_MODE (op
))
901 if (GET_CODE (op
) == SUBREG
)
902 op
= SUBREG_REG (op
);
903 return GET_CODE (op
) == REG
&& REGNO_REG_CLASS (REGNO (op
)) == GENERAL_REGS
;
906 /* Return 1 if OP is a register or a constant integer. */
910 reg_or_cint_operand (op
, mode
)
912 enum machine_mode mode
;
914 return (GET_CODE (op
) == CONST_INT
915 || register_operand (op
, mode
));
918 /* Return 1 if OP is something that can be reloaded into a register;
919 if it is a MEM, it need not be valid. */
922 some_operand (op
, mode
)
924 enum machine_mode mode
;
926 if (mode
!= VOIDmode
&& GET_MODE (op
) != VOIDmode
&& mode
!= GET_MODE (op
))
929 switch (GET_CODE (op
))
943 return some_operand (SUBREG_REG (op
), VOIDmode
);
952 /* Likewise, but don't accept constants. */
955 some_ni_operand (op
, mode
)
957 enum machine_mode mode
;
959 if (GET_MODE (op
) != mode
&& mode
!= VOIDmode
)
962 if (GET_CODE (op
) == SUBREG
)
963 op
= SUBREG_REG (op
);
965 return (GET_CODE (op
) == REG
|| GET_CODE (op
) == MEM
);
968 /* Return 1 if OP is a valid operand for the source of a move insn. */
971 input_operand (op
, mode
)
973 enum machine_mode mode
;
975 if (mode
!= VOIDmode
&& GET_MODE (op
) != VOIDmode
&& mode
!= GET_MODE (op
))
978 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
&& GET_MODE (op
) != mode
)
981 switch (GET_CODE (op
))
986 if (TARGET_EXPLICIT_RELOCS
)
      /* We don't split symbolic operands into something unintelligible
	 until after reload, but we do not wish non-small, non-global
	 symbolic operands to be reconstructed from their high/lo_sum
992 return (small_symbolic_operand (op
, mode
)
993 || global_symbolic_operand (op
, mode
)
994 || gotdtp_symbolic_operand (op
, mode
)
995 || gottp_symbolic_operand (op
, mode
));
998 /* This handles both the Windows/NT and OSF cases. */
999 return mode
== ptr_mode
|| mode
== DImode
;
1002 return (TARGET_EXPLICIT_RELOCS
1003 && local_symbolic_operand (XEXP (op
, 0), mode
));
1010 if (register_operand (op
, mode
))
1012 /* ... fall through ... */
1014 return ((TARGET_BWX
|| (mode
!= HImode
&& mode
!= QImode
))
1015 && general_operand (op
, mode
));
1019 return op
== CONST0_RTX (mode
);
1022 return mode
== QImode
|| mode
== HImode
|| add_operand (op
, mode
);
1024 case CONSTANT_P_RTX
:
1034 /* Return 1 if OP is a SYMBOL_REF for a function known to be in this
1035 file, and in the same section as the current function. */
1038 samegp_function_operand (op
, mode
)
1040 enum machine_mode mode ATTRIBUTE_UNUSED
;
1042 if (GET_CODE (op
) != SYMBOL_REF
)
1045 /* Easy test for recursion. */
1046 if (op
== XEXP (DECL_RTL (current_function_decl
), 0))
1049 /* Functions that are not local can be overridden, and thus may
1050 not share the same gp. */
1051 if (! SYMBOL_REF_LOCAL_P (op
))
1054 /* If -msmall-data is in effect, assume that there is only one GP
1055 for the module, and so any local symbol has this property. We
1056 need explicit relocations to be able to enforce this for symbols
1057 not defined in this unit of translation, however. */
1058 if (TARGET_EXPLICIT_RELOCS
&& TARGET_SMALL_DATA
)
1061 /* Functions that are not external are defined in this UoT,
1062 and thus must share the same gp. */
1063 return ! SYMBOL_REF_EXTERNAL_P (op
);
1066 /* Return 1 if OP is a SYMBOL_REF for which we can make a call via bsr. */
1069 direct_call_operand (op
, mode
)
1071 enum machine_mode mode
;
1073 tree op_decl
, cfun_sec
, op_sec
;
1075 /* Must share the same GP. */
1076 if (!samegp_function_operand (op
, mode
))
1079 /* If profiling is implemented via linker tricks, we can't jump
1080 to the nogp alternate entry point. Note that current_function_profile
1081 would not be correct, since that doesn't indicate if the target
1082 function uses profiling. */
1083 /* ??? TARGET_PROFILING_NEEDS_GP isn't really the right test,
1084 but is approximately correct for the OSF ABIs. Don't know
1085 what to do for VMS, NT, or UMK. */
1086 if (!TARGET_PROFILING_NEEDS_GP
&& profile_flag
)
1089 /* Must be a function. In some cases folks create thunks in static
1090 data structures and then make calls to them. If we allow the
1091 direct call, we'll get an error from the linker about !samegp reloc
1092 against a symbol without a .prologue directive. */
1093 if (!SYMBOL_REF_FUNCTION_P (op
))
1096 /* Must be "near" so that the branch is assumed to reach. With
1097 -msmall-text, this is assumed true of all local symbols. Since
1098 we've already checked samegp, locality is already assured. */
1099 if (TARGET_SMALL_TEXT
)
1102 /* Otherwise, a decl is "near" if it is defined in the same section. */
1103 if (flag_function_sections
)
1106 op_decl
= SYMBOL_REF_DECL (op
);
1107 if (DECL_ONE_ONLY (current_function_decl
)
1108 || (op_decl
&& DECL_ONE_ONLY (op_decl
)))
1111 cfun_sec
= DECL_SECTION_NAME (current_function_decl
);
1112 op_sec
= op_decl
? DECL_SECTION_NAME (op_decl
) : NULL
;
1113 return ((!cfun_sec
&& !op_sec
)
1114 || (cfun_sec
&& op_sec
1115 && strcmp (TREE_STRING_POINTER (cfun_sec
),
1116 TREE_STRING_POINTER (op_sec
)) == 0));
1119 /* Return true if OP is a LABEL_REF, or SYMBOL_REF or CONST referencing
1120 a (non-tls) variable known to be defined in this file. */
1123 local_symbolic_operand (op
, mode
)
1125 enum machine_mode mode
;
1127 if (mode
!= VOIDmode
&& GET_MODE (op
) != VOIDmode
&& mode
!= GET_MODE (op
))
1130 if (GET_CODE (op
) == LABEL_REF
)
1133 if (GET_CODE (op
) == CONST
1134 && GET_CODE (XEXP (op
, 0)) == PLUS
1135 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == CONST_INT
)
1136 op
= XEXP (XEXP (op
, 0), 0);
1138 if (GET_CODE (op
) != SYMBOL_REF
)
1141 return SYMBOL_REF_LOCAL_P (op
) && !SYMBOL_REF_TLS_MODEL (op
);
1144 /* Return true if OP is a SYMBOL_REF or CONST referencing a variable
1145 known to be defined in this file in the small data area. */
1148 small_symbolic_operand (op
, mode
)
1150 enum machine_mode mode ATTRIBUTE_UNUSED
;
1152 if (! TARGET_SMALL_DATA
)
1155 if (mode
!= VOIDmode
&& GET_MODE (op
) != VOIDmode
&& mode
!= GET_MODE (op
))
1158 if (GET_CODE (op
) == CONST
1159 && GET_CODE (XEXP (op
, 0)) == PLUS
1160 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == CONST_INT
)
1161 op
= XEXP (XEXP (op
, 0), 0);
1163 if (GET_CODE (op
) != SYMBOL_REF
)
1166 /* ??? There's no encode_section_info equivalent for the rtl
1167 constant pool, so SYMBOL_FLAG_SMALL never gets set. */
1168 if (CONSTANT_POOL_ADDRESS_P (op
))
1169 return GET_MODE_SIZE (get_pool_mode (op
)) <= (unsigned) g_switch_value
;
1171 return (SYMBOL_REF_LOCAL_P (op
)
1172 && SYMBOL_REF_SMALL_P (op
)
1173 && SYMBOL_REF_TLS_MODEL (op
) == 0);
1176 /* Return true if OP is a SYMBOL_REF or CONST referencing a variable
1177 not known (or known not) to be defined in this file. */
1180 global_symbolic_operand (op
, mode
)
1182 enum machine_mode mode
;
1184 if (mode
!= VOIDmode
&& GET_MODE (op
) != VOIDmode
&& mode
!= GET_MODE (op
))
1187 if (GET_CODE (op
) == CONST
1188 && GET_CODE (XEXP (op
, 0)) == PLUS
1189 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == CONST_INT
)
1190 op
= XEXP (XEXP (op
, 0), 0);
1192 if (GET_CODE (op
) != SYMBOL_REF
)
1195 return !SYMBOL_REF_LOCAL_P (op
) && !SYMBOL_REF_TLS_MODEL (op
);
1198 /* Return 1 if OP is a valid operand for the MEM of a CALL insn. */
1201 call_operand (op
, mode
)
1203 enum machine_mode mode
;
1208 if (GET_CODE (op
) == REG
)
    /* Disallow virtual registers to cope with pathological test cases
       such as compile/930117-1.c in which the virtual reg decomposes
       to the frame pointer.  Which is a hard reg that is not $27.  */
1215 return (REGNO (op
) == 27 || REGNO (op
) > LAST_VIRTUAL_REGISTER
);
1220 if (TARGET_ABI_UNICOSMK
)
1222 if (GET_CODE (op
) == SYMBOL_REF
)
1228 /* Returns 1 if OP is a symbolic operand, i.e. a symbol_ref or a label_ref,
1229 possibly with an offset. */
1232 symbolic_operand (op
, mode
)
1234 enum machine_mode mode
;
1236 if (mode
!= VOIDmode
&& GET_MODE (op
) != VOIDmode
&& mode
!= GET_MODE (op
))
1238 if (GET_CODE (op
) == SYMBOL_REF
|| GET_CODE (op
) == LABEL_REF
)
1240 if (GET_CODE (op
) == CONST
1241 && GET_CODE (XEXP (op
,0)) == PLUS
1242 && GET_CODE (XEXP (XEXP (op
,0), 0)) == SYMBOL_REF
1243 && GET_CODE (XEXP (XEXP (op
,0), 1)) == CONST_INT
)
1248 /* Return true if OP is valid for a particular TLS relocation. */
1251 tls_symbolic_operand_1 (op
, mode
, size
, unspec
)
1253 enum machine_mode mode
;
1256 if (mode
!= VOIDmode
&& GET_MODE (op
) != VOIDmode
&& mode
!= GET_MODE (op
))
1259 if (GET_CODE (op
) != CONST
)
1263 if (GET_CODE (op
) != UNSPEC
|| XINT (op
, 1) != unspec
)
1265 op
= XVECEXP (op
, 0, 0);
1267 if (GET_CODE (op
) != SYMBOL_REF
)
1270 if (SYMBOL_REF_LOCAL_P (op
))
1272 if (alpha_tls_size
> size
)
1281 switch (SYMBOL_REF_TLS_MODEL (op
))
1283 case TLS_MODEL_LOCAL_DYNAMIC
:
1284 return unspec
== UNSPEC_DTPREL
;
1285 case TLS_MODEL_INITIAL_EXEC
:
1286 return unspec
== UNSPEC_TPREL
&& size
== 64;
1287 case TLS_MODEL_LOCAL_EXEC
:
1288 return unspec
== UNSPEC_TPREL
;
1294 /* Return true if OP is valid for 16-bit DTP relative relocations. */
1297 dtp16_symbolic_operand (op
, mode
)
1299 enum machine_mode mode
;
1301 return tls_symbolic_operand_1 (op
, mode
, 16, UNSPEC_DTPREL
);
1304 /* Return true if OP is valid for 32-bit DTP relative relocations. */
1307 dtp32_symbolic_operand (op
, mode
)
1309 enum machine_mode mode
;
1311 return tls_symbolic_operand_1 (op
, mode
, 32, UNSPEC_DTPREL
);
1314 /* Return true if OP is valid for 64-bit DTP relative relocations. */
1317 gotdtp_symbolic_operand (op
, mode
)
1319 enum machine_mode mode
;
1321 return tls_symbolic_operand_1 (op
, mode
, 64, UNSPEC_DTPREL
);
1324 /* Return true if OP is valid for 16-bit TP relative relocations. */
1327 tp16_symbolic_operand (op
, mode
)
1329 enum machine_mode mode
;
1331 return tls_symbolic_operand_1 (op
, mode
, 16, UNSPEC_TPREL
);
1334 /* Return true if OP is valid for 32-bit TP relative relocations. */
1337 tp32_symbolic_operand (op
, mode
)
1339 enum machine_mode mode
;
1341 return tls_symbolic_operand_1 (op
, mode
, 32, UNSPEC_TPREL
);
1344 /* Return true if OP is valid for 64-bit TP relative relocations. */
1347 gottp_symbolic_operand (op
, mode
)
1349 enum machine_mode mode
;
1351 return tls_symbolic_operand_1 (op
, mode
, 64, UNSPEC_TPREL
);
1354 /* Return 1 if OP is a valid Alpha comparison operator. Here we know which
1355 comparisons are valid in which insn. */
1358 alpha_comparison_operator (op
, mode
)
1360 enum machine_mode mode
;
1362 enum rtx_code code
= GET_CODE (op
);
1364 if (mode
!= GET_MODE (op
) && mode
!= VOIDmode
)
1367 return (code
== EQ
|| code
== LE
|| code
== LT
1368 || code
== LEU
|| code
== LTU
);
1371 /* Return 1 if OP is a valid Alpha comparison operator against zero.
1372 Here we know which comparisons are valid in which insn. */
1375 alpha_zero_comparison_operator (op
, mode
)
1377 enum machine_mode mode
;
1379 enum rtx_code code
= GET_CODE (op
);
1381 if (mode
!= GET_MODE (op
) && mode
!= VOIDmode
)
1384 return (code
== EQ
|| code
== NE
|| code
== LE
|| code
== LT
1385 || code
== LEU
|| code
== LTU
);
1388 /* Return 1 if OP is a valid Alpha swapped comparison operator. */
1391 alpha_swapped_comparison_operator (op
, mode
)
1393 enum machine_mode mode
;
1395 enum rtx_code code
= GET_CODE (op
);
1397 if ((mode
!= GET_MODE (op
) && mode
!= VOIDmode
)
1398 || GET_RTX_CLASS (code
) != '<')
1401 code
= swap_condition (code
);
1402 return (code
== EQ
|| code
== LE
|| code
== LT
1403 || code
== LEU
|| code
== LTU
);
1406 /* Return 1 if OP is a signed comparison operation. */
1409 signed_comparison_operator (op
, mode
)
1411 enum machine_mode mode ATTRIBUTE_UNUSED
;
1413 enum rtx_code code
= GET_CODE (op
);
1415 if (mode
!= GET_MODE (op
) && mode
!= VOIDmode
)
1418 return (code
== EQ
|| code
== NE
1419 || code
== LE
|| code
== LT
1420 || code
== GE
|| code
== GT
);
1423 /* Return 1 if OP is a valid Alpha floating point comparison operator.
1424 Here we know which comparisons are valid in which insn. */
1427 alpha_fp_comparison_operator (op
, mode
)
1429 enum machine_mode mode
;
1431 enum rtx_code code
= GET_CODE (op
);
1433 if (mode
!= GET_MODE (op
) && mode
!= VOIDmode
)
1436 return (code
== EQ
|| code
== LE
|| code
== LT
|| code
== UNORDERED
);
1439 /* Return 1 if this is a divide or modulus operator. */
1442 divmod_operator (op
, mode
)
1444 enum machine_mode mode ATTRIBUTE_UNUSED
;
1446 switch (GET_CODE (op
))
1448 case DIV
: case MOD
: case UDIV
: case UMOD
:
1458 /* Return 1 if this memory address is a known aligned register plus
1459 a constant. It must be a valid address. This means that we can do
1460 this as an aligned reference plus some offset.
1462 Take into account what reload will do. */
1465 aligned_memory_operand (op
, mode
)
1467 enum machine_mode mode
;
1471 if (reload_in_progress
)
1474 if (GET_CODE (tmp
) == SUBREG
)
1475 tmp
= SUBREG_REG (tmp
);
1476 if (GET_CODE (tmp
) == REG
1477 && REGNO (tmp
) >= FIRST_PSEUDO_REGISTER
)
1479 op
= reg_equiv_memory_loc
[REGNO (tmp
)];
1485 if (GET_CODE (op
) != MEM
1486 || GET_MODE (op
) != mode
)
1490 /* LEGITIMIZE_RELOAD_ADDRESS creates (plus (plus reg const_hi) const_lo)
1491 sorts of constructs. Dig for the real base register. */
1492 if (reload_in_progress
1493 && GET_CODE (op
) == PLUS
1494 && GET_CODE (XEXP (op
, 0)) == PLUS
)
1495 base
= XEXP (XEXP (op
, 0), 0);
1498 if (! memory_address_p (mode
, op
))
1500 base
= (GET_CODE (op
) == PLUS
? XEXP (op
, 0) : op
);
1503 return (GET_CODE (base
) == REG
&& REGNO_POINTER_ALIGN (REGNO (base
)) >= 32);
1506 /* Similar, but return 1 if OP is a MEM which is not alignable. */
1509 unaligned_memory_operand (op
, mode
)
1511 enum machine_mode mode
;
1515 if (reload_in_progress
)
1518 if (GET_CODE (tmp
) == SUBREG
)
1519 tmp
= SUBREG_REG (tmp
);
1520 if (GET_CODE (tmp
) == REG
1521 && REGNO (tmp
) >= FIRST_PSEUDO_REGISTER
)
1523 op
= reg_equiv_memory_loc
[REGNO (tmp
)];
1529 if (GET_CODE (op
) != MEM
1530 || GET_MODE (op
) != mode
)
1534 /* LEGITIMIZE_RELOAD_ADDRESS creates (plus (plus reg const_hi) const_lo)
1535 sorts of constructs. Dig for the real base register. */
1536 if (reload_in_progress
1537 && GET_CODE (op
) == PLUS
1538 && GET_CODE (XEXP (op
, 0)) == PLUS
)
1539 base
= XEXP (XEXP (op
, 0), 0);
1542 if (! memory_address_p (mode
, op
))
1544 base
= (GET_CODE (op
) == PLUS
? XEXP (op
, 0) : op
);
1547 return (GET_CODE (base
) == REG
&& REGNO_POINTER_ALIGN (REGNO (base
)) < 32);
1550 /* Return 1 if OP is either a register or an unaligned memory location. */
1553 reg_or_unaligned_mem_operand (op
, mode
)
1555 enum machine_mode mode
;
1557 return register_operand (op
, mode
) || unaligned_memory_operand (op
, mode
);
1560 /* Return 1 if OP is any memory location. During reload a pseudo matches. */
1563 any_memory_operand (op
, mode
)
1565 enum machine_mode mode ATTRIBUTE_UNUSED
;
1567 return (GET_CODE (op
) == MEM
1568 || (GET_CODE (op
) == SUBREG
&& GET_CODE (SUBREG_REG (op
)) == REG
)
1569 || (reload_in_progress
&& GET_CODE (op
) == REG
1570 && REGNO (op
) >= FIRST_PSEUDO_REGISTER
)
1571 || (reload_in_progress
&& GET_CODE (op
) == SUBREG
1572 && GET_CODE (SUBREG_REG (op
)) == REG
1573 && REGNO (SUBREG_REG (op
)) >= FIRST_PSEUDO_REGISTER
));
1576 /* Returns 1 if OP is not an eliminable register.
1578 This exists to cure a pathological abort in the s8addq (et al) patterns,
1580 long foo () { long t; bar(); return (long) &t * 26107; }
1582 which run afoul of a hack in reload to cure a (presumably) similar
1583 problem with lea-type instructions on other targets. But there is
1584 one of us and many of them, so work around the problem by selectively
1585 preventing combine from making the optimization. */
1588 reg_not_elim_operand (op
, mode
)
1590 enum machine_mode mode
;
1593 if (GET_CODE (op
) == SUBREG
)
1594 inner
= SUBREG_REG (op
);
1595 if (inner
== frame_pointer_rtx
|| inner
== arg_pointer_rtx
)
1598 return register_operand (op
, mode
);
1601 /* Return 1 is OP is a memory location that is not a reference (using
1602 an AND) to an unaligned location. Take into account what reload
1606 normal_memory_operand (op
, mode
)
1608 enum machine_mode mode ATTRIBUTE_UNUSED
;
1610 if (reload_in_progress
)
1613 if (GET_CODE (tmp
) == SUBREG
)
1614 tmp
= SUBREG_REG (tmp
);
1615 if (GET_CODE (tmp
) == REG
1616 && REGNO (tmp
) >= FIRST_PSEUDO_REGISTER
)
1618 op
= reg_equiv_memory_loc
[REGNO (tmp
)];
1620 /* This may not have been assigned an equivalent address if it will
1621 be eliminated. In that case, it doesn't matter what we do. */
1627 return GET_CODE (op
) == MEM
&& GET_CODE (XEXP (op
, 0)) != AND
;
1630 /* Accept a register, but not a subreg of any kind. This allows us to
1631 avoid pathological cases in reload wrt data movement common in
1632 int->fp conversion. */
1635 reg_no_subreg_operand (op
, mode
)
1637 enum machine_mode mode
;
1639 if (GET_CODE (op
) != REG
)
1641 return register_operand (op
, mode
);
1644 /* Recognize an addition operation that includes a constant. Used to
1645 convince reload to canonize (plus (plus reg c1) c2) during register
1649 addition_operation (op
, mode
)
1651 enum machine_mode mode
;
1653 if (GET_MODE (op
) != mode
&& mode
!= VOIDmode
)
1655 if (GET_CODE (op
) == PLUS
1656 && register_operand (XEXP (op
, 0), mode
)
1657 && GET_CODE (XEXP (op
, 1)) == CONST_INT
1658 && CONST_OK_FOR_LETTER_P (INTVAL (XEXP (op
, 1)), 'K'))
1663 /* Implements CONST_OK_FOR_LETTER_P. Return true if the value matches
1664 the range defined for C in [I-P]. */
1667 alpha_const_ok_for_letter_p (value
, c
)
1668 HOST_WIDE_INT value
;
1674 /* An unsigned 8 bit constant. */
1675 return (unsigned HOST_WIDE_INT
) value
< 0x100;
1677 /* The constant zero. */
1680 /* A signed 16 bit constant. */
1681 return (unsigned HOST_WIDE_INT
) (value
+ 0x8000) < 0x10000;
1683 /* A shifted signed 16 bit constant appropriate for LDAH. */
1684 return ((value
& 0xffff) == 0
1685 && ((value
) >> 31 == -1 || value
>> 31 == 0));
1687 /* A constant that can be AND'ed with using a ZAP insn. */
1688 return zap_mask (value
);
1690 /* A complemented unsigned 8 bit constant. */
1691 return (unsigned HOST_WIDE_INT
) (~ value
) < 0x100;
1693 /* A negated unsigned 8 bit constant. */
1694 return (unsigned HOST_WIDE_INT
) (- value
) < 0x100;
1696 /* The constant 1, 2 or 3. */
1697 return value
== 1 || value
== 2 || value
== 3;
1704 /* Implements CONST_DOUBLE_OK_FOR_LETTER_P. Return true if VALUE
1705 matches for C in [GH]. */
1708 alpha_const_double_ok_for_letter_p (value
, c
)
1715 /* The floating point zero constant. */
1716 return (GET_MODE_CLASS (GET_MODE (value
)) == MODE_FLOAT
1717 && value
== CONST0_RTX (GET_MODE (value
)));
1720 /* A valid operand of a ZAP insn. */
1721 return (GET_MODE (value
) == VOIDmode
1722 && zap_mask (CONST_DOUBLE_LOW (value
))
1723 && zap_mask (CONST_DOUBLE_HIGH (value
)));
1730 /* Implements CONST_DOUBLE_OK_FOR_LETTER_P. Return true if VALUE
1734 alpha_extra_constraint (value
, c
)
1741 return normal_memory_operand (value
, VOIDmode
);
1743 return direct_call_operand (value
, Pmode
);
1745 return (GET_CODE (value
) == CONST_INT
1746 && (unsigned HOST_WIDE_INT
) INTVAL (value
) < 64);
1748 return GET_CODE (value
) == HIGH
;
1750 return TARGET_ABI_UNICOSMK
&& symbolic_operand (value
, VOIDmode
);
1752 return (GET_CODE (value
) == CONST_VECTOR
1753 && value
== CONST0_RTX (GET_MODE (value
)));
1759 /* Return 1 if this function can directly return via $26. */
1764 return (! TARGET_ABI_OPEN_VMS
&& ! TARGET_ABI_UNICOSMK
1766 && alpha_sa_size () == 0
1767 && get_frame_size () == 0
1768 && current_function_outgoing_args_size
== 0
1769 && current_function_pretend_args_size
== 0);
1772 /* Return the ADDR_VEC associated with a tablejump insn. */
1775 alpha_tablejump_addr_vec (insn
)
1780 tmp
= JUMP_LABEL (insn
);
1783 tmp
= NEXT_INSN (tmp
);
1786 if (GET_CODE (tmp
) == JUMP_INSN
1787 && GET_CODE (PATTERN (tmp
)) == ADDR_DIFF_VEC
)
1788 return PATTERN (tmp
);
1792 /* Return the label of the predicted edge, or CONST0_RTX if we don't know. */
1795 alpha_tablejump_best_label (insn
)
1798 rtx jump_table
= alpha_tablejump_addr_vec (insn
);
1799 rtx best_label
= NULL_RTX
;
1801 /* ??? Once the CFG doesn't keep getting completely rebuilt, look
1802 there for edge frequency counts from profile data. */
1806 int n_labels
= XVECLEN (jump_table
, 1);
1807 int best_count
= -1;
1810 for (i
= 0; i
< n_labels
; i
++)
1814 for (j
= i
+ 1; j
< n_labels
; j
++)
1815 if (XEXP (XVECEXP (jump_table
, 1, i
), 0)
1816 == XEXP (XVECEXP (jump_table
, 1, j
), 0))
1819 if (count
> best_count
)
1820 best_count
= count
, best_label
= XVECEXP (jump_table
, 1, i
);
1824 return best_label
? best_label
: const0_rtx
;
1827 /* Return the TLS model to use for SYMBOL. */
1829 static enum tls_model
1830 tls_symbolic_operand_type (symbol
)
1833 enum tls_model model
;
1835 if (GET_CODE (symbol
) != SYMBOL_REF
)
1837 model
= SYMBOL_REF_TLS_MODEL (symbol
);
1839 /* Local-exec with a 64-bit size is the same code as initial-exec. */
1840 if (model
== TLS_MODEL_LOCAL_EXEC
&& alpha_tls_size
== 64)
1841 model
= TLS_MODEL_INITIAL_EXEC
;
1846 /* Return true if the function DECL will share the same GP as any
1847 function in the current unit of translation. */
1850 decl_has_samegp (decl
)
1853 /* Functions that are not local can be overridden, and thus may
1854 not share the same gp. */
1855 if (!(*targetm
.binds_local_p
) (decl
))
1858 /* If -msmall-data is in effect, assume that there is only one GP
1859 for the module, and so any local symbol has this property. We
1860 need explicit relocations to be able to enforce this for symbols
1861 not defined in this unit of translation, however. */
1862 if (TARGET_EXPLICIT_RELOCS
&& TARGET_SMALL_DATA
)
1865 /* Functions that are not external are defined in this UoT. */
1866 /* ??? Irritatingly, static functions not yet emitted are still
1867 marked "external". Apply this to non-static functions only. */
1868 return !TREE_PUBLIC (decl
) || !DECL_EXTERNAL (decl
);
1871 /* Return true if EXP should be placed in the small data section. */
1874 alpha_in_small_data_p (exp
)
1877 /* We want to merge strings, so we never consider them small data. */
1878 if (TREE_CODE (exp
) == STRING_CST
)
1881 if (TREE_CODE (exp
) == VAR_DECL
&& DECL_SECTION_NAME (exp
))
1883 const char *section
= TREE_STRING_POINTER (DECL_SECTION_NAME (exp
));
1884 if (strcmp (section
, ".sdata") == 0
1885 || strcmp (section
, ".sbss") == 0)
1890 HOST_WIDE_INT size
= int_size_in_bytes (TREE_TYPE (exp
));
1892 /* If this is an incomplete type with size 0, then we can't put it
1893 in sdata because it might be too big when completed. */
1894 if (size
> 0 && size
<= g_switch_value
)
1901 #if TARGET_ABI_OPEN_VMS
1903 alpha_linkage_symbol_p (symname
)
1904 const char *symname
;
1906 int symlen
= strlen (symname
);
1909 return strcmp (&symname
[symlen
- 4], "..lk") == 0;
1914 #define LINKAGE_SYMBOL_REF_P(X) \
1915 ((GET_CODE (X) == SYMBOL_REF \
1916 && alpha_linkage_symbol_p (XSTR (X, 0))) \
1917 || (GET_CODE (X) == CONST \
1918 && GET_CODE (XEXP (X, 0)) == PLUS \
1919 && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
1920 && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
1923 /* legitimate_address_p recognizes an RTL expression that is a valid
1924 memory address for an instruction. The MODE argument is the
1925 machine mode for the MEM expression that wants to use this address.
1927 For Alpha, we have either a constant address or the sum of a
1928 register and a constant address, or just a register. For DImode,
1929 any of those forms can be surrounded with an AND that clear the
1930 low-order three bits; this is an "unaligned" access. */
1933 alpha_legitimate_address_p (mode
, x
, strict
)
1934 enum machine_mode mode
;
1938 /* If this is an ldq_u type address, discard the outer AND. */
1940 && GET_CODE (x
) == AND
1941 && GET_CODE (XEXP (x
, 1)) == CONST_INT
1942 && INTVAL (XEXP (x
, 1)) == -8)
1945 /* Discard non-paradoxical subregs. */
1946 if (GET_CODE (x
) == SUBREG
1947 && (GET_MODE_SIZE (GET_MODE (x
))
1948 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x
)))))
1951 /* Unadorned general registers are valid. */
1954 ? STRICT_REG_OK_FOR_BASE_P (x
)
1955 : NONSTRICT_REG_OK_FOR_BASE_P (x
)))
1958 /* Constant addresses (i.e. +/- 32k) are valid. */
1959 if (CONSTANT_ADDRESS_P (x
))
1962 #if TARGET_ABI_OPEN_VMS
1963 if (LINKAGE_SYMBOL_REF_P (x
))
1967 /* Register plus a small constant offset is valid. */
1968 if (GET_CODE (x
) == PLUS
)
1970 rtx ofs
= XEXP (x
, 1);
1973 /* Discard non-paradoxical subregs. */
1974 if (GET_CODE (x
) == SUBREG
1975 && (GET_MODE_SIZE (GET_MODE (x
))
1976 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x
)))))
1982 && NONSTRICT_REG_OK_FP_BASE_P (x
)
1983 && GET_CODE (ofs
) == CONST_INT
)
1986 ? STRICT_REG_OK_FOR_BASE_P (x
)
1987 : NONSTRICT_REG_OK_FOR_BASE_P (x
))
1988 && CONSTANT_ADDRESS_P (ofs
))
1991 else if (GET_CODE (x
) == ADDRESSOF
1992 && GET_CODE (ofs
) == CONST_INT
)
1996 /* If we're managing explicit relocations, LO_SUM is valid, as
1997 are small data symbols. */
1998 else if (TARGET_EXPLICIT_RELOCS
)
2000 if (small_symbolic_operand (x
, Pmode
))
2003 if (GET_CODE (x
) == LO_SUM
)
2005 rtx ofs
= XEXP (x
, 1);
2008 /* Discard non-paradoxical subregs. */
2009 if (GET_CODE (x
) == SUBREG
2010 && (GET_MODE_SIZE (GET_MODE (x
))
2011 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x
)))))
2014 /* Must have a valid base register. */
2017 ? STRICT_REG_OK_FOR_BASE_P (x
)
2018 : NONSTRICT_REG_OK_FOR_BASE_P (x
))))
2021 /* The symbol must be local. */
2022 if (local_symbolic_operand (ofs
, Pmode
)
2023 || dtp32_symbolic_operand (ofs
, Pmode
)
2024 || tp32_symbolic_operand (ofs
, Pmode
))
2032 /* Build the SYMBOL_REF for __tls_get_addr. */
2034 static GTY(()) rtx tls_get_addr_libfunc
;
2039 if (!tls_get_addr_libfunc
)
2040 tls_get_addr_libfunc
= init_one_libfunc ("__tls_get_addr");
2041 return tls_get_addr_libfunc
;
2044 /* Try machine-dependent ways of modifying an illegitimate address
2045 to be legitimate. If we find one, return the new, valid address. */
2048 alpha_legitimize_address (x
, scratch
, mode
)
2051 enum machine_mode mode ATTRIBUTE_UNUSED
;
2053 HOST_WIDE_INT addend
;
2055 /* If the address is (plus reg const_int) and the CONST_INT is not a
2056 valid offset, compute the high part of the constant and add it to
2057 the register. Then our address is (plus temp low-part-const). */
2058 if (GET_CODE (x
) == PLUS
2059 && GET_CODE (XEXP (x
, 0)) == REG
2060 && GET_CODE (XEXP (x
, 1)) == CONST_INT
2061 && ! CONSTANT_ADDRESS_P (XEXP (x
, 1)))
2063 addend
= INTVAL (XEXP (x
, 1));
2068 /* If the address is (const (plus FOO const_int)), find the low-order
2069 part of the CONST_INT. Then load FOO plus any high-order part of the
2070 CONST_INT into a register. Our address is (plus reg low-part-const).
2071 This is done to reduce the number of GOT entries. */
2073 && GET_CODE (x
) == CONST
2074 && GET_CODE (XEXP (x
, 0)) == PLUS
2075 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
)
2077 addend
= INTVAL (XEXP (XEXP (x
, 0), 1));
2078 x
= force_reg (Pmode
, XEXP (XEXP (x
, 0), 0));
2082 /* If we have a (plus reg const), emit the load as in (2), then add
2083 the two registers, and finally generate (plus reg low-part-const) as
2086 && GET_CODE (x
) == PLUS
2087 && GET_CODE (XEXP (x
, 0)) == REG
2088 && GET_CODE (XEXP (x
, 1)) == CONST
2089 && GET_CODE (XEXP (XEXP (x
, 1), 0)) == PLUS
2090 && GET_CODE (XEXP (XEXP (XEXP (x
, 1), 0), 1)) == CONST_INT
)
2092 addend
= INTVAL (XEXP (XEXP (XEXP (x
, 1), 0), 1));
2093 x
= expand_simple_binop (Pmode
, PLUS
, XEXP (x
, 0),
2094 XEXP (XEXP (XEXP (x
, 1), 0), 0),
2095 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2099 /* If this is a local symbol, split the address into HIGH/LO_SUM parts. */
2100 if (TARGET_EXPLICIT_RELOCS
&& symbolic_operand (x
, Pmode
))
2102 rtx r0
, r16
, eqv
, tga
, tp
, insn
, dest
, seq
;
2104 switch (tls_symbolic_operand_type (x
))
2106 case TLS_MODEL_GLOBAL_DYNAMIC
:
2109 r0
= gen_rtx_REG (Pmode
, 0);
2110 r16
= gen_rtx_REG (Pmode
, 16);
2111 tga
= get_tls_get_addr ();
2112 dest
= gen_reg_rtx (Pmode
);
2113 seq
= GEN_INT (alpha_next_sequence_number
++);
2115 emit_insn (gen_movdi_er_tlsgd (r16
, pic_offset_table_rtx
, x
, seq
));
2116 insn
= gen_call_value_osf_tlsgd (r0
, tga
, seq
);
2117 insn
= emit_call_insn (insn
);
2118 CONST_OR_PURE_CALL_P (insn
) = 1;
2119 use_reg (&CALL_INSN_FUNCTION_USAGE (insn
), r16
);
2121 insn
= get_insns ();
2124 emit_libcall_block (insn
, dest
, r0
, x
);
2127 case TLS_MODEL_LOCAL_DYNAMIC
:
2130 r0
= gen_rtx_REG (Pmode
, 0);
2131 r16
= gen_rtx_REG (Pmode
, 16);
2132 tga
= get_tls_get_addr ();
2133 scratch
= gen_reg_rtx (Pmode
);
2134 seq
= GEN_INT (alpha_next_sequence_number
++);
2136 emit_insn (gen_movdi_er_tlsldm (r16
, pic_offset_table_rtx
, seq
));
2137 insn
= gen_call_value_osf_tlsldm (r0
, tga
, seq
);
2138 insn
= emit_call_insn (insn
);
2139 CONST_OR_PURE_CALL_P (insn
) = 1;
2140 use_reg (&CALL_INSN_FUNCTION_USAGE (insn
), r16
);
2142 insn
= get_insns ();
2145 eqv
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, const0_rtx
),
2146 UNSPEC_TLSLDM_CALL
);
2147 emit_libcall_block (insn
, scratch
, r0
, eqv
);
2149 eqv
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, x
), UNSPEC_DTPREL
);
2150 eqv
= gen_rtx_CONST (Pmode
, eqv
);
2152 if (alpha_tls_size
== 64)
2154 dest
= gen_reg_rtx (Pmode
);
2155 emit_insn (gen_rtx_SET (VOIDmode
, dest
, eqv
));
2156 emit_insn (gen_adddi3 (dest
, dest
, scratch
));
2159 if (alpha_tls_size
== 32)
2161 insn
= gen_rtx_HIGH (Pmode
, eqv
);
2162 insn
= gen_rtx_PLUS (Pmode
, scratch
, insn
);
2163 scratch
= gen_reg_rtx (Pmode
);
2164 emit_insn (gen_rtx_SET (VOIDmode
, scratch
, insn
));
2166 return gen_rtx_LO_SUM (Pmode
, scratch
, eqv
);
2168 case TLS_MODEL_INITIAL_EXEC
:
2169 eqv
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, x
), UNSPEC_TPREL
);
2170 eqv
= gen_rtx_CONST (Pmode
, eqv
);
2171 tp
= gen_reg_rtx (Pmode
);
2172 scratch
= gen_reg_rtx (Pmode
);
2173 dest
= gen_reg_rtx (Pmode
);
2175 emit_insn (gen_load_tp (tp
));
2176 emit_insn (gen_rtx_SET (VOIDmode
, scratch
, eqv
));
2177 emit_insn (gen_adddi3 (dest
, tp
, scratch
));
2180 case TLS_MODEL_LOCAL_EXEC
:
2181 eqv
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, x
), UNSPEC_TPREL
);
2182 eqv
= gen_rtx_CONST (Pmode
, eqv
);
2183 tp
= gen_reg_rtx (Pmode
);
2185 emit_insn (gen_load_tp (tp
));
2186 if (alpha_tls_size
== 32)
2188 insn
= gen_rtx_HIGH (Pmode
, eqv
);
2189 insn
= gen_rtx_PLUS (Pmode
, tp
, insn
);
2190 tp
= gen_reg_rtx (Pmode
);
2191 emit_insn (gen_rtx_SET (VOIDmode
, tp
, insn
));
2193 return gen_rtx_LO_SUM (Pmode
, tp
, eqv
);
2196 if (local_symbolic_operand (x
, Pmode
))
2198 if (small_symbolic_operand (x
, Pmode
))
2202 if (!no_new_pseudos
)
2203 scratch
= gen_reg_rtx (Pmode
);
2204 emit_insn (gen_rtx_SET (VOIDmode
, scratch
,
2205 gen_rtx_HIGH (Pmode
, x
)));
2206 return gen_rtx_LO_SUM (Pmode
, scratch
, x
);
2215 HOST_WIDE_INT low
, high
;
2217 low
= ((addend
& 0xffff) ^ 0x8000) - 0x8000;
2219 high
= ((addend
& 0xffffffff) ^ 0x80000000) - 0x80000000;
2223 x
= expand_simple_binop (Pmode
, PLUS
, x
, GEN_INT (addend
),
2224 (no_new_pseudos
? scratch
: NULL_RTX
),
2225 1, OPTAB_LIB_WIDEN
);
2227 x
= expand_simple_binop (Pmode
, PLUS
, x
, GEN_INT (high
),
2228 (no_new_pseudos
? scratch
: NULL_RTX
),
2229 1, OPTAB_LIB_WIDEN
);
2231 return plus_constant (x
, low
);
2235 /* We do not allow indirect calls to be optimized into sibling calls, nor
2236 can we allow a call to a function with a different GP to be optimized
2240 alpha_function_ok_for_sibcall (decl
, exp
)
2242 tree exp ATTRIBUTE_UNUSED
;
2244 /* Can't do indirect tail calls, since we don't know if the target
2245 uses the same GP. */
2249 /* Otherwise, we can make a tail call if the target function shares
2251 return decl_has_samegp (decl
);
2254 /* For TARGET_EXPLICIT_RELOCS, we don't obfuscate a SYMBOL_REF to a
2255 small symbolic operand until after reload. At which point we need
2256 to replace (mem (symbol_ref)) with (mem (lo_sum $29 symbol_ref))
2257 so that sched2 has the proper dependency information. */
2260 some_small_symbolic_operand (x
, mode
)
2262 enum machine_mode mode ATTRIBUTE_UNUSED
;
2264 return for_each_rtx (&x
, some_small_symbolic_operand_1
, NULL
);
2268 some_small_symbolic_operand_1 (px
, data
)
2270 void *data ATTRIBUTE_UNUSED
;
2274 /* Don't re-split. */
2275 if (GET_CODE (x
) == LO_SUM
)
2278 return small_symbolic_operand (x
, Pmode
) != 0;
2282 split_small_symbolic_operand (x
)
2286 for_each_rtx (&x
, split_small_symbolic_operand_1
, NULL
);
2291 split_small_symbolic_operand_1 (px
, data
)
2293 void *data ATTRIBUTE_UNUSED
;
2297 /* Don't re-split. */
2298 if (GET_CODE (x
) == LO_SUM
)
2301 if (small_symbolic_operand (x
, Pmode
))
2303 x
= gen_rtx_LO_SUM (Pmode
, pic_offset_table_rtx
, x
);
2311 /* Indicate that INSN cannot be duplicated. This is true for any insn
2312 that we've marked with gpdisp relocs, since those have to stay in
2313 1-1 correspondence with one another.
2315 Techinically we could copy them if we could set up a mapping from one
2316 sequence number to another, across the set of insns to be duplicated.
2317 This seems overly complicated and error-prone since interblock motion
2318 from sched-ebb could move one of the pair of insns to a different block. */
2321 alpha_cannot_copy_insn_p (insn
)
2326 if (!reload_completed
|| !TARGET_EXPLICIT_RELOCS
)
2329 if (GET_CODE (insn
) != INSN
)
2331 if (asm_noperands (insn
) >= 0)
2334 pat
= PATTERN (insn
);
2335 if (GET_CODE (pat
) != SET
)
2337 pat
= SET_SRC (pat
);
2338 if (GET_CODE (pat
) == UNSPEC_VOLATILE
)
2340 if (XINT (pat
, 1) == UNSPECV_LDGP1
2341 || XINT (pat
, 1) == UNSPECV_PLDGP2
)
2344 else if (GET_CODE (pat
) == UNSPEC
)
2346 if (XINT (pat
, 1) == UNSPEC_LDGP2
)
2354 /* Try a machine-dependent way of reloading an illegitimate address
2355 operand. If we find one, push the reload and return the new rtx. */
2358 alpha_legitimize_reload_address (x
, mode
, opnum
, type
, ind_levels
)
2360 enum machine_mode mode ATTRIBUTE_UNUSED
;
2363 int ind_levels ATTRIBUTE_UNUSED
;
2365 /* We must recognize output that we have already generated ourselves. */
2366 if (GET_CODE (x
) == PLUS
2367 && GET_CODE (XEXP (x
, 0)) == PLUS
2368 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == REG
2369 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
2370 && GET_CODE (XEXP (x
, 1)) == CONST_INT
)
2372 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
2373 BASE_REG_CLASS
, GET_MODE (x
), VOIDmode
, 0, 0,
2378 /* We wish to handle large displacements off a base register by
2379 splitting the addend across an ldah and the mem insn. This
2380 cuts number of extra insns needed from 3 to 1. */
2381 if (GET_CODE (x
) == PLUS
2382 && GET_CODE (XEXP (x
, 0)) == REG
2383 && REGNO (XEXP (x
, 0)) < FIRST_PSEUDO_REGISTER
2384 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x
, 0)))
2385 && GET_CODE (XEXP (x
, 1)) == CONST_INT
)
2387 HOST_WIDE_INT val
= INTVAL (XEXP (x
, 1));
2388 HOST_WIDE_INT low
= ((val
& 0xffff) ^ 0x8000) - 0x8000;
2390 = (((val
- low
) & 0xffffffff) ^ 0x80000000) - 0x80000000;
2392 /* Check for 32-bit overflow. */
2393 if (high
+ low
!= val
)
2396 /* Reload the high part into a base reg; leave the low part
2397 in the mem directly. */
2398 x
= gen_rtx_PLUS (GET_MODE (x
),
2399 gen_rtx_PLUS (GET_MODE (x
), XEXP (x
, 0),
2403 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
2404 BASE_REG_CLASS
, GET_MODE (x
), VOIDmode
, 0, 0,
2412 /* Compute a (partial) cost for rtx X. Return true if the complete
2413 cost has been computed, and false if subexpressions should be
2414 scanned. In either case, *TOTAL contains the cost result. */
2417 alpha_rtx_costs (x
, code
, outer_code
, total
)
2419 int code
, outer_code
;
2422 enum machine_mode mode
= GET_MODE (x
);
2423 bool float_mode_p
= FLOAT_MODE_P (mode
);
2427 /* If this is an 8-bit constant, return zero since it can be used
2428 nearly anywhere with no cost. If it is a valid operand for an
2429 ADD or AND, likewise return 0 if we know it will be used in that
2430 context. Otherwise, return 2 since it might be used there later.
2431 All other constants take at least two insns. */
2433 if (INTVAL (x
) >= 0 && INTVAL (x
) < 256)
2441 if (x
== CONST0_RTX (mode
))
2443 else if ((outer_code
== PLUS
&& add_operand (x
, VOIDmode
))
2444 || (outer_code
== AND
&& and_operand (x
, VOIDmode
)))
2446 else if (add_operand (x
, VOIDmode
) || and_operand (x
, VOIDmode
))
2449 *total
= COSTS_N_INSNS (2);
2455 if (TARGET_EXPLICIT_RELOCS
&& small_symbolic_operand (x
, VOIDmode
))
2456 *total
= COSTS_N_INSNS (outer_code
!= MEM
);
2457 else if (TARGET_EXPLICIT_RELOCS
&& local_symbolic_operand (x
, VOIDmode
))
2458 *total
= COSTS_N_INSNS (1 + (outer_code
!= MEM
));
2459 else if (tls_symbolic_operand_type (x
))
2460 /* Estimate of cost for call_pal rduniq. */
2461 *total
= COSTS_N_INSNS (15);
2463 /* Otherwise we do a load from the GOT. */
2464 *total
= COSTS_N_INSNS (alpha_memory_latency
);
2470 *total
= alpha_rtx_cost_data
[alpha_cpu
].fp_add
;
2471 else if (GET_CODE (XEXP (x
, 0)) == MULT
2472 && const48_operand (XEXP (XEXP (x
, 0), 1), VOIDmode
))
2474 *total
= (rtx_cost (XEXP (XEXP (x
, 0), 0), outer_code
)
2475 + rtx_cost (XEXP (x
, 1), outer_code
) + 2);
2482 *total
= alpha_rtx_cost_data
[alpha_cpu
].fp_mult
;
2483 else if (mode
== DImode
)
2484 *total
= alpha_rtx_cost_data
[alpha_cpu
].int_mult_di
;
2486 *total
= alpha_rtx_cost_data
[alpha_cpu
].int_mult_si
;
2490 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
2491 && INTVAL (XEXP (x
, 1)) <= 3)
2493 *total
= COSTS_N_INSNS (1);
2500 *total
= alpha_rtx_cost_data
[alpha_cpu
].int_shift
;
2505 *total
= alpha_rtx_cost_data
[alpha_cpu
].fp_add
;
2507 *total
= alpha_rtx_cost_data
[alpha_cpu
].int_cmov
;
2515 *total
= COSTS_N_INSNS (70); /* ??? */
2516 else if (mode
== SFmode
)
2517 *total
= alpha_rtx_cost_data
[alpha_cpu
].fp_div_sf
;
2519 *total
= alpha_rtx_cost_data
[alpha_cpu
].fp_div_df
;
2523 *total
= COSTS_N_INSNS (alpha_memory_latency
);
2529 *total
= COSTS_N_INSNS (1);
2537 *total
= COSTS_N_INSNS (1) + alpha_rtx_cost_data
[alpha_cpu
].int_cmov
;
2543 case UNSIGNED_FLOAT
:
2547 case FLOAT_TRUNCATE
:
2548 *total
= alpha_rtx_cost_data
[alpha_cpu
].fp_add
;
2556 /* REF is an alignable memory location. Place an aligned SImode
2557 reference into *PALIGNED_MEM and the number of bits to shift into
2558 *PBITNUM. SCRATCH is a free register for use in reloading out
2559 of range stack slots. */
2562 get_aligned_mem (ref
, paligned_mem
, pbitnum
)
2564 rtx
*paligned_mem
, *pbitnum
;
2567 HOST_WIDE_INT offset
= 0;
2569 if (GET_CODE (ref
) != MEM
)
2572 if (reload_in_progress
2573 && ! memory_address_p (GET_MODE (ref
), XEXP (ref
, 0)))
2575 base
= find_replacement (&XEXP (ref
, 0));
2577 if (! memory_address_p (GET_MODE (ref
), base
))
2582 base
= XEXP (ref
, 0);
2585 if (GET_CODE (base
) == PLUS
)
2586 offset
+= INTVAL (XEXP (base
, 1)), base
= XEXP (base
, 0);
2589 = widen_memory_access (ref
, SImode
, (offset
& ~3) - offset
);
2591 if (WORDS_BIG_ENDIAN
)
2592 *pbitnum
= GEN_INT (32 - (GET_MODE_BITSIZE (GET_MODE (ref
))
2593 + (offset
& 3) * 8));
2595 *pbitnum
= GEN_INT ((offset
& 3) * 8);
2598 /* Similar, but just get the address. Handle the two reload cases.
2599 Add EXTRA_OFFSET to the address we return. */
2602 get_unaligned_address (ref
, extra_offset
)
2607 HOST_WIDE_INT offset
= 0;
2609 if (GET_CODE (ref
) != MEM
)
2612 if (reload_in_progress
2613 && ! memory_address_p (GET_MODE (ref
), XEXP (ref
, 0)))
2615 base
= find_replacement (&XEXP (ref
, 0));
2617 if (! memory_address_p (GET_MODE (ref
), base
))
2622 base
= XEXP (ref
, 0);
2625 if (GET_CODE (base
) == PLUS
)
2626 offset
+= INTVAL (XEXP (base
, 1)), base
= XEXP (base
, 0);
2628 return plus_constant (base
, offset
+ extra_offset
);
2631 /* On the Alpha, all (non-symbolic) constants except zero go into
2632 a floating-point register via memory. Note that we cannot
2633 return anything that is not a subset of CLASS, and that some
2634 symbolic constants cannot be dropped to memory. */
2637 alpha_preferred_reload_class(x
, class)
2639 enum reg_class
class;
2641 /* Zero is present in any register class. */
2642 if (x
== CONST0_RTX (GET_MODE (x
)))
2645 /* These sorts of constants we can easily drop to memory. */
2646 if (GET_CODE (x
) == CONST_INT
|| GET_CODE (x
) == CONST_DOUBLE
)
2648 if (class == FLOAT_REGS
)
2650 if (class == ALL_REGS
)
2651 return GENERAL_REGS
;
2655 /* All other kinds of constants should not (and in the case of HIGH
2656 cannot) be dropped to memory -- instead we use a GENERAL_REGS
2657 secondary reload. */
2659 return (class == ALL_REGS
? GENERAL_REGS
: class);
2664 /* Loading and storing HImode or QImode values to and from memory
2665 usually requires a scratch register. The exceptions are loading
2666 QImode and HImode from an aligned address to a general register
2667 unless byte instructions are permitted.
2669 We also cannot load an unaligned address or a paradoxical SUBREG
2670 into an FP register.
2672 We also cannot do integral arithmetic into FP regs, as might result
2673 from register elimination into a DImode fp register. */
2676 secondary_reload_class (class, mode
, x
, in
)
2677 enum reg_class
class;
2678 enum machine_mode mode
;
2682 if ((mode
== QImode
|| mode
== HImode
) && ! TARGET_BWX
)
2684 if (GET_CODE (x
) == MEM
2685 || (GET_CODE (x
) == REG
&& REGNO (x
) >= FIRST_PSEUDO_REGISTER
)
2686 || (GET_CODE (x
) == SUBREG
2687 && (GET_CODE (SUBREG_REG (x
)) == MEM
2688 || (GET_CODE (SUBREG_REG (x
)) == REG
2689 && REGNO (SUBREG_REG (x
)) >= FIRST_PSEUDO_REGISTER
))))
2691 if (!in
|| !aligned_memory_operand(x
, mode
))
2692 return GENERAL_REGS
;
2696 if (class == FLOAT_REGS
)
2698 if (GET_CODE (x
) == MEM
&& GET_CODE (XEXP (x
, 0)) == AND
)
2699 return GENERAL_REGS
;
2701 if (GET_CODE (x
) == SUBREG
2702 && (GET_MODE_SIZE (GET_MODE (x
))
2703 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x
)))))
2704 return GENERAL_REGS
;
2706 if (in
&& INTEGRAL_MODE_P (mode
)
2707 && ! (memory_operand (x
, mode
) || x
== const0_rtx
))
2708 return GENERAL_REGS
;
2714 /* Subfunction of the following function. Update the flags of any MEM
2715 found in part of X. */
2718 alpha_set_memflags_1 (x
, in_struct_p
, volatile_p
, unchanging_p
)
2720 int in_struct_p
, volatile_p
, unchanging_p
;
2724 switch (GET_CODE (x
))
2730 for (i
= XVECLEN (x
, 0) - 1; i
>= 0; i
--)
2731 alpha_set_memflags_1 (XVECEXP (x
, 0, i
), in_struct_p
, volatile_p
,
2736 alpha_set_memflags_1 (PATTERN (x
), in_struct_p
, volatile_p
,
2741 alpha_set_memflags_1 (SET_DEST (x
), in_struct_p
, volatile_p
,
2743 alpha_set_memflags_1 (SET_SRC (x
), in_struct_p
, volatile_p
,
2748 MEM_IN_STRUCT_P (x
) = in_struct_p
;
2749 MEM_VOLATILE_P (x
) = volatile_p
;
2750 RTX_UNCHANGING_P (x
) = unchanging_p
;
2751 /* Sadly, we cannot use alias sets because the extra aliasing
2752 produced by the AND interferes. Given that two-byte quantities
2753 are the only thing we would be able to differentiate anyway,
2754 there does not seem to be any point in convoluting the early
2755 out of the alias check. */
2763 /* Given INSN, which is an INSN list or the PATTERN of a single insn
2764 generated to perform a memory operation, look for any MEMs in either
2765 a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
2766 volatile flags from REF into each of the MEMs found. If REF is not
2767 a MEM, don't do anything. */
2770 alpha_set_memflags (insn
, ref
)
2774 int in_struct_p
, volatile_p
, unchanging_p
;
2776 if (GET_CODE (ref
) != MEM
)
2779 in_struct_p
= MEM_IN_STRUCT_P (ref
);
2780 volatile_p
= MEM_VOLATILE_P (ref
);
2781 unchanging_p
= RTX_UNCHANGING_P (ref
);
2783 /* This is only called from alpha.md, after having had something
2784 generated from one of the insn patterns. So if everything is
2785 zero, the pattern is already up-to-date. */
2786 if (! in_struct_p
&& ! volatile_p
&& ! unchanging_p
)
2789 alpha_set_memflags_1 (insn
, in_struct_p
, volatile_p
, unchanging_p
);
2792 /* Try to output insns to set TARGET equal to the constant C if it can be
2793 done in less than N insns. Do all computations in MODE. Returns the place
2794 where the output has been placed if it can be done and the insns have been
2795 emitted. If it would take more than N insns, zero is returned and no
2796 insns and emitted. */
2799 alpha_emit_set_const (target
, mode
, c
, n
)
2801 enum machine_mode mode
;
2806 rtx orig_target
= target
;
2809 /* If we can't make any pseudos, TARGET is an SImode hard register, we
2810 can't load this constant in one insn, do this in DImode. */
2811 if (no_new_pseudos
&& mode
== SImode
2812 && GET_CODE (target
) == REG
&& REGNO (target
) < FIRST_PSEUDO_REGISTER
2813 && (result
= alpha_emit_set_const_1 (target
, mode
, c
, 1)) == 0)
2815 target
= gen_lowpart (DImode
, target
);
2819 /* Try 1 insn, then 2, then up to N. */
2820 for (i
= 1; i
<= n
; i
++)
2822 result
= alpha_emit_set_const_1 (target
, mode
, c
, i
);
2825 rtx insn
= get_last_insn ();
2826 rtx set
= single_set (insn
);
2827 if (! CONSTANT_P (SET_SRC (set
)))
2828 set_unique_reg_note (get_last_insn (), REG_EQUAL
, GEN_INT (c
));
2833 /* Allow for the case where we changed the mode of TARGET. */
2834 if (result
== target
)
2835 result
= orig_target
;
2840 /* Internal routine for the above to check for N or below insns. */
2843 alpha_emit_set_const_1 (target
, mode
, c
, n
)
2845 enum machine_mode mode
;
2851 /* Use a pseudo if highly optimizing and still generating RTL. */
2853 = (flag_expensive_optimizations
&& !no_new_pseudos
? 0 : target
);
2856 /* If this is a sign-extended 32-bit constant, we can do this in at most
2857 three insns, so do it if we have enough insns left. We always have
2858 a sign-extended 32-bit constant when compiling on a narrow machine. */
2860 if (HOST_BITS_PER_WIDE_INT
!= 64
2861 || c
>> 31 == -1 || c
>> 31 == 0)
2863 HOST_WIDE_INT low
= ((c
& 0xffff) ^ 0x8000) - 0x8000;
2864 HOST_WIDE_INT tmp1
= c
- low
;
2865 HOST_WIDE_INT high
= (((tmp1
>> 16) & 0xffff) ^ 0x8000) - 0x8000;
2866 HOST_WIDE_INT extra
= 0;
2868 /* If HIGH will be interpreted as negative but the constant is
2869 positive, we must adjust it to do two ldha insns. */
2871 if ((high
& 0x8000) != 0 && c
>= 0)
2875 high
= ((tmp1
>> 16) & 0xffff) - 2 * ((tmp1
>> 16) & 0x8000);
2878 if (c
== low
|| (low
== 0 && extra
== 0))
2880 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
2881 but that meant that we can't handle INT_MIN on 32-bit machines
2882 (like NT/Alpha), because we recurse indefinitely through
2883 emit_move_insn to gen_movdi. So instead, since we know exactly
2884 what we want, create it explicitly. */
2887 target
= gen_reg_rtx (mode
);
2888 emit_insn (gen_rtx_SET (VOIDmode
, target
, GEN_INT (c
)));
2891 else if (n
>= 2 + (extra
!= 0))
2893 temp
= copy_to_suggested_reg (GEN_INT (high
<< 16), subtarget
, mode
);
2895 /* As of 2002-02-23, addsi3 is only available when not optimizing.
2896 This means that if we go through expand_binop, we'll try to
2897 generate extensions, etc, which will require new pseudos, which
2898 will fail during some split phases. The SImode add patterns
2899 still exist, but are not named. So build the insns by hand. */
2904 subtarget
= gen_reg_rtx (mode
);
2905 insn
= gen_rtx_PLUS (mode
, temp
, GEN_INT (extra
<< 16));
2906 insn
= gen_rtx_SET (VOIDmode
, subtarget
, insn
);
2912 target
= gen_reg_rtx (mode
);
2913 insn
= gen_rtx_PLUS (mode
, temp
, GEN_INT (low
));
2914 insn
= gen_rtx_SET (VOIDmode
, target
, insn
);
2920 /* If we couldn't do it that way, try some other methods. But if we have
2921 no instructions left, don't bother. Likewise, if this is SImode and
2922 we can't make pseudos, we can't do anything since the expand_binop
2923 and expand_unop calls will widen and try to make pseudos. */
2925 if (n
== 1 || (mode
== SImode
&& no_new_pseudos
))
2928 /* Next, see if we can load a related constant and then shift and possibly
2929 negate it to get the constant we want. Try this once each increasing
2930 numbers of insns. */
2932 for (i
= 1; i
< n
; i
++)
2934 /* First, see if minus some low bits, we've an easy load of
2937 new = ((c
& 0xffff) ^ 0x8000) - 0x8000;
2939 && (temp
= alpha_emit_set_const (subtarget
, mode
, c
- new, i
)) != 0)
2940 return expand_binop (mode
, add_optab
, temp
, GEN_INT (new),
2941 target
, 0, OPTAB_WIDEN
);
2943 /* Next try complementing. */
2944 if ((temp
= alpha_emit_set_const (subtarget
, mode
, ~ c
, i
)) != 0)
2945 return expand_unop (mode
, one_cmpl_optab
, temp
, target
, 0);
2947 /* Next try to form a constant and do a left shift. We can do this
2948 if some low-order bits are zero; the exact_log2 call below tells
2949 us that information. The bits we are shifting out could be any
2950 value, but here we'll just try the 0- and sign-extended forms of
2951 the constant. To try to increase the chance of having the same
2952 constant in more than one insn, start at the highest number of
2953 bits to shift, but try all possibilities in case a ZAPNOT will
2956 if ((bits
= exact_log2 (c
& - c
)) > 0)
2957 for (; bits
> 0; bits
--)
2958 if ((temp
= (alpha_emit_set_const
2959 (subtarget
, mode
, c
>> bits
, i
))) != 0
2960 || ((temp
= (alpha_emit_set_const
2962 ((unsigned HOST_WIDE_INT
) c
) >> bits
, i
)))
2964 return expand_binop (mode
, ashl_optab
, temp
, GEN_INT (bits
),
2965 target
, 0, OPTAB_WIDEN
);
2967 /* Now try high-order zero bits. Here we try the shifted-in bits as
2968 all zero and all ones. Be careful to avoid shifting outside the
2969 mode and to avoid shifting outside the host wide int size. */
2970 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
2971 confuse the recursive call and set all of the high 32 bits. */
2973 if ((bits
= (MIN (HOST_BITS_PER_WIDE_INT
, GET_MODE_SIZE (mode
) * 8)
2974 - floor_log2 (c
) - 1 - (HOST_BITS_PER_WIDE_INT
< 64))) > 0)
2975 for (; bits
> 0; bits
--)
2976 if ((temp
= alpha_emit_set_const (subtarget
, mode
,
2978 || ((temp
= (alpha_emit_set_const
2980 ((c
<< bits
) | (((HOST_WIDE_INT
) 1 << bits
) - 1)),
2983 return expand_binop (mode
, lshr_optab
, temp
, GEN_INT (bits
),
2984 target
, 1, OPTAB_WIDEN
);
2986 /* Now try high-order 1 bits. We get that with a sign-extension.
2987 But one bit isn't enough here. Be careful to avoid shifting outside
2988 the mode and to avoid shifting outside the host wide int size. */
2990 if ((bits
= (MIN (HOST_BITS_PER_WIDE_INT
, GET_MODE_SIZE (mode
) * 8)
2991 - floor_log2 (~ c
) - 2)) > 0)
2992 for (; bits
> 0; bits
--)
2993 if ((temp
= alpha_emit_set_const (subtarget
, mode
,
2995 || ((temp
= (alpha_emit_set_const
2997 ((c
<< bits
) | (((HOST_WIDE_INT
) 1 << bits
) - 1)),
3000 return expand_binop (mode
, ashr_optab
, temp
, GEN_INT (bits
),
3001 target
, 0, OPTAB_WIDEN
);
3004 #if HOST_BITS_PER_WIDE_INT == 64
3005 /* Finally, see if can load a value into the target that is the same as the
3006 constant except that all bytes that are 0 are changed to be 0xff. If we
3007 can, then we can do a ZAPNOT to obtain the desired constant. */
3010 for (i
= 0; i
< 64; i
+= 8)
3011 if ((new & ((HOST_WIDE_INT
) 0xff << i
)) == 0)
3012 new |= (HOST_WIDE_INT
) 0xff << i
;
3014 /* We are only called for SImode and DImode. If this is SImode, ensure that
3015 we are sign extended to a full word. */
3018 new = ((new & 0xffffffff) ^ 0x80000000) - 0x80000000;
3020 if (new != c
&& new != -1
3021 && (temp
= alpha_emit_set_const (subtarget
, mode
, new, n
- 1)) != 0)
3022 return expand_binop (mode
, and_optab
, temp
, GEN_INT (c
| ~ new),
3023 target
, 0, OPTAB_WIDEN
);
3029 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
3030 fall back to a straight forward decomposition. We do this to avoid
3031 exponential run times encountered when looking for longer sequences
3032 with alpha_emit_set_const. */
3035 alpha_emit_set_long_const (target
, c1
, c2
)
3037 HOST_WIDE_INT c1
, c2
;
3039 HOST_WIDE_INT d1
, d2
, d3
, d4
;
3041 /* Decompose the entire word */
3042 #if HOST_BITS_PER_WIDE_INT >= 64
3043 if (c2
!= -(c1
< 0))
3045 d1
= ((c1
& 0xffff) ^ 0x8000) - 0x8000;
3047 d2
= ((c1
& 0xffffffff) ^ 0x80000000) - 0x80000000;
3048 c1
= (c1
- d2
) >> 32;
3049 d3
= ((c1
& 0xffff) ^ 0x8000) - 0x8000;
3051 d4
= ((c1
& 0xffffffff) ^ 0x80000000) - 0x80000000;
3055 d1
= ((c1
& 0xffff) ^ 0x8000) - 0x8000;
3057 d2
= ((c1
& 0xffffffff) ^ 0x80000000) - 0x80000000;
3061 d3
= ((c2
& 0xffff) ^ 0x8000) - 0x8000;
3063 d4
= ((c2
& 0xffffffff) ^ 0x80000000) - 0x80000000;
3068 /* Construct the high word */
3071 emit_move_insn (target
, GEN_INT (d4
));
3073 emit_move_insn (target
, gen_rtx_PLUS (DImode
, target
, GEN_INT (d3
)));
3076 emit_move_insn (target
, GEN_INT (d3
));
3078 /* Shift it into place */
3079 emit_move_insn (target
, gen_rtx_ASHIFT (DImode
, target
, GEN_INT (32)));
3081 /* Add in the low bits. */
3083 emit_move_insn (target
, gen_rtx_PLUS (DImode
, target
, GEN_INT (d2
)));
3085 emit_move_insn (target
, gen_rtx_PLUS (DImode
, target
, GEN_INT (d1
)));
3090 /* Expand a move instruction; return true if all work is done.
3091 We don't handle non-bwx subword loads here. */
3094 alpha_expand_mov (mode
, operands
)
3095 enum machine_mode mode
;
3098 /* If the output is not a register, the input must be. */
3099 if (GET_CODE (operands
[0]) == MEM
3100 && ! reg_or_0_operand (operands
[1], mode
))
3101 operands
[1] = force_reg (mode
, operands
[1]);
3103 /* Allow legitimize_address to perform some simplifications. */
3104 if (mode
== Pmode
&& symbolic_operand (operands
[1], mode
))
3108 /* With RTL inlining, at -O3, rtl is generated, stored, then actually
3109 compiled at the end of compilation. In the meantime, someone can
3110 re-encode-section-info on some symbol changing it e.g. from global
3111 to local-not-small. If this happens, we'd have emitted a plain
3112 load rather than a high+losum load and not recognize the insn.
3114 So if rtl inlining is in effect, we delay the global/not-global
3115 decision until rest_of_compilation by wrapping it in an
3117 if (TARGET_EXPLICIT_RELOCS
&& flag_inline_functions
3118 && rtx_equal_function_value_matters
3119 && global_symbolic_operand (operands
[1], mode
))
3121 emit_insn (gen_movdi_er_maybe_g (operands
[0], operands
[1]));
3125 tmp
= alpha_legitimize_address (operands
[1], operands
[0], mode
);
3128 if (tmp
== operands
[0])
3135 /* Early out for non-constants and valid constants. */
3136 if (! CONSTANT_P (operands
[1]) || input_operand (operands
[1], mode
))
3139 /* Split large integers. */
3140 if (GET_CODE (operands
[1]) == CONST_INT
3141 || GET_CODE (operands
[1]) == CONST_DOUBLE
)
3143 HOST_WIDE_INT i0
, i1
;
3144 rtx temp
= NULL_RTX
;
3146 if (GET_CODE (operands
[1]) == CONST_INT
)
3148 i0
= INTVAL (operands
[1]);
3151 else if (HOST_BITS_PER_WIDE_INT
>= 64)
3153 i0
= CONST_DOUBLE_LOW (operands
[1]);
3158 i0
= CONST_DOUBLE_LOW (operands
[1]);
3159 i1
= CONST_DOUBLE_HIGH (operands
[1]);
3162 if (HOST_BITS_PER_WIDE_INT
>= 64 || i1
== -(i0
< 0))
3163 temp
= alpha_emit_set_const (operands
[0], mode
, i0
, 3);
3165 if (!temp
&& TARGET_BUILD_CONSTANTS
)
3166 temp
= alpha_emit_set_long_const (operands
[0], i0
, i1
);
3170 if (rtx_equal_p (operands
[0], temp
))
3177 /* Otherwise we've nothing left but to drop the thing to memory. */
3178 operands
[1] = force_const_mem (mode
, operands
[1]);
3179 if (reload_in_progress
)
3181 emit_move_insn (operands
[0], XEXP (operands
[1], 0));
3182 operands
[1] = copy_rtx (operands
[1]);
3183 XEXP (operands
[1], 0) = operands
[0];
3186 operands
[1] = validize_mem (operands
[1]);
3190 /* Expand a non-bwx QImode or HImode move instruction;
3191 return true if all work is done. */
3194 alpha_expand_mov_nobwx (mode
, operands
)
3195 enum machine_mode mode
;
3198 /* If the output is not a register, the input must be. */
3199 if (GET_CODE (operands
[0]) == MEM
)
3200 operands
[1] = force_reg (mode
, operands
[1]);
3202 /* Handle four memory cases, unaligned and aligned for either the input
3203 or the output. The only case where we can be called during reload is
3204 for aligned loads; all other cases require temporaries. */
3206 if (GET_CODE (operands
[1]) == MEM
3207 || (GET_CODE (operands
[1]) == SUBREG
3208 && GET_CODE (SUBREG_REG (operands
[1])) == MEM
)
3209 || (reload_in_progress
&& GET_CODE (operands
[1]) == REG
3210 && REGNO (operands
[1]) >= FIRST_PSEUDO_REGISTER
)
3211 || (reload_in_progress
&& GET_CODE (operands
[1]) == SUBREG
3212 && GET_CODE (SUBREG_REG (operands
[1])) == REG
3213 && REGNO (SUBREG_REG (operands
[1])) >= FIRST_PSEUDO_REGISTER
))
3215 if (aligned_memory_operand (operands
[1], mode
))
3217 if (reload_in_progress
)
3219 emit_insn ((mode
== QImode
3220 ? gen_reload_inqi_help
3221 : gen_reload_inhi_help
)
3222 (operands
[0], operands
[1],
3223 gen_rtx_REG (SImode
, REGNO (operands
[0]))));
3227 rtx aligned_mem
, bitnum
;
3228 rtx scratch
= gen_reg_rtx (SImode
);
3230 get_aligned_mem (operands
[1], &aligned_mem
, &bitnum
);
3232 emit_insn ((mode
== QImode
3233 ? gen_aligned_loadqi
3234 : gen_aligned_loadhi
)
3235 (operands
[0], aligned_mem
, bitnum
, scratch
));
3240 /* Don't pass these as parameters since that makes the generated
3241 code depend on parameter evaluation order which will cause
3242 bootstrap failures. */
3244 rtx temp1
= gen_reg_rtx (DImode
);
3245 rtx temp2
= gen_reg_rtx (DImode
);
3246 rtx seq
= ((mode
== QImode
3247 ? gen_unaligned_loadqi
3248 : gen_unaligned_loadhi
)
3249 (operands
[0], get_unaligned_address (operands
[1], 0),
3252 alpha_set_memflags (seq
, operands
[1]);
3258 if (GET_CODE (operands
[0]) == MEM
3259 || (GET_CODE (operands
[0]) == SUBREG
3260 && GET_CODE (SUBREG_REG (operands
[0])) == MEM
)
3261 || (reload_in_progress
&& GET_CODE (operands
[0]) == REG
3262 && REGNO (operands
[0]) >= FIRST_PSEUDO_REGISTER
)
3263 || (reload_in_progress
&& GET_CODE (operands
[0]) == SUBREG
3264 && GET_CODE (SUBREG_REG (operands
[0])) == REG
3265 && REGNO (operands
[0]) >= FIRST_PSEUDO_REGISTER
))
3267 if (aligned_memory_operand (operands
[0], mode
))
3269 rtx aligned_mem
, bitnum
;
3270 rtx temp1
= gen_reg_rtx (SImode
);
3271 rtx temp2
= gen_reg_rtx (SImode
);
3273 get_aligned_mem (operands
[0], &aligned_mem
, &bitnum
);
3275 emit_insn (gen_aligned_store (aligned_mem
, operands
[1], bitnum
,
3280 rtx temp1
= gen_reg_rtx (DImode
);
3281 rtx temp2
= gen_reg_rtx (DImode
);
3282 rtx temp3
= gen_reg_rtx (DImode
);
3283 rtx seq
= ((mode
== QImode
3284 ? gen_unaligned_storeqi
3285 : gen_unaligned_storehi
)
3286 (get_unaligned_address (operands
[0], 0),
3287 operands
[1], temp1
, temp2
, temp3
));
3289 alpha_set_memflags (seq
, operands
[0]);
3298 /* Generate an unsigned DImode to FP conversion. This is the same code
3299 optabs would emit if we didn't have TFmode patterns.
3301 For SFmode, this is the only construction I've found that can pass
3302 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
3303 intermediates will work, because you'll get intermediate rounding
3304 that ruins the end result. Some of this could be fixed by turning
3305 on round-to-positive-infinity, but that requires diddling the fpsr,
3306 which kills performance. I tried turning this around and converting
3307 to a negative number, so that I could turn on /m, but either I did
3308 it wrong or there's something else cause I wound up with the exact
3309 same single-bit error. There is a branch-less form of this same code:
3320 fcmoveq $f10,$f11,$f0
3322 I'm not using it because it's the same number of instructions as
3323 this branch-full form, and it has more serialized long latency
3324 instructions on the critical path.
3326 For DFmode, we can avoid rounding errors by breaking up the word
3327 into two pieces, converting them separately, and adding them back:
3329 LC0: .long 0,0x5f800000
3334 cpyse $f11,$f31,$f10
3335 cpyse $f31,$f11,$f11
3343 This doesn't seem to be a clear-cut win over the optabs form.
3344 It probably all depends on the distribution of numbers being
3345 converted -- in the optabs form, all but high-bit-set has a
3346 much lower minimum execution time. */
3349 alpha_emit_floatuns (operands
)
3352 rtx neglab
, donelab
, i0
, i1
, f0
, in
, out
;
3353 enum machine_mode mode
;
3356 in
= force_reg (DImode
, operands
[1]);
3357 mode
= GET_MODE (out
);
3358 neglab
= gen_label_rtx ();
3359 donelab
= gen_label_rtx ();
3360 i0
= gen_reg_rtx (DImode
);
3361 i1
= gen_reg_rtx (DImode
);
3362 f0
= gen_reg_rtx (mode
);
3364 emit_cmp_and_jump_insns (in
, const0_rtx
, LT
, const0_rtx
, DImode
, 0, neglab
);
3366 emit_insn (gen_rtx_SET (VOIDmode
, out
, gen_rtx_FLOAT (mode
, in
)));
3367 emit_jump_insn (gen_jump (donelab
));
3370 emit_label (neglab
);
3372 emit_insn (gen_lshrdi3 (i0
, in
, const1_rtx
));
3373 emit_insn (gen_anddi3 (i1
, in
, const1_rtx
));
3374 emit_insn (gen_iordi3 (i0
, i0
, i1
));
3375 emit_insn (gen_rtx_SET (VOIDmode
, f0
, gen_rtx_FLOAT (mode
, i0
)));
3376 emit_insn (gen_rtx_SET (VOIDmode
, out
, gen_rtx_PLUS (mode
, f0
, f0
)));
3378 emit_label (donelab
);
3381 /* Generate the comparison for a conditional branch. */
3384 alpha_emit_conditional_branch (code
)
3387 enum rtx_code cmp_code
, branch_code
;
3388 enum machine_mode cmp_mode
, branch_mode
= VOIDmode
;
3389 rtx op0
= alpha_compare
.op0
, op1
= alpha_compare
.op1
;
3392 if (alpha_compare
.fp_p
&& GET_MODE (op0
) == TFmode
)
3394 if (! TARGET_HAS_XFLOATING_LIBS
)
3397 /* X_floating library comparison functions return
3401 Convert the compare against the raw return value. */
3423 op0
= alpha_emit_xfloating_compare (cmp_code
, op0
, op1
);
3425 alpha_compare
.fp_p
= 0;
3428 /* The general case: fold the comparison code to the types of compares
3429 that we have, choosing the branch as necessary. */
3432 case EQ
: case LE
: case LT
: case LEU
: case LTU
:
3434 /* We have these compares: */
3435 cmp_code
= code
, branch_code
= NE
;
3440 /* These must be reversed. */
3441 cmp_code
= reverse_condition (code
), branch_code
= EQ
;
3444 case GE
: case GT
: case GEU
: case GTU
:
3445 /* For FP, we swap them, for INT, we reverse them. */
3446 if (alpha_compare
.fp_p
)
3448 cmp_code
= swap_condition (code
);
3450 tem
= op0
, op0
= op1
, op1
= tem
;
3454 cmp_code
= reverse_condition (code
);
3463 if (alpha_compare
.fp_p
)
3466 if (flag_unsafe_math_optimizations
)
3468 /* When we are not as concerned about non-finite values, and we
3469 are comparing against zero, we can branch directly. */
3470 if (op1
== CONST0_RTX (DFmode
))
3471 cmp_code
= NIL
, branch_code
= code
;
3472 else if (op0
== CONST0_RTX (DFmode
))
3474 /* Undo the swap we probably did just above. */
3475 tem
= op0
, op0
= op1
, op1
= tem
;
3476 branch_code
= swap_condition (cmp_code
);
3482 /* ??? We mark the branch mode to be CCmode to prevent the
3483 compare and branch from being combined, since the compare
3484 insn follows IEEE rules that the branch does not. */
3485 branch_mode
= CCmode
;
3492 /* The following optimizations are only for signed compares. */
3493 if (code
!= LEU
&& code
!= LTU
&& code
!= GEU
&& code
!= GTU
)
3495 /* Whee. Compare and branch against 0 directly. */
3496 if (op1
== const0_rtx
)
3497 cmp_code
= NIL
, branch_code
= code
;
3499 /* We want to use cmpcc/bcc when we can, since there is a zero delay
3500 bypass between logicals and br/cmov on EV5. But we don't want to
3501 force valid immediate constants into registers needlessly. */
3502 else if (GET_CODE (op1
) == CONST_INT
)
3504 HOST_WIDE_INT v
= INTVAL (op1
), n
= -v
;
3506 if (! CONST_OK_FOR_LETTER_P (v
, 'I')
3507 && (CONST_OK_FOR_LETTER_P (n
, 'K')
3508 || CONST_OK_FOR_LETTER_P (n
, 'L')))
3510 cmp_code
= PLUS
, branch_code
= code
;
3516 if (!reg_or_0_operand (op0
, DImode
))
3517 op0
= force_reg (DImode
, op0
);
3518 if (cmp_code
!= PLUS
&& !reg_or_8bit_operand (op1
, DImode
))
3519 op1
= force_reg (DImode
, op1
);
3522 /* Emit an initial compare instruction, if necessary. */
3524 if (cmp_code
!= NIL
)
3526 tem
= gen_reg_rtx (cmp_mode
);
3527 emit_move_insn (tem
, gen_rtx_fmt_ee (cmp_code
, cmp_mode
, op0
, op1
));
3530 /* Zero the operands. */
3531 memset (&alpha_compare
, 0, sizeof (alpha_compare
));
3533 /* Return the branch comparison. */
3534 return gen_rtx_fmt_ee (branch_code
, branch_mode
, tem
, CONST0_RTX (cmp_mode
));
3537 /* Certain simplifications can be done to make invalid setcc operations
3538 valid. Return the final comparison, or NULL if we can't work. */
3541 alpha_emit_setcc (code
)
3544 enum rtx_code cmp_code
;
3545 rtx op0
= alpha_compare
.op0
, op1
= alpha_compare
.op1
;
3546 int fp_p
= alpha_compare
.fp_p
;
3549 /* Zero the operands. */
3550 memset (&alpha_compare
, 0, sizeof (alpha_compare
));
3552 if (fp_p
&& GET_MODE (op0
) == TFmode
)
3554 if (! TARGET_HAS_XFLOATING_LIBS
)
3557 /* X_floating library comparison functions return
3561 Convert the compare against the raw return value. */
3563 if (code
== UNORDERED
|| code
== ORDERED
)
3568 op0
= alpha_emit_xfloating_compare (cmp_code
, op0
, op1
);
3572 if (code
== UNORDERED
)
3574 else if (code
== ORDERED
)
3580 if (fp_p
&& !TARGET_FIX
)
3583 /* The general case: fold the comparison code to the types of compares
3584 that we have, choosing the branch as necessary. */
3589 case EQ
: case LE
: case LT
: case LEU
: case LTU
:
3591 /* We have these compares. */
3593 cmp_code
= code
, code
= NE
;
3597 if (!fp_p
&& op1
== const0_rtx
)
3602 cmp_code
= reverse_condition (code
);
3606 case GE
: case GT
: case GEU
: case GTU
:
3607 /* These normally need swapping, but for integer zero we have
3608 special patterns that recognize swapped operands. */
3609 if (!fp_p
&& op1
== const0_rtx
)
3611 code
= swap_condition (code
);
3613 cmp_code
= code
, code
= NE
;
3614 tmp
= op0
, op0
= op1
, op1
= tmp
;
3623 if (!register_operand (op0
, DImode
))
3624 op0
= force_reg (DImode
, op0
);
3625 if (!reg_or_8bit_operand (op1
, DImode
))
3626 op1
= force_reg (DImode
, op1
);
3629 /* Emit an initial compare instruction, if necessary. */
3630 if (cmp_code
!= NIL
)
3632 enum machine_mode mode
= fp_p
? DFmode
: DImode
;
3634 tmp
= gen_reg_rtx (mode
);
3635 emit_insn (gen_rtx_SET (VOIDmode
, tmp
,
3636 gen_rtx_fmt_ee (cmp_code
, mode
, op0
, op1
)));
3638 op0
= fp_p
? gen_lowpart (DImode
, tmp
) : tmp
;
3642 /* Return the setcc comparison. */
3643 return gen_rtx_fmt_ee (code
, DImode
, op0
, op1
);
3647 /* Rewrite a comparison against zero CMP of the form
3648 (CODE (cc0) (const_int 0)) so it can be written validly in
3649 a conditional move (if_then_else CMP ...).
3650 If both of the operands that set cc0 are nonzero we must emit
3651 an insn to perform the compare (it can't be done within
3652 the conditional move). */
3654 alpha_emit_conditional_move (cmp
, mode
)
3656 enum machine_mode mode
;
3658 enum rtx_code code
= GET_CODE (cmp
);
3659 enum rtx_code cmov_code
= NE
;
3660 rtx op0
= alpha_compare
.op0
;
3661 rtx op1
= alpha_compare
.op1
;
3662 int fp_p
= alpha_compare
.fp_p
;
3663 enum machine_mode cmp_mode
3664 = (GET_MODE (op0
) == VOIDmode
? DImode
: GET_MODE (op0
));
3665 enum machine_mode cmp_op_mode
= fp_p
? DFmode
: DImode
;
3666 enum machine_mode cmov_mode
= VOIDmode
;
3667 int local_fast_math
= flag_unsafe_math_optimizations
;
3670 /* Zero the operands. */
3671 memset (&alpha_compare
, 0, sizeof (alpha_compare
));
3673 if (fp_p
!= FLOAT_MODE_P (mode
))
3675 enum rtx_code cmp_code
;
3680 /* If we have fp<->int register move instructions, do a cmov by
3681 performing the comparison in fp registers, and move the
3682 zero/nonzero value to integer registers, where we can then
3683 use a normal cmov, or vice-versa. */
3687 case EQ
: case LE
: case LT
: case LEU
: case LTU
:
3688 /* We have these compares. */
3689 cmp_code
= code
, code
= NE
;
3693 /* This must be reversed. */
3694 cmp_code
= EQ
, code
= EQ
;
3697 case GE
: case GT
: case GEU
: case GTU
:
3698 /* These normally need swapping, but for integer zero we have
3699 special patterns that recognize swapped operands. */
3700 if (!fp_p
&& op1
== const0_rtx
)
3701 cmp_code
= code
, code
= NE
;
3704 cmp_code
= swap_condition (code
);
3706 tem
= op0
, op0
= op1
, op1
= tem
;
3714 tem
= gen_reg_rtx (cmp_op_mode
);
3715 emit_insn (gen_rtx_SET (VOIDmode
, tem
,
3716 gen_rtx_fmt_ee (cmp_code
, cmp_op_mode
,
3719 cmp_mode
= cmp_op_mode
= fp_p
? DImode
: DFmode
;
3720 op0
= gen_lowpart (cmp_op_mode
, tem
);
3721 op1
= CONST0_RTX (cmp_op_mode
);
3723 local_fast_math
= 1;
3726 /* We may be able to use a conditional move directly.
3727 This avoids emitting spurious compares. */
3728 if (signed_comparison_operator (cmp
, VOIDmode
)
3729 && (!fp_p
|| local_fast_math
)
3730 && (op0
== CONST0_RTX (cmp_mode
) || op1
== CONST0_RTX (cmp_mode
)))
3731 return gen_rtx_fmt_ee (code
, VOIDmode
, op0
, op1
);
3733 /* We can't put the comparison inside the conditional move;
3734 emit a compare instruction and put that inside the
3735 conditional move. Make sure we emit only comparisons we have;
3736 swap or reverse as necessary. */
3743 case EQ
: case LE
: case LT
: case LEU
: case LTU
:
3744 /* We have these compares: */
3748 /* This must be reversed. */
3749 code
= reverse_condition (code
);
3753 case GE
: case GT
: case GEU
: case GTU
:
3754 /* These must be swapped. */
3755 if (op1
!= CONST0_RTX (cmp_mode
))
3757 code
= swap_condition (code
);
3758 tem
= op0
, op0
= op1
, op1
= tem
;
3768 if (!reg_or_0_operand (op0
, DImode
))
3769 op0
= force_reg (DImode
, op0
);
3770 if (!reg_or_8bit_operand (op1
, DImode
))
3771 op1
= force_reg (DImode
, op1
);
3774 /* ??? We mark the branch mode to be CCmode to prevent the compare
3775 and cmov from being combined, since the compare insn follows IEEE
3776 rules that the cmov does not. */
3777 if (fp_p
&& !local_fast_math
)
3780 tem
= gen_reg_rtx (cmp_op_mode
);
3781 emit_move_insn (tem
, gen_rtx_fmt_ee (code
, cmp_op_mode
, op0
, op1
));
3782 return gen_rtx_fmt_ee (cmov_code
, cmov_mode
, tem
, CONST0_RTX (cmp_op_mode
));
3785 /* Simplify a conditional move of two constants into a setcc with
3786 arithmetic. This is done with a splitter since combine would
3787 just undo the work if done during code generation. It also catches
3788 cases we wouldn't have before cse. */
3791 alpha_split_conditional_move (code
, dest
, cond
, t_rtx
, f_rtx
)
3793 rtx dest
, cond
, t_rtx
, f_rtx
;
3795 HOST_WIDE_INT t
, f
, diff
;
3796 enum machine_mode mode
;
3797 rtx target
, subtarget
, tmp
;
3799 mode
= GET_MODE (dest
);
3804 if (((code
== NE
|| code
== EQ
) && diff
< 0)
3805 || (code
== GE
|| code
== GT
))
3807 code
= reverse_condition (code
);
3808 diff
= t
, t
= f
, f
= diff
;
3812 subtarget
= target
= dest
;
3815 target
= gen_lowpart (DImode
, dest
);
3816 if (! no_new_pseudos
)
3817 subtarget
= gen_reg_rtx (DImode
);
3821 /* Below, we must be careful to use copy_rtx on target and subtarget
3822 in intermediate insns, as they may be a subreg rtx, which may not
3825 if (f
== 0 && exact_log2 (diff
) > 0
3826 /* On EV6, we've got enough shifters to make non-arithmatic shifts
3827 viable over a longer latency cmove. On EV5, the E0 slot is a
3828 scarce resource, and on EV4 shift has the same latency as a cmove. */
3829 && (diff
<= 8 || alpha_cpu
== PROCESSOR_EV6
))
3831 tmp
= gen_rtx_fmt_ee (code
, DImode
, cond
, const0_rtx
);
3832 emit_insn (gen_rtx_SET (VOIDmode
, copy_rtx (subtarget
), tmp
));
3834 tmp
= gen_rtx_ASHIFT (DImode
, copy_rtx (subtarget
),
3835 GEN_INT (exact_log2 (t
)));
3836 emit_insn (gen_rtx_SET (VOIDmode
, target
, tmp
));
3838 else if (f
== 0 && t
== -1)
3840 tmp
= gen_rtx_fmt_ee (code
, DImode
, cond
, const0_rtx
);
3841 emit_insn (gen_rtx_SET (VOIDmode
, copy_rtx (subtarget
), tmp
));
3843 emit_insn (gen_negdi2 (target
, copy_rtx (subtarget
)));
3845 else if (diff
== 1 || diff
== 4 || diff
== 8)
3849 tmp
= gen_rtx_fmt_ee (code
, DImode
, cond
, const0_rtx
);
3850 emit_insn (gen_rtx_SET (VOIDmode
, copy_rtx (subtarget
), tmp
));
3853 emit_insn (gen_adddi3 (target
, copy_rtx (subtarget
), GEN_INT (f
)));
3856 add_op
= GEN_INT (f
);
3857 if (sext_add_operand (add_op
, mode
))
3859 tmp
= gen_rtx_MULT (DImode
, copy_rtx (subtarget
),
3861 tmp
= gen_rtx_PLUS (DImode
, tmp
, add_op
);
3862 emit_insn (gen_rtx_SET (VOIDmode
, target
, tmp
));
3874 /* Look up the function X_floating library function name for the
3878 alpha_lookup_xfloating_lib_func (code
)
3883 const enum rtx_code code
;
3884 const char *const func
;
3887 static const struct xfloating_op vms_xfloating_ops
[] =
3889 { PLUS
, "OTS$ADD_X" },
3890 { MINUS
, "OTS$SUB_X" },
3891 { MULT
, "OTS$MUL_X" },
3892 { DIV
, "OTS$DIV_X" },
3893 { EQ
, "OTS$EQL_X" },
3894 { NE
, "OTS$NEQ_X" },
3895 { LT
, "OTS$LSS_X" },
3896 { LE
, "OTS$LEQ_X" },
3897 { GT
, "OTS$GTR_X" },
3898 { GE
, "OTS$GEQ_X" },
3899 { FIX
, "OTS$CVTXQ" },
3900 { FLOAT
, "OTS$CVTQX" },
3901 { UNSIGNED_FLOAT
, "OTS$CVTQUX" },
3902 { FLOAT_EXTEND
, "OTS$CVT_FLOAT_T_X" },
3903 { FLOAT_TRUNCATE
, "OTS$CVT_FLOAT_X_T" },
3906 static const struct xfloating_op osf_xfloating_ops
[] =
3908 { PLUS
, "_OtsAddX" },
3909 { MINUS
, "_OtsSubX" },
3910 { MULT
, "_OtsMulX" },
3911 { DIV
, "_OtsDivX" },
3918 { FIX
, "_OtsCvtXQ" },
3919 { FLOAT
, "_OtsCvtQX" },
3920 { UNSIGNED_FLOAT
, "_OtsCvtQUX" },
3921 { FLOAT_EXTEND
, "_OtsConvertFloatTX" },
3922 { FLOAT_TRUNCATE
, "_OtsConvertFloatXT" },
3925 const struct xfloating_op
*ops
;
3926 const long n
= ARRAY_SIZE (osf_xfloating_ops
);
3929 /* How irritating. Nothing to key off for the table. Hardcode
3930 knowledge of the G_floating routines. */
3931 if (TARGET_FLOAT_VAX
)
3933 if (TARGET_ABI_OPEN_VMS
)
3935 if (code
== FLOAT_EXTEND
)
3936 return "OTS$CVT_FLOAT_G_X";
3937 if (code
== FLOAT_TRUNCATE
)
3938 return "OTS$CVT_FLOAT_X_G";
3942 if (code
== FLOAT_EXTEND
)
3943 return "_OtsConvertFloatGX";
3944 if (code
== FLOAT_TRUNCATE
)
3945 return "_OtsConvertFloatXG";
3949 if (TARGET_ABI_OPEN_VMS
)
3950 ops
= vms_xfloating_ops
;
3952 ops
= osf_xfloating_ops
;
3954 for (i
= 0; i
< n
; ++i
)
3955 if (ops
[i
].code
== code
)
3961 /* Most X_floating operations take the rounding mode as an argument.
3962 Compute that here. */
3965 alpha_compute_xfloating_mode_arg (code
, round
)
3967 enum alpha_fp_rounding_mode round
;
3973 case ALPHA_FPRM_NORM
:
3976 case ALPHA_FPRM_MINF
:
3979 case ALPHA_FPRM_CHOP
:
3982 case ALPHA_FPRM_DYN
:
3988 /* XXX For reference, round to +inf is mode = 3. */
3991 if (code
== FLOAT_TRUNCATE
&& alpha_fptm
== ALPHA_FPTM_N
)
3997 /* Emit an X_floating library function call.
3999 Note that these functions do not follow normal calling conventions:
4000 TFmode arguments are passed in two integer registers (as opposed to
4001 indirect); TFmode return values appear in R16+R17.
4003 FUNC is the function name to call.
4004 TARGET is where the output belongs.
4005 OPERANDS are the inputs.
4006 NOPERANDS is the count of inputs.
4007 EQUIV is the expression equivalent for the function.
4011 alpha_emit_xfloating_libcall (func
, target
, operands
, noperands
, equiv
)
4018 rtx usage
= NULL_RTX
, tmp
, reg
;
4023 for (i
= 0; i
< noperands
; ++i
)
4025 switch (GET_MODE (operands
[i
]))
4028 reg
= gen_rtx_REG (TFmode
, regno
);
4033 reg
= gen_rtx_REG (DFmode
, regno
+ 32);
4038 if (GET_CODE (operands
[i
]) != CONST_INT
)
4042 reg
= gen_rtx_REG (DImode
, regno
);
4050 emit_move_insn (reg
, operands
[i
]);
4051 usage
= alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode
, reg
), usage
);
4054 switch (GET_MODE (target
))
4057 reg
= gen_rtx_REG (TFmode
, 16);
4060 reg
= gen_rtx_REG (DFmode
, 32);
4063 reg
= gen_rtx_REG (DImode
, 0);
4069 tmp
= gen_rtx_MEM (QImode
, init_one_libfunc (func
));
4070 tmp
= emit_call_insn (GEN_CALL_VALUE (reg
, tmp
, const0_rtx
,
4071 const0_rtx
, const0_rtx
));
4072 CALL_INSN_FUNCTION_USAGE (tmp
) = usage
;
4077 emit_libcall_block (tmp
, target
, reg
, equiv
);
4080 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
4083 alpha_emit_xfloating_arith (code
, operands
)
4089 rtx out_operands
[3];
4091 func
= alpha_lookup_xfloating_lib_func (code
);
4092 mode
= alpha_compute_xfloating_mode_arg (code
, alpha_fprm
);
4094 out_operands
[0] = operands
[1];
4095 out_operands
[1] = operands
[2];
4096 out_operands
[2] = GEN_INT (mode
);
4097 alpha_emit_xfloating_libcall (func
, operands
[0], out_operands
, 3,
4098 gen_rtx_fmt_ee (code
, TFmode
, operands
[1],
4102 /* Emit an X_floating library function call for a comparison. */
4105 alpha_emit_xfloating_compare (code
, op0
, op1
)
4110 rtx out
, operands
[2];
4112 func
= alpha_lookup_xfloating_lib_func (code
);
4116 out
= gen_reg_rtx (DImode
);
4118 /* ??? Strange mode for equiv because what's actually returned
4119 is -1,0,1, not a proper boolean value. */
4120 alpha_emit_xfloating_libcall (func
, out
, operands
, 2,
4121 gen_rtx_fmt_ee (code
, CCmode
, op0
, op1
));
4126 /* Emit an X_floating library function call for a conversion. */
4129 alpha_emit_xfloating_cvt (code
, operands
)
4133 int noperands
= 1, mode
;
4134 rtx out_operands
[2];
4137 func
= alpha_lookup_xfloating_lib_func (code
);
4139 out_operands
[0] = operands
[1];
4144 mode
= alpha_compute_xfloating_mode_arg (code
, ALPHA_FPRM_CHOP
);
4145 out_operands
[1] = GEN_INT (mode
);
4148 case FLOAT_TRUNCATE
:
4149 mode
= alpha_compute_xfloating_mode_arg (code
, alpha_fprm
);
4150 out_operands
[1] = GEN_INT (mode
);
4157 alpha_emit_xfloating_libcall (func
, operands
[0], out_operands
, noperands
,
4158 gen_rtx_fmt_e (code
, GET_MODE (operands
[0]),
4162 /* Split a TFmode OP[1] into DImode OP[2,3] and likewise for
4163 OP[0] into OP[0,1]. Naturally, output operand ordering is
4167 alpha_split_tfmode_pair (operands
)
4170 if (GET_CODE (operands
[1]) == REG
)
4172 operands
[3] = gen_rtx_REG (DImode
, REGNO (operands
[1]) + 1);
4173 operands
[2] = gen_rtx_REG (DImode
, REGNO (operands
[1]));
4175 else if (GET_CODE (operands
[1]) == MEM
)
4177 operands
[3] = adjust_address (operands
[1], DImode
, 8);
4178 operands
[2] = adjust_address (operands
[1], DImode
, 0);
4180 else if (operands
[1] == CONST0_RTX (TFmode
))
4181 operands
[2] = operands
[3] = const0_rtx
;
4185 if (GET_CODE (operands
[0]) == REG
)
4187 operands
[1] = gen_rtx_REG (DImode
, REGNO (operands
[0]) + 1);
4188 operands
[0] = gen_rtx_REG (DImode
, REGNO (operands
[0]));
4190 else if (GET_CODE (operands
[0]) == MEM
)
4192 operands
[1] = adjust_address (operands
[0], DImode
, 8);
4193 operands
[0] = adjust_address (operands
[0], DImode
, 0);
4199 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
4200 op2 is a register containing the sign bit, operation is the
4201 logical operation to be performed. */
4204 alpha_split_tfmode_frobsign (operands
, operation
)
4206 rtx (*operation
) PARAMS ((rtx
, rtx
, rtx
));
4208 rtx high_bit
= operands
[2];
4212 alpha_split_tfmode_pair (operands
);
4214 /* Detect three flavors of operand overlap. */
4216 if (rtx_equal_p (operands
[0], operands
[2]))
4218 else if (rtx_equal_p (operands
[1], operands
[2]))
4220 if (rtx_equal_p (operands
[0], high_bit
))
4227 emit_move_insn (operands
[0], operands
[2]);
4229 /* ??? If the destination overlaps both source tf and high_bit, then
4230 assume source tf is dead in its entirety and use the other half
4231 for a scratch register. Otherwise "scratch" is just the proper
4232 destination register. */
4233 scratch
= operands
[move
< 2 ? 1 : 3];
4235 emit_insn ((*operation
) (scratch
, high_bit
, operands
[3]));
4239 emit_move_insn (operands
[0], operands
[2]);
4241 emit_move_insn (operands
[1], scratch
);
4245 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
4249 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
4250 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
4251 lda r3,X(r11) lda r3,X+2(r11)
4252 extwl r1,r3,r1 extql r1,r3,r1
4253 extwh r2,r3,r2 extqh r2,r3,r2
4254 or r1.r2.r1 or r1,r2,r1
4257 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
4258 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
4259 lda r3,X(r11) lda r3,X(r11)
4260 extll r1,r3,r1 extll r1,r3,r1
4261 extlh r2,r3,r2 extlh r2,r3,r2
4262 or r1.r2.r1 addl r1,r2,r1
4264 quad: ldq_u r1,X(r11)
4273 alpha_expand_unaligned_load (tgt
, mem
, size
, ofs
, sign
)
4275 HOST_WIDE_INT size
, ofs
;
4278 rtx meml
, memh
, addr
, extl
, exth
, tmp
, mema
;
4279 enum machine_mode mode
;
4281 meml
= gen_reg_rtx (DImode
);
4282 memh
= gen_reg_rtx (DImode
);
4283 addr
= gen_reg_rtx (DImode
);
4284 extl
= gen_reg_rtx (DImode
);
4285 exth
= gen_reg_rtx (DImode
);
4287 mema
= XEXP (mem
, 0);
4288 if (GET_CODE (mema
) == LO_SUM
)
4289 mema
= force_reg (Pmode
, mema
);
4291 /* AND addresses cannot be in any alias set, since they may implicitly
4292 alias surrounding code. Ideally we'd have some alias set that
4293 covered all types except those with alignment 8 or higher. */
4295 tmp
= change_address (mem
, DImode
,
4296 gen_rtx_AND (DImode
,
4297 plus_constant (mema
, ofs
),
4299 set_mem_alias_set (tmp
, 0);
4300 emit_move_insn (meml
, tmp
);
4302 tmp
= change_address (mem
, DImode
,
4303 gen_rtx_AND (DImode
,
4304 plus_constant (mema
, ofs
+ size
- 1),
4306 set_mem_alias_set (tmp
, 0);
4307 emit_move_insn (memh
, tmp
);
4309 if (WORDS_BIG_ENDIAN
&& sign
&& (size
== 2 || size
== 4))
4311 emit_move_insn (addr
, plus_constant (mema
, -1));
4313 emit_insn (gen_extqh_be (extl
, meml
, addr
));
4314 emit_insn (gen_extxl_be (exth
, memh
, GEN_INT (64), addr
));
4316 addr
= expand_binop (DImode
, ior_optab
, extl
, exth
, tgt
, 1, OPTAB_WIDEN
);
4317 addr
= expand_binop (DImode
, ashr_optab
, addr
, GEN_INT (64 - size
*8),
4318 addr
, 1, OPTAB_WIDEN
);
4320 else if (sign
&& size
== 2)
4322 emit_move_insn (addr
, plus_constant (mema
, ofs
+2));
4324 emit_insn (gen_extxl_le (extl
, meml
, GEN_INT (64), addr
));
4325 emit_insn (gen_extqh_le (exth
, memh
, addr
));
4327 /* We must use tgt here for the target. Alpha-vms port fails if we use
4328 addr for the target, because addr is marked as a pointer and combine
4329 knows that pointers are always sign-extended 32 bit values. */
4330 addr
= expand_binop (DImode
, ior_optab
, extl
, exth
, tgt
, 1, OPTAB_WIDEN
);
4331 addr
= expand_binop (DImode
, ashr_optab
, addr
, GEN_INT (48),
4332 addr
, 1, OPTAB_WIDEN
);
4336 if (WORDS_BIG_ENDIAN
)
4338 emit_move_insn (addr
, plus_constant (mema
, ofs
+size
-1));
4342 emit_insn (gen_extwh_be (extl
, meml
, addr
));
4347 emit_insn (gen_extlh_be (extl
, meml
, addr
));
4352 emit_insn (gen_extqh_be (extl
, meml
, addr
));
4359 emit_insn (gen_extxl_be (exth
, memh
, GEN_INT (size
*8), addr
));
4363 emit_move_insn (addr
, plus_constant (mema
, ofs
));
4364 emit_insn (gen_extxl_le (extl
, meml
, GEN_INT (size
*8), addr
));
4368 emit_insn (gen_extwh_le (exth
, memh
, addr
));
4373 emit_insn (gen_extlh_le (exth
, memh
, addr
));
4378 emit_insn (gen_extqh_le (exth
, memh
, addr
));
4387 addr
= expand_binop (mode
, ior_optab
, gen_lowpart (mode
, extl
),
4388 gen_lowpart (mode
, exth
), gen_lowpart (mode
, tgt
),
4393 emit_move_insn (tgt
, gen_lowpart(GET_MODE (tgt
), addr
));
4396 /* Similarly, use ins and msk instructions to perform unaligned stores. */
4399 alpha_expand_unaligned_store (dst
, src
, size
, ofs
)
4401 HOST_WIDE_INT size
, ofs
;
4403 rtx dstl
, dsth
, addr
, insl
, insh
, meml
, memh
, dsta
;
4405 dstl
= gen_reg_rtx (DImode
);
4406 dsth
= gen_reg_rtx (DImode
);
4407 insl
= gen_reg_rtx (DImode
);
4408 insh
= gen_reg_rtx (DImode
);
4410 dsta
= XEXP (dst
, 0);
4411 if (GET_CODE (dsta
) == LO_SUM
)
4412 dsta
= force_reg (Pmode
, dsta
);
4414 /* AND addresses cannot be in any alias set, since they may implicitly
4415 alias surrounding code. Ideally we'd have some alias set that
4416 covered all types except those with alignment 8 or higher. */
4418 meml
= change_address (dst
, DImode
,
4419 gen_rtx_AND (DImode
,
4420 plus_constant (dsta
, ofs
),
4422 set_mem_alias_set (meml
, 0);
4424 memh
= change_address (dst
, DImode
,
4425 gen_rtx_AND (DImode
,
4426 plus_constant (dsta
, ofs
+ size
- 1),
4428 set_mem_alias_set (memh
, 0);
4430 emit_move_insn (dsth
, memh
);
4431 emit_move_insn (dstl
, meml
);
4432 if (WORDS_BIG_ENDIAN
)
4434 addr
= copy_addr_to_reg (plus_constant (dsta
, ofs
+size
-1));
4436 if (src
!= const0_rtx
)
4441 emit_insn (gen_inswl_be (insh
, gen_lowpart (HImode
,src
), addr
));
4444 emit_insn (gen_insll_be (insh
, gen_lowpart (SImode
,src
), addr
));
4447 emit_insn (gen_insql_be (insh
, gen_lowpart (DImode
,src
), addr
));
4450 emit_insn (gen_insxh (insl
, gen_lowpart (DImode
, src
),
4451 GEN_INT (size
*8), addr
));
4457 emit_insn (gen_mskxl_be (dsth
, dsth
, GEN_INT (0xffff), addr
));
4461 rtx msk
= immed_double_const (0xffffffff, 0, DImode
);
4462 emit_insn (gen_mskxl_be (dsth
, dsth
, msk
, addr
));
4466 emit_insn (gen_mskxl_be (dsth
, dsth
, constm1_rtx
, addr
));
4470 emit_insn (gen_mskxh (dstl
, dstl
, GEN_INT (size
*8), addr
));
4474 addr
= copy_addr_to_reg (plus_constant (dsta
, ofs
));
4476 if (src
!= const0_rtx
)
4478 emit_insn (gen_insxh (insh
, gen_lowpart (DImode
, src
),
4479 GEN_INT (size
*8), addr
));
4484 emit_insn (gen_inswl_le (insl
, gen_lowpart (HImode
, src
), addr
));
4487 emit_insn (gen_insll_le (insl
, gen_lowpart (SImode
, src
), addr
));
4490 emit_insn (gen_insql_le (insl
, src
, addr
));
4495 emit_insn (gen_mskxh (dsth
, dsth
, GEN_INT (size
*8), addr
));
4500 emit_insn (gen_mskxl_le (dstl
, dstl
, GEN_INT (0xffff), addr
));
4504 rtx msk
= immed_double_const (0xffffffff, 0, DImode
);
4505 emit_insn (gen_mskxl_le (dstl
, dstl
, msk
, addr
));
4509 emit_insn (gen_mskxl_le (dstl
, dstl
, constm1_rtx
, addr
));
4514 if (src
!= const0_rtx
)
4516 dsth
= expand_binop (DImode
, ior_optab
, insh
, dsth
, dsth
, 0, OPTAB_WIDEN
);
4517 dstl
= expand_binop (DImode
, ior_optab
, insl
, dstl
, dstl
, 0, OPTAB_WIDEN
);
4520 if (WORDS_BIG_ENDIAN
)
4522 emit_move_insn (meml
, dstl
);
4523 emit_move_insn (memh
, dsth
);
4527 /* Must store high before low for degenerate case of aligned. */
4528 emit_move_insn (memh
, dsth
);
4529 emit_move_insn (meml
, dstl
);
4533 /* The block move code tries to maximize speed by separating loads and
4534 stores at the expense of register pressure: we load all of the data
4535 before we store it back out. There are two secondary effects worth
4536 mentioning, that this speeds copying to/from aligned and unaligned
4537 buffers, and that it makes the code significantly easier to write. */
4539 #define MAX_MOVE_WORDS 8
4541 /* Load an integral number of consecutive unaligned quadwords. */
4544 alpha_expand_unaligned_load_words (out_regs
, smem
, words
, ofs
)
4547 HOST_WIDE_INT words
, ofs
;
4549 rtx
const im8
= GEN_INT (-8);
4550 rtx
const i64
= GEN_INT (64);
4551 rtx ext_tmps
[MAX_MOVE_WORDS
], data_regs
[MAX_MOVE_WORDS
+1];
4552 rtx sreg
, areg
, tmp
, smema
;
4555 smema
= XEXP (smem
, 0);
4556 if (GET_CODE (smema
) == LO_SUM
)
4557 smema
= force_reg (Pmode
, smema
);
4559 /* Generate all the tmp registers we need. */
4560 for (i
= 0; i
< words
; ++i
)
4562 data_regs
[i
] = out_regs
[i
];
4563 ext_tmps
[i
] = gen_reg_rtx (DImode
);
4565 data_regs
[words
] = gen_reg_rtx (DImode
);
4568 smem
= adjust_address (smem
, GET_MODE (smem
), ofs
);
4570 /* Load up all of the source data. */
4571 for (i
= 0; i
< words
; ++i
)
4573 tmp
= change_address (smem
, DImode
,
4574 gen_rtx_AND (DImode
,
4575 plus_constant (smema
, 8*i
),
4577 set_mem_alias_set (tmp
, 0);
4578 emit_move_insn (data_regs
[i
], tmp
);
4581 tmp
= change_address (smem
, DImode
,
4582 gen_rtx_AND (DImode
,
4583 plus_constant (smema
, 8*words
- 1),
4585 set_mem_alias_set (tmp
, 0);
4586 emit_move_insn (data_regs
[words
], tmp
);
4588 /* Extract the half-word fragments. Unfortunately DEC decided to make
4589 extxh with offset zero a noop instead of zeroing the register, so
4590 we must take care of that edge condition ourselves with cmov. */
4592 sreg
= copy_addr_to_reg (smema
);
4593 areg
= expand_binop (DImode
, and_optab
, sreg
, GEN_INT (7), NULL
,
4595 if (WORDS_BIG_ENDIAN
)
4596 emit_move_insn (sreg
, plus_constant (sreg
, 7));
4597 for (i
= 0; i
< words
; ++i
)
4599 if (WORDS_BIG_ENDIAN
)
4601 emit_insn (gen_extqh_be (data_regs
[i
], data_regs
[i
], sreg
));
4602 emit_insn (gen_extxl_be (ext_tmps
[i
], data_regs
[i
+1], i64
, sreg
));
4606 emit_insn (gen_extxl_le (data_regs
[i
], data_regs
[i
], i64
, sreg
));
4607 emit_insn (gen_extqh_le (ext_tmps
[i
], data_regs
[i
+1], sreg
));
4609 emit_insn (gen_rtx_SET (VOIDmode
, ext_tmps
[i
],
4610 gen_rtx_IF_THEN_ELSE (DImode
,
4611 gen_rtx_EQ (DImode
, areg
,
4613 const0_rtx
, ext_tmps
[i
])));
4616 /* Merge the half-words into whole words. */
4617 for (i
= 0; i
< words
; ++i
)
4619 out_regs
[i
] = expand_binop (DImode
, ior_optab
, data_regs
[i
],
4620 ext_tmps
[i
], data_regs
[i
], 1, OPTAB_WIDEN
);
4624 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
4625 may be NULL to store zeros. */
4628 alpha_expand_unaligned_store_words (data_regs
, dmem
, words
, ofs
)
4631 HOST_WIDE_INT words
, ofs
;
4633 rtx
const im8
= GEN_INT (-8);
4634 rtx
const i64
= GEN_INT (64);
4635 rtx ins_tmps
[MAX_MOVE_WORDS
];
4636 rtx st_tmp_1
, st_tmp_2
, dreg
;
4637 rtx st_addr_1
, st_addr_2
, dmema
;
4640 dmema
= XEXP (dmem
, 0);
4641 if (GET_CODE (dmema
) == LO_SUM
)
4642 dmema
= force_reg (Pmode
, dmema
);
4644 /* Generate all the tmp registers we need. */
4645 if (data_regs
!= NULL
)
4646 for (i
= 0; i
< words
; ++i
)
4647 ins_tmps
[i
] = gen_reg_rtx(DImode
);
4648 st_tmp_1
= gen_reg_rtx(DImode
);
4649 st_tmp_2
= gen_reg_rtx(DImode
);
4652 dmem
= adjust_address (dmem
, GET_MODE (dmem
), ofs
);
4654 st_addr_2
= change_address (dmem
, DImode
,
4655 gen_rtx_AND (DImode
,
4656 plus_constant (dmema
, words
*8 - 1),
4658 set_mem_alias_set (st_addr_2
, 0);
4660 st_addr_1
= change_address (dmem
, DImode
,
4661 gen_rtx_AND (DImode
, dmema
, im8
));
4662 set_mem_alias_set (st_addr_1
, 0);
4664 /* Load up the destination end bits. */
4665 emit_move_insn (st_tmp_2
, st_addr_2
);
4666 emit_move_insn (st_tmp_1
, st_addr_1
);
4668 /* Shift the input data into place. */
4669 dreg
= copy_addr_to_reg (dmema
);
4670 if (WORDS_BIG_ENDIAN
)
4671 emit_move_insn (dreg
, plus_constant (dreg
, 7));
4672 if (data_regs
!= NULL
)
4674 for (i
= words
-1; i
>= 0; --i
)
4676 if (WORDS_BIG_ENDIAN
)
4678 emit_insn (gen_insql_be (ins_tmps
[i
], data_regs
[i
], dreg
));
4679 emit_insn (gen_insxh (data_regs
[i
], data_regs
[i
], i64
, dreg
));
4683 emit_insn (gen_insxh (ins_tmps
[i
], data_regs
[i
], i64
, dreg
));
4684 emit_insn (gen_insql_le (data_regs
[i
], data_regs
[i
], dreg
));
4687 for (i
= words
-1; i
> 0; --i
)
4689 ins_tmps
[i
-1] = expand_binop (DImode
, ior_optab
, data_regs
[i
],
4690 ins_tmps
[i
-1], ins_tmps
[i
-1], 1,
4695 /* Split and merge the ends with the destination data. */
4696 if (WORDS_BIG_ENDIAN
)
4698 emit_insn (gen_mskxl_be (st_tmp_2
, st_tmp_2
, constm1_rtx
, dreg
));
4699 emit_insn (gen_mskxh (st_tmp_1
, st_tmp_1
, i64
, dreg
));
4703 emit_insn (gen_mskxh (st_tmp_2
, st_tmp_2
, i64
, dreg
));
4704 emit_insn (gen_mskxl_le (st_tmp_1
, st_tmp_1
, constm1_rtx
, dreg
));
4707 if (data_regs
!= NULL
)
4709 st_tmp_2
= expand_binop (DImode
, ior_optab
, st_tmp_2
, ins_tmps
[words
-1],
4710 st_tmp_2
, 1, OPTAB_WIDEN
);
4711 st_tmp_1
= expand_binop (DImode
, ior_optab
, st_tmp_1
, data_regs
[0],
4712 st_tmp_1
, 1, OPTAB_WIDEN
);
4716 if (WORDS_BIG_ENDIAN
)
4717 emit_move_insn (st_addr_1
, st_tmp_1
);
4719 emit_move_insn (st_addr_2
, st_tmp_2
);
4720 for (i
= words
-1; i
> 0; --i
)
4722 rtx tmp
= change_address (dmem
, DImode
,
4723 gen_rtx_AND (DImode
,
4724 plus_constant(dmema
,
4725 WORDS_BIG_ENDIAN
? i
*8-1 : i
*8),
4727 set_mem_alias_set (tmp
, 0);
4728 emit_move_insn (tmp
, data_regs
? ins_tmps
[i
-1] : const0_rtx
);
4730 if (WORDS_BIG_ENDIAN
)
4731 emit_move_insn (st_addr_2
, st_tmp_2
);
4733 emit_move_insn (st_addr_1
, st_tmp_1
);
4737 /* Expand string/block move operations.
4739 operands[0] is the pointer to the destination.
4740 operands[1] is the pointer to the source.
4741 operands[2] is the number of bytes to move.
4742 operands[3] is the alignment. */
4745 alpha_expand_block_move (operands
)
4748 rtx bytes_rtx
= operands
[2];
4749 rtx align_rtx
= operands
[3];
4750 HOST_WIDE_INT orig_bytes
= INTVAL (bytes_rtx
);
4751 HOST_WIDE_INT bytes
= orig_bytes
;
4752 HOST_WIDE_INT src_align
= INTVAL (align_rtx
) * BITS_PER_UNIT
;
4753 HOST_WIDE_INT dst_align
= src_align
;
4754 rtx orig_src
= operands
[1];
4755 rtx orig_dst
= operands
[0];
4756 rtx data_regs
[2 * MAX_MOVE_WORDS
+ 16];
4758 unsigned int i
, words
, ofs
, nregs
= 0;
4760 if (orig_bytes
<= 0)
4762 else if (orig_bytes
> MAX_MOVE_WORDS
* UNITS_PER_WORD
)
4765 /* Look for additional alignment information from recorded register info. */
4767 tmp
= XEXP (orig_src
, 0);
4768 if (GET_CODE (tmp
) == REG
)
4769 src_align
= MAX (src_align
, REGNO_POINTER_ALIGN (REGNO (tmp
)));
4770 else if (GET_CODE (tmp
) == PLUS
4771 && GET_CODE (XEXP (tmp
, 0)) == REG
4772 && GET_CODE (XEXP (tmp
, 1)) == CONST_INT
)
4774 unsigned HOST_WIDE_INT c
= INTVAL (XEXP (tmp
, 1));
4775 unsigned int a
= REGNO_POINTER_ALIGN (REGNO (XEXP (tmp
, 0)));
4779 if (a
>= 64 && c
% 8 == 0)
4781 else if (a
>= 32 && c
% 4 == 0)
4783 else if (a
>= 16 && c
% 2 == 0)
4788 tmp
= XEXP (orig_dst
, 0);
4789 if (GET_CODE (tmp
) == REG
)
4790 dst_align
= MAX (dst_align
, REGNO_POINTER_ALIGN (REGNO (tmp
)));
4791 else if (GET_CODE (tmp
) == PLUS
4792 && GET_CODE (XEXP (tmp
, 0)) == REG
4793 && GET_CODE (XEXP (tmp
, 1)) == CONST_INT
)
4795 unsigned HOST_WIDE_INT c
= INTVAL (XEXP (tmp
, 1));
4796 unsigned int a
= REGNO_POINTER_ALIGN (REGNO (XEXP (tmp
, 0)));
4800 if (a
>= 64 && c
% 8 == 0)
4802 else if (a
>= 32 && c
% 4 == 0)
4804 else if (a
>= 16 && c
% 2 == 0)
4809 /* Load the entire block into registers. */
4810 if (GET_CODE (XEXP (orig_src
, 0)) == ADDRESSOF
)
4812 enum machine_mode mode
;
4814 tmp
= XEXP (XEXP (orig_src
, 0), 0);
4816 /* Don't use the existing register if we're reading more than
4817 is held in the register. Nor if there is not a mode that
4818 handles the exact size. */
4819 mode
= mode_for_size (bytes
* BITS_PER_UNIT
, MODE_INT
, 1);
4821 && GET_MODE_SIZE (GET_MODE (tmp
)) >= bytes
)
4825 data_regs
[nregs
] = gen_lowpart (DImode
, tmp
);
4826 data_regs
[nregs
+ 1] = gen_highpart (DImode
, tmp
);
4830 data_regs
[nregs
++] = gen_lowpart (mode
, tmp
);
4835 /* No appropriate mode; fall back on memory. */
4836 orig_src
= replace_equiv_address (orig_src
,
4837 copy_addr_to_reg (XEXP (orig_src
, 0)));
4838 src_align
= GET_MODE_BITSIZE (GET_MODE (tmp
));
4842 if (src_align
>= 64 && bytes
>= 8)
4846 for (i
= 0; i
< words
; ++i
)
4847 data_regs
[nregs
+ i
] = gen_reg_rtx (DImode
);
4849 for (i
= 0; i
< words
; ++i
)
4850 emit_move_insn (data_regs
[nregs
+ i
],
4851 adjust_address (orig_src
, DImode
, ofs
+ i
* 8));
4858 if (src_align
>= 32 && bytes
>= 4)
4862 for (i
= 0; i
< words
; ++i
)
4863 data_regs
[nregs
+ i
] = gen_reg_rtx (SImode
);
4865 for (i
= 0; i
< words
; ++i
)
4866 emit_move_insn (data_regs
[nregs
+ i
],
4867 adjust_address (orig_src
, SImode
, ofs
+ i
* 4));
4878 for (i
= 0; i
< words
+1; ++i
)
4879 data_regs
[nregs
+ i
] = gen_reg_rtx (DImode
);
4881 alpha_expand_unaligned_load_words (data_regs
+ nregs
, orig_src
,
4889 if (! TARGET_BWX
&& bytes
>= 4)
4891 data_regs
[nregs
++] = tmp
= gen_reg_rtx (SImode
);
4892 alpha_expand_unaligned_load (tmp
, orig_src
, 4, ofs
, 0);
4899 if (src_align
>= 16)
4902 data_regs
[nregs
++] = tmp
= gen_reg_rtx (HImode
);
4903 emit_move_insn (tmp
, adjust_address (orig_src
, HImode
, ofs
));
4906 } while (bytes
>= 2);
4908 else if (! TARGET_BWX
)
4910 data_regs
[nregs
++] = tmp
= gen_reg_rtx (HImode
);
4911 alpha_expand_unaligned_load (tmp
, orig_src
, 2, ofs
, 0);
4919 data_regs
[nregs
++] = tmp
= gen_reg_rtx (QImode
);
4920 emit_move_insn (tmp
, adjust_address (orig_src
, QImode
, ofs
));
4927 if (nregs
> ARRAY_SIZE (data_regs
))
4930 /* Now save it back out again. */
4934 if (GET_CODE (XEXP (orig_dst
, 0)) == ADDRESSOF
)
4936 enum machine_mode mode
;
4937 tmp
= XEXP (XEXP (orig_dst
, 0), 0);
4939 mode
= mode_for_size (orig_bytes
* BITS_PER_UNIT
, MODE_INT
, 1);
4940 if (GET_MODE (tmp
) == mode
)
4944 emit_move_insn (tmp
, data_regs
[0]);
4949 else if (nregs
== 2 && mode
== TImode
)
4951 /* Undo the subregging done above when copying between
4952 two TImode registers. */
4953 if (GET_CODE (data_regs
[0]) == SUBREG
4954 && GET_MODE (SUBREG_REG (data_regs
[0])) == TImode
)
4955 emit_move_insn (tmp
, SUBREG_REG (data_regs
[0]));
4961 emit_move_insn (gen_lowpart (DImode
, tmp
), data_regs
[0]);
4962 emit_move_insn (gen_highpart (DImode
, tmp
), data_regs
[1]);
4966 emit_no_conflict_block (seq
, tmp
, data_regs
[0],
4967 data_regs
[1], NULL_RTX
);
4975 /* ??? If nregs > 1, consider reconstructing the word in regs. */
4976 /* ??? Optimize mode < dst_mode with strict_low_part. */
4978 /* No appropriate mode; fall back on memory. We can speed things
4979 up by recognizing extra alignment information. */
4980 orig_dst
= replace_equiv_address (orig_dst
,
4981 copy_addr_to_reg (XEXP (orig_dst
, 0)));
4982 dst_align
= GET_MODE_BITSIZE (GET_MODE (tmp
));
4985 /* Write out the data in whatever chunks reading the source allowed. */
4986 if (dst_align
>= 64)
4988 while (i
< nregs
&& GET_MODE (data_regs
[i
]) == DImode
)
4990 emit_move_insn (adjust_address (orig_dst
, DImode
, ofs
),
4997 if (dst_align
>= 32)
4999 /* If the source has remaining DImode regs, write them out in
5001 while (i
< nregs
&& GET_MODE (data_regs
[i
]) == DImode
)
5003 tmp
= expand_binop (DImode
, lshr_optab
, data_regs
[i
], GEN_INT (32),
5004 NULL_RTX
, 1, OPTAB_WIDEN
);
5006 emit_move_insn (adjust_address (orig_dst
, SImode
, ofs
),
5007 gen_lowpart (SImode
, data_regs
[i
]));
5008 emit_move_insn (adjust_address (orig_dst
, SImode
, ofs
+ 4),
5009 gen_lowpart (SImode
, tmp
));
5014 while (i
< nregs
&& GET_MODE (data_regs
[i
]) == SImode
)
5016 emit_move_insn (adjust_address (orig_dst
, SImode
, ofs
),
5023 if (i
< nregs
&& GET_MODE (data_regs
[i
]) == DImode
)
5025 /* Write out a remaining block of words using unaligned methods. */
5027 for (words
= 1; i
+ words
< nregs
; words
++)
5028 if (GET_MODE (data_regs
[i
+ words
]) != DImode
)
5032 alpha_expand_unaligned_store (orig_dst
, data_regs
[i
], 8, ofs
);
5034 alpha_expand_unaligned_store_words (data_regs
+ i
, orig_dst
,
5041 /* Due to the above, this won't be aligned. */
5042 /* ??? If we have more than one of these, consider constructing full
5043 words in registers and using alpha_expand_unaligned_store_words. */
5044 while (i
< nregs
&& GET_MODE (data_regs
[i
]) == SImode
)
5046 alpha_expand_unaligned_store (orig_dst
, data_regs
[i
], 4, ofs
);
5051 if (dst_align
>= 16)
5052 while (i
< nregs
&& GET_MODE (data_regs
[i
]) == HImode
)
5054 emit_move_insn (adjust_address (orig_dst
, HImode
, ofs
), data_regs
[i
]);
5059 while (i
< nregs
&& GET_MODE (data_regs
[i
]) == HImode
)
5061 alpha_expand_unaligned_store (orig_dst
, data_regs
[i
], 2, ofs
);
5066 while (i
< nregs
&& GET_MODE (data_regs
[i
]) == QImode
)
5068 emit_move_insn (adjust_address (orig_dst
, QImode
, ofs
), data_regs
[i
]);
5082 alpha_expand_block_clear (operands
)
5085 rtx bytes_rtx
= operands
[1];
5086 rtx align_rtx
= operands
[2];
5087 HOST_WIDE_INT orig_bytes
= INTVAL (bytes_rtx
);
5088 HOST_WIDE_INT bytes
= orig_bytes
;
5089 HOST_WIDE_INT align
= INTVAL (align_rtx
) * BITS_PER_UNIT
;
5090 HOST_WIDE_INT alignofs
= 0;
5091 rtx orig_dst
= operands
[0];
5093 int i
, words
, ofs
= 0;
5095 if (orig_bytes
<= 0)
5097 if (orig_bytes
> MAX_MOVE_WORDS
* UNITS_PER_WORD
)
5100 /* Look for stricter alignment. */
5101 tmp
= XEXP (orig_dst
, 0);
5102 if (GET_CODE (tmp
) == REG
)
5103 align
= MAX (align
, REGNO_POINTER_ALIGN (REGNO (tmp
)));
5104 else if (GET_CODE (tmp
) == PLUS
5105 && GET_CODE (XEXP (tmp
, 0)) == REG
5106 && GET_CODE (XEXP (tmp
, 1)) == CONST_INT
)
5108 HOST_WIDE_INT c
= INTVAL (XEXP (tmp
, 1));
5109 int a
= REGNO_POINTER_ALIGN (REGNO (XEXP (tmp
, 0)));
5114 align
= a
, alignofs
= 8 - c
% 8;
5116 align
= a
, alignofs
= 4 - c
% 4;
5118 align
= a
, alignofs
= 2 - c
% 2;
5121 else if (GET_CODE (tmp
) == ADDRESSOF
)
5123 enum machine_mode mode
;
5125 mode
= mode_for_size (bytes
* BITS_PER_UNIT
, MODE_INT
, 1);
5126 if (GET_MODE (XEXP (tmp
, 0)) == mode
)
5128 emit_move_insn (XEXP (tmp
, 0), const0_rtx
);
5132 /* No appropriate mode; fall back on memory. */
5133 orig_dst
= replace_equiv_address (orig_dst
, copy_addr_to_reg (tmp
));
5134 align
= GET_MODE_BITSIZE (GET_MODE (XEXP (tmp
, 0)));
5137 /* Handle an unaligned prefix first. */
5141 #if HOST_BITS_PER_WIDE_INT >= 64
5142 /* Given that alignofs is bounded by align, the only time BWX could
5143 generate three stores is for a 7 byte fill. Prefer two individual
5144 stores over a load/mask/store sequence. */
5145 if ((!TARGET_BWX
|| alignofs
== 7)
5147 && !(alignofs
== 4 && bytes
>= 4))
5149 enum machine_mode mode
= (align
>= 64 ? DImode
: SImode
);
5150 int inv_alignofs
= (align
>= 64 ? 8 : 4) - alignofs
;
5154 mem
= adjust_address (orig_dst
, mode
, ofs
- inv_alignofs
);
5155 set_mem_alias_set (mem
, 0);
5157 mask
= ~(~(HOST_WIDE_INT
)0 << (inv_alignofs
* 8));
5158 if (bytes
< alignofs
)
5160 mask
|= ~(HOST_WIDE_INT
)0 << ((inv_alignofs
+ bytes
) * 8);
5171 tmp
= expand_binop (mode
, and_optab
, mem
, GEN_INT (mask
),
5172 NULL_RTX
, 1, OPTAB_WIDEN
);
5174 emit_move_insn (mem
, tmp
);
5178 if (TARGET_BWX
&& (alignofs
& 1) && bytes
>= 1)
5180 emit_move_insn (adjust_address (orig_dst
, QImode
, ofs
), const0_rtx
);
5185 if (TARGET_BWX
&& align
>= 16 && (alignofs
& 3) == 2 && bytes
>= 2)
5187 emit_move_insn (adjust_address (orig_dst
, HImode
, ofs
), const0_rtx
);
5192 if (alignofs
== 4 && bytes
>= 4)
5194 emit_move_insn (adjust_address (orig_dst
, SImode
, ofs
), const0_rtx
);
5200 /* If we've not used the extra lead alignment information by now,
5201 we won't be able to. Downgrade align to match what's left over. */
5204 alignofs
= alignofs
& -alignofs
;
5205 align
= MIN (align
, alignofs
* BITS_PER_UNIT
);
5209 /* Handle a block of contiguous long-words. */
5211 if (align
>= 64 && bytes
>= 8)
5215 for (i
= 0; i
< words
; ++i
)
5216 emit_move_insn (adjust_address (orig_dst
, DImode
, ofs
+ i
* 8),
5223 /* If the block is large and appropriately aligned, emit a single
5224 store followed by a sequence of stq_u insns. */
5226 if (align
>= 32 && bytes
> 16)
5230 emit_move_insn (adjust_address (orig_dst
, SImode
, ofs
), const0_rtx
);
5234 orig_dsta
= XEXP (orig_dst
, 0);
5235 if (GET_CODE (orig_dsta
) == LO_SUM
)
5236 orig_dsta
= force_reg (Pmode
, orig_dsta
);
5239 for (i
= 0; i
< words
; ++i
)
5242 = change_address (orig_dst
, DImode
,
5243 gen_rtx_AND (DImode
,
5244 plus_constant (orig_dsta
, ofs
+ i
*8),
5246 set_mem_alias_set (mem
, 0);
5247 emit_move_insn (mem
, const0_rtx
);
5250 /* Depending on the alignment, the first stq_u may have overlapped
5251 with the initial stl, which means that the last stq_u didn't
5252 write as much as it would appear. Leave those questionable bytes
5254 bytes
-= words
* 8 - 4;
5255 ofs
+= words
* 8 - 4;
5258 /* Handle a smaller block of aligned words. */
5260 if ((align
>= 64 && bytes
== 4)
5261 || (align
== 32 && bytes
>= 4))
5265 for (i
= 0; i
< words
; ++i
)
5266 emit_move_insn (adjust_address (orig_dst
, SImode
, ofs
+ i
* 4),
5273 /* An unaligned block uses stq_u stores for as many as possible. */
5279 alpha_expand_unaligned_store_words (NULL
, orig_dst
, words
, ofs
);
5285 /* Next clean up any trailing pieces. */
5287 #if HOST_BITS_PER_WIDE_INT >= 64
5288 /* Count the number of bits in BYTES for which aligned stores could
5291 for (i
= (TARGET_BWX
? 1 : 4); i
* BITS_PER_UNIT
<= align
; i
<<= 1)
5295 /* If we have appropriate alignment (and it wouldn't take too many
5296 instructions otherwise), mask out the bytes we need. */
5297 if (TARGET_BWX
? words
> 2 : bytes
> 0)
5304 mem
= adjust_address (orig_dst
, DImode
, ofs
);
5305 set_mem_alias_set (mem
, 0);
5307 mask
= ~(HOST_WIDE_INT
)0 << (bytes
* 8);
5309 tmp
= expand_binop (DImode
, and_optab
, mem
, GEN_INT (mask
),
5310 NULL_RTX
, 1, OPTAB_WIDEN
);
5312 emit_move_insn (mem
, tmp
);
5315 else if (align
>= 32 && bytes
< 4)
5320 mem
= adjust_address (orig_dst
, SImode
, ofs
);
5321 set_mem_alias_set (mem
, 0);
5323 mask
= ~(HOST_WIDE_INT
)0 << (bytes
* 8);
5325 tmp
= expand_binop (SImode
, and_optab
, mem
, GEN_INT (mask
),
5326 NULL_RTX
, 1, OPTAB_WIDEN
);
5328 emit_move_insn (mem
, tmp
);
5334 if (!TARGET_BWX
&& bytes
>= 4)
5336 alpha_expand_unaligned_store (orig_dst
, const0_rtx
, 4, ofs
);
5346 emit_move_insn (adjust_address (orig_dst
, HImode
, ofs
),
5350 } while (bytes
>= 2);
5352 else if (! TARGET_BWX
)
5354 alpha_expand_unaligned_store (orig_dst
, const0_rtx
, 2, ofs
);
5362 emit_move_insn (adjust_address (orig_dst
, QImode
, ofs
), const0_rtx
);
5370 /* Returns a mask so that zap(x, value) == x & mask. */
5373 alpha_expand_zap_mask (value
)
5374 HOST_WIDE_INT value
;
5379 if (HOST_BITS_PER_WIDE_INT
>= 64)
5381 HOST_WIDE_INT mask
= 0;
5383 for (i
= 7; i
>= 0; --i
)
5386 if (!((value
>> i
) & 1))
5390 result
= gen_int_mode (mask
, DImode
);
5392 else if (HOST_BITS_PER_WIDE_INT
== 32)
5394 HOST_WIDE_INT mask_lo
= 0, mask_hi
= 0;
5396 for (i
= 7; i
>= 4; --i
)
5399 if (!((value
>> i
) & 1))
5403 for (i
= 3; i
>= 0; --i
)
5406 if (!((value
>> i
) & 1))
5410 result
= immed_double_const (mask_lo
, mask_hi
, DImode
);
5419 alpha_expand_builtin_vector_binop (gen
, mode
, op0
, op1
, op2
)
5420 rtx (*gen
) PARAMS ((rtx
, rtx
, rtx
));
5421 enum machine_mode mode
;
5424 op0
= gen_lowpart (mode
, op0
);
5426 if (op1
== const0_rtx
)
5427 op1
= CONST0_RTX (mode
);
5429 op1
= gen_lowpart (mode
, op1
);
5431 if (op2
== const0_rtx
)
5432 op2
= CONST0_RTX (mode
);
5434 op2
= gen_lowpart (mode
, op2
);
5436 emit_insn ((*gen
) (op0
, op1
, op2
));
5439 /* Adjust the cost of a scheduling dependency. Return the new cost of
5440 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
5443 alpha_adjust_cost (insn
, link
, dep_insn
, cost
)
5449 enum attr_type insn_type
, dep_insn_type
;
5451 /* If the dependence is an anti-dependence, there is no cost. For an
5452 output dependence, there is sometimes a cost, but it doesn't seem
5453 worth handling those few cases. */
5454 if (REG_NOTE_KIND (link
) != 0)
5457 /* If we can't recognize the insns, we can't really do anything. */
5458 if (recog_memoized (insn
) < 0 || recog_memoized (dep_insn
) < 0)
5461 insn_type
= get_attr_type (insn
);
5462 dep_insn_type
= get_attr_type (dep_insn
);
5464 /* Bring in the user-defined memory latency. */
5465 if (dep_insn_type
== TYPE_ILD
5466 || dep_insn_type
== TYPE_FLD
5467 || dep_insn_type
== TYPE_LDSYM
)
5468 cost
+= alpha_memory_latency
-1;
5470 /* Everything else handled in DFA bypasses now. */
5475 /* The number of instructions that can be issued per cycle. */
5480 return (alpha_cpu
== PROCESSOR_EV4
? 2 : 4);
5484 alpha_use_dfa_pipeline_interface ()
5489 /* How many alternative schedules to try. This should be as wide as the
5490 scheduling freedom in the DFA, but no wider. Making this value too
5491 large results extra work for the scheduler.
5493 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
5494 alternative schedules. For EV5, we can choose between E0/E1 and
5495 FA/FM. For EV6, an arithmatic insn can be issued to U0/U1/L0/L1. */
5498 alpha_multipass_dfa_lookahead ()
5500 return (alpha_cpu
== PROCESSOR_EV6
? 4 : 2);
5503 /* Machine-specific function data. */
5505 struct machine_function
GTY(())
5508 /* List of call information words for calls from this function. */
5509 struct rtx_def
*first_ciw
;
5510 struct rtx_def
*last_ciw
;
5513 /* List of deferred case vectors. */
5514 struct rtx_def
*addr_list
;
5517 const char *some_ld_name
;
5520 /* How to allocate a 'struct machine_function'. */
5522 static struct machine_function
*
5523 alpha_init_machine_status ()
5525 return ((struct machine_function
*)
5526 ggc_alloc_cleared (sizeof (struct machine_function
)));
5529 /* Functions to save and restore alpha_return_addr_rtx. */
5531 /* Start the ball rolling with RETURN_ADDR_RTX. */
5534 alpha_return_addr (count
, frame
)
5536 rtx frame ATTRIBUTE_UNUSED
;
5541 return get_hard_reg_initial_val (Pmode
, REG_RA
);
5544 /* Return or create a pseudo containing the gp value for the current
5545 function. Needed only if TARGET_LD_BUGGY_LDGP. */
5548 alpha_gp_save_rtx ()
5550 rtx r
= get_hard_reg_initial_val (DImode
, 29);
5551 if (GET_CODE (r
) != MEM
)
5552 r
= gen_mem_addressof (r
, NULL_TREE
, /*rescan=*/true);
5557 alpha_ra_ever_killed ()
5561 if (!has_hard_reg_initial_val (Pmode
, REG_RA
))
5562 return regs_ever_live
[REG_RA
];
5564 push_topmost_sequence ();
5566 pop_topmost_sequence ();
5568 return reg_set_between_p (gen_rtx_REG (Pmode
, REG_RA
), top
, NULL_RTX
);
5572 /* Return the trap mode suffix applicable to the current
5573 instruction, or NULL. */
5576 get_trap_mode_suffix ()
5578 enum attr_trap_suffix s
= get_attr_trap_suffix (current_output_insn
);
5582 case TRAP_SUFFIX_NONE
:
5585 case TRAP_SUFFIX_SU
:
5586 if (alpha_fptm
>= ALPHA_FPTM_SU
)
5590 case TRAP_SUFFIX_SUI
:
5591 if (alpha_fptm
>= ALPHA_FPTM_SUI
)
5595 case TRAP_SUFFIX_V_SV
:
5603 case ALPHA_FPTM_SUI
:
5608 case TRAP_SUFFIX_V_SV_SVI
:
5617 case ALPHA_FPTM_SUI
:
5622 case TRAP_SUFFIX_U_SU_SUI
:
5631 case ALPHA_FPTM_SUI
:
5639 /* Return the rounding mode suffix applicable to the current
5640 instruction, or NULL. */
5643 get_round_mode_suffix ()
5645 enum attr_round_suffix s
= get_attr_round_suffix (current_output_insn
);
5649 case ROUND_SUFFIX_NONE
:
5651 case ROUND_SUFFIX_NORMAL
:
5654 case ALPHA_FPRM_NORM
:
5656 case ALPHA_FPRM_MINF
:
5658 case ALPHA_FPRM_CHOP
:
5660 case ALPHA_FPRM_DYN
:
5665 case ROUND_SUFFIX_C
:
5671 /* Locate some local-dynamic symbol still in use by this function
5672 so that we can print its name in some movdi_er_tlsldm pattern. */
5675 get_some_local_dynamic_name ()
5679 if (cfun
->machine
->some_ld_name
)
5680 return cfun
->machine
->some_ld_name
;
5682 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
5684 && for_each_rtx (&PATTERN (insn
), get_some_local_dynamic_name_1
, 0))
5685 return cfun
->machine
->some_ld_name
;
5691 get_some_local_dynamic_name_1 (px
, data
)
5693 void *data ATTRIBUTE_UNUSED
;
5697 if (GET_CODE (x
) == SYMBOL_REF
5698 && SYMBOL_REF_TLS_MODEL (x
) == TLS_MODEL_LOCAL_DYNAMIC
)
5700 cfun
->machine
->some_ld_name
= XSTR (x
, 0);
5707 /* Print an operand. Recognize special options, documented below. */
5710 print_operand (file
, x
, code
)
5720 /* Print the assembler name of the current function. */
5721 assemble_name (file
, alpha_fnname
);
5725 assemble_name (file
, get_some_local_dynamic_name ());
5730 const char *trap
= get_trap_mode_suffix ();
5731 const char *round
= get_round_mode_suffix ();
5734 fprintf (file
, (TARGET_AS_SLASH_BEFORE_SUFFIX
? "/%s%s" : "%s%s"),
5735 (trap
? trap
: ""), (round
? round
: ""));
5740 /* Generates single precision instruction suffix. */
5741 fputc ((TARGET_FLOAT_VAX
? 'f' : 's'), file
);
5745 /* Generates double precision instruction suffix. */
5746 fputc ((TARGET_FLOAT_VAX
? 'g' : 't'), file
);
5750 /* Generates a nop after a noreturn call at the very end of the
5752 if (next_real_insn (current_output_insn
) == 0)
5753 fprintf (file
, "\n\tnop");
5757 if (alpha_this_literal_sequence_number
== 0)
5758 alpha_this_literal_sequence_number
= alpha_next_sequence_number
++;
5759 fprintf (file
, "%d", alpha_this_literal_sequence_number
);
5763 if (alpha_this_gpdisp_sequence_number
== 0)
5764 alpha_this_gpdisp_sequence_number
= alpha_next_sequence_number
++;
5765 fprintf (file
, "%d", alpha_this_gpdisp_sequence_number
);
5769 if (GET_CODE (x
) == HIGH
)
5770 output_addr_const (file
, XEXP (x
, 0));
5772 output_operand_lossage ("invalid %%H value");
5779 if (GET_CODE (x
) == UNSPEC
&& XINT (x
, 1) == UNSPEC_TLSGD_CALL
)
5781 x
= XVECEXP (x
, 0, 0);
5782 lituse
= "lituse_tlsgd";
5784 else if (GET_CODE (x
) == UNSPEC
&& XINT (x
, 1) == UNSPEC_TLSLDM_CALL
)
5786 x
= XVECEXP (x
, 0, 0);
5787 lituse
= "lituse_tlsldm";
5789 else if (GET_CODE (x
) == CONST_INT
)
5790 lituse
= "lituse_jsr";
5793 output_operand_lossage ("invalid %%J value");
5797 if (x
!= const0_rtx
)
5798 fprintf (file
, "\t\t!%s!%d", lituse
, (int) INTVAL (x
));
5803 /* If this operand is the constant zero, write it as "$31". */
5804 if (GET_CODE (x
) == REG
)
5805 fprintf (file
, "%s", reg_names
[REGNO (x
)]);
5806 else if (x
== CONST0_RTX (GET_MODE (x
)))
5807 fprintf (file
, "$31");
5809 output_operand_lossage ("invalid %%r value");
5813 /* Similar, but for floating-point. */
5814 if (GET_CODE (x
) == REG
)
5815 fprintf (file
, "%s", reg_names
[REGNO (x
)]);
5816 else if (x
== CONST0_RTX (GET_MODE (x
)))
5817 fprintf (file
, "$f31");
5819 output_operand_lossage ("invalid %%R value");
5823 /* Write the 1's complement of a constant. */
5824 if (GET_CODE (x
) != CONST_INT
)
5825 output_operand_lossage ("invalid %%N value");
5827 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, ~ INTVAL (x
));
5831 /* Write 1 << C, for a constant C. */
5832 if (GET_CODE (x
) != CONST_INT
)
5833 output_operand_lossage ("invalid %%P value");
5835 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, (HOST_WIDE_INT
) 1 << INTVAL (x
));
5839 /* Write the high-order 16 bits of a constant, sign-extended. */
5840 if (GET_CODE (x
) != CONST_INT
)
5841 output_operand_lossage ("invalid %%h value");
5843 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (x
) >> 16);
5847 /* Write the low-order 16 bits of a constant, sign-extended. */
5848 if (GET_CODE (x
) != CONST_INT
)
5849 output_operand_lossage ("invalid %%L value");
5851 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
,
5852 (INTVAL (x
) & 0xffff) - 2 * (INTVAL (x
) & 0x8000));
5856 /* Write mask for ZAP insn. */
5857 if (GET_CODE (x
) == CONST_DOUBLE
)
5859 HOST_WIDE_INT mask
= 0;
5860 HOST_WIDE_INT value
;
5862 value
= CONST_DOUBLE_LOW (x
);
5863 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
/ HOST_BITS_PER_CHAR
;
5868 value
= CONST_DOUBLE_HIGH (x
);
5869 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
/ HOST_BITS_PER_CHAR
;
5872 mask
|= (1 << (i
+ sizeof (int)));
5874 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, mask
& 0xff);
5877 else if (GET_CODE (x
) == CONST_INT
)
5879 HOST_WIDE_INT mask
= 0, value
= INTVAL (x
);
5881 for (i
= 0; i
< 8; i
++, value
>>= 8)
5885 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, mask
);
5888 output_operand_lossage ("invalid %%m value");
5892 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5893 if (GET_CODE (x
) != CONST_INT
5894 || (INTVAL (x
) != 8 && INTVAL (x
) != 16
5895 && INTVAL (x
) != 32 && INTVAL (x
) != 64))
5896 output_operand_lossage ("invalid %%M value");
5898 fprintf (file
, "%s",
5899 (INTVAL (x
) == 8 ? "b"
5900 : INTVAL (x
) == 16 ? "w"
5901 : INTVAL (x
) == 32 ? "l"
5906 /* Similar, except do it from the mask. */
5907 if (GET_CODE (x
) == CONST_INT
)
5909 HOST_WIDE_INT value
= INTVAL (x
);
5916 if (value
== 0xffff)
5921 if (value
== 0xffffffff)
5932 else if (HOST_BITS_PER_WIDE_INT
== 32
5933 && GET_CODE (x
) == CONST_DOUBLE
5934 && CONST_DOUBLE_LOW (x
) == 0xffffffff
5935 && CONST_DOUBLE_HIGH (x
) == 0)
5940 output_operand_lossage ("invalid %%U value");
5944 /* Write the constant value divided by 8 for little-endian mode or
5945 (56 - value) / 8 for big-endian mode. */
5947 if (GET_CODE (x
) != CONST_INT
5948 || (unsigned HOST_WIDE_INT
) INTVAL (x
) >= (WORDS_BIG_ENDIAN
5951 || (INTVAL (x
) & 7) != 0)
5952 output_operand_lossage ("invalid %%s value");
5954 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
,
5956 ? (56 - INTVAL (x
)) / 8
5961 /* Same, except compute (64 - c) / 8 */
5963 if (GET_CODE (x
) != CONST_INT
5964 && (unsigned HOST_WIDE_INT
) INTVAL (x
) >= 64
5965 && (INTVAL (x
) & 7) != 8)
5966 output_operand_lossage ("invalid %%s value");
5968 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, (64 - INTVAL (x
)) / 8);
5973 /* On Unicos/Mk systems: use a DEX expression if the symbol
5974 clashes with a register name. */
5975 int dex
= unicosmk_need_dex (x
);
5977 fprintf (file
, "DEX(%d)", dex
);
5979 output_addr_const (file
, x
);
5983 case 'C': case 'D': case 'c': case 'd':
5984 /* Write out comparison name. */
5986 enum rtx_code c
= GET_CODE (x
);
5988 if (GET_RTX_CLASS (c
) != '<')
5989 output_operand_lossage ("invalid %%C value");
5991 else if (code
== 'D')
5992 c
= reverse_condition (c
);
5993 else if (code
== 'c')
5994 c
= swap_condition (c
);
5995 else if (code
== 'd')
5996 c
= swap_condition (reverse_condition (c
));
5999 fprintf (file
, "ule");
6001 fprintf (file
, "ult");
6002 else if (c
== UNORDERED
)
6003 fprintf (file
, "un");
6005 fprintf (file
, "%s", GET_RTX_NAME (c
));
6010 /* Write the divide or modulus operator. */
6011 switch (GET_CODE (x
))
6014 fprintf (file
, "div%s", GET_MODE (x
) == SImode
? "l" : "q");
6017 fprintf (file
, "div%su", GET_MODE (x
) == SImode
? "l" : "q");
6020 fprintf (file
, "rem%s", GET_MODE (x
) == SImode
? "l" : "q");
6023 fprintf (file
, "rem%su", GET_MODE (x
) == SImode
? "l" : "q");
6026 output_operand_lossage ("invalid %%E value");
6032 /* Write "_u" for unaligned access. */
6033 if (GET_CODE (x
) == MEM
&& GET_CODE (XEXP (x
, 0)) == AND
)
6034 fprintf (file
, "_u");
6038 if (GET_CODE (x
) == REG
)
6039 fprintf (file
, "%s", reg_names
[REGNO (x
)]);
6040 else if (GET_CODE (x
) == MEM
)
6041 output_address (XEXP (x
, 0));
6042 else if (GET_CODE (x
) == CONST
&& GET_CODE (XEXP (x
, 0)) == UNSPEC
)
6044 switch (XINT (XEXP (x
, 0), 1))
6048 output_addr_const (file
, XVECEXP (XEXP (x
, 0), 0, 0));
6051 output_operand_lossage ("unknown relocation unspec");
6056 output_addr_const (file
, x
);
6060 output_operand_lossage ("invalid %%xn code");
6065 print_operand_address (file
, addr
)
6070 HOST_WIDE_INT offset
= 0;
6072 if (GET_CODE (addr
) == AND
)
6073 addr
= XEXP (addr
, 0);
6075 if (GET_CODE (addr
) == PLUS
6076 && GET_CODE (XEXP (addr
, 1)) == CONST_INT
)
6078 offset
= INTVAL (XEXP (addr
, 1));
6079 addr
= XEXP (addr
, 0);
6082 if (GET_CODE (addr
) == LO_SUM
)
6084 const char *reloc16
, *reloclo
;
6085 rtx op1
= XEXP (addr
, 1);
6087 if (GET_CODE (op1
) == CONST
&& GET_CODE (XEXP (op1
, 0)) == UNSPEC
)
6089 op1
= XEXP (op1
, 0);
6090 switch (XINT (op1
, 1))
6094 reloclo
= (alpha_tls_size
== 16 ? "dtprel" : "dtprello");
6098 reloclo
= (alpha_tls_size
== 16 ? "tprel" : "tprello");
6101 output_operand_lossage ("unknown relocation unspec");
6105 output_addr_const (file
, XVECEXP (op1
, 0, 0));
6110 reloclo
= "gprellow";
6111 output_addr_const (file
, op1
);
6117 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, offset
);
6120 addr
= XEXP (addr
, 0);
6121 if (GET_CODE (addr
) == REG
)
6122 basereg
= REGNO (addr
);
6123 else if (GET_CODE (addr
) == SUBREG
6124 && GET_CODE (SUBREG_REG (addr
)) == REG
)
6125 basereg
= subreg_regno (addr
);
6129 fprintf (file
, "($%d)\t\t!%s", basereg
,
6130 (basereg
== 29 ? reloc16
: reloclo
));
6134 if (GET_CODE (addr
) == REG
)
6135 basereg
= REGNO (addr
);
6136 else if (GET_CODE (addr
) == SUBREG
6137 && GET_CODE (SUBREG_REG (addr
)) == REG
)
6138 basereg
= subreg_regno (addr
);
6139 else if (GET_CODE (addr
) == CONST_INT
)
6140 offset
= INTVAL (addr
);
6142 #if TARGET_ABI_OPEN_VMS
6143 else if (GET_CODE (addr
) == SYMBOL_REF
)
6145 fprintf (file
, "%s", XSTR (addr
, 0));
6148 else if (GET_CODE (addr
) == CONST
6149 && GET_CODE (XEXP (addr
, 0)) == PLUS
6150 && GET_CODE (XEXP (XEXP (addr
, 0), 0)) == SYMBOL_REF
)
6152 fprintf (file
, "%s+%d",
6153 XSTR (XEXP (XEXP (addr
, 0), 0), 0),
6154 INTVAL (XEXP (XEXP (addr
, 0), 1)));
6162 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, offset
);
6163 fprintf (file
, "($%d)", basereg
);
6166 /* Emit RTL insns to initialize the variable parts of a trampoline at
6167 TRAMP. FNADDR is an RTX for the address of the function's pure
6168 code. CXT is an RTX for the static chain value for the function.
6170 The three offset parameters are for the individual template's
6171 layout. A JMPOFS < 0 indicates that the trampoline does not
6172 contain instructions at all.
6174 We assume here that a function will be called many more times than
6175 its address is taken (e.g., it might be passed to qsort), so we
6176 take the trouble to initialize the "hint" field in the JMP insn.
6177 Note that the hint field is PC (new) + 4 * bits 13:0. */
6180 alpha_initialize_trampoline (tramp
, fnaddr
, cxt
, fnofs
, cxtofs
, jmpofs
)
6181 rtx tramp
, fnaddr
, cxt
;
6182 int fnofs
, cxtofs
, jmpofs
;
6184 rtx temp
, temp1
, addr
;
6185 /* VMS really uses DImode pointers in memory at this point. */
6186 enum machine_mode mode
= TARGET_ABI_OPEN_VMS
? Pmode
: ptr_mode
;
6188 #ifdef POINTERS_EXTEND_UNSIGNED
6189 fnaddr
= convert_memory_address (mode
, fnaddr
);
6190 cxt
= convert_memory_address (mode
, cxt
);
6193 /* Store function address and CXT. */
6194 addr
= memory_address (mode
, plus_constant (tramp
, fnofs
));
6195 emit_move_insn (gen_rtx_MEM (mode
, addr
), fnaddr
);
6196 addr
= memory_address (mode
, plus_constant (tramp
, cxtofs
));
6197 emit_move_insn (gen_rtx_MEM (mode
, addr
), cxt
);
6199 /* This has been disabled since the hint only has a 32k range, and in
6200 no existing OS is the stack within 32k of the text segment. */
6201 if (0 && jmpofs
>= 0)
6203 /* Compute hint value. */
6204 temp
= force_operand (plus_constant (tramp
, jmpofs
+4), NULL_RTX
);
6205 temp
= expand_binop (DImode
, sub_optab
, fnaddr
, temp
, temp
, 1,
6207 temp
= expand_shift (RSHIFT_EXPR
, Pmode
, temp
,
6208 build_int_2 (2, 0), NULL_RTX
, 1);
6209 temp
= expand_and (SImode
, gen_lowpart (SImode
, temp
),
6210 GEN_INT (0x3fff), 0);
6212 /* Merge in the hint. */
6213 addr
= memory_address (SImode
, plus_constant (tramp
, jmpofs
));
6214 temp1
= force_reg (SImode
, gen_rtx_MEM (SImode
, addr
));
6215 temp1
= expand_and (SImode
, temp1
, GEN_INT (0xffffc000), NULL_RTX
);
6216 temp1
= expand_binop (SImode
, ior_optab
, temp1
, temp
, temp1
, 1,
6218 emit_move_insn (gen_rtx_MEM (SImode
, addr
), temp1
);
6221 #ifdef TRANSFER_FROM_TRAMPOLINE
6222 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
6223 0, VOIDmode
, 1, tramp
, Pmode
);
6227 emit_insn (gen_imb ());
6230 /* Determine where to put an argument to a function.
6231 Value is zero to push the argument on the stack,
6232 or a hard register in which to store the argument.
6234 MODE is the argument's machine mode.
6235 TYPE is the data type of the argument (as a tree).
6236 This is null for libcalls where that information may
6238 CUM is a variable of type CUMULATIVE_ARGS which gives info about
6239 the preceding args and about the function being called.
6240 NAMED is nonzero if this argument is a named parameter
6241 (otherwise it is an extra parameter matching an ellipsis).
6243 On Alpha the first 6 words of args are normally in registers
6244 and the rest are pushed. */
6247 function_arg (cum
, mode
, type
, named
)
6248 CUMULATIVE_ARGS cum
;
6249 enum machine_mode mode
;
6251 int named ATTRIBUTE_UNUSED
;
6256 /* Set up defaults for FP operands passed in FP registers, and
6257 integral operands passed in integer registers. */
6259 && (GET_MODE_CLASS (mode
) == MODE_COMPLEX_FLOAT
6260 || GET_MODE_CLASS (mode
) == MODE_FLOAT
))
6265 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
6266 the three platforms, so we can't avoid conditional compilation. */
6267 #if TARGET_ABI_OPEN_VMS
6269 if (mode
== VOIDmode
)
6270 return alpha_arg_info_reg_val (cum
);
6272 num_args
= cum
.num_args
;
6273 if (num_args
>= 6 || MUST_PASS_IN_STACK (mode
, type
))
6277 #if TARGET_ABI_UNICOSMK
6281 /* If this is the last argument, generate the call info word (CIW). */
6282 /* ??? We don't include the caller's line number in the CIW because
6283 I don't know how to determine it if debug infos are turned off. */
6284 if (mode
== VOIDmode
)
6293 for (i
= 0; i
< cum
.num_reg_words
&& i
< 5; i
++)
6294 if (cum
.reg_args_type
[i
])
6295 lo
|= (1 << (7 - i
));
6297 if (cum
.num_reg_words
== 6 && cum
.reg_args_type
[5])
6300 lo
|= cum
.num_reg_words
;
6302 #if HOST_BITS_PER_WIDE_INT == 32
6303 hi
= (cum
.num_args
<< 20) | cum
.num_arg_words
;
6305 lo
= lo
| ((HOST_WIDE_INT
) cum
.num_args
<< 52)
6306 | ((HOST_WIDE_INT
) cum
.num_arg_words
<< 32);
6309 ciw
= immed_double_const (lo
, hi
, DImode
);
6311 return gen_rtx_UNSPEC (DImode
, gen_rtvec (1, ciw
),
6312 UNSPEC_UMK_LOAD_CIW
);
6315 size
= ALPHA_ARG_SIZE (mode
, type
, named
);
6316 num_args
= cum
.num_reg_words
;
6317 if (MUST_PASS_IN_STACK (mode
, type
)
6318 || cum
.num_reg_words
+ size
> 6 || cum
.force_stack
)
6320 else if (type
&& TYPE_MODE (type
) == BLKmode
)
6324 reg1
= gen_rtx_REG (DImode
, num_args
+ 16);
6325 reg1
= gen_rtx_EXPR_LIST (DImode
, reg1
, const0_rtx
);
6327 /* The argument fits in two registers. Note that we still need to
6328 reserve a register for empty structures. */
6332 return gen_rtx_PARALLEL (mode
, gen_rtvec (1, reg1
));
6335 reg2
= gen_rtx_REG (DImode
, num_args
+ 17);
6336 reg2
= gen_rtx_EXPR_LIST (DImode
, reg2
, GEN_INT (8));
6337 return gen_rtx_PARALLEL (mode
, gen_rtvec (2, reg1
, reg2
));
6347 /* VOID is passed as a special flag for "last argument". */
6348 if (type
== void_type_node
)
6350 else if (MUST_PASS_IN_STACK (mode
, type
))
6352 else if (FUNCTION_ARG_PASS_BY_REFERENCE (cum
, mode
, type
, named
))
6355 #endif /* TARGET_ABI_UNICOSMK */
6356 #endif /* TARGET_ABI_OPEN_VMS */
6358 return gen_rtx_REG (mode
, num_args
+ basereg
);
6362 alpha_build_va_list ()
6364 tree base
, ofs
, record
, type_decl
;
6366 if (TARGET_ABI_OPEN_VMS
|| TARGET_ABI_UNICOSMK
)
6367 return ptr_type_node
;
6369 record
= (*lang_hooks
.types
.make_type
) (RECORD_TYPE
);
6370 type_decl
= build_decl (TYPE_DECL
, get_identifier ("__va_list_tag"), record
);
6371 TREE_CHAIN (record
) = type_decl
;
6372 TYPE_NAME (record
) = type_decl
;
6374 /* C++? SET_IS_AGGR_TYPE (record, 1); */
6376 ofs
= build_decl (FIELD_DECL
, get_identifier ("__offset"),
6378 DECL_FIELD_CONTEXT (ofs
) = record
;
6380 base
= build_decl (FIELD_DECL
, get_identifier ("__base"),
6382 DECL_FIELD_CONTEXT (base
) = record
;
6383 TREE_CHAIN (base
) = ofs
;
6385 TYPE_FIELDS (record
) = base
;
6386 layout_type (record
);
6391 /* Perform any needed actions needed for a function that is receiving a
6392 variable number of arguments.
6394 On the Alpha, we allocate space for all 12 arg registers, but only
6395 push those that are remaining. However, if NO registers need to be
6396 saved, don't allocate any space. This is not only because we won't
6397 need the space, but because AP includes the current_pretend_args_size
6398 and we don't want to mess up any ap-relative addresses already made.
6400 If we are not to use the floating-point registers, save the integer
6401 registers where we would put the floating-point registers. This is
6402 not the most efficient way to implement varargs with just one register
6403 class, but it isn't worth doing anything more efficient in this rare
6407 alpha_setup_incoming_varargs(cum
, mode
, type
, pretend_size
, no_rtl
)
6408 CUMULATIVE_ARGS cum
;
6409 enum machine_mode mode ATTRIBUTE_UNUSED
;
6410 tree type ATTRIBUTE_UNUSED
;
6419 int set
= get_varargs_alias_set ();
6422 tmp
= gen_rtx_MEM (BLKmode
,
6423 plus_constant (virtual_incoming_args_rtx
,
6424 (cum
+ 6) * UNITS_PER_WORD
));
6425 set_mem_alias_set (tmp
, set
);
6426 move_block_from_reg (16 + cum
, tmp
, 6 - cum
);
6428 tmp
= gen_rtx_MEM (BLKmode
,
6429 plus_constant (virtual_incoming_args_rtx
,
6430 cum
* UNITS_PER_WORD
));
6431 set_mem_alias_set (tmp
, set
);
6432 move_block_from_reg (16 + (TARGET_FPREGS
? 32 : 0) + cum
, tmp
,
6435 *pretend_size
= 12 * UNITS_PER_WORD
;
6439 alpha_va_start (valist
, nextarg
)
6441 rtx nextarg ATTRIBUTE_UNUSED
;
6443 HOST_WIDE_INT offset
;
6444 tree t
, offset_field
, base_field
;
6446 if (TREE_CODE (TREE_TYPE (valist
)) == ERROR_MARK
)
6449 if (TARGET_ABI_UNICOSMK
)
6450 std_expand_builtin_va_start (valist
, nextarg
);
6452 /* For Unix, SETUP_INCOMING_VARARGS moves the starting address base
6453 up by 48, storing fp arg registers in the first 48 bytes, and the
6454 integer arg registers in the next 48 bytes. This is only done,
6455 however, if any integer registers need to be stored.
6457 If no integer registers need be stored, then we must subtract 48
6458 in order to account for the integer arg registers which are counted
6459 in argsize above, but which are not actually stored on the stack.
6460 Must further be careful here about structures straddling the last
6461 integer argument register; that futzes with pretend_args_size,
6462 which changes the meaning of AP. */
6465 offset
= TARGET_ABI_OPEN_VMS
? UNITS_PER_WORD
: 6 * UNITS_PER_WORD
;
6467 offset
= -6 * UNITS_PER_WORD
+ current_function_pretend_args_size
;
6469 if (TARGET_ABI_OPEN_VMS
)
6471 nextarg
= plus_constant (nextarg
, offset
);
6472 nextarg
= plus_constant (nextarg
, NUM_ARGS
* UNITS_PER_WORD
);
6473 t
= build (MODIFY_EXPR
, TREE_TYPE (valist
), valist
,
6474 make_tree (ptr_type_node
, nextarg
));
6475 TREE_SIDE_EFFECTS (t
) = 1;
6477 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
6481 base_field
= TYPE_FIELDS (TREE_TYPE (valist
));
6482 offset_field
= TREE_CHAIN (base_field
);
6484 base_field
= build (COMPONENT_REF
, TREE_TYPE (base_field
),
6485 valist
, base_field
);
6486 offset_field
= build (COMPONENT_REF
, TREE_TYPE (offset_field
),
6487 valist
, offset_field
);
6489 t
= make_tree (ptr_type_node
, virtual_incoming_args_rtx
);
6490 t
= build (PLUS_EXPR
, ptr_type_node
, t
, build_int_2 (offset
, 0));
6491 t
= build (MODIFY_EXPR
, TREE_TYPE (base_field
), base_field
, t
);
6492 TREE_SIDE_EFFECTS (t
) = 1;
6493 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
6495 t
= build_int_2 (NUM_ARGS
* UNITS_PER_WORD
, 0);
6496 t
= build (MODIFY_EXPR
, TREE_TYPE (offset_field
), offset_field
, t
);
6497 TREE_SIDE_EFFECTS (t
) = 1;
6498 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
6503 alpha_va_arg (valist
, type
)
6507 tree t
, type_size
, rounded_size
;
6508 tree offset_field
, base_field
, addr_tree
, addend
;
6509 tree wide_type
, wide_ofs
;
6512 if (TARGET_ABI_OPEN_VMS
|| TARGET_ABI_UNICOSMK
)
6513 return std_expand_builtin_va_arg (valist
, type
);
6515 if (type
== error_mark_node
6516 || (type_size
= TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type
))) == NULL
6517 || TREE_OVERFLOW (type_size
))
6518 rounded_size
= size_zero_node
;
6520 rounded_size
= fold (build (MULT_EXPR
, sizetype
,
6521 fold (build (TRUNC_DIV_EXPR
, sizetype
,
6522 fold (build (PLUS_EXPR
, sizetype
,
6528 base_field
= TYPE_FIELDS (TREE_TYPE (valist
));
6529 offset_field
= TREE_CHAIN (base_field
);
6531 base_field
= build (COMPONENT_REF
, TREE_TYPE (base_field
),
6532 valist
, base_field
);
6533 offset_field
= build (COMPONENT_REF
, TREE_TYPE (offset_field
),
6534 valist
, offset_field
);
6536 /* If the type could not be passed in registers, skip the block
6537 reserved for the registers. */
6538 if (MUST_PASS_IN_STACK (TYPE_MODE (type
), type
))
6540 t
= build (MODIFY_EXPR
, TREE_TYPE (offset_field
), offset_field
,
6541 build (MAX_EXPR
, TREE_TYPE (offset_field
),
6542 offset_field
, build_int_2 (6*8, 0)));
6543 TREE_SIDE_EFFECTS (t
) = 1;
6544 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
6547 wide_type
= make_signed_type (64);
6548 wide_ofs
= save_expr (build1 (CONVERT_EXPR
, wide_type
, offset_field
));
6552 if (TYPE_MODE (type
) == TFmode
|| TYPE_MODE (type
) == TCmode
)
6555 rounded_size
= size_int (UNITS_PER_WORD
);
6557 else if (FLOAT_TYPE_P (type
))
6559 tree fpaddend
, cond
;
6561 fpaddend
= fold (build (PLUS_EXPR
, TREE_TYPE (addend
),
6562 addend
, build_int_2 (-6*8, 0)));
6564 cond
= fold (build (LT_EXPR
, integer_type_node
,
6565 wide_ofs
, build_int_2 (6*8, 0)));
6567 addend
= fold (build (COND_EXPR
, TREE_TYPE (addend
), cond
,
6571 addr_tree
= build (PLUS_EXPR
, TREE_TYPE (base_field
),
6572 base_field
, addend
);
6574 addr
= expand_expr (addr_tree
, NULL_RTX
, Pmode
, EXPAND_NORMAL
);
6575 addr
= copy_to_reg (addr
);
6577 t
= build (MODIFY_EXPR
, TREE_TYPE (offset_field
), offset_field
,
6578 build (PLUS_EXPR
, TREE_TYPE (offset_field
),
6579 offset_field
, rounded_size
));
6580 TREE_SIDE_EFFECTS (t
) = 1;
6581 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
6585 addr
= force_reg (Pmode
, addr
);
6586 addr
= gen_rtx_MEM (Pmode
, addr
);
6596 ALPHA_BUILTIN_CMPBGE
,
6597 ALPHA_BUILTIN_EXTBL
,
6598 ALPHA_BUILTIN_EXTWL
,
6599 ALPHA_BUILTIN_EXTLL
,
6600 ALPHA_BUILTIN_EXTQL
,
6601 ALPHA_BUILTIN_EXTWH
,
6602 ALPHA_BUILTIN_EXTLH
,
6603 ALPHA_BUILTIN_EXTQH
,
6604 ALPHA_BUILTIN_INSBL
,
6605 ALPHA_BUILTIN_INSWL
,
6606 ALPHA_BUILTIN_INSLL
,
6607 ALPHA_BUILTIN_INSQL
,
6608 ALPHA_BUILTIN_INSWH
,
6609 ALPHA_BUILTIN_INSLH
,
6610 ALPHA_BUILTIN_INSQH
,
6611 ALPHA_BUILTIN_MSKBL
,
6612 ALPHA_BUILTIN_MSKWL
,
6613 ALPHA_BUILTIN_MSKLL
,
6614 ALPHA_BUILTIN_MSKQL
,
6615 ALPHA_BUILTIN_MSKWH
,
6616 ALPHA_BUILTIN_MSKLH
,
6617 ALPHA_BUILTIN_MSKQH
,
6618 ALPHA_BUILTIN_UMULH
,
6620 ALPHA_BUILTIN_ZAPNOT
,
6621 ALPHA_BUILTIN_AMASK
,
6622 ALPHA_BUILTIN_IMPLVER
,
6624 ALPHA_BUILTIN_THREAD_POINTER
,
6625 ALPHA_BUILTIN_SET_THREAD_POINTER
,
6628 ALPHA_BUILTIN_MINUB8
,
6629 ALPHA_BUILTIN_MINSB8
,
6630 ALPHA_BUILTIN_MINUW4
,
6631 ALPHA_BUILTIN_MINSW4
,
6632 ALPHA_BUILTIN_MAXUB8
,
6633 ALPHA_BUILTIN_MAXSB8
,
6634 ALPHA_BUILTIN_MAXUW4
,
6635 ALPHA_BUILTIN_MAXSW4
,
6639 ALPHA_BUILTIN_UNPKBL
,
6640 ALPHA_BUILTIN_UNPKBW
,
6645 ALPHA_BUILTIN_CTPOP
,
6650 static unsigned int const code_for_builtin
[ALPHA_BUILTIN_max
] = {
6651 CODE_FOR_builtin_cmpbge
,
6652 CODE_FOR_builtin_extbl
,
6653 CODE_FOR_builtin_extwl
,
6654 CODE_FOR_builtin_extll
,
6655 CODE_FOR_builtin_extql
,
6656 CODE_FOR_builtin_extwh
,
6657 CODE_FOR_builtin_extlh
,
6658 CODE_FOR_builtin_extqh
,
6659 CODE_FOR_builtin_insbl
,
6660 CODE_FOR_builtin_inswl
,
6661 CODE_FOR_builtin_insll
,
6662 CODE_FOR_builtin_insql
,
6663 CODE_FOR_builtin_inswh
,
6664 CODE_FOR_builtin_inslh
,
6665 CODE_FOR_builtin_insqh
,
6666 CODE_FOR_builtin_mskbl
,
6667 CODE_FOR_builtin_mskwl
,
6668 CODE_FOR_builtin_mskll
,
6669 CODE_FOR_builtin_mskql
,
6670 CODE_FOR_builtin_mskwh
,
6671 CODE_FOR_builtin_msklh
,
6672 CODE_FOR_builtin_mskqh
,
6673 CODE_FOR_umuldi3_highpart
,
6674 CODE_FOR_builtin_zap
,
6675 CODE_FOR_builtin_zapnot
,
6676 CODE_FOR_builtin_amask
,
6677 CODE_FOR_builtin_implver
,
6678 CODE_FOR_builtin_rpcc
,
6683 CODE_FOR_builtin_minub8
,
6684 CODE_FOR_builtin_minsb8
,
6685 CODE_FOR_builtin_minuw4
,
6686 CODE_FOR_builtin_minsw4
,
6687 CODE_FOR_builtin_maxub8
,
6688 CODE_FOR_builtin_maxsb8
,
6689 CODE_FOR_builtin_maxuw4
,
6690 CODE_FOR_builtin_maxsw4
,
6691 CODE_FOR_builtin_perr
,
6692 CODE_FOR_builtin_pklb
,
6693 CODE_FOR_builtin_pkwb
,
6694 CODE_FOR_builtin_unpkbl
,
6695 CODE_FOR_builtin_unpkbw
,
6698 CODE_FOR_builtin_cttz
,
6699 CODE_FOR_builtin_ctlz
,
6700 CODE_FOR_builtin_ctpop
6703 struct alpha_builtin_def
6706 enum alpha_builtin code
;
6707 unsigned int target_mask
;
6710 static struct alpha_builtin_def
const zero_arg_builtins
[] = {
6711 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER
, 0 },
6712 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC
, 0 }
6715 static struct alpha_builtin_def
const one_arg_builtins
[] = {
6716 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK
, 0 },
6717 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB
, MASK_MAX
},
6718 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB
, MASK_MAX
},
6719 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL
, MASK_MAX
},
6720 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW
, MASK_MAX
},
6721 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ
, MASK_CIX
},
6722 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ
, MASK_CIX
},
6723 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP
, MASK_CIX
}
6726 static struct alpha_builtin_def
const two_arg_builtins
[] = {
6727 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE
, 0 },
6728 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL
, 0 },
6729 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL
, 0 },
6730 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL
, 0 },
6731 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL
, 0 },
6732 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH
, 0 },
6733 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH
, 0 },
6734 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH
, 0 },
6735 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL
, 0 },
6736 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL
, 0 },
6737 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL
, 0 },
6738 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL
, 0 },
6739 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH
, 0 },
6740 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH
, 0 },
6741 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH
, 0 },
6742 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL
, 0 },
6743 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL
, 0 },
6744 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL
, 0 },
6745 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL
, 0 },
6746 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH
, 0 },
6747 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH
, 0 },
6748 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH
, 0 },
6749 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH
, 0 },
6750 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP
, 0 },
6751 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT
, 0 },
6752 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8
, MASK_MAX
},
6753 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8
, MASK_MAX
},
6754 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4
, MASK_MAX
},
6755 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4
, MASK_MAX
},
6756 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8
, MASK_MAX
},
6757 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8
, MASK_MAX
},
6758 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4
, MASK_MAX
},
6759 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4
, MASK_MAX
},
6760 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR
, MASK_MAX
}
6764 alpha_init_builtins ()
6766 const struct alpha_builtin_def
*p
;
6770 ftype
= build_function_type (long_integer_type_node
, void_list_node
);
6772 p
= zero_arg_builtins
;
6773 for (i
= 0; i
< ARRAY_SIZE (zero_arg_builtins
); ++i
, ++p
)
6774 if ((target_flags
& p
->target_mask
) == p
->target_mask
)
6775 builtin_function (p
->name
, ftype
, p
->code
, BUILT_IN_MD
,
6778 ftype
= build_function_type_list (long_integer_type_node
,
6779 long_integer_type_node
, NULL_TREE
);
6781 p
= one_arg_builtins
;
6782 for (i
= 0; i
< ARRAY_SIZE (one_arg_builtins
); ++i
, ++p
)
6783 if ((target_flags
& p
->target_mask
) == p
->target_mask
)
6784 builtin_function (p
->name
, ftype
, p
->code
, BUILT_IN_MD
,
6787 ftype
= build_function_type_list (long_integer_type_node
,
6788 long_integer_type_node
,
6789 long_integer_type_node
, NULL_TREE
);
6791 p
= two_arg_builtins
;
6792 for (i
= 0; i
< ARRAY_SIZE (two_arg_builtins
); ++i
, ++p
)
6793 if ((target_flags
& p
->target_mask
) == p
->target_mask
)
6794 builtin_function (p
->name
, ftype
, p
->code
, BUILT_IN_MD
,
6797 ftype
= build_function_type (ptr_type_node
, void_list_node
);
6798 builtin_function ("__builtin_thread_pointer", ftype
,
6799 ALPHA_BUILTIN_THREAD_POINTER
, BUILT_IN_MD
,
6802 ftype
= build_function_type_list (void_type_node
, ptr_type_node
, NULL_TREE
);
6803 builtin_function ("__builtin_set_thread_pointer", ftype
,
6804 ALPHA_BUILTIN_SET_THREAD_POINTER
, BUILT_IN_MD
,
6808 /* Expand an expression EXP that calls a built-in function,
6809 with result going to TARGET if that's convenient
6810 (and in mode MODE if that's convenient).
6811 SUBTARGET may be used as the target for computing one of EXP's operands.
6812 IGNORE is nonzero if the value is to be ignored. */
6815 alpha_expand_builtin (exp
, target
, subtarget
, mode
, ignore
)
6818 rtx subtarget ATTRIBUTE_UNUSED
;
6819 enum machine_mode mode ATTRIBUTE_UNUSED
;
6820 int ignore ATTRIBUTE_UNUSED
;
6824 tree fndecl
= TREE_OPERAND (TREE_OPERAND (exp
, 0), 0);
6825 unsigned int fcode
= DECL_FUNCTION_CODE (fndecl
);
6826 tree arglist
= TREE_OPERAND (exp
, 1);
6827 enum insn_code icode
;
6828 rtx op
[MAX_ARGS
], pat
;
6832 if (fcode
>= ALPHA_BUILTIN_max
)
6833 internal_error ("bad builtin fcode");
6834 icode
= code_for_builtin
[fcode
];
6836 internal_error ("bad builtin fcode");
6838 nonvoid
= TREE_TYPE (TREE_TYPE (fndecl
)) != void_type_node
;
6840 for (arglist
= TREE_OPERAND (exp
, 1), arity
= 0;
6842 arglist
= TREE_CHAIN (arglist
), arity
++)
6844 const struct insn_operand_data
*insn_op
;
6846 tree arg
= TREE_VALUE (arglist
);
6847 if (arg
== error_mark_node
)
6849 if (arity
> MAX_ARGS
)
6852 insn_op
= &insn_data
[icode
].operand
[arity
+ nonvoid
];
6854 op
[arity
] = expand_expr (arg
, NULL_RTX
, insn_op
->mode
, 0);
6856 if (!(*insn_op
->predicate
) (op
[arity
], insn_op
->mode
))
6857 op
[arity
] = copy_to_mode_reg (insn_op
->mode
, op
[arity
]);
6862 enum machine_mode tmode
= insn_data
[icode
].operand
[0].mode
;
6864 || GET_MODE (target
) != tmode
6865 || !(*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
6866 target
= gen_reg_rtx (tmode
);
6872 pat
= GEN_FCN (icode
) (target
);
6876 pat
= GEN_FCN (icode
) (target
, op
[0]);
6878 pat
= GEN_FCN (icode
) (op
[0]);
6881 pat
= GEN_FCN (icode
) (target
, op
[0], op
[1]);
6896 /* This page contains routines that are used to determine what the function
6897 prologue and epilogue code will do and write them out. */
6899 /* Compute the size of the save area in the stack. */
6901 /* These variables are used for communication between the following functions.
6902 They indicate various things about the current function being compiled
6903 that are used to tell what kind of prologue, epilogue and procedure
6904 descriptior to generate. */
6906 /* Nonzero if we need a stack procedure. */
6907 enum alpha_procedure_types
{PT_NULL
= 0, PT_REGISTER
= 1, PT_STACK
= 2};
6908 static enum alpha_procedure_types alpha_procedure_type
;
6910 /* Register number (either FP or SP) that is used to unwind the frame. */
6911 static int vms_unwind_regno
;
6913 /* Register number used to save FP. We need not have one for RA since
6914 we don't modify it for register procedures. This is only defined
6915 for register frame procedures. */
6916 static int vms_save_fp_regno
;
6918 /* Register number used to reference objects off our PV. */
6919 static int vms_base_regno
;
6921 /* Compute register masks for saved registers. */
6924 alpha_sa_mask (imaskP
, fmaskP
)
6925 unsigned long *imaskP
;
6926 unsigned long *fmaskP
;
6928 unsigned long imask
= 0;
6929 unsigned long fmask
= 0;
6932 /* Irritatingly, there are two kinds of thunks -- those created with
6933 TARGET_ASM_OUTPUT_MI_THUNK and those with DECL_THUNK_P that go
6934 through the regular part of the compiler. In the
6935 TARGET_ASM_OUTPUT_MI_THUNK case we don't have valid register life
6936 info, but assemble_start_function wants to output .frame and
6937 .mask directives. */
6938 if (current_function_is_thunk
&& !no_new_pseudos
)
6945 if (TARGET_ABI_OPEN_VMS
&& alpha_procedure_type
== PT_STACK
)
6946 imask
|= (1UL << HARD_FRAME_POINTER_REGNUM
);
6948 /* One for every register we have to save. */
6949 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
6950 if (! fixed_regs
[i
] && ! call_used_regs
[i
]
6951 && regs_ever_live
[i
] && i
!= REG_RA
6952 && (!TARGET_ABI_UNICOSMK
|| i
!= HARD_FRAME_POINTER_REGNUM
))
6955 imask
|= (1UL << i
);
6957 fmask
|= (1UL << (i
- 32));
6960 /* We need to restore these for the handler. */
6961 if (current_function_calls_eh_return
)
6964 unsigned regno
= EH_RETURN_DATA_REGNO (i
);
6965 if (regno
== INVALID_REGNUM
)
6967 imask
|= 1UL << regno
;
6970 /* If any register spilled, then spill the return address also. */
6971 /* ??? This is required by the Digital stack unwind specification
6972 and isn't needed if we're doing Dwarf2 unwinding. */
6973 if (imask
|| fmask
|| alpha_ra_ever_killed ())
6974 imask
|= (1UL << REG_RA
);
6983 unsigned long mask
[2];
6987 alpha_sa_mask (&mask
[0], &mask
[1]);
6989 if (TARGET_ABI_UNICOSMK
)
6991 if (mask
[0] || mask
[1])
6996 for (j
= 0; j
< 2; ++j
)
6997 for (i
= 0; i
< 32; ++i
)
6998 if ((mask
[j
] >> i
) & 1)
7002 if (TARGET_ABI_UNICOSMK
)
7004 /* We might not need to generate a frame if we don't make any calls
7005 (including calls to __T3E_MISMATCH if this is a vararg function),
7006 don't have any local variables which require stack slots, don't
7007 use alloca and have not determined that we need a frame for other
7010 alpha_procedure_type
7011 = (sa_size
|| get_frame_size() != 0
7012 || current_function_outgoing_args_size
7013 || current_function_stdarg
|| current_function_calls_alloca
7014 || frame_pointer_needed
)
7015 ? PT_STACK
: PT_REGISTER
;
7017 /* Always reserve space for saving callee-saved registers if we
7018 need a frame as required by the calling convention. */
7019 if (alpha_procedure_type
== PT_STACK
)
7022 else if (TARGET_ABI_OPEN_VMS
)
7024 /* Start by assuming we can use a register procedure if we don't
7025 make any calls (REG_RA not used) or need to save any
7026 registers and a stack procedure if we do. */
7027 if ((mask
[0] >> REG_RA
) & 1)
7028 alpha_procedure_type
= PT_STACK
;
7029 else if (get_frame_size() != 0)
7030 alpha_procedure_type
= PT_REGISTER
;
7032 alpha_procedure_type
= PT_NULL
;
7034 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7035 made the final decision on stack procedure vs register procedure. */
7036 if (alpha_procedure_type
== PT_STACK
)
7039 /* Decide whether to refer to objects off our PV via FP or PV.
7040 If we need FP for something else or if we receive a nonlocal
7041 goto (which expects PV to contain the value), we must use PV.
7042 Otherwise, start by assuming we can use FP. */
7045 = (frame_pointer_needed
7046 || current_function_has_nonlocal_label
7047 || alpha_procedure_type
== PT_STACK
7048 || current_function_outgoing_args_size
)
7049 ? REG_PV
: HARD_FRAME_POINTER_REGNUM
;
7051 /* If we want to copy PV into FP, we need to find some register
7052 in which to save FP. */
7054 vms_save_fp_regno
= -1;
7055 if (vms_base_regno
== HARD_FRAME_POINTER_REGNUM
)
7056 for (i
= 0; i
< 32; i
++)
7057 if (! fixed_regs
[i
] && call_used_regs
[i
] && ! regs_ever_live
[i
])
7058 vms_save_fp_regno
= i
;
7060 if (vms_save_fp_regno
== -1 && alpha_procedure_type
== PT_REGISTER
)
7061 vms_base_regno
= REG_PV
, alpha_procedure_type
= PT_STACK
;
7062 else if (alpha_procedure_type
== PT_NULL
)
7063 vms_base_regno
= REG_PV
;
7065 /* Stack unwinding should be done via FP unless we use it for PV. */
7066 vms_unwind_regno
= (vms_base_regno
== REG_PV
7067 ? HARD_FRAME_POINTER_REGNUM
: STACK_POINTER_REGNUM
);
7069 /* If this is a stack procedure, allow space for saving FP and RA. */
7070 if (alpha_procedure_type
== PT_STACK
)
7075 /* Our size must be even (multiple of 16 bytes). */
7083 /* Define the offset between two registers, one to be eliminated,
7084 and the other its replacement, at the start of a routine. */
7087 alpha_initial_elimination_offset (from
, to
)
7088 unsigned int from
, to ATTRIBUTE_UNUSED
;
7092 ret
= alpha_sa_size ();
7093 ret
+= ALPHA_ROUND (current_function_outgoing_args_size
);
7095 if (from
== FRAME_POINTER_REGNUM
)
7097 else if (from
== ARG_POINTER_REGNUM
)
7098 ret
+= (ALPHA_ROUND (get_frame_size ()
7099 + current_function_pretend_args_size
)
7100 - current_function_pretend_args_size
);
7108 alpha_pv_save_size ()
7111 return alpha_procedure_type
== PT_STACK
? 8 : 0;
7118 return vms_unwind_regno
== HARD_FRAME_POINTER_REGNUM
;
7121 #if TARGET_ABI_OPEN_VMS
7123 const struct attribute_spec vms_attribute_table
[] =
7125 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
7126 { "overlaid", 0, 0, true, false, false, NULL
},
7127 { "global", 0, 0, true, false, false, NULL
},
7128 { "initialize", 0, 0, true, false, false, NULL
},
7129 { NULL
, 0, 0, false, false, false, NULL
}
7135 find_lo_sum_using_gp (px
, data
)
7137 void *data ATTRIBUTE_UNUSED
;
7139 return GET_CODE (*px
) == LO_SUM
&& XEXP (*px
, 0) == pic_offset_table_rtx
;
7143 alpha_find_lo_sum_using_gp (insn
)
7146 return for_each_rtx (&PATTERN (insn
), find_lo_sum_using_gp
, NULL
) > 0;
7150 alpha_does_function_need_gp ()
7154 /* The GP being variable is an OSF abi thing. */
7155 if (! TARGET_ABI_OSF
)
7158 if (TARGET_PROFILING_NEEDS_GP
&& current_function_profile
)
7161 if (current_function_is_thunk
)
7164 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7165 Even if we are a static function, we still need to do this in case
7166 our address is taken and passed to something like qsort. */
7168 push_topmost_sequence ();
7169 insn
= get_insns ();
7170 pop_topmost_sequence ();
7172 for (; insn
; insn
= NEXT_INSN (insn
))
7174 && GET_CODE (PATTERN (insn
)) != USE
7175 && GET_CODE (PATTERN (insn
)) != CLOBBER
7176 && get_attr_usegp (insn
))
7182 /* Write a version stamp. Don't write anything if we are running as a
7183 cross-compiler. Otherwise, use the versions in /usr/include/stamp.h. */
7190 alpha_write_verstamp (file
)
7191 FILE *file ATTRIBUTE_UNUSED
;
7194 fprintf (file
, "\t.verstamp %d %d\n", MS_STAMP
, LS_STAMP
);
7198 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7202 set_frame_related_p ()
7204 rtx seq
= get_insns ();
7215 while (insn
!= NULL_RTX
)
7217 RTX_FRAME_RELATED_P (insn
) = 1;
7218 insn
= NEXT_INSN (insn
);
7220 seq
= emit_insn (seq
);
7224 seq
= emit_insn (seq
);
7225 RTX_FRAME_RELATED_P (seq
) = 1;
7230 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
7232 /* Write function prologue. */
7234 /* On vms we have two kinds of functions:
7236 - stack frame (PROC_STACK)
7237 these are 'normal' functions with local vars and which are
7238 calling other functions
7239 - register frame (PROC_REGISTER)
7240 keeps all data in registers, needs no stack
7242 We must pass this to the assembler so it can generate the
7243 proper pdsc (procedure descriptor)
7244 This is done with the '.pdesc' command.
7246 On not-vms, we don't really differentiate between the two, as we can
7247 simply allocate stack without saving registers. */
7250 alpha_expand_prologue ()
7252 /* Registers to save. */
7253 unsigned long imask
= 0;
7254 unsigned long fmask
= 0;
7255 /* Stack space needed for pushing registers clobbered by us. */
7256 HOST_WIDE_INT sa_size
;
7257 /* Complete stack size needed. */
7258 HOST_WIDE_INT frame_size
;
7259 /* Offset from base reg to register save area. */
7260 HOST_WIDE_INT reg_offset
;
7264 sa_size
= alpha_sa_size ();
7266 frame_size
= get_frame_size ();
7267 if (TARGET_ABI_OPEN_VMS
)
7268 frame_size
= ALPHA_ROUND (sa_size
7269 + (alpha_procedure_type
== PT_STACK
? 8 : 0)
7271 + current_function_pretend_args_size
);
7272 else if (TARGET_ABI_UNICOSMK
)
7273 /* We have to allocate space for the DSIB if we generate a frame. */
7274 frame_size
= ALPHA_ROUND (sa_size
7275 + (alpha_procedure_type
== PT_STACK
? 48 : 0))
7276 + ALPHA_ROUND (frame_size
7277 + current_function_outgoing_args_size
);
7279 frame_size
= (ALPHA_ROUND (current_function_outgoing_args_size
)
7281 + ALPHA_ROUND (frame_size
7282 + current_function_pretend_args_size
));
7284 if (TARGET_ABI_OPEN_VMS
)
7287 reg_offset
= ALPHA_ROUND (current_function_outgoing_args_size
);
7289 alpha_sa_mask (&imask
, &fmask
);
7291 /* Emit an insn to reload GP, if needed. */
7294 alpha_function_needs_gp
= alpha_does_function_need_gp ();
7295 if (alpha_function_needs_gp
)
7296 emit_insn (gen_prologue_ldgp ());
7299 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7300 the call to mcount ourselves, rather than having the linker do it
7301 magically in response to -pg. Since _mcount has special linkage,
7302 don't represent the call as a call. */
7303 if (TARGET_PROFILING_NEEDS_GP
&& current_function_profile
)
7304 emit_insn (gen_prologue_mcount ());
7306 if (TARGET_ABI_UNICOSMK
)
7307 unicosmk_gen_dsib (&imask
);
7309 /* Adjust the stack by the frame size. If the frame size is > 4096
7310 bytes, we need to be sure we probe somewhere in the first and last
7311 4096 bytes (we can probably get away without the latter test) and
7312 every 8192 bytes in between. If the frame size is > 32768, we
7313 do this in a loop. Otherwise, we generate the explicit probe
7316 Note that we are only allowed to adjust sp once in the prologue. */
7318 if (frame_size
<= 32768)
7320 if (frame_size
> 4096)
7325 emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
7328 while ((probed
+= 8192) < frame_size
);
7330 /* We only have to do this probe if we aren't saving registers. */
7331 if (sa_size
== 0 && probed
+ 4096 < frame_size
)
7332 emit_insn (gen_probe_stack (GEN_INT (-frame_size
)));
7335 if (frame_size
!= 0)
7336 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx
, stack_pointer_rtx
,
7337 GEN_INT (TARGET_ABI_UNICOSMK
7343 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7344 number of 8192 byte blocks to probe. We then probe each block
7345 in the loop and then set SP to the proper location. If the
7346 amount remaining is > 4096, we have to do one more probe if we
7347 are not saving any registers. */
7349 HOST_WIDE_INT blocks
= (frame_size
+ 4096) / 8192;
7350 HOST_WIDE_INT leftover
= frame_size
+ 4096 - blocks
* 8192;
7351 rtx ptr
= gen_rtx_REG (DImode
, 22);
7352 rtx count
= gen_rtx_REG (DImode
, 23);
7355 emit_move_insn (count
, GEN_INT (blocks
));
7356 emit_insn (gen_adddi3 (ptr
, stack_pointer_rtx
,
7357 GEN_INT (TARGET_ABI_UNICOSMK
? 4096 - 64 : 4096)));
7359 /* Because of the difficulty in emitting a new basic block this
7360 late in the compilation, generate the loop as a single insn. */
7361 emit_insn (gen_prologue_stack_probe_loop (count
, ptr
));
7363 if (leftover
> 4096 && sa_size
== 0)
7365 rtx last
= gen_rtx_MEM (DImode
, plus_constant (ptr
, -leftover
));
7366 MEM_VOLATILE_P (last
) = 1;
7367 emit_move_insn (last
, const0_rtx
);
7370 if (TARGET_ABI_WINDOWS_NT
)
7372 /* For NT stack unwind (done by 'reverse execution'), it's
7373 not OK to take the result of a loop, even though the value
7374 is already in ptr, so we reload it via a single operation
7375 and subtract it to sp.
7377 Yes, that's correct -- we have to reload the whole constant
7378 into a temporary via ldah+lda then subtract from sp. To
7379 ensure we get ldah+lda, we use a special pattern. */
7381 HOST_WIDE_INT lo
, hi
;
7382 lo
= ((frame_size
& 0xffff) ^ 0x8000) - 0x8000;
7383 hi
= frame_size
- lo
;
7385 emit_move_insn (ptr
, GEN_INT (hi
));
7386 emit_insn (gen_nt_lda (ptr
, GEN_INT (lo
)));
7387 seq
= emit_insn (gen_subdi3 (stack_pointer_rtx
, stack_pointer_rtx
,
7392 seq
= emit_insn (gen_adddi3 (stack_pointer_rtx
, ptr
,
7393 GEN_INT (-leftover
)));
7396 /* This alternative is special, because the DWARF code cannot
7397 possibly intuit through the loop above. So we invent this
7398 note it looks at instead. */
7399 RTX_FRAME_RELATED_P (seq
) = 1;
7401 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR
,
7402 gen_rtx_SET (VOIDmode
, stack_pointer_rtx
,
7403 gen_rtx_PLUS (Pmode
, stack_pointer_rtx
,
7404 GEN_INT (TARGET_ABI_UNICOSMK
7410 if (!TARGET_ABI_UNICOSMK
)
7412 /* Cope with very large offsets to the register save area. */
7413 sa_reg
= stack_pointer_rtx
;
7414 if (reg_offset
+ sa_size
> 0x8000)
7416 int low
= ((reg_offset
& 0xffff) ^ 0x8000) - 0x8000;
7419 if (low
+ sa_size
<= 0x8000)
7420 bias
= reg_offset
- low
, reg_offset
= low
;
7422 bias
= reg_offset
, reg_offset
= 0;
7424 sa_reg
= gen_rtx_REG (DImode
, 24);
7425 FRP (emit_insn (gen_adddi3 (sa_reg
, stack_pointer_rtx
,
7429 /* Save regs in stack order. Beginning with VMS PV. */
7430 if (TARGET_ABI_OPEN_VMS
&& alpha_procedure_type
== PT_STACK
)
7432 mem
= gen_rtx_MEM (DImode
, stack_pointer_rtx
);
7433 set_mem_alias_set (mem
, alpha_sr_alias_set
);
7434 FRP (emit_move_insn (mem
, gen_rtx_REG (DImode
, REG_PV
)));
7437 /* Save register RA next. */
7438 if (imask
& (1UL << REG_RA
))
7440 mem
= gen_rtx_MEM (DImode
, plus_constant (sa_reg
, reg_offset
));
7441 set_mem_alias_set (mem
, alpha_sr_alias_set
);
7442 FRP (emit_move_insn (mem
, gen_rtx_REG (DImode
, REG_RA
)));
7443 imask
&= ~(1UL << REG_RA
);
7447 /* Now save any other registers required to be saved. */
7448 for (i
= 0; i
< 32; i
++)
7449 if (imask
& (1UL << i
))
7451 mem
= gen_rtx_MEM (DImode
, plus_constant (sa_reg
, reg_offset
));
7452 set_mem_alias_set (mem
, alpha_sr_alias_set
);
7453 FRP (emit_move_insn (mem
, gen_rtx_REG (DImode
, i
)));
7457 for (i
= 0; i
< 32; i
++)
7458 if (fmask
& (1UL << i
))
7460 mem
= gen_rtx_MEM (DFmode
, plus_constant (sa_reg
, reg_offset
));
7461 set_mem_alias_set (mem
, alpha_sr_alias_set
);
7462 FRP (emit_move_insn (mem
, gen_rtx_REG (DFmode
, i
+32)));
7466 else if (TARGET_ABI_UNICOSMK
&& alpha_procedure_type
== PT_STACK
)
7468 /* The standard frame on the T3E includes space for saving registers.
7469 We just have to use it. We don't have to save the return address and
7470 the old frame pointer here - they are saved in the DSIB. */
7473 for (i
= 9; i
< 15; i
++)
7474 if (imask
& (1UL << i
))
7476 mem
= gen_rtx_MEM (DImode
, plus_constant(hard_frame_pointer_rtx
,
7478 set_mem_alias_set (mem
, alpha_sr_alias_set
);
7479 FRP (emit_move_insn (mem
, gen_rtx_REG (DImode
, i
)));
7482 for (i
= 2; i
< 10; i
++)
7483 if (fmask
& (1UL << i
))
7485 mem
= gen_rtx_MEM (DFmode
, plus_constant (hard_frame_pointer_rtx
,
7487 set_mem_alias_set (mem
, alpha_sr_alias_set
);
7488 FRP (emit_move_insn (mem
, gen_rtx_REG (DFmode
, i
+32)));
7493 if (TARGET_ABI_OPEN_VMS
)
7495 if (alpha_procedure_type
== PT_REGISTER
)
7496 /* Register frame procedures save the fp.
7497 ?? Ought to have a dwarf2 save for this. */
7498 emit_move_insn (gen_rtx_REG (DImode
, vms_save_fp_regno
),
7499 hard_frame_pointer_rtx
);
7501 if (alpha_procedure_type
!= PT_NULL
&& vms_base_regno
!= REG_PV
)
7502 emit_insn (gen_force_movdi (gen_rtx_REG (DImode
, vms_base_regno
),
7503 gen_rtx_REG (DImode
, REG_PV
)));
7505 if (alpha_procedure_type
!= PT_NULL
7506 && vms_unwind_regno
== HARD_FRAME_POINTER_REGNUM
)
7507 FRP (emit_move_insn (hard_frame_pointer_rtx
, stack_pointer_rtx
));
7509 /* If we have to allocate space for outgoing args, do it now. */
7510 if (current_function_outgoing_args_size
!= 0)
7513 = emit_move_insn (stack_pointer_rtx
,
7515 (hard_frame_pointer_rtx
,
7517 (current_function_outgoing_args_size
))));
7519 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
7520 if ! frame_pointer_needed. Setting the bit will change the CFA
7521 computation rule to use sp again, which would be wrong if we had
7522 frame_pointer_needed, as this means sp might move unpredictably
7526 frame_pointer_needed
7527 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7529 current_function_outgoing_args_size != 0
7530 => alpha_procedure_type != PT_NULL,
7532 so when we are not setting the bit here, we are guaranteed to
7533 have emited an FRP frame pointer update just before. */
7534 RTX_FRAME_RELATED_P (seq
) = ! frame_pointer_needed
;
7537 else if (!TARGET_ABI_UNICOSMK
)
7539 /* If we need a frame pointer, set it from the stack pointer. */
7540 if (frame_pointer_needed
)
7542 if (TARGET_CAN_FAULT_IN_PROLOGUE
)
7543 FRP (emit_move_insn (hard_frame_pointer_rtx
, stack_pointer_rtx
));
7545 /* This must always be the last instruction in the
7546 prologue, thus we emit a special move + clobber. */
7547 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx
,
7548 stack_pointer_rtx
, sa_reg
)));
7552 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
7553 the prologue, for exception handling reasons, we cannot do this for
7554 any insn that might fault. We could prevent this for mems with a
7555 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
7556 have to prevent all such scheduling with a blockage.
7558 Linux, on the other hand, never bothered to implement OSF/1's
7559 exception handling, and so doesn't care about such things. Anyone
7560 planning to use dwarf2 frame-unwind info can also omit the blockage. */
7562 if (! TARGET_CAN_FAULT_IN_PROLOGUE
)
7563 emit_insn (gen_blockage ());
7566 /* Output the textual info surrounding the prologue. */
7569 alpha_start_function (file
, fnname
, decl
)
7572 tree decl ATTRIBUTE_UNUSED
;
7574 unsigned long imask
= 0;
7575 unsigned long fmask
= 0;
7576 /* Stack space needed for pushing registers clobbered by us. */
7577 HOST_WIDE_INT sa_size
;
7578 /* Complete stack size needed. */
7579 unsigned HOST_WIDE_INT frame_size
;
7580 /* Offset from base reg to register save area. */
7581 HOST_WIDE_INT reg_offset
;
7582 char *entry_label
= (char *) alloca (strlen (fnname
) + 6);
7585 /* Don't emit an extern directive for functions defined in the same file. */
7586 if (TARGET_ABI_UNICOSMK
)
7589 name_tree
= get_identifier (fnname
);
7590 TREE_ASM_WRITTEN (name_tree
) = 1;
7593 alpha_fnname
= fnname
;
7594 sa_size
= alpha_sa_size ();
7596 frame_size
= get_frame_size ();
7597 if (TARGET_ABI_OPEN_VMS
)
7598 frame_size
= ALPHA_ROUND (sa_size
7599 + (alpha_procedure_type
== PT_STACK
? 8 : 0)
7601 + current_function_pretend_args_size
);
7602 else if (TARGET_ABI_UNICOSMK
)
7603 frame_size
= ALPHA_ROUND (sa_size
7604 + (alpha_procedure_type
== PT_STACK
? 48 : 0))
7605 + ALPHA_ROUND (frame_size
7606 + current_function_outgoing_args_size
);
7608 frame_size
= (ALPHA_ROUND (current_function_outgoing_args_size
)
7610 + ALPHA_ROUND (frame_size
7611 + current_function_pretend_args_size
));
7613 if (TARGET_ABI_OPEN_VMS
)
7616 reg_offset
= ALPHA_ROUND (current_function_outgoing_args_size
);
7618 alpha_sa_mask (&imask
, &fmask
);
7620 /* Ecoff can handle multiple .file directives, so put out file and lineno.
7621 We have to do that before the .ent directive as we cannot switch
7622 files within procedures with native ecoff because line numbers are
7623 linked to procedure descriptors.
7624 Outputting the lineno helps debugging of one line functions as they
7625 would otherwise get no line number at all. Please note that we would
7626 like to put out last_linenum from final.c, but it is not accessible. */
7628 if (write_symbols
== SDB_DEBUG
)
7630 #ifdef ASM_OUTPUT_SOURCE_FILENAME
7631 ASM_OUTPUT_SOURCE_FILENAME (file
,
7632 DECL_SOURCE_FILE (current_function_decl
));
7634 #ifdef ASM_OUTPUT_SOURCE_LINE
7635 if (debug_info_level
!= DINFO_LEVEL_TERSE
)
7636 ASM_OUTPUT_SOURCE_LINE (file
,
7637 DECL_SOURCE_LINE (current_function_decl
));
7641 /* Issue function start and label. */
7642 if (TARGET_ABI_OPEN_VMS
7643 || (!TARGET_ABI_UNICOSMK
&& !flag_inhibit_size_directive
))
7645 fputs ("\t.ent ", file
);
7646 assemble_name (file
, fnname
);
7649 /* If the function needs GP, we'll write the "..ng" label there.
7650 Otherwise, do it here. */
7652 && ! alpha_function_needs_gp
7653 && ! current_function_is_thunk
)
7656 assemble_name (file
, fnname
);
7657 fputs ("..ng:\n", file
);
7661 strcpy (entry_label
, fnname
);
7662 if (TARGET_ABI_OPEN_VMS
)
7663 strcat (entry_label
, "..en");
7665 /* For public functions, the label must be globalized by appending an
7666 additional colon. */
7667 if (TARGET_ABI_UNICOSMK
&& TREE_PUBLIC (decl
))
7668 strcat (entry_label
, ":");
7670 ASM_OUTPUT_LABEL (file
, entry_label
);
7671 inside_function
= TRUE
;
7673 if (TARGET_ABI_OPEN_VMS
)
7674 fprintf (file
, "\t.base $%d\n", vms_base_regno
);
7676 if (!TARGET_ABI_OPEN_VMS
&& !TARGET_ABI_UNICOSMK
&& TARGET_IEEE_CONFORMANT
7677 && !flag_inhibit_size_directive
)
7679 /* Set flags in procedure descriptor to request IEEE-conformant
7680 math-library routines. The value we set it to is PDSC_EXC_IEEE
7681 (/usr/include/pdsc.h). */
7682 fputs ("\t.eflag 48\n", file
);
7685 /* Set up offsets to alpha virtual arg/local debugging pointer. */
7686 alpha_auto_offset
= -frame_size
+ current_function_pretend_args_size
;
7687 alpha_arg_offset
= -frame_size
+ 48;
7689 /* Describe our frame. If the frame size is larger than an integer,
7690 print it as zero to avoid an assembler error. We won't be
7691 properly describing such a frame, but that's the best we can do. */
7692 if (TARGET_ABI_UNICOSMK
)
7694 else if (TARGET_ABI_OPEN_VMS
)
7696 fprintf (file
, "\t.frame $%d,", vms_unwind_regno
);
7697 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
,
7698 frame_size
>= (1UL << 31) ? 0 : frame_size
);
7699 fputs (",$26,", file
);
7700 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, reg_offset
);
7703 else if (!flag_inhibit_size_directive
)
7705 fprintf (file
, "\t.frame $%d,",
7706 (frame_pointer_needed
7707 ? HARD_FRAME_POINTER_REGNUM
: STACK_POINTER_REGNUM
));
7708 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
,
7709 frame_size
>= (1UL << 31) ? 0 : frame_size
);
7710 fprintf (file
, ",$26,%d\n", current_function_pretend_args_size
);
7713 /* Describe which registers were spilled. */
7714 if (TARGET_ABI_UNICOSMK
)
7716 else if (TARGET_ABI_OPEN_VMS
)
7719 /* ??? Does VMS care if mask contains ra? The old code didn't
7720 set it, so I don't here. */
7721 fprintf (file
, "\t.mask 0x%lx,0\n", imask
& ~(1UL << REG_RA
));
7723 fprintf (file
, "\t.fmask 0x%lx,0\n", fmask
);
7724 if (alpha_procedure_type
== PT_REGISTER
)
7725 fprintf (file
, "\t.fp_save $%d\n", vms_save_fp_regno
);
7727 else if (!flag_inhibit_size_directive
)
7731 fprintf (file
, "\t.mask 0x%lx,", imask
);
7732 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
,
7733 frame_size
>= (1UL << 31) ? 0 : reg_offset
- frame_size
);
7736 for (i
= 0; i
< 32; ++i
)
7737 if (imask
& (1UL << i
))
7743 fprintf (file
, "\t.fmask 0x%lx,", fmask
);
7744 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
,
7745 frame_size
>= (1UL << 31) ? 0 : reg_offset
- frame_size
);
7750 #if TARGET_ABI_OPEN_VMS
7751 /* Ifdef'ed cause link_section are only available then. */
7752 readonly_data_section ();
7753 fprintf (file
, "\t.align 3\n");
7754 assemble_name (file
, fnname
); fputs ("..na:\n", file
);
7755 fputs ("\t.ascii \"", file
);
7756 assemble_name (file
, fnname
);
7757 fputs ("\\0\"\n", file
);
7758 alpha_need_linkage (fnname
, 1);
7763 /* Emit the .prologue note at the scheduled end of the prologue. */
7766 alpha_output_function_end_prologue (file
)
7769 if (TARGET_ABI_UNICOSMK
)
7771 else if (TARGET_ABI_OPEN_VMS
)
7772 fputs ("\t.prologue\n", file
);
7773 else if (TARGET_ABI_WINDOWS_NT
)
7774 fputs ("\t.prologue 0\n", file
);
7775 else if (!flag_inhibit_size_directive
)
7776 fprintf (file
, "\t.prologue %d\n",
7777 alpha_function_needs_gp
|| current_function_is_thunk
);
7780 /* Write function epilogue. */
7782 /* ??? At some point we will want to support full unwind, and so will
7783 need to mark the epilogue as well. At the moment, we just confuse
7786 #define FRP(exp) exp
7789 alpha_expand_epilogue ()
7791 /* Registers to save. */
7792 unsigned long imask
= 0;
7793 unsigned long fmask
= 0;
7794 /* Stack space needed for pushing registers clobbered by us. */
7795 HOST_WIDE_INT sa_size
;
7796 /* Complete stack size needed. */
7797 HOST_WIDE_INT frame_size
;
7798 /* Offset from base reg to register save area. */
7799 HOST_WIDE_INT reg_offset
;
7800 int fp_is_frame_pointer
, fp_offset
;
7801 rtx sa_reg
, sa_reg_exp
= NULL
;
7802 rtx sp_adj1
, sp_adj2
, mem
;
7806 sa_size
= alpha_sa_size ();
7808 frame_size
= get_frame_size ();
7809 if (TARGET_ABI_OPEN_VMS
)
7810 frame_size
= ALPHA_ROUND (sa_size
7811 + (alpha_procedure_type
== PT_STACK
? 8 : 0)
7813 + current_function_pretend_args_size
);
7814 else if (TARGET_ABI_UNICOSMK
)
7815 frame_size
= ALPHA_ROUND (sa_size
7816 + (alpha_procedure_type
== PT_STACK
? 48 : 0))
7817 + ALPHA_ROUND (frame_size
7818 + current_function_outgoing_args_size
);
7820 frame_size
= (ALPHA_ROUND (current_function_outgoing_args_size
)
7822 + ALPHA_ROUND (frame_size
7823 + current_function_pretend_args_size
));
7825 if (TARGET_ABI_OPEN_VMS
)
7827 if (alpha_procedure_type
== PT_STACK
)
7833 reg_offset
= ALPHA_ROUND (current_function_outgoing_args_size
);
7835 alpha_sa_mask (&imask
, &fmask
);
7838 = ((TARGET_ABI_OPEN_VMS
&& alpha_procedure_type
== PT_STACK
)
7839 || (!TARGET_ABI_OPEN_VMS
&& frame_pointer_needed
));
7841 sa_reg
= stack_pointer_rtx
;
7843 if (current_function_calls_eh_return
)
7844 eh_ofs
= EH_RETURN_STACKADJ_RTX
;
7848 if (!TARGET_ABI_UNICOSMK
&& sa_size
)
7850 /* If we have a frame pointer, restore SP from it. */
7851 if ((TARGET_ABI_OPEN_VMS
7852 && vms_unwind_regno
== HARD_FRAME_POINTER_REGNUM
)
7853 || (!TARGET_ABI_OPEN_VMS
&& frame_pointer_needed
))
7854 FRP (emit_move_insn (stack_pointer_rtx
, hard_frame_pointer_rtx
));
7856 /* Cope with very large offsets to the register save area. */
7857 if (reg_offset
+ sa_size
> 0x8000)
7859 int low
= ((reg_offset
& 0xffff) ^ 0x8000) - 0x8000;
7862 if (low
+ sa_size
<= 0x8000)
7863 bias
= reg_offset
- low
, reg_offset
= low
;
7865 bias
= reg_offset
, reg_offset
= 0;
7867 sa_reg
= gen_rtx_REG (DImode
, 22);
7868 sa_reg_exp
= plus_constant (stack_pointer_rtx
, bias
);
7870 FRP (emit_move_insn (sa_reg
, sa_reg_exp
));
7873 /* Restore registers in order, excepting a true frame pointer. */
7875 mem
= gen_rtx_MEM (DImode
, plus_constant (sa_reg
, reg_offset
));
7877 set_mem_alias_set (mem
, alpha_sr_alias_set
);
7878 FRP (emit_move_insn (gen_rtx_REG (DImode
, REG_RA
), mem
));
7881 imask
&= ~(1UL << REG_RA
);
7883 for (i
= 0; i
< 32; ++i
)
7884 if (imask
& (1UL << i
))
7886 if (i
== HARD_FRAME_POINTER_REGNUM
&& fp_is_frame_pointer
)
7887 fp_offset
= reg_offset
;
7890 mem
= gen_rtx_MEM (DImode
, plus_constant(sa_reg
, reg_offset
));
7891 set_mem_alias_set (mem
, alpha_sr_alias_set
);
7892 FRP (emit_move_insn (gen_rtx_REG (DImode
, i
), mem
));
7897 for (i
= 0; i
< 32; ++i
)
7898 if (fmask
& (1UL << i
))
7900 mem
= gen_rtx_MEM (DFmode
, plus_constant(sa_reg
, reg_offset
));
7901 set_mem_alias_set (mem
, alpha_sr_alias_set
);
7902 FRP (emit_move_insn (gen_rtx_REG (DFmode
, i
+32), mem
));
7906 else if (TARGET_ABI_UNICOSMK
&& alpha_procedure_type
== PT_STACK
)
7908 /* Restore callee-saved general-purpose registers. */
7912 for (i
= 9; i
< 15; i
++)
7913 if (imask
& (1UL << i
))
7915 mem
= gen_rtx_MEM (DImode
, plus_constant(hard_frame_pointer_rtx
,
7917 set_mem_alias_set (mem
, alpha_sr_alias_set
);
7918 FRP (emit_move_insn (gen_rtx_REG (DImode
, i
), mem
));
7922 for (i
= 2; i
< 10; i
++)
7923 if (fmask
& (1UL << i
))
7925 mem
= gen_rtx_MEM (DFmode
, plus_constant(hard_frame_pointer_rtx
,
7927 set_mem_alias_set (mem
, alpha_sr_alias_set
);
7928 FRP (emit_move_insn (gen_rtx_REG (DFmode
, i
+32), mem
));
7932 /* Restore the return address from the DSIB. */
7934 mem
= gen_rtx_MEM (DImode
, plus_constant(hard_frame_pointer_rtx
, -8));
7935 set_mem_alias_set (mem
, alpha_sr_alias_set
);
7936 FRP (emit_move_insn (gen_rtx_REG (DImode
, REG_RA
), mem
));
7939 if (frame_size
|| eh_ofs
)
7941 sp_adj1
= stack_pointer_rtx
;
7945 sp_adj1
= gen_rtx_REG (DImode
, 23);
7946 emit_move_insn (sp_adj1
,
7947 gen_rtx_PLUS (Pmode
, stack_pointer_rtx
, eh_ofs
));
7950 /* If the stack size is large, begin computation into a temporary
7951 register so as not to interfere with a potential fp restore,
7952 which must be consecutive with an SP restore. */
7953 if (frame_size
< 32768
7954 && ! (TARGET_ABI_UNICOSMK
&& current_function_calls_alloca
))
7955 sp_adj2
= GEN_INT (frame_size
);
7956 else if (TARGET_ABI_UNICOSMK
)
7958 sp_adj1
= gen_rtx_REG (DImode
, 23);
7959 FRP (emit_move_insn (sp_adj1
, hard_frame_pointer_rtx
));
7960 sp_adj2
= const0_rtx
;
7962 else if (frame_size
< 0x40007fffL
)
7964 int low
= ((frame_size
& 0xffff) ^ 0x8000) - 0x8000;
7966 sp_adj2
= plus_constant (sp_adj1
, frame_size
- low
);
7967 if (sa_reg_exp
&& rtx_equal_p (sa_reg_exp
, sp_adj2
))
7971 sp_adj1
= gen_rtx_REG (DImode
, 23);
7972 FRP (emit_move_insn (sp_adj1
, sp_adj2
));
7974 sp_adj2
= GEN_INT (low
);
7978 rtx tmp
= gen_rtx_REG (DImode
, 23);
7979 FRP (sp_adj2
= alpha_emit_set_const (tmp
, DImode
, frame_size
, 3));
7982 /* We can't drop new things to memory this late, afaik,
7983 so build it up by pieces. */
7984 FRP (sp_adj2
= alpha_emit_set_long_const (tmp
, frame_size
,
7985 -(frame_size
< 0)));
7991 /* From now on, things must be in order. So emit blockages. */
7993 /* Restore the frame pointer. */
7994 if (TARGET_ABI_UNICOSMK
)
7996 emit_insn (gen_blockage ());
7997 mem
= gen_rtx_MEM (DImode
,
7998 plus_constant (hard_frame_pointer_rtx
, -16));
7999 set_mem_alias_set (mem
, alpha_sr_alias_set
);
8000 FRP (emit_move_insn (hard_frame_pointer_rtx
, mem
));
8002 else if (fp_is_frame_pointer
)
8004 emit_insn (gen_blockage ());
8005 mem
= gen_rtx_MEM (DImode
, plus_constant (sa_reg
, fp_offset
));
8006 set_mem_alias_set (mem
, alpha_sr_alias_set
);
8007 FRP (emit_move_insn (hard_frame_pointer_rtx
, mem
));
8009 else if (TARGET_ABI_OPEN_VMS
)
8011 emit_insn (gen_blockage ());
8012 FRP (emit_move_insn (hard_frame_pointer_rtx
,
8013 gen_rtx_REG (DImode
, vms_save_fp_regno
)));
8016 /* Restore the stack pointer. */
8017 emit_insn (gen_blockage ());
8018 if (sp_adj2
== const0_rtx
)
8019 FRP (emit_move_insn (stack_pointer_rtx
, sp_adj1
));
8021 FRP (emit_move_insn (stack_pointer_rtx
,
8022 gen_rtx_PLUS (DImode
, sp_adj1
, sp_adj2
)));
8026 if (TARGET_ABI_OPEN_VMS
&& alpha_procedure_type
== PT_REGISTER
)
8028 emit_insn (gen_blockage ());
8029 FRP (emit_move_insn (hard_frame_pointer_rtx
,
8030 gen_rtx_REG (DImode
, vms_save_fp_regno
)));
8032 else if (TARGET_ABI_UNICOSMK
&& alpha_procedure_type
!= PT_STACK
)
8034 /* Decrement the frame pointer if the function does not have a
8037 emit_insn (gen_blockage ());
8038 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx
,
8039 hard_frame_pointer_rtx
, GEN_INT (-1))));
8044 /* Output the rest of the textual info surrounding the epilogue. */
/* Emit the textual trailer for a function: the .end directive, VMS
   linkage entries, and UNICOS/mk jump tables / SSIB.
   NOTE(review): extraction dropped braces and some parameter
   declarations from this view.  */
8047 alpha_end_function (file
, fnname
, decl
)
8050 tree decl ATTRIBUTE_UNUSED
;
8052 /* End the function. */
/* ".end <name>" is emitted except on UNICOS/mk or when size
   directives are suppressed.  */
8053 if (!TARGET_ABI_UNICOSMK
&& !flag_inhibit_size_directive
)
8055 fputs ("\t.end ", file
);
8056 assemble_name (file
, fnname
);
/* Record that we are no longer between .ent/.end.  */
8059 inside_function
= FALSE
;
/* On VMS, flush this function's accumulated linkage section.  */
8061 #if TARGET_ABI_OPEN_VMS
8062 alpha_write_linkage (file
, fnname
, decl
);
8065 /* Output jump tables and the static subroutine information block. */
8066 if (TARGET_ABI_UNICOSMK
)
8068 unicosmk_output_ssib (file
, fnname
);
8069 unicosmk_output_deferred_case_vectors (file
);
8074 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8076 In order to avoid the hordes of differences between generated code
8077 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8078 lots of code loading up large constants, generate rtl and emit it
8079 instead of going straight to text.
8081 Not sure why this idea hasn't been explored before... */
8084 alpha_output_mi_thunk_osf (file
, thunk_fndecl
, delta
, vcall_offset
, function
)
8086 tree thunk_fndecl ATTRIBUTE_UNUSED
;
8087 HOST_WIDE_INT delta
;
8088 HOST_WIDE_INT vcall_offset
;
8091 HOST_WIDE_INT hi
, lo
;
8092 rtx
this, insn
, funexp
;
8094 /* We always require a valid GP. */
8095 emit_insn (gen_prologue_ldgp ());
8096 emit_note (NULL
, NOTE_INSN_PROLOGUE_END
);
8098 /* Find the "this" pointer. If the function returns a structure,
8099 the structure return pointer is in $16. */
8100 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function
))))
8101 this = gen_rtx_REG (Pmode
, 17);
8103 this = gen_rtx_REG (Pmode
, 16);
8105 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8106 entire constant for the add. */
8107 lo
= ((delta
& 0xffff) ^ 0x8000) - 0x8000;
8108 hi
= (((delta
- lo
) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8109 if (hi
+ lo
== delta
)
8112 emit_insn (gen_adddi3 (this, this, GEN_INT (hi
)));
8114 emit_insn (gen_adddi3 (this, this, GEN_INT (lo
)));
8118 rtx tmp
= alpha_emit_set_long_const (gen_rtx_REG (Pmode
, 0),
8119 delta
, -(delta
< 0));
8120 emit_insn (gen_adddi3 (this, this, tmp
));
8123 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8128 tmp
= gen_rtx_REG (Pmode
, 0);
8129 emit_move_insn (tmp
, gen_rtx_MEM (Pmode
, this));
8131 lo
= ((vcall_offset
& 0xffff) ^ 0x8000) - 0x8000;
8132 hi
= (((vcall_offset
- lo
) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8133 if (hi
+ lo
== vcall_offset
)
8136 emit_insn (gen_adddi3 (tmp
, tmp
, GEN_INT (hi
)));
8140 tmp2
= alpha_emit_set_long_const (gen_rtx_REG (Pmode
, 1),
8141 vcall_offset
, -(vcall_offset
< 0));
8142 emit_insn (gen_adddi3 (tmp
, tmp
, tmp2
));
8146 tmp2
= gen_rtx_PLUS (Pmode
, tmp
, GEN_INT (lo
));
8149 emit_move_insn (tmp
, gen_rtx_MEM (Pmode
, tmp2
));
8151 emit_insn (gen_adddi3 (this, this, tmp
));
8154 /* Generate a tail call to the target function. */
8155 if (! TREE_USED (function
))
8157 assemble_external (function
);
8158 TREE_USED (function
) = 1;
8160 funexp
= XEXP (DECL_RTL (function
), 0);
8161 funexp
= gen_rtx_MEM (FUNCTION_MODE
, funexp
);
8162 insn
= emit_call_insn (gen_sibcall (funexp
, const0_rtx
));
8163 SIBLING_CALL_P (insn
) = 1;
8165 /* Run just enough of rest_of_compilation to get the insns emitted.
8166 There's not really enough bulk here to make other passes such as
8167 instruction scheduling worth while. Note that use_thunk calls
8168 assemble_start_function and assemble_end_function. */
8169 insn
= get_insns ();
8170 shorten_branches (insn
);
8171 final_start_function (insn
, file
, 1);
8172 final (insn
, file
, 1, 0);
8173 final_end_function ();
8175 #endif /* TARGET_ABI_OSF */
8177 /* Debugging support. */
8181 /* Count the number of sdb related labels are generated (to find block
8182 start and end boundaries). */
8184 int sdb_label_count
= 0;
8186 /* Next label # for each statement. */
8188 static int sym_lineno
= 0;
8190 /* Count the number of .file directives, so that .loc is up to date. */
8192 static int num_source_filenames
= 0;
8194 /* Name of the file containing the current function. */
8196 static const char *current_function_file
= "";
8198 /* Offsets to alpha virtual arg/local debugging pointers. */
8200 long alpha_arg_offset
;
8201 long alpha_auto_offset
;
8203 /* Emit a new filename to a stream. */
/* Three cases are handled below: the very first file (emits .file and,
   for stabs, an N_SOL stab), a subsequent stabs file change, and a
   plain filename change.  NOTE(review): extraction dropped braces and
   the first_time-test line from this view.  */
8206 alpha_output_filename (stream
, name
)
8210 static int first_time
= TRUE
;
8211 char ltext_label_name
[100];
/* First call: emit the initial .file directive.  */
8216 ++num_source_filenames
;
8217 current_function_file
= name
;
8218 fprintf (stream
, "\t.file\t%d ", num_source_filenames
);
8219 output_quoted_string (stream
, name
);
8220 fprintf (stream
, "\n");
/* Non-GAS assemblers need the stabs marker comment.  */
8221 if (!TARGET_GAS
&& write_symbols
== DBX_DEBUG
)
8222 fprintf (stream
, "\t#@stabs\n");
/* Subsequent file change under dbx/stabs: emit an N_SOL stab
   referencing the Ltext label.  */
8225 else if (write_symbols
== DBX_DEBUG
)
8227 ASM_GENERATE_INTERNAL_LABEL (ltext_label_name
, "Ltext", 0);
8228 fprintf (stream
, "%s", ASM_STABS_OP
);
8229 output_quoted_string (stream
, name
);
/* NOTE(review): "<ext_label_name" looks like an HTML-mangled
   "&ltext_label_name" ("&lt" -> "<"); restore when de-mangling.  */
8230 fprintf (stream
, ",%d,0,0,%s\n", N_SOL
, <ext_label_name
[1]);
/* Otherwise, only act when the filename actually changed (pointer
   compare first as a cheap shortcut, then strcmp).  */
8233 else if (name
!= current_function_file
8234 && strcmp (name
, current_function_file
) != 0)
/* Inside a function, non-GAS assemblers cannot take .file; emit it
   as a comment instead.  */
8236 if (inside_function
&& ! TARGET_GAS
)
8237 fprintf (stream
, "\t#.file\t%d ", num_source_filenames
);
8240 ++num_source_filenames
;
8241 current_function_file
= name
;
8242 fprintf (stream
, "\t.file\t%d ", num_source_filenames
);
8245 output_quoted_string (stream
, name
);
8246 fprintf (stream
, "\n");
8250 /* Emit a linenumber to a stream. */
/* For dbx/stabs output an N_SLINE stab anchored on a fresh $LM label;
   otherwise fall back to the ECOFF-style .loc directive.
   NOTE(review): the sym_lineno increment line is missing from this
   extracted view.  */
8253 alpha_output_lineno (stream
, line
)
8257 if (write_symbols
== DBX_DEBUG
)
8259 /* mips-tfile doesn't understand .stabd directives. */
8261 fprintf (stream
, "$LM%d:\n%s%d,0,%d,$LM%d\n",
8262 sym_lineno
, ASM_STABN_OP
, N_SLINE
, line
, sym_lineno
);
8265 fprintf (stream
, "\n\t.loc\t%d %d\n", num_source_filenames
, line
);
8268 /* Structure to show the current status of registers and memory. */
/* Bitfield masks of integer regs, FP regs, and a single "any memory"
   bit; used in used/defd pairs by the trap-shadow pass below.
   NOTE(review): the used/defd member wrapper lines are missing from
   this extracted view.  */
8270 struct shadow_summary
8273 unsigned int i
: 31; /* Mask of int regs */
8274 unsigned int fp
: 31; /* Mask of fp regs */
8275 unsigned int mem
: 1; /* mem == imem | fpmem */
/* Forward declarations for the trap-shadow machinery (old-style
   PARAMS prototypes, pre-C89-cleanup GCC convention).  */
8279 static void summarize_insn
PARAMS ((rtx
, struct shadow_summary
*, int));
8280 static void alpha_handle_trap_shadows
PARAMS ((void));
8282 /* Summary the effects of expression X on the machine. Update SUM, a pointer
8283 to the summary structure. SET is nonzero if the insn is setting the
8284 object, otherwise zero. */
8287 summarize_insn (x
, sum
, set
)
8289 struct shadow_summary
*sum
;
8292 const char *format_ptr
;
8298 switch (GET_CODE (x
))
8300 /* ??? Note that this case would be incorrect if the Alpha had a
8301 ZERO_EXTRACT in SET_DEST. */
8303 summarize_insn (SET_SRC (x
), sum
, 0);
8304 summarize_insn (SET_DEST (x
), sum
, 1);
8308 summarize_insn (XEXP (x
, 0), sum
, 1);
8312 summarize_insn (XEXP (x
, 0), sum
, 0);
8316 for (i
= ASM_OPERANDS_INPUT_LENGTH (x
) - 1; i
>= 0; i
--)
8317 summarize_insn (ASM_OPERANDS_INPUT (x
, i
), sum
, 0);
8321 for (i
= XVECLEN (x
, 0) - 1; i
>= 0; i
--)
8322 summarize_insn (XVECEXP (x
, 0, i
), sum
, 0);
8326 summarize_insn (SUBREG_REG (x
), sum
, 0);
8331 int regno
= REGNO (x
);
8332 unsigned long mask
= ((unsigned long) 1) << (regno
% 32);
8334 if (regno
== 31 || regno
== 63)
8340 sum
->defd
.i
|= mask
;
8342 sum
->defd
.fp
|= mask
;
8347 sum
->used
.i
|= mask
;
8349 sum
->used
.fp
|= mask
;
8360 /* Find the regs used in memory address computation: */
8361 summarize_insn (XEXP (x
, 0), sum
, 0);
8364 case CONST_INT
: case CONST_DOUBLE
:
8365 case SYMBOL_REF
: case LABEL_REF
: case CONST
:
8366 case SCRATCH
: case ASM_INPUT
:
8369 /* Handle common unary and binary ops for efficiency. */
8370 case COMPARE
: case PLUS
: case MINUS
: case MULT
: case DIV
:
8371 case MOD
: case UDIV
: case UMOD
: case AND
: case IOR
:
8372 case XOR
: case ASHIFT
: case ROTATE
: case ASHIFTRT
: case LSHIFTRT
:
8373 case ROTATERT
: case SMIN
: case SMAX
: case UMIN
: case UMAX
:
8374 case NE
: case EQ
: case GE
: case GT
: case LE
:
8375 case LT
: case GEU
: case GTU
: case LEU
: case LTU
:
8376 summarize_insn (XEXP (x
, 0), sum
, 0);
8377 summarize_insn (XEXP (x
, 1), sum
, 0);
8380 case NEG
: case NOT
: case SIGN_EXTEND
: case ZERO_EXTEND
:
8381 case TRUNCATE
: case FLOAT_EXTEND
: case FLOAT_TRUNCATE
: case FLOAT
:
8382 case FIX
: case UNSIGNED_FLOAT
: case UNSIGNED_FIX
: case ABS
:
8383 case SQRT
: case FFS
:
8384 summarize_insn (XEXP (x
, 0), sum
, 0);
8388 format_ptr
= GET_RTX_FORMAT (GET_CODE (x
));
8389 for (i
= GET_RTX_LENGTH (GET_CODE (x
)) - 1; i
>= 0; i
--)
8390 switch (format_ptr
[i
])
8393 summarize_insn (XEXP (x
, i
), sum
, 0);
8397 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
8398 summarize_insn (XVECEXP (x
, i
, j
), sum
, 0);
8410 /* Ensure a sufficient number of `trapb' insns are in the code when
8411 the user requests code with a trap precision of functions or
8414 In naive mode, when the user requests a trap-precision of
8415 "instruction", a trapb is needed after every instruction that may
8416 generate a trap. This ensures that the code is resumption safe but
8419 When optimizations are turned on, we delay issuing a trapb as long
8420 as possible. In this context, a trap shadow is the sequence of
8421 instructions that starts with a (potentially) trap generating
8422 instruction and extends to the next trapb or call_pal instruction
8423 (but GCC never generates call_pal by itself). We can delay (and
8424 therefore sometimes omit) a trapb subject to the following
8427 (a) On entry to the trap shadow, if any Alpha register or memory
8428 location contains a value that is used as an operand value by some
8429 instruction in the trap shadow (live on entry), then no instruction
8430 in the trap shadow may modify the register or memory location.
8432 (b) Within the trap shadow, the computation of the base register
8433 for a memory load or store instruction may not involve using the
8434 result of an instruction that might generate an UNPREDICTABLE
8437 (c) Within the trap shadow, no register may be used more than once
8438 as a destination register. (This is to make life easier for the
8441 (d) The trap shadow may not include any branch instructions. */
8444 alpha_handle_trap_shadows ()
8446 struct shadow_summary shadow
;
8447 int trap_pending
, exception_nesting
;
8451 exception_nesting
= 0;
8454 shadow
.used
.mem
= 0;
8455 shadow
.defd
= shadow
.used
;
8457 for (i
= get_insns (); i
; i
= NEXT_INSN (i
))
8459 if (GET_CODE (i
) == NOTE
)
8461 switch (NOTE_LINE_NUMBER (i
))
8463 case NOTE_INSN_EH_REGION_BEG
:
8464 exception_nesting
++;
8469 case NOTE_INSN_EH_REGION_END
:
8470 exception_nesting
--;
8475 case NOTE_INSN_EPILOGUE_BEG
:
8476 if (trap_pending
&& alpha_tp
>= ALPHA_TP_FUNC
)
8481 else if (trap_pending
)
8483 if (alpha_tp
== ALPHA_TP_FUNC
)
8485 if (GET_CODE (i
) == JUMP_INSN
8486 && GET_CODE (PATTERN (i
)) == RETURN
)
8489 else if (alpha_tp
== ALPHA_TP_INSN
)
8493 struct shadow_summary sum
;
8498 sum
.defd
= sum
.used
;
8500 switch (GET_CODE (i
))
8503 /* Annoyingly, get_attr_trap will abort on these. */
8504 if (GET_CODE (PATTERN (i
)) == USE
8505 || GET_CODE (PATTERN (i
)) == CLOBBER
)
8508 summarize_insn (PATTERN (i
), &sum
, 0);
8510 if ((sum
.defd
.i
& shadow
.defd
.i
)
8511 || (sum
.defd
.fp
& shadow
.defd
.fp
))
8513 /* (c) would be violated */
8517 /* Combine shadow with summary of current insn: */
8518 shadow
.used
.i
|= sum
.used
.i
;
8519 shadow
.used
.fp
|= sum
.used
.fp
;
8520 shadow
.used
.mem
|= sum
.used
.mem
;
8521 shadow
.defd
.i
|= sum
.defd
.i
;
8522 shadow
.defd
.fp
|= sum
.defd
.fp
;
8523 shadow
.defd
.mem
|= sum
.defd
.mem
;
8525 if ((sum
.defd
.i
& shadow
.used
.i
)
8526 || (sum
.defd
.fp
& shadow
.used
.fp
)
8527 || (sum
.defd
.mem
& shadow
.used
.mem
))
8529 /* (a) would be violated (also takes care of (b)) */
8530 if (get_attr_trap (i
) == TRAP_YES
8531 && ((sum
.defd
.i
& sum
.used
.i
)
8532 || (sum
.defd
.fp
& sum
.used
.fp
)))
8551 n
= emit_insn_before (gen_trapb (), i
);
8552 PUT_MODE (n
, TImode
);
8553 PUT_MODE (i
, TImode
);
8557 shadow
.used
.mem
= 0;
8558 shadow
.defd
= shadow
.used
;
8563 if ((exception_nesting
> 0 || alpha_tp
>= ALPHA_TP_FUNC
)
8564 && GET_CODE (i
) == INSN
8565 && GET_CODE (PATTERN (i
)) != USE
8566 && GET_CODE (PATTERN (i
)) != CLOBBER
8567 && get_attr_trap (i
) == TRAP_YES
)
8569 if (optimize
&& !trap_pending
)
8570 summarize_insn (PATTERN (i
), &shadow
, 0);
8576 /* Alpha can only issue instruction groups simultaneously if they are
8577 suitibly aligned. This is very processor-specific. */
8579 enum alphaev4_pipe
{
8586 enum alphaev5_pipe
{
8597 static enum alphaev4_pipe alphaev4_insn_pipe
PARAMS ((rtx
));
8598 static enum alphaev5_pipe alphaev5_insn_pipe
PARAMS ((rtx
));
8599 static rtx alphaev4_next_group
PARAMS ((rtx
, int *, int *));
8600 static rtx alphaev5_next_group
PARAMS ((rtx
, int *, int *));
8601 static rtx alphaev4_next_nop
PARAMS ((int *));
8602 static rtx alphaev5_next_nop
PARAMS ((int *));
8604 static void alpha_align_insns
8605 PARAMS ((unsigned int, rtx (*)(rtx
, int *, int *), rtx (*)(int *)));
8607 static enum alphaev4_pipe
8608 alphaev4_insn_pipe (insn
)
8611 if (recog_memoized (insn
) < 0)
8613 if (get_attr_length (insn
) != 4)
8616 switch (get_attr_type (insn
))
8650 static enum alphaev5_pipe
8651 alphaev5_insn_pipe (insn
)
8654 if (recog_memoized (insn
) < 0)
8656 if (get_attr_length (insn
) != 4)
8659 switch (get_attr_type (insn
))
8700 /* IN_USE is a mask of the slots currently filled within the insn group.
8701 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
8702 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
8704 LEN is, of course, the length of the group in bytes. */
8707 alphaev4_next_group (insn
, pin_use
, plen
)
8709 int *pin_use
, *plen
;
8716 || GET_CODE (PATTERN (insn
)) == CLOBBER
8717 || GET_CODE (PATTERN (insn
)) == USE
)
8722 enum alphaev4_pipe pipe
;
8724 pipe
= alphaev4_insn_pipe (insn
);
8728 /* Force complex instructions to start new groups. */
8732 /* If this is a completely unrecognized insn, its an asm.
8733 We don't know how long it is, so record length as -1 to
8734 signal a needed realignment. */
8735 if (recog_memoized (insn
) < 0)
8738 len
= get_attr_length (insn
);
8742 if (in_use
& EV4_IB0
)
8744 if (in_use
& EV4_IB1
)
8749 in_use
|= EV4_IB0
| EV4_IBX
;
8753 if (in_use
& EV4_IB0
)
8755 if (!(in_use
& EV4_IBX
) || (in_use
& EV4_IB1
))
8763 if (in_use
& EV4_IB1
)
8773 /* Haifa doesn't do well scheduling branches. */
8774 if (GET_CODE (insn
) == JUMP_INSN
)
8778 insn
= next_nonnote_insn (insn
);
8780 if (!insn
|| ! INSN_P (insn
))
8783 /* Let Haifa tell us where it thinks insn group boundaries are. */
8784 if (GET_MODE (insn
) == TImode
)
8787 if (GET_CODE (insn
) == CLOBBER
|| GET_CODE (insn
) == USE
)
8792 insn
= next_nonnote_insn (insn
);
8800 /* IN_USE is a mask of the slots currently filled within the insn group.
8801 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
8802 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
8804 LEN is, of course, the length of the group in bytes. */
8807 alphaev5_next_group (insn
, pin_use
, plen
)
8809 int *pin_use
, *plen
;
8816 || GET_CODE (PATTERN (insn
)) == CLOBBER
8817 || GET_CODE (PATTERN (insn
)) == USE
)
8822 enum alphaev5_pipe pipe
;
8824 pipe
= alphaev5_insn_pipe (insn
);
8828 /* Force complex instructions to start new groups. */
8832 /* If this is a completely unrecognized insn, its an asm.
8833 We don't know how long it is, so record length as -1 to
8834 signal a needed realignment. */
8835 if (recog_memoized (insn
) < 0)
8838 len
= get_attr_length (insn
);
8841 /* ??? Most of the places below, we would like to abort, as
8842 it would indicate an error either in Haifa, or in the
8843 scheduling description. Unfortunately, Haifa never
8844 schedules the last instruction of the BB, so we don't
8845 have an accurate TI bit to go off. */
8847 if (in_use
& EV5_E0
)
8849 if (in_use
& EV5_E1
)
8854 in_use
|= EV5_E0
| EV5_E01
;
8858 if (in_use
& EV5_E0
)
8860 if (!(in_use
& EV5_E01
) || (in_use
& EV5_E1
))
8868 if (in_use
& EV5_E1
)
8874 if (in_use
& EV5_FA
)
8876 if (in_use
& EV5_FM
)
8881 in_use
|= EV5_FA
| EV5_FAM
;
8885 if (in_use
& EV5_FA
)
8891 if (in_use
& EV5_FM
)
8904 /* Haifa doesn't do well scheduling branches. */
8905 /* ??? If this is predicted not-taken, slotting continues, except
8906 that no more IBR, FBR, or JSR insns may be slotted. */
8907 if (GET_CODE (insn
) == JUMP_INSN
)
8911 insn
= next_nonnote_insn (insn
);
8913 if (!insn
|| ! INSN_P (insn
))
8916 /* Let Haifa tell us where it thinks insn group boundaries are. */
8917 if (GET_MODE (insn
) == TImode
)
8920 if (GET_CODE (insn
) == CLOBBER
|| GET_CODE (insn
) == USE
)
8925 insn
= next_nonnote_insn (insn
);
/* Pick the next nop to issue on EV4, based on which issue slots
   (*PIN_USE) are already occupied; falls through IB0, the swappable
   IBX case, then an FP nop when IB1 is free.  NOTE(review): the
   nop-constructing return statements are missing from this extracted
   view, so the exact nop chosen per branch cannot be confirmed here.  */
8934 alphaev4_next_nop (pin_use
)
8937 int in_use
= *pin_use
;
8940 if (!(in_use
& EV4_IB0
))
8945 else if ((in_use
& (EV4_IBX
|EV4_IB1
)) == EV4_IBX
)
8950 else if (TARGET_FP
&& !(in_use
& EV4_IB1
))
/* EV5 analogue of alphaev4_next_nop: choose a nop for the first free
   slot, trying E1, then (with FP enabled) FA and FM.  NOTE(review):
   the return statements are missing from this extracted view.  */
8963 alphaev5_next_nop (pin_use
)
8966 int in_use
= *pin_use
;
8969 if (!(in_use
& EV5_E1
))
8974 else if (TARGET_FP
&& !(in_use
& EV5_FA
))
8979 else if (TARGET_FP
&& !(in_use
& EV5_FM
))
8991 /* The instruction group alignment main loop. */
8994 alpha_align_insns (max_align
, next_group
, next_nop
)
8995 unsigned int max_align
;
8996 rtx (*next_group
) PARAMS ((rtx
, int *, int *));
8997 rtx (*next_nop
) PARAMS ((int *));
8999 /* ALIGN is the known alignment for the insn group. */
9001 /* OFS is the offset of the current insn in the insn group. */
9003 int prev_in_use
, in_use
, len
;
9006 /* Let shorten branches care for assigning alignments to code labels. */
9007 shorten_branches (get_insns ());
9009 if (align_functions
< 4)
9011 else if ((unsigned int) align_functions
< max_align
)
9012 align
= align_functions
;
9016 ofs
= prev_in_use
= 0;
9018 if (GET_CODE (i
) == NOTE
)
9019 i
= next_nonnote_insn (i
);
9023 next
= (*next_group
) (i
, &in_use
, &len
);
9025 /* When we see a label, resync alignment etc. */
9026 if (GET_CODE (i
) == CODE_LABEL
)
9028 unsigned int new_align
= 1 << label_to_alignment (i
);
9030 if (new_align
>= align
)
9032 align
= new_align
< max_align
? new_align
: max_align
;
9036 else if (ofs
& (new_align
-1))
9037 ofs
= (ofs
| (new_align
-1)) + 1;
9042 /* Handle complex instructions special. */
9043 else if (in_use
== 0)
9045 /* Asms will have length < 0. This is a signal that we have
9046 lost alignment knowledge. Assume, however, that the asm
9047 will not mis-align instructions. */
9056 /* If the known alignment is smaller than the recognized insn group,
9057 realign the output. */
9058 else if ((int) align
< len
)
9060 unsigned int new_log_align
= len
> 8 ? 4 : 3;
9063 where
= prev
= prev_nonnote_insn (i
);
9064 if (!where
|| GET_CODE (where
) != CODE_LABEL
)
9067 /* Can't realign between a call and its gp reload. */
9068 if (! (TARGET_EXPLICIT_RELOCS
9069 && prev
&& GET_CODE (prev
) == CALL_INSN
))
9071 emit_insn_before (gen_realign (GEN_INT (new_log_align
)), where
);
9072 align
= 1 << new_log_align
;
9077 /* If the group won't fit in the same INT16 as the previous,
9078 we need to add padding to keep the group together. Rather
9079 than simply leaving the insn filling to the assembler, we
9080 can make use of the knowledge of what sorts of instructions
9081 were issued in the previous group to make sure that all of
9082 the added nops are really free. */
9083 else if (ofs
+ len
> (int) align
)
9085 int nop_count
= (align
- ofs
) / 4;
9088 /* Insert nops before labels, branches, and calls to truely merge
9089 the execution of the nops with the previous instruction group. */
9090 where
= prev_nonnote_insn (i
);
9093 if (GET_CODE (where
) == CODE_LABEL
)
9095 rtx where2
= prev_nonnote_insn (where
);
9096 if (where2
&& GET_CODE (where2
) == JUMP_INSN
)
9099 else if (GET_CODE (where
) == INSN
)
9106 emit_insn_before ((*next_nop
)(&prev_in_use
), where
);
9107 while (--nop_count
);
9111 ofs
= (ofs
+ len
) & (align
- 1);
9112 prev_in_use
= in_use
;
9117 /* Machine dependent reorg pass. */
9122 if (alpha_tp
!= ALPHA_TP_PROG
|| flag_exceptions
)
9123 alpha_handle_trap_shadows ();
9125 /* Due to the number of extra trapb insns, don't bother fixing up
9126 alignment when trap precision is instruction. Moreover, we can
9127 only do our job when sched2 is run. */
9128 if (optimize
&& !optimize_size
9129 && alpha_tp
!= ALPHA_TP_INSN
9130 && flag_schedule_insns_after_reload
)
9132 if (alpha_cpu
== PROCESSOR_EV4
)
9133 alpha_align_insns (8, alphaev4_next_group
, alphaev4_next_nop
);
9134 else if (alpha_cpu
== PROCESSOR_EV5
)
9135 alpha_align_insns (16, alphaev5_next_group
, alphaev5_next_nop
);
9139 #ifdef OBJECT_FORMAT_ELF
9141 /* Switch to the section to which we should output X. The only thing
9142 special we do here is to honor small data. */
9145 alpha_elf_select_rtx_section (mode
, x
, align
)
9146 enum machine_mode mode
;
9148 unsigned HOST_WIDE_INT align
;
9150 if (TARGET_SMALL_DATA
&& GET_MODE_SIZE (mode
) <= g_switch_value
)
9151 /* ??? Consider using mergable sdata sections. */
9154 default_elf_select_rtx_section (mode
, x
, align
);
9157 #endif /* OBJECT_FORMAT_ELF */
9159 /* Structure to collect function names for final output in link section. */
9160 /* Note that items marked with GTY can't be ifdef'ed out. */
9162 enum links_kind
{KIND_UNUSED
, KIND_LOCAL
, KIND_EXTERN
};
9163 enum reloc_kind
{KIND_LINKAGE
, KIND_CODEADDR
};
9165 struct alpha_links
GTY(())
9169 enum links_kind lkind
;
9170 enum reloc_kind rkind
;
9173 struct alpha_funcs
GTY(())
9176 splay_tree
GTY ((param1_is (char *), param2_is (struct alpha_links
*)))
9180 static GTY ((param1_is (char *), param2_is (struct alpha_links
*)))
9181 splay_tree alpha_links_tree
;
9182 static GTY ((param1_is (tree
), param2_is (struct alpha_funcs
*)))
9183 splay_tree alpha_funcs_tree
;
9185 static GTY(()) int alpha_funcs_num
;
9187 #if TARGET_ABI_OPEN_VMS
9189 /* Return the VMS argument type corresponding to MODE. */
/* Float modes map to the VAX (FF/FD) or IEEE (FS/FT) type codes
   depending on -mfloat-vax.  NOTE(review): the switch statement and
   the integer-mode default are missing from this extracted view.  */
9192 alpha_arg_type (mode
)
9193 enum machine_mode mode
;
9198 return TARGET_FLOAT_VAX
? FF
: FS
;
9200 return TARGET_FLOAT_VAX
? FD
: FT
;
9206 /* Return an rtx for an integer representing the VMS Argument Information
/* Low byte carries the argument count; each of the six register args
   gets a 3-bit type code starting at bit 8.  */
9210 alpha_arg_info_reg_val (cum
)
9211 CUMULATIVE_ARGS cum
;
9213 unsigned HOST_WIDE_INT regval
= cum
.num_args
;
/* Pack the per-argument VMS type codes (see alpha_arg_type).  */
9216 for (i
= 0; i
< 6; i
++)
9217 regval
|= ((int) cum
.atypes
[i
]) << (i
* 3 + 8);
9219 return GEN_INT (regval
);
9222 /* Make (or fake) .linkage entry for function call.
9224 IS_LOCAL is 0 if name is used in call, 1 if name is used in definition.
9226 Return an SYMBOL_REF rtx for the linkage. */
9229 alpha_need_linkage (name
, is_local
)
9233 splay_tree_node node
;
9234 struct alpha_links
*al
;
9241 struct alpha_funcs
*cfaf
;
9243 if (!alpha_funcs_tree
)
9244 alpha_funcs_tree
= splay_tree_new_ggc ((splay_tree_compare_fn
)
9245 splay_tree_compare_pointers
);
9247 cfaf
= (struct alpha_funcs
*) ggc_alloc (sizeof (struct alpha_funcs
));
9250 cfaf
->num
= ++alpha_funcs_num
;
9252 splay_tree_insert (alpha_funcs_tree
,
9253 (splay_tree_key
) current_function_decl
,
9254 (splay_tree_value
) cfaf
);
9257 if (alpha_links_tree
)
9259 /* Is this name already defined? */
9261 node
= splay_tree_lookup (alpha_links_tree
, (splay_tree_key
) name
);
9264 al
= (struct alpha_links
*) node
->value
;
9267 /* Defined here but external assumed. */
9268 if (al
->lkind
== KIND_EXTERN
)
9269 al
->lkind
= KIND_LOCAL
;
9273 /* Used here but unused assumed. */
9274 if (al
->lkind
== KIND_UNUSED
)
9275 al
->lkind
= KIND_LOCAL
;
9281 alpha_links_tree
= splay_tree_new_ggc ((splay_tree_compare_fn
) strcmp
);
9283 al
= (struct alpha_links
*) ggc_alloc (sizeof (struct alpha_links
));
9284 name
= ggc_strdup (name
);
9286 /* Assume external if no definition. */
9287 al
->lkind
= (is_local
? KIND_UNUSED
: KIND_EXTERN
);
9289 /* Ensure we have an IDENTIFIER so assemble_name can mark it used. */
9290 get_identifier (name
);
9292 /* Construct a SYMBOL_REF for us to call. */
9294 size_t name_len
= strlen (name
);
9295 char *linksym
= alloca (name_len
+ 6);
9297 memcpy (linksym
+ 1, name
, name_len
);
9298 memcpy (linksym
+ 1 + name_len
, "..lk", 5);
9299 al
->linkage
= gen_rtx_SYMBOL_REF (Pmode
,
9300 ggc_alloc_string (linksym
, name_len
+ 5));
9303 splay_tree_insert (alpha_links_tree
, (splay_tree_key
) name
,
9304 (splay_tree_value
) al
);
9310 alpha_use_linkage (linkage
, cfundecl
, lflag
, rflag
)
9316 splay_tree_node cfunnode
;
9317 struct alpha_funcs
*cfaf
;
9318 struct alpha_links
*al
;
9319 const char *name
= XSTR (linkage
, 0);
9321 cfaf
= (struct alpha_funcs
*) 0;
9322 al
= (struct alpha_links
*) 0;
9324 cfunnode
= splay_tree_lookup (alpha_funcs_tree
, (splay_tree_key
) cfundecl
);
9325 cfaf
= (struct alpha_funcs
*) cfunnode
->value
;
9329 splay_tree_node lnode
;
9331 /* Is this name already defined? */
9333 lnode
= splay_tree_lookup (cfaf
->links
, (splay_tree_key
) name
);
9335 al
= (struct alpha_links
*) lnode
->value
;
9338 cfaf
->links
= splay_tree_new_ggc ((splay_tree_compare_fn
) strcmp
);
9346 splay_tree_node node
= 0;
9347 struct alpha_links
*anl
;
9352 name_len
= strlen (name
);
9354 al
= (struct alpha_links
*) ggc_alloc (sizeof (struct alpha_links
));
9355 al
->num
= cfaf
->num
;
9357 node
= splay_tree_lookup (alpha_links_tree
, (splay_tree_key
) name
);
9360 anl
= (struct alpha_links
*) node
->value
;
9361 al
->lkind
= anl
->lkind
;
9364 sprintf (buf
, "$%d..%s..lk", cfaf
->num
, name
);
9365 buflen
= strlen (buf
);
9366 linksym
= alloca (buflen
+ 1);
9367 memcpy (linksym
, buf
, buflen
+ 1);
9369 al
->linkage
= gen_rtx_SYMBOL_REF
9370 (Pmode
, ggc_alloc_string (linksym
, buflen
+ 1));
9372 splay_tree_insert (cfaf
->links
, (splay_tree_key
) name
,
9373 (splay_tree_value
) al
);
9377 al
->rkind
= KIND_CODEADDR
;
9379 al
->rkind
= KIND_LINKAGE
;
9382 return gen_rtx_MEM (Pmode
, plus_constant (al
->linkage
, 8));
/* splay_tree_foreach callback: emit one VMS linkage-section entry to
   the FILE passed in DATA.  KIND_CODEADDR entries get a single quad
   (code address); otherwise a two-quad linkage pair is built, with
   local definitions referenced via the "..en" entry symbol and
   externals left to the linker via .code_address / .linkage.
   NOTE(review): braces and the surrounding else lines are missing
   from this extracted view.  */
9388 alpha_write_one_linkage (node
, data
)
9389 splay_tree_node node
;
9392 const char *const name
= (const char *) node
->key
;
9393 struct alpha_links
*link
= (struct alpha_links
*) node
->value
;
9394 FILE *stream
= (FILE *) data
;
/* Label for this entry: $<funcnum>..<name>..lk.  */
9396 fprintf (stream
, "$%d..%s..lk:\n", link
->num
, name
);
9397 if (link
->rkind
== KIND_CODEADDR
)
9399 if (link
->lkind
== KIND_LOCAL
)
9401 /* Local and used */
9402 fprintf (stream
, "\t.quad %s..en\n", name
);
9406 /* External and used, request code address. */
9407 fprintf (stream
, "\t.code_address %s\n", name
);
9412 if (link
->lkind
== KIND_LOCAL
)
9414 /* Local and used, build linkage pair. */
9415 fprintf (stream
, "\t.quad %s..en\n", name
);
9416 fprintf (stream
, "\t.quad %s\n", name
);
9420 /* External and used, request linkage pair. */
9421 fprintf (stream
, "\t.linkage %s\n", name
);
9429 alpha_write_linkage (stream
, funname
, fundecl
)
9431 const char *funname
;
9434 splay_tree_node node
;
9435 struct alpha_funcs
*func
;
9438 fprintf (stream
, "\t.align 3\n");
9439 node
= splay_tree_lookup (alpha_funcs_tree
, (splay_tree_key
) fundecl
);
9440 func
= (struct alpha_funcs
*) node
->value
;
9442 fputs ("\t.name ", stream
);
9443 assemble_name (stream
, funname
);
9444 fputs ("..na\n", stream
);
9445 ASM_OUTPUT_LABEL (stream
, funname
);
9446 fprintf (stream
, "\t.pdesc ");
9447 assemble_name (stream
, funname
);
9448 fprintf (stream
, "..en,%s\n",
9449 alpha_procedure_type
== PT_STACK
? "stack"
9450 : alpha_procedure_type
== PT_REGISTER
? "reg" : "null");
9454 splay_tree_foreach (func
->links
, alpha_write_one_linkage
, stream
);
9455 /* splay_tree_delete (func->links); */
9459 /* Given a decl, a section name, and whether the decl initializer
9460 has relocs, choose attributes for the section. */
9462 #define SECTION_VMS_OVERLAY SECTION_FORGET
9463 #define SECTION_VMS_GLOBAL SECTION_MACH_DEP
9464 #define SECTION_VMS_INITIALIZE (SECTION_VMS_GLOBAL << 1)
9467 vms_section_type_flags (decl
, name
, reloc
)
9472 unsigned int flags
= default_section_type_flags (decl
, name
, reloc
);
9474 if (decl
&& DECL_ATTRIBUTES (decl
)
9475 && lookup_attribute ("overlaid", DECL_ATTRIBUTES (decl
)))
9476 flags
|= SECTION_VMS_OVERLAY
;
9477 if (decl
&& DECL_ATTRIBUTES (decl
)
9478 && lookup_attribute ("global", DECL_ATTRIBUTES (decl
)))
9479 flags
|= SECTION_VMS_GLOBAL
;
9480 if (decl
&& DECL_ATTRIBUTES (decl
)
9481 && lookup_attribute ("initialize", DECL_ATTRIBUTES (decl
)))
9482 flags
|= SECTION_VMS_INITIALIZE
;
9487 /* Switch to an arbitrary section NAME with attributes as specified
9488 by FLAGS. ALIGN specifies any known alignment requirements for
9489 the section; 0 if the default should be used. */
9492 vms_asm_named_section (name
, flags
)
9496 fputc ('\n', asm_out_file
);
9497 fprintf (asm_out_file
, ".section\t%s", name
);
9499 if (flags
& SECTION_VMS_OVERLAY
)
9500 fprintf (asm_out_file
, ",OVR");
9501 if (flags
& SECTION_VMS_GLOBAL
)
9502 fprintf (asm_out_file
, ",GBL");
9503 if (flags
& SECTION_VMS_INITIALIZE
)
9504 fprintf (asm_out_file
, ",NOMOD");
9505 if (flags
& SECTION_DEBUG
)
9506 fprintf (asm_out_file
, ",NOWRT");
9508 fputc ('\n', asm_out_file
);
9511 /* Record an element in the table of global constructors. SYMBOL is
9512 a SYMBOL_REF of the function to be called; PRIORITY is a number
9513 between 0 and MAX_INIT_PRIORITY.
9515 Differs from default_ctors_section_asm_out_constructor in that the
9516 width of the .ctors entry is always 64 bits, rather than the 32 bits
9517 used by a normal pointer. */
9520 vms_asm_out_constructor (symbol
, priority
)
9522 int priority ATTRIBUTE_UNUSED
;
9525 assemble_align (BITS_PER_WORD
);
9526 assemble_integer (symbol
, UNITS_PER_WORD
, BITS_PER_WORD
, 1);
9530 vms_asm_out_destructor (symbol
, priority
)
9532 int priority ATTRIBUTE_UNUSED
;
9535 assemble_align (BITS_PER_WORD
);
9536 assemble_integer (symbol
, UNITS_PER_WORD
, BITS_PER_WORD
, 1);
9541 alpha_need_linkage (name
, is_local
)
9542 const char *name ATTRIBUTE_UNUSED
;
9543 int is_local ATTRIBUTE_UNUSED
;
9549 alpha_use_linkage (linkage
, cfundecl
, lflag
, rflag
)
9550 rtx linkage ATTRIBUTE_UNUSED
;
9551 tree cfundecl ATTRIBUTE_UNUSED
;
9552 int lflag ATTRIBUTE_UNUSED
;
9553 int rflag ATTRIBUTE_UNUSED
;
9558 #endif /* TARGET_ABI_OPEN_VMS */
9560 #if TARGET_ABI_UNICOSMK
9562 static void unicosmk_output_module_name
PARAMS ((FILE *));
9563 static void unicosmk_output_default_externs
PARAMS ((FILE *));
9564 static void unicosmk_output_dex
PARAMS ((FILE *));
9565 static void unicosmk_output_externs
PARAMS ((FILE *));
9566 static void unicosmk_output_addr_vec
PARAMS ((FILE *, rtx
));
9567 static const char *unicosmk_ssib_name
PARAMS ((void));
9568 static int unicosmk_special_name
PARAMS ((const char *));
9570 /* Define the offset between two registers, one to be eliminated, and the
9571 other its replacement, at the start of a routine. */
9574 unicosmk_initial_elimination_offset (from
, to
)
9580 fixed_size
= alpha_sa_size();
9581 if (fixed_size
!= 0)
9584 if (from
== FRAME_POINTER_REGNUM
&& to
== HARD_FRAME_POINTER_REGNUM
)
9586 else if (from
== ARG_POINTER_REGNUM
&& to
== HARD_FRAME_POINTER_REGNUM
)
9588 else if (from
== FRAME_POINTER_REGNUM
&& to
== STACK_POINTER_REGNUM
)
9589 return (ALPHA_ROUND (current_function_outgoing_args_size
)
9590 + ALPHA_ROUND (get_frame_size()));
9591 else if (from
== ARG_POINTER_REGNUM
&& to
== STACK_POINTER_REGNUM
)
9592 return (ALPHA_ROUND (fixed_size
)
9593 + ALPHA_ROUND (get_frame_size()
9594 + current_function_outgoing_args_size
));
9599 /* Output the module name for .ident and .end directives. We have to strip
9600 directories and add make sure that the module name starts with a letter
9604 unicosmk_output_module_name (file
)
9609 /* Strip directories. */
9611 name
= strrchr (main_input_filename
, '/');
9615 name
= main_input_filename
;
9617 /* CAM only accepts module names that start with a letter or '$'. We
9618 prefix the module name with a '$' if necessary. */
9620 if (!ISALPHA (*name
))
9622 output_clean_symbol_name (file
, name
);
9625 /* Output text that to appear at the beginning of an assembler file. */
9628 unicosmk_asm_file_start (file
)
9633 fputs ("\t.ident\t", file
);
9634 unicosmk_output_module_name (file
);
9635 fputs ("\n\n", file
);
9637 /* The Unicos/Mk assembler uses different register names. Instead of trying
9638 to support them, we simply use micro definitions. */
9640 /* CAM has different register names: rN for the integer register N and fN
9641 for the floating-point register N. Instead of trying to use these in
9642 alpha.md, we define the symbols $N and $fN to refer to the appropriate
9645 for (i
= 0; i
< 32; ++i
)
9646 fprintf (file
, "$%d <- r%d\n", i
, i
);
9648 for (i
= 0; i
< 32; ++i
)
9649 fprintf (file
, "$f%d <- f%d\n", i
, i
);
9653 /* The .align directive fill unused space with zeroes which does not work
9654 in code sections. We define the macro 'gcc@code@align' which uses nops
9655 instead. Note that it assumes that code sections always have the
9656 biggest possible alignment since . refers to the current offset from
9657 the beginning of the section. */
9659 fputs ("\t.macro gcc@code@align n\n", file
);
9660 fputs ("gcc@n@bytes = 1 << n\n", file
);
9661 fputs ("gcc@here = . % gcc@n@bytes\n", file
);
9662 fputs ("\t.if ne, gcc@here, 0\n", file
);
9663 fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", file
);
9664 fputs ("\tbis r31,r31,r31\n", file
);
9665 fputs ("\t.endr\n", file
);
9666 fputs ("\t.endif\n", file
);
9667 fputs ("\t.endm gcc@code@align\n\n", file
);
9669 /* Output extern declarations which should always be visible. */
9670 unicosmk_output_default_externs (file
);
9672 /* Open a dummy section. We always need to be inside a section for the
9673 section-switching code to work correctly.
9674 ??? This should be a module id or something like that. I still have to
9675 figure out what the rules for those are. */
9676 fputs ("\n\t.psect\t$SG00000,data\n", file
);
9679 /* Output text to appear at the end of an assembler file. This includes all
9680 pending extern declarations and DEX expressions. */
9683 unicosmk_asm_file_end (file
)
9686 fputs ("\t.endp\n\n", file
);
9688 /* Output all pending externs. */
9690 unicosmk_output_externs (file
);
9692 /* Output dex definitions used for functions whose names conflict with
9695 unicosmk_output_dex (file
);
9697 fputs ("\t.end\t", file
);
9698 unicosmk_output_module_name (file
);
9702 /* Output the definition of a common variable. */
9705 unicosmk_output_common (file
, name
, size
, align
)
9712 printf ("T3E__: common %s\n", name
);
9715 fputs("\t.endp\n\n\t.psect ", file
);
9716 assemble_name(file
, name
);
9717 fprintf(file
, ",%d,common\n", floor_log2 (align
/ BITS_PER_UNIT
));
9718 fprintf(file
, "\t.byte\t0:%d\n", size
);
9720 /* Mark the symbol as defined in this module. */
9721 name_tree
= get_identifier (name
);
9722 TREE_ASM_WRITTEN (name_tree
) = 1;
9725 #define SECTION_PUBLIC SECTION_MACH_DEP
9726 #define SECTION_MAIN (SECTION_PUBLIC << 1)
9727 static int current_section_align
;
9730 unicosmk_section_type_flags (decl
, name
, reloc
)
9733 int reloc ATTRIBUTE_UNUSED
;
9735 unsigned int flags
= default_section_type_flags (decl
, name
, reloc
);
9740 if (TREE_CODE (decl
) == FUNCTION_DECL
)
9742 current_section_align
= floor_log2 (FUNCTION_BOUNDARY
/ BITS_PER_UNIT
);
9743 if (align_functions_log
> current_section_align
)
9744 current_section_align
= align_functions_log
;
9746 if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl
)), "main"))
9747 flags
|= SECTION_MAIN
;
9750 current_section_align
= floor_log2 (DECL_ALIGN (decl
) / BITS_PER_UNIT
);
9752 if (TREE_PUBLIC (decl
))
9753 flags
|= SECTION_PUBLIC
;
9758 /* Generate a section name for decl and associate it with the
9762 unicosmk_unique_section (decl
, reloc
)
9764 int reloc ATTRIBUTE_UNUSED
;
9772 name
= IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl
));
9773 name
= default_strip_name_encoding (name
);
9774 len
= strlen (name
);
9776 if (TREE_CODE (decl
) == FUNCTION_DECL
)
9780 /* It is essential that we prefix the section name here because
9781 otherwise the section names generated for constructors and
9782 destructors confuse collect2. */
9784 string
= alloca (len
+ 6);
9785 sprintf (string
, "code@%s", name
);
9786 DECL_SECTION_NAME (decl
) = build_string (len
+ 5, string
);
9788 else if (TREE_PUBLIC (decl
))
9789 DECL_SECTION_NAME (decl
) = build_string (len
, name
);
9794 string
= alloca (len
+ 6);
9795 sprintf (string
, "data@%s", name
);
9796 DECL_SECTION_NAME (decl
) = build_string (len
+ 5, string
);
9800 /* Switch to an arbitrary section NAME with attributes as specified
9801 by FLAGS. ALIGN specifies any known alignment requirements for
9802 the section; 0 if the default should be used. */
9805 unicosmk_asm_named_section (name
, flags
)
9811 /* Close the previous section. */
9813 fputs ("\t.endp\n\n", asm_out_file
);
9815 /* Find out what kind of section we are opening. */
9817 if (flags
& SECTION_MAIN
)
9818 fputs ("\t.start\tmain\n", asm_out_file
);
9820 if (flags
& SECTION_CODE
)
9822 else if (flags
& SECTION_PUBLIC
)
9827 if (current_section_align
!= 0)
9828 fprintf (asm_out_file
, "\t.psect\t%s,%d,%s\n", name
,
9829 current_section_align
, kind
);
9831 fprintf (asm_out_file
, "\t.psect\t%s,%s\n", name
, kind
);
9835 unicosmk_insert_attributes (decl
, attr_ptr
)
9837 tree
*attr_ptr ATTRIBUTE_UNUSED
;
9840 && (TREE_PUBLIC (decl
) || TREE_CODE (decl
) == FUNCTION_DECL
))
9841 unicosmk_unique_section (decl
, 0);
9844 /* Output an alignment directive. We have to use the macro 'gcc@code@align'
9845 in code sections because .align fill unused space with zeroes. */
9848 unicosmk_output_align (file
, align
)
9852 if (inside_function
)
9853 fprintf (file
, "\tgcc@code@align\t%d\n", align
);
9855 fprintf (file
, "\t.align\t%d\n", align
);
9858 /* Add a case vector to the current function's list of deferred case
9859 vectors. Case vectors have to be put into a separate section because CAM
9860 does not allow data definitions in code sections. */
9863 unicosmk_defer_case_vector (lab
, vec
)
9867 struct machine_function
*machine
= cfun
->machine
;
9869 vec
= gen_rtx_EXPR_LIST (VOIDmode
, lab
, vec
);
9870 machine
->addr_list
= gen_rtx_EXPR_LIST (VOIDmode
, vec
,
9871 machine
->addr_list
);
9874 /* Output a case vector. */
9877 unicosmk_output_addr_vec (file
, vec
)
9881 rtx lab
= XEXP (vec
, 0);
9882 rtx body
= XEXP (vec
, 1);
9883 int vlen
= XVECLEN (body
, 0);
9886 (*targetm
.asm_out
.internal_label
) (file
, "L", CODE_LABEL_NUMBER (lab
));
9888 for (idx
= 0; idx
< vlen
; idx
++)
9890 ASM_OUTPUT_ADDR_VEC_ELT
9891 (file
, CODE_LABEL_NUMBER (XEXP (XVECEXP (body
, 0, idx
), 0)));
9895 /* Output current function's deferred case vectors. */
9898 unicosmk_output_deferred_case_vectors (file
)
9901 struct machine_function
*machine
= cfun
->machine
;
9904 if (machine
->addr_list
== NULL_RTX
)
9908 for (t
= machine
->addr_list
; t
; t
= XEXP (t
, 1))
9909 unicosmk_output_addr_vec (file
, XEXP (t
, 0));
9912 /* Set up the dynamic subprogram information block (DSIB) and update the
9913 frame pointer register ($15) for subroutines which have a frame. If the
9914 subroutine doesn't have a frame, simply increment $15. */
9917 unicosmk_gen_dsib (imaskP
)
9918 unsigned long * imaskP
;
9920 if (alpha_procedure_type
== PT_STACK
)
9922 const char *ssib_name
;
9925 /* Allocate 64 bytes for the DSIB. */
9927 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx
, stack_pointer_rtx
,
9929 emit_insn (gen_blockage ());
9931 /* Save the return address. */
9933 mem
= gen_rtx_MEM (DImode
, plus_constant (stack_pointer_rtx
, 56));
9934 set_mem_alias_set (mem
, alpha_sr_alias_set
);
9935 FRP (emit_move_insn (mem
, gen_rtx_REG (DImode
, REG_RA
)));
9936 (*imaskP
) &= ~(1UL << REG_RA
);
9938 /* Save the old frame pointer. */
9940 mem
= gen_rtx_MEM (DImode
, plus_constant (stack_pointer_rtx
, 48));
9941 set_mem_alias_set (mem
, alpha_sr_alias_set
);
9942 FRP (emit_move_insn (mem
, hard_frame_pointer_rtx
));
9943 (*imaskP
) &= ~(1UL << HARD_FRAME_POINTER_REGNUM
);
9945 emit_insn (gen_blockage ());
9947 /* Store the SSIB pointer. */
9949 ssib_name
= ggc_strdup (unicosmk_ssib_name ());
9950 mem
= gen_rtx_MEM (DImode
, plus_constant (stack_pointer_rtx
, 32));
9951 set_mem_alias_set (mem
, alpha_sr_alias_set
);
9953 FRP (emit_move_insn (gen_rtx_REG (DImode
, 5),
9954 gen_rtx_SYMBOL_REF (Pmode
, ssib_name
)));
9955 FRP (emit_move_insn (mem
, gen_rtx_REG (DImode
, 5)));
9957 /* Save the CIW index. */
9959 mem
= gen_rtx_MEM (DImode
, plus_constant (stack_pointer_rtx
, 24));
9960 set_mem_alias_set (mem
, alpha_sr_alias_set
);
9961 FRP (emit_move_insn (mem
, gen_rtx_REG (DImode
, 25)));
9963 emit_insn (gen_blockage ());
9965 /* Set the new frame pointer. */
9967 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx
,
9968 stack_pointer_rtx
, GEN_INT (64))));
9973 /* Increment the frame pointer register to indicate that we do not
9976 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx
,
9977 hard_frame_pointer_rtx
, GEN_INT (1))));
9981 #define SSIB_PREFIX "__SSIB_"
9982 #define SSIB_PREFIX_LEN 7
9984 /* Generate the name of the SSIB section for the current function. */
9987 unicosmk_ssib_name ()
9989 /* This is ok since CAM won't be able to deal with names longer than that
9992 static char name
[256];
9998 x
= DECL_RTL (cfun
->decl
);
9999 if (GET_CODE (x
) != MEM
)
10002 if (GET_CODE (x
) != SYMBOL_REF
)
10004 fnname
= default_name_encoding (XSTR (x
, 0));
10006 len
= strlen (fnname
);
10007 if (len
+ SSIB_PREFIX_LEN
> 255)
10008 len
= 255 - SSIB_PREFIX_LEN
;
10010 strcpy (name
, SSIB_PREFIX
);
10011 strncpy (name
+ SSIB_PREFIX_LEN
, fnname
, len
);
10012 name
[len
+ SSIB_PREFIX_LEN
] = 0;
10017 /* Output the static subroutine information block for the current
10021 unicosmk_output_ssib (file
, fnname
)
10023 const char *fnname
;
10029 struct machine_function
*machine
= cfun
->machine
;
10032 fprintf (file
, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix
,
10033 unicosmk_ssib_name ());
10035 /* Some required stuff and the function name length. */
10037 len
= strlen (fnname
);
10038 fprintf (file
, "\t.quad\t^X20008%2.2X28\n", len
);
10041 ??? We don't do that yet. */
10043 fputs ("\t.quad\t0\n", file
);
10045 /* Function address. */
10047 fputs ("\t.quad\t", file
);
10048 assemble_name (file
, fnname
);
10051 fputs ("\t.quad\t0\n", file
);
10052 fputs ("\t.quad\t0\n", file
);
10055 ??? We do it the same way Cray CC does it but this could be
10058 for( i
= 0; i
< len
; i
++ )
10059 fprintf (file
, "\t.byte\t%d\n", (int)(fnname
[i
]));
10060 if( (len
% 8) == 0 )
10061 fputs ("\t.quad\t0\n", file
);
10063 fprintf (file
, "\t.bits\t%d : 0\n", (8 - (len
% 8))*8);
10065 /* All call information words used in the function. */
10067 for (x
= machine
->first_ciw
; x
; x
= XEXP (x
, 1))
10070 fprintf (file
, "\t.quad\t");
10071 #if HOST_BITS_PER_WIDE_INT == 32
10072 fprintf (file
, HOST_WIDE_INT_PRINT_DOUBLE_HEX
,
10073 CONST_DOUBLE_HIGH (ciw
), CONST_DOUBLE_LOW (ciw
));
10075 fprintf (file
, HOST_WIDE_INT_PRINT_HEX
, INTVAL (ciw
));
10077 fprintf (file
, "\n");
10081 /* Add a call information word (CIW) to the list of the current function's
10082 CIWs and return its index.
10084 X is a CONST_INT or CONST_DOUBLE representing the CIW. */
10087 unicosmk_add_call_info_word (x
)
10091 struct machine_function
*machine
= cfun
->machine
;
10093 node
= gen_rtx_EXPR_LIST (VOIDmode
, x
, NULL_RTX
);
10094 if (machine
->first_ciw
== NULL_RTX
)
10095 machine
->first_ciw
= node
;
10097 XEXP (machine
->last_ciw
, 1) = node
;
10099 machine
->last_ciw
= node
;
10100 ++machine
->ciw_count
;
10102 return GEN_INT (machine
->ciw_count
10103 + strlen (current_function_name
)/8 + 5);
10106 static char unicosmk_section_buf
[100];
10109 unicosmk_text_section ()
10111 static int count
= 0;
10112 sprintf (unicosmk_section_buf
, "\t.endp\n\n\t.psect\tgcc@text___%d,code",
10114 return unicosmk_section_buf
;
10118 unicosmk_data_section ()
10120 static int count
= 1;
10121 sprintf (unicosmk_section_buf
, "\t.endp\n\n\t.psect\tgcc@data___%d,data",
10123 return unicosmk_section_buf
;
10126 /* The Cray assembler doesn't accept extern declarations for symbols which
10127 are defined in the same file. We have to keep track of all global
10128 symbols which are referenced and/or defined in a source file and output
10129 extern declarations for those which are referenced but not defined at
10130 the end of file. */
10132 /* List of identifiers for which an extern declaration might have to be
10134 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10136 struct unicosmk_extern_list
10138 struct unicosmk_extern_list
*next
;
10142 static struct unicosmk_extern_list
*unicosmk_extern_head
= 0;
10144 /* Output extern declarations which are required for every asm file. */
10147 unicosmk_output_default_externs (file
)
10150 static const char *const externs
[] =
10151 { "__T3E_MISMATCH" };
10156 n
= ARRAY_SIZE (externs
);
10158 for (i
= 0; i
< n
; i
++)
10159 fprintf (file
, "\t.extern\t%s\n", externs
[i
]);
10162 /* Output extern declarations for global symbols which are have been
10163 referenced but not defined. */
10166 unicosmk_output_externs (file
)
10169 struct unicosmk_extern_list
*p
;
10170 const char *real_name
;
10174 len
= strlen (user_label_prefix
);
10175 for (p
= unicosmk_extern_head
; p
!= 0; p
= p
->next
)
10177 /* We have to strip the encoding and possibly remove user_label_prefix
10178 from the identifier in order to handle -fleading-underscore and
10179 explicit asm names correctly (cf. gcc.dg/asm-names-1.c). */
10180 real_name
= default_strip_name_encoding (p
->name
);
10181 if (len
&& p
->name
[0] == '*'
10182 && !memcmp (real_name
, user_label_prefix
, len
))
10185 name_tree
= get_identifier (real_name
);
10186 if (! TREE_ASM_WRITTEN (name_tree
))
10188 TREE_ASM_WRITTEN (name_tree
) = 1;
10189 fputs ("\t.extern\t", file
);
10190 assemble_name (file
, p
->name
);
10196 /* Record an extern. */
10199 unicosmk_add_extern (name
)
10202 struct unicosmk_extern_list
*p
;
10204 p
= (struct unicosmk_extern_list
*)
10205 xmalloc (sizeof (struct unicosmk_extern_list
));
10206 p
->next
= unicosmk_extern_head
;
10208 unicosmk_extern_head
= p
;
10211 /* The Cray assembler generates incorrect code if identifiers which
10212 conflict with register names are used as instruction operands. We have
10213 to replace such identifiers with DEX expressions. */
10215 /* Structure to collect identifiers which have been replaced by DEX
10217 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10219 struct unicosmk_dex
{
10220 struct unicosmk_dex
*next
;
10224 /* List of identifiers which have been replaced by DEX expressions. The DEX
10225 number is determined by the position in the list. */
10227 static struct unicosmk_dex
*unicosmk_dex_list
= NULL
;
10229 /* The number of elements in the DEX list. */
10231 static int unicosmk_dex_count
= 0;
10233 /* Check if NAME must be replaced by a DEX expression. */
10236 unicosmk_special_name (name
)
10239 if (name
[0] == '*')
10242 if (name
[0] == '$')
10245 if (name
[0] != 'r' && name
[0] != 'f' && name
[0] != 'R' && name
[0] != 'F')
10250 case '1': case '2':
10251 return (name
[2] == '\0' || (ISDIGIT (name
[2]) && name
[3] == '\0'));
10254 return (name
[2] == '\0'
10255 || ((name
[2] == '0' || name
[2] == '1') && name
[3] == '\0'));
10258 return (ISDIGIT (name
[1]) && name
[2] == '\0');
10262 /* Return the DEX number if X must be replaced by a DEX expression and 0
10266 unicosmk_need_dex (x
)
10269 struct unicosmk_dex
*dex
;
10273 if (GET_CODE (x
) != SYMBOL_REF
)
10277 if (! unicosmk_special_name (name
))
10280 i
= unicosmk_dex_count
;
10281 for (dex
= unicosmk_dex_list
; dex
; dex
= dex
->next
)
10283 if (! strcmp (name
, dex
->name
))
10288 dex
= (struct unicosmk_dex
*) xmalloc (sizeof (struct unicosmk_dex
));
10290 dex
->next
= unicosmk_dex_list
;
10291 unicosmk_dex_list
= dex
;
10293 ++unicosmk_dex_count
;
10294 return unicosmk_dex_count
;
10297 /* Output the DEX definitions for this file. */
10300 unicosmk_output_dex (file
)
10303 struct unicosmk_dex
*dex
;
10306 if (unicosmk_dex_list
== NULL
)
10309 fprintf (file
, "\t.dexstart\n");
10311 i
= unicosmk_dex_count
;
10312 for (dex
= unicosmk_dex_list
; dex
; dex
= dex
->next
)
10314 fprintf (file
, "\tDEX (%d) = ", i
);
10315 assemble_name (file
, dex
->name
);
10320 fprintf (file
, "\t.dexend\n");
10326 unicosmk_output_deferred_case_vectors (file
)
10327 FILE *file ATTRIBUTE_UNUSED
;
10331 unicosmk_gen_dsib (imaskP
)
10332 unsigned long * imaskP ATTRIBUTE_UNUSED
;
10336 unicosmk_output_ssib (file
, fnname
)
10337 FILE * file ATTRIBUTE_UNUSED
;
10338 const char * fnname ATTRIBUTE_UNUSED
;
10342 unicosmk_add_call_info_word (x
)
10343 rtx x ATTRIBUTE_UNUSED
;
10349 unicosmk_need_dex (x
)
10350 rtx x ATTRIBUTE_UNUSED
;
10355 #endif /* TARGET_ABI_UNICOSMK */
10357 #include "gt-alpha.h"